-rw-r--r--board/servo_v4/usb_pd_policy.c7
-rw-r--r--common/usb_common.c190
-rw-r--r--common/usb_pd_policy.c141
-rw-r--r--common/usb_pd_protocol.c8
-rw-r--r--common/usbc/build.mk1
-rw-r--r--common/usbc/usb_pe_ctvpd_sm.c19
-rw-r--r--common/usbc/usb_pe_drp_sm.c4883
-rw-r--r--common/usbc/usb_prl_sm.c290
-rw-r--r--common/usbc/usb_tc_drp_acc_trysrc_sm.c1389
-rw-r--r--include/config.h18
-rw-r--r--include/usb_common.h38
-rw-r--r--include/usb_pd.h94
-rw-r--r--include/usb_pe_sm.h94
-rw-r--r--include/usb_prl_sm.h15
-rw-r--r--include/usb_tc_sm.h211
-rw-r--r--test/usb_prl.c34
16 files changed, 7062 insertions, 370 deletions
diff --git a/board/servo_v4/usb_pd_policy.c b/board/servo_v4/usb_pd_policy.c
index 3c49eefeda..07008c0cca 100644
--- a/board/servo_v4/usb_pd_policy.c
+++ b/board/servo_v4/usb_pd_policy.c
@@ -17,6 +17,7 @@
#include "tcpm.h"
#include "timer.h"
#include "util.h"
+#include "usb_common.h"
#include "usb_mux.h"
#include "usb_pd.h"
#include "usb_pd_config.h"
@@ -287,8 +288,10 @@ static void update_ports(void)
break;
/* Find the 'best' PDO <= voltage */
- pdo_index = pd_find_pdo_index(
- CHG, pd_src_voltages_mv[i], &pdo);
+ pdo_index =
+ pd_find_pdo_index(pd_get_src_cap_cnt(CHG),
+ pd_get_src_caps(CHG),
+ pd_src_voltages_mv[i], &pdo);
/* Don't duplicate PDOs */
if (pdo_index == snk_index)
continue;
diff --git a/common/usb_common.c b/common/usb_common.c
index 86c85898dd..c060a5e006 100644
--- a/common/usb_common.c
+++ b/common/usb_common.c
@@ -12,6 +12,7 @@
#include "charge_state.h"
#include "usb_pd.h"
#include "usb_pd_tcpm.h"
+#include "util.h"
int usb_get_battery_soc(void)
{
@@ -81,3 +82,192 @@ enum pd_cc_polarity_type get_snk_polarity(enum tcpc_cc_voltage_status cc1,
*/
return cc2 > cc1;
}
+
+/*
+ * Zinger implements a board-specific USB policy that does not define
+ * PD_MAX_VOLTAGE_MV and PD_OPERATING_POWER_MW and, in turn, does not
+ * use the following functions.
+ */
+#if defined(PD_MAX_VOLTAGE_MV) && defined(PD_OPERATING_POWER_MW)
+int pd_find_pdo_index(uint32_t src_cap_cnt, const uint32_t * const src_caps,
+ int max_mv, uint32_t *selected_pdo)
+{
+ int i, uw, mv;
+ int ret = 0;
+ int cur_uw = 0;
+ int prefer_cur;
+
+ int __attribute__((unused)) cur_mv = 0;
+
+	/* max voltage is always limited by this board's max request */
+ max_mv = MIN(max_mv, PD_MAX_VOLTAGE_MV);
+
+ /* Get max power that is under our max voltage input */
+ for (i = 0; i < src_cap_cnt; i++) {
+		/* it's an unsupported Augmented PDO (PD3.0) */
+ if ((src_caps[i] & PDO_TYPE_MASK) == PDO_TYPE_AUGMENTED)
+ continue;
+
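+		/*
+		 * Bits 19:10 of a Fixed, Variable or Battery PDO carry a
+		 * voltage in 50 mV units (the supply voltage for Fixed PDOs,
+		 * the minimum voltage for Variable/Battery PDOs). Bits 9:0
+		 * carry the max current in 10 mA units, or the max power in
+		 * 250 mW units for Battery PDOs.
+		 */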
+ mv = ((src_caps[i] >> 10) & 0x3FF) * 50;
+ /* Skip invalid voltage */
+ if (!mv)
+ continue;
+ /* Skip any voltage not supported by this board */
+ if (!pd_is_valid_input_voltage(mv))
+ continue;
+
+ if ((src_caps[i] & PDO_TYPE_MASK) == PDO_TYPE_BATTERY) {
+ uw = 250000 * (src_caps[i] & 0x3FF);
+ } else {
+ int ma = (src_caps[i] & 0x3FF) * 10;
+
+ ma = MIN(ma, PD_MAX_CURRENT_MA);
+ uw = ma * mv;
+ }
+
+ if (mv > max_mv)
+ continue;
+ uw = MIN(uw, PD_MAX_POWER_MW * 1000);
+ prefer_cur = 0;
+
+ /* Apply special rules in case of 'tie' */
+ if (IS_ENABLED(PD_PREFER_LOW_VOLTAGE)) {
+ if (uw == cur_uw && mv < cur_mv)
+ prefer_cur = 1;
+ } else if (IS_ENABLED(PD_PREFER_HIGH_VOLTAGE)) {
+ if (uw == cur_uw && mv > cur_mv)
+ prefer_cur = 1;
+ }
+
+ /* Prefer higher power, except for tiebreaker */
+ if (uw > cur_uw || prefer_cur) {
+ ret = i;
+ cur_uw = uw;
+ cur_mv = mv;
+ }
+ }
+
+ if (selected_pdo)
+ *selected_pdo = src_caps[ret];
+
+ return ret;
+}
+
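+/*
+ * Decode the voltage a PDO offers and the current this board may draw from
+ * it. For example, a Fixed PDO advertising 9 V / 3 A encodes 9000 / 50 = 180
+ * in bits 19:10 and 3000 / 10 = 300 in bits 9:0, so this returns *mv = 9000
+ * and *ma = 3000 (subject to the PD_MAX_POWER_MW and PD_MAX_CURRENT_MA
+ * limits for this board).
+ */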
+void pd_extract_pdo_power(uint32_t pdo, uint32_t *ma, uint32_t *mv)
+{
+ int max_ma, uw;
+
+ *mv = ((pdo >> 10) & 0x3FF) * 50;
+
+ if (*mv == 0) {
+ *ma = 0;
+ return;
+ }
+
+ if ((pdo & PDO_TYPE_MASK) == PDO_TYPE_BATTERY) {
+ uw = 250000 * (pdo & 0x3FF);
+ max_ma = 1000 * MIN(1000 * uw, PD_MAX_POWER_MW) / *mv;
+ } else {
+ max_ma = 10 * (pdo & 0x3FF);
+ max_ma = MIN(max_ma, PD_MAX_POWER_MW * 1000 / *mv);
+ }
+
+ *ma = MIN(max_ma, PD_MAX_CURRENT_MA);
+}
+
+void pd_build_request(uint32_t src_cap_cnt, const uint32_t * const src_caps,
+ int32_t vpd_vdo, uint32_t *rdo, uint32_t *ma,
+ uint32_t *mv, enum pd_request_type req_type,
+ uint32_t max_request_mv)
+{
+ uint32_t pdo;
+ int pdo_index, flags = 0;
+ int uw;
+ int max_or_min_ma;
+ int max_or_min_mw;
+ int max_vbus;
+ int vpd_vbus_dcr;
+ int vpd_gnd_dcr;
+
+ if (req_type == PD_REQUEST_VSAFE5V) {
+ /* src cap 0 should be vSafe5V */
+ pdo_index = 0;
+ pdo = src_caps[0];
+ } else {
+ /* find pdo index for max voltage we can request */
+ pdo_index = pd_find_pdo_index(src_cap_cnt, src_caps,
+ max_request_mv, &pdo);
+ }
+
+ pd_extract_pdo_power(pdo, ma, mv);
+
+ /*
+	 * Adjust VBUS current if a CTVPD device was detected.
+ */
+ if (vpd_vdo > 0) {
+ max_vbus = VPD_VDO_MAX_VBUS(vpd_vdo);
+ vpd_vbus_dcr = VPD_VDO_VBUS_IMP(vpd_vdo) << 1;
+ vpd_gnd_dcr = VPD_VDO_GND_IMP(vpd_vdo);
+
+ if (max_vbus > VPD_MAX_VBUS_50V)
+ max_vbus = VPD_MAX_VBUS_20V;
+
+ /*
+ * Valid max_vbus values:
+ * 20000 mV
+ * 30000 mV
+ * 40000 mV
+ * 50000 mV
+ */
+ max_vbus = 20000 + max_vbus * 10000;
+ if (*mv > max_vbus)
+ *mv = max_vbus;
+
+		/*
+		 * Cable current limit, derived from ma = 750000 / mOhm:
+		 * 5000 mA cable: 150 mOhm (750000 / 150 = 5000)
+		 * 3000 mA cable: 250 mOhm (750000 / 250 = 3000)
+		 */
+ if (*ma > 3000)
+ *ma = 750000 / (150 + vpd_vbus_dcr + vpd_gnd_dcr);
+ else
+ *ma = 750000 / (250 + vpd_vbus_dcr + vpd_gnd_dcr);
+ }
+
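+	/* mA * mV gives microwatts; PD_OPERATING_POWER_MW is in milliwatts */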
+ uw = *ma * *mv;
+ /* Mismatch bit set if less power offered than the operating power */
+ if (uw < (1000 * PD_OPERATING_POWER_MW))
+ flags |= RDO_CAP_MISMATCH;
+
+#ifdef CONFIG_USB_PD_GIVE_BACK
+ /* Tell source we are give back capable. */
+ flags |= RDO_GIVE_BACK;
+
+ /*
+ * BATTERY PDO: Inform the source that the sink will reduce
+ * power to this minimum level on receipt of a GotoMin Request.
+ */
+ max_or_min_mw = PD_MIN_POWER_MW;
+
+ /*
+ * FIXED or VARIABLE PDO: Inform the source that the sink will
+ * reduce current to this minimum level on receipt of a GotoMin
+ * Request.
+ */
+ max_or_min_ma = PD_MIN_CURRENT_MA;
+#else
+ /*
+ * Can't give back, so set maximum current and power to
+ * operating level.
+ */
+ max_or_min_ma = *ma;
+ max_or_min_mw = uw / 1000;
+#endif
+
+ if ((pdo & PDO_TYPE_MASK) == PDO_TYPE_BATTERY) {
+ int mw = uw / 1000;
+ *rdo = RDO_BATT(pdo_index + 1, mw, max_or_min_mw, flags);
+ } else {
+ *rdo = RDO_FIXED(pdo_index + 1, *ma, max_or_min_ma, flags);
+ }
+}
+#endif
diff --git a/common/usb_pd_policy.c b/common/usb_pd_policy.c
index 554cf86151..6e29cb34aa 100644
--- a/common/usb_pd_policy.c
+++ b/common/usb_pd_policy.c
@@ -22,6 +22,7 @@
#include "timer.h"
#include "util.h"
#include "usb_api.h"
+#include "usb_common.h"
#include "usb_pd.h"
#include "usbc_ppc.h"
#include "version.h"
@@ -104,144 +105,23 @@ static uint8_t pd_src_cap_cnt[CONFIG_USB_PD_PORT_COUNT];
/* Cap on the max voltage requested as a sink (in millivolts) */
static unsigned max_request_mv = PD_MAX_VOLTAGE_MV; /* no cap */
-int pd_find_pdo_index(int port, int max_mv, uint32_t *selected_pdo)
+const uint32_t * const pd_get_src_caps(int port)
{
- int i, uw, mv;
- int ret = 0;
- int __attribute__((unused)) cur_mv = 0;
- int cur_uw = 0;
- int prefer_cur;
- const uint32_t *src_caps = pd_src_caps[port];
-
- /* max voltage is always limited by this boards max request */
- max_mv = MIN(max_mv, PD_MAX_VOLTAGE_MV);
-
- /* Get max power that is under our max voltage input */
- for (i = 0; i < pd_src_cap_cnt[port]; i++) {
- /* its an unsupported Augmented PDO (PD3.0) */
- if ((src_caps[i] & PDO_TYPE_MASK) == PDO_TYPE_AUGMENTED)
- continue;
-
- mv = ((src_caps[i] >> 10) & 0x3FF) * 50;
- /* Skip invalid voltage */
- if (!mv)
- continue;
- /* Skip any voltage not supported by this board */
- if (!pd_is_valid_input_voltage(mv))
- continue;
-
- if ((src_caps[i] & PDO_TYPE_MASK) == PDO_TYPE_BATTERY) {
- uw = 250000 * (src_caps[i] & 0x3FF);
- } else {
- int ma = (src_caps[i] & 0x3FF) * 10;
- ma = MIN(ma, PD_MAX_CURRENT_MA);
- uw = ma * mv;
- }
+ ASSERT(port < CONFIG_USB_PD_PORT_COUNT);
- if (mv > max_mv)
- continue;
- uw = MIN(uw, PD_MAX_POWER_MW * 1000);
- prefer_cur = 0;
-
- /* Apply special rules in case of 'tie' */
-#ifdef PD_PREFER_LOW_VOLTAGE
- if (uw == cur_uw && mv < cur_mv)
- prefer_cur = 1;
-#elif defined(PD_PREFER_HIGH_VOLTAGE)
- if (uw == cur_uw && mv > cur_mv)
- prefer_cur = 1;
-#endif
- /* Prefer higher power, except for tiebreaker */
- if (uw > cur_uw || prefer_cur) {
- ret = i;
- cur_uw = uw;
- cur_mv = mv;
- }
- }
-
- if (selected_pdo)
- *selected_pdo = src_caps[ret];
-
- return ret;
+ return pd_src_caps[port];
}
-void pd_extract_pdo_power(uint32_t pdo, uint32_t *ma, uint32_t *mv)
+uint8_t pd_get_src_cap_cnt(int port)
{
- int max_ma, uw;
+ ASSERT(port < CONFIG_USB_PD_PORT_COUNT);
- *mv = ((pdo >> 10) & 0x3FF) * 50;
-
- if (*mv == 0) {
- CPRINTF("ERR:PDO mv=0\n");
- *ma = 0;
- return;
- }
-
- if ((pdo & PDO_TYPE_MASK) == PDO_TYPE_BATTERY) {
- uw = 250000 * (pdo & 0x3FF);
- max_ma = 1000 * MIN(1000 * uw, PD_MAX_POWER_MW) / *mv;
- } else {
- max_ma = 10 * (pdo & 0x3FF);
- max_ma = MIN(max_ma, PD_MAX_POWER_MW * 1000 / *mv);
- }
-
- *ma = MIN(max_ma, PD_MAX_CURRENT_MA);
+ return pd_src_cap_cnt[port];
}
-void pd_build_request(int port, uint32_t *rdo, uint32_t *ma, uint32_t *mv,
- enum pd_request_type req_type)
+uint32_t get_max_request_mv(void)
{
- uint32_t pdo;
- int pdo_index, flags = 0;
- int uw;
- int max_or_min_ma;
- int max_or_min_mw;
-
- if (req_type == PD_REQUEST_VSAFE5V) {
- /* src cap 0 should be vSafe5V */
- pdo_index = 0;
- pdo = pd_src_caps[port][0];
- } else {
- /* find pdo index for max voltage we can request */
- pdo_index = pd_find_pdo_index(port, max_request_mv, &pdo);
- }
-
- pd_extract_pdo_power(pdo, ma, mv);
- uw = *ma * *mv;
- /* Mismatch bit set if less power offered than the operating power */
- if (uw < (1000 * PD_OPERATING_POWER_MW))
- flags |= RDO_CAP_MISMATCH;
-
-#ifdef CONFIG_USB_PD_GIVE_BACK
- /* Tell source we are give back capable. */
- flags |= RDO_GIVE_BACK;
-
- /*
- * BATTERY PDO: Inform the source that the sink will reduce
- * power to this minimum level on receipt of a GotoMin Request.
- */
- max_or_min_mw = PD_MIN_POWER_MW;
-
- /*
- * FIXED or VARIABLE PDO: Inform the source that the sink will reduce
- * current to this minimum level on receipt of a GotoMin Request.
- */
- max_or_min_ma = PD_MIN_CURRENT_MA;
-#else
- /*
- * Can't give back, so set maximum current and power to operating
- * level.
- */
- max_or_min_ma = *ma;
- max_or_min_mw = uw / 1000;
-#endif
-
- if ((pdo & PDO_TYPE_MASK) == PDO_TYPE_BATTERY) {
- int mw = uw / 1000;
- *rdo = RDO_BATT(pdo_index + 1, mw, max_or_min_mw, flags);
- } else {
- *rdo = RDO_FIXED(pdo_index + 1, *ma, max_or_min_ma, flags);
- }
+ return max_request_mv;
}
void pd_process_source_cap(int port, int cnt, uint32_t *src_caps)
@@ -257,7 +137,8 @@ void pd_process_source_cap(int port, int cnt, uint32_t *src_caps)
#ifdef CONFIG_CHARGE_MANAGER
/* Get max power info that we could request */
- pd_find_pdo_index(port, PD_MAX_VOLTAGE_MV, &pdo);
+ pd_find_pdo_index(pd_get_src_cap_cnt(port), pd_get_src_caps(port),
+ PD_MAX_VOLTAGE_MV, &pdo);
pd_extract_pdo_power(pdo, &ma, &mv);
/* Set max. limit, but apply 500mA ceiling */
diff --git a/common/usb_pd_protocol.c b/common/usb_pd_protocol.c
index f7500ceb6a..123fbaa475 100644
--- a/common/usb_pd_protocol.c
+++ b/common/usb_pd_protocol.c
@@ -1487,9 +1487,11 @@ static int pd_send_request_msg(int port, int always_send_request)
* If this port is not actively charging or we are not allowed to
* request the max voltage, then select vSafe5V
*/
- pd_build_request(port, &rdo, &curr_limit, &supply_voltage,
- charging && max_request_allowed ?
- PD_REQUEST_MAX : PD_REQUEST_VSAFE5V);
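+	/*
+	 * No VPD VDO is known at this call site, so 0 is passed and
+	 * pd_build_request() skips the Charge-Through VPD current adjustment.
+	 */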
+ pd_build_request(pd_get_src_cap_cnt(port), pd_get_src_caps(port), 0,
+ &rdo, &curr_limit, &supply_voltage,
+ charging && max_request_allowed ?
+ PD_REQUEST_MAX : PD_REQUEST_VSAFE5V,
+ get_max_request_mv());
if (!always_send_request) {
/* Don't re-request the same voltage */
diff --git a/common/usbc/build.mk b/common/usbc/build.mk
index e1a90cdb32..fea97cde43 100644
--- a/common/usbc/build.mk
+++ b/common/usbc/build.mk
@@ -14,6 +14,7 @@ all-obj-$(CONFIG_USB_PRL_SM)+=$(_usbc_dir)usb_prl_sm.o
ifneq ($(CONFIG_USB_PE_SM),)
all-obj-$(CONFIG_USB_TYPEC_VPD)+=$(_usbc_dir)usb_pe_ctvpd_sm.o
all-obj-$(CONFIG_USB_TYPEC_CTVPD)+=$(_usbc_dir)usb_pe_ctvpd_sm.o
+all-obj-$(CONFIG_USB_TYPEC_DRP_ACC_TRYSRC)+=$(_usbc_dir)usb_pe_drp_sm.o
endif
all-obj-$(CONFIG_USB_TYPEC_VPD)+=$(_usbc_dir)usb_tc_vpd_sm.o
all-obj-$(CONFIG_USB_TYPEC_CTVPD)+=$(_usbc_dir)usb_tc_ctvpd_sm.o
diff --git a/common/usbc/usb_pe_ctvpd_sm.c b/common/usbc/usb_pe_ctvpd_sm.c
index f7db231907..355724ddcc 100644
--- a/common/usbc/usb_pe_ctvpd_sm.c
+++ b/common/usbc/usb_pe_ctvpd_sm.c
@@ -75,35 +75,42 @@ void pe_run(int port, int evt, int en)
}
}
-void pe_pass_up_message(int port)
+void pe_message_received(int port)
{
pe[port].flags |= PE_FLAGS_MSG_RECEIVED;
task_set_event(PD_PORT_TO_TASK_ID(port), PD_EVENT_SM, 0);
}
+/**
+ * NOTE:
+ * The Charge-Through Vconn Powered Device's Policy Engine is very
+ * simple and no implementation is needed for the following functions
+ * that might be called by the Protocol Layer.
+ */
+
void pe_hard_reset_sent(int port)
{
- /* Do nothing */
+ /* No implementation needed by this policy engine */
}
void pe_got_hard_reset(int port)
{
- /* Do nothing */
+ /* No implementation needed by this policy engine */
}
void pe_report_error(int port, enum pe_error e)
{
- /* Do nothing */
+ /* No implementation needed by this policy engine */
}
void pe_got_soft_reset(int port)
{
- /* Do nothing */
+ /* No implementation needed by this policy engine */
}
void pe_message_sent(int port)
{
- /* Do nothing */
+ /* No implementation needed by this policy engine */
}
static void pe_request_run(const int port)
diff --git a/common/usbc/usb_pe_drp_sm.c b/common/usbc/usb_pe_drp_sm.c
new file mode 100644
index 0000000000..a977cb063a
--- /dev/null
+++ b/common/usbc/usb_pe_drp_sm.c
@@ -0,0 +1,4883 @@
+/* Copyright 2019 The Chromium OS Authors. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "atomic.h"
+#include "battery.h"
+#include "battery_smart.h"
+#include "charge_manager.h"
+#include "charge_state.h"
+#include "common.h"
+#include "console.h"
+#include "hooks.h"
+#include "host_command.h"
+#include "task.h"
+#include "tcpm.h"
+#include "util.h"
+#include "usb_common.h"
+#include "usb_pd.h"
+#include "usb_pd_tcpm.h"
+#include "usb_pe_sm.h"
+#include "usb_prl_sm.h"
+#include "usb_tc_sm.h"
+#include "usb_emsg.h"
+#include "usb_sm.h"
+#include "usbc_ppc.h"
+
+/*
+ * USB Policy Engine Sink / Source module
+ *
+ * Based on Revision 3.0, Version 1.2 of
+ * the USB Power Delivery Specification.
+ */
+
+#ifdef CONFIG_COMMON_RUNTIME
+#define CPRINTF(format, args...) cprintf(CC_USBPD, format, ## args)
+#define CPRINTS(format, args...) cprints(CC_USBPD, format, ## args)
+#endif
+
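+/*
+ * These macros SET, CLEAR, and CHECK the per-port Policy Engine flags
+ * (PE_FLAGS_* below). SET and CLEAR are atomic; CHECK is a plain read.
+ */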
+#define PE_SET_FLAG(port, flag) atomic_or(&pe[port].flags, (flag))
+#define PE_CLR_FLAG(port, flag) atomic_clear(&pe[port].flags, (flag))
+#define PE_CHK_FLAG(port, flag) (pe[port].flags & (flag))
+
+/*
+ * These macros SET, CLEAR, and CHECK a DPM (Device Policy Manager)
+ * Request. The Requests are listed in usb_pe_sm.h.
+ */
+#define PE_SET_DPM_REQUEST(port, req) (pe[port].dpm_request |= (req))
+#define PE_CLR_DPM_REQUEST(port, req) (pe[port].dpm_request &= ~(req))
+#define PE_CHK_DPM_REQUEST(port, req) (pe[port].dpm_request & (req))
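+/*
+ * Example: pe_dpm_request(port, DPM_REQUEST_DR_SWAP) latches a Data Role
+ * Swap request while the port is in a Ready state; pe_src_ready_run() then
+ * services it by transitioning to PE_DRS_SEND_SWAP.
+ */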
+
+/* Policy Engine Layer Flags */
+#define PE_FLAGS_PD_CONNECTION BIT(0)
+#define PE_FLAGS_ACCEPT BIT(1)
+#define PE_FLAGS_PS_READY BIT(2)
+#define PE_FLAGS_PROTOCOL_ERROR BIT(3)
+#define PE_FLAGS_MODAL_OPERATION BIT(4)
+#define PE_FLAGS_TX_COMPLETE BIT(5)
+#define PE_FLAGS_MSG_RECEIVED BIT(6)
+#define PE_FLAGS_HARD_RESET_PENDING BIT(7)
+#define PE_FLAGS_WAIT BIT(8)
+#define PE_FLAGS_EXPLICIT_CONTRACT BIT(9)
+#define PE_FLAGS_SNK_WAIT_CAP_TIMEOUT BIT(10)
+#define PE_FLAGS_PS_TRANSITION_TIMEOUT BIT(11)
+#define PE_FLAGS_INTERRUPTIBLE_AMS BIT(12)
+#define PE_FLAGS_PS_RESET_COMPLETE BIT(13)
+#define PE_FLAGS_SEND_SVDM BIT(14)
+#define PE_FLAGS_VCONN_SWAP_COMPLETE BIT(15)
+#define PE_FLAGS_DISCOVER_PORT_IDENTITY_DONE BIT(16)
+#define PE_FLAGS_DISCOVER_VDM_IDENTITY_DONE BIT(17)
+#define PE_FLAGS_RUN_SOURCE_START_TIMER BIT(19)
+#define PE_FLAGS_VDM_REQUEST_BUSY BIT(20)
+#define PE_FLAGS_VDM_REQUEST_NAKED BIT(21)
+
+/* 6.7.3 Hard Reset Counter */
+#define N_HARD_RESET_COUNT 2
+
+/* 6.7.4 Capabilities Counter */
+#define N_CAPS_COUNT 25
+
+/* 6.7.5 Discover Identity Counter */
+#define N_DISCOVER_IDENTITY_COUNT 20
+
+/*
+ * Function pointer to a Structured Vendor Defined Message (SVDM) response
+ * function defined in the board's usb_pd_policy.c file.
+ */
+typedef int (*svdm_rsp_func)(int port, uint32_t *payload);
+
+/* List of all Policy Engine level states */
+enum usb_pe_state {
+ /* Normal States */
+ PE_SRC_STARTUP,
+ PE_SRC_DISCOVERY,
+ PE_SRC_SEND_CAPABILITIES,
+ PE_SRC_NEGOTIATE_CAPABILITY,
+ PE_SRC_TRANSITION_SUPPLY,
+ PE_SRC_READY,
+ PE_SRC_DISABLED,
+ PE_SRC_CAPABILITY_RESPONSE,
+ PE_SRC_HARD_RESET,
+ PE_SRC_HARD_RESET_RECEIVED,
+ PE_SRC_TRANSITION_TO_DEFAULT,
+ PE_SRC_VDM_IDENTITY_REQUEST,
+ PE_SNK_STARTUP,
+ PE_SNK_DISCOVERY,
+ PE_SNK_WAIT_FOR_CAPABILITIES,
+ PE_SNK_EVALUATE_CAPABILITY,
+ PE_SNK_SELECT_CAPABILITY,
+ PE_SNK_READY,
+ PE_SNK_HARD_RESET,
+ PE_SNK_TRANSITION_TO_DEFAULT,
+ PE_SNK_GIVE_SINK_CAP,
+ PE_SNK_GET_SOURCE_CAP,
+ PE_SNK_TRANSITION_SINK,
+ PE_SEND_SOFT_RESET,
+ PE_SOFT_RESET,
+ PE_SEND_NOT_SUPPORTED,
+ PE_SRC_PING,
+ PE_GIVE_BATTERY_CAP,
+ PE_GIVE_BATTERY_STATUS,
+ PE_DRS_EVALUATE_SWAP,
+ PE_DRS_CHANGE,
+ PE_DRS_SEND_SWAP,
+ PE_PRS_SRC_SNK_EVALUATE_SWAP,
+ PE_PRS_SRC_SNK_TRANSITION_TO_OFF,
+ PE_PRS_SRC_SNK_WAIT_SOURCE_ON,
+ PE_PRS_SRC_SNK_SEND_SWAP,
+ PE_PRS_SNK_SRC_EVALUATE_SWAP,
+ PE_PRS_SNK_SRC_TRANSITION_TO_OFF,
+ PE_PRS_SNK_SRC_ASSERT_RP,
+ PE_PRS_SNK_SRC_SOURCE_ON,
+ PE_PRS_SNK_SRC_SEND_SWAP,
+ PE_VCS_EVALUATE_SWAP,
+ PE_VCS_SEND_SWAP,
+ PE_VCS_WAIT_FOR_VCONN_SWAP,
+ PE_VCS_TURN_ON_VCONN_SWAP,
+ PE_VCS_TURN_OFF_VCONN_SWAP,
+ PE_VCS_SEND_PS_RDY_SWAP,
+ PE_DO_PORT_DISCOVERY,
+ PE_VDM_REQUEST,
+ PE_VDM_ACKED,
+ PE_VDM_RESPONSE,
+ PE_HANDLE_CUSTOM_VDM_REQUEST,
+ PE_WAIT_FOR_ERROR_RECOVERY,
+ PE_BIST,
+};
+
+/* Forward declare the full list of states. This is indexed by usb_pe_state */
+static const struct usb_state pe_states[];
+
+#ifdef CONFIG_COMMON_RUNTIME
+/* List of human readable state names for console debugging */
+static const char * const pe_state_names[] = {
+ [PE_SRC_STARTUP] = "PE_SRC_Startup",
+ [PE_SRC_DISCOVERY] = "PE_SRC_Discovery",
+ [PE_SRC_SEND_CAPABILITIES] = "PE_SRC_Send_Capabilities",
+ [PE_SRC_NEGOTIATE_CAPABILITY] = "PE_SRC_Negotiate_Capability",
+ [PE_SRC_TRANSITION_SUPPLY] = "PE_SRC_Transition_Supply",
+ [PE_SRC_READY] = "PE_SRC_Ready",
+ [PE_SRC_DISABLED] = "PE_SRC_Disabled",
+ [PE_SRC_CAPABILITY_RESPONSE] = "PE_SRC_Capability_Response",
+ [PE_SRC_HARD_RESET] = "PE_SRC_Hard_Reset",
+ [PE_SRC_HARD_RESET_RECEIVED] = "PE_SRC_Hard_Reset_Received",
+ [PE_SRC_TRANSITION_TO_DEFAULT] = "PE_SRC_Transition_to_default",
+ [PE_SRC_VDM_IDENTITY_REQUEST] = "PE_SRC_Vdm_Identity_Request",
+ [PE_SNK_STARTUP] = "PE_SNK_Startup",
+ [PE_SNK_DISCOVERY] = "PE_SNK_Discovery",
+ [PE_SNK_WAIT_FOR_CAPABILITIES] = "PE_SNK_Wait_for_Capabilities",
+ [PE_SNK_EVALUATE_CAPABILITY] = "PE_SNK_Evaluate_Capability",
+ [PE_SNK_SELECT_CAPABILITY] = "PE_SNK_Select_Capability",
+ [PE_SNK_READY] = "PE_SNK_Ready",
+ [PE_SNK_HARD_RESET] = "PE_SNK_Hard_Reset",
+ [PE_SNK_TRANSITION_TO_DEFAULT] = "PE_SNK_Transition_to_default",
+ [PE_SNK_GIVE_SINK_CAP] = "PE_SNK_Give_Sink_Cap",
+ [PE_SNK_GET_SOURCE_CAP] = "PE_SNK_Get_Source_Cap",
+ [PE_SNK_TRANSITION_SINK] = "PE_SNK_Transition_Sink",
+ [PE_SEND_SOFT_RESET] = "PE_Send_Soft_Reset",
+ [PE_SOFT_RESET] = "PE_Soft_Reset",
+ [PE_SEND_NOT_SUPPORTED] = "PE_Send_Not_Supported",
+ [PE_SRC_PING] = "PE_SRC_Ping",
+ [PE_GIVE_BATTERY_CAP] = "PE_Give_Battery_Cap",
+ [PE_GIVE_BATTERY_STATUS] = "PE_Give_Battery_Status",
+ [PE_DRS_EVALUATE_SWAP] = "PE_DRS_Evaluate_Swap",
+ [PE_DRS_CHANGE] = "PE_DRS_Change",
+ [PE_DRS_SEND_SWAP] = "PE_DRS_Send_Swap",
+ [PE_PRS_SRC_SNK_EVALUATE_SWAP] = "PE_PRS_SRC_SNK_Evaluate_Swap",
+ [PE_PRS_SRC_SNK_TRANSITION_TO_OFF] = "PE_PRS_SRC_SNK_Transition_To_Off",
+ [PE_PRS_SRC_SNK_WAIT_SOURCE_ON] = "PE_PRS_SRC_SNK_Wait_Source_On",
+ [PE_PRS_SRC_SNK_SEND_SWAP] = "PE_PRS_SRC_SNK_Send_Swap",
+ [PE_PRS_SNK_SRC_EVALUATE_SWAP] = "PE_SNK_SRC_Evaluate_Swap",
+ [PE_PRS_SNK_SRC_TRANSITION_TO_OFF] = "PE_SNK_SRC_Transition_To_Off",
+ [PE_PRS_SNK_SRC_ASSERT_RP] = "PE_SNK_SRC_Assert_Rp",
+ [PE_PRS_SNK_SRC_SOURCE_ON] = "PE_SNK_SRC_Source_On",
+ [PE_PRS_SNK_SRC_SEND_SWAP] = "PE_SNK_SRC_Send_Swap",
+ [PE_VCS_EVALUATE_SWAP] = "PE_VCS_Evaluate_Swap",
+ [PE_VCS_SEND_SWAP] = "PE_VCS_Send_Swap",
+ [PE_VCS_WAIT_FOR_VCONN_SWAP] = "PE_VCS_Wait_For_Vconn_Swap",
+ [PE_VCS_TURN_ON_VCONN_SWAP] = "PE_VCS_Turn_On_Vconn_Swap",
+ [PE_VCS_TURN_OFF_VCONN_SWAP] = "PE_VCS_Turn_Off_Vconn_Swap",
+ [PE_VCS_SEND_PS_RDY_SWAP] = "PE_VCS_Send_Ps_Rdy_Swap",
+ [PE_DO_PORT_DISCOVERY] = "PE_Do_Port_Discovery",
+ [PE_VDM_REQUEST] = "PE_VDM_Request",
+ [PE_VDM_ACKED] = "PE_VDM_Acked",
+ [PE_VDM_RESPONSE] = "PE_VDM_Response",
+ [PE_HANDLE_CUSTOM_VDM_REQUEST] = "PE_Handle_Custom_Vdm_Request",
+ [PE_WAIT_FOR_ERROR_RECOVERY] = "PE_Wait_For_Error_Recovery",
+ [PE_BIST] = "PE_Bist",
+};
+#endif
+
+/*
+ * NOTE:
+ * DO_PORT_DISCOVERY_START is not actually a VDM command. It is used
+ * to start the port partner discovery process.
+ */
+enum vdm_cmd {
+ DO_PORT_DISCOVERY_START,
+ DISCOVER_IDENTITY,
+ DISCOVER_SVIDS,
+ DISCOVER_MODES,
+ ENTER_MODE,
+ EXIT_MODE,
+ ATTENTION,
+};
+
+enum port_partner {
+ PORT,
+ CABLE,
+};
+
+/*
+ * This enum is used to implement a state machine consisting of at most
+ * 3 states, inside a Policy Engine State.
+ */
+enum sub_state {
+ PE_SUB0,
+ PE_SUB1,
+ PE_SUB2
+};
+
+static enum sm_local_state local_state[CONFIG_USB_PD_PORT_COUNT];
+
+/*
+ * Policy Engine State Machine Object
+ */
+static struct policy_engine {
+ /* state machine context */
+ struct sm_ctx ctx;
+ /* current port power role (SOURCE or SINK) */
+ enum pd_power_role power_role;
+ /* current port data role (DFP or UFP) */
+ enum pd_data_role data_role;
+ /* saved data and power roles while communicating with a cable plug */
+ enum pd_data_role saved_data_role;
+ enum pd_power_role saved_power_role;
+ /* state machine flags */
+ uint32_t flags;
+ /* Device Policy Manager Request */
+ uint32_t dpm_request;
+ /* state timeout timer */
+ uint64_t timeout;
+ /* last requested voltage PDO index */
+ int requested_idx;
+
+ /* Current limit / voltage based on the last request message */
+ uint32_t curr_limit;
+ uint32_t supply_voltage;
+
+ /* state specific state machine variable */
+ enum sub_state sub;
+
+ /* VDO */
+ int32_t active_cable_vdo1;
+ int32_t active_cable_vdo2;
+ int32_t passive_cable_vdo;
+ int32_t ama_vdo;
+ int32_t vpd_vdo;
+	/* alternate mode policy */
+ struct pd_policy am_policy;
+
+ /* VDM */
+ enum port_partner partner_type;
+ uint32_t vdm_cmd;
+ uint32_t vdm_cnt;
+ uint32_t vdm_data[VDO_HDR_SIZE + VDO_MAX_SIZE];
+
+ /* Timers */
+
+ /*
+ * The NoResponseTimer is used by the Policy Engine in a Source
+ * to determine that its Port Partner is not responding after a
+ * Hard Reset.
+ */
+ uint64_t no_response_timer;
+
+ /*
+ * Prior to a successful negotiation, a Source Shall use the
+ * SourceCapabilityTimer to periodically send out a
+ * Source_Capabilities Message.
+ */
+ uint64_t source_cap_timer;
+
+ /*
+ * This timer is started when a request for a new Capability has been
+ * accepted and will timeout after PD_T_PS_TRANSITION if a PS_RDY
+ * Message has not been received.
+ */
+ uint64_t ps_transition_timer;
+
+ /*
+ * This timer is used to ensure that a Message requesting a response
+ * (e.g. Get_Source_Cap Message) is responded to within a bounded time
+ * of PD_T_SENDER_RESPONSE.
+ */
+ uint64_t sender_response_timer;
+
+ /*
+ * This timer is used during an Explicit Contract when discovering
+ * whether a Cable Plug is PD Capable using SOP’.
+ */
+ uint64_t discover_identity_timer;
+
+ /*
+ * This timer is used in a Source to ensure that the Sink has had
+ * sufficient time to process Hard Reset Signaling before turning
+ * off its power supply to VBUS.
+ */
+ uint64_t ps_hard_reset_timer;
+
+ /*
+	 * This timer enforces the minimum time that must elapse before the
+	 * next Sink Request Message may be sent, after a Wait Message has
+	 * been received from the Source in response to a Sink Request
+	 * Message.
+ */
+ uint64_t sink_request_timer;
+
+ /*
+ * This timer combines the PSSourceOffTimer and PSSourceOnTimer timers.
+ * For PSSourceOffTimer, when this DRP device is currently acting as a
+ * Sink, this timer times out on a PS_RDY Message during a Power Role
+ * Swap sequence.
+ *
+	 * For PSSourceOnTimer, when this DRP device is currently acting as a
+	 * Source that has just stopped sourcing power and is waiting to start
+	 * sinking power, this timer times out on a PS_RDY Message during a
+	 * Power Role Swap.
+ */
+ uint64_t ps_source_timer;
+
+ /*
+ * This timer is used by a UUT to ensure that a Continuous BIST Mode
+ * (i.e. BIST Carrier Mode) is exited in a timely fashion.
+ */
+ uint64_t bist_cont_mode_timer;
+
+ /*
+ * This timer is used by the new Source, after a Power Role Swap or
+	 * Fast Role Swap, to ensure that it does not send a
+	 * Source_Capabilities Message before the new Sink is ready to
+	 * receive the Source_Capabilities Message.
+ */
+ uint64_t swap_source_start_timer;
+
+ /*
+ * This timer is used by the Initiator’s Policy Engine to ensure that
+ * a Structured VDM Command request needing a response (e.g. Discover
+ * Identity Command request) is responded to within a bounded time of
+ * tVDMSenderResponse.
+ */
+ uint64_t vdm_response_timer;
+
+ /*
+ * This timer is used during a VCONN Swap.
+ */
+ uint64_t vconn_on_timer;
+
+ /* Counters */
+
+ /*
+ * This counter is used to retry the Hard Reset whenever there is no
+ * response from the remote device.
+ */
+ uint32_t hard_reset_counter;
+
+ /*
+ * This counter is used to count the number of Source_Capabilities
+ * Messages which have been sent by a Source at power up or after a
+ * Hard Reset.
+ */
+ uint32_t caps_counter;
+
+ /*
+	 * These counters maintain a count of Discover Identity Messages sent
+	 * to the Port Partner and Cable Plug, respectively.
+ */
+ uint32_t port_discover_identity_count;
+ uint32_t cable_discover_identity_count;
+
+ /* Last received source cap */
+ uint32_t src_caps[PDO_MAX_OBJECTS];
+ int src_cap_cnt;
+
+} pe[CONFIG_USB_PD_PORT_COUNT];
+
+/*
+ * As a sink, this is the max voltage (in millivolts) we can request
+ * before getting source caps
+ */
+static unsigned int max_request_mv = PD_MAX_VOLTAGE_MV;
+
+/*
+ * Private VDM utility functions
+ */
+#ifdef CONFIG_USB_PD_ALT_MODE_DFP
+static int validate_mode_request(struct svdm_amode_data *modep,
+ uint16_t svid, int opos);
+static void dfp_consume_attention(int port, uint32_t *payload);
+static void dfp_consume_identity(int port, int cnt, uint32_t *payload);
+static void dfp_consume_svids(int port, int cnt, uint32_t *payload);
+static int dfp_discover_modes(int port, uint32_t *payload);
+static void dfp_consume_modes(int port, int cnt, uint32_t *payload);
+static int get_mode_idx(int port, uint16_t svid);
+static struct svdm_amode_data *get_modep(int port, uint16_t svid);
+#endif
+
+test_export_static enum usb_pe_state get_state_pe(const int port);
+static void set_state_pe(const int port, const enum usb_pe_state new_state);
+
+void pe_init(int port)
+{
+ pe[port].flags = 0;
+ pe[port].dpm_request = 0;
+ pe[port].source_cap_timer = 0;
+ pe[port].no_response_timer = 0;
+ pe[port].data_role = tc_get_data_role(port);
+
+ tc_pd_connection(port, 0);
+
+ if (tc_get_power_role(port) == PD_ROLE_SOURCE)
+ set_state_pe(port, PE_SRC_STARTUP);
+ else
+ set_state_pe(port, PE_SNK_STARTUP);
+}
+
+int pe_is_running(int port)
+{
+ return local_state[port] == SM_RUN;
+}
+
+void pe_run(int port, int evt, int en)
+{
+ switch (local_state[port]) {
+ case SM_PAUSED:
+ if (!en)
+ break;
+ /* fall through */
+ case SM_INIT:
+ pe_init(port);
+ local_state[port] = SM_RUN;
+ /* fall through */
+ case SM_RUN:
+ if (!en) {
+ local_state[port] = SM_PAUSED;
+ /*
+ * While we are paused, exit all states and wait until
+ * initialized again.
+ */
+ set_state(port, &pe[port].ctx, NULL);
+ break;
+ }
+
+ /* Run state machine */
+ exe_state(port, &pe[port].ctx);
+ break;
+ }
+}
+
+int pe_is_explicit_contract(int port)
+{
+ return PE_CHK_FLAG(port, PE_FLAGS_EXPLICIT_CONTRACT);
+}
+
+void pe_message_received(int port)
+{
+ /* This should only be called from the PD task */
+ assert(port == TASK_ID_TO_PD_PORT(task_get_current()));
+
+ PE_SET_FLAG(port, PE_FLAGS_MSG_RECEIVED);
+}
+
+void pe_hard_reset_sent(int port)
+{
+ /* This should only be called from the PD task */
+ assert(port == TASK_ID_TO_PD_PORT(task_get_current()));
+
+ PE_CLR_FLAG(port, PE_FLAGS_HARD_RESET_PENDING);
+}
+
+void pe_got_hard_reset(int port)
+{
+ /* This should only be called from the PD task */
+ assert(port == TASK_ID_TO_PD_PORT(task_get_current()));
+
+ /*
+ * Transition from any state to the PE_SRC_Hard_Reset_Received or
+ * PE_SNK_Transition_to_default state when:
+ * 1) Hard Reset Signaling is detected.
+ */
+ pe[port].power_role = tc_get_power_role(port);
+
+ if (pe[port].power_role == PD_ROLE_SOURCE)
+ set_state_pe(port, PE_SRC_HARD_RESET_RECEIVED);
+ else
+ set_state_pe(port, PE_SNK_TRANSITION_TO_DEFAULT);
+}
+
+void pe_report_error(int port, enum pe_error e)
+{
+ /* This should only be called from the PD task */
+ assert(port == TASK_ID_TO_PD_PORT(task_get_current()));
+
+ /*
+ * Generate Hard Reset if Protocol Error occurred
+ * while in PE_Send_Soft_Reset state.
+ */
+ if (get_state_pe(port) == PE_SEND_SOFT_RESET) {
+ if (pe[port].power_role == PD_ROLE_SINK)
+ set_state_pe(port, PE_SNK_HARD_RESET);
+ else
+ set_state_pe(port, PE_SRC_HARD_RESET);
+ return;
+ }
+
+ if (get_state_pe(port) == PE_SRC_SEND_CAPABILITIES ||
+ get_state_pe(port) == PE_SRC_TRANSITION_SUPPLY ||
+ get_state_pe(port) == PE_PRS_SRC_SNK_WAIT_SOURCE_ON ||
+ get_state_pe(port) == PE_SRC_DISABLED ||
+ get_state_pe(port) == PE_SRC_DISCOVERY ||
+ get_state_pe(port) == PE_VDM_REQUEST) {
+ PE_SET_FLAG(port, PE_FLAGS_PROTOCOL_ERROR);
+ return;
+ }
+
+ /*
+ * See section 8.3.3.4.1.1 PE_SRC_Send_Soft_Reset State:
+ *
+ * The PE_Send_Soft_Reset state shall be entered from
+	 * any state when a Protocol Error is detected by the
+	 * Protocol Layer during a Non-Interruptible AMS or when
+	 * a Message has not been sent after retries.
+ */
+ if (!PE_CHK_FLAG(port, PE_FLAGS_INTERRUPTIBLE_AMS) ||
+ (e == ERR_TCH_XMIT)) {
+ set_state_pe(port, PE_SEND_SOFT_RESET);
+ }
+ /*
+ * Transition to PE_Snk_Ready or PE_Src_Ready by a Protocol
+ * Error during an Interruptible AMS.
+ */
+ else {
+ PE_SET_FLAG(port, PE_FLAGS_PROTOCOL_ERROR);
+ if (pe[port].power_role == PD_ROLE_SINK)
+ set_state_pe(port, PE_SNK_READY);
+ else
+ set_state_pe(port, PE_SRC_READY);
+ }
+}
+
+void pe_got_soft_reset(int port)
+{
+ /* This should only be called from the PD task */
+ assert(port == TASK_ID_TO_PD_PORT(task_get_current()));
+
+ /*
+ * The PE_SRC_Soft_Reset state Shall be entered from any state when a
+ * Soft_Reset Message is received from the Protocol Layer.
+ */
+ set_state_pe(port, PE_SOFT_RESET);
+}
+
+void pe_dpm_request(int port, enum pe_dpm_request req)
+{
+ if (get_state_pe(port) == PE_SRC_READY ||
+ get_state_pe(port) == PE_SNK_READY)
+ PE_SET_DPM_REQUEST(port, req);
+}
+
+void pe_vconn_swap_complete(int port)
+{
+ /* This should only be called from the PD task */
+ assert(port == TASK_ID_TO_PD_PORT(task_get_current()));
+
+ PE_SET_FLAG(port, PE_FLAGS_VCONN_SWAP_COMPLETE);
+}
+
+void pe_ps_reset_complete(int port)
+{
+ /* This should only be called from the PD task */
+ assert(port == TASK_ID_TO_PD_PORT(task_get_current()));
+
+ PE_SET_FLAG(port, PE_FLAGS_PS_RESET_COMPLETE);
+}
+
+void pe_message_sent(int port)
+{
+ /* This should only be called from the PD task */
+ assert(port == TASK_ID_TO_PD_PORT(task_get_current()));
+
+ PE_SET_FLAG(port, PE_FLAGS_TX_COMPLETE);
+}
+
+void pe_send_vdm(int port, uint32_t vid, int cmd, const uint32_t *data,
+ int count)
+{
+ /* This should only be called from the PD task */
+ assert(port == TASK_ID_TO_PD_PORT(task_get_current()));
+
+ pe[port].partner_type = PORT;
+
+ /* Copy VDM Header */
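+	/*
+	 * The header is marked Structured when the SVID is the PD SID or the
+	 * command is one of the standard structured commands (up to
+	 * Attention); otherwise it is built as an Unstructured VDM.
+	 */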
+ pe[port].vdm_data[0] = VDO(vid, ((vid & USB_SID_PD) == USB_SID_PD) ?
+ 1 : (PD_VDO_CMD(cmd) <= CMD_ATTENTION),
+ VDO_SVDM_VERS(1) | cmd);
+
+ /* Copy Data after VDM Header */
+ memcpy((pe[port].vdm_data + 1), data, count);
+
+ pe[port].vdm_cnt = count + 1;
+
+ PE_SET_FLAG(port, PE_FLAGS_SEND_SVDM);
+ task_wake(PD_PORT_TO_TASK_ID(port));
+}
+
+void pe_exit_dp_mode(int port)
+{
+ /* This should only be called from the PD task */
+ assert(port == TASK_ID_TO_PD_PORT(task_get_current()));
+
+ if (IS_ENABLED(CONFIG_USB_PD_ALT_MODE_DFP)) {
+ int opos = pd_alt_mode(port, USB_SID_DISPLAYPORT);
+
+ if (opos <= 0)
+ return;
+
+ CPRINTS("C%d Exiting DP mode", port);
+ if (!pd_dfp_exit_mode(port, USB_SID_DISPLAYPORT, opos))
+ return;
+
+ pe_send_vdm(port, USB_SID_DISPLAYPORT,
+ CMD_EXIT_MODE | VDO_OPOS(opos), NULL, 0);
+ }
+}
+
+/*
+ * Private functions
+ */
+
+/* Set the Policy Engine state machine to a new state. */
+static void set_state_pe(const int port, const enum usb_pe_state new_state)
+{
+ set_state(port, &pe[port].ctx, &pe_states[new_state]);
+}
+
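+/*
+ * NOTE: The current and previous state enums are recovered below by pointer
+ * arithmetic against pe_states[], so pe_states[] must be indexed by
+ * enum usb_pe_state (see the forward declaration above).
+ */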
+/* Get the current Policy Engine state. */
+test_export_static enum usb_pe_state get_state_pe(const int port)
+{
+ return pe[port].ctx.current - &pe_states[0];
+}
+
+/* Get the previous Policy Engine state. */
+static enum usb_pe_state get_last_state_pe(const int port)
+{
+ return pe[port].ctx.previous - &pe_states[0];
+}
+
+static void print_current_state(const int port)
+{
+ CPRINTS("C%d: %s", port, pe_state_names[get_state_pe(port)]);
+}
+
+static void send_source_cap(int port)
+{
+#if defined(CONFIG_USB_PD_DYNAMIC_SRC_CAP) || \
+ defined(CONFIG_USB_PD_MAX_SINGLE_SOURCE_CURRENT)
+ const uint32_t *src_pdo;
+ const int src_pdo_cnt = charge_manager_get_source_pdo(&src_pdo, port);
+#else
+ const uint32_t *src_pdo = pd_src_pdo;
+ const int src_pdo_cnt = pd_src_pdo_cnt;
+#endif
+
+ if (src_pdo_cnt == 0) {
+ /* No source capabilities defined, sink only */
+ prl_send_ctrl_msg(port, TCPC_TX_SOP, PD_CTRL_REJECT);
+ }
+
+ emsg[port].len = src_pdo_cnt * 4;
+ memcpy(emsg[port].buf, (uint8_t *)src_pdo, emsg[port].len);
+
+ prl_send_data_msg(port, TCPC_TX_SOP, PD_DATA_SOURCE_CAP);
+}
+
+/*
+ * Request desired charge voltage from source.
+ */
+static void pe_send_request_msg(int port)
+{
+ uint32_t rdo;
+ uint32_t curr_limit;
+ uint32_t supply_voltage;
+ int charging;
+ int max_request_allowed;
+
+ if (IS_ENABLED(CONFIG_CHARGE_MANAGER))
+ charging = (charge_manager_get_active_charge_port() == port);
+ else
+ charging = 1;
+
+ if (IS_ENABLED(CONFIG_USB_PD_CHECK_MAX_REQUEST_ALLOWED))
+ max_request_allowed = pd_is_max_request_allowed();
+ else
+ max_request_allowed = 1;
+
+ /* Build and send request RDO */
+ /*
+ * If this port is not actively charging or we are not allowed to
+ * request the max voltage, then select vSafe5V
+ */
+ pd_build_request(pe[port].src_cap_cnt, pe[port].src_caps,
+ pe[port].vpd_vdo, &rdo, &curr_limit,
+ &supply_voltage, charging && max_request_allowed ?
+ PD_REQUEST_MAX : PD_REQUEST_VSAFE5V, max_request_mv);
+
+ CPRINTF("C%d Req [%d] %dmV %dmA", port, RDO_POS(rdo),
+ supply_voltage, curr_limit);
+ if (rdo & RDO_CAP_MISMATCH)
+ CPRINTF(" Mismatch");
+ CPRINTF("\n");
+
+ pe[port].curr_limit = curr_limit;
+ pe[port].supply_voltage = supply_voltage;
+
+ emsg[port].len = 4;
+
+ memcpy(emsg[port].buf, (uint8_t *)&rdo, emsg[port].len);
+ prl_send_data_msg(port, TCPC_TX_SOP, PD_DATA_REQUEST);
+}
+
+static void pe_update_pdo_flags(int port, uint32_t pdo)
+{
+#ifdef CONFIG_CHARGE_MANAGER
+#ifdef CONFIG_USB_PD_ALT_MODE_DFP
+ int charge_whitelisted =
+ (tc_get_power_role(port) == PD_ROLE_SINK &&
+ pd_charge_from_device(pd_get_identity_vid(port),
+ pd_get_identity_pid(port)));
+#else
+ const int charge_whitelisted = 0;
+#endif
+#endif
+
+ /* can only parse PDO flags if type is fixed */
+ if ((pdo & PDO_TYPE_MASK) != PDO_TYPE_FIXED)
+ return;
+
+ if (pdo & PDO_FIXED_DUAL_ROLE)
+ tc_partner_dr_power(port, 1);
+ else
+ tc_partner_dr_power(port, 0);
+
+ if (pdo & PDO_FIXED_EXTERNAL)
+ tc_partner_extpower(port, 1);
+ else
+ tc_partner_extpower(port, 0);
+
+ if (pdo & PDO_FIXED_COMM_CAP)
+ tc_partner_usb_comm(port, 1);
+ else
+ tc_partner_usb_comm(port, 0);
+
+ if (pdo & PDO_FIXED_DATA_SWAP)
+ tc_partner_dr_data(port, 1);
+ else
+ tc_partner_dr_data(port, 0);
+
+#ifdef CONFIG_CHARGE_MANAGER
+ /*
+ * Treat device as a dedicated charger (meaning we should charge
+ * from it) if it does not support power swap, or if it is externally
+ * powered, or if we are a sink and the device identity matches a
+ * charging white-list.
+ */
+ if (!(pdo & PDO_FIXED_DUAL_ROLE) || (pdo & PDO_FIXED_EXTERNAL) ||
+ charge_whitelisted)
+ charge_manager_update_dualrole(port, CAP_DEDICATED);
+ else
+ charge_manager_update_dualrole(port, CAP_DUALROLE);
+#endif
+}
+
+int pd_board_check_request(uint32_t rdo, int pdo_cnt)
+{
+ int idx = RDO_POS(rdo);
+
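+	/* RDO object positions are 1-based, so 0 is never a valid index */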
+ /* Check for invalid index */
+ return (!idx || idx > pdo_cnt) ?
+ EC_ERROR_INVAL : EC_SUCCESS;
+}
+
+static void pe_prl_execute_hard_reset(int port)
+{
+ prl_execute_hard_reset(port);
+}
+
+/**
+ * PE_SRC_Startup
+ */
+static void pe_src_startup_entry(int port)
+{
+ print_current_state(port);
+
+ /* Initialize VDOs to default values */
+ pe[port].active_cable_vdo1 = -1;
+ pe[port].active_cable_vdo2 = -1;
+ pe[port].passive_cable_vdo = -1;
+ pe[port].ama_vdo = -1;
+ pe[port].vpd_vdo = -1;
+
+ /* Reset CapsCounter */
+ pe[port].caps_counter = 0;
+
+ /* Reset the protocol layer */
+ prl_reset(port);
+
+ /* Set initial data role */
+ pe[port].data_role = tc_get_data_role(port);
+
+ /* Set initial power role */
+ pe[port].power_role = PD_ROLE_SOURCE;
+
+ /* Clear explicit contract. */
+ PE_CLR_FLAG(port, PE_FLAGS_EXPLICIT_CONTRACT);
+
+ pe[port].cable_discover_identity_count = 0;
+ pe[port].port_discover_identity_count = 0;
+
+ if (PE_CHK_FLAG(port, PE_FLAGS_RUN_SOURCE_START_TIMER)) {
+ PE_CLR_FLAG(port, PE_FLAGS_RUN_SOURCE_START_TIMER);
+ /* Start SwapSourceStartTimer */
+ pe[port].swap_source_start_timer =
+ get_time().val + PD_T_SWAP_SOURCE_START;
+ } else {
+ pe[port].swap_source_start_timer = 0;
+ }
+}
+
+static void pe_src_startup_run(int port)
+{
+ /* Wait until protocol layer is running */
+ if (!prl_is_running(port))
+ return;
+
+ if (pe[port].swap_source_start_timer == 0 ||
+ get_time().val > pe[port].swap_source_start_timer)
+ set_state_pe(port, PE_SRC_VDM_IDENTITY_REQUEST);
+}
+
+/**
+ * PE_SRC_VDM_Identity_Request
+ */
+static void pe_src_vdm_identity_request_entry(int port)
+{
+ print_current_state(port);
+}
+
+static void pe_src_vdm_identity_request_run(int port)
+{
+ /*
+ * Discover identity of the Cable Plug
+ */
+ if (!PE_CHK_FLAG(port, PE_FLAGS_DISCOVER_VDM_IDENTITY_DONE) &&
+ tc_is_vconn_src(port) &&
+ pe[port].cable_discover_identity_count <
+ N_DISCOVER_IDENTITY_COUNT) {
+ pe[port].cable_discover_identity_count++;
+
+ pe[port].partner_type = CABLE;
+ pe[port].vdm_cmd = DISCOVER_IDENTITY;
+ pe[port].vdm_data[0] = VDO(USB_SID_PD, 1, /* structured */
+ VDO_SVDM_VERS(1) | DISCOVER_IDENTITY);
+ pe[port].vdm_cnt = 1;
+
+ set_state_pe(port, PE_VDM_REQUEST);
+ } else {
+ set_state_pe(port, PE_SRC_SEND_CAPABILITIES);
+ }
+}
+
+/**
+ * PE_SRC_Discovery
+ */
+static void pe_src_discovery_entry(int port)
+{
+ print_current_state(port);
+
+ /*
+ * Initialize and run the SourceCapabilityTimer in order
+ * to trigger sending a Source_Capabilities Message.
+ *
+ * The SourceCapabilityTimer Shall continue to run during cable
+	 * identity discovery and Shall Not be initialized on re-entry
+ * to PE_SRC_Discovery.
+ */
+ if (get_last_state_pe(port) != PE_VDM_REQUEST)
+ pe[port].source_cap_timer =
+ get_time().val + PD_T_SEND_SOURCE_CAP;
+}
+
+static void pe_src_discovery_run(int port)
+{
+ /*
+ * A VCONN or Charge-Through VCONN Powered Device was detected.
+ */
+ if (pe[port].vpd_vdo >= 0 && VPD_VDO_CTS(pe[port].vpd_vdo)) {
+ set_state_pe(port, PE_SRC_DISABLED);
+ return;
+ }
+
+ /*
+ * Transition to the PE_SRC_Send_Capabilities state when:
+ * 1) The SourceCapabilityTimer times out and
+ * CapsCounter ≤ nCapsCount.
+ *
+ * Transition to the PE_SRC_Disabled state when:
+ * 1) The Port Partners are not presently PD Connected
+ * 2) And the SourceCapabilityTimer times out
+ * 3) And CapsCounter > nCapsCount.
+ */
+ if (get_time().val > pe[port].source_cap_timer) {
+ if (pe[port].caps_counter <= N_CAPS_COUNT)
+ set_state_pe(port, PE_SRC_SEND_CAPABILITIES);
+ else if (!PE_CHK_FLAG(port, PE_FLAGS_PD_CONNECTION))
+ set_state_pe(port, PE_SRC_DISABLED);
+ return;
+ }
+
+ /*
+ * Transition to the PE_SRC_Disabled state when:
+ * 1) The Port Partners have not been PD Connected.
+ * 2) And the NoResponseTimer times out.
+ * 3) And the HardResetCounter > nHardResetCount.
+ */
+ if (!PE_CHK_FLAG(port, PE_FLAGS_PD_CONNECTION) &&
+ pe[port].no_response_timer > 0 &&
+ get_time().val > pe[port].no_response_timer &&
+ pe[port].hard_reset_counter > N_HARD_RESET_COUNT) {
+ set_state_pe(port, PE_SRC_DISABLED);
+ return;
+ }
+
+ /*
+ * Discover identity of the Cable Plug
+ */
+ if (!PE_CHK_FLAG(port, PE_FLAGS_DISCOVER_VDM_IDENTITY_DONE) &&
+ pe[port].cable_discover_identity_count < N_DISCOVER_IDENTITY_COUNT) {
+ set_state_pe(port, PE_SRC_VDM_IDENTITY_REQUEST);
+ }
+}
+
+/**
+ * PE_SRC_Send_Capabilities
+ */
+static void pe_src_send_capabilities_entry(int port)
+{
+ print_current_state(port);
+
+ /* Send PD Capabilities message */
+ send_source_cap(port);
+
+ /* Increment CapsCounter */
+ pe[port].caps_counter++;
+
+ /* Stop sender response timer */
+ pe[port].sender_response_timer = 0;
+
+ /*
+ * Clear PE_FLAGS_INTERRUPTIBLE_AMS flag if it was set
+ * in the src_discovery state
+ */
+ PE_CLR_FLAG(port, PE_FLAGS_INTERRUPTIBLE_AMS);
+}
+
+static void pe_src_send_capabilities_run(int port)
+{
+ /*
+ * If a GoodCRC Message is received then the Policy Engine Shall:
+ * 1) Stop the NoResponseTimer.
+ * 2) Reset the HardResetCounter and CapsCounter to zero.
+ * 3) Initialize and run the SenderResponseTimer.
+ */
+ if (PE_CHK_FLAG(port, PE_FLAGS_TX_COMPLETE) &&
+ pe[port].sender_response_timer == 0) {
+ PE_CLR_FLAG(port, PE_FLAGS_TX_COMPLETE);
+
+ /* Stop the NoResponseTimer */
+ pe[port].no_response_timer = 0;
+
+ /* Reset the HardResetCounter to zero */
+ pe[port].hard_reset_counter = 0;
+
+ /* Reset the CapsCounter to zero */
+ pe[port].caps_counter = 0;
+
+ /* Initialize and run the SenderResponseTimer */
+ pe[port].sender_response_timer = get_time().val +
+ PD_T_SENDER_RESPONSE;
+ }
+
+ /*
+ * Transition to the PE_SRC_Negotiate_Capability state when:
+ * 1) A Request Message is received from the Sink
+ */
+ if (pe[port].sender_response_timer != 0 &&
+ PE_CHK_FLAG(port, PE_FLAGS_MSG_RECEIVED)) {
+ PE_CLR_FLAG(port, PE_FLAGS_MSG_RECEIVED);
+
+ /*
+ * Request Message Received?
+ */
+ if (PD_HEADER_CNT(emsg[port].header) > 0 &&
+ PD_HEADER_TYPE(emsg[port].header) == PD_DATA_REQUEST) {
+
+ /*
+ * Set to highest revision supported by both
+ * ports.
+ */
+ prl_set_rev(port,
+ (PD_HEADER_REV(emsg[port].header) > PD_REV30) ?
+ PD_REV30 : PD_HEADER_REV(emsg[port].header));
+
+ /* We are PD connected */
+ PE_SET_FLAG(port, PE_FLAGS_PD_CONNECTION);
+ tc_pd_connection(port, 1);
+
+ /*
+ * Handle the Sink Request in
+ * PE_SRC_Negotiate_Capability state
+ */
+ set_state_pe(port, PE_SRC_NEGOTIATE_CAPABILITY);
+ return;
+ }
+
+ /* We have a Protocol Error. Send Soft Reset Message */
+ set_state_pe(port, PE_SEND_SOFT_RESET);
+ return;
+ }
+
+ /*
+ * Transition to the PE_SRC_Discovery state when:
+ * 1) The Protocol Layer indicates that the Message has not been sent
+ * and we are presently not Connected
+ *
+ * NOTE: The PE_FLAGS_PROTOCOL_ERROR is set if a GoodCRC Message
+ * is not received.
+ */
+ if (PE_CHK_FLAG(port, PE_FLAGS_PROTOCOL_ERROR) &&
+ !PE_CHK_FLAG(port, PE_FLAGS_PD_CONNECTION)) {
+ PE_CLR_FLAG(port, PE_FLAGS_PROTOCOL_ERROR);
+
+ set_state_pe(port, PE_SRC_DISCOVERY);
+ return;
+ }
+
+ /*
+ * Transition to the PE_SRC_Disabled state when:
+ * 1) The Port Partners have not been PD Connected
+ * 2) The NoResponseTimer times out
+ * 3) And the HardResetCounter > nHardResetCount.
+ *
+ * Transition to the Error Recovery state when:
+ * 1) The Port Partners have previously been PD Connected
+ * 2) The NoResponseTimer times out
+ * 3) And the HardResetCounter > nHardResetCount.
+ */
+ if (pe[port].no_response_timer > 0 &&
+ get_time().val > pe[port].no_response_timer &&
+ pe[port].hard_reset_counter > N_HARD_RESET_COUNT) {
+ if (PE_CHK_FLAG(port, PE_FLAGS_PD_CONNECTION))
+ set_state_pe(port, PE_WAIT_FOR_ERROR_RECOVERY);
+ else
+ set_state_pe(port, PE_SRC_DISABLED);
+ return;
+ }
+
+ /*
+ * Transition to the PE_SRC_Hard_Reset state when:
+ * 1) The SenderResponseTimer times out.
+ */
+ if ((pe[port].sender_response_timer > 0) &&
+ get_time().val > pe[port].sender_response_timer) {
+ set_state_pe(port, PE_SRC_HARD_RESET);
+ }
+}
+
+/**
+ * PE_SRC_Negotiate_Capability
+ */
+static void pe_src_negotiate_capability_entry(int port)
+{
+ uint32_t payload;
+
+ print_current_state(port);
+
+ /* Get message payload */
+ payload = *(uint32_t *)(&emsg[port].buf);
+
+ /*
+ * Evaluate the Request from the Attached Sink
+ */
+
+ /*
+ * Transition to the PE_SRC_Capability_Response state when:
+ * 1) The Request cannot be met.
+ * 2) Or the Request can be met later from the Power Reserve
+ *
+ * Transition to the PE_SRC_Transition_Supply state when:
+ * 1) The Request can be met
+ *
+ */
+ if (pd_check_requested_voltage(payload, port) != EC_SUCCESS) {
+ set_state_pe(port, PE_SRC_CAPABILITY_RESPONSE);
+ } else {
+ PE_SET_FLAG(port, PE_FLAGS_ACCEPT);
+ pe[port].requested_idx = RDO_POS(payload);
+ set_state_pe(port, PE_SRC_TRANSITION_SUPPLY);
+ }
+}
+
+/**
+ * PE_SRC_Transition_Supply
+ */
+static void pe_src_transition_supply_entry(int port)
+{
+ print_current_state(port);
+
+ /* Transition Power Supply */
+ pd_transition_voltage(pe[port].requested_idx);
+
+ /* Send a GotoMin Message or otherwise an Accept Message */
+ if (PE_CHK_FLAG(port, PE_FLAGS_ACCEPT)) {
+ PE_CLR_FLAG(port, PE_FLAGS_ACCEPT);
+ prl_send_ctrl_msg(port, TCPC_TX_SOP, PD_CTRL_ACCEPT);
+ } else {
+ prl_send_ctrl_msg(port, TCPC_TX_SOP, PD_CTRL_GOTO_MIN);
+ }
+
+}
+
+static void pe_src_transition_supply_run(int port)
+{
+ /*
+ * Transition to the PE_SRC_Ready state when:
+ * 1) The power supply is ready.
+ *
+ * NOTE: This code block is executed twice:
+ * First Pass)
+ * When PE_FLAGS_TX_COMPLETE is set due to the
+ * PD_CTRL_ACCEPT or PD_CTRL_GOTO_MIN messages
+ * being sent.
+ *
+ * Second Pass)
+ * When PE_FLAGS_TX_COMPLETE is set due to the
+ * PD_CTRL_PS_RDY message being sent.
+ */
+ if (PE_CHK_FLAG(port, PE_FLAGS_TX_COMPLETE)) {
+ PE_CLR_FLAG(port, PE_FLAGS_TX_COMPLETE);
+
+ /*
+ * NOTE: If a message was received,
+ * pe_src_ready state will handle it.
+ */
+
+ if (PE_CHK_FLAG(port, PE_FLAGS_PS_READY)) {
+ PE_CLR_FLAG(port, PE_FLAGS_PS_READY);
+ /* NOTE: Second pass through this code block */
+ /* Explicit Contract is now in place */
+ PE_SET_FLAG(port, PE_FLAGS_EXPLICIT_CONTRACT);
+ set_state_pe(port, PE_SRC_READY);
+ } else {
+ /* NOTE: First pass through this code block */
+ /* Send PS_RDY message */
+ prl_send_ctrl_msg(port, TCPC_TX_SOP, PD_CTRL_PS_RDY);
+ PE_SET_FLAG(port, PE_FLAGS_PS_READY);
+ }
+
+ return;
+ }
+
+ /*
+ * Transition to the PE_SRC_Hard_Reset state when:
+ * 1) A Protocol Error occurs.
+ */
+ if (PE_CHK_FLAG(port, PE_FLAGS_PROTOCOL_ERROR)) {
+ PE_CLR_FLAG(port, PE_FLAGS_PROTOCOL_ERROR);
+ set_state_pe(port, PE_SRC_HARD_RESET);
+ }
+}
+
+/**
+ * PE_SRC_Ready
+ */
+static void pe_src_ready_entry(int port)
+{
+ print_current_state(port);
+
+ /*
+ * If the transition into PE_SRC_Ready is the result of Protocol Error
+ * that has not caused a Soft Reset (see Section 8.3.3.4.1) then the
+ * notification to the Protocol Layer of the end of the AMS Shall Not
+ * be sent since there is a Message to be processed.
+ */
+ if (PE_CHK_FLAG(port, PE_FLAGS_PROTOCOL_ERROR)) {
+ PE_CLR_FLAG(port, PE_FLAGS_PROTOCOL_ERROR);
+ } else {
+ PE_CLR_FLAG(port, PE_FLAGS_INTERRUPTIBLE_AMS);
+ prl_end_ams(port);
+ }
+
+ /*
+ * Do port partner discovery
+ */
+ if (!PE_CHK_FLAG(port, PE_FLAGS_MODAL_OPERATION |
+ PE_FLAGS_DISCOVER_PORT_IDENTITY_DONE) &&
+ pe[port].port_discover_identity_count <=
+ N_DISCOVER_IDENTITY_COUNT) {
+ pe[port].discover_identity_timer =
+ get_time().val + PD_T_DISCOVER_IDENTITY;
+ } else {
+ PE_SET_FLAG(port, PE_FLAGS_DISCOVER_PORT_IDENTITY_DONE);
+ pe[port].discover_identity_timer = 0;
+ }
+
+ /* NOTE: PPS Implementation should be added here. */
+
+ tc_set_timeout(port, 5 * MSEC);
+}
+
+static void pe_src_ready_run(int port)
+{
+ uint32_t payload;
+ uint8_t type;
+ uint8_t cnt;
+ uint8_t ext;
+
+ /*
+ * Start Port Discovery when:
+ * 1) The DiscoverIdentityTimer times out.
+ */
+ if (pe[port].discover_identity_timer > 0 &&
+ get_time().val > pe[port].discover_identity_timer) {
+ pe[port].port_discover_identity_count++;
+ pe[port].vdm_cmd = DO_PORT_DISCOVERY_START;
+ PE_CLR_FLAG(port, PE_FLAGS_VDM_REQUEST_NAKED |
+ PE_FLAGS_VDM_REQUEST_BUSY);
+ set_state_pe(port, PE_DO_PORT_DISCOVERY);
+ return;
+ }
+
+ /*
+ * Handle Device Policy Manager Requests
+ */
+
+ /*
+ * Ignore sink specific request:
+ * DPM_REQUEST_NEW_POWER_LEVEL
+ * DPM_REQUEST_SOURCE_CAP
+ */
+
+ PE_CLR_DPM_REQUEST(port, DPM_REQUEST_NEW_POWER_LEVEL |
+ DPM_REQUEST_SOURCE_CAP);
+
+ if (pe[port].dpm_request) {
+ if (PE_CHK_DPM_REQUEST(port, DPM_REQUEST_DR_SWAP)) {
+ PE_CLR_DPM_REQUEST(port, DPM_REQUEST_DR_SWAP);
+ if (PE_CHK_FLAG(port, PE_FLAGS_MODAL_OPERATION))
+ set_state_pe(port, PE_SRC_HARD_RESET);
+ else
+ set_state_pe(port, PE_DRS_SEND_SWAP);
+ } else if (PE_CHK_DPM_REQUEST(port, DPM_REQUEST_PR_SWAP)) {
+ PE_CLR_DPM_REQUEST(port, DPM_REQUEST_PR_SWAP);
+ set_state_pe(port, PE_PRS_SRC_SNK_SEND_SWAP);
+ } else if (PE_CHK_DPM_REQUEST(port, DPM_REQUEST_VCONN_SWAP)) {
+ PE_CLR_DPM_REQUEST(port, DPM_REQUEST_VCONN_SWAP);
+ set_state_pe(port, PE_VCS_SEND_SWAP);
+ } else if (PE_CHK_DPM_REQUEST(port, DPM_REQUEST_GOTO_MIN)) {
+ PE_CLR_DPM_REQUEST(port, DPM_REQUEST_GOTO_MIN);
+ set_state_pe(port, PE_SRC_TRANSITION_SUPPLY);
+ } else if (PE_CHK_DPM_REQUEST(port,
+ DPM_REQUEST_SRC_CAP_CHANGE)) {
+ PE_CLR_DPM_REQUEST(port, DPM_REQUEST_SRC_CAP_CHANGE);
+ set_state_pe(port, PE_SRC_SEND_CAPABILITIES);
+ } else if (PE_CHK_DPM_REQUEST(port, DPM_REQUEST_SEND_PING)) {
+ PE_CLR_DPM_REQUEST(port, DPM_REQUEST_SEND_PING);
+ set_state_pe(port, PE_SRC_PING);
+ } else if (PE_CHK_DPM_REQUEST(port,
+ DPM_REQUEST_DISCOVER_IDENTITY)) {
+ PE_CLR_DPM_REQUEST(port, DPM_REQUEST_DISCOVER_IDENTITY);
+
+ pe[port].partner_type = CABLE;
+ pe[port].vdm_cmd = DISCOVER_IDENTITY;
+ pe[port].vdm_data[0] = VDO(
+ USB_SID_PD,
+ 1, /* structured */
+ VDO_SVDM_VERS(1) | DISCOVER_IDENTITY);
+ pe[port].vdm_cnt = 1;
+ set_state_pe(port, PE_VDM_REQUEST);
+ }
+ return;
+ }
+
+ /*
+ * Handle Source Requests
+ */
+ if (PE_CHK_FLAG(port, PE_FLAGS_MSG_RECEIVED)) {
+ PE_CLR_FLAG(port, PE_FLAGS_MSG_RECEIVED);
+
+ type = PD_HEADER_TYPE(emsg[port].header);
+ cnt = PD_HEADER_CNT(emsg[port].header);
+ ext = PD_HEADER_EXT(emsg[port].header);
+ payload = *(uint32_t *)emsg[port].buf;
+
+ /* Extended Message Requests */
+ if (ext > 0) {
+ switch (type) {
+ case PD_EXT_GET_BATTERY_CAP:
+ set_state_pe(port, PE_GIVE_BATTERY_CAP);
+ break;
+ case PD_EXT_GET_BATTERY_STATUS:
+ set_state_pe(port, PE_GIVE_BATTERY_STATUS);
+ break;
+ default:
+ set_state_pe(port, PE_SEND_NOT_SUPPORTED);
+ }
+ }
+ /* Data Message Requests */
+ else if (cnt > 0) {
+ switch (type) {
+ case PD_DATA_REQUEST:
+ set_state_pe(port, PE_SRC_NEGOTIATE_CAPABILITY);
+ break;
+ case PD_DATA_SINK_CAP:
+ break;
+ case PD_DATA_VENDOR_DEF:
+ if (PD_HEADER_TYPE(emsg[port].header) ==
+ PD_DATA_VENDOR_DEF) {
+ if (PD_VDO_SVDM(payload)) {
+ set_state_pe(port,
+ PE_VDM_RESPONSE);
+ } else
+ set_state_pe(port,
+ PE_HANDLE_CUSTOM_VDM_REQUEST);
+ }
+ break;
+ case PD_DATA_BIST:
+ set_state_pe(port, PE_BIST);
+ break;
+ default:
+ set_state_pe(port, PE_SEND_NOT_SUPPORTED);
+ }
+ }
+ /* Control Message Requests */
+ else {
+ switch (type) {
+ case PD_CTRL_GOOD_CRC:
+ break;
+ case PD_CTRL_NOT_SUPPORTED:
+ break;
+ case PD_CTRL_PING:
+ break;
+ case PD_CTRL_GET_SOURCE_CAP:
+ set_state_pe(port, PE_SRC_SEND_CAPABILITIES);
+ break;
+ case PD_CTRL_GET_SINK_CAP:
+ set_state_pe(port, PE_SNK_GIVE_SINK_CAP);
+ break;
+ case PD_CTRL_GOTO_MIN:
+ break;
+ case PD_CTRL_PR_SWAP:
+ set_state_pe(port,
+ PE_PRS_SRC_SNK_EVALUATE_SWAP);
+ break;
+ case PD_CTRL_DR_SWAP:
+ if (PE_CHK_FLAG(port,
+ PE_FLAGS_MODAL_OPERATION)) {
+ set_state_pe(port, PE_SRC_HARD_RESET);
+ return;
+ }
+
+ set_state_pe(port, PE_DRS_EVALUATE_SWAP);
+ break;
+ case PD_CTRL_VCONN_SWAP:
+ set_state_pe(port, PE_VCS_EVALUATE_SWAP);
+ break;
+ default:
+ set_state_pe(port, PE_SEND_NOT_SUPPORTED);
+ }
+ }
+ }
+}
+
+static void pe_src_ready_exit(int port)
+{
+ PE_CLR_FLAG(port, PE_FLAGS_TX_COMPLETE);
+
+ /*
+ * If the Source is initiating an AMS then the Policy Engine Shall
+ * notify the Protocol Layer that the first Message in an AMS will
+ * follow.
+ */
+ if (!PE_CHK_FLAG(port, PE_FLAGS_INTERRUPTIBLE_AMS))
+ prl_start_ams(port);
+
+ tc_set_timeout(port, 2 * MSEC);
+}
+
+/**
+ * PE_SRC_Disabled
+ */
+static void pe_src_disabled_entry(int port)
+{
+ print_current_state(port);
+
+ if ((pe[port].vpd_vdo >= 0) && VPD_VDO_CTS(pe[port].vpd_vdo)) {
+ /*
+ * Inform the Device Policy Manager that a Charge-Through VCONN
+ * Powered Device was detected.
+ */
+ tc_ctvpd_detected(port);
+ }
+
+ /*
+ * Unresponsive to USB Power Delivery messaging, but not to Hard Reset
+ * Signaling. See pe_got_hard_reset
+ */
+}
+
+/**
+ * PE_SRC_Capability_Response
+ */
+static void pe_src_capability_response_entry(int port)
+{
+ print_current_state(port);
+
+ /* NOTE: Wait messaging should be implemented. */
+
+ prl_send_ctrl_msg(port, TCPC_TX_SOP, PD_CTRL_REJECT);
+}
+
+static void pe_src_capability_response_run(int port)
+{
+ /*
+ * Transition to the PE_SRC_Ready state when:
+ * 1) There is an Explicit Contract and
+ * 2) A Reject Message has been sent and the present Contract is still
+ * Valid or
+ * 3) A Wait Message has been sent.
+ *
+ * Transition to the PE_SRC_Hard_Reset state when:
+ * 1) There is an Explicit Contract and
+ * 2) The Reject Message has been sent and the present
+ * Contract is Invalid
+ *
+ * Transition to the PE_SRC_Wait_New_Capabilities state when:
+ * 1) There is no Explicit Contract and
+ * 2) A Reject Message has been sent or
+ * 3) A Wait Message has been sent.
+ */
+ if (PE_CHK_FLAG(port, PE_FLAGS_TX_COMPLETE)) {
+ PE_CLR_FLAG(port, PE_FLAGS_TX_COMPLETE);
+
+ if (PE_CHK_FLAG(port, PE_FLAGS_EXPLICIT_CONTRACT))
+ /*
+ * NOTE: The src capabilities listed in
+ * board/xxx/usb_pd_policy.c will not
+ * change so the present contract will
+ * never be invalid.
+ */
+ set_state_pe(port, PE_SRC_READY);
+ else
+ /*
+ * NOTE: The src capabilities listed in
+ * board/xxx/usb_pd_policy.c will not
+			 * change, so there is no need to resend
+			 * them. Transition to the disabled state.
+ */
+ set_state_pe(port, PE_SRC_DISABLED);
+ }
+}
+
+/**
+ * PE_SRC_Hard_Reset
+ */
+static void pe_src_hard_reset_entry(int port)
+{
+ print_current_state(port);
+
+ /* Generate Hard Reset Signal */
+ prl_execute_hard_reset(port);
+
+ /* Increment the HardResetCounter */
+ pe[port].hard_reset_counter++;
+
+ /* Start NoResponseTimer */
+ pe[port].no_response_timer = get_time().val + PD_T_NO_RESPONSE;
+
+ /* Start PSHardResetTimer */
+ pe[port].ps_hard_reset_timer = get_time().val + PD_T_PS_HARD_RESET;
+}
+
+static void pe_src_hard_reset_run(int port)
+{
+ /*
+ * Transition to the PE_SRC_Transition_to_default state when:
+ * 1) The PSHardResetTimer times out.
+ */
+ if (get_time().val > pe[port].ps_hard_reset_timer)
+ set_state_pe(port, PE_SRC_TRANSITION_TO_DEFAULT);
+}
+
+/**
+ * PE_SRC_Hard_Reset_Received
+ */
+static void pe_src_hard_reset_received_entry(int port)
+{
+ print_current_state(port);
+
+ /* Start NoResponseTimer */
+ pe[port].no_response_timer = get_time().val + PD_T_NO_RESPONSE;
+
+ /* Start PSHardResetTimer */
+ pe[port].ps_hard_reset_timer = get_time().val + PD_T_PS_HARD_RESET;
+}
+
+static void pe_src_hard_reset_received_run(int port)
+{
+ /*
+ * Transition to the PE_SRC_Transition_to_default state when:
+ * 1) The PSHardResetTimer times out.
+ */
+ if (get_time().val > pe[port].ps_hard_reset_timer)
+ set_state_pe(port, PE_SRC_TRANSITION_TO_DEFAULT);
+}
+
+/**
+ * PE_SRC_Transition_To_Default
+ */
+static void pe_src_transition_to_default_entry(int port)
+{
+ print_current_state(port);
+
+ /* Reset flags */
+ pe[port].flags = 0;
+
+ /* Reset DPM Request */
+ pe[port].dpm_request = 0;
+
+ /*
+ * Request Device Policy Manager to request power
+ * supply Hard Resets to vSafe5V via vSafe0V
+ * Reset local HW
+ * Request Device Policy Manager to set Port Data
+ * Role to DFP and turn off VCONN
+ */
+ tc_hard_reset(port);
+}
+
+static void pe_src_transition_to_default_run(int port)
+{
+ /*
+ * Transition to the PE_SRC_Startup state when:
+ * 1) The power supply has reached the default level.
+ */
+ if (PE_CHK_FLAG(port, PE_FLAGS_PS_RESET_COMPLETE)) {
+ PE_CLR_FLAG(port, PE_FLAGS_PS_RESET_COMPLETE);
+ /* Inform the Protocol Layer that the Hard Reset is complete */
+ prl_hard_reset_complete(port);
+ set_state_pe(port, PE_SRC_STARTUP);
+ }
+}
+
+/**
+ * PE_SNK_Startup State
+ */
+static void pe_snk_startup_entry(int port)
+{
+ print_current_state(port);
+
+ /* Reset the protocol layer */
+ prl_reset(port);
+
+ /* Set initial data role */
+ pe[port].data_role = tc_get_data_role(port);
+
+ /* Set initial power role */
+ pe[port].power_role = PD_ROLE_SINK;
+
+ /* Clear explicit contract */
+ PE_CLR_FLAG(port, PE_FLAGS_EXPLICIT_CONTRACT);
+}
+
+static void pe_snk_startup_run(int port)
+{
+ /* Wait until protocol layer is running */
+ if (!prl_is_running(port))
+ return;
+
+ /*
+ * Once the reset process completes, the Policy Engine Shall
+ * transition to the PE_SNK_Discovery state
+ */
+ set_state_pe(port, PE_SNK_DISCOVERY);
+}
+
+/**
+ * PE_SNK_Discovery State
+ */
+static void pe_snk_discovery_entry(int port)
+{
+ print_current_state(port);
+}
+
+static void pe_snk_discovery_run(int port)
+{
+ /*
+ * Transition to the PE_SNK_Wait_for_Capabilities state when:
+ * 1) VBUS has been detected
+ */
+ if (pd_is_vbus_present(port))
+ set_state_pe(port, PE_SNK_WAIT_FOR_CAPABILITIES);
+}
+
+/**
+ * PE_SNK_Wait_For_Capabilities State
+ */
+static void pe_snk_wait_for_capabilities_entry(int port)
+{
+ print_current_state(port);
+
+ /* Initialize and start the SinkWaitCapTimer */
+ pe[port].timeout = get_time().val + PD_T_SINK_WAIT_CAP;
+}
+
+static void pe_snk_wait_for_capabilities_run(int port)
+{
+ uint8_t type;
+ uint8_t cnt;
+ uint8_t ext;
+
+ /*
+ * Transition to the PE_SNK_Evaluate_Capability state when:
+ * 1) A Source_Capabilities Message is received.
+ */
+ if (PE_CHK_FLAG(port, PE_FLAGS_MSG_RECEIVED)) {
+ PE_CLR_FLAG(port, PE_FLAGS_MSG_RECEIVED);
+
+ type = PD_HEADER_TYPE(emsg[port].header);
+ cnt = PD_HEADER_CNT(emsg[port].header);
+ ext = PD_HEADER_EXT(emsg[port].header);
+
+ if ((ext == 0) && (cnt > 0) && (type == PD_DATA_SOURCE_CAP)) {
+ set_state_pe(port, PE_SNK_EVALUATE_CAPABILITY);
+ return;
+ }
+ }
+
+ /* When the SinkWaitCapTimer times out, perform a Hard Reset. */
+ if (get_time().val > pe[port].timeout) {
+ PE_SET_FLAG(port, PE_FLAGS_SNK_WAIT_CAP_TIMEOUT);
+ set_state_pe(port, PE_SNK_HARD_RESET);
+ }
+}
+
+/**
+ * PE_SNK_Evaluate_Capability State
+ */
+static void pe_snk_evaluate_capability_entry(int port)
+{
+ uint32_t *pdo = (uint32_t *)emsg[port].buf;
+ uint32_t header = emsg[port].header;
+ uint32_t num = emsg[port].len >> 2;
+ int i;
+
+ print_current_state(port);
+
+ /* Reset Hard Reset counter to zero */
+ pe[port].hard_reset_counter = 0;
+
+ /* Set to highest revision supported by both ports. */
+ prl_set_rev(port, (PD_HEADER_REV(header) > PD_REV30) ?
+ PD_REV30 : PD_HEADER_REV(header));
+
+ pe[port].src_cap_cnt = num;
+
+ for (i = 0; i < num; i++)
+ pe[port].src_caps[i] = *pdo++;
+
+ /* src cap 0 should be fixed PDO */
+	pe_update_pdo_flags(port, pe[port].src_caps[0]);
+
+ /* Evaluate the options based on supplied capabilities */
+ pd_process_source_cap(port, pe[port].src_cap_cnt, pe[port].src_caps);
+
+ /* We are PD Connected */
+ PE_SET_FLAG(port, PE_FLAGS_PD_CONNECTION);
+ tc_pd_connection(port, 1);
+
+ /* Device Policy Response Received */
+ set_state_pe(port, PE_SNK_SELECT_CAPABILITY);
+}
+
+/**
+ * PE_SNK_Select_Capability State
+ */
+static void pe_snk_select_capability_entry(int port)
+{
+ print_current_state(port);
+
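+	/* A zeroed SenderResponseTimer indicates the Request is not yet sent */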
+ pe[port].sender_response_timer = 0;
+ /* Send Request */
+ pe_send_request_msg(port);
+}
+
+static void pe_snk_select_capability_run(int port)
+{
+ uint8_t type;
+ uint8_t cnt;
+
+ /* Wait until message is sent */
+ if (pe[port].sender_response_timer == 0 &&
+ (PE_CHK_FLAG(port, PE_FLAGS_TX_COMPLETE))) {
+ PE_CLR_FLAG(port, PE_FLAGS_TX_COMPLETE);
+
+ /* Initialize and run SenderResponseTimer */
+ pe[port].sender_response_timer =
+ get_time().val + PD_T_SENDER_RESPONSE;
+ }
+
+ if (pe[port].sender_response_timer == 0)
+ return;
+
+ if (PE_CHK_FLAG(port, PE_FLAGS_MSG_RECEIVED)) {
+ PE_CLR_FLAG(port, PE_FLAGS_MSG_RECEIVED);
+ type = PD_HEADER_TYPE(emsg[port].header);
+ cnt = PD_HEADER_CNT(emsg[port].header);
+
+ /*
+ * Transition to the PE_SNK_Transition_Sink state when:
+ * 1) An Accept Message is received from the Source.
+ *
+ * Transition to the PE_SNK_Wait_for_Capabilities state when:
+ * 1) There is no Explicit Contract in place and
+ * 2) A Reject Message is received from the Source or
+ * 3) A Wait Message is received from the Source.
+ *
+ * Transition to the PE_SNK_Ready state when:
+ * 1) There is an Explicit Contract in place and
+ * 2) A Reject Message is received from the Source or
+ * 3) A Wait Message is received from the Source.
+ *
+ * Transition to the PE_SNK_Hard_Reset state when:
+ * 1) A SenderResponseTimer timeout occurs.
+ */
+
+ /* Only look at control messages */
+ if (cnt == 0) {
+ /*
+ * Accept Message Received
+ */
+ if (type == PD_CTRL_ACCEPT) {
+ /* explicit contract is now in place */
+ PE_SET_FLAG(port, PE_FLAGS_EXPLICIT_CONTRACT);
+ set_state_pe(port, PE_SNK_TRANSITION_SINK);
+ return;
+ }
+ /*
+ * Reject or Wait Message Received
+ */
+ else if (type == PD_CTRL_REJECT ||
+ type == PD_CTRL_WAIT) {
+ if (type == PD_CTRL_WAIT)
+ PE_SET_FLAG(port, PE_FLAGS_WAIT);
+
+ /*
+ * We had a previous explicit contract, so
+ * transition to PE_SNK_Ready
+ */
+ if (PE_CHK_FLAG(port,
+ PE_FLAGS_EXPLICIT_CONTRACT))
+ set_state_pe(port, PE_SNK_READY);
+ /*
+ * No previous explicit contract, so transition
+ * to PE_SNK_Wait_For_Capabilities
+ */
+ else
+ set_state_pe(port,
+ PE_SNK_WAIT_FOR_CAPABILITIES);
+ return;
+ }
+ }
+ }
+
+	/* SenderResponseTimer timeout */
+ if (get_time().val > pe[port].sender_response_timer)
+ set_state_pe(port, PE_SNK_HARD_RESET);
+}
+
+/**
+ * PE_SNK_Transition_Sink State
+ */
+static void pe_snk_transition_sink_entry(int port)
+{
+ print_current_state(port);
+
+ /* Initialize and run PSTransitionTimer */
+ pe[port].ps_transition_timer = get_time().val + PD_T_PS_TRANSITION;
+}
+
+static void pe_snk_transition_sink_run(int port)
+{
+ /*
+ * Transition to the PE_SNK_Ready state when:
+ * 1) A PS_RDY Message is received from the Source.
+ *
+ * Transition to the PE_SNK_Hard_Reset state when:
+ * 1) A Protocol Error occurs.
+ */
+
+ if (PE_CHK_FLAG(port, PE_FLAGS_MSG_RECEIVED)) {
+ PE_CLR_FLAG(port, PE_FLAGS_MSG_RECEIVED);
+
+ /*
+ * PS_RDY message received
+ */
+ if ((PD_HEADER_CNT(emsg[port].header) == 0) &&
+ (PD_HEADER_TYPE(emsg[port].header) ==
+ PD_CTRL_PS_RDY)) {
+ set_state_pe(port, PE_SNK_READY);
+ return;
+ }
+
+ /*
+ * Protocol Error
+ */
+ set_state_pe(port, PE_SNK_HARD_RESET);
+ }
+
+ /*
+ * Timeout will lead to a Hard Reset
+ */
+ if (get_time().val > pe[port].ps_transition_timer &&
+ pe[port].hard_reset_counter <= N_HARD_RESET_COUNT) {
+ PE_SET_FLAG(port, PE_FLAGS_PS_TRANSITION_TIMEOUT);
+
+ set_state_pe(port, PE_SNK_HARD_RESET);
+ }
+}
+
+static void pe_snk_transition_sink_exit(int port)
+{
+ /* Transition Sink's power supply to the new power level */
+ pd_set_input_current_limit(port,
+ pe[port].curr_limit, pe[port].supply_voltage);
+
+ if (IS_ENABLED(CONFIG_CHARGE_MANAGER))
+ /* Set ceiling based on what's negotiated */
+ charge_manager_set_ceil(port,
+ CEIL_REQUESTOR_PD, pe[port].curr_limit);
+}
+
+/**
+ * PE_SNK_Ready State
+ */
+static void pe_snk_ready_entry(int port)
+{
+ print_current_state(port);
+
+ PE_CLR_FLAG(port, PE_FLAGS_INTERRUPTIBLE_AMS);
+ prl_end_ams(port);
+
+ /*
+	 * If entry to the PE_SNK_Ready state is the result of a Wait Message,
+	 * then do the following:
+ * 1) Initialize and run the SinkRequestTimer
+ */
+ if (PE_CHK_FLAG(port, PE_FLAGS_WAIT)) {
+ PE_CLR_FLAG(port, PE_FLAGS_WAIT);
+ pe[port].sink_request_timer =
+ get_time().val + PD_T_SINK_REQUEST;
+ } else {
+ pe[port].sink_request_timer = 0;
+ }
+
+ /*
+ * Do port partner discovery
+ */
+ if (!PE_CHK_FLAG(port, PE_FLAGS_MODAL_OPERATION |
+ PE_FLAGS_DISCOVER_PORT_IDENTITY_DONE) &&
+ pe[port].port_discover_identity_count <=
+ N_DISCOVER_IDENTITY_COUNT) {
+ pe[port].discover_identity_timer =
+ get_time().val + PD_T_DISCOVER_IDENTITY;
+ } else {
+ PE_SET_FLAG(port, PE_FLAGS_DISCOVER_PORT_IDENTITY_DONE);
+ pe[port].discover_identity_timer = 0;
+ }
+
+ /*
+ * On entry to the PE_SNK_Ready state if the current Explicit Contract
+ * is for a PPS APDO, then do the following:
+ * 1) Initialize and run the SinkPPSPeriodicTimer.
+ * NOTE: PPS Implementation should be added here.
+ */
+
+ tc_set_timeout(port, 5 * MSEC);
+}
+
+static void pe_snk_ready_run(int port)
+{
+ uint32_t payload;
+ uint8_t type;
+ uint8_t cnt;
+ uint8_t ext;
+
+ if (pe[port].sink_request_timer > 0 &&
+ get_time().val > pe[port].sink_request_timer) {
+ set_state_pe(port, PE_SNK_SELECT_CAPABILITY);
+ return;
+ }
+
+ /*
+ * Start Port Discovery when:
+ * 1) The DiscoverIdentityTimer times out.
+ */
+ if (pe[port].discover_identity_timer > 0 &&
+ get_time().val > pe[port].discover_identity_timer) {
+ pe[port].port_discover_identity_count++;
+ pe[port].vdm_cmd = DO_PORT_DISCOVERY_START;
+ PE_CLR_FLAG(port, PE_FLAGS_VDM_REQUEST_NAKED |
+ PE_FLAGS_VDM_REQUEST_BUSY);
+ set_state_pe(port, PE_DO_PORT_DISCOVERY);
+ return;
+ }
+
+ /*
+ * Handle Device Policy Manager Requests
+ */
+ /*
+ * Ignore source specific requests:
+ * DPM_REQUEST_GOTO_MIN
+ * DPM_REQUEST_SRC_CAP_CHANGE,
+ * DPM_REQUEST_GET_SNK_CAPS,
+ * DPM_REQUEST_SEND_PING
+ */
+ PE_CLR_DPM_REQUEST(port, DPM_REQUEST_GOTO_MIN |
+ DPM_REQUEST_SRC_CAP_CHANGE |
+ DPM_REQUEST_GET_SNK_CAPS |
+ DPM_REQUEST_SEND_PING);
+
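+	/*
+	 * Service at most one DPM request per pass through this state;
+	 * the if/else chain below defines the request priority.
+	 */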
+ if (pe[port].dpm_request) {
+ if (PE_CHK_DPM_REQUEST(port, DPM_REQUEST_DR_SWAP)) {
+ PE_CLR_DPM_REQUEST(port, DPM_REQUEST_DR_SWAP);
+ if (PE_CHK_FLAG(port, PE_FLAGS_MODAL_OPERATION))
+ set_state_pe(port, PE_SNK_HARD_RESET);
+ else
+ set_state_pe(port, PE_DRS_SEND_SWAP);
+ } else if (PE_CHK_DPM_REQUEST(port, DPM_REQUEST_PR_SWAP)) {
+ PE_CLR_DPM_REQUEST(port, DPM_REQUEST_PR_SWAP);
+ set_state_pe(port, PE_PRS_SNK_SRC_SEND_SWAP);
+ } else if (PE_CHK_DPM_REQUEST(port, DPM_REQUEST_VCONN_SWAP)) {
+ PE_CLR_DPM_REQUEST(port, DPM_REQUEST_VCONN_SWAP);
+ set_state_pe(port, PE_VCS_SEND_SWAP);
+ } else if (PE_CHK_DPM_REQUEST(port, DPM_REQUEST_SOURCE_CAP)) {
+ PE_CLR_DPM_REQUEST(port, DPM_REQUEST_SOURCE_CAP);
+ set_state_pe(port, PE_SNK_GET_SOURCE_CAP);
+ } else if (PE_CHK_DPM_REQUEST(port,
+ DPM_REQUEST_NEW_POWER_LEVEL)) {
+ PE_CLR_DPM_REQUEST(port, DPM_REQUEST_NEW_POWER_LEVEL);
+ set_state_pe(port, PE_SNK_SELECT_CAPABILITY);
+ } else if (PE_CHK_DPM_REQUEST(port,
+ DPM_REQUEST_DISCOVER_IDENTITY)) {
+ PE_CLR_DPM_REQUEST(port, DPM_REQUEST_DISCOVER_IDENTITY);
+
+ pe[port].partner_type = CABLE;
+ pe[port].vdm_cmd = DISCOVER_IDENTITY;
+ pe[port].vdm_data[0] = VDO(
+ USB_SID_PD,
+ 1, /* structured */
+ VDO_SVDM_VERS(1) | DISCOVER_IDENTITY);
+ pe[port].vdm_cnt = 1;
+
+ set_state_pe(port, PE_VDM_REQUEST);
+ }
+ return;
+ }
+
+ /*
+ * Handle Source Requests
+ */
+ if (PE_CHK_FLAG(port, PE_FLAGS_MSG_RECEIVED)) {
+ PE_CLR_FLAG(port, PE_FLAGS_MSG_RECEIVED);
+
+ type = PD_HEADER_TYPE(emsg[port].header);
+ cnt = PD_HEADER_CNT(emsg[port].header);
+ ext = PD_HEADER_EXT(emsg[port].header);
+ payload = *(uint32_t *)emsg[port].buf;
+
+ /* Extended Message Request */
+ if (ext > 0) {
+ switch (type) {
+ case PD_EXT_GET_BATTERY_CAP:
+ set_state_pe(port, PE_GIVE_BATTERY_CAP);
+ break;
+ case PD_EXT_GET_BATTERY_STATUS:
+ set_state_pe(port, PE_GIVE_BATTERY_STATUS);
+ break;
+ default:
+ set_state_pe(port, PE_SEND_NOT_SUPPORTED);
+ }
+ }
+ /* Data Messages */
+ else if (cnt > 0) {
+ switch (type) {
+ case PD_DATA_SOURCE_CAP:
+ set_state_pe(port,
+ PE_SNK_EVALUATE_CAPABILITY);
+ break;
+ case PD_DATA_VENDOR_DEF:
+				if (PD_VDO_SVDM(payload))
+					set_state_pe(port,
+						PE_VDM_RESPONSE);
+				else
+					set_state_pe(port,
+						PE_HANDLE_CUSTOM_VDM_REQUEST);
+ break;
+ case PD_DATA_BIST:
+ set_state_pe(port, PE_BIST);
+ break;
+ default:
+ set_state_pe(port, PE_SEND_NOT_SUPPORTED);
+ }
+ }
+ /* Control Messages */
+ else {
+ switch (type) {
+ case PD_CTRL_GOOD_CRC:
+ /* Do nothing */
+ break;
+ case PD_CTRL_PING:
+			/* Do nothing */
+ break;
+ case PD_CTRL_GET_SOURCE_CAP:
+ set_state_pe(port, PE_SNK_GET_SOURCE_CAP);
+ break;
+ case PD_CTRL_GET_SINK_CAP:
+ set_state_pe(port, PE_SNK_GIVE_SINK_CAP);
+ break;
+ case PD_CTRL_GOTO_MIN:
+ set_state_pe(port, PE_SNK_TRANSITION_SINK);
+ break;
+ case PD_CTRL_PR_SWAP:
+ set_state_pe(port,
+ PE_PRS_SNK_SRC_EVALUATE_SWAP);
+ break;
+ case PD_CTRL_DR_SWAP:
+ if (PE_CHK_FLAG(port, PE_FLAGS_MODAL_OPERATION))
+ set_state_pe(port, PE_SNK_HARD_RESET);
+ else
+ set_state_pe(port,
+ PE_DRS_EVALUATE_SWAP);
+ break;
+ case PD_CTRL_VCONN_SWAP:
+ set_state_pe(port, PE_VCS_EVALUATE_SWAP);
+ break;
+ case PD_CTRL_NOT_SUPPORTED:
+ /* Do nothing */
+ break;
+ default:
+ set_state_pe(port, PE_SEND_NOT_SUPPORTED);
+ }
+ }
+ }
+}
+
+static void pe_snk_ready_exit(int port)
+{
+ if (!PE_CHK_FLAG(port, PE_FLAGS_INTERRUPTIBLE_AMS))
+ prl_start_ams(port);
+
+ tc_set_timeout(port, 2 * MSEC);
+}
+
+/**
+ * PE_SNK_Hard_Reset
+ */
+static void pe_snk_hard_reset_entry(int port)
+{
+ print_current_state(port);
+
+ /*
+ * Note: If the SinkWaitCapTimer times out and the HardResetCounter is
+ * greater than nHardResetCount the Sink Shall assume that the
+ * Source is non-responsive.
+ */
+ if (PE_CHK_FLAG(port, PE_FLAGS_SNK_WAIT_CAP_TIMEOUT) &&
+ pe[port].hard_reset_counter > N_HARD_RESET_COUNT) {
+		set_state_pe(port, PE_SRC_DISABLED);
+		return;
+	}
+
+ PE_CLR_FLAG(port, PE_FLAGS_SNK_WAIT_CAP_TIMEOUT);
+
+ /* Request the generation of Hard Reset Signaling by the PHY Layer */
+ pe_prl_execute_hard_reset(port);
+
+ /* Increment the HardResetCounter */
+ pe[port].hard_reset_counter++;
+
+ /*
+ * Transition the Sink’s power supply to the new power level if
+	 * PSTransitionTimer timeout occurred.
+ */
+ if (PE_CHK_FLAG(port, PE_FLAGS_PS_TRANSITION_TIMEOUT)) {
+		PE_CLR_FLAG(port, PE_FLAGS_PS_TRANSITION_TIMEOUT);
+
+ /* Transition Sink's power supply to the new power level */
+ pd_set_input_current_limit(port, pe[port].curr_limit,
+ pe[port].supply_voltage);
+#ifdef CONFIG_CHARGE_MANAGER
+ /* Set ceiling based on what's negotiated */
+ charge_manager_set_ceil(port, CEIL_REQUESTOR_PD,
+ pe[port].curr_limit);
+#endif
+ }
+}
+
+static void pe_snk_hard_reset_run(int port)
+{
+ /*
+ * Transition to the PE_SNK_Transition_to_default state when:
+ * 1) The Hard Reset is complete.
+ */
+ if (PE_CHK_FLAG(port, PE_FLAGS_HARD_RESET_PENDING))
+ return;
+
+ set_state_pe(port, PE_SNK_TRANSITION_TO_DEFAULT);
+}
+
+/**
+ * PE_SNK_Transition_to_default
+ */
+static void pe_snk_transition_to_default_entry(int port)
+{
+ print_current_state(port);
+
+ tc_hard_reset(port);
+}
+
+static void pe_snk_transition_to_default_run(int port)
+{
+ if (PE_CHK_FLAG(port, PE_FLAGS_PS_RESET_COMPLETE)) {
+ /* PE_SNK_Startup clears all flags */
+
+ /* Inform the Protocol Layer that the Hard Reset is complete */
+ prl_hard_reset_complete(port);
+ set_state_pe(port, PE_SNK_STARTUP);
+ }
+}
+
+/**
+ * PE_SNK_Get_Source_Cap
+ */
+static void pe_snk_get_source_cap_entry(int port)
+{
+ print_current_state(port);
+
+ /* Send a Get_Source_Cap Message */
+ emsg[port].len = 0;
+ prl_send_ctrl_msg(port, TCPC_TX_SOP, PD_CTRL_GET_SOURCE_CAP);
+}
+
+static void pe_snk_get_source_cap_run(int port)
+{
+ if (PE_CHK_FLAG(port, PE_FLAGS_TX_COMPLETE)) {
+ PE_CLR_FLAG(port, PE_FLAGS_TX_COMPLETE);
+
+ set_state_pe(port, PE_SNK_READY);
+ }
+}
+
+/**
+ * PE_SNK_Send_Soft_Reset and PE_SRC_Send_Soft_Reset
+ */
+static void pe_send_soft_reset_entry(int port)
+{
+ print_current_state(port);
+
+ /* Reset Protocol Layer */
+ prl_reset(port);
+
+ pe[port].sender_response_timer = 0;
+}
+
+static void pe_send_soft_reset_run(int port)
+{
+ int type;
+ int cnt;
+ int ext;
+
+ /* Wait until protocol layer is running */
+ if (!prl_is_running(port))
+ return;
+
+ if (pe[port].sender_response_timer == 0) {
+ /* Send Soft Reset message */
+ prl_send_ctrl_msg(port, TCPC_TX_SOP, PD_CTRL_SOFT_RESET);
+
+ /* Initialize and run SenderResponseTimer */
+ pe[port].sender_response_timer =
+ get_time().val + PD_T_SENDER_RESPONSE;
+ }
+
+ /*
+	 * Transition to PE_SNK_Hard_Reset or PE_SRC_Hard_Reset on a
+	 * SenderResponseTimer timeout or Protocol Layer error.
+ */
+ if (get_time().val > pe[port].sender_response_timer ||
+ PE_CHK_FLAG(port, PE_FLAGS_PROTOCOL_ERROR)) {
+ PE_CLR_FLAG(port, PE_FLAGS_PROTOCOL_ERROR);
+
+ if (pe[port].power_role == PD_ROLE_SINK)
+			set_state_pe(port, PE_SNK_HARD_RESET);
+ else
+ set_state_pe(port, PE_SRC_HARD_RESET);
+ return;
+ }
+
+ /*
+	 * Transition to the PE_SNK_Wait_for_Capabilities or
+	 * PE_SRC_Send_Capabilities state when:
+ * 1) An Accept Message has been received.
+ */
+ if (PE_CHK_FLAG(port, PE_FLAGS_MSG_RECEIVED)) {
+ PE_CLR_FLAG(port, PE_FLAGS_MSG_RECEIVED);
+
+ type = PD_HEADER_TYPE(emsg[port].header);
+ cnt = PD_HEADER_CNT(emsg[port].header);
+ ext = PD_HEADER_EXT(emsg[port].header);
+
+ if ((ext == 0) && (cnt == 0) && (type == PD_CTRL_ACCEPT)) {
+ if (pe[port].power_role == PD_ROLE_SINK)
+ set_state_pe(port,
+ PE_SNK_WAIT_FOR_CAPABILITIES);
+ else
+ set_state_pe(port,
+ PE_SRC_SEND_CAPABILITIES);
+ return;
+ }
+ }
+}
+
+static void pe_send_soft_reset_exit(int port)
+{
+ /* Clear TX Complete Flag */
+ PE_CLR_FLAG(port, PE_FLAGS_TX_COMPLETE);
+}
+
+/**
+ * PE_SRC_Soft_Reset and PE_SNK_Soft_Reset
+ */
+static void pe_soft_reset_entry(int port)
+{
+ print_current_state(port);
+
+ pe[port].sender_response_timer = 0;
+}
+
+static void pe_soft_reset_run(int port)
+{
+ if (pe[port].sender_response_timer == 0) {
+ /* Send Accept message */
+ prl_send_ctrl_msg(port, TCPC_TX_SOP, PD_CTRL_ACCEPT);
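+		/* Bump the timer so the Accept is only sent once */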
+ pe[port].sender_response_timer++;
+ }
+
+ if (PE_CHK_FLAG(port, PE_FLAGS_TX_COMPLETE)) {
+ PE_CLR_FLAG(port, PE_FLAGS_TX_COMPLETE);
+
+ if (pe[port].power_role == PD_ROLE_SINK)
+ set_state_pe(port, PE_SNK_WAIT_FOR_CAPABILITIES);
+ else
+ set_state_pe(port, PE_SRC_SEND_CAPABILITIES);
+ } else if (PE_CHK_FLAG(port, PE_FLAGS_PROTOCOL_ERROR)) {
+ PE_CLR_FLAG(port, PE_FLAGS_PROTOCOL_ERROR);
+
+ if (pe[port].power_role == PD_ROLE_SINK)
+ set_state_pe(port, PE_SNK_HARD_RESET);
+ else
+ set_state_pe(port, PE_SRC_HARD_RESET);
+ }
+}
+
+/**
+ * PE_SRC_Not_Supported and PE_SNK_Not_Supported
+ */
+static void pe_send_not_supported_entry(int port)
+{
+ print_current_state(port);
+
+ /* Request the Protocol Layer to send a Not_Supported Message. */
+ if (prl_get_rev(port) > PD_REV20)
+ prl_send_ctrl_msg(port, TCPC_TX_SOP, PD_CTRL_NOT_SUPPORTED);
+ else
+ prl_send_ctrl_msg(port, TCPC_TX_SOP, PD_CTRL_REJECT);
+}
+
+static void pe_send_not_supported_run(int port)
+{
+ if (PE_CHK_FLAG(port, PE_FLAGS_TX_COMPLETE)) {
+ PE_CLR_FLAG(port, PE_FLAGS_TX_COMPLETE);
+
+ if (pe[port].power_role == PD_ROLE_SOURCE)
+ set_state_pe(port, PE_SRC_READY);
+ else
+ set_state_pe(port, PE_SNK_READY);
+ }
+}
+
+/**
+ * PE_SRC_Ping
+ */
+static void pe_src_ping_entry(int port)
+{
+ print_current_state(port);
+ prl_send_ctrl_msg(port, TCPC_TX_SOP, PD_CTRL_PING);
+}
+
+static void pe_src_ping_run(int port)
+{
+ if (PE_CHK_FLAG(port, PE_FLAGS_TX_COMPLETE)) {
+ PE_CLR_FLAG(port, PE_FLAGS_TX_COMPLETE);
+ set_state_pe(port, PE_SRC_READY);
+ }
+}
+
+/**
+ * PE_Give_Battery_Cap
+ */
+static void pe_give_battery_cap_entry(int port)
+{
+ uint32_t payload = *(uint32_t *)(&emsg[port].buf);
+ uint16_t *msg = (uint16_t *)emsg[port].buf;
+
+ print_current_state(port);
+
+ /* msg[0] - extended header is set by Protocol Layer */
+
+ /* Set VID */
+ msg[1] = USB_VID_GOOGLE;
+
+ /* Set PID */
+ msg[2] = CONFIG_USB_PID;
+
+ if (battery_is_present()) {
+ /*
+ * We only have one fixed battery,
+ * so make sure batt cap ref is 0.
+ */
+ if (BATT_CAP_REF(payload) != 0) {
+ /* Invalid battery reference */
+ msg[3] = 0;
+ msg[4] = 0;
+ msg[5] = 1;
+ } else {
+ uint32_t v;
+ uint32_t c;
+
+ /*
+ * The Battery Design Capacity field shall return the
+ * Battery’s design capacity in tenths of Wh. If the
+ * Battery is Hot Swappable and is not present, the
+ * Battery Design Capacity field shall be set to 0. If
+ * the Battery is unable to report its Design Capacity,
+ * it shall return 0xFFFF
+ */
+ msg[3] = 0xffff;
+
+ /*
+ * The Battery Last Full Charge Capacity field shall
+ * return the Battery’s last full charge capacity in
+ * tenths of Wh. If the Battery is Hot Swappable and
+ * is not present, the Battery Last Full Charge Capacity
+ * field shall be set to 0. If the Battery is unable to
+ * report its Design Capacity, the Battery Last Full
+			 * report its Last Full Charge Capacity, the Battery
+			 * Last Full Charge Capacity field shall be set to
+			 * 0xFFFF.
+ msg[4] = 0xffff;
+
+ if (battery_design_voltage(&v) == 0) {
+ if (battery_design_capacity(&c) == 0) {
+ /*
+ * Wh = (c * v) / 1000000
+ * 10th of a Wh = Wh * 10
+ */
+ msg[3] = DIV_ROUND_NEAREST((c * v),
+ 100000);
+ }
+
+ if (battery_full_charge_capacity(&c) == 0) {
+ /*
+ * Wh = (c * v) / 1000000
+ * 10th of a Wh = Wh * 10
+ */
+ msg[4] = DIV_ROUND_NEAREST((c * v),
+ 100000);
+ }
+ }
+ }
+ }
+
+ /* Extended Battery Cap data is 9 bytes */
+ emsg[port].len = 9;
+
+ prl_send_ext_data_msg(port, TCPC_TX_SOP, PD_EXT_BATTERY_CAP);
+}
+
+static void pe_give_battery_cap_run(int port)
+{
+ if (PE_CHK_FLAG(port, PE_FLAGS_TX_COMPLETE)) {
+ PE_CLR_FLAG(port, PE_FLAGS_TX_COMPLETE);
+ if (pe[port].power_role == PD_ROLE_SOURCE)
+ set_state_pe(port, PE_SRC_READY);
+ else
+ set_state_pe(port, PE_SNK_READY);
+ }
+}
+
+/**
+ * PE_Give_Battery_Status
+ */
+static void pe_give_battery_status_entry(int port)
+{
+ uint32_t payload = *(uint32_t *)(&emsg[port].buf);
+ uint32_t *msg = (uint32_t *)emsg[port].buf;
+
+ print_current_state(port);
+
+ if (battery_is_present()) {
+ /*
+ * We only have one fixed battery,
+ * so make sure batt cap ref is 0.
+ */
+ if (BATT_CAP_REF(payload) != 0) {
+ /* Invalid battery reference */
+ *msg |= BSDO_INVALID;
+ } else {
+ uint32_t v;
+ uint32_t c;
+
+ if (battery_design_voltage(&v) != 0 ||
+ battery_remaining_capacity(&c) != 0) {
+ *msg |= BSDO_CAP(BSDO_CAP_UNKNOWN);
+ } else {
+ /*
+ * Wh = (c * v) / 1000000
+ * 10th of a Wh = Wh * 10
+ */
+ *msg |= BSDO_CAP(DIV_ROUND_NEAREST((c * v),
+ 100000));
+ }
+
+ /* Battery is present */
+ *msg |= BSDO_PRESENT;
+
+ /*
+ * For drivers that are not smart battery compliant,
+ * battery_status() returns EC_ERROR_UNIMPLEMENTED and
+ * the battery is assumed to be idle.
+ */
+ if (battery_status(&c) != 0) {
+ *msg |= BSDO_IDLE; /* assume idle */
+ } else {
+ if (c & STATUS_FULLY_CHARGED)
+ /* Fully charged */
+ *msg |= BSDO_IDLE;
+ else if (c & STATUS_DISCHARGING)
+ /* Discharging */
+ *msg |= BSDO_DISCHARGING;
+ /* else battery is charging.*/
+ }
+ }
+ } else {
+ *msg = BSDO_CAP(BSDO_CAP_UNKNOWN);
+ }
+
+ /* Battery Status data is 4 bytes */
+ emsg[port].len = 4;
+
+ prl_send_data_msg(port, TCPC_TX_SOP, PD_DATA_BATTERY_STATUS);
+}
+
+static void pe_give_battery_status_run(int port)
+{
+ if (PE_CHK_FLAG(port, PE_FLAGS_TX_COMPLETE)) {
+ PE_CLR_FLAG(port, PE_FLAGS_TX_COMPLETE);
+ set_state_pe(port, PE_SRC_READY);
+ }
+}
+
+/**
+ * PE_DRS_Evaluate_Swap
+ */
+static void pe_drs_evaluate_swap_entry(int port)
+{
+ print_current_state(port);
+
+ /* Get evaluation of Data Role Swap request from DPM */
+ if (pd_check_data_swap(port, pe[port].data_role)) {
+ PE_SET_FLAG(port, PE_FLAGS_ACCEPT);
+ /*
+ * PE_DRS_UFP_DFP_Evaluate_Swap and
+ * PE_DRS_DFP_UFP_Evaluate_Swap states embedded here.
+ */
+ prl_send_ctrl_msg(port, TCPC_TX_SOP, PD_CTRL_ACCEPT);
+ } else {
+ /*
+ * PE_DRS_UFP_DFP_Reject_Swap and PE_DRS_DFP_UFP_Reject_Swap
+ * states embedded here.
+ */
+ prl_send_ctrl_msg(port, TCPC_TX_SOP, PD_CTRL_REJECT);
+ }
+}
+
+static void pe_drs_evaluate_swap_run(int port)
+{
+ if (PE_CHK_FLAG(port, PE_FLAGS_TX_COMPLETE)) {
+ PE_CLR_FLAG(port, PE_FLAGS_TX_COMPLETE);
+
+		/* Accept Message sent. Transition to PE_DRS_Change */
+ if (PE_CHK_FLAG(port, PE_FLAGS_ACCEPT)) {
+ PE_CLR_FLAG(port, PE_FLAGS_ACCEPT);
+ set_state_pe(port, PE_DRS_CHANGE);
+ } else {
+ /*
+ * Message sent. Transition back to PE_SRC_Ready or
+ * PE_SNK_Ready.
+ */
+ if (pe[port].power_role == PD_ROLE_SOURCE)
+ set_state_pe(port, PE_SRC_READY);
+ else
+ set_state_pe(port, PE_SNK_READY);
+ }
+ }
+}
+
+/**
+ * PE_DRS_Change
+ */
+static void pe_drs_change_entry(int port)
+{
+ print_current_state(port);
+
+ /*
+ * PE_DRS_UFP_DFP_Change_to_DFP and PE_DRS_DFP_UFP_Change_to_UFP
+ * states embedded here.
+ */
+ /* Request DPM to change port data role */
+ pd_request_data_swap(port);
+}
+
+static void pe_drs_change_run(int port)
+{
+ /* Wait until the data role is changed */
+ if (pe[port].data_role == tc_get_data_role(port))
+ return;
+
+ /* Update the data role */
+ pe[port].data_role = tc_get_data_role(port);
+
+ /*
+ * Port changed. Transition back to PE_SRC_Ready or
+ * PE_SNK_Ready.
+ */
+ if (pe[port].power_role == PD_ROLE_SINK)
+ set_state_pe(port, PE_SNK_READY);
+ else
+ set_state_pe(port, PE_SRC_READY);
+}
+
+/**
+ * PE_DRS_Send_Swap
+ */
+static void pe_drs_send_swap_entry(int port)
+{
+ print_current_state(port);
+
+ /*
+ * PE_DRS_UFP_DFP_Send_Swap and PE_DRS_DFP_UFP_Send_Swap
+ * states embedded here.
+ */
+ /* Request the Protocol Layer to send a DR_Swap Message */
+ prl_send_ctrl_msg(port, TCPC_TX_SOP, PD_CTRL_DR_SWAP);
+
+ pe[port].sender_response_timer = 0;
+}
+
+static void pe_drs_send_swap_run(int port)
+{
+ int type;
+ int cnt;
+ int ext;
+
+ /* Wait until message is sent */
+ if (pe[port].sender_response_timer == 0 &&
+ PE_CHK_FLAG(port, PE_FLAGS_TX_COMPLETE)) {
+ PE_CLR_FLAG(port, PE_FLAGS_TX_COMPLETE);
+ /* start the SenderResponseTimer */
+ pe[port].sender_response_timer =
+ get_time().val + PD_T_SENDER_RESPONSE;
+ }
+
+ if (pe[port].sender_response_timer == 0)
+ return;
+
+ /*
+ * Transition to PE_SRC_Ready or PE_SNK_Ready state when:
+	 * 1) The SenderResponseTimer times out.
+ */
+ if (get_time().val > pe[port].sender_response_timer) {
+ if (pe[port].power_role == PD_ROLE_SINK)
+ set_state_pe(port, PE_SNK_READY);
+ else
+ set_state_pe(port, PE_SRC_READY);
+ return;
+ }
+
+ /*
+ * Transition to PE_DRS_Change when:
+ * 1) An Accept Message is received.
+ *
+ * Transition to PE_SRC_Ready or PE_SNK_Ready state when:
+ * 1) A Reject Message is received.
+ * 2) Or a Wait Message is received.
+ */
+ if (PE_CHK_FLAG(port, PE_FLAGS_MSG_RECEIVED)) {
+ PE_CLR_FLAG(port, PE_FLAGS_MSG_RECEIVED);
+
+ type = PD_HEADER_TYPE(emsg[port].header);
+ cnt = PD_HEADER_CNT(emsg[port].header);
+ ext = PD_HEADER_EXT(emsg[port].header);
+
+ if ((ext == 0) && (cnt == 0)) {
+ if (type == PD_CTRL_ACCEPT) {
+ set_state_pe(port, PE_DRS_CHANGE);
+ } else if ((type == PD_CTRL_REJECT) ||
+ (type == PD_CTRL_WAIT)) {
+ if (pe[port].power_role == PD_ROLE_SINK)
+ set_state_pe(port, PE_SNK_READY);
+ else
+ set_state_pe(port, PE_SRC_READY);
+ }
+ }
+ }
+}
+
+/**
+ * PE_PRS_SRC_SNK_Evaluate_Swap
+ */
+static void pe_prs_src_snk_evaluate_swap_entry(int port)
+{
+ print_current_state(port);
+
+ if (!pd_check_power_swap(port)) {
+ /* PE_PRS_SRC_SNK_Reject_PR_Swap state embedded here */
+ prl_send_ctrl_msg(port, TCPC_TX_SOP, PD_CTRL_REJECT);
+ } else {
+ pd_request_power_swap(port);
+ /* PE_PRS_SRC_SNK_Accept_Swap state embedded here */
+ PE_SET_FLAG(port, PE_FLAGS_ACCEPT);
+ prl_send_ctrl_msg(port, TCPC_TX_SOP, PD_CTRL_ACCEPT);
+ }
+}
+
+static void pe_prs_src_snk_evaluate_swap_run(int port)
+{
+ if (PE_CHK_FLAG(port, PE_FLAGS_TX_COMPLETE)) {
+ PE_CLR_FLAG(port, PE_FLAGS_TX_COMPLETE);
+
+ if (PE_CHK_FLAG(port, PE_FLAGS_ACCEPT)) {
+ PE_CLR_FLAG(port, PE_FLAGS_ACCEPT);
+
+ /*
+ * Power Role Swap OK, transition to
+ * PE_PRS_SRC_SNK_Transition_to_off
+ */
+ set_state_pe(port, PE_PRS_SRC_SNK_TRANSITION_TO_OFF);
+ } else {
+ /* Message sent, return to PE_SRC_Ready */
+ set_state_pe(port, PE_SRC_READY);
+ }
+ }
+}
+
+/**
+ * PE_PRS_SRC_SNK_Transition_To_Off
+ */
+static void pe_prs_src_snk_transition_to_off_entry(int port)
+{
+ print_current_state(port);
+
+ /* Tell TypeC to swap from Attached.SRC to Attached.SNK */
+ tc_prs_src_snk_assert_rd(port);
+ pe[port].ps_source_timer =
+ get_time().val + PD_POWER_SUPPLY_TURN_OFF_DELAY;
+}
+
+static void pe_prs_src_snk_transition_to_off_run(int port)
+{
+ /* Give time for supply to power off */
+ if (get_time().val < pe[port].ps_source_timer)
+ return;
+
+ /* Wait until Rd is asserted */
+ if (tc_is_attached_snk(port)) {
+ /* Contract is invalid */
+ PE_CLR_FLAG(port, PE_FLAGS_EXPLICIT_CONTRACT);
+ set_state_pe(port, PE_PRS_SRC_SNK_WAIT_SOURCE_ON);
+ }
+}
+
+/**
+ * PE_PRS_SRC_SNK_Wait_Source_On
+ */
+static void pe_prs_src_snk_wait_source_on_entry(int port)
+{
+ print_current_state(port);
+ prl_send_ctrl_msg(port, TCPC_TX_SOP, PD_CTRL_PS_RDY);
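+	/* PSSourceOnTimer is started once the PS_RDY is actually transmitted */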
+ pe[port].ps_source_timer = 0;
+}
+
+static void pe_prs_src_snk_wait_source_on_run(int port)
+{
+ int type;
+ int cnt;
+ int ext;
+
+ if (PE_CHK_FLAG(port, PE_FLAGS_TX_COMPLETE)) {
+ PE_CLR_FLAG(port, PE_FLAGS_TX_COMPLETE);
+
+ /* Update pe power role */
+ pe[port].power_role = tc_get_power_role(port);
+ pe[port].ps_source_timer = get_time().val + PD_T_PS_SOURCE_ON;
+ }
+
+ /*
+ * Transition to PE_SNK_Startup when:
+	 * 1) A PS_RDY Message is received.
+ */
+ if (pe[port].ps_source_timer > 0 &&
+ PE_CHK_FLAG(port, PE_FLAGS_MSG_RECEIVED)) {
+ PE_CLR_FLAG(port, PE_FLAGS_MSG_RECEIVED);
+
+ type = PD_HEADER_TYPE(emsg[port].header);
+ cnt = PD_HEADER_CNT(emsg[port].header);
+ ext = PD_HEADER_EXT(emsg[port].header);
+
+ if ((ext == 0) && (cnt == 0) && (type == PD_CTRL_PS_RDY)) {
+ tc_pr_swap_complete(port);
+ pe[port].ps_source_timer = 0;
+ set_state_pe(port, PE_SNK_STARTUP);
+ return;
+ }
+ }
+
+ /*
+ * Transition to ErrorRecovery state when:
+ * 1) The PSSourceOnTimer times out.
+ * 2) PS_RDY not sent after retries.
+ */
+ if ((pe[port].ps_source_timer > 0 &&
+ get_time().val > pe[port].ps_source_timer) ||
+ PE_CHK_FLAG(port, PE_FLAGS_PROTOCOL_ERROR)) {
+ PE_CLR_FLAG(port, PE_FLAGS_PROTOCOL_ERROR);
+
+ set_state_pe(port, PE_WAIT_FOR_ERROR_RECOVERY);
+ return;
+ }
+}
+
+/**
+ * PE_PRS_SRC_SNK_Send_Swap
+ */
+static void pe_prs_src_snk_send_swap_entry(int port)
+{
+ print_current_state(port);
+
+ /* Request the Protocol Layer to send a PR_Swap Message. */
+ prl_send_ctrl_msg(port, TCPC_TX_SOP, PD_CTRL_PR_SWAP);
+
+ /* Start the SenderResponseTimer */
+ pe[port].sender_response_timer =
+ get_time().val + PD_T_SENDER_RESPONSE;
+}
+
+static void pe_prs_src_snk_send_swap_run(int port)
+{
+ int type;
+ int cnt;
+ int ext;
+
+ /*
+ * Transition to PE_SRC_Ready state when:
+	 * 1) The SenderResponseTimer times out.
+ */
+ if (get_time().val > pe[port].sender_response_timer) {
+ set_state_pe(port, PE_SRC_READY);
+ return;
+ }
+
+ /*
+ * Transition to PE_PRS_SRC_SNK_Transition_To_Off when:
+ * 1) An Accept Message is received.
+ *
+ * Transition to PE_SRC_Ready state when:
+ * 1) A Reject Message is received.
+ * 2) Or a Wait Message is received.
+ */
+ if (PE_CHK_FLAG(port, PE_FLAGS_MSG_RECEIVED)) {
+ PE_CLR_FLAG(port, PE_FLAGS_MSG_RECEIVED);
+
+ type = PD_HEADER_TYPE(emsg[port].header);
+ cnt = PD_HEADER_CNT(emsg[port].header);
+ ext = PD_HEADER_EXT(emsg[port].header);
+
+ if ((ext == 0) && (cnt == 0)) {
+ if (type == PD_CTRL_ACCEPT)
+ set_state_pe(port,
+ PE_PRS_SRC_SNK_TRANSITION_TO_OFF);
+ else if ((type == PD_CTRL_REJECT) ||
+ (type == PD_CTRL_WAIT))
+ set_state_pe(port, PE_SRC_READY);
+ }
+ }
+}
+
+static void pe_prs_src_snk_send_swap_exit(int port)
+{
+ /* Clear TX Complete Flag if set */
+ PE_CLR_FLAG(port, PE_FLAGS_TX_COMPLETE);
+}
+
+/**
+ * PE_PRS_SNK_SRC_Evaluate_Swap
+ */
+static void pe_prs_snk_src_evaluate_swap_entry(int port)
+{
+ print_current_state(port);
+
+ if (!pd_check_power_swap(port)) {
+ /* PE_PRS_SNK_SRC_Reject_Swap state embedded here */
+ prl_send_ctrl_msg(port, TCPC_TX_SOP, PD_CTRL_REJECT);
+ } else {
+ pd_request_power_swap(port);
+ /* PE_PRS_SNK_SRC_Accept_Swap state embedded here */
+ PE_SET_FLAG(port, PE_FLAGS_ACCEPT);
+ prl_send_ctrl_msg(port, TCPC_TX_SOP, PD_CTRL_ACCEPT);
+ }
+}
+
+static void pe_prs_snk_src_evaluate_swap_run(int port)
+{
+ if (PE_CHK_FLAG(port, PE_FLAGS_TX_COMPLETE)) {
+ PE_CLR_FLAG(port, PE_FLAGS_TX_COMPLETE);
+ if (PE_CHK_FLAG(port, PE_FLAGS_ACCEPT)) {
+ PE_CLR_FLAG(port, PE_FLAGS_ACCEPT);
+
+ /*
+ * Accept message sent, transition to
+ * PE_PRS_SNK_SRC_Transition_to_off
+ */
+ set_state_pe(port, PE_PRS_SNK_SRC_TRANSITION_TO_OFF);
+ } else {
+ /* Message sent, return to PE_SNK_Ready */
+ set_state_pe(port, PE_SNK_READY);
+ }
+ }
+}
+
+/**
+ * PE_PRS_SNK_SRC_Transition_To_Off
+ */
+static void pe_prs_snk_src_transition_to_off_entry(int port)
+{
+ print_current_state(port);
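+	/* Stop drawing power while the original source turns its supply off */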
+ tc_snk_power_off(port);
+ pe[port].ps_source_timer = get_time().val + PD_T_PS_SOURCE_OFF;
+}
+
+static void pe_prs_snk_src_transition_to_off_run(int port)
+{
+ int type;
+ int cnt;
+ int ext;
+
+ /*
+ * Transition to ErrorRecovery state when:
+ * 1) The PSSourceOffTimer times out.
+ */
+ if (get_time().val > pe[port].ps_source_timer) {
+ set_state_pe(port, PE_WAIT_FOR_ERROR_RECOVERY);
+ return;
+ }
+
+ /*
+ * Transition to PE_PRS_SNK_SRC_Assert_Rp when:
+	 * 1) A PS_RDY Message is received.
+ */
+ if (PE_CHK_FLAG(port, PE_FLAGS_MSG_RECEIVED)) {
+ PE_CLR_FLAG(port, PE_FLAGS_MSG_RECEIVED);
+
+ type = PD_HEADER_TYPE(emsg[port].header);
+ cnt = PD_HEADER_CNT(emsg[port].header);
+ ext = PD_HEADER_EXT(emsg[port].header);
+
+ if ((ext == 0) && (cnt == 0) && (type == PD_CTRL_PS_RDY))
+ set_state_pe(port, PE_PRS_SNK_SRC_ASSERT_RP);
+ }
+}
+
+/**
+ * PE_PRS_SNK_SRC_Assert_Rp
+ */
+static void pe_prs_snk_src_assert_rp_entry(int port)
+{
+ print_current_state(port);
+
+ /*
+ * Tell TypeC to Power Role Swap (PRS) from
+ * Attached.SNK to Attached.SRC
+ */
+ tc_prs_snk_src_assert_rp(port);
+}
+
+static void pe_prs_snk_src_assert_rp_run(int port)
+{
+ /* Wait until TypeC is in the Attached.SRC state */
+ if (tc_is_attached_src(port)) {
+ /* Contract is invalid now */
+ PE_CLR_FLAG(port, PE_FLAGS_EXPLICIT_CONTRACT);
+ set_state_pe(port, PE_PRS_SNK_SRC_SOURCE_ON);
+ }
+}
+
+/**
+ * PE_PRS_SNK_SRC_Source_On
+ */
+static void pe_prs_snk_src_source_on_entry(int port)
+{
+ print_current_state(port);
+
+ /*
+ * VBUS was enabled when the TypeC state machine entered
+ * Attached.SRC state
+ */
+ pe[port].ps_source_timer = get_time().val +
+ PD_POWER_SUPPLY_TURN_ON_DELAY;
+}
+
+static void pe_prs_snk_src_source_on_run(int port)
+{
+ /* Wait until power supply turns on */
+ if (get_time().val < pe[port].ps_source_timer)
+ return;
+
+ if (pe[port].ps_source_timer != 0) {
+ /* update pe power role */
+ pe[port].power_role = tc_get_power_role(port);
+ prl_send_ctrl_msg(port, TCPC_TX_SOP, PD_CTRL_PS_RDY);
+ /* reset timer so PD_CTRL_PS_RDY isn't sent again */
+ pe[port].ps_source_timer = 0;
+ return;
+ }
+
+ /*
+ * Transition to ErrorRecovery state when:
+ * 1) On protocol error
+ */
+ if (PE_CHK_FLAG(port, PE_FLAGS_PROTOCOL_ERROR)) {
+ PE_CLR_FLAG(port, PE_FLAGS_PROTOCOL_ERROR);
+ set_state_pe(port, PE_WAIT_FOR_ERROR_RECOVERY);
+ return;
+ }
+
+ if (PE_CHK_FLAG(port, PE_FLAGS_TX_COMPLETE)) {
+ PE_CLR_FLAG(port, PE_FLAGS_TX_COMPLETE);
+
+ /* Run swap source timer on entry to pe_src_startup */
+ PE_SET_FLAG(port, PE_FLAGS_RUN_SOURCE_START_TIMER);
+ tc_pr_swap_complete(port);
+ set_state_pe(port, PE_SRC_STARTUP);
+ }
+}
+
+/**
+ * PE_PRS_SNK_SRC_Send_Swap
+ */
+static void pe_prs_snk_src_send_swap_entry(int port)
+{
+ print_current_state(port);
+
+ /* Request the Protocol Layer to send a PR_Swap Message. */
+ prl_send_ctrl_msg(port, TCPC_TX_SOP, PD_CTRL_PR_SWAP);
+
+ /* Start the SenderResponseTimer */
+ pe[port].sender_response_timer =
+ get_time().val + PD_T_SENDER_RESPONSE;
+}
+
+static void pe_prs_snk_src_send_swap_run(int port)
+{
+ int type;
+ int cnt;
+ int ext;
+
+ /*
+ * Transition to PE_SNK_Ready state when:
+ * 1) The SenderResponseTimer times out.
+ */
+ if (get_time().val > pe[port].sender_response_timer) {
+ set_state_pe(port, PE_SNK_READY);
+ return;
+ }
+
+ /*
+ * Transition to PE_PRS_SNK_SRC_Transition_to_off when:
+ * 1) An Accept Message is received.
+ *
+ * Transition to PE_SNK_Ready state when:
+ * 1) A Reject Message is received.
+ * 2) Or a Wait Message is received.
+ */
+ if (PE_CHK_FLAG(port, PE_FLAGS_MSG_RECEIVED)) {
+ PE_CLR_FLAG(port, PE_FLAGS_MSG_RECEIVED);
+
+ type = PD_HEADER_TYPE(emsg[port].header);
+ cnt = PD_HEADER_CNT(emsg[port].header);
+ ext = PD_HEADER_EXT(emsg[port].header);
+
+ if ((ext == 0) && (cnt == 0)) {
+ if (type == PD_CTRL_ACCEPT)
+ set_state_pe(port,
+ PE_PRS_SNK_SRC_TRANSITION_TO_OFF);
+ else if ((type == PD_CTRL_REJECT) ||
+ (type == PD_CTRL_WAIT))
+ set_state_pe(port, PE_SNK_READY);
+ }
+ }
+}
+
+static void pe_prs_snk_src_send_swap_exit(int port)
+{
+ /* Clear TX Complete Flag if set */
+ PE_CLR_FLAG(port, PE_FLAGS_TX_COMPLETE);
+}
+
+/**
+ * BIST
+ */
+static void pe_bist_entry(int port)
+{
+ uint32_t *payload = (uint32_t *)emsg[port].buf;
+ uint8_t mode = BIST_MODE(payload[0]);
+
+ print_current_state(port);
+
+ /*
+ * See section 6.4.3.6 BIST Carrier Mode 2:
+ * With a BIST Carrier Mode 2 BIST Data Object, the UUT Shall send out
+	 * a continuous string of alternating "1"s and "0"s.
+ * The UUT Shall exit the Continuous BIST Mode within tBISTContMode of
+ * this Continuous BIST Mode being enabled.
+ */
+ if (mode == BIST_CARRIER_MODE_2) {
+ prl_send_ctrl_msg(port, TCPC_TX_BIST_MODE_2, 0);
+ pe[port].bist_cont_mode_timer =
+ get_time().val + PD_T_BIST_CONT_MODE;
+ }
+ /*
+ * See section 6.4.3.9 BIST Test Data:
+ * With a BIST Test Data BIST Data Object, the UUT Shall return a
+ * GoodCRC Message and Shall enter a test mode in which it sends no
+ * further Messages except for GoodCRC Messages in response to received
+ * Messages.
+ */
+ else if (mode == BIST_TEST_DATA)
+ pe[port].bist_cont_mode_timer = 0;
+}
+
+static void pe_bist_run(int port)
+{
+ if (pe[port].bist_cont_mode_timer > 0 &&
+ get_time().val > pe[port].bist_cont_mode_timer) {
+
+ if (pe[port].power_role == PD_ROLE_SOURCE)
+ set_state_pe(port, PE_SRC_TRANSITION_TO_DEFAULT);
+ else
+ set_state_pe(port, PE_SNK_TRANSITION_TO_DEFAULT);
+ } else {
+ /*
+ * We are in test data mode and no further Messages except for
+ * GoodCRC Messages in response to received Messages will
+ * be sent.
+ */
+ if (PE_CHK_FLAG(port, PE_FLAGS_MSG_RECEIVED))
+ PE_CLR_FLAG(port, PE_FLAGS_MSG_RECEIVED);
+ }
+}
+
+/**
+ * Give_Sink_Cap Message
+ */
+static void pe_snk_give_sink_cap_entry(int port)
+{
+ print_current_state(port);
+
+ /* Send a Sink_Capabilities Message */
+ emsg[port].len = pd_snk_pdo_cnt * 4;
+ memcpy(emsg[port].buf, (uint8_t *)pd_snk_pdo, emsg[port].len);
+ prl_send_data_msg(port, TCPC_TX_SOP, PD_DATA_SINK_CAP);
+}
+
+static void pe_snk_give_sink_cap_run(int port)
+{
+ if (PE_CHK_FLAG(port, PE_FLAGS_TX_COMPLETE)) {
+ PE_CLR_FLAG(port, PE_FLAGS_TX_COMPLETE);
+ if (pe[port].power_role == PD_ROLE_SOURCE)
+ set_state_pe(port, PE_SRC_READY);
+ else
+ set_state_pe(port, PE_SNK_READY);
+ }
+}
+
+/**
+ * Wait For Error Recovery
+ */
+static void pe_wait_for_error_recovery_entry(int port)
+{
+ print_current_state(port);
+ tc_start_error_recovery(port);
+}
+
+static void pe_wait_for_error_recovery_run(int port)
+{
+ /* Stay here until error recovery is complete */
+}
+
+/**
+ * PE_Handle_Custom_Vdm_Request
+ */
+static void pe_handle_custom_vdm_request_entry(int port)
+{
+ /* Get the message */
+ uint32_t *payload = (uint32_t *)emsg[port].buf;
+ int cnt = PD_HEADER_CNT(emsg[port].header);
+ int sop = PD_HEADER_GET_SOP(emsg[port].header);
+ int rlen = 0;
+ uint32_t *rdata;
+
+ print_current_state(port);
+
+ /* This is an Interruptible AMS */
+ PE_SET_FLAG(port, PE_FLAGS_INTERRUPTIBLE_AMS);
+
+ rlen = pd_custom_vdm(port, cnt, payload, &rdata);
+ if (rlen > 0) {
+ emsg[port].len = rlen * 4;
+ memcpy(emsg[port].buf, (uint8_t *)rdata, emsg[port].len);
+ prl_send_data_msg(port, sop, PD_DATA_VENDOR_DEF);
+ }
+}
+
+static void pe_handle_custom_vdm_request_run(int port)
+{
+	/* Wait for the custom VDM response, if any, to be sent. */
+ if (PE_CHK_FLAG(port, PE_FLAGS_TX_COMPLETE)) {
+ PE_CLR_FLAG(port, PE_FLAGS_TX_COMPLETE);
+
+ /*
+ * Message sent. Transition back to
+		 * PE_SRC_Ready or PE_SNK_Ready.
+ */
+ if (pe[port].power_role == PD_ROLE_SOURCE)
+ set_state_pe(port, PE_SRC_READY);
+ else
+ set_state_pe(port, PE_SNK_READY);
+ }
+}
+
+/**
+ * PE_DO_PORT_Discovery
+ *
+ * NOTE: Port Discovery Policy
+ * To discover a port partner, Vendor Defined Messages (VDMs) are
+ * sent to the port partner. The sequence of commands are
+ * sent in the following order:
+ * 1) CMD_DISCOVER_IDENT
+ * 2) CMD_DISCOVER_SVID
+ * 3) CMD_DISCOVER_MODES
+ * 4) CMD_ENTER_MODE
+ * 5) CMD_DP_STATUS
+ * 6) CMD_DP_CONFIG
+ *
+ * If the port partner replies with BUSY, the sequence is resent
+ * N_DISCOVER_IDENTITY_COUNT times before giving up.
+ */
+static void pe_do_port_discovery_entry(int port)
+{
+ print_current_state(port);
+
+ pe[port].partner_type = PORT;
+ pe[port].vdm_cnt = 0;
+}
+
+static void pe_do_port_discovery_run(int port)
+{
+ uint32_t *payload = (uint32_t *)emsg[port].buf;
+ struct svdm_amode_data *modep = get_modep(port, PD_VDO_VID(payload[0]));
+ int ret = 0;
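+
+	/*
+	 * ret holds the number of VDOs queued for the next VDM request;
+	 * 0 ends the discovery sequence.
+	 */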
+
+ if (!PE_CHK_FLAG(port,
+ PE_FLAGS_VDM_REQUEST_NAKED | PE_FLAGS_VDM_REQUEST_BUSY)) {
+ switch (pe[port].vdm_cmd) {
+ case DO_PORT_DISCOVERY_START:
+ pe[port].vdm_cmd = CMD_DISCOVER_IDENT;
+ pe[port].vdm_data[0] = 0;
+ ret = 1;
+ break;
+ case CMD_DISCOVER_IDENT:
+ pe[port].vdm_cmd = CMD_DISCOVER_SVID;
+ pe[port].vdm_data[0] = 0;
+ ret = 1;
+ break;
+ case CMD_DISCOVER_SVID:
+ pe[port].vdm_cmd = CMD_DISCOVER_MODES;
+ ret = dfp_discover_modes(port, pe[port].vdm_data);
+ break;
+ case CMD_DISCOVER_MODES:
+ pe[port].vdm_cmd = CMD_ENTER_MODE;
+ pe[port].vdm_data[0] = pd_dfp_enter_mode(port, 0, 0);
+ if (pe[port].vdm_data[0])
+ ret = 1;
+ break;
+ case CMD_ENTER_MODE:
+ pe[port].vdm_cmd = CMD_DP_STATUS;
+			if (modep && modep->opos) {
+ ret = modep->fx->status(port,
+ pe[port].vdm_data);
+ pe[port].vdm_data[0] |=
+ PD_VDO_OPOS(modep->opos);
+ }
+ break;
+ case CMD_DP_STATUS:
+ pe[port].vdm_cmd = CMD_DP_CONFIG;
+
+ /*
+ * DP status response & UFP's DP attention have same
+ * payload
+ */
+ dfp_consume_attention(port, pe[port].vdm_data);
+ if (modep && modep->opos)
+ ret = modep->fx->config(port,
+ pe[port].vdm_data);
+ break;
+ case CMD_DP_CONFIG:
+ if (modep && modep->opos && modep->fx->post_config)
+ modep->fx->post_config(port);
+ PE_SET_FLAG(port, PE_FLAGS_DISCOVER_PORT_IDENTITY_DONE);
+ break;
+ case CMD_EXIT_MODE:
+ /* Do nothing */
+ break;
+ case CMD_ATTENTION:
+ /* Do nothing */
+ break;
+ }
+ }
+
+ if (ret == 0) {
+ if (PE_CHK_FLAG(port, PE_FLAGS_VDM_REQUEST_NAKED))
+ PE_SET_FLAG(port, PE_FLAGS_DISCOVER_PORT_IDENTITY_DONE);
+
+ if (pe[port].power_role == PD_ROLE_SOURCE)
+ set_state_pe(port, PE_SRC_READY);
+ else
+ set_state_pe(port, PE_SNK_READY);
+ } else {
+ PE_CLR_FLAG(port, PE_FLAGS_VDM_REQUEST_BUSY);
+
+ /*
+ * Copy Vendor Defined Message (VDM) Header into
+ * message buffer
+ */
+ if (pe[port].vdm_data[0] == 0)
+ pe[port].vdm_data[0] = VDO(
+ USB_SID_PD,
+ 1, /* structured */
+ VDO_SVDM_VERS(1) | pe[port].vdm_cmd);
+
+ pe[port].vdm_data[0] |= VDO_CMDT(CMDT_INIT);
+ pe[port].vdm_data[0] |= VDO_SVDM_VERS(pd_get_vdo_ver(port));
+
+ pe[port].vdm_cnt = ret;
+ set_state_pe(port, PE_VDM_REQUEST);
+ }
+}
+
+/**
+ * PE_VDM_REQUEST
+ */
+
+static void pe_vdm_request_entry(int port)
+{
+ print_current_state(port);
+
+ /* This is an Interruptible AMS */
+ PE_SET_FLAG(port, PE_FLAGS_INTERRUPTIBLE_AMS);
+
+ /* Copy Vendor Data Objects (VDOs) into message buffer */
+ if (pe[port].vdm_cnt > 0) {
+ /* Copy data after header */
+ memcpy(&emsg[port].buf,
+ (uint8_t *)pe[port].vdm_data,
+ pe[port].vdm_cnt * 4);
+ /* Update len with the number of VDO bytes */
+ emsg[port].len = pe[port].vdm_cnt * 4;
+ }
+
+ if (pe[port].partner_type) {
+ /* Save power and data roles */
+ pe[port].saved_power_role = tc_get_power_role(port);
+ pe[port].saved_data_role = tc_get_data_role(port);
+
+ prl_send_data_msg(port, TCPC_TX_SOP_PRIME, PD_DATA_VENDOR_DEF);
+ } else {
+ prl_send_data_msg(port, TCPC_TX_SOP, PD_DATA_VENDOR_DEF);
+ }
+
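+	/* A zeroed VDM response timer indicates the VDM is not yet sent */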
+ pe[port].vdm_response_timer = 0;
+}
+
+static void pe_vdm_request_run(int port)
+{
+ if (pe[port].vdm_response_timer == 0 &&
+ PE_CHK_FLAG(port, PE_FLAGS_TX_COMPLETE)) {
+ /* Message was sent */
+ PE_CLR_FLAG(port, PE_FLAGS_TX_COMPLETE);
+
+ if (pe[port].partner_type) {
+ /* Restore power and data roles */
+ tc_set_power_role(port, pe[port].saved_power_role);
+ tc_set_data_role(port, pe[port].saved_data_role);
+ }
+
+ /* Start no response timer */
+ pe[port].vdm_response_timer =
+ get_time().val + PD_T_VDM_SNDR_RSP;
+ } else if (PE_CHK_FLAG(port, PE_FLAGS_PROTOCOL_ERROR)) {
+ /* Message not sent and we received a protocol error */
+ PE_CLR_FLAG(port, PE_FLAGS_PROTOCOL_ERROR);
+
+ if (pe[port].partner_type) {
+ /* Restore power and data roles */
+ tc_set_power_role(port, pe[port].saved_power_role);
+ tc_set_data_role(port, pe[port].saved_data_role);
+ }
+
+ /* Fake busy response so we try to send command again */
+ PE_SET_FLAG(port, PE_FLAGS_VDM_REQUEST_BUSY);
+ if (get_last_state_pe(port) == PE_DO_PORT_DISCOVERY)
+ set_state_pe(port, PE_DO_PORT_DISCOVERY);
+ else if (get_last_state_pe(port) == PE_SRC_VDM_IDENTITY_REQUEST)
+ set_state_pe(port, PE_SRC_VDM_IDENTITY_REQUEST);
+ else if (pe[port].power_role == PD_ROLE_SOURCE)
+ set_state_pe(port, PE_SRC_READY);
+ else
+ set_state_pe(port, PE_SNK_READY);
+ }
+
+ if (PE_CHK_FLAG(port, PE_FLAGS_MSG_RECEIVED)) {
+ uint32_t *payload;
+ int sop;
+ uint8_t type;
+ uint8_t cnt;
+ uint8_t ext;
+
+ /* Message received */
+ PE_CLR_FLAG(port, PE_FLAGS_MSG_RECEIVED);
+
+ /* Get the message */
+ payload = (uint32_t *)emsg[port].buf;
+ sop = PD_HEADER_GET_SOP(emsg[port].header);
+ type = PD_HEADER_TYPE(emsg[port].header);
+ cnt = PD_HEADER_CNT(emsg[port].header);
+ ext = PD_HEADER_EXT(emsg[port].header);
+
+ if ((sop == TCPC_TX_SOP || sop == TCPC_TX_SOP_PRIME) &&
+ type == PD_DATA_VENDOR_DEF && cnt > 0 && ext == 0) {
+			if (PD_VDO_CMDT(payload[0]) == CMDT_RSP_ACK) {
+				set_state_pe(port, PE_VDM_ACKED);
+				return;
+			} else if (PD_VDO_CMDT(payload[0]) == CMDT_RSP_NAK ||
+ PD_VDO_CMDT(payload[0]) == CMDT_RSP_BUSY) {
+ if (PD_VDO_CMDT(payload[0]) == CMDT_RSP_NAK)
+ PE_SET_FLAG(port,
+ PE_FLAGS_VDM_REQUEST_NAKED);
+ else
+ PE_SET_FLAG(port,
+ PE_FLAGS_VDM_REQUEST_BUSY);
+
+ /* Return to previous state */
+ if (get_last_state_pe(port) ==
+ PE_DO_PORT_DISCOVERY)
+ set_state_pe(port,
+ PE_DO_PORT_DISCOVERY);
+ else if (get_last_state_pe(port) ==
+ PE_SRC_VDM_IDENTITY_REQUEST)
+ set_state_pe(port,
+ PE_SRC_VDM_IDENTITY_REQUEST);
+ else if (pe[port].power_role == PD_ROLE_SOURCE)
+ set_state_pe(port, PE_SRC_READY);
+ else
+ set_state_pe(port, PE_SNK_READY);
+ return;
+ }
+ }
+ }
+
+ if (pe[port].vdm_response_timer > 0 &&
+ get_time().val > pe[port].vdm_response_timer) {
+ CPRINTF("VDM %s Response Timeout\n",
+ pe[port].partner_type ? "Cable" : "Port");
+
+ PE_SET_FLAG(port, PE_FLAGS_VDM_REQUEST_NAKED);
+
+ /* Return to previous state */
+ if (get_last_state_pe(port) == PE_DO_PORT_DISCOVERY)
+ set_state_pe(port, PE_DO_PORT_DISCOVERY);
+ else if (get_last_state_pe(port) == PE_SRC_VDM_IDENTITY_REQUEST)
+ set_state_pe(port, PE_SRC_VDM_IDENTITY_REQUEST);
+ else if (pe[port].power_role == PD_ROLE_SOURCE)
+ set_state_pe(port, PE_SRC_READY);
+ else
+ set_state_pe(port, PE_SNK_READY);
+ }
+}
+
+static void pe_vdm_request_exit(int port)
+{
+ PE_CLR_FLAG(port, PE_FLAGS_INTERRUPTIBLE_AMS);
+}
+
+/**
+ * PE_VDM_Acked
+ */
+static void pe_vdm_acked_entry(int port)
+{
+ uint32_t *payload;
+ uint8_t vdo_cmd;
+ int sop;
+
+ print_current_state(port);
+
+ /* Get the message */
+ payload = (uint32_t *)emsg[port].buf;
+ vdo_cmd = PD_VDO_CMD(payload[0]);
+ sop = PD_HEADER_GET_SOP(emsg[port].header);
+
+ if (sop == TCPC_TX_SOP_PRIME) {
+ /*
+ * Handle Message From Cable Plug
+ */
+
+ uint32_t vdm_header = payload[0];
+ uint32_t id_header = payload[1];
+ uint8_t ptype_ufp;
+
+ if (PD_VDO_CMD(vdm_header) == CMD_DISCOVER_IDENT &&
+ PD_VDO_SVDM(vdm_header) &&
+ PD_HEADER_CNT(emsg[port].header) == 5) {
+ ptype_ufp = PD_IDH_PTYPE(id_header);
+
+ switch (ptype_ufp) {
+ case IDH_PTYPE_UNDEF:
+ break;
+ case IDH_PTYPE_HUB:
+ break;
+ case IDH_PTYPE_PERIPH:
+ break;
+ case IDH_PTYPE_PCABLE:
+ /* Passive Cable Detected */
+ pe[port].passive_cable_vdo =
+ payload[4];
+ break;
+ case IDH_PTYPE_ACABLE:
+ /* Active Cable Detected */
+ pe[port].active_cable_vdo1 =
+ payload[4];
+ pe[port].active_cable_vdo2 =
+ payload[5];
+ break;
+ case IDH_PTYPE_AMA:
+ /*
+ * Alternate Mode Adapter
+ * Detected
+ */
+ pe[port].ama_vdo = payload[4];
+ break;
+ case IDH_PTYPE_VPD:
+ /*
+ * VCONN Powered Device
+ * Detected
+ */
+ pe[port].vpd_vdo = payload[4];
+
+ /*
+ * If a CTVPD device was not discovered, inform
+ * the Device Policy Manager that the Discover
+ * Identity is done.
+ *
+ * If a CTVPD device is discovered, the Device
+ * Policy Manager will clear the DISC_IDENT flag
+ * set by tc_disc_ident_in_progress.
+ */
+ if (pe[port].vpd_vdo < 0 ||
+ !VPD_VDO_CTS(pe[port].vpd_vdo))
+ tc_disc_ident_complete(port);
+ break;
+ }
+ }
+ } else {
+ /*
+ * Handle Message From Port Partner
+ */
+
+#ifdef CONFIG_USB_PD_ALT_MODE_DFP
+ int cnt = PD_HEADER_CNT(emsg[port].header);
+ struct svdm_amode_data *modep;
+
+ modep = get_modep(port, PD_VDO_VID(payload[0]));
+#endif
+
+ switch (vdo_cmd) {
+#ifdef CONFIG_USB_PD_ALT_MODE_DFP
+ case CMD_DISCOVER_IDENT:
+ dfp_consume_identity(port, cnt, payload);
+#ifdef CONFIG_CHARGE_MANAGER
+ if (pd_charge_from_device(pd_get_identity_vid(port),
+ pd_get_identity_pid(port))) {
+ charge_manager_update_dualrole(port,
+ CAP_DEDICATED);
+ }
+#endif
+ break;
+ case CMD_DISCOVER_SVID:
+ dfp_consume_svids(port, cnt, payload);
+ break;
+ case CMD_DISCOVER_MODES:
+ dfp_consume_modes(port, cnt, payload);
+ break;
+ case CMD_ENTER_MODE:
+ break;
+ case CMD_DP_STATUS:
+ /*
+ * DP status response & UFP's DP attention have same
+ * payload
+ */
+ dfp_consume_attention(port, payload);
+ break;
+ case CMD_DP_CONFIG:
+ if (modep && modep->opos && modep->fx->post_config)
+ modep->fx->post_config(port);
+ break;
+ case CMD_EXIT_MODE:
+ /* Do nothing */
+ break;
+#endif
+ case CMD_ATTENTION:
+ /* Do nothing */
+ break;
+ default:
+ CPRINTF("ERR:CMD:%d\n", vdo_cmd);
+ }
+ }
+
+ if (!PE_CHK_FLAG(port, PE_FLAGS_DISCOVER_VDM_IDENTITY_DONE)) {
+ PE_SET_FLAG(port, PE_FLAGS_DISCOVER_VDM_IDENTITY_DONE);
+ set_state_pe(port, PE_SRC_VDM_IDENTITY_REQUEST);
+ } else if (!PE_CHK_FLAG(port, PE_FLAGS_DISCOVER_PORT_IDENTITY_DONE)) {
+ set_state_pe(port, PE_DO_PORT_DISCOVERY);
+ } else if (pe[port].power_role == PD_ROLE_SOURCE) {
+ set_state_pe(port, PE_SRC_READY);
+ } else {
+ set_state_pe(port, PE_SNK_READY);
+ }
+}
+
+/**
+ * PE_VDM_Response
+ */
+static void pe_vdm_response_entry(int port)
+{
+ int ret = 0;
+ uint32_t *payload;
+ uint8_t vdo_cmd;
+ int cmd_type;
+ svdm_rsp_func func = NULL;
+
+ print_current_state(port);
+
+ /* Get the message */
+ payload = (uint32_t *)emsg[port].buf;
+ vdo_cmd = PD_VDO_CMD(payload[0]);
+ cmd_type = PD_VDO_CMDT(payload[0]);
+ payload[0] &= ~VDO_CMDT_MASK;
+
+ if (cmd_type != CMDT_INIT) {
+ CPRINTF("ERR:CMDT:%d\n", vdo_cmd);
+
+ if (pe[port].power_role == PD_ROLE_SOURCE)
+ set_state_pe(port, PE_SRC_READY);
+ else
+ set_state_pe(port, PE_SNK_READY);
+ return;
+ }
+
+ switch (vdo_cmd) {
+ case CMD_DISCOVER_IDENT:
+ func = svdm_rsp.identity;
+ break;
+ case CMD_DISCOVER_SVID:
+ func = svdm_rsp.svids;
+ break;
+ case CMD_DISCOVER_MODES:
+ func = svdm_rsp.modes;
+ break;
+ case CMD_ENTER_MODE:
+ func = svdm_rsp.enter_mode;
+ break;
+ case CMD_DP_STATUS:
+ func = svdm_rsp.amode->status;
+ break;
+ case CMD_DP_CONFIG:
+ func = svdm_rsp.amode->config;
+ break;
+ case CMD_EXIT_MODE:
+ func = svdm_rsp.exit_mode;
+ break;
+#ifdef CONFIG_USB_PD_ALT_MODE_DFP
+ case CMD_ATTENTION:
+ /*
+		 * Attention is the only SVDM with no response
+		 * (just a GoodCRC), so handle it here and return.
+ */
+ dfp_consume_attention(port, payload);
+ if (pe[port].power_role == PD_ROLE_SOURCE)
+ set_state_pe(port, PE_SRC_READY);
+ else
+ set_state_pe(port, PE_SNK_READY);
+ return;
+#endif
+ default:
+ CPRINTF("VDO ERR:CMD:%d\n", vdo_cmd);
+ }
+
+ if (func) {
+ ret = func(port, payload);
+		if (ret > 0)
+ /* ACK */
+ payload[0] = VDO(
+ USB_VID_GOOGLE,
+ 1, /* Structured VDM */
+ VDO_SVDM_VERS(pd_get_vdo_ver(port)) |
+ VDO_CMDT(CMDT_RSP_ACK) |
+ vdo_cmd);
+ else if (!ret)
+ /* NAK */
+ payload[0] = VDO(
+ USB_VID_GOOGLE,
+ 1, /* Structured VDM */
+ VDO_SVDM_VERS(pd_get_vdo_ver(port)) |
+ VDO_CMDT(CMDT_RSP_NAK) |
+ vdo_cmd);
+ else
+ /* BUSY */
+ payload[0] = VDO(
+ USB_VID_GOOGLE,
+ 1, /* Structured VDM */
+ VDO_SVDM_VERS(pd_get_vdo_ver(port)) |
+ VDO_CMDT(CMDT_RSP_BUSY) |
+ vdo_cmd);
+
+ if (ret <= 0)
+ ret = 4;
+ } else {
+		/* Not supported: NAK it */
+ payload[0] = VDO(
+ USB_VID_GOOGLE,
+ 1, /* Structured VDM */
+ VDO_SVDM_VERS(pd_get_vdo_ver(port)) |
+ VDO_CMDT(CMDT_RSP_NAK) |
+ vdo_cmd);
+ ret = 4;
+ }
+
+ /* Send ACK, NAK, or BUSY */
+ emsg[port].len = ret;
+ prl_send_data_msg(port, TCPC_TX_SOP, PD_DATA_VENDOR_DEF);
+}
+
+static void pe_vdm_response_run(int port)
+{
+ if (PE_CHK_FLAG(port, PE_FLAGS_TX_COMPLETE) ||
+ PE_CHK_FLAG(port, PE_FLAGS_PROTOCOL_ERROR)) {
+ PE_CLR_FLAG(port, PE_FLAGS_TX_COMPLETE |
+ PE_FLAGS_PROTOCOL_ERROR);
+
+ if (pe[port].power_role == PD_ROLE_SOURCE)
+ set_state_pe(port, PE_SRC_READY);
+ else
+ set_state_pe(port, PE_SNK_READY);
+ }
+}
+
+/*
+ * PE_VCS_Evaluate_Swap
+ */
+static void pe_vcs_evaluate_swap_entry(int port)
+{
+ print_current_state(port);
+
+ /*
+ * Request the DPM for an evaluation of the VCONN Swap request.
+ * Note: Ports that are presently the VCONN Source must always
+	 * accept a VCONN Swap.
+ */
+
+ /*
+ * Transition to the PE_VCS_Accept_Swap state when:
+ * 1) The Device Policy Manager indicates that a VCONN Swap is ok.
+ *
+ * Transition to the PE_VCS_Reject_Swap state when:
+ * 1) Port is not presently the VCONN Source and
+ * 2) The DPM indicates that a VCONN Swap is not ok or
+ * 3) The DPM indicates that a VCONN Swap cannot be done at this time.
+ */
+
+	/* DPM rejects a VCONN Swap and port is not a VCONN source */
+ if (!tc_check_vconn_swap(port) && tc_is_vconn_src(port) < 1) {
+ /* NOTE: PE_VCS_Reject_Swap State embedded here */
+ prl_send_ctrl_msg(port, TCPC_TX_SOP, PD_CTRL_REJECT);
+ }
+ /* Port is not ready to perform a VCONN swap */
+ else if (tc_is_vconn_src(port) < 0) {
+ /* NOTE: PE_VCS_Reject_Swap State embedded here */
+ prl_send_ctrl_msg(port, TCPC_TX_SOP, PD_CTRL_WAIT);
+ }
+ /* Port is ready to perform a VCONN swap */
+ else {
+ /* NOTE: PE_VCS_Accept_Swap State embedded here */
+ PE_SET_FLAG(port, PE_FLAGS_ACCEPT);
+ prl_send_ctrl_msg(port, TCPC_TX_SOP, PD_CTRL_ACCEPT);
+ }
+}
+
+static void pe_vcs_evaluate_swap_run(int port)
+{
+ /* Wait for ACCEPT, WAIT or Reject message to send. */
+ if (PE_CHK_FLAG(port, PE_FLAGS_TX_COMPLETE)) {
+ PE_CLR_FLAG(port, PE_FLAGS_TX_COMPLETE);
+
+ if (PE_CHK_FLAG(port, PE_FLAGS_ACCEPT)) {
+ PE_CLR_FLAG(port, PE_FLAGS_ACCEPT);
+ /* Accept Message sent and Presently VCONN Source */
+ if (tc_is_vconn_src(port))
+ set_state_pe(port, PE_VCS_WAIT_FOR_VCONN_SWAP);
+ /* Accept Message sent and Not presently VCONN Source */
+ else
+ set_state_pe(port, PE_VCS_TURN_ON_VCONN_SWAP);
+ } else {
+ /*
+ * Message sent. Transition back to PE_SRC_Ready or
+			 * PE_SNK_Ready.
+ */
+ if (pe[port].power_role == PD_ROLE_SOURCE)
+ set_state_pe(port, PE_SRC_READY);
+ else
+ set_state_pe(port, PE_SNK_READY);
+
+ }
+ }
+}
+
+/*
+ * PE_VCS_Send_Swap
+ */
+static void pe_vcs_send_swap_entry(int port)
+{
+ print_current_state(port);
+
+ /* Send a VCONN_Swap Message */
+ prl_send_ctrl_msg(port, TCPC_TX_SOP, PD_CTRL_VCONN_SWAP);
+
+ pe[port].sender_response_timer = 0;
+}
+
+static void pe_vcs_send_swap_run(int port)
+{
+ uint8_t type;
+ uint8_t cnt;
+
+ /* Wait until message is sent */
+ if (pe[port].sender_response_timer == 0 &&
+ PE_CHK_FLAG(port, PE_FLAGS_TX_COMPLETE)) {
+ PE_CLR_FLAG(port, PE_FLAGS_TX_COMPLETE);
+ /* Start the SenderResponseTimer */
+ pe[port].sender_response_timer = get_time().val +
+ PD_T_SENDER_RESPONSE;
+ }
+
+ if (pe[port].sender_response_timer == 0)
+ return;
+
+ if (PE_CHK_FLAG(port, PE_FLAGS_MSG_RECEIVED)) {
+ PE_CLR_FLAG(port, PE_FLAGS_MSG_RECEIVED);
+
+ type = PD_HEADER_TYPE(emsg[port].header);
+ cnt = PD_HEADER_CNT(emsg[port].header);
+
+ /* Only look at control messages */
+ if (cnt == 0) {
+ /*
+ * Transition to the PE_VCS_Wait_For_VCONN state when:
+ * 1) Accept Message Received and
+ * 2) The Port is presently the VCONN Source.
+ *
+ * Transition to the PE_VCS_Turn_On_VCONN state when:
+ * 1) Accept Message Received and
+ * 2) The Port is not presently the VCONN Source.
+ */
+ if (type == PD_CTRL_ACCEPT) {
+ if (tc_is_vconn_src(port))
+ set_state_pe(port,
+ PE_VCS_WAIT_FOR_VCONN_SWAP);
+ else
+ set_state_pe(port,
+ PE_VCS_TURN_ON_VCONN_SWAP);
+ return;
+ }
+
+ /*
+ * Transition back to either the PE_SRC_Ready or
+ * PE_SNK_Ready state when:
+ * 1) SenderResponseTimer Timeout or
+ * 2) Reject message is received or
+ * 3) Wait message Received.
+ */
+ if (get_time().val > pe[port].sender_response_timer ||
+ type == PD_CTRL_REJECT ||
+ type == PD_CTRL_WAIT) {
+ if (pe[port].power_role == PD_ROLE_SOURCE)
+ set_state_pe(port, PE_SRC_READY);
+ else
+ set_state_pe(port, PE_SNK_READY);
+ }
+ }
+ }
+}
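A minimal sketch of the SenderResponseTimer idiom used above (illustrative only, not part of this patch; it assumes the EC's get_time(), PD_T_SENDER_RESPONSE and the PE flag macros already present in this file). A value of zero means "not armed"; the timer is armed with an absolute deadline once the request is on the wire, and expiry is a plain comparison against the current time:

static void sender_response_timer_sketch(int port)
{
        static uint64_t deadline;       /* 0 == timer not armed */

        /* Arm once PE_FLAGS_TX_COMPLETE reports the request is on the wire */
        if (deadline == 0 && PE_CHK_FLAG(port, PE_FLAGS_TX_COMPLETE))
                deadline = get_time().val + PD_T_SENDER_RESPONSE;

        /* Expire by comparing against the current time */
        if (deadline != 0 && get_time().val > deadline)
                deadline = 0;           /* timeout: fall back to a Ready state */
}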
+
+/*
+ * PE_VCS_Wait_for_VCONN_Swap
+ */
+static void pe_vcs_wait_for_vconn_swap_entry(int port)
+{
+ print_current_state(port);
+
+ /* Start the VCONNOnTimer */
+ pe[port].vconn_on_timer = get_time().val + PD_T_VCONN_SOURCE_ON;
+}
+
+static void pe_vcs_wait_for_vconn_swap_run(int port)
+{
+ /*
+ * Transition to the PE_VCS_Turn_Off_VCONN state when:
+ * 1) A PS_RDY Message is received.
+ */
+ if (PE_CHK_FLAG(port, PE_FLAGS_MSG_RECEIVED)) {
+ PE_CLR_FLAG(port, PE_FLAGS_MSG_RECEIVED);
+ /*
+ * PS_RDY message received
+ */
+ if ((PD_HEADER_CNT(emsg[port].header) == 0) &&
+ (PD_HEADER_TYPE(emsg[port].header) ==
+ PD_CTRL_PS_RDY)) {
+ set_state_pe(port, PE_VCS_TURN_OFF_VCONN_SWAP);
+ return;
+ }
+ }
+
+ /*
+ * Transition to either the PE_SRC_Hard_Reset or
+ * PE_SNK_Hard_Reset state when:
+ * 1) The VCONNOnTimer times out.
+ */
+ if (get_time().val > pe[port].vconn_on_timer) {
+ if (pe[port].power_role == PD_ROLE_SOURCE)
+ set_state_pe(port, PE_SRC_HARD_RESET);
+ else
+ set_state_pe(port, PE_SNK_HARD_RESET);
+ }
+}
+
+/*
+ * PE_VCS_Turn_On_VCONN_Swap
+ */
+static void pe_vcs_turn_on_vconn_swap_entry(int port)
+{
+ print_current_state(port);
+
+ /* Request DPM to turn on VCONN */
+ pd_request_vconn_swap_on(port);
+ pe[port].timeout = 0;
+}
+
+static void pe_vcs_turn_on_vconn_swap_run(int port)
+{
+
+ /*
+ * Transition to the PE_VCS_Send_Ps_Rdy state when:
+ * 1) The Port’s VCONN is on.
+ */
+ if (pe[port].timeout == 0 &&
+ PE_CHK_FLAG(port, PE_FLAGS_VCONN_SWAP_COMPLETE)) {
+ PE_CLR_FLAG(port, PE_FLAGS_VCONN_SWAP_COMPLETE);
+ pe[port].timeout = get_time().val + PD_VCONN_SWAP_DELAY;
+ }
+
+ if (pe[port].timeout > 0 && get_time().val > pe[port].timeout)
+ set_state_pe(port, PE_VCS_SEND_PS_RDY_SWAP);
+}
+
+/*
+ * PE_VCS_Turn_Off_VCONN_Swap
+ */
+static void pe_vcs_turn_off_vconn_swap_entry(int port)
+{
+ print_current_state(port);
+
+ /* Request DPM to turn off VCONN */
+ pd_request_vconn_swap_off(port);
+ pe[port].timeout = 0;
+}
+
+static void pe_vcs_turn_off_vconn_swap_run(int port)
+{
+ /* Wait for VCONN to turn off */
+ if (pe[port].timeout == 0 &&
+ PE_CHK_FLAG(port, PE_FLAGS_VCONN_SWAP_COMPLETE)) {
+ PE_CLR_FLAG(port, PE_FLAGS_VCONN_SWAP_COMPLETE);
+ pe[port].timeout = get_time().val + PD_VCONN_SWAP_DELAY;
+ }
+
+ if (pe[port].timeout > 0 && get_time().val > pe[port].timeout) {
+ /*
+ * A VCONN Swap Shall reset the DiscoverIdentityCounter
+ * to zero
+ */
+ pe[port].cable_discover_identity_count = 0;
+ pe[port].port_discover_identity_count = 0;
+
+ if (pe[port].power_role == PD_ROLE_SOURCE)
+ set_state_pe(port, PE_SRC_READY);
+ else
+ set_state_pe(port, PE_SNK_READY);
+ }
+}
+
+/*
+ * PE_VCS_Send_PS_Rdy_Swap
+ */
+static void pe_vcs_send_ps_rdy_swap_entry(int port)
+{
+ print_current_state(port);
+
+ /* Send a PS_RDY Message */
+ prl_send_ctrl_msg(port, TCPC_TX_SOP, PD_CTRL_PS_RDY);
+
+ pe[port].sub = PE_SUB0;
+}
+
+static void pe_vcs_send_ps_rdy_swap_run(int port)
+{
+ if (PE_CHK_FLAG(port, PE_FLAGS_TX_COMPLETE)) {
+ PE_CLR_FLAG(port, PE_FLAGS_TX_COMPLETE);
+
+ switch (pe[port].sub) {
+ case PE_SUB0:
+ /*
+ * After a VCONN Swap the VCONN Source needs to reset
+ * the Cable Plug’s Protocol Layer in order to ensure
+ * MessageID synchronization.
+ */
+ prl_send_ctrl_msg(port, TCPC_TX_SOP_PRIME,
+ PD_CTRL_SOFT_RESET);
+ pe[port].sub = PE_SUB1;
+ pe[port].timeout = get_time().val + 100*MSEC;
+ break;
+ case PE_SUB1:
+ /* Got ACCEPT or REJECT from Cable Plug */
+ if (PE_CHK_FLAG(port, PE_FLAGS_MSG_RECEIVED) ||
+ get_time().val > pe[port].timeout) {
+ PE_CLR_FLAG(port, PE_FLAGS_MSG_RECEIVED);
+ /*
+ * A VCONN Swap Shall reset the
+ * DiscoverIdentityCounter to zero
+ */
+ pe[port].cable_discover_identity_count = 0;
+ pe[port].port_discover_identity_count = 0;
+
+ if (pe[port].power_role == PD_ROLE_SOURCE)
+ set_state_pe(port, PE_SRC_READY);
+ else
+ set_state_pe(port, PE_SNK_READY);
+ }
+ break;
+ case PE_SUB2:
+ /* Do nothing */
+ break;
+ }
+ }
+}
+
+/* Policy Engine utility functions */
+int pd_check_requested_voltage(uint32_t rdo, const int port)
+{
+ int max_ma = rdo & 0x3FF;
+ int op_ma = (rdo >> 10) & 0x3FF;
+ int idx = RDO_POS(rdo);
+ uint32_t pdo;
+ uint32_t pdo_ma;
+#if defined(CONFIG_USB_PD_DYNAMIC_SRC_CAP) || \
+ defined(CONFIG_USB_PD_MAX_SINGLE_SOURCE_CURRENT)
+ const uint32_t *src_pdo;
+ const int pdo_cnt = charge_manager_get_source_pdo(&src_pdo, port);
+#else
+ const uint32_t *src_pdo = pd_src_pdo;
+ const int pdo_cnt = pd_src_pdo_cnt;
+#endif
+
+ /* Board specific check for this request */
+ if (pd_board_check_request(rdo, pdo_cnt))
+ return EC_ERROR_INVAL;
+
+ /* check current ... */
+ pdo = src_pdo[idx - 1];
+ pdo_ma = (pdo & 0x3ff);
+
+ if (op_ma > pdo_ma)
+ return EC_ERROR_INVAL; /* too much op current */
+
+ if (max_ma > pdo_ma && !(rdo & RDO_CAP_MISMATCH))
+ return EC_ERROR_INVAL; /* too much max current */
+
+ CPRINTF("Requested %d V %d mA (for %d/%d mA)\n",
+ ((pdo >> 10) & 0x3ff) * 50, (pdo & 0x3ff) * 10,
+ op_ma * 10, max_ma * 10);
+
+ /* Accept the requested voltage */
+ return EC_SUCCESS;
+}
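The bit masking above follows the fixed/variable Request Data Object layout: RDO_POS() pulls the 1-based object position from bits 30:28, the operating current sits in bits 19:10 and the maximum current in bits 9:0, both in 10 mA units. A standalone illustration (the struct and helper names here are invented for the example, not part of this patch):

#include <stdint.h>

struct fixed_rdo {
        unsigned int object_position;   /* bits 30:28, 1-based index into source caps */
        unsigned int op_current_ma;     /* bits 19:10, 10 mA units */
        unsigned int max_current_ma;    /* bits 9:0, 10 mA units */
};

static struct fixed_rdo decode_fixed_rdo(uint32_t rdo)
{
        struct fixed_rdo r;

        r.object_position = (rdo >> 28) & 0x7;
        r.op_current_ma = ((rdo >> 10) & 0x3FF) * 10;
        r.max_current_ma = (rdo & 0x3FF) * 10;
        return r;
}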
+
+void pd_process_source_cap(int port, int cnt, uint32_t *src_caps)
+{
+#ifdef CONFIG_CHARGE_MANAGER
+ uint32_t ma, mv, pdo;
+#endif
+ int i;
+
+ pe[port].src_cap_cnt = cnt;
+ for (i = 0; i < cnt; i++)
+ pe[port].src_caps[i] = *src_caps++;
+
+#ifdef CONFIG_CHARGE_MANAGER
+ /* Get max power info that we could request */
+ pd_find_pdo_index(pe[port].src_cap_cnt, pe[port].src_caps,
+ PD_MAX_VOLTAGE_MV, &pdo);
+ pd_extract_pdo_power(pdo, &ma, &mv);
+ /* Set max. limit, but apply 500mA ceiling */
+ charge_manager_set_ceil(port, CEIL_REQUESTOR_PD, PD_MIN_MA);
+ pd_set_input_current_limit(port, ma, mv);
+#endif
+}
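For reference, a fixed source PDO packs its voltage in 50 mV units (bits 19:10) and its maximum current in 10 mA units (bits 9:0), which is the kind of value pd_extract_pdo_power() recovers for the PDO chosen above (ignoring any board current clamps). A minimal standalone sketch, with the helper name invented for the example:

static void fixed_pdo_to_mv_ma(uint32_t pdo, int *mv, int *ma)
{
        *mv = ((pdo >> 10) & 0x3FF) * 50;       /* voltage, 50 mV units */
        *ma = (pdo & 0x3FF) * 10;               /* max current, 10 mA units */
}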
+
+void pd_set_max_voltage(unsigned int mv)
+{
+ max_request_mv = mv;
+}
+
+unsigned int pd_get_max_voltage(void)
+{
+ return max_request_mv;
+}
+
+int pd_charge_from_device(uint16_t vid, uint16_t pid)
+{
+ /* TODO: rewrite into table if we get more of these */
+ /*
+ * White-list Apple charge-through accessory since it doesn't set
+ * externally powered bit, but we still need to charge from it when
+ * we are a sink.
+ */
+ return (vid == USB_VID_APPLE &&
+ (pid == USB_PID1_APPLE || pid == USB_PID2_APPLE));
+}
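The TODO above mentions rewriting the check as a table once more entries exist. A sketch of that table-driven form (illustrative only; the entries are just the two Apple PIDs already handled here, and the names are invented for the example):

struct charge_allow_entry {
        uint16_t vid;
        uint16_t pid;
};

static const struct charge_allow_entry charge_allow_list[] = {
        { USB_VID_APPLE, USB_PID1_APPLE },
        { USB_VID_APPLE, USB_PID2_APPLE },
};

static int pd_charge_from_device_table(uint16_t vid, uint16_t pid)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(charge_allow_list); i++)
                if (charge_allow_list[i].vid == vid &&
                    charge_allow_list[i].pid == pid)
                        return 1;
        return 0;
}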
+
+#ifdef CONFIG_USB_PD_DISCHARGE
+void pd_set_vbus_discharge(int port, int enable)
+{
+ static struct mutex discharge_lock[CONFIG_USB_PD_PORT_COUNT];
+
+ mutex_lock(&discharge_lock[port]);
+ enable &= !board_vbus_source_enabled(port);
+
+#ifdef CONFIG_USB_PD_DISCHARGE_GPIO
+ if (port == 0)
+ gpio_set_level(GPIO_USB_C0_DISCHARGE, enable);
+#if CONFIG_USB_PD_PORT_COUNT > 1
+ else if (port == 1)
+ gpio_set_level(GPIO_USB_C1_DISCHARGE, enable);
+#endif
+#if CONFIG_USB_PD_PORT_COUNT > 2
+ else if (port == 2)
+ gpio_set_level(GPIO_USB_C2_DISCHARGE, enable);
+#endif
+#if CONFIG_USB_PD_PORT_COUNT > 3
+ else if (port == 3)
+ gpio_set_level(GPIO_USB_C3_DISCHARGE, enable);
+#endif
+#else
+ if (IS_ENABLED(CONFIG_USB_PD_DISCHARGE_TCPC))
+ tcpc_discharge_vbus(port, enable);
+ else if (IS_ENABLED(CONFIG_USB_PD_DISCHARGE_PPC))
+ ppc_discharge_vbus(port, enable);
+#endif
+ mutex_unlock(&discharge_lock[port]);
+}
+#endif /* CONFIG_USB_PD_DISCHARGE */
+
+/* VDM utility functions */
+#ifdef CONFIG_USB_PD_ALT_MODE_DFP
+static void pd_usb_billboard_deferred(void)
+{
+#if defined(CONFIG_USB_PD_ALT_MODE) && !defined(CONFIG_USB_PD_ALT_MODE_DFP) \
+ && !defined(CONFIG_USB_PD_SIMPLE_DFP) && defined(CONFIG_USB_BOS)
+
+ /*
+ * TODO(tbroch)
+ * 1. Will we have multiple type-C port UFPs
+ * 2. Will there be other modes applicable to DFPs besides DP
+ */
+ if (!pd_alt_mode(0, USB_SID_DISPLAYPORT))
+ usb_connect();
+
+#endif
+}
+DECLARE_DEFERRED(pd_usb_billboard_deferred);
+
+void pd_dfp_pe_init(int port)
+{
+ memset(&pe[port].am_policy, 0, sizeof(struct pd_policy));
+}
+
+#ifdef CONFIG_USB_PD_ALT_MODE_DFP
+static void dfp_consume_identity(int port, int cnt, uint32_t *payload)
+{
+ int ptype = PD_IDH_PTYPE(payload[VDO_I(IDH)]);
+ size_t identity_size = MIN(sizeof(pe[port].am_policy.identity),
+ (cnt - 1) * sizeof(uint32_t));
+
+ pd_dfp_pe_init(port);
+ memcpy(&pe[port].am_policy.identity, payload + 1, identity_size);
+
+ switch (ptype) {
+ case IDH_PTYPE_AMA:
+/* Leave vbus ON if the following macro is false */
+#if defined(CONFIG_USB_PD_DUAL_ROLE) && defined(CONFIG_USBC_VCONN_SWAP)
+ /* Adapter is requesting vconn, try to supply it */
+ if (PD_VDO_AMA_VCONN_REQ(payload[VDO_I(AMA)]))
+ tc_vconn_on(port);
+
+ /* Only disable vbus if vconn was requested */
+ if (PD_VDO_AMA_VCONN_REQ(payload[VDO_I(AMA)]) &&
+ !PD_VDO_AMA_VBUS_REQ(payload[VDO_I(AMA)]))
+ pd_power_supply_reset(port);
+#endif
+ break;
+ default:
+ break;
+ }
+}
+
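dfp_consume_identity() keys off the product type in the ID Header VDO. As a standalone reminder of that layout (illustrative only; the decoder name is invented), the product type occupies bits 29:27 and the USB Vendor ID bits 15:0, which is what PD_IDH_PTYPE() and PD_IDH_VID() extract:

static void decode_id_header_vdo(uint32_t idh, int *ptype, uint16_t *vid)
{
        *ptype = (idh >> 27) & 0x7;     /* product type, e.g. IDH_PTYPE_AMA */
        *vid = idh & 0xFFFF;            /* USB Vendor ID */
}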
+static void dfp_consume_svids(int port, int cnt, uint32_t *payload)
+{
+ int i;
+ uint32_t *ptr = payload + 1;
+ int vdo = 1;
+ uint16_t svid0, svid1;
+
+ for (i = pe[port].am_policy.svid_cnt;
+ i < pe[port].am_policy.svid_cnt + 12; i += 2) {
+ if (i == SVID_DISCOVERY_MAX) {
+ CPRINTF("ERR:SVIDCNT\n");
+ break;
+ }
+ /*
+ * Verify we're still within the valid packet (count will be one
+ * for the VDM header + xVDOs)
+ */
+ if (vdo >= cnt)
+ break;
+
+ svid0 = PD_VDO_SVID_SVID0(*ptr);
+ if (!svid0)
+ break;
+ pe[port].am_policy.svids[i].svid = svid0;
+ pe[port].am_policy.svid_cnt++;
+
+ svid1 = PD_VDO_SVID_SVID1(*ptr);
+ if (!svid1)
+ break;
+ pe[port].am_policy.svids[i + 1].svid = svid1;
+ pe[port].am_policy.svid_cnt++;
+ ptr++;
+ vdo++;
+ }
+
+ /* TODO(tbroch) need to re-issue discover svids if > 12 */
+ if (i && ((i % 12) == 0))
+ CPRINTF("ERR:SVID+12\n");
+}
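The loop above advances the SVID count by two per VDO because each Discover SVIDs response VDO packs one 16-bit SVID in each half; a zero SVID terminates the list. A minimal illustration (helper name invented; it mirrors what PD_VDO_SVID_SVID0()/SVID1() return): a VDO of 0xFF018087 splits into SVID0 0xFF01 (DisplayPort) and SVID1 0x8087.

static void split_svid_vdo(uint32_t vdo, uint16_t *svid0, uint16_t *svid1)
{
        *svid0 = (vdo >> 16) & 0xFFFF;  /* upper half, first SVID */
        *svid1 = vdo & 0xFFFF;          /* lower half, second SVID */
}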
+
+static int dfp_discover_modes(int port, uint32_t *payload)
+{
+ uint16_t svid;
+
+ if (pe[port].am_policy.svid_idx >= pe[port].am_policy.svid_cnt)
+ return 0;
+
+ svid = pe[port].am_policy.svids[pe[port].am_policy.svid_idx].svid;
+
+ payload[0] = VDO(svid, 1, CMD_DISCOVER_MODES);
+
+ return 1;
+}
+
+static void dfp_consume_modes(int port, int cnt, uint32_t *payload)
+{
+ int idx = pe[port].am_policy.svid_idx;
+
+ pe[port].am_policy.svids[idx].mode_cnt = cnt - 1;
+
+ if (pe[port].am_policy.svids[idx].mode_cnt < 0) {
+ CPRINTF("ERR:NOMODE\n");
+ } else {
+ memcpy(
+ pe[port].am_policy.svids[pe[port].am_policy.svid_idx].mode_vdo,
+ &payload[1],
+ sizeof(uint32_t) * pe[port].am_policy.svids[idx].mode_cnt);
+ }
+
+ pe[port].am_policy.svid_idx++;
+}
+
+static int get_mode_idx(int port, uint16_t svid)
+{
+ int i;
+
+ for (i = 0; i < PD_AMODE_COUNT; i++) {
+ if (pe[port].am_policy.amodes[i].fx->svid == svid)
+ return i;
+ }
+
+ return -1;
+}
+
+static struct svdm_amode_data *get_modep(int port, uint16_t svid)
+{
+ int idx = get_mode_idx(port, svid);
+
+ return (idx == -1) ? NULL : &pe[port].am_policy.amodes[idx];
+}
+
+int pd_alt_mode(int port, uint16_t svid)
+{
+ struct svdm_amode_data *modep = get_modep(port, svid);
+
+ return (modep) ? modep->opos : -1;
+}
+
+int allocate_mode(int port, uint16_t svid)
+{
+ int i, j;
+ struct svdm_amode_data *modep;
+ int mode_idx = get_mode_idx(port, svid);
+
+ if (mode_idx != -1)
+ return mode_idx;
+
+ /* There's no space to enter another mode */
+ if (pe[port].am_policy.amode_idx == PD_AMODE_COUNT) {
+ CPRINTF("ERR:NO AMODE SPACE\n");
+ return -1;
+ }
+
+ /* Allocate ... if SVID == 0 enter default supported policy */
+ for (i = 0; i < supported_modes_cnt; i++) {
+ if (!&supported_modes[i])
+ continue;
+
+ for (j = 0; j < pe[port].am_policy.svid_cnt; j++) {
+ struct svdm_svid_data *svidp =
+ &pe[port].am_policy.svids[j];
+
+ if ((svidp->svid != supported_modes[i].svid) ||
+ (svid && (svidp->svid != svid)))
+ continue;
+
+ modep =
+ &pe[port].am_policy.amodes[pe[port].am_policy.amode_idx];
+ modep->fx = &supported_modes[i];
+ modep->data = &pe[port].am_policy.svids[j];
+ pe[port].am_policy.amode_idx++;
+ return pe[port].am_policy.amode_idx - 1;
+ }
+ }
+ return -1;
+}
+
+uint32_t pd_dfp_enter_mode(int port, uint16_t svid, int opos)
+{
+ int mode_idx = allocate_mode(port, svid);
+ struct svdm_amode_data *modep;
+ uint32_t mode_caps;
+
+ if (mode_idx == -1)
+ return 0;
+
+ modep = &pe[port].am_policy.amodes[mode_idx];
+
+ if (!opos) {
+ /* choose the lowest as default */
+ modep->opos = 1;
+ } else if (opos <= modep->data->mode_cnt) {
+ modep->opos = opos;
+ } else {
+ CPRINTF("opos error\n");
+ return 0;
+ }
+
+ mode_caps = modep->data->mode_vdo[modep->opos - 1];
+ if (modep->fx->enter(port, mode_caps) == -1)
+ return 0;
+
+ PE_SET_FLAG(port, PE_FLAGS_MODAL_OPERATION);
+
+ /* SVDM to send to UFP for mode entry */
+ return VDO(modep->fx->svid, 1, CMD_ENTER_MODE | VDO_OPOS(modep->opos));
+}
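The value returned by pd_dfp_enter_mode() is a Structured VDM header built with VDO() and VDO_OPOS(). As a standalone sketch of that packing (decoder name invented; field positions per the Structured VDM header layout), the SVID occupies bits 31:16, the object position bits 10:8, and the command bits 4:0:

static void decode_svdm_header(uint32_t hdr, uint16_t *svid, int *opos,
                               int *cmd)
{
        *svid = (hdr >> 16) & 0xFFFF;   /* e.g. USB_SID_DISPLAYPORT */
        *opos = (hdr >> 8) & 0x7;       /* object position set via VDO_OPOS() */
        *cmd = hdr & 0x1F;              /* e.g. CMD_ENTER_MODE */
}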
+
+static int validate_mode_request(struct svdm_amode_data *modep,
+ uint16_t svid, int opos)
+{
+ if (!modep->fx)
+ return 0;
+
+ if (svid != modep->fx->svid) {
+ CPRINTF("ERR:svid r:0x%04x != c:0x%04x\n",
+ svid, modep->fx->svid);
+ return 0;
+ }
+
+ if (opos != modep->opos) {
+ CPRINTF("ERR:opos r:%d != c:%d\n",
+ opos, modep->opos);
+ return 0;
+ }
+
+ return 1;
+}
+
+static void dfp_consume_attention(int port, uint32_t *payload)
+{
+ uint16_t svid = PD_VDO_VID(payload[0]);
+ int opos = PD_VDO_OPOS(payload[0]);
+ struct svdm_amode_data *modep = get_modep(port, svid);
+
+ if (!modep || !validate_mode_request(modep, svid, opos))
+ return;
+
+ if (modep->fx->attention)
+ modep->fx->attention(port, payload);
+}
+#endif
+/*
+ * This algorithm defaults to choosing higher pin config over lower ones in
+ * order to prefer multi-function if desired.
+ *
+ * NAME | SIGNALING | OUTPUT TYPE | MULTI-FUNCTION | PIN CONFIG
+ * -------------------------------------------------------------
+ * A | USB G2 | ? | no | 00_0001
+ * B | USB G2 | ? | yes | 00_0010
+ * C | DP | CONVERTED | no | 00_0100
+ * D | DP | CONVERTED | yes | 00_1000
+ * E | DP | DP | no | 01_0000
+ * F | DP | DP | yes | 10_0000
+ *
+ * If the UFP has NOT asserted multi-function preferred, the code masks away
+ * B/D/F, leaving only A/C/E. For single-output dongles that should leave only
+ * one possible pin config, depending on whether it's a converter
+ * (DP->VGA|HDMI) or a DP output. If the UFP is a USB-C receptacle it may
+ * assert C/D/E/F. The DFP USB-C receptacle must always choose C/D in those
+ * cases.
+ */
+int pd_dfp_dp_get_pin_mode(int port, uint32_t status)
+{
+ struct svdm_amode_data *modep = get_modep(port, USB_SID_DISPLAYPORT);
+ uint32_t mode_caps;
+ uint32_t pin_caps;
+
+ if (!modep)
+ return 0;
+
+ mode_caps = modep->data->mode_vdo[modep->opos - 1];
+
+ /* TODO(crosbug.com/p/39656) revisit with DFP that can be a sink */
+ pin_caps = PD_DP_PIN_CAPS(mode_caps);
+
+ /* if don't want multi-function then ignore those pin configs */
+ if (!PD_VDO_DPSTS_MF_PREF(status))
+ pin_caps &= ~MODE_DP_PIN_MF_MASK;
+
+ /* TODO(crosbug.com/p/39656) revisit if DFP drives USB Gen 2 signals */
+ pin_caps &= ~MODE_DP_PIN_BR2_MASK;
+
+ /* if C/D present they have precedence over E/F for USB-C->USB-C */
+ if (pin_caps & (MODE_DP_PIN_C | MODE_DP_PIN_D))
+ pin_caps &= ~(MODE_DP_PIN_E | MODE_DP_PIN_F);
+
+ /* get_next_bit returns undefined for zero */
+ if (!pin_caps)
+ return 0;
+
+ return 1 << get_next_bit(&pin_caps);
+}
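A worked example of the masking above (assuming the MODE_DP_PIN_* definitions group B/D/F as the multi-function configs, as the table in the comment indicates). Suppose the UFP advertises pin configs C, D and E and the status VDO does not set multi-function preferred:

/* UFP offers C, D and E; MF not preferred in the status VDO */
uint32_t pin_caps = MODE_DP_PIN_C | MODE_DP_PIN_D | MODE_DP_PIN_E;

pin_caps &= ~MODE_DP_PIN_MF_MASK;               /* drops D -> C | E */
pin_caps &= ~(MODE_DP_PIN_E | MODE_DP_PIN_F);   /* C/D precedence -> C */
/* 1 << get_next_bit(&pin_caps) == MODE_DP_PIN_C */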
+
+int pd_dfp_exit_mode(int port, uint16_t svid, int opos)
+{
+ struct svdm_amode_data *modep;
+ int idx;
+
+
+ /*
+ * Empty svid signals we should reset DFP VDM state by exiting all
+ * entered modes then clearing state. This occurs when we've
+ * disconnected or for hard reset.
+ */
+ if (!svid) {
+ for (idx = 0; idx < PD_AMODE_COUNT; idx++)
+ if (pe[port].am_policy.amodes[idx].fx)
+ pe[port].am_policy.amodes[idx].fx->exit(port);
+
+ pd_dfp_pe_init(port);
+ return 0;
+ }
+
+ /*
+ * TODO(crosbug.com/p/33946) : below needs revisited to allow multiple
+ * mode exit. Additionally it should honor OPOS == 7 as DFP's request
+ * to exit all modes. We currently don't have any UFPs that support
+ * multiple modes on one SVID.
+ */
+ modep = get_modep(port, svid);
+ if (!modep || !validate_mode_request(modep, svid, opos))
+ return 0;
+
+ /* call DFPs exit function */
+ modep->fx->exit(port);
+
+ PE_CLR_FLAG(port, PE_FLAGS_MODAL_OPERATION);
+
+ /* exit the mode */
+ modep->opos = 0;
+
+ return 1;
+}
+
+uint16_t pd_get_identity_vid(int port)
+{
+ return PD_IDH_VID(pe[port].am_policy.identity[0]);
+}
+
+uint16_t pd_get_identity_pid(int port)
+{
+ return PD_PRODUCT_PID(pe[port].am_policy.identity[2]);
+}
+
+
+#ifdef CONFIG_CMD_USB_PD_PE
+static void dump_pe(int port)
+{
+ const char * const idh_ptype_names[] = {
+ "UNDEF", "Hub", "Periph", "PCable", "ACable", "AMA",
+ "RSV6", "RSV7"};
+
+ int i, j, idh_ptype;
+ struct svdm_amode_data *modep;
+ uint32_t mode_caps;
+
+ if (pe[port].am_policy.identity[0] == 0) {
+ ccprintf("No identity discovered yet.\n");
+ return;
+ }
+ idh_ptype = PD_IDH_PTYPE(pe[port].am_policy.identity[0]);
+ ccprintf("IDENT:\n");
+ ccprintf("\t[ID Header] %08x :: %s, VID:%04x\n",
+ pe[port].am_policy.identity[0],
+ idh_ptype_names[idh_ptype],
+ pd_get_identity_vid(port));
+ ccprintf("\t[Cert Stat] %08x\n", pe[port].am_policy.identity[1]);
+ for (i = 2; i < ARRAY_SIZE(pe[port].am_policy.identity); i++) {
+ ccprintf("\t");
+ if (pe[port].am_policy.identity[i])
+ ccprintf("[%d] %08x ", i,
+ pe[port].am_policy.identity[i]);
+ }
+ ccprintf("\n");
+
+ if (pe[port].am_policy.svid_cnt < 1) {
+ ccprintf("No SVIDS discovered yet.\n");
+ return;
+ }
+
+ for (i = 0; i < pe[port].am_policy.svid_cnt; i++) {
+ ccprintf("SVID[%d]: %04x MODES:", i,
+ pe[port].am_policy.svids[i].svid);
+ for (j = 0; j < pe[port].am_policy.svids[i].mode_cnt; j++)
+ ccprintf(" [%d] %08x", j + 1,
+ pe[port].am_policy.svids[i].mode_vdo[j]);
+ ccprintf("\n");
+ modep = get_modep(port, pe[port].am_policy.svids[i].svid);
+ if (modep) {
+ mode_caps = modep->data->mode_vdo[modep->opos - 1];
+ ccprintf("MODE[%d]: svid:%04x caps:%08x\n", modep->opos,
+ modep->fx->svid, mode_caps);
+ }
+ }
+}
+
+static int command_pe(int argc, char **argv)
+{
+ int port;
+ char *e;
+
+ if (argc < 3)
+ return EC_ERROR_PARAM_COUNT;
+
+ /* command: pe <port> <subcmd> <args> */
+ port = strtoi(argv[1], &e, 10);
+ if (*e || port >= CONFIG_USB_PD_PORT_COUNT)
+ return EC_ERROR_PARAM2;
+ if (!strncasecmp(argv[2], "dump", 4))
+ dump_pe(port);
+
+ return EC_SUCCESS;
+}
+
+DECLARE_CONSOLE_COMMAND(pe, command_pe,
+ "<port> dump",
+ "USB PE");
+#endif /* CONFIG_CMD_USB_PD_PE */
+
+static int hc_remote_pd_discovery(struct host_cmd_handler_args *args)
+{
+ const uint8_t *port = args->params;
+ struct ec_params_usb_pd_discovery_entry *r = args->response;
+
+ if (*port >= CONFIG_USB_PD_PORT_COUNT)
+ return EC_RES_INVALID_PARAM;
+
+ r->vid = pd_get_identity_vid(*port);
+ r->ptype = PD_IDH_PTYPE(pe[*port].am_policy.identity[0]);
+
+ /* pid only included if vid is assigned */
+ if (r->vid)
+ r->pid = PD_PRODUCT_PID(pe[*port].am_policy.identity[2]);
+
+ args->response_size = sizeof(*r);
+ return EC_RES_SUCCESS;
+}
+DECLARE_HOST_COMMAND(EC_CMD_USB_PD_DISCOVERY,
+ hc_remote_pd_discovery,
+ EC_VER_MASK(0));
+
+static int hc_remote_pd_get_amode(struct host_cmd_handler_args *args)
+{
+ struct svdm_amode_data *modep;
+ const struct ec_params_usb_pd_get_mode_request *p = args->params;
+ struct ec_params_usb_pd_get_mode_response *r = args->response;
+
+ if (p->port >= CONFIG_USB_PD_PORT_COUNT)
+ return EC_RES_INVALID_PARAM;
+
+ /* no more to send */
+ if (p->svid_idx >= pe[p->port].am_policy.svid_cnt) {
+ r->svid = 0;
+ args->response_size = sizeof(r->svid);
+ return EC_RES_SUCCESS;
+ }
+
+ r->svid = pe[p->port].am_policy.svids[p->svid_idx].svid;
+ r->opos = 0;
+ memcpy(r->vdo, pe[p->port].am_policy.svids[p->svid_idx].mode_vdo, 24);
+ modep = get_modep(p->port, r->svid);
+
+ if (modep)
+ r->opos = pd_alt_mode(p->port, r->svid);
+
+ args->response_size = sizeof(*r);
+ return EC_RES_SUCCESS;
+}
+DECLARE_HOST_COMMAND(EC_CMD_USB_PD_GET_AMODE,
+ hc_remote_pd_get_amode,
+ EC_VER_MASK(0));
+
+#endif /* CONFIG_USB_PD_ALT_MODE_DFP */
+
+static const struct usb_state pe_states[] = {
+ /* Normal States */
+ [PE_SRC_STARTUP] = {
+ .entry = pe_src_startup_entry,
+ .run = pe_src_startup_run,
+ },
+ [PE_SRC_DISCOVERY] = {
+ .entry = pe_src_discovery_entry,
+ .run = pe_src_discovery_run,
+ },
+ [PE_SRC_SEND_CAPABILITIES] = {
+ .entry = pe_src_send_capabilities_entry,
+ .run = pe_src_send_capabilities_run,
+ },
+ [PE_SRC_NEGOTIATE_CAPABILITY] = {
+ .entry = pe_src_negotiate_capability_entry,
+ },
+ [PE_SRC_TRANSITION_SUPPLY] = {
+ .entry = pe_src_transition_supply_entry,
+ .run = pe_src_transition_supply_run,
+ },
+ [PE_SRC_READY] = {
+ .entry = pe_src_ready_entry,
+ .run = pe_src_ready_run,
+ .exit = pe_src_ready_exit,
+ },
+ [PE_SRC_DISABLED] = {
+ .entry = pe_src_disabled_entry,
+ },
+ [PE_SRC_CAPABILITY_RESPONSE] = {
+ .entry = pe_src_capability_response_entry,
+ .run = pe_src_capability_response_run,
+ },
+ [PE_SRC_HARD_RESET] = {
+ .entry = pe_src_hard_reset_entry,
+ .run = pe_src_hard_reset_run,
+ },
+ [PE_SRC_HARD_RESET_RECEIVED] = {
+ .entry = pe_src_hard_reset_received_entry,
+ .run = pe_src_hard_reset_received_run,
+ },
+ [PE_SRC_TRANSITION_TO_DEFAULT] = {
+ .entry = pe_src_transition_to_default_entry,
+ .run = pe_src_transition_to_default_run,
+ },
+ [PE_SRC_VDM_IDENTITY_REQUEST] = {
+ .entry = pe_src_vdm_identity_request_entry,
+ .run = pe_src_vdm_identity_request_run,
+ },
+ [PE_SNK_STARTUP] = {
+ .entry = pe_snk_startup_entry,
+ .run = pe_snk_startup_run,
+ },
+ [PE_SNK_DISCOVERY] = {
+ .entry = pe_snk_discovery_entry,
+ .run = pe_snk_discovery_run,
+ },
+ [PE_SNK_WAIT_FOR_CAPABILITIES] = {
+ .entry = pe_snk_wait_for_capabilities_entry,
+ .run = pe_snk_wait_for_capabilities_run,
+ },
+ [PE_SNK_EVALUATE_CAPABILITY] = {
+ .entry = pe_snk_evaluate_capability_entry,
+ },
+ [PE_SNK_SELECT_CAPABILITY] = {
+ .entry = pe_snk_select_capability_entry,
+ .run = pe_snk_select_capability_run,
+ },
+ [PE_SNK_READY] = {
+ .entry = pe_snk_ready_entry,
+ .run = pe_snk_ready_run,
+ .exit = pe_snk_ready_exit,
+ },
+ [PE_SNK_HARD_RESET] = {
+ .entry = pe_snk_hard_reset_entry,
+ .run = pe_snk_hard_reset_run,
+ },
+ [PE_SNK_TRANSITION_TO_DEFAULT] = {
+ .entry = pe_snk_transition_to_default_entry,
+ .run = pe_snk_transition_to_default_run,
+ },
+ [PE_SNK_GIVE_SINK_CAP] = {
+ .entry = pe_snk_give_sink_cap_entry,
+ .run = pe_snk_give_sink_cap_run,
+ },
+ [PE_SNK_GET_SOURCE_CAP] = {
+ .entry = pe_snk_get_source_cap_entry,
+ .run = pe_snk_get_source_cap_run,
+ },
+ [PE_SNK_TRANSITION_SINK] = {
+ .entry = pe_snk_transition_sink_entry,
+ .run = pe_snk_transition_sink_run,
+ .exit = pe_snk_transition_sink_exit,
+ },
+ [PE_SEND_SOFT_RESET] = {
+ .entry = pe_send_soft_reset_entry,
+ .run = pe_send_soft_reset_run,
+ .exit = pe_send_soft_reset_exit,
+ },
+ [PE_SOFT_RESET] = {
+ .entry = pe_soft_reset_entry,
+ .run = pe_soft_reset_run,
+ },
+ [PE_SEND_NOT_SUPPORTED] = {
+ .entry = pe_send_not_supported_entry,
+ .run = pe_send_not_supported_run,
+ },
+ [PE_SRC_PING] = {
+ .entry = pe_src_ping_entry,
+ .run = pe_src_ping_run,
+ },
+ [PE_GIVE_BATTERY_CAP] = {
+ .entry = pe_give_battery_cap_entry,
+ .run = pe_give_battery_cap_run,
+ },
+ [PE_GIVE_BATTERY_STATUS] = {
+ .entry = pe_give_battery_status_entry,
+ .run = pe_give_battery_status_run,
+ },
+ [PE_DRS_EVALUATE_SWAP] = {
+ .entry = pe_drs_evaluate_swap_entry,
+ .run = pe_drs_evaluate_swap_run,
+ },
+ [PE_DRS_CHANGE] = {
+ .entry = pe_drs_change_entry,
+ .run = pe_drs_change_run,
+ },
+ [PE_DRS_SEND_SWAP] = {
+ .entry = pe_drs_send_swap_entry,
+ .run = pe_drs_send_swap_run,
+ },
+ [PE_PRS_SRC_SNK_EVALUATE_SWAP] = {
+ .entry = pe_prs_src_snk_evaluate_swap_entry,
+ .run = pe_prs_src_snk_evaluate_swap_run,
+ },
+ [PE_PRS_SRC_SNK_TRANSITION_TO_OFF] = {
+ .entry = pe_prs_src_snk_transition_to_off_entry,
+ .run = pe_prs_src_snk_transition_to_off_run,
+ },
+ [PE_PRS_SRC_SNK_WAIT_SOURCE_ON] = {
+ .entry = pe_prs_src_snk_wait_source_on_entry,
+ .run = pe_prs_src_snk_wait_source_on_run,
+ },
+ [PE_PRS_SRC_SNK_SEND_SWAP] = {
+ .entry = pe_prs_src_snk_send_swap_entry,
+ .run = pe_prs_src_snk_send_swap_run,
+ .exit = pe_prs_src_snk_send_swap_exit,
+ },
+ [PE_PRS_SNK_SRC_EVALUATE_SWAP] = {
+ .entry = pe_prs_snk_src_evaluate_swap_entry,
+ .run = pe_prs_snk_src_evaluate_swap_run,
+ },
+ [PE_PRS_SNK_SRC_TRANSITION_TO_OFF] = {
+ .entry = pe_prs_snk_src_transition_to_off_entry,
+ .run = pe_prs_snk_src_transition_to_off_run,
+ },
+ [PE_PRS_SNK_SRC_ASSERT_RP] = {
+ .entry = pe_prs_snk_src_assert_rp_entry,
+ .run = pe_prs_snk_src_assert_rp_run,
+ },
+ [PE_PRS_SNK_SRC_SOURCE_ON] = {
+ .entry = pe_prs_snk_src_source_on_entry,
+ .run = pe_prs_snk_src_source_on_run,
+ },
+ [PE_PRS_SNK_SRC_SEND_SWAP] = {
+ .entry = pe_prs_snk_src_send_swap_entry,
+ .run = pe_prs_snk_src_send_swap_run,
+ .exit = pe_prs_snk_src_send_swap_exit,
+ },
+ [PE_VCS_EVALUATE_SWAP] = {
+ .entry = pe_vcs_evaluate_swap_entry,
+ .run = pe_vcs_evaluate_swap_run,
+ },
+ [PE_VCS_SEND_SWAP] = {
+ .entry = pe_vcs_send_swap_entry,
+ .run = pe_vcs_send_swap_run,
+ },
+ [PE_VCS_WAIT_FOR_VCONN_SWAP] = {
+ .entry = pe_vcs_wait_for_vconn_swap_entry,
+ .run = pe_vcs_wait_for_vconn_swap_run,
+ },
+ [PE_VCS_TURN_ON_VCONN_SWAP] = {
+ .entry = pe_vcs_turn_on_vconn_swap_entry,
+ .run = pe_vcs_turn_on_vconn_swap_run,
+ },
+ [PE_VCS_TURN_OFF_VCONN_SWAP] = {
+ .entry = pe_vcs_turn_off_vconn_swap_entry,
+ .run = pe_vcs_turn_off_vconn_swap_run,
+ },
+ [PE_VCS_SEND_PS_RDY_SWAP] = {
+ .entry = pe_vcs_send_ps_rdy_swap_entry,
+ .run = pe_vcs_send_ps_rdy_swap_run,
+ },
+ [PE_DO_PORT_DISCOVERY] = {
+ .entry = pe_do_port_discovery_entry,
+ .run = pe_do_port_discovery_run,
+ },
+ [PE_VDM_REQUEST] = {
+ .entry = pe_vdm_request_entry,
+ .run = pe_vdm_request_run,
+ .exit = pe_vdm_request_exit,
+ },
+ [PE_VDM_ACKED] = {
+ .entry = pe_vdm_acked_entry,
+ },
+ [PE_VDM_RESPONSE] = {
+ .entry = pe_vdm_response_entry,
+ .run = pe_vdm_response_run,
+ },
+ [PE_HANDLE_CUSTOM_VDM_REQUEST] = {
+ .entry = pe_handle_custom_vdm_request_entry,
+ .run = pe_handle_custom_vdm_request_run
+ },
+ [PE_WAIT_FOR_ERROR_RECOVERY] = {
+ .entry = pe_wait_for_error_recovery_entry,
+ .run = pe_wait_for_error_recovery_run,
+ },
+ [PE_BIST] = {
+ .entry = pe_bist_entry,
+ .run = pe_bist_run,
+ },
+};
+
+#ifdef TEST_BUILD
+const struct test_sm_data test_pe_sm_data[] = {
+ {
+ .base = pe_states,
+ .size = ARRAY_SIZE(pe_states),
+ .names = pe_state_names,
+ .names_size = ARRAY_SIZE(pe_state_names),
+ },
+};
+const int test_pe_sm_data_size = ARRAY_SIZE(test_pe_sm_data);
+#endif
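The pe_states[] table above is data-driven: each state supplies optional entry/run/exit callbacks and the shared state-machine core invokes them around transitions. A simplified, hypothetical dispatcher along those lines (the real one lives in the common usb_sm code and differs in detail; struct and function names here are invented):

struct sm_state_sketch {
        void (*entry)(int port);
        void (*run)(int port);
        void (*exit)(int port);
};

static void sm_transition(int port, const struct sm_state_sketch *states,
                          int *current, int next)
{
        if (states[*current].exit)
                states[*current].exit(port);
        *current = next;
        if (states[*current].entry)
                states[*current].entry(port);
}

static void sm_run_once(int port, const struct sm_state_sketch *states,
                        int current)
{
        if (states[current].run)
                states[current].run(port);
}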
diff --git a/common/usbc/usb_prl_sm.c b/common/usbc/usb_prl_sm.c
index 546506ad0f..0321c0c1b0 100644
--- a/common/usbc/usb_prl_sm.c
+++ b/common/usbc/usb_prl_sm.c
@@ -32,6 +32,26 @@
#include "vpd_api.h"
#include "version.h"
+#define RCH_SET_FLAG(port, flag) atomic_or(&rch[port].flags, (flag))
+#define RCH_CLR_FLAG(port, flag) atomic_clear(&rch[port].flags, (flag))
+#define RCH_CHK_FLAG(port, flag) (rch[port].flags & (flag))
+
+#define TCH_SET_FLAG(port, flag) atomic_or(&tch[port].flags, (flag))
+#define TCH_CLR_FLAG(port, flag) atomic_clear(&tch[port].flags, (flag))
+#define TCH_CHK_FLAG(port, flag) (tch[port].flags & (flag))
+
+#define PRL_TX_SET_FLAG(port, flag) atomic_or(&prl_tx[port].flags, (flag))
+#define PRL_TX_CLR_FLAG(port, flag) atomic_clear(&prl_tx[port].flags, (flag))
+#define PRL_TX_CHK_FLAG(port, flag) (prl_tx[port].flags & (flag))
+
+#define PRL_HR_SET_FLAG(port, flag) atomic_or(&prl_hr[port].flags, (flag))
+#define PRL_HR_CLR_FLAG(port, flag) atomic_clear(&prl_hr[port].flags, (flag))
+#define PRL_HR_CHK_FLAG(port, flag) (prl_hr[port].flags & (flag))
+
+#define PDMSG_SET_FLAG(port, flag) atomic_or(&pdmsg[port].flags, (flag))
+#define PDMSG_CLR_FLAG(port, flag) atomic_clear(&pdmsg[port].flags, (flag))
+#define PDMSG_CHK_FLAG(port, flag) (pdmsg[port].flags & (flag))
+
/* Protocol Layer Flags */
#define PRL_FLAGS_TX_COMPLETE BIT(0)
#define PRL_FLAGS_START_AMS BIT(1)
@@ -157,7 +177,7 @@ static struct protocol_hard_reset {
/* Chunking Message Object */
static struct pd_message {
/* message status flags */
- uint32_t status_flags;
+ uint32_t flags;
/* SOP* */
enum tcpm_transmit_type xmit_type;
/* type of message */
@@ -166,6 +186,8 @@ static struct pd_message {
uint8_t ext;
/* PD revision */
enum pd_rev_type rev;
+ /* Cable PD revision */
+ enum pd_rev_type cable_rev;
/* Number of 32-bit objects in chk_buf */
uint16_t data_objs;
/* temp chunk buffer */
@@ -242,10 +264,10 @@ void pd_transmit_complete(int port, int status)
void pd_execute_hard_reset(int port)
{
/* Only allow async. function calls when state machine is running */
- if (local_state[port] != SM_RUN)
+ if (!prl_is_running(port))
return;
- prl_hr[port].flags |= PRL_FLAGS_PORT_PARTNER_HARD_RESET;
+ PRL_HR_SET_FLAG(port, PRL_FLAGS_PORT_PARTNER_HARD_RESET);
set_state_prl_hr(port, PRL_HR_RESET_LAYER);
task_set_event(PD_PORT_TO_TASK_ID(port), PD_EVENT_SM, 0);
}
@@ -253,14 +275,19 @@ void pd_execute_hard_reset(int port)
void prl_execute_hard_reset(int port)
{
/* Only allow async. function calls when state machine is running */
- if (local_state[port] != SM_RUN)
+ if (!prl_is_running(port))
return;
- prl_hr[port].flags |= PRL_FLAGS_PE_HARD_RESET;
+ PRL_HR_SET_FLAG(port, PRL_FLAGS_PE_HARD_RESET);
set_state_prl_hr(port, PRL_HR_RESET_LAYER);
task_set_event(PD_PORT_TO_TASK_ID(port), PD_EVENT_SM, 0);
}
+int prl_is_running(int port)
+{
+ return local_state[port] == SM_RUN;
+}
+
static void prl_init(int port)
{
int i;
@@ -273,12 +300,13 @@ static void prl_init(int port)
rch[port].flags = 0;
/*
- * Initialize to highest revision supported. If the port partner
- * doesn't support this revision, the Protocol Engine will lower
- * this value to the revision supported by the port partner.
+ * Initialize to highest revision supported. If the port or cable
+ * partner doesn't support this revision, the Protocol Engine will
+ * lower this value to the revision supported by the partner.
*/
+ pdmsg[port].cable_rev = PD_REV30;
pdmsg[port].rev = PD_REV30;
- pdmsg[port].status_flags = 0;
+ pdmsg[port].flags = 0;
prl_hr[port].flags = 0;
@@ -303,17 +331,17 @@ static void prl_init(int port)
void prl_start_ams(int port)
{
- prl_tx[port].flags |= PRL_FLAGS_START_AMS;
+ PRL_TX_SET_FLAG(port, PRL_FLAGS_START_AMS);
}
void prl_end_ams(int port)
{
- prl_tx[port].flags |= PRL_FLAGS_END_AMS;
+ PRL_TX_SET_FLAG(port, PRL_FLAGS_END_AMS);
}
void prl_hard_reset_complete(int port)
{
- prl_hr[port].flags |= PRL_FLAGS_HARD_RESET_COMPLETE;
+ PRL_HR_SET_FLAG(port, PRL_FLAGS_HARD_RESET_COMPLETE);
task_set_event(PD_PORT_TO_TASK_ID(port), PD_EVENT_SM, 0);
}
@@ -326,7 +354,7 @@ void prl_send_ctrl_msg(int port,
pdmsg[port].ext = 0;
emsg[port].len = 0;
- tch[port].flags |= PRL_FLAGS_MSG_XMIT;
+ TCH_SET_FLAG(port, PRL_FLAGS_MSG_XMIT);
task_set_event(PD_PORT_TO_TASK_ID(port), PD_EVENT_SM, 0);
}
@@ -338,7 +366,7 @@ void prl_send_data_msg(int port,
pdmsg[port].msg_type = msg;
pdmsg[port].ext = 0;
- tch[port].flags |= PRL_FLAGS_MSG_XMIT;
+ TCH_SET_FLAG(port, PRL_FLAGS_MSG_XMIT);
task_set_event(PD_PORT_TO_TASK_ID(port), PD_EVENT_SM, 0);
}
@@ -350,10 +378,15 @@ void prl_send_ext_data_msg(int port,
pdmsg[port].msg_type = msg;
pdmsg[port].ext = 1;
- tch[port].flags |= PRL_FLAGS_MSG_XMIT;
+ TCH_SET_FLAG(port, PRL_FLAGS_MSG_XMIT);
task_set_event(PD_PORT_TO_TASK_ID(port), PD_EVENT_SM, 0);
}
+void prl_reset(int port)
+{
+ local_state[port] = SM_INIT;
+}
+
void prl_run(int port, int evt, int en)
{
switch (local_state[port]) {
@@ -409,6 +442,16 @@ enum pd_rev_type prl_get_rev(int port)
return pdmsg[port].rev;
}
+void prl_set_cable_rev(int port, enum pd_rev_type rev)
+{
+ pdmsg[port].cable_rev = rev;
+}
+
+enum pd_rev_type prl_get_cable_rev(int port)
+{
+ return pdmsg[port].cable_rev;
+}
+
/* Common Protocol Layer Message Transmission */
static void prl_tx_phy_layer_reset_entry(const int port)
{
@@ -434,8 +477,8 @@ static void prl_tx_wait_for_message_request_entry(const int port)
static void prl_tx_wait_for_message_request_run(const int port)
{
- if (prl_tx[port].flags & PRL_FLAGS_MSG_XMIT) {
- prl_tx[port].flags &= ~PRL_FLAGS_MSG_XMIT;
+ if (PRL_TX_CHK_FLAG(port, PRL_FLAGS_MSG_XMIT)) {
+ PRL_TX_CLR_FLAG(port, PRL_FLAGS_MSG_XMIT);
/*
* Soft Reset Message pending
*/
@@ -453,16 +496,15 @@ static void prl_tx_wait_for_message_request_run(const int port)
}
return;
- } else if ((pdmsg[port].rev == PD_REV30) &&
- (prl_tx[port].flags &
+ } else if ((pdmsg[port].rev == PD_REV30) && PRL_TX_CHK_FLAG(port,
(PRL_FLAGS_START_AMS | PRL_FLAGS_END_AMS))) {
if (tc_get_power_role(port) == PD_ROLE_SOURCE) {
/*
* Start of AMS notification received from
* Policy Engine
*/
- if (prl_tx[port].flags & PRL_FLAGS_START_AMS) {
- prl_tx[port].flags &= ~PRL_FLAGS_START_AMS;
+ if (PRL_TX_CHK_FLAG(port, PRL_FLAGS_START_AMS)) {
+ PRL_TX_CLR_FLAG(port, PRL_FLAGS_START_AMS);
set_state_prl_tx(port, PRL_TX_SRC_SOURCE_TX);
return;
}
@@ -470,8 +512,8 @@ static void prl_tx_wait_for_message_request_run(const int port)
* End of AMS notification received from
* Policy Engine
*/
- else if (prl_tx[port].flags & PRL_FLAGS_END_AMS) {
- prl_tx[port].flags &= ~PRL_FLAGS_END_AMS;
+ else if (PRL_TX_CHK_FLAG(port, PRL_FLAGS_END_AMS)) {
+ PRL_TX_CLR_FLAG(port, PRL_FLAGS_END_AMS);
/* Set Rp = SinkTxOk */
tcpm_select_rp_value(port, SINK_TX_OK);
tcpm_set_cc(port, TYPEC_CC_RP);
@@ -479,8 +521,8 @@ static void prl_tx_wait_for_message_request_run(const int port)
prl_tx[port].flags = 0;
}
} else {
- if (prl_tx[port].flags & PRL_FLAGS_START_AMS) {
- prl_tx[port].flags &= ~PRL_FLAGS_START_AMS;
+ if (PRL_TX_CHK_FLAG(port, PRL_FLAGS_START_AMS)) {
+ PRL_TX_CLR_FLAG(port, PRL_FLAGS_START_AMS);
/*
* First Message in AMS notification
* received from Policy Engine.
@@ -521,8 +563,8 @@ static void prl_tx_src_source_tx_entry(const int port)
static void prl_tx_src_source_tx_run(const int port)
{
- if (prl_tx[port].flags & PRL_FLAGS_MSG_XMIT) {
- prl_tx[port].flags &= ~PRL_FLAGS_MSG_XMIT;
+ if (PRL_TX_CHK_FLAG(port, PRL_FLAGS_MSG_XMIT)) {
+ PRL_TX_CLR_FLAG(port, PRL_FLAGS_MSG_XMIT);
set_state_prl_tx(port, PRL_TX_SRC_PENDING);
}
@@ -533,8 +575,8 @@ static void prl_tx_src_source_tx_run(const int port)
*/
static void prl_tx_snk_start_ams_run(const int port)
{
- if (prl_tx[port].flags & PRL_FLAGS_MSG_XMIT) {
- prl_tx[port].flags &= ~PRL_FLAGS_MSG_XMIT;
+ if (PRL_TX_CHK_FLAG(port, PRL_FLAGS_MSG_XMIT)) {
+ PRL_TX_CLR_FLAG(port, PRL_FLAGS_MSG_XMIT);
set_state_prl_tx(port, PRL_TX_SNK_PENDING);
}
@@ -567,12 +609,21 @@ static void prl_tx_construct_message(int port)
tc_get_data_role(port),
prl_tx[port].msg_id_counter[pdmsg[port].xmit_type],
pdmsg[port].data_objs,
- pdmsg[port].rev,
+ (prl_tx[port].sop == TCPC_TX_SOP) ?
+ pdmsg[port].rev : pdmsg[port].cable_rev,
pdmsg[port].ext);
/* Save SOP* so the correct msg_id_counter can be incremented */
prl_tx[port].sop = pdmsg[port].xmit_type;
+ /*
+ * These flags could be set if this function is called before the
+ * Policy Engine is informed of the previous transmission. Clear the
+ * flags so that this message can be sent.
+ */
+ prl_tx[port].xmit_status = TCPC_TX_UNSET;
+ PDMSG_CLR_FLAG(port, PRL_FLAGS_TX_COMPLETE);
+
/* Pass message to PHY Layer */
tcpm_transmit(port, pdmsg[port].xmit_type, header,
pdmsg[port].chk_buf);
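The header construction above picks the message revision based on the SOP* target: SOP traffic uses the revision negotiated with the port partner, while SOP' (cable plug) traffic uses the separately tracked cable revision. Expressed as a small helper using the accessors added in this change (helper name invented for the example):

static enum pd_rev_type rev_for_sop(int port, enum tcpm_transmit_type sop)
{
        return (sop == TCPC_TX_SOP) ? prl_get_rev(port)
                                    : prl_get_cable_rev(port);
}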
@@ -623,7 +674,7 @@ static void prl_tx_wait_for_phy_response_run(const int port)
* State tch_wait_for_transmission_complete will
* inform policy engine of error
*/
- pdmsg[port].status_flags |= PRL_FLAGS_TX_ERROR;
+ PDMSG_SET_FLAG(port, PRL_FLAGS_TX_ERROR);
/* Increment message id counter */
increment_msgid_counter(port);
@@ -644,7 +695,7 @@ static void prl_tx_wait_for_phy_response_run(const int port)
/* Increment messageId counter */
increment_msgid_counter(port);
/* Inform Policy Engine Message was sent */
- pdmsg[port].status_flags |= PRL_FLAGS_TX_COMPLETE;
+ PDMSG_SET_FLAG(port, PRL_FLAGS_TX_COMPLETE);
set_state_prl_tx(port, PRL_TX_WAIT_FOR_MESSAGE_REQUEST);
return;
}
@@ -727,10 +778,9 @@ static void prl_hr_wait_for_request_entry(const int port)
static void prl_hr_wait_for_request_run(const int port)
{
- if (prl_hr[port].flags & PRL_FLAGS_PE_HARD_RESET ||
- prl_hr[port].flags & PRL_FLAGS_PORT_PARTNER_HARD_RESET) {
+ if (PRL_HR_CHK_FLAG(port, PRL_FLAGS_PE_HARD_RESET |
+ PRL_FLAGS_PORT_PARTNER_HARD_RESET))
set_state_prl_hr(port, PRL_HR_RESET_LAYER);
- }
}
/*
@@ -748,6 +798,25 @@ static void prl_hr_reset_layer_entry(const int port)
* PRL_Tx_Wait_For_Message_Request state.
*/
set_state_prl_tx(port, PRL_TX_WAIT_FOR_MESSAGE_REQUEST);
+
+ tch[port].flags = 0;
+ rch[port].flags = 0;
+ pdmsg[port].flags = 0;
+
+ /* Reset message ids */
+ for (i = 0; i < NUM_XMIT_TYPES; i++) {
+ prl_rx[port].msg_id[i] = -1;
+ prl_tx[port].msg_id_counter[i] = 0;
+ }
+
+ /* Disable RX */
+ if (IS_ENABLED(CONFIG_USB_TYPEC_CTVPD) ||
+ IS_ENABLED(CONFIG_USB_TYPEC_VPD))
+ vpd_rx_enable(0);
+ else
+ tcpm_set_rx_enable(port, 0);
+
+ return;
}
static void prl_hr_reset_layer_run(const int port)
@@ -756,7 +825,7 @@ static void prl_hr_reset_layer_run(const int port)
* Protocol Layer reset Complete &
* Hard Reset was initiated by Policy Engine
*/
- if (prl_hr[port].flags & PRL_FLAGS_PE_HARD_RESET) {
+ if (PRL_HR_CHK_FLAG(port, PRL_FLAGS_PE_HARD_RESET)) {
/* Request PHY to perform a Hard Reset */
prl_send_ctrl_msg(port, TCPC_TX_HARD_RESET, 0);
set_state_prl_hr(port, PRL_HR_WAIT_FOR_PHY_HARD_RESET_COMPLETE);
@@ -788,7 +857,7 @@ static void prl_hr_wait_for_phy_hard_reset_complete_run(const int port)
* Wait for hard reset from PHY
* or timeout
*/
- if ((pdmsg[port].status_flags & PRL_FLAGS_TX_COMPLETE) ||
+ if (PDMSG_CHK_FLAG(port, PRL_FLAGS_TX_COMPLETE) ||
(get_time().val > prl_hr[port].hard_reset_complete_timer)) {
/* PRL_HR_PHY_Hard_Reset_Requested */
@@ -808,7 +877,7 @@ static void prl_hr_wait_for_pe_hard_reset_complete_run(const int port)
/*
* Wait for Hard Reset complete indication from Policy Engine
*/
- if (prl_hr[port].flags & PRL_FLAGS_HARD_RESET_COMPLETE)
+ if (PRL_HR_CHK_FLAG(port, PRL_FLAGS_HARD_RESET_COMPLETE))
set_state_prl_hr(port, PRL_HR_WAIT_FOR_REQUEST);
}
@@ -837,24 +906,19 @@ static void copy_chunk_to_ext(int port)
/*
* Chunked Rx State Machine
*/
-static inline void rch_clear_abort_set_chunking(int port)
+static void rch_wait_for_message_from_protocol_layer_entry(const int port)
{
/* Clear Abort flag */
- pdmsg[port].status_flags &= ~PRL_FLAGS_ABORT;
+ PDMSG_CLR_FLAG(port, PRL_FLAGS_ABORT);
/* All Messages are chunked */
rch[port].flags = PRL_FLAGS_CHUNKING;
}
-static void rch_wait_for_message_from_protocol_layer_entry(const int port)
-{
- rch_clear_abort_set_chunking(port);
-}
-
static void rch_wait_for_message_from_protocol_layer_run(const int port)
{
- if (rch[port].flags & PRL_FLAGS_MSG_RECEIVED) {
- rch[port].flags &= ~PRL_FLAGS_MSG_RECEIVED;
+ if (RCH_CHK_FLAG(port, PRL_FLAGS_MSG_RECEIVED)) {
+ RCH_CLR_FLAG(port, PRL_FLAGS_MSG_RECEIVED);
/*
* Are we communicating with a PD3.0 device and is
* this an extended message?
@@ -868,17 +932,17 @@ static void rch_wait_for_message_from_protocol_layer_run(const int port)
* Received Extended Message &
* (Chunking = 1 & Chunked = 1)
*/
- if ((rch[port].flags & PRL_FLAGS_CHUNKING) && chunked) {
+ if ((RCH_CHK_FLAG(port, PRL_FLAGS_CHUNKING)) &&
+ chunked) {
set_state_rch(port,
RCH_PROCESSING_EXTENDED_MESSAGE);
- return;
}
/*
* (Received Extended Message &
* (Chunking = 0 & Chunked = 0))
*/
- else if (!(rch[port].flags &
- PRL_FLAGS_CHUNKING) && !chunked) {
+ else if (!RCH_CHK_FLAG(port, PRL_FLAGS_CHUNKING) &&
+ !chunked) {
/* Copy chunk to extended buffer */
copy_chunk_to_ext(port);
set_state_rch(port, RCH_PASS_UP_MESSAGE);
@@ -888,7 +952,6 @@ static void rch_wait_for_message_from_protocol_layer_run(const int port)
*/
else {
set_state_rch(port, RCH_REPORT_ERROR);
- return;
}
}
/*
@@ -905,7 +968,6 @@ static void rch_wait_for_message_from_protocol_layer_run(const int port)
*/
else {
set_state_rch(port, RCH_REPORT_ERROR);
- return;
}
}
}
@@ -916,7 +978,7 @@ static void rch_wait_for_message_from_protocol_layer_run(const int port)
static void rch_pass_up_message_entry(const int port)
{
/* Pass Message to Policy Engine */
- pe_pass_up_message(port);
+ pe_message_received(port);
set_state_rch(port, RCH_WAIT_FOR_MESSAGE_FROM_PROTOCOL_LAYER);
}
@@ -951,9 +1013,9 @@ static void rch_processing_extended_message_run(const int port)
/*
* Abort Flag Set
*/
- if (pdmsg[port].status_flags & PRL_FLAGS_ABORT) {
+ if (PDMSG_CHK_FLAG(port, PRL_FLAGS_ABORT))
set_state_rch(port, RCH_WAIT_FOR_MESSAGE_FROM_PROTOCOL_LAYER);
- }
+
/*
* If expected Chunk Number:
* Append data to Extended_Message_Buffer
@@ -1019,7 +1081,7 @@ static void rch_requesting_chunk_entry(const int port)
pdmsg[port].data_objs = 1;
pdmsg[port].ext = 1;
- prl_tx[port].flags |= PRL_FLAGS_MSG_XMIT;
+ PRL_TX_SET_FLAG(port, PRL_FLAGS_MSG_XMIT);
task_set_event(PD_PORT_TO_TASK_ID(port), PD_EVENT_TX, 0);
}
@@ -1029,8 +1091,8 @@ static void rch_requesting_chunk_run(const int port)
* Transmission Error from Protocol Layer or
* Message Received From Protocol Layer
*/
- if (rch[port].flags & PRL_FLAGS_MSG_RECEIVED ||
- pdmsg[port].status_flags & PRL_FLAGS_TX_ERROR) {
+ if (RCH_CHK_FLAG(port, PRL_FLAGS_MSG_RECEIVED) ||
+ PDMSG_CHK_FLAG(port, PRL_FLAGS_TX_ERROR)) {
/*
* Leave PRL_FLAGS_MSG_RECEIVED flag set. It'll be
* cleared in rch_report_error state
@@ -1040,8 +1102,8 @@ static void rch_requesting_chunk_run(const int port)
/*
* Message Transmitted received from Protocol Layer
*/
- else if (pdmsg[port].status_flags & PRL_FLAGS_TX_COMPLETE) {
- pdmsg[port].status_flags &= ~PRL_FLAGS_TX_COMPLETE;
+ else if (PDMSG_CHK_FLAG(port, PRL_FLAGS_TX_COMPLETE)) {
+ PDMSG_CLR_FLAG(port, PRL_FLAGS_TX_COMPLETE);
set_state_rch(port, RCH_WAITING_CHUNK);
}
}
@@ -1060,7 +1122,7 @@ static void rch_waiting_chunk_entry(const int port)
static void rch_waiting_chunk_run(const int port)
{
- if ((rch[port].flags & PRL_FLAGS_MSG_RECEIVED)) {
+ if (RCH_CHK_FLAG(port, PRL_FLAGS_MSG_RECEIVED)) {
/*
* Leave PRL_FLAGS_MSG_RECEIVED flag set just in case an error
* is detected. If an error is detected, PRL_FLAGS_MSG_RECEIVED
@@ -1084,7 +1146,7 @@ static void rch_waiting_chunk_run(const int port)
* No error was detected, so clear
* PRL_FLAGS_MSG_RECEIVED flag.
*/
- rch[port].flags &= ~PRL_FLAGS_MSG_RECEIVED;
+ RCH_CLR_FLAG(port, PRL_FLAGS_MSG_RECEIVED);
set_state_rch(port,
RCH_PROCESSING_EXTENDED_MESSAGE);
}
@@ -1107,13 +1169,13 @@ static void rch_report_error_entry(const int port)
* If the state was entered because a message was received,
* this message is passed to the Policy Engine.
*/
- if (rch[port].flags & PRL_FLAGS_MSG_RECEIVED) {
- rch[port].flags &= ~PRL_FLAGS_MSG_RECEIVED;
+ if (RCH_CHK_FLAG(port, PRL_FLAGS_MSG_RECEIVED)) {
+ RCH_CLR_FLAG(port, PRL_FLAGS_MSG_RECEIVED);
/* Copy chunk to extended buffer */
copy_chunk_to_ext(port);
/* Pass Message to Policy Engine */
- pe_pass_up_message(port);
+ pe_message_received(port);
/* Report error */
pe_report_error(port, ERR_RCH_MSG_REC);
} else {
@@ -1133,7 +1195,7 @@ static void rch_report_error_run(const int port)
static inline void tch_clear_abort_set_chunking(int port)
{
/* Clear Abort flag */
- pdmsg[port].status_flags &= ~PRL_FLAGS_ABORT;
+ PDMSG_CLR_FLAG(port, PRL_FLAGS_ABORT);
/* All Messages are chunked */
tch[port].flags = PRL_FLAGS_CHUNKING;
@@ -1149,12 +1211,11 @@ static void tch_wait_for_message_request_from_pe_run(const int port)
/*
* Any message received and not in state TCH_Wait_Chunk_Request
*/
- if (tch[port].flags & PRL_FLAGS_MSG_RECEIVED) {
- tch[port].flags &= ~PRL_FLAGS_MSG_RECEIVED;
+ if (TCH_CHK_FLAG(port, PRL_FLAGS_MSG_RECEIVED)) {
+ TCH_CLR_FLAG(port, PRL_FLAGS_MSG_RECEIVED);
set_state_tch(port, TCH_MESSAGE_RECEIVED);
- return;
- } else if (tch[port].flags & PRL_FLAGS_MSG_XMIT) {
- tch[port].flags &= ~PRL_FLAGS_MSG_XMIT;
+ } else if (TCH_CHK_FLAG(port, PRL_FLAGS_MSG_XMIT)) {
+ TCH_CLR_FLAG(port, PRL_FLAGS_MSG_XMIT);
/*
* Rx Chunking State != RCH_Wait_For_Message_From_Protocol_Layer
* & Abort Supported
@@ -1171,7 +1232,7 @@ static void tch_wait_for_message_request_from_pe_run(const int port)
* Extended Message Request & Chunking
*/
if ((pdmsg[port].rev == PD_REV30) && pdmsg[port].ext &&
- (tch[port].flags & PRL_FLAGS_CHUNKING)) {
+ TCH_CHK_FLAG(port, PRL_FLAGS_CHUNKING)) {
pdmsg[port].send_offset = 0;
pdmsg[port].chunk_number_to_send = 0;
set_state_tch(port,
@@ -1205,12 +1266,10 @@ static void tch_wait_for_message_request_from_pe_run(const int port)
pdmsg[port].data_objs =
(emsg[port].len + 3) >> 2;
/* Pass Message to Protocol Layer */
- prl_tx[port].flags |= PRL_FLAGS_MSG_XMIT;
+ PRL_TX_SET_FLAG(port, PRL_FLAGS_MSG_XMIT);
set_state_tch(port,
TCH_WAIT_FOR_TRANSMISSION_COMPLETE);
}
-
- return;
}
}
}
@@ -1221,33 +1280,30 @@ static void tch_wait_for_message_request_from_pe_run(const int port)
static void tch_wait_for_transmission_complete_run(const int port)
{
/*
- * Any message received and not in state TCH_Wait_Chunk_Request
- */
- if (tch[port].flags & PRL_FLAGS_MSG_RECEIVED) {
- tch[port].flags &= ~PRL_FLAGS_MSG_RECEIVED;
- set_state_tch(port, TCH_MESSAGE_RECEIVED);
- return;
- }
-
- /*
* Inform Policy Engine that Message was sent.
*/
- if (pdmsg[port].status_flags & PRL_FLAGS_TX_COMPLETE) {
- pdmsg[port].status_flags &= ~PRL_FLAGS_TX_COMPLETE;
- set_state_tch(port, TCH_WAIT_FOR_MESSAGE_REQUEST_FROM_PE);
-
+ if (PDMSG_CHK_FLAG(port, PRL_FLAGS_TX_COMPLETE)) {
+ PDMSG_CLR_FLAG(port, PRL_FLAGS_TX_COMPLETE);
/* Tell PE message was sent */
pe_message_sent(port);
+ set_state_tch(port, TCH_WAIT_FOR_MESSAGE_REQUEST_FROM_PE);
}
/*
* Inform Policy Engine of Tx Error
*/
- else if (pdmsg[port].status_flags & PRL_FLAGS_TX_ERROR) {
- pdmsg[port].status_flags &= ~PRL_FLAGS_TX_ERROR;
+ else if (PDMSG_CHK_FLAG(port, PRL_FLAGS_TX_ERROR)) {
+ PDMSG_CLR_FLAG(port, PRL_FLAGS_TX_ERROR);
/* Tell PE an error occurred */
pe_report_error(port, ERR_TCH_XMIT);
set_state_tch(port, TCH_WAIT_FOR_MESSAGE_REQUEST_FROM_PE);
}
+ /*
+ * Any message received and not in state TCH_Wait_Chunk_Request
+ */
+ else if (TCH_CHK_FLAG(port, PRL_FLAGS_MSG_RECEIVED)) {
+ TCH_CLR_FLAG(port, PRL_FLAGS_MSG_RECEIVED);
+ set_state_tch(port, TCH_MESSAGE_RECEIVED);
+ }
}
/*
@@ -1262,11 +1318,12 @@ static void tch_construct_chunked_message_entry(const int port)
/*
* Any message received and not in state TCH_Wait_Chunk_Request
*/
- if (tch[port].flags & PRL_FLAGS_MSG_RECEIVED) {
- tch[port].flags &= ~PRL_FLAGS_MSG_RECEIVED;
+ if (TCH_CHK_FLAG(port, PRL_FLAGS_MSG_RECEIVED)) {
+ TCH_CLR_FLAG(port, PRL_FLAGS_MSG_RECEIVED);
set_state_tch(port, TCH_MESSAGE_RECEIVED);
return;
}
+
/* Prepare to copy chunk into chk_buf */
ext_hdr = (uint16_t *)pdmsg[port].chk_buf;
@@ -1297,13 +1354,12 @@ static void tch_construct_chunked_message_entry(const int port)
pdmsg[port].data_objs = (num + 2 + 3) >> 2;
/* Pass message chunk to Protocol Layer */
- prl_tx[port].flags |= PRL_FLAGS_MSG_XMIT;
- task_set_event(PD_PORT_TO_TASK_ID(port), PD_EVENT_SM, 0);
+ PRL_TX_SET_FLAG(port, PRL_FLAGS_MSG_XMIT);
}
static void tch_construct_chunked_message_run(const int port)
{
- if (pdmsg[port].status_flags & PRL_FLAGS_ABORT)
+ if (PDMSG_CHK_FLAG(port, PRL_FLAGS_ABORT))
set_state_tch(port, TCH_WAIT_FOR_MESSAGE_REQUEST_FROM_PE);
else
set_state_tch(port, TCH_SENDING_CHUNKED_MESSAGE);
@@ -1315,18 +1371,9 @@ static void tch_construct_chunked_message_run(const int port)
static void tch_sending_chunked_message_run(const int port)
{
/*
- * Any message received and not in state TCH_Wait_Chunk_Request
- */
- if (tch[port].flags & PRL_FLAGS_MSG_RECEIVED) {
- tch[port].flags &= ~PRL_FLAGS_MSG_RECEIVED;
- set_state_tch(port, TCH_MESSAGE_RECEIVED);
- return;
- }
-
- /*
* Transmission Error
*/
- if (pdmsg[port].status_flags & PRL_FLAGS_TX_ERROR) {
+ if (PDMSG_CHK_FLAG(port, PRL_FLAGS_TX_ERROR)) {
pe_report_error(port, ERR_TCH_XMIT);
set_state_tch(port, TCH_WAIT_FOR_MESSAGE_REQUEST_FROM_PE);
}
@@ -1341,6 +1388,13 @@ static void tch_sending_chunked_message_run(const int port)
pe_message_sent(port);
}
/*
+ * Any message received and not in state TCH_Wait_Chunk_Request
+ */
+ else if (TCH_CHK_FLAG(port, PRL_FLAGS_MSG_RECEIVED)) {
+ TCH_CLR_FLAG(port, PRL_FLAGS_MSG_RECEIVED);
+ set_state_tch(port, TCH_MESSAGE_RECEIVED);
+ }
+ /*
* Message Transmitted from Protocol Layer &
* Not Last Chunk
*/
@@ -1362,8 +1416,8 @@ static void tch_wait_chunk_request_entry(const int port)
static void tch_wait_chunk_request_run(const int port)
{
- if (tch[port].flags & PRL_FLAGS_MSG_RECEIVED) {
- tch[port].flags &= ~PRL_FLAGS_MSG_RECEIVED;
+ if (TCH_CHK_FLAG(port, PRL_FLAGS_MSG_RECEIVED)) {
+ TCH_CLR_FLAG(port, PRL_FLAGS_MSG_RECEIVED);
if (PD_HEADER_EXT(emsg[port].header)) {
uint16_t exthdr;
@@ -1415,9 +1469,7 @@ static void tch_wait_chunk_request_run(const int port)
static void tch_message_received_entry(const int port)
{
/* Pass message to chunked Rx */
- rch[port].flags |= PRL_FLAGS_MSG_RECEIVED;
- task_set_event(PD_PORT_TO_TASK_ID(port), PD_EVENT_SM, 0);
-
+ RCH_SET_FLAG(port, PRL_FLAGS_MSG_RECEIVED);
}
static void tch_message_received_run(const int port)
@@ -1447,6 +1499,16 @@ static void prl_rx_wait_for_phy_message(const int port, int evt)
msid = PD_HEADER_ID(header);
sop = PD_HEADER_GET_SOP(header);
+ /*
+ * Ignore messages sent to the cable from our
+ * port partner if we aren't a VCONN-powered device.
+ */
+ if (!IS_ENABLED(CONFIG_USB_TYPEC_CTVPD) &&
+ !IS_ENABLED(CONFIG_USB_TYPEC_VPD) &&
+ PD_HEADER_GET_SOP(header) != PD_MSG_SOP &&
+ PD_HEADER_PROLE(header) == PD_PLUG_DFP_UFP)
+ return;
+
if (cnt == 0 && type == PD_CTRL_SOFT_RESET) {
int i;
@@ -1493,7 +1555,7 @@ static void prl_rx_wait_for_phy_message(const int port, int evt)
if (cnt == 0 && type == PD_CTRL_PING) {
/* NOTE: RTR_PING State embedded here. */
emsg[port].len = 0;
- pe_pass_up_message(port);
+ pe_message_received(port);
return;
}
/*
@@ -1506,7 +1568,7 @@ static void prl_rx_wait_for_phy_message(const int port, int evt)
* Send Message to Tx Chunk
* Chunk State Machine
*/
- tch[port].flags |= PRL_FLAGS_MSG_RECEIVED;
+ TCH_SET_FLAG(port, PRL_FLAGS_MSG_RECEIVED);
}
/*
* Message (not Ping) Received from
@@ -1518,7 +1580,7 @@ static void prl_rx_wait_for_phy_message(const int port, int evt)
* Send Message to Rx
* Chunk State Machine
*/
- rch[port].flags |= PRL_FLAGS_MSG_RECEIVED;
+ RCH_SET_FLAG(port, PRL_FLAGS_MSG_RECEIVED);
}
task_set_event(PD_PORT_TO_TASK_ID(port), PD_EVENT_SM, 0);
diff --git a/common/usbc/usb_tc_drp_acc_trysrc_sm.c b/common/usbc/usb_tc_drp_acc_trysrc_sm.c
index 896ffffdd1..44cb8bf9d4 100644
--- a/common/usbc/usb_tc_drp_acc_trysrc_sm.c
+++ b/common/usbc/usb_tc_drp_acc_trysrc_sm.c
@@ -14,6 +14,8 @@
#include "usb_common.h"
#include "usb_mux.h"
#include "usb_pd.h"
+#include "usb_pe_sm.h"
+#include "usb_prl_sm.h"
#include "usb_sm.h"
#include "usb_tc_sm.h"
#include "usbc_ppc.h"
@@ -38,6 +40,29 @@
#define TC_FLAGS_LPM_TRANSITION BIT(3)
#define TC_FLAGS_LPM_ENGAGED BIT(4)
#define TC_FLAGS_LPM_REQUESTED BIT(5)
+#define TC_FLAGS_CTVPD_DETECTED BIT(6)
+#define TC_FLAGS_REQUEST_VC_SWAP_ON BIT(7)
+#define TC_FLAGS_REQUEST_VC_SWAP_OFF BIT(8)
+#define TC_FLAGS_REJECT_VCONN_SWAP BIT(9)
+#define TC_FLAGS_REQUEST_PR_SWAP BIT(10)
+#define TC_FLAGS_REQUEST_DR_SWAP BIT(11)
+#define TC_FLAGS_POWER_OFF_SNK BIT(12)
+#define TC_FLAGS_PARTNER_EXTPOWER BIT(13)
+#define TC_FLAGS_PARTNER_DR_DATA BIT(14)
+#define TC_FLAGS_PARTNER_DR_POWER BIT(15)
+#define TC_FLAGS_PARTNER_PD_CAPABLE BIT(16)
+#define TC_FLAGS_HARD_RESET BIT(17)
+#define TC_FLAGS_PARTNER_USB_COMM BIT(18)
+#define TC_FLAGS_PR_SWAP_IN_PROGRESS BIT(19)
+#define TC_FLAGS_DO_PR_SWAP BIT(20)
+#define TC_FLAGS_DISC_IDENT_IN_PROGRESS BIT(21)
+
+enum ps_reset_sequence {
+ PS_STATE0,
+ PS_STATE1,
+ PS_STATE2,
+ PS_STATE3
+};
/* List of all TypeC-level states */
enum usb_tc_state {
@@ -54,6 +79,10 @@ enum usb_tc_state {
TC_ATTACHED_SRC,
TC_TRY_SRC,
TC_TRY_WAIT_SNK,
+#ifdef CONFIG_USB_PE_SM
+ TC_CT_UNATTACHED_SNK,
+ TC_CT_ATTACHED_SNK,
+#endif
/* Super States */
TC_CC_OPEN,
TC_CC_RD,
@@ -95,6 +124,10 @@ static struct type_c {
uint8_t data_role;
/* Higher-level power deliver state machines are enabled if true. */
uint8_t pd_enable;
+#ifdef CONFIG_USB_PE_SM
+ /* Power supply reset sequence during a hard reset */
+ enum ps_reset_sequence ps_reset_state;
+#endif
/* Port polarity : 0 => CC1 is CC line, 1 => CC2 is CC line */
uint8_t polarity;
/* port flags, see TC_FLAGS_* */
@@ -148,6 +181,17 @@ static enum pd_dual_role_states drp_state[CONFIG_USB_PD_PORT_COUNT] = {
[0 ... (CONFIG_USB_PD_PORT_COUNT - 1)] =
CONFIG_USB_PD_INITIAL_DRP_STATE};
+#ifdef CONFIG_USBC_VCONN
+static void set_vconn(int port, int enable);
+#endif
+
+#ifdef CONFIG_USB_PE_SM
+
+#ifdef CONFIG_USB_PD_ALT_MODE_DFP
+/* Tracker for which task is waiting on sysjump prep to finish */
+static volatile task_id_t sysjump_task_waiting = TASK_ID_INVALID;
+#endif
+
/*
* 4 entry rw_hash table of type-C devices that AP has firmware updates for.
*/
@@ -157,7 +201,22 @@ static struct ec_params_usb_pd_rw_hash_entry rw_hash_table[RW_HASH_ENTRIES];
#endif
/* Forward declare common, private functions */
-static void tc_set_data_role(int port, int role);
+#ifdef CONFIG_USB_PD_TCPC_LOW_POWER
+static void exit_low_power_mode(int port);
+static void handle_device_access(int port);
+static int pd_device_in_low_power(int port);
+static void pd_wait_for_wakeup(int port);
+static int reset_device_and_notify(int port);
+#endif /* CONFIG_USB_PD_TCPC_LOW_POWER */
+
+#ifdef CONFIG_POWER_COMMON
+static void handle_new_power_state(int port);
+#endif /* CONFIG_POWER_COMMON */
+
+static void pd_update_dual_role_config(int port);
+#endif /* CONFIG_USB_PE_SM */
+
+/* Forward declare common, private functions */
static void set_state_tc(const int port, const enum usb_tc_state new_state);
test_export_static enum usb_tc_state get_state_tc(const int port);
@@ -167,6 +226,8 @@ static uint8_t pd_try_src_enable;
static void pd_update_try_source(void);
#endif
+static void sink_stop_drawing_current(int port);
+
/*
* Public Functions
*
@@ -201,21 +262,299 @@ uint16_t pd_get_identity_vid(int port)
return 0;
}
-#endif /* !defined(CONFIG_USB_PRL_SM) */
+#endif /* !CONFIG_USB_PRL_SM */
void pd_update_contract(int port)
{
- /* DO NOTHING */
+ if (IS_ENABLED(CONFIG_USB_PE_SM)) {
+ /* Must be in Attached.SRC when this function is called */
+ if (get_state_tc(port) == TC_ATTACHED_SRC)
+ pe_dpm_request(port, DPM_REQUEST_SRC_CAP_CHANGE);
+ }
+}
+
+void pd_request_source_voltage(int port, int mv)
+{
+ if (IS_ENABLED(CONFIG_USB_PE_SM)) {
+ pd_set_max_voltage(mv);
+
+ /* Must be in Attached.SNK when this function is called */
+ if (get_state_tc(port) == TC_ATTACHED_SNK)
+ pe_dpm_request(port, DPM_REQUEST_NEW_POWER_LEVEL);
+ else
+ TC_SET_FLAG(port, TC_FLAGS_REQUEST_PR_SWAP);
+
+ task_wake(PD_PORT_TO_TASK_ID(port));
+ }
+}
+
+void pd_set_external_voltage_limit(int port, int mv)
+{
+ if (IS_ENABLED(CONFIG_USB_PE_SM)) {
+ pd_set_max_voltage(mv);
+
+ /* Must be in Attached.SNK when this function is called */
+ if (get_state_tc(port) == TC_ATTACHED_SNK)
+ pe_dpm_request(port, DPM_REQUEST_NEW_POWER_LEVEL);
+
+ task_wake(PD_PORT_TO_TASK_ID(port));
+ }
}
void pd_set_new_power_request(int port)
{
- /* DO NOTHING */
+ if (IS_ENABLED(CONFIG_USB_PE_SM)) {
+ /* Must be in Attached.SNK when this function is called */
+ if (get_state_tc(port) == TC_ATTACHED_SNK)
+ pe_dpm_request(port, DPM_REQUEST_NEW_POWER_LEVEL);
+ }
}
void pd_request_power_swap(int port)
{
- /* DO NOTHING */
+ if (IS_ENABLED(CONFIG_USB_PE_SM)) {
+ /*
+ * Must be in Attached.SRC or Attached.SNK when this function
+ * is called
+ */
+ if (get_state_tc(port) == TC_ATTACHED_SRC ||
+ get_state_tc(port) == TC_ATTACHED_SNK) {
+ TC_SET_FLAG(port, TC_FLAGS_PR_SWAP_IN_PROGRESS);
+ }
+ }
+}
+
+#ifdef CONFIG_USB_PE_SM
+void pd_set_dual_role(int port, enum pd_dual_role_states state)
+{
+ drp_state[port] = state;
+
+ if (IS_ENABLED(CONFIG_USB_PD_TRY_SRC))
+ pd_update_try_source();
+}
+
+int pd_get_partner_data_swap_capable(int port)
+{
+ /* return data swap capable status of port partner */
+ return TC_CHK_FLAG(port, TC_FLAGS_PARTNER_DR_DATA);
+}
+
+int pd_comm_is_enabled(int port)
+{
+ return tc[port].pd_enable;
+}
+
+void pd_send_vdm(int port, uint32_t vid, int cmd, const uint32_t *data,
+ int count)
+{
+ pe_send_vdm(port, vid, cmd, data, count);
+}
+
+void pd_request_data_swap(int port)
+{
+ /*
+ * Must be in Attached.SRC or Attached.SNK when this function
+ * is called
+ */
+ if (get_state_tc(port) == TC_ATTACHED_SRC ||
+ get_state_tc(port) == TC_ATTACHED_SNK) {
+ TC_SET_FLAG(port, TC_FLAGS_REQUEST_DR_SWAP);
+ task_set_event(PD_PORT_TO_TASK_ID(port), PD_EVENT_SM, 0);
+ }
+}
+
+/*
+ * Return true if partner port is a DTS or TS capable of entering debug
+ * mode (e.g. presenting Rp/Rp or Rd/Rd).
+ */
+int pd_ts_dts_plugged(int port)
+{
+ return TC_CHK_FLAG(port, TC_FLAGS_TS_DTS_PARTNER);
+}
+
+/* Return true if partner port is known to be PD capable. */
+int pd_capable(int port)
+{
+ return TC_CHK_FLAG(port, TC_FLAGS_PARTNER_PD_CAPABLE);
+}
+
+/*
+ * Return true if partner port is capable of communication over USB data
+ * lines.
+ */
+int pd_get_partner_usb_comm_capable(int port)
+{
+ return TC_CHK_FLAG(port, TC_FLAGS_PARTNER_USB_COMM);
+}
+
+enum pd_dual_role_states pd_get_dual_role(int port)
+{
+ return drp_state[port];
+}
+
+int pd_dev_store_rw_hash(int port, uint16_t dev_id, uint32_t *rw_hash,
+ uint32_t current_image)
+{
+ int i;
+
+ tc[port].dev_id = dev_id;
+ memcpy(tc[port].dev_rw_hash, rw_hash, PD_RW_HASH_SIZE);
+#ifdef CONFIG_CMD_PD_DEV_DUMP_INFO
+ if (debug_level >= 2)
+ pd_dev_dump_info(dev_id, (uint8_t *)rw_hash);
+#endif
+ tc[port].current_image = current_image;
+
+ /* Search table for matching device / hash */
+ for (i = 0; i < RW_HASH_ENTRIES; i++)
+ if (dev_id == rw_hash_table[i].dev_id)
+ return !memcmp(rw_hash,
+ rw_hash_table[i].dev_rw_hash,
+ PD_RW_HASH_SIZE);
+ return 0;
+}
+
+int tc_is_attached_src(int port)
+{
+ return get_state_tc(port) == TC_ATTACHED_SRC;
+}
+
+int tc_is_attached_snk(int port)
+{
+ return get_state_tc(port) == TC_ATTACHED_SNK;
+}
+
+void tc_partner_dr_power(int port, int en)
+{
+ if (en)
+ TC_SET_FLAG(port, TC_FLAGS_PARTNER_DR_POWER);
+ else
+ TC_CLR_FLAG(port, TC_FLAGS_PARTNER_DR_POWER);
+}
+
+void tc_partner_extpower(int port, int en)
+{
+ if (en)
+ TC_SET_FLAG(port, TC_FLAGS_PARTNER_EXTPOWER);
+ else
+ TC_CLR_FLAG(port, TC_FLAGS_PARTNER_EXTPOWER);
+}
+
+void tc_partner_usb_comm(int port, int en)
+{
+ if (en)
+ TC_SET_FLAG(port, TC_FLAGS_PARTNER_USB_COMM);
+ else
+ TC_CLR_FLAG(port, TC_FLAGS_PARTNER_USB_COMM);
+}
+
+void tc_partner_dr_data(int port, int en)
+{
+ if (en)
+ TC_SET_FLAG(port, TC_FLAGS_PARTNER_DR_DATA);
+ else
+ TC_CLR_FLAG(port, TC_FLAGS_PARTNER_DR_DATA);
+}
+
+void tc_pd_connection(int port, int en)
+{
+ if (en)
+ TC_SET_FLAG(port, TC_FLAGS_PARTNER_PD_CAPABLE);
+ else
+ TC_CLR_FLAG(port, TC_FLAGS_PARTNER_PD_CAPABLE);
+}
+
+void tc_ctvpd_detected(int port)
+{
+ TC_SET_FLAG(port, TC_FLAGS_CTVPD_DETECTED);
+}
+
+void tc_vconn_on(int port)
+{
+ set_vconn(port, 1);
+}
+
+int tc_check_vconn_swap(int port)
+{
+#ifdef CONFIG_USBC_VCONN
+ if (TC_CHK_FLAG(port, TC_FLAGS_REJECT_VCONN_SWAP))
+ return 0;
+
+ return pd_check_vconn_swap(port);
+#else
+ return 0;
+#endif
+}
+
+void tc_pr_swap_complete(int port)
+{
+ TC_CLR_FLAG(port, TC_FLAGS_PR_SWAP_IN_PROGRESS);
+}
+
+void tc_prs_src_snk_assert_rd(int port)
+{
+ /* Must be in Attached.SRC when this function is called */
+ if (get_state_tc(port) == TC_ATTACHED_SRC) {
+ /* Transition to Attached.SNK to assert Rd */
+ TC_SET_FLAG(port, TC_FLAGS_DO_PR_SWAP);
+ task_set_event(PD_PORT_TO_TASK_ID(port), PD_EVENT_SM, 0);
+ }
+}
+
+void tc_prs_snk_src_assert_rp(int port)
+{
+ /* Must be in Attached.SNK when this function is called */
+ if (get_state_tc(port) == TC_ATTACHED_SNK) {
+ /* Transition to Attached.SRC to assert Rp */
+ TC_SET_FLAG(port, TC_FLAGS_DO_PR_SWAP);
+ task_set_event(PD_PORT_TO_TASK_ID(port), PD_EVENT_SM, 0);
+ }
+}
+
+void tc_hard_reset(int port)
+{
+ TC_SET_FLAG(port, TC_FLAGS_HARD_RESET);
+ task_set_event(PD_PORT_TO_TASK_ID(port), PD_EVENT_SM, 0);
+}
+
+void tc_disc_ident_in_progress(int port)
+{
+ TC_SET_FLAG(port, TC_FLAGS_DISC_IDENT_IN_PROGRESS);
+}
+
+void tc_disc_ident_complete(int port)
+{
+ TC_CLR_FLAG(port, TC_FLAGS_DISC_IDENT_IN_PROGRESS);
+}
+#endif /* CONFIG_USB_PE_SM */
+
+void tc_snk_power_off(int port)
+{
+ if (get_state_tc(port) == TC_ATTACHED_SNK) {
+ TC_SET_FLAG(port, TC_FLAGS_POWER_OFF_SNK);
+ sink_stop_drawing_current(port);
+ }
+}
+
+int tc_src_power_on(int port)
+{
+ if (get_state_tc(port) == TC_ATTACHED_SRC)
+ return pd_set_power_supply_ready(port);
+
+ return 0;
+}
+
+void tc_src_power_off(int port)
+{
+ if (get_state_tc(port) == TC_ATTACHED_SRC) {
+ /* Remove VBUS */
+ pd_power_supply_reset(port);
+
+ if (IS_ENABLED(CONFIG_CHARGE_MANAGER)) {
+ charge_manager_set_ceil(port, CEIL_REQUESTOR_PD,
+ CHARGE_CEIL_NONE);
+ }
+ }
}
void pd_set_suspend(int port, int enable)
@@ -234,6 +573,9 @@ int pd_is_port_enabled(int port)
int pd_fetch_acc_log_entry(int port)
{
+ if (IS_ENABLED(CONFIG_USB_PE_SM))
+ pd_send_vdm(port, USB_VID_GOOGLE, VDO_CMD_GET_LOG, NULL, 0);
+
return EC_RES_SUCCESS;
}
@@ -273,25 +615,30 @@ int pd_is_connected(int port)
*/
void pd_prepare_sysjump(void)
{
- /*
- * We can't be in an alternate mode since PD comm is disabled, so
- * no need to send the event
- */
-}
-#endif
-
-void tc_src_power_off(int port)
-{
- if (get_state_tc(port) == TC_ATTACHED_SRC) {
- /* Remove VBUS */
- pd_power_supply_reset(port);
+ if (IS_ENABLED(CONFIG_USB_PE_SM)) {
+ int i;
- if (IS_ENABLED(CONFIG_CHARGE_MANAGER)) {
- charge_manager_set_ceil(port, CEIL_REQUESTOR_PD,
- CHARGE_CEIL_NONE);
+ /*
+ * Exit modes before sysjump so we can cleanly enter again
+ * later
+ */
+ for (i = 0; i < CONFIG_USB_PD_PORT_COUNT; i++) {
+ /*
+ * We can't be in an alternate mode if PD comm is
+ * disabled, so no need to send the event
+ */
+ if (!pd_comm_is_enabled(i))
+ continue;
+
+ sysjump_task_waiting = task_get_current();
+ task_set_event(PD_PORT_TO_TASK_ID(i),
+ PD_EVENT_SYSJUMP, 0);
+ task_wait_event_mask(TASK_EVENT_SYSJUMP_READY, -1);
+ sysjump_task_waiting = TASK_ID_INVALID;
}
}
}
+#endif
void tc_start_error_recovery(int port)
{
@@ -329,6 +676,11 @@ static void restart_tc_sm(int port, enum usb_tc_state start_state)
tc[port].flags = 0;
tc[port].evt_timeout = 5*MSEC;
+
+#ifdef CONFIG_USB_PE_SM
+ tc[port].pd_enable = 0;
+ tc[port].ps_reset_state = PS_STATE0;
+#endif
}
void tc_state_init(int port)
@@ -372,11 +724,6 @@ void tc_set_timeout(int port, uint64_t timeout)
tc[port].evt_timeout = timeout;
}
-void tc_event_check(int port, int evt)
-{
- /* NO EVENTS TO CHECK */
-}
-
/*
* Private Functions
*/
@@ -404,6 +751,41 @@ static void print_current_state(const int port)
CPRINTS("C%d: %s", port, tc_state_names[get_state_tc(port)]);
}
+#ifdef CONFIG_USB_PE_SM
+#ifdef CONFIG_USB_PD_TCPC_LOW_POWER
+/* This is only called from the PD task that owns the port. */
+static void exit_low_power_mode(int port)
+{
+ if (TC_CHK_FLAG(port, TC_FLAGS_LPM_ENGAGED))
+ reset_device_and_notify(port);
+ else
+ TC_CLR_FLAG(port, TC_FLAGS_LPM_REQUESTED);
+}
+#endif
+#endif
+
+void tc_event_check(int port, int evt)
+{
+#ifdef CONFIG_USB_PE_SM
+ if (IS_ENABLED(CONFIG_USB_PD_TCPC_LOW_POWER)) {
+ if (evt & PD_EXIT_LOW_POWER_EVENT_MASK)
+ exit_low_power_mode(port);
+
+ if (evt & PD_EVENT_DEVICE_ACCESSED)
+ handle_device_access(port);
+ }
+
+ if (IS_ENABLED(CONFIG_POWER_COMMON)) {
+ if (evt & PD_EVENT_POWER_STATE_CHANGE)
+ handle_new_power_state(port);
+ }
+
+ if (evt & PD_EVENT_UPDATE_DUAL_ROLE)
+ pd_update_dual_role_config(port);
+#endif
+
+}
+
/*
* CC values for regular sources and Debug sources (aka DTS)
*
@@ -417,7 +799,7 @@ static void print_current_state(const int port)
* DTS USB-C @ 3 A Rp3A0 RpUSB
*/
-static void tc_set_data_role(int port, int role)
+void tc_set_data_role(int port, int role)
{
tc[port].data_role = role;
@@ -544,9 +926,54 @@ void pd_deferred_resume(int port)
}
#endif /* CONFIG_USB_PD_DEFERRED_RESUME */
+#ifdef CONFIG_USB_PE_SM
+
+/* This must only be called from the PD task */
+static void pd_update_dual_role_config(int port)
+{
+ /*
+ * Change to sink if port is currently a source AND (new DRP
+ * state is force sink OR new DRP state is either toggle off
+ * or debug accessory toggle only and we are in the source
+ * disconnected state).
+ */
+ if (tc[port].power_role == PD_ROLE_SOURCE &&
+ ((drp_state[port] == PD_DRP_FORCE_SINK &&
+ !pd_ts_dts_plugged(port)) ||
+ (drp_state[port] == PD_DRP_TOGGLE_OFF &&
+ get_state_tc(port) == TC_UNATTACHED_SRC))) {
+ set_state_tc(port, TC_UNATTACHED_SNK);
+ } else if (tc[port].power_role == PD_ROLE_SINK &&
+ drp_state[port] == PD_DRP_FORCE_SOURCE) {
+ /*
+ * Change to source if port is currently a sink and the
+ * new DRP state is force source.
+ */
+ set_state_tc(port, TC_UNATTACHED_SRC);
+ }
+}
+
+#ifdef CONFIG_POWER_COMMON
+static void handle_new_power_state(int port)
+{
+ if (IS_ENABLED(CONFIG_USB_PE_SM)) {
+ if (chipset_in_or_transitioning_to_state(CHIPSET_STATE_ANY_OFF))
+ /*
+			 * The SoC will negotiate DP mode again when it
+ * boots up
+ */
+ pe_exit_dp_mode(port);
+ }
+
+ /* Ensure mux is set properly after chipset transition */
+ set_usb_mux_with_current_data_role(port);
+}
+#endif /* CONFIG_POWER_COMMON */
+
/*
* HOST COMMANDS
*/
+#ifdef HAS_TASK_HOSTCMD
static int hc_pd_ports(struct host_cmd_handler_args *args)
{
struct ec_response_usb_pd_ports *r = args->response;
@@ -559,12 +986,167 @@ static int hc_pd_ports(struct host_cmd_handler_args *args)
DECLARE_HOST_COMMAND(EC_CMD_USB_PD_PORTS,
hc_pd_ports,
EC_VER_MASK(0));
+static const enum pd_dual_role_states dual_role_map[USB_PD_CTRL_ROLE_COUNT] = {
+ [USB_PD_CTRL_ROLE_TOGGLE_ON] = PD_DRP_TOGGLE_ON,
+ [USB_PD_CTRL_ROLE_TOGGLE_OFF] = PD_DRP_TOGGLE_OFF,
+ [USB_PD_CTRL_ROLE_FORCE_SINK] = PD_DRP_FORCE_SINK,
+ [USB_PD_CTRL_ROLE_FORCE_SOURCE] = PD_DRP_FORCE_SOURCE,
+ [USB_PD_CTRL_ROLE_FREEZE] = PD_DRP_FREEZE,
+};
+
+#ifdef CONFIG_USBC_SS_MUX
+static const enum typec_mux typec_mux_map[USB_PD_CTRL_MUX_COUNT] = {
+ [USB_PD_CTRL_MUX_NONE] = TYPEC_MUX_NONE,
+ [USB_PD_CTRL_MUX_USB] = TYPEC_MUX_USB,
+ [USB_PD_CTRL_MUX_AUTO] = TYPEC_MUX_DP,
+ [USB_PD_CTRL_MUX_DP] = TYPEC_MUX_DP,
+ [USB_PD_CTRL_MUX_DOCK] = TYPEC_MUX_DOCK,
+};
+#endif
+
+static int hc_usb_pd_control(struct host_cmd_handler_args *args)
+{
+ const struct ec_params_usb_pd_control *p = args->params;
+ struct ec_response_usb_pd_control_v1 *r_v1 = args->response;
+ struct ec_response_usb_pd_control *r = args->response;
+
+ if (p->port >= CONFIG_USB_PD_PORT_COUNT)
+ return EC_RES_INVALID_PARAM;
+
+ if (p->role >= USB_PD_CTRL_ROLE_COUNT ||
+ p->mux >= USB_PD_CTRL_MUX_COUNT)
+ return EC_RES_INVALID_PARAM;
+
+ if (p->role != USB_PD_CTRL_ROLE_NO_CHANGE)
+ pd_set_dual_role(p->port, dual_role_map[p->role]);
+
+#ifdef CONFIG_USBC_SS_MUX
+ if (p->mux != USB_PD_CTRL_MUX_NO_CHANGE)
+ usb_mux_set(p->port, typec_mux_map[p->mux],
+ typec_mux_map[p->mux] == TYPEC_MUX_NONE ?
+ USB_SWITCH_DISCONNECT :
+ USB_SWITCH_CONNECT,
+ pd_get_polarity(p->port));
+#endif /* CONFIG_USBC_SS_MUX */
+
+ if (p->swap == USB_PD_CTRL_SWAP_DATA)
+ pd_request_data_swap(p->port);
+ else if (p->swap == USB_PD_CTRL_SWAP_POWER)
+ pd_request_power_swap(p->port);
+#ifdef CONFIG_USBC_VCONN_SWAP
+ else if (p->swap == USB_PD_CTRL_SWAP_VCONN)
+ pe_dpm_request(p->port, DPM_REQUEST_VCONN_SWAP);
+#endif
+
+ if (args->version == 0) {
+ r->enabled = pd_comm_is_enabled(p->port);
+ r->role = tc[p->port].power_role;
+ r->polarity = tc[p->port].polarity;
+ r->state = get_state_tc(p->port);
+ args->response_size = sizeof(*r);
+ } else {
+ r_v1->enabled =
+ (pd_comm_is_enabled(p->port) ?
+ PD_CTRL_RESP_ENABLED_COMMS : 0) |
+ (pd_is_connected(p->port) ?
+ PD_CTRL_RESP_ENABLED_CONNECTED : 0) |
+ (TC_CHK_FLAG(p->port, TC_FLAGS_PARTNER_PD_CAPABLE) ?
+ PD_CTRL_RESP_ENABLED_PD_CAPABLE : 0);
+ r_v1->role =
+ (tc[p->port].power_role ? PD_CTRL_RESP_ROLE_POWER : 0) |
+ (tc[p->port].data_role ? PD_CTRL_RESP_ROLE_DATA : 0) |
+ (TC_CHK_FLAG(p->port, TC_FLAGS_VCONN_ON) ?
+ PD_CTRL_RESP_ROLE_VCONN : 0) |
+ (TC_CHK_FLAG(p->port, TC_FLAGS_PARTNER_DR_POWER) ?
+ PD_CTRL_RESP_ROLE_DR_POWER : 0) |
+ (TC_CHK_FLAG(p->port, TC_FLAGS_PARTNER_DR_DATA) ?
+ PD_CTRL_RESP_ROLE_DR_DATA : 0) |
+ (TC_CHK_FLAG(p->port, TC_FLAGS_PARTNER_USB_COMM) ?
+ PD_CTRL_RESP_ROLE_USB_COMM : 0) |
+ (TC_CHK_FLAG(p->port, TC_FLAGS_PARTNER_EXTPOWER) ?
+ PD_CTRL_RESP_ROLE_EXT_POWERED : 0);
+ r_v1->polarity = tc[p->port].polarity;
+ strzcpy(r_v1->state, tc_state_names[get_state_tc(p->port)],
+ sizeof(r_v1->state));
+ args->response_size = sizeof(*r_v1);
+ }
+ return EC_RES_SUCCESS;
+}
+DECLARE_HOST_COMMAND(EC_CMD_USB_PD_CONTROL,
+ hc_usb_pd_control,
+ EC_VER_MASK(0) | EC_VER_MASK(1));
+
+static int hc_remote_flash(struct host_cmd_handler_args *args)
+{
+ const struct ec_params_usb_pd_fw_update *p = args->params;
+ int port = p->port;
+ int rv = EC_RES_SUCCESS;
+ const uint32_t *data = &(p->size) + 1;
+ int i, size;
+
+ if (port >= CONFIG_USB_PD_PORT_COUNT)
+ return EC_RES_INVALID_PARAM;
+
+ if (p->size + sizeof(*p) > args->params_size)
+ return EC_RES_INVALID_PARAM;
+
+#if defined(CONFIG_BATTERY_PRESENT_CUSTOM) || \
+defined(CONFIG_BATTERY_PRESENT_GPIO)
+ /*
+ * Do not allow PD firmware update if no battery and this port
+ * is sinking power, because we will lose power.
+ */
+ if (battery_is_present() != BP_YES &&
+ charge_manager_get_active_charge_port() == port)
+ return EC_RES_UNAVAILABLE;
+#endif
+
+ switch (p->cmd) {
+ case USB_PD_FW_REBOOT:
+ pe_send_vdm(port, USB_VID_GOOGLE, VDO_CMD_REBOOT, NULL, 0);
+ /*
+ * Return immediately to free pending i2c bus. Host needs to
+ * manage this delay.
+ */
+ return EC_RES_SUCCESS;
+
+ case USB_PD_FW_FLASH_ERASE:
+ pe_send_vdm(port, USB_VID_GOOGLE, VDO_CMD_FLASH_ERASE, NULL, 0);
+ /*
+ * Return immediately. Host needs to manage delays here which
+ * can be as long as 1.2 seconds on 64KB RW flash.
+ */
+ return EC_RES_SUCCESS;
+
+ case USB_PD_FW_ERASE_SIG:
+ pe_send_vdm(port, USB_VID_GOOGLE, VDO_CMD_ERASE_SIG, NULL, 0);
+ break;
+
+ case USB_PD_FW_FLASH_WRITE:
+ /* Data size must be a multiple of 4 */
+ if (!p->size || p->size % 4)
+ return EC_RES_INVALID_PARAM;
+
+ size = p->size / 4;
+ for (i = 0; i < size; i += VDO_MAX_SIZE - 1) {
+ pe_send_vdm(port, USB_VID_GOOGLE, VDO_CMD_FLASH_WRITE,
+ data + i, MIN(size - i, VDO_MAX_SIZE - 1));
+ }
+ return EC_RES_SUCCESS;
+
+ default:
+ return EC_RES_INVALID_PARAM;
+ }
+
+ return rv;
+}
+DECLARE_HOST_COMMAND(EC_CMD_USB_PD_FW_UPDATE,
+ hc_remote_flash,
+ EC_VER_MASK(0));
static int hc_remote_rw_hash_entry(struct host_cmd_handler_args *args)
{
- int i;
- int idx = 0;
- int found = 0;
+ int i, idx = 0, found = 0;
const struct ec_params_usb_pd_rw_hash_entry *p = args->params;
static int rw_hash_next_idx;
@@ -585,7 +1167,6 @@ static int hc_remote_rw_hash_entry(struct host_cmd_handler_args *args)
if (rw_hash_next_idx == RW_HASH_ENTRIES)
rw_hash_next_idx = 0;
}
-
memcpy(&rw_hash_table[idx], p, sizeof(*p));
return EC_RES_SUCCESS;
@@ -617,6 +1198,143 @@ DECLARE_HOST_COMMAND(EC_CMD_USB_PD_DEV_INFO,
hc_remote_pd_dev_info,
EC_VER_MASK(0));
+#ifndef CONFIG_USB_PD_TCPC
+#ifdef CONFIG_EC_CMD_PD_CHIP_INFO
+static int hc_remote_pd_chip_info(struct host_cmd_handler_args *args)
+{
+ const struct ec_params_pd_chip_info *p = args->params;
+ struct ec_response_pd_chip_info_v1 *info;
+
+ if (p->port >= CONFIG_USB_PD_PORT_COUNT)
+ return EC_RES_INVALID_PARAM;
+
+ if (tcpm_get_chip_info(p->port, p->live, &info))
+ return EC_RES_ERROR;
+
+ /*
+ * Take advantage of the fact that v0 and v1 structs have the
+ * same layout for v0 data. (v1 just appends data)
+ */
+ args->response_size =
+ args->version ? sizeof(struct ec_response_pd_chip_info_v1)
+ : sizeof(struct ec_response_pd_chip_info);
+
+ memcpy(args->response, info, args->response_size);
+
+ return EC_RES_SUCCESS;
+}
+DECLARE_HOST_COMMAND(EC_CMD_PD_CHIP_INFO,
+ hc_remote_pd_chip_info,
+ EC_VER_MASK(0) | EC_VER_MASK(1));
+#endif /* CONFIG_EC_CMD_PD_CHIP_INFO */
+#endif /* !CONFIG_USB_PD_TCPC */
+
+#ifdef CONFIG_HOSTCMD_EVENTS
+void pd_notify_dp_alt_mode_entry(void)
+{
+ /*
+ * Note: EC_HOST_EVENT_PD_MCU may be a more appropriate host event to
+ * send, but we do not send that here because there are other cases
+ * where we send EC_HOST_EVENT_PD_MCU such as charger insertion or
+ * removal. Currently, those do not wake the system up, but
+ * EC_HOST_EVENT_MODE_CHANGE does. If we made the system wake up on
+	 * EC_HOST_EVENT_PD_MCU, we would be turning on the internal display on
+	 * every charger insertion/removal, which is not desired.
+ */
+ CPRINTS("Notifying AP of DP Alt Mode Entry...");
+ host_set_single_event(EC_HOST_EVENT_MODE_CHANGE);
+}
+#endif /* CONFIG_HOSTCMD_EVENTS */
+
+#ifdef CONFIG_USB_PD_ALT_MODE_DFP
+static int hc_remote_pd_set_amode(struct host_cmd_handler_args *args)
+{
+ const struct ec_params_usb_pd_set_mode_request *p = args->params;
+
+ if ((p->port >= CONFIG_USB_PD_PORT_COUNT) || (!p->svid) || (!p->opos))
+ return EC_RES_INVALID_PARAM;
+
+ switch (p->cmd) {
+ case PD_EXIT_MODE:
+ if (pd_dfp_exit_mode(p->port, p->svid, p->opos))
+ pd_send_vdm(p->port, p->svid,
+ CMD_EXIT_MODE | VDO_OPOS(p->opos), NULL, 0);
+ else {
+ CPRINTF("Failed exit mode\n");
+ return EC_RES_ERROR;
+ }
+ break;
+ case PD_ENTER_MODE:
+ if (pd_dfp_enter_mode(p->port, p->svid, p->opos))
+ pd_send_vdm(p->port, p->svid, CMD_ENTER_MODE |
+ VDO_OPOS(p->opos), NULL, 0);
+ break;
+ default:
+ return EC_RES_INVALID_PARAM;
+ }
+ return EC_RES_SUCCESS;
+}
+DECLARE_HOST_COMMAND(EC_CMD_USB_PD_SET_AMODE,
+ hc_remote_pd_set_amode,
+ EC_VER_MASK(0));
+#endif /* CONFIG_USB_PD_ALT_MODE_DFP */
+#endif /* HAS_TASK_HOSTCMD */
+
+#if defined(CONFIG_USB_PD_ALT_MODE) && !defined(CONFIG_USB_PD_ALT_MODE_DFP)
+void pd_send_hpd(int port, enum hpd_event hpd)
+{
+ uint32_t data[1];
+ int opos = pd_alt_mode(port, USB_SID_DISPLAYPORT);
+
+ if (!opos)
+ return;
+
+ data[0] =
+ VDO_DP_STATUS((hpd == hpd_irq), /* IRQ_HPD */
+ (hpd != hpd_low), /* HPD_HI|LOW */
+ 0, /* request exit DP */
+ 0, /* request exit USB */
+ 0, /* MF pref */
+ 1, /* enabled */
+ 0, /* power low */
+ 0x2);
+ pd_send_vdm(port, USB_SID_DISPLAYPORT,
+ VDO_OPOS(opos) | CMD_ATTENTION, data, 1);
+}
+#endif
+#endif /* CONFIG_USB_PE_SM */
+
+#ifdef CONFIG_USBC_VCONN_SWAP
+void pd_request_vconn_swap_off(int port)
+{
+ if (get_state_tc(port) == TC_ATTACHED_SRC ||
+ get_state_tc(port) == TC_ATTACHED_SNK) {
+ TC_SET_FLAG(port, TC_FLAGS_REQUEST_VC_SWAP_OFF);
+ task_wake(PD_PORT_TO_TASK_ID(port));
+ }
+}
+
+void pd_request_vconn_swap_on(int port)
+{
+ if (get_state_tc(port) == TC_ATTACHED_SRC ||
+ get_state_tc(port) == TC_ATTACHED_SNK) {
+ TC_SET_FLAG(port, TC_FLAGS_REQUEST_VC_SWAP_ON);
+ task_wake(PD_PORT_TO_TASK_ID(port));
+ }
+}
+#endif
+
+#ifdef CONFIG_USBC_VCONN
+int tc_is_vconn_src(int port)
+{
+ if (get_state_tc(port) == TC_ATTACHED_SRC ||
+ get_state_tc(port) == TC_ATTACHED_SNK)
+ return TC_CHK_FLAG(port, TC_FLAGS_VCONN_ON);
+ else
+ return -1;
+}
+#endif
+
#ifdef CONFIG_USBC_PPC
static void pd_send_hard_reset(int port)
{
@@ -956,6 +1674,11 @@ static void tc_unattached_snk_entry(const int port)
*/
pd_execute_data_swap(port, PD_ROLE_DISCONNECTED);
tc[port].next_role_swap = get_time().val + PD_T_DRP_SNK;
+
+ if (IS_ENABLED(CONFIG_USB_PE_SM)) {
+ tc[port].flags = 0;
+ tc[port].pd_enable = 0;
+ }
}
static void tc_unattached_snk_run(const int port)
@@ -967,6 +1690,15 @@ static void tc_unattached_snk_run(const int port)
* status after role changes
*/
+ if (IS_ENABLED(CONFIG_USB_PE_SM)) {
+ if (TC_CHK_FLAG(port, TC_FLAGS_HARD_RESET)) {
+ TC_CLR_FLAG(port, TC_FLAGS_HARD_RESET);
+ tc_set_data_role(port, PD_ROLE_UFP);
+ /* Inform Policy Engine that hard reset is complete */
+ pe_ps_reset_complete(port);
+ }
+ }
+
/* Check for connection */
tcpm_get_cc(port, &cc1, &cc2);
@@ -1034,6 +1766,11 @@ static void tc_attach_wait_snk_run(const int port)
*/
if (new_cc_state == PD_CC_NONE &&
get_time().val > tc[port].pd_debounce) {
+ if (IS_ENABLED(CONFIG_USB_PE_SM) &&
+ IS_ENABLED(CONFIG_USB_PD_ALT_MODE_DFP)) {
+ pd_dfp_exit_mode(port, 0, 0);
+ }
+
/* We are detached */
set_state_tc(port, TC_UNATTACHED_SRC);
return;
@@ -1044,13 +1781,14 @@ static void tc_attach_wait_snk_run(const int port)
return;
/*
- * The port shall transition to Attached.SNK after the state of only one
- * of the CC1 or CC2 pins is SNK.Rp for at least tCCDebounce and VBUS is
- * detected.
+ * The port shall transition to Attached.SNK after the state of only
+ * one of the CC1 or CC2 pins is SNK.Rp for at least tCCDebounce and
+ * VBUS is detected.
*
- * A DRP that strongly prefers the Source role may optionally transition
- * to Try.SRC instead of Attached.SNK when the state of only one CC pin
- * has been SNK.Rp for at least tCCDebounce and VBUS is detected.
+ * A DRP that strongly prefers the Source role may optionally
+ * transition to Try.SRC instead of Attached.SNK when the state of only
+ * one CC pin has been SNK.Rp for at least tCCDebounce and VBUS is
+ * detected.
*
* If the port supports Debug Accessory Mode, the port shall transition
* to DebugAccessory.SNK if the state of both the CC1 and CC2 pins is
@@ -1069,6 +1807,12 @@ static void tc_attach_wait_snk_run(const int port)
TC_SET_FLAG(port, TC_FLAGS_TS_DTS_PARTNER);
set_state_tc(port, TC_DBG_ACC_SNK);
}
+
+ if (IS_ENABLED(CONFIG_USB_PE_SM) &&
+ IS_ENABLED(CONFIG_USB_PD_ALT_MODE_DFP)) {
+ hook_call_deferred(&pd_usb_billboard_deferred_data,
+ PD_T_AME);
+ }
}
}
@@ -1081,46 +1825,192 @@ static void tc_attached_snk_entry(const int port)
print_current_state(port);
- /* Get connector orientation */
- tcpm_get_cc(port, &cc1, &cc2);
- tc[port].polarity = get_snk_polarity(cc1, cc2);
- set_polarity(port, tc[port].polarity);
+#ifdef CONFIG_USB_PE_SM
+ if (TC_CHK_FLAG(port, TC_FLAGS_PR_SWAP_IN_PROGRESS)) {
+ /*
+ * Both CC1 and CC2 pins shall be independently terminated to
+ * ground through Rd.
+ */
+ tcpm_set_cc(port, TYPEC_CC_RD);
- /*
- * Initial data role for sink is UFP
- * This also sets the usb mux
- */
- tc_set_data_role(port, PD_ROLE_UFP);
+ /* Change role to sink */
+ tc_set_power_role(port, PD_ROLE_SINK);
+ tcpm_set_msg_header(port, tc[port].power_role,
+ tc[port].data_role);
- if (IS_ENABLED(CONFIG_CHARGE_MANAGER)) {
- tc[port].typec_curr = usb_get_typec_current_limit(
- tc[port].polarity, cc1, cc2);
- typec_set_input_current_limit(port, tc[port].typec_curr,
- TYPE_C_VOLTAGE);
- charge_manager_update_dualrole(port, CAP_DEDICATED);
- tc[port].cc_state = (tc[port].polarity) ? cc2 : cc1;
+ /*
+ * Maintain VCONN supply state, whether ON or OFF, and its
+ * data role / usb mux connections.
+ */
+ } else
+#endif
+ {
+ /* Get connector orientation */
+ tcpm_get_cc(port, &cc1, &cc2);
+ tc[port].polarity = get_snk_polarity(cc1, cc2);
+ set_polarity(port, tc[port].polarity);
+
+ /*
+ * Initial data role for sink is UFP
+ * This also sets the usb mux
+ */
+ tc_set_data_role(port, PD_ROLE_UFP);
+
+ if (IS_ENABLED(CONFIG_CHARGE_MANAGER)) {
+ tc[port].typec_curr =
+ usb_get_typec_current_limit(tc[port].polarity,
+ cc1, cc2);
+ typec_set_input_current_limit(port,
+ tc[port].typec_curr, TYPE_C_VOLTAGE);
+ charge_manager_update_dualrole(port, CAP_DEDICATED);
+ tc[port].cc_state = (tc[port].polarity) ? cc2 : cc1;
+ }
}
/* Apply Rd */
tcpm_set_cc(port, TYPEC_CC_RD);
tc[port].cc_debounce = 0;
+
+ /* Enable PD */
+ if (IS_ENABLED(CONFIG_USB_PE_SM))
+ tc[port].pd_enable = 1;
}
static void tc_attached_snk_run(const int port)
{
- /* Detach detection */
- if (!pd_is_vbus_present(port)) {
- set_state_tc(port, TC_UNATTACHED_SNK);
- return;
+#ifdef CONFIG_USB_PE_SM
+ /*
+ * Perform Hard Reset
+ */
+ if (TC_CHK_FLAG(port, TC_FLAGS_HARD_RESET)) {
+ TC_CLR_FLAG(port, TC_FLAGS_HARD_RESET);
+
+ tc_set_data_role(port, PD_ROLE_UFP);
+ /* Clear the input current limit */
+ sink_stop_drawing_current(port);
+
+ /*
+ * When VCONN is supported, the Hard Reset Shall cause
+ * the Port with the Rd resistor asserted to turn off
+ * VCONN.
+ */
+ if (IS_ENABLED(CONFIG_USBC_VCONN))
+ if (TC_CHK_FLAG(port, TC_FLAGS_VCONN_ON))
+ set_vconn(port, 0);
+
+ /*
+ * Inform policy engine that power supply
+ * reset is complete
+ */
+ pe_ps_reset_complete(port);
}
+ /*
+ * The sink will be powered off during a power role swap but we don't
+ * want to trigger a disconnect
+ */
+ if (!TC_CHK_FLAG(port, TC_FLAGS_POWER_OFF_SNK) &&
+ !TC_CHK_FLAG(port, TC_FLAGS_PR_SWAP_IN_PROGRESS)) {
+ /* Detach detection */
+ if (!pd_is_vbus_present(port)) {
+ if (IS_ENABLED(CONFIG_USB_PD_ALT_MODE_DFP))
+ pd_dfp_exit_mode(port, 0, 0);
+
+ return set_state_tc(port, TC_UNATTACHED_SNK);
+ }
+
+ if (!pe_is_explicit_contract(port))
+ sink_power_sub_states(port);
+ }
+
+ /*
+ * PD swap commands
+ */
+ if (tc[port].pd_enable && prl_is_running(port)) {
+ /*
+ * Power Role Swap
+ */
+ if (TC_CHK_FLAG(port, TC_FLAGS_DO_PR_SWAP)) {
+ TC_CLR_FLAG(port, TC_FLAGS_DO_PR_SWAP);
+ return set_state_tc(port, TC_ATTACHED_SRC);
+ }
+
+ /*
+ * Data Role Swap
+ */
+ if (TC_CHK_FLAG(port, TC_FLAGS_REQUEST_DR_SWAP)) {
+ TC_CLR_FLAG(port, TC_FLAGS_REQUEST_DR_SWAP);
+
+ /* Perform Data Role Swap */
+ tc_set_data_role(port, !tc[port].data_role);
+ }
+
+#ifdef CONFIG_USBC_VCONN
+ /*
+ * VCONN Swap
+ */
+ if (TC_CHK_FLAG(port, TC_FLAGS_REQUEST_VC_SWAP_ON)) {
+ TC_CLR_FLAG(port, TC_FLAGS_REQUEST_VC_SWAP_ON);
+
+ set_vconn(port, 1);
+ /* Inform policy engine that vconn swap is complete */
+ pe_vconn_swap_complete(port);
+ } else if (TC_CHK_FLAG(port, TC_FLAGS_REQUEST_VC_SWAP_OFF)) {
+ TC_CLR_FLAG(port, TC_FLAGS_REQUEST_VC_SWAP_OFF);
+
+ set_vconn(port, 0);
+ /* Inform policy engine that vconn swap is complete */
+ pe_vconn_swap_complete(port);
+ }
+#endif
+ /*
+ * If the port supports Charge-Through VCONN-Powered USB
+ * devices, and an explicit PD contract has failed to be
+ * negotiated, the port shall query the identity of the
+		 * cable via USB PD on SOP'
+ */
+ if (!pe_is_explicit_contract(port) &&
+ TC_CHK_FLAG(port, TC_FLAGS_CTVPD_DETECTED)) {
+ /*
+			 * A port that via SOP' has detected an attached
+ * Charge-Through VCONN-Powered USB device shall
+ * transition to Unattached.SRC if an explicit PD
+ * contract has failed to be negotiated.
+ */
+ /* CTVPD detected */
+ set_state_tc(port, TC_UNATTACHED_SRC);
+ }
+ }
+
+#else /* CONFIG_USB_PE_SM */
+
+ /* Detach detection */
+ if (!pd_is_vbus_present(port))
+ return set_state_tc(port, TC_UNATTACHED_SNK);
+
/* Run Sink Power Sub-State */
sink_power_sub_states(port);
+#endif /* CONFIG_USB_PE_SM */
}
static void tc_attached_snk_exit(const int port)
{
+ if (IS_ENABLED(CONFIG_USB_PE_SM)) {
+ TC_CLR_FLAG(port, TC_FLAGS_POWER_OFF_SNK);
+
+ if (IS_ENABLED(CONFIG_USBC_VCONN)) {
+ /*
+ * If supplying VCONN, the port shall cease to supply
+ * it within tVCONNOFF of exiting Attached.SNK.
+ */
+ if (!TC_CHK_FLAG(port, TC_FLAGS_PR_SWAP_IN_PROGRESS) &&
+ TC_CHK_FLAG(port, TC_FLAGS_VCONN_ON)) {
+ set_vconn(port, 0);
+ }
+ }
+ }
+
/* Stop drawing power */
sink_stop_drawing_current(port);
}
@@ -1184,8 +2074,14 @@ static void tc_dbg_acc_snk_entry(const int port)
static void tc_dbg_acc_snk_run(const int port)
{
- if (!pd_is_vbus_present(port))
+ if (!pd_is_vbus_present(port)) {
+ if (IS_ENABLED(CONFIG_USB_PE_SM) &&
+ IS_ENABLED(CONFIG_USB_PD_ALT_MODE_DFP)) {
+ pd_dfp_exit_mode(port, 0, 0);
+ }
+
set_state_tc(port, TC_UNATTACHED_SNK);
+ }
}
/**
@@ -1223,6 +2119,11 @@ static void tc_unattached_src_entry(const int port)
*/
pd_execute_data_swap(port, PD_ROLE_DISCONNECTED);
+ if (IS_ENABLED(CONFIG_USB_PE_SM)) {
+ tc[port].flags = 0;
+ tc[port].pd_enable = 0;
+ }
+
tc[port].next_role_swap = get_time().val + PD_T_DRP_SRC;
}
@@ -1230,6 +2131,24 @@ static void tc_unattached_src_run(const int port)
{
enum tcpc_cc_voltage_status cc1, cc2;
+ if (IS_ENABLED(CONFIG_USB_PE_SM)) {
+ if (TC_CHK_FLAG(port, TC_FLAGS_HARD_RESET)) {
+ TC_CLR_FLAG(port, TC_FLAGS_HARD_RESET);
+ tc_set_data_role(port, PD_ROLE_DFP);
+ /* Inform Policy Engine that hard reset is complete */
+ pe_ps_reset_complete(port);
+ }
+ }
+
+ if (IS_ENABLED(CONFIG_USBC_PPC)) {
+ /*
+ * If the port is latched off, just continue to
+ * monitor for a detach.
+ */
+ if (ppc_is_port_latched_off(port))
+ return;
+ }
+
/* Check for connection */
tcpm_get_cc(port, &cc1, &cc2);
@@ -1328,6 +2247,61 @@ static void tc_attached_src_entry(const int port)
print_current_state(port);
+#if defined(CONFIG_USB_PE_SM)
+ if (TC_CHK_FLAG(port, TC_FLAGS_PR_SWAP_IN_PROGRESS)) {
+ /* Change role to source */
+ tc_set_power_role(port, PD_ROLE_SOURCE);
+ tcpm_set_msg_header(port,
+ tc[port].power_role, tc[port].data_role);
+
+ /*
+ * Both CC1 and CC2 pins shall be independently terminated to
+ * ground through Rp.
+ */
+ tcpm_select_rp_value(port, CONFIG_USB_PD_PULLUP);
+
+ /* Enable VBUS */
+ pd_set_power_supply_ready(port);
+
+ /*
+ * Maintain VCONN supply state, whether ON or OFF, and its
+ * data role / usb mux connections.
+ */
+ } else {
+ /*
+ * Start sourcing Vconn before Vbus to ensure
+ * we are within USB Type-C Spec 1.4 tVconnON
+ */
+ if (IS_ENABLED(CONFIG_USBC_VCONN))
+ set_vconn(port, 1);
+
+ /* Enable VBUS */
+ if (pd_set_power_supply_ready(port)) {
+ /* Stop sourcing Vconn if Vbus failed */
+ if (IS_ENABLED(CONFIG_USBC_VCONN))
+ set_vconn(port, 0);
+
+ if (IS_ENABLED(CONFIG_USBC_SS_MUX))
+ usb_mux_set(port, TYPEC_MUX_NONE,
+ USB_SWITCH_DISCONNECT, tc[port].polarity);
+ }
+
+ /* Get connector orientation */
+ tcpm_get_cc(port, &cc1, &cc2);
+ tc[port].polarity = (cc1 != TYPEC_CC_VOLT_RD);
+ set_polarity(port, tc[port].polarity);
+
+ /*
+		 * Initial data role for source is DFP
+ * This also sets the usb mux
+ */
+ tc_set_data_role(port, PD_ROLE_DFP);
+
+ tc[port].pd_enable = 0;
+ tc[port].timeout = get_time().val +
+ PD_POWER_SUPPLY_TURN_ON_DELAY + PD_T_VCONN_STABLE;
+ }
+#else
/* Get connector orientation */
tcpm_get_cc(port, &cc1, &cc2);
tc[port].polarity = (cc1 != TYPEC_CC_VOLT_RD);
@@ -1356,6 +2330,7 @@ static void tc_attached_src_entry(const int port)
usb_mux_set(port, TYPEC_MUX_NONE,
USB_SWITCH_DISCONNECT, tc[port].polarity);
}
+#endif /* CONFIG_USB_PE_SM */
/* Apply Rp */
tcpm_set_cc(port, TYPEC_CC_RP);
@@ -1373,6 +2348,72 @@ static void tc_attached_src_run(const int port)
enum tcpc_cc_voltage_status cc1, cc2;
enum pd_cc_states new_cc_state;
+#ifdef CONFIG_USB_PE_SM
+ /* Enable PD communications after power supply has fully turned on */
+ if (tc[port].pd_enable == 0 &&
+ get_time().val > tc[port].timeout) {
+
+ tc[port].pd_enable = 1;
+ tc[port].timeout = 0;
+ }
+
+ if (tc[port].pd_enable == 0)
+ return;
+
+ /*
+ * Handle Hard Reset from Policy Engine
+ */
+ if (TC_CHK_FLAG(port, TC_FLAGS_HARD_RESET)) {
+ if (get_time().val < tc[port].timeout)
+ return;
+
+ switch (tc[port].ps_reset_state) {
+ case PS_STATE0:
+ /* Remove VBUS */
+ tc_src_power_off(port);
+
+ /* Set role to DFP */
+ tc_set_data_role(port, PD_ROLE_DFP);
+
+ /* Turn off VCONN */
+ if (IS_ENABLED(CONFIG_USBC_VCONN))
+ set_vconn(port, 0);
+
+ /* Remove Rp */
+ tcpm_set_cc(port, TYPEC_CC_OPEN);
+
+ tc[port].ps_reset_state = PS_STATE1;
+ tc[port].timeout = get_time().val + PD_T_SRC_RECOVER;
+ return;
+ case PS_STATE1:
+ /* Enable VBUS */
+ pd_set_power_supply_ready(port);
+
+ /* Apply Rp */
+ tcpm_set_cc(port, TYPEC_CC_RP);
+
+ tc[port].ps_reset_state = PS_STATE2;
+ tc[port].timeout = get_time().val +
+ PD_POWER_SUPPLY_TURN_ON_DELAY;
+ return;
+ case PS_STATE2:
+ /* Turn on VCONN */
+ if (IS_ENABLED(CONFIG_USBC_VCONN))
+ set_vconn(port, 1);
+
+ tc[port].ps_reset_state = PS_STATE3;
+ return;
+ case PS_STATE3:
+ /* Tell Policy Engine Hard Reset is complete */
+ pe_ps_reset_complete(port);
+
+ TC_CLR_FLAG(port, TC_FLAGS_HARD_RESET);
+ tc[port].ps_reset_state = PS_STATE0;
+ return;
+ }
+ }
+#endif
+
/* Check for connection */
tcpm_get_cc(port, &cc1, &cc2);
@@ -1403,10 +2444,86 @@ static void tc_attached_src_run(const int port)
* AttachWait.SNK shall enter TryWait.SNK for a Sink detach from
* Attached.SRC.
*/
- if (tc[port].cc_state == PD_CC_NO_UFP) {
+ if (tc[port].cc_state == PD_CC_NO_UFP &&
+ !TC_CHK_FLAG(port, TC_FLAGS_PR_SWAP_IN_PROGRESS) &&
+ !TC_CHK_FLAG(port, TC_FLAGS_DISC_IDENT_IN_PROGRESS)) {
+
+ if (IS_ENABLED(CONFIG_USB_PE_SM))
+ if (IS_ENABLED(CONFIG_USB_PD_ALT_MODE_DFP))
+ pd_dfp_exit_mode(port, 0, 0);
+
+ tc[port].pd_enable = 0;
set_state_tc(port, IS_ENABLED(CONFIG_USB_PD_TRY_SRC) ?
TC_TRY_WAIT_SNK : TC_UNATTACHED_SNK);
}
+
+#ifdef CONFIG_USB_PE_SM
+ /*
+ * PD swap commands
+ */
+ if (tc[port].pd_enable && prl_is_running(port)) {
+ /*
+ * Power Role Swap Request
+ */
+ if (TC_CHK_FLAG(port, TC_FLAGS_DO_PR_SWAP)) {
+ TC_CLR_FLAG(port, TC_FLAGS_DO_PR_SWAP);
+ return set_state_tc(port, TC_ATTACHED_SNK);
+ }
+
+ /*
+ * Data Role Swap Request
+ */
+ if (TC_CHK_FLAG(port, TC_FLAGS_REQUEST_DR_SWAP)) {
+ TC_CLR_FLAG(port, TC_FLAGS_REQUEST_DR_SWAP);
+
+ /* Perform Data Role Swap */
+ tc_set_data_role(port, !tc[port].data_role);
+ }
+
+ if (IS_ENABLED(CONFIG_USBC_VCONN)) {
+ /*
+ * VCONN Swap Request
+ */
+ if (TC_CHK_FLAG(port, TC_FLAGS_REQUEST_VC_SWAP_ON)) {
+ TC_CLR_FLAG(port, TC_FLAGS_REQUEST_VC_SWAP_ON);
+ set_vconn(port, 1);
+ pe_vconn_swap_complete(port);
+ } else if (TC_CHK_FLAG(port,
+ TC_FLAGS_REQUEST_VC_SWAP_OFF)) {
+ TC_CLR_FLAG(port, TC_FLAGS_REQUEST_VC_SWAP_OFF);
+ set_vconn(port, 0);
+ pe_vconn_swap_complete(port);
+ }
+ }
+
+ /*
+ * A DRP that supports Charge-Through VCONN-Powered USB Devices
+ * shall transition to CTUnattached.SNK if the connected device
+ * identifies itself as a Charge-Through VCONN-Powered USB
+ * Device in its Discover Identity Command response.
+ *
+ * If it detects that it is connected to a VCONN-Powered USB
+ * Device, the port may remove VBUS and discharge it to
+ * vSafe0V, while continuing to remain in this state with VCONN
+ * applied.
+ */
+ if (TC_CHK_FLAG(port, TC_FLAGS_CTVPD_DETECTED)) {
+ TC_CLR_FLAG(port, TC_FLAGS_CTVPD_DETECTED);
+
+ /* Clear TC_FLAGS_DISC_IDENT_IN_PROGRESS */
+ TC_CLR_FLAG(port, TC_FLAGS_DISC_IDENT_IN_PROGRESS);
+
+ set_state_tc(port, TC_CT_UNATTACHED_SNK);
+ }
+ }
+#endif
}
static void tc_attached_src_exit(const int port)
@@ -1537,6 +2654,150 @@ static void tc_try_wait_snk_run(const int port)
#endif
+#if defined(CONFIG_USB_PE_SM)
+/*
+ * CTUnattached.SNK
+ */
+static void tc_ct_unattached_snk_entry(int port)
+{
+ print_current_state(port);
+
+ /*
+ * Both CC1 and CC2 pins shall be independently terminated to
+ * ground through Rd.
+ */
+ tcpm_select_rp_value(port, CONFIG_USB_PD_PULLUP);
+ tcpm_set_cc(port, TYPEC_CC_RD);
+ tc[port].cc_state = PD_CC_UNSET;
+
+ /* Set power role to sink */
+ tc_set_power_role(port, PD_ROLE_SINK);
+ tcpm_set_msg_header(port, tc[port].power_role, tc[port].data_role);
+
+ /*
+	 * The policy engine is in the disabled state. Disable PD here and
+	 * re-enable it once the power supply turn-on delay has elapsed.
+ */
+ tc[port].pd_enable = 0;
+
+ tc[port].timeout = get_time().val + PD_POWER_SUPPLY_TURN_ON_DELAY;
+}
+
+static void tc_ct_unattached_snk_run(int port)
+{
+ enum tcpc_cc_voltage_status cc1;
+ enum tcpc_cc_voltage_status cc2;
+ enum pd_cc_states new_cc_state;
+
+ if (tc[port].timeout > 0 && get_time().val > tc[port].timeout) {
+ tc[port].pd_enable = 1;
+ tc[port].timeout = 0;
+ }
+
+ if (tc[port].timeout > 0)
+ return;
+
+ /* Wait until Protocol Layer is ready */
+ if (!prl_is_running(port))
+ return;
+
+ /*
+ * Hard Reset is sent when the PE layer is disabled due to a
+ * CTVPD connection.
+ */
+ if (TC_CHK_FLAG(port, TC_FLAGS_HARD_RESET)) {
+ TC_CLR_FLAG(port, TC_FLAGS_HARD_RESET);
+ /* Nothing to do. Just signal hard reset completion */
+ pe_ps_reset_complete(port);
+ }
+
+ /* Check for connection */
+ tcpm_get_cc(port, &cc1, &cc2);
+
+ /* We only care about CCs being open */
+ if (cc1 == TYPEC_CC_VOLT_OPEN && cc2 == TYPEC_CC_VOLT_OPEN)
+ new_cc_state = PD_CC_NONE;
+ else
+ new_cc_state = PD_CC_UNSET;
+
+ /* Debounce the cc state */
+ if (new_cc_state != tc[port].cc_state) {
+ tc[port].cc_state = new_cc_state;
+ tc[port].cc_debounce = get_time().val + PD_T_VPDDETACH;
+ }
+
+ /*
+ * The port shall transition to Unattached.SNK if the state of
+ * the CC pin is SNK.Open for tVPDDetach after VBUS is vSafe0V.
+ */
+ if (get_time().val > tc[port].cc_debounce) {
+ if (new_cc_state == PD_CC_NONE && !pd_is_vbus_present(port)) {
+ if (IS_ENABLED(CONFIG_USB_PD_ALT_MODE_DFP))
+ pd_dfp_exit_mode(port, 0, 0);
+
+ set_state_tc(port, TC_UNATTACHED_SNK);
+ return;
+ }
+ }
+
+ /*
+ * The port shall transition to CTAttached.SNK when VBUS is detected.
+ */
+ if (pd_is_vbus_present(port))
+ set_state_tc(port, TC_CT_ATTACHED_SNK);
+}
+
+/**
+ * CTAttached.SNK
+ */
+static void tc_ct_attached_snk_entry(int port)
+{
+ print_current_state(port);
+
+ /* The port shall reject a VCONN swap request. */
+ TC_SET_FLAG(port, TC_FLAGS_REJECT_VCONN_SWAP);
+}
+
+static void tc_ct_attached_snk_run(int port)
+{
+ /*
+ * Hard Reset is sent when the PE layer is disabled due to a
+ * CTVPD connection.
+ */
+ if (TC_CHK_FLAG(port, TC_FLAGS_HARD_RESET)) {
+ TC_CLR_FLAG(port, TC_FLAGS_HARD_RESET);
+ /* Nothing to do. Just signal hard reset completion */
+ pe_ps_reset_complete(port);
+ }
+
+ /*
+ * A port that is not in the process of a USB PD Hard Reset shall
+ * transition to CTUnattached.SNK within tSinkDisconnect when VBUS
+ * falls below vSinkDisconnect
+ */
+ if (!pd_is_vbus_present(port)) {
+ set_state_tc(port, TC_CT_UNATTACHED_SNK);
+ return;
+ }
+
+ /*
+ * The port shall operate in one of the Sink Power Sub-States
+ * and remain within the Sink Power Sub-States, until either VBUS is
+ * removed or a USB PD contract is established with the source.
+ */
+ if (!pe_is_explicit_contract(port))
+ sink_power_sub_states(port);
+}
+
+static void tc_ct_attached_snk_exit(int port)
+{
+ /* Stop drawing power */
+ sink_stop_drawing_current(port);
+
+ TC_CLR_FLAG(port, TC_FLAGS_REJECT_VCONN_SWAP);
+}
+#endif /* CONFIG_USB_PE_SM */
+
/**
* Super State CC_RD
*/
@@ -1557,6 +2818,7 @@ static void tc_cc_rd_entry(const int port)
tcpm_set_msg_header(port, tc[port].power_role, tc[port].data_role);
}
+
/**
* Super State CC_RP
*/
@@ -1705,6 +2967,17 @@ static const struct usb_state tc_states[] = {
.parent = &tc_states[TC_CC_RD],
},
#endif /* CONFIG_USB_PD_TRY_SRC */
+#ifdef CONFIG_USB_PE_SM
+ [TC_CT_UNATTACHED_SNK] = {
+ .entry = tc_ct_unattached_snk_entry,
+ .run = tc_ct_unattached_snk_run,
+ },
+ [TC_CT_ATTACHED_SNK] = {
+ .entry = tc_ct_attached_snk_entry,
+ .run = tc_ct_attached_snk_run,
+ .exit = tc_ct_attached_snk_exit,
+ },
+#endif
};
#ifdef TEST_BUILD
diff --git a/include/config.h b/include/config.h
index 8b4e67420b..81de017ae3 100644
--- a/include/config.h
+++ b/include/config.h
@@ -4918,4 +4918,22 @@
#endif
#endif /* CONFIG_ACCEL_FIFO */
+/*
+ * If USB PD Discharge is enabled, verify that either
+ * CONFIG_USB_PD_DISCHARGE_GPIO (along with CONFIG_USB_PD_PORT_COUNT),
+ * CONFIG_USB_PD_DISCHARGE_TCPC, or CONFIG_USB_PD_DISCHARGE_PPC is defined.
+ */
+#ifdef CONFIG_USB_PD_DISCHARGE
+#ifdef CONFIG_USB_PD_DISCHARGE_GPIO
+#if !defined(CONFIG_USB_PD_PORT_COUNT)
+#error "PD discharge port not defined"
+#endif
+#else
+#if !defined(CONFIG_USB_PD_DISCHARGE_TCPC) && \
+ !defined(CONFIG_USB_PD_DISCHARGE_PPC)
+#error "PD discharge implementation not defined"
+#endif
+#endif /* CONFIG_USB_PD_DISCHARGE_GPIO */
+#endif /* CONFIG_USB_PD_DISCHARGE */
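
For illustration, a board that discharges VBUS through its PPC would satisfy this check with a config fragment along these lines (a sketch; the board header shown is hypothetical):

	/* board.h (hypothetical) */
	#define CONFIG_USB_PD_DISCHARGE
	#define CONFIG_USB_PD_DISCHARGE_PPC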
+
#endif /* __CROS_EC_CONFIG_H */
diff --git a/include/usb_common.h b/include/usb_common.h
index 1fbaa4bd50..5006a76a14 100644
--- a/include/usb_common.h
+++ b/include/usb_common.h
@@ -34,4 +34,42 @@ typec_current_t usb_get_typec_current_limit(enum pd_cc_polarity_type polarity,
enum pd_cc_polarity_type get_snk_polarity(enum tcpc_cc_voltage_status cc1,
enum tcpc_cc_voltage_status cc2);
+/**
+ * Find PDO index that offers the most amount of power and stays within
+ * max_mv voltage.
+ *
+ * @param src_cap_cnt number of source capability PDOs
+ * @param src_caps array of source capability PDOs
+ * @param max_mv maximum voltage (or -1 if no limit)
+ * @param selected_pdo raw PDO corresponding to the index, or index 0 on error
+ *                     (output)
+ * @return index of PDO within source cap packet
+ */
+int pd_find_pdo_index(uint32_t src_cap_cnt, const uint32_t * const src_caps,
+ int max_mv, uint32_t *selected_pdo);
+
+/**
+ * Extract power information out of a Power Data Object (PDO)
+ *
+ * @param pdo raw pdo to extract
+ * @param ma current of the PDO (output)
+ * @param mv voltage of the PDO (output)
+ */
+void pd_extract_pdo_power(uint32_t pdo, uint32_t *ma, uint32_t *mv);
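
For illustration, a caller could combine these two relocated helpers roughly as follows, assuming the port's source caps have already been stored (a sketch, not part of this change):

	uint32_t pdo, ma, mv;
	int idx;

	/* Pick the highest-power PDO at or below 9000 mV */
	idx = pd_find_pdo_index(pd_get_src_cap_cnt(port),
				pd_get_src_caps(port), 9000, &pdo);
	/* Decode the selected PDO into its current/voltage limits */
	pd_extract_pdo_power(pdo, &ma, &mv);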
+
+/**
+ * Decide which PDO to choose from the source capabilities.
+ *
+ * @param src_cap_cnt number of source capability PDOs
+ * @param src_caps array of source capability PDOs
+ * @param vpd_vdo VPD VDO of an attached VCONN-Powered USB Device, if any
+ * @param rdo requested Request Data Object (output)
+ * @param ma selected current limit (stored on success)
+ * @param mv selected supply voltage (stored on success)
+ * @param req_type request type
+ * @param max_request_mv max voltage a sink can request before getting
+ * source caps
+ */
+void pd_build_request(uint32_t src_cap_cnt, const uint32_t * const src_caps,
+ int32_t vpd_vdo, uint32_t *rdo, uint32_t *ma, uint32_t *mv,
+ enum pd_request_type req_type, uint32_t max_request_mv);
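
A minimal sketch of a call with the new signature (illustrative only; the zero vpd_vdo and the PD_REQUEST_MAX request type are assumptions about the caller):

	uint32_t rdo, ma, mv;

	pd_build_request(pd_get_src_cap_cnt(port), pd_get_src_caps(port),
			 0 /* no VPD VDO */, &rdo, &ma, &mv,
			 PD_REQUEST_MAX, get_max_request_mv());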
+
#endif /* __CROS_EC_USB_COMMON_H */
diff --git a/include/usb_pd.h b/include/usb_pd.h
index b0d52737f4..737490f6cc 100644
--- a/include/usb_pd.h
+++ b/include/usb_pd.h
@@ -160,6 +160,18 @@ enum pd_rx_errors {
#define BDO(mode, cnt) ((mode) | ((cnt) & 0xFFFF))
+#define BIST_MODE(n) ((n) >> 28)
+#define BIST_ERROR_COUNTER(n) ((n) & 0xffff)
+#define BIST_RECEIVER_MODE 0
+#define BIST_TRANSMIT_MODE 1
+#define BIST_RETURNED_COUNTER 2
+#define BIST_CARRIER_MODE_0 3
+#define BIST_CARRIER_MODE_1 4
+#define BIST_CARRIER_MODE_2 5
+#define BIST_CARRIER_MODE_3 6
+#define BIST_EYE_PATTERN 7
+#define BIST_TEST_DATA 8
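
For example, a receiver might decode an incoming BIST Data Object with these macros roughly like this (bdo is a hypothetical received data object; the actions are placeholders):

	uint32_t bdo = payload[0];	/* hypothetical: first received object */

	switch (BIST_MODE(bdo)) {
	case BIST_CARRIER_MODE_2:
		/* e.g. start transmitting the BIST carrier pattern */
		break;
	case BIST_TEST_DATA:
		/* e.g. count errors in subsequent test frames */
		break;
	}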
+
#define SVID_DISCOVERY_MAX 16
/* Timers */
@@ -190,6 +202,7 @@ enum pd_rx_errors {
#define PD_T_NO_RESPONSE (5500*MSEC) /* between 4.5s and 5.5s */
#define PD_T_BIST_TRANSMIT (50*MSEC) /* 50ms (used for task_wait arg) */
#define PD_T_BIST_RECEIVE (60*MSEC) /* 60ms (max time to process bist) */
+#define PD_T_BIST_CONT_MODE (60*MSEC) /* 30ms to 60ms */
#define PD_T_VCONN_SOURCE_ON (100*MSEC) /* 100ms */
#define PD_T_DRP_TRY		(125*MSEC) /* between 75 and 150ms (monitor Vbus) */
#define PD_T_TRY_TIMEOUT (550*MSEC) /* between 550ms and 1100ms */
@@ -201,6 +214,8 @@ enum pd_rx_errors {
#define PD_T_SWAP_SOURCE_START (25*MSEC) /* Min of 20ms */
#define PD_T_RP_VALUE_CHANGE (20*MSEC) /* 20ms */
#define PD_T_SRC_DISCONNECT (15*MSEC) /* 15ms */
+#define PD_T_VCONN_STABLE (50*MSEC) /* 50ms */
+#define PD_T_DISCOVER_IDENTITY (45*MSEC) /* between 40ms and 50ms */
/* number of edges and time window to detect CC line is not idle */
#define PD_RX_TRANSITION_COUNT 3
@@ -308,7 +323,7 @@ struct pd_policy {
* VDO : Vendor Defined Message Object
* VDM object is minimum of VDM header + 6 additional data objects.
*/
-
+#define VDO_HDR_SIZE 1
#define VDO_MAX_SIZE 7
#define VDM_VER10 0
@@ -784,6 +799,11 @@ struct pd_cable {
#define PD_VDO_SVID_SVID0(vdo) ((vdo) >> 16)
#define PD_VDO_SVID_SVID1(vdo) ((vdo) & 0xffff)
+#define VPD_VDO_MAX_VBUS(vdo) (((vdo) >> 15) & 0x3)
+#define VPD_VDO_VBUS_IMP(vdo) (((vdo) >> 7) & 0x3f)
+#define VPD_VDO_GND_IMP(vdo) (((vdo) >> 1) & 0x3f)
+#define VPD_VDO_CTS(vdo) ((vdo) & 1)
+
/*
* Google modes capabilities
* <31:8> : reserved
@@ -936,6 +956,8 @@ struct pd_cable {
/* Other Vendor IDs */
#define USB_VID_APPLE 0x05ac
+#define USB_PID1_APPLE 0x1012
+#define USB_PID2_APPLE 0x1013
/* Timeout for message receive in microseconds */
#define USB_PD_RX_TMOUT_US 1800
@@ -1225,15 +1247,22 @@ enum pd_rev_type {
};
/* Power role */
-#define PD_ROLE_SINK 0
-#define PD_ROLE_SOURCE 1
+enum pd_power_role {
+ PD_ROLE_SINK,
+ PD_ROLE_SOURCE
+};
+
+/* Data role */
+enum pd_data_role {
+ PD_ROLE_UFP,
+ PD_ROLE_DFP,
+ PD_ROLE_DISCONNECTED
+};
+
/* Cable plug */
#define PD_PLUG_DFP_UFP 0
#define PD_PLUG_CABLE_VPD 1
-/* Data role */
-#define PD_ROLE_UFP 0
-#define PD_ROLE_DFP 1
-#define PD_ROLE_DISCONNECTED 2
+
/* Vconn role */
#define PD_ROLE_VCONN_OFF 0
#define PD_ROLE_VCONN_ON 1
@@ -1285,6 +1314,7 @@ enum pd_rev_type {
*/
#define PD_HEADER_TYPE(header) ((header) & 0x1F)
#define PD_HEADER_ID(header) (((header) >> 9) & 7)
+#define PD_HEADER_PROLE(header) (((header) >> 8) & 1)
#define PD_HEADER_REV(header) (((header) >> 6) & 3)
#define PD_HEADER_DROLE(header) (((header) >> 5) & 1)
@@ -1369,17 +1399,6 @@ int pd_get_vdo_ver(int port);
#define pd_get_rev(n) PD_REV20
#define pd_get_vdo_ver(n) VDM_VER10
#endif
-/**
- * Decide which PDO to choose from the source capabilities.
- *
- * @param port USB-C port number
- * @param rdo requested Request Data Object.
- * @param ma selected current limit (stored on success)
- * @param mv selected supply voltage (stored on success)
- * @param req_type request type
- */
-void pd_build_request(int port, uint32_t *rdo, uint32_t *ma, uint32_t *mv,
- enum pd_request_type req_type);
/**
* Check if max voltage request is allowed (only used if
@@ -1431,26 +1450,6 @@ void pd_prevent_low_power_mode(int port, int prevent);
void pd_process_source_cap(int port, int cnt, uint32_t *src_caps);
/**
- * Find PDO index that offers the most amount of power and stays within
- * max_mv voltage.
- *
- * @param port USB-C port number
- * @param max_mv maximum voltage (or -1 if no limit)
- * @param pdo raw pdo corresponding to index, or index 0 on error (output)
- * @return index of PDO within source cap packet
- */
-int pd_find_pdo_index(int port, int max_mv, uint32_t *pdo);
-
-/**
- * Extract power information out of a Power Data Object (PDO)
- *
- * @param pdo raw pdo to extract
- * @param ma current of the PDO (output)
- * @param mv voltage of the PDO (output)
- */
-void pd_extract_pdo_power(uint32_t pdo, uint32_t *ma, uint32_t *mv);
-
-/**
* Reduce the sink power consumption to a minimum value.
*
* @param port USB-C port number
@@ -2209,6 +2208,25 @@ int pd_ts_dts_plugged(int port);
*/
int pd_capable(int port);
+/**
+ * Returns the source caps list
+ *
+ * @param port USB-C port number
+ */
+const uint32_t * const pd_get_src_caps(int port);
+
+/**
+ * Returns the number of source caps
+ *
+ * @param port USB-C port number
+ */
+uint8_t pd_get_src_cap_cnt(int port);
+
+/**
+ * Returns the maximum request voltage used before source caps are received
+ *
+ * @return maximum request voltage in mV
+ */
+uint32_t get_max_request_mv(void);
/**
* Return true if partner port is capable of communication over USB data
diff --git a/include/usb_pe_sm.h b/include/usb_pe_sm.h
index 1243484878..21226cbe9c 100644
--- a/include/usb_pe_sm.h
+++ b/include/usb_pe_sm.h
@@ -10,6 +10,7 @@
#include "usb_sm.h"
+/* Policy Engine Receive and Transmit Errors */
enum pe_error {
ERR_RCH_CHUNKED,
ERR_RCH_MSG_REC,
@@ -17,6 +18,25 @@ enum pe_error {
ERR_TCH_XMIT,
};
+/*
+ * Device Policy Manager Requests.
+ * NOTE: These are usually set by host commands from the AP.
+ */
+enum pe_dpm_request {
+ DPM_REQUEST_DR_SWAP = BIT(0),
+ DPM_REQUEST_PR_SWAP = BIT(1),
+ DPM_REQUEST_VCONN_SWAP = BIT(2),
+ DPM_REQUEST_GOTO_MIN = BIT(3),
+ DPM_REQUEST_SRC_CAP_CHANGE = BIT(4),
+ DPM_REQUEST_GET_SNK_CAPS = BIT(5),
+ DPM_REQUEST_SEND_PING = BIT(6),
+ DPM_REQUEST_SOURCE_CAP = BIT(7),
+ DPM_REQUEST_NEW_POWER_LEVEL = BIT(8),
+ DPM_REQUEST_DISCOVER_IDENTITY = BIT(9),
+ DPM_REQUEST_EXIT_DP_MODE = BIT(10),
+ DPM_REQUEST_SVDM = BIT(11),
+};
+
/**
* Initialize the Policy Engine State Machine
*
@@ -44,17 +64,17 @@ void pe_message_sent(int port);
* Informs the Policy Engine of an error.
*
* @param port USB-C port number
- * @parm e error
+ * @param e error
*/
void pe_report_error(int port, enum pe_error e);
/**
- * Informs the Policy Engine that a message has been received
+ * Called by the Protocol Layer to inform the Policy Engine
+ * that a message has been received.
*
* @param port USB-C port number
- * @parm e error
*/
-void pe_pass_up_message(int port);
+void pe_message_received(int port);
/**
* Informs the Policy Engine that a hard reset was received.
@@ -77,5 +97,71 @@ void pe_got_soft_reset(int port);
*/
void pe_hard_reset_sent(int port);
+/**
+ * Exit DP mode
+ *
+ * @param port USB-C port number
+ */
+void pe_exit_dp_mode(int port);
+
+/**
+ * Get the id of the current Policy Engine state
+ *
+ * @param port USB-C port number
+ */
+enum pe_states pe_get_state_id(int port);
+
+/**
+ * Indicates if the Policy Engine State Machine is running.
+ *
+ * @param port USB-C port number
+ * @return 1 if policy engine state machine is running, else 0
+ */
+int pe_is_running(int port);
+
+/**
+ * Informs the Policy Engine that the Power Supply is at its default state
+ *
+ * @param port USB-C port number
+ */
+void pe_ps_reset_complete(int port);
+
+/**
+ * Informs the Policy Engine that a VCONN Swap has completed
+ *
+ * @param port USB-C port number
+ */
+void pe_vconn_swap_complete(int port);
+
+/**
+ * Instructs the Policy Engine to send a Vendor Defined Message
+ *
+ * @param port USB-C port number
+ * @param vid Vendor ID
+ * @param cmd Vendor Defined Command
+ * @param data Vendor Defined Data
+ * @param count Size of Vendor Defined Data in 32-bit objects
+ */
+void pe_send_vdm(int port, uint32_t vid, int cmd, const uint32_t *data,
+ int count);
+
+/**
+ * Indicates if an explicit contract is in place
+ *
+ * @param port USB-C port number
+ * @return 1 if an explicit contract is in place, else 0
+ */
+int pe_is_explicit_contract(int port);
+
+/**
+ * Instruct the Policy Engine to perform a Device Policy Manager Request
+ * This function is called from the Device Policy Manager and only has effect
+ * if the current Policy Engine state is Src.Ready or Snk.Ready.
+ *
+ * @param port USB-C port number
+ * @param req Device Policy Manager Request
+ */
+void pe_dpm_request(int port, enum pe_dpm_request req);
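
For illustration, this mirrors how the host command handler added in this change raises a VCONN swap:

	/* Ask the Policy Engine to perform a VCONN swap once it is Ready */
	pe_dpm_request(port, DPM_REQUEST_VCONN_SWAP);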
+
#endif /* __CROS_EC_USB_PE_H */
diff --git a/include/usb_prl_sm.h b/include/usb_prl_sm.h
index 4116bbe1a3..32b3ba8d06 100644
--- a/include/usb_prl_sm.h
+++ b/include/usb_prl_sm.h
@@ -18,6 +18,21 @@
#define N_RETRY_COUNT 2
/**
+ * Returns true if Protocol Layer State Machine is in run mode
+ *
+ * @param port USB-C port number
+ * @return 1 if state machine is running, else 0
+ */
+int prl_is_running(int port);
+
+/**
+ * Resets the Protocol Layer State Machine
+ *
+ * @param port USB-C port number
+ */
+void prl_reset(int port);
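
A minimal sketch of how a caller might gate work on the protocol layer (the service_pd_request() helper is hypothetical):

	/* Only service PD work once the protocol layer is up and running */
	if (prl_is_running(port))
		service_pd_request(port);	/* hypothetical helper */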
+
+/**
* Runs the Protocol Layer State Machine
*
* @param port USB-C port number
diff --git a/include/usb_tc_sm.h b/include/usb_tc_sm.h
index c46f449e44..38fbe270a0 100644
--- a/include/usb_tc_sm.h
+++ b/include/usb_tc_sm.h
@@ -32,6 +32,22 @@
#define TYPE_C_AUDIO_ACC_CURRENT 500 /* mA */
/**
+ * Returns true if TypeC State machine is in attached source state.
+ *
+ * @param port USB-C port number
+ * @return 1 if in attached source state, else 0
+ */
+int tc_is_attached_src(int port);
+
+/**
+ * Returns true if TypeC State machine is in attached sink state.
+ *
+ * @param port USB-C port number
+ * @return 1 if in attached sink state, else 0
+ */
+int tc_is_attached_snk(int port);
+
+/**
* Get current data role
*
* @param port USB-C port number
@@ -73,6 +89,14 @@ uint8_t tc_get_pd_enabled(int port);
void tc_set_power_role(int port, int role);
/**
+ * Set the data role
+ *
+ * @param port USB-C port number
+ * @param role data role
+ */
+void tc_set_data_role(int port, int role);
+
+/**
* Sets the USB Mux depending on current data role
* Mux is connected except when:
* 1) PD is disconnected
@@ -91,6 +115,51 @@ void set_usb_mux_with_current_data_role(int port);
uint64_t tc_get_timeout(int port);
/**
+ * Policy Engine informs the Type-C state machine if the port partner
+ * is dualrole power.
+ *
+ * @param port USB_C port number
+ * @param en 1 if port partner is dualrole power, else 0
+ */
+void tc_partner_dr_power(int port, int en);
+
+/**
+ * Policy Engine informs the Type-C state machine if the port partner
+ * has external power
+ *
+ * @param port USB_C port number
+ * @param en 1 if port partner has external power, else 0
+ */
+void tc_partner_extpower(int port, int en);
+
+/**
+ * Policy Engine informs the Type-C state machine if the port partner
+ * is capable of USB communications.
+ *
+ * @param port USB_C port number
+ * @param en 1 if port partner is capable of USB communications, else 0
+ */
+void tc_partner_usb_comm(int port, int en);
+
+/**
+ * Policy Engine informs the Type-C state machine if the port partner
+ * is dualrole data.
+ *
+ * @param port USB_C port number
+ * @param en 1 if port partner is dualrole data, else 0
+ */
+void tc_partner_dr_data(int port, int en);
+
+/**
+ * Policy Engine informs the Type-C state machine if the port partner
+ * had a previous pd connection
+ *
+ * @param port USB_C port number
+ * @param en 1 if port partner had a previous pd connection, else 0
+ */
+void tc_pd_connection(int port, int en);
+
+/**
* Set loop timeout value
*
* @param port USB-C port number
@@ -99,6 +168,120 @@ uint64_t tc_get_timeout(int port);
void tc_set_timeout(int port, uint64_t timeout);
/**
+ * Initiates a Power Role Swap from Attached.SRC to Attached.SNK. This function
+ * has no effect if the current Type-C state is not Attached.SRC.
+ *
+ * @param port USB_C port number
+ */
+void tc_prs_src_snk_assert_rd(int port);
+
+/**
+ * Initiates a Power Role Swap from Attached.SNK to Attached.SRC. This function
+ * has no effect if the current Type-C state is not Attached.SNK.
+ *
+ * @param port USB_C port number
+ */
+void tc_prs_snk_src_assert_rp(int port);
+
+/**
+ * Informs the Type-C State Machine that a Power Role Swap is complete.
+ * This function is called from the Policy Engine.
+ *
+ * @param port USB_C port number
+ */
+void tc_pr_swap_complete(int port);
+
+/**
+ * Informs the Type-C State Machine that a Discover Identity is in progress.
+ * This function is called from the Policy Engine.
+ *
+ * @param port USB_C port number
+ */
+void tc_disc_ident_in_progress(int port);
+
+/**
+ * Informs the Type-C State Machine that a Discover Identity is complete.
+ * This function is called from the Policy Engine.
+ *
+ * @param port USB_C port number
+ */
+void tc_disc_ident_complete(int port);
+
+/**
+ * Instructs the Attached.SNK to stop drawing power. This function is called
+ * from the Policy Engine and only has effect if the current Type-C state
+ * is Attached.SNK.
+ *
+ * @param port USB_C port number
+ */
+void tc_snk_power_off(int port);
+
+/**
+ * Instructs the Attached.SRC to stop supplying power. The function has
+ * no effect if the current Type-C state is not Attached.SRC.
+ *
+ * @param port USB_C port number
+ */
+void tc_src_power_off(int port);
+
+/**
+ * Instructs the Attached.SRC to start supplying power. The function has
+ * no effect if the current Type-C state is not Attached.SRC.
+ *
+ * @param port USB_C port number
+ * @return result of pd_set_power_supply_ready() when in Attached.SRC,
+ *         else 0
+ */
+int tc_src_power_on(int port);
+
+/**
+ * Tests if a VCONN Swap is possible.
+ *
+ * @param port USB_C port number
+ * @return 1 if vconn swap is possible, else 0
+ */
+int tc_check_vconn_swap(int port);
+
+#ifdef CONFIG_USBC_VCONN
+/**
+ * Checks if VCONN is being sourced.
+ *
+ * @param port USB_C port number
+ * @return 1 if vconn is being sourced, 0 if it's not, and -1 if it
+ *         cannot be determined at this time. -1 is returned if the current
+ * Type-C state is not Attached.SRC or Attached.SNK.
+ */
+int tc_is_vconn_src(int port);
+
+/**
+ * Instructs the Attached.SRC or Attached.SNK to start sourcing VCONN.
+ * This function is called from the Policy Engine and only has effect
+ * if the current Type-C state is Attached.SRC or Attached.SNK.
+ *
+ * @param port USB_C port number
+ */
+void pd_request_vconn_swap_on(int port);
+
+/**
+ * Instructs the Attached.SRC or Attached.SNK to stop sourcing VCONN.
+ * This function is called from the Policy Engine and only has effect
+ * if the current Type-C state is Attached.SRC or Attached.SNK.
+ *
+ * @param port USB_C port number
+ */
+void pd_request_vconn_swap_off(int port);
+#endif
+
+
+/**
+ * Returns the polarity of a Sink.
+ *
+ * @param cc1 value of CC1 set by tcpm_get_cc
+ * @param cc2 value of CC2 set by tcpm_get_cc
+ * @return 0 if cc1 is connected, else 1 for cc2
+ */
+enum pd_cc_polarity_type get_snk_polarity(enum tcpc_cc_voltage_status cc1,
+ enum tcpc_cc_voltage_status cc2);
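
Illustrative usage, matching how the Type-C layer derives polarity on attach as a sink (a sketch; assumes the CC lines were just read):

	enum tcpc_cc_voltage_status cc1, cc2;
	int polarity;

	tcpm_get_cc(port, &cc1, &cc2);
	polarity = get_snk_polarity(cc1, cc2);	/* 0 = CC1, 1 = CC2 */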
+
+/**
* Restarts the TCPC
*
* @param port USB-C port number
@@ -138,7 +321,29 @@ void tc_event_check(int port, int evt);
*/
void tc_run(const int port);
+/**
+ * Attempt to activate VCONN
+ *
+ * @param port USB-C port number
+ */
+void tc_vconn_on(int port);
+
+/**
+ * Start error recovery
+ *
+ * @param port USB-C port number
+ */
+void tc_start_error_recovery(int port);
+
+/**
+ * Hard Reset the TypeC port
+ *
+ * @param port USB-C port number
+ */
+void tc_hard_reset(int port);
+
#ifdef CONFIG_USB_TYPEC_CTVPD
+
/**
* Resets the charge-through support timer. This can be
* called many times but the support timer will only
@@ -148,6 +353,12 @@ void tc_run(const int port);
*/
void tc_reset_support_timer(int port);
+#else
+
+/**
+ * Informs the Type-C State Machine that a Charge-Through VCONN-Powered
+ * USB Device (CTVPD) has been detected
+ *
+ * @param port USB-C port number
+ */
+void tc_ctvpd_detected(int port);
#endif /* CONFIG_USB_TYPEC_CTVPD */
#endif /* __CROS_EC_USB_TC_H */
diff --git a/test/usb_prl.c b/test/usb_prl.c
index eab20be46f..0bbcb1412d 100644
--- a/test/usb_prl.c
+++ b/test/usb_prl.c
@@ -102,7 +102,7 @@ static struct pd_prl {
int mock_pe_error;
int mock_pe_hard_reset_sent;
int mock_pe_got_hard_reset;
- int mock_pe_pass_up_message;
+ int mock_pe_message_received;
int mock_got_soft_reset;
} pd_port[CONFIG_USB_PD_PORT_COUNT];
@@ -114,6 +114,7 @@ static void init_port(int port, int rev)
pd_port[port].data_role = PD_ROLE_UFP;
pd_port[port].msg_tx_id = 0;
pd_port[port].msg_rx_id = 0;
+
tcpm_init(port);
tcpm_set_polarity(port, 0);
tcpm_set_rx_enable(port, 0);
@@ -224,7 +225,7 @@ static int verify_data_reception(int port, uint16_t header, int len)
if (pd_port[port].mock_pe_error >= 0)
return 0;
- if (!pd_port[port].mock_pe_pass_up_message)
+ if (!pd_port[port].mock_pe_message_received)
return 0;
if (emsg[port].header != header)
@@ -255,7 +256,7 @@ static int verify_chunk_data_reception(int port, uint16_t header, int len)
if (pd_port[port].mock_got_soft_reset)
return 0;
- if (!pd_port[port].mock_pe_pass_up_message)
+ if (!pd_port[port].mock_pe_message_received)
return 0;
if (pd_port[port].mock_pe_error >= 0)
@@ -282,7 +283,7 @@ static int simulate_receive_data(int port, enum pd_data_msg_type msg_type,
nw, pd_port[port].rev, 0);
pd_port[port].mock_pe_error = -1;
- pd_port[port].mock_pe_pass_up_message = 0;
+ pd_port[port].mock_pe_message_received = 0;
emsg[port].header = 0;
emsg[port].len = 0;
memset(emsg[port].buf, 0, 260);
@@ -322,7 +323,7 @@ static int simulate_receive_extended_data(int port,
int req_timeout;
pd_port[port].mock_pe_error = -1;
- pd_port[port].mock_pe_pass_up_message = 0;
+ pd_port[port].mock_pe_message_received = 0;
emsg[port].header = 0;
emsg[port].len = 0;
memset(emsg[port].buf, 0, 260);
@@ -354,7 +355,7 @@ static int simulate_receive_extended_data(int port,
if (pd_port[port].mock_pe_error >= 0)
return 0;
- if (pd_port[port].mock_pe_pass_up_message)
+ if (pd_port[port].mock_pe_message_received)
return 0;
if (emsg[port].len != 0)
@@ -699,9 +700,9 @@ void pe_got_hard_reset(int port)
pd_port[port].mock_pe_got_hard_reset = 1;
}
-void pe_pass_up_message(int port)
+void pe_message_received(int port)
{
- pd_port[port].mock_pe_pass_up_message = 1;
+ pd_port[port].mock_pe_message_received = 1;
}
void pe_message_sent(int port)
@@ -719,12 +720,14 @@ void pe_got_soft_reset(int port)
pd_port[port].mock_got_soft_reset = 1;
}
-static int test_initial_states(void)
+static int test_prl_reset(void)
{
int port = PORT0;
enable_prl(port, 1);
+ prl_reset(port);
+
TEST_ASSERT(prl_tx_get_state(port) ==
PRL_TX_WAIT_FOR_MESSAGE_REQUEST);
TEST_ASSERT(rch_get_state(port) ==
@@ -733,6 +736,7 @@ static int test_initial_states(void)
TCH_WAIT_FOR_MESSAGE_REQUEST_FROM_PE);
TEST_ASSERT(prl_hr_get_state(port) ==
PRL_HR_WAIT_FOR_REQUEST);
+ enable_prl(port, 0);
return EC_SUCCESS;
}
@@ -1047,7 +1051,7 @@ static int test_receive_soft_reset_msg(void)
pd_port[port].mock_got_soft_reset = 0;
pd_port[port].mock_pe_error = -1;
- pd_port[port].mock_pe_pass_up_message = 0;
+ pd_port[port].mock_pe_message_received = 0;
TEST_ASSERT(simulate_receive_ctrl_msg(port, PD_CTRL_SOFT_RESET));
@@ -1058,7 +1062,7 @@ static int test_receive_soft_reset_msg(void)
TEST_ASSERT(pd_port[port].mock_got_soft_reset);
TEST_ASSERT(pd_port[port].mock_pe_error < 0);
- TEST_ASSERT(pd_port[port].mock_pe_pass_up_message);
+ TEST_ASSERT(pd_port[port].mock_pe_message_received);
TEST_ASSERT(expected_header == emsg[port].header);
TEST_ASSERT(emsg[port].len == 0);
@@ -1090,7 +1094,7 @@ static int test_receive_control_msg(void)
pd_port[port].mock_got_soft_reset = 0;
pd_port[port].mock_pe_error = -1;
- pd_port[port].mock_pe_pass_up_message = 0;
+ pd_port[port].mock_pe_message_received = 0;
TEST_ASSERT(simulate_receive_ctrl_msg(port, PD_CTRL_DR_SWAP));
@@ -1101,7 +1105,7 @@ static int test_receive_control_msg(void)
TEST_ASSERT(!pd_port[port].mock_got_soft_reset);
TEST_ASSERT(pd_port[port].mock_pe_error < 0);
- TEST_ASSERT(pd_port[port].mock_pe_pass_up_message);
+ TEST_ASSERT(pd_port[port].mock_pe_message_received);
TEST_ASSERT(expected_header == emsg[port].header);
TEST_ASSERT(emsg[port].len == 0);
@@ -1317,7 +1321,7 @@ void run_test(void)
/* Test PD 2.0 Protocol */
init_port(PORT0, PD_REV20);
- RUN_TEST(test_initial_states);
+ RUN_TEST(test_prl_reset);
RUN_TEST(test_send_ctrl_msg);
RUN_TEST(test_send_ctrl_msg_with_retry_and_fail);
RUN_TEST(test_send_ctrl_msg_with_retry_and_success);
@@ -1334,7 +1338,7 @@ void run_test(void)
/* Test PD 3.0 Protocol */
init_port(PORT0, PD_REV30);
- RUN_TEST(test_initial_states);
+ RUN_TEST(test_prl_reset);
RUN_TEST(test_send_ctrl_msg);
RUN_TEST(test_send_ctrl_msg_with_retry_and_fail);
RUN_TEST(test_send_ctrl_msg_with_retry_and_success);