Diffstat (limited to 'common')
-rw-r--r--  common/mock/tcpci_i2c_mock.c  |  40
-rw-r--r--  common/usb_pd_protocol.c      |   3
-rw-r--r--  common/usb_pd_tcpc.c          |   3
-rw-r--r--  common/usbc/usb_prl_sm.c      | 100
4 files changed, 70 insertions, 76 deletions
diff --git a/common/mock/tcpci_i2c_mock.c b/common/mock/tcpci_i2c_mock.c
index a0aa40dba4..09864cc2d2 100644
--- a/common/mock/tcpci_i2c_mock.c
+++ b/common/mock/tcpci_i2c_mock.c
@@ -10,6 +10,7 @@
#include "timer.h"
#define BUFFER_SIZE 100
+#define MOCK_WAIT_TIMEOUT (5 * SECOND)
struct tcpci_reg {
uint8_t offset;
@@ -137,14 +138,11 @@ static void print_header(const char *prefix, uint16_t header)
id, cnt, ext);
}
-int mock_tcpci_wait_for_transmit(enum tcpm_transmit_type tx_type,
+int verify_tcpci_transmit(enum tcpm_transmit_type tx_type,
enum pd_ctrl_msg_type ctrl_msg,
enum pd_data_msg_type data_msg)
{
- int want_tx_reg = (tx_type == TCPC_TX_SOP_PRIME) ?
- TCPC_REG_TRANSMIT_SET_WITHOUT_RETRY(tx_type) :
- TCPC_REG_TRANSMIT_SET_WITH_RETRY(tx_type);
- uint64_t timeout = get_time().val + 5 * SECOND;
+ uint64_t timeout = get_time().val + MOCK_WAIT_TIMEOUT;
TEST_EQ(tcpci_regs[TCPC_REG_TRANSMIT].value, 0, "%d");
while (get_time().val < timeout) {
@@ -153,14 +151,17 @@ int mock_tcpci_wait_for_transmit(enum tcpm_transmit_type tx_type,
tx_buffer, 1);
int type = PD_HEADER_TYPE(header);
int cnt = PD_HEADER_CNT(header);
+ const uint16_t want_tx_reg = tx_type;
+ const uint16_t tx_wo_retry =
+ tcpci_regs[TCPC_REG_TRANSMIT].value & ~0x0030;
- TEST_EQ(tcpci_regs[TCPC_REG_TRANSMIT].value,
- want_tx_reg, "%d");
+ /* Don't validate the retry portion of reg */
+ TEST_EQ(tx_wo_retry, want_tx_reg, "0x%x");
if (ctrl_msg != 0) {
- TEST_EQ(ctrl_msg, type, "%d");
+ TEST_EQ(ctrl_msg, type, "0x%x");
TEST_EQ(cnt, 0, "%d");
} else {
- TEST_EQ(data_msg, type, "%d");
+ TEST_EQ(data_msg, type, "0x%x");
TEST_GE(cnt, 1, "%d");
}
tcpci_regs[TCPC_REG_TRANSMIT].value = 0;
@@ -172,6 +173,27 @@ int mock_tcpci_wait_for_transmit(enum tcpm_transmit_type tx_type,
return EC_ERROR_UNKNOWN;
}
+int verify_tcpci_tx_retry_count(const uint8_t retry_count)
+{
+ uint64_t timeout = get_time().val + MOCK_WAIT_TIMEOUT;
+
+ TEST_EQ(tcpci_regs[TCPC_REG_TRANSMIT].value, 0, "%d");
+ while (get_time().val < timeout) {
+ if (tcpci_regs[TCPC_REG_TRANSMIT].value != 0) {
+ const uint16_t tx_retry = TCPC_REG_TRANSMIT_RETRY(
+ tcpci_regs[TCPC_REG_TRANSMIT].value);
+
+ TEST_EQ(tx_retry, retry_count, "%d");
+
+ tcpci_regs[TCPC_REG_TRANSMIT].value = 0;
+ return EC_SUCCESS;
+ }
+ task_wait_event(5 * MSEC);
+ }
+ TEST_ASSERT(0);
+ return EC_ERROR_UNKNOWN;
+}
+
void mock_tcpci_receive(enum pd_msg_type sop, uint16_t header,
uint32_t *payload)
{
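
The hunks above stop reconstructing the full expected TRANSMIT value and
instead mask off the retry bits (the ~0x0030 above) or extract them with
TCPC_REG_TRANSMIT_RETRY(). As a reference for those masks, here is a minimal,
self-contained sketch of the TCPCI TRANSMIT register fields the mock relies
on; the field positions follow the TCPCI spec, and the example_* names are
illustrative only, not part of this change:

#include <stdint.h>

/* TRANSMIT register fields used by the mock (illustrative names). */
#define EXAMPLE_TRANSMIT_RETRY_MASK 0x0030 /* bits [5:4]: hardware retry count */
#define EXAMPLE_TRANSMIT_TYPE_MASK  0x0007 /* bits [2:0]: SOP* transmit type */

/* Retry count the TCPM asked the TCPC to perform in hardware. */
static inline uint8_t example_transmit_retry(uint16_t reg)
{
	return (reg & EXAMPLE_TRANSMIT_RETRY_MASK) >> 4;
}

/* SOP* type of the queued transmission. */
static inline uint8_t example_transmit_type(uint16_t reg)
{
	return reg & EXAMPLE_TRANSMIT_TYPE_MASK;
}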
diff --git a/common/usb_pd_protocol.c b/common/usb_pd_protocol.c
index ececdde8ed..06ef2b630a 100644
--- a/common/usb_pd_protocol.c
+++ b/common/usb_pd_protocol.c
@@ -5046,7 +5046,8 @@ void pd_send_hpd(int port, enum hpd_event hpd)
VDO_OPOS(opos) | CMD_ATTENTION, data, 1);
/* Wait until VDM is done. */
while (pd[0].vdm_state > 0)
- task_wait_event(USB_PD_RX_TMOUT_US * (PD_RETRY_COUNT + 1));
+ task_wait_event(USB_PD_RX_TMOUT_US *
+ (CONFIG_PD_RETRY_COUNT + 1));
}
#endif
diff --git a/common/usb_pd_tcpc.c b/common/usb_pd_tcpc.c
index 5100319115..2878e53574 100644
--- a/common/usb_pd_tcpc.c
+++ b/common/usb_pd_tcpc.c
@@ -387,7 +387,8 @@ static int send_validate_message(int port, uint16_t header,
uint8_t expected_msg_id = PD_HEADER_ID(header);
uint8_t cnt = PD_HEADER_CNT(header);
int retries = PD_HEADER_TYPE(header) == PD_DATA_SOURCE_CAP ?
- 0 : PD_RETRY_COUNT;
+ 0 :
+ CONFIG_PD_RETRY_COUNT;
/* retry 3 times if we are not getting a valid answer */
for (r = 0; r <= retries; r++) {
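
The two hunks above (usb_pd_protocol.c and usb_pd_tcpc.c) are a mechanical
rename from PD_RETRY_COUNT to CONFIG_PD_RETRY_COUNT, turning the retry count
into a config option. A minimal sketch of how such an option is typically
declared in the EC config headers; the guard and the default value shown are
assumptions, not part of this diff:

/*
 * Sketch only: number of PHY-layer message retries (nRetryCount). Boards or
 * the negotiated PD revision may override this; the default is illustrative.
 */
#ifndef CONFIG_PD_RETRY_COUNT
#define CONFIG_PD_RETRY_COUNT 3
#endif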
diff --git a/common/usbc/usb_prl_sm.c b/common/usbc/usb_prl_sm.c
index ce32e0caf0..ec22502523 100644
--- a/common/usbc/usb_prl_sm.c
+++ b/common/usbc/usb_prl_sm.c
@@ -253,14 +253,12 @@ static struct protocol_layer_tx {
uint32_t flags;
/* protocol timer */
uint64_t sink_tx_timer;
- /* tcpc transmit timeout */
+ /* timeout to limit waiting on TCPC response (not in spec) */
uint64_t tcpc_tx_timeout;
/* last message type we transmitted */
enum tcpm_transmit_type last_xmit_type;
/* message id counters for all 6 port partners */
uint32_t msg_id_counter[NUM_SOP_STAR_TYPES];
- /* message retry counter */
- uint32_t retry_counter;
/* transmit status */
int xmit_status;
} prl_tx[CONFIG_USB_PD_PORT_MAX_COUNT];
@@ -768,9 +766,6 @@ static void prl_tx_phy_layer_reset_entry(const int port)
static void prl_tx_wait_for_message_request_entry(const int port)
{
print_current_prl_tx_state(port);
-
- /* Reset RetryCounter */
- prl_tx[port].retry_counter = 0;
}
static void prl_tx_wait_for_message_request_run(const int port)
@@ -980,9 +975,16 @@ static void prl_tx_construct_message(const int port)
prl_tx[port].xmit_status = TCPC_TX_UNSET;
PDMSG_CLR_FLAG(port, PRL_FLAGS_TX_COMPLETE);
- /* Pass message to PHY Layer */
+ /*
+ * Pass message to PHY Layer. It handles retries in hardware as the EC
+ * cannot handle the required timing ~ 1ms (tReceive + tRetry).
+ *
+ * Note that if we ever start sending large, extended messages, then we
+ * should not retry those messages. We do not support that and probably
+ * never will (since we support chunking).
+ */
tcpm_transmit(port, pdmsg[port].xmit_type, header,
- pdmsg[port].tx_chk_buf);
+ pdmsg[port].tx_chk_buf);
}
/*
@@ -997,71 +999,16 @@ static void prl_tx_wait_for_phy_response_entry(const int port)
static void prl_tx_wait_for_phy_response_run(const int port)
{
- int pd3_retry_check;
-
/* Wait until TX is complete */
/*
* NOTE: The TCPC will set xmit_status to TCPC_TX_COMPLETE_DISCARDED
* when a GoodCRC containing an incorrect MessageID is received.
- * This condition satifies the PRL_Tx_Match_MessageID state
+ * This condition satisfies the PRL_Tx_Match_MessageID state
* requirement.
*/
- if (get_time().val > prl_tx[port].tcpc_tx_timeout ||
- prl_tx[port].xmit_status == TCPC_TX_COMPLETE_FAILED ||
- prl_tx[port].xmit_status == TCPC_TX_COMPLETE_DISCARDED) {
-
- /* NOTE: PRL_Tx_Check_RetryCounter State embedded here. */
-
- /* Increment check RetryCounter */
- prl_tx[port].retry_counter++;
-
-#ifdef CONFIG_USB_PD_EXTENDED_MESSAGES
- pd3_retry_check = (pdmsg[port].ext &&
- PD_EXT_HEADER_DATA_SIZE(GET_EXT_HEADER(
- pdmsg[port].tx_chk_buf[0]) >
- PD_MAX_EXTENDED_MSG_CHUNK_LEN));
-#else
- pd3_retry_check = 0;
-#endif /* CONFIG_USB_PD_EXTENDED_MESSAGES */
-
- /*
- * (RetryCounter > nRetryCount) | Large Extended Message
- */
- if (prl_tx[port].retry_counter > N_RETRY_COUNT ||
- pd3_retry_check) {
- /*
- * NOTE: PRL_Tx_Transmission_Error State embedded
- * here.
- */
-
- if (IS_ENABLED(CONFIG_USB_PD_EXTENDED_MESSAGES)) {
- /*
- * State tch_wait_for_transmission_complete will
- * inform policy engine of error
- */
- PDMSG_SET_FLAG(port, PRL_FLAGS_TX_ERROR);
- } else {
- /* Report Error To Policy Engine */
- pe_report_error(port, ERR_TCH_XMIT,
- prl_tx[port].last_xmit_type);
- }
-
- /* Increment message id counter */
- increment_msgid_counter(port);
- set_state_prl_tx(port, PRL_TX_WAIT_FOR_MESSAGE_REQUEST);
- } else {
- /*
- * NOTE: PRL_TX_Construct_Message State embedded
- * here.
- */
- /* Try to resend the message. */
- prl_tx_construct_message(port);
- prl_tx[port].tcpc_tx_timeout = get_time().val
- + PD_T_TCPC_TX_TIMEOUT;
- }
- } else if (prl_tx[port].xmit_status == TCPC_TX_COMPLETE_SUCCESS) {
+ if (prl_tx[port].xmit_status == TCPC_TX_COMPLETE_SUCCESS) {
/* NOTE: PRL_TX_Message_Sent State embedded here. */
/* Increment messageId counter */
increment_msgid_counter(port);
@@ -1078,6 +1025,29 @@ static void prl_tx_wait_for_phy_response_run(const int port)
*/
task_set_event(PD_PORT_TO_TASK_ID(port), PD_EVENT_SM, 0);
set_state_prl_tx(port, PRL_TX_WAIT_FOR_MESSAGE_REQUEST);
+ } else if (get_time().val > prl_tx[port].tcpc_tx_timeout ||
+ prl_tx[port].xmit_status == TCPC_TX_COMPLETE_FAILED ||
+ prl_tx[port].xmit_status == TCPC_TX_COMPLETE_DISCARDED) {
+ /*
+ * NOTE: PRL_Tx_Transmission_Error State embedded
+ * here.
+ */
+
+ if (IS_ENABLED(CONFIG_USB_PD_EXTENDED_MESSAGES)) {
+ /*
+ * State tch_wait_for_transmission_complete will
+ * inform policy engine of error
+ */
+ PDMSG_SET_FLAG(port, PRL_FLAGS_TX_ERROR);
+ } else {
+ /* Report Error To Policy Engine */
+ pe_report_error(port, ERR_TCH_XMIT,
+ prl_tx[port].last_xmit_type);
+ }
+
+ /* Increment message id counter */
+ increment_msgid_counter(port);
+ set_state_prl_tx(port, PRL_TX_WAIT_FOR_MESSAGE_REQUEST);
}
}
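
With the software RetryCounter removed, the protocol layer issues a single
transmit request and relies on the retry count programmed into the TCPC,
which is what the new verify_tcpci_tx_retry_count() helper checks. A rough
sketch of how a test might exercise this, assuming the usual test_util.h and
mock headers are included; the test name and the DUT-side stimulus are
hypothetical, and each verify_* call consumes one write to the mock TRANSMIT
register since the helpers zero it after checking:

/* Hypothetical test body built on the mock helpers added above. */
static int test_prl_requests_hw_retries(void)
{
	/* First queued message: check SOP type and that it is Source_Capabilities. */
	TEST_EQ(verify_tcpci_transmit(TCPC_TX_SOP, 0, PD_DATA_SOURCE_CAP),
		EC_SUCCESS, "%d");

	/* A later message: check only the retry count requested of the TCPC. */
	TEST_EQ(verify_tcpci_tx_retry_count(CONFIG_PD_RETRY_COUNT),
		EC_SUCCESS, "%d");

	return EC_SUCCESS;
}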