author    Scott Collyer <scollyer@google.com>  2021-03-03 18:29:15 -0800
committer Commit Bot <commit-bot@chromium.org> 2021-03-19 02:42:40 +0000
commit    1720f734983ab6452884573e3a7c8e184d28cef7 (patch)
tree      cb70a8370f951218755645d34ab86a2a03494fc5 /common
parent    cb20783554410be144f1b552f6677fbc50b53a9a (diff)
download  chrome-ec-1720f734983ab6452884573e3a7c8e184d28cef7.tar.gz
TCPMv2: PRL: Don't report ERR_RCH_CHUNKED to PE for timeout
This CL adds a new RCH error type for the case where a sender response
timeout occurs while waiting for the next chunk of a chunked message to
be received. This particular error should not result in the PE
triggering a soft reset, so the new error type allows the PE error
function to do nothing for this case. All other RCH error states remain
the same.

This change is required to pass the TD.PD.SRC3.E32 compliance test,
where the tester purposely stops sending after the 4th chunk and
expects to be able to resend that message without a protocol error
being reported.

BUG=b:181333342,b:179443762
BRANCH=None
TEST=Verified that quiche can now pass TD.PD.SRC3.E32

Signed-off-by: Scott Collyer <scollyer@google.com>
Change-Id: I0142ca8d23cd23ef7b347d5c07155cdb17f44b88
Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/platform/ec/+/2734158
Reviewed-by: Denis Brockus <dbrockus@chromium.org>
Tested-by: Denis Brockus <dbrockus@chromium.org>
Tested-by: Scott Collyer <scollyer@chromium.org>
Commit-Queue: Scott Collyer <scollyer@chromium.org>
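Because the diffstat below is limited to common/, the header change that
introduces the new error code is not part of this page. As a rough
sketch, assuming the enum lives alongside the other ERR_RCH_* codes in
include/usb_pe_sm.h, the addition presumably looks like this (only the
ERR_RCH_* names visible in the hunks below are taken from the source;
everything else is elided or assumed):

/*
 * Sketch only: the defining header is outside this diff.
 */
enum pe_error {
	ERR_RCH_CHUNKED,		/* chunking-related protocol error */
	ERR_RCH_MSG_REC,		/* message received during chunking */
	/* New: chunk wait timeout; the PE deliberately ignores this one. */
	ERR_RCH_CHUNK_WAIT_TIMEOUT,
	/* ... remaining error codes elided ... */
};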
Diffstat (limited to 'common')
-rw-r--r--  common/usbc/usb_pe_drp_sm.c   7
-rw-r--r--  common/usbc/usb_prl_sm.c     18
2 files changed, 21 insertions, 4 deletions
diff --git a/common/usbc/usb_pe_drp_sm.c b/common/usbc/usb_pe_drp_sm.c
index 4e9d1b2c44..e3c3ee4627 100644
--- a/common/usbc/usb_pe_drp_sm.c
+++ b/common/usbc/usb_pe_drp_sm.c
@@ -1126,6 +1126,13 @@ void pe_report_error(int port, enum pe_error e, enum tcpm_transmit_type type)
assert(port == TASK_ID_TO_PD_PORT(task_get_current()));
/*
+ * If there is a timeout error while waiting for a chunk of a chunked
+ * message, there is no requirement to trigger a soft reset.
+ */
+ if (e == ERR_RCH_CHUNK_WAIT_TIMEOUT)
+ return;
+
+ /*
* Generate Hard Reset if Protocol Error occurred
* while in PE_Send_Soft_Reset state.
*/
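
Together with the PRL change in the second file, the effect of this
guard is that a ChunkSenderResponseTimer expiry still arrives at
pe_report_error() but is dropped before any reset logic runs. A minimal
sketch of the two call paths, using only names visible in this diff:

/* Timeout case: the PE swallows the error; no soft reset is sent. */
pe_report_error(port, ERR_RCH_CHUNK_WAIT_TIMEOUT, prl_rx[port].sop);

/* Any other RCH error still drives the existing reset handling. */
pe_report_error(port, ERR_RCH_CHUNKED, prl_rx[port].sop);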
diff --git a/common/usbc/usb_prl_sm.c b/common/usbc/usb_prl_sm.c
index 91bb54746a..34f0ff251d 100644
--- a/common/usbc/usb_prl_sm.c
+++ b/common/usbc/usb_prl_sm.c
@@ -296,6 +296,8 @@ static struct rx_chunked {
struct sm_ctx ctx;
/* PRL_FLAGS */
uint32_t flags;
+ /* error to report when moving to rch_report_error state */
+ enum pe_error error;
} rch[CONFIG_USB_PD_PORT_MAX_COUNT];
/* Chunked Tx State Machine Object */
@@ -1494,6 +1496,7 @@ static void rch_wait_for_message_from_protocol_layer_run(const int port)
* Chunked != Chunking
*/
else {
+ rch[port].error = ERR_RCH_CHUNKED;
set_state_rch(port, RCH_REPORT_ERROR);
}
}
@@ -1510,6 +1513,7 @@ static void rch_wait_for_message_from_protocol_layer_run(const int port)
* revision lower than PD3.0
*/
else {
+ rch[port].error = ERR_RCH_CHUNKED;
set_state_rch(port, RCH_REPORT_ERROR);
}
}
@@ -1563,6 +1567,7 @@ static void rch_processing_extended_message_run(const int port)
/* Make sure extended message buffer does not overflow */
if (pdmsg[port].num_bytes_received +
byte_num > EXTENDED_BUFFER_SIZE) {
+ rch[port].error = ERR_RCH_CHUNKED;
set_state_rch(port, RCH_REPORT_ERROR);
return;
}
@@ -1593,8 +1598,10 @@ static void rch_processing_extended_message_run(const int port)
/*
* Unexpected Chunk Number
*/
- else
+ else {
+ rch[port].error = ERR_RCH_CHUNKED;
set_state_rch(port, RCH_REPORT_ERROR);
+ }
}
/*
@@ -1630,6 +1637,7 @@ static void rch_requesting_chunk_run(const int port)
set_state_rch(port, RCH_WAITING_CHUNK);
} else if (PDMSG_CHK_FLAG(port, PRL_FLAGS_TX_ERROR)) {
/* Transmission Error from Protocol Layer detected */
+ rch[port].error = ERR_RCH_CHUNKED;
set_state_rch(port, RCH_REPORT_ERROR);
} else if (RCH_CHK_FLAG(port, PRL_FLAGS_MSG_RECEIVED)) {
/*
@@ -1685,6 +1693,7 @@ static void rch_waiting_chunk_run(const int port)
*/
if (PD_EXT_HEADER_REQ_CHUNK(exhdr) ||
!PD_EXT_HEADER_CHUNKED(exhdr)) {
+ rch[port].error = ERR_RCH_CHUNKED;
set_state_rch(port, RCH_REPORT_ERROR);
}
/*
@@ -1704,8 +1713,10 @@ static void rch_waiting_chunk_run(const int port)
/*
* ChunkSenderResponseTimer Timeout
*/
- else if (pd_timer_is_expired(port, PR_TIMER_CHUNK_SENDER_RESPONSE))
+ else if (pd_timer_is_expired(port, PR_TIMER_CHUNK_SENDER_RESPONSE)) {
+ rch[port].error = ERR_RCH_CHUNK_WAIT_TIMEOUT;
set_state_rch(port, RCH_REPORT_ERROR);
+ }
}
static void rch_waiting_chunk_exit(int port)
@@ -1734,8 +1745,7 @@ static void rch_report_error_entry(const int port)
/* Report error */
pe_report_error(port, ERR_RCH_MSG_REC, prl_rx[port].sop);
} else {
- /* Report error */
- pe_report_error(port, ERR_RCH_CHUNKED, prl_rx[port].sop);
+ pe_report_error(port, rch[port].error, prl_rx[port].sop);
}
}
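
The pattern this CL establishes is stash-then-transition: each RCH state
records the specific error code in rch[port].error before entering
RCH_REPORT_ERROR, and the entry handler forwards whatever was stashed.
A hedged sketch of how a future error site would follow the same
pattern (the state name and condition here are hypothetical):

static void rch_some_future_state_run(const int port)	/* hypothetical */
{
	if (some_error_condition) {	/* placeholder condition */
		/* Record the specific error first... */
		rch[port].error = ERR_RCH_CHUNKED;
		/* ...then transition; rch_report_error_entry() reports it. */
		set_state_rch(port, RCH_REPORT_ERROR);
	}
}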