author    Scott <scollyer@chromium.org>  2016-06-15 15:33:25 -0700
committer chrome-bot <chrome-bot@chromium.org>  2016-06-22 13:22:50 -0700
commit    b6920f42f933c06c4c362856a5d257ffccb8ba8c (patch)
tree      ad48e961343fcf83f68a307fe88c91d25e7d19b3
parent    00aef53a7b4c1a302b8f1448f2c634712c7b77eb (diff)
download  chrome-ec-b6920f42f933c06c4c362856a5d257ffccb8ba8c.tar.gz
Cr50: NvMem: Added mutex lock protection for cache memory
Added a mutex lock for nvmem write/move operations. In the current
implementation there is no single entry point for the platform-specific
NvMem calls, so the mutex lock is coupled with a task number so that the
same task can attempt to grab the lock again without stalling itself.

In addition to the mutex lock, changed where the cache.base_ptr variable
is updated. Previously this was done prior to the partition being copied
from flash to the shared memory area. Now the variable is only updated
after the copy, so that read operations always read correctly from either
flash, or from cache memory once a write operation has been started.

BRANCH=none
BUG=chrome-os-partner:52520
TEST=Manual
make runtests TEST_LIST_HOST=nvmem and verify that all tests pass.
Tested with the tcg_test utility to test reads/writes using the command
"build/test-tpm2/install/bin/compliance --ntpm localhost:9883 --select
CPCTPM_TC2_3_33_07_01".

Change-Id: Ib6f278ad889424f4df85e4a328da1f45c8d00730
Signed-off-by: Scott <scollyer@chromium.org>
Reviewed-on: https://chromium-review.googlesource.com/353026
Commit-Ready: Scott Collyer <scollyer@chromium.org>
Tested-by: Scott Collyer <scollyer@chromium.org>
Reviewed-by: Bill Richardson <wfrichar@chromium.org>
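Editor's note: a minimal standalone sketch of the two ideas described above,
the per-task re-entrant mutex and publishing cache.base_ptr only after the
flash-to-RAM copy. It reuses the EC task/mutex API that the diff below relies
on (task_get_current(), mutex_lock(), TASK_ID_COUNT); shared_mem_ptr and
flash_partition_ptr are placeholder names for this sketch, and the error
handling and shared_mem retry loop of the real nvmem_acquire_cache() are
omitted. This is an illustration, not the exact Cr50 sources.

    #include "common.h"   /* EC_SUCCESS */
    #include "nvmem.h"    /* NVMEM_PARTITION_SIZE */
    #include "task.h"     /* task_get_current(), TASK_ID_COUNT, struct mutex */
    #include "util.h"     /* memcpy() */

    /* Placeholders: the real code gets these from shared_mem_acquire()
     * and CONFIG_FLASH_NVMEM_BASE. */
    extern uint8_t *shared_mem_ptr;
    extern const uint8_t *flash_partition_ptr;

    struct nvmem_cache {
    	uint8_t *base_ptr;  /* NULL until the flash copy has completed */
    	task_id_t task;     /* TASK_ID_COUNT when no task holds the lock */
    	struct mutex mtx;
    };

    static struct nvmem_cache cache;

    static int nvmem_lock_cache(void)
    {
    	/* Re-entrant for the lock holder: the same task never blocks on
    	 * its own mutex, which matters because there is no single entry
    	 * point into the platform-specific NvMem calls. */
    	if (cache.task == task_get_current())
    		return EC_SUCCESS;

    	mutex_lock(&cache.mtx);
    	cache.task = task_get_current();

    	/* Fill the RAM copy first... */
    	memcpy(shared_mem_ptr, flash_partition_ptr, NVMEM_PARTITION_SIZE);
    	/* ...then publish it. Readers treat a NULL base_ptr as "read from
    	 * flash", so they never see a half-copied cache. */
    	cache.base_ptr = shared_mem_ptr;
    	return EC_SUCCESS;
    }

    static void nvmem_release_cache(void)
    {
    	cache.base_ptr = NULL;       /* readers fall back to flash */
    	cache.task = TASK_ID_COUNT;  /* cannot match any valid task */
    	mutex_unlock(&cache.mtx);
    }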
-rw-r--r--  common/nvmem.c       130
-rw-r--r--  test/nvmem.c          92
-rw-r--r--  test/nvmem.tasklist    4
3 files changed, 174 insertions(+), 52 deletions(-)
diff --git a/common/nvmem.c b/common/nvmem.c
index c4a373291a..1c7d8d69dc 100644
--- a/common/nvmem.c
+++ b/common/nvmem.c
@@ -8,6 +8,7 @@
#include "flash.h"
#include "nvmem.h"
#include "shared_mem.h"
+#include "task.h"
#include "timer.h"
#include "util.h"
@@ -31,8 +32,14 @@ static uint32_t nvmem_user_start_offset[NVMEM_NUM_USERS];
/* A/B partion that is most up to date */
static int nvmem_act_partition;
-/* NvMem Cache Memory pointer */
-static uint8_t *cache_base_ptr;
+/* NvMem cache memory structure */
+struct nvmem_cache {
+ uint8_t *base_ptr;
+ task_id_t task;
+ struct mutex mtx;
+};
+
+struct nvmem_cache cache;
/* NvMem error state */
static int nvmem_error_state;
@@ -60,10 +67,10 @@ static int nvmem_verify_partition_sha(int index)
static int nvmem_acquire_cache(void)
{
int attempts = 0;
+ uint8_t *shared_mem_ptr;
+ uint8_t *p_src;
int ret;
- /* TODO Need to add mutex lock/unlock crosbug.com/p/52520 */
-
if (shared_mem_size() < NVMEM_PARTITION_SIZE) {
CPRINTF("Not enough shared mem! avail = 0x%x < reqd = 0x%x\n",
shared_mem_size(), NVMEM_PARTITION_SIZE);
@@ -72,10 +79,17 @@ static int nvmem_acquire_cache(void)
while (attempts < NVMEM_ACQUIRE_CACHE_MAX_ATTEMPTS) {
ret = shared_mem_acquire(NVMEM_PARTITION_SIZE,
- (char **)&cache_base_ptr);
- if (ret == EC_SUCCESS)
+ (char **)&shared_mem_ptr);
+ if (ret == EC_SUCCESS) {
+ /* Copy partition contents from flash into cache */
+ p_src = (uint8_t *)(CONFIG_FLASH_NVMEM_BASE +
+ nvmem_act_partition *
+ NVMEM_PARTITION_SIZE);
+ memcpy(shared_mem_ptr, p_src, NVMEM_PARTITION_SIZE);
+ /* Now that cache is up to date, assign pointer */
+ cache.base_ptr = shared_mem_ptr;
return EC_SUCCESS;
- else if (ret == EC_ERROR_BUSY) {
+ } else if (ret == EC_ERROR_BUSY) {
CPRINTF("Shared Mem not avail! Attempt %d\n", attempts);
/* wait NVMEM_ACQUIRE_CACHE_SLEEP_MS msec */
/* TODO: what time really makes sense? */
@@ -88,24 +102,35 @@ static int nvmem_acquire_cache(void)
return EC_ERROR_TIMEOUT;
}
-static int nvmem_update_cache_ptr(void)
+static int nvmem_lock_cache(void)
{
- uint8_t *p_src;
+ /*
+ * Need to protect the cache contents and pointer value from other tasks
+ * attempting to do nvmem write operations. However, since this function
+ * may be called multiple times prior to the mutex lock being released,
+ * there is a check first to see if the current task holds the lock. If
+ * it does then the task number will equal the value in cache.task. If
+ * the lock is held by a different task, then mutex_lock() operates as
+ * normal and blocks until the lock is released.
+ */
+ if (cache.task != task_get_current()) {
+ mutex_lock(&cache.mtx);
+ cache.task = task_get_current();
+ } else
+ /* Lock is held by current task, nothing else to do. */
+ return EC_SUCCESS;
/*
- * If cache_base_ptr is not NULL, then nothing to do. However, if NULL,
- * then need to first acquire the shared memory buffer and the full
- * partition needs to be copied from flash into the cache buffer.
+ * Acquire the shared memory buffer and copy the full
+ * partition size from flash into the cache buffer.
*/
- if (cache_base_ptr == NULL) {
- if (nvmem_acquire_cache() != EC_SUCCESS)
- return EC_ERROR_TIMEOUT;
- /* Copy partiion contents from flash into cache buffer */
- p_src = (uint8_t *)(CONFIG_FLASH_NVMEM_BASE +
- nvmem_act_partition *
- NVMEM_PARTITION_SIZE);
- memcpy(cache_base_ptr, p_src,
- NVMEM_PARTITION_SIZE);
+ if (nvmem_acquire_cache() != EC_SUCCESS) {
+ /* Shared memory not available, need to release lock */
+ /* Set task number to value that can't match a valid task */
+ cache.task = TASK_ID_COUNT;
+ /* Release lock */
+ mutex_unlock(&cache.mtx);
+ return EC_ERROR_TIMEOUT;
}
return EC_SUCCESS;
@@ -114,10 +139,13 @@ static int nvmem_update_cache_ptr(void)
static void nvmem_release_cache(void)
{
/* Done with shared memory buffer, release it. */
- shared_mem_release(cache_base_ptr);
+ shared_mem_release(cache.base_ptr);
/* Inidicate cache is not available */
- cache_base_ptr = NULL;
- /* TODO Release mutex lock here crosbug.com/p/52520 */
+ cache.base_ptr = NULL;
+ /* Reset task number to max value */
+ cache.task = TASK_ID_COUNT;
+ /* Release mutex lock here */
+ mutex_unlock(&cache.mtx);
}
static int nvmem_is_unitialized(void)
@@ -140,14 +168,14 @@ static int nvmem_is_unitialized(void)
*/
nvmem_act_partition = 0;
/* Need to acquire the shared memory buffer */
- ret = nvmem_update_cache_ptr();
+ ret = nvmem_lock_cache();
if (ret != EC_SUCCESS)
return ret;
- p_part = (struct nvmem_partition *)cache_base_ptr;
+ p_part = (struct nvmem_partition *)cache.base_ptr;
/* Start with version 0 */
p_part->tag.version = 0;
/* Compute sha with updated tag */
- nvmem_compute_sha(&cache_base_ptr[NVMEM_SHA_SIZE],
+ nvmem_compute_sha(&cache.base_ptr[NVMEM_SHA_SIZE],
NVMEM_PARTITION_SIZE - NVMEM_SHA_SIZE,
p_part->tag.sha,
NVMEM_SHA_SIZE);
@@ -156,16 +184,14 @@ static int nvmem_is_unitialized(void)
* partition was just verified to be fully erased, can just do write
* operation.
*/
- if (flash_physical_write(CONFIG_FLASH_NVMEM_OFFSET,
+ ret = flash_physical_write(CONFIG_FLASH_NVMEM_OFFSET,
sizeof(struct nvmem_tag),
- cache_base_ptr)) {
+ cache.base_ptr);
+ nvmem_release_cache();
+ if (ret) {
CPRINTF("%s:%d\n", __func__, __LINE__);
- nvmem_release_cache();
- return EC_ERROR_UNKNOWN;
+ return ret;
}
- /* Can release the cache buffer now */
- nvmem_release_cache();
-
return EC_SUCCESS;
}
@@ -285,16 +311,16 @@ int nvmem_setup(uint8_t starting_version)
/* Set active partition variable */
nvmem_act_partition = part;
/* Get the cache buffer */
- if (nvmem_update_cache_ptr() != EC_SUCCESS) {
+ if (nvmem_lock_cache() != EC_SUCCESS) {
CPRINTF("NvMem: Cache ram not available!\n");
return EC_ERROR_TIMEOUT;
}
/* Fill in tag info */
- p_part = (struct nvmem_partition *)cache_base_ptr;
+ p_part = (struct nvmem_partition *)cache.base_ptr;
/* Commit function will increment version number */
p_part->tag.version = starting_version + part - 1;
- nvmem_compute_sha(&cache_base_ptr[NVMEM_SHA_SIZE],
+ nvmem_compute_sha(&cache.base_ptr[NVMEM_SHA_SIZE],
NVMEM_PARTITION_SIZE -
NVMEM_SHA_SIZE,
p_part->tag.sha,
@@ -325,8 +351,10 @@ int nvmem_init(void)
}
/* Initialize error state, assume everything is good */
nvmem_error_state = EC_SUCCESS;
- /* Default state for cache_base_ptr */
- cache_base_ptr = NULL;
+ /* Default state for cache base_ptr and task number */
+ cache.base_ptr = NULL;
+ cache.task = TASK_ID_COUNT;
+
ret = nvmem_find_partition();
if (ret != EC_SUCCESS) {
/* Change error state to non-zero */
@@ -353,12 +381,12 @@ int nvmem_is_different(uint32_t offset, uint32_t size, void *data,
uint32_t src_offset;
/* Point to either NvMem flash or ram if that's active */
- if (cache_base_ptr == NULL)
+ if (cache.base_ptr == NULL)
src_addr = CONFIG_FLASH_NVMEM_BASE + nvmem_act_partition *
NVMEM_PARTITION_SIZE;
else
- src_addr = (uintptr_t)cache_base_ptr;
+ src_addr = (uintptr_t)cache.base_ptr;
/* Get partition offset for this read operation */
ret = nvmem_get_partition_off(user, offset, size, &src_offset);
@@ -381,12 +409,12 @@ int nvmem_read(uint32_t offset, uint32_t size,
uint32_t src_offset;
/* Point to either NvMem flash or ram if that's active */
- if (cache_base_ptr == NULL)
+ if (cache.base_ptr == NULL)
src_addr = CONFIG_FLASH_NVMEM_BASE + nvmem_act_partition *
NVMEM_PARTITION_SIZE;
else
- src_addr = (uintptr_t)cache_base_ptr;
+ src_addr = (uintptr_t)cache.base_ptr;
/* Get partition offset for this read operation */
ret = nvmem_get_partition_off(user, offset, size, &src_offset);
if (ret != EC_SUCCESS)
@@ -410,7 +438,7 @@ int nvmem_write(uint32_t offset, uint32_t size,
uint32_t dest_offset;
/* Make sure that the cache buffer is active */
- ret = nvmem_update_cache_ptr();
+ ret = nvmem_lock_cache();
if (ret)
/* TODO: What to do when can't access cache buffer? */
return ret;
@@ -421,7 +449,7 @@ int nvmem_write(uint32_t offset, uint32_t size,
return ret;
/* Advance to correct offset within data buffer */
- dest_addr = (uintptr_t)cache_base_ptr;
+ dest_addr = (uintptr_t)cache.base_ptr;
dest_addr += dest_offset;
p_dest = (uint8_t *)dest_addr;
/* Copy data from caller into destination buffer */
@@ -439,7 +467,7 @@ int nvmem_move(uint32_t src_offset, uint32_t dest_offset, uint32_t size,
uint32_t s_buff_offset, d_buff_offset;
/* Make sure that the cache buffer is active */
- ret = nvmem_update_cache_ptr();
+ ret = nvmem_lock_cache();
if (ret)
/* TODO: What to do when can't access cache buffer? */
return ret;
@@ -454,7 +482,7 @@ int nvmem_move(uint32_t src_offset, uint32_t dest_offset, uint32_t size,
if (ret != EC_SUCCESS)
return ret;
- base_addr = (uintptr_t)cache_base_ptr;
+ base_addr = (uintptr_t)cache.base_ptr;
/* Create pointer to src location within partition */
p_src = (uint8_t *)(base_addr + s_buff_offset);
/* Create pointer to dest location within partition */
@@ -479,18 +507,18 @@ int nvmem_commit(void)
*/
/* Update version number */
- if (cache_base_ptr == NULL) {
+ if (cache.base_ptr == NULL) {
CPRINTF("%s:%d\n", __func__, __LINE__);
return EC_ERROR_UNKNOWN;
}
- p_part = (struct nvmem_partition *)cache_base_ptr;
+ p_part = (struct nvmem_partition *)cache.base_ptr;
version = p_part->tag.version + 1;
/* Check for restricted version number */
if (version == NVMEM_VERSION_MASK)
version = 0;
p_part->tag.version = version;
/* Update the sha */
- nvmem_compute_sha(&cache_base_ptr[NVMEM_SHA_SIZE],
+ nvmem_compute_sha(&cache.base_ptr[NVMEM_SHA_SIZE],
NVMEM_PARTITION_SIZE - NVMEM_SHA_SIZE,
p_part->tag.sha,
NVMEM_SHA_SIZE);
@@ -513,7 +541,7 @@ int nvmem_commit(void)
/* Write partition */
if (flash_physical_write(nvmem_offset,
NVMEM_PARTITION_SIZE,
- cache_base_ptr)) {
+ cache.base_ptr)) {
CPRINTF("%s:%d\n", __func__, __LINE__);
/* Free up scratch buffers */
nvmem_release_cache();
diff --git a/test/nvmem.c b/test/nvmem.c
index 2567853341..d3bc0b99e1 100644
--- a/test/nvmem.c
+++ b/test/nvmem.c
@@ -28,6 +28,7 @@ uint32_t nvmem_user_sizes[NVMEM_NUM_USERS] = {
static uint8_t write_buffer[NVMEM_PARTITION_SIZE];
static uint8_t read_buffer[NVMEM_PARTITION_SIZE];
static int flash_write_fail;
+static int lock_test_started;
void nvmem_compute_sha(uint8_t *p_buf, int num_bytes, uint8_t *p_sha,
int sha_bytes)
@@ -444,6 +445,95 @@ static int test_is_different(void)
return EC_SUCCESS;
}
+int nvmem_first_task(void *unused)
+{
+ uint32_t offset = 0;
+ uint32_t num_bytes = WRITE_SEGMENT_LEN;
+ int user = NVMEM_USER_0;
+
+ task_wait_event(0);
+ /* Generate source data */
+ generate_random_data(0, num_bytes);
+ nvmem_write(0, num_bytes, &write_buffer[offset], user);
+ /* Read from cache memory */
+ nvmem_read(0, num_bytes, read_buffer, user);
+ /* Verify that write to nvmem was successful */
+ TEST_ASSERT_ARRAY_EQ(write_buffer, read_buffer, num_bytes);
+ /* Wait here with mutex held by this task */
+ task_wait_event(0);
+ /* Write to flash which releases nvmem mutex */
+ nvmem_commit();
+ nvmem_read(0, num_bytes, read_buffer, user);
+ /* Verify that write to flash was successful */
+ TEST_ASSERT_ARRAY_EQ(write_buffer, read_buffer, num_bytes);
+
+ return EC_SUCCESS;
+}
+
+int nvmem_second_task(void *unused)
+{
+ uint32_t offset = WRITE_SEGMENT_LEN;
+ uint32_t num_bytes = WRITE_SEGMENT_LEN;
+ int user = NVMEM_USER_0;
+
+ task_wait_event(0);
+
+ /* Gen test data and don't overwrite test data generated by 1st task */
+ generate_random_data(offset, num_bytes);
+ /* Write test data at offset 0 nvmem user buffer */
+ nvmem_write(0, num_bytes, &write_buffer[offset], user);
+ /* Write to flash */
+ nvmem_commit();
+ /* Read from nvmem */
+ nvmem_read(0, num_bytes, read_buffer, user);
+ /* Verify that write to nvmem was successful */
+ TEST_ASSERT_ARRAY_EQ(&write_buffer[offset], read_buffer, num_bytes);
+ /* Clear flag to indicate lock test is complete */
+ lock_test_started = 0;
+
+ return EC_SUCCESS;
+}
+
+static int test_lock(void)
+{
+ /*
+ * The purpose of this test is to verify the mutex lock portion of the
+ * nvmem module. There are two additional tasks utilized. The first task
+ * is woken and it creates some test data and does an
+ * nvmem_write(). This will cause the mutex to be locked by the 1st
+ * task. The 1st task then waits and control is returned to this
+ * function and the 2nd task is woken; the 2nd task also attempts to
+ * write data to nvmem. The 2nd task should stall waiting for the mutex
+ * to be unlocked.
+ *
+ * When control returns to this function, the 1st task is woken again
+ * and the nvmem operation is completed. This will allow the 2nd task to
+ * grab the lock and finish its nvmem operation. The test will not
+ * complete until the 2nd task finishes the nvmem write. A static global
+ * flag is used to let this function know when the 2nd task is complete.
+ *
+ * Both tasks write to the same location in nvmem so the test will only
+ * pass if the 2nd task can't write until the nvmem write in the 1st
+ * task is completed.
+ */
+
+ /* Set flag for start of test */
+ lock_test_started = 1;
+ /* Wake first_task */
+ task_wake(TASK_ID_NV_1);
+ task_wait_event(1000);
+ /* Wake second_task. It should stall waiting for mutex */
+ task_wake(TASK_ID_NV_2);
+ task_wait_event(1000);
+ /* Go back to first_task so it can complete its nvmem operation */
+ task_wake(TASK_ID_NV_1);
+ /* Wait for 2nd task to complete nvmem operation */
+ while (lock_test_started)
+ task_wait_event(100);
+
+ return EC_SUCCESS;
+}
+
static void run_test_setup(void)
{
/* Allow Flash erase/writes */
@@ -471,5 +561,7 @@ void run_test(void)
RUN_TEST(test_move);
/* Test NvMem IsDifferent function */
RUN_TEST(test_is_different);
+ /* Test Nvmem write lock */
+ RUN_TEST(test_lock);
test_print_result();
}
diff --git a/test/nvmem.tasklist b/test/nvmem.tasklist
index 30e5f7fc22..25f61b13d6 100644
--- a/test/nvmem.tasklist
+++ b/test/nvmem.tasklist
@@ -14,4 +14,6 @@
* 'd' in an opaque parameter passed to the routine at startup
* 's' is the stack size in bytes; must be a multiple of 8
*/
-#define CONFIG_TEST_TASK_LIST
+#define CONFIG_TEST_TASK_LIST \
+ TASK_TEST(NV_1, nvmem_first_task, NULL, 384) \
+ TASK_TEST(NV_2, nvmem_second_task, NULL, 384)