Diffstat (limited to 'arch')
-rw-r--r--  arch/mips/dts/mrvl,cn73xx.dtsi                            |   77
-rw-r--r--  arch/mips/dts/mrvl,octeon-ebb7304.dts                     |   24
-rw-r--r--  arch/mips/mach-octeon/Makefile                            |    3
-rw-r--r--  arch/mips/mach-octeon/bootoctlinux.c                      |  661
-rw-r--r--  arch/mips/mach-octeon/cache.c                             |   12
-rw-r--r--  arch/mips/mach-octeon/cpu.c                               |   21
-rw-r--r--  arch/mips/mach-octeon/cvmx-bootmem.c                      | 1460
-rw-r--r--  arch/mips/mach-octeon/cvmx-coremask.c                     |  366
-rw-r--r--  arch/mips/mach-octeon/dram.c                              |   72
-rw-r--r--  arch/mips/mach-octeon/include/mach/bootoct_cmd.h          |   54
-rw-r--r--  arch/mips/mach-octeon/include/mach/cvmx-bootinfo.h        |  350
-rw-r--r--  arch/mips/mach-octeon/include/mach/cvmx-bootmem.h         |  533
-rw-r--r--  arch/mips/mach-octeon/include/mach/cvmx-coremask.h        |  752
-rw-r--r--  arch/mips/mach-octeon/include/mach/cvmx-fuse.h            |   71
-rw-r--r--  arch/mips/mach-octeon/include/mach/cvmx-regs.h            |  144
-rw-r--r--  arch/mips/mach-octeon/include/mach/cvmx/cvmx-lmcx-defs.h  | 4574
-rw-r--r--  arch/mips/mach-octeon/include/mach/octeon-feature.h       |  442
-rw-r--r--  arch/mips/mach-octeon/include/mach/octeon-model.h         |  317
-rw-r--r--  arch/mips/mach-octeon/include/mach/octeon_ddr.h           |  982
-rw-r--r--  arch/mips/mach-octeon/include/mangle-port.h               |   56
-rw-r--r--  arch/mips/mach-octeon/lowlevel_init.S                     |   76
21 files changed, 11033 insertions(+), 14 deletions(-)
diff --git a/arch/mips/dts/mrvl,cn73xx.dtsi b/arch/mips/dts/mrvl,cn73xx.dtsi
index f5ad4a6213..40eb85ee0c 100644
--- a/arch/mips/dts/mrvl,cn73xx.dtsi
+++ b/arch/mips/dts/mrvl,cn73xx.dtsi
@@ -72,6 +72,23 @@
<0x0300e 4>, <0x0300f 4>;
};
+ l2c: l2c@1180080000000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "cavium,octeon-7xxx-l2c";
+ reg = <0x11800 0x80000000 0x0 0x01000000>;
+ u-boot,dm-pre-reloc;
+ };
+
+ lmc: lmc@1180088000000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "cavium,octeon-7xxx-ddr4";
+ reg = <0x11800 0x88000000 0x0 0x02000000>; // 2 IFs
+ u-boot,dm-pre-reloc;
+ l2c-handle = <&l2c>;
+ };
+
reset: reset@1180006001600 {
compatible = "mrvl,cn7xxx-rst";
reg = <0x11800 0x06001600 0x0 0x200>;
@@ -126,5 +143,65 @@
spi-max-frequency = <25000000>;
clocks = <&clk OCTEON_CLK_IO>;
};
+
+ /* USB 0 */
+ usb0: uctl@1180068000000 {
+ compatible = "cavium,octeon-7130-usb-uctl";
+ reg = <0x11800 0x68000000 0x0 0x100>;
+ ranges; /* Direct mapping */
+ #address-cells = <2>;
+ #size-cells = <2>;
+ /* Only 100MHz allowed */
+ refclk-frequency = <100000000>;
+ /* Only "dlmc_ref_clk0" is supported for 73xx */
+ refclk-type-ss = "dlmc_ref_clk0";
+ /* Only "dlmc_ref_clk0" is supported for 73xx */
+ refclk-type-hs = "dlmc_ref_clk0";
+
+ /*
+ * Power is specified by three parts:
+ * 1) GPIO handle (must be &gpio)
+ * 2) GPIO pin number
+ * 3) Active high (0) or active low (1)
+ */
+ xhci@1680000000000 {
+ compatible = "cavium,octeon-7130-xhci","synopsys,dwc3","snps,dwc3";
+ reg = <0x16800 0x00000000 0x10 0x0>;
+ interrupts = <0x68080 4>; /* UAHC_IMAN, level */
+ maximum-speed = "super-speed";
+ dr_mode = "host";
+ snps,dis_u3_susphy_quirk;
+ snps,dis_u2_susphy_quirk;
+ snps,dis_enblslpm_quirk;
+ };
+ };
+
+ /* USB 1 */
+ usb1: uctl@1180069000000 {
+ compatible = "cavium,octeon-7130-usb-uctl";
+ reg = <0x11800 0x69000000 0x0 0x100>;
+ ranges; /* Direct mapping */
+ #address-cells = <2>;
+ #size-cells = <2>;
+ /* 50MHz, 100MHz and 125MHz allowed */
+ refclk-frequency = <100000000>;
+ /* Either "dlmc_ref_clk0" or "dlmc_ref_clk0" */
+ refclk-type-ss = "dlmc_ref_clk0";
+ /* Either "dlmc_ref_clk0" "dlmc_ref_clk1" or "pll_ref_clk" */
+ refclk-type-hs = "dlmc_ref_clk0";
+
+ /*
+ * Power is specified by three parts:
+ * 1) GPIO handle (must be &gpio)
+ * 2) GPIO pin number
+ * 3) Active high (0) or active low (1)
+ */
+ xhci@1690000000000 {
+ compatible = "cavium,octeon-7130-xhci","synopsys,dwc3","snps,dwc3";
+ reg = <0x16900 0x00000000 0x10 0x0>;
+ interrupts = <0x69080 4>; /* UAHC_IMAN, level */
+ dr_mode = "host";
+ };
+ };
};
};
diff --git a/arch/mips/dts/mrvl,octeon-ebb7304.dts b/arch/mips/dts/mrvl,octeon-ebb7304.dts
index 6b2e5e84bc..993b4f6890 100644
--- a/arch/mips/dts/mrvl,octeon-ebb7304.dts
+++ b/arch/mips/dts/mrvl,octeon-ebb7304.dts
@@ -113,3 +113,27 @@
reg = <0>;
};
};
+
+/* USB 0 */
+&usb0 {
+ status = "okay";
+ /*
+ * Power is specified by three parts:
+ * 1) GPIO handle (must be &gpio)
+ * 2) GPIO pin number
+ * 3) Active high (0) or active low (1)
+ */
+ power = <&gpio 20 0>;
+};
+
+/* USB 1 */
+&usb1 {
+ status = "okay";
+ /*
+ * Power is specified by three parts:
+ * 1) GPIO handle (must be &gpio)
+ * 2) GPIO pin number
+ * 3) Active high (0) or active low (1)
+ */
+ power = <&gpio 21 0>;
+};
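For reference, a consumer driver would pick up this power property through U-Boot's GPIO uclass. Below is a minimal sketch of the board-side power-up, assuming the standard gpio_request_by_name()/dm_gpio_set_value() helpers from asm-generic/gpio.h; the function name octeon_usb_power_on is hypothetical, not part of this patch:

    #include <dm.h>
    #include <asm-generic/gpio.h>

    /*
     * Sketch: request and assert the USB power GPIO described by
     * "power = <&gpio 20 0>;" (pin 20, active high). The active-low
     * flag in the third cell is handled by the GPIO uclass itself.
     */
    static int octeon_usb_power_on(struct udevice *dev)
    {
            struct gpio_desc power_gpio;
            int ret;

            ret = gpio_request_by_name(dev, "power", 0, &power_gpio,
                                       GPIOD_IS_OUT);
            if (ret)
                    return ret;

            /* "1" means logically asserted; polarity comes from the DT */
            return dm_gpio_set_value(&power_gpio, 1);
    }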
diff --git a/arch/mips/mach-octeon/Makefile b/arch/mips/mach-octeon/Makefile
index 2e37ca572c..3486aa9d8b 100644
--- a/arch/mips/mach-octeon/Makefile
+++ b/arch/mips/mach-octeon/Makefile
@@ -8,3 +8,6 @@ obj-y += cache.o
obj-y += clock.o
obj-y += cpu.o
obj-y += dram.o
+obj-y += cvmx-coremask.o
+obj-y += cvmx-bootmem.o
+obj-y += bootoctlinux.o
diff --git a/arch/mips/mach-octeon/bootoctlinux.c b/arch/mips/mach-octeon/bootoctlinux.c
new file mode 100644
index 0000000000..75d7e83bd7
--- /dev/null
+++ b/arch/mips/mach-octeon/bootoctlinux.c
@@ -0,0 +1,661 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2020 Stefan Roese <sr@denx.de>
+ */
+
+#include <command.h>
+#include <config.h>
+#include <cpu_func.h>
+#include <dm.h>
+#include <elf.h>
+#include <env.h>
+#include <ram.h>
+
+#include <asm/io.h>
+#include <linux/compat.h>
+#include <linux/ctype.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+
+#include <mach/cvmx-coremask.h>
+#include <mach/cvmx-bootinfo.h>
+#include <mach/cvmx-bootmem.h>
+#include <mach/cvmx-regs.h>
+#include <mach/cvmx-fuse.h>
+#include <mach/octeon-model.h>
+#include <mach/octeon-feature.h>
+#include <mach/bootoct_cmd.h>
+
+DECLARE_GLOBAL_DATA_PTR;
+
+/* ToDo: Revisit these settings */
+#define OCTEON_RESERVED_LOW_MEM_SIZE (512 * 1024)
+#define OCTEON_RESERVED_LOW_BOOT_MEM_SIZE (1024 * 1024)
+#define BOOTLOADER_BOOTMEM_DESC_SPACE (1024 * 1024)
+
+/* Default stack and heap sizes, in bytes */
+#define DEFAULT_STACK_SIZE (1 * 1024 * 1024)
+#define DEFAULT_HEAP_SIZE (3 * 1024 * 1024)
+
+/**
+ * NOTE: This must duplicate octeon_boot_descriptor_t in the toolchain
+ * octeon-app-init.h file.
+ */
+enum {
+ /* If set, core should do app-wide init, only one core per app will have
+ * this flag set.
+ */
+ BOOT_FLAG_INIT_CORE = 1,
+ OCTEON_BL_FLAG_DEBUG = 1 << 1,
+ OCTEON_BL_FLAG_NO_MAGIC = 1 << 2,
+ /* If set, use uart1 for console */
+ OCTEON_BL_FLAG_CONSOLE_UART1 = 1 << 3,
+ OCTEON_BL_FLAG_CONSOLE_PCI = 1 << 4, /* If set, use PCI console */
+ /* Call exit on break on serial port */
+ OCTEON_BL_FLAG_BREAK = 1 << 5,
+ /*
+ * Be sure to update OCTEON_APP_INIT_H_VERSION when new fields are added
+ * and to conditionalize the new flag's usage based on the version.
+ */
+} octeon_boot_descriptor_flag;
+
+/**
+ * NOTE: This must duplicate octeon_boot_descriptor_t in the toolchain
+ * octeon-app-init.h file.
+ */
+#ifndef OCTEON_CURRENT_DESC_VERSION
+# define OCTEON_CURRENT_DESC_VERSION 7
+#endif
+/**
+ * NOTE: This must duplicate octeon_boot_descriptor_t in the toolchain
+ * octeon-app-init.h file.
+ */
+/* Version 7 changes: Change names of deprecated fields */
+#ifndef OCTEON_ARGV_MAX_ARGS
+# define OCTEON_ARGV_MAX_ARGS 64
+#endif
+
+/**
+ * NOTE: This must duplicate octeon_boot_descriptor_t in the toolchain
+ * octeon-app-init.h file.
+ */
+#ifndef OCTEON_SERIAL_LEN
+# define OCTEON_SERIAL_LEN 20
+#endif
+
+/**
+ * Bootloader structure used to pass info to Octeon executive startup code.
+ * NOTE: all fields are deprecated except for:
+ * * desc_version
+ * * desc_size,
+ * * heap_base
+ * * heap_end
+ * * eclock_hz
+ * * flags
+ * * argc
+ * * argv
+ * * cvmx_desc_vaddr
+ * * debugger_flags_base_addr
+ *
+ * All other fields have been moved to the cvmx_descriptor, and the new
+ * fields should be added there. They are left as placeholders in this
+ * structure for binary compatibility.
+ *
+ * NOTE: This structure must match what is in the toolchain octeon-app-init.h
+ * file.
+ */
+struct octeon_boot_descriptor {
+ /* Start of block referenced by assembly code - do not change! */
+ u32 desc_version;
+ u32 desc_size;
+ u64 stack_top;
+ u64 heap_base;
+ u64 heap_end;
+ u64 deprecated17;
+ u64 deprecated16;
+ /* End of block referenced by assembly code - do not change! */
+ u32 deprecated18;
+ u32 deprecated15;
+ u32 deprecated14;
+ u32 argc; /* argc for main() */
+ u32 argv[OCTEON_ARGV_MAX_ARGS]; /* argv for main() */
+ u32 flags; /* Flags for application */
+ u32 core_mask; /* Coremask running this image */
+ u32 dram_size; /* DEPRECATED, DRAM size in megabytes. Used up to SDK 1.8.1 */
+ u32 phy_mem_desc_addr;
+ u32 debugger_flags_base_addr; /* used to pass flags from app to debugger. */
+ u32 eclock_hz; /* CPU clock speed, in hz. */
+ u32 deprecated10;
+ u32 deprecated9;
+ u16 deprecated8;
+ u8 deprecated7;
+ u8 deprecated6;
+ u16 deprecated5;
+ u8 deprecated4;
+ u8 deprecated3;
+ char deprecated2[OCTEON_SERIAL_LEN];
+ u8 deprecated1[6];
+ u8 deprecated0;
+ u64 cvmx_desc_vaddr; /* Address of cvmx descriptor */
+};
+
+static struct octeon_boot_descriptor boot_desc[CVMX_MIPS_MAX_CORES];
+static struct cvmx_bootinfo cvmx_bootinfo_array[CVMX_MIPS_MAX_CORES];
+
+/* NMI boot vector and its parameter block (see lowlevel_init.S) */
+extern void nmi_bootvector(void);
+extern u64 nmi_handler_para[];
+
+/**
+ * Programs the boot bus moveable region
+ * @param base base address to place the boot bus moveable region
+ * (bits [31:7])
+ * @param region_num Selects which region, 0 or 1 for node 0,
+ * 2 or 3 for node 1
+ * @param enable Set true to enable, false to disable
+ * @param data Pointer to data to put in the region, up to
+ * 16 dwords.
+ * @param num_words Number of data dwords (up to 32)
+ *
+ * @return 0 for success, -1 on error
+ */
+static int octeon_set_moveable_region(u32 base, int region_num,
+ bool enable, const u64 *data,
+ unsigned int num_words)
+{
+ int node = region_num >> 1;
+ u64 val;
+ int i;
+ u8 node_mask = 0x01; /* ToDo: Currently only one node is supported */
+
+ debug("%s(0x%x, %d, %d, %p, %u)\n", __func__, base, region_num, enable,
+ data, num_words);
+
+ if (num_words > 32) {
+ printf("%s: Too many words (%d) for region %d\n", __func__,
+ num_words, region_num);
+ return -1;
+ }
+
+ if (base & 0x7f) {
+ printf("%s: Error: base address 0x%x must be 128 byte aligned\n",
+ __func__, base);
+ return -1;
+ }
+
+ if (region_num > (node_mask > 1 ? 3 : 1)) {
+ printf("%s: Region number %d out of range\n",
+ __func__, region_num);
+ return -1;
+ }
+
+ if (!data && num_words > 0) {
+ printf("%s: Error: NULL data\n", __func__);
+ return -1;
+ }
+
+ region_num &= 1;
+
+ val = MIO_BOOT_LOC_CFG_EN |
+ FIELD_PREP(MIO_BOOT_LOC_CFG_BASE, base >> 7);
+ debug("%s: Setting MIO_BOOT_LOC_CFG(%d) on node %d to 0x%llx\n",
+ __func__, region_num, node, val);
+ csr_wr(CVMX_MIO_BOOT_LOC_CFGX(region_num & 1), val);
+
+ val = FIELD_PREP(MIO_BOOT_LOC_ADR_ADR, (region_num ? 0x80 : 0x00) >> 3);
+ debug("%s: Setting MIO_BOOT_LOC_ADR start to 0x%llx\n", __func__, val);
+ csr_wr(CVMX_MIO_BOOT_LOC_ADR, val);
+
+ for (i = 0; i < num_words; i++) {
+ debug(" 0x%02llx: 0x%016llx\n",
+ csr_rd(CVMX_MIO_BOOT_LOC_ADR), data[i]);
+ csr_wr(CVMX_MIO_BOOT_LOC_DAT, data[i]);
+ }
+
+ return 0;
+}
+
+/**
+ * Parse comma separated numbers into an array
+ *
+ * @param[out] values values read for each node
+ * @param[in] str string to parse
+ * @param base 0 for auto, otherwise 8, 10 or 16 for the number base
+ *
+ * @return number of values read.
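+ *
+ * Example: with base 0, the string "2,4" yields values[0] = 2,
+ * values[1] = 4 and a return value of 2.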
+ */
+static int octeon_parse_nodes(u64 values[CVMX_MAX_NODES],
+ const char *str, int base)
+{
+ int node = 0;
+ char *sep;
+
+ do {
+ debug("Parsing node %d: \"%s\"\n", node, str);
+ values[node] = simple_strtoull(str, &sep, base);
+ debug(" node %d: 0x%llx\n", node, values[node]);
+ str = sep + 1;
+ } while (++node < CVMX_MAX_NODES && *sep == ',');
+
+ debug("%s: returning %d\n", __func__, node);
+ return node;
+}
+
+/**
+ * Parse command line arguments
+ *
+ * @param argc number of arguments
+ * @param[in] argv array of argument strings
+ * @param cmd command type
+ * @param[out] boot_args parsed values
+ *
+ * @return number of arguments parsed
+ */
+int octeon_parse_bootopts(int argc, char *const argv[],
+ enum octeon_boot_cmd_type cmd,
+ struct octeon_boot_args *boot_args)
+{
+ u64 node_values[CVMX_MAX_NODES];
+ int arg, j;
+ int num_values;
+ int node;
+ u8 node_mask = 0x01; /* ToDo: Currently only one node is supported */
+
+ debug("%s(%d, %p, %d, %p)\n", __func__, argc, argv, cmd, boot_args);
+ memset(boot_args, 0, sizeof(*boot_args));
+ boot_args->stack_size = DEFAULT_STACK_SIZE;
+ boot_args->heap_size = DEFAULT_HEAP_SIZE;
+ boot_args->node_mask = 0;
+
+ for (arg = 0; arg < argc; arg++) {
+ debug(" argv[%d]: %s\n", arg, argv[arg]);
+ if (cmd == BOOTOCT && !strncmp(argv[arg], "stack=", 6)) {
+ boot_args->stack_size = simple_strtoul(argv[arg] + 6,
+ NULL, 0);
+ } else if (cmd == BOOTOCT && !strncmp(argv[arg], "heap=", 5)) {
+ boot_args->heap_size = simple_strtoul(argv[arg] + 5,
+ NULL, 0);
+ } else if (!strncmp(argv[arg], "debug", 5)) {
+ puts("setting debug flag!\n");
+ boot_args->boot_flags |= OCTEON_BL_FLAG_DEBUG;
+ } else if (cmd == BOOTOCT && !strncmp(argv[arg], "break", 5)) {
+ puts("setting break flag!\n");
+ boot_args->boot_flags |= OCTEON_BL_FLAG_BREAK;
+ } else if (!strncmp(argv[arg], "forceboot", 9)) {
+ boot_args->forceboot = true;
+ } else if (!strncmp(argv[arg], "nodemask=", 9)) {
+ boot_args->node_mask = simple_strtoul(argv[arg] + 9,
+ NULL, 16);
+ } else if (!strncmp(argv[arg], "numcores=", 9)) {
+ memset(node_values, 0, sizeof(node_values));
+ num_values = octeon_parse_nodes(node_values,
+ argv[arg] + 9, 0);
+ for (j = 0; j < num_values; j++)
+ boot_args->num_cores[j] = node_values[j];
+ boot_args->num_cores_set = true;
+ } else if (!strncmp(argv[arg], "skipcores=", 10)) {
+ memset(node_values, 0, sizeof(node_values));
+ num_values = octeon_parse_nodes(node_values,
+ argv[arg] + 10, 0);
+ for (j = 0; j < num_values; j++)
+ boot_args->num_skipped[j] = node_values[j];
+ boot_args->num_skipped_set = true;
+ } else if (!strncmp(argv[arg], "console_uart=", 13)) {
+ boot_args->console_uart = simple_strtoul(argv[arg] + 13,
+ NULL, 0);
+ if (boot_args->console_uart == 1) {
+ boot_args->boot_flags |=
+ OCTEON_BL_FLAG_CONSOLE_UART1;
+ } else if (!boot_args->console_uart) {
+ boot_args->boot_flags &=
+ ~OCTEON_BL_FLAG_CONSOLE_UART1;
+ }
+ } else if (!strncmp(argv[arg], "coremask=", 9)) {
+ memset(node_values, 0, sizeof(node_values));
+ num_values = octeon_parse_nodes(node_values,
+ argv[arg] + 9, 16);
+ for (j = 0; j < num_values; j++)
+ cvmx_coremask_set64_node(&boot_args->coremask,
+ j, node_values[j]);
+ boot_args->coremask_set = true;
+ } else if (cmd == BOOTOCTLINUX &&
+ !strncmp(argv[arg], "namedblock=", 11)) {
+ boot_args->named_block = argv[arg] + 11;
+ } else if (!strncmp(argv[arg], "endbootargs", 11)) {
+ boot_args->endbootargs = 1;
+ arg++;
+ if (argc >= arg && cmd != BOOTOCTLINUX)
+ boot_args->app_name = argv[arg];
+ break;
+ } else {
+ debug(" Unknown argument \"%s\"\n", argv[arg]);
+ }
+ }
+
+ if (boot_args->coremask_set && boot_args->num_cores_set) {
+ puts("Warning: both coremask and numcores are set, using coremask.\n");
+ } else if (!boot_args->coremask_set && !boot_args->num_cores_set) {
+ cvmx_coremask_set_core(&boot_args->coremask, 0);
+ boot_args->coremask_set = true;
+ } else if ((!boot_args->coremask_set) && boot_args->num_cores_set) {
+ cvmx_coremask_for_each_node(node, node_mask)
+ cvmx_coremask_set64_node(&boot_args->coremask, node,
+ ((1ull << boot_args->num_cores[node]) - 1) <<
+ boot_args->num_skipped[node]);
+ boot_args->coremask_set = true;
+ }
+
+ /* Update the node mask based on the coremask or the number of cores */
+ for (j = 0; j < CVMX_MAX_NODES; j++) {
+ if (cvmx_coremask_get64_node(&boot_args->coremask, j))
+ boot_args->node_mask |= 1 << j;
+ }
+
+ debug("%s: return %d\n", __func__, arg);
+ return arg;
+}
+
+int do_bootoctlinux(struct cmd_tbl *cmdtp, int flag, int argc,
+ char *const argv[])
+{
+ typedef void __noreturn (*kernel_entry_t)(int, ulong, ulong, ulong);
+ kernel_entry_t kernel;
+ struct octeon_boot_args boot_args;
+ int arg_start = 1;
+ int arg_count;
+ u64 addr = 0; /* Address of the ELF image */
+ int arg0;
+ u64 arg1;
+ u64 arg2;
+ u64 arg3;
+ int ret;
+ struct cvmx_coremask core_mask;
+ struct cvmx_coremask coremask_to_run;
+ struct cvmx_coremask avail_coremask;
+ int first_core;
+ int core;
+ struct ram_info ram;
+ struct udevice *dev;
+ const u64 *nmi_code;
+ int num_dwords;
+ u8 node_mask = 0x01;
+ int i;
+
+ cvmx_coremask_clear_all(&core_mask);
+ cvmx_coremask_clear_all(&coremask_to_run);
+
+ if (argc >= 2 && (isxdigit(argv[1][0]) && (isxdigit(argv[1][1]) ||
+ argv[1][1] == 'x' ||
+ argv[1][1] == 'X' ||
+ argv[1][1] == '\0'))) {
+ addr = simple_strtoul(argv[1], NULL, 16);
+ if (!addr)
+ addr = CONFIG_SYS_LOAD_ADDR;
+ arg_start++;
+ }
+ if (addr == 0)
+ addr = CONFIG_SYS_LOAD_ADDR;
+
+ debug("%s: arg start: %d\n", __func__, arg_start);
+ arg_count = octeon_parse_bootopts(argc - arg_start, argv + arg_start,
+ BOOTOCTLINUX, &boot_args);
+
+ debug("%s:\n"
+ " named block: %s\n"
+ " node mask: 0x%x\n"
+ " stack size: 0x%x\n"
+ " heap size: 0x%x\n"
+ " boot flags: 0x%x\n"
+ " force boot: %s\n"
+ " coremask set: %s\n"
+ " num cores set: %s\n"
+ " num skipped set: %s\n"
+ " endbootargs: %s\n",
+ __func__,
+ boot_args.named_block ? boot_args.named_block : "none",
+ boot_args.node_mask,
+ boot_args.stack_size,
+ boot_args.heap_size,
+ boot_args.boot_flags,
+ boot_args.forceboot ? "true" : "false",
+ boot_args.coremask_set ? "true" : "false",
+ boot_args.num_cores_set ? "true" : "false",
+ boot_args.num_skipped_set ? "true" : "false",
+ boot_args.endbootargs ? "true" : "false");
+ debug(" num cores: ");
+ for (i = 0; i < CVMX_MAX_NODES; i++)
+ debug("%s%d", i > 0 ? ", " : "", boot_args.num_cores[i]);
+ debug("\n num skipped: ");
+ for (i = 0; i < CVMX_MAX_NODES; i++) {
+ debug("%s%d", i > 0 ? ", " : "", boot_args.num_skipped[i]);
+ debug("\n coremask:\n");
+ cvmx_coremask_dprint(&boot_args.coremask);
+ }
+
+ if (boot_args.endbootargs) {
+ debug("endbootargs set, adjusting argc from %d to %d, arg_count: %d, arg_start: %d\n",
+ argc, argc - (arg_count + arg_start), arg_count,
+ arg_start);
+ argc -= (arg_count + arg_start);
+ argv += (arg_count + arg_start);
+ }
+
+ /*
+ * If both coremask and numcores were given, the parser above has
+ * already resolved the conflict in favor of coremask
+ */
+ cvmx_coremask_copy(&core_mask, &boot_args.coremask);
+
+ /*
+ * Remove cores from coremask based on environment variable stored in
+ * flash
+ */
+ if (validate_coremask(&core_mask) != 0) {
+ puts("Invalid coremask.\n");
+ return 1;
+ } else if (cvmx_coremask_is_empty(&core_mask)) {
+ puts("Coremask is empty after coremask_override mask. Nothing to do.\n");
+ return 0;
+ }
+
+ if (cvmx_coremask_intersects(&core_mask, &coremask_to_run)) {
+ puts("ERROR: Can't load code on core twice! Provided coremask:\n");
+ cvmx_coremask_print(&core_mask);
+ puts("overlaps previously loaded coremask:\n");
+ cvmx_coremask_print(&coremask_to_run);
+ return -1;
+ }
+
+ debug("Setting up boot descriptor block with core mask:\n");
+ cvmx_coremask_dprint(&core_mask);
+
+ /*
+ * Add coremask to global mask of cores that have been set up and are
+ * runnable
+ */
+ cvmx_coremask_or(&coremask_to_run, &coremask_to_run, &core_mask);
+
+ /* Get RAM size */
+ ret = uclass_get_device(UCLASS_RAM, 0, &dev);
+ if (ret) {
+ debug("DRAM init failed: %d\n", ret);
+ return ret;
+ }
+
+ ret = ram_get_info(dev, &ram);
+ if (ret) {
+ debug("Cannot get DRAM size: %d\n", ret);
+ return ret;
+ }
+
+ /*
+ * Load kernel ELF image, or try binary if ELF is not detected.
+ * This way the much smaller vmlinux.bin can also be started but
+ * has to be loaded at the correct address (ep as parameter).
+ */
+ if (!valid_elf_image(addr))
+ printf("Booting binary image instead (vmlinux.bin)...\n");
+ else
+ addr = load_elf_image_shdr(addr);
+
+ /* Set kernel entry point */
+ kernel = (kernel_entry_t)addr;
+
+ /* Init bootmem list for Linux kernel booting */
+ if (!cvmx_bootmem_phy_mem_list_init(
+ ram.size, OCTEON_RESERVED_LOW_MEM_SIZE,
+ (void *)CKSEG0ADDR(BOOTLOADER_BOOTMEM_DESC_SPACE))) {
+ printf("FATAL: Error initializing free memory list\n");
+ return 0;
+ }
+
+ first_core = cvmx_coremask_get_first_core(&coremask_to_run);
+
+ cvmx_coremask_for_each_core(core, &coremask_to_run) {
+ debug("%s: Activating core %d\n", __func__, core);
+
+ cvmx_bootinfo_array[core].core_mask =
+ cvmx_coremask_get32(&coremask_to_run);
+ cvmx_coremask_copy(&cvmx_bootinfo_array[core].ext_core_mask,
+ &coremask_to_run);
+
+ if (core == first_core)
+ cvmx_bootinfo_array[core].flags |= BOOT_FLAG_INIT_CORE;
+
+ cvmx_bootinfo_array[core].dram_size = ram.size / (1024 * 1024);
+
+ cvmx_bootinfo_array[core].dclock_hz = gd->mem_clk * 1000000;
+ cvmx_bootinfo_array[core].eclock_hz = gd->cpu_clk;
+
+ cvmx_bootinfo_array[core].led_display_base_addr = 0;
+ cvmx_bootinfo_array[core].phy_mem_desc_addr =
+ ((u32)(u64)__cvmx_bootmem_internal_get_desc_ptr()) &
+ 0x7ffffff;
+
+ cvmx_bootinfo_array[core].major_version = CVMX_BOOTINFO_MAJ_VER;
+ cvmx_bootinfo_array[core].minor_version = CVMX_BOOTINFO_MIN_VER;
+ cvmx_bootinfo_array[core].fdt_addr = virt_to_phys(gd->fdt_blob);
+
+ boot_desc[core].dram_size = gd->ram_size / (1024 * 1024);
+ boot_desc[core].cvmx_desc_vaddr =
+ virt_to_phys(&cvmx_bootinfo_array[core]);
+
+ boot_desc[core].desc_version = OCTEON_CURRENT_DESC_VERSION;
+ boot_desc[core].desc_size = sizeof(boot_desc[0]);
+
+ boot_desc[core].flags = cvmx_bootinfo_array[core].flags;
+ boot_desc[core].eclock_hz = cvmx_bootinfo_array[core].eclock_hz;
+
+ boot_desc[core].argc = argc;
+ for (i = 0; i < argc; i++)
+ boot_desc[core].argv[i] = (u32)virt_to_phys(argv[i]);
+ }
+
+ core = 0;
+ arg0 = argc;
+ arg1 = (u64)argv;
+ arg2 = 0x1; /* Core 0 sets init core for Linux */
+ arg3 = XKPHYS | virt_to_phys(&boot_desc[core]);
+
+ debug("## Transferring control to Linux (at address %p) ...\n", kernel);
+
+ /*
+ * Flush cache before jumping to application. Let's flush the
+ * whole SDRAM area, since we don't know the size of the image
+ * that was loaded.
+ */
+ flush_cache(gd->ram_base, gd->ram_top - gd->ram_base);
+
+ /* Take all cores out of reset */
+ csr_wr(CVMX_CIU_PP_RST, 0);
+ sync();
+
+ /* Wait a short while for the other cores... */
+ mdelay(100);
+
+ /* Install boot code into moveable bus for NMI (other cores) */
+ nmi_code = (const u64 *)nmi_bootvector;
+ num_dwords = (((u64)&nmi_handler_para[0] - (u64)nmi_code) + 7) / 8;
+
+ ret = octeon_set_moveable_region(0x1fc00000, 0, true, nmi_code,
+ num_dwords);
+ if (ret) {
+ printf("Error installing NMI handler for SMP core startup\n");
+ return 0;
+ }
+
+ /* Write NMI handler parameters for Linux kernel booting */
+ nmi_handler_para[0] = (u64)kernel;
+ nmi_handler_para[1] = arg0;
+ nmi_handler_para[2] = arg1;
+ nmi_handler_para[3] = 0; /* Don't set init core for secondary cores */
+ nmi_handler_para[4] = arg3;
+ sync();
+
+ /* Wait a short while for the other cores... */
+ mdelay(100);
+
+ /*
+ * Cores have already been taken out of reset to conserve power.
+ * We need to send an NMI to get the cores out of their wait loop
+ */
+ octeon_get_available_coremask(&avail_coremask);
+ debug("Available coremask:\n");
+ cvmx_coremask_dprint(&avail_coremask);
+ debug("Starting coremask:\n");
+ cvmx_coremask_dprint(&coremask_to_run);
+ debug("Sending NMIs to other cores\n");
+ if (octeon_has_feature(OCTEON_FEATURE_CIU3)) {
+ u64 avail_cm;
+ int node;
+
+ cvmx_coremask_for_each_node(node, node_mask) {
+ avail_cm = cvmx_coremask_get64_node(&avail_coremask,
+ node);
+
+ if (avail_cm != 0) {
+ debug("Sending NMI to node %d, coremask=0x%llx, CIU3_NMI=0x%llx\n",
+ node, avail_cm,
+ (node > 0 ? -1ull : -2ull) & avail_cm);
+ csr_wr(CVMX_CIU3_NMI,
+ (node > 0 ? -1ull : -2ull) & avail_cm);
+ }
+ }
+ } else {
+ csr_wr(CVMX_CIU_NMI,
+ -2ull & cvmx_coremask_get64(&avail_coremask));
+ }
+ debug("Done sending NMIs\n");
+
+ /* Wait a short while for the other cores... */
+ mdelay(100);
+
+ /*
+ * pass address parameter as argv[0] (aka command name),
+ * and all remaining args
+ * a0 = argc
+ * a1 = argv (32 bit physical addresses, not pointers)
+ * a2 = init core
+ * a3 = boot descriptor address
+ * a4/t0 = entry point (only used by assembly stub)
+ */
+ kernel(arg0, arg1, arg2, arg3);
+
+ return 0;
+}
+
+U_BOOT_CMD(bootoctlinux, 32, 0, do_bootoctlinux,
+ "Boot from a Linux ELF image in memory",
+ "elf_address [coremask=mask_to_run | numcores=core_cnt_to_run] "
+ "[forceboot] [skipcores=core_cnt_to_skip] [namedblock=name] [endbootargs] [app_args ...]\n"
+ "elf_address - address of ELF image to load. If 0, default load address\n"
+ " is used.\n"
+ "coremask - mask of cores to run on. ANDed with coremask_override\n"
+ " environment variable to ensure only working cores are used\n"
+ "numcores - number of cores to run on. Runs on specified number of cores,\n"
+ " taking into account the coremask_override.\n"
+ "skipcores - only meaningful with numcores. Skips this many cores\n"
+ " (starting from 0) when loading the numcores cores.\n"
+ " For example, setting skipcores to 1 will skip core 0\n"
+ " and load the application starting at the next available core.\n"
+ "forceboot - if set, boots application even if core 0 is not in mask\n"
+ "namedblock - specifies a named block to load the kernel\n"
+ "endbootargs - if set, bootloader does not process any further arguments and\n"
+ " only passes the arguments that follow to the kernel.\n"
+ " If not set, the kernel gets the entire command line as\n"
+ " arguments.\n" "\n");
diff --git a/arch/mips/mach-octeon/cache.c b/arch/mips/mach-octeon/cache.c
index 9a88bb97c7..f293d65dae 100644
--- a/arch/mips/mach-octeon/cache.c
+++ b/arch/mips/mach-octeon/cache.c
@@ -5,14 +5,13 @@
#include <cpu_func.h>
-/*
- * The Octeon platform is cache coherent and cache flushes and invalidates
- * are not needed. Define some platform specific empty flush_foo()
- * functions here to overwrite the _weak common function as a no-op.
- * This effectively disables all cache operations.
- */
+/* Octeon memory write barrier */
+#define CVMX_SYNCW asm volatile ("syncw\nsyncw\n" : : : "memory")
+
void flush_dcache_range(ulong start_addr, ulong stop)
{
+ /* Flush all pending writes */
+ CVMX_SYNCW;
}
void flush_cache(ulong start_addr, ulong size)
@@ -21,4 +20,5 @@ void flush_cache(ulong start_addr, ulong size)
void invalidate_dcache_range(ulong start_addr, ulong stop)
{
+ /* Don't need to do anything for OCTEON */
}
diff --git a/arch/mips/mach-octeon/cpu.c b/arch/mips/mach-octeon/cpu.c
index 2680a2e6ed..6f87a4ef8c 100644
--- a/arch/mips/mach-octeon/cpu.c
+++ b/arch/mips/mach-octeon/cpu.c
@@ -13,6 +13,27 @@
DECLARE_GLOBAL_DATA_PTR;
+/*
+ * TRUE for devices having registers with little-endian byte
+ * order, FALSE for registers with native-endian byte order.
+ * PCI mandates little-endian, USB and SATA are configurable,
+ * but we chose little-endian for these.
+ *
+ * This table will be referenced in the Octeon platform-specific
+ * mangle-port.h header.
+ */
+const bool octeon_should_swizzle_table[256] = {
+ [0x00] = true, /* bootbus/CF */
+ [0x1b] = true, /* PCI mmio window */
+ [0x1c] = true, /* PCI mmio window */
+ [0x1d] = true, /* PCI mmio window */
+ [0x1e] = true, /* PCI mmio window */
+ [0x68] = true, /* OCTEON III USB */
+ [0x69] = true, /* OCTEON III USB */
+ [0x6c] = true, /* OCTEON III SATA */
+ [0x6f] = true, /* OCTEON II USB */
+};
+
static int get_clocks(void)
{
const u64 ref_clock = PLL_REF_CLK;
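The swizzle table above is indexed by bits [47:40] of the 64-bit physical address (0x68/0x69 are the OCTEON III USB windows, 0x6c the SATA window). The lookup in the new mangle-port.h header presumably reduces to something along these lines (a sketch, not the verbatim header):

    /* Sketch: decide whether an MMIO accessor must byte-swap,
     * keyed on physical address bits [47:40] of the target.
     */
    extern const bool octeon_should_swizzle_table[256];

    #define __should_swizzle_addr(paddr) \
            (octeon_should_swizzle_table[((paddr) >> 40) & 0xff])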
diff --git a/arch/mips/mach-octeon/cvmx-bootmem.c b/arch/mips/mach-octeon/cvmx-bootmem.c
new file mode 100644
index 0000000000..80bb7ac6c8
--- /dev/null
+++ b/arch/mips/mach-octeon/cvmx-bootmem.c
@@ -0,0 +1,1460 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018-2020 Marvell International Ltd.
+ */
+
+/*
+ * Simple allocate only memory allocator. Used to allocate memory at
+ * application start time.
+ */
+
+#include <asm/global_data.h>
+
+#include <linux/compat.h>
+#include <linux/io.h>
+#include <linux/types.h>
+
+#include <mach/octeon-model.h>
+#include <mach/cvmx-bootmem.h>
+#include <mach/cvmx-coremask.h>
+#include <mach/cvmx-regs.h>
+
+DECLARE_GLOBAL_DATA_PTR;
+
+#define CVMX_MIPS32_SPACE_KSEG0 1L
+#define CVMX_MIPS_SPACE_XKPHYS 2LL
+
+#define CVMX_ADD_SEG(seg, add) ((((u64)(seg)) << 62) | (add))
+#define CVMX_ADD_SEG32(seg, add) (((u32)(seg) << 31) | (u32)(add))
+
+/**
+ * This is the physical location of a struct cvmx_bootmem_desc
+ * structure in Octeon's memory. Note that dues to addressing
+ * limits or runtime environment it might not be possible to
+ * create a C pointer to this structure.
+ */
+static u64 cvmx_bootmem_desc_addr;
+
+/**
+ * This macro returns the size of a member of a structure.
+ * Logically it is the same as "sizeof(s::field)" in C++, but
+ * C lacks the "::" operator.
+ */
+#define SIZEOF_FIELD(s, field) sizeof(((s *)NULL)->field)
+
+/**
+ * This macro returns a member of the struct cvmx_bootmem_desc
+ * structure. These members can't be directly addressed as
+ * they might be in memory not directly reachable. In the case
+ * where bootmem is compiled with LINUX_HOST, the structure
+ * itself might be located on a remote Octeon. The argument
+ * "field" is the member name of the struct cvmx_bootmem_desc to read.
+ * Regardless of the type of the field, the return type is always
+ * a u64.
+ */
+#define CVMX_BOOTMEM_DESC_GET_FIELD(field) \
+ __cvmx_bootmem_desc_get(cvmx_bootmem_desc_addr, \
+ offsetof(struct cvmx_bootmem_desc, field), \
+ SIZEOF_FIELD(struct cvmx_bootmem_desc, field))
+
+/**
+ * This macro writes a member of the struct cvmx_bootmem_desc
+ * structure. These members can't be directly addressed as
+ * they might be in memory not directly reachable. In the case
+ * where bootmem is compiled with LINUX_HOST, the structure
+ * itself might be located on a remote Octeon. The argument
+ * "field" is the member name of the struct cvmx_bootmem_desc to write.
+ */
+#define CVMX_BOOTMEM_DESC_SET_FIELD(field, value) \
+ __cvmx_bootmem_desc_set(cvmx_bootmem_desc_addr, \
+ offsetof(struct cvmx_bootmem_desc, field), \
+ SIZEOF_FIELD(struct cvmx_bootmem_desc, field), \
+ value)
+
+/**
+ * This macro returns a member of the
+ * struct cvmx_bootmem_named_block_desc structure. These members can't
+ * be directly addressed as they might be in memory not directly
+ * reachable. In the case where bootmem is compiled with
+ * LINUX_HOST, the structure itself might be located on a remote
+ * Octeon. The argument "field" is the member name of the
+ * struct cvmx_bootmem_named_block_desc to read. Regardless of the type
+ * of the field, the return type is always a u64. The "addr"
+ * parameter is the physical address of the structure.
+ */
+#define CVMX_BOOTMEM_NAMED_GET_FIELD(addr, field) \
+ __cvmx_bootmem_desc_get(addr, \
+ offsetof(struct cvmx_bootmem_named_block_desc, field), \
+ SIZEOF_FIELD(struct cvmx_bootmem_named_block_desc, field))
+
+/**
+ * This macro writes a member of the struct cvmx_bootmem_named_block_desc
+ * structure. These members can't be directly addressed as
+ * they might be in memory not directly reachable. In the case
+ * where bootmem is compiled with LINUX_HOST, the structure
+ * itself might be located on a remote Octeon. The argument
+ * "field" is the member name of the
+ * struct cvmx_bootmem_named_block_desc to write. The "addr" parameter
+ * is the physical address of the structure.
+ */
+#define CVMX_BOOTMEM_NAMED_SET_FIELD(addr, field, value) \
+ __cvmx_bootmem_desc_set(addr, \
+ offsetof(struct cvmx_bootmem_named_block_desc, field), \
+ SIZEOF_FIELD(struct cvmx_bootmem_named_block_desc, field), \
+ value)
+
+/**
+ * This function is the implementation of the get macros defined
+ * for individual structure members. The argument are generated
+ * by the macros inorder to read only the needed memory.
+ *
+ * @param base 64bit physical address of the complete structure
+ * @param offset Offset from the beginning of the structure to the member being
+ * accessed.
+ * @param size Size of the structure member.
+ *
+ * @return Value of the structure member promoted into a u64.
+ */
+static inline u64 __cvmx_bootmem_desc_get(u64 base, int offset,
+ int size)
+{
+ base = (1ull << 63) | (base + offset);
+ switch (size) {
+ case 4:
+ return cvmx_read64_uint32(base);
+ case 8:
+ return cvmx_read64_uint64(base);
+ default:
+ return 0;
+ }
+}
+
+/**
+ * This function is the implementation of the set macros defined
+ * for individual structure members. The arguments are generated
+ * by the macros in order to write only the needed memory.
+ *
+ * @param base 64bit physical address of the complete structure
+ * @param offset Offset from the beginning of the structure to the member being
+ * accessed.
+ * @param size Size of the structure member.
+ * @param value Value to write into the structure
+ */
+static inline void __cvmx_bootmem_desc_set(u64 base, int offset, int size,
+ u64 value)
+{
+ base = (1ull << 63) | (base + offset);
+ switch (size) {
+ case 4:
+ cvmx_write64_uint32(base, value);
+ break;
+ case 8:
+ cvmx_write64_uint64(base, value);
+ break;
+ default:
+ break;
+ }
+}
+
+/**
+ * This function returns the address of the bootmem descriptor lock.
+ *
+ * @return 64-bit address in KSEG0 of the bootmem descriptor block
+ */
+static inline u64 __cvmx_bootmem_get_lock_addr(void)
+{
+ return (1ull << 63) |
+ (cvmx_bootmem_desc_addr + offsetof(struct cvmx_bootmem_desc, lock));
+}
+
+/**
+ * This function retrieves the string name of a named block. It is
+ * more complicated than a simple memcpy() since the named block
+ * descriptor may not be directly accessible.
+ *
+ * @param addr Physical address of the named block descriptor
+ * @param str String to receive the named block string name
+ * @param len Length of the string buffer, which must match the length
+ * stored in the bootmem descriptor.
+ */
+static void CVMX_BOOTMEM_NAMED_GET_NAME(u64 addr, char *str, int len)
+{
+ int l = len;
+ char *ptr = str;
+
+ addr |= (1ull << 63);
+ addr += offsetof(struct cvmx_bootmem_named_block_desc, name);
+ while (l) {
+ /*
+ * With big-endian in memory byte order, this gives uniform
+ * results for the CPU in either big- or little-endian mode.
+ */
+ u64 blob = cvmx_read64_uint64(addr);
+ int sa = 56;
+
+ addr += sizeof(u64);
+ while (l && sa >= 0) {
+ *ptr++ = (char)(blob >> sa);
+ l--;
+ sa -= 8;
+ }
+ }
+ str[len] = 0;
+}
+
+/**
+ * This function stores the string name of a named block. It is
+ * more complicated than a simple memcpy() since the named block
+ * descriptor may not be directly accessible.
+ *
+ * @param addr Physical address of the named block descriptor
+ * @param str String to store into the named block string name
+ * @param len Length of the string buffer, which must match the length
+ * stored in the bootmem descriptor.
+ */
+void CVMX_BOOTMEM_NAMED_SET_NAME(u64 addr, const char *str, int len)
+{
+ int l = len;
+
+ addr |= (1ull << 63);
+ addr += offsetof(struct cvmx_bootmem_named_block_desc, name);
+
+ while (l) {
+ /*
+ * With big-endian in memory byte order, this gives uniform
+ * results for the CPU in either big- or little-endian mode.
+ */
+ u64 blob = 0;
+ int sa = 56;
+
+ while (l && sa >= 0) {
+ u64 c = (u8)(*str++);
+
+ l--;
+ if (l == 0)
+ c = 0;
+ blob |= c << sa;
+ sa -= 8;
+ }
+ cvmx_write64_uint64(addr, blob);
+ addr += sizeof(u64);
+ }
+}
+
+/* See header file for descriptions of functions */
+
+/*
+ * Wrapper functions are provided for reading/writing the size and next block
+ * values as these may not be directly addressible (in 32 bit applications, for
+ * instance.)
+ *
+ * Offsets of data elements in bootmem list, must match
+ * struct cvmx_bootmem_block_header
+ */
+#define NEXT_OFFSET 0
+#define SIZE_OFFSET 8
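+
+/*
+ * For reference, NEXT_OFFSET and SIZE_OFFSET mirror this layout,
+ * which must match struct cvmx_bootmem_block_header as declared in
+ * cvmx-bootmem.h:
+ *
+ * struct cvmx_bootmem_block_header {
+ * u64 next_block_addr; // NEXT_OFFSET (0)
+ * u64 size; // SIZE_OFFSET (8)
+ * };
+ */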
+
+static void cvmx_bootmem_phy_set_size(u64 addr, u64 size)
+{
+ cvmx_write64_uint64((addr + SIZE_OFFSET) | (1ull << 63), size);
+}
+
+static void cvmx_bootmem_phy_set_next(u64 addr, u64 next)
+{
+ cvmx_write64_uint64((addr + NEXT_OFFSET) | (1ull << 63), next);
+}
+
+static u64 cvmx_bootmem_phy_get_size(u64 addr)
+{
+ return cvmx_read64_uint64((addr + SIZE_OFFSET) | (1ull << 63));
+}
+
+static u64 cvmx_bootmem_phy_get_next(u64 addr)
+{
+ return cvmx_read64_uint64((addr + NEXT_OFFSET) | (1ull << 63));
+}
+
+/**
+ * Check the version information on the bootmem descriptor
+ *
+ * @param exact_match
+ * Exact major version to check against. A zero means
+ * check that the version supports named blocks.
+ *
+ * @return Zero if the version is correct. Negative if the version is
+ * incorrect. Failures also cause a message to be displayed.
+ */
+static int __cvmx_bootmem_check_version(int exact_match)
+{
+ int major_version;
+
+ major_version = CVMX_BOOTMEM_DESC_GET_FIELD(major_version);
+ if (major_version > 3 ||
+ (exact_match && major_version) != exact_match) {
+ debug("ERROR: Incompatible bootmem descriptor version: %d.%d at addr: 0x%llx\n",
+ major_version,
+ (int)CVMX_BOOTMEM_DESC_GET_FIELD(minor_version),
+ CAST_ULL(cvmx_bootmem_desc_addr));
+ return -1;
+ } else {
+ return 0;
+ }
+}
+
+/**
+ * Get the low level bootmem descriptor lock. If no locking
+ * is specified in the flags, then nothing is done.
+ *
+ * @param flags CVMX_BOOTMEM_FLAG_NO_LOCKING means this function should do
+ * nothing. This is used to support nested bootmem calls.
+ */
+static inline void __cvmx_bootmem_lock(u32 flags)
+{
+ if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING)) {
+ /*
+ * Unfortunately we can't use the normal cvmx-spinlock code as
+ * the memory for the bootmem descriptor may be not accessible
+ * by a C pointer. We use a 64bit XKPHYS address to access the
+ * memory directly
+ */
+ u64 lock_addr = (1ull << 63) |
+ (cvmx_bootmem_desc_addr + offsetof(struct cvmx_bootmem_desc,
+ lock));
+ unsigned int tmp;
+
+ __asm__ __volatile__(".set noreorder\n"
+ "1: ll %[tmp], 0(%[addr])\n"
+ " bnez %[tmp], 1b\n"
+ " li %[tmp], 1\n"
+ " sc %[tmp], 0(%[addr])\n"
+ " beqz %[tmp], 1b\n"
+ " nop\n"
+ ".set reorder\n"
+ : [tmp] "=&r"(tmp)
+ : [addr] "r"(lock_addr)
+ : "memory");
+ }
+}
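+
+/*
+ * The ll/sc sequence above is effectively this spinlock acquire,
+ * sketched in C (ll/sc on the raw XKPHYS address is required since
+ * the lock word may not be reachable through a C pointer):
+ *
+ * do {
+ * while (*lock) ; // spin while the lock is held
+ * } while (!store_conditional(lock, 1)); // retry if sc failed
+ */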
+
+/**
+ * Release the low level bootmem descriptor lock. If no locking
+ * is specified in the flags, then nothing is done.
+ *
+ * @param flags CVMX_BOOTMEM_FLAG_NO_LOCKING means this function should do
+ * nothing. This is used to support nested bootmem calls.
+ */
+static inline void __cvmx_bootmem_unlock(u32 flags)
+{
+ if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING)) {
+ /*
+ * Unfortunately we can't use the normal cvmx-spinlock code as
+ * the memory for the bootmem descriptor may be not accessible
+ * by a C pointer. We use a 64bit XKPHYS address to access the
+ * memory directly
+ */
+ u64 lock_addr = __cvmx_bootmem_get_lock_addr();
+
+ CVMX_SYNCW;
+ __asm__ __volatile__("sw $0, 0(%[addr])\n"
+ : : [addr] "r"(lock_addr)
+ : "memory");
+ CVMX_SYNCW;
+ }
+}
+
+/*
+ * Some of the cvmx-bootmem functions dealing with C pointers are not
+ * supported when we are compiling for CVMX_BUILD_FOR_LINUX_HOST. This
+ * ifndef removes these functions when they aren't needed.
+ *
+ * This function takes an address range and adjusts it as necessary
+ * to match the ABI that is currently being used. This is required to
+ * ensure that bootmem_alloc* functions only return valid pointers for
+ * 32 bit ABIs
+ */
+static int __cvmx_validate_mem_range(u64 *min_addr_ptr,
+ u64 *max_addr_ptr)
+{
+ u64 max_phys = (1ull << 29) - 0x10; /* KSEG0 */
+
+ *min_addr_ptr = min_t(u64, max_t(u64, *min_addr_ptr, 0x0), max_phys);
+ if (!*max_addr_ptr) {
+ *max_addr_ptr = max_phys;
+ } else {
+ *max_addr_ptr = max_t(u64, min_t(u64, *max_addr_ptr,
+ max_phys), 0x0);
+ }
+
+ return 0;
+}
+
+u64 cvmx_bootmem_phy_alloc_range(u64 size, u64 alignment,
+ u64 min_addr, u64 max_addr)
+{
+ s64 address;
+
+ __cvmx_validate_mem_range(&min_addr, &max_addr);
+ address = cvmx_bootmem_phy_alloc(size, min_addr, max_addr,
+ alignment, 0);
+ if (address > 0)
+ return address;
+ else
+ return 0;
+}
+
+void *cvmx_bootmem_alloc_range(u64 size, u64 alignment,
+ u64 min_addr, u64 max_addr)
+{
+ s64 address;
+
+ __cvmx_validate_mem_range(&min_addr, &max_addr);
+ address = cvmx_bootmem_phy_alloc(size, min_addr, max_addr,
+ alignment, 0);
+
+ if (address > 0)
+ return cvmx_phys_to_ptr(address);
+ else
+ return NULL;
+}
+
+void *cvmx_bootmem_alloc_address(u64 size, u64 address,
+ u64 alignment)
+{
+ return cvmx_bootmem_alloc_range(size, alignment, address,
+ address + size);
+}
+
+void *cvmx_bootmem_alloc_node(u64 node, u64 size, u64 alignment)
+{
+ return cvmx_bootmem_alloc_range(size, alignment,
+ node << CVMX_NODE_MEM_SHIFT,
+ ((node + 1) << CVMX_NODE_MEM_SHIFT) - 1);
+}
+
+void *cvmx_bootmem_alloc(u64 size, u64 alignment)
+{
+ return cvmx_bootmem_alloc_range(size, alignment, 0, 0);
+}
+
+void *cvmx_bootmem_alloc_named_range_once(u64 size, u64 min_addr,
+ u64 max_addr, u64 align,
+ const char *name,
+ void (*init)(void *))
+{
+ u64 named_block_desc_addr;
+ void *ptr;
+ s64 addr;
+
+ __cvmx_bootmem_lock(0);
+
+ __cvmx_validate_mem_range(&min_addr, &max_addr);
+ named_block_desc_addr =
+ cvmx_bootmem_phy_named_block_find(name,
+ CVMX_BOOTMEM_FLAG_NO_LOCKING);
+
+ if (named_block_desc_addr) {
+ addr = CVMX_BOOTMEM_NAMED_GET_FIELD(named_block_desc_addr,
+ base_addr);
+ __cvmx_bootmem_unlock(0);
+ return cvmx_phys_to_ptr(addr);
+ }
+
+ addr = cvmx_bootmem_phy_named_block_alloc(size, min_addr, max_addr,
+ align, name,
+ CVMX_BOOTMEM_FLAG_NO_LOCKING);
+
+ if (addr < 0) {
+ __cvmx_bootmem_unlock(0);
+ return NULL;
+ }
+ ptr = cvmx_phys_to_ptr(addr);
+
+ if (init)
+ init(ptr);
+ else
+ memset(ptr, 0, size);
+
+ __cvmx_bootmem_unlock(0);
+ return ptr;
+}
+
+void *cvmx_bootmem_alloc_named_range_flags(u64 size, u64 min_addr,
+ u64 max_addr, u64 align,
+ const char *name, u32 flags)
+{
+ s64 addr;
+
+ __cvmx_validate_mem_range(&min_addr, &max_addr);
+ addr = cvmx_bootmem_phy_named_block_alloc(size, min_addr, max_addr,
+ align, name, flags);
+ if (addr >= 0)
+ return cvmx_phys_to_ptr(addr);
+ else
+ return NULL;
+}
+
+void *cvmx_bootmem_alloc_named_range(u64 size, u64 min_addr,
+ u64 max_addr, u64 align,
+ const char *name)
+{
+ return cvmx_bootmem_alloc_named_range_flags(size, min_addr, max_addr,
+ align, name, 0);
+}
+
+void *cvmx_bootmem_alloc_named_address(u64 size, u64 address,
+ const char *name)
+{
+ return cvmx_bootmem_alloc_named_range(size, address, address + size,
+ 0, name);
+}
+
+void *cvmx_bootmem_alloc_named(u64 size, u64 alignment,
+ const char *name)
+{
+ return cvmx_bootmem_alloc_named_range(size, 0, 0, alignment, name);
+}
+
+void *cvmx_bootmem_alloc_named_flags(u64 size, u64 alignment,
+ const char *name, u32 flags)
+{
+ return cvmx_bootmem_alloc_named_range_flags(size, 0, 0, alignment,
+ name, flags);
+}
+
+int cvmx_bootmem_free_named(const char *name)
+{
+ return cvmx_bootmem_phy_named_block_free(name, 0);
+}
+
+/**
+ * Find a named block with flags
+ *
+ * @param name is the block name
+ * @param flags indicates the need to use locking during search
+ * @return pointer to named block descriptor
+ *
+ * Note: this function returns a pointer to a static structure,
+ * and is therefore not re-entrant.
+ * Making this function re-entrant will break backward compatibility.
+ */
+const struct cvmx_bootmem_named_block_desc *
+__cvmx_bootmem_find_named_block_flags(const char *name, u32 flags)
+{
+ static struct cvmx_bootmem_named_block_desc desc;
+ u64 named_addr = cvmx_bootmem_phy_named_block_find(name, flags);
+
+ if (named_addr) {
+ desc.base_addr = CVMX_BOOTMEM_NAMED_GET_FIELD(named_addr,
+ base_addr);
+ desc.size = CVMX_BOOTMEM_NAMED_GET_FIELD(named_addr, size);
+ strncpy(desc.name, name, sizeof(desc.name));
+ desc.name[sizeof(desc.name) - 1] = 0;
+ return &desc;
+ } else {
+ return NULL;
+ }
+}
+
+const struct cvmx_bootmem_named_block_desc *
+cvmx_bootmem_find_named_block(const char *name)
+{
+ return __cvmx_bootmem_find_named_block_flags(name, 0);
+}
+
+void cvmx_bootmem_print_named(void)
+{
+ cvmx_bootmem_phy_named_block_print();
+}
+
+int cvmx_bootmem_init(u64 mem_desc_addr)
+{
+ if (!cvmx_bootmem_desc_addr)
+ cvmx_bootmem_desc_addr = mem_desc_addr;
+
+ return 0;
+}
+
+u64 cvmx_bootmem_available_mem(u64 min_block_size)
+{
+ return cvmx_bootmem_phy_available_mem(min_block_size);
+}
+
+/*
+ * The cvmx_bootmem_phy* functions below return 64 bit physical
+ * addresses, and expose more features than the cvmx_bootmem functions
+ * above. These are required for full memory space access in 32 bit
+ * applications, as well as for using some advanced features. Most
+ * applications should not need to use these.
+ */
+
+s64 cvmx_bootmem_phy_alloc(u64 req_size, u64 address_min,
+ u64 address_max, u64 alignment,
+ u32 flags)
+{
+ u64 head_addr, ent_addr, ent_size;
+ u64 target_ent_addr = 0, target_prev_addr = 0;
+ u64 target_size = ~0ull;
+ u64 free_start, free_end;
+ u64 next_addr, prev_addr = 0;
+ u64 new_ent_addr = 0, new_ent_size;
+ u64 desired_min_addr, usable_max;
+ u64 align, align_mask;
+
+ debug("%s: req_size: 0x%llx, min_addr: 0x%llx, max_addr: 0x%llx, align: 0x%llx\n",
+ __func__, CAST_ULL(req_size), CAST_ULL(address_min),
+ CAST_ULL(address_max), CAST_ULL(alignment));
+
+ if (__cvmx_bootmem_check_version(0))
+ return -1;
+
+ /*
+ * Do a variety of checks to validate the arguments. The
+ * allocator code will later assume that these checks have
+ * been made. We validate that the requested constraints are
+ * not self-contradictory before we look through the list of
+ * available memory
+ */
+
+ /* 0 is not a valid req_size for this allocator */
+ if (!req_size)
+ return -1;
+
+ /* Round req_size up to multiple of minimum alignment bytes */
+ req_size = (req_size + (CVMX_BOOTMEM_ALIGNMENT_SIZE - 1)) &
+ ~(CVMX_BOOTMEM_ALIGNMENT_SIZE - 1);
+
+ /* Make sure alignment is power of 2, and at least the minimum */
+ for (align = CVMX_BOOTMEM_ALIGNMENT_SIZE;
+ align < (1ull << 48);
+ align <<= 1) {
+ if (align >= alignment)
+ break;
+ }
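+ /* e.g. a requested alignment of 0x900 rounds up to align = 0x1000 */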
+
+ align_mask = ~(align - 1);
+
+ /*
+ * Adjust address minimum based on requested alignment (round
+ * up to meet alignment). Do this here so we can reject
+ * impossible requests up front. (NOP for address_min == 0)
+ */
+ address_min = (address_min + (align - 1)) & align_mask;
+
+ /*
+ * Convert !0 address_min and 0 address_max to special case of
+ * range that specifies an exact memory block to allocate. Do
+ * this before other checks and adjustments so that this
+ * transformation will be validated
+ */
+ if (address_min && !address_max)
+ address_max = address_min + req_size;
+ else if (!address_min && !address_max)
+ address_max = ~0ull; /* If no limits given, use max */
+
+ /*
+ * Reject inconsistent args. We have adjusted these, so this
+ * may fail due to our internal changes even if this check
+ * would pass for the values the user supplied.
+ */
+ if (req_size > address_max - address_min)
+ return -1;
+
+ __cvmx_bootmem_lock(flags);
+
+ /* Walk through the list entries to find the right fit */
+ head_addr = CVMX_BOOTMEM_DESC_GET_FIELD(head_addr);
+
+ for (ent_addr = head_addr;
+ ent_addr != 0ULL && ent_addr < address_max;
+ prev_addr = ent_addr,
+ ent_addr = cvmx_bootmem_phy_get_next(ent_addr)) {
+ /* Raw free block size */
+ ent_size = cvmx_bootmem_phy_get_size(ent_addr);
+ next_addr = cvmx_bootmem_phy_get_next(ent_addr);
+
+ /* Validate the free list ascending order */
+ if (ent_size < CVMX_BOOTMEM_ALIGNMENT_SIZE ||
+ (next_addr && ent_addr > next_addr)) {
+ debug("ERROR: %s: bad free list ent: %#llx, next: %#llx\n",
+ __func__, CAST_ULL(ent_addr),
+ CAST_ULL(next_addr));
+ goto error_out;
+ }
+
+ /* adjust free block edges for alignment */
+ free_start = (ent_addr + align - 1) & align_mask;
+ free_end = (ent_addr + ent_size) & align_mask;
+
+ /* check that free block is large enough */
+ if ((free_start + req_size) > free_end)
+ continue;
+
+ /* check that desired start is within the free block */
+ if (free_end < address_min || free_start > address_max)
+ continue;
+ if ((free_end - address_min) < req_size)
+ continue;
+ if ((address_max - free_start) < req_size)
+ continue;
+
+ /* Found usable free block */
+ target_ent_addr = ent_addr;
+ target_prev_addr = prev_addr;
+ target_size = ent_size;
+
+ /* Continue looking for highest/best block that fits */
+ }
+
+ /* Bail if the search has resulted in no eligible free blocks */
+ if (target_ent_addr == 0) {
+ debug("%s: eligible free block not found\n", __func__);
+ goto error_out;
+ }
+
+ /* Found the free block to allocate from */
+ ent_addr = target_ent_addr;
+ prev_addr = target_prev_addr;
+ ent_size = target_size;
+
+ debug("%s: using free block at %#010llx size %#llx\n",
+ __func__, CAST_ULL(ent_addr), CAST_ULL(ent_size));
+
+ /* Always allocate from the end of a free block */
+ usable_max = min_t(u64, address_max, ent_addr + ent_size);
+ desired_min_addr = usable_max - req_size;
+ desired_min_addr &= align_mask;
+
+ /* Split current free block into up to 3 free blocks */
+
+ /* Check for head room */
+ if (desired_min_addr > ent_addr) {
+ /* Create a new free block at the allocation address */
+ new_ent_addr = desired_min_addr;
+ new_ent_size = ent_size - (desired_min_addr - ent_addr);
+
+ cvmx_bootmem_phy_set_next(new_ent_addr,
+ cvmx_bootmem_phy_get_next(ent_addr));
+ cvmx_bootmem_phy_set_size(new_ent_addr, new_ent_size);
+
+ /* Split out head room into a new free block */
+ ent_size -= new_ent_size;
+ cvmx_bootmem_phy_set_next(ent_addr, new_ent_addr);
+ cvmx_bootmem_phy_set_size(ent_addr, ent_size);
+
+ debug("%s: splitting head, addr %#llx size %#llx\n",
+ __func__, CAST_ULL(ent_addr), CAST_ULL(ent_size));
+
+ /* Make the allocation target the current free block */
+ prev_addr = ent_addr;
+ ent_addr = new_ent_addr;
+ ent_size = new_ent_size;
+ }
+
+ /* Check for tail room */
+ if ((desired_min_addr + req_size) < (ent_addr + ent_size)) {
+ new_ent_addr = ent_addr + req_size;
+ new_ent_size = ent_size - req_size;
+
+ /* Create a new free block from tail room */
+ cvmx_bootmem_phy_set_next(new_ent_addr,
+ cvmx_bootmem_phy_get_next(ent_addr));
+ cvmx_bootmem_phy_set_size(new_ent_addr, new_ent_size);
+
+ debug("%s: splitting tail, addr %#llx size %#llx\n",
+ __func__, CAST_ULL(new_ent_addr), CAST_ULL(new_ent_size));
+
+ /* Adjust the current block to exclude tail room */
+ ent_size = ent_size - new_ent_size;
+ cvmx_bootmem_phy_set_next(ent_addr, new_ent_addr);
+ cvmx_bootmem_phy_set_size(ent_addr, ent_size);
+ }
+
+ /* The current free block IS the allocation target */
+ if (desired_min_addr != ent_addr || ent_size != req_size)
+ debug("ERROR: %s: internal error - addr %#llx %#llx size %#llx %#llx\n",
+ __func__, CAST_ULL(desired_min_addr), CAST_ULL(ent_addr),
+ CAST_ULL(ent_size), CAST_ULL(req_size));
+
+ /* Remove the current free block from list */
+ if (prev_addr) {
+ cvmx_bootmem_phy_set_next(prev_addr,
+ cvmx_bootmem_phy_get_next(ent_addr));
+ } else {
+ /* head of list being returned, so update head ptr */
+ CVMX_BOOTMEM_DESC_SET_FIELD(head_addr,
+ cvmx_bootmem_phy_get_next(ent_addr));
+ }
+
+ __cvmx_bootmem_unlock(flags);
+ debug("%s: allocated size: %#llx, at addr: %#010llx\n",
+ __func__,
+ CAST_ULL(req_size),
+ CAST_ULL(desired_min_addr));
+
+ return desired_min_addr;
+
+error_out:
+ /* Requested memory not found or argument error */
+ __cvmx_bootmem_unlock(flags);
+ return -1;
+}
+
+int __cvmx_bootmem_phy_free(u64 phy_addr, u64 size, u32 flags)
+{
+ u64 cur_addr;
+ u64 prev_addr = 0; /* zero is invalid */
+ int retval = 0;
+
+ debug("%s addr: %#llx, size: %#llx\n", __func__,
+ CAST_ULL(phy_addr), CAST_ULL(size));
+
+ if (__cvmx_bootmem_check_version(0))
+ return 0;
+
+ /* 0 is not a valid size for this allocator */
+ if (!size || !phy_addr)
+ return 0;
+
+ /* Round size up to mult of minimum alignment bytes */
+ size = (size + (CVMX_BOOTMEM_ALIGNMENT_SIZE - 1)) &
+ ~(CVMX_BOOTMEM_ALIGNMENT_SIZE - 1);
+
+ __cvmx_bootmem_lock(flags);
+ cur_addr = CVMX_BOOTMEM_DESC_GET_FIELD(head_addr);
+ if (cur_addr == 0 || phy_addr < cur_addr) {
+ /* add at front of list - special case with changing head ptr */
+ if (cur_addr && phy_addr + size > cur_addr)
+ goto bootmem_free_done; /* error, overlapping section */
+ else if (phy_addr + size == cur_addr) {
+ /* Add to front of existing first block */
+ cvmx_bootmem_phy_set_next(phy_addr,
+ cvmx_bootmem_phy_get_next(cur_addr));
+ cvmx_bootmem_phy_set_size(phy_addr,
+ cvmx_bootmem_phy_get_size(cur_addr) + size);
+ CVMX_BOOTMEM_DESC_SET_FIELD(head_addr, phy_addr);
+
+ } else {
+ /* New block before first block */
+ /* OK if cur_addr is 0 */
+ cvmx_bootmem_phy_set_next(phy_addr, cur_addr);
+ cvmx_bootmem_phy_set_size(phy_addr, size);
+ CVMX_BOOTMEM_DESC_SET_FIELD(head_addr, phy_addr);
+ }
+ retval = 1;
+ goto bootmem_free_done;
+ }
+
+ /* Find place in list to add block */
+ while (cur_addr && phy_addr > cur_addr) {
+ prev_addr = cur_addr;
+ cur_addr = cvmx_bootmem_phy_get_next(cur_addr);
+ }
+
+ if (!cur_addr) {
+ /*
+ * We have reached the end of the list, add on to end, checking
+ * to see if we need to combine with last block
+ */
+ if (prev_addr + cvmx_bootmem_phy_get_size(prev_addr) == phy_addr) {
+ cvmx_bootmem_phy_set_size(prev_addr,
+ cvmx_bootmem_phy_get_size(prev_addr) + size);
+ } else {
+ cvmx_bootmem_phy_set_next(prev_addr, phy_addr);
+ cvmx_bootmem_phy_set_size(phy_addr, size);
+ cvmx_bootmem_phy_set_next(phy_addr, 0);
+ }
+ retval = 1;
+ goto bootmem_free_done;
+ } else {
+ /*
+ * insert between prev and cur nodes, checking for merge with
+ * either/both
+ */
+ if (prev_addr + cvmx_bootmem_phy_get_size(prev_addr) == phy_addr) {
+ /* Merge with previous */
+ cvmx_bootmem_phy_set_size(prev_addr,
+ cvmx_bootmem_phy_get_size(prev_addr) + size);
+ if (phy_addr + size == cur_addr) {
+ /* Also merge with current */
+ cvmx_bootmem_phy_set_size(prev_addr,
+ cvmx_bootmem_phy_get_size(cur_addr) +
+ cvmx_bootmem_phy_get_size(prev_addr));
+ cvmx_bootmem_phy_set_next(prev_addr,
+ cvmx_bootmem_phy_get_next(cur_addr));
+ }
+ retval = 1;
+ goto bootmem_free_done;
+ } else if (phy_addr + size == cur_addr) {
+ /* Merge with current */
+ cvmx_bootmem_phy_set_size(phy_addr,
+ cvmx_bootmem_phy_get_size(cur_addr) + size);
+ cvmx_bootmem_phy_set_next(phy_addr,
+ cvmx_bootmem_phy_get_next(cur_addr));
+ cvmx_bootmem_phy_set_next(prev_addr, phy_addr);
+ retval = 1;
+ goto bootmem_free_done;
+ }
+
+ /* It is a standalone block, add in between prev and cur */
+ cvmx_bootmem_phy_set_size(phy_addr, size);
+ cvmx_bootmem_phy_set_next(phy_addr, cur_addr);
+ cvmx_bootmem_phy_set_next(prev_addr, phy_addr);
+ }
+ retval = 1;
+
+bootmem_free_done:
+ __cvmx_bootmem_unlock(flags);
+ return retval;
+}
+
+void cvmx_bootmem_phy_list_print(void)
+{
+ u64 addr;
+
+ addr = CVMX_BOOTMEM_DESC_GET_FIELD(head_addr);
+ printf("\n\n\nPrinting bootmem block list, descriptor: 0x%llx, head is 0x%llx\n",
+ CAST_ULL(cvmx_bootmem_desc_addr), CAST_ULL(addr));
+ printf("Descriptor version: %d.%d\n",
+ (int)CVMX_BOOTMEM_DESC_GET_FIELD(major_version),
+ (int)CVMX_BOOTMEM_DESC_GET_FIELD(minor_version));
+ if (CVMX_BOOTMEM_DESC_GET_FIELD(major_version) > 3)
+ debug("Warning: Bootmem descriptor version is newer than expected\n");
+
+ if (!addr)
+ printf("mem list is empty!\n");
+
+ while (addr) {
+ printf("Block address: 0x%08llx, size: 0x%08llx, next: 0x%08llx\n", CAST_ULL(addr),
+ CAST_ULL(cvmx_bootmem_phy_get_size(addr)),
+ CAST_ULL(cvmx_bootmem_phy_get_next(addr)));
+ addr = cvmx_bootmem_phy_get_next(addr);
+ }
+ printf("\n\n");
+}
+
+u64 cvmx_bootmem_phy_available_mem(u64 min_block_size)
+{
+ u64 addr;
+
+ u64 available_mem = 0;
+
+ __cvmx_bootmem_lock(0);
+ addr = CVMX_BOOTMEM_DESC_GET_FIELD(head_addr);
+ while (addr) {
+ if (cvmx_bootmem_phy_get_size(addr) >= min_block_size)
+ available_mem += cvmx_bootmem_phy_get_size(addr);
+ addr = cvmx_bootmem_phy_get_next(addr);
+ }
+ __cvmx_bootmem_unlock(0);
+ return available_mem;
+}
+
+u64 cvmx_bootmem_phy_named_block_find(const char *name, u32 flags)
+{
+ u64 result = 0;
+
+ debug("%s: %s\n", __func__, name);
+
+ __cvmx_bootmem_lock(flags);
+ if (!__cvmx_bootmem_check_version(3)) {
+ int i;
+ u64 named_block_array_addr =
+ CVMX_BOOTMEM_DESC_GET_FIELD(named_block_array_addr);
+ int num_blocks =
+ CVMX_BOOTMEM_DESC_GET_FIELD(named_block_num_blocks);
+ int name_length =
+ CVMX_BOOTMEM_DESC_GET_FIELD(named_block_name_len);
+ u64 named_addr = named_block_array_addr;
+
+ for (i = 0; i < num_blocks; i++) {
+ u64 named_size =
+ CVMX_BOOTMEM_NAMED_GET_FIELD(named_addr, size);
+ if (name && named_size) {
+ char name_tmp[name_length + 1];
+
+ CVMX_BOOTMEM_NAMED_GET_NAME(named_addr,
+ name_tmp,
+ name_length);
+ if (!strncmp(name, name_tmp, name_length)) {
+ result = named_addr;
+ break;
+ }
+ } else if (!name && !named_size) {
+ result = named_addr;
+ break;
+ }
+
+ named_addr +=
+ sizeof(struct cvmx_bootmem_named_block_desc);
+ }
+ }
+ __cvmx_bootmem_unlock(flags);
+ return result;
+}
+
+int cvmx_bootmem_phy_named_block_free(const char *name, u32 flags)
+{
+ u64 named_block_addr;
+
+ if (__cvmx_bootmem_check_version(3))
+ return 0;
+
+ debug("%s: %s\n", __func__, name);
+
+ /*
+ * Take lock here, as name lookup/block free/name free need to be
+ * atomic
+ */
+ __cvmx_bootmem_lock(flags);
+
+ named_block_addr = cvmx_bootmem_phy_named_block_find(name,
+ CVMX_BOOTMEM_FLAG_NO_LOCKING);
+ if (named_block_addr) {
+ u64 named_addr =
+ CVMX_BOOTMEM_NAMED_GET_FIELD(named_block_addr,
+ base_addr);
+ u64 named_size =
+ CVMX_BOOTMEM_NAMED_GET_FIELD(named_block_addr, size);
+
+ debug("%s: %s, base: 0x%llx, size: 0x%llx\n",
+ __func__, name, CAST_ULL(named_addr),
+ CAST_ULL(named_size));
+
+ __cvmx_bootmem_phy_free(named_addr, named_size,
+ CVMX_BOOTMEM_FLAG_NO_LOCKING);
+
+ /* Set size to zero to indicate block not used. */
+ CVMX_BOOTMEM_NAMED_SET_FIELD(named_block_addr, size, 0);
+ }
+
+ __cvmx_bootmem_unlock(flags);
+ return !!named_block_addr; /* 0 on failure, 1 on success */
+}
+
+s64 cvmx_bootmem_phy_named_block_alloc(u64 size, u64 min_addr,
+ u64 max_addr,
+ u64 alignment, const char *name,
+ u32 flags)
+{
+ s64 addr_allocated;
+ u64 named_block_desc_addr;
+
+ debug("%s: size: 0x%llx, min: 0x%llx, max: 0x%llx, align: 0x%llx, name: %s\n",
+ __func__, CAST_ULL(size), CAST_ULL(min_addr), CAST_ULL(max_addr),
+ CAST_ULL(alignment), name);
+
+ if (__cvmx_bootmem_check_version(3))
+ return -1;
+
+ /*
+ * Take lock here, as name lookup/block alloc/name add need to be
+ * atomic
+ */
+ __cvmx_bootmem_lock(flags);
+
+ named_block_desc_addr =
+ cvmx_bootmem_phy_named_block_find(name, flags |
+ CVMX_BOOTMEM_FLAG_NO_LOCKING);
+ if (named_block_desc_addr) {
+ __cvmx_bootmem_unlock(flags);
+ return -1;
+ }
+
+ /* Get pointer to first available named block descriptor */
+ named_block_desc_addr =
+ cvmx_bootmem_phy_named_block_find(NULL, flags |
+ CVMX_BOOTMEM_FLAG_NO_LOCKING);
+ if (!named_block_desc_addr) {
+ __cvmx_bootmem_unlock(flags);
+ return -1;
+ }
+
+ /*
+ * Round size up to mult of minimum alignment bytes
+ * We need the actual size allocated to allow for blocks to be
+	 * coalesced when they are freed. The alloc routine does the
+ * same rounding up on all allocations.
+ */
+ size = (size + (CVMX_BOOTMEM_ALIGNMENT_SIZE - 1)) &
+ ~(CVMX_BOOTMEM_ALIGNMENT_SIZE - 1);
+
+ addr_allocated = cvmx_bootmem_phy_alloc(size, min_addr, max_addr,
+ alignment,
+ flags | CVMX_BOOTMEM_FLAG_NO_LOCKING);
+ if (addr_allocated >= 0) {
+ CVMX_BOOTMEM_NAMED_SET_FIELD(named_block_desc_addr, base_addr,
+ addr_allocated);
+ CVMX_BOOTMEM_NAMED_SET_FIELD(named_block_desc_addr, size, size);
+ CVMX_BOOTMEM_NAMED_SET_NAME(named_block_desc_addr, name,
+ CVMX_BOOTMEM_DESC_GET_FIELD(named_block_name_len));
+ }
+
+ __cvmx_bootmem_unlock(flags);
+ return addr_allocated;
+}
+
+void cvmx_bootmem_phy_named_block_print(void)
+{
+ int i;
+ int printed = 0;
+
+ u64 named_block_array_addr =
+ CVMX_BOOTMEM_DESC_GET_FIELD(named_block_array_addr);
+ int num_blocks = CVMX_BOOTMEM_DESC_GET_FIELD(named_block_num_blocks);
+ int name_length = CVMX_BOOTMEM_DESC_GET_FIELD(named_block_name_len);
+ u64 named_block_addr = named_block_array_addr;
+
+ debug("%s: desc addr: 0x%llx\n",
+ __func__, CAST_ULL(cvmx_bootmem_desc_addr));
+
+ if (__cvmx_bootmem_check_version(3))
+ return;
+
+ printf("List of currently allocated named bootmem blocks:\n");
+ for (i = 0; i < num_blocks; i++) {
+ u64 named_size =
+ CVMX_BOOTMEM_NAMED_GET_FIELD(named_block_addr, size);
+ if (named_size) {
+ char name_tmp[name_length + 1];
+ u64 named_addr =
+ CVMX_BOOTMEM_NAMED_GET_FIELD(named_block_addr,
+ base_addr);
+ CVMX_BOOTMEM_NAMED_GET_NAME(named_block_addr, name_tmp,
+ name_length);
+ printed++;
+ printf("Name: %s, address: 0x%08llx, size: 0x%08llx, index: %d\n", name_tmp,
+ CAST_ULL(named_addr),
+ CAST_ULL(named_size), i);
+ }
+ named_block_addr +=
+ sizeof(struct cvmx_bootmem_named_block_desc);
+ }
+
+ if (!printed)
+ printf("No named bootmem blocks exist.\n");
+}
+
+s64 cvmx_bootmem_phy_mem_list_init(u64 mem_size,
+ u32 low_reserved_bytes,
+ struct cvmx_bootmem_desc *desc_buffer)
+{
+ u64 cur_block_addr;
+ s64 addr;
+ int i;
+
+ debug("%s (arg desc ptr: %p, cvmx_bootmem_desc: 0x%llx)\n",
+ __func__, desc_buffer, CAST_ULL(cvmx_bootmem_desc_addr));
+
+ /*
+ * Descriptor buffer needs to be in 32 bit addressable space to be
+ * compatible with 32 bit applications
+ */
+ if (!desc_buffer) {
+ debug("ERROR: no memory for cvmx_bootmem descriptor provided\n");
+ return 0;
+ }
+
+ if (mem_size > OCTEON_MAX_PHY_MEM_SIZE) {
+ mem_size = OCTEON_MAX_PHY_MEM_SIZE;
+ debug("ERROR: requested memory size too large, truncating to maximum size\n");
+ }
+
+ if (cvmx_bootmem_desc_addr)
+ return 1;
+
+ /* Initialize cvmx pointer to descriptor */
+ cvmx_bootmem_init(cvmx_ptr_to_phys(desc_buffer));
+
+ /* Fill the bootmem descriptor */
+ CVMX_BOOTMEM_DESC_SET_FIELD(lock, 0);
+ CVMX_BOOTMEM_DESC_SET_FIELD(flags, 0);
+ CVMX_BOOTMEM_DESC_SET_FIELD(head_addr, 0);
+ CVMX_BOOTMEM_DESC_SET_FIELD(major_version, CVMX_BOOTMEM_DESC_MAJ_VER);
+ CVMX_BOOTMEM_DESC_SET_FIELD(minor_version, CVMX_BOOTMEM_DESC_MIN_VER);
+ CVMX_BOOTMEM_DESC_SET_FIELD(app_data_addr, 0);
+ CVMX_BOOTMEM_DESC_SET_FIELD(app_data_size, 0);
+
+ /*
+ * Set up global pointer to start of list, exclude low 64k for exception
+ * vectors, space for global descriptor
+ */
+ cur_block_addr = (OCTEON_DDR0_BASE + low_reserved_bytes);
+
+ if (mem_size <= OCTEON_DDR0_SIZE) {
+ __cvmx_bootmem_phy_free(cur_block_addr,
+ mem_size - low_reserved_bytes, 0);
+ goto frees_done;
+ }
+
+ __cvmx_bootmem_phy_free(cur_block_addr,
+ OCTEON_DDR0_SIZE - low_reserved_bytes, 0);
+
+ mem_size -= OCTEON_DDR0_SIZE;
+
+	/* Add the DDR1 block, plus DDR2 if present */
+ if (mem_size > OCTEON_DDR1_SIZE) {
+ __cvmx_bootmem_phy_free(OCTEON_DDR1_BASE, OCTEON_DDR1_SIZE, 0);
+ __cvmx_bootmem_phy_free(OCTEON_DDR2_BASE,
+ mem_size - OCTEON_DDR1_SIZE, 0);
+ } else {
+ __cvmx_bootmem_phy_free(OCTEON_DDR1_BASE, mem_size, 0);
+ }
+frees_done:
+
+ /* Initialize the named block structure */
+ CVMX_BOOTMEM_DESC_SET_FIELD(named_block_name_len, CVMX_BOOTMEM_NAME_LEN);
+ CVMX_BOOTMEM_DESC_SET_FIELD(named_block_num_blocks,
+ CVMX_BOOTMEM_NUM_NAMED_BLOCKS);
+ CVMX_BOOTMEM_DESC_SET_FIELD(named_block_array_addr, 0);
+
+ /* Allocate this near the top of the low 256 MBytes of memory */
+ addr = cvmx_bootmem_phy_alloc(CVMX_BOOTMEM_NUM_NAMED_BLOCKS *
+ sizeof(struct cvmx_bootmem_named_block_desc),
+ 0, 0x10000000, 0,
+ CVMX_BOOTMEM_FLAG_END_ALLOC);
+ if (addr >= 0)
+ CVMX_BOOTMEM_DESC_SET_FIELD(named_block_array_addr, addr);
+
+	debug("%s: named_block_array_addr: 0x%llx\n",
+ __func__, CAST_ULL(addr));
+
+ if (addr < 0) {
+		debug("FATAL ERROR: unable to allocate memory for the named block array!\n");
+ return 0;
+ }
+
+ for (i = 0; i < CVMX_BOOTMEM_NUM_NAMED_BLOCKS; i++) {
+ CVMX_BOOTMEM_NAMED_SET_FIELD(addr, base_addr, 0);
+ CVMX_BOOTMEM_NAMED_SET_FIELD(addr, size, 0);
+ addr += sizeof(struct cvmx_bootmem_named_block_desc);
+ }
+
+ return 1;
+}
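+
+/*
+ * Usage sketch (hypothetical values): seed the free list with 1 GiB of
+ * DRAM while keeping the low 1 MiB reserved, using a descriptor buffer
+ * that lives in 32 bit addressable space:
+ *
+ *	static struct cvmx_bootmem_desc desc;
+ *
+ *	cvmx_bootmem_phy_mem_list_init(1ULL << 30, 0x100000, &desc);
+ */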
+
+s64 cvmx_bootmem_phy_mem_list_init_multi(u8 node_mask,
+ u32 mem_sizes[],
+ u32 low_reserved_bytes,
+ struct cvmx_bootmem_desc *desc_buffer)
+{
+ u64 cur_block_addr;
+ u64 mem_size;
+ s64 addr;
+ int i;
+ int node;
+ u64 node_base; /* Make u64 to reduce type casting */
+
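+	/* Node 0 always uses the DRAM size detected by U-Boot (in MiB) */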
+ mem_sizes[0] = gd->ram_size / (1024 * 1024);
+
+	debug("%s (arg desc ptr: %p, cvmx_bootmem_desc: 0x%llx)\n",
+	      __func__, desc_buffer, CAST_ULL(cvmx_bootmem_desc_addr));
+
+ /*
+ * Descriptor buffer needs to be in 32 bit addressable space to be
+ * compatible with 32 bit applications
+ */
+ if (!desc_buffer) {
+ debug("ERROR: no memory for cvmx_bootmem descriptor provided\n");
+ return 0;
+ }
+
+ cvmx_coremask_for_each_node(node, node_mask) {
+ if ((mem_sizes[node] * 1024 * 1024) > OCTEON_MAX_PHY_MEM_SIZE) {
+ mem_sizes[node] = OCTEON_MAX_PHY_MEM_SIZE /
+ (1024 * 1024);
+ debug("ERROR node#%lld: requested memory size too large, truncating to maximum size\n",
+ CAST_ULL(node));
+ }
+ }
+
+ if (cvmx_bootmem_desc_addr)
+ return 1;
+
+ /* Initialize cvmx pointer to descriptor */
+ cvmx_bootmem_init(cvmx_ptr_to_phys(desc_buffer));
+
+ /* Fill the bootmem descriptor */
+ CVMX_BOOTMEM_DESC_SET_FIELD(lock, 0);
+ CVMX_BOOTMEM_DESC_SET_FIELD(flags, 0);
+ CVMX_BOOTMEM_DESC_SET_FIELD(head_addr, 0);
+ CVMX_BOOTMEM_DESC_SET_FIELD(major_version, CVMX_BOOTMEM_DESC_MAJ_VER);
+ CVMX_BOOTMEM_DESC_SET_FIELD(minor_version, CVMX_BOOTMEM_DESC_MIN_VER);
+ CVMX_BOOTMEM_DESC_SET_FIELD(app_data_addr, 0);
+ CVMX_BOOTMEM_DESC_SET_FIELD(app_data_size, 0);
+
+ cvmx_coremask_for_each_node(node, node_mask) {
+ if (node != 0) /* do not reserve memory on remote nodes */
+ low_reserved_bytes = 0;
+
+ mem_size = (u64)mem_sizes[node] * (1024 * 1024); /* MBytes */
+
+ /*
+ * Set up global pointer to start of list, exclude low 64k
+ * for exception vectors, space for global descriptor
+ */
+
+ node_base = (u64)node << CVMX_NODE_MEM_SHIFT;
+ cur_block_addr = (OCTEON_DDR0_BASE + low_reserved_bytes) |
+ node_base;
+
+ if (mem_size <= OCTEON_DDR0_SIZE) {
+ __cvmx_bootmem_phy_free(cur_block_addr,
+ mem_size - low_reserved_bytes,
+ 0);
+ continue;
+ }
+
+ __cvmx_bootmem_phy_free(cur_block_addr,
+ OCTEON_DDR0_SIZE - low_reserved_bytes,
+ 0);
+
+ mem_size -= OCTEON_DDR0_SIZE;
+
+		/* Add the DDR1 block, plus DDR2 if present */
+ if (mem_size > OCTEON_DDR1_SIZE) {
+ __cvmx_bootmem_phy_free(OCTEON_DDR1_BASE |
+ node_base,
+ OCTEON_DDR1_SIZE, 0);
+ __cvmx_bootmem_phy_free(OCTEON_DDR2_BASE |
+ node_base,
+ mem_size - OCTEON_DDR1_SIZE, 0);
+ } else {
+ __cvmx_bootmem_phy_free(OCTEON_DDR1_BASE |
+ node_base,
+ mem_size, 0);
+ }
+ }
+
+ debug("%s: Initialize the named block\n", __func__);
+
+ /* Initialize the named block structure */
+ CVMX_BOOTMEM_DESC_SET_FIELD(named_block_name_len, CVMX_BOOTMEM_NAME_LEN);
+ CVMX_BOOTMEM_DESC_SET_FIELD(named_block_num_blocks,
+ CVMX_BOOTMEM_NUM_NAMED_BLOCKS);
+ CVMX_BOOTMEM_DESC_SET_FIELD(named_block_array_addr, 0);
+
+ /* Allocate this near the top of the low 256 MBytes of memory */
+ addr = cvmx_bootmem_phy_alloc(CVMX_BOOTMEM_NUM_NAMED_BLOCKS *
+ sizeof(struct cvmx_bootmem_named_block_desc),
+ 0, 0x10000000, 0,
+ CVMX_BOOTMEM_FLAG_END_ALLOC);
+ if (addr >= 0)
+ CVMX_BOOTMEM_DESC_SET_FIELD(named_block_array_addr, addr);
+
+	debug("%s: named_block_array_addr: 0x%llx\n",
+	      __func__, CAST_ULL(addr));
+
+ if (addr < 0) {
+		debug("FATAL ERROR: unable to allocate memory for the named block array!\n");
+ return 0;
+ }
+
+ for (i = 0; i < CVMX_BOOTMEM_NUM_NAMED_BLOCKS; i++) {
+ CVMX_BOOTMEM_NAMED_SET_FIELD(addr, base_addr, 0);
+ CVMX_BOOTMEM_NAMED_SET_FIELD(addr, size, 0);
+ addr += sizeof(struct cvmx_bootmem_named_block_desc);
+ }
+
+	/* Dump the resulting free list (debug aid; could be guarded by DEBUG) */
+ cvmx_bootmem_phy_list_print();
+
+ return 1;
+}
+
+int cvmx_bootmem_reserve_memory(u64 start_addr, u64 size,
+ const char *name, u32 flags)
+{
+ u64 addr;
+ int rc = 1;
+ static unsigned int block_num;
+ char block_name[CVMX_BOOTMEM_NAME_LEN];
+
+	debug("%s: start: %#llx, size: %#llx, name: %s, flags: %#x\n",
+ __func__, CAST_ULL(start_addr), CAST_ULL(size), name, flags);
+
+ if (__cvmx_bootmem_check_version(3))
+ return 0;
+
+ addr = CVMX_BOOTMEM_DESC_GET_FIELD(head_addr);
+ if (!addr)
+ return 0;
+
+ if (!name)
+ name = "__cvmx_bootmem_reserved";
+
+ while (addr && rc) {
+ u64 block_size = cvmx_bootmem_phy_get_size(addr);
+ u64 reserve_size = 0;
+
+ if (addr >= start_addr && addr < start_addr + size) {
+ reserve_size = size - (addr - start_addr);
+ if (block_size < reserve_size)
+ reserve_size = block_size;
+ } else if (start_addr > addr &&
+ start_addr < (addr + block_size)) {
+ reserve_size = block_size - (start_addr - addr);
+ }
+
+ if (reserve_size) {
+ snprintf(block_name, sizeof(block_name),
+ "%.32s_%012llx_%u",
+ name, (unsigned long long)start_addr,
+ (unsigned int)block_num);
+
+ debug("%s: Reserving 0x%llx bytes at address 0x%llx with name %s\n",
+ __func__, CAST_ULL(reserve_size),
+ CAST_ULL(addr), block_name);
+
+ if (cvmx_bootmem_phy_named_block_alloc(reserve_size,
+ addr, 0, 0,
+ block_name,
+ flags) == -1) {
+ debug("%s: Failed to reserve 0x%llx bytes at address 0x%llx\n",
+ __func__, CAST_ULL(reserve_size),
+ (unsigned long long)addr);
+ rc = 0;
+ break;
+ }
+
+ debug("%s: Reserved 0x%llx bytes at address 0x%llx with name %s\n",
+ __func__, CAST_ULL(reserve_size),
+ CAST_ULL(addr), block_name);
+ }
+
+ addr = cvmx_bootmem_phy_get_next(addr);
+ block_num++;
+ }
+
+ return rc;
+}
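+
+/*
+ * Usage sketch (hypothetical range): keep later allocations out of the
+ * 16 MiB starting at 1 MiB by marking it as a named reservation:
+ *
+ *	cvmx_bootmem_reserve_memory(0x100000, 0x1000000, "reserved-low", 0);
+ */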
+
+void cvmx_bootmem_lock(void)
+{
+ __cvmx_bootmem_lock(0);
+}
+
+void cvmx_bootmem_unlock(void)
+{
+ __cvmx_bootmem_unlock(0);
+}
+
+void *__cvmx_phys_addr_to_ptr(u64 phys, int size)
+{
+ void *tmp;
+
+ if (sizeof(void *) == 8) {
+ tmp = CASTPTR(void, CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS, phys));
+ } else {
+ u32 phy32 = (u32)(phys & 0x7fffffffULL);
+
+ tmp = CASTPTR(void, CVMX_ADD_SEG32(CVMX_MIPS32_SPACE_KSEG0,
+ phy32));
+ }
+
+ return tmp;
+}
+
+void *__cvmx_bootmem_internal_get_desc_ptr(void)
+{
+ return cvmx_phys_to_ptr(cvmx_bootmem_desc_addr);
+}
diff --git a/arch/mips/mach-octeon/cvmx-coremask.c b/arch/mips/mach-octeon/cvmx-coremask.c
new file mode 100644
index 0000000000..cff8c08b97
--- /dev/null
+++ b/arch/mips/mach-octeon/cvmx-coremask.c
@@ -0,0 +1,366 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018-2020 Marvell International Ltd.
+ */
+
+#include <env.h>
+#include <errno.h>
+
+#include <linux/compat.h>
+#include <linux/ctype.h>
+
+#include <mach/cvmx-regs.h>
+#include <mach/cvmx-coremask.h>
+#include <mach/cvmx-fuse.h>
+#include <mach/octeon-model.h>
+#include <mach/octeon-feature.h>
+
+struct cvmx_coremask *get_coremask_override(struct cvmx_coremask *pcm)
+{
+ struct cvmx_coremask pcm_override = CVMX_COREMASK_MAX;
+ char *cptr;
+
+	/* The old code sets the number of cores to 16 in this case. */
+ cvmx_coremask_set_cores(pcm, 0, 16);
+
+ if (OCTEON_IS_OCTEON2() || OCTEON_IS_OCTEON3())
+ cvmx_coremask_copy(pcm, &pcm_override);
+
+ cptr = env_get("coremask_override");
+ if (cptr) {
+ if (cvmx_coremask_str2bmp(pcm, cptr) < 0)
+ return NULL;
+ }
+
+ return pcm;
+}
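+
+/*
+ * Example (illustrative): the coremask may be restricted from the U-Boot
+ * shell before booting, e.g.
+ *
+ *	=> setenv coremask_override 0xff
+ *
+ * limits the subsequent boot* command to cores 0-7, provided those cores
+ * exist in the fuses.
+ */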
+
+/* Validate the coremask that is passed to a boot* function. */
+int validate_coremask(struct cvmx_coremask *pcm)
+{
+ struct cvmx_coremask coremask_override;
+ struct cvmx_coremask fuse_coremask;
+
+ if (!get_coremask_override(&coremask_override))
+ return -1;
+
+ octeon_get_available_coremask(&fuse_coremask);
+
+ if (!cvmx_coremask_is_subset(&fuse_coremask, pcm)) {
+ puts("ERROR: Can't boot cores that don't exist!\n");
+ puts("Available coremask:\n");
+ cvmx_coremask_print(&fuse_coremask);
+ return -1;
+ }
+
+ if (!cvmx_coremask_is_subset(&coremask_override, pcm)) {
+ struct cvmx_coremask print_cm;
+
+ puts("Notice: coremask changed from:\n");
+ cvmx_coremask_print(pcm);
+ puts("based on coremask_override of:\n");
+ cvmx_coremask_print(&coremask_override);
+ cvmx_coremask_and(&print_cm, pcm, &coremask_override);
+ puts("to:\n");
+ cvmx_coremask_print(&print_cm);
+ }
+
+ return 0;
+}
+
+/**
+ * In CIU_FUSE for the 78XX, odd and even cores are separated out.
+ * For example, a CIU_FUSE value of 0xfffffefffffe (bits 0 and 24 cleared)
+ * indicates that physical cores 0 and 1 are fuse-disabled.
+ * This function converts the bit number in the CIU_FUSE register to a
+ * physical core number.
+ */
+static int convert_ciu_fuse_to_physical_core(int core, int max_cores)
+{
+ if (!octeon_has_feature(OCTEON_FEATURE_CIU3))
+ return core;
+ else if (!OCTEON_IS_MODEL(OCTEON_CN78XX))
+ return core;
+ else if (core < (max_cores / 2))
+ return core * 2;
+ else
+ return ((core - (max_cores / 2)) * 2) + 1;
+}
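+
+/*
+ * Example mapping on the CN78XX (max_cores = 48): CIU_FUSE bits 0-23
+ * map to the even physical cores 0, 2, ..., 46, and bits 24-47 map to
+ * the odd physical cores 1, 3, ..., 47, so e.g. bit 24 -> core 1.
+ */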
+
+/**
+ * Get the total number of fuses blown as well as the number blown per tad.
+ *
+ * @param coremask fuse coremask
+ * @param[out] tad_blown_count number of cores blown for each tad
+ * @param num_tads number of tads
+ * @param max_cores maximum number of cores
+ *
+ * @return void
+ */
+void fill_tad_corecount(u64 coremask, int tad_blown_count[], int num_tads,
+ int max_cores)
+{
+ int core, physical_core;
+
+ for (core = 0; core < max_cores; core++) {
+ if (!(coremask & (1ULL << core))) {
+ int tad;
+
+ physical_core =
+ convert_ciu_fuse_to_physical_core(core,
+ max_cores);
+ tad = physical_core % num_tads;
+ tad_blown_count[tad]++;
+ }
+ }
+}
+
+u64 get_core_pattern(int num_tads, int max_cores)
+{
+ u64 pattern = 1ULL;
+ int cnt;
+
+ for (cnt = 1; cnt < (max_cores / num_tads); cnt++)
+ pattern |= pattern << num_tads;
+
+ return pattern;
+}
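+
+/*
+ * Example: with 4 TADs and 16 cores, get_core_pattern(4, 16) returns
+ * 0x1111, i.e. one bit per core of TAD 0's column.
+ */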
+
+/**
+ * For CN78XX and CN68XX this function returns the logical coremask from the
+ * CIU_FUSE register value. For other models there is no difference.
+ *
+ * @param ciu_fuse_value fuse value from CIU_FUSE register
+ * @return logical coremask of CIU_FUSE value.
+ */
+u64 get_logical_coremask(u64 ciu_fuse_value)
+{
+ int tad_blown_count[MAX_CORE_TADS] = {0};
+ int tad;
+ u64 logical_coremask = 0;
+ u64 tad_mask, pattern;
+ int num_tads, max_cores;
+
+ if (OCTEON_IS_MODEL(OCTEON_CN78XX)) {
+ num_tads = 8;
+ max_cores = 48;
+ } else if (OCTEON_IS_MODEL(OCTEON_CN73XX) ||
+ OCTEON_IS_MODEL(OCTEON_CNF75XX)) {
+ num_tads = 4;
+ max_cores = 16;
+ } else if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
+ num_tads = 4;
+ max_cores = 32;
+ } else {
+ /* Most Octeon devices don't need any mapping. */
+ return ciu_fuse_value;
+ }
+
+ pattern = get_core_pattern(num_tads, max_cores);
+ fill_tad_corecount(ciu_fuse_value, tad_blown_count,
+ num_tads, max_cores);
+
+ for (tad = 0; tad < num_tads; tad++) {
+ tad_mask = pattern << tad;
+ logical_coremask |= tad_mask >> (tad_blown_count[tad] * num_tads);
+ }
+ return logical_coremask;
+}
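+
+/*
+ * Worked example (CN73XX: 4 TADs, 16 cores): CIU_FUSE = 0xfffe means one
+ * core on TAD 0 is fuse-disabled. TAD 0's column 0x1111 is shifted right
+ * by 4 bits to 0x0111 while the other columns stay intact, so the
+ * logical coremask is 0x0111 | 0x2222 | 0x4444 | 0x8888 = 0xefff.
+ */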
+
+/**
+ * Returns the available coremask either from env or fuses.
+ * If the fuses are blown and locked, they are the definitive coremask.
+ *
+ * @param pcm pointer to coremask to fill in
+ * @return pointer to coremask
+ */
+struct cvmx_coremask *octeon_get_available_coremask(struct cvmx_coremask *pcm)
+{
+ u8 node_mask = 0x01; /* ToDo: Currently only one node is supported */
+ u64 ciu_fuse;
+ u64 cores;
+
+ cvmx_coremask_clear_all(pcm);
+
+ if (octeon_has_feature(OCTEON_FEATURE_CIU3)) {
+ int node;
+
+ cvmx_coremask_for_each_node(node, node_mask) {
+ ciu_fuse = (csr_rd(CVMX_CIU_FUSE) &
+ 0x0000FFFFFFFFFFFFULL);
+
+ ciu_fuse = get_logical_coremask(ciu_fuse);
+ cvmx_coremask_set64_node(pcm, node, ciu_fuse);
+ }
+
+ return pcm;
+ }
+
+ ciu_fuse = (csr_rd(CVMX_CIU_FUSE) & 0x0000FFFFFFFFFFFFULL);
+ ciu_fuse = get_logical_coremask(ciu_fuse);
+
+ if (OCTEON_IS_MODEL(OCTEON_CN68XX))
+ cvmx_coremask_set64(pcm, ciu_fuse);
+
+ /* Get number of cores from fuse register, convert to coremask */
+ cores = __builtin_popcountll(ciu_fuse);
+
+ cvmx_coremask_set_cores(pcm, 0, cores);
+
+ return pcm;
+}
+
+int cvmx_coremask_str2bmp(struct cvmx_coremask *pcm, char *hexstr)
+{
+ int i, j;
+ int l; /* length of the hexstr in characters */
+ int lb; /* number of bits taken by hexstr */
+ int hldr_offset;/* holder's offset within the coremask */
+ int hldr_xsz; /* holder's size in the number of hex digits */
+ u64 h;
+ char c;
+
+#define MINUS_ONE (hexstr[0] == '-' && hexstr[1] == '1' && hexstr[2] == 0)
+ if (MINUS_ONE) {
+ cvmx_coremask_set_all(pcm);
+ return 0;
+ }
+
+ /* Skip '0x' from hexstr */
+ if (hexstr[0] == '0' && (hexstr[1] == 'x' || hexstr[1] == 'X'))
+ hexstr += 2;
+
+ if (!strlen(hexstr)) {
+ printf("%s: Error: hex string is empty\n", __func__);
+ return -2;
+ }
+
+ /* Trim leading zeros */
+ while (*hexstr == '0')
+ hexstr++;
+
+ cvmx_coremask_clear_all(pcm);
+ l = strlen(hexstr);
+
+ /* If length is 0 then the hex string must be all zeros */
+ if (l == 0)
+ return 0;
+
+ for (i = 0; i < l; i++) {
+ if (isxdigit((int)hexstr[i]) == 0) {
+ printf("%s: Non-hex digit within hexstr\n", __func__);
+ return -2;
+ }
+ }
+
+ lb = (l - 1) * 4;
+ if (hexstr[0] > '7')
+ lb += 4;
+ else if (hexstr[0] > '3')
+ lb += 3;
+ else if (hexstr[0] > '1')
+ lb += 2;
+ else
+ lb += 1;
+ if (lb > CVMX_MIPS_MAX_CORES) {
+ printf("%s: hexstr (%s) is too long\n", __func__, hexstr);
+ return -1;
+ }
+
+ hldr_offset = 0;
+ hldr_xsz = 2 * sizeof(u64);
+ for (i = l; i > 0; i -= hldr_xsz) {
+ c = hexstr[i];
+ hexstr[i] = 0;
+ j = i - hldr_xsz;
+ if (j < 0)
+ j = 0;
+ h = simple_strtoull(&hexstr[j], NULL, 16);
+ if (errno == EINVAL) {
+			printf("%s: simple_strtoull failed with EINVAL\n", __func__);
+ return -2;
+ }
+ pcm->coremask_bitmap[hldr_offset] = h;
+ hexstr[i] = c;
+ hldr_offset++;
+ }
+
+ return 0;
+}
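+
+/*
+ * Usage sketch: parse a hex string (e.g. "0xf" for cores 0-3) into a
+ * coremask. The string must be writable, as the parser temporarily
+ * NUL-terminates 16-digit chunks in place:
+ *
+ *	struct cvmx_coremask cm;
+ *	char buf[] = "0xf";
+ *
+ *	if (cvmx_coremask_str2bmp(&cm, buf) == 0)
+ *		cvmx_coremask_print(&cm);
+ */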
+
+void cvmx_coremask_print(const struct cvmx_coremask *pcm)
+{
+ int i, j;
+ int start;
+ int found = 0;
+
+ /*
+ * Print one node per line. Since the bitmap is stored LSB to MSB
+ * we reverse the order when printing.
+ */
+ if (!octeon_has_feature(OCTEON_FEATURE_MULTINODE)) {
+ start = 0;
+ for (j = CVMX_COREMASK_MAX_CORES_PER_NODE -
+ CVMX_COREMASK_HLDRSZ;
+ j >= 0; j -= CVMX_COREMASK_HLDRSZ) {
+ if (pcm->coremask_bitmap[j / CVMX_COREMASK_HLDRSZ] != 0)
+ start = 1;
+ if (start) {
+ printf(" 0x%llx",
+ (u64)pcm->coremask_bitmap[j /
+ CVMX_COREMASK_HLDRSZ]);
+ }
+ }
+
+ if (start)
+ found = 1;
+
+ /*
+ * If the coremask is empty print <EMPTY> so it is not
+ * confusing
+ */
+ if (!found)
+ printf("<EMPTY>");
+ printf("\n");
+
+ return;
+ }
+
+ for (i = 0; i < CVMX_MAX_USED_CORES_BMP;
+ i += CVMX_COREMASK_MAX_CORES_PER_NODE) {
+ printf("%s node %d:", i > 0 ? "\n" : "",
+ cvmx_coremask_core_to_node(i));
+ start = 0;
+
+ for (j = i + CVMX_COREMASK_MAX_CORES_PER_NODE -
+ CVMX_COREMASK_HLDRSZ;
+ j >= i;
+ j -= CVMX_COREMASK_HLDRSZ) {
+ /* Don't start printing until we get a non-zero word. */
+ if (pcm->coremask_bitmap[j / CVMX_COREMASK_HLDRSZ] != 0)
+ start = 1;
+
+ if (start) {
+ printf(" 0x%llx", (u64)pcm->coremask_bitmap[j /
+ CVMX_COREMASK_HLDRSZ]);
+ }
+ }
+
+ if (start)
+ found = 1;
+ }
+
+ i /= CVMX_COREMASK_HLDRSZ;
+ for (; i < CVMX_COREMASK_BMPSZ; i++) {
+ if (pcm->coremask_bitmap[i]) {
+ printf(" EXTRA GARBAGE[%i]: %016llx\n", i,
+ (u64)pcm->coremask_bitmap[i]);
+ }
+ }
+
+ /* If the coremask is empty print <EMPTY> so it is not confusing */
+ if (!found)
+ printf("<EMPTY>");
+
+ printf("\n");
+}
diff --git a/arch/mips/mach-octeon/dram.c b/arch/mips/mach-octeon/dram.c
index ff7a59f2ab..6dc08e19da 100644
--- a/arch/mips/mach-octeon/dram.c
+++ b/arch/mips/mach-octeon/dram.c
@@ -1,28 +1,84 @@
// SPDX-License-Identifier: GPL-2.0+
/*
- * Copyright (C) Stefan Roese <sr@denx.de>
+ * Copyright (C) 2020 Stefan Roese <sr@denx.de>
*/
+#include <config.h>
#include <dm.h>
#include <ram.h>
#include <asm/global_data.h>
#include <linux/compat.h>
+#include <display_options.h>
DECLARE_GLOBAL_DATA_PTR;
+#define UBOOT_RAM_SIZE_MAX 0x10000000ULL
+
int dram_init(void)
{
- /*
- * No DDR init yet -> run in L2 cache
- */
- gd->ram_size = (4 << 20);
- gd->bd->bi_dram[0].size = gd->ram_size;
- gd->bd->bi_dram[1].size = 0;
+ if (IS_ENABLED(CONFIG_RAM_OCTEON)) {
+ struct ram_info ram;
+ struct udevice *dev;
+ int ret;
+
+ ret = uclass_get_device(UCLASS_RAM, 0, &dev);
+ if (ret) {
+ debug("DRAM init failed: %d\n", ret);
+ return ret;
+ }
+
+ ret = ram_get_info(dev, &ram);
+ if (ret) {
+ debug("Cannot get DRAM size: %d\n", ret);
+ return ret;
+ }
+
+ gd->ram_size = min_t(size_t, ram.size, UBOOT_RAM_SIZE_MAX);
+ debug("SDRAM base=%lx, size=%lx\n",
+ (unsigned long)ram.base, (unsigned long)ram.size);
+ } else {
+ /*
+ * No DDR init yet -> run in L2 cache
+ */
+ gd->ram_size = (4 << 20);
+ gd->bd->bi_dram[0].size = gd->ram_size;
+ gd->bd->bi_dram[1].size = 0;
+ }
return 0;
}
+void board_add_ram_info(int use_default)
+{
+ if (IS_ENABLED(CONFIG_RAM_OCTEON)) {
+ struct ram_info ram;
+ struct udevice *dev;
+ int ret;
+
+ ret = uclass_get_device(UCLASS_RAM, 0, &dev);
+ if (ret) {
+ debug("DRAM init failed: %d\n", ret);
+ return;
+ }
+
+ ret = ram_get_info(dev, &ram);
+ if (ret) {
+ debug("Cannot get DRAM size: %d\n", ret);
+ return;
+ }
+
+ printf(" (");
+ print_size(ram.size, " total)");
+ }
+}
+
ulong board_get_usable_ram_top(ulong total_size)
{
- return gd->ram_top;
+ if (IS_ENABLED(CONFIG_RAM_OCTEON)) {
+		/* Map a maximum of 256MiB - return the top address, not the size */
+ return CONFIG_SYS_SDRAM_BASE + min(gd->ram_size,
+ UBOOT_RAM_SIZE_MAX);
+ } else {
+ return gd->ram_top;
+ }
}
diff --git a/arch/mips/mach-octeon/include/mach/bootoct_cmd.h b/arch/mips/mach-octeon/include/mach/bootoct_cmd.h
new file mode 100644
index 0000000000..657698ba54
--- /dev/null
+++ b/arch/mips/mach-octeon/include/mach/bootoct_cmd.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020 Marvell International Ltd.
+ */
+
+#ifndef __BOOTOCT_CMD_H__
+#define __BOOTOCT_CMD_H__
+
+#include "cvmx-coremask.h"
+
+enum octeon_boot_cmd_type {
+ BOOTOCT,
+ BOOTOCTLINUX,
+ BOOTOCTELF
+};
+
+/** Structure to contain results of command line argument parsing */
+struct octeon_boot_args {
+ struct cvmx_coremask coremask; /** Parsed coremask */
+ int num_cores[CVMX_MAX_NODES]; /** number of cores */
+ int num_skipped[CVMX_MAX_NODES];/** number of skipped cores */
+ const char *app_name; /** Application name */
+ const char *named_block; /** Named block to load Linux into */
+ u32 stack_size; /** stack size */
+ u32 heap_size; /** heap size */
+ u32 boot_flags; /** boot flags */
+ int node_mask; /** Node mask to use */
+ int console_uart; /** serial console number */
+ bool forceboot; /** force booting if core 0 not set */
+ bool coremask_set; /** set if coremask was set */
+ bool num_cores_set; /** Set if num_cores was set */
+ bool num_skipped_set; /** Set if num_skipped was set */
+ /** Set if endbootargs parameter was passed. */
+ bool endbootargs;
+};
+
+/**
+ * Parse command line arguments
+ *
+ * @param argc number of arguments
+ * @param[in] argv array of argument strings
+ * @param cmd command type
+ * @param[out] boot_args parsed values
+ *
+ * @return number of arguments parsed
+ */
+int octeon_parse_bootopts(int argc, char *const argv[],
+ enum octeon_boot_cmd_type cmd,
+ struct octeon_boot_args *boot_args);
+
+void nmi_bootvector(void);
+extern u64 nmi_handler_para[];
+
+#endif /* __BOOTOCT_CMD_H__ */
diff --git a/arch/mips/mach-octeon/include/mach/cvmx-bootinfo.h b/arch/mips/mach-octeon/include/mach/cvmx-bootinfo.h
new file mode 100644
index 0000000000..337987178f
--- /dev/null
+++ b/arch/mips/mach-octeon/include/mach/cvmx-bootinfo.h
@@ -0,0 +1,350 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020 Marvell International Ltd.
+ */
+
+/*
+ * Header file containing the ABI with the bootloader.
+ */
+
+#ifndef __CVMX_BOOTINFO_H__
+#define __CVMX_BOOTINFO_H__
+
+#include "cvmx-coremask.h"
+
+/*
+ * Current major and minor versions of the CVMX bootinfo block that is
+ * passed from the bootloader to the application. This is versioned
+ * so that applications can properly handle multiple bootloader
+ * versions.
+ */
+#define CVMX_BOOTINFO_MAJ_VER 1
+#define CVMX_BOOTINFO_MIN_VER 4
+
+#if (CVMX_BOOTINFO_MAJ_VER == 1)
+#define CVMX_BOOTINFO_OCTEON_SERIAL_LEN 20
+/*
+ * This structure is populated by the bootloader. For binary
+ * compatibility the only changes that should be made are
+ * adding members to the end of the structure, and the minor
+ * version should be incremented at that time.
+ * If an incompatible change is made, the major version
+ * must be incremented, and the minor version should be reset
+ * to 0.
+ */
+struct cvmx_bootinfo {
+ u32 major_version;
+ u32 minor_version;
+
+ u64 stack_top;
+ u64 heap_base;
+ u64 heap_end;
+ u64 desc_vaddr;
+
+ u32 exception_base_addr;
+ u32 stack_size;
+ u32 flags;
+ u32 core_mask;
+ /* DRAM size in megabytes */
+ u32 dram_size;
+	/* physical address of free memory descriptor block */
+ u32 phy_mem_desc_addr;
+ /* used to pass flags from app to debugger */
+ u32 debugger_flags_base_addr;
+
+ /* CPU clock speed, in hz */
+ u32 eclock_hz;
+
+ /* DRAM clock speed, in hz */
+ u32 dclock_hz;
+
+ u32 reserved0;
+ u16 board_type;
+ u8 board_rev_major;
+ u8 board_rev_minor;
+ u16 reserved1;
+ u8 reserved2;
+ u8 reserved3;
+ char board_serial_number[CVMX_BOOTINFO_OCTEON_SERIAL_LEN];
+ u8 mac_addr_base[6];
+ u8 mac_addr_count;
+#if (CVMX_BOOTINFO_MIN_VER >= 1)
+ /*
+ * Several boards support compact flash on the Octeon boot
+ * bus. The CF memory spaces may be mapped to different
+ * addresses on different boards. These are the physical
+ * addresses, so care must be taken to use the correct
+ * XKPHYS/KSEG0 addressing depending on the application's
+ * ABI. These values will be 0 if CF is not present.
+ */
+ u64 compact_flash_common_base_addr;
+ u64 compact_flash_attribute_base_addr;
+ /*
+ * Base address of the LED display (as on EBT3000 board)
+ * This will be 0 if LED display not present.
+ */
+ u64 led_display_base_addr;
+#endif
+#if (CVMX_BOOTINFO_MIN_VER >= 2)
+ /* DFA reference clock in hz (if applicable)*/
+ u32 dfa_ref_clock_hz;
+
+ /*
+ * flags indicating various configuration options. These
+	 * flags supersede the 'flags' variable and should be used
+ * instead if available.
+ */
+ u32 config_flags;
+#endif
+#if (CVMX_BOOTINFO_MIN_VER >= 3)
+ /*
+ * Address of the OF Flattened Device Tree structure
+ * describing the board.
+ */
+ u64 fdt_addr;
+#endif
+#if (CVMX_BOOTINFO_MIN_VER >= 4)
+ /*
+ * Coremask used for processors with more than 32 cores
+ * or with OCI. This replaces core_mask.
+ */
+ struct cvmx_coremask ext_core_mask;
+#endif
+};
+
+#define CVMX_BOOTINFO_CFG_FLAG_PCI_HOST (1ull << 0)
+#define CVMX_BOOTINFO_CFG_FLAG_PCI_TARGET (1ull << 1)
+#define CVMX_BOOTINFO_CFG_FLAG_DEBUG (1ull << 2)
+#define CVMX_BOOTINFO_CFG_FLAG_NO_MAGIC (1ull << 3)
+/*
+ * This flag is set if the TLB mappings are not contained in the
+ * 0x10000000 - 0x20000000 boot bus region.
+ */
+#define CVMX_BOOTINFO_CFG_FLAG_OVERSIZE_TLB_MAPPING (1ull << 4)
+#define CVMX_BOOTINFO_CFG_FLAG_BREAK (1ull << 5)
+
+#endif /* (CVMX_BOOTINFO_MAJ_VER == 1) */
+
+/* Type defines for board and chip types */
+enum cvmx_board_types_enum {
+ CVMX_BOARD_TYPE_NULL = 0,
+ CVMX_BOARD_TYPE_SIM = 1,
+ CVMX_BOARD_TYPE_EBT3000 = 2,
+ CVMX_BOARD_TYPE_KODAMA = 3,
+ CVMX_BOARD_TYPE_NIAGARA = 4,
+ CVMX_BOARD_TYPE_NAC38 = 5, /* formerly NAO38 */
+ CVMX_BOARD_TYPE_THUNDER = 6,
+ CVMX_BOARD_TYPE_TRANTOR = 7,
+ CVMX_BOARD_TYPE_EBH3000 = 8,
+ CVMX_BOARD_TYPE_EBH3100 = 9,
+ CVMX_BOARD_TYPE_HIKARI = 10,
+ CVMX_BOARD_TYPE_CN3010_EVB_HS5 = 11,
+ CVMX_BOARD_TYPE_CN3005_EVB_HS5 = 12,
+ CVMX_BOARD_TYPE_KBP = 13,
+ /* Deprecated, CVMX_BOARD_TYPE_CN3010_EVB_HS5 supports the CN3020 */
+ CVMX_BOARD_TYPE_CN3020_EVB_HS5 = 14,
+ CVMX_BOARD_TYPE_EBT5800 = 15,
+ CVMX_BOARD_TYPE_NICPRO2 = 16,
+ CVMX_BOARD_TYPE_EBH5600 = 17,
+ CVMX_BOARD_TYPE_EBH5601 = 18,
+ CVMX_BOARD_TYPE_EBH5200 = 19,
+ CVMX_BOARD_TYPE_BBGW_REF = 20,
+ CVMX_BOARD_TYPE_NIC_XLE_4G = 21,
+ CVMX_BOARD_TYPE_EBT5600 = 22,
+ CVMX_BOARD_TYPE_EBH5201 = 23,
+ CVMX_BOARD_TYPE_EBT5200 = 24,
+ CVMX_BOARD_TYPE_CB5600 = 25,
+ CVMX_BOARD_TYPE_CB5601 = 26,
+ CVMX_BOARD_TYPE_CB5200 = 27,
+ /* Special 'generic' board type, supports many boards */
+ CVMX_BOARD_TYPE_GENERIC = 28,
+ CVMX_BOARD_TYPE_EBH5610 = 29,
+ CVMX_BOARD_TYPE_LANAI2_A = 30,
+ CVMX_BOARD_TYPE_LANAI2_U = 31,
+ CVMX_BOARD_TYPE_EBB5600 = 32,
+ CVMX_BOARD_TYPE_EBB6300 = 33,
+ CVMX_BOARD_TYPE_NIC_XLE_10G = 34,
+ CVMX_BOARD_TYPE_LANAI2_G = 35,
+ CVMX_BOARD_TYPE_EBT5810 = 36,
+ CVMX_BOARD_TYPE_NIC10E = 37,
+ CVMX_BOARD_TYPE_EP6300C = 38,
+ CVMX_BOARD_TYPE_EBB6800 = 39,
+ CVMX_BOARD_TYPE_NIC4E = 40,
+ CVMX_BOARD_TYPE_NIC2E = 41,
+ CVMX_BOARD_TYPE_EBB6600 = 42,
+ CVMX_BOARD_TYPE_REDWING = 43,
+ CVMX_BOARD_TYPE_NIC68_4 = 44,
+ CVMX_BOARD_TYPE_NIC10E_66 = 45,
+ CVMX_BOARD_TYPE_MAX,
+
+ /*
+ * The range from CVMX_BOARD_TYPE_MAX to
+ * CVMX_BOARD_TYPE_CUST_DEFINED_MIN is reserved for future
+ * SDK use.
+ */
+
+ /*
+ * Set aside a range for customer boards. These numbers are managed
+ * by Cavium.
+ */
+ CVMX_BOARD_TYPE_CUST_DEFINED_MIN = 10000,
+ CVMX_BOARD_TYPE_CUST_WSX16 = 10001,
+ CVMX_BOARD_TYPE_CUST_NS0216 = 10002,
+ CVMX_BOARD_TYPE_CUST_NB5 = 10003,
+ CVMX_BOARD_TYPE_CUST_WMR500 = 10004,
+ CVMX_BOARD_TYPE_CUST_ITB101 = 10005,
+ CVMX_BOARD_TYPE_CUST_NTE102 = 10006,
+ CVMX_BOARD_TYPE_CUST_AGS103 = 10007,
+ CVMX_BOARD_TYPE_CUST_GST104 = 10008,
+ CVMX_BOARD_TYPE_CUST_GCT105 = 10009,
+ CVMX_BOARD_TYPE_CUST_AGS106 = 10010,
+ CVMX_BOARD_TYPE_CUST_SGM107 = 10011,
+ CVMX_BOARD_TYPE_CUST_GCT108 = 10012,
+ CVMX_BOARD_TYPE_CUST_AGS109 = 10013,
+ CVMX_BOARD_TYPE_CUST_GCT110 = 10014,
+ CVMX_BOARD_TYPE_CUST_L2_AIR_SENDER = 10015,
+ CVMX_BOARD_TYPE_CUST_L2_AIR_RECEIVER = 10016,
+ CVMX_BOARD_TYPE_CUST_L2_ACCTON2_TX = 10017,
+ CVMX_BOARD_TYPE_CUST_L2_ACCTON2_RX = 10018,
+ CVMX_BOARD_TYPE_CUST_L2_WSTRNSNIC_TX = 10019,
+ CVMX_BOARD_TYPE_CUST_L2_WSTRNSNIC_RX = 10020,
+ CVMX_BOARD_TYPE_CUST_L2_ZINWELL = 10021,
+ CVMX_BOARD_TYPE_CUST_DEFINED_MAX = 20000,
+
+ /*
+ * Set aside a range for customer private use. The SDK won't
+ * use any numbers in this range.
+ */
+ CVMX_BOARD_TYPE_CUST_PRIVATE_MIN = 20001,
+ CVMX_BOARD_TYPE_UBNT_E100 = 20002,
+ CVMX_BOARD_TYPE_CUST_DSR1000N = 20006,
+ CVMX_BOARD_TYPE_KONTRON_S1901 = 21901,
+ CVMX_BOARD_TYPE_CUST_PRIVATE_MAX = 30000,
+
+ /* The remaining range is reserved for future use. */
+};
+
+enum cvmx_chip_types_enum {
+ CVMX_CHIP_TYPE_NULL = 0,
+ CVMX_CHIP_SIM_TYPE_DEPRECATED = 1,
+ CVMX_CHIP_TYPE_OCTEON_SAMPLE = 2,
+ CVMX_CHIP_TYPE_MAX,
+};
+
+/*
+ * Compatibility alias for NAC38 name change, planned to be removed
+ * from SDK 1.7
+ */
+#define CVMX_BOARD_TYPE_NAO38 CVMX_BOARD_TYPE_NAC38
+
+/* Functions to return string based on type */
+#define ENUM_BRD_TYPE_CASE(x) \
+ case x: \
+ return(#x + 16) /* Skip CVMX_BOARD_TYPE_ */
+
+static inline const char *cvmx_board_type_to_string(enum
+ cvmx_board_types_enum type)
+{
+ switch (type) {
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_NULL);
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_SIM);
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_EBT3000);
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_KODAMA);
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_NIAGARA);
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_NAC38);
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_THUNDER);
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_TRANTOR);
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_EBH3000);
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_EBH3100);
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_HIKARI);
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CN3010_EVB_HS5);
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CN3005_EVB_HS5);
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_KBP);
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CN3020_EVB_HS5);
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_EBT5800);
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_NICPRO2);
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_EBH5600);
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_EBH5601);
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_EBH5200);
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_BBGW_REF);
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_NIC_XLE_4G);
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_EBT5600);
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_EBH5201);
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_EBT5200);
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CB5600);
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CB5601);
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CB5200);
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_GENERIC);
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_EBH5610);
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_LANAI2_A);
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_LANAI2_U);
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_EBB5600);
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_EBB6300);
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_NIC_XLE_10G);
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_LANAI2_G);
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_EBT5810);
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_NIC10E);
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_EP6300C);
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_EBB6800);
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_NIC4E);
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_NIC2E);
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_EBB6600);
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_REDWING);
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_NIC68_4);
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_NIC10E_66);
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_MAX);
+
+ /* Customer boards listed here */
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_DEFINED_MIN);
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_WSX16);
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_NS0216);
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_NB5);
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_WMR500);
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_ITB101);
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_NTE102);
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_AGS103);
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_GST104);
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_GCT105);
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_AGS106);
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_SGM107);
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_GCT108);
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_AGS109);
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_GCT110);
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_L2_AIR_SENDER);
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_L2_AIR_RECEIVER);
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_L2_ACCTON2_TX);
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_L2_ACCTON2_RX);
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_L2_WSTRNSNIC_TX);
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_L2_WSTRNSNIC_RX);
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_L2_ZINWELL);
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_DEFINED_MAX);
+
+ /* Customer private range */
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_PRIVATE_MIN);
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_UBNT_E100);
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_DSR1000N);
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_KONTRON_S1901);
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_PRIVATE_MAX);
+ }
+
+ return NULL;
+}
+
+#define ENUM_CHIP_TYPE_CASE(x) \
+ case x: \
+		return(#x + 15) /* Skip CVMX_CHIP_TYPE_ */
+
+static inline const char *cvmx_chip_type_to_string(enum
+ cvmx_chip_types_enum type)
+{
+ switch (type) {
+ ENUM_CHIP_TYPE_CASE(CVMX_CHIP_TYPE_NULL);
+ ENUM_CHIP_TYPE_CASE(CVMX_CHIP_SIM_TYPE_DEPRECATED);
+ ENUM_CHIP_TYPE_CASE(CVMX_CHIP_TYPE_OCTEON_SAMPLE);
+ ENUM_CHIP_TYPE_CASE(CVMX_CHIP_TYPE_MAX);
+ }
+
+ return "Unsupported Chip";
+}
+
+#endif /* __CVMX_BOOTINFO_H__ */
diff --git a/arch/mips/mach-octeon/include/mach/cvmx-bootmem.h b/arch/mips/mach-octeon/include/mach/cvmx-bootmem.h
new file mode 100644
index 0000000000..d60668c9ad
--- /dev/null
+++ b/arch/mips/mach-octeon/include/mach/cvmx-bootmem.h
@@ -0,0 +1,533 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020 Marvell International Ltd.
+ */
+
+/**
+ * @file
+ * Simple allocate-only memory allocator, used to allocate memory at
+ * application start time.
+ */
+
+#ifndef __CVMX_BOOTMEM_H__
+#define __CVMX_BOOTMEM_H__
+
+/* Must be multiple of 8, changing breaks ABI */
+#define CVMX_BOOTMEM_NAME_LEN 128
+/* Can change without breaking ABI */
+#define CVMX_BOOTMEM_NUM_NAMED_BLOCKS 64
+/* Minimum alignment of bootmem-allocated blocks */
+#define CVMX_BOOTMEM_ALIGNMENT_SIZE (16ull)
+
+/* Flags for cvmx_bootmem_phy_mem* functions */
+/* Allocate from end of block instead of beginning */
+#define CVMX_BOOTMEM_FLAG_END_ALLOC (1 << 0)
+#define CVMX_BOOTMEM_FLAG_NO_LOCKING (1 << 1) /* Don't do any locking. */
+
+/* Real physical addresses of memory regions */
+#define OCTEON_DDR0_BASE (0x0ULL)
+#define OCTEON_DDR0_SIZE (0x010000000ULL)
+#define OCTEON_DDR1_BASE ((OCTEON_IS_OCTEON2() || OCTEON_IS_OCTEON3()) \
+ ? 0x20000000ULL : 0x410000000ULL)
+#define OCTEON_DDR1_SIZE (0x010000000ULL)
+#define OCTEON_DDR2_BASE ((OCTEON_IS_OCTEON2() || OCTEON_IS_OCTEON3()) \
+ ? 0x30000000ULL : 0x20000000ULL)
+#define OCTEON_DDR2_SIZE ((OCTEON_IS_OCTEON2() || OCTEON_IS_OCTEON3()) \
+ ? 0x7d0000000ULL : 0x3e0000000ULL)
+#define OCTEON_MAX_PHY_MEM_SIZE ((OCTEON_IS_MODEL(OCTEON_CN68XX)) \
+ ? 128 * 1024 * 1024 * 1024ULL \
+ : (OCTEON_IS_OCTEON2()) \
+ ? 32 * 1024 * 1024 * 1024ull \
+ : (OCTEON_IS_OCTEON3()) \
+ ? 512 * 1024 * 1024 * 1024ULL \
+ : 16 * 1024 * 1024 * 1024ULL)
+
+/*
+ * First bytes of each free physical block of memory contain this structure,
+ * which is used to maintain the free memory list. Since the bootloader is
+ * only 32 bits, there is a union providing 64 and 32 bit versions. The
+ * application init code converts addresses to 64 bit addresses before the
+ * application starts.
+ */
+struct cvmx_bootmem_block_header {
+ /* Note: these are referenced from assembly routines in the bootloader,
+ * so this structure should not be changed without changing those
+ * routines as well.
+ */
+ u64 next_block_addr;
+ u64 size;
+
+};
+
+/*
+ * Structure for named memory blocks.
+ * The number of descriptors available can be changed without affecting
+ * compatibility, but name length changes require a bump in the bootmem
+ * descriptor version.
+ * Note: This structure must be naturally 64 bit aligned, as a single
+ * memory image will be used by both 32 and 64 bit programs.
+ */
+struct cvmx_bootmem_named_block_desc {
+ u64 base_addr; /* Base address of named block */
+ /*
+ * Size actually allocated for named block (may differ from requested)
+ */
+ u64 size;
+ char name[CVMX_BOOTMEM_NAME_LEN]; /* name of named block */
+};
+
+/* Current descriptor versions */
+/* CVMX bootmem descriptor major version */
+#define CVMX_BOOTMEM_DESC_MAJ_VER 3
+/* CVMX bootmem descriptor minor version */
+#define CVMX_BOOTMEM_DESC_MIN_VER 0
+
+/*
+ * First three members of cvmx_bootmem_desc_t are left in original
+ * positions for backwards compatibility.
+ */
+struct cvmx_bootmem_desc {
+ /* Linux compatible proxy for __BIG_ENDIAN */
+ u32 lock; /* spinlock to control access to list */
+ u32 flags; /* flags for indicating various conditions */
+ u64 head_addr;
+
+	/* incremented when incompatible changes are made */
+ u32 major_version;
+ /*
+	 * incremented when compatible changes are made, reset to
+	 * zero when the major version is incremented
+ */
+ u32 minor_version;
+ u64 app_data_addr;
+ u64 app_data_size;
+
+ /* number of elements in named blocks array */
+ u32 named_block_num_blocks;
+ /* length of name array in bootmem blocks */
+ u32 named_block_name_len;
+ /* address of named memory block descriptors */
+ u64 named_block_array_addr;
+};
+
+/**
+ * Initialize the boot alloc memory structures. This is
+ * normally called inside of cvmx_user_app_init()
+ *
+ * @param mem_desc_addr Address of the free memory list
+ * @return
+ */
+int cvmx_bootmem_init(u64 mem_desc_addr);
+
+/**
+ * Allocate a block of memory from the free list that was passed
+ * to the application by the bootloader.
+ * This is an allocate-only algorithm, so freeing memory is not possible.
+ *
+ * @param size Size in bytes of block to allocate
+ * @param alignment Alignment required - must be power of 2
+ *
+ * @return pointer to block of memory, NULL on error
+ */
+void *cvmx_bootmem_alloc(u64 size, u64 alignment);
+
+/**
+ * Allocate a block of memory from the free list that was passed
+ * to the application by the bootloader from a specific node.
+ * This is an allocate-only algorithm, so freeing memory is not possible.
+ *
+ * @param node The node to allocate memory from
+ * @param size Size in bytes of block to allocate
+ * @param alignment Alignment required - must be power of 2
+ *
+ * @return pointer to block of memory, NULL on error
+ */
+void *cvmx_bootmem_alloc_node(u64 node, u64 size, u64 alignment);
+
+/**
+ * Allocate a block of memory from the free list that was
+ * passed to the application by the bootloader at a specific
+ * address. This is an allocate-only algorithm, so
+ * freeing memory is not possible. Allocation will fail if
+ * memory cannot be allocated at the specified address.
+ *
+ * @param size Size in bytes of block to allocate
+ * @param address Physical address to allocate memory at. If this
+ * memory is not available, the allocation fails.
+ * @param alignment Alignment required - must be power of 2
+ * @return pointer to block of memory, NULL on error
+ */
+void *cvmx_bootmem_alloc_address(u64 size, u64 address,
+ u64 alignment);
+
+/**
+ * Allocate a block of memory from the free list that was
+ * passed to the application by the bootloader within a specified
+ * address range. This is an allocate-only algorithm, so
+ * freeing memory is not possible. Allocation will fail if
+ * memory cannot be allocated in the requested range.
+ *
+ * @param size Size in bytes of block to allocate
+ * @param min_addr defines the minimum address of the range
+ * @param max_addr defines the maximum address of the range
+ * @param alignment Alignment required - must be power of 2
+ * @return pointer to block of memory, NULL on error
+ */
+void *cvmx_bootmem_alloc_range(u64 size, u64 alignment,
+ u64 min_addr, u64 max_addr);
+
+/**
+ * Allocate a block of memory from the free list that was passed
+ * to the application by the bootloader, and assign it a name in the
+ * global named block table. (part of the cvmx_bootmem_descriptor_t structure)
+ * Named blocks can later be freed.
+ *
+ * @param size Size in bytes of block to allocate
+ * @param alignment Alignment required - must be power of 2
+ * @param name name of block - must be less than CVMX_BOOTMEM_NAME_LEN bytes
+ *
+ * @return pointer to block of memory, NULL on error
+ */
+void *cvmx_bootmem_alloc_named(u64 size, u64 alignment,
+ const char *name);
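+
+/*
+ * Example (illustrative only): allocate a 4 KiB block aligned to a
+ * 128-byte cache line that later software stages can look up by name:
+ *
+ *	void *buf = cvmx_bootmem_alloc_named(4096, 128, "shared-scratch");
+ *
+ *	if (!buf)
+ *		printf("named allocation failed\n");
+ */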
+
+/**
+ * Allocate a block of memory from the free list that was passed
+ * to the application by the bootloader, and assign it a name in the
+ * global named block table. (part of the cvmx_bootmem_descriptor_t structure)
+ * Named blocks can later be freed.
+ *
+ * @param size Size in bytes of block to allocate
+ * @param alignment Alignment required - must be power of 2
+ * @param name name of block - must be less than CVMX_BOOTMEM_NAME_LEN bytes
+ * @param flags Flags to control options for the allocation.
+ *
+ * @return pointer to block of memory, NULL on error
+ */
+void *cvmx_bootmem_alloc_named_flags(u64 size, u64 alignment,
+ const char *name, u32 flags);
+
+/**
+ * Allocate a block of memory from the free list that was passed
+ * to the application by the bootloader, and assign it a name in the
+ * global named block table. (part of the cvmx_bootmem_descriptor_t structure)
+ * Named blocks can later be freed.
+ *
+ * @param size Size in bytes of block to allocate
+ * @param address Physical address to allocate memory at. If this
+ * memory is not available, the allocation fails.
+ * @param name name of block - must be less than CVMX_BOOTMEM_NAME_LEN bytes
+ *
+ * @return pointer to block of memory, NULL on error
+ */
+void *cvmx_bootmem_alloc_named_address(u64 size, u64 address,
+ const char *name);
+
+/**
+ * Allocate a block of memory from a specific range of the free list
+ * that was passed to the application by the bootloader, and assign it
+ * a name in the global named block table. (part of the
+ * cvmx_bootmem_descriptor_t structure) Named blocks can later be
+ * freed. If request cannot be satisfied within the address range
+ * specified, NULL is returned
+ *
+ * @param size Size in bytes of block to allocate
+ * @param min_addr minimum address of range
+ * @param max_addr maximum address of range
+ * @param align Alignment of memory to be allocated. (must be a power of 2)
+ * @param name name of block - must be less than CVMX_BOOTMEM_NAME_LEN bytes
+ *
+ * @return pointer to block of memory, NULL on error
+ */
+void *cvmx_bootmem_alloc_named_range(u64 size, u64 min_addr,
+ u64 max_addr, u64 align,
+ const char *name);
+
+/**
+ * Allocate if needed a block of memory from a specific range of the
+ * free list that was passed to the application by the bootloader, and
+ * assign it a name in the global named block table. (part of the
+ * cvmx_bootmem_descriptor_t structure) Named blocks can later be
+ * freed. If the requested name block is already allocated, return
+ * the pointer to block of memory. If request cannot be satisfied
+ * within the address range specified, NULL is returned
+ *
+ * @param size Size in bytes of block to allocate
+ * @param min_addr minimum address of range
+ * @param max_addr maximum address of range
+ * @param align Alignment of memory to be allocated. (must be a power of 2)
+ * @param name name of block - must be less than CVMX_BOOTMEM_NAME_LEN bytes
+ * @param init Initialization function
+ *
+ * The initialization function is optional, if omitted the named block
+ * is initialized to all zeros when it is created, i.e. once.
+ *
+ * @return pointer to block of memory, NULL on error
+ */
+void *cvmx_bootmem_alloc_named_range_once(u64 size,
+ u64 min_addr,
+ u64 max_addr,
+ u64 align,
+ const char *name,
+ void (*init)(void *));
+
+/**
+ * Allocate all free memory starting at the start address. This is used to
+ * prevent any free blocks from later being allocated within the reserved space.
+ * Note that any memory allocated with this function cannot be later freed.
+ *
+ * @param start_addr Starting address to reserve
+ * @param size Size in bytes to reserve starting at start_addr
+ * @param name Name to assign to reserved blocks
+ * @param flags Flags to use when reserving memory
+ *
+ * @return 0 on failure,
+ * !0 on success
+ */
+int cvmx_bootmem_reserve_memory(u64 start_addr, u64 size,
+ const char *name, u32 flags);
+
+/**
+ * Frees a previously allocated named bootmem block.
+ *
+ * @param name name of block to free
+ *
+ * @return 0 on failure,
+ * !0 on success
+ */
+int cvmx_bootmem_free_named(const char *name);
+
+/**
+ * Finds a named bootmem block by name.
+ *
+ * @param name name of block to free
+ *
+ * @return pointer to named block descriptor on success
+ * 0 on failure
+ */
+const struct cvmx_bootmem_named_block_desc *
+cvmx_bootmem_find_named_block(const char *name);
+
+/**
+ * Returns the size of available memory in bytes, only
+ * counting blocks that are at least as big as the minimum block
+ * size.
+ *
+ * @param min_block_size
+ * Minimum block size to count in total.
+ *
+ * @return Number of bytes available for allocation that meet the
+ * block size requirement
+ */
+u64 cvmx_bootmem_available_mem(u64 min_block_size);
+
+/**
+ * Prints out the list of named blocks that have been allocated
+ * along with their addresses and sizes.
+ * This is primarily used for debugging purposes
+ */
+void cvmx_bootmem_print_named(void);
+
+/**
+ * Allocates a block of physical memory from the free list, at
+ * (optional) requested address and alignment.
+ *
+ * @param req_size size of region to allocate. All requests are
+ * rounded up to be a multiple CVMX_BOOTMEM_ALIGNMENT_SIZE bytes size
+ *
+ * @param address_min Minimum address that block can occupy.
+ *
+ * @param address_max Specifies the maximum address (inclusive)
+ * that the allocation can use.
+ *
+ * @param alignment Requested alignment of the block. If this
+ * alignment cannot be met, the allocation fails.
+ * This must be a power of 2. (Note: Alignment of
+ * CVMX_BOOTMEM_ALIGNMENT_SIZE bytes is required, and
+ * internally enforced. Requested alignments of less
+ * than CVMX_BOOTMEM_ALIGNMENT_SIZE are set to
+ * CVMX_BOOTMEM_ALIGNMENT_SIZE.)
+ * @param flags Flags to control options for the allocation.
+ *
+ * @return physical address of block allocated, or -1 on failure
+ */
+s64 cvmx_bootmem_phy_alloc(u64 req_size, u64 address_min, u64 address_max,
+ u64 alignment, u32 flags);
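+
+/*
+ * Usage sketch (hypothetical values): carve 64 KiB out of the low
+ * 256 MiB with 4 KiB alignment:
+ *
+ *	s64 paddr = cvmx_bootmem_phy_alloc(0x10000, 0, 0x10000000,
+ *					   0x1000, 0);
+ *
+ *	if (paddr < 0)
+ *		printf("allocation failed\n");
+ */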
+
+/**
+ * Allocates a named block of physical memory from the free list, at
+ * (optional) requested address and alignment.
+ *
+ * @param size size of region to allocate. All requests are rounded
+ * up to be a multiple CVMX_BOOTMEM_ALIGNMENT_SIZE bytes size
+ *
+ * @param min_addr Minimum address that block can occupy.
+ *
+ * @param max_addr Specifies the maximum address (inclusive) that
+ * the allocation can use.
+ *
+ * @param alignment Requested alignment of the block. If this
+ * alignment cannot be met, the allocation fails.
+ * This must be a power of 2. (Note: Alignment of
+ * CVMX_BOOTMEM_ALIGNMENT_SIZE bytes is required, and
+ * internally enforced. Requested alignments of less
+ * than CVMX_BOOTMEM_ALIGNMENT_SIZE are set to
+ * CVMX_BOOTMEM_ALIGNMENT_SIZE.)
+ *
+ * @param name name to assign to named block
+ *
+ * @param flags Flags to control options for the allocation.
+ *
+ * @return physical address of block allocated, or -1 on failure
+ */
+s64 cvmx_bootmem_phy_named_block_alloc(u64 size, u64 min_addr, u64 max_addr,
+ u64 alignment, const char *name,
+ u32 flags);
+
+/**
+ * Finds a named memory block by name.
+ * Also used for finding an unused entry in the named block table.
+ *
+ * @param name Name of memory block to find. If NULL pointer given,
+ * then finds unused descriptor, if available.
+ *
+ * @param flags Flags to control options for the allocation.
+ *
+ * @return Physical address of the memory block descriptor, zero if not
+ * found. If zero returned when name parameter is NULL, then no
+ * memory block descriptors are available.
+ */
+u64 cvmx_bootmem_phy_named_block_find(const char *name, u32 flags);
+
+/**
+ * Returns the size of available memory in bytes, only
+ * counting blocks that are at least as big as the minimum block
+ * size.
+ *
+ * @param min_block_size
+ * Minimum block size to count in total.
+ *
+ * @return Number of bytes available for allocation that meet the
+ * block size requirement
+ */
+u64 cvmx_bootmem_phy_available_mem(u64 min_block_size);
+
+/**
+ * Frees a named block.
+ *
+ * @param name name of block to free
+ * @param flags flags for passing options
+ *
+ * @return 0 on failure
+ * 1 on success
+ */
+int cvmx_bootmem_phy_named_block_free(const char *name, u32 flags);
+
+/**
+ * Frees a block to the bootmem allocator list. This must
+ * be used with care, as the size provided must match the size
+ * of the block that was allocated, or the list will become
+ * corrupted.
+ *
+ * IMPORTANT: This is only intended to be used as part of named block
+ * frees and initial population of the free memory list.
+ *
+ * @param phy_addr physical address of block
+ * @param size size of block in bytes.
+ * @param flags flags for passing options
+ *
+ * @return 1 on success,
+ * 0 on failure
+ */
+int __cvmx_bootmem_phy_free(u64 phy_addr, u64 size, u32 flags);
+
+/**
+ * Prints the list of currently allocated named blocks
+ *
+ */
+void cvmx_bootmem_phy_named_block_print(void);
+
+/**
+ * Prints the list of available memory.
+ *
+ */
+void cvmx_bootmem_phy_list_print(void);
+
+/**
+ * This function initializes the free memory list used by cvmx_bootmem.
+ * This must be called before any allocations can be done.
+ *
+ * @param mem_size Total memory available, in bytes
+ *
+ * @param low_reserved_bytes Number of bytes to reserve (leave out of
+ * free list) at address 0x0.
+ *
+ * @param desc_buffer Buffer for the bootmem descriptor. This must be
+ * a 32 bit addressable address.
+ *
+ * @return 1 on success
+ * 0 on failure
+ */
+s64 cvmx_bootmem_phy_mem_list_init(u64 mem_size, u32 low_reserved_bytes,
+ struct cvmx_bootmem_desc *desc_buffer);
+
+/**
+ * This function initializes the free memory list used by cvmx_bootmem.
+ * This must be called before any allocations can be done.
+ *
+ * @param nodemask Nodemask - one bit per node (bit0->node0, bit1->node1,...)
+ *
+ * @param mem_size[] Array of memory sizes in MBytes per node ([0]->node0,...)
+ *
+ * @param low_reserved_bytes Number of bytes to reserve (leave out of
+ * free list) at address 0x0.
+ *
+ * @param desc_buffer Buffer for the bootmem descriptor. This must be
+ * a 32 bit addressable address.
+ *
+ * @return 1 on success
+ * 0 on failure
+ */
+s64 cvmx_bootmem_phy_mem_list_init_multi(u8 nodemask, u32 mem_size[],
+ u32 low_reserved_bytes,
+ struct cvmx_bootmem_desc *desc_buffer);
+
+/**
+ * Locks the bootmem allocator. This is useful in certain situations
+ * where multiple allocations must be made without being interrupted.
+ * This should be used with the CVMX_BOOTMEM_FLAG_NO_LOCKING flag.
+ *
+ */
+void cvmx_bootmem_lock(void);
+
+/**
+ * Unlocks the bootmem allocator. This is useful in certain situations
+ * where multiple allocations must be made without being interrupted.
+ * This should be used with the CVMX_BOOTMEM_FLAG_NO_LOCKING flag.
+ *
+ */
+void cvmx_bootmem_unlock(void);
+
+/**
+ * Internal use function to get the current descriptor pointer
+ */
+void *__cvmx_bootmem_internal_get_desc_ptr(void);
+
+/**
+ * Internal use. This is used to get a pointer to a physical
+ * address. For Linux n32 the physical address is mmapped to a virtual
+ * address and the virtual address is returned. For n64 the address
+ * is converted to an xkphys address and the xkphys address is
+ * returned.
+ */
+void *__cvmx_phys_addr_to_ptr(u64 phys, int size);
+const struct cvmx_bootmem_named_block_desc *
+__cvmx_bootmem_find_named_block_flags(const char *name, u32 flags);
+void *cvmx_bootmem_alloc_named_range_flags(u64 size, u64 min_addr,
+ u64 max_addr, u64 align,
+ const char *name, u32 flags);
+u64 cvmx_bootmem_phy_alloc_range(u64 size, u64 alignment,
+ u64 min_addr, u64 max_addr);
+
+#endif /* __CVMX_BOOTMEM_H__ */
diff --git a/arch/mips/mach-octeon/include/mach/cvmx-coremask.h b/arch/mips/mach-octeon/include/mach/cvmx-coremask.h
new file mode 100644
index 0000000000..c34ff46d3a
--- /dev/null
+++ b/arch/mips/mach-octeon/include/mach/cvmx-coremask.h
@@ -0,0 +1,752 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020 Marvell International Ltd.
+ */
+
+/**
+ * Module to support operations on bitmap of cores. Coremask can be used to
+ * select a specific core, a group of cores, or all available cores, for
+ * initialization and differentiation of roles within a single shared binary
+ * executable image.
+ *
+ * The core numbers used in this file are the same value as what is found in
+ * the COP0_EBASE register and the rdhwr 0 instruction.
+ *
+ * For the CN78XX and other multi-node environments the core numbers are not
+ * contiguous. The core numbers for the CN78XX are as follows:
+ *
+ * Node 0: Cores 0 - 47
+ * Node 1: Cores 128 - 175
+ * Node 2: Cores 256 - 303
+ * Node 3: Cores 384 - 431
+ *
+ * The coremask environment generally tries to be node agnostic in order to
+ * provide future compatibility if more cores are added to future processors
+ * or more nodes are supported.
+ */
+
+#ifndef __CVMX_COREMASK_H__
+#define __CVMX_COREMASK_H__
+
+#include "cvmx-regs.h"
+
+/* bits per holder */
+#define CVMX_COREMASK_HLDRSZ ((int)(sizeof(u64) * 8))
+
+/** Maximum allowed cores per node */
+#define CVMX_COREMASK_MAX_CORES_PER_NODE (1 << CVMX_NODE_NO_SHIFT)
+
+/** Maximum number of bits actually used in the coremask */
+#define CVMX_MAX_USED_CORES_BMP (1 << (CVMX_NODE_NO_SHIFT + CVMX_NODE_BITS))
+
+/* the number of valid bits in, and the mask of, the most significant holder */
+#define CVMX_COREMASK_MSHLDR_NBITS \
+ (CVMX_MIPS_MAX_CORES % CVMX_COREMASK_HLDRSZ)
+
+#define CVMX_COREMASK_MSHLDR_MASK \
+ ((CVMX_COREMASK_MSHLDR_NBITS) ? \
+ (((u64)1 << CVMX_COREMASK_MSHLDR_NBITS) - 1) : \
+ ((u64)-1))
+
+/* cvmx_coremask size in u64 */
+#define CVMX_COREMASK_BMPSZ \
+ ((int)(CVMX_MIPS_MAX_CORES / CVMX_COREMASK_HLDRSZ + \
+ (CVMX_COREMASK_MSHLDR_NBITS != 0)))
+
+#define CVMX_COREMASK_USED_BMPSZ \
+ (CVMX_MAX_USED_CORES_BMP / CVMX_COREMASK_HLDRSZ)
+
+#define CVMX_COREMASK_BMP_NODE_CORE_IDX(node, core) \
+ ((((node) << CVMX_NODE_NO_SHIFT) + (core)) / CVMX_COREMASK_HLDRSZ)
+/**
+ * Maximum available coremask.
+ */
+#define CVMX_COREMASK_MAX \
+ { { \
+ 0x0000FFFFFFFFFFFF, 0, \
+ 0x0000FFFFFFFFFFFF, 0, \
+ 0x0000FFFFFFFFFFFF, 0, \
+ 0x0000FFFFFFFFFFFF, 0, \
+ 0, 0, \
+ 0, 0, \
+ 0, 0, \
+ 0, 0} }
+
+/**
+ * Empty coremask
+ */
+#define CVMX_COREMASK_EMPTY \
+ { { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }
+
+struct cvmx_coremask {
+ u64 coremask_bitmap[CVMX_COREMASK_BMPSZ];
+};
+
+/**
+ * Macro to iterate through all available cores in a coremask
+ *
+ * @param core - core variable to use to iterate
+ * @param pcm - pointer to core mask
+ *
+ * Use this like a for statement
+ */
+#define cvmx_coremask_for_each_core(core, pcm) \
+ for ((core) = -1; \
+ (core) = cvmx_coremask_next_core((core), pcm), \
+ (core) >= 0;)
+
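+/*
+ * Example (illustrative): counting the cores set in a mask with the
+ * iterator above:
+ *
+ *	int core, count = 0;
+ *
+ *	cvmx_coremask_for_each_core(core, pcm)
+ *		count++;
+ */
+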
+/**
+ * Given a node and node mask, return the next available node.
+ *
+ * @param node starting node number
+ * @param node_mask node mask to use to find the next node
+ *
+ * @return next node number or -1 if no more nodes are available
+ */
+static inline int cvmx_coremask_next_node(int node, u8 node_mask)
+{
+ int next_offset;
+
+ next_offset = __builtin_ffs(node_mask >> (node + 1));
+ if (next_offset == 0)
+ return -1;
+ else
+ return node + next_offset;
+}
+
+/**
+ * Iterate through all nodes in a node mask
+ *
+ * @param node node iterator variable
+ * @param node_mask mask to use for iterating
+ *
+ * Use this like a for statement
+ */
+#define cvmx_coremask_for_each_node(node, node_mask) \
+ for ((node) = __builtin_ffs(node_mask) - 1; \
+ (node) >= 0 && (node) < CVMX_MAX_NODES; \
+ (node) = cvmx_coremask_next_node(node, node_mask))
+
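+/*
+ * Example (illustrative): with node_mask == 0x3 the iterator above
+ * visits node 0 and then node 1:
+ *
+ *	int node;
+ *
+ *	cvmx_coremask_for_each_node(node, 0x3)
+ *		printf("node %d present\n", node);
+ */
+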
+/**
+ * Is ``core'' set in the coremask?
+ *
+ * @param pcm is the pointer to the coremask.
+ * @param core core number to check
+ * @return 1 if core is set and 0 if not.
+ */
+static inline int cvmx_coremask_is_core_set(const struct cvmx_coremask *pcm,
+ int core)
+{
+ int n, i;
+
+ n = core % CVMX_COREMASK_HLDRSZ;
+ i = core / CVMX_COREMASK_HLDRSZ;
+
+ return (pcm->coremask_bitmap[i] & ((u64)1 << n)) != 0;
+}
+
+/**
+ * Is ``current core'' set in the coremask?
+ *
+ * @param pcm is the pointer to the coremask.
+ * @return 1 if core is set and 0 if not.
+ */
+static inline int cvmx_coremask_is_self_set(const struct cvmx_coremask *pcm)
+{
+ return cvmx_coremask_is_core_set(pcm, (int)cvmx_get_core_num());
+}
+
+/**
+ * Is coremask empty?
+ * @param pcm is the pointer to the coremask.
+ * @return 1 if *pcm is empty (all zeros), 0 if not empty.
+ */
+static inline int cvmx_coremask_is_empty(const struct cvmx_coremask *pcm)
+{
+ int i;
+
+ for (i = 0; i < CVMX_COREMASK_USED_BMPSZ; i++)
+ if (pcm->coremask_bitmap[i] != 0)
+ return 0;
+
+ return 1;
+}
+
+/**
+ * Set ``core'' in the coremask.
+ *
+ * @param pcm is the pointer to the coremask.
+ * @param core core number to set
+ * @return 0.
+ */
+static inline int cvmx_coremask_set_core(struct cvmx_coremask *pcm, int core)
+{
+ int n, i;
+
+ n = core % CVMX_COREMASK_HLDRSZ;
+ i = core / CVMX_COREMASK_HLDRSZ;
+ pcm->coremask_bitmap[i] |= ((u64)1 << n);
+
+ return 0;
+}
+
+/**
+ * Set ``current core'' in the coremask.
+ *
+ * @param pcm is the pointer to the coremask.
+ * @return 0.
+ */
+static inline int cvmx_coremask_set_self(struct cvmx_coremask *pcm)
+{
+ return cvmx_coremask_set_core(pcm, (int)cvmx_get_core_num());
+}
+
+/**
+ * Clear ``core'' from the coremask.
+ *
+ * @param pcm is the pointer to the coremask.
+ * @param core core number to clear
+ * @return 0.
+ */
+static inline int cvmx_coremask_clear_core(struct cvmx_coremask *pcm, int core)
+{
+ int n, i;
+
+ n = core % CVMX_COREMASK_HLDRSZ;
+ i = core / CVMX_COREMASK_HLDRSZ;
+ pcm->coremask_bitmap[i] &= ~((u64)1 << n);
+
+ return 0;
+}
+
+/**
+ * Clear ``current core'' from the coremask.
+ *
+ * @param pcm is the pointer to the coremask.
+ * @return 0.
+ */
+static inline int cvmx_coremask_clear_self(struct cvmx_coremask *pcm)
+{
+ return cvmx_coremask_clear_core(pcm, cvmx_get_core_num());
+}
+
+/**
+ * Toggle ``core'' in the coremask.
+ *
+ * @param pcm is the pointer to the coremask.
+ * @param core core number to toggle
+ * @return 0.
+ */
+static inline int cvmx_coremask_toggle_core(struct cvmx_coremask *pcm, int core)
+{
+ int n, i;
+
+ n = core % CVMX_COREMASK_HLDRSZ;
+ i = core / CVMX_COREMASK_HLDRSZ;
+ pcm->coremask_bitmap[i] ^= ((u64)1 << n);
+
+ return 0;
+}
+
+/**
+ * Toggle ``current core'' in the coremask.
+ *
+ * @param pcm is the pointer to the coremask.
+ * @return 0.
+ */
+static inline int cvmx_coremask_toggle_self(struct cvmx_coremask *pcm)
+{
+ return cvmx_coremask_toggle_core(pcm, cvmx_get_core_num());
+}
+
+/**
+ * Set the lower 64 bits of the coremask.
+ * @param pcm pointer to coremask
+ * @param coremask_64 64-bit coremask to apply to the first node (0)
+ */
+static inline void cvmx_coremask_set64(struct cvmx_coremask *pcm,
+ u64 coremask_64)
+{
+ pcm->coremask_bitmap[0] = coremask_64;
+}
+
+/**
+ * Set the 64-bit coremask for a particular node.
+ * @param pcm pointer to coremask
+ * @param node node to set
+ * @param coremask_64 64-bit coremask to apply to the specified node
+ */
+static inline void cvmx_coremask_set64_node(struct cvmx_coremask *pcm,
+ u8 node,
+ u64 coremask_64)
+{
+ pcm->coremask_bitmap[CVMX_COREMASK_BMP_NODE_CORE_IDX(node, 0)] =
+ coremask_64;
+}
+
+/**
+ * Gets the lower 64-bits of the coremask
+ *
+ * @param[in] pcm - pointer to coremask
+ * @return 64-bit coremask for the first node
+ */
+static inline u64 cvmx_coremask_get64(const struct cvmx_coremask *pcm)
+{
+ return pcm->coremask_bitmap[0];
+}
+
+/**
+ * Gets the lower 64-bits of the coremask for the specified node
+ *
+ * @param[in] pcm - pointer to coremask
+ * @param node - node to get coremask for
+ * @return 64-bit coremask for the specified node
+ */
+static inline u64 cvmx_coremask_get64_node(const struct cvmx_coremask *pcm,
+ u8 node)
+{
+ return pcm->coremask_bitmap[CVMX_COREMASK_BMP_NODE_CORE_IDX(node, 0)];
+}
+
+/**
+ * Gets the lower 32-bits of the coremask for compatibility
+ *
+ * @param[in] pcm - pointer to coremask
+ * @return 32-bit coremask for the first node
+ * @deprecated This function is to maintain compatibility with older
+ * SDK applications and may disappear at some point.
+ * This function is not compatible with the CN78XX or any other
+ * Octeon device with more than 32 cores.
+ */
+static inline u32 cvmx_coremask_get32(const struct cvmx_coremask *pcm)
+{
+ return pcm->coremask_bitmap[0] & 0xffffffff;
+}
+
+/*
+ * cvmx_coremask_cmp() returns an integer less than, equal to, or
+ * greater than zero if *pcm1 is found, respectively, to be less than,
+ * to match, or be greater than *pcm2.
+ */
+static inline int cvmx_coremask_cmp(const struct cvmx_coremask *pcm1,
+ const struct cvmx_coremask *pcm2)
+{
+ int i;
+
+	/* Start from highest node for arithmetically correct result */
+ for (i = CVMX_COREMASK_USED_BMPSZ - 1; i >= 0; i--)
+ if (pcm1->coremask_bitmap[i] != pcm2->coremask_bitmap[i]) {
+ return (pcm1->coremask_bitmap[i] >
+ pcm2->coremask_bitmap[i]) ? 1 : -1;
+ }
+
+ return 0;
+}
+
+/*
+ * cvmx_coremask_OPx(pcm1, pcm2[, pcm3]), where OPx can be
+ * - and
+ * - or
+ * - xor
+ * - not
+ * ...
+ * For binary operators, pcm3 <-- pcm1 OPX pcm2.
+ * For unaries, pcm2 <-- OPx pcm1.
+ */
+#define CVMX_COREMASK_BINARY_DEFUN(binary_op, op) \
+ static inline int cvmx_coremask_##binary_op( \
+ struct cvmx_coremask *pcm1, \
+ const struct cvmx_coremask *pcm2, \
+ const struct cvmx_coremask *pcm3) \
+ { \
+ int i; \
+ \
+ for (i = 0; i < CVMX_COREMASK_USED_BMPSZ; i++) \
+ pcm1->coremask_bitmap[i] = \
+ pcm2->coremask_bitmap[i] \
+ op \
+ pcm3->coremask_bitmap[i]; \
+ \
+ return 0; \
+ }
+
+#define CVMX_COREMASK_UNARY_DEFUN(unary_op, op) \
+ static inline int cvmx_coremask_##unary_op( \
+ struct cvmx_coremask *pcm1, \
+ const struct cvmx_coremask *pcm2) \
+ { \
+ int i; \
+ \
+ for (i = 0; i < CVMX_COREMASK_USED_BMPSZ; i++) \
+ pcm1->coremask_bitmap[i] = \
+ op \
+ pcm2->coremask_bitmap[i]; \
+ \
+ return 0; \
+ }
+
+/* cvmx_coremask_and(pcm1, pcm2, pcm3): pcm1 = pcm2 & pcm3 */
+CVMX_COREMASK_BINARY_DEFUN(and, &)
+/* cvmx_coremask_or(pcm1, pcm2, pcm3): pcm1 = pcm2 | pcm3 */
+CVMX_COREMASK_BINARY_DEFUN(or, |)
+/* cvmx_coremask_xor(pcm1, pcm2, pcm3): pcm1 = pcm2 ^ pcm3 */
+CVMX_COREMASK_BINARY_DEFUN(xor, ^)
+/* cvmx_coremask_maskoff(pcm1, pcm2, pcm3): pcm1 = pcm2 & ~pcm3 */
+CVMX_COREMASK_BINARY_DEFUN(maskoff, & ~)
+/* cvmx_coremask_not(pcm1, pcm2): pcm1 = ~pcm2 */
+CVMX_COREMASK_UNARY_DEFUN(not, ~)
+/* cvmx_coremask_fill(pcm1, pcm2): pcm1 = -1 */
+CVMX_COREMASK_UNARY_DEFUN(fill, -1 |)
+/* cvmx_coremask_clear(pcm1, pcm2): pcm1 = 0 */
+CVMX_COREMASK_UNARY_DEFUN(clear, 0 &)
+/* cvmx_coremask_dup(pcm1, pcm2): pcm1 = pcm2 */
+CVMX_COREMASK_UNARY_DEFUN(dup, +)
+
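+/*
+ * Example (illustrative): compute the cores in "avail" that are not in
+ * "used" using the generated helpers (free = avail & ~used):
+ *
+ *	struct cvmx_coremask avail, used, free;
+ *
+ *	cvmx_coremask_maskoff(&free, &avail, &used);
+ */
+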
+/*
+ * Macros using the unary functions defined w/
+ * CVMX_COREMASK_UNARY_DEFUN
+ * - set *pcm to its complement
+ * - set all bits in *pcm to 0
+ * - set all (valid) bits in *pcm to 1
+ */
+#define cvmx_coremask_complement(pcm) cvmx_coremask_not(pcm, pcm)
+/* On clear, even clear the unused bits */
+#define cvmx_coremask_clear_all(pcm) \
+ *(pcm) = (struct cvmx_coremask)CVMX_COREMASK_EMPTY
+#define cvmx_coremask_set_all(pcm) cvmx_coremask_fill(pcm, NULL)
+
+/*
+ * convert a string of hex digits to struct cvmx_coremask
+ *
+ * @param pcm
+ * @param hexstr can be
+ * - "[1-9A-Fa-f][0-9A-Fa-f]*", or
+ * - "-1" to set the bits for all the cores.
+ * return
+ * 0 for success,
+ * -1 for string too long (i.e., hexstr takes more bits than
+ * CVMX_MIPS_MAX_CORES),
+ * -2 for conversion problems from hex string to an unsigned
+ * long long, e.g., non-hex char in hexstr, and
+ * -3 for hexstr starting with '0'.
+ * NOTE:
+ * This function clears the bitmask in *pcm before the conversion.
+ */
+int cvmx_coremask_str2bmp(struct cvmx_coremask *pcm, char *hexstr);
+
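+/*
+ * Example (illustrative): "f" sets cores 0-3, "-1" sets all cores:
+ *
+ *	struct cvmx_coremask cm;
+ *
+ *	if (cvmx_coremask_str2bmp(&cm, "f") == 0)
+ *		...;	// cores 0-3 are now set in cm
+ */
+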
+/*
+ * convert a struct cvmx_coremask to a string of hex digits
+ *
+ * @param pcm
+ * @param hexstr is "[1-9A-Fa-f][0-9A-Fa-f]*"
+ *
+ * return 0.
+ */
+int cvmx_coremask_bmp2str(const struct cvmx_coremask *pcm, char *hexstr);
+
+/*
+ * Returns the index of the lowest bit in a coremask holder.
+ */
+static inline int cvmx_coremask_lowest_bit(u64 h)
+{
+ return __builtin_ctzll(h);
+}
+
+/*
+ * Returns the 0-based index of the highest bit in a coremask holder.
+ */
+static inline int cvmx_coremask_highest_bit(u64 h)
+{
+ return (64 - __builtin_clzll(h) - 1);
+}
+
+/**
+ * Returns the last core within the coremask and -1 when the coremask
+ * is empty.
+ *
+ * @param[in] pcm - pointer to coremask
+ * @returns last core set in the coremask or -1 if all clear
+ *
+ */
+static inline int cvmx_coremask_get_last_core(const struct cvmx_coremask *pcm)
+{
+ int i;
+ int found = -1;
+
+ for (i = 0; i < CVMX_COREMASK_USED_BMPSZ; i++) {
+ if (pcm->coremask_bitmap[i])
+ found = i;
+ }
+
+ if (found == -1)
+ return -1;
+
+ return found * CVMX_COREMASK_HLDRSZ +
+ cvmx_coremask_highest_bit(pcm->coremask_bitmap[found]);
+}
+
+/**
+ * Returns the first core within the coremask and -1 when the coremask
+ * is empty.
+ *
+ * @param[in] pcm - pointer to coremask
+ * @returns first core set in the coremask or -1 if all clear
+ *
+ */
+static inline int cvmx_coremask_get_first_core(const struct cvmx_coremask *pcm)
+{
+ int i;
+
+ for (i = 0; i < CVMX_COREMASK_USED_BMPSZ; i++)
+ if (pcm->coremask_bitmap[i])
+ break;
+
+ if (i == CVMX_COREMASK_USED_BMPSZ)
+ return -1;
+
+ return i * CVMX_COREMASK_HLDRSZ +
+ cvmx_coremask_lowest_bit(pcm->coremask_bitmap[i]);
+}
+
+/**
+ * Given a core and coremask, return the next available core in the coremask
+ * or -1 if none are available.
+ *
+ * @param core - starting core to check (can be -1 for core 0)
+ * @param pcm - pointer to coremask to check for the next core.
+ *
+ * @return next core following the core parameter or -1 if no more cores.
+ */
+static inline int cvmx_coremask_next_core(int core,
+ const struct cvmx_coremask *pcm)
+{
+ int n, i;
+
+ core++;
+ n = core % CVMX_COREMASK_HLDRSZ;
+ i = core / CVMX_COREMASK_HLDRSZ;
+
+ if (pcm->coremask_bitmap[i] != 0) {
+ for (; n < CVMX_COREMASK_HLDRSZ; n++)
+ if (pcm->coremask_bitmap[i] & (1ULL << n))
+ return ((i * CVMX_COREMASK_HLDRSZ) + n);
+ }
+
+ for (i = i + 1; i < CVMX_COREMASK_USED_BMPSZ; i++) {
+ if (pcm->coremask_bitmap[i] != 0)
+ return (i * CVMX_COREMASK_HLDRSZ) +
+ cvmx_coremask_lowest_bit(pcm->coremask_bitmap[i]);
+ }
+ return -1;
+}
+
+/**
+ * Compute coremask for count cores starting with start_core.
+ * Note that the coremask for multi-node processors may have
+ * gaps.
+ *
+ * @param[out] pcm pointer to core mask data structure
+ * @param start_core starting core number
+ * @param count number of cores
+ *
+ */
+static inline void cvmx_coremask_set_cores(struct cvmx_coremask *pcm,
+ unsigned int start_core,
+ unsigned int count)
+{
+ int node;
+	int core; /* Current core in node */
+ int cores_in_node;
+ int i;
+
+ assert(CVMX_MAX_CORES < CVMX_COREMASK_HLDRSZ);
+ node = start_core >> CVMX_NODE_NO_SHIFT;
+ core = start_core & ((1 << CVMX_NODE_NO_SHIFT) - 1);
+ assert(core < CVMX_MAX_CORES);
+
+ cvmx_coremask_clear_all(pcm);
+ while (count > 0) {
+ if (count + core > CVMX_MAX_CORES)
+ cores_in_node = CVMX_MAX_CORES - core;
+ else
+ cores_in_node = count;
+
+ i = CVMX_COREMASK_BMP_NODE_CORE_IDX(node, core);
+ pcm->coremask_bitmap[i] = ((1ULL << cores_in_node) - 1) << core;
+ count -= cores_in_node;
+ core = 0;
+ node++;
+ }
+}
+
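+/*
+ * Example (illustrative): build a mask of the first eight cores on
+ * node 0:
+ *
+ *	struct cvmx_coremask cm;
+ *
+ *	cvmx_coremask_set_cores(&cm, 0, 8);
+ */
+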
+/**
+ * Makes a copy of a coremask
+ *
+ * @param[out] dest - pointer to destination coremask
+ * @param[in] src - pointer to source coremask
+ */
+static inline void cvmx_coremask_copy(struct cvmx_coremask *dest,
+ const struct cvmx_coremask *src)
+{
+ memcpy(dest, src, sizeof(*dest));
+}
+
+/**
+ * Test to see if the specified core is first core in coremask.
+ *
+ * @param[in] pcm pointer to the coremask to test against
+ * @param[in] core core to check
+ *
+ * @return 1 if the core is first core in the coremask, 0 otherwise
+ *
+ */
+static inline int cvmx_coremask_is_core_first_core(const struct cvmx_coremask *pcm,
+ unsigned int core)
+{
+ int n, i;
+
+ n = core / CVMX_COREMASK_HLDRSZ;
+
+ for (i = 0; i < n; i++)
+ if (pcm->coremask_bitmap[i] != 0)
+ return 0;
+
+ /* From now on we only care about the core number within an entry */
+ core &= (CVMX_COREMASK_HLDRSZ - 1);
+ if (__builtin_ffsll(pcm->coremask_bitmap[n]) < (core + 1))
+ return 0;
+
+ return (__builtin_ffsll(pcm->coremask_bitmap[n]) == core + 1);
+}
+
+/*
+ * NOTE:
+ * cvmx_coremask_is_first_core() was retired due to improper usage.
+ * For inquiring about the current core being the initializing
+ * core for an application, use cvmx_is_init_core().
+ * For simply inquiring if the current core is numerically
+ * lowest in a given mask, use:
+ * cvmx_coremask_is_core_first_core(pcm, cvmx_get_core_num())
+ */
+
+/**
+ * Returns the number of 1 bits set in a coremask
+ *
+ * @param[in] pcm - pointer to core mask
+ *
+ * @return number of bits set in the coremask
+ */
+static inline int cvmx_coremask_get_core_count(const struct cvmx_coremask *pcm)
+{
+ int i;
+ int count = 0;
+
+ for (i = 0; i < CVMX_COREMASK_USED_BMPSZ; i++)
+ count += __builtin_popcountll(pcm->coremask_bitmap[i]);
+
+ return count;
+}
+
+/**
+ * For multi-node systems, return the node a core belongs to.
+ *
+ * @param core - core number (0-1023)
+ *
+ * @return node number core belongs to
+ */
+static inline int cvmx_coremask_core_to_node(int core)
+{
+ return (core >> CVMX_NODE_NO_SHIFT) & CVMX_NODE_MASK;
+}
+
+/**
+ * Given a core number on a multi-node system, return the core number for a
+ * particular node.
+ *
+ * @param core - global core number
+ *
+ * @returns core number local to the node.
+ */
+static inline int cvmx_coremask_core_on_node(int core)
+{
+ return (core & ((1 << CVMX_NODE_NO_SHIFT) - 1));
+}
+
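+/*
+ * Example (illustrative): with CVMX_NODE_NO_SHIFT == 7, global core 130
+ * is local core 2 on node 1, i.e. cvmx_coremask_core_to_node(130) == 1
+ * and cvmx_coremask_core_on_node(130) == 2.
+ */
+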
+/**
+ * Returns whether one coremask is a subset of another coremask
+ *
+ * @param main - main coremask to test
+ * @param subset - subset coremask to test
+ *
+ * @return 0 if the subset contains cores not in the main coremask or 1 if
+ * the subset is fully contained in the main coremask.
+ */
+static inline int cvmx_coremask_is_subset(const struct cvmx_coremask *main,
+ const struct cvmx_coremask *subset)
+{
+ int i;
+
+ for (i = 0; i < CVMX_COREMASK_USED_BMPSZ; i++)
+ if ((main->coremask_bitmap[i] & subset->coremask_bitmap[i]) !=
+ subset->coremask_bitmap[i])
+ return 0;
+ return 1;
+}
+
+/**
+ * Returns whether one coremask intersects another coremask
+ *
+ * @param c1 - main coremask to test
+ * @param c2 - subset coremask to test
+ *
+ * @return 1 if coremask c1 intersects coremask c2, 0 if they are exclusive
+ */
+static inline int cvmx_coremask_intersects(const struct cvmx_coremask *c1,
+ const struct cvmx_coremask *c2)
+{
+ int i;
+
+ for (i = 0; i < CVMX_COREMASK_USED_BMPSZ; i++)
+ if ((c1->coremask_bitmap[i] & c2->coremask_bitmap[i]) != 0)
+ return 1;
+ return 0;
+}
+
+/**
+ * Masks a single node of a coremask
+ *
+ * @param pcm - coremask to mask [inout]
+ * @param node - node number to mask against
+ */
+static inline void cvmx_coremask_mask_node(struct cvmx_coremask *pcm, int node)
+{
+ int i;
+
+ for (i = 0; i < CVMX_COREMASK_BMP_NODE_CORE_IDX(node, 0); i++)
+ pcm->coremask_bitmap[i] = 0;
+
+ for (i = CVMX_COREMASK_BMP_NODE_CORE_IDX(node + 1, 0);
+ i < CVMX_COREMASK_USED_BMPSZ; i++)
+ pcm->coremask_bitmap[i] = 0;
+}
+
+/**
+ * Prints out a coremask in the form of node X: 0x... 0x...
+ *
+ * @param[in] pcm - pointer to core mask
+ *
+ * @return nothing
+ */
+void cvmx_coremask_print(const struct cvmx_coremask *pcm);
+
+static inline void cvmx_coremask_dprint(const struct cvmx_coremask *pcm)
+{
+ if (IS_ENABLED(DEBUG))
+ cvmx_coremask_print(pcm);
+}
+
+struct cvmx_coremask *octeon_get_available_coremask(struct cvmx_coremask *pcm);
+
+int validate_coremask(struct cvmx_coremask *pcm);
+
+#endif /* __CVMX_COREMASK_H__ */
diff --git a/arch/mips/mach-octeon/include/mach/cvmx-fuse.h b/arch/mips/mach-octeon/include/mach/cvmx-fuse.h
new file mode 100644
index 0000000000..a06a1326cb
--- /dev/null
+++ b/arch/mips/mach-octeon/include/mach/cvmx-fuse.h
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020 Marvell International Ltd.
+ */
+
+#ifndef __CVMX_FUSE_H__
+#define __CVMX_FUSE_H__
+
+/**
+ * Read a byte of fuse data
+ * @param node node to read from
+ * @param byte_addr address to read
+ *
+ * @return byte of fuse data
+ */
+static inline u8 cvmx_fuse_read_byte_node(u8 node, int byte_addr)
+{
+ u64 val;
+
+ val = FIELD_PREP(MIO_FUS_RCMD_ADDR, byte_addr) | MIO_FUS_RCMD_PEND;
+ csr_wr_node(node, CVMX_MIO_FUS_RCMD, val);
+
+ do {
+ val = csr_rd_node(node, CVMX_MIO_FUS_RCMD);
+ } while (val & MIO_FUS_RCMD_PEND);
+
+ return FIELD_GET(MIO_FUS_RCMD_DAT, val);
+}
+
+/**
+ * Read a byte of fuse data
+ * @param byte_addr address to read
+ *
+ * @return byte of fuse data
+ */
+static inline u8 cvmx_fuse_read_byte(int byte_addr)
+{
+ return cvmx_fuse_read_byte_node(0, byte_addr);
+}
+
+/**
+ * Read a single fuse bit
+ *
+ * @param node Node number
+ * @param fuse Fuse number (0-1024)
+ *
+ * @return fuse value: 0 or 1
+ */
+static inline int cvmx_fuse_read_node(u8 node, int fuse)
+{
+ return (cvmx_fuse_read_byte_node(node, fuse >> 3) >> (fuse & 0x7)) & 1;
+}
+
+/**
+ * Read a single fuse bit
+ *
+ * @param fuse Fuse number (0-1024)
+ *
+ * @return fuse value: 0 or 1
+ */
+static inline int cvmx_fuse_read(int fuse)
+{
+ return cvmx_fuse_read_node(0, fuse);
+}
+
+static inline int cvmx_octeon_fuse_locked(void)
+{
+ return cvmx_fuse_read(123);
+}
+
+#endif /* __CVMX_FUSE_H__ */
diff --git a/arch/mips/mach-octeon/include/mach/cvmx-regs.h b/arch/mips/mach-octeon/include/mach/cvmx-regs.h
new file mode 100644
index 0000000000..b84fc9fd57
--- /dev/null
+++ b/arch/mips/mach-octeon/include/mach/cvmx-regs.h
@@ -0,0 +1,144 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2020 Stefan Roese <sr@denx.de>
+ */
+
+#ifndef __CVMX_REGS_H__
+#define __CVMX_REGS_H__
+
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/io.h>
+
+/* General defines */
+#define CVMX_MAX_CORES 48
+/* Maximum # of bits to define core in node */
+#define CVMX_NODE_NO_SHIFT 7
+#define CVMX_NODE_BITS 2 /* Number of bits to define a node */
+#define CVMX_MAX_NODES (1 << CVMX_NODE_BITS)
+#define CVMX_NODE_MASK (CVMX_MAX_NODES - 1)
+#define CVMX_NODE_IO_SHIFT 36
+#define CVMX_NODE_MEM_SHIFT 40
+#define CVMX_NODE_IO_MASK ((u64)CVMX_NODE_MASK << CVMX_NODE_IO_SHIFT)
+
+#define CVMX_MIPS_MAX_CORE_BITS 10 /* Maximum # of bits to define cores */
+#define CVMX_MIPS_MAX_CORES (1 << CVMX_MIPS_MAX_CORE_BITS)
+
+#define MAX_CORE_TADS 8
+
+#define CAST_ULL(v) ((unsigned long long)(v))
+#define CASTPTR(type, v) ((type *)(long)(v))
+
+/* Regs */
+#define CVMX_CIU_PP_RST 0x0001010000000100ULL
+#define CVMX_CIU3_NMI 0x0001010000000160ULL
+#define CVMX_CIU_FUSE 0x00010100000001a0ULL
+#define CVMX_CIU_NMI 0x0001070000000718ULL
+
+#define CVMX_MIO_BOOT_LOC_CFGX(x) (0x0001180000000080ULL + ((x) & 1) * 8)
+#define MIO_BOOT_LOC_CFG_BASE GENMASK_ULL(27, 3)
+#define MIO_BOOT_LOC_CFG_EN BIT_ULL(31)
+
+#define CVMX_MIO_BOOT_LOC_ADR 0x0001180000000090ULL
+#define MIO_BOOT_LOC_ADR_ADR GENMASK_ULL(7, 3)
+
+#define CVMX_MIO_BOOT_LOC_DAT 0x0001180000000098ULL
+
+#define CVMX_MIO_FUS_DAT2 0x0001180000001410ULL
+#define MIO_FUS_DAT2_NOCRYPTO BIT_ULL(26)
+#define MIO_FUS_DAT2_NOMUL BIT_ULL(27)
+#define MIO_FUS_DAT2_DORM_CRYPTO BIT_ULL(34)
+
+#define CVMX_MIO_FUS_RCMD 0x0001180000001500ULL
+#define MIO_FUS_RCMD_ADDR GENMASK_ULL(7, 0)
+#define MIO_FUS_RCMD_PEND BIT_ULL(12)
+#define MIO_FUS_RCMD_DAT GENMASK_ULL(23, 16)
+
+#define CVMX_RNM_CTL_STATUS 0x0001180040000000ULL
+#define RNM_CTL_STATUS_EER_VAL BIT_ULL(9)
+
+/* turn the variable name into a string */
+#define CVMX_TMP_STR(x) CVMX_TMP_STR2(x)
+#define CVMX_TMP_STR2(x) #x
+
+#define CVMX_RDHWRNV(result, regstr) \
+ asm volatile ("rdhwr %[rt],$" CVMX_TMP_STR(regstr) : [rt] "=d" (result))
+
+#define CVMX_SYNCW \
+ asm volatile ("syncw\nsyncw\n" : : : "memory")
+
+/* ToDo: Currently only node = 0 supported */
+static inline u64 csr_rd_node(int node, u64 addr)
+{
+ void __iomem *base;
+
+ base = ioremap_nocache(addr, 0x100);
+ return ioread64(base);
+}
+
+static inline u64 csr_rd(u64 addr)
+{
+ return csr_rd_node(0, addr);
+}
+
+static inline void csr_wr_node(int node, u64 addr, u64 val)
+{
+ void __iomem *base;
+
+ base = ioremap_nocache(addr, 0x100);
+ iowrite64(val, base);
+}
+
+static inline void csr_wr(u64 addr, u64 val)
+{
+ csr_wr_node(0, addr, val);
+}
+
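+/*
+ * Example (illustrative): reading a CSR through the accessors above:
+ *
+ *	u64 fuse = csr_rd(CVMX_CIU_FUSE);
+ */
+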
+/*
+ * We need to use the volatile access here, otherwise the IO accessor
+ * functions might swap the bytes
+ */
+static inline u64 cvmx_read64_uint64(u64 addr)
+{
+ return *(volatile u64 *)addr;
+}
+
+static inline void cvmx_write64_uint64(u64 addr, u64 val)
+{
+ *(volatile u64 *)addr = val;
+}
+
+static inline u32 cvmx_read64_uint32(u64 addr)
+{
+ return *(volatile u32 *)addr;
+}
+
+static inline void cvmx_write64_uint32(u64 addr, u32 val)
+{
+ *(volatile u32 *)addr = val;
+}
+
+static inline void *cvmx_phys_to_ptr(u64 addr)
+{
+ return (void *)CKSEG0ADDR(addr);
+}
+
+static inline u64 cvmx_ptr_to_phys(void *ptr)
+{
+ return virt_to_phys(ptr);
+}
+
+/**
+ * Returns the number of the core on which the program is currently running.
+ *
+ * @return core number
+ */
+static inline unsigned int cvmx_get_core_num(void)
+{
+ unsigned int core_num;
+
+ CVMX_RDHWRNV(core_num, 0);
+ return core_num;
+}
+
+#endif /* __CVMX_REGS_H__ */
diff --git a/arch/mips/mach-octeon/include/mach/cvmx/cvmx-lmcx-defs.h b/arch/mips/mach-octeon/include/mach/cvmx/cvmx-lmcx-defs.h
new file mode 100644
index 0000000000..3b4cba9241
--- /dev/null
+++ b/arch/mips/mach-octeon/include/mach/cvmx/cvmx-lmcx-defs.h
@@ -0,0 +1,4574 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020 Marvell International Ltd.
+ */
+
+#ifndef __CVMX_LMCX_DEFS_H__
+#define __CVMX_LMCX_DEFS_H__
+
+#define CVMX_LMCX_BANK_CONFLICT1(offs) \
+ ((0x000360ull) + ((offs) & 3) * 0x1000000ull)
+#define CVMX_LMCX_BANK_CONFLICT2(offs) \
+ ((0x000368ull) + ((offs) & 3) * 0x1000000ull)
+#define CVMX_LMCX_BIST_RESULT(offs) \
+ ((0x0000F8ull) + ((offs) & 1) * 0x60000000ull)
+#define CVMX_LMCX_CHAR_CTL(offs) \
+ ((0x000220ull) + ((offs) & 3) * 0x1000000ull)
+#define CVMX_LMCX_CHAR_DQ_ERR_COUNT(offs) \
+ ((0x000040ull) + ((offs) & 3) * 0x1000000ull)
+#define CVMX_LMCX_CHAR_MASK0(offs) \
+ ((0x000228ull) + ((offs) & 3) * 0x1000000ull)
+#define CVMX_LMCX_CHAR_MASK1(offs) \
+ ((0x000230ull) + ((offs) & 3) * 0x1000000ull)
+#define CVMX_LMCX_CHAR_MASK2(offs) \
+ ((0x000238ull) + ((offs) & 3) * 0x1000000ull)
+#define CVMX_LMCX_CHAR_MASK3(offs) \
+ ((0x000240ull) + ((offs) & 3) * 0x1000000ull)
+#define CVMX_LMCX_CHAR_MASK4(offs) \
+ ((0x000318ull) + ((offs) & 3) * 0x1000000ull)
+#define CVMX_LMCX_COMP_CTL(offs) \
+ ((0x000028ull) + ((offs) & 1) * 0x60000000ull)
+#define CVMX_LMCX_COMP_CTL2(offs) \
+ ((0x0001B8ull) + ((offs) & 3) * 0x1000000ull)
+#define CVMX_LMCX_CONFIG(offs) \
+ ((0x000188ull) + ((offs) & 3) * 0x1000000ull)
+#define CVMX_LMCX_CONTROL(offs) \
+ ((0x000190ull) + ((offs) & 3) * 0x1000000ull)
+#define CVMX_LMCX_CTL(offs) \
+ ((0x000010ull) + ((offs) & 1) * 0x60000000ull)
+#define CVMX_LMCX_CTL1(offs) \
+ ((0x000090ull) + ((offs) & 1) * 0x60000000ull)
+#define CVMX_LMCX_DBTRAIN_CTL(offs) \
+ ((0x0003F8ull) + ((offs) & 3) * 0x1000000ull)
+#define CVMX_LMCX_DCLK_CNT(offs) \
+ ((0x0001E0ull) + ((offs) & 3) * 0x1000000ull)
+#define CVMX_LMCX_DCLK_CNT_HI(offs) \
+ ((0x000070ull) + ((offs) & 1) * 0x60000000ull)
+#define CVMX_LMCX_DCLK_CNT_LO(offs) \
+ ((0x000068ull) + ((offs) & 1) * 0x60000000ull)
+#define CVMX_LMCX_DCLK_CTL(offs) \
+ ((0x0000B8ull) + ((offs) & 1) * 0x60000000ull)
+#define CVMX_LMCX_DDR2_CTL(offs) \
+ ((0x000018ull) + ((offs) & 1) * 0x60000000ull)
+#define CVMX_LMCX_DDR4_DIMM_CTL(offs) \
+ ((0x0003F0ull) + ((offs) & 3) * 0x1000000ull)
+#define CVMX_LMCX_DDR_PLL_CTL(offs) \
+ ((0x000258ull) + ((offs) & 3) * 0x1000000ull)
+#define CVMX_LMCX_DELAY_CFG(offs) \
+ ((0x000088ull) + ((offs) & 1) * 0x60000000ull)
+#define CVMX_LMCX_DIMMX_DDR4_PARAMS0(offs, id) \
+ ((0x0000D0ull) + (((offs) & 1) + ((id) & 3) * 0x200000ull) * 8)
+#define CVMX_LMCX_DIMMX_DDR4_PARAMS1(offs, id) \
+ ((0x000140ull) + (((offs) & 1) + ((id) & 3) * 0x200000ull) * 8)
+#define CVMX_LMCX_DIMMX_PARAMS(offs, id) \
+ ((0x000270ull) + (((offs) & 1) + ((id) & 3) * 0x200000ull) * 8)
+#define CVMX_LMCX_DIMM_CTL(offs) \
+ ((0x000310ull) + ((offs) & 3) * 0x1000000ull)
+#define CVMX_LMCX_DLL_CTL(offs) \
+ ((0x0000C0ull) + ((offs) & 1) * 0x60000000ull)
+#define CVMX_LMCX_DLL_CTL2(offs) \
+ ((0x0001C8ull) + ((offs) & 3) * 0x1000000ull)
+#define CVMX_LMCX_DLL_CTL3(offs) \
+ ((0x000218ull) + ((offs) & 3) * 0x1000000ull)
+#define CVMX_LMCX_ECC_PARITY_TEST(offs) \
+ ((0x000108ull) + ((offs) & 3) * 0x1000000ull)
+#define CVMX_LMCX_EXT_CONFIG(offs) \
+ ((0x000030ull) + ((offs) & 3) * 0x1000000ull)
+#define CVMX_LMCX_EXT_CONFIG2(offs) \
+ ((0x000090ull) + ((offs) & 3) * 0x1000000ull)
+#define CVMX_LMCX_GENERAL_PURPOSE0(offs) \
+ ((0x000340ull) + ((offs) & 3) * 0x1000000ull)
+#define CVMX_LMCX_GENERAL_PURPOSE1(offs) \
+ ((0x000348ull) + ((offs) & 3) * 0x1000000ull)
+#define CVMX_LMCX_GENERAL_PURPOSE2(offs) \
+ ((0x000350ull) + ((offs) & 3) * 0x1000000ull)
+#define CVMX_LMCX_IFB_CNT(offs) \
+ ((0x0001D0ull) + ((offs) & 3) * 0x1000000ull)
+#define CVMX_LMCX_IFB_CNT_HI(offs) \
+ ((0x000050ull) + ((offs) & 1) * 0x60000000ull)
+#define CVMX_LMCX_IFB_CNT_LO(offs) \
+ ((0x000048ull) + ((offs) & 1) * 0x60000000ull)
+#define CVMX_LMCX_INT(offs) \
+ ((0x0001F0ull) + ((offs) & 3) * 0x1000000ull)
+#define CVMX_LMCX_INT_EN(offs) \
+ ((0x0001E8ull) + ((offs) & 3) * 0x1000000ull)
+#define CVMX_LMCX_LANEX_CRC_SWIZ(offs, id) \
+	((0x000380ull) + (((offs) & 15) + ((id) & 3) * 0x200000ull) * 8)
+#define CVMX_LMCX_MEM_CFG0(offs) \
+ ((0x000000ull) + ((offs) & 1) * 0x60000000ull)
+#define CVMX_LMCX_MEM_CFG1(offs) \
+ ((0x000008ull) + ((offs) & 1) * 0x60000000ull)
+#define CVMX_LMCX_MODEREG_PARAMS0(offs) \
+ ((0x0001A8ull) + ((offs) & 3) * 0x1000000ull)
+#define CVMX_LMCX_MODEREG_PARAMS1(offs) \
+ ((0x000260ull) + ((offs) & 3) * 0x1000000ull)
+#define CVMX_LMCX_MODEREG_PARAMS2(offs) \
+ ((0x000050ull) + ((offs) & 3) * 0x1000000ull)
+#define CVMX_LMCX_MODEREG_PARAMS3(offs) \
+ ((0x000058ull) + ((offs) & 3) * 0x1000000ull)
+#define CVMX_LMCX_MPR_DATA0(offs) \
+ ((0x000070ull) + ((offs) & 3) * 0x1000000ull)
+#define CVMX_LMCX_MPR_DATA1(offs) \
+ ((0x000078ull) + ((offs) & 3) * 0x1000000ull)
+#define CVMX_LMCX_MPR_DATA2(offs) \
+ ((0x000080ull) + ((offs) & 3) * 0x1000000ull)
+#define CVMX_LMCX_MR_MPR_CTL(offs) \
+ ((0x000068ull) + ((offs) & 3) * 0x1000000ull)
+#define CVMX_LMCX_NS_CTL(offs) \
+ ((0x000178ull) + ((offs) & 3) * 0x1000000ull)
+
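+/*
+ * Note (illustrative): most offsets above use a 0x1000000 stride per
+ * LMC interface, e.g. CVMX_LMCX_CONFIG(0) == 0x000188 while
+ * CVMX_LMCX_CONFIG(1) == 0x1000188.
+ */
+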
+static inline uint64_t CVMX_LMCX_NXM(unsigned long offs)
+{
+ switch (cvmx_get_octeon_family()) {
+ case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN70XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ return (0x0000C8ull) + (offs) * 0x60000000ull;
+ case OCTEON_CNF75XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN73XX & OCTEON_FAMILY_MASK:
+ return (0x0000C8ull) + (offs) * 0x1000000ull;
+ case OCTEON_CN78XX & OCTEON_FAMILY_MASK:
+ if (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_X))
+ return (0x0000C8ull) + (offs) * 0x1000000ull;
+ if (OCTEON_IS_MODEL(OCTEON_CN78XX))
+ return (0x0000C8ull) + (offs) * 0x1000000ull;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ return (0x0000C8ull) + (offs) * 0x1000000ull;
+ }
+ return (0x0000C8ull) + (offs) * 0x1000000ull;
+}
+
+#define CVMX_LMCX_NXM_FADR(offs) \
+ ((0x000028ull) + ((offs) & 3) * 0x1000000ull)
+#define CVMX_LMCX_OPS_CNT(offs) \
+ ((0x0001D8ull) + ((offs) & 3) * 0x1000000ull)
+#define CVMX_LMCX_OPS_CNT_HI(offs) \
+ ((0x000060ull) + ((offs) & 1) * 0x60000000ull)
+#define CVMX_LMCX_OPS_CNT_LO(offs) \
+ ((0x000058ull) + ((offs) & 1) * 0x60000000ull)
+#define CVMX_LMCX_PHY_CTL(offs) \
+ ((0x000210ull) + ((offs) & 3) * 0x1000000ull)
+#define CVMX_LMCX_PHY_CTL2(offs) \
+ ((0x000250ull) + ((offs) & 3) * 0x1000000ull)
+#define CVMX_LMCX_PLL_BWCTL(offs) \
+ ((0x000040ull))
+#define CVMX_LMCX_PLL_CTL(offs) \
+ ((0x0000A8ull) + ((offs) & 1) * 0x60000000ull)
+#define CVMX_LMCX_PLL_STATUS(offs) \
+ ((0x0000B0ull) + ((offs) & 1) * 0x60000000ull)
+#define CVMX_LMCX_PPR_CTL(offs) \
+ ((0x0003E0ull) + ((offs) & 3) * 0x1000000ull)
+#define CVMX_LMCX_READ_LEVEL_CTL(offs) \
+ ((0x000140ull) + ((offs) & 1) * 0x60000000ull)
+#define CVMX_LMCX_READ_LEVEL_DBG(offs) \
+ ((0x000148ull) + ((offs) & 1) * 0x60000000ull)
+#define CVMX_LMCX_READ_LEVEL_RANKX(offs, id) \
+ ((0x000100ull) + (((offs) & 3) + ((id) & 1) * 0xC000000ull) * 8)
+#define CVMX_LMCX_REF_STATUS(offs) \
+ ((0x0000A0ull) + ((offs) & 3) * 0x1000000ull)
+#define CVMX_LMCX_RESET_CTL(offs) \
+ ((0x000180ull) + ((offs) & 3) * 0x1000000ull)
+#define CVMX_LMCX_RETRY_CONFIG(offs) \
+ ((0x000110ull) + ((offs) & 3) * 0x1000000ull)
+#define CVMX_LMCX_RETRY_STATUS(offs) \
+ ((0x000118ull) + ((offs) & 3) * 0x1000000ull)
+#define CVMX_LMCX_RLEVEL_CTL(offs) \
+ ((0x0002A0ull) + ((offs) & 3) * 0x1000000ull)
+#define CVMX_LMCX_RLEVEL_DBG(offs) \
+ ((0x0002A8ull) + ((offs) & 3) * 0x1000000ull)
+#define CVMX_LMCX_RLEVEL_RANKX(offs, id) \
+ ((0x000280ull) + (((offs) & 3) + ((id) & 3) * 0x200000ull) * 8)
+#define CVMX_LMCX_RODT_COMP_CTL(offs) \
+ ((0x0000A0ull) + ((offs) & 1) * 0x60000000ull)
+#define CVMX_LMCX_RODT_CTL(offs) \
+ ((0x000078ull) + ((offs) & 1) * 0x60000000ull)
+#define CVMX_LMCX_RODT_MASK(offs) \
+ ((0x000268ull) + ((offs) & 3) * 0x1000000ull)
+#define CVMX_LMCX_SCRAMBLED_FADR(offs) \
+ ((0x000330ull) + ((offs) & 3) * 0x1000000ull)
+#define CVMX_LMCX_SCRAMBLE_CFG0(offs) \
+ ((0x000320ull) + ((offs) & 3) * 0x1000000ull)
+#define CVMX_LMCX_SCRAMBLE_CFG1(offs) \
+ ((0x000328ull) + ((offs) & 3) * 0x1000000ull)
+#define CVMX_LMCX_SCRAMBLE_CFG2(offs) \
+ ((0x000338ull) + ((offs) & 3) * 0x1000000ull)
+#define CVMX_LMCX_SEQ_CTL(offs) \
+ ((0x000048ull) + ((offs) & 3) * 0x1000000ull)
+#define CVMX_LMCX_SLOT_CTL0(offs) \
+ ((0x0001F8ull) + ((offs) & 3) * 0x1000000ull)
+#define CVMX_LMCX_SLOT_CTL1(offs) \
+ ((0x000200ull) + ((offs) & 3) * 0x1000000ull)
+#define CVMX_LMCX_SLOT_CTL2(offs) \
+ ((0x000208ull) + ((offs) & 3) * 0x1000000ull)
+#define CVMX_LMCX_SLOT_CTL3(offs) \
+ ((0x000248ull) + ((offs) & 3) * 0x1000000ull)
+#define CVMX_LMCX_TIMING_PARAMS0(offs) \
+ ((0x000198ull) + ((offs) & 3) * 0x1000000ull)
+#define CVMX_LMCX_TIMING_PARAMS1(offs) \
+ ((0x0001A0ull) + ((offs) & 3) * 0x1000000ull)
+#define CVMX_LMCX_TIMING_PARAMS2(offs) \
+ ((0x000060ull) + ((offs) & 3) * 0x1000000ull)
+#define CVMX_LMCX_TRO_CTL(offs) \
+ ((0x000248ull) + ((offs) & 3) * 0x1000000ull)
+#define CVMX_LMCX_TRO_STAT(offs) \
+ ((0x000250ull) + ((offs) & 3) * 0x1000000ull)
+#define CVMX_LMCX_WLEVEL_CTL(offs) \
+ ((0x000300ull) + ((offs) & 3) * 0x1000000ull)
+#define CVMX_LMCX_WLEVEL_DBG(offs) \
+ ((0x000308ull) + ((offs) & 3) * 0x1000000ull)
+
+static inline uint64_t CVMX_LMCX_WLEVEL_RANKX(unsigned long offs,
+ unsigned long id)
+{
+ switch (cvmx_get_octeon_family()) {
+ case OCTEON_CN70XX & OCTEON_FAMILY_MASK:
+ return (0x0002C0ull) + ((offs) + (id) * 0x200000ull) * 8;
+ case OCTEON_CNF75XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN73XX & OCTEON_FAMILY_MASK:
+ return (0x0002C0ull) + ((offs) + (id) * 0x200000ull) * 8;
+ case OCTEON_CN78XX & OCTEON_FAMILY_MASK:
+ if (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_X))
+ return (0x0002C0ull) + ((offs) +
+ (id) * 0x200000ull) * 8;
+ if (OCTEON_IS_MODEL(OCTEON_CN78XX))
+ return (0x0002C0ull) + ((offs) +
+ (id) * 0x200000ull) * 8;
+
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ return (0x0002B0ull) + ((offs) + (id) * 0x0ull) * 8;
+ case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ return (0x0002B0ull) + ((offs) + (id) * 0x200000ull) * 8;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ return (0x0002B0ull) + ((offs) + (id) * 0x200000ull) * 8;
+ }
+ return (0x0002C0ull) + ((offs) + (id) * 0x200000ull) * 8;
+}
+
+#define CVMX_LMCX_WODT_CTL0(offs) \
+ ((0x000030ull) + ((offs) & 1) * 0x60000000ull)
+#define CVMX_LMCX_WODT_CTL1(offs) \
+ ((0x000080ull) + ((offs) & 1) * 0x60000000ull)
+#define CVMX_LMCX_WODT_MASK(offs) \
+ ((0x0001B0ull) + ((offs) & 3) * 0x1000000ull)
+
+/**
+ * cvmx_lmc#_char_ctl
+ *
+ * This register provides an assortment of various control fields needed
+ * to characterize the DDR3 interface.
+ */
+union cvmx_lmcx_char_ctl {
+ u64 u64;
+ struct cvmx_lmcx_char_ctl_s {
+ uint64_t reserved_54_63:10;
+ uint64_t dq_char_byte_check:1;
+ uint64_t dq_char_check_lock:1;
+ uint64_t dq_char_check_enable:1;
+ uint64_t dq_char_bit_sel:3;
+ uint64_t dq_char_byte_sel:4;
+ uint64_t dr:1;
+ uint64_t skew_on:1;
+ uint64_t en:1;
+ uint64_t sel:1;
+ uint64_t prog:8;
+ uint64_t prbs:32;
+ } s;
+ struct cvmx_lmcx_char_ctl_cn61xx {
+ uint64_t reserved_44_63:20;
+ uint64_t dr:1;
+ uint64_t skew_on:1;
+ uint64_t en:1;
+ uint64_t sel:1;
+ uint64_t prog:8;
+ uint64_t prbs:32;
+ } cn61xx;
+ struct cvmx_lmcx_char_ctl_cn63xx {
+ uint64_t reserved_42_63:22;
+ uint64_t en:1;
+ uint64_t sel:1;
+ uint64_t prog:8;
+ uint64_t prbs:32;
+ } cn63xx;
+ struct cvmx_lmcx_char_ctl_cn63xx cn63xxp1;
+ struct cvmx_lmcx_char_ctl_cn61xx cn66xx;
+ struct cvmx_lmcx_char_ctl_cn61xx cn68xx;
+ struct cvmx_lmcx_char_ctl_cn63xx cn68xxp1;
+ struct cvmx_lmcx_char_ctl_cn70xx {
+ uint64_t reserved_53_63:11;
+ uint64_t dq_char_check_lock:1;
+ uint64_t dq_char_check_enable:1;
+ uint64_t dq_char_bit_sel:3;
+ uint64_t dq_char_byte_sel:4;
+ uint64_t dr:1;
+ uint64_t skew_on:1;
+ uint64_t en:1;
+ uint64_t sel:1;
+ uint64_t prog:8;
+ uint64_t prbs:32;
+ } cn70xx;
+ struct cvmx_lmcx_char_ctl_cn70xx cn70xxp1;
+ struct cvmx_lmcx_char_ctl_s cn73xx;
+ struct cvmx_lmcx_char_ctl_s cn78xx;
+ struct cvmx_lmcx_char_ctl_s cn78xxp1;
+ struct cvmx_lmcx_char_ctl_cn61xx cnf71xx;
+ struct cvmx_lmcx_char_ctl_s cnf75xx;
+};
+
+/**
+ * cvmx_lmc#_comp_ctl2
+ *
+ * LMC_COMP_CTL2 = LMC Compensation control
+ *
+ */
+union cvmx_lmcx_comp_ctl2 {
+ u64 u64;
+ struct cvmx_lmcx_comp_ctl2_s {
+ uint64_t reserved_51_63:13;
+ uint64_t rclk_char_mode:1;
+ uint64_t reserved_40_49:10;
+ uint64_t ptune_offset:4;
+ uint64_t reserved_12_35:24;
+ uint64_t cmd_ctl:4;
+ uint64_t ck_ctl:4;
+ uint64_t dqx_ctl:4;
+ } s;
+ struct cvmx_lmcx_comp_ctl2_cn61xx {
+ uint64_t reserved_34_63:30;
+ uint64_t ddr__ptune:4;
+ uint64_t ddr__ntune:4;
+ uint64_t m180:1;
+ uint64_t byp:1;
+ uint64_t ptune:4;
+ uint64_t ntune:4;
+ uint64_t rodt_ctl:4;
+ uint64_t cmd_ctl:4;
+ uint64_t ck_ctl:4;
+ uint64_t dqx_ctl:4;
+ } cn61xx;
+ struct cvmx_lmcx_comp_ctl2_cn61xx cn63xx;
+ struct cvmx_lmcx_comp_ctl2_cn61xx cn63xxp1;
+ struct cvmx_lmcx_comp_ctl2_cn61xx cn66xx;
+ struct cvmx_lmcx_comp_ctl2_cn61xx cn68xx;
+ struct cvmx_lmcx_comp_ctl2_cn61xx cn68xxp1;
+ struct cvmx_lmcx_comp_ctl2_cn70xx {
+ uint64_t reserved_51_63:13;
+ uint64_t rclk_char_mode:1;
+ uint64_t ddr__ptune:5;
+ uint64_t ddr__ntune:5;
+ uint64_t ptune_offset:4;
+ uint64_t ntune_offset:4;
+ uint64_t m180:1;
+ uint64_t byp:1;
+ uint64_t ptune:5;
+ uint64_t ntune:5;
+ uint64_t rodt_ctl:4;
+ uint64_t control_ctl:4;
+ uint64_t cmd_ctl:4;
+ uint64_t ck_ctl:4;
+ uint64_t dqx_ctl:4;
+ } cn70xx;
+ struct cvmx_lmcx_comp_ctl2_cn70xx cn70xxp1;
+ struct cvmx_lmcx_comp_ctl2_cn70xx cn73xx;
+ struct cvmx_lmcx_comp_ctl2_cn70xx cn78xx;
+ struct cvmx_lmcx_comp_ctl2_cn70xx cn78xxp1;
+ struct cvmx_lmcx_comp_ctl2_cn61xx cnf71xx;
+ struct cvmx_lmcx_comp_ctl2_cn70xx cnf75xx;
+};
+
+/**
+ * cvmx_lmc#_config
+ *
+ * This register controls certain parameters required for memory configuration.
+ * Note the following:
+ * * Priority order for hardware write operations to
+ * LMC()_CONFIG/LMC()_FADR/LMC()_ECC_SYND: DED error > SEC error.
+ * * The self-refresh entry sequence(s) power the DLL up/down (depending on
+ * LMC()_MODEREG_PARAMS0[DLL]) when LMC()_CONFIG[SREF_WITH_DLL] is set.
+ * * Prior to the self-refresh exit sequence, LMC()_MODEREG_PARAMS0 should
+ * be reprogrammed
+ * (if needed) to the appropriate values.
+ *
+ * See LMC initialization sequence for the LMC bringup sequence.
+ */
+union cvmx_lmcx_config {
+ u64 u64;
+ struct cvmx_lmcx_config_s {
+ uint64_t lrdimm_ena:1;
+ uint64_t bg2_enable:1;
+ uint64_t mode_x4dev:1;
+ uint64_t mode32b:1;
+ uint64_t scrz:1;
+ uint64_t early_unload_d1_r1:1;
+ uint64_t early_unload_d1_r0:1;
+ uint64_t early_unload_d0_r1:1;
+ uint64_t early_unload_d0_r0:1;
+ uint64_t init_status:4;
+ uint64_t mirrmask:4;
+ uint64_t rankmask:4;
+ uint64_t rank_ena:1;
+ uint64_t sref_with_dll:1;
+ uint64_t early_dqx:1;
+ uint64_t reserved_18_39:22;
+ uint64_t reset:1;
+ uint64_t ecc_adr:1;
+ uint64_t forcewrite:4;
+ uint64_t idlepower:3;
+ uint64_t pbank_lsb:4;
+ uint64_t row_lsb:3;
+ uint64_t ecc_ena:1;
+ uint64_t init_start:1;
+ } s;
+ struct cvmx_lmcx_config_cn61xx {
+ uint64_t reserved_61_63:3;
+ uint64_t mode32b:1;
+ uint64_t scrz:1;
+ uint64_t early_unload_d1_r1:1;
+ uint64_t early_unload_d1_r0:1;
+ uint64_t early_unload_d0_r1:1;
+ uint64_t early_unload_d0_r0:1;
+ uint64_t init_status:4;
+ uint64_t mirrmask:4;
+ uint64_t rankmask:4;
+ uint64_t rank_ena:1;
+ uint64_t sref_with_dll:1;
+ uint64_t early_dqx:1;
+ uint64_t sequence:3;
+ uint64_t ref_zqcs_int:19;
+ uint64_t reset:1;
+ uint64_t ecc_adr:1;
+ uint64_t forcewrite:4;
+ uint64_t idlepower:3;
+ uint64_t pbank_lsb:4;
+ uint64_t row_lsb:3;
+ uint64_t ecc_ena:1;
+ uint64_t init_start:1;
+ } cn61xx;
+ struct cvmx_lmcx_config_cn63xx {
+ uint64_t reserved_59_63:5;
+ uint64_t early_unload_d1_r1:1;
+ uint64_t early_unload_d1_r0:1;
+ uint64_t early_unload_d0_r1:1;
+ uint64_t early_unload_d0_r0:1;
+ uint64_t init_status:4;
+ uint64_t mirrmask:4;
+ uint64_t rankmask:4;
+ uint64_t rank_ena:1;
+ uint64_t sref_with_dll:1;
+ uint64_t early_dqx:1;
+ uint64_t sequence:3;
+ uint64_t ref_zqcs_int:19;
+ uint64_t reset:1;
+ uint64_t ecc_adr:1;
+ uint64_t forcewrite:4;
+ uint64_t idlepower:3;
+ uint64_t pbank_lsb:4;
+ uint64_t row_lsb:3;
+ uint64_t ecc_ena:1;
+ uint64_t init_start:1;
+ } cn63xx;
+ struct cvmx_lmcx_config_cn63xxp1 {
+ uint64_t reserved_55_63:9;
+ uint64_t init_status:4;
+ uint64_t mirrmask:4;
+ uint64_t rankmask:4;
+ uint64_t rank_ena:1;
+ uint64_t sref_with_dll:1;
+ uint64_t early_dqx:1;
+ uint64_t sequence:3;
+ uint64_t ref_zqcs_int:19;
+ uint64_t reset:1;
+ uint64_t ecc_adr:1;
+ uint64_t forcewrite:4;
+ uint64_t idlepower:3;
+ uint64_t pbank_lsb:4;
+ uint64_t row_lsb:3;
+ uint64_t ecc_ena:1;
+ uint64_t init_start:1;
+ } cn63xxp1;
+ struct cvmx_lmcx_config_cn66xx {
+ uint64_t reserved_60_63:4;
+ uint64_t scrz:1;
+ uint64_t early_unload_d1_r1:1;
+ uint64_t early_unload_d1_r0:1;
+ uint64_t early_unload_d0_r1:1;
+ uint64_t early_unload_d0_r0:1;
+ uint64_t init_status:4;
+ uint64_t mirrmask:4;
+ uint64_t rankmask:4;
+ uint64_t rank_ena:1;
+ uint64_t sref_with_dll:1;
+ uint64_t early_dqx:1;
+ uint64_t sequence:3;
+ uint64_t ref_zqcs_int:19;
+ uint64_t reset:1;
+ uint64_t ecc_adr:1;
+ uint64_t forcewrite:4;
+ uint64_t idlepower:3;
+ uint64_t pbank_lsb:4;
+ uint64_t row_lsb:3;
+ uint64_t ecc_ena:1;
+ uint64_t init_start:1;
+ } cn66xx;
+ struct cvmx_lmcx_config_cn63xx cn68xx;
+ struct cvmx_lmcx_config_cn63xx cn68xxp1;
+ struct cvmx_lmcx_config_cn70xx {
+ uint64_t reserved_63_63:1;
+ uint64_t bg2_enable:1;
+ uint64_t mode_x4dev:1;
+ uint64_t mode32b:1;
+ uint64_t scrz:1;
+ uint64_t early_unload_d1_r1:1;
+ uint64_t early_unload_d1_r0:1;
+ uint64_t early_unload_d0_r1:1;
+ uint64_t early_unload_d0_r0:1;
+ uint64_t init_status:4;
+ uint64_t mirrmask:4;
+ uint64_t rankmask:4;
+ uint64_t rank_ena:1;
+ uint64_t sref_with_dll:1;
+ uint64_t early_dqx:1;
+ uint64_t ref_zqcs_int:22;
+ uint64_t reset:1;
+ uint64_t ecc_adr:1;
+ uint64_t forcewrite:4;
+ uint64_t idlepower:3;
+ uint64_t pbank_lsb:4;
+ uint64_t row_lsb:3;
+ uint64_t ecc_ena:1;
+ uint64_t reserved_0_0:1;
+ } cn70xx;
+ struct cvmx_lmcx_config_cn70xx cn70xxp1;
+ struct cvmx_lmcx_config_cn73xx {
+ uint64_t lrdimm_ena:1;
+ uint64_t bg2_enable:1;
+ uint64_t mode_x4dev:1;
+ uint64_t mode32b:1;
+ uint64_t scrz:1;
+ uint64_t early_unload_d1_r1:1;
+ uint64_t early_unload_d1_r0:1;
+ uint64_t early_unload_d0_r1:1;
+ uint64_t early_unload_d0_r0:1;
+ uint64_t init_status:4;
+ uint64_t mirrmask:4;
+ uint64_t rankmask:4;
+ uint64_t rank_ena:1;
+ uint64_t sref_with_dll:1;
+ uint64_t early_dqx:1;
+ uint64_t ref_zqcs_int:22;
+ uint64_t reset:1;
+ uint64_t ecc_adr:1;
+ uint64_t forcewrite:4;
+ uint64_t idlepower:3;
+ uint64_t pbank_lsb:4;
+ uint64_t row_lsb:3;
+ uint64_t ecc_ena:1;
+ uint64_t reserved_0_0:1;
+ } cn73xx;
+ struct cvmx_lmcx_config_cn73xx cn78xx;
+ struct cvmx_lmcx_config_cn73xx cn78xxp1;
+ struct cvmx_lmcx_config_cn61xx cnf71xx;
+ struct cvmx_lmcx_config_cn73xx cnf75xx;
+};
+
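+/*
+ * Example (illustrative): registers of this style are read as a raw
+ * u64 and decoded through one of the bitfield views (the CSR address
+ * passed to csr_rd is elided here):
+ *
+ *	union cvmx_lmcx_config cfg;
+ *
+ *	cfg.u64 = csr_rd(...);
+ *	if (cfg.s.ecc_ena)
+ *		...;	// ECC is enabled on this interface
+ */
+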
+/**
+ * cvmx_lmc#_control
+ *
+ * LMC_CONTROL = LMC Control
+ * This register is an assortment of various control fields needed by the
+ * memory controller
+ */
+union cvmx_lmcx_control {
+ u64 u64;
+ struct cvmx_lmcx_control_s {
+ uint64_t scramble_ena:1;
+ uint64_t thrcnt:12;
+ uint64_t persub:8;
+ uint64_t thrmax:4;
+ uint64_t crm_cnt:5;
+ uint64_t crm_thr:5;
+ uint64_t crm_max:5;
+ uint64_t rodt_bprch:1;
+ uint64_t wodt_bprch:1;
+ uint64_t bprch:2;
+ uint64_t ext_zqcs_dis:1;
+ uint64_t int_zqcs_dis:1;
+ uint64_t auto_dclkdis:1;
+ uint64_t xor_bank:1;
+ uint64_t max_write_batch:4;
+ uint64_t nxm_write_en:1;
+ uint64_t elev_prio_dis:1;
+ uint64_t inorder_wr:1;
+ uint64_t inorder_rd:1;
+ uint64_t throttle_wr:1;
+ uint64_t throttle_rd:1;
+ uint64_t fprch2:2;
+ uint64_t pocas:1;
+ uint64_t ddr2t:1;
+ uint64_t bwcnt:1;
+ uint64_t rdimm_ena:1;
+ } s;
+ struct cvmx_lmcx_control_s cn61xx;
+ struct cvmx_lmcx_control_cn63xx {
+ uint64_t reserved_24_63:40;
+ uint64_t rodt_bprch:1;
+ uint64_t wodt_bprch:1;
+ uint64_t bprch:2;
+ uint64_t ext_zqcs_dis:1;
+ uint64_t int_zqcs_dis:1;
+ uint64_t auto_dclkdis:1;
+ uint64_t xor_bank:1;
+ uint64_t max_write_batch:4;
+ uint64_t nxm_write_en:1;
+ uint64_t elev_prio_dis:1;
+ uint64_t inorder_wr:1;
+ uint64_t inorder_rd:1;
+ uint64_t throttle_wr:1;
+ uint64_t throttle_rd:1;
+ uint64_t fprch2:2;
+ uint64_t pocas:1;
+ uint64_t ddr2t:1;
+ uint64_t bwcnt:1;
+ uint64_t rdimm_ena:1;
+ } cn63xx;
+ struct cvmx_lmcx_control_cn63xx cn63xxp1;
+ struct cvmx_lmcx_control_cn66xx {
+ uint64_t scramble_ena:1;
+ uint64_t reserved_24_62:39;
+ uint64_t rodt_bprch:1;
+ uint64_t wodt_bprch:1;
+ uint64_t bprch:2;
+ uint64_t ext_zqcs_dis:1;
+ uint64_t int_zqcs_dis:1;
+ uint64_t auto_dclkdis:1;
+ uint64_t xor_bank:1;
+ uint64_t max_write_batch:4;
+ uint64_t nxm_write_en:1;
+ uint64_t elev_prio_dis:1;
+ uint64_t inorder_wr:1;
+ uint64_t inorder_rd:1;
+ uint64_t throttle_wr:1;
+ uint64_t throttle_rd:1;
+ uint64_t fprch2:2;
+ uint64_t pocas:1;
+ uint64_t ddr2t:1;
+ uint64_t bwcnt:1;
+ uint64_t rdimm_ena:1;
+ } cn66xx;
+ struct cvmx_lmcx_control_cn68xx {
+ uint64_t reserved_63_63:1;
+ uint64_t thrcnt:12;
+ uint64_t persub:8;
+ uint64_t thrmax:4;
+ uint64_t crm_cnt:5;
+ uint64_t crm_thr:5;
+ uint64_t crm_max:5;
+ uint64_t rodt_bprch:1;
+ uint64_t wodt_bprch:1;
+ uint64_t bprch:2;
+ uint64_t ext_zqcs_dis:1;
+ uint64_t int_zqcs_dis:1;
+ uint64_t auto_dclkdis:1;
+ uint64_t xor_bank:1;
+ uint64_t max_write_batch:4;
+ uint64_t nxm_write_en:1;
+ uint64_t elev_prio_dis:1;
+ uint64_t inorder_wr:1;
+ uint64_t inorder_rd:1;
+ uint64_t throttle_wr:1;
+ uint64_t throttle_rd:1;
+ uint64_t fprch2:2;
+ uint64_t pocas:1;
+ uint64_t ddr2t:1;
+ uint64_t bwcnt:1;
+ uint64_t rdimm_ena:1;
+ } cn68xx;
+ struct cvmx_lmcx_control_cn68xx cn68xxp1;
+ struct cvmx_lmcx_control_s cn70xx;
+ struct cvmx_lmcx_control_s cn70xxp1;
+ struct cvmx_lmcx_control_s cn73xx;
+ struct cvmx_lmcx_control_s cn78xx;
+ struct cvmx_lmcx_control_s cn78xxp1;
+ struct cvmx_lmcx_control_cn66xx cnf71xx;
+ struct cvmx_lmcx_control_s cnf75xx;
+};
+
+/**
+ * cvmx_lmc#_ctl
+ *
+ * LMC_CTL = LMC Control
+ * This register is an assortment of various control fields needed by the
+ * memory controller
+ */
+union cvmx_lmcx_ctl {
+ u64 u64;
+ struct cvmx_lmcx_ctl_s {
+ uint64_t reserved_32_63:32;
+ uint64_t ddr__nctl:4;
+ uint64_t ddr__pctl:4;
+ uint64_t slow_scf:1;
+ uint64_t xor_bank:1;
+ uint64_t max_write_batch:4;
+ uint64_t pll_div2:1;
+ uint64_t pll_bypass:1;
+ uint64_t rdimm_ena:1;
+ uint64_t r2r_slot:1;
+ uint64_t inorder_mwf:1;
+ uint64_t inorder_mrf:1;
+ uint64_t reserved_10_11:2;
+ uint64_t fprch2:1;
+ uint64_t bprch:1;
+ uint64_t sil_lat:2;
+ uint64_t tskw:2;
+ uint64_t qs_dic:2;
+ uint64_t dic:2;
+ } s;
+ struct cvmx_lmcx_ctl_cn30xx {
+ uint64_t reserved_32_63:32;
+ uint64_t ddr__nctl:4;
+ uint64_t ddr__pctl:4;
+ uint64_t slow_scf:1;
+ uint64_t xor_bank:1;
+ uint64_t max_write_batch:4;
+ uint64_t pll_div2:1;
+ uint64_t pll_bypass:1;
+ uint64_t rdimm_ena:1;
+ uint64_t r2r_slot:1;
+ uint64_t inorder_mwf:1;
+ uint64_t inorder_mrf:1;
+ uint64_t dreset:1;
+ uint64_t mode32b:1;
+ uint64_t fprch2:1;
+ uint64_t bprch:1;
+ uint64_t sil_lat:2;
+ uint64_t tskw:2;
+ uint64_t qs_dic:2;
+ uint64_t dic:2;
+ } cn30xx;
+ struct cvmx_lmcx_ctl_cn30xx cn31xx;
+ struct cvmx_lmcx_ctl_cn38xx {
+ uint64_t reserved_32_63:32;
+ uint64_t ddr__nctl:4;
+ uint64_t ddr__pctl:4;
+ uint64_t slow_scf:1;
+ uint64_t xor_bank:1;
+ uint64_t max_write_batch:4;
+ uint64_t reserved_16_17:2;
+ uint64_t rdimm_ena:1;
+ uint64_t r2r_slot:1;
+ uint64_t inorder_mwf:1;
+ uint64_t inorder_mrf:1;
+ uint64_t set_zero:1;
+ uint64_t mode128b:1;
+ uint64_t fprch2:1;
+ uint64_t bprch:1;
+ uint64_t sil_lat:2;
+ uint64_t tskw:2;
+ uint64_t qs_dic:2;
+ uint64_t dic:2;
+ } cn38xx;
+ struct cvmx_lmcx_ctl_cn38xx cn38xxp2;
+ struct cvmx_lmcx_ctl_cn50xx {
+ uint64_t reserved_32_63:32;
+ uint64_t ddr__nctl:4;
+ uint64_t ddr__pctl:4;
+ uint64_t slow_scf:1;
+ uint64_t xor_bank:1;
+ uint64_t max_write_batch:4;
+ uint64_t reserved_17_17:1;
+ uint64_t pll_bypass:1;
+ uint64_t rdimm_ena:1;
+ uint64_t r2r_slot:1;
+ uint64_t inorder_mwf:1;
+ uint64_t inorder_mrf:1;
+ uint64_t dreset:1;
+ uint64_t mode32b:1;
+ uint64_t fprch2:1;
+ uint64_t bprch:1;
+ uint64_t sil_lat:2;
+ uint64_t tskw:2;
+ uint64_t qs_dic:2;
+ uint64_t dic:2;
+ } cn50xx;
+ struct cvmx_lmcx_ctl_cn52xx {
+ uint64_t reserved_32_63:32;
+ uint64_t ddr__nctl:4;
+ uint64_t ddr__pctl:4;
+ uint64_t slow_scf:1;
+ uint64_t xor_bank:1;
+ uint64_t max_write_batch:4;
+ uint64_t reserved_16_17:2;
+ uint64_t rdimm_ena:1;
+ uint64_t r2r_slot:1;
+ uint64_t inorder_mwf:1;
+ uint64_t inorder_mrf:1;
+ uint64_t dreset:1;
+ uint64_t mode32b:1;
+ uint64_t fprch2:1;
+ uint64_t bprch:1;
+ uint64_t sil_lat:2;
+ uint64_t tskw:2;
+ uint64_t qs_dic:2;
+ uint64_t dic:2;
+ } cn52xx;
+ struct cvmx_lmcx_ctl_cn52xx cn52xxp1;
+ struct cvmx_lmcx_ctl_cn52xx cn56xx;
+ struct cvmx_lmcx_ctl_cn52xx cn56xxp1;
+ struct cvmx_lmcx_ctl_cn58xx {
+ uint64_t reserved_32_63:32;
+ uint64_t ddr__nctl:4;
+ uint64_t ddr__pctl:4;
+ uint64_t slow_scf:1;
+ uint64_t xor_bank:1;
+ uint64_t max_write_batch:4;
+ uint64_t reserved_16_17:2;
+ uint64_t rdimm_ena:1;
+ uint64_t r2r_slot:1;
+ uint64_t inorder_mwf:1;
+ uint64_t inorder_mrf:1;
+ uint64_t dreset:1;
+ uint64_t mode128b:1;
+ uint64_t fprch2:1;
+ uint64_t bprch:1;
+ uint64_t sil_lat:2;
+ uint64_t tskw:2;
+ uint64_t qs_dic:2;
+ uint64_t dic:2;
+ } cn58xx;
+ struct cvmx_lmcx_ctl_cn58xx cn58xxp1;
+};
+
+/**
+ * cvmx_lmc#_ctl1
+ *
+ * LMC_CTL1 = LMC Control1
+ * This register is an assortment of various control fields needed by the
+ * memory controller
+ */
+union cvmx_lmcx_ctl1 {
+ u64 u64;
+ struct cvmx_lmcx_ctl1_s {
+ uint64_t reserved_21_63:43;
+ uint64_t ecc_adr:1;
+ uint64_t forcewrite:4;
+ uint64_t idlepower:3;
+ uint64_t sequence:3;
+ uint64_t sil_mode:1;
+ uint64_t dcc_enable:1;
+ uint64_t reserved_2_7:6;
+ uint64_t data_layout:2;
+ } s;
+ struct cvmx_lmcx_ctl1_cn30xx {
+ uint64_t reserved_2_63:62;
+ uint64_t data_layout:2;
+ } cn30xx;
+ struct cvmx_lmcx_ctl1_cn50xx {
+ uint64_t reserved_10_63:54;
+ uint64_t sil_mode:1;
+ uint64_t dcc_enable:1;
+ uint64_t reserved_2_7:6;
+ uint64_t data_layout:2;
+ } cn50xx;
+ struct cvmx_lmcx_ctl1_cn52xx {
+ uint64_t reserved_21_63:43;
+ uint64_t ecc_adr:1;
+ uint64_t forcewrite:4;
+ uint64_t idlepower:3;
+ uint64_t sequence:3;
+ uint64_t sil_mode:1;
+ uint64_t dcc_enable:1;
+ uint64_t reserved_0_7:8;
+ } cn52xx;
+ struct cvmx_lmcx_ctl1_cn52xx cn52xxp1;
+ struct cvmx_lmcx_ctl1_cn52xx cn56xx;
+ struct cvmx_lmcx_ctl1_cn52xx cn56xxp1;
+ struct cvmx_lmcx_ctl1_cn58xx {
+ uint64_t reserved_10_63:54;
+ uint64_t sil_mode:1;
+ uint64_t dcc_enable:1;
+ uint64_t reserved_0_7:8;
+ } cn58xx;
+ struct cvmx_lmcx_ctl1_cn58xx cn58xxp1;
+};
+
+/**
+ * cvmx_lmc#_dbtrain_ctl
+ *
+ * Reserved.
+ *
+ */
+union cvmx_lmcx_dbtrain_ctl {
+ u64 u64;
+ struct cvmx_lmcx_dbtrain_ctl_s {
+ uint64_t reserved_63_63:1;
+ uint64_t lfsr_pattern_sel:1;
+ uint64_t cmd_count_ext:2;
+ uint64_t db_output_impedance:3;
+ uint64_t db_sel:1;
+ uint64_t tccd_sel:1;
+ uint64_t rw_train:1;
+ uint64_t read_dq_count:7;
+ uint64_t read_cmd_count:5;
+ uint64_t write_ena:1;
+ uint64_t activate:1;
+ uint64_t prank:2;
+ uint64_t lrank:3;
+ uint64_t row_a:18;
+ uint64_t bg:2;
+ uint64_t ba:2;
+ uint64_t column_a:13;
+ } s;
+ struct cvmx_lmcx_dbtrain_ctl_cn73xx {
+ uint64_t reserved_60_63:4;
+ uint64_t db_output_impedance:3;
+ uint64_t db_sel:1;
+ uint64_t tccd_sel:1;
+ uint64_t rw_train:1;
+ uint64_t read_dq_count:7;
+ uint64_t read_cmd_count:5;
+ uint64_t write_ena:1;
+ uint64_t activate:1;
+ uint64_t prank:2;
+ uint64_t lrank:3;
+ uint64_t row_a:18;
+ uint64_t bg:2;
+ uint64_t ba:2;
+ uint64_t column_a:13;
+ } cn73xx;
+ struct cvmx_lmcx_dbtrain_ctl_s cn78xx;
+ struct cvmx_lmcx_dbtrain_ctl_cnf75xx {
+ uint64_t reserved_62_63:2;
+ uint64_t cmd_count_ext:2;
+ uint64_t db_output_impedance:3;
+ uint64_t db_sel:1;
+ uint64_t tccd_sel:1;
+ uint64_t rw_train:1;
+ uint64_t read_dq_count:7;
+ uint64_t read_cmd_count:5;
+ uint64_t write_ena:1;
+ uint64_t activate:1;
+ uint64_t prank:2;
+ uint64_t lrank:3;
+ uint64_t row_a:18;
+ uint64_t bg:2;
+ uint64_t ba:2;
+ uint64_t column_a:13;
+ } cnf75xx;
+};
+
+/**
+ * cvmx_lmc#_dclk_cnt
+ *
+ * LMC_DCLK_CNT = Performance Counters
+ *
+ */
+union cvmx_lmcx_dclk_cnt {
+ u64 u64;
+ struct cvmx_lmcx_dclk_cnt_s {
+ uint64_t dclkcnt:64;
+ } s;
+ struct cvmx_lmcx_dclk_cnt_s cn61xx;
+ struct cvmx_lmcx_dclk_cnt_s cn63xx;
+ struct cvmx_lmcx_dclk_cnt_s cn63xxp1;
+ struct cvmx_lmcx_dclk_cnt_s cn66xx;
+ struct cvmx_lmcx_dclk_cnt_s cn68xx;
+ struct cvmx_lmcx_dclk_cnt_s cn68xxp1;
+ struct cvmx_lmcx_dclk_cnt_s cn70xx;
+ struct cvmx_lmcx_dclk_cnt_s cn70xxp1;
+ struct cvmx_lmcx_dclk_cnt_s cn73xx;
+ struct cvmx_lmcx_dclk_cnt_s cn78xx;
+ struct cvmx_lmcx_dclk_cnt_s cn78xxp1;
+ struct cvmx_lmcx_dclk_cnt_s cnf71xx;
+ struct cvmx_lmcx_dclk_cnt_s cnf75xx;
+};
+
+/**
+ * cvmx_lmc#_dclk_cnt_hi
+ *
+ * LMC_DCLK_CNT_HI = Performance Counters
+ *
+ */
+union cvmx_lmcx_dclk_cnt_hi {
+ u64 u64;
+ struct cvmx_lmcx_dclk_cnt_hi_s {
+ uint64_t reserved_32_63:32;
+ uint64_t dclkcnt_hi:32;
+ } s;
+ struct cvmx_lmcx_dclk_cnt_hi_s cn30xx;
+ struct cvmx_lmcx_dclk_cnt_hi_s cn31xx;
+ struct cvmx_lmcx_dclk_cnt_hi_s cn38xx;
+ struct cvmx_lmcx_dclk_cnt_hi_s cn38xxp2;
+ struct cvmx_lmcx_dclk_cnt_hi_s cn50xx;
+ struct cvmx_lmcx_dclk_cnt_hi_s cn52xx;
+ struct cvmx_lmcx_dclk_cnt_hi_s cn52xxp1;
+ struct cvmx_lmcx_dclk_cnt_hi_s cn56xx;
+ struct cvmx_lmcx_dclk_cnt_hi_s cn56xxp1;
+ struct cvmx_lmcx_dclk_cnt_hi_s cn58xx;
+ struct cvmx_lmcx_dclk_cnt_hi_s cn58xxp1;
+};
+
+/**
+ * cvmx_lmc#_dclk_cnt_lo
+ *
+ * LMC_DCLK_CNT_LO = Performance Counters
+ *
+ */
+union cvmx_lmcx_dclk_cnt_lo {
+ u64 u64;
+ struct cvmx_lmcx_dclk_cnt_lo_s {
+ uint64_t reserved_32_63:32;
+ uint64_t dclkcnt_lo:32;
+ } s;
+ struct cvmx_lmcx_dclk_cnt_lo_s cn30xx;
+ struct cvmx_lmcx_dclk_cnt_lo_s cn31xx;
+ struct cvmx_lmcx_dclk_cnt_lo_s cn38xx;
+ struct cvmx_lmcx_dclk_cnt_lo_s cn38xxp2;
+ struct cvmx_lmcx_dclk_cnt_lo_s cn50xx;
+ struct cvmx_lmcx_dclk_cnt_lo_s cn52xx;
+ struct cvmx_lmcx_dclk_cnt_lo_s cn52xxp1;
+ struct cvmx_lmcx_dclk_cnt_lo_s cn56xx;
+ struct cvmx_lmcx_dclk_cnt_lo_s cn56xxp1;
+ struct cvmx_lmcx_dclk_cnt_lo_s cn58xx;
+ struct cvmx_lmcx_dclk_cnt_lo_s cn58xxp1;
+};
+
+/**
+ * cvmx_lmc#_dclk_ctl
+ *
+ * LMC_DCLK_CTL = LMC DCLK generation control
+ *
+ *
+ * Notes:
+ * This CSR is only relevant for LMC1. LMC0_DCLK_CTL is not used.
+ *
+ */
+union cvmx_lmcx_dclk_ctl {
+ u64 u64;
+ struct cvmx_lmcx_dclk_ctl_s {
+ uint64_t reserved_8_63:56;
+ uint64_t off90_ena:1;
+ uint64_t dclk90_byp:1;
+ uint64_t dclk90_ld:1;
+ uint64_t dclk90_vlu:5;
+ } s;
+ struct cvmx_lmcx_dclk_ctl_s cn56xx;
+ struct cvmx_lmcx_dclk_ctl_s cn56xxp1;
+};
+
+/**
+ * cvmx_lmc#_ddr2_ctl
+ *
+ * LMC_DDR2_CTL = LMC DDR2 & DLL Control Register
+ *
+ */
+union cvmx_lmcx_ddr2_ctl {
+ u64 u64;
+ struct cvmx_lmcx_ddr2_ctl_s {
+ uint64_t reserved_32_63:32;
+ uint64_t bank8:1;
+ uint64_t burst8:1;
+ uint64_t addlat:3;
+ uint64_t pocas:1;
+ uint64_t bwcnt:1;
+ uint64_t twr:3;
+ uint64_t silo_hc:1;
+ uint64_t ddr_eof:4;
+ uint64_t tfaw:5;
+ uint64_t crip_mode:1;
+ uint64_t ddr2t:1;
+ uint64_t odt_ena:1;
+ uint64_t qdll_ena:1;
+ uint64_t dll90_vlu:5;
+ uint64_t dll90_byp:1;
+ uint64_t rdqs:1;
+ uint64_t ddr2:1;
+ } s;
+ struct cvmx_lmcx_ddr2_ctl_cn30xx {
+ uint64_t reserved_32_63:32;
+ uint64_t bank8:1;
+ uint64_t burst8:1;
+ uint64_t addlat:3;
+ uint64_t pocas:1;
+ uint64_t bwcnt:1;
+ uint64_t twr:3;
+ uint64_t silo_hc:1;
+ uint64_t ddr_eof:4;
+ uint64_t tfaw:5;
+ uint64_t crip_mode:1;
+ uint64_t ddr2t:1;
+ uint64_t odt_ena:1;
+ uint64_t qdll_ena:1;
+ uint64_t dll90_vlu:5;
+ uint64_t dll90_byp:1;
+ uint64_t reserved_1_1:1;
+ uint64_t ddr2:1;
+ } cn30xx;
+ struct cvmx_lmcx_ddr2_ctl_cn30xx cn31xx;
+ struct cvmx_lmcx_ddr2_ctl_s cn38xx;
+ struct cvmx_lmcx_ddr2_ctl_s cn38xxp2;
+ struct cvmx_lmcx_ddr2_ctl_s cn50xx;
+ struct cvmx_lmcx_ddr2_ctl_s cn52xx;
+ struct cvmx_lmcx_ddr2_ctl_s cn52xxp1;
+ struct cvmx_lmcx_ddr2_ctl_s cn56xx;
+ struct cvmx_lmcx_ddr2_ctl_s cn56xxp1;
+ struct cvmx_lmcx_ddr2_ctl_s cn58xx;
+ struct cvmx_lmcx_ddr2_ctl_s cn58xxp1;
+};
+
+/**
+ * cvmx_lmc#_ddr4_dimm_ctl
+ *
+ * Bits 0-21 of this register are used only when LMC()_CONTROL[RDIMM_ENA] = 1.
+ *
+ * During an RCW initialization sequence, bits 0-21 control LMC's write
+ * operations to the extended DDR4 control words in the JEDEC standard
+ * registering clock driver on an RDIMM.
+ */
+union cvmx_lmcx_ddr4_dimm_ctl {
+ u64 u64;
+ struct cvmx_lmcx_ddr4_dimm_ctl_s {
+ uint64_t reserved_28_63:36;
+ uint64_t rank_timing_enable:1;
+ uint64_t bodt_trans_mode:1;
+ uint64_t trans_mode_ena:1;
+ uint64_t read_preamble_mode:1;
+ uint64_t buff_config_da3:1;
+ uint64_t mpr_over_ena:1;
+ uint64_t ddr4_dimm1_wmask:11;
+ uint64_t ddr4_dimm0_wmask:11;
+ } s;
+ struct cvmx_lmcx_ddr4_dimm_ctl_cn70xx {
+ uint64_t reserved_22_63:42;
+ uint64_t ddr4_dimm1_wmask:11;
+ uint64_t ddr4_dimm0_wmask:11;
+ } cn70xx;
+ struct cvmx_lmcx_ddr4_dimm_ctl_cn70xx cn70xxp1;
+ struct cvmx_lmcx_ddr4_dimm_ctl_s cn73xx;
+ struct cvmx_lmcx_ddr4_dimm_ctl_s cn78xx;
+ struct cvmx_lmcx_ddr4_dimm_ctl_s cn78xxp1;
+ struct cvmx_lmcx_ddr4_dimm_ctl_s cnf75xx;
+};
+
+/**
+ * cvmx_lmc#_ddr_pll_ctl
+ *
+ * This register controls the DDR_CK frequency. For details, refer to CK
+ * speed programming. See the LMC initialization sequence for
+ * initialization details.
+ * DDR PLL bringup sequence:
+ *
+ * 1. Write [CLKF], [CLKR], [DDR_PS_EN].
+ *
+ * 2. Wait 128 ref clock cycles (7680 core-clock cycles).
+ *
+ * 3. Write 1 to [RESET_N].
+ *
+ * 4. Wait 1152 ref clocks (1152*16 core-clock cycles).
+ *
+ * 5. Write 0 to [DDR_DIV_RESET].
+ *
+ * 6. Wait 10 ref clock cycles (160 core-clock cycles) before bringing up
+ * the DDR interface.
+ */
+union cvmx_lmcx_ddr_pll_ctl {
+ u64 u64;
+ struct cvmx_lmcx_ddr_pll_ctl_s {
+ uint64_t reserved_45_63:19;
+ uint64_t dclk_alt_refclk_sel:1;
+ uint64_t bwadj:12;
+ uint64_t dclk_invert:1;
+ uint64_t phy_dcok:1;
+ uint64_t ddr4_mode:1;
+ uint64_t pll_fbslip:1;
+ uint64_t pll_lock:1;
+ uint64_t reserved_18_26:9;
+ uint64_t diffamp:4;
+ uint64_t cps:3;
+ uint64_t reserved_8_10:3;
+ uint64_t reset_n:1;
+ uint64_t clkf:7;
+ } s;
+ struct cvmx_lmcx_ddr_pll_ctl_cn61xx {
+ uint64_t reserved_27_63:37;
+ uint64_t jtg_test_mode:1;
+ uint64_t dfm_div_reset:1;
+ uint64_t dfm_ps_en:3;
+ uint64_t ddr_div_reset:1;
+ uint64_t ddr_ps_en:3;
+ uint64_t diffamp:4;
+ uint64_t cps:3;
+ uint64_t cpb:3;
+ uint64_t reset_n:1;
+ uint64_t clkf:7;
+ } cn61xx;
+ struct cvmx_lmcx_ddr_pll_ctl_cn61xx cn63xx;
+ struct cvmx_lmcx_ddr_pll_ctl_cn61xx cn63xxp1;
+ struct cvmx_lmcx_ddr_pll_ctl_cn61xx cn66xx;
+ struct cvmx_lmcx_ddr_pll_ctl_cn61xx cn68xx;
+ struct cvmx_lmcx_ddr_pll_ctl_cn61xx cn68xxp1;
+ struct cvmx_lmcx_ddr_pll_ctl_cn70xx {
+ uint64_t reserved_31_63:33;
+ uint64_t phy_dcok:1;
+ uint64_t ddr4_mode:1;
+ uint64_t pll_fbslip:1;
+ uint64_t pll_lock:1;
+ uint64_t pll_rfslip:1;
+ uint64_t clkr:2;
+ uint64_t jtg_test_mode:1;
+ uint64_t ddr_div_reset:1;
+ uint64_t ddr_ps_en:4;
+ uint64_t reserved_8_17:10;
+ uint64_t reset_n:1;
+ uint64_t clkf:7;
+ } cn70xx;
+ struct cvmx_lmcx_ddr_pll_ctl_cn70xx cn70xxp1;
+ struct cvmx_lmcx_ddr_pll_ctl_cn73xx {
+ uint64_t reserved_45_63:19;
+ uint64_t dclk_alt_refclk_sel:1;
+ uint64_t bwadj:12;
+ uint64_t dclk_invert:1;
+ uint64_t phy_dcok:1;
+ uint64_t ddr4_mode:1;
+ uint64_t pll_fbslip:1;
+ uint64_t pll_lock:1;
+ uint64_t pll_rfslip:1;
+ uint64_t clkr:2;
+ uint64_t jtg_test_mode:1;
+ uint64_t ddr_div_reset:1;
+ uint64_t ddr_ps_en:4;
+ uint64_t reserved_9_17:9;
+ uint64_t clkf_ext:1;
+ uint64_t reset_n:1;
+ uint64_t clkf:7;
+ } cn73xx;
+ struct cvmx_lmcx_ddr_pll_ctl_cn73xx cn78xx;
+ struct cvmx_lmcx_ddr_pll_ctl_cn73xx cn78xxp1;
+ struct cvmx_lmcx_ddr_pll_ctl_cn61xx cnf71xx;
+ struct cvmx_lmcx_ddr_pll_ctl_cn73xx cnf75xx;
+};
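+
+/*
+ * Illustrative sketch of the DDR PLL bringup sequence above, expressed
+ * against this union. The csr_rd()/csr_wr() accessors and the delay
+ * helper are hypothetical stand-ins for whatever CSR access layer the
+ * caller uses; only the CN73xx field layout is shown.
+ */
+extern u64 csr_rd(u64 addr);		/* hypothetical CSR read */
+extern void csr_wr(u64 addr, u64 val);	/* hypothetical CSR write */
+extern void wait_core_clocks(u64 n);	/* hypothetical busy-wait */
+
+static inline void ddr_pll_bringup_sketch(u64 pll_ctl_addr, unsigned int clkf)
+{
+ union cvmx_lmcx_ddr_pll_ctl pll;
+
+ /* 1. Program CLKF (and, per board, CLKR/DDR_PS_EN). */
+ pll.u64 = csr_rd(pll_ctl_addr);
+ pll.cn73xx.clkf = clkf;
+ csr_wr(pll_ctl_addr, pll.u64);
+
+ /* 2. Wait 128 ref clock cycles (7680 core-clock cycles). */
+ wait_core_clocks(7680);
+
+ /* 3. Release the PLL reset. */
+ pll.cn73xx.reset_n = 1;
+ csr_wr(pll_ctl_addr, pll.u64);
+
+ /* 4. Wait 1152 ref clocks (1152*16 core-clock cycles). */
+ wait_core_clocks(1152 * 16);
+
+ /* 5. Release the DDR divider reset. */
+ pll.cn73xx.ddr_div_reset = 0;
+ csr_wr(pll_ctl_addr, pll.u64);
+
+ /* 6. Wait 10 ref clocks before bringing up the DDR interface. */
+ wait_core_clocks(160);
+}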
+
+/**
+ * cvmx_lmc#_delay_cfg
+ *
+ * LMC_DELAY_CFG = Open-loop delay line settings
+ *
+ *
+ * Notes:
+ * The DQ bits add OUTGOING delay only to dq, dqs_[p,n], cb, cbs_[p,n], dqm.
+ * Delay is approximately 50-80ps per setting depending on process/voltage.
+ * There is no need to add incoming delay, since by default all strobe bits
+ * are delayed internally by 90 degrees (as was always the case in previous
+ * passes and past chips).
+ *
+ * The CMD bits add delay to all command bits DDR_RAS, DDR_CAS, DDR_A<15:0>,
+ * DDR_BA<2:0>, DDR_n_CS<1:0>_L, DDR_WE, DDR_CKE and DDR_ODT_<7:0>.
+ * Again, delay is approximately 50-80ps per tap.
+ *
+ * The CLK bits add delay to all clock signals DDR_CK_<5:0>_P and
+ * DDR_CK_<5:0>_N. Again, delay is 50-80ps per tap.
+ *
+ * The usage scenario is the following: there is too much delay on the
+ * command signals and command setup is not met. The user can then delay
+ * the clock until setup is met.
+ *
+ * At the same time, though, dq/dqs should be delayed as well, because the
+ * DDR spec also ties dqs to clock. If the clock is delayed too much with
+ * respect to dqs, writes will start to fail.
+ *
+ * This scheme should eliminate the need for the board to add routing
+ * delay to the clock signals to make high frequencies work.
+ */
+union cvmx_lmcx_delay_cfg {
+ u64 u64;
+ struct cvmx_lmcx_delay_cfg_s {
+ uint64_t reserved_15_63:49;
+ uint64_t dq:5;
+ uint64_t cmd:5;
+ uint64_t clk:5;
+ } s;
+ struct cvmx_lmcx_delay_cfg_s cn30xx;
+ struct cvmx_lmcx_delay_cfg_cn38xx {
+ uint64_t reserved_14_63:50;
+ uint64_t dq:4;
+ uint64_t reserved_9_9:1;
+ uint64_t cmd:4;
+ uint64_t reserved_4_4:1;
+ uint64_t clk:4;
+ } cn38xx;
+ struct cvmx_lmcx_delay_cfg_cn38xx cn50xx;
+ struct cvmx_lmcx_delay_cfg_cn38xx cn52xx;
+ struct cvmx_lmcx_delay_cfg_cn38xx cn52xxp1;
+ struct cvmx_lmcx_delay_cfg_cn38xx cn56xx;
+ struct cvmx_lmcx_delay_cfg_cn38xx cn56xxp1;
+ struct cvmx_lmcx_delay_cfg_cn38xx cn58xx;
+ struct cvmx_lmcx_delay_cfg_cn38xx cn58xxp1;
+};
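+
+/*
+ * Illustrative sketch of the usage scenario above: derive an open-loop
+ * tap count from a target delay using the 50-80 ps/tap range quoted in
+ * the notes. Dividing by the slow-corner 80 ps keeps the setting
+ * conservative; the 5-bit fields cap the result at 31 taps. The ps
+ * figure is the caller's assumption, not a chip constant.
+ */
+static inline union cvmx_lmcx_delay_cfg delay_cfg_sketch(u64 delay_ps)
+{
+ union cvmx_lmcx_delay_cfg cfg;
+ u64 taps = delay_ps / 80;	/* worst-case (largest) tap delay */
+
+ if (taps > 31)
+  taps = 31;
+
+ cfg.u64 = 0;
+ cfg.s.clk = taps;	/* delay the clock to meet command setup */
+ cfg.s.dq = taps;	/* keep dq/dqs tracking the delayed clock */
+ return cfg;
+}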
+
+/**
+ * cvmx_lmc#_dimm#_ddr4_params0
+ *
+ * This register contains values to be programmed into the extra DDR4 control
+ * words in the corresponding (registered) DIMM. These are control words
+ * RC1x through RC8x.
+ */
+union cvmx_lmcx_dimmx_ddr4_params0 {
+ u64 u64;
+ struct cvmx_lmcx_dimmx_ddr4_params0_s {
+ uint64_t rc8x:8;
+ uint64_t rc7x:8;
+ uint64_t rc6x:8;
+ uint64_t rc5x:8;
+ uint64_t rc4x:8;
+ uint64_t rc3x:8;
+ uint64_t rc2x:8;
+ uint64_t rc1x:8;
+ } s;
+ struct cvmx_lmcx_dimmx_ddr4_params0_s cn70xx;
+ struct cvmx_lmcx_dimmx_ddr4_params0_s cn70xxp1;
+ struct cvmx_lmcx_dimmx_ddr4_params0_s cn73xx;
+ struct cvmx_lmcx_dimmx_ddr4_params0_s cn78xx;
+ struct cvmx_lmcx_dimmx_ddr4_params0_s cn78xxp1;
+ struct cvmx_lmcx_dimmx_ddr4_params0_s cnf75xx;
+};
+
+/**
+ * cvmx_lmc#_dimm#_ddr4_params1
+ *
+ * This register contains values to be programmed into the extra DDR4 control
+ * words in the corresponding (registered) DIMM. These are control words
+ * RC9x through RCBx.
+ */
+union cvmx_lmcx_dimmx_ddr4_params1 {
+ u64 u64;
+ struct cvmx_lmcx_dimmx_ddr4_params1_s {
+ uint64_t reserved_24_63:40;
+ uint64_t rcbx:8;
+ uint64_t rcax:8;
+ uint64_t rc9x:8;
+ } s;
+ struct cvmx_lmcx_dimmx_ddr4_params1_s cn70xx;
+ struct cvmx_lmcx_dimmx_ddr4_params1_s cn70xxp1;
+ struct cvmx_lmcx_dimmx_ddr4_params1_s cn73xx;
+ struct cvmx_lmcx_dimmx_ddr4_params1_s cn78xx;
+ struct cvmx_lmcx_dimmx_ddr4_params1_s cn78xxp1;
+ struct cvmx_lmcx_dimmx_ddr4_params1_s cnf75xx;
+};
+
+/**
+ * cvmx_lmc#_dimm#_params
+ *
+ * This register contains values to be programmed into each control word in
+ * the corresponding (registered) DIMM. The control words allow optimization
+ * of the device properties for different raw card designs. Note that LMC
+ * only uses this CSR when LMC()_CONTROL[RDIMM_ENA]=1. During a power-up/init
+ * sequence, LMC writes these fields into the control words in the JEDEC
+ * standard DDR3 SSTE32882 registering clock driver or DDR4 Register
+ * DDR4RCD01 on an RDIMM when the corresponding LMC()_DIMM_CTL[DIMM*_WMASK]
+ * bits are set.
+ */
+union cvmx_lmcx_dimmx_params {
+ u64 u64;
+ struct cvmx_lmcx_dimmx_params_s {
+ uint64_t rc15:4;
+ uint64_t rc14:4;
+ uint64_t rc13:4;
+ uint64_t rc12:4;
+ uint64_t rc11:4;
+ uint64_t rc10:4;
+ uint64_t rc9:4;
+ uint64_t rc8:4;
+ uint64_t rc7:4;
+ uint64_t rc6:4;
+ uint64_t rc5:4;
+ uint64_t rc4:4;
+ uint64_t rc3:4;
+ uint64_t rc2:4;
+ uint64_t rc1:4;
+ uint64_t rc0:4;
+ } s;
+ struct cvmx_lmcx_dimmx_params_s cn61xx;
+ struct cvmx_lmcx_dimmx_params_s cn63xx;
+ struct cvmx_lmcx_dimmx_params_s cn63xxp1;
+ struct cvmx_lmcx_dimmx_params_s cn66xx;
+ struct cvmx_lmcx_dimmx_params_s cn68xx;
+ struct cvmx_lmcx_dimmx_params_s cn68xxp1;
+ struct cvmx_lmcx_dimmx_params_s cn70xx;
+ struct cvmx_lmcx_dimmx_params_s cn70xxp1;
+ struct cvmx_lmcx_dimmx_params_s cn73xx;
+ struct cvmx_lmcx_dimmx_params_s cn78xx;
+ struct cvmx_lmcx_dimmx_params_s cn78xxp1;
+ struct cvmx_lmcx_dimmx_params_s cnf71xx;
+ struct cvmx_lmcx_dimmx_params_s cnf75xx;
+};
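+
+/*
+ * Illustrative sketch: packing sixteen 4-bit control words (RC0-RC15),
+ * e.g. values taken from a raw-card table, into this register. RC0
+ * occupies bits <3:0> per the field layout above.
+ */
+static inline u64 dimmx_params_pack_sketch(const u8 rc[16])
+{
+ union cvmx_lmcx_dimmx_params p;
+ int i;
+
+ p.u64 = 0;
+ for (i = 0; i < 16; i++)
+  p.u64 |= (u64)(rc[i] & 0xf) << (4 * i);
+ return p.u64;
+}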
+
+/**
+ * cvmx_lmc#_dimm_ctl
+ *
+ * Note that this CSR is only used when LMC()_CONTROL[RDIMM_ENA] = 1. During
+ * a power-up/init sequence, this CSR controls LMC's write operations to the
+ * control words in the JEDEC standard DDR3 SSTE32882 registering clock
+ * driver or DDR4 Register DDR4RCD01 on an RDIMM.
+ */
+union cvmx_lmcx_dimm_ctl {
+ u64 u64;
+ struct cvmx_lmcx_dimm_ctl_s {
+ uint64_t reserved_46_63:18;
+ uint64_t parity:1;
+ uint64_t tcws:13;
+ uint64_t dimm1_wmask:16;
+ uint64_t dimm0_wmask:16;
+ } s;
+ struct cvmx_lmcx_dimm_ctl_s cn61xx;
+ struct cvmx_lmcx_dimm_ctl_s cn63xx;
+ struct cvmx_lmcx_dimm_ctl_s cn63xxp1;
+ struct cvmx_lmcx_dimm_ctl_s cn66xx;
+ struct cvmx_lmcx_dimm_ctl_s cn68xx;
+ struct cvmx_lmcx_dimm_ctl_s cn68xxp1;
+ struct cvmx_lmcx_dimm_ctl_s cn70xx;
+ struct cvmx_lmcx_dimm_ctl_s cn70xxp1;
+ struct cvmx_lmcx_dimm_ctl_s cn73xx;
+ struct cvmx_lmcx_dimm_ctl_s cn78xx;
+ struct cvmx_lmcx_dimm_ctl_s cn78xxp1;
+ struct cvmx_lmcx_dimm_ctl_s cnf71xx;
+ struct cvmx_lmcx_dimm_ctl_s cnf75xx;
+};
+
+/**
+ * cvmx_lmc#_dll_ctl
+ *
+ * LMC_DLL_CTL = LMC DLL control and DCLK reset
+ *
+ */
+union cvmx_lmcx_dll_ctl {
+ u64 u64;
+ struct cvmx_lmcx_dll_ctl_s {
+ uint64_t reserved_8_63:56;
+ uint64_t dreset:1;
+ uint64_t dll90_byp:1;
+ uint64_t dll90_ena:1;
+ uint64_t dll90_vlu:5;
+ } s;
+ struct cvmx_lmcx_dll_ctl_s cn52xx;
+ struct cvmx_lmcx_dll_ctl_s cn52xxp1;
+ struct cvmx_lmcx_dll_ctl_s cn56xx;
+ struct cvmx_lmcx_dll_ctl_s cn56xxp1;
+};
+
+/**
+ * cvmx_lmc#_dll_ctl2
+ *
+ * See the LMC initialization sequence for initialization details.
+ *
+ */
+union cvmx_lmcx_dll_ctl2 {
+ u64 u64;
+ struct cvmx_lmcx_dll_ctl2_s {
+ uint64_t reserved_0_63:64;
+ } s;
+ struct cvmx_lmcx_dll_ctl2_cn61xx {
+ uint64_t reserved_16_63:48;
+ uint64_t intf_en:1;
+ uint64_t dll_bringup:1;
+ uint64_t dreset:1;
+ uint64_t quad_dll_ena:1;
+ uint64_t byp_sel:4;
+ uint64_t byp_setting:8;
+ } cn61xx;
+ struct cvmx_lmcx_dll_ctl2_cn63xx {
+ uint64_t reserved_15_63:49;
+ uint64_t dll_bringup:1;
+ uint64_t dreset:1;
+ uint64_t quad_dll_ena:1;
+ uint64_t byp_sel:4;
+ uint64_t byp_setting:8;
+ } cn63xx;
+ struct cvmx_lmcx_dll_ctl2_cn63xx cn63xxp1;
+ struct cvmx_lmcx_dll_ctl2_cn63xx cn66xx;
+ struct cvmx_lmcx_dll_ctl2_cn61xx cn68xx;
+ struct cvmx_lmcx_dll_ctl2_cn61xx cn68xxp1;
+ struct cvmx_lmcx_dll_ctl2_cn70xx {
+ uint64_t reserved_17_63:47;
+ uint64_t intf_en:1;
+ uint64_t dll_bringup:1;
+ uint64_t dreset:1;
+ uint64_t quad_dll_ena:1;
+ uint64_t byp_sel:4;
+ uint64_t byp_setting:9;
+ } cn70xx;
+ struct cvmx_lmcx_dll_ctl2_cn70xx cn70xxp1;
+ struct cvmx_lmcx_dll_ctl2_cn70xx cn73xx;
+ struct cvmx_lmcx_dll_ctl2_cn70xx cn78xx;
+ struct cvmx_lmcx_dll_ctl2_cn70xx cn78xxp1;
+ struct cvmx_lmcx_dll_ctl2_cn61xx cnf71xx;
+ struct cvmx_lmcx_dll_ctl2_cn70xx cnf75xx;
+};
+
+/**
+ * cvmx_lmc#_dll_ctl3
+ *
+ * LMC_DLL_CTL3 = LMC DLL control and DCLK reset
+ *
+ */
+union cvmx_lmcx_dll_ctl3 {
+ u64 u64;
+ struct cvmx_lmcx_dll_ctl3_s {
+ uint64_t reserved_50_63:14;
+ uint64_t wr_deskew_ena:1;
+ uint64_t wr_deskew_ld:1;
+ uint64_t bit_select:4;
+ uint64_t reserved_0_43:44;
+ } s;
+ struct cvmx_lmcx_dll_ctl3_cn61xx {
+ uint64_t reserved_41_63:23;
+ uint64_t dclk90_fwd:1;
+ uint64_t ddr_90_dly_byp:1;
+ uint64_t dclk90_recal_dis:1;
+ uint64_t dclk90_byp_sel:1;
+ uint64_t dclk90_byp_setting:8;
+ uint64_t dll_fast:1;
+ uint64_t dll90_setting:8;
+ uint64_t fine_tune_mode:1;
+ uint64_t dll_mode:1;
+ uint64_t dll90_byte_sel:4;
+ uint64_t offset_ena:1;
+ uint64_t load_offset:1;
+ uint64_t mode_sel:2;
+ uint64_t byte_sel:4;
+ uint64_t offset:6;
+ } cn61xx;
+ struct cvmx_lmcx_dll_ctl3_cn63xx {
+ uint64_t reserved_29_63:35;
+ uint64_t dll_fast:1;
+ uint64_t dll90_setting:8;
+ uint64_t fine_tune_mode:1;
+ uint64_t dll_mode:1;
+ uint64_t dll90_byte_sel:4;
+ uint64_t offset_ena:1;
+ uint64_t load_offset:1;
+ uint64_t mode_sel:2;
+ uint64_t byte_sel:4;
+ uint64_t offset:6;
+ } cn63xx;
+ struct cvmx_lmcx_dll_ctl3_cn63xx cn63xxp1;
+ struct cvmx_lmcx_dll_ctl3_cn63xx cn66xx;
+ struct cvmx_lmcx_dll_ctl3_cn61xx cn68xx;
+ struct cvmx_lmcx_dll_ctl3_cn61xx cn68xxp1;
+ struct cvmx_lmcx_dll_ctl3_cn70xx {
+ uint64_t reserved_44_63:20;
+ uint64_t dclk90_fwd:1;
+ uint64_t ddr_90_dly_byp:1;
+ uint64_t dclk90_recal_dis:1;
+ uint64_t dclk90_byp_sel:1;
+ uint64_t dclk90_byp_setting:9;
+ uint64_t dll_fast:1;
+ uint64_t dll90_setting:9;
+ uint64_t fine_tune_mode:1;
+ uint64_t dll_mode:1;
+ uint64_t dll90_byte_sel:4;
+ uint64_t offset_ena:1;
+ uint64_t load_offset:1;
+ uint64_t mode_sel:2;
+ uint64_t byte_sel:4;
+ uint64_t offset:7;
+ } cn70xx;
+ struct cvmx_lmcx_dll_ctl3_cn70xx cn70xxp1;
+ struct cvmx_lmcx_dll_ctl3_cn73xx {
+ uint64_t reserved_50_63:14;
+ uint64_t wr_deskew_ena:1;
+ uint64_t wr_deskew_ld:1;
+ uint64_t bit_select:4;
+ uint64_t dclk90_fwd:1;
+ uint64_t ddr_90_dly_byp:1;
+ uint64_t dclk90_recal_dis:1;
+ uint64_t dclk90_byp_sel:1;
+ uint64_t dclk90_byp_setting:9;
+ uint64_t dll_fast:1;
+ uint64_t dll90_setting:9;
+ uint64_t fine_tune_mode:1;
+ uint64_t dll_mode:1;
+ uint64_t dll90_byte_sel:4;
+ uint64_t offset_ena:1;
+ uint64_t load_offset:1;
+ uint64_t mode_sel:2;
+ uint64_t byte_sel:4;
+ uint64_t offset:7;
+ } cn73xx;
+ struct cvmx_lmcx_dll_ctl3_cn73xx cn78xx;
+ struct cvmx_lmcx_dll_ctl3_cn73xx cn78xxp1;
+ struct cvmx_lmcx_dll_ctl3_cn61xx cnf71xx;
+ struct cvmx_lmcx_dll_ctl3_cn73xx cnf75xx;
+};
+
+/**
+ * cvmx_lmc#_dual_memcfg
+ *
+ * This register controls certain parameters of dual-memory configuration.
+ *
+ * This register enables the design to have two separate memory
+ * configurations, selected dynamically by the reference address. Note,
+ * however, that both configurations share LMC()_CONTROL[XOR_BANK],
+ * LMC()_CONFIG[PBANK_LSB], LMC()_CONFIG[RANK_ENA], and all timing parameters.
+ *
+ * In this description:
+ * * config0 refers to the normal memory configuration that is defined by the
+ * LMC()_CONFIG[ROW_LSB] parameter
+ * * config1 refers to the dual (or second) memory configuration that is
+ * defined by this register.
+ */
+union cvmx_lmcx_dual_memcfg {
+ u64 u64;
+ struct cvmx_lmcx_dual_memcfg_s {
+ uint64_t reserved_20_63:44;
+ uint64_t bank8:1;
+ uint64_t row_lsb:3;
+ uint64_t reserved_8_15:8;
+ uint64_t cs_mask:8;
+ } s;
+ struct cvmx_lmcx_dual_memcfg_s cn50xx;
+ struct cvmx_lmcx_dual_memcfg_s cn52xx;
+ struct cvmx_lmcx_dual_memcfg_s cn52xxp1;
+ struct cvmx_lmcx_dual_memcfg_s cn56xx;
+ struct cvmx_lmcx_dual_memcfg_s cn56xxp1;
+ struct cvmx_lmcx_dual_memcfg_s cn58xx;
+ struct cvmx_lmcx_dual_memcfg_s cn58xxp1;
+ struct cvmx_lmcx_dual_memcfg_cn61xx {
+ uint64_t reserved_19_63:45;
+ uint64_t row_lsb:3;
+ uint64_t reserved_8_15:8;
+ uint64_t cs_mask:8;
+ } cn61xx;
+ struct cvmx_lmcx_dual_memcfg_cn61xx cn63xx;
+ struct cvmx_lmcx_dual_memcfg_cn61xx cn63xxp1;
+ struct cvmx_lmcx_dual_memcfg_cn61xx cn66xx;
+ struct cvmx_lmcx_dual_memcfg_cn61xx cn68xx;
+ struct cvmx_lmcx_dual_memcfg_cn61xx cn68xxp1;
+ struct cvmx_lmcx_dual_memcfg_cn70xx {
+ uint64_t reserved_19_63:45;
+ uint64_t row_lsb:3;
+ uint64_t reserved_4_15:12;
+ uint64_t cs_mask:4;
+ } cn70xx;
+ struct cvmx_lmcx_dual_memcfg_cn70xx cn70xxp1;
+ struct cvmx_lmcx_dual_memcfg_cn70xx cn73xx;
+ struct cvmx_lmcx_dual_memcfg_cn70xx cn78xx;
+ struct cvmx_lmcx_dual_memcfg_cn70xx cn78xxp1;
+ struct cvmx_lmcx_dual_memcfg_cn61xx cnf71xx;
+ struct cvmx_lmcx_dual_memcfg_cn70xx cnf75xx;
+};
+
+/**
+ * cvmx_lmc#_ecc_parity_test
+ *
+ * This register has bits to control the generation of ECC and command
+ * address parity errors. An ECC error is generated by enabling
+ * [ECC_CORRUPT_ENA] and selecting the [ECC_CORRUPT_IDX] index of the
+ * dataword from the cacheline to be corrupted.
+ * The user needs to select which bit of the 128-bit dataword to corrupt by
+ * asserting any of the CHAR_MASK0 and CHAR_MASK2 bits (CHAR_MASK0 and
+ * CHAR_MASK2 correspond to the lower and upper 64-bit signals that can
+ * corrupt any individual bit of the data).
+ *
+ * Command address parity error is generated by enabling
+ * [CA_PARITY_CORRUPT_ENA] and selecting the DDR command that the parity
+ * is to be corrupted with through [CA_PARITY_SEL].
+ */
+union cvmx_lmcx_ecc_parity_test {
+ u64 u64;
+ struct cvmx_lmcx_ecc_parity_test_s {
+ uint64_t reserved_12_63:52;
+ uint64_t ecc_corrupt_ena:1;
+ uint64_t ecc_corrupt_idx:3;
+ uint64_t reserved_6_7:2;
+ uint64_t ca_parity_corrupt_ena:1;
+ uint64_t ca_parity_sel:5;
+ } s;
+ struct cvmx_lmcx_ecc_parity_test_s cn73xx;
+ struct cvmx_lmcx_ecc_parity_test_s cn78xx;
+ struct cvmx_lmcx_ecc_parity_test_s cn78xxp1;
+ struct cvmx_lmcx_ecc_parity_test_s cnf75xx;
+};
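+
+/*
+ * Illustrative sketch: arming a single ECC corruption on one 128-bit
+ * dataword of a cacheline, per the description above. Which bit gets
+ * flipped is selected separately through the CHAR_MASK0/CHAR_MASK2
+ * registers, which are not defined in this file.
+ */
+static inline u64 ecc_corrupt_arm_sketch(unsigned int dataword_idx)
+{
+ union cvmx_lmcx_ecc_parity_test t;
+
+ t.u64 = 0;
+ t.s.ecc_corrupt_ena = 1;
+ t.s.ecc_corrupt_idx = dataword_idx & 0x7;	/* 3-bit index */
+ return t.u64;
+}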
+
+/**
+ * cvmx_lmc#_ecc_synd
+ *
+ * LMC_ECC_SYND = MRD ECC Syndromes
+ *
+ */
+union cvmx_lmcx_ecc_synd {
+ u64 u64;
+ struct cvmx_lmcx_ecc_synd_s {
+ uint64_t reserved_32_63:32;
+ uint64_t mrdsyn3:8;
+ uint64_t mrdsyn2:8;
+ uint64_t mrdsyn1:8;
+ uint64_t mrdsyn0:8;
+ } s;
+ struct cvmx_lmcx_ecc_synd_s cn30xx;
+ struct cvmx_lmcx_ecc_synd_s cn31xx;
+ struct cvmx_lmcx_ecc_synd_s cn38xx;
+ struct cvmx_lmcx_ecc_synd_s cn38xxp2;
+ struct cvmx_lmcx_ecc_synd_s cn50xx;
+ struct cvmx_lmcx_ecc_synd_s cn52xx;
+ struct cvmx_lmcx_ecc_synd_s cn52xxp1;
+ struct cvmx_lmcx_ecc_synd_s cn56xx;
+ struct cvmx_lmcx_ecc_synd_s cn56xxp1;
+ struct cvmx_lmcx_ecc_synd_s cn58xx;
+ struct cvmx_lmcx_ecc_synd_s cn58xxp1;
+ struct cvmx_lmcx_ecc_synd_s cn61xx;
+ struct cvmx_lmcx_ecc_synd_s cn63xx;
+ struct cvmx_lmcx_ecc_synd_s cn63xxp1;
+ struct cvmx_lmcx_ecc_synd_s cn66xx;
+ struct cvmx_lmcx_ecc_synd_s cn68xx;
+ struct cvmx_lmcx_ecc_synd_s cn68xxp1;
+ struct cvmx_lmcx_ecc_synd_s cn70xx;
+ struct cvmx_lmcx_ecc_synd_s cn70xxp1;
+ struct cvmx_lmcx_ecc_synd_s cn73xx;
+ struct cvmx_lmcx_ecc_synd_s cn78xx;
+ struct cvmx_lmcx_ecc_synd_s cn78xxp1;
+ struct cvmx_lmcx_ecc_synd_s cnf71xx;
+ struct cvmx_lmcx_ecc_synd_s cnf75xx;
+};
+
+/**
+ * cvmx_lmc#_ext_config
+ *
+ * This register has additional configuration and control bits for the LMC.
+ *
+ */
+union cvmx_lmcx_ext_config {
+ u64 u64;
+ struct cvmx_lmcx_ext_config_s {
+ uint64_t reserved_61_63:3;
+ uint64_t bc4_dqs_ena:1;
+ uint64_t ref_block:1;
+ uint64_t mrs_side:1;
+ uint64_t mrs_one_side:1;
+ uint64_t mrs_bside_invert_disable:1;
+ uint64_t dimm_sel_invert_off:1;
+ uint64_t dimm_sel_force_invert:1;
+ uint64_t coalesce_address_mode:1;
+ uint64_t dimm1_cid:2;
+ uint64_t dimm0_cid:2;
+ uint64_t rcd_parity_check:1;
+ uint64_t reserved_46_47:2;
+ uint64_t error_alert_n_sample:1;
+ uint64_t ea_int_polarity:1;
+ uint64_t reserved_43_43:1;
+ uint64_t par_addr_mask:3;
+ uint64_t reserved_38_39:2;
+ uint64_t mrs_cmd_override:1;
+ uint64_t mrs_cmd_select:1;
+ uint64_t reserved_33_35:3;
+ uint64_t invert_data:1;
+ uint64_t reserved_30_31:2;
+ uint64_t cmd_rti:1;
+ uint64_t cal_ena:1;
+ uint64_t reserved_27_27:1;
+ uint64_t par_include_a17:1;
+ uint64_t par_include_bg1:1;
+ uint64_t gen_par:1;
+ uint64_t reserved_21_23:3;
+ uint64_t vrefint_seq_deskew:1;
+ uint64_t read_ena_bprch:1;
+ uint64_t read_ena_fprch:1;
+ uint64_t slot_ctl_reset_force:1;
+ uint64_t ref_int_lsbs:9;
+ uint64_t drive_ena_bprch:1;
+ uint64_t drive_ena_fprch:1;
+ uint64_t dlcram_flip_synd:2;
+ uint64_t dlcram_cor_dis:1;
+ uint64_t dlc_nxm_rd:1;
+ uint64_t l2c_nxm_rd:1;
+ uint64_t l2c_nxm_wr:1;
+ } s;
+ struct cvmx_lmcx_ext_config_cn70xx {
+ uint64_t reserved_21_63:43;
+ uint64_t vrefint_seq_deskew:1;
+ uint64_t read_ena_bprch:1;
+ uint64_t read_ena_fprch:1;
+ uint64_t slot_ctl_reset_force:1;
+ uint64_t ref_int_lsbs:9;
+ uint64_t drive_ena_bprch:1;
+ uint64_t drive_ena_fprch:1;
+ uint64_t dlcram_flip_synd:2;
+ uint64_t dlcram_cor_dis:1;
+ uint64_t dlc_nxm_rd:1;
+ uint64_t l2c_nxm_rd:1;
+ uint64_t l2c_nxm_wr:1;
+ } cn70xx;
+ struct cvmx_lmcx_ext_config_cn70xx cn70xxp1;
+ struct cvmx_lmcx_ext_config_cn73xx {
+ uint64_t reserved_60_63:4;
+ uint64_t ref_block:1;
+ uint64_t mrs_side:1;
+ uint64_t mrs_one_side:1;
+ uint64_t mrs_bside_invert_disable:1;
+ uint64_t dimm_sel_invert_off:1;
+ uint64_t dimm_sel_force_invert:1;
+ uint64_t coalesce_address_mode:1;
+ uint64_t dimm1_cid:2;
+ uint64_t dimm0_cid:2;
+ uint64_t rcd_parity_check:1;
+ uint64_t reserved_46_47:2;
+ uint64_t error_alert_n_sample:1;
+ uint64_t ea_int_polarity:1;
+ uint64_t reserved_43_43:1;
+ uint64_t par_addr_mask:3;
+ uint64_t reserved_38_39:2;
+ uint64_t mrs_cmd_override:1;
+ uint64_t mrs_cmd_select:1;
+ uint64_t reserved_33_35:3;
+ uint64_t invert_data:1;
+ uint64_t reserved_30_31:2;
+ uint64_t cmd_rti:1;
+ uint64_t cal_ena:1;
+ uint64_t reserved_27_27:1;
+ uint64_t par_include_a17:1;
+ uint64_t par_include_bg1:1;
+ uint64_t gen_par:1;
+ uint64_t reserved_21_23:3;
+ uint64_t vrefint_seq_deskew:1;
+ uint64_t read_ena_bprch:1;
+ uint64_t read_ena_fprch:1;
+ uint64_t slot_ctl_reset_force:1;
+ uint64_t ref_int_lsbs:9;
+ uint64_t drive_ena_bprch:1;
+ uint64_t drive_ena_fprch:1;
+ uint64_t dlcram_flip_synd:2;
+ uint64_t dlcram_cor_dis:1;
+ uint64_t dlc_nxm_rd:1;
+ uint64_t l2c_nxm_rd:1;
+ uint64_t l2c_nxm_wr:1;
+ } cn73xx;
+ struct cvmx_lmcx_ext_config_s cn78xx;
+ struct cvmx_lmcx_ext_config_s cn78xxp1;
+ struct cvmx_lmcx_ext_config_cn73xx cnf75xx;
+};
+
+/**
+ * cvmx_lmc#_ext_config2
+ *
+ * This register has additional configuration and control bits for the LMC.
+ *
+ */
+union cvmx_lmcx_ext_config2 {
+ u64 u64;
+ struct cvmx_lmcx_ext_config2_s {
+ uint64_t reserved_27_63:37;
+ uint64_t sref_auto_idle_thres:5;
+ uint64_t sref_auto_enable:1;
+ uint64_t delay_unload_r3:1;
+ uint64_t delay_unload_r2:1;
+ uint64_t delay_unload_r1:1;
+ uint64_t delay_unload_r0:1;
+ uint64_t early_dqx2:1;
+ uint64_t xor_bank_sel:4;
+ uint64_t reserved_10_11:2;
+ uint64_t row_col_switch:1;
+ uint64_t trr_on:1;
+ uint64_t mac:3;
+ uint64_t macram_scrub_done:1;
+ uint64_t macram_scrub:1;
+ uint64_t macram_flip_synd:2;
+ uint64_t macram_cor_dis:1;
+ } s;
+ struct cvmx_lmcx_ext_config2_cn73xx {
+ uint64_t reserved_10_63:54;
+ uint64_t row_col_switch:1;
+ uint64_t trr_on:1;
+ uint64_t mac:3;
+ uint64_t macram_scrub_done:1;
+ uint64_t macram_scrub:1;
+ uint64_t macram_flip_synd:2;
+ uint64_t macram_cor_dis:1;
+ } cn73xx;
+ struct cvmx_lmcx_ext_config2_s cn78xx;
+ struct cvmx_lmcx_ext_config2_cnf75xx {
+ uint64_t reserved_21_63:43;
+ uint64_t delay_unload_r3:1;
+ uint64_t delay_unload_r2:1;
+ uint64_t delay_unload_r1:1;
+ uint64_t delay_unload_r0:1;
+ uint64_t early_dqx2:1;
+ uint64_t xor_bank_sel:4;
+ uint64_t reserved_10_11:2;
+ uint64_t row_col_switch:1;
+ uint64_t trr_on:1;
+ uint64_t mac:3;
+ uint64_t macram_scrub_done:1;
+ uint64_t macram_scrub:1;
+ uint64_t macram_flip_synd:2;
+ uint64_t macram_cor_dis:1;
+ } cnf75xx;
+};
+
+/**
+ * cvmx_lmc#_fadr
+ *
+ * This register only captures the first transaction with ECC errors. A DED
+ * error can overwrite this register with its failing addresses if the
+ * first error was a SEC error. Writing LMC()_INT[SEC_ERR,DED_ERR] clears
+ * the error bits and captures the next failing address. If FDIMM is 1,
+ * the error is in the high DIMM.
+ *
+ * LMC()_FADR captures the failing pre-scrambled address location (split
+ * into DIMM, bunk, bank, etc.). If scrambling is off, LMC()_FADR also
+ * captures the failing physical location in the DRAM parts.
+ *
+ * LMC()_SCRAMBLED_FADR captures the actual failing address location in
+ * the physical DRAM parts:
+ * * If scrambling is on, LMC()_SCRAMBLED_FADR contains the failing
+ * physical location in the DRAM parts (split into DIMM, bunk, bank, etc.).
+ * * If scrambling is off, the pre-scramble and post-scramble addresses
+ * are the same, so the contents of LMC()_SCRAMBLED_FADR match the
+ * contents of LMC()_FADR.
+ */
+union cvmx_lmcx_fadr {
+ u64 u64;
+ struct cvmx_lmcx_fadr_s {
+ uint64_t reserved_43_63:21;
+ uint64_t fcid:3;
+ uint64_t fill_order:2;
+ uint64_t reserved_0_37:38;
+ } s;
+ struct cvmx_lmcx_fadr_cn30xx {
+ uint64_t reserved_32_63:32;
+ uint64_t fdimm:2;
+ uint64_t fbunk:1;
+ uint64_t fbank:3;
+ uint64_t frow:14;
+ uint64_t fcol:12;
+ } cn30xx;
+ struct cvmx_lmcx_fadr_cn30xx cn31xx;
+ struct cvmx_lmcx_fadr_cn30xx cn38xx;
+ struct cvmx_lmcx_fadr_cn30xx cn38xxp2;
+ struct cvmx_lmcx_fadr_cn30xx cn50xx;
+ struct cvmx_lmcx_fadr_cn30xx cn52xx;
+ struct cvmx_lmcx_fadr_cn30xx cn52xxp1;
+ struct cvmx_lmcx_fadr_cn30xx cn56xx;
+ struct cvmx_lmcx_fadr_cn30xx cn56xxp1;
+ struct cvmx_lmcx_fadr_cn30xx cn58xx;
+ struct cvmx_lmcx_fadr_cn30xx cn58xxp1;
+ struct cvmx_lmcx_fadr_cn61xx {
+ uint64_t reserved_36_63:28;
+ uint64_t fdimm:2;
+ uint64_t fbunk:1;
+ uint64_t fbank:3;
+ uint64_t frow:16;
+ uint64_t fcol:14;
+ } cn61xx;
+ struct cvmx_lmcx_fadr_cn61xx cn63xx;
+ struct cvmx_lmcx_fadr_cn61xx cn63xxp1;
+ struct cvmx_lmcx_fadr_cn61xx cn66xx;
+ struct cvmx_lmcx_fadr_cn61xx cn68xx;
+ struct cvmx_lmcx_fadr_cn61xx cn68xxp1;
+ struct cvmx_lmcx_fadr_cn70xx {
+ uint64_t reserved_40_63:24;
+ uint64_t fill_order:2;
+ uint64_t fdimm:1;
+ uint64_t fbunk:1;
+ uint64_t fbank:4;
+ uint64_t frow:18;
+ uint64_t fcol:14;
+ } cn70xx;
+ struct cvmx_lmcx_fadr_cn70xx cn70xxp1;
+ struct cvmx_lmcx_fadr_cn73xx {
+ uint64_t reserved_43_63:21;
+ uint64_t fcid:3;
+ uint64_t fill_order:2;
+ uint64_t fdimm:1;
+ uint64_t fbunk:1;
+ uint64_t fbank:4;
+ uint64_t frow:18;
+ uint64_t fcol:14;
+ } cn73xx;
+ struct cvmx_lmcx_fadr_cn73xx cn78xx;
+ struct cvmx_lmcx_fadr_cn73xx cn78xxp1;
+ struct cvmx_lmcx_fadr_cn61xx cnf71xx;
+ struct cvmx_lmcx_fadr_cn73xx cnf75xx;
+};
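+
+/*
+ * Illustrative sketch: decoding a captured failing address using the
+ * CN73xx field layout above. The output structure is a local
+ * convenience, not a Cavium type.
+ */
+struct lmc_fadr_fields_sketch {
+ unsigned int dimm, rank, bank, row, col;
+};
+
+static inline void fadr_decode_sketch(u64 fadr,
+				      struct lmc_fadr_fields_sketch *out)
+{
+ union cvmx_lmcx_fadr f;
+
+ f.u64 = fadr;
+ out->dimm = f.cn73xx.fdimm;	/* 1 = error in the high DIMM */
+ out->rank = f.cn73xx.fbunk;
+ out->bank = f.cn73xx.fbank;
+ out->row = f.cn73xx.frow;
+ out->col = f.cn73xx.fcol;
+}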
+
+/**
+ * cvmx_lmc#_general_purpose0
+ */
+union cvmx_lmcx_general_purpose0 {
+ u64 u64;
+ struct cvmx_lmcx_general_purpose0_s {
+ uint64_t data:64;
+ } s;
+ struct cvmx_lmcx_general_purpose0_s cn73xx;
+ struct cvmx_lmcx_general_purpose0_s cn78xx;
+ struct cvmx_lmcx_general_purpose0_s cnf75xx;
+};
+
+/**
+ * cvmx_lmc#_general_purpose1
+ */
+union cvmx_lmcx_general_purpose1 {
+ u64 u64;
+ struct cvmx_lmcx_general_purpose1_s {
+ uint64_t data:64;
+ } s;
+ struct cvmx_lmcx_general_purpose1_s cn73xx;
+ struct cvmx_lmcx_general_purpose1_s cn78xx;
+ struct cvmx_lmcx_general_purpose1_s cnf75xx;
+};
+
+/**
+ * cvmx_lmc#_general_purpose2
+ */
+union cvmx_lmcx_general_purpose2 {
+ u64 u64;
+ struct cvmx_lmcx_general_purpose2_s {
+ uint64_t reserved_16_63:48;
+ uint64_t data:16;
+ } s;
+ struct cvmx_lmcx_general_purpose2_s cn73xx;
+ struct cvmx_lmcx_general_purpose2_s cn78xx;
+ struct cvmx_lmcx_general_purpose2_s cnf75xx;
+};
+
+/**
+ * cvmx_lmc#_ifb_cnt
+ *
+ * LMC_IFB_CNT = Performance Counters
+ *
+ */
+union cvmx_lmcx_ifb_cnt {
+ u64 u64;
+ struct cvmx_lmcx_ifb_cnt_s {
+ uint64_t ifbcnt:64;
+ } s;
+ struct cvmx_lmcx_ifb_cnt_s cn61xx;
+ struct cvmx_lmcx_ifb_cnt_s cn63xx;
+ struct cvmx_lmcx_ifb_cnt_s cn63xxp1;
+ struct cvmx_lmcx_ifb_cnt_s cn66xx;
+ struct cvmx_lmcx_ifb_cnt_s cn68xx;
+ struct cvmx_lmcx_ifb_cnt_s cn68xxp1;
+ struct cvmx_lmcx_ifb_cnt_s cn70xx;
+ struct cvmx_lmcx_ifb_cnt_s cn70xxp1;
+ struct cvmx_lmcx_ifb_cnt_s cn73xx;
+ struct cvmx_lmcx_ifb_cnt_s cn78xx;
+ struct cvmx_lmcx_ifb_cnt_s cn78xxp1;
+ struct cvmx_lmcx_ifb_cnt_s cnf71xx;
+ struct cvmx_lmcx_ifb_cnt_s cnf75xx;
+};
+
+/**
+ * cvmx_lmc#_ifb_cnt_hi
+ *
+ * LMC_IFB_CNT_HI = Performance Counters
+ *
+ */
+union cvmx_lmcx_ifb_cnt_hi {
+ u64 u64;
+ struct cvmx_lmcx_ifb_cnt_hi_s {
+ uint64_t reserved_32_63:32;
+ uint64_t ifbcnt_hi:32;
+ } s;
+ struct cvmx_lmcx_ifb_cnt_hi_s cn30xx;
+ struct cvmx_lmcx_ifb_cnt_hi_s cn31xx;
+ struct cvmx_lmcx_ifb_cnt_hi_s cn38xx;
+ struct cvmx_lmcx_ifb_cnt_hi_s cn38xxp2;
+ struct cvmx_lmcx_ifb_cnt_hi_s cn50xx;
+ struct cvmx_lmcx_ifb_cnt_hi_s cn52xx;
+ struct cvmx_lmcx_ifb_cnt_hi_s cn52xxp1;
+ struct cvmx_lmcx_ifb_cnt_hi_s cn56xx;
+ struct cvmx_lmcx_ifb_cnt_hi_s cn56xxp1;
+ struct cvmx_lmcx_ifb_cnt_hi_s cn58xx;
+ struct cvmx_lmcx_ifb_cnt_hi_s cn58xxp1;
+};
+
+/**
+ * cvmx_lmc#_ifb_cnt_lo
+ *
+ * LMC_IFB_CNT_LO = Performance Counters
+ *
+ */
+union cvmx_lmcx_ifb_cnt_lo {
+ u64 u64;
+ struct cvmx_lmcx_ifb_cnt_lo_s {
+ uint64_t reserved_32_63:32;
+ uint64_t ifbcnt_lo:32;
+ } s;
+ struct cvmx_lmcx_ifb_cnt_lo_s cn30xx;
+ struct cvmx_lmcx_ifb_cnt_lo_s cn31xx;
+ struct cvmx_lmcx_ifb_cnt_lo_s cn38xx;
+ struct cvmx_lmcx_ifb_cnt_lo_s cn38xxp2;
+ struct cvmx_lmcx_ifb_cnt_lo_s cn50xx;
+ struct cvmx_lmcx_ifb_cnt_lo_s cn52xx;
+ struct cvmx_lmcx_ifb_cnt_lo_s cn52xxp1;
+ struct cvmx_lmcx_ifb_cnt_lo_s cn56xx;
+ struct cvmx_lmcx_ifb_cnt_lo_s cn56xxp1;
+ struct cvmx_lmcx_ifb_cnt_lo_s cn58xx;
+ struct cvmx_lmcx_ifb_cnt_lo_s cn58xxp1;
+};
+
+/**
+ * cvmx_lmc#_int
+ *
+ * This register contains the different interrupt-summary bits of the LMC.
+ *
+ */
+union cvmx_lmcx_int {
+ u64 u64;
+ struct cvmx_lmcx_int_s {
+ uint64_t reserved_14_63:50;
+ uint64_t macram_ded_err:1;
+ uint64_t macram_sec_err:1;
+ uint64_t ddr_err:1;
+ uint64_t dlcram_ded_err:1;
+ uint64_t dlcram_sec_err:1;
+ uint64_t ded_err:4;
+ uint64_t sec_err:4;
+ uint64_t nxm_wr_err:1;
+ } s;
+ struct cvmx_lmcx_int_cn61xx {
+ uint64_t reserved_9_63:55;
+ uint64_t ded_err:4;
+ uint64_t sec_err:4;
+ uint64_t nxm_wr_err:1;
+ } cn61xx;
+ struct cvmx_lmcx_int_cn61xx cn63xx;
+ struct cvmx_lmcx_int_cn61xx cn63xxp1;
+ struct cvmx_lmcx_int_cn61xx cn66xx;
+ struct cvmx_lmcx_int_cn61xx cn68xx;
+ struct cvmx_lmcx_int_cn61xx cn68xxp1;
+ struct cvmx_lmcx_int_cn70xx {
+ uint64_t reserved_12_63:52;
+ uint64_t ddr_err:1;
+ uint64_t dlcram_ded_err:1;
+ uint64_t dlcram_sec_err:1;
+ uint64_t ded_err:4;
+ uint64_t sec_err:4;
+ uint64_t nxm_wr_err:1;
+ } cn70xx;
+ struct cvmx_lmcx_int_cn70xx cn70xxp1;
+ struct cvmx_lmcx_int_s cn73xx;
+ struct cvmx_lmcx_int_s cn78xx;
+ struct cvmx_lmcx_int_s cn78xxp1;
+ struct cvmx_lmcx_int_cn61xx cnf71xx;
+ struct cvmx_lmcx_int_s cnf75xx;
+};
+
+/**
+ * cvmx_lmc#_int_en
+ *
+ * Unused CSR in O75.
+ *
+ */
+union cvmx_lmcx_int_en {
+ u64 u64;
+ struct cvmx_lmcx_int_en_s {
+ uint64_t reserved_6_63:58;
+ uint64_t ddr_error_alert_ena:1;
+ uint64_t dlcram_ded_ena:1;
+ uint64_t dlcram_sec_ena:1;
+ uint64_t intr_ded_ena:1;
+ uint64_t intr_sec_ena:1;
+ uint64_t intr_nxm_wr_ena:1;
+ } s;
+ struct cvmx_lmcx_int_en_cn61xx {
+ uint64_t reserved_3_63:61;
+ uint64_t intr_ded_ena:1;
+ uint64_t intr_sec_ena:1;
+ uint64_t intr_nxm_wr_ena:1;
+ } cn61xx;
+ struct cvmx_lmcx_int_en_cn61xx cn63xx;
+ struct cvmx_lmcx_int_en_cn61xx cn63xxp1;
+ struct cvmx_lmcx_int_en_cn61xx cn66xx;
+ struct cvmx_lmcx_int_en_cn61xx cn68xx;
+ struct cvmx_lmcx_int_en_cn61xx cn68xxp1;
+ struct cvmx_lmcx_int_en_s cn70xx;
+ struct cvmx_lmcx_int_en_s cn70xxp1;
+ struct cvmx_lmcx_int_en_s cn73xx;
+ struct cvmx_lmcx_int_en_s cn78xx;
+ struct cvmx_lmcx_int_en_s cn78xxp1;
+ struct cvmx_lmcx_int_en_cn61xx cnf71xx;
+ struct cvmx_lmcx_int_en_s cnf75xx;
+};
+
+/**
+ * cvmx_lmc#_lane#_crc_swiz
+ *
+ * This register contains the CRC bit swizzle for even and odd ranks.
+ *
+ */
+union cvmx_lmcx_lanex_crc_swiz {
+ u64 u64;
+ struct cvmx_lmcx_lanex_crc_swiz_s {
+ uint64_t reserved_56_63:8;
+ uint64_t r1_swiz7:3;
+ uint64_t r1_swiz6:3;
+ uint64_t r1_swiz5:3;
+ uint64_t r1_swiz4:3;
+ uint64_t r1_swiz3:3;
+ uint64_t r1_swiz2:3;
+ uint64_t r1_swiz1:3;
+ uint64_t r1_swiz0:3;
+ uint64_t reserved_24_31:8;
+ uint64_t r0_swiz7:3;
+ uint64_t r0_swiz6:3;
+ uint64_t r0_swiz5:3;
+ uint64_t r0_swiz4:3;
+ uint64_t r0_swiz3:3;
+ uint64_t r0_swiz2:3;
+ uint64_t r0_swiz1:3;
+ uint64_t r0_swiz0:3;
+ } s;
+ struct cvmx_lmcx_lanex_crc_swiz_s cn73xx;
+ struct cvmx_lmcx_lanex_crc_swiz_s cn78xx;
+ struct cvmx_lmcx_lanex_crc_swiz_s cn78xxp1;
+ struct cvmx_lmcx_lanex_crc_swiz_s cnf75xx;
+};
+
+/**
+ * cvmx_lmc#_mem_cfg0
+ *
+ * Specify the RSL base addresses for the block
+ *
+ * LMC_MEM_CFG0 = LMC Memory Configuration Register0
+ *
+ * This register controls certain parameters of the memory configuration.
+ */
+union cvmx_lmcx_mem_cfg0 {
+ u64 u64;
+ struct cvmx_lmcx_mem_cfg0_s {
+ uint64_t reserved_32_63:32;
+ uint64_t reset:1;
+ uint64_t silo_qc:1;
+ uint64_t bunk_ena:1;
+ uint64_t ded_err:4;
+ uint64_t sec_err:4;
+ uint64_t intr_ded_ena:1;
+ uint64_t intr_sec_ena:1;
+ uint64_t tcl:4;
+ uint64_t ref_int:6;
+ uint64_t pbank_lsb:4;
+ uint64_t row_lsb:3;
+ uint64_t ecc_ena:1;
+ uint64_t init_start:1;
+ } s;
+ struct cvmx_lmcx_mem_cfg0_s cn30xx;
+ struct cvmx_lmcx_mem_cfg0_s cn31xx;
+ struct cvmx_lmcx_mem_cfg0_s cn38xx;
+ struct cvmx_lmcx_mem_cfg0_s cn38xxp2;
+ struct cvmx_lmcx_mem_cfg0_s cn50xx;
+ struct cvmx_lmcx_mem_cfg0_s cn52xx;
+ struct cvmx_lmcx_mem_cfg0_s cn52xxp1;
+ struct cvmx_lmcx_mem_cfg0_s cn56xx;
+ struct cvmx_lmcx_mem_cfg0_s cn56xxp1;
+ struct cvmx_lmcx_mem_cfg0_s cn58xx;
+ struct cvmx_lmcx_mem_cfg0_s cn58xxp1;
+};
+
+/**
+ * cvmx_lmc#_mem_cfg1
+ *
+ * LMC_MEM_CFG1 = LMC Memory Configuration Register1
+ *
+ * This register controls the External Memory Configuration Timing Parameters.
+ * Please refer to the appropriate DDR part spec from your memory vendor for
+ * the various values in this CSR. The details of each of these timing
+ * parameters can be found in the JEDEC spec or the vendor spec of the
+ * memory parts.
+ */
+union cvmx_lmcx_mem_cfg1 {
+ u64 u64;
+ struct cvmx_lmcx_mem_cfg1_s {
+ uint64_t reserved_32_63:32;
+ uint64_t comp_bypass:1;
+ uint64_t trrd:3;
+ uint64_t caslat:3;
+ uint64_t tmrd:3;
+ uint64_t trfc:5;
+ uint64_t trp:4;
+ uint64_t twtr:4;
+ uint64_t trcd:4;
+ uint64_t tras:5;
+ } s;
+ struct cvmx_lmcx_mem_cfg1_s cn30xx;
+ struct cvmx_lmcx_mem_cfg1_s cn31xx;
+ struct cvmx_lmcx_mem_cfg1_cn38xx {
+ uint64_t reserved_31_63:33;
+ uint64_t trrd:3;
+ uint64_t caslat:3;
+ uint64_t tmrd:3;
+ uint64_t trfc:5;
+ uint64_t trp:4;
+ uint64_t twtr:4;
+ uint64_t trcd:4;
+ uint64_t tras:5;
+ } cn38xx;
+ struct cvmx_lmcx_mem_cfg1_cn38xx cn38xxp2;
+ struct cvmx_lmcx_mem_cfg1_s cn50xx;
+ struct cvmx_lmcx_mem_cfg1_cn38xx cn52xx;
+ struct cvmx_lmcx_mem_cfg1_cn38xx cn52xxp1;
+ struct cvmx_lmcx_mem_cfg1_cn38xx cn56xx;
+ struct cvmx_lmcx_mem_cfg1_cn38xx cn56xxp1;
+ struct cvmx_lmcx_mem_cfg1_cn38xx cn58xx;
+ struct cvmx_lmcx_mem_cfg1_cn38xx cn58xxp1;
+};
+
+/**
+ * cvmx_lmc#_modereg_params0
+ *
+ * These parameters are written into the DDR3/DDR4 MR0, MR1, MR2 and MR3
+ * registers.
+ *
+ */
+union cvmx_lmcx_modereg_params0 {
+ u64 u64;
+ struct cvmx_lmcx_modereg_params0_s {
+ uint64_t reserved_28_63:36;
+ uint64_t wrp_ext:1;
+ uint64_t cl_ext:1;
+ uint64_t al_ext:1;
+ uint64_t ppd:1;
+ uint64_t wrp:3;
+ uint64_t dllr:1;
+ uint64_t tm:1;
+ uint64_t rbt:1;
+ uint64_t cl:4;
+ uint64_t bl:2;
+ uint64_t qoff:1;
+ uint64_t tdqs:1;
+ uint64_t wlev:1;
+ uint64_t al:2;
+ uint64_t dll:1;
+ uint64_t mpr:1;
+ uint64_t mprloc:2;
+ uint64_t cwl:3;
+ } s;
+ struct cvmx_lmcx_modereg_params0_cn61xx {
+ uint64_t reserved_25_63:39;
+ uint64_t ppd:1;
+ uint64_t wrp:3;
+ uint64_t dllr:1;
+ uint64_t tm:1;
+ uint64_t rbt:1;
+ uint64_t cl:4;
+ uint64_t bl:2;
+ uint64_t qoff:1;
+ uint64_t tdqs:1;
+ uint64_t wlev:1;
+ uint64_t al:2;
+ uint64_t dll:1;
+ uint64_t mpr:1;
+ uint64_t mprloc:2;
+ uint64_t cwl:3;
+ } cn61xx;
+ struct cvmx_lmcx_modereg_params0_cn61xx cn63xx;
+ struct cvmx_lmcx_modereg_params0_cn61xx cn63xxp1;
+ struct cvmx_lmcx_modereg_params0_cn61xx cn66xx;
+ struct cvmx_lmcx_modereg_params0_cn61xx cn68xx;
+ struct cvmx_lmcx_modereg_params0_cn61xx cn68xxp1;
+ struct cvmx_lmcx_modereg_params0_cn61xx cn70xx;
+ struct cvmx_lmcx_modereg_params0_cn61xx cn70xxp1;
+ struct cvmx_lmcx_modereg_params0_s cn73xx;
+ struct cvmx_lmcx_modereg_params0_s cn78xx;
+ struct cvmx_lmcx_modereg_params0_s cn78xxp1;
+ struct cvmx_lmcx_modereg_params0_cn61xx cnf71xx;
+ struct cvmx_lmcx_modereg_params0_s cnf75xx;
+};
+
+/**
+ * cvmx_lmc#_modereg_params1
+ *
+ * These parameters are written into the DDR3 MR0, MR1, MR2 and MR3 registers.
+ *
+ */
+union cvmx_lmcx_modereg_params1 {
+ u64 u64;
+ struct cvmx_lmcx_modereg_params1_s {
+ uint64_t reserved_55_63:9;
+ uint64_t rtt_wr_11_ext:1;
+ uint64_t rtt_wr_10_ext:1;
+ uint64_t rtt_wr_01_ext:1;
+ uint64_t rtt_wr_00_ext:1;
+ uint64_t db_output_impedance:3;
+ uint64_t rtt_nom_11:3;
+ uint64_t dic_11:2;
+ uint64_t rtt_wr_11:2;
+ uint64_t srt_11:1;
+ uint64_t asr_11:1;
+ uint64_t pasr_11:3;
+ uint64_t rtt_nom_10:3;
+ uint64_t dic_10:2;
+ uint64_t rtt_wr_10:2;
+ uint64_t srt_10:1;
+ uint64_t asr_10:1;
+ uint64_t pasr_10:3;
+ uint64_t rtt_nom_01:3;
+ uint64_t dic_01:2;
+ uint64_t rtt_wr_01:2;
+ uint64_t srt_01:1;
+ uint64_t asr_01:1;
+ uint64_t pasr_01:3;
+ uint64_t rtt_nom_00:3;
+ uint64_t dic_00:2;
+ uint64_t rtt_wr_00:2;
+ uint64_t srt_00:1;
+ uint64_t asr_00:1;
+ uint64_t pasr_00:3;
+ } s;
+ struct cvmx_lmcx_modereg_params1_cn61xx {
+ uint64_t reserved_48_63:16;
+ uint64_t rtt_nom_11:3;
+ uint64_t dic_11:2;
+ uint64_t rtt_wr_11:2;
+ uint64_t srt_11:1;
+ uint64_t asr_11:1;
+ uint64_t pasr_11:3;
+ uint64_t rtt_nom_10:3;
+ uint64_t dic_10:2;
+ uint64_t rtt_wr_10:2;
+ uint64_t srt_10:1;
+ uint64_t asr_10:1;
+ uint64_t pasr_10:3;
+ uint64_t rtt_nom_01:3;
+ uint64_t dic_01:2;
+ uint64_t rtt_wr_01:2;
+ uint64_t srt_01:1;
+ uint64_t asr_01:1;
+ uint64_t pasr_01:3;
+ uint64_t rtt_nom_00:3;
+ uint64_t dic_00:2;
+ uint64_t rtt_wr_00:2;
+ uint64_t srt_00:1;
+ uint64_t asr_00:1;
+ uint64_t pasr_00:3;
+ } cn61xx;
+ struct cvmx_lmcx_modereg_params1_cn61xx cn63xx;
+ struct cvmx_lmcx_modereg_params1_cn61xx cn63xxp1;
+ struct cvmx_lmcx_modereg_params1_cn61xx cn66xx;
+ struct cvmx_lmcx_modereg_params1_cn61xx cn68xx;
+ struct cvmx_lmcx_modereg_params1_cn61xx cn68xxp1;
+ struct cvmx_lmcx_modereg_params1_cn61xx cn70xx;
+ struct cvmx_lmcx_modereg_params1_cn61xx cn70xxp1;
+ struct cvmx_lmcx_modereg_params1_s cn73xx;
+ struct cvmx_lmcx_modereg_params1_s cn78xx;
+ struct cvmx_lmcx_modereg_params1_s cn78xxp1;
+ struct cvmx_lmcx_modereg_params1_cn61xx cnf71xx;
+ struct cvmx_lmcx_modereg_params1_s cnf75xx;
+};
+
+/**
+ * cvmx_lmc#_modereg_params2
+ *
+ * These parameters are written into the DDR4 mode registers.
+ *
+ */
+union cvmx_lmcx_modereg_params2 {
+ u64 u64;
+ struct cvmx_lmcx_modereg_params2_s {
+ uint64_t reserved_41_63:23;
+ uint64_t vrefdq_train_en:1;
+ uint64_t vref_range_11:1;
+ uint64_t vref_value_11:6;
+ uint64_t rtt_park_11:3;
+ uint64_t vref_range_10:1;
+ uint64_t vref_value_10:6;
+ uint64_t rtt_park_10:3;
+ uint64_t vref_range_01:1;
+ uint64_t vref_value_01:6;
+ uint64_t rtt_park_01:3;
+ uint64_t vref_range_00:1;
+ uint64_t vref_value_00:6;
+ uint64_t rtt_park_00:3;
+ } s;
+ struct cvmx_lmcx_modereg_params2_s cn70xx;
+ struct cvmx_lmcx_modereg_params2_cn70xxp1 {
+ uint64_t reserved_40_63:24;
+ uint64_t vref_range_11:1;
+ uint64_t vref_value_11:6;
+ uint64_t rtt_park_11:3;
+ uint64_t vref_range_10:1;
+ uint64_t vref_value_10:6;
+ uint64_t rtt_park_10:3;
+ uint64_t vref_range_01:1;
+ uint64_t vref_value_01:6;
+ uint64_t rtt_park_01:3;
+ uint64_t vref_range_00:1;
+ uint64_t vref_value_00:6;
+ uint64_t rtt_park_00:3;
+ } cn70xxp1;
+ struct cvmx_lmcx_modereg_params2_s cn73xx;
+ struct cvmx_lmcx_modereg_params2_s cn78xx;
+ struct cvmx_lmcx_modereg_params2_s cn78xxp1;
+ struct cvmx_lmcx_modereg_params2_s cnf75xx;
+};
+
+/**
+ * cvmx_lmc#_modereg_params3
+ *
+ * These parameters are written into the DDR4 mode registers.
+ *
+ */
+union cvmx_lmcx_modereg_params3 {
+ u64 u64;
+ struct cvmx_lmcx_modereg_params3_s {
+ uint64_t reserved_39_63:25;
+ uint64_t xrank_add_tccd_l:3;
+ uint64_t xrank_add_tccd_s:3;
+ uint64_t mpr_fmt:2;
+ uint64_t wr_cmd_lat:2;
+ uint64_t fgrm:3;
+ uint64_t temp_sense:1;
+ uint64_t pda:1;
+ uint64_t gd:1;
+ uint64_t crc:1;
+ uint64_t lpasr:2;
+ uint64_t tccd_l:3;
+ uint64_t rd_dbi:1;
+ uint64_t wr_dbi:1;
+ uint64_t dm:1;
+ uint64_t ca_par_pers:1;
+ uint64_t odt_pd:1;
+ uint64_t par_lat_mode:3;
+ uint64_t wr_preamble:1;
+ uint64_t rd_preamble:1;
+ uint64_t sre_abort:1;
+ uint64_t cal:3;
+ uint64_t vref_mon:1;
+ uint64_t tc_ref:1;
+ uint64_t max_pd:1;
+ } s;
+ struct cvmx_lmcx_modereg_params3_cn70xx {
+ uint64_t reserved_33_63:31;
+ uint64_t mpr_fmt:2;
+ uint64_t wr_cmd_lat:2;
+ uint64_t fgrm:3;
+ uint64_t temp_sense:1;
+ uint64_t pda:1;
+ uint64_t gd:1;
+ uint64_t crc:1;
+ uint64_t lpasr:2;
+ uint64_t tccd_l:3;
+ uint64_t rd_dbi:1;
+ uint64_t wr_dbi:1;
+ uint64_t dm:1;
+ uint64_t ca_par_pers:1;
+ uint64_t odt_pd:1;
+ uint64_t par_lat_mode:3;
+ uint64_t wr_preamble:1;
+ uint64_t rd_preamble:1;
+ uint64_t sre_abort:1;
+ uint64_t cal:3;
+ uint64_t vref_mon:1;
+ uint64_t tc_ref:1;
+ uint64_t max_pd:1;
+ } cn70xx;
+ struct cvmx_lmcx_modereg_params3_cn70xx cn70xxp1;
+ struct cvmx_lmcx_modereg_params3_s cn73xx;
+ struct cvmx_lmcx_modereg_params3_s cn78xx;
+ struct cvmx_lmcx_modereg_params3_s cn78xxp1;
+ struct cvmx_lmcx_modereg_params3_s cnf75xx;
+};
+
+/**
+ * cvmx_lmc#_mpr_data0
+ *
+ * This register provides bits <63:0> of the MPR data register.
+ *
+ */
+union cvmx_lmcx_mpr_data0 {
+ u64 u64;
+ struct cvmx_lmcx_mpr_data0_s {
+ uint64_t mpr_data:64;
+ } s;
+ struct cvmx_lmcx_mpr_data0_s cn70xx;
+ struct cvmx_lmcx_mpr_data0_s cn70xxp1;
+ struct cvmx_lmcx_mpr_data0_s cn73xx;
+ struct cvmx_lmcx_mpr_data0_s cn78xx;
+ struct cvmx_lmcx_mpr_data0_s cn78xxp1;
+ struct cvmx_lmcx_mpr_data0_s cnf75xx;
+};
+
+/**
+ * cvmx_lmc#_mpr_data1
+ *
+ * This register provides bits <127:64> of the MPR data register.
+ *
+ */
+union cvmx_lmcx_mpr_data1 {
+ u64 u64;
+ struct cvmx_lmcx_mpr_data1_s {
+ uint64_t mpr_data:64;
+ } s;
+ struct cvmx_lmcx_mpr_data1_s cn70xx;
+ struct cvmx_lmcx_mpr_data1_s cn70xxp1;
+ struct cvmx_lmcx_mpr_data1_s cn73xx;
+ struct cvmx_lmcx_mpr_data1_s cn78xx;
+ struct cvmx_lmcx_mpr_data1_s cn78xxp1;
+ struct cvmx_lmcx_mpr_data1_s cnf75xx;
+};
+
+/**
+ * cvmx_lmc#_mpr_data2
+ *
+ * This register provides bits <143:128> of the MPR data register.
+ *
+ */
+union cvmx_lmcx_mpr_data2 {
+ u64 u64;
+ struct cvmx_lmcx_mpr_data2_s {
+ uint64_t reserved_16_63:48;
+ uint64_t mpr_data:16;
+ } s;
+ struct cvmx_lmcx_mpr_data2_s cn70xx;
+ struct cvmx_lmcx_mpr_data2_s cn70xxp1;
+ struct cvmx_lmcx_mpr_data2_s cn73xx;
+ struct cvmx_lmcx_mpr_data2_s cn78xx;
+ struct cvmx_lmcx_mpr_data2_s cn78xxp1;
+ struct cvmx_lmcx_mpr_data2_s cnf75xx;
+};
+
+/**
+ * cvmx_lmc#_mr_mpr_ctl
+ *
+ * This register provides the control functions when programming the MPR
+ * of DDR4 DRAMs.
+ *
+ */
+union cvmx_lmcx_mr_mpr_ctl {
+ u64 u64;
+ struct cvmx_lmcx_mr_mpr_ctl_s {
+ uint64_t reserved_61_63:3;
+ uint64_t mr_wr_secure_key_ena:1;
+ uint64_t pba_func_space:3;
+ uint64_t mr_wr_bg1:1;
+ uint64_t mpr_sample_dq_enable:1;
+ uint64_t pda_early_dqx:1;
+ uint64_t mr_wr_pba_enable:1;
+ uint64_t mr_wr_use_default_value:1;
+ uint64_t mpr_whole_byte_enable:1;
+ uint64_t mpr_byte_select:4;
+ uint64_t mpr_bit_select:2;
+ uint64_t mpr_wr:1;
+ uint64_t mpr_loc:2;
+ uint64_t mr_wr_pda_enable:1;
+ uint64_t mr_wr_pda_mask:18;
+ uint64_t mr_wr_rank:2;
+ uint64_t mr_wr_sel:3;
+ uint64_t mr_wr_addr:18;
+ } s;
+ struct cvmx_lmcx_mr_mpr_ctl_cn70xx {
+ uint64_t reserved_52_63:12;
+ uint64_t mpr_whole_byte_enable:1;
+ uint64_t mpr_byte_select:4;
+ uint64_t mpr_bit_select:2;
+ uint64_t mpr_wr:1;
+ uint64_t mpr_loc:2;
+ uint64_t mr_wr_pda_enable:1;
+ uint64_t mr_wr_pda_mask:18;
+ uint64_t mr_wr_rank:2;
+ uint64_t mr_wr_sel:3;
+ uint64_t mr_wr_addr:18;
+ } cn70xx;
+ struct cvmx_lmcx_mr_mpr_ctl_cn70xx cn70xxp1;
+ struct cvmx_lmcx_mr_mpr_ctl_s cn73xx;
+ struct cvmx_lmcx_mr_mpr_ctl_s cn78xx;
+ struct cvmx_lmcx_mr_mpr_ctl_s cn78xxp1;
+ struct cvmx_lmcx_mr_mpr_ctl_s cnf75xx;
+};
+
+/**
+ * cvmx_lmc#_ns_ctl
+ *
+ * This register contains control parameters for handling nonsecure accesses.
+ *
+ */
+union cvmx_lmcx_ns_ctl {
+ u64 u64;
+ struct cvmx_lmcx_ns_ctl_s {
+ uint64_t reserved_26_63:38;
+ uint64_t ns_scramble_dis:1;
+ uint64_t reserved_18_24:7;
+ uint64_t adr_offset:18;
+ } s;
+ struct cvmx_lmcx_ns_ctl_s cn73xx;
+ struct cvmx_lmcx_ns_ctl_s cn78xx;
+ struct cvmx_lmcx_ns_ctl_s cnf75xx;
+};
+
+/**
+ * cvmx_lmc#_nxm
+ *
+ * Following is the decoding for mem_msb/rank:
+ * 0x0: mem_msb = mem_adr[25].
+ * 0x1: mem_msb = mem_adr[26].
+ * 0x2: mem_msb = mem_adr[27].
+ * 0x3: mem_msb = mem_adr[28].
+ * 0x4: mem_msb = mem_adr[29].
+ * 0x5: mem_msb = mem_adr[30].
+ * 0x6: mem_msb = mem_adr[31].
+ * 0x7: mem_msb = mem_adr[32].
+ * 0x8: mem_msb = mem_adr[33].
+ * 0x9: mem_msb = mem_adr[34].
+ * 0xA: mem_msb = mem_adr[35].
+ * 0xB: mem_msb = mem_adr[36].
+ * 0xC-0xF = Reserved.
+ *
+ * For example, for a DIMM made of Samsung's K4B1G0846C-ZCF7 1Gb
+ * (16M * 8 bit * 8 bank) parts, the column address width = 10; so with
+ * 10b of col, 3b of bus, 3b of bank, row_lsb = 16.
+ * Therefore, row = mem_adr[29:16] and mem_msb = 4.
+ *
+ * Note that addresses greater than the max defined space (pbank_msb)
+ * are also treated as NXM accesses.
+ */
+union cvmx_lmcx_nxm {
+ u64 u64;
+ struct cvmx_lmcx_nxm_s {
+ uint64_t reserved_40_63:24;
+ uint64_t mem_msb_d3_r1:4;
+ uint64_t mem_msb_d3_r0:4;
+ uint64_t mem_msb_d2_r1:4;
+ uint64_t mem_msb_d2_r0:4;
+ uint64_t mem_msb_d1_r1:4;
+ uint64_t mem_msb_d1_r0:4;
+ uint64_t mem_msb_d0_r1:4;
+ uint64_t mem_msb_d0_r0:4;
+ uint64_t cs_mask:8;
+ } s;
+ struct cvmx_lmcx_nxm_cn52xx {
+ uint64_t reserved_8_63:56;
+ uint64_t cs_mask:8;
+ } cn52xx;
+ struct cvmx_lmcx_nxm_cn52xx cn56xx;
+ struct cvmx_lmcx_nxm_cn52xx cn58xx;
+ struct cvmx_lmcx_nxm_s cn61xx;
+ struct cvmx_lmcx_nxm_s cn63xx;
+ struct cvmx_lmcx_nxm_s cn63xxp1;
+ struct cvmx_lmcx_nxm_s cn66xx;
+ struct cvmx_lmcx_nxm_s cn68xx;
+ struct cvmx_lmcx_nxm_s cn68xxp1;
+ struct cvmx_lmcx_nxm_cn70xx {
+ uint64_t reserved_24_63:40;
+ uint64_t mem_msb_d1_r1:4;
+ uint64_t mem_msb_d1_r0:4;
+ uint64_t mem_msb_d0_r1:4;
+ uint64_t mem_msb_d0_r0:4;
+ uint64_t reserved_4_7:4;
+ uint64_t cs_mask:4;
+ } cn70xx;
+ struct cvmx_lmcx_nxm_cn70xx cn70xxp1;
+ struct cvmx_lmcx_nxm_cn70xx cn73xx;
+ struct cvmx_lmcx_nxm_cn70xx cn78xx;
+ struct cvmx_lmcx_nxm_cn70xx cn78xxp1;
+ struct cvmx_lmcx_nxm_s cnf71xx;
+ struct cvmx_lmcx_nxm_cn70xx cnf75xx;
+};
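+
+/*
+ * Illustrative sketch of the worked example above: encoding 0x0 selects
+ * mem_adr[25], so the MEM_MSB value for a rank is its top address bit
+ * minus 25. With row_lsb = 16 and a 14-bit row, the top bit is
+ * mem_adr[29] and the encoding is 4, matching the text.
+ */
+static inline unsigned int nxm_mem_msb_sketch(unsigned int row_lsb,
+					      unsigned int row_width)
+{
+ unsigned int top_bit = row_lsb + row_width - 1;	/* 16 + 14 - 1 = 29 */
+
+ return top_bit - 25;	/* 29 - 25 = 4 */
+}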
+
+/**
+ * cvmx_lmc#_nxm_fadr
+ *
+ * This register captures only the first transaction with a NXM error while
+ * an interrupt is pending, and only captures a subsequent event once the
+ * interrupt is cleared by writing a one to LMC()_INT[NXM_ERR]. It captures
+ * the actual L2C-LMC address provided to the LMC that caused the NXM error.
+ * A read or write NXM error is captured only if enabled using the NXM
+ * event enables.
+ */
+union cvmx_lmcx_nxm_fadr {
+ u64 u64;
+ struct cvmx_lmcx_nxm_fadr_s {
+ uint64_t reserved_40_63:24;
+ uint64_t nxm_faddr_ext:1;
+ uint64_t nxm_src:1;
+ uint64_t nxm_type:1;
+ uint64_t nxm_faddr:37;
+ } s;
+ struct cvmx_lmcx_nxm_fadr_cn70xx {
+ uint64_t reserved_39_63:25;
+ uint64_t nxm_src:1;
+ uint64_t nxm_type:1;
+ uint64_t nxm_faddr:37;
+ } cn70xx;
+ struct cvmx_lmcx_nxm_fadr_cn70xx cn70xxp1;
+ struct cvmx_lmcx_nxm_fadr_s cn73xx;
+ struct cvmx_lmcx_nxm_fadr_s cn78xx;
+ struct cvmx_lmcx_nxm_fadr_s cn78xxp1;
+ struct cvmx_lmcx_nxm_fadr_s cnf75xx;
+};
+
+/**
+ * cvmx_lmc#_ops_cnt
+ *
+ * LMC_OPS_CNT = Performance Counters
+ *
+ */
+union cvmx_lmcx_ops_cnt {
+ u64 u64;
+ struct cvmx_lmcx_ops_cnt_s {
+ uint64_t opscnt:64;
+ } s;
+ struct cvmx_lmcx_ops_cnt_s cn61xx;
+ struct cvmx_lmcx_ops_cnt_s cn63xx;
+ struct cvmx_lmcx_ops_cnt_s cn63xxp1;
+ struct cvmx_lmcx_ops_cnt_s cn66xx;
+ struct cvmx_lmcx_ops_cnt_s cn68xx;
+ struct cvmx_lmcx_ops_cnt_s cn68xxp1;
+ struct cvmx_lmcx_ops_cnt_s cn70xx;
+ struct cvmx_lmcx_ops_cnt_s cn70xxp1;
+ struct cvmx_lmcx_ops_cnt_s cn73xx;
+ struct cvmx_lmcx_ops_cnt_s cn78xx;
+ struct cvmx_lmcx_ops_cnt_s cn78xxp1;
+ struct cvmx_lmcx_ops_cnt_s cnf71xx;
+ struct cvmx_lmcx_ops_cnt_s cnf75xx;
+};
+
+/**
+ * cvmx_lmc#_ops_cnt_hi
+ *
+ * LMC_OPS_CNT_HI = Performance Counters
+ *
+ */
+union cvmx_lmcx_ops_cnt_hi {
+ u64 u64;
+ struct cvmx_lmcx_ops_cnt_hi_s {
+ uint64_t reserved_32_63:32;
+ uint64_t opscnt_hi:32;
+ } s;
+ struct cvmx_lmcx_ops_cnt_hi_s cn30xx;
+ struct cvmx_lmcx_ops_cnt_hi_s cn31xx;
+ struct cvmx_lmcx_ops_cnt_hi_s cn38xx;
+ struct cvmx_lmcx_ops_cnt_hi_s cn38xxp2;
+ struct cvmx_lmcx_ops_cnt_hi_s cn50xx;
+ struct cvmx_lmcx_ops_cnt_hi_s cn52xx;
+ struct cvmx_lmcx_ops_cnt_hi_s cn52xxp1;
+ struct cvmx_lmcx_ops_cnt_hi_s cn56xx;
+ struct cvmx_lmcx_ops_cnt_hi_s cn56xxp1;
+ struct cvmx_lmcx_ops_cnt_hi_s cn58xx;
+ struct cvmx_lmcx_ops_cnt_hi_s cn58xxp1;
+};
+
+/**
+ * cvmx_lmc#_ops_cnt_lo
+ *
+ * LMC_OPS_CNT_LO = Performance Counters
+ *
+ */
+union cvmx_lmcx_ops_cnt_lo {
+ u64 u64;
+ struct cvmx_lmcx_ops_cnt_lo_s {
+ uint64_t reserved_32_63:32;
+ uint64_t opscnt_lo:32;
+ } s;
+ struct cvmx_lmcx_ops_cnt_lo_s cn30xx;
+ struct cvmx_lmcx_ops_cnt_lo_s cn31xx;
+ struct cvmx_lmcx_ops_cnt_lo_s cn38xx;
+ struct cvmx_lmcx_ops_cnt_lo_s cn38xxp2;
+ struct cvmx_lmcx_ops_cnt_lo_s cn50xx;
+ struct cvmx_lmcx_ops_cnt_lo_s cn52xx;
+ struct cvmx_lmcx_ops_cnt_lo_s cn52xxp1;
+ struct cvmx_lmcx_ops_cnt_lo_s cn56xx;
+ struct cvmx_lmcx_ops_cnt_lo_s cn56xxp1;
+ struct cvmx_lmcx_ops_cnt_lo_s cn58xx;
+ struct cvmx_lmcx_ops_cnt_lo_s cn58xxp1;
+};
+
+/**
+ * cvmx_lmc#_phy_ctl
+ *
+ * LMC_PHY_CTL = LMC PHY Control
+ *
+ */
+union cvmx_lmcx_phy_ctl {
+ u64 u64;
+ struct cvmx_lmcx_phy_ctl_s {
+ uint64_t reserved_61_63:3;
+ uint64_t dsk_dbg_load_dis:1;
+ uint64_t dsk_dbg_overwrt_ena:1;
+ uint64_t dsk_dbg_wr_mode:1;
+ uint64_t data_rate_loopback:1;
+ uint64_t dq_shallow_loopback:1;
+ uint64_t dm_disable:1;
+ uint64_t c1_sel:2;
+ uint64_t c0_sel:2;
+ uint64_t phy_reset:1;
+ uint64_t dsk_dbg_rd_complete:1;
+ uint64_t dsk_dbg_rd_data:10;
+ uint64_t dsk_dbg_rd_start:1;
+ uint64_t dsk_dbg_clk_scaler:2;
+ uint64_t dsk_dbg_offset:2;
+ uint64_t dsk_dbg_num_bits_sel:1;
+ uint64_t dsk_dbg_byte_sel:4;
+ uint64_t dsk_dbg_bit_sel:4;
+ uint64_t dbi_mode_ena:1;
+ uint64_t ddr_error_n_ena:1;
+ uint64_t ref_pin_on:1;
+ uint64_t dac_on:1;
+ uint64_t int_pad_loopback_ena:1;
+ uint64_t int_phy_loopback_ena:1;
+ uint64_t phy_dsk_reset:1;
+ uint64_t phy_dsk_byp:1;
+ uint64_t phy_pwr_save_disable:1;
+ uint64_t ten:1;
+ uint64_t rx_always_on:1;
+ uint64_t lv_mode:1;
+ uint64_t ck_tune1:1;
+ uint64_t ck_dlyout1:4;
+ uint64_t ck_tune0:1;
+ uint64_t ck_dlyout0:4;
+ uint64_t loopback:1;
+ uint64_t loopback_pos:1;
+ uint64_t ts_stagger:1;
+ } s;
+ struct cvmx_lmcx_phy_ctl_cn61xx {
+ uint64_t reserved_15_63:49;
+ uint64_t rx_always_on:1;
+ uint64_t lv_mode:1;
+ uint64_t ck_tune1:1;
+ uint64_t ck_dlyout1:4;
+ uint64_t ck_tune0:1;
+ uint64_t ck_dlyout0:4;
+ uint64_t loopback:1;
+ uint64_t loopback_pos:1;
+ uint64_t ts_stagger:1;
+ } cn61xx;
+ struct cvmx_lmcx_phy_ctl_cn61xx cn63xx;
+ struct cvmx_lmcx_phy_ctl_cn63xxp1 {
+ uint64_t reserved_14_63:50;
+ uint64_t lv_mode:1;
+ uint64_t ck_tune1:1;
+ uint64_t ck_dlyout1:4;
+ uint64_t ck_tune0:1;
+ uint64_t ck_dlyout0:4;
+ uint64_t loopback:1;
+ uint64_t loopback_pos:1;
+ uint64_t ts_stagger:1;
+ } cn63xxp1;
+ struct cvmx_lmcx_phy_ctl_cn61xx cn66xx;
+ struct cvmx_lmcx_phy_ctl_cn61xx cn68xx;
+ struct cvmx_lmcx_phy_ctl_cn61xx cn68xxp1;
+ struct cvmx_lmcx_phy_ctl_cn70xx {
+ uint64_t reserved_51_63:13;
+ uint64_t phy_reset:1;
+ uint64_t dsk_dbg_rd_complete:1;
+ uint64_t dsk_dbg_rd_data:10;
+ uint64_t dsk_dbg_rd_start:1;
+ uint64_t dsk_dbg_clk_scaler:2;
+ uint64_t dsk_dbg_offset:2;
+ uint64_t dsk_dbg_num_bits_sel:1;
+ uint64_t dsk_dbg_byte_sel:4;
+ uint64_t dsk_dbg_bit_sel:4;
+ uint64_t dbi_mode_ena:1;
+ uint64_t ddr_error_n_ena:1;
+ uint64_t ref_pin_on:1;
+ uint64_t dac_on:1;
+ uint64_t int_pad_loopback_ena:1;
+ uint64_t int_phy_loopback_ena:1;
+ uint64_t phy_dsk_reset:1;
+ uint64_t phy_dsk_byp:1;
+ uint64_t phy_pwr_save_disable:1;
+ uint64_t ten:1;
+ uint64_t rx_always_on:1;
+ uint64_t lv_mode:1;
+ uint64_t ck_tune1:1;
+ uint64_t ck_dlyout1:4;
+ uint64_t ck_tune0:1;
+ uint64_t ck_dlyout0:4;
+ uint64_t loopback:1;
+ uint64_t loopback_pos:1;
+ uint64_t ts_stagger:1;
+ } cn70xx;
+ struct cvmx_lmcx_phy_ctl_cn70xx cn70xxp1;
+ struct cvmx_lmcx_phy_ctl_cn73xx {
+ uint64_t reserved_58_63:6;
+ uint64_t data_rate_loopback:1;
+ uint64_t dq_shallow_loopback:1;
+ uint64_t dm_disable:1;
+ uint64_t c1_sel:2;
+ uint64_t c0_sel:2;
+ uint64_t phy_reset:1;
+ uint64_t dsk_dbg_rd_complete:1;
+ uint64_t dsk_dbg_rd_data:10;
+ uint64_t dsk_dbg_rd_start:1;
+ uint64_t dsk_dbg_clk_scaler:2;
+ uint64_t dsk_dbg_offset:2;
+ uint64_t dsk_dbg_num_bits_sel:1;
+ uint64_t dsk_dbg_byte_sel:4;
+ uint64_t dsk_dbg_bit_sel:4;
+ uint64_t dbi_mode_ena:1;
+ uint64_t ddr_error_n_ena:1;
+ uint64_t ref_pin_on:1;
+ uint64_t dac_on:1;
+ uint64_t int_pad_loopback_ena:1;
+ uint64_t int_phy_loopback_ena:1;
+ uint64_t phy_dsk_reset:1;
+ uint64_t phy_dsk_byp:1;
+ uint64_t phy_pwr_save_disable:1;
+ uint64_t ten:1;
+ uint64_t rx_always_on:1;
+ uint64_t lv_mode:1;
+ uint64_t ck_tune1:1;
+ uint64_t ck_dlyout1:4;
+ uint64_t ck_tune0:1;
+ uint64_t ck_dlyout0:4;
+ uint64_t loopback:1;
+ uint64_t loopback_pos:1;
+ uint64_t ts_stagger:1;
+ } cn73xx;
+ struct cvmx_lmcx_phy_ctl_s cn78xx;
+ struct cvmx_lmcx_phy_ctl_s cn78xxp1;
+ struct cvmx_lmcx_phy_ctl_cn61xx cnf71xx;
+ struct cvmx_lmcx_phy_ctl_s cnf75xx;
+};
+
+/**
+ * cvmx_lmc#_phy_ctl2
+ */
+union cvmx_lmcx_phy_ctl2 {
+ u64 u64;
+ struct cvmx_lmcx_phy_ctl2_s {
+ uint64_t reserved_27_63:37;
+ uint64_t dqs8_dsk_adj:3;
+ uint64_t dqs7_dsk_adj:3;
+ uint64_t dqs6_dsk_adj:3;
+ uint64_t dqs5_dsk_adj:3;
+ uint64_t dqs4_dsk_adj:3;
+ uint64_t dqs3_dsk_adj:3;
+ uint64_t dqs2_dsk_adj:3;
+ uint64_t dqs1_dsk_adj:3;
+ uint64_t dqs0_dsk_adj:3;
+ } s;
+ struct cvmx_lmcx_phy_ctl2_s cn78xx;
+ struct cvmx_lmcx_phy_ctl2_s cnf75xx;
+};
+
+/**
+ * cvmx_lmc#_pll_bwctl
+ *
+ * LMC_PLL_BWCTL = DDR PLL Bandwidth Control Register
+ *
+ */
+union cvmx_lmcx_pll_bwctl {
+ u64 u64;
+ struct cvmx_lmcx_pll_bwctl_s {
+ uint64_t reserved_5_63:59;
+ uint64_t bwupd:1;
+ uint64_t bwctl:4;
+ } s;
+ struct cvmx_lmcx_pll_bwctl_s cn30xx;
+ struct cvmx_lmcx_pll_bwctl_s cn31xx;
+ struct cvmx_lmcx_pll_bwctl_s cn38xx;
+ struct cvmx_lmcx_pll_bwctl_s cn38xxp2;
+};
+
+/**
+ * cvmx_lmc#_pll_ctl
+ *
+ * LMC_PLL_CTL = LMC pll control
+ *
+ *
+ * Notes:
+ * This CSR is only relevant for LMC0. LMC1_PLL_CTL is not used.
+ *
+ * Exactly one of EN2, EN4, EN6, EN8, EN12, EN16 must be set.
+ *
+ * The resultant DDR_CK frequency is the DDR2_REF_CLK
+ * frequency multiplied by:
+ *
+ * (CLKF + 1) / ((CLKR + 1) * EN(2,4,6,8,12,16))
+ *
+ * The PLL frequency, which is:
+ *
+ * (DDR2_REF_CLK freq) * ((CLKF + 1) / (CLKR + 1))
+ *
+ * must reside between 1.2 and 2.5 GHz. A faster PLL frequency is
+ * desirable if there is a choice.
+ */
+union cvmx_lmcx_pll_ctl {
+ u64 u64;
+ struct cvmx_lmcx_pll_ctl_s {
+ uint64_t reserved_30_63:34;
+ uint64_t bypass:1;
+ uint64_t fasten_n:1;
+ uint64_t div_reset:1;
+ uint64_t reset_n:1;
+ uint64_t clkf:12;
+ uint64_t clkr:6;
+ uint64_t reserved_6_7:2;
+ uint64_t en16:1;
+ uint64_t en12:1;
+ uint64_t en8:1;
+ uint64_t en6:1;
+ uint64_t en4:1;
+ uint64_t en2:1;
+ } s;
+ struct cvmx_lmcx_pll_ctl_cn50xx {
+ uint64_t reserved_29_63:35;
+ uint64_t fasten_n:1;
+ uint64_t div_reset:1;
+ uint64_t reset_n:1;
+ uint64_t clkf:12;
+ uint64_t clkr:6;
+ uint64_t reserved_6_7:2;
+ uint64_t en16:1;
+ uint64_t en12:1;
+ uint64_t en8:1;
+ uint64_t en6:1;
+ uint64_t en4:1;
+ uint64_t en2:1;
+ } cn50xx;
+ struct cvmx_lmcx_pll_ctl_s cn52xx;
+ struct cvmx_lmcx_pll_ctl_s cn52xxp1;
+ struct cvmx_lmcx_pll_ctl_cn50xx cn56xx;
+ struct cvmx_lmcx_pll_ctl_cn56xxp1 {
+ uint64_t reserved_28_63:36;
+ uint64_t div_reset:1;
+ uint64_t reset_n:1;
+ uint64_t clkf:12;
+ uint64_t clkr:6;
+ uint64_t reserved_6_7:2;
+ uint64_t en16:1;
+ uint64_t en12:1;
+ uint64_t en8:1;
+ uint64_t en6:1;
+ uint64_t en4:1;
+ uint64_t en2:1;
+ } cn56xxp1;
+ struct cvmx_lmcx_pll_ctl_cn56xxp1 cn58xx;
+ struct cvmx_lmcx_pll_ctl_cn56xxp1 cn58xxp1;
+};
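+
+/*
+ * Illustrative sketch of the frequency formula above. en is the divider
+ * implied by whichever one of EN2..EN16 is set (2, 4, 6, 8, 12 or 16);
+ * ref_hz is the DDR2_REF_CLK frequency. The caller must still check
+ * that ref_hz * (clkf + 1) / (clkr + 1) lands between 1.2 and 2.5 GHz.
+ */
+static inline u64 ddr_ck_hz_sketch(u64 ref_hz, unsigned int clkf,
+				   unsigned int clkr, unsigned int en)
+{
+ /* DDR_CK = DDR2_REF_CLK * (CLKF + 1) / ((CLKR + 1) * EN) */
+ return ref_hz * (clkf + 1) / ((u64)(clkr + 1) * en);
+}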
+
+/**
+ * cvmx_lmc#_pll_status
+ *
+ * LMC_PLL_STATUS = LMC pll status
+ *
+ */
+union cvmx_lmcx_pll_status {
+ u64 u64;
+ struct cvmx_lmcx_pll_status_s {
+ uint64_t reserved_32_63:32;
+ uint64_t ddr__nctl:5;
+ uint64_t ddr__pctl:5;
+ uint64_t reserved_2_21:20;
+ uint64_t rfslip:1;
+ uint64_t fbslip:1;
+ } s;
+ struct cvmx_lmcx_pll_status_s cn50xx;
+ struct cvmx_lmcx_pll_status_s cn52xx;
+ struct cvmx_lmcx_pll_status_s cn52xxp1;
+ struct cvmx_lmcx_pll_status_s cn56xx;
+ struct cvmx_lmcx_pll_status_s cn56xxp1;
+ struct cvmx_lmcx_pll_status_s cn58xx;
+ struct cvmx_lmcx_pll_status_cn58xxp1 {
+ uint64_t reserved_2_63:62;
+ uint64_t rfslip:1;
+ uint64_t fbslip:1;
+ } cn58xxp1;
+};
+
+/**
+ * cvmx_lmc#_ppr_ctl
+ *
+ * This register contains programmable timing and control parameters used
+ * when running the post package repair sequence. The timing fields
+ * PPR_CTL[TPGMPST], PPR_CTL[TPGM_EXIT] and PPR_CTL[TPGM] need to be set as
+ * to satisfy the minimum values mentioned in the JEDEC DDR4 spec before
+ * running the PPR sequence. See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] to run
+ * the PPR sequence.
+ *
+ * Running hard PPR may require LMC to issue security key as four consecutive
+ * MR0 commands, each with a unique address field A[17:0]. Set the security
+ * key in the general purpose CSRs as follows:
+ *
+ * _ Security key 0 = LMC()_GENERAL_PURPOSE0[DATA]<17:0>.
+ * _ Security key 1 = LMC()_GENERAL_PURPOSE0[DATA]<35:18>.
+ * _ Security key 2 = LMC()_GENERAL_PURPOSE1[DATA]<17:0>.
+ * _ Security key 3 = LMC()_GENERAL_PURPOSE1[DATA]<35:18>.
+ */
+union cvmx_lmcx_ppr_ctl {
+ u64 u64;
+ struct cvmx_lmcx_ppr_ctl_s {
+ uint64_t reserved_27_63:37;
+ uint64_t lrank_sel:3;
+ uint64_t skip_issue_security:1;
+ uint64_t sppr:1;
+ uint64_t tpgm:10;
+ uint64_t tpgm_exit:5;
+ uint64_t tpgmpst:7;
+ } s;
+ struct cvmx_lmcx_ppr_ctl_cn73xx {
+ uint64_t reserved_24_63:40;
+ uint64_t skip_issue_security:1;
+ uint64_t sppr:1;
+ uint64_t tpgm:10;
+ uint64_t tpgm_exit:5;
+ uint64_t tpgmpst:7;
+ } cn73xx;
+ struct cvmx_lmcx_ppr_ctl_s cn78xx;
+ struct cvmx_lmcx_ppr_ctl_cn73xx cnf75xx;
+};
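+
+/*
+ * Illustrative sketch of the hard-PPR security key layout above. The
+ * GENERAL_PURPOSE0/1 address macros are assumed to be defined with the
+ * other CSR addresses in this file; the helper name and key parameters
+ * are hypothetical.
+ */
+static inline void lmcx_set_ppr_keys(int if_num, u64 key0, u64 key1,
+				     u64 key2, u64 key3)
+{
+	/* Keys 0 and 1 occupy GENERAL_PURPOSE0[DATA]<17:0> and <35:18>. */
+	csr_wr(CVMX_LMCX_GENERAL_PURPOSE0(if_num),
+	       (key0 & 0x3ffff) | ((key1 & 0x3ffff) << 18));
+	/* Keys 2 and 3 occupy GENERAL_PURPOSE1[DATA]<17:0> and <35:18>. */
+	csr_wr(CVMX_LMCX_GENERAL_PURPOSE1(if_num),
+	       (key2 & 0x3ffff) | ((key3 & 0x3ffff) << 18));
+}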
+
+/**
+ * cvmx_lmc#_read_level_ctl
+ *
+ * Notes:
+ * The HW writes and reads the cache block selected by ROW, COL, BNK and
+ * the rank as part of a read-leveling sequence for a rank.
+ * A cache block write is 16 72-bit words. PATTERN selects the write value.
+ * For the first 8 words, the write value is the bit PATTERN<i> duplicated
+ * into a 72-bit vector. The write value of the last 8 words is the inverse
+ * of the write value of the first 8 words. See LMC*_READ_LEVEL_RANK*.
+ */
+union cvmx_lmcx_read_level_ctl {
+ u64 u64;
+ struct cvmx_lmcx_read_level_ctl_s {
+ uint64_t reserved_44_63:20;
+ uint64_t rankmask:4;
+ uint64_t pattern:8;
+ uint64_t row:16;
+ uint64_t col:12;
+ uint64_t reserved_3_3:1;
+ uint64_t bnk:3;
+ } s;
+ struct cvmx_lmcx_read_level_ctl_s cn52xx;
+ struct cvmx_lmcx_read_level_ctl_s cn52xxp1;
+ struct cvmx_lmcx_read_level_ctl_s cn56xx;
+ struct cvmx_lmcx_read_level_ctl_s cn56xxp1;
+};
+
+/**
+ * cvmx_lmc#_read_level_dbg
+ *
+ * Notes:
+ * A given read of LMC*_READ_LEVEL_DBG returns the read-leveling pass/fail
+ * results for all possible delay settings (i.e. the BITMASK) for only one
+ * byte in the last rank that the HW read-leveled.
+ * LMC*_READ_LEVEL_DBG[BYTE] selects the particular byte.
+ * To get these pass/fail results for a different rank, you must run
+ * the hardware read-leveling again. For example, it is possible to get the
+ * BITMASK results for every byte of every rank if you run read-leveling
+ * separately for each rank, probing LMC*_READ_LEVEL_DBG between each
+ * read-leveling.
+ */
+union cvmx_lmcx_read_level_dbg {
+ u64 u64;
+ struct cvmx_lmcx_read_level_dbg_s {
+ uint64_t reserved_32_63:32;
+ uint64_t bitmask:16;
+ uint64_t reserved_4_15:12;
+ uint64_t byte:4;
+ } s;
+ struct cvmx_lmcx_read_level_dbg_s cn52xx;
+ struct cvmx_lmcx_read_level_dbg_s cn52xxp1;
+ struct cvmx_lmcx_read_level_dbg_s cn56xx;
+ struct cvmx_lmcx_read_level_dbg_s cn56xxp1;
+};
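+
+/*
+ * Illustrative sketch: selecting a byte lane and reading back its
+ * pass/fail BITMASK after a read-leveling run, per the notes above.
+ * The address macro is assumed; the helper name is hypothetical.
+ */
+static inline int lmcx_read_level_bitmask(int if_num, int byte)
+{
+	union cvmx_lmcx_read_level_dbg dbg;
+
+	dbg.u64 = 0;
+	dbg.s.byte = byte;		/* pick the byte lane to probe */
+	csr_wr(CVMX_LMCX_READ_LEVEL_DBG(if_num), dbg.u64);
+	dbg.u64 = csr_rd(CVMX_LMCX_READ_LEVEL_DBG(if_num));
+	return dbg.s.bitmask;		/* one bit per delay setting */
+}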
+
+/**
+ * cvmx_lmc#_read_level_rank#
+ *
+ * Notes:
+ * This is four CSRs per LMC, one per each rank.
+ * Each CSR is written by HW during a read-leveling sequence for the rank.
+ * (HW sets STATUS==3 after HW read-leveling completes for the rank.)
+ * Each CSR may also be written by SW, but not while a read-leveling sequence
+ * is in progress. (HW sets STATUS==1 after a CSR write.)
+ * Deskew setting is measured in units of 1/4 DCLK, so the above BYTE*
+ * values can range over 4 DCLKs.
+ * SW initiates a HW read-leveling sequence by programming
+ * LMC*_READ_LEVEL_CTL and writing INIT_START=1 with SEQUENCE=1.
+ * See LMC*_READ_LEVEL_CTL.
+ */
+union cvmx_lmcx_read_level_rankx {
+ u64 u64;
+ struct cvmx_lmcx_read_level_rankx_s {
+ uint64_t reserved_38_63:26;
+ uint64_t status:2;
+ uint64_t byte8:4;
+ uint64_t byte7:4;
+ uint64_t byte6:4;
+ uint64_t byte5:4;
+ uint64_t byte4:4;
+ uint64_t byte3:4;
+ uint64_t byte2:4;
+ uint64_t byte1:4;
+ uint64_t byte0:4;
+ } s;
+ struct cvmx_lmcx_read_level_rankx_s cn52xx;
+ struct cvmx_lmcx_read_level_rankx_s cn52xxp1;
+ struct cvmx_lmcx_read_level_rankx_s cn56xx;
+ struct cvmx_lmcx_read_level_rankx_s cn56xxp1;
+};
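+
+/*
+ * Illustrative sketch: polling for STATUS==3 after software has started
+ * a hardware read-leveling sequence as described above. The RANKX
+ * address macro is assumed; a real implementation would add a timeout.
+ */
+static inline void lmcx_wait_read_level(int if_num, int rank)
+{
+	union cvmx_lmcx_read_level_rankx lvl;
+
+	/* HW sets STATUS to 3 once read leveling completes for the rank. */
+	do {
+		lvl.u64 = csr_rd(CVMX_LMCX_READ_LEVEL_RANKX(rank, if_num));
+	} while (lvl.s.status != 3);
+}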
+
+/**
+ * cvmx_lmc#_ref_status
+ *
+ * This register contains the status of the refresh pending counter.
+ *
+ */
+union cvmx_lmcx_ref_status {
+ u64 u64;
+ struct cvmx_lmcx_ref_status_s {
+ uint64_t reserved_4_63:60;
+ uint64_t ref_pend_max_clr:1;
+ uint64_t ref_count:3;
+ } s;
+ struct cvmx_lmcx_ref_status_s cn73xx;
+ struct cvmx_lmcx_ref_status_s cn78xx;
+ struct cvmx_lmcx_ref_status_s cnf75xx;
+};
+
+/**
+ * cvmx_lmc#_reset_ctl
+ *
+ * Specify the RSL base addresses for the block.
+ *
+ */
+union cvmx_lmcx_reset_ctl {
+ u64 u64;
+ struct cvmx_lmcx_reset_ctl_s {
+ uint64_t reserved_4_63:60;
+ uint64_t ddr3psv:1;
+ uint64_t ddr3psoft:1;
+ uint64_t ddr3pwarm:1;
+ uint64_t ddr3rst:1;
+ } s;
+ struct cvmx_lmcx_reset_ctl_s cn61xx;
+ struct cvmx_lmcx_reset_ctl_s cn63xx;
+ struct cvmx_lmcx_reset_ctl_s cn63xxp1;
+ struct cvmx_lmcx_reset_ctl_s cn66xx;
+ struct cvmx_lmcx_reset_ctl_s cn68xx;
+ struct cvmx_lmcx_reset_ctl_s cn68xxp1;
+ struct cvmx_lmcx_reset_ctl_s cn70xx;
+ struct cvmx_lmcx_reset_ctl_s cn70xxp1;
+ struct cvmx_lmcx_reset_ctl_s cn73xx;
+ struct cvmx_lmcx_reset_ctl_s cn78xx;
+ struct cvmx_lmcx_reset_ctl_s cn78xxp1;
+ struct cvmx_lmcx_reset_ctl_s cnf71xx;
+ struct cvmx_lmcx_reset_ctl_s cnf75xx;
+};
+
+/**
+ * cvmx_lmc#_retry_config
+ *
+ * This register configures automatic retry operation.
+ *
+ */
+union cvmx_lmcx_retry_config {
+ u64 u64;
+ struct cvmx_lmcx_retry_config_s {
+ uint64_t reserved_56_63:8;
+ uint64_t max_errors:24;
+ uint64_t reserved_13_31:19;
+ uint64_t error_continue:1;
+ uint64_t reserved_9_11:3;
+ uint64_t auto_error_continue:1;
+ uint64_t reserved_5_7:3;
+ uint64_t pulse_count_auto_clr:1;
+ uint64_t reserved_1_3:3;
+ uint64_t retry_enable:1;
+ } s;
+ struct cvmx_lmcx_retry_config_s cn73xx;
+ struct cvmx_lmcx_retry_config_s cn78xx;
+ struct cvmx_lmcx_retry_config_s cnf75xx;
+};
+
+/**
+ * cvmx_lmc#_retry_status
+ *
+ * This register provides status on automatic retry operation.
+ *
+ */
+union cvmx_lmcx_retry_status {
+ u64 u64;
+ struct cvmx_lmcx_retry_status_s {
+ uint64_t clear_error_count:1;
+ uint64_t clear_error_pulse_count:1;
+ uint64_t reserved_57_61:5;
+ uint64_t error_pulse_count_valid:1;
+ uint64_t error_pulse_count_sat:1;
+ uint64_t reserved_52_54:3;
+ uint64_t error_pulse_count:4;
+ uint64_t reserved_45_47:3;
+ uint64_t error_sequence:5;
+ uint64_t reserved_33_39:7;
+ uint64_t error_type:1;
+ uint64_t reserved_24_31:8;
+ uint64_t error_count:24;
+ } s;
+ struct cvmx_lmcx_retry_status_s cn73xx;
+ struct cvmx_lmcx_retry_status_s cn78xx;
+ struct cvmx_lmcx_retry_status_s cnf75xx;
+};
+
+/**
+ * cvmx_lmc#_rlevel_ctl
+ */
+union cvmx_lmcx_rlevel_ctl {
+ u64 u64;
+ struct cvmx_lmcx_rlevel_ctl_s {
+ uint64_t reserved_33_63:31;
+ uint64_t tccd_sel:1;
+ uint64_t pattern:8;
+ uint64_t reserved_22_23:2;
+ uint64_t delay_unload_3:1;
+ uint64_t delay_unload_2:1;
+ uint64_t delay_unload_1:1;
+ uint64_t delay_unload_0:1;
+ uint64_t bitmask:8;
+ uint64_t or_dis:1;
+ uint64_t offset_en:1;
+ uint64_t offset:4;
+ uint64_t byte:4;
+ } s;
+ struct cvmx_lmcx_rlevel_ctl_cn61xx {
+ uint64_t reserved_22_63:42;
+ uint64_t delay_unload_3:1;
+ uint64_t delay_unload_2:1;
+ uint64_t delay_unload_1:1;
+ uint64_t delay_unload_0:1;
+ uint64_t bitmask:8;
+ uint64_t or_dis:1;
+ uint64_t offset_en:1;
+ uint64_t offset:4;
+ uint64_t byte:4;
+ } cn61xx;
+ struct cvmx_lmcx_rlevel_ctl_cn61xx cn63xx;
+ struct cvmx_lmcx_rlevel_ctl_cn63xxp1 {
+ uint64_t reserved_9_63:55;
+ uint64_t offset_en:1;
+ uint64_t offset:4;
+ uint64_t byte:4;
+ } cn63xxp1;
+ struct cvmx_lmcx_rlevel_ctl_cn61xx cn66xx;
+ struct cvmx_lmcx_rlevel_ctl_cn61xx cn68xx;
+ struct cvmx_lmcx_rlevel_ctl_cn61xx cn68xxp1;
+ struct cvmx_lmcx_rlevel_ctl_cn70xx {
+ uint64_t reserved_32_63:32;
+ uint64_t pattern:8;
+ uint64_t reserved_22_23:2;
+ uint64_t delay_unload_3:1;
+ uint64_t delay_unload_2:1;
+ uint64_t delay_unload_1:1;
+ uint64_t delay_unload_0:1;
+ uint64_t bitmask:8;
+ uint64_t or_dis:1;
+ uint64_t offset_en:1;
+ uint64_t offset:4;
+ uint64_t byte:4;
+ } cn70xx;
+ struct cvmx_lmcx_rlevel_ctl_cn70xx cn70xxp1;
+ struct cvmx_lmcx_rlevel_ctl_cn70xx cn73xx;
+ struct cvmx_lmcx_rlevel_ctl_s cn78xx;
+ struct cvmx_lmcx_rlevel_ctl_s cn78xxp1;
+ struct cvmx_lmcx_rlevel_ctl_cn61xx cnf71xx;
+ struct cvmx_lmcx_rlevel_ctl_s cnf75xx;
+};
+
+/**
+ * cvmx_lmc#_rlevel_dbg
+ *
+ * A given read of LMC()_RLEVEL_DBG returns the read leveling pass/fail
+ * results for all possible delay settings (i.e. the BITMASK) for only
+ * one byte in the last rank that the hardware ran read leveling on.
+ * LMC()_RLEVEL_CTL[BYTE] selects the particular byte. To get these
+ * pass/fail results for a different rank, you must run the hardware
+ * read leveling again. For example, it is possible to get the [BITMASK]
+ * results for every byte of every rank if you run read leveling separately
+ * for each rank, probing LMC()_RLEVEL_DBG between each read leveling.
+ */
+union cvmx_lmcx_rlevel_dbg {
+ u64 u64;
+ struct cvmx_lmcx_rlevel_dbg_s {
+ uint64_t bitmask:64;
+ } s;
+ struct cvmx_lmcx_rlevel_dbg_s cn61xx;
+ struct cvmx_lmcx_rlevel_dbg_s cn63xx;
+ struct cvmx_lmcx_rlevel_dbg_s cn63xxp1;
+ struct cvmx_lmcx_rlevel_dbg_s cn66xx;
+ struct cvmx_lmcx_rlevel_dbg_s cn68xx;
+ struct cvmx_lmcx_rlevel_dbg_s cn68xxp1;
+ struct cvmx_lmcx_rlevel_dbg_s cn70xx;
+ struct cvmx_lmcx_rlevel_dbg_s cn70xxp1;
+ struct cvmx_lmcx_rlevel_dbg_s cn73xx;
+ struct cvmx_lmcx_rlevel_dbg_s cn78xx;
+ struct cvmx_lmcx_rlevel_dbg_s cn78xxp1;
+ struct cvmx_lmcx_rlevel_dbg_s cnf71xx;
+ struct cvmx_lmcx_rlevel_dbg_s cnf75xx;
+};
+
+/**
+ * cvmx_lmc#_rlevel_rank#
+ *
+ * Four of these CSRs exist per LMC, one for each rank. Read level setting
+ * is measured in units of 1/4 CK, so the BYTEn values can range over 16 CK
+ * cycles. Each CSR is written by hardware during a read leveling sequence
+ * for the rank. (Hardware sets [STATUS] to 3 after hardware read leveling
+ * completes for the rank.)
+ *
+ * If hardware is unable to find a match per LMC()_RLEVEL_CTL[OFFSET_EN] and
+ * LMC()_RLEVEL_CTL[OFFSET], then hardware sets
+ * LMC()_RLEVEL_RANK()[BYTEn<5:0>] to 0x0.
+ *
+ * Each CSR may also be written by software, but not while a read leveling
+ * sequence is in progress. (Hardware sets [STATUS] to 1 after a CSR write.)
+ * Software initiates a hardware read leveling sequence by programming
+ * LMC()_RLEVEL_CTL and writing [INIT_START] = 1 with [SEQ_SEL]=1.
+ * See LMC()_RLEVEL_CTL.
+ *
+ * LMC()_RLEVEL_RANKi values for ranks i without attached DRAM should be set
+ * such that they do not increase the range of possible BYTE values for any
+ * byte lane. The easiest way to do this is to set LMC()_RLEVEL_RANKi =
+ * LMC()_RLEVEL_RANKj, where j is some rank with attached DRAM whose
+ * LMC()_RLEVEL_RANKj is already fully initialized.
+ */
+union cvmx_lmcx_rlevel_rankx {
+ u64 u64;
+ struct cvmx_lmcx_rlevel_rankx_s {
+ uint64_t reserved_56_63:8;
+ uint64_t status:2;
+ uint64_t byte8:6;
+ uint64_t byte7:6;
+ uint64_t byte6:6;
+ uint64_t byte5:6;
+ uint64_t byte4:6;
+ uint64_t byte3:6;
+ uint64_t byte2:6;
+ uint64_t byte1:6;
+ uint64_t byte0:6;
+ } s;
+ struct cvmx_lmcx_rlevel_rankx_s cn61xx;
+ struct cvmx_lmcx_rlevel_rankx_s cn63xx;
+ struct cvmx_lmcx_rlevel_rankx_s cn63xxp1;
+ struct cvmx_lmcx_rlevel_rankx_s cn66xx;
+ struct cvmx_lmcx_rlevel_rankx_s cn68xx;
+ struct cvmx_lmcx_rlevel_rankx_s cn68xxp1;
+ struct cvmx_lmcx_rlevel_rankx_s cn70xx;
+ struct cvmx_lmcx_rlevel_rankx_s cn70xxp1;
+ struct cvmx_lmcx_rlevel_rankx_s cn73xx;
+ struct cvmx_lmcx_rlevel_rankx_s cn78xx;
+ struct cvmx_lmcx_rlevel_rankx_s cn78xxp1;
+ struct cvmx_lmcx_rlevel_rankx_s cnf71xx;
+ struct cvmx_lmcx_rlevel_rankx_s cnf75xx;
+};
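+
+/*
+ * Illustrative sketch of the recommendation above: a rank without
+ * attached DRAM inherits the settings of a fully initialized rank so it
+ * cannot widen the possible BYTE range. The address macro is assumed;
+ * the helper name is hypothetical.
+ */
+static inline void lmcx_copy_rlevel_rank(int if_num, int dst_rank,
+					 int src_rank)
+{
+	u64 trained = csr_rd(CVMX_LMCX_RLEVEL_RANKX(src_rank, if_num));
+
+	csr_wr(CVMX_LMCX_RLEVEL_RANKX(dst_rank, if_num), trained);
+}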
+
+/**
+ * cvmx_lmc#_rodt_comp_ctl
+ *
+ * LMC_RODT_COMP_CTL = LMC Compensation control
+ *
+ */
+union cvmx_lmcx_rodt_comp_ctl {
+ u64 u64;
+ struct cvmx_lmcx_rodt_comp_ctl_s {
+ uint64_t reserved_17_63:47;
+ uint64_t enable:1;
+ uint64_t reserved_12_15:4;
+ uint64_t nctl:4;
+ uint64_t reserved_5_7:3;
+ uint64_t pctl:5;
+ } s;
+ struct cvmx_lmcx_rodt_comp_ctl_s cn50xx;
+ struct cvmx_lmcx_rodt_comp_ctl_s cn52xx;
+ struct cvmx_lmcx_rodt_comp_ctl_s cn52xxp1;
+ struct cvmx_lmcx_rodt_comp_ctl_s cn56xx;
+ struct cvmx_lmcx_rodt_comp_ctl_s cn56xxp1;
+ struct cvmx_lmcx_rodt_comp_ctl_s cn58xx;
+ struct cvmx_lmcx_rodt_comp_ctl_s cn58xxp1;
+};
+
+/**
+ * cvmx_lmc#_rodt_ctl
+ *
+ * LMC_RODT_CTL = Obsolete LMC Read OnDieTermination control
+ * See the description in LMC_WODT_CTL1. On reads, Octeon only supports
+ * turning on ODTs in the lower two DIMMs with the masks as below.
+ *
+ * Notes:
+ * When a given RANK in position N is selected, the RODT_HI and RODT_LO
+ * masks for that position are used.
+ * Mask[3:0] is used for RODT control of the RANKs in positions 3, 2, 1,
+ * and 0, respectively.
+ * In 64b mode, DIMMs are assumed to be ordered in the following order:
+ * position 3: [unused , DIMM1_RANK1_LO]
+ * position 2: [unused , DIMM1_RANK0_LO]
+ * position 1: [unused , DIMM0_RANK1_LO]
+ * position 0: [unused , DIMM0_RANK0_LO]
+ * In 128b mode, DIMMs are assumed to be ordered in the following order:
+ * position 3: [DIMM3_RANK1_HI, DIMM1_RANK1_LO]
+ * position 2: [DIMM3_RANK0_HI, DIMM1_RANK0_LO]
+ * position 1: [DIMM2_RANK1_HI, DIMM0_RANK1_LO]
+ * position 0: [DIMM2_RANK0_HI, DIMM0_RANK0_LO]
+ */
+union cvmx_lmcx_rodt_ctl {
+ u64 u64;
+ struct cvmx_lmcx_rodt_ctl_s {
+ uint64_t reserved_32_63:32;
+ uint64_t rodt_hi3:4;
+ uint64_t rodt_hi2:4;
+ uint64_t rodt_hi1:4;
+ uint64_t rodt_hi0:4;
+ uint64_t rodt_lo3:4;
+ uint64_t rodt_lo2:4;
+ uint64_t rodt_lo1:4;
+ uint64_t rodt_lo0:4;
+ } s;
+ struct cvmx_lmcx_rodt_ctl_s cn30xx;
+ struct cvmx_lmcx_rodt_ctl_s cn31xx;
+ struct cvmx_lmcx_rodt_ctl_s cn38xx;
+ struct cvmx_lmcx_rodt_ctl_s cn38xxp2;
+ struct cvmx_lmcx_rodt_ctl_s cn50xx;
+ struct cvmx_lmcx_rodt_ctl_s cn52xx;
+ struct cvmx_lmcx_rodt_ctl_s cn52xxp1;
+ struct cvmx_lmcx_rodt_ctl_s cn56xx;
+ struct cvmx_lmcx_rodt_ctl_s cn56xxp1;
+ struct cvmx_lmcx_rodt_ctl_s cn58xx;
+ struct cvmx_lmcx_rodt_ctl_s cn58xxp1;
+};
+
+/**
+ * cvmx_lmc#_rodt_mask
+ *
+ * System designers may desire to terminate DQ/DQS lines for higher frequency
+ * DDR operations, especially on a multirank system. DDR3 DQ/DQS I/Os have
+ * built-in termination resistors that can be turned on or off by the
+ * controller, after meeting TAOND and TAOF timing requirements.
+ *
+ * Each rank has its own ODT pin that fans out to all the memory parts in
+ * that DIMM. System designers may prefer different combinations of ODT ONs
+ * for read operations into different ranks. CNXXXX supports full
+ * programmability by way of the mask register below. Each rank position has
+ * its own 4-bit programmable field. When the controller does a read to that
+ * rank, it sets the 4 ODT pins to the MASK pins below. For example, when
+ * doing a read from Rank0, a system designer may desire to terminate the
+ * lines with the resistor on DIMM0/Rank1. The mask [RODT_D0_R0] would then
+ * be [0010].
+ *
+ * CNXXXX drives the appropriate mask values on the ODT pins by default.
+ * If this feature is not required, write 0x0 in this register. Note that,
+ * as per the JEDEC DDR3 specifications, the ODT pin for the rank that is
+ * being read should always be 0x0. When a given RANK is selected, the RODT
+ * mask for that rank is used. The resulting RODT mask is driven to the
+ * DIMMs in the following manner:
+ */
+union cvmx_lmcx_rodt_mask {
+ u64 u64;
+ struct cvmx_lmcx_rodt_mask_s {
+ uint64_t rodt_d3_r1:8;
+ uint64_t rodt_d3_r0:8;
+ uint64_t rodt_d2_r1:8;
+ uint64_t rodt_d2_r0:8;
+ uint64_t rodt_d1_r1:8;
+ uint64_t rodt_d1_r0:8;
+ uint64_t rodt_d0_r1:8;
+ uint64_t rodt_d0_r0:8;
+ } s;
+ struct cvmx_lmcx_rodt_mask_s cn61xx;
+ struct cvmx_lmcx_rodt_mask_s cn63xx;
+ struct cvmx_lmcx_rodt_mask_s cn63xxp1;
+ struct cvmx_lmcx_rodt_mask_s cn66xx;
+ struct cvmx_lmcx_rodt_mask_s cn68xx;
+ struct cvmx_lmcx_rodt_mask_s cn68xxp1;
+ struct cvmx_lmcx_rodt_mask_cn70xx {
+ uint64_t reserved_28_63:36;
+ uint64_t rodt_d1_r1:4;
+ uint64_t reserved_20_23:4;
+ uint64_t rodt_d1_r0:4;
+ uint64_t reserved_12_15:4;
+ uint64_t rodt_d0_r1:4;
+ uint64_t reserved_4_7:4;
+ uint64_t rodt_d0_r0:4;
+ } cn70xx;
+ struct cvmx_lmcx_rodt_mask_cn70xx cn70xxp1;
+ struct cvmx_lmcx_rodt_mask_cn70xx cn73xx;
+ struct cvmx_lmcx_rodt_mask_cn70xx cn78xx;
+ struct cvmx_lmcx_rodt_mask_cn70xx cn78xxp1;
+ struct cvmx_lmcx_rodt_mask_s cnf71xx;
+ struct cvmx_lmcx_rodt_mask_cn70xx cnf75xx;
+};
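+
+/*
+ * Illustrative sketch of the RODT_D0_R0 example above: terminate reads
+ * from Rank0 with the resistor on DIMM0/Rank1, i.e. mask 0b0010. Uses
+ * the CN70XX-and-later field layout; the address macro is assumed and
+ * the helper name is hypothetical.
+ */
+static inline void lmcx_rodt_example(int if_num)
+{
+	union cvmx_lmcx_rodt_mask mask;
+
+	mask.u64 = csr_rd(CVMX_LMCX_RODT_MASK(if_num));
+	mask.cn73xx.rodt_d0_r0 = 0x2;	/* drive the DIMM0/Rank1 ODT pin */
+	csr_wr(CVMX_LMCX_RODT_MASK(if_num), mask.u64);
+}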
+
+/**
+ * cvmx_lmc#_scramble_cfg0
+ *
+ * LMC_SCRAMBLE_CFG0 = LMC Scramble Config0
+ *
+ */
+union cvmx_lmcx_scramble_cfg0 {
+ u64 u64;
+ struct cvmx_lmcx_scramble_cfg0_s {
+ uint64_t key:64;
+ } s;
+ struct cvmx_lmcx_scramble_cfg0_s cn61xx;
+ struct cvmx_lmcx_scramble_cfg0_s cn66xx;
+ struct cvmx_lmcx_scramble_cfg0_s cn70xx;
+ struct cvmx_lmcx_scramble_cfg0_s cn70xxp1;
+ struct cvmx_lmcx_scramble_cfg0_s cn73xx;
+ struct cvmx_lmcx_scramble_cfg0_s cn78xx;
+ struct cvmx_lmcx_scramble_cfg0_s cn78xxp1;
+ struct cvmx_lmcx_scramble_cfg0_s cnf71xx;
+ struct cvmx_lmcx_scramble_cfg0_s cnf75xx;
+};
+
+/**
+ * cvmx_lmc#_scramble_cfg1
+ *
+ * These registers set the aliasing that uses the lowest, legal chip select(s).
+ *
+ */
+union cvmx_lmcx_scramble_cfg1 {
+ u64 u64;
+ struct cvmx_lmcx_scramble_cfg1_s {
+ uint64_t key:64;
+ } s;
+ struct cvmx_lmcx_scramble_cfg1_s cn61xx;
+ struct cvmx_lmcx_scramble_cfg1_s cn66xx;
+ struct cvmx_lmcx_scramble_cfg1_s cn70xx;
+ struct cvmx_lmcx_scramble_cfg1_s cn70xxp1;
+ struct cvmx_lmcx_scramble_cfg1_s cn73xx;
+ struct cvmx_lmcx_scramble_cfg1_s cn78xx;
+ struct cvmx_lmcx_scramble_cfg1_s cn78xxp1;
+ struct cvmx_lmcx_scramble_cfg1_s cnf71xx;
+ struct cvmx_lmcx_scramble_cfg1_s cnf75xx;
+};
+
+/**
+ * cvmx_lmc#_scramble_cfg2
+ */
+union cvmx_lmcx_scramble_cfg2 {
+ u64 u64;
+ struct cvmx_lmcx_scramble_cfg2_s {
+ uint64_t key:64;
+ } s;
+ struct cvmx_lmcx_scramble_cfg2_s cn73xx;
+ struct cvmx_lmcx_scramble_cfg2_s cn78xx;
+ struct cvmx_lmcx_scramble_cfg2_s cnf75xx;
+};
+
+/**
+ * cvmx_lmc#_scrambled_fadr
+ *
+ * LMC()_FADR captures the failing pre-scrambled address location (split into
+ * DIMM, bunk, bank, etc). If scrambling is off, LMC()_FADR also captures the
+ * failing physical location in the DRAM parts. LMC()_SCRAMBLED_FADR captures
+ * the actual failing address location in the physical DRAM parts, i.e.:
+ *
+ * * If scrambling is on, LMC()_SCRAMBLED_FADR contains the failing physical
+ * location in the
+ * DRAM parts (split into DIMM, bunk, bank, etc).
+ *
+ * * If scrambling is off, the pre-scramble and post-scramble addresses are
+ * the same, and so the
+ * contents of LMC()_SCRAMBLED_FADR match the contents of LMC()_FADR.
+ *
+ * This register only captures the first transaction with ECC errors. A DED
+ * error can overwrite this register with its failing address if the first
+ * error was a SEC. Writing LMC()_CONFIG[SEC_ERR,DED_ERR] clears the error
+ * bits and captures the next failing address. If [FDIMM] is 1, the error
+ * is in the higher DIMM.
+ */
+union cvmx_lmcx_scrambled_fadr {
+ u64 u64;
+ struct cvmx_lmcx_scrambled_fadr_s {
+ uint64_t reserved_43_63:21;
+ uint64_t fcid:3;
+ uint64_t fill_order:2;
+ uint64_t reserved_14_37:24;
+ uint64_t fcol:14;
+ } s;
+ struct cvmx_lmcx_scrambled_fadr_cn61xx {
+ uint64_t reserved_36_63:28;
+ uint64_t fdimm:2;
+ uint64_t fbunk:1;
+ uint64_t fbank:3;
+ uint64_t frow:16;
+ uint64_t fcol:14;
+ } cn61xx;
+ struct cvmx_lmcx_scrambled_fadr_cn61xx cn66xx;
+ struct cvmx_lmcx_scrambled_fadr_cn70xx {
+ uint64_t reserved_40_63:24;
+ uint64_t fill_order:2;
+ uint64_t fdimm:1;
+ uint64_t fbunk:1;
+ uint64_t fbank:4;
+ uint64_t frow:18;
+ uint64_t fcol:14;
+ } cn70xx;
+ struct cvmx_lmcx_scrambled_fadr_cn70xx cn70xxp1;
+ struct cvmx_lmcx_scrambled_fadr_cn73xx {
+ uint64_t reserved_43_63:21;
+ uint64_t fcid:3;
+ uint64_t fill_order:2;
+ uint64_t fdimm:1;
+ uint64_t fbunk:1;
+ uint64_t fbank:4;
+ uint64_t frow:18;
+ uint64_t fcol:14;
+ } cn73xx;
+ struct cvmx_lmcx_scrambled_fadr_cn73xx cn78xx;
+ struct cvmx_lmcx_scrambled_fadr_cn73xx cn78xxp1;
+ struct cvmx_lmcx_scrambled_fadr_cn61xx cnf71xx;
+ struct cvmx_lmcx_scrambled_fadr_cn73xx cnf75xx;
+};
+
+/**
+ * cvmx_lmc#_seq_ctl
+ *
+ * This register is used to initiate the various control sequences in the LMC.
+ *
+ */
+union cvmx_lmcx_seq_ctl {
+ u64 u64;
+ struct cvmx_lmcx_seq_ctl_s {
+ uint64_t reserved_6_63:58;
+ uint64_t seq_complete:1;
+ uint64_t seq_sel:4;
+ uint64_t init_start:1;
+ } s;
+ struct cvmx_lmcx_seq_ctl_s cn70xx;
+ struct cvmx_lmcx_seq_ctl_s cn70xxp1;
+ struct cvmx_lmcx_seq_ctl_s cn73xx;
+ struct cvmx_lmcx_seq_ctl_s cn78xx;
+ struct cvmx_lmcx_seq_ctl_s cn78xxp1;
+ struct cvmx_lmcx_seq_ctl_s cnf75xx;
+};
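+
+/*
+ * Illustrative sketch: starting one of the LMC control sequences (for
+ * example the PPR sequence referenced by LMC()_PPR_CTL above) and
+ * waiting for completion. Address macro and helper name are
+ * assumptions; a real implementation would bound the poll loop.
+ */
+static inline void lmcx_run_sequence(int if_num, int seq_sel)
+{
+	union cvmx_lmcx_seq_ctl seq;
+
+	seq.u64 = 0;
+	seq.s.seq_sel = seq_sel;
+	seq.s.init_start = 1;
+	csr_wr(CVMX_LMCX_SEQ_CTL(if_num), seq.u64);
+
+	/* HW raises SEQ_COMPLETE when the selected sequence finishes. */
+	do {
+		seq.u64 = csr_rd(CVMX_LMCX_SEQ_CTL(if_num));
+	} while (!seq.s.seq_complete);
+}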
+
+/**
+ * cvmx_lmc#_slot_ctl0
+ *
+ * This register is an assortment of control fields needed by the memory
+ * controller. If software has not previously written to this register
+ * (since the last DRESET), hardware updates the fields in this register to
+ * the minimum allowed value when any of LMC()_RLEVEL_RANK(),
+ * LMC()_WLEVEL_RANK(), LMC()_CONTROL, and LMC()_MODEREG_PARAMS0 registers
+ * change. Ideally, only read this register after LMC has been initialized and
+ * LMC()_RLEVEL_RANK(), LMC()_WLEVEL_RANK() have valid data.
+ *
+ * The interpretation of the fields in this register depends on
+ * LMC(0)_CONFIG[DDR2T]:
+ *
+ * * If LMC()_CONFIG[DDR2T]=1, (FieldValue + 4) is the minimum CK cycles
+ * between when the DRAM part registers CAS commands of the first and
+ * second types from different cache blocks.
+ *
+ * * If LMC()_CONFIG[DDR2T]=0, (FieldValue + 3) is the minimum CK cycles
+ * between when the DRAM part registers CAS commands of the first and second
+ * types from different cache blocks.
+ * FieldValue = 0 is always illegal in this case.
+ * The hardware-calculated minimums for these fields are shown in
+ * LMC(0)_SLOT_CTL0 Hardware-Calculated Minimums.
+ */
+union cvmx_lmcx_slot_ctl0 {
+ u64 u64;
+ struct cvmx_lmcx_slot_ctl0_s {
+ uint64_t reserved_50_63:14;
+ uint64_t w2r_l_init_ext:1;
+ uint64_t w2r_init_ext:1;
+ uint64_t w2w_l_init:6;
+ uint64_t w2r_l_init:6;
+ uint64_t r2w_l_init:6;
+ uint64_t r2r_l_init:6;
+ uint64_t w2w_init:6;
+ uint64_t w2r_init:6;
+ uint64_t r2w_init:6;
+ uint64_t r2r_init:6;
+ } s;
+ struct cvmx_lmcx_slot_ctl0_cn61xx {
+ uint64_t reserved_24_63:40;
+ uint64_t w2w_init:6;
+ uint64_t w2r_init:6;
+ uint64_t r2w_init:6;
+ uint64_t r2r_init:6;
+ } cn61xx;
+ struct cvmx_lmcx_slot_ctl0_cn61xx cn63xx;
+ struct cvmx_lmcx_slot_ctl0_cn61xx cn63xxp1;
+ struct cvmx_lmcx_slot_ctl0_cn61xx cn66xx;
+ struct cvmx_lmcx_slot_ctl0_cn61xx cn68xx;
+ struct cvmx_lmcx_slot_ctl0_cn61xx cn68xxp1;
+ struct cvmx_lmcx_slot_ctl0_cn70xx {
+ uint64_t reserved_48_63:16;
+ uint64_t w2w_l_init:6;
+ uint64_t w2r_l_init:6;
+ uint64_t r2w_l_init:6;
+ uint64_t r2r_l_init:6;
+ uint64_t w2w_init:6;
+ uint64_t w2r_init:6;
+ uint64_t r2w_init:6;
+ uint64_t r2r_init:6;
+ } cn70xx;
+ struct cvmx_lmcx_slot_ctl0_cn70xx cn70xxp1;
+ struct cvmx_lmcx_slot_ctl0_s cn73xx;
+ struct cvmx_lmcx_slot_ctl0_s cn78xx;
+ struct cvmx_lmcx_slot_ctl0_s cn78xxp1;
+ struct cvmx_lmcx_slot_ctl0_cn61xx cnf71xx;
+ struct cvmx_lmcx_slot_ctl0_s cnf75xx;
+};
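+
+/*
+ * Illustrative sketch of the DDR2T interpretation above, which applies
+ * to all of SLOT_CTL0..SLOT_CTL3: convert a field value into the
+ * minimum CK cycles between the two CAS commands. The helper name is
+ * hypothetical.
+ */
+static inline int lmcx_slot_ctl_min_ck(int field_value, int ddr2t)
+{
+	/* (FieldValue + 4) when DDR2T=1, (FieldValue + 3) otherwise. */
+	return field_value + (ddr2t ? 4 : 3);
+}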
+
+/**
+ * cvmx_lmc#_slot_ctl1
+ *
+ * This register is an assortment of control fields needed by the memory
+ * controller. If software has not previously written to this register
+ * (since the last DRESET), hardware updates the fields in this register to
+ * the minimum allowed value when any of LMC()_RLEVEL_RANK(),
+ * LMC()_WLEVEL_RANK(), LMC()_CONTROL and LMC()_MODEREG_PARAMS0 change.
+ * Ideally, only read this register after LMC has been initialized and
+ * LMC()_RLEVEL_RANK(), LMC()_WLEVEL_RANK() have valid data.
+ *
+ * The interpretation of the fields in this CSR depends on
+ * LMC(0)_CONFIG[DDR2T]:
+ *
+ * * If LMC()_CONFIG[DDR2T]=1, (FieldValue + 4) is the minimum CK cycles
+ * between when the DRAM part registers CAS commands of the first and
+ * second types from different cache blocks.
+ *
+ * * If LMC()_CONFIG[DDR2T]=0, (FieldValue + 3) is the minimum CK cycles
+ * between when the DRAM part registers CAS commands of the first and
+ * second types from different cache blocks.
+ * FieldValue = 0 is always illegal in this case.
+ *
+ * The hardware-calculated minimums for these fields are shown in
+ * LMC(0)_SLOT_CTL1 Hardware-Calculated Minimums.
+ */
+union cvmx_lmcx_slot_ctl1 {
+ u64 u64;
+ struct cvmx_lmcx_slot_ctl1_s {
+ uint64_t reserved_24_63:40;
+ uint64_t w2w_xrank_init:6;
+ uint64_t w2r_xrank_init:6;
+ uint64_t r2w_xrank_init:6;
+ uint64_t r2r_xrank_init:6;
+ } s;
+ struct cvmx_lmcx_slot_ctl1_s cn61xx;
+ struct cvmx_lmcx_slot_ctl1_s cn63xx;
+ struct cvmx_lmcx_slot_ctl1_s cn63xxp1;
+ struct cvmx_lmcx_slot_ctl1_s cn66xx;
+ struct cvmx_lmcx_slot_ctl1_s cn68xx;
+ struct cvmx_lmcx_slot_ctl1_s cn68xxp1;
+ struct cvmx_lmcx_slot_ctl1_s cn70xx;
+ struct cvmx_lmcx_slot_ctl1_s cn70xxp1;
+ struct cvmx_lmcx_slot_ctl1_s cn73xx;
+ struct cvmx_lmcx_slot_ctl1_s cn78xx;
+ struct cvmx_lmcx_slot_ctl1_s cn78xxp1;
+ struct cvmx_lmcx_slot_ctl1_s cnf71xx;
+ struct cvmx_lmcx_slot_ctl1_s cnf75xx;
+};
+
+/**
+ * cvmx_lmc#_slot_ctl2
+ *
+ * This register is an assortment of control fields needed by the memory
+ * controller. If software has not previously written to this register
+ * (since the last DRESET), hardware updates the fields in this register
+ * to the minimum allowed value when any of LMC()_RLEVEL_RANK(),
+ * LMC()_WLEVEL_RANK(), LMC()_CONTROL and LMC()_MODEREG_PARAMS0 change.
+ * Ideally, only read this register after LMC has been initialized and
+ * LMC()_RLEVEL_RANK(), LMC()_WLEVEL_RANK() have valid data.
+ *
+ * The interpretation of the fields in this CSR depends on LMC(0)_CONFIG[DDR2T]:
+ *
+ * * If LMC()_CONFIG[DDR2T] = 1, (FieldValue + 4) is the minimum CK cycles
+ * between when the DRAM part registers CAS commands of the first and
+ * second types from different cache blocks.
+ *
+ * * If LMC()_CONFIG[DDR2T] = 0, (FieldValue + 3) is the minimum CK cycles
+ * between when the DRAM part registers CAS commands of the first and second
+ * types from different cache blocks.
+ * FieldValue = 0 is always illegal in this case.
+ *
+ * The hardware-calculated minimums for these fields are shown in LMC Registers.
+ */
+union cvmx_lmcx_slot_ctl2 {
+ u64 u64;
+ struct cvmx_lmcx_slot_ctl2_s {
+ uint64_t reserved_24_63:40;
+ uint64_t w2w_xdimm_init:6;
+ uint64_t w2r_xdimm_init:6;
+ uint64_t r2w_xdimm_init:6;
+ uint64_t r2r_xdimm_init:6;
+ } s;
+ struct cvmx_lmcx_slot_ctl2_s cn61xx;
+ struct cvmx_lmcx_slot_ctl2_s cn63xx;
+ struct cvmx_lmcx_slot_ctl2_s cn63xxp1;
+ struct cvmx_lmcx_slot_ctl2_s cn66xx;
+ struct cvmx_lmcx_slot_ctl2_s cn68xx;
+ struct cvmx_lmcx_slot_ctl2_s cn68xxp1;
+ struct cvmx_lmcx_slot_ctl2_s cn70xx;
+ struct cvmx_lmcx_slot_ctl2_s cn70xxp1;
+ struct cvmx_lmcx_slot_ctl2_s cn73xx;
+ struct cvmx_lmcx_slot_ctl2_s cn78xx;
+ struct cvmx_lmcx_slot_ctl2_s cn78xxp1;
+ struct cvmx_lmcx_slot_ctl2_s cnf71xx;
+ struct cvmx_lmcx_slot_ctl2_s cnf75xx;
+};
+
+/**
+ * cvmx_lmc#_slot_ctl3
+ *
+ * This register is an assortment of control fields needed by the memory
+ * controller. If software has not previously written to this register
+ * (since the last DRESET), hardware updates the fields in this register
+ * to the minimum allowed value when any of LMC()_RLEVEL_RANK(),
+ * LMC()_WLEVEL_RANK(), LMC()_CONTROL and LMC()_MODEREG_PARAMS0 change.
+ * Ideally, only read this register after LMC has been initialized and
+ * LMC()_RLEVEL_RANK(), LMC()_WLEVEL_RANK() have valid data.
+ *
+ * The interpretation of the fields in this CSR depends on LMC(0)_CONFIG[DDR2T]:
+ *
+ * * If LMC()_CONFIG[DDR2T] = 1, (FieldValue + 4) is the minimum CK cycles
+ * between when the DRAM part registers CAS commands of the first and
+ * second types from different cache blocks.
+ *
+ * * If LMC()_CONFIG[DDR2T] = 0, (FieldValue + 3) is the minimum CK cycles
+ * between when the DRAM part registers CAS commands of the first and second
+ * types from different cache blocks.
+ * FieldValue = 0 is always illegal in this case.
+ *
+ * The hardware-calculated minimums for these fields are shown in LMC Registers.
+ */
+union cvmx_lmcx_slot_ctl3 {
+ u64 u64;
+ struct cvmx_lmcx_slot_ctl3_s {
+ uint64_t reserved_50_63:14;
+ uint64_t w2r_l_xrank_init_ext:1;
+ uint64_t w2r_xrank_init_ext:1;
+ uint64_t w2w_l_xrank_init:6;
+ uint64_t w2r_l_xrank_init:6;
+ uint64_t r2w_l_xrank_init:6;
+ uint64_t r2r_l_xrank_init:6;
+ uint64_t w2w_xrank_init:6;
+ uint64_t w2r_xrank_init:6;
+ uint64_t r2w_xrank_init:6;
+ uint64_t r2r_xrank_init:6;
+ } s;
+ struct cvmx_lmcx_slot_ctl3_s cn73xx;
+ struct cvmx_lmcx_slot_ctl3_s cn78xx;
+ struct cvmx_lmcx_slot_ctl3_s cnf75xx;
+};
+
+/**
+ * cvmx_lmc#_timing_params0
+ */
+union cvmx_lmcx_timing_params0 {
+ u64 u64;
+ struct cvmx_lmcx_timing_params0_s {
+ uint64_t reserved_54_63:10;
+ uint64_t tbcw:6;
+ uint64_t reserved_26_47:22;
+ uint64_t tmrd:4;
+ uint64_t reserved_8_21:14;
+ uint64_t tckeon:8;
+ } s;
+ struct cvmx_lmcx_timing_params0_cn61xx {
+ uint64_t reserved_47_63:17;
+ uint64_t trp_ext:1;
+ uint64_t tcksre:4;
+ uint64_t trp:4;
+ uint64_t tzqinit:4;
+ uint64_t tdllk:4;
+ uint64_t tmod:4;
+ uint64_t tmrd:4;
+ uint64_t txpr:4;
+ uint64_t tcke:4;
+ uint64_t tzqcs:4;
+ uint64_t reserved_0_9:10;
+ } cn61xx;
+ struct cvmx_lmcx_timing_params0_cn61xx cn63xx;
+ struct cvmx_lmcx_timing_params0_cn63xxp1 {
+ uint64_t reserved_46_63:18;
+ uint64_t tcksre:4;
+ uint64_t trp:4;
+ uint64_t tzqinit:4;
+ uint64_t tdllk:4;
+ uint64_t tmod:4;
+ uint64_t tmrd:4;
+ uint64_t txpr:4;
+ uint64_t tcke:4;
+ uint64_t tzqcs:4;
+ uint64_t tckeon:10;
+ } cn63xxp1;
+ struct cvmx_lmcx_timing_params0_cn61xx cn66xx;
+ struct cvmx_lmcx_timing_params0_cn61xx cn68xx;
+ struct cvmx_lmcx_timing_params0_cn61xx cn68xxp1;
+ struct cvmx_lmcx_timing_params0_cn70xx {
+ uint64_t reserved_48_63:16;
+ uint64_t tcksre:4;
+ uint64_t trp:5;
+ uint64_t tzqinit:4;
+ uint64_t tdllk:4;
+ uint64_t tmod:5;
+ uint64_t tmrd:4;
+ uint64_t txpr:6;
+ uint64_t tcke:4;
+ uint64_t tzqcs:4;
+ uint64_t reserved_0_7:8;
+ } cn70xx;
+ struct cvmx_lmcx_timing_params0_cn70xx cn70xxp1;
+ struct cvmx_lmcx_timing_params0_cn73xx {
+ uint64_t reserved_54_63:10;
+ uint64_t tbcw:6;
+ uint64_t tcksre:4;
+ uint64_t trp:5;
+ uint64_t tzqinit:4;
+ uint64_t tdllk:4;
+ uint64_t tmod:5;
+ uint64_t tmrd:4;
+ uint64_t txpr:6;
+ uint64_t tcke:4;
+ uint64_t tzqcs:4;
+ uint64_t reserved_0_7:8;
+ } cn73xx;
+ struct cvmx_lmcx_timing_params0_cn73xx cn78xx;
+ struct cvmx_lmcx_timing_params0_cn73xx cn78xxp1;
+ struct cvmx_lmcx_timing_params0_cn61xx cnf71xx;
+ struct cvmx_lmcx_timing_params0_cn73xx cnf75xx;
+};
+
+/**
+ * cvmx_lmc#_timing_params1
+ */
+union cvmx_lmcx_timing_params1 {
+ u64 u64;
+ struct cvmx_lmcx_timing_params1_s {
+ uint64_t reserved_59_63:5;
+ uint64_t txp_ext:1;
+ uint64_t trcd_ext:1;
+ uint64_t tpdm_full_cycle_ena:1;
+ uint64_t trfc_dlr:7;
+ uint64_t reserved_4_48:45;
+ uint64_t tmprr:4;
+ } s;
+ struct cvmx_lmcx_timing_params1_cn61xx {
+ uint64_t reserved_47_63:17;
+ uint64_t tras_ext:1;
+ uint64_t txpdll:5;
+ uint64_t tfaw:5;
+ uint64_t twldqsen:4;
+ uint64_t twlmrd:4;
+ uint64_t txp:3;
+ uint64_t trrd:3;
+ uint64_t trfc:5;
+ uint64_t twtr:4;
+ uint64_t trcd:4;
+ uint64_t tras:5;
+ uint64_t tmprr:4;
+ } cn61xx;
+ struct cvmx_lmcx_timing_params1_cn61xx cn63xx;
+ struct cvmx_lmcx_timing_params1_cn63xxp1 {
+ uint64_t reserved_46_63:18;
+ uint64_t txpdll:5;
+ uint64_t tfaw:5;
+ uint64_t twldqsen:4;
+ uint64_t twlmrd:4;
+ uint64_t txp:3;
+ uint64_t trrd:3;
+ uint64_t trfc:5;
+ uint64_t twtr:4;
+ uint64_t trcd:4;
+ uint64_t tras:5;
+ uint64_t tmprr:4;
+ } cn63xxp1;
+ struct cvmx_lmcx_timing_params1_cn61xx cn66xx;
+ struct cvmx_lmcx_timing_params1_cn61xx cn68xx;
+ struct cvmx_lmcx_timing_params1_cn61xx cn68xxp1;
+ struct cvmx_lmcx_timing_params1_cn70xx {
+ uint64_t reserved_49_63:15;
+ uint64_t txpdll:5;
+ uint64_t tfaw:5;
+ uint64_t twldqsen:4;
+ uint64_t twlmrd:4;
+ uint64_t txp:3;
+ uint64_t trrd:3;
+ uint64_t trfc:7;
+ uint64_t twtr:4;
+ uint64_t trcd:4;
+ uint64_t tras:6;
+ uint64_t tmprr:4;
+ } cn70xx;
+ struct cvmx_lmcx_timing_params1_cn70xx cn70xxp1;
+ struct cvmx_lmcx_timing_params1_cn73xx {
+ uint64_t reserved_59_63:5;
+ uint64_t txp_ext:1;
+ uint64_t trcd_ext:1;
+ uint64_t tpdm_full_cycle_ena:1;
+ uint64_t trfc_dlr:7;
+ uint64_t txpdll:5;
+ uint64_t tfaw:5;
+ uint64_t twldqsen:4;
+ uint64_t twlmrd:4;
+ uint64_t txp:3;
+ uint64_t trrd:3;
+ uint64_t trfc:7;
+ uint64_t twtr:4;
+ uint64_t trcd:4;
+ uint64_t tras:6;
+ uint64_t tmprr:4;
+ } cn73xx;
+ struct cvmx_lmcx_timing_params1_cn73xx cn78xx;
+ struct cvmx_lmcx_timing_params1_cn73xx cn78xxp1;
+ struct cvmx_lmcx_timing_params1_cn61xx cnf71xx;
+ struct cvmx_lmcx_timing_params1_cn73xx cnf75xx;
+};
+
+/**
+ * cvmx_lmc#_timing_params2
+ *
+ * This register sets timing parameters for DDR4.
+ *
+ */
+union cvmx_lmcx_timing_params2 {
+ u64 u64;
+ struct cvmx_lmcx_timing_params2_s {
+ uint64_t reserved_16_63:48;
+ uint64_t trrd_l_ext:1;
+ uint64_t trtp:4;
+ uint64_t t_rw_op_max:4;
+ uint64_t twtr_l:4;
+ uint64_t trrd_l:3;
+ } s;
+ struct cvmx_lmcx_timing_params2_cn70xx {
+ uint64_t reserved_15_63:49;
+ uint64_t trtp:4;
+ uint64_t t_rw_op_max:4;
+ uint64_t twtr_l:4;
+ uint64_t trrd_l:3;
+ } cn70xx;
+ struct cvmx_lmcx_timing_params2_cn70xx cn70xxp1;
+ struct cvmx_lmcx_timing_params2_s cn73xx;
+ struct cvmx_lmcx_timing_params2_s cn78xx;
+ struct cvmx_lmcx_timing_params2_s cn78xxp1;
+ struct cvmx_lmcx_timing_params2_s cnf75xx;
+};
+
+/**
+ * cvmx_lmc#_tro_ctl
+ *
+ * LMC_TRO_CTL = LMC Temperature Ring Osc Control
+ * This register is an assortment of various control fields needed to
+ * control the temperature ring oscillator
+ *
+ * Notes:
+ * To bring up the temperature ring oscillator, write TRESET to 0,
+ * followed by initializing RCLK_CNT to the desired value.
+ */
+union cvmx_lmcx_tro_ctl {
+ u64 u64;
+ struct cvmx_lmcx_tro_ctl_s {
+ uint64_t reserved_33_63:31;
+ uint64_t rclk_cnt:32;
+ uint64_t treset:1;
+ } s;
+ struct cvmx_lmcx_tro_ctl_s cn61xx;
+ struct cvmx_lmcx_tro_ctl_s cn63xx;
+ struct cvmx_lmcx_tro_ctl_s cn63xxp1;
+ struct cvmx_lmcx_tro_ctl_s cn66xx;
+ struct cvmx_lmcx_tro_ctl_s cn68xx;
+ struct cvmx_lmcx_tro_ctl_s cn68xxp1;
+ struct cvmx_lmcx_tro_ctl_s cnf71xx;
+};
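+
+/*
+ * Illustrative sketch of the bring-up note above: release TRESET first,
+ * then program RCLK_CNT. The address macro is assumed; the helper name
+ * and count argument are hypothetical.
+ */
+static inline void lmcx_tro_start(int if_num, u32 rclk_cnt)
+{
+	union cvmx_lmcx_tro_ctl tro;
+
+	tro.u64 = csr_rd(CVMX_LMCX_TRO_CTL(if_num));
+	tro.s.treset = 0;		/* bring the oscillator out of reset */
+	csr_wr(CVMX_LMCX_TRO_CTL(if_num), tro.u64);
+
+	tro.s.rclk_cnt = rclk_cnt;	/* then load the desired count */
+	csr_wr(CVMX_LMCX_TRO_CTL(if_num), tro.u64);
+}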
+
+/**
+ * cvmx_lmc#_tro_stat
+ *
+ * LMC_TRO_STAT = LMC Temperature Ring Osc Status
+ * This register reports the status fields associated with the
+ * temperature ring oscillator.
+ */
+union cvmx_lmcx_tro_stat {
+ u64 u64;
+ struct cvmx_lmcx_tro_stat_s {
+ uint64_t reserved_32_63:32;
+ uint64_t ring_cnt:32;
+ } s;
+ struct cvmx_lmcx_tro_stat_s cn61xx;
+ struct cvmx_lmcx_tro_stat_s cn63xx;
+ struct cvmx_lmcx_tro_stat_s cn63xxp1;
+ struct cvmx_lmcx_tro_stat_s cn66xx;
+ struct cvmx_lmcx_tro_stat_s cn68xx;
+ struct cvmx_lmcx_tro_stat_s cn68xxp1;
+ struct cvmx_lmcx_tro_stat_s cnf71xx;
+};
+
+/**
+ * cvmx_lmc#_wlevel_ctl
+ */
+union cvmx_lmcx_wlevel_ctl {
+ u64 u64;
+ struct cvmx_lmcx_wlevel_ctl_s {
+ uint64_t reserved_22_63:42;
+ uint64_t rtt_nom:3;
+ uint64_t bitmask:8;
+ uint64_t or_dis:1;
+ uint64_t sset:1;
+ uint64_t lanemask:9;
+ } s;
+ struct cvmx_lmcx_wlevel_ctl_s cn61xx;
+ struct cvmx_lmcx_wlevel_ctl_s cn63xx;
+ struct cvmx_lmcx_wlevel_ctl_cn63xxp1 {
+ uint64_t reserved_10_63:54;
+ uint64_t sset:1;
+ uint64_t lanemask:9;
+ } cn63xxp1;
+ struct cvmx_lmcx_wlevel_ctl_s cn66xx;
+ struct cvmx_lmcx_wlevel_ctl_s cn68xx;
+ struct cvmx_lmcx_wlevel_ctl_s cn68xxp1;
+ struct cvmx_lmcx_wlevel_ctl_s cn70xx;
+ struct cvmx_lmcx_wlevel_ctl_s cn70xxp1;
+ struct cvmx_lmcx_wlevel_ctl_s cn73xx;
+ struct cvmx_lmcx_wlevel_ctl_s cn78xx;
+ struct cvmx_lmcx_wlevel_ctl_s cn78xxp1;
+ struct cvmx_lmcx_wlevel_ctl_s cnf71xx;
+ struct cvmx_lmcx_wlevel_ctl_s cnf75xx;
+};
+
+/**
+ * cvmx_lmc#_wlevel_dbg
+ *
+ * A given read of LMC()_WLEVEL_DBG returns the write leveling pass/fail
+ * results for all possible delay settings (i.e. the BITMASK) for only one
+ * byte in the last rank that the hardware write leveled.
+ * LMC()_WLEVEL_DBG[BYTE] selects the particular byte. To get these
+ * pass/fail results for a different rank, you must run the hardware write
+ * leveling again. For example, it is possible to get the [BITMASK] results
+ * for every byte of every rank if you run write leveling separately for
+ * each rank, probing LMC()_WLEVEL_DBG between each write-leveling.
+ */
+union cvmx_lmcx_wlevel_dbg {
+ u64 u64;
+ struct cvmx_lmcx_wlevel_dbg_s {
+ uint64_t reserved_12_63:52;
+ uint64_t bitmask:8;
+ uint64_t byte:4;
+ } s;
+ struct cvmx_lmcx_wlevel_dbg_s cn61xx;
+ struct cvmx_lmcx_wlevel_dbg_s cn63xx;
+ struct cvmx_lmcx_wlevel_dbg_s cn63xxp1;
+ struct cvmx_lmcx_wlevel_dbg_s cn66xx;
+ struct cvmx_lmcx_wlevel_dbg_s cn68xx;
+ struct cvmx_lmcx_wlevel_dbg_s cn68xxp1;
+ struct cvmx_lmcx_wlevel_dbg_s cn70xx;
+ struct cvmx_lmcx_wlevel_dbg_s cn70xxp1;
+ struct cvmx_lmcx_wlevel_dbg_s cn73xx;
+ struct cvmx_lmcx_wlevel_dbg_s cn78xx;
+ struct cvmx_lmcx_wlevel_dbg_s cn78xxp1;
+ struct cvmx_lmcx_wlevel_dbg_s cnf71xx;
+ struct cvmx_lmcx_wlevel_dbg_s cnf75xx;
+};
+
+/**
+ * cvmx_lmc#_wlevel_rank#
+ *
+ * Four of these CSRs exist per LMC, one for each rank. Write level setting
+ * is measured in units of 1/8 CK, so the below BYTEn values can range over
+ * 4 CK cycles. Assuming LMC()_WLEVEL_CTL[SSET]=0, the BYTEn<2:0> values are
+ * not used during write leveling, and they are overwritten by the hardware
+ * as part of the write leveling sequence. (Hardware sets [STATUS] to 3 after
+ * hardware write leveling completes for the rank). Software needs to set
+ * BYTEn<4:3> bits.
+ *
+ * Each CSR may also be written by software, but not while a write leveling
+ * sequence is in progress. (Hardware sets [STATUS] to 1 after a CSR write.)
+ * Software initiates a hardware write-leveling sequence by programming
+ * LMC()_WLEVEL_CTL and writing RANKMASK and INIT_START=1 with SEQ_SEL=6 in
+ * LMC(0)_CONFIG.
+ *
+ * LMC will then step through and accumulate write leveling results for 8
+ * unique delay settings (twice), starting at a delay of LMC()_WLEVEL_RANK()
+ * [BYTEn<4:3>]* 8 CK increasing by 1/8 CK each setting. Hardware will then
+ * set LMC()_WLEVEL_RANK()[BYTEn<2:0>] to indicate the first write leveling
+ * result of 1 that followed a result of 0 during the sequence by searching
+ * for a '1100' pattern in the generated bitmask, except that LMC will always
+ * write LMC()_WLEVEL_RANK()[BYTEn<0>]=0. If hardware is unable to find a match
+ * for a '1100' pattern, then hardware sets LMC()_WLEVEL_RANK() [BYTEn<2:0>]
+ * to 0x4. See LMC()_WLEVEL_CTL.
+ *
+ * LMC()_WLEVEL_RANKi values for ranks i without attached DRAM should be set
+ * such that they do not increase the range of possible BYTE values for any
+ * byte lane. The easiest way to do this is to set LMC()_WLEVEL_RANKi =
+ * LMC()_WLEVEL_RANKj, where j is some rank with attached DRAM whose
+ * LMC()_WLEVEL_RANKj is already fully initialized.
+ */
+union cvmx_lmcx_wlevel_rankx {
+ u64 u64;
+ struct cvmx_lmcx_wlevel_rankx_s {
+ uint64_t reserved_47_63:17;
+ uint64_t status:2;
+ uint64_t byte8:5;
+ uint64_t byte7:5;
+ uint64_t byte6:5;
+ uint64_t byte5:5;
+ uint64_t byte4:5;
+ uint64_t byte3:5;
+ uint64_t byte2:5;
+ uint64_t byte1:5;
+ uint64_t byte0:5;
+ } s;
+ struct cvmx_lmcx_wlevel_rankx_s cn61xx;
+ struct cvmx_lmcx_wlevel_rankx_s cn63xx;
+ struct cvmx_lmcx_wlevel_rankx_s cn63xxp1;
+ struct cvmx_lmcx_wlevel_rankx_s cn66xx;
+ struct cvmx_lmcx_wlevel_rankx_s cn68xx;
+ struct cvmx_lmcx_wlevel_rankx_s cn68xxp1;
+ struct cvmx_lmcx_wlevel_rankx_s cn70xx;
+ struct cvmx_lmcx_wlevel_rankx_s cn70xxp1;
+ struct cvmx_lmcx_wlevel_rankx_s cn73xx;
+ struct cvmx_lmcx_wlevel_rankx_s cn78xx;
+ struct cvmx_lmcx_wlevel_rankx_s cn78xxp1;
+ struct cvmx_lmcx_wlevel_rankx_s cnf71xx;
+ struct cvmx_lmcx_wlevel_rankx_s cnf75xx;
+};
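+
+/*
+ * Illustrative software mirror of the '1100' search described above,
+ * with the assumption that bit i of the bitmask holds the pass/fail
+ * result of delay setting i: find two failing settings followed by two
+ * passing ones and return the BYTEn<2:0> value, forcing bit 0 to zero
+ * as the hardware does. Helper name and bit ordering are assumptions.
+ */
+static inline int lmcx_wlevel_find_1100(u8 bitmask)
+{
+	int i;
+
+	for (i = 0; i <= 4; i++)
+		if (((bitmask >> i) & 0xf) == 0xc)
+			return (i + 2) & ~1;	/* first 1 after a 0 */
+
+	return 0x4;	/* hardware fallback when no match is found */
+}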
+
+/**
+ * cvmx_lmc#_wodt_ctl0
+ *
+ * LMC_WODT_CTL0 = LMC Write OnDieTermination control
+ * See the description in LMC_WODT_CTL1.
+ *
+ * Notes:
+ * Together, the LMC_WODT_CTL1 and LMC_WODT_CTL0 CSRs control the write
+ * ODT mask. See LMC_WODT_CTL1.
+ *
+ */
+union cvmx_lmcx_wodt_ctl0 {
+ u64 u64;
+ struct cvmx_lmcx_wodt_ctl0_s {
+ uint64_t reserved_0_63:64;
+ } s;
+ struct cvmx_lmcx_wodt_ctl0_cn30xx {
+ uint64_t reserved_32_63:32;
+ uint64_t wodt_d1_r1:8;
+ uint64_t wodt_d1_r0:8;
+ uint64_t wodt_d0_r1:8;
+ uint64_t wodt_d0_r0:8;
+ } cn30xx;
+ struct cvmx_lmcx_wodt_ctl0_cn30xx cn31xx;
+ struct cvmx_lmcx_wodt_ctl0_cn38xx {
+ uint64_t reserved_32_63:32;
+ uint64_t wodt_hi3:4;
+ uint64_t wodt_hi2:4;
+ uint64_t wodt_hi1:4;
+ uint64_t wodt_hi0:4;
+ uint64_t wodt_lo3:4;
+ uint64_t wodt_lo2:4;
+ uint64_t wodt_lo1:4;
+ uint64_t wodt_lo0:4;
+ } cn38xx;
+ struct cvmx_lmcx_wodt_ctl0_cn38xx cn38xxp2;
+ struct cvmx_lmcx_wodt_ctl0_cn38xx cn50xx;
+ struct cvmx_lmcx_wodt_ctl0_cn30xx cn52xx;
+ struct cvmx_lmcx_wodt_ctl0_cn30xx cn52xxp1;
+ struct cvmx_lmcx_wodt_ctl0_cn30xx cn56xx;
+ struct cvmx_lmcx_wodt_ctl0_cn30xx cn56xxp1;
+ struct cvmx_lmcx_wodt_ctl0_cn38xx cn58xx;
+ struct cvmx_lmcx_wodt_ctl0_cn38xx cn58xxp1;
+};
+
+/**
+ * cvmx_lmc#_wodt_ctl1
+ *
+ * LMC_WODT_CTL1 = LMC Write OnDieTermination control
+ * System designers may desire to terminate DQ/DQS/DM lines for higher
+ * frequency DDR operations (667MHz and faster), especially on a multi-rank
+ * system. DDR2 DQ/DM/DQS I/Os have built-in termination resistors that can
+ * be turned on or off by the controller, after meeting tAOND and tAOF
+ * timing requirements. Each rank has its own ODT pin that fans out to all
+ * the memory parts in that DIMM. System designers may prefer different
+ * combinations of ODT ONs for read and write into different ranks. Octeon
+ * supports full programmability by way of the mask register below.
+ * Each rank position has its own 8-bit programmable field.
+ * When the controller does a write to that rank, it sets the 8 ODT pins
+ * to the MASK pins below. For example, when doing a write into Rank0, a
+ * system designer may desire to terminate the lines with the resistor on
+ * DIMM0/Rank1. The mask WODT_D0_R0 would then be [00000010]. If the ODT
+ * feature is not desired, the DDR parts can be programmed to not look at
+ * these pins by writing 0 in QS_DIC. Octeon drives the appropriate mask
+ * values on the ODT pins by default.
+ * If this feature is not required, write 0 in this register.
+ *
+ * Notes:
+ * Together, the LMC_WODT_CTL1 and LMC_WODT_CTL0 CSRs control the write
+ * ODT mask. When a given RANK is selected, the WODT mask for that RANK
+ * is used. The resulting WODT mask is driven to the DIMMs in the following
+ * manner:
+ * BUNK_ENA=1 BUNK_ENA=0
+ * Mask[7] -> DIMM3, RANK1 DIMM3
+ * Mask[6] -> DIMM3, RANK0
+ * Mask[5] -> DIMM2, RANK1 DIMM2
+ * Mask[4] -> DIMM2, RANK0
+ * Mask[3] -> DIMM1, RANK1 DIMM1
+ * Mask[2] -> DIMM1, RANK0
+ * Mask[1] -> DIMM0, RANK1 DIMM0
+ * Mask[0] -> DIMM0, RANK0
+ */
+union cvmx_lmcx_wodt_ctl1 {
+ u64 u64;
+ struct cvmx_lmcx_wodt_ctl1_s {
+ uint64_t reserved_32_63:32;
+ uint64_t wodt_d3_r1:8;
+ uint64_t wodt_d3_r0:8;
+ uint64_t wodt_d2_r1:8;
+ uint64_t wodt_d2_r0:8;
+ } s;
+ struct cvmx_lmcx_wodt_ctl1_s cn30xx;
+ struct cvmx_lmcx_wodt_ctl1_s cn31xx;
+ struct cvmx_lmcx_wodt_ctl1_s cn52xx;
+ struct cvmx_lmcx_wodt_ctl1_s cn52xxp1;
+ struct cvmx_lmcx_wodt_ctl1_s cn56xx;
+ struct cvmx_lmcx_wodt_ctl1_s cn56xxp1;
+};
+
+/**
+ * cvmx_lmc#_wodt_mask
+ *
+ * System designers may desire to terminate DQ/DQS lines for higher-frequency
+ * DDR operations, especially on a multirank system. DDR3 DQ/DQS I/Os have
+ * built-in termination resistors that can be turned on or off by the
+ * controller, after meeting TAOND and TAOF timing requirements. Each rank
+ * has its own ODT pin that fans out to all of the memory parts in that DIMM.
+ * System designers may prefer different combinations of ODT ONs for write
+ * operations into different ranks. CNXXXX supports full programmability by
+ * way of the mask register below. Each rank position has its own 8-bit
+ * programmable field. When the controller does a write to that rank,
+ * it sets the four ODT pins to the mask pins below. For example, when
+ * doing a write into Rank0, a system designer may desire to terminate the
+ * lines with the resistor on DIMM0/Rank1. The mask [WODT_D0_R0] would then
+ * be [00000010].
+ *
+ * CNXXXX drives the appropriate mask values on the ODT pins by default.
+ * If this feature is not required, write 0x0 in this register. When a
+ * given RANK is selected, the WODT mask for that RANK is used. The
+ * resulting WODT mask is driven to the DIMMs in the following manner:
+ */
+union cvmx_lmcx_wodt_mask {
+ u64 u64;
+ struct cvmx_lmcx_wodt_mask_s {
+ uint64_t wodt_d3_r1:8;
+ uint64_t wodt_d3_r0:8;
+ uint64_t wodt_d2_r1:8;
+ uint64_t wodt_d2_r0:8;
+ uint64_t wodt_d1_r1:8;
+ uint64_t wodt_d1_r0:8;
+ uint64_t wodt_d0_r1:8;
+ uint64_t wodt_d0_r0:8;
+ } s;
+ struct cvmx_lmcx_wodt_mask_s cn61xx;
+ struct cvmx_lmcx_wodt_mask_s cn63xx;
+ struct cvmx_lmcx_wodt_mask_s cn63xxp1;
+ struct cvmx_lmcx_wodt_mask_s cn66xx;
+ struct cvmx_lmcx_wodt_mask_s cn68xx;
+ struct cvmx_lmcx_wodt_mask_s cn68xxp1;
+ struct cvmx_lmcx_wodt_mask_cn70xx {
+ uint64_t reserved_28_63:36;
+ uint64_t wodt_d1_r1:4;
+ uint64_t reserved_20_23:4;
+ uint64_t wodt_d1_r0:4;
+ uint64_t reserved_12_15:4;
+ uint64_t wodt_d0_r1:4;
+ uint64_t reserved_4_7:4;
+ uint64_t wodt_d0_r0:4;
+ } cn70xx;
+ struct cvmx_lmcx_wodt_mask_cn70xx cn70xxp1;
+ struct cvmx_lmcx_wodt_mask_cn70xx cn73xx;
+ struct cvmx_lmcx_wodt_mask_cn70xx cn78xx;
+ struct cvmx_lmcx_wodt_mask_cn70xx cn78xxp1;
+ struct cvmx_lmcx_wodt_mask_s cnf71xx;
+ struct cvmx_lmcx_wodt_mask_cn70xx cnf75xx;
+};
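+
+/*
+ * Illustrative sketch of the WODT_D0_R0 example above: terminate writes
+ * into Rank0 with the resistor on DIMM0/Rank1, i.e. mask 00000010.
+ * Uses the generic field layout; the address macro is assumed and the
+ * helper name is hypothetical.
+ */
+static inline void lmcx_wodt_example(int if_num)
+{
+	union cvmx_lmcx_wodt_mask mask;
+
+	mask.u64 = csr_rd(CVMX_LMCX_WODT_MASK(if_num));
+	mask.s.wodt_d0_r0 = 0x02;	/* drive the DIMM0/Rank1 ODT pin */
+	csr_wr(CVMX_LMCX_WODT_MASK(if_num), mask.u64);
+}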
+
+#endif
diff --git a/arch/mips/mach-octeon/include/mach/octeon-feature.h b/arch/mips/mach-octeon/include/mach/octeon-feature.h
new file mode 100644
index 0000000000..1202716ba5
--- /dev/null
+++ b/arch/mips/mach-octeon/include/mach/octeon-feature.h
@@ -0,0 +1,442 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020 Marvell International Ltd.
+ */
+
+#ifndef __OCTEON_FEATURE_H__
+#define __OCTEON_FEATURE_H__
+
+/*
+ * Octeon models are declared after the macros in octeon-model.h with the
+ * suffix _FEATURE. The individual features are declared with the
+ * _FEATURE_ infix.
+ */
+enum octeon_feature {
+ /*
+ * Checks on the critical path are moved to the top (8 positions)
+ * so that the compiler generates one less insn than for the rest
+ * of the checks.
+ */
+ OCTEON_FEATURE_PKND, /* CN68XX uses port kinds for packet interface */
+ /* CN68XX has different fields in word0 - word2 */
+ OCTEON_FEATURE_CN68XX_WQE,
+
+ /*
+ * Features
+ */
+ /*
+ * Octeon models in the CN5XXX family and higher support atomic
+ * add instructions to memory (saa/saad)
+ */
+ OCTEON_FEATURE_SAAD,
+ /* Does this Octeon support the ZIP offload engine? */
+ OCTEON_FEATURE_ZIP,
+ /* Does this Octeon support crypto acceleration using COP2? */
+ OCTEON_FEATURE_CRYPTO,
+ /* Can crypto be enabled by calling cvmx_crypto_dormant_enable()? */
+ OCTEON_FEATURE_DORM_CRYPTO,
+ OCTEON_FEATURE_PCIE, /* Does this Octeon support PCI express? */
+ OCTEON_FEATURE_SRIO, /* Does this Octeon support SRIO */
+ OCTEON_FEATURE_ILK, /* Does this Octeon support Interlaken */
+ /*
+ * Some Octeon models support internal memory for storing
+ * cryptographic keys
+ */
+ OCTEON_FEATURE_KEY_MEMORY,
+ /* Octeon has a LED controller for banks of external LEDs */
+ OCTEON_FEATURE_LED_CONTROLLER,
+ OCTEON_FEATURE_TRA, /* Octeon has a trace buffer */
+ OCTEON_FEATURE_MGMT_PORT, /* Octeon has a management port */
+ OCTEON_FEATURE_RAID, /* Octeon has a raid unit */
+ OCTEON_FEATURE_USB, /* Octeon has a builtin USB */
+ /* Octeon IPD can run without using work queue entries */
+ OCTEON_FEATURE_NO_WPTR,
+ OCTEON_FEATURE_DFA, /* Octeon has DFA state machines */
+ /*
+ * Octeon MDIO block supports clause 45 transactions for
+ * 10 Gig support
+ */
+ OCTEON_FEATURE_MDIO_CLAUSE_45,
+ /*
+ * CN52XX and CN56XX used a block named NPEI for PCIe access.
+ * Newer chips replaced this with SLI+DPI
+ */
+ OCTEON_FEATURE_NPEI,
+ OCTEON_FEATURE_HFA, /* Octeon has DFA/HFA */
+ OCTEON_FEATURE_DFM, /* Octeon has DFM */
+ OCTEON_FEATURE_CIU2, /* Octeon has CIU2 */
+ /* Octeon has DMA Instruction Completion Interrupt mode */
+ OCTEON_FEATURE_DICI_MODE,
+	/* Octeon has Bit Select Extractor scheduler */
+ OCTEON_FEATURE_BIT_EXTRACTOR,
+ OCTEON_FEATURE_NAND, /* Octeon has NAND */
+ OCTEON_FEATURE_MMC, /* Octeon has built-in MMC support */
+ OCTEON_FEATURE_ROM, /* Octeon has built-in ROM support */
+ OCTEON_FEATURE_AUTHENTIK, /* Octeon has Authentik ROM support */
+	OCTEON_FEATURE_MULTICAST_TIMER, /* Octeon has multicast timer */
+ OCTEON_FEATURE_MULTINODE, /* Octeon has node support */
+ OCTEON_FEATURE_CIU3, /* Octeon has CIU3 */
+ OCTEON_FEATURE_FPA3, /* Octeon has FPA first seen on 78XX */
+ /* CN78XX has different fields in word0 - word2 */
+ OCTEON_FEATURE_CN78XX_WQE,
+ OCTEON_FEATURE_PKO3, /* Octeon has enhanced PKO block */
+ OCTEON_FEATURE_SPI, /* Octeon supports SPI interfaces */
+ OCTEON_FEATURE_ZIP3, /* Octeon has zip first seen on 78XX */
+ OCTEON_FEATURE_BCH, /* Octeon supports BCH ECC */
+ OCTEON_FEATURE_PKI, /* Octeon has PKI block */
+ OCTEON_FEATURE_OCLA, /* Octeon has OCLA */
+ OCTEON_FEATURE_FAU, /* Octeon has FAU */
+ OCTEON_FEATURE_BGX, /* Octeon has BGX */
+	OCTEON_FEATURE_BGX_MIX, /* One of the BGX interfaces is used for MIX */
+ OCTEON_FEATURE_HNA, /* Octeon has HNA */
+ OCTEON_FEATURE_BGX_XCV, /* Octeon has BGX XCV RGMII support */
+	OCTEON_FEATURE_TSO, /* Octeon has TCP segmentation offload */
+ OCTEON_FEATURE_TDM, /* Octeon has PCM/TDM support */
+ OCTEON_FEATURE_PTP, /* Octeon has PTP support */
+ OCTEON_MAX_FEATURE
+};
+
+static inline int octeon_has_feature_OCTEON_FEATURE_SAAD(void)
+{
+ return true;
+}
+
+static inline int octeon_has_feature_OCTEON_FEATURE_ZIP(void)
+{
+ if (OCTEON_IS_MODEL(OCTEON_CNF71XX) ||
+ OCTEON_IS_MODEL(OCTEON_CN70XX) || OCTEON_IS_MODEL(OCTEON_CNF75XX))
+ return 0;
+ else
+ return !cvmx_fuse_read(121);
+}
+
+static inline int octeon_has_feature_OCTEON_FEATURE_ZIP3(void)
+{
+ return (OCTEON_IS_MODEL(OCTEON_CN78XX) ||
+ OCTEON_IS_MODEL(OCTEON_CN73XX));
+}
+
+static inline int octeon_has_feature_OCTEON_FEATURE_BCH(void)
+{
+ return (OCTEON_IS_MODEL(OCTEON_CN70XX) ||
+ OCTEON_IS_MODEL(OCTEON_CNF75XX) ||
+ OCTEON_IS_MODEL(OCTEON_CN73XX));
+}
+
+static inline int octeon_has_feature_OCTEON_FEATURE_CRYPTO(void)
+{
+ /* OCTEON II and later */
+ u64 val;
+
+ val = csr_rd(CVMX_MIO_FUS_DAT2);
+ if (val & MIO_FUS_DAT2_NOCRYPTO || val & MIO_FUS_DAT2_NOMUL)
+ return 0;
+ else if (!(val & MIO_FUS_DAT2_DORM_CRYPTO))
+ return 1;
+
+ val = csr_rd(CVMX_RNM_CTL_STATUS);
+ return val & RNM_CTL_STATUS_EER_VAL;
+}
+
+static inline int octeon_has_feature_OCTEON_FEATURE_DORM_CRYPTO(void)
+{
+ /* OCTEON II and later */
+ u64 val;
+
+ val = csr_rd(CVMX_MIO_FUS_DAT2);
+ return !(val & MIO_FUS_DAT2_NOCRYPTO) && !(val & MIO_FUS_DAT2_NOMUL) &&
+ (val & MIO_FUS_DAT2_DORM_CRYPTO);
+}
+
+static inline int octeon_has_feature_OCTEON_FEATURE_PCIE(void)
+{
+ /* OCTEON II and later have PCIe */
+ return true;
+}
+
+static inline int octeon_has_feature_OCTEON_FEATURE_SRIO(void)
+{
+ if (OCTEON_IS_MODEL(OCTEON_CNF75XX)) {
+ if (cvmx_fuse_read(1601) == 0)
+ return 0;
+ else
+ return 1;
+ } else {
+ return (OCTEON_IS_MODEL(OCTEON_CN63XX) ||
+ OCTEON_IS_MODEL(OCTEON_CN66XX));
+ }
+}
+
+static inline int octeon_has_feature_OCTEON_FEATURE_ILK(void)
+{
+ return (OCTEON_IS_MODEL(OCTEON_CN68XX) ||
+ OCTEON_IS_MODEL(OCTEON_CN78XX));
+}
+
+static inline int octeon_has_feature_OCTEON_FEATURE_KEY_MEMORY(void)
+{
+ /* OCTEON II or later */
+ return true;
+}
+
+static inline int octeon_has_feature_OCTEON_FEATURE_LED_CONTROLLER(void)
+{
+ return false;
+}
+
+static inline int octeon_has_feature_OCTEON_FEATURE_TRA(void)
+{
+ return !OCTEON_IS_OCTEON3();
+}
+
+static inline int octeon_has_feature_OCTEON_FEATURE_MGMT_PORT(void)
+{
+ /* OCTEON II or later */
+ return true;
+}
+
+static inline int octeon_has_feature_OCTEON_FEATURE_RAID(void)
+{
+ return !OCTEON_IS_MODEL(OCTEON_CNF75XX);
+}
+
+static inline int octeon_has_feature_OCTEON_FEATURE_USB(void)
+{
+ return true;
+}
+
+static inline int octeon_has_feature_OCTEON_FEATURE_NO_WPTR(void)
+{
+ return true;
+}
+
+static inline int octeon_has_feature_OCTEON_FEATURE_DFA(void)
+{
+ return 0;
+}
+
+static inline int octeon_has_feature_OCTEON_FEATURE_HFA(void)
+{
+ if (OCTEON_IS_MODEL(OCTEON_CNF75XX))
+ return 0;
+ else
+ return !cvmx_fuse_read(90);
+}
+
+static inline int octeon_has_feature_OCTEON_FEATURE_HNA(void)
+{
+ if (OCTEON_IS_MODEL(OCTEON_CN78XX) || OCTEON_IS_MODEL(OCTEON_CN73XX))
+ return !cvmx_fuse_read(134);
+ else
+ return 0;
+}
+
+static inline int octeon_has_feature_OCTEON_FEATURE_DFM(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX)))
+ return 0;
+ else
+ return !cvmx_fuse_read(90);
+}
+
+static inline int octeon_has_feature_OCTEON_FEATURE_MDIO_CLAUSE_45(void)
+{
+ return true;
+}
+
+static inline int octeon_has_feature_OCTEON_FEATURE_NPEI(void)
+{
+ return false;
+}
+
+static inline int octeon_has_feature_OCTEON_FEATURE_PKND(void)
+{
+ return OCTEON_IS_MODEL(OCTEON_CN68XX) ||
+ OCTEON_IS_MODEL(OCTEON_CNF75XX) ||
+ OCTEON_IS_MODEL(OCTEON_CN73XX) ||
+ OCTEON_IS_MODEL(OCTEON_CN78XX);
+}
+
+static inline int octeon_has_feature_OCTEON_FEATURE_CN68XX_WQE(void)
+{
+ return OCTEON_IS_MODEL(OCTEON_CN68XX);
+}
+
+static inline int octeon_has_feature_OCTEON_FEATURE_CIU2(void)
+{
+ return OCTEON_IS_MODEL(OCTEON_CN68XX);
+}
+
+static inline int octeon_has_feature_OCTEON_FEATURE_CIU3(void)
+{
+ return (OCTEON_IS_MODEL(OCTEON_CN78XX) ||
+ OCTEON_IS_MODEL(OCTEON_CNF75XX) ||
+ OCTEON_IS_MODEL(OCTEON_CN73XX));
+}
+
+static inline int octeon_has_feature_OCTEON_FEATURE_FPA3(void)
+{
+ return (OCTEON_IS_MODEL(OCTEON_CN78XX) ||
+ OCTEON_IS_MODEL(OCTEON_CNF75XX) ||
+ OCTEON_IS_MODEL(OCTEON_CN73XX));
+}
+
+static inline int octeon_has_feature_OCTEON_FEATURE_NAND(void)
+{
+ return (OCTEON_IS_MODEL(OCTEON_CN63XX) ||
+ OCTEON_IS_MODEL(OCTEON_CN66XX) ||
+ OCTEON_IS_MODEL(OCTEON_CN68XX) ||
+ OCTEON_IS_MODEL(OCTEON_CN73XX) ||
+ OCTEON_IS_MODEL(OCTEON_CNF75XX) ||
+ OCTEON_IS_MODEL(OCTEON_CN70XX));
+}
+
+static inline int octeon_has_feature_OCTEON_FEATURE_DICI_MODE(void)
+{
+ return (OCTEON_IS_MODEL(OCTEON_CN68XX_PASS2_X) ||
+ OCTEON_IS_MODEL(OCTEON_CN61XX) ||
+ OCTEON_IS_MODEL(OCTEON_CNF71XX) ||
+ OCTEON_IS_MODEL(OCTEON_CN70XX));
+}
+
+static inline int octeon_has_feature_OCTEON_FEATURE_BIT_EXTRACTOR(void)
+{
+ return (OCTEON_IS_MODEL(OCTEON_CN68XX_PASS2_X) ||
+ OCTEON_IS_MODEL(OCTEON_CN61XX) ||
+ OCTEON_IS_MODEL(OCTEON_CNF71XX) ||
+ OCTEON_IS_MODEL(OCTEON_CN70XX));
+}
+
+static inline int octeon_has_feature_OCTEON_FEATURE_MMC(void)
+{
+ return (OCTEON_IS_MODEL(OCTEON_CN61XX) ||
+ OCTEON_IS_MODEL(OCTEON_CNF71XX) || OCTEON_IS_OCTEON3());
+}
+
+static inline int octeon_has_feature_OCTEON_FEATURE_ROM(void)
+{
+ return OCTEON_IS_MODEL(OCTEON_CN66XX) ||
+ OCTEON_IS_MODEL(OCTEON_CN61XX) ||
+ OCTEON_IS_MODEL(OCTEON_CNF71XX);
+}
+
+static inline int octeon_has_feature_OCTEON_FEATURE_AUTHENTIK(void)
+{
+ if (OCTEON_IS_MODEL(OCTEON_CN66XX) ||
+ OCTEON_IS_MODEL(OCTEON_CN61XX) ||
+ OCTEON_IS_MODEL(OCTEON_CNF71XX) ||
+ OCTEON_IS_MODEL(OCTEON_CN70XX)) {
+ u64 val;
+
+ val = csr_rd(CVMX_MIO_FUS_DAT2);
+ return (val & MIO_FUS_DAT2_NOCRYPTO) &&
+ (val & MIO_FUS_DAT2_DORM_CRYPTO);
+ }
+
+ return 0;
+}
+
+static inline int octeon_has_feature_OCTEON_FEATURE_MULTICAST_TIMER(void)
+{
+ return (OCTEON_IS_MODEL(OCTEON_CN66XX_PASS1_2) ||
+ OCTEON_IS_MODEL(OCTEON_CN61XX) ||
+ OCTEON_IS_MODEL(OCTEON_CNF71XX) ||
+ OCTEON_IS_MODEL(OCTEON_CN70XX));
+}
+
+static inline int octeon_has_feature_OCTEON_FEATURE_MULTINODE(void)
+{
+ return (!OCTEON_IS_MODEL(OCTEON_CN76XX) &&
+ OCTEON_IS_MODEL(OCTEON_CN78XX));
+}
+
+static inline int octeon_has_feature_OCTEON_FEATURE_CN78XX_WQE(void)
+{
+ return (OCTEON_IS_MODEL(OCTEON_CN78XX) ||
+ OCTEON_IS_MODEL(OCTEON_CNF75XX) ||
+ OCTEON_IS_MODEL(OCTEON_CN73XX));
+}
+
+static inline int octeon_has_feature_OCTEON_FEATURE_SPI(void)
+{
+ return (OCTEON_IS_MODEL(OCTEON_CN66XX) ||
+ OCTEON_IS_MODEL(OCTEON_CN61XX) ||
+ OCTEON_IS_MODEL(OCTEON_CNF71XX) || OCTEON_IS_OCTEON3());
+}
+
+static inline int octeon_has_feature_OCTEON_FEATURE_PKI(void)
+{
+ return (OCTEON_IS_MODEL(OCTEON_CN78XX) ||
+ OCTEON_IS_MODEL(OCTEON_CNF75XX) ||
+ OCTEON_IS_MODEL(OCTEON_CN73XX));
+}
+
+static inline int octeon_has_feature_OCTEON_FEATURE_PKO3(void)
+{
+ return (OCTEON_IS_MODEL(OCTEON_CN78XX) ||
+ OCTEON_IS_MODEL(OCTEON_CNF75XX) ||
+ OCTEON_IS_MODEL(OCTEON_CN73XX));
+}
+
+static inline int octeon_has_feature_OCTEON_FEATURE_OCLA(void)
+{
+ return OCTEON_IS_OCTEON3();
+}
+
+static inline int octeon_has_feature_OCTEON_FEATURE_FAU(void)
+{
+ return (!OCTEON_IS_MODEL(OCTEON_CN78XX) &&
+ !OCTEON_IS_MODEL(OCTEON_CNF75XX) &&
+ !OCTEON_IS_MODEL(OCTEON_CN73XX));
+}
+
+static inline int octeon_has_feature_OCTEON_FEATURE_BGX(void)
+{
+ return (OCTEON_IS_MODEL(OCTEON_CN78XX) ||
+ OCTEON_IS_MODEL(OCTEON_CNF75XX) ||
+ OCTEON_IS_MODEL(OCTEON_CN73XX));
+}
+
+static inline int octeon_has_feature_OCTEON_FEATURE_BGX_MIX(void)
+{
+ return (OCTEON_IS_MODEL(OCTEON_CN78XX) ||
+ OCTEON_IS_MODEL(OCTEON_CNF75XX) ||
+ OCTEON_IS_MODEL(OCTEON_CN73XX));
+}
+
+static inline int octeon_has_feature_OCTEON_FEATURE_BGX_XCV(void)
+{
+ return OCTEON_IS_MODEL(OCTEON_CN73XX);
+}
+
+static inline int octeon_has_feature_OCTEON_FEATURE_TSO(void)
+{
+ return (OCTEON_IS_MODEL(OCTEON_CN73XX) ||
+ OCTEON_IS_MODEL(OCTEON_CNF75XX) ||
+ OCTEON_IS_MODEL(OCTEON_CN78XX_PASS2_X));
+}
+
+static inline int octeon_has_feature_OCTEON_FEATURE_TDM(void)
+{
+ return OCTEON_IS_MODEL(OCTEON_CN61XX) ||
+ OCTEON_IS_MODEL(OCTEON_CNF71XX) ||
+ OCTEON_IS_MODEL(OCTEON_CN70XX);
+}
+
+static inline int octeon_has_feature_OCTEON_FEATURE_PTP(void)
+{
+ return OCTEON_IS_MODEL(OCTEON_CN6XXX) ||
+ OCTEON_IS_MODEL(OCTEON_CNF7XXX) ||
+ OCTEON_IS_MODEL(OCTEON_CN73XX) ||
+ OCTEON_IS_MODEL(OCTEON_CNF75XX) ||
+ OCTEON_IS_MODEL(OCTEON_CN78XX_PASS2_X);
+}
+
+/*
+ * Answer "Does the running chip support this feature?"
+ *
+ * @param feature_x feature to check for
+ * @return 1 when the feature is present and 0 otherwise.
+ */
+#define octeon_has_feature(feature_x) octeon_has_feature_##feature_x()
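+
+/*
+ * Usage sketch (illustrative):
+ *
+ *	if (octeon_has_feature(OCTEON_FEATURE_BGX))
+ *		printf("BGX network blocks present\n");
+ */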
+
+#endif /* __OCTEON_FEATURE_H__ */
diff --git a/arch/mips/mach-octeon/include/mach/octeon-model.h b/arch/mips/mach-octeon/include/mach/octeon-model.h
new file mode 100644
index 0000000000..22d6df6a9e
--- /dev/null
+++ b/arch/mips/mach-octeon/include/mach/octeon-model.h
@@ -0,0 +1,317 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020 Marvell International Ltd.
+ */
+
+#ifndef __OCTEON_MODEL_H__
+#define __OCTEON_MODEL_H__
+
+/*
+ * NOTE: These must match what is checked in common-config.mk
+ * Defines to represent the different versions of Octeon.
+ *
+ * IMPORTANT: When the default pass is updated for an Octeon Model,
+ * the corresponding change must also be made in the oct-sim script.
+ *
+ * The defines below should be used with the OCTEON_IS_MODEL() macro to
+ * determine what model of chip the software is running on. Models ending
+ * in 'XX' match multiple models (families), while specific models match only
+ * that model. If a pass (revision) is specified, then only that revision
+ * will be matched. Care should be taken when checking for both specific
+ * models and families that the specific models are checked for first.
+ * While these defines are similar to the processor ID, they are not intended
+ * to be used by anything other than the OCTEON_IS_MODEL framework, and
+ * the values are subject to change at any time without notice.
+ *
+ * NOTE: only the OCTEON_IS_MODEL() macro/function and the OCTEON_CN* macros
+ * should be used outside of this file. All other macros are for internal
+ * use only, and may change without notice.
+ */
+
+#define OCTEON_FAMILY_MASK 0x00ffff00
+#define OCTEON_PRID_MASK 0x00ffffff
+
+/* Flag bits in top byte */
+/* Ignores revision in model checks */
+#define OM_IGNORE_REVISION 0x01000000
+/* Check submodels */
+#define OM_CHECK_SUBMODEL 0x02000000
+/* Match all models earlier than the one specified */
+#define OM_MATCH_PREVIOUS_MODELS 0x04000000
+/* Ignores the minor revision on newer parts */
+#define OM_IGNORE_MINOR_REVISION 0x08000000
+#define OM_FLAG_MASK 0xff000000
+
+/* Match all cn5XXX Octeon models. */
+#define OM_MATCH_5XXX_FAMILY_MODELS 0x20000000
+/* Match all cn6XXX Octeon models. */
+#define OM_MATCH_6XXX_FAMILY_MODELS 0x40000000
+/* Match all cnf7XXX Octeon models. */
+#define OM_MATCH_F7XXX_FAMILY_MODELS 0x80000000
+/* Match all cn7XXX Octeon models. */
+#define OM_MATCH_7XXX_FAMILY_MODELS 0x10000000
+#define OM_MATCH_FAMILY_MODELS (OM_MATCH_5XXX_FAMILY_MODELS | \
+ OM_MATCH_6XXX_FAMILY_MODELS | \
+ OM_MATCH_F7XXX_FAMILY_MODELS | \
+ OM_MATCH_7XXX_FAMILY_MODELS)
+
+/*
+ * CN7XXX models with new revision encoding
+ */
+
+#define OCTEON_CNF75XX_PASS1_0 0x000d9800
+#define OCTEON_CNF75XX_PASS1_2 0x000d9802
+#define OCTEON_CNF75XX_PASS1_3 0x000d9803
+#define OCTEON_CNF75XX (OCTEON_CNF75XX_PASS1_0 | OM_IGNORE_REVISION)
+#define OCTEON_CNF75XX_PASS1_X \
+ (OCTEON_CNF75XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
+
+#define OCTEON_CN73XX_PASS1_0 0x000d9700
+#define OCTEON_CN73XX_PASS1_1 0x000d9701
+#define OCTEON_CN73XX_PASS1_2 0x000d9702
+#define OCTEON_CN73XX_PASS1_3 0x000d9703
+#define OCTEON_CN73XX (OCTEON_CN73XX_PASS1_0 | OM_IGNORE_REVISION)
+#define OCTEON_CN73XX_PASS1_X \
+ (OCTEON_CN73XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
+
+#define OCTEON_CN72XX OCTEON_CN73XX
+
+#define OCTEON_CN23XX OCTEON_CN73XX
+#define OCTEON_CN23XX_PASS1_2 OCTEON_CN73XX_PASS1_2
+#define OCTEON_CN23XX_PASS1_3 OCTEON_CN73XX_PASS1_3
+
+#define OCTEON_CN70XX_PASS1_0 0x000d9600
+#define OCTEON_CN70XX_PASS1_1 0x000d9601
+#define OCTEON_CN70XX_PASS1_2 0x000d9602
+
+#define OCTEON_CN70XX_PASS2_0 0x000d9608
+
+#define OCTEON_CN70XX (OCTEON_CN70XX_PASS1_0 | OM_IGNORE_REVISION)
+#define OCTEON_CN70XX_PASS1_X \
+ (OCTEON_CN70XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
+#define OCTEON_CN70XX_PASS2_X \
+ (OCTEON_CN70XX_PASS2_0 | OM_IGNORE_MINOR_REVISION)
+
+#define OCTEON_CN71XX OCTEON_CN70XX
+
+#define OCTEON_CN78XX_PASS1_0 0x000d9500
+#define OCTEON_CN78XX_PASS1_1 0x000d9501
+#define OCTEON_CN78XX_PASS2_0 0x000d9508
+
+#define OCTEON_CN78XX (OCTEON_CN78XX_PASS2_0 | OM_IGNORE_REVISION)
+#define OCTEON_CN78XX_PASS1_X \
+ (OCTEON_CN78XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
+#define OCTEON_CN78XX_PASS2_X \
+ (OCTEON_CN78XX_PASS2_0 | OM_IGNORE_MINOR_REVISION)
+
+#define OCTEON_CN76XX (0x000d9540 | OM_CHECK_SUBMODEL)
+
+/*
+ * CNF7XXX models with new revision encoding
+ */
+#define OCTEON_CNF71XX_PASS1_0 0x000d9400
+#define OCTEON_CNF71XX_PASS1_1 0x000d9401
+
+#define OCTEON_CNF71XX (OCTEON_CNF71XX_PASS1_0 | OM_IGNORE_REVISION)
+#define OCTEON_CNF71XX_PASS1_X \
+ (OCTEON_CNF71XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
+
+/*
+ * CN6XXX models with new revision encoding
+ */
+#define OCTEON_CN68XX_PASS1_0 0x000d9100
+#define OCTEON_CN68XX_PASS1_1 0x000d9101
+#define OCTEON_CN68XX_PASS2_0 0x000d9108
+#define OCTEON_CN68XX_PASS2_1 0x000d9109
+#define OCTEON_CN68XX_PASS2_2 0x000d910a
+
+#define OCTEON_CN68XX (OCTEON_CN68XX_PASS2_0 | OM_IGNORE_REVISION)
+#define OCTEON_CN68XX_PASS1_X \
+ (OCTEON_CN68XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
+#define OCTEON_CN68XX_PASS2_X \
+ (OCTEON_CN68XX_PASS2_0 | OM_IGNORE_MINOR_REVISION)
+
+#define OCTEON_CN68XX_PASS1 OCTEON_CN68XX_PASS1_X
+#define OCTEON_CN68XX_PASS2 OCTEON_CN68XX_PASS2_X
+
+#define OCTEON_CN66XX_PASS1_0 0x000d9200
+#define OCTEON_CN66XX_PASS1_2 0x000d9202
+
+#define OCTEON_CN66XX (OCTEON_CN66XX_PASS1_0 | OM_IGNORE_REVISION)
+#define OCTEON_CN66XX_PASS1_X \
+ (OCTEON_CN66XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
+
+#define OCTEON_CN63XX_PASS1_0 0x000d9000
+#define OCTEON_CN63XX_PASS1_1 0x000d9001
+#define OCTEON_CN63XX_PASS1_2 0x000d9002
+#define OCTEON_CN63XX_PASS2_0 0x000d9008
+#define OCTEON_CN63XX_PASS2_1 0x000d9009
+#define OCTEON_CN63XX_PASS2_2 0x000d900a
+
+#define OCTEON_CN63XX (OCTEON_CN63XX_PASS2_0 | OM_IGNORE_REVISION)
+#define OCTEON_CN63XX_PASS1_X \
+ (OCTEON_CN63XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
+#define OCTEON_CN63XX_PASS2_X \
+ (OCTEON_CN63XX_PASS2_0 | OM_IGNORE_MINOR_REVISION)
+
+/* CN62XX is same as CN63XX with 1 MB cache */
+#define OCTEON_CN62XX OCTEON_CN63XX
+
+#define OCTEON_CN61XX_PASS1_0 0x000d9300
+#define OCTEON_CN61XX_PASS1_1 0x000d9301
+
+#define OCTEON_CN61XX (OCTEON_CN61XX_PASS1_0 | OM_IGNORE_REVISION)
+#define OCTEON_CN61XX_PASS1_X \
+ (OCTEON_CN61XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
+
+/* CN60XX is same as CN61XX with 512 KB cache */
+#define OCTEON_CN60XX OCTEON_CN61XX
+
+/* These match entire Octeon families rather than individual models */
+#define OCTEON_CN6XXX \
+ (OCTEON_CN63XX_PASS1_0 | OM_MATCH_6XXX_FAMILY_MODELS)
+#define OCTEON_CNF7XXX \
+ (OCTEON_CNF71XX_PASS1_0 | OM_MATCH_F7XXX_FAMILY_MODELS)
+#define OCTEON_CN7XXX \
+ (OCTEON_CN78XX_PASS1_0 | OM_MATCH_7XXX_FAMILY_MODELS)
+
+/*
+ * The revision byte (low byte) has two different encodings.
+ * CN3XXX:
+ *
+ * bits
+ * <7:5>: reserved (0)
+ * <4>: alternate package
+ * <3:0>: revision
+ *
+ * CN5XXX and newer models:
+ *
+ * bits
+ * <7>: reserved (0)
+ * <6>: alternate package
+ * <5:3>: major revision
+ * <2:0>: minor revision
+ */
+
+/* Masks used for the various types of model/family/revision matching */
+#define OCTEON_38XX_FAMILY_MASK 0x00ffff00
+#define OCTEON_38XX_FAMILY_REV_MASK 0x00ffff0f
+#define OCTEON_38XX_MODEL_MASK 0x00ffff10
+#define OCTEON_38XX_MODEL_REV_MASK \
+ (OCTEON_38XX_FAMILY_REV_MASK | OCTEON_38XX_MODEL_MASK)
+
+/* CN5XXX and later use different layout of bits in the revision ID field */
+#define OCTEON_58XX_FAMILY_MASK OCTEON_38XX_FAMILY_MASK
+#define OCTEON_58XX_FAMILY_REV_MASK 0x00ffff3f
+#define OCTEON_58XX_MODEL_MASK 0x00ffff40
+#define OCTEON_58XX_MODEL_REV_MASK \
+ (OCTEON_58XX_FAMILY_REV_MASK | OCTEON_58XX_MODEL_MASK)
+#define OCTEON_58XX_MODEL_MINOR_REV_MASK \
+ (OCTEON_58XX_MODEL_REV_MASK & 0x00ffff38)
+#define OCTEON_5XXX_MODEL_MASK 0x00ff0fc0
+
+#define __OCTEON_MATCH_MASK__(X, Y, Z) \
+ ({ \
+ typeof(X) x = (X); \
+ typeof(Y) y = (Y); \
+ typeof(Z) z = (Z); \
+ (x & z) == (y & z); \
+ })
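+
+/*
+ * E.g. (illustrative): __OCTEON_MATCH_MASK__(prid, OCTEON_CN73XX_PASS1_0,
+ * OCTEON_58XX_FAMILY_MASK) reduces to
+ * (prid & 0x00ffff00) == (0x000d9700 & 0x00ffff00),
+ * i.e. a family-only match that ignores the revision byte.
+ */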
+
+/*
+ * __OCTEON_IS_MODEL_COMPILE__(arg_model, chip_model)
+ * returns true if chip_model is identical to, or belongs to, the OCTEON
+ * model group specified in arg_model.
+ */
+
+/* Helper macros to make the following macro more compact */
+#define OM_MASK OM_FLAG_MASK
+#define OM_MATCH_MASK __OCTEON_MATCH_MASK__
+#define OM_MATCH_PREVIOUS OM_MATCH_PREVIOUS_MODELS
+
+#define __OCTEON_IS_MODEL_COMPILE__(A, B) \
+ ({ \
+ typeof(A) a = (A); \
+ typeof(B) b = (B); \
+ (((((((a) & OM_MASK) == (OM_IGNORE_REVISION | OM_CHECK_SUBMODEL)) && \
+ OM_MATCH_MASK((b), (a), OCTEON_58XX_MODEL_MASK)) || \
+ ((((a) & OM_MASK) == 0) && \
+ OM_MATCH_MASK((b), (a), OCTEON_58XX_FAMILY_REV_MASK)) || \
+ ((((a) & OM_MASK) == OM_IGNORE_MINOR_REVISION) && \
+ OM_MATCH_MASK((b), (a), OCTEON_58XX_MODEL_MINOR_REV_MASK)) || \
+ ((((a) & OM_MASK) == OM_CHECK_SUBMODEL) && \
+ OM_MATCH_MASK((b), (a), OCTEON_58XX_MODEL_MASK)) || \
+ ((((a) & OM_MASK) == OM_IGNORE_REVISION) && \
+ OM_MATCH_MASK((b), (a), OCTEON_58XX_FAMILY_MASK)) || \
+ ((((a) & (OM_MATCH_5XXX_FAMILY_MODELS)) == \
+ OM_MATCH_5XXX_FAMILY_MODELS) && \
+ ((b & OCTEON_PRID_MASK) < OCTEON_CN63XX_PASS1_0)) || \
+ ((((a) & (OM_MATCH_6XXX_FAMILY_MODELS)) == \
+ OM_MATCH_6XXX_FAMILY_MODELS) && \
+ ((b & OCTEON_PRID_MASK) >= OCTEON_CN63XX_PASS1_0) && \
+ ((b & OCTEON_PRID_MASK) < OCTEON_CNF71XX_PASS1_0)) || \
+ ((((a) & (OM_MATCH_F7XXX_FAMILY_MODELS)) == \
+ OM_MATCH_F7XXX_FAMILY_MODELS) && \
+ ((b & OCTEON_PRID_MASK) >= OCTEON_CNF71XX_PASS1_0) && \
+ ((b & OCTEON_PRID_MASK) < OCTEON_CN78XX_PASS1_0)) || \
+ ((((a) & (OM_MATCH_7XXX_FAMILY_MODELS)) == \
+ OM_MATCH_7XXX_FAMILY_MODELS) && ((b & OCTEON_PRID_MASK) >= \
+ OCTEON_CN78XX_PASS1_0)) || \
+ ((((a) & (OM_MATCH_PREVIOUS)) == OM_MATCH_PREVIOUS) && \
+ (((b) & OCTEON_58XX_MODEL_MASK) < ((a) & OCTEON_58XX_MODEL_MASK))) \
+ ))); \
+ })
+
+#ifndef __ASSEMBLY__
+
+#ifndef OCTEON_IS_MODEL
+
+static inline int __octeon_is_model_runtime_internal__(u32 model)
+{
+ u32 cpuid = read_c0_prid();
+
+ return __OCTEON_IS_MODEL_COMPILE__(model, cpuid);
+}
+
+static inline int __octeon_is_model_runtime__(u32 model)
+{
+ return __octeon_is_model_runtime_internal__(model);
+}
+
+/*
+ * The OCTEON_IS_MODEL macro should be used for all Octeon model checking done
+ * in a program.
+ * This should be kept runtime if at all possible and must be conditionalized
+ * with OCTEON_IS_COMMON_BINARY() if runtime checking support is required.
+ *
+ * Use of the macro in preprocessor directives ( #if OCTEON_IS_MODEL(...) )
+ * is NOT SUPPORTED, and should be replaced with CVMX_COMPILED_FOR()
+ * I.e.:
+ * #if OCTEON_IS_MODEL(OCTEON_CN56XX) -> #if CVMX_COMPILED_FOR(OCTEON_CN56XX)
+ */
+#define OCTEON_IS_MODEL(x) __octeon_is_model_runtime__(x)
+#define OCTEON_IS_COMMON_BINARY() 1
+#undef OCTEON_MODEL
+#endif
+
+#define OCTEON_IS_OCTEON2() \
+ (OCTEON_IS_MODEL(OCTEON_CN6XXX) || OCTEON_IS_MODEL(OCTEON_CNF71XX))
+
+#define OCTEON_IS_OCTEON3() OCTEON_IS_MODEL(OCTEON_CN7XXX)
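+
+/*
+ * Usage sketch (illustrative; use_ciu3() is a hypothetical helper):
+ *
+ *	if (OCTEON_IS_OCTEON3() && !OCTEON_IS_MODEL(OCTEON_CN70XX))
+ *		use_ciu3();	// CIU3-class parts: CN73XX/CN78XX/CNF75XX
+ */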
+
+const char *octeon_model_get_string(u32 chip_id);
+const char *octeon_model_get_string_buffer(u32 chip_id, char *buffer);
+
+/**
+ * Return the Octeon family, i.e. the family portion of the chip's PrID
+ * (processor ID) register.
+ *
+ * @return the Octeon family (PrID masked with OCTEON_FAMILY_MASK)
+ */
+static inline u32 cvmx_get_octeon_family(void)
+{
+ return (read_c0_prid() & OCTEON_FAMILY_MASK);
+}
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __OCTEON_MODEL_H__ */
diff --git a/arch/mips/mach-octeon/include/mach/octeon_ddr.h b/arch/mips/mach-octeon/include/mach/octeon_ddr.h
new file mode 100644
index 0000000000..4473be4d44
--- /dev/null
+++ b/arch/mips/mach-octeon/include/mach/octeon_ddr.h
@@ -0,0 +1,982 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020 Marvell International Ltd.
+ */
+
+#ifndef __OCTEON_DDR_H_
+#define __OCTEON_DDR_H_
+
+#include <env.h>
+#include <linux/compat.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <mach/octeon-model.h>
+#include <mach/cvmx/cvmx-lmcx-defs.h>
+
+/* Mapping is done starting from 0x11800.80000000 */
+#define CVMX_L2C_CTL 0x00800000
+#define CVMX_L2C_BIG_CTL 0x00800030
+#define CVMX_L2C_TADX_INT(i) (0x00a00028 + (((i) & 7) * 0x40000))
+#define CVMX_L2C_MCIX_INT(i) (0x00c00028 + (((i) & 3) * 0x40000))
+
+/* Some "external" (non-LMC) registers */
+#define CVMX_IPD_CLK_COUNT 0x00014F0000000338
+#define CVMX_FPA_CLK_COUNT 0x00012800000000F0
+
+#define CVMX_NODE_MEM_SHIFT 40
+
+#define DDR_INTERFACE_MAX 4
+
+/* Private data struct */
+struct ddr_priv {
+ void __iomem *lmc_base;
+ void __iomem *l2c_base;
+
+ bool ddr_clock_initialized[DDR_INTERFACE_MAX];
+ bool ddr_memory_preserved;
+ u32 flags;
+
+ struct ram_info info;
+};
+
+/* Shortcut to convert a number of megabytes into bytes */
+#define MB(X) ((u64)(X) * (u64)(1024 * 1024))
+
+#define octeon_is_cpuid(x) (__OCTEON_IS_MODEL_COMPILE__(x, read_c0_prid()))
+
+#define strtoull simple_strtoull
+
+/* Access LMC registers */
+static inline u64 lmc_rd(struct ddr_priv *priv, u64 addr)
+{
+ return ioread64(priv->lmc_base + addr);
+}
+
+static inline void lmc_wr(struct ddr_priv *priv, u64 addr, u64 val)
+{
+ iowrite64(val, priv->lmc_base + addr);
+}
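+
+/*
+ * Typical read-modify-write pattern on top of these accessors
+ * (illustrative; the union and register macro are assumed to come
+ * from cvmx-lmcx-defs.h):
+ *
+ *	union cvmx_lmcx_control ctrl;
+ *
+ *	ctrl.u64 = lmc_rd(priv, CVMX_LMCX_CONTROL(if_num));
+ *	ctrl.s.rdimm_ena = 1;
+ *	lmc_wr(priv, CVMX_LMCX_CONTROL(if_num), ctrl.u64);
+ */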
+
+/* Access L2C registers */
+static inline u64 l2c_rd(struct ddr_priv *priv, u64 addr)
+{
+ return ioread64(priv->l2c_base + addr);
+}
+
+static inline void l2c_wr(struct ddr_priv *priv, u64 addr, u64 val)
+{
+ iowrite64(val, priv->l2c_base + addr);
+}
+
+/* Access other CSR registers not located inside the LMC address space */
+static inline u64 csr_rd(u64 addr)
+{
+ void __iomem *base;
+
+ base = ioremap_nocache(addr, 0x100);
+ return ioread64(base);
+}
+
+static inline void csr_wr(u64 addr, u64 val)
+{
+ void __iomem *base;
+
+ base = ioremap_nocache(addr, 0x100);
+ return iowrite64(val, base);
+}
+
+/* "Normal" access, without any offsets and/or mapping */
+static inline u64 cvmx_read64_uint64(u64 addr)
+{
+ return readq((void *)addr);
+}
+
+static inline void cvmx_write64_uint64(u64 addr, u64 val)
+{
+ writeq(val, (void *)addr);
+}
+
+/* Failsafe mode */
+#define FLAG_FAILSAFE_MODE 0x01000
+/* Note that the DDR clock initialized flags must be contiguous */
+/* Clock for DDR 0 initialized */
+#define FLAG_DDR0_CLK_INITIALIZED 0x02000
+/* Clock for DDR 1 initialized */
+#define FLAG_DDR1_CLK_INITIALIZED 0x04000
+/* Clock for DDR 2 initialized */
+#define FLAG_DDR2_CLK_INITIALIZED 0x08000
+/* Clock for DDR 3 initialized */
+#define FLAG_DDR3_CLK_INITIALIZED 0x10000
+/* Loaded into RAM externally */
+#define FLAG_RAM_RESIDENT 0x20000
+/* Verbose DDR information */
+#define FLAG_DDR_VERBOSE 0x40000
+/* Check env. for DDR variables */
+#define FLAG_DDR_DEBUG 0x80000
+#define FLAG_DDR_TRACE_INIT 0x100000
+#define FLAG_MEMORY_PRESERVED 0x200000
+#define FLAG_DFM_VERBOSE 0x400000
+#define FLAG_DFM_TRACE_INIT 0x800000
+/* DFM memory clock initialized */
+#define FLAG_DFM_CLK_INITIALIZED 0x1000000
+/* EEPROM clock descr. missing */
+#define FLAG_CLOCK_DESC_MISSING 0x2000000
+/* EEPROM board descr. missing */
+#define FLAG_BOARD_DESC_MISSING 0x4000000
+#define FLAG_DDR_PROMPT 0x8000000
+
+#ifndef DDR_NO_DEBUG
+static inline int ddr_verbose(struct ddr_priv *priv)
+{
+ return !!(priv->flags & FLAG_DDR_VERBOSE);
+}
+
+static inline char *ddr_getenv_debug(struct ddr_priv *priv, char *name)
+{
+ if (priv->flags & FLAG_FAILSAFE_MODE)
+ return NULL;
+
+ if (priv->flags & FLAG_DDR_DEBUG)
+ return env_get(name);
+
+ return NULL;
+}
+#else
+static inline int ddr_verbose(struct ddr_priv *priv)
+{
+ return 0;
+}
+#endif
+
+/* turn the variable name into a string */
+#define CVMX_TMP_STR(x) CVMX_TMP_STR2(x)
+#define CVMX_TMP_STR2(x) #x
+
+#define CVMX_SYNC asm volatile ("sync" : : : "memory")
+
+#define CVMX_CACHE(op, address, offset) \
+ asm volatile ("cache " CVMX_TMP_STR(op) ", " \
+ CVMX_TMP_STR(offset) "(%[rbase])" \
+ : : [rbase] "d" (address))
+
+/* write back and invalidate the block in L2, clearing any lock state */
+#define CVMX_CACHE_WBIL2(address, offset) \
+ CVMX_CACHE(23, address, offset)
+
+/* complete prefetches, invalidate entire dcache */
+#define CVMX_DCACHE_INVALIDATE \
+ { CVMX_SYNC; asm volatile ("cache 9, 0($0)" : : ); }
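+
+/*
+ * Illustrative eviction of a single cache block, e.g. between DRAM
+ * test passes (assumed usage pattern):
+ *
+ *	CVMX_SYNC;			// complete outstanding stores
+ *	CVMX_CACHE_WBIL2(addr, 0);	// writeback + invalidate the block
+ */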
+
+/**
+ * cvmx_l2c_cfg
+ *
+ * Specify the RSL base addresses for the block
+ *
+ * L2C_CFG = L2C Configuration
+ */
+union cvmx_l2c_cfg {
+ u64 u64;
+ struct cvmx_l2c_cfg_s {
+ uint64_t reserved_20_63:44;
+ uint64_t bstrun:1;
+ uint64_t lbist:1;
+ uint64_t xor_bank:1;
+ uint64_t dpres1:1;
+ uint64_t dpres0:1;
+ uint64_t dfill_dis:1;
+ uint64_t fpexp:4;
+ uint64_t fpempty:1;
+ uint64_t fpen:1;
+ uint64_t idxalias:1;
+ uint64_t mwf_crd:4;
+ uint64_t rsp_arb_mode:1;
+ uint64_t rfb_arb_mode:1;
+ uint64_t lrf_arb_mode:1;
+ } s;
+};
+
+/**
+ * cvmx_l2c_ctl
+ *
+ * L2C_CTL = L2C Control
+ *
+ *
+ * Notes:
+ * (1) If MAXVAB is != 0, VAB_THRESH should be less than MAXVAB.
+ *
+ * (2) L2DFDBE and L2DFSBE allows software to generate L2DSBE, L2DDBE, VBFSBE,
+ * and VBFDBE errors for the purposes of testing error handling code. When
+ * one (or both) of these bits are set a PL2 which misses in the L2 will fill
+ * with the appropriate error in the first 2 OWs of the fill. Software can
+ * determine which OW pair gets the error by choosing the desired fill order
+ * (address<6:5>). A PL2 which hits in the L2 will not inject any errors.
+ * Therefore sending a WBIL2 prior to the PL2 is recommended to make a miss
+ * likely (if multiple processors are involved software must be careful to be
+ * sure no other processor or IO device can bring the block into the L2).
+ *
+ * To generate a VBFSBE or VBFDBE, software must first get the cache block
+ * into the cache with an error using a PL2 which misses the L2. Then a
+ * store partial to a portion of the cache block without the error must
+ * change the block to dirty. Then, a subsequent WBL2/WBIL2/victim will
+ * trigger the VBFSBE/VBFDBE error.
+ */
+union cvmx_l2c_ctl {
+ u64 u64;
+ struct cvmx_l2c_ctl_s {
+ uint64_t reserved_29_63:35;
+ uint64_t rdf_fast:1;
+ uint64_t disstgl2i:1;
+ uint64_t l2dfsbe:1;
+ uint64_t l2dfdbe:1;
+ uint64_t discclk:1;
+ uint64_t maxvab:4;
+ uint64_t maxlfb:4;
+ uint64_t rsp_arb_mode:1;
+ uint64_t xmc_arb_mode:1;
+ uint64_t reserved_2_13:12;
+ uint64_t disecc:1;
+ uint64_t disidxalias:1;
+ } s;
+
+ struct cvmx_l2c_ctl_cn73xx {
+ uint64_t reserved_32_63:32;
+ uint64_t ocla_qos:3;
+ uint64_t reserved_28_28:1;
+ uint64_t disstgl2i:1;
+ uint64_t reserved_25_26:2;
+ uint64_t discclk:1;
+ uint64_t reserved_16_23:8;
+ uint64_t rsp_arb_mode:1;
+ uint64_t xmc_arb_mode:1;
+ uint64_t rdf_cnt:8;
+ uint64_t reserved_4_5:2;
+ uint64_t disldwb:1;
+ uint64_t dissblkdty:1;
+ uint64_t disecc:1;
+ uint64_t disidxalias:1;
+ } cn73xx;
+
+ struct cvmx_l2c_ctl_cn73xx cn78xx;
+};
+
+/**
+ * cvmx_l2c_big_ctl
+ *
+ * L2C_BIG_CTL = L2C Big memory control register
+ *
+ *
+ * Notes:
+ * (1) BIGRD interrupts can occur during normal operation as the PP's are
+ * allowed to prefetch to non-existent memory locations. Therefore,
+ * BIGRD is for informational purposes only.
+ *
+ * (2) When HOLEWR/BIGWR blocks a store L2C_VER_ID, L2C_VER_PP, L2C_VER_IOB,
+ * and L2C_VER_MSC will be loaded just like a store which is blocked by VRTWR.
+ * Additionally, L2C_ERR_XMC will be loaded.
+ */
+union cvmx_l2c_big_ctl {
+ u64 u64;
+ struct cvmx_l2c_big_ctl_s {
+ uint64_t reserved_8_63:56;
+ uint64_t maxdram:4;
+ uint64_t reserved_0_3:4;
+ } s;
+ struct cvmx_l2c_big_ctl_cn61xx {
+ uint64_t reserved_8_63:56;
+ uint64_t maxdram:4;
+ uint64_t reserved_1_3:3;
+ uint64_t disable:1;
+ } cn61xx;
+ struct cvmx_l2c_big_ctl_cn61xx cn63xx;
+ struct cvmx_l2c_big_ctl_cn61xx cn66xx;
+ struct cvmx_l2c_big_ctl_cn61xx cn68xx;
+ struct cvmx_l2c_big_ctl_cn61xx cn68xxp1;
+ struct cvmx_l2c_big_ctl_cn70xx {
+ uint64_t reserved_8_63:56;
+ uint64_t maxdram:4;
+ uint64_t reserved_1_3:3;
+ uint64_t disbig:1;
+ } cn70xx;
+ struct cvmx_l2c_big_ctl_cn70xx cn70xxp1;
+ struct cvmx_l2c_big_ctl_cn70xx cn73xx;
+ struct cvmx_l2c_big_ctl_cn70xx cn78xx;
+ struct cvmx_l2c_big_ctl_cn70xx cn78xxp1;
+ struct cvmx_l2c_big_ctl_cn61xx cnf71xx;
+ struct cvmx_l2c_big_ctl_cn70xx cnf75xx;
+};
+
+struct rlevel_byte_data {
+ int delay;
+ int loop_total;
+ int loop_count;
+ int best;
+ u64 bm;
+ int bmerrs;
+ int sqerrs;
+ int bestsq;
+};
+
+#define DEBUG_VALIDATE_BITMASK 0
+#if DEBUG_VALIDATE_BITMASK
+#define debug_bitmask_print printf
+#else
+#define debug_bitmask_print(...)
+#endif
+
+#define RLEVEL_BITMASK_TRAILING_BITS_ERROR 5
+// FIXME? now less than TOOLONG
+#define RLEVEL_BITMASK_BUBBLE_BITS_ERROR 11
+#define RLEVEL_BITMASK_NARROW_ERROR 6
+#define RLEVEL_BITMASK_BLANK_ERROR 100
+#define RLEVEL_BITMASK_TOOLONG_ERROR 12
+#define RLEVEL_NONSEQUENTIAL_DELAY_ERROR 50
+#define RLEVEL_ADJACENT_DELAY_ERROR 30
+
+/*
+ * Apply a filter to the BITMASK results returned from Octeon
+ * read-leveling to determine the most likely delay result. This
+ * computed delay may be used to qualify the delay result returned by
+ * Octeon. Accumulate an error penalty for invalid characteristics of
+ * the bitmask so that they can be used to select the most reliable
+ * results.
+ *
+ * The algorithm searches for the largest contiguous MASK within a
+ * maximum RANGE of bits beginning with the MSB.
+ *
+ * 1. a MASK with a WIDTH less than 4 will be penalized
+ * 2. Bubbles in the bitmask that occur before or after the MASK
+ * will be penalized
+ * 3. If there are no trailing bubbles then extra bits that occur
+ * beyond the maximum RANGE will be penalized.
+ *
+ * +++++++++++++++++++++++++++++++++++++++++++++++++++
+ * + +
+ * + e.g. bitmask = 27B00 +
+ * + +
+ * + 63 +--- mstart 0 +
+ * + | | | +
+ * + | +---------+ +--- fb | +
+ * + | | range | | | +
+ * + V V V V V +
+ * + +
+ * + 0 0 ... 1 0 0 1 1 1 1 0 1 1 0 0 0 0 0 0 0 0 +
+ * + +
+ * + ^ ^ ^ +
+ * + | | mask| +
+ * + lb ---+ +-----+ +
+ * + width +
+ * + +
+ * +++++++++++++++++++++++++++++++++++++++++++++++++++
+ */
+
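+/*
+ * Minimal sketch of the core search (illustrative; the real filter also
+ * applies the error penalties defined below):
+ *
+ *	static void find_widest_run(u64 bm, u8 *mstart, u8 *width)
+ *	{
+ *		u8 i, run = 0, start = 0;
+ *
+ *		*mstart = 0;
+ *		*width = 0;
+ *		for (i = 0; i < 64; i++) {
+ *			if (!(bm & (1ULL << i))) {
+ *				run = 0;
+ *				continue;
+ *			}
+ *			if (!run)
+ *				start = i;
+ *			if (++run > *width) {
+ *				*width = run;
+ *				*mstart = start;
+ *			}
+ *		}
+ *	}
+ */
+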
+struct rlevel_bitmask {
+ u64 bm;
+ u8 mstart;
+ u8 width;
+ int errs;
+};
+
+#define MASKRANGE_BITS 6
+#define MASKRANGE ((1 << MASKRANGE_BITS) - 1)
+
+/* data field addresses in the DDR2 SPD eeprom */
+enum ddr2_spd_addrs {
+ DDR2_SPD_BYTES_PROGRAMMED = 0,
+ DDR2_SPD_TOTAL_BYTES = 1,
+ DDR2_SPD_MEM_TYPE = 2,
+ DDR2_SPD_NUM_ROW_BITS = 3,
+ DDR2_SPD_NUM_COL_BITS = 4,
+ DDR2_SPD_NUM_RANKS = 5,
+ DDR2_SPD_CYCLE_CLX = 9,
+ DDR2_SPD_CONFIG_TYPE = 11,
+ DDR2_SPD_REFRESH = 12,
+ DDR2_SPD_SDRAM_WIDTH = 13,
+ DDR2_SPD_BURST_LENGTH = 16,
+ DDR2_SPD_NUM_BANKS = 17,
+ DDR2_SPD_CAS_LATENCY = 18,
+ DDR2_SPD_DIMM_TYPE = 20,
+ DDR2_SPD_CYCLE_CLX1 = 23,
+ DDR2_SPD_CYCLE_CLX2 = 25,
+ DDR2_SPD_TRP = 27,
+ DDR2_SPD_TRRD = 28,
+ DDR2_SPD_TRCD = 29,
+ DDR2_SPD_TRAS = 30,
+ DDR2_SPD_TWR = 36,
+ DDR2_SPD_TWTR = 37,
+ DDR2_SPD_TRFC_EXT = 40,
+ DDR2_SPD_TRFC = 42,
+ DDR2_SPD_CHECKSUM = 63,
+ DDR2_SPD_MFR_ID = 64
+};
+
+/* data field addresses in the DDR3 SPD eeprom */
+enum ddr3_spd_addrs {
+ DDR3_SPD_BYTES_PROGRAMMED = 0,
+ DDR3_SPD_REVISION = 1,
+ DDR3_SPD_KEY_BYTE_DEVICE_TYPE = 2,
+ DDR3_SPD_KEY_BYTE_MODULE_TYPE = 3,
+ DDR3_SPD_DENSITY_BANKS = 4,
+ DDR3_SPD_ADDRESSING_ROW_COL_BITS = 5,
+ DDR3_SPD_NOMINAL_VOLTAGE = 6,
+ DDR3_SPD_MODULE_ORGANIZATION = 7,
+ DDR3_SPD_MEMORY_BUS_WIDTH = 8,
+ DDR3_SPD_FINE_TIMEBASE_DIVIDEND_DIVISOR = 9,
+ DDR3_SPD_MEDIUM_TIMEBASE_DIVIDEND = 10,
+ DDR3_SPD_MEDIUM_TIMEBASE_DIVISOR = 11,
+ DDR3_SPD_MINIMUM_CYCLE_TIME_TCKMIN = 12,
+ DDR3_SPD_CAS_LATENCIES_LSB = 14,
+ DDR3_SPD_CAS_LATENCIES_MSB = 15,
+ DDR3_SPD_MIN_CAS_LATENCY_TAAMIN = 16,
+ DDR3_SPD_MIN_WRITE_RECOVERY_TWRMIN = 17,
+ DDR3_SPD_MIN_RAS_CAS_DELAY_TRCDMIN = 18,
+ DDR3_SPD_MIN_ROW_ACTIVE_DELAY_TRRDMIN = 19,
+ DDR3_SPD_MIN_ROW_PRECHARGE_DELAY_TRPMIN = 20,
+ DDR3_SPD_UPPER_NIBBLES_TRAS_TRC = 21,
+ DDR3_SPD_MIN_ACTIVE_PRECHARGE_LSB_TRASMIN = 22,
+ DDR3_SPD_MIN_ACTIVE_REFRESH_LSB_TRCMIN = 23,
+ DDR3_SPD_MIN_REFRESH_RECOVERY_LSB_TRFCMIN = 24,
+ DDR3_SPD_MIN_REFRESH_RECOVERY_MSB_TRFCMIN = 25,
+ DDR3_SPD_MIN_INTERNAL_WRITE_READ_CMD_TWTRMIN = 26,
+ DDR3_SPD_MIN_INTERNAL_READ_PRECHARGE_CMD_TRTPMIN = 27,
+ DDR3_SPD_UPPER_NIBBLE_TFAW = 28,
+ DDR3_SPD_MIN_FOUR_ACTIVE_WINDOW_TFAWMIN = 29,
+ DDR3_SPD_SDRAM_OPTIONAL_FEATURES = 30,
+ DDR3_SPD_SDRAM_THERMAL_REFRESH_OPTIONS = 31,
+ DDR3_SPD_MODULE_THERMAL_SENSOR = 32,
+ DDR3_SPD_SDRAM_DEVICE_TYPE = 33,
+ DDR3_SPD_MINIMUM_CYCLE_TIME_FINE_TCKMIN = 34,
+ DDR3_SPD_MIN_CAS_LATENCY_FINE_TAAMIN = 35,
+ DDR3_SPD_MIN_RAS_CAS_DELAY_FINE_TRCDMIN = 36,
+ DDR3_SPD_MIN_ROW_PRECHARGE_DELAY_FINE_TRPMIN = 37,
+ DDR3_SPD_MIN_ACTIVE_REFRESH_LSB_FINE_TRCMIN = 38,
+ DDR3_SPD_REFERENCE_RAW_CARD = 62,
+ DDR3_SPD_ADDRESS_MAPPING = 63,
+ DDR3_SPD_REGISTER_MANUFACTURER_ID_LSB = 65,
+ DDR3_SPD_REGISTER_MANUFACTURER_ID_MSB = 66,
+ DDR3_SPD_REGISTER_REVISION_NUMBER = 67,
+ DDR3_SPD_MODULE_SERIAL_NUMBER = 122,
+ DDR3_SPD_CYCLICAL_REDUNDANCY_CODE_LOWER_NIBBLE = 126,
+ DDR3_SPD_CYCLICAL_REDUNDANCY_CODE_UPPER_NIBBLE = 127,
+ DDR3_SPD_MODULE_PART_NUMBER = 128
+};
+
+/* data field addresses in the DDR4 SPD eeprom */
+enum ddr4_spd_addrs {
+ DDR4_SPD_BYTES_PROGRAMMED = 0,
+ DDR4_SPD_REVISION = 1,
+ DDR4_SPD_KEY_BYTE_DEVICE_TYPE = 2,
+ DDR4_SPD_KEY_BYTE_MODULE_TYPE = 3,
+ DDR4_SPD_DENSITY_BANKS = 4,
+ DDR4_SPD_ADDRESSING_ROW_COL_BITS = 5,
+ DDR4_SPD_PACKAGE_TYPE = 6,
+ DDR4_SPD_OPTIONAL_FEATURES = 7,
+ DDR4_SPD_THERMAL_REFRESH_OPTIONS = 8,
+ DDR4_SPD_OTHER_OPTIONAL_FEATURES = 9,
+ DDR4_SPD_SECONDARY_PACKAGE_TYPE = 10,
+ DDR4_SPD_MODULE_NOMINAL_VOLTAGE = 11,
+ DDR4_SPD_MODULE_ORGANIZATION = 12,
+ DDR4_SPD_MODULE_MEMORY_BUS_WIDTH = 13,
+ DDR4_SPD_MODULE_THERMAL_SENSOR = 14,
+ DDR4_SPD_RESERVED_BYTE15 = 15,
+ DDR4_SPD_RESERVED_BYTE16 = 16,
+ DDR4_SPD_TIMEBASES = 17,
+ DDR4_SPD_MINIMUM_CYCLE_TIME_TCKAVGMIN = 18,
+ DDR4_SPD_MAXIMUM_CYCLE_TIME_TCKAVGMAX = 19,
+ DDR4_SPD_CAS_LATENCIES_BYTE0 = 20,
+ DDR4_SPD_CAS_LATENCIES_BYTE1 = 21,
+ DDR4_SPD_CAS_LATENCIES_BYTE2 = 22,
+ DDR4_SPD_CAS_LATENCIES_BYTE3 = 23,
+ DDR4_SPD_MIN_CAS_LATENCY_TAAMIN = 24,
+ DDR4_SPD_MIN_RAS_CAS_DELAY_TRCDMIN = 25,
+ DDR4_SPD_MIN_ROW_PRECHARGE_DELAY_TRPMIN = 26,
+ DDR4_SPD_UPPER_NIBBLES_TRAS_TRC = 27,
+ DDR4_SPD_MIN_ACTIVE_PRECHARGE_LSB_TRASMIN = 28,
+ DDR4_SPD_MIN_ACTIVE_REFRESH_LSB_TRCMIN = 29,
+ DDR4_SPD_MIN_REFRESH_RECOVERY_LSB_TRFC1MIN = 30,
+ DDR4_SPD_MIN_REFRESH_RECOVERY_MSB_TRFC1MIN = 31,
+ DDR4_SPD_MIN_REFRESH_RECOVERY_LSB_TRFC2MIN = 32,
+ DDR4_SPD_MIN_REFRESH_RECOVERY_MSB_TRFC2MIN = 33,
+ DDR4_SPD_MIN_REFRESH_RECOVERY_LSB_TRFC4MIN = 34,
+ DDR4_SPD_MIN_REFRESH_RECOVERY_MSB_TRFC4MIN = 35,
+ DDR4_SPD_MIN_FOUR_ACTIVE_WINDOW_MSN_TFAWMIN = 36,
+ DDR4_SPD_MIN_FOUR_ACTIVE_WINDOW_LSB_TFAWMIN = 37,
+ DDR4_SPD_MIN_ROW_ACTIVE_DELAY_SAME_TRRD_SMIN = 38,
+ DDR4_SPD_MIN_ROW_ACTIVE_DELAY_DIFF_TRRD_LMIN = 39,
+ DDR4_SPD_MIN_CAS_TO_CAS_DELAY_TCCD_LMIN = 40,
+ DDR4_SPD_MIN_CAS_TO_CAS_DELAY_FINE_TCCD_LMIN = 117,
+ DDR4_SPD_MIN_ACT_TO_ACT_DELAY_SAME_FINE_TRRD_LMIN = 118,
+ DDR4_SPD_MIN_ACT_TO_ACT_DELAY_DIFF_FINE_TRRD_SMIN = 119,
+ DDR4_SPD_MIN_ACT_TO_ACT_REFRESH_DELAY_FINE_TRCMIN = 120,
+ DDR4_SPD_MIN_ROW_PRECHARGE_DELAY_FINE_TRPMIN = 121,
+ DDR4_SPD_MIN_RAS_TO_CAS_DELAY_FINE_TRCDMIN = 122,
+ DDR4_SPD_MIN_CAS_LATENCY_FINE_TAAMIN = 123,
+ DDR4_SPD_MAX_CYCLE_TIME_FINE_TCKAVGMAX = 124,
+ DDR4_SPD_MIN_CYCLE_TIME_FINE_TCKAVGMIN = 125,
+ DDR4_SPD_CYCLICAL_REDUNDANCY_CODE_LOWER_NIBBLE = 126,
+ DDR4_SPD_CYCLICAL_REDUNDANCY_CODE_UPPER_NIBBLE = 127,
+ DDR4_SPD_REFERENCE_RAW_CARD = 130,
+ DDR4_SPD_UDIMM_ADDR_MAPPING_FROM_EDGE = 131,
+ DDR4_SPD_REGISTER_MANUFACTURER_ID_LSB = 133,
+ DDR4_SPD_REGISTER_MANUFACTURER_ID_MSB = 134,
+ DDR4_SPD_REGISTER_REVISION_NUMBER = 135,
+ DDR4_SPD_RDIMM_ADDR_MAPPING_FROM_REGISTER_TO_DRAM = 136,
+ DDR4_SPD_RDIMM_REGISTER_DRIVE_STRENGTH_CTL = 137,
+ DDR4_SPD_RDIMM_REGISTER_DRIVE_STRENGTH_CK = 138,
+};
+
+#define SPD_EEPROM_SIZE (DDR4_SPD_RDIMM_REGISTER_DRIVE_STRENGTH_CK + 1)
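+
+/*
+ * Illustrative decode using the byte offsets above (read_spd() is
+ * declared near the end of this header):
+ *
+ *	int key = read_spd(dimm_config, 0, DDR4_SPD_KEY_BYTE_DEVICE_TYPE);
+ *	// per JEDEC SPD: 0x0b => DDR3 SDRAM, 0x0c => DDR4 SDRAM
+ */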
+
+struct impedence_values {
+ unsigned char *rodt_ohms;
+ unsigned char *rtt_nom_ohms;
+ unsigned char *rtt_nom_table;
+ unsigned char *rtt_wr_ohms;
+ unsigned char *dic_ohms;
+ short *drive_strength;
+ short *dqx_strength;
+};
+
+#define RODT_OHMS_COUNT 8
+#define RTT_NOM_OHMS_COUNT 8
+#define RTT_NOM_TABLE_COUNT 8
+#define RTT_WR_OHMS_COUNT 8
+#define DIC_OHMS_COUNT 3
+#define DRIVE_STRENGTH_COUNT 15
+
+/*
+ * Structure that provides DIMM information, either in the form of an SPD
+ * TWSI address, or a pointer to an array that contains SPD data. One of
+ * the two fields must be valid.
+ */
+struct dimm_config {
+ u16 spd_addrs[2]; /* TWSI address of SPD, 0 if not used */
+ u8 *spd_ptrs[2]; /* pointer to SPD data array, NULL if not used */
+ int spd_cached[2];
+ u8 spd_data[2][SPD_EEPROM_SIZE];
+};
+
+struct dimm_odt_config {
+ u8 odt_ena; /* FIX: dqx_ctl for Octeon 3 DDR4 */
+ u64 odt_mask; /* FIX: wodt_mask for Octeon 3 */
+ union cvmx_lmcx_modereg_params1 modereg_params1;
+ union cvmx_lmcx_modereg_params2 modereg_params2;
+ u8 qs_dic; /* FIX: rodt_ctl for Octeon 3 */
+ u64 rodt_ctl; /* FIX: rodt_mask for Octeon 3 */
+ u8 dic;
+};
+
+struct ddr_delay_config {
+ u32 ddr_board_delay;
+ u8 lmc_delay_clk;
+ u8 lmc_delay_cmd;
+ u8 lmc_delay_dq;
+};
+
+/*
+ * The parameters below make up the custom_lmc_config data structure.
+ * This structure is used to customize the way that the LMC DRAM
+ * Controller is configured for a particular board design.
+ *
+ * The HRM describes LMC Read Leveling which supports automatic
+ * selection of per byte-lane delays. When measuring the read delays
+ * the LMC configuration software sweeps through a range of settings
+ * for LMC0_COMP_CTL2[RODT_CTL], the Octeon II on-die-termination
+ * resistance and LMC0_MODEREG_PARAMS1[RTT_NOM_XX], the DRAM
+ * on-die-termination resistance. The minimum and maximum parameters
+ * for rtt_nom_idx and rodt_ctl listed below determine the ranges of
+ * ODT settings used for the measurements. Note that for rtt_nom an
+ * index is used into a sorted table rather than the direct csr setting
+ * in order to optimize the sweep.
+ *
+ * .min_rtt_nom_idx: 1=120ohms, 2=60ohms, 3=40ohms, 4=30ohms, 5=20ohms
+ * .max_rtt_nom_idx: 1=120ohms, 2=60ohms, 3=40ohms, 4=30ohms, 5=20ohms
+ * .min_rodt_ctl: 1=20ohms, 2=30ohms, 3=40ohms, 4=60ohms, 5=120ohms
+ * .max_rodt_ctl: 1=20ohms, 2=30ohms, 3=40ohms, 4=60ohms, 5=120ohms
+ *
+ * The settings below control the Octeon II drive strength for the CK,
+ * ADD/CMD, and DQ/DQS signals. 1=24ohms, 2=26.67ohms, 3=30ohms,
+ * 4=34.3ohms, 5=40ohms, 6=48ohms, 7=60ohms.
+ *
+ * .dqx_ctl: Drive strength control for DDR_DQX/DDR_DQS_X_P/N drivers.
+ * .ck_ctl: Drive strength control for
+ * DDR_CK_X_P/DDR_DIMMX_CSX_L/DDR_DIMMX_ODT_X drivers.
+ * .cmd_ctl: Drive strength control for CMD/A/RESET_L/CKEX drivers.
+ *
+ * The LMC controller software selects the most optimal CAS Latency
+ * that complies with the appropriate SPD values and the frequency
+ * that the DRAMS are being operated. When operating the DRAMs at
+ * frequencies substantially lower than their rated frequencies it
+ * might be necessary to limit the minimum CAS Latency the LMC
+ * controller software is allowed to select in order to make the DRAM
+ * work reliably.
+ *
+ * .min_cas_latency: Minimum allowed CAS Latency
+ *
+ * The value used for LMC0_RLEVEL_CTL[OFFSET_EN] determines how the
+ * read-leveling information that the Octeon II gathers is interpreted
+ * to determine the per-byte read delays.
+ *
+ * .offset_en: Value used for LMC0_RLEVEL_CTL[OFFSET_EN].
+ * .offset_udimm: Value used for LMC0_RLEVEL_CTL[OFFSET] for UDIMMS.
+ * .offset_rdimm: Value used for LMC0_RLEVEL_CTL[OFFSET] for RDIMMS.
+ *
+ * The LMC configuration software sweeps through a range of ODT
+ * settings while measuring the per-byte read delays. During those
+ * measurements the software makes an assessment of the quality of the
+ * measurements in order to determine which measurements provide the
+ * most accurate delays. The automatic settings provide the option to
+ * allow that same assessment to determine the most optimal RODT_CTL
+ * and/or RTT_NOM settings.
+ *
+ * The automatic approach might provide the best means to determine
+ * the settings used for initial poweron of a new design. However,
+ * the final settings should be determined by board analysis, testing,
+ * and experience.
+ *
+ * .ddr_rtt_nom_auto: 1 means automatically set RTT_NOM value.
+ * .ddr_rodt_ctl_auto: 1 means automatically set RODT_CTL value.
+ *
+ * .rlevel_compute: Enables software interpretation of per-byte read
+ * delays using the measurements collected by the
+ * Octeon II rather than completely relying on the
+ * Octeon II to determine the delays. 1=software
+ * computation is recommended since a more complete
+ * analysis is implemented in software.
+ *
+ * .rlevel_comp_offset: Set to 2 unless instructed differently by Cavium.
+ *
+ * .rlevel_average_loops: Determines the number of times the read-leveling
+ * sequence is run for each rank. The results are
+ * then averaged across the number of loops. The
+ * default setting is 1.
+ *
+ * .ddr2t_udimm:
+ * .ddr2t_rdimm: Turn on the DDR 2T mode. 2-cycle window for CMD and
+ * address. This mode helps relieve setup time pressure
+ * on the address and command bus. Please refer to
+ * Micron's tech note tn_47_01 titled DDR2-533 Memory
+ * Design Guide for Two Dimm Unbuffered Systems for
+ * physical details.
+ *
+ * .disable_sequential_delay_check: As a result of the fly-by topology
+ * prescribed in the JEDEC specifications the byte delays should
+ * maintain a consistent increasing or decreasing trend across
+ * the bytes on standard dimms. This setting can be used to disable
+ * that check for unusual circumstances where the check is not
+ * useful.
+ *
+ * .maximum_adjacent_rlevel_delay_increment: An additional sequential
+ * delay check for the delays that result from the flyby
+ * topology. This value specifies the maximum difference between
+ * the delays of adjacent bytes. A value of 0 disables this
+ * check.
+ *
+ * .fprch2 Front Porch Enable: When set, the turn-off
+ * time for the default DDR_DQ/DQS drivers is FPRCH2 CKs earlier.
+ * 00 = 0 CKs
+ * 01 = 1 CKs
+ * 10 = 2 CKs
+ *
+ * .parity: The parity input signal PAR_IN on each dimm must be
+ * strapped high or low on the board. This bit is programmed
+ * into LMC0_DIMM_CTL[PARITY] and it must be set to match the
+ * board strapping. This signal is typically strapped low.
+ *
+ * .mode32b: Enable 32-bit datapath mode. Set to 1 if only 32 DQ pins
+ * are used. (cn61xx, cn71xx)
+ *
+ * .measured_vref: Set to 1 to measure VREF; set to 0 to compute VREF.
+ *
+ * .dram_connection: Set to 1 if discrete DRAMs; set to 0 if using DIMMs.
+ * This changes the algorithms used to compute VREF.
+ *
+ * .dll_write_offset: FIXME: Add description
+ * .dll_read_offset: FIXME: Add description
+ */
+
+struct rlevel_table {
+ const char part[20];
+ int speed;
+ u64 rl_rank[4][4];
+};
+
+struct ddr3_custom_config {
+ u8 min_rtt_nom_idx;
+ u8 max_rtt_nom_idx;
+ u8 min_rodt_ctl;
+ u8 max_rodt_ctl;
+ u8 dqx_ctl;
+ u8 ck_ctl;
+ u8 cmd_ctl;
+ u8 ctl_ctl;
+ u8 min_cas_latency;
+ u8 offset_en;
+ u8 offset_udimm;
+ u8 offset_rdimm;
+ u8 rlevel_compute;
+ u8 ddr_rtt_nom_auto;
+ u8 ddr_rodt_ctl_auto;
+ u8 rlevel_comp_offset_udimm;
+ u8 rlevel_comp_offset_rdimm;
+ int8_t ptune_offset;
+ int8_t ntune_offset;
+ u8 rlevel_average_loops;
+ u8 ddr2t_udimm;
+ u8 ddr2t_rdimm;
+ u8 disable_sequential_delay_check;
+ u8 maximum_adjacent_rlevel_delay_increment;
+ u8 parity;
+ u8 fprch2;
+ u8 mode32b;
+ u8 measured_vref;
+ u8 dram_connection;
+ const int8_t *dll_write_offset;
+ const int8_t *dll_read_offset;
+ struct rlevel_table *rl_tbl;
+};
+
+#define DDR_CFG_T_MAX_DIMMS 5
+
+struct ddr_conf {
+ struct dimm_config dimm_config_table[DDR_CFG_T_MAX_DIMMS];
+ struct dimm_odt_config odt_1rank_config[4];
+ struct dimm_odt_config odt_2rank_config[4];
+ struct dimm_odt_config odt_4rank_config[4];
+ struct ddr_delay_config unbuffered;
+ struct ddr_delay_config registered;
+ struct ddr3_custom_config custom_lmc_config;
+};
+
+/* Divide and round results to the nearest integer. */
+static inline u64 divide_nint(u64 dividend, u64 divisor)
+{
+ u64 quotient, remainder;
+
+ quotient = dividend / divisor;
+ remainder = dividend % divisor;
+ return (quotient + ((remainder * 2) >= divisor));
+}
+
+/* Divide and round results up to the next higher integer. */
+static inline u64 divide_roundup(u64 dividend, u64 divisor)
+{
+ return ((dividend + divisor - 1) / divisor);
+}
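+
+/*
+ * E.g. divide_nint(6667, 1000) == 7 but divide_nint(6400, 1000) == 6,
+ * while divide_roundup(6001, 1000) == 7.
+ */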
+
+enum ddr_type {
+ DDR3_DRAM = 3,
+ DDR4_DRAM = 4,
+};
+
+#define rttnom_none 0 /* Rtt_Nom disabled */
+#define rttnom_60ohm 1 /* RZQ/4 = 240/4 = 60 ohms */
+#define rttnom_120ohm 2 /* RZQ/2 = 240/2 = 120 ohms */
+#define rttnom_40ohm 3 /* RZQ/6 = 240/6 = 40 ohms */
+#define rttnom_20ohm 4 /* RZQ/12 = 240/12 = 20 ohms */
+#define rttnom_30ohm 5 /* RZQ/8 = 240/8 = 30 ohms */
+#define rttnom_rsrv1 6 /* Reserved */
+#define rttnom_rsrv2 7 /* Reserved */
+
+#define rttwr_none 0 /* Dynamic ODT off */
+#define rttwr_60ohm 1 /* RZQ/4 = 240/4 = 60 ohms */
+#define rttwr_120ohm 2 /* RZQ/2 = 240/2 = 120 ohms */
+#define rttwr_rsrv1 3 /* Reserved */
+
+#define dic_40ohm 0 /* RZQ/6 = 240/6 = 40 ohms */
+#define dic_34ohm 1 /* RZQ/7 = 240/7 = 34 ohms */
+
+#define driver_24_ohm 1
+#define driver_27_ohm 2
+#define driver_30_ohm 3
+#define driver_34_ohm 4
+#define driver_40_ohm 5
+#define driver_48_ohm 6
+#define driver_60_ohm 7
+
+#define rodt_ctl_none 0
+#define rodt_ctl_20_ohm 1
+#define rodt_ctl_30_ohm 2
+#define rodt_ctl_40_ohm 3
+#define rodt_ctl_60_ohm 4
+#define rodt_ctl_120_ohm 5
+
+#define ddr4_rttnom_none 0 /* Rtt_Nom disabled */
+#define ddr4_rttnom_60ohm 1 /* RZQ/4 = 240/4 = 60 ohms */
+#define ddr4_rttnom_120ohm 2 /* RZQ/2 = 240/2 = 120 ohms */
+#define ddr4_rttnom_40ohm 3 /* RZQ/6 = 240/6 = 40 ohms */
+#define ddr4_rttnom_240ohm 4 /* RZQ/1 = 240/1 = 240 ohms */
+#define ddr4_rttnom_48ohm 5 /* RZQ/5 = 240/5 = 48 ohms */
+#define ddr4_rttnom_80ohm 6 /* RZQ/3 = 240/3 = 80 ohms */
+#define ddr4_rttnom_34ohm 7 /* RZQ/7 = 240/7 = 34 ohms */
+
+#define ddr4_rttwr_none 0 /* Dynamic ODT off */
+#define ddr4_rttwr_120ohm 1 /* RZQ/2 = 240/2 = 120 ohms */
+#define ddr4_rttwr_240ohm 2 /* RZQ/1 = 240/1 = 240 ohms */
+#define ddr4_rttwr_hiz 3 /* HiZ */
+/* This setting is available for cn78xx pass 2, and cn73xx & cnf75xx pass 1 */
+#define ddr4_rttwr_80ohm 4 /* RZQ/3 = 240/3 = 80 ohms */
+
+#define ddr4_dic_34ohm 0 /* RZQ/7 = 240/7 = 34 ohms */
+#define ddr4_dic_48ohm 1 /* RZQ/5 = 240/5 = 48 ohms */
+
+#define ddr4_rttpark_none 0 /* Rtt_Park disabled */
+#define ddr4_rttpark_60ohm 1 /* RZQ/4 = 240/4 = 60 ohms */
+#define ddr4_rttpark_120ohm 2 /* RZQ/2 = 240/2 = 120 ohms */
+#define ddr4_rttpark_40ohm 3 /* RZQ/6 = 240/6 = 40 ohms */
+#define ddr4_rttpark_240ohm 4 /* RZQ/1 = 240/1 = 240 ohms */
+#define ddr4_rttpark_48ohm 5 /* RZQ/5 = 240/5 = 48 ohms */
+#define ddr4_rttpark_80ohm 6 /* RZQ/3 = 240/3 = 80 ohms */
+#define ddr4_rttpark_34ohm 7 /* RZQ/7 = 240/7 = 34 ohms */
+
+#define ddr4_driver_26_ohm 2
+#define ddr4_driver_30_ohm 3
+#define ddr4_driver_34_ohm 4
+#define ddr4_driver_40_ohm 5
+#define ddr4_driver_48_ohm 6
+
+#define ddr4_dqx_driver_24_ohm 1
+#define ddr4_dqx_driver_27_ohm 2
+#define ddr4_dqx_driver_30_ohm 3
+#define ddr4_dqx_driver_34_ohm 4
+#define ddr4_dqx_driver_40_ohm 5
+#define ddr4_dqx_driver_48_ohm 6
+#define ddr4_dqx_driver_60_ohm 7
+
+#define ddr4_rodt_ctl_none 0
+#define ddr4_rodt_ctl_40_ohm 1
+#define ddr4_rodt_ctl_60_ohm 2
+#define ddr4_rodt_ctl_80_ohm 3
+#define ddr4_rodt_ctl_120_ohm 4
+#define ddr4_rodt_ctl_240_ohm 5
+#define ddr4_rodt_ctl_34_ohm 6
+#define ddr4_rodt_ctl_48_ohm 7
+
+#define DIMM_CONFIG_TERMINATOR { {0, 0}, {NULL, NULL} }
+
+#define SET_DDR_DLL_CTL3(field, expr) \
+ do { \
+ if (octeon_is_cpuid(OCTEON_CN66XX) || \
+ octeon_is_cpuid(OCTEON_CN63XX)) \
+ ddr_dll_ctl3.cn63xx.field = (expr); \
+ else if (octeon_is_cpuid(OCTEON_CN68XX) || \
+ octeon_is_cpuid(OCTEON_CN61XX) || \
+ octeon_is_cpuid(OCTEON_CNF71XX)) \
+ ddr_dll_ctl3.cn61xx.field = (expr); \
+ else if (octeon_is_cpuid(OCTEON_CN70XX) || \
+ octeon_is_cpuid(OCTEON_CN78XX)) \
+ ddr_dll_ctl3.cn70xx.field = (expr); \
+ else if (octeon_is_cpuid(OCTEON_CN73XX) || \
+ octeon_is_cpuid(OCTEON_CNF75XX)) \
+ ddr_dll_ctl3.cn73xx.field = (expr); \
+ else \
+ debug("%s(): " #field \
+ "not set for unknown chip\n", \
+ __func__); \
+ } while (0)
+
+#define ENCODE_DLL90_BYTE_SEL(byte_sel) \
+ (octeon_is_cpuid(OCTEON_CN70XX) ? ((9 + 7 - (byte_sel)) % 9) : \
+ ((byte_sel) + 1))
+
+/**
+ * Note: if debugging is disabled, the ddr_print macro is not
+ * compatible with this macro.
+ */
+# define GET_DDR_DLL_CTL3(field) \
+ ((octeon_is_cpuid(OCTEON_CN66XX) || \
+ octeon_is_cpuid(OCTEON_CN63XX)) ? \
+ ddr_dll_ctl3.cn63xx.field : \
+ (octeon_is_cpuid(OCTEON_CN68XX) || \
+ octeon_is_cpuid(OCTEON_CN61XX) || \
+ octeon_is_cpuid(OCTEON_CNF71XX)) ? \
+ ddr_dll_ctl3.cn61xx.field : \
+ (octeon_is_cpuid(OCTEON_CN70XX) || \
+ octeon_is_cpuid(OCTEON_CN78XX)) ? \
+ ddr_dll_ctl3.cn70xx.field : \
+ (octeon_is_cpuid(OCTEON_CN73XX) || \
+ octeon_is_cpuid(OCTEON_CNF75XX)) ? \
+ ddr_dll_ctl3.cn73xx.field : 0)
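+
+/*
+ * Usage sketch (illustrative; both macros assume a local variable named
+ * ddr_dll_ctl3, and the union/register/field names are assumed from
+ * cvmx-lmcx-defs.h):
+ *
+ *	union cvmx_lmcx_dll_ctl3 ddr_dll_ctl3;
+ *
+ *	ddr_dll_ctl3.u64 = lmc_rd(priv, CVMX_LMCX_DLL_CTL3(if_num));
+ *	SET_DDR_DLL_CTL3(offset, dll_offset & 0x3f);
+ *	lmc_wr(priv, CVMX_LMCX_DLL_CTL3(if_num), ddr_dll_ctl3.u64);
+ */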
+
+extern const char *ddr3_dimm_types[];
+extern const char *ddr4_dimm_types[];
+
+extern const struct dimm_odt_config disable_odt_config[];
+
+#define RLEVEL_BYTE_BITS 6
+#define RLEVEL_BYTE_MSK ((1ULL << 6) - 1)
+
+/* Prototypes */
+int get_ddr_type(struct dimm_config *dimm_config, int upper_dimm);
+int get_dimm_module_type(struct dimm_config *dimm_config, int upper_dimm,
+ int ddr_type);
+int read_spd(struct dimm_config *dimm_config, int dimm_index, int spd_field);
+int read_spd_init(struct dimm_config *dimm_config, int dimm_index);
+void report_dimm(struct dimm_config *dimm_config, int upper_dimm,
+ int dimm, int if_num);
+int validate_dimm(struct ddr_priv *priv, struct dimm_config *dimm_config,
+ int dimm_index);
+char *printable_rank_spec(char *buffer, int num_ranks, int dram_width,
+ int spd_package);
+
+bool ddr_memory_preserved(struct ddr_priv *priv);
+
+int get_wl_rank(union cvmx_lmcx_wlevel_rankx *lmc_wlevel_rank, int byte);
+int get_rl_rank(union cvmx_lmcx_rlevel_rankx *lmc_rlevel_rank, int byte);
+void upd_wl_rank(union cvmx_lmcx_wlevel_rankx *lmc_wlevel_rank, int byte,
+ int delay);
+void upd_rl_rank(union cvmx_lmcx_rlevel_rankx *lmc_rlevel_rank, int byte,
+ int delay);
+
+int compute_ddr3_rlevel_delay(u8 mstart, u8 width,
+ union cvmx_lmcx_rlevel_ctl rlevel_ctl);
+
+int encode_row_lsb_ddr3(int row_lsb);
+int encode_pbank_lsb_ddr3(int pbank_lsb);
+
+int initialize_ddr_clock(struct ddr_priv *priv, struct ddr_conf *ddr_conf,
+ u32 cpu_hertz, u32 ddr_hertz, u32 ddr_ref_hertz,
+ int if_num, u32 if_mask);
+
+void process_custom_dll_offsets(struct ddr_priv *priv, int if_num,
+ const char *enable_str,
+ const int8_t *offsets, const char *byte_str,
+ int mode);
+int nonseq_del(struct rlevel_byte_data *rlevel_byte, int start, int end,
+ int max_adj_delay_inc);
+int roundup_ddr3_wlevel_bitmask(int bitmask);
+
+void oct3_ddr3_seq(struct ddr_priv *priv, int rank_mask, int if_num,
+ int sequence);
+void ddr_init_seq(struct ddr_priv *priv, int rank_mask, int if_num);
+
+void rlevel_to_wlevel(union cvmx_lmcx_rlevel_rankx *lmc_rlevel_rank,
+ union cvmx_lmcx_wlevel_rankx *lmc_wlevel_rank, int byte);
+
+int validate_ddr3_rlevel_bitmask(struct rlevel_bitmask *rlevel_bitmask_p,
+ int ddr_type);
+
+void change_dll_offset_enable(struct ddr_priv *priv, int if_num, int change);
+unsigned short load_dll_offset(struct ddr_priv *priv, int if_num,
+ int dll_offset_mode,
+ int byte_offset, int byte);
+
+u64 lmc_ddr3_rl_dbg_read(struct ddr_priv *priv, int if_num, int idx);
+u64 lmc_ddr3_wl_dbg_read(struct ddr_priv *priv, int if_num, int idx);
+
+void cvmx_maybe_tune_node(struct ddr_priv *priv, u32 ddr_speed);
+void cvmx_dbi_switchover(struct ddr_priv *priv);
+
+int init_octeon3_ddr3_interface(struct ddr_priv *priv,
+ struct ddr_conf *ddr_conf,
+ u32 ddr_hertz, u32 cpu_hertz, u32 ddr_ref_hertz,
+ int if_num, u32 if_mask);
+
+char *lookup_env(struct ddr_priv *priv, const char *format, ...);
+char *lookup_env_ull(struct ddr_priv *priv, const char *format, ...);
+
+/* Each board provides a board-specific config table via this function */
+struct ddr_conf *octeon_ddr_conf_table_get(int *count, int *def_ddr_freq);
+
+#endif /* __OCTEON_DDR_H_ */
diff --git a/arch/mips/mach-octeon/include/mangle-port.h b/arch/mips/mach-octeon/include/mangle-port.h
new file mode 100644
index 0000000000..7e95dcef5a
--- /dev/null
+++ b/arch/mips/mach-octeon/include/mangle-port.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2003, 2004 Ralf Baechle
+ */
+
+#ifndef __ASM_MACH_GENERIC_MANGLE_PORT_H
+#define __ASM_MACH_GENERIC_MANGLE_PORT_H
+
+#include <asm/byteorder.h>
+
+#ifdef __BIG_ENDIAN
+
+static inline bool __should_swizzle_bits(volatile void *a)
+{
+ extern const bool octeon_should_swizzle_table[];
+ u64 did = ((u64)(uintptr_t)a >> 40) & 0xff;
+
+ return octeon_should_swizzle_table[did];
+}
+
+# define __swizzle_addr_b(port) (port)
+# define __swizzle_addr_w(port) (port)
+# define __swizzle_addr_l(port) (port)
+# define __swizzle_addr_q(port) (port)
+
+#else /* __LITTLE_ENDIAN */
+
+#define __should_swizzle_bits(a) false
+
+static inline bool __should_swizzle_addr(u64 p)
+{
+ /* boot bus? */
+ return ((p >> 40) & 0xff) == 0;
+}
+
+# define __swizzle_addr_b(port) \
+ (__should_swizzle_addr(port) ? (port) ^ 7 : (port))
+# define __swizzle_addr_w(port) \
+ (__should_swizzle_addr(port) ? (port) ^ 6 : (port))
+# define __swizzle_addr_l(port) \
+ (__should_swizzle_addr(port) ? (port) ^ 4 : (port))
+# define __swizzle_addr_q(port) (port)
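+
+/*
+ * Example (illustrative): a byte access to boot-bus address 0x1f400000
+ * (DID bits <47:40> are zero) is redirected to 0x1f400007, and a 32-bit
+ * access to the same address to 0x1f400004; addresses with a non-zero
+ * DID pass through unchanged.
+ */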
+
+#endif /* __BIG_ENDIAN */
+
+
+# define ioswabb(a, x) (x)
+# define __mem_ioswabb(a, x) (x)
+# define ioswabw(a, x) (__should_swizzle_bits(a) ? le16_to_cpu(x) : x)
+# define __mem_ioswabw(a, x) (x)
+# define ioswabl(a, x) (__should_swizzle_bits(a) ? le32_to_cpu(x) : x)
+# define __mem_ioswabl(a, x) (x)
+# define ioswabq(a, x) (__should_swizzle_bits(a) ? le64_to_cpu(x) : x)
+# define __mem_ioswabq(a, x) (x)
+
+#endif /* __ASM_MACH_GENERIC_MANGLE_PORT_H */
diff --git a/arch/mips/mach-octeon/lowlevel_init.S b/arch/mips/mach-octeon/lowlevel_init.S
index fa87cb4e34..56d1d2261e 100644
--- a/arch/mips/mach-octeon/lowlevel_init.S
+++ b/arch/mips/mach-octeon/lowlevel_init.S
@@ -10,10 +10,36 @@
#include <asm/mipsregs.h>
#include <asm/addrspace.h>
#include <asm/asm.h>
+#include <mach/octeon-model.h>
+
+#define COP0_CVMCTL_REG $9,7 /* Cavium control */
+#define COP0_CVMMEMCTL_REG $11,7 /* Cavium memory control */
+#define COP0_PROC_ID_REG $15,0
.set noreorder
LEAF(lowlevel_init)
+
+ /* Set LMEMSZ in CVMMEMCTL register */
+ dmfc0 a0, COP0_CVMMEMCTL_REG
+ dins a0, zero, 0, 9
+ mfc0 a4, COP0_PROC_ID_REG
+ li a5, OCTEON_CN63XX_PASS1_0 /* Octeon cn63xx pass1 chip id */
+ bgt a5, a4, 2f
+ ori a0, 0x104 /* setup 4 lines of scratch */
+ ori a6, a5, 8 /* Octeon cn63xx pass2 chip id */
+ bge a4, a6, 2f
+ nop
+ li a6, 4
+ ins a0, a6, 11, 4 /* Set WBTHRESH=4 as per Core-14752 errata */
+2:
+ dmtc0 a0, COP0_CVMMEMCTL_REG
+
+ /* Set REPUN bit in CVMCTL register */
+ dmfc0 a0, COP0_CVMCTL_REG
+ ori a0, 1<<14 /* enable fixup of unaligned mem access */
+ dmtc0 a0, COP0_CVMCTL_REG
+
jr ra
nop
END(lowlevel_init)
@@ -67,3 +93,53 @@ __dummy:
nop
END(mips_mach_early_init)
+
+LEAF(nmi_bootvector)
+
+ /*
+ * From the original Marvell bootvector setup
+ */
+ mfc0 k0, CP0_STATUS
+ /* Enable 64-bit addressing, set ERL (should already be set) */
+ ori k0, 0x84
+ mtc0 k0, CP0_STATUS
+ /* Core-14345, clear L1 Dcache virtual tags if the core hit an NMI */
+ cache 17, 0($0)
+
+ /*
+ * Needed for Linux kernel booting, otherwise it hangs while
+ * zero'ing all of CVMSEG
+ */
+ dmfc0 a0, COP0_CVMMEMCTL_REG
+ dins a0, zero, 0, 9
+ ori a0, 0x104 /* setup 4 lines of scratch */
+ dmtc0 a0, COP0_CVMMEMCTL_REG
+
+ /*
+ * Load parameters and entry point
+ */
+ PTR_LA t9, nmi_handler_para
+ sync
+
+ ld s0, 0x00(t9)
+ ld a0, 0x08(t9)
+ ld a1, 0x10(t9)
+ ld a2, 0x18(t9)
+ ld a3, 0x20(t9)
+
+ /* Finally jump to entry point (start kernel etc) */
+ j s0
+ nop
+
+ END(nmi_bootvector)
+
+ /*
+ * Add here some space for the NMI parameters (entry point and args)
+ */
+ .globl nmi_handler_para
+nmi_handler_para:
+ .dword 0 // entry-point
+ .dword 0 // arg0
+ .dword 0 // arg1
+ .dword 0 // arg2
+ .dword 0 // arg3
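+
+/*
+ * Illustrative C-side counterpart (hypothetical snippet; the actual
+ * producer of these values lives in the boot code): fill the parameter
+ * block, then point the secondary cores' NMI vector at nmi_bootvector
+ * so they jump into the loaded kernel.
+ *
+ *	extern u64 nmi_handler_para[5];
+ *
+ *	nmi_handler_para[0] = entry_point;	// where the cores jump
+ *	nmi_handler_para[1] = arg0;		// passed in a0
+ *	nmi_handler_para[2] = arg1;		// passed in a1
+ *	nmi_handler_para[3] = arg2;		// passed in a2
+ *	nmi_handler_para[4] = arg3;		// passed in a3
+ *	// followed by a sync before raising the NMI on the target cores
+ */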