Diffstat (limited to 'drivers')
362 files changed, 14824 insertions, 3396 deletions
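A pattern worth calling out before the per-file hunks: several conversions below (acpi_lpss.c, x86/utils.c) replace open-coded _UID string parsing via kstrtol() with the new acpi_dev_uid_to_integer() helper added in the drivers/acpi/utils.c hunk. A minimal sketch of the call pattern, assuming an adev obtained from the usual probe path; the wrapper function itself is illustrative, not part of this series:

#include <linux/acpi.h>

/* Illustrative helper: act only on the ACPI device whose _UID is 1. */
static int example_match_first_instance(struct acpi_device *adev)
{
	u64 uid;
	int ret;

	/* -ENODEV if @adev is NULL, -ENODATA if it has no _UID string. */
	ret = acpi_dev_uid_to_integer(adev, &uid);
	if (ret)
		return ret;

	return uid == 1 ? 0 : -ENODEV;
}

This is the shape byt_pwm_setup() and bsw_pwm_setup() take after the acpi_lpss.c conversion, with the string compare against "1" becoming an integer compare.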
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig index 7802d8846a8d..bc9ddd2e3b05 100644 --- a/drivers/acpi/Kconfig +++ b/drivers/acpi/Kconfig @@ -27,9 +27,6 @@ menuconfig ACPI Management (APM) specification. If both ACPI and APM support are configured, ACPI is used. - The project home page for the Linux ACPI subsystem is here: - <https://01.org/linux-acpi> - Linux support for ACPI is based on Intel Corporation's ACPI Component Architecture (ACPI CA). For more information on the ACPI CA, see: @@ -347,7 +344,6 @@ config ACPI_CUSTOM_DSDT_FILE depends on !STANDALONE help This option supports a custom DSDT by linking it into the kernel. - See Documentation/admin-guide/acpi/dsdt-override.rst Enter the full path name to the file which includes the AmlCode or dsdt_aml_code declaration. diff --git a/drivers/acpi/ac.c b/drivers/acpi/ac.c index c29e41bfcf35..bb9fe7984b1a 100644 --- a/drivers/acpi/ac.c +++ b/drivers/acpi/ac.c @@ -36,11 +36,6 @@ static int acpi_ac_add(struct acpi_device *device); static int acpi_ac_remove(struct acpi_device *device); static void acpi_ac_notify(struct acpi_device *device, u32 event); -struct acpi_ac_bl { - const char *hid; - int hrv; -}; - static const struct acpi_device_id ac_device_ids[] = { {"ACPI0003", 0}, {"", 0}, diff --git a/drivers/acpi/acpi_amba.c b/drivers/acpi/acpi_amba.c index ab8a4e0191b1..f5b443ab01c2 100644 --- a/drivers/acpi/acpi_amba.c +++ b/drivers/acpi/acpi_amba.c @@ -21,6 +21,7 @@ static const struct acpi_device_id amba_id_list[] = { {"ARMH0061", 0}, /* PL061 GPIO Device */ + {"ARMH0330", 0}, /* ARM DMA Controller DMA-330 */ {"ARMHC500", 0}, /* ARM CoreSight ETM4x */ {"ARMHC501", 0}, /* ARM CoreSight ETR */ {"ARMHC502", 0}, /* ARM CoreSight STM */ @@ -48,6 +49,7 @@ static void amba_register_dummy_clk(void) static int amba_handler_attach(struct acpi_device *adev, const struct acpi_device_id *id) { + struct acpi_device *parent = acpi_dev_parent(adev); struct amba_device *dev; struct resource_entry *rentry; struct list_head resource_list; @@ -97,8 +99,8 @@ static int amba_handler_attach(struct acpi_device *adev, * attached to it, that physical device should be the parent of * the amba device we are about to create. 
*/ - if (adev->parent) - dev->dev.parent = acpi_get_first_physical_node(adev->parent); + if (parent) + dev->dev.parent = acpi_get_first_physical_node(parent); ACPI_COMPANION_SET(&dev->dev, adev); diff --git a/drivers/acpi/acpi_apd.c b/drivers/acpi/acpi_apd.c index ad245bbd965e..3bbe2276cac7 100644 --- a/drivers/acpi/acpi_apd.c +++ b/drivers/acpi/acpi_apd.c @@ -60,12 +60,6 @@ static int acpi_apd_setup(struct apd_private_data *pdata) } #ifdef CONFIG_X86_AMD_PLATFORM_DEVICE -static int misc_check_res(struct acpi_resource *ares, void *data) -{ - struct resource res; - - return !acpi_dev_resource_memory(ares, &res); -} static int fch_misc_setup(struct apd_private_data *pdata) { @@ -82,8 +76,7 @@ static int fch_misc_setup(struct apd_private_data *pdata) return -ENOMEM; INIT_LIST_HEAD(&resource_list); - ret = acpi_dev_get_resources(adev, &resource_list, misc_check_res, - NULL); + ret = acpi_dev_get_memory_resources(adev, &resource_list); if (ret < 0) return -ENOENT; diff --git a/drivers/acpi/acpi_fpdt.c b/drivers/acpi/acpi_fpdt.c index 6922a44b3ce7..a2056c4c8cb7 100644 --- a/drivers/acpi/acpi_fpdt.c +++ b/drivers/acpi/acpi_fpdt.c @@ -143,6 +143,23 @@ static const struct attribute_group boot_attr_group = { static struct kobject *fpdt_kobj; +#if defined CONFIG_X86 && defined CONFIG_PHYS_ADDR_T_64BIT +#include <linux/processor.h> +static bool fpdt_address_valid(u64 address) +{ + /* + * On some systems the table contains invalid addresses + * with unsupported high address bits set, check for this. + */ + return !(address >> boot_cpu_data.x86_phys_bits); +} +#else +static bool fpdt_address_valid(u64 address) +{ + return true; +} +#endif + static int fpdt_process_subtable(u64 address, u32 subtable_type) { struct fpdt_subtable_header *subtable_header; @@ -151,6 +168,11 @@ static int fpdt_process_subtable(u64 address, u32 subtable_type) u32 length, offset; int result; + if (!fpdt_address_valid(address)) { + pr_info(FW_BUG "invalid physical address: 0x%llx!\n", address); + return -EINVAL; + } + subtable_header = acpi_os_map_memory(address, sizeof(*subtable_header)); if (!subtable_header) return -ENOMEM; diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c index c4d4d21391d7..f08ffa75f4a7 100644 --- a/drivers/acpi/acpi_lpss.c +++ b/drivers/acpi/acpi_lpss.c @@ -167,10 +167,10 @@ static struct pwm_lookup byt_pwm_lookup[] = { static void byt_pwm_setup(struct lpss_private_data *pdata) { - struct acpi_device *adev = pdata->adev; + u64 uid; /* Only call pwm_add_table for the first PWM controller */ - if (!adev->pnp.unique_id || strcmp(adev->pnp.unique_id, "1")) + if (acpi_dev_uid_to_integer(pdata->adev, &uid) || uid != 1) return; pwm_add_table(byt_pwm_lookup, ARRAY_SIZE(byt_pwm_lookup)); @@ -180,14 +180,13 @@ static void byt_pwm_setup(struct lpss_private_data *pdata) static void byt_i2c_setup(struct lpss_private_data *pdata) { - const char *uid_str = acpi_device_uid(pdata->adev); acpi_handle handle = pdata->adev->handle; unsigned long long shared_host = 0; acpi_status status; - long uid = 0; + u64 uid; - /* Expected to always be true, but better safe then sorry */ - if (uid_str && !kstrtol(uid_str, 10, &uid) && uid) { + /* Expected to always be successful, but better safe than sorry */ + if (!acpi_dev_uid_to_integer(pdata->adev, &uid) && uid) { /* Detect I2C bus shared with PUNIT and ignore its d3 status */ status = acpi_evaluate_integer(handle, "_SEM", NULL, &shared_host); if (ACPI_SUCCESS(status) && shared_host) @@ -211,10 +210,10 @@ static struct pwm_lookup bsw_pwm_lookup[] = { static void 
bsw_pwm_setup(struct lpss_private_data *pdata) { - struct acpi_device *adev = pdata->adev; + u64 uid; /* Only call pwm_add_table for the first PWM controller */ - if (!adev->pnp.unique_id || strcmp(adev->pnp.unique_id, "1")) + if (acpi_dev_uid_to_integer(pdata->adev, &uid) || uid != 1) return; pwm_add_table(bsw_pwm_lookup, ARRAY_SIZE(bsw_pwm_lookup)); @@ -392,13 +391,6 @@ static const struct acpi_device_id acpi_lpss_device_ids[] = { #ifdef CONFIG_X86_INTEL_LPSS -static int is_memory(struct acpi_resource *res, void *not_used) -{ - struct resource r; - - return !acpi_dev_resource_memory(res, &r); -} - /* LPSS main clock device. */ static struct platform_device *lpss_clk_dev; @@ -659,29 +651,25 @@ static int acpi_lpss_create_device(struct acpi_device *adev, return -ENOMEM; INIT_LIST_HEAD(&resource_list); - ret = acpi_dev_get_resources(adev, &resource_list, is_memory, NULL); + ret = acpi_dev_get_memory_resources(adev, &resource_list); if (ret < 0) goto err_out; - list_for_each_entry(rentry, &resource_list, node) - if (resource_type(rentry->res) == IORESOURCE_MEM) { - if (dev_desc->prv_size_override) - pdata->mmio_size = dev_desc->prv_size_override; - else - pdata->mmio_size = resource_size(rentry->res); - pdata->mmio_base = ioremap(rentry->res->start, - pdata->mmio_size); - break; - } + rentry = list_first_entry_or_null(&resource_list, struct resource_entry, node); + if (rentry) { + if (dev_desc->prv_size_override) + pdata->mmio_size = dev_desc->prv_size_override; + else + pdata->mmio_size = resource_size(rentry->res); + pdata->mmio_base = ioremap(rentry->res->start, pdata->mmio_size); + } acpi_dev_free_resource_list(&resource_list); if (!pdata->mmio_base) { /* Avoid acpi_bus_attach() instantiating a pdev for this dev. */ adev->pnp.type.platform_id = 0; - /* Skip the device, but continue the namespace scan. */ - ret = 0; - goto err_out; + goto out_free; } pdata->adev = adev; @@ -692,11 +680,8 @@ static int acpi_lpss_create_device(struct acpi_device *adev, if (dev_desc->flags & LPSS_CLK) { ret = register_device_clock(adev, pdata); - if (ret) { - /* Skip the device, but continue the namespace scan. 
*/ - ret = 0; - goto err_out; - } + if (ret) + goto out_free; } /* @@ -708,15 +693,19 @@ static int acpi_lpss_create_device(struct acpi_device *adev, adev->driver_data = pdata; pdev = acpi_create_platform_device(adev, dev_desc->properties); - if (!IS_ERR_OR_NULL(pdev)) { - acpi_lpss_create_device_links(adev, pdev); - return 1; + if (IS_ERR_OR_NULL(pdev)) { + adev->driver_data = NULL; + ret = PTR_ERR(pdev); + goto err_out; } - ret = PTR_ERR(pdev); - adev->driver_data = NULL; + acpi_lpss_create_device_links(adev, pdev); + return 1; - err_out: +out_free: + /* Skip the device, but continue the namespace scan */ + ret = 0; +err_out: kfree(pdata); return ret; } diff --git a/drivers/acpi/acpi_pcc.c b/drivers/acpi/acpi_pcc.c index a12b55d81209..ee4ce5ba1fb2 100644 --- a/drivers/acpi/acpi_pcc.c +++ b/drivers/acpi/acpi_pcc.c @@ -23,6 +23,12 @@ #include <acpi/pcc.h> +/* + * Arbitrary retries in case the remote processor is slow to respond + * to PCC commands + */ +#define PCC_CMD_WAIT_RETRIES_NUM 500 + struct pcc_data { struct pcc_mbox_chan *pcc_chan; void __iomem *pcc_comm_addr; @@ -63,6 +69,7 @@ acpi_pcc_address_space_setup(acpi_handle region_handle, u32 function, if (IS_ERR(data->pcc_chan)) { pr_err("Failed to find PCC channel for subspace %d\n", ctx->subspace_id); + kfree(data); return AE_NOT_FOUND; } @@ -72,6 +79,8 @@ acpi_pcc_address_space_setup(acpi_handle region_handle, u32 function, if (!data->pcc_comm_addr) { pr_err("Failed to ioremap PCC comm region mem for %d\n", ctx->subspace_id); + pcc_mbox_free_channel(data->pcc_chan); + kfree(data); return AE_NO_MEMORY; } @@ -86,6 +95,7 @@ acpi_pcc_address_space_handler(u32 function, acpi_physical_address addr, { int ret; struct pcc_data *data = region_context; + u64 usecs_lat; reinit_completion(&data->done); @@ -96,10 +106,22 @@ acpi_pcc_address_space_handler(u32 function, acpi_physical_address addr, if (ret < 0) return AE_ERROR; - if (data->pcc_chan->mchan->mbox->txdone_irq) - wait_for_completion(&data->done); + if (data->pcc_chan->mchan->mbox->txdone_irq) { + /* + * pcc_chan->latency is just a Nominal value. In reality the remote + * processor could be much slower to reply. So add an arbitrary + * amount of wait on top of Nominal. + */ + usecs_lat = PCC_CMD_WAIT_RETRIES_NUM * data->pcc_chan->latency; + ret = wait_for_completion_timeout(&data->done, + usecs_to_jiffies(usecs_lat)); + if (ret == 0) { + pr_err("PCC command execution timed out!\n"); + return AE_TIME; + } + } - mbox_client_txdone(data->pcc_chan->mchan, ret); + mbox_chan_txdone(data->pcc_chan->mchan, ret); memcpy_fromio(value, data->pcc_comm_addr, data->ctx.length); diff --git a/drivers/acpi/acpi_platform.c b/drivers/acpi/acpi_platform.c index de3cbf152dee..fe00a5783f53 100644 --- a/drivers/acpi/acpi_platform.c +++ b/drivers/acpi/acpi_platform.c @@ -20,13 +20,13 @@ #include "internal.h" static const struct acpi_device_id forbidden_id_list[] = { + {"ACPI0009", 0}, /* IOxAPIC */ + {"ACPI000A", 0}, /* IOAPIC */ {"PNP0000", 0}, /* PIC */ {"PNP0100", 0}, /* Timer */ {"PNP0200", 0}, /* AT DMA Controller */ - {"ACPI0009", 0}, /* IOxAPIC */ - {"ACPI000A", 0}, /* IOAPIC */ {"SMB0001", 0}, /* ACPI SMBUS virtual device */ - {"", 0}, + { } }; static struct platform_device *acpi_platform_device_find_by_companion(struct acpi_device *adev) @@ -78,7 +78,7 @@ static void acpi_platform_fill_resource(struct acpi_device *adev, * If the device has parent we need to take its resources into * account as well because this device might consume part of those. 
*/ - parent = acpi_get_first_physical_node(adev->parent); + parent = acpi_get_first_physical_node(acpi_dev_parent(adev)); if (parent && dev_is_pci(parent)) dest->parent = pci_find_resource(to_pci_dev(parent), dest); } @@ -97,6 +97,7 @@ static void acpi_platform_fill_resource(struct acpi_device *adev, struct platform_device *acpi_create_platform_device(struct acpi_device *adev, const struct property_entry *properties) { + struct acpi_device *parent = acpi_dev_parent(adev); struct platform_device *pdev = NULL; struct platform_device_info pdevinfo; struct resource_entry *rentry; @@ -113,13 +114,11 @@ struct platform_device *acpi_create_platform_device(struct acpi_device *adev, INIT_LIST_HEAD(&resource_list); count = acpi_dev_get_resources(adev, &resource_list, NULL, NULL); - if (count < 0) { + if (count < 0) return NULL; - } else if (count > 0) { - resources = kcalloc(count, sizeof(struct resource), - GFP_KERNEL); + if (count > 0) { + resources = kcalloc(count, sizeof(*resources), GFP_KERNEL); if (!resources) { - dev_err(&adev->dev, "No memory for resources\n"); acpi_dev_free_resource_list(&resource_list); return ERR_PTR(-ENOMEM); } @@ -137,10 +136,9 @@ struct platform_device *acpi_create_platform_device(struct acpi_device *adev, * attached to it, that physical device should be the parent of the * platform device we are about to create. */ - pdevinfo.parent = adev->parent ? - acpi_get_first_physical_node(adev->parent) : NULL; + pdevinfo.parent = parent ? acpi_get_first_physical_node(parent) : NULL; pdevinfo.name = dev_name(&adev->dev); - pdevinfo.id = -1; + pdevinfo.id = PLATFORM_DEVID_NONE; pdevinfo.res = resources; pdevinfo.num_res = count; pdevinfo.fwnode = acpi_fwnode_handle(adev); diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c index 5cbe2196176d..6d209ea68bd8 100644 --- a/drivers/acpi/acpi_video.c +++ b/drivers/acpi/acpi_video.c @@ -496,6 +496,22 @@ static const struct dmi_system_id video_dmi_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "SATELLITE R830"), }, }, + { + .callback = video_disable_backlight_sysfs_if, + .ident = "Toshiba Satellite Z830", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), + DMI_MATCH(DMI_PRODUCT_NAME, "SATELLITE Z830"), + }, + }, + { + .callback = video_disable_backlight_sysfs_if, + .ident = "Toshiba Portege Z830", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), + DMI_MATCH(DMI_PRODUCT_NAME, "PORTEGE Z830"), + }, + }, /* * Some machine's _DOD IDs don't have bit 31(Device ID Scheme) set * but the IDs actually follow the Device ID Scheme. 
@@ -2030,7 +2046,7 @@ static int acpi_video_bus_add(struct acpi_device *device) acpi_status status; status = acpi_walk_namespace(ACPI_TYPE_DEVICE, - device->parent->handle, 1, + acpi_dev_parent(device)->handle, 1, acpi_video_bus_match, NULL, device, NULL); if (status == AE_ALREADY_EXISTS) { diff --git a/drivers/acpi/apei/apei-base.c b/drivers/acpi/apei/apei-base.c index 9f49272cad39..9b52482b4ed5 100644 --- a/drivers/acpi/apei/apei-base.c +++ b/drivers/acpi/apei/apei-base.c @@ -125,12 +125,9 @@ EXPORT_SYMBOL_GPL(apei_exec_write_register); int apei_exec_write_register_value(struct apei_exec_context *ctx, struct acpi_whea_header *entry) { - int rc; - ctx->value = entry->value; - rc = apei_exec_write_register(ctx, entry); - return rc; + return apei_exec_write_register(ctx, entry); } EXPORT_SYMBOL_GPL(apei_exec_write_register_value); diff --git a/drivers/acpi/apei/bert.c b/drivers/acpi/apei/bert.c index 45973aa6e06d..c23eb75866d0 100644 --- a/drivers/acpi/apei/bert.c +++ b/drivers/acpi/apei/bert.c @@ -90,6 +90,9 @@ static void __init bert_print_all(struct acpi_bert_region *region, if (skipped) pr_info(HW_ERR "Skipped %d error records\n", skipped); + + if (printed + skipped) + pr_info("Total records found: %d\n", printed + skipped); } static int __init setup_bert_disable(char *str) diff --git a/drivers/acpi/apei/erst.c b/drivers/acpi/apei/erst.c index 31b077eedb58..247989060e29 100644 --- a/drivers/acpi/apei/erst.c +++ b/drivers/acpi/apei/erst.c @@ -1020,14 +1020,10 @@ static int reader_pos; static int erst_open_pstore(struct pstore_info *psi) { - int rc; - if (erst_disable) return -ENODEV; - rc = erst_get_record_id_begin(&reader_pos); - - return rc; + return erst_get_record_id_begin(&reader_pos); } static int erst_close_pstore(struct pstore_info *psi) diff --git a/drivers/acpi/arm64/dma.c b/drivers/acpi/arm64/dma.c index f16739ad3cc0..93d796531af3 100644 --- a/drivers/acpi/arm64/dma.c +++ b/drivers/acpi/arm64/dma.c @@ -4,11 +4,12 @@ #include <linux/device.h> #include <linux/dma-direct.h> -void acpi_arch_dma_setup(struct device *dev, u64 *dma_addr, u64 *dma_size) +void acpi_arch_dma_setup(struct device *dev) { int ret; u64 end, mask; - u64 dmaaddr = 0, size = 0, offset = 0; + u64 size = 0; + const struct bus_dma_region *map = NULL; /* * If @dev is expected to be DMA-capable then the bus code that created @@ -26,7 +27,19 @@ void acpi_arch_dma_setup(struct device *dev, u64 *dma_addr, u64 *dma_size) else size = 1ULL << 32; - ret = acpi_dma_get_range(dev, &dmaaddr, &offset, &size); + ret = acpi_dma_get_range(dev, &map); + if (!ret && map) { + const struct bus_dma_region *r = map; + + for (end = 0; r->size; r++) { + if (r->dma_start + r->size - 1 > end) + end = r->dma_start + r->size - 1; + } + + size = end + 1; + dev->dma_range_map = map; + } + if (ret == -ENODEV) ret = iort_dma_get_ranges(dev, &size); if (!ret) { @@ -34,17 +47,10 @@ void acpi_arch_dma_setup(struct device *dev, u64 *dma_addr, u64 *dma_size) * Limit coherent and dma mask based on size retrieved from * firmware. */ - end = dmaaddr + size - 1; + end = size - 1; mask = DMA_BIT_MASK(ilog2(end) + 1); dev->bus_dma_limit = end; dev->coherent_dma_mask = min(dev->coherent_dma_mask, mask); *dev->dma_mask = min(*dev->dma_mask, mask); } - - *dma_addr = dmaaddr; - *dma_size = size; - - ret = dma_direct_set_offset(dev, dmaaddr + offset, dmaaddr, size); - - dev_dbg(dev, "dma_offset(%#08llx)%s\n", offset, ret ? " failed!" 
: ""); } diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c index c0d20d997891..d466c8195314 100644 --- a/drivers/acpi/bus.c +++ b/drivers/acpi/bus.c @@ -456,7 +456,7 @@ out_free: Notification Handling -------------------------------------------------------------------------- */ -/** +/* * acpi_bus_notify * --------------- * Callback for all 'system-level' device notifications (values 0x00-0x7F). @@ -511,7 +511,7 @@ static void acpi_bus_notify(acpi_handle handle, u32 type, void *data) break; } - adev = acpi_bus_get_acpi_device(handle); + adev = acpi_get_acpi_dev(handle); if (!adev) goto err; @@ -524,14 +524,14 @@ static void acpi_bus_notify(acpi_handle handle, u32 type, void *data) } if (!hotplug_event) { - acpi_bus_put_acpi_device(adev); + acpi_put_acpi_dev(adev); return; } if (ACPI_SUCCESS(acpi_hotplug_schedule(adev, type))) return; - acpi_bus_put_acpi_device(adev); + acpi_put_acpi_dev(adev); err: acpi_evaluate_ost(handle, type, ost_code, NULL); @@ -802,7 +802,7 @@ static bool acpi_of_modalias(struct acpi_device *adev, str = obj->string.pointer; chr = strchr(str, ','); - strlcpy(modalias, chr ? chr + 1 : str, len); + strscpy(modalias, chr ? chr + 1 : str, len); return true; } @@ -822,7 +822,7 @@ void acpi_set_modalias(struct acpi_device *adev, const char *default_id, char *modalias, size_t len) { if (!acpi_of_modalias(adev, modalias, len)) - strlcpy(modalias, default_id, len); + strscpy(modalias, default_id, len); } EXPORT_SYMBOL_GPL(acpi_set_modalias); @@ -925,12 +925,13 @@ static const void *acpi_of_device_get_match_data(const struct device *dev) const void *acpi_device_get_match_data(const struct device *dev) { + const struct acpi_device_id *acpi_ids = dev->driver->acpi_match_table; const struct acpi_device_id *match; - if (!dev->driver->acpi_match_table) + if (!acpi_ids) return acpi_of_device_get_match_data(dev); - match = acpi_match_device(dev->driver->acpi_match_table, dev); + match = acpi_match_device(acpi_ids, dev); if (!match) return NULL; @@ -948,14 +949,13 @@ EXPORT_SYMBOL(acpi_match_device_ids); bool acpi_driver_match_device(struct device *dev, const struct device_driver *drv) { - if (!drv->acpi_match_table) - return acpi_of_match_device(ACPI_COMPANION(dev), - drv->of_match_table, - NULL); - - return __acpi_match_device(acpi_companion_match(dev), - drv->acpi_match_table, drv->of_match_table, - NULL, NULL); + const struct acpi_device_id *acpi_ids = drv->acpi_match_table; + const struct of_device_id *of_ids = drv->of_match_table; + + if (!acpi_ids) + return acpi_of_match_device(ACPI_COMPANION(dev), of_ids, NULL); + + return __acpi_match_device(acpi_companion_match(dev), acpi_ids, of_ids, NULL, NULL); } EXPORT_SYMBOL_GPL(acpi_driver_match_device); @@ -973,16 +973,13 @@ EXPORT_SYMBOL_GPL(acpi_driver_match_device); */ int acpi_bus_register_driver(struct acpi_driver *driver) { - int ret; - if (acpi_disabled) return -ENODEV; driver->drv.name = driver->name; driver->drv.bus = &acpi_bus_type; driver->drv.owner = driver->owner; - ret = driver_register(&driver->drv); - return ret; + return driver_register(&driver->drv); } EXPORT_SYMBOL(acpi_bus_register_driver); diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c index 1e15a9f25ae9..093675b1a1ff 100644 --- a/drivers/acpi/cppc_acpi.c +++ b/drivers/acpi/cppc_acpi.c @@ -424,6 +424,9 @@ bool acpi_cpc_valid(void) struct cpc_desc *cpc_ptr; int cpu; + if (acpi_disabled) + return false; + for_each_present_cpu(cpu) { cpc_ptr = per_cpu(cpc_desc_ptr, cpu); if (!cpc_ptr) @@ -1241,6 +1244,48 @@ out_err: 
EXPORT_SYMBOL_GPL(cppc_get_perf_caps); /** + * cppc_perf_ctrs_in_pcc - Check if any perf counters are in a PCC region. + * + * CPPC has flexibility about how CPU performance counters are accessed. + * One of the choices is PCC regions, which can have a high access latency. This + * routine allows callers of cppc_get_perf_ctrs() to know this ahead of time. + * + * Return: true if any of the counters are in PCC regions, false otherwise + */ +bool cppc_perf_ctrs_in_pcc(void) +{ + int cpu; + + for_each_present_cpu(cpu) { + struct cpc_register_resource *ref_perf_reg; + struct cpc_desc *cpc_desc; + + cpc_desc = per_cpu(cpc_desc_ptr, cpu); + + if (CPC_IN_PCC(&cpc_desc->cpc_regs[DELIVERED_CTR]) || + CPC_IN_PCC(&cpc_desc->cpc_regs[REFERENCE_CTR]) || + CPC_IN_PCC(&cpc_desc->cpc_regs[CTR_WRAP_TIME])) + return true; + + + ref_perf_reg = &cpc_desc->cpc_regs[REFERENCE_PERF]; + + /* + * If reference perf register is not supported then we should + * use the nominal perf value + */ + if (!CPC_SUPPORTED(ref_perf_reg)) + ref_perf_reg = &cpc_desc->cpc_regs[NOMINAL_PERF]; + + if (CPC_IN_PCC(ref_perf_reg)) + return true; + } + + return false; +} +EXPORT_SYMBOL_GPL(cppc_perf_ctrs_in_pcc); + +/** * cppc_get_perf_ctrs - Read a CPU's performance feedback counters. * @cpunum: CPU from which to read counters. * @perf_fb_ctrs: ptr to cppc_perf_fb_ctrs. See cppc_acpi.h diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c index 9dce1245689c..d594effe905f 100644 --- a/drivers/acpi/device_pm.c +++ b/drivers/acpi/device_pm.c @@ -75,15 +75,17 @@ static int acpi_dev_pm_explicit_get(struct acpi_device *device, int *state) int acpi_device_get_power(struct acpi_device *device, int *state) { int result = ACPI_STATE_UNKNOWN; + struct acpi_device *parent; int error; if (!device || !state) return -EINVAL; + parent = acpi_dev_parent(device); + if (!device->flags.power_manageable) { /* TBD: Non-recursive algorithm for walking up hierarchy. */ - *state = device->parent ? - device->parent->power.state : ACPI_STATE_D0; + *state = parent ? parent->power.state : ACPI_STATE_D0; goto out; } @@ -122,10 +124,10 @@ int acpi_device_get_power(struct acpi_device *device, int *state) * point, the fact that the device is in D0 implies that the parent has * to be in D0 too, except if ignore_parent is set. 
*/ - if (!device->power.flags.ignore_parent && device->parent - && device->parent->power.state == ACPI_STATE_UNKNOWN - && result == ACPI_STATE_D0) - device->parent->power.state = ACPI_STATE_D0; + if (!device->power.flags.ignore_parent && parent && + parent->power.state == ACPI_STATE_UNKNOWN && + result == ACPI_STATE_D0) + parent->power.state = ACPI_STATE_D0; *state = result; @@ -191,13 +193,17 @@ int acpi_device_set_power(struct acpi_device *device, int state) return -ENODEV; } - if (!device->power.flags.ignore_parent && device->parent && - state < device->parent->power.state) { - acpi_handle_debug(device->handle, - "Cannot transition to %s for parent in %s\n", - acpi_power_state_string(state), - acpi_power_state_string(device->parent->power.state)); - return -ENODEV; + if (!device->power.flags.ignore_parent) { + struct acpi_device *parent; + + parent = acpi_dev_parent(device); + if (parent && state < parent->power.state) { + acpi_handle_debug(device->handle, + "Cannot transition to %s for parent in %s\n", + acpi_power_state_string(state), + acpi_power_state_string(parent->power.state)); + return -ENODEV; + } } /* @@ -497,7 +503,7 @@ static void acpi_pm_notify_handler(acpi_handle handle, u32 val, void *not_used) acpi_handle_debug(handle, "Wake notify\n"); - adev = acpi_bus_get_acpi_device(handle); + adev = acpi_get_acpi_dev(handle); if (!adev) return; @@ -515,7 +521,7 @@ static void acpi_pm_notify_handler(acpi_handle handle, u32 val, void *not_used) mutex_unlock(&acpi_pm_notifier_lock); - acpi_bus_put_acpi_device(adev); + acpi_put_acpi_dev(adev); } /** @@ -1460,7 +1466,7 @@ EXPORT_SYMBOL_GPL(acpi_storage_d3); * not valid to ask for the ACPI power state of the device in that time frame. * * This function is intended to be used in a driver's probe or remove - * function. See Documentation/firmware-guide/acpi/low-power-probe.rst for + * function. See Documentation/firmware-guide/acpi/non-d0-probe.rst for * more information. */ bool acpi_dev_state_d0(struct device *dev) diff --git a/drivers/acpi/dptf/Kconfig b/drivers/acpi/dptf/Kconfig index 1e8c7ce89bf1..4b3fdc03e4ed 100644 --- a/drivers/acpi/dptf/Kconfig +++ b/drivers/acpi/dptf/Kconfig @@ -11,9 +11,6 @@ menuconfig ACPI_DPTF a coordinated approach for different policies to effect the hardware state of a system. 
- For more information see: - <https://01.org/intel%C2%AE-dynamic-platform-and-thermal-framework-dptf-chromium-os/overview> - if ACPI_DPTF config DPTF_POWER diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c index c95e535035a0..9b42628cf21b 100644 --- a/drivers/acpi/ec.c +++ b/drivers/acpi/ec.c @@ -917,14 +917,10 @@ EXPORT_SYMBOL(ec_read); int ec_write(u8 addr, u8 val) { - int err; - if (!first_ec) return -ENODEV; - err = acpi_ec_write(first_ec, addr, val); - - return err; + return acpi_ec_write(first_ec, addr, val); } EXPORT_SYMBOL(ec_write); diff --git a/drivers/acpi/fan_core.c b/drivers/acpi/fan_core.c index b9a9a59ddcc1..52a0b303b70a 100644 --- a/drivers/acpi/fan_core.c +++ b/drivers/acpi/fan_core.c @@ -19,43 +19,12 @@ #include "fan.h" -MODULE_AUTHOR("Paul Diefenbaugh"); -MODULE_DESCRIPTION("ACPI Fan Driver"); -MODULE_LICENSE("GPL"); - -static int acpi_fan_probe(struct platform_device *pdev); -static int acpi_fan_remove(struct platform_device *pdev); - static const struct acpi_device_id fan_device_ids[] = { ACPI_FAN_DEVICE_IDS, {"", 0}, }; MODULE_DEVICE_TABLE(acpi, fan_device_ids); -#ifdef CONFIG_PM_SLEEP -static int acpi_fan_suspend(struct device *dev); -static int acpi_fan_resume(struct device *dev); -static const struct dev_pm_ops acpi_fan_pm = { - .resume = acpi_fan_resume, - .freeze = acpi_fan_suspend, - .thaw = acpi_fan_resume, - .restore = acpi_fan_resume, -}; -#define FAN_PM_OPS_PTR (&acpi_fan_pm) -#else -#define FAN_PM_OPS_PTR NULL -#endif - -static struct platform_driver acpi_fan_driver = { - .probe = acpi_fan_probe, - .remove = acpi_fan_remove, - .driver = { - .name = "acpi-fan", - .acpi_match_table = fan_device_ids, - .pm = FAN_PM_OPS_PTR, - }, -}; - /* thermal cooling device callbacks */ static int fan_get_max_state(struct thermal_cooling_device *cdev, unsigned long *state) @@ -459,6 +428,33 @@ static int acpi_fan_resume(struct device *dev) return result; } + +static const struct dev_pm_ops acpi_fan_pm = { + .resume = acpi_fan_resume, + .freeze = acpi_fan_suspend, + .thaw = acpi_fan_resume, + .restore = acpi_fan_resume, +}; +#define FAN_PM_OPS_PTR (&acpi_fan_pm) + +#else + +#define FAN_PM_OPS_PTR NULL + #endif +static struct platform_driver acpi_fan_driver = { + .probe = acpi_fan_probe, + .remove = acpi_fan_remove, + .driver = { + .name = "acpi-fan", + .acpi_match_table = fan_device_ids, + .pm = FAN_PM_OPS_PTR, + }, +}; + module_platform_driver(acpi_fan_driver); + +MODULE_AUTHOR("Paul Diefenbaugh"); +MODULE_DESCRIPTION("ACPI Fan Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h index 628bf8f18130..219c02df9a08 100644 --- a/drivers/acpi/internal.h +++ b/drivers/acpi/internal.h @@ -102,10 +102,10 @@ struct acpi_device_bus_id { struct list_head node; }; -int acpi_device_add(struct acpi_device *device, - void (*release)(struct device *)); void acpi_init_device_object(struct acpi_device *device, acpi_handle handle, - int type); + int type, void (*release)(struct device *)); +int acpi_tie_acpi_dev(struct acpi_device *adev); +int acpi_device_add(struct acpi_device *device); int acpi_device_setup_files(struct acpi_device *dev); void acpi_device_remove_files(struct acpi_device *dev); void acpi_device_add_finalize(struct acpi_device *device); diff --git a/drivers/acpi/irq.c b/drivers/acpi/irq.c index dabe45eba055..4db5bb587599 100644 --- a/drivers/acpi/irq.c +++ b/drivers/acpi/irq.c @@ -118,12 +118,12 @@ acpi_get_irq_source_fwhandle(const struct acpi_resource_source *source, if (WARN_ON(ACPI_FAILURE(status))) return NULL; - device = 
acpi_bus_get_acpi_device(handle); + device = acpi_get_acpi_dev(handle); if (WARN_ON(!device)) return NULL; result = &device->fwnode; - acpi_bus_put_acpi_device(device); + acpi_put_acpi_dev(device); return result; } diff --git a/drivers/acpi/numa/hmat.c b/drivers/acpi/numa/hmat.c index c3d783aca196..23f49a2f4d14 100644 --- a/drivers/acpi/numa/hmat.c +++ b/drivers/acpi/numa/hmat.c @@ -9,7 +9,6 @@ */ #define pr_fmt(fmt) "acpi/hmat: " fmt -#define dev_fmt(fmt) "acpi/hmat: " fmt #include <linux/acpi.h> #include <linux/bitops.h> @@ -302,7 +301,7 @@ static __init int hmat_parse_locality(union acpi_subtable_headers *header, u8 type, mem_hier; if (hmat_loc->header.length < sizeof(*hmat_loc)) { - pr_notice("HMAT: Unexpected locality header length: %u\n", + pr_notice("Unexpected locality header length: %u\n", hmat_loc->header.length); return -EINVAL; } @@ -314,12 +313,12 @@ static __init int hmat_parse_locality(union acpi_subtable_headers *header, total_size = sizeof(*hmat_loc) + sizeof(*entries) * ipds * tpds + sizeof(*inits) * ipds + sizeof(*targs) * tpds; if (hmat_loc->header.length < total_size) { - pr_notice("HMAT: Unexpected locality header length:%u, minimum required:%u\n", + pr_notice("Unexpected locality header length:%u, minimum required:%u\n", hmat_loc->header.length, total_size); return -EINVAL; } - pr_info("HMAT: Locality: Flags:%02x Type:%s Initiator Domains:%u Target Domains:%u Base:%lld\n", + pr_info("Locality: Flags:%02x Type:%s Initiator Domains:%u Target Domains:%u Base:%lld\n", hmat_loc->flags, hmat_data_type(type), ipds, tpds, hmat_loc->entry_base_unit); @@ -363,13 +362,13 @@ static __init int hmat_parse_cache(union acpi_subtable_headers *header, u32 attrs; if (cache->header.length < sizeof(*cache)) { - pr_notice("HMAT: Unexpected cache header length: %u\n", + pr_notice("Unexpected cache header length: %u\n", cache->header.length); return -EINVAL; } attrs = cache->cache_attributes; - pr_info("HMAT: Cache: Domain:%u Size:%llu Attrs:%08x SMBIOS Handles:%d\n", + pr_info("Cache: Domain:%u Size:%llu Attrs:%08x SMBIOS Handles:%d\n", cache->memory_PD, cache->cache_size, attrs, cache->number_of_SMBIOShandles); @@ -424,24 +423,24 @@ static int __init hmat_parse_proximity_domain(union acpi_subtable_headers *heade struct memory_target *target = NULL; if (p->header.length != sizeof(*p)) { - pr_notice("HMAT: Unexpected address range header length: %u\n", + pr_notice("Unexpected address range header length: %u\n", p->header.length); return -EINVAL; } if (hmat_revision == 1) - pr_info("HMAT: Memory (%#llx length %#llx) Flags:%04x Processor Domain:%u Memory Domain:%u\n", + pr_info("Memory (%#llx length %#llx) Flags:%04x Processor Domain:%u Memory Domain:%u\n", p->reserved3, p->reserved4, p->flags, p->processor_PD, p->memory_PD); else - pr_info("HMAT: Memory Flags:%04x Processor Domain:%u Memory Domain:%u\n", + pr_info("Memory Flags:%04x Processor Domain:%u Memory Domain:%u\n", p->flags, p->processor_PD, p->memory_PD); if ((hmat_revision == 1 && p->flags & ACPI_HMAT_MEMORY_PD_VALID) || hmat_revision > 1) { target = find_mem_target(p->memory_PD); if (!target) { - pr_debug("HMAT: Memory Domain missing from SRAT\n"); + pr_debug("Memory Domain missing from SRAT\n"); return -EINVAL; } } @@ -449,7 +448,7 @@ static int __init hmat_parse_proximity_domain(union acpi_subtable_headers *heade int p_node = pxm_to_node(p->processor_PD); if (p_node == NUMA_NO_NODE) { - pr_debug("HMAT: Invalid Processor Domain\n"); + pr_debug("Invalid Processor Domain\n"); return -EINVAL; } target->processor_pxm = 
p->processor_PD; @@ -840,7 +839,7 @@ static __init int hmat_init(void) case 2: break; default: - pr_notice("Ignoring HMAT: Unknown revision:%d\n", hmat_revision); + pr_notice("Ignoring: Unknown revision:%d\n", hmat_revision); goto out_put; } @@ -848,7 +847,7 @@ static __init int hmat_init(void) if (acpi_table_parse_entries(ACPI_SIG_HMAT, sizeof(struct acpi_table_hmat), i, hmat_parse_subtable, 0) < 0) { - pr_notice("Ignoring HMAT: Invalid table"); + pr_notice("Ignoring: Invalid table"); goto out_put; } } diff --git a/drivers/acpi/osi.c b/drivers/acpi/osi.c index 9f6853809138..d4405e1ca9b9 100644 --- a/drivers/acpi/osi.c +++ b/drivers/acpi/osi.c @@ -44,30 +44,6 @@ osi_setup_entries[OSI_STRING_ENTRIES_MAX] __initdata = { {"Processor Device", true}, {"3.0 _SCP Extensions", true}, {"Processor Aggregator Device", true}, - /* - * Linux-Dell-Video is used by BIOS to disable RTD3 for NVidia graphics - * cards as RTD3 is not supported by drivers now. Systems with NVidia - * cards will hang without RTD3 disabled. - * - * Once NVidia drivers officially support RTD3, this _OSI strings can - * be removed if both new and old graphics cards are supported. - */ - {"Linux-Dell-Video", true}, - /* - * Linux-Lenovo-NV-HDMI-Audio is used by BIOS to power on NVidia's HDMI - * audio device which is turned off for power-saving in Windows OS. - * This power management feature observed on some Lenovo Thinkpad - * systems which will not be able to output audio via HDMI without - * a BIOS workaround. - */ - {"Linux-Lenovo-NV-HDMI-Audio", true}, - /* - * Linux-HPI-Hybrid-Graphics is used by BIOS to enable dGPU to - * output video directly to external monitors on HP Inc. mobile - * workstations as Nvidia and AMD VGA drivers provide limited - * hybrid graphics supports. - */ - {"Linux-HPI-Hybrid-Graphics", true}, }; static u32 acpi_osi_handler(acpi_string interface, u32 supported) diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c index d57cf8454b93..c8385ef54c37 100644 --- a/drivers/acpi/pci_root.c +++ b/drivers/acpi/pci_root.c @@ -312,76 +312,25 @@ struct acpi_handle_node { */ struct pci_dev *acpi_get_pci_dev(acpi_handle handle) { - int dev, fn; - unsigned long long adr; - acpi_status status; - acpi_handle phandle; - struct pci_bus *pbus; - struct pci_dev *pdev = NULL; - struct acpi_handle_node *node, *tmp; - struct acpi_pci_root *root; - LIST_HEAD(device_list); - - /* - * Walk up the ACPI CA namespace until we reach a PCI root bridge. - */ - phandle = handle; - while (!acpi_is_root_bridge(phandle)) { - node = kzalloc(sizeof(struct acpi_handle_node), GFP_KERNEL); - if (!node) - goto out; - - INIT_LIST_HEAD(&node->node); - node->handle = phandle; - list_add(&node->node, &device_list); - - status = acpi_get_parent(phandle, &phandle); - if (ACPI_FAILURE(status)) - goto out; - } - - root = acpi_pci_find_root(phandle); - if (!root) - goto out; + struct acpi_device *adev = acpi_fetch_acpi_dev(handle); + struct acpi_device_physical_node *pn; + struct pci_dev *pci_dev = NULL; - pbus = root->bus; - - /* - * Now, walk back down the PCI device tree until we return to our - * original handle. Assumes that everything between the PCI root - * bridge and the device we're looking for must be a P2P bridge. 
- */ - list_for_each_entry(node, &device_list, node) { - acpi_handle hnd = node->handle; - status = acpi_evaluate_integer(hnd, "_ADR", NULL, &adr); - if (ACPI_FAILURE(status)) - goto out; - dev = (adr >> 16) & 0xffff; - fn = adr & 0xffff; - - pdev = pci_get_slot(pbus, PCI_DEVFN(dev, fn)); - if (!pdev || hnd == handle) - break; + if (!adev) + return NULL; - pbus = pdev->subordinate; - pci_dev_put(pdev); + mutex_lock(&adev->physical_node_lock); - /* - * This function may be called for a non-PCI device that has a - * PCI parent (eg. a disk under a PCI SATA controller). In that - * case pdev->subordinate will be NULL for the parent. - */ - if (!pbus) { - dev_dbg(&pdev->dev, "Not a PCI-to-PCI bridge\n"); - pdev = NULL; + list_for_each_entry(pn, &adev->physical_node_list, node) { + if (dev_is_pci(pn->dev)) { + pci_dev = to_pci_dev(pn->dev); break; } } -out: - list_for_each_entry_safe(node, tmp, &device_list, node) - kfree(node); - return pdev; + mutex_unlock(&adev->physical_node_lock); + + return pci_dev; } EXPORT_SYMBOL_GPL(acpi_get_pci_dev); diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c index 8c4a73a1351e..f2588aba8421 100644 --- a/drivers/acpi/power.c +++ b/drivers/acpi/power.c @@ -944,13 +944,15 @@ struct acpi_device *acpi_add_power_resource(acpi_handle handle) return NULL; device = &resource->device; - acpi_init_device_object(device, handle, ACPI_BUS_TYPE_POWER); + acpi_init_device_object(device, handle, ACPI_BUS_TYPE_POWER, + acpi_release_power_resource); mutex_init(&resource->resource_lock); INIT_LIST_HEAD(&resource->list_node); INIT_LIST_HEAD(&resource->dependents); strcpy(acpi_device_name(device), ACPI_POWER_DEVICE_NAME); strcpy(acpi_device_class(device), ACPI_POWER_CLASS); device->power.state = ACPI_STATE_UNKNOWN; + device->flags.match_driver = true; /* Evaluate the object to get the system level and resource order. 
*/ status = acpi_evaluate_object(handle, NULL, NULL, &buffer); @@ -967,8 +969,11 @@ struct acpi_device *acpi_add_power_resource(acpi_handle handle) pr_info("%s [%s]\n", acpi_device_name(device), acpi_device_bid(device)); - device->flags.match_driver = true; - result = acpi_device_add(device, acpi_release_power_resource); + result = acpi_tie_acpi_dev(device); + if (result) + goto err; + + result = acpi_device_add(device); if (result) goto err; diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c index 9f40917c49ef..acfabfe07c4f 100644 --- a/drivers/acpi/processor_idle.c +++ b/drivers/acpi/processor_idle.c @@ -804,7 +804,7 @@ static int acpi_processor_setup_cstates(struct acpi_processor *pr) state = &drv->states[count]; snprintf(state->name, CPUIDLE_NAME_LEN, "C%d", i); - strlcpy(state->desc, cx->desc, CPUIDLE_DESC_LEN); + strscpy(state->desc, cx->desc, CPUIDLE_DESC_LEN); state->exit_latency = cx->latency; state->target_residency = cx->latency * latency_factor; state->enter = acpi_idle_enter; @@ -973,7 +973,7 @@ static int acpi_processor_evaluate_lpi(acpi_handle handle, obj = pkg_elem + 9; if (obj->type == ACPI_TYPE_STRING) - strlcpy(lpi_state->desc, obj->string.pointer, + strscpy(lpi_state->desc, obj->string.pointer, ACPI_CX_DESC_LEN); lpi_state->index = state_idx; @@ -1039,7 +1039,7 @@ static bool combine_lpi_states(struct acpi_lpi_state *local, result->arch_flags = parent->arch_flags; result->index = parent->index; - strlcpy(result->desc, local->desc, ACPI_CX_DESC_LEN); + strscpy(result->desc, local->desc, ACPI_CX_DESC_LEN); strlcat(result->desc, "+", ACPI_CX_DESC_LEN); strlcat(result->desc, parent->desc, ACPI_CX_DESC_LEN); return true; @@ -1213,7 +1213,7 @@ static int acpi_processor_setup_lpi_states(struct acpi_processor *pr) state = &drv->states[i]; snprintf(state->name, CPUIDLE_NAME_LEN, "LPI-%d", i); - strlcpy(state->desc, lpi->desc, CPUIDLE_DESC_LEN); + strscpy(state->desc, lpi->desc, CPUIDLE_DESC_LEN); state->exit_latency = lpi->wake_latency; state->target_residency = lpi->min_residency; if (lpi->arch_flags) diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c index d4c168ce428c..b8d9eb9a433e 100644 --- a/drivers/acpi/property.c +++ b/drivers/acpi/property.c @@ -304,8 +304,10 @@ static void acpi_init_of_compatible(struct acpi_device *adev) ret = acpi_dev_get_property(adev, "compatible", ACPI_TYPE_STRING, &of_compatible); if (ret) { - if (adev->parent - && adev->parent->flags.of_compatible_ok) + struct acpi_device *parent; + + parent = acpi_dev_parent(adev); + if (parent && parent->flags.of_compatible_ok) goto out; return; @@ -1267,10 +1269,11 @@ acpi_node_get_parent(const struct fwnode_handle *fwnode) return to_acpi_data_node(fwnode)->parent; } if (is_acpi_device_node(fwnode)) { - struct device *dev = to_acpi_device_node(fwnode)->dev.parent; + struct acpi_device *parent; - if (dev) - return acpi_fwnode_handle(to_acpi_device(dev)); + parent = acpi_dev_parent(to_acpi_device_node(fwnode)); + if (parent) + return acpi_fwnode_handle(parent); } return NULL; diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c index 510cdec375c4..514d89656dde 100644 --- a/drivers/acpi/resource.c +++ b/drivers/acpi/resource.c @@ -399,6 +399,31 @@ static const struct dmi_system_id medion_laptop[] = { { } }; +static const struct dmi_system_id asus_laptop[] = { + { + .ident = "Asus Vivobook K3402ZA", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_BOARD_NAME, "K3402ZA"), + }, + }, + { + .ident = "Asus Vivobook K3502ZA", + .matches = 
{ + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_BOARD_NAME, "K3502ZA"), + }, + }, + { + .ident = "Asus Vivobook S5402ZA", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_BOARD_NAME, "S5402ZA"), + }, + }, + { } +}; + struct irq_override_cmp { const struct dmi_system_id *system; unsigned char irq; @@ -409,6 +434,7 @@ struct irq_override_cmp { static const struct irq_override_cmp skip_override_table[] = { { medion_laptop, 1, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0 }, + { asus_laptop, 1, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0 }, }; static bool acpi_dev_irq_override(u32 gsi, u8 triggering, u8 polarity, @@ -690,6 +716,9 @@ static int is_memory(struct acpi_resource *ares, void *not_used) memset(&win, 0, sizeof(win)); + if (acpi_dev_filter_resource_type(ares, IORESOURCE_MEM)) + return 1; + return !(acpi_dev_resource_memory(ares, res) || acpi_dev_resource_address_space(ares, &win) || acpi_dev_resource_ext_address_space(ares, &win)); @@ -719,6 +748,23 @@ int acpi_dev_get_dma_resources(struct acpi_device *adev, struct list_head *list) EXPORT_SYMBOL_GPL(acpi_dev_get_dma_resources); /** + * acpi_dev_get_memory_resources - Get current memory resources of a device. + * @adev: ACPI device node to get the resources for. + * @list: Head of the resultant list of resources (must be empty). + * + * This is a helper function that locates all memory type resources of @adev + * with acpi_dev_get_resources(). + * + * The number of resources in the output list is returned on success, an error + * code reflecting the error condition is returned otherwise. + */ +int acpi_dev_get_memory_resources(struct acpi_device *adev, struct list_head *list) +{ + return acpi_dev_get_resources(adev, list, is_memory, NULL); +} +EXPORT_SYMBOL_GPL(acpi_dev_get_memory_resources); + +/** * acpi_dev_filter_resource_type - Filter ACPI resource according to resource * types * @ares: Input ACPI resource object. 
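The acpi_dev_get_memory_resources() helper documented just above is what the acpi_apd.c and acpi_lpss.c hunks earlier in this diff convert to: it is acpi_dev_get_resources() with the memory-only is_memory() filter pre-applied, so callers no longer open-code a misc_check_res()-style callback. A hedged usage sketch; the function name and the debug print are illustrative:

#include <linux/acpi.h>

/* Illustrative: log every memory resource declared by @adev. */
static int example_dump_mem_resources(struct acpi_device *adev)
{
	struct list_head resource_list;
	struct resource_entry *rentry;
	int ret;

	INIT_LIST_HEAD(&resource_list);

	/* Returns the number of resources placed on the list, or a negative errno. */
	ret = acpi_dev_get_memory_resources(adev, &resource_list);
	if (ret < 0)
		return ret;

	list_for_each_entry(rentry, &resource_list, node)
		acpi_handle_debug(adev->handle, "memory resource %pR\n", rentry->res);

	acpi_dev_free_resource_list(&resource_list);
	return 0;
}
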
diff --git a/drivers/acpi/sbs.c b/drivers/acpi/sbs.c index 4938010fcac7..e6a01a8df1b8 100644 --- a/drivers/acpi/sbs.c +++ b/drivers/acpi/sbs.c @@ -632,7 +632,7 @@ static int acpi_sbs_add(struct acpi_device *device) mutex_init(&sbs->lock); - sbs->hc = acpi_driver_data(device->parent); + sbs->hc = acpi_driver_data(acpi_dev_parent(device)); sbs->device = device; strcpy(acpi_device_name(device), ACPI_SBS_DEVICE_NAME); strcpy(acpi_device_class(device), ACPI_SBS_CLASS); diff --git a/drivers/acpi/sbshc.c b/drivers/acpi/sbshc.c index 7c62e149a7a1..340e0b61587e 100644 --- a/drivers/acpi/sbshc.c +++ b/drivers/acpi/sbshc.c @@ -266,7 +266,7 @@ static int acpi_smbus_hc_add(struct acpi_device *device) mutex_init(&hc->lock); init_waitqueue_head(&hc->wait); - hc->ec = acpi_driver_data(device->parent); + hc->ec = acpi_driver_data(acpi_dev_parent(device)); hc->offset = (val >> 8) & 0xff; hc->query_bit = val & 0xff; device->driver_data = hc; diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c index 42cec8120f18..558664d169fc 100644 --- a/drivers/acpi/scan.c +++ b/drivers/acpi/scan.c @@ -20,6 +20,7 @@ #include <linux/platform_data/x86/apple.h> #include <linux/pgtable.h> #include <linux/crc32.h> +#include <linux/dma-direct.h> #include "internal.h" @@ -29,8 +30,6 @@ extern struct acpi_device *acpi_root; #define ACPI_BUS_HID "LNXSYBUS" #define ACPI_BUS_DEVICE_NAME "System Bus" -#define ACPI_IS_ROOT_DEVICE(device) (!(device)->parent) - #define INVALID_ACPI_HANDLE ((acpi_handle)empty_zero_page) static const char *dummy_hid = "device"; @@ -429,7 +428,7 @@ void acpi_device_hotplug(struct acpi_device *adev, u32 src) acpi_evaluate_ost(adev->handle, src, ost_code, NULL); out: - acpi_bus_put_acpi_device(adev); + acpi_put_acpi_dev(adev); mutex_unlock(&acpi_scan_lock); unlock_device_hotplug(); } @@ -599,11 +598,22 @@ static void get_acpi_device(void *dev) acpi_dev_get(dev); } -struct acpi_device *acpi_bus_get_acpi_device(acpi_handle handle) +/** + * acpi_get_acpi_dev - Retrieve ACPI device object and reference count it. + * @handle: ACPI handle associated with the requested ACPI device object. + * + * Return a pointer to the ACPI device object associated with @handle and bump + * up that object's reference counter (under the ACPI Namespace lock), if + * present, or return NULL otherwise. + * + * The ACPI device object reference acquired by this function needs to be + * dropped via acpi_dev_put(). 
+ */ +struct acpi_device *acpi_get_acpi_dev(acpi_handle handle) { return handle_to_device(handle, get_acpi_device); } -EXPORT_SYMBOL_GPL(acpi_bus_get_acpi_device); +EXPORT_SYMBOL_GPL(acpi_get_acpi_dev); static struct acpi_device_bus_id *acpi_device_bus_id_match(const char *dev_id) { @@ -632,7 +642,7 @@ static int acpi_device_set_name(struct acpi_device *device, return 0; } -static int acpi_tie_acpi_dev(struct acpi_device *adev) +int acpi_tie_acpi_dev(struct acpi_device *adev) { acpi_handle handle = adev->handle; acpi_status status; @@ -662,8 +672,7 @@ static void acpi_store_pld_crc(struct acpi_device *adev) ACPI_FREE(pld); } -static int __acpi_device_add(struct acpi_device *device, - void (*release)(struct device *)) +int acpi_device_add(struct acpi_device *device) { struct acpi_device_bus_id *acpi_device_bus_id; int result; @@ -719,11 +728,6 @@ static int __acpi_device_add(struct acpi_device *device, mutex_unlock(&acpi_device_lock); - if (device->parent) - device->dev.parent = &device->parent->dev; - - device->dev.bus = &acpi_bus_type; - device->dev.release = release; result = device_add(&device->dev); if (result) { dev_err(&device->dev, "Error registering device\n"); @@ -750,17 +754,6 @@ err_unlock: return result; } -int acpi_device_add(struct acpi_device *adev, void (*release)(struct device *)) -{ - int ret; - - ret = acpi_tie_acpi_dev(adev); - if (ret) - return ret; - - return __acpi_device_add(adev, release); -} - /* -------------------------------------------------------------------------- Device Enumeration -------------------------------------------------------------------------- */ @@ -805,10 +798,9 @@ static const char * const acpi_honor_dep_ids[] = { NULL }; -static struct acpi_device *acpi_bus_get_parent(acpi_handle handle) +static struct acpi_device *acpi_find_parent_acpi_dev(acpi_handle handle) { - struct acpi_device *device; - acpi_status status; + struct acpi_device *adev; /* * Fixed hardware devices do not appear in the namespace and do not @@ -819,13 +811,18 @@ static struct acpi_device *acpi_bus_get_parent(acpi_handle handle) return acpi_root; do { + acpi_status status; + status = acpi_get_parent(handle, &handle); - if (ACPI_FAILURE(status)) - return status == AE_NULL_ENTRY ? NULL : acpi_root; + if (ACPI_FAILURE(status)) { + if (status != AE_NULL_ENTRY) + return acpi_root; - device = acpi_fetch_acpi_dev(handle); - } while (!device); - return device; + return NULL; + } + adev = acpi_fetch_acpi_dev(handle); + } while (!adev); + return adev; } acpi_status @@ -1112,7 +1109,7 @@ static void acpi_device_get_busid(struct acpi_device *device) * The device's Bus ID is simply the object name. * TBD: Shouldn't this value be unique (within the ACPI namespace)? */ - if (ACPI_IS_ROOT_DEVICE(device)) { + if (!acpi_dev_parent(device)) { strcpy(device->pnp.bus_id, "ACPI"); return; } @@ -1467,25 +1464,21 @@ enum dev_dma_attr acpi_get_dma_attr(struct acpi_device *adev) * acpi_dma_get_range() - Get device DMA parameters. * * @dev: device to configure - * @dma_addr: pointer device DMA address result - * @offset: pointer to the DMA offset result - * @size: pointer to DMA range size result + * @map: pointer to DMA ranges result * - * Evaluate DMA regions and return respectively DMA region start, offset - * and size in dma_addr, offset and size on parsing success; it does not - * update the passed in values on failure. + * Evaluate DMA regions and return pointer to DMA regions on + * parsing success; it does not update the passed in values on failure. * * Return 0 on success, < 0 on failure. 
*/ -int acpi_dma_get_range(struct device *dev, u64 *dma_addr, u64 *offset, - u64 *size) +int acpi_dma_get_range(struct device *dev, const struct bus_dma_region **map) { struct acpi_device *adev; LIST_HEAD(list); struct resource_entry *rentry; int ret; struct device *dma_dev = dev; - u64 len, dma_start = U64_MAX, dma_end = 0, dma_offset = 0; + struct bus_dma_region *r; /* * Walk the device tree chasing an ACPI companion with a _DMA @@ -1510,31 +1503,28 @@ int acpi_dma_get_range(struct device *dev, u64 *dma_addr, u64 *offset, ret = acpi_dev_get_dma_resources(adev, &list); if (ret > 0) { + r = kcalloc(ret + 1, sizeof(*r), GFP_KERNEL); + if (!r) { + ret = -ENOMEM; + goto out; + } + list_for_each_entry(rentry, &list, node) { - if (dma_offset && rentry->offset != dma_offset) { + if (rentry->res->start >= rentry->res->end) { + kfree(r); ret = -EINVAL; - dev_warn(dma_dev, "Can't handle multiple windows with different offsets\n"); + dev_dbg(dma_dev, "Invalid DMA regions configuration\n"); goto out; } - dma_offset = rentry->offset; - /* Take lower and upper limits */ - if (rentry->res->start < dma_start) - dma_start = rentry->res->start; - if (rentry->res->end > dma_end) - dma_end = rentry->res->end; - } - - if (dma_start >= dma_end) { - ret = -EINVAL; - dev_dbg(dma_dev, "Invalid DMA regions configuration\n"); - goto out; + r->cpu_start = rentry->res->start; + r->dma_start = rentry->res->start - rentry->offset; + r->size = resource_size(rentry->res); + r->offset = rentry->offset; + r++; } - *dma_addr = dma_start - dma_offset; - len = dma_end - dma_start; - *size = max(len, len + 1); - *offset = dma_offset; + *map = r; } out: acpi_dev_free_resource_list(&list); @@ -1624,20 +1614,19 @@ int acpi_dma_configure_id(struct device *dev, enum dev_dma_attr attr, const u32 *input_id) { const struct iommu_ops *iommu; - u64 dma_addr = 0, size = 0; if (attr == DEV_DMA_NOT_SUPPORTED) { set_dma_ops(dev, &dma_dummy_ops); return 0; } - acpi_arch_dma_setup(dev, &dma_addr, &size); + acpi_arch_dma_setup(dev); iommu = acpi_iommu_configure_id(dev, input_id); if (PTR_ERR(iommu) == -EPROBE_DEFER) return -EPROBE_DEFER; - arch_setup_dma_ops(dev, dma_addr, size, + arch_setup_dma_ops(dev, 0, U64_MAX, iommu, attr == DEV_DMA_COHERENT); return 0; @@ -1648,7 +1637,7 @@ static void acpi_init_coherency(struct acpi_device *adev) { unsigned long long cca = 0; acpi_status status; - struct acpi_device *parent = adev->parent; + struct acpi_device *parent = acpi_dev_parent(adev); if (parent && parent->flags.cca_seen) { /* @@ -1692,7 +1681,7 @@ static int acpi_check_serial_bus_slave(struct acpi_resource *ares, void *data) static bool acpi_is_indirect_io_slave(struct acpi_device *device) { - struct acpi_device *parent = device->parent; + struct acpi_device *parent = acpi_dev_parent(device); static const struct acpi_device_id indirect_io_hosts[] = { {"HISI0191", 0}, {} @@ -1762,12 +1751,16 @@ static bool acpi_device_enumeration_by_parent(struct acpi_device *device) } void acpi_init_device_object(struct acpi_device *device, acpi_handle handle, - int type) + int type, void (*release)(struct device *)) { + struct acpi_device *parent = acpi_find_parent_acpi_dev(handle); + INIT_LIST_HEAD(&device->pnp.ids); device->device_type = type; device->handle = handle; - device->parent = acpi_bus_get_parent(handle); + device->dev.parent = parent ? 
&parent->dev : NULL; + device->dev.release = release; + device->dev.bus = &acpi_bus_type; fwnode_init(&device->fwnode, &acpi_device_fwnode_ops); acpi_set_device_status(device, ACPI_STA_DEFAULT); acpi_device_get_busid(device); @@ -1821,7 +1814,7 @@ static int acpi_add_single_object(struct acpi_device **child, if (!device) return -ENOMEM; - acpi_init_device_object(device, handle, type); + acpi_init_device_object(device, handle, type, acpi_device_release); /* * Getting the status is delayed till here so that we can call * acpi_bus_get_status() and use its quirk handling. Note that @@ -1851,7 +1844,7 @@ static int acpi_add_single_object(struct acpi_device **child, mutex_unlock(&acpi_dep_list_lock); if (!result) - result = __acpi_device_add(device, acpi_device_release); + result = acpi_device_add(device); if (result) { acpi_device_release(&device->dev); @@ -1862,8 +1855,8 @@ static int acpi_add_single_object(struct acpi_device **child, acpi_device_add_finalize(device); acpi_handle_debug(handle, "Added as %s, parent %s\n", - dev_name(&device->dev), device->parent ? - dev_name(&device->parent->dev) : "(null)"); + dev_name(&device->dev), device->dev.parent ? + dev_name(device->dev.parent) : "(null)"); *child = device; return 0; @@ -2235,11 +2228,24 @@ ok: return 0; } -static int acpi_dev_get_first_consumer_dev_cb(struct acpi_dep_data *dep, void *data) +static int acpi_dev_get_next_consumer_dev_cb(struct acpi_dep_data *dep, void *data) { - struct acpi_device *adev; + struct acpi_device **adev_p = data; + struct acpi_device *adev = *adev_p; - adev = acpi_bus_get_acpi_device(dep->consumer); + /* + * If we're passed a 'previous' consumer device then we need to skip + * any consumers until we meet the previous one, and then NULL @data + * so the next one can be returned. + */ + if (adev) { + if (dep->consumer == adev->handle) + *adev_p = NULL; + + return 0; + } + + adev = acpi_get_acpi_dev(dep->consumer); if (adev) { *(struct acpi_device **)data = adev; return 1; @@ -2292,7 +2298,7 @@ static bool acpi_scan_clear_dep_queue(struct acpi_device *adev) static int acpi_scan_clear_dep(struct acpi_dep_data *dep, void *data) { - struct acpi_device *adev = acpi_bus_get_acpi_device(dep->consumer); + struct acpi_device *adev = acpi_get_acpi_dev(dep->consumer); if (adev) { adev->dep_unmet--; @@ -2368,25 +2374,32 @@ bool acpi_dev_ready_for_enumeration(const struct acpi_device *device) EXPORT_SYMBOL_GPL(acpi_dev_ready_for_enumeration); /** - * acpi_dev_get_first_consumer_dev - Return ACPI device dependent on @supplier + * acpi_dev_get_next_consumer_dev - Return the next adev dependent on @supplier * @supplier: Pointer to the dependee device + * @start: Pointer to the current dependent device * - * Returns the first &struct acpi_device which declares itself dependent on + * Returns the next &struct acpi_device which declares itself dependent on * @supplier via the _DEP buffer, parsed from the acpi_dep_list. * - * The caller is responsible for putting the reference to adev when it is no - * longer needed. + * If the returned adev is not passed as @start to this function, the caller is + * responsible for putting the reference to adev when it is no longer needed. 
*/ -struct acpi_device *acpi_dev_get_first_consumer_dev(struct acpi_device *supplier) +struct acpi_device *acpi_dev_get_next_consumer_dev(struct acpi_device *supplier, + struct acpi_device *start) { - struct acpi_device *adev = NULL; + struct acpi_device *adev = start; acpi_walk_dep_device_list(supplier->handle, - acpi_dev_get_first_consumer_dev_cb, &adev); + acpi_dev_get_next_consumer_dev_cb, &adev); + + acpi_dev_put(start); + + if (adev == start) + return NULL; return adev; } -EXPORT_SYMBOL_GPL(acpi_dev_get_first_consumer_dev); +EXPORT_SYMBOL_GPL(acpi_dev_get_next_consumer_dev); /** * acpi_bus_scan - Add ACPI device node objects in a given namespace scope. diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c index 5a7b8065e77f..2ea14648a661 100644 --- a/drivers/acpi/utils.c +++ b/drivers/acpi/utils.c @@ -794,6 +794,30 @@ bool acpi_dev_hid_uid_match(struct acpi_device *adev, EXPORT_SYMBOL(acpi_dev_hid_uid_match); /** + * acpi_dev_uid_to_integer - treat ACPI device _UID as integer + * @adev: ACPI device to get _UID from + * @integer: output buffer for integer + * + * Considers _UID as integer and converts it to @integer. + * + * Returns 0 on success, or negative error code otherwise. + */ +int acpi_dev_uid_to_integer(struct acpi_device *adev, u64 *integer) +{ + const char *uid; + + if (!adev) + return -ENODEV; + + uid = acpi_device_uid(adev); + if (!uid) + return -ENODATA; + + return kstrtou64(uid, 0, integer); +} +EXPORT_SYMBOL(acpi_dev_uid_to_integer); + +/** * acpi_dev_found - Detect presence of a given ACPI device in the namespace. * @hid: Hardware ID of the device. * @@ -878,7 +902,7 @@ bool acpi_dev_present(const char *hid, const char *uid, s64 hrv) struct acpi_dev_match_info match = {}; struct device *dev; - strlcpy(match.hid[0].id, hid, sizeof(match.hid[0].id)); + strscpy(match.hid[0].id, hid, sizeof(match.hid[0].id)); match.uid = uid; match.hrv = hrv; @@ -911,7 +935,7 @@ acpi_dev_get_next_match_dev(struct acpi_device *adev, const char *hid, const cha struct acpi_dev_match_info match = {}; struct device *dev; - strlcpy(match.hid[0].id, hid, sizeof(match.hid[0].id)); + strscpy(match.hid[0].id, hid, sizeof(match.hid[0].id)); match.uid = uid; match.hrv = hrv; @@ -961,7 +985,7 @@ EXPORT_SYMBOL(acpi_video_backlight_string); static int __init acpi_backlight(char *str) { - strlcpy(acpi_video_backlight_string, str, + strscpy(acpi_video_backlight_string, str, sizeof(acpi_video_backlight_string)); return 1; } diff --git a/drivers/acpi/x86/apple.c b/drivers/acpi/x86/apple.c index c285c91a5e9c..8812ecd03d55 100644 --- a/drivers/acpi/x86/apple.c +++ b/drivers/acpi/x86/apple.c @@ -8,6 +8,7 @@ #include <linux/bitmap.h> #include <linux/platform_data/x86/apple.h> #include <linux/uuid.h> +#include "../internal.h" /* Apple _DSM device properties GUID */ static const guid_t apple_prp_guid = diff --git a/drivers/acpi/x86/s2idle.c b/drivers/acpi/x86/s2idle.c index f9ac12b778e6..0155c1d2d608 100644 --- a/drivers/acpi/x86/s2idle.c +++ b/drivers/acpi/x86/s2idle.c @@ -17,6 +17,7 @@ #include <linux/acpi.h> #include <linux/device.h> +#include <linux/dmi.h> #include <linux/suspend.h> #include "../sleep.h" @@ -27,6 +28,10 @@ static bool sleep_no_lps0 __read_mostly; module_param(sleep_no_lps0, bool, 0644); MODULE_PARM_DESC(sleep_no_lps0, "Do not use the special LPS0 device interface"); +static bool prefer_microsoft_dsm_guid __read_mostly; +module_param(prefer_microsoft_dsm_guid, bool, 0644); +MODULE_PARM_DESC(prefer_microsoft_dsm_guid, "Prefer using Microsoft GUID in LPS0 device _DSM evaluation"); + static 
const struct acpi_device_id lps0_device_ids[] = { {"PNP0D80", }, {"", }, @@ -363,40 +368,132 @@ out: return ret; } +struct amd_lps0_hid_device_data { + const unsigned int rev_id; + const bool check_off_by_one; + const bool prefer_amd_guid; +}; + +static const struct amd_lps0_hid_device_data amd_picasso = { + .rev_id = 0, + .check_off_by_one = true, + .prefer_amd_guid = false, +}; + +static const struct amd_lps0_hid_device_data amd_cezanne = { + .rev_id = 0, + .check_off_by_one = false, + .prefer_amd_guid = false, +}; + +static const struct amd_lps0_hid_device_data amd_rembrandt = { + .rev_id = 2, + .check_off_by_one = false, + .prefer_amd_guid = true, +}; + +static const struct acpi_device_id amd_hid_ids[] = { + {"AMD0004", (kernel_ulong_t)&amd_picasso, }, + {"AMD0005", (kernel_ulong_t)&amd_picasso, }, + {"AMDI0005", (kernel_ulong_t)&amd_picasso, }, + {"AMDI0006", (kernel_ulong_t)&amd_cezanne, }, + {"AMDI0007", (kernel_ulong_t)&amd_rembrandt, }, + {} +}; + +static int lps0_prefer_microsoft(const struct dmi_system_id *id) +{ + pr_debug("Preferring Microsoft GUID.\n"); + prefer_microsoft_dsm_guid = true; + return 0; +} + +static const struct dmi_system_id s2idle_dmi_table[] __initconst = { + { + /* + * ASUS TUF Gaming A17 FA707RE + * https://bugzilla.kernel.org/show_bug.cgi?id=216101 + */ + .callback = lps0_prefer_microsoft, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_PRODUCT_NAME, "ASUS TUF Gaming A17"), + }, + }, + { + /* ASUS ROG Zephyrus G14 (2022) */ + .callback = lps0_prefer_microsoft, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_PRODUCT_NAME, "ROG Zephyrus G14 GA402"), + }, + }, + { + /* + * Lenovo Yoga Slim 7 Pro X 14ARH7 + * https://bugzilla.kernel.org/show_bug.cgi?id=216473 : 82V2 + * https://bugzilla.kernel.org/show_bug.cgi?id=216438 : 82TL + */ + .callback = lps0_prefer_microsoft, + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "82"), + }, + }, + { + /* + * ASUSTeK COMPUTER INC. ROG Flow X13 GV301RE_GV301RE + * https://gitlab.freedesktop.org/drm/amd/-/issues/2148 + */ + .callback = lps0_prefer_microsoft, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_PRODUCT_NAME, "ROG Flow X13 GV301"), + }, + }, + { + /* + * ASUSTeK COMPUTER INC. ROG Flow X16 GV601RW_GV601RW + * https://gitlab.freedesktop.org/drm/amd/-/issues/2148 + */ + .callback = lps0_prefer_microsoft, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_PRODUCT_NAME, "ROG Flow X16 GV601"), + }, + }, + {} +}; + static int lps0_device_attach(struct acpi_device *adev, const struct acpi_device_id *not_used) { if (lps0_device_handle) return 0; + lps0_dsm_func_mask_microsoft = validate_dsm(adev->handle, + ACPI_LPS0_DSM_UUID_MICROSOFT, 0, + &lps0_dsm_guid_microsoft); if (acpi_s2idle_vendor_amd()) { - /* AMD0004, AMD0005, AMDI0005: - * - Should use rev_id 0x0 - * - function mask > 0x3: Should use AMD method, but has off by one bug - * - function mask = 0x3: Should use Microsoft method - * AMDI0006: - * - should use rev_id 0x0 - * - function mask = 0x3: Should use Microsoft method - * AMDI0007: - * - Should use rev_id 0x2 - * - Should only use AMD method - */ - const char *hid = acpi_device_hid(adev); - rev_id = strcmp(hid, "AMDI0007") ? 
0 : 2; + static const struct acpi_device_id *dev_id; + const struct amd_lps0_hid_device_data *data; + + for (dev_id = &amd_hid_ids[0]; dev_id->id[0]; dev_id++) + if (acpi_dev_hid_uid_match(adev, dev_id->id, NULL)) + break; + if (dev_id->id[0]) + data = (const struct amd_lps0_hid_device_data *) dev_id->driver_data; + else + data = &amd_rembrandt; + rev_id = data->rev_id; lps0_dsm_func_mask = validate_dsm(adev->handle, ACPI_LPS0_DSM_UUID_AMD, rev_id, &lps0_dsm_guid); - lps0_dsm_func_mask_microsoft = validate_dsm(adev->handle, - ACPI_LPS0_DSM_UUID_MICROSOFT, 0, - &lps0_dsm_guid_microsoft); - if (lps0_dsm_func_mask > 0x3 && (!strcmp(hid, "AMD0004") || - !strcmp(hid, "AMD0005") || - !strcmp(hid, "AMDI0005"))) { + if (lps0_dsm_func_mask > 0x3 && data->check_off_by_one) { lps0_dsm_func_mask = (lps0_dsm_func_mask << 1) | 0x1; acpi_handle_debug(adev->handle, "_DSM UUID %s: Adjusted function mask: 0x%x\n", ACPI_LPS0_DSM_UUID_AMD, lps0_dsm_func_mask); - } else if (lps0_dsm_func_mask_microsoft > 0 && - (!strcmp(hid, "AMDI0007") || - !strcmp(hid, "AMDI0008"))) { + } else if (lps0_dsm_func_mask_microsoft > 0 && data->prefer_amd_guid && + !prefer_microsoft_dsm_guid) { lps0_dsm_func_mask_microsoft = -EINVAL; acpi_handle_debug(adev->handle, "_DSM Using AMD method\n"); } @@ -404,7 +501,8 @@ static int lps0_device_attach(struct acpi_device *adev, rev_id = 1; lps0_dsm_func_mask = validate_dsm(adev->handle, ACPI_LPS0_DSM_UUID, rev_id, &lps0_dsm_guid); - lps0_dsm_func_mask_microsoft = -EINVAL; + if (!prefer_microsoft_dsm_guid) + lps0_dsm_func_mask_microsoft = -EINVAL; } if (lps0_dsm_func_mask < 0 && lps0_dsm_func_mask_microsoft < 0) @@ -533,8 +631,9 @@ static const struct platform_s2idle_ops acpi_s2idle_ops_lps0 = { .end = acpi_s2idle_end, }; -void acpi_s2idle_setup(void) +void __init acpi_s2idle_setup(void) { + dmi_check_system(s2idle_dmi_table); acpi_scan_add_handler(&lps0_handler); s2idle_set_ops(&acpi_s2idle_ops_lps0); } diff --git a/drivers/acpi/x86/utils.c b/drivers/acpi/x86/utils.c index 664070fc8349..f8a2cbdc0ce2 100644 --- a/drivers/acpi/x86/utils.c +++ b/drivers/acpi/x86/utils.c @@ -207,9 +207,26 @@ static const struct x86_cpu_id storage_d3_cpu_ids[] = { {} }; +static const struct dmi_system_id force_storage_d3_dmi[] = { + { + /* + * _ADR is ambiguous between GPP1.DEV0 and GPP1.NVME + * but .NVME is needed to get StorageD3Enable node + * https://bugzilla.kernel.org/show_bug.cgi?id=216440 + */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 14 7425 2-in-1"), + } + }, + {} +}; + bool force_storage_d3(void) { - return x86_match_cpu(storage_d3_cpu_ids); + const struct dmi_system_id *dmi_id = dmi_first_match(force_storage_d3_dmi); + + return dmi_id || x86_match_cpu(storage_d3_cpu_ids); } /* @@ -351,11 +368,17 @@ int acpi_quirk_skip_serdev_enumeration(struct device *controller_parent, bool *s struct acpi_device *adev = ACPI_COMPANION(controller_parent); const struct dmi_system_id *dmi_id; long quirks = 0; + u64 uid; + int ret; *skip = false; - /* !dev_is_platform() to not match on PNP enumerated debug UARTs */ - if (!adev || !adev->pnp.unique_id || !dev_is_platform(controller_parent)) + ret = acpi_dev_uid_to_integer(adev, &uid); + if (ret) + return 0; + + /* to not match on PNP enumerated debug UARTs */ + if (!dev_is_platform(controller_parent)) return 0; dmi_id = dmi_first_match(acpi_quirk_skip_dmi_ids); @@ -363,10 +386,10 @@ int acpi_quirk_skip_serdev_enumeration(struct device *controller_parent, bool *s quirks = (unsigned long)dmi_id->driver_data; if 
(quirks & ACPI_QUIRK_UART1_TTY_UART2_SKIP) { - if (!strcmp(adev->pnp.unique_id, "1")) + if (uid == 1) return -ENODEV; /* Create tty cdev instead of serdev */ - if (!strcmp(adev->pnp.unique_id, "2")) + if (uid == 2) *skip = true; } diff --git a/drivers/ata/ahci_imx.c b/drivers/ata/ahci_imx.c index 79aa9f285312..b734e069034d 100644 --- a/drivers/ata/ahci_imx.c +++ b/drivers/ata/ahci_imx.c @@ -327,7 +327,7 @@ static int read_adc_sum(void *dev, u16 rtune_ctl_reg, void __iomem * mmio) } /* SATA AHCI temperature monitor */ -static int sata_ahci_read_temperature(void *dev, int *temp) +static int __sata_ahci_read_temperature(void *dev, int *temp) { u16 mpll_test_reg, rtune_ctl_reg, dac_ctl_reg, read_sum; u32 str1, str2, str3, str4; @@ -416,6 +416,11 @@ static int sata_ahci_read_temperature(void *dev, int *temp) return 0; } +static int sata_ahci_read_temperature(struct thermal_zone_device *tz, int *temp) +{ + return __sata_ahci_read_temperature(tz->devdata, temp); +} + static ssize_t sata_ahci_show_temp(struct device *dev, struct device_attribute *da, char *buf) @@ -423,14 +428,14 @@ static ssize_t sata_ahci_show_temp(struct device *dev, unsigned int temp = 0; int err; - err = sata_ahci_read_temperature(dev, &temp); + err = __sata_ahci_read_temperature(dev, &temp); if (err < 0) return err; return sprintf(buf, "%u\n", temp); } -static const struct thermal_zone_of_device_ops fsl_sata_ahci_of_thermal_ops = { +static const struct thermal_zone_device_ops fsl_sata_ahci_of_thermal_ops = { .get_temp = sata_ahci_read_temperature, }; @@ -1131,8 +1136,8 @@ static int imx_ahci_probe(struct platform_device *pdev) ret = PTR_ERR(hwmon_dev); goto disable_clk; } - devm_thermal_zone_of_sensor_register(hwmon_dev, 0, hwmon_dev, - &fsl_sata_ahci_of_thermal_ops); + devm_thermal_of_zone_register(hwmon_dev, 0, hwmon_dev, + &fsl_sata_ahci_of_thermal_ops); dev_info(dev, "%s: sensor 'sata_ahci'\n", dev_name(hwmon_dev)); } diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c index 46cbe4471e78..dd90591e51ba 100644 --- a/drivers/base/arch_topology.c +++ b/drivers/base/arch_topology.c @@ -353,7 +353,7 @@ void topology_init_cpu_capacity_cppc(void) struct cppc_perf_caps perf_caps; int cpu; - if (likely(acpi_disabled || !acpi_cpc_valid())) + if (likely(!acpi_cpc_valid())) return; raw_capacity = kcalloc(num_possible_cpus(), sizeof(*raw_capacity), diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c index 997be3ac20a7..b52049098d4e 100644 --- a/drivers/base/power/runtime.c +++ b/drivers/base/power/runtime.c @@ -792,10 +792,13 @@ static int rpm_resume(struct device *dev, int rpmflags) DEFINE_WAIT(wait); if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) { - if (dev->power.runtime_status == RPM_SUSPENDING) + if (dev->power.runtime_status == RPM_SUSPENDING) { dev->power.deferred_resume = true; - else + if (rpmflags & RPM_NOWAIT) + retval = -EINPROGRESS; + } else { retval = -EINPROGRESS; + } goto out; } diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c index e3befa2c1b66..7cc0c0cf8eaa 100644 --- a/drivers/base/power/wakeup.c +++ b/drivers/base/power/wakeup.c @@ -944,6 +944,8 @@ void pm_system_irq_wakeup(unsigned int irq_number) else irq_number = 0; + pm_pr_dbg("Triggering wakeup from IRQ %d\n", irq_number); + raw_spin_unlock_irqrestore(&wakeup_irq_lock, flags); if (irq_number) diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index 30255fcaf181..dd9a05174726 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c @@ -322,14 +322,14 @@ static 
blk_status_t virtblk_prep_rq(struct blk_mq_hw_ctx *hctx, if (unlikely(status)) return status; - blk_mq_start_request(req); - vbr->sg_table.nents = virtblk_map_data(hctx, req, vbr); if (unlikely(vbr->sg_table.nents < 0)) { virtblk_cleanup_cmd(req); return BLK_STS_RESOURCE; } + blk_mq_start_request(req); + return BLK_STS_OK; } @@ -391,8 +391,7 @@ static bool virtblk_prep_rq_batch(struct request *req) } static bool virtblk_add_req_batch(struct virtio_blk_vq *vq, - struct request **rqlist, - struct request **requeue_list) + struct request **rqlist) { unsigned long flags; int err; @@ -408,7 +407,7 @@ static bool virtblk_add_req_batch(struct virtio_blk_vq *vq, if (err) { virtblk_unmap_data(req, vbr); virtblk_cleanup_cmd(req); - rq_list_add(requeue_list, req); + blk_mq_requeue_request(req, true); } } @@ -436,7 +435,7 @@ static void virtio_queue_rqs(struct request **rqlist) if (!next || req->mq_hctx != next->mq_hctx) { req->rq_next = NULL; - kick = virtblk_add_req_batch(vq, rqlist, &requeue_list); + kick = virtblk_add_req_batch(vq, rqlist); if (kick) virtqueue_notify(vq->vq); diff --git a/drivers/bluetooth/btintel.c b/drivers/bluetooth/btintel.c index 818681c89db8..a657e9a3e96a 100644 --- a/drivers/bluetooth/btintel.c +++ b/drivers/bluetooth/btintel.c @@ -449,6 +449,7 @@ static int btintel_version_info_tlv(struct hci_dev *hdev, case 0x17: /* TyP */ case 0x18: /* Slr */ case 0x19: /* Slr-F */ + case 0x1b: /* Mgr */ break; default: bt_dev_err(hdev, "Unsupported Intel hardware variant (0x%x)", @@ -2330,6 +2331,7 @@ static void btintel_set_msft_opcode(struct hci_dev *hdev, u8 hw_variant) case 0x17: case 0x18: case 0x19: + case 0x1b: hci_set_msft_opcode(hdev, 0xFC1E); break; default: @@ -2439,15 +2441,20 @@ static int btintel_setup_combined(struct hci_dev *hdev) INTEL_ROM_LEGACY_NO_WBS_SUPPORT)) set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks); + if (ver.hw_variant == 0x08 && ver.fw_variant == 0x22) + set_bit(HCI_QUIRK_VALID_LE_STATES, + &hdev->quirks); err = btintel_legacy_rom_setup(hdev, &ver); break; case 0x0b: /* SfP */ - case 0x0c: /* WsP */ case 0x11: /* JfP */ case 0x12: /* ThP */ case 0x13: /* HrP */ case 0x14: /* CcP */ + set_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks); + fallthrough; + case 0x0c: /* WsP */ /* Apply the device specific HCI quirks * * All Legacy bootloader devices support WBS @@ -2455,11 +2462,6 @@ static int btintel_setup_combined(struct hci_dev *hdev) set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks); - /* Valid LE States quirk for JfP/ThP familiy */ - if (ver.hw_variant == 0x11 || ver.hw_variant == 0x12) - set_bit(HCI_QUIRK_VALID_LE_STATES, - &hdev->quirks); - /* Setup MSFT Extension support */ btintel_set_msft_opcode(hdev, ver.hw_variant); @@ -2530,9 +2532,8 @@ static int btintel_setup_combined(struct hci_dev *hdev) */ set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks); - /* Valid LE States quirk for JfP/ThP familiy */ - if (ver.hw_variant == 0x11 || ver.hw_variant == 0x12) - set_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks); + /* Set Valid LE States quirk */ + set_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks); /* Setup MSFT Extension support */ btintel_set_msft_opcode(hdev, ver.hw_variant); @@ -2542,6 +2543,7 @@ static int btintel_setup_combined(struct hci_dev *hdev) case 0x17: case 0x18: case 0x19: + case 0x1b: /* Display version information of TLV type */ btintel_version_info_tlv(hdev, &ver_tlv); diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index 15caa6469538..271963805a38 100644 --- a/drivers/bluetooth/btusb.c +++ 
b/drivers/bluetooth/btusb.c @@ -426,6 +426,8 @@ static const struct usb_device_id blacklist_table[] = { BTUSB_WIDEBAND_SPEECH }, { USB_DEVICE(0x04ca, 0x4006), .driver_info = BTUSB_REALTEK | BTUSB_WIDEBAND_SPEECH }, + { USB_DEVICE(0x0cb8, 0xc549), .driver_info = BTUSB_REALTEK | + BTUSB_WIDEBAND_SPEECH }, /* Realtek 8852CE Bluetooth devices */ { USB_DEVICE(0x04ca, 0x4007), .driver_info = BTUSB_REALTEK | @@ -438,6 +440,8 @@ static const struct usb_device_id blacklist_table[] = { BTUSB_WIDEBAND_SPEECH }, { USB_DEVICE(0x13d3, 0x3586), .driver_info = BTUSB_REALTEK | BTUSB_WIDEBAND_SPEECH }, + { USB_DEVICE(0x13d3, 0x3592), .driver_info = BTUSB_REALTEK | + BTUSB_WIDEBAND_SPEECH }, /* Realtek Bluetooth devices */ { USB_VENDOR_AND_INTERFACE_INFO(0x0bda, 0xe0, 0x01, 0x01), @@ -466,6 +470,9 @@ static const struct usb_device_id blacklist_table[] = { { USB_DEVICE(0x0489, 0xe0c8), .driver_info = BTUSB_MEDIATEK | BTUSB_WIDEBAND_SPEECH | BTUSB_VALID_LE_STATES }, + { USB_DEVICE(0x0489, 0xe0e0), .driver_info = BTUSB_MEDIATEK | + BTUSB_WIDEBAND_SPEECH | + BTUSB_VALID_LE_STATES }, { USB_DEVICE(0x04ca, 0x3802), .driver_info = BTUSB_MEDIATEK | BTUSB_WIDEBAND_SPEECH | BTUSB_VALID_LE_STATES }, @@ -478,9 +485,18 @@ static const struct usb_device_id blacklist_table[] = { { USB_DEVICE(0x13d3, 0x3567), .driver_info = BTUSB_MEDIATEK | BTUSB_WIDEBAND_SPEECH | BTUSB_VALID_LE_STATES }, + { USB_DEVICE(0x13d3, 0x3578), .driver_info = BTUSB_MEDIATEK | + BTUSB_WIDEBAND_SPEECH | + BTUSB_VALID_LE_STATES }, + { USB_DEVICE(0x13d3, 0x3583), .driver_info = BTUSB_MEDIATEK | + BTUSB_WIDEBAND_SPEECH | + BTUSB_VALID_LE_STATES }, { USB_DEVICE(0x0489, 0xe0cd), .driver_info = BTUSB_MEDIATEK | BTUSB_WIDEBAND_SPEECH | BTUSB_VALID_LE_STATES }, + { USB_DEVICE(0x0e8d, 0x0608), .driver_info = BTUSB_MEDIATEK | + BTUSB_WIDEBAND_SPEECH | + BTUSB_VALID_LE_STATES }, /* MediaTek MT7922A Bluetooth devices */ { USB_DEVICE(0x0489, 0xe0d8), .driver_info = BTUSB_MEDIATEK | @@ -516,19 +532,17 @@ static const struct usb_device_id blacklist_table[] = { { USB_DEVICE(0x0bda, 0xb009), .driver_info = BTUSB_REALTEK }, { USB_DEVICE(0x2ff8, 0xb011), .driver_info = BTUSB_REALTEK }, - /* Additional Realtek 8761B Bluetooth devices */ + /* Additional Realtek 8761BUV Bluetooth devices */ { USB_DEVICE(0x2357, 0x0604), .driver_info = BTUSB_REALTEK | BTUSB_WIDEBAND_SPEECH }, - - /* Additional Realtek 8761BU Bluetooth devices */ { USB_DEVICE(0x0b05, 0x190e), .driver_info = BTUSB_REALTEK | BTUSB_WIDEBAND_SPEECH }, { USB_DEVICE(0x2550, 0x8761), .driver_info = BTUSB_REALTEK | BTUSB_WIDEBAND_SPEECH }, - - /* Additional Realtek 8761BUV Bluetooth devices */ { USB_DEVICE(0x0bda, 0x8771), .driver_info = BTUSB_REALTEK | BTUSB_WIDEBAND_SPEECH }, + { USB_DEVICE(0x7392, 0xc611), .driver_info = BTUSB_REALTEK | + BTUSB_WIDEBAND_SPEECH }, /* Additional Realtek 8821AE Bluetooth devices */ { USB_DEVICE(0x0b05, 0x17dc), .driver_info = BTUSB_REALTEK }, @@ -2477,15 +2491,29 @@ static int btusb_mtk_hci_wmt_sync(struct hci_dev *hdev, set_bit(BTUSB_TX_WAIT_VND_EVT, &data->flags); + /* WMT cmd/event doesn't follow the generic HCI cmd/event handling; + * it requires constantly polling the control pipe until the host receives + * the WMT event. We therefore need to hold a PM reference on the USB + * interface to keep it from being auto-suspended + * while a WMT cmd/event is in progress. 
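+ *
+ * (Schematic of the usage-counter pairing this requires; illustrative
+ * only, error handling omitted:
+ *
+ *	usb_autopm_get_interface(intf);   usage count 0 -> 1, resumes the
+ *	                                  interface if it was suspended
+ *	...send the WMT cmd, poll the control pipe for the WMT event...
+ *	usb_autopm_put_interface(intf);   usage count 1 -> 0, autosuspend
+ *	                                  becomes possible again
+ * )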
+ */ + err = usb_autopm_get_interface(data->intf); + if (err < 0) + goto err_free_wc; + err = __hci_cmd_send(hdev, 0xfc6f, hlen, wc); if (err < 0) { clear_bit(BTUSB_TX_WAIT_VND_EVT, &data->flags); + usb_autopm_put_interface(data->intf); goto err_free_wc; } /* Submit control IN URB on demand to process the WMT event */ err = btusb_mtk_submit_wmt_recv_urb(hdev); + + usb_autopm_put_interface(data->intf); + if (err < 0) goto err_free_wc; diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c index f537673ede17..865112e96ff9 100644 --- a/drivers/bluetooth/hci_ldisc.c +++ b/drivers/bluetooth/hci_ldisc.c @@ -493,6 +493,11 @@ static int hci_uart_tty_open(struct tty_struct *tty) BT_ERR("Can't allocate control structure"); return -ENFILE; } + if (percpu_init_rwsem(&hu->proto_lock)) { + BT_ERR("Can't allocate semaphore structure"); + kfree(hu); + return -ENOMEM; + } tty->disc_data = hu; hu->tty = tty; @@ -505,8 +510,6 @@ static int hci_uart_tty_open(struct tty_struct *tty) INIT_WORK(&hu->init_ready, hci_uart_init_work); INIT_WORK(&hu->write_work, hci_uart_write_work); - percpu_init_rwsem(&hu->proto_lock); - /* Flush any pending characters in the driver */ tty_driver_flush_buffer(tty); diff --git a/drivers/bluetooth/hci_serdev.c b/drivers/bluetooth/hci_serdev.c index c0e5f42ec6b7..f16fd79bc02b 100644 --- a/drivers/bluetooth/hci_serdev.c +++ b/drivers/bluetooth/hci_serdev.c @@ -310,11 +310,12 @@ int hci_uart_register_device(struct hci_uart *hu, serdev_device_set_client_ops(hu->serdev, &hci_serdev_client_ops); + if (percpu_init_rwsem(&hu->proto_lock)) + return -ENOMEM; + err = serdev_device_open(hu->serdev); if (err) - return err; - - percpu_init_rwsem(&hu->proto_lock); + goto err_rwsem; err = p->open(hu); if (err) @@ -389,6 +390,8 @@ err_alloc: p->close(hu); err_open: serdev_device_close(hu->serdev); +err_rwsem: + percpu_free_rwsem(&hu->proto_lock); return err; } EXPORT_SYMBOL_GPL(hci_uart_register_device); @@ -410,5 +413,6 @@ void hci_uart_unregister_device(struct hci_uart *hu) clear_bit(HCI_UART_PROTO_READY, &hu->flags); serdev_device_close(hu->serdev); } + percpu_free_rwsem(&hu->proto_lock); } EXPORT_SYMBOL_GPL(hci_uart_unregister_device); diff --git a/drivers/clk/bcm/clk-iproc-pll.c b/drivers/clk/bcm/clk-iproc-pll.c index 1a098db12062..680f9d8d357c 100644 --- a/drivers/clk/bcm/clk-iproc-pll.c +++ b/drivers/clk/bcm/clk-iproc-pll.c @@ -726,6 +726,7 @@ void iproc_pll_clk_setup(struct device_node *node, const char *parent_name; struct iproc_clk *iclk_array; struct clk_hw_onecell_data *clk_data; + const char *clk_name; if (WARN_ON(!pll_ctrl) || WARN_ON(!clk_ctrl)) return; @@ -773,7 +774,12 @@ void iproc_pll_clk_setup(struct device_node *node, iclk = &iclk_array[0]; iclk->pll = pll; - init.name = node->name; + ret = of_property_read_string_index(node, "clock-output-names", + 0, &clk_name); + if (WARN_ON(ret)) + goto err_pll_register; + + init.name = clk_name; init.ops = &iproc_pll_ops; init.flags = 0; parent_name = of_clk_get_parent_name(node, 0); @@ -793,13 +799,11 @@ void iproc_pll_clk_setup(struct device_node *node, goto err_pll_register; clk_data->hws[0] = &iclk->hw; + parent_name = clk_name; /* now initialize and register all leaf clocks */ for (i = 1; i < num_clks; i++) { - const char *clk_name; - memset(&init, 0, sizeof(init)); - parent_name = node->name; ret = of_property_read_string_index(node, "clock-output-names", i, &clk_name); diff --git a/drivers/clk/clk-tps68470.c b/drivers/clk/clk-tps68470.c index e5fbefd6ac2d..38f44b5b9b1b 100644 --- a/drivers/clk/clk-tps68470.c +++ 
b/drivers/clk/clk-tps68470.c @@ -200,7 +200,9 @@ static int tps68470_clk_probe(struct platform_device *pdev) .flags = CLK_SET_RATE_GATE, }; struct tps68470_clkdata *tps68470_clkdata; + struct tps68470_clk_consumer *consumer; int ret; + int i; tps68470_clkdata = devm_kzalloc(&pdev->dev, sizeof(*tps68470_clkdata), GFP_KERNEL); @@ -223,10 +225,13 @@ static int tps68470_clk_probe(struct platform_device *pdev) return ret; if (pdata) { - ret = devm_clk_hw_register_clkdev(&pdev->dev, - &tps68470_clkdata->clkout_hw, - pdata->consumer_con_id, - pdata->consumer_dev_name); + for (i = 0; i < pdata->n_consumers; i++) { + consumer = &pdata->consumers[i]; + ret = devm_clk_hw_register_clkdev(&pdev->dev, + &tps68470_clkdata->clkout_hw, + consumer->consumer_con_id, + consumer->consumer_dev_name); + } } return ret; diff --git a/drivers/clk/imx/clk-imx6sx.c b/drivers/clk/imx/clk-imx6sx.c index fc1bd23d4583..598f3cf4eba4 100644 --- a/drivers/clk/imx/clk-imx6sx.c +++ b/drivers/clk/imx/clk-imx6sx.c @@ -280,13 +280,13 @@ static void __init imx6sx_clocks_init(struct device_node *ccm_node) hws[IMX6SX_CLK_SSI3_SEL] = imx_clk_hw_mux("ssi3_sel", base + 0x1c, 14, 2, ssi_sels, ARRAY_SIZE(ssi_sels)); hws[IMX6SX_CLK_SSI2_SEL] = imx_clk_hw_mux("ssi2_sel", base + 0x1c, 12, 2, ssi_sels, ARRAY_SIZE(ssi_sels)); hws[IMX6SX_CLK_SSI1_SEL] = imx_clk_hw_mux("ssi1_sel", base + 0x1c, 10, 2, ssi_sels, ARRAY_SIZE(ssi_sels)); - hws[IMX6SX_CLK_QSPI1_SEL] = imx_clk_hw_mux_flags("qspi1_sel", base + 0x1c, 7, 3, qspi1_sels, ARRAY_SIZE(qspi1_sels), CLK_SET_RATE_PARENT); + hws[IMX6SX_CLK_QSPI1_SEL] = imx_clk_hw_mux("qspi1_sel", base + 0x1c, 7, 3, qspi1_sels, ARRAY_SIZE(qspi1_sels)); hws[IMX6SX_CLK_PERCLK_SEL] = imx_clk_hw_mux("perclk_sel", base + 0x1c, 6, 1, perclk_sels, ARRAY_SIZE(perclk_sels)); hws[IMX6SX_CLK_VID_SEL] = imx_clk_hw_mux("vid_sel", base + 0x20, 21, 3, vid_sels, ARRAY_SIZE(vid_sels)); hws[IMX6SX_CLK_ESAI_SEL] = imx_clk_hw_mux("esai_sel", base + 0x20, 19, 2, audio_sels, ARRAY_SIZE(audio_sels)); hws[IMX6SX_CLK_CAN_SEL] = imx_clk_hw_mux("can_sel", base + 0x20, 8, 2, can_sels, ARRAY_SIZE(can_sels)); hws[IMX6SX_CLK_UART_SEL] = imx_clk_hw_mux("uart_sel", base + 0x24, 6, 1, uart_sels, ARRAY_SIZE(uart_sels)); - hws[IMX6SX_CLK_QSPI2_SEL] = imx_clk_hw_mux_flags("qspi2_sel", base + 0x2c, 15, 3, qspi2_sels, ARRAY_SIZE(qspi2_sels), CLK_SET_RATE_PARENT); + hws[IMX6SX_CLK_QSPI2_SEL] = imx_clk_hw_mux("qspi2_sel", base + 0x2c, 15, 3, qspi2_sels, ARRAY_SIZE(qspi2_sels)); hws[IMX6SX_CLK_SPDIF_SEL] = imx_clk_hw_mux("spdif_sel", base + 0x30, 20, 2, audio_sels, ARRAY_SIZE(audio_sels)); hws[IMX6SX_CLK_AUDIO_SEL] = imx_clk_hw_mux("audio_sel", base + 0x30, 7, 2, audio_sels, ARRAY_SIZE(audio_sels)); hws[IMX6SX_CLK_ENET_PRE_SEL] = imx_clk_hw_mux("enet_pre_sel", base + 0x34, 15, 3, enet_pre_sels, ARRAY_SIZE(enet_pre_sels)); diff --git a/drivers/clk/imx/clk-imx93.c b/drivers/clk/imx/clk-imx93.c index f5c9fa40491c..dcc41d178238 100644 --- a/drivers/clk/imx/clk-imx93.c +++ b/drivers/clk/imx/clk-imx93.c @@ -332,7 +332,7 @@ static struct platform_driver imx93_clk_driver = { .driver = { .name = "imx93-ccm", .suppress_bind_attrs = true, - .of_match_table = of_match_ptr(imx93_clk_of_match), + .of_match_table = imx93_clk_of_match, }, }; module_platform_driver(imx93_clk_driver); diff --git a/drivers/clk/ingenic/tcu.c b/drivers/clk/ingenic/tcu.c index 201bf6e6b6e0..d5544cbc5c48 100644 --- a/drivers/clk/ingenic/tcu.c +++ b/drivers/clk/ingenic/tcu.c @@ -101,15 +101,11 @@ static bool ingenic_tcu_enable_regs(struct clk_hw *hw) bool enabled = false; /* - * If the SoC has 
no global TCU clock, we must ungate the channel's - * clock to be able to access its registers. - * If we have a TCU clock, it will be enabled automatically as it has - * been attached to the regmap. + * According to the programming manual, a timer channel's registers can + * only be accessed when the channel's stop bit is clear. */ - if (!tcu->clk) { - enabled = !!ingenic_tcu_is_enabled(hw); - regmap_write(tcu->map, TCU_REG_TSCR, BIT(info->gate_bit)); - } + enabled = !!ingenic_tcu_is_enabled(hw); + regmap_write(tcu->map, TCU_REG_TSCR, BIT(info->gate_bit)); return enabled; } @@ -120,8 +116,7 @@ static void ingenic_tcu_disable_regs(struct clk_hw *hw) const struct ingenic_tcu_clk_info *info = tcu_clk->info; struct ingenic_tcu *tcu = tcu_clk->tcu; - if (!tcu->clk) - regmap_write(tcu->map, TCU_REG_TSSR, BIT(info->gate_bit)); + regmap_write(tcu->map, TCU_REG_TSSR, BIT(info->gate_bit)); } static u8 ingenic_tcu_get_parent(struct clk_hw *hw) diff --git a/drivers/clk/microchip/clk-mpfs.c b/drivers/clk/microchip/clk-mpfs.c index 070c3b896559..b6b89413e090 100644 --- a/drivers/clk/microchip/clk-mpfs.c +++ b/drivers/clk/microchip/clk-mpfs.c @@ -239,6 +239,11 @@ static const struct clk_ops mpfs_clk_cfg_ops = { .hw.init = CLK_HW_INIT(_name, _parent, &mpfs_clk_cfg_ops, 0), \ } +#define CLK_CPU_OFFSET 0u +#define CLK_AXI_OFFSET 1u +#define CLK_AHB_OFFSET 2u +#define CLK_RTCREF_OFFSET 3u + static struct mpfs_cfg_hw_clock mpfs_cfg_clks[] = { CLK_CFG(CLK_CPU, "clk_cpu", "clk_msspll", 0, 2, mpfs_div_cpu_axi_table, 0, REG_CLOCK_CONFIG_CR), @@ -362,7 +367,7 @@ static const struct clk_ops mpfs_periph_clk_ops = { _flags), \ } -#define PARENT_CLK(PARENT) (&mpfs_cfg_clks[CLK_##PARENT].hw) +#define PARENT_CLK(PARENT) (&mpfs_cfg_clks[CLK_##PARENT##_OFFSET].hw) /* * Critical clocks: @@ -370,6 +375,8 @@ static const struct clk_ops mpfs_periph_clk_ops = { * trap handler * - CLK_MMUART0: reserved by the hss * - CLK_DDRC: provides clock to the ddr subsystem + * - CLK_RTC: the onboard RTC's AHB bus clock must be kept running as the rtc will stop + * if the AHB interface clock is disabled * - CLK_FICx: these provide the processor side clocks to the "FIC" (Fabric InterConnect) * clock domain crossers which provide the interface to the FPGA fabric. Disabling them * causes the FPGA fabric to go into reset. @@ -394,7 +401,7 @@ static struct mpfs_periph_hw_clock mpfs_periph_clks[] = { CLK_PERIPH(CLK_CAN0, "clk_periph_can0", PARENT_CLK(AHB), 14, 0), CLK_PERIPH(CLK_CAN1, "clk_periph_can1", PARENT_CLK(AHB), 15, 0), CLK_PERIPH(CLK_USB, "clk_periph_usb", PARENT_CLK(AHB), 16, 0), - CLK_PERIPH(CLK_RTC, "clk_periph_rtc", PARENT_CLK(AHB), 18, 0), + CLK_PERIPH(CLK_RTC, "clk_periph_rtc", PARENT_CLK(AHB), 18, CLK_IS_CRITICAL), CLK_PERIPH(CLK_QSPI, "clk_periph_qspi", PARENT_CLK(AHB), 19, 0), CLK_PERIPH(CLK_GPIO0, "clk_periph_gpio0", PARENT_CLK(AHB), 20, 0), CLK_PERIPH(CLK_GPIO1, "clk_periph_gpio1", PARENT_CLK(AHB), 21, 0), diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-h6.c b/drivers/clk/sunxi-ng/ccu-sun50i-h6.c index 30056da3e0af..42568c616181 100644 --- a/drivers/clk/sunxi-ng/ccu-sun50i-h6.c +++ b/drivers/clk/sunxi-ng/ccu-sun50i-h6.c @@ -1191,9 +1191,13 @@ static int sun50i_h6_ccu_probe(struct platform_device *pdev) if (IS_ERR(reg)) return PTR_ERR(reg); - /* Force PLL_GPU output divider bits to 0 */ + /* + * Force PLL_GPU output divider bits to 0 and adjust + * multiplier to a sensible default value of 432 MHz. 
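+ *
+ * (Illustrative arithmetic, assuming the usual sunxi "N = field + 1"
+ * encoding for the multiplier in bits [15:8]: writing 17 selects N = 18,
+ * so the 24 MHz parent gives 24 MHz * 18 = 432 MHz.)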
+ */ val = readl(reg + SUN50I_H6_PLL_GPU_REG); - val &= ~BIT(0); + val &= ~(GENMASK(15, 8) | BIT(0)); + val |= 17 << 8; writel(val, reg + SUN50I_H6_PLL_GPU_REG); /* Force GPU_CLK divider bits to 0 */ diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c index 9ac75c1cde9c..f52b8f2fe529 100644 --- a/drivers/cpufreq/amd-pstate.c +++ b/drivers/cpufreq/amd-pstate.c @@ -46,8 +46,8 @@ #include <asm/cpu_device_id.h> #include "amd-pstate-trace.h" -#define AMD_PSTATE_TRANSITION_LATENCY 0x20000 -#define AMD_PSTATE_TRANSITION_DELAY 500 +#define AMD_PSTATE_TRANSITION_LATENCY 20000 +#define AMD_PSTATE_TRANSITION_DELAY 1000 /* * TODO: We need more time to fine tune processors with shared memory solution @@ -120,7 +120,7 @@ struct amd_cpudata { struct amd_aperf_mperf cur; struct amd_aperf_mperf prev; - u64 freq; + u64 freq; bool boost_supported; }; @@ -152,6 +152,7 @@ static inline int amd_pstate_enable(bool enable) static int pstate_init_perf(struct amd_cpudata *cpudata) { u64 cap1; + u32 highest_perf; int ret = rdmsrl_safe_on_cpu(cpudata->cpu, MSR_AMD_CPPC_CAP1, &cap1); @@ -163,7 +164,11 @@ static int pstate_init_perf(struct amd_cpudata *cpudata) * * CPPC entry doesn't indicate the highest performance in some ASICs. */ - WRITE_ONCE(cpudata->highest_perf, amd_get_highest_perf()); + highest_perf = amd_get_highest_perf(); + if (highest_perf > AMD_CPPC_HIGHEST_PERF(cap1)) + highest_perf = AMD_CPPC_HIGHEST_PERF(cap1); + + WRITE_ONCE(cpudata->highest_perf, highest_perf); WRITE_ONCE(cpudata->nominal_perf, AMD_CPPC_NOMINAL_PERF(cap1)); WRITE_ONCE(cpudata->lowest_nonlinear_perf, AMD_CPPC_LOWNONLIN_PERF(cap1)); @@ -175,12 +180,17 @@ static int pstate_init_perf(struct amd_cpudata *cpudata) static int cppc_init_perf(struct amd_cpudata *cpudata) { struct cppc_perf_caps cppc_perf; + u32 highest_perf; int ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf); if (ret) return ret; - WRITE_ONCE(cpudata->highest_perf, amd_get_highest_perf()); + highest_perf = amd_get_highest_perf(); + if (highest_perf > cppc_perf.highest_perf) + highest_perf = cppc_perf.highest_perf; + + WRITE_ONCE(cpudata->highest_perf, highest_perf); WRITE_ONCE(cpudata->nominal_perf, cppc_perf.nominal_perf); WRITE_ONCE(cpudata->lowest_nonlinear_perf, @@ -269,6 +279,7 @@ static void amd_pstate_update(struct amd_cpudata *cpudata, u32 min_perf, u64 prev = READ_ONCE(cpudata->cppc_req_cached); u64 value = prev; + des_perf = clamp_t(unsigned long, des_perf, min_perf, max_perf); value &= ~AMD_CPPC_MIN_PERF(~0L); value |= AMD_CPPC_MIN_PERF(min_perf); @@ -312,7 +323,7 @@ static int amd_pstate_target(struct cpufreq_policy *policy, return -ENODEV; cap_perf = READ_ONCE(cpudata->highest_perf); - min_perf = READ_ONCE(cpudata->lowest_nonlinear_perf); + min_perf = READ_ONCE(cpudata->lowest_perf); max_perf = cap_perf; freqs.old = policy->cur; @@ -357,8 +368,6 @@ static void amd_pstate_adjust_perf(unsigned int cpu, if (max_perf < min_perf) max_perf = min_perf; - des_perf = clamp_t(unsigned long, des_perf, min_perf, max_perf); - amd_pstate_update(cpudata, min_perf, des_perf, max_perf, true); } @@ -555,9 +564,7 @@ free_cpudata1: static int amd_pstate_cpu_exit(struct cpufreq_policy *policy) { - struct amd_cpudata *cpudata; - - cpudata = policy->driver_data; + struct amd_cpudata *cpudata = policy->driver_data; freq_qos_remove_request(&cpudata->req[1]); freq_qos_remove_request(&cpudata->req[0]); @@ -599,9 +606,7 @@ static ssize_t show_amd_pstate_max_freq(struct cpufreq_policy *policy, char *buf) { int max_freq; - struct amd_cpudata *cpudata; - - cpudata = 
policy->driver_data; + struct amd_cpudata *cpudata = policy->driver_data; max_freq = amd_get_max_freq(cpudata); if (max_freq < 0) @@ -614,9 +619,7 @@ static ssize_t show_amd_pstate_lowest_nonlinear_freq(struct cpufreq_policy *poli char *buf) { int freq; - struct amd_cpudata *cpudata; - - cpudata = policy->driver_data; + struct amd_cpudata *cpudata = policy->driver_data; freq = amd_get_lowest_nonlinear_freq(cpudata); if (freq < 0) @@ -662,7 +665,7 @@ static struct cpufreq_driver amd_pstate_driver = { .resume = amd_pstate_cpu_resume, .set_boost = amd_pstate_set_boost, .name = "amd-pstate", - .attr = amd_pstate_attr, + .attr = amd_pstate_attr, }; static int __init amd_pstate_init(void) @@ -673,7 +676,7 @@ static int __init amd_pstate_init(void) return -ENODEV; if (!acpi_cpc_valid()) { - pr_debug("the _CPC object is not present in SBIOS\n"); + pr_warn_once("the _CPC object is not present in SBIOS or ACPI disabled\n"); return -ENODEV; } diff --git a/drivers/cpufreq/bmips-cpufreq.c b/drivers/cpufreq/bmips-cpufreq.c index f7c23fa468f0..39221a9a187a 100644 --- a/drivers/cpufreq/bmips-cpufreq.c +++ b/drivers/cpufreq/bmips-cpufreq.c @@ -156,7 +156,7 @@ static struct cpufreq_driver bmips_cpufreq_driver = { .name = BMIPS_CPUFREQ_PREFIX, }; -static int __init bmips_cpufreq_probe(void) +static int __init bmips_cpufreq_driver_init(void) { struct cpufreq_compat *cc; struct device_node *np; @@ -176,7 +176,13 @@ static int __init bmips_cpufreq_probe(void) return cpufreq_register_driver(&bmips_cpufreq_driver); } -device_initcall(bmips_cpufreq_probe); +module_init(bmips_cpufreq_driver_init); + +static void __exit bmips_cpufreq_driver_exit(void) +{ + cpufreq_unregister_driver(&bmips_cpufreq_driver); +} +module_exit(bmips_cpufreq_driver_exit); MODULE_AUTHOR("Markus Mayer <mmayer@broadcom.com>"); MODULE_DESCRIPTION("CPUfreq driver for Broadcom BMIPS SoCs"); diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c index 24eaf0ec344d..432dfb4e8027 100644 --- a/drivers/cpufreq/cppc_cpufreq.c +++ b/drivers/cpufreq/cppc_cpufreq.c @@ -63,7 +63,15 @@ static struct cppc_workaround_oem_info wa_info[] = { static struct cpufreq_driver cppc_cpufreq_driver; +static enum { + FIE_UNSET = -1, + FIE_ENABLED, + FIE_DISABLED +} fie_disabled = FIE_UNSET; + #ifdef CONFIG_ACPI_CPPC_CPUFREQ_FIE +module_param(fie_disabled, int, 0444); +MODULE_PARM_DESC(fie_disabled, "Disable Frequency Invariance Engine (FIE)"); /* Frequency invariance support */ struct cppc_freq_invariance { @@ -158,7 +166,7 @@ static void cppc_cpufreq_cpu_fie_init(struct cpufreq_policy *policy) struct cppc_freq_invariance *cppc_fi; int cpu, ret; - if (cppc_cpufreq_driver.get == hisi_cppc_cpufreq_get_rate) + if (fie_disabled) return; for_each_cpu(cpu, policy->cpus) { @@ -199,7 +207,7 @@ static void cppc_cpufreq_cpu_fie_exit(struct cpufreq_policy *policy) struct cppc_freq_invariance *cppc_fi; int cpu; - if (cppc_cpufreq_driver.get == hisi_cppc_cpufreq_get_rate) + if (fie_disabled) return; /* policy->cpus will be empty here, use related_cpus instead */ @@ -229,7 +237,15 @@ static void __init cppc_freq_invariance_init(void) }; int ret; - if (cppc_cpufreq_driver.get == hisi_cppc_cpufreq_get_rate) + if (fie_disabled != FIE_ENABLED && fie_disabled != FIE_DISABLED) { + fie_disabled = FIE_ENABLED; + if (cppc_perf_ctrs_in_pcc()) { + pr_info("FIE not enabled on systems with registers in PCC\n"); + fie_disabled = FIE_DISABLED; + } + } + + if (fie_disabled) return; kworker_fie = kthread_create_worker(0, "cppc_fie"); @@ -247,7 +263,7 @@ static void __init 
cppc_freq_invariance_init(void) static void cppc_freq_invariance_exit(void) { - if (cppc_cpufreq_driver.get == hisi_cppc_cpufreq_get_rate) + if (fie_disabled) return; kthread_destroy_worker(kworker_fie); @@ -936,6 +952,7 @@ static void cppc_check_hisi_workaround(void) wa_info[i].oem_revision == tbl->oem_revision) { /* Overwrite the get() callback */ cppc_cpufreq_driver.get = hisi_cppc_cpufreq_get_rate; + fie_disabled = FIE_DISABLED; break; } } @@ -947,7 +964,7 @@ static int __init cppc_cpufreq_init(void) { int ret; - if ((acpi_disabled) || !acpi_cpc_valid()) + if (!acpi_cpc_valid()) return -ENODEV; cppc_check_hisi_workaround(); diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c index 2c96de3f2d83..6ac3800db450 100644 --- a/drivers/cpufreq/cpufreq-dt-platdev.c +++ b/drivers/cpufreq/cpufreq-dt-platdev.c @@ -146,6 +146,7 @@ static const struct of_device_id blocklist[] __initconst = { { .compatible = "qcom,sc8180x", }, { .compatible = "qcom,sc8280xp", }, { .compatible = "qcom,sdm845", }, + { .compatible = "qcom,sm6115", }, { .compatible = "qcom,sm6350", }, { .compatible = "qcom,sm8150", }, { .compatible = "qcom,sm8250", }, diff --git a/drivers/cpufreq/highbank-cpufreq.c b/drivers/cpufreq/highbank-cpufreq.c index ac57cddc5f2f..a45864701143 100644 --- a/drivers/cpufreq/highbank-cpufreq.c +++ b/drivers/cpufreq/highbank-cpufreq.c @@ -55,7 +55,7 @@ static struct notifier_block hb_cpufreq_clk_nb = { .notifier_call = hb_cpufreq_clk_notify, }; -static int hb_cpufreq_driver_init(void) +static int __init hb_cpufreq_driver_init(void) { struct platform_device_info devinfo = { .name = "cpufreq-dt", }; struct device *cpu_dev; diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index 57cdb3679885..fc3ebeb0bbe5 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -2416,6 +2416,7 @@ static const struct x86_cpu_id intel_pstate_cpu_ids[] = { X86_MATCH(SKYLAKE_X, core_funcs), X86_MATCH(COMETLAKE, core_funcs), X86_MATCH(ICELAKE_X, core_funcs), + X86_MATCH(TIGERLAKE, core_funcs), {} }; MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids); diff --git a/drivers/cpufreq/qcom-cpufreq-hw.c b/drivers/cpufreq/qcom-cpufreq-hw.c index d5ef3c66c762..833589bc95e4 100644 --- a/drivers/cpufreq/qcom-cpufreq-hw.c +++ b/drivers/cpufreq/qcom-cpufreq-hw.c @@ -13,6 +13,7 @@ #include <linux/of_address.h> #include <linux/of_platform.h> #include <linux/pm_opp.h> +#include <linux/pm_qos.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/units.h> @@ -56,6 +57,8 @@ struct qcom_cpufreq_data { struct cpufreq_policy *policy; bool per_core_dcvs; + + struct freq_qos_request throttle_freq_req; }; static unsigned long cpu_hw_rate, xo_rate; @@ -316,14 +319,16 @@ static void qcom_lmh_dcvs_notify(struct qcom_cpufreq_data *data) if (IS_ERR(opp)) { dev_warn(dev, "Can't find the OPP for throttling: %pe!\n", opp); } else { - throttled_freq = freq_hz / HZ_PER_KHZ; - - /* Update thermal pressure (the boost frequencies are accepted) */ - arch_update_thermal_pressure(policy->related_cpus, throttled_freq); - dev_pm_opp_put(opp); } + throttled_freq = freq_hz / HZ_PER_KHZ; + + freq_qos_update_request(&data->throttle_freq_req, throttled_freq); + + /* Update thermal pressure (the boost frequencies are accepted) */ + arch_update_thermal_pressure(policy->related_cpus, throttled_freq); + /* * In the unlikely case policy is unregistered do not enable * polling or h/w interrupt @@ -413,6 +418,14 @@ static int qcom_cpufreq_hw_lmh_init(struct cpufreq_policy 
*policy, int index) if (data->throttle_irq < 0) return data->throttle_irq; + ret = freq_qos_add_request(&policy->constraints, + &data->throttle_freq_req, FREQ_QOS_MAX, + FREQ_QOS_MAX_DEFAULT_VALUE); + if (ret < 0) { + dev_err(&pdev->dev, "Failed to add freq constraint (%d)\n", ret); + return ret; + } + data->cancel_throttle = false; data->policy = policy; @@ -479,6 +492,7 @@ static void qcom_cpufreq_hw_lmh_exit(struct qcom_cpufreq_data *data) if (data->throttle_irq <= 0) return; + freq_qos_remove_request(&data->throttle_freq_req); free_irq(data->throttle_irq, data); } diff --git a/drivers/cpufreq/sti-cpufreq.c b/drivers/cpufreq/sti-cpufreq.c index a67df90848c2..1a63aeea8711 100644 --- a/drivers/cpufreq/sti-cpufreq.c +++ b/drivers/cpufreq/sti-cpufreq.c @@ -252,7 +252,7 @@ static int sti_cpufreq_fetch_syscon_registers(void) return 0; } -static int sti_cpufreq_init(void) +static int __init sti_cpufreq_init(void) { int ret; diff --git a/drivers/cpufreq/tegra194-cpufreq.c b/drivers/cpufreq/tegra194-cpufreq.c index 1216046cf4c2..c2004cae3f02 100644 --- a/drivers/cpufreq/tegra194-cpufreq.c +++ b/drivers/cpufreq/tegra194-cpufreq.c @@ -38,14 +38,6 @@ /* cpufreq transisition latency */ #define TEGRA_CPUFREQ_TRANSITION_LATENCY (300 * 1000) /* unit in nanoseconds */ -enum cluster { - CLUSTER0, - CLUSTER1, - CLUSTER2, - CLUSTER3, - MAX_CLUSTERS, -}; - struct tegra_cpu_ctr { u32 cpu; u32 coreclk_cnt, last_coreclk_cnt; @@ -67,12 +59,12 @@ struct tegra_cpufreq_ops { struct tegra_cpufreq_soc { struct tegra_cpufreq_ops *ops; int maxcpus_per_cluster; + unsigned int num_clusters; phys_addr_t actmon_cntr_base; }; struct tegra194_cpufreq_data { void __iomem *regs; - size_t num_clusters; struct cpufreq_frequency_table **tables; const struct tegra_cpufreq_soc *soc; }; @@ -166,6 +158,14 @@ static const struct tegra_cpufreq_soc tegra234_cpufreq_soc = { .ops = &tegra234_cpufreq_ops, .actmon_cntr_base = 0x9000, .maxcpus_per_cluster = 4, + .num_clusters = 3, +}; + +static const struct tegra_cpufreq_soc tegra239_cpufreq_soc = { + .ops = &tegra234_cpufreq_ops, + .actmon_cntr_base = 0x4000, + .maxcpus_per_cluster = 8, + .num_clusters = 1, }; static void tegra194_get_cpu_cluster_id(u32 cpu, u32 *cpuid, u32 *clusterid) @@ -314,11 +314,7 @@ static void tegra194_get_cpu_ndiv_sysreg(void *ndiv) static int tegra194_get_cpu_ndiv(u32 cpu, u32 cpuid, u32 clusterid, u64 *ndiv) { - int ret; - - ret = smp_call_function_single(cpu, tegra194_get_cpu_ndiv_sysreg, &ndiv, true); - - return ret; + return smp_call_function_single(cpu, tegra194_get_cpu_ndiv_sysreg, &ndiv, true); } static void tegra194_set_cpu_ndiv_sysreg(void *data) @@ -382,7 +378,7 @@ static int tegra194_cpufreq_init(struct cpufreq_policy *policy) data->soc->ops->get_cpu_cluster_id(policy->cpu, NULL, &clusterid); - if (clusterid >= data->num_clusters || !data->tables[clusterid]) + if (clusterid >= data->soc->num_clusters || !data->tables[clusterid]) return -EINVAL; start_cpu = rounddown(policy->cpu, maxcpus_per_cluster); @@ -433,6 +429,7 @@ static struct tegra_cpufreq_ops tegra194_cpufreq_ops = { static const struct tegra_cpufreq_soc tegra194_cpufreq_soc = { .ops = &tegra194_cpufreq_ops, .maxcpus_per_cluster = 2, + .num_clusters = 4, }; static void tegra194_cpufreq_free_resources(void) @@ -525,15 +522,14 @@ static int tegra194_cpufreq_probe(struct platform_device *pdev) soc = of_device_get_match_data(&pdev->dev); - if (soc->ops && soc->maxcpus_per_cluster) { + if (soc->ops && soc->maxcpus_per_cluster && soc->num_clusters) { data->soc = soc; } else { dev_err(&pdev->dev, 
"soc data missing\n"); return -EINVAL; } - data->num_clusters = MAX_CLUSTERS; - data->tables = devm_kcalloc(&pdev->dev, data->num_clusters, + data->tables = devm_kcalloc(&pdev->dev, data->soc->num_clusters, sizeof(*data->tables), GFP_KERNEL); if (!data->tables) return -ENOMEM; @@ -558,7 +554,7 @@ static int tegra194_cpufreq_probe(struct platform_device *pdev) goto put_bpmp; } - for (i = 0; i < data->num_clusters; i++) { + for (i = 0; i < data->soc->num_clusters; i++) { data->tables[i] = init_freq_table(pdev, bpmp, i); if (IS_ERR(data->tables[i])) { err = PTR_ERR(data->tables[i]); @@ -590,6 +586,7 @@ static int tegra194_cpufreq_remove(struct platform_device *pdev) static const struct of_device_id tegra194_cpufreq_of_match[] = { { .compatible = "nvidia,tegra194-ccplex", .data = &tegra194_cpufreq_soc }, { .compatible = "nvidia,tegra234-ccplex-cluster", .data = &tegra234_cpufreq_soc }, + { .compatible = "nvidia,tegra239-ccplex-cluster", .data = &tegra239_cpufreq_soc }, { /* sentinel */ } }; diff --git a/drivers/cpufreq/ti-cpufreq.c b/drivers/cpufreq/ti-cpufreq.c index df85a77d476b..f64180dd2005 100644 --- a/drivers/cpufreq/ti-cpufreq.c +++ b/drivers/cpufreq/ti-cpufreq.c @@ -398,7 +398,7 @@ fail_put_node: return ret; } -static int ti_cpufreq_init(void) +static int __init ti_cpufreq_init(void) { const struct of_device_id *match; diff --git a/drivers/cpuidle/coupled.c b/drivers/cpuidle/coupled.c index 74068742cef3..9acde71558d5 100644 --- a/drivers/cpuidle/coupled.c +++ b/drivers/cpuidle/coupled.c @@ -54,7 +54,7 @@ * variable is not locked. It is only written from the cpu that * it stores (or by the on/offlining cpu if that cpu is offline), * and only read after all the cpus are ready for the coupled idle - * state are are no longer updating it. + * state are no longer updating it. * * Three atomic counters are used. 
alive_count tracks the number * of cpus in the coupled set that are currently or soon will be diff --git a/drivers/cpuidle/cpuidle-powernv.c b/drivers/cpuidle/cpuidle-powernv.c index c32c600b3cf8..0b5461b3d7dd 100644 --- a/drivers/cpuidle/cpuidle-powernv.c +++ b/drivers/cpuidle/cpuidle-powernv.c @@ -233,8 +233,8 @@ static inline void add_powernv_state(int index, const char *name, unsigned int exit_latency, u64 psscr_val, u64 psscr_mask) { - strlcpy(powernv_states[index].name, name, CPUIDLE_NAME_LEN); - strlcpy(powernv_states[index].desc, name, CPUIDLE_NAME_LEN); + strscpy(powernv_states[index].name, name, CPUIDLE_NAME_LEN); + strscpy(powernv_states[index].desc, name, CPUIDLE_NAME_LEN); powernv_states[index].flags = flags; powernv_states[index].target_residency = target_residency; powernv_states[index].exit_latency = exit_latency; diff --git a/drivers/cpuidle/governor.c b/drivers/cpuidle/governor.c index 29acaf48e575..0d0f9751ff8f 100644 --- a/drivers/cpuidle/governor.c +++ b/drivers/cpuidle/governor.c @@ -63,12 +63,11 @@ int cpuidle_switch_governor(struct cpuidle_governor *gov) cpuidle_curr_governor = gov; - if (gov) { - list_for_each_entry(dev, &cpuidle_detected_devices, device_list) - cpuidle_enable_device(dev); - cpuidle_install_idle_handler(); - printk(KERN_INFO "cpuidle: using governor %s\n", gov->name); - } + list_for_each_entry(dev, &cpuidle_detected_devices, device_list) + cpuidle_enable_device(dev); + + cpuidle_install_idle_handler(); + pr_info("cpuidle: using governor %s\n", gov->name); return 0; } diff --git a/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c b/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c index 2a60d0525cde..168195672e2e 100644 --- a/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c +++ b/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c @@ -56,6 +56,10 @@ static void virtio_crypto_akcipher_finalize_req( struct virtio_crypto_akcipher_request *vc_akcipher_req, struct akcipher_request *req, int err) { + kfree(vc_akcipher_req->src_buf); + kfree(vc_akcipher_req->dst_buf); + vc_akcipher_req->src_buf = NULL; + vc_akcipher_req->dst_buf = NULL; virtcrypto_clear_request(&vc_akcipher_req->base); crypto_finalize_akcipher_request(vc_akcipher_req->base.dataq->engine, req, err); diff --git a/drivers/devfreq/event/rockchip-dfi.c b/drivers/devfreq/event/rockchip-dfi.c index 9a88faaf8b27..39ac069cabc7 100644 --- a/drivers/devfreq/event/rockchip-dfi.c +++ b/drivers/devfreq/event/rockchip-dfi.c @@ -189,10 +189,9 @@ static int rockchip_dfi_probe(struct platform_device *pdev) return PTR_ERR(data->regs); data->clk = devm_clk_get(dev, "pclk_ddr_mon"); - if (IS_ERR(data->clk)) { - dev_err(dev, "Cannot get the clk dmc_clk\n"); - return PTR_ERR(data->clk); - } + if (IS_ERR(data->clk)) + return dev_err_probe(dev, PTR_ERR(data->clk), + "Cannot get the clk pclk_ddr_mon\n"); /* try to find the optional reference to the pmu syscon */ node = of_parse_phandle(np, "rockchip,pmu", 0); diff --git a/drivers/devfreq/mtk-cci-devfreq.c b/drivers/devfreq/mtk-cci-devfreq.c index 71abb3fbd042..e5458ada5197 100644 --- a/drivers/devfreq/mtk-cci-devfreq.c +++ b/drivers/devfreq/mtk-cci-devfreq.c @@ -291,9 +291,13 @@ static int mtk_ccifreq_probe(struct platform_device *pdev) } drv->sram_reg = devm_regulator_get_optional(dev, "sram"); - if (IS_ERR(drv->sram_reg)) + if (IS_ERR(drv->sram_reg)) { + ret = PTR_ERR(drv->sram_reg); + if (ret == -EPROBE_DEFER) + goto out_free_resources; + drv->sram_reg = NULL; - else { + } else { ret = regulator_enable(drv->sram_reg); if (ret) { dev_err(dev, "failed to 
enable sram regulator\n"); diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c index eb58644bb019..6faeb2ab3960 100644 --- a/drivers/edac/edac_mc.c +++ b/drivers/edac/edac_mc.c @@ -103,7 +103,6 @@ static void edac_mc_dump_dimm(struct dimm_info *dimm) edac_dbg(4, " dimm->label = '%s'\n", dimm->label); edac_dbg(4, " dimm->nr_pages = 0x%x\n", dimm->nr_pages); edac_dbg(4, " dimm->grain = %d\n", dimm->grain); - edac_dbg(4, " dimm->nr_pages = 0x%x\n", dimm->nr_pages); } static void edac_mc_dump_csrow(struct csrow_info *csrow) diff --git a/drivers/edac/edac_module.h b/drivers/edac/edac_module.h index 96f6de0c8ff6..50ed9f2425bb 100644 --- a/drivers/edac/edac_module.h +++ b/drivers/edac/edac_module.h @@ -28,13 +28,9 @@ void edac_mc_sysfs_exit(void); extern int edac_create_sysfs_mci_device(struct mem_ctl_info *mci, const struct attribute_group **groups); extern void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci); -extern int edac_get_log_ue(void); -extern int edac_get_log_ce(void); -extern int edac_get_panic_on_ue(void); extern int edac_mc_get_log_ue(void); extern int edac_mc_get_log_ce(void); extern int edac_mc_get_panic_on_ue(void); -extern int edac_get_poll_msec(void); extern unsigned int edac_mc_get_poll_msec(void); unsigned edac_dimm_info_location(struct dimm_info *dimm, char *buf, diff --git a/drivers/edac/i10nm_base.c b/drivers/edac/i10nm_base.c index 6cf50ee0b77c..a22ea053f8e1 100644 --- a/drivers/edac/i10nm_base.c +++ b/drivers/edac/i10nm_base.c @@ -74,31 +74,47 @@ static struct list_head *i10nm_edac_list; static struct res_config *res_cfg; static int retry_rd_err_log; +static int decoding_via_mca; +static bool mem_cfg_2lm; static u32 offsets_scrub_icx[] = {0x22c60, 0x22c54, 0x22c5c, 0x22c58, 0x22c28, 0x20ed8}; static u32 offsets_scrub_spr[] = {0x22c60, 0x22c54, 0x22f08, 0x22c58, 0x22c28, 0x20ed8}; +static u32 offsets_scrub_spr_hbm0[] = {0x2860, 0x2854, 0x2b08, 0x2858, 0x2828, 0x0ed8}; +static u32 offsets_scrub_spr_hbm1[] = {0x2c60, 0x2c54, 0x2f08, 0x2c58, 0x2c28, 0x0fa8}; static u32 offsets_demand_icx[] = {0x22e54, 0x22e60, 0x22e64, 0x22e58, 0x22e5c, 0x20ee0}; static u32 offsets_demand_spr[] = {0x22e54, 0x22e60, 0x22f10, 0x22e58, 0x22e5c, 0x20ee0}; +static u32 offsets_demand2_spr[] = {0x22c70, 0x22d80, 0x22f18, 0x22d58, 0x22c64, 0x20f10}; +static u32 offsets_demand_spr_hbm0[] = {0x2a54, 0x2a60, 0x2b10, 0x2a58, 0x2a5c, 0x0ee0}; +static u32 offsets_demand_spr_hbm1[] = {0x2e54, 0x2e60, 0x2f10, 0x2e58, 0x2e5c, 0x0fb0}; -static void __enable_retry_rd_err_log(struct skx_imc *imc, int chan, bool enable) +static void __enable_retry_rd_err_log(struct skx_imc *imc, int chan, bool enable, + u32 *offsets_scrub, u32 *offsets_demand, + u32 *offsets_demand2) { - u32 s, d; + u32 s, d, d2; - if (!imc->mbase) - return; - - s = I10NM_GET_REG32(imc, chan, res_cfg->offsets_scrub[0]); - d = I10NM_GET_REG32(imc, chan, res_cfg->offsets_demand[0]); + s = I10NM_GET_REG32(imc, chan, offsets_scrub[0]); + d = I10NM_GET_REG32(imc, chan, offsets_demand[0]); + if (offsets_demand2) + d2 = I10NM_GET_REG32(imc, chan, offsets_demand2[0]); if (enable) { /* Save default configurations */ imc->chan[chan].retry_rd_err_log_s = s; imc->chan[chan].retry_rd_err_log_d = d; + if (offsets_demand2) + imc->chan[chan].retry_rd_err_log_d2 = d2; s &= ~RETRY_RD_ERR_LOG_NOOVER_UC; s |= RETRY_RD_ERR_LOG_EN; d &= ~RETRY_RD_ERR_LOG_NOOVER_UC; d |= RETRY_RD_ERR_LOG_EN; + + if (offsets_demand2) { + d2 &= ~RETRY_RD_ERR_LOG_UC; + d2 |= RETRY_RD_ERR_LOG_NOOVER; + d2 |= RETRY_RD_ERR_LOG_EN; + } } else { /* Restore default 
configurations */ if (imc->chan[chan].retry_rd_err_log_s & RETRY_RD_ERR_LOG_UC) @@ -113,23 +129,55 @@ static void __enable_retry_rd_err_log(struct skx_imc *imc, int chan, bool enable d |= RETRY_RD_ERR_LOG_NOOVER; if (!(imc->chan[chan].retry_rd_err_log_d & RETRY_RD_ERR_LOG_EN)) d &= ~RETRY_RD_ERR_LOG_EN; + + if (offsets_demand2) { + if (imc->chan[chan].retry_rd_err_log_d2 & RETRY_RD_ERR_LOG_UC) + d2 |= RETRY_RD_ERR_LOG_UC; + if (!(imc->chan[chan].retry_rd_err_log_d2 & RETRY_RD_ERR_LOG_NOOVER)) + d2 &= ~RETRY_RD_ERR_LOG_NOOVER; + if (!(imc->chan[chan].retry_rd_err_log_d2 & RETRY_RD_ERR_LOG_EN)) + d2 &= ~RETRY_RD_ERR_LOG_EN; + } } - I10NM_SET_REG32(imc, chan, res_cfg->offsets_scrub[0], s); - I10NM_SET_REG32(imc, chan, res_cfg->offsets_demand[0], d); + I10NM_SET_REG32(imc, chan, offsets_scrub[0], s); + I10NM_SET_REG32(imc, chan, offsets_demand[0], d); + if (offsets_demand2) + I10NM_SET_REG32(imc, chan, offsets_demand2[0], d2); } static void enable_retry_rd_err_log(bool enable) { + struct skx_imc *imc; struct skx_dev *d; int i, j; edac_dbg(2, "\n"); list_for_each_entry(d, i10nm_edac_list, list) - for (i = 0; i < I10NM_NUM_IMC; i++) - for (j = 0; j < I10NM_NUM_CHANNELS; j++) - __enable_retry_rd_err_log(&d->imc[i], j, enable); + for (i = 0; i < I10NM_NUM_IMC; i++) { + imc = &d->imc[i]; + if (!imc->mbase) + continue; + + for (j = 0; j < I10NM_NUM_CHANNELS; j++) { + if (imc->hbm_mc) { + __enable_retry_rd_err_log(imc, j, enable, + res_cfg->offsets_scrub_hbm0, + res_cfg->offsets_demand_hbm0, + NULL); + __enable_retry_rd_err_log(imc, j, enable, + res_cfg->offsets_scrub_hbm1, + res_cfg->offsets_demand_hbm1, + NULL); + } else { + __enable_retry_rd_err_log(imc, j, enable, + res_cfg->offsets_scrub, + res_cfg->offsets_demand, + res_cfg->offsets_demand2); + } + } + } } static void show_retry_rd_err_log(struct decoded_addr *res, char *msg, @@ -138,14 +186,33 @@ static void show_retry_rd_err_log(struct decoded_addr *res, char *msg, struct skx_imc *imc = &res->dev->imc[res->imc]; u32 log0, log1, log2, log3, log4; u32 corr0, corr1, corr2, corr3; + u32 lxg0, lxg1, lxg3, lxg4; + u32 *xffsets = NULL; u64 log2a, log5; + u64 lxg2a, lxg5; u32 *offsets; - int n; + int n, pch; if (!imc->mbase) return; - offsets = scrub_err ? res_cfg->offsets_scrub : res_cfg->offsets_demand; + if (imc->hbm_mc) { + pch = res->cs & 1; + + if (pch) + offsets = scrub_err ? res_cfg->offsets_scrub_hbm1 : + res_cfg->offsets_demand_hbm1; + else + offsets = scrub_err ? 
res_cfg->offsets_scrub_hbm0 : + res_cfg->offsets_demand_hbm0; + } else { + if (scrub_err) { + offsets = res_cfg->offsets_scrub; + } else { + offsets = res_cfg->offsets_demand; + xffsets = res_cfg->offsets_demand2; + } + } log0 = I10NM_GET_REG32(imc, res->channel, offsets[0]); log1 = I10NM_GET_REG32(imc, res->channel, offsets[1]); @@ -153,20 +220,52 @@ static void show_retry_rd_err_log(struct decoded_addr *res, char *msg, log4 = I10NM_GET_REG32(imc, res->channel, offsets[4]); log5 = I10NM_GET_REG64(imc, res->channel, offsets[5]); + if (xffsets) { + lxg0 = I10NM_GET_REG32(imc, res->channel, xffsets[0]); + lxg1 = I10NM_GET_REG32(imc, res->channel, xffsets[1]); + lxg3 = I10NM_GET_REG32(imc, res->channel, xffsets[3]); + lxg4 = I10NM_GET_REG32(imc, res->channel, xffsets[4]); + lxg5 = I10NM_GET_REG64(imc, res->channel, xffsets[5]); + } + if (res_cfg->type == SPR) { log2a = I10NM_GET_REG64(imc, res->channel, offsets[2]); - n = snprintf(msg, len, " retry_rd_err_log[%.8x %.8x %.16llx %.8x %.8x %.16llx]", + n = snprintf(msg, len, " retry_rd_err_log[%.8x %.8x %.16llx %.8x %.8x %.16llx", log0, log1, log2a, log3, log4, log5); + + if (len - n > 0) { + if (xffsets) { + lxg2a = I10NM_GET_REG64(imc, res->channel, xffsets[2]); + n += snprintf(msg + n, len - n, " %.8x %.8x %.16llx %.8x %.8x %.16llx]", + lxg0, lxg1, lxg2a, lxg3, lxg4, lxg5); + } else { + n += snprintf(msg + n, len - n, "]"); + } + } } else { log2 = I10NM_GET_REG32(imc, res->channel, offsets[2]); n = snprintf(msg, len, " retry_rd_err_log[%.8x %.8x %.8x %.8x %.8x %.16llx]", log0, log1, log2, log3, log4, log5); } - corr0 = I10NM_GET_REG32(imc, res->channel, 0x22c18); - corr1 = I10NM_GET_REG32(imc, res->channel, 0x22c1c); - corr2 = I10NM_GET_REG32(imc, res->channel, 0x22c20); - corr3 = I10NM_GET_REG32(imc, res->channel, 0x22c24); + if (imc->hbm_mc) { + if (pch) { + corr0 = I10NM_GET_REG32(imc, res->channel, 0x2c18); + corr1 = I10NM_GET_REG32(imc, res->channel, 0x2c1c); + corr2 = I10NM_GET_REG32(imc, res->channel, 0x2c20); + corr3 = I10NM_GET_REG32(imc, res->channel, 0x2c24); + } else { + corr0 = I10NM_GET_REG32(imc, res->channel, 0x2818); + corr1 = I10NM_GET_REG32(imc, res->channel, 0x281c); + corr2 = I10NM_GET_REG32(imc, res->channel, 0x2820); + corr3 = I10NM_GET_REG32(imc, res->channel, 0x2824); + } + } else { + corr0 = I10NM_GET_REG32(imc, res->channel, 0x22c18); + corr1 = I10NM_GET_REG32(imc, res->channel, 0x22c1c); + corr2 = I10NM_GET_REG32(imc, res->channel, 0x22c20); + corr3 = I10NM_GET_REG32(imc, res->channel, 0x22c24); + } if (len - n > 0) snprintf(msg + n, len - n, @@ -177,9 +276,16 @@ static void show_retry_rd_err_log(struct decoded_addr *res, char *msg, corr3 & 0xffff, corr3 >> 16); /* Clear status bits */ - if (retry_rd_err_log == 2 && (log0 & RETRY_RD_ERR_LOG_OVER_UC_V)) { - log0 &= ~RETRY_RD_ERR_LOG_OVER_UC_V; - I10NM_SET_REG32(imc, res->channel, offsets[0], log0); + if (retry_rd_err_log == 2) { + if (log0 & RETRY_RD_ERR_LOG_OVER_UC_V) { + log0 &= ~RETRY_RD_ERR_LOG_OVER_UC_V; + I10NM_SET_REG32(imc, res->channel, offsets[0], log0); + } + + if (xffsets && (lxg0 & RETRY_RD_ERR_LOG_OVER_UC_V)) { + lxg0 &= ~RETRY_RD_ERR_LOG_OVER_UC_V; + I10NM_SET_REG32(imc, res->channel, xffsets[0], lxg0); + } } } @@ -231,6 +337,103 @@ static bool i10nm_check_2lm(struct res_config *cfg) return false; } +/* + * Check whether the error comes from DDRT by ICX/Tremont model specific error code. + * Refer to SDM vol3B 16.11.3 Intel IMC MC error codes for IA32_MCi_STATUS. 
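+ *
+ * A sketch of the field split this decodes; the layout follows the SDM
+ * definition of IA32_MCi_STATUS and the variable names are illustrative:
+ *
+ *	u16 mcacod = status & 0xffff;         (architectural MCA error code)
+ *	u16 mscod  = (status >> 16) & 0xffff; (model-specific error code)
+ *
+ * i10nm_mscod_is_ddrt() below then matches the mscod values that the
+ * memory controller reports for DDRT (persistent memory) transactions.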
+ */ +static bool i10nm_mscod_is_ddrt(u32 mscod) +{ + switch (mscod) { + case 0x0106: case 0x0107: + case 0x0800: case 0x0804: + case 0x0806 ... 0x0808: + case 0x080a ... 0x080e: + case 0x0810: case 0x0811: + case 0x0816: case 0x081e: + case 0x081f: + return true; + } + + return false; +} + +static bool i10nm_mc_decode_available(struct mce *mce) +{ + u8 bank; + + if (!decoding_via_mca || mem_cfg_2lm) + return false; + + if ((mce->status & (MCI_STATUS_MISCV | MCI_STATUS_ADDRV)) + != (MCI_STATUS_MISCV | MCI_STATUS_ADDRV)) + return false; + + bank = mce->bank; + + switch (res_cfg->type) { + case I10NM: + if (bank < 13 || bank > 26) + return false; + + /* DDRT errors can't be decoded from MCA bank registers */ + if (MCI_MISC_ECC_MODE(mce->misc) == MCI_MISC_ECC_DDRT) + return false; + + if (i10nm_mscod_is_ddrt(MCI_STATUS_MSCOD(mce->status))) + return false; + + /* Check whether one of {13,14,17,18,21,22,25,26} */ + return ((bank - 13) & BIT(1)) == 0; + default: + return false; + } +} + +static bool i10nm_mc_decode(struct decoded_addr *res) +{ + struct mce *m = res->mce; + struct skx_dev *d; + u8 bank; + + if (!i10nm_mc_decode_available(m)) + return false; + + list_for_each_entry(d, i10nm_edac_list, list) { + if (d->imc[0].src_id == m->socketid) { + res->socket = m->socketid; + res->dev = d; + break; + } + } + + switch (res_cfg->type) { + case I10NM: + bank = m->bank - 13; + res->imc = bank / 4; + res->channel = bank % 2; + break; + default: + return false; + } + + if (!res->dev) { + skx_printk(KERN_ERR, "No device for src_id %d imc %d\n", + m->socketid, res->imc); + return false; + } + + res->column = GET_BITFIELD(m->misc, 9, 18) << 2; + res->row = GET_BITFIELD(m->misc, 19, 39); + res->bank_group = GET_BITFIELD(m->misc, 40, 41); + res->bank_address = GET_BITFIELD(m->misc, 42, 43); + res->bank_group |= GET_BITFIELD(m->misc, 44, 44) << 2; + res->rank = GET_BITFIELD(m->misc, 56, 58); + res->dimm = res->rank >> 2; + res->rank = res->rank % 4; + + return true; +} + static int i10nm_get_ddr_munits(void) { struct pci_dev *mdev; @@ -420,7 +623,12 @@ static struct res_config spr_cfg = { .sad_all_devfn = PCI_DEVFN(10, 0), .sad_all_offset = 0x300, .offsets_scrub = offsets_scrub_spr, + .offsets_scrub_hbm0 = offsets_scrub_spr_hbm0, + .offsets_scrub_hbm1 = offsets_scrub_spr_hbm1, .offsets_demand = offsets_demand_spr, + .offsets_demand2 = offsets_demand2_spr, + .offsets_demand_hbm0 = offsets_demand_spr_hbm0, + .offsets_demand_hbm1 = offsets_demand_spr_hbm1, }; static const struct x86_cpu_id i10nm_cpuids[] = { @@ -574,7 +782,8 @@ static int __init i10nm_init(void) return -ENODEV; } - skx_set_mem_cfg(i10nm_check_2lm(cfg)); + mem_cfg_2lm = i10nm_check_2lm(cfg); + skx_set_mem_cfg(mem_cfg_2lm); rc = i10nm_get_ddr_munits(); @@ -626,9 +835,11 @@ static int __init i10nm_init(void) setup_i10nm_debug(); if (retry_rd_err_log && res_cfg->offsets_scrub && res_cfg->offsets_demand) { - skx_set_decode(NULL, show_retry_rd_err_log); + skx_set_decode(i10nm_mc_decode, show_retry_rd_err_log); if (retry_rd_err_log == 2) enable_retry_rd_err_log(true); + } else { + skx_set_decode(i10nm_mc_decode, NULL); } i10nm_printk(KERN_INFO, "%s\n", I10NM_REVISION); @@ -658,6 +869,34 @@ static void __exit i10nm_exit(void) module_init(i10nm_init); module_exit(i10nm_exit); +static int set_decoding_via_mca(const char *buf, const struct kernel_param *kp) +{ + unsigned long val; + int ret; + + ret = kstrtoul(buf, 0, &val); + + if (ret || val > 1) + return -EINVAL; + + if (val && mem_cfg_2lm) { + i10nm_printk(KERN_NOTICE, "Decoding errors via MCA 
banks for 2LM isn't supported yet\n"); + return -EIO; + } + + ret = param_set_int(buf, kp); + + return ret; +} + +static const struct kernel_param_ops decoding_via_mca_param_ops = { + .set = set_decoding_via_mca, + .get = param_get_int, +}; + +module_param_cb(decoding_via_mca, &decoding_via_mca_param_ops, &decoding_via_mca, 0644); +MODULE_PARM_DESC(decoding_via_mca, "decoding_via_mca: 0=off(default), 1=enable"); + module_param(retry_rd_err_log, int, 0444); MODULE_PARM_DESC(retry_rd_err_log, "retry_rd_err_log: 0=off(default), 1=bios(Linux doesn't reset any control bits, but just reports values.), 2=linux(Linux tries to take control and resets mode bits, clears valid/UC bits after reading.)"); diff --git a/drivers/edac/i7300_edac.c b/drivers/edac/i7300_edac.c index 4f28b8c8d378..61adaa872ba7 100644 --- a/drivers/edac/i7300_edac.c +++ b/drivers/edac/i7300_edac.c @@ -1193,7 +1193,7 @@ static int __init i7300_init(void) } /** - * i7300_init() - Unregisters the driver + * i7300_exit() - Unregisters the driver */ static void __exit i7300_exit(void) { diff --git a/drivers/edac/ie31200_edac.c b/drivers/edac/ie31200_edac.c index 9a9ff5ad611a..9ef13570f2e5 100644 --- a/drivers/edac/ie31200_edac.c +++ b/drivers/edac/ie31200_edac.c @@ -20,11 +20,15 @@ * 0c08: Xeon E3-1200 v3 Processor DRAM Controller * 1918: Xeon E3-1200 v5 Skylake Host Bridge/DRAM Registers * 5918: Xeon E3-1200 v6/7th Gen Core Processor Host Bridge/DRAM Registers + * 190f: 6th Gen Core Dual-Core Processor Host Bridge/DRAM Registers + * 191f: 6th Gen Core Quad-Core Processor Host Bridge/DRAM Registers * 3e..: 8th/9th Gen Core Processor Host Bridge/DRAM Registers * * Based on Intel specification: * https://www.intel.com/content/dam/www/public/us/en/documents/datasheets/xeon-e3-1200v3-vol-2-datasheet.pdf * http://www.intel.com/content/www/us/en/processors/xeon/xeon-e3-1200-family-vol-2-datasheet.html + * https://www.intel.com/content/dam/www/public/us/en/documents/datasheets/desktop-6th-gen-core-family-datasheet-vol-2.pdf + * https://www.intel.com/content/dam/www/public/us/en/documents/datasheets/xeon-e3-1200v6-vol-2-datasheet.pdf * https://www.intel.com/content/www/us/en/processors/core/7th-gen-core-family-mobile-h-processor-lines-datasheet-vol-2.html * https://www.intel.com/content/www/us/en/products/docs/processors/core/8th-gen-core-family-datasheet-vol-2.html * @@ -53,15 +57,17 @@ #define ie31200_printk(level, fmt, arg...) 
\ edac_printk(level, "ie31200", fmt, ##arg) -#define PCI_DEVICE_ID_INTEL_IE31200_HB_1 0x0108 -#define PCI_DEVICE_ID_INTEL_IE31200_HB_2 0x010c -#define PCI_DEVICE_ID_INTEL_IE31200_HB_3 0x0150 -#define PCI_DEVICE_ID_INTEL_IE31200_HB_4 0x0158 -#define PCI_DEVICE_ID_INTEL_IE31200_HB_5 0x015c -#define PCI_DEVICE_ID_INTEL_IE31200_HB_6 0x0c04 -#define PCI_DEVICE_ID_INTEL_IE31200_HB_7 0x0c08 -#define PCI_DEVICE_ID_INTEL_IE31200_HB_8 0x1918 -#define PCI_DEVICE_ID_INTEL_IE31200_HB_9 0x5918 +#define PCI_DEVICE_ID_INTEL_IE31200_HB_1 0x0108 +#define PCI_DEVICE_ID_INTEL_IE31200_HB_2 0x010c +#define PCI_DEVICE_ID_INTEL_IE31200_HB_3 0x0150 +#define PCI_DEVICE_ID_INTEL_IE31200_HB_4 0x0158 +#define PCI_DEVICE_ID_INTEL_IE31200_HB_5 0x015c +#define PCI_DEVICE_ID_INTEL_IE31200_HB_6 0x0c04 +#define PCI_DEVICE_ID_INTEL_IE31200_HB_7 0x0c08 +#define PCI_DEVICE_ID_INTEL_IE31200_HB_8 0x190F +#define PCI_DEVICE_ID_INTEL_IE31200_HB_9 0x1918 +#define PCI_DEVICE_ID_INTEL_IE31200_HB_10 0x191F +#define PCI_DEVICE_ID_INTEL_IE31200_HB_11 0x5918 /* Coffee Lake-S */ #define PCI_DEVICE_ID_INTEL_IE31200_HB_CFL_MASK 0x3e00 @@ -80,6 +86,8 @@ #define DEVICE_ID_SKYLAKE_OR_LATER(did) \ (((did) == PCI_DEVICE_ID_INTEL_IE31200_HB_8) || \ ((did) == PCI_DEVICE_ID_INTEL_IE31200_HB_9) || \ + ((did) == PCI_DEVICE_ID_INTEL_IE31200_HB_10) || \ + ((did) == PCI_DEVICE_ID_INTEL_IE31200_HB_11) || \ (((did) & PCI_DEVICE_ID_INTEL_IE31200_HB_CFL_MASK) == \ PCI_DEVICE_ID_INTEL_IE31200_HB_CFL_MASK)) @@ -577,6 +585,8 @@ static const struct pci_device_id ie31200_pci_tbl[] = { { PCI_VEND_DEV(INTEL, IE31200_HB_7), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 }, { PCI_VEND_DEV(INTEL, IE31200_HB_8), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 }, { PCI_VEND_DEV(INTEL, IE31200_HB_9), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 }, + { PCI_VEND_DEV(INTEL, IE31200_HB_10), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 }, + { PCI_VEND_DEV(INTEL, IE31200_HB_11), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 }, { PCI_VEND_DEV(INTEL, IE31200_HB_CFL_1), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 }, { PCI_VEND_DEV(INTEL, IE31200_HB_CFL_2), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 }, { PCI_VEND_DEV(INTEL, IE31200_HB_CFL_3), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 }, diff --git a/drivers/edac/ppc4xx_edac.c b/drivers/edac/ppc4xx_edac.c index 0bc670778c99..046969b4e82e 100644 --- a/drivers/edac/ppc4xx_edac.c +++ b/drivers/edac/ppc4xx_edac.c @@ -178,11 +178,6 @@ struct ppc4xx_ecc_status { u32 wmirq; }; -/* Function Prototypes */ - -static int ppc4xx_edac_probe(struct platform_device *device); -static int ppc4xx_edac_remove(struct platform_device *device); - /* Global Variables */ /* @@ -197,15 +192,6 @@ static const struct of_device_id ppc4xx_edac_match[] = { }; MODULE_DEVICE_TABLE(of, ppc4xx_edac_match); -static struct platform_driver ppc4xx_edac_driver = { - .probe = ppc4xx_edac_probe, - .remove = ppc4xx_edac_remove, - .driver = { - .name = PPC4XX_EDAC_MODULE_NAME, - .of_match_table = ppc4xx_edac_match, - }, -}; - /* * TODO: The row and channel parameters likely need to be dynamically * set based on the aforementioned variant controller realizations. 
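Editorial aside (not part of the patch): the two ppc4xx_edac.c hunks above and below only move the ppc4xx_edac_driver definition after the probe/remove implementations, so the "Function Prototypes" block deleted above becomes unnecessary. A minimal sketch of the resulting layout, using hypothetical mydrv names rather than anything from this patch:

#include <linux/platform_device.h>

static int mydrv_probe(struct platform_device *pdev)
{
	/* claim resources and register with the subsystem */
	return 0;
}

static int mydrv_remove(struct platform_device *pdev)
{
	/* undo everything done in probe */
	return 0;
}

/* Defined after the callbacks, so no forward declarations are needed. */
static struct platform_driver mydrv_driver = {
	.probe	= mydrv_probe,
	.remove	= mydrv_remove,
	.driver	= {
		.name = "mydrv",
	},
};

The reshuffle is purely textual; registration via module_platform_driver() or platform_driver_register() is unchanged.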
@@ -1391,6 +1377,15 @@ ppc4xx_edac_opstate_init(void) EDAC_OPSTATE_UNKNOWN_STR))); } +static struct platform_driver ppc4xx_edac_driver = { + .probe = ppc4xx_edac_probe, + .remove = ppc4xx_edac_remove, + .driver = { + .name = PPC4XX_EDAC_MODULE_NAME, + .of_match_table = ppc4xx_edac_match, + }, +}; + /** * ppc4xx_edac_init - driver/module insertion entry point * diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c index 9678ab97c7ac..8e39370fdb5c 100644 --- a/drivers/edac/sb_edac.c +++ b/drivers/edac/sb_edac.c @@ -335,6 +335,12 @@ struct sbridge_info { struct sbridge_channel { u32 ranks; u32 dimms; + struct dimm { + u32 rowbits; + u32 colbits; + u32 bank_xor_enable; + u32 amap_fine; + } dimm[MAX_DIMMS]; }; struct pci_id_descr { @@ -1603,7 +1609,7 @@ static int __populate_dimms(struct mem_ctl_info *mci, banks = 8; for (i = 0; i < channels; i++) { - u32 mtr; + u32 mtr, amap = 0; int max_dimms_per_channel; @@ -1615,6 +1621,7 @@ static int __populate_dimms(struct mem_ctl_info *mci, max_dimms_per_channel = ARRAY_SIZE(mtr_regs); if (!pvt->pci_tad[i]) continue; + pci_read_config_dword(pvt->pci_tad[i], 0x8c, &amap); } for (j = 0; j < max_dimms_per_channel; j++) { @@ -1627,6 +1634,7 @@ static int __populate_dimms(struct mem_ctl_info *mci, mtr_regs[j], &mtr); } edac_dbg(4, "Channel #%d MTR%d = %x\n", i, j, mtr); + if (IS_DIMM_PRESENT(mtr)) { if (!IS_ECC_ENABLED(pvt->info.mcmtr)) { sbridge_printk(KERN_ERR, "CPU SrcID #%d, Ha #%d, Channel #%d has DIMMs, but ECC is disabled\n", @@ -1661,6 +1669,11 @@ static int __populate_dimms(struct mem_ctl_info *mci, dimm->dtype = pvt->info.get_width(pvt, mtr); dimm->mtype = mtype; dimm->edac_mode = mode; + pvt->channel[i].dimm[j].rowbits = order_base_2(rows); + pvt->channel[i].dimm[j].colbits = order_base_2(cols); + pvt->channel[i].dimm[j].bank_xor_enable = + GET_BITFIELD(pvt->info.mcmtr, 9, 9); + pvt->channel[i].dimm[j].amap_fine = GET_BITFIELD(amap, 0, 0); snprintf(dimm->label, sizeof(dimm->label), "CPU_SrcID#%u_Ha#%u_Chan#%u_DIMM#%u", pvt->sbridge_dev->source_id, pvt->sbridge_dev->dom, i, j); @@ -1922,6 +1935,99 @@ static struct mem_ctl_info *get_mci_for_node_id(u8 node_id, u8 ha) return NULL; } +static u8 sb_close_row[] = { + 15, 16, 17, 18, 20, 21, 22, 28, 10, 11, 12, 13, 29, 30, 31, 32, 33 +}; + +static u8 sb_close_column[] = { + 3, 4, 5, 14, 19, 23, 24, 25, 26, 27 +}; + +static u8 sb_open_row[] = { + 14, 15, 16, 20, 28, 21, 22, 23, 24, 25, 26, 27, 29, 30, 31, 32, 33 +}; + +static u8 sb_open_column[] = { + 3, 4, 5, 6, 7, 8, 9, 10, 11, 12 +}; + +static u8 sb_open_fine_column[] = { + 3, 4, 5, 7, 8, 9, 10, 11, 12, 13 +}; + +static int sb_bits(u64 addr, int nbits, u8 *bits) +{ + int i, res = 0; + + for (i = 0; i < nbits; i++) + res |= ((addr >> bits[i]) & 1) << i; + return res; +} + +static int sb_bank_bits(u64 addr, int b0, int b1, int do_xor, int x0, int x1) +{ + int ret = GET_BITFIELD(addr, b0, b0) | (GET_BITFIELD(addr, b1, b1) << 1); + + if (do_xor) + ret ^= GET_BITFIELD(addr, x0, x0) | (GET_BITFIELD(addr, x1, x1) << 1); + + return ret; +} + +static bool sb_decode_ddr4(struct mem_ctl_info *mci, int ch, u8 rank, + u64 rank_addr, char *msg) +{ + int dimmno = 0; + int row, col, bank_address, bank_group; + struct sbridge_pvt *pvt; + u32 bg0 = 0, rowbits = 0, colbits = 0; + u32 amap_fine = 0, bank_xor_enable = 0; + + dimmno = (rank < 12) ? rank / 4 : 2; + pvt = mci->pvt_info; + amap_fine = pvt->channel[ch].dimm[dimmno].amap_fine; + bg0 = amap_fine ? 
6 : 13; + rowbits = pvt->channel[ch].dimm[dimmno].rowbits; + colbits = pvt->channel[ch].dimm[dimmno].colbits; + bank_xor_enable = pvt->channel[ch].dimm[dimmno].bank_xor_enable; + + if (pvt->is_lockstep) { + pr_warn_once("LockStep row/column decode is not supported yet!\n"); + msg[0] = '\0'; + return false; + } + + if (pvt->is_close_pg) { + row = sb_bits(rank_addr, rowbits, sb_close_row); + col = sb_bits(rank_addr, colbits, sb_close_column); + col |= 0x400; /* C10 is autoprecharge, always set */ + bank_address = sb_bank_bits(rank_addr, 8, 9, bank_xor_enable, 22, 28); + bank_group = sb_bank_bits(rank_addr, 6, 7, bank_xor_enable, 20, 21); + } else { + row = sb_bits(rank_addr, rowbits, sb_open_row); + if (amap_fine) + col = sb_bits(rank_addr, colbits, sb_open_fine_column); + else + col = sb_bits(rank_addr, colbits, sb_open_column); + bank_address = sb_bank_bits(rank_addr, 18, 19, bank_xor_enable, 22, 23); + bank_group = sb_bank_bits(rank_addr, bg0, 17, bank_xor_enable, 20, 21); + } + + row &= (1u << rowbits) - 1; + + sprintf(msg, "row:0x%x col:0x%x bank_addr:%d bank_group:%d", + row, col, bank_address, bank_group); + return true; +} + +static bool sb_decode_ddr3(struct mem_ctl_info *mci, int ch, u8 rank, + u64 rank_addr, char *msg) +{ + pr_warn_once("DDR3 row/column decode is not supported yet!\n"); + msg[0] = '\0'; + return false; +} + static int get_memory_error_data(struct mem_ctl_info *mci, u64 addr, u8 *socket, u8 *ha, @@ -1937,12 +2043,13 @@ static int get_memory_error_data(struct mem_ctl_info *mci, int interleave_mode, shiftup = 0; unsigned int sad_interleave[MAX_INTERLEAVE]; u32 reg, dram_rule; - u8 ch_way, sck_way, pkg, sad_ha = 0; + u8 ch_way, sck_way, pkg, sad_ha = 0, rankid = 0; u32 tad_offset; u32 rir_way; u32 mb, gb; u64 ch_addr, offset, limit = 0, prv = 0; - + u64 rank_addr; + enum mem_type mtype; /* * Step 0) Check if the address is at special memory ranges @@ -2226,6 +2333,28 @@ static int get_memory_error_data(struct mem_ctl_info *mci, pci_read_config_dword(pvt->pci_tad[base_ch], rir_offset[n_rir][idx], &reg); *rank = RIR_RNK_TGT(pvt->info.type, reg); + if (pvt->info.type == BROADWELL) { + if (pvt->is_close_pg) + shiftup = 6; + else + shiftup = 13; + + rank_addr = ch_addr >> shiftup; + rank_addr /= (1 << rir_way); + rank_addr <<= shiftup; + rank_addr |= ch_addr & GENMASK_ULL(shiftup - 1, 0); + rank_addr -= RIR_OFFSET(pvt->info.type, reg); + + mtype = pvt->info.get_memory_type(pvt); + rankid = *rank; + if (mtype == MEM_DDR4 || mtype == MEM_RDDR4) + sb_decode_ddr4(mci, base_ch, rankid, rank_addr, msg); + else + sb_decode_ddr3(mci, base_ch, rankid, rank_addr, msg); + } else { + msg[0] = '\0'; + } + edac_dbg(0, "RIR#%d: channel address 0x%08Lx < 0x%08Lx, RIR interleave %d, index %d\n", n_rir, ch_addr, @@ -2950,7 +3079,7 @@ static void sbridge_mce_output_error(struct mem_ctl_info *mci, struct mem_ctl_info *new_mci; struct sbridge_pvt *pvt = mci->pvt_info; enum hw_event_mc_err_type tp_event; - char *optype, msg[256]; + char *optype, msg[256], msg_full[512]; bool ripv = GET_BITFIELD(m->mcgstatus, 0, 0); bool overflow = GET_BITFIELD(m->status, 62, 62); bool uncorrected_error = GET_BITFIELD(m->status, 61, 61); @@ -3089,18 +3218,17 @@ static void sbridge_mce_output_error(struct mem_ctl_info *mci, */ if (!pvt->is_lockstep && !pvt->is_cur_addr_mirrored && !pvt->is_close_pg) channel = first_channel; - - snprintf(msg, sizeof(msg), - "%s%s area:%s err_code:%04x:%04x socket:%d ha:%d channel_mask:%ld rank:%d", + snprintf(msg_full, sizeof(msg_full), + "%s%s area:%s err_code:%04x:%04x socket:%d ha:%d 
channel_mask:%ld rank:%d %s", overflow ? " OVERFLOW" : "", (uncorrected_error && recoverable) ? " recoverable" : "", area_type, mscod, errcode, socket, ha, channel_mask, - rank); + rank, msg); - edac_dbg(0, "%s\n", msg); + edac_dbg(0, "%s\n", msg_full); /* FIXME: need support for channel mask */ @@ -3111,7 +3239,7 @@ static void sbridge_mce_output_error(struct mem_ctl_info *mci, edac_mc_handle_error(tp_event, mci, core_err_cnt, m->addr >> PAGE_SHIFT, m->addr & ~PAGE_MASK, 0, channel, dimm, -1, - optype, msg); + optype, msg_full); return; err_parsing: edac_mc_handle_error(tp_event, mci, core_err_cnt, 0, 0, 0, diff --git a/drivers/edac/skx_base.c b/drivers/edac/skx_base.c index 1abc020d49ab..7e2762f62eec 100644 --- a/drivers/edac/skx_base.c +++ b/drivers/edac/skx_base.c @@ -714,8 +714,13 @@ static int __init skx_init(void) skx_set_decode(skx_decode, skx_show_retry_rd_err_log); - if (nvdimm_count && skx_adxl_get() == -ENODEV) - skx_printk(KERN_NOTICE, "Only decoding DDR4 address!\n"); + if (nvdimm_count && skx_adxl_get() != -ENODEV) { + skx_set_decode(NULL, skx_show_retry_rd_err_log); + } else { + if (nvdimm_count) + skx_printk(KERN_NOTICE, "Only decoding DDR4 address!\n"); + skx_set_decode(skx_decode, skx_show_retry_rd_err_log); + } /* Ensure that the OPSTATE is set correctly for POLL or NMI */ opstate_init(); diff --git a/drivers/edac/skx_common.c b/drivers/edac/skx_common.c index 19c17c5198c5..f0f8e98f6efb 100644 --- a/drivers/edac/skx_common.c +++ b/drivers/edac/skx_common.c @@ -27,9 +27,11 @@ static const char * const component_names[] = { [INDEX_MEMCTRL] = "MemoryControllerId", [INDEX_CHANNEL] = "ChannelId", [INDEX_DIMM] = "DimmSlotId", + [INDEX_CS] = "ChipSelect", [INDEX_NM_MEMCTRL] = "NmMemoryControllerId", [INDEX_NM_CHANNEL] = "NmChannelId", [INDEX_NM_DIMM] = "NmDimmSlotId", + [INDEX_NM_CS] = "NmChipSelect", }; static int component_indices[ARRAY_SIZE(component_names)]; @@ -40,7 +42,7 @@ static char *adxl_msg; static unsigned long adxl_nm_bitmap; static char skx_msg[MSG_SIZE]; -static skx_decode_f skx_decode; +static skx_decode_f driver_decode; static skx_show_retry_log_f skx_show_retry_rd_err_log; static u64 skx_tolm, skx_tohm; static LIST_HEAD(dev_edac_list); @@ -139,10 +141,13 @@ static bool skx_adxl_decode(struct decoded_addr *res, bool error_in_1st_level_me (int)adxl_values[component_indices[INDEX_NM_CHANNEL]] : -1; res->dimm = (adxl_nm_bitmap & BIT_NM_DIMM) ? (int)adxl_values[component_indices[INDEX_NM_DIMM]] : -1; + res->cs = (adxl_nm_bitmap & BIT_NM_CS) ? + (int)adxl_values[component_indices[INDEX_NM_CS]] : -1; } else { res->imc = (int)adxl_values[component_indices[INDEX_MEMCTRL]]; res->channel = (int)adxl_values[component_indices[INDEX_CHANNEL]]; res->dimm = (int)adxl_values[component_indices[INDEX_DIMM]]; + res->cs = (int)adxl_values[component_indices[INDEX_CS]]; } if (res->imc > NUM_IMC - 1 || res->imc < 0) { @@ -173,6 +178,8 @@ static bool skx_adxl_decode(struct decoded_addr *res, bool error_in_1st_level_me break; } + res->decoded_by_adxl = true; + return true; } @@ -183,7 +190,7 @@ void skx_set_mem_cfg(bool mem_cfg_2lm) void skx_set_decode(skx_decode_f decode, skx_show_retry_log_f show_retry_log) { - skx_decode = decode; + driver_decode = decode; skx_show_retry_rd_err_log = show_retry_log; } @@ -591,19 +598,19 @@ static void skx_mce_output_error(struct mem_ctl_info *mci, break; } } - if (adxl_component_count) { + if (res->decoded_by_adxl) { len = snprintf(skx_msg, MSG_SIZE, "%s%s err_code:0x%04x:0x%04x %s", overflow ? " OVERFLOW" : "", (uncorrected_error && recoverable) ? 
" recoverable" : "", mscod, errcode, adxl_msg); } else { len = snprintf(skx_msg, MSG_SIZE, - "%s%s err_code:0x%04x:0x%04x socket:%d imc:%d rank:%d bg:%d ba:%d row:0x%x col:0x%x", + "%s%s err_code:0x%04x:0x%04x ProcessorSocketId:0x%x MemoryControllerId:0x%x PhysicalRankId:0x%x Row:0x%x Column:0x%x Bank:0x%x BankGroup:0x%x", overflow ? " OVERFLOW" : "", (uncorrected_error && recoverable) ? " recoverable" : "", mscod, errcode, res->socket, res->imc, res->rank, - res->bank_group, res->bank_address, res->row, res->column); + res->row, res->column, res->bank_address, res->bank_group); } if (skx_show_retry_rd_err_log) @@ -649,13 +656,14 @@ int skx_mce_check_error(struct notifier_block *nb, unsigned long val, return NOTIFY_DONE; memset(&res, 0, sizeof(res)); + res.mce = mce; res.addr = mce->addr; - if (adxl_component_count) { - if (!skx_adxl_decode(&res, skx_error_in_1st_level_mem(mce))) + /* Try driver decoder first */ + if (!(driver_decode && driver_decode(&res))) { + /* Then try firmware decoder (ACPI DSM methods) */ + if (!(adxl_component_count && skx_adxl_decode(&res, skx_error_in_1st_level_mem(mce)))) return NOTIFY_DONE; - } else if (!skx_decode || !skx_decode(&res)) { - return NOTIFY_DONE; } mci = res.dev->imc[res.imc].mci; diff --git a/drivers/edac/skx_common.h b/drivers/edac/skx_common.h index 03ac067a80b9..0cbadd3d2cd3 100644 --- a/drivers/edac/skx_common.h +++ b/drivers/edac/skx_common.h @@ -10,6 +10,7 @@ #define _SKX_COMM_EDAC_H #include <linux/bits.h> +#include <asm/mce.h> #define MSG_SIZE 1024 @@ -52,6 +53,9 @@ #define IS_DIMM_PRESENT(r) GET_BITFIELD(r, 15, 15) #define IS_NVDIMM_PRESENT(r, i) GET_BITFIELD(r, i, i) +#define MCI_MISC_ECC_MODE(m) (((m) >> 59) & 15) +#define MCI_MISC_ECC_DDRT 8 /* read from DDRT */ + /* * Each cpu socket contains some pci devices that provide global * information, and also some that are local to each of the two @@ -82,6 +86,7 @@ struct skx_dev { struct pci_dev *edev; u32 retry_rd_err_log_s; u32 retry_rd_err_log_d; + u32 retry_rd_err_log_d2; struct skx_dimm { u8 close_pg; u8 bank_xor_enable; @@ -108,18 +113,22 @@ enum { INDEX_MEMCTRL, INDEX_CHANNEL, INDEX_DIMM, + INDEX_CS, INDEX_NM_FIRST, INDEX_NM_MEMCTRL = INDEX_NM_FIRST, INDEX_NM_CHANNEL, INDEX_NM_DIMM, + INDEX_NM_CS, INDEX_MAX }; #define BIT_NM_MEMCTRL BIT_ULL(INDEX_NM_MEMCTRL) #define BIT_NM_CHANNEL BIT_ULL(INDEX_NM_CHANNEL) #define BIT_NM_DIMM BIT_ULL(INDEX_NM_DIMM) +#define BIT_NM_CS BIT_ULL(INDEX_NM_CS) struct decoded_addr { + struct mce *mce; struct skx_dev *dev; u64 addr; int socket; @@ -129,6 +138,7 @@ struct decoded_addr { int sktways; int chanways; int dimm; + int cs; int rank; int channel_rank; u64 rank_address; @@ -136,6 +146,7 @@ struct decoded_addr { int column; int bank_address; int bank_group; + bool decoded_by_adxl; }; struct res_config { @@ -154,7 +165,12 @@ struct res_config { int sad_all_offset; /* Offsets of retry_rd_err_log registers */ u32 *offsets_scrub; + u32 *offsets_scrub_hbm0; + u32 *offsets_scrub_hbm1; u32 *offsets_demand; + u32 *offsets_demand2; + u32 *offsets_demand_hbm0; + u32 *offsets_demand_hbm1; }; typedef int (*get_dimm_config_f)(struct mem_ctl_info *mci, diff --git a/drivers/edac/wq.c b/drivers/edac/wq.c index d021d287eaec..ad3f516627c5 100644 --- a/drivers/edac/wq.c +++ b/drivers/edac/wq.c @@ -37,7 +37,6 @@ int edac_workqueue_setup(void) void edac_workqueue_teardown(void) { - flush_workqueue(wq); destroy_workqueue(wq); wq = NULL; } diff --git a/drivers/firmware/efi/dev-path-parser.c b/drivers/firmware/efi/dev-path-parser.c index eb9c65f97841..f80d87c199c3 100644 --- 
a/drivers/firmware/efi/dev-path-parser.c +++ b/drivers/firmware/efi/dev-path-parser.c @@ -15,9 +15,11 @@ static long __init parse_acpi_path(const struct efi_dev_path *node, struct device *parent, struct device **child) { - char hid[ACPI_ID_LEN], uid[11]; /* UINT_MAX + null byte */ struct acpi_device *adev; struct device *phys_dev; + char hid[ACPI_ID_LEN]; + u64 uid; + int ret; if (node->header.length != 12) return -EINVAL; @@ -27,12 +29,12 @@ static long __init parse_acpi_path(const struct efi_dev_path *node, 'A' + ((node->acpi.hid >> 5) & 0x1f) - 1, 'A' + ((node->acpi.hid >> 0) & 0x1f) - 1, node->acpi.hid >> 16); - sprintf(uid, "%u", node->acpi.uid); for_each_acpi_dev_match(adev, hid, NULL, -1) { - if (adev->pnp.unique_id && !strcmp(adev->pnp.unique_id, uid)) + ret = acpi_dev_uid_to_integer(adev, &uid); + if (ret == 0 && node->acpi.uid == uid) break; - if (!adev->pnp.unique_id && node->acpi.uid == 0) + if (ret == -ENODATA && node->acpi.uid == 0) break; } if (!adev) diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile index 2c67f71f2375..b43fdb319fd4 100644 --- a/drivers/firmware/efi/libstub/Makefile +++ b/drivers/firmware/efi/libstub/Makefile @@ -46,6 +46,8 @@ KBUILD_CFLAGS := $(filter-out $(RANDSTRUCT_CFLAGS), $(KBUILD_CFLAGS)) # remove SCS flags from all objects in this directory KBUILD_CFLAGS := $(filter-out $(CC_FLAGS_SCS), $(KBUILD_CFLAGS)) +# disable CFI +KBUILD_CFLAGS := $(filter-out $(CC_FLAGS_CFI), $(KBUILD_CFLAGS)) # disable LTO KBUILD_CFLAGS := $(filter-out $(CC_FLAGS_LTO), $(KBUILD_CFLAGS)) diff --git a/drivers/firmware/psci/psci.c b/drivers/firmware/psci/psci.c index cfb448eabdaa..bc6b5a12bf74 100644 --- a/drivers/firmware/psci/psci.c +++ b/drivers/firmware/psci/psci.c @@ -274,7 +274,7 @@ static void set_conduit(enum arm_smccc_conduit conduit) psci_conduit = conduit; } -static int get_set_conduit_method(struct device_node *np) +static int get_set_conduit_method(const struct device_node *np) { const char *method; @@ -334,7 +334,7 @@ static int __init psci_features(u32 psci_func_id) static int psci_suspend_finisher(unsigned long state) { u32 power_state = state; - phys_addr_t pa_cpu_resume = __pa_symbol(function_nocfi(cpu_resume)); + phys_addr_t pa_cpu_resume = __pa_symbol(cpu_resume); return psci_ops.cpu_suspend(power_state, pa_cpu_resume); } @@ -359,7 +359,7 @@ int psci_cpu_suspend_enter(u32 state) static int psci_system_suspend(unsigned long unused) { - phys_addr_t pa_cpu_resume = __pa_symbol(function_nocfi(cpu_resume)); + phys_addr_t pa_cpu_resume = __pa_symbol(cpu_resume); return invoke_psci_fn(PSCI_FN_NATIVE(1_0, SYSTEM_SUSPEND), pa_cpu_resume, 0, 0); @@ -528,7 +528,7 @@ typedef int (*psci_initcall_t)(const struct device_node *); * * Probe based on PSCI PSCI_VERSION function */ -static int __init psci_0_2_init(struct device_node *np) +static int __init psci_0_2_init(const struct device_node *np) { int err; @@ -549,7 +549,7 @@ static int __init psci_0_2_init(struct device_node *np) /* * PSCI < v0.2 get PSCI Function IDs via DT. 
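 *
 * (Editorial aside, not part of the patch: on such firmware the function IDs
 * typically come from DT properties like "cpu_on" and "cpu_off" in the psci
 * node, which is what psci_0_1_init() below parses; from PSCI 0.2 onward the
 * IDs are fixed by the specification, so only the conduit, "smc" or "hvc",
 * is read from the DT.)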
*/ -static int __init psci_0_1_init(struct device_node *np) +static int __init psci_0_1_init(const struct device_node *np) { u32 id; int err; @@ -585,7 +585,7 @@ static int __init psci_0_1_init(struct device_node *np) return 0; } -static int __init psci_1_0_init(struct device_node *np) +static int __init psci_1_0_init(const struct device_node *np) { int err; diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c index aa126ab80f0c..1bb317b8dcce 100644 --- a/drivers/gpio/gpio-mvebu.c +++ b/drivers/gpio/gpio-mvebu.c @@ -790,8 +790,12 @@ static int mvebu_pwm_probe(struct platform_device *pdev, u32 offset; u32 set; - if (of_device_is_compatible(mvchip->chip.of_node, - "marvell,armada-370-gpio")) { + if (mvchip->soc_variant == MVEBU_GPIO_SOC_VARIANT_A8K) { + int ret = of_property_read_u32(dev->of_node, + "marvell,pwm-offset", &offset); + if (ret < 0) + return 0; + } else { /* * There are only two sets of PWM configuration registers for * all the GPIO lines on those SoCs which this driver reserves @@ -801,13 +805,6 @@ static int mvebu_pwm_probe(struct platform_device *pdev, if (!platform_get_resource_byname(pdev, IORESOURCE_MEM, "pwm")) return 0; offset = 0; - } else if (mvchip->soc_variant == MVEBU_GPIO_SOC_VARIANT_A8K) { - int ret = of_property_read_u32(dev->of_node, - "marvell,pwm-offset", &offset); - if (ret < 0) - return 0; - } else { - return 0; } if (IS_ERR(mvchip->clk)) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c index 130060834b4e..48bd660ddb85 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c @@ -1050,6 +1050,10 @@ bool amdgpu_acpi_should_gpu_reset(struct amdgpu_device *adev) { if (adev->flags & AMD_IS_APU) return false; + + if (amdgpu_sriov_vf(adev)) + return false; + return pm_suspend_target_state != PM_SUSPEND_TO_IDLE; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index be7aff2d4a57..25e1f5ed7ead 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -3152,7 +3152,8 @@ static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev) continue; if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON || adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC || - adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) { + adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH || + (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP && amdgpu_sriov_vf(adev))) { r = adev->ip_blocks[i].version->funcs->resume(adev); if (r) { @@ -4064,12 +4065,20 @@ static void amdgpu_device_evict_resources(struct amdgpu_device *adev) int amdgpu_device_suspend(struct drm_device *dev, bool fbcon) { struct amdgpu_device *adev = drm_to_adev(dev); + int r = 0; if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) return 0; adev->in_suspend = true; + if (amdgpu_sriov_vf(adev)) { + amdgpu_virt_fini_data_exchange(adev); + r = amdgpu_virt_request_full_gpu(adev, false); + if (r) + return r; + } + if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D3)) DRM_WARN("smart shift update failed\n"); @@ -4093,6 +4102,9 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon) amdgpu_device_ip_suspend_phase2(adev); + if (amdgpu_sriov_vf(adev)) + amdgpu_virt_release_full_gpu(adev, false); + return 0; } @@ -4111,6 +4123,12 @@ int amdgpu_device_resume(struct drm_device *dev, bool fbcon) struct amdgpu_device *adev = drm_to_adev(dev); int r = 0; + if (amdgpu_sriov_vf(adev)) 
{ + r = amdgpu_virt_request_full_gpu(adev, true); + if (r) + return r; + } + if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) return 0; @@ -4125,6 +4143,13 @@ int amdgpu_device_resume(struct drm_device *dev, bool fbcon) } r = amdgpu_device_ip_resume(adev); + + /* no matter what r is, always need to properly release full GPU */ + if (amdgpu_sriov_vf(adev)) { + amdgpu_virt_init_data_exchange(adev); + amdgpu_virt_release_full_gpu(adev, true); + } + if (r) { dev_err(adev->dev, "amdgpu_device_ip_resume failed (%d).\n", r); return r; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c index 8adeb7469f1e..d0d99ed607dd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c @@ -400,7 +400,6 @@ unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring) /* We are not protected by ring lock when reading the last sequence * but it's ok to report slightly wrong fence count here. */ - amdgpu_fence_process(ring); emitted = 0x100000000ull; emitted -= atomic_read(&ring->fence_drv.last_seq); emitted += READ_ONCE(ring->fence_drv.sync_seq); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h index 23a696d38390..de5b936b016d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h @@ -304,6 +304,10 @@ struct amdgpu_gfx { uint32_t rlc_srlg_feature_version; uint32_t rlc_srls_fw_version; uint32_t rlc_srls_feature_version; + uint32_t rlcp_ucode_version; + uint32_t rlcp_ucode_feature_version; + uint32_t rlcv_ucode_version; + uint32_t rlcv_ucode_feature_version; uint32_t mec_feature_version; uint32_t mec2_feature_version; bool mec_fw_write_wait; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h index 7b46f6bf4187..ad980f4b66e1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h @@ -222,6 +222,8 @@ struct mes_add_queue_input { uint64_t tba_addr; uint64_t tma_addr; uint32_t is_kfd_process; + uint32_t is_aql_queue; + uint32_t queue_size; }; struct mes_remove_queue_input { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c index 6373bfb47d55..e23f6192c50e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c @@ -272,3 +272,267 @@ void amdgpu_gfx_rlc_fini(struct amdgpu_device *adev) &adev->gfx.rlc.cp_table_gpu_addr, (void **)&adev->gfx.rlc.cp_table_ptr); } + +static int amdgpu_gfx_rlc_init_microcode_v2_0(struct amdgpu_device *adev) +{ + const struct common_firmware_header *common_hdr; + const struct rlc_firmware_header_v2_0 *rlc_hdr; + struct amdgpu_firmware_info *info; + unsigned int *tmp; + unsigned int i; + + rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data; + + adev->gfx.rlc_fw_version = le32_to_cpu(rlc_hdr->header.ucode_version); + adev->gfx.rlc_feature_version = le32_to_cpu(rlc_hdr->ucode_feature_version); + adev->gfx.rlc.save_and_restore_offset = + le32_to_cpu(rlc_hdr->save_and_restore_offset); + adev->gfx.rlc.clear_state_descriptor_offset = + le32_to_cpu(rlc_hdr->clear_state_descriptor_offset); + adev->gfx.rlc.avail_scratch_ram_locations = + le32_to_cpu(rlc_hdr->avail_scratch_ram_locations); + adev->gfx.rlc.reg_restore_list_size = + le32_to_cpu(rlc_hdr->reg_restore_list_size); + adev->gfx.rlc.reg_list_format_start = + le32_to_cpu(rlc_hdr->reg_list_format_start); + adev->gfx.rlc.reg_list_format_separate_start = + 
le32_to_cpu(rlc_hdr->reg_list_format_separate_start); + adev->gfx.rlc.starting_offsets_start = + le32_to_cpu(rlc_hdr->starting_offsets_start); + adev->gfx.rlc.reg_list_format_size_bytes = + le32_to_cpu(rlc_hdr->reg_list_format_size_bytes); + adev->gfx.rlc.reg_list_size_bytes = + le32_to_cpu(rlc_hdr->reg_list_size_bytes); + adev->gfx.rlc.register_list_format = + kmalloc(adev->gfx.rlc.reg_list_format_size_bytes + + adev->gfx.rlc.reg_list_size_bytes, GFP_KERNEL); + if (!adev->gfx.rlc.register_list_format) { + dev_err(adev->dev, "failed to allocate memory for rlc register_list_format\n"); + return -ENOMEM; + } + + tmp = (unsigned int *)((uintptr_t)rlc_hdr + + le32_to_cpu(rlc_hdr->reg_list_format_array_offset_bytes)); + for (i = 0 ; i < (rlc_hdr->reg_list_format_size_bytes >> 2); i++) + adev->gfx.rlc.register_list_format[i] = le32_to_cpu(tmp[i]); + + adev->gfx.rlc.register_restore = adev->gfx.rlc.register_list_format + i; + + tmp = (unsigned int *)((uintptr_t)rlc_hdr + + le32_to_cpu(rlc_hdr->reg_list_array_offset_bytes)); + for (i = 0 ; i < (rlc_hdr->reg_list_size_bytes >> 2); i++) + adev->gfx.rlc.register_restore[i] = le32_to_cpu(tmp[i]); + + if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { + info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_G]; + info->ucode_id = AMDGPU_UCODE_ID_RLC_G; + info->fw = adev->gfx.rlc_fw; + if (info->fw) { + common_hdr = (const struct common_firmware_header *)info->fw->data; + adev->firmware.fw_size += + ALIGN(le32_to_cpu(common_hdr->ucode_size_bytes), PAGE_SIZE); + } + } + + return 0; +} + +static void amdgpu_gfx_rlc_init_microcode_v2_1(struct amdgpu_device *adev) +{ + const struct rlc_firmware_header_v2_1 *rlc_hdr; + struct amdgpu_firmware_info *info; + + rlc_hdr = (const struct rlc_firmware_header_v2_1 *)adev->gfx.rlc_fw->data; + adev->gfx.rlc_srlc_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_cntl_ucode_ver); + adev->gfx.rlc_srlc_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_cntl_feature_ver); + adev->gfx.rlc.save_restore_list_cntl_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_cntl_size_bytes); + adev->gfx.rlc.save_restore_list_cntl = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_cntl_offset_bytes); + adev->gfx.rlc_srlg_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_gpm_ucode_ver); + adev->gfx.rlc_srlg_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_gpm_feature_ver); + adev->gfx.rlc.save_restore_list_gpm_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_gpm_size_bytes); + adev->gfx.rlc.save_restore_list_gpm = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_gpm_offset_bytes); + adev->gfx.rlc_srls_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_srm_ucode_ver); + adev->gfx.rlc_srls_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_srm_feature_ver); + adev->gfx.rlc.save_restore_list_srm_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_srm_size_bytes); + adev->gfx.rlc.save_restore_list_srm = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_srm_offset_bytes); + adev->gfx.rlc.reg_list_format_direct_reg_list_length = + le32_to_cpu(rlc_hdr->reg_list_format_direct_reg_list_length); + + if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { + if (adev->gfx.rlc.save_restore_list_gpm_size_bytes) { + info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM]; + info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM; + info->fw = adev->gfx.rlc_fw; + adev->firmware.fw_size += + ALIGN(adev->gfx.rlc.save_restore_list_gpm_size_bytes, PAGE_SIZE); + } + + if 
(adev->gfx.rlc.save_restore_list_srm_size_bytes) { + info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM]; + info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM; + info->fw = adev->gfx.rlc_fw; + adev->firmware.fw_size += + ALIGN(adev->gfx.rlc.save_restore_list_srm_size_bytes, PAGE_SIZE); + } + } +} + +static void amdgpu_gfx_rlc_init_microcode_v2_2(struct amdgpu_device *adev) +{ + const struct rlc_firmware_header_v2_2 *rlc_hdr; + struct amdgpu_firmware_info *info; + + rlc_hdr = (const struct rlc_firmware_header_v2_2 *)adev->gfx.rlc_fw->data; + adev->gfx.rlc.rlc_iram_ucode_size_bytes = le32_to_cpu(rlc_hdr->rlc_iram_ucode_size_bytes); + adev->gfx.rlc.rlc_iram_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->rlc_iram_ucode_offset_bytes); + adev->gfx.rlc.rlc_dram_ucode_size_bytes = le32_to_cpu(rlc_hdr->rlc_dram_ucode_size_bytes); + adev->gfx.rlc.rlc_dram_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->rlc_dram_ucode_offset_bytes); + + if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { + if (adev->gfx.rlc.rlc_iram_ucode_size_bytes) { + info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_IRAM]; + info->ucode_id = AMDGPU_UCODE_ID_RLC_IRAM; + info->fw = adev->gfx.rlc_fw; + adev->firmware.fw_size += + ALIGN(adev->gfx.rlc.rlc_iram_ucode_size_bytes, PAGE_SIZE); + } + + if (adev->gfx.rlc.rlc_dram_ucode_size_bytes) { + info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_DRAM]; + info->ucode_id = AMDGPU_UCODE_ID_RLC_DRAM; + info->fw = adev->gfx.rlc_fw; + adev->firmware.fw_size += + ALIGN(adev->gfx.rlc.rlc_dram_ucode_size_bytes, PAGE_SIZE); + } + } +} + +static void amdgpu_gfx_rlc_init_microcode_v2_3(struct amdgpu_device *adev) +{ + const struct rlc_firmware_header_v2_3 *rlc_hdr; + struct amdgpu_firmware_info *info; + + rlc_hdr = (const struct rlc_firmware_header_v2_3 *)adev->gfx.rlc_fw->data; + adev->gfx.rlcp_ucode_version = le32_to_cpu(rlc_hdr->rlcp_ucode_version); + adev->gfx.rlcp_ucode_feature_version = le32_to_cpu(rlc_hdr->rlcp_ucode_feature_version); + adev->gfx.rlc.rlcp_ucode_size_bytes = le32_to_cpu(rlc_hdr->rlcp_ucode_size_bytes); + adev->gfx.rlc.rlcp_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->rlcp_ucode_offset_bytes); + + adev->gfx.rlcv_ucode_version = le32_to_cpu(rlc_hdr->rlcv_ucode_version); + adev->gfx.rlcv_ucode_feature_version = le32_to_cpu(rlc_hdr->rlcv_ucode_feature_version); + adev->gfx.rlc.rlcv_ucode_size_bytes = le32_to_cpu(rlc_hdr->rlcv_ucode_size_bytes); + adev->gfx.rlc.rlcv_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->rlcv_ucode_offset_bytes); + + if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { + if (adev->gfx.rlc.rlcp_ucode_size_bytes) { + info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_P]; + info->ucode_id = AMDGPU_UCODE_ID_RLC_P; + info->fw = adev->gfx.rlc_fw; + adev->firmware.fw_size += + ALIGN(adev->gfx.rlc.rlcp_ucode_size_bytes, PAGE_SIZE); + } + + if (adev->gfx.rlc.rlcv_ucode_size_bytes) { + info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_V]; + info->ucode_id = AMDGPU_UCODE_ID_RLC_V; + info->fw = adev->gfx.rlc_fw; + adev->firmware.fw_size += + ALIGN(adev->gfx.rlc.rlcv_ucode_size_bytes, PAGE_SIZE); + } + } +} + +static void amdgpu_gfx_rlc_init_microcode_v2_4(struct amdgpu_device *adev) +{ + const struct rlc_firmware_header_v2_4 *rlc_hdr; + struct amdgpu_firmware_info *info; + + rlc_hdr = (const struct rlc_firmware_header_v2_4 *)adev->gfx.rlc_fw->data; + adev->gfx.rlc.global_tap_delays_ucode_size_bytes = le32_to_cpu(rlc_hdr->global_tap_delays_ucode_size_bytes); + adev->gfx.rlc.global_tap_delays_ucode = (u8 *)rlc_hdr + 
le32_to_cpu(rlc_hdr->global_tap_delays_ucode_offset_bytes); + adev->gfx.rlc.se0_tap_delays_ucode_size_bytes = le32_to_cpu(rlc_hdr->se0_tap_delays_ucode_size_bytes); + adev->gfx.rlc.se0_tap_delays_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->se0_tap_delays_ucode_offset_bytes); + adev->gfx.rlc.se1_tap_delays_ucode_size_bytes = le32_to_cpu(rlc_hdr->se1_tap_delays_ucode_size_bytes); + adev->gfx.rlc.se1_tap_delays_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->se1_tap_delays_ucode_offset_bytes); + adev->gfx.rlc.se2_tap_delays_ucode_size_bytes = le32_to_cpu(rlc_hdr->se2_tap_delays_ucode_size_bytes); + adev->gfx.rlc.se2_tap_delays_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->se2_tap_delays_ucode_offset_bytes); + adev->gfx.rlc.se3_tap_delays_ucode_size_bytes = le32_to_cpu(rlc_hdr->se3_tap_delays_ucode_size_bytes); + adev->gfx.rlc.se3_tap_delays_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->se3_tap_delays_ucode_offset_bytes); + + if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { + if (adev->gfx.rlc.global_tap_delays_ucode_size_bytes) { + info = &adev->firmware.ucode[AMDGPU_UCODE_ID_GLOBAL_TAP_DELAYS]; + info->ucode_id = AMDGPU_UCODE_ID_GLOBAL_TAP_DELAYS; + info->fw = adev->gfx.rlc_fw; + adev->firmware.fw_size += + ALIGN(adev->gfx.rlc.global_tap_delays_ucode_size_bytes, PAGE_SIZE); + } + + if (adev->gfx.rlc.se0_tap_delays_ucode_size_bytes) { + info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SE0_TAP_DELAYS]; + info->ucode_id = AMDGPU_UCODE_ID_SE0_TAP_DELAYS; + info->fw = adev->gfx.rlc_fw; + adev->firmware.fw_size += + ALIGN(adev->gfx.rlc.se0_tap_delays_ucode_size_bytes, PAGE_SIZE); + } + + if (adev->gfx.rlc.se1_tap_delays_ucode_size_bytes) { + info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SE1_TAP_DELAYS]; + info->ucode_id = AMDGPU_UCODE_ID_SE1_TAP_DELAYS; + info->fw = adev->gfx.rlc_fw; + adev->firmware.fw_size += + ALIGN(adev->gfx.rlc.se1_tap_delays_ucode_size_bytes, PAGE_SIZE); + } + + if (adev->gfx.rlc.se2_tap_delays_ucode_size_bytes) { + info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SE2_TAP_DELAYS]; + info->ucode_id = AMDGPU_UCODE_ID_SE2_TAP_DELAYS; + info->fw = adev->gfx.rlc_fw; + adev->firmware.fw_size += + ALIGN(adev->gfx.rlc.se2_tap_delays_ucode_size_bytes, PAGE_SIZE); + } + + if (adev->gfx.rlc.se3_tap_delays_ucode_size_bytes) { + info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SE3_TAP_DELAYS]; + info->ucode_id = AMDGPU_UCODE_ID_SE3_TAP_DELAYS; + info->fw = adev->gfx.rlc_fw; + adev->firmware.fw_size += + ALIGN(adev->gfx.rlc.se3_tap_delays_ucode_size_bytes, PAGE_SIZE); + } + } +} + +int amdgpu_gfx_rlc_init_microcode(struct amdgpu_device *adev, + uint16_t version_major, + uint16_t version_minor) +{ + int err; + + if (version_major < 2) { + /* only support rlc_hdr v2.x and onwards */ + dev_err(adev->dev, "unsupported rlc fw hdr\n"); + return -EINVAL; + } + + /* is_rlc_v2_1 is still used in APU code path */ + if (version_major == 2 && version_minor == 1) + adev->gfx.rlc.is_rlc_v2_1 = true; + + if (version_minor >= 0) { + err = amdgpu_gfx_rlc_init_microcode_v2_0(adev); + if (err) { + dev_err(adev->dev, "fail to init rlc v2_0 microcode\n"); + return err; + } + } + if (version_minor >= 1) + amdgpu_gfx_rlc_init_microcode_v2_1(adev); + if (version_minor >= 2) + amdgpu_gfx_rlc_init_microcode_v2_2(adev); + if (version_minor == 3) + amdgpu_gfx_rlc_init_microcode_v2_3(adev); + if (version_minor == 4) + amdgpu_gfx_rlc_init_microcode_v2_4(adev); + + return 0; +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h index 03ac36b2c2cf..23f060db9255 100644 --- 
a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h @@ -267,5 +267,7 @@ int amdgpu_gfx_rlc_init_csb(struct amdgpu_device *adev); int amdgpu_gfx_rlc_init_cpt(struct amdgpu_device *adev); void amdgpu_gfx_rlc_setup_cp_table(struct amdgpu_device *adev); void amdgpu_gfx_rlc_fini(struct amdgpu_device *adev); - +int amdgpu_gfx_rlc_init_microcode(struct amdgpu_device *adev, + uint16_t version_major, + uint16_t version_minor); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h index 96b6cf4c4d54..59edf32f775e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h @@ -260,8 +260,12 @@ struct rlc_firmware_header_v2_2 { /* version_major=2, version_minor=3 */ struct rlc_firmware_header_v2_3 { struct rlc_firmware_header_v2_2 v2_2; + uint32_t rlcp_ucode_version; + uint32_t rlcp_ucode_feature_version; uint32_t rlcp_ucode_size_bytes; uint32_t rlcp_ucode_offset_bytes; + uint32_t rlcv_ucode_version; + uint32_t rlcv_ucode_feature_version; uint32_t rlcv_ucode_size_bytes; uint32_t rlcv_ucode_offset_bytes; }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c index f36e4f08db6d..0b52af415b28 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c @@ -191,7 +191,7 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev) fw_name = FIRMWARE_VCN4_0_2; if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) && (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)) - adev->vcn.indirect_sram = false; + adev->vcn.indirect_sram = true; break; case IP_VERSION(4, 0, 4): fw_name = FIRMWARE_VCN4_0_4; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c index f6b1bb40e503..daf8ba8235cd 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c @@ -474,49 +474,6 @@ static void gfx_v11_0_free_microcode(struct amdgpu_device *adev) kfree(adev->gfx.rlc.register_list_format); } -static void gfx_v11_0_init_rlc_ext_microcode(struct amdgpu_device *adev) -{ - const struct rlc_firmware_header_v2_1 *rlc_hdr; - - rlc_hdr = (const struct rlc_firmware_header_v2_1 *)adev->gfx.rlc_fw->data; - adev->gfx.rlc_srlc_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_cntl_ucode_ver); - adev->gfx.rlc_srlc_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_cntl_feature_ver); - adev->gfx.rlc.save_restore_list_cntl_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_cntl_size_bytes); - adev->gfx.rlc.save_restore_list_cntl = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_cntl_offset_bytes); - adev->gfx.rlc_srlg_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_gpm_ucode_ver); - adev->gfx.rlc_srlg_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_gpm_feature_ver); - adev->gfx.rlc.save_restore_list_gpm_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_gpm_size_bytes); - adev->gfx.rlc.save_restore_list_gpm = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_gpm_offset_bytes); - adev->gfx.rlc_srls_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_srm_ucode_ver); - adev->gfx.rlc_srls_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_srm_feature_ver); - adev->gfx.rlc.save_restore_list_srm_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_srm_size_bytes); - adev->gfx.rlc.save_restore_list_srm = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_srm_offset_bytes); - adev->gfx.rlc.reg_list_format_direct_reg_list_length = - 
le32_to_cpu(rlc_hdr->reg_list_format_direct_reg_list_length); -} - -static void gfx_v11_0_init_rlc_iram_dram_microcode(struct amdgpu_device *adev) -{ - const struct rlc_firmware_header_v2_2 *rlc_hdr; - - rlc_hdr = (const struct rlc_firmware_header_v2_2 *)adev->gfx.rlc_fw->data; - adev->gfx.rlc.rlc_iram_ucode_size_bytes = le32_to_cpu(rlc_hdr->rlc_iram_ucode_size_bytes); - adev->gfx.rlc.rlc_iram_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->rlc_iram_ucode_offset_bytes); - adev->gfx.rlc.rlc_dram_ucode_size_bytes = le32_to_cpu(rlc_hdr->rlc_dram_ucode_size_bytes); - adev->gfx.rlc.rlc_dram_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->rlc_dram_ucode_offset_bytes); -} - -static void gfx_v11_0_init_rlcp_rlcv_microcode(struct amdgpu_device *adev) -{ - const struct rlc_firmware_header_v2_3 *rlc_hdr; - - rlc_hdr = (const struct rlc_firmware_header_v2_3 *)adev->gfx.rlc_fw->data; - adev->gfx.rlc.rlcp_ucode_size_bytes = le32_to_cpu(rlc_hdr->rlcp_ucode_size_bytes); - adev->gfx.rlc.rlcp_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->rlcp_ucode_offset_bytes); - adev->gfx.rlc.rlcv_ucode_size_bytes = le32_to_cpu(rlc_hdr->rlcv_ucode_size_bytes); - adev->gfx.rlc.rlcv_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->rlcv_ucode_offset_bytes); -} - static int gfx_v11_0_init_microcode(struct amdgpu_device *adev) { char fw_name[40]; @@ -527,8 +484,6 @@ static int gfx_v11_0_init_microcode(struct amdgpu_device *adev) const struct gfx_firmware_header_v1_0 *cp_hdr; const struct gfx_firmware_header_v2_0 *cp_hdr_v2_0; const struct rlc_firmware_header_v2_0 *rlc_hdr; - unsigned int *tmp = NULL; - unsigned int i = 0; uint16_t version_major; uint16_t version_minor; @@ -583,58 +538,14 @@ static int gfx_v11_0_init_microcode(struct amdgpu_device *adev) if (err) goto out; err = amdgpu_ucode_validate(adev->gfx.rlc_fw); + if (err) + goto out; rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data; version_major = le16_to_cpu(rlc_hdr->header.header_version_major); version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor); - - adev->gfx.rlc_fw_version = le32_to_cpu(rlc_hdr->header.ucode_version); - adev->gfx.rlc_feature_version = le32_to_cpu(rlc_hdr->ucode_feature_version); - adev->gfx.rlc.save_and_restore_offset = - le32_to_cpu(rlc_hdr->save_and_restore_offset); - adev->gfx.rlc.clear_state_descriptor_offset = - le32_to_cpu(rlc_hdr->clear_state_descriptor_offset); - adev->gfx.rlc.avail_scratch_ram_locations = - le32_to_cpu(rlc_hdr->avail_scratch_ram_locations); - adev->gfx.rlc.reg_restore_list_size = - le32_to_cpu(rlc_hdr->reg_restore_list_size); - adev->gfx.rlc.reg_list_format_start = - le32_to_cpu(rlc_hdr->reg_list_format_start); - adev->gfx.rlc.reg_list_format_separate_start = - le32_to_cpu(rlc_hdr->reg_list_format_separate_start); - adev->gfx.rlc.starting_offsets_start = - le32_to_cpu(rlc_hdr->starting_offsets_start); - adev->gfx.rlc.reg_list_format_size_bytes = - le32_to_cpu(rlc_hdr->reg_list_format_size_bytes); - adev->gfx.rlc.reg_list_size_bytes = - le32_to_cpu(rlc_hdr->reg_list_size_bytes); - adev->gfx.rlc.register_list_format = - kmalloc(adev->gfx.rlc.reg_list_format_size_bytes + - adev->gfx.rlc.reg_list_size_bytes, GFP_KERNEL); - if (!adev->gfx.rlc.register_list_format) { - err = -ENOMEM; + err = amdgpu_gfx_rlc_init_microcode(adev, version_major, version_minor); + if (err) goto out; - } - - tmp = (unsigned int *)((uintptr_t)rlc_hdr + - le32_to_cpu(rlc_hdr->reg_list_format_array_offset_bytes)); - for (i = 0 ; i < (rlc_hdr->reg_list_format_size_bytes >> 2); i++) - adev->gfx.rlc.register_list_format[i] = 
le32_to_cpu(tmp[i]); - - adev->gfx.rlc.register_restore = adev->gfx.rlc.register_list_format + i; - - tmp = (unsigned int *)((uintptr_t)rlc_hdr + - le32_to_cpu(rlc_hdr->reg_list_array_offset_bytes)); - for (i = 0 ; i < (rlc_hdr->reg_list_size_bytes >> 2); i++) - adev->gfx.rlc.register_restore[i] = le32_to_cpu(tmp[i]); - - if (version_major == 2) { - if (version_minor >= 1) - gfx_v11_0_init_rlc_ext_microcode(adev); - if (version_minor >= 2) - gfx_v11_0_init_rlc_iram_dram_microcode(adev); - if (version_minor == 3) - gfx_v11_0_init_rlcp_rlcv_microcode(adev); - } } snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", ucode_prefix); @@ -769,60 +680,6 @@ static int gfx_v11_0_init_microcode(struct amdgpu_device *adev) adev->firmware.fw_size += ALIGN(le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE); } - - info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_G]; - info->ucode_id = AMDGPU_UCODE_ID_RLC_G; - info->fw = adev->gfx.rlc_fw; - if (info->fw) { - header = (const struct common_firmware_header *)info->fw->data; - adev->firmware.fw_size += - ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE); - } - if (adev->gfx.rlc.save_restore_list_gpm_size_bytes && - adev->gfx.rlc.save_restore_list_srm_size_bytes) { - info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM]; - info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM; - info->fw = adev->gfx.rlc_fw; - adev->firmware.fw_size += - ALIGN(adev->gfx.rlc.save_restore_list_gpm_size_bytes, PAGE_SIZE); - - info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM]; - info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM; - info->fw = adev->gfx.rlc_fw; - adev->firmware.fw_size += - ALIGN(adev->gfx.rlc.save_restore_list_srm_size_bytes, PAGE_SIZE); - } - - if (adev->gfx.rlc.rlc_iram_ucode_size_bytes && - adev->gfx.rlc.rlc_dram_ucode_size_bytes) { - info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_IRAM]; - info->ucode_id = AMDGPU_UCODE_ID_RLC_IRAM; - info->fw = adev->gfx.rlc_fw; - adev->firmware.fw_size += - ALIGN(adev->gfx.rlc.rlc_iram_ucode_size_bytes, PAGE_SIZE); - - info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_DRAM]; - info->ucode_id = AMDGPU_UCODE_ID_RLC_DRAM; - info->fw = adev->gfx.rlc_fw; - adev->firmware.fw_size += - ALIGN(adev->gfx.rlc.rlc_dram_ucode_size_bytes, PAGE_SIZE); - } - - if (adev->gfx.rlc.rlcp_ucode_size_bytes) { - info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_P]; - info->ucode_id = AMDGPU_UCODE_ID_RLC_P; - info->fw = adev->gfx.rlc_fw; - adev->firmware.fw_size += - ALIGN(adev->gfx.rlc.rlcp_ucode_size_bytes, PAGE_SIZE); - } - - if (adev->gfx.rlc.rlcv_ucode_size_bytes) { - info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_V]; - info->ucode_id = AMDGPU_UCODE_ID_RLC_V; - info->fw = adev->gfx.rlc_fw; - adev->firmware.fw_size += - ALIGN(adev->gfx.rlc.rlcv_ucode_size_bytes, PAGE_SIZE); - } } out: @@ -5260,6 +5117,8 @@ static void gfx_v11_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid) { u32 reg, data; + amdgpu_gfx_off_ctrl(adev, false); + reg = SOC15_REG_OFFSET(GC, 0, regRLC_SPM_MC_CNTL); if (amdgpu_sriov_is_pp_one_vf(adev)) data = RREG32_NO_KIQ(reg); @@ -5273,6 +5132,8 @@ static void gfx_v11_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid) WREG32_SOC15_NO_KIQ(GC, 0, regRLC_SPM_MC_CNTL, data); else WREG32_SOC15(GC, 0, regRLC_SPM_MC_CNTL, data); + + amdgpu_gfx_off_ctrl(adev, true); } static const struct amdgpu_rlc_funcs gfx_v11_0_rlc_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index fc9c1043244c..037af8352677 100644 --- 
a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -5597,7 +5597,7 @@ static void gfx_v9_0_ring_emit_patch_cond_exec(struct amdgpu_ring *ring, unsigne BUG_ON(offset > ring->buf_mask); BUG_ON(ring->ring[offset] != 0x55aa55aa); - cur = (ring->wptr & ring->buf_mask) - 1; + cur = (ring->wptr - 1) & ring->buf_mask; if (likely(cur > offset)) ring->ring[offset] = cur - offset; else diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c index cc3fdbbcd314..f92744b8d79d 100644 --- a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c @@ -185,6 +185,10 @@ static int mes_v11_0_add_hw_queue(struct amdgpu_mes *mes, mes_add_queue_pkt.is_kfd_process = input->is_kfd_process; mes_add_queue_pkt.trap_en = 1; + /* For KFD, gds_size is re-used for queue size (needed in MES for AQL queues) */ + mes_add_queue_pkt.is_aql_queue = input->is_aql_queue; + mes_add_queue_pkt.gds_size = input->queue_size; + return mes_v11_0_submit_pkt_and_poll_completion(mes, &mes_add_queue_pkt, sizeof(mes_add_queue_pkt), offsetof(union MESAPI__ADD_QUEUE, api_status)); diff --git a/drivers/gpu/drm/amd/amdgpu/soc21.c b/drivers/gpu/drm/amd/amdgpu/soc21.c index 2e50db3b761e..6e564b549b9f 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc21.c +++ b/drivers/gpu/drm/amd/amdgpu/soc21.c @@ -625,6 +625,7 @@ static int soc21_common_early_init(void *handle) AMD_CG_SUPPORT_JPEG_MGCG; adev->pg_flags = AMD_PG_SUPPORT_GFX_PG | + AMD_PG_SUPPORT_VCN_DPG | AMD_PG_SUPPORT_JPEG; adev->external_rev_id = adev->rev_id + 0x1; break; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index e83725a28106..007a3db69df1 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -205,6 +205,8 @@ static int add_queue_mes(struct device_queue_manager *dqm, struct queue *q, } queue_input.is_kfd_process = 1; + queue_input.is_aql_queue = (q->properties.format == KFD_QUEUE_FORMAT_AQL); + queue_input.queue_size = q->properties.queue_size >> 2; queue_input.paging = false; queue_input.tba_addr = qpd->tba_addr; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c index a6fcbeeb7428..0d53f6067422 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c @@ -350,11 +350,11 @@ static void event_interrupt_wq_v11(struct kfd_dev *dev, print_sq_intr_info_inst(context_id0, context_id1); sq_int_priv = REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0, PRIV); - if (sq_int_priv /*&& (kfd_set_dbg_ev_from_interrupt(dev, pasid, + /*if (sq_int_priv && (kfd_set_dbg_ev_from_interrupt(dev, pasid, KFD_CTXID0_DOORBELL_ID(context_id0), KFD_CTXID0_TRAP_CODE(context_id0), - NULL, 0))*/) - return; + NULL, 0))) + return;*/ break; case SQ_INTERRUPT_WORD_ENCODING_ERROR: print_sq_intr_info_error(context_id0, context_id1); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c index b8e14c2cc295..3ae350220d42 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c @@ -126,6 +126,10 @@ static void init_mqd(struct mqd_manager *mm, void **mqd, m->compute_static_thread_mgmt_se1 = 0xFFFFFFFF; m->compute_static_thread_mgmt_se2 = 0xFFFFFFFF; m->compute_static_thread_mgmt_se3 = 0xFFFFFFFF; + m->compute_static_thread_mgmt_se4 = 
0xFFFFFFFF; + m->compute_static_thread_mgmt_se5 = 0xFFFFFFFF; + m->compute_static_thread_mgmt_se6 = 0xFFFFFFFF; + m->compute_static_thread_mgmt_se7 = 0xFFFFFFFF; m->cp_hqd_persistent_state = CP_HQD_PERSISTENT_STATE__PRELOAD_REQ_MASK | 0x55 << CP_HQD_PERSISTENT_STATE__PRELOAD_SIZE__SHIFT; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c index c8da18e45b0e..8ca10ab3dfc1 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c @@ -170,7 +170,13 @@ bool amdgpu_dm_psr_enable(struct dc_stream_state *stream) &stream, 1, &params); - power_opt |= psr_power_opt_z10_static_screen; + /* + * Only enable static-screen optimizations for PSR1. For PSR SU, this + * causes issues with the vstartup interrupt, which amdgpu_dm uses to + * send vblank events. + */ + if (link->psr_settings.psr_version < DC_PSR_VERSION_SU_1) + power_opt |= psr_power_opt_z10_static_screen; return dc_link_set_psr_allow_active(link, &psr_enable, false, false, &power_opt); } diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c index 8559dcd80af0..4a15aa7a375f 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c @@ -130,11 +130,20 @@ static void dcn314_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state if (pipe->top_pipe || pipe->prev_odm_pipe) continue; if (pipe->stream && (pipe->stream->dpms_off || dc_is_virtual_signal(pipe->stream->signal))) { + struct stream_encoder *stream_enc = pipe->stream_res.stream_enc; + if (disable) { + if (stream_enc && stream_enc->funcs->disable_fifo) + pipe->stream_res.stream_enc->funcs->disable_fifo(stream_enc); + pipe->stream_res.tg->funcs->immediate_disable_crtc(pipe->stream_res.tg); reset_sync_context_for_pipe(dc, context, i); - } else + } else { pipe->stream_res.tg->funcs->enable_crtc(pipe->stream_res.tg); + + if (stream_enc && stream_enc->funcs->enable_fifo) + pipe->stream_res.stream_enc->funcs->enable_fifo(stream_enc); + } } } } diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c index c6785969eb1a..f0f3f66629cc 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c @@ -156,12 +156,14 @@ void dcn32_init_clocks(struct clk_mgr *clk_mgr_base) { struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); unsigned int num_levels; + unsigned int num_dcfclk_levels, num_dtbclk_levels, num_dispclk_levels; memset(&(clk_mgr_base->clks), 0, sizeof(struct dc_clocks)); clk_mgr_base->clks.p_state_change_support = true; clk_mgr_base->clks.prev_p_state_change_support = true; clk_mgr_base->clks.fclk_prev_p_state_change_support = true; clk_mgr->smu_present = false; + clk_mgr->dpm_present = false; if (!clk_mgr_base->bw_params) return; @@ -179,6 +181,7 @@ void dcn32_init_clocks(struct clk_mgr *clk_mgr_base) dcn32_init_single_clock(clk_mgr, PPCLK_DCFCLK, &clk_mgr_base->bw_params->clk_table.entries[0].dcfclk_mhz, &num_levels); + num_dcfclk_levels = num_levels; /* SOCCLK */ dcn32_init_single_clock(clk_mgr, PPCLK_SOCCLK, @@ -189,11 +192,16 @@ void dcn32_init_clocks(struct clk_mgr *clk_mgr_base) dcn32_init_single_clock(clk_mgr, PPCLK_DTBCLK, &clk_mgr_base->bw_params->clk_table.entries[0].dtbclk_mhz, &num_levels); + num_dtbclk_levels
= num_levels; /* DISPCLK */ dcn32_init_single_clock(clk_mgr, PPCLK_DISPCLK, &clk_mgr_base->bw_params->clk_table.entries[0].dispclk_mhz, &num_levels); + num_dispclk_levels = num_levels; + + if (num_dcfclk_levels && num_dtbclk_levels && num_dispclk_levels) + clk_mgr->dpm_present = true; if (clk_mgr_base->ctx->dc->debug.min_disp_clk_khz) { unsigned int i; @@ -658,6 +666,12 @@ static void dcn32_get_memclk_states_from_smu(struct clk_mgr *clk_mgr_base) &num_levels); clk_mgr_base->bw_params->clk_table.num_entries = num_levels ? num_levels : 1; + if (clk_mgr->dpm_present && !num_levels) + clk_mgr->dpm_present = false; + + if (!clk_mgr->dpm_present) + dcn32_patch_dpm_table(clk_mgr_base->bw_params); + DC_FP_START(); /* Refresh bounding box */ clk_mgr_base->ctx->dc->res_pool->funcs->update_bw_bounding_box( diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c index aea49334021c..38a67051d470 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c @@ -2164,8 +2164,7 @@ static void dce110_setup_audio_dto( continue; if (pipe_ctx->stream->signal != SIGNAL_TYPE_HDMI_TYPE_A) continue; - if (pipe_ctx->stream_res.audio != NULL && - pipe_ctx->stream_res.audio->enabled == false) { + if (pipe_ctx->stream_res.audio != NULL) { struct audio_output audio_output; build_audio_output(context, pipe_ctx, &audio_output); @@ -2205,8 +2204,7 @@ static void dce110_setup_audio_dto( if (!dc_is_dp_signal(pipe_ctx->stream->signal)) continue; - if (pipe_ctx->stream_res.audio != NULL && - pipe_ctx->stream_res.audio->enabled == false) { + if (pipe_ctx->stream_res.audio != NULL) { struct audio_output audio_output; build_audio_output(context, pipe_ctx, &audio_output); diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.c index 232cc15979dd..fb729674953b 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.c +++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.c @@ -45,6 +45,48 @@ #define DC_LOGGER \ dccg->ctx->logger +static void dccg314_get_pixel_rate_div( + struct dccg *dccg, + uint32_t otg_inst, + enum pixel_rate_div *k1, + enum pixel_rate_div *k2) +{ + struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); + uint32_t val_k1 = PIXEL_RATE_DIV_NA, val_k2 = PIXEL_RATE_DIV_NA; + + *k1 = PIXEL_RATE_DIV_NA; + *k2 = PIXEL_RATE_DIV_NA; + + switch (otg_inst) { + case 0: + REG_GET_2(OTG_PIXEL_RATE_DIV, + OTG0_PIXEL_RATE_DIVK1, &val_k1, + OTG0_PIXEL_RATE_DIVK2, &val_k2); + break; + case 1: + REG_GET_2(OTG_PIXEL_RATE_DIV, + OTG1_PIXEL_RATE_DIVK1, &val_k1, + OTG1_PIXEL_RATE_DIVK2, &val_k2); + break; + case 2: + REG_GET_2(OTG_PIXEL_RATE_DIV, + OTG2_PIXEL_RATE_DIVK1, &val_k1, + OTG2_PIXEL_RATE_DIVK2, &val_k2); + break; + case 3: + REG_GET_2(OTG_PIXEL_RATE_DIV, + OTG3_PIXEL_RATE_DIVK1, &val_k1, + OTG3_PIXEL_RATE_DIVK2, &val_k2); + break; + default: + BREAK_TO_DEBUGGER(); + return; + } + + *k1 = (enum pixel_rate_div)val_k1; + *k2 = (enum pixel_rate_div)val_k2; +} + static void dccg314_set_pixel_rate_div( struct dccg *dccg, uint32_t otg_inst, @@ -52,6 +94,11 @@ static void dccg314_set_pixel_rate_div( enum pixel_rate_div k2) { struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); + enum pixel_rate_div cur_k1 = PIXEL_RATE_DIV_NA, cur_k2 = PIXEL_RATE_DIV_NA; + + dccg314_get_pixel_rate_div(dccg, otg_inst, &cur_k1, &cur_k2); + if (k1 == PIXEL_RATE_DIV_NA || k2 == PIXEL_RATE_DIV_NA || (k1 == cur_k1 && k2 == cur_k2)) + return; 
switch (otg_inst) { case 0: diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.c index 06d8638db696..8c0ab013764e 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.c @@ -56,7 +56,8 @@ static void enc314_enable_fifo(struct stream_encoder *enc) /* TODO: Confirm if we need to wait for DIG_SYMCLK_FE_ON */ REG_WAIT(DIG_FE_CNTL, DIG_SYMCLK_FE_ON, 1, 10, 5000); - REG_UPDATE_2(DIG_FIFO_CTRL0, DIG_FIFO_RESET, 1, DIG_FIFO_READ_START_LEVEL, 0x7); + REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_READ_START_LEVEL, 0x7); + REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_RESET, 1); REG_WAIT(DIG_FIFO_CTRL0, DIG_FIFO_RESET_DONE, 1, 10, 5000); REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_RESET, 0); REG_WAIT(DIG_FIFO_CTRL0, DIG_FIFO_RESET_DONE, 0, 10, 5000); @@ -261,6 +262,16 @@ static bool is_two_pixels_per_containter(const struct dc_crtc_timing *timing) return two_pix; } +void enc314_stream_encoder_dp_blank( + struct dc_link *link, + struct stream_encoder *enc) +{ + /* New to DCN314 - disable the FIFO before VID stream disable. */ + enc314_disable_fifo(enc); + + enc1_stream_encoder_dp_blank(link, enc); +} + static void enc314_stream_encoder_dp_unblank( struct dc_link *link, struct stream_encoder *enc, @@ -316,15 +327,11 @@ static void enc314_stream_encoder_dp_unblank( /* switch DP encoder to CRTC data, but reset it the fifo first. It may happen * that it overflows during mode transition, and sometimes doesn't recover. */ - REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_READ_START_LEVEL, 0x7); REG_UPDATE(DP_STEER_FIFO, DP_STEER_FIFO_RESET, 1); udelay(10); REG_UPDATE(DP_STEER_FIFO, DP_STEER_FIFO_RESET, 0); - /* DIG Resync FIFO now needs to be explicitly enabled. */ - enc314_enable_fifo(enc); - /* wait 100us for DIG/DP logic to prime * (i.e. a few video lines) */ @@ -340,6 +347,12 @@ static void enc314_stream_encoder_dp_unblank( REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, true); + /* + * DIG Resync FIFO now needs to be explicitly enabled. + * This should come after DP_VID_STREAM_ENABLE per HW docs. 
+ */ + enc314_enable_fifo(enc); + dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_ENABLE_DP_VID_STREAM); } @@ -408,7 +421,7 @@ static const struct stream_encoder_funcs dcn314_str_enc_funcs = { .stop_dp_info_packets = enc1_stream_encoder_stop_dp_info_packets, .dp_blank = - enc1_stream_encoder_dp_blank, + enc314_stream_encoder_dp_blank, .dp_unblank = enc314_stream_encoder_dp_unblank, .audio_mute_control = enc3_audio_mute_control, diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c index 0d5e8a441512..6640d0ac4304 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c @@ -42,6 +42,48 @@ #define DC_LOGGER \ dccg->ctx->logger +static void dccg32_get_pixel_rate_div( + struct dccg *dccg, + uint32_t otg_inst, + enum pixel_rate_div *k1, + enum pixel_rate_div *k2) +{ + struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); + uint32_t val_k1 = PIXEL_RATE_DIV_NA, val_k2 = PIXEL_RATE_DIV_NA; + + *k1 = PIXEL_RATE_DIV_NA; + *k2 = PIXEL_RATE_DIV_NA; + + switch (otg_inst) { + case 0: + REG_GET_2(OTG_PIXEL_RATE_DIV, + OTG0_PIXEL_RATE_DIVK1, &val_k1, + OTG0_PIXEL_RATE_DIVK2, &val_k2); + break; + case 1: + REG_GET_2(OTG_PIXEL_RATE_DIV, + OTG1_PIXEL_RATE_DIVK1, &val_k1, + OTG1_PIXEL_RATE_DIVK2, &val_k2); + break; + case 2: + REG_GET_2(OTG_PIXEL_RATE_DIV, + OTG2_PIXEL_RATE_DIVK1, &val_k1, + OTG2_PIXEL_RATE_DIVK2, &val_k2); + break; + case 3: + REG_GET_2(OTG_PIXEL_RATE_DIV, + OTG3_PIXEL_RATE_DIVK1, &val_k1, + OTG3_PIXEL_RATE_DIVK2, &val_k2); + break; + default: + BREAK_TO_DEBUGGER(); + return; + } + + *k1 = (enum pixel_rate_div)val_k1; + *k2 = (enum pixel_rate_div)val_k2; +} + static void dccg32_set_pixel_rate_div( struct dccg *dccg, uint32_t otg_inst, @@ -50,6 +92,17 @@ static void dccg32_set_pixel_rate_div( { struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); + enum pixel_rate_div cur_k1 = PIXEL_RATE_DIV_NA, cur_k2 = PIXEL_RATE_DIV_NA; + + // Don't program 0xF into the register field. 
Not valid since + // K1 / K2 field is only 1 / 2 bits wide + if (k1 == PIXEL_RATE_DIV_NA || k2 == PIXEL_RATE_DIV_NA) + return; + + dccg32_get_pixel_rate_div(dccg, otg_inst, &cur_k1, &cur_k2); + if (k1 == cur_k1 && k2 == cur_k2) + return; + switch (otg_inst) { case 0: REG_UPDATE_2(OTG_PIXEL_RATE_DIV, diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.c index 99eb239bbc7b..9aebc1be2f59 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.c +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.c @@ -98,9 +98,13 @@ static void dcn32_program_det_size(struct hubbub *hubbub, int hubp_inst, unsigne default: break; } - /* Should never be hit, if it is we have an erroneous hw config*/ - ASSERT(hubbub2->det0_size + hubbub2->det1_size + hubbub2->det2_size - + hubbub2->det3_size + hubbub2->compbuf_size_segments <= hubbub2->crb_size_segs); + if (hubbub2->det0_size + hubbub2->det1_size + hubbub2->det2_size + + hubbub2->det3_size + hubbub2->compbuf_size_segments > hubbub2->crb_size_segs) { + /* This may happen during seamless transition from ODM 2:1 to ODM4:1 */ + DC_LOG_WARNING("CRB Config Warning: DET size (%d,%d,%d,%d) + Compbuf size (%d) > CRB segments (%d)\n", + hubbub2->det0_size, hubbub2->det1_size, hubbub2->det2_size, hubbub2->det3_size, + hubbub2->compbuf_size_segments, hubbub2->crb_size_segs); + } } static void dcn32_program_compbuf_size(struct hubbub *hubbub, unsigned int compbuf_size_kb, bool safe_to_increase) diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c index f43686997917..e573e706430d 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c @@ -121,8 +121,8 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_2_soc = { }, }, .num_states = 1, - .sr_exit_time_us = 20.16, - .sr_enter_plus_exit_time_us = 27.13, + .sr_exit_time_us = 42.97, + .sr_enter_plus_exit_time_us = 49.94, .sr_exit_z8_time_us = 285.0, .sr_enter_plus_exit_z8_time_us = 320, .writeback_latency_us = 12.0, @@ -1926,6 +1926,45 @@ static void remove_entry_from_table_at_index(struct _vcs_dpi_voltage_scaling_st memset(&table[--(*num_entries)], 0, sizeof(struct _vcs_dpi_voltage_scaling_st)); } +void dcn32_patch_dpm_table(struct clk_bw_params *bw_params) +{ + int i; + unsigned int max_dcfclk_mhz = 0, max_dispclk_mhz = 0, max_dppclk_mhz = 0, + max_phyclk_mhz = 0, max_dtbclk_mhz = 0, max_fclk_mhz = 0, max_uclk_mhz = 0; + + for (i = 0; i < MAX_NUM_DPM_LVL; i++) { + if (bw_params->clk_table.entries[i].dcfclk_mhz > max_dcfclk_mhz) + max_dcfclk_mhz = bw_params->clk_table.entries[i].dcfclk_mhz; + if (bw_params->clk_table.entries[i].fclk_mhz > max_fclk_mhz) + max_fclk_mhz = bw_params->clk_table.entries[i].fclk_mhz; + if (bw_params->clk_table.entries[i].memclk_mhz > max_uclk_mhz) + max_uclk_mhz = bw_params->clk_table.entries[i].memclk_mhz; + if (bw_params->clk_table.entries[i].dispclk_mhz > max_dispclk_mhz) + max_dispclk_mhz = bw_params->clk_table.entries[i].dispclk_mhz; + if (bw_params->clk_table.entries[i].dppclk_mhz > max_dppclk_mhz) + max_dppclk_mhz = bw_params->clk_table.entries[i].dppclk_mhz; + if (bw_params->clk_table.entries[i].phyclk_mhz > max_phyclk_mhz) + max_phyclk_mhz = bw_params->clk_table.entries[i].phyclk_mhz; + if (bw_params->clk_table.entries[i].dtbclk_mhz > max_dtbclk_mhz) + max_dtbclk_mhz = bw_params->clk_table.entries[i].dtbclk_mhz; + } + + /* Scan through clock values we currently have and if they are 0, + * then 
populate them with the dcn3_2_soc.clock_limits[] values. + * + * Do it for DCFCLK, DISPCLK, DTBCLK and UCLK as any of those being + * 0 will cause it to skip building the clock table. + */ + if (max_dcfclk_mhz == 0) + bw_params->clk_table.entries[0].dcfclk_mhz = dcn3_2_soc.clock_limits[0].dcfclk_mhz; + if (max_dispclk_mhz == 0) + bw_params->clk_table.entries[0].dispclk_mhz = dcn3_2_soc.clock_limits[0].dispclk_mhz; + if (max_dtbclk_mhz == 0) + bw_params->clk_table.entries[0].dtbclk_mhz = dcn3_2_soc.clock_limits[0].dtbclk_mhz; + if (max_uclk_mhz == 0) + bw_params->clk_table.entries[0].memclk_mhz = dcn3_2_soc.clock_limits[0].dram_speed_mts / 16; +} + static int build_synthetic_soc_states(struct clk_bw_params *bw_params, struct _vcs_dpi_voltage_scaling_st *table, unsigned int *num_entries) { diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.h index 6ce221098979..e1b79e2aab8c 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.h +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.h @@ -77,4 +77,6 @@ int dcn32_find_dummy_latency_index_for_fw_based_mclk_switch(struct dc *dc, int pipe_cnt, int vlevel); +void dcn32_patch_dpm_table(struct clk_bw_params *bw_params); + #endif diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h index 68c2ed434d2c..cff5fd55a0ad 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h @@ -340,6 +340,8 @@ struct clk_mgr_internal { bool smu_present; void *wm_range_table; long long wm_range_table_addr; + + bool dpm_present; }; struct clk_mgr_internal_funcs { diff --git a/drivers/gpu/drm/amd/include/mes_v11_api_def.h b/drivers/gpu/drm/amd/include/mes_v11_api_def.h index 50bfa513cb35..7e85cdc5bd34 100644 --- a/drivers/gpu/drm/amd/include/mes_v11_api_def.h +++ b/drivers/gpu/drm/amd/include/mes_v11_api_def.h @@ -269,7 +269,8 @@ union MESAPI__ADD_QUEUE { uint32_t map_kiq_utility_queue : 1; uint32_t is_kfd_process : 1; uint32_t trap_en : 1; - uint32_t reserved : 21; + uint32_t is_aql_queue : 1; + uint32_t reserved : 20; }; struct MES_API_STATUS api_status; uint64_t tma_addr; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c index 096327513dd0..1d454485e0d9 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c @@ -239,82 +239,47 @@ smu_v13_0_0_get_allowed_feature_mask(struct smu_context *smu, uint32_t *feature_mask, uint32_t num) { struct amdgpu_device *adev = smu->adev; + u32 smu_version; if (num > 2) return -EINVAL; - memset(feature_mask, 0, sizeof(uint32_t) * num); - - *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_FW_DATA_READ_BIT); + memset(feature_mask, 0xff, sizeof(uint32_t) * num); - if (adev->pm.pp_feature & PP_SCLK_DPM_MASK) { - *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT); - *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_GFX_IMU_BIT); + if (!(adev->pm.pp_feature & PP_SCLK_DPM_MASK)) { + *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT); + *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_GFX_IMU_BIT); } - *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_MM_DPM_BIT); - *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DS_VCN_BIT); - - if ((adev->pg_flags & AMD_PG_SUPPORT_ATHUB) && - (adev->pg_flags & AMD_PG_SUPPORT_MMHUB)) - *(uint64_t *)feature_mask |=
FEATURE_MASK(FEATURE_ATHUB_MMHUB_PG_BIT); + if (!(adev->pg_flags & AMD_PG_SUPPORT_ATHUB) || + !(adev->pg_flags & AMD_PG_SUPPORT_MMHUB)) + *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_ATHUB_MMHUB_PG_BIT); - if (adev->pm.pp_feature & PP_SOCCLK_DPM_MASK) - *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT); - -#if 0 - if (adev->pm.pp_feature & PP_GFXOFF_MASK) - *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_GFXOFF_BIT); -#endif + if (!(adev->pm.pp_feature & PP_SOCCLK_DPM_MASK)) + *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT); - *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_THROTTLERS_BIT); - *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_FAN_CONTROL_BIT); - - *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DF_CSTATE_BIT); - - if (adev->pm.pp_feature & PP_MCLK_DPM_MASK) { - *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_UCLK_BIT); - *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_VMEMP_SCALING_BIT); - *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_VDDIO_MEM_SCALING_BIT); + /* PMFW 78.58 contains a critical fix for gfxoff feature */ + smu_cmn_get_smc_version(smu, NULL, &smu_version); + if ((smu_version < 0x004e3a00) || + !(adev->pm.pp_feature & PP_GFXOFF_MASK)) + *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_GFXOFF_BIT); + + if (!(adev->pm.pp_feature & PP_MCLK_DPM_MASK)) { + *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DPM_UCLK_BIT); + *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_VMEMP_SCALING_BIT); + *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_VDDIO_MEM_SCALING_BIT); } - *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_MEM_TEMP_READ_BIT); - - if (adev->pm.pp_feature & PP_SCLK_DEEP_SLEEP_MASK) - *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DS_GFXCLK_BIT); - *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DS_SOCCLK_BIT); - *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_SOC_MPCLK_DS_BIT); - *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_BACO_MPCLK_DS_BIT); + if (!(adev->pm.pp_feature & PP_SCLK_DEEP_SLEEP_MASK)) + *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DS_GFXCLK_BIT); - *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_FCLK_BIT); - *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_DCN_BIT); - *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DS_DCFCLK_BIT); - - if (adev->pm.pp_feature & PP_PCIE_DPM_MASK) { - *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_LINK_BIT); - *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DS_LCLK_BIT); + if (!(adev->pm.pp_feature & PP_PCIE_DPM_MASK)) { + *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DPM_LINK_BIT); + *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DS_LCLK_BIT); } - *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_BACO_BIT); - - *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_MP0CLK_BIT); - *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_FW_DSTATE_BIT); - - *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_OUT_OF_BAND_MONITOR_BIT); - *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_SOC_CG_BIT); - - *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DS_FCLK_BIT); - - *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_FW_CTF_BIT); - - *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DS_UCLK_BIT); - - *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_VR0HOT_BIT); - - *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_GFX_POWER_OPTIMIZER_BIT); - - if (adev->pm.pp_feature & PP_ULV_MASK) - *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_GFX_ULV_BIT); + if (!(adev->pm.pp_feature & PP_ULV_MASK)) + *(uint64_t 
*)feature_mask &= ~FEATURE_MASK(FEATURE_GFX_ULV_BIT); return 0; } diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c index 8aadcc0aa90b..df9370e0ff23 100644 --- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c +++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c @@ -1864,12 +1864,6 @@ EXPORT_SYMBOL_GPL(analogix_dp_remove); int analogix_dp_suspend(struct analogix_dp_device *dp) { clk_disable_unprepare(dp->clock); - - if (dp->plat_data->panel) { - if (drm_panel_unprepare(dp->plat_data->panel)) - DRM_ERROR("failed to turnoff the panel\n"); - } - return 0; } EXPORT_SYMBOL_GPL(analogix_dp_suspend); @@ -1884,13 +1878,6 @@ int analogix_dp_resume(struct analogix_dp_device *dp) return ret; } - if (dp->plat_data->panel) { - if (drm_panel_prepare(dp->plat_data->panel)) { - DRM_ERROR("failed to setup the panel\n"); - return -EBUSY; - } - } - return 0; } EXPORT_SYMBOL_GPL(analogix_dp_resume); diff --git a/drivers/gpu/drm/bridge/lontium-lt8912b.c b/drivers/gpu/drm/bridge/lontium-lt8912b.c index 28bad30dc4e5..5968f4af190b 100644 --- a/drivers/gpu/drm/bridge/lontium-lt8912b.c +++ b/drivers/gpu/drm/bridge/lontium-lt8912b.c @@ -188,7 +188,7 @@ static int lt8912_write_lvds_config(struct lt8912 *lt) {0x03, 0xff}, }; - return regmap_multi_reg_write(lt->regmap[I2C_CEC_DSI], seq, ARRAY_SIZE(seq)); + return regmap_multi_reg_write(lt->regmap[I2C_MAIN], seq, ARRAY_SIZE(seq)); }; static inline struct lt8912 *bridge_to_lt8912(struct drm_bridge *b) @@ -268,7 +268,7 @@ static int lt8912_video_setup(struct lt8912 *lt) u32 hactive, h_total, hpw, hfp, hbp; u32 vactive, v_total, vpw, vfp, vbp; u8 settle = 0x08; - int ret; + int ret, hsync_activehigh, vsync_activehigh; if (!lt) return -EINVAL; @@ -278,12 +278,14 @@ static int lt8912_video_setup(struct lt8912 *lt) hpw = lt->mode.hsync_len; hbp = lt->mode.hback_porch; h_total = hactive + hfp + hpw + hbp; + hsync_activehigh = lt->mode.flags & DISPLAY_FLAGS_HSYNC_HIGH; vactive = lt->mode.vactive; vfp = lt->mode.vfront_porch; vpw = lt->mode.vsync_len; vbp = lt->mode.vback_porch; v_total = vactive + vfp + vpw + vbp; + vsync_activehigh = lt->mode.flags & DISPLAY_FLAGS_VSYNC_HIGH; if (vactive <= 600) settle = 0x04; @@ -317,6 +319,13 @@ static int lt8912_video_setup(struct lt8912 *lt) ret |= regmap_write(lt->regmap[I2C_CEC_DSI], 0x3e, hfp & 0xff); ret |= regmap_write(lt->regmap[I2C_CEC_DSI], 0x3f, hfp >> 8); + ret |= regmap_update_bits(lt->regmap[I2C_MAIN], 0xab, BIT(0), + vsync_activehigh ? BIT(0) : 0); + ret |= regmap_update_bits(lt->regmap[I2C_MAIN], 0xab, BIT(1), + hsync_activehigh ? BIT(1) : 0); + ret |= regmap_update_bits(lt->regmap[I2C_MAIN], 0xb2, BIT(0), + lt->connector.display_info.is_hdmi ? BIT(0) : 0); + return ret; } diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h index 633a7e5dba3b..6b5d4ea22b67 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_types.h +++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h @@ -166,6 +166,21 @@ struct intel_engine_execlists { struct timer_list preempt; /** + * @preempt_target: active request at the time of the preemption request + * + * We force a preemption to occur if the pending contexts have not + * been promoted to active upon receipt of the CS ack event within + * the timeout. This timeout may be chosen based on the target, + * using a very short timeout if the context is no longer schedulable.
+ * That short timeout may not be applicable to other contexts, so + * if a context switch should happen before the preemption + * timeout, we may shoot early at an innocent context. To prevent this, + * we record which context was active at the time of the preemption + * request and only reset that context upon the timeout. + */ + const struct i915_request *preempt_target; + + /** * @ccid: identifier for contexts submitted to this engine */ u32 ccid; diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c index 4b909cb88cdf..c718e6dc40b5 100644 --- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c +++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c @@ -1241,6 +1241,9 @@ static unsigned long active_preempt_timeout(struct intel_engine_cs *engine, if (!rq) return 0; + /* Only allow ourselves to force reset the currently active context */ + engine->execlists.preempt_target = rq; + /* Force a fast reset for terminated contexts (ignoring sysfs!) */ if (unlikely(intel_context_is_banned(rq->context) || bad_request(rq))) return INTEL_CONTEXT_BANNED_PREEMPT_TIMEOUT_MS; @@ -2427,8 +2430,24 @@ static void execlists_submission_tasklet(struct tasklet_struct *t) GEM_BUG_ON(inactive - post > ARRAY_SIZE(post)); if (unlikely(preempt_timeout(engine))) { + const struct i915_request *rq = *engine->execlists.active; + + /* + * If after the preempt-timeout expired, we are still on the + * same active request/context as before we initiated the + * preemption, reset the engine. + * + * However, if we have processed a CS event to switch contexts, + * but not yet processed the CS event for the pending + * preemption, reset the timer allowing the new context to + * gracefully exit. + */ cancel_timer(&engine->execlists.preempt); - engine->execlists.error_interrupt |= ERROR_PREEMPT; + if (rq == engine->execlists.preempt_target) + engine->execlists.error_interrupt |= ERROR_PREEMPT; + else + set_timer_ms(&engine->execlists.preempt, + active_preempt_timeout(engine, rq)); } if (unlikely(READ_ONCE(engine->execlists.error_interrupt))) { diff --git a/drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c index 73a8b46e0234..d09a0e845d09 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c +++ b/drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c @@ -545,8 +545,7 @@ static INTEL_GT_RPS_BOOL_ATTR_RO(throttle_reason_ratl, RATL_MASK); static INTEL_GT_RPS_BOOL_ATTR_RO(throttle_reason_vr_thermalert, VR_THERMALERT_MASK); static INTEL_GT_RPS_BOOL_ATTR_RO(throttle_reason_vr_tdc, VR_TDC_MASK); -static const struct attribute *freq_attrs[] = { - &dev_attr_punit_req_freq_mhz.attr, +static const struct attribute *throttle_reason_attrs[] = { &attr_throttle_reason_status.attr, &attr_throttle_reason_pl1.attr, &attr_throttle_reason_pl2.attr, @@ -763,12 +762,20 @@ void intel_gt_sysfs_pm_init(struct intel_gt *gt, struct kobject *kobj) if (!is_object_gt(kobj)) return; - ret = sysfs_create_files(kobj, freq_attrs); + ret = sysfs_create_file(kobj, &dev_attr_punit_req_freq_mhz.attr); if (ret) drm_warn(&gt->i915->drm, - "failed to create gt%u throttle sysfs files (%pe)", + "failed to create gt%u punit_req_freq_mhz sysfs (%pe)", gt->info.id, ERR_PTR(ret)); + if (GRAPHICS_VER(gt->i915) >= 11) { + ret = sysfs_create_files(kobj, throttle_reason_attrs); + if (ret) + drm_warn(&gt->i915->drm, + "failed to create gt%u throttle sysfs files (%pe)", + gt->info.id, ERR_PTR(ret)); + } + if (HAS_MEDIA_RATIO_MODE(gt->i915) && intel_uc_uses_guc_slpc(&gt->uc))
{ ret = sysfs_create_files(kobj, media_perf_power_attrs); if (ret) diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c index 3c833ea60db6..7b9f3fc3adf7 100644 --- a/drivers/hv/vmbus_drv.c +++ b/drivers/hv/vmbus_drv.c @@ -2453,7 +2453,8 @@ static int vmbus_acpi_add(struct acpi_device *device) * Some ancestor of the vmbus acpi device (Gen1 or Gen2 * firmware) is the VMOD that has the mmio ranges. Get that. */ - for (ancestor = device->parent; ancestor; ancestor = ancestor->parent) { + for (ancestor = acpi_dev_parent(device); ancestor; + ancestor = acpi_dev_parent(ancestor)) { result = acpi_walk_resources(ancestor->handle, METHOD_NAME__CRS, vmbus_walk_resources, NULL); diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c index d2545a1be9fc..44e04c75d0d3 100644 --- a/drivers/hwmon/acpi_power_meter.c +++ b/drivers/hwmon/acpi_power_meter.c @@ -598,7 +598,7 @@ static int read_domain_devices(struct acpi_power_meter_resource *resource) continue; /* Create a symlink to domain objects */ - obj = acpi_bus_get_acpi_device(element->reference.handle); + obj = acpi_get_acpi_dev(element->reference.handle); resource->domain_devices[i] = obj; if (!obj) continue; diff --git a/drivers/hwmon/hwmon.c b/drivers/hwmon/hwmon.c index 2e2cd79d89eb..4218750d5a66 100644 --- a/drivers/hwmon/hwmon.c +++ b/drivers/hwmon/hwmon.c @@ -151,9 +151,9 @@ static DEFINE_IDA(hwmon_ida); * between hwmon and thermal_sys modules. */ #ifdef CONFIG_THERMAL_OF -static int hwmon_thermal_get_temp(void *data, int *temp) +static int hwmon_thermal_get_temp(struct thermal_zone_device *tz, int *temp) { - struct hwmon_thermal_data *tdata = data; + struct hwmon_thermal_data *tdata = tz->devdata; struct hwmon_device *hwdev = to_hwmon_device(tdata->dev); int ret; long t; @@ -168,9 +168,9 @@ static int hwmon_thermal_get_temp(void *data, int *temp) return 0; } -static int hwmon_thermal_set_trips(void *data, int low, int high) +static int hwmon_thermal_set_trips(struct thermal_zone_device *tz, int low, int high) { - struct hwmon_thermal_data *tdata = data; + struct hwmon_thermal_data *tdata = tz->devdata; struct hwmon_device *hwdev = to_hwmon_device(tdata->dev); const struct hwmon_chip_info *chip = hwdev->chip; const struct hwmon_channel_info **info = chip->info; @@ -203,7 +203,7 @@ static int hwmon_thermal_set_trips(void *data, int low, int high) return 0; } -static const struct thermal_zone_of_device_ops hwmon_thermal_ops = { +static const struct thermal_zone_device_ops hwmon_thermal_ops = { .get_temp = hwmon_thermal_get_temp, .set_trips = hwmon_thermal_set_trips, }; @@ -227,8 +227,8 @@ static int hwmon_thermal_add_sensor(struct device *dev, int index) tdata->dev = dev; tdata->index = index; - tzd = devm_thermal_zone_of_sensor_register(dev, index, tdata, - &hwmon_thermal_ops); + tzd = devm_thermal_of_zone_register(dev, index, tdata, + &hwmon_thermal_ops); if (IS_ERR(tzd)) { if (PTR_ERR(tzd) != -ENODEV) return PTR_ERR(tzd); diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c index 81d3f91dd204..7ec04934747e 100644 --- a/drivers/hwmon/pmbus/pmbus_core.c +++ b/drivers/hwmon/pmbus/pmbus_core.c @@ -1270,9 +1270,9 @@ struct pmbus_thermal_data { struct pmbus_sensor *sensor; }; -static int pmbus_thermal_get_temp(void *data, int *temp) +static int pmbus_thermal_get_temp(struct thermal_zone_device *tz, int *temp) { - struct pmbus_thermal_data *tdata = data; + struct pmbus_thermal_data *tdata = tz->devdata; struct pmbus_sensor *sensor = tdata->sensor; struct pmbus_data *pmbus_data = 
tdata->pmbus_data; struct i2c_client *client = to_i2c_client(pmbus_data->dev); @@ -1296,7 +1296,7 @@ static int pmbus_thermal_get_temp(void *data, int *temp) return ret; } -static const struct thermal_zone_of_device_ops pmbus_thermal_ops = { +static const struct thermal_zone_device_ops pmbus_thermal_ops = { .get_temp = pmbus_thermal_get_temp, }; @@ -1314,8 +1314,8 @@ static int pmbus_thermal_add_sensor(struct pmbus_data *pmbus_data, tdata->sensor = sensor; tdata->pmbus_data = pmbus_data; - tzd = devm_thermal_zone_of_sensor_register(dev, index, tdata, - &pmbus_thermal_ops); + tzd = devm_thermal_of_zone_register(dev, index, tdata, + &pmbus_thermal_ops); /* * If CONFIG_THERMAL_OF is disabled, this returns -ENODEV, * so ignore that error but forward any other error. diff --git a/drivers/hwmon/scpi-hwmon.c b/drivers/hwmon/scpi-hwmon.c index 5187c6dd5a4f..4d75385f7d5e 100644 --- a/drivers/hwmon/scpi-hwmon.c +++ b/drivers/hwmon/scpi-hwmon.c @@ -62,9 +62,9 @@ static void scpi_scale_reading(u64 *value, struct sensor_data *sensor) } } -static int scpi_read_temp(void *dev, int *temp) +static int scpi_read_temp(struct thermal_zone_device *tz, int *temp) { - struct scpi_thermal_zone *zone = dev; + struct scpi_thermal_zone *zone = tz->devdata; struct scpi_sensors *scpi_sensors = zone->scpi_sensors; struct scpi_ops *scpi_ops = scpi_sensors->scpi_ops; struct sensor_data *sensor = &scpi_sensors->data[zone->sensor_id]; @@ -121,7 +121,7 @@ scpi_show_label(struct device *dev, struct device_attribute *attr, char *buf) return sprintf(buf, "%s\n", sensor->info.name); } -static const struct thermal_zone_of_device_ops scpi_sensor_ops = { +static const struct thermal_zone_device_ops scpi_sensor_ops = { .get_temp = scpi_read_temp, }; @@ -275,10 +275,10 @@ static int scpi_hwmon_probe(struct platform_device *pdev) zone->sensor_id = i; zone->scpi_sensors = scpi_sensors; - z = devm_thermal_zone_of_sensor_register(dev, - sensor->info.sensor_id, - zone, - &scpi_sensor_ops); + z = devm_thermal_of_zone_register(dev, + sensor->info.sensor_id, + zone, + &scpi_sensor_ops); /* * The call to thermal_zone_of_sensor_register returns * an error for sensors that are not associated with diff --git a/drivers/i2c/busses/i2c-amd-mp2-plat.c b/drivers/i2c/busses/i2c-amd-mp2-plat.c index 84b7e6cbc67b..423fe0c8a471 100644 --- a/drivers/i2c/busses/i2c-amd-mp2-plat.c +++ b/drivers/i2c/busses/i2c-amd-mp2-plat.c @@ -244,14 +244,18 @@ static const struct i2c_adapter_quirks amd_i2c_dev_quirks = { static int i2c_amd_probe(struct platform_device *pdev) { + struct device *dev = &pdev->dev; int ret; struct amd_i2c_dev *i2c_dev; - struct acpi_device *adev = ACPI_COMPANION(&pdev->dev); struct amd_mp2_dev *mp2_dev; - const char *uid; + u64 uid; - if (!adev) - return -ENODEV; + ret = acpi_dev_uid_to_integer(ACPI_COMPANION(dev), &uid); + if (ret) + return dev_err_probe(dev, ret, "missing UID/bus id!\n"); + if (uid >= 2) + return dev_err_probe(dev, -EINVAL, "incorrect UID/bus id \"%llu\"!\n", uid); + dev_dbg(dev, "bus id is %llu\n", uid); /* The ACPI namespace doesn't contain information about which MP2 PCI * device an AMDI0011 ACPI device is related to, so assume that there's @@ -266,6 +270,7 @@ static int i2c_amd_probe(struct platform_device *pdev) if (!i2c_dev) return -ENOMEM; + i2c_dev->common.bus_id = uid; i2c_dev->common.mp2_dev = mp2_dev; i2c_dev->pdev = pdev; platform_set_drvdata(pdev, i2c_dev); @@ -276,20 +281,6 @@ static int i2c_amd_probe(struct platform_device *pdev) i2c_dev->common.resume = &i2c_amd_resume; #endif - uid = adev->pnp.unique_id; - 
if (!uid) { - dev_err(&pdev->dev, "missing UID/bus id!\n"); - return -EINVAL; - } else if (strcmp(uid, "0") == 0) { - i2c_dev->common.bus_id = 0; - } else if (strcmp(uid, "1") == 0) { - i2c_dev->common.bus_id = 1; - } else { - dev_err(&pdev->dev, "incorrect UID/bus id \"%s\"!\n", uid); - return -EINVAL; - } - dev_dbg(&pdev->dev, "bus id is %u\n", i2c_dev->common.bus_id); - /* Register the adapter */ amd_mp2_pm_runtime_get(mp2_dev); diff --git a/drivers/i2c/busses/i2c-davinci.c b/drivers/i2c/busses/i2c-davinci.c index 471c47db546b..c836cf884185 100644 --- a/drivers/i2c/busses/i2c-davinci.c +++ b/drivers/i2c/busses/i2c-davinci.c @@ -823,7 +823,7 @@ static int davinci_i2c_probe(struct platform_device *pdev) r = pm_runtime_resume_and_get(dev->dev); if (r < 0) { dev_err(dev->dev, "failed to runtime_get device: %d\n", r); - return r; + goto err_pm; } i2c_davinci_init(dev); @@ -882,6 +882,7 @@ static int davinci_i2c_probe(struct platform_device *pdev) err_unuse_clocks: pm_runtime_dont_use_autosuspend(dev->dev); pm_runtime_put_sync(dev->dev); +err_pm: pm_runtime_disable(dev->dev); return r; diff --git a/drivers/i2c/busses/i2c-mlxbf.c b/drivers/i2c/busses/i2c-mlxbf.c index ad5efd7497d1..60908c5737f1 100644 --- a/drivers/i2c/busses/i2c-mlxbf.c +++ b/drivers/i2c/busses/i2c-mlxbf.c @@ -2215,35 +2215,27 @@ MODULE_DEVICE_TABLE(acpi, mlxbf_i2c_acpi_ids); static int mlxbf_i2c_acpi_probe(struct device *dev, struct mlxbf_i2c_priv *priv) { const struct acpi_device_id *aid; - struct acpi_device *adev; - unsigned long bus_id = 0; - const char *uid; + u64 bus_id; int ret; if (acpi_disabled) return -ENOENT; - adev = ACPI_COMPANION(dev); - if (!adev) - return -ENXIO; - aid = acpi_match_device(mlxbf_i2c_acpi_ids, dev); if (!aid) return -ENODEV; priv->chip = (struct mlxbf_i2c_chip_info *)aid->driver_data; - uid = acpi_device_uid(adev); - if (!uid || !(*uid)) { + ret = acpi_dev_uid_to_integer(ACPI_COMPANION(dev), &bus_id); + if (ret) { dev_err(dev, "Cannot retrieve UID\n"); - return -ENODEV; + return ret; } - ret = kstrtoul(uid, 0, &bus_id); - if (!ret) - priv->bus = bus_id; + priv->bus = bus_id; - return ret; + return 0; } #else static int mlxbf_i2c_acpi_probe(struct device *dev, struct mlxbf_i2c_priv *priv) diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c index 3e101719689a..cfeb24d40d37 100644 --- a/drivers/idle/intel_idle.c +++ b/drivers/idle/intel_idle.c @@ -928,6 +928,51 @@ static struct cpuidle_state adl_l_cstates[] __initdata = { .enter = NULL } }; +static struct cpuidle_state adl_n_cstates[] __initdata = { + { + .name = "C1", + .desc = "MWAIT 0x00", + .flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_UNUSABLE, + .exit_latency = 1, + .target_residency = 1, + .enter = &intel_idle, + .enter_s2idle = intel_idle_s2idle, }, + { + .name = "C1E", + .desc = "MWAIT 0x01", + .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE, + .exit_latency = 2, + .target_residency = 4, + .enter = &intel_idle, + .enter_s2idle = intel_idle_s2idle, }, + { + .name = "C6", + .desc = "MWAIT 0x20", + .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED, + .exit_latency = 195, + .target_residency = 585, + .enter = &intel_idle, + .enter_s2idle = intel_idle_s2idle, }, + { + .name = "C8", + .desc = "MWAIT 0x40", + .flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED, + .exit_latency = 260, + .target_residency = 1040, + .enter = &intel_idle, + .enter_s2idle = intel_idle_s2idle, }, + { + .name = "C10", + .desc = "MWAIT 0x60", + .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED, + .exit_latency = 660, + .target_residency = 1980, + 
.enter = &intel_idle, + .enter_s2idle = intel_idle_s2idle, }, + { + .enter = NULL } +}; + static struct cpuidle_state spr_cstates[] __initdata = { { .name = "C1", @@ -1309,6 +1354,10 @@ static const struct idle_cpu idle_cpu_adl_l __initconst = { .state_table = adl_l_cstates, }; +static const struct idle_cpu idle_cpu_adl_n __initconst = { + .state_table = adl_n_cstates, +}; + static const struct idle_cpu idle_cpu_spr __initconst = { .state_table = spr_cstates, .disable_promotion_to_c1e = true, @@ -1379,6 +1428,7 @@ static const struct x86_cpu_id intel_idle_ids[] __initconst = { X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_D, &idle_cpu_icx), X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE, &idle_cpu_adl), X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, &idle_cpu_adl_l), + X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_N, &idle_cpu_adl_n), X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, &idle_cpu_spr), X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNL, &idle_cpu_knl), X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNM, &idle_cpu_knl), @@ -1507,7 +1557,7 @@ static void __init intel_idle_init_cstates_acpi(struct cpuidle_driver *drv) state = &drv->states[drv->state_count++]; snprintf(state->name, CPUIDLE_NAME_LEN, "C%d_ACPI", cstate); - strlcpy(state->desc, cx->desc, CPUIDLE_DESC_LEN); + strscpy(state->desc, cx->desc, CPUIDLE_DESC_LEN); state->exit_latency = cx->latency; /* * For C1-type C-states use the same number for both the exit @@ -1816,6 +1866,7 @@ static void __init intel_idle_init_cstates_icpu(struct cpuidle_driver *drv) break; case INTEL_FAM6_ALDERLAKE: case INTEL_FAM6_ALDERLAKE_L: + case INTEL_FAM6_ALDERLAKE_N: adl_idle_state_table_update(); break; } diff --git a/drivers/iio/adc/sun4i-gpadc-iio.c b/drivers/iio/adc/sun4i-gpadc-iio.c index 2d393a4dfff6..a6ade70dedf8 100644 --- a/drivers/iio/adc/sun4i-gpadc-iio.c +++ b/drivers/iio/adc/sun4i-gpadc-iio.c @@ -412,9 +412,9 @@ static int sun4i_gpadc_runtime_resume(struct device *dev) return 0; } -static int sun4i_gpadc_get_temp(void *data, int *temp) +static int sun4i_gpadc_get_temp(struct thermal_zone_device *tz, int *temp) { - struct sun4i_gpadc_iio *info = data; + struct sun4i_gpadc_iio *info = tz->devdata; int val, scale, offset; if (sun4i_gpadc_temp_read(info->indio_dev, &val)) @@ -428,7 +428,7 @@ static int sun4i_gpadc_get_temp(void *data, int *temp) return 0; } -static const struct thermal_zone_of_device_ops sun4i_ts_tz_ops = { +static const struct thermal_zone_device_ops sun4i_ts_tz_ops = { .get_temp = &sun4i_gpadc_get_temp, }; @@ -637,9 +637,9 @@ static int sun4i_gpadc_probe(struct platform_device *pdev) pm_runtime_enable(&pdev->dev); if (IS_ENABLED(CONFIG_THERMAL_OF)) { - info->tzd = thermal_zone_of_sensor_register(info->sensor_device, - 0, info, - &sun4i_ts_tz_ops); + info->tzd = devm_thermal_of_zone_register(info->sensor_device, + 0, info, + &sun4i_ts_tz_ops); /* * Do not fail driver probing when failing to register in * thermal because no thermal DT node is found. 
@@ -681,8 +681,6 @@ static int sun4i_gpadc_remove(struct platform_device *pdev) if (!IS_ENABLED(CONFIG_THERMAL_OF)) return 0; - thermal_zone_of_sensor_unregister(info->sensor_device, info->tzd); - if (!info->no_irq) iio_map_array_unregister(indio_dev); diff --git a/drivers/input/touchscreen/sun4i-ts.c b/drivers/input/touchscreen/sun4i-ts.c index 742a7e96c1b5..73eb8f80be6e 100644 --- a/drivers/input/touchscreen/sun4i-ts.c +++ b/drivers/input/touchscreen/sun4i-ts.c @@ -192,12 +192,12 @@ static int sun4i_get_temp(const struct sun4i_ts_data *ts, int *temp) return 0; } -static int sun4i_get_tz_temp(void *data, int *temp) +static int sun4i_get_tz_temp(struct thermal_zone_device *tz, int *temp) { - return sun4i_get_temp(data, temp); + return sun4i_get_temp(tz->devdata, temp); } -static const struct thermal_zone_of_device_ops sun4i_ts_tz_ops = { +static const struct thermal_zone_device_ops sun4i_ts_tz_ops = { .get_temp = sun4i_get_tz_temp, }; @@ -356,8 +356,8 @@ static int sun4i_ts_probe(struct platform_device *pdev) if (IS_ERR(hwmon)) return PTR_ERR(hwmon); - thermal = devm_thermal_zone_of_sensor_register(ts->dev, 0, ts, - &sun4i_ts_tz_ops); + thermal = devm_thermal_of_zone_register(ts->dev, 0, ts, + &sun4i_ts_tz_ops); if (IS_ERR(thermal)) return PTR_ERR(thermal); diff --git a/drivers/isdn/mISDN/l1oip.h b/drivers/isdn/mISDN/l1oip.h index 7ea10db20e3a..48133d022812 100644 --- a/drivers/isdn/mISDN/l1oip.h +++ b/drivers/isdn/mISDN/l1oip.h @@ -59,6 +59,7 @@ struct l1oip { int bundle; /* bundle channels in one frm */ int codec; /* codec to use for transmis. */ int limit; /* limit number of bchannels */ + bool shutdown; /* if card is released */ /* timer */ struct timer_list keep_tl; diff --git a/drivers/isdn/mISDN/l1oip_core.c b/drivers/isdn/mISDN/l1oip_core.c index 2c40412466e6..a77195e378b7 100644 --- a/drivers/isdn/mISDN/l1oip_core.c +++ b/drivers/isdn/mISDN/l1oip_core.c @@ -275,7 +275,7 @@ l1oip_socket_send(struct l1oip *hc, u8 localcodec, u8 channel, u32 chanmask, p = frame; /* restart timer */ - if (time_before(hc->keep_tl.expires, jiffies + 5 * HZ)) + if (time_before(hc->keep_tl.expires, jiffies + 5 * HZ) && !hc->shutdown) mod_timer(&hc->keep_tl, jiffies + L1OIP_KEEPALIVE * HZ); else hc->keep_tl.expires = jiffies + L1OIP_KEEPALIVE * HZ; @@ -601,7 +601,9 @@ multiframe: goto multiframe; /* restart timer */ - if (time_before(hc->timeout_tl.expires, jiffies + 5 * HZ) || !hc->timeout_on) { + if ((time_before(hc->timeout_tl.expires, jiffies + 5 * HZ) || + !hc->timeout_on) && + !hc->shutdown) { hc->timeout_on = 1; mod_timer(&hc->timeout_tl, jiffies + L1OIP_TIMEOUT * HZ); } else /* only adjust timer */ @@ -1232,11 +1234,10 @@ release_card(struct l1oip *hc) { int ch; - if (timer_pending(&hc->keep_tl)) - del_timer(&hc->keep_tl); + hc->shutdown = true; - if (timer_pending(&hc->timeout_tl)) - del_timer(&hc->timeout_tl); + del_timer_sync(&hc->keep_tl); + del_timer_sync(&hc->timeout_tl); cancel_work_sync(&hc->workq); diff --git a/drivers/md/dm-verity-loadpin.c b/drivers/md/dm-verity-loadpin.c index 387ec43aef72..4f78cc55c251 100644 --- a/drivers/md/dm-verity-loadpin.c +++ b/drivers/md/dm-verity-loadpin.c @@ -14,6 +14,7 @@ LIST_HEAD(dm_verity_loadpin_trusted_root_digests); static bool is_trusted_verity_target(struct dm_target *ti) { + int verity_mode; u8 *root_digest; unsigned int digest_size; struct dm_verity_loadpin_trusted_root_digest *trd; @@ -22,6 +23,13 @@ static bool is_trusted_verity_target(struct dm_target *ti) if (!dm_is_verity_target(ti)) return false; + verity_mode = dm_verity_get_mode(ti); + + if 
((verity_mode != DM_VERITY_MODE_EIO) && + (verity_mode != DM_VERITY_MODE_RESTART) && + (verity_mode != DM_VERITY_MODE_PANIC)) + return false; + if (dm_verity_get_root_digest(ti, &root_digest, &digest_size)) return false; diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c index 94b6cb599db4..8a00cc42e498 100644 --- a/drivers/md/dm-verity-target.c +++ b/drivers/md/dm-verity-target.c @@ -1447,6 +1447,22 @@ bool dm_is_verity_target(struct dm_target *ti) } /* + * Get the verity mode (error behavior) of a verity target. + * + * Returns the verity mode of the target, or -EINVAL if 'ti' is not a verity + * target. + */ +int dm_verity_get_mode(struct dm_target *ti) +{ + struct dm_verity *v = ti->private; + + if (!dm_is_verity_target(ti)) + return -EINVAL; + + return v->mode; +} + +/* * Get the root digest of a verity target. * * Returns a copy of the root digest, the caller is responsible for diff --git a/drivers/md/dm-verity.h b/drivers/md/dm-verity.h index 45455de1b4bc..98f306ec6a33 100644 --- a/drivers/md/dm-verity.h +++ b/drivers/md/dm-verity.h @@ -134,6 +134,7 @@ extern int verity_hash_for_block(struct dm_verity *v, struct dm_verity_io *io, sector_t block, u8 *digest, bool *is_zero); extern bool dm_is_verity_target(struct dm_target *ti); +extern int dm_verity_get_mode(struct dm_target *ti); extern int dm_verity_get_root_digest(struct dm_target *ti, u8 **root_digest, unsigned int *digest_size); diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c index 742b2349fea3..10e0c5381d01 100644 --- a/drivers/md/md-cluster.c +++ b/drivers/md/md-cluster.c @@ -876,8 +876,8 @@ static int join(struct mddev *mddev, int nodes) memset(str, 0, 64); sprintf(str, "%pU", mddev->uuid); ret = dlm_new_lockspace(str, mddev->bitmap_info.cluster_name, - DLM_LSFL_FS, LVB_SIZE, - &md_ls_ops, mddev, &ops_rv, &cinfo->lockspace); + 0, LVB_SIZE, &md_ls_ops, mddev, + &ops_rv, &cinfo->lockspace); if (ret) goto err; wait_for_completion(&cinfo->completion); diff --git a/drivers/media/dvb-core/dvb_vb2.c b/drivers/media/dvb-core/dvb_vb2.c index a1bd6d9c9223..909df82fed33 100644 --- a/drivers/media/dvb-core/dvb_vb2.c +++ b/drivers/media/dvb-core/dvb_vb2.c @@ -354,6 +354,12 @@ int dvb_vb2_reqbufs(struct dvb_vb2_ctx *ctx, struct dmx_requestbuffers *req) int dvb_vb2_querybuf(struct dvb_vb2_ctx *ctx, struct dmx_buffer *b) { + struct vb2_queue *q = &ctx->vb_q; + + if (b->index >= q->num_buffers) { + dprintk(1, "[%s] buffer index out of range\n", ctx->name); + return -EINVAL; + } vb2_core_querybuf(&ctx->vb_q, b->index, b); dprintk(3, "[%s] index=%d\n", ctx->name, b->index); return 0; @@ -378,8 +384,13 @@ int dvb_vb2_expbuf(struct dvb_vb2_ctx *ctx, struct dmx_exportbuffer *exp) int dvb_vb2_qbuf(struct dvb_vb2_ctx *ctx, struct dmx_buffer *b) { + struct vb2_queue *q = &ctx->vb_q; int ret; + if (b->index >= q->num_buffers) { + dprintk(1, "[%s] buffer index out of range\n", ctx->name); + return -EINVAL; + } ret = vb2_core_qbuf(&ctx->vb_q, b->index, b, NULL); if (ret) { dprintk(1, "[%s] index=%d errno=%d\n", ctx->name, diff --git a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_enc_drv.c b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_enc_drv.c index 95e8c29ccc65..d2f5f30582a9 100644 --- a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_enc_drv.c +++ b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_enc_drv.c @@ -228,7 +228,6 @@ static int mtk_vcodec_probe(struct platform_device *pdev) { struct mtk_vcodec_dev *dev; struct video_device *vfd_enc; - struct resource *res; phandle rproc_phandle; enum 
mtk_vcodec_fw_type fw_type; int ret; @@ -272,14 +271,12 @@ static int mtk_vcodec_probe(struct platform_device *pdev) goto err_res; } - res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); - if (res == NULL) { - dev_err(&pdev->dev, "failed to get irq resource"); - ret = -ENOENT; + dev->enc_irq = platform_get_irq(pdev, 0); + if (dev->enc_irq < 0) { + ret = dev->enc_irq; goto err_res; } - dev->enc_irq = platform_get_irq(pdev, 0); irq_set_status_flags(dev->enc_irq, IRQ_NOAUTOEN); ret = devm_request_irq(&pdev->dev, dev->enc_irq, mtk_vcodec_enc_irq_handler, diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c index 9c05776f11d1..d509a4a2f08e 100644 --- a/drivers/media/usb/uvc/uvc_driver.c +++ b/drivers/media/usb/uvc/uvc_driver.c @@ -2740,7 +2740,7 @@ static const struct usb_device_id uvc_ids[] = { .idProduct = 0x4034, .bInterfaceClass = USB_CLASS_VIDEO, .bInterfaceSubClass = 1, - .bInterfaceProtocol = 0, + .bInterfaceProtocol = UVC_PC_PROTOCOL_15, .driver_info = (kernel_ulong_t)&uvc_ctrl_power_line_limited }, /* LogiLink Wireless Webcam */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c index 0f3d6b5667b0..55c26e7d370e 100644 --- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c +++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c @@ -1040,6 +1040,8 @@ int v4l2_compat_get_array_args(struct file *file, void *mbuf, { int err = 0; + memset(mbuf, 0, array_size); + switch (cmd) { case VIDIOC_G_FMT32: case VIDIOC_S_FMT32: diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c index c314025d977e..e6fd355a2e92 100644 --- a/drivers/media/v4l2-core/v4l2-ioctl.c +++ b/drivers/media/v4l2-core/v4l2-ioctl.c @@ -2872,9 +2872,9 @@ static const struct v4l2_ioctl_info v4l2_ioctls[] = { IOCTL_INFO(VIDIOC_S_PRIORITY, v4l_s_priority, v4l_print_u32, INFO_FL_PRIO), IOCTL_INFO(VIDIOC_G_SLICED_VBI_CAP, v4l_g_sliced_vbi_cap, v4l_print_sliced_vbi_cap, INFO_FL_CLEAR(v4l2_sliced_vbi_cap, type)), IOCTL_INFO(VIDIOC_LOG_STATUS, v4l_log_status, v4l_print_newline, 0), - IOCTL_INFO(VIDIOC_G_EXT_CTRLS, v4l_g_ext_ctrls, v4l_print_ext_controls, INFO_FL_CTRL), - IOCTL_INFO(VIDIOC_S_EXT_CTRLS, v4l_s_ext_ctrls, v4l_print_ext_controls, INFO_FL_PRIO | INFO_FL_CTRL), - IOCTL_INFO(VIDIOC_TRY_EXT_CTRLS, v4l_try_ext_ctrls, v4l_print_ext_controls, INFO_FL_CTRL), + IOCTL_INFO(VIDIOC_G_EXT_CTRLS, v4l_g_ext_ctrls, v4l_print_ext_controls, INFO_FL_CTRL | INFO_FL_ALWAYS_COPY), + IOCTL_INFO(VIDIOC_S_EXT_CTRLS, v4l_s_ext_ctrls, v4l_print_ext_controls, INFO_FL_PRIO | INFO_FL_CTRL | INFO_FL_ALWAYS_COPY), + IOCTL_INFO(VIDIOC_TRY_EXT_CTRLS, v4l_try_ext_ctrls, v4l_print_ext_controls, INFO_FL_CTRL | INFO_FL_ALWAYS_COPY), IOCTL_INFO(VIDIOC_ENUM_FRAMESIZES, v4l_stub_enum_framesizes, v4l_print_frmsizeenum, INFO_FL_CLEAR(v4l2_frmsizeenum, pixel_format)), IOCTL_INFO(VIDIOC_ENUM_FRAMEINTERVALS, v4l_stub_enum_frameintervals, v4l_print_frmivalenum, INFO_FL_CLEAR(v4l2_frmivalenum, height)), IOCTL_INFO(VIDIOC_G_ENC_INDEX, v4l_stub_g_enc_index, v4l_print_enc_idx, 0), @@ -3367,8 +3367,7 @@ video_usercopy(struct file *file, unsigned int orig_cmd, unsigned long arg, array_buf = kvmalloc(array_size, GFP_KERNEL); err = -ENOMEM; if (array_buf == NULL) - goto out_array_args; - err = -EFAULT; + goto out; if (in_compat_syscall()) err = v4l2_compat_get_array_args(file, array_buf, user_ptr, array_size, @@ -3377,7 +3376,7 @@ video_usercopy(struct file *file, unsigned int orig_cmd, unsigned long arg, err = 
copy_from_user(array_buf, user_ptr, array_size) ? -EFAULT : 0; if (err) - goto out_array_args; + goto out; *kernel_ptr = array_buf; } @@ -3395,6 +3394,13 @@ video_usercopy(struct file *file, unsigned int orig_cmd, unsigned long arg, trace_v4l2_qbuf(video_devdata(file)->minor, parg); } + /* + * Some ioctls can return an error, but still have valid + * results that must be returned. + */ + if (err < 0 && !always_copy) + goto out; + if (has_array_args) { *kernel_ptr = (void __force *)user_ptr; if (in_compat_syscall()) { @@ -3409,16 +3415,8 @@ video_usercopy(struct file *file, unsigned int orig_cmd, unsigned long arg, } else if (copy_to_user(user_ptr, array_buf, array_size)) { err = -EFAULT; } - goto out_array_args; } - /* - * Some ioctls can return an error, but still have valid - * results that must be returned. - */ - if (err < 0 && !always_copy) - goto out; -out_array_args: if (video_put_user((void __user *)arg, parg, cmd, orig_cmd)) err = -EFAULT; out: diff --git a/drivers/misc/lkdtm/cfi.c b/drivers/misc/lkdtm/cfi.c index 71483cb1e422..5245cf6013c9 100644 --- a/drivers/misc/lkdtm/cfi.c +++ b/drivers/misc/lkdtm/cfi.c @@ -20,6 +20,13 @@ static noinline int lkdtm_increment_int(int *counter) return *counter; } + +/* Don't allow the compiler to inline the calls. */ +static noinline void lkdtm_indirect_call(void (*func)(int *)) +{ + func(&called_count); +} + /* * This tries to call an indirect function with a mismatched prototype. */ @@ -29,15 +36,11 @@ static void lkdtm_CFI_FORWARD_PROTO(void) * Matches lkdtm_increment_void()'s prototype, but not * lkdtm_increment_int()'s prototype. */ - void (*func)(int *); - pr_info("Calling matched prototype ...\n"); - func = lkdtm_increment_void; - func(&called_count); + lkdtm_indirect_call(lkdtm_increment_void); pr_info("Calling mismatched prototype ...\n"); - func = (void *)lkdtm_increment_int; - func(&called_count); + lkdtm_indirect_call((void *)lkdtm_increment_int); pr_err("FAIL: survived mismatched prototype function call!\n"); pr_expected_config(CONFIG_CFI_CLANG); diff --git a/drivers/misc/lkdtm/fortify.c b/drivers/misc/lkdtm/fortify.c index 080293fa3c52..015927665678 100644 --- a/drivers/misc/lkdtm/fortify.c +++ b/drivers/misc/lkdtm/fortify.c @@ -10,28 +10,31 @@ static volatile int fortify_scratch_space; -static void lkdtm_FORTIFIED_OBJECT(void) +static void lkdtm_FORTIFY_STR_OBJECT(void) { struct target { char a[10]; - } target[2] = {}; + int foo; + } target[3] = {}; /* * Using volatile prevents the compiler from determining the value of * 'size' at compile time. Without that, we would get a compile error * rather than a runtime error. 
*/ - volatile int size = 11; + volatile int size = 20; + + pr_info("trying to strncpy() past the end of a struct\n"); - pr_info("trying to read past the end of a struct\n"); + strncpy(target[0].a, target[1].a, size); /* Store result to global to prevent the code from being eliminated */ - fortify_scratch_space = memcmp(&target[0], &target[1], size); + fortify_scratch_space = target[0].a[3]; - pr_err("FAIL: fortify did not block an object overread!\n"); + pr_err("FAIL: fortify did not block a strncpy() object write overflow!\n"); pr_expected_config(CONFIG_FORTIFY_SOURCE); } -static void lkdtm_FORTIFIED_SUBOBJECT(void) +static void lkdtm_FORTIFY_STR_MEMBER(void) { struct target { char a[10]; @@ -44,7 +47,7 @@ static void lkdtm_FORTIFIED_SUBOBJECT(void) strscpy(src, "over ten bytes", size); size = strlen(src) + 1; - pr_info("trying to strncpy past the end of a member of a struct\n"); + pr_info("trying to strncpy() past the end of a struct member...\n"); /* * strncpy(target.a, src, 20); will hit a compile error because the @@ -56,7 +59,72 @@ static void lkdtm_FORTIFIED_SUBOBJECT(void) /* Store result to global to prevent the code from being eliminated */ fortify_scratch_space = target.a[3]; - pr_err("FAIL: fortify did not block an sub-object overrun!\n"); + pr_err("FAIL: fortify did not block a strncpy() struct member write overflow!\n"); + pr_expected_config(CONFIG_FORTIFY_SOURCE); + + kfree(src); +} + +static void lkdtm_FORTIFY_MEM_OBJECT(void) +{ + int before[10]; + struct target { + char a[10]; + int foo; + } target = {}; + int after[10]; + /* + * Using volatile prevents the compiler from determining the value of + * 'size' at compile time. Without that, we would get a compile error + * rather than a runtime error. + */ + volatile int size = 20; + + memset(before, 0, sizeof(before)); + memset(after, 0, sizeof(after)); + fortify_scratch_space = before[5]; + fortify_scratch_space = after[5]; + + pr_info("trying to memcpy() past the end of a struct\n"); + + pr_info("0: %zu\n", __builtin_object_size(&target, 0)); + pr_info("1: %zu\n", __builtin_object_size(&target, 1)); + pr_info("s: %d\n", size); + memcpy(&target, &before, size); + + /* Store result to global to prevent the code from being eliminated */ + fortify_scratch_space = target.a[3]; + + pr_err("FAIL: fortify did not block a memcpy() object write overflow!\n"); + pr_expected_config(CONFIG_FORTIFY_SOURCE); +} + +static void lkdtm_FORTIFY_MEM_MEMBER(void) +{ + struct target { + char a[10]; + char b[10]; + } target; + volatile int size = 20; + char *src; + + src = kmalloc(size, GFP_KERNEL); + strscpy(src, "over ten bytes", size); + size = strlen(src) + 1; + + pr_info("trying to memcpy() past the end of a struct member...\n"); + + /* + * memcpy(target.a, src, 20); will hit a compile error because the + * compiler knows at build time that target.a < 20 bytes. Use a + * volatile to force a runtime error. + */ + memcpy(target.a, src, size); + + /* Store result to global to prevent the code from being eliminated */ + fortify_scratch_space = target.a[3]; + + pr_err("FAIL: fortify did not block a memcpy() struct member write overflow!\n"); pr_expected_config(CONFIG_FORTIFY_SOURCE); kfree(src); @@ -67,7 +135,7 @@ static void lkdtm_FORTIFIED_SUBOBJECT(void) * strscpy and generate a panic because there is a write overflow (i.e. src * length is greater than dst length).
*/ -static void lkdtm_FORTIFIED_STRSCPY(void) +static void lkdtm_FORTIFY_STRSCPY(void) { char *src; char dst[5]; @@ -136,9 +204,11 @@ static void lkdtm_FORTIFIED_STRSCPY(void) } static struct crashtype crashtypes[] = { - CRASHTYPE(FORTIFIED_OBJECT), - CRASHTYPE(FORTIFIED_SUBOBJECT), - CRASHTYPE(FORTIFIED_STRSCPY), + CRASHTYPE(FORTIFY_STR_OBJECT), + CRASHTYPE(FORTIFY_STR_MEMBER), + CRASHTYPE(FORTIFY_MEM_OBJECT), + CRASHTYPE(FORTIFY_MEM_MEMBER), + CRASHTYPE(FORTIFY_STRSCPY), }; struct crashtype_category fortify_crashtypes = { diff --git a/drivers/misc/lkdtm/usercopy.c b/drivers/misc/lkdtm/usercopy.c index 6215ec995cd3..67db57249a34 100644 --- a/drivers/misc/lkdtm/usercopy.c +++ b/drivers/misc/lkdtm/usercopy.c @@ -330,7 +330,7 @@ static void lkdtm_USERCOPY_KERNEL(void) pr_info("attempting bad copy_to_user from kernel text: %px\n", vm_mmap); - if (copy_to_user((void __user *)user_addr, function_nocfi(vm_mmap), + if (copy_to_user((void __user *)user_addr, vm_mmap, unconst + PAGE_SIZE)) { pr_warn("copy_to_user failed, but lacked Oops\n"); goto free_user; diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index 94c889802566..15d4a38b1351 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -500,6 +500,8 @@ config NET_SB1000 source "drivers/net/phy/Kconfig" +source "drivers/net/pse-pd/Kconfig" + source "drivers/net/can/Kconfig" source "drivers/net/mctp/Kconfig" diff --git a/drivers/net/Makefile b/drivers/net/Makefile index 3f1192d3c52d..6ce076462dbf 100644 --- a/drivers/net/Makefile +++ b/drivers/net/Makefile @@ -23,6 +23,7 @@ obj-$(CONFIG_NET) += loopback.o obj-$(CONFIG_NETDEV_LEGACY_INIT) += Space.o obj-$(CONFIG_NETCONSOLE) += netconsole.o obj-y += phy/ +obj-y += pse-pd/ obj-y += mdio/ obj-y += pcs/ obj-$(CONFIG_RIONET) += rionet.o diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c index 80bb8424e9a4..d30d11872719 100644 --- a/drivers/net/ethernet/atheros/alx/main.c +++ b/drivers/net/ethernet/atheros/alx/main.c @@ -1912,11 +1912,14 @@ static int alx_suspend(struct device *dev) if (!netif_running(alx->dev)) return 0; + + rtnl_lock(); netif_device_detach(alx->dev); mutex_lock(&alx->mtx); __alx_stop(alx); mutex_unlock(&alx->mtx); + rtnl_unlock(); return 0; } @@ -1927,6 +1930,7 @@ static int alx_resume(struct device *dev) struct alx_hw *hw = &alx->hw; int err; + rtnl_lock(); mutex_lock(&alx->mtx); alx_reset_phy(hw); @@ -1943,6 +1947,7 @@ static int alx_resume(struct device *dev) unlock: mutex_unlock(&alx->mtx); + rtnl_unlock(); return err; } diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index b6eb3ce976f0..16c490692f42 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -787,6 +787,7 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp, BNX2X_ERR("skb_put is about to fail... 
pad %d len %d rx_buf_size %d\n", pad, len, fp->rx_buf_size); bnx2x_panic(); + bnx2x_frag_free(fp, new_data); return; } #endif diff --git a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c index 369bfd376d6f..edde0b8fa49c 100644 --- a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c +++ b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c @@ -1396,8 +1396,8 @@ static int octeon_mgmt_probe(struct platform_device *pdev) platform_set_drvdata(pdev, netdev); p = netdev_priv(netdev); - netif_napi_add(netdev, &p->napi, octeon_mgmt_napi_poll, - OCTEON_MGMT_NAPI_WEIGHT); + netif_napi_add_weight(netdev, &p->napi, octeon_mgmt_napi_poll, + OCTEON_MGMT_NAPI_WEIGHT); p->netdev = netdev; p->dev = &pdev->dev; diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig index b7bf45cec29d..ce866ae3df03 100644 --- a/drivers/net/ethernet/freescale/Kconfig +++ b/drivers/net/ethernet/freescale/Kconfig @@ -28,6 +28,7 @@ config FEC depends on PTP_1588_CLOCK_OPTIONAL select CRC32 select PHYLIB + select PAGE_POOL imply NET_SELFTESTS help Say Y here if you want to use the built-in 10/100 Fast ethernet diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h index b0100fe3c9e4..33f84a30e167 100644 --- a/drivers/net/ethernet/freescale/fec.h +++ b/drivers/net/ethernet/freescale/fec.h @@ -17,6 +17,7 @@ #include <linux/clocksource.h> #include <linux/net_tstamp.h> #include <linux/pm_qos.h> +#include <linux/bpf.h> #include <linux/ptp_clock_kernel.h> #include <linux/timecounter.h> #include <dt-bindings/firmware/imx/rsrc.h> @@ -346,8 +347,11 @@ struct bufdesc_ex { * the skbuffer directly. */ +#define FEC_ENET_XDP_HEADROOM (XDP_PACKET_HEADROOM) + #define FEC_ENET_RX_PAGES 256 -#define FEC_ENET_RX_FRSIZE 2048 +#define FEC_ENET_RX_FRSIZE (PAGE_SIZE - FEC_ENET_XDP_HEADROOM \ + - SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) #define FEC_ENET_RX_FRPPG (PAGE_SIZE / FEC_ENET_RX_FRSIZE) #define RX_RING_SIZE (FEC_ENET_RX_FRPPG * FEC_ENET_RX_PAGES) #define FEC_ENET_TX_FRSIZE 2048 @@ -517,6 +521,12 @@ struct bufdesc_prop { unsigned char dsize_log2; }; +struct fec_enet_priv_txrx_info { + int offset; + struct page *page; + struct sk_buff *skb; +}; + struct fec_enet_priv_tx_q { struct bufdesc_prop bd; unsigned char *tx_bounce[TX_RING_SIZE]; @@ -532,7 +542,14 @@ struct fec_enet_priv_tx_q { struct fec_enet_priv_rx_q { struct bufdesc_prop bd; - struct sk_buff *rx_skbuff[RX_RING_SIZE]; + struct fec_enet_priv_txrx_info rx_skb_info[RX_RING_SIZE]; + + /* page_pool */ + struct page_pool *page_pool; + struct xdp_rxq_info xdp_rxq; + + /* rx queue number, in the range 0-7 */ + u8 id; }; struct fec_stop_mode_gpr { diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index ff1950e96c6c..98d5cd313fdd 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -66,6 +66,8 @@ #include <linux/mfd/syscon.h> #include <linux/regmap.h> #include <soc/imx/cpuidle.h> +#include <linux/filter.h> +#include <linux/bpf.h> #include <asm/cacheflush.h> @@ -422,6 +424,48 @@ fec_enet_clear_csum(struct sk_buff *skb, struct net_device *ndev) return 0; } +static int +fec_enet_create_page_pool(struct fec_enet_private *fep, + struct fec_enet_priv_rx_q *rxq, int size) +{ + struct page_pool_params pp_params = { + .order = 0, + .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV, + .pool_size = size, + .nid = dev_to_node(&fep->pdev->dev), + .dev = &fep->pdev->dev, + .dma_dir = 
DMA_FROM_DEVICE, + .offset = FEC_ENET_XDP_HEADROOM, + .max_len = FEC_ENET_RX_FRSIZE, + }; + int err; + + rxq->page_pool = page_pool_create(&pp_params); + if (IS_ERR(rxq->page_pool)) { + err = PTR_ERR(rxq->page_pool); + rxq->page_pool = NULL; + return err; + } + + err = xdp_rxq_info_reg(&rxq->xdp_rxq, fep->netdev, rxq->id, 0); + if (err < 0) + goto err_free_pp; + + err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq, MEM_TYPE_PAGE_POOL, + rxq->page_pool); + if (err) + goto err_unregister_rxq; + + return 0; + +err_unregister_rxq: + xdp_rxq_info_unreg(&rxq->xdp_rxq); +err_free_pp: + page_pool_destroy(rxq->page_pool); + rxq->page_pool = NULL; + return err; +} + static struct bufdesc * fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb, @@ -1450,7 +1494,7 @@ static void fec_enet_tx(struct net_device *ndev) fec_enet_tx_queue(ndev, i); } -static int +static int __maybe_unused fec_enet_new_rxbdp(struct net_device *ndev, struct bufdesc *bdp, struct sk_buff *skb) { struct fec_enet_private *fep = netdev_priv(ndev); @@ -1470,8 +1514,9 @@ fec_enet_new_rxbdp(struct net_device *ndev, struct bufdesc *bdp, struct sk_buff return 0; } -static bool fec_enet_copybreak(struct net_device *ndev, struct sk_buff **skb, - struct bufdesc *bdp, u32 length, bool swap) +static bool __maybe_unused +fec_enet_copybreak(struct net_device *ndev, struct sk_buff **skb, + struct bufdesc *bdp, u32 length, bool swap) { struct fec_enet_private *fep = netdev_priv(ndev); struct sk_buff *new_skb; @@ -1496,6 +1541,21 @@ static bool fec_enet_copybreak(struct net_device *ndev, struct sk_buff **skb, return true; } +static void fec_enet_update_cbd(struct fec_enet_priv_rx_q *rxq, + struct bufdesc *bdp, int index) +{ + struct page *new_page; + dma_addr_t phys_addr; + + new_page = page_pool_dev_alloc_pages(rxq->page_pool); + WARN_ON(!new_page); + rxq->rx_skb_info[index].page = new_page; + + rxq->rx_skb_info[index].offset = FEC_ENET_XDP_HEADROOM; + phys_addr = page_pool_get_dma_addr(new_page) + FEC_ENET_XDP_HEADROOM; + bdp->cbd_bufaddr = cpu_to_fec32(phys_addr); +} + /* During a receive, the bd_rx.cur points to the current incoming buffer. * When we update through the ring, if the next incoming buffer has * not been given to the system, we just set the empty indicator, @@ -1508,7 +1568,6 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id) struct fec_enet_priv_rx_q *rxq; struct bufdesc *bdp; unsigned short status; - struct sk_buff *skb_new = NULL; struct sk_buff *skb; ushort pkt_len; __u8 *data; @@ -1517,8 +1576,8 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id) bool vlan_packet_rcvd = false; u16 vlan_tag; int index = 0; - bool is_copybreak; bool need_swap = fep->quirks & FEC_QUIRK_SWAP_FRAME; + struct page *page; #ifdef CONFIG_M532x flush_cache_all(); @@ -1570,31 +1629,25 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id) ndev->stats.rx_bytes += pkt_len; index = fec_enet_get_bd_index(bdp, &rxq->bd); - skb = rxq->rx_skbuff[index]; + page = rxq->rx_skb_info[index].page; + dma_sync_single_for_cpu(&fep->pdev->dev, + fec32_to_cpu(bdp->cbd_bufaddr), + pkt_len, + DMA_FROM_DEVICE); + prefetch(page_address(page)); + fec_enet_update_cbd(rxq, bdp, index); /* The packet length includes FCS, but we don't want to * include that when passing upstream as it messes up * bridging applications. 
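The rewritten FEC receive path above follows the standard page_pool recipe: sync the filled buffer for the CPU, immediately refill the descriptor with a fresh pool page, then wrap the old page in an skb that recycles straight back into the pool when freed. A condensed sketch of that per-packet sequence (hypothetical helper under the same assumptions; ring bookkeeping and error accounting elided):

	#include <linux/dma-mapping.h>
	#include <linux/etherdevice.h>
	#include <linux/skbuff.h>
	#include <net/page_pool.h>

	static struct sk_buff *rx_build_one(struct page_pool *pool, struct device *dev,
					    struct page *page, u32 pkt_len, u32 headroom)
	{
		struct sk_buff *skb;

		/* make the device-written payload visible to the CPU */
		dma_sync_single_for_cpu(dev, page_pool_get_dma_addr(page) + headroom,
					pkt_len, DMA_FROM_DEVICE);

		skb = build_skb(page_address(page), PAGE_SIZE);
		if (unlikely(!skb))
			return NULL;

		skb_reserve(skb, headroom);		/* skip the XDP headroom */
		skb_put(skb, pkt_len - ETH_FCS_LEN);	/* drop the trailing FCS */
		skb_mark_for_recycle(skb);		/* page returns to the pool on free */
		return skb;
	}
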
*/ - is_copybreak = fec_enet_copybreak(ndev, &skb, bdp, pkt_len - 4, - need_swap); - if (!is_copybreak) { - skb_new = netdev_alloc_skb(ndev, FEC_ENET_RX_FRSIZE); - if (unlikely(!skb_new)) { - ndev->stats.rx_dropped++; - goto rx_processing_done; - } - dma_unmap_single(&fep->pdev->dev, - fec32_to_cpu(bdp->cbd_bufaddr), - FEC_ENET_RX_FRSIZE - fep->rx_align, - DMA_FROM_DEVICE); - } - - prefetch(skb->data - NET_IP_ALIGN); + skb = build_skb(page_address(page), PAGE_SIZE); + skb_reserve(skb, FEC_ENET_XDP_HEADROOM); skb_put(skb, pkt_len - 4); + skb_mark_for_recycle(skb); data = skb->data; - if (!is_copybreak && need_swap) + if (need_swap) swap_buffer(data, pkt_len); #if !defined(CONFIG_M5272) @@ -1649,16 +1702,6 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id) skb_record_rx_queue(skb, queue_id); napi_gro_receive(&fep->napi, skb); - if (is_copybreak) { - dma_sync_single_for_device(&fep->pdev->dev, - fec32_to_cpu(bdp->cbd_bufaddr), - FEC_ENET_RX_FRSIZE - fep->rx_align, - DMA_FROM_DEVICE); - } else { - rxq->rx_skbuff[index] = skb_new; - fec_enet_new_rxbdp(ndev, bdp, skb_new); - } - rx_processing_done: /* Clear the status flags for this buffer */ status &= ~BD_ENET_RX_STATS; @@ -3002,26 +3045,19 @@ static void fec_enet_free_buffers(struct net_device *ndev) struct fec_enet_private *fep = netdev_priv(ndev); unsigned int i; struct sk_buff *skb; - struct bufdesc *bdp; struct fec_enet_priv_tx_q *txq; struct fec_enet_priv_rx_q *rxq; unsigned int q; for (q = 0; q < fep->num_rx_queues; q++) { rxq = fep->rx_queue[q]; - bdp = rxq->bd.base; - for (i = 0; i < rxq->bd.ring_size; i++) { - skb = rxq->rx_skbuff[i]; - rxq->rx_skbuff[i] = NULL; - if (skb) { - dma_unmap_single(&fep->pdev->dev, - fec32_to_cpu(bdp->cbd_bufaddr), - FEC_ENET_RX_FRSIZE - fep->rx_align, - DMA_FROM_DEVICE); - dev_kfree_skb(skb); - } - bdp = fec_enet_get_nextdesc(bdp, &rxq->bd); - } + for (i = 0; i < rxq->bd.ring_size; i++) + page_pool_release_page(rxq->page_pool, rxq->rx_skb_info[i].page); + + if (xdp_rxq_info_is_reg(&rxq->xdp_rxq)) + xdp_rxq_info_unreg(&rxq->xdp_rxq); + page_pool_destroy(rxq->page_pool); + rxq->page_pool = NULL; } for (q = 0; q < fep->num_tx_queues; q++) { @@ -3111,24 +3147,31 @@ static int fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue) { struct fec_enet_private *fep = netdev_priv(ndev); - unsigned int i; - struct sk_buff *skb; - struct bufdesc *bdp; struct fec_enet_priv_rx_q *rxq; + dma_addr_t phys_addr; + struct bufdesc *bdp; + struct page *page; + int i, err; rxq = fep->rx_queue[queue]; bdp = rxq->bd.base; + + err = fec_enet_create_page_pool(fep, rxq, rxq->bd.ring_size); + if (err < 0) { + netdev_err(ndev, "%s failed queue %d (%d)\n", __func__, queue, err); + return err; + } + for (i = 0; i < rxq->bd.ring_size; i++) { - skb = __netdev_alloc_skb(ndev, FEC_ENET_RX_FRSIZE, GFP_KERNEL); - if (!skb) + page = page_pool_dev_alloc_pages(rxq->page_pool); + if (!page) goto err_alloc; - if (fec_enet_new_rxbdp(ndev, bdp, skb)) { - dev_kfree_skb(skb); - goto err_alloc; - } + phys_addr = page_pool_get_dma_addr(page) + FEC_ENET_XDP_HEADROOM; + bdp->cbd_bufaddr = cpu_to_fec32(phys_addr); - rxq->rx_skbuff[i] = skb; + rxq->rx_skb_info[i].page = page; + rxq->rx_skb_info[i].offset = FEC_ENET_XDP_HEADROOM; bdp->cbd_sc = cpu_to_fec16(BD_ENET_RX_EMPTY); if (fep->bufdesc_ex) { diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h index ad73a488fc5f..11e603686a27 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h +++ 
b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h @@ -1530,6 +1530,7 @@ u32 mvpp2_read(struct mvpp2 *priv, u32 offset); void mvpp2_dbgfs_init(struct mvpp2 *priv, const char *name); void mvpp2_dbgfs_cleanup(struct mvpp2 *priv); +void mvpp2_dbgfs_exit(void); void mvpp23_rx_fifo_fc_en(struct mvpp2 *priv, int port, bool en); diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c index 4a3baa7e0142..75e83ea2a926 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c @@ -691,6 +691,13 @@ static int mvpp2_dbgfs_port_init(struct dentry *parent, return 0; } +static struct dentry *mvpp2_root; + +void mvpp2_dbgfs_exit(void) +{ + debugfs_remove(mvpp2_root); +} + void mvpp2_dbgfs_cleanup(struct mvpp2 *priv) { debugfs_remove_recursive(priv->dbgfs_dir); @@ -700,10 +707,9 @@ void mvpp2_dbgfs_cleanup(struct mvpp2 *priv) void mvpp2_dbgfs_init(struct mvpp2 *priv, const char *name) { - struct dentry *mvpp2_dir, *mvpp2_root; + struct dentry *mvpp2_dir; int ret, i; - mvpp2_root = debugfs_lookup(MVPP2_DRIVER_NAME, NULL); if (!mvpp2_root) mvpp2_root = debugfs_create_dir(MVPP2_DRIVER_NAME, NULL); diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c index daa890d6993e..eb0fb8128096 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c @@ -7704,7 +7704,18 @@ static struct platform_driver mvpp2_driver = { }, }; -module_platform_driver(mvpp2_driver); +static int __init mvpp2_driver_init(void) +{ + return platform_driver_register(&mvpp2_driver); +} +module_init(mvpp2_driver_init); + +static void __exit mvpp2_driver_exit(void) +{ + platform_driver_unregister(&mvpp2_driver); + mvpp2_dbgfs_exit(); +} +module_exit(mvpp2_driver_exit); MODULE_DESCRIPTION("Marvell PPv2 Ethernet Driver - www.marvell.com"); MODULE_AUTHOR("Marcin Wojtas <mw@semihalf.com>"); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/Makefile b/drivers/net/ethernet/marvell/octeontx2/af/Makefile index 40203560b291..3cf4c8285c90 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/Makefile +++ b/drivers/net/ethernet/marvell/octeontx2/af/Makefile @@ -11,4 +11,4 @@ rvu_mbox-y := mbox.o rvu_trace.o rvu_af-y := cgx.o rvu.o rvu_cgx.o rvu_npa.o rvu_nix.o \ rvu_reg.o rvu_npc.o rvu_debugfs.o ptp.o rvu_npc_fs.o \ rvu_cpt.o rvu_devlink.o rpm.o rvu_cn10k.o rvu_switch.o \ - rvu_sdp.o rvu_npc_hash.o + rvu_sdp.o rvu_npc_hash.o mcs.o mcs_rvu_if.o mcs_cnf10kb.o diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h index e26c3b0c4dcb..8d5d5a0f68c4 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h @@ -293,20 +293,74 @@ M(NIX_BANDPROF_ALLOC, 0x801d, nix_bandprof_alloc, nix_bandprof_alloc_req, \ M(NIX_BANDPROF_FREE, 0x801e, nix_bandprof_free, nix_bandprof_free_req, \ msg_rsp) \ M(NIX_BANDPROF_GET_HWINFO, 0x801f, nix_bandprof_get_hwinfo, msg_req, \ - nix_bandprof_get_hwinfo_rsp) - -/* Messages initiated by AF (range 0xC00 - 0xDFF) */ + nix_bandprof_get_hwinfo_rsp) \ +/* MCS mbox IDs (range 0xA000 - 0xBFFF) */ \ +M(MCS_ALLOC_RESOURCES, 0xa000, mcs_alloc_resources, mcs_alloc_rsrc_req, \ + mcs_alloc_rsrc_rsp) \ +M(MCS_FREE_RESOURCES, 0xa001, mcs_free_resources, mcs_free_rsrc_req, msg_rsp) \ +M(MCS_FLOWID_ENTRY_WRITE, 0xa002, mcs_flowid_entry_write, mcs_flowid_entry_write_req, \ + msg_rsp) \ +M(MCS_SECY_PLCY_WRITE, 
0xa003, mcs_secy_plcy_write, mcs_secy_plcy_write_req, \ + msg_rsp) \ +M(MCS_RX_SC_CAM_WRITE, 0xa004, mcs_rx_sc_cam_write, mcs_rx_sc_cam_write_req, \ + msg_rsp) \ +M(MCS_SA_PLCY_WRITE, 0xa005, mcs_sa_plcy_write, mcs_sa_plcy_write_req, \ + msg_rsp) \ +M(MCS_TX_SC_SA_MAP_WRITE, 0xa006, mcs_tx_sc_sa_map_write, mcs_tx_sc_sa_map, \ + msg_rsp) \ +M(MCS_RX_SC_SA_MAP_WRITE, 0xa007, mcs_rx_sc_sa_map_write, mcs_rx_sc_sa_map, \ + msg_rsp) \ +M(MCS_FLOWID_ENA_ENTRY, 0xa008, mcs_flowid_ena_entry, mcs_flowid_ena_dis_entry, \ + msg_rsp) \ +M(MCS_PN_TABLE_WRITE, 0xa009, mcs_pn_table_write, mcs_pn_table_write_req, \ + msg_rsp) \ +M(MCS_SET_ACTIVE_LMAC, 0xa00a, mcs_set_active_lmac, mcs_set_active_lmac, \ + msg_rsp) \ +M(MCS_GET_HW_INFO, 0xa00b, mcs_get_hw_info, msg_req, mcs_hw_info) \ +M(MCS_GET_FLOWID_STATS, 0xa00c, mcs_get_flowid_stats, mcs_stats_req, \ + mcs_flowid_stats) \ +M(MCS_GET_SECY_STATS, 0xa00d, mcs_get_secy_stats, mcs_stats_req, \ + mcs_secy_stats) \ +M(MCS_GET_SC_STATS, 0xa00e, mcs_get_sc_stats, mcs_stats_req, mcs_sc_stats) \ +M(MCS_GET_SA_STATS, 0xa00f, mcs_get_sa_stats, mcs_stats_req, mcs_sa_stats) \ +M(MCS_GET_PORT_STATS, 0xa010, mcs_get_port_stats, mcs_stats_req, \ + mcs_port_stats) \ +M(MCS_CLEAR_STATS, 0xa011, mcs_clear_stats, mcs_clear_stats, msg_rsp) \ +M(MCS_INTR_CFG, 0xa012, mcs_intr_cfg, mcs_intr_cfg, msg_rsp) \ +M(MCS_SET_LMAC_MODE, 0xa013, mcs_set_lmac_mode, mcs_set_lmac_mode, msg_rsp) \ +M(MCS_SET_PN_THRESHOLD, 0xa014, mcs_set_pn_threshold, mcs_set_pn_threshold, \ + msg_rsp) \ +M(MCS_ALLOC_CTRL_PKT_RULE, 0xa015, mcs_alloc_ctrl_pkt_rule, \ + mcs_alloc_ctrl_pkt_rule_req, \ + mcs_alloc_ctrl_pkt_rule_rsp) \ +M(MCS_FREE_CTRL_PKT_RULE, 0xa016, mcs_free_ctrl_pkt_rule, \ + mcs_free_ctrl_pkt_rule_req, msg_rsp) \ +M(MCS_CTRL_PKT_RULE_WRITE, 0xa017, mcs_ctrl_pkt_rule_write, \ + mcs_ctrl_pkt_rule_write_req, msg_rsp) \ +M(MCS_PORT_RESET, 0xa018, mcs_port_reset, mcs_port_reset_req, msg_rsp) \ +M(MCS_PORT_CFG_SET, 0xa019, mcs_port_cfg_set, mcs_port_cfg_set_req, msg_rsp)\ +M(MCS_PORT_CFG_GET, 0xa020, mcs_port_cfg_get, mcs_port_cfg_get_req, \ + mcs_port_cfg_get_rsp) \ +M(MCS_CUSTOM_TAG_CFG_GET, 0xa021, mcs_custom_tag_cfg_get, \ + mcs_custom_tag_cfg_get_req, \ + mcs_custom_tag_cfg_get_rsp) + +/* Messages initiated by AF (range 0xC00 - 0xEFF) */ #define MBOX_UP_CGX_MESSAGES \ M(CGX_LINK_EVENT, 0xC00, cgx_link_event, cgx_link_info_msg, msg_rsp) #define MBOX_UP_CPT_MESSAGES \ M(CPT_INST_LMTST, 0xD00, cpt_inst_lmtst, cpt_inst_lmtst_req, msg_rsp) +#define MBOX_UP_MCS_MESSAGES \ +M(MCS_INTR_NOTIFY, 0xE00, mcs_intr_notify, mcs_intr_info, msg_rsp) + enum { #define M(_name, _id, _1, _2, _3) MBOX_MSG_ ## _name = _id, MBOX_MESSAGES MBOX_UP_CGX_MESSAGES MBOX_UP_CPT_MESSAGES +MBOX_UP_MCS_MESSAGES #undef M }; @@ -1657,4 +1711,415 @@ enum cgx_af_status { LMAC_AF_ERR_EXACT_MATCH_TBL_LOOK_UP_FAILED = -1110, }; +enum mcs_direction { + MCS_RX, + MCS_TX, +}; + +enum mcs_rsrc_type { + MCS_RSRC_TYPE_FLOWID, + MCS_RSRC_TYPE_SECY, + MCS_RSRC_TYPE_SC, + MCS_RSRC_TYPE_SA, +}; + +struct mcs_alloc_rsrc_req { + struct mbox_msghdr hdr; + u8 rsrc_type; + u8 rsrc_cnt; /* Resources count */ + u8 mcs_id; /* MCS block ID */ + u8 dir; /* Macsec ingress or egress side */ + u8 all; /* Allocate all resource type one each */ + u64 rsvd; +}; + +struct mcs_alloc_rsrc_rsp { + struct mbox_msghdr hdr; + u8 flow_ids[128]; /* Index of reserved entries */ + u8 secy_ids[128]; + u8 sc_ids[128]; + u8 sa_ids[256]; + u8 rsrc_type; + u8 rsrc_cnt; /* No of entries reserved */ + u8 mcs_id; + u8 dir; + u8 all; + u8 rsvd[256]; /* reserved fields for 
future expansion */ +}; + +struct mcs_free_rsrc_req { + struct mbox_msghdr hdr; + u8 rsrc_id; /* Index of the entry to be freed */ + u8 rsrc_type; + u8 mcs_id; + u8 dir; + u8 all; /* Free all the cam resources */ + u64 rsvd; +}; + +struct mcs_flowid_entry_write_req { + struct mbox_msghdr hdr; + u64 data[4]; + u64 mask[4]; + u64 sci; /* CNF10K-B for tx_secy_mem_map */ + u8 flow_id; + u8 secy_id; /* secyid for which flowid is mapped */ + u8 sc_id; /* Valid if dir = MCS_TX, SC_CAM id mapped to flowid */ + u8 ena; /* Enable tcam entry */ + u8 ctrl_pkt; + u8 mcs_id; + u8 dir; + u64 rsvd; +}; + +struct mcs_secy_plcy_write_req { + struct mbox_msghdr hdr; + u64 plcy; + u8 secy_id; + u8 mcs_id; + u8 dir; + u64 rsvd; +}; + +/* RX SC_CAM mapping */ +struct mcs_rx_sc_cam_write_req { + struct mbox_msghdr hdr; + u64 sci; /* SCI */ + u64 secy_id; /* secy index mapped to SC */ + u8 sc_id; /* SC CAM entry index */ + u8 mcs_id; + u64 rsvd; +}; + +struct mcs_sa_plcy_write_req { + struct mbox_msghdr hdr; + u64 plcy[2][9]; /* Support 2 SA policy */ + u8 sa_index[2]; + u8 sa_cnt; + u8 mcs_id; + u8 dir; + u64 rsvd; +}; + +struct mcs_tx_sc_sa_map { + struct mbox_msghdr hdr; + u8 sa_index0; + u8 sa_index1; + u8 rekey_ena; + u8 sa_index0_vld; + u8 sa_index1_vld; + u8 tx_sa_active; + u64 sectag_sci; + u8 sc_id; /* used as index for SA_MEM_MAP */ + u8 mcs_id; + u64 rsvd; +}; + +struct mcs_rx_sc_sa_map { + struct mbox_msghdr hdr; + u8 sa_index; + u8 sa_in_use; + u8 sc_id; + u8 an; /* value range 0-3, sc_id + an used as index SA_MEM_MAP */ + u8 mcs_id; + u64 rsvd; +}; + +struct mcs_flowid_ena_dis_entry { + struct mbox_msghdr hdr; + u8 flow_id; + u8 ena; + u8 mcs_id; + u8 dir; + u64 rsvd; +}; + +struct mcs_pn_table_write_req { + struct mbox_msghdr hdr; + u64 next_pn; + u8 pn_id; + u8 mcs_id; + u8 dir; + u64 rsvd; +}; + +struct mcs_hw_info { + struct mbox_msghdr hdr; + u8 num_mcs_blks; /* Number of MCS blocks */ + u8 tcam_entries; /* RX/TX Tcam entries per mcs block */ + u8 secy_entries; /* RX/TX SECY entries per mcs block */ + u8 sc_entries; /* RX/TX SC CAM entries per mcs block */ + u8 sa_entries; /* PN table entries = SA entries */ + u64 rsvd[16]; +}; + +struct mcs_set_active_lmac { + struct mbox_msghdr hdr; + u32 lmac_bmap; /* bitmap of active lmac per mcs block */ + u8 mcs_id; + u16 chan_base; /* MCS channel base */ + u64 rsvd; +}; + +struct mcs_set_lmac_mode { + struct mbox_msghdr hdr; + u8 mode; /* 1:Bypass 0:Operational */ + u8 lmac_id; + u8 mcs_id; + u64 rsvd; +}; + +struct mcs_port_reset_req { + struct mbox_msghdr hdr; + u8 reset; + u8 mcs_id; + u8 port_id; + u64 rsvd; +}; + +struct mcs_port_cfg_set_req { + struct mbox_msghdr hdr; + u8 cstm_tag_rel_mode_sel; + u8 custom_hdr_enb; + u8 fifo_skid; + u8 port_mode; + u8 port_id; + u8 mcs_id; + u64 rsvd; +}; + +struct mcs_port_cfg_get_req { + struct mbox_msghdr hdr; + u8 port_id; + u8 mcs_id; + u64 rsvd; +}; + +struct mcs_port_cfg_get_rsp { + struct mbox_msghdr hdr; + u8 cstm_tag_rel_mode_sel; + u8 custom_hdr_enb; + u8 fifo_skid; + u8 port_mode; + u8 port_id; + u8 mcs_id; + u64 rsvd; +}; + +struct mcs_custom_tag_cfg_get_req { + struct mbox_msghdr hdr; + u8 mcs_id; + u8 dir; + u64 rsvd; +}; + +struct mcs_custom_tag_cfg_get_rsp { + struct mbox_msghdr hdr; + u16 cstm_etype[8]; + u8 cstm_indx[8]; + u8 cstm_etype_en; + u8 mcs_id; + u8 dir; + u64 rsvd; +}; + +/* MCS mailbox error codes + * Range 1201 - 1300. 
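The long M(...) list a little further up is an X-macro table: each MCS mailbox entry is written once and expanded several times with different definitions of M(). A trimmed illustration of the enum expansion, reusing two entries from the table (the demo list name is invented; the M() body is the one the header itself uses):

	#define MBOX_MCS_MESSAGES_DEMO						\
	M(MCS_GET_HW_INFO, 0xa00b, mcs_get_hw_info, msg_req, mcs_hw_info)	\
	M(MCS_PORT_RESET, 0xa018, mcs_port_reset, mcs_port_reset_req, msg_rsp)

	enum {
	#define M(_name, _id, _1, _2, _3) MBOX_MSG_ ## _name = _id,
	MBOX_MCS_MESSAGES_DEMO
	#undef M
	};

	/* expands to:
	 *	MBOX_MSG_MCS_GET_HW_INFO = 0xa00b,
	 *	MBOX_MSG_MCS_PORT_RESET = 0xa018,
	 */
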
+ */ +enum mcs_af_status { + MCS_AF_ERR_INVALID_MCSID = -1201, + MCS_AF_ERR_NOT_MAPPED = -1202, +}; + +struct mcs_set_pn_threshold { + struct mbox_msghdr hdr; + u64 threshold; + u8 xpn; /* '1' for setting xpn threshold */ + u8 mcs_id; + u8 dir; + u64 rsvd; +}; + +enum mcs_ctrl_pkt_rulew_type { + MCS_CTRL_PKT_RULE_TYPE_ETH, + MCS_CTRL_PKT_RULE_TYPE_DA, + MCS_CTRL_PKT_RULE_TYPE_RANGE, + MCS_CTRL_PKT_RULE_TYPE_COMBO, + MCS_CTRL_PKT_RULE_TYPE_MAC, +}; + +struct mcs_alloc_ctrl_pkt_rule_req { + struct mbox_msghdr hdr; + u8 rule_type; + u8 mcs_id; /* MCS block ID */ + u8 dir; /* Macsec ingress or egress side */ + u64 rsvd; +}; + +struct mcs_alloc_ctrl_pkt_rule_rsp { + struct mbox_msghdr hdr; + u8 rule_idx; + u8 rule_type; + u8 mcs_id; + u8 dir; + u64 rsvd; +}; + +struct mcs_free_ctrl_pkt_rule_req { + struct mbox_msghdr hdr; + u8 rule_idx; + u8 rule_type; + u8 mcs_id; + u8 dir; + u8 all; + u64 rsvd; +}; + +struct mcs_ctrl_pkt_rule_write_req { + struct mbox_msghdr hdr; + u64 data0; + u64 data1; + u64 data2; + u8 rule_idx; + u8 rule_type; + u8 mcs_id; + u8 dir; + u64 rsvd; +}; + +struct mcs_stats_req { + struct mbox_msghdr hdr; + u8 id; + u8 mcs_id; + u8 dir; + u64 rsvd; +}; + +struct mcs_flowid_stats { + struct mbox_msghdr hdr; + u64 tcam_hit_cnt; + u64 rsvd; +}; + +struct mcs_secy_stats { + struct mbox_msghdr hdr; + u64 ctl_pkt_bcast_cnt; + u64 ctl_pkt_mcast_cnt; + u64 ctl_pkt_ucast_cnt; + u64 ctl_octet_cnt; + u64 unctl_pkt_bcast_cnt; + u64 unctl_pkt_mcast_cnt; + u64 unctl_pkt_ucast_cnt; + u64 unctl_octet_cnt; + /* Valid only for RX */ + u64 octet_decrypted_cnt; + u64 octet_validated_cnt; + u64 pkt_port_disabled_cnt; + u64 pkt_badtag_cnt; + u64 pkt_nosa_cnt; + u64 pkt_nosaerror_cnt; + u64 pkt_tagged_ctl_cnt; + u64 pkt_untaged_cnt; + u64 pkt_ctl_cnt; /* CN10K-B */ + u64 pkt_notag_cnt; /* CNF10K-B */ + /* Valid only for TX */ + u64 octet_encrypted_cnt; + u64 octet_protected_cnt; + u64 pkt_noactivesa_cnt; + u64 pkt_toolong_cnt; + u64 pkt_untagged_cnt; + u64 rsvd[4]; +}; + +struct mcs_port_stats { + struct mbox_msghdr hdr; + u64 tcam_miss_cnt; + u64 parser_err_cnt; + u64 preempt_err_cnt; /* CNF10K-B */ + u64 sectag_insert_err_cnt; + u64 rsvd[4]; +}; + +/* Only for CN10K-B */ +struct mcs_sa_stats { + struct mbox_msghdr hdr; + /* RX */ + u64 pkt_invalid_cnt; + u64 pkt_nosaerror_cnt; + u64 pkt_notvalid_cnt; + u64 pkt_ok_cnt; + u64 pkt_nosa_cnt; + /* TX */ + u64 pkt_encrypt_cnt; + u64 pkt_protected_cnt; + u64 rsvd[4]; +}; + +struct mcs_sc_stats { + struct mbox_msghdr hdr; + /* RX */ + u64 hit_cnt; + u64 pkt_invalid_cnt; + u64 pkt_late_cnt; + u64 pkt_notvalid_cnt; + u64 pkt_unchecked_cnt; + u64 pkt_delay_cnt; /* CNF10K-B */ + u64 pkt_ok_cnt; /* CNF10K-B */ + u64 octet_decrypt_cnt; /* CN10K-B */ + u64 octet_validate_cnt; /* CN10K-B */ + /* TX */ + u64 pkt_encrypt_cnt; + u64 pkt_protected_cnt; + u64 octet_encrypt_cnt; /* CN10K-B */ + u64 octet_protected_cnt; /* CN10K-B */ + u64 rsvd[4]; +}; + +struct mcs_clear_stats { + struct mbox_msghdr hdr; +#define MCS_FLOWID_STATS 0 +#define MCS_SECY_STATS 1 +#define MCS_SC_STATS 2 +#define MCS_SA_STATS 3 +#define MCS_PORT_STATS 4 + u8 type; /* FLOWID, SECY, SC, SA, PORT */ + u8 id; /* type = PORT, If id = FF(invalid) port no is derived from pcifunc */ + u8 mcs_id; + u8 dir; + u8 all; /* All resources stats mapped to PF are cleared */ +}; + +struct mcs_intr_cfg { + struct mbox_msghdr hdr; +#define MCS_CPM_RX_SECTAG_V_EQ1_INT BIT_ULL(0) +#define MCS_CPM_RX_SECTAG_E_EQ0_C_EQ1_INT BIT_ULL(1) +#define MCS_CPM_RX_SECTAG_SL_GTE48_INT BIT_ULL(2) +#define 
MCS_CPM_RX_SECTAG_ES_EQ1_SC_EQ1_INT BIT_ULL(3) +#define MCS_CPM_RX_SECTAG_SC_EQ1_SCB_EQ1_INT BIT_ULL(4) +#define MCS_CPM_RX_PACKET_XPN_EQ0_INT BIT_ULL(5) +#define MCS_CPM_RX_PN_THRESH_REACHED_INT BIT_ULL(6) +#define MCS_CPM_TX_PACKET_XPN_EQ0_INT BIT_ULL(7) +#define MCS_CPM_TX_PN_THRESH_REACHED_INT BIT_ULL(8) +#define MCS_CPM_TX_SA_NOT_VALID_INT BIT_ULL(9) +#define MCS_BBE_RX_DFIFO_OVERFLOW_INT BIT_ULL(10) +#define MCS_BBE_RX_PLFIFO_OVERFLOW_INT BIT_ULL(11) +#define MCS_BBE_TX_DFIFO_OVERFLOW_INT BIT_ULL(12) +#define MCS_BBE_TX_PLFIFO_OVERFLOW_INT BIT_ULL(13) +#define MCS_PAB_RX_CHAN_OVERFLOW_INT BIT_ULL(14) +#define MCS_PAB_TX_CHAN_OVERFLOW_INT BIT_ULL(15) + u64 intr_mask; /* Interrupt enable mask */ + u8 mcs_id; + u8 lmac_id; + u64 rsvd; +}; + +struct mcs_intr_info { + struct mbox_msghdr hdr; + u64 intr_mask; + int sa_id; + u8 mcs_id; + u8 lmac_id; + u64 rsvd; +}; + #endif /* MBOX_H */ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mcs.c b/drivers/net/ethernet/marvell/octeontx2/af/mcs.c new file mode 100644 index 000000000000..5ba618aed6ad --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/af/mcs.c @@ -0,0 +1,1601 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Marvell MCS driver + * + * Copyright (C) 2022 Marvell. + */ + +#include <linux/bitfield.h> +#include <linux/delay.h> +#include <linux/device.h> +#include <linux/module.h> +#include <linux/pci.h> + +#include "mcs.h" +#include "mcs_reg.h" + +#define DRV_NAME "Marvell MCS Driver" + +#define PCI_CFG_REG_BAR_NUM 0 + +static const struct pci_device_id mcs_id_table[] = { + { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10K_MCS) }, + { 0, } /* end of table */ +}; + +static LIST_HEAD(mcs_list); + +void mcs_get_tx_secy_stats(struct mcs *mcs, struct mcs_secy_stats *stats, int id) +{ + u64 reg; + + reg = MCSX_CSE_TX_MEM_SLAVE_IFOUTCTLBCPKTSX(id); + stats->ctl_pkt_bcast_cnt = mcs_reg_read(mcs, reg); + + reg = MCSX_CSE_TX_MEM_SLAVE_IFOUTCTLMCPKTSX(id); + stats->ctl_pkt_mcast_cnt = mcs_reg_read(mcs, reg); + + reg = MCSX_CSE_TX_MEM_SLAVE_IFOUTCTLOCTETSX(id); + stats->ctl_octet_cnt = mcs_reg_read(mcs, reg); + + reg = MCSX_CSE_TX_MEM_SLAVE_IFOUTCTLUCPKTSX(id); + stats->ctl_pkt_ucast_cnt = mcs_reg_read(mcs, reg); + + reg = MCSX_CSE_TX_MEM_SLAVE_IFOUTUNCTLBCPKTSX(id); + stats->unctl_pkt_bcast_cnt = mcs_reg_read(mcs, reg); + + reg = MCSX_CSE_TX_MEM_SLAVE_IFOUTUNCTLMCPKTSX(id); + stats->unctl_pkt_mcast_cnt = mcs_reg_read(mcs, reg); + + reg = MCSX_CSE_TX_MEM_SLAVE_IFOUTUNCTLOCTETSX(id); + stats->unctl_octet_cnt = mcs_reg_read(mcs, reg); + + reg = MCSX_CSE_TX_MEM_SLAVE_IFOUTUNCTLUCPKTSX(id); + stats->unctl_pkt_ucast_cnt = mcs_reg_read(mcs, reg); + + reg = MCSX_CSE_TX_MEM_SLAVE_OUTOCTETSSECYENCRYPTEDX(id); + stats->octet_encrypted_cnt = mcs_reg_read(mcs, reg); + + reg = MCSX_CSE_TX_MEM_SLAVE_OUTOCTETSSECYPROTECTEDX(id); + stats->octet_protected_cnt = mcs_reg_read(mcs, reg); + + reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSECYNOACTIVESAX(id); + stats->pkt_noactivesa_cnt = mcs_reg_read(mcs, reg); + + reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSECYTOOLONGX(id); + stats->pkt_toolong_cnt = mcs_reg_read(mcs, reg); + + reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSECYUNTAGGEDX(id); + stats->pkt_untagged_cnt = mcs_reg_read(mcs, reg); +} + +void mcs_get_rx_secy_stats(struct mcs *mcs, struct mcs_secy_stats *stats, int id) +{ + u64 reg; + + reg = MCSX_CSE_RX_MEM_SLAVE_IFINCTLBCPKTSX(id); + stats->ctl_pkt_bcast_cnt = mcs_reg_read(mcs, reg); + + reg = MCSX_CSE_RX_MEM_SLAVE_IFINCTLMCPKTSX(id); + stats->ctl_pkt_mcast_cnt = mcs_reg_read(mcs, reg); + + reg = 
MCSX_CSE_RX_MEM_SLAVE_IFINCTLOCTETSX(id); + stats->ctl_octet_cnt = mcs_reg_read(mcs, reg); + + reg = MCSX_CSE_RX_MEM_SLAVE_IFINCTLUCPKTSX(id); + stats->ctl_pkt_ucast_cnt = mcs_reg_read(mcs, reg); + + reg = MCSX_CSE_RX_MEM_SLAVE_IFINUNCTLBCPKTSX(id); + stats->unctl_pkt_bcast_cnt = mcs_reg_read(mcs, reg); + + reg = MCSX_CSE_RX_MEM_SLAVE_IFINUNCTLMCPKTSX(id); + stats->unctl_pkt_mcast_cnt = mcs_reg_read(mcs, reg); + + reg = MCSX_CSE_RX_MEM_SLAVE_IFINUNCTLOCTETSX(id); + stats->unctl_octet_cnt = mcs_reg_read(mcs, reg); + + reg = MCSX_CSE_RX_MEM_SLAVE_IFINUNCTLUCPKTSX(id); + stats->unctl_pkt_ucast_cnt = mcs_reg_read(mcs, reg); + + reg = MCSX_CSE_RX_MEM_SLAVE_INOCTETSSECYDECRYPTEDX(id); + stats->octet_decrypted_cnt = mcs_reg_read(mcs, reg); + + reg = MCSX_CSE_RX_MEM_SLAVE_INOCTETSSECYVALIDATEX(id); + stats->octet_validated_cnt = mcs_reg_read(mcs, reg); + + reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSCTRLPORTDISABLEDX(id); + stats->pkt_port_disabled_cnt = mcs_reg_read(mcs, reg); + + reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYBADTAGX(id); + stats->pkt_badtag_cnt = mcs_reg_read(mcs, reg); + + reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYNOSAX(id); + stats->pkt_nosa_cnt = mcs_reg_read(mcs, reg); + + reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYNOSAERRORX(id); + stats->pkt_nosaerror_cnt = mcs_reg_read(mcs, reg); + + reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYTAGGEDCTLX(id); + stats->pkt_tagged_ctl_cnt = mcs_reg_read(mcs, reg); + + reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYUNTAGGEDORNOTAGX(id); + stats->pkt_untaged_cnt = mcs_reg_read(mcs, reg); + + reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYCTLX(id); + stats->pkt_ctl_cnt = mcs_reg_read(mcs, reg); + + if (mcs->hw->mcs_blks > 1) { + reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYNOTAGX(id); + stats->pkt_notag_cnt = mcs_reg_read(mcs, reg); + } +} + +void mcs_get_flowid_stats(struct mcs *mcs, struct mcs_flowid_stats *stats, + int id, int dir) +{ + u64 reg; + + if (dir == MCS_RX) + reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSFLOWIDTCAMHITX(id); + else + reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSFLOWIDTCAMHITX(id); + + stats->tcam_hit_cnt = mcs_reg_read(mcs, reg); +} + +void mcs_get_port_stats(struct mcs *mcs, struct mcs_port_stats *stats, + int id, int dir) +{ + u64 reg; + + if (dir == MCS_RX) { + reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSFLOWIDTCAMMISSX(id); + stats->tcam_miss_cnt = mcs_reg_read(mcs, reg); + + reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSPARSEERRX(id); + stats->parser_err_cnt = mcs_reg_read(mcs, reg); + if (mcs->hw->mcs_blks > 1) { + reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSEARLYPREEMPTERRX(id); + stats->preempt_err_cnt = mcs_reg_read(mcs, reg); + } + } else { + reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSFLOWIDTCAMMISSX(id); + stats->tcam_miss_cnt = mcs_reg_read(mcs, reg); + + reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSPARSEERRX(id); + stats->parser_err_cnt = mcs_reg_read(mcs, reg); + + reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSECTAGINSERTIONERRX(id); + stats->sectag_insert_err_cnt = mcs_reg_read(mcs, reg); + } +} + +void mcs_get_sa_stats(struct mcs *mcs, struct mcs_sa_stats *stats, int id, int dir) +{ + u64 reg; + + if (dir == MCS_RX) { + reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSAINVALIDX(id); + stats->pkt_invalid_cnt = mcs_reg_read(mcs, reg); + + reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSANOTUSINGSAERRORX(id); + stats->pkt_nosaerror_cnt = mcs_reg_read(mcs, reg); + + reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSANOTVALIDX(id); + stats->pkt_notvalid_cnt = mcs_reg_read(mcs, reg); + + reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSAOKX(id); + stats->pkt_ok_cnt = mcs_reg_read(mcs, reg); + + reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSAUNUSEDSAX(id); + stats->pkt_nosa_cnt = mcs_reg_read(mcs, reg); + } 
else { + reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSAENCRYPTEDX(id); + stats->pkt_encrypt_cnt = mcs_reg_read(mcs, reg); + + reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSAPROTECTEDX(id); + stats->pkt_protected_cnt = mcs_reg_read(mcs, reg); + } +} + +void mcs_get_sc_stats(struct mcs *mcs, struct mcs_sc_stats *stats, + int id, int dir) +{ + u64 reg; + + if (dir == MCS_RX) { + reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSCCAMHITX(id); + stats->hit_cnt = mcs_reg_read(mcs, reg); + + reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSCINVALIDX(id); + stats->pkt_invalid_cnt = mcs_reg_read(mcs, reg); + + reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSCLATEORDELAYEDX(id); + stats->pkt_late_cnt = mcs_reg_read(mcs, reg); + + reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSCNOTVALIDX(id); + stats->pkt_notvalid_cnt = mcs_reg_read(mcs, reg); + + reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSCUNCHECKEDOROKX(id); + stats->pkt_unchecked_cnt = mcs_reg_read(mcs, reg); + + if (mcs->hw->mcs_blks > 1) { + reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSCDELAYEDX(id); + stats->pkt_delay_cnt = mcs_reg_read(mcs, reg); + + reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSCOKX(id); + stats->pkt_ok_cnt = mcs_reg_read(mcs, reg); + } + if (mcs->hw->mcs_blks == 1) { + reg = MCSX_CSE_RX_MEM_SLAVE_INOCTETSSCDECRYPTEDX(id); + stats->octet_decrypt_cnt = mcs_reg_read(mcs, reg); + + reg = MCSX_CSE_RX_MEM_SLAVE_INOCTETSSCVALIDATEX(id); + stats->octet_validate_cnt = mcs_reg_read(mcs, reg); + } + } else { + reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSCENCRYPTEDX(id); + stats->pkt_encrypt_cnt = mcs_reg_read(mcs, reg); + + reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSCPROTECTEDX(id); + stats->pkt_protected_cnt = mcs_reg_read(mcs, reg); + + if (mcs->hw->mcs_blks == 1) { + reg = MCSX_CSE_TX_MEM_SLAVE_OUTOCTETSSCENCRYPTEDX(id); + stats->octet_encrypt_cnt = mcs_reg_read(mcs, reg); + + reg = MCSX_CSE_TX_MEM_SLAVE_OUTOCTETSSCPROTECTEDX(id); + stats->octet_protected_cnt = mcs_reg_read(mcs, reg); + } + } +} + +void mcs_clear_stats(struct mcs *mcs, u8 type, u8 id, int dir) +{ + struct mcs_flowid_stats flowid_st; + struct mcs_port_stats port_st; + struct mcs_secy_stats secy_st; + struct mcs_sc_stats sc_st; + struct mcs_sa_stats sa_st; + u64 reg; + + if (dir == MCS_RX) + reg = MCSX_CSE_RX_SLAVE_CTRL; + else + reg = MCSX_CSE_TX_SLAVE_CTRL; + + mcs_reg_write(mcs, reg, BIT_ULL(0)); + + switch (type) { + case MCS_FLOWID_STATS: + mcs_get_flowid_stats(mcs, &flowid_st, id, dir); + break; + case MCS_SECY_STATS: + if (dir == MCS_RX) + mcs_get_rx_secy_stats(mcs, &secy_st, id); + else + mcs_get_tx_secy_stats(mcs, &secy_st, id); + break; + case MCS_SC_STATS: + mcs_get_sc_stats(mcs, &sc_st, id, dir); + break; + case MCS_SA_STATS: + mcs_get_sa_stats(mcs, &sa_st, id, dir); + break; + case MCS_PORT_STATS: + mcs_get_port_stats(mcs, &port_st, id, dir); + break; + } + + mcs_reg_write(mcs, reg, 0x0); +} + +int mcs_clear_all_stats(struct mcs *mcs, u16 pcifunc, int dir) +{ + struct mcs_rsrc_map *map; + int id; + + if (dir == MCS_RX) + map = &mcs->rx; + else + map = &mcs->tx; + + /* Clear FLOWID stats */ + for (id = 0; id < map->flow_ids.max; id++) { + if (map->flowid2pf_map[id] != pcifunc) + continue; + mcs_clear_stats(mcs, MCS_FLOWID_STATS, id, dir); + } + + /* Clear SECY stats */ + for (id = 0; id < map->secy.max; id++) { + if (map->secy2pf_map[id] != pcifunc) + continue; + mcs_clear_stats(mcs, MCS_SECY_STATS, id, dir); + } + + /* Clear SC stats */ + for (id = 0; id < map->secy.max; id++) { + if (map->sc2pf_map[id] != pcifunc) + continue; + mcs_clear_stats(mcs, MCS_SC_STATS, id, dir); + } + + /* Clear SA stats */ + for (id = 0; id < map->sa.max; id++) { + if (map->sa2pf_map[id] != 
pcifunc) + continue; + mcs_clear_stats(mcs, MCS_SA_STATS, id, dir); + } + return 0; +} + +void mcs_pn_table_write(struct mcs *mcs, u8 pn_id, u64 next_pn, u8 dir) +{ + u64 reg; + + if (dir == MCS_RX) + reg = MCSX_CPM_RX_SLAVE_SA_PN_TABLE_MEMX(pn_id); + else + reg = MCSX_CPM_TX_SLAVE_SA_PN_TABLE_MEMX(pn_id); + mcs_reg_write(mcs, reg, next_pn); +} + +void cn10kb_mcs_tx_sa_mem_map_write(struct mcs *mcs, struct mcs_tx_sc_sa_map *map) +{ + u64 reg, val; + + val = (map->sa_index0 & 0xFF) | + (map->sa_index1 & 0xFF) << 9 | + (map->rekey_ena & 0x1) << 18 | + (map->sa_index0_vld & 0x1) << 19 | + (map->sa_index1_vld & 0x1) << 20 | + (map->tx_sa_active & 0x1) << 21 | + map->sectag_sci << 22; + reg = MCSX_CPM_TX_SLAVE_SA_MAP_MEM_0X(map->sc_id); + mcs_reg_write(mcs, reg, val); + + val = map->sectag_sci >> 42; + reg = MCSX_CPM_TX_SLAVE_SA_MAP_MEM_1X(map->sc_id); + mcs_reg_write(mcs, reg, val); +} + +void cn10kb_mcs_rx_sa_mem_map_write(struct mcs *mcs, struct mcs_rx_sc_sa_map *map) +{ + u64 val, reg; + + val = (map->sa_index & 0xFF) | map->sa_in_use << 9; + + reg = MCSX_CPM_RX_SLAVE_SA_MAP_MEMX((4 * map->sc_id) + map->an); + mcs_reg_write(mcs, reg, val); +} + +void mcs_sa_plcy_write(struct mcs *mcs, u64 *plcy, int sa_id, int dir) +{ + int reg_id; + u64 reg; + + if (dir == MCS_RX) { + for (reg_id = 0; reg_id < 8; reg_id++) { + reg = MCSX_CPM_RX_SLAVE_SA_PLCY_MEMX(reg_id, sa_id); + mcs_reg_write(mcs, reg, plcy[reg_id]); + } + } else { + for (reg_id = 0; reg_id < 9; reg_id++) { + reg = MCSX_CPM_TX_SLAVE_SA_PLCY_MEMX(reg_id, sa_id); + mcs_reg_write(mcs, reg, plcy[reg_id]); + } + } +} + +void mcs_ena_dis_sc_cam_entry(struct mcs *mcs, int sc_id, int ena) +{ + u64 reg, val; + + reg = MCSX_CPM_RX_SLAVE_SC_CAM_ENA(0); + if (sc_id > 63) + reg = MCSX_CPM_RX_SLAVE_SC_CAM_ENA(1); + + if (ena) + val = mcs_reg_read(mcs, reg) | BIT_ULL(sc_id); + else + val = mcs_reg_read(mcs, reg) & ~BIT_ULL(sc_id); + + mcs_reg_write(mcs, reg, val); +} + +void mcs_rx_sc_cam_write(struct mcs *mcs, u64 sci, u64 secy, int sc_id) +{ + mcs_reg_write(mcs, MCSX_CPM_RX_SLAVE_SC_CAMX(0, sc_id), sci); + mcs_reg_write(mcs, MCSX_CPM_RX_SLAVE_SC_CAMX(1, sc_id), secy); + /* Enable SC CAM */ + mcs_ena_dis_sc_cam_entry(mcs, sc_id, true); +} + +void mcs_secy_plcy_write(struct mcs *mcs, u64 plcy, int secy_id, int dir) +{ + u64 reg; + + if (dir == MCS_RX) + reg = MCSX_CPM_RX_SLAVE_SECY_PLCY_MEM_0X(secy_id); + else + reg = MCSX_CPM_TX_SLAVE_SECY_PLCY_MEMX(secy_id); + + mcs_reg_write(mcs, reg, plcy); + + if (mcs->hw->mcs_blks == 1 && dir == MCS_RX) + mcs_reg_write(mcs, MCSX_CPM_RX_SLAVE_SECY_PLCY_MEM_1X(secy_id), 0x0ull); +} + +void cn10kb_mcs_flowid_secy_map(struct mcs *mcs, struct secy_mem_map *map, int dir) +{ + u64 reg, val; + + val = (map->secy & 0x7F) | (map->ctrl_pkt & 0x1) << 8; + if (dir == MCS_RX) { + reg = MCSX_CPM_RX_SLAVE_SECY_MAP_MEMX(map->flow_id); + } else { + val |= (map->sc & 0x7F) << 9; + reg = MCSX_CPM_TX_SLAVE_SECY_MAP_MEM_0X(map->flow_id); + } + + mcs_reg_write(mcs, reg, val); +} + +void mcs_ena_dis_flowid_entry(struct mcs *mcs, int flow_id, int dir, int ena) +{ + u64 reg, val; + + if (dir == MCS_RX) { + reg = MCSX_CPM_RX_SLAVE_FLOWID_TCAM_ENA_0; + if (flow_id > 63) + reg = MCSX_CPM_RX_SLAVE_FLOWID_TCAM_ENA_1; + } else { + reg = MCSX_CPM_TX_SLAVE_FLOWID_TCAM_ENA_0; + if (flow_id > 63) + reg = MCSX_CPM_TX_SLAVE_FLOWID_TCAM_ENA_1; + } + + /* Enable/Disable the tcam entry */ + if (ena) + val = mcs_reg_read(mcs, reg) | BIT_ULL(flow_id); + else + val = mcs_reg_read(mcs, reg) & ~BIT_ULL(flow_id); + + mcs_reg_write(mcs, reg, val); +} + +void 
mcs_flowid_entry_write(struct mcs *mcs, u64 *data, u64 *mask, int flow_id, int dir) +{ + int reg_id; + u64 reg; + + if (dir == MCS_RX) { + for (reg_id = 0; reg_id < 4; reg_id++) { + reg = MCSX_CPM_RX_SLAVE_FLOWID_TCAM_DATAX(reg_id, flow_id); + mcs_reg_write(mcs, reg, data[reg_id]); + reg = MCSX_CPM_RX_SLAVE_FLOWID_TCAM_MASKX(reg_id, flow_id); + mcs_reg_write(mcs, reg, mask[reg_id]); + } + } else { + for (reg_id = 0; reg_id < 4; reg_id++) { + reg = MCSX_CPM_TX_SLAVE_FLOWID_TCAM_DATAX(reg_id, flow_id); + mcs_reg_write(mcs, reg, data[reg_id]); + reg = MCSX_CPM_TX_SLAVE_FLOWID_TCAM_MASKX(reg_id, flow_id); + mcs_reg_write(mcs, reg, mask[reg_id]); + } + } +} + +int mcs_install_flowid_bypass_entry(struct mcs *mcs) +{ + int flow_id, secy_id, reg_id; + struct secy_mem_map map; + u64 reg, plcy = 0; + + /* Flow entry */ + flow_id = mcs->hw->tcam_entries - MCS_RSRC_RSVD_CNT; + for (reg_id = 0; reg_id < 4; reg_id++) { + reg = MCSX_CPM_RX_SLAVE_FLOWID_TCAM_MASKX(reg_id, flow_id); + mcs_reg_write(mcs, reg, GENMASK_ULL(63, 0)); + } + for (reg_id = 0; reg_id < 4; reg_id++) { + reg = MCSX_CPM_TX_SLAVE_FLOWID_TCAM_MASKX(reg_id, flow_id); + mcs_reg_write(mcs, reg, GENMASK_ULL(63, 0)); + } + /* secy */ + secy_id = mcs->hw->secy_entries - MCS_RSRC_RSVD_CNT; + + /* Set validate frames to NULL and enable control port */ + plcy = 0x7ull; + if (mcs->hw->mcs_blks > 1) + plcy = BIT_ULL(0) | 0x3ull << 4; + mcs_secy_plcy_write(mcs, plcy, secy_id, MCS_RX); + + /* Enable control port and set mtu to max */ + plcy = BIT_ULL(0) | GENMASK_ULL(43, 28); + if (mcs->hw->mcs_blks > 1) + plcy = BIT_ULL(0) | GENMASK_ULL(63, 48); + mcs_secy_plcy_write(mcs, plcy, secy_id, MCS_TX); + + /* Map flowid to secy */ + map.secy = secy_id; + map.ctrl_pkt = 0; + map.flow_id = flow_id; + mcs->mcs_ops->mcs_flowid_secy_map(mcs, &map, MCS_RX); + map.sc = secy_id; + mcs->mcs_ops->mcs_flowid_secy_map(mcs, &map, MCS_TX); + + /* Enable Flowid entry */ + mcs_ena_dis_flowid_entry(mcs, flow_id, MCS_RX, true); + mcs_ena_dis_flowid_entry(mcs, flow_id, MCS_TX, true); + return 0; +} + +void mcs_clear_secy_plcy(struct mcs *mcs, int secy_id, int dir) +{ + struct mcs_rsrc_map *map; + int flow_id; + + if (dir == MCS_RX) + map = &mcs->rx; + else + map = &mcs->tx; + + /* Clear secy memory to zero */ + mcs_secy_plcy_write(mcs, 0, secy_id, dir); + + /* Disable the tcam entry using this secy */ + for (flow_id = 0; flow_id < map->flow_ids.max; flow_id++) { + if (map->flowid2secy_map[flow_id] != secy_id) + continue; + mcs_ena_dis_flowid_entry(mcs, flow_id, dir, false); + } +} + +int mcs_alloc_ctrlpktrule(struct rsrc_bmap *rsrc, u16 *pf_map, u16 offset, u16 pcifunc) +{ + int rsrc_id; + + if (!rsrc->bmap) + return -EINVAL; + + rsrc_id = bitmap_find_next_zero_area(rsrc->bmap, rsrc->max, offset, 1, 0); + if (rsrc_id >= rsrc->max) + return -ENOSPC; + + bitmap_set(rsrc->bmap, rsrc_id, 1); + pf_map[rsrc_id] = pcifunc; + + return rsrc_id; +} + +int mcs_free_ctrlpktrule(struct mcs *mcs, struct mcs_free_ctrl_pkt_rule_req *req) +{ + u16 pcifunc = req->hdr.pcifunc; + struct mcs_rsrc_map *map; + u64 dis, reg; + int id, rc; + + reg = (req->dir == MCS_RX) ? MCSX_PEX_RX_SLAVE_RULE_ENABLE : MCSX_PEX_TX_SLAVE_RULE_ENABLE; + map = (req->dir == MCS_RX) ? 
&mcs->rx : &mcs->tx; + + if (req->all) { + for (id = 0; id < map->ctrlpktrule.max; id++) { + if (map->ctrlpktrule2pf_map[id] != pcifunc) + continue; + mcs_free_rsrc(&map->ctrlpktrule, map->ctrlpktrule2pf_map, id, pcifunc); + dis = mcs_reg_read(mcs, reg); + dis &= ~BIT_ULL(id); + mcs_reg_write(mcs, reg, dis); + } + return 0; + } + + rc = mcs_free_rsrc(&map->ctrlpktrule, map->ctrlpktrule2pf_map, req->rule_idx, pcifunc); + dis = mcs_reg_read(mcs, reg); + dis &= ~BIT_ULL(req->rule_idx); + mcs_reg_write(mcs, reg, dis); + + return rc; +} + +int mcs_ctrlpktrule_write(struct mcs *mcs, struct mcs_ctrl_pkt_rule_write_req *req) +{ + u64 reg, enb; + u64 idx; + + switch (req->rule_type) { + case MCS_CTRL_PKT_RULE_TYPE_ETH: + req->data0 &= GENMASK(15, 0); + if (req->data0 != ETH_P_PAE) + return -EINVAL; + + idx = req->rule_idx - MCS_CTRLPKT_ETYPE_RULE_OFFSET; + reg = (req->dir == MCS_RX) ? MCSX_PEX_RX_SLAVE_RULE_ETYPE_CFGX(idx) : + MCSX_PEX_TX_SLAVE_RULE_ETYPE_CFGX(idx); + + mcs_reg_write(mcs, reg, req->data0); + break; + case MCS_CTRL_PKT_RULE_TYPE_DA: + if (!(req->data0 & BIT_ULL(40))) + return -EINVAL; + + idx = req->rule_idx - MCS_CTRLPKT_DA_RULE_OFFSET; + reg = (req->dir == MCS_RX) ? MCSX_PEX_RX_SLAVE_RULE_DAX(idx) : + MCSX_PEX_TX_SLAVE_RULE_DAX(idx); + + mcs_reg_write(mcs, reg, req->data0 & GENMASK_ULL(47, 0)); + break; + case MCS_CTRL_PKT_RULE_TYPE_RANGE: + if (!(req->data0 & BIT_ULL(40)) || !(req->data1 & BIT_ULL(40))) + return -EINVAL; + + idx = req->rule_idx - MCS_CTRLPKT_DA_RANGE_RULE_OFFSET; + if (req->dir == MCS_RX) { + reg = MCSX_PEX_RX_SLAVE_RULE_DA_RANGE_MINX(idx); + mcs_reg_write(mcs, reg, req->data0 & GENMASK_ULL(47, 0)); + reg = MCSX_PEX_RX_SLAVE_RULE_DA_RANGE_MAXX(idx); + mcs_reg_write(mcs, reg, req->data1 & GENMASK_ULL(47, 0)); + } else { + reg = MCSX_PEX_TX_SLAVE_RULE_DA_RANGE_MINX(idx); + mcs_reg_write(mcs, reg, req->data0 & GENMASK_ULL(47, 0)); + reg = MCSX_PEX_TX_SLAVE_RULE_DA_RANGE_MAXX(idx); + mcs_reg_write(mcs, reg, req->data1 & GENMASK_ULL(47, 0)); + } + break; + case MCS_CTRL_PKT_RULE_TYPE_COMBO: + req->data2 &= GENMASK(15, 0); + if (req->data2 != ETH_P_PAE || !(req->data0 & BIT_ULL(40)) || + !(req->data1 & BIT_ULL(40))) + return -EINVAL; + + idx = req->rule_idx - MCS_CTRLPKT_COMBO_RULE_OFFSET; + if (req->dir == MCS_RX) { + reg = MCSX_PEX_RX_SLAVE_RULE_COMBO_MINX(idx); + mcs_reg_write(mcs, reg, req->data0 & GENMASK_ULL(47, 0)); + reg = MCSX_PEX_RX_SLAVE_RULE_COMBO_MAXX(idx); + mcs_reg_write(mcs, reg, req->data1 & GENMASK_ULL(47, 0)); + reg = MCSX_PEX_RX_SLAVE_RULE_COMBO_ETX(idx); + mcs_reg_write(mcs, reg, req->data2); + } else { + reg = MCSX_PEX_TX_SLAVE_RULE_COMBO_MINX(idx); + mcs_reg_write(mcs, reg, req->data0 & GENMASK_ULL(47, 0)); + reg = MCSX_PEX_TX_SLAVE_RULE_COMBO_MAXX(idx); + mcs_reg_write(mcs, reg, req->data1 & GENMASK_ULL(47, 0)); + reg = MCSX_PEX_TX_SLAVE_RULE_COMBO_ETX(idx); + mcs_reg_write(mcs, reg, req->data2); + } + break; + case MCS_CTRL_PKT_RULE_TYPE_MAC: + if (!(req->data0 & BIT_ULL(40))) + return -EINVAL; + + idx = req->rule_idx - MCS_CTRLPKT_MAC_EN_RULE_OFFSET; + reg = (req->dir == MCS_RX) ? MCSX_PEX_RX_SLAVE_RULE_MAC : + MCSX_PEX_TX_SLAVE_RULE_MAC; + + mcs_reg_write(mcs, reg, req->data0 & GENMASK_ULL(47, 0)); + break; + } + + reg = (req->dir == MCS_RX) ? 
MCSX_PEX_RX_SLAVE_RULE_ENABLE : MCSX_PEX_TX_SLAVE_RULE_ENABLE; + + enb = mcs_reg_read(mcs, reg); + enb |= BIT_ULL(req->rule_idx); + mcs_reg_write(mcs, reg, enb); + + return 0; +} + +int mcs_free_rsrc(struct rsrc_bmap *rsrc, u16 *pf_map, int rsrc_id, u16 pcifunc) +{ + /* Check if the rsrc_id is mapped to PF/VF */ + if (pf_map[rsrc_id] != pcifunc) + return -EINVAL; + + rvu_free_rsrc(rsrc, rsrc_id); + pf_map[rsrc_id] = 0; + return 0; +} + +/* Free all the cam resources mapped to pf */ +int mcs_free_all_rsrc(struct mcs *mcs, int dir, u16 pcifunc) +{ + struct mcs_rsrc_map *map; + int id; + + if (dir == MCS_RX) + map = &mcs->rx; + else + map = &mcs->tx; + + /* free tcam entries */ + for (id = 0; id < map->flow_ids.max; id++) { + if (map->flowid2pf_map[id] != pcifunc) + continue; + mcs_free_rsrc(&map->flow_ids, map->flowid2pf_map, + id, pcifunc); + mcs_ena_dis_flowid_entry(mcs, id, dir, false); + } + + /* free secy entries */ + for (id = 0; id < map->secy.max; id++) { + if (map->secy2pf_map[id] != pcifunc) + continue; + mcs_free_rsrc(&map->secy, map->secy2pf_map, + id, pcifunc); + mcs_clear_secy_plcy(mcs, id, dir); + } + + /* free sc entries */ + for (id = 0; id < map->secy.max; id++) { + if (map->sc2pf_map[id] != pcifunc) + continue; + mcs_free_rsrc(&map->sc, map->sc2pf_map, id, pcifunc); + + /* Disable SC CAM only on RX side */ + if (dir == MCS_RX) + mcs_ena_dis_sc_cam_entry(mcs, id, false); + } + + /* free sa entries */ + for (id = 0; id < map->sa.max; id++) { + if (map->sa2pf_map[id] != pcifunc) + continue; + mcs_free_rsrc(&map->sa, map->sa2pf_map, id, pcifunc); + } + return 0; +} + +int mcs_alloc_rsrc(struct rsrc_bmap *rsrc, u16 *pf_map, u16 pcifunc) +{ + int rsrc_id; + + rsrc_id = rvu_alloc_rsrc(rsrc); + if (rsrc_id < 0) + return -ENOMEM; + pf_map[rsrc_id] = pcifunc; + return rsrc_id; +} + +int mcs_alloc_all_rsrc(struct mcs *mcs, u8 *flow_id, u8 *secy_id, + u8 *sc_id, u8 *sa1_id, u8 *sa2_id, u16 pcifunc, int dir) +{ + struct mcs_rsrc_map *map; + int id; + + if (dir == MCS_RX) + map = &mcs->rx; + else + map = &mcs->tx; + + id = mcs_alloc_rsrc(&map->flow_ids, map->flowid2pf_map, pcifunc); + if (id < 0) + return -ENOMEM; + *flow_id = id; + + id = mcs_alloc_rsrc(&map->secy, map->secy2pf_map, pcifunc); + if (id < 0) + return -ENOMEM; + *secy_id = id; + + id = mcs_alloc_rsrc(&map->sc, map->sc2pf_map, pcifunc); + if (id < 0) + return -ENOMEM; + *sc_id = id; + + id = mcs_alloc_rsrc(&map->sa, map->sa2pf_map, pcifunc); + if (id < 0) + return -ENOMEM; + *sa1_id = id; + + id = mcs_alloc_rsrc(&map->sa, map->sa2pf_map, pcifunc); + if (id < 0) + return -ENOMEM; + *sa2_id = id; + + return 0; +} + +static void cn10kb_mcs_tx_pn_wrapped_handler(struct mcs *mcs) +{ + struct mcs_intr_event event = { 0 }; + struct rsrc_bmap *sc_bmap; + u64 val; + int sc; + + sc_bmap = &mcs->tx.sc; + + event.mcs_id = mcs->mcs_id; + event.intr_mask = MCS_CPM_TX_PACKET_XPN_EQ0_INT; + + for_each_set_bit(sc, sc_bmap->bmap, mcs->hw->sc_entries) { + val = mcs_reg_read(mcs, MCSX_CPM_TX_SLAVE_SA_MAP_MEM_0X(sc)); + + if (mcs->tx_sa_active[sc]) + /* SA_index1 was used and got expired */ + event.sa_id = (val >> 9) & 0xFF; + else + /* SA_index0 was used and got expired */ + event.sa_id = val & 0xFF; + + event.pcifunc = mcs->tx.sa2pf_map[event.sa_id]; + mcs_add_intr_wq_entry(mcs, &event); + } +} + +static void cn10kb_mcs_tx_pn_thresh_reached_handler(struct mcs *mcs) +{ + struct mcs_intr_event event = { 0 }; + struct rsrc_bmap *sc_bmap; + u64 val, status; + int sc; + + sc_bmap = &mcs->tx.sc; + + event.mcs_id = mcs->mcs_id; + event.intr_mask = 
MCS_CPM_TX_PN_THRESH_REACHED_INT; + + /* TX SA interrupt is raised only if autorekey is enabled. + * MCS_CPM_TX_SLAVE_SA_MAP_MEM_0X[sc].tx_sa_active bit gets toggled if + * one of two SAs mapped to SC gets expired. If tx_sa_active=0 implies + * SA in SA_index1 got expired else SA in SA_index0 got expired. + */ + for_each_set_bit(sc, sc_bmap->bmap, mcs->hw->sc_entries) { + val = mcs_reg_read(mcs, MCSX_CPM_TX_SLAVE_SA_MAP_MEM_0X(sc)); + /* Auto rekey is enabled */ + if (!((val >> 18) & 0x1)) + continue; + + status = (val >> 21) & 0x1; + + /* Check if tx_sa_active status had changed */ + if (status == mcs->tx_sa_active[sc]) + continue; + /* SA_index0 is expired */ + if (status) + event.sa_id = val & 0xFF; + else + event.sa_id = (val >> 9) & 0xFF; + + event.pcifunc = mcs->tx.sa2pf_map[event.sa_id]; + mcs_add_intr_wq_entry(mcs, &event); + } +} + +static void mcs_rx_pn_thresh_reached_handler(struct mcs *mcs) +{ + struct mcs_intr_event event = { 0 }; + int sa, reg; + u64 intr; + + /* Check expired SAs */ + for (reg = 0; reg < (mcs->hw->sa_entries / 64); reg++) { + /* Bit high in *PN_THRESH_REACHEDX implies + * corresponding SAs are expired. + */ + intr = mcs_reg_read(mcs, MCSX_CPM_RX_SLAVE_PN_THRESH_REACHEDX(reg)); + for (sa = 0; sa < 64; sa++) { + if (!(intr & BIT_ULL(sa))) + continue; + + event.mcs_id = mcs->mcs_id; + event.intr_mask = MCS_CPM_RX_PN_THRESH_REACHED_INT; + event.sa_id = sa + (reg * 64); + event.pcifunc = mcs->rx.sa2pf_map[event.sa_id]; + mcs_add_intr_wq_entry(mcs, &event); + } + } +} + +static void mcs_rx_misc_intr_handler(struct mcs *mcs, u64 intr) +{ + struct mcs_intr_event event = { 0 }; + + event.mcs_id = mcs->mcs_id; + event.pcifunc = mcs->pf_map[0]; + + if (intr & MCS_CPM_RX_INT_SECTAG_V_EQ1) + event.intr_mask = MCS_CPM_RX_SECTAG_V_EQ1_INT; + if (intr & MCS_CPM_RX_INT_SECTAG_E_EQ0_C_EQ1) + event.intr_mask |= MCS_CPM_RX_SECTAG_E_EQ0_C_EQ1_INT; + if (intr & MCS_CPM_RX_INT_SL_GTE48) + event.intr_mask |= MCS_CPM_RX_SECTAG_SL_GTE48_INT; + if (intr & MCS_CPM_RX_INT_ES_EQ1_SC_EQ1) + event.intr_mask |= MCS_CPM_RX_SECTAG_ES_EQ1_SC_EQ1_INT; + if (intr & MCS_CPM_RX_INT_SC_EQ1_SCB_EQ1) + event.intr_mask |= MCS_CPM_RX_SECTAG_SC_EQ1_SCB_EQ1_INT; + if (intr & MCS_CPM_RX_INT_PACKET_XPN_EQ0) + event.intr_mask |= MCS_CPM_RX_PACKET_XPN_EQ0_INT; + + mcs_add_intr_wq_entry(mcs, &event); +} + +static void mcs_tx_misc_intr_handler(struct mcs *mcs, u64 intr) +{ + struct mcs_intr_event event = { 0 }; + + if (!(intr & MCS_CPM_TX_INT_SA_NOT_VALID)) + return; + + event.mcs_id = mcs->mcs_id; + event.pcifunc = mcs->pf_map[0]; + + event.intr_mask = MCS_CPM_TX_SA_NOT_VALID_INT; + + mcs_add_intr_wq_entry(mcs, &event); +} + +static void mcs_bbe_intr_handler(struct mcs *mcs, u64 intr, enum mcs_direction dir) +{ + struct mcs_intr_event event = { 0 }; + int i; + + if (!(intr & MCS_BBE_INT_MASK)) + return; + + event.mcs_id = mcs->mcs_id; + event.pcifunc = mcs->pf_map[0]; + + for (i = 0; i < MCS_MAX_BBE_INT; i++) { + if (!(intr & BIT_ULL(i))) + continue; + + /* Lower nibble denotes data fifo overflow interrupts and + * upper nibble indicates policy fifo overflow interrupts. + */ + if (intr & 0xFULL) + event.intr_mask = (dir == MCS_RX) ? 
+ MCS_BBE_RX_DFIFO_OVERFLOW_INT : + MCS_BBE_TX_DFIFO_OVERFLOW_INT; + else + event.intr_mask = (dir == MCS_RX) ? + MCS_BBE_RX_PLFIFO_OVERFLOW_INT : + MCS_BBE_TX_PLFIFO_OVERFLOW_INT; + + /* Notify the lmac_id info which ran into BBE fatal error */ + event.lmac_id = i & 0x3ULL; + mcs_add_intr_wq_entry(mcs, &event); + } +} + +static void mcs_pab_intr_handler(struct mcs *mcs, u64 intr, enum mcs_direction dir) +{ + struct mcs_intr_event event = { 0 }; + int i; + + if (!(intr & MCS_PAB_INT_MASK)) + return; + + event.mcs_id = mcs->mcs_id; + event.pcifunc = mcs->pf_map[0]; + + for (i = 0; i < MCS_MAX_PAB_INT; i++) { + if (!(intr & BIT_ULL(i))) + continue; + + event.intr_mask = (dir == MCS_RX) ? MCS_PAB_RX_CHAN_OVERFLOW_INT : + MCS_PAB_TX_CHAN_OVERFLOW_INT; + + /* Notify the lmac_id info which ran into PAB fatal error */ + event.lmac_id = i; + mcs_add_intr_wq_entry(mcs, &event); + } +} + +static irqreturn_t mcs_ip_intr_handler(int irq, void *mcs_irq) +{ + struct mcs *mcs = (struct mcs *)mcs_irq; + u64 intr, cpm_intr, bbe_intr, pab_intr; + + /* Disable and clear the interrupt */ + mcs_reg_write(mcs, MCSX_IP_INT_ENA_W1C, BIT_ULL(0)); + mcs_reg_write(mcs, MCSX_IP_INT, BIT_ULL(0)); + + /* Check which block has interrupt */ + intr = mcs_reg_read(mcs, MCSX_TOP_SLAVE_INT_SUM); + + /* CPM RX */ + if (intr & MCS_CPM_RX_INT_ENA) { + /* Check for PN thresh interrupt bit */ + cpm_intr = mcs_reg_read(mcs, MCSX_CPM_RX_SLAVE_RX_INT); + + if (cpm_intr & MCS_CPM_RX_INT_PN_THRESH_REACHED) + mcs_rx_pn_thresh_reached_handler(mcs); + + if (cpm_intr & MCS_CPM_RX_INT_ALL) + mcs_rx_misc_intr_handler(mcs, cpm_intr); + + /* Clear the interrupt */ + mcs_reg_write(mcs, MCSX_CPM_RX_SLAVE_RX_INT, cpm_intr); + } + + /* CPM TX */ + if (intr & MCS_CPM_TX_INT_ENA) { + cpm_intr = mcs_reg_read(mcs, MCSX_CPM_TX_SLAVE_TX_INT); + + if (cpm_intr & MCS_CPM_TX_INT_PN_THRESH_REACHED) { + if (mcs->hw->mcs_blks > 1) + cnf10kb_mcs_tx_pn_thresh_reached_handler(mcs); + else + cn10kb_mcs_tx_pn_thresh_reached_handler(mcs); + } + + if (cpm_intr & MCS_CPM_TX_INT_SA_NOT_VALID) + mcs_tx_misc_intr_handler(mcs, cpm_intr); + + if (cpm_intr & MCS_CPM_TX_INT_PACKET_XPN_EQ0) { + if (mcs->hw->mcs_blks > 1) + cnf10kb_mcs_tx_pn_wrapped_handler(mcs); + else + cn10kb_mcs_tx_pn_wrapped_handler(mcs); + } + /* Clear the interrupt */ + mcs_reg_write(mcs, MCSX_CPM_TX_SLAVE_TX_INT, cpm_intr); + } + + /* BBE RX */ + if (intr & MCS_BBE_RX_INT_ENA) { + bbe_intr = mcs_reg_read(mcs, MCSX_BBE_RX_SLAVE_BBE_INT); + mcs_bbe_intr_handler(mcs, bbe_intr, MCS_RX); + + /* Clear the interrupt */ + mcs_reg_write(mcs, MCSX_BBE_RX_SLAVE_BBE_INT_INTR_RW, 0); + mcs_reg_write(mcs, MCSX_BBE_RX_SLAVE_BBE_INT, bbe_intr); + } + + /* BBE TX */ + if (intr & MCS_BBE_TX_INT_ENA) { + bbe_intr = mcs_reg_read(mcs, MCSX_BBE_TX_SLAVE_BBE_INT); + mcs_bbe_intr_handler(mcs, bbe_intr, MCS_TX); + + /* Clear the interrupt */ + mcs_reg_write(mcs, MCSX_BBE_TX_SLAVE_BBE_INT_INTR_RW, 0); + mcs_reg_write(mcs, MCSX_BBE_TX_SLAVE_BBE_INT, bbe_intr); + } + + /* PAB RX */ + if (intr & MCS_PAB_RX_INT_ENA) { + pab_intr = mcs_reg_read(mcs, MCSX_PAB_RX_SLAVE_PAB_INT); + mcs_pab_intr_handler(mcs, pab_intr, MCS_RX); + + /* Clear the interrupt */ + mcs_reg_write(mcs, MCSX_PAB_RX_SLAVE_PAB_INT_INTR_RW, 0); + mcs_reg_write(mcs, MCSX_PAB_RX_SLAVE_PAB_INT, pab_intr); + } + + /* PAB TX */ + if (intr & MCS_PAB_TX_INT_ENA) { + pab_intr = mcs_reg_read(mcs, MCSX_PAB_TX_SLAVE_PAB_INT); + mcs_pab_intr_handler(mcs, pab_intr, MCS_TX); + + /* Clear the interrupt */ + mcs_reg_write(mcs, MCSX_PAB_TX_SLAVE_PAB_INT_INTR_RW, 0); + mcs_reg_write(mcs, MCSX_PAB_TX_SLAVE_PAB_INT, pab_intr); + } + + /* Enable the interrupt */ + mcs_reg_write(mcs, 
+static void *alloc_mem(struct mcs *mcs, int n)
+{
+	return devm_kcalloc(mcs->dev, n, sizeof(u16), GFP_KERNEL);
+}
+
+static int mcs_alloc_struct_mem(struct mcs *mcs, struct mcs_rsrc_map *res)
+{
+	struct hwinfo *hw = mcs->hw;
+	int err;
+
+	res->flowid2pf_map = alloc_mem(mcs, hw->tcam_entries);
+	if (!res->flowid2pf_map)
+		return -ENOMEM;
+
+	res->secy2pf_map = alloc_mem(mcs, hw->secy_entries);
+	if (!res->secy2pf_map)
+		return -ENOMEM;
+
+	res->sc2pf_map = alloc_mem(mcs, hw->sc_entries);
+	if (!res->sc2pf_map)
+		return -ENOMEM;
+
+	res->sa2pf_map = alloc_mem(mcs, hw->sa_entries);
+	if (!res->sa2pf_map)
+		return -ENOMEM;
+
+	res->flowid2secy_map = alloc_mem(mcs, hw->tcam_entries);
+	if (!res->flowid2secy_map)
+		return -ENOMEM;
+
+	res->ctrlpktrule2pf_map = alloc_mem(mcs, MCS_MAX_CTRLPKT_RULES);
+	if (!res->ctrlpktrule2pf_map)
+		return -ENOMEM;
+
+	res->flow_ids.max = hw->tcam_entries - MCS_RSRC_RSVD_CNT;
+	err = rvu_alloc_bitmap(&res->flow_ids);
+	if (err)
+		return err;
+
+	res->secy.max = hw->secy_entries - MCS_RSRC_RSVD_CNT;
+	err = rvu_alloc_bitmap(&res->secy);
+	if (err)
+		return err;
+
+	res->sc.max = hw->sc_entries;
+	err = rvu_alloc_bitmap(&res->sc);
+	if (err)
+		return err;
+
+	res->sa.max = hw->sa_entries;
+	err = rvu_alloc_bitmap(&res->sa);
+	if (err)
+		return err;
+
+	res->ctrlpktrule.max = MCS_MAX_CTRLPKT_RULES;
+	err = rvu_alloc_bitmap(&res->ctrlpktrule);
+	if (err)
+		return err;
+
+	return 0;
+}
+
+static int mcs_register_interrupts(struct mcs *mcs)
+{
+	int ret = 0;
+
+	mcs->num_vec = pci_msix_vec_count(mcs->pdev);
+
+	ret = pci_alloc_irq_vectors(mcs->pdev, mcs->num_vec,
+				    mcs->num_vec, PCI_IRQ_MSIX);
+	if (ret < 0) {
+		dev_err(mcs->dev, "MCS Request for %d msix vector failed err:%d\n",
+			mcs->num_vec, ret);
+		return ret;
+	}
+
+	ret = request_irq(pci_irq_vector(mcs->pdev, MCS_INT_VEC_IP),
+			  mcs_ip_intr_handler, 0, "MCS_IP", mcs);
+	if (ret) {
+		dev_err(mcs->dev, "MCS IP irq registration failed\n");
+		goto exit;
+	}
+
+	/* Enable IP interrupts */
+	mcs_reg_write(mcs, MCSX_IP_INT_ENA_W1S, BIT_ULL(0));
+
+	/* Enable CPM, BBE and PAB block interrupts */
+	mcs_reg_write(mcs, MCSX_TOP_SLAVE_INT_SUM_ENB,
+		      MCS_CPM_RX_INT_ENA | MCS_CPM_TX_INT_ENA |
+		      MCS_BBE_RX_INT_ENA | MCS_BBE_TX_INT_ENA |
+		      MCS_PAB_RX_INT_ENA | MCS_PAB_TX_INT_ENA);
+
+	mcs_reg_write(mcs, MCSX_CPM_TX_SLAVE_TX_INT_ENB, 0x7ULL);
+	mcs_reg_write(mcs, MCSX_CPM_RX_SLAVE_RX_INT_ENB, 0x7FULL);
+
+	mcs_reg_write(mcs, MCSX_BBE_RX_SLAVE_BBE_INT_ENB, 0xff);
+	mcs_reg_write(mcs, MCSX_BBE_TX_SLAVE_BBE_INT_ENB, 0xff);
+
+	mcs_reg_write(mcs, MCSX_PAB_RX_SLAVE_PAB_INT_ENB, 0xff);
+	mcs_reg_write(mcs, MCSX_PAB_TX_SLAVE_PAB_INT_ENB, 0xff);
+
+	mcs->tx_sa_active = alloc_mem(mcs, mcs->hw->sc_entries);
+	if (!mcs->tx_sa_active) {
+		ret = -ENOMEM;
+		goto exit;
+	}
+
+	return ret;
+exit:
+	pci_free_irq_vectors(mcs->pdev);
+	mcs->num_vec = 0;
+	return ret;
+}
+
+int mcs_get_blkcnt(void)
+{
+	struct mcs *mcs;
+	int idmax = -ENODEV;
+
+	/* Check whether an MCS block is present in hardware */
+	if (!pci_dev_present(mcs_id_table))
+		return 0;
+
+	list_for_each_entry(mcs, &mcs_list, mcs_list)
+		if (mcs->mcs_id > idmax)
+			idmax = mcs->mcs_id;
+
+	if (idmax < 0)
+		return 0;
+
+	return idmax + 1;
+}
+
+struct mcs *mcs_get_pdata(int mcs_id)
+{
+	struct mcs *mcs_dev;
+
+	list_for_each_entry(mcs_dev, &mcs_list, mcs_list) {
+		if (mcs_dev->mcs_id == mcs_id)
+			return mcs_dev;
+	}
+	return NULL;
+}
+
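mcs_get_blkcnt() and mcs_get_pdata() above are the lookup API the AF code uses; note that mcs_get_blkcnt() returns the highest probed id plus one, so ids are not guaranteed to be dense. A hypothetical iteration over all blocks therefore still checks for NULL:

/* Sketch: walk every probed MCS block by id (example name invented). */
static void example_for_each_mcs(void)
{
	int id;

	for (id = 0; id < mcs_get_blkcnt(); id++) {
		struct mcs *mcs = mcs_get_pdata(id);

		if (!mcs)
			continue;
		dev_info(mcs->dev, "mcs%d: %d SecY entries\n",
			 id, mcs->hw->secy_entries);
	}
}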
+void mcs_set_port_cfg(struct mcs *mcs, struct mcs_port_cfg_set_req *req)
+{
+	u64 val = 0;
+
+	mcs_reg_write(mcs, MCSX_PAB_RX_SLAVE_PORT_CFGX(req->port_id),
+		      req->port_mode & MCS_PORT_MODE_MASK);
+
+	req->cstm_tag_rel_mode_sel &= 0x3;
+
+	if (mcs->hw->mcs_blks > 1) {
+		req->fifo_skid &= MCS_PORT_FIFO_SKID_MASK;
+		val = (u32)req->fifo_skid << 0x10;
+		val |= req->fifo_skid;
+		mcs_reg_write(mcs, MCSX_PAB_RX_SLAVE_FIFO_SKID_CFGX(req->port_id), val);
+		mcs_reg_write(mcs, MCSX_PEX_TX_SLAVE_CUSTOM_TAG_REL_MODE_SEL(req->port_id),
+			      req->cstm_tag_rel_mode_sel);
+		val = mcs_reg_read(mcs, MCSX_PEX_RX_SLAVE_PEX_CONFIGURATION);
+
+		if (req->custom_hdr_enb)
+			val |= BIT_ULL(req->port_id);
+		else
+			val &= ~BIT_ULL(req->port_id);
+
+		mcs_reg_write(mcs, MCSX_PEX_RX_SLAVE_PEX_CONFIGURATION, val);
+	} else {
+		val = mcs_reg_read(mcs, MCSX_PEX_TX_SLAVE_PORT_CONFIG(req->port_id));
+		val |= (req->cstm_tag_rel_mode_sel << 2);
+		mcs_reg_write(mcs, MCSX_PEX_TX_SLAVE_PORT_CONFIG(req->port_id), val);
+	}
+}
+
+void mcs_get_port_cfg(struct mcs *mcs, struct mcs_port_cfg_get_req *req,
+		      struct mcs_port_cfg_get_rsp *rsp)
+{
+	u64 reg = 0;
+
+	rsp->port_mode = mcs_reg_read(mcs, MCSX_PAB_RX_SLAVE_PORT_CFGX(req->port_id)) &
+			 MCS_PORT_MODE_MASK;
+
+	if (mcs->hw->mcs_blks > 1) {
+		reg = MCSX_PAB_RX_SLAVE_FIFO_SKID_CFGX(req->port_id);
+		rsp->fifo_skid = mcs_reg_read(mcs, reg) & MCS_PORT_FIFO_SKID_MASK;
+		reg = MCSX_PEX_TX_SLAVE_CUSTOM_TAG_REL_MODE_SEL(req->port_id);
+		rsp->cstm_tag_rel_mode_sel = mcs_reg_read(mcs, reg) & 0x3;
+		if (mcs_reg_read(mcs, MCSX_PEX_RX_SLAVE_PEX_CONFIGURATION) & BIT_ULL(req->port_id))
+			rsp->custom_hdr_enb = 1;
+	} else {
+		reg = MCSX_PEX_TX_SLAVE_PORT_CONFIG(req->port_id);
+		rsp->cstm_tag_rel_mode_sel = mcs_reg_read(mcs, reg) >> 2;
+	}
+
+	rsp->port_id = req->port_id;
+	rsp->mcs_id = req->mcs_id;
+}
+
+void mcs_get_custom_tag_cfg(struct mcs *mcs, struct mcs_custom_tag_cfg_get_req *req,
+			    struct mcs_custom_tag_cfg_get_rsp *rsp)
+{
+	u64 reg = 0, val = 0;
+	u8 idx;
+
+	for (idx = 0; idx < MCS_MAX_CUSTOM_TAGS; idx++) {
+		if (mcs->hw->mcs_blks > 1)
+			reg = (req->dir == MCS_RX) ? MCSX_PEX_RX_SLAVE_CUSTOM_TAGX(idx) :
+			      MCSX_PEX_TX_SLAVE_CUSTOM_TAGX(idx);
+		else
+			reg = (req->dir == MCS_RX) ? MCSX_PEX_RX_SLAVE_VLAN_CFGX(idx) :
+			      MCSX_PEX_TX_SLAVE_VLAN_CFGX(idx);
+
+		val = mcs_reg_read(mcs, reg);
+		if (mcs->hw->mcs_blks > 1) {
+			rsp->cstm_etype[idx] = val & GENMASK(15, 0);
+			rsp->cstm_indx[idx] = (val >> 0x16) & 0x3;
+			reg = (req->dir == MCS_RX) ? MCSX_PEX_RX_SLAVE_ETYPE_ENABLE :
+			      MCSX_PEX_TX_SLAVE_ETYPE_ENABLE;
+			rsp->cstm_etype_en = mcs_reg_read(mcs, reg) & 0xFF;
+		} else {
+			rsp->cstm_etype[idx] = (val >> 0x1) & GENMASK(15, 0);
+			rsp->cstm_indx[idx] = (val >> 0x11) & 0x3;
+			rsp->cstm_etype_en |= (val & 0x1) << idx;
+		}
+	}
+
+	rsp->mcs_id = req->mcs_id;
+	rsp->dir = req->dir;
+}
+
+void mcs_reset_port(struct mcs *mcs, u8 port_id, u8 reset)
+{
+	u64 reg = MCSX_MCS_TOP_SLAVE_PORT_RESET(port_id);
+
+	mcs_reg_write(mcs, reg, reset & 0x1);
+}
+
+/* Set lmac to bypass/operational mode */
+void mcs_set_lmac_mode(struct mcs *mcs, int lmac_id, u8 mode)
+{
+	u64 reg;
+
+	reg = MCSX_MCS_TOP_SLAVE_CHANNEL_CFG(lmac_id * 2);
+	mcs_reg_write(mcs, reg, (u64)mode);
+}
+
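mcs_pn_threshold_set() below picks between the packet-number (PN) and extended-PN (XPN) threshold register per direction. A sketch of a caller arming the TX-side XPN threshold (field names taken from struct mcs_set_pn_threshold as used below; the wrapper name and headroom value are arbitrary examples):

/* Sketch: arm the TX XPN threshold so PN_THRESH_REACHED fires with
 * headroom left before the 64-bit packet number wraps.
 */
static void example_arm_tx_xpn_threshold(struct mcs *mcs)
{
	struct mcs_set_pn_threshold pn = {
		.dir = MCS_TX,
		.xpn = 1,
		.threshold = U64_MAX - 0x100000,	/* example headroom */
	};

	mcs_pn_threshold_set(mcs, &pn);
}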
+void mcs_pn_threshold_set(struct mcs *mcs, struct mcs_set_pn_threshold *pn)
+{
+	u64 reg;
+
+	if (pn->dir == MCS_RX)
+		reg = pn->xpn ? MCSX_CPM_RX_SLAVE_XPN_THRESHOLD : MCSX_CPM_RX_SLAVE_PN_THRESHOLD;
+	else
+		reg = pn->xpn ? MCSX_CPM_TX_SLAVE_XPN_THRESHOLD : MCSX_CPM_TX_SLAVE_PN_THRESHOLD;
+
+	mcs_reg_write(mcs, reg, pn->threshold);
+}
+
+void cn10kb_mcs_parser_cfg(struct mcs *mcs)
+{
+	u64 reg, val;
+
+	/* VLAN CTag */
+	val = BIT_ULL(0) | (0x8100ull & 0xFFFF) << 1 | BIT_ULL(17);
+	/* RX */
+	reg = MCSX_PEX_RX_SLAVE_VLAN_CFGX(0);
+	mcs_reg_write(mcs, reg, val);
+
+	/* TX */
+	reg = MCSX_PEX_TX_SLAVE_VLAN_CFGX(0);
+	mcs_reg_write(mcs, reg, val);
+
+	/* VLAN STag */
+	val = BIT_ULL(0) | (0x88a8ull & 0xFFFF) << 1 | BIT_ULL(18);
+	/* RX */
+	reg = MCSX_PEX_RX_SLAVE_VLAN_CFGX(1);
+	mcs_reg_write(mcs, reg, val);
+
+	/* TX */
+	reg = MCSX_PEX_TX_SLAVE_VLAN_CFGX(1);
+	mcs_reg_write(mcs, reg, val);
+}
+
+static void mcs_lmac_init(struct mcs *mcs, int lmac_id)
+{
+	u64 reg;
+
+	/* Port mode 25GB */
+	reg = MCSX_PAB_RX_SLAVE_PORT_CFGX(lmac_id);
+	mcs_reg_write(mcs, reg, 0);
+
+	if (mcs->hw->mcs_blks > 1) {
+		reg = MCSX_PAB_RX_SLAVE_FIFO_SKID_CFGX(lmac_id);
+		mcs_reg_write(mcs, reg, 0xe000e);
+		return;
+	}
+
+	reg = MCSX_PAB_TX_SLAVE_PORT_CFGX(lmac_id);
+	mcs_reg_write(mcs, reg, 0);
+}
+
+int mcs_set_lmac_channels(int mcs_id, u16 base)
+{
+	struct mcs *mcs;
+	int lmac;
+	u64 cfg;
+
+	mcs = mcs_get_pdata(mcs_id);
+	if (!mcs)
+		return -ENODEV;
+	for (lmac = 0; lmac < mcs->hw->lmac_cnt; lmac++) {
+		cfg = mcs_reg_read(mcs, MCSX_LINK_LMACX_CFG(lmac));
+		cfg &= ~(MCSX_LINK_LMAC_BASE_MASK | MCSX_LINK_LMAC_RANGE_MASK);
+		cfg |= FIELD_PREP(MCSX_LINK_LMAC_RANGE_MASK, ilog2(16));
+		cfg |= FIELD_PREP(MCSX_LINK_LMAC_BASE_MASK, base);
+		mcs_reg_write(mcs, MCSX_LINK_LMACX_CFG(lmac), cfg);
+		base += 16;
+	}
+	return 0;
+}
+
+static int mcs_x2p_calibration(struct mcs *mcs)
+{
+	unsigned long timeout = jiffies + usecs_to_jiffies(20000);
+	int i, err = 0;
+	u64 val;
+
+	/* Set X2P calibration */
+	val = mcs_reg_read(mcs, MCSX_MIL_GLOBAL);
+	val |= BIT_ULL(5);
+	mcs_reg_write(mcs, MCSX_MIL_GLOBAL, val);
+
+	/* Wait for calibration to complete */
+	while (!(mcs_reg_read(mcs, MCSX_MIL_RX_GBL_STATUS) & BIT_ULL(0))) {
+		if (time_before(jiffies, timeout)) {
+			usleep_range(80, 100);
+			continue;
+		} else {
+			err = -EBUSY;
+			dev_err(mcs->dev, "MCS X2P calibration failed\n");
+			return err;
+		}
+	}
+
+	val = mcs_reg_read(mcs, MCSX_MIL_RX_GBL_STATUS);
+	for (i = 0; i < mcs->hw->mcs_x2p_intf; i++) {
+		if (val & BIT_ULL(1 + i))
+			continue;
+		err = -EBUSY;
+		dev_err(mcs->dev, "MCS:%d didn't respond to X2P calibration\n", i);
+	}
+	/* Clear the X2P calibration bit */
+	mcs_reg_write(mcs, MCSX_MIL_GLOBAL, mcs_reg_read(mcs, MCSX_MIL_GLOBAL) & ~BIT_ULL(5));
+
+	return err;
+}
+
+static void mcs_set_external_bypass(struct mcs *mcs, u8 bypass)
+{
+	u64 val;
+
+	/* Set MCS to external bypass */
+	val = mcs_reg_read(mcs, MCSX_MIL_GLOBAL);
+	if (bypass)
+		val |= BIT_ULL(6);
+	else
+		val &= ~BIT_ULL(6);
+	mcs_reg_write(mcs, MCSX_MIL_GLOBAL, val);
+}
+
+static void mcs_global_cfg(struct mcs *mcs)
+{
+	/* Disable external bypass */
+	mcs_set_external_bypass(mcs, false);
+
+	/* Reset TX/RX stats memory */
+	mcs_reg_write(mcs, MCSX_CSE_RX_SLAVE_STATS_CLEAR, 0x1F);
+	mcs_reg_write(mcs, MCSX_CSE_TX_SLAVE_STATS_CLEAR, 0x1F);
+
+	/* Set MCS to perform standard IEEE 802.1AE MACsec processing */
+	if (mcs->hw->mcs_blks == 1) {
+		mcs_reg_write(mcs, MCSX_IP_MODE, BIT_ULL(3));
+		return;
+	}
+
+	mcs_reg_write(mcs, MCSX_BBE_RX_SLAVE_CAL_ENTRY, 0xe4);
+	mcs_reg_write(mcs, MCSX_BBE_RX_SLAVE_CAL_LEN, 4);
+}
+
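The MIL global register is poked bit-by-bit in several places: bit 5 starts X2P calibration (mcs_x2p_calibration() above), bit 6 is the external bypass (mcs_set_external_bypass() above), and bit 4 forces the IP clock on for CNF10K-B statistics reads (mcs_set_force_clk_en() in mcs_cnf10kb.c below). Collected as hypothetical named masks (the names are invented; the positions come from the code):

/* Hypothetical names for the MCSX_MIL_GLOBAL bits used in this patch */
#define MCS_MIL_GLOBAL_FORCE_CLK_EN	BIT_ULL(4)	/* CNF10K-B stats clock */
#define MCS_MIL_GLOBAL_X2P_CALIB	BIT_ULL(5)	/* start X2P calibration */
#define MCS_MIL_GLOBAL_EXT_BYPASS	BIT_ULL(6)	/* external bypass enable */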
+void cn10kb_mcs_set_hw_capabilities(struct mcs *mcs)
+{
+	struct hwinfo *hw = mcs->hw;
+
+	hw->tcam_entries = 128;		/* TCAM entries */
+	hw->secy_entries = 128;		/* SecY entries */
+	hw->sc_entries = 128;		/* SC CAM entries */
+	hw->sa_entries = 256;		/* SA entries */
+	hw->lmac_cnt = 20;		/* lmacs/ports per mcs block */
+	hw->mcs_x2p_intf = 5;		/* X2P calibration interfaces */
+	hw->mcs_blks = 1;		/* MCS blocks */
+}
+
+static struct mcs_ops cn10kb_mcs_ops = {
+	.mcs_set_hw_capabilities	= cn10kb_mcs_set_hw_capabilities,
+	.mcs_parser_cfg			= cn10kb_mcs_parser_cfg,
+	.mcs_tx_sa_mem_map_write	= cn10kb_mcs_tx_sa_mem_map_write,
+	.mcs_rx_sa_mem_map_write	= cn10kb_mcs_rx_sa_mem_map_write,
+	.mcs_flowid_secy_map		= cn10kb_mcs_flowid_secy_map,
+};
+
+static int mcs_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+	struct device *dev = &pdev->dev;
+	int lmac, err = 0;
+	struct mcs *mcs;
+
+	mcs = devm_kzalloc(dev, sizeof(*mcs), GFP_KERNEL);
+	if (!mcs)
+		return -ENOMEM;
+
+	mcs->hw = devm_kzalloc(dev, sizeof(struct hwinfo), GFP_KERNEL);
+	if (!mcs->hw)
+		return -ENOMEM;
+
+	err = pci_enable_device(pdev);
+	if (err) {
+		dev_err(dev, "Failed to enable PCI device\n");
+		pci_set_drvdata(pdev, NULL);
+		return err;
+	}
+
+	err = pci_request_regions(pdev, DRV_NAME);
+	if (err) {
+		dev_err(dev, "PCI request regions failed 0x%x\n", err);
+		goto exit;
+	}
+
+	mcs->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
+	if (!mcs->reg_base) {
+		dev_err(dev, "mcs: Cannot map CSR memory space, aborting\n");
+		err = -ENOMEM;
+		goto exit;
+	}
+
+	pci_set_drvdata(pdev, mcs);
+	mcs->pdev = pdev;
+	mcs->dev = &pdev->dev;
+
+	if (pdev->subsystem_device == PCI_SUBSYS_DEVID_CN10K_B)
+		mcs->mcs_ops = &cn10kb_mcs_ops;
+	else
+		mcs->mcs_ops = cnf10kb_get_mac_ops();
+
+	/* Set hardware capabilities */
+	mcs->mcs_ops->mcs_set_hw_capabilities(mcs);
+
+	mcs_global_cfg(mcs);
+
+	/* Perform X2P calibration */
+	err = mcs_x2p_calibration(mcs);
+	if (err)
+		goto err_x2p;
+
+	mcs->mcs_id = (pci_resource_start(pdev, PCI_CFG_REG_BAR_NUM) >> 24)
+			& MCS_ID_MASK;
+
+	/* Set mcs tx side resources */
+	err = mcs_alloc_struct_mem(mcs, &mcs->tx);
+	if (err)
+		goto err_x2p;
+
+	/* Set mcs rx side resources */
+	err = mcs_alloc_struct_mem(mcs, &mcs->rx);
+	if (err)
+		goto err_x2p;
+
+	/* Per-port config */
+	for (lmac = 0; lmac < mcs->hw->lmac_cnt; lmac++)
+		mcs_lmac_init(mcs, lmac);
+
+	/* Parser configuration */
+	mcs->mcs_ops->mcs_parser_cfg(mcs);
+
+	err = mcs_register_interrupts(mcs);
+	if (err)
+		goto exit;
+
+	list_add(&mcs->mcs_list, &mcs_list);
+	mutex_init(&mcs->stats_lock);
+
+	return 0;
+
+err_x2p:
+	/* Enable external bypass */
+	mcs_set_external_bypass(mcs, true);
+exit:
+	pci_release_regions(pdev);
+	pci_disable_device(pdev);
+	pci_set_drvdata(pdev, NULL);
+	return err;
+}
+
+static void mcs_remove(struct pci_dev *pdev)
+{
+	struct mcs *mcs = pci_get_drvdata(pdev);
+
+	/* Set MCS to external bypass */
+	mcs_set_external_bypass(mcs, true);
+	pci_free_irq_vectors(pdev);
+	pci_release_regions(pdev);
+	pci_disable_device(pdev);
+	pci_set_drvdata(pdev, NULL);
+}
+
+struct pci_driver mcs_driver = {
+	.name = DRV_NAME,
+	.id_table = mcs_id_table,
+	.probe = mcs_probe,
+	.remove = mcs_remove,
+};
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mcs.h b/drivers/net/ethernet/marvell/octeontx2/af/mcs.h
new file mode 100644
index 000000000000..64dc2b80e15d
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mcs.h
@@ -0,0 +1,246 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell CN10K MCS driver
+ *
+ * Copyright (C) 2022 Marvell.
+ */ + +#ifndef MCS_H +#define MCS_H + +#include <linux/bits.h> +#include "rvu.h" + +#define PCI_DEVID_CN10K_MCS 0xA096 + +#define MCSX_LINK_LMAC_RANGE_MASK GENMASK_ULL(19, 16) +#define MCSX_LINK_LMAC_BASE_MASK GENMASK_ULL(11, 0) + +#define MCS_ID_MASK 0x7 +#define MCS_MAX_PFS 128 + +#define MCS_PORT_MODE_MASK 0x3 +#define MCS_PORT_FIFO_SKID_MASK 0x3F +#define MCS_MAX_CUSTOM_TAGS 0x8 + +#define MCS_CTRLPKT_ETYPE_RULE_MAX 8 +#define MCS_CTRLPKT_DA_RULE_MAX 8 +#define MCS_CTRLPKT_DA_RANGE_RULE_MAX 4 +#define MCS_CTRLPKT_COMBO_RULE_MAX 4 +#define MCS_CTRLPKT_MAC_RULE_MAX 1 + +#define MCS_MAX_CTRLPKT_RULES (MCS_CTRLPKT_ETYPE_RULE_MAX + \ + MCS_CTRLPKT_DA_RULE_MAX + \ + MCS_CTRLPKT_DA_RANGE_RULE_MAX + \ + MCS_CTRLPKT_COMBO_RULE_MAX + \ + MCS_CTRLPKT_MAC_RULE_MAX) + +#define MCS_CTRLPKT_ETYPE_RULE_OFFSET 0 +#define MCS_CTRLPKT_DA_RULE_OFFSET 8 +#define MCS_CTRLPKT_DA_RANGE_RULE_OFFSET 16 +#define MCS_CTRLPKT_COMBO_RULE_OFFSET 20 +#define MCS_CTRLPKT_MAC_EN_RULE_OFFSET 24 + +/* Reserved resources for default bypass entry */ +#define MCS_RSRC_RSVD_CNT 1 + +/* MCS Interrupt Vector Enumeration */ +enum mcs_int_vec_e { + MCS_INT_VEC_MIL_RX_GBL = 0x0, + MCS_INT_VEC_MIL_RX_LMACX = 0x1, + MCS_INT_VEC_MIL_TX_LMACX = 0x5, + MCS_INT_VEC_HIL_RX_GBL = 0x9, + MCS_INT_VEC_HIL_RX_LMACX = 0xa, + MCS_INT_VEC_HIL_TX_GBL = 0xe, + MCS_INT_VEC_HIL_TX_LMACX = 0xf, + MCS_INT_VEC_IP = 0x13, + MCS_INT_VEC_CNT = 0x14, +}; + +#define MCS_MAX_BBE_INT 8ULL +#define MCS_BBE_INT_MASK 0xFFULL + +#define MCS_MAX_PAB_INT 4ULL +#define MCS_PAB_INT_MASK 0xFULL + +#define MCS_BBE_RX_INT_ENA BIT_ULL(0) +#define MCS_BBE_TX_INT_ENA BIT_ULL(1) +#define MCS_CPM_RX_INT_ENA BIT_ULL(2) +#define MCS_CPM_TX_INT_ENA BIT_ULL(3) +#define MCS_PAB_RX_INT_ENA BIT_ULL(4) +#define MCS_PAB_TX_INT_ENA BIT_ULL(5) + +#define MCS_CPM_TX_INT_PACKET_XPN_EQ0 BIT_ULL(0) +#define MCS_CPM_TX_INT_PN_THRESH_REACHED BIT_ULL(1) +#define MCS_CPM_TX_INT_SA_NOT_VALID BIT_ULL(2) + +#define MCS_CPM_RX_INT_SECTAG_V_EQ1 BIT_ULL(0) +#define MCS_CPM_RX_INT_SECTAG_E_EQ0_C_EQ1 BIT_ULL(1) +#define MCS_CPM_RX_INT_SL_GTE48 BIT_ULL(2) +#define MCS_CPM_RX_INT_ES_EQ1_SC_EQ1 BIT_ULL(3) +#define MCS_CPM_RX_INT_SC_EQ1_SCB_EQ1 BIT_ULL(4) +#define MCS_CPM_RX_INT_PACKET_XPN_EQ0 BIT_ULL(5) +#define MCS_CPM_RX_INT_PN_THRESH_REACHED BIT_ULL(6) + +#define MCS_CPM_RX_INT_ALL (MCS_CPM_RX_INT_SECTAG_V_EQ1 | \ + MCS_CPM_RX_INT_SECTAG_E_EQ0_C_EQ1 | \ + MCS_CPM_RX_INT_SL_GTE48 | \ + MCS_CPM_RX_INT_ES_EQ1_SC_EQ1 | \ + MCS_CPM_RX_INT_SC_EQ1_SCB_EQ1 | \ + MCS_CPM_RX_INT_PACKET_XPN_EQ0 | \ + MCS_CPM_RX_INT_PN_THRESH_REACHED) + +struct mcs_pfvf { + u64 intr_mask; /* Enabled Interrupt mask */ +}; + +struct mcs_intr_event { + u16 pcifunc; + u64 intr_mask; + u64 sa_id; + u8 mcs_id; + u8 lmac_id; +}; + +struct mcs_intrq_entry { + struct list_head node; + struct mcs_intr_event intr_event; +}; + +struct secy_mem_map { + u8 flow_id; + u8 secy; + u8 ctrl_pkt; + u8 sc; + u64 sci; +}; + +struct mcs_rsrc_map { + u16 *flowid2pf_map; + u16 *secy2pf_map; + u16 *sc2pf_map; + u16 *sa2pf_map; + u16 *flowid2secy_map; /* bitmap flowid mapped to secy*/ + u16 *ctrlpktrule2pf_map; + struct rsrc_bmap flow_ids; + struct rsrc_bmap secy; + struct rsrc_bmap sc; + struct rsrc_bmap sa; + struct rsrc_bmap ctrlpktrule; +}; + +struct hwinfo { + u8 tcam_entries; + u8 secy_entries; + u8 sc_entries; + u16 sa_entries; + u8 mcs_x2p_intf; + u8 lmac_cnt; + u8 mcs_blks; + unsigned long lmac_bmap; /* bitmap of enabled mcs lmac */ +}; + +struct mcs { + void __iomem *reg_base; + struct pci_dev *pdev; + struct device *dev; + struct hwinfo *hw; + 
struct mcs_rsrc_map tx; + struct mcs_rsrc_map rx; + u16 pf_map[MCS_MAX_PFS]; /* List of PCIFUNC mapped to MCS */ + u8 mcs_id; + struct mcs_ops *mcs_ops; + struct list_head mcs_list; + /* Lock for mcs stats */ + struct mutex stats_lock; + struct mcs_pfvf *pf; + struct mcs_pfvf *vf; + u16 num_vec; + void *rvu; + u16 *tx_sa_active; +}; + +struct mcs_ops { + void (*mcs_set_hw_capabilities)(struct mcs *mcs); + void (*mcs_parser_cfg)(struct mcs *mcs); + void (*mcs_tx_sa_mem_map_write)(struct mcs *mcs, struct mcs_tx_sc_sa_map *map); + void (*mcs_rx_sa_mem_map_write)(struct mcs *mcs, struct mcs_rx_sc_sa_map *map); + void (*mcs_flowid_secy_map)(struct mcs *mcs, struct secy_mem_map *map, int dir); +}; + +extern struct pci_driver mcs_driver; + +static inline void mcs_reg_write(struct mcs *mcs, u64 offset, u64 val) +{ + writeq(val, mcs->reg_base + offset); +} + +static inline u64 mcs_reg_read(struct mcs *mcs, u64 offset) +{ + return readq(mcs->reg_base + offset); +} + +/* MCS APIs */ +struct mcs *mcs_get_pdata(int mcs_id); +int mcs_get_blkcnt(void); +int mcs_set_lmac_channels(int mcs_id, u16 base); +int mcs_alloc_rsrc(struct rsrc_bmap *rsrc, u16 *pf_map, u16 pcifunc); +int mcs_free_rsrc(struct rsrc_bmap *rsrc, u16 *pf_map, int rsrc_id, u16 pcifunc); +int mcs_alloc_all_rsrc(struct mcs *mcs, u8 *flowid, u8 *secy_id, + u8 *sc_id, u8 *sa1_id, u8 *sa2_id, u16 pcifunc, int dir); +int mcs_free_all_rsrc(struct mcs *mcs, int dir, u16 pcifunc); +void mcs_clear_secy_plcy(struct mcs *mcs, int secy_id, int dir); +void mcs_ena_dis_flowid_entry(struct mcs *mcs, int id, int dir, int ena); +void mcs_ena_dis_sc_cam_entry(struct mcs *mcs, int id, int ena); +void mcs_flowid_entry_write(struct mcs *mcs, u64 *data, u64 *mask, int id, int dir); +void mcs_secy_plcy_write(struct mcs *mcs, u64 plcy, int id, int dir); +void mcs_rx_sc_cam_write(struct mcs *mcs, u64 sci, u64 secy, int sc_id); +void mcs_sa_plcy_write(struct mcs *mcs, u64 *plcy, int sa, int dir); +void mcs_map_sc_to_sa(struct mcs *mcs, u64 *sa_map, int sc, int dir); +void mcs_pn_table_write(struct mcs *mcs, u8 pn_id, u64 next_pn, u8 dir); +void mcs_tx_sa_mem_map_write(struct mcs *mcs, struct mcs_tx_sc_sa_map *map); +void mcs_flowid_secy_map(struct mcs *mcs, struct secy_mem_map *map, int dir); +void mcs_rx_sa_mem_map_write(struct mcs *mcs, struct mcs_rx_sc_sa_map *map); +void mcs_pn_threshold_set(struct mcs *mcs, struct mcs_set_pn_threshold *pn); +int mcs_install_flowid_bypass_entry(struct mcs *mcs); +void mcs_set_lmac_mode(struct mcs *mcs, int lmac_id, u8 mode); +void mcs_reset_port(struct mcs *mcs, u8 port_id, u8 reset); +void mcs_set_port_cfg(struct mcs *mcs, struct mcs_port_cfg_set_req *req); +void mcs_get_port_cfg(struct mcs *mcs, struct mcs_port_cfg_get_req *req, + struct mcs_port_cfg_get_rsp *rsp); +void mcs_get_custom_tag_cfg(struct mcs *mcs, struct mcs_custom_tag_cfg_get_req *req, + struct mcs_custom_tag_cfg_get_rsp *rsp); +int mcs_alloc_ctrlpktrule(struct rsrc_bmap *rsrc, u16 *pf_map, u16 offset, u16 pcifunc); +int mcs_free_ctrlpktrule(struct mcs *mcs, struct mcs_free_ctrl_pkt_rule_req *req); +int mcs_ctrlpktrule_write(struct mcs *mcs, struct mcs_ctrl_pkt_rule_write_req *req); + +/* CN10K-B APIs */ +void cn10kb_mcs_set_hw_capabilities(struct mcs *mcs); +void cn10kb_mcs_tx_sa_mem_map_write(struct mcs *mcs, struct mcs_tx_sc_sa_map *map); +void cn10kb_mcs_flowid_secy_map(struct mcs *mcs, struct secy_mem_map *map, int dir); +void cn10kb_mcs_rx_sa_mem_map_write(struct mcs *mcs, struct mcs_rx_sc_sa_map *map); +void cn10kb_mcs_parser_cfg(struct mcs *mcs); + +/* 
CNF10K-B APIs */
+struct mcs_ops *cnf10kb_get_mac_ops(void);
+void cnf10kb_mcs_set_hw_capabilities(struct mcs *mcs);
+void cnf10kb_mcs_tx_sa_mem_map_write(struct mcs *mcs, struct mcs_tx_sc_sa_map *map);
+void cnf10kb_mcs_flowid_secy_map(struct mcs *mcs, struct secy_mem_map *map, int dir);
+void cnf10kb_mcs_rx_sa_mem_map_write(struct mcs *mcs, struct mcs_rx_sc_sa_map *map);
+void cnf10kb_mcs_parser_cfg(struct mcs *mcs);
+void cnf10kb_mcs_tx_pn_thresh_reached_handler(struct mcs *mcs);
+void cnf10kb_mcs_tx_pn_wrapped_handler(struct mcs *mcs);
+
+/* Stats APIs */
+void mcs_get_sc_stats(struct mcs *mcs, struct mcs_sc_stats *stats, int id, int dir);
+void mcs_get_sa_stats(struct mcs *mcs, struct mcs_sa_stats *stats, int id, int dir);
+void mcs_get_port_stats(struct mcs *mcs, struct mcs_port_stats *stats, int id, int dir);
+void mcs_get_flowid_stats(struct mcs *mcs, struct mcs_flowid_stats *stats, int id, int dir);
+void mcs_get_rx_secy_stats(struct mcs *mcs, struct mcs_secy_stats *stats, int id);
+void mcs_get_tx_secy_stats(struct mcs *mcs, struct mcs_secy_stats *stats, int id);
+void mcs_clear_stats(struct mcs *mcs, u8 type, u8 id, int dir);
+int mcs_clear_all_stats(struct mcs *mcs, u16 pcifunc, int dir);
+int mcs_set_force_clk_en(struct mcs *mcs, bool set);
+
+int mcs_add_intr_wq_entry(struct mcs *mcs, struct mcs_intr_event *event);
+
+#endif /* MCS_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mcs_cnf10kb.c b/drivers/net/ethernet/marvell/octeontx2/af/mcs_cnf10kb.c
new file mode 100644
index 000000000000..7b6205414428
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mcs_cnf10kb.c
@@ -0,0 +1,214 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell MCS driver
+ *
+ * Copyright (C) 2022 Marvell.
+ */
+
+#include "mcs.h"
+#include "mcs_reg.h"
+
+static struct mcs_ops cnf10kb_mcs_ops = {
+	.mcs_set_hw_capabilities	= cnf10kb_mcs_set_hw_capabilities,
+	.mcs_parser_cfg			= cnf10kb_mcs_parser_cfg,
+	.mcs_tx_sa_mem_map_write	= cnf10kb_mcs_tx_sa_mem_map_write,
+	.mcs_rx_sa_mem_map_write	= cnf10kb_mcs_rx_sa_mem_map_write,
+	.mcs_flowid_secy_map		= cnf10kb_mcs_flowid_secy_map,
+};
+
+struct mcs_ops *cnf10kb_get_mac_ops(void)
+{
+	return &cnf10kb_mcs_ops;
+}
+
+void cnf10kb_mcs_set_hw_capabilities(struct mcs *mcs)
+{
+	struct hwinfo *hw = mcs->hw;
+
+	hw->tcam_entries = 64;		/* TCAM entries */
+	hw->secy_entries = 64;		/* SecY entries */
+	hw->sc_entries = 64;		/* SC CAM entries */
+	hw->sa_entries = 128;		/* SA entries */
+	hw->lmac_cnt = 4;		/* lmacs/ports per mcs block */
+	hw->mcs_x2p_intf = 1;		/* X2P calibration interfaces */
+	hw->mcs_blks = 7;		/* MCS blocks */
+}
+
+void cnf10kb_mcs_parser_cfg(struct mcs *mcs)
+{
+	u64 reg, val;
+
+	/* VLAN CTag */
+	val = (0x8100ull & 0xFFFF) | BIT_ULL(20) | BIT_ULL(22);
+
+	reg = MCSX_PEX_RX_SLAVE_CUSTOM_TAGX(0);
+	mcs_reg_write(mcs, reg, val);
+
+	reg = MCSX_PEX_TX_SLAVE_CUSTOM_TAGX(0);
+	mcs_reg_write(mcs, reg, val);
+
+	/* VLAN STag */
+	val = (0x88a8ull & 0xFFFF) | BIT_ULL(20) | BIT_ULL(23);
+
+	/* RX */
+	reg = MCSX_PEX_RX_SLAVE_CUSTOM_TAGX(1);
+	mcs_reg_write(mcs, reg, val);
+
+	/* TX */
+	reg = MCSX_PEX_TX_SLAVE_CUSTOM_TAGX(1);
+	mcs_reg_write(mcs, reg, val);
+
+	/* Enable custom tags 0 and 1 and the SecTag etype */
+	val = BIT_ULL(0) | BIT_ULL(1) | BIT_ULL(12);
+
+	reg = MCSX_PEX_RX_SLAVE_ETYPE_ENABLE;
+	mcs_reg_write(mcs, reg, val);
+
+	reg = MCSX_PEX_TX_SLAVE_ETYPE_ENABLE;
+	mcs_reg_write(mcs, reg, val);
+}
+
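The CNF10K-B custom-tag words written above pack the EtherType in bits 15:0 and, judging from mcs_get_custom_tag_cfg() in mcs.c, the tag index in bits 23:22, with BIT(20) set for both entries. A hypothetical constructor for such a word, under that reading:

/* Sketch: build a CNF10K-B custom-tag word (helper name invented;
 * bit layout inferred from cnf10kb_mcs_parser_cfg() above).
 */
static u64 cnf10kb_make_custom_tag(u16 etype, u8 indx)
{
	return (u64)etype | BIT_ULL(20) | ((u64)(indx & 0x3) << 22);
}

/* cnf10kb_make_custom_tag(0x8100, 1) and cnf10kb_make_custom_tag(0x88a8, 2)
 * reproduce the CTag and STag values programmed above.
 */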
+void cnf10kb_mcs_flowid_secy_map(struct mcs *mcs, struct secy_mem_map *map, int dir)
+{
+	u64 reg, val;
+
+	val = (map->secy & 0x3F) | (map->ctrl_pkt & 0x1) << 6;
+	if (dir == MCS_RX) {
+		reg = MCSX_CPM_RX_SLAVE_SECY_MAP_MEMX(map->flow_id);
+	} else {
+		reg = MCSX_CPM_TX_SLAVE_SECY_MAP_MEM_0X(map->flow_id);
+		mcs_reg_write(mcs, reg, map->sci);
+		val |= (map->sc & 0x3F) << 7;
+		reg = MCSX_CPM_TX_SLAVE_SECY_MAP_MEM_1X(map->flow_id);
+	}
+
+	mcs_reg_write(mcs, reg, val);
+}
+
+void cnf10kb_mcs_tx_sa_mem_map_write(struct mcs *mcs, struct mcs_tx_sc_sa_map *map)
+{
+	u64 reg, val;
+
+	val = (map->sa_index0 & 0x7F) | (map->sa_index1 & 0x7F) << 7;
+
+	reg = MCSX_CPM_TX_SLAVE_SA_MAP_MEM_0X(map->sc_id);
+	mcs_reg_write(mcs, reg, val);
+
+	reg = MCSX_CPM_TX_SLAVE_AUTO_REKEY_ENABLE_0;
+	val = mcs_reg_read(mcs, reg);
+
+	if (map->rekey_ena)
+		val |= BIT_ULL(map->sc_id);
+	else
+		val &= ~BIT_ULL(map->sc_id);
+
+	mcs_reg_write(mcs, reg, val);
+
+	mcs_reg_write(mcs, MCSX_CPM_TX_SLAVE_SA_INDEX0_VLDX(map->sc_id), map->sa_index0_vld);
+	mcs_reg_write(mcs, MCSX_CPM_TX_SLAVE_SA_INDEX1_VLDX(map->sc_id), map->sa_index1_vld);
+
+	mcs_reg_write(mcs, MCSX_CPM_TX_SLAVE_TX_SA_ACTIVEX(map->sc_id), map->tx_sa_active);
+}
+
+void cnf10kb_mcs_rx_sa_mem_map_write(struct mcs *mcs, struct mcs_rx_sc_sa_map *map)
+{
+	u64 val, reg;
+
+	val = (map->sa_index & 0x7F) | (map->sa_in_use << 7);
+
+	reg = MCSX_CPM_RX_SLAVE_SA_MAP_MEMX((4 * map->sc_id) + map->an);
+	mcs_reg_write(mcs, reg, val);
+}
+
+int mcs_set_force_clk_en(struct mcs *mcs, bool set)
+{
+	unsigned long timeout = jiffies + usecs_to_jiffies(2000);
+	u64 val;
+
+	val = mcs_reg_read(mcs, MCSX_MIL_GLOBAL);
+
+	if (set) {
+		val |= BIT_ULL(4);
+		mcs_reg_write(mcs, MCSX_MIL_GLOBAL, val);
+
+		/* Poll till mcsx_mil_ip_gbl_status.mcs_ip_stats_ready value is 1 */
+		while (!(mcs_reg_read(mcs, MCSX_MIL_IP_GBL_STATUS) & BIT_ULL(0))) {
+			if (time_after(jiffies, timeout)) {
+				dev_err(mcs->dev, "MCS set force clk enable failed\n");
+				break;
+			}
+		}
+	} else {
+		val &= ~BIT_ULL(4);
+		mcs_reg_write(mcs, MCSX_MIL_GLOBAL, val);
+	}
+
+	return 0;
+}
+
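On CNF10K-B (mcs_blks > 1) the mailbox handlers in mcs_rvu_if.c bracket every statistics read with mcs_set_force_clk_en() above so the counters are stable. A minimal sketch of that read sequence (the wrapper name is invented; mcs_get_flowid_stats() is declared in mcs.h):

/* Sketch: read flow-id stats with the force-clock bracket. */
static void example_read_flowid_stats(struct mcs *mcs,
				      struct mcs_flowid_stats *stats,
				      int id, int dir)
{
	if (mcs->hw->mcs_blks > 1)
		mcs_set_force_clk_en(mcs, true);

	mutex_lock(&mcs->stats_lock);
	mcs_get_flowid_stats(mcs, stats, id, dir);
	mutex_unlock(&mcs->stats_lock);

	if (mcs->hw->mcs_blks > 1)
		mcs_set_force_clk_en(mcs, false);
}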
+/* TX SA interrupt is raised only if autorekey is enabled.
+ * The MCS_CPM_TX_SLAVE_SA_MAP_MEM_0X[sc].tx_sa_active bit toggles when
+ * one of the two SAs mapped to the SC expires: tx_sa_active=0 implies
+ * the SA at SA_index1 expired, else the SA at SA_index0 expired.
+ */
+void cnf10kb_mcs_tx_pn_thresh_reached_handler(struct mcs *mcs)
+{
+	struct mcs_intr_event event = { 0 };
+	struct rsrc_bmap *sc_bmap;
+	unsigned long rekey_ena;
+	u64 val, sa_status;
+	int sc;
+
+	sc_bmap = &mcs->tx.sc;
+
+	event.mcs_id = mcs->mcs_id;
+	event.intr_mask = MCS_CPM_TX_PN_THRESH_REACHED_INT;
+
+	rekey_ena = mcs_reg_read(mcs, MCSX_CPM_TX_SLAVE_AUTO_REKEY_ENABLE_0);
+
+	for_each_set_bit(sc, sc_bmap->bmap, mcs->hw->sc_entries) {
+		/* Skip if auto rekey is not enabled */
+		if (!test_bit(sc, &rekey_ena))
+			continue;
+		sa_status = mcs_reg_read(mcs, MCSX_CPM_TX_SLAVE_TX_SA_ACTIVEX(sc));
+		/* Check if tx_sa_active status has changed */
+		if (sa_status == mcs->tx_sa_active[sc])
+			continue;
+
+		/* sa_status=1: SA at SA_index0 expired */
+		val = mcs_reg_read(mcs, MCSX_CPM_TX_SLAVE_SA_MAP_MEM_0X(sc));
+		if (sa_status)
+			event.sa_id = val & 0x7F;
+		else
+			event.sa_id = (val >> 7) & 0x7F;
+
+		event.pcifunc = mcs->tx.sa2pf_map[event.sa_id];
+		mcs_add_intr_wq_entry(mcs, &event);
+	}
+}
+
+void cnf10kb_mcs_tx_pn_wrapped_handler(struct mcs *mcs)
+{
+	struct mcs_intr_event event = { 0 };
+	struct rsrc_bmap *sc_bmap;
+	u64 val;
+	int sc;
+
+	sc_bmap = &mcs->tx.sc;
+
+	event.mcs_id = mcs->mcs_id;
+	event.intr_mask = MCS_CPM_TX_PACKET_XPN_EQ0_INT;
+
+	for_each_set_bit(sc, sc_bmap->bmap, mcs->hw->sc_entries) {
+		val = mcs_reg_read(mcs, MCSX_CPM_TX_SLAVE_SA_MAP_MEM_0X(sc));
+
+		if (mcs->tx_sa_active[sc])
+			/* SA_index1 was in use and got expired */
+			event.sa_id = (val >> 7) & 0x7F;
+		else
+			/* SA_index0 was in use and got expired */
+			event.sa_id = val & 0x7F;
+
+		event.pcifunc = mcs->tx.sa2pf_map[event.sa_id];
+		mcs_add_intr_wq_entry(mcs, &event);
+	}
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mcs_reg.h b/drivers/net/ethernet/marvell/octeontx2/af/mcs_reg.h
new file mode 100644
index 000000000000..c95a8b8f5eaf
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mcs_reg.h
@@ -0,0 +1,1102 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell MCS driver
+ *
+ * Copyright (C) 2022 Marvell.
+ */ + +#ifndef MCS_REG_H +#define MCS_REG_H + +#include <linux/bits.h> + +/* Registers */ +#define MCSX_IP_MODE 0x900c8ull +#define MCSX_MCS_TOP_SLAVE_PORT_RESET(a) ({ \ + u64 offset; \ + \ + offset = 0x408ull; \ + if (mcs->hw->mcs_blks > 1) \ + offset = 0xa28ull; \ + offset += (a) * 0x8ull; \ + offset; }) + + +#define MCSX_MCS_TOP_SLAVE_CHANNEL_CFG(a) ({ \ + u64 offset; \ + \ + offset = 0x808ull; \ + if (mcs->hw->mcs_blks > 1) \ + offset = 0xa68ull; \ + offset += (a) * 0x8ull; \ + offset; }) + +#define MCSX_MIL_GLOBAL ({ \ + u64 offset; \ + \ + offset = 0x80000ull; \ + if (mcs->hw->mcs_blks > 1) \ + offset = 0x60000ull; \ + offset; }) + +#define MCSX_MIL_RX_LMACX_CFG(a) ({ \ + u64 offset; \ + \ + offset = 0x900a8ull; \ + if (mcs->hw->mcs_blks > 1) \ + offset = 0x700a8ull; \ + offset += (a) * 0x800ull; \ + offset; }) + +#define MCSX_HIL_GLOBAL ({ \ + u64 offset; \ + \ + offset = 0xc0000ull; \ + if (mcs->hw->mcs_blks > 1) \ + offset = 0xa0000ull; \ + offset; }) + +#define MCSX_LINK_LMACX_CFG(a) ({ \ + u64 offset; \ + \ + offset = 0x90000ull; \ + if (mcs->hw->mcs_blks > 1) \ + offset = 0x70000ull; \ + offset += (a) * 0x800ull; \ + offset; }) + +#define MCSX_MIL_RX_GBL_STATUS ({ \ + u64 offset; \ + \ + offset = 0x800c8ull; \ + if (mcs->hw->mcs_blks > 1) \ + offset = 0x600c8ull; \ + offset; }) + +#define MCSX_MIL_IP_GBL_STATUS ({ \ + u64 offset; \ + \ + offset = 0x800d0ull; \ + if (mcs->hw->mcs_blks > 1) \ + offset = 0x600d0ull; \ + offset; }) + +/* PAB */ +#define MCSX_PAB_RX_SLAVE_PORT_CFGX(a) ({ \ + u64 offset; \ + \ + offset = 0x1718ull; \ + if (mcs->hw->mcs_blks > 1) \ + offset = 0x280ull; \ + offset += (a) * 0x40ull; \ + offset; }) + +#define MCSX_PAB_TX_SLAVE_PORT_CFGX(a) (0x2930ull + (a) * 0x40ull) + +/* PEX registers */ +#define MCSX_PEX_RX_SLAVE_VLAN_CFGX(a) (0x3b58ull + (a) * 0x8ull) +#define MCSX_PEX_TX_SLAVE_VLAN_CFGX(a) (0x46f8ull + (a) * 0x8ull) +#define MCSX_PEX_TX_SLAVE_CUSTOM_TAG_REL_MODE_SEL(a) (0x788ull + (a) * 0x8ull) +#define MCSX_PEX_TX_SLAVE_PORT_CONFIG(a) (0x4738ull + (a) * 0x8ull) +#define MCSX_PEX_RX_SLAVE_RULE_ETYPE_CFGX(a) ({ \ + u64 offset; \ + \ + offset = 0x3fc0ull; \ + if (mcs->hw->mcs_blks > 1) \ + offset = 0x558ull; \ + offset += (a) * 0x8ull; \ + offset; }) + +#define MCSX_PEX_RX_SLAVE_RULE_DAX(a) ({ \ + u64 offset; \ + \ + offset = 0x4000ull; \ + if (mcs->hw->mcs_blks > 1) \ + offset = 0x598ull; \ + offset += (a) * 0x8ull; \ + offset; }) + +#define MCSX_PEX_RX_SLAVE_RULE_DA_RANGE_MINX(a) ({ \ + u64 offset; \ + \ + offset = 0x4040ull; \ + if (mcs->hw->mcs_blks > 1) \ + offset = 0x5d8ull; \ + offset += (a) * 0x8ull; \ + offset; }) + +#define MCSX_PEX_RX_SLAVE_RULE_DA_RANGE_MAXX(a) ({ \ + u64 offset; \ + \ + offset = 0x4048ull; \ + if (mcs->hw->mcs_blks > 1) \ + offset = 0x5e0ull; \ + offset += (a) * 0x8ull; \ + offset; }) + +#define MCSX_PEX_RX_SLAVE_RULE_COMBO_MINX(a) ({ \ + u64 offset; \ + \ + offset = 0x4080ull; \ + if (mcs->hw->mcs_blks > 1) \ + offset = 0x648ull; \ + offset += (a) * 0x8ull; \ + offset; }) + +#define MCSX_PEX_RX_SLAVE_RULE_COMBO_MAXX(a) ({ \ + u64 offset; \ + \ + offset = 0x4088ull; \ + if (mcs->hw->mcs_blks > 1) \ + offset = 0x650ull; \ + offset += (a) * 0x8ull; \ + offset; }) + +#define MCSX_PEX_RX_SLAVE_RULE_COMBO_ETX(a) ({ \ + u64 offset; \ + \ + offset = 0x4090ull; \ + if (mcs->hw->mcs_blks > 1) \ + offset = 0x658ull; \ + offset += (a) * 0x8ull; \ + offset; }) + +#define MCSX_PEX_RX_SLAVE_RULE_MAC ({ \ + u64 offset; \ + \ + offset = 0x40e0ull; \ + if (mcs->hw->mcs_blks > 1) \ + offset = 0x6d8ull; \ + offset; }) + +#define 
MCSX_PEX_RX_SLAVE_RULE_ENABLE ({ \ + u64 offset; \ + \ + offset = 0x40e8ull; \ + if (mcs->hw->mcs_blks > 1) \ + offset = 0x6e0ull; \ + offset; }) + +#define MCSX_PEX_TX_SLAVE_RULE_ETYPE_CFGX(a) ({ \ + u64 offset; \ + \ + offset = 0x4b60ull; \ + if (mcs->hw->mcs_blks > 1) \ + offset = 0x7d8ull; \ + offset += (a) * 0x8ull; \ + offset; }) + +#define MCSX_PEX_TX_SLAVE_RULE_DAX(a) ({ \ + u64 offset; \ + \ + offset = 0x4ba0ull; \ + if (mcs->hw->mcs_blks > 1) \ + offset = 0x818ull; \ + offset += (a) * 0x8ull; \ + offset; }) + +#define MCSX_PEX_TX_SLAVE_RULE_DA_RANGE_MINX(a) ({ \ + u64 offset; \ + \ + offset = 0x4be0ull; \ + if (mcs->hw->mcs_blks > 1) \ + offset = 0x858ull; \ + offset += (a) * 0x8ull; \ + offset; }) + +#define MCSX_PEX_TX_SLAVE_RULE_DA_RANGE_MAXX(a) ({ \ + u64 offset; \ + \ + offset = 0x4be8ull; \ + if (mcs->hw->mcs_blks > 1) \ + offset = 0x860ull; \ + offset += (a) * 0x8ull; \ + offset; }) + +#define MCSX_PEX_TX_SLAVE_RULE_COMBO_MINX(a) ({ \ + u64 offset; \ + \ + offset = 0x4c20ull; \ + if (mcs->hw->mcs_blks > 1) \ + offset = 0x8c8ull; \ + offset += (a) * 0x8ull; \ + offset; }) + +#define MCSX_PEX_TX_SLAVE_RULE_COMBO_MAXX(a) ({ \ + u64 offset; \ + \ + offset = 0x4c28ull; \ + if (mcs->hw->mcs_blks > 1) \ + offset = 0x8d0ull; \ + offset += (a) * 0x8ull; \ + offset; }) + +#define MCSX_PEX_TX_SLAVE_RULE_COMBO_ETX(a) ({ \ + u64 offset; \ + \ + offset = 0x4c30ull; \ + if (mcs->hw->mcs_blks > 1) \ + offset = 0x8d8ull; \ + offset += (a) * 0x8ull; \ + offset; }) + +#define MCSX_PEX_TX_SLAVE_RULE_MAC ({ \ + u64 offset; \ + \ + offset = 0x4c80ull; \ + if (mcs->hw->mcs_blks > 1) \ + offset = 0x958ull; \ + offset; }) + +#define MCSX_PEX_TX_SLAVE_RULE_ENABLE ({ \ + u64 offset; \ + \ + offset = 0x4c88ull; \ + if (mcs->hw->mcs_blks > 1) \ + offset = 0x960ull; \ + offset; }) + +#define MCSX_PEX_RX_SLAVE_PEX_CONFIGURATION ({ \ + u64 offset; \ + \ + offset = 0x3b50ull; \ + if (mcs->hw->mcs_blks > 1) \ + offset = 0x4c0ull; \ + offset; }) + +/* CNF10K-B */ +#define MCSX_PEX_RX_SLAVE_CUSTOM_TAGX(a) (0x4c8ull + (a) * 0x8ull) +#define MCSX_PEX_TX_SLAVE_CUSTOM_TAGX(a) (0x748ull + (a) * 0x8ull) +#define MCSX_PEX_RX_SLAVE_ETYPE_ENABLE 0x6e8ull +#define MCSX_PEX_TX_SLAVE_ETYPE_ENABLE 0x968ull + +/* BEE */ +#define MCSX_BBE_RX_SLAVE_PADDING_CTL 0xe08ull +#define MCSX_BBE_TX_SLAVE_PADDING_CTL 0x12f8ull +#define MCSX_BBE_RX_SLAVE_CAL_ENTRY 0x180ull +#define MCSX_BBE_RX_SLAVE_CAL_LEN 0x188ull +#define MCSX_PAB_RX_SLAVE_FIFO_SKID_CFGX(a) (0x290ull + (a) * 0x40ull) + +#define MCSX_BBE_RX_SLAVE_BBE_INT ({ \ + u64 offset; \ + \ + offset = 0xe00ull; \ + if (mcs->hw->mcs_blks > 1) \ + offset = 0x160ull; \ + offset; }) + +#define MCSX_BBE_RX_SLAVE_BBE_INT_ENB ({ \ + u64 offset; \ + \ + offset = 0xe08ull; \ + if (mcs->hw->mcs_blks > 1) \ + offset = 0x168ull; \ + offset; }) + +#define MCSX_BBE_RX_SLAVE_BBE_INT_INTR_RW ({ \ + u64 offset; \ + \ + offset = 0xe08ull; \ + if (mcs->hw->mcs_blks > 1) \ + offset = 0x178ull; \ + offset; }) + +#define MCSX_BBE_TX_SLAVE_BBE_INT ({ \ + u64 offset; \ + \ + offset = 0x1278ull; \ + if (mcs->hw->mcs_blks > 1) \ + offset = 0x1e0ull; \ + offset; }) + +#define MCSX_BBE_TX_SLAVE_BBE_INT_INTR_RW ({ \ + u64 offset; \ + \ + offset = 0x1278ull; \ + if (mcs->hw->mcs_blks > 1) \ + offset = 0x1f8ull; \ + offset; }) + +#define MCSX_BBE_TX_SLAVE_BBE_INT_ENB ({ \ + u64 offset; \ + \ + offset = 0x1280ull; \ + if (mcs->hw->mcs_blks > 1) \ + offset = 0x1e8ull; \ + offset; }) + +#define MCSX_PAB_RX_SLAVE_PAB_INT ({ \ + u64 offset; \ + \ + offset = 0x16f0ull; \ + if (mcs->hw->mcs_blks > 1) \ + offset = 
0x260ull; \ + offset; }) + +#define MCSX_PAB_RX_SLAVE_PAB_INT_ENB ({ \ + u64 offset; \ + \ + offset = 0x16f8ull; \ + if (mcs->hw->mcs_blks > 1) \ + offset = 0x268ull; \ + offset; }) + +#define MCSX_PAB_RX_SLAVE_PAB_INT_INTR_RW ({ \ + u64 offset; \ + \ + offset = 0x16f8ull; \ + if (mcs->hw->mcs_blks > 1) \ + offset = 0x278ull; \ + offset; }) + +#define MCSX_PAB_TX_SLAVE_PAB_INT ({ \ + u64 offset; \ + \ + offset = 0x2908ull; \ + if (mcs->hw->mcs_blks > 1) \ + offset = 0x380ull; \ + offset; }) + +#define MCSX_PAB_TX_SLAVE_PAB_INT_ENB ({ \ + u64 offset; \ + \ + offset = 0x2910ull; \ + if (mcs->hw->mcs_blks > 1) \ + offset = 0x388ull; \ + offset; }) + +#define MCSX_PAB_TX_SLAVE_PAB_INT_INTR_RW ({ \ + u64 offset; \ + \ + offset = 0x16f8ull; \ + if (mcs->hw->mcs_blks > 1) \ + offset = 0x398ull; \ + offset; }) + +/* CPM registers */ +#define MCSX_CPM_RX_SLAVE_FLOWID_TCAM_DATAX(a, b) ({ \ + u64 offset; \ + \ + offset = 0x30740ull; \ + if (mcs->hw->mcs_blks > 1) \ + offset = 0x3bf8ull; \ + offset += (a) * 0x8ull + (b) * 0x20ull; \ + offset; }) + +#define MCSX_CPM_RX_SLAVE_FLOWID_TCAM_MASKX(a, b) ({ \ + u64 offset; \ + \ + offset = 0x34740ull; \ + if (mcs->hw->mcs_blks > 1) \ + offset = 0x43f8ull; \ + offset += (a) * 0x8ull + (b) * 0x20ull; \ + offset; }) + +#define MCSX_CPM_RX_SLAVE_FLOWID_TCAM_ENA_0 ({ \ + u64 offset; \ + \ + offset = 0x30700ull; \ + if (mcs->hw->mcs_blks > 1) \ + offset = 0x3bd8ull; \ + offset; }) + +#define MCSX_CPM_RX_SLAVE_SC_CAMX(a, b) ({ \ + u64 offset; \ + \ + offset = 0x38780ull; \ + if (mcs->hw->mcs_blks > 1) \ + offset = 0x4c08ull; \ + offset += (a) * 0x8ull + (b) * 0x10ull; \ + offset; }) + +#define MCSX_CPM_RX_SLAVE_SC_CAM_ENA(a) ({ \ + u64 offset; \ + \ + offset = 0x38740ull + (a) * 0x8ull; \ + if (mcs->hw->mcs_blks > 1) \ + offset = 0x4bf8ull; \ + offset; }) + +#define MCSX_CPM_RX_SLAVE_SECY_MAP_MEMX(a) ({ \ + u64 offset; \ + \ + offset = 0x23ee0ull; \ + if (mcs->hw->mcs_blks > 1) \ + offset = 0xbd0ull; \ + offset += (a) * 0x8ull; \ + offset; }) + +#define MCSX_CPM_RX_SLAVE_SECY_PLCY_MEM_0X(a) ({ \ + u64 offset; \ + \ + offset = (0x246e0ull + (a) * 0x10ull); \ + if (mcs->hw->mcs_blks > 1) \ + offset = (0xdd0ull + (a) * 0x8ull); \ + offset; }) + +#define MCSX_CPM_RX_SLAVE_SA_KEY_LOCKOUTX(a) ({ \ + u64 offset; \ + \ + offset = 0x23E90ull; \ + if (mcs->hw->mcs_blks > 1) \ + offset = 0xbb0ull; \ + offset += (a) * 0x8ull; \ + offset; }) + +#define MCSX_CPM_RX_SLAVE_SA_MAP_MEMX(a) ({ \ + u64 offset; \ + \ + offset = 0x256e0ull; \ + if (mcs->hw->mcs_blks > 1) \ + offset = 0xfd0ull; \ + offset += (a) * 0x8ull; \ + offset; }) + +#define MCSX_CPM_RX_SLAVE_SA_PLCY_MEMX(a, b) ({ \ + u64 offset; \ + \ + offset = 0x27700ull; \ + if (mcs->hw->mcs_blks > 1) \ + offset = 0x17d8ull; \ + offset += (a) * 0x8ull + (b) * 0x40ull; \ + offset; }) + +#define MCSX_CPM_RX_SLAVE_SA_PN_TABLE_MEMX(a) ({ \ + u64 offset; \ + \ + offset = 0x2f700ull; \ + if (mcs->hw->mcs_blks > 1) \ + offset = 0x37d8; \ + offset += (a) * 0x8ull; \ + offset; }) + +#define MCSX_CPM_RX_SLAVE_XPN_THRESHOLD ({ \ + u64 offset; \ + \ + offset = 0x23e40ull; \ + if (mcs->hw->mcs_blks > 1) \ + offset = 0xb90ull; \ + offset; }) + +#define MCSX_CPM_RX_SLAVE_PN_THRESHOLD ({ \ + u64 offset; \ + \ + offset = 0x23e48ull; \ + if (mcs->hw->mcs_blks > 1) \ + offset = 0xb98ull; \ + offset; }) + +#define MCSX_CPM_RX_SLAVE_PN_THRESH_REACHEDX(a) ({ \ + u64 offset; \ + \ + offset = 0x23e50ull; \ + if (mcs->hw->mcs_blks > 1) \ + offset = 0xba0ull; \ + offset += (a) * 0x8ull; \ + offset; }) + +#define MCSX_CPM_RX_SLAVE_FLOWID_TCAM_ENA_1 
0x30708ull +#define MCSX_CPM_RX_SLAVE_SECY_PLCY_MEM_1X(a) (0x246e8ull + (a) * 0x10ull) + +/* TX registers */ +#define MCSX_CPM_TX_SLAVE_FLOWID_TCAM_DATAX(a, b) ({ \ + u64 offset; \ + \ + offset = 0x51d50ull; \ + if (mcs->hw->mcs_blks > 1) \ + offset = 0xa7c0ull; \ + offset += (a) * 0x8ull + (b) * 0x20ull; \ + offset; }) + +#define MCSX_CPM_TX_SLAVE_FLOWID_TCAM_MASKX(a, b) ({ \ + u64 offset; \ + \ + offset = 0x55d50ull; \ + if (mcs->hw->mcs_blks > 1) \ + offset = 0xafc0ull; \ + offset += (a) * 0x8ull + (b) * 0x20ull; \ + offset; }) + +#define MCSX_CPM_TX_SLAVE_FLOWID_TCAM_ENA_0 ({ \ + u64 offset; \ + \ + offset = 0x51d10ull; \ + if (mcs->hw->mcs_blks > 1) \ + offset = 0xa7a0ull; \ + offset; }) + +#define MCSX_CPM_TX_SLAVE_SECY_MAP_MEM_0X(a) ({ \ + u64 offset; \ + \ + offset = 0x3e508ull + (a) * 0x8ull; \ + if (mcs->hw->mcs_blks > 1) \ + offset = 0x5550ull + (a) * 0x10ull; \ + offset; }) + +#define MCSX_CPM_TX_SLAVE_SECY_PLCY_MEMX(a) ({ \ + u64 offset; \ + \ + offset = 0x3ed08ull; \ + if (mcs->hw->mcs_blks > 1) \ + offset = 0x5950ull; \ + offset += (a) * 0x8ull; \ + offset; }) + +#define MCSX_CPM_TX_SLAVE_SA_KEY_LOCKOUTX(a) ({ \ + u64 offset; \ + \ + offset = 0x3e4c0ull; \ + if (mcs->hw->mcs_blks > 1) \ + offset = 0x5538ull; \ + offset += (a) * 0x8ull; \ + offset; }) + +#define MCSX_CPM_TX_SLAVE_SA_MAP_MEM_0X(a) ({ \ + u64 offset; \ + \ + offset = 0x3fd10ull + (a) * 0x10ull; \ + if (mcs->hw->mcs_blks > 1) \ + offset = 0x6150ull + (a) * 0x8ull; \ + offset; }) + +#define MCSX_CPM_TX_SLAVE_SA_PLCY_MEMX(a, b) ({ \ + u64 offset; \ + \ + offset = 0x40d10ull; \ + if (mcs->hw->mcs_blks > 1) \ + offset = 0x63a0ull; \ + offset += (a) * 0x8ull + (b) * 0x80ull; \ + offset; }) + +#define MCSX_CPM_TX_SLAVE_SA_PN_TABLE_MEMX(a) ({ \ + u64 offset; \ + \ + offset = 0x50d10ull; \ + if (mcs->hw->mcs_blks > 1) \ + offset = 0xa3a0ull; \ + offset += (a) * 0x8ull; \ + offset; }) + +#define MCSX_CPM_TX_SLAVE_XPN_THRESHOLD ({ \ + u64 offset; \ + \ + offset = 0x3e4b0ull; \ + if (mcs->hw->mcs_blks > 1) \ + offset = 0x5528ull; \ + offset; }) + +#define MCSX_CPM_TX_SLAVE_PN_THRESHOLD ({ \ + u64 offset; \ + \ + offset = 0x3e4b8ull; \ + if (mcs->hw->mcs_blks > 1) \ + offset = 0x5530ull; \ + offset; }) + +#define MCSX_CPM_TX_SLAVE_SA_MAP_MEM_1X(a) (0x3fd18ull + (a) * 0x10ull) +#define MCSX_CPM_TX_SLAVE_SECY_MAP_MEM_1X(a) (0x5558ull + (a) * 0x10ull) +#define MCSX_CPM_TX_SLAVE_FLOWID_TCAM_ENA_1 0x51d18ull +#define MCSX_CPM_TX_SLAVE_TX_SA_ACTIVEX(a) (0x5b50 + (a) * 0x8ull) +#define MCSX_CPM_TX_SLAVE_SA_INDEX0_VLDX(a) (0x5d50 + (a) * 0x8ull) +#define MCSX_CPM_TX_SLAVE_SA_INDEX1_VLDX(a) (0x5f50 + (a) * 0x8ull) +#define MCSX_CPM_TX_SLAVE_AUTO_REKEY_ENABLE_0 0x5500ull + +/* CSE */ +#define MCSX_CSE_RX_MEM_SLAVE_IFINCTLBCPKTSX(a) ({ \ + u64 offset; \ + \ + offset = 0x9e80ull; \ + if (mcs->hw->mcs_blks > 1) \ + offset = 0xc218ull; \ + offset += (a) * 0x8ull; \ + offset; }) + +#define MCSX_CSE_RX_MEM_SLAVE_IFINCTLMCPKTSX(a) ({ \ + u64 offset; \ + \ + offset = 0x9680ull; \ + if (mcs->hw->mcs_blks > 1) \ + offset = 0xc018ull; \ + offset += (a) * 0x8ull; \ + offset; }) + +#define MCSX_CSE_RX_MEM_SLAVE_IFINCTLOCTETSX(a) ({ \ + u64 offset; \ + \ + offset = 0x6e80ull; \ + if (mcs->hw->mcs_blks > 1) \ + offset = 0xbc18ull; \ + offset += (a) * 0x8ull; \ + offset; }) + +#define MCSX_CSE_RX_MEM_SLAVE_IFINCTLUCPKTSX(a) ({ \ + u64 offset; \ + \ + offset = 0x8e80ull; \ + if (mcs->hw->mcs_blks > 1) \ + offset = 0xbe18ull; \ + offset += (a) * 0x8ull; \ + offset; }) + +#define MCSX_CSE_RX_MEM_SLAVE_IFINUNCTLBCPKTSX(a) ({ \ + u64 offset; \ + \ + 
offset = 0x8680ull; \ + if (mcs->hw->mcs_blks > 1) \ + offset = 0xca18ull; \ + offset += (a) * 0x8ull; \ + offset; }) + +#define MCSX_CSE_RX_MEM_SLAVE_IFINUNCTLMCPKTSX(a) ({ \ + u64 offset; \ + \ + offset = 0x7e80ull; \ + if (mcs->hw->mcs_blks > 1) \ + offset = 0xc818ull; \ + offset += (a) * 0x8ull; \ + offset; }) + +#define MCSX_CSE_RX_MEM_SLAVE_IFINUNCTLOCTETSX(a) ({ \ + u64 offset; \ + \ + offset = 0x6680ull; \ + if (mcs->hw->mcs_blks > 1) \ + offset = 0xc418ull; \ + offset += (a) * 0x8ull; \ + offset; }) + +#define MCSX_CSE_RX_MEM_SLAVE_IFINUNCTLUCPKTSX(a) ({ \ + u64 offset; \ + \ + offset = 0x7680ull; \ + if (mcs->hw->mcs_blks > 1) \ + offset = 0xc618ull; \ + offset += (a) * 0x8ull; \ + offset; }) + +#define MCSX_CSE_RX_MEM_SLAVE_INOCTETSSECYDECRYPTEDX(a) ({ \ + u64 offset; \ + \ + offset = 0x5e80ull; \ + if (mcs->hw->mcs_blks > 1) \ + offset = 0xdc18ull; \ + offset += (a) * 0x8ull; \ + offset; }) + +#define MCSX_CSE_RX_MEM_SLAVE_INOCTETSSECYVALIDATEX(a)({ \ + u64 offset; \ + \ + offset = 0x5680ull; \ + if (mcs->hw->mcs_blks > 1) \ + offset = 0xda18ull; \ + offset += (a) * 0x8ull; \ + offset; }) + +#define MCSX_CSE_RX_MEM_SLAVE_INPKTSCTRLPORTDISABLEDX(a) ({ \ + u64 offset; \ + \ + offset = 0xd680ull; \ + if (mcs->hw->mcs_blks > 1) \ + offset = 0xce18ull; \ + offset += (a) * 0x8ull; \ + offset; }) + +#define MCSX_CSE_RX_MEM_SLAVE_INPKTSFLOWIDTCAMHITX(a) ({ \ + u64 offset; \ + \ + offset = 0x16a80ull; \ + if (mcs->hw->mcs_blks > 1) \ + offset = 0xec78ull; \ + offset += (a) * 0x8ull; \ + offset; }) + +#define MCSX_CSE_RX_MEM_SLAVE_INPKTSFLOWIDTCAMMISSX(a) ({ \ + u64 offset; \ + \ + offset = 0x16680ull; \ + if (mcs->hw->mcs_blks > 1) \ + offset = 0xec38ull; \ + offset += (a) * 0x8ull; \ + offset; }) + +#define MCSX_CSE_RX_MEM_SLAVE_INPKTSPARSEERRX(a) ({ \ + u64 offset; \ + \ + offset = 0x16880ull; \ + if (mcs->hw->mcs_blks > 1) \ + offset = 0xec18ull; \ + offset += (a) * 0x8ull; \ + offset; }) + +#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSCCAMHITX(a) ({ \ + u64 offset; \ + \ + offset = 0xfe80ull; \ + if (mcs->hw->mcs_blks > 1) \ + offset = 0xde18ull; \ + offset += (a) * 0x8ull; \ + offset; }) + +#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSCINVALIDX(a) ({ \ + u64 offset; \ + \ + offset = 0x10680ull; \ + if (mcs->hw->mcs_blks > 1) \ + offset = 0xe418ull; \ + offset += (a) * 0x8ull; \ + offset; }) + +#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSCNOTVALIDX(a) ({ \ + u64 offset; \ + \ + offset = 0x10e80ull; \ + if (mcs->hw->mcs_blks > 1) \ + offset = 0xe218ull; \ + offset += (a) * 0x8ull; \ + offset; }) + +#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYBADTAGX(a) ({ \ + u64 offset; \ + \ + offset = 0xae80ull; \ + if (mcs->hw->mcs_blks > 1) \ + offset = 0xd418ull; \ + offset += (a) * 0x8ull; \ + offset; }) + +#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYNOSAX(a) ({ \ + u64 offset; \ + \ + offset = 0xc680ull; \ + if (mcs->hw->mcs_blks > 1) \ + offset = 0xd618ull; \ + offset += (a) * 0x8ull; \ + offset; }) + +#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYNOSAERRORX(a) ({ \ + u64 offset; \ + \ + offset = 0xce80ull; \ + if (mcs->hw->mcs_blks > 1) \ + offset = 0xd818ull; \ + offset += (a) * 0x8ull; \ + offset; }) + +#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYTAGGEDCTLX(a) ({ \ + u64 offset; \ + \ + offset = 0xbe80ull; \ + if (mcs->hw->mcs_blks > 1) \ + offset = 0xcc18ull; \ + offset += (a) * 0x8ull; \ + offset; }) + +#define MCSX_CSE_RX_SLAVE_CTRL ({ \ + u64 offset; \ + \ + offset = 0x52a0ull; \ + if (mcs->hw->mcs_blks > 1) \ + offset = 0x9c0ull; \ + offset; }) + +#define MCSX_CSE_RX_SLAVE_STATS_CLEAR ({ \ + u64 offset; \ + \ + offset = 
0x52b8ull; \ + if (mcs->hw->mcs_blks > 1) \ + offset = 0x9d8ull; \ + offset; }) + +#define MCSX_CSE_RX_MEM_SLAVE_INOCTETSSCDECRYPTEDX(a) (0xe680ull + (a) * 0x8ull) +#define MCSX_CSE_RX_MEM_SLAVE_INOCTETSSCVALIDATEX(a) (0xde80ull + (a) * 0x8ull) +#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYUNTAGGEDORNOTAGX(a) (0xa680ull + (a) * 0x8ull) +#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYNOTAGX(a) (0xd218 + (a) * 0x8ull) +#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYUNTAGGEDX(a) (0xd018ull + (a) * 0x8ull) +#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSCUNCHECKEDOROKX(a) (0xee80ull + (a) * 0x8ull) +#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYCTLX(a) (0xb680ull + (a) * 0x8ull) +#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSCLATEORDELAYEDX(a) (0xf680ull + (a) * 0x8ull) +#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSAINVALIDX(a) (0x12680ull + (a) * 0x8ull) +#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSANOTUSINGSAERRORX(a) (0x15680ull + (a) * 0x8ull) +#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSANOTVALIDX(a) (0x13680ull + (a) * 0x8ull) +#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSAOKX(a) (0x11680ull + (a) * 0x8ull) +#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSAUNUSEDSAX(a) (0x14680ull + (a) * 0x8ull) +#define MCSX_CSE_RX_MEM_SLAVE_INPKTSEARLYPREEMPTERRX(a) (0xec58ull + (a) * 0x8ull) +#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSCOKX(a) (0xea18ull + (a) * 0x8ull) +#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSCDELAYEDX(a) (0xe618ull + (a) * 0x8ull) + +/* CSE TX */ +#define MCSX_CSE_TX_MEM_SLAVE_IFOUTCOMMONOCTETSX(a) (0x18440ull + (a) * 0x8ull) +#define MCSX_CSE_TX_MEM_SLAVE_IFOUTCTLBCPKTSX(a) ({ \ + u64 offset; \ + \ + offset = 0x1c440ull; \ + if (mcs->hw->mcs_blks > 1) \ + offset = 0xf478ull; \ + offset += (a) * 0x8ull; \ + offset; }) + +#define MCSX_CSE_TX_MEM_SLAVE_IFOUTCTLMCPKTSX(a) ({ \ + u64 offset; \ + \ + offset = 0x1bc40ull; \ + if (mcs->hw->mcs_blks > 1) \ + offset = 0xf278ull; \ + offset += (a) * 0x8ull; \ + offset; }) + +#define MCSX_CSE_TX_MEM_SLAVE_IFOUTCTLOCTETSX(a) ({ \ + u64 offset; \ + \ + offset = 0x19440ull; \ + if (mcs->hw->mcs_blks > 1) \ + offset = 0xee78ull; \ + offset += (a) * 0x8ull; \ + offset; }) + +#define MCSX_CSE_TX_MEM_SLAVE_IFOUTCTLUCPKTSX(a) ({ \ + u64 offset; \ + \ + offset = 0x1b440ull; \ + if (mcs->hw->mcs_blks > 1) \ + offset = 0xf078ull; \ + offset += (a) * 0x8ull; \ + offset; }) + +#define MCSX_CSE_TX_MEM_SLAVE_IFOUTUNCTLBCPKTSX(a) ({ \ + u64 offset; \ + \ + offset = 0x1ac40ull; \ + if (mcs->hw->mcs_blks > 1) \ + offset = 0xfc78ull; \ + offset += (a) * 0x8ull; \ + offset; }) + +#define MCSX_CSE_TX_MEM_SLAVE_IFOUTUNCTLMCPKTSX(a) ({ \ + u64 offset; \ + \ + offset = 0x1a440ull; \ + if (mcs->hw->mcs_blks > 1) \ + offset = 0xfa78ull; \ + offset += (a) * 0x8ull; \ + offset; }) + +#define MCSX_CSE_TX_MEM_SLAVE_IFOUTUNCTLOCTETSX(a) ({ \ + u64 offset; \ + \ + offset = 0x18c40ull; \ + if (mcs->hw->mcs_blks > 1) \ + offset = 0xf678ull; \ + offset += (a) * 0x8ull; \ + offset; }) + +#define MCSX_CSE_TX_MEM_SLAVE_IFOUTUNCTLUCPKTSX(a) ({ \ + u64 offset; \ + \ + offset = 0x19c40ull; \ + if (mcs->hw->mcs_blks > 1) \ + offset = 0xf878ull; \ + offset += (a) * 0x8ull; \ + offset; }) + +#define MCSX_CSE_TX_MEM_SLAVE_OUTOCTETSSECYENCRYPTEDX(a) ({ \ + u64 offset; \ + \ + offset = 0x17c40ull; \ + if (mcs->hw->mcs_blks > 1) \ + offset = 0x10878ull; \ + offset += (a) * 0x8ull; \ + offset; }) + +#define MCSX_CSE_TX_MEM_SLAVE_OUTOCTETSSECYPROTECTEDX(a) ({ \ + u64 offset; \ + \ + offset = 0x17440ull; \ + if (mcs->hw->mcs_blks > 1) \ + offset = 0x10678ull; \ + offset += (a) * 0x8ull; \ + offset; }) + +#define MCSX_CSE_TX_MEM_SLAVE_OUTPKTSCTRLPORTDISABLEDX(a) ({ \ + u64 offset; \ + \ 
+ offset = 0x1e440ull; \ + if (mcs->hw->mcs_blks > 1) \ + offset = 0xfe78ull; \ + offset += (a) * 0x8ull; \ + offset; }) + +#define MCSX_CSE_TX_MEM_SLAVE_OUTPKTSFLOWIDTCAMHITX(a) ({ \ + u64 offset; \ + \ + offset = 0x23240ull; \ + if (mcs->hw->mcs_blks > 1) \ + offset = 0x10ed8ull; \ + offset += (a) * 0x8ull; \ + offset; }) + +#define MCSX_CSE_TX_MEM_SLAVE_OUTPKTSFLOWIDTCAMMISSX(a) ({ \ + u64 offset; \ + \ + offset = 0x22c40ull; \ + if (mcs->hw->mcs_blks > 1) \ + offset = 0x10e98ull; \ + offset += (a) * 0x8ull; \ + offset; }) + +#define MCSX_CSE_TX_MEM_SLAVE_OUTPKTSPARSEERRX(a) ({ \ + u64 offset; \ + \ + offset = 0x22e40ull; \ + if (mcs->hw->mcs_blks > 1) \ + offset = 0x10e78ull; \ + offset += (a) * 0x8ull; \ + offset; }) + +#define MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSCENCRYPTEDX(a) ({ \ + u64 offset; \ + \ + offset = 0x20440ull; \ + if (mcs->hw->mcs_blks > 1) \ + offset = 0x10c78ull; \ + offset += (a) * 0x8ull; \ + offset; }) + +#define MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSCPROTECTEDX(a) ({ \ + u64 offset; \ + \ + offset = 0x1fc40ull; \ + if (mcs->hw->mcs_blks > 1) \ + offset = 0x10a78ull; \ + offset += (a) * 0x8ull; \ + offset; }) + +#define MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSECTAGINSERTIONERRX(a) ({ \ + u64 offset; \ + \ + offset = 0x23040ull; \ + if (mcs->hw->mcs_blks > 1) \ + offset = 0x110d8ull; \ + offset += (a) * 0x8ull; \ + offset; }) + +#define MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSECYNOACTIVESAX(a) ({ \ + u64 offset; \ + \ + offset = 0x1dc40ull; \ + if (mcs->hw->mcs_blks > 1) \ + offset = 0x10278ull; \ + offset += (a) * 0x8ull; \ + offset; }) + +#define MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSECYTOOLONGX(a) ({ \ + u64 offset; \ + \ + offset = 0x1d440ull; \ + if (mcs->hw->mcs_blks > 1) \ + offset = 0x10478ull; \ + offset += (a) * 0x8ull; \ + offset; }) + +#define MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSECYUNTAGGEDX(a) ({ \ + u64 offset; \ + \ + offset = 0x1cc40ull; \ + if (mcs->hw->mcs_blks > 1) \ + offset = 0x10078ull; \ + offset += (a) * 0x8ull; \ + offset; }) + +#define MCSX_CSE_TX_SLAVE_CTRL ({ \ + u64 offset; \ + \ + offset = 0x54a0ull; \ + if (mcs->hw->mcs_blks > 1) \ + offset = 0xa00ull; \ + offset; }) + +#define MCSX_CSE_TX_SLAVE_STATS_CLEAR ({ \ + u64 offset; \ + \ + offset = 0x54b8ull; \ + if (mcs->hw->mcs_blks > 1) \ + offset = 0xa18ull; \ + offset; }) + +#define MCSX_CSE_TX_MEM_SLAVE_OUTOCTETSSCENCRYPTEDX(a) (0x1f440ull + (a) * 0x8ull) +#define MCSX_CSE_TX_MEM_SLAVE_OUTOCTETSSCPROTECTEDX(a) (0x1ec40ull + (a) * 0x8ull) +#define MCSX_CSE_TX_MEM_SLAVE_OUTPKTSEARLYPREEMPTERRX(a) (0x10eb8ull + (a) * 0x8ull) +#define MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSAENCRYPTEDX(a) (0x21c40ull + (a) * 0x8ull) +#define MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSAPROTECTEDX(a) (0x20c40ull + (a) * 0x8ull) + +#define MCSX_IP_INT ({ \ + u64 offset; \ + \ + offset = 0x80028ull; \ + if (mcs->hw->mcs_blks > 1) \ + offset = 0x60028ull; \ + offset; }) + +#define MCSX_IP_INT_ENA_W1S ({ \ + u64 offset; \ + \ + offset = 0x80040ull; \ + if (mcs->hw->mcs_blks > 1) \ + offset = 0x60040ull; \ + offset; }) + +#define MCSX_IP_INT_ENA_W1C ({ \ + u64 offset; \ + \ + offset = 0x80038ull; \ + if (mcs->hw->mcs_blks > 1) \ + offset = 0x60038ull; \ + offset; }) + +#define MCSX_TOP_SLAVE_INT_SUM ({ \ + u64 offset; \ + \ + offset = 0xc20ull; \ + if (mcs->hw->mcs_blks > 1) \ + offset = 0xab8ull; \ + offset; }) + +#define MCSX_TOP_SLAVE_INT_SUM_ENB ({ \ + u64 offset; \ + \ + offset = 0xc28ull; \ + if (mcs->hw->mcs_blks > 1) \ + offset = 0xac0ull; \ + offset; }) + +#define MCSX_CPM_RX_SLAVE_RX_INT ({ \ + u64 offset; \ + \ + offset = 0x23c00ull; \ + if (mcs->hw->mcs_blks > 1) \ + 
offset = 0x0ad8ull; \ + offset; }) + +#define MCSX_CPM_RX_SLAVE_RX_INT_ENB ({ \ + u64 offset; \ + \ + offset = 0x23c08ull; \ + if (mcs->hw->mcs_blks > 1) \ + offset = 0xae0ull; \ + offset; }) + +#define MCSX_CPM_TX_SLAVE_TX_INT ({ \ + u64 offset; \ + \ + offset = 0x3d490ull; \ + if (mcs->hw->mcs_blks > 1) \ + offset = 0x54a0ull; \ + offset; }) + +#define MCSX_CPM_TX_SLAVE_TX_INT_ENB ({ \ + u64 offset; \ + \ + offset = 0x3d498ull; \ + if (mcs->hw->mcs_blks > 1) \ + offset = 0x54a8ull; \ + offset; }) + +#endif diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c b/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c new file mode 100644 index 000000000000..fa8029a94068 --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c @@ -0,0 +1,889 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Marvell CN10K MCS driver + * + * Copyright (C) 2022 Marvell. + */ + +#include <linux/types.h> +#include <linux/device.h> +#include <linux/module.h> +#include <linux/pci.h> + +#include "mcs.h" +#include "rvu.h" +#include "lmac_common.h" + +#define M(_name, _id, _fn_name, _req_type, _rsp_type) \ +static struct _req_type __maybe_unused \ +*otx2_mbox_alloc_msg_ ## _fn_name(struct rvu *rvu, int devid) \ +{ \ + struct _req_type *req; \ + \ + req = (struct _req_type *)otx2_mbox_alloc_msg_rsp( \ + &rvu->afpf_wq_info.mbox_up, devid, sizeof(struct _req_type), \ + sizeof(struct _rsp_type)); \ + if (!req) \ + return NULL; \ + req->hdr.sig = OTX2_MBOX_REQ_SIG; \ + req->hdr.id = _id; \ + return req; \ +} + +MBOX_UP_MCS_MESSAGES +#undef M + +int rvu_mbox_handler_mcs_set_lmac_mode(struct rvu *rvu, + struct mcs_set_lmac_mode *req, + struct msg_rsp *rsp) +{ + struct mcs *mcs; + + if (req->mcs_id >= rvu->mcs_blk_cnt) + return MCS_AF_ERR_INVALID_MCSID; + + mcs = mcs_get_pdata(req->mcs_id); + + if (BIT_ULL(req->lmac_id) & mcs->hw->lmac_bmap) + mcs_set_lmac_mode(mcs, req->lmac_id, req->mode); + + return 0; +} + +int mcs_add_intr_wq_entry(struct mcs *mcs, struct mcs_intr_event *event) +{ + struct mcs_intrq_entry *qentry; + u16 pcifunc = event->pcifunc; + struct rvu *rvu = mcs->rvu; + struct mcs_pfvf *pfvf; + + /* Check if it is PF or VF */ + if (pcifunc & RVU_PFVF_FUNC_MASK) + pfvf = &mcs->vf[rvu_get_hwvf(rvu, pcifunc)]; + else + pfvf = &mcs->pf[rvu_get_pf(pcifunc)]; + + event->intr_mask &= pfvf->intr_mask; + + /* Check PF/VF interrupt notification is enabled */ + if (!(pfvf->intr_mask && event->intr_mask)) + return 0; + + qentry = kmalloc(sizeof(*qentry), GFP_ATOMIC); + if (!qentry) + return -ENOMEM; + + qentry->intr_event = *event; + spin_lock(&rvu->mcs_intrq_lock); + list_add_tail(&qentry->node, &rvu->mcs_intrq_head); + spin_unlock(&rvu->mcs_intrq_lock); + queue_work(rvu->mcs_intr_wq, &rvu->mcs_intr_work); + + return 0; +} + +static int mcs_notify_pfvf(struct mcs_intr_event *event, struct rvu *rvu) +{ + struct mcs_intr_info *req; + int err, pf; + + pf = rvu_get_pf(event->pcifunc); + + req = otx2_mbox_alloc_msg_mcs_intr_notify(rvu, pf); + if (!req) + return -ENOMEM; + + req->mcs_id = event->mcs_id; + req->intr_mask = event->intr_mask; + req->sa_id = event->sa_id; + req->hdr.pcifunc = event->pcifunc; + req->lmac_id = event->lmac_id; + + otx2_mbox_msg_send(&rvu->afpf_wq_info.mbox_up, pf); + err = otx2_mbox_wait_for_rsp(&rvu->afpf_wq_info.mbox_up, pf); + if (err) + dev_warn(rvu->dev, "MCS notification to pf %d failed\n", pf); + + return 0; +} + +static void mcs_intr_handler_task(struct work_struct *work) +{ + struct rvu *rvu = container_of(work, struct rvu, mcs_intr_work); + struct mcs_intrq_entry 
*qentry; + struct mcs_intr_event *event; + unsigned long flags; + + do { + spin_lock_irqsave(&rvu->mcs_intrq_lock, flags); + qentry = list_first_entry_or_null(&rvu->mcs_intrq_head, + struct mcs_intrq_entry, + node); + if (qentry) + list_del(&qentry->node); + + spin_unlock_irqrestore(&rvu->mcs_intrq_lock, flags); + if (!qentry) + break; /* nothing more to process */ + + event = &qentry->intr_event; + + mcs_notify_pfvf(event, rvu); + kfree(qentry); + } while (1); +} + +int rvu_mbox_handler_mcs_intr_cfg(struct rvu *rvu, + struct mcs_intr_cfg *req, + struct msg_rsp *rsp) +{ + u16 pcifunc = req->hdr.pcifunc; + struct mcs_pfvf *pfvf; + struct mcs *mcs; + + if (req->mcs_id >= rvu->mcs_blk_cnt) + return MCS_AF_ERR_INVALID_MCSID; + + mcs = mcs_get_pdata(req->mcs_id); + + /* Check if it is PF or VF */ + if (pcifunc & RVU_PFVF_FUNC_MASK) + pfvf = &mcs->vf[rvu_get_hwvf(rvu, pcifunc)]; + else + pfvf = &mcs->pf[rvu_get_pf(pcifunc)]; + + mcs->pf_map[0] = pcifunc; + pfvf->intr_mask = req->intr_mask; + + return 0; +} + +int rvu_mbox_handler_mcs_get_hw_info(struct rvu *rvu, + struct msg_req *req, + struct mcs_hw_info *rsp) +{ + struct mcs *mcs; + + if (!rvu->mcs_blk_cnt) + return MCS_AF_ERR_NOT_MAPPED; + + /* MCS resources are same across all blocks */ + mcs = mcs_get_pdata(0); + rsp->num_mcs_blks = rvu->mcs_blk_cnt; + rsp->tcam_entries = mcs->hw->tcam_entries; + rsp->secy_entries = mcs->hw->secy_entries; + rsp->sc_entries = mcs->hw->sc_entries; + rsp->sa_entries = mcs->hw->sa_entries; + return 0; +} + +int rvu_mbox_handler_mcs_port_reset(struct rvu *rvu, struct mcs_port_reset_req *req, + struct msg_rsp *rsp) +{ + struct mcs *mcs; + + if (req->mcs_id >= rvu->mcs_blk_cnt) + return MCS_AF_ERR_INVALID_MCSID; + + mcs = mcs_get_pdata(req->mcs_id); + + mcs_reset_port(mcs, req->port_id, req->reset); + + return 0; +} + +int rvu_mbox_handler_mcs_clear_stats(struct rvu *rvu, + struct mcs_clear_stats *req, + struct msg_rsp *rsp) +{ + u16 pcifunc = req->hdr.pcifunc; + struct mcs *mcs; + + if (req->mcs_id >= rvu->mcs_blk_cnt) + return MCS_AF_ERR_INVALID_MCSID; + + mcs = mcs_get_pdata(req->mcs_id); + + mutex_lock(&mcs->stats_lock); + if (req->all) + mcs_clear_all_stats(mcs, pcifunc, req->dir); + else + mcs_clear_stats(mcs, req->type, req->id, req->dir); + + mutex_unlock(&mcs->stats_lock); + return 0; +} + +int rvu_mbox_handler_mcs_get_flowid_stats(struct rvu *rvu, + struct mcs_stats_req *req, + struct mcs_flowid_stats *rsp) +{ + struct mcs *mcs; + + if (req->mcs_id >= rvu->mcs_blk_cnt) + return MCS_AF_ERR_INVALID_MCSID; + + mcs = mcs_get_pdata(req->mcs_id); + + /* In CNF10K-B, before reading the statistics, + * MCSX_MIL_GLOBAL.FORCE_CLK_EN_IP needs to be set + * to get accurate statistics + */ + if (mcs->hw->mcs_blks > 1) + mcs_set_force_clk_en(mcs, true); + + mutex_lock(&mcs->stats_lock); + mcs_get_flowid_stats(mcs, rsp, req->id, req->dir); + mutex_unlock(&mcs->stats_lock); + + /* Clear MCSX_MIL_GLOBAL.FORCE_CLK_EN_IP after reading + * the statistics + */ + if (mcs->hw->mcs_blks > 1) + mcs_set_force_clk_en(mcs, false); + + return 0; +} + +int rvu_mbox_handler_mcs_get_secy_stats(struct rvu *rvu, + struct mcs_stats_req *req, + struct mcs_secy_stats *rsp) +{ + struct mcs *mcs; + + if (req->mcs_id >= rvu->mcs_blk_cnt) + return MCS_AF_ERR_INVALID_MCSID; + + mcs = mcs_get_pdata(req->mcs_id); + + if (mcs->hw->mcs_blks > 1) + mcs_set_force_clk_en(mcs, true); + + mutex_lock(&mcs->stats_lock); + + if (req->dir == MCS_RX) + mcs_get_rx_secy_stats(mcs, rsp, req->id); + else + mcs_get_tx_secy_stats(mcs, rsp, req->id); + +
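/* The counters above were read with MCSX_MIL_GLOBAL.FORCE_CLK_EN_IP + * set on CNF10K-B; it is cleared again below once the lock is dropped. + */ +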
mutex_unlock(&mcs->stats_lock); + + if (mcs->hw->mcs_blks > 1) + mcs_set_force_clk_en(mcs, false); + + return 0; +} + +int rvu_mbox_handler_mcs_get_sc_stats(struct rvu *rvu, + struct mcs_stats_req *req, + struct mcs_sc_stats *rsp) +{ + struct mcs *mcs; + + if (req->mcs_id >= rvu->mcs_blk_cnt) + return MCS_AF_ERR_INVALID_MCSID; + + mcs = mcs_get_pdata(req->mcs_id); + + if (mcs->hw->mcs_blks > 1) + mcs_set_force_clk_en(mcs, true); + + mutex_lock(&mcs->stats_lock); + mcs_get_sc_stats(mcs, rsp, req->id, req->dir); + mutex_unlock(&mcs->stats_lock); + + if (mcs->hw->mcs_blks > 1) + mcs_set_force_clk_en(mcs, false); + + return 0; +} + +int rvu_mbox_handler_mcs_get_sa_stats(struct rvu *rvu, + struct mcs_stats_req *req, + struct mcs_sa_stats *rsp) +{ + struct mcs *mcs; + + if (req->mcs_id >= rvu->mcs_blk_cnt) + return MCS_AF_ERR_INVALID_MCSID; + + mcs = mcs_get_pdata(req->mcs_id); + + if (mcs->hw->mcs_blks > 1) + mcs_set_force_clk_en(mcs, true); + + mutex_lock(&mcs->stats_lock); + mcs_get_sa_stats(mcs, rsp, req->id, req->dir); + mutex_unlock(&mcs->stats_lock); + + if (mcs->hw->mcs_blks > 1) + mcs_set_force_clk_en(mcs, false); + + return 0; +} + +int rvu_mbox_handler_mcs_get_port_stats(struct rvu *rvu, + struct mcs_stats_req *req, + struct mcs_port_stats *rsp) +{ + struct mcs *mcs; + + if (req->mcs_id >= rvu->mcs_blk_cnt) + return MCS_AF_ERR_INVALID_MCSID; + + mcs = mcs_get_pdata(req->mcs_id); + + if (mcs->hw->mcs_blks > 1) + mcs_set_force_clk_en(mcs, true); + + mutex_lock(&mcs->stats_lock); + mcs_get_port_stats(mcs, rsp, req->id, req->dir); + mutex_unlock(&mcs->stats_lock); + + if (mcs->hw->mcs_blks > 1) + mcs_set_force_clk_en(mcs, false); + + return 0; +} + +int rvu_mbox_handler_mcs_set_active_lmac(struct rvu *rvu, + struct mcs_set_active_lmac *req, + struct msg_rsp *rsp) +{ + struct mcs *mcs; + + if (req->mcs_id >= rvu->mcs_blk_cnt) + return MCS_AF_ERR_INVALID_MCSID; + + mcs = mcs_get_pdata(req->mcs_id); + if (!mcs) + return MCS_AF_ERR_NOT_MAPPED; + + mcs->hw->lmac_bmap = req->lmac_bmap; + mcs_set_lmac_channels(req->mcs_id, req->chan_base); + return 0; +} + +int rvu_mbox_handler_mcs_port_cfg_set(struct rvu *rvu, struct mcs_port_cfg_set_req *req, + struct msg_rsp *rsp) +{ + struct mcs *mcs; + + if (req->mcs_id >= rvu->mcs_blk_cnt) + return MCS_AF_ERR_INVALID_MCSID; + + mcs = mcs_get_pdata(req->mcs_id); + + if (mcs->hw->lmac_cnt <= req->port_id || !(mcs->hw->lmac_bmap & BIT_ULL(req->port_id))) + return -EINVAL; + + mcs_set_port_cfg(mcs, req); + + return 0; +} + +int rvu_mbox_handler_mcs_port_cfg_get(struct rvu *rvu, struct mcs_port_cfg_get_req *req, + struct mcs_port_cfg_get_rsp *rsp) +{ + struct mcs *mcs; + + if (req->mcs_id >= rvu->mcs_blk_cnt) + return MCS_AF_ERR_INVALID_MCSID; + + mcs = mcs_get_pdata(req->mcs_id); + + if (mcs->hw->lmac_cnt <= req->port_id || !(mcs->hw->lmac_bmap & BIT_ULL(req->port_id))) + return -EINVAL; + + mcs_get_port_cfg(mcs, req, rsp); + + return 0; +} + +int rvu_mbox_handler_mcs_custom_tag_cfg_get(struct rvu *rvu, struct mcs_custom_tag_cfg_get_req *req, + struct mcs_custom_tag_cfg_get_rsp *rsp) +{ + struct mcs *mcs; + + if (req->mcs_id >= rvu->mcs_blk_cnt) + return MCS_AF_ERR_INVALID_MCSID; + + mcs = mcs_get_pdata(req->mcs_id); + + mcs_get_custom_tag_cfg(mcs, req, rsp); + + return 0; +} + +int rvu_mcs_flr_handler(struct rvu *rvu, u16 pcifunc) +{ + struct mcs *mcs; + int mcs_id; + + /* CNF10K-B mcs0-6 are mapped to RPM2-8*/ + if (rvu->mcs_blk_cnt > 1) { + for (mcs_id = 0; mcs_id < rvu->mcs_blk_cnt; mcs_id++) { + mcs = mcs_get_pdata(mcs_id); + mcs_free_all_rsrc(mcs, 
MCS_RX, pcifunc); + mcs_free_all_rsrc(mcs, MCS_TX, pcifunc); + } + } else { + /* CN10K-B has only one mcs block */ + mcs = mcs_get_pdata(0); + mcs_free_all_rsrc(mcs, MCS_RX, pcifunc); + mcs_free_all_rsrc(mcs, MCS_TX, pcifunc); + } + return 0; +} + +int rvu_mbox_handler_mcs_flowid_ena_entry(struct rvu *rvu, + struct mcs_flowid_ena_dis_entry *req, + struct msg_rsp *rsp) +{ + struct mcs *mcs; + + if (req->mcs_id >= rvu->mcs_blk_cnt) + return MCS_AF_ERR_INVALID_MCSID; + + mcs = mcs_get_pdata(req->mcs_id); + mcs_ena_dis_flowid_entry(mcs, req->flow_id, req->dir, req->ena); + return 0; +} + +int rvu_mbox_handler_mcs_pn_table_write(struct rvu *rvu, + struct mcs_pn_table_write_req *req, + struct msg_rsp *rsp) +{ + struct mcs *mcs; + + if (req->mcs_id >= rvu->mcs_blk_cnt) + return MCS_AF_ERR_INVALID_MCSID; + + mcs = mcs_get_pdata(req->mcs_id); + mcs_pn_table_write(mcs, req->pn_id, req->next_pn, req->dir); + return 0; +} + +int rvu_mbox_handler_mcs_set_pn_threshold(struct rvu *rvu, + struct mcs_set_pn_threshold *req, + struct msg_rsp *rsp) +{ + struct mcs *mcs; + + if (req->mcs_id >= rvu->mcs_blk_cnt) + return MCS_AF_ERR_INVALID_MCSID; + + mcs = mcs_get_pdata(req->mcs_id); + + mcs_pn_threshold_set(mcs, req); + + return 0; +} + +int rvu_mbox_handler_mcs_rx_sc_sa_map_write(struct rvu *rvu, + struct mcs_rx_sc_sa_map *req, + struct msg_rsp *rsp) +{ + struct mcs *mcs; + + if (req->mcs_id >= rvu->mcs_blk_cnt) + return MCS_AF_ERR_INVALID_MCSID; + + mcs = mcs_get_pdata(req->mcs_id); + mcs->mcs_ops->mcs_rx_sa_mem_map_write(mcs, req); + return 0; +} + +int rvu_mbox_handler_mcs_tx_sc_sa_map_write(struct rvu *rvu, + struct mcs_tx_sc_sa_map *req, + struct msg_rsp *rsp) +{ + struct mcs *mcs; + + if (req->mcs_id >= rvu->mcs_blk_cnt) + return MCS_AF_ERR_INVALID_MCSID; + + mcs = mcs_get_pdata(req->mcs_id); + mcs->mcs_ops->mcs_tx_sa_mem_map_write(mcs, req); + mcs->tx_sa_active[req->sc_id] = req->tx_sa_active; + + return 0; +} + +int rvu_mbox_handler_mcs_sa_plcy_write(struct rvu *rvu, + struct mcs_sa_plcy_write_req *req, + struct msg_rsp *rsp) +{ + struct mcs *mcs; + int i; + + if (req->mcs_id >= rvu->mcs_blk_cnt) + return MCS_AF_ERR_INVALID_MCSID; + + mcs = mcs_get_pdata(req->mcs_id); + + for (i = 0; i < req->sa_cnt; i++) + mcs_sa_plcy_write(mcs, &req->plcy[i][0], + req->sa_index[i], req->dir); + return 0; +} + +int rvu_mbox_handler_mcs_rx_sc_cam_write(struct rvu *rvu, + struct mcs_rx_sc_cam_write_req *req, + struct msg_rsp *rsp) +{ + struct mcs *mcs; + + if (req->mcs_id >= rvu->mcs_blk_cnt) + return MCS_AF_ERR_INVALID_MCSID; + + mcs = mcs_get_pdata(req->mcs_id); + mcs_rx_sc_cam_write(mcs, req->sci, req->secy_id, req->sc_id); + return 0; +} + +int rvu_mbox_handler_mcs_secy_plcy_write(struct rvu *rvu, + struct mcs_secy_plcy_write_req *req, + struct msg_rsp *rsp) +{ + struct mcs *mcs; + + if (req->mcs_id >= rvu->mcs_blk_cnt) + return MCS_AF_ERR_INVALID_MCSID; + + mcs = mcs_get_pdata(req->mcs_id); + + mcs_secy_plcy_write(mcs, req->plcy, + req->secy_id, req->dir); + return 0; +} + +int rvu_mbox_handler_mcs_flowid_entry_write(struct rvu *rvu, + struct mcs_flowid_entry_write_req *req, + struct msg_rsp *rsp) +{ + struct secy_mem_map map; + struct mcs *mcs; + + if (req->mcs_id >= rvu->mcs_blk_cnt) + return MCS_AF_ERR_INVALID_MCSID; + + mcs = mcs_get_pdata(req->mcs_id); + + /* TODO validate the flowid */ + mcs_flowid_entry_write(mcs, req->data, req->mask, + req->flow_id, req->dir); + map.secy = req->secy_id; + map.sc = req->sc_id; + map.ctrl_pkt = req->ctrl_pkt; + map.flow_id = req->flow_id; + map.sci = req->sci; +
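/* Bind the flow to its secy/SC mapping first; the TCAM entry is + * enabled afterwards, and only when req->ena is set. + */ +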
mcs->mcs_ops->mcs_flowid_secy_map(mcs, &map, req->dir); + if (req->ena) + mcs_ena_dis_flowid_entry(mcs, req->flow_id, + req->dir, true); + return 0; +} + +int rvu_mbox_handler_mcs_free_resources(struct rvu *rvu, + struct mcs_free_rsrc_req *req, + struct msg_rsp *rsp) +{ + u16 pcifunc = req->hdr.pcifunc; + struct mcs_rsrc_map *map; + struct mcs *mcs; + int rc; + + if (req->mcs_id >= rvu->mcs_blk_cnt) + return MCS_AF_ERR_INVALID_MCSID; + + mcs = mcs_get_pdata(req->mcs_id); + + if (req->dir == MCS_RX) + map = &mcs->rx; + else + map = &mcs->tx; + + mutex_lock(&rvu->rsrc_lock); + /* Free all the cam resources mapped to PF/VF */ + if (req->all) { + rc = mcs_free_all_rsrc(mcs, req->dir, pcifunc); + goto exit; + } + + switch (req->rsrc_type) { + case MCS_RSRC_TYPE_FLOWID: + rc = mcs_free_rsrc(&map->flow_ids, map->flowid2pf_map, req->rsrc_id, pcifunc); + mcs_ena_dis_flowid_entry(mcs, req->rsrc_id, req->dir, false); + break; + case MCS_RSRC_TYPE_SECY: + rc = mcs_free_rsrc(&map->secy, map->secy2pf_map, req->rsrc_id, pcifunc); + mcs_clear_secy_plcy(mcs, req->rsrc_id, req->dir); + break; + case MCS_RSRC_TYPE_SC: + rc = mcs_free_rsrc(&map->sc, map->sc2pf_map, req->rsrc_id, pcifunc); + /* Disable SC CAM only on RX side */ + if (req->dir == MCS_RX) + mcs_ena_dis_sc_cam_entry(mcs, req->rsrc_id, false); + break; + case MCS_RSRC_TYPE_SA: + rc = mcs_free_rsrc(&map->sa, map->sa2pf_map, req->rsrc_id, pcifunc); + break; + } +exit: + mutex_unlock(&rvu->rsrc_lock); + return rc; +} + +int rvu_mbox_handler_mcs_alloc_resources(struct rvu *rvu, + struct mcs_alloc_rsrc_req *req, + struct mcs_alloc_rsrc_rsp *rsp) +{ + u16 pcifunc = req->hdr.pcifunc; + struct mcs_rsrc_map *map; + struct mcs *mcs; + int rsrc_id, i; + + if (req->mcs_id >= rvu->mcs_blk_cnt) + return MCS_AF_ERR_INVALID_MCSID; + + mcs = mcs_get_pdata(req->mcs_id); + + if (req->dir == MCS_RX) + map = &mcs->rx; + else + map = &mcs->tx; + + mutex_lock(&rvu->rsrc_lock); + + if (req->all) { + rsrc_id = mcs_alloc_all_rsrc(mcs, &rsp->flow_ids[0], + &rsp->secy_ids[0], + &rsp->sc_ids[0], + &rsp->sa_ids[0], + &rsp->sa_ids[1], + pcifunc, req->dir); + goto exit; + } + + switch (req->rsrc_type) { + case MCS_RSRC_TYPE_FLOWID: + for (i = 0; i < req->rsrc_cnt; i++) { + rsrc_id = mcs_alloc_rsrc(&map->flow_ids, map->flowid2pf_map, pcifunc); + if (rsrc_id < 0) + goto exit; + rsp->flow_ids[i] = rsrc_id; + rsp->rsrc_cnt++; + } + break; + case MCS_RSRC_TYPE_SECY: + for (i = 0; i < req->rsrc_cnt; i++) { + rsrc_id = mcs_alloc_rsrc(&map->secy, map->secy2pf_map, pcifunc); + if (rsrc_id < 0) + goto exit; + rsp->secy_ids[i] = rsrc_id; + rsp->rsrc_cnt++; + } + break; + case MCS_RSRC_TYPE_SC: + for (i = 0; i < req->rsrc_cnt; i++) { + rsrc_id = mcs_alloc_rsrc(&map->sc, map->sc2pf_map, pcifunc); + if (rsrc_id < 0) + goto exit; + rsp->sc_ids[i] = rsrc_id; + rsp->rsrc_cnt++; + } + break; + case MCS_RSRC_TYPE_SA: + for (i = 0; i < req->rsrc_cnt; i++) { + rsrc_id = mcs_alloc_rsrc(&map->sa, map->sa2pf_map, pcifunc); + if (rsrc_id < 0) + goto exit; + rsp->sa_ids[i] = rsrc_id; + rsp->rsrc_cnt++; + } + break; + } + + rsp->rsrc_type = req->rsrc_type; + rsp->dir = req->dir; + rsp->mcs_id = req->mcs_id; + rsp->all = req->all; + +exit: + if (rsrc_id < 0) + dev_err(rvu->dev, "Failed to allocate the mcs resources for PCIFUNC:%d\n", pcifunc); + mutex_unlock(&rvu->rsrc_lock); + return 0; +} + +int rvu_mbox_handler_mcs_alloc_ctrl_pkt_rule(struct rvu *rvu, + struct mcs_alloc_ctrl_pkt_rule_req *req, + struct mcs_alloc_ctrl_pkt_rule_rsp *rsp) +{ + u16 pcifunc = req->hdr.pcifunc; + struct mcs_rsrc_map *map; + 
struct mcs *mcs; + int rsrc_id; + u16 offset; + + if (req->mcs_id >= rvu->mcs_blk_cnt) + return MCS_AF_ERR_INVALID_MCSID; + + mcs = mcs_get_pdata(req->mcs_id); + + map = (req->dir == MCS_RX) ? &mcs->rx : &mcs->tx; + + mutex_lock(&rvu->rsrc_lock); + + switch (req->rule_type) { + case MCS_CTRL_PKT_RULE_TYPE_ETH: + offset = MCS_CTRLPKT_ETYPE_RULE_OFFSET; + break; + case MCS_CTRL_PKT_RULE_TYPE_DA: + offset = MCS_CTRLPKT_DA_RULE_OFFSET; + break; + case MCS_CTRL_PKT_RULE_TYPE_RANGE: + offset = MCS_CTRLPKT_DA_RANGE_RULE_OFFSET; + break; + case MCS_CTRL_PKT_RULE_TYPE_COMBO: + offset = MCS_CTRLPKT_COMBO_RULE_OFFSET; + break; + case MCS_CTRL_PKT_RULE_TYPE_MAC: + offset = MCS_CTRLPKT_MAC_EN_RULE_OFFSET; + break; + } + + rsrc_id = mcs_alloc_ctrlpktrule(&map->ctrlpktrule, map->ctrlpktrule2pf_map, offset, + pcifunc); + if (rsrc_id < 0) + goto exit; + + rsp->rule_idx = rsrc_id; + rsp->rule_type = req->rule_type; + rsp->dir = req->dir; + rsp->mcs_id = req->mcs_id; + + mutex_unlock(&rvu->rsrc_lock); + return 0; +exit: + if (rsrc_id < 0) + dev_err(rvu->dev, "Failed to allocate the mcs ctrl pkt rule for PCIFUNC:%d\n", + pcifunc); + mutex_unlock(&rvu->rsrc_lock); + return rsrc_id; +} + +int rvu_mbox_handler_mcs_free_ctrl_pkt_rule(struct rvu *rvu, + struct mcs_free_ctrl_pkt_rule_req *req, + struct msg_rsp *rsp) +{ + struct mcs *mcs; + int rc; + + if (req->mcs_id >= rvu->mcs_blk_cnt) + return MCS_AF_ERR_INVALID_MCSID; + + mcs = mcs_get_pdata(req->mcs_id); + + mutex_lock(&rvu->rsrc_lock); + + rc = mcs_free_ctrlpktrule(mcs, req); + + mutex_unlock(&rvu->rsrc_lock); + + return rc; +} + +int rvu_mbox_handler_mcs_ctrl_pkt_rule_write(struct rvu *rvu, + struct mcs_ctrl_pkt_rule_write_req *req, + struct msg_rsp *rsp) +{ + struct mcs *mcs; + int rc; + + if (req->mcs_id >= rvu->mcs_blk_cnt) + return MCS_AF_ERR_INVALID_MCSID; + + mcs = mcs_get_pdata(req->mcs_id); + + rc = mcs_ctrlpktrule_write(mcs, req); + + return rc; +} + +static void rvu_mcs_set_lmac_bmap(struct rvu *rvu) +{ + struct mcs *mcs = mcs_get_pdata(0); + unsigned long lmac_bmap = 0; + int cgx, lmac, port; + + for (port = 0; port < mcs->hw->lmac_cnt; port++) { + cgx = port / rvu->hw->lmac_per_cgx; + lmac = port % rvu->hw->lmac_per_cgx; + if (!is_lmac_valid(rvu_cgx_pdata(cgx, rvu), lmac)) + continue; + set_bit(port, &lmac_bmap); + } + mcs->hw->lmac_bmap = lmac_bmap; +} + +int rvu_mcs_init(struct rvu *rvu) +{ + struct rvu_hwinfo *hw = rvu->hw; + int lmac, err = 0, mcs_id; + struct mcs *mcs; + + rvu->mcs_blk_cnt = mcs_get_blkcnt(); + + if (!rvu->mcs_blk_cnt) + return 0; + + /* Needed only for CN10K-B */ + if (rvu->mcs_blk_cnt == 1) { + err = mcs_set_lmac_channels(0, hw->cgx_chan_base); + if (err) + return err; + /* Set active lmacs */ + rvu_mcs_set_lmac_bmap(rvu); + } + + /* Install default tcam bypass entry and set port to operational mode */ + for (mcs_id = 0; mcs_id < rvu->mcs_blk_cnt; mcs_id++) { + mcs = mcs_get_pdata(mcs_id); + mcs_install_flowid_bypass_entry(mcs); + for (lmac = 0; lmac < mcs->hw->lmac_cnt; lmac++) + mcs_set_lmac_mode(mcs, lmac, 0); + + mcs->rvu = rvu; + + /* Allocate memory for PFVF data */ + mcs->pf = devm_kcalloc(mcs->dev, hw->total_pfs, + sizeof(struct mcs_pfvf), GFP_KERNEL); + if (!mcs->pf) + return -ENOMEM; + + mcs->vf = devm_kcalloc(mcs->dev, hw->total_vfs, + sizeof(struct mcs_pfvf), GFP_KERNEL); + if (!mcs->vf) + return -ENOMEM; + } + + /* Initialize the wq for handling mcs interrupts */ + INIT_LIST_HEAD(&rvu->mcs_intrq_head); + INIT_WORK(&rvu->mcs_intr_work, mcs_intr_handler_task); + rvu->mcs_intr_wq = alloc_workqueue("mcs_intr_wq", 0,
0); + if (!rvu->mcs_intr_wq) { + dev_err(rvu->dev, "mcs alloc workqueue failed\n"); + return -ENOMEM; + } + + return err; +} + +void rvu_mcs_exit(struct rvu *rvu) +{ + if (!rvu->mcs_intr_wq) + return; + + flush_workqueue(rvu->mcs_intr_wq); + destroy_workqueue(rvu->mcs_intr_wq); + rvu->mcs_intr_wq = NULL; +} diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c index 7282a826d81e..3f5e09b77d4b 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c @@ -16,6 +16,7 @@ #include "rvu.h" #include "rvu_reg.h" #include "ptp.h" +#include "mcs.h" #include "rvu_trace.h" #include "rvu_npc_hash.h" @@ -23,8 +24,6 @@ #define DRV_NAME "rvu_af" #define DRV_STRING "Marvell OcteonTX2 RVU Admin Function Driver" -static int rvu_get_hwvf(struct rvu *rvu, int pcifunc); - static void rvu_set_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf, struct rvu_block *block, int lf); static void rvu_clear_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf, @@ -418,7 +417,7 @@ void rvu_get_pf_numvfs(struct rvu *rvu, int pf, int *numvfs, int *hwvf) *hwvf = cfg & 0xFFF; } -static int rvu_get_hwvf(struct rvu *rvu, int pcifunc) +int rvu_get_hwvf(struct rvu *rvu, int pcifunc) { int pf, func; u64 cfg; @@ -1159,6 +1158,12 @@ cpt: rvu_program_channels(rvu); + err = rvu_mcs_init(rvu); + if (err) { + dev_err(rvu->dev, "%s: Failed to initialize mcs\n", __func__); + goto nix_err; + } + return 0; nix_err: @@ -3293,6 +3298,7 @@ err_mbox: err_hwsetup: rvu_cgx_exit(rvu); rvu_fwdata_exit(rvu); + rvu_mcs_exit(rvu); rvu_reset_all_blocks(rvu); rvu_free_hw_resources(rvu); rvu_clear_rvum_blk_revid(rvu); @@ -3319,6 +3325,7 @@ static void rvu_remove(struct pci_dev *pdev) rvu_flr_wq_destroy(rvu); rvu_cgx_exit(rvu); rvu_fwdata_exit(rvu); + rvu_mcs_exit(rvu); rvu_mbox_destroy(&rvu->afpf_wq_info); rvu_disable_sriov(rvu); rvu_reset_all_blocks(rvu); @@ -3354,12 +3361,18 @@ static int __init rvu_init_module(void) if (err < 0) goto ptp_err; + err = pci_register_driver(&mcs_driver); + if (err < 0) + goto mcs_err; + err = pci_register_driver(&rvu_driver); if (err < 0) goto rvu_err; return 0; rvu_err: + pci_unregister_driver(&mcs_driver); +mcs_err: pci_unregister_driver(&ptp_driver); ptp_err: pci_unregister_driver(&cgx_driver); @@ -3370,6 +3383,7 @@ ptp_err: static void __exit rvu_cleanup_module(void) { pci_unregister_driver(&rvu_driver); + pci_unregister_driver(&mcs_driver); pci_unregister_driver(&ptp_driver); pci_unregister_driver(&cgx_driver); } diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h index d15bc443335d..76474385a602 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h @@ -25,6 +25,8 @@ /* Subsystem Device ID */ #define PCI_SUBSYS_DEVID_96XX 0xB200 #define PCI_SUBSYS_DEVID_CN10K_A 0xB900 +#define PCI_SUBSYS_DEVID_CNF10K_B 0xBC00 +#define PCI_SUBSYS_DEVID_CN10K_B 0xBD00 /* PCI BAR nos */ #define PCI_AF_REG_BAR_NUM 0 @@ -62,6 +64,10 @@ struct rvu_debugfs { struct dentry *nix; struct dentry *npc; struct dentry *cpt; + struct dentry *mcs_root; + struct dentry *mcs; + struct dentry *mcs_rx; + struct dentry *mcs_tx; struct dump_ctx npa_aura_ctx; struct dump_ctx npa_pool_ctx; struct dump_ctx nix_cq_ctx; @@ -497,6 +503,8 @@ struct rvu { struct ptp *ptp; + int mcs_blk_cnt; + #ifdef CONFIG_DEBUG_FS struct rvu_debugfs rvu_dbg; #endif @@ -504,6 +512,12 @@ struct rvu { /* RVU switch implementation over NPC with DMAC rules 
*/ struct rvu_switch rswitch; + + struct work_struct mcs_intr_work; + struct workqueue_struct *mcs_intr_wq; + struct list_head mcs_intrq_head; + /* mcs interrupt queue lock */ + spinlock_t mcs_intrq_lock; }; static inline void rvu_write64(struct rvu *rvu, u64 block, u64 offset, u64 val) @@ -868,4 +882,11 @@ void rvu_switch_update_rules(struct rvu *rvu, u16 pcifunc); int rvu_npc_set_parse_mode(struct rvu *rvu, u16 pcifunc, u64 mode, u8 dir, u64 pkind, u8 var_len_off, u8 var_len_off_mask, u8 shift_dir); +int rvu_get_hwvf(struct rvu *rvu, int pcifunc); + +/* CN10K MCS */ +int rvu_mcs_init(struct rvu *rvu); +int rvu_mcs_flr_handler(struct rvu *rvu, u16 pcifunc); +void rvu_mcs_exit(struct rvu *rvu); + #endif /* RVU_H */ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c index f42a09f04b25..a1970ebedf95 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c @@ -19,6 +19,7 @@ #include "lmac_common.h" #include "npc.h" #include "rvu_npc_hash.h" +#include "mcs.h" #define DEBUGFS_DIR_NAME "octeontx2" @@ -227,6 +228,350 @@ static const struct file_operations rvu_dbg_##name##_fops = { \ static void print_nix_qsize(struct seq_file *filp, struct rvu_pfvf *pfvf); +static int rvu_dbg_mcs_port_stats_display(struct seq_file *filp, void *unused, int dir) +{ + struct mcs *mcs = filp->private; + struct mcs_port_stats stats; + int lmac; + + seq_puts(filp, "\n port stats\n"); + mutex_lock(&mcs->stats_lock); + for_each_set_bit(lmac, &mcs->hw->lmac_bmap, mcs->hw->lmac_cnt) { + mcs_get_port_stats(mcs, &stats, lmac, dir); + seq_printf(filp, "port%d: Tcam Miss: %lld\n", lmac, stats.tcam_miss_cnt); + seq_printf(filp, "port%d: Parser errors: %lld\n", lmac, stats.parser_err_cnt); + + if (dir == MCS_RX && mcs->hw->mcs_blks > 1) + seq_printf(filp, "port%d: Preempt error: %lld\n", lmac, + stats.preempt_err_cnt); + if (dir == MCS_TX) + seq_printf(filp, "port%d: Sectag insert error: %lld\n", lmac, + stats.sectag_insert_err_cnt); + } + mutex_unlock(&mcs->stats_lock); + return 0; +} + +static int rvu_dbg_mcs_rx_port_stats_display(struct seq_file *filp, void *unused) +{ + return rvu_dbg_mcs_port_stats_display(filp, unused, MCS_RX); +} + +RVU_DEBUG_SEQ_FOPS(mcs_rx_port_stats, mcs_rx_port_stats_display, NULL); + +static int rvu_dbg_mcs_tx_port_stats_display(struct seq_file *filp, void *unused) +{ + return rvu_dbg_mcs_port_stats_display(filp, unused, MCS_TX); +} + +RVU_DEBUG_SEQ_FOPS(mcs_tx_port_stats, mcs_tx_port_stats_display, NULL); + +static int rvu_dbg_mcs_sa_stats_display(struct seq_file *filp, void *unused, int dir) +{ + struct mcs *mcs = filp->private; + struct mcs_sa_stats stats; + struct rsrc_bmap *map; + int sa_id; + + if (dir == MCS_TX) { + map = &mcs->tx.sa; + mutex_lock(&mcs->stats_lock); + for_each_set_bit(sa_id, map->bmap, mcs->hw->sa_entries) { + seq_puts(filp, "\n TX SA stats\n"); + mcs_get_sa_stats(mcs, &stats, sa_id, MCS_TX); + seq_printf(filp, "sa%d: Pkts encrypted: %lld\n", sa_id, + stats.pkt_encrypt_cnt); + + seq_printf(filp, "sa%d: Pkts protected: %lld\n", sa_id, + stats.pkt_protected_cnt); + } + mutex_unlock(&mcs->stats_lock); + return 0; + } + + /* RX stats */ + map = &mcs->rx.sa; + mutex_lock(&mcs->stats_lock); + for_each_set_bit(sa_id, map->bmap, mcs->hw->sa_entries) { + seq_puts(filp, "\n RX SA stats\n"); + mcs_get_sa_stats(mcs, &stats, sa_id, MCS_RX); + seq_printf(filp, "sa%d: Invalid pkts: %lld\n", sa_id, stats.pkt_invalid_cnt); + seq_printf(filp, 
"sa%d: Pkts no sa error: %lld\n", sa_id, stats.pkt_nosaerror_cnt); + seq_printf(filp, "sa%d: Pkts not valid: %lld\n", sa_id, stats.pkt_notvalid_cnt); + seq_printf(filp, "sa%d: Pkts ok: %lld\n", sa_id, stats.pkt_ok_cnt); + seq_printf(filp, "sa%d: Pkts no sa: %lld\n", sa_id, stats.pkt_nosa_cnt); + } + mutex_unlock(&mcs->stats_lock); + return 0; +} + +static int rvu_dbg_mcs_rx_sa_stats_display(struct seq_file *filp, void *unused) +{ + return rvu_dbg_mcs_sa_stats_display(filp, unused, MCS_RX); +} + +RVU_DEBUG_SEQ_FOPS(mcs_rx_sa_stats, mcs_rx_sa_stats_display, NULL); + +static int rvu_dbg_mcs_tx_sa_stats_display(struct seq_file *filp, void *unused) +{ + return rvu_dbg_mcs_sa_stats_display(filp, unused, MCS_TX); +} + +RVU_DEBUG_SEQ_FOPS(mcs_tx_sa_stats, mcs_tx_sa_stats_display, NULL); + +static int rvu_dbg_mcs_tx_sc_stats_display(struct seq_file *filp, void *unused) +{ + struct mcs *mcs = filp->private; + struct mcs_sc_stats stats; + struct rsrc_bmap *map; + int sc_id; + + map = &mcs->tx.sc; + seq_puts(filp, "\n SC stats\n"); + + mutex_lock(&mcs->stats_lock); + for_each_set_bit(sc_id, map->bmap, mcs->hw->sc_entries) { + mcs_get_sc_stats(mcs, &stats, sc_id, MCS_TX); + seq_printf(filp, "\n=======sc%d======\n\n", sc_id); + seq_printf(filp, "sc%d: Pkts encrypted: %lld\n", sc_id, stats.pkt_encrypt_cnt); + seq_printf(filp, "sc%d: Pkts protected: %lld\n", sc_id, stats.pkt_protected_cnt); + + if (mcs->hw->mcs_blks == 1) { + seq_printf(filp, "sc%d: Octets encrypted: %lld\n", sc_id, + stats.octet_encrypt_cnt); + seq_printf(filp, "sc%d: Octets protected: %lld\n", sc_id, + stats.octet_protected_cnt); + } + } + mutex_unlock(&mcs->stats_lock); + return 0; +} + +RVU_DEBUG_SEQ_FOPS(mcs_tx_sc_stats, mcs_tx_sc_stats_display, NULL); + +static int rvu_dbg_mcs_rx_sc_stats_display(struct seq_file *filp, void *unused) +{ + struct mcs *mcs = filp->private; + struct mcs_sc_stats stats; + struct rsrc_bmap *map; + int sc_id; + + map = &mcs->rx.sc; + seq_puts(filp, "\n SC stats\n"); + + mutex_lock(&mcs->stats_lock); + for_each_set_bit(sc_id, map->bmap, mcs->hw->sc_entries) { + mcs_get_sc_stats(mcs, &stats, sc_id, MCS_RX); + seq_printf(filp, "\n=======sc%d======\n\n", sc_id); + seq_printf(filp, "sc%d: Cam hits: %lld\n", sc_id, stats.hit_cnt); + seq_printf(filp, "sc%d: Invalid pkts: %lld\n", sc_id, stats.pkt_invalid_cnt); + seq_printf(filp, "sc%d: Late pkts: %lld\n", sc_id, stats.pkt_late_cnt); + seq_printf(filp, "sc%d: Notvalid pkts: %lld\n", sc_id, stats.pkt_notvalid_cnt); + seq_printf(filp, "sc%d: Unchecked pkts: %lld\n", sc_id, stats.pkt_unchecked_cnt); + + if (mcs->hw->mcs_blks > 1) { + seq_printf(filp, "sc%d: Delay pkts: %lld\n", sc_id, stats.pkt_delay_cnt); + seq_printf(filp, "sc%d: Pkts ok: %lld\n", sc_id, stats.pkt_ok_cnt); + } + if (mcs->hw->mcs_blks == 1) { + seq_printf(filp, "sc%d: Octets decrypted: %lld\n", sc_id, + stats.octet_decrypt_cnt); + seq_printf(filp, "sc%d: Octets validated: %lld\n", sc_id, + stats.octet_validate_cnt); + } + } + mutex_unlock(&mcs->stats_lock); + return 0; +} + +RVU_DEBUG_SEQ_FOPS(mcs_rx_sc_stats, mcs_rx_sc_stats_display, NULL); + +static int rvu_dbg_mcs_flowid_stats_display(struct seq_file *filp, void *unused, int dir) +{ + struct mcs *mcs = filp->private; + struct mcs_flowid_stats stats; + struct rsrc_bmap *map; + int flow_id; + + seq_puts(filp, "\n Flowid stats\n"); + + if (dir == MCS_RX) + map = &mcs->rx.flow_ids; + else + map = &mcs->tx.flow_ids; + + mutex_lock(&mcs->stats_lock); + for_each_set_bit(flow_id, map->bmap, mcs->hw->tcam_entries) { + mcs_get_flowid_stats(mcs, &stats, 
flow_id, dir); + seq_printf(filp, "Flowid%d: Hit:%lld\n", flow_id, stats.tcam_hit_cnt); + } + mutex_unlock(&mcs->stats_lock); + return 0; +} + +static int rvu_dbg_mcs_tx_flowid_stats_display(struct seq_file *filp, void *unused) +{ + return rvu_dbg_mcs_flowid_stats_display(filp, unused, MCS_TX); +} + +RVU_DEBUG_SEQ_FOPS(mcs_tx_flowid_stats, mcs_tx_flowid_stats_display, NULL); + +static int rvu_dbg_mcs_rx_flowid_stats_display(struct seq_file *filp, void *unused) +{ + return rvu_dbg_mcs_flowid_stats_display(filp, unused, MCS_RX); +} + +RVU_DEBUG_SEQ_FOPS(mcs_rx_flowid_stats, mcs_rx_flowid_stats_display, NULL); + +static int rvu_dbg_mcs_tx_secy_stats_display(struct seq_file *filp, void *unused) +{ + struct mcs *mcs = filp->private; + struct mcs_secy_stats stats; + struct rsrc_bmap *map; + int secy_id; + + map = &mcs->tx.secy; + seq_puts(filp, "\n MCS TX secy stats\n"); + + mutex_lock(&mcs->stats_lock); + for_each_set_bit(secy_id, map->bmap, mcs->hw->secy_entries) { + mcs_get_tx_secy_stats(mcs, &stats, secy_id); + seq_printf(filp, "\n=======Secy%d======\n\n", secy_id); + seq_printf(filp, "secy%d: Ctrl bcast pkts: %lld\n", secy_id, + stats.ctl_pkt_bcast_cnt); + seq_printf(filp, "secy%d: Ctrl Mcast pkts: %lld\n", secy_id, + stats.ctl_pkt_mcast_cnt); + seq_printf(filp, "secy%d: Ctrl ucast pkts: %lld\n", secy_id, + stats.ctl_pkt_ucast_cnt); + seq_printf(filp, "secy%d: Ctrl octets: %lld\n", secy_id, stats.ctl_octet_cnt); + seq_printf(filp, "secy%d: Unctrl bcast cnt: %lld\n", secy_id, + stats.unctl_pkt_bcast_cnt); + seq_printf(filp, "secy%d: Unctrl mcast pkts: %lld\n", secy_id, + stats.unctl_pkt_mcast_cnt); + seq_printf(filp, "secy%d: Unctrl ucast pkts: %lld\n", secy_id, + stats.unctl_pkt_ucast_cnt); + seq_printf(filp, "secy%d: Unctrl octets: %lld\n", secy_id, stats.unctl_octet_cnt); + seq_printf(filp, "secy%d: Octet encrypted: %lld\n", secy_id, + stats.octet_encrypted_cnt); + seq_printf(filp, "secy%d: octet protected: %lld\n", secy_id, + stats.octet_protected_cnt); + seq_printf(filp, "secy%d: Pkts with no active sa: %lld\n", secy_id, + stats.pkt_noactivesa_cnt); + seq_printf(filp, "secy%d: Pkts too long: %lld\n", secy_id, stats.pkt_toolong_cnt); + seq_printf(filp, "secy%d: Pkts untagged: %lld\n", secy_id, stats.pkt_untagged_cnt); + } + mutex_unlock(&mcs->stats_lock); + return 0; +} + +RVU_DEBUG_SEQ_FOPS(mcs_tx_secy_stats, mcs_tx_secy_stats_display, NULL); + +static int rvu_dbg_mcs_rx_secy_stats_display(struct seq_file *filp, void *unused) +{ + struct mcs *mcs = filp->private; + struct mcs_secy_stats stats; + struct rsrc_bmap *map; + int secy_id; + + map = &mcs->rx.secy; + seq_puts(filp, "\n MCS secy stats\n"); + + mutex_lock(&mcs->stats_lock); + for_each_set_bit(secy_id, map->bmap, mcs->hw->secy_entries) { + mcs_get_rx_secy_stats(mcs, &stats, secy_id); + seq_printf(filp, "\n=======Secy%d======\n\n", secy_id); + seq_printf(filp, "secy%d: Ctrl bcast pkts: %lld\n", secy_id, + stats.ctl_pkt_bcast_cnt); + seq_printf(filp, "secy%d: Ctrl Mcast pkts: %lld\n", secy_id, + stats.ctl_pkt_mcast_cnt); + seq_printf(filp, "secy%d: Ctrl ucast pkts: %lld\n", secy_id, + stats.ctl_pkt_ucast_cnt); + seq_printf(filp, "secy%d: Ctrl octets: %lld\n", secy_id, stats.ctl_octet_cnt); + seq_printf(filp, "secy%d: Unctrl bcast cnt: %lld\n", secy_id, + stats.unctl_pkt_bcast_cnt); + seq_printf(filp, "secy%d: Unctrl mcast pkts: %lld\n", secy_id, + stats.unctl_pkt_mcast_cnt); + seq_printf(filp, "secy%d: Unctrl ucast pkts: %lld\n", secy_id, + stats.unctl_pkt_ucast_cnt); + seq_printf(filp, "secy%d: Unctrl octets: %lld\n", secy_id,
stats.unctl_octet_cnt); + seq_printf(filp, "secy%d: Octet decrypted: %lld\n", secy_id, + stats.octet_decrypted_cnt); + seq_printf(filp, "secy%d: octet validated: %lld\n", secy_id, + stats.octet_validated_cnt); + seq_printf(filp, "secy%d: Pkts on disabled port: %lld\n", secy_id, + stats.pkt_port_disabled_cnt); + seq_printf(filp, "secy%d: Pkts with bad tag: %lld\n", secy_id, stats.pkt_badtag_cnt); + seq_printf(filp, "secy%d: Pkts with no sa: %lld\n", secy_id, stats.pkt_nosa_cnt); + seq_printf(filp, "secy%d: Pkts with nosaerror: %lld\n", secy_id, + stats.pkt_nosaerror_cnt); + seq_printf(filp, "secy%d: Tagged ctrl pkts: %lld\n", secy_id, + stats.pkt_tagged_ctl_cnt); + seq_printf(filp, "secy%d: Untagged pkts: %lld\n", secy_id, stats.pkt_untaged_cnt); + seq_printf(filp, "secy%d: Ctrl pkts: %lld\n", secy_id, stats.pkt_ctl_cnt); + if (mcs->hw->mcs_blks > 1) + seq_printf(filp, "secy%d: pkts notag: %lld\n", secy_id, + stats.pkt_notag_cnt); + } + mutex_unlock(&mcs->stats_lock); + return 0; +} + +RVU_DEBUG_SEQ_FOPS(mcs_rx_secy_stats, mcs_rx_secy_stats_display, NULL); + +static void rvu_dbg_mcs_init(struct rvu *rvu) +{ + struct mcs *mcs; + char dname[10]; + int i; + + if (!rvu->mcs_blk_cnt) + return; + + rvu->rvu_dbg.mcs_root = debugfs_create_dir("mcs", rvu->rvu_dbg.root); + + for (i = 0; i < rvu->mcs_blk_cnt; i++) { + mcs = mcs_get_pdata(i); + + sprintf(dname, "mcs%d", i); + rvu->rvu_dbg.mcs = debugfs_create_dir(dname, + rvu->rvu_dbg.mcs_root); + + rvu->rvu_dbg.mcs_rx = debugfs_create_dir("rx_stats", rvu->rvu_dbg.mcs); + + debugfs_create_file("flowid", 0600, rvu->rvu_dbg.mcs_rx, mcs, + &rvu_dbg_mcs_rx_flowid_stats_fops); + + debugfs_create_file("secy", 0600, rvu->rvu_dbg.mcs_rx, mcs, + &rvu_dbg_mcs_rx_secy_stats_fops); + + debugfs_create_file("sc", 0600, rvu->rvu_dbg.mcs_rx, mcs, + &rvu_dbg_mcs_rx_sc_stats_fops); + + debugfs_create_file("sa", 0600, rvu->rvu_dbg.mcs_rx, mcs, + &rvu_dbg_mcs_rx_sa_stats_fops); + + debugfs_create_file("port", 0600, rvu->rvu_dbg.mcs_rx, mcs, + &rvu_dbg_mcs_rx_port_stats_fops); + + rvu->rvu_dbg.mcs_tx = debugfs_create_dir("tx_stats", rvu->rvu_dbg.mcs); + + debugfs_create_file("flowid", 0600, rvu->rvu_dbg.mcs_tx, mcs, + &rvu_dbg_mcs_tx_flowid_stats_fops); + + debugfs_create_file("secy", 0600, rvu->rvu_dbg.mcs_tx, mcs, + &rvu_dbg_mcs_tx_secy_stats_fops); + + debugfs_create_file("sc", 0600, rvu->rvu_dbg.mcs_tx, mcs, + &rvu_dbg_mcs_tx_sc_stats_fops); + + debugfs_create_file("sa", 0600, rvu->rvu_dbg.mcs_tx, mcs, + &rvu_dbg_mcs_tx_sa_stats_fops); + + debugfs_create_file("port", 0600, rvu->rvu_dbg.mcs_tx, mcs, + &rvu_dbg_mcs_tx_port_stats_fops); + } +} + #define LMT_MAPTBL_ENTRY_SIZE 16 /* Dump LMTST map table */ static ssize_t rvu_dbg_lmtst_map_table_display(struct file *filp, @@ -3053,6 +3398,7 @@ create: rvu_dbg_npc_init(rvu); rvu_dbg_cpt_init(rvu, BLKADDR_CPT0); rvu_dbg_cpt_init(rvu, BLKADDR_CPT1); + rvu_dbg_mcs_init(rvu); } void rvu_dbg_exit(struct rvu *rvu) diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile index d463dc72d80a..73fdb8798614 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile +++ b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile @@ -13,5 +13,6 @@ rvu_nicvf-y := otx2_vf.o otx2_devlink.o rvu_nicpf-$(CONFIG_DCB) += otx2_dcbnl.o rvu_nicvf-$(CONFIG_DCB) += otx2_dcbnl.o +rvu_nicpf-$(CONFIG_MACSEC) += cn10k_macsec.o ccflags-y += -I$(srctree)/drivers/net/ethernet/marvell/octeontx2/af diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c
b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c new file mode 100644 index 000000000000..64f3acd7f67b --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c @@ -0,0 +1,1668 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Marvell MACSEC hardware offload driver + * + * Copyright (C) 2022 Marvell. + */ + +#include <linux/rtnetlink.h> +#include <linux/bitfield.h> +#include <net/macsec.h> +#include "otx2_common.h" + +#define MCS_TCAM0_MAC_SA_MASK GENMASK_ULL(63, 48) +#define MCS_TCAM1_MAC_SA_MASK GENMASK_ULL(31, 0) +#define MCS_TCAM1_ETYPE_MASK GENMASK_ULL(47, 32) + +#define MCS_SA_MAP_MEM_SA_USE BIT_ULL(9) + +#define MCS_RX_SECY_PLCY_RW_MASK GENMASK_ULL(49, 18) +#define MCS_RX_SECY_PLCY_RP BIT_ULL(17) +#define MCS_RX_SECY_PLCY_AUTH_ENA BIT_ULL(16) +#define MCS_RX_SECY_PLCY_CIP GENMASK_ULL(8, 5) +#define MCS_RX_SECY_PLCY_VAL GENMASK_ULL(2, 1) +#define MCS_RX_SECY_PLCY_ENA BIT_ULL(0) + +#define MCS_TX_SECY_PLCY_MTU GENMASK_ULL(43, 28) +#define MCS_TX_SECY_PLCY_ST_TCI GENMASK_ULL(27, 22) +#define MCS_TX_SECY_PLCY_ST_OFFSET GENMASK_ULL(21, 15) +#define MCS_TX_SECY_PLCY_INS_MODE BIT_ULL(14) +#define MCS_TX_SECY_PLCY_AUTH_ENA BIT_ULL(13) +#define MCS_TX_SECY_PLCY_CIP GENMASK_ULL(5, 2) +#define MCS_TX_SECY_PLCY_PROTECT BIT_ULL(1) +#define MCS_TX_SECY_PLCY_ENA BIT_ULL(0) + +#define MCS_GCM_AES_128 0 +#define MCS_GCM_AES_256 1 +#define MCS_GCM_AES_XPN_128 2 +#define MCS_GCM_AES_XPN_256 3 + +#define MCS_TCI_ES 0x40 /* end station */ +#define MCS_TCI_SC 0x20 /* SCI present */ +#define MCS_TCI_SCB 0x10 /* epon */ +#define MCS_TCI_E 0x08 /* encryption */ +#define MCS_TCI_C 0x04 /* changed text */ + +static struct cn10k_mcs_txsc *cn10k_mcs_get_txsc(struct cn10k_mcs_cfg *cfg, + struct macsec_secy *secy) +{ + struct cn10k_mcs_txsc *txsc; + + list_for_each_entry(txsc, &cfg->txsc_list, entry) { + if (txsc->sw_secy == secy) + return txsc; + } + + return NULL; +} + +static struct cn10k_mcs_rxsc *cn10k_mcs_get_rxsc(struct cn10k_mcs_cfg *cfg, + struct macsec_secy *secy, + struct macsec_rx_sc *rx_sc) +{ + struct cn10k_mcs_rxsc *rxsc; + + list_for_each_entry(rxsc, &cfg->rxsc_list, entry) { + if (rxsc->sw_rxsc == rx_sc && rxsc->sw_secy == secy) + return rxsc; + } + + return NULL; +} + +static const char *rsrc_name(enum mcs_rsrc_type rsrc_type) +{ + switch (rsrc_type) { + case MCS_RSRC_TYPE_FLOWID: + return "FLOW"; + case MCS_RSRC_TYPE_SC: + return "SC"; + case MCS_RSRC_TYPE_SECY: + return "SECY"; + case MCS_RSRC_TYPE_SA: + return "SA"; + default: + return "Unknown"; + }; + + return "Unknown"; +} + +static int cn10k_mcs_alloc_rsrc(struct otx2_nic *pfvf, enum mcs_direction dir, + enum mcs_rsrc_type type, u16 *rsrc_id) +{ + struct mbox *mbox = &pfvf->mbox; + struct mcs_alloc_rsrc_req *req; + struct mcs_alloc_rsrc_rsp *rsp; + int ret = -ENOMEM; + + mutex_lock(&mbox->lock); + + req = otx2_mbox_alloc_msg_mcs_alloc_resources(mbox); + if (!req) + goto fail; + + req->rsrc_type = type; + req->rsrc_cnt = 1; + req->dir = dir; + + ret = otx2_sync_mbox_msg(mbox); + if (ret) + goto fail; + + rsp = (struct mcs_alloc_rsrc_rsp *)otx2_mbox_get_rsp(&pfvf->mbox.mbox, + 0, &req->hdr); + if (IS_ERR(rsp) || req->rsrc_cnt != rsp->rsrc_cnt || + req->rsrc_type != rsp->rsrc_type || req->dir != rsp->dir) { + ret = -EINVAL; + goto fail; + } + + switch (rsp->rsrc_type) { + case MCS_RSRC_TYPE_FLOWID: + *rsrc_id = rsp->flow_ids[0]; + break; + case MCS_RSRC_TYPE_SC: + *rsrc_id = rsp->sc_ids[0]; + break; + case MCS_RSRC_TYPE_SECY: + *rsrc_id = rsp->secy_ids[0]; + break; + case MCS_RSRC_TYPE_SA: + *rsrc_id = 
rsp->sa_ids[0]; + break; + default: + ret = -EINVAL; + goto fail; + }; + + mutex_unlock(&mbox->lock); + + return 0; +fail: + dev_err(pfvf->dev, "Failed to allocate %s %s resource\n", + dir == MCS_TX ? "TX" : "RX", rsrc_name(type)); + mutex_unlock(&mbox->lock); + return ret; +} + +static void cn10k_mcs_free_rsrc(struct otx2_nic *pfvf, enum mcs_direction dir, + enum mcs_rsrc_type type, u16 hw_rsrc_id, + bool all) +{ + struct mbox *mbox = &pfvf->mbox; + struct mcs_free_rsrc_req *req; + + mutex_lock(&mbox->lock); + + req = otx2_mbox_alloc_msg_mcs_free_resources(mbox); + if (!req) + goto fail; + + req->rsrc_id = hw_rsrc_id; + req->rsrc_type = type; + req->dir = dir; + if (all) + req->all = 1; + + if (otx2_sync_mbox_msg(&pfvf->mbox)) + goto fail; + + mutex_unlock(&mbox->lock); + + return; +fail: + dev_err(pfvf->dev, "Failed to free %s %s resource\n", + dir == MCS_TX ? "TX" : "RX", rsrc_name(type)); + mutex_unlock(&mbox->lock); +} + +static int cn10k_mcs_alloc_txsa(struct otx2_nic *pfvf, u16 *hw_sa_id) +{ + return cn10k_mcs_alloc_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SA, hw_sa_id); +} + +static int cn10k_mcs_alloc_rxsa(struct otx2_nic *pfvf, u16 *hw_sa_id) +{ + return cn10k_mcs_alloc_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SA, hw_sa_id); +} + +static void cn10k_mcs_free_txsa(struct otx2_nic *pfvf, u16 hw_sa_id) +{ + cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SA, hw_sa_id, false); +} + +static void cn10k_mcs_free_rxsa(struct otx2_nic *pfvf, u16 hw_sa_id) +{ + cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SA, hw_sa_id, false); +} + +static int cn10k_mcs_write_rx_secy(struct otx2_nic *pfvf, + struct macsec_secy *secy, u8 hw_secy_id) +{ + struct mcs_secy_plcy_write_req *req; + struct mbox *mbox = &pfvf->mbox; + u64 policy; + int ret; + + mutex_lock(&mbox->lock); + + req = otx2_mbox_alloc_msg_mcs_secy_plcy_write(mbox); + if (!req) { + ret = -ENOMEM; + goto fail; + } + + policy = FIELD_PREP(MCS_RX_SECY_PLCY_RW_MASK, secy->replay_window); + if (secy->replay_protect) + policy |= MCS_RX_SECY_PLCY_RP; + + policy |= MCS_RX_SECY_PLCY_AUTH_ENA; + policy |= FIELD_PREP(MCS_RX_SECY_PLCY_CIP, MCS_GCM_AES_128); + policy |= FIELD_PREP(MCS_RX_SECY_PLCY_VAL, secy->validate_frames); + + policy |= MCS_RX_SECY_PLCY_ENA; + + req->plcy = policy; + req->secy_id = hw_secy_id; + req->dir = MCS_RX; + + ret = otx2_sync_mbox_msg(mbox); + +fail: + mutex_unlock(&mbox->lock); + return ret; +} + +static int cn10k_mcs_write_rx_flowid(struct otx2_nic *pfvf, + struct cn10k_mcs_rxsc *rxsc, u8 hw_secy_id) +{ + struct macsec_rx_sc *sw_rx_sc = rxsc->sw_rxsc; + struct mcs_flowid_entry_write_req *req; + struct mbox *mbox = &pfvf->mbox; + int ret; + + mutex_lock(&mbox->lock); + + req = otx2_mbox_alloc_msg_mcs_flowid_entry_write(mbox); + if (!req) { + ret = -ENOMEM; + goto fail; + } + + req->data[1] = FIELD_PREP(MCS_TCAM1_ETYPE_MASK, ETH_P_MACSEC); + req->mask[1] = ~0ULL; + req->mask[1] &= ~MCS_TCAM1_ETYPE_MASK; + + req->mask[0] = ~0ULL; + req->mask[2] = ~0ULL; + req->mask[3] = ~0ULL; + + req->flow_id = rxsc->hw_flow_id; + req->secy_id = hw_secy_id; + req->sc_id = rxsc->hw_sc_id; + req->dir = MCS_RX; + + if (sw_rx_sc->active) + req->ena = 1; + + ret = otx2_sync_mbox_msg(mbox); + +fail: + mutex_unlock(&mbox->lock); + return ret; +} + +static int cn10k_mcs_write_sc_cam(struct otx2_nic *pfvf, + struct cn10k_mcs_rxsc *rxsc, u8 hw_secy_id) +{ + struct macsec_rx_sc *sw_rx_sc = rxsc->sw_rxsc; + struct mcs_rx_sc_cam_write_req *sc_req; + struct mbox *mbox = &pfvf->mbox; + int ret; + + mutex_lock(&mbox->lock); + + sc_req = 
otx2_mbox_alloc_msg_mcs_rx_sc_cam_write(mbox); + if (!sc_req) { + ret = -ENOMEM; + goto fail; + } + + sc_req->sci = (__force u64)cpu_to_be64((__force u64)sw_rx_sc->sci); + sc_req->sc_id = rxsc->hw_sc_id; + sc_req->secy_id = hw_secy_id; + + ret = otx2_sync_mbox_msg(mbox); + +fail: + mutex_unlock(&mbox->lock); + return ret; +} + +static int cn10k_mcs_write_rx_sa_plcy(struct otx2_nic *pfvf, + struct macsec_secy *secy, + struct cn10k_mcs_rxsc *rxsc, + u8 assoc_num, bool sa_in_use) +{ + unsigned char *src = rxsc->sa_key[assoc_num]; + struct mcs_sa_plcy_write_req *plcy_req; + struct mcs_rx_sc_sa_map *map_req; + struct mbox *mbox = &pfvf->mbox; + u8 reg, key_len; + int ret; + + mutex_lock(&mbox->lock); + + plcy_req = otx2_mbox_alloc_msg_mcs_sa_plcy_write(mbox); + if (!plcy_req) { + ret = -ENOMEM; + goto fail; + } + + map_req = otx2_mbox_alloc_msg_mcs_rx_sc_sa_map_write(mbox); + if (!map_req) { + otx2_mbox_reset(&mbox->mbox, 0); + ret = -ENOMEM; + goto fail; + } + + for (reg = 0, key_len = 0; key_len < secy->key_len; key_len += 8) { + memcpy((u8 *)&plcy_req->plcy[0][reg], + (src + reg * 8), 8); + reg++; + } + + plcy_req->sa_index[0] = rxsc->hw_sa_id[assoc_num]; + plcy_req->sa_cnt = 1; + plcy_req->dir = MCS_RX; + + map_req->sa_index = rxsc->hw_sa_id[assoc_num]; + map_req->sa_in_use = sa_in_use; + map_req->sc_id = rxsc->hw_sc_id; + map_req->an = assoc_num; + + /* Send two messages together */ + ret = otx2_sync_mbox_msg(mbox); + +fail: + mutex_unlock(&mbox->lock); + return ret; +} + +static int cn10k_mcs_write_rx_sa_pn(struct otx2_nic *pfvf, + struct cn10k_mcs_rxsc *rxsc, + u8 assoc_num, u64 next_pn) +{ + struct mcs_pn_table_write_req *req; + struct mbox *mbox = &pfvf->mbox; + int ret; + + mutex_lock(&mbox->lock); + + req = otx2_mbox_alloc_msg_mcs_pn_table_write(mbox); + if (!req) { + ret = -ENOMEM; + goto fail; + } + + req->pn_id = rxsc->hw_sa_id[assoc_num]; + req->next_pn = next_pn; + req->dir = MCS_RX; + + ret = otx2_sync_mbox_msg(mbox); + +fail: + mutex_unlock(&mbox->lock); + return ret; +} + +static int cn10k_mcs_write_tx_secy(struct otx2_nic *pfvf, + struct macsec_secy *secy, + struct cn10k_mcs_txsc *txsc) +{ + struct mcs_secy_plcy_write_req *req; + struct mbox *mbox = &pfvf->mbox; + struct macsec_tx_sc *sw_tx_sc; + /* Insert SecTag after 12 bytes (DA+SA) */ + u8 tag_offset = 12; + u8 sectag_tci = 0; + u64 policy; + int ret; + + sw_tx_sc = &secy->tx_sc; + + mutex_lock(&mbox->lock); + + req = otx2_mbox_alloc_msg_mcs_secy_plcy_write(mbox); + if (!req) { + ret = -ENOMEM; + goto fail; + } + + if (sw_tx_sc->send_sci) { + sectag_tci |= MCS_TCI_SC; + } else { + if (sw_tx_sc->end_station) + sectag_tci |= MCS_TCI_ES; + if (sw_tx_sc->scb) + sectag_tci |= MCS_TCI_SCB; + } + + if (sw_tx_sc->encrypt) + sectag_tci |= (MCS_TCI_E | MCS_TCI_C); + + policy = FIELD_PREP(MCS_TX_SECY_PLCY_MTU, secy->netdev->mtu); + /* Write SecTag excluding AN bits (1..0) */ + policy |= FIELD_PREP(MCS_TX_SECY_PLCY_ST_TCI, sectag_tci >> 2); + policy |= FIELD_PREP(MCS_TX_SECY_PLCY_ST_OFFSET, tag_offset); + policy |= MCS_TX_SECY_PLCY_INS_MODE; + policy |= MCS_TX_SECY_PLCY_AUTH_ENA; + policy |= FIELD_PREP(MCS_TX_SECY_PLCY_CIP, MCS_GCM_AES_128); + + if (secy->protect_frames) + policy |= MCS_TX_SECY_PLCY_PROTECT; + + /* If the encoding SA does not exist or is not active and protect is + * not set, then frames can be sent out as-is. Hence enable + * the policy irrespective of secy operational state when !protect.
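+ * e.g. with protect_frames disabled, a SecY that is not operational + * still keeps MCS_TX_SECY_PLCY_ENA set and frames pass through.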
+ */ + if (!secy->protect_frames || secy->operational) + policy |= MCS_TX_SECY_PLCY_ENA; + + req->plcy = policy; + req->secy_id = txsc->hw_secy_id_tx; + req->dir = MCS_TX; + + ret = otx2_sync_mbox_msg(mbox); + +fail: + mutex_unlock(&mbox->lock); + return ret; +} + +static int cn10k_mcs_write_tx_flowid(struct otx2_nic *pfvf, + struct macsec_secy *secy, + struct cn10k_mcs_txsc *txsc) +{ + struct mcs_flowid_entry_write_req *req; + struct mbox *mbox = &pfvf->mbox; + u64 mac_sa; + int ret; + + mutex_lock(&mbox->lock); + + req = otx2_mbox_alloc_msg_mcs_flowid_entry_write(mbox); + if (!req) { + ret = -ENOMEM; + goto fail; + } + + mac_sa = ether_addr_to_u64(secy->netdev->dev_addr); + + req->data[0] = FIELD_PREP(MCS_TCAM0_MAC_SA_MASK, mac_sa); + req->data[1] = FIELD_PREP(MCS_TCAM1_MAC_SA_MASK, mac_sa >> 16); + + req->mask[0] = ~0ULL; + req->mask[0] &= ~MCS_TCAM0_MAC_SA_MASK; + + req->mask[1] = ~0ULL; + req->mask[1] &= ~MCS_TCAM1_MAC_SA_MASK; + + req->mask[2] = ~0ULL; + req->mask[3] = ~0ULL; + + req->flow_id = txsc->hw_flow_id; + req->secy_id = txsc->hw_secy_id_tx; + req->sc_id = txsc->hw_sc_id; + req->sci = (__force u64)cpu_to_be64((__force u64)secy->sci); + req->dir = MCS_TX; + /* This can be enabled since stack xmits packets only when interface is up */ + req->ena = 1; + + ret = otx2_sync_mbox_msg(mbox); + +fail: + mutex_unlock(&mbox->lock); + return ret; +} + +static int cn10k_mcs_link_tx_sa2sc(struct otx2_nic *pfvf, + struct macsec_secy *secy, + struct cn10k_mcs_txsc *txsc, + u8 sa_num, bool sa_active) +{ + struct mcs_tx_sc_sa_map *map_req; + struct mbox *mbox = &pfvf->mbox; + int ret; + + /* Link the encoding_sa only to SC out of all SAs */ + if (txsc->encoding_sa != sa_num) + return 0; + + mutex_lock(&mbox->lock); + + map_req = otx2_mbox_alloc_msg_mcs_tx_sc_sa_map_write(mbox); + if (!map_req) { + otx2_mbox_reset(&mbox->mbox, 0); + ret = -ENOMEM; + goto fail; + } + + map_req->sa_index0 = txsc->hw_sa_id[sa_num]; + map_req->sa_index0_vld = sa_active; + map_req->sectag_sci = (__force u64)cpu_to_be64((__force u64)secy->sci); + map_req->sc_id = txsc->hw_sc_id; + + ret = otx2_sync_mbox_msg(mbox); + +fail: + mutex_unlock(&mbox->lock); + return ret; +} + +static int cn10k_mcs_write_tx_sa_plcy(struct otx2_nic *pfvf, + struct macsec_secy *secy, + struct cn10k_mcs_txsc *txsc, + u8 assoc_num) +{ + unsigned char *src = txsc->sa_key[assoc_num]; + struct mcs_sa_plcy_write_req *plcy_req; + struct mbox *mbox = &pfvf->mbox; + u8 reg, key_len; + int ret; + + mutex_lock(&mbox->lock); + + plcy_req = otx2_mbox_alloc_msg_mcs_sa_plcy_write(mbox); + if (!plcy_req) { + ret = -ENOMEM; + goto fail; + } + + for (reg = 0, key_len = 0; key_len < secy->key_len; key_len += 8) { + memcpy((u8 *)&plcy_req->plcy[0][reg], (src + reg * 8), 8); + reg++; + } + + plcy_req->plcy[0][8] = assoc_num; + plcy_req->sa_index[0] = txsc->hw_sa_id[assoc_num]; + plcy_req->sa_cnt = 1; + plcy_req->dir = MCS_TX; + + ret = otx2_sync_mbox_msg(mbox); + +fail: + mutex_unlock(&mbox->lock); + return ret; +} + +static int cn10k_write_tx_sa_pn(struct otx2_nic *pfvf, + struct cn10k_mcs_txsc *txsc, + u8 assoc_num, u64 next_pn) +{ + struct mcs_pn_table_write_req *req; + struct mbox *mbox = &pfvf->mbox; + int ret; + + mutex_lock(&mbox->lock); + + req = otx2_mbox_alloc_msg_mcs_pn_table_write(mbox); + if (!req) { + ret = -ENOMEM; + goto fail; + } + + req->pn_id = txsc->hw_sa_id[assoc_num]; + req->next_pn = next_pn; + req->dir = MCS_TX; + + ret = otx2_sync_mbox_msg(mbox); + +fail: + mutex_unlock(&mbox->lock); + return ret; +} + +static int 
cn10k_mcs_ena_dis_flowid(struct otx2_nic *pfvf, u16 hw_flow_id, + bool enable, enum mcs_direction dir) +{ + struct mcs_flowid_ena_dis_entry *req; + struct mbox *mbox = &pfvf->mbox; + int ret; + + mutex_lock(&mbox->lock); + + req = otx2_mbox_alloc_msg_mcs_flowid_ena_entry(mbox); + if (!req) { + ret = -ENOMEM; + goto fail; + } + + req->flow_id = hw_flow_id; + req->ena = enable; + req->dir = dir; + + ret = otx2_sync_mbox_msg(mbox); + +fail: + mutex_unlock(&mbox->lock); + return ret; +} + +static int cn10k_mcs_sa_stats(struct otx2_nic *pfvf, u8 hw_sa_id, + struct mcs_sa_stats *rsp_p, + enum mcs_direction dir, bool clear) +{ + struct mcs_clear_stats *clear_req; + struct mbox *mbox = &pfvf->mbox; + struct mcs_stats_req *req; + struct mcs_sa_stats *rsp; + int ret; + + mutex_lock(&mbox->lock); + + req = otx2_mbox_alloc_msg_mcs_get_sa_stats(mbox); + if (!req) { + ret = -ENOMEM; + goto fail; + } + + req->id = hw_sa_id; + req->dir = dir; + + if (!clear) + goto send_msg; + + clear_req = otx2_mbox_alloc_msg_mcs_clear_stats(mbox); + if (!clear_req) { + ret = -ENOMEM; + goto fail; + } + clear_req->id = hw_sa_id; + clear_req->dir = dir; + clear_req->type = MCS_RSRC_TYPE_SA; + +send_msg: + ret = otx2_sync_mbox_msg(mbox); + if (ret) + goto fail; + + rsp = (struct mcs_sa_stats *)otx2_mbox_get_rsp(&pfvf->mbox.mbox, + 0, &req->hdr); + if (IS_ERR(rsp)) { + ret = PTR_ERR(rsp); + goto fail; + } + + memcpy(rsp_p, rsp, sizeof(*rsp_p)); + + mutex_unlock(&mbox->lock); + + return 0; +fail: + mutex_unlock(&mbox->lock); + return ret; +} + +static int cn10k_mcs_sc_stats(struct otx2_nic *pfvf, u8 hw_sc_id, + struct mcs_sc_stats *rsp_p, + enum mcs_direction dir, bool clear) +{ + struct mcs_clear_stats *clear_req; + struct mbox *mbox = &pfvf->mbox; + struct mcs_stats_req *req; + struct mcs_sc_stats *rsp; + int ret; + + mutex_lock(&mbox->lock); + + req = otx2_mbox_alloc_msg_mcs_get_sc_stats(mbox); + if (!req) { + ret = -ENOMEM; + goto fail; + } + + req->id = hw_sc_id; + req->dir = dir; + + if (!clear) + goto send_msg; + + clear_req = otx2_mbox_alloc_msg_mcs_clear_stats(mbox); + if (!clear_req) { + ret = -ENOMEM; + goto fail; + } + clear_req->id = hw_sc_id; + clear_req->dir = dir; + clear_req->type = MCS_RSRC_TYPE_SC; + +send_msg: + ret = otx2_sync_mbox_msg(mbox); + if (ret) + goto fail; + + rsp = (struct mcs_sc_stats *)otx2_mbox_get_rsp(&pfvf->mbox.mbox, + 0, &req->hdr); + if (IS_ERR(rsp)) { + ret = PTR_ERR(rsp); + goto fail; + } + + memcpy(rsp_p, rsp, sizeof(*rsp_p)); + + mutex_unlock(&mbox->lock); + + return 0; +fail: + mutex_unlock(&mbox->lock); + return ret; +} + +static int cn10k_mcs_secy_stats(struct otx2_nic *pfvf, u8 hw_secy_id, + struct mcs_secy_stats *rsp_p, + enum mcs_direction dir, bool clear) +{ + struct mcs_clear_stats *clear_req; + struct mbox *mbox = &pfvf->mbox; + struct mcs_secy_stats *rsp; + struct mcs_stats_req *req; + int ret; + + mutex_lock(&mbox->lock); + + req = otx2_mbox_alloc_msg_mcs_get_secy_stats(mbox); + if (!req) { + ret = -ENOMEM; + goto fail; + } + + req->id = hw_secy_id; + req->dir = dir; + + if (!clear) + goto send_msg; + + clear_req = otx2_mbox_alloc_msg_mcs_clear_stats(mbox); + if (!clear_req) { + ret = -ENOMEM; + goto fail; + } + clear_req->id = hw_secy_id; + clear_req->dir = dir; + clear_req->type = MCS_RSRC_TYPE_SECY; + +send_msg: + ret = otx2_sync_mbox_msg(mbox); + if (ret) + goto fail; + + rsp = (struct mcs_secy_stats *)otx2_mbox_get_rsp(&pfvf->mbox.mbox, + 0, &req->hdr); + if (IS_ERR(rsp)) { + ret = PTR_ERR(rsp); + goto fail; + } + + memcpy(rsp_p, rsp, sizeof(*rsp_p)); + +
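/* The stats read and the optional clear above were batched and go + * out in a single mailbox sync. + */ +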
mutex_unlock(&mbox->lock); + + return 0; +fail: + mutex_unlock(&mbox->lock); + return ret; +} + +static struct cn10k_mcs_txsc *cn10k_mcs_create_txsc(struct otx2_nic *pfvf) +{ + struct cn10k_mcs_txsc *txsc; + int ret; + + txsc = kzalloc(sizeof(*txsc), GFP_KERNEL); + if (!txsc) + return ERR_PTR(-ENOMEM); + + ret = cn10k_mcs_alloc_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_FLOWID, + &txsc->hw_flow_id); + if (ret) + goto fail; + + /* For a SecY, one TX secy and one RX secy HW resources are needed */ + ret = cn10k_mcs_alloc_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SECY, + &txsc->hw_secy_id_tx); + if (ret) + goto free_flowid; + + ret = cn10k_mcs_alloc_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SECY, + &txsc->hw_secy_id_rx); + if (ret) + goto free_tx_secy; + + ret = cn10k_mcs_alloc_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SC, + &txsc->hw_sc_id); + if (ret) + goto free_rx_secy; + + return txsc; +free_rx_secy: + cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SECY, + txsc->hw_secy_id_rx, false); +free_tx_secy: + cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SECY, + txsc->hw_secy_id_tx, false); +free_flowid: + cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_FLOWID, + txsc->hw_flow_id, false); +fail: + return ERR_PTR(ret); +} + +/* Free Tx SC and its SAs(if any) resources to AF + */ +static void cn10k_mcs_delete_txsc(struct otx2_nic *pfvf, + struct cn10k_mcs_txsc *txsc) +{ + u8 sa_bmap = txsc->sa_bmap; + u8 sa_num = 0; + + while (sa_bmap) { + if (sa_bmap & 1) { + cn10k_mcs_write_tx_sa_plcy(pfvf, txsc->sw_secy, + txsc, sa_num); + cn10k_mcs_free_txsa(pfvf, txsc->hw_sa_id[sa_num]); + } + sa_num++; + sa_bmap >>= 1; + } + + cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SC, + txsc->hw_sc_id, false); + cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SECY, + txsc->hw_secy_id_rx, false); + cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SECY, + txsc->hw_secy_id_tx, false); + cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_FLOWID, + txsc->hw_flow_id, false); +} + +static struct cn10k_mcs_rxsc *cn10k_mcs_create_rxsc(struct otx2_nic *pfvf) +{ + struct cn10k_mcs_rxsc *rxsc; + int ret; + + rxsc = kzalloc(sizeof(*rxsc), GFP_KERNEL); + if (!rxsc) + return ERR_PTR(-ENOMEM); + + ret = cn10k_mcs_alloc_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_FLOWID, + &rxsc->hw_flow_id); + if (ret) + goto fail; + + ret = cn10k_mcs_alloc_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SC, + &rxsc->hw_sc_id); + if (ret) + goto free_flowid; + + return rxsc; +free_flowid: + cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_FLOWID, + rxsc->hw_flow_id, false); +fail: + return ERR_PTR(ret); +} + +/* Free Rx SC and its SAs(if any) resources to AF + */ +static void cn10k_mcs_delete_rxsc(struct otx2_nic *pfvf, + struct cn10k_mcs_rxsc *rxsc) +{ + u8 sa_bmap = rxsc->sa_bmap; + u8 sa_num = 0; + + while (sa_bmap) { + if (sa_bmap & 1) { + cn10k_mcs_write_rx_sa_plcy(pfvf, rxsc->sw_secy, rxsc, + sa_num, false); + cn10k_mcs_free_rxsa(pfvf, rxsc->hw_sa_id[sa_num]); + } + sa_num++; + sa_bmap >>= 1; + } + + cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SC, + rxsc->hw_sc_id, false); + cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_FLOWID, + rxsc->hw_flow_id, false); +} + +static int cn10k_mcs_secy_tx_cfg(struct otx2_nic *pfvf, struct macsec_secy *secy, + struct cn10k_mcs_txsc *txsc, + struct macsec_tx_sa *sw_tx_sa, u8 sa_num) +{ + if (sw_tx_sa) { + cn10k_mcs_write_tx_sa_plcy(pfvf, secy, txsc, sa_num); + cn10k_write_tx_sa_pn(pfvf, txsc, sa_num, + sw_tx_sa->next_pn_halves.lower); + cn10k_mcs_link_tx_sa2sc(pfvf, secy, txsc, sa_num, + sw_tx_sa->active); + } + + cn10k_mcs_write_tx_secy(pfvf, secy, txsc); + 
cn10k_mcs_write_tx_flowid(pfvf, secy, txsc); + /* When updating secy, change RX secy also */ + cn10k_mcs_write_rx_secy(pfvf, secy, txsc->hw_secy_id_rx); + + return 0; +} + +static int cn10k_mcs_secy_rx_cfg(struct otx2_nic *pfvf, + struct macsec_secy *secy, u8 hw_secy_id) +{ + struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg; + struct cn10k_mcs_rxsc *mcs_rx_sc; + struct macsec_rx_sc *sw_rx_sc; + struct macsec_rx_sa *sw_rx_sa; + u8 sa_num; + + for (sw_rx_sc = rcu_dereference_bh(secy->rx_sc); sw_rx_sc && sw_rx_sc->active; + sw_rx_sc = rcu_dereference_bh(sw_rx_sc->next)) { + mcs_rx_sc = cn10k_mcs_get_rxsc(cfg, secy, sw_rx_sc); + if (unlikely(!mcs_rx_sc)) + continue; + + for (sa_num = 0; sa_num < CN10K_MCS_SA_PER_SC; sa_num++) { + sw_rx_sa = rcu_dereference_bh(sw_rx_sc->sa[sa_num]); + if (!sw_rx_sa) + continue; + + cn10k_mcs_write_rx_sa_plcy(pfvf, secy, mcs_rx_sc, + sa_num, sw_rx_sa->active); + cn10k_mcs_write_rx_sa_pn(pfvf, mcs_rx_sc, sa_num, + sw_rx_sa->next_pn_halves.lower); + } + + cn10k_mcs_write_rx_flowid(pfvf, mcs_rx_sc, hw_secy_id); + cn10k_mcs_write_sc_cam(pfvf, mcs_rx_sc, hw_secy_id); + } + + return 0; +} + +static int cn10k_mcs_disable_rxscs(struct otx2_nic *pfvf, + struct macsec_secy *secy, + bool delete) +{ + struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg; + struct cn10k_mcs_rxsc *mcs_rx_sc; + struct macsec_rx_sc *sw_rx_sc; + int ret; + + for (sw_rx_sc = rcu_dereference_bh(secy->rx_sc); sw_rx_sc && sw_rx_sc->active; + sw_rx_sc = rcu_dereference_bh(sw_rx_sc->next)) { + mcs_rx_sc = cn10k_mcs_get_rxsc(cfg, secy, sw_rx_sc); + if (unlikely(!mcs_rx_sc)) + continue; + + ret = cn10k_mcs_ena_dis_flowid(pfvf, mcs_rx_sc->hw_flow_id, + false, MCS_RX); + if (ret) + dev_err(pfvf->dev, "Failed to disable TCAM for SC %d\n", + mcs_rx_sc->hw_sc_id); + if (delete) { + cn10k_mcs_delete_rxsc(pfvf, mcs_rx_sc); + list_del(&mcs_rx_sc->entry); + kfree(mcs_rx_sc); + } + } + + return 0; +} + +static void cn10k_mcs_sync_stats(struct otx2_nic *pfvf, struct macsec_secy *secy, + struct cn10k_mcs_txsc *txsc) +{ + struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg; + struct mcs_secy_stats rx_rsp = { 0 }; + struct mcs_sc_stats sc_rsp = { 0 }; + struct cn10k_mcs_rxsc *rxsc; + + /* Because of shared counters for some stats in the hardware, when + * updating secy policy, take a snapshot of current stats and reset them. + * Below are the affected stats because of shared counters:
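+ * - pkt_untaged_cnt: InPktsNoTag (strict validation) vs InPktsUntagged + * - pkt_late_cnt: InPktsLate (protect on) vs InPktsDelayed + * - pkt_unchecked_cnt: InPktsUnchecked (check validation) vs InPktsOK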
+ */ + + /* Check if sync is really needed */ + if (secy->validate_frames == txsc->last_validate_frames && + secy->protect_frames == txsc->last_protect_frames) + return; + + cn10k_mcs_secy_stats(pfvf, txsc->hw_secy_id_rx, &rx_rsp, MCS_RX, true); + + txsc->stats.InPktsBadTag += rx_rsp.pkt_badtag_cnt; + txsc->stats.InPktsUnknownSCI += rx_rsp.pkt_nosa_cnt; + txsc->stats.InPktsNoSCI += rx_rsp.pkt_nosaerror_cnt; + if (txsc->last_validate_frames == MACSEC_VALIDATE_STRICT) + txsc->stats.InPktsNoTag += rx_rsp.pkt_untaged_cnt; + else + txsc->stats.InPktsUntagged += rx_rsp.pkt_untaged_cnt; + + list_for_each_entry(rxsc, &cfg->rxsc_list, entry) { + cn10k_mcs_sc_stats(pfvf, rxsc->hw_sc_id, &sc_rsp, MCS_RX, true); + + rxsc->stats.InOctetsValidated += sc_rsp.octet_validate_cnt; + rxsc->stats.InOctetsDecrypted += sc_rsp.octet_decrypt_cnt; + + rxsc->stats.InPktsInvalid += sc_rsp.pkt_invalid_cnt; + rxsc->stats.InPktsNotValid += sc_rsp.pkt_notvalid_cnt; + + if (txsc->last_protect_frames) + rxsc->stats.InPktsLate += sc_rsp.pkt_late_cnt; + else + rxsc->stats.InPktsDelayed += sc_rsp.pkt_late_cnt; + + if (txsc->last_validate_frames == MACSEC_VALIDATE_CHECK) + rxsc->stats.InPktsUnchecked += sc_rsp.pkt_unchecked_cnt; + else + rxsc->stats.InPktsOK += sc_rsp.pkt_unchecked_cnt; + } + + txsc->last_validate_frames = secy->validate_frames; + txsc->last_protect_frames = secy->protect_frames; +} + +static int cn10k_mdo_open(struct macsec_context *ctx) +{ + struct otx2_nic *pfvf = netdev_priv(ctx->netdev); + struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg; + struct macsec_secy *secy = ctx->secy; + struct macsec_tx_sa *sw_tx_sa; + struct cn10k_mcs_txsc *txsc; + u8 sa_num; + int err; + + txsc = cn10k_mcs_get_txsc(cfg, ctx->secy); + if (!txsc) + return -ENOENT; + + sa_num = txsc->encoding_sa; + sw_tx_sa = rcu_dereference_bh(secy->tx_sc.sa[sa_num]); + + err = cn10k_mcs_secy_tx_cfg(pfvf, secy, txsc, sw_tx_sa, sa_num); + if (err) + return err; + + return cn10k_mcs_secy_rx_cfg(pfvf, secy, txsc->hw_secy_id_rx); +} + +static int cn10k_mdo_stop(struct macsec_context *ctx) +{ + struct otx2_nic *pfvf = netdev_priv(ctx->netdev); + struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg; + struct cn10k_mcs_txsc *txsc; + int err; + + txsc = cn10k_mcs_get_txsc(cfg, ctx->secy); + if (!txsc) + return -ENOENT; + + err = cn10k_mcs_ena_dis_flowid(pfvf, txsc->hw_flow_id, false, MCS_TX); + if (err) + return err; + + return cn10k_mcs_disable_rxscs(pfvf, ctx->secy, false); +} + +static int cn10k_mdo_add_secy(struct macsec_context *ctx) +{ + struct otx2_nic *pfvf = netdev_priv(ctx->netdev); + struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg; + struct macsec_secy *secy = ctx->secy; + struct cn10k_mcs_txsc *txsc; + + if (secy->icv_len != MACSEC_DEFAULT_ICV_LEN) + return -EOPNOTSUPP; + + /* Stick to 16 bytes key len until XPN support is added */ + if (secy->key_len != 16) + return -EOPNOTSUPP; + + if (secy->xpn) + return -EOPNOTSUPP; + + txsc = cn10k_mcs_create_txsc(pfvf); + if (IS_ERR(txsc)) + return -ENOSPC; + + txsc->sw_secy = secy; + txsc->encoding_sa = secy->tx_sc.encoding_sa; + txsc->last_validate_frames = secy->validate_frames; + txsc->last_protect_frames = secy->protect_frames; + + list_add(&txsc->entry, &cfg->txsc_list); + + if (netif_running(secy->netdev)) + return cn10k_mcs_secy_tx_cfg(pfvf, secy, txsc, NULL, 0); + + return 0; +} + +static int cn10k_mdo_upd_secy(struct macsec_context *ctx) +{ + struct otx2_nic *pfvf = netdev_priv(ctx->netdev); + struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg; + struct macsec_secy *secy = ctx->secy; + struct macsec_tx_sa 
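A sketch of why cn10k_mcs_sync_stats() above snapshots before a policy change: the hardware keeps one shared counter (e.g. for untagged packets), and which MACsec MIB stat it feeds depends on the policy that was in force while it counted. Assuming that shared-counter model:

static void example_attribute_untagged(struct cn10k_txsc_stats *stats,
				       enum macsec_validation_type validate,
				       u64 hw_untagged_cnt)
{
	/* same hardware counter, two possible MIB homes */
	if (validate == MACSEC_VALIDATE_STRICT)
		stats->InPktsNoTag += hw_untagged_cnt;
	else
		stats->InPktsUntagged += hw_untagged_cnt;
}

Folding the counter into the software totals at every policy edge keeps each packet attributed to the stat that matched its policy at the time.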
*sw_tx_sa; + struct cn10k_mcs_txsc *txsc; + u8 sa_num; + int err; + + txsc = cn10k_mcs_get_txsc(cfg, secy); + if (!txsc) + return -ENOENT; + + txsc->encoding_sa = secy->tx_sc.encoding_sa; + + sa_num = txsc->encoding_sa; + sw_tx_sa = rcu_dereference_bh(secy->tx_sc.sa[sa_num]); + + if (netif_running(secy->netdev)) { + cn10k_mcs_sync_stats(pfvf, secy, txsc); + + err = cn10k_mcs_secy_tx_cfg(pfvf, secy, txsc, sw_tx_sa, sa_num); + if (err) + return err; + } + + return 0; +} + +static int cn10k_mdo_del_secy(struct macsec_context *ctx) +{ + struct otx2_nic *pfvf = netdev_priv(ctx->netdev); + struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg; + struct cn10k_mcs_txsc *txsc; + + txsc = cn10k_mcs_get_txsc(cfg, ctx->secy); + if (!txsc) + return -ENOENT; + + cn10k_mcs_ena_dis_flowid(pfvf, txsc->hw_flow_id, false, MCS_TX); + cn10k_mcs_disable_rxscs(pfvf, ctx->secy, true); + cn10k_mcs_delete_txsc(pfvf, txsc); + list_del(&txsc->entry); + kfree(txsc); + + return 0; +} + +static int cn10k_mdo_add_txsa(struct macsec_context *ctx) +{ + struct otx2_nic *pfvf = netdev_priv(ctx->netdev); + struct macsec_tx_sa *sw_tx_sa = ctx->sa.tx_sa; + struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg; + struct macsec_secy *secy = ctx->secy; + u8 sa_num = ctx->sa.assoc_num; + struct cn10k_mcs_txsc *txsc; + int err; + + txsc = cn10k_mcs_get_txsc(cfg, secy); + if (!txsc) + return -ENOENT; + + if (sa_num >= CN10K_MCS_SA_PER_SC) + return -EOPNOTSUPP; + + if (cn10k_mcs_alloc_txsa(pfvf, &txsc->hw_sa_id[sa_num])) + return -ENOSPC; + + memcpy(&txsc->sa_key[sa_num], ctx->sa.key, secy->key_len); + txsc->sa_bmap |= 1 << sa_num; + + if (netif_running(secy->netdev)) { + err = cn10k_mcs_write_tx_sa_plcy(pfvf, secy, txsc, sa_num); + if (err) + return err; + + err = cn10k_write_tx_sa_pn(pfvf, txsc, sa_num, + sw_tx_sa->next_pn_halves.lower); + if (err) + return err; + + err = cn10k_mcs_link_tx_sa2sc(pfvf, secy, txsc, + sa_num, sw_tx_sa->active); + if (err) + return err; + } + + return 0; +} + +static int cn10k_mdo_upd_txsa(struct macsec_context *ctx) +{ + struct otx2_nic *pfvf = netdev_priv(ctx->netdev); + struct macsec_tx_sa *sw_tx_sa = ctx->sa.tx_sa; + struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg; + struct macsec_secy *secy = ctx->secy; + u8 sa_num = ctx->sa.assoc_num; + struct cn10k_mcs_txsc *txsc; + int err; + + txsc = cn10k_mcs_get_txsc(cfg, secy); + if (!txsc) + return -ENOENT; + + if (sa_num >= CN10K_MCS_SA_PER_SC) + return -EOPNOTSUPP; + + if (netif_running(secy->netdev)) { + /* Keys cannot be changed after creation */ + err = cn10k_write_tx_sa_pn(pfvf, txsc, sa_num, + sw_tx_sa->next_pn_halves.lower); + if (err) + return err; + + err = cn10k_mcs_link_tx_sa2sc(pfvf, secy, txsc, + sa_num, sw_tx_sa->active); + if (err) + return err; + } + + return 0; +} + +static int cn10k_mdo_del_txsa(struct macsec_context *ctx) +{ + struct otx2_nic *pfvf = netdev_priv(ctx->netdev); + struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg; + u8 sa_num = ctx->sa.assoc_num; + struct cn10k_mcs_txsc *txsc; + + txsc = cn10k_mcs_get_txsc(cfg, ctx->secy); + if (!txsc) + return -ENOENT; + + if (sa_num >= CN10K_MCS_SA_PER_SC) + return -EOPNOTSUPP; + + cn10k_mcs_free_txsa(pfvf, txsc->hw_sa_id[sa_num]); + txsc->sa_bmap &= ~(1 << sa_num); + + return 0; +} + +static int cn10k_mdo_add_rxsc(struct macsec_context *ctx) +{ + struct otx2_nic *pfvf = netdev_priv(ctx->netdev); + struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg; + struct macsec_secy *secy = ctx->secy; + struct cn10k_mcs_rxsc *rxsc; + struct cn10k_mcs_txsc *txsc; + int err; + + txsc = cn10k_mcs_get_txsc(cfg, secy); + if (!txsc) + 
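The mdo_add_*/mdo_upd_* hooks above share one shape: driver caches are updated unconditionally, but hardware is only touched under netif_running(), and cn10k_mdo_open() later replays the cached state. A schematic of that pattern, with example_program_hw() as a hypothetical placeholder:

static int example_program_hw(struct macsec_secy *secy);

static int example_mdo_add(struct macsec_secy *secy)
{
	/* 1. record the state in driver-private caches ... */

	/* 2. ... but defer hardware writes until the device is up;
	 *    the open hook replays the cached state on ifup
	 */
	if (!netif_running(secy->netdev))
		return 0;

	return example_program_hw(secy);
}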
return -ENOENT; + + rxsc = cn10k_mcs_create_rxsc(pfvf); + if (IS_ERR(rxsc)) + return -ENOSPC; + + rxsc->sw_secy = ctx->secy; + rxsc->sw_rxsc = ctx->rx_sc; + list_add(&rxsc->entry, &cfg->rxsc_list); + + if (netif_running(secy->netdev)) { + err = cn10k_mcs_write_rx_flowid(pfvf, rxsc, txsc->hw_secy_id_rx); + if (err) + return err; + + err = cn10k_mcs_write_sc_cam(pfvf, rxsc, txsc->hw_secy_id_rx); + if (err) + return err; + } + + return 0; +} + +static int cn10k_mdo_upd_rxsc(struct macsec_context *ctx) +{ + struct otx2_nic *pfvf = netdev_priv(ctx->netdev); + struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg; + struct macsec_secy *secy = ctx->secy; + bool enable = ctx->rx_sc->active; + struct cn10k_mcs_rxsc *rxsc; + + rxsc = cn10k_mcs_get_rxsc(cfg, secy, ctx->rx_sc); + if (!rxsc) + return -ENOENT; + + if (netif_running(secy->netdev)) + return cn10k_mcs_ena_dis_flowid(pfvf, rxsc->hw_flow_id, + enable, MCS_RX); + + return 0; +} + +static int cn10k_mdo_del_rxsc(struct macsec_context *ctx) +{ + struct otx2_nic *pfvf = netdev_priv(ctx->netdev); + struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg; + struct cn10k_mcs_rxsc *rxsc; + + rxsc = cn10k_mcs_get_rxsc(cfg, ctx->secy, ctx->rx_sc); + if (!rxsc) + return -ENOENT; + + cn10k_mcs_ena_dis_flowid(pfvf, rxsc->hw_flow_id, false, MCS_RX); + cn10k_mcs_delete_rxsc(pfvf, rxsc); + list_del(&rxsc->entry); + kfree(rxsc); + + return 0; +} + +static int cn10k_mdo_add_rxsa(struct macsec_context *ctx) +{ + struct macsec_rx_sc *sw_rx_sc = ctx->sa.rx_sa->sc; + struct otx2_nic *pfvf = netdev_priv(ctx->netdev); + struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg; + struct macsec_rx_sa *rx_sa = ctx->sa.rx_sa; + u64 next_pn = rx_sa->next_pn_halves.lower; + struct macsec_secy *secy = ctx->secy; + bool sa_in_use = rx_sa->active; + u8 sa_num = ctx->sa.assoc_num; + struct cn10k_mcs_rxsc *rxsc; + int err; + + rxsc = cn10k_mcs_get_rxsc(cfg, secy, sw_rx_sc); + if (!rxsc) + return -ENOENT; + + if (sa_num >= CN10K_MCS_SA_PER_SC) + return -EOPNOTSUPP; + + if (cn10k_mcs_alloc_rxsa(pfvf, &rxsc->hw_sa_id[sa_num])) + return -ENOSPC; + + memcpy(&rxsc->sa_key[sa_num], ctx->sa.key, ctx->secy->key_len); + rxsc->sa_bmap |= 1 << sa_num; + + if (netif_running(secy->netdev)) { + err = cn10k_mcs_write_rx_sa_plcy(pfvf, secy, rxsc, + sa_num, sa_in_use); + if (err) + return err; + + err = cn10k_mcs_write_rx_sa_pn(pfvf, rxsc, sa_num, next_pn); + if (err) + return err; + } + + return 0; +} + +static int cn10k_mdo_upd_rxsa(struct macsec_context *ctx) +{ + struct macsec_rx_sc *sw_rx_sc = ctx->sa.rx_sa->sc; + struct otx2_nic *pfvf = netdev_priv(ctx->netdev); + struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg; + struct macsec_rx_sa *rx_sa = ctx->sa.rx_sa; + u64 next_pn = rx_sa->next_pn_halves.lower; + struct macsec_secy *secy = ctx->secy; + bool sa_in_use = rx_sa->active; + u8 sa_num = ctx->sa.assoc_num; + struct cn10k_mcs_rxsc *rxsc; + int err; + + rxsc = cn10k_mcs_get_rxsc(cfg, secy, sw_rx_sc); + if (!rxsc) + return -ENOENT; + + if (sa_num >= CN10K_MCS_SA_PER_SC) + return -EOPNOTSUPP; + + if (netif_running(secy->netdev)) { + err = cn10k_mcs_write_rx_sa_plcy(pfvf, secy, rxsc, sa_num, sa_in_use); + if (err) + return err; + + err = cn10k_mcs_write_rx_sa_pn(pfvf, rxsc, sa_num, next_pn); + if (err) + return err; + } + + return 0; +} + +static int cn10k_mdo_del_rxsa(struct macsec_context *ctx) +{ + struct macsec_rx_sc *sw_rx_sc = ctx->sa.rx_sa->sc; + struct otx2_nic *pfvf = netdev_priv(ctx->netdev); + struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg; + u8 sa_num = ctx->sa.assoc_num; + struct cn10k_mcs_rxsc *rxsc; + + rxsc = 
cn10k_mcs_get_rxsc(cfg, ctx->secy, sw_rx_sc); + if (!rxsc) + return -ENOENT; + + if (sa_num >= CN10K_MCS_SA_PER_SC) + return -EOPNOTSUPP; + + cn10k_mcs_write_rx_sa_plcy(pfvf, ctx->secy, rxsc, sa_num, false); + cn10k_mcs_free_rxsa(pfvf, rxsc->hw_sa_id[sa_num]); + + rxsc->sa_bmap &= ~(1 << sa_num); + + return 0; +} + +static int cn10k_mdo_get_dev_stats(struct macsec_context *ctx) +{ + struct mcs_secy_stats tx_rsp = { 0 }, rx_rsp = { 0 }; + struct otx2_nic *pfvf = netdev_priv(ctx->netdev); + struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg; + struct macsec_secy *secy = ctx->secy; + struct cn10k_mcs_txsc *txsc; + + txsc = cn10k_mcs_get_txsc(cfg, ctx->secy); + if (!txsc) + return -ENOENT; + + cn10k_mcs_secy_stats(pfvf, txsc->hw_secy_id_tx, &tx_rsp, MCS_TX, false); + ctx->stats.dev_stats->OutPktsUntagged = tx_rsp.pkt_untagged_cnt; + ctx->stats.dev_stats->OutPktsTooLong = tx_rsp.pkt_toolong_cnt; + + cn10k_mcs_secy_stats(pfvf, txsc->hw_secy_id_rx, &rx_rsp, MCS_RX, true); + txsc->stats.InPktsBadTag += rx_rsp.pkt_badtag_cnt; + txsc->stats.InPktsUnknownSCI += rx_rsp.pkt_nosa_cnt; + txsc->stats.InPktsNoSCI += rx_rsp.pkt_nosaerror_cnt; + if (secy->validate_frames == MACSEC_VALIDATE_STRICT) + txsc->stats.InPktsNoTag += rx_rsp.pkt_untaged_cnt; + else + txsc->stats.InPktsUntagged += rx_rsp.pkt_untaged_cnt; + txsc->stats.InPktsOverrun = 0; + + ctx->stats.dev_stats->InPktsNoTag = txsc->stats.InPktsNoTag; + ctx->stats.dev_stats->InPktsUntagged = txsc->stats.InPktsUntagged; + ctx->stats.dev_stats->InPktsBadTag = txsc->stats.InPktsBadTag; + ctx->stats.dev_stats->InPktsUnknownSCI = txsc->stats.InPktsUnknownSCI; + ctx->stats.dev_stats->InPktsNoSCI = txsc->stats.InPktsNoSCI; + ctx->stats.dev_stats->InPktsOverrun = txsc->stats.InPktsOverrun; + + return 0; +} + +static int cn10k_mdo_get_tx_sc_stats(struct macsec_context *ctx) +{ + struct otx2_nic *pfvf = netdev_priv(ctx->netdev); + struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg; + struct mcs_sc_stats rsp = { 0 }; + struct cn10k_mcs_txsc *txsc; + + txsc = cn10k_mcs_get_txsc(cfg, ctx->secy); + if (!txsc) + return -ENOENT; + + cn10k_mcs_sc_stats(pfvf, txsc->hw_sc_id, &rsp, MCS_TX, false); + + ctx->stats.tx_sc_stats->OutPktsProtected = rsp.pkt_protected_cnt; + ctx->stats.tx_sc_stats->OutPktsEncrypted = rsp.pkt_encrypt_cnt; + ctx->stats.tx_sc_stats->OutOctetsProtected = rsp.octet_protected_cnt; + ctx->stats.tx_sc_stats->OutOctetsEncrypted = rsp.octet_encrypt_cnt; + + return 0; +} + +static int cn10k_mdo_get_tx_sa_stats(struct macsec_context *ctx) +{ + struct otx2_nic *pfvf = netdev_priv(ctx->netdev); + struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg; + struct mcs_sa_stats rsp = { 0 }; + u8 sa_num = ctx->sa.assoc_num; + struct cn10k_mcs_txsc *txsc; + + txsc = cn10k_mcs_get_txsc(cfg, ctx->secy); + if (!txsc) + return -ENOENT; + + if (sa_num >= CN10K_MCS_SA_PER_SC) + return -EOPNOTSUPP; + + cn10k_mcs_sa_stats(pfvf, txsc->hw_sa_id[sa_num], &rsp, MCS_TX, false); + + ctx->stats.tx_sa_stats->OutPktsProtected = rsp.pkt_protected_cnt; + ctx->stats.tx_sa_stats->OutPktsEncrypted = rsp.pkt_encrypt_cnt; + + return 0; +} + +static int cn10k_mdo_get_rx_sc_stats(struct macsec_context *ctx) +{ + struct otx2_nic *pfvf = netdev_priv(ctx->netdev); + struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg; + struct macsec_secy *secy = ctx->secy; + struct mcs_sc_stats rsp = { 0 }; + struct cn10k_mcs_rxsc *rxsc; + + rxsc = cn10k_mcs_get_rxsc(cfg, secy, ctx->rx_sc); + if (!rxsc) + return -ENOENT; + + cn10k_mcs_sc_stats(pfvf, rxsc->hw_sc_id, &rsp, MCS_RX, true); + + rxsc->stats.InOctetsValidated += 
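Two reporting styles appear in cn10k_mdo_get_dev_stats() above, sketched side by side (assuming the final bool of the stats mbox calls selects reset-on-read, as the snapshot comment in cn10k_mcs_sync_stats() implies):

static void example_report_styles(struct cn10k_mcs_txsc *txsc,
				  struct mcs_secy_stats *tx_rsp,
				  struct mcs_secy_stats *rx_rsp,
				  struct macsec_dev_stats *out)
{
	/* private counter: read without clearing, report directly */
	out->OutPktsTooLong = tx_rsp->pkt_toolong_cnt;

	/* shared counter: read reset-on-read, fold into the software
	 * total, then report the running total
	 */
	txsc->stats.InPktsBadTag += rx_rsp->pkt_badtag_cnt;
	out->InPktsBadTag = txsc->stats.InPktsBadTag;
}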
rsp.octet_validate_cnt; + rxsc->stats.InOctetsDecrypted += rsp.octet_decrypt_cnt; + + rxsc->stats.InPktsInvalid += rsp.pkt_invalid_cnt; + rxsc->stats.InPktsNotValid += rsp.pkt_notvalid_cnt; + + if (secy->protect_frames) + rxsc->stats.InPktsLate += rsp.pkt_late_cnt; + else + rxsc->stats.InPktsDelayed += rsp.pkt_late_cnt; + + if (secy->validate_frames == MACSEC_VALIDATE_CHECK) + rxsc->stats.InPktsUnchecked += rsp.pkt_unchecked_cnt; + else + rxsc->stats.InPktsOK += rsp.pkt_unchecked_cnt; + + ctx->stats.rx_sc_stats->InOctetsValidated = rxsc->stats.InOctetsValidated; + ctx->stats.rx_sc_stats->InOctetsDecrypted = rxsc->stats.InOctetsDecrypted; + ctx->stats.rx_sc_stats->InPktsInvalid = rxsc->stats.InPktsInvalid; + ctx->stats.rx_sc_stats->InPktsNotValid = rxsc->stats.InPktsNotValid; + ctx->stats.rx_sc_stats->InPktsLate = rxsc->stats.InPktsLate; + ctx->stats.rx_sc_stats->InPktsDelayed = rxsc->stats.InPktsDelayed; + ctx->stats.rx_sc_stats->InPktsUnchecked = rxsc->stats.InPktsUnchecked; + ctx->stats.rx_sc_stats->InPktsOK = rxsc->stats.InPktsOK; + + return 0; +} + +static int cn10k_mdo_get_rx_sa_stats(struct macsec_context *ctx) +{ + struct macsec_rx_sc *sw_rx_sc = ctx->sa.rx_sa->sc; + struct otx2_nic *pfvf = netdev_priv(ctx->netdev); + struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg; + struct mcs_sa_stats rsp = { 0 }; + u8 sa_num = ctx->sa.assoc_num; + struct cn10k_mcs_rxsc *rxsc; + + rxsc = cn10k_mcs_get_rxsc(cfg, ctx->secy, sw_rx_sc); + if (!rxsc) + return -ENOENT; + + if (sa_num >= CN10K_MCS_SA_PER_SC) + return -EOPNOTSUPP; + + cn10k_mcs_sa_stats(pfvf, rxsc->hw_sa_id[sa_num], &rsp, MCS_RX, false); + + ctx->stats.rx_sa_stats->InPktsOK = rsp.pkt_ok_cnt; + ctx->stats.rx_sa_stats->InPktsInvalid = rsp.pkt_invalid_cnt; + ctx->stats.rx_sa_stats->InPktsNotValid = rsp.pkt_notvalid_cnt; + ctx->stats.rx_sa_stats->InPktsNotUsingSA = rsp.pkt_nosaerror_cnt; + ctx->stats.rx_sa_stats->InPktsUnusedSA = rsp.pkt_nosa_cnt; + + return 0; +} + +static const struct macsec_ops cn10k_mcs_ops = { + .mdo_dev_open = cn10k_mdo_open, + .mdo_dev_stop = cn10k_mdo_stop, + .mdo_add_secy = cn10k_mdo_add_secy, + .mdo_upd_secy = cn10k_mdo_upd_secy, + .mdo_del_secy = cn10k_mdo_del_secy, + .mdo_add_rxsc = cn10k_mdo_add_rxsc, + .mdo_upd_rxsc = cn10k_mdo_upd_rxsc, + .mdo_del_rxsc = cn10k_mdo_del_rxsc, + .mdo_add_rxsa = cn10k_mdo_add_rxsa, + .mdo_upd_rxsa = cn10k_mdo_upd_rxsa, + .mdo_del_rxsa = cn10k_mdo_del_rxsa, + .mdo_add_txsa = cn10k_mdo_add_txsa, + .mdo_upd_txsa = cn10k_mdo_upd_txsa, + .mdo_del_txsa = cn10k_mdo_del_txsa, + .mdo_get_dev_stats = cn10k_mdo_get_dev_stats, + .mdo_get_tx_sc_stats = cn10k_mdo_get_tx_sc_stats, + .mdo_get_tx_sa_stats = cn10k_mdo_get_tx_sa_stats, + .mdo_get_rx_sc_stats = cn10k_mdo_get_rx_sc_stats, + .mdo_get_rx_sa_stats = cn10k_mdo_get_rx_sa_stats, +}; + +void cn10k_handle_mcs_event(struct otx2_nic *pfvf, struct mcs_intr_info *event) +{ + struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg; + struct macsec_tx_sa *sw_tx_sa = NULL; + struct macsec_secy *secy = NULL; + struct cn10k_mcs_txsc *txsc; + u8 an; + + if (!test_bit(CN10K_HW_MACSEC, &pfvf->hw.cap_flag)) + return; + + if (!(event->intr_mask & MCS_CPM_TX_PACKET_XPN_EQ0_INT)) + return; + + /* Find the SecY to which the expired hardware SA is mapped */ + list_for_each_entry(txsc, &cfg->txsc_list, entry) { + for (an = 0; an < CN10K_MCS_SA_PER_SC; an++) + if (txsc->hw_sa_id[an] == event->sa_id) { + secy = txsc->sw_secy; + sw_tx_sa = rcu_dereference_bh(secy->tx_sc.sa[an]); + } + } + + if (secy && sw_tx_sa) + macsec_pn_wrapped(secy, sw_tx_sa); +} + +int 
cn10k_mcs_init(struct otx2_nic *pfvf)
+{
+	struct mbox *mbox = &pfvf->mbox;
+	struct cn10k_mcs_cfg *cfg;
+	struct mcs_intr_cfg *req;
+
+	if (!test_bit(CN10K_HW_MACSEC, &pfvf->hw.cap_flag))
+		return 0;
+
+	cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
+	if (!cfg)
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&cfg->txsc_list);
+	INIT_LIST_HEAD(&cfg->rxsc_list);
+	pfvf->macsec_cfg = cfg;
+
+	pfvf->netdev->features |= NETIF_F_HW_MACSEC;
+	pfvf->netdev->macsec_ops = &cn10k_mcs_ops;
+
+	mutex_lock(&mbox->lock);
+
+	req = otx2_mbox_alloc_msg_mcs_intr_cfg(mbox);
+	if (!req)
+		goto fail;
+
+	req->intr_mask = MCS_CPM_TX_PACKET_XPN_EQ0_INT;
+
+	if (otx2_sync_mbox_msg(mbox))
+		goto fail;
+
+	mutex_unlock(&mbox->lock);
+
+	return 0;
+fail:
+	mutex_unlock(&mbox->lock);
+	dev_err(pfvf->dev, "Cannot notify PN wrapped event\n");
+	return 0;
+}
+
+void cn10k_mcs_free(struct otx2_nic *pfvf)
+{
+	if (!test_bit(CN10K_HW_MACSEC, &pfvf->hw.cap_flag))
+		return;
+
+	cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SECY, 0, true);
+	cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SECY, 0, true);
+	kfree(pfvf->macsec_cfg);
+	pfvf->macsec_cfg = NULL;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
index bc3e6aae6efa..9ac9e6615ae7 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
@@ -1827,4 +1827,5 @@ otx2_mbox_up_handler_ ## _fn_name(struct otx2_nic *pfvf, \
 } \
 EXPORT_SYMBOL(otx2_mbox_up_handler_ ## _fn_name);
 MBOX_UP_CGX_MESSAGES
+MBOX_UP_MCS_MESSAGES
 #undef M
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
index 4c7691a1a1ed..282db6fe3b08 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
@@ -19,6 +19,7 @@
 #include <net/devlink.h>
 #include <linux/time64.h>
 #include <linux/dim.h>
+#include <uapi/linux/if_macsec.h>
 
 #include <mbox.h>
 #include <npc.h>
@@ -33,6 +34,7 @@
 #define PCI_DEVID_OCTEONTX2_RVU_AFVF	0xA0F8
 
 #define PCI_SUBSYS_DEVID_96XX_RVU_PFVF	0xB200
+#define PCI_SUBSYS_DEVID_CN10K_B_RVU_PFVF	0xBD00
 
 /* PCI BAR nos */
 #define PCI_CFG_REG_BAR_NUM	2
@@ -244,6 +246,7 @@ struct otx2_hw {
 #define CN10K_LMTST		2
 #define CN10K_RPM		3
 #define CN10K_PTP_ONESTEP	4
+#define CN10K_HW_MACSEC		5
 	unsigned long		cap_flag;
 
 #define LMT_LINE_SIZE		128
@@ -351,6 +354,66 @@ struct dev_hw_ops {
 	void	(*aura_freeptr)(void *dev, int aura, u64 buf);
 };
 
+#define CN10K_MCS_SA_PER_SC	4
+
+/* Stats which need to be accumulated in software because
+ * of shared counters in hardware.
+ */ +struct cn10k_txsc_stats { + u64 InPktsUntagged; + u64 InPktsNoTag; + u64 InPktsBadTag; + u64 InPktsUnknownSCI; + u64 InPktsNoSCI; + u64 InPktsOverrun; +}; + +struct cn10k_rxsc_stats { + u64 InOctetsValidated; + u64 InOctetsDecrypted; + u64 InPktsUnchecked; + u64 InPktsDelayed; + u64 InPktsOK; + u64 InPktsInvalid; + u64 InPktsLate; + u64 InPktsNotValid; + u64 InPktsNotUsingSA; + u64 InPktsUnusedSA; +}; + +struct cn10k_mcs_txsc { + struct macsec_secy *sw_secy; + struct cn10k_txsc_stats stats; + struct list_head entry; + enum macsec_validation_type last_validate_frames; + bool last_protect_frames; + u16 hw_secy_id_tx; + u16 hw_secy_id_rx; + u16 hw_flow_id; + u16 hw_sc_id; + u16 hw_sa_id[CN10K_MCS_SA_PER_SC]; + u8 sa_bmap; + u8 sa_key[CN10K_MCS_SA_PER_SC][MACSEC_MAX_KEY_LEN]; + u8 encoding_sa; +}; + +struct cn10k_mcs_rxsc { + struct macsec_secy *sw_secy; + struct macsec_rx_sc *sw_rxsc; + struct cn10k_rxsc_stats stats; + struct list_head entry; + u16 hw_flow_id; + u16 hw_sc_id; + u16 hw_sa_id[CN10K_MCS_SA_PER_SC]; + u8 sa_bmap; + u8 sa_key[CN10K_MCS_SA_PER_SC][MACSEC_MAX_KEY_LEN]; +}; + +struct cn10k_mcs_cfg { + struct list_head txsc_list; + struct list_head rxsc_list; +}; + struct otx2_nic { void __iomem *reg_base; struct net_device *netdev; @@ -438,6 +501,10 @@ struct otx2_nic { /* napi event count. It is needed for adaptive irq coalescing. */ u32 napi_events; + +#if IS_ENABLED(CONFIG_MACSEC) + struct cn10k_mcs_cfg *macsec_cfg; +#endif }; static inline bool is_otx2_lbkvf(struct pci_dev *pdev) @@ -477,6 +544,11 @@ static inline bool is_dev_otx2(struct pci_dev *pdev) midr == PCI_REVISION_ID_95XXMM || midr == PCI_REVISION_ID_95XXO); } +static inline bool is_dev_cn10kb(struct pci_dev *pdev) +{ + return pdev->subsystem_device == PCI_SUBSYS_DEVID_CN10K_B_RVU_PFVF; +} + static inline void otx2_setup_dev_hw_settings(struct otx2_nic *pfvf) { struct otx2_hw *hw = &pfvf->hw; @@ -508,6 +580,9 @@ static inline void otx2_setup_dev_hw_settings(struct otx2_nic *pfvf) __set_bit(CN10K_RPM, &hw->cap_flag); __set_bit(CN10K_PTP_ONESTEP, &hw->cap_flag); } + + if (is_dev_cn10kb(pfvf->pdev)) + __set_bit(CN10K_HW_MACSEC, &hw->cap_flag); } /* Register read/write APIs */ @@ -763,6 +838,7 @@ otx2_mbox_up_handler_ ## _fn_name(struct otx2_nic *pfvf, \ struct _rsp_type *rsp); \ MBOX_UP_CGX_MESSAGES +MBOX_UP_MCS_MESSAGES #undef M /* Time to wait before watchdog kicks off */ @@ -945,4 +1021,18 @@ int otx2_pfc_txschq_alloc(struct otx2_nic *pfvf); int otx2_pfc_txschq_update(struct otx2_nic *pfvf); int otx2_pfc_txschq_stop(struct otx2_nic *pfvf); #endif + +#if IS_ENABLED(CONFIG_MACSEC) +/* MACSEC offload support */ +int cn10k_mcs_init(struct otx2_nic *pfvf); +void cn10k_mcs_free(struct otx2_nic *pfvf); +void cn10k_handle_mcs_event(struct otx2_nic *pfvf, struct mcs_intr_info *event); +#else +static inline int cn10k_mcs_init(struct otx2_nic *pfvf) { return 0; } +static inline void cn10k_mcs_free(struct otx2_nic *pfvf) {} +static inline void cn10k_handle_mcs_event(struct otx2_nic *pfvf, + struct mcs_intr_info *event) +{} +#endif /* CONFIG_MACSEC */ + #endif /* OTX2_COMMON_H */ diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c index fa9348d6a4f4..5803d7f9137c 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c @@ -858,6 +858,15 @@ static void otx2_handle_link_event(struct otx2_nic *pf) } } +int otx2_mbox_up_handler_mcs_intr_notify(struct otx2_nic *pf, + struct mcs_intr_info *event, + 
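The IS_ENABLED(CONFIG_MACSEC) block above gives each MCS entry point a static-inline no-op fallback, so callers in the probe/remove paths can stay unconditional. A minimal sketch of the same pattern for a hypothetical hook:

#if IS_ENABLED(CONFIG_MACSEC)
int example_feature_init(struct otx2_nic *pfvf);
#else
/* compiles to nothing when MACsec support is not built in */
static inline int example_feature_init(struct otx2_nic *pfvf) { return 0; }
#endif

This is why otx2_probe()/otx2_remove() below can call cn10k_mcs_init()/cn10k_mcs_free() without any #ifdef.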
struct msg_rsp *rsp) +{ + cn10k_handle_mcs_event(pf, event); + + return 0; +} + int otx2_mbox_up_handler_cgx_link_event(struct otx2_nic *pf, struct cgx_link_info_msg *msg, struct msg_rsp *rsp) @@ -917,6 +926,7 @@ static int otx2_process_mbox_msg_up(struct otx2_nic *pf, return err; \ } MBOX_UP_CGX_MESSAGES +MBOX_UP_MCS_MESSAGES #undef M break; default: @@ -2764,6 +2774,10 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id) if (err) goto err_ptp_destroy; + err = cn10k_mcs_init(pf); + if (err) + goto err_del_mcam_entries; + if (pf->flags & OTX2_FLAG_NTUPLE_SUPPORT) netdev->hw_features |= NETIF_F_NTUPLE; @@ -2978,6 +2992,8 @@ static void otx2_remove(struct pci_dev *pdev) otx2_config_pause_frm(pf); } + cn10k_mcs_free(pf); + #ifdef CONFIG_DCB /* Disable PFC config */ if (pf->pfc_en) { diff --git a/drivers/net/ethernet/marvell/prestera/prestera.h b/drivers/net/ethernet/marvell/prestera/prestera.h index e5a4381a88b3..35554ee805cd 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera.h +++ b/drivers/net/ethernet/marvell/prestera/prestera.h @@ -306,17 +306,27 @@ struct prestera_switch { struct prestera_counter *counter; u8 lag_member_max; u8 lag_max; + u32 size_tbl_router_nexthop; }; struct prestera_router { struct prestera_switch *sw; struct list_head vr_list; struct list_head rif_entry_list; + struct rhashtable nh_neigh_ht; + struct rhashtable nexthop_group_ht; struct rhashtable fib_ht; + struct rhashtable kern_neigh_cache_ht; struct rhashtable kern_fib_cache_ht; struct notifier_block inetaddr_nb; struct notifier_block inetaddr_valid_nb; struct notifier_block fib_nb; + struct notifier_block netevent_nb; + u8 *nhgrp_hw_state_cache; /* Bitmap cached hw state of nhs */ + unsigned long nhgrp_hw_cache_kick; /* jiffies */ + struct { + struct delayed_work dw; + } neighs_update; }; struct prestera_rxtx_params { @@ -362,6 +372,8 @@ int prestera_port_cfg_mac_write(struct prestera_port *port, struct prestera_port *prestera_port_dev_lower_find(struct net_device *dev); void prestera_queue_work(struct work_struct *work); +void prestera_queue_delayed_work(struct delayed_work *work, unsigned long delay); +void prestera_queue_drain(void); int prestera_port_learning_set(struct prestera_port *port, bool learn_enable); int prestera_port_uc_flood_set(struct prestera_port *port, bool flood); diff --git a/drivers/net/ethernet/marvell/prestera/prestera_acl.c b/drivers/net/ethernet/marvell/prestera/prestera_acl.c index 0fa7541f0d7e..cba89fda504b 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_acl.c +++ b/drivers/net/ethernet/marvell/prestera/prestera_acl.c @@ -185,10 +185,14 @@ err_rhashtable_init: return ERR_PTR(err); } -void prestera_acl_ruleset_keymask_set(struct prestera_acl_ruleset *ruleset, - void *keymask) +int prestera_acl_ruleset_keymask_set(struct prestera_acl_ruleset *ruleset, + void *keymask) { ruleset->keymask = kmemdup(keymask, ACL_KEYMASK_SIZE, GFP_KERNEL); + if (!ruleset->keymask) + return -ENOMEM; + + return 0; } int prestera_acl_ruleset_offload(struct prestera_acl_ruleset *ruleset) diff --git a/drivers/net/ethernet/marvell/prestera/prestera_acl.h b/drivers/net/ethernet/marvell/prestera/prestera_acl.h index d45ee88e8287..a35cc0609a1d 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_acl.h +++ b/drivers/net/ethernet/marvell/prestera/prestera_acl.h @@ -185,8 +185,8 @@ struct prestera_acl_ruleset * prestera_acl_ruleset_lookup(struct prestera_acl *acl, struct prestera_flow_block *block, u32 chain_index); -void prestera_acl_ruleset_keymask_set(struct 
prestera_acl_ruleset *ruleset, - void *keymask); +int prestera_acl_ruleset_keymask_set(struct prestera_acl_ruleset *ruleset, + void *keymask); bool prestera_acl_ruleset_is_offload(struct prestera_acl_ruleset *ruleset); int prestera_acl_ruleset_offload(struct prestera_acl_ruleset *ruleset); void prestera_acl_ruleset_put(struct prestera_acl_ruleset *ruleset); diff --git a/drivers/net/ethernet/marvell/prestera/prestera_flower.c b/drivers/net/ethernet/marvell/prestera/prestera_flower.c index f38e8b3a543e..91a478b75cbf 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_flower.c +++ b/drivers/net/ethernet/marvell/prestera/prestera_flower.c @@ -500,7 +500,9 @@ int prestera_flower_tmplt_create(struct prestera_flow_block *block, } /* preserve keymask/template to this ruleset */ - prestera_acl_ruleset_keymask_set(ruleset, rule.re_key.match.mask); + err = prestera_acl_ruleset_keymask_set(ruleset, rule.re_key.match.mask); + if (err) + goto err_ruleset_keymask_set; /* skip error, as it is not possible to reject template operation, * so, keep the reference to the ruleset for rules to be added @@ -516,6 +518,8 @@ int prestera_flower_tmplt_create(struct prestera_flow_block *block, list_add_rcu(&template->list, &block->template_list); return 0; +err_ruleset_keymask_set: + prestera_acl_ruleset_put(ruleset); err_ruleset_get: kfree(template); err_malloc: diff --git a/drivers/net/ethernet/marvell/prestera/prestera_hw.c b/drivers/net/ethernet/marvell/prestera/prestera_hw.c index 5803a28050e1..fc6f7d2746e8 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_hw.c +++ b/drivers/net/ethernet/marvell/prestera/prestera_hw.c @@ -10,11 +10,14 @@ #include "prestera_hw.h" #include "prestera_acl.h" #include "prestera_counter.h" +#include "prestera_router_hw.h" #define PRESTERA_SWITCH_INIT_TIMEOUT_MS (30 * 1000) #define PRESTERA_MIN_MTU 64 +#define PRESTERA_MSG_CHUNK_SIZE 1024 + enum prestera_cmd_type_t { PRESTERA_CMD_TYPE_SWITCH_INIT = 0x1, PRESTERA_CMD_TYPE_SWITCH_ATTR_SET = 0x2, @@ -57,6 +60,10 @@ enum prestera_cmd_type_t { PRESTERA_CMD_TYPE_ROUTER_RIF_DELETE = 0x601, PRESTERA_CMD_TYPE_ROUTER_LPM_ADD = 0x610, PRESTERA_CMD_TYPE_ROUTER_LPM_DELETE = 0x611, + PRESTERA_CMD_TYPE_ROUTER_NH_GRP_SET = 0x622, + PRESTERA_CMD_TYPE_ROUTER_NH_GRP_BLK_GET = 0x645, + PRESTERA_CMD_TYPE_ROUTER_NH_GRP_ADD = 0x623, + PRESTERA_CMD_TYPE_ROUTER_NH_GRP_DELETE = 0x624, PRESTERA_CMD_TYPE_ROUTER_VR_CREATE = 0x630, PRESTERA_CMD_TYPE_ROUTER_VR_DELETE = 0x631, @@ -542,6 +549,14 @@ struct prestera_msg_ip_addr { u8 __pad[3]; }; +struct prestera_msg_nh { + struct prestera_msg_iface oif; + __le32 hw_id; + u8 mac[ETH_ALEN]; + u8 is_active; + u8 pad; +}; + struct prestera_msg_rif_req { struct prestera_msg_cmd cmd; struct prestera_msg_iface iif; @@ -567,6 +582,34 @@ struct prestera_msg_lpm_req { u8 __pad[2]; }; +struct prestera_msg_nh_req { + struct prestera_msg_cmd cmd; + struct prestera_msg_nh nh[PRESTERA_NHGR_SIZE_MAX]; + __le32 size; + __le32 grp_id; +}; + +struct prestera_msg_nh_chunk_req { + struct prestera_msg_cmd cmd; + __le32 offset; +}; + +struct prestera_msg_nh_chunk_resp { + struct prestera_msg_ret ret; + u8 hw_state[PRESTERA_MSG_CHUNK_SIZE]; +}; + +struct prestera_msg_nh_grp_req { + struct prestera_msg_cmd cmd; + __le32 grp_id; + __le32 size; +}; + +struct prestera_msg_nh_grp_resp { + struct prestera_msg_ret ret; + __le32 grp_id; +}; + struct prestera_msg_vr_req { struct prestera_msg_cmd cmd; __le16 vr_id; @@ -729,11 +772,15 @@ static void prestera_hw_build_tests(void) BUILD_BUG_ON(sizeof(struct 
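The prestera_msg_* structures above are a firmware wire ABI, so the driver pins their sizes at compile time in prestera_hw_build_tests() (continued just below); an accidental field or padding change then fails the build instead of corrupting the protocol. A sketch of the technique, with a hypothetical message:

struct example_msg {
	__le32 id;
	u8 flags;
	u8 __pad[3];	/* explicit padding keeps the layout fixed */
};

static void example_build_test(void)
{
	BUILD_BUG_ON(sizeof(struct example_msg) != 8);	/* compile-time check */
}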
prestera_msg_flood_domain_ports_reset_req) != 8); BUILD_BUG_ON(sizeof(struct prestera_msg_mdb_create_req) != 16); BUILD_BUG_ON(sizeof(struct prestera_msg_mdb_destroy_req) != 16); + BUILD_BUG_ON(sizeof(struct prestera_msg_nh_req) != 124); + BUILD_BUG_ON(sizeof(struct prestera_msg_nh_chunk_req) != 8); + BUILD_BUG_ON(sizeof(struct prestera_msg_nh_grp_req) != 12); /* structure that are part of req/resp fw messages */ BUILD_BUG_ON(sizeof(struct prestera_msg_iface) != 16); BUILD_BUG_ON(sizeof(struct prestera_msg_ip_addr) != 20); BUILD_BUG_ON(sizeof(struct prestera_msg_flood_domain_port) != 12); + BUILD_BUG_ON(sizeof(struct prestera_msg_nh) != 28); /* check responses */ BUILD_BUG_ON(sizeof(struct prestera_msg_common_resp) != 8); @@ -750,6 +797,8 @@ static void prestera_hw_build_tests(void) BUILD_BUG_ON(sizeof(struct prestera_msg_vr_resp) != 12); BUILD_BUG_ON(sizeof(struct prestera_msg_policer_resp) != 12); BUILD_BUG_ON(sizeof(struct prestera_msg_flood_domain_create_resp) != 12); + BUILD_BUG_ON(sizeof(struct prestera_msg_nh_chunk_resp) != 1032); + BUILD_BUG_ON(sizeof(struct prestera_msg_nh_grp_resp) != 12); /* check events */ BUILD_BUG_ON(sizeof(struct prestera_msg_event_port) != 20); @@ -1027,6 +1076,8 @@ int prestera_hw_switch_init(struct prestera_switch *sw) sw->id = resp.switch_id; sw->lag_member_max = resp.lag_member_max; sw->lag_max = resp.lag_max; + sw->size_tbl_router_nexthop = + __le32_to_cpu(resp.size_tbl_router_nexthop); return 0; } @@ -2037,6 +2088,85 @@ int prestera_hw_lpm_del(struct prestera_switch *sw, u16 vr_id, sizeof(req)); } +int prestera_hw_nh_entries_set(struct prestera_switch *sw, int count, + struct prestera_neigh_info *nhs, u32 grp_id) +{ + struct prestera_msg_nh_req req = { .size = __cpu_to_le32((u32)count), + .grp_id = __cpu_to_le32(grp_id) }; + int i, err; + + for (i = 0; i < count; i++) { + req.nh[i].is_active = nhs[i].connected; + memcpy(&req.nh[i].mac, nhs[i].ha, ETH_ALEN); + err = prestera_iface_to_msg(&nhs[i].iface, &req.nh[i].oif); + if (err) + return err; + } + + return prestera_cmd(sw, PRESTERA_CMD_TYPE_ROUTER_NH_GRP_SET, &req.cmd, + sizeof(req)); +} + +int prestera_hw_nhgrp_blk_get(struct prestera_switch *sw, + u8 *hw_state, u32 buf_size /* Buffer in bytes */) +{ + static struct prestera_msg_nh_chunk_resp resp; + struct prestera_msg_nh_chunk_req req; + u32 buf_offset; + int err; + + memset(&hw_state[0], 0, buf_size); + buf_offset = 0; + while (1) { + if (buf_offset >= buf_size) + break; + + memset(&req, 0, sizeof(req)); + req.offset = __cpu_to_le32(buf_offset * 8); /* 8 bits in u8 */ + err = prestera_cmd_ret(sw, + PRESTERA_CMD_TYPE_ROUTER_NH_GRP_BLK_GET, + &req.cmd, sizeof(req), &resp.ret, + sizeof(resp)); + if (err) + return err; + + memcpy(&hw_state[buf_offset], &resp.hw_state[0], + buf_offset + PRESTERA_MSG_CHUNK_SIZE > buf_size ? 
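The ternary this memcpy is evaluating (it continues just below) clamps the final chunk to the bytes remaining; as a standalone helper the arithmetic is simply min(remaining, chunk):

static u32 example_chunk_len(u32 buf_offset, u32 buf_size)
{
	/* bytes still to copy, capped at one firmware chunk */
	return buf_offset + PRESTERA_MSG_CHUNK_SIZE > buf_size ?
	       buf_size - buf_offset : PRESTERA_MSG_CHUNK_SIZE;
}

For buf_size = 2500 the loop copies 1024, 1024 and then 452 bytes.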
+ buf_size - buf_offset : PRESTERA_MSG_CHUNK_SIZE); + buf_offset += PRESTERA_MSG_CHUNK_SIZE; + } + + return 0; +} + +int prestera_hw_nh_group_create(struct prestera_switch *sw, u16 nh_count, + u32 *grp_id) +{ + struct prestera_msg_nh_grp_req req = { .size = __cpu_to_le32((u32)nh_count) }; + struct prestera_msg_nh_grp_resp resp; + int err; + + err = prestera_cmd_ret(sw, PRESTERA_CMD_TYPE_ROUTER_NH_GRP_ADD, + &req.cmd, sizeof(req), &resp.ret, sizeof(resp)); + if (err) + return err; + + *grp_id = __le32_to_cpu(resp.grp_id); + return err; +} + +int prestera_hw_nh_group_delete(struct prestera_switch *sw, u16 nh_count, + u32 grp_id) +{ + struct prestera_msg_nh_grp_req req = { + .grp_id = __cpu_to_le32(grp_id), + .size = __cpu_to_le32(nh_count) + }; + + return prestera_cmd(sw, PRESTERA_CMD_TYPE_ROUTER_NH_GRP_DELETE, + &req.cmd, sizeof(req)); +} + int prestera_hw_rxtx_init(struct prestera_switch *sw, struct prestera_rxtx_params *params) { diff --git a/drivers/net/ethernet/marvell/prestera/prestera_hw.h b/drivers/net/ethernet/marvell/prestera/prestera_hw.h index 21078a2256b2..0a929279e1ce 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_hw.h +++ b/drivers/net/ethernet/marvell/prestera/prestera_hw.h @@ -146,6 +146,7 @@ struct prestera_counter_stats; struct prestera_iface; struct prestera_flood_domain; struct prestera_mdb_entry; +struct prestera_neigh_info; /* Switch API */ int prestera_hw_switch_init(struct prestera_switch *sw); @@ -266,6 +267,16 @@ int prestera_hw_lpm_add(struct prestera_switch *sw, u16 vr_id, int prestera_hw_lpm_del(struct prestera_switch *sw, u16 vr_id, __be32 dst, u32 dst_len); +/* NH API */ +int prestera_hw_nh_entries_set(struct prestera_switch *sw, int count, + struct prestera_neigh_info *nhs, u32 grp_id); +int prestera_hw_nhgrp_blk_get(struct prestera_switch *sw, + u8 *hw_state, u32 buf_size /* Buffer in bytes */); +int prestera_hw_nh_group_create(struct prestera_switch *sw, u16 nh_count, + u32 *grp_id); +int prestera_hw_nh_group_delete(struct prestera_switch *sw, u16 nh_count, + u32 grp_id); + /* Event handlers */ int prestera_hw_event_handler_register(struct prestera_switch *sw, enum prestera_event_type type, diff --git a/drivers/net/ethernet/marvell/prestera/prestera_main.c b/drivers/net/ethernet/marvell/prestera/prestera_main.c index 639d3e940a88..24f9d6024745 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_main.c +++ b/drivers/net/ethernet/marvell/prestera/prestera_main.c @@ -36,6 +36,17 @@ void prestera_queue_work(struct work_struct *work) queue_work(prestera_owq, work); } +void prestera_queue_delayed_work(struct delayed_work *work, unsigned long delay) +{ + queue_delayed_work(prestera_wq, work, delay); +} + +void prestera_queue_drain(void) +{ + drain_workqueue(prestera_wq); + drain_workqueue(prestera_owq); +} + int prestera_port_learning_set(struct prestera_port *port, bool learn) { return prestera_hw_port_learning_set(port, learn); diff --git a/drivers/net/ethernet/marvell/prestera/prestera_router.c b/drivers/net/ethernet/marvell/prestera/prestera_router.c index 58f4e44d5ad7..4046be0e86ff 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_router.c +++ b/drivers/net/ethernet/marvell/prestera/prestera_router.c @@ -7,10 +7,35 @@ #include <net/inet_dscp.h> #include <net/switchdev.h> #include <linux/rhashtable.h> +#include <net/nexthop.h> +#include <net/arp.h> +#include <linux/if_vlan.h> +#include <linux/if_macvlan.h> +#include <net/netevent.h> #include "prestera.h" #include "prestera_router_hw.h" +#define 
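A hypothetical usage sketch of the nexthop-group API above: create a group, program its entries, and tear the group down again if programming fails. Signatures follow the declarations added to prestera_hw.h; error handling is deliberately minimal:

static int example_nh_group_program(struct prestera_switch *sw,
				    struct prestera_neigh_info *nhs,
				    u16 count)
{
	u32 grp_id;
	int err;

	err = prestera_hw_nh_group_create(sw, count, &grp_id);
	if (err)
		return err;

	err = prestera_hw_nh_entries_set(sw, count, nhs, grp_id);
	if (err)
		prestera_hw_nh_group_delete(sw, count, grp_id);

	return err;
}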
PRESTERA_IMPLICITY_RESOLVE_DEAD_NEIGH +#define PRESTERA_NH_PROBE_INTERVAL 5000 /* ms */ + +struct prestera_kern_neigh_cache_key { + struct prestera_ip_addr addr; + struct net_device *dev; +}; + +struct prestera_kern_neigh_cache { + struct prestera_kern_neigh_cache_key key; + struct rhash_head ht_node; + struct list_head kern_fib_cache_list; + /* Hold prepared nh_neigh info if is in_kernel */ + struct prestera_neigh_info nh_neigh_info; + /* Indicate if neighbour is reachable by direct route */ + bool reachable; + /* Lock cache if neigh is present in kernel */ + bool in_kernel; +}; + struct prestera_kern_fib_cache_key { struct prestera_ip_addr addr; u32 prefix_len; @@ -23,15 +48,29 @@ struct prestera_kern_fib_cache { struct { struct prestera_fib_key fib_key; enum prestera_fib_type fib_type; + struct prestera_nexthop_group_key nh_grp_key; } lpm_info; /* hold prepared lpm info */ /* Indicate if route is not overlapped by another table */ struct rhash_head ht_node; /* node of prestera_router */ - struct fib_info *fi; - dscp_t kern_dscp; - u8 kern_type; + struct prestera_kern_neigh_cache_head { + struct prestera_kern_fib_cache *this; + struct list_head head; + struct prestera_kern_neigh_cache *n_cache; + } kern_neigh_cache_head[PRESTERA_NHGR_SIZE_MAX]; + union { + struct fib_notifier_info info; /* point to any of 4/6 */ + struct fib_entry_notifier_info fen4_info; + }; bool reachable; }; +static const struct rhashtable_params __prestera_kern_neigh_cache_ht_params = { + .key_offset = offsetof(struct prestera_kern_neigh_cache, key), + .head_offset = offsetof(struct prestera_kern_neigh_cache, ht_node), + .key_len = sizeof(struct prestera_kern_neigh_cache_key), + .automatic_shrinking = true, +}; + static const struct rhashtable_params __prestera_kern_fib_cache_ht_params = { .key_offset = offsetof(struct prestera_kern_fib_cache, key), .head_offset = offsetof(struct prestera_kern_fib_cache, ht_node), @@ -51,15 +90,450 @@ static u32 prestera_fix_tb_id(u32 tb_id) } static void -prestera_util_fen_info2fib_cache_key(struct fib_entry_notifier_info *fen_info, +prestera_util_fen_info2fib_cache_key(struct fib_notifier_info *info, struct prestera_kern_fib_cache_key *key) { + struct fib_entry_notifier_info *fen_info = + container_of(info, struct fib_entry_notifier_info, info); + memset(key, 0, sizeof(*key)); + key->addr.v = PRESTERA_IPV4; key->addr.u.ipv4 = cpu_to_be32(fen_info->dst); key->prefix_len = fen_info->dst_len; key->kern_tb_id = fen_info->tb_id; } +static int prestera_util_nhc2nc_key(struct prestera_switch *sw, + struct fib_nh_common *nhc, + struct prestera_kern_neigh_cache_key *nk) +{ + memset(nk, 0, sizeof(*nk)); + if (nhc->nhc_gw_family == AF_INET) { + nk->addr.v = PRESTERA_IPV4; + nk->addr.u.ipv4 = nhc->nhc_gw.ipv4; + } else { + nk->addr.v = PRESTERA_IPV6; + nk->addr.u.ipv6 = nhc->nhc_gw.ipv6; + } + + nk->dev = nhc->nhc_dev; + return 0; +} + +static void +prestera_util_nc_key2nh_key(struct prestera_kern_neigh_cache_key *ck, + struct prestera_nh_neigh_key *nk) +{ + memset(nk, 0, sizeof(*nk)); + nk->addr = ck->addr; + nk->rif = (void *)ck->dev; +} + +static bool +prestera_util_nhc_eq_n_cache_key(struct prestera_switch *sw, + struct fib_nh_common *nhc, + struct prestera_kern_neigh_cache_key *nk) +{ + struct prestera_kern_neigh_cache_key tk; + int err; + + err = prestera_util_nhc2nc_key(sw, nhc, &tk); + if (err) + return false; + + if (memcmp(&tk, nk, sizeof(tk))) + return false; + + return true; +} + +static int +prestera_util_neigh2nc_key(struct prestera_switch *sw, struct neighbour *n, + struct 
prestera_kern_neigh_cache_key *key) +{ + memset(key, 0, sizeof(*key)); + if (n->tbl->family == AF_INET) { + key->addr.v = PRESTERA_IPV4; + key->addr.u.ipv4 = *(__be32 *)n->primary_key; + } else { + return -ENOENT; + } + + key->dev = n->dev; + + return 0; +} + +static bool __prestera_fi_is_direct(struct fib_info *fi) +{ + struct fib_nh *fib_nh; + + if (fib_info_num_path(fi) == 1) { + fib_nh = fib_info_nh(fi, 0); + if (fib_nh->fib_nh_gw_family == AF_UNSPEC) + return true; + } + + return false; +} + +static bool prestera_fi_is_direct(struct fib_info *fi) +{ + if (fi->fib_type != RTN_UNICAST) + return false; + + return __prestera_fi_is_direct(fi); +} + +static bool prestera_fi_is_nh(struct fib_info *fi) +{ + if (fi->fib_type != RTN_UNICAST) + return false; + + return !__prestera_fi_is_direct(fi); +} + +static bool __prestera_fi6_is_direct(struct fib6_info *fi) +{ + if (!fi->fib6_nh->nh_common.nhc_gw_family) + return true; + + return false; +} + +static bool prestera_fi6_is_direct(struct fib6_info *fi) +{ + if (fi->fib6_type != RTN_UNICAST) + return false; + + return __prestera_fi6_is_direct(fi); +} + +static bool prestera_fi6_is_nh(struct fib6_info *fi) +{ + if (fi->fib6_type != RTN_UNICAST) + return false; + + return !__prestera_fi6_is_direct(fi); +} + +static bool prestera_fib_info_is_direct(struct fib_notifier_info *info) +{ + struct fib6_entry_notifier_info *fen6_info = + container_of(info, struct fib6_entry_notifier_info, info); + struct fib_entry_notifier_info *fen_info = + container_of(info, struct fib_entry_notifier_info, info); + + if (info->family == AF_INET) + return prestera_fi_is_direct(fen_info->fi); + else + return prestera_fi6_is_direct(fen6_info->rt); +} + +static bool prestera_fib_info_is_nh(struct fib_notifier_info *info) +{ + struct fib6_entry_notifier_info *fen6_info = + container_of(info, struct fib6_entry_notifier_info, info); + struct fib_entry_notifier_info *fen_info = + container_of(info, struct fib_entry_notifier_info, info); + + if (info->family == AF_INET) + return prestera_fi_is_nh(fen_info->fi); + else + return prestera_fi6_is_nh(fen6_info->rt); +} + +/* must be called with rcu_read_lock() */ +static int prestera_util_kern_get_route(struct fib_result *res, u32 tb_id, + __be32 *addr) +{ + struct flowi4 fl4; + + /* TODO: walkthrough appropriate tables in kernel + * to know if the same prefix exists in several tables + */ + memset(&fl4, 0, sizeof(fl4)); + fl4.daddr = *addr; + return fib_lookup(&init_net, &fl4, res, 0 /* FIB_LOOKUP_NOREF */); +} + +static bool +__prestera_util_kern_n_is_reachable_v4(u32 tb_id, __be32 *addr, + struct net_device *dev) +{ + struct fib_nh *fib_nh; + struct fib_result res; + bool reachable; + + reachable = false; + + if (!prestera_util_kern_get_route(&res, tb_id, addr)) + if (prestera_fi_is_direct(res.fi)) { + fib_nh = fib_info_nh(res.fi, 0); + if (dev == fib_nh->fib_nh_dev) + reachable = true; + } + + return reachable; +} + +/* Check if neigh route is reachable */ +static bool +prestera_util_kern_n_is_reachable(u32 tb_id, + struct prestera_ip_addr *addr, + struct net_device *dev) +{ + if (addr->v == PRESTERA_IPV4) + return __prestera_util_kern_n_is_reachable_v4(tb_id, + &addr->u.ipv4, + dev); + else + return false; +} + +static void prestera_util_kern_set_neigh_offload(struct neighbour *n, + bool offloaded) +{ + if (offloaded) + n->flags |= NTF_OFFLOADED; + else + n->flags &= ~NTF_OFFLOADED; +} + +static void +prestera_util_kern_set_nh_offload(struct fib_nh_common *nhc, bool offloaded, bool trap) +{ + if (offloaded) + nhc->nhc_flags |= 
RTNH_F_OFFLOAD;
+	else
+		nhc->nhc_flags &= ~RTNH_F_OFFLOAD;
+
+	if (trap)
+		nhc->nhc_flags |= RTNH_F_TRAP;
+	else
+		nhc->nhc_flags &= ~RTNH_F_TRAP;
+}
+
+static struct fib_nh_common *
+prestera_kern_fib_info_nhc(struct fib_notifier_info *info, int n)
+{
+	struct fib6_entry_notifier_info *fen6_info;
+	struct fib_entry_notifier_info *fen4_info;
+	struct fib6_info *iter;
+
+	if (info->family == AF_INET) {
+		fen4_info = container_of(info, struct fib_entry_notifier_info,
+					 info);
+		return &fib_info_nh(fen4_info->fi, n)->nh_common;
+	} else if (info->family == AF_INET6) {
+		fen6_info = container_of(info, struct fib6_entry_notifier_info,
+					 info);
+		if (!n)
+			return &fen6_info->rt->fib6_nh->nh_common;
+
+		list_for_each_entry(iter, &fen6_info->rt->fib6_siblings,
+				    fib6_siblings) {
+			if (!--n)
+				return &iter->fib6_nh->nh_common;
+		}
+	}
+
+	/* If the family is incorrect then the upper functions have a bug.
+	 * If the requested index is not found that is also a bug, because
+	 * a valid index must be produced by _nhs(), which checks the list
+	 * length.
+	 */
+	WARN(1, "Invalid parameters passed to %s n=%d i=%p",
+	     __func__, n, info);
+	return NULL;
+}
+
+static int prestera_kern_fib_info_nhs(struct fib_notifier_info *info)
+{
+	struct fib6_entry_notifier_info *fen6_info;
+	struct fib_entry_notifier_info *fen4_info;
+
+	if (info->family == AF_INET) {
+		fen4_info = container_of(info, struct fib_entry_notifier_info,
+					 info);
+		return fib_info_num_path(fen4_info->fi);
+	} else if (info->family == AF_INET6) {
+		fen6_info = container_of(info, struct fib6_entry_notifier_info,
+					 info);
+		return fen6_info->rt->fib6_nsiblings + 1;
+	}
+
+	return 0;
+}
+
+static unsigned char
+prestera_kern_fib_info_type(struct fib_notifier_info *info)
+{
+	struct fib6_entry_notifier_info *fen6_info;
+	struct fib_entry_notifier_info *fen4_info;
+
+	if (info->family == AF_INET) {
+		fen4_info = container_of(info, struct fib_entry_notifier_info,
+					 info);
+		return fen4_info->fi->fib_type;
+	} else if (info->family == AF_INET6) {
+		fen6_info = container_of(info, struct fib6_entry_notifier_info,
+					 info);
+		/* TODO: ECMP in IPv6 consists of several routes, each with
+		 * a single nh.
+		 */
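A sketch of how the two helpers above combine: ask for the count first, then index - prestera_kern_fib_info_nhc() is then valid for any index below that count, for either address family (this is the shape __prestera_kern_fib_cache_create_nhs() uses further below):

static void example_walk_route_nexthops(struct fib_notifier_info *info)
{
	int i, nhs = prestera_kern_fib_info_nhs(info);

	for (i = 0; i < nhs; i++) {
		struct fib_nh_common *nhc =
			prestera_kern_fib_info_nhc(info, i);

		/* ... translate nhc into a neigh cache key ... */
	}
}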
+		return fen6_info->rt->fib6_type;
+	}
+
+	return RTN_UNSPEC;
+}
+
+/* A uc_nh route whose key equals its single nexthop address is treated
+ * as a neighbour route.
+ */
+static bool
+prestera_fib_node_util_is_neighbour(struct prestera_fib_node *fib_node)
+{
+	if (fib_node->info.type != PRESTERA_FIB_TYPE_UC_NH)
+		return false;
+
+	if (fib_node->info.nh_grp->nh_neigh_head[1].neigh)
+		return false;
+
+	if (!fib_node->info.nh_grp->nh_neigh_head[0].neigh)
+		return false;
+
+	if (memcmp(&fib_node->info.nh_grp->nh_neigh_head[0].neigh->key.addr,
+		   &fib_node->key.addr, sizeof(struct prestera_ip_addr)))
+		return false;
+
+	return true;
+}
+
+static int prestera_dev_if_type(const struct net_device *dev)
+{
+	struct macvlan_dev *vlan;
+
+	if (is_vlan_dev(dev) &&
+	    netif_is_bridge_master(vlan_dev_real_dev(dev))) {
+		return PRESTERA_IF_VID_E;
+	} else if (netif_is_bridge_master(dev)) {
+		return PRESTERA_IF_VID_E;
+	} else if (netif_is_lag_master(dev)) {
+		return PRESTERA_IF_LAG_E;
+	} else if (netif_is_macvlan(dev)) {
+		vlan = netdev_priv(dev);
+		return prestera_dev_if_type(vlan->lowerdev);
+	} else {
+		return PRESTERA_IF_PORT_E;
+	}
+}
+
+static int
+prestera_neigh_iface_init(struct prestera_switch *sw,
+			  struct prestera_iface *iface,
+			  struct neighbour *n)
+{
+	struct prestera_port *port;
+
+	iface->vlan_id = 0; /* TODO: vlan egress */
+	iface->type = prestera_dev_if_type(n->dev);
+	if (iface->type != PRESTERA_IF_PORT_E)
+		return -EINVAL;
+
+	if (!prestera_netdev_check(n->dev))
+		return -EINVAL;
+
+	port = netdev_priv(n->dev);
+	iface->dev_port.hw_dev_num = port->dev_id;
+	iface->dev_port.port_num = port->hw_id;
+
+	return 0;
+}
+
+static struct prestera_kern_neigh_cache *
+prestera_kern_neigh_cache_find(struct prestera_switch *sw,
+			       struct prestera_kern_neigh_cache_key *key)
+{
+	struct prestera_kern_neigh_cache *n_cache;
+
+	n_cache =
+	 rhashtable_lookup_fast(&sw->router->kern_neigh_cache_ht, key,
+				__prestera_kern_neigh_cache_ht_params);
+	return IS_ERR(n_cache) ?
NULL : n_cache; +} + +static void +__prestera_kern_neigh_cache_destruct(struct prestera_switch *sw, + struct prestera_kern_neigh_cache *n_cache) +{ + dev_put(n_cache->key.dev); +} + +static void +__prestera_kern_neigh_cache_destroy(struct prestera_switch *sw, + struct prestera_kern_neigh_cache *n_cache) +{ + rhashtable_remove_fast(&sw->router->kern_neigh_cache_ht, + &n_cache->ht_node, + __prestera_kern_neigh_cache_ht_params); + __prestera_kern_neigh_cache_destruct(sw, n_cache); + kfree(n_cache); +} + +static struct prestera_kern_neigh_cache * +__prestera_kern_neigh_cache_create(struct prestera_switch *sw, + struct prestera_kern_neigh_cache_key *key) +{ + struct prestera_kern_neigh_cache *n_cache; + int err; + + n_cache = kzalloc(sizeof(*n_cache), GFP_KERNEL); + if (!n_cache) + goto err_kzalloc; + + memcpy(&n_cache->key, key, sizeof(*key)); + dev_hold(n_cache->key.dev); + + INIT_LIST_HEAD(&n_cache->kern_fib_cache_list); + err = rhashtable_insert_fast(&sw->router->kern_neigh_cache_ht, + &n_cache->ht_node, + __prestera_kern_neigh_cache_ht_params); + if (err) + goto err_ht_insert; + + return n_cache; + +err_ht_insert: + dev_put(n_cache->key.dev); + kfree(n_cache); +err_kzalloc: + return NULL; +} + +static struct prestera_kern_neigh_cache * +prestera_kern_neigh_cache_get(struct prestera_switch *sw, + struct prestera_kern_neigh_cache_key *key) +{ + struct prestera_kern_neigh_cache *n_cache; + + n_cache = prestera_kern_neigh_cache_find(sw, key); + if (!n_cache) + n_cache = __prestera_kern_neigh_cache_create(sw, key); + + return n_cache; +} + +static struct prestera_kern_neigh_cache * +prestera_kern_neigh_cache_put(struct prestera_switch *sw, + struct prestera_kern_neigh_cache *n_cache) +{ + if (!n_cache->in_kernel && + list_empty(&n_cache->kern_fib_cache_list)) { + __prestera_kern_neigh_cache_destroy(sw, n_cache); + return NULL; + } + + return n_cache; +} + static struct prestera_kern_fib_cache * prestera_kern_fib_cache_find(struct prestera_switch *sw, struct prestera_kern_fib_cache_key *key) @@ -73,24 +547,79 @@ prestera_kern_fib_cache_find(struct prestera_switch *sw, } static void +__prestera_kern_fib_cache_destruct(struct prestera_switch *sw, + struct prestera_kern_fib_cache *fib_cache) +{ + struct prestera_kern_neigh_cache *n_cache; + int i; + + for (i = 0; i < PRESTERA_NHGR_SIZE_MAX; i++) { + n_cache = fib_cache->kern_neigh_cache_head[i].n_cache; + if (n_cache) { + list_del(&fib_cache->kern_neigh_cache_head[i].head); + prestera_kern_neigh_cache_put(sw, n_cache); + } + } + + fib_info_put(fib_cache->fen4_info.fi); +} + +static void prestera_kern_fib_cache_destroy(struct prestera_switch *sw, struct prestera_kern_fib_cache *fib_cache) { - fib_info_put(fib_cache->fi); rhashtable_remove_fast(&sw->router->kern_fib_cache_ht, &fib_cache->ht_node, __prestera_kern_fib_cache_ht_params); + __prestera_kern_fib_cache_destruct(sw, fib_cache); kfree(fib_cache); } +static int +__prestera_kern_fib_cache_create_nhs(struct prestera_switch *sw, + struct prestera_kern_fib_cache *fc) +{ + struct prestera_kern_neigh_cache_key nc_key; + struct prestera_kern_neigh_cache *n_cache; + struct fib_nh_common *nhc; + int i, nhs, err; + + if (!prestera_fib_info_is_nh(&fc->info)) + return 0; + + nhs = prestera_kern_fib_info_nhs(&fc->info); + if (nhs > PRESTERA_NHGR_SIZE_MAX) + return 0; + + for (i = 0; i < nhs; i++) { + nhc = prestera_kern_fib_info_nhc(&fc->fen4_info.info, i); + err = prestera_util_nhc2nc_key(sw, nhc, &nc_key); + if (err) + return 0; + + n_cache = prestera_kern_neigh_cache_get(sw, &nc_key); + if (!n_cache) + 
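The find/create/get/put quartet above manages entry lifetime without a refcount: _get() creates on lookup miss, and _put() destroys only when the entry is neither pinned by the kernel neighbour table (in_kernel) nor referenced by any fib cache. A sketch of a typical caller (mirroring prestera_k_arb_n_evt() further below):

static void example_neigh_refresh(struct prestera_switch *sw,
				  struct prestera_kern_neigh_cache_key *key)
{
	struct prestera_kern_neigh_cache *n_cache;

	n_cache = prestera_kern_neigh_cache_get(sw, key);
	if (!n_cache)
		return;	/* allocation or rhashtable insert failed */

	/* ... refresh the entry from the kernel, apply it to hardware ... */

	/* frees the entry only if nothing keeps it alive */
	prestera_kern_neigh_cache_put(sw, n_cache);
}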
return 0; + + fc->kern_neigh_cache_head[i].this = fc; + fc->kern_neigh_cache_head[i].n_cache = n_cache; + list_add(&fc->kern_neigh_cache_head[i].head, + &n_cache->kern_fib_cache_list); + } + + return 0; +} + /* Operations on fi (offload, etc) must be wrapped in utils. * This function just create storage. */ static struct prestera_kern_fib_cache * prestera_kern_fib_cache_create(struct prestera_switch *sw, struct prestera_kern_fib_cache_key *key, - struct fib_info *fi, dscp_t dscp, u8 type) + struct fib_notifier_info *info) { + struct fib_entry_notifier_info *fen_info = + container_of(info, struct fib_entry_notifier_info, info); struct prestera_kern_fib_cache *fib_cache; int err; @@ -99,10 +628,8 @@ prestera_kern_fib_cache_create(struct prestera_switch *sw, goto err_kzalloc; memcpy(&fib_cache->key, key, sizeof(*key)); - fib_info_hold(fi); - fib_cache->fi = fi; - fib_cache->kern_dscp = dscp; - fib_cache->kern_type = type; + fib_info_hold(fen_info->fi); + memcpy(&fib_cache->fen4_info, fen_info, sizeof(*fen_info)); err = rhashtable_insert_fast(&sw->router->kern_fib_cache_ht, &fib_cache->ht_node, @@ -110,48 +637,270 @@ prestera_kern_fib_cache_create(struct prestera_switch *sw, if (err) goto err_ht_insert; + /* Handle nexthops */ + err = __prestera_kern_fib_cache_create_nhs(sw, fib_cache); + if (err) + goto out; /* Not critical */ + +out: return fib_cache; err_ht_insert: - fib_info_put(fi); + fib_info_put(fen_info->fi); kfree(fib_cache); err_kzalloc: return NULL; } static void +__prestera_k_arb_fib_nh_offload_set(struct prestera_switch *sw, + struct prestera_kern_fib_cache *fibc, + struct prestera_kern_neigh_cache *nc, + bool offloaded, bool trap) +{ + struct fib_nh_common *nhc; + int i, nhs; + + nhs = prestera_kern_fib_info_nhs(&fibc->info); + for (i = 0; i < nhs; i++) { + nhc = prestera_kern_fib_info_nhc(&fibc->info, i); + if (!nc) { + prestera_util_kern_set_nh_offload(nhc, offloaded, trap); + continue; + } + + if (prestera_util_nhc_eq_n_cache_key(sw, nhc, &nc->key)) { + prestera_util_kern_set_nh_offload(nhc, offloaded, trap); + break; + } + } +} + +static void +__prestera_k_arb_n_offload_set(struct prestera_switch *sw, + struct prestera_kern_neigh_cache *nc, + bool offloaded) +{ + struct neighbour *n; + + n = neigh_lookup(&arp_tbl, &nc->key.addr.u.ipv4, + nc->key.dev); + if (!n) + return; + + prestera_util_kern_set_neigh_offload(n, offloaded); + neigh_release(n); +} + +static void __prestera_k_arb_fib_lpm_offload_set(struct prestera_switch *sw, struct prestera_kern_fib_cache *fc, bool fail, bool offload, bool trap) { struct fib_rt_info fri; - if (fc->key.addr.v != PRESTERA_IPV4) + switch (fc->key.addr.v) { + case PRESTERA_IPV4: + fri.fi = fc->fen4_info.fi; + fri.tb_id = fc->key.kern_tb_id; + fri.dst = fc->key.addr.u.ipv4; + fri.dst_len = fc->key.prefix_len; + fri.dscp = fc->fen4_info.dscp; + fri.type = fc->fen4_info.type; + /* flags begin */ + fri.offload = offload; + fri.trap = trap; + fri.offload_failed = fail; + /* flags end */ + fib_alias_hw_flags_set(&init_net, &fri); + return; + case PRESTERA_IPV6: + /* TODO */ return; + } +} + +static void +__prestera_k_arb_n_lpm_set(struct prestera_switch *sw, + struct prestera_kern_neigh_cache *n_cache, + bool enabled) +{ + struct prestera_nexthop_group_key nh_grp_key; + struct prestera_kern_fib_cache_key fc_key; + struct prestera_kern_fib_cache *fib_cache; + struct prestera_fib_node *fib_node; + struct prestera_fib_key fib_key; + + /* Exception for fc with prefix 32: LPM entry is already used by fib */ + memset(&fc_key, 0, sizeof(fc_key)); + 
fc_key.addr = n_cache->key.addr;
+	fc_key.prefix_len = PRESTERA_IP_ADDR_PLEN(n_cache->key.addr.v);
+	/* Ideally we would use the tb_id of the route which points to this
+	 * neighbour, but we take it from the rif instead, and the rif is
+	 * inconsistent here: in_rif and out_rif must be kept separate.
+	 * Also note: each fib which points to this neigh should get its
+	 * own neigh lpm entry (one for each ingress vr).
+	 */
+	fc_key.kern_tb_id = l3mdev_fib_table(n_cache->key.dev);
+	fib_cache = prestera_kern_fib_cache_find(sw, &fc_key);
+	memset(&fib_key, 0, sizeof(fib_key));
+	fib_key.addr = n_cache->key.addr;
+	fib_key.prefix_len = PRESTERA_IP_ADDR_PLEN(n_cache->key.addr.v);
+	fib_key.tb_id = prestera_fix_tb_id(fc_key.kern_tb_id);
+	fib_node = prestera_fib_node_find(sw, &fib_key);
+	if (!fib_cache || !fib_cache->reachable) {
+		if (!enabled && fib_node) {
+			if (prestera_fib_node_util_is_neighbour(fib_node))
+				prestera_fib_node_destroy(sw, fib_node);
+			return;
+		}
+	}
+
+	if (enabled && !fib_node) {
+		memset(&nh_grp_key, 0, sizeof(nh_grp_key));
+		prestera_util_nc_key2nh_key(&n_cache->key,
+					    &nh_grp_key.neigh[0]);
+		fib_node = prestera_fib_node_create(sw, &fib_key,
+						    PRESTERA_FIB_TYPE_UC_NH,
+						    &nh_grp_key);
+		if (!fib_node)
+			pr_err("%s failed ip=%pI4n", "prestera_fib_node_create",
+			       &fib_key.addr.u.ipv4);
+		return;
+	}
+}
+
+static void
+__prestera_k_arb_nc_kern_fib_fetch(struct prestera_switch *sw,
+				   struct prestera_kern_neigh_cache *nc)
+{
+	if (prestera_util_kern_n_is_reachable(l3mdev_fib_table(nc->key.dev),
+					      &nc->key.addr, nc->key.dev))
+		nc->reachable = true;
+	else
+		nc->reachable = false;
+}
+
+/* Kernel neighbour -> neigh_cache info */
+static void
+__prestera_k_arb_nc_kern_n_fetch(struct prestera_switch *sw,
+				 struct prestera_kern_neigh_cache *nc)
+{
+	struct neighbour *n;
+	int err;
+
+	memset(&nc->nh_neigh_info, 0, sizeof(nc->nh_neigh_info));
+	n = neigh_lookup(&arp_tbl, &nc->key.addr.u.ipv4, nc->key.dev);
+	if (!n)
+		goto out;
+
+	read_lock_bh(&n->lock);
+	if (n->nud_state & NUD_VALID && !n->dead) {
+		err = prestera_neigh_iface_init(sw, &nc->nh_neigh_info.iface,
+						n);
+		if (err)
+			goto n_read_out;
-	fri.fi = fc->fi;
-	fri.tb_id = fc->key.kern_tb_id;
-	fri.dst = fc->key.addr.u.ipv4;
-	fri.dst_len = fc->key.prefix_len;
-	fri.dscp = fc->kern_dscp;
-	fri.type = fc->kern_type;
-	/* flags begin */
-	fri.offload = offload;
-	fri.trap = trap;
-	fri.offload_failed = fail;
-	/* flags end */
-	fib_alias_hw_flags_set(&init_net, &fri);
+		memcpy(&nc->nh_neigh_info.ha[0], &n->ha[0], ETH_ALEN);
+		nc->nh_neigh_info.connected = true;
+	}
+n_read_out:
+	read_unlock_bh(&n->lock);
+out:
+	nc->in_kernel = nc->nh_neigh_info.connected;
+	if (n)
+		neigh_release(n);
+}
+
+/* neigh_cache info -> lpm update */
+static void
+__prestera_k_arb_nc_apply(struct prestera_switch *sw,
+			  struct prestera_kern_neigh_cache *nc)
+{
+	struct prestera_kern_neigh_cache_head *nhead;
+	struct prestera_nh_neigh_key nh_key;
+	struct prestera_nh_neigh *nh_neigh;
+	int err;
+
+	__prestera_k_arb_n_lpm_set(sw, nc, nc->reachable && nc->in_kernel);
+	__prestera_k_arb_n_offload_set(sw, nc, nc->reachable && nc->in_kernel);
+
+	prestera_util_nc_key2nh_key(&nc->key, &nh_key);
+	nh_neigh = prestera_nh_neigh_find(sw, &nh_key);
+	if (!nh_neigh)
+		goto out;
+
+	/* Do hw update only if something changed to prevent nh flap */
+	if (memcmp(&nc->nh_neigh_info, &nh_neigh->info,
+		   sizeof(nh_neigh->info))) {
+		memcpy(&nh_neigh->info, &nc->nh_neigh_info,
+		       sizeof(nh_neigh->info));
+		err = prestera_nh_neigh_set(sw, nh_neigh);
+		if (err) {
+			pr_err("%s failed with err=%d ip=%pI4n mac=%pM",
+ "prestera_nh_neigh_set", err, + &nh_neigh->key.addr.u.ipv4, + &nh_neigh->info.ha[0]); + goto out; + } + } + +out: + list_for_each_entry(nhead, &nc->kern_fib_cache_list, head) { + __prestera_k_arb_fib_nh_offload_set(sw, nhead->this, nc, + nc->in_kernel, + !nc->in_kernel); + } } static int __prestera_pr_k_arb_fc_lpm_info_calc(struct prestera_switch *sw, struct prestera_kern_fib_cache *fc) { + struct fib_nh_common *nhc; + int nh_cnt; + memset(&fc->lpm_info, 0, sizeof(fc->lpm_info)); - switch (fc->fi->fib_type) { + switch (prestera_kern_fib_info_type(&fc->info)) { case RTN_UNICAST: - fc->lpm_info.fib_type = PRESTERA_FIB_TYPE_TRAP; + if (prestera_fib_info_is_direct(&fc->info) && + fc->key.prefix_len == + PRESTERA_IP_ADDR_PLEN(fc->key.addr.v)) { + /* This is special case. + * When prefix is 32. Than we will have conflict in lpm + * for direct route - once TRAP added, there is no + * place for neighbour entry. So represent direct route + * with prefix 32, as NH. So neighbour will be resolved + * as nexthop of this route. + */ + nhc = prestera_kern_fib_info_nhc(&fc->info, 0); + fc->lpm_info.fib_type = PRESTERA_FIB_TYPE_UC_NH; + fc->lpm_info.nh_grp_key.neigh[0].addr = + fc->key.addr; + fc->lpm_info.nh_grp_key.neigh[0].rif = + nhc->nhc_dev; + + break; + } + + /* We can also get nh_grp_key from fi. This will be correct to + * because cache not always represent, what actually written to + * lpm. But we use nh cache, as well for now (for this case). + */ + for (nh_cnt = 0; nh_cnt < PRESTERA_NHGR_SIZE_MAX; nh_cnt++) { + if (!fc->kern_neigh_cache_head[nh_cnt].n_cache) + break; + + fc->lpm_info.nh_grp_key.neigh[nh_cnt].addr = + fc->kern_neigh_cache_head[nh_cnt].n_cache->key.addr; + fc->lpm_info.nh_grp_key.neigh[nh_cnt].rif = + fc->kern_neigh_cache_head[nh_cnt].n_cache->key.dev; + } + + fc->lpm_info.fib_type = nh_cnt ? + PRESTERA_FIB_TYPE_UC_NH : + PRESTERA_FIB_TYPE_TRAP; break; /* Unsupported. 
Leave it for kernel: */ case RTN_BROADCAST: @@ -191,7 +940,8 @@ static int __prestera_k_arb_f_lpm_set(struct prestera_switch *sw, return 0; fib_node = prestera_fib_node_create(sw, &fc->lpm_info.fib_key, - fc->lpm_info.fib_type); + fc->lpm_info.fib_type, + &fc->lpm_info.nh_grp_key); if (!fib_node) { dev_err(sw->dev->dev, "fib_node=NULL %pI4n/%d kern_tb_id = %d", @@ -220,6 +970,10 @@ static int __prestera_k_arb_fc_apply(struct prestera_switch *sw, } switch (fc->lpm_info.fib_type) { + case PRESTERA_FIB_TYPE_UC_NH: + __prestera_k_arb_fib_lpm_offload_set(sw, fc, false, + fc->reachable, false); + break; case PRESTERA_FIB_TYPE_TRAP: __prestera_k_arb_fib_lpm_offload_set(sw, fc, false, false, fc->reachable); @@ -271,17 +1025,140 @@ __prestera_k_arb_util_fib_overlapped(struct prestera_switch *sw, return rfc; } +static void __prestera_k_arb_hw_state_upd(struct prestera_switch *sw, + struct prestera_kern_neigh_cache *nc) +{ + struct prestera_nh_neigh_key nh_key; + struct prestera_nh_neigh *nh_neigh; + struct neighbour *n; + bool hw_active; + + prestera_util_nc_key2nh_key(&nc->key, &nh_key); + nh_neigh = prestera_nh_neigh_find(sw, &nh_key); + if (!nh_neigh) { + pr_err("Cannot find nh_neigh for cached %pI4n", + &nc->key.addr.u.ipv4); + return; + } + + hw_active = prestera_nh_neigh_util_hw_state(sw, nh_neigh); + +#ifdef PRESTERA_IMPLICITY_RESOLVE_DEAD_NEIGH + if (!hw_active && nc->in_kernel) + goto out; +#else /* PRESTERA_IMPLICITY_RESOLVE_DEAD_NEIGH */ + if (!hw_active) + goto out; +#endif /* PRESTERA_IMPLICITY_RESOLVE_DEAD_NEIGH */ + + if (nc->key.addr.v == PRESTERA_IPV4) { + n = neigh_lookup(&arp_tbl, &nc->key.addr.u.ipv4, + nc->key.dev); + if (!n) + n = neigh_create(&arp_tbl, &nc->key.addr.u.ipv4, + nc->key.dev); + } else { + n = NULL; + } + + if (!IS_ERR(n) && n) { + neigh_event_send(n, NULL); + neigh_release(n); + } else { + pr_err("Cannot create neighbour %pI4n", &nc->key.addr.u.ipv4); + } + +out: + return; +} + +/* Propagate hw state to kernel */ +static void prestera_k_arb_hw_evt(struct prestera_switch *sw) +{ + struct prestera_kern_neigh_cache *n_cache; + struct rhashtable_iter iter; + + rhashtable_walk_enter(&sw->router->kern_neigh_cache_ht, &iter); + rhashtable_walk_start(&iter); + while (1) { + n_cache = rhashtable_walk_next(&iter); + + if (!n_cache) + break; + + if (IS_ERR(n_cache)) + continue; + + rhashtable_walk_stop(&iter); + __prestera_k_arb_hw_state_upd(sw, n_cache); + rhashtable_walk_start(&iter); + } + rhashtable_walk_stop(&iter); + rhashtable_walk_exit(&iter); +} + +/* Propagate kernel event to hw */ +static void prestera_k_arb_n_evt(struct prestera_switch *sw, + struct neighbour *n) +{ + struct prestera_kern_neigh_cache_key n_key; + struct prestera_kern_neigh_cache *n_cache; + int err; + + err = prestera_util_neigh2nc_key(sw, n, &n_key); + if (err) + return; + + n_cache = prestera_kern_neigh_cache_find(sw, &n_key); + if (!n_cache) { + n_cache = prestera_kern_neigh_cache_get(sw, &n_key); + if (!n_cache) + return; + __prestera_k_arb_nc_kern_fib_fetch(sw, n_cache); + } + + __prestera_k_arb_nc_kern_n_fetch(sw, n_cache); + __prestera_k_arb_nc_apply(sw, n_cache); + + prestera_kern_neigh_cache_put(sw, n_cache); +} + +static void __prestera_k_arb_fib_evt2nc(struct prestera_switch *sw) +{ + struct prestera_kern_neigh_cache *n_cache; + struct rhashtable_iter iter; + + rhashtable_walk_enter(&sw->router->kern_neigh_cache_ht, &iter); + rhashtable_walk_start(&iter); + while (1) { + n_cache = rhashtable_walk_next(&iter); + + if (!n_cache) + break; + + if (IS_ERR(n_cache)) + continue; + + 
rhashtable_walk_stop(&iter);
+		__prestera_k_arb_nc_kern_fib_fetch(sw, n_cache);
+		__prestera_k_arb_nc_apply(sw, n_cache);
+		rhashtable_walk_start(&iter);
+	}
+	rhashtable_walk_stop(&iter);
+	rhashtable_walk_exit(&iter);
+}
+
static int
prestera_k_arb_fib_evt(struct prestera_switch *sw,
		       bool replace, /* replace or del */
-		       struct fib_entry_notifier_info *fen_info)
+		       struct fib_notifier_info *info)
{
	struct prestera_kern_fib_cache *tfib_cache, *bfib_cache; /* top/btm */
	struct prestera_kern_fib_cache_key fc_key;
	struct prestera_kern_fib_cache *fib_cache;
	int err;

-	prestera_util_fen_info2fib_cache_key(fen_info, &fc_key);
+	prestera_util_fen_info2fib_cache_key(info, &fc_key);
	fib_cache = prestera_kern_fib_cache_find(sw, &fc_key);
	if (fib_cache) {
		fib_cache->reachable = false;
@@ -304,10 +1181,7 @@ prestera_k_arb_fib_evt(struct prestera_switch *sw,
	}

	if (replace) {
-		fib_cache = prestera_kern_fib_cache_create(sw, &fc_key,
-							   fen_info->fi,
-							   fen_info->dscp,
-							   fen_info->type);
+		fib_cache = prestera_kern_fib_cache_create(sw, &fc_key, info);
		if (!fib_cache) {
			dev_err(sw->dev->dev, "fib_cache == NULL");
			return -ENOENT;
		}
@@ -331,9 +1205,65 @@ prestera_k_arb_fib_evt(struct prestera_switch *sw,
			dev_err(sw->dev->dev, "Applying fib_cache failed");
	}

+	/* Update all neighs to resolve overlaps and apply the related changes */
+	__prestera_k_arb_fib_evt2nc(sw);
+
	return 0;
}
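The abort path added below tears the caches down with rhashtable_free_and_destroy(), which hands every remaining entry to a callback before destroying the table, so no per-entry rhashtable_remove_fast() is needed. The generic shape of that pattern, as a sketch with hypothetical names:

static void example_abort_cb(void *ptr, void *arg)
{
	/* drop any software/hardware state tied to the entry via *arg,
	 * then free the entry itself; the table is destroyed afterwards
	 */
	kfree(ptr);
}

static void example_table_abort(struct rhashtable *ht, void *priv)
{
	rhashtable_free_and_destroy(ht, example_abort_cb, priv);
}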
+static void __prestera_k_arb_abort_neigh_ht_cb(void *ptr, void *arg)
+{
+	struct prestera_kern_neigh_cache *n_cache = ptr;
+	struct prestera_switch *sw = arg;
+
+	if (!list_empty(&n_cache->kern_fib_cache_list)) {
+		WARN_ON(1); /* BUG */
+		return;
+	}
+	__prestera_k_arb_n_offload_set(sw, n_cache, false);
+	n_cache->in_kernel = false;
+	/* No need to destroy the lpm entry here;
+	 * it will be torn down by destroy_ht.
+	 */
+	__prestera_kern_neigh_cache_destruct(sw, n_cache);
+	kfree(n_cache);
+}
+
+static void __prestera_k_arb_abort_fib_ht_cb(void *ptr, void *arg)
+{
+	struct prestera_kern_fib_cache *fib_cache = ptr;
+	struct prestera_switch *sw = arg;
+
+	__prestera_k_arb_fib_lpm_offload_set(sw, fib_cache,
+					     false, false,
+					     false);
+	__prestera_k_arb_fib_nh_offload_set(sw, fib_cache, NULL,
+					    false, false);
+	/* No need to destroy the lpm entry here;
+	 * it will be torn down by destroy_ht.
+	 */
+	__prestera_kern_fib_cache_destruct(sw, fib_cache);
+	kfree(fib_cache);
+}
+
+static void prestera_k_arb_abort(struct prestera_switch *sw)
+{
+	/* Remove all arbiter entries and the related hw objects. */
+	/* Sequence:
+	 * 1) Clear the arbiter tables, but don't touch hw
+	 * 2) Clear hw
+	 * We use this approach because an arbiter object is not mapped
+	 * directly to a hw object, so deleting one arbiter object may even
+	 * lead to the creation of a hw object (e.g. in the case of
+	 * overlapped routes).
+	 */
+	rhashtable_free_and_destroy(&sw->router->kern_fib_cache_ht,
+				    __prestera_k_arb_abort_fib_ht_cb,
+				    sw);
+	rhashtable_free_and_destroy(&sw->router->kern_neigh_cache_ht,
+				    __prestera_k_arb_abort_neigh_ht_cb,
+				    sw);
+}
+
static int __prestera_inetaddr_port_event(struct net_device *port_dev,
					  unsigned long event,
					  struct netlink_ext_ack *extack)
@@ -469,13 +1399,15 @@ static void __prestera_router_fib_event_work(struct work_struct *work)
	switch (fib_work->event) {
	case FIB_EVENT_ENTRY_REPLACE:
-		err = prestera_k_arb_fib_evt(sw, true, &fib_work->fen_info);
+		err = prestera_k_arb_fib_evt(sw, true,
+					     &fib_work->fen_info.info);
		if (err)
			goto err_out;

		break;
	case FIB_EVENT_ENTRY_DEL:
-		err = prestera_k_arb_fib_evt(sw, false, &fib_work->fen_info);
+		err = prestera_k_arb_fib_evt(sw, false,
+					     &fib_work->fen_info.info);
		if (err)
			goto err_out;
@@ -534,10 +1466,89 @@ static int __prestera_router_fib_event(struct notifier_block *nb,
	return NOTIFY_DONE;
}

+struct prestera_netevent_work {
+	struct work_struct work;
+	struct prestera_switch *sw;
+	struct neighbour *n;
+};
+
+static void prestera_router_neigh_event_work(struct work_struct *work)
+{
+	struct prestera_netevent_work *net_work =
+		container_of(work, struct prestera_netevent_work, work);
+	struct prestera_switch *sw = net_work->sw;
+	struct neighbour *n = net_work->n;
+
+	/* A neigh is not a hw-related object; it is stored only in the
+	 * kernel, so take the rtnl lock around the arbiter call.
+	 */
+	rtnl_lock();
+
+	prestera_k_arb_n_evt(sw, n);
+
+	neigh_release(n);
+	rtnl_unlock();
+	kfree(net_work);
+}
+
+static int prestera_router_netevent_event(struct notifier_block *nb,
+					  unsigned long event, void *ptr)
+{
+	struct prestera_netevent_work *net_work;
+	struct prestera_router *router;
+	struct neighbour *n = ptr;
+
+	router = container_of(nb, struct prestera_router, netevent_nb);
+
+	switch (event) {
+	case NETEVENT_NEIGH_UPDATE:
+		if (n->tbl->family != AF_INET)
+			return NOTIFY_DONE;
+
+		net_work = kzalloc(sizeof(*net_work), GFP_ATOMIC);
+		if (WARN_ON(!net_work))
+			return NOTIFY_BAD;
+
+		neigh_clone(n);
+		net_work->n = n;
+		net_work->sw = router->sw;
+		INIT_WORK(&net_work->work, prestera_router_neigh_event_work);
+		prestera_queue_work(&net_work->work);
+	}
+
+	return NOTIFY_DONE;
+}
+
+static void prestera_router_update_neighs_work(struct work_struct *work)
+{
+	struct prestera_router *router;
+
+	router = container_of(work, struct prestera_router,
+			      neighs_update.dw.work);
+	rtnl_lock();
+
+	prestera_k_arb_hw_evt(router->sw);
+
+	rtnl_unlock();
+	prestera_queue_delayed_work(&router->neighs_update.dw,
+				    msecs_to_jiffies(PRESTERA_NH_PROBE_INTERVAL));
+}
+
+static int prestera_neigh_work_init(struct prestera_switch *sw)
+{
+	INIT_DELAYED_WORK(&sw->router->neighs_update.dw,
+			  prestera_router_update_neighs_work);
+	prestera_queue_delayed_work(&sw->router->neighs_update.dw, 0);
+	return 0;
+}
+
+static void prestera_neigh_work_fini(struct prestera_switch *sw)
+{
+	cancel_delayed_work_sync(&sw->router->neighs_update.dw);
+}
+
int prestera_router_init(struct prestera_switch *sw)
{
	struct prestera_router *router;
-	int err;
+	int err, nhgrp_cache_bytes;

	router = kzalloc(sizeof(*sw->router), GFP_KERNEL);
	if (!router)
@@ -555,6 +1566,22 @@ int prestera_router_init(struct prestera_switch *sw)
	if (err)
		goto err_kern_fib_cache_ht_init;

+	err = rhashtable_init(&router->kern_neigh_cache_ht,
+			      &__prestera_kern_neigh_cache_ht_params);
+	if (err)
+		goto err_kern_neigh_cache_ht_init;
+
+	nhgrp_cache_bytes = sw->size_tbl_router_nexthop / 8 + 1;
+	router->nhgrp_hw_state_cache = kzalloc(nhgrp_cache_bytes, 
GFP_KERNEL); + if (!router->nhgrp_hw_state_cache) { + err = -ENOMEM; + goto err_nh_state_cache_alloc; + } + + err = prestera_neigh_work_init(sw); + if (err) + goto err_neigh_work_init; + router->inetaddr_valid_nb.notifier_call = __prestera_inetaddr_valid_cb; err = register_inetaddr_validator_notifier(&router->inetaddr_valid_nb); if (err) @@ -565,6 +1592,11 @@ int prestera_router_init(struct prestera_switch *sw) if (err) goto err_register_inetaddr_notifier; + router->netevent_nb.notifier_call = prestera_router_netevent_event; + err = register_netevent_notifier(&router->netevent_nb); + if (err) + goto err_register_netevent_notifier; + router->fib_nb.notifier_call = __prestera_router_fib_event; err = register_fib_notifier(&init_net, &router->fib_nb, /* TODO: flush fib entries */ NULL, NULL); @@ -574,10 +1606,18 @@ int prestera_router_init(struct prestera_switch *sw) return 0; err_register_fib_notifier: + unregister_netevent_notifier(&router->netevent_nb); +err_register_netevent_notifier: unregister_inetaddr_notifier(&router->inetaddr_nb); err_register_inetaddr_notifier: unregister_inetaddr_validator_notifier(&router->inetaddr_valid_nb); err_register_inetaddr_validator_notifier: + prestera_neigh_work_fini(sw); +err_neigh_work_init: + kfree(router->nhgrp_hw_state_cache); +err_nh_state_cache_alloc: + rhashtable_destroy(&router->kern_neigh_cache_ht); +err_kern_neigh_cache_ht_init: rhashtable_destroy(&router->kern_fib_cache_ht); err_kern_fib_cache_ht_init: prestera_router_hw_fini(sw); @@ -589,8 +1629,15 @@ err_router_lib_init: void prestera_router_fini(struct prestera_switch *sw) { unregister_fib_notifier(&init_net, &sw->router->fib_nb); + unregister_netevent_notifier(&sw->router->netevent_nb); unregister_inetaddr_notifier(&sw->router->inetaddr_nb); unregister_inetaddr_validator_notifier(&sw->router->inetaddr_valid_nb); + prestera_neigh_work_fini(sw); + prestera_queue_drain(); + + prestera_k_arb_abort(sw); + + kfree(sw->router->nhgrp_hw_state_cache); rhashtable_destroy(&sw->router->kern_fib_cache_ht); prestera_router_hw_fini(sw); kfree(sw->router); diff --git a/drivers/net/ethernet/marvell/prestera/prestera_router_hw.c b/drivers/net/ethernet/marvell/prestera/prestera_router_hw.c index 5b0cf3be9a9e..4f65df0ae5e8 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_router_hw.c +++ b/drivers/net/ethernet/marvell/prestera/prestera_router_hw.c @@ -8,10 +8,16 @@ #include "prestera_router_hw.h" #include "prestera_acl.h" -/* +--+ - * +------->|vr|<-+ - * | +--+ | - * | | +/* Nexthop is pointed + * to port (not rif) + * +-------+ + * +>|nexthop| + * | +-------+ + * | + * +--+ +-----++ + * +------->|vr|<-+ +>|nh_grp| + * | +--+ | | +------+ + * | | | * +-+-------+ +--+---+-+ * |rif_entry| |fib_node| * +---------+ +--------+ @@ -23,6 +29,8 @@ #define PRESTERA_NHGR_UNUSED (0) #define PRESTERA_NHGR_DROP (0xFFFFFFFF) +/* Need to merge it with router_manager */ +#define PRESTERA_NH_ACTIVE_JIFFER_FILTER 3000 /* ms */ static const struct rhashtable_params __prestera_fib_ht_params = { .key_offset = offsetof(struct prestera_fib_node, key), @@ -31,10 +39,45 @@ static const struct rhashtable_params __prestera_fib_ht_params = { .automatic_shrinking = true, }; +static const struct rhashtable_params __prestera_nh_neigh_ht_params = { + .key_offset = offsetof(struct prestera_nh_neigh, key), + .key_len = sizeof(struct prestera_nh_neigh_key), + .head_offset = offsetof(struct prestera_nh_neigh, ht_node), +}; + +static const struct rhashtable_params __prestera_nexthop_group_ht_params = { + .key_offset = offsetof(struct 
prestera_nexthop_group, key), + .key_len = sizeof(struct prestera_nexthop_group_key), + .head_offset = offsetof(struct prestera_nexthop_group, ht_node), +}; + +static int prestera_nexthop_group_set(struct prestera_switch *sw, + struct prestera_nexthop_group *nh_grp); +static bool +prestera_nexthop_group_util_hw_state(struct prestera_switch *sw, + struct prestera_nexthop_group *nh_grp); +static void prestera_fib_node_destroy_ht_cb(void *ptr, void *arg); + +/* TODO: move to router.h as macros */ +static bool prestera_nh_neigh_key_is_valid(struct prestera_nh_neigh_key *key) +{ + return memchr_inv(key, 0, sizeof(*key)) ? true : false; +} + int prestera_router_hw_init(struct prestera_switch *sw) { int err; + err = rhashtable_init(&sw->router->nh_neigh_ht, + &__prestera_nh_neigh_ht_params); + if (err) + goto err_nh_neigh_ht_init; + + err = rhashtable_init(&sw->router->nexthop_group_ht, + &__prestera_nexthop_group_ht_params); + if (err) + goto err_nexthop_grp_ht_init; + err = rhashtable_init(&sw->router->fib_ht, &__prestera_fib_ht_params); if (err) @@ -43,15 +86,25 @@ int prestera_router_hw_init(struct prestera_switch *sw) INIT_LIST_HEAD(&sw->router->vr_list); INIT_LIST_HEAD(&sw->router->rif_entry_list); + return 0; + err_fib_ht_init: + rhashtable_destroy(&sw->router->nexthop_group_ht); +err_nexthop_grp_ht_init: + rhashtable_destroy(&sw->router->nh_neigh_ht); +err_nh_neigh_ht_init: return 0; } void prestera_router_hw_fini(struct prestera_switch *sw) { + rhashtable_free_and_destroy(&sw->router->fib_ht, + prestera_fib_node_destroy_ht_cb, sw); WARN_ON(!list_empty(&sw->router->vr_list)); WARN_ON(!list_empty(&sw->router->rif_entry_list)); rhashtable_destroy(&sw->router->fib_ht); + rhashtable_destroy(&sw->router->nexthop_group_ht); + rhashtable_destroy(&sw->router->nh_neigh_ht); } static struct prestera_vr *__prestera_vr_find(struct prestera_switch *sw, @@ -232,6 +285,286 @@ err_kzalloc: return NULL; } +static void __prestera_nh_neigh_destroy(struct prestera_switch *sw, + struct prestera_nh_neigh *neigh) +{ + rhashtable_remove_fast(&sw->router->nh_neigh_ht, + &neigh->ht_node, + __prestera_nh_neigh_ht_params); + kfree(neigh); +} + +static struct prestera_nh_neigh * +__prestera_nh_neigh_create(struct prestera_switch *sw, + struct prestera_nh_neigh_key *key) +{ + struct prestera_nh_neigh *neigh; + int err; + + neigh = kzalloc(sizeof(*neigh), GFP_KERNEL); + if (!neigh) + goto err_kzalloc; + + memcpy(&neigh->key, key, sizeof(*key)); + neigh->info.connected = false; + INIT_LIST_HEAD(&neigh->nexthop_group_list); + err = rhashtable_insert_fast(&sw->router->nh_neigh_ht, + &neigh->ht_node, + __prestera_nh_neigh_ht_params); + if (err) + goto err_rhashtable_insert; + + return neigh; + +err_rhashtable_insert: + kfree(neigh); +err_kzalloc: + return NULL; +} + +struct prestera_nh_neigh * +prestera_nh_neigh_find(struct prestera_switch *sw, + struct prestera_nh_neigh_key *key) +{ + struct prestera_nh_neigh *nh_neigh; + + nh_neigh = rhashtable_lookup_fast(&sw->router->nh_neigh_ht, + key, __prestera_nh_neigh_ht_params); + return IS_ERR(nh_neigh) ? 
NULL : nh_neigh; +} + +struct prestera_nh_neigh * +prestera_nh_neigh_get(struct prestera_switch *sw, + struct prestera_nh_neigh_key *key) +{ + struct prestera_nh_neigh *neigh; + + neigh = prestera_nh_neigh_find(sw, key); + if (!neigh) + return __prestera_nh_neigh_create(sw, key); + + return neigh; +} + +void prestera_nh_neigh_put(struct prestera_switch *sw, + struct prestera_nh_neigh *neigh) +{ + if (list_empty(&neigh->nexthop_group_list)) + __prestera_nh_neigh_destroy(sw, neigh); +} + +/* Updates new prestera_neigh_info */ +int prestera_nh_neigh_set(struct prestera_switch *sw, + struct prestera_nh_neigh *neigh) +{ + struct prestera_nh_neigh_head *nh_head; + struct prestera_nexthop_group *nh_grp; + int err; + + list_for_each_entry(nh_head, &neigh->nexthop_group_list, head) { + nh_grp = nh_head->this; + err = prestera_nexthop_group_set(sw, nh_grp); + if (err) + return err; + } + + return 0; +} + +bool prestera_nh_neigh_util_hw_state(struct prestera_switch *sw, + struct prestera_nh_neigh *nh_neigh) +{ + bool state; + struct prestera_nh_neigh_head *nh_head, *tmp; + + state = false; + list_for_each_entry_safe(nh_head, tmp, + &nh_neigh->nexthop_group_list, head) { + state = prestera_nexthop_group_util_hw_state(sw, nh_head->this); + if (state) + goto out; + } + +out: + return state; +} + +static struct prestera_nexthop_group * +__prestera_nexthop_group_create(struct prestera_switch *sw, + struct prestera_nexthop_group_key *key) +{ + struct prestera_nexthop_group *nh_grp; + struct prestera_nh_neigh *nh_neigh; + int nh_cnt, err, gid; + + nh_grp = kzalloc(sizeof(*nh_grp), GFP_KERNEL); + if (!nh_grp) + goto err_kzalloc; + + memcpy(&nh_grp->key, key, sizeof(*key)); + for (nh_cnt = 0; nh_cnt < PRESTERA_NHGR_SIZE_MAX; nh_cnt++) { + if (!prestera_nh_neigh_key_is_valid(&nh_grp->key.neigh[nh_cnt])) + break; + + nh_neigh = prestera_nh_neigh_get(sw, + &nh_grp->key.neigh[nh_cnt]); + if (!nh_neigh) + goto err_nh_neigh_get; + + nh_grp->nh_neigh_head[nh_cnt].neigh = nh_neigh; + nh_grp->nh_neigh_head[nh_cnt].this = nh_grp; + list_add(&nh_grp->nh_neigh_head[nh_cnt].head, + &nh_neigh->nexthop_group_list); + } + + err = prestera_hw_nh_group_create(sw, nh_cnt, &nh_grp->grp_id); + if (err) + goto err_nh_group_create; + + err = prestera_nexthop_group_set(sw, nh_grp); + if (err) + goto err_nexthop_group_set; + + err = rhashtable_insert_fast(&sw->router->nexthop_group_ht, + &nh_grp->ht_node, + __prestera_nexthop_group_ht_params); + if (err) + goto err_ht_insert; + + /* reset cache for created group */ + gid = nh_grp->grp_id; + sw->router->nhgrp_hw_state_cache[gid / 8] &= ~BIT(gid % 8); + + return nh_grp; + +err_ht_insert: +err_nexthop_group_set: + prestera_hw_nh_group_delete(sw, nh_cnt, nh_grp->grp_id); +err_nh_group_create: +err_nh_neigh_get: + for (nh_cnt--; nh_cnt >= 0; nh_cnt--) { + list_del(&nh_grp->nh_neigh_head[nh_cnt].head); + prestera_nh_neigh_put(sw, nh_grp->nh_neigh_head[nh_cnt].neigh); + } + + kfree(nh_grp); +err_kzalloc: + return NULL; +} + +static void +__prestera_nexthop_group_destroy(struct prestera_switch *sw, + struct prestera_nexthop_group *nh_grp) +{ + struct prestera_nh_neigh *nh_neigh; + int nh_cnt; + + rhashtable_remove_fast(&sw->router->nexthop_group_ht, + &nh_grp->ht_node, + __prestera_nexthop_group_ht_params); + + for (nh_cnt = 0; nh_cnt < PRESTERA_NHGR_SIZE_MAX; nh_cnt++) { + nh_neigh = nh_grp->nh_neigh_head[nh_cnt].neigh; + if (!nh_neigh) + break; + + list_del(&nh_grp->nh_neigh_head[nh_cnt].head); + prestera_nh_neigh_put(sw, nh_neigh); + } + + prestera_hw_nh_group_delete(sw, nh_cnt, 
nh_grp->grp_id);
+	kfree(nh_grp);
+}
+
+static struct prestera_nexthop_group *
+__prestera_nexthop_group_find(struct prestera_switch *sw,
+			      struct prestera_nexthop_group_key *key)
+{
+	struct prestera_nexthop_group *nh_grp;
+
+	nh_grp = rhashtable_lookup_fast(&sw->router->nexthop_group_ht,
+					key, __prestera_nexthop_group_ht_params);
+	return IS_ERR(nh_grp) ? NULL : nh_grp;
+}
+
+static struct prestera_nexthop_group *
+prestera_nexthop_group_get(struct prestera_switch *sw,
+			   struct prestera_nexthop_group_key *key)
+{
+	struct prestera_nexthop_group *nh_grp;
+
+	nh_grp = __prestera_nexthop_group_find(sw, key);
+	if (nh_grp) {
+		refcount_inc(&nh_grp->refcount);
+	} else {
+		nh_grp = __prestera_nexthop_group_create(sw, key);
+		if (IS_ERR(nh_grp))
+			return ERR_CAST(nh_grp);
+
+		refcount_set(&nh_grp->refcount, 1);
+	}
+
+	return nh_grp;
+}
+
+static void prestera_nexthop_group_put(struct prestera_switch *sw,
+				       struct prestera_nexthop_group *nh_grp)
+{
+	if (refcount_dec_and_test(&nh_grp->refcount))
+		__prestera_nexthop_group_destroy(sw, nh_grp);
+}
+
+/* Push the updated nh_neigh info of every member to hw */
+static int prestera_nexthop_group_set(struct prestera_switch *sw,
+				      struct prestera_nexthop_group *nh_grp)
+{
+	struct prestera_neigh_info info[PRESTERA_NHGR_SIZE_MAX];
+	struct prestera_nh_neigh *neigh;
+	int nh_cnt;
+
+	memset(&info[0], 0, sizeof(info));
+	for (nh_cnt = 0; nh_cnt < PRESTERA_NHGR_SIZE_MAX; nh_cnt++) {
+		neigh = nh_grp->nh_neigh_head[nh_cnt].neigh;
+		if (!neigh)
+			break;
+
+		memcpy(&info[nh_cnt], &neigh->info, sizeof(neigh->info));
+	}
+
+	return prestera_hw_nh_entries_set(sw, nh_cnt, &info[0], nh_grp->grp_id);
+}
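prestera_nexthop_group_get()/put() above follow the usual shared-object lifecycle: the first user creates the group with a refcount of 1, later users with the same key take a reference to the cached object, and the last put destroys it. Reduced to its skeleton (hypothetical example object, not part of this patch):

struct example_obj {
	refcount_t refcount;
};

static struct example_obj *example_get(struct example_obj *cached)
{
	if (cached) {
		refcount_inc(&cached->refcount);
		return cached;
	}
	cached = kzalloc(sizeof(*cached), GFP_KERNEL);
	if (cached)
		refcount_set(&cached->refcount, 1);
	return cached;
}

static void example_put(struct example_obj *obj)
{
	if (refcount_dec_and_test(&obj->refcount))
		kfree(obj);
}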
+static bool
+prestera_nexthop_group_util_hw_state(struct prestera_switch *sw,
+				     struct prestera_nexthop_group *nh_grp)
+{
+	int err;
+	u32 buf_size = sw->size_tbl_router_nexthop / 8 + 1;
+	u32 gid = nh_grp->grp_id;
+	u8 *cache = sw->router->nhgrp_hw_state_cache;
+
+	/* Antijitter: prevent the situation where the state of an nh_grp is
+	 * read twice within a short time and the state bit is already
+	 * cleared on the second read. So the active state read from hw is
+	 * kept for PRESTERA_NH_ACTIVE_JIFFER_FILTER ms after the last
+	 * refresh.
+	 */
+	if (!time_before(jiffies, sw->router->nhgrp_hw_cache_kick +
+			 msecs_to_jiffies(PRESTERA_NH_ACTIVE_JIFFER_FILTER))) {
+		err = prestera_hw_nhgrp_blk_get(sw, cache, buf_size);
+		if (err) {
+			pr_err("Failed to get nh_grp hw state");
+			return false;
+		}
+
+		sw->router->nhgrp_hw_cache_kick = jiffies;
+	}
+
+	if (cache[gid / 8] & BIT(gid % 8))
+		return true;
+
+	return false;
+}
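To make the bit arithmetic above concrete: the hw state block is a plain bitmap with one bit per group id, re-read from hw at most once per filter interval. With size_tbl_router_nexthop = 100 the cache is 100 / 8 + 1 = 13 bytes, and group id 42 lands in byte 42 / 8 = 5, bit 42 % 8 = 2. As a tiny illustrative helper (not part of the patch):

static bool example_nhgrp_is_active(const u8 *cache, u32 gid)
{
	/* one bit per group id: byte gid / 8, bit gid % 8 */
	return cache[gid / 8] & BIT(gid % 8);
}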
struct prestera_fib_node *
prestera_fib_node_find(struct prestera_switch *sw, struct prestera_fib_key *key)
{
@@ -251,6 +584,9 @@ static void __prestera_fib_node_destruct(struct prestera_switch *sw,
	prestera_hw_lpm_del(sw, vr->hw_vr_id, fib_node->key.addr.u.ipv4,
			    fib_node->key.prefix_len);
	switch (fib_node->info.type) {
+	case PRESTERA_FIB_TYPE_UC_NH:
+		prestera_nexthop_group_put(sw, fib_node->info.nh_grp);
+		break;
	case PRESTERA_FIB_TYPE_TRAP:
		break;
	case PRESTERA_FIB_TYPE_DROP:
@@ -272,10 +608,20 @@ void prestera_fib_node_destroy(struct prestera_switch *sw,
	kfree(fib_node);
}

+static void prestera_fib_node_destroy_ht_cb(void *ptr, void *arg)
+{
+	struct prestera_fib_node *node = ptr;
+	struct prestera_switch *sw = arg;
+
+	__prestera_fib_node_destruct(sw, node);
+	kfree(node);
+}
+
struct prestera_fib_node *
prestera_fib_node_create(struct prestera_switch *sw,
			 struct prestera_fib_key *key,
-			 enum prestera_fib_type fib_type)
+			 enum prestera_fib_type fib_type,
+			 struct prestera_nexthop_group_key *nh_grp_key)
{
	struct prestera_fib_node *fib_node;
	u32 grp_id;
@@ -302,6 +648,14 @@ prestera_fib_node_create(struct prestera_switch *sw,
	case PRESTERA_FIB_TYPE_DROP:
		grp_id = PRESTERA_NHGR_DROP;
		break;
+	case PRESTERA_FIB_TYPE_UC_NH:
+		fib_node->info.nh_grp = prestera_nexthop_group_get(sw,
+								   nh_grp_key);
+		if (!fib_node->info.nh_grp)
+			goto err_nh_grp_get;
+
+		grp_id = fib_node->info.nh_grp->grp_id;
+		break;
	default:
		pr_err("Unsupported fib_type %d", fib_type);
		goto err_nh_grp_get;
@@ -323,6 +677,8 @@ err_ht_insert:
	prestera_hw_lpm_del(sw, vr->hw_vr_id, key->addr.u.ipv4,
			    key->prefix_len);
err_lpm_add:
+	if (fib_type == PRESTERA_FIB_TYPE_UC_NH)
+		prestera_nexthop_group_put(sw, fib_node->info.nh_grp);
err_nh_grp_get:
	prestera_vr_put(sw, vr);
err_vr_get:
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_router_hw.h b/drivers/net/ethernet/marvell/prestera/prestera_router_hw.h index 67dbb49c8bd4..9ca97919c863 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_router_hw.h +++ b/drivers/net/ethernet/marvell/prestera/prestera_router_hw.h @@ -31,6 +31,63 @@ struct prestera_ip_addr {
	PRESTERA_IPV4 = 0,
	PRESTERA_IPV6
	} v;
+#define PRESTERA_IP_ADDR_PLEN(V) ((V) == PRESTERA_IPV4 ? 32 : \
+				  /* (V) == PRESTERA_IPV6 ? */ 128 /* : 0 */)
+};
+
+struct prestera_nh_neigh_key {
+	struct prestera_ip_addr addr;
+	/* It seems the rif may be obsolete, since there is already an iface
+	 * in info. A key can contain functional fields, or fields that are
+	 * used to filter out duplicate objects on the logical level (before
+	 * they are passed to HW)... a key can also be used to cover hardware
+	 * restrictions. In our case the rif is a logical interface (it can
+	 * even be a VLAN), which is used in combination with the IP address
+	 * (which is also unrelated to the hardware nexthop) to provide
+	 * logical deduplication of the created nexthops. You can even think
+	 * of rif+IPaddr as just a cookie.
+	 */
+	/* struct prestera_rif *rif; */
+	/* Used just as a cookie, together with addr, to separate ARP domains */
+	void *rif;
+};
+
+/* Used for the hw call */
+struct prestera_neigh_info {
+	struct prestera_iface iface;
+	unsigned char ha[ETH_ALEN];
+	u8 connected; /* bool: indicates whether mac/oif are valid */
+	u8 __pad[1];
+};
+
+/* Used to notify nh about neigh change */
+struct prestera_nh_neigh {
+	struct prestera_nh_neigh_key key;
+	struct prestera_neigh_info info;
+	struct rhash_head ht_node; /* node of prestera_vr */
+	struct list_head nexthop_group_list;
+};
+
+#define PRESTERA_NHGR_SIZE_MAX 4
+
+struct prestera_nexthop_group {
+	struct prestera_nexthop_group_key {
+		struct prestera_nh_neigh_key neigh[PRESTERA_NHGR_SIZE_MAX];
+	} key;
+	/* Store the intermediate object inline here;
+	 * this avoids the overhead of a separate kzalloc call.
+	 */
+	/* nh_neigh is used only to notify the nexthop_group */
+	struct prestera_nh_neigh_head {
+		struct prestera_nexthop_group *this;
+		struct list_head head;
+		/* The pointer to the neigh is not strictly necessary;
+		 * it is kept to avoid looking up nh_neigh by key (n) on
+		 * destroy.
+		 */
+		struct prestera_nh_neigh *neigh;
+	} nh_neigh_head[PRESTERA_NHGR_SIZE_MAX];
+	struct rhash_head ht_node; /* node of prestera_vr */
+	refcount_t refcount;
+	u32 grp_id; /* hw */
};

struct prestera_fib_key {
@@ -44,12 +101,16 @@ struct prestera_fib_info {
	struct list_head vr_node;
	enum prestera_fib_type {
		PRESTERA_FIB_TYPE_INVALID = 0,
+		/* must be pointer to nh_grp id */
+		PRESTERA_FIB_TYPE_UC_NH,
		/* It can be connected route
		 * and will be overlapped with neighbours
		 */
		PRESTERA_FIB_TYPE_TRAP,
		PRESTERA_FIB_TYPE_DROP
	} type;
+	/* Valid only if type == UC_NH */
+	struct prestera_nexthop_group *nh_grp;
};

struct prestera_fib_node {
@@ -67,6 +128,18 @@ struct prestera_rif_entry *
prestera_rif_entry_create(struct prestera_switch *sw,
			  struct prestera_rif_entry_key *k,
			  u32 tb_id, const unsigned char *addr);
+struct prestera_nh_neigh *
+prestera_nh_neigh_find(struct prestera_switch *sw,
+		       struct prestera_nh_neigh_key *key);
+struct prestera_nh_neigh *
+prestera_nh_neigh_get(struct prestera_switch *sw,
+		      struct prestera_nh_neigh_key *key);
+void prestera_nh_neigh_put(struct prestera_switch *sw,
+			   struct prestera_nh_neigh *neigh);
+int prestera_nh_neigh_set(struct prestera_switch *sw,
+			  struct prestera_nh_neigh *neigh);
+bool prestera_nh_neigh_util_hw_state(struct prestera_switch *sw,
+				     struct prestera_nh_neigh *nh_neigh);
struct prestera_fib_node *prestera_fib_node_find(struct prestera_switch *sw,
						 struct prestera_fib_key *key);
void prestera_fib_node_destroy(struct prestera_switch *sw,
@@ -74,7 +147,8 @@ void prestera_fib_node_destroy(struct prestera_switch *sw,
struct prestera_fib_node *
prestera_fib_node_create(struct prestera_switch *sw,
			 struct prestera_fib_key *key,
-			 enum prestera_fib_type fib_type);
+			 enum prestera_fib_type fib_type,
+			 struct prestera_nexthop_group_key *nh_grp_key);

int prestera_router_hw_init(struct prestera_switch *sw);
void prestera_router_hw_fini(struct prestera_switch *sw);
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.c b/drivers/net/ethernet/mediatek/mtk_ppe.c index 887f430734f7..ae00e572390d 100644 --- a/drivers/net/ethernet/mediatek/mtk_ppe.c +++ b/drivers/net/ethernet/mediatek/mtk_ppe.c @@ -442,7 +442,7 @@ __mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
		struct mtk_foe_entry *hwe = mtk_foe_get_entry(ppe, entry->hash);

		hwe->ib1 &= ~MTK_FOE_IB1_STATE;
-		hwe->ib1 |= FIELD_PREP(MTK_FOE_IB1_STATE, MTK_FOE_STATE_UNBIND);
+		hwe->ib1 |= FIELD_PREP(MTK_FOE_IB1_STATE, MTK_FOE_STATE_INVALID);
		dma_wmb();
	}
	entry->hash = 0xffff;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 95a232fb2127..26a23047f1f3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ 
b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -109,12 +109,8 @@ struct page_pool; #define MLX5_MPWRQ_MAX_PAGES_PER_WQE \ rounddown_pow_of_two(MLX5_UMR_MAX_MTT_SPACE / sizeof(struct mlx5_mtt)) -#define MLX5_ALIGN_MTTS(mtts) (ALIGN(mtts, 8)) -#define MLX5_ALIGNED_MTTS_OCTW(mtts) ((mtts) / 2) -#define MLX5_MTT_OCTW(mtts) (MLX5_ALIGNED_MTTS_OCTW(MLX5_ALIGN_MTTS(mtts))) -#define MLX5_KSM_OCTW(ksms) (ksms) #define MLX5E_MAX_RQ_NUM_MTTS \ - (ALIGN_DOWN(U16_MAX, 4) * 2) /* So that MLX5_MTT_OCTW(num_mtts) fits into u16 */ + (ALIGN_DOWN(U16_MAX, 4) * 2) /* Fits into u16 and aligned by WQEBB. */ #define MLX5E_MAX_RQ_NUM_KSMS (U16_MAX - 1) /* So that num_ksms fits into u16. */ #define MLX5E_ORDER2_MAX_PACKET_MTU (order_base_2(10 * 1024)) @@ -181,12 +177,6 @@ do { \ #define mlx5e_state_dereference(priv, p) \ rcu_dereference_protected((p), lockdep_is_held(&(priv)->state_lock)) -enum mlx5e_rq_group { - MLX5E_RQ_GROUP_REGULAR, - MLX5E_RQ_GROUP_XSK, -#define MLX5E_NUM_RQ_GROUPS(g) (1 + MLX5E_RQ_GROUP_##g) -}; - static inline u8 mlx5e_get_num_lag_ports(struct mlx5_core_dev *mdev) { if (mlx5_lag_is_lacp_owner(mdev)) @@ -660,6 +650,7 @@ struct mlx5e_rq_frags_info { u8 num_frags; u8 log_num_frags; u8 wqe_bulk; + u8 wqe_index_mask; }; struct mlx5e_dma_info { @@ -686,6 +677,13 @@ struct mlx5e_hw_gro_data { int second_ip_id; }; +enum mlx5e_mpwrq_umr_mode { + MLX5E_MPWRQ_UMR_MODE_ALIGNED, + MLX5E_MPWRQ_UMR_MODE_UNALIGNED, + MLX5E_MPWRQ_UMR_MODE_OVERSIZED, + MLX5E_MPWRQ_UMR_MODE_TRIPLE, +}; + struct mlx5e_rq { /* data path */ union { @@ -713,7 +711,7 @@ struct mlx5e_rq { u8 pages_per_wqe; u8 umr_wqebbs; u8 mtts_per_wqe; - u8 unaligned; + u8 umr_mode; struct mlx5e_shampo_hd *shampo; } mpwqe; }; @@ -1004,7 +1002,6 @@ struct mlx5e_profile { mlx5e_stats_grp_t *stats_grps; const struct mlx5e_rx_handlers *rx_handlers; int max_tc; - u8 rq_groups; u32 features; }; @@ -1014,7 +1011,7 @@ struct mlx5e_profile { void mlx5e_build_ptys2ethtool_map(void); bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev, u8 page_shift, - bool unaligned); + enum mlx5e_mpwrq_umr_mode umr_mode); void mlx5e_shampo_dealloc_hd(struct mlx5e_rq *rq, u16 len, u16 start, bool close); void mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats); @@ -1042,6 +1039,7 @@ struct mlx5e_rq_param; int mlx5e_open_rq(struct mlx5e_params *params, struct mlx5e_rq_param *param, struct mlx5e_xsk_param *xsk, int node, struct mlx5e_rq *rq); +#define MLX5E_RQ_WQES_TIMEOUT 20000 /* msecs */ int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq, int wait_time); void mlx5e_close_rq(struct mlx5e_rq *rq); int mlx5e_create_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param); @@ -1096,7 +1094,7 @@ void mlx5e_activate_priv_channels(struct mlx5e_priv *priv); void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv); int mlx5e_ptp_rx_manage_fs_ctx(struct mlx5e_priv *priv, void *ctx); -int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state, int next_state); +int mlx5e_flush_rq(struct mlx5e_rq *rq, int curr_state); void mlx5e_activate_rq(struct mlx5e_rq *rq); void mlx5e_deactivate_rq(struct mlx5e_rq *rq); void mlx5e_activate_icosq(struct mlx5e_icosq *icosq); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/channels.c b/drivers/net/ethernet/mellanox/mlx5/core/en/channels.c index e7c14c0de0a7..48581ea3adcb 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/channels.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/channels.c @@ -10,28 +10,33 @@ unsigned int mlx5e_channels_get_num(struct mlx5e_channels *chs) return chs->num; 
} -void mlx5e_channels_get_regular_rqn(struct mlx5e_channels *chs, unsigned int ix, u32 *rqn) +static struct mlx5e_channel *mlx5e_channels_get(struct mlx5e_channels *chs, unsigned int ix) { - struct mlx5e_channel *c; + WARN_ON_ONCE(ix >= mlx5e_channels_get_num(chs)); + return chs->c[ix]; +} - WARN_ON(ix >= mlx5e_channels_get_num(chs)); - c = chs->c[ix]; +bool mlx5e_channels_is_xsk(struct mlx5e_channels *chs, unsigned int ix) +{ + struct mlx5e_channel *c = mlx5e_channels_get(chs, ix); - *rqn = c->rq.rqn; + return test_bit(MLX5E_CHANNEL_STATE_XSK, c->state); } -bool mlx5e_channels_get_xsk_rqn(struct mlx5e_channels *chs, unsigned int ix, u32 *rqn) +void mlx5e_channels_get_regular_rqn(struct mlx5e_channels *chs, unsigned int ix, u32 *rqn) { - struct mlx5e_channel *c; + struct mlx5e_channel *c = mlx5e_channels_get(chs, ix); - WARN_ON(ix >= mlx5e_channels_get_num(chs)); - c = chs->c[ix]; + *rqn = c->rq.rqn; +} - if (!test_bit(MLX5E_CHANNEL_STATE_XSK, c->state)) - return false; +void mlx5e_channels_get_xsk_rqn(struct mlx5e_channels *chs, unsigned int ix, u32 *rqn) +{ + struct mlx5e_channel *c = mlx5e_channels_get(chs, ix); + + WARN_ON_ONCE(!test_bit(MLX5E_CHANNEL_STATE_XSK, c->state)); *rqn = c->xskrq.rqn; - return true; } bool mlx5e_channels_get_ptp_rqn(struct mlx5e_channels *chs, u32 *rqn) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/channels.h b/drivers/net/ethernet/mellanox/mlx5/core/en/channels.h index ca00cbc827cb..637ca90daaa8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/channels.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/channels.h @@ -9,8 +9,9 @@ struct mlx5e_channels; unsigned int mlx5e_channels_get_num(struct mlx5e_channels *chs); +bool mlx5e_channels_is_xsk(struct mlx5e_channels *chs, unsigned int ix); void mlx5e_channels_get_regular_rqn(struct mlx5e_channels *chs, unsigned int ix, u32 *rqn); -bool mlx5e_channels_get_xsk_rqn(struct mlx5e_channels *chs, unsigned int ix, u32 *rqn); +void mlx5e_channels_get_xsk_rqn(struct mlx5e_channels *chs, unsigned int ix, u32 *rqn); bool mlx5e_channels_get_ptp_rqn(struct mlx5e_channels *chs, u32 *rqn); #endif /* __MLX5_EN_CHANNELS_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c index 68bc66cbd8a5..29dd3a04c154 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c @@ -27,9 +27,80 @@ u8 mlx5e_mpwrq_page_shift(struct mlx5_core_dev *mdev, struct mlx5e_xsk_param *xs return max(req_page_shift, min_page_shift); } -u8 mlx5e_mpwrq_log_wqe_sz(struct mlx5_core_dev *mdev, u8 page_shift, bool unaligned) +enum mlx5e_mpwrq_umr_mode +mlx5e_mpwrq_umr_mode(struct mlx5_core_dev *mdev, struct mlx5e_xsk_param *xsk) +{ + /* Different memory management schemes use different mechanisms to map + * user-mode memory. The stricter guarantees we have, the faster + * mechanisms we use: + * 1. MTT - direct mapping in page granularity. + * 2. KSM - indirect mapping to another MKey to arbitrary addresses, but + * all mappings have the same size. + * 3. KLM - indirect mapping to another MKey to arbitrary addresses, and + * mappings can have different sizes. + */ + u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk); + bool unaligned = xsk ? 
xsk->unaligned : false; + bool oversized = false; + + if (xsk) { + oversized = xsk->chunk_size < (1 << page_shift); + WARN_ON_ONCE(xsk->chunk_size > (1 << page_shift)); + } + + /* XSK frame size doesn't match the UMR page size, either because the + * frame size is not a power of two, or it's smaller than the minimal + * page size supported by the firmware. + * It's possible to receive packets bigger than MTU in certain setups. + * To avoid writing over the XSK frame boundary, the top region of each + * stride is mapped to a garbage page, resulting in two mappings of + * different sizes per frame. + */ + if (oversized) { + /* An optimization for frame sizes equal to 3 * power_of_two. + * 3 KSMs point to the frame, and one KSM points to the garbage + * page, which works faster than KLM. + */ + if (xsk->chunk_size % 3 == 0 && is_power_of_2(xsk->chunk_size / 3)) + return MLX5E_MPWRQ_UMR_MODE_TRIPLE; + + return MLX5E_MPWRQ_UMR_MODE_OVERSIZED; + } + + /* XSK frames can start at arbitrary unaligned locations, but they all + * have the same size which is a power of two. It allows to optimize to + * one KSM per frame. + */ + if (unaligned) + return MLX5E_MPWRQ_UMR_MODE_UNALIGNED; + + /* XSK: frames are naturally aligned, MTT can be used. + * Non-XSK: Allocations happen in units of CPU pages, therefore, the + * mappings are naturally aligned. + */ + return MLX5E_MPWRQ_UMR_MODE_ALIGNED; +} + +u8 mlx5e_mpwrq_umr_entry_size(enum mlx5e_mpwrq_umr_mode mode) { - u8 umr_entry_size = unaligned ? sizeof(struct mlx5_ksm) : sizeof(struct mlx5_mtt); + switch (mode) { + case MLX5E_MPWRQ_UMR_MODE_ALIGNED: + return sizeof(struct mlx5_mtt); + case MLX5E_MPWRQ_UMR_MODE_UNALIGNED: + return sizeof(struct mlx5_ksm); + case MLX5E_MPWRQ_UMR_MODE_OVERSIZED: + return sizeof(struct mlx5_klm) * 2; + case MLX5E_MPWRQ_UMR_MODE_TRIPLE: + return sizeof(struct mlx5_ksm) * 4; + } + WARN_ONCE(1, "MPWRQ UMR mode %d is not known\n", mode); + return 0; +} + +u8 mlx5e_mpwrq_log_wqe_sz(struct mlx5_core_dev *mdev, u8 page_shift, + enum mlx5e_mpwrq_umr_mode umr_mode) +{ + u8 umr_entry_size = mlx5e_mpwrq_umr_entry_size(umr_mode); u8 max_pages_per_wqe, max_log_mpwqe_size; u16 max_wqe_size; @@ -44,13 +115,21 @@ u8 mlx5e_mpwrq_log_wqe_sz(struct mlx5_core_dev *mdev, u8 page_shift, bool unalig return min_t(u8, max_log_mpwqe_size, MLX5_MPWRQ_MAX_LOG_WQE_SZ); } -u8 mlx5e_mpwrq_pages_per_wqe(struct mlx5_core_dev *mdev, u8 page_shift, bool unaligned) +u8 mlx5e_mpwrq_pages_per_wqe(struct mlx5_core_dev *mdev, u8 page_shift, + enum mlx5e_mpwrq_umr_mode umr_mode) { - u8 log_wqe_sz = mlx5e_mpwrq_log_wqe_sz(mdev, page_shift, unaligned); + u8 log_wqe_sz = mlx5e_mpwrq_log_wqe_sz(mdev, page_shift, umr_mode); u8 pages_per_wqe; pages_per_wqe = log_wqe_sz > page_shift ? (1 << (log_wqe_sz - page_shift)) : 1; + /* Two MTTs are needed to form an octword. The number of MTTs is encoded + * in octwords in a UMR WQE, so we need at least two to avoid mapping + * garbage addresses. + */ + if (WARN_ON_ONCE(pages_per_wqe < 2 && umr_mode == MLX5E_MPWRQ_UMR_MODE_ALIGNED)) + pages_per_wqe = 2; + /* Sanity check for further calculations to succeed. 
*/ BUILD_BUG_ON(MLX5_MPWRQ_MAX_PAGES_PER_WQE > 64); if (WARN_ON_ONCE(pages_per_wqe > MLX5_MPWRQ_MAX_PAGES_PER_WQE)) @@ -59,10 +138,11 @@ u8 mlx5e_mpwrq_pages_per_wqe(struct mlx5_core_dev *mdev, u8 page_shift, bool una return pages_per_wqe; } -u16 mlx5e_mpwrq_umr_wqe_sz(struct mlx5_core_dev *mdev, u8 page_shift, bool unaligned) +u16 mlx5e_mpwrq_umr_wqe_sz(struct mlx5_core_dev *mdev, u8 page_shift, + enum mlx5e_mpwrq_umr_mode umr_mode) { - u8 umr_entry_size = unaligned ? sizeof(struct mlx5_ksm) : sizeof(struct mlx5_mtt); - u8 pages_per_wqe = mlx5e_mpwrq_pages_per_wqe(mdev, page_shift, unaligned); + u8 pages_per_wqe = mlx5e_mpwrq_pages_per_wqe(mdev, page_shift, umr_mode); + u8 umr_entry_size = mlx5e_mpwrq_umr_entry_size(umr_mode); u16 umr_wqe_sz; umr_wqe_sz = sizeof(struct mlx5e_umr_wqe) + @@ -73,44 +153,64 @@ u16 mlx5e_mpwrq_umr_wqe_sz(struct mlx5_core_dev *mdev, u8 page_shift, bool unali return umr_wqe_sz; } -u8 mlx5e_mpwrq_umr_wqebbs(struct mlx5_core_dev *mdev, u8 page_shift, bool unaligned) +u8 mlx5e_mpwrq_umr_wqebbs(struct mlx5_core_dev *mdev, u8 page_shift, + enum mlx5e_mpwrq_umr_mode umr_mode) { - return DIV_ROUND_UP(mlx5e_mpwrq_umr_wqe_sz(mdev, page_shift, unaligned), + return DIV_ROUND_UP(mlx5e_mpwrq_umr_wqe_sz(mdev, page_shift, umr_mode), MLX5_SEND_WQE_BB); } -u8 mlx5e_mpwrq_mtts_per_wqe(struct mlx5_core_dev *mdev, u8 page_shift, bool unaligned) +u8 mlx5e_mpwrq_mtts_per_wqe(struct mlx5_core_dev *mdev, u8 page_shift, + enum mlx5e_mpwrq_umr_mode umr_mode) { + u8 pages_per_wqe = mlx5e_mpwrq_pages_per_wqe(mdev, page_shift, umr_mode); + /* Add another page as a buffer between WQEs. This page will absorb * write overflow by the hardware, when receiving packets larger than * MTU. These oversize packets are dropped by the driver at a later * stage. */ - return MLX5_ALIGN_MTTS(mlx5e_mpwrq_pages_per_wqe(mdev, page_shift, unaligned) + 1); -} - -u32 mlx5e_mpwrq_max_num_entries(struct mlx5_core_dev *mdev, bool unaligned) -{ - if (unaligned) - return min(MLX5E_MAX_RQ_NUM_KSMS, - 1 << MLX5_CAP_GEN(mdev, log_max_klm_list_size)); - - return MLX5E_MAX_RQ_NUM_MTTS; + return ALIGN(pages_per_wqe + 1, + MLX5_SEND_WQE_BB / mlx5e_mpwrq_umr_entry_size(umr_mode)); +} + +u32 mlx5e_mpwrq_max_num_entries(struct mlx5_core_dev *mdev, + enum mlx5e_mpwrq_umr_mode umr_mode) +{ + /* Same limits apply to KSMs and KLMs. */ + u32 klm_limit = min(MLX5E_MAX_RQ_NUM_KSMS, + 1 << MLX5_CAP_GEN(mdev, log_max_klm_list_size)); + + switch (umr_mode) { + case MLX5E_MPWRQ_UMR_MODE_ALIGNED: + return MLX5E_MAX_RQ_NUM_MTTS; + case MLX5E_MPWRQ_UMR_MODE_UNALIGNED: + return klm_limit; + case MLX5E_MPWRQ_UMR_MODE_OVERSIZED: + /* Each entry is two KLMs. */ + return klm_limit / 2; + case MLX5E_MPWRQ_UMR_MODE_TRIPLE: + /* Each entry is four KSMs. 
*/ + return klm_limit / 4; + } + WARN_ONCE(1, "MPWRQ UMR mode %d is not known\n", umr_mode); + return 0; } static u8 mlx5e_mpwrq_max_log_rq_size(struct mlx5_core_dev *mdev, u8 page_shift, - bool unaligned) + enum mlx5e_mpwrq_umr_mode umr_mode) { - u8 mtts_per_wqe = mlx5e_mpwrq_mtts_per_wqe(mdev, page_shift, unaligned); - u32 max_entries = mlx5e_mpwrq_max_num_entries(mdev, unaligned); + u8 mtts_per_wqe = mlx5e_mpwrq_mtts_per_wqe(mdev, page_shift, umr_mode); + u32 max_entries = mlx5e_mpwrq_max_num_entries(mdev, umr_mode); return ilog2(max_entries / mtts_per_wqe); } -u8 mlx5e_mpwrq_max_log_rq_pkts(struct mlx5_core_dev *mdev, u8 page_shift, bool unaligned) +u8 mlx5e_mpwrq_max_log_rq_pkts(struct mlx5_core_dev *mdev, u8 page_shift, + enum mlx5e_mpwrq_umr_mode umr_mode) { - return mlx5e_mpwrq_max_log_rq_size(mdev, page_shift, unaligned) + - mlx5e_mpwrq_log_wqe_sz(mdev, page_shift, unaligned) - + return mlx5e_mpwrq_max_log_rq_size(mdev, page_shift, umr_mode) + + mlx5e_mpwrq_log_wqe_sz(mdev, page_shift, umr_mode) - MLX5E_ORDER2_MAX_PACKET_MTU; } @@ -171,10 +271,10 @@ static u8 mlx5e_mpwqe_log_pkts_per_wqe(struct mlx5_core_dev *mdev, struct mlx5e_xsk_param *xsk) { u32 linear_stride_sz = mlx5e_rx_get_linear_stride_sz(mdev, params, xsk, true); + enum mlx5e_mpwrq_umr_mode umr_mode = mlx5e_mpwrq_umr_mode(mdev, xsk); u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk); - bool unaligned = xsk ? xsk->unaligned : false; - return mlx5e_mpwrq_log_wqe_sz(mdev, page_shift, unaligned) - + return mlx5e_mpwrq_log_wqe_sz(mdev, page_shift, umr_mode) - order_base_2(linear_stride_sz); } @@ -200,10 +300,11 @@ bool mlx5e_rx_is_linear_skb(struct mlx5_core_dev *mdev, static bool mlx5e_verify_rx_mpwqe_strides(struct mlx5_core_dev *mdev, u8 log_stride_sz, u8 log_num_strides, - u8 page_shift, bool unaligned) + u8 page_shift, + enum mlx5e_mpwrq_umr_mode umr_mode) { if (log_stride_sz + log_num_strides != - mlx5e_mpwrq_log_wqe_sz(mdev, page_shift, unaligned)) + mlx5e_mpwrq_log_wqe_sz(mdev, page_shift, umr_mode)) return false; if (log_stride_sz < MLX5_MPWQE_LOG_STRIDE_SZ_BASE || @@ -223,8 +324,8 @@ bool mlx5e_rx_mpwqe_is_linear_skb(struct mlx5_core_dev *mdev, struct mlx5e_params *params, struct mlx5e_xsk_param *xsk) { + enum mlx5e_mpwrq_umr_mode umr_mode = mlx5e_mpwrq_umr_mode(mdev, xsk); u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk); - bool unaligned = xsk ? xsk->unaligned : false; u8 log_num_strides; u8 log_stride_sz; u8 log_wqe_sz; @@ -233,7 +334,7 @@ bool mlx5e_rx_mpwqe_is_linear_skb(struct mlx5_core_dev *mdev, return false; log_stride_sz = order_base_2(mlx5e_rx_get_linear_stride_sz(mdev, params, xsk, true)); - log_wqe_sz = mlx5e_mpwrq_log_wqe_sz(mdev, page_shift, unaligned); + log_wqe_sz = mlx5e_mpwrq_log_wqe_sz(mdev, page_shift, umr_mode); if (log_wqe_sz < log_stride_sz) return false; @@ -242,19 +343,19 @@ bool mlx5e_rx_mpwqe_is_linear_skb(struct mlx5_core_dev *mdev, return mlx5e_verify_rx_mpwqe_strides(mdev, log_stride_sz, log_num_strides, page_shift, - unaligned); + umr_mode); } u8 mlx5e_mpwqe_get_log_rq_size(struct mlx5_core_dev *mdev, struct mlx5e_params *params, struct mlx5e_xsk_param *xsk) { + enum mlx5e_mpwrq_umr_mode umr_mode = mlx5e_mpwrq_umr_mode(mdev, xsk); u8 log_pkts_per_wqe, page_shift, max_log_rq_size; - bool unaligned = xsk ? 
xsk->unaligned : false; log_pkts_per_wqe = mlx5e_mpwqe_log_pkts_per_wqe(mdev, params, xsk); page_shift = mlx5e_mpwrq_page_shift(mdev, xsk); - max_log_rq_size = mlx5e_mpwrq_max_log_rq_size(mdev, page_shift, unaligned); + max_log_rq_size = mlx5e_mpwrq_max_log_rq_size(mdev, page_shift, umr_mode); /* Numbers are unsigned, don't subtract to avoid underflow. */ if (params->log_rq_mtu_frames < @@ -308,10 +409,10 @@ u8 mlx5e_mpwqe_get_log_num_strides(struct mlx5_core_dev *mdev, struct mlx5e_params *params, struct mlx5e_xsk_param *xsk) { + enum mlx5e_mpwrq_umr_mode umr_mode = mlx5e_mpwrq_umr_mode(mdev, xsk); u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk); - bool unaligned = xsk ? xsk->unaligned : false; - return mlx5e_mpwrq_log_wqe_sz(mdev, page_shift, unaligned) - + return mlx5e_mpwrq_log_wqe_sz(mdev, page_shift, umr_mode) - mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk); } @@ -460,9 +561,10 @@ bool slow_pci_heuristic(struct mlx5_core_dev *mdev) int mlx5e_mpwrq_validate_regular(struct mlx5_core_dev *mdev, struct mlx5e_params *params) { + enum mlx5e_mpwrq_umr_mode umr_mode = mlx5e_mpwrq_umr_mode(mdev, NULL); u8 page_shift = mlx5e_mpwrq_page_shift(mdev, NULL); - if (!mlx5e_check_fragmented_striding_rq_cap(mdev, page_shift, false)) + if (!mlx5e_check_fragmented_striding_rq_cap(mdev, page_shift, umr_mode)) return -EOPNOTSUPP; if (params->xdp_prog && !mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL)) @@ -474,11 +576,12 @@ int mlx5e_mpwrq_validate_regular(struct mlx5_core_dev *mdev, struct mlx5e_params int mlx5e_mpwrq_validate_xsk(struct mlx5_core_dev *mdev, struct mlx5e_params *params, struct mlx5e_xsk_param *xsk) { + enum mlx5e_mpwrq_umr_mode umr_mode = mlx5e_mpwrq_umr_mode(mdev, xsk); u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk); bool unaligned = xsk ? xsk->unaligned : false; u16 max_mtu_pkts; - if (!mlx5e_check_fragmented_striding_rq_cap(mdev, page_shift, xsk->unaligned)) + if (!mlx5e_check_fragmented_striding_rq_cap(mdev, page_shift, umr_mode)) return -EOPNOTSUPP; if (!mlx5e_rx_mpwqe_is_linear_skb(mdev, params, xsk)) @@ -586,7 +689,14 @@ static int mlx5e_build_rq_frags_info(struct mlx5_core_dev *mdev, info->arr[0].frag_size = byte_count; info->arr[0].frag_stride = frag_stride; info->num_frags = 1; - info->wqe_bulk = PAGE_SIZE / frag_stride; + + /* N WQEs share the same page, N = PAGE_SIZE / frag_stride. The + * first WQE in the page is responsible for allocation of this + * page, this WQE's index is k*N. If WQEs [k*N+1; k*N+N-1] are + * still not completed, the allocation must stop before k*N. + */ + info->wqe_index_mask = (PAGE_SIZE / frag_stride) - 1; + goto out; } @@ -635,11 +745,40 @@ static int mlx5e_build_rq_frags_info(struct mlx5_core_dev *mdev, i++; } info->num_frags = i; - /* number of different wqes sharing a page */ - info->wqe_bulk = 1 + (info->num_frags % 2); + + /* The last fragment of WQE with index 2*N may share the page with the + * first fragment of WQE with index 2*N+1 in certain cases. If WQE 2*N+1 + * is not completed yet, WQE 2*N must not be allocated, as it's + * responsible for allocating a new page. + */ + if (frag_size_max == PAGE_SIZE) { + /* No WQE can start in the middle of a page. */ + info->wqe_index_mask = 0; + } else { + /* PAGE_SIZEs starting from 8192 don't use 2K-sized fragments, + * because there would be more than MLX5E_MAX_RX_FRAGS of them. + */ + WARN_ON(PAGE_SIZE != 2 * DEFAULT_FRAG_SIZE); + + /* Odd number of fragments allows to pack the last fragment of + * the previous WQE and the first fragment of the next WQE into + * the same page. 
+ * As long as DEFAULT_FRAG_SIZE is 2048, and MLX5E_MAX_RX_FRAGS + * is 4, the last fragment can be bigger than the rest only if + * it's the fourth one, so WQEs consisting of 3 fragments will + * always share a page. + * When a page is shared, WQE bulk size is 2, otherwise just 1. + */ + info->wqe_index_mask = info->num_frags % 2; + } out: - info->wqe_bulk = max_t(u8, info->wqe_bulk, 8); + /* Bulking optimization to skip allocation until at least 8 WQEs can be + * allocated in a row. At the same time, never start allocation when + * the page is still used by older WQEs. + */ + info->wqe_bulk = max_t(u8, info->wqe_index_mask + 1, 8); + info->log_num_frags = order_base_2(info->num_frags); return 0; @@ -745,16 +884,16 @@ int mlx5e_build_rq_param(struct mlx5_core_dev *mdev, case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: { u8 log_wqe_num_of_strides = mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk); u8 log_wqe_stride_size = mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk); + enum mlx5e_mpwrq_umr_mode umr_mode = mlx5e_mpwrq_umr_mode(mdev, xsk); u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk); - bool unaligned = xsk ? xsk->unaligned : false; if (!mlx5e_verify_rx_mpwqe_strides(mdev, log_wqe_stride_size, log_wqe_num_of_strides, - page_shift, unaligned)) { + page_shift, umr_mode)) { mlx5_core_err(mdev, - "Bad RX MPWQE params: log_stride_size %u, log_num_strides %u, unaligned %d\n", + "Bad RX MPWQE params: log_stride_size %u, log_num_strides %u, umr_mode %d\n", log_wqe_stride_size, log_wqe_num_of_strides, - unaligned); + umr_mode); return -EINVAL; } @@ -938,11 +1077,11 @@ static u32 mlx5e_mpwrq_total_umr_wqebbs(struct mlx5_core_dev *mdev, struct mlx5e_params *params, struct mlx5e_xsk_param *xsk) { + enum mlx5e_mpwrq_umr_mode umr_mode = mlx5e_mpwrq_umr_mode(mdev, xsk); u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk); - bool unaligned = xsk ? xsk->unaligned : false; u8 umr_wqebbs; - umr_wqebbs = mlx5e_mpwrq_umr_wqebbs(mdev, page_shift, unaligned); + umr_wqebbs = mlx5e_mpwrq_umr_wqebbs(mdev, page_shift, umr_mode); return umr_wqebbs * (1 << mlx5e_mpwqe_get_log_rq_size(mdev, params, xsk)); } @@ -990,6 +1129,16 @@ static u8 mlx5e_build_icosq_log_wq_sz(struct mlx5_core_dev *mdev, xsk.unaligned = true; max_xsk_wqebbs = max(max_xsk_wqebbs, mlx5e_mpwrq_total_umr_wqebbs(mdev, params, &xsk)); + + /* XSK unaligned mode, frame size is not equal to stride size. */ + xsk.chunk_size -= 1; + max_xsk_wqebbs = max(max_xsk_wqebbs, + mlx5e_mpwrq_total_umr_wqebbs(mdev, params, &xsk)); + + /* XSK unaligned mode, frame size is a triple power of two. 
*/ + xsk.chunk_size = (1 << frame_shift) / 4 * 3; + max_xsk_wqebbs = max(max_xsk_wqebbs, + mlx5e_mpwrq_total_umr_wqebbs(mdev, params, &xsk)); } wqebbs += max_xsk_wqebbs; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h index cb862c478376..034debd140bc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h @@ -53,48 +53,26 @@ struct mlx5e_create_sq_param { u8 min_inline_mode; }; -static inline bool mlx5e_qid_get_ch_if_in_group(struct mlx5e_params *params, - u16 qid, - enum mlx5e_rq_group group, - u16 *ix) -{ - int nch = params->num_channels; - int ch = qid - nch * group; - - if (ch < 0 || ch >= nch) - return false; - - *ix = ch; - return true; -} - -static inline void mlx5e_qid_get_ch_and_group(struct mlx5e_params *params, - u16 qid, - u16 *ix, - enum mlx5e_rq_group *group) -{ - u16 nch = params->num_channels; - - *ix = qid % nch; - *group = qid / nch; -} - -static inline bool mlx5e_qid_validate(const struct mlx5e_profile *profile, - struct mlx5e_params *params, u64 qid) -{ - return qid < params->num_channels * profile->rq_groups; -} - /* Striding RQ dynamic parameters */ u8 mlx5e_mpwrq_page_shift(struct mlx5_core_dev *mdev, struct mlx5e_xsk_param *xsk); -u8 mlx5e_mpwrq_log_wqe_sz(struct mlx5_core_dev *mdev, u8 page_shift, bool unaligned); -u8 mlx5e_mpwrq_pages_per_wqe(struct mlx5_core_dev *mdev, u8 page_shift, bool unaligned); -u16 mlx5e_mpwrq_umr_wqe_sz(struct mlx5_core_dev *mdev, u8 page_shift, bool unaligned); -u8 mlx5e_mpwrq_umr_wqebbs(struct mlx5_core_dev *mdev, u8 page_shift, bool unaligned); -u8 mlx5e_mpwrq_mtts_per_wqe(struct mlx5_core_dev *mdev, u8 page_shift, bool unaligned); -u32 mlx5e_mpwrq_max_num_entries(struct mlx5_core_dev *mdev, bool unaligned); -u8 mlx5e_mpwrq_max_log_rq_pkts(struct mlx5_core_dev *mdev, u8 page_shift, bool unaligned); +enum mlx5e_mpwrq_umr_mode +mlx5e_mpwrq_umr_mode(struct mlx5_core_dev *mdev, struct mlx5e_xsk_param *xsk); +u8 mlx5e_mpwrq_umr_entry_size(enum mlx5e_mpwrq_umr_mode mode); +u8 mlx5e_mpwrq_log_wqe_sz(struct mlx5_core_dev *mdev, u8 page_shift, + enum mlx5e_mpwrq_umr_mode umr_mode); +u8 mlx5e_mpwrq_pages_per_wqe(struct mlx5_core_dev *mdev, u8 page_shift, + enum mlx5e_mpwrq_umr_mode umr_mode); +u16 mlx5e_mpwrq_umr_wqe_sz(struct mlx5_core_dev *mdev, u8 page_shift, + enum mlx5e_mpwrq_umr_mode umr_mode); +u8 mlx5e_mpwrq_umr_wqebbs(struct mlx5_core_dev *mdev, u8 page_shift, + enum mlx5e_mpwrq_umr_mode umr_mode); +u8 mlx5e_mpwrq_mtts_per_wqe(struct mlx5_core_dev *mdev, u8 page_shift, + enum mlx5e_mpwrq_umr_mode umr_mode); +u32 mlx5e_mpwrq_max_num_entries(struct mlx5_core_dev *mdev, + enum mlx5e_mpwrq_umr_mode umr_mode); +u8 mlx5e_mpwrq_max_log_rq_pkts(struct mlx5_core_dev *mdev, u8 page_shift, + enum mlx5e_mpwrq_umr_mode umr_mode); /* Parameter calculations */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c index 2b946ae1d97f..5f6f95ad6888 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c @@ -134,34 +134,13 @@ out: return err; } -static int mlx5e_rq_to_ready(struct mlx5e_rq *rq, int curr_state) -{ - struct net_device *dev = rq->netdev; - int err; - - err = mlx5e_modify_rq_state(rq, curr_state, MLX5_RQC_STATE_RST); - if (err) { - netdev_err(dev, "Failed to move rq 0x%x to reset\n", rq->rqn); - return err; - } - err = mlx5e_modify_rq_state(rq, 
MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY); - if (err) { - netdev_err(dev, "Failed to move rq 0x%x to ready\n", rq->rqn); - return err; - } - - return 0; -} - static int mlx5e_rx_reporter_err_rq_cqe_recover(void *ctx) { struct mlx5e_rq *rq = ctx; int err; mlx5e_deactivate_rq(rq); - mlx5e_free_rx_descs(rq); - - err = mlx5e_rq_to_ready(rq, MLX5_RQC_STATE_ERR); + err = mlx5e_flush_rq(rq, MLX5_RQC_STATE_ERR); clear_bit(MLX5E_RQ_STATE_RECOVERING, &rq->state); if (err) return err; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c index 3436ecfcbc2f..e1095bc36543 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c @@ -24,8 +24,6 @@ struct mlx5e_rx_res { struct { struct mlx5e_rqt direct_rqt; struct mlx5e_tir direct_tir; - struct mlx5e_rqt xsk_rqt; - struct mlx5e_tir xsk_tir; } *channels; struct { @@ -320,48 +318,8 @@ static int mlx5e_rx_res_channels_init(struct mlx5e_rx_res *res) mlx5e_tir_builder_clear(builder); } - if (!(res->features & MLX5E_RX_RES_FEATURE_XSK)) - goto out; - - for (ix = 0; ix < res->max_nch; ix++) { - err = mlx5e_rqt_init_direct(&res->channels[ix].xsk_rqt, - res->mdev, false, res->drop_rqn); - if (err) { - mlx5_core_warn(res->mdev, "Failed to create an XSK RQT: err = %d, ix = %u\n", - err, ix); - goto err_destroy_xsk_rqts; - } - } - - for (ix = 0; ix < res->max_nch; ix++) { - mlx5e_tir_builder_build_rqt(builder, res->mdev->mlx5e_res.hw_objs.td.tdn, - mlx5e_rqt_get_rqtn(&res->channels[ix].xsk_rqt), - inner_ft_support); - mlx5e_tir_builder_build_packet_merge(builder, &res->pkt_merge_param); - mlx5e_tir_builder_build_direct(builder); - - err = mlx5e_tir_init(&res->channels[ix].xsk_tir, builder, res->mdev, true); - if (err) { - mlx5_core_warn(res->mdev, "Failed to create an XSK TIR: err = %d, ix = %u\n", - err, ix); - goto err_destroy_xsk_tirs; - } - - mlx5e_tir_builder_clear(builder); - } - goto out; -err_destroy_xsk_tirs: - while (--ix >= 0) - mlx5e_tir_destroy(&res->channels[ix].xsk_tir); - - ix = res->max_nch; -err_destroy_xsk_rqts: - while (--ix >= 0) - mlx5e_rqt_destroy(&res->channels[ix].xsk_rqt); - - ix = res->max_nch; err_destroy_direct_tirs: while (--ix >= 0) mlx5e_tir_destroy(&res->channels[ix].direct_tir); @@ -420,12 +378,6 @@ static void mlx5e_rx_res_channels_destroy(struct mlx5e_rx_res *res) for (ix = 0; ix < res->max_nch; ix++) { mlx5e_tir_destroy(&res->channels[ix].direct_tir); mlx5e_rqt_destroy(&res->channels[ix].direct_rqt); - - if (!(res->features & MLX5E_RX_RES_FEATURE_XSK)) - continue; - - mlx5e_tir_destroy(&res->channels[ix].xsk_tir); - mlx5e_rqt_destroy(&res->channels[ix].xsk_rqt); } kvfree(res->channels); @@ -491,13 +443,6 @@ u32 mlx5e_rx_res_get_tirn_direct(struct mlx5e_rx_res *res, unsigned int ix) return mlx5e_tir_get_tirn(&res->channels[ix].direct_tir); } -u32 mlx5e_rx_res_get_tirn_xsk(struct mlx5e_rx_res *res, unsigned int ix) -{ - WARN_ON(!(res->features & MLX5E_RX_RES_FEATURE_XSK)); - - return mlx5e_tir_get_tirn(&res->channels[ix].xsk_tir); -} - u32 mlx5e_rx_res_get_tirn_rss(struct mlx5e_rx_res *res, enum mlx5_traffic_types tt) { struct mlx5e_rss *rss = res->rss[0]; @@ -527,26 +472,14 @@ static void mlx5e_rx_res_channel_activate_direct(struct mlx5e_rx_res *res, struct mlx5e_channels *chs, unsigned int ix) { - u32 rqn; + u32 rqn = res->rss_rqns[ix]; int err; - mlx5e_channels_get_regular_rqn(chs, ix, &rqn); err = mlx5e_rqt_redirect_direct(&res->channels[ix].direct_rqt, rqn); if (err) mlx5_core_warn(res->mdev, "Failed 
to redirect direct RQT %#x to RQ %#x (channel %u): err = %d\n", mlx5e_rqt_get_rqtn(&res->channels[ix].direct_rqt), rqn, ix, err); - - if (!(res->features & MLX5E_RX_RES_FEATURE_XSK)) - return; - - if (!mlx5e_channels_get_xsk_rqn(chs, ix, &rqn)) - rqn = res->drop_rqn; - err = mlx5e_rqt_redirect_direct(&res->channels[ix].xsk_rqt, rqn); - if (err) - mlx5_core_warn(res->mdev, "Failed to redirect XSK RQT %#x to RQ %#x (channel %u): err = %d\n", - mlx5e_rqt_get_rqtn(&res->channels[ix].xsk_rqt), - rqn, ix, err); } static void mlx5e_rx_res_channel_deactivate_direct(struct mlx5e_rx_res *res, @@ -559,15 +492,6 @@ static void mlx5e_rx_res_channel_deactivate_direct(struct mlx5e_rx_res *res, mlx5_core_warn(res->mdev, "Failed to redirect direct RQT %#x to drop RQ %#x (channel %u): err = %d\n", mlx5e_rqt_get_rqtn(&res->channels[ix].direct_rqt), res->drop_rqn, ix, err); - - if (!(res->features & MLX5E_RX_RES_FEATURE_XSK)) - return; - - err = mlx5e_rqt_redirect_direct(&res->channels[ix].xsk_rqt, res->drop_rqn); - if (err) - mlx5_core_warn(res->mdev, "Failed to redirect XSK RQT %#x to drop RQ %#x (channel %u): err = %d\n", - mlx5e_rqt_get_rqtn(&res->channels[ix].xsk_rqt), - res->drop_rqn, ix, err); } void mlx5e_rx_res_channels_activate(struct mlx5e_rx_res *res, struct mlx5e_channels *chs) @@ -577,8 +501,12 @@ void mlx5e_rx_res_channels_activate(struct mlx5e_rx_res *res, struct mlx5e_chann nch = mlx5e_channels_get_num(chs); - for (ix = 0; ix < chs->num; ix++) - mlx5e_channels_get_regular_rqn(chs, ix, &res->rss_rqns[ix]); + for (ix = 0; ix < chs->num; ix++) { + if (mlx5e_channels_is_xsk(chs, ix)) + mlx5e_channels_get_xsk_rqn(chs, ix, &res->rss_rqns[ix]); + else + mlx5e_channels_get_regular_rqn(chs, ix, &res->rss_rqns[ix]); + } res->rss_nch = chs->num; mlx5e_rx_res_rss_enable(res); @@ -621,33 +549,17 @@ void mlx5e_rx_res_channels_deactivate(struct mlx5e_rx_res *res) } } -int mlx5e_rx_res_xsk_activate(struct mlx5e_rx_res *res, struct mlx5e_channels *chs, - unsigned int ix) +void mlx5e_rx_res_xsk_update(struct mlx5e_rx_res *res, struct mlx5e_channels *chs, + unsigned int ix, bool xsk) { - u32 rqn; - int err; - - if (!mlx5e_channels_get_xsk_rqn(chs, ix, &rqn)) - return -EINVAL; - - err = mlx5e_rqt_redirect_direct(&res->channels[ix].xsk_rqt, rqn); - if (err) - mlx5_core_warn(res->mdev, "Failed to redirect XSK RQT %#x to XSK RQ %#x (channel %u): err = %d\n", - mlx5e_rqt_get_rqtn(&res->channels[ix].xsk_rqt), - rqn, ix, err); - return err; -} + if (xsk) + mlx5e_channels_get_xsk_rqn(chs, ix, &res->rss_rqns[ix]); + else + mlx5e_channels_get_regular_rqn(chs, ix, &res->rss_rqns[ix]); -int mlx5e_rx_res_xsk_deactivate(struct mlx5e_rx_res *res, unsigned int ix) -{ - int err; + mlx5e_rx_res_rss_enable(res); - err = mlx5e_rqt_redirect_direct(&res->channels[ix].xsk_rqt, res->drop_rqn); - if (err) - mlx5_core_warn(res->mdev, "Failed to redirect XSK RQT %#x to drop RQ %#x (channel %u): err = %d\n", - mlx5e_rqt_get_rqtn(&res->channels[ix].xsk_rqt), - res->drop_rqn, ix, err); - return err; + mlx5e_rx_res_channel_activate_direct(res, chs, ix); } int mlx5e_rx_res_packet_merge_set_param(struct mlx5e_rx_res *res, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h index b39b20a720e0..5d5f64fab60f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h @@ -17,8 +17,7 @@ struct mlx5e_rss_params_hash; enum mlx5e_rx_res_features { MLX5E_RX_RES_FEATURE_INNER_FT = BIT(0), - MLX5E_RX_RES_FEATURE_XSK = BIT(1), 
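With the dedicated XSK RQT/TIR pair gone, the patch above routes both RSS and the per-channel direct RQT through a single rss_rqns[] slot that is simply repointed when AF_XDP is activated or deactivated. A minimal standalone sketch of that idea, using toy types rather than the driver's (mlx5e_rqt_redirect_direct() and friends are the real API):

#include <stdbool.h>
#include <stdint.h>

struct toy_channel {
	uint32_t regular_rqn;	/* RQ number of the normal receive queue */
	uint32_t xsk_rqn;	/* RQ number of the AF_XDP receive queue */
};

/* Toy counterpart of mlx5e_rx_res_xsk_update(): pick the RQN that should
 * back channel ix, then reapply it everywhere it is referenced. */
static void toy_xsk_update(uint32_t *rss_rqns, const struct toy_channel *chs,
			   unsigned int ix, bool xsk)
{
	rss_rqns[ix] = xsk ? chs[ix].xsk_rqn : chs[ix].regular_rqn;
	/* The driver then re-enables RSS over rss_rqns[] and redirects the
	 * channel's direct RQT to rss_rqns[ix]; no second RQT is needed. */
}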
- MLX5E_RX_RES_FEATURE_PTP = BIT(2), + MLX5E_RX_RES_FEATURE_PTP = BIT(1), }; /* Setup */ @@ -32,7 +31,6 @@ void mlx5e_rx_res_free(struct mlx5e_rx_res *res); /* TIRN getters for flow steering */ u32 mlx5e_rx_res_get_tirn_direct(struct mlx5e_rx_res *res, unsigned int ix); -u32 mlx5e_rx_res_get_tirn_xsk(struct mlx5e_rx_res *res, unsigned int ix); u32 mlx5e_rx_res_get_tirn_rss(struct mlx5e_rx_res *res, enum mlx5_traffic_types tt); u32 mlx5e_rx_res_get_tirn_rss_inner(struct mlx5e_rx_res *res, enum mlx5_traffic_types tt); u32 mlx5e_rx_res_get_tirn_ptp(struct mlx5e_rx_res *res); @@ -40,9 +38,8 @@ u32 mlx5e_rx_res_get_tirn_ptp(struct mlx5e_rx_res *res); /* Activate/deactivate API */ void mlx5e_rx_res_channels_activate(struct mlx5e_rx_res *res, struct mlx5e_channels *chs); void mlx5e_rx_res_channels_deactivate(struct mlx5e_rx_res *res); -int mlx5e_rx_res_xsk_activate(struct mlx5e_rx_res *res, struct mlx5e_channels *chs, - unsigned int ix); -int mlx5e_rx_res_xsk_deactivate(struct mlx5e_rx_res *res, unsigned int ix); +void mlx5e_rx_res_xsk_update(struct mlx5e_rx_res *res, struct mlx5e_channels *chs, + unsigned int ix, bool xsk); /* Configuration API */ void mlx5e_rx_res_rss_set_indir_uniform(struct mlx5e_rx_res *res, unsigned int nch); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h index f4f306bb8e6d..4456ad5cedf1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h @@ -452,4 +452,11 @@ static inline bool mlx5e_icosq_can_post_wqe(struct mlx5e_icosq *sq, u16 wqe_size return mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, room); } + +static inline struct mlx5e_mpw_info *mlx5e_get_mpw_info(struct mlx5e_rq *rq, int i) +{ + size_t isz = struct_size(rq->mpwqe.info, alloc_units, rq->mpwqe.pages_per_wqe); + + return (struct mlx5e_mpw_info *)((char *)rq->mpwqe.info + array_size(i, isz)); +} #endif diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/pool.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/pool.c index 6058b1e72c6c..ebada0c5af3c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/pool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/pool.c @@ -99,6 +99,15 @@ static int mlx5e_xsk_enable_locked(struct mlx5e_priv *priv, mlx5e_build_xsk_param(pool, &xsk); + if (priv->channels.params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ && + mlx5e_mpwrq_umr_mode(priv->mdev, &xsk) == MLX5E_MPWRQ_UMR_MODE_OVERSIZED) { + const char *recommendation = is_power_of_2(xsk.chunk_size) ? + "Upgrade firmware" : "Disable striding RQ"; + + mlx5_core_warn(priv->mdev, "Expected slowdown with XSK frame size %u. %s for better performance.\n", + xsk.chunk_size, recommendation); + } + if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) { /* XSK objects will be created on open. */ goto validate_closed; @@ -124,15 +133,12 @@ static int mlx5e_xsk_enable_locked(struct mlx5e_priv *priv, * any Fill Ring entries at the setup stage. 
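The mlx5e_get_mpw_info() helper moved into txrx.h above indexes an array whose element size is only known at runtime, because each element ends in a flexible array of pages_per_wqe entries. The same pointer arithmetic in a self-contained form (simplified types; the kernel's struct_size()/array_size() additionally guard against overflow by saturating):

#include <stddef.h>

struct toy_mpw_info {
	unsigned long consumed_strides;
	int alloc_units[];		/* pages_per_wqe trailing entries */
};

static struct toy_mpw_info *toy_get_mpw_info(void *base, size_t i,
					     size_t pages_per_wqe)
{
	/* Stride of one element: fixed header plus the flexible tail. */
	size_t isz = sizeof(struct toy_mpw_info) +
		     pages_per_wqe * sizeof(int);

	return (struct toy_mpw_info *)((char *)base + i * isz);
}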
*/ - err = mlx5e_rx_res_xsk_activate(priv->rx_res, &priv->channels, ix); - if (unlikely(err)) - goto err_deactivate; + mlx5e_rx_res_xsk_update(priv->rx_res, &priv->channels, ix, true); - return 0; + mlx5e_deactivate_rq(&c->rq); + mlx5e_flush_rq(&c->rq, MLX5_RQC_STATE_RDY); -err_deactivate: - mlx5e_deactivate_xsk(c); - mlx5e_close_xsk(c); + return 0; err_remove_pool: mlx5e_xsk_remove_pool(&priv->xsk, ix); @@ -171,7 +177,13 @@ static int mlx5e_xsk_disable_locked(struct mlx5e_priv *priv, u16 ix) goto remove_pool; c = priv->channels.c[ix]; - mlx5e_rx_res_xsk_deactivate(priv->rx_res, ix); + + mlx5e_activate_rq(&c->rq); + mlx5e_trigger_napi_icosq(c); + mlx5e_wait_for_min_rx_wqes(&c->rq, MLX5E_RQ_WQES_TIMEOUT); + + mlx5e_rx_res_xsk_update(priv->rx_res, &priv->channels, ix, false); + mlx5e_deactivate_xsk(c); mlx5e_close_xsk(c); @@ -209,11 +221,10 @@ int mlx5e_xsk_setup_pool(struct net_device *dev, struct xsk_buff_pool *pool, u16 { struct mlx5e_priv *priv = netdev_priv(dev); struct mlx5e_params *params = &priv->channels.params; - u16 ix; - if (unlikely(!mlx5e_qid_get_ch_if_in_group(params, qid, MLX5E_RQ_GROUP_XSK, &ix))) + if (unlikely(qid >= params->num_channels)) return -EINVAL; - return pool ? mlx5e_xsk_enable_pool(priv, pool, ix) : - mlx5e_xsk_disable_pool(priv, ix); + return pool ? mlx5e_xsk_enable_pool(priv, pool, qid) : + mlx5e_xsk_disable_pool(priv, qid); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c index 4441d35943d1..c91b54d9ff27 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c @@ -8,18 +8,221 @@ /* RX data path */ -static struct sk_buff *mlx5e_xsk_construct_skb(struct mlx5e_rq *rq, void *data, - u32 cqe_bcnt) +int mlx5e_xsk_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix) { + struct mlx5e_mpw_info *wi = mlx5e_get_mpw_info(rq, ix); + struct mlx5e_icosq *icosq = rq->icosq; + struct mlx5_wq_cyc *wq = &icosq->wq; + struct mlx5e_umr_wqe *umr_wqe; + int batch, i; + u32 offset; /* 17-bit value with MTT. */ + u16 pi; + + if (unlikely(!xsk_buff_can_alloc(rq->xsk_pool, rq->mpwqe.pages_per_wqe))) + goto err; + + BUILD_BUG_ON(sizeof(wi->alloc_units[0]) != sizeof(wi->alloc_units[0].xsk)); + batch = xsk_buff_alloc_batch(rq->xsk_pool, (struct xdp_buff **)wi->alloc_units, + rq->mpwqe.pages_per_wqe); + + /* If batch < pages_per_wqe, either: + * 1. Some (or all) descriptors were invalid. + * 2. dma_need_sync is true, and it fell back to allocating one frame. + * In either case, try to continue allocating frames one by one, until + * the first error, which will mean there are no more valid descriptors. 
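In isolation, the allocation strategy this comment describes is: one batched call first, then top up frame by frame until the requested count or the first failure. A hedged sketch with toy allocators standing in for xsk_buff_alloc_batch()/xsk_buff_alloc():

#include <stddef.h>
#include <stdlib.h>

/* Toy batch allocator: pretend it legitimately came back short. */
static size_t toy_alloc_batch(void **bufs, size_t want)
{
	size_t n = want / 2;

	for (size_t i = 0; i < n; i++)
		bufs[i] = malloc(16);
	return n;
}

static size_t toy_fill(void **bufs, size_t want)
{
	size_t n = toy_alloc_batch(bufs, want);

	/* Top up one buffer at a time; the first NULL means there are no
	 * more valid descriptors, and the caller unwinds bufs[0..n). */
	for (; n < want; n++) {
		bufs[n] = malloc(16);
		if (!bufs[n])
			break;
	}
	return n;
}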
+ */ + for (; batch < rq->mpwqe.pages_per_wqe; batch++) { + wi->alloc_units[batch].xsk = xsk_buff_alloc(rq->xsk_pool); + if (unlikely(!wi->alloc_units[batch].xsk)) + goto err_reuse_batch; + } + + pi = mlx5e_icosq_get_next_pi(icosq, rq->mpwqe.umr_wqebbs); + umr_wqe = mlx5_wq_cyc_get_wqe(wq, pi); + memcpy(umr_wqe, &rq->mpwqe.umr_wqe, sizeof(struct mlx5e_umr_wqe)); + + if (likely(rq->mpwqe.umr_mode == MLX5E_MPWRQ_UMR_MODE_ALIGNED)) { + for (i = 0; i < batch; i++) { + dma_addr_t addr = xsk_buff_xdp_get_frame_dma(wi->alloc_units[i].xsk); + + umr_wqe->inline_mtts[i] = (struct mlx5_mtt) { + .ptag = cpu_to_be64(addr | MLX5_EN_WR), + }; + } + } else if (unlikely(rq->mpwqe.umr_mode == MLX5E_MPWRQ_UMR_MODE_UNALIGNED)) { + for (i = 0; i < batch; i++) { + dma_addr_t addr = xsk_buff_xdp_get_frame_dma(wi->alloc_units[i].xsk); + + umr_wqe->inline_ksms[i] = (struct mlx5_ksm) { + .key = rq->mkey_be, + .va = cpu_to_be64(addr), + }; + } + } else if (likely(rq->mpwqe.umr_mode == MLX5E_MPWRQ_UMR_MODE_TRIPLE)) { + u32 mapping_size = 1 << (rq->mpwqe.page_shift - 2); + + for (i = 0; i < batch; i++) { + dma_addr_t addr = xsk_buff_xdp_get_frame_dma(wi->alloc_units[i].xsk); + + umr_wqe->inline_ksms[i << 2] = (struct mlx5_ksm) { + .key = rq->mkey_be, + .va = cpu_to_be64(addr), + }; + umr_wqe->inline_ksms[(i << 2) + 1] = (struct mlx5_ksm) { + .key = rq->mkey_be, + .va = cpu_to_be64(addr + mapping_size), + }; + umr_wqe->inline_ksms[(i << 2) + 2] = (struct mlx5_ksm) { + .key = rq->mkey_be, + .va = cpu_to_be64(addr + mapping_size * 2), + }; + umr_wqe->inline_ksms[(i << 2) + 3] = (struct mlx5_ksm) { + .key = rq->mkey_be, + .va = cpu_to_be64(rq->wqe_overflow.addr), + }; + } + } else { + __be32 pad_size = cpu_to_be32((1 << rq->mpwqe.page_shift) - + rq->xsk_pool->chunk_size); + __be32 frame_size = cpu_to_be32(rq->xsk_pool->chunk_size); + + for (i = 0; i < batch; i++) { + dma_addr_t addr = xsk_buff_xdp_get_frame_dma(wi->alloc_units[i].xsk); + + umr_wqe->inline_klms[i << 1] = (struct mlx5_klm) { + .key = rq->mkey_be, + .va = cpu_to_be64(addr), + .bcount = frame_size, + }; + umr_wqe->inline_klms[(i << 1) + 1] = (struct mlx5_klm) { + .key = rq->mkey_be, + .va = cpu_to_be64(rq->wqe_overflow.addr), + .bcount = pad_size, + }; + } + } + + bitmap_zero(wi->xdp_xmit_bitmap, rq->mpwqe.pages_per_wqe); + wi->consumed_strides = 0; + + umr_wqe->ctrl.opmod_idx_opcode = + cpu_to_be32((icosq->pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) | MLX5_OPCODE_UMR); + + /* Optimized for speed: keep in sync with mlx5e_mpwrq_umr_entry_size. 
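Reading the four branches above side by side: each UMR mode writes a fixed number of translation entries per XSK frame. This summary is my reading of the hunk, not code from the patch:

enum toy_umr_mode { TOY_ALIGNED, TOY_UNALIGNED, TOY_OVERSIZED, TOY_TRIPLE };

/* Translation entries emitted per frame by mlx5e_xsk_alloc_rx_mpwqe(). */
static int toy_entries_per_frame(enum toy_umr_mode mode)
{
	switch (mode) {
	case TOY_ALIGNED:	return 1; /* one MTT per page */
	case TOY_UNALIGNED:	return 1; /* one KSM per page */
	case TOY_OVERSIZED:	return 2; /* KLM pair: frame, then padding up to page size */
	case TOY_TRIPLE:	return 4; /* three KSMs across the page plus one overflow KSM */
	}
	return 0;
}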
*/ + offset = ix * rq->mpwqe.mtts_per_wqe; + if (likely(rq->mpwqe.umr_mode == MLX5E_MPWRQ_UMR_MODE_ALIGNED)) + offset = offset * sizeof(struct mlx5_mtt) / MLX5_OCTWORD; + else if (unlikely(rq->mpwqe.umr_mode == MLX5E_MPWRQ_UMR_MODE_OVERSIZED)) + offset = offset * sizeof(struct mlx5_klm) * 2 / MLX5_OCTWORD; + else if (unlikely(rq->mpwqe.umr_mode == MLX5E_MPWRQ_UMR_MODE_TRIPLE)) + offset = offset * sizeof(struct mlx5_ksm) * 4 / MLX5_OCTWORD; + umr_wqe->uctrl.xlt_offset = cpu_to_be16(offset); + + icosq->db.wqe_info[pi] = (struct mlx5e_icosq_wqe_info) { + .wqe_type = MLX5E_ICOSQ_WQE_UMR_RX, + .num_wqebbs = rq->mpwqe.umr_wqebbs, + .umr.rq = rq, + }; + + icosq->pc += rq->mpwqe.umr_wqebbs; + + icosq->doorbell_cseg = &umr_wqe->ctrl; + + return 0; + +err_reuse_batch: + while (--batch >= 0) + xsk_buff_free(wi->alloc_units[batch].xsk); + +err: + rq->stats->buff_alloc_err++; + return -ENOMEM; +} + +int mlx5e_xsk_alloc_rx_wqes_batched(struct mlx5e_rq *rq, u16 ix, int wqe_bulk) +{ + struct mlx5_wq_cyc *wq = &rq->wqe.wq; + struct xdp_buff **buffs; + u32 contig, alloc; + int i; + + /* mlx5e_init_frags_partition creates a 1:1 mapping between + * rq->wqe.frags and rq->wqe.alloc_units, which allows us to + * allocate XDP buffers straight into alloc_units. + */ + BUILD_BUG_ON(sizeof(rq->wqe.alloc_units[0]) != + sizeof(rq->wqe.alloc_units[0].xsk)); + buffs = (struct xdp_buff **)rq->wqe.alloc_units; + contig = mlx5_wq_cyc_get_size(wq) - ix; + if (wqe_bulk <= contig) { + alloc = xsk_buff_alloc_batch(rq->xsk_pool, buffs + ix, wqe_bulk); + } else { + alloc = xsk_buff_alloc_batch(rq->xsk_pool, buffs + ix, contig); + if (likely(alloc == contig)) + alloc += xsk_buff_alloc_batch(rq->xsk_pool, buffs, wqe_bulk - contig); + } + + for (i = 0; i < alloc; i++) { + int j = mlx5_wq_cyc_ctr2ix(wq, ix + i); + struct mlx5e_wqe_frag_info *frag; + struct mlx5e_rx_wqe_cyc *wqe; + dma_addr_t addr; + + wqe = mlx5_wq_cyc_get_wqe(wq, j); + /* Assumes log_num_frags == 0. */ + frag = &rq->wqe.frags[j]; + + addr = xsk_buff_xdp_get_frame_dma(frag->au->xsk); + wqe->data[0].addr = cpu_to_be64(addr + rq->buff.headroom); + } + + return alloc; +} + +int mlx5e_xsk_alloc_rx_wqes(struct mlx5e_rq *rq, u16 ix, int wqe_bulk) +{ + struct mlx5_wq_cyc *wq = &rq->wqe.wq; + int i; + + for (i = 0; i < wqe_bulk; i++) { + int j = mlx5_wq_cyc_ctr2ix(wq, ix + i); + struct mlx5e_wqe_frag_info *frag; + struct mlx5e_rx_wqe_cyc *wqe; + dma_addr_t addr; + + wqe = mlx5_wq_cyc_get_wqe(wq, j); + /* Assumes log_num_frags == 0. 
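The xlt_offset computation near the top of this hunk converts a count of page-sized translation units into octowords. Assuming the usual entry sizes (MLX5_OCTWORD = 16 bytes, struct mlx5_mtt = 8 bytes, struct mlx5_ksm and struct mlx5_klm = 16 bytes each), the per-mode factors work out as below; UNALIGNED needs no branch because its factor is exactly 1:

/* units = ix * mtts_per_wqe, a count of page-sized units. */
static unsigned int toy_xlt_offset_octowords(unsigned int units, int mode)
{
	switch (mode) {
	case 0:	 return units * 8 / 16;	     /* ALIGNED:   1 MTT/unit  -> units / 2 */
	case 1:	 return units * 16 / 16;     /* UNALIGNED: 1 KSM/unit  -> units     */
	case 2:	 return units * 16 * 2 / 16; /* OVERSIZED: 2 KLMs/unit -> units * 2 */
	default: return units * 16 * 4 / 16; /* TRIPLE:    4 KSMs/unit -> units * 4 */
	}
}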
*/ + frag = &rq->wqe.frags[j]; + + frag->au->xsk = xsk_buff_alloc(rq->xsk_pool); + if (unlikely(!frag->au->xsk)) + return i; + + addr = xsk_buff_xdp_get_frame_dma(frag->au->xsk); + wqe->data[0].addr = cpu_to_be64(addr + rq->buff.headroom); + } + + return wqe_bulk; +} + +static struct sk_buff *mlx5e_xsk_construct_skb(struct mlx5e_rq *rq, struct xdp_buff *xdp) +{ + u32 totallen = xdp->data_end - xdp->data_meta; + u32 metalen = xdp->data - xdp->data_meta; struct sk_buff *skb; - skb = napi_alloc_skb(rq->cq.napi, cqe_bcnt); + skb = napi_alloc_skb(rq->cq.napi, totallen); if (unlikely(!skb)) { rq->stats->buff_alloc_err++; return NULL; } - skb_put_data(skb, data, cqe_bcnt); + skb_put_data(skb, xdp->data_meta, totallen); + + if (metalen) { + skb_metadata_set(skb, metalen); + __skb_pull(skb, metalen); + } return skb; } @@ -46,8 +249,7 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, */ WARN_ON_ONCE(head_offset); - xdp->data_end = xdp->data + cqe_bcnt; - xdp_set_data_meta_invalid(xdp); + xsk_buff_set_size(xdp, cqe_bcnt); xsk_buff_dma_sync_for_cpu(xdp, rq->xsk_pool); net_prefetch(xdp->data); @@ -76,7 +278,7 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, /* XDP_PASS: copy the data from the UMEM to a new SKB and reuse the * frame. On SKB allocation failure, NULL is returned. */ - return mlx5e_xsk_construct_skb(rq, xdp->data, xdp->data_end - xdp->data); + return mlx5e_xsk_construct_skb(rq, xdp); } struct sk_buff *mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq, @@ -93,8 +295,7 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq, */ WARN_ON_ONCE(wi->offset); - xdp->data_end = xdp->data + cqe_bcnt; - xdp_set_data_meta_invalid(xdp); + xsk_buff_set_size(xdp, cqe_bcnt); xsk_buff_dma_sync_for_cpu(xdp, rq->xsk_pool); net_prefetch(xdp->data); @@ -103,8 +304,8 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq, return NULL; /* page/packet was consumed by XDP */ /* XDP_PASS: copy the data from the UMEM to a new SKB. The frame reuse - * will be handled by mlx5e_put_rx_frag. + * will be handled by mlx5e_free_rx_wqe. * On SKB allocation failure, NULL is returned. 
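The reworked mlx5e_xsk_construct_skb() above now preserves XDP metadata instead of invalidating it. A worked example of the pointer math, with a hypothetical 8-byte metadata area in front of a 100-byte frame:

/*
 *   data_meta            data                  data_end
 *      |<-- metalen = 8 -->|<-- 100-byte frame -->|
 *
 * totallen = data_end - data_meta = 108, so skb_put_data() copies the
 * metadata and the packet together; skb_metadata_set(skb, 8) records the
 * metadata length, and __skb_pull(skb, 8) moves skb->data back to the
 * packet start, leaving the stack a normal 100-byte frame with 8 bytes of
 * metadata stashed in front of it.
 */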
*/ - return mlx5e_xsk_construct_skb(rq, xdp->data, xdp->data_end - xdp->data); + return mlx5e_xsk_construct_skb(rq, xdp); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.h index e702cb790476..087c943bd8e9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.h @@ -5,10 +5,12 @@ #define __MLX5_EN_XSK_RX_H__ #include "en.h" -#include <net/xdp_sock_drv.h> /* RX data path */ +int mlx5e_xsk_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix); +int mlx5e_xsk_alloc_rx_wqes_batched(struct mlx5e_rq *rq, u16 ix, int wqe_bulk); +int mlx5e_xsk_alloc_rx_wqes(struct mlx5e_rq *rq, u16 ix, int wqe_bulk); struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, u16 cqe_bcnt, @@ -18,17 +20,4 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt); -static inline bool mlx5e_xsk_update_rx_wakeup(struct mlx5e_rq *rq, bool alloc_err) -{ - if (!xsk_uses_need_wakeup(rq->xsk_pool)) - return alloc_err; - - if (unlikely(alloc_err)) - xsk_set_rx_need_wakeup(rq->xsk_pool); - else - xsk_clear_rx_need_wakeup(rq->xsk_pool); - - return false; -} - #endif /* __MLX5_EN_XSK_RX_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c index 5129b9bf534f..ff03c43833bb 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c @@ -66,7 +66,7 @@ static int mlx5e_init_xsk_rq(struct mlx5e_channel *c, rq->xsk_pool = pool; rq->stats = &c->priv->channel_stats[c->ix]->xskrq; rq->ptp_cyc2time = mlx5_rq_ts_translator(mdev); - rq_xdp_ix = c->ix + params->num_channels * MLX5E_RQ_GROUP_XSK; + rq_xdp_ix = c->ix; err = mlx5e_rq_set_handlers(rq, params, xsk); if (err) return err; @@ -154,7 +154,7 @@ err_free_cparam: void mlx5e_close_xsk(struct mlx5e_channel *c) { clear_bit(MLX5E_CHANNEL_STATE_XSK, c->state); - synchronize_net(); /* Sync with the XSK wakeup and with NAPI. */ + synchronize_net(); /* Sync with NAPI. 
*/ mlx5e_close_rq(&c->xskrq); mlx5e_close_cq(&c->xskrq.cq); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c index 4902ef74fedf..367a9505ca4f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c @@ -12,18 +12,14 @@ int mlx5e_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags) struct mlx5e_priv *priv = netdev_priv(dev); struct mlx5e_params *params = &priv->channels.params; struct mlx5e_channel *c; - u16 ix; if (unlikely(!mlx5e_xdp_is_active(priv))) return -ENETDOWN; - if (unlikely(!mlx5e_qid_get_ch_if_in_group(params, qid, MLX5E_RQ_GROUP_XSK, &ix))) + if (unlikely(qid >= params->num_channels)) return -EINVAL; - c = priv->channels.c[ix]; - - if (unlikely(!test_bit(MLX5E_CHANNEL_STATE_XSK, c->state))) - return -EINVAL; + c = priv->channels.c[qid]; if (!napi_if_scheduled_mark_missed(&c->napi)) { /* To avoid WQE overrun, don't post a NOP if async_icosq is not @@ -36,9 +32,7 @@ int mlx5e_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags) if (test_and_set_bit(MLX5E_SQ_STATE_PENDING_XSK_TX, &c->async_icosq.state)) return 0; - spin_lock_bh(&c->async_icosq_lock); - mlx5e_trigger_irq(&c->async_icosq); - spin_unlock_bh(&c->async_icosq_lock); + mlx5e_trigger_napi_icosq(c); } return 0; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.h index a05085035f23..9c505158b975 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.h @@ -5,7 +5,6 @@ #define __MLX5_EN_XSK_TX_H__ #include "en.h" -#include <net/xdp_sock_drv.h> /* TX data path */ @@ -13,15 +12,4 @@ int mlx5e_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags); bool mlx5e_xsk_tx(struct mlx5e_xdpsq *sq, unsigned int budget); -static inline void mlx5e_xsk_update_tx_wakeup(struct mlx5e_xdpsq *sq) -{ - if (!xsk_uses_need_wakeup(sq->xsk_pool)) - return; - - if (sq->pc != sq->cc) - xsk_clear_tx_need_wakeup(sq->xsk_pool); - else - xsk_set_tx_need_wakeup(sq->xsk_pool); -} - #endif /* __MLX5_EN_XSK_TX_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index 26f1ac4683e7..24aa25da482b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -314,7 +314,9 @@ void mlx5e_ethtool_get_ringparam(struct mlx5e_priv *priv, /* Limitation for regular RQ. XSK RQ may clamp the queue length in * mlx5e_mpwqe_get_log_rq_size. */ - u8 max_log_mpwrq_pkts = mlx5e_mpwrq_max_log_rq_pkts(priv->mdev, PAGE_SHIFT, false); + u8 max_log_mpwrq_pkts = mlx5e_mpwrq_max_log_rq_pkts(priv->mdev, + PAGE_SHIFT, + MLX5E_MPWRQ_UMR_MODE_ALIGNED); param->rx_max_pending = 1 << min_t(u8, MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE, max_log_mpwrq_pkts); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c index 2a67798cd446..aac32e505c14 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c @@ -451,15 +451,7 @@ static int flow_get_tirn(struct mlx5e_priv *priv, eth_rule->rss = rss; mlx5e_rss_refcnt_inc(eth_rule->rss); } else { - struct mlx5e_params *params = &priv->channels.params; - enum mlx5e_rq_group group; - u16 ix; - - mlx5e_qid_get_ch_and_group(params, fs->ring_cookie, &ix, &group); - - *tirn = group == MLX5E_RQ_GROUP_XSK ? 
- mlx5e_rx_res_get_tirn_xsk(priv->rx_res, ix) : - mlx5e_rx_res_get_tirn_direct(priv->rx_res, ix); + *tirn = mlx5e_rx_res_get_tirn_direct(priv->rx_res, fs->ring_cookie); } return 0; @@ -682,8 +674,7 @@ static int validate_flow(struct mlx5e_priv *priv, return -ENOSPC; if (fs->ring_cookie != RX_CLS_FLOW_DISC) - if (!mlx5e_qid_validate(priv->profile, &priv->channels.params, - fs->ring_cookie)) + if (fs->ring_cookie >= priv->channels.params.num_channels) return -EINVAL; switch (flow_type_mask(fs->flow_type)) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 2719247b18db..364f04309149 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -69,7 +69,7 @@ #include "en/trap.h" bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev, u8 page_shift, - bool unaligned) + enum mlx5e_mpwrq_umr_mode umr_mode) { u16 umr_wqebbs, max_wqebbs; bool striding_rq_umr; @@ -79,7 +79,7 @@ bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev, u8 page_ if (!striding_rq_umr) return false; - umr_wqebbs = mlx5e_mpwrq_umr_wqebbs(mdev, page_shift, unaligned); + umr_wqebbs = mlx5e_mpwrq_umr_wqebbs(mdev, page_shift, umr_mode); max_wqebbs = mlx5e_get_max_sq_aligned_wqebbs(mdev); /* Sanity check; should never happen, because mlx5e_mpwrq_umr_wqebbs is * calculated from mlx5e_get_max_sq_aligned_wqebbs. @@ -203,6 +203,15 @@ static void mlx5e_disable_blocking_events(struct mlx5e_priv *priv) mlx5_blocking_notifier_unregister(priv->mdev, &priv->blocking_events_nb); } +static u16 mlx5e_mpwrq_umr_octowords(u32 entries, enum mlx5e_mpwrq_umr_mode umr_mode) +{ + u8 umr_entry_size = mlx5e_mpwrq_umr_entry_size(umr_mode); + + WARN_ON_ONCE(entries * umr_entry_size % MLX5_OCTWORD); + + return entries * umr_entry_size / MLX5_OCTWORD; +} + static inline void mlx5e_build_umr_wqe(struct mlx5e_rq *rq, struct mlx5e_icosq *sq, struct mlx5e_umr_wqe *wqe) @@ -213,7 +222,7 @@ static inline void mlx5e_build_umr_wqe(struct mlx5e_rq *rq, u8 ds_cnt; ds_cnt = DIV_ROUND_UP(mlx5e_mpwrq_umr_wqe_sz(rq->mdev, rq->mpwqe.page_shift, - rq->mpwqe.unaligned), + rq->mpwqe.umr_mode), MLX5_SEND_WQE_DS); cseg->qpn_ds = cpu_to_be32((sq->sqn << MLX5_WQE_CTRL_QPN_SHIFT) | @@ -221,8 +230,7 @@ static inline void mlx5e_build_umr_wqe(struct mlx5e_rq *rq, cseg->umr_mkey = rq->mpwqe.umr_mkey_be; ucseg->flags = MLX5_UMR_TRANSLATION_OFFSET_EN | MLX5_UMR_INLINE; - octowords = rq->mpwqe.unaligned ? 
MLX5_KSM_OCTW(rq->mpwqe.pages_per_wqe) : - MLX5_MTT_OCTW(rq->mpwqe.pages_per_wqe); + octowords = mlx5e_mpwrq_umr_octowords(rq->mpwqe.pages_per_wqe, rq->mpwqe.umr_mode); ucseg->xlt_octowords = cpu_to_be16(octowords); ucseg->mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE); } @@ -283,12 +291,32 @@ static int mlx5e_rq_alloc_mpwqe_info(struct mlx5e_rq *rq, int node) return 0; } + +static u8 mlx5e_mpwrq_access_mode(enum mlx5e_mpwrq_umr_mode umr_mode) +{ + switch (umr_mode) { + case MLX5E_MPWRQ_UMR_MODE_ALIGNED: + return MLX5_MKC_ACCESS_MODE_MTT; + case MLX5E_MPWRQ_UMR_MODE_UNALIGNED: + return MLX5_MKC_ACCESS_MODE_KSM; + case MLX5E_MPWRQ_UMR_MODE_OVERSIZED: + return MLX5_MKC_ACCESS_MODE_KLMS; + case MLX5E_MPWRQ_UMR_MODE_TRIPLE: + return MLX5_MKC_ACCESS_MODE_KSM; + } + WARN_ONCE(1, "MPWRQ UMR mode %d is not known\n", umr_mode); + return 0; +} + static int mlx5e_create_umr_mkey(struct mlx5_core_dev *mdev, u32 npages, u8 page_shift, u32 *umr_mkey, - dma_addr_t filler_addr, bool unaligned) + dma_addr_t filler_addr, + enum mlx5e_mpwrq_umr_mode umr_mode, + u32 xsk_chunk_size) { struct mlx5_mtt *mtt; struct mlx5_ksm *ksm; + struct mlx5_klm *klm; u32 octwords; int inlen; void *mkc; @@ -296,14 +324,17 @@ static int mlx5e_create_umr_mkey(struct mlx5_core_dev *mdev, int err; int i; - if (unaligned && !MLX5_CAP_GEN(mdev, fixed_buffer_size)) { + if ((umr_mode == MLX5E_MPWRQ_UMR_MODE_UNALIGNED || + umr_mode == MLX5E_MPWRQ_UMR_MODE_TRIPLE) && + !MLX5_CAP_GEN(mdev, fixed_buffer_size)) { mlx5_core_warn(mdev, "Unaligned AF_XDP requires fixed_buffer_size capability\n"); return -EINVAL; } + octwords = mlx5e_mpwrq_umr_octowords(npages, umr_mode); + inlen = MLX5_FLEXIBLE_INLEN(mdev, MLX5_ST_SZ_BYTES(create_mkey_in), - unaligned ? sizeof(*ksm) : sizeof(*mtt), - npages); + MLX5_OCTWORD, octwords); if (inlen < 0) return inlen; @@ -311,22 +342,22 @@ static int mlx5e_create_umr_mkey(struct mlx5_core_dev *mdev, if (!in) return -ENOMEM; - octwords = unaligned ? MLX5_KSM_OCTW(npages) : MLX5_MTT_OCTW(npages); - mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry); MLX5_SET(mkc, mkc, free, 1); MLX5_SET(mkc, mkc, umr_en, 1); MLX5_SET(mkc, mkc, lw, 1); MLX5_SET(mkc, mkc, lr, 1); - MLX5_SET(mkc, mkc, access_mode_1_0, - unaligned ? MLX5_MKC_ACCESS_MODE_KSM : MLX5_MKC_ACCESS_MODE_MTT); + MLX5_SET(mkc, mkc, access_mode_1_0, mlx5e_mpwrq_access_mode(umr_mode)); mlx5e_mkey_set_relaxed_ordering(mdev, mkc); MLX5_SET(mkc, mkc, qpn, 0xffffff); MLX5_SET(mkc, mkc, pd, mdev->mlx5e_res.hw_objs.pdn); MLX5_SET64(mkc, mkc, len, npages << page_shift); MLX5_SET(mkc, mkc, translations_octword_size, octwords); - MLX5_SET(mkc, mkc, log_page_size, page_shift); + if (umr_mode == MLX5E_MPWRQ_UMR_MODE_TRIPLE) + MLX5_SET(mkc, mkc, log_page_size, page_shift - 2); + else if (umr_mode != MLX5E_MPWRQ_UMR_MODE_OVERSIZED) + MLX5_SET(mkc, mkc, log_page_size, page_shift); MLX5_SET(create_mkey_in, in, translations_octword_actual_size, octwords); /* Initialize the mkey with all MTTs pointing to a default @@ -335,19 +366,46 @@ static int mlx5e_create_umr_mkey(struct mlx5_core_dev *mdev, * the RQ's pool, while the gaps (wqe_overflow) remain mapped * to the default page. 
*/ - if (unaligned) { + switch (umr_mode) { + case MLX5E_MPWRQ_UMR_MODE_OVERSIZED: + klm = MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt); + for (i = 0; i < npages; i++) { + klm[i << 1] = (struct mlx5_klm) { + .va = cpu_to_be64(filler_addr), + .bcount = cpu_to_be32(xsk_chunk_size), + .key = cpu_to_be32(mdev->mlx5e_res.hw_objs.mkey), + }; + klm[(i << 1) + 1] = (struct mlx5_klm) { + .va = cpu_to_be64(filler_addr), + .bcount = cpu_to_be32((1 << page_shift) - xsk_chunk_size), + .key = cpu_to_be32(mdev->mlx5e_res.hw_objs.mkey), + }; + } + break; + case MLX5E_MPWRQ_UMR_MODE_UNALIGNED: ksm = MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt); for (i = 0; i < npages; i++) ksm[i] = (struct mlx5_ksm) { .key = cpu_to_be32(mdev->mlx5e_res.hw_objs.mkey), .va = cpu_to_be64(filler_addr), }; - } else { + break; + case MLX5E_MPWRQ_UMR_MODE_ALIGNED: mtt = MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt); for (i = 0; i < npages; i++) mtt[i] = (struct mlx5_mtt) { .ptag = cpu_to_be64(filler_addr), }; + break; + case MLX5E_MPWRQ_UMR_MODE_TRIPLE: + ksm = MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt); + for (i = 0; i < npages * 4; i++) { + ksm[i] = (struct mlx5_ksm) { + .key = cpu_to_be32(mdev->mlx5e_res.hw_objs.mkey), + .va = cpu_to_be64(filler_addr), + }; + } + break; } err = mlx5_core_create_mkey(mdev, umr_mkey, in, inlen); @@ -391,12 +449,13 @@ static int mlx5e_create_umr_klm_mkey(struct mlx5_core_dev *mdev, static int mlx5e_create_rq_umr_mkey(struct mlx5_core_dev *mdev, struct mlx5e_rq *rq) { + u32 xsk_chunk_size = rq->xsk_pool ? rq->xsk_pool->chunk_size : 0; u32 wq_size = mlx5_wq_ll_get_size(&rq->mpwqe.wq); u32 num_entries, max_num_entries; u32 umr_mkey; int err; - max_num_entries = mlx5e_mpwrq_max_num_entries(mdev, rq->mpwqe.unaligned); + max_num_entries = mlx5e_mpwrq_max_num_entries(mdev, rq->mpwqe.umr_mode); /* Shouldn't overflow, the result is at most MLX5E_MAX_RQ_NUM_MTTS. */ if (WARN_ON_ONCE(check_mul_overflow(wq_size, (u32)rq->mpwqe.mtts_per_wqe, @@ -408,7 +467,7 @@ static int mlx5e_create_rq_umr_mkey(struct mlx5_core_dev *mdev, struct mlx5e_rq err = mlx5e_create_umr_mkey(mdev, num_entries, rq->mpwqe.page_shift, &umr_mkey, rq->wqe_overflow.addr, - rq->mpwqe.unaligned); + rq->mpwqe.umr_mode, xsk_chunk_size); rq->mpwqe.umr_mkey_be = cpu_to_be32(umr_mkey); return err; } @@ -433,6 +492,13 @@ static void mlx5e_init_frags_partition(struct mlx5e_rq *rq) struct mlx5e_wqe_frag_info *prev = NULL; int i; + if (rq->xsk_pool) { + /* Assumptions used by XSK batched allocator. */ + WARN_ON(rq->wqe.info.num_frags != 1); + WARN_ON(rq->wqe.info.log_num_frags != 0); + WARN_ON(rq->wqe.info.arr[0].frag_stride != PAGE_SIZE); + } + next_frag.au = &rq->wqe.alloc_units[0]; for (i = 0; i < mlx5_wq_cyc_get_size(&rq->wqe.wq); i++) { @@ -532,7 +598,7 @@ static int mlx5e_init_rxq_rq(struct mlx5e_channel *c, struct mlx5e_params *param if (err) return err; - return xdp_rxq_info_reg(&rq->xdp_rxq, rq->netdev, rq->ix, 0); + return xdp_rxq_info_reg(&rq->xdp_rxq, rq->netdev, rq->ix, c->napi.napi_id); } static int mlx5_rq_shampo_alloc(struct mlx5_core_dev *mdev, @@ -637,16 +703,16 @@ static int mlx5e_alloc_rq(struct mlx5e_params *params, wq_sz = mlx5_wq_ll_get_size(&rq->mpwqe.wq); rq->mpwqe.page_shift = mlx5e_mpwrq_page_shift(mdev, xsk); - rq->mpwqe.unaligned = xsk ? 
xsk->unaligned : false; + rq->mpwqe.umr_mode = mlx5e_mpwrq_umr_mode(mdev, xsk); rq->mpwqe.pages_per_wqe = mlx5e_mpwrq_pages_per_wqe(mdev, rq->mpwqe.page_shift, - rq->mpwqe.unaligned); + rq->mpwqe.umr_mode); rq->mpwqe.umr_wqebbs = mlx5e_mpwrq_umr_wqebbs(mdev, rq->mpwqe.page_shift, - rq->mpwqe.unaligned); + rq->mpwqe.umr_mode); rq->mpwqe.mtts_per_wqe = mlx5e_mpwrq_mtts_per_wqe(mdev, rq->mpwqe.page_shift, - rq->mpwqe.unaligned); + rq->mpwqe.umr_mode); pool_size = rq->mpwqe.pages_per_wqe << mlx5e_mpwqe_get_log_rq_size(mdev, params, xsk); @@ -892,7 +958,7 @@ int mlx5e_create_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param) return err; } -int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state, int next_state) +static int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state, int next_state) { struct mlx5_core_dev *mdev = rq->mdev; @@ -921,6 +987,32 @@ int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state, int next_state) return err; } +static int mlx5e_rq_to_ready(struct mlx5e_rq *rq, int curr_state) +{ + struct net_device *dev = rq->netdev; + int err; + + err = mlx5e_modify_rq_state(rq, curr_state, MLX5_RQC_STATE_RST); + if (err) { + netdev_err(dev, "Failed to move rq 0x%x to reset\n", rq->rqn); + return err; + } + err = mlx5e_modify_rq_state(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY); + if (err) { + netdev_err(dev, "Failed to move rq 0x%x to ready\n", rq->rqn); + return err; + } + + return 0; +} + +int mlx5e_flush_rq(struct mlx5e_rq *rq, int curr_state) +{ + mlx5e_free_rx_descs(rq); + + return mlx5e_rq_to_ready(rq, curr_state); +} + static int mlx5e_modify_rq_scatter_fcs(struct mlx5e_rq *rq, bool enable) { struct mlx5_core_dev *mdev = rq->mdev; @@ -2375,10 +2467,11 @@ static void mlx5e_activate_channel(struct mlx5e_channel *c) mlx5e_activate_txqsq(&c->sq[tc]); mlx5e_activate_icosq(&c->icosq); mlx5e_activate_icosq(&c->async_icosq); - mlx5e_activate_rq(&c->rq); if (test_bit(MLX5E_CHANNEL_STATE_XSK, c->state)) mlx5e_activate_xsk(c); + else + mlx5e_activate_rq(&c->rq); mlx5e_trigger_napi_icosq(c); } @@ -2389,8 +2482,9 @@ static void mlx5e_deactivate_channel(struct mlx5e_channel *c) if (test_bit(MLX5E_CHANNEL_STATE_XSK, c->state)) mlx5e_deactivate_xsk(c); + else + mlx5e_deactivate_rq(&c->rq); - mlx5e_deactivate_rq(&c->rq); mlx5e_deactivate_icosq(&c->async_icosq); mlx5e_deactivate_icosq(&c->icosq); for (tc = 0; tc < c->num_tc; tc++) @@ -2482,8 +2576,6 @@ static void mlx5e_activate_channels(struct mlx5e_channels *chs) mlx5e_ptp_activate_channel(chs->ptp); } -#define MLX5E_RQ_WQES_TIMEOUT 20000 /* msecs */ - static int mlx5e_wait_channels_min_rx_wqes(struct mlx5e_channels *chs) { int err = 0; @@ -2491,8 +2583,12 @@ static int mlx5e_wait_channels_min_rx_wqes(struct mlx5e_channels *chs) for (i = 0; i < chs->num; i++) { int timeout = err ? 0 : MLX5E_RQ_WQES_TIMEOUT; + struct mlx5e_channel *c = chs->c[i]; + + if (test_bit(MLX5E_CHANNEL_STATE_XSK, c->state)) + continue; - err |= mlx5e_wait_for_min_rx_wqes(&chs->c[i]->rq, timeout); + err |= mlx5e_wait_for_min_rx_wqes(&c->rq, timeout); /* Don't wait on the XSK RQ, because the newer xdpsock sample * doesn't provide any Fill Ring entries at the setup stage. 
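mlx5e_flush_rq(), factored out above, packages the drain-and-restart sequence now shared by the RX health reporter and the XSK enable path. A toy model of the state flow, with stand-in constants for the MLX5_RQC_STATE_* values:

enum toy_rq_state { TOY_RST, TOY_RDY, TOY_ERR };

struct toy_rq {
	enum toy_rq_state state;
	int posted_descs;
};

static int toy_modify_rq(struct toy_rq *rq, enum toy_rq_state from,
			 enum toy_rq_state to)
{
	if (rq->state != from)
		return -1;	/* firmware rejects an invalid transition */
	rq->state = to;
	return 0;
}

/* Mirrors mlx5e_flush_rq(): drop posted descriptors first, then drive the
 * queue from its current state through RST back to RDY. */
static int toy_flush_rq(struct toy_rq *rq, enum toy_rq_state curr)
{
	rq->posted_descs = 0;			/* mlx5e_free_rx_descs() */
	if (toy_modify_rq(rq, curr, TOY_RST))
		return -1;
	return toy_modify_rq(rq, TOY_RST, TOY_RDY);
}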
@@ -2657,7 +2753,7 @@ static int mlx5e_update_netdev_queues(struct mlx5e_priv *priv) struct netdev_tc_txq old_tc_to_txq[TC_MAX_QUEUE], *tc_to_txq; struct net_device *netdev = priv->netdev; int old_num_txqs, old_ntc; - int num_rxqs, nch, ntc; + int nch, ntc; int err; int i; @@ -2668,7 +2764,6 @@ static int mlx5e_update_netdev_queues(struct mlx5e_priv *priv) nch = priv->channels.params.num_channels; ntc = priv->channels.params.mqprio.num_tc; - num_rxqs = nch * priv->profile->rq_groups; tc_to_txq = priv->channels.params.mqprio.tc_to_txq; err = mlx5e_netdev_set_tcs(netdev, nch, ntc, tc_to_txq); @@ -2677,7 +2772,7 @@ static int mlx5e_update_netdev_queues(struct mlx5e_priv *priv) err = mlx5e_update_tx_netdev_queues(priv); if (err) goto err_tcs; - err = netif_set_real_num_rx_queues(netdev, num_rxqs); + err = netif_set_real_num_rx_queues(netdev, nch); if (err) { netdev_warn(netdev, "netif_set_real_num_rx_queues failed, %d\n", err); goto err_txqs; @@ -3604,7 +3699,8 @@ mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats) stats->rx_length_errors = PPORT_802_3_GET(pstats, a_in_range_length_errors) + PPORT_802_3_GET(pstats, a_out_of_range_length_field) + - PPORT_802_3_GET(pstats, a_frame_too_long_errors); + PPORT_802_3_GET(pstats, a_frame_too_long_errors) + + VNIC_ENV_GET(&priv->stats.vnic, eth_wqe_too_small); stats->rx_crc_errors = PPORT_802_3_GET(pstats, a_frame_check_sequence_errors); stats->rx_frame_errors = PPORT_802_3_GET(pstats, a_alignment_errors); @@ -4976,7 +5072,8 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev) if (!!MLX5_CAP_ETH(mdev, lro_cap) && !MLX5_CAP_ETH(mdev, tunnel_lro_vxlan) && !MLX5_CAP_ETH(mdev, tunnel_lro_gre) && - mlx5e_check_fragmented_striding_rq_cap(mdev, PAGE_SHIFT, false)) + mlx5e_check_fragmented_striding_rq_cap(mdev, PAGE_SHIFT, + MLX5E_MPWRQ_UMR_MODE_ALIGNED)) netdev->vlan_features |= NETIF_F_LRO; netdev->hw_features = netdev->vlan_features; @@ -5166,7 +5263,7 @@ static int mlx5e_init_nic_rx(struct mlx5e_priv *priv) goto err_destroy_q_counters; } - features = MLX5E_RX_RES_FEATURE_XSK | MLX5E_RX_RES_FEATURE_PTP; + features = MLX5E_RX_RES_FEATURE_PTP; if (priv->channels.params.tunneled_offload_en) features |= MLX5E_RX_RES_FEATURE_INNER_FT; err = mlx5e_rx_res_init(priv->rx_res, priv->mdev, features, @@ -5357,7 +5454,6 @@ static const struct mlx5e_profile mlx5e_nic_profile = { .update_carrier = mlx5e_update_carrier, .rx_handlers = &mlx5e_rx_handlers_nic, .max_tc = MLX5E_MAX_NUM_TC, - .rq_groups = MLX5E_NUM_RQ_GROUPS(XSK), .stats_grps = mlx5e_nic_stats_grps, .stats_grps_num = mlx5e_nic_stats_grps_num, .features = BIT(MLX5E_PROFILE_FEATURE_PTP_RX) | @@ -5390,8 +5486,7 @@ mlx5e_calc_max_nch(struct mlx5_core_dev *mdev, struct net_device *netdev, max_nch = mlx5e_profile_max_num_channels(mdev, profile); /* netdev rx queues */ - tmp = netdev->num_rx_queues / max_t(u8, profile->rq_groups, 1); - max_nch = min_t(unsigned int, max_nch, tmp); + max_nch = min_t(unsigned int, max_nch, netdev->num_rx_queues); /* netdev tx queues */ tmp = netdev->num_tx_queues; @@ -5535,11 +5630,7 @@ static unsigned int mlx5e_get_max_num_txqs(struct mlx5_core_dev *mdev, static unsigned int mlx5e_get_max_num_rxqs(struct mlx5_core_dev *mdev, const struct mlx5e_profile *profile) { - unsigned int nch; - - nch = mlx5e_profile_max_num_channels(mdev, profile); - - return nch * profile->rq_groups; + return mlx5e_profile_max_num_channels(mdev, profile); } struct net_device * diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c 
b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index 83b2febe8a7b..794cd8dfe9c9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -1224,7 +1224,6 @@ static const struct mlx5e_profile mlx5e_rep_profile = { .update_stats = mlx5e_stats_update_ndo_stats, .rx_handlers = &mlx5e_rx_handlers_rep, .max_tc = 1, - .rq_groups = MLX5E_NUM_RQ_GROUPS(REGULAR), .stats_grps = mlx5e_rep_stats_grps, .stats_grps_num = mlx5e_rep_stats_grps_num, .max_nch_limit = mlx5e_rep_max_nch_limit, @@ -1244,8 +1243,6 @@ static const struct mlx5e_profile mlx5e_uplink_rep_profile = { .update_carrier = mlx5e_update_carrier, .rx_handlers = &mlx5e_rx_handlers_rep, .max_tc = MLX5E_MAX_NUM_TC, - /* XSK is needed so we can replace profile with NIC netdev */ - .rq_groups = MLX5E_NUM_RQ_GROUPS(XSK), .stats_grps = mlx5e_ul_rep_stats_grps, .stats_grps_num = mlx5e_ul_rep_stats_grps_num, }; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 72d74de3ee99..58084650151f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -41,6 +41,7 @@ #include <net/gro.h> #include <net/udp.h> #include <net/tcp.h> +#include <net/xdp_sock_drv.h> #include "en.h" #include "en/txrx.h" #include "en_tc.h" @@ -75,13 +76,6 @@ const struct mlx5e_rx_handlers mlx5e_rx_handlers_nic = { .handle_rx_cqe_mpwqe_shampo = mlx5e_handle_rx_cqe_mpwrq_shampo, }; -static struct mlx5e_mpw_info *mlx5e_get_mpw_info(struct mlx5e_rq *rq, int i) -{ - size_t isz = struct_size(rq->mpwqe.info, alloc_units, rq->mpwqe.pages_per_wqe); - - return (struct mlx5e_mpw_info *)((char *)rq->mpwqe.info + array_size(i, isz)); -} - static inline bool mlx5e_rx_hw_stamp(struct hwtstamp_config *config) { return config->rx_filter == HWTSTAMP_FILTER_ALL; @@ -300,16 +294,6 @@ static inline int mlx5e_page_alloc_pool(struct mlx5e_rq *rq, union mlx5e_alloc_u return 0; } -static inline int mlx5e_page_alloc(struct mlx5e_rq *rq, union mlx5e_alloc_unit *au) -{ - if (rq->xsk_pool) { - au->xsk = xsk_buff_alloc(rq->xsk_pool); - return likely(au->xsk) ? 0 : -ENOMEM; - } else { - return mlx5e_page_alloc_pool(rq, au); - } -} - void mlx5e_page_dma_unmap(struct mlx5e_rq *rq, struct page *page) { dma_addr_t dma_addr = page_pool_get_dma_addr(page); @@ -334,20 +318,6 @@ void mlx5e_page_release_dynamic(struct mlx5e_rq *rq, struct page *page, bool rec } } -static inline void mlx5e_page_release(struct mlx5e_rq *rq, - union mlx5e_alloc_unit *au, - bool recycle) -{ - if (rq->xsk_pool) - /* The `recycle` parameter is ignored, and the page is always - * put into the Reuse Ring, because there is no way to return - * the page to the userspace when the interface goes down. - */ - xsk_buff_free(au->xsk); - else - mlx5e_page_release_dynamic(rq, au->page, recycle); -} - static inline int mlx5e_get_rx_frag(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *frag) { @@ -359,7 +329,7 @@ static inline int mlx5e_get_rx_frag(struct mlx5e_rq *rq, * offset) should just use the new one without replenishing again * by themselves. 
*/ - err = mlx5e_page_alloc(rq, frag->au); + err = mlx5e_page_alloc_pool(rq, frag->au); return err; } @@ -369,7 +339,7 @@ static inline void mlx5e_put_rx_frag(struct mlx5e_rq *rq, bool recycle) { if (frag->last_in_page) - mlx5e_page_release(rq, frag->au, recycle); + mlx5e_page_release_dynamic(rq, frag->au->page, recycle); } static inline struct mlx5e_wqe_frag_info *get_frag(struct mlx5e_rq *rq, u16 ix) @@ -393,8 +363,7 @@ static int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe_cyc *wqe, goto free_frags; headroom = i == 0 ? rq->buff.headroom : 0; - addr = rq->xsk_pool ? xsk_buff_xdp_get_frame_dma(frag->au->xsk) : - page_pool_get_dma_addr(frag->au->page); + addr = page_pool_get_dma_addr(frag->au->page); wqe->data[i].addr = cpu_to_be64(addr + frag->offset + headroom); } @@ -413,6 +382,15 @@ static inline void mlx5e_free_rx_wqe(struct mlx5e_rq *rq, { int i; + if (rq->xsk_pool) { + /* The `recycle` parameter is ignored, and the page is always + * put into the Reuse Ring, because there is no way to return + * the page to the userspace when the interface goes down. + */ + xsk_buff_free(wi->au->xsk); + return; + } + for (i = 0; i < rq->wqe.info.num_frags; i++, wi++) mlx5e_put_rx_frag(rq, wi, recycle); } @@ -424,38 +402,22 @@ static void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix) mlx5e_free_rx_wqe(rq, wi, false); } -static int mlx5e_alloc_rx_wqes(struct mlx5e_rq *rq, u16 ix, u8 wqe_bulk) +static int mlx5e_alloc_rx_wqes(struct mlx5e_rq *rq, u16 ix, int wqe_bulk) { struct mlx5_wq_cyc *wq = &rq->wqe.wq; - int err; int i; - if (rq->xsk_pool) { - int pages_desired = wqe_bulk << rq->wqe.info.log_num_frags; - - /* Check in advance that we have enough frames, instead of - * allocating one-by-one, failing and moving frames to the - * Reuse Ring. - */ - if (unlikely(!xsk_buff_can_alloc(rq->xsk_pool, pages_desired))) - return -ENOMEM; - } - for (i = 0; i < wqe_bulk; i++) { - struct mlx5e_rx_wqe_cyc *wqe = mlx5_wq_cyc_get_wqe(wq, ix + i); + int j = mlx5_wq_cyc_ctr2ix(wq, ix + i); + struct mlx5e_rx_wqe_cyc *wqe; - err = mlx5e_alloc_rx_wqe(rq, wqe, ix + i); - if (unlikely(err)) - goto free_wqes; - } - - return 0; + wqe = mlx5_wq_cyc_get_wqe(wq, j); -free_wqes: - while (--i >= 0) - mlx5e_dealloc_rx_wqe(rq, ix + i); + if (unlikely(mlx5e_alloc_rx_wqe(rq, wqe, j))) + break; + } - return err; + return i; } static inline void @@ -497,9 +459,19 @@ mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, bool recycle no_xdp_xmit = bitmap_empty(wi->xdp_xmit_bitmap, rq->mpwqe.pages_per_wqe); - for (i = 0; i < rq->mpwqe.pages_per_wqe; i++) - if (no_xdp_xmit || !test_bit(i, wi->xdp_xmit_bitmap)) - mlx5e_page_release(rq, &alloc_units[i], recycle); + if (rq->xsk_pool) { + /* The `recycle` parameter is ignored, and the page is always + * put into the Reuse Ring, because there is no way to return + * the page to the userspace when the interface goes down. 
+ */ + for (i = 0; i < rq->mpwqe.pages_per_wqe; i++) + if (no_xdp_xmit || !test_bit(i, wi->xdp_xmit_bitmap)) + xsk_buff_free(alloc_units[i].xsk); + } else { + for (i = 0; i < rq->mpwqe.pages_per_wqe; i++) + if (no_xdp_xmit || !test_bit(i, wi->xdp_xmit_bitmap)) + mlx5e_page_release_dynamic(rq, alloc_units[i].page, recycle); + } } static void mlx5e_post_rx_mpwqe(struct mlx5e_rq *rq, u8 n) @@ -586,7 +558,7 @@ static int mlx5e_build_shampo_hd_umr(struct mlx5e_rq *rq, if (!(header_offset & (PAGE_SIZE - 1))) { union mlx5e_alloc_unit au; - err = mlx5e_page_alloc(rq, &au); + err = mlx5e_page_alloc_pool(rq, &au); if (unlikely(err)) goto err_unmap; page = dma_info->page = au.page; @@ -622,12 +594,8 @@ err_unmap: while (--i >= 0) { dma_info = &shampo->info[--index]; if (!(i & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1))) { - union mlx5e_alloc_unit au = { - .page = dma_info->page, - }; - dma_info->addr = ALIGN_DOWN(dma_info->addr, PAGE_SIZE); - mlx5e_page_release(rq, &au, true); + mlx5e_page_release_dynamic(rq, dma_info->page, true); } } rq->stats->buff_alloc_err++; @@ -685,15 +653,6 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix) int err; int i; - /* Check in advance that we have enough frames, instead of allocating - * one-by-one, failing and moving frames to the Reuse Ring. - */ - if (rq->xsk_pool && - unlikely(!xsk_buff_can_alloc(rq->xsk_pool, rq->mpwqe.pages_per_wqe))) { - err = -ENOMEM; - goto err; - } - if (test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state)) { err = mlx5e_alloc_rx_hd_mpwqe(rq); if (unlikely(err)) @@ -704,33 +663,16 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix) umr_wqe = mlx5_wq_cyc_get_wqe(wq, pi); memcpy(umr_wqe, &rq->mpwqe.umr_wqe, sizeof(struct mlx5e_umr_wqe)); - if (unlikely(rq->mpwqe.unaligned)) { - for (i = 0; i < rq->mpwqe.pages_per_wqe; i++, au++) { - dma_addr_t addr; - - err = mlx5e_page_alloc(rq, au); - if (unlikely(err)) - goto err_unmap; - /* Unaligned means XSK. */ - addr = xsk_buff_xdp_get_frame_dma(au->xsk); - umr_wqe->inline_ksms[i] = (struct mlx5_ksm) { - .key = rq->mkey_be, - .va = cpu_to_be64(addr), - }; - } - } else { - for (i = 0; i < rq->mpwqe.pages_per_wqe; i++, au++) { - dma_addr_t addr; + for (i = 0; i < rq->mpwqe.pages_per_wqe; i++, au++) { + dma_addr_t addr; - err = mlx5e_page_alloc(rq, au); - if (unlikely(err)) - goto err_unmap; - addr = rq->xsk_pool ? 
xsk_buff_xdp_get_frame_dma(au->xsk) : - page_pool_get_dma_addr(au->page); - umr_wqe->inline_mtts[i] = (struct mlx5_mtt) { - .ptag = cpu_to_be64(addr | MLX5_EN_WR), - }; - } + err = mlx5e_page_alloc_pool(rq, au); + if (unlikely(err)) + goto err_unmap; + addr = page_pool_get_dma_addr(au->page); + umr_wqe->inline_mtts[i] = (struct mlx5_mtt) { + .ptag = cpu_to_be64(addr | MLX5_EN_WR), + }; } bitmap_zero(wi->xdp_xmit_bitmap, rq->mpwqe.pages_per_wqe); @@ -740,9 +682,7 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix) cpu_to_be32((sq->pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) | MLX5_OPCODE_UMR); - offset = ix * rq->mpwqe.mtts_per_wqe; - if (!rq->mpwqe.unaligned) - offset = MLX5_ALIGNED_MTTS_OCTW(offset); + offset = (ix * rq->mpwqe.mtts_per_wqe) * sizeof(struct mlx5_mtt) / MLX5_OCTWORD; umr_wqe->uctrl.xlt_offset = cpu_to_be16(offset); sq->db.wqe_info[pi] = (struct mlx5e_icosq_wqe_info) { @@ -760,7 +700,7 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix) err_unmap: while (--i >= 0) { au--; - mlx5e_page_release(rq, au, true); + mlx5e_page_release_dynamic(rq, au->page, true); } err: @@ -793,12 +733,8 @@ void mlx5e_shampo_dealloc_hd(struct mlx5e_rq *rq, u16 len, u16 start, bool close hd_info = &shampo->info[index]; hd_info->addr = ALIGN_DOWN(hd_info->addr, PAGE_SIZE); if (hd_info->page != deleted_page) { - union mlx5e_alloc_unit au = { - .page = hd_info->page, - }; - deleted_page = hd_info->page; - mlx5e_page_release(rq, &au, false); + mlx5e_page_release_dynamic(rq, hd_info->page, false); } } @@ -821,38 +757,51 @@ static void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix) INDIRECT_CALLABLE_SCOPE bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq) { struct mlx5_wq_cyc *wq = &rq->wqe.wq; - u8 wqe_bulk; - int err; + int wqe_bulk, count; + bool busy = false; + u16 head; if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state))) return false; - wqe_bulk = rq->wqe.info.wqe_bulk; - - if (mlx5_wq_cyc_missing(wq) < wqe_bulk) + if (mlx5_wq_cyc_missing(wq) < rq->wqe.info.wqe_bulk) return false; if (rq->page_pool) page_pool_nid_changed(rq->page_pool, numa_mem_id()); - do { - u16 head = mlx5_wq_cyc_get_head(wq); + wqe_bulk = mlx5_wq_cyc_missing(wq); + head = mlx5_wq_cyc_get_head(wq); - err = mlx5e_alloc_rx_wqes(rq, head, wqe_bulk); - if (unlikely(err)) { - rq->stats->buff_alloc_err++; - break; - } + /* Don't allow any newly allocated WQEs to share the same page with old + * WQEs that aren't completed yet. Stop earlier. + */ + wqe_bulk -= (head + wqe_bulk) & rq->wqe.info.wqe_index_mask; - mlx5_wq_cyc_push_n(wq, wqe_bulk); - } while (mlx5_wq_cyc_missing(wq) >= wqe_bulk); + if (!rq->xsk_pool) + count = mlx5e_alloc_rx_wqes(rq, head, wqe_bulk); + else if (likely(!rq->xsk_pool->dma_need_sync)) + count = mlx5e_xsk_alloc_rx_wqes_batched(rq, head, wqe_bulk); + else + /* If dma_need_sync is true, it's more efficient to call + * xsk_buff_alloc in a loop, rather than xsk_buff_alloc_batch, + * because the latter does the same check and returns only one + * frame. 
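The bulk trimming above ("wqe_bulk -= (head + wqe_bulk) & wqe_index_mask") stops each refill at a page boundary, so newly posted WQEs never share a page with ones still in flight. Worked example, assuming 4 WQEs per page (wqe_index_mask = 3):

/* head = 5, wqe_bulk = 10: head + wqe_bulk = 15, and 15 & 3 = 3, so the
 * bulk shrinks to 7 and fills indices 5..11 only; the next refill starts
 * at index 12, the first WQE of a fresh page. */
static int toy_trim_bulk(int head, int wqe_bulk, int wqe_index_mask)
{
	return wqe_bulk - ((head + wqe_bulk) & wqe_index_mask);
}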
+ */ + count = mlx5e_xsk_alloc_rx_wqes(rq, head, wqe_bulk); + + mlx5_wq_cyc_push_n(wq, count); + if (unlikely(count != wqe_bulk)) { + rq->stats->buff_alloc_err++; + busy = true; + } /* ensure wqes are visible to device before updating doorbell record */ dma_wmb(); mlx5_wq_cyc_update_db_record(wq); - return !!err; + return busy; } void mlx5e_free_icosq_descs(struct mlx5e_icosq *sq) @@ -1020,7 +969,8 @@ INDIRECT_CALLABLE_SCOPE bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq) head = rq->mpwqe.actual_wq_head; i = missing; do { - alloc_err = mlx5e_alloc_rx_mpwqe(rq, head); + alloc_err = rq->xsk_pool ? mlx5e_xsk_alloc_rx_mpwqe(rq, head) : + mlx5e_alloc_rx_mpwqe(rq, head); if (unlikely(alloc_err)) break; @@ -1760,9 +1710,10 @@ static void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) goto free_wqe; } - skb = INDIRECT_CALL_2(rq->wqe.skb_from_cqe, + skb = INDIRECT_CALL_3(rq->wqe.skb_from_cqe, mlx5e_skb_from_cqe_linear, mlx5e_skb_from_cqe_nonlinear, + mlx5e_xsk_skb_from_cqe_linear, rq, wi, cqe_bcnt); if (!skb) { /* probably for XDP */ @@ -2109,12 +2060,8 @@ mlx5e_free_rx_shampo_hd_entry(struct mlx5e_rq *rq, u16 header_index) u64 addr = shampo->info[header_index].addr; if (((header_index + 1) & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1)) == 0) { - union mlx5e_alloc_unit au = { - .page = shampo->info[header_index].page, - }; - shampo->info[header_index].addr = ALIGN_DOWN(addr, PAGE_SIZE); - mlx5e_page_release(rq, &au, true); + mlx5e_page_release_dynamic(rq, shampo->info[header_index].page, true); } bitmap_clear(shampo->bitmap, header_index, 1); } @@ -2235,9 +2182,10 @@ static void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cq cqe_bcnt = mpwrq_get_cqe_byte_cnt(cqe); - skb = INDIRECT_CALL_2(rq->mpwqe.skb_from_cqe_mpwrq, + skb = INDIRECT_CALL_3(rq->mpwqe.skb_from_cqe_mpwrq, mlx5e_skb_from_cqe_mpwrq_linear, mlx5e_skb_from_cqe_mpwrq_nonlinear, + mlx5e_xsk_skb_from_cqe_mpwrq_linear, rq, wi, cqe_bcnt, head_offset, page_idx); if (!skb) goto mpwrq_cqe_out; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c index 575717186912..03c1841970f1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c @@ -641,17 +641,26 @@ static const struct counter_desc vnic_env_stats_dev_oob_desc[] = { VNIC_ENV_OFF(vport_env.internal_rq_out_of_buffer) }, }; +static const struct counter_desc vnic_env_stats_drop_desc[] = { + { "rx_oversize_pkts_buffer", + VNIC_ENV_OFF(vport_env.eth_wqe_too_small) }, +}; + #define NUM_VNIC_ENV_STEER_COUNTERS(dev) \ (MLX5_CAP_GEN(dev, nic_receive_steering_discard) ? \ ARRAY_SIZE(vnic_env_stats_steer_desc) : 0) #define NUM_VNIC_ENV_DEV_OOB_COUNTERS(dev) \ (MLX5_CAP_GEN(dev, vnic_env_int_rq_oob) ? \ ARRAY_SIZE(vnic_env_stats_dev_oob_desc) : 0) +#define NUM_VNIC_ENV_DROP_COUNTERS(dev) \ + (MLX5_CAP_GEN(dev, eth_wqe_too_small) ? 
\ + ARRAY_SIZE(vnic_env_stats_drop_desc) : 0) static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(vnic_env) { return NUM_VNIC_ENV_STEER_COUNTERS(priv->mdev) + - NUM_VNIC_ENV_DEV_OOB_COUNTERS(priv->mdev); + NUM_VNIC_ENV_DEV_OOB_COUNTERS(priv->mdev) + + NUM_VNIC_ENV_DROP_COUNTERS(priv->mdev); } static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(vnic_env) @@ -665,6 +674,11 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(vnic_env) for (i = 0; i < NUM_VNIC_ENV_DEV_OOB_COUNTERS(priv->mdev); i++) strcpy(data + (idx++) * ETH_GSTRING_LEN, vnic_env_stats_dev_oob_desc[i].format); + + for (i = 0; i < NUM_VNIC_ENV_DROP_COUNTERS(priv->mdev); i++) + strcpy(data + (idx++) * ETH_GSTRING_LEN, + vnic_env_stats_drop_desc[i].format); + return idx; } @@ -679,6 +693,11 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(vnic_env) for (i = 0; i < NUM_VNIC_ENV_DEV_OOB_COUNTERS(priv->mdev); i++) data[idx++] = MLX5E_READ_CTR32_BE(priv->stats.vnic.query_vnic_env_out, vnic_env_stats_dev_oob_desc, i); + + for (i = 0; i < NUM_VNIC_ENV_DROP_COUNTERS(priv->mdev); i++) + data[idx++] = MLX5E_READ_CTR32_BE(priv->stats.vnic.query_vnic_env_out, + vnic_env_stats_drop_desc, i); + return idx; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h index 99e321bfb744..9f781085be47 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h @@ -273,6 +273,10 @@ struct mlx5e_qcounter_stats { u32 rx_if_down_packets; }; +#define VNIC_ENV_GET(vnic_env_stats, c) \ + MLX5_GET(query_vnic_env_out, (vnic_env_stats)->query_vnic_env_out, \ + vport_env.c) + struct mlx5e_vnic_env_stats { __be64 query_vnic_env_out[MLX5_ST_SZ_QW(query_vnic_env_out)]; }; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 0872a214d2a3..70a7a61f9708 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -4463,7 +4463,7 @@ int mlx5e_configure_flower(struct net_device *dev, struct mlx5e_priv *priv, int err = 0; if (!mlx5_esw_hold(priv->mdev)) - return -EAGAIN; + return -EBUSY; mlx5_esw_get(priv->mdev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c index 833be29170a1..9a458a5d9853 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c @@ -31,6 +31,7 @@ */ #include <linux/irq.h> +#include <net/xdp_sock_drv.h> #include "en.h" #include "en/txrx.h" #include "en/xdp.h" @@ -86,26 +87,36 @@ void mlx5e_trigger_irq(struct mlx5e_icosq *sq) static bool mlx5e_napi_xsk_post(struct mlx5e_xdpsq *xsksq, struct mlx5e_rq *xskrq) { + bool need_wakeup = xsk_uses_need_wakeup(xskrq->xsk_pool); bool busy_xsk = false, xsk_rx_alloc_err; - /* Handle the race between the application querying need_wakeup and the - * driver setting it: - * 1. Update need_wakeup both before and after the TX. If it goes to - * "yes", it can only happen with the first update. - * 2. If the application queried need_wakeup before we set it, the - * packets will be transmitted anyway, even w/o a wakeup. - * 3. Give a chance to clear need_wakeup after new packets were queued - * for TX. + /* If SQ is empty, there are no TX completions to trigger NAPI, so set + * need_wakeup. Do it before queuing packets for TX to avoid race + * condition with userspace. 
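The rewritten wakeup logic in mlx5e_napi_xsk_post() (continuing below) spells out AF_XDP's need_wakeup handshake: advertise the flag before the final emptiness check so a racing userspace poll still sees it, and withdraw it only once hardware has queued work. A condensed sketch of the TX half, with a plain boolean standing in for xsk_set_tx_need_wakeup()/xsk_clear_tx_need_wakeup():

#include <stdbool.h>

struct toy_sq {
	int pc, cc;	/* producer/consumer counters; pc == cc means empty */
};

static void toy_tx_poll(struct toy_sq *sq, bool *need_wakeup)
{
	/* Empty SQ: no TX completion will re-arm NAPI, so ask userspace to
	 * wake us. Set the flag before queuing to close the race window. */
	if (sq->pc == sq->cc)
		*need_wakeup = true;

	/* ... transmit from the XSK TX ring, advancing sq->pc ... */

	/* Work is in flight now; completions will drive NAPI again. */
	if (sq->pc != sq->cc)
		*need_wakeup = false;
}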
*/ - mlx5e_xsk_update_tx_wakeup(xsksq); + if (need_wakeup && xsksq->pc == xsksq->cc) + xsk_set_tx_need_wakeup(xsksq->xsk_pool); busy_xsk |= mlx5e_xsk_tx(xsksq, MLX5E_TX_XSK_POLL_BUDGET); - mlx5e_xsk_update_tx_wakeup(xsksq); + /* If we queued some packets for TX, no need for wakeup anymore. */ + if (need_wakeup && xsksq->pc != xsksq->cc) + xsk_clear_tx_need_wakeup(xsksq->xsk_pool); + /* If WQ is empty, RX won't trigger NAPI, so set need_wakeup. Do it + * before refilling to avoid race condition with userspace. + */ + if (need_wakeup && !mlx5e_rqwq_get_cur_sz(xskrq)) + xsk_set_rx_need_wakeup(xskrq->xsk_pool); xsk_rx_alloc_err = INDIRECT_CALL_2(xskrq->post_wqes, mlx5e_post_rx_mpwqes, mlx5e_post_rx_wqes, xskrq); - busy_xsk |= mlx5e_xsk_update_rx_wakeup(xskrq, xsk_rx_alloc_err); + /* Ask for wakeup if WQ is not full after refill. */ + if (!need_wakeup) + busy_xsk |= xsk_rx_alloc_err; + else if (xsk_rx_alloc_err) + xsk_set_rx_need_wakeup(xskrq->xsk_pool); + else + xsk_clear_rx_need_wakeup(xskrq->xsk_pool); return busy_xsk; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c index 694c54066955..4f8a24d84a86 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c @@ -924,12 +924,16 @@ int mlx5_esw_qos_vport_update_group(struct mlx5_eswitch *esw, struct mlx5_esw_rate_group *group, struct netlink_ext_ack *extack) { - int err; + int err = 0; mutex_lock(&esw->state_lock); + if (!vport->qos.enabled && !group) + goto unlock; + err = esw_qos_vport_enable(esw, vport, 0, 0, extack); if (!err) err = esw_qos_vport_update_group(esw, vport, group, extack); +unlock: mutex_unlock(&esw->state_lock); return err; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index c98c6af21581..4e50df3139c6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -483,25 +483,27 @@ esw_setup_dests(struct mlx5_flow_destination *dest, !(attr->flags & MLX5_ATTR_FLAG_SLOW_PATH)) { esw_setup_sampler_dest(dest, flow_act, attr->sample_attr.sampler_id, *i); (*i)++; - } else if (attr->dest_ft) { - esw_setup_ft_dest(dest, flow_act, esw, attr, spec, *i); - (*i)++; } else if (attr->flags & MLX5_ATTR_FLAG_SLOW_PATH) { esw_setup_slow_path_dest(dest, flow_act, esw, *i); (*i)++; } else if (attr->flags & MLX5_ATTR_FLAG_ACCEPT) { esw_setup_accept_dest(dest, flow_act, chains, *i); (*i)++; - } else if (attr->dest_chain) { - err = esw_setup_chain_dest(dest, flow_act, chains, attr->dest_chain, - 1, 0, *i); - (*i)++; } else if (esw_is_indir_table(esw, attr)) { err = esw_setup_indir_table(dest, flow_act, esw, attr, spec, true, i); } else if (esw_is_chain_src_port_rewrite(esw, esw_attr)) { err = esw_setup_chain_src_port_rewrite(dest, flow_act, esw, chains, attr, i); } else { *i = esw_setup_vport_dests(dest, flow_act, esw, esw_attr, *i); + + if (attr->dest_ft) { + err = esw_setup_ft_dest(dest, flow_act, esw, attr, spec, *i); + (*i)++; + } else if (attr->dest_chain) { + err = esw_setup_chain_dest(dest, flow_act, chains, attr->dest_chain, + 1, 0, *i); + (*i)++; + } } return err; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c index 59205ba2ef7b..86ed87d704f7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/health.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c @@ -702,11 +702,25 @@ static 
const struct devlink_health_reporter_ops mlx5_fw_fatal_reporter_ops = { .dump = mlx5_fw_fatal_reporter_dump, }; -#define MLX5_REPORTER_FW_GRACEFUL_PERIOD 1200000 +#define MLX5_FW_REPORTER_ECPF_GRACEFUL_PERIOD 180000 +#define MLX5_FW_REPORTER_PF_GRACEFUL_PERIOD 60000 +#define MLX5_FW_REPORTER_VF_GRACEFUL_PERIOD 30000 +#define MLX5_FW_REPORTER_DEFAULT_GRACEFUL_PERIOD MLX5_FW_REPORTER_VF_GRACEFUL_PERIOD + static void mlx5_fw_reporters_create(struct mlx5_core_dev *dev) { struct mlx5_core_health *health = &dev->priv.health; struct devlink *devlink = priv_to_devlink(dev); + u64 grace_period; + + if (mlx5_core_is_ecpf(dev)) { + grace_period = MLX5_FW_REPORTER_ECPF_GRACEFUL_PERIOD; + } else if (mlx5_core_is_pf(dev)) { + grace_period = MLX5_FW_REPORTER_PF_GRACEFUL_PERIOD; + } else { + /* VF or SF */ + grace_period = MLX5_FW_REPORTER_DEFAULT_GRACEFUL_PERIOD; + } health->fw_reporter = devlink_health_reporter_create(devlink, &mlx5_fw_reporter_ops, @@ -718,7 +732,7 @@ static void mlx5_fw_reporters_create(struct mlx5_core_dev *dev) health->fw_fatal_reporter = devlink_health_reporter_create(devlink, &mlx5_fw_fatal_reporter_ops, - MLX5_REPORTER_FW_GRACEFUL_PERIOD, + grace_period, dev); if (IS_ERR(health->fw_fatal_reporter)) mlx5_core_warn(dev, "Failed to create fw fatal reporter, err = %ld\n", @@ -843,9 +857,6 @@ void mlx5_start_health_poll(struct mlx5_core_dev *dev) health->timer.expires = jiffies + msecs_to_jiffies(poll_interval_ms); add_timer(&health->timer); - - if (mlx5_core_is_pf(dev) && MLX5_CAP_MCAM_REG(dev, mrtc)) - queue_delayed_work(health->wq, &health->update_fw_log_ts_work, 0); } void mlx5_stop_health_poll(struct mlx5_core_dev *dev, bool disable_health) @@ -862,6 +873,14 @@ void mlx5_stop_health_poll(struct mlx5_core_dev *dev, bool disable_health) del_timer_sync(&health->timer); } +void mlx5_start_health_fw_log_up(struct mlx5_core_dev *dev) +{ + struct mlx5_core_health *health = &dev->priv.health; + + if (mlx5_core_is_pf(dev) && MLX5_CAP_MCAM_REG(dev, mrtc)) + queue_delayed_work(health->wq, &health->update_fw_log_ts_work, 0); +} + void mlx5_drain_health_wq(struct mlx5_core_dev *dev) { struct mlx5_core_health *health = &dev->priv.health; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c index 35f797cfd21e..4e3a75496dd9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c @@ -463,7 +463,6 @@ static const struct mlx5e_profile mlx5i_nic_profile = { .update_carrier = NULL, /* no HW update in IB link */ .rx_handlers = &mlx5i_rx_handlers, .max_tc = MLX5I_MAX_NUM_TC, - .rq_groups = MLX5E_NUM_RQ_GROUPS(REGULAR), .stats_grps = mlx5i_stats_grps, .stats_grps_num = mlx5i_stats_grps_num, }; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c index 0b86e78dbc0e..0227a521d301 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c @@ -349,7 +349,6 @@ static const struct mlx5e_profile mlx5i_pkey_nic_profile = { .update_stats = NULL, .rx_handlers = &mlx5i_rx_handlers, .max_tc = MLX5I_MAX_NUM_TC, - .rq_groups = MLX5E_NUM_RQ_GROUPS(REGULAR), }; const struct mlx5e_profile *mlx5i_pkey_get_profile(void) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index daa7442f31c9..0b459d841c3a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ 
b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -1092,7 +1092,7 @@ static void mlx5_cleanup_once(struct mlx5_core_dev *dev) mlx5_devcom_unregister_device(dev->priv.devcom); } -static int mlx5_function_setup(struct mlx5_core_dev *dev, u64 timeout) +static int mlx5_function_setup(struct mlx5_core_dev *dev, bool boot, u64 timeout) { int err; @@ -1130,10 +1130,12 @@ static int mlx5_function_setup(struct mlx5_core_dev *dev, u64 timeout) mlx5_cmd_set_state(dev, MLX5_CMDIF_STATE_UP); + mlx5_start_health_poll(dev); + err = mlx5_core_enable_hca(dev, 0); if (err) { mlx5_core_err(dev, "enable hca failed\n"); - goto err_cmd_cleanup; + goto stop_health_poll; } err = mlx5_core_set_issi(dev); @@ -1185,8 +1187,7 @@ static int mlx5_function_setup(struct mlx5_core_dev *dev, u64 timeout) mlx5_core_err(dev, "query hca failed\n"); goto reclaim_boot_pages; } - - mlx5_start_health_poll(dev); + mlx5_start_health_fw_log_up(dev); return 0; @@ -1194,6 +1195,8 @@ reclaim_boot_pages: mlx5_reclaim_startup_pages(dev); err_disable_hca: mlx5_core_disable_hca(dev, 0); +stop_health_poll: + mlx5_stop_health_poll(dev, boot); err_cmd_cleanup: mlx5_cmd_set_state(dev, MLX5_CMDIF_STATE_DOWN); mlx5_cmd_cleanup(dev); @@ -1205,7 +1208,6 @@ static int mlx5_function_teardown(struct mlx5_core_dev *dev, bool boot) { int err; - mlx5_stop_health_poll(dev, boot); err = mlx5_cmd_teardown_hca(dev); if (err) { mlx5_core_err(dev, "tear_down_hca failed, skip cleanup\n"); @@ -1213,6 +1215,7 @@ static int mlx5_function_teardown(struct mlx5_core_dev *dev, bool boot) } mlx5_reclaim_startup_pages(dev); mlx5_core_disable_hca(dev, 0); + mlx5_stop_health_poll(dev, boot); mlx5_cmd_set_state(dev, MLX5_CMDIF_STATE_DOWN); mlx5_cmd_cleanup(dev); @@ -1362,7 +1365,7 @@ int mlx5_init_one(struct mlx5_core_dev *dev) mutex_lock(&dev->intf_state_mutex); dev->state = MLX5_DEVICE_STATE_UP; - err = mlx5_function_setup(dev, mlx5_tout_ms(dev, FW_PRE_INIT_TIMEOUT)); + err = mlx5_function_setup(dev, true, mlx5_tout_ms(dev, FW_PRE_INIT_TIMEOUT)); if (err) goto err_function; @@ -1450,7 +1453,7 @@ int mlx5_load_one_devl_locked(struct mlx5_core_dev *dev, bool recovery) timeout = mlx5_tout_ms(dev, FW_PRE_INIT_ON_RECOVERY_TIMEOUT); else timeout = mlx5_tout_ms(dev, FW_PRE_INIT_TIMEOUT); - err = mlx5_function_setup(dev, timeout); + err = mlx5_function_setup(dev, false, timeout); if (err) goto err_function; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.h b/drivers/net/ethernet/mellanox/mlx5/core/wq.h index e5c4dcd1425e..4d629e5ddbc7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/wq.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.h @@ -123,7 +123,7 @@ static inline void mlx5_wq_cyc_push(struct mlx5_wq_cyc *wq) wq->cur_sz++; } -static inline void mlx5_wq_cyc_push_n(struct mlx5_wq_cyc *wq, u8 n) +static inline void mlx5_wq_cyc_push_n(struct mlx5_wq_cyc *wq, u16 n) { wq->wqe_ctr += n; wq->cur_sz += n; diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c index 3548fe1df7c8..987fe5c9d5a3 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c @@ -21,7 +21,6 @@ #define MLXSW_THERMAL_ASIC_TEMP_HOT 105000 /* 105C */ #define MLXSW_THERMAL_HYSTERESIS_TEMP 5000 /* 5C */ #define MLXSW_THERMAL_MODULE_TEMP_SHIFT (MLXSW_THERMAL_HYSTERESIS_TEMP * 2) -#define MLXSW_THERMAL_TEMP_SCORE_MAX GENMASK(31, 0) #define MLXSW_THERMAL_MAX_STATE 10 #define MLXSW_THERMAL_MIN_STATE 2 #define MLXSW_THERMAL_MAX_DUTY 255 @@ -101,8 +100,6 @@ struct mlxsw_thermal { 
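The mlx5_function_setup() rework above is mostly about keeping the unwind path a mirror image of the setup path: once mlx5_start_health_poll() moves before mlx5_core_enable_hca(), a matching stop_health_poll label has to slot in between err_disable_hca and err_cmd_cleanup, and teardown stops the poll only after disabling the HCA. The underlying kernel idiom, sketched with placeholder steps:

/* Sketch of the goto-unwind idiom; struct thing and the step functions
 * are placeholders, not a real API.
 */
struct thing;

int step_a(struct thing *t);
int step_b(struct thing *t);
int step_c(struct thing *t);
void undo_step_b(struct thing *t);
void undo_step_a(struct thing *t);

static int my_setup(struct thing *t)
{
	int err;

	err = step_a(t);
	if (err)
		return err;

	err = step_b(t);
	if (err)
		goto undo_a;

	err = step_c(t);
	if (err)
		goto undo_b;

	return 0;

	/* Labels run in strict reverse order of the steps above, so
	 * inserting a step means inserting exactly one label.
	 */
undo_b:
	undo_step_b(t);
undo_a:
	undo_step_a(t);
	return err;
}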
struct thermal_cooling_device *cdevs[MLXSW_MFCR_PWMS_MAX]; u8 cooling_levels[MLXSW_THERMAL_MAX_STATE + 1]; struct mlxsw_thermal_trip trips[MLXSW_THERMAL_NUM_TRIPS]; - unsigned int tz_highest_score; - struct thermal_zone_device *tz_highest_dev; struct mlxsw_thermal_area line_cards[]; }; @@ -193,34 +190,6 @@ mlxsw_thermal_module_trips_update(struct device *dev, struct mlxsw_core *core, return 0; } -static void mlxsw_thermal_tz_score_update(struct mlxsw_thermal *thermal, - struct thermal_zone_device *tzdev, - struct mlxsw_thermal_trip *trips, - int temp) -{ - struct mlxsw_thermal_trip *trip = trips; - unsigned int score, delta, i, shift = 1; - - /* Calculate thermal zone score, if temperature is above the hot - * threshold score is set to MLXSW_THERMAL_TEMP_SCORE_MAX. - */ - score = MLXSW_THERMAL_TEMP_SCORE_MAX; - for (i = MLXSW_THERMAL_TEMP_TRIP_NORM; i < MLXSW_THERMAL_NUM_TRIPS; - i++, trip++) { - if (temp < trip->temp) { - delta = DIV_ROUND_CLOSEST(temp, trip->temp - temp); - score = delta * shift; - break; - } - shift *= 256; - } - - if (score > thermal->tz_highest_score) { - thermal->tz_highest_score = score; - thermal->tz_highest_dev = tzdev; - } -} - static int mlxsw_thermal_bind(struct thermal_zone_device *tzdev, struct thermal_cooling_device *cdev) { @@ -286,9 +255,6 @@ static int mlxsw_thermal_get_temp(struct thermal_zone_device *tzdev, return err; } mlxsw_reg_mtmp_unpack(mtmp_pl, &temp, NULL, NULL, NULL, NULL); - if (temp > 0) - mlxsw_thermal_tz_score_update(thermal, tzdev, thermal->trips, - temp); *p_temp = temp; return 0; @@ -349,21 +315,6 @@ static int mlxsw_thermal_set_trip_hyst(struct thermal_zone_device *tzdev, return 0; } -static int mlxsw_thermal_trend_get(struct thermal_zone_device *tzdev, - int trip, enum thermal_trend *trend) -{ - struct mlxsw_thermal *thermal = tzdev->devdata; - - if (trip < 0 || trip >= MLXSW_THERMAL_NUM_TRIPS) - return -EINVAL; - - if (tzdev == thermal->tz_highest_dev) - return 1; - - *trend = THERMAL_TREND_STABLE; - return 0; -} - static struct thermal_zone_params mlxsw_thermal_params = { .no_hwmon = true, }; @@ -377,7 +328,6 @@ static struct thermal_zone_device_ops mlxsw_thermal_ops = { .set_trip_temp = mlxsw_thermal_set_trip_temp, .get_trip_hyst = mlxsw_thermal_get_trip_hyst, .set_trip_hyst = mlxsw_thermal_set_trip_hyst, - .get_trend = mlxsw_thermal_trend_get, }; static int mlxsw_thermal_module_bind(struct thermal_zone_device *tzdev, @@ -463,7 +413,6 @@ static int mlxsw_thermal_module_temp_get(struct thermal_zone_device *tzdev, int temp, crit_temp, emerg_temp; struct device *dev; u16 sensor_index; - int err; dev = thermal->bus_info->dev; sensor_index = MLXSW_REG_MTMP_MODULE_INDEX_MIN + tz->module; @@ -479,10 +428,8 @@ static int mlxsw_thermal_module_temp_get(struct thermal_zone_device *tzdev, return 0; /* Update trip points. 
*/ - err = mlxsw_thermal_module_trips_update(dev, thermal->core, tz, - crit_temp, emerg_temp); - if (!err && temp > 0) - mlxsw_thermal_tz_score_update(thermal, tzdev, tz->trips, temp); + mlxsw_thermal_module_trips_update(dev, thermal->core, tz, + crit_temp, emerg_temp); return 0; } @@ -546,22 +493,6 @@ mlxsw_thermal_module_trip_hyst_set(struct thermal_zone_device *tzdev, int trip, return 0; } -static int mlxsw_thermal_module_trend_get(struct thermal_zone_device *tzdev, - int trip, enum thermal_trend *trend) -{ - struct mlxsw_thermal_module *tz = tzdev->devdata; - struct mlxsw_thermal *thermal = tz->parent; - - if (trip < 0 || trip >= MLXSW_THERMAL_NUM_TRIPS) - return -EINVAL; - - if (tzdev == thermal->tz_highest_dev) - return 1; - - *trend = THERMAL_TREND_STABLE; - return 0; -} - static struct thermal_zone_device_ops mlxsw_thermal_module_ops = { .bind = mlxsw_thermal_module_bind, .unbind = mlxsw_thermal_module_unbind, @@ -571,7 +502,6 @@ static struct thermal_zone_device_ops mlxsw_thermal_module_ops = { .set_trip_temp = mlxsw_thermal_module_trip_temp_set, .get_trip_hyst = mlxsw_thermal_module_trip_hyst_get, .set_trip_hyst = mlxsw_thermal_module_trip_hyst_set, - .get_trend = mlxsw_thermal_module_trend_get, }; static int mlxsw_thermal_gearbox_temp_get(struct thermal_zone_device *tzdev, @@ -592,8 +522,6 @@ static int mlxsw_thermal_gearbox_temp_get(struct thermal_zone_device *tzdev, return err; mlxsw_reg_mtmp_unpack(mtmp_pl, &temp, NULL, NULL, NULL, NULL); - if (temp > 0) - mlxsw_thermal_tz_score_update(thermal, tzdev, tz->trips, temp); *p_temp = temp; return 0; @@ -608,7 +536,6 @@ static struct thermal_zone_device_ops mlxsw_thermal_gearbox_ops = { .set_trip_temp = mlxsw_thermal_module_trip_temp_set, .get_trip_hyst = mlxsw_thermal_module_trip_hyst_get, .set_trip_hyst = mlxsw_thermal_module_trip_hyst_set, - .get_trend = mlxsw_thermal_module_trend_get, }; static int mlxsw_thermal_get_max_state(struct thermal_cooling_device *cdev, diff --git a/drivers/net/ethernet/microchip/lan743x_ptp.c b/drivers/net/ethernet/microchip/lan743x_ptp.c index 6a11e2ceb013..da3ea905adbb 100644 --- a/drivers/net/ethernet/microchip/lan743x_ptp.c +++ b/drivers/net/ethernet/microchip/lan743x_ptp.c @@ -1049,6 +1049,10 @@ static int lan743x_ptpci_verify_pin_config(struct ptp_clock_info *ptp, enum ptp_pin_function func, unsigned int chan) { + struct lan743x_ptp *lan_ptp = + container_of(ptp, struct lan743x_ptp, ptp_clock_info); + struct lan743x_adapter *adapter = + container_of(lan_ptp, struct lan743x_adapter, ptp); int result = 0; /* Confirm the requested function is supported. 
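The lan743x hunk above recovers the adapter from the ptp_clock_info pointer with two container_of() steps, the usual pattern when a callback object is embedded in a larger driver structure. A self-contained illustration; my_ptp and my_adapter are hypothetical stand-ins for the driver's lan743x_ptp/lan743x_adapter layout:

#include <linux/container_of.h>
#include <linux/ptp_clock_kernel.h>

struct my_ptp {
	struct ptp_clock_info ptp_clock_info;	/* handed to PTP callbacks */
};

struct my_adapter {
	bool is_pci11x1x;
	struct my_ptp ptp;			/* embedded PTP state */
};

/* Walk back out: callback argument -> embedding PTP state -> adapter. */
static struct my_adapter *my_ptp_to_adapter(struct ptp_clock_info *info)
{
	struct my_ptp *p = container_of(info, struct my_ptp, ptp_clock_info);

	return container_of(p, struct my_adapter, ptp);
}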
Parameter @@ -1057,7 +1061,10 @@ static int lan743x_ptpci_verify_pin_config(struct ptp_clock_info *ptp, switch (func) { case PTP_PF_NONE: case PTP_PF_PEROUT: + break; case PTP_PF_EXTTS: + if (!adapter->is_pci11x1x) + result = -1; break; case PTP_PF_PHYSYNC: default: diff --git a/drivers/net/ethernet/microchip/lan966x/Makefile b/drivers/net/ethernet/microchip/lan966x/Makefile index 7360c1c7b53c..962f7c5f9e7d 100644 --- a/drivers/net/ethernet/microchip/lan966x/Makefile +++ b/drivers/net/ethernet/microchip/lan966x/Makefile @@ -10,4 +10,5 @@ lan966x-switch-objs := lan966x_main.o lan966x_phylink.o lan966x_port.o \ lan966x_vlan.o lan966x_fdb.o lan966x_mdb.o \ lan966x_ptp.o lan966x_fdma.o lan966x_lag.o \ lan966x_tc.o lan966x_mqprio.o lan966x_taprio.o \ - lan966x_tbf.o lan966x_cbs.o lan966x_ets.o + lan966x_tbf.o lan966x_cbs.o lan966x_ets.o \ + lan966x_tc_matchall.o lan966x_police.o lan966x_mirror.o diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c index b98d37c76edb..be2fd030cccb 100644 --- a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c @@ -344,7 +344,8 @@ static void lan966x_ifh_set_timestamp(void *ifh, u64 timestamp) IFH_POS_TIMESTAMP, IFH_LEN * 4, PACK, 0); } -static int lan966x_port_xmit(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t lan966x_port_xmit(struct sk_buff *skb, + struct net_device *dev) { struct lan966x_port *port = netdev_priv(dev); struct lan966x *lan966x = port->lan966x; diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.h b/drivers/net/ethernet/microchip/lan966x/lan966x_main.h index 78665eb9a3f1..9656071b8289 100644 --- a/drivers/net/ethernet/microchip/lan966x/lan966x_main.h +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.h @@ -264,6 +264,11 @@ struct lan966x { struct lan966x_rx rx; struct lan966x_tx tx; struct napi_struct napi; + + /* Mirror */ + struct lan966x_port *mirror_monitor; + u32 mirror_mask[2]; + u32 mirror_count; }; struct lan966x_port_config { @@ -276,6 +281,15 @@ struct lan966x_port_config { bool autoneg; }; +struct lan966x_port_tc { + bool ingress_shared_block; + unsigned long police_id; + unsigned long ingress_mirror_id; + unsigned long egress_mirror_id; + struct flow_stats police_stat; + struct flow_stats mirror_stat; +}; + struct lan966x_port { struct net_device *dev; struct lan966x *lan966x; @@ -302,6 +316,8 @@ struct lan966x_port { struct net_device *bond; bool lag_tx_active; enum netdev_lag_hash hash_type; + + struct lan966x_port_tc tc; }; extern const struct phylink_mac_ops lan966x_phylink_mac_ops; @@ -481,6 +497,34 @@ int lan966x_ets_add(struct lan966x_port *port, int lan966x_ets_del(struct lan966x_port *port, struct tc_ets_qopt_offload *qopt); +int lan966x_tc_matchall(struct lan966x_port *port, + struct tc_cls_matchall_offload *f, + bool ingress); + +int lan966x_police_port_add(struct lan966x_port *port, + struct flow_action *action, + struct flow_action_entry *act, + unsigned long police_id, + bool ingress, + struct netlink_ext_ack *extack); +int lan966x_police_port_del(struct lan966x_port *port, + unsigned long police_id, + struct netlink_ext_ack *extack); +void lan966x_police_port_stats(struct lan966x_port *port, + struct flow_stats *stats); + +int lan966x_mirror_port_add(struct lan966x_port *port, + struct flow_action_entry *action, + unsigned long mirror_id, + bool ingress, + struct netlink_ext_ack *extack); +int lan966x_mirror_port_del(struct lan966x_port *port, + bool 
ingress, + struct netlink_ext_ack *extack); +void lan966x_mirror_port_stats(struct lan966x_port *port, + struct flow_stats *stats, + bool ingress); + static inline void __iomem *lan_addr(void __iomem *base[], int id, int tinst, int tcnt, int gbase, int ginst, diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_mirror.c b/drivers/net/ethernet/microchip/lan966x/lan966x_mirror.c new file mode 100644 index 000000000000..7e1ba3f40c35 --- /dev/null +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_mirror.c @@ -0,0 +1,138 @@ +// SPDX-License-Identifier: GPL-2.0+ + +#include "lan966x_main.h" + +int lan966x_mirror_port_add(struct lan966x_port *port, + struct flow_action_entry *action, + unsigned long mirror_id, + bool ingress, + struct netlink_ext_ack *extack) +{ + struct lan966x *lan966x = port->lan966x; + struct lan966x_port *monitor_port; + + if (!lan966x_netdevice_check(action->dev)) { + NL_SET_ERR_MSG_MOD(extack, + "Destination not a lan966x port"); + return -EOPNOTSUPP; + } + + monitor_port = netdev_priv(action->dev); + + if (lan966x->mirror_mask[ingress] & BIT(port->chip_port)) { + NL_SET_ERR_MSG_MOD(extack, + "Mirror already exists"); + return -EEXIST; + } + + if (lan966x->mirror_monitor && + lan966x->mirror_monitor != monitor_port) { + NL_SET_ERR_MSG_MOD(extack, + "Cannot change mirror port while in use"); + return -EBUSY; + } + + if (port == monitor_port) { + NL_SET_ERR_MSG_MOD(extack, + "Cannot mirror the monitor port"); + return -EINVAL; + } + + lan966x->mirror_mask[ingress] |= BIT(port->chip_port); + + lan966x->mirror_monitor = monitor_port; + lan_wr(BIT(monitor_port->chip_port), lan966x, ANA_MIRRORPORTS); + + if (ingress) { + lan_rmw(ANA_PORT_CFG_SRC_MIRROR_ENA_SET(1), + ANA_PORT_CFG_SRC_MIRROR_ENA, + lan966x, ANA_PORT_CFG(port->chip_port)); + } else { + lan_wr(lan966x->mirror_mask[0], lan966x, + ANA_EMIRRORPORTS); + } + + lan966x->mirror_count++; + + if (ingress) + port->tc.ingress_mirror_id = mirror_id; + else + port->tc.egress_mirror_id = mirror_id; + + return 0; +} + +int lan966x_mirror_port_del(struct lan966x_port *port, + bool ingress, + struct netlink_ext_ack *extack) +{ + struct lan966x *lan966x = port->lan966x; + + if (!(lan966x->mirror_mask[ingress] & BIT(port->chip_port))) { + NL_SET_ERR_MSG_MOD(extack, + "There is no mirroring for this port"); + return -ENOENT; + } + + lan966x->mirror_mask[ingress] &= ~BIT(port->chip_port); + + if (ingress) { + lan_rmw(ANA_PORT_CFG_SRC_MIRROR_ENA_SET(0), + ANA_PORT_CFG_SRC_MIRROR_ENA, + lan966x, ANA_PORT_CFG(port->chip_port)); + } else { + lan_wr(lan966x->mirror_mask[0], lan966x, + ANA_EMIRRORPORTS); + } + + lan966x->mirror_count--; + + if (lan966x->mirror_count == 0) { + lan966x->mirror_monitor = NULL; + lan_wr(0, lan966x, ANA_MIRRORPORTS); + } + + if (ingress) + port->tc.ingress_mirror_id = 0; + else + port->tc.egress_mirror_id = 0; + + return 0; +} + +void lan966x_mirror_port_stats(struct lan966x_port *port, + struct flow_stats *stats, + bool ingress) +{ + struct rtnl_link_stats64 new_stats; + struct flow_stats *old_stats; + + old_stats = &port->tc.mirror_stat; + lan966x_stats_get(port->dev, &new_stats); + + if (ingress) { + flow_stats_update(stats, + new_stats.rx_bytes - old_stats->bytes, + new_stats.rx_packets - old_stats->pkts, + new_stats.rx_dropped - old_stats->drops, + old_stats->lastused, + FLOW_ACTION_HW_STATS_IMMEDIATE); + + old_stats->bytes = new_stats.rx_bytes; + old_stats->pkts = new_stats.rx_packets; + old_stats->drops = new_stats.rx_dropped; + old_stats->lastused = jiffies; + } else { + flow_stats_update(stats,
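lan966x has no dedicated mirror counters, so the stats helper above (its egress branch continues just below) reports deltas of the cumulative netdev counters: flow_stats_update() expects increments since the previous TC stats query, so the driver re-snapshots the totals after each report. The generic shape of that pattern, with a made-up snapshot struct:

#include <linux/jiffies.h>
#include <net/flow_offload.h>

struct stats_snap {
	u64 bytes, pkts, drops;
	unsigned long lastused;
};

/* Feed tc only what changed since the last query, then re-snapshot. */
static void report_delta(struct flow_stats *out, struct stats_snap *snap,
			 u64 bytes, u64 pkts, u64 drops)
{
	flow_stats_update(out, bytes - snap->bytes, pkts - snap->pkts,
			  drops - snap->drops, snap->lastused,
			  FLOW_ACTION_HW_STATS_IMMEDIATE);

	snap->bytes = bytes;
	snap->pkts = pkts;
	snap->drops = drops;
	snap->lastused = jiffies;
}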
+ new_stats.tx_bytes - old_stats->bytes, + new_stats.tx_packets - old_stats->pkts, + new_stats.tx_dropped - old_stats->drops, + old_stats->lastused, + FLOW_ACTION_HW_STATS_IMMEDIATE); + + old_stats->bytes = new_stats.tx_bytes; + old_stats->pkts = new_stats.tx_packets; + old_stats->drops = new_stats.tx_dropped; + old_stats->lastused = jiffies; + } +} diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_police.c b/drivers/net/ethernet/microchip/lan966x/lan966x_police.c new file mode 100644 index 000000000000..a9aec900d608 --- /dev/null +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_police.c @@ -0,0 +1,235 @@ +// SPDX-License-Identifier: GPL-2.0+ + +#include "lan966x_main.h" + +/* 0-8 : 9 port policers */ +#define POL_IDX_PORT 0 + +/* Policer order: Serial (QoS -> Port -> VCAP) */ +#define POL_ORDER 0x1d3 + +struct lan966x_tc_policer { + /* kilobit per second */ + u32 rate; + /* bytes */ + u32 burst; +}; + +static int lan966x_police_add(struct lan966x_port *port, + struct lan966x_tc_policer *pol, + u16 pol_idx) +{ + struct lan966x *lan966x = port->lan966x; + + /* Rate unit is 33 1/3 kbps */ + pol->rate = DIV_ROUND_UP(pol->rate * 3, 100); + /* Avoid zero burst size */ + pol->burst = pol->burst ?: 1; + /* Unit is 4kB */ + pol->burst = DIV_ROUND_UP(pol->burst, 4096); + + if (pol->rate > GENMASK(15, 0) || + pol->burst > GENMASK(6, 0)) + return -EINVAL; + + lan_wr(ANA_POL_MODE_DROP_ON_YELLOW_ENA_SET(0) | + ANA_POL_MODE_MARK_ALL_FRMS_RED_ENA_SET(0) | + ANA_POL_MODE_IPG_SIZE_SET(20) | + ANA_POL_MODE_FRM_MODE_SET(1) | + ANA_POL_MODE_OVERSHOOT_ENA_SET(1), + lan966x, ANA_POL_MODE(pol_idx)); + + lan_wr(ANA_POL_PIR_STATE_PIR_LVL_SET(0), + lan966x, ANA_POL_PIR_STATE(pol_idx)); + + lan_wr(ANA_POL_PIR_CFG_PIR_RATE_SET(pol->rate) | + ANA_POL_PIR_CFG_PIR_BURST_SET(pol->burst), + lan966x, ANA_POL_PIR_CFG(pol_idx)); + + return 0; +} + +static int lan966x_police_del(struct lan966x_port *port, + u16 pol_idx) +{ + struct lan966x *lan966x = port->lan966x; + + lan_wr(ANA_POL_MODE_DROP_ON_YELLOW_ENA_SET(0) | + ANA_POL_MODE_MARK_ALL_FRMS_RED_ENA_SET(0) | + ANA_POL_MODE_IPG_SIZE_SET(20) | + ANA_POL_MODE_FRM_MODE_SET(2) | + ANA_POL_MODE_OVERSHOOT_ENA_SET(1), + lan966x, ANA_POL_MODE(pol_idx)); + + lan_wr(ANA_POL_PIR_STATE_PIR_LVL_SET(0), + lan966x, ANA_POL_PIR_STATE(pol_idx)); + + lan_wr(ANA_POL_PIR_CFG_PIR_RATE_SET(GENMASK(14, 0)) | + ANA_POL_PIR_CFG_PIR_BURST_SET(0), + lan966x, ANA_POL_PIR_CFG(pol_idx)); + + return 0; +} + +static int lan966x_police_validate(struct lan966x_port *port, + const struct flow_action *action, + const struct flow_action_entry *act, + unsigned long police_id, + bool ingress, + struct netlink_ext_ack *extack) +{ + if (act->police.exceed.act_id != FLOW_ACTION_DROP) { + NL_SET_ERR_MSG_MOD(extack, + "Offload not supported when exceed action is not drop"); + return -EOPNOTSUPP; + } + + if (act->police.notexceed.act_id != FLOW_ACTION_PIPE && + act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) { + NL_SET_ERR_MSG_MOD(extack, + "Offload not supported when conform action is not pipe or ok"); + return -EOPNOTSUPP; + } + + if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT && + !flow_action_is_last_entry(action, act)) { + NL_SET_ERR_MSG_MOD(extack, + "Offload not supported when conform action is ok, but action is not last"); + return -EOPNOTSUPP; + } + + if (act->police.peakrate_bytes_ps || + act->police.avrate || act->police.overhead) { + NL_SET_ERR_MSG_MOD(extack, + "Offload not supported when peakrate/avrate/overhead is configured"); + return -EOPNOTSUPP; + } + + if
(act->police.rate_pkt_ps) { + NL_SET_ERR_MSG_MOD(extack, + "QoS offload does not support packets per second"); + return -EOPNOTSUPP; + } + + if (!ingress) { + NL_SET_ERR_MSG_MOD(extack, + "Policer is not supported on egress"); + return -EOPNOTSUPP; + } + + if (port->tc.ingress_shared_block) { + NL_SET_ERR_MSG_MOD(extack, + "Policer is not supported on shared ingress blocks"); + return -EOPNOTSUPP; + } + + if (port->tc.police_id && port->tc.police_id != police_id) { + NL_SET_ERR_MSG_MOD(extack, + "Only one policer per port is supported"); + return -EEXIST; + } + + return 0; +} + +int lan966x_police_port_add(struct lan966x_port *port, + struct flow_action *action, + struct flow_action_entry *act, + unsigned long police_id, + bool ingress, + struct netlink_ext_ack *extack) +{ + struct lan966x *lan966x = port->lan966x; + struct rtnl_link_stats64 new_stats; + struct lan966x_tc_policer pol; + struct flow_stats *old_stats; + int err; + + err = lan966x_police_validate(port, action, act, police_id, ingress, + extack); + if (err) + return err; + + memset(&pol, 0, sizeof(pol)); + + pol.rate = div_u64(act->police.rate_bytes_ps, 1000) * 8; + pol.burst = act->police.burst; + + err = lan966x_police_add(port, &pol, POL_IDX_PORT + port->chip_port); + if (err) { + NL_SET_ERR_MSG_MOD(extack, + "Failed to add policer to port"); + return err; + } + + lan_rmw(ANA_POL_CFG_PORT_POL_ENA_SET(1) | + ANA_POL_CFG_POL_ORDER_SET(POL_ORDER), + ANA_POL_CFG_PORT_POL_ENA | + ANA_POL_CFG_POL_ORDER, + lan966x, ANA_POL_CFG(port->chip_port)); + + port->tc.police_id = police_id; + + /* Setup initial stats */ + old_stats = &port->tc.police_stat; + lan966x_stats_get(port->dev, &new_stats); + old_stats->bytes = new_stats.rx_bytes; + old_stats->pkts = new_stats.rx_packets; + old_stats->drops = new_stats.rx_dropped; + old_stats->lastused = jiffies; + + return 0; +} + +int lan966x_police_port_del(struct lan966x_port *port, + unsigned long police_id, + struct netlink_ext_ack *extack) +{ + struct lan966x *lan966x = port->lan966x; + int err; + + if (port->tc.police_id != police_id) { + NL_SET_ERR_MSG_MOD(extack, + "Invalid policer id"); + return -EINVAL; + } + + err = lan966x_police_del(port, port->tc.police_id); + if (err) { + NL_SET_ERR_MSG_MOD(extack, + "Failed to delete policer from port"); + return err; + } + + lan_rmw(ANA_POL_CFG_PORT_POL_ENA_SET(0) | + ANA_POL_CFG_POL_ORDER_SET(POL_ORDER), + ANA_POL_CFG_PORT_POL_ENA | + ANA_POL_CFG_POL_ORDER, + lan966x, ANA_POL_CFG(port->chip_port)); + + port->tc.police_id = 0; + + return 0; +} + +void lan966x_police_port_stats(struct lan966x_port *port, + struct flow_stats *stats) +{ + struct rtnl_link_stats64 new_stats; + struct flow_stats *old_stats; + + old_stats = &port->tc.police_stat; + lan966x_stats_get(port->dev, &new_stats); + + flow_stats_update(stats, + new_stats.rx_bytes - old_stats->bytes, + new_stats.rx_packets - old_stats->pkts, + new_stats.rx_dropped - old_stats->drops, + old_stats->lastused, + FLOW_ACTION_HW_STATS_IMMEDIATE); + + old_stats->bytes = new_stats.rx_bytes; + old_stats->pkts = new_stats.rx_packets; + old_stats->drops = new_stats.rx_dropped; + old_stats->lastused = jiffies; +} diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_regs.h b/drivers/net/ethernet/microchip/lan966x/lan966x_regs.h index 4f00f95d66b6..1d90b93dd417 100644 --- a/drivers/net/ethernet/microchip/lan966x/lan966x_regs.h +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_regs.h @@ -90,6 +90,24 @@ enum lan966x_target { #define ANA_AUTOAGE_AGE_PERIOD_GET(x)\ FIELD_GET(ANA_AUTOAGE_AGE_PERIOD, x) +/*
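The unit conversions in the policer code above are easy to misread, so here is a worked example for a typical "tc ... action police rate 100mbit burst 64k" request, assuming the 33 1/3 kbit/s rate unit and 4 kB burst unit described in the comments:

/* rate_bytes_ps = 12500000 (100 Mbit/s)
 *   kbit/s    : 12500000 / 1000 * 8           = 100000
 *   PIR_RATE  : DIV_ROUND_UP(100000 * 3, 100) = 3000 units
 *               3000 * 33 1/3 kbit/s          = 100 Mbit/s
 *
 * burst = 65536 bytes
 *   PIR_BURST : DIV_ROUND_UP(65536, 4096)     = 16 units of 4 kB
 *
 * Both pass the range checks: 3000 <= GENMASK(15, 0) = 65535 and
 * 16 <= GENMASK(6, 0) = 127.
 */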
ANA:ANA:MIRRORPORTS */ +#define ANA_MIRRORPORTS __REG(TARGET_ANA, 0, 1, 29824, 0, 1, 244, 60, 0, 1, 4) + +#define ANA_MIRRORPORTS_MIRRORPORTS GENMASK(8, 0) +#define ANA_MIRRORPORTS_MIRRORPORTS_SET(x)\ + FIELD_PREP(ANA_MIRRORPORTS_MIRRORPORTS, x) +#define ANA_MIRRORPORTS_MIRRORPORTS_GET(x)\ + FIELD_GET(ANA_MIRRORPORTS_MIRRORPORTS, x) + +/* ANA:ANA:EMIRRORPORTS */ +#define ANA_EMIRRORPORTS __REG(TARGET_ANA, 0, 1, 29824, 0, 1, 244, 64, 0, 1, 4) + +#define ANA_EMIRRORPORTS_EMIRRORPORTS GENMASK(8, 0) +#define ANA_EMIRRORPORTS_EMIRRORPORTS_SET(x)\ + FIELD_PREP(ANA_EMIRRORPORTS_EMIRRORPORTS, x) +#define ANA_EMIRRORPORTS_EMIRRORPORTS_GET(x)\ + FIELD_GET(ANA_EMIRRORPORTS_EMIRRORPORTS, x) + /* ANA:ANA:FLOODING */ #define ANA_FLOODING(r) __REG(TARGET_ANA, 0, 1, 29824, 0, 1, 244, 68, r, 8, 4) @@ -330,6 +348,12 @@ enum lan966x_target { /* ANA:PORT:PORT_CFG */ #define ANA_PORT_CFG(g) __REG(TARGET_ANA, 0, 1, 28672, g, 9, 128, 112, 0, 1, 4) +#define ANA_PORT_CFG_SRC_MIRROR_ENA BIT(13) +#define ANA_PORT_CFG_SRC_MIRROR_ENA_SET(x)\ + FIELD_PREP(ANA_PORT_CFG_SRC_MIRROR_ENA, x) +#define ANA_PORT_CFG_SRC_MIRROR_ENA_GET(x)\ + FIELD_GET(ANA_PORT_CFG_SRC_MIRROR_ENA, x) + #define ANA_PORT_CFG_LEARNAUTO BIT(6) #define ANA_PORT_CFG_LEARNAUTO_SET(x)\ FIELD_PREP(ANA_PORT_CFG_LEARNAUTO, x) @@ -354,6 +378,21 @@ enum lan966x_target { #define ANA_PORT_CFG_PORTID_VAL_GET(x)\ FIELD_GET(ANA_PORT_CFG_PORTID_VAL, x) +/* ANA:PORT:POL_CFG */ +#define ANA_POL_CFG(g) __REG(TARGET_ANA, 0, 1, 28672, g, 9, 128, 116, 0, 1, 4) + +#define ANA_POL_CFG_PORT_POL_ENA BIT(17) +#define ANA_POL_CFG_PORT_POL_ENA_SET(x)\ + FIELD_PREP(ANA_POL_CFG_PORT_POL_ENA, x) +#define ANA_POL_CFG_PORT_POL_ENA_GET(x)\ + FIELD_GET(ANA_POL_CFG_PORT_POL_ENA, x) + +#define ANA_POL_CFG_POL_ORDER GENMASK(8, 0) +#define ANA_POL_CFG_POL_ORDER_SET(x)\ + FIELD_PREP(ANA_POL_CFG_POL_ORDER, x) +#define ANA_POL_CFG_POL_ORDER_GET(x)\ + FIELD_GET(ANA_POL_CFG_POL_ORDER, x) + /* ANA:PFC:PFC_CFG */ #define ANA_PFC_CFG(g) __REG(TARGET_ANA, 0, 1, 30720, g, 8, 64, 0, 0, 1, 4) @@ -408,6 +447,63 @@ enum lan966x_target { #define ANA_AGGR_CFG_AC_IP4_TCPUDP_ENA_GET(x)\ FIELD_GET(ANA_AGGR_CFG_AC_IP4_TCPUDP_ENA, x) +/* ANA:POL:POL_PIR_CFG */ +#define ANA_POL_PIR_CFG(g) __REG(TARGET_ANA, 0, 1, 16384, g, 345, 32, 0, 0, 1, 4) + +#define ANA_POL_PIR_CFG_PIR_RATE GENMASK(20, 6) +#define ANA_POL_PIR_CFG_PIR_RATE_SET(x)\ + FIELD_PREP(ANA_POL_PIR_CFG_PIR_RATE, x) +#define ANA_POL_PIR_CFG_PIR_RATE_GET(x)\ + FIELD_GET(ANA_POL_PIR_CFG_PIR_RATE, x) + +#define ANA_POL_PIR_CFG_PIR_BURST GENMASK(5, 0) +#define ANA_POL_PIR_CFG_PIR_BURST_SET(x)\ + FIELD_PREP(ANA_POL_PIR_CFG_PIR_BURST, x) +#define ANA_POL_PIR_CFG_PIR_BURST_GET(x)\ + FIELD_GET(ANA_POL_PIR_CFG_PIR_BURST, x) + +/* ANA:POL:POL_MODE_CFG */ +#define ANA_POL_MODE(g) __REG(TARGET_ANA, 0, 1, 16384, g, 345, 32, 8, 0, 1, 4) + +#define ANA_POL_MODE_DROP_ON_YELLOW_ENA BIT(11) +#define ANA_POL_MODE_DROP_ON_YELLOW_ENA_SET(x)\ + FIELD_PREP(ANA_POL_MODE_DROP_ON_YELLOW_ENA, x) +#define ANA_POL_MODE_DROP_ON_YELLOW_ENA_GET(x)\ + FIELD_GET(ANA_POL_MODE_DROP_ON_YELLOW_ENA, x) + +#define ANA_POL_MODE_MARK_ALL_FRMS_RED_ENA BIT(10) +#define ANA_POL_MODE_MARK_ALL_FRMS_RED_ENA_SET(x)\ + FIELD_PREP(ANA_POL_MODE_MARK_ALL_FRMS_RED_ENA, x) +#define ANA_POL_MODE_MARK_ALL_FRMS_RED_ENA_GET(x)\ + FIELD_GET(ANA_POL_MODE_MARK_ALL_FRMS_RED_ENA, x) + +#define ANA_POL_MODE_IPG_SIZE GENMASK(9, 5) +#define ANA_POL_MODE_IPG_SIZE_SET(x)\ + FIELD_PREP(ANA_POL_MODE_IPG_SIZE, x) +#define ANA_POL_MODE_IPG_SIZE_GET(x)\ + FIELD_GET(ANA_POL_MODE_IPG_SIZE, x) + +#define ANA_POL_MODE_FRM_MODE 
GENMASK(4, 3) +#define ANA_POL_MODE_FRM_MODE_SET(x)\ + FIELD_PREP(ANA_POL_MODE_FRM_MODE, x) +#define ANA_POL_MODE_FRM_MODE_GET(x)\ + FIELD_GET(ANA_POL_MODE_FRM_MODE, x) + +#define ANA_POL_MODE_OVERSHOOT_ENA BIT(0) +#define ANA_POL_MODE_OVERSHOOT_ENA_SET(x)\ + FIELD_PREP(ANA_POL_MODE_OVERSHOOT_ENA, x) +#define ANA_POL_MODE_OVERSHOOT_ENA_GET(x)\ + FIELD_GET(ANA_POL_MODE_OVERSHOOT_ENA, x) + +/* ANA:POL:POL_PIR_STATE */ +#define ANA_POL_PIR_STATE(g) __REG(TARGET_ANA, 0, 1, 16384, g, 345, 32, 12, 0, 1, 4) + +#define ANA_POL_PIR_STATE_PIR_LVL GENMASK(21, 0) +#define ANA_POL_PIR_STATE_PIR_LVL_SET(x)\ + FIELD_PREP(ANA_POL_PIR_STATE_PIR_LVL, x) +#define ANA_POL_PIR_STATE_PIR_LVL_GET(x)\ + FIELD_GET(ANA_POL_PIR_STATE_PIR_LVL, x) + /* CHIP_TOP:CUPHY_CFG:CUPHY_PORT_CFG */ #define CHIP_TOP_CUPHY_PORT_CFG(r) __REG(TARGET_CHIP_TOP, 0, 1, 16, 0, 1, 20, 8, r, 2, 4) diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_tc.c b/drivers/net/ethernet/microchip/lan966x/lan966x_tc.c index 336eb7ee0d60..651d5493ae55 100644 --- a/drivers/net/ethernet/microchip/lan966x/lan966x_tc.c +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_tc.c @@ -4,6 +4,8 @@ #include "lan966x_main.h" +static LIST_HEAD(lan966x_tc_block_cb_list); + static int lan966x_tc_setup_qdisc_mqprio(struct lan966x_port *port, struct tc_mqprio_qopt_offload *mqprio) { @@ -59,6 +61,52 @@ static int lan966x_tc_setup_qdisc_ets(struct lan966x_port *port, return -EOPNOTSUPP; } +static int lan966x_tc_block_cb(enum tc_setup_type type, void *type_data, + void *cb_priv, bool ingress) +{ + struct lan966x_port *port = cb_priv; + + switch (type) { + case TC_SETUP_CLSMATCHALL: + return lan966x_tc_matchall(port, type_data, ingress); + default: + return -EOPNOTSUPP; + } +} + +static int lan966x_tc_block_cb_ingress(enum tc_setup_type type, + void *type_data, void *cb_priv) +{ + return lan966x_tc_block_cb(type, type_data, cb_priv, true); +} + +static int lan966x_tc_block_cb_egress(enum tc_setup_type type, + void *type_data, void *cb_priv) +{ + return lan966x_tc_block_cb(type, type_data, cb_priv, false); +} + +static int lan966x_tc_setup_block(struct lan966x_port *port, + struct flow_block_offload *f) +{ + flow_setup_cb_t *cb; + bool ingress; + + if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS) { + cb = lan966x_tc_block_cb_ingress; + port->tc.ingress_shared_block = f->block_shared; + ingress = true; + } else if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS) { + cb = lan966x_tc_block_cb_egress; + ingress = false; + } else { + return -EOPNOTSUPP; + } + + return flow_block_cb_setup_simple(f, &lan966x_tc_block_cb_list, + cb, port, port, ingress); +} + int lan966x_tc_setup(struct net_device *dev, enum tc_setup_type type, void *type_data) { @@ -75,6 +123,8 @@ int lan966x_tc_setup(struct net_device *dev, enum tc_setup_type type, return lan966x_tc_setup_qdisc_cbs(port, type_data); case TC_SETUP_QDISC_ETS: return lan966x_tc_setup_qdisc_ets(port, type_data); + case TC_SETUP_BLOCK: + return lan966x_tc_setup_block(port, type_data); default: return -EOPNOTSUPP; } diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_tc_matchall.c b/drivers/net/ethernet/microchip/lan966x/lan966x_tc_matchall.c new file mode 100644 index 000000000000..7368433b9277 --- /dev/null +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_tc_matchall.c @@ -0,0 +1,95 @@ +// SPDX-License-Identifier: GPL-2.0+ + +#include "lan966x_main.h" + +static int lan966x_tc_matchall_add(struct lan966x_port *port, + struct tc_cls_matchall_offload *f, + bool ingress) +{ + struct 
flow_action_entry *act; + + if (!flow_offload_has_one_action(&f->rule->action)) { + NL_SET_ERR_MSG_MOD(f->common.extack, + "Only one action per filter is supported"); + return -EOPNOTSUPP; + } + + act = &f->rule->action.entries[0]; + switch (act->id) { + case FLOW_ACTION_POLICE: + return lan966x_police_port_add(port, &f->rule->action, act, + f->cookie, ingress, + f->common.extack); + case FLOW_ACTION_MIRRED: + return lan966x_mirror_port_add(port, act, f->cookie, + ingress, f->common.extack); + default: + NL_SET_ERR_MSG_MOD(f->common.extack, + "Unsupported action"); + return -EOPNOTSUPP; + } + + return 0; +} + +static int lan966x_tc_matchall_del(struct lan966x_port *port, + struct tc_cls_matchall_offload *f, + bool ingress) +{ + if (f->cookie == port->tc.police_id) { + return lan966x_police_port_del(port, f->cookie, + f->common.extack); + } else if (f->cookie == port->tc.ingress_mirror_id || + f->cookie == port->tc.egress_mirror_id) { + return lan966x_mirror_port_del(port, ingress, + f->common.extack); + } else { + NL_SET_ERR_MSG_MOD(f->common.extack, + "Unsupported action"); + return -EOPNOTSUPP; + } + + return 0; +} + +static int lan966x_tc_matchall_stats(struct lan966x_port *port, + struct tc_cls_matchall_offload *f, + bool ingress) +{ + if (f->cookie == port->tc.police_id) { + lan966x_police_port_stats(port, &f->stats); + } else if (f->cookie == port->tc.ingress_mirror_id || + f->cookie == port->tc.egress_mirror_id) { + lan966x_mirror_port_stats(port, &f->stats, ingress); + } else { + NL_SET_ERR_MSG_MOD(f->common.extack, + "Unsupported action"); + return -EOPNOTSUPP; + } + + return 0; +} + +int lan966x_tc_matchall(struct lan966x_port *port, + struct tc_cls_matchall_offload *f, + bool ingress) +{ + if (!tc_cls_can_offload_and_chain0(port->dev, &f->common)) { + NL_SET_ERR_MSG_MOD(f->common.extack, + "Only chain zero is supported"); + return -EOPNOTSUPP; + } + + switch (f->command) { + case TC_CLSMATCHALL_REPLACE: + return lan966x_tc_matchall_add(port, f, ingress); + case TC_CLSMATCHALL_DESTROY: + return lan966x_tc_matchall_del(port, f, ingress); + case TC_CLSMATCHALL_STATS: + return lan966x_tc_matchall_stats(port, f, ingress); + default: + return -EOPNOTSUPP; + } + + return 0; +} diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main.h b/drivers/net/ethernet/microchip/sparx5/sparx5_main.h index 8b42cad0e49c..7a83222caa73 100644 --- a/drivers/net/ethernet/microchip/sparx5/sparx5_main.h +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main.h @@ -305,7 +305,7 @@ struct frame_info { void sparx5_xtr_flush(struct sparx5 *sparx5, u8 grp); void sparx5_ifh_parse(u32 *ifh, struct frame_info *info); irqreturn_t sparx5_xtr_handler(int irq, void *_priv); -int sparx5_port_xmit_impl(struct sk_buff *skb, struct net_device *dev); +netdev_tx_t sparx5_port_xmit_impl(struct sk_buff *skb, struct net_device *dev); int sparx5_manual_injection_mode(struct sparx5 *sparx5); void sparx5_port_inj_timer_setup(struct sparx5_port *port); diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c b/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c index 21844beba72d..83c16ca5b30f 100644 --- a/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c @@ -222,13 +222,13 @@ static int sparx5_inject(struct sparx5 *sparx5, return NETDEV_TX_OK; } -int sparx5_port_xmit_impl(struct sk_buff *skb, struct net_device *dev) +netdev_tx_t sparx5_port_xmit_impl(struct sk_buff *skb, struct net_device *dev) { struct net_device_stats *stats = &dev->stats;
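The matchall support above hangs off TC_SETUP_BLOCK: lan966x_tc_setup_block() lets flow_block_cb_setup_simple() do the FLOW_BLOCK_BIND/UNBIND bookkeeping against a driver-global callback list, and each offloaded rule is later identified purely by its f->cookie. A trimmed sketch of that registration for a hypothetical driver; my_port and my_matchall are placeholders:

#include <linux/errno.h>
#include <net/flow_offload.h>
#include <net/pkt_cls.h>

struct my_port;
int my_matchall(struct my_port *port, struct tc_cls_matchall_offload *f);

static LIST_HEAD(my_block_cb_list);

static int my_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
{
	struct my_port *port = cb_priv;

	if (type != TC_SETUP_CLSMATCHALL)
		return -EOPNOTSUPP;

	return my_matchall(port, type_data);
}

static int my_setup_block(struct my_port *port, struct flow_block_offload *f)
{
	/* BIND allocates and links the block callback, UNBIND undoes it;
	 * the final 'true' restricts this driver to ingress blocks.
	 */
	return flow_block_cb_setup_simple(f, &my_block_cb_list, my_block_cb,
					  port, port, true);
}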
struct sparx5_port *port = netdev_priv(dev); struct sparx5 *sparx5 = port->sparx5; u32 ifh[IFH_LEN]; - int ret; + netdev_tx_t ret; memset(ifh, 0, IFH_LEN * 4); sparx5_set_port_ifh(ifh, port->portno); diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.c b/drivers/net/ethernet/netronome/nfp/nfp_main.c index 873429f7a6da..e66e548919d4 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_main.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_main.c @@ -691,6 +691,71 @@ static int nfp_pf_find_rtsyms(struct nfp_pf *pf) return 0; } +int nfp_net_pf_get_app_id(struct nfp_pf *pf) +{ + return nfp_pf_rtsym_read_optional(pf, "_pf%u_net_app_id", + NFP_APP_CORE_NIC); +} + +static u64 nfp_net_pf_get_app_cap(struct nfp_pf *pf) +{ + char name[32]; + int err = 0; + u64 val; + + snprintf(name, sizeof(name), "_pf%u_net_app_cap", nfp_cppcore_pcie_unit(pf->cpp)); + + val = nfp_rtsym_read_le(pf->rtbl, name, &err); + if (err) { + if (err != -ENOENT) + nfp_err(pf->cpp, "Unable to read symbol %s\n", name); + + return 0; + } + + return val; +} + +static int nfp_pf_cfg_hwinfo(struct nfp_pf *pf, bool sp_indiff) +{ + struct nfp_nsp *nsp; + char hwinfo[32]; + int err; + + nsp = nfp_nsp_open(pf->cpp); + if (IS_ERR(nsp)) + return PTR_ERR(nsp); + + snprintf(hwinfo, sizeof(hwinfo), "sp_indiff=%d", sp_indiff); + err = nfp_nsp_hwinfo_set(nsp, hwinfo, sizeof(hwinfo)); + /* Not a fatal error, no need to return an error and stop the driver from loading */ + if (err) { + nfp_warn(pf->cpp, "HWinfo(sp_indiff=%d) set failed: %d\n", sp_indiff, err); + } else { + /* Need to reinit eth_tbl since the eth table state may change + * after sp_indiff is configured. + */ + kfree(pf->eth_tbl); + pf->eth_tbl = __nfp_eth_read_ports(pf->cpp, nsp); + } + + nfp_nsp_close(nsp); + return 0; +} + +static int nfp_pf_nsp_cfg(struct nfp_pf *pf) +{ + bool sp_indiff = (nfp_net_pf_get_app_id(pf) == NFP_APP_FLOWER_NIC) || + (nfp_net_pf_get_app_cap(pf) & NFP_NET_APP_CAP_SP_INDIFF); + + return nfp_pf_cfg_hwinfo(pf, sp_indiff); +} + +static void nfp_pf_nsp_clean(struct nfp_pf *pf) +{ + nfp_pf_cfg_hwinfo(pf, false); +} + static int nfp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id) { @@ -791,10 +856,14 @@ static int nfp_pci_probe(struct pci_dev *pdev, goto err_fw_unload; } - err = nfp_net_pci_probe(pf); + err = nfp_pf_nsp_cfg(pf); if (err) goto err_fw_unload; + err = nfp_net_pci_probe(pf); + if (err) + goto err_nsp_clean; + err = nfp_hwmon_register(pf); if (err) { dev_err(&pdev->dev, "Failed to register hwmon info\n"); @@ -805,6 +874,8 @@ static int nfp_pci_probe(struct pci_dev *pdev, err_net_remove: nfp_net_pci_remove(pf); +err_nsp_clean: + nfp_pf_nsp_clean(pf); err_fw_unload: kfree(pf->rtbl); nfp_mip_close(pf->mip); @@ -844,6 +915,7 @@ static void __nfp_pci_shutdown(struct pci_dev *pdev, bool unload_fw) nfp_net_pci_remove(pf); + nfp_pf_nsp_clean(pf); vfree(pf->dumpspec); kfree(pf->rtbl); nfp_mip_close(pf->mip); diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.h b/drivers/net/ethernet/netronome/nfp/nfp_main.h index 6805af186f1b..afd3edfa2428 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_main.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_main.h @@ -65,7 +65,6 @@ struct nfp_dumpspec { * @num_vfs: Number of SR-IOV VFs enabled * @fw_loaded: Is the firmware loaded? * @unload_fw_on_remove:Do we need to unload firmware on driver removal? - * @sp_indiff: Is the firmware indifferent to physical port speed?
* @ctrl_vnic: Pointer to the control vNIC if available * @mip: MIP handle * @rtbl: RTsym table @@ -115,7 +114,6 @@ struct nfp_pf { bool fw_loaded; bool unload_fw_on_remove; - bool sp_indiff; struct nfp_net *ctrl_vnic; @@ -163,6 +161,7 @@ bool nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb); int nfp_pf_rtsym_read_optional(struct nfp_pf *pf, const char *format, unsigned int default_val); +int nfp_net_pf_get_app_id(struct nfp_pf *pf); u8 __iomem * nfp_pf_map_rtsym(struct nfp_pf *pf, const char *name, const char *sym_fmt, unsigned int min_size, struct nfp_cpp_area **area); diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.c index d81bd8697047..c3a763134e79 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.c @@ -148,14 +148,6 @@ int nfp_net_tlv_caps_parse(struct device *dev, u8 __iomem *ctrl_mem, true)) return -EINVAL; break; - case NFP_NET_CFG_TLV_TYPE_SP_INDIFF: - if (length) { - dev_err(dev, "Unexpected len of SP_INDIFF TLV:%u\n", length); - return -EINVAL; - } - - caps->sp_indiff = true; - break; default: if (!FIELD_GET(NFP_NET_CFG_TLV_HEADER_REQUIRED, hdr)) break; diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h index 1d53f721a1c8..6714d5e8fdab 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h @@ -14,6 +14,9 @@ #include <linux/types.h> +/* 64-bit per app capabilities */ +#define NFP_NET_APP_CAP_SP_INDIFF BIT_ULL(0) /* indifferent to port speed */ + /* Configuration BAR size. * * The configuration BAR is 8K in size, but due to @@ -492,10 +495,6 @@ * %NFP_NET_CFG_TLV_TYPE_CRYPTO_OPS_RX_SCAN: * Same as %NFP_NET_CFG_TLV_TYPE_CRYPTO_OPS, but crypto TLS does stream scan * RX sync, rather than kernel-assisted sync. - * - * %NFP_NET_CFG_TLV_TYPE_SP_INDIFF: - * Empty, indicate the firmware is indifferent to port speed. Then no need to - * reload driver and firmware when port speed is changed. 
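The removed SP_INDIFF TLV is replaced above by bit 0 of a 64-bit per-app capability word read from the "_pf%u_net_app_cap" rtsym, with flower firmware implying the property regardless. Reduced to the bit-test itself; the names here are illustrative and the cap word is assumed to be read already:

#include <linux/bits.h>
#include <linux/types.h>

#define MY_APP_CAP_SP_INDIFF	BIT_ULL(0)	/* indifferent to port speed */

static bool my_sp_indiff(u64 app_cap, int app_id, int flower_app_id)
{
	/* Flower firmware implies it; others must advertise the cap bit. */
	return app_id == flower_app_id || (app_cap & MY_APP_CAP_SP_INDIFF);
}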
*/ #define NFP_NET_CFG_TLV_TYPE_UNKNOWN 0 #define NFP_NET_CFG_TLV_TYPE_RESERVED 1 @@ -509,7 +508,6 @@ #define NFP_NET_CFG_TLV_TYPE_CRYPTO_OPS 11 /* see crypto/fw.h */ #define NFP_NET_CFG_TLV_TYPE_VNIC_STATS 12 #define NFP_NET_CFG_TLV_TYPE_CRYPTO_OPS_RX_SCAN 13 -#define NFP_NET_CFG_TLV_TYPE_SP_INDIFF 14 struct device; @@ -524,7 +522,6 @@ struct device; * @vnic_stats_off: offset of vNIC stats area * @vnic_stats_cnt: number of vNIC stats * @tls_resync_ss: TLS resync will be performed via stream scan - * @sp_indiff: Firmware is indifferent to port speed */ struct nfp_net_tlv_caps { u32 me_freq_mhz; @@ -537,7 +534,6 @@ struct nfp_net_tlv_caps { unsigned int vnic_stats_off; unsigned int vnic_stats_cnt; unsigned int tls_resync_ss:1; - unsigned int sp_indiff:1; }; int nfp_net_tlv_caps_parse(struct device *dev, u8 __iomem *ctrl_mem, diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c index db58532364b6..22a5d2419084 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c @@ -228,6 +228,37 @@ nfp_net_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) nfp_get_drvinfo(nn->app, nn->pdev, vnic_version, drvinfo); } +static int +nfp_net_nway_reset(struct net_device *netdev) +{ + struct nfp_eth_table_port *eth_port; + struct nfp_port *port; + int err; + + port = nfp_port_from_netdev(netdev); + eth_port = nfp_port_get_eth_port(port); + if (!eth_port) + return -EOPNOTSUPP; + + if (!netif_running(netdev)) + return 0; + + err = nfp_eth_set_configured(port->app->cpp, eth_port->index, false); + if (err) { + netdev_info(netdev, "Link down failed: %d\n", err); + return err; + } + + err = nfp_eth_set_configured(port->app->cpp, eth_port->index, true); + if (err) { + netdev_info(netdev, "Link up failed: %d\n", err); + return err; + } + + netdev_info(netdev, "Link reset succeeded\n"); + return 0; +} + static void nfp_app_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) { @@ -290,8 +321,13 @@ nfp_net_get_link_ksettings(struct net_device *netdev, if (eth_port) { ethtool_link_ksettings_add_link_mode(cmd, supported, Pause); ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause); - cmd->base.autoneg = eth_port->aneg != NFP_ANEG_DISABLED ? - AUTONEG_ENABLE : AUTONEG_DISABLE; + if (eth_port->supp_aneg) { + ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg); + if (eth_port->aneg == NFP_ANEG_AUTO) { + ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg); + cmd->base.autoneg = AUTONEG_ENABLE; + } + } nfp_net_set_fec_link_mode(eth_port, cmd); } @@ -327,6 +363,7 @@ static int nfp_net_set_link_ksettings(struct net_device *netdev, const struct ethtool_link_ksettings *cmd) { + bool req_aneg = (cmd->base.autoneg == AUTONEG_ENABLE); struct nfp_eth_table_port *eth_port; struct nfp_port *port; struct nfp_nsp *nsp; @@ -346,13 +383,25 @@ nfp_net_set_link_ksettings(struct net_device *netdev, if (IS_ERR(nsp)) return PTR_ERR(nsp); - err = __nfp_eth_set_aneg(nsp, cmd->base.autoneg == AUTONEG_ENABLE ? - NFP_ANEG_AUTO : NFP_ANEG_DISABLED); + if (req_aneg && !eth_port->supp_aneg) { + netdev_warn(netdev, "Autoneg is not supported.\n"); + err = -EOPNOTSUPP; + goto err_bad_set; + } + + err = __nfp_eth_set_aneg(nsp, req_aneg ? 
NFP_ANEG_AUTO : NFP_ANEG_DISABLED); if (err) goto err_bad_set; + if (cmd->base.speed != SPEED_UNKNOWN) { u32 speed = cmd->base.speed / eth_port->lanes; + if (req_aneg) { + netdev_err(netdev, "Speed changing is not allowed when working in autoneg mode.\n"); + err = -EINVAL; + goto err_bad_set; + } + err = __nfp_eth_set_speed(nsp, speed); if (err) goto err_bad_set; @@ -996,7 +1045,7 @@ nfp_port_get_fecparam(struct net_device *netdev, return 0; param->fec = nfp_port_fec_nsp_to_ethtool(eth_port->fec_modes_supported); - param->active_fec = nfp_port_fec_nsp_to_ethtool(eth_port->fec); + param->active_fec = nfp_port_fec_nsp_to_ethtool(BIT(eth_port->act_fec)); return 0; } @@ -1823,6 +1872,7 @@ static const struct ethtool_ops nfp_net_ethtool_ops = { ETHTOOL_COALESCE_MAX_FRAMES | ETHTOOL_COALESCE_USE_ADAPTIVE, .get_drvinfo = nfp_net_get_drvinfo, + .nway_reset = nfp_net_nway_reset, .get_link = ethtool_op_get_link, .get_ringparam = nfp_net_get_ringparam, .set_ringparam = nfp_net_set_ringparam, @@ -1860,6 +1910,7 @@ static const struct ethtool_ops nfp_net_ethtool_ops = { const struct ethtool_ops nfp_port_ethtool_ops = { .get_drvinfo = nfp_app_get_drvinfo, + .nway_reset = nfp_net_nway_reset, .get_link = ethtool_op_get_link, .get_strings = nfp_port_get_strings, .get_ethtool_stats = nfp_port_get_stats, diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c index e2d4c487e8de..3bae92dc899e 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c @@ -77,12 +77,6 @@ static int nfp_net_pf_get_num_ports(struct nfp_pf *pf) return nfp_pf_rtsym_read_optional(pf, "nfd_cfg_pf%u_num_ports", 1); } -static int nfp_net_pf_get_app_id(struct nfp_pf *pf) -{ - return nfp_pf_rtsym_read_optional(pf, "_pf%u_net_app_id", - NFP_APP_CORE_NIC); -} - static void nfp_net_pf_free_vnic(struct nfp_pf *pf, struct nfp_net *nn) { if (nfp_net_is_data_vnic(nn)) @@ -206,7 +200,6 @@ nfp_net_pf_alloc_vnics(struct nfp_pf *pf, void __iomem *ctrl_bar, nn->port->link_cb = nfp_net_refresh_port_table; ctrl_bar += NFP_PF_CSR_SLICE_SIZE; - pf->sp_indiff |= nn->tlv_caps.sp_indiff; /* Kill the vNIC if app init marked it as invalid */ if (nn->port && nn->port->type == NFP_PORT_INVALID) @@ -308,37 +301,6 @@ err_prev_deinit: return err; } -static int nfp_net_pf_cfg_nsp(struct nfp_pf *pf, bool sp_indiff) -{ - struct nfp_nsp *nsp; - char hwinfo[32]; - int err; - - nsp = nfp_nsp_open(pf->cpp); - if (IS_ERR(nsp)) { - err = PTR_ERR(nsp); - return err; - } - - snprintf(hwinfo, sizeof(hwinfo), "sp_indiff=%d", sp_indiff); - err = nfp_nsp_hwinfo_set(nsp, hwinfo, sizeof(hwinfo)); - if (err) - nfp_warn(pf->cpp, "HWinfo(sp_indiff=%d) set failed: %d\n", sp_indiff, err); - - nfp_nsp_close(nsp); - return err; -} - -static int nfp_net_pf_init_nsp(struct nfp_pf *pf) -{ - return nfp_net_pf_cfg_nsp(pf, pf->sp_indiff); -} - -static void nfp_net_pf_clean_nsp(struct nfp_pf *pf) -{ - (void)nfp_net_pf_cfg_nsp(pf, false); -} - static int nfp_net_pf_app_init(struct nfp_pf *pf, u8 __iomem *qc_bar, unsigned int stride) { @@ -350,8 +312,6 @@ nfp_net_pf_app_init(struct nfp_pf *pf, u8 __iomem *qc_bar, unsigned int stride) if (IS_ERR(pf->app)) return PTR_ERR(pf->app); - pf->sp_indiff |= pf->app->type->id == NFP_APP_FLOWER_NIC; - devl_lock(devlink); err = nfp_app_init(pf->app); devl_unlock(devlink); @@ -814,13 +774,9 @@ int nfp_net_pci_probe(struct nfp_pf *pf) if (err) goto err_clean_ddir; - err = nfp_net_pf_init_nsp(pf); - if (err) - goto err_free_vnics; - err =
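The new ethtool checks above encode two rules: AUTONEG_ENABLE is only acceptable if the port advertises autoneg support, and a fixed speed cannot be combined with autoneg. Pulled out into a hypothetical standalone validator:

#include <linux/errno.h>
#include <linux/ethtool.h>

static int my_validate_ksettings(const struct ethtool_link_ksettings *cmd,
				 bool supp_aneg)
{
	bool req_aneg = cmd->base.autoneg == AUTONEG_ENABLE;

	if (req_aneg && !supp_aneg)
		return -EOPNOTSUPP;	/* port cannot autonegotiate */

	/* A fixed speed request only makes sense with autoneg off. */
	if (req_aneg && cmd->base.speed != SPEED_UNKNOWN)
		return -EINVAL;

	return 0;
}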
nfp_net_pf_alloc_irqs(pf); if (err) - goto err_clean_nsp; + goto err_free_vnics; err = nfp_net_pf_app_start(pf); if (err) @@ -839,8 +795,6 @@ err_stop_app: nfp_net_pf_app_stop(pf); err_free_irqs: nfp_net_pf_free_irqs(pf); -err_clean_nsp: - nfp_net_pf_clean_nsp(pf); err_free_vnics: nfp_net_pf_free_vnics(pf); err_clean_ddir: @@ -871,7 +825,6 @@ void nfp_net_pci_remove(struct nfp_pf *pf) nfp_net_pf_free_vnic(pf, nn); } - nfp_net_pf_clean_nsp(pf); nfp_net_pf_app_stop(pf); /* stop app first, to avoid double free of ctrl vNIC's ddir */ nfp_net_debugfs_dir_clean(&pf->ddir); diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h index 77d66855be42..992d72ac98d3 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h @@ -132,6 +132,7 @@ enum nfp_eth_fec { * @ports.interface: interface (module) plugged in * @ports.media: media type of the @interface * @ports.fec: forward error correction mode + * @ports.act_fec: active forward error correction mode * @ports.aneg: auto negotiation mode * @ports.mac_addr: interface MAC address * @ports.label_port: port id @@ -162,6 +163,7 @@ struct nfp_eth_table { enum nfp_eth_media media; enum nfp_eth_fec fec; + enum nfp_eth_fec act_fec; enum nfp_eth_aneg aneg; u8 mac_addr[ETH_ALEN]; @@ -172,6 +174,7 @@ struct nfp_eth_table { bool enabled; bool tx_enabled; bool rx_enabled; + bool supp_aneg; bool override_changed; diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c index 4cc38799eabc..bb64efec4c46 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c @@ -27,6 +27,7 @@ #define NSP_ETH_PORT_PHYLABEL GENMASK_ULL(59, 54) #define NSP_ETH_PORT_FEC_SUPP_BASER BIT_ULL(60) #define NSP_ETH_PORT_FEC_SUPP_RS BIT_ULL(61) +#define NSP_ETH_PORT_SUPP_ANEG BIT_ULL(63) #define NSP_ETH_PORT_LANES_MASK cpu_to_le64(NSP_ETH_PORT_LANES) @@ -40,6 +41,7 @@ #define NSP_ETH_STATE_OVRD_CHNG BIT_ULL(22) #define NSP_ETH_STATE_ANEG GENMASK_ULL(25, 23) #define NSP_ETH_STATE_FEC GENMASK_ULL(27, 26) +#define NSP_ETH_STATE_ACT_FEC GENMASK_ULL(29, 28) #define NSP_ETH_CTRL_CONFIGURED BIT_ULL(0) #define NSP_ETH_CTRL_ENABLED BIT_ULL(1) @@ -170,7 +172,14 @@ nfp_eth_port_translate(struct nfp_nsp *nsp, const union eth_table_entry *src, if (dst->fec_modes_supported) dst->fec_modes_supported |= NFP_FEC_AUTO | NFP_FEC_DISABLED; - dst->fec = 1 << FIELD_GET(NSP_ETH_STATE_FEC, state); + dst->fec = FIELD_GET(NSP_ETH_STATE_FEC, state); + dst->act_fec = dst->fec; + + if (nfp_nsp_get_abi_ver_minor(nsp) < 33) + return; + + dst->act_fec = FIELD_GET(NSP_ETH_STATE_ACT_FEC, state); + dst->supp_aneg = FIELD_GET(NSP_ETH_PORT_SUPP_ANEG, port); } static void diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c index 3ec6d1319a8a..a73d061d9fcb 100644 --- a/drivers/net/ethernet/realtek/r8169_main.c +++ b/drivers/net/ethernet/realtek/r8169_main.c @@ -2443,6 +2443,11 @@ static void rtl_wait_txrx_fifo_empty(struct rtl8169_private *tp) } } +static void rtl_disable_rxdvgate(struct rtl8169_private *tp) +{ + RTL_W32(tp, MISC, RTL_R32(tp, MISC) & ~RXDV_GATED_EN); +} + static void rtl_enable_rxdvgate(struct rtl8169_private *tp) { RTL_W32(tp, MISC, RTL_R32(tp, MISC) | RXDV_GATED_EN); @@ -2960,7 +2965,7 @@ static void rtl_hw_start_8168g(struct rtl8169_private *tp) rtl_reset_packet_filter(tp); rtl_eri_write(tp, 
0x2f8, ERIAR_MASK_0011, 0x1d8f); - RTL_W32(tp, MISC, RTL_R32(tp, MISC) & ~RXDV_GATED_EN); + rtl_disable_rxdvgate(tp); rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000); rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000); @@ -3198,7 +3203,7 @@ static void rtl_hw_start_8168h_1(struct rtl8169_private *tp) rtl_eri_write(tp, 0x5f0, ERIAR_MASK_0011, 0x4f87); - RTL_W32(tp, MISC, RTL_R32(tp, MISC) & ~RXDV_GATED_EN); + rtl_disable_rxdvgate(tp); rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000); rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000); @@ -3249,7 +3254,7 @@ static void rtl_hw_start_8168ep(struct rtl8169_private *tp) rtl_eri_write(tp, 0x5f0, ERIAR_MASK_0011, 0x4f87); - RTL_W32(tp, MISC, RTL_R32(tp, MISC) & ~RXDV_GATED_EN); + rtl_disable_rxdvgate(tp); rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000); rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000); @@ -3313,7 +3318,7 @@ static void rtl_hw_start_8117(struct rtl8169_private *tp) rtl_eri_write(tp, 0x5f0, ERIAR_MASK_0011, 0x4f87); - RTL_W32(tp, MISC, RTL_R32(tp, MISC) & ~RXDV_GATED_EN); + rtl_disable_rxdvgate(tp); rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000); rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000); @@ -3557,8 +3562,7 @@ static void rtl_hw_start_8125_common(struct rtl8169_private *tp) else rtl8125a_config_eee_mac(tp); - RTL_W32(tp, MISC, RTL_R32(tp, MISC) & ~RXDV_GATED_EN); - udelay(10); + rtl_disable_rxdvgate(tp); } static void rtl_hw_start_8125a_2(struct rtl8169_private *tp) diff --git a/drivers/net/ethernet/sunplus/spl2sw_driver.c b/drivers/net/ethernet/sunplus/spl2sw_driver.c index b89d72242002..9be585237277 100644 --- a/drivers/net/ethernet/sunplus/spl2sw_driver.c +++ b/drivers/net/ethernet/sunplus/spl2sw_driver.c @@ -249,8 +249,8 @@ static int spl2sw_nvmem_get_mac_address(struct device *dev, struct device_node * /* Check if mac address is valid */ if (!is_valid_ether_addr(mac)) { - kfree(mac); dev_info(dev, "Invalid mac address in nvmem (%pM)!\n", mac); + kfree(mac); return -EINVAL; } diff --git a/drivers/net/ipa/gsi.c b/drivers/net/ipa/gsi.c index f8036ee78647..bea2da1c4c51 100644 --- a/drivers/net/ipa/gsi.c +++ b/drivers/net/ipa/gsi.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. - * Copyright (C) 2018-2021 Linaro Ltd. + * Copyright (C) 2018-2022 Linaro Ltd. */ #include <linux/types.h> @@ -56,9 +56,9 @@ * element can also contain an immediate command, requesting the IPA perform * actions other than data transfer. * - * Each TRE refers to a block of data--also located DRAM. After writing one - * or more TREs to a channel, the writer (either the IPA or an EE) writes a - * doorbell register to inform the receiving side how many elements have + * Each TRE refers to a block of data--also located in DRAM. After writing + * one or more TREs to a channel, the writer (either the IPA or an EE) writes + * a doorbell register to inform the receiving side how many elements have * been written. * * Each channel has a GSI "event ring" associated with it. An event ring @@ -1347,8 +1347,8 @@ gsi_event_trans(struct gsi *gsi, struct gsi_event *event) * we update transactions to record their actual received lengths. * * When an event for a TX channel arrives we use information in the - * transaction to report the number of requests and bytes have been - * transferred. + * transaction to report the number of requests and bytes that have + * been transferred. 
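The spl2sw one-liner above is a textbook use-after-free fix: %pM dereferences the buffer, so the dev_info() has to run before kfree(), not after. The corrected shape, as a minimal self-contained helper (the function itself is illustrative):

#include <linux/device.h>
#include <linux/slab.h>

static int report_bad_mac(struct device *dev, u8 *mac)
{
	/* Consume the buffer first: %pM reads six bytes through 'mac',
	 * so freeing it beforehand would be a use-after-free.
	 */
	dev_info(dev, "Invalid mac address in nvmem (%pM)!\n", mac);
	kfree(mac);

	return -EINVAL;
}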
* * This function is called whenever we learn that the GSI hardware has filled * new events since the last time we checked. The ring's index field tells @@ -1474,7 +1474,7 @@ void gsi_channel_doorbell(struct gsi_channel *channel) iowrite32(val, gsi->virt + GSI_CH_C_DOORBELL_0_OFFSET(channel_id)); } -/* Consult hardware, move any newly completed transactions to completed list */ +/* Consult hardware, move newly completed transactions to completed state */ void gsi_channel_update(struct gsi_channel *channel) { u32 evt_ring_id = channel->evt_ring_id; @@ -1515,17 +1515,17 @@ void gsi_channel_update(struct gsi_channel *channel) * * Return: Transaction pointer, or null if none are available * - * This function returns the first entry on a channel's completed transaction - * list. If that list is empty, the hardware is consulted to determine - * whether any new transactions have completed. If so, they're moved to the - * completed list and the new first entry is returned. If there are no more - * completed transactions, a null pointer is returned. + * This function returns the first of a channel's completed transactions. + * If no transactions are in completed state, the hardware is consulted to + * determine whether any new transactions have completed. If so, they're + * moved to completed state and the first such transaction is returned. + * If there are no more completed transactions, a null pointer is returned. */ static struct gsi_trans *gsi_channel_poll_one(struct gsi_channel *channel) { struct gsi_trans *trans; - /* Get the first transaction from the completed list */ + /* Get the first completed transaction */ trans = gsi_channel_trans_complete(channel); if (trans) gsi_trans_move_polled(trans); diff --git a/drivers/net/ipa/gsi.h b/drivers/net/ipa/gsi.h index 0fc25a6ae006..49dcadba4e0b 100644 --- a/drivers/net/ipa/gsi.h +++ b/drivers/net/ipa/gsi.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. - * Copyright (C) 2018-2021 Linaro Ltd. + * Copyright (C) 2018-2022 Linaro Ltd. */ #ifndef _GSI_H_ #define _GSI_H_ diff --git a/drivers/net/ipa/gsi_private.h b/drivers/net/ipa/gsi_private.h index af4cc13864e2..c65f7c5cdc8d 100644 --- a/drivers/net/ipa/gsi_private.h +++ b/drivers/net/ipa/gsi_private.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. - * Copyright (C) 2018-2020 Linaro Ltd. + * Copyright (C) 2018-2022 Linaro Ltd. */ #ifndef _GSI_PRIVATE_H_ #define _GSI_PRIVATE_H_ @@ -18,13 +18,13 @@ struct gsi_channel; /** * gsi_trans_move_complete() - Mark a GSI transaction completed - * @trans: Transaction to commit + * @trans: Transaction whose state is to be updated */ void gsi_trans_move_complete(struct gsi_trans *trans); /** * gsi_trans_move_polled() - Mark a transaction polled - * @trans: Transaction to update + * @trans: Transaction whose state is to be updated */ void gsi_trans_move_polled(struct gsi_trans *trans); @@ -97,8 +97,8 @@ void gsi_channel_doorbell(struct gsi_channel *channel); /* gsi_channel_update() - Update knowledge of channel hardware state * @channel: Channel to be updated * - * Consult hardware, move any newly completed transactions to a - * channel's completed list. + * Consult hardware, change the state of any newly-completed transactions + * on a channel. 
*/ void gsi_channel_update(struct gsi_channel *channel); diff --git a/drivers/net/ipa/gsi_reg.h b/drivers/net/ipa/gsi_reg.h index b36fd10a57d6..3763359f208f 100644 --- a/drivers/net/ipa/gsi_reg.h +++ b/drivers/net/ipa/gsi_reg.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. - * Copyright (C) 2018-2021 Linaro Ltd. + * Copyright (C) 2018-2022 Linaro Ltd. */ #ifndef _GSI_REG_H_ #define _GSI_REG_H_ diff --git a/drivers/net/ipa/gsi_trans.c b/drivers/net/ipa/gsi_trans.c index 03e54fc4376a..26b7f683a3e1 100644 --- a/drivers/net/ipa/gsi_trans.c +++ b/drivers/net/ipa/gsi_trans.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. - * Copyright (C) 2019-2020 Linaro Ltd. + * Copyright (C) 2019-2022 Linaro Ltd. */ #include <linux/types.h> @@ -22,37 +22,36 @@ * DOC: GSI Transactions * * A GSI transaction abstracts the behavior of a GSI channel by representing - * everything about a related group of IPA commands in a single structure. - * (A "command" in this sense is either a data transfer or an IPA immediate + * everything about a related group of IPA operations in a single structure. + * (An "operation" in this sense is either a data transfer or an IPA immediate * command.) Most details of interaction with the GSI hardware are managed - * by the GSI transaction core, allowing users to simply describe commands + * by the GSI transaction core, allowing users to simply describe operations * to be performed. When a transaction has completed a callback function * (dependent on the type of endpoint associated with the channel) allows * cleanup of resources associated with the transaction. * - * To perform a command (or set of them), a user of the GSI transaction + * To perform an operation (or set of them), a user of the GSI transaction * interface allocates a transaction, indicating the number of TREs required - * (one per command). If sufficient TREs are available, they are reserved + * (one per operation). If sufficient TREs are available, they are reserved * for use in the transaction and the allocation succeeds. This way - * exhaustion of the available TREs in a channel ring is detected - * as early as possible. All resources required to complete a transaction - * are allocated at transaction allocation time. + * exhaustion of the available TREs in a channel ring is detected as early + * as possible. Any other resources that might be needed to complete a + * transaction are also allocated when the transaction is allocated. * - * Commands performed as part of a transaction are represented in an array - * of Linux scatterlist structures. This array is allocated with the - * transaction, and its entries are initialized using standard scatterlist - * functions (such as sg_set_buf() or skb_to_sgvec()). + * Operations performed as part of a transaction are represented in an array + * of Linux scatterlist structures, allocated with the transaction. These + * scatterlist structures are initialized by "adding" operations to the + * transaction. If a buffer in an operation must be mapped for DMA, this is + * done at the time it is added to the transaction. It is possible for a + * mapping error to occur when an operation is added. In this case the + * transaction should simply be freed; this correctly releases resources + * associated with the transaction. * - * Once a transaction's scatterlist structures have been initialized, the - * transaction is committed.
The caller is responsible for mapping buffers - * for DMA if necessary, and this should be done *before* allocating - * the transaction. Between a successful allocation and commit of a - * transaction no errors should occur. - * - * Committing transfers ownership of the entire transaction to the GSI - * transaction core. The GSI transaction code formats the content of - * the scatterlist array into the channel ring buffer and informs the - * hardware that new TREs are available to process. + * Once all operations have been successfully added to a transaction, the + * transaction is committed. Committing transfers ownership of the entire + * transaction to the GSI transaction core. The GSI transaction code + * formats the content of the scatterlist array into the channel ring + * buffer and informs the hardware that new TREs are available to process. * * The last TRE in each transaction is marked to interrupt the AP when the * GSI hardware has completed it. Because transfers described by TREs are @@ -125,11 +124,10 @@ void gsi_trans_pool_exit(struct gsi_trans_pool *pool) memset(pool, 0, sizeof(*pool)); } -/* Allocate the requested number of (zeroed) entries from the pool */ -/* Home-grown DMA pool. This way we can preallocate and use the tre_count - * to guarantee allocations will succeed. Even though we specify max_alloc - * (and it can be more than one), we only allow allocation of a single - * element from a DMA pool. +/* Home-grown DMA pool. This way we can preallocate the pool, and guarantee + * allocations will succeed. The immediate commands in a transaction can + * require up to max_alloc elements from the pool. But we only allow + * allocation of a single element from a DMA pool at a time. */ int gsi_trans_pool_init_dma(struct device *dev, struct gsi_trans_pool *pool, size_t size, u32 count, u32 max_alloc) @@ -537,8 +535,8 @@ static void gsi_trans_tre_fill(struct gsi_tre *dest_tre, dma_addr_t addr, * * Formats channel ring TRE entries based on the content of the scatterlist. * Maps a transaction pointer to the last ring entry used for the transaction, - * so it can be recovered when it completes. Moves the transaction to the - * pending list. Finally, updates the channel ring pointer and optionally + * so it can be recovered when it completes. Moves the transaction to + * pending state. Finally, updates the channel ring pointer and optionally * rings the doorbell. */ static void __gsi_trans_commit(struct gsi_trans *trans, bool ring_db) diff --git a/drivers/net/ipa/gsi_trans.h b/drivers/net/ipa/gsi_trans.h index af8c4c6719d1..30c1c2dc77c6 100644 --- a/drivers/net/ipa/gsi_trans.h +++ b/drivers/net/ipa/gsi_trans.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. - * Copyright (C) 2019-2020 Linaro Ltd. + * Copyright (C) 2019-2022 Linaro Ltd. */ #ifndef _GSI_TRANS_H_ #define _GSI_TRANS_H_ @@ -74,7 +74,7 @@ struct gsi_trans { /** * gsi_trans_pool_init() - Initialize a pool of structures for transactions - * @pool: GSI transaction poll pointer + * @pool: GSI transaction pool pointer * @size: Size of elements in the pool * @count: Minimum number of elements in the pool * @max_alloc: Maximum number of elements allocated at a time from pool diff --git a/drivers/net/ipa/ipa.h b/drivers/net/ipa/ipa.h index 349643cf2b44..09ead433ec38 100644 --- a/drivers/net/ipa/ipa.h +++ b/drivers/net/ipa/ipa.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. 
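The rewritten DOC comment above describes an allocate/add/commit lifecycle for GSI transactions. A sketch of a caller following that lifecycle — gsi_channel_trans_alloc(), gsi_trans_page_add() and gsi_trans_free() are assumed helper shapes in the driver's style, not confirmed by the hunks shown; gsi_trans_commit_wait() is the commit entry point referenced by the ipa_cmd.c comment below:

/* Sketch: a one-TRE transaction following the documented lifecycle */
static int example_send_page(struct gsi *gsi, u32 channel_id,
			     struct page *page, u32 size)
{
	struct gsi_trans *trans;
	int ret;

	/* Allocation reserves the TRE; it fails if the ring is exhausted */
	trans = gsi_channel_trans_alloc(gsi, channel_id, 1, DMA_TO_DEVICE);
	if (!trans)
		return -EBUSY;

	/* Adding the operation fills a scatterlist entry and maps the
	 * buffer for DMA; on error, freeing the transaction releases
	 * everything allocated for it
	 */
	ret = gsi_trans_page_add(trans, page, size, 0);
	if (ret) {
		gsi_trans_free(trans);
		return ret;
	}

	/* Committing hands ownership to the GSI transaction core */
	gsi_trans_commit_wait(trans);

	return 0;
}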
- * Copyright (C) 2018-2020 Linaro Ltd. + * Copyright (C) 2018-2022 Linaro Ltd. */ #ifndef _IPA_H_ #define _IPA_H_ diff --git a/drivers/net/ipa/ipa_cmd.c b/drivers/net/ipa/ipa_cmd.c index f762d7d5f31f..26c3db9f52b1 100644 --- a/drivers/net/ipa/ipa_cmd.c +++ b/drivers/net/ipa/ipa_cmd.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. - * Copyright (C) 2019-2021 Linaro Ltd. + * Copyright (C) 2019-2022 Linaro Ltd. */ #include <linux/types.h> @@ -32,7 +32,7 @@ * immediate command's opcode. The payload for a command resides in AP * memory and is described by a single scatterlist entry in its transaction. * Commands do not require a transaction completion callback, and are - * (currently) always issued using gsi_trans_commit_wait(). + * always issued using gsi_trans_commit_wait(). */ /* Some commands can wait until indicated pipeline stages are clear */ diff --git a/drivers/net/ipa/ipa_cmd.h b/drivers/net/ipa/ipa_cmd.h index 9215ddad1010..8e4243c1f0bb 100644 --- a/drivers/net/ipa/ipa_cmd.h +++ b/drivers/net/ipa/ipa_cmd.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. - * Copyright (C) 2019-2020 Linaro Ltd. + * Copyright (C) 2019-2022 Linaro Ltd. */ #ifndef _IPA_CMD_H_ #define _IPA_CMD_H_ diff --git a/drivers/net/ipa/ipa_data.h b/drivers/net/ipa/ipa_data.h index e15eb3cd3e33..e5a6ce75c7dd 100644 --- a/drivers/net/ipa/ipa_data.h +++ b/drivers/net/ipa/ipa_data.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. - * Copyright (C) 2019-2021 Linaro Ltd. + * Copyright (C) 2019-2022 Linaro Ltd. */ #ifndef _IPA_DATA_H_ #define _IPA_DATA_H_ @@ -31,7 +31,7 @@ * communication path between the IPA and a particular execution environment * (EE), such as the AP or Modem. Each EE has a set of channels associated * with it, and each channel has an ID unique for that EE. For the most part - * the only GSI channels of concern to this driver belong to the AP + * the only GSI channels of concern to this driver belong to the AP. * * An endpoint is an IPA construct representing a single channel anywhere * in the system. An IPA endpoint ID maps directly to an (EE, channel_id) diff --git a/drivers/net/ipa/ipa_endpoint.c b/drivers/net/ipa/ipa_endpoint.c index 0da02d8d238d..093e11ec7c2d 100644 --- a/drivers/net/ipa/ipa_endpoint.c +++ b/drivers/net/ipa/ipa_endpoint.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. - * Copyright (C) 2019-2021 Linaro Ltd. + * Copyright (C) 2019-2022 Linaro Ltd. */ #include <linux/types.h> @@ -23,8 +23,6 @@ #include "ipa_gsi.h" #include "ipa_power.h" -#define atomic_dec_not_zero(v) atomic_add_unless((v), -1, 0) - /* Hardware is told about receive buffers once a "batch" has been queued */ #define IPA_REPLENISH_BATCH 16 /* Must be non-zero */ diff --git a/drivers/net/ipa/ipa_endpoint.h b/drivers/net/ipa/ipa_endpoint.h index 28e0a7386fd7..d8dfa24f5214 100644 --- a/drivers/net/ipa/ipa_endpoint.h +++ b/drivers/net/ipa/ipa_endpoint.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. - * Copyright (C) 2019-2020 Linaro Ltd. + * Copyright (C) 2019-2022 Linaro Ltd. 
*/ #ifndef _IPA_ENDPOINT_H_ #define _IPA_ENDPOINT_H_ diff --git a/drivers/net/ipa/ipa_interrupt.c b/drivers/net/ipa/ipa_interrupt.c index d0142b17a275..c269432f9c2e 100644 --- a/drivers/net/ipa/ipa_interrupt.c +++ b/drivers/net/ipa/ipa_interrupt.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved. - * Copyright (C) 2018-2020 Linaro Ltd. + * Copyright (C) 2018-2022 Linaro Ltd. */ /* DOC: IPA Interrupts diff --git a/drivers/net/ipa/ipa_interrupt.h b/drivers/net/ipa/ipa_interrupt.h index 231390cea52a..f31fd9965fdc 100644 --- a/drivers/net/ipa/ipa_interrupt.h +++ b/drivers/net/ipa/ipa_interrupt.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. - * Copyright (C) 2018-2020 Linaro Ltd. + * Copyright (C) 2018-2022 Linaro Ltd. */ #ifndef _IPA_INTERRUPT_H_ #define _IPA_INTERRUPT_H_ diff --git a/drivers/net/ipa/ipa_main.c b/drivers/net/ipa/ipa_main.c index a0f6212aa3c3..3461ad3029ab 100644 --- a/drivers/net/ipa/ipa_main.c +++ b/drivers/net/ipa/ipa_main.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. - * Copyright (C) 2018-2021 Linaro Ltd. + * Copyright (C) 2018-2022 Linaro Ltd. */ #include <linux/types.h> diff --git a/drivers/net/ipa/ipa_mem.c b/drivers/net/ipa/ipa_mem.c index 9abf473be1dd..f84c6830495a 100644 --- a/drivers/net/ipa/ipa_mem.c +++ b/drivers/net/ipa/ipa_mem.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. - * Copyright (C) 2019-2021 Linaro Ltd. + * Copyright (C) 2019-2022 Linaro Ltd. */ #include <linux/types.h> diff --git a/drivers/net/ipa/ipa_modem.c b/drivers/net/ipa/ipa_modem.c index c8b1c4d9c507..423422a2a445 100644 --- a/drivers/net/ipa/ipa_modem.c +++ b/drivers/net/ipa/ipa_modem.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved. - * Copyright (C) 2018-2021 Linaro Ltd. + * Copyright (C) 2018-2022 Linaro Ltd. */ #include <linux/errno.h> diff --git a/drivers/net/ipa/ipa_modem.h b/drivers/net/ipa/ipa_modem.h index e64ccc2402e9..d85718db9a57 100644 --- a/drivers/net/ipa/ipa_modem.h +++ b/drivers/net/ipa/ipa_modem.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. - * Copyright (C) 2018-2020 Linaro Ltd. + * Copyright (C) 2018-2022 Linaro Ltd. */ #ifndef _IPA_MODEM_H_ #define _IPA_MODEM_H_ diff --git a/drivers/net/ipa/ipa_power.c b/drivers/net/ipa/ipa_power.c index db5ac7552286..8420f93128a2 100644 --- a/drivers/net/ipa/ipa_power.c +++ b/drivers/net/ipa/ipa_power.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. - * Copyright (C) 2018-2021 Linaro Ltd. + * Copyright (C) 2018-2022 Linaro Ltd. */ #include <linux/clk.h> diff --git a/drivers/net/ipa/ipa_power.h b/drivers/net/ipa/ipa_power.h index 6f84f057a209..896f052e51a1 100644 --- a/drivers/net/ipa/ipa_power.h +++ b/drivers/net/ipa/ipa_power.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. - * Copyright (C) 2018-2020 Linaro Ltd. + * Copyright (C) 2018-2022 Linaro Ltd. 
*/ #ifndef _IPA_POWER_H_ #define _IPA_POWER_H_ diff --git a/drivers/net/ipa/ipa_qmi.c b/drivers/net/ipa/ipa_qmi.c index 6f874f99b910..8295fd4b70d1 100644 --- a/drivers/net/ipa/ipa_qmi.c +++ b/drivers/net/ipa/ipa_qmi.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved. - * Copyright (C) 2018-2020 Linaro Ltd. + * Copyright (C) 2018-2022 Linaro Ltd. */ #include <linux/types.h> diff --git a/drivers/net/ipa/ipa_qmi.h b/drivers/net/ipa/ipa_qmi.h index 856ef629ccc8..1c236826c17a 100644 --- a/drivers/net/ipa/ipa_qmi.h +++ b/drivers/net/ipa/ipa_qmi.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* Copyright (c) 2018, The Linux Foundation. All rights reserved. - * Copyright (C) 2018-2020 Linaro Ltd. + * Copyright (C) 2018-2022 Linaro Ltd. */ #ifndef _IPA_QMI_H_ #define _IPA_QMI_H_ diff --git a/drivers/net/ipa/ipa_qmi_msg.c b/drivers/net/ipa/ipa_qmi_msg.c index 75d3fc0092e9..97c0befe8d86 100644 --- a/drivers/net/ipa/ipa_qmi_msg.c +++ b/drivers/net/ipa/ipa_qmi_msg.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2018, The Linux Foundation. All rights reserved. - * Copyright (C) 2018-2020 Linaro Ltd. + * Copyright (C) 2018-2022 Linaro Ltd. */ #include <linux/stddef.h> #include <linux/soc/qcom/qmi.h> diff --git a/drivers/net/ipa/ipa_qmi_msg.h b/drivers/net/ipa/ipa_qmi_msg.h index 9651aa59b596..e29663965f43 100644 --- a/drivers/net/ipa/ipa_qmi_msg.h +++ b/drivers/net/ipa/ipa_qmi_msg.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* Copyright (c) 2018, The Linux Foundation. All rights reserved. - * Copyright (C) 2018-2020 Linaro Ltd. + * Copyright (C) 2018-2022 Linaro Ltd. */ #ifndef _IPA_QMI_MSG_H_ #define _IPA_QMI_MSG_H_ diff --git a/drivers/net/ipa/ipa_reg.c b/drivers/net/ipa/ipa_reg.c index fb4663bcf14b..22f067741d9b 100644 --- a/drivers/net/ipa/ipa_reg.c +++ b/drivers/net/ipa/ipa_reg.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. - * Copyright (C) 2019-2020 Linaro Ltd. + * Copyright (C) 2019-2022 Linaro Ltd. */ #include <linux/io.h> diff --git a/drivers/net/ipa/ipa_reg.h b/drivers/net/ipa/ipa_reg.h index f81381891a2e..7bf70f70f63f 100644 --- a/drivers/net/ipa/ipa_reg.h +++ b/drivers/net/ipa/ipa_reg.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. - * Copyright (C) 2018-2021 Linaro Ltd. + * Copyright (C) 2018-2022 Linaro Ltd. */ #ifndef _IPA_REG_H_ #define _IPA_REG_H_ @@ -17,53 +17,38 @@ struct ipa; * DOC: IPA Registers * * IPA registers are located within the "ipa-reg" address space defined by - * Device Tree. The offset of each register within that space is specified - * by symbols defined below. The address space is mapped to virtual memory - * space in ipa_mem_init(). All IPA registers are 32 bits wide. + * Device Tree. Each register has a specified offset within that space, + * which is mapped into virtual memory space in ipa_mem_init(). Each + * has a unique identifier, taken from the ipa_reg_id enumerated type. + * All IPA registers are 32 bits wide. * - * Certain register types are duplicated for a number of instances of - * something. For example, each IPA endpoint has an set of registers - * defining its configuration. The offset to an endpoint's set of registers - * is computed based on an "base" offset, plus an endpoint's ID multiplied - * and a "stride" value for the register.
For such registers, the offset is - computed by a function-like macro that takes a parameter used in the - computation. + * Certain "parameterized" register types are duplicated for a number of + * instances of something. For example, each IPA endpoint has a set of + * registers defining its configuration. The offset to an endpoint's set + * of registers is computed based on a "base" offset, plus an endpoint's + * ID multiplied by a "stride" value for the register. Similarly, some + * registers have an offset that depends on execution environment. In + * this case, the stride is multiplied by a member of the gsi_ee_id + * enumerated type. * - * Some register offsets depend on execution environment. For these an "ee" - * parameter is supplied to the offset macro. The "ee" value is a member of - * the gsi_ee enumerated type. + * Each version of IPA implements an array of ipa_reg structures indexed + * by register ID. Each entry in the array specifies the base offset and + * (for parameterized registers) a non-zero stride value. Not all versions + * of IPA define all registers. The offset for a register is returned by + * ipa_reg_offset() when the register's ipa_reg structure is supplied; + * zero is returned for an undefined register (this should never happen). * - * The offset of a register dependent on endpoint ID is computed by a macro - * that is supplied a parameter "ep", "txep", or "rxep". A register with an - * "ep" parameter is valid for any endpoint; a register with a "txep" or - * "rxep" parameter is valid only for TX or RX endpoints, respectively. The - * "*ep" value is assumed to be less than the maximum valid endpoint ID - * for the current hardware, and that will not exceed IPA_ENDPOINT_MAX. - * - * The offset of registers related to filter and route tables is computed - * by a macro that is supplied a parameter "er". The "er" represents an - * endpoint ID for filters, or a route ID for routes. For filters, the - * endpoint ID must be less than IPA_ENDPOINT_MAX, but is further restricted - * because not all endpoints support filtering. For routes, the route ID - * must be less than IPA_ROUTE_MAX. - * - * The offset of registers related to resource types is computed by a macro - * that is supplied a parameter "rt". The "rt" represents a resource type, - * which is a member of the ipa_resource_type_src enumerated type for - * source endpoint resources or the ipa_resource_type_dst enumerated type - * for destination endpoint resources. - * - * Some registers encode multiple fields within them. For these, each field - * has a symbol below defining a field mask that encodes both the position - * and width of the field within its register. - * - * In some cases, different versions of IPA hardware use different offset or - * field mask values. In such cases an inline_function(ipa) is used rather - * than a MACRO to define the offset or field mask to use. - * - * Finally, some registers hold bitmasks representing endpoints. In such - * cases the @available field in the @ipa structure defines the "full" set - * of valid bits for the register. + * Some registers encode multiple fields within them. Each field in + * such a register has a unique identifier (from an enumerated type). + * The position and width of the fields in a register are defined by + * an array of field masks, indexed by field ID. Two functions are + * used to access register fields; both take an ipa_reg structure as an + * argument.
To encode a value to be represented in a register field, + * the value and field ID are passed to ipa_reg_encode(). To extract + * a value encoded in a register field, the field ID is passed to + * ipa_reg_decode(). In addition, for single-bit fields, ipa_reg_bit() + * can be used to either encode the bit value, or to generate a mask + * used to extract the bit value. */ /* enum ipa_reg_id - IPA register IDs */ diff --git a/drivers/net/ipa/ipa_resource.c b/drivers/net/ipa/ipa_resource.c index 5376b71f4598..a257f0e5e361 100644 --- a/drivers/net/ipa/ipa_resource.c +++ b/drivers/net/ipa/ipa_resource.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. - * Copyright (C) 2018-2021 Linaro Ltd. + * Copyright (C) 2018-2022 Linaro Ltd. */ #include <linux/types.h> diff --git a/drivers/net/ipa/ipa_smp2p.c b/drivers/net/ipa/ipa_smp2p.c index 211233612039..5620dc271fac 100644 --- a/drivers/net/ipa/ipa_smp2p.c +++ b/drivers/net/ipa/ipa_smp2p.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. - * Copyright (C) 2019-2020 Linaro Ltd. + * Copyright (C) 2019-2022 Linaro Ltd. */ #include <linux/types.h> diff --git a/drivers/net/ipa/ipa_smp2p.h b/drivers/net/ipa/ipa_smp2p.h index 59cee31a7383..9b969b03d1a4 100644 --- a/drivers/net/ipa/ipa_smp2p.h +++ b/drivers/net/ipa/ipa_smp2p.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. - * Copyright (C) 2019-2020 Linaro Ltd. + * Copyright (C) 2019-2022 Linaro Ltd. */ #ifndef _IPA_SMP2P_H_ #define _IPA_SMP2P_H_ diff --git a/drivers/net/ipa/ipa_sysfs.c b/drivers/net/ipa/ipa_sysfs.c index c0c8641cdd14..5cbc15a971f9 100644 --- a/drivers/net/ipa/ipa_sysfs.c +++ b/drivers/net/ipa/ipa_sysfs.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 -/* Copyright (C) 2021 Linaro Ltd. */ +/* Copyright (C) 2021-2022 Linaro Ltd. */ #include <linux/kernel.h> #include <linux/types.h> diff --git a/drivers/net/ipa/ipa_sysfs.h b/drivers/net/ipa/ipa_sysfs.h index 4a3ffd1e4e3f..58ba22810bab 100644 --- a/drivers/net/ipa/ipa_sysfs.h +++ b/drivers/net/ipa/ipa_sysfs.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. - * Copyright (C) 2019-2021 Linaro Ltd. + * Copyright (C) 2019-2022 Linaro Ltd. */ #ifndef _IPA_SYSFS_H_ #define _IPA_SYSFS_H_ diff --git a/drivers/net/ipa/ipa_table.c b/drivers/net/ipa/ipa_table.c index 02cab1b59f21..510ff2dc8999 100644 --- a/drivers/net/ipa/ipa_table.c +++ b/drivers/net/ipa/ipa_table.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. - * Copyright (C) 2018-2021 Linaro Ltd. + * Copyright (C) 2018-2022 Linaro Ltd. */ #include <linux/types.h> diff --git a/drivers/net/ipa/ipa_table.h b/drivers/net/ipa/ipa_table.h index 1538e2e1732f..395189f75d78 100644 --- a/drivers/net/ipa/ipa_table.h +++ b/drivers/net/ipa/ipa_table.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. - * Copyright (C) 2019-2021 Linaro Ltd. + * Copyright (C) 2019-2022 Linaro Ltd. 
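The reworked ipa_reg.h documentation above reduces parameterized register offsets to base-plus-stride arithmetic. A minimal sketch of what it describes, with the descriptor layout assumed from the text rather than shown in this hunk:

/* Assumed shape of the per-register descriptor described above */
struct ipa_reg {
	u32 offset;	/* base offset within the "ipa-reg" space */
	u32 stride;	/* non-zero only for parameterized registers */
	/* ... array of field masks indexed by field ID, register ID ... */
};

/* Offset of instance @n (an endpoint ID, an EE ID, ...) of a register */
static inline u32 example_reg_n_offset(const struct ipa_reg *reg, u32 n)
{
	return reg->offset + n * reg->stride;
}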
*/ #ifndef _IPA_TABLE_H_ #define _IPA_TABLE_H_ diff --git a/drivers/net/ipa/ipa_uc.c b/drivers/net/ipa/ipa_uc.c index cf21f1a87a88..f0ee47281015 100644 --- a/drivers/net/ipa/ipa_uc.c +++ b/drivers/net/ipa/ipa_uc.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. - * Copyright (C) 2018-2020 Linaro Ltd. + * Copyright (C) 2018-2022 Linaro Ltd. */ #include <linux/types.h> diff --git a/drivers/net/ipa/ipa_uc.h b/drivers/net/ipa/ipa_uc.h index 23847f934d64..8514096e6f36 100644 --- a/drivers/net/ipa/ipa_uc.h +++ b/drivers/net/ipa/ipa_uc.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. - * Copyright (C) 2019-2020 Linaro Ltd. + * Copyright (C) 2019-2022 Linaro Ltd. */ #ifndef _IPA_UC_H_ #define _IPA_UC_H_ diff --git a/drivers/net/ipa/ipa_version.h b/drivers/net/ipa/ipa_version.h index 58f7b43b4db3..7870e0cc3d7c 100644 --- a/drivers/net/ipa/ipa_version.h +++ b/drivers/net/ipa/ipa_version.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. - * Copyright (C) 2019-2020 Linaro Ltd. + * Copyright (C) 2019-2022 Linaro Ltd. */ #ifndef _IPA_VERSION_H_ #define _IPA_VERSION_H_ diff --git a/drivers/net/mdio/fwnode_mdio.c b/drivers/net/mdio/fwnode_mdio.c index 1c1584fca632..689e728345ce 100644 --- a/drivers/net/mdio/fwnode_mdio.c +++ b/drivers/net/mdio/fwnode_mdio.c @@ -10,10 +10,31 @@ #include <linux/fwnode_mdio.h> #include <linux/of.h> #include <linux/phy.h> +#include <linux/pse-pd/pse.h> MODULE_AUTHOR("Calvin Johnson <calvin.johnson@oss.nxp.com>"); MODULE_LICENSE("GPL"); +static struct pse_control * +fwnode_find_pse_control(struct fwnode_handle *fwnode) +{ + struct pse_control *psec; + struct device_node *np; + + if (!IS_ENABLED(CONFIG_PSE_CONTROLLER)) + return NULL; + + np = to_of_node(fwnode); + if (!np) + return NULL; + + psec = of_pse_control_get(np); + if (PTR_ERR(psec) == -ENOENT) + return NULL; + + return psec; +} + static struct mii_timestamper * fwnode_find_mii_timestamper(struct fwnode_handle *fwnode) { @@ -91,14 +112,21 @@ int fwnode_mdiobus_register_phy(struct mii_bus *bus, struct fwnode_handle *child, u32 addr) { struct mii_timestamper *mii_ts = NULL; + struct pse_control *psec = NULL; struct phy_device *phy; bool is_c45 = false; u32 phy_id; int rc; + psec = fwnode_find_pse_control(child); + if (IS_ERR(psec)) + return PTR_ERR(psec); + mii_ts = fwnode_find_mii_timestamper(child); - if (IS_ERR(mii_ts)) - return PTR_ERR(mii_ts); + if (IS_ERR(mii_ts)) { + rc = PTR_ERR(mii_ts); + goto clean_pse; + } rc = fwnode_property_match_string(child, "compatible", "ethernet-phy-ieee802.3-c45"); @@ -110,8 +138,8 @@ int fwnode_mdiobus_register_phy(struct mii_bus *bus, else phy = phy_device_create(bus, addr, phy_id, 0, NULL); if (IS_ERR(phy)) { - unregister_mii_timestamper(mii_ts); - return PTR_ERR(phy); + rc = PTR_ERR(phy); + goto clean_mii_ts; } if (is_acpi_node(child)) { @@ -125,25 +153,33 @@ int fwnode_mdiobus_register_phy(struct mii_bus *bus, /* All data is now stored in the phy struct, so register it */ rc = phy_device_register(phy); if (rc) { - phy_device_free(phy); fwnode_handle_put(phy->mdio.dev.fwnode); - return rc; + goto clean_phy; } } else if (is_of_node(child)) { rc = fwnode_mdiobus_phy_device_register(bus, phy, child, addr); - if (rc) { - unregister_mii_timestamper(mii_ts); - phy_device_free(phy); - return rc; - } + if (rc) + goto clean_phy; } + phy->psec = psec; + /* 
phy->mii_ts may already be defined by the PHY driver. A * mii_timestamper probed via the device tree will still have * precedence. */ if (mii_ts) phy->mii_ts = mii_ts; + return 0; + +clean_phy: + phy_device_free(phy); +clean_mii_ts: + unregister_mii_timestamper(mii_ts); +clean_pse: + pse_control_put(psec); + + return rc; } EXPORT_SYMBOL(fwnode_mdiobus_register_phy); diff --git a/drivers/net/mdio/mdio-i2c.c b/drivers/net/mdio/mdio-i2c.c index 09200a70b315..bf8bf5e20faf 100644 --- a/drivers/net/mdio/mdio-i2c.c +++ b/drivers/net/mdio/mdio-i2c.c @@ -3,6 +3,7 @@ * MDIO I2C bridge * * Copyright (C) 2015-2016 Russell King + * Copyright (C) 2021 Marek Behun * * Network PHYs can appear on I2C buses when they are part of SFP module. * This driver exposes these PHYs to the networking PHY code, allowing @@ -12,6 +13,7 @@ #include <linux/i2c.h> #include <linux/mdio/mdio-i2c.h> #include <linux/phy.h> +#include <linux/sfp.h> /* * I2C bus addresses 0x50 and 0x51 are normally an EEPROM, which is @@ -28,7 +30,7 @@ static unsigned int i2c_mii_phy_addr(int phy_id) return phy_id + 0x40; } -static int i2c_mii_read(struct mii_bus *bus, int phy_id, int reg) +static int i2c_mii_read_default(struct mii_bus *bus, int phy_id, int reg) { struct i2c_adapter *i2c = bus->priv; struct i2c_msg msgs[2]; @@ -62,7 +64,8 @@ static int i2c_mii_read(struct mii_bus *bus, int phy_id, int reg) return data[0] << 8 | data[1]; } -static int i2c_mii_write(struct mii_bus *bus, int phy_id, int reg, u16 val) +static int i2c_mii_write_default(struct mii_bus *bus, int phy_id, int reg, + u16 val) { struct i2c_adapter *i2c = bus->priv; struct i2c_msg msg; @@ -91,9 +94,288 @@ static int i2c_mii_write(struct mii_bus *bus, int phy_id, int reg, u16 val) return ret < 0 ? ret : 0; } -struct mii_bus *mdio_i2c_alloc(struct device *parent, struct i2c_adapter *i2c) +/* RollBall SFPs do not access the internal PHY via I2C address 0x56, but + * instead via address 0x51, when the SFP page is set to 0x03 and the + * password to 0xffffffff. + * + * address size contents description + * ------- ---- -------- ----------- + * 0x80 1 CMD 0x01/0x02/0x04 for write/read/done + * 0x81 1 DEV Clause 45 device + * 0x82 2 REG Clause 45 register + * 0x84 2 VAL Register value + */ +#define ROLLBALL_PHY_I2C_ADDR 0x51 + +#define ROLLBALL_PASSWORD (SFP_VSL + 3) + +#define ROLLBALL_CMD_ADDR 0x80 +#define ROLLBALL_DATA_ADDR 0x81 + +#define ROLLBALL_CMD_WRITE 0x01 +#define ROLLBALL_CMD_READ 0x02 +#define ROLLBALL_CMD_DONE 0x04 + +#define SFP_PAGE_ROLLBALL_MDIO 3 + +static int __i2c_transfer_err(struct i2c_adapter *i2c, struct i2c_msg *msgs, + int num) +{ + int ret; + + ret = __i2c_transfer(i2c, msgs, num); + if (ret < 0) + return ret; + else if (ret != num) + return -EIO; + else + return 0; +} + +static int __i2c_rollball_get_page(struct i2c_adapter *i2c, int bus_addr, + u8 *page) +{ + struct i2c_msg msgs[2]; + u8 addr = SFP_PAGE; + + msgs[0].addr = bus_addr; + msgs[0].flags = 0; + msgs[0].len = 1; + msgs[0].buf = &addr; + + msgs[1].addr = bus_addr; + msgs[1].flags = I2C_M_RD; + msgs[1].len = 1; + msgs[1].buf = page; + + return __i2c_transfer_err(i2c, msgs, 2); +} + +static int __i2c_rollball_set_page(struct i2c_adapter *i2c, int bus_addr, + u8 page) +{ + struct i2c_msg msg; + u8 buf[2]; + + buf[0] = SFP_PAGE; + buf[1] = page; + + msg.addr = bus_addr; + msg.flags = 0; + msg.len = 2; + msg.buf = buf; + + return __i2c_transfer_err(i2c, &msg, 1); +} + +/* In order not to interfere with other SFP code (which may manipulate + * SFP_PAGE), for every transfer we do this: + * 1.
lock the bus + * 2. save content of SFP_PAGE + * 3. set SFP_PAGE to 3 + * 4. do the transfer + * 5. restore original SFP_PAGE + * 6. unlock the bus + * Note that one might think that steps 2 to 5 could theoretically be done all + * in one call to i2c_transfer (by constructing the msgs array in such a way), + * but unfortunately tests show that this does not work :-( A changed SFP_PAGE + * is not taken into account until i2c_transfer() is done. + */ +static int i2c_transfer_rollball(struct i2c_adapter *i2c, + struct i2c_msg *msgs, int num) +{ + int ret, main_err = 0; + u8 saved_page; + + i2c_lock_bus(i2c, I2C_LOCK_SEGMENT); + + /* save original page */ + ret = __i2c_rollball_get_page(i2c, msgs->addr, &saved_page); + if (ret) + goto unlock; + + /* change to RollBall MDIO page */ + ret = __i2c_rollball_set_page(i2c, msgs->addr, SFP_PAGE_ROLLBALL_MDIO); + if (ret) + goto unlock; + + /* do the transfer; we try to restore original page if this fails */ + ret = __i2c_transfer_err(i2c, msgs, num); + if (ret) + main_err = ret; + + /* restore original page */ + ret = __i2c_rollball_set_page(i2c, msgs->addr, saved_page); + +unlock: + i2c_unlock_bus(i2c, I2C_LOCK_SEGMENT); + + return main_err ? : ret; +} + +static int i2c_rollball_mii_poll(struct mii_bus *bus, int bus_addr, u8 *buf, + size_t len) +{ + struct i2c_adapter *i2c = bus->priv; + struct i2c_msg msgs[2]; + u8 cmd_addr, tmp, *res; + int i, ret; + + cmd_addr = ROLLBALL_CMD_ADDR; + + res = buf ? buf : &tmp; + len = buf ? len : 1; + + msgs[0].addr = bus_addr; + msgs[0].flags = 0; + msgs[0].len = 1; + msgs[0].buf = &cmd_addr; + + msgs[1].addr = bus_addr; + msgs[1].flags = I2C_M_RD; + msgs[1].len = len; + msgs[1].buf = res; + + /* By experiment it takes up to 70 ms to access a register for these + * SFPs. Sleep 20ms between iterations and try 10 times.
+ */ + i = 10; + do { + msleep(20); + + ret = i2c_transfer_rollball(i2c, msgs, ARRAY_SIZE(msgs)); + if (ret) + return ret; + + if (*res == ROLLBALL_CMD_DONE) + return 0; + } while (i-- > 0); + + dev_dbg(&bus->dev, "poll timed out\n"); + + return -ETIMEDOUT; +} + +static int i2c_rollball_mii_cmd(struct mii_bus *bus, int bus_addr, u8 cmd, + u8 *data, size_t len) +{ + struct i2c_adapter *i2c = bus->priv; + struct i2c_msg msgs[2]; + u8 cmdbuf[2]; + + cmdbuf[0] = ROLLBALL_CMD_ADDR; + cmdbuf[1] = cmd; + + msgs[0].addr = bus_addr; + msgs[0].flags = 0; + msgs[0].len = len; + msgs[0].buf = data; + + msgs[1].addr = bus_addr; + msgs[1].flags = 0; + msgs[1].len = sizeof(cmdbuf); + msgs[1].buf = cmdbuf; + + return i2c_transfer_rollball(i2c, msgs, ARRAY_SIZE(msgs)); +} + +static int i2c_mii_read_rollball(struct mii_bus *bus, int phy_id, int reg) +{ + u8 buf[4], res[6]; + int bus_addr, ret; + u16 val; + + if (!(reg & MII_ADDR_C45)) + return -EOPNOTSUPP; + + bus_addr = i2c_mii_phy_addr(phy_id); + if (bus_addr != ROLLBALL_PHY_I2C_ADDR) + return 0xffff; + + buf[0] = ROLLBALL_DATA_ADDR; + buf[1] = (reg >> 16) & 0x1f; + buf[2] = (reg >> 8) & 0xff; + buf[3] = reg & 0xff; + + ret = i2c_rollball_mii_cmd(bus, bus_addr, ROLLBALL_CMD_READ, buf, + sizeof(buf)); + if (ret < 0) + return ret; + + ret = i2c_rollball_mii_poll(bus, bus_addr, res, sizeof(res)); + if (ret == -ETIMEDOUT) + return 0xffff; + else if (ret < 0) + return ret; + + val = res[4] << 8 | res[5]; + + return val; +} + +static int i2c_mii_write_rollball(struct mii_bus *bus, int phy_id, int reg, + u16 val) +{ + int bus_addr, ret; + u8 buf[6]; + + if (!(reg & MII_ADDR_C45)) + return -EOPNOTSUPP; + + bus_addr = i2c_mii_phy_addr(phy_id); + if (bus_addr != ROLLBALL_PHY_I2C_ADDR) + return 0; + + buf[0] = ROLLBALL_DATA_ADDR; + buf[1] = (reg >> 16) & 0x1f; + buf[2] = (reg >> 8) & 0xff; + buf[3] = reg & 0xff; + buf[4] = val >> 8; + buf[5] = val & 0xff; + + ret = i2c_rollball_mii_cmd(bus, bus_addr, ROLLBALL_CMD_WRITE, buf, + sizeof(buf)); + if (ret < 0) + return ret; + + ret = i2c_rollball_mii_poll(bus, bus_addr, NULL, 0); + if (ret < 0) + return ret; + + return 0; +} + +static int i2c_mii_init_rollball(struct i2c_adapter *i2c) +{ + struct i2c_msg msg; + u8 pw[5]; + int ret; + + pw[0] = ROLLBALL_PASSWORD; + pw[1] = 0xff; + pw[2] = 0xff; + pw[3] = 0xff; + pw[4] = 0xff; + + msg.addr = ROLLBALL_PHY_I2C_ADDR; + msg.flags = 0; + msg.len = sizeof(pw); + msg.buf = pw; + + ret = i2c_transfer(i2c, &msg, 1); + if (ret < 0) + return ret; + else if (ret != 1) + return -EIO; + else + return 0; +} + +struct mii_bus *mdio_i2c_alloc(struct device *parent, struct i2c_adapter *i2c, + enum mdio_i2c_proto protocol) { struct mii_bus *mii; + int ret; if (!i2c_check_functionality(i2c, I2C_FUNC_I2C)) return ERR_PTR(-EINVAL); @@ -104,10 +386,28 @@ struct mii_bus *mdio_i2c_alloc(struct device *parent, struct i2c_adapter *i2c) snprintf(mii->id, MII_BUS_ID_SIZE, "i2c:%s", dev_name(parent)); mii->parent = parent; - mii->read = i2c_mii_read; - mii->write = i2c_mii_write; mii->priv = i2c; + switch (protocol) { + case MDIO_I2C_ROLLBALL: + ret = i2c_mii_init_rollball(i2c); + if (ret < 0) { + dev_err(parent, + "Cannot initialize RollBall MDIO I2C protocol: %d\n", + ret); + mdiobus_free(mii); + return ERR_PTR(ret); + } + + mii->read = i2c_mii_read_rollball; + mii->write = i2c_mii_write_rollball; + break; + default: + mii->read = i2c_mii_read_default; + mii->write = i2c_mii_write_default; + break; + } + return mii; } EXPORT_SYMBOL_GPL(mdio_i2c_alloc); diff --git a/drivers/net/phy/at803x.c 
b/drivers/net/phy/at803x.c index 11ebd59bf2eb..9e9adde335c8 100644 --- a/drivers/net/phy/at803x.c +++ b/drivers/net/phy/at803x.c @@ -676,6 +676,7 @@ static int at803x_sfp_insert(void *upstream, const struct sfp_eeprom_id *id) struct phy_device *phydev = upstream; __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_support); __ETHTOOL_DECLARE_LINK_MODE_MASK(sfp_support); + DECLARE_PHY_INTERFACE_MASK(interfaces); phy_interface_t iface; linkmode_zero(phy_support); @@ -686,7 +687,7 @@ static int at803x_sfp_insert(void *upstream, const struct sfp_eeprom_id *id) phylink_set(phy_support, Asym_Pause); linkmode_zero(sfp_support); - sfp_parse_support(phydev->sfp_bus, id, sfp_support); + sfp_parse_support(phydev->sfp_bus, id, sfp_support, interfaces); /* Some modules support 10G modes as well as others we support. * Mask out non-supported modes so the correct interface is picked. */ diff --git a/drivers/net/phy/marvell-88x2222.c b/drivers/net/phy/marvell-88x2222.c index f070776ca904..fd9ad4820192 100644 --- a/drivers/net/phy/marvell-88x2222.c +++ b/drivers/net/phy/marvell-88x2222.c @@ -478,6 +478,7 @@ static int mv2222_config_init(struct phy_device *phydev) static int mv2222_sfp_insert(void *upstream, const struct sfp_eeprom_id *id) { + DECLARE_PHY_INTERFACE_MASK(interfaces); struct phy_device *phydev = upstream; phy_interface_t sfp_interface; struct mv2222_data *priv; @@ -489,7 +490,7 @@ static int mv2222_sfp_insert(void *upstream, const struct sfp_eeprom_id *id) priv = (struct mv2222_data *)phydev->priv; dev = &phydev->mdio.dev; - sfp_parse_support(phydev->sfp_bus, id, sfp_supported); + sfp_parse_support(phydev->sfp_bus, id, sfp_supported, interfaces); phydev->port = sfp_parse_port(phydev->sfp_bus, id, sfp_supported); sfp_interface = sfp_select_interface(phydev->sfp_bus, sfp_supported); diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c index a3e810705ce2..2810f4f9da0c 100644 --- a/drivers/net/phy/marvell.c +++ b/drivers/net/phy/marvell.c @@ -2845,6 +2845,7 @@ static int marvell_probe(struct phy_device *phydev) static int m88e1510_sfp_insert(void *upstream, const struct sfp_eeprom_id *id) { + DECLARE_PHY_INTERFACE_MASK(interfaces); struct phy_device *phydev = upstream; phy_interface_t interface; struct device *dev; @@ -2856,7 +2857,7 @@ static int m88e1510_sfp_insert(void *upstream, const struct sfp_eeprom_id *id) dev = &phydev->mdio.dev; - sfp_parse_support(phydev->sfp_bus, id, supported); + sfp_parse_support(phydev->sfp_bus, id, supported, interfaces); interface = sfp_select_interface(phydev->sfp_bus, supported); dev_info(dev, "%s SFP module inserted\n", phy_modes(interface)); diff --git a/drivers/net/phy/marvell10g.c b/drivers/net/phy/marvell10g.c index 2b7d0720720b..383a9c9f36e5 100644 --- a/drivers/net/phy/marvell10g.c +++ b/drivers/net/phy/marvell10g.c @@ -96,6 +96,11 @@ enum { MV_PCS_PORT_INFO_NPORTS_MASK = 0x0380, MV_PCS_PORT_INFO_NPORTS_SHIFT = 7, + /* SerDes reinitialization 88E21X0 */ + MV_AN_21X0_SERDES_CTRL2 = 0x800f, + MV_AN_21X0_SERDES_CTRL2_AUTO_INIT_DIS = BIT(13), + MV_AN_21X0_SERDES_CTRL2_RUN_INIT = BIT(15), + /* These registers appear at 0x800X and 0xa00X - the 0xa00X control * registers appear to set themselves to the 0x800X when AN is * restarted, but status registers appear readable from either. 
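The at803x and Marvell SFP-insert hunks above all adapt to the new sfp_parse_support() signature in the same way. Reduced to a sketch (example_sfp_insert() is a placeholder name):

static int example_sfp_insert(void *upstream, const struct sfp_eeprom_id *id)
{
	struct phy_device *phydev = upstream;
	__ETHTOOL_DECLARE_LINK_MODE_MASK(support) = { 0, };
	DECLARE_PHY_INTERFACE_MASK(interfaces);

	/* The parser now fills a PHY interface mode mask alongside the
	 * ethtool link mode mask
	 */
	sfp_parse_support(phydev->sfp_bus, id, support, interfaces);

	/* ... select a port and interface from @support as before ... */
	return 0;
}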
@@ -117,16 +122,16 @@ enum { MV_V2_33X0_PORT_CTRL_MACTYPE_10GBASER_NO_SGMII_AN = 0x5, MV_V2_33X0_PORT_CTRL_MACTYPE_10GBASER_RATE_MATCH = 0x6, MV_V2_33X0_PORT_CTRL_MACTYPE_USXGMII = 0x7, - MV_V2_PORT_INTR_STS = 0xf040, - MV_V2_PORT_INTR_MASK = 0xf043, - MV_V2_PORT_INTR_STS_WOL_EN = BIT(8), - MV_V2_MAGIC_PKT_WORD0 = 0xf06b, - MV_V2_MAGIC_PKT_WORD1 = 0xf06c, - MV_V2_MAGIC_PKT_WORD2 = 0xf06d, + MV_V2_PORT_INTR_STS = 0xf040, + MV_V2_PORT_INTR_MASK = 0xf043, + MV_V2_PORT_INTR_STS_WOL_EN = BIT(8), + MV_V2_MAGIC_PKT_WORD0 = 0xf06b, + MV_V2_MAGIC_PKT_WORD1 = 0xf06c, + MV_V2_MAGIC_PKT_WORD2 = 0xf06d, /* Wake on LAN registers */ - MV_V2_WOL_CTRL = 0xf06e, - MV_V2_WOL_CTRL_CLEAR_STS = BIT(15), - MV_V2_WOL_CTRL_MAGIC_PKT_EN = BIT(0), + MV_V2_WOL_CTRL = 0xf06e, + MV_V2_WOL_CTRL_CLEAR_STS = BIT(15), + MV_V2_WOL_CTRL_MAGIC_PKT_EN = BIT(0), /* Temperature control/read registers (88X3310 only) */ MV_V2_TEMP_CTRL = 0xf08a, MV_V2_TEMP_CTRL_MASK = 0xc000, @@ -140,6 +145,8 @@ struct mv3310_chip { bool (*has_downshift)(struct phy_device *phydev); void (*init_supported_interfaces)(unsigned long *mask); int (*get_mactype)(struct phy_device *phydev); + int (*set_mactype)(struct phy_device *phydev, int mactype); + int (*select_mactype)(unsigned long *interfaces); int (*init_interface)(struct phy_device *phydev, int mactype); #ifdef CONFIG_HWMON @@ -466,9 +473,10 @@ static int mv3310_sfp_insert(void *upstream, const struct sfp_eeprom_id *id) { struct phy_device *phydev = upstream; __ETHTOOL_DECLARE_LINK_MODE_MASK(support) = { 0, }; + DECLARE_PHY_INTERFACE_MASK(interfaces); phy_interface_t iface; - sfp_parse_support(phydev->sfp_bus, id, support); + sfp_parse_support(phydev->sfp_bus, id, support, interfaces); iface = sfp_select_interface(phydev->sfp_bus, support); if (iface != PHY_INTERFACE_MODE_10GBASER) { @@ -593,6 +601,49 @@ static int mv2110_get_mactype(struct phy_device *phydev) return mactype & MV_PMA_21X0_PORT_CTRL_MACTYPE_MASK; } +static int mv2110_set_mactype(struct phy_device *phydev, int mactype) +{ + int err, val; + + mactype &= MV_PMA_21X0_PORT_CTRL_MACTYPE_MASK; + err = phy_modify_mmd(phydev, MDIO_MMD_PMAPMD, MV_PMA_21X0_PORT_CTRL, + MV_PMA_21X0_PORT_CTRL_SWRST | + MV_PMA_21X0_PORT_CTRL_MACTYPE_MASK, + MV_PMA_21X0_PORT_CTRL_SWRST | mactype); + if (err) + return err; + + err = phy_set_bits_mmd(phydev, MDIO_MMD_AN, MV_AN_21X0_SERDES_CTRL2, + MV_AN_21X0_SERDES_CTRL2_AUTO_INIT_DIS | + MV_AN_21X0_SERDES_CTRL2_RUN_INIT); + if (err) + return err; + + err = phy_read_mmd_poll_timeout(phydev, MDIO_MMD_AN, + MV_AN_21X0_SERDES_CTRL2, val, + !(val & + MV_AN_21X0_SERDES_CTRL2_RUN_INIT), + 5000, 100000, true); + if (err) + return err; + + return phy_clear_bits_mmd(phydev, MDIO_MMD_AN, MV_AN_21X0_SERDES_CTRL2, + MV_AN_21X0_SERDES_CTRL2_AUTO_INIT_DIS); +} + +static int mv2110_select_mactype(unsigned long *interfaces) +{ + if (test_bit(PHY_INTERFACE_MODE_USXGMII, interfaces)) + return MV_PMA_21X0_PORT_CTRL_MACTYPE_USXGMII; + else if (test_bit(PHY_INTERFACE_MODE_SGMII, interfaces) && + !test_bit(PHY_INTERFACE_MODE_10GBASER, interfaces)) + return MV_PMA_21X0_PORT_CTRL_MACTYPE_5GBASER; + else if (test_bit(PHY_INTERFACE_MODE_10GBASER, interfaces)) + return MV_PMA_21X0_PORT_CTRL_MACTYPE_10GBASER_RATE_MATCH; + else + return -1; +} + static int mv3310_get_mactype(struct phy_device *phydev) { int mactype; @@ -604,6 +655,46 @@ static int mv3310_get_mactype(struct phy_device *phydev) return mactype & MV_V2_33X0_PORT_CTRL_MACTYPE_MASK; } +static int mv3310_set_mactype(struct phy_device *phydev, int mactype) +{ + int ret; + + mactype 
&= MV_V2_33X0_PORT_CTRL_MACTYPE_MASK; + ret = phy_modify_mmd_changed(phydev, MDIO_MMD_VEND2, MV_V2_PORT_CTRL, + MV_V2_33X0_PORT_CTRL_MACTYPE_MASK, + mactype); + if (ret <= 0) + return ret; + + return phy_set_bits_mmd(phydev, MDIO_MMD_VEND2, MV_V2_PORT_CTRL, + MV_V2_33X0_PORT_CTRL_SWRST); +} + +static int mv3310_select_mactype(unsigned long *interfaces) +{ + if (test_bit(PHY_INTERFACE_MODE_USXGMII, interfaces)) + return MV_V2_33X0_PORT_CTRL_MACTYPE_USXGMII; + else if (test_bit(PHY_INTERFACE_MODE_SGMII, interfaces) && + test_bit(PHY_INTERFACE_MODE_10GBASER, interfaces)) + return MV_V2_33X0_PORT_CTRL_MACTYPE_10GBASER; + else if (test_bit(PHY_INTERFACE_MODE_SGMII, interfaces) && + test_bit(PHY_INTERFACE_MODE_RXAUI, interfaces)) + return MV_V2_33X0_PORT_CTRL_MACTYPE_RXAUI; + else if (test_bit(PHY_INTERFACE_MODE_SGMII, interfaces) && + test_bit(PHY_INTERFACE_MODE_XAUI, interfaces)) + return MV_V2_3310_PORT_CTRL_MACTYPE_XAUI; + else if (test_bit(PHY_INTERFACE_MODE_10GBASER, interfaces)) + return MV_V2_33X0_PORT_CTRL_MACTYPE_10GBASER_RATE_MATCH; + else if (test_bit(PHY_INTERFACE_MODE_RXAUI, interfaces)) + return MV_V2_33X0_PORT_CTRL_MACTYPE_RXAUI_RATE_MATCH; + else if (test_bit(PHY_INTERFACE_MODE_XAUI, interfaces)) + return MV_V2_3310_PORT_CTRL_MACTYPE_XAUI_RATE_MATCH; + else if (test_bit(PHY_INTERFACE_MODE_SGMII, interfaces)) + return MV_V2_33X0_PORT_CTRL_MACTYPE_10GBASER; + else + return -1; +} + static int mv2110_init_interface(struct phy_device *phydev, int mactype) { struct mv3310_priv *priv = dev_get_drvdata(&phydev->mdio.dev); @@ -687,6 +778,20 @@ static int mv3310_config_init(struct phy_device *phydev) if (err) return err; + /* If the host provided the interface modes it supports, try to select + * the best one + */ + if (!phy_interface_empty(phydev->host_interfaces)) { + mactype = chip->select_mactype(phydev->host_interfaces); + if (mactype >= 0) { + phydev_info(phydev, "Changing MACTYPE to %i\n", + mactype); + err = chip->set_mactype(phydev, mactype); + if (err) + return err; + } + } + mactype = chip->get_mactype(phydev); if (mactype < 0) return mactype; @@ -1049,6 +1154,8 @@ static const struct mv3310_chip mv3310_type = { .has_downshift = mv3310_has_downshift, .init_supported_interfaces = mv3310_init_supported_interfaces, .get_mactype = mv3310_get_mactype, + .set_mactype = mv3310_set_mactype, + .select_mactype = mv3310_select_mactype, .init_interface = mv3310_init_interface, #ifdef CONFIG_HWMON @@ -1060,6 +1167,8 @@ static const struct mv3310_chip mv3340_type = { .has_downshift = mv3310_has_downshift, .init_supported_interfaces = mv3340_init_supported_interfaces, .get_mactype = mv3310_get_mactype, + .set_mactype = mv3310_set_mactype, + .select_mactype = mv3310_select_mactype, .init_interface = mv3340_init_interface, #ifdef CONFIG_HWMON @@ -1070,6 +1179,8 @@ static const struct mv3310_chip mv3340_type = { static const struct mv3310_chip mv2110_type = { .init_supported_interfaces = mv2110_init_supported_interfaces, .get_mactype = mv2110_get_mactype, + .set_mactype = mv2110_set_mactype, + .select_mactype = mv2110_select_mactype, .init_interface = mv2110_init_interface, #ifdef CONFIG_HWMON @@ -1080,6 +1191,8 @@ static const struct mv3310_chip mv2110_type = { static const struct mv3310_chip mv2111_type = { .init_supported_interfaces = mv2111_init_supported_interfaces, .get_mactype = mv2110_get_mactype, + .set_mactype = mv2110_set_mactype, + .select_mactype = mv2110_select_mactype, .init_interface = mv2110_init_interface, #ifdef CONFIG_HWMON diff --git a/drivers/net/phy/phy_device.c
b/drivers/net/phy/phy_device.c index a4f5f151014a..57849ac0384e 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -26,6 +26,7 @@ #include <linux/netdevice.h> #include <linux/phy.h> #include <linux/phy_led_triggers.h> +#include <linux/pse-pd/pse.h> #include <linux/property.h> #include <linux/sfp.h> #include <linux/skbuff.h> @@ -991,6 +992,7 @@ EXPORT_SYMBOL(phy_device_register); void phy_device_remove(struct phy_device *phydev) { unregister_mii_timestamper(phydev->mii_ts); + pse_control_put(phydev->psec); device_del(&phydev->mdio.dev); diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c index d0af026c9afa..75464df191ef 100644 --- a/drivers/net/phy/phylink.c +++ b/drivers/net/phy/phylink.c @@ -77,6 +77,7 @@ struct phylink { struct sfp_bus *sfp_bus; bool sfp_may_have_phy; + DECLARE_PHY_INTERFACE_MASK(sfp_interfaces); __ETHTOOL_DECLARE_LINK_MODE_MASK(sfp_support); u8 sfp_port; }; @@ -637,8 +638,9 @@ static int phylink_validate_mac_and_pcs(struct phylink *pl, return phylink_is_empty_linkmode(supported) ? -EINVAL : 0; } -static int phylink_validate_any(struct phylink *pl, unsigned long *supported, - struct phylink_link_state *state) +static int phylink_validate_mask(struct phylink *pl, unsigned long *supported, + struct phylink_link_state *state, + const unsigned long *interfaces) { __ETHTOOL_DECLARE_LINK_MODE_MASK(all_adv) = { 0, }; __ETHTOOL_DECLARE_LINK_MODE_MASK(all_s) = { 0, }; @@ -647,7 +649,7 @@ static int phylink_validate_any(struct phylink *pl, unsigned long *supported, int intf; for (intf = 0; intf < PHY_INTERFACE_MODE_MAX; intf++) { - if (test_bit(intf, pl->config->supported_interfaces)) { + if (test_bit(intf, interfaces)) { linkmode_copy(s, supported); t = *state; @@ -668,12 +670,14 @@ static int phylink_validate_any(struct phylink *pl, unsigned long *supported, static int phylink_validate(struct phylink *pl, unsigned long *supported, struct phylink_link_state *state) { - if (!phy_interface_empty(pl->config->supported_interfaces)) { + const unsigned long *interfaces = pl->config->supported_interfaces; + + if (!phy_interface_empty(interfaces)) { if (state->interface == PHY_INTERFACE_MODE_NA) - return phylink_validate_any(pl, supported, state); + return phylink_validate_mask(pl, supported, state, + interfaces); - if (!test_bit(state->interface, - pl->config->supported_interfaces)) + if (!test_bit(state->interface, interfaces)) return -EINVAL; } @@ -1665,7 +1669,7 @@ static int phylink_attach_phy(struct phylink *pl, struct phy_device *phy, { if (WARN_ON(pl->cfg_link_an_mode == MLO_AN_FIXED || (pl->cfg_link_an_mode == MLO_AN_INBAND && - phy_interface_mode_is_8023z(interface)))) + phy_interface_mode_is_8023z(interface) && !pl->sfp_bus))) return -EINVAL; if (pl->phydev) @@ -2799,21 +2803,85 @@ static void phylink_sfp_detach(void *upstream, struct sfp_bus *bus) pl->netdev->sfp_bus = NULL; } -static int phylink_sfp_config(struct phylink *pl, u8 mode, - const unsigned long *supported, - const unsigned long *advertising) +static const phy_interface_t phylink_sfp_interface_preference[] = { + PHY_INTERFACE_MODE_25GBASER, + PHY_INTERFACE_MODE_USXGMII, + PHY_INTERFACE_MODE_10GBASER, + PHY_INTERFACE_MODE_5GBASER, + PHY_INTERFACE_MODE_2500BASEX, + PHY_INTERFACE_MODE_SGMII, + PHY_INTERFACE_MODE_1000BASEX, + PHY_INTERFACE_MODE_100BASEX, +}; + +static DECLARE_PHY_INTERFACE_MASK(phylink_sfp_interfaces); + +static phy_interface_t phylink_choose_sfp_interface(struct phylink *pl, + const unsigned long *intf) +{ + phy_interface_t interface; + size_t i; + + 
interface = PHY_INTERFACE_MODE_NA; + for (i = 0; i < ARRAY_SIZE(phylink_sfp_interface_preference); i++) + if (test_bit(phylink_sfp_interface_preference[i], intf)) { + interface = phylink_sfp_interface_preference[i]; + break; + } + + return interface; +} + +static void phylink_sfp_set_config(struct phylink *pl, u8 mode, + unsigned long *supported, + struct phylink_link_state *state) +{ + bool changed = false; + + phylink_dbg(pl, "requesting link mode %s/%s with support %*pb\n", + phylink_an_mode_str(mode), phy_modes(state->interface), + __ETHTOOL_LINK_MODE_MASK_NBITS, supported); + + if (!linkmode_equal(pl->supported, supported)) { + linkmode_copy(pl->supported, supported); + changed = true; + } + + if (!linkmode_equal(pl->link_config.advertising, state->advertising)) { + linkmode_copy(pl->link_config.advertising, state->advertising); + changed = true; + } + + if (pl->cur_link_an_mode != mode || + pl->link_config.interface != state->interface) { + pl->cur_link_an_mode = mode; + pl->link_config.interface = state->interface; + + changed = true; + + phylink_info(pl, "switched to %s/%s link mode\n", + phylink_an_mode_str(mode), + phy_modes(state->interface)); + } + + if (changed && !test_bit(PHYLINK_DISABLE_STOPPED, + &pl->phylink_disable_state)) + phylink_mac_initial_config(pl, false); +} + +static int phylink_sfp_config_phy(struct phylink *pl, u8 mode, + struct phy_device *phy) { __ETHTOOL_DECLARE_LINK_MODE_MASK(support1); __ETHTOOL_DECLARE_LINK_MODE_MASK(support); struct phylink_link_state config; phy_interface_t iface; - bool changed; int ret; - linkmode_copy(support, supported); + linkmode_copy(support, phy->supported); memset(&config, 0, sizeof(config)); - linkmode_copy(config.advertising, advertising); + linkmode_copy(config.advertising, phy->advertising); config.interface = PHY_INTERFACE_MODE_NA; config.speed = SPEED_UNKNOWN; config.duplex = DUPLEX_UNKNOWN; @@ -2850,60 +2918,100 @@ static int phylink_sfp_config(struct phylink *pl, u8 mode, return ret; } - phylink_dbg(pl, "requesting link mode %s/%s with support %*pb\n", - phylink_an_mode_str(mode), phy_modes(config.interface), - __ETHTOOL_LINK_MODE_MASK_NBITS, support); + pl->link_port = pl->sfp_port; + + phylink_sfp_set_config(pl, mode, support, &config); + + return 0; +} + +static int phylink_sfp_config_optical(struct phylink *pl) +{ + __ETHTOOL_DECLARE_LINK_MODE_MASK(support); + DECLARE_PHY_INTERFACE_MASK(interfaces); + struct phylink_link_state config; + phy_interface_t interface; + int ret; + + phylink_dbg(pl, "optical SFP: interfaces=[mac=%*pbl, sfp=%*pbl]\n", + (int)PHY_INTERFACE_MODE_MAX, + pl->config->supported_interfaces, + (int)PHY_INTERFACE_MODE_MAX, + pl->sfp_interfaces); - if (phy_interface_mode_is_8023z(iface) && pl->phydev) + /* Find the union of the supported interfaces by the PCS/MAC and + * the SFP module. 
+ */ + phy_interface_and(interfaces, pl->config->supported_interfaces, + pl->sfp_interfaces); + if (phy_interface_empty(interfaces)) { + phylink_err(pl, "unsupported SFP module: no common interface modes\n"); return -EINVAL; + } - changed = !linkmode_equal(pl->supported, support) || - !linkmode_equal(pl->link_config.advertising, - config.advertising); - if (changed) { - linkmode_copy(pl->supported, support); - linkmode_copy(pl->link_config.advertising, config.advertising); + memset(&config, 0, sizeof(config)); + linkmode_copy(support, pl->sfp_support); + linkmode_copy(config.advertising, pl->sfp_support); + config.speed = SPEED_UNKNOWN; + config.duplex = DUPLEX_UNKNOWN; + config.pause = MLO_PAUSE_AN; + config.an_enabled = true; + + /* For all the interfaces that are supported, reduce the sfp_support + * mask to only those link modes that can be supported. + */ + ret = phylink_validate_mask(pl, pl->sfp_support, &config, interfaces); + if (ret) { + phylink_err(pl, "unsupported SFP module: validation with support %*pb failed\n", + __ETHTOOL_LINK_MODE_MASK_NBITS, support); + return ret; } - if (pl->cur_link_an_mode != mode || - pl->link_config.interface != config.interface) { - pl->link_config.interface = config.interface; - pl->cur_link_an_mode = mode; + interface = phylink_choose_sfp_interface(pl, interfaces); + if (interface == PHY_INTERFACE_MODE_NA) { + phylink_err(pl, "failed to select SFP interface\n"); + return -EINVAL; + } - changed = true; + phylink_dbg(pl, "optical SFP: chosen %s interface\n", + phy_modes(interface)); - phylink_info(pl, "switched to %s/%s link mode\n", - phylink_an_mode_str(mode), - phy_modes(config.interface)); + config.interface = interface; + + /* Ignore errors if we're expecting a PHY to attach later */ + ret = phylink_validate(pl, support, &config); + if (ret) { + phylink_err(pl, "validation with support %*pb failed: %pe\n", + __ETHTOOL_LINK_MODE_MASK_NBITS, support, + ERR_PTR(ret)); + return ret; } pl->link_port = pl->sfp_port; - if (changed && !test_bit(PHYLINK_DISABLE_STOPPED, - &pl->phylink_disable_state)) - phylink_mac_initial_config(pl, false); + phylink_sfp_set_config(pl, MLO_AN_INBAND, pl->sfp_support, &config); - return ret; + return 0; } static int phylink_sfp_module_insert(void *upstream, const struct sfp_eeprom_id *id) { struct phylink *pl = upstream; - unsigned long *support = pl->sfp_support; ASSERT_RTNL(); - linkmode_zero(support); - sfp_parse_support(pl->sfp_bus, id, support); - pl->sfp_port = sfp_parse_port(pl->sfp_bus, id, support); + linkmode_zero(pl->sfp_support); + phy_interface_zero(pl->sfp_interfaces); + sfp_parse_support(pl->sfp_bus, id, pl->sfp_support, pl->sfp_interfaces); + pl->sfp_port = sfp_parse_port(pl->sfp_bus, id, pl->sfp_support); /* If this module may have a PHY connecting later, defer until later */ pl->sfp_may_have_phy = sfp_may_have_phy(pl->sfp_bus, id); if (pl->sfp_may_have_phy) return 0; - return phylink_sfp_config(pl, MLO_AN_INBAND, support, support); + return phylink_sfp_config_optical(pl); } static int phylink_sfp_module_start(void *upstream) @@ -2922,8 +3030,7 @@ static int phylink_sfp_module_start(void *upstream) if (!pl->sfp_may_have_phy) return 0; - return phylink_sfp_config(pl, MLO_AN_INBAND, - pl->sfp_support, pl->sfp_support); + return phylink_sfp_config_optical(pl); } static void phylink_sfp_module_stop(void *upstream) @@ -2983,8 +3090,12 @@ static int phylink_sfp_connect_phy(void *upstream, struct phy_device *phy) else mode = MLO_AN_INBAND; + /* Set the PHY's host supported interfaces */ + 
phy_interface_and(phy->host_interfaces, phylink_sfp_interfaces, + pl->config->supported_interfaces); + /* Do the initial configuration */ - ret = phylink_sfp_config(pl, mode, phy->supported, phy->advertising); + ret = phylink_sfp_config_phy(pl, mode, phy); if (ret < 0) return ret; @@ -3336,4 +3447,15 @@ void phylink_mii_c45_pcs_get_state(struct mdio_device *pcs, } EXPORT_SYMBOL_GPL(phylink_mii_c45_pcs_get_state); +static int __init phylink_init(void) +{ + for (int i = 0; i < ARRAY_SIZE(phylink_sfp_interface_preference); ++i) + __set_bit(phylink_sfp_interface_preference[i], + phylink_sfp_interfaces); + + return 0; +} + +module_init(phylink_init); + MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/phy/sfp-bus.c b/drivers/net/phy/sfp-bus.c index 0a9099c77694..29e3fa86bac3 100644 --- a/drivers/net/phy/sfp-bus.c +++ b/drivers/net/phy/sfp-bus.c @@ -139,12 +139,14 @@ EXPORT_SYMBOL_GPL(sfp_may_have_phy); * @bus: a pointer to the &struct sfp_bus structure for the sfp module * @id: a pointer to the module's &struct sfp_eeprom_id * @support: pointer to an array of unsigned long for the ethtool support mask + * @interfaces: pointer to an array of unsigned long for phy interface modes + * mask * * Parse the EEPROM identification information and derive the supported * ethtool link modes for the module. */ void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id, - unsigned long *support) + unsigned long *support, unsigned long *interfaces) { unsigned int br_min, br_nom, br_max; __ETHTOOL_DECLARE_LINK_MODE_MASK(modes) = { 0, }; @@ -171,54 +173,81 @@ void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id, } /* Set ethtool support from the compliance fields. */ - if (id->base.e10g_base_sr) + if (id->base.e10g_base_sr) { phylink_set(modes, 10000baseSR_Full); - if (id->base.e10g_base_lr) + __set_bit(PHY_INTERFACE_MODE_10GBASER, interfaces); + } + if (id->base.e10g_base_lr) { phylink_set(modes, 10000baseLR_Full); - if (id->base.e10g_base_lrm) + __set_bit(PHY_INTERFACE_MODE_10GBASER, interfaces); + } + if (id->base.e10g_base_lrm) { phylink_set(modes, 10000baseLRM_Full); - if (id->base.e10g_base_er) + __set_bit(PHY_INTERFACE_MODE_10GBASER, interfaces); + } + if (id->base.e10g_base_er) { phylink_set(modes, 10000baseER_Full); + __set_bit(PHY_INTERFACE_MODE_10GBASER, interfaces); + } if (id->base.e1000_base_sx || id->base.e1000_base_lx || - id->base.e1000_base_cx) + id->base.e1000_base_cx) { phylink_set(modes, 1000baseX_Full); + __set_bit(PHY_INTERFACE_MODE_1000BASEX, interfaces); + } if (id->base.e1000_base_t) { phylink_set(modes, 1000baseT_Half); phylink_set(modes, 1000baseT_Full); + __set_bit(PHY_INTERFACE_MODE_1000BASEX, interfaces); + __set_bit(PHY_INTERFACE_MODE_SGMII, interfaces); } /* 1000Base-PX or 1000Base-BX10 */ if ((id->base.e_base_px || id->base.e_base_bx10) && - br_min <= 1300 && br_max >= 1200) + br_min <= 1300 && br_max >= 1200) { phylink_set(modes, 1000baseX_Full); + __set_bit(PHY_INTERFACE_MODE_1000BASEX, interfaces); + } /* 100Base-FX, 100Base-LX, 100Base-PX, 100Base-BX10 */ - if (id->base.e100_base_fx || id->base.e100_base_lx) + if (id->base.e100_base_fx || id->base.e100_base_lx) { phylink_set(modes, 100baseFX_Full); - if ((id->base.e_base_px || id->base.e_base_bx10) && br_nom == 100) + __set_bit(PHY_INTERFACE_MODE_100BASEX, interfaces); + } + if ((id->base.e_base_px || id->base.e_base_bx10) && br_nom == 100) { phylink_set(modes, 100baseFX_Full); + __set_bit(PHY_INTERFACE_MODE_100BASEX, interfaces); + } /* For active or passive cables, select the link 
modes * based on the bit rates and the cable compliance bytes. */ if ((id->base.sfp_ct_passive || id->base.sfp_ct_active) && br_nom) { /* This may look odd, but some manufacturers use 12000MBd */ - if (br_min <= 12000 && br_max >= 10300) + if (br_min <= 12000 && br_max >= 10300) { phylink_set(modes, 10000baseCR_Full); - if (br_min <= 3200 && br_max >= 3100) + __set_bit(PHY_INTERFACE_MODE_10GBASER, interfaces); + } + if (br_min <= 3200 && br_max >= 3100) { phylink_set(modes, 2500baseX_Full); - if (br_min <= 1300 && br_max >= 1200) + __set_bit(PHY_INTERFACE_MODE_2500BASEX, interfaces); + } + if (br_min <= 1300 && br_max >= 1200) { phylink_set(modes, 1000baseX_Full); + __set_bit(PHY_INTERFACE_MODE_1000BASEX, interfaces); + } } if (id->base.sfp_ct_passive) { - if (id->base.passive.sff8431_app_e) + if (id->base.passive.sff8431_app_e) { phylink_set(modes, 10000baseCR_Full); + __set_bit(PHY_INTERFACE_MODE_10GBASER, interfaces); + } } if (id->base.sfp_ct_active) { if (id->base.active.sff8431_app_e || id->base.active.sff8431_lim) { phylink_set(modes, 10000baseCR_Full); + __set_bit(PHY_INTERFACE_MODE_10GBASER, interfaces); } } @@ -243,12 +272,14 @@ void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id, case SFF8024_ECC_10GBASE_T_SFI: case SFF8024_ECC_10GBASE_T_SR: phylink_set(modes, 10000baseT_Full); + __set_bit(PHY_INTERFACE_MODE_10GBASER, interfaces); break; case SFF8024_ECC_5GBASE_T: phylink_set(modes, 5000baseT_Full); break; case SFF8024_ECC_2_5GBASE_T: phylink_set(modes, 2500baseT_Full); + __set_bit(PHY_INTERFACE_MODE_2500BASEX, interfaces); break; default: dev_warn(bus->sfp_dev, @@ -261,10 +292,14 @@ void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id, if (id->base.fc_speed_100 || id->base.fc_speed_200 || id->base.fc_speed_400) { - if (id->base.br_nominal >= 31) + if (id->base.br_nominal >= 31) { phylink_set(modes, 2500baseX_Full); - if (id->base.br_nominal >= 12) + __set_bit(PHY_INTERFACE_MODE_2500BASEX, interfaces); + } + if (id->base.br_nominal >= 12) { phylink_set(modes, 1000baseX_Full); + __set_bit(PHY_INTERFACE_MODE_1000BASEX, interfaces); + } } /* If we haven't discovered any modes that this module supports, try @@ -277,14 +312,18 @@ void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id, * 2500BASE-X, so we allow some slack here. */ if (bitmap_empty(modes, __ETHTOOL_LINK_MODE_MASK_NBITS) && br_nom) { - if (br_min <= 1300 && br_max >= 1200) + if (br_min <= 1300 && br_max >= 1200) { phylink_set(modes, 1000baseX_Full); - if (br_min <= 3200 && br_max >= 2500) + __set_bit(PHY_INTERFACE_MODE_1000BASEX, interfaces); + } + if (br_min <= 3200 && br_max >= 2500) { phylink_set(modes, 2500baseX_Full); + __set_bit(PHY_INTERFACE_MODE_2500BASEX, interfaces); + } } if (bus->sfp_quirk && bus->sfp_quirk->modes) - bus->sfp_quirk->modes(id, modes); + bus->sfp_quirk->modes(id, modes, interfaces); linkmode_or(support, support, modes); diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c index cb1dbd0d9701..40c9a64c5e30 100644 --- a/drivers/net/phy/sfp.c +++ b/drivers/net/phy/sfp.c @@ -166,6 +166,7 @@ static const enum gpiod_flags gpio_flags[] = { * on board (for a copper SFP) time to initialise. 
*/ #define T_WAIT msecs_to_jiffies(50) +#define T_WAIT_ROLLBALL msecs_to_jiffies(25000) #define T_START_UP msecs_to_jiffies(300) #define T_START_UP_BAD_GPON msecs_to_jiffies(60000) @@ -205,8 +206,11 @@ static const enum gpiod_flags gpio_flags[] = { /* SFP modules appear to always have their PHY configured for bus address * 0x56 (which with mdio-i2c, translates to a PHY address of 22). + * RollBall SFPs access the PHY through the SFP Enhanced Digital Diagnostic + * Interface at address 0x51 (mdio-i2c will use the RollBall protocol on + * this address). */ -#define SFP_PHY_ADDR 22 +#define SFP_PHY_ADDR 22 +#define SFP_PHY_ADDR_ROLLBALL 17 struct sff_data { unsigned int gpios; @@ -218,6 +222,7 @@ struct sfp { struct i2c_adapter *i2c; struct mii_bus *i2c_mii; struct sfp_bus *sfp_bus; + enum mdio_i2c_proto mdio_protocol; struct phy_device *mod_phy; const struct sff_data *type; size_t i2c_block_size; @@ -251,6 +256,7 @@ struct sfp { struct sfp_eeprom_id id; unsigned int module_power_mW; unsigned int module_t_start_up; + unsigned int module_t_wait; bool tx_fault_ignore; const struct sfp_quirk *quirk; @@ -330,14 +336,33 @@ static void sfp_fixup_halny_gsfp(struct sfp *sfp) sfp->state_hw_mask &= ~(SFP_F_TX_FAULT | SFP_F_LOS); } +static void sfp_fixup_rollball(struct sfp *sfp) +{ + sfp->mdio_protocol = MDIO_I2C_ROLLBALL; + sfp->module_t_wait = T_WAIT_ROLLBALL; +} + +static void sfp_fixup_rollball_cc(struct sfp *sfp) +{ + sfp_fixup_rollball(sfp); + + /* Some RollBall SFPs may have a wrong (zero) extended compliance code + * burned into their EEPROM. For PHY probing we need the correct one. + */ + sfp->id.base.extended_cc = SFF8024_ECC_10GBASE_T_SFI; +} + static void sfp_quirk_2500basex(const struct sfp_eeprom_id *id, - unsigned long *modes) + unsigned long *modes, + unsigned long *interfaces) { linkmode_set_bit(ETHTOOL_LINK_MODE_2500baseX_Full_BIT, modes); + __set_bit(PHY_INTERFACE_MODE_2500BASEX, interfaces); } static void sfp_quirk_ubnt_uf_instant(const struct sfp_eeprom_id *id, - unsigned long *modes) + unsigned long *modes, + unsigned long *interfaces) { /* The Ubiquiti U-Fiber Instant module claims to support all transceiver * types, including 10G Ethernet, which is not true.
So clear all claimed @@ -347,42 +372,39 @@ static void sfp_quirk_ubnt_uf_instant(const struct sfp_eeprom_id *id, linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT, modes); } +#define SFP_QUIRK(_v, _p, _m, _f) \ + { .vendor = _v, .part = _p, .modes = _m, .fixup = _f, } +#define SFP_QUIRK_M(_v, _p, _m) SFP_QUIRK(_v, _p, _m, NULL) +#define SFP_QUIRK_F(_v, _p, _f) SFP_QUIRK(_v, _p, NULL, _f) + static const struct sfp_quirk sfp_quirks[] = { - { - // Alcatel Lucent G-010S-P can operate at 2500base-X, but - // incorrectly report 2500MBd NRZ in their EEPROM - .vendor = "ALCATELLUCENT", - .part = "G010SP", - .modes = sfp_quirk_2500basex, - }, { - // Alcatel Lucent G-010S-A can operate at 2500base-X, but - // report 3.2GBd NRZ in their EEPROM - .vendor = "ALCATELLUCENT", - .part = "3FE46541AA", - .modes = sfp_quirk_2500basex, - .fixup = sfp_fixup_long_startup, - }, { - .vendor = "HALNy", - .part = "HL-GSFP", - .fixup = sfp_fixup_halny_gsfp, - }, { - // Huawei MA5671A can operate at 2500base-X, but report 1.2GBd - // NRZ in their EEPROM - .vendor = "HUAWEI", - .part = "MA5671A", - .modes = sfp_quirk_2500basex, - .fixup = sfp_fixup_ignore_tx_fault, - }, { - // Lantech 8330-262D-E can operate at 2500base-X, but - // incorrectly report 2500MBd NRZ in their EEPROM - .vendor = "Lantech", - .part = "8330-262D-E", - .modes = sfp_quirk_2500basex, - }, { - .vendor = "UBNT", - .part = "UF-INSTANT", - .modes = sfp_quirk_ubnt_uf_instant, - } + // Alcatel Lucent G-010S-P can operate at 2500base-X, but incorrectly + // report 2500MBd NRZ in their EEPROM + SFP_QUIRK_M("ALCATELLUCENT", "G010SP", sfp_quirk_2500basex), + + // Alcatel Lucent G-010S-A can operate at 2500base-X, but report 3.2GBd + // NRZ in their EEPROM + SFP_QUIRK("ALCATELLUCENT", "3FE46541AA", sfp_quirk_2500basex, + sfp_fixup_long_startup), + + SFP_QUIRK_F("HALNy", "HL-GSFP", sfp_fixup_halny_gsfp), + + // Huawei MA5671A can operate at 2500base-X, but report 1.2GBd NRZ in + // their EEPROM + SFP_QUIRK("HUAWEI", "MA5671A", sfp_quirk_2500basex, + sfp_fixup_ignore_tx_fault), + + // Lantech 8330-262D-E can operate at 2500base-X, but incorrectly report + // 2500MBd NRZ in their EEPROM + SFP_QUIRK_M("Lantech", "8330-262D-E", sfp_quirk_2500basex), + + SFP_QUIRK_M("UBNT", "UF-INSTANT", sfp_quirk_ubnt_uf_instant), + + SFP_QUIRK_F("OEM", "SFP-10G-T", sfp_fixup_rollball_cc), + SFP_QUIRK_F("OEM", "RTSFP-10", sfp_fixup_rollball_cc), + SFP_QUIRK_F("OEM", "RTSFP-10G", sfp_fixup_rollball_cc), + SFP_QUIRK_F("Turris", "RTSFP-10", sfp_fixup_rollball), + SFP_QUIRK_F("Turris", "RTSFP-10G", sfp_fixup_rollball), }; static size_t sfp_strlen(const char *str, size_t maxlen) @@ -536,9 +558,6 @@ static int sfp_i2c_write(struct sfp *sfp, bool a2, u8 dev_addr, void *buf, static int sfp_i2c_configure(struct sfp *sfp, struct i2c_adapter *i2c) { - struct mii_bus *i2c_mii; - int ret; - if (!i2c_check_functionality(i2c, I2C_FUNC_I2C)) return -EINVAL; @@ -546,7 +565,15 @@ static int sfp_i2c_configure(struct sfp *sfp, struct i2c_adapter *i2c) sfp->read = sfp_i2c_read; sfp->write = sfp_i2c_write; - i2c_mii = mdio_i2c_alloc(sfp->dev, i2c); + return 0; +} + +static int sfp_i2c_mdiobus_create(struct sfp *sfp) +{ + struct mii_bus *i2c_mii; + int ret; + + i2c_mii = mdio_i2c_alloc(sfp->dev, sfp->i2c, sfp->mdio_protocol); if (IS_ERR(i2c_mii)) return PTR_ERR(i2c_mii); @@ -564,6 +591,12 @@ static int sfp_i2c_configure(struct sfp *sfp, struct i2c_adapter *i2c) return 0; } +static void sfp_i2c_mdiobus_destroy(struct sfp *sfp) +{ + mdiobus_unregister(sfp->i2c_mii); + sfp->i2c_mii = NULL; +} + /* 
Interface */ static int sfp_read(struct sfp *sfp, bool a2, u8 addr, void *buf, size_t len) { @@ -1579,12 +1612,12 @@ static void sfp_sm_phy_detach(struct sfp *sfp) sfp->mod_phy = NULL; } -static int sfp_sm_probe_phy(struct sfp *sfp, bool is_c45) +static int sfp_sm_probe_phy(struct sfp *sfp, int addr, bool is_c45) { struct phy_device *phy; int err; - phy = get_phy_device(sfp->i2c_mii, SFP_PHY_ADDR, is_c45); + phy = get_phy_device(sfp->i2c_mii, addr, is_c45); if (phy == ERR_PTR(-ENODEV)) return PTR_ERR(phy); if (IS_ERR(phy)) { @@ -1680,6 +1713,14 @@ static void sfp_sm_fault(struct sfp *sfp, unsigned int next_state, bool warn) } } +static int sfp_sm_add_mdio_bus(struct sfp *sfp) +{ + if (sfp->mdio_protocol != MDIO_I2C_NONE) + return sfp_i2c_mdiobus_create(sfp); + + return 0; +} + /* Probe a SFP for a PHY device if the module supports copper - the PHY * normally sits at I2C bus address 0x56, and may either be a clause 22 * or clause 45 PHY. @@ -1695,19 +1736,23 @@ static int sfp_sm_probe_for_phy(struct sfp *sfp) { int err = 0; - switch (sfp->id.base.extended_cc) { - case SFF8024_ECC_10GBASE_T_SFI: - case SFF8024_ECC_10GBASE_T_SR: - case SFF8024_ECC_5GBASE_T: - case SFF8024_ECC_2_5GBASE_T: - err = sfp_sm_probe_phy(sfp, true); + switch (sfp->mdio_protocol) { + case MDIO_I2C_NONE: break; - default: - if (sfp->id.base.e1000_base_t) - err = sfp_sm_probe_phy(sfp, false); + case MDIO_I2C_MARVELL_C22: + err = sfp_sm_probe_phy(sfp, SFP_PHY_ADDR, false); + break; + + case MDIO_I2C_C45: + err = sfp_sm_probe_phy(sfp, SFP_PHY_ADDR, true); + break; + + case MDIO_I2C_ROLLBALL: + err = sfp_sm_probe_phy(sfp, SFP_PHY_ADDR_ROLLBALL, true); break; } + return err; } @@ -2031,9 +2076,20 @@ static int sfp_sm_mod_probe(struct sfp *sfp, bool report) sfp->state_hw_mask |= SFP_F_LOS; sfp->module_t_start_up = T_START_UP; + sfp->module_t_wait = T_WAIT; sfp->tx_fault_ignore = false; + if (sfp->id.base.extended_cc == SFF8024_ECC_10GBASE_T_SFI || + sfp->id.base.extended_cc == SFF8024_ECC_10GBASE_T_SR || + sfp->id.base.extended_cc == SFF8024_ECC_5GBASE_T || + sfp->id.base.extended_cc == SFF8024_ECC_2_5GBASE_T) + sfp->mdio_protocol = MDIO_I2C_C45; + else if (sfp->id.base.e1000_base_t) + sfp->mdio_protocol = MDIO_I2C_MARVELL_C22; + else + sfp->mdio_protocol = MDIO_I2C_NONE; + sfp->quirk = sfp_lookup_quirk(&id); if (sfp->quirk && sfp->quirk->fixup) sfp->quirk->fixup(sfp); @@ -2210,6 +2266,8 @@ static void sfp_sm_main(struct sfp *sfp, unsigned int event) sfp_module_stop(sfp->sfp_bus); if (sfp->mod_phy) sfp_sm_phy_detach(sfp); + if (sfp->i2c_mii) + sfp_i2c_mdiobus_destroy(sfp); sfp_module_tx_disable(sfp); sfp_soft_stop_poll(sfp); sfp_sm_next(sfp, SFP_S_DOWN, 0); @@ -2233,9 +2291,10 @@ static void sfp_sm_main(struct sfp *sfp, unsigned int event) /* We need to check the TX_FAULT state, which is not defined * while TX_DISABLE is asserted. The earliest we want to do - * anything (such as probe for a PHY) is 50ms. + * anything (such as probe for a PHY) is 50ms (or more on + * specific modules). */ - sfp_sm_next(sfp, SFP_S_WAIT, T_WAIT); + sfp_sm_next(sfp, SFP_S_WAIT, sfp->module_t_wait); break; case SFP_S_WAIT: @@ -2249,8 +2308,8 @@ static void sfp_sm_main(struct sfp *sfp, unsigned int event) * deasserting. 
*/ timeout = sfp->module_t_start_up; - if (timeout > T_WAIT) - timeout -= T_WAIT; + if (timeout > sfp->module_t_wait) + timeout -= sfp->module_t_wait; else timeout = 1; @@ -2272,6 +2331,12 @@ static void sfp_sm_main(struct sfp *sfp, unsigned int event) sfp->sm_fault_retries == N_FAULT_INIT); } else if (event == SFP_E_TIMEOUT || event == SFP_E_TX_CLEAR) { init_done: + /* Create the mdiobus and start probing for a PHY */ + ret = sfp_sm_add_mdio_bus(sfp); + if (ret < 0) { + sfp_sm_next(sfp, SFP_S_FAIL, 0); + break; + } sfp->sm_phy_retries = R_PHY_RETRY; goto phy_probe; } diff --git a/drivers/net/phy/sfp.h b/drivers/net/phy/sfp.h index 7ad06deae76c..6cf1643214d3 100644 --- a/drivers/net/phy/sfp.h +++ b/drivers/net/phy/sfp.h @@ -9,7 +9,8 @@ struct sfp; struct sfp_quirk { const char *vendor; const char *part; - void (*modes)(const struct sfp_eeprom_id *id, unsigned long *modes); + void (*modes)(const struct sfp_eeprom_id *id, unsigned long *modes, + unsigned long *interfaces); void (*fixup)(struct sfp *sfp); }; diff --git a/drivers/net/pse-pd/Kconfig b/drivers/net/pse-pd/Kconfig new file mode 100644 index 000000000000..73d163704068 --- /dev/null +++ b/drivers/net/pse-pd/Kconfig @@ -0,0 +1,22 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# Ethernet Power Sourcing Equipment drivers +# + +menuconfig PSE_CONTROLLER + bool "Ethernet Power Sourcing Equipment Support" + help + Generic Power Sourcing Equipment Controller support. + + If unsure, say no. + +if PSE_CONTROLLER + +config PSE_REGULATOR + tristate "Regulator based PSE controller" + help + This module provides support for simple regulator-based Ethernet Power + Sourcing Equipment without automatic classification support, for + example for a basic implementation of the PoDL (802.3bu) specification. + +endif diff --git a/drivers/net/pse-pd/Makefile b/drivers/net/pse-pd/Makefile new file mode 100644 index 000000000000..1b8aa4c70f0b --- /dev/null +++ b/drivers/net/pse-pd/Makefile @@ -0,0 +1,6 @@ +# SPDX-License-Identifier: GPL-2.0-only +# Makefile for Linux PSE drivers + +obj-$(CONFIG_PSE_CONTROLLER) += pse_core.o + +obj-$(CONFIG_PSE_REGULATOR) += pse_regulator.o diff --git a/drivers/net/pse-pd/pse_core.c b/drivers/net/pse-pd/pse_core.c new file mode 100644 index 000000000000..146b81f08a89 --- /dev/null +++ b/drivers/net/pse-pd/pse_core.c @@ -0,0 +1,314 @@ +// SPDX-License-Identifier: GPL-2.0-only +// +// Framework for Ethernet Power Sourcing Equipment +// +// Copyright (c) 2022 Pengutronix, Oleksij Rempel <kernel@pengutronix.de> +// + +#include <linux/device.h> +#include <linux/of.h> +#include <linux/pse-pd/pse.h> + +static DEFINE_MUTEX(pse_list_mutex); +static LIST_HEAD(pse_controller_list); + +/** + * struct pse_control - a PSE control + * @pcdev: a pointer to the PSE controller device + * this PSE control belongs to + * @list: list entry for the pcdev's PSE controller list + * @id: ID of the PSE line in the PSE controller device + * @refcnt: Number of gets of this pse_control + */ +struct pse_control { + struct pse_controller_dev *pcdev; + struct list_head list; + unsigned int id; + struct kref refcnt; +}; + +/** + * of_pse_zero_xlate - dummy function for controllers with only one control + * @pcdev: a pointer to the PSE controller device + * @pse_spec: PSE line specifier as found in the device tree + * + * This static translation function is used by default if of_xlate in + * :c:type:`pse_controller_dev` is not set. It is useful for all PSE + * controllers with #pse-cells = <0>.
+ */ +static int of_pse_zero_xlate(struct pse_controller_dev *pcdev, + const struct of_phandle_args *pse_spec) +{ + return 0; +} + +/** + * of_pse_simple_xlate - translate pse_spec to the PSE line number + * @pcdev: a pointer to the PSE controller device + * @pse_spec: PSE line specifier as found in the device tree + * + * This static translation function is used by default if of_xlate in + * :c:type:`pse_controller_dev` is not set. It is useful for all PSE + * controllers with 1:1 mapping, where PSE lines can be indexed by number + * without gaps. + */ +static int of_pse_simple_xlate(struct pse_controller_dev *pcdev, + const struct of_phandle_args *pse_spec) +{ + if (pse_spec->args[0] >= pcdev->nr_lines) + return -EINVAL; + + return pse_spec->args[0]; +} + +/** + * pse_controller_register - register a PSE controller device + * @pcdev: a pointer to the initialized PSE controller device + */ +int pse_controller_register(struct pse_controller_dev *pcdev) +{ + if (!pcdev->of_xlate) { + if (pcdev->of_pse_n_cells == 0) + pcdev->of_xlate = of_pse_zero_xlate; + else if (pcdev->of_pse_n_cells == 1) + pcdev->of_xlate = of_pse_simple_xlate; + } + + mutex_init(&pcdev->lock); + INIT_LIST_HEAD(&pcdev->pse_control_head); + + mutex_lock(&pse_list_mutex); + list_add(&pcdev->list, &pse_controller_list); + mutex_unlock(&pse_list_mutex); + + return 0; +} +EXPORT_SYMBOL_GPL(pse_controller_register); + +/** + * pse_controller_unregister - unregister a PSE controller device + * @pcdev: a pointer to the PSE controller device + */ +void pse_controller_unregister(struct pse_controller_dev *pcdev) +{ + mutex_lock(&pse_list_mutex); + list_del(&pcdev->list); + mutex_unlock(&pse_list_mutex); +} +EXPORT_SYMBOL_GPL(pse_controller_unregister); + +static void devm_pse_controller_release(struct device *dev, void *res) +{ + pse_controller_unregister(*(struct pse_controller_dev **)res); +} + +/** + * devm_pse_controller_register - resource managed pse_controller_register() + * @dev: device that is registering this PSE controller + * @pcdev: a pointer to the initialized PSE controller device + * + * Managed pse_controller_register(). For PSE controllers registered by + * this function, pse_controller_unregister() is automatically called on + * driver detach. See pse_controller_register() for more information. 
+ */ +int devm_pse_controller_register(struct device *dev, + struct pse_controller_dev *pcdev) +{ + struct pse_controller_dev **pcdevp; + int ret; + + pcdevp = devres_alloc(devm_pse_controller_release, sizeof(*pcdevp), + GFP_KERNEL); + if (!pcdevp) + return -ENOMEM; + + ret = pse_controller_register(pcdev); + if (ret) { + devres_free(pcdevp); + return ret; + } + + *pcdevp = pcdev; + devres_add(dev, pcdevp); + + return 0; +} +EXPORT_SYMBOL_GPL(devm_pse_controller_register); + +/* PSE control section */ + +static void __pse_control_release(struct kref *kref) +{ + struct pse_control *psec = container_of(kref, struct pse_control, + refcnt); + + lockdep_assert_held(&pse_list_mutex); + + module_put(psec->pcdev->owner); + + list_del(&psec->list); + kfree(psec); +} + +static void __pse_control_put_internal(struct pse_control *psec) +{ + lockdep_assert_held(&pse_list_mutex); + + kref_put(&psec->refcnt, __pse_control_release); +} + +/** + * pse_control_put - free the PSE control + * @psec: PSE control pointer + */ +void pse_control_put(struct pse_control *psec) +{ + if (IS_ERR_OR_NULL(psec)) + return; + + mutex_lock(&pse_list_mutex); + __pse_control_put_internal(psec); + mutex_unlock(&pse_list_mutex); +} +EXPORT_SYMBOL_GPL(pse_control_put); + +static struct pse_control * +pse_control_get_internal(struct pse_controller_dev *pcdev, unsigned int index) +{ + struct pse_control *psec; + + lockdep_assert_held(&pse_list_mutex); + + list_for_each_entry(psec, &pcdev->pse_control_head, list) { + if (psec->id == index) { + kref_get(&psec->refcnt); + return psec; + } + } + + psec = kzalloc(sizeof(*psec), GFP_KERNEL); + if (!psec) + return ERR_PTR(-ENOMEM); + + if (!try_module_get(pcdev->owner)) { + kfree(psec); + return ERR_PTR(-ENODEV); + } + + psec->pcdev = pcdev; + list_add(&psec->list, &pcdev->pse_control_head); + psec->id = index; + kref_init(&psec->refcnt); + + return psec; +} + +struct pse_control * +of_pse_control_get(struct device_node *node) +{ + struct pse_controller_dev *r, *pcdev; + struct of_phandle_args args; + struct pse_control *psec; + int psec_id; + int ret; + + if (!node) + return ERR_PTR(-EINVAL); + + ret = of_parse_phandle_with_args(node, "pses", "#pse-cells", 0, &args); + if (ret) + return ERR_PTR(ret); + + mutex_lock(&pse_list_mutex); + pcdev = NULL; + list_for_each_entry(r, &pse_controller_list, list) { + if (args.np == r->dev->of_node) { + pcdev = r; + break; + } + } + + if (!pcdev) { + psec = ERR_PTR(-EPROBE_DEFER); + goto out; + } + + if (WARN_ON(args.args_count != pcdev->of_pse_n_cells)) { + psec = ERR_PTR(-EINVAL); + goto out; + } + + psec_id = pcdev->of_xlate(pcdev, &args); + if (psec_id < 0) { + psec = ERR_PTR(psec_id); + goto out; + } + + /* pse_list_mutex also protects the pcdev's pse_control list */ + psec = pse_control_get_internal(pcdev, psec_id); + +out: + mutex_unlock(&pse_list_mutex); + of_node_put(args.np); + + return psec; +} +EXPORT_SYMBOL_GPL(of_pse_control_get);
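of_pse_control_get() plus the ethtool wrappers that follow form the whole consumer-facing API of this framework. A minimal consumer sketch, assuming a device node that carries a "pses" phandle; the example_pse_power_on() name and the NULL extack are illustrative and not part of this patch:

#include <linux/pse-pd/pse.h>

/* Hypothetical consumer, for illustration only: look up the PSE control
 * referenced by this device's "pses" phandle and enable power delivery.
 * The admin_cotrol field name follows the pse.h header as spelled in
 * pse_regulator.c below.
 */
static int example_pse_power_on(struct device *dev)
{
	struct pse_control_config config = {
		.admin_cotrol = ETHTOOL_PODL_PSE_ADMIN_STATE_ENABLED,
	};
	struct pse_control *psec;
	int ret;

	psec = of_pse_control_get(dev->of_node);
	if (IS_ERR(psec))
		return PTR_ERR(psec);	/* -EPROBE_DEFER until the controller registers */

	ret = pse_ethtool_set_config(psec, NULL, &config);
	if (ret)
		pse_control_put(psec);

	return ret;
}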
+ +/** + * pse_ethtool_get_status - get status of PSE control + * @psec: PSE control pointer + * @extack: extack for reporting useful error messages + * @status: struct to store PSE status + */ +int pse_ethtool_get_status(struct pse_control *psec, + struct netlink_ext_ack *extack, + struct pse_control_status *status) +{ + const struct pse_controller_ops *ops; + int err; + + ops = psec->pcdev->ops; + + if (!ops->ethtool_get_status) { + NL_SET_ERR_MSG(extack, + "PSE driver does not support status report"); + return -EOPNOTSUPP; + } + + mutex_lock(&psec->pcdev->lock); + err = ops->ethtool_get_status(psec->pcdev, psec->id, extack, status); + mutex_unlock(&psec->pcdev->lock); + + return err; +} +EXPORT_SYMBOL_GPL(pse_ethtool_get_status); + +/** + * pse_ethtool_set_config - set PSE control configuration + * @psec: PSE control pointer + * @extack: extack for reporting useful error messages + * @config: PSE configuration to apply + */ +int pse_ethtool_set_config(struct pse_control *psec, + struct netlink_ext_ack *extack, + const struct pse_control_config *config) +{ + const struct pse_controller_ops *ops; + int err; + + ops = psec->pcdev->ops; + + if (!ops->ethtool_set_config) { + NL_SET_ERR_MSG(extack, + "PSE driver does not support configuration"); + return -EOPNOTSUPP; + } + + mutex_lock(&psec->pcdev->lock); + err = ops->ethtool_set_config(psec->pcdev, psec->id, extack, config); + mutex_unlock(&psec->pcdev->lock); + + return err; +} +EXPORT_SYMBOL_GPL(pse_ethtool_set_config); diff --git a/drivers/net/pse-pd/pse_regulator.c b/drivers/net/pse-pd/pse_regulator.c new file mode 100644 index 000000000000..e2bf8306ca90 --- /dev/null +++ b/drivers/net/pse-pd/pse_regulator.c @@ -0,0 +1,147 @@ +// SPDX-License-Identifier: GPL-2.0-only +// +// Driver for the regulator based Ethernet Power Sourcing Equipment, without +// auto classification support. +// +// Copyright (c) 2022 Pengutronix, Oleksij Rempel <kernel@pengutronix.de> +// + +#include <linux/module.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/pse-pd/pse.h> +#include <linux/regulator/consumer.h> + +struct pse_reg_priv { + struct pse_controller_dev pcdev; + struct regulator *ps; /* power source */ + enum ethtool_podl_pse_admin_state admin_state; +}; + +static struct pse_reg_priv *to_pse_reg(struct pse_controller_dev *pcdev) +{ + return container_of(pcdev, struct pse_reg_priv, pcdev); +} + +static int +pse_reg_ethtool_set_config(struct pse_controller_dev *pcdev, unsigned long id, + struct netlink_ext_ack *extack, + const struct pse_control_config *config) +{ + struct pse_reg_priv *priv = to_pse_reg(pcdev); + int ret; + + if (priv->admin_state == config->admin_cotrol) + return 0; + + switch (config->admin_cotrol) { + case ETHTOOL_PODL_PSE_ADMIN_STATE_ENABLED: + ret = regulator_enable(priv->ps); + break; + case ETHTOOL_PODL_PSE_ADMIN_STATE_DISABLED: + ret = regulator_disable(priv->ps); + break; + default: + dev_err(pcdev->dev, "Unknown admin state %i\n", + config->admin_cotrol); + ret = -ENOTSUPP; + } + + if (ret) + return ret; + + priv->admin_state = config->admin_cotrol; + + return 0; +} + +static int +pse_reg_ethtool_get_status(struct pse_controller_dev *pcdev, unsigned long id, + struct netlink_ext_ack *extack, + struct pse_control_status *status) +{ + struct pse_reg_priv *priv = to_pse_reg(pcdev); + int ret; + + ret = regulator_is_enabled(priv->ps); + if (ret < 0) + return ret; + + if (!ret) + status->podl_pw_status = ETHTOOL_PODL_PSE_PW_D_STATUS_DISABLED; + else + status->podl_pw_status = + ETHTOOL_PODL_PSE_PW_D_STATUS_DELIVERING; + + status->podl_admin_state = priv->admin_state; + + return 0; +} + +static const struct pse_controller_ops pse_reg_ops = { + .ethtool_get_status = pse_reg_ethtool_get_status, + .ethtool_set_config = pse_reg_ethtool_set_config, +}; + +static int +pse_reg_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct pse_reg_priv *priv; + int ret; + + priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + if (!pdev->dev.of_node) + return -ENOENT; + + priv->ps = devm_regulator_get_exclusive(dev, "pse"); + if (IS_ERR(priv->ps)) + return
dev_err_probe(dev, PTR_ERR(priv->ps), + "failed to get PSE regulator.\n"); + + platform_set_drvdata(pdev, priv); + + ret = regulator_is_enabled(priv->ps); + if (ret < 0) + return ret; + + if (ret) + priv->admin_state = ETHTOOL_PODL_PSE_ADMIN_STATE_ENABLED; + else + priv->admin_state = ETHTOOL_PODL_PSE_ADMIN_STATE_DISABLED; + + priv->pcdev.owner = THIS_MODULE; + priv->pcdev.ops = &pse_reg_ops; + priv->pcdev.dev = dev; + ret = devm_pse_controller_register(dev, &priv->pcdev); + if (ret) { + dev_err(dev, "failed to register PSE controller (%pe)\n", + ERR_PTR(ret)); + return ret; + } + + return 0; +} + +static const __maybe_unused struct of_device_id pse_reg_of_match[] = { + { .compatible = "podl-pse-regulator", }, + { }, +}; +MODULE_DEVICE_TABLE(of, pse_reg_of_match); + +static struct platform_driver pse_reg_driver = { + .probe = pse_reg_probe, + .driver = { + .name = "PSE regulator", + .of_match_table = of_match_ptr(pse_reg_of_match), + }, +}; +module_platform_driver(pse_reg_driver); + +MODULE_AUTHOR("Oleksij Rempel <kernel@pengutronix.de>"); +MODULE_DESCRIPTION("regulator based Ethernet Power Sourcing Equipment"); +MODULE_LICENSE("GPL v2"); +MODULE_ALIAS("platform:pse-regulator"); diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index a51d8ded60f3..a481a1d831e2 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -1875,7 +1875,9 @@ static void intr_callback(struct urb *urb) "Stop submitting intr, status %d\n", status); return; case -EOVERFLOW: - netif_info(tp, intr, tp->netdev, "intr status -EOVERFLOW\n"); + if (net_ratelimit()) + netif_info(tp, intr, tp->netdev, + "intr status -EOVERFLOW\n"); goto resubmit; /* -EPIPE: should clear the halt */ default: diff --git a/drivers/net/wireless/mediatek/mt76/tx.c b/drivers/net/wireless/mediatek/mt76/tx.c index e67cc7909bce..6c054850363f 100644 --- a/drivers/net/wireless/mediatek/mt76/tx.c +++ b/drivers/net/wireless/mediatek/mt76/tx.c @@ -60,14 +60,20 @@ mt76_tx_status_unlock(struct mt76_dev *dev, struct sk_buff_head *list) .skb = skb, .info = IEEE80211_SKB_CB(skb), }; + struct ieee80211_rate_status rs = {}; struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb); struct mt76_wcid *wcid; wcid = rcu_dereference(dev->wcid[cb->wcid]); if (wcid) { status.sta = wcid_to_sta(wcid); - status.rates = NULL; - status.n_rates = 0; + if (status.sta && (wcid->rate.flags || wcid->rate.legacy)) { + rs.rate_idx = wcid->rate; + status.rates = &rs; + status.n_rates = 1; + } else { + status.n_rates = 0; + } } hw = mt76_tx_status_get_hw(dev, skb); diff --git a/drivers/net/wwan/iosm/iosm_ipc_wwan.c b/drivers/net/wwan/iosm/iosm_ipc_wwan.c index 03757ad21d51..2f1f8b5d5b59 100644 --- a/drivers/net/wwan/iosm/iosm_ipc_wwan.c +++ b/drivers/net/wwan/iosm/iosm_ipc_wwan.c @@ -323,15 +323,16 @@ struct iosm_wwan *ipc_wwan_init(struct iosm_imem *ipc_imem, struct device *dev) ipc_wwan->dev = dev; ipc_wwan->ipc_imem = ipc_imem; + mutex_init(&ipc_wwan->if_mutex); + /* WWAN core will create a netdev for the default IP MUX channel */ if (wwan_register_ops(ipc_wwan->dev, &iosm_wwan_ops, ipc_wwan, IP_MUX_SESSION_DEFAULT)) { + mutex_destroy(&ipc_wwan->if_mutex); kfree(ipc_wwan); return NULL; } - mutex_init(&ipc_wwan->if_mutex); - return ipc_wwan; } diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 66446f1e06cf..8d5a7ae19844 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -2162,14 +2162,14 @@ static int nvme_pr_preempt(struct block_device *bdev, u64 old, u64 new, static int nvme_pr_clear(struct block_device *bdev, u64 key) { - u32 
cdw10 = 1 | (key ? 1 << 3 : 0); + u32 cdw10 = 1 | (key ? 0 : 1 << 3); - return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_register); + return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_release); } static int nvme_pr_release(struct block_device *bdev, u64 key, enum pr_type type) { - u32 cdw10 = nvme_pr_type(type) << 8 | (key ? 1 << 3 : 0); + u32 cdw10 = nvme_pr_type(type) << 8 | (key ? 0 : 1 << 3); return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_release); } diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 98864b853eef..67d3335e9cc8 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -3470,6 +3470,10 @@ static const struct pci_device_id nvme_id_table[] = { { PCI_DEVICE(0x1987, 0x5016), /* Phison E16 */ .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN | NVME_QUIRK_BOGUS_NID, }, + { PCI_DEVICE(0x1987, 0x5019), /* phison E19 */ + .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, }, + { PCI_DEVICE(0x1987, 0x5021), /* Phison E21 */ + .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, }, { PCI_DEVICE(0x1b4b, 0x1092), /* Lexar 256 GB SSD */ .driver_data = NVME_QUIRK_NO_NS_DESC_LIST | NVME_QUIRK_IGNORE_DEV_SUBNQN, }, diff --git a/drivers/perf/arm_dsu_pmu.c b/drivers/perf/arm_dsu_pmu.c index a36698a90d2f..4a15c86f45ef 100644 --- a/drivers/perf/arm_dsu_pmu.c +++ b/drivers/perf/arm_dsu_pmu.c @@ -639,6 +639,7 @@ static int dsu_pmu_dt_get_cpus(struct device *dev, cpumask_t *mask) static int dsu_pmu_acpi_get_cpus(struct device *dev, cpumask_t *mask) { #ifdef CONFIG_ACPI + struct acpi_device *parent_adev = acpi_dev_parent(ACPI_COMPANION(dev)); int cpu; /* @@ -653,8 +654,7 @@ static int dsu_pmu_acpi_get_cpus(struct device *dev, cpumask_t *mask) continue; acpi_dev = ACPI_COMPANION(cpu_dev); - if (acpi_dev && - acpi_dev->parent == ACPI_COMPANION(dev)->parent) + if (acpi_dev && acpi_dev_parent(acpi_dev) == parent_adev) cpumask_set_cpu(cpu, mask); } #endif diff --git a/drivers/perf/qcom_l2_pmu.c b/drivers/perf/qcom_l2_pmu.c index 30234c261b05..aaca6db7d8f6 100644 --- a/drivers/perf/qcom_l2_pmu.c +++ b/drivers/perf/qcom_l2_pmu.c @@ -840,16 +840,16 @@ static int l2_cache_pmu_probe_cluster(struct device *dev, void *data) { struct platform_device *pdev = to_platform_device(dev->parent); struct platform_device *sdev = to_platform_device(dev); - struct acpi_device *adev = ACPI_COMPANION(dev); struct l2cache_pmu *l2cache_pmu = data; struct cluster_pmu *cluster; - unsigned long fw_cluster_id; + u64 fw_cluster_id; int err; int irq; - if (!adev || kstrtoul(adev->pnp.unique_id, 10, &fw_cluster_id) < 0) { + err = acpi_dev_uid_to_integer(ACPI_COMPANION(dev), &fw_cluster_id); + if (err) { dev_err(&pdev->dev, "unable to read ACPI uid\n"); - return -ENODEV; + return err; } cluster = devm_kzalloc(&pdev->dev, sizeof(*cluster), GFP_KERNEL); @@ -879,7 +879,7 @@ static int l2_cache_pmu_probe_cluster(struct device *dev, void *data) } dev_info(&pdev->dev, - "Registered L2 cache PMU cluster %ld\n", fw_cluster_id); + "Registered L2 cache PMU cluster %lld\n", fw_cluster_id); spin_lock_init(&cluster->pmu_lock); diff --git a/drivers/perf/qcom_l3_pmu.c b/drivers/perf/qcom_l3_pmu.c index 1ff2ff6582bf..346311a05460 100644 --- a/drivers/perf/qcom_l3_pmu.c +++ b/drivers/perf/qcom_l3_pmu.c @@ -742,7 +742,8 @@ static int qcom_l3_cache_pmu_probe(struct platform_device *pdev) l3pmu = devm_kzalloc(&pdev->dev, sizeof(*l3pmu), GFP_KERNEL); name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "l3cache_%s_%s", - acpi_dev->parent->pnp.unique_id, acpi_dev->pnp.unique_id); + 
acpi_dev_parent(acpi_dev)->pnp.unique_id, + acpi_dev->pnp.unique_id); if (!l3pmu || !name) return -ENOMEM; diff --git a/drivers/platform/x86/intel/int3472/common.c b/drivers/platform/x86/intel/int3472/common.c index 77cf058e4168..9db2bb0bbba4 100644 --- a/drivers/platform/x86/intel/int3472/common.c +++ b/drivers/platform/x86/intel/int3472/common.c @@ -62,7 +62,7 @@ int skl_int3472_get_sensor_adev_and_name(struct device *dev, struct acpi_device *sensor; int ret = 0; - sensor = acpi_dev_get_first_consumer_dev(adev); + sensor = acpi_dev_get_next_consumer_dev(adev, NULL); if (!sensor) { dev_err(dev, "INT3472 seems to have no dependents.\n"); return -ENODEV; } diff --git a/drivers/platform/x86/intel/int3472/tps68470.c b/drivers/platform/x86/intel/int3472/tps68470.c index 22f61b47f9e5..49fc379fe680 100644 --- a/drivers/platform/x86/intel/int3472/tps68470.c +++ b/drivers/platform/x86/intel/int3472/tps68470.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 /* Author: Dan Scally <djrscally@gmail.com> */ +#include <linux/acpi.h> #include <linux/i2c.h> #include <linux/kernel.h> #include <linux/mfd/core.h> @@ -95,20 +96,65 @@ static int skl_int3472_tps68470_calc_type(struct acpi_device *adev) return DESIGNED_FOR_WINDOWS; } +/* + * Returns the number of consumers, which we'll need later on to compute + * .pdata_size for the clock cell via struct_size(). + */ +static int +skl_int3472_fill_clk_pdata(struct device *dev, struct tps68470_clk_platform_data **clk_pdata) +{ + struct acpi_device *adev = ACPI_COMPANION(dev); + struct acpi_device *consumer; + unsigned int n_consumers = 0; + const char *sensor_name; + unsigned int i = 0; + + for_each_acpi_consumer_dev(adev, consumer) + n_consumers++; + + if (!n_consumers) { + dev_err(dev, "INT3472 seems to have no dependents\n"); + return -ENODEV; + } + + *clk_pdata = devm_kzalloc(dev, struct_size(*clk_pdata, consumers, n_consumers), + GFP_KERNEL); + if (!*clk_pdata) + return -ENOMEM; + + (*clk_pdata)->n_consumers = n_consumers; + i = 0; + + for_each_acpi_consumer_dev(adev, consumer) { + sensor_name = devm_kasprintf(dev, GFP_KERNEL, I2C_DEV_NAME_FORMAT, + acpi_dev_name(consumer)); + if (!sensor_name) + return -ENOMEM; + + (*clk_pdata)->consumers[i].consumer_dev_name = sensor_name; + i++; + } + + acpi_dev_put(consumer); + + return n_consumers; +} + static int skl_int3472_tps68470_probe(struct i2c_client *client) { struct acpi_device *adev = ACPI_COMPANION(&client->dev); const struct int3472_tps68470_board_data *board_data; - struct tps68470_clk_platform_data clk_pdata = {}; + struct tps68470_clk_platform_data *clk_pdata; struct mfd_cell *cells; struct regmap *regmap; + int n_consumers; int device_type; int ret; + int i; - ret = skl_int3472_get_sensor_adev_and_name(&client->dev, NULL, - &clk_pdata.consumer_dev_name); - if (ret) - return ret; + n_consumers = skl_int3472_fill_clk_pdata(&client->dev, &clk_pdata); + if (n_consumers < 0) + return n_consumers; regmap = devm_regmap_init_i2c(client, &tps68470_regmap_config); if (IS_ERR(regmap)) { @@ -142,22 +188,25 @@ static int skl_int3472_tps68470_probe(struct i2c_client *client) * the clk + regulators must be ready when this happens.
*/ cells[0].name = "tps68470-clk"; - cells[0].platform_data = &clk_pdata; - cells[0].pdata_size = sizeof(clk_pdata); + cells[0].platform_data = clk_pdata; + cells[0].pdata_size = struct_size(clk_pdata, consumers, n_consumers); cells[1].name = "tps68470-regulator"; cells[1].platform_data = (void *)board_data->tps68470_regulator_pdata; cells[1].pdata_size = sizeof(struct tps68470_regulator_platform_data); cells[2].name = "tps68470-gpio"; - gpiod_add_lookup_table(board_data->tps68470_gpio_lookup_table); + for (i = 0; i < board_data->n_gpiod_lookups; i++) + gpiod_add_lookup_table(board_data->tps68470_gpio_lookup_tables[i]); ret = devm_mfd_add_devices(&client->dev, PLATFORM_DEVID_NONE, cells, TPS68470_WIN_MFD_CELL_COUNT, NULL, 0, NULL); kfree(cells); - if (ret) - gpiod_remove_lookup_table(board_data->tps68470_gpio_lookup_table); + if (ret) { + for (i = 0; i < board_data->n_gpiod_lookups; i++) + gpiod_remove_lookup_table(board_data->tps68470_gpio_lookup_tables[i]); + } break; case DESIGNED_FOR_CHROMEOS: @@ -181,10 +230,13 @@ static int skl_int3472_tps68470_probe(struct i2c_client *client) static int skl_int3472_tps68470_remove(struct i2c_client *client) { const struct int3472_tps68470_board_data *board_data; + int i; board_data = int3472_tps68470_get_board_data(dev_name(&client->dev)); - if (board_data) - gpiod_remove_lookup_table(board_data->tps68470_gpio_lookup_table); + if (board_data) { + for (i = 0; i < board_data->n_gpiod_lookups; i++) + gpiod_remove_lookup_table(board_data->tps68470_gpio_lookup_tables[i]); + } return 0; } diff --git a/drivers/platform/x86/intel/int3472/tps68470.h b/drivers/platform/x86/intel/int3472/tps68470.h index cfd33eb62740..35915e701593 100644 --- a/drivers/platform/x86/intel/int3472/tps68470.h +++ b/drivers/platform/x86/intel/int3472/tps68470.h @@ -16,8 +16,9 @@ struct tps68470_regulator_platform_data; struct int3472_tps68470_board_data { const char *dev_name; - struct gpiod_lookup_table *tps68470_gpio_lookup_table; const struct tps68470_regulator_platform_data *tps68470_regulator_pdata; + unsigned int n_gpiod_lookups; + struct gpiod_lookup_table *tps68470_gpio_lookup_tables[]; }; const struct int3472_tps68470_board_data *int3472_tps68470_get_board_data(const char *dev_name); diff --git a/drivers/platform/x86/intel/int3472/tps68470_board_data.c b/drivers/platform/x86/intel/int3472/tps68470_board_data.c index 525f09a3b5ff..309eab9c0558 100644 --- a/drivers/platform/x86/intel/int3472/tps68470_board_data.c +++ b/drivers/platform/x86/intel/int3472/tps68470_board_data.c @@ -30,6 +30,15 @@ static struct regulator_consumer_supply int347a_vcm_consumer_supplies[] = { static struct regulator_consumer_supply int347a_vsio_consumer_supplies[] = { REGULATOR_SUPPLY("dovdd", "i2c-INT347A:00"), REGULATOR_SUPPLY("vsio", "i2c-INT347A:00-VCM"), + REGULATOR_SUPPLY("vddd", "i2c-INT347E:00"), +}; + +static struct regulator_consumer_supply int347a_aux1_consumer_supplies[] = { + REGULATOR_SUPPLY("vdda", "i2c-INT347E:00"), +}; + +static struct regulator_consumer_supply int347a_aux2_consumer_supplies[] = { + REGULATOR_SUPPLY("vdddo", "i2c-INT347E:00"), }; static const struct regulator_init_data surface_go_tps68470_core_reg_init_data = { @@ -86,6 +95,28 @@ static const struct regulator_init_data surface_go_tps68470_vsio_reg_init_data = .consumer_supplies = int347a_vsio_consumer_supplies, }; +static const struct regulator_init_data surface_go_tps68470_aux1_reg_init_data = { + .constraints = { + .min_uV = 2815200, + .max_uV = 2815200, + .apply_uV = 1, + .valid_ops_mask = 
REGULATOR_CHANGE_STATUS, + }, + .num_consumer_supplies = ARRAY_SIZE(int347a_aux1_consumer_supplies), + .consumer_supplies = int347a_aux1_consumer_supplies, +}; + +static const struct regulator_init_data surface_go_tps68470_aux2_reg_init_data = { + .constraints = { + .min_uV = 1800600, + .max_uV = 1800600, + .apply_uV = 1, + .valid_ops_mask = REGULATOR_CHANGE_STATUS, + }, + .num_consumer_supplies = ARRAY_SIZE(int347a_aux2_consumer_supplies), + .consumer_supplies = int347a_aux2_consumer_supplies, +}; + static const struct tps68470_regulator_platform_data surface_go_tps68470_pdata = { .reg_init_data = { [TPS68470_CORE] = &surface_go_tps68470_core_reg_init_data, @@ -93,10 +124,12 @@ static const struct tps68470_regulator_platform_data surface_go_tps68470_pdata = [TPS68470_VCM] = &surface_go_tps68470_vcm_reg_init_data, [TPS68470_VIO] = &surface_go_tps68470_vio_reg_init_data, [TPS68470_VSIO] = &surface_go_tps68470_vsio_reg_init_data, + [TPS68470_AUX1] = &surface_go_tps68470_aux1_reg_init_data, + [TPS68470_AUX2] = &surface_go_tps68470_aux2_reg_init_data, }, }; -static struct gpiod_lookup_table surface_go_tps68470_gpios = { +static struct gpiod_lookup_table surface_go_int347a_gpios = { .dev_id = "i2c-INT347A:00", .table = { GPIO_LOOKUP("tps68470-gpio", 9, "reset", GPIO_ACTIVE_LOW), @@ -105,16 +138,31 @@ static struct gpiod_lookup_table surface_go_tps68470_gpios = { } }; +static struct gpiod_lookup_table surface_go_int347e_gpios = { + .dev_id = "i2c-INT347E:00", + .table = { + GPIO_LOOKUP("tps68470-gpio", 5, "enable", GPIO_ACTIVE_HIGH), + { } + } +}; + static const struct int3472_tps68470_board_data surface_go_tps68470_board_data = { .dev_name = "i2c-INT3472:05", - .tps68470_gpio_lookup_table = &surface_go_tps68470_gpios, .tps68470_regulator_pdata = &surface_go_tps68470_pdata, + .n_gpiod_lookups = 2, + .tps68470_gpio_lookup_tables = { + &surface_go_int347a_gpios, + &surface_go_int347e_gpios, + }, }; static const struct int3472_tps68470_board_data surface_go3_tps68470_board_data = { .dev_name = "i2c-INT3472:01", - .tps68470_gpio_lookup_table = &surface_go_tps68470_gpios, .tps68470_regulator_pdata = &surface_go_tps68470_pdata, + .n_gpiod_lookups = 1, + .tps68470_gpio_lookup_tables = { + &surface_go_int347a_gpios + }, }; static const struct dmi_system_id int3472_tps68470_board_data_table[] = { diff --git a/drivers/pnp/pnpbios/pnpbios.h b/drivers/pnp/pnpbios/pnpbios.h index 2ce739ff9c1a..f3302006842e 100644 --- a/drivers/pnp/pnpbios/pnpbios.h +++ b/drivers/pnp/pnpbios/pnpbios.h @@ -153,7 +153,6 @@ extern int pnpbios_dont_use_current_config; extern int pnpbios_parse_data_stream(struct pnp_dev *dev, struct pnp_bios_node * node); extern int pnpbios_read_resources_from_node(struct pnp_dev *dev, struct pnp_bios_node *node); extern int pnpbios_write_resources_to_node(struct pnp_dev *dev, struct pnp_bios_node *node); -extern void pnpid32_to_pnpid(u32 id, char *str); extern void pnpbios_print_status(const char * module, u16 status); extern void pnpbios_calls_init(union pnp_bios_install_struct * header); diff --git a/drivers/powercap/intel_rapl_common.c b/drivers/powercap/intel_rapl_common.c index 21d624f9f5fb..26d00b1853b4 100644 --- a/drivers/powercap/intel_rapl_common.c +++ b/drivers/powercap/intel_rapl_common.c @@ -994,6 +994,9 @@ static u64 rapl_compute_time_window_core(struct rapl_package *rp, u64 value, y = value & 0x1f; value = (1 << y) * (4 + f) * rp->time_unit / 4; } else { + if (value < rp->time_unit) + return 0; + do_div(value, rp->time_unit); y = ilog2(value); f = div64_u64(4 * (value - (1 << y)), 
1 << y); @@ -1035,7 +1038,6 @@ static const struct rapl_defaults rapl_defaults_spr_server = { .check_unit = rapl_check_unit_core, .set_floor_freq = set_floor_freq_default, .compute_time_window = rapl_compute_time_window_core, - .dram_domain_energy_unit = 15300, .psys_domain_energy_unit = 1000000000, .spr_psys_bits = true, }; @@ -1110,6 +1112,7 @@ static const struct x86_cpu_id rapl_ids[] __initconst = { X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_N, &rapl_defaults_core), X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE, &rapl_defaults_core), X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_P, &rapl_defaults_core), + X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_S, &rapl_defaults_core), X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, &rapl_defaults_spr_server), X86_MATCH_INTEL_FAM6_MODEL(LAKEFIELD, &rapl_defaults_core), diff --git a/drivers/ras/cec.c b/drivers/ras/cec.c index 42f2fc0bc8a9..321af498ee11 100644 --- a/drivers/ras/cec.c +++ b/drivers/ras/cec.c @@ -556,6 +556,14 @@ static int __init cec_init(void) if (ce_arr.disabled) return -ENODEV; + /* + * Intel systems may avoid uncorrectable errors + * if pages with corrected errors are aggressively + * taken offline. + */ + if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) + action_threshold = 2; + ce_arr.array = (void *)get_zeroed_page(GFP_KERNEL); if (!ce_arr.array) { pr_err("Error allocating CE array page!\n"); diff --git a/drivers/regulator/max8973-regulator.c b/drivers/regulator/max8973-regulator.c index fdcb0f508984..596cc36aaff6 100644 --- a/drivers/regulator/max8973-regulator.c +++ b/drivers/regulator/max8973-regulator.c @@ -434,9 +434,9 @@ static int max8973_init_dcdc(struct max8973_chip *max, return ret; } -static int max8973_thermal_read_temp(void *data, int *temp) +static int max8973_thermal_read_temp(struct thermal_zone_device *tz, int *temp) { - struct max8973_chip *mchip = data; + struct max8973_chip *mchip = tz->devdata; unsigned int val; int ret; @@ -465,7 +465,7 @@ static irqreturn_t max8973_thermal_irq(int irq, void *data) return IRQ_HANDLED; } -static const struct thermal_zone_of_device_ops max77621_tz_ops = { +static const struct thermal_zone_device_ops max77621_tz_ops = { .get_temp = max8973_thermal_read_temp, }; @@ -479,8 +479,8 @@ static int max8973_thermal_init(struct max8973_chip *mchip) if (mchip->id != MAX77621) return 0; - tzd = devm_thermal_zone_of_sensor_register(mchip->dev, 0, mchip, - &max77621_tz_ops); + tzd = devm_thermal_of_zone_register(mchip->dev, 0, mchip, + &max77621_tz_ops); if (IS_ERR(tzd)) { ret = PTR_ERR(tzd); dev_err(mchip->dev, "Failed to register thermal sensor: %d\n", diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c index 838d12e65144..c8e079d7e541 100644 --- a/drivers/spi/spi-pxa2xx.c +++ b/drivers/spi/spi-pxa2xx.c @@ -1441,31 +1441,6 @@ static const struct of_device_id pxa2xx_spi_of_match[] = { }; MODULE_DEVICE_TABLE(of, pxa2xx_spi_of_match); -#ifdef CONFIG_ACPI - -static int pxa2xx_spi_get_port_id(struct device *dev) -{ - struct acpi_device *adev; - unsigned int devid; - int port_id = -1; - - adev = ACPI_COMPANION(dev); - if (adev && adev->pnp.unique_id && - !kstrtouint(adev->pnp.unique_id, 0, &devid)) - port_id = devid; - return port_id; -} - -#else /* !CONFIG_ACPI */ - -static int pxa2xx_spi_get_port_id(struct device *dev) -{ - return -1; -} - -#endif /* CONFIG_ACPI */ - - #ifdef CONFIG_PCI static bool pxa2xx_spi_idma_filter(struct dma_chan *chan, void *param) @@ -1479,13 +1454,16 @@ static struct pxa2xx_spi_controller * pxa2xx_spi_init_pdata(struct platform_device *pdev) { struct pxa2xx_spi_controller *pdata; 
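Several hunks in this series (qcom_l2_pmu above, pxa2xx-spi here) replace open-coded kstrtoul()/kstrtouint() parsing of adev->pnp.unique_id with the new acpi_dev_uid_to_integer() helper. A minimal sketch of the resulting pattern; the example_acpi_port_id() wrapper is hypothetical and merely mirrors the "no port id" convention of the old pxa2xx helper:

#include <linux/acpi.h>

/* Illustrative helper, not part of this patch: read a device's ACPI _UID
 * as an integer. acpi_dev_uid_to_integer() returns -ENOENT when there is
 * no ACPI companion or no _UID, and a kstrtou64() error when the _UID
 * string is not numeric, so no manual parsing is needed.
 */
static int example_acpi_port_id(struct device *dev)
{
	u64 uid;

	if (acpi_dev_uid_to_integer(ACPI_COMPANION(dev), &uid))
		return -1;	/* same fallback the old pxa2xx helper used */

	return uid;
}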
+ struct device *dev = &pdev->dev; + struct device *parent = dev->parent; struct ssp_device *ssp; struct resource *res; - struct device *parent = pdev->dev.parent; struct pci_dev *pcidev = dev_is_pci(parent) ? to_pci_dev(parent) : NULL; const struct pci_device_id *pcidev_id = NULL; enum pxa_ssp_type type; const void *match; + int status; + u64 uid; if (pcidev) pcidev_id = pci_match_id(pxa2xx_spi_pci_compound_match, pcidev); @@ -1529,7 +1507,12 @@ pxa2xx_spi_init_pdata(struct platform_device *pdev) ssp->type = type; ssp->dev = &pdev->dev; - ssp->port_id = pxa2xx_spi_get_port_id(&pdev->dev); + + status = acpi_dev_uid_to_integer(ACPI_COMPANION(dev), &uid); + if (status) + ssp->port_id = -1; + else + ssp->port_id = uid; pdata->is_slave = device_property_read_bool(&pdev->dev, "spi-slave"); pdata->num_chipselect = 1; diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index 32c01e684af3..30e82f968639 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c @@ -4374,7 +4374,7 @@ static int acpi_spi_notify(struct notifier_block *nb, unsigned long value, switch (value) { case ACPI_RECONFIG_DEVICE_ADD: - ctlr = acpi_spi_find_controller_by_adev(adev->parent); + ctlr = acpi_spi_find_controller_by_adev(acpi_dev_parent(adev)); if (!ctlr) break; diff --git a/drivers/staging/media/rkvdec/rkvdec-h264.c b/drivers/staging/media/rkvdec/rkvdec-h264.c index 4af5a831bde0..4fc167b42cf0 100644 --- a/drivers/staging/media/rkvdec/rkvdec-h264.c +++ b/drivers/staging/media/rkvdec/rkvdec-h264.c @@ -1162,8 +1162,8 @@ static int rkvdec_h264_run(struct rkvdec_ctx *ctx) schedule_delayed_work(&rkvdec->watchdog_work, msecs_to_jiffies(2000)); - writel(0xffffffff, rkvdec->regs + RKVDEC_REG_STRMD_ERR_EN); - writel(0xffffffff, rkvdec->regs + RKVDEC_REG_H264_ERR_E); + writel(0, rkvdec->regs + RKVDEC_REG_STRMD_ERR_EN); + writel(0, rkvdec->regs + RKVDEC_REG_H264_ERR_E); writel(1, rkvdec->regs + RKVDEC_REG_PREF_LUMA_CACHE_COMMAND); writel(1, rkvdec->regs + RKVDEC_REG_PREF_CHR_CACHE_COMMAND); diff --git a/drivers/thermal/amlogic_thermal.c b/drivers/thermal/amlogic_thermal.c index e61b91d14ad1..d30cb791e63c 100644 --- a/drivers/thermal/amlogic_thermal.c +++ b/drivers/thermal/amlogic_thermal.c @@ -179,12 +179,12 @@ static int amlogic_thermal_disable(struct amlogic_thermal *data) return 0; } -static int amlogic_thermal_get_temp(void *data, int *temp) +static int amlogic_thermal_get_temp(struct thermal_zone_device *tz, int *temp) { unsigned int tval; - struct amlogic_thermal *pdata = data; + struct amlogic_thermal *pdata = tz->devdata; - if (!data) + if (!pdata) return -EINVAL; regmap_read(pdata->regmap, TSENSOR_STAT0, &tval); @@ -195,7 +195,7 @@ static int amlogic_thermal_get_temp(void *data, int *temp) return 0; } -static const struct thermal_zone_of_device_ops amlogic_thermal_ops = { +static const struct thermal_zone_device_ops amlogic_thermal_ops = { .get_temp = amlogic_thermal_get_temp, }; @@ -276,10 +276,10 @@ static int amlogic_thermal_probe(struct platform_device *pdev) return PTR_ERR(pdata->sec_ao_map); } - pdata->tzd = devm_thermal_zone_of_sensor_register(&pdev->dev, - 0, - pdata, - &amlogic_thermal_ops); + pdata->tzd = devm_thermal_of_zone_register(&pdev->dev, + 0, + pdata, + &amlogic_thermal_ops); if (IS_ERR(pdata->tzd)) { ret = PTR_ERR(pdata->tzd); dev_err(dev, "Failed to register tsensor: %d\n", ret); diff --git a/drivers/thermal/armada_thermal.c b/drivers/thermal/armada_thermal.c index c2ebfb5be4b3..52d63b3997fe 100644 --- a/drivers/thermal/armada_thermal.c +++ b/drivers/thermal/armada_thermal.c @@ -420,9 +420,9 @@ static 
struct thermal_zone_device_ops legacy_ops = { .get_temp = armada_get_temp_legacy, }; -static int armada_get_temp(void *_sensor, int *temp) +static int armada_get_temp(struct thermal_zone_device *tz, int *temp) { - struct armada_thermal_sensor *sensor = _sensor; + struct armada_thermal_sensor *sensor = tz->devdata; struct armada_thermal_priv *priv = sensor->priv; int ret; @@ -450,7 +450,7 @@ unlock_mutex: return ret; } -static const struct thermal_zone_of_device_ops of_ops = { +static const struct thermal_zone_device_ops of_ops = { .get_temp = armada_get_temp, }; @@ -928,9 +928,9 @@ static int armada_thermal_probe(struct platform_device *pdev) /* Register the sensor */ sensor->priv = priv; sensor->id = sensor_id; - tz = devm_thermal_zone_of_sensor_register(&pdev->dev, - sensor->id, sensor, - &of_ops); + tz = devm_thermal_of_zone_register(&pdev->dev, + sensor->id, sensor, + &of_ops); if (IS_ERR(tz)) { dev_info(&pdev->dev, "Thermal sensor %d unavailable\n", sensor_id); diff --git a/drivers/thermal/broadcom/bcm2711_thermal.c b/drivers/thermal/broadcom/bcm2711_thermal.c index e9bef5c3414b..1f8651d15160 100644 --- a/drivers/thermal/broadcom/bcm2711_thermal.c +++ b/drivers/thermal/broadcom/bcm2711_thermal.c @@ -31,11 +31,11 @@ struct bcm2711_thermal_priv { struct thermal_zone_device *thermal; }; -static int bcm2711_get_temp(void *data, int *temp) +static int bcm2711_get_temp(struct thermal_zone_device *tz, int *temp) { - struct bcm2711_thermal_priv *priv = data; - int slope = thermal_zone_get_slope(priv->thermal); - int offset = thermal_zone_get_offset(priv->thermal); + struct bcm2711_thermal_priv *priv = tz->devdata; + int slope = thermal_zone_get_slope(tz); + int offset = thermal_zone_get_offset(tz); u32 val; int ret; @@ -54,7 +54,7 @@ static int bcm2711_get_temp(void *data, int *temp) return 0; } -static const struct thermal_zone_of_device_ops bcm2711_thermal_of_ops = { +static const struct thermal_zone_device_ops bcm2711_thermal_of_ops = { .get_temp = bcm2711_get_temp, }; @@ -88,8 +88,8 @@ static int bcm2711_thermal_probe(struct platform_device *pdev) } priv->regmap = regmap; - thermal = devm_thermal_zone_of_sensor_register(dev, 0, priv, - &bcm2711_thermal_of_ops); + thermal = devm_thermal_of_zone_register(dev, 0, priv, + &bcm2711_thermal_of_ops); if (IS_ERR(thermal)) { ret = PTR_ERR(thermal); dev_err(dev, "could not register sensor: %d\n", ret); diff --git a/drivers/thermal/broadcom/bcm2835_thermal.c b/drivers/thermal/broadcom/bcm2835_thermal.c index c8e4344d5a3d..2c67841a1115 100644 --- a/drivers/thermal/broadcom/bcm2835_thermal.c +++ b/drivers/thermal/broadcom/bcm2835_thermal.c @@ -88,9 +88,9 @@ static int bcm2835_thermal_temp2adc(int temp, int offset, int slope) return temp; } -static int bcm2835_thermal_get_temp(void *d, int *temp) +static int bcm2835_thermal_get_temp(struct thermal_zone_device *tz, int *temp) { - struct bcm2835_thermal_data *data = d; + struct bcm2835_thermal_data *data = tz->devdata; u32 val = readl(data->regs + BCM2835_TS_TSENSSTAT); if (!(val & BCM2835_TS_TSENSSTAT_VALID)) @@ -135,7 +135,7 @@ static void bcm2835_thermal_debugfs(struct platform_device *pdev) debugfs_create_regset32("regset", 0444, data->debugfsdir, regset); } -static const struct thermal_zone_of_device_ops bcm2835_thermal_ops = { +static const struct thermal_zone_device_ops bcm2835_thermal_ops = { .get_temp = bcm2835_thermal_get_temp, }; @@ -206,8 +206,8 @@ static int bcm2835_thermal_probe(struct platform_device *pdev) data->clk, rate); /* register of thermal sensor and get info from DT */ - tz = 
thermal_zone_of_sensor_register(&pdev->dev, 0, data, - &bcm2835_thermal_ops); + tz = devm_thermal_of_zone_register(&pdev->dev, 0, data, + &bcm2835_thermal_ops); if (IS_ERR(tz)) { err = PTR_ERR(tz); dev_err(&pdev->dev, @@ -277,7 +277,7 @@ static int bcm2835_thermal_probe(struct platform_device *pdev) return 0; err_tz: - thermal_zone_of_sensor_unregister(&pdev->dev, tz); + thermal_of_zone_unregister(tz); err_clk: clk_disable_unprepare(data->clk); @@ -290,7 +290,7 @@ static int bcm2835_thermal_remove(struct platform_device *pdev) struct thermal_zone_device *tz = data->tz; debugfs_remove_recursive(data->debugfsdir); - thermal_zone_of_sensor_unregister(&pdev->dev, tz); + thermal_of_zone_unregister(tz); clk_disable_unprepare(data->clk); return 0; diff --git a/drivers/thermal/broadcom/brcmstb_thermal.c b/drivers/thermal/broadcom/brcmstb_thermal.c index 0cedb8b4f00a..c79c6cfdd74d 100644 --- a/drivers/thermal/broadcom/brcmstb_thermal.c +++ b/drivers/thermal/broadcom/brcmstb_thermal.c @@ -105,7 +105,7 @@ static struct avs_tmon_trip avs_tmon_trips[] = { struct brcmstb_thermal_params { unsigned int offset; unsigned int mult; - const struct thermal_zone_of_device_ops *of_ops; + const struct thermal_zone_device_ops *of_ops; }; struct brcmstb_thermal_priv { @@ -150,9 +150,9 @@ static inline u32 avs_tmon_temp_to_code(struct brcmstb_thermal_priv *priv, return (u32)((offset - temp) / mult); } -static int brcmstb_get_temp(void *data, int *temp) +static int brcmstb_get_temp(struct thermal_zone_device *tz, int *temp) { - struct brcmstb_thermal_priv *priv = data; + struct brcmstb_thermal_priv *priv = tz->devdata; u32 val; long t; @@ -260,9 +260,9 @@ static irqreturn_t brcmstb_tmon_irq_thread(int irq, void *data) return IRQ_HANDLED; } -static int brcmstb_set_trips(void *data, int low, int high) +static int brcmstb_set_trips(struct thermal_zone_device *tz, int low, int high) { - struct brcmstb_thermal_priv *priv = data; + struct brcmstb_thermal_priv *priv = tz->devdata; dev_dbg(priv->dev, "set trips %d <--> %d\n", low, high); @@ -288,7 +288,7 @@ static int brcmstb_set_trips(void *data, int low, int high) return 0; } -static const struct thermal_zone_of_device_ops brcmstb_16nm_of_ops = { +static const struct thermal_zone_device_ops brcmstb_16nm_of_ops = { .get_temp = brcmstb_get_temp, }; @@ -298,7 +298,7 @@ static const struct brcmstb_thermal_params brcmstb_16nm_params = { .of_ops = &brcmstb_16nm_of_ops, }; -static const struct thermal_zone_of_device_ops brcmstb_28nm_of_ops = { +static const struct thermal_zone_device_ops brcmstb_28nm_of_ops = { .get_temp = brcmstb_get_temp, .set_trips = brcmstb_set_trips, }; @@ -318,7 +318,7 @@ MODULE_DEVICE_TABLE(of, brcmstb_thermal_id_table); static int brcmstb_thermal_probe(struct platform_device *pdev) { - const struct thermal_zone_of_device_ops *of_ops; + const struct thermal_zone_device_ops *of_ops; struct thermal_zone_device *thermal; struct brcmstb_thermal_priv *priv; struct resource *res; @@ -341,8 +341,8 @@ static int brcmstb_thermal_probe(struct platform_device *pdev) platform_set_drvdata(pdev, priv); of_ops = priv->temp_params->of_ops; - thermal = devm_thermal_zone_of_sensor_register(&pdev->dev, 0, priv, - of_ops); + thermal = devm_thermal_of_zone_register(&pdev->dev, 0, priv, + of_ops); if (IS_ERR(thermal)) { ret = PTR_ERR(thermal); dev_err(&pdev->dev, "could not register sensor: %d\n", ret); diff --git a/drivers/thermal/broadcom/ns-thermal.c b/drivers/thermal/broadcom/ns-thermal.c index c9468ba9d449..07a8a3f49bd0 100644 --- a/drivers/thermal/broadcom/ns-thermal.c 
+++ b/drivers/thermal/broadcom/ns-thermal.c @@ -14,19 +14,14 @@ #define PVTMON_CONTROL0_SEL_TEST_MODE 0x0000000e #define PVTMON_STATUS 0x08 -struct ns_thermal { - struct thermal_zone_device *tz; - void __iomem *pvtmon; -}; - -static int ns_thermal_get_temp(void *data, int *temp) +static int ns_thermal_get_temp(struct thermal_zone_device *tz, int *temp) { - struct ns_thermal *ns_thermal = data; - int offset = thermal_zone_get_offset(ns_thermal->tz); - int slope = thermal_zone_get_slope(ns_thermal->tz); + void __iomem *pvtmon = tz->devdata; + int offset = thermal_zone_get_offset(tz); + int slope = thermal_zone_get_slope(tz); u32 val; - val = readl(ns_thermal->pvtmon + PVTMON_CONTROL0); + val = readl(pvtmon + PVTMON_CONTROL0); if ((val & PVTMON_CONTROL0_SEL_MASK) != PVTMON_CONTROL0_SEL_TEMP_MONITOR) { /* Clear current mode selection */ val &= ~PVTMON_CONTROL0_SEL_MASK; @@ -34,50 +29,47 @@ static int ns_thermal_get_temp(void *data, int *temp) /* Set temp monitor mode (it's the default actually) */ val |= PVTMON_CONTROL0_SEL_TEMP_MONITOR; - writel(val, ns_thermal->pvtmon + PVTMON_CONTROL0); + writel(val, pvtmon + PVTMON_CONTROL0); } - val = readl(ns_thermal->pvtmon + PVTMON_STATUS); + val = readl(pvtmon + PVTMON_STATUS); *temp = slope * val + offset; return 0; } -static const struct thermal_zone_of_device_ops ns_thermal_ops = { +static const struct thermal_zone_device_ops ns_thermal_ops = { .get_temp = ns_thermal_get_temp, }; static int ns_thermal_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; - struct ns_thermal *ns_thermal; - - ns_thermal = devm_kzalloc(dev, sizeof(*ns_thermal), GFP_KERNEL); - if (!ns_thermal) - return -ENOMEM; + struct thermal_zone_device *tz; + void __iomem *pvtmon; - ns_thermal->pvtmon = of_iomap(dev_of_node(dev), 0); - if (WARN_ON(!ns_thermal->pvtmon)) + pvtmon = of_iomap(dev_of_node(dev), 0); + if (WARN_ON(!pvtmon)) return -ENOENT; - ns_thermal->tz = devm_thermal_zone_of_sensor_register(dev, 0, - ns_thermal, - &ns_thermal_ops); - if (IS_ERR(ns_thermal->tz)) { - iounmap(ns_thermal->pvtmon); - return PTR_ERR(ns_thermal->tz); + tz = devm_thermal_of_zone_register(dev, 0, + pvtmon, + &ns_thermal_ops); + if (IS_ERR(tz)) { + iounmap(pvtmon); + return PTR_ERR(tz); } - platform_set_drvdata(pdev, ns_thermal); + platform_set_drvdata(pdev, pvtmon); return 0; } static int ns_thermal_remove(struct platform_device *pdev) { - struct ns_thermal *ns_thermal = platform_get_drvdata(pdev); + void __iomem *pvtmon = platform_get_drvdata(pdev); - iounmap(ns_thermal->pvtmon); + iounmap(pvtmon); return 0; } diff --git a/drivers/thermal/broadcom/sr-thermal.c b/drivers/thermal/broadcom/sr-thermal.c index 85ab9edd580c..2b93502543ff 100644 --- a/drivers/thermal/broadcom/sr-thermal.c +++ b/drivers/thermal/broadcom/sr-thermal.c @@ -19,7 +19,6 @@ #define SR_TMON_MAX_LIST 6 struct sr_tmon { - struct thermal_zone_device *tz; unsigned int crit_temp; unsigned int tmon_id; struct sr_thermal *priv; @@ -31,9 +30,9 @@ struct sr_thermal { struct sr_tmon tmon[SR_TMON_MAX_LIST]; }; -static int sr_get_temp(void *data, int *temp) +static int sr_get_temp(struct thermal_zone_device *tz, int *temp) { - struct sr_tmon *tmon = data; + struct sr_tmon *tmon = tz->devdata; struct sr_thermal *sr_thermal = tmon->priv; *temp = readl(sr_thermal->regs + SR_TMON_TEMP_BASE(tmon->tmon_id)); @@ -41,13 +40,14 @@ static int sr_get_temp(void *data, int *temp) return 0; } -static const struct thermal_zone_of_device_ops sr_tz_ops = { +static const struct thermal_zone_device_ops sr_tz_ops = { .get_temp = 
sr_get_temp, }; static int sr_thermal_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; + struct thermal_zone_device *tz; struct sr_thermal *sr_thermal; struct sr_tmon *tmon; struct resource *res; @@ -84,10 +84,10 @@ static int sr_thermal_probe(struct platform_device *pdev) writel(0, sr_thermal->regs + SR_TMON_TEMP_BASE(i)); tmon->tmon_id = i; tmon->priv = sr_thermal; - tmon->tz = devm_thermal_zone_of_sensor_register(dev, i, tmon, - &sr_tz_ops); - if (IS_ERR(tmon->tz)) - return PTR_ERR(tmon->tz); + tz = devm_thermal_of_zone_register(dev, i, tmon, + &sr_tz_ops); + if (IS_ERR(tz)) + return PTR_ERR(tz); dev_dbg(dev, "thermal sensor %d registered\n", i); } diff --git a/drivers/thermal/cpufreq_cooling.c b/drivers/thermal/cpufreq_cooling.c index b76293cc989c..9f8b438fcf8f 100644 --- a/drivers/thermal/cpufreq_cooling.c +++ b/drivers/thermal/cpufreq_cooling.c @@ -475,7 +475,7 @@ static int cpufreq_set_cur_state(struct thermal_cooling_device *cdev, /** * __cpufreq_cooling_register - helper function to create cpufreq cooling device - * @np: a valid struct device_node to the cooling device device tree node + * @np: a valid struct device_node to the cooling device tree node * @policy: cpufreq policy * Normally this should be same as cpufreq policy->related_cpus. * @em: Energy Model of the cpufreq policy @@ -501,17 +501,17 @@ __cpufreq_cooling_register(struct device_node *np, struct thermal_cooling_device_ops *cooling_ops; char *name; + if (IS_ERR_OR_NULL(policy)) { + pr_err("%s: cpufreq policy isn't valid: %p\n", __func__, policy); + return ERR_PTR(-EINVAL); + } + dev = get_cpu_device(policy->cpu); if (unlikely(!dev)) { pr_warn("No cpu device for cpu %d\n", policy->cpu); return ERR_PTR(-ENODEV); } - if (IS_ERR_OR_NULL(policy)) { - pr_err("%s: cpufreq policy isn't valid: %p\n", __func__, policy); - return ERR_PTR(-EINVAL); - } - i = cpufreq_table_count_valid_entries(policy); if (!i) { pr_debug("%s: CPUFreq table not found or has no valid entries\n", diff --git a/drivers/thermal/da9062-thermal.c b/drivers/thermal/da9062-thermal.c index 180edec34e07..7dcfde7a9f2c 100644 --- a/drivers/thermal/da9062-thermal.c +++ b/drivers/thermal/da9062-thermal.c @@ -248,10 +248,9 @@ static int da9062_thermal_probe(struct platform_device *pdev) jiffies_to_msecs(thermal->zone->passive_delay_jiffies)); ret = platform_get_irq_byname(pdev, "THERMAL"); - if (ret < 0) { - dev_err(&pdev->dev, "Failed to get platform IRQ.\n"); + if (ret < 0) goto err_zone; - } + thermal->irq = ret; ret = request_threaded_irq(thermal->irq, NULL, diff --git a/drivers/thermal/db8500_thermal.c b/drivers/thermal/db8500_thermal.c index 121cf853e545..cb10e280681f 100644 --- a/drivers/thermal/db8500_thermal.c +++ b/drivers/thermal/db8500_thermal.c @@ -58,9 +58,9 @@ struct db8500_thermal_zone { }; /* Callback to get current temperature */ -static int db8500_thermal_get_temp(void *data, int *temp) +static int db8500_thermal_get_temp(struct thermal_zone_device *tz, int *temp) { - struct db8500_thermal_zone *th = data; + struct db8500_thermal_zone *th = tz->devdata; /* * TODO: There is no PRCMU interface to get temperature data currently, @@ -72,7 +72,7 @@ static int db8500_thermal_get_temp(void *data, int *temp) return 0; } -static struct thermal_zone_of_device_ops thdev_ops = { +static const struct thermal_zone_device_ops thdev_ops = { .get_temp = db8500_thermal_get_temp, }; @@ -182,7 +182,7 @@ static int db8500_thermal_probe(struct platform_device *pdev) } /* register of thermal sensor and get info from DT */ - th->tz = 
devm_thermal_zone_of_sensor_register(dev, 0, th, &thdev_ops); + th->tz = devm_thermal_of_zone_register(dev, 0, th, &thdev_ops); if (IS_ERR(th->tz)) { dev_err(dev, "register thermal zone sensor failed\n"); return PTR_ERR(th->tz); diff --git a/drivers/thermal/gov_bang_bang.c b/drivers/thermal/gov_bang_bang.c index 991a1c54296d..a08bbe33be96 100644 --- a/drivers/thermal/gov_bang_bang.c +++ b/drivers/thermal/gov_bang_bang.c @@ -31,8 +31,6 @@ static void thermal_zone_trip_update(struct thermal_zone_device *tz, int trip) trip, trip_temp, tz->temperature, trip_hyst); - mutex_lock(&tz->lock); - list_for_each_entry(instance, &tz->thermal_instances, tz_node) { if (instance->trip != trip) continue; @@ -65,8 +63,6 @@ static void thermal_zone_trip_update(struct thermal_zone_device *tz, int trip) instance->cdev->updated = false; /* cdev needs update */ mutex_unlock(&instance->cdev->lock); } - - mutex_unlock(&tz->lock); } /** @@ -100,15 +96,13 @@ static int bang_bang_control(struct thermal_zone_device *tz, int trip) { struct thermal_instance *instance; - thermal_zone_trip_update(tz, trip); + lockdep_assert_held(&tz->lock); - mutex_lock(&tz->lock); + thermal_zone_trip_update(tz, trip); list_for_each_entry(instance, &tz->thermal_instances, tz_node) thermal_cdev_update(instance->cdev); - mutex_unlock(&tz->lock); - return 0; } diff --git a/drivers/thermal/gov_fair_share.c b/drivers/thermal/gov_fair_share.c index 6a2abcfc648f..a4ee4661e9cc 100644 --- a/drivers/thermal/gov_fair_share.c +++ b/drivers/thermal/gov_fair_share.c @@ -82,7 +82,7 @@ static int fair_share_throttle(struct thermal_zone_device *tz, int trip) int total_instance = 0; int cur_trip_level = get_trip_level(tz); - mutex_lock(&tz->lock); + lockdep_assert_held(&tz->lock); list_for_each_entry(instance, &tz->thermal_instances, tz_node) { if (instance->trip != trip) @@ -112,7 +112,6 @@ static int fair_share_throttle(struct thermal_zone_device *tz, int trip) mutex_unlock(&cdev->lock); } - mutex_unlock(&tz->lock); return 0; } diff --git a/drivers/thermal/gov_power_allocator.c b/drivers/thermal/gov_power_allocator.c index 1d5052470967..2d1aeaba38a8 100644 --- a/drivers/thermal/gov_power_allocator.c +++ b/drivers/thermal/gov_power_allocator.c @@ -392,8 +392,6 @@ static int allocate_power(struct thermal_zone_device *tz, int i, num_actors, total_weight, ret = 0; int trip_max_desired_temperature = params->trip_max_desired_temperature; - mutex_lock(&tz->lock); - num_actors = 0; total_weight = 0; list_for_each_entry(instance, &tz->thermal_instances, tz_node) { @@ -404,10 +402,8 @@ static int allocate_power(struct thermal_zone_device *tz, } } - if (!num_actors) { - ret = -ENODEV; - goto unlock; - } + if (!num_actors) + return -ENODEV; /* * We need to allocate five arrays of the same size: @@ -421,10 +417,8 @@ static int allocate_power(struct thermal_zone_device *tz, BUILD_BUG_ON(sizeof(*req_power) != sizeof(*extra_actor_power)); BUILD_BUG_ON(sizeof(*req_power) != sizeof(*weighted_req_power)); req_power = kcalloc(num_actors * 5, sizeof(*req_power), GFP_KERNEL); - if (!req_power) { - ret = -ENOMEM; - goto unlock; - } + if (!req_power) + return -ENOMEM; max_power = &req_power[num_actors]; granted_power = &req_power[2 * num_actors]; @@ -496,8 +490,6 @@ static int allocate_power(struct thermal_zone_device *tz, control_temp - tz->temperature); kfree(req_power); -unlock: - mutex_unlock(&tz->lock); return ret; } @@ -576,7 +568,6 @@ static void allow_maximum_power(struct thermal_zone_device *tz, bool update) struct power_allocator_params *params = tz->governor_data; 
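The governor hunks here all follow one locking change: throttle callbacks are now invoked by thermal_zone_device_update() with tz->lock already held, so each governor's own mutex_lock()/mutex_unlock() pair becomes a lockdep assertion. A minimal sketch of the resulting callback shape (hypothetical "example" governor, using the private thermal_core.h internals visible in these hunks):

        #include <linux/lockdep.h>
        #include <linux/thermal.h>
        #include "thermal_core.h"

        static int example_throttle(struct thermal_zone_device *tz, int trip)
        {
                struct thermal_instance *instance;

                /* The core takes tz->lock before calling any governor. */
                lockdep_assert_held(&tz->lock);

                list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
                        if (instance->trip != trip)
                                continue;
                        /* ... compute and set a target for instance->cdev ... */
                }

                return 0;
        }

The assertion documents the contract and turns a missed lock in a future caller into a lockdep splat rather than a silent race.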
u32 req_power; - mutex_lock(&tz->lock); list_for_each_entry(instance, &tz->thermal_instances, tz_node) { struct thermal_cooling_device *cdev = instance->cdev; @@ -598,7 +589,6 @@ static void allow_maximum_power(struct thermal_zone_device *tz, bool update) mutex_unlock(&instance->cdev->lock); } - mutex_unlock(&tz->lock); } /** @@ -712,6 +702,8 @@ static int power_allocator_throttle(struct thermal_zone_device *tz, int trip) struct power_allocator_params *params = tz->governor_data; bool update; + lockdep_assert_held(&tz->lock); + /* * We get called for every trip point but we only need to do * our calculations once diff --git a/drivers/thermal/gov_step_wise.c b/drivers/thermal/gov_step_wise.c index 9729b46d0258..cdd3354bc27f 100644 --- a/drivers/thermal/gov_step_wise.c +++ b/drivers/thermal/gov_step_wise.c @@ -117,8 +117,6 @@ static void thermal_zone_trip_update(struct thermal_zone_device *tz, int trip) dev_dbg(&tz->device, "Trip%d[type=%d,temp=%d]:trend=%d,throttle=%d\n", trip, trip_type, trip_temp, trend, throttle); - mutex_lock(&tz->lock); - list_for_each_entry(instance, &tz->thermal_instances, tz_node) { if (instance->trip != trip) continue; @@ -145,8 +143,6 @@ static void thermal_zone_trip_update(struct thermal_zone_device *tz, int trip) instance->cdev->updated = false; /* cdev needs update */ mutex_unlock(&instance->cdev->lock); } - - mutex_unlock(&tz->lock); } /** @@ -164,15 +160,13 @@ static int step_wise_throttle(struct thermal_zone_device *tz, int trip) { struct thermal_instance *instance; - thermal_zone_trip_update(tz, trip); + lockdep_assert_held(&tz->lock); - mutex_lock(&tz->lock); + thermal_zone_trip_update(tz, trip); list_for_each_entry(instance, &tz->thermal_instances, tz_node) thermal_cdev_update(instance->cdev); - mutex_unlock(&tz->lock); - return 0; } diff --git a/drivers/thermal/gov_user_space.c b/drivers/thermal/gov_user_space.c index a62a4e90bd3f..8bc1c22aaf03 100644 --- a/drivers/thermal/gov_user_space.c +++ b/drivers/thermal/gov_user_space.c @@ -34,7 +34,8 @@ static int notify_user_space(struct thermal_zone_device *tz, int trip) char *thermal_prop[5]; int i; - mutex_lock(&tz->lock); + lockdep_assert_held(&tz->lock); + thermal_prop[0] = kasprintf(GFP_KERNEL, "NAME=%s", tz->type); thermal_prop[1] = kasprintf(GFP_KERNEL, "TEMP=%d", tz->temperature); thermal_prop[2] = kasprintf(GFP_KERNEL, "TRIP=%d", trip); @@ -43,7 +44,7 @@ static int notify_user_space(struct thermal_zone_device *tz, int trip) kobject_uevent_env(&tz->device.kobj, KOBJ_CHANGE, thermal_prop); for (i = 0; i < 4; ++i) kfree(thermal_prop[i]); - mutex_unlock(&tz->lock); + return 0; } diff --git a/drivers/thermal/hisi_thermal.c b/drivers/thermal/hisi_thermal.c index 19a242c69ce6..d6974db7aaf7 100644 --- a/drivers/thermal/hisi_thermal.c +++ b/drivers/thermal/hisi_thermal.c @@ -434,9 +434,9 @@ static int hi3660_thermal_probe(struct hisi_thermal_data *data) return 0; } -static int hisi_thermal_get_temp(void *__data, int *temp) +static int hisi_thermal_get_temp(struct thermal_zone_device *tz, int *temp) { - struct hisi_thermal_sensor *sensor = __data; + struct hisi_thermal_sensor *sensor = tz->devdata; struct hisi_thermal_data *data = sensor->data; *temp = data->ops->get_temp(sensor); @@ -447,7 +447,7 @@ static int hisi_thermal_get_temp(void *__data, int *temp) return 0; } -static const struct thermal_zone_of_device_ops hisi_of_thermal_ops = { +static const struct thermal_zone_device_ops hisi_of_thermal_ops = { .get_temp = hisi_thermal_get_temp, }; @@ -459,7 +459,7 @@ static irqreturn_t 
hisi_thermal_alarm_irq_thread(int irq, void *dev) data->ops->irq_handler(sensor); - hisi_thermal_get_temp(sensor, &temp); + temp = data->ops->get_temp(sensor); if (temp >= sensor->thres_temp) { dev_crit(&data->pdev->dev, @@ -484,9 +484,9 @@ static int hisi_thermal_register_sensor(struct platform_device *pdev, int ret, i; const struct thermal_trip *trip; - sensor->tzd = devm_thermal_zone_of_sensor_register(&pdev->dev, - sensor->id, sensor, - &hisi_of_thermal_ops); + sensor->tzd = devm_thermal_of_zone_register(&pdev->dev, + sensor->id, sensor, + &hisi_of_thermal_ops); if (IS_ERR(sensor->tzd)) { ret = PTR_ERR(sensor->tzd); sensor->tzd = NULL; diff --git a/drivers/thermal/imx8mm_thermal.c b/drivers/thermal/imx8mm_thermal.c index af666bd9e8d4..e2c2673025a7 100644 --- a/drivers/thermal/imx8mm_thermal.c +++ b/drivers/thermal/imx8mm_thermal.c @@ -96,15 +96,15 @@ static int imx8mp_tmu_get_temp(void *data, int *temp) return 0; } -static int tmu_get_temp(void *data, int *temp) +static int tmu_get_temp(struct thermal_zone_device *tz, int *temp) { - struct tmu_sensor *sensor = data; + struct tmu_sensor *sensor = tz->devdata; struct imx8mm_tmu *tmu = sensor->priv; - return tmu->socdata->get_temp(data, temp); + return tmu->socdata->get_temp(sensor, temp); } -static struct thermal_zone_of_device_ops tmu_tz_ops = { +static const struct thermal_zone_device_ops tmu_tz_ops = { .get_temp = tmu_get_temp, }; @@ -165,9 +165,9 @@ static int imx8mm_tmu_probe(struct platform_device *pdev) for (i = 0; i < data->num_sensors; i++) { tmu->sensors[i].priv = tmu; tmu->sensors[i].tzd = - devm_thermal_zone_of_sensor_register(&pdev->dev, i, - &tmu->sensors[i], - &tmu_tz_ops); + devm_thermal_of_zone_register(&pdev->dev, i, + &tmu->sensors[i], + &tmu_tz_ops); if (IS_ERR(tmu->sensors[i].tzd)) { ret = PTR_ERR(tmu->sensors[i].tzd); dev_err(&pdev->dev, diff --git a/drivers/thermal/imx_sc_thermal.c b/drivers/thermal/imx_sc_thermal.c index 331a241eb0ef..10bfa6507eb4 100644 --- a/drivers/thermal/imx_sc_thermal.c +++ b/drivers/thermal/imx_sc_thermal.c @@ -43,11 +43,11 @@ struct imx_sc_msg_misc_get_temp { } data; } __packed __aligned(4); -static int imx_sc_thermal_get_temp(void *data, int *temp) +static int imx_sc_thermal_get_temp(struct thermal_zone_device *tz, int *temp) { struct imx_sc_msg_misc_get_temp msg; struct imx_sc_rpc_msg *hdr = &msg.hdr; - struct imx_sc_sensor *sensor = data; + struct imx_sc_sensor *sensor = tz->devdata; int ret; msg.data.req.resource_id = sensor->resource_id; @@ -70,7 +70,7 @@ static int imx_sc_thermal_get_temp(void *data, int *temp) return 0; } -static const struct thermal_zone_of_device_ops imx_sc_thermal_ops = { +static const struct thermal_zone_device_ops imx_sc_thermal_ops = { .get_temp = imx_sc_thermal_get_temp, }; @@ -109,10 +109,10 @@ static int imx_sc_thermal_probe(struct platform_device *pdev) break; } - sensor->tzd = devm_thermal_zone_of_sensor_register(&pdev->dev, - sensor->resource_id, - sensor, - &imx_sc_thermal_ops); + sensor->tzd = devm_thermal_of_zone_register(&pdev->dev, + sensor->resource_id, + sensor, + &imx_sc_thermal_ops); if (IS_ERR(sensor->tzd)) { dev_err(&pdev->dev, "failed to register thermal zone\n"); ret = PTR_ERR(sensor->tzd); diff --git a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c index 365489bf4b8c..db8a6f63657d 100644 --- a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c +++ b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c @@ -614,9 +614,8 @@ static int int3400_thermal_probe(struct 
platform_device *pdev) free_sysfs: cleanup_odvp(priv); - if (priv->data_vault) { - if (!ZERO_OR_NULL_PTR(priv->data_vault)) - sysfs_remove_group(&pdev->dev.kobj, &data_attribute_group); + if (!ZERO_OR_NULL_PTR(priv->data_vault)) { + sysfs_remove_group(&pdev->dev.kobj, &data_attribute_group); kfree(priv->data_vault); } free_uuid: diff --git a/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c b/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c index c2dc4c158b9d..bf1b1cdfade4 100644 --- a/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c +++ b/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c @@ -373,18 +373,7 @@ static struct pci_driver proc_thermal_pci_driver = { .driver.pm = &proc_thermal_pci_pm, }; -static int __init proc_thermal_init(void) -{ - return pci_register_driver(&proc_thermal_pci_driver); -} - -static void __exit proc_thermal_exit(void) -{ - pci_unregister_driver(&proc_thermal_pci_driver); -} - -module_init(proc_thermal_init); -module_exit(proc_thermal_exit); +module_pci_driver(proc_thermal_pci_driver); MODULE_AUTHOR("Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>"); MODULE_DESCRIPTION("Processor Thermal Reporting Device Driver"); diff --git a/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci_legacy.c b/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci_legacy.c index 4571a1a53b84..09e032f822f3 100644 --- a/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci_legacy.c +++ b/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci_legacy.c @@ -151,18 +151,7 @@ static struct pci_driver proc_thermal_pci_driver = { .driver.pm = &proc_thermal_pci_pm, }; -static int __init proc_thermal_init(void) -{ - return pci_register_driver(&proc_thermal_pci_driver); -} - -static void __exit proc_thermal_exit(void) -{ - pci_unregister_driver(&proc_thermal_pci_driver); -} - -module_init(proc_thermal_init); -module_exit(proc_thermal_exit); +module_pci_driver(proc_thermal_pci_driver); MODULE_AUTHOR("Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>"); MODULE_DESCRIPTION("Processor Thermal Reporting Device Driver"); diff --git a/drivers/thermal/intel/intel_powerclamp.c b/drivers/thermal/intel/intel_powerclamp.c index c841ab37e7c6..2a5570b9799a 100644 --- a/drivers/thermal/intel/intel_powerclamp.c +++ b/drivers/thermal/intel/intel_powerclamp.c @@ -62,8 +62,7 @@ static struct dentry *debug_dir; static unsigned int set_target_ratio; static unsigned int current_ratio; static bool should_skip; -static bool reduce_irq; -static atomic_t idle_wakeup_counter; + static unsigned int control_cpu; /* The cpu assigned to collect stat and update * control parameters. default to BSP but BSP * can be offlined. @@ -285,9 +284,6 @@ static unsigned int get_compensation(int ratio) cal_data[ratio + 1].steady_comp) / 3; } - /* REVISIT: simple penalty of double idle injection */ - if (reduce_irq) - comp = ratio; /* do not exceed limit */ if (comp + ratio >= MAX_TARGET_RATIO) comp = MAX_TARGET_RATIO - ratio - 1; @@ -301,13 +297,9 @@ static void adjust_compensation(int target_ratio, unsigned int win) struct powerclamp_calibration_data *d = &cal_data[target_ratio]; /* - * adjust compensations if confidence level has not been reached or - * there are too many wakeups during the last idle injection period, we - * cannot trust the data for compensation. + * adjust compensations if confidence level has not been reached. 
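For reference, the module_pci_driver() conversions above are purely mechanical: the macro (from <linux/pci.h>) expands to exactly the init/exit pair the two hunks delete. A sketch with a hypothetical driver name:

        #include <linux/module.h>
        #include <linux/pci.h>

        static struct pci_driver example_pci_driver = {
                .name = "example",
                /* .id_table, .probe, .remove, .driver.pm ... */
        };

        /*
         * Equivalent to:
         *   static int __init example_init(void)
         *   { return pci_register_driver(&example_pci_driver); }
         *   module_init(example_init);
         *   static void __exit example_exit(void)
         *   { pci_unregister_driver(&example_pci_driver); }
         *   module_exit(example_exit);
         */
        module_pci_driver(example_pci_driver);

        MODULE_LICENSE("GPL");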
*/ - if (d->confidence >= CONFIDENCE_OK || - atomic_read(&idle_wakeup_counter) > - win * num_online_cpus()) + if (d->confidence >= CONFIDENCE_OK) return; delta = set_target_ratio - current_ratio; @@ -347,14 +339,7 @@ static bool powerclamp_adjust_controls(unsigned int target_ratio, tsc_last = tsc_now; adjust_compensation(target_ratio, win); - /* - * too many external interrupts, set flag such - * that we can take measure later. - */ - reduce_irq = atomic_read(&idle_wakeup_counter) >= - 2 * win * num_online_cpus(); - atomic_set(&idle_wakeup_counter, 0); /* if we are above target+guard, skip */ return set_target_ratio + guard <= current_ratio; } @@ -532,8 +517,10 @@ static int start_power_clamp(void) /* prefer BSP */ control_cpu = 0; - if (!cpu_online(control_cpu)) - control_cpu = smp_processor_id(); + if (!cpu_online(control_cpu)) { + control_cpu = get_cpu(); + put_cpu(); + } clamping = true; schedule_delayed_work(&poll_pkg_cstate_work, 0); diff --git a/drivers/thermal/k3_bandgap.c b/drivers/thermal/k3_bandgap.c index 5d0b3ffc6f46..22c9bcb899c3 100644 --- a/drivers/thermal/k3_bandgap.c +++ b/drivers/thermal/k3_bandgap.c @@ -139,9 +139,9 @@ static int k3_bgp_read_temp(struct k3_thermal_data *devdata, return 0; } -static int k3_thermal_get_temp(void *devdata, int *temp) +static int k3_thermal_get_temp(struct thermal_zone_device *tz, int *temp) { - struct k3_thermal_data *data = devdata; + struct k3_thermal_data *data = tz->devdata; int ret = 0; ret = k3_bgp_read_temp(data, temp); @@ -151,7 +151,7 @@ static int k3_thermal_get_temp(void *devdata, int *temp) return ret; } -static const struct thermal_zone_of_device_ops k3_of_thermal_ops = { +static const struct thermal_zone_device_ops k3_of_thermal_ops = { .get_temp = k3_thermal_get_temp, }; @@ -213,9 +213,9 @@ static int k3_bandgap_probe(struct platform_device *pdev) writel(val, data[id].bgp->base + data[id].ctrl_offset); data[id].tzd = - devm_thermal_zone_of_sensor_register(dev, id, - &data[id], - &k3_of_thermal_ops); + devm_thermal_of_zone_register(dev, id, + &data[id], + &k3_of_thermal_ops); if (IS_ERR(data[id].tzd)) { dev_err(dev, "thermal zone device is NULL\n"); ret = PTR_ERR(data[id].tzd); diff --git a/drivers/thermal/k3_j72xx_bandgap.c b/drivers/thermal/k3_j72xx_bandgap.c index 115a44eb4fbf..16b6bcf1bf4f 100644 --- a/drivers/thermal/k3_j72xx_bandgap.c +++ b/drivers/thermal/k3_j72xx_bandgap.c @@ -247,9 +247,9 @@ static inline int k3_bgp_read_temp(struct k3_thermal_data *devdata, } /* Get temperature callback function for thermal zone */ -static int k3_thermal_get_temp(void *devdata, int *temp) +static int k3_thermal_get_temp(struct thermal_zone_device *tz, int *temp) { - struct k3_thermal_data *data = devdata; + struct k3_thermal_data *data = tz->devdata; int ret = 0; ret = k3_bgp_read_temp(data, temp); @@ -259,7 +259,7 @@ static int k3_thermal_get_temp(void *devdata, int *temp) return ret; } -static const struct thermal_zone_of_device_ops k3_of_thermal_ops = { +static const struct thermal_zone_device_ops k3_of_thermal_ops = { .get_temp = k3_thermal_get_temp, }; @@ -474,10 +474,8 @@ static int k3_j72xx_bandgap_probe(struct platform_device *pdev) writel(val, data[id].bgp->cfg2_base + data[id].ctrl_offset); bgp->ts_data[id] = &data[id]; - ti_thermal = - devm_thermal_zone_of_sensor_register(bgp->dev, id, - &data[id], - &k3_of_thermal_ops); + ti_thermal = devm_thermal_of_zone_register(bgp->dev, id, &data[id], + &k3_of_thermal_ops); if (IS_ERR(ti_thermal)) { dev_err(bgp->dev, "thermal zone device is NULL\n"); ret = PTR_ERR(ti_thermal); diff 
--git a/drivers/thermal/max77620_thermal.c b/drivers/thermal/max77620_thermal.c index 82d06c7411eb..6451a55eb582 100644 --- a/drivers/thermal/max77620_thermal.c +++ b/drivers/thermal/max77620_thermal.c @@ -44,9 +44,9 @@ struct max77620_therm_info { * Return 0 on success otherwise error number to show reason of failure. */ -static int max77620_thermal_read_temp(void *data, int *temp) +static int max77620_thermal_read_temp(struct thermal_zone_device *tz, int *temp) { - struct max77620_therm_info *mtherm = data; + struct max77620_therm_info *mtherm = tz->devdata; unsigned int val; int ret; @@ -66,7 +66,7 @@ static int max77620_thermal_read_temp(void *data, int *temp) return 0; } -static const struct thermal_zone_of_device_ops max77620_thermal_ops = { +static const struct thermal_zone_device_ops max77620_thermal_ops = { .get_temp = max77620_thermal_read_temp, }; @@ -114,7 +114,7 @@ static int max77620_thermal_probe(struct platform_device *pdev) */ device_set_of_node_from_dev(&pdev->dev, pdev->dev.parent); - mtherm->tz_device = devm_thermal_zone_of_sensor_register(&pdev->dev, 0, + mtherm->tz_device = devm_thermal_of_zone_register(&pdev->dev, 0, mtherm, &max77620_thermal_ops); if (IS_ERR(mtherm->tz_device)) { ret = PTR_ERR(mtherm->tz_device); diff --git a/drivers/thermal/mtk_thermal.c b/drivers/thermal/mtk_thermal.c index ede94eadddda..8440692e3890 100644 --- a/drivers/thermal/mtk_thermal.c +++ b/drivers/thermal/mtk_thermal.c @@ -679,9 +679,9 @@ static int mtk_thermal_bank_temperature(struct mtk_thermal_bank *bank) return max; } -static int mtk_read_temp(void *data, int *temperature) +static int mtk_read_temp(struct thermal_zone_device *tz, int *temperature) { - struct mtk_thermal *mt = data; + struct mtk_thermal *mt = tz->devdata; int i; int tempmax = INT_MIN; @@ -700,7 +700,7 @@ static int mtk_read_temp(void *data, int *temperature) return 0; } -static const struct thermal_zone_of_device_ops mtk_thermal_ops = { +static const struct thermal_zone_device_ops mtk_thermal_ops = { .get_temp = mtk_read_temp, }; @@ -1082,8 +1082,8 @@ static int mtk_thermal_probe(struct platform_device *pdev) platform_set_drvdata(pdev, mt); - tzdev = devm_thermal_zone_of_sensor_register(&pdev->dev, 0, mt, - &mtk_thermal_ops); + tzdev = devm_thermal_of_zone_register(&pdev->dev, 0, mt, + &mtk_thermal_ops); if (IS_ERR(tzdev)) { ret = PTR_ERR(tzdev); goto err_disable_clk_peri_therm; diff --git a/drivers/thermal/qcom/qcom-spmi-adc-tm5.c b/drivers/thermal/qcom/qcom-spmi-adc-tm5.c index 073943cbcc2b..af68adf720cc 100644 --- a/drivers/thermal/qcom/qcom-spmi-adc-tm5.c +++ b/drivers/thermal/qcom/qcom-spmi-adc-tm5.c @@ -357,9 +357,9 @@ static irqreturn_t adc_tm5_gen2_isr(int irq, void *data) return IRQ_HANDLED; } -static int adc_tm5_get_temp(void *data, int *temp) +static int adc_tm5_get_temp(struct thermal_zone_device *tz, int *temp) { - struct adc_tm5_channel *channel = data; + struct adc_tm5_channel *channel = tz->devdata; int ret; if (!channel || !channel->iio) @@ -639,9 +639,9 @@ config_fail: return ret; } -static int adc_tm5_set_trips(void *data, int low, int high) +static int adc_tm5_set_trips(struct thermal_zone_device *tz, int low, int high) { - struct adc_tm5_channel *channel = data; + struct adc_tm5_channel *channel = tz->devdata; struct adc_tm5_chip *chip; int ret; @@ -660,7 +660,7 @@ static int adc_tm5_set_trips(void *data, int low, int high) return ret; } -static struct thermal_zone_of_device_ops adc_tm5_thermal_ops = { +static const struct thermal_zone_device_ops adc_tm5_thermal_ops = { .get_temp = 
adc_tm5_get_temp, .set_trips = adc_tm5_set_trips, }; @@ -672,11 +672,10 @@ static int adc_tm5_register_tzd(struct adc_tm5_chip *adc_tm) for (i = 0; i < adc_tm->nchannels; i++) { adc_tm->channels[i].chip = adc_tm; - - tzd = devm_thermal_zone_of_sensor_register(adc_tm->dev, - adc_tm->channels[i].channel, - &adc_tm->channels[i], - &adc_tm5_thermal_ops); + tzd = devm_thermal_of_zone_register(adc_tm->dev, + adc_tm->channels[i].channel, + &adc_tm->channels[i], + &adc_tm5_thermal_ops); if (IS_ERR(tzd)) { if (PTR_ERR(tzd) == -ENODEV) { dev_warn(adc_tm->dev, "thermal sensor on channel %d is not used\n", @@ -1026,10 +1025,8 @@ static int adc_tm5_probe(struct platform_device *pdev) adc_tm->base = reg; irq = platform_get_irq(pdev, 0); - if (irq < 0) { - dev_err(dev, "get_irq failed: %d\n", irq); + if (irq < 0) return irq; - } ret = adc_tm5_get_dt_data(adc_tm, node); if (ret) { diff --git a/drivers/thermal/qcom/qcom-spmi-temp-alarm.c b/drivers/thermal/qcom/qcom-spmi-temp-alarm.c index 770f82cc9bca..be785ab37e53 100644 --- a/drivers/thermal/qcom/qcom-spmi-temp-alarm.c +++ b/drivers/thermal/qcom/qcom-spmi-temp-alarm.c @@ -186,9 +186,9 @@ static int qpnp_tm_update_temp_no_adc(struct qpnp_tm_chip *chip) return 0; } -static int qpnp_tm_get_temp(void *data, int *temp) +static int qpnp_tm_get_temp(struct thermal_zone_device *tz, int *temp) { - struct qpnp_tm_chip *chip = data; + struct qpnp_tm_chip *chip = tz->devdata; int ret, mili_celsius; if (!temp) @@ -263,9 +263,9 @@ skip: return qpnp_tm_write(chip, QPNP_TM_REG_SHUTDOWN_CTRL1, reg); } -static int qpnp_tm_set_trip_temp(void *data, int trip, int temp) +static int qpnp_tm_set_trip_temp(struct thermal_zone_device *tz, int trip, int temp) { - struct qpnp_tm_chip *chip = data; + struct qpnp_tm_chip *chip = tz->devdata; const struct thermal_trip *trip_points; int ret; @@ -283,7 +283,7 @@ static int qpnp_tm_set_trip_temp(void *data, int trip, int temp) return ret; } -static const struct thermal_zone_of_device_ops qpnp_tm_sensor_ops = { +static const struct thermal_zone_device_ops qpnp_tm_sensor_ops = { .get_temp = qpnp_tm_get_temp, .set_trip_temp = qpnp_tm_set_trip_temp, }; @@ -446,7 +446,7 @@ static int qpnp_tm_probe(struct platform_device *pdev) * read the trip points. get_temp() returns the default temperature * before the hardware initialization is completed. 
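The pattern repeated across these sensor drivers is the new callback prototype: ops receive the struct thermal_zone_device itself and recover the driver context from tz->devdata, the pointer passed as the third argument of devm_thermal_of_zone_register(). A minimal sketch with a hypothetical memory-mapped sensor (register offsets and scaling are invented for illustration):

        #include <linux/io.h>
        #include <linux/minmax.h>
        #include <linux/thermal.h>

        #define EX_TEMP    0x00         /* hypothetical register layout */
        #define EX_TH_LOW  0x10
        #define EX_TH_HIGH 0x14

        struct example_sensor {
                void __iomem *base;
        };

        static int example_get_temp(struct thermal_zone_device *tz, int *temp)
        {
                struct example_sensor *s = tz->devdata; /* 3rd registration arg */

                /* Assume the register reports whole degrees Celsius. */
                *temp = readl(s->base + EX_TEMP) * 1000; /* millidegrees */
                return 0;
        }

        static int example_set_trips(struct thermal_zone_device *tz, int low, int high)
        {
                struct example_sensor *s = tz->devdata;

                /* low may be -INT_MAX and high INT_MAX when unbounded: clamp. */
                writel(clamp(low, 0, 125000), s->base + EX_TH_LOW);
                writel(clamp(high, 0, 125000), s->base + EX_TH_HIGH);
                return 0;
        }

        static const struct thermal_zone_device_ops example_ops = {
                .get_temp  = example_get_temp,
                .set_trips = example_set_trips,
        };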
*/ - chip->tz_dev = devm_thermal_zone_of_sensor_register( + chip->tz_dev = devm_thermal_of_zone_register( &pdev->dev, 0, chip, &qpnp_tm_sensor_ops); if (IS_ERR(chip->tz_dev)) { dev_err(&pdev->dev, "failed to register sensor\n"); diff --git a/drivers/thermal/qcom/tsens.c b/drivers/thermal/qcom/tsens.c index e49f58e83513..b1b10005fb28 100644 --- a/drivers/thermal/qcom/tsens.c +++ b/drivers/thermal/qcom/tsens.c @@ -532,9 +532,9 @@ static irqreturn_t tsens_irq_thread(int irq, void *data) return IRQ_HANDLED; } -static int tsens_set_trips(void *_sensor, int low, int high) +static int tsens_set_trips(struct thermal_zone_device *tz, int low, int high) { - struct tsens_sensor *s = _sensor; + struct tsens_sensor *s = tz->devdata; struct tsens_priv *priv = s->priv; struct device *dev = priv->dev; struct tsens_irq_data d; @@ -925,9 +925,9 @@ err_put_device: return ret; } -static int tsens_get_temp(void *data, int *temp) +static int tsens_get_temp(struct thermal_zone_device *tz, int *temp) { - struct tsens_sensor *s = data; + struct tsens_sensor *s = tz->devdata; struct tsens_priv *priv = s->priv; return priv->ops->get_temp(s, temp); @@ -991,7 +991,7 @@ static const struct of_device_id tsens_table[] = { }; MODULE_DEVICE_TABLE(of, tsens_table); -static const struct thermal_zone_of_device_ops tsens_of_ops = { +static const struct thermal_zone_device_ops tsens_of_ops = { .get_temp = tsens_get_temp, .set_trips = tsens_set_trips, }; @@ -1044,9 +1044,9 @@ static int tsens_register(struct tsens_priv *priv) for (i = 0; i < priv->num_sensors; i++) { priv->sensor[i].priv = priv; - tzd = devm_thermal_zone_of_sensor_register(priv->dev, priv->sensor[i].hw_id, - &priv->sensor[i], - &tsens_of_ops); + tzd = devm_thermal_of_zone_register(priv->dev, priv->sensor[i].hw_id, + &priv->sensor[i], + &tsens_of_ops); if (IS_ERR(tzd)) continue; priv->sensor[i].tzd = tzd; diff --git a/drivers/thermal/qoriq_thermal.c b/drivers/thermal/qoriq_thermal.c index 73049f9bea25..d111e218f362 100644 --- a/drivers/thermal/qoriq_thermal.c +++ b/drivers/thermal/qoriq_thermal.c @@ -82,9 +82,9 @@ static struct qoriq_tmu_data *qoriq_sensor_to_data(struct qoriq_sensor *s) return container_of(s, struct qoriq_tmu_data, sensor[s->id]); } -static int tmu_get_temp(void *p, int *temp) +static int tmu_get_temp(struct thermal_zone_device *tz, int *temp) { - struct qoriq_sensor *qsensor = p; + struct qoriq_sensor *qsensor = tz->devdata; struct qoriq_tmu_data *qdata = qoriq_sensor_to_data(qsensor); u32 val; /* @@ -122,7 +122,7 @@ static int tmu_get_temp(void *p, int *temp) return 0; } -static const struct thermal_zone_of_device_ops tmu_tz_ops = { +static const struct thermal_zone_device_ops tmu_tz_ops = { .get_temp = tmu_get_temp, }; @@ -146,9 +146,9 @@ static int qoriq_tmu_register_tmu_zone(struct device *dev, sensor->id = id; - tzd = devm_thermal_zone_of_sensor_register(dev, id, - sensor, - &tmu_tz_ops); + tzd = devm_thermal_of_zone_register(dev, id, + sensor, + &tmu_tz_ops); ret = PTR_ERR_OR_ZERO(tzd); if (ret) { if (ret == -ENODEV) diff --git a/drivers/thermal/rcar_gen3_thermal.c b/drivers/thermal/rcar_gen3_thermal.c index cda7c52f2319..4c1c6f89aa2f 100644 --- a/drivers/thermal/rcar_gen3_thermal.c +++ b/drivers/thermal/rcar_gen3_thermal.c @@ -164,9 +164,9 @@ static int rcar_gen3_thermal_round(int temp) return result * RCAR3_THERMAL_GRAN; } -static int rcar_gen3_thermal_get_temp(void *devdata, int *temp) +static int rcar_gen3_thermal_get_temp(struct thermal_zone_device *tz, int *temp) { - struct rcar_gen3_thermal_tsc *tsc = devdata; + struct 
rcar_gen3_thermal_tsc *tsc = tz->devdata; int mcelsius, val; int reg; @@ -203,9 +203,9 @@ static int rcar_gen3_thermal_mcelsius_to_temp(struct rcar_gen3_thermal_tsc *tsc, return INT_FIXPT(val); } -static int rcar_gen3_thermal_set_trips(void *devdata, int low, int high) +static int rcar_gen3_thermal_set_trips(struct thermal_zone_device *tz, int low, int high) { - struct rcar_gen3_thermal_tsc *tsc = devdata; + struct rcar_gen3_thermal_tsc *tsc = tz->devdata; u32 irqmsk = 0; if (low != -INT_MAX) { @@ -225,7 +225,7 @@ static int rcar_gen3_thermal_set_trips(void *devdata, int low, int high) return 0; } -static struct thermal_zone_of_device_ops rcar_gen3_tz_of_ops = { +static struct thermal_zone_device_ops rcar_gen3_tz_of_ops = { .get_temp = rcar_gen3_thermal_get_temp, .set_trips = rcar_gen3_thermal_set_trips, }; @@ -508,8 +508,8 @@ static int rcar_gen3_thermal_probe(struct platform_device *pdev) for (i = 0; i < priv->num_tscs; i++) { struct rcar_gen3_thermal_tsc *tsc = priv->tscs[i]; - zone = devm_thermal_zone_of_sensor_register(dev, i, tsc, - &rcar_gen3_tz_of_ops); + zone = devm_thermal_of_zone_register(dev, i, tsc, + &rcar_gen3_tz_of_ops); if (IS_ERR(zone)) { dev_err(dev, "Sensor %u: Can't register thermal zone\n", i); ret = PTR_ERR(zone); @@ -560,7 +560,7 @@ static int __maybe_unused rcar_gen3_thermal_resume(struct device *dev) priv->thermal_init(tsc); if (zone->ops->set_trips) - rcar_gen3_thermal_set_trips(tsc, zone->prev_low_trip, + rcar_gen3_thermal_set_trips(zone, zone->prev_low_trip, zone->prev_high_trip); } diff --git a/drivers/thermal/rcar_thermal.c b/drivers/thermal/rcar_thermal.c index 1d729ed4d685..4df42d70d867 100644 --- a/drivers/thermal/rcar_thermal.c +++ b/drivers/thermal/rcar_thermal.c @@ -271,13 +271,6 @@ static int rcar_thermal_get_current_temp(struct rcar_thermal_priv *priv, return 0; } -static int rcar_thermal_of_get_temp(void *data, int *temp) -{ - struct rcar_thermal_priv *priv = data; - - return rcar_thermal_get_current_temp(priv, temp); -} - static int rcar_thermal_get_temp(struct thermal_zone_device *zone, int *temp) { struct rcar_thermal_priv *priv = rcar_zone_to_priv(zone); @@ -323,8 +316,8 @@ static int rcar_thermal_get_trip_temp(struct thermal_zone_device *zone, return 0; } -static const struct thermal_zone_of_device_ops rcar_thermal_zone_of_ops = { - .get_temp = rcar_thermal_of_get_temp, +static struct thermal_zone_device_ops rcar_thermal_zone_of_ops = { + .get_temp = rcar_thermal_get_temp, }; static struct thermal_zone_device_ops rcar_thermal_zone_ops = { @@ -534,7 +527,7 @@ static int rcar_thermal_probe(struct platform_device *pdev) goto error_unregister; if (chip->use_of_thermal) { - priv->zone = devm_thermal_zone_of_sensor_register( + priv->zone = devm_thermal_of_zone_register( dev, i, priv, &rcar_thermal_zone_of_ops); } else { diff --git a/drivers/thermal/rockchip_thermal.c b/drivers/thermal/rockchip_thermal.c index dc3a9c276a09..819e059cde71 100644 --- a/drivers/thermal/rockchip_thermal.c +++ b/drivers/thermal/rockchip_thermal.c @@ -1211,9 +1211,9 @@ static irqreturn_t rockchip_thermal_alarm_irq_thread(int irq, void *dev) return IRQ_HANDLED; } -static int rockchip_thermal_set_trips(void *_sensor, int low, int high) +static int rockchip_thermal_set_trips(struct thermal_zone_device *tz, int low, int high) { - struct rockchip_thermal_sensor *sensor = _sensor; + struct rockchip_thermal_sensor *sensor = tz->devdata; struct rockchip_thermal_data *thermal = sensor->thermal; const struct rockchip_tsadc_chip *tsadc = thermal->chip; @@ -1224,9 +1224,9 @@ static int 
rockchip_thermal_set_trips(void *_sensor, int low, int high) sensor->id, thermal->regs, high); } -static int rockchip_thermal_get_temp(void *_sensor, int *out_temp) +static int rockchip_thermal_get_temp(struct thermal_zone_device *tz, int *out_temp) { - struct rockchip_thermal_sensor *sensor = _sensor; + struct rockchip_thermal_sensor *sensor = tz->devdata; struct rockchip_thermal_data *thermal = sensor->thermal; const struct rockchip_tsadc_chip *tsadc = sensor->thermal->chip; int retval; @@ -1239,7 +1239,7 @@ static int rockchip_thermal_get_temp(void *_sensor, int *out_temp) return retval; } -static const struct thermal_zone_of_device_ops rockchip_of_thermal_ops = { +static const struct thermal_zone_device_ops rockchip_of_thermal_ops = { .get_temp = rockchip_thermal_get_temp, .set_trips = rockchip_thermal_set_trips, }; @@ -1326,8 +1326,8 @@ rockchip_thermal_register_sensor(struct platform_device *pdev, sensor->thermal = thermal; sensor->id = id; - sensor->tzd = devm_thermal_zone_of_sensor_register(&pdev->dev, id, - sensor, &rockchip_of_thermal_ops); + sensor->tzd = devm_thermal_of_zone_register(&pdev->dev, id, sensor, + &rockchip_of_thermal_ops); if (IS_ERR(sensor->tzd)) { error = PTR_ERR(sensor->tzd); dev_err(&pdev->dev, "failed to register sensor %d: %d\n", diff --git a/drivers/thermal/rzg2l_thermal.c b/drivers/thermal/rzg2l_thermal.c index 51ae80eda6af..2e0649f38506 100644 --- a/drivers/thermal/rzg2l_thermal.c +++ b/drivers/thermal/rzg2l_thermal.c @@ -73,9 +73,9 @@ static inline void rzg2l_thermal_write(struct rzg2l_thermal_priv *priv, u32 reg, iowrite32(data, priv->base + reg); } -static int rzg2l_thermal_get_temp(void *devdata, int *temp) +static int rzg2l_thermal_get_temp(struct thermal_zone_device *tz, int *temp) { - struct rzg2l_thermal_priv *priv = devdata; + struct rzg2l_thermal_priv *priv = tz->devdata; u32 result = 0, dsensor, ts_code_ave; int val, i; @@ -114,7 +114,7 @@ static int rzg2l_thermal_get_temp(void *devdata, int *temp) return 0; } -static const struct thermal_zone_of_device_ops rzg2l_tz_of_ops = { +static const struct thermal_zone_device_ops rzg2l_tz_of_ops = { .get_temp = rzg2l_thermal_get_temp, }; @@ -207,8 +207,8 @@ static int rzg2l_thermal_probe(struct platform_device *pdev) goto err; } - zone = devm_thermal_zone_of_sensor_register(dev, 0, priv, - &rzg2l_tz_of_ops); + zone = devm_thermal_of_zone_register(dev, 0, priv, + &rzg2l_tz_of_ops); if (IS_ERR(zone)) { dev_err(dev, "Can't register thermal zone"); ret = PTR_ERR(zone); diff --git a/drivers/thermal/samsung/exynos_tmu.c b/drivers/thermal/samsung/exynos_tmu.c index f4ab4c5b4b62..51874d0a284c 100644 --- a/drivers/thermal/samsung/exynos_tmu.c +++ b/drivers/thermal/samsung/exynos_tmu.c @@ -650,9 +650,9 @@ static void exynos7_tmu_control(struct platform_device *pdev, bool on) writel(con, data->base + EXYNOS_TMU_REG_CONTROL); } -static int exynos_get_temp(void *p, int *temp) +static int exynos_get_temp(struct thermal_zone_device *tz, int *temp) { - struct exynos_tmu_data *data = p; + struct exynos_tmu_data *data = tz->devdata; int value, ret = 0; if (!data || !data->tmu_read) @@ -728,9 +728,9 @@ static void exynos4412_tmu_set_emulation(struct exynos_tmu_data *data, writel(val, data->base + emul_con); } -static int exynos_tmu_set_emulation(void *drv_data, int temp) +static int exynos_tmu_set_emulation(struct thermal_zone_device *tz, int temp) { - struct exynos_tmu_data *data = drv_data; + struct exynos_tmu_data *data = tz->devdata; int ret = -EINVAL; if (data->soc == SOC_ARCH_EXYNOS4210) @@ -750,7 +750,7 @@ out: } 
#else #define exynos4412_tmu_set_emulation NULL -static int exynos_tmu_set_emulation(void *drv_data, int temp) +static int exynos_tmu_set_emulation(struct thermal_zone_device *tz, int temp) { return -EINVAL; } #endif /* CONFIG_THERMAL_EMULATION */ @@ -997,7 +997,7 @@ static int exynos_map_dt_data(struct platform_device *pdev) return 0; } -static const struct thermal_zone_of_device_ops exynos_sensor_ops = { +static const struct thermal_zone_device_ops exynos_sensor_ops = { .get_temp = exynos_get_temp, .set_emul_temp = exynos_tmu_set_emulation, }; @@ -1091,8 +1091,8 @@ static int exynos_tmu_probe(struct platform_device *pdev) * data->tzd must be registered before calling exynos_tmu_initialize(), * requesting irq and calling exynos_tmu_control(). */ - data->tzd = thermal_zone_of_sensor_register(&pdev->dev, 0, data, - &exynos_sensor_ops); + data->tzd = devm_thermal_of_zone_register(&pdev->dev, 0, data, + &exynos_sensor_ops); if (IS_ERR(data->tzd)) { ret = PTR_ERR(data->tzd); if (ret != -EPROBE_DEFER) @@ -1104,21 +1104,19 @@ static int exynos_tmu_probe(struct platform_device *pdev) ret = exynos_tmu_initialize(pdev); if (ret) { dev_err(&pdev->dev, "Failed to initialize TMU\n"); - goto err_thermal; + goto err_sclk; } ret = devm_request_irq(&pdev->dev, data->irq, exynos_tmu_irq, IRQF_TRIGGER_RISING | IRQF_SHARED, dev_name(&pdev->dev), data); if (ret) { dev_err(&pdev->dev, "Failed to request irq: %d\n", data->irq); - goto err_thermal; + goto err_sclk; } exynos_tmu_control(pdev, true); return 0; -err_thermal: - thermal_zone_of_sensor_unregister(&pdev->dev, data->tzd); err_sclk: clk_disable_unprepare(data->sclk); err_clk: @@ -1136,9 +1134,7 @@ err_sensor: static int exynos_tmu_remove(struct platform_device *pdev) { struct exynos_tmu_data *data = platform_get_drvdata(pdev); - struct thermal_zone_device *tzd = data->tzd; - thermal_zone_of_sensor_unregister(&pdev->dev, tzd); exynos_tmu_control(pdev, false); clk_disable_unprepare(data->sclk); diff --git a/drivers/thermal/sprd_thermal.c b/drivers/thermal/sprd_thermal.c index fff80fc18002..ac884514f116 100644 --- a/drivers/thermal/sprd_thermal.c +++ b/drivers/thermal/sprd_thermal.c @@ -204,9 +204,9 @@ static int sprd_thm_temp_to_rawdata(int temp, struct sprd_thermal_sensor *sen) return clamp(val, val, (u32)(SPRD_THM_RAW_DATA_HIGH - 1)); } -static int sprd_thm_read_temp(void *devdata, int *temp) +static int sprd_thm_read_temp(struct thermal_zone_device *tz, int *temp) { - struct sprd_thermal_sensor *sen = devdata; + struct sprd_thermal_sensor *sen = tz->devdata; u32 data; data = readl(sen->data->base + SPRD_THM_TEMP(sen->id)) & @@ -217,7 +217,7 @@ static int sprd_thm_read_temp(void *devdata, int *temp) return 0; } -static const struct thermal_zone_of_device_ops sprd_thm_ops = { +static const struct thermal_zone_device_ops sprd_thm_ops = { .get_temp = sprd_thm_read_temp, }; @@ -408,10 +408,10 @@ static int sprd_thm_probe(struct platform_device *pdev) sprd_thm_sensor_init(thm, sen); - sen->tzd = devm_thermal_zone_of_sensor_register(sen->dev, - sen->id, - sen, - &sprd_thm_ops); + sen->tzd = devm_thermal_of_zone_register(sen->dev, + sen->id, + sen, + &sprd_thm_ops); if (IS_ERR(sen->tzd)) { dev_err(&pdev->dev, "register thermal zone failed %d\n", sen->id); @@ -523,8 +523,8 @@ static int sprd_thm_remove(struct platform_device *pdev) for (i = 0; i < thm->nr_sensors; i++) { sprd_thm_toggle_sensor(thm->sensor[i], false); - devm_thermal_zone_of_sensor_unregister(&pdev->dev, - thm->sensor[i]->tzd); + devm_thermal_of_zone_unregister(&pdev->dev, + thm->sensor[i]->tzd); 
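The exynos hunks above and the sprd hunk here show the payoff of device-managed registration: probe error paths and .remove no longer need explicit unregister calls, and a driver that still wants to drop a zone early can call devm_thermal_of_zone_unregister() as sprd does. A probe sketch under those assumptions, reusing the hypothetical example_sensor and example_ops from the earlier sketch:

        #include <linux/platform_device.h>
        #include <linux/slab.h>
        #include <linux/thermal.h>

        static int example_probe(struct platform_device *pdev)
        {
                struct example_sensor *s;
                struct thermal_zone_device *tzd;

                s = devm_kzalloc(&pdev->dev, sizeof(*s), GFP_KERNEL);
                if (!s)
                        return -ENOMEM;

                tzd = devm_thermal_of_zone_register(&pdev->dev, 0, s, &example_ops);
                if (IS_ERR(tzd))
                        return PTR_ERR(tzd);    /* nothing to unwind by hand */

                return 0;
        }
        /* .remove needs no thermal unregister call: devres releases the zone. */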
} clk_disable_unprepare(thm->clk); diff --git a/drivers/thermal/st/stm_thermal.c b/drivers/thermal/st/stm_thermal.c index 5fd3fb8912a6..78feb802a87d 100644 --- a/drivers/thermal/st/stm_thermal.c +++ b/drivers/thermal/st/stm_thermal.c @@ -302,9 +302,9 @@ static int stm_disable_irq(struct stm_thermal_sensor *sensor) return 0; } -static int stm_thermal_set_trips(void *data, int low, int high) +static int stm_thermal_set_trips(struct thermal_zone_device *tz, int low, int high) { - struct stm_thermal_sensor *sensor = data; + struct stm_thermal_sensor *sensor = tz->devdata; u32 itr1, th; int ret; @@ -350,9 +350,9 @@ static int stm_thermal_set_trips(void *data, int low, int high) } /* Callback to get temperature from HW */ -static int stm_thermal_get_temp(void *data, int *temp) +static int stm_thermal_get_temp(struct thermal_zone_device *tz, int *temp) { - struct stm_thermal_sensor *sensor = data; + struct stm_thermal_sensor *sensor = tz->devdata; u32 periods; int freqM, ret; @@ -474,7 +474,7 @@ static int stm_thermal_resume(struct device *dev) static SIMPLE_DEV_PM_OPS(stm_thermal_pm_ops, stm_thermal_suspend, stm_thermal_resume); -static const struct thermal_zone_of_device_ops stm_tz_ops = { +static const struct thermal_zone_device_ops stm_tz_ops = { .get_temp = stm_thermal_get_temp, .set_trips = stm_thermal_set_trips, }; @@ -539,9 +539,9 @@ static int stm_thermal_probe(struct platform_device *pdev) return ret; } - sensor->th_dev = devm_thermal_zone_of_sensor_register(&pdev->dev, 0, - sensor, - &stm_tz_ops); + sensor->th_dev = devm_thermal_of_zone_register(&pdev->dev, 0, + sensor, + &stm_tz_ops); if (IS_ERR(sensor->th_dev)) { dev_err(&pdev->dev, "%s: thermal zone sensor registering KO\n", @@ -572,7 +572,6 @@ static int stm_thermal_probe(struct platform_device *pdev) return 0; err_tz: - thermal_zone_of_sensor_unregister(&pdev->dev, sensor->th_dev); return ret; } @@ -582,7 +581,6 @@ static int stm_thermal_remove(struct platform_device *pdev) stm_thermal_sensor_off(sensor); thermal_remove_hwmon_sysfs(sensor->th_dev); - thermal_zone_of_sensor_unregister(&pdev->dev, sensor->th_dev); return 0; } diff --git a/drivers/thermal/sun8i_thermal.c b/drivers/thermal/sun8i_thermal.c index 212c87e63a66..e64d06d1328c 100644 --- a/drivers/thermal/sun8i_thermal.c +++ b/drivers/thermal/sun8i_thermal.c @@ -108,9 +108,9 @@ static int sun50i_h5_calc_temp(struct ths_device *tmdev, return -1590 * reg / 10 + 276000; } -static int sun8i_ths_get_temp(void *data, int *temp) +static int sun8i_ths_get_temp(struct thermal_zone_device *tz, int *temp) { - struct tsensor *s = data; + struct tsensor *s = tz->devdata; struct ths_device *tmdev = s->tmdev; int val = 0; @@ -135,7 +135,7 @@ static int sun8i_ths_get_temp(void *data, int *temp) return 0; } -static const struct thermal_zone_of_device_ops ths_ops = { +static const struct thermal_zone_device_ops ths_ops = { .get_temp = sun8i_ths_get_temp, }; @@ -468,10 +468,10 @@ static int sun8i_ths_register(struct ths_device *tmdev) tmdev->sensor[i].tmdev = tmdev; tmdev->sensor[i].id = i; tmdev->sensor[i].tzd = - devm_thermal_zone_of_sensor_register(tmdev->dev, - i, - &tmdev->sensor[i], - &ths_ops); + devm_thermal_of_zone_register(tmdev->dev, + i, + &tmdev->sensor[i], + &ths_ops); if (IS_ERR(tmdev->sensor[i].tzd)) return PTR_ERR(tmdev->sensor[i].tzd); diff --git a/drivers/thermal/tegra/soctherm.c b/drivers/thermal/tegra/soctherm.c index 825eab526619..1efe470f31e9 100644 --- a/drivers/thermal/tegra/soctherm.c +++ b/drivers/thermal/tegra/soctherm.c @@ -421,9 +421,9 @@ static int 
translate_temp(u16 val) return t; } -static int tegra_thermctl_get_temp(void *data, int *out_temp) +static int tegra_thermctl_get_temp(struct thermal_zone_device *tz, int *out_temp) { - struct tegra_thermctl_zone *zone = data; + struct tegra_thermctl_zone *zone = tz->devdata; u32 val; val = readl(zone->reg); @@ -582,10 +582,9 @@ static int tsensor_group_thermtrip_get(struct tegra_soctherm *ts, int id) return temp; } -static int tegra_thermctl_set_trip_temp(void *data, int trip, int temp) +static int tegra_thermctl_set_trip_temp(struct thermal_zone_device *tz, int trip, int temp) { - struct tegra_thermctl_zone *zone = data; - struct thermal_zone_device *tz = zone->tz; + struct tegra_thermctl_zone *zone = tz->devdata; struct tegra_soctherm *ts = zone->ts; const struct tegra_tsensor_group *sg = zone->sg; struct device *dev = zone->dev; @@ -657,9 +656,9 @@ static void thermal_irq_disable(struct tegra_thermctl_zone *zn) mutex_unlock(&zn->ts->thermctl_lock); } -static int tegra_thermctl_set_trips(void *data, int lo, int hi) +static int tegra_thermctl_set_trips(struct thermal_zone_device *tz, int lo, int hi) { - struct tegra_thermctl_zone *zone = data; + struct tegra_thermctl_zone *zone = tz->devdata; u32 r; thermal_irq_disable(zone); @@ -682,7 +681,7 @@ static int tegra_thermctl_set_trips(void *data, int lo, int hi) return 0; } -static const struct thermal_zone_of_device_ops tegra_of_thermal_ops = { +static const struct thermal_zone_device_ops tegra_of_thermal_ops = { .get_temp = tegra_thermctl_get_temp, .set_trip_temp = tegra_thermctl_set_trip_temp, .set_trips = tegra_thermctl_set_trips, @@ -2194,9 +2193,9 @@ static int tegra_soctherm_probe(struct platform_device *pdev) zone->sg = soc->ttgs[i]; zone->ts = tegra; - z = devm_thermal_zone_of_sensor_register(&pdev->dev, - soc->ttgs[i]->id, zone, - &tegra_of_thermal_ops); + z = devm_thermal_of_zone_register(&pdev->dev, + soc->ttgs[i]->id, zone, + &tegra_of_thermal_ops); if (IS_ERR(z)) { err = PTR_ERR(z); dev_err(&pdev->dev, "failed to register sensor: %d\n", diff --git a/drivers/thermal/tegra/tegra-bpmp-thermal.c b/drivers/thermal/tegra/tegra-bpmp-thermal.c index 5affc3d196be..eb84f0b9dc7c 100644 --- a/drivers/thermal/tegra/tegra-bpmp-thermal.c +++ b/drivers/thermal/tegra/tegra-bpmp-thermal.c @@ -30,9 +30,9 @@ struct tegra_bpmp_thermal { struct tegra_bpmp_thermal_zone **zones; }; -static int tegra_bpmp_thermal_get_temp(void *data, int *out_temp) +static int __tegra_bpmp_thermal_get_temp(struct tegra_bpmp_thermal_zone *zone, + int *out_temp) { - struct tegra_bpmp_thermal_zone *zone = data; struct mrq_thermal_host_to_bpmp_request req; union mrq_thermal_bpmp_to_host_response reply; struct tegra_bpmp_message msg; @@ -60,9 +60,14 @@ static int tegra_bpmp_thermal_get_temp(void *data, int *out_temp) return 0; } -static int tegra_bpmp_thermal_set_trips(void *data, int low, int high) +static int tegra_bpmp_thermal_get_temp(struct thermal_zone_device *tz, int *out_temp) { - struct tegra_bpmp_thermal_zone *zone = data; + return __tegra_bpmp_thermal_get_temp(tz->devdata, out_temp); +} + +static int tegra_bpmp_thermal_set_trips(struct thermal_zone_device *tz, int low, int high) +{ + struct tegra_bpmp_thermal_zone *zone = tz->devdata; struct mrq_thermal_host_to_bpmp_request req; struct tegra_bpmp_message msg; int err; @@ -157,7 +162,7 @@ static int tegra_bpmp_thermal_get_num_zones(struct tegra_bpmp *bpmp, return 0; } -static const struct thermal_zone_of_device_ops tegra_bpmp_of_thermal_ops = { +static const struct thermal_zone_device_ops tegra_bpmp_of_thermal_ops 
= { .get_temp = tegra_bpmp_thermal_get_temp, .set_trips = tegra_bpmp_thermal_set_trips, }; @@ -200,13 +205,13 @@ static int tegra_bpmp_thermal_probe(struct platform_device *pdev) zone->idx = i; zone->tegra = tegra; - err = tegra_bpmp_thermal_get_temp(zone, &temp); + err = __tegra_bpmp_thermal_get_temp(zone, &temp); if (err < 0) { devm_kfree(&pdev->dev, zone); continue; } - tzd = devm_thermal_zone_of_sensor_register( + tzd = devm_thermal_of_zone_register( &pdev->dev, i, zone, &tegra_bpmp_of_thermal_ops); if (IS_ERR(tzd)) { if (PTR_ERR(tzd) == -EPROBE_DEFER) diff --git a/drivers/thermal/tegra/tegra30-tsensor.c b/drivers/thermal/tegra/tegra30-tsensor.c index 05886684f429..c34501287e96 100644 --- a/drivers/thermal/tegra/tegra30-tsensor.c +++ b/drivers/thermal/tegra/tegra30-tsensor.c @@ -159,9 +159,9 @@ static void devm_tegra_tsensor_hw_disable(void *data) tegra_tsensor_hw_disable(ts); } -static int tegra_tsensor_get_temp(void *data, int *temp) +static int tegra_tsensor_get_temp(struct thermal_zone_device *tz, int *temp) { - const struct tegra_tsensor_channel *tsc = data; + const struct tegra_tsensor_channel *tsc = tz->devdata; const struct tegra_tsensor *ts = tsc->ts; int err, c1, c2, c3, c4, counter; u32 val; @@ -217,9 +217,9 @@ static int tegra_tsensor_temp_to_counter(const struct tegra_tsensor *ts, int tem return DIV_ROUND_CLOSEST(c2 * 1000000 - ts->calib.b, ts->calib.a); } -static int tegra_tsensor_set_trips(void *data, int low, int high) +static int tegra_tsensor_set_trips(struct thermal_zone_device *tz, int low, int high) { - const struct tegra_tsensor_channel *tsc = data; + const struct tegra_tsensor_channel *tsc = tz->devdata; const struct tegra_tsensor *ts = tsc->ts; u32 val; @@ -240,7 +240,7 @@ static int tegra_tsensor_set_trips(void *data, int low, int high) return 0; } -static const struct thermal_zone_of_device_ops ops = { +static const struct thermal_zone_device_ops ops = { .get_temp = tegra_tsensor_get_temp, .set_trips = tegra_tsensor_set_trips, }; @@ -516,7 +516,7 @@ static int tegra_tsensor_register_channel(struct tegra_tsensor *ts, tsc->id = id; tsc->regs = ts->regs + 0x40 * (hw_id + 1); - tsc->tzd = devm_thermal_zone_of_sensor_register(ts->dev, id, tsc, &ops); + tsc->tzd = devm_thermal_of_zone_register(ts->dev, id, tsc, &ops); if (IS_ERR(tsc->tzd)) { if (PTR_ERR(tsc->tzd) != -ENODEV) return dev_err_probe(ts->dev, PTR_ERR(tsc->tzd), diff --git a/drivers/thermal/thermal-generic-adc.c b/drivers/thermal/thermal-generic-adc.c index 73665c3ccfe0..323e273e3298 100644 --- a/drivers/thermal/thermal-generic-adc.c +++ b/drivers/thermal/thermal-generic-adc.c @@ -52,9 +52,9 @@ static int gadc_thermal_adc_to_temp(struct gadc_thermal_info *gti, int val) return temp; } -static int gadc_thermal_get_temp(void *data, int *temp) +static int gadc_thermal_get_temp(struct thermal_zone_device *tz, int *temp) { - struct gadc_thermal_info *gti = data; + struct gadc_thermal_info *gti = tz->devdata; int val; int ret; @@ -68,7 +68,7 @@ static int gadc_thermal_get_temp(void *data, int *temp) return 0; } -static const struct thermal_zone_of_device_ops gadc_thermal_ops = { +static const struct thermal_zone_device_ops gadc_thermal_ops = { .get_temp = gadc_thermal_get_temp, }; @@ -143,8 +143,8 @@ static int gadc_thermal_probe(struct platform_device *pdev) gti->dev = &pdev->dev; platform_set_drvdata(pdev, gti); - gti->tz_dev = devm_thermal_zone_of_sensor_register(&pdev->dev, 0, gti, - &gadc_thermal_ops); + gti->tz_dev = devm_thermal_of_zone_register(&pdev->dev, 0, gti, + &gadc_thermal_ops); if 
(IS_ERR(gti->tz_dev)) { ret = PTR_ERR(gti->tz_dev); if (ret != -EPROBE_DEFER) diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c index 50d50cec7774..7e669b60a065 100644 --- a/drivers/thermal/thermal_core.c +++ b/drivers/thermal/thermal_core.c @@ -295,27 +295,14 @@ static void thermal_zone_device_set_polling(struct thermal_zone_device *tz, cancel_delayed_work(&tz->poll_queue); } -static inline bool should_stop_polling(struct thermal_zone_device *tz) -{ - return !thermal_zone_device_is_enabled(tz); -} - static void monitor_thermal_zone(struct thermal_zone_device *tz) { - bool stop; - - stop = should_stop_polling(tz); - - mutex_lock(&tz->lock); - - if (!stop && tz->passive) + if (tz->mode != THERMAL_DEVICE_ENABLED) + thermal_zone_device_set_polling(tz, 0); + else if (tz->passive) thermal_zone_device_set_polling(tz, tz->passive_delay_jiffies); - else if (!stop && tz->polling_delay_jiffies) + else if (tz->polling_delay_jiffies) thermal_zone_device_set_polling(tz, tz->polling_delay_jiffies); - else - thermal_zone_device_set_polling(tz, 0); - - mutex_unlock(&tz->lock); } static void handle_non_critical_trips(struct thermal_zone_device *tz, int trip) @@ -383,18 +370,13 @@ static void handle_thermal_trip(struct thermal_zone_device *tz, int trip) handle_critical_trips(tz, trip, trip_temp, type); else handle_non_critical_trips(tz, trip); - /* - * Alright, we handled this trip successfully. - * So, start monitoring again. - */ - monitor_thermal_zone(tz); } static void update_temperature(struct thermal_zone_device *tz) { int temp, ret; - ret = thermal_zone_get_temp(tz, &temp); + ret = __thermal_zone_get_temp(tz, &temp); if (ret) { if (ret != -EAGAIN) dev_warn(&tz->device, @@ -403,10 +385,8 @@ static void update_temperature(struct thermal_zone_device *tz) return; } - mutex_lock(&tz->lock); tz->last_temperature = tz->temperature; tz->temperature = temp; - mutex_unlock(&tz->lock); trace_thermal_temperature(tz); @@ -469,15 +449,9 @@ EXPORT_SYMBOL_GPL(thermal_zone_device_disable); int thermal_zone_device_is_enabled(struct thermal_zone_device *tz) { - enum thermal_device_mode mode; - - mutex_lock(&tz->lock); - - mode = tz->mode; + lockdep_assert_held(&tz->lock); - mutex_unlock(&tz->lock); - - return mode == THERMAL_DEVICE_ENABLED; + return tz->mode == THERMAL_DEVICE_ENABLED; } void thermal_zone_device_update(struct thermal_zone_device *tz, @@ -485,9 +459,6 @@ void thermal_zone_device_update(struct thermal_zone_device *tz, { int count; - if (should_stop_polling(tz)) - return; - if (atomic_read(&in_suspend)) return; @@ -495,14 +466,23 @@ void thermal_zone_device_update(struct thermal_zone_device *tz, "'get_temp' ops set\n", __func__)) return; + mutex_lock(&tz->lock); + + if (!thermal_zone_device_is_enabled(tz)) + goto out; + update_temperature(tz); - thermal_zone_set_trips(tz); + __thermal_zone_set_trips(tz); tz->notify_event = event; for (count = 0; count < tz->num_trips; count++) handle_thermal_trip(tz, count); + + monitor_thermal_zone(tz); +out: + mutex_unlock(&tz->lock); } EXPORT_SYMBOL_GPL(thermal_zone_device_update); @@ -1212,7 +1192,20 @@ thermal_zone_device_register_with_trips(const char *type, struct thermal_trip *t return ERR_PTR(-EINVAL); } - if (num_trips > THERMAL_MAX_TRIPS || num_trips < 0 || mask >> num_trips) { + /* + * Max trip count can't exceed 31 as the "mask >> num_trips" condition. 
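The rationale continues in the comment below; condensed into a standalone helper, the new validation from this hunk reads:

        #include <linux/bits.h>
        #include <linux/errno.h>

        static int example_check_trips(int num_trips, unsigned int mask)
        {
                /*
                 * Shifting a 32-bit value by 32 is undefined behaviour; x86
                 * commonly computes mask >> 32 as mask itself, so the shift
                 * check would misfire for num_trips == 32. Hence the cap at
                 * BITS_PER_TYPE(int) - 1 == 31 trip points.
                 */
                if (num_trips > BITS_PER_TYPE(int) - 1 || num_trips < 0 ||
                    mask >> num_trips)
                        return -EINVAL;

                return 0;
        }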
+ * For example, shifting by 32 will result in compiler warning: + * warning: right shift count >= width of type [-Wshift-count-overflow] + * + * Also "mask >> num_trips" will always be true with 32 bit shift. + * E.g. mask = 0x80000000 for trip id 31 to be RW. Then + * mask >> 32 = 0x80000000 + * This will result in failure for the below condition. + * + * Check will be true when the bit 31 of the mask is set. + * 32 bit shift will cause overflow of 4 byte integer. + */ + if (num_trips > (BITS_PER_TYPE(int) - 1) || num_trips < 0 || mask >> num_trips) { pr_err("Incorrect number of thermal trips\n"); return ERR_PTR(-EINVAL); } @@ -1239,7 +1232,7 @@ thermal_zone_device_register_with_trips(const char *type, struct thermal_trip *t } tz->id = id; - strlcpy(tz->type, type, sizeof(tz->type)); + strscpy(tz->type, type, sizeof(tz->type)); result = dev_set_name(&tz->device, "thermal_zone%d", tz->id); if (result) @@ -1458,9 +1451,6 @@ static int thermal_pm_notify(struct notifier_block *nb, case PM_POST_SUSPEND: atomic_set(&in_suspend, 0); list_for_each_entry(tz, &thermal_tz_list, node) { - if (!thermal_zone_device_is_enabled(tz)) - continue; - thermal_zone_device_init(tz); thermal_zone_device_update(tz, THERMAL_EVENT_UNSPECIFIED); @@ -1492,10 +1482,6 @@ static int __init thermal_init(void) if (result) goto unregister_governors; - result = of_parse_thermal_zones(); - if (result) - goto unregister_class; - result = register_pm_notifier(&thermal_pm_nb); if (result) pr_warn("Thermal: Can not register suspend notifier, return %d\n", @@ -1503,8 +1489,6 @@ static int __init thermal_init(void) return 0; -unregister_class: - class_unregister(&thermal_class); unregister_governors: thermal_unregister_governors(); error: diff --git a/drivers/thermal/thermal_core.h b/drivers/thermal/thermal_core.h index c991bb290512..1571917bd3c8 100644 --- a/drivers/thermal/thermal_core.h +++ b/drivers/thermal/thermal_core.h @@ -112,6 +112,8 @@ int thermal_build_list_of_policies(char *buf); /* Helpers */ void thermal_zone_set_trips(struct thermal_zone_device *tz); +void __thermal_zone_set_trips(struct thermal_zone_device *tz); +int __thermal_zone_get_temp(struct thermal_zone_device *tz, int *temp); /* sysfs I/F */ int thermal_zone_create_device_groups(struct thermal_zone_device *, int); @@ -135,13 +137,11 @@ thermal_cooling_device_stats_update(struct thermal_cooling_device *cdev, /* device tree support */ #ifdef CONFIG_THERMAL_OF -int of_parse_thermal_zones(void); int of_thermal_get_ntrips(struct thermal_zone_device *); bool of_thermal_is_trip_valid(struct thermal_zone_device *, int); const struct thermal_trip * of_thermal_get_trip_points(struct thermal_zone_device *); #else -static inline int of_parse_thermal_zones(void) { return 0; } static inline int of_thermal_get_ntrips(struct thermal_zone_device *tz) { return 0; diff --git a/drivers/thermal/thermal_helpers.c b/drivers/thermal/thermal_helpers.c index 690890f054a3..c65cdce8f856 100644 --- a/drivers/thermal/thermal_helpers.c +++ b/drivers/thermal/thermal_helpers.c @@ -64,27 +64,17 @@ get_thermal_instance(struct thermal_zone_device *tz, } EXPORT_SYMBOL(get_thermal_instance); -/** - * thermal_zone_get_temp() - returns the temperature of a thermal zone - * @tz: a valid pointer to a struct thermal_zone_device - * @temp: a valid pointer to where to store the resulting temperature. - * - * When a valid thermal zone reference is passed, it will fetch its - * temperature and fill @temp.
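The strlcpy() to strscpy() swap above is part of a tree-wide cleanup: strscpy() always NUL-terminates the destination, never reads past an unterminated source, and reports truncation through its return value instead of returning strlen(src). A usage sketch with a hypothetical buffer:

        #include <linux/errno.h>
        #include <linux/printk.h>
        #include <linux/string.h>

        static void example_copy_name(char *dst, size_t dst_len, const char *src)
        {
                /* Returns the number of chars copied, or -E2BIG on truncation. */
                ssize_t ret = strscpy(dst, src, dst_len);

                if (ret == -E2BIG)
                        pr_warn("example: name '%s' truncated\n", dst);
        }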
- * - * Return: On success returns 0, an error code otherwise - */ -int thermal_zone_get_temp(struct thermal_zone_device *tz, int *temp) +int __thermal_zone_get_temp(struct thermal_zone_device *tz, int *temp) { int ret = -EINVAL; int count; int crit_temp = INT_MAX; enum thermal_trip_type type; - if (!tz || IS_ERR(tz) || !tz->ops->get_temp) - goto exit; + lockdep_assert_held(&tz->lock); - mutex_lock(&tz->lock); + if (!tz || IS_ERR(tz) || !tz->ops->get_temp) + return -EINVAL; ret = tz->ops->get_temp(tz, temp); @@ -107,35 +97,42 @@ int thermal_zone_get_temp(struct thermal_zone_device *tz, int *temp) *temp = tz->emul_temperature; } - mutex_unlock(&tz->lock); -exit: return ret; } -EXPORT_SYMBOL_GPL(thermal_zone_get_temp); /** - * thermal_zone_set_trips - Computes the next trip points for the driver - * @tz: a pointer to a thermal zone device structure + * thermal_zone_get_temp() - returns the temperature of a thermal zone + * @tz: a valid pointer to a struct thermal_zone_device + * @temp: a valid pointer to where to store the resulting temperature. * - * The function computes the next temperature boundaries by browsing - * the trip points. The result is the closer low and high trip points - * to the current temperature. These values are passed to the backend - * driver to let it set its own notification mechanism (usually an - * interrupt). + * When a valid thermal zone reference is passed, it will fetch its + * temperature and fill @temp. * - * It does not return a value + * Return: On success returns 0, an error code otherwise */ -void thermal_zone_set_trips(struct thermal_zone_device *tz) +int thermal_zone_get_temp(struct thermal_zone_device *tz, int *temp) +{ + int ret; + + mutex_lock(&tz->lock); + ret = __thermal_zone_get_temp(tz, temp); + mutex_unlock(&tz->lock); + + return ret; +} +EXPORT_SYMBOL_GPL(thermal_zone_get_temp); + +void __thermal_zone_set_trips(struct thermal_zone_device *tz) { int low = -INT_MAX; int high = INT_MAX; int trip_temp, hysteresis; int i, ret; - mutex_lock(&tz->lock); + lockdep_assert_held(&tz->lock); if (!tz->ops->set_trips || !tz->ops->get_trip_hyst) - goto exit; + return; for (i = 0; i < tz->num_trips; i++) { int trip_low; @@ -154,7 +151,7 @@ void thermal_zone_set_trips(struct thermal_zone_device *tz) /* No need to change trip points */ if (tz->prev_low_trip == low && tz->prev_high_trip == high) - goto exit; + return; tz->prev_low_trip = low; tz->prev_high_trip = high; @@ -169,8 +166,24 @@ void thermal_zone_set_trips(struct thermal_zone_device *tz) ret = tz->ops->set_trips(tz, low, high); if (ret) dev_err(&tz->device, "Failed to set trips: %d\n", ret); +} -exit: +/** + * thermal_zone_set_trips - Computes the next trip points for the driver + * @tz: a pointer to a thermal zone device structure + * + * The function computes the next temperature boundaries by browsing + * the trip points. The result is the closer low and high trip points + * to the current temperature. These values are passed to the backend + * driver to let it set its own notification mechanism (usually an + * interrupt). 
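The get_temp/set_trips split above follows a common kernel convention: a double-underscore helper that asserts via lockdep that the caller holds the lock, plus a thin locking wrapper for external callers. A minimal sketch of the pattern (the example_zone type and function names are invented, not the patch's code):

#include <linux/lockdep.h>
#include <linux/mutex.h>

struct example_zone {
	struct mutex lock;
	int temperature;
	int last_temperature;
};

static void __example_update(struct example_zone *z, int new_temp)
{
	lockdep_assert_held(&z->lock);	/* caller supplies the locking */

	z->last_temperature = z->temperature;
	z->temperature = new_temp;
}

static void example_update(struct example_zone *z, int new_temp)
{
	mutex_lock(&z->lock);
	__example_update(z, new_temp);
	mutex_unlock(&z->lock);
}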
+ * + * It does not return a value + */ +void thermal_zone_set_trips(struct thermal_zone_device *tz) +{ + mutex_lock(&tz->lock); + __thermal_zone_set_trips(tz); mutex_unlock(&tz->lock); } diff --git a/drivers/thermal/thermal_hwmon.c b/drivers/thermal/thermal_hwmon.c index 09e49ec8b6f4..f53f4ceb6a5d 100644 --- a/drivers/thermal/thermal_hwmon.c +++ b/drivers/thermal/thermal_hwmon.c @@ -147,7 +147,7 @@ int thermal_add_hwmon_sysfs(struct thermal_zone_device *tz) return -ENOMEM; INIT_LIST_HEAD(&hwmon->tz_list); - strlcpy(hwmon->type, tz->type, THERMAL_NAME_LENGTH); + strscpy(hwmon->type, tz->type, THERMAL_NAME_LENGTH); strreplace(hwmon->type, '-', '_'); hwmon->device = hwmon_device_register_for_thermal(&tz->device, hwmon->type, hwmon); diff --git a/drivers/thermal/thermal_mmio.c b/drivers/thermal/thermal_mmio.c index 360b0dfdc3b0..39c921415989 100644 --- a/drivers/thermal/thermal_mmio.c +++ b/drivers/thermal/thermal_mmio.c @@ -20,11 +20,10 @@ static u32 thermal_mmio_readb(void __iomem *mmio_base) return readb(mmio_base); } -static int thermal_mmio_get_temperature(void *private, int *temp) +static int thermal_mmio_get_temperature(struct thermal_zone_device *tz, int *temp) { int t; - struct thermal_mmio *sensor = - (struct thermal_mmio *)private; + struct thermal_mmio *sensor = tz->devdata; t = sensor->read_mmio(sensor->mmio_base) & sensor->mask; t *= sensor->factor; @@ -34,7 +33,7 @@ static int thermal_mmio_get_temperature(void *private, int *temp) return 0; } -static const struct thermal_zone_of_device_ops thermal_mmio_ops = { +static const struct thermal_zone_device_ops thermal_mmio_ops = { .get_temp = thermal_mmio_get_temperature, }; @@ -68,10 +67,10 @@ static int thermal_mmio_probe(struct platform_device *pdev) } } - thermal_zone = devm_thermal_zone_of_sensor_register(&pdev->dev, - 0, - sensor, - &thermal_mmio_ops); + thermal_zone = devm_thermal_of_zone_register(&pdev->dev, + 0, + sensor, + &thermal_mmio_ops); if (IS_ERR(thermal_zone)) { dev_err(&pdev->dev, "failed to register sensor (%ld)\n", @@ -79,7 +78,7 @@ static int thermal_mmio_probe(struct platform_device *pdev) return PTR_ERR(thermal_zone); } - thermal_mmio_get_temperature(sensor, &temperature); + thermal_mmio_get_temperature(thermal_zone, &temperature); dev_info(&pdev->dev, "thermal mmio sensor %s registered, current temperature: %d\n", pdev->name, temperature); @@ -107,7 +106,7 @@ static struct platform_driver thermal_mmio_driver = { .probe = thermal_mmio_probe, .driver = { .name = "thermal-mmio", - .of_match_table = of_match_ptr(thermal_mmio_id_table), + .of_match_table = thermal_mmio_id_table, }, }; diff --git a/drivers/thermal/thermal_of.c b/drivers/thermal/thermal_of.c index 802c30b72a92..fd2fb84bf246 100644 --- a/drivers/thermal/thermal_of.c +++ b/drivers/thermal/thermal_of.c @@ -19,93 +19,6 @@ #include "thermal_core.h" -/*** Private data structures to represent thermal device tree data ***/ - -/** - * struct __thermal_cooling_bind_param - a cooling device for a trip point - * @cooling_device: a pointer to identify the referred cooling device - * @min: minimum cooling state used at this trip point - * @max: maximum cooling state used at this trip point - */ - -struct __thermal_cooling_bind_param { - struct device_node *cooling_device; - unsigned long min; - unsigned long max; -}; - -/** - * struct __thermal_bind_params - a match between trip and cooling device - * @tcbp: a pointer to an array of cooling devices - * @count: number of elements in array - * @trip_id: the trip point index - * @usage: the percentage (from 0 to 
100) of cooling contribution - */ - -struct __thermal_bind_params { - struct __thermal_cooling_bind_param *tcbp; - unsigned int count; - unsigned int trip_id; - unsigned int usage; -}; - -/** - * struct __thermal_zone - internal representation of a thermal zone - * @passive_delay: polling interval while passive cooling is activated - * @polling_delay: zone polling interval - * @slope: slope of the temperature adjustment curve - * @offset: offset of the temperature adjustment curve - * @ntrips: number of trip points - * @trips: an array of trip points (0..ntrips - 1) - * @num_tbps: number of thermal bind params - * @tbps: an array of thermal bind params (0..num_tbps - 1) - * @sensor_data: sensor private data used while reading temperature and trend - * @ops: set of callbacks to handle the thermal zone based on DT - */ - -struct __thermal_zone { - int passive_delay; - int polling_delay; - int slope; - int offset; - - /* trip data */ - int ntrips; - struct thermal_trip *trips; - - /* cooling binding data */ - int num_tbps; - struct __thermal_bind_params *tbps; - - /* sensor interface */ - void *sensor_data; - const struct thermal_zone_of_device_ops *ops; -}; - -/*** DT thermal zone device callbacks ***/ - -static int of_thermal_get_temp(struct thermal_zone_device *tz, - int *temp) -{ - struct __thermal_zone *data = tz->devdata; - - if (!data->ops || !data->ops->get_temp) - return -EINVAL; - - return data->ops->get_temp(data->sensor_data, temp); -} - -static int of_thermal_set_trips(struct thermal_zone_device *tz, - int low, int high) -{ - struct __thermal_zone *data = tz->devdata; - - if (!data->ops || !data->ops->set_trips) - return -EINVAL; - - return data->ops->set_trips(data->sensor_data, low, high); -} - /** * of_thermal_get_ntrips - function to export number of available trip * points. 
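What the deletions above remove is a double dispatch: the thermal core called OF-layer shims, which in turn looked up a second ops table stashed in __thermal_zone. A reduced sketch of that removed indirection (simplified, invented types for illustration):

#include <linux/errno.h>

struct legacy_of_ops {
	int (*get_temp)(void *sensor_data, int *temp);
};

struct legacy_of_zone {
	void *sensor_data;
	const struct legacy_of_ops *ops;
};

static int legacy_get_temp(struct legacy_of_zone *data, int *temp)
{
	if (!data->ops || !data->ops->get_temp)
		return -EINVAL;

	/* One extra hop per call; with tz->devdata the driver is called directly. */
	return data->ops->get_temp(data->sensor_data, temp);
}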
@@ -158,114 +71,6 @@ of_thermal_get_trip_points(struct thermal_zone_device *tz) } EXPORT_SYMBOL_GPL(of_thermal_get_trip_points); -/** - * of_thermal_set_emul_temp - function to set emulated temperature - * - * @tz: pointer to a thermal zone - * @temp: temperature to set - * - * This function gives the ability to set emulated value of temperature, - * which is handy for debugging - * - * Return: zero on success, error code otherwise - */ -static int of_thermal_set_emul_temp(struct thermal_zone_device *tz, - int temp) -{ - struct __thermal_zone *data = tz->devdata; - - if (!data->ops || !data->ops->set_emul_temp) - return -EINVAL; - - return data->ops->set_emul_temp(data->sensor_data, temp); -} - -static int of_thermal_get_trend(struct thermal_zone_device *tz, int trip, - enum thermal_trend *trend) -{ - struct __thermal_zone *data = tz->devdata; - - if (!data->ops || !data->ops->get_trend) - return -EINVAL; - - return data->ops->get_trend(data->sensor_data, trip, trend); -} - -static int of_thermal_change_mode(struct thermal_zone_device *tz, - enum thermal_device_mode mode) -{ - struct __thermal_zone *data = tz->devdata; - - return data->ops->change_mode(data->sensor_data, mode); -} - -static int of_thermal_bind(struct thermal_zone_device *thermal, - struct thermal_cooling_device *cdev) -{ - struct __thermal_zone *data = thermal->devdata; - struct __thermal_bind_params *tbp; - struct __thermal_cooling_bind_param *tcbp; - int i, j; - - if (!data || IS_ERR(data)) - return -ENODEV; - - /* find where to bind */ - for (i = 0; i < data->num_tbps; i++) { - tbp = data->tbps + i; - - for (j = 0; j < tbp->count; j++) { - tcbp = tbp->tcbp + j; - - if (tcbp->cooling_device == cdev->np) { - int ret; - - ret = thermal_zone_bind_cooling_device(thermal, - tbp->trip_id, cdev, - tcbp->max, - tcbp->min, - tbp->usage); - if (ret) - return ret; - } - } - } - - return 0; -} - -static int of_thermal_unbind(struct thermal_zone_device *thermal, - struct thermal_cooling_device *cdev) -{ - struct __thermal_zone *data = thermal->devdata; - struct __thermal_bind_params *tbp; - struct __thermal_cooling_bind_param *tcbp; - int i, j; - - if (!data || IS_ERR(data)) - return -ENODEV; - - /* find where to unbind */ - for (i = 0; i < data->num_tbps; i++) { - tbp = data->tbps + i; - - for (j = 0; j < tbp->count; j++) { - tcbp = tbp->tcbp + j; - - if (tcbp->cooling_device == cdev->np) { - int ret; - - ret = thermal_zone_unbind_cooling_device(thermal, - tbp->trip_id, cdev); - if (ret) - return ret; - } - } - } - - return 0; -} - static int of_thermal_get_trip_type(struct thermal_zone_device *tz, int trip, enum thermal_trip_type *type) { @@ -288,28 +93,6 @@ static int of_thermal_get_trip_temp(struct thermal_zone_device *tz, int trip, return 0; } -static int of_thermal_set_trip_temp(struct thermal_zone_device *tz, int trip, - int temp) -{ - struct __thermal_zone *data = tz->devdata; - - if (trip >= tz->num_trips || trip < 0) - return -EDOM; - - if (data->ops && data->ops->set_trip_temp) { - int ret; - - ret = data->ops->set_trip_temp(data->sensor_data, trip, temp); - if (ret) - return ret; - } - - /* thermal framework should take care of data->mask & (1 << trip) */ - tz->trips[trip].temperature = temp; - - return 0; -} - static int of_thermal_get_trip_hyst(struct thermal_zone_device *tz, int trip, int *hyst) { @@ -347,62 +130,6 @@ static int of_thermal_get_crit_temp(struct thermal_zone_device *tz, return -EINVAL; } -static struct thermal_zone_device_ops of_thermal_ops = { - .get_trip_type = of_thermal_get_trip_type, - 
.get_trip_temp = of_thermal_get_trip_temp, - .set_trip_temp = of_thermal_set_trip_temp, - .get_trip_hyst = of_thermal_get_trip_hyst, - .set_trip_hyst = of_thermal_set_trip_hyst, - .get_crit_temp = of_thermal_get_crit_temp, - - .bind = of_thermal_bind, - .unbind = of_thermal_unbind, -}; - -/*** sensor API ***/ - -static struct thermal_zone_device * -thermal_zone_of_add_sensor(struct device_node *zone, - struct device_node *sensor, void *data, - const struct thermal_zone_of_device_ops *ops) -{ - struct thermal_zone_device *tzd; - struct __thermal_zone *tz; - - tzd = thermal_zone_get_zone_by_name(zone->name); - if (IS_ERR(tzd)) - return ERR_PTR(-EPROBE_DEFER); - - tz = tzd->devdata; - - if (!ops) - return ERR_PTR(-EINVAL); - - mutex_lock(&tzd->lock); - tz->ops = ops; - tz->sensor_data = data; - - tzd->ops->get_temp = of_thermal_get_temp; - tzd->ops->get_trend = of_thermal_get_trend; - - /* - * The thermal zone core will calculate the window if they have set the - * optional set_trips pointer. - */ - if (ops->set_trips) - tzd->ops->set_trips = of_thermal_set_trips; - - if (ops->set_emul_temp) - tzd->ops->set_emul_temp = of_thermal_set_emul_temp; - - if (ops->change_mode) - tzd->ops->change_mode = of_thermal_change_mode; - - mutex_unlock(&tzd->lock); - - return tzd; -} - /** * thermal_zone_of_get_sensor_id - get sensor ID from a DT thermal zone * @tz_np: a valid thermal zone device node. @@ -447,207 +174,6 @@ int thermal_zone_of_get_sensor_id(struct device_node *tz_np, } EXPORT_SYMBOL_GPL(thermal_zone_of_get_sensor_id); -/** - * thermal_zone_of_sensor_register - registers a sensor to a DT thermal zone - * @dev: a valid struct device pointer of a sensor device. Must contain - * a valid .of_node, for the sensor node. - * @sensor_id: a sensor identifier, in case the sensor IP has more - * than one sensors - * @data: a private pointer (owned by the caller) that will be passed - * back, when a temperature reading is needed. - * @ops: struct thermal_zone_of_device_ops *. Must contain at least .get_temp. - * - * This function will search the list of thermal zones described in device - * tree and look for the zone that refer to the sensor device pointed by - * @dev->of_node as temperature providers. For the zone pointing to the - * sensor node, the sensor will be added to the DT thermal zone device. - * - * The thermal zone temperature is provided by the @get_temp function - * pointer. When called, it will have the private pointer @data back. - * - * The thermal zone temperature trend is provided by the @get_trend function - * pointer. When called, it will have the private pointer @data back. - * - * TODO: - * 01 - This function must enqueue the new sensor instead of using - * it as the only source of temperature values. - * - * 02 - There must be a way to match the sensor with all thermal zones - * that refer to it. - * - * Return: On success returns a valid struct thermal_zone_device, - * otherwise, it returns a corresponding ERR_PTR(). Caller must - * check the return value with help of IS_ERR() helper. 
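Sensor-to-zone matching, both in the kept thermal_zone_of_get_sensor_id() path and in the new of_thermal_zone_find() later in this patch, boils down to comparing the zone's "thermal-sensors" phandle and its cell argument against the sensor node and id. A condensed sketch (zone_references_sensor() is an invented helper, and only the first phandle entry is checked):

#include <linux/of.h>
#include <linux/types.h>

static bool zone_references_sensor(struct device_node *tz,
				   struct device_node *sensor, int id)
{
	struct of_phandle_args spec;
	bool match;

	if (of_parse_phandle_with_args(tz, "thermal-sensors",
				       "#thermal-sensor-cells", 0, &spec))
		return false;

	/* A specifier without cells implies sensor id 0. */
	match = spec.np == sensor &&
		id == (spec.args_count ? spec.args[0] : 0);

	of_node_put(spec.np);

	return match;
}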
- */ -struct thermal_zone_device * -thermal_zone_of_sensor_register(struct device *dev, int sensor_id, void *data, - const struct thermal_zone_of_device_ops *ops) -{ - struct device_node *np, *child, *sensor_np; - struct thermal_zone_device *tzd = ERR_PTR(-ENODEV); - - np = of_find_node_by_name(NULL, "thermal-zones"); - if (!np) - return ERR_PTR(-ENODEV); - - if (!dev || !dev->of_node) { - of_node_put(np); - return ERR_PTR(-ENODEV); - } - - sensor_np = of_node_get(dev->of_node); - - for_each_available_child_of_node(np, child) { - int ret, id; - - /* For now, thermal framework supports only 1 sensor per zone */ - ret = thermal_zone_of_get_sensor_id(child, sensor_np, &id); - if (ret) - continue; - - if (id == sensor_id) { - tzd = thermal_zone_of_add_sensor(child, sensor_np, - data, ops); - if (!IS_ERR(tzd)) - thermal_zone_device_enable(tzd); - - of_node_put(child); - goto exit; - } - } -exit: - of_node_put(sensor_np); - of_node_put(np); - - return tzd; -} -EXPORT_SYMBOL_GPL(thermal_zone_of_sensor_register); - -/** - * thermal_zone_of_sensor_unregister - unregisters a sensor from a DT thermal zone - * @dev: a valid struct device pointer of a sensor device. Must contain - * a valid .of_node, for the sensor node. - * @tzd: a pointer to struct thermal_zone_device where the sensor is registered. - * - * This function removes the sensor callbacks and private data from the - * thermal zone device registered with thermal_zone_of_sensor_register() - * API. It will also silent the zone by remove the .get_temp() and .get_trend() - * thermal zone device callbacks. - * - * TODO: When the support to several sensors per zone is added, this - * function must search the sensor list based on @dev parameter. - * - */ -void thermal_zone_of_sensor_unregister(struct device *dev, - struct thermal_zone_device *tzd) -{ - struct __thermal_zone *tz; - - if (!dev || !tzd || !tzd->devdata) - return; - - tz = tzd->devdata; - - /* no __thermal_zone, nothing to be done */ - if (!tz) - return; - - /* stop temperature polling */ - thermal_zone_device_disable(tzd); - - mutex_lock(&tzd->lock); - tzd->ops->get_temp = NULL; - tzd->ops->get_trend = NULL; - tzd->ops->set_emul_temp = NULL; - tzd->ops->change_mode = NULL; - - tz->ops = NULL; - tz->sensor_data = NULL; - mutex_unlock(&tzd->lock); -} -EXPORT_SYMBOL_GPL(thermal_zone_of_sensor_unregister); - -static void devm_thermal_zone_of_sensor_release(struct device *dev, void *res) -{ - thermal_zone_of_sensor_unregister(dev, - *(struct thermal_zone_device **)res); -} - -static int devm_thermal_zone_of_sensor_match(struct device *dev, void *res, - void *data) -{ - struct thermal_zone_device **r = res; - - if (WARN_ON(!r || !*r)) - return 0; - - return *r == data; -} - -/** - * devm_thermal_zone_of_sensor_register - Resource managed version of - * thermal_zone_of_sensor_register() - * @dev: a valid struct device pointer of a sensor device. Must contain - * a valid .of_node, for the sensor node. - * @sensor_id: a sensor identifier, in case the sensor IP has more - * than one sensors - * @data: a private pointer (owned by the caller) that will be passed - * back, when a temperature reading is needed. - * @ops: struct thermal_zone_of_device_ops *. Must contain at least .get_temp. - * - * Refer thermal_zone_of_sensor_register() for more details. - * - * Return: On success returns a valid struct thermal_zone_device, - * otherwise, it returns a corresponding ERR_PTR(). Caller must - * check the return value with help of IS_ERR() helper. 
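Both the devm_* helper removed here and its replacement later in this patch lean on the same devres idiom: allocate a release record, store the object pointer in it, and hand it to the driver core so teardown runs automatically on detach. A generic sketch of the idiom (the demo_* names are invented):

#include <linux/device.h>
#include <linux/gfp.h>

static void demo_release(struct device *dev, void *res)
{
	/* Teardown for *(struct demo **)res would run here on driver detach. */
}

static void *demo_register_managed(struct device *dev, void *object)
{
	void **ptr;

	ptr = devres_alloc(demo_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	*ptr = object;
	devres_add(dev, ptr);	/* the driver core calls demo_release() later */

	return object;
}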
- * Registered thermal_zone_device device will automatically be - * released when device is unbounded. - */ -struct thermal_zone_device *devm_thermal_zone_of_sensor_register( - struct device *dev, int sensor_id, - void *data, const struct thermal_zone_of_device_ops *ops) -{ - struct thermal_zone_device **ptr, *tzd; - - ptr = devres_alloc(devm_thermal_zone_of_sensor_release, sizeof(*ptr), - GFP_KERNEL); - if (!ptr) - return ERR_PTR(-ENOMEM); - - tzd = thermal_zone_of_sensor_register(dev, sensor_id, data, ops); - if (IS_ERR(tzd)) { - devres_free(ptr); - return tzd; - } - - *ptr = tzd; - devres_add(dev, ptr); - - return tzd; -} -EXPORT_SYMBOL_GPL(devm_thermal_zone_of_sensor_register); - -/** - * devm_thermal_zone_of_sensor_unregister - Resource managed version of - * thermal_zone_of_sensor_unregister(). - * @dev: Device for which which resource was allocated. - * @tzd: a pointer to struct thermal_zone_device where the sensor is registered. - * - * This function removes the sensor callbacks and private data from the - * thermal zone device registered with devm_thermal_zone_of_sensor_register() - * API. It will also silent the zone by remove the .get_temp() and .get_trend() - * thermal zone device callbacks. - * Normally this function will not need to be called and the resource - * management code will ensure that the resource is freed. - */ -void devm_thermal_zone_of_sensor_unregister(struct device *dev, - struct thermal_zone_device *tzd) -{ - WARN_ON(devres_release(dev, devm_thermal_zone_of_sensor_release, - devm_thermal_zone_of_sensor_match, tzd)); -} -EXPORT_SYMBOL_GPL(devm_thermal_zone_of_sensor_unregister); - /*** functions parsing device tree nodes ***/ static int of_find_trip_id(struct device_node *np, struct device_node *trip) @@ -679,98 +205,6 @@ out: return i; } -/** - * thermal_of_populate_bind_params - parse and fill cooling map data - * @np: DT node containing a cooling-map node - * @__tbp: data structure to be filled with cooling map info - * @trips: array of thermal zone trip points - * @ntrips: number of trip points inside trips. - * - * This function parses a cooling-map type of node represented by - * @np parameter and fills the read data into @__tbp data structure. - * It needs the already parsed array of trip points of the thermal zone - * in consideration. - * - * Return: 0 on success, proper error code otherwise - */ -static int thermal_of_populate_bind_params(struct device_node *tz_np, - struct device_node *np, - struct __thermal_bind_params *__tbp) -{ - struct of_phandle_args cooling_spec; - struct __thermal_cooling_bind_param *__tcbp; - struct device_node *trip; - int ret, i, count; - int trip_id; - u32 prop; - - /* Default weight. 
Usage is optional */ - __tbp->usage = THERMAL_WEIGHT_DEFAULT; - ret = of_property_read_u32(np, "contribution", &prop); - if (ret == 0) - __tbp->usage = prop; - - trip = of_parse_phandle(np, "trip", 0); - if (!trip) { - pr_err("missing trip property\n"); - return -ENODEV; - } - - trip_id = of_find_trip_id(tz_np, trip); - if (trip_id < 0) { - ret = trip_id; - goto end; - } - - __tbp->trip_id = trip_id; - - count = of_count_phandle_with_args(np, "cooling-device", - "#cooling-cells"); - if (count <= 0) { - pr_err("Add a cooling_device property with at least one device\n"); - ret = -ENOENT; - goto end; - } - - __tcbp = kcalloc(count, sizeof(*__tcbp), GFP_KERNEL); - if (!__tcbp) { - ret = -ENOMEM; - goto end; - } - - for (i = 0; i < count; i++) { - ret = of_parse_phandle_with_args(np, "cooling-device", - "#cooling-cells", i, &cooling_spec); - if (ret < 0) { - pr_err("Invalid cooling-device entry\n"); - goto free_tcbp; - } - - __tcbp[i].cooling_device = cooling_spec.np; - - if (cooling_spec.args_count >= 2) { /* at least min and max */ - __tcbp[i].min = cooling_spec.args[0]; - __tcbp[i].max = cooling_spec.args[1]; - } else { - pr_err("wrong reference to cooling device, missing limits\n"); - } - } - - __tbp->tcbp = __tcbp; - __tbp->count = count; - - goto end; - -free_tcbp: - for (i = i - 1; i >= 0; i--) - of_node_put(__tcbp[i].cooling_device); - kfree(__tcbp); -end: - of_node_put(trip); - - return ret; -} - /* * It maps 'enum thermal_trip_type' found in include/linux/thermal.h * into the device tree binding of 'trip', property type. @@ -811,16 +245,6 @@ static int thermal_of_get_trip_type(struct device_node *np, return -ENODEV; } -/** - * thermal_of_populate_trip - parse and fill one trip point data - * @np: DT node containing a trip point node - * @trip: trip point data structure to be filled up - * - * This function parses a trip point type of node represented by - * @np parameter and fills the read data into @trip data structure. - * - * Return: 0 on success, proper error code otherwise - */ static int thermal_of_populate_trip(struct device_node *np, struct thermal_trip *trip) { @@ -897,258 +321,458 @@ out_of_node_put: return ERR_PTR(ret); } -/** - * thermal_of_build_thermal_zone - parse and fill one thermal zone data - * @np: DT node containing a thermal zone node - * - * This function parses a thermal zone type of node represented by - * @np parameter and fills the read data into a __thermal_zone data structure - * and return this pointer. - * - * TODO: Missing properties to parse: thermal-sensor-names - * - * Return: On success returns a valid struct __thermal_zone, - * otherwise, it returns a corresponding ERR_PTR(). Caller must - * check the return value with help of IS_ERR() helper. 
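The cooling-map parsing removed above survives in __thermal_of_bind()/__thermal_of_unbind() in a leaner form: each "cooling-device" entry is a phandle whose first two cells carry the minimum and maximum cooling states for the mapped trip point. A condensed sketch of that decoding (decode_cooling_entry() is an invented name):

#include <linux/errno.h>
#include <linux/of.h>

static int decode_cooling_entry(struct device_node *map_np, int index,
				unsigned long *min, unsigned long *max)
{
	struct of_phandle_args spec;
	int ret;

	ret = of_parse_phandle_with_args(map_np, "cooling-device",
					 "#cooling-cells", index, &spec);
	if (ret)
		return ret;

	of_node_put(spec.np);

	/* At least the min and max cooling states are expected. */
	if (spec.args_count < 2)
		return -EINVAL;

	*min = spec.args[0];
	*max = spec.args[1];

	return 0;
}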
- */ -static struct __thermal_zone -__init *thermal_of_build_thermal_zone(struct device_node *np) +static struct device_node *of_thermal_zone_find(struct device_node *sensor, int id) { - struct device_node *child = NULL, *gchild; - struct __thermal_zone *tz; - int ret, i; - u32 prop, coef[2]; + struct device_node *np, *tz; + struct of_phandle_args sensor_specs; + np = of_find_node_by_name(NULL, "thermal-zones"); if (!np) { - pr_err("no thermal zone np\n"); - return ERR_PTR(-EINVAL); + pr_debug("No thermal zones description\n"); + return ERR_PTR(-ENODEV); } - tz = kzalloc(sizeof(*tz), GFP_KERNEL); - if (!tz) - return ERR_PTR(-ENOMEM); + /* + * Search for each thermal zone, a defined sensor + * corresponding to the one passed as parameter + */ + for_each_available_child_of_node(np, tz) { + + int count, i; + + count = of_count_phandle_with_args(tz, "thermal-sensors", + "#thermal-sensor-cells"); + if (count <= 0) { + pr_err("%pOFn: missing thermal sensor\n", tz); + tz = ERR_PTR(-EINVAL); + goto out; + } + + for (i = 0; i < count; i++) { + + int ret; - ret = of_property_read_u32(np, "polling-delay-passive", &prop); + ret = of_parse_phandle_with_args(tz, "thermal-sensors", + "#thermal-sensor-cells", + i, &sensor_specs); + if (ret < 0) { + pr_err("%pOFn: Failed to read thermal-sensors cells: %d\n", tz, ret); + tz = ERR_PTR(ret); + goto out; + } + + if ((sensor == sensor_specs.np) && id == (sensor_specs.args_count ? + sensor_specs.args[0] : 0)) { + pr_debug("sensor %pOFn id=%d belongs to %pOFn\n", sensor, id, tz); + goto out; + } + } + } + tz = ERR_PTR(-ENODEV); +out: + of_node_put(np); + return tz; +} + +static int thermal_of_monitor_init(struct device_node *np, int *delay, int *pdelay) +{ + int ret; + + ret = of_property_read_u32(np, "polling-delay-passive", pdelay); if (ret < 0) { pr_err("%pOFn: missing polling-delay-passive property\n", np); - goto free_tz; + return ret; } - tz->passive_delay = prop; - ret = of_property_read_u32(np, "polling-delay", &prop); + ret = of_property_read_u32(np, "polling-delay", delay); if (ret < 0) { pr_err("%pOFn: missing polling-delay property\n", np); - goto free_tz; + return ret; } - tz->polling_delay = prop; + + return 0; +} + +static struct thermal_zone_params *thermal_of_parameters_init(struct device_node *np) +{ + struct thermal_zone_params *tzp; + int coef[2]; + int ncoef = ARRAY_SIZE(coef); + int prop, ret; + + tzp = kzalloc(sizeof(*tzp), GFP_KERNEL); + if (!tzp) + return ERR_PTR(-ENOMEM); + + tzp->no_hwmon = true; + + if (!of_property_read_u32(np, "sustainable-power", &prop)) + tzp->sustainable_power = prop; /* - * REVIST: for now, the thermal framework supports only - * one sensor per thermal zone. Thus, we are considering - * only the first two values as slope and offset. + * For now, the thermal framework supports only one sensor per + * thermal zone. Thus, we are considering only the first two + * values as slope and offset. 
*/ - ret = of_property_read_u32_array(np, "coefficients", coef, 2); - if (ret == 0) { - tz->slope = coef[0]; - tz->offset = coef[1]; - } else { - tz->slope = 1; - tz->offset = 0; + ret = of_property_read_u32_array(np, "coefficients", coef, ncoef); + if (ret) { + coef[0] = 1; + coef[1] = 0; + } + + tzp->slope = coef[0]; + tzp->offset = coef[1]; + + return tzp; +} + +static struct device_node *thermal_of_zone_get_by_name(struct thermal_zone_device *tz) +{ + struct device_node *np, *tz_np; + + np = of_find_node_by_name(NULL, "thermal-zones"); + if (!np) + return ERR_PTR(-ENODEV); + + tz_np = of_get_child_by_name(np, tz->type); + + of_node_put(np); + + if (!tz_np) + return ERR_PTR(-ENODEV); + + return tz_np; +} + +static int __thermal_of_unbind(struct device_node *map_np, int index, int trip_id, + struct thermal_zone_device *tz, struct thermal_cooling_device *cdev) +{ + struct of_phandle_args cooling_spec; + int ret; + + ret = of_parse_phandle_with_args(map_np, "cooling-device", "#cooling-cells", + index, &cooling_spec); + + of_node_put(cooling_spec.np); + + if (ret < 0) { + pr_err("Invalid cooling-device entry\n"); + return ret; } - tz->trips = thermal_of_trips_init(np, &tz->ntrips); - if (IS_ERR(tz->trips)) { - ret = PTR_ERR(tz->trips); - goto finish; + if (cooling_spec.args_count < 2) { + pr_err("wrong reference to cooling device, missing limits\n"); + return -EINVAL; } - /* cooling-maps */ - child = of_get_child_by_name(np, "cooling-maps"); + if (cooling_spec.np != cdev->np) + return 0; - /* cooling-maps not provided */ - if (!child) - goto finish; + ret = thermal_zone_unbind_cooling_device(tz, trip_id, cdev); + if (ret) + pr_err("Failed to unbind '%s' with '%s': %d\n", tz->type, cdev->type, ret); - tz->num_tbps = of_get_child_count(child); - if (tz->num_tbps == 0) - goto finish; + return ret; +} - tz->tbps = kcalloc(tz->num_tbps, sizeof(*tz->tbps), GFP_KERNEL); - if (!tz->tbps) { - ret = -ENOMEM; - goto free_trips; +static int __thermal_of_bind(struct device_node *map_np, int index, int trip_id, + struct thermal_zone_device *tz, struct thermal_cooling_device *cdev) +{ + struct of_phandle_args cooling_spec; + int ret, weight = THERMAL_WEIGHT_DEFAULT; + + of_property_read_u32(map_np, "contribution", &weight); + + ret = of_parse_phandle_with_args(map_np, "cooling-device", "#cooling-cells", + index, &cooling_spec); + + of_node_put(cooling_spec.np); + + if (ret < 0) { + pr_err("Invalid cooling-device entry\n"); + return ret; } - i = 0; - for_each_child_of_node(child, gchild) { - ret = thermal_of_populate_bind_params(np, gchild, &tz->tbps[i++]); - if (ret) { - of_node_put(gchild); - goto free_tbps; - } + if (cooling_spec.args_count < 2) { + pr_err("wrong reference to cooling device, missing limits\n"); + return -EINVAL; } -finish: - of_node_put(child); + if (cooling_spec.np != cdev->np) + return 0; + + ret = thermal_zone_bind_cooling_device(tz, trip_id, cdev, cooling_spec.args[1], + cooling_spec.args[0], + weight); + if (ret) + pr_err("Failed to bind '%s' with '%s': %d\n", tz->type, cdev->type, ret); - return tz; + return ret; +} -free_tbps: - for (i = i - 1; i >= 0; i--) { - struct __thermal_bind_params *tbp = tz->tbps + i; - int j; +static int thermal_of_for_each_cooling_device(struct device_node *tz_np, struct device_node *map_np, + struct thermal_zone_device *tz, struct thermal_cooling_device *cdev, + int (*action)(struct device_node *, int, int, + struct thermal_zone_device *, struct thermal_cooling_device *)) +{ + struct device_node *tr_np; + int count, i, trip_id; - for (j = 0; j < 
tbp->count; j++) - of_node_put(tbp->tcbp[j].cooling_device); + tr_np = of_parse_phandle(map_np, "trip", 0); + if (!tr_np) + return -ENODEV; - kfree(tbp->tcbp); + trip_id = of_find_trip_id(tz_np, tr_np); + if (trip_id < 0) + return trip_id; + + count = of_count_phandle_with_args(map_np, "cooling-device", "#cooling-cells"); + if (count <= 0) { + pr_err("Add a cooling_device property with at least one device\n"); + return -ENOENT; } - kfree(tz->tbps); -free_trips: - kfree(tz->trips); -free_tz: - kfree(tz); - of_node_put(child); + /* + * At this point, we don't want to bail out when there is an + * error, we will try to bind/unbind as many cooling devices + * as possible + */ + for (i = 0; i < count; i++) + action(map_np, i, trip_id, tz, cdev); - return ERR_PTR(ret); + return 0; } -static __init void of_thermal_free_zone(struct __thermal_zone *tz) +static int thermal_of_for_each_cooling_maps(struct thermal_zone_device *tz, + struct thermal_cooling_device *cdev, + int (*action)(struct device_node *, int, int, + struct thermal_zone_device *, struct thermal_cooling_device *)) { - struct __thermal_bind_params *tbp; - int i, j; + struct device_node *tz_np, *cm_np, *child; + int ret = 0; - for (i = 0; i < tz->num_tbps; i++) { - tbp = tz->tbps + i; + tz_np = thermal_of_zone_get_by_name(tz); + if (IS_ERR(tz_np)) { + pr_err("Failed to get node tz by name\n"); + return PTR_ERR(tz_np); + } - for (j = 0; j < tbp->count; j++) - of_node_put(tbp->tcbp[j].cooling_device); + cm_np = of_get_child_by_name(tz_np, "cooling-maps"); + if (!cm_np) + goto out; - kfree(tbp->tcbp); + for_each_child_of_node(cm_np, child) { + ret = thermal_of_for_each_cooling_device(tz_np, child, tz, cdev, action); + if (ret) + break; } - kfree(tz->tbps); - kfree(tz->trips); - kfree(tz); + of_node_put(cm_np); +out: + of_node_put(tz_np); + + return ret; +} + +static int thermal_of_bind(struct thermal_zone_device *tz, + struct thermal_cooling_device *cdev) +{ + return thermal_of_for_each_cooling_maps(tz, cdev, __thermal_of_bind); +} + +static int thermal_of_unbind(struct thermal_zone_device *tz, + struct thermal_cooling_device *cdev) +{ + return thermal_of_for_each_cooling_maps(tz, cdev, __thermal_of_unbind); } /** - * of_thermal_destroy_zones - remove all zones parsed and allocated resources + * thermal_of_zone_unregister - Cleanup the resources allocated for the thermal OF zone * - * Finds all zones parsed and added to the thermal framework and remove them - * from the system, together with their resources. + * This function disables the thermal zone and frees the different + * resources allocated specifically for the thermal OF zone. * + * @tz: a pointer to the thermal zone structure */ -static __init void of_thermal_destroy_zones(void) +void thermal_of_zone_unregister(struct thermal_zone_device *tz) { - struct device_node *np, *child; + struct thermal_trip *trips = tz->trips; + struct thermal_zone_params *tzp = tz->tzp; + struct thermal_zone_device_ops *ops = tz->ops; - np = of_find_node_by_name(NULL, "thermal-zones"); - if (!np) { - pr_debug("unable to find thermal zones\n"); - return; + thermal_zone_device_disable(tz); + thermal_zone_device_unregister(tz); + kfree(trips); + kfree(tzp); + kfree(ops); +} +EXPORT_SYMBOL_GPL(thermal_of_zone_unregister); + +/** + * thermal_of_zone_register - Register a thermal zone with device node + * sensor + * + * The thermal_of_zone_register() parses a device tree given a device + * node sensor and identifier.
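The new walker just above factors bind and unbind into a single traversal parameterized by an action callback; errors from individual entries are deliberately non-fatal so the remaining cooling devices still get processed. The shape of that pattern, reduced (walk_cooling_entries() is an invented name):

#include <linux/of.h>
#include <linux/thermal.h>

typedef int (*map_action_t)(struct device_node *map_np, int index, int trip_id,
			    struct thermal_zone_device *tz,
			    struct thermal_cooling_device *cdev);

static int walk_cooling_entries(struct device_node *map_np, int count, int trip_id,
				struct thermal_zone_device *tz,
				struct thermal_cooling_device *cdev,
				map_action_t action)
{
	int i;

	/* Apply the action to every entry; do not stop on failure. */
	for (i = 0; i < count; i++)
		action(map_np, i, trip_id, tz, cdev);

	return 0;
}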
It searches for the thermal zone + * associated with the sensor/id pair, retrieves all the thermal + * zone properties and registers a new thermal zone with those + * properties. + * + * @sensor: A device node pointer corresponding to the sensor in the device tree + * @id: An integer sensor identifier + * @data: Private data to be stored in the thermal zone dedicated private area + * @ops: A set of thermal sensor ops + * + * Return: a valid thermal zone structure pointer on success. + * - EINVAL: if the device tree thermal description is malformed + * - ENOMEM: if one structure cannot be allocated + * - Other negative errors are returned by the underlying called functions + */ +struct thermal_zone_device *thermal_of_zone_register(struct device_node *sensor, int id, void *data, + const struct thermal_zone_device_ops *ops) +{ + struct thermal_zone_device *tz; + struct thermal_trip *trips; + struct thermal_zone_params *tzp; + struct thermal_zone_device_ops *of_ops; + struct device_node *np; + int delay, pdelay; + int ntrips, mask; + int ret; + + of_ops = kmemdup(ops, sizeof(*ops), GFP_KERNEL); + if (!of_ops) + return ERR_PTR(-ENOMEM); + + np = of_thermal_zone_find(sensor, id); + if (IS_ERR(np)) { + if (PTR_ERR(np) != -ENODEV) + pr_err("Failed to find thermal zone for %pOFn id=%d\n", sensor, id); + return ERR_CAST(np); } - for_each_available_child_of_node(np, child) { - struct thermal_zone_device *zone; + trips = thermal_of_trips_init(np, &ntrips); + if (IS_ERR(trips)) { + pr_err("Failed to find trip points for %pOFn id=%d\n", sensor, id); + return ERR_CAST(trips); + } - zone = thermal_zone_get_zone_by_name(child->name); - if (IS_ERR(zone)) - continue; + ret = thermal_of_monitor_init(np, &delay, &pdelay); + if (ret) { + pr_err("Failed to initialize monitoring delays from %pOFn\n", np); + goto out_kfree_trips; + } - thermal_zone_device_unregister(zone); - kfree(zone->tzp); - kfree(zone->ops); - of_thermal_free_zone(zone->devdata); + tzp = thermal_of_parameters_init(np); + if (IS_ERR(tzp)) { + ret = PTR_ERR(tzp); + pr_err("Failed to initialize parameters from %pOFn: %d\n", np, ret); + goto out_kfree_trips; } - of_node_put(np); + + of_ops->get_trip_type = of_ops->get_trip_type ? : of_thermal_get_trip_type; + of_ops->get_trip_temp = of_ops->get_trip_temp ? : of_thermal_get_trip_temp; + of_ops->get_trip_hyst = of_ops->get_trip_hyst ? : of_thermal_get_trip_hyst; + of_ops->set_trip_hyst = of_ops->set_trip_hyst ? : of_thermal_set_trip_hyst; + of_ops->get_crit_temp = of_ops->get_crit_temp ?
: of_thermal_get_crit_temp; + of_ops->bind = thermal_of_bind; + of_ops->unbind = thermal_of_unbind; + + mask = GENMASK_ULL((ntrips) - 1, 0); + + tz = thermal_zone_device_register_with_trips(np->name, trips, ntrips, + mask, data, of_ops, tzp, + pdelay, delay); + if (IS_ERR(tz)) { + ret = PTR_ERR(tz); + pr_err("Failed to register thermal zone %pOFn: %d\n", np, ret); + goto out_kfree_tzp; + } + + ret = thermal_zone_device_enable(tz); + if (ret) { + pr_err("Failed to enable thermal zone '%s', id=%d: %d\n", + tz->type, tz->id, ret); + thermal_of_zone_unregister(tz); + return ERR_PTR(ret); + } + + return tz; + +out_kfree_tzp: + kfree(tzp); +out_kfree_trips: + kfree(trips); + + return ERR_PTR(ret); +} +EXPORT_SYMBOL_GPL(thermal_of_zone_register); + +static void devm_thermal_of_zone_release(struct device *dev, void *res) +{ + thermal_of_zone_unregister(*(struct thermal_zone_device **)res); +} + +static int devm_thermal_of_zone_match(struct device *dev, void *res, + void *data) +{ + struct thermal_zone_device **r = res; + + if (WARN_ON(!r || !*r)) + return 0; + + return *r == data; } /** - * of_parse_thermal_zones - parse device tree thermal data + * devm_thermal_of_zone_register - register a thermal zone tied to the sensor life cycle * - * Initialization function that can be called by machine initialization - * code to parse thermal data and populate the thermal framework - * with hardware thermal zones info. This function only parses thermal zones. - * Cooling devices and sensor devices nodes are supposed to be parsed - * by their respective drivers. - * - * Return: 0 on success, proper error code otherwise + * This function is the device-managed version of the thermal_of_zone_register() function. * + * @dev: a device structure pointer to the sensor tied to the thermal zone OF life cycle + * @sensor_id: the sensor identifier + * @data: a pointer to private data to be stored in the thermal zone 'devdata' field + * @ops: a pointer to the ops structure associated with the sensor */ -int __init of_parse_thermal_zones(void) +struct thermal_zone_device *devm_thermal_of_zone_register(struct device *dev, int sensor_id, void *data, + const struct thermal_zone_device_ops *ops) { - struct device_node *np, *child; - struct __thermal_zone *tz; - struct thermal_zone_device_ops *ops; - - np = of_find_node_by_name(NULL, "thermal-zones"); - if (!np) { - pr_debug("unable to find thermal zones\n"); - return 0; /* Run successfully on systems without thermal DT */ - } - - for_each_available_child_of_node(np, child) { - struct thermal_zone_device *zone; - struct thermal_zone_params *tzp; - int i, mask = 0; - u32 prop; - - tz = thermal_of_build_thermal_zone(child); - if (IS_ERR(tz)) { - pr_err("failed to build thermal zone %pOFn: %ld\n", - child, - PTR_ERR(tz)); - continue; - } - - ops = kmemdup(&of_thermal_ops, sizeof(*ops), GFP_KERNEL); - if (!ops) - goto exit_free; + struct thermal_zone_device **ptr, *tzd; - tzp = kzalloc(sizeof(*tzp), GFP_KERNEL); - if (!tzp) { - kfree(ops); - goto exit_free; - } + ptr = devres_alloc(devm_thermal_of_zone_release, sizeof(*ptr), + GFP_KERNEL); + if (!ptr) + return ERR_PTR(-ENOMEM); - /* No hwmon because there might be hwmon drivers registering */ - tzp->no_hwmon = true; - - if (!of_property_read_u32(child, "sustainable-power", &prop)) - tzp->sustainable_power = prop; - - for (i = 0; i < tz->ntrips; i++) - mask |= 1 << i; - - /* these two are left for temperature drivers to use */ - tzp->slope = tz->slope; - tzp->offset = tz->offset; - - zone =
thermal_zone_device_register_with_trips(child->name, tz->trips, tz->ntrips, - mask, tz, ops, tzp, tz->passive_delay, - tz->polling_delay); - if (IS_ERR(zone)) { - pr_err("Failed to build %pOFn zone %ld\n", child, - PTR_ERR(zone)); - kfree(tzp); - kfree(ops); - of_thermal_free_zone(tz); - /* attempting to build remaining zones still */ - } + tzd = thermal_of_zone_register(dev->of_node, sensor_id, data, ops); + if (IS_ERR(tzd)) { + devres_free(ptr); + return tzd; } - of_node_put(np); - - return 0; -exit_free: - of_node_put(child); - of_node_put(np); - of_thermal_free_zone(tz); + *ptr = tzd; + devres_add(dev, ptr); - /* no memory available, so free what we have built */ - of_thermal_destroy_zones(); + return tzd; +} +EXPORT_SYMBOL_GPL(devm_thermal_of_zone_register); - return -ENOMEM; +/** + * devm_thermal_of_zone_unregister - Resource managed version of + * thermal_of_zone_unregister(). + * @dev: Device for which the resource was allocated. + * @tz: a pointer to struct thermal_zone_device where the sensor is registered. + * + * This function removes the sensor callbacks and private data from the + * thermal zone device registered with the devm_thermal_of_zone_register() + * API. It will also silence the zone by removing the .get_temp() and .get_trend() + * thermal zone device callbacks. + * Normally this function will not need to be called and the resource + * management code will ensure that the resource is freed. + */ +void devm_thermal_of_zone_unregister(struct device *dev, struct thermal_zone_device *tz) +{ + WARN_ON(devres_release(dev, devm_thermal_of_zone_release, + devm_thermal_of_zone_match, tz)); } +EXPORT_SYMBOL_GPL(devm_thermal_of_zone_unregister); diff --git a/drivers/thermal/thermal_sysfs.c b/drivers/thermal/thermal_sysfs.c index 3a8d6e747c25..78c5841bdfae 100644 --- a/drivers/thermal/thermal_sysfs.c +++ b/drivers/thermal/thermal_sysfs.c @@ -49,7 +49,11 @@ static ssize_t mode_show(struct device *dev, struct device_attribute *attr, char *buf) { struct thermal_zone_device *tz = to_thermal_zone(dev); - int enabled = thermal_zone_device_is_enabled(tz); + int enabled; + + mutex_lock(&tz->lock); + enabled = thermal_zone_device_is_enabled(tz); + mutex_unlock(&tz->lock); return sprintf(buf, "%s\n", enabled ?
"enabled" : "disabled"); } @@ -115,7 +119,7 @@ trip_point_temp_store(struct device *dev, struct device_attribute *attr, int temperature, hyst = 0; enum thermal_trip_type type; - if (!tz->ops->set_trip_temp) + if (!tz->ops->set_trip_temp && !tz->trips) return -EPERM; if (sscanf(attr->attr.name, "trip_point_%d_temp", &trip) != 1) @@ -128,6 +132,9 @@ trip_point_temp_store(struct device *dev, struct device_attribute *attr, if (ret) return ret; + if (tz->trips) + tz->trips[trip].temperature = temperature; + if (tz->ops->get_trip_hyst) { ret = tz->ops->get_trip_hyst(tz, trip, &hyst); if (ret) diff --git a/drivers/thermal/ti-soc-thermal/ti-thermal-common.c b/drivers/thermal/ti-soc-thermal/ti-thermal-common.c index 703039d8b937..8a9055bd376e 100644 --- a/drivers/thermal/ti-soc-thermal/ti-thermal-common.c +++ b/drivers/thermal/ti-soc-thermal/ti-thermal-common.c @@ -65,10 +65,10 @@ static inline int ti_thermal_hotspot_temperature(int t, int s, int c) /* thermal zone ops */ /* Get temperature callback function for thermal zone */ -static inline int __ti_thermal_get_temp(void *devdata, int *temp) +static inline int __ti_thermal_get_temp(struct thermal_zone_device *tz, int *temp) { struct thermal_zone_device *pcb_tz = NULL; - struct ti_thermal_data *data = devdata; + struct ti_thermal_data *data = tz->devdata; struct ti_bandgap *bgp; const struct ti_temp_sensor *s; int ret, tmp, slope, constant; @@ -85,8 +85,8 @@ static inline int __ti_thermal_get_temp(void *devdata, int *temp) return ret; /* Default constants */ - slope = thermal_zone_get_slope(data->ti_thermal); - constant = thermal_zone_get_offset(data->ti_thermal); + slope = thermal_zone_get_slope(tz); + constant = thermal_zone_get_offset(tz); pcb_tz = data->pcb_tz; /* In case pcb zone is available, use the extrapolation rule with it */ @@ -107,9 +107,9 @@ static inline int __ti_thermal_get_temp(void *devdata, int *temp) return ret; } -static int __ti_thermal_get_trend(void *p, int trip, enum thermal_trend *trend) +static int __ti_thermal_get_trend(struct thermal_zone_device *tz, int trip, enum thermal_trend *trend) { - struct ti_thermal_data *data = p; + struct ti_thermal_data *data = tz->devdata; struct ti_bandgap *bgp; int id, tr, ret = 0; @@ -130,7 +130,7 @@ static int __ti_thermal_get_trend(void *p, int trip, enum thermal_trend *trend) return 0; } -static const struct thermal_zone_of_device_ops ti_of_thermal_ops = { +static const struct thermal_zone_device_ops ti_of_thermal_ops = { .get_temp = __ti_thermal_get_temp, .get_trend = __ti_thermal_get_trend, }; @@ -170,7 +170,7 @@ int ti_thermal_expose_sensor(struct ti_bandgap *bgp, int id, return -EINVAL; /* in case this is specified by DT */ - data->ti_thermal = devm_thermal_zone_of_sensor_register(bgp->dev, id, + data->ti_thermal = devm_thermal_of_zone_register(bgp->dev, id, data, &ti_of_thermal_ops); if (IS_ERR(data->ti_thermal)) { dev_err(bgp->dev, "thermal zone device is NULL\n"); diff --git a/drivers/thermal/uniphier_thermal.c b/drivers/thermal/uniphier_thermal.c index 4cae5561a2a3..4111d99ef50e 100644 --- a/drivers/thermal/uniphier_thermal.c +++ b/drivers/thermal/uniphier_thermal.c @@ -187,9 +187,9 @@ static void uniphier_tm_disable_sensor(struct uniphier_tm_dev *tdev) usleep_range(1000, 2000); /* The spec note says at least 1ms */ } -static int uniphier_tm_get_temp(void *data, int *out_temp) +static int uniphier_tm_get_temp(struct thermal_zone_device *tz, int *out_temp) { - struct uniphier_tm_dev *tdev = data; + struct uniphier_tm_dev *tdev = tz->devdata; struct regmap *map = tdev->regmap; 
int ret; u32 temp; @@ -204,7 +204,7 @@ static int uniphier_tm_get_temp(void *data, int *out_temp) return 0; } -static const struct thermal_zone_of_device_ops uniphier_of_thermal_ops = { +static const struct thermal_zone_device_ops uniphier_of_thermal_ops = { .get_temp = uniphier_tm_get_temp, }; @@ -289,8 +289,8 @@ static int uniphier_tm_probe(struct platform_device *pdev) platform_set_drvdata(pdev, tdev); - tdev->tz_dev = devm_thermal_zone_of_sensor_register(dev, 0, tdev, - &uniphier_of_thermal_ops); + tdev->tz_dev = devm_thermal_of_zone_register(dev, 0, tdev, + &uniphier_of_thermal_ops); if (IS_ERR(tdev->tz_dev)) { dev_err(dev, "failed to register sensor device\n"); return PTR_ERR(tdev->tz_dev); diff --git a/drivers/thunderbolt/acpi.c b/drivers/thunderbolt/acpi.c index b1f0dc8df47c..7a8adf5ad5a0 100644 --- a/drivers/thunderbolt/acpi.c +++ b/drivers/thunderbolt/acpi.c @@ -42,7 +42,7 @@ static acpi_status tb_acpi_add_link(acpi_handle handle, u32 level, void *data, */ dev = acpi_get_first_physical_node(adev); while (!dev) { - adev = adev->parent; + adev = acpi_dev_parent(adev); if (!adev) break; dev = acpi_get_first_physical_node(adev); diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c index c63c1f4ff9dc..77d7f07ca075 100644 --- a/drivers/thunderbolt/switch.c +++ b/drivers/thunderbolt/switch.c @@ -2413,6 +2413,7 @@ int tb_switch_configure(struct tb_switch *sw) * additional capabilities. */ sw->config.cmuv = USB4_VERSION_1_0; + sw->config.plug_events_delay = 0xa; /* Enumerate the switch */ ret = tb_sw_write(sw, (u32 *)&sw->config + 1, TB_CFG_SWITCH, diff --git a/drivers/usb/dwc3/dwc3-st.c b/drivers/usb/dwc3/dwc3-st.c index 166b5bde45cb..6c14a79279f9 100644 --- a/drivers/usb/dwc3/dwc3-st.c +++ b/drivers/usb/dwc3/dwc3-st.c @@ -251,7 +251,7 @@ static int st_dwc3_probe(struct platform_device *pdev) /* Manage SoftReset */ reset_control_deassert(dwc3_data->rstc_rst); - child = of_get_child_by_name(node, "dwc3"); + child = of_get_child_by_name(node, "usb"); if (!child) { dev_err(&pdev->dev, "failed to find dwc3 core node\n"); ret = -ENODEV; diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h index 23ab3b048d9b..251778d14e2d 100644 --- a/drivers/usb/storage/unusual_uas.h +++ b/drivers/usb/storage/unusual_uas.h @@ -52,6 +52,13 @@ UNUSUAL_DEV(0x059f, 0x1061, 0x0000, 0x9999, USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_NO_REPORT_OPCODES | US_FL_NO_SAME), +/* Reported-by: Hongling Zeng <zenghongling@kylinos.cn> */ +UNUSUAL_DEV(0x090c, 0x2000, 0x0000, 0x9999, + "Hiksemi", + "External HDD", + USB_SC_DEVICE, USB_PR_DEVICE, NULL, + US_FL_IGNORE_UAS), + /* * Apricorn USB3 dongle sometimes returns "USBSUSBSUSBS" in response to SCSI * commands in UAS mode. Observed with the 1.28 firmware; are there others? 
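The uniphier and ti-soc-thermal hunks above show the two halves of the driver-side conversion: get_temp() now receives the thermal zone and pulls its state from tz->devdata, and registration goes through devm_thermal_of_zone_register(). Put together, a minimal converted sensor driver looks roughly like this (a sketch; the demo_* names are invented):

#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/thermal.h>

struct demo_sensor {
	int last_mc;	/* last temperature in millicelsius */
};

static int demo_get_temp(struct thermal_zone_device *tz, int *temp)
{
	struct demo_sensor *s = tz->devdata;	/* no more void * argument */

	*temp = s->last_mc;
	return 0;
}

static const struct thermal_zone_device_ops demo_ops = {
	.get_temp = demo_get_temp,
};

static int demo_probe(struct platform_device *pdev)
{
	struct demo_sensor *s;
	struct thermal_zone_device *tzd;

	s = devm_kzalloc(&pdev->dev, sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	/* Sensor id 0; the zone is looked up via the device's of_node. */
	tzd = devm_thermal_of_zone_register(&pdev->dev, 0, s, &demo_ops);
	if (IS_ERR(tzd))
		return PTR_ERR(tzd);

	return 0;
}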
@@ -76,6 +83,13 @@ UNUSUAL_DEV(0x0bc2, 0x331a, 0x0000, 0x9999, USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_NO_REPORT_LUNS), +/* Reported-by: Hongling Zeng <zenghongling@kylinos.cn> */ +UNUSUAL_DEV(0x0bda, 0x9210, 0x0000, 0x9999, + "Hiksemi", + "External HDD", + USB_SC_DEVICE, USB_PR_DEVICE, NULL, + US_FL_IGNORE_UAS), + /* Reported-by: Benjamin Tissoires <benjamin.tissoires@redhat.com> */ UNUSUAL_DEV(0x13fd, 0x3940, 0x0000, 0x9999, "Initio Corporation", @@ -118,6 +132,13 @@ UNUSUAL_DEV(0x154b, 0xf00d, 0x0000, 0x9999, USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_NO_ATA_1X), +/* Reported-by: Hongling Zeng <zenghongling@kylinos.cn> */ +UNUSUAL_DEV(0x17ef, 0x3899, 0x0000, 0x9999, + "Thinkplus", + "External HDD", + USB_SC_DEVICE, USB_PR_DEVICE, NULL, + US_FL_IGNORE_UAS), + /* Reported-by: Hans de Goede <hdegoede@redhat.com> */ UNUSUAL_DEV(0x2109, 0x0711, 0x0000, 0x9999, "VIA", diff --git a/drivers/usb/typec/mux/intel_pmc_mux.c b/drivers/usb/typec/mux/intel_pmc_mux.c index a8e273fe204a..e1f4df7238bf 100644 --- a/drivers/usb/typec/mux/intel_pmc_mux.c +++ b/drivers/usb/typec/mux/intel_pmc_mux.c @@ -569,15 +569,6 @@ err_unregister_switch: return ret; } -static int is_memory(struct acpi_resource *res, void *data) -{ - struct resource_win win = {}; - struct resource *r = &win.res; - - return !(acpi_dev_resource_memory(res, r) || - acpi_dev_resource_address_space(res, &win)); -} - /* IOM ACPI IDs and IOM_PORT_STATUS_OFFSET */ static const struct acpi_device_id iom_acpi_ids[] = { /* TigerLake */ @@ -611,7 +602,7 @@ static int pmc_usb_probe_iom(struct pmc_usb *pmc) return -ENODEV; INIT_LIST_HEAD(&resource_list); - ret = acpi_dev_get_resources(adev, &resource_list, is_memory, NULL); + ret = acpi_dev_get_memory_resources(adev, &resource_list); if (ret < 0) return ret; diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c index 7f2624f42724..6364f0d467ea 100644 --- a/drivers/usb/typec/ucsi/ucsi.c +++ b/drivers/usb/typec/ucsi/ucsi.c @@ -588,8 +588,6 @@ static int ucsi_get_pdos(struct ucsi_connector *con, int is_partner, num_pdos * sizeof(u32)); if (ret < 0 && ret != -ETIMEDOUT) dev_err(ucsi->dev, "UCSI_GET_PDOS failed (%d)\n", ret); - if (ret == 0 && offset == 0) - dev_warn(ucsi->dev, "UCSI_GET_PDOS returned 0 bytes\n"); return ret; } diff --git a/drivers/vdpa/ifcvf/ifcvf_base.c b/drivers/vdpa/ifcvf/ifcvf_base.c index 75a703b803a2..3e4486bfa0b7 100644 --- a/drivers/vdpa/ifcvf/ifcvf_base.c +++ b/drivers/vdpa/ifcvf/ifcvf_base.c @@ -323,7 +323,7 @@ u16 ifcvf_get_vq_state(struct ifcvf_hw *hw, u16 qid) u32 q_pair_id; ifcvf_lm = (struct ifcvf_lm_cfg __iomem *)hw->lm_cfg; - q_pair_id = qid / hw->nr_vring; + q_pair_id = qid / 2; avail_idx_addr = &ifcvf_lm->vring_lm_cfg[q_pair_id].idx_addr[qid % 2]; last_avail_idx = vp_ioread16(avail_idx_addr); @@ -337,7 +337,7 @@ int ifcvf_set_vq_state(struct ifcvf_hw *hw, u16 qid, u16 num) u32 q_pair_id; ifcvf_lm = (struct ifcvf_lm_cfg __iomem *)hw->lm_cfg; - q_pair_id = qid / hw->nr_vring; + q_pair_id = qid / 2; avail_idx_addr = &ifcvf_lm->vring_lm_cfg[q_pair_id].idx_addr[qid % 2]; hw->vring[qid].last_avail_idx = num; vp_iowrite16(num, avail_idx_addr); diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c index ed100a35e596..90913365def4 100644 --- a/drivers/vdpa/mlx5/net/mlx5_vnet.c +++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c @@ -1320,6 +1320,8 @@ static void teardown_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue * static int create_rqt(struct mlx5_vdpa_net *ndev) { + int rqt_table_size = roundup_pow_of_two(ndev->rqt_size); + 
int act_sz = roundup_pow_of_two(ndev->cur_num_vqs / 2); __be32 *list; void *rqtc; int inlen; @@ -1327,7 +1329,7 @@ static int create_rqt(struct mlx5_vdpa_net *ndev) int i, j; int err; - inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + ndev->rqt_size * MLX5_ST_SZ_BYTES(rq_num); + inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + rqt_table_size * MLX5_ST_SZ_BYTES(rq_num); in = kzalloc(inlen, GFP_KERNEL); if (!in) return -ENOMEM; @@ -1336,12 +1338,12 @@ static int create_rqt(struct mlx5_vdpa_net *ndev) rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context); MLX5_SET(rqtc, rqtc, list_q_type, MLX5_RQTC_LIST_Q_TYPE_VIRTIO_NET_Q); - MLX5_SET(rqtc, rqtc, rqt_max_size, ndev->rqt_size); + MLX5_SET(rqtc, rqtc, rqt_max_size, rqt_table_size); list = MLX5_ADDR_OF(rqtc, rqtc, rq_num[0]); - for (i = 0, j = 0; i < ndev->rqt_size; i++, j += 2) + for (i = 0, j = 0; i < act_sz; i++, j += 2) list[i] = cpu_to_be32(ndev->vqs[j % ndev->cur_num_vqs].virtq_id); - MLX5_SET(rqtc, rqtc, rqt_actual_size, ndev->rqt_size); + MLX5_SET(rqtc, rqtc, rqt_actual_size, act_sz); err = mlx5_vdpa_create_rqt(&ndev->mvdev, in, inlen, &ndev->res.rqtn); kfree(in); if (err) @@ -1354,6 +1356,7 @@ static int create_rqt(struct mlx5_vdpa_net *ndev) static int modify_rqt(struct mlx5_vdpa_net *ndev, int num) { + int act_sz = roundup_pow_of_two(num / 2); __be32 *list; void *rqtc; int inlen; @@ -1361,7 +1364,7 @@ static int modify_rqt(struct mlx5_vdpa_net *ndev, int num) int i, j; int err; - inlen = MLX5_ST_SZ_BYTES(modify_rqt_in) + ndev->rqt_size * MLX5_ST_SZ_BYTES(rq_num); + inlen = MLX5_ST_SZ_BYTES(modify_rqt_in) + act_sz * MLX5_ST_SZ_BYTES(rq_num); in = kzalloc(inlen, GFP_KERNEL); if (!in) return -ENOMEM; @@ -1372,10 +1375,10 @@ static int modify_rqt(struct mlx5_vdpa_net *ndev, int num) MLX5_SET(rqtc, rqtc, list_q_type, MLX5_RQTC_LIST_Q_TYPE_VIRTIO_NET_Q); list = MLX5_ADDR_OF(rqtc, rqtc, rq_num[0]); - for (i = 0, j = 0; i < ndev->rqt_size; i++, j += 2) + for (i = 0, j = 0; i < act_sz; i++, j = j + 2) list[i] = cpu_to_be32(ndev->vqs[j % num].virtq_id); - MLX5_SET(rqtc, rqtc, rqt_actual_size, ndev->rqt_size); + MLX5_SET(rqtc, rqtc, rqt_actual_size, act_sz); err = mlx5_vdpa_modify_rqt(&ndev->mvdev, in, inlen, ndev->res.rqtn); kfree(in); if (err) diff --git a/drivers/vdpa/vdpa_user/vduse_dev.c b/drivers/vdpa/vdpa_user/vduse_dev.c index 41c0b29739f1..35dceee3ed56 100644 --- a/drivers/vdpa/vdpa_user/vduse_dev.c +++ b/drivers/vdpa/vdpa_user/vduse_dev.c @@ -673,10 +673,15 @@ static void vduse_vdpa_get_config(struct vdpa_device *vdpa, unsigned int offset, { struct vduse_dev *dev = vdpa_to_vduse(vdpa); - if (offset > dev->config_size || - len > dev->config_size - offset) + /* Initialize the buffer in case of partial copy. */ + memset(buf, 0, len); + + if (offset > dev->config_size) return; + if (len > dev->config_size - offset) + len = dev->config_size - offset; + memcpy(buf, dev->config + offset, len); } diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c index 368330417bde..5703775af129 100644 --- a/drivers/vhost/vsock.c +++ b/drivers/vhost/vsock.c @@ -393,7 +393,7 @@ vhost_vsock_alloc_pkt(struct vhost_virtqueue *vq, return NULL; } - pkt->buf = kmalloc(pkt->len, GFP_KERNEL); + pkt->buf = kvmalloc(pkt->len, GFP_KERNEL); if (!pkt->buf) { kfree(pkt); return NULL; }
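The vduse change above turns an all-or-nothing bounds check into a clamped partial copy with a zero-filled remainder, so the caller never sees stale buffer contents past the config area. The same logic as a standalone helper (illustrative userspace C; read_config() is an invented name):

#include <stddef.h>
#include <stdint.h>
#include <string.h>

static void read_config(uint8_t *dst, size_t len,
			const uint8_t *cfg, size_t cfg_size, size_t offset)
{
	/* Anything not covered by the copy below reads back as zero. */
	memset(dst, 0, len);

	if (offset > cfg_size)
		return;

	if (len > cfg_size - offset)
		len = cfg_size - offset;	/* partial copy at the tail */

	memcpy(dst, cfg + offset, len);
}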