Diffstat (limited to 'drivers/usb')
185 files changed, 15533 insertions, 3231 deletions
diff --git a/drivers/usb/Makefile b/drivers/usb/Makefile
index ba5706ccc188..3e2cc95b7b0b 100644
--- a/drivers/usb/Makefile
+++ b/drivers/usb/Makefile
@@ -13,7 +13,9 @@ obj-$(CONFIG_USB_DWC3) += dwc3/
 obj-$(CONFIG_USB_DWC2) += dwc2/
 obj-$(CONFIG_USB_ISP1760) += isp1760/
+obj-$(CONFIG_USB_CDNS_SUPPORT) += cdns3/
 obj-$(CONFIG_USB_CDNS3) += cdns3/
+obj-$(CONFIG_USB_CDNSP_PCI) += cdns3/
 obj-$(CONFIG_USB_MON) += mon/
 obj-$(CONFIG_USB_MTU3) += mtu3/
diff --git a/drivers/usb/c67x00/c67x00-hcd.h b/drivers/usb/c67x00/c67x00-hcd.h
index 6b6b04a3fe0f..6332a6b5dce6 100644
--- a/drivers/usb/c67x00/c67x00-hcd.h
+++ b/drivers/usb/c67x00/c67x00-hcd.h
@@ -76,7 +76,7 @@ struct c67x00_hcd {
     u16 next_td_addr;
     u16 next_buf_addr;
 
-    struct tasklet_struct tasklet;
+    struct work_struct work;
 
     struct completion endpoint_disable;
diff --git a/drivers/usb/c67x00/c67x00-sched.c b/drivers/usb/c67x00/c67x00-sched.c
index e65f1a0ae80b..c7d3e907be81 100644
--- a/drivers/usb/c67x00/c67x00-sched.c
+++ b/drivers/usb/c67x00/c67x00-sched.c
@@ -1123,24 +1123,26 @@ static void c67x00_do_work(struct c67x00_hcd *c67x00)
 
 /* -------------------------------------------------------------------------- */
 
-static void c67x00_sched_tasklet(struct tasklet_struct *t)
+static void c67x00_sched_work(struct work_struct *work)
 {
-    struct c67x00_hcd *c67x00 = from_tasklet(c67x00, t, tasklet);
+    struct c67x00_hcd *c67x00;
+
+    c67x00 = container_of(work, struct c67x00_hcd, work);
     c67x00_do_work(c67x00);
 }
 
 void c67x00_sched_kick(struct c67x00_hcd *c67x00)
 {
-    tasklet_hi_schedule(&c67x00->tasklet);
+    queue_work(system_highpri_wq, &c67x00->work);
 }
 
 int c67x00_sched_start_scheduler(struct c67x00_hcd *c67x00)
 {
-    tasklet_setup(&c67x00->tasklet, c67x00_sched_tasklet);
+    INIT_WORK(&c67x00->work, c67x00_sched_work);
     return 0;
 }
 
 void c67x00_sched_stop_scheduler(struct c67x00_hcd *c67x00)
 {
-    tasklet_kill(&c67x00->tasklet);
+    cancel_work_sync(&c67x00->work);
 }
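The c67x00 change above is the standard tasklet-to-workqueue conversion (tasklets are deprecated in favor of process-context work items). A minimal sketch of the same recipe, with hypothetical names (my_dev, my_work_fn) rather than the driver's own:

#include <linux/workqueue.h>

struct my_dev {
    struct work_struct work;    /* replaces struct tasklet_struct */
};

/* The handler receives the work item; recover the container with container_of(). */
static void my_work_fn(struct work_struct *work)
{
    struct my_dev *dev = container_of(work, struct my_dev, work);

    /* ... do what the tasklet body used to do, now in process context ... */
    (void)dev;
}

static void my_dev_init(struct my_dev *dev)
{
    INIT_WORK(&dev->work, my_work_fn);          /* was tasklet_setup() */
}

static void my_dev_kick(struct my_dev *dev)
{
    /* system_highpri_wq approximates tasklet_hi_schedule() priority. */
    queue_work(system_highpri_wq, &dev->work);
}

static void my_dev_teardown(struct my_dev *dev)
{
    cancel_work_sync(&dev->work);               /* was tasklet_kill() */
}

Note that cancel_work_sync(), unlike tasklet_kill(), may sleep, which is fine in the driver's stop path.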
diff --git a/drivers/usb/cdns3/Kconfig b/drivers/usb/cdns3/Kconfig
index 84716d216ae5..b98ca0a1352a 100644
--- a/drivers/usb/cdns3/Kconfig
+++ b/drivers/usb/cdns3/Kconfig
@@ -1,14 +1,28 @@
-config USB_CDNS3
-    tristate "Cadence USB3 Dual-Role Controller"
+config USB_CDNS_SUPPORT
+    tristate "Cadence USB Support"
     depends on USB_SUPPORT && (USB || USB_GADGET) && HAS_DMA
     select USB_XHCI_PLATFORM if USB_XHCI_HCD
     select USB_ROLE_SWITCH
     help
+      Say Y here if your system has a Cadence USBSS or USBSSP
+      dual-role controller.
+      It supports: dual-role switch, Host-only, and Peripheral-only.
+
+config USB_CDNS_HOST
+    bool
+
+if USB_CDNS_SUPPORT
+
+config USB_CDNS3
+    tristate "Cadence USB3 Dual-Role Controller"
+    depends on USB_CDNS_SUPPORT
+    help
       Say Y here if your system has a Cadence USB3 dual-role controller.
       It supports: dual-role switch, Host-only, and Peripheral-only.
 
       If you choose to build this driver as a dynamically linked
       module, the module will be called cdns3.ko.
+endif
 
 if USB_CDNS3
 
@@ -25,6 +39,7 @@ config USB_CDNS3_GADGET
 config USB_CDNS3_HOST
     bool "Cadence USB3 host controller"
     depends on USB=y || USB=USB_CDNS3
+    select USB_CDNS_HOST
     help
       Say Y here to enable host controller functionality of the
       Cadence driver.
 
@@ -64,3 +79,44 @@ config USB_CDNS3_IMX
       For example, imx8qm and imx8qxp.
 
 endif
+
+if USB_CDNS_SUPPORT
+
+config USB_CDNSP_PCI
+    tristate "Cadence CDNSP Dual-Role Controller"
+    depends on USB_CDNS_SUPPORT && USB_PCI && ACPI
+    help
+      Say Y here if your system has a Cadence CDNSP dual-role controller.
+      It supports: dual-role switch, Host-only, and Peripheral-only.
+
+      If you choose to build this driver as a dynamically linked
+      module, the module will be called cdnsp.ko.
+endif
+
+if USB_CDNSP_PCI
+
+config USB_CDNSP_GADGET
+    bool "Cadence CDNSP device controller"
+    depends on USB_GADGET=y || USB_GADGET=USB_CDNSP_PCI
+    help
+      Say Y here to enable device controller functionality of the
+      Cadence CDNSP-DEV driver.
+
+      The Cadence CDNSP device controller in device mode is
+      very similar to the XHCI controller; therefore some algorithms
+      have been taken from the host driver.
+      This controller supports FS, HS, SS and SSP modes.
+      It doesn't support LS.
+
+config USB_CDNSP_HOST
+    bool "Cadence CDNSP host controller"
+    depends on USB=y || USB=USB_CDNSP_PCI
+    select USB_CDNS_HOST
+    help
+      Say Y here to enable host controller functionality of the
+      Cadence driver.
+
+      The host controller is XHCI-compliant, so it uses the
+      standard XHCI driver.
+
+endif
diff --git a/drivers/usb/cdns3/Makefile b/drivers/usb/cdns3/Makefile
index d47e341a6f39..61edb2f89276 100644
--- a/drivers/usb/cdns3/Makefile
+++ b/drivers/usb/cdns3/Makefile
@@ -1,18 +1,43 @@
 # SPDX-License-Identifier: GPL-2.0
 # define_trace.h needs to know how to find our header
-CFLAGS_trace.o := -I$(src)
+CFLAGS_cdns3-trace.o := -I$(src)
+CFLAGS_cdnsp-trace.o := -I$(src)
 
-cdns3-y := core.o drd.o
+cdns-usb-common-y := core.o drd.o
+cdns3-y := cdns3-plat.o
 
-obj-$(CONFIG_USB_CDNS3) += cdns3.o
-cdns3-$(CONFIG_USB_CDNS3_GADGET) += gadget.o ep0.o
+ifeq ($(CONFIG_USB),m)
+obj-m += cdns-usb-common.o
+obj-m += cdns3.o
+else
+obj-$(CONFIG_USB_CDNS_SUPPORT) += cdns-usb-common.o
+obj-$(CONFIG_USB_CDNS3) += cdns3.o
+endif
+
+cdns-usb-common-$(CONFIG_USB_CDNS_HOST) += host.o
+cdns3-$(CONFIG_USB_CDNS3_GADGET) += cdns3-gadget.o cdns3-ep0.o
 
 ifneq ($(CONFIG_USB_CDNS3_GADGET),)
-cdns3-$(CONFIG_TRACING) += trace.o
+cdns3-$(CONFIG_TRACING) += cdns3-trace.o
+endif
+
+obj-$(CONFIG_USB_CDNS3_PCI_WRAP) += cdns3-pci-wrap.o
+obj-$(CONFIG_USB_CDNS3_TI) += cdns3-ti.o
+obj-$(CONFIG_USB_CDNS3_IMX) += cdns3-imx.o
+
+cdnsp-udc-pci-y := cdnsp-pci.o
+
+ifdef CONFIG_USB_CDNSP_PCI
+ifeq ($(CONFIG_USB),m)
+obj-m += cdnsp-udc-pci.o
+else
+obj-$(CONFIG_USB_CDNSP_PCI) += cdnsp-udc-pci.o
+endif
 endif
 
-cdns3-$(CONFIG_USB_CDNS3_HOST) += host.o
+cdnsp-udc-pci-$(CONFIG_USB_CDNSP_GADGET) += cdnsp-ring.o cdnsp-gadget.o \
+                                            cdnsp-mem.o cdnsp-ep0.o
 
-obj-$(CONFIG_USB_CDNS3_PCI_WRAP) += cdns3-pci-wrap.o
-obj-$(CONFIG_USB_CDNS3_TI) += cdns3-ti.o
-obj-$(CONFIG_USB_CDNS3_IMX) += cdns3-imx.o
+ifneq ($(CONFIG_USB_CDNSP_GADGET),)
+cdnsp-udc-pci-$(CONFIG_TRACING) += cdnsp-trace.o
+endif
diff --git a/drivers/usb/cdns3/debug.h b/drivers/usb/cdns3/cdns3-debug.h
index a5c6a29e1340..a5c6a29e1340 100644
--- a/drivers/usb/cdns3/debug.h
+++ b/drivers/usb/cdns3/cdns3-debug.h
diff --git a/drivers/usb/cdns3/ep0.c b/drivers/usb/cdns3/cdns3-ep0.c
index d3121a32cc68..9a17802275d5 100644
--- a/drivers/usb/cdns3/ep0.c
+++ b/drivers/usb/cdns3/cdns3-ep0.c
@@ -13,8 +13,8 @@
 #include <linux/usb/composite.h>
 #include <linux/iopoll.h>
 
-#include "gadget.h"
-#include "trace.h"
+#include "cdns3-gadget.h"
+#include "cdns3-trace.h"
 
 static struct usb_endpoint_descriptor cdns3_gadget_ep0_desc = {
     .bLength = USB_DT_ENDPOINT_SIZE,
@@ -364,7 +364,7 @@ static int cdns3_ep0_feature_handle_endpoint(struct cdns3_device *priv_dev,
     if (le16_to_cpu(ctrl->wValue) != USB_ENDPOINT_HALT)
         return -EINVAL;
 
-    if (!(ctrl->wIndex & ~USB_DIR_IN))
+    if (!(le16_to_cpu(ctrl->wIndex) & ~USB_DIR_IN))
         return 0;
 
     index = cdns3_ep_addr_to_index(le16_to_cpu(ctrl->wIndex));
@@ -789,7 +789,7 @@ int cdns3_gadget_ep_set_wedge(struct usb_ep *ep)
     return 0;
 }
 
-const struct usb_ep_ops cdns3_gadget_ep0_ops = {
+static const struct usb_ep_ops cdns3_gadget_ep0_ops = {
     .enable = cdns3_gadget_ep0_enable,
     .disable = cdns3_gadget_ep0_disable,
     .alloc_request = cdns3_gadget_ep_alloc_request,
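The wIndex fix above is a routine endianness correction: fields of struct usb_ctrlrequest are little-endian on the wire (__le16), so they must be converted with le16_to_cpu() before any bit tests, or the check silently breaks on big-endian hosts. A minimal sketch of the pattern, with a hypothetical handler name not taken from the driver:

#include <linux/usb/ch9.h>
#include <asm/byteorder.h>

/* ctrl->wIndex is __le16; convert once, then operate on the CPU-order value. */
static int handle_endpoint_halt(const struct usb_ctrlrequest *ctrl)
{
    u16 windex = le16_to_cpu(ctrl->wIndex);

    /* For EP0 the address bits are all zero apart from the direction flag. */
    if (!(windex & ~USB_DIR_IN))
        return 0;

    return windex & USB_ENDPOINT_NUMBER_MASK;
}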
diff --git a/drivers/usb/cdns3/gadget.c b/drivers/usb/cdns3/cdns3-gadget.c
index 08a4e693c470..582bfeceedb4 100644
--- a/drivers/usb/cdns3/gadget.c
+++ b/drivers/usb/cdns3/cdns3-gadget.c
@@ -63,8 +63,8 @@
 #include "core.h"
 #include "gadget-export.h"
-#include "gadget.h"
-#include "trace.h"
+#include "cdns3-gadget.h"
+#include "cdns3-trace.h"
 #include "drd.h"
 
 static int __cdns3_gadget_ep_queue(struct usb_ep *ep,
@@ -1200,7 +1200,7 @@ static int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep,
         td_size = DIV_ROUND_UP(request->length,
                                priv_ep->endpoint.maxpacket);
         if (priv_dev->gadget.speed == USB_SPEED_SUPER)
-            trb->length = TRB_TDL_SS_SIZE(td_size);
+            trb->length = cpu_to_le32(TRB_TDL_SS_SIZE(td_size));
         else
             control |= TRB_TDL_HS_SIZE(td_size);
     }
@@ -1247,10 +1247,10 @@ static int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep,
         priv_req->trb->control = cpu_to_le32(control);
 
         if (sg_supported) {
-            trb->control |= TRB_ISP;
+            trb->control |= cpu_to_le32(TRB_ISP);
             /* Don't set chain bit for last TRB */
             if (sg_iter < num_trb - 1)
-                trb->control |= TRB_CHAIN;
+                trb->control |= cpu_to_le32(TRB_CHAIN);
             s = sg_next(s);
         }
@@ -1844,7 +1844,7 @@ __must_hold(&priv_dev->lock)
 static irqreturn_t cdns3_device_irq_handler(int irq, void *data)
 {
     struct cdns3_device *priv_dev = data;
-    struct cdns3 *cdns = dev_get_drvdata(priv_dev->dev);
+    struct cdns *cdns = dev_get_drvdata(priv_dev->dev);
     irqreturn_t ret = IRQ_NONE;
     u32 reg;
@@ -3084,7 +3084,7 @@ static void cdns3_gadget_release(struct device *dev)
     kfree(priv_dev);
 }
 
-static void cdns3_gadget_exit(struct cdns3 *cdns)
+static void cdns3_gadget_exit(struct cdns *cdns)
 {
     struct cdns3_device *priv_dev;
@@ -3117,10 +3117,10 @@ static void cdns3_gadget_exit(struct cdns3 *cdns)
     kfree(priv_dev->zlp_buf);
     usb_put_gadget(&priv_dev->gadget);
     cdns->gadget_dev = NULL;
-    cdns3_drd_gadget_off(cdns);
+    cdns_drd_gadget_off(cdns);
 }
 
-static int cdns3_gadget_start(struct cdns3 *cdns)
+static int cdns3_gadget_start(struct cdns *cdns)
 {
     struct cdns3_device *priv_dev;
     u32 max_speed;
@@ -3240,7 +3240,7 @@ err1:
     return ret;
 }
 
-static int __cdns3_gadget_init(struct cdns3 *cdns)
+static int __cdns3_gadget_init(struct cdns *cdns)
 {
     int ret = 0;
@@ -3251,7 +3251,7 @@ static int __cdns3_gadget_init(struct cdns3 *cdns)
         return ret;
     }
 
-    cdns3_drd_gadget_on(cdns);
+    cdns_drd_gadget_on(cdns);
     pm_runtime_get_sync(cdns->dev);
 
     ret = cdns3_gadget_start(cdns);
@@ -3277,7 +3277,7 @@ err0:
     return ret;
 }
 
-static int cdns3_gadget_suspend(struct cdns3 *cdns, bool do_wakeup)
+static int cdns3_gadget_suspend(struct cdns *cdns, bool do_wakeup)
 __must_hold(&cdns->lock)
 {
     struct cdns3_device *priv_dev = cdns->gadget_dev;
@@ -3296,7 +3296,7 @@ __must_hold(&cdns->lock)
     return 0;
 }
 
-static int cdns3_gadget_resume(struct cdns3 *cdns, bool hibernated)
+static int cdns3_gadget_resume(struct cdns *cdns, bool hibernated)
 {
     struct cdns3_device *priv_dev = cdns->gadget_dev;
@@ -3311,13 +3311,13 @@ static int cdns3_gadget_resume(struct cdns3 *cdns, bool hibernated)
 /**
  * cdns3_gadget_init - initialize device structure
  *
- * @cdns: cdns3 instance
+ * @cdns: cdns instance
  *
  * This function initializes the gadget.
  */
-int cdns3_gadget_init(struct cdns3 *cdns)
+int cdns3_gadget_init(struct cdns *cdns)
 {
-    struct cdns3_role_driver *rdrv;
+    struct cdns_role_driver *rdrv;
 
     rdrv = devm_kzalloc(cdns->dev, sizeof(*rdrv), GFP_KERNEL);
     if (!rdrv)
@@ -3327,7 +3327,7 @@ int cdns3_gadget_init(struct cdns3 *cdns)
     rdrv->stop = cdns3_gadget_exit;
     rdrv->suspend = cdns3_gadget_suspend;
     rdrv->resume = cdns3_gadget_resume;
-    rdrv->state = CDNS3_ROLE_STATE_INACTIVE;
+    rdrv->state = CDNS_ROLE_STATE_INACTIVE;
     rdrv->name = "gadget";
 
     cdns->roles[USB_ROLE_DEVICE] = rdrv;
diff --git a/drivers/usb/cdns3/gadget.h b/drivers/usb/cdns3/cdns3-gadget.h
index 21fa461c518e..21fa461c518e 100644
--- a/drivers/usb/cdns3/gadget.h
+++ b/drivers/usb/cdns3/cdns3-gadget.h
diff --git a/drivers/usb/cdns3/cdns3-imx.c b/drivers/usb/cdns3/cdns3-imx.c
index 22a56c4dce67..8f88eec0b0ea 100644
--- a/drivers/usb/cdns3/cdns3-imx.c
+++ b/drivers/usb/cdns3/cdns3-imx.c
@@ -185,7 +185,11 @@ static int cdns_imx_probe(struct platform_device *pdev)
     }
 
     data->num_clks = ARRAY_SIZE(imx_cdns3_core_clks);
-    data->clks = (struct clk_bulk_data *)imx_cdns3_core_clks;
+    data->clks = devm_kmemdup(dev, imx_cdns3_core_clks,
+                              sizeof(imx_cdns3_core_clks), GFP_KERNEL);
+    if (!data->clks)
+        return -ENOMEM;
+
     ret = devm_clk_bulk_get(dev, data->num_clks, data->clks);
     if (ret)
         return ret;
@@ -214,20 +218,16 @@ err:
     return ret;
 }
 
-static int cdns_imx_remove_core(struct device *dev, void *data)
-{
-    struct platform_device *pdev = to_platform_device(dev);
-
-    platform_device_unregister(pdev);
-
-    return 0;
-}
-
 static int cdns_imx_remove(struct platform_device *pdev)
 {
     struct device *dev = &pdev->dev;
+    struct cdns_imx *data = dev_get_drvdata(dev);
 
-    device_for_each_child(dev, NULL, cdns_imx_remove_core);
+    pm_runtime_get_sync(dev);
+    of_platform_depopulate(dev);
+    clk_bulk_disable_unprepare(data->num_clks, data->clks);
+    pm_runtime_disable(dev);
+    pm_runtime_put_noidle(dev);
     platform_set_drvdata(pdev, NULL);
 
     return 0;
@@ -250,7 +250,7 @@ static void cdns3_set_wakeup(struct cdns_imx *data, bool enable)
 static int cdns_imx_platform_suspend(struct device *dev,
                                      bool suspend, bool wakeup)
 {
-    struct cdns3 *cdns = dev_get_drvdata(dev);
+    struct cdns *cdns = dev_get_drvdata(dev);
     struct device *parent = dev->parent;
     struct cdns_imx *data = dev_get_drvdata(parent);
     void __iomem *otg_regs = (void __iomem *)(cdns->otg_regs);
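The cdns3-imx hunk above fixes a classic bug: casting const away from a static clock table and letting devm_clk_bulk_get() write .clk pointers into it, which corrupts the template and breaks multi-instance use. Copying the table per device with devm_kmemdup() keeps the template immutable. A minimal sketch of the pattern under hypothetical names (my_clks_template, my_probe):

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/platform_device.h>

static const struct clk_bulk_data my_clks_template[] = {
    { .id = "bus" },
    { .id = "core" },
};

static int my_probe(struct platform_device *pdev)
{
    struct device *dev = &pdev->dev;
    struct clk_bulk_data *clks;
    int ret;

    /* Per-device writable copy; clk_bulk_get() fills in the .clk fields. */
    clks = devm_kmemdup(dev, my_clks_template,
                        sizeof(my_clks_template), GFP_KERNEL);
    if (!clks)
        return -ENOMEM;

    ret = devm_clk_bulk_get(dev, ARRAY_SIZE(my_clks_template), clks);
    if (ret)
        return ret;

    return clk_bulk_prepare_enable(ARRAY_SIZE(my_clks_template), clks);
}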
diff --git a/drivers/usb/cdns3/cdns3-plat.c b/drivers/usb/cdns3/cdns3-plat.c
new file mode 100644
index 000000000000..4b18e1c6a4bb
--- /dev/null
+++ b/drivers/usb/cdns3/cdns3-plat.c
@@ -0,0 +1,315 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Cadence USBSS DRD Driver.
+ *
+ * Copyright (C) 2018-2020 Cadence.
+ * Copyright (C) 2017-2018 NXP
+ * Copyright (C) 2019 Texas Instruments
+ *
+ * Author: Peter Chen <peter.chen@nxp.com>
+ *         Pawel Laszczak <pawell@cadence.com>
+ *         Roger Quadros <rogerq@ti.com>
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+#include "core.h"
+#include "gadget-export.h"
+
+static int set_phy_power_on(struct cdns *cdns)
+{
+    int ret;
+
+    ret = phy_power_on(cdns->usb2_phy);
+    if (ret)
+        return ret;
+
+    ret = phy_power_on(cdns->usb3_phy);
+    if (ret)
+        phy_power_off(cdns->usb2_phy);
+
+    return ret;
+}
+
+static void set_phy_power_off(struct cdns *cdns)
+{
+    phy_power_off(cdns->usb3_phy);
+    phy_power_off(cdns->usb2_phy);
+}
+
+/**
+ * cdns3_plat_probe - probe for cdns3 core device
+ * @pdev: Pointer to cdns3 core platform device
+ *
+ * Returns 0 on success, otherwise a negative errno
+ */
+static int cdns3_plat_probe(struct platform_device *pdev)
+{
+    struct device *dev = &pdev->dev;
+    struct resource *res;
+    struct cdns *cdns;
+    void __iomem *regs;
+    int ret;
+
+    cdns = devm_kzalloc(dev, sizeof(*cdns), GFP_KERNEL);
+    if (!cdns)
+        return -ENOMEM;
+
+    cdns->dev = dev;
+    cdns->pdata = dev_get_platdata(dev);
+
+    platform_set_drvdata(pdev, cdns);
+
+    res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "host");
+    if (!res) {
+        dev_err(dev, "missing host IRQ\n");
+        return -ENODEV;
+    }
+
+    cdns->xhci_res[0] = *res;
+
+    res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "xhci");
+    if (!res) {
+        dev_err(dev, "couldn't get xhci resource\n");
+        return -ENXIO;
+    }
+
+    cdns->xhci_res[1] = *res;
+
+    cdns->dev_irq = platform_get_irq_byname(pdev, "peripheral");
+    if (cdns->dev_irq < 0)
+        return cdns->dev_irq;
+
+    regs = devm_platform_ioremap_resource_byname(pdev, "dev");
+    if (IS_ERR(regs))
+        return PTR_ERR(regs);
+    cdns->dev_regs = regs;
+
+    cdns->otg_irq = platform_get_irq_byname(pdev, "otg");
+    if (cdns->otg_irq < 0)
+        return cdns->otg_irq;
+
+    res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "otg");
+    if (!res) {
+        dev_err(dev, "couldn't get otg resource\n");
+        return -ENXIO;
+    }
+
+    cdns->phyrst_a_enable = device_property_read_bool(dev, "cdns,phyrst-a-enable");
+
+    cdns->otg_res = *res;
+
+    cdns->wakeup_irq = platform_get_irq_byname_optional(pdev, "wakeup");
+    if (cdns->wakeup_irq == -EPROBE_DEFER)
+        return cdns->wakeup_irq;
+    else if (cdns->wakeup_irq == 0)
+        return -EINVAL;
+
+    if (cdns->wakeup_irq < 0) {
+        dev_dbg(dev, "couldn't get wakeup irq\n");
+        cdns->wakeup_irq = 0x0;
+    }
+
+    cdns->usb2_phy = devm_phy_optional_get(dev, "cdns3,usb2-phy");
+    if (IS_ERR(cdns->usb2_phy))
+        return PTR_ERR(cdns->usb2_phy);
+
+    ret = phy_init(cdns->usb2_phy);
+    if (ret)
+        return ret;
+
+    cdns->usb3_phy = devm_phy_optional_get(dev, "cdns3,usb3-phy");
+    if (IS_ERR(cdns->usb3_phy))
+        return PTR_ERR(cdns->usb3_phy);
+
+    ret = phy_init(cdns->usb3_phy);
+    if (ret)
+        goto err_phy3_init;
+
+    ret = set_phy_power_on(cdns);
+    if (ret)
+        goto err_phy_power_on;
+
+    cdns->gadget_init = cdns3_gadget_init;
+
+    ret = cdns_init(cdns);
+    if (ret)
+        goto err_cdns_init;
+
+    device_set_wakeup_capable(dev, true);
+    pm_runtime_set_active(dev);
+    pm_runtime_enable(dev);
+    if (!(cdns->pdata && (cdns->pdata->quirks & CDNS3_DEFAULT_PM_RUNTIME_ALLOW)))
+        pm_runtime_forbid(dev);
+
+    /*
+     * The controller needs less time between bus and controller suspend,
+     * and we also need a small delay to avoid frequently entering low
+     * power mode.
+     */
+    pm_runtime_set_autosuspend_delay(dev, 20);
+    pm_runtime_mark_last_busy(dev);
+    pm_runtime_use_autosuspend(dev);
+
+    return 0;
+
+err_cdns_init:
+    set_phy_power_off(cdns);
+err_phy_power_on:
+    phy_exit(cdns->usb3_phy);
+err_phy3_init:
+    phy_exit(cdns->usb2_phy);
+
+    return ret;
+}
+
+/**
+ * cdns3_plat_remove - unbind drd driver and clean up
+ * @pdev: Pointer to Linux platform device
+ *
+ * Returns 0 on success, otherwise a negative errno
+ */
+static int cdns3_plat_remove(struct platform_device *pdev)
+{
+    struct cdns *cdns = platform_get_drvdata(pdev);
+    struct device *dev = cdns->dev;
+
+    pm_runtime_get_sync(dev);
+    pm_runtime_disable(dev);
+    pm_runtime_put_noidle(dev);
+    cdns_remove(cdns);
+    set_phy_power_off(cdns);
+    phy_exit(cdns->usb2_phy);
+    phy_exit(cdns->usb3_phy);
+    return 0;
+}
+
+#ifdef CONFIG_PM
+
+static int cdns3_set_platform_suspend(struct device *dev,
+                                      bool suspend, bool wakeup)
+{
+    struct cdns *cdns = dev_get_drvdata(dev);
+    int ret = 0;
+
+    if (cdns->pdata && cdns->pdata->platform_suspend)
+        ret = cdns->pdata->platform_suspend(dev, suspend, wakeup);
+
+    return ret;
+}
+
+static int cdns3_controller_suspend(struct device *dev, pm_message_t msg)
+{
+    struct cdns *cdns = dev_get_drvdata(dev);
+    bool wakeup;
+    unsigned long flags;
+
+    if (cdns->in_lpm)
+        return 0;
+
+    if (PMSG_IS_AUTO(msg))
+        wakeup = true;
+    else
+        wakeup = device_may_wakeup(dev);
+
+    cdns3_set_platform_suspend(cdns->dev, true, wakeup);
+    set_phy_power_off(cdns);
+    spin_lock_irqsave(&cdns->lock, flags);
+    cdns->in_lpm = true;
+    spin_unlock_irqrestore(&cdns->lock, flags);
+    dev_dbg(cdns->dev, "%s ends\n", __func__);
+
+    return 0;
+}
+
+static int cdns3_controller_resume(struct device *dev, pm_message_t msg)
+{
+    struct cdns *cdns = dev_get_drvdata(dev);
+    int ret;
+    unsigned long flags;
+
+    if (!cdns->in_lpm)
+        return 0;
+
+    ret = set_phy_power_on(cdns);
+    if (ret)
+        return ret;
+
+    cdns3_set_platform_suspend(cdns->dev, false, false);
+
+    spin_lock_irqsave(&cdns->lock, flags);
+    cdns_resume(cdns, !PMSG_IS_AUTO(msg));
+    cdns->in_lpm = false;
+    spin_unlock_irqrestore(&cdns->lock, flags);
+    if (cdns->wakeup_pending) {
+        cdns->wakeup_pending = false;
+        enable_irq(cdns->wakeup_irq);
+    }
+    dev_dbg(cdns->dev, "%s ends\n", __func__);
+
+    return ret;
+}
+
+static int cdns3_plat_runtime_suspend(struct device *dev)
+{
+    return cdns3_controller_suspend(dev, PMSG_AUTO_SUSPEND);
+}
+
+static int cdns3_plat_runtime_resume(struct device *dev)
+{
+    return cdns3_controller_resume(dev, PMSG_AUTO_RESUME);
+}
+
+#ifdef CONFIG_PM_SLEEP
+
+static int cdns3_plat_suspend(struct device *dev)
+{
+    struct cdns *cdns = dev_get_drvdata(dev);
+
+    cdns_suspend(cdns);
+
+    return cdns3_controller_suspend(dev, PMSG_SUSPEND);
+}
+
+static int cdns3_plat_resume(struct device *dev)
+{
+    return cdns3_controller_resume(dev, PMSG_RESUME);
+}
+#endif /* CONFIG_PM_SLEEP */
+#endif /* CONFIG_PM */
+
+static const struct dev_pm_ops cdns3_pm_ops = {
+    SET_SYSTEM_SLEEP_PM_OPS(cdns3_plat_suspend, cdns3_plat_resume)
+    SET_RUNTIME_PM_OPS(cdns3_plat_runtime_suspend,
+                       cdns3_plat_runtime_resume, NULL)
+};
+
+#ifdef CONFIG_OF
+static const struct of_device_id of_cdns3_match[] = {
+    { .compatible = "cdns,usb3" },
+    { },
+};
+MODULE_DEVICE_TABLE(of, of_cdns3_match);
+#endif
+
+static struct platform_driver cdns3_driver = {
+    .probe = cdns3_plat_probe,
+    .remove = cdns3_plat_remove,
+    .driver = {
+        .name = "cdns-usb3",
+        .of_match_table = of_match_ptr(of_cdns3_match),
+        .pm = &cdns3_pm_ops,
+    },
+};
+
+module_platform_driver(cdns3_driver);
+
+MODULE_ALIAS("platform:cdns3");
+MODULE_AUTHOR("Pawel Laszczak <pawell@cadence.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Cadence USB3 DRD Controller Driver");
diff --git a/drivers/usb/cdns3/cdns3-ti.c b/drivers/usb/cdns3/cdns3-ti.c
index 90e246601537..eccb1c766bba 100644
--- a/drivers/usb/cdns3/cdns3-ti.c
+++ b/drivers/usb/cdns3/cdns3-ti.c
@@ -214,6 +214,7 @@ static int cdns_ti_remove(struct platform_device *pdev)
 
 static const struct of_device_id cdns_ti_of_match[] = {
     { .compatible = "ti,j721e-usb", },
+    { .compatible = "ti,am64-usb", },
     {},
 };
 MODULE_DEVICE_TABLE(of, cdns_ti_of_match);
diff --git a/drivers/usb/cdns3/trace.c b/drivers/usb/cdns3/cdns3-trace.c
index 459fa72d9c74..b9858acaef02 100644
--- a/drivers/usb/cdns3/trace.c
+++ b/drivers/usb/cdns3/cdns3-trace.c
@@ -8,4 +8,4 @@
  */
 
 #define CREATE_TRACE_POINTS
-#include "trace.h"
+#include "cdns3-trace.h"
diff --git a/drivers/usb/cdns3/trace.h b/drivers/usb/cdns3/cdns3-trace.h
index 0a2a3269bfac..8648c7a7a9dd 100644
--- a/drivers/usb/cdns3/trace.h
+++ b/drivers/usb/cdns3/cdns3-trace.h
@@ -19,8 +19,8 @@
 #include <asm/byteorder.h>
 #include <linux/usb/ch9.h>
 #include "core.h"
-#include "gadget.h"
-#include "debug.h"
+#include "cdns3-gadget.h"
+#include "cdns3-debug.h"
 
 #define CDNS3_MSG_MAX 500
@@ -565,6 +565,6 @@ DEFINE_EVENT(cdns3_log_request_handled, cdns3_request_handled,
 #define TRACE_INCLUDE_PATH .
 #undef TRACE_INCLUDE_FILE
-#define TRACE_INCLUDE_FILE trace
+#define TRACE_INCLUDE_FILE cdns3-trace
 
 #include <trace/define_trace.h>
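The trace.h rename is why the Makefile's CFLAGS_cdns3-trace.o := -I$(src) line and the TRACE_INCLUDE_FILE define change together: when CREATE_TRACE_POINTS is defined, trace/define_trace.h re-includes the header by exactly that file name, found via TRACE_INCLUDE_PATH. A minimal sketch of the kernel tracepoint-header boilerplate this relies on, with a hypothetical subsystem and file name (my_subsys, my-trace.h):

/* my-trace.h */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM my_subsys

#if !defined(__MY_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
#define __MY_TRACE_H

#include <linux/tracepoint.h>

TRACE_EVENT(my_event,
    TP_PROTO(int val),
    TP_ARGS(val),
    TP_STRUCT__entry(__field(int, val)),
    TP_fast_assign(__entry->val = val;),
    TP_printk("val %d", __entry->val)
);

#endif /* __MY_TRACE_H */

/* These must match the real location and name of this header file. */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE my-trace
#include <trace/define_trace.h>

If the file is renamed without updating TRACE_INCLUDE_FILE, the re-include fails at build time, which is why both hunks appear in the same commit.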
diff --git a/drivers/usb/cdns3/cdnsp-debug.h b/drivers/usb/cdns3/cdnsp-debug.h
new file mode 100644
index 000000000000..a8776df2d4e0
--- /dev/null
+++ b/drivers/usb/cdns3/cdnsp-debug.h
@@ -0,0 +1,583 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Cadence CDNSP DRD Driver.
+ *
+ * Copyright (C) 2020 Cadence.
+ *
+ * Author: Pawel Laszczak <pawell@cadence.com>
+ */
+#ifndef __LINUX_CDNSP_DEBUG
+#define __LINUX_CDNSP_DEBUG
+
+static inline const char *cdnsp_trb_comp_code_string(u8 status)
+{
+    switch (status) {
+    case COMP_INVALID:
+        return "Invalid";
+    case COMP_SUCCESS:
+        return "Success";
+    case COMP_DATA_BUFFER_ERROR:
+        return "Data Buffer Error";
+    case COMP_BABBLE_DETECTED_ERROR:
+        return "Babble Detected";
+    case COMP_TRB_ERROR:
+        return "TRB Error";
+    case COMP_RESOURCE_ERROR:
+        return "Resource Error";
+    case COMP_NO_SLOTS_AVAILABLE_ERROR:
+        return "No Slots Available Error";
+    case COMP_INVALID_STREAM_TYPE_ERROR:
+        return "Invalid Stream Type Error";
+    case COMP_SLOT_NOT_ENABLED_ERROR:
+        return "Slot Not Enabled Error";
+    case COMP_ENDPOINT_NOT_ENABLED_ERROR:
+        return "Endpoint Not Enabled Error";
+    case COMP_SHORT_PACKET:
+        return "Short Packet";
+    case COMP_RING_UNDERRUN:
+        return "Ring Underrun";
+    case COMP_RING_OVERRUN:
+        return "Ring Overrun";
+    case COMP_VF_EVENT_RING_FULL_ERROR:
+        return "VF Event Ring Full Error";
+    case COMP_PARAMETER_ERROR:
+        return "Parameter Error";
+    case COMP_CONTEXT_STATE_ERROR:
+        return "Context State Error";
+    case COMP_EVENT_RING_FULL_ERROR:
+        return "Event Ring Full Error";
+    case COMP_INCOMPATIBLE_DEVICE_ERROR:
+        return "Incompatible Device Error";
+    case COMP_MISSED_SERVICE_ERROR:
+        return "Missed Service Error";
+    case COMP_COMMAND_RING_STOPPED:
+        return "Command Ring Stopped";
+    case COMP_COMMAND_ABORTED:
+        return "Command Aborted";
+    case COMP_STOPPED:
+        return "Stopped";
+    case COMP_STOPPED_LENGTH_INVALID:
+        return "Stopped - Length Invalid";
+    case COMP_STOPPED_SHORT_PACKET:
+        return "Stopped - Short Packet";
+    case COMP_MAX_EXIT_LATENCY_TOO_LARGE_ERROR:
+        return "Max Exit Latency Too Large Error";
+    case COMP_ISOCH_BUFFER_OVERRUN:
+        return "Isoch Buffer Overrun";
+    case COMP_EVENT_LOST_ERROR:
+        return "Event Lost Error";
+    case COMP_UNDEFINED_ERROR:
+        return "Undefined Error";
+    case COMP_INVALID_STREAM_ID_ERROR:
+        return "Invalid Stream ID Error";
+    default:
+        return "Unknown!!";
+    }
+}
+
+static inline const char *cdnsp_trb_type_string(u8 type)
+{
+    switch (type) {
+    case TRB_NORMAL:
+        return "Normal";
+    case TRB_SETUP:
+        return "Setup Stage";
+    case TRB_DATA:
+        return "Data Stage";
+    case TRB_STATUS:
+        return "Status Stage";
+    case TRB_ISOC:
+        return "Isoch";
+    case TRB_LINK:
+        return "Link";
+    case TRB_EVENT_DATA:
+        return "Event Data";
+    case TRB_TR_NOOP:
+        return "No-Op";
+    case TRB_ENABLE_SLOT:
+        return "Enable Slot Command";
+    case TRB_DISABLE_SLOT:
+        return "Disable Slot Command";
+    case TRB_ADDR_DEV:
+        return "Address Device Command";
+    case TRB_CONFIG_EP:
+        return "Configure Endpoint Command";
+    case TRB_EVAL_CONTEXT:
+        return "Evaluate Context Command";
+    case TRB_RESET_EP:
+        return "Reset Endpoint Command";
+    case TRB_STOP_RING:
+        return "Stop Ring Command";
+    case TRB_SET_DEQ:
+        return "Set TR Dequeue Pointer Command";
+    case TRB_RESET_DEV:
+        return "Reset Device Command";
+    case TRB_FORCE_HEADER:
+        return "Force Header Command";
+    case TRB_CMD_NOOP:
+        return "No-Op Command";
+    case TRB_TRANSFER:
+        return "Transfer Event";
+    case TRB_COMPLETION:
+        return "Command Completion Event";
+    case TRB_PORT_STATUS:
+        return "Port Status Change Event";
+    case TRB_HC_EVENT:
+        return "Device Controller Event";
+    case TRB_MFINDEX_WRAP:
+        return "MFINDEX Wrap Event";
+    case TRB_ENDPOINT_NRDY:
+        return "Endpoint Not Ready";
+    case TRB_HALT_ENDPOINT:
+        return "Halt Endpoint";
+    case TRB_FLUSH_ENDPOINT:
+        return "Flush Endpoint";
+    default:
+        return "UNKNOWN";
+    }
+}
+
+static inline const char *cdnsp_ring_type_string(enum cdnsp_ring_type type)
+{
+    switch (type) {
+    case TYPE_CTRL:
+        return "CTRL";
+    case TYPE_ISOC:
+        return "ISOC";
+    case TYPE_BULK:
+        return "BULK";
+    case TYPE_INTR:
+        return "INTR";
+    case TYPE_STREAM:
+        return "STREAM";
+    case TYPE_COMMAND:
+        return "CMD";
+    case TYPE_EVENT:
+        return "EVENT";
+    }
+
+    return "UNKNOWN";
+}
+
+static inline char *cdnsp_slot_state_string(u32 state)
+{
+    switch (state) {
+    case SLOT_STATE_ENABLED:
+        return "enabled/disabled";
+    case SLOT_STATE_DEFAULT:
+        return "default";
+    case SLOT_STATE_ADDRESSED:
+        return "addressed";
+    case SLOT_STATE_CONFIGURED:
+        return "configured";
+    default:
+        return "reserved";
+    }
+}
+
+static inline const char *cdnsp_decode_trb(char *str, size_t size, u32 field0,
+                                           u32 field1, u32 field2, u32 field3)
+{
+    int ep_id = TRB_TO_EP_INDEX(field3) - 1;
+    int type = TRB_FIELD_TO_TYPE(field3);
+    unsigned int ep_num;
+    int ret = 0;
+    u32 temp;
+
+    ep_num = DIV_ROUND_UP(ep_id, 2);
+
+    switch (type) {
+    case TRB_LINK:
+        ret += snprintf(str, size,
+                        "LINK %08x%08x intr %ld type '%s' flags %c:%c:%c:%c",
+                        field1, field0, GET_INTR_TARGET(field2),
+                        cdnsp_trb_type_string(type),
+                        field3 & TRB_IOC ? 'I' : 'i',
+                        field3 & TRB_CHAIN ? 'C' : 'c',
+                        field3 & TRB_TC ? 'T' : 't',
+                        field3 & TRB_CYCLE ? 'C' : 'c');
+        break;
+    case TRB_TRANSFER:
+    case TRB_COMPLETION:
+    case TRB_PORT_STATUS:
+    case TRB_HC_EVENT:
+        ret += snprintf(str, size,
+                        "ep%d%s(%d) type '%s' TRB %08x%08x status '%s'"
+                        " len %ld slot %ld flags %c:%c",
+                        ep_num, ep_id % 2 ? "out" : "in",
+                        TRB_TO_EP_INDEX(field3),
+                        cdnsp_trb_type_string(type), field1, field0,
+                        cdnsp_trb_comp_code_string(GET_COMP_CODE(field2)),
+                        EVENT_TRB_LEN(field2), TRB_TO_SLOT_ID(field3),
+                        field3 & EVENT_DATA ? 'E' : 'e',
+                        field3 & TRB_CYCLE ? 'C' : 'c');
+        break;
+    case TRB_MFINDEX_WRAP:
+        ret += snprintf(str, size, "%s: flags %c",
+                        cdnsp_trb_type_string(type),
+                        field3 & TRB_CYCLE ? 'C' : 'c');
+        break;
+    case TRB_SETUP:
+        ret += snprintf(str, size,
+                        "type '%s' bRequestType %02x bRequest %02x "
+                        "wValue %02x%02x wIndex %02x%02x wLength %d "
+                        "length %ld TD size %ld intr %ld Setup ID %ld "
+                        "flags %c:%c:%c",
+                        cdnsp_trb_type_string(type),
+                        field0 & 0xff,
+                        (field0 & 0xff00) >> 8,
+                        (field0 & 0xff000000) >> 24,
+                        (field0 & 0xff0000) >> 16,
+                        (field1 & 0xff00) >> 8,
+                        field1 & 0xff,
+                        (field1 & 0xff000000) >> 16 |
+                        (field1 & 0xff0000) >> 16,
+                        TRB_LEN(field2), GET_TD_SIZE(field2),
+                        GET_INTR_TARGET(field2),
+                        TRB_SETUPID_TO_TYPE(field3),
+                        field3 & TRB_IDT ? 'D' : 'd',
+                        field3 & TRB_IOC ? 'I' : 'i',
+                        field3 & TRB_CYCLE ? 'C' : 'c');
+        break;
+    case TRB_DATA:
+        ret += snprintf(str, size,
+                        "type '%s' Buffer %08x%08x length %ld TD size %ld "
+                        "intr %ld flags %c:%c:%c:%c:%c:%c:%c",
+                        cdnsp_trb_type_string(type),
+                        field1, field0, TRB_LEN(field2),
+                        GET_TD_SIZE(field2),
+                        GET_INTR_TARGET(field2),
+                        field3 & TRB_IDT ? 'D' : 'i',
+                        field3 & TRB_IOC ? 'I' : 'i',
+                        field3 & TRB_CHAIN ? 'C' : 'c',
+                        field3 & TRB_NO_SNOOP ? 'S' : 's',
+                        field3 & TRB_ISP ? 'I' : 'i',
+                        field3 & TRB_ENT ? 'E' : 'e',
+                        field3 & TRB_CYCLE ? 'C' : 'c');
+        break;
+    case TRB_STATUS:
+        ret += snprintf(str, size,
+                        "Buffer %08x%08x length %ld TD size %ld intr"
+                        "%ld type '%s' flags %c:%c:%c:%c",
+                        field1, field0, TRB_LEN(field2),
+                        GET_TD_SIZE(field2),
+                        GET_INTR_TARGET(field2),
+                        cdnsp_trb_type_string(type),
+                        field3 & TRB_IOC ? 'I' : 'i',
+                        field3 & TRB_CHAIN ? 'C' : 'c',
+                        field3 & TRB_ENT ? 'E' : 'e',
+                        field3 & TRB_CYCLE ? 'C' : 'c');
+        break;
+    case TRB_NORMAL:
+    case TRB_ISOC:
+    case TRB_EVENT_DATA:
+    case TRB_TR_NOOP:
+        ret += snprintf(str, size,
+                        "type '%s' Buffer %08x%08x length %ld "
+                        "TD size %ld intr %ld "
+                        "flags %c:%c:%c:%c:%c:%c:%c:%c:%c",
+                        cdnsp_trb_type_string(type),
+                        field1, field0, TRB_LEN(field2),
+                        GET_TD_SIZE(field2),
+                        GET_INTR_TARGET(field2),
+                        field3 & TRB_BEI ? 'B' : 'b',
+                        field3 & TRB_IDT ? 'T' : 't',
+                        field3 & TRB_IOC ? 'I' : 'i',
+                        field3 & TRB_CHAIN ? 'C' : 'c',
+                        field3 & TRB_NO_SNOOP ? 'S' : 's',
+                        field3 & TRB_ISP ? 'I' : 'i',
+                        field3 & TRB_ENT ? 'E' : 'e',
+                        field3 & TRB_CYCLE ? 'C' : 'c',
+                        !(field3 & TRB_EVENT_INVALIDATE) ? 'V' : 'v');
+        break;
+    case TRB_CMD_NOOP:
+    case TRB_ENABLE_SLOT:
+        ret += snprintf(str, size, "%s: flags %c",
+                        cdnsp_trb_type_string(type),
+                        field3 & TRB_CYCLE ? 'C' : 'c');
+        break;
+    case TRB_DISABLE_SLOT:
+        ret += snprintf(str, size, "%s: slot %ld flags %c",
+                        cdnsp_trb_type_string(type),
+                        TRB_TO_SLOT_ID(field3),
+                        field3 & TRB_CYCLE ? 'C' : 'c');
+        break;
+    case TRB_ADDR_DEV:
+        ret += snprintf(str, size,
+                        "%s: ctx %08x%08x slot %ld flags %c:%c",
+                        cdnsp_trb_type_string(type), field1, field0,
+                        TRB_TO_SLOT_ID(field3),
+                        field3 & TRB_BSR ? 'B' : 'b',
+                        field3 & TRB_CYCLE ? 'C' : 'c');
+        break;
+    case TRB_CONFIG_EP:
+        ret += snprintf(str, size,
+                        "%s: ctx %08x%08x slot %ld flags %c:%c",
+                        cdnsp_trb_type_string(type), field1, field0,
+                        TRB_TO_SLOT_ID(field3),
+                        field3 & TRB_DC ? 'D' : 'd',
+                        field3 & TRB_CYCLE ? 'C' : 'c');
+        break;
+    case TRB_EVAL_CONTEXT:
+        ret += snprintf(str, size,
+                        "%s: ctx %08x%08x slot %ld flags %c",
+                        cdnsp_trb_type_string(type), field1, field0,
+                        TRB_TO_SLOT_ID(field3),
+                        field3 & TRB_CYCLE ? 'C' : 'c');
+        break;
+    case TRB_RESET_EP:
+    case TRB_HALT_ENDPOINT:
+    case TRB_FLUSH_ENDPOINT:
+        ret += snprintf(str, size,
+                        "%s: ep%d%s(%d) ctx %08x%08x slot %ld flags %c",
+                        cdnsp_trb_type_string(type),
+                        ep_num, ep_id % 2 ? "out" : "in",
+                        TRB_TO_EP_INDEX(field3), field1, field0,
+                        TRB_TO_SLOT_ID(field3),
+                        field3 & TRB_CYCLE ? 'C' : 'c');
+        break;
+    case TRB_STOP_RING:
+        ret += snprintf(str, size,
+                        "%s: ep%d%s(%d) slot %ld sp %d flags %c",
+                        cdnsp_trb_type_string(type),
+                        ep_num, ep_id % 2 ? "out" : "in",
+                        TRB_TO_EP_INDEX(field3),
+                        TRB_TO_SLOT_ID(field3),
+                        TRB_TO_SUSPEND_PORT(field3),
+                        field3 & TRB_CYCLE ? 'C' : 'c');
+        break;
+    case TRB_SET_DEQ:
+        ret += snprintf(str, size,
+                        "%s: ep%d%s(%d) deq %08x%08x stream %ld slot %ld flags %c",
+                        cdnsp_trb_type_string(type),
+                        ep_num, ep_id % 2 ? "out" : "in",
+                        TRB_TO_EP_INDEX(field3), field1, field0,
+                        TRB_TO_STREAM_ID(field2),
+                        TRB_TO_SLOT_ID(field3),
+                        field3 & TRB_CYCLE ? 'C' : 'c');
+        break;
+    case TRB_RESET_DEV:
+        ret += snprintf(str, size, "%s: slot %ld flags %c",
+                        cdnsp_trb_type_string(type),
+                        TRB_TO_SLOT_ID(field3),
+                        field3 & TRB_CYCLE ? 'C' : 'c');
+        break;
+    case TRB_ENDPOINT_NRDY:
+        temp = TRB_TO_HOST_STREAM(field2);
+
+        ret += snprintf(str, size,
+                        "%s: ep%d%s(%d) H_SID %x%s%s D_SID %lx flags %c:%c",
+                        cdnsp_trb_type_string(type),
+                        ep_num, ep_id % 2 ? "out" : "in",
+                        TRB_TO_EP_INDEX(field3), temp,
+                        temp == STREAM_PRIME_ACK ? "(PRIME)" : "",
+                        temp == STREAM_REJECTED ? "(REJECTED)" : "",
+                        TRB_TO_DEV_STREAM(field0),
+                        field3 & TRB_STAT ? 'S' : 's',
+                        field3 & TRB_CYCLE ? 'C' : 'c');
+        break;
+    default:
+        ret += snprintf(str, size,
+                        "type '%s' -> raw %08x %08x %08x %08x",
+                        cdnsp_trb_type_string(type),
+                        field0, field1, field2, field3);
+    }
+
+    return str;
+}
+
+static inline const char *cdnsp_decode_slot_context(u32 info, u32 info2,
+                                                    u32 int_target, u32 state)
+{
+    static char str[1024];
+    int ret = 0;
+    u32 speed;
+    char *s;
+
+    speed = info & DEV_SPEED;
+
+    switch (speed) {
+    case SLOT_SPEED_FS:
+        s = "full-speed";
+        break;
+    case SLOT_SPEED_HS:
+        s = "high-speed";
+        break;
+    case SLOT_SPEED_SS:
+        s = "super-speed";
+        break;
+    case SLOT_SPEED_SSP:
+        s = "super-speed plus";
+        break;
+    default:
+        s = "UNKNOWN speed";
+    }
+
+    ret = sprintf(str, "%s Ctx Entries %d",
+                  s, (info & LAST_CTX_MASK) >> 27);
+
+    ret += sprintf(str + ret, " [Intr %ld] Addr %ld State %s",
+                   GET_INTR_TARGET(int_target), state & DEV_ADDR_MASK,
+                   cdnsp_slot_state_string(GET_SLOT_STATE(state)));
+
+    return str;
+}
+
+static inline const char *cdnsp_portsc_link_state_string(u32 portsc)
+{
+    switch (portsc & PORT_PLS_MASK) {
+    case XDEV_U0:
+        return "U0";
+    case XDEV_U1:
+        return "U1";
+    case XDEV_U2:
+        return "U2";
+    case XDEV_U3:
+        return "U3";
+    case XDEV_DISABLED:
+        return "Disabled";
+    case XDEV_RXDETECT:
+        return "RxDetect";
+    case XDEV_INACTIVE:
+        return "Inactive";
+    case XDEV_POLLING:
+        return "Polling";
+    case XDEV_RECOVERY:
+        return "Recovery";
+    case XDEV_HOT_RESET:
+        return "Hot Reset";
+    case XDEV_COMP_MODE:
+        return "Compliance mode";
+    case XDEV_TEST_MODE:
+        return "Test mode";
+    case XDEV_RESUME:
+        return "Resume";
+    default:
+        break;
+    }
+
+    return "Unknown";
+}
+
+static inline const char *cdnsp_decode_portsc(char *str, size_t size,
+                                              u32 portsc)
+{
+    int ret;
+
+    ret = snprintf(str, size, "%s %s %s Link:%s PortSpeed:%d ",
+                   portsc & PORT_POWER ? "Powered" : "Powered-off",
+                   portsc & PORT_CONNECT ? "Connected" : "Not-connected",
+                   portsc & PORT_PED ? "Enabled" : "Disabled",
+                   cdnsp_portsc_link_state_string(portsc),
+                   DEV_PORT_SPEED(portsc));
+
+    if (portsc & PORT_RESET)
+        ret += snprintf(str + ret, size - ret, "In-Reset ");
+
+    ret += snprintf(str + ret, size - ret, "Change: ");
+    if (portsc & PORT_CSC)
+        ret += snprintf(str + ret, size - ret, "CSC ");
+    if (portsc & PORT_WRC)
+        ret += snprintf(str + ret, size - ret, "WRC ");
+    if (portsc & PORT_RC)
+        ret += snprintf(str + ret, size - ret, "PRC ");
+    if (portsc & PORT_PLC)
+        ret += snprintf(str + ret, size - ret, "PLC ");
+    if (portsc & PORT_CEC)
+        ret += snprintf(str + ret, size - ret, "CEC ");
+    ret += snprintf(str + ret, size - ret, "Wake: ");
+    if (portsc & PORT_WKCONN_E)
+        ret += snprintf(str + ret, size - ret, "WCE ");
+    if (portsc & PORT_WKDISC_E)
+        ret += snprintf(str + ret, size - ret, "WDE ");
+
+    return str;
+}
+
+static inline const char *cdnsp_ep_state_string(u8 state)
+{
+    switch (state) {
+    case EP_STATE_DISABLED:
+        return "disabled";
+    case EP_STATE_RUNNING:
+        return "running";
+    case EP_STATE_HALTED:
+        return "halted";
+    case EP_STATE_STOPPED:
+        return "stopped";
+    case EP_STATE_ERROR:
+        return "error";
+    default:
+        return "INVALID";
+    }
+}
+
+static inline const char *cdnsp_ep_type_string(u8 type)
+{
+    switch (type) {
+    case ISOC_OUT_EP:
+        return "Isoc OUT";
+    case BULK_OUT_EP:
+        return "Bulk OUT";
+    case INT_OUT_EP:
+        return "Int OUT";
+    case CTRL_EP:
+        return "Ctrl";
+    case ISOC_IN_EP:
+        return "Isoc IN";
+    case BULK_IN_EP:
+        return "Bulk IN";
+    case INT_IN_EP:
+        return "Int IN";
+    default:
+        return "INVALID";
+    }
+}
+
+static inline const char *cdnsp_decode_ep_context(char *str, size_t size,
+                                                  u32 info, u32 info2,
+                                                  u64 deq, u32 tx_info)
+{
+    u8 max_pstr, ep_state, interval, ep_type, burst, cerr, mult;
+    bool lsa, hid;
+    u16 maxp, avg;
+    u32 esit;
+    int ret;
+
+    esit = CTX_TO_MAX_ESIT_PAYLOAD_HI(info) << 16 |
+           CTX_TO_MAX_ESIT_PAYLOAD_LO(tx_info);
+
+    ep_state = info & EP_STATE_MASK;
+    max_pstr = CTX_TO_EP_MAXPSTREAMS(info);
+    interval = CTX_TO_EP_INTERVAL(info);
+    mult = CTX_TO_EP_MULT(info) + 1;
+    lsa = !!(info & EP_HAS_LSA);
+
+    cerr = (info2 & (3 << 1)) >> 1;
+    ep_type = CTX_TO_EP_TYPE(info2);
+    hid = !!(info2 & (1 << 7));
+    burst = CTX_TO_MAX_BURST(info2);
+    maxp = MAX_PACKET_DECODED(info2);
+
+    avg = EP_AVG_TRB_LENGTH(tx_info);
+
+    ret = snprintf(str, size, "State %s mult %d max P. Streams %d %s",
+                   cdnsp_ep_state_string(ep_state), mult,
+                   max_pstr, lsa ? "LSA " : "");
+
+    ret += snprintf(str + ret, size - ret,
+                    "interval %d us max ESIT payload %d CErr %d ",
+                    (1 << interval) * 125, esit, cerr);
+
+    ret += snprintf(str + ret, size - ret,
+                    "Type %s %sburst %d maxp %d deq %016llx ",
+                    cdnsp_ep_type_string(ep_type), hid ? "HID" : "",
+                    burst, maxp, deq);
+
+    ret += snprintf(str + ret, size - ret, "avg trb len %d", avg);
+
+    return str;
+}
+
+#endif /*__LINUX_CDNSP_DEBUG*/
diff --git a/drivers/usb/cdns3/cdnsp-ep0.c b/drivers/usb/cdns3/cdnsp-ep0.c
new file mode 100644
index 000000000000..9b8325f82499
--- /dev/null
+++ b/drivers/usb/cdns3/cdnsp-ep0.c
@@ -0,0 +1,489 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Cadence CDNSP DRD Driver.
+ *
+ * Copyright (C) 2020 Cadence.
+ *
+ * Author: Pawel Laszczak <pawell@cadence.com>
+ */
+
+#include <linux/usb/composite.h>
+#include <linux/usb/gadget.h>
+#include <linux/list.h>
+
+#include "cdnsp-gadget.h"
+#include "cdnsp-trace.h"
+
+static void cdnsp_ep0_stall(struct cdnsp_device *pdev)
+{
+    struct cdnsp_request *preq;
+    struct cdnsp_ep *pep;
+
+    pep = &pdev->eps[0];
+    preq = next_request(&pep->pending_list);
+
+    if (pdev->three_stage_setup) {
+        cdnsp_halt_endpoint(pdev, pep, true);
+
+        if (preq)
+            cdnsp_gadget_giveback(pep, preq, -ECONNRESET);
+    } else {
+        pep->ep_state |= EP0_HALTED_STATUS;
+
+        if (preq)
+            list_del(&preq->list);
+
+        cdnsp_status_stage(pdev);
+    }
+}
+
+static int cdnsp_ep0_delegate_req(struct cdnsp_device *pdev,
+                                  struct usb_ctrlrequest *ctrl)
+{
+    int ret;
+
+    spin_unlock(&pdev->lock);
+    ret = pdev->gadget_driver->setup(&pdev->gadget, ctrl);
+    spin_lock(&pdev->lock);
+
+    return ret;
+}
+
+static int cdnsp_ep0_set_config(struct cdnsp_device *pdev,
+                                struct usb_ctrlrequest *ctrl)
+{
+    enum usb_device_state state = pdev->gadget.state;
+    u32 cfg;
+    int ret;
+
+    cfg = le16_to_cpu(ctrl->wValue);
+
+    switch (state) {
+    case USB_STATE_ADDRESS:
+        trace_cdnsp_ep0_set_config("from Address state");
+        break;
+    case USB_STATE_CONFIGURED:
+        trace_cdnsp_ep0_set_config("from Configured state");
+        break;
+    default:
+        dev_err(pdev->dev, "Set Configuration - bad device state\n");
+        return -EINVAL;
+    }
+
+    ret = cdnsp_ep0_delegate_req(pdev, ctrl);
+    if (ret)
+        return ret;
+
+    if (!cfg)
+        usb_gadget_set_state(&pdev->gadget, USB_STATE_ADDRESS);
+
+    return 0;
+}
+
+static int cdnsp_ep0_set_address(struct cdnsp_device *pdev,
+                                 struct usb_ctrlrequest *ctrl)
+{
+    enum usb_device_state state = pdev->gadget.state;
+    struct cdnsp_slot_ctx *slot_ctx;
+    unsigned int slot_state;
+    int ret;
+    u32 addr;
+
+    addr = le16_to_cpu(ctrl->wValue);
+
+    if (addr > 127) {
+        dev_err(pdev->dev, "Invalid device address %d\n", addr);
+        return -EINVAL;
+    }
+
+    slot_ctx = cdnsp_get_slot_ctx(&pdev->out_ctx);
+
+    if (state == USB_STATE_CONFIGURED) {
+        dev_err(pdev->dev, "Can't Set Address from Configured State\n");
+        return -EINVAL;
+    }
+
+    pdev->device_address = le16_to_cpu(ctrl->wValue);
+
+    slot_ctx = cdnsp_get_slot_ctx(&pdev->out_ctx);
+    slot_state = GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state));
+    if (slot_state == SLOT_STATE_ADDRESSED)
+        cdnsp_reset_device(pdev);
+
+    /* Set the device address. */
+    ret = cdnsp_setup_device(pdev, SETUP_CONTEXT_ADDRESS);
+    if (ret)
+        return ret;
+
+    if (addr)
+        usb_gadget_set_state(&pdev->gadget, USB_STATE_ADDRESS);
+    else
+        usb_gadget_set_state(&pdev->gadget, USB_STATE_DEFAULT);
+
+    return 0;
+}
+
+int cdnsp_status_stage(struct cdnsp_device *pdev)
+{
+    pdev->ep0_stage = CDNSP_STATUS_STAGE;
+    pdev->ep0_preq.request.length = 0;
+
+    return cdnsp_ep_enqueue(pdev->ep0_preq.pep, &pdev->ep0_preq);
+}
+
+static int cdnsp_w_index_to_ep_index(u16 wIndex)
+{
+    if (!(wIndex & USB_ENDPOINT_NUMBER_MASK))
+        return 0;
+
+    return ((wIndex & USB_ENDPOINT_NUMBER_MASK) * 2) +
+           (wIndex & USB_ENDPOINT_DIR_MASK ? 1 : 0) - 1;
+}
+
+static int cdnsp_ep0_handle_status(struct cdnsp_device *pdev,
+                                   struct usb_ctrlrequest *ctrl)
+{
+    struct cdnsp_ep *pep;
+    __le16 *response;
+    int ep_sts = 0;
+    u16 status = 0;
+    u32 recipient;
+
+    recipient = ctrl->bRequestType & USB_RECIP_MASK;
+
+    switch (recipient) {
+    case USB_RECIP_DEVICE:
+        status = pdev->gadget.is_selfpowered;
+        status |= pdev->may_wakeup << USB_DEVICE_REMOTE_WAKEUP;
+
+        if (pdev->gadget.speed >= USB_SPEED_SUPER) {
+            status |= pdev->u1_allowed << USB_DEV_STAT_U1_ENABLED;
+            status |= pdev->u2_allowed << USB_DEV_STAT_U2_ENABLED;
+        }
+        break;
+    case USB_RECIP_INTERFACE:
+        /*
+         * Function Remote Wake Capable   D0
+         * Function Remote Wakeup         D1
+         */
+        return cdnsp_ep0_delegate_req(pdev, ctrl);
+    case USB_RECIP_ENDPOINT:
+        ep_sts = cdnsp_w_index_to_ep_index(le16_to_cpu(ctrl->wIndex));
+        pep = &pdev->eps[ep_sts];
+        ep_sts = GET_EP_CTX_STATE(pep->out_ctx);
+
+        /* Check whether the endpoint is stalled. */
+        if (ep_sts == EP_STATE_HALTED)
+            status = BIT(USB_ENDPOINT_HALT);
+        break;
+    default:
+        return -EINVAL;
+    }
+
+    response = (__le16 *)pdev->setup_buf;
+    *response = cpu_to_le16(status);
+
+    pdev->ep0_preq.request.length = sizeof(*response);
+    pdev->ep0_preq.request.buf = pdev->setup_buf;
+
+    return cdnsp_ep_enqueue(pdev->ep0_preq.pep, &pdev->ep0_preq);
+}
+
+static void cdnsp_enter_test_mode(struct cdnsp_device *pdev)
+{
+    u32 temp;
+
+    temp = readl(&pdev->active_port->regs->portpmsc) & ~GENMASK(31, 28);
+    temp |= PORT_TEST_MODE(pdev->test_mode);
+    writel(temp, &pdev->active_port->regs->portpmsc);
+}
+
+static int cdnsp_ep0_handle_feature_device(struct cdnsp_device *pdev,
+                                           struct usb_ctrlrequest *ctrl,
+                                           int set)
+{
+    enum usb_device_state state;
+    enum usb_device_speed speed;
+    u16 tmode;
+
+    state = pdev->gadget.state;
+    speed = pdev->gadget.speed;
+
+    switch (le16_to_cpu(ctrl->wValue)) {
+    case USB_DEVICE_REMOTE_WAKEUP:
+        pdev->may_wakeup = !!set;
+        trace_cdnsp_may_wakeup(set);
+        break;
+    case USB_DEVICE_U1_ENABLE:
+        if (state != USB_STATE_CONFIGURED || speed < USB_SPEED_SUPER)
+            return -EINVAL;
+
+        pdev->u1_allowed = !!set;
+        trace_cdnsp_u1(set);
+        break;
+    case USB_DEVICE_U2_ENABLE:
+        if (state != USB_STATE_CONFIGURED || speed < USB_SPEED_SUPER)
+            return -EINVAL;
+
+        pdev->u2_allowed = !!set;
+        trace_cdnsp_u2(set);
+        break;
+    case USB_DEVICE_LTM_ENABLE:
+        return -EINVAL;
+    case USB_DEVICE_TEST_MODE:
+        if (state != USB_STATE_CONFIGURED || speed > USB_SPEED_HIGH)
+            return -EINVAL;
+
+        tmode = le16_to_cpu(ctrl->wIndex);
+
+        if (!set || (tmode & 0xff) != 0)
+            return -EINVAL;
+
+        tmode = tmode >> 8;
+
+        if (tmode > USB_TEST_FORCE_ENABLE || tmode < USB_TEST_J)
+            return -EINVAL;
+
+        pdev->test_mode = tmode;
+
+        /*
+         * Test mode must be set before the Status Stage, but the
+         * controller will start the testing sequence after the
+         * Status Stage.
+         */
+        cdnsp_enter_test_mode(pdev);
+        break;
+    default:
+        return -EINVAL;
+    }
+
+    return 0;
+}
+
+static int cdnsp_ep0_handle_feature_intf(struct cdnsp_device *pdev,
+                                         struct usb_ctrlrequest *ctrl,
+                                         int set)
+{
+    u16 wValue, wIndex;
+    int ret;
+
+    wValue = le16_to_cpu(ctrl->wValue);
+    wIndex = le16_to_cpu(ctrl->wIndex);
+
+    switch (wValue) {
+    case USB_INTRF_FUNC_SUSPEND:
+        ret = cdnsp_ep0_delegate_req(pdev, ctrl);
+        if (ret)
+            return ret;
+
+        /*
+         * Remote wakeup is enabled when any function within a device
+         * is enabled for function remote wakeup.
+         */
+        if (wIndex & USB_INTRF_FUNC_SUSPEND_RW)
+            pdev->may_wakeup++;
+        else if (pdev->may_wakeup > 0)
+            pdev->may_wakeup--;
+
+        return 0;
+    default:
+        return -EINVAL;
+    }
+
+    return 0;
+}
+
+static int cdnsp_ep0_handle_feature_endpoint(struct cdnsp_device *pdev,
+                                             struct usb_ctrlrequest *ctrl,
+                                             int set)
+{
+    struct cdnsp_ep *pep;
+    u16 wValue;
+
+    wValue = le16_to_cpu(ctrl->wValue);
+    pep = &pdev->eps[cdnsp_w_index_to_ep_index(le16_to_cpu(ctrl->wIndex))];
+
+    switch (wValue) {
+    case USB_ENDPOINT_HALT:
+        if (!set && (pep->ep_state & EP_WEDGE)) {
+            /* Resets the sequence number. */
+            cdnsp_halt_endpoint(pdev, pep, 0);
+            cdnsp_halt_endpoint(pdev, pep, 1);
+            break;
+        }
+
+        return cdnsp_halt_endpoint(pdev, pep, set);
+    default:
+        dev_warn(pdev->dev, "WARN Incorrect wValue %04x\n", wValue);
+        return -EINVAL;
+    }
+
+    return 0;
+}
+
+static int cdnsp_ep0_handle_feature(struct cdnsp_device *pdev,
+                                    struct usb_ctrlrequest *ctrl,
+                                    int set)
+{
+    switch (ctrl->bRequestType & USB_RECIP_MASK) {
+    case USB_RECIP_DEVICE:
+        return cdnsp_ep0_handle_feature_device(pdev, ctrl, set);
+    case USB_RECIP_INTERFACE:
+        return cdnsp_ep0_handle_feature_intf(pdev, ctrl, set);
+    case USB_RECIP_ENDPOINT:
+        return cdnsp_ep0_handle_feature_endpoint(pdev, ctrl, set);
+    default:
+        return -EINVAL;
+    }
+}
+
+static int cdnsp_ep0_set_sel(struct cdnsp_device *pdev,
+                             struct usb_ctrlrequest *ctrl)
+{
+    enum usb_device_state state = pdev->gadget.state;
+    u16 wLength;
+
+    if (state == USB_STATE_DEFAULT)
+        return -EINVAL;
+
+    wLength = le16_to_cpu(ctrl->wLength);
+
+    if (wLength != 6) {
+        dev_err(pdev->dev, "Set SEL should be 6 bytes, got %d\n",
+                wLength);
+        return -EINVAL;
+    }
+
+    /*
+     * To handle Set SEL we need to receive 6 bytes from the host, so
+     * queue a usb_request for 6 bytes.
+     */
+    pdev->ep0_preq.request.length = 6;
+    pdev->ep0_preq.request.buf = pdev->setup_buf;
+
+    return cdnsp_ep_enqueue(pdev->ep0_preq.pep, &pdev->ep0_preq);
+}
+
+static int cdnsp_ep0_set_isoch_delay(struct cdnsp_device *pdev,
+                                     struct usb_ctrlrequest *ctrl)
+{
+    if (le16_to_cpu(ctrl->wIndex) || le16_to_cpu(ctrl->wLength))
+        return -EINVAL;
+
+    pdev->gadget.isoch_delay = le16_to_cpu(ctrl->wValue);
+
+    return 0;
+}
+
+static int cdnsp_ep0_std_request(struct cdnsp_device *pdev,
+                                 struct usb_ctrlrequest *ctrl)
+{
+    int ret;
+
+    switch (ctrl->bRequest) {
+    case USB_REQ_GET_STATUS:
+        ret = cdnsp_ep0_handle_status(pdev, ctrl);
+        break;
+    case USB_REQ_CLEAR_FEATURE:
+        ret = cdnsp_ep0_handle_feature(pdev, ctrl, 0);
+        break;
+    case USB_REQ_SET_FEATURE:
+        ret = cdnsp_ep0_handle_feature(pdev, ctrl, 1);
+        break;
+    case USB_REQ_SET_ADDRESS:
+        ret = cdnsp_ep0_set_address(pdev, ctrl);
+        break;
+    case USB_REQ_SET_CONFIGURATION:
+        ret = cdnsp_ep0_set_config(pdev, ctrl);
+        break;
+    case USB_REQ_SET_SEL:
+        ret = cdnsp_ep0_set_sel(pdev, ctrl);
+        break;
+    case USB_REQ_SET_ISOCH_DELAY:
+        ret = cdnsp_ep0_set_isoch_delay(pdev, ctrl);
+        break;
+    case USB_REQ_SET_INTERFACE:
+        /*
+         * Add the request to the pending list to block sending of the
+         * status stage by libcomposite.
+         */
+        list_add_tail(&pdev->ep0_preq.list,
+                      &pdev->ep0_preq.pep->pending_list);
+
+        ret = cdnsp_ep0_delegate_req(pdev, ctrl);
+        if (ret == -EBUSY)
+            ret = 0;
+
+        list_del(&pdev->ep0_preq.list);
+        break;
+    default:
+        ret = cdnsp_ep0_delegate_req(pdev, ctrl);
+        break;
+    }
+
+    return ret;
+}
+
+void cdnsp_setup_analyze(struct cdnsp_device *pdev)
+{
+    struct usb_ctrlrequest *ctrl = &pdev->setup;
+    int ret = 0;
+    u16 len;
+
+    trace_cdnsp_ctrl_req(ctrl);
+
+    if (!pdev->gadget_driver)
+        goto out;
+
+    if (pdev->gadget.state == USB_STATE_NOTATTACHED) {
+        dev_err(pdev->dev, "ERR: Setup detected in unattached state\n");
+        ret = -EINVAL;
+        goto out;
+    }
+
+    /* Restore ep0 to the Stopped/Running state. */
+    if (pdev->eps[0].ep_state & EP_HALTED) {
+        trace_cdnsp_ep0_halted("Restore to normal state");
+        cdnsp_halt_endpoint(pdev, &pdev->eps[0], 0);
+    }
+
+    /*
+     * Finish the previous SETUP transfer by removing its request from
+     * the list and informing the upper layer.
+     */
+    if (!list_empty(&pdev->eps[0].pending_list)) {
+        struct cdnsp_request *req;
+
+        trace_cdnsp_ep0_request("Remove previous");
+        req = next_request(&pdev->eps[0].pending_list);
+        cdnsp_ep_dequeue(&pdev->eps[0], req);
+    }
+
+    len = le16_to_cpu(ctrl->wLength);
+    if (!len) {
+        pdev->three_stage_setup = false;
+        pdev->ep0_expect_in = false;
+    } else {
+        pdev->three_stage_setup = true;
+        pdev->ep0_expect_in = !!(ctrl->bRequestType & USB_DIR_IN);
+    }
+
+    if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD)
+        ret = cdnsp_ep0_std_request(pdev, ctrl);
+    else
+        ret = cdnsp_ep0_delegate_req(pdev, ctrl);
+
+    if (!len)
+        pdev->ep0_stage = CDNSP_STATUS_STAGE;
+
+    if (ret == USB_GADGET_DELAYED_STATUS) {
+        trace_cdnsp_ep0_status_stage("delayed");
+        return;
+    }
+out:
+    if (ret < 0)
+        cdnsp_ep0_stall(pdev);
+    else if (pdev->ep0_stage == CDNSP_STATUS_STAGE)
+        cdnsp_status_stage(pdev);
+}
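cdnsp_ep0_delegate_req() above uses the standard UDC trick of dropping the controller spinlock around the composite ->setup() callback: the function driver may call straight back into the UDC (for example to queue the data-stage request), and that path takes the same lock. A generic sketch of the pattern under a hypothetical my_udc type, not the driver's own:

#include <linux/spinlock.h>
#include <linux/usb/gadget.h>

struct my_udc {
    spinlock_t lock;
    struct usb_gadget gadget;
    struct usb_gadget_driver *driver;
};

/* Called with udc->lock held; temporarily releases it for the callback. */
static int my_delegate_setup(struct my_udc *udc,
                             struct usb_ctrlrequest *ctrl)
{
    int ret;

    spin_unlock(&udc->lock);
    ret = udc->driver->setup(&udc->gadget, ctrl);
    spin_lock(&udc->lock);

    return ret;
}

The cost of the pattern is that device state may change while the lock is dropped, which is why the callers above re-validate state after delegation.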
diff --git a/drivers/usb/cdns3/cdnsp-gadget.c b/drivers/usb/cdns3/cdnsp-gadget.c
new file mode 100644
index 000000000000..f2ebbacd932e
--- /dev/null
+++ b/drivers/usb/cdns3/cdnsp-gadget.c
@@ -0,0 +1,2009 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Cadence CDNSP DRD Driver.
+ *
+ * Copyright (C) 2020 Cadence.
+ *
+ * Author: Pawel Laszczak <pawell@cadence.com>
+ */
+
+#include <linux/moduleparam.h>
+#include <linux/dma-mapping.h>
+#include <linux/module.h>
+#include <linux/iopoll.h>
+#include <linux/delay.h>
+#include <linux/log2.h>
+#include <linux/slab.h>
+#include <linux/pci.h>
+#include <linux/irq.h>
+#include <linux/dmi.h>
+
+#include "core.h"
+#include "gadget-export.h"
+#include "drd.h"
+#include "cdnsp-gadget.h"
+#include "cdnsp-trace.h"
+
+unsigned int cdnsp_port_speed(unsigned int port_status)
+{
+    /* Detect the gadget speed based on the PORTSC register. */
+    if (DEV_SUPERSPEEDPLUS(port_status))
+        return USB_SPEED_SUPER_PLUS;
+    else if (DEV_SUPERSPEED(port_status))
+        return USB_SPEED_SUPER;
+    else if (DEV_HIGHSPEED(port_status))
+        return USB_SPEED_HIGH;
+    else if (DEV_FULLSPEED(port_status))
+        return USB_SPEED_FULL;
+
+    /* If the device is detached, the speed will be USB_SPEED_UNKNOWN. */
+    return USB_SPEED_UNKNOWN;
+}
+
+/*
+ * Given a port state, this function returns a value that would result in the
+ * port being in the same state, if the value was written to the port status
+ * control register.
+ * Save Read Only (RO) bits and save read/write bits where
+ * writing a 0 clears the bit and writing a 1 sets the bit (RWS).
+ * For all other types (RW1S, RW1CS, RW, and RZ), writing a '0' has no effect.
+ */
+u32 cdnsp_port_state_to_neutral(u32 state)
+{
+    /* Save read-only status and port state. */
+    return (state & CDNSP_PORT_RO) | (state & CDNSP_PORT_RWS);
+}
+
+/**
+ * cdnsp_find_next_ext_cap - Find the offset of the extended capability with
+ * capability ID @id.
+ * @base: PCI MMIO registers base address.
+ * @start: Address at which to start looking (0 or HCC_PARAMS to start at
+ *         the beginning of the list).
+ * @id: Extended capability ID to search for.
+ *
+ * Returns the offset of the next matching extended capability structure.
+ * Some capabilities can occur several times,
+ * e.g., the EXT_CAPS_PROTOCOL, and this provides a way to find them all.
+ */
+int cdnsp_find_next_ext_cap(void __iomem *base, u32 start, int id)
+{
+    u32 offset = start;
+    u32 next;
+    u32 val;
+
+    if (!start || start == HCC_PARAMS_OFFSET) {
+        val = readl(base + HCC_PARAMS_OFFSET);
+        if (val == ~0)
+            return 0;
+
+        offset = HCC_EXT_CAPS(val) << 2;
+        if (!offset)
+            return 0;
+    }
+
+    do {
+        val = readl(base + offset);
+        if (val == ~0)
+            return 0;
+
+        if (EXT_CAPS_ID(val) == id && offset != start)
+            return offset;
+
+        next = EXT_CAPS_NEXT(val);
+        offset += next << 2;
+    } while (next);
+
+    return 0;
+}
+
+void cdnsp_set_link_state(struct cdnsp_device *pdev,
+                          __le32 __iomem *port_regs,
+                          u32 link_state)
+{
+    int port_num = 0xFF;
+    u32 temp;
+
+    temp = readl(port_regs);
+    temp = cdnsp_port_state_to_neutral(temp);
+    temp |= PORT_WKCONN_E | PORT_WKDISC_E;
+    writel(temp, port_regs);
+
+    temp &= ~PORT_PLS_MASK;
+    temp |= PORT_LINK_STROBE | link_state;
+
+    if (pdev->active_port)
+        port_num = pdev->active_port->port_num;
+
+    trace_cdnsp_handle_port_status(port_num, readl(port_regs));
+    writel(temp, port_regs);
+    trace_cdnsp_link_state_changed(port_num, readl(port_regs));
+}
+
+static void cdnsp_disable_port(struct cdnsp_device *pdev,
+                               __le32 __iomem *port_regs)
+{
+    u32 temp = cdnsp_port_state_to_neutral(readl(port_regs));
+
+    writel(temp | PORT_PED, port_regs);
+}
+
+static void cdnsp_clear_port_change_bit(struct cdnsp_device *pdev,
+                                        __le32 __iomem *port_regs)
+{
+    u32 portsc = readl(port_regs);
+
+    writel(cdnsp_port_state_to_neutral(portsc) |
+           (portsc & PORT_CHANGE_BITS), port_regs);
+}
+
+static void cdnsp_set_chicken_bits_2(struct cdnsp_device *pdev, u32 bit)
+{
+    __le32 __iomem *reg;
+    void __iomem *base;
+    u32 offset = 0;
+
+    base = &pdev->cap_regs->hc_capbase;
+    offset = cdnsp_find_next_ext_cap(base, offset, D_XEC_PRE_REGS_CAP);
+    reg = base + offset + REG_CHICKEN_BITS_2_OFFSET;
+
+    bit = readl(reg) | bit;
+    writel(bit, reg);
+}
+
+static void cdnsp_clear_chicken_bits_2(struct cdnsp_device *pdev, u32 bit)
+{
+    __le32 __iomem *reg;
+    void __iomem *base;
+    u32 offset = 0;
+
+    base = &pdev->cap_regs->hc_capbase;
+    offset = cdnsp_find_next_ext_cap(base, offset, D_XEC_PRE_REGS_CAP);
+    reg = base + offset + REG_CHICKEN_BITS_2_OFFSET;
+
+    bit = readl(reg) & ~bit;
+    writel(bit, reg);
+}
+
+/*
+ * Disable interrupts and begin the controller halting process.
+ */
+static void cdnsp_quiesce(struct cdnsp_device *pdev)
+{
+    u32 halted;
+    u32 mask;
+    u32 cmd;
+
+    mask = ~(u32)(CDNSP_IRQS);
+
+    halted = readl(&pdev->op_regs->status) & STS_HALT;
+    if (!halted)
+        mask &= ~(CMD_R_S | CMD_DEVEN);
+
+    cmd = readl(&pdev->op_regs->command);
+    cmd &= mask;
+    writel(cmd, &pdev->op_regs->command);
+}
+
+/*
+ * Force the controller into a halt state.
+ *
+ * Disable any IRQs and clear the run/stop bit.
+ * The controller will complete any current and actively pipelined
+ * transactions, and should halt within 16 ms of the run/stop bit being
+ * cleared.
+ * Read the controller Halted bit in the status register to see when the
+ * controller is finished.
+ */
+int cdnsp_halt(struct cdnsp_device *pdev)
+{
+    int ret;
+    u32 val;
+
+    cdnsp_quiesce(pdev);
+
+    ret = readl_poll_timeout_atomic(&pdev->op_regs->status, val,
+                                    val & STS_HALT, 1,
+                                    CDNSP_MAX_HALT_USEC);
+    if (ret) {
+        dev_err(pdev->dev, "ERROR: Device halt failed\n");
+        return ret;
+    }
+
+    pdev->cdnsp_state |= CDNSP_STATE_HALTED;
+
+    return 0;
+}
+
+/*
+ * The device controller died: a register read returns 0xffffffff, or a
+ * command never completes.
+ */
+void cdnsp_died(struct cdnsp_device *pdev)
+{
+    dev_err(pdev->dev, "ERROR: CDNSP controller not responding\n");
+    pdev->cdnsp_state |= CDNSP_STATE_DYING;
+    cdnsp_halt(pdev);
+}
+
+/*
+ * Set the run bit and wait for the device to be running.
+ */
+static int cdnsp_start(struct cdnsp_device *pdev)
+{
+    u32 temp;
+    int ret;
+
+    temp = readl(&pdev->op_regs->command);
+    temp |= (CMD_R_S | CMD_DEVEN);
+    writel(temp, &pdev->op_regs->command);
+
+    pdev->cdnsp_state = 0;
+
+    /*
+     * Wait for the STS_HALT status bit to become 0 to indicate that the
+     * device is running.
+     */
+    ret = readl_poll_timeout_atomic(&pdev->op_regs->status, temp,
+                                    !(temp & STS_HALT), 1,
+                                    CDNSP_MAX_HALT_USEC);
+    if (ret) {
+        pdev->cdnsp_state = CDNSP_STATE_DYING;
+        dev_err(pdev->dev, "ERROR: Controller run failed\n");
+    }
+
+    return ret;
+}
+
+/*
+ * Reset a halted controller.
+ *
+ * This resets pipelines, timers, counters, state machines, etc.
+ * Transactions will be terminated immediately, and operational registers
+ * will be set to their defaults.
+ */
+int cdnsp_reset(struct cdnsp_device *pdev)
+{
+    u32 command;
+    u32 temp;
+    int ret;
+
+    temp = readl(&pdev->op_regs->status);
+
+    if (temp == ~(u32)0) {
+        dev_err(pdev->dev, "Device not accessible, reset failed.\n");
+        return -ENODEV;
+    }
+
+    if ((temp & STS_HALT) == 0) {
+        dev_err(pdev->dev, "Controller not halted, aborting reset.\n");
+        return -EINVAL;
+    }
+
+    command = readl(&pdev->op_regs->command);
+    command |= CMD_RESET;
+    writel(command, &pdev->op_regs->command);
+
+    ret = readl_poll_timeout_atomic(&pdev->op_regs->command, temp,
+                                    !(temp & CMD_RESET), 1,
+                                    10 * 1000);
+    if (ret) {
+        dev_err(pdev->dev, "ERROR: Controller reset failed\n");
+        return ret;
+    }
+
+    /*
+     * CDNSP cannot write to any doorbells or operational registers other
+     * than status until the "Controller Not Ready" flag is cleared.
+     */
+    ret = readl_poll_timeout_atomic(&pdev->op_regs->status, temp,
+                                    !(temp & STS_CNR), 1,
+                                    10 * 1000);
+
+    if (ret) {
+        dev_err(pdev->dev, "ERROR: Controller not ready to work\n");
+        return ret;
+    }
+
+    dev_dbg(pdev->dev, "Controller ready to work");
+
+    return ret;
+}
+
+/*
+ * cdnsp_get_endpoint_index - Find the index for an endpoint given its
+ * descriptor. Use the return value to right shift 1 for the bitmask.
+ *
+ * Index = (epnum * 2) + direction - 1,
+ * where direction = 0 for OUT, 1 for IN.
+ * For control endpoints, the IN index is used (OUT index is unused), so
+ * index = (epnum * 2) + direction - 1 = (epnum * 2) + 1 - 1 = (epnum * 2)
+ */
+static unsigned int
+    cdnsp_get_endpoint_index(const struct usb_endpoint_descriptor *desc)
+{
+    unsigned int index = (unsigned int)usb_endpoint_num(desc);
+
+    if (usb_endpoint_xfer_control(desc))
+        return index * 2;
+
+    return (index * 2) + (usb_endpoint_dir_in(desc) ? 1 : 0) - 1;
+}
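The index and flag arithmetic quoted in the comments is easy to mis-read, so here is a worked example under exactly that formula: for EP2 IN, index = (2 * 2) + 1 - 1 = 4, and since the slot context occupies bit 0 of the control-context bitmask, the corresponding flag is 1 << (4 + 1) = 0x20. A standalone sketch of the same arithmetic (ignoring the EP0/control special case handled above):

/* index = (epnum * 2) + direction - 1 (direction: OUT = 0, IN = 1). */
static unsigned int ep_index(unsigned int epnum, bool dir_in)
{
    return (epnum * 2) + (dir_in ? 1 : 0) - 1;
}

/* Slot context is bit 0, so endpoint at 'index' maps to bit (index + 1). */
static unsigned int ep_flag(unsigned int epnum, bool dir_in)
{
    return 1U << (ep_index(epnum, dir_in) + 1);
}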
+ */
+static unsigned int
+	cdnsp_get_endpoint_flag(const struct usb_endpoint_descriptor *desc)
+{
+	return 1 << (cdnsp_get_endpoint_index(desc) + 1);
+}
+
+int cdnsp_ep_enqueue(struct cdnsp_ep *pep, struct cdnsp_request *preq)
+{
+	struct cdnsp_device *pdev = pep->pdev;
+	struct usb_request *request;
+	int ret;
+
+	if (preq->epnum == 0 && !list_empty(&pep->pending_list)) {
+		trace_cdnsp_request_enqueue_busy(preq);
+		return -EBUSY;
+	}
+
+	request = &preq->request;
+	request->actual = 0;
+	request->status = -EINPROGRESS;
+	preq->direction = pep->direction;
+	preq->epnum = pep->number;
+	preq->td.drbl = 0;
+
+	ret = usb_gadget_map_request_by_dev(pdev->dev, request, pep->direction);
+	if (ret) {
+		trace_cdnsp_request_enqueue_error(preq);
+		return ret;
+	}
+
+	list_add_tail(&preq->list, &pep->pending_list);
+
+	trace_cdnsp_request_enqueue(preq);
+
+	switch (usb_endpoint_type(pep->endpoint.desc)) {
+	case USB_ENDPOINT_XFER_CONTROL:
+		ret = cdnsp_queue_ctrl_tx(pdev, preq);
+		break;
+	case USB_ENDPOINT_XFER_BULK:
+	case USB_ENDPOINT_XFER_INT:
+		ret = cdnsp_queue_bulk_tx(pdev, preq);
+		break;
+	case USB_ENDPOINT_XFER_ISOC:
+		ret = cdnsp_queue_isoc_tx_prepare(pdev, preq);
+		break;
+	}
+
+	if (ret)
+		goto unmap;
+
+	return 0;
+
+unmap:
+	usb_gadget_unmap_request_by_dev(pdev->dev, &preq->request,
+					pep->direction);
+	list_del(&preq->list);
+	trace_cdnsp_request_enqueue_error(preq);
+
+	return ret;
+}
+
+/*
+ * Remove the request's TD from the endpoint ring. This may cause the
+ * controller to stop USB transfers, potentially stopping in the middle of a
+ * TRB buffer. The controller should pick up where it left off in the TD,
+ * unless a Set Transfer Ring Dequeue Pointer is issued.
+ *
+ * The TRBs that make up the buffers for the canceled request will be "removed"
+ * from the ring. Since the ring is a contiguous structure, they can't be
+ * physically removed. Instead, there are three cases to handle:
+ *
+ * 1) If the controller is in the middle of processing the request to be
+ *    canceled, we simply move the ring's dequeue pointer past those TRBs
+ *    using the Set Transfer Ring Dequeue Pointer command. This will be
+ *    the common case, when drivers time out on the last submitted request
+ *    and attempt to cancel.
+ *
+ * 2) If the controller is in the middle of a different TD, we turn the TRBs
+ *    into a series of 1-TRB transfer no-op TDs. No-ops shouldn't be chained.
+ *    The controller will need to invalidate any TRBs it has cached after
+ *    the stop endpoint command.
+ *
+ * 3) The TD may have completed by the time the Stop Endpoint Command
+ *    completes, so software needs to handle that case too.
+ */
+int cdnsp_ep_dequeue(struct cdnsp_ep *pep, struct cdnsp_request *preq)
+{
+	struct cdnsp_device *pdev = pep->pdev;
+	int ret;
+
+	trace_cdnsp_request_dequeue(preq);
+
+	if (GET_EP_CTX_STATE(pep->out_ctx) == EP_STATE_RUNNING) {
+		ret = cdnsp_cmd_stop_ep(pdev, pep);
+		if (ret)
+			return ret;
+	}
+
+	return cdnsp_remove_request(pdev, preq, pep);
+}
+
+static void cdnsp_zero_in_ctx(struct cdnsp_device *pdev)
+{
+	struct cdnsp_input_control_ctx *ctrl_ctx;
+	struct cdnsp_slot_ctx *slot_ctx;
+	struct cdnsp_ep_ctx *ep_ctx;
+	int i;
+
+	ctrl_ctx = cdnsp_get_input_control_ctx(&pdev->in_ctx);
+
+	/*
+	 * When a device's add flag and drop flag are zero, any subsequent
+	 * configure endpoint command will leave that endpoint's state
+	 * untouched. Make sure we don't leave any old state in the input
+	 * endpoint contexts.
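+	 * Callers then set only the flags they need, e.g.
+	 * cdnsp_gadget_ep_enable() sets just the added endpoint's flag
+	 * before issuing the next configure endpoint command.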
+ */ + ctrl_ctx->drop_flags = 0; + ctrl_ctx->add_flags = 0; + slot_ctx = cdnsp_get_slot_ctx(&pdev->in_ctx); + slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK); + + /* Endpoint 0 is always valid */ + slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1)); + for (i = 1; i < CDNSP_ENDPOINTS_NUM; ++i) { + ep_ctx = cdnsp_get_ep_ctx(&pdev->in_ctx, i); + ep_ctx->ep_info = 0; + ep_ctx->ep_info2 = 0; + ep_ctx->deq = 0; + ep_ctx->tx_info = 0; + } +} + +/* Issue a configure endpoint command and wait for it to finish. */ +static int cdnsp_configure_endpoint(struct cdnsp_device *pdev) +{ + int ret; + + cdnsp_queue_configure_endpoint(pdev, pdev->cmd.in_ctx->dma); + cdnsp_ring_cmd_db(pdev); + ret = cdnsp_wait_for_cmd_compl(pdev); + if (ret) { + dev_err(pdev->dev, + "ERR: unexpected command completion code 0x%x.\n", ret); + return -EINVAL; + } + + return ret; +} + +static void cdnsp_invalidate_ep_events(struct cdnsp_device *pdev, + struct cdnsp_ep *pep) +{ + struct cdnsp_segment *segment; + union cdnsp_trb *event; + u32 cycle_state; + u32 data; + + event = pdev->event_ring->dequeue; + segment = pdev->event_ring->deq_seg; + cycle_state = pdev->event_ring->cycle_state; + + while (1) { + data = le32_to_cpu(event->trans_event.flags); + + /* Check the owner of the TRB. */ + if ((data & TRB_CYCLE) != cycle_state) + break; + + if (TRB_FIELD_TO_TYPE(data) == TRB_TRANSFER && + TRB_TO_EP_ID(data) == (pep->idx + 1)) { + data |= TRB_EVENT_INVALIDATE; + event->trans_event.flags = cpu_to_le32(data); + } + + if (cdnsp_last_trb_on_seg(segment, event)) { + cycle_state ^= 1; + segment = pdev->event_ring->deq_seg->next; + event = segment->trbs; + } else { + event++; + } + } +} + +int cdnsp_wait_for_cmd_compl(struct cdnsp_device *pdev) +{ + struct cdnsp_segment *event_deq_seg; + union cdnsp_trb *cmd_trb; + dma_addr_t cmd_deq_dma; + union cdnsp_trb *event; + u32 cycle_state; + int ret, val; + u64 cmd_dma; + u32 flags; + + cmd_trb = pdev->cmd.command_trb; + pdev->cmd.status = 0; + + trace_cdnsp_cmd_wait_for_compl(pdev->cmd_ring, &cmd_trb->generic); + + ret = readl_poll_timeout_atomic(&pdev->op_regs->cmd_ring, val, + !CMD_RING_BUSY(val), 1, + CDNSP_CMD_TIMEOUT); + if (ret) { + dev_err(pdev->dev, "ERR: Timeout while waiting for command\n"); + trace_cdnsp_cmd_timeout(pdev->cmd_ring, &cmd_trb->generic); + pdev->cdnsp_state = CDNSP_STATE_DYING; + return -ETIMEDOUT; + } + + event = pdev->event_ring->dequeue; + event_deq_seg = pdev->event_ring->deq_seg; + cycle_state = pdev->event_ring->cycle_state; + + cmd_deq_dma = cdnsp_trb_virt_to_dma(pdev->cmd_ring->deq_seg, cmd_trb); + if (!cmd_deq_dma) + return -EINVAL; + + while (1) { + flags = le32_to_cpu(event->event_cmd.flags); + + /* Check the owner of the TRB. */ + if ((flags & TRB_CYCLE) != cycle_state) + return -EINVAL; + + cmd_dma = le64_to_cpu(event->event_cmd.cmd_trb); + + /* + * Check whether the completion event is for last queued + * command. 
+ */ + if (TRB_FIELD_TO_TYPE(flags) != TRB_COMPLETION || + cmd_dma != (u64)cmd_deq_dma) { + if (!cdnsp_last_trb_on_seg(event_deq_seg, event)) { + event++; + continue; + } + + if (cdnsp_last_trb_on_ring(pdev->event_ring, + event_deq_seg, event)) + cycle_state ^= 1; + + event_deq_seg = event_deq_seg->next; + event = event_deq_seg->trbs; + continue; + } + + trace_cdnsp_handle_command(pdev->cmd_ring, &cmd_trb->generic); + + pdev->cmd.status = GET_COMP_CODE(le32_to_cpu(event->event_cmd.status)); + if (pdev->cmd.status == COMP_SUCCESS) + return 0; + + return -pdev->cmd.status; + } +} + +int cdnsp_halt_endpoint(struct cdnsp_device *pdev, + struct cdnsp_ep *pep, + int value) +{ + int ret; + + trace_cdnsp_ep_halt(value ? "Set" : "Clear"); + + if (value) { + ret = cdnsp_cmd_stop_ep(pdev, pep); + if (ret) + return ret; + + if (GET_EP_CTX_STATE(pep->out_ctx) == EP_STATE_STOPPED) { + cdnsp_queue_halt_endpoint(pdev, pep->idx); + cdnsp_ring_cmd_db(pdev); + ret = cdnsp_wait_for_cmd_compl(pdev); + } + + pep->ep_state |= EP_HALTED; + } else { + /* + * In device mode driver can call reset endpoint command + * from any endpoint state. + */ + cdnsp_queue_reset_ep(pdev, pep->idx); + cdnsp_ring_cmd_db(pdev); + ret = cdnsp_wait_for_cmd_compl(pdev); + trace_cdnsp_handle_cmd_reset_ep(pep->out_ctx); + + if (ret) + return ret; + + pep->ep_state &= ~EP_HALTED; + + if (pep->idx != 0 && !(pep->ep_state & EP_WEDGE)) + cdnsp_ring_doorbell_for_active_rings(pdev, pep); + + pep->ep_state &= ~EP_WEDGE; + } + + return 0; +} + +static int cdnsp_update_eps_configuration(struct cdnsp_device *pdev, + struct cdnsp_ep *pep) +{ + struct cdnsp_input_control_ctx *ctrl_ctx; + struct cdnsp_slot_ctx *slot_ctx; + int ret = 0; + u32 ep_sts; + int i; + + ctrl_ctx = cdnsp_get_input_control_ctx(&pdev->in_ctx); + + /* Don't issue the command if there's no endpoints to update. */ + if (ctrl_ctx->add_flags == 0 && ctrl_ctx->drop_flags == 0) + return 0; + + ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG); + ctrl_ctx->add_flags &= cpu_to_le32(~EP0_FLAG); + ctrl_ctx->drop_flags &= cpu_to_le32(~(SLOT_FLAG | EP0_FLAG)); + + /* Fix up Context Entries field. Minimum value is EP0 == BIT(1). */ + slot_ctx = cdnsp_get_slot_ctx(&pdev->in_ctx); + for (i = CDNSP_ENDPOINTS_NUM; i >= 1; i--) { + __le32 le32 = cpu_to_le32(BIT(i)); + + if ((pdev->eps[i - 1].ring && !(ctrl_ctx->drop_flags & le32)) || + (ctrl_ctx->add_flags & le32) || i == 1) { + slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK); + slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(i)); + break; + } + } + + ep_sts = GET_EP_CTX_STATE(pep->out_ctx); + + if ((ctrl_ctx->add_flags != cpu_to_le32(SLOT_FLAG) && + ep_sts == EP_STATE_DISABLED) || + (ep_sts != EP_STATE_DISABLED && ctrl_ctx->drop_flags)) + ret = cdnsp_configure_endpoint(pdev); + + trace_cdnsp_configure_endpoint(cdnsp_get_slot_ctx(&pdev->out_ctx)); + trace_cdnsp_handle_cmd_config_ep(pep->out_ctx); + + cdnsp_zero_in_ctx(pdev); + + return ret; +} + +/* + * This submits a Reset Device Command, which will set the device state to 0, + * set the device address to 0, and disable all the endpoints except the default + * control endpoint. The USB core should come back and call + * cdnsp_setup_device(), and then re-set up the configuration. + */ +int cdnsp_reset_device(struct cdnsp_device *pdev) +{ + struct cdnsp_slot_ctx *slot_ctx; + int slot_state; + int ret, i; + + slot_ctx = cdnsp_get_slot_ctx(&pdev->in_ctx); + slot_ctx->dev_info = 0; + pdev->device_address = 0; + + /* If device is not setup, there is no point in resetting it. 
 */
+	slot_ctx = cdnsp_get_slot_ctx(&pdev->out_ctx);
+	slot_state = GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state));
+	trace_cdnsp_reset_device(slot_ctx);
+
+	if (slot_state <= SLOT_STATE_DEFAULT &&
+	    pdev->eps[0].ep_state & EP_HALTED) {
+		cdnsp_halt_endpoint(pdev, &pdev->eps[0], 0);
+	}
+
+	/*
+	 * During the Reset Device command the controller transitions
+	 * endpoint 0 to the Running state.
+	 */
+	pdev->eps[0].ep_state &= ~(EP_STOPPED | EP_HALTED);
+	pdev->eps[0].ep_state |= EP_ENABLED;
+
+	if (slot_state <= SLOT_STATE_DEFAULT)
+		return 0;
+
+	cdnsp_queue_reset_device(pdev);
+	cdnsp_ring_cmd_db(pdev);
+	ret = cdnsp_wait_for_cmd_compl(pdev);
+
+	/*
+	 * After the Reset Device command all non-default endpoints
+	 * are in the Disabled state.
+	 */
+	for (i = 1; i < CDNSP_ENDPOINTS_NUM; ++i)
+		pdev->eps[i].ep_state |= EP_STOPPED;
+
+	trace_cdnsp_handle_cmd_reset_dev(slot_ctx);
+
+	if (ret)
+		dev_err(pdev->dev, "Reset device failed with error code %d",
+			ret);
+
+	return ret;
+}
+
+/*
+ * Sets the MaxPStreams field and the Linear Stream Array field.
+ * Sets the dequeue pointer to the stream context array.
+ */
+static void cdnsp_setup_streams_ep_input_ctx(struct cdnsp_device *pdev,
+					     struct cdnsp_ep_ctx *ep_ctx,
+					     struct cdnsp_stream_info *stream_info)
+{
+	u32 max_primary_streams;
+
+	/* MaxPStreams is the number of stream context array entries, not the
+	 * number we're actually using. Must be in 2^(MaxPstreams + 1) format.
+	 * fls(0) = 0, fls(0x1) = 1, fls(0x10) = 5, fls(0x100) = 9, etc.
+	 */
+	max_primary_streams = fls(stream_info->num_stream_ctxs) - 2;
+	ep_ctx->ep_info &= cpu_to_le32(~EP_MAXPSTREAMS_MASK);
+	ep_ctx->ep_info |= cpu_to_le32(EP_MAXPSTREAMS(max_primary_streams)
+				       | EP_HAS_LSA);
+	ep_ctx->deq = cpu_to_le64(stream_info->ctx_array_dma);
+}
+
+/*
+ * Drivers use this function to prepare bulk endpoints to use streams.
+ *
+ * Don't allow the call to succeed if the endpoint only supports one stream
+ * (which means it doesn't support streams at all).
+ */
+int cdnsp_alloc_streams(struct cdnsp_device *pdev, struct cdnsp_ep *pep)
+{
+	unsigned int num_streams = usb_ss_max_streams(pep->endpoint.comp_desc);
+	unsigned int num_stream_ctxs;
+	int ret;
+
+	if (num_streams == 0)
+		return 0;
+
+	if (num_streams > STREAM_NUM_STREAMS)
+		return -EINVAL;
+
+	/*
+	 * Add two to the number of streams requested to account for
+	 * stream 0 that is reserved for controller usage and one additional
+	 * for TASK SET FULL response.
+	 */
+	num_streams += 2;
+
+	/* The stream context array size must be a power of two */
+	num_stream_ctxs = roundup_pow_of_two(num_streams);
+
+	trace_cdnsp_stream_number(pep, num_stream_ctxs, num_streams);
+
+	ret = cdnsp_alloc_stream_info(pdev, pep, num_stream_ctxs, num_streams);
+	if (ret)
+		return ret;
+
+	cdnsp_setup_streams_ep_input_ctx(pdev, pep->in_ctx, &pep->stream_info);
+
+	pep->ep_state |= EP_HAS_STREAMS;
+	pep->stream_info.td_count = 0;
+	pep->stream_info.first_prime_det = 0;
+
+	/*
+	 * Subtract 1 for stream 0, which drivers can't use.
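+	 * Worked example: a request for 13 streams becomes 15 after adding
+	 * stream 0 and the TASK SET FULL entry, the context array rounds up
+	 * to 16 entries, and 14 is returned to the caller.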
 */
+	return num_streams - 1;
+}
+
+int cdnsp_disable_slot(struct cdnsp_device *pdev)
+{
+	int ret;
+
+	cdnsp_queue_slot_control(pdev, TRB_DISABLE_SLOT);
+	cdnsp_ring_cmd_db(pdev);
+	ret = cdnsp_wait_for_cmd_compl(pdev);
+
+	pdev->slot_id = 0;
+	pdev->active_port = NULL;
+
+	trace_cdnsp_handle_cmd_disable_slot(cdnsp_get_slot_ctx(&pdev->out_ctx));
+
+	memset(pdev->in_ctx.bytes, 0, CDNSP_CTX_SIZE);
+	memset(pdev->out_ctx.bytes, 0, CDNSP_CTX_SIZE);
+
+	return ret;
+}
+
+int cdnsp_enable_slot(struct cdnsp_device *pdev)
+{
+	struct cdnsp_slot_ctx *slot_ctx;
+	int slot_state;
+	int ret;
+
+	/* If the slot is already enabled, there is nothing to do. */
+	slot_ctx = cdnsp_get_slot_ctx(&pdev->out_ctx);
+	slot_state = GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state));
+
+	if (slot_state != SLOT_STATE_DISABLED)
+		return 0;
+
+	cdnsp_queue_slot_control(pdev, TRB_ENABLE_SLOT);
+	cdnsp_ring_cmd_db(pdev);
+	ret = cdnsp_wait_for_cmd_compl(pdev);
+	if (ret)
+		goto show_trace;
+
+	pdev->slot_id = 1;
+
+show_trace:
+	trace_cdnsp_handle_cmd_enable_slot(cdnsp_get_slot_ctx(&pdev->out_ctx));
+
+	return ret;
+}
+
+/*
+ * Issue an Address Device command with BSR=1 if setup is SETUP_CONTEXT_ONLY
+ * (set up the context without addressing the device) or with BSR=0 if setup
+ * is SETUP_CONTEXT_ADDRESS.
+ */
+int cdnsp_setup_device(struct cdnsp_device *pdev, enum cdnsp_setup_dev setup)
+{
+	struct cdnsp_input_control_ctx *ctrl_ctx;
+	struct cdnsp_slot_ctx *slot_ctx;
+	int dev_state = 0;
+	int ret;
+
+	if (!pdev->slot_id) {
+		trace_cdnsp_slot_id("incorrect");
+		return -EINVAL;
+	}
+
+	if (!pdev->active_port->port_num)
+		return -EINVAL;
+
+	slot_ctx = cdnsp_get_slot_ctx(&pdev->out_ctx);
+	dev_state = GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state));
+
+	if (setup == SETUP_CONTEXT_ONLY && dev_state == SLOT_STATE_DEFAULT) {
+		trace_cdnsp_slot_already_in_default(slot_ctx);
+		return 0;
+	}
+
+	slot_ctx = cdnsp_get_slot_ctx(&pdev->in_ctx);
+	ctrl_ctx = cdnsp_get_input_control_ctx(&pdev->in_ctx);
+
+	if (!slot_ctx->dev_info || dev_state == SLOT_STATE_DEFAULT) {
+		ret = cdnsp_setup_addressable_priv_dev(pdev);
+		if (ret)
+			return ret;
+	}
+
+	cdnsp_copy_ep0_dequeue_into_input_ctx(pdev);
+
+	ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG | EP0_FLAG);
+	ctrl_ctx->drop_flags = 0;
+
+	trace_cdnsp_setup_device_slot(slot_ctx);
+
+	cdnsp_queue_address_device(pdev, pdev->in_ctx.dma, setup);
+	cdnsp_ring_cmd_db(pdev);
+	ret = cdnsp_wait_for_cmd_compl(pdev);
+
+	trace_cdnsp_handle_cmd_addr_dev(cdnsp_get_slot_ctx(&pdev->out_ctx));
+
+	/*
+	 * Zero the input context control for later use.
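+	 * The next command that uses this input context must start from
+	 * clean add and drop flags.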
*/ + ctrl_ctx->add_flags = 0; + ctrl_ctx->drop_flags = 0; + + return ret; +} + +void cdnsp_set_usb2_hardware_lpm(struct cdnsp_device *pdev, + struct usb_request *req, + int enable) +{ + if (pdev->active_port != &pdev->usb2_port || !pdev->gadget.lpm_capable) + return; + + trace_cdnsp_lpm(enable); + + if (enable) + writel(PORT_BESL(CDNSP_DEFAULT_BESL) | PORT_L1S_NYET | PORT_HLE, + &pdev->active_port->regs->portpmsc); + else + writel(PORT_L1S_NYET, &pdev->active_port->regs->portpmsc); +} + +static int cdnsp_get_frame(struct cdnsp_device *pdev) +{ + return readl(&pdev->run_regs->microframe_index) >> 3; +} + +static int cdnsp_gadget_ep_enable(struct usb_ep *ep, + const struct usb_endpoint_descriptor *desc) +{ + struct cdnsp_input_control_ctx *ctrl_ctx; + struct cdnsp_device *pdev; + struct cdnsp_ep *pep; + unsigned long flags; + u32 added_ctxs; + int ret; + + if (!ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT || + !desc->wMaxPacketSize) + return -EINVAL; + + pep = to_cdnsp_ep(ep); + pdev = pep->pdev; + + if (dev_WARN_ONCE(pdev->dev, pep->ep_state & EP_ENABLED, + "%s is already enabled\n", pep->name)) + return 0; + + spin_lock_irqsave(&pdev->lock, flags); + + added_ctxs = cdnsp_get_endpoint_flag(desc); + if (added_ctxs == SLOT_FLAG || added_ctxs == EP0_FLAG) { + dev_err(pdev->dev, "ERROR: Bad endpoint number\n"); + ret = -EINVAL; + goto unlock; + } + + pep->interval = desc->bInterval ? BIT(desc->bInterval - 1) : 0; + + if (pdev->gadget.speed == USB_SPEED_FULL) { + if (usb_endpoint_type(desc) == USB_ENDPOINT_XFER_INT) + pep->interval = desc->bInterval << 3; + if (usb_endpoint_type(desc) == USB_ENDPOINT_XFER_ISOC) + pep->interval = BIT(desc->bInterval - 1) << 3; + } + + if (usb_endpoint_type(desc) == USB_ENDPOINT_XFER_ISOC) { + if (pep->interval > BIT(12)) { + dev_err(pdev->dev, "bInterval %d not supported\n", + desc->bInterval); + ret = -EINVAL; + goto unlock; + } + cdnsp_set_chicken_bits_2(pdev, CHICKEN_XDMA_2_TP_CACHE_DIS); + } + + ret = cdnsp_endpoint_init(pdev, pep, GFP_ATOMIC); + if (ret) + goto unlock; + + ctrl_ctx = cdnsp_get_input_control_ctx(&pdev->in_ctx); + ctrl_ctx->add_flags = cpu_to_le32(added_ctxs); + ctrl_ctx->drop_flags = 0; + + ret = cdnsp_update_eps_configuration(pdev, pep); + if (ret) { + cdnsp_free_endpoint_rings(pdev, pep); + goto unlock; + } + + pep->ep_state |= EP_ENABLED; + pep->ep_state &= ~EP_STOPPED; + +unlock: + trace_cdnsp_ep_enable_end(pep, 0); + spin_unlock_irqrestore(&pdev->lock, flags); + + return ret; +} + +static int cdnsp_gadget_ep_disable(struct usb_ep *ep) +{ + struct cdnsp_input_control_ctx *ctrl_ctx; + struct cdnsp_request *preq; + struct cdnsp_device *pdev; + struct cdnsp_ep *pep; + unsigned long flags; + u32 drop_flag; + int ret = 0; + + if (!ep) + return -EINVAL; + + pep = to_cdnsp_ep(ep); + pdev = pep->pdev; + + spin_lock_irqsave(&pdev->lock, flags); + + if (!(pep->ep_state & EP_ENABLED)) { + dev_err(pdev->dev, "%s is already disabled\n", pep->name); + ret = -EINVAL; + goto finish; + } + + cdnsp_cmd_stop_ep(pdev, pep); + pep->ep_state |= EP_DIS_IN_RROGRESS; + cdnsp_cmd_flush_ep(pdev, pep); + + /* Remove all queued USB requests. 
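+	 * Each pending request is dequeued, which removes its TDs from the
+	 * ring and gives the request back to the gadget driver.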
*/ + while (!list_empty(&pep->pending_list)) { + preq = next_request(&pep->pending_list); + cdnsp_ep_dequeue(pep, preq); + } + + cdnsp_invalidate_ep_events(pdev, pep); + + pep->ep_state &= ~EP_DIS_IN_RROGRESS; + drop_flag = cdnsp_get_endpoint_flag(pep->endpoint.desc); + ctrl_ctx = cdnsp_get_input_control_ctx(&pdev->in_ctx); + ctrl_ctx->drop_flags = cpu_to_le32(drop_flag); + ctrl_ctx->add_flags = 0; + + cdnsp_endpoint_zero(pdev, pep); + + ret = cdnsp_update_eps_configuration(pdev, pep); + cdnsp_free_endpoint_rings(pdev, pep); + + pep->ep_state &= ~EP_ENABLED; + pep->ep_state |= EP_STOPPED; + +finish: + trace_cdnsp_ep_disable_end(pep, 0); + spin_unlock_irqrestore(&pdev->lock, flags); + + return ret; +} + +static struct usb_request *cdnsp_gadget_ep_alloc_request(struct usb_ep *ep, + gfp_t gfp_flags) +{ + struct cdnsp_ep *pep = to_cdnsp_ep(ep); + struct cdnsp_request *preq; + + preq = kzalloc(sizeof(*preq), gfp_flags); + if (!preq) + return NULL; + + preq->epnum = pep->number; + preq->pep = pep; + + trace_cdnsp_alloc_request(preq); + + return &preq->request; +} + +static void cdnsp_gadget_ep_free_request(struct usb_ep *ep, + struct usb_request *request) +{ + struct cdnsp_request *preq = to_cdnsp_request(request); + + trace_cdnsp_free_request(preq); + kfree(preq); +} + +static int cdnsp_gadget_ep_queue(struct usb_ep *ep, + struct usb_request *request, + gfp_t gfp_flags) +{ + struct cdnsp_request *preq; + struct cdnsp_device *pdev; + struct cdnsp_ep *pep; + unsigned long flags; + int ret; + + if (!request || !ep) + return -EINVAL; + + pep = to_cdnsp_ep(ep); + pdev = pep->pdev; + + if (!(pep->ep_state & EP_ENABLED)) { + dev_err(pdev->dev, "%s: can't queue to disabled endpoint\n", + pep->name); + return -EINVAL; + } + + preq = to_cdnsp_request(request); + spin_lock_irqsave(&pdev->lock, flags); + ret = cdnsp_ep_enqueue(pep, preq); + spin_unlock_irqrestore(&pdev->lock, flags); + + return ret; +} + +static int cdnsp_gadget_ep_dequeue(struct usb_ep *ep, + struct usb_request *request) +{ + struct cdnsp_ep *pep = to_cdnsp_ep(ep); + struct cdnsp_device *pdev = pep->pdev; + unsigned long flags; + int ret; + + if (!pep->endpoint.desc) { + dev_err(pdev->dev, + "%s: can't dequeue to disabled endpoint\n", + pep->name); + return -ESHUTDOWN; + } + + spin_lock_irqsave(&pdev->lock, flags); + ret = cdnsp_ep_dequeue(pep, to_cdnsp_request(request)); + spin_unlock_irqrestore(&pdev->lock, flags); + + return ret; +} + +static int cdnsp_gadget_ep_set_halt(struct usb_ep *ep, int value) +{ + struct cdnsp_ep *pep = to_cdnsp_ep(ep); + struct cdnsp_device *pdev = pep->pdev; + struct cdnsp_request *preq; + unsigned long flags = 0; + int ret; + + spin_lock_irqsave(&pdev->lock, flags); + + preq = next_request(&pep->pending_list); + if (value) { + if (preq) { + trace_cdnsp_ep_busy_try_halt_again(pep, 0); + ret = -EAGAIN; + goto done; + } + } + + ret = cdnsp_halt_endpoint(pdev, pep, value); + +done: + spin_unlock_irqrestore(&pdev->lock, flags); + return ret; +} + +static int cdnsp_gadget_ep_set_wedge(struct usb_ep *ep) +{ + struct cdnsp_ep *pep = to_cdnsp_ep(ep); + struct cdnsp_device *pdev = pep->pdev; + unsigned long flags = 0; + int ret; + + spin_lock_irqsave(&pdev->lock, flags); + pep->ep_state |= EP_WEDGE; + ret = cdnsp_halt_endpoint(pdev, pep, 1); + spin_unlock_irqrestore(&pdev->lock, flags); + + return ret; +} + +static const struct usb_ep_ops cdnsp_gadget_ep0_ops = { + .enable = cdnsp_gadget_ep_enable, + .disable = cdnsp_gadget_ep_disable, + .alloc_request = cdnsp_gadget_ep_alloc_request, + .free_request = 
cdnsp_gadget_ep_free_request, + .queue = cdnsp_gadget_ep_queue, + .dequeue = cdnsp_gadget_ep_dequeue, + .set_halt = cdnsp_gadget_ep_set_halt, + .set_wedge = cdnsp_gadget_ep_set_wedge, +}; + +static const struct usb_ep_ops cdnsp_gadget_ep_ops = { + .enable = cdnsp_gadget_ep_enable, + .disable = cdnsp_gadget_ep_disable, + .alloc_request = cdnsp_gadget_ep_alloc_request, + .free_request = cdnsp_gadget_ep_free_request, + .queue = cdnsp_gadget_ep_queue, + .dequeue = cdnsp_gadget_ep_dequeue, + .set_halt = cdnsp_gadget_ep_set_halt, + .set_wedge = cdnsp_gadget_ep_set_wedge, +}; + +void cdnsp_gadget_giveback(struct cdnsp_ep *pep, + struct cdnsp_request *preq, + int status) +{ + struct cdnsp_device *pdev = pep->pdev; + + list_del(&preq->list); + + if (preq->request.status == -EINPROGRESS) + preq->request.status = status; + + usb_gadget_unmap_request_by_dev(pdev->dev, &preq->request, + preq->direction); + + trace_cdnsp_request_giveback(preq); + + if (preq != &pdev->ep0_preq) { + spin_unlock(&pdev->lock); + usb_gadget_giveback_request(&pep->endpoint, &preq->request); + spin_lock(&pdev->lock); + } +} + +static struct usb_endpoint_descriptor cdnsp_gadget_ep0_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + .bmAttributes = USB_ENDPOINT_XFER_CONTROL, +}; + +static int cdnsp_run(struct cdnsp_device *pdev, + enum usb_device_speed speed) +{ + u32 fs_speed = 0; + u64 temp_64; + u32 temp; + int ret; + + temp_64 = cdnsp_read_64(&pdev->ir_set->erst_dequeue); + temp_64 &= ~ERST_PTR_MASK; + temp = readl(&pdev->ir_set->irq_control); + temp &= ~IMOD_INTERVAL_MASK; + temp |= ((IMOD_DEFAULT_INTERVAL / 250) & IMOD_INTERVAL_MASK); + writel(temp, &pdev->ir_set->irq_control); + + temp = readl(&pdev->port3x_regs->mode_addr); + + switch (speed) { + case USB_SPEED_SUPER_PLUS: + temp |= CFG_3XPORT_SSP_SUPPORT; + break; + case USB_SPEED_SUPER: + temp &= ~CFG_3XPORT_SSP_SUPPORT; + break; + case USB_SPEED_HIGH: + break; + case USB_SPEED_FULL: + fs_speed = PORT_REG6_FORCE_FS; + break; + default: + dev_err(pdev->dev, "invalid maximum_speed parameter %d\n", + speed); + fallthrough; + case USB_SPEED_UNKNOWN: + /* Default to superspeed. 
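+		 * An out-of-range maximum_speed parameter also lands here
+		 * through the fallthrough above.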
*/ + speed = USB_SPEED_SUPER; + break; + } + + if (speed >= USB_SPEED_SUPER) { + writel(temp, &pdev->port3x_regs->mode_addr); + cdnsp_set_link_state(pdev, &pdev->usb3_port.regs->portsc, + XDEV_RXDETECT); + } else { + cdnsp_disable_port(pdev, &pdev->usb3_port.regs->portsc); + } + + cdnsp_set_link_state(pdev, &pdev->usb2_port.regs->portsc, + XDEV_RXDETECT); + + cdnsp_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512); + + writel(PORT_REG6_L1_L0_HW_EN | fs_speed, &pdev->port20_regs->port_reg6); + + ret = cdnsp_start(pdev); + if (ret) { + ret = -ENODEV; + goto err; + } + + temp = readl(&pdev->op_regs->command); + temp |= (CMD_INTE); + writel(temp, &pdev->op_regs->command); + + temp = readl(&pdev->ir_set->irq_pending); + writel(IMAN_IE_SET(temp), &pdev->ir_set->irq_pending); + + trace_cdnsp_init("Controller ready to work"); + return 0; +err: + cdnsp_halt(pdev); + return ret; +} + +static int cdnsp_gadget_udc_start(struct usb_gadget *g, + struct usb_gadget_driver *driver) +{ + enum usb_device_speed max_speed = driver->max_speed; + struct cdnsp_device *pdev = gadget_to_cdnsp(g); + unsigned long flags; + int ret; + + spin_lock_irqsave(&pdev->lock, flags); + pdev->gadget_driver = driver; + + /* limit speed if necessary */ + max_speed = min(driver->max_speed, g->max_speed); + ret = cdnsp_run(pdev, max_speed); + + spin_unlock_irqrestore(&pdev->lock, flags); + + return ret; +} + +/* + * Update Event Ring Dequeue Pointer: + * - When all events have finished + * - To avoid "Event Ring Full Error" condition + */ +void cdnsp_update_erst_dequeue(struct cdnsp_device *pdev, + union cdnsp_trb *event_ring_deq, + u8 clear_ehb) +{ + u64 temp_64; + dma_addr_t deq; + + temp_64 = cdnsp_read_64(&pdev->ir_set->erst_dequeue); + + /* If necessary, update the HW's version of the event ring deq ptr. */ + if (event_ring_deq != pdev->event_ring->dequeue) { + deq = cdnsp_trb_virt_to_dma(pdev->event_ring->deq_seg, + pdev->event_ring->dequeue); + temp_64 &= ERST_PTR_MASK; + temp_64 |= ((u64)deq & (u64)~ERST_PTR_MASK); + } + + /* Clear the event handler busy flag (RW1C). */ + if (clear_ehb) + temp_64 |= ERST_EHB; + else + temp_64 &= ~ERST_EHB; + + cdnsp_write_64(temp_64, &pdev->ir_set->erst_dequeue); +} + +static void cdnsp_clear_cmd_ring(struct cdnsp_device *pdev) +{ + struct cdnsp_segment *seg; + u64 val_64; + int i; + + cdnsp_initialize_ring_info(pdev->cmd_ring); + + seg = pdev->cmd_ring->first_seg; + for (i = 0; i < pdev->cmd_ring->num_segs; i++) { + memset(seg->trbs, 0, + sizeof(union cdnsp_trb) * (TRBS_PER_SEGMENT - 1)); + seg = seg->next; + } + + /* Set the address in the Command Ring Control register. */ + val_64 = cdnsp_read_64(&pdev->op_regs->cmd_ring); + val_64 = (val_64 & (u64)CMD_RING_RSVD_BITS) | + (pdev->cmd_ring->first_seg->dma & (u64)~CMD_RING_RSVD_BITS) | + pdev->cmd_ring->cycle_state; + cdnsp_write_64(val_64, &pdev->op_regs->cmd_ring); +} + +static void cdnsp_consume_all_events(struct cdnsp_device *pdev) +{ + struct cdnsp_segment *event_deq_seg; + union cdnsp_trb *event_ring_deq; + union cdnsp_trb *event; + u32 cycle_bit; + + event_ring_deq = pdev->event_ring->dequeue; + event_deq_seg = pdev->event_ring->deq_seg; + event = pdev->event_ring->dequeue; + + /* Update ring dequeue pointer. */ + while (1) { + cycle_bit = (le32_to_cpu(event->event_cmd.flags) & TRB_CYCLE); + + /* Does the controller or driver own the TRB? 
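+		 * The controller toggles the cycle bit each time it wraps
+		 * the ring, so a TRB whose cycle bit differs from the cached
+		 * cycle_state has not been written by the controller yet.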
*/ + if (cycle_bit != pdev->event_ring->cycle_state) + break; + + cdnsp_inc_deq(pdev, pdev->event_ring); + + if (!cdnsp_last_trb_on_seg(event_deq_seg, event)) { + event++; + continue; + } + + if (cdnsp_last_trb_on_ring(pdev->event_ring, event_deq_seg, + event)) + cycle_bit ^= 1; + + event_deq_seg = event_deq_seg->next; + event = event_deq_seg->trbs; + } + + cdnsp_update_erst_dequeue(pdev, event_ring_deq, 1); +} + +static void cdnsp_stop(struct cdnsp_device *pdev) +{ + u32 temp; + + cdnsp_cmd_flush_ep(pdev, &pdev->eps[0]); + + /* Remove internally queued request for ep0. */ + if (!list_empty(&pdev->eps[0].pending_list)) { + struct cdnsp_request *req; + + req = next_request(&pdev->eps[0].pending_list); + if (req == &pdev->ep0_preq) + cdnsp_ep_dequeue(&pdev->eps[0], req); + } + + cdnsp_disable_port(pdev, &pdev->usb2_port.regs->portsc); + cdnsp_disable_port(pdev, &pdev->usb3_port.regs->portsc); + cdnsp_disable_slot(pdev); + cdnsp_halt(pdev); + + temp = readl(&pdev->op_regs->status); + writel((temp & ~0x1fff) | STS_EINT, &pdev->op_regs->status); + temp = readl(&pdev->ir_set->irq_pending); + writel(IMAN_IE_CLEAR(temp), &pdev->ir_set->irq_pending); + + cdnsp_clear_port_change_bit(pdev, &pdev->usb2_port.regs->portsc); + cdnsp_clear_port_change_bit(pdev, &pdev->usb3_port.regs->portsc); + + /* Clear interrupt line */ + temp = readl(&pdev->ir_set->irq_pending); + temp |= IMAN_IP; + writel(temp, &pdev->ir_set->irq_pending); + + cdnsp_consume_all_events(pdev); + cdnsp_clear_cmd_ring(pdev); + + trace_cdnsp_exit("Controller stopped."); +} + +/* + * Stop controller. + * This function is called by the gadget core when the driver is removed. + * Disable slot, disable IRQs, and quiesce the controller. + */ +static int cdnsp_gadget_udc_stop(struct usb_gadget *g) +{ + struct cdnsp_device *pdev = gadget_to_cdnsp(g); + unsigned long flags; + + spin_lock_irqsave(&pdev->lock, flags); + cdnsp_stop(pdev); + pdev->gadget_driver = NULL; + spin_unlock_irqrestore(&pdev->lock, flags); + + return 0; +} + +static int cdnsp_gadget_get_frame(struct usb_gadget *g) +{ + struct cdnsp_device *pdev = gadget_to_cdnsp(g); + + return cdnsp_get_frame(pdev); +} + +static void __cdnsp_gadget_wakeup(struct cdnsp_device *pdev) +{ + struct cdnsp_port_regs __iomem *port_regs; + u32 portpm, portsc; + + port_regs = pdev->active_port->regs; + portsc = readl(&port_regs->portsc) & PORT_PLS_MASK; + + /* Remote wakeup feature is not enabled by host. 
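+	 * For a USB 2.x link in U2 this is the PORT_RWE check below; for a
+	 * USB 3.x link in U3 the corresponding gate is pdev->may_wakeup.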
*/ + if (pdev->gadget.speed < USB_SPEED_SUPER && portsc == XDEV_U2) { + portpm = readl(&port_regs->portpmsc); + + if (!(portpm & PORT_RWE)) + return; + } + + if (portsc == XDEV_U3 && !pdev->may_wakeup) + return; + + cdnsp_set_link_state(pdev, &port_regs->portsc, XDEV_U0); + + pdev->cdnsp_state |= CDNSP_WAKEUP_PENDING; +} + +static int cdnsp_gadget_wakeup(struct usb_gadget *g) +{ + struct cdnsp_device *pdev = gadget_to_cdnsp(g); + unsigned long flags; + + spin_lock_irqsave(&pdev->lock, flags); + __cdnsp_gadget_wakeup(pdev); + spin_unlock_irqrestore(&pdev->lock, flags); + + return 0; +} + +static int cdnsp_gadget_set_selfpowered(struct usb_gadget *g, + int is_selfpowered) +{ + struct cdnsp_device *pdev = gadget_to_cdnsp(g); + unsigned long flags; + + spin_lock_irqsave(&pdev->lock, flags); + g->is_selfpowered = !!is_selfpowered; + spin_unlock_irqrestore(&pdev->lock, flags); + + return 0; +} + +static int cdnsp_gadget_pullup(struct usb_gadget *gadget, int is_on) +{ + struct cdnsp_device *pdev = gadget_to_cdnsp(gadget); + struct cdns *cdns = dev_get_drvdata(pdev->dev); + + trace_cdnsp_pullup(is_on); + + if (!is_on) { + cdnsp_reset_device(pdev); + cdns_clear_vbus(cdns); + } else { + cdns_set_vbus(cdns); + } + return 0; +} + +static const struct usb_gadget_ops cdnsp_gadget_ops = { + .get_frame = cdnsp_gadget_get_frame, + .wakeup = cdnsp_gadget_wakeup, + .set_selfpowered = cdnsp_gadget_set_selfpowered, + .pullup = cdnsp_gadget_pullup, + .udc_start = cdnsp_gadget_udc_start, + .udc_stop = cdnsp_gadget_udc_stop, +}; + +static void cdnsp_get_ep_buffering(struct cdnsp_device *pdev, + struct cdnsp_ep *pep) +{ + void __iomem *reg = &pdev->cap_regs->hc_capbase; + int endpoints; + + reg += cdnsp_find_next_ext_cap(reg, 0, XBUF_CAP_ID); + + if (!pep->direction) { + pep->buffering = readl(reg + XBUF_RX_TAG_MASK_0_OFFSET); + pep->buffering_period = readl(reg + XBUF_RX_TAG_MASK_1_OFFSET); + pep->buffering = (pep->buffering + 1) / 2; + pep->buffering_period = (pep->buffering_period + 1) / 2; + return; + } + + endpoints = HCS_ENDPOINTS(pdev->hcs_params1) / 2; + + /* Set to XBUF_TX_TAG_MASK_0 register. */ + reg += XBUF_TX_CMD_OFFSET + (endpoints * 2 + 2) * sizeof(u32); + /* Set reg to XBUF_TX_TAG_MASK_N related with this endpoint. */ + reg += pep->number * sizeof(u32) * 2; + + pep->buffering = (readl(reg) + 1) / 2; + pep->buffering_period = pep->buffering; +} + +static int cdnsp_gadget_init_endpoints(struct cdnsp_device *pdev) +{ + int max_streams = HCC_MAX_PSA(pdev->hcc_params); + struct cdnsp_ep *pep; + int i; + + INIT_LIST_HEAD(&pdev->gadget.ep_list); + + if (max_streams < STREAM_LOG_STREAMS) { + dev_err(pdev->dev, "Stream size %d not supported\n", + max_streams); + return -EINVAL; + } + + max_streams = STREAM_LOG_STREAMS; + + for (i = 0; i < CDNSP_ENDPOINTS_NUM; i++) { + bool direction = !(i & 1); /* Start from OUT endpoint. */ + u8 epnum = ((i + 1) >> 1); + + if (!CDNSP_IF_EP_EXIST(pdev, epnum, direction)) + continue; + + pep = &pdev->eps[i]; + pep->pdev = pdev; + pep->number = epnum; + pep->direction = direction; /* 0 for OUT, 1 for IN. 
*/ + + /* + * Ep0 is bidirectional, so ep0in and ep0out are represented by + * pdev->eps[0] + */ + if (epnum == 0) { + snprintf(pep->name, sizeof(pep->name), "ep%d%s", + epnum, "BiDir"); + + pep->idx = 0; + usb_ep_set_maxpacket_limit(&pep->endpoint, 512); + pep->endpoint.maxburst = 1; + pep->endpoint.ops = &cdnsp_gadget_ep0_ops; + pep->endpoint.desc = &cdnsp_gadget_ep0_desc; + pep->endpoint.comp_desc = NULL; + pep->endpoint.caps.type_control = true; + pep->endpoint.caps.dir_in = true; + pep->endpoint.caps.dir_out = true; + + pdev->ep0_preq.epnum = pep->number; + pdev->ep0_preq.pep = pep; + pdev->gadget.ep0 = &pep->endpoint; + } else { + snprintf(pep->name, sizeof(pep->name), "ep%d%s", + epnum, (pep->direction) ? "in" : "out"); + + pep->idx = (epnum * 2 + (direction ? 1 : 0)) - 1; + usb_ep_set_maxpacket_limit(&pep->endpoint, 1024); + + pep->endpoint.max_streams = max_streams; + pep->endpoint.ops = &cdnsp_gadget_ep_ops; + list_add_tail(&pep->endpoint.ep_list, + &pdev->gadget.ep_list); + + pep->endpoint.caps.type_iso = true; + pep->endpoint.caps.type_bulk = true; + pep->endpoint.caps.type_int = true; + + pep->endpoint.caps.dir_in = direction; + pep->endpoint.caps.dir_out = !direction; + } + + pep->endpoint.name = pep->name; + pep->in_ctx = cdnsp_get_ep_ctx(&pdev->in_ctx, pep->idx); + pep->out_ctx = cdnsp_get_ep_ctx(&pdev->out_ctx, pep->idx); + cdnsp_get_ep_buffering(pdev, pep); + + dev_dbg(pdev->dev, "Init %s, MPS: %04x SupType: " + "CTRL: %s, INT: %s, BULK: %s, ISOC %s, " + "SupDir IN: %s, OUT: %s\n", + pep->name, 1024, + (pep->endpoint.caps.type_control) ? "yes" : "no", + (pep->endpoint.caps.type_int) ? "yes" : "no", + (pep->endpoint.caps.type_bulk) ? "yes" : "no", + (pep->endpoint.caps.type_iso) ? "yes" : "no", + (pep->endpoint.caps.dir_in) ? "yes" : "no", + (pep->endpoint.caps.dir_out) ? 
"yes" : "no"); + + INIT_LIST_HEAD(&pep->pending_list); + } + + return 0; +} + +static void cdnsp_gadget_free_endpoints(struct cdnsp_device *pdev) +{ + struct cdnsp_ep *pep; + int i; + + for (i = 0; i < CDNSP_ENDPOINTS_NUM; i++) { + pep = &pdev->eps[i]; + if (pep->number != 0 && pep->out_ctx) + list_del(&pep->endpoint.ep_list); + } +} + +void cdnsp_disconnect_gadget(struct cdnsp_device *pdev) +{ + pdev->cdnsp_state |= CDNSP_STATE_DISCONNECT_PENDING; + + if (pdev->gadget_driver && pdev->gadget_driver->disconnect) { + spin_unlock(&pdev->lock); + pdev->gadget_driver->disconnect(&pdev->gadget); + spin_lock(&pdev->lock); + } + + pdev->gadget.speed = USB_SPEED_UNKNOWN; + usb_gadget_set_state(&pdev->gadget, USB_STATE_NOTATTACHED); + + pdev->cdnsp_state &= ~CDNSP_STATE_DISCONNECT_PENDING; +} + +void cdnsp_suspend_gadget(struct cdnsp_device *pdev) +{ + if (pdev->gadget_driver && pdev->gadget_driver->suspend) { + spin_unlock(&pdev->lock); + pdev->gadget_driver->suspend(&pdev->gadget); + spin_lock(&pdev->lock); + } +} + +void cdnsp_resume_gadget(struct cdnsp_device *pdev) +{ + if (pdev->gadget_driver && pdev->gadget_driver->resume) { + spin_unlock(&pdev->lock); + pdev->gadget_driver->resume(&pdev->gadget); + spin_lock(&pdev->lock); + } +} + +void cdnsp_irq_reset(struct cdnsp_device *pdev) +{ + struct cdnsp_port_regs __iomem *port_regs; + + cdnsp_reset_device(pdev); + + port_regs = pdev->active_port->regs; + pdev->gadget.speed = cdnsp_port_speed(readl(port_regs)); + + spin_unlock(&pdev->lock); + usb_gadget_udc_reset(&pdev->gadget, pdev->gadget_driver); + spin_lock(&pdev->lock); + + switch (pdev->gadget.speed) { + case USB_SPEED_SUPER_PLUS: + case USB_SPEED_SUPER: + cdnsp_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512); + pdev->gadget.ep0->maxpacket = 512; + break; + case USB_SPEED_HIGH: + case USB_SPEED_FULL: + cdnsp_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64); + pdev->gadget.ep0->maxpacket = 64; + break; + default: + /* Low speed is not supported. */ + dev_err(pdev->dev, "Unknown device speed\n"); + break; + } + + cdnsp_clear_chicken_bits_2(pdev, CHICKEN_XDMA_2_TP_CACHE_DIS); + cdnsp_setup_device(pdev, SETUP_CONTEXT_ONLY); + usb_gadget_set_state(&pdev->gadget, USB_STATE_DEFAULT); +} + +static void cdnsp_get_rev_cap(struct cdnsp_device *pdev) +{ + void __iomem *reg = &pdev->cap_regs->hc_capbase; + + reg += cdnsp_find_next_ext_cap(reg, 0, RTL_REV_CAP); + pdev->rev_cap = reg; + + dev_info(pdev->dev, "Rev: %08x/%08x, eps: %08x, buff: %08x/%08x\n", + readl(&pdev->rev_cap->ctrl_revision), + readl(&pdev->rev_cap->rtl_revision), + readl(&pdev->rev_cap->ep_supported), + readl(&pdev->rev_cap->rx_buff_size), + readl(&pdev->rev_cap->tx_buff_size)); +} + +static int cdnsp_gen_setup(struct cdnsp_device *pdev) +{ + int ret; + u32 reg; + + pdev->cap_regs = pdev->regs; + pdev->op_regs = pdev->regs + + HC_LENGTH(readl(&pdev->cap_regs->hc_capbase)); + pdev->run_regs = pdev->regs + + (readl(&pdev->cap_regs->run_regs_off) & RTSOFF_MASK); + + /* Cache read-only capability registers */ + pdev->hcs_params1 = readl(&pdev->cap_regs->hcs_params1); + pdev->hcc_params = readl(&pdev->cap_regs->hc_capbase); + pdev->hci_version = HC_VERSION(pdev->hcc_params); + pdev->hcc_params = readl(&pdev->cap_regs->hcc_params); + + cdnsp_get_rev_cap(pdev); + + /* Make sure the Device Controller is halted. */ + ret = cdnsp_halt(pdev); + if (ret) + return ret; + + /* Reset the internal controller memory state and registers. 
 */
+	ret = cdnsp_reset(pdev);
+	if (ret)
+		return ret;
+
+	/*
+	 * Set dma_mask and coherent_dma_mask to 64-bits,
+	 * if controller supports 64-bit addressing.
+	 */
+	if (HCC_64BIT_ADDR(pdev->hcc_params) &&
+	    !dma_set_mask(pdev->dev, DMA_BIT_MASK(64))) {
+		dev_dbg(pdev->dev, "Enabling 64-bit DMA addresses.\n");
+		dma_set_coherent_mask(pdev->dev, DMA_BIT_MASK(64));
+	} else {
+		/*
+		 * This is to avoid error in cases where a 32-bit USB
+		 * controller is used on a 64-bit capable system.
+		 */
+		ret = dma_set_mask(pdev->dev, DMA_BIT_MASK(32));
+		if (ret)
+			return ret;
+
+		dev_dbg(pdev->dev, "Enabling 32-bit DMA addresses.\n");
+		dma_set_coherent_mask(pdev->dev, DMA_BIT_MASK(32));
+	}
+
+	spin_lock_init(&pdev->lock);
+
+	ret = cdnsp_mem_init(pdev);
+	if (ret)
+		return ret;
+
+	/*
+	 * Software workaround for U1: after the transition to U1 the
+	 * controller starts gating the clock, and in some cases this causes
+	 * the controller to get stuck.
+	 */
+	reg = readl(&pdev->port3x_regs->mode_2);
+	reg &= ~CFG_3XPORT_U1_PIPE_CLK_GATE_EN;
+	writel(reg, &pdev->port3x_regs->mode_2);
+
+	return 0;
+}
+
+static int __cdnsp_gadget_init(struct cdns *cdns)
+{
+	struct cdnsp_device *pdev;
+	u32 max_speed;
+	int ret = -ENOMEM;
+
+	cdns_drd_gadget_on(cdns);
+
+	pdev = kzalloc(sizeof(*pdev), GFP_KERNEL);
+	if (!pdev)
+		return -ENOMEM;
+
+	pm_runtime_get_sync(cdns->dev);
+
+	cdns->gadget_dev = pdev;
+	pdev->dev = cdns->dev;
+	pdev->regs = cdns->dev_regs;
+	max_speed = usb_get_maximum_speed(cdns->dev);
+
+	switch (max_speed) {
+	case USB_SPEED_FULL:
+	case USB_SPEED_HIGH:
+	case USB_SPEED_SUPER:
+	case USB_SPEED_SUPER_PLUS:
+		break;
+	default:
+		dev_err(cdns->dev, "invalid speed parameter %d\n", max_speed);
+		fallthrough;
+	case USB_SPEED_UNKNOWN:
+		/* Default to SSP */
+		max_speed = USB_SPEED_SUPER_PLUS;
+		break;
+	}
+
+	pdev->gadget.ops = &cdnsp_gadget_ops;
+	pdev->gadget.name = "cdnsp-gadget";
+	pdev->gadget.speed = USB_SPEED_UNKNOWN;
+	pdev->gadget.sg_supported = 1;
+	pdev->gadget.max_speed = USB_SPEED_SUPER_PLUS;
+	pdev->gadget.lpm_capable = 1;
+
+	pdev->setup_buf = kzalloc(CDNSP_EP0_SETUP_SIZE, GFP_KERNEL);
+	if (!pdev->setup_buf)
+		goto free_pdev;
+
+	/*
+	 * The controller handles unaligned OUT buffers, but aligned buffers
+	 * should improve performance.
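+	 * With this quirk set, the gadget core rounds OUT buffer sizes up
+	 * to a multiple of the endpoint's wMaxPacketSize.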
+ */ + pdev->gadget.quirk_ep_out_aligned_size = true; + + ret = cdnsp_gen_setup(pdev); + if (ret) { + dev_err(pdev->dev, "Generic initialization failed %d\n", ret); + goto free_setup; + } + + ret = cdnsp_gadget_init_endpoints(pdev); + if (ret) { + dev_err(pdev->dev, "failed to initialize endpoints\n"); + goto halt_pdev; + } + + ret = usb_add_gadget_udc(pdev->dev, &pdev->gadget); + if (ret) { + dev_err(pdev->dev, "failed to register udc\n"); + goto free_endpoints; + } + + ret = devm_request_threaded_irq(pdev->dev, cdns->dev_irq, + cdnsp_irq_handler, + cdnsp_thread_irq_handler, IRQF_SHARED, + dev_name(pdev->dev), pdev); + if (ret) + goto del_gadget; + + return 0; + +del_gadget: + usb_del_gadget_udc(&pdev->gadget); +free_endpoints: + cdnsp_gadget_free_endpoints(pdev); +halt_pdev: + cdnsp_halt(pdev); + cdnsp_reset(pdev); + cdnsp_mem_cleanup(pdev); +free_setup: + kfree(pdev->setup_buf); +free_pdev: + kfree(pdev); + + return ret; +} + +static void cdnsp_gadget_exit(struct cdns *cdns) +{ + struct cdnsp_device *pdev = cdns->gadget_dev; + + devm_free_irq(pdev->dev, cdns->dev_irq, pdev); + pm_runtime_mark_last_busy(cdns->dev); + pm_runtime_put_autosuspend(cdns->dev); + usb_del_gadget_udc(&pdev->gadget); + cdnsp_gadget_free_endpoints(pdev); + cdnsp_mem_cleanup(pdev); + kfree(pdev); + cdns->gadget_dev = NULL; + cdns_drd_gadget_off(cdns); +} + +static int cdnsp_gadget_suspend(struct cdns *cdns, bool do_wakeup) +{ + struct cdnsp_device *pdev = cdns->gadget_dev; + unsigned long flags; + + if (pdev->link_state == XDEV_U3) + return 0; + + spin_lock_irqsave(&pdev->lock, flags); + cdnsp_disconnect_gadget(pdev); + cdnsp_stop(pdev); + spin_unlock_irqrestore(&pdev->lock, flags); + + return 0; +} + +static int cdnsp_gadget_resume(struct cdns *cdns, bool hibernated) +{ + struct cdnsp_device *pdev = cdns->gadget_dev; + enum usb_device_speed max_speed; + unsigned long flags; + int ret; + + if (!pdev->gadget_driver) + return 0; + + spin_lock_irqsave(&pdev->lock, flags); + max_speed = pdev->gadget_driver->max_speed; + + /* Limit speed if necessary. */ + max_speed = min(max_speed, pdev->gadget.max_speed); + + ret = cdnsp_run(pdev, max_speed); + + if (pdev->link_state == XDEV_U3) + __cdnsp_gadget_wakeup(pdev); + + spin_unlock_irqrestore(&pdev->lock, flags); + + return ret; +} + +/** + * cdnsp_gadget_init - initialize device structure + * @cdns: cdnsp instance + * + * This function initializes the gadget. + */ +int cdnsp_gadget_init(struct cdns *cdns) +{ + struct cdns_role_driver *rdrv; + + rdrv = devm_kzalloc(cdns->dev, sizeof(*rdrv), GFP_KERNEL); + if (!rdrv) + return -ENOMEM; + + rdrv->start = __cdnsp_gadget_init; + rdrv->stop = cdnsp_gadget_exit; + rdrv->suspend = cdnsp_gadget_suspend; + rdrv->resume = cdnsp_gadget_resume; + rdrv->state = CDNS_ROLE_STATE_INACTIVE; + rdrv->name = "gadget"; + cdns->roles[USB_ROLE_DEVICE] = rdrv; + + return 0; +} diff --git a/drivers/usb/cdns3/cdnsp-gadget.h b/drivers/usb/cdns3/cdnsp-gadget.h new file mode 100644 index 000000000000..6bbb26548c04 --- /dev/null +++ b/drivers/usb/cdns3/cdnsp-gadget.h @@ -0,0 +1,1601 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Cadence CDNSP DRD Driver. + * + * Copyright (C) 2020 Cadence. + * + * Author: Pawel Laszczak <pawell@cadence.com> + * + * Code based on Linux XHCI driver. + * Origin: Copyright (C) 2008 Intel Corp. + */ +#ifndef __LINUX_CDNSP_GADGET_H +#define __LINUX_CDNSP_GADGET_H + +#include <linux/io-64-nonatomic-lo-hi.h> +#include <linux/usb/gadget.h> +#include <linux/irq.h> + +/* Max number slots - only 1 is allowed. 
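+ * A device controller represents a single connection, so unlike a host
+ * controller it never needs more than one slot.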
 */
+#define CDNSP_DEV_MAX_SLOTS	1
+
+#define CDNSP_EP0_SETUP_SIZE	512
+
+/* One control endpoint, plus 15 IN and 15 OUT endpoints. */
+#define CDNSP_ENDPOINTS_NUM	31
+
+/* Best Effort Service Latency. */
+#define CDNSP_DEFAULT_BESL	0
+
+/* Device Controller command default timeout value in us */
+#define CDNSP_CMD_TIMEOUT	(15 * 1000)
+
+/* Up to 16 ms to halt a device controller */
+#define CDNSP_MAX_HALT_USEC	(16 * 1000)
+
+#define CDNSP_CTX_SIZE	2112
+
+/*
+ * Controller register interface.
+ */
+
+/**
+ * struct cdnsp_cap_regs - CDNSP Registers.
+ * @hc_capbase: Length of the capabilities register and controller
+ *              version number
+ * @hcs_params1: HCSPARAMS1 - Structural Parameters 1
+ * @hcs_params2: HCSPARAMS2 - Structural Parameters 2
+ * @hcs_params3: HCSPARAMS3 - Structural Parameters 3
+ * @hcc_params: HCCPARAMS - Capability Parameters
+ * @db_off: DBOFF - Doorbell array offset
+ * @run_regs_off: RTSOFF - Runtime register space offset
+ * @hcc_params2: HCCPARAMS2 - Capability Parameters 2.
+ */
+struct cdnsp_cap_regs {
+	__le32 hc_capbase;
+	__le32 hcs_params1;
+	__le32 hcs_params2;
+	__le32 hcs_params3;
+	__le32 hcc_params;
+	__le32 db_off;
+	__le32 run_regs_off;
+	__le32 hcc_params2;
+	/* Reserved up to (CAPLENGTH - 0x1C) */
+};
+
+/* hc_capbase bitmasks. */
+/* bits 7:0 - how long is the Capabilities register. */
+#define HC_LENGTH(p)		(((p) >> 00) & GENMASK(7, 0))
+/* bits 31:16 */
+#define HC_VERSION(p)		(((p) >> 16) & GENMASK(15, 0))
+
+/* HCSPARAMS1 - hcs_params1 - bitmasks */
+/* bits 0:7, Max Device Endpoints */
+#define HCS_ENDPOINTS_MASK	GENMASK(7, 0)
+#define HCS_ENDPOINTS(p)	(((p) & HCS_ENDPOINTS_MASK) >> 0)
+
+/* HCCPARAMS offset from PCI base address */
+#define HCC_PARAMS_OFFSET	0x10
+
+/* HCCPARAMS - hcc_params - bitmasks */
+/* 1: device controller can use 64-bit address pointers. */
+#define HCC_64BIT_ADDR(p)	((p) & BIT(0))
+/* 1: device controller uses 64-byte Device Context structures. */
+#define HCC_64BYTE_CONTEXT(p)	((p) & BIT(2))
+/* Max size for Primary Stream Arrays - 2^(n+1), where n is bits 12:15. */
+#define HCC_MAX_PSA(p)		((((p) >> 12) & 0xf) + 1)
+/* Extended Capabilities pointer from PCI base. */
+#define HCC_EXT_CAPS(p)		(((p) & GENMASK(31, 16)) >> 16)
+
+#define CTX_SIZE(_hcc)		(HCC_64BYTE_CONTEXT(_hcc) ? 64 : 32)
+
+/* db_off bitmask - bits 0:1 reserved. */
+#define DBOFF_MASK	GENMASK(31, 2)
+
+/* run_regs_off bitmask - bits 0:4 reserved. */
+#define RTSOFF_MASK	GENMASK(31, 5)
+
+/**
+ * struct cdnsp_op_regs - Device Controller Operational Registers.
+ * @command: USBCMD - Controller command register.
+ * @status: USBSTS - Controller status register.
+ * @page_size: This indicates the page size that the device controller supports.
+ *             If bit n is set, the controller supports a page size of 2^(n+12),
+ *             up to a 128MB page size. 4K is the minimum page size.
+ * @dnctrl: DNCTRL - Device notification control register.
+ * @cmd_ring: CRP - 64-bit Command Ring Pointer.
+ * @dcbaa_ptr: DCBAAP - 64-bit Device Context Base Address Array Pointer.
+ * @config_reg: CONFIG - Configure Register.
+ * @port_reg_base: PORTSCn - base address for Port Status and Control.
+ *                 Each port has a Port Status and Control register,
+ *                 followed by a Port Power Management Status and Control
+ *                 register, a Port Link Info register, and a reserved
+ *                 register.
+ */
+struct cdnsp_op_regs {
+	__le32 command;
+	__le32 status;
+	__le32 page_size;
+	__le32 reserved1;
+	__le32 reserved2;
+	__le32 dnctrl;
+	__le64 cmd_ring;
+	/* rsvd: offset 0x20-2F.
 */
+	__le32 reserved3[4];
+	__le64 dcbaa_ptr;
+	__le32 config_reg;
+	/* rsvd: offset 0x3C-3FF. */
+	__le32 reserved4[241];
+	/* port 1 registers, which serve as a base address for other ports. */
+	__le32 port_reg_base;
+};
+
+/* Number of registers per port. */
+#define NUM_PORT_REGS	4
+
+/**
+ * struct cdnsp_port_regs - Port Registers.
+ * @portsc: PORTSC - Port Status and Control Register.
+ * @portpmsc: PORTPMSC - Port Power Management Status and Control Register.
+ * @portli: PORTLI - Port Link Info register.
+ */
+struct cdnsp_port_regs {
+	__le32 portsc;
+	__le32 portpmsc;
+	__le32 portli;
+	__le32 reserved;
+};
+
+/*
+ * These bits are Read Only (RO) and should be saved and written to the
+ * registers: 0 (connect status) and 10:13 (port speed).
+ * These bits are also sticky - meaning they're in the AUX well and they aren't
+ * changed by a hot or warm reset.
+ */
+#define CDNSP_PORT_RO	(PORT_CONNECT | DEV_SPEED_MASK)
+
+/*
+ * These bits are RW; writing a 0 clears the bit, writing a 1 sets the bit:
+ * bits 5:8 (link state), 25:26 ("wake on" enable state)
+ */
+#define CDNSP_PORT_RWS	(PORT_PLS_MASK | PORT_WKCONN_E | PORT_WKDISC_E)
+
+/*
+ * These bits are RW; writing a 1 clears the bit, writing a 0 has no effect:
+ * bits 1 (port enable/disable), 17 (connect changed),
+ * 21 (port reset changed), 22 (port link state changed).
+ */
+#define CDNSP_PORT_RW1CS	(PORT_PED | PORT_CSC | PORT_RC | PORT_PLC)
+
+/* USBCMD - USB command - bitmasks. */
+/* Run/Stop, controller execution - do not write unless controller is halted. */
+#define CMD_R_S		BIT(0)
+/*
+ * Reset device controller - resets internal controller state machine and all
+ * registers (except PCI config regs).
+ */
+#define CMD_RESET	BIT(1)
+/* Event Interrupt Enable - a '1' allows interrupts from the controller. */
+#define CMD_INTE	BIT(2)
+/*
+ * Device System Error Interrupt Enable - get out-of-band signal for
+ * controller errors.
+ */
+#define CMD_DSEIE	BIT(3)
+/* device controller save/restore state. */
+#define CMD_CSS		BIT(8)
+#define CMD_CRS		BIT(9)
+/*
+ * Enable Wrap Event - '1' means device controller generates an event
+ * when MFINDEX wraps.
+ */
+#define CMD_EWE		BIT(10)
+/* 1: device enabled */
+#define CMD_DEVEN	BIT(17)
+/* bits 18:31 are reserved (and should be preserved on writes). */
+
+/* Command register values to disable interrupts. */
+#define CDNSP_IRQS	(CMD_INTE | CMD_DSEIE | CMD_EWE)
+
+/* USBSTS - USB status - bitmasks */
+/* controller not running - set to 1 when run/stop bit is cleared. */
+#define STS_HALT	BIT(0)
+/*
+ * serious error, e.g. PCI parity error. The controller will clear
+ * the run/stop bit.
+ */
+#define STS_FATAL	BIT(2)
+/* event interrupt - clear this prior to clearing any IP flags in IR set. */
+#define STS_EINT	BIT(3)
+/* port change detect */
+#define STS_PCD		BIT(4)
+/* save state status - '1' means device controller is saving state. */
+#define STS_SSS		BIT(8)
+/* restore state status - '1' means controller is restoring state. */
+#define STS_RSS		BIT(9)
+/* 1: save or restore error */
+#define STS_SRE		BIT(10)
+/* 1: device Not Ready to accept doorbell or op reg writes after reset. */
+#define STS_CNR		BIT(11)
+/* 1: internal Device Controller Error. */
+#define STS_HCE		BIT(12)
+
+/* CRCR - Command Ring Control Register - cmd_ring bitmasks. */
+/* bit 0 is the command ring cycle state. */
+#define CMD_RING_CS	BIT(0)
+/* stop ring immediately - abort the currently executing command. */
+#define CMD_RING_ABORT	BIT(2)
+/*
+ * Command Ring Busy.
+ * Set when the Doorbell register is written with the command DB and cleared
+ * when the controller reaches the end of the CR.
+ */
+#define CMD_RING_BUSY(p)	((p) & BIT(4))
+/* 1: command ring is running */
+#define CMD_RING_RUNNING	BIT(3)
+/* Command Ring pointer - bit mask for the lower 32 bits. */
+#define CMD_RING_RSVD_BITS	GENMASK(5, 0)
+
+/* CONFIG - Configure Register - config_reg bitmasks. */
+/* bits 0:7 - maximum number of device slots enabled. */
+#define MAX_DEVS	GENMASK(7, 0)
+/* bit 8: U3 Entry Enabled, assert PLC when controller enters U3. */
+#define CONFIG_U3E	BIT(8)
+
+/* PORTSC - Port Status and Control Register - port_reg_base bitmasks */
+/* 1: device connected. */
+#define PORT_CONNECT	BIT(0)
+/* 1: port enabled. */
+#define PORT_PED	BIT(1)
+/* 1: port reset signaling asserted. */
+#define PORT_RESET	BIT(4)
+/*
+ * Port Link State - bits 5:8
+ * A read gives the current link PM state of the port,
+ * a write with Link State Write Strobe sets the link state.
+ */
+#define PORT_PLS_MASK	GENMASK(8, 5)
+#define XDEV_U0		(0x0 << 5)
+#define XDEV_U1		(0x1 << 5)
+#define XDEV_U2		(0x2 << 5)
+#define XDEV_U3		(0x3 << 5)
+#define XDEV_DISABLED	(0x4 << 5)
+#define XDEV_RXDETECT	(0x5 << 5)
+#define XDEV_INACTIVE	(0x6 << 5)
+#define XDEV_POLLING	(0x7 << 5)
+#define XDEV_RECOVERY	(0x8 << 5)
+#define XDEV_HOT_RESET	(0x9 << 5)
+#define XDEV_COMP_MODE	(0xa << 5)
+#define XDEV_TEST_MODE	(0xb << 5)
+#define XDEV_RESUME	(0xf << 5)
+/* 1: port has power. */
+#define PORT_POWER	BIT(9)
+/*
+ * bits 10:13 indicate device speed:
+ *	0 - undefined speed - port hasn't been initialized by a reset yet
+ *	1 - full speed
+ *	2 - Reserved (Low Speed not supported)
+ *	3 - high speed
+ *	4 - super speed
+ *	5 - super speed plus
+ *	6-15 reserved
+ */
+#define DEV_SPEED_MASK	GENMASK(13, 10)
+#define XDEV_FS		(0x1 << 10)
+#define XDEV_HS		(0x3 << 10)
+#define XDEV_SS		(0x4 << 10)
+#define XDEV_SSP	(0x5 << 10)
+#define DEV_UNDEFSPEED(p)	(((p) & DEV_SPEED_MASK) == (0x0 << 10))
+#define DEV_FULLSPEED(p)	(((p) & DEV_SPEED_MASK) == XDEV_FS)
+#define DEV_HIGHSPEED(p)	(((p) & DEV_SPEED_MASK) == XDEV_HS)
+#define DEV_SUPERSPEED(p)	(((p) & DEV_SPEED_MASK) == XDEV_SS)
+#define DEV_SUPERSPEEDPLUS(p)	(((p) & DEV_SPEED_MASK) == XDEV_SSP)
+#define DEV_SUPERSPEED_ANY(p)	(((p) & DEV_SPEED_MASK) >= XDEV_SS)
+#define DEV_PORT_SPEED(p)	(((p) >> 10) & 0x0f)
+/* Port Link State Write Strobe - set this when changing link state */
+#define PORT_LINK_STROBE	BIT(16)
+/* 1: connect status change */
+#define PORT_CSC	BIT(17)
+/* 1: warm reset for a USB 3.0 device is done. */
+#define PORT_WRC	BIT(19)
+/* 1: reset change - 1 to 0 transition of PORT_RESET */
+#define PORT_RC		BIT(21)
+/*
+ * port link status change - set on some port link state transitions:
+ * Transition				Reason
+ * ----------------------------------------------------------------------------
+ * - U3 to Resume			Wakeup signaling from a device
+ * - Resume to Recovery to U0		USB 3.0 device resume
+ * - Resume to U0			USB 2.0 device resume
+ * - U3 to Recovery to U0		Software resume of USB 3.0 device complete
+ * - U3 to U0				Software resume of USB 2.0 device complete
+ * - U2 to U0				L1 resume of USB 2.1 device complete
+ * - U0 to U0				L1 entry rejection by USB 2.1 device
+ * - U0 to disabled			L1 entry error with USB 2.1 device
+ * - Any state to inactive		Error on USB 3.0 port
+ */
+#define PORT_PLC	BIT(22)
+/* Port configure error change - port failed to configure its link partner. */
+#define PORT_CEC	BIT(23)
+/* Wake on connect (enable).
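+ * cdnsp_set_link_state() sets this together with PORT_WKDISC_E before
+ * strobing a new link state.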
*/ +#define PORT_WKCONN_E BIT(25) +/* Wake on disconnect (enable). */ +#define PORT_WKDISC_E BIT(26) +/* Indicates if Warm Reset is being received. */ +#define PORT_WR BIT(31) + +#define PORT_CHANGE_BITS (PORT_CSC | PORT_WRC | PORT_RC | PORT_PLC | PORT_CEC) + +/* PORTPMSCUSB3 - Port Power Management Status and Control - bitmasks. */ +/* Enables U1 entry. */ +#define PORT_U1_TIMEOUT_MASK GENMASK(7, 0) +#define PORT_U1_TIMEOUT(p) ((p) & PORT_U1_TIMEOUT_MASK) +/* Enables U2 entry .*/ +#define PORT_U2_TIMEOUT_MASK GENMASK(14, 8) +#define PORT_U2_TIMEOUT(p) (((p) << 8) & PORT_U2_TIMEOUT_MASK) + +/* PORTPMSCUSB2 - Port Power Management Status and Control - bitmasks. */ +#define PORT_L1S_MASK GENMASK(2, 0) +#define PORT_L1S(p) ((p) & PORT_L1S_MASK) +#define PORT_L1S_ACK PORT_L1S(1) +#define PORT_L1S_NYET PORT_L1S(2) +#define PORT_L1S_STALL PORT_L1S(3) +#define PORT_L1S_TIMEOUT PORT_L1S(4) +/* Remote Wake Enable. */ +#define PORT_RWE BIT(3) +/* Best Effort Service Latency (BESL). */ +#define PORT_BESL(p) (((p) << 4) & GENMASK(7, 4)) +/* Hardware LPM Enable (HLE). */ +#define PORT_HLE BIT(16) +/* Received Best Effort Service Latency (BESL). */ +#define PORT_RRBESL(p) (((p) & GENMASK(20, 17)) >> 17) +/* Port Test Control. */ +#define PORT_TEST_MODE_MASK GENMASK(31, 28) +#define PORT_TEST_MODE(p) (((p) << 28) & PORT_TEST_MODE_MASK) + +/** + * struct cdnsp_intr_reg - Interrupt Register Set. + * @irq_pending: IMAN - Interrupt Management Register. Used to enable + * interrupts and check for pending interrupts. + * @irq_control: IMOD - Interrupt Moderation Register. + * Used to throttle interrupts. + * @erst_size: Number of segments in the Event Ring Segment Table (ERST). + * @erst_base: ERST base address. + * @erst_dequeue: Event ring dequeue pointer. + * + * Each interrupter (defined by a MSI-X vector) has an event ring and an Event + * Ring Segment Table (ERST) associated with it. The event ring is comprised of + * multiple segments of the same size. The controller places events on the ring + * and "updates the Cycle bit in the TRBs to indicate to software the current + * position of the Enqueue Pointer." The driver processes those events and + * updates the dequeue pointer. + */ +struct cdnsp_intr_reg { + __le32 irq_pending; + __le32 irq_control; + __le32 erst_size; + __le32 rsvd; + __le64 erst_base; + __le64 erst_dequeue; +}; + +/* IMAN - Interrupt Management Register - irq_pending bitmasks l. */ +#define IMAN_IE BIT(1) +#define IMAN_IP BIT(0) +/* bits 2:31 need to be preserved */ +#define IMAN_IE_SET(p) (((p) & IMAN_IE) | 0x2) +#define IMAN_IE_CLEAR(p) (((p) & IMAN_IE) & ~(0x2)) + +/* IMOD - Interrupter Moderation Register - irq_control bitmasks. */ +/* + * Minimum interval between interrupts (in 250ns intervals). The interval + * between interrupts will be longer if there are no events on the event ring. + * Default is 4000 (1 ms). + */ +#define IMOD_INTERVAL_MASK GENMASK(15, 0) +/* Counter used to count down the time to the next interrupt - HW use only */ +#define IMOD_COUNTER_MASK GENMASK(31, 16) +#define IMOD_DEFAULT_INTERVAL 0 + +/* erst_size bitmasks. */ +/* Preserve bits 16:31 of erst_size. */ +#define ERST_SIZE_MASK GENMASK(31, 16) + +/* erst_dequeue bitmasks. */ +/* + * Dequeue ERST Segment Index (DESI) - Segment number (or alias) + * where the current dequeue pointer lies. This is an optional HW hint. + */ +#define ERST_DESI_MASK GENMASK(2, 0) +/* Event Handler Busy (EHB) - is the event ring scheduled to be serviced. 
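+ * RW1C: cdnsp_update_erst_dequeue() writes 1 here once all pending events
+ * have been handled.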
*/
+#define ERST_EHB BIT(3)
+#define ERST_PTR_MASK GENMASK(3, 0)
+
+/**
+ * struct cdnsp_run_regs
+ * @microframe_index: MFINDEX - current microframe number.
+ * @ir_set: Array of Interrupter registers.
+ *
+ * Device Controller Runtime Registers:
+ * "Software should read and write these registers using only Dword (32 bit)
+ * or larger accesses"
+ */
+struct cdnsp_run_regs {
+ __le32 microframe_index;
+ __le32 rsvd[7];
+ struct cdnsp_intr_reg ir_set[128];
+};
+
+/**
+ * struct cdnsp_20port_cap - USB2.0 Port Peripheral Configuration Registers.
+ * @ext_cap: Header register for Extended Capability.
+ * @port_reg1: Timer Configuration Register.
+ * @port_reg2: Timer Configuration Register.
+ * @port_reg3: Timer Configuration Register.
+ * @port_reg4: Timer Configuration Register.
+ * @port_reg5: Timer Configuration Register.
+ * @port_reg6: Chicken bits for USB20PPP.
+ */
+struct cdnsp_20port_cap {
+ __le32 ext_cap;
+ __le32 port_reg1;
+ __le32 port_reg2;
+ __le32 port_reg3;
+ __le32 port_reg4;
+ __le32 port_reg5;
+ __le32 port_reg6;
+};
+
+/* Extended capability register fields */
+#define EXT_CAPS_ID(p) (((p) >> 0) & GENMASK(7, 0))
+#define EXT_CAPS_NEXT(p) (((p) >> 8) & GENMASK(7, 0))
+/* Extended capability IDs - ID 0 reserved */
+#define EXT_CAPS_PROTOCOL 2
+
+/* USB 2.0 Port Peripheral Configuration Extended Capability */
+#define EXT_CAP_CFG_DEV_20PORT_CAP_ID 0xC1
+/*
+ * Setting this bit to '1' enables automatic wakeup from L1 state on transfer
+ * TRB prepared when USBSSP operates in USB2.0 mode.
+ */
+#define PORT_REG6_L1_L0_HW_EN BIT(1)
+/*
+ * Setting this bit to '1' forces Full Speed when USBSSP operates in USB2.0
+ * mode (disables High Speed).
+ */
+#define PORT_REG6_FORCE_FS BIT(0)
+
+/**
+ * struct cdnsp_3xport_cap - USB3.x Port Peripheral Configuration Registers.
+ * @ext_cap: Header register for Extended Capability.
+ * @mode_addr: Miscellaneous 3xPORT operation mode configuration register.
+ * @mode_2: 3x Port Control Register 2.
+ */
+struct cdnsp_3xport_cap {
+ __le32 ext_cap;
+ __le32 mode_addr;
+ __le32 reserved[52];
+ __le32 mode_2;
+};
+
+/* Extended Capability Header for 3XPort Configuration Registers. */
+#define D_XEC_CFG_3XPORT_CAP 0xC0
+#define CFG_3XPORT_SSP_SUPPORT BIT(31)
+#define CFG_3XPORT_U1_PIPE_CLK_GATE_EN BIT(0)
+
+/* Revision Extended Capability ID */
+#define RTL_REV_CAP 0xC4
+#define RTL_REV_CAP_RX_BUFF_CMD_SIZE GENMASK(31, 24)
+#define RTL_REV_CAP_RX_BUFF_SIZE GENMASK(15, 0)
+#define RTL_REV_CAP_TX_BUFF_CMD_SIZE GENMASK(31, 24)
+#define RTL_REV_CAP_TX_BUFF_SIZE GENMASK(15, 0)
+
+#define CDNSP_VER_1 0x00000000
+#define CDNSP_VER_2 0x10000000
+
+#define CDNSP_IF_EP_EXIST(pdev, ep_num, dir) \
+ (readl(&(pdev)->rev_cap->ep_supported) & \
+ (BIT(ep_num) << ((dir) ? 0 : 16)))
+
+/**
+ * struct cdnsp_rev_cap - controller capabilities.
+ * @ext_cap: Header for RTL Revision Extended Capability.
+ * @rtl_revision: RTL revision.
+ * @rx_buff_size: Rx buffer sizes.
+ * @tx_buff_size: Tx buffer sizes.
+ * @ep_supported: Supported endpoints.
+ * @ctrl_revision: Controller revision ID.
+ */
+struct cdnsp_rev_cap {
+ __le32 ext_cap;
+ __le32 rtl_revision;
+ __le32 rx_buff_size;
+ __le32 tx_buff_size;
+ __le32 ep_supported;
+ __le32 ctrl_revision;
+};
+
+/* USB2.0 Port Peripheral Configuration Registers. */
+#define D_XEC_PRE_REGS_CAP 0xC8
+#define REG_CHICKEN_BITS_2_OFFSET 0x48
+#define CHICKEN_XDMA_2_TP_CACHE_DIS BIT(28)
+
+/* XBUF Extended Capability ID.
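+ *
+ * Illustrative use (not part of this patch): capability blocks like the ones
+ * above are located by walking the extended capability list with
+ * cdnsp_find_next_ext_cap(), declared later in this header:
+ *
+ *   offset = cdnsp_find_next_ext_cap(pdev->regs, 0, XBUF_CAP_ID);
+ *   if (offset)
+ *           xbuf_regs = pdev->regs + offset;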
*/
+#define XBUF_CAP_ID 0xCB
+#define XBUF_RX_TAG_MASK_0_OFFSET 0x1C
+#define XBUF_RX_TAG_MASK_1_OFFSET 0x24
+#define XBUF_TX_CMD_OFFSET 0x2C
+
+/**
+ * struct cdnsp_doorbell_array.
+ * @cmd_db: Command ring doorbell register.
+ * @ep_db: Endpoint ring doorbell register.
+ * Bits 0 - 7: Endpoint target.
+ * Bits 8 - 15: RsvdZ.
+ * Bits 16 - 31: Stream ID.
+ */
+struct cdnsp_doorbell_array {
+ __le32 cmd_db;
+ __le32 ep_db;
+};
+
+#define DB_VALUE(ep, stream) ((((ep) + 1) & 0xff) | ((stream) << 16))
+#define DB_VALUE_EP0_OUT(ep, stream) ((ep) & 0xff)
+#define DB_VALUE_CMD 0x00000000
+
+/**
+ * struct cdnsp_container_ctx.
+ * @type: Type of context. Used to calculate offsets to contained contexts.
+ * @size: Size of the context data.
+ * @ctx_size: Context data structure size - 64 or 32 bytes.
+ * @dma: DMA address of the bytes.
+ * @bytes: The raw context data given to HW.
+ *
+ * Represents either a Device or Input context. Holds a pointer to the raw
+ * memory used for the context (bytes) and DMA address of it (dma).
+ */
+struct cdnsp_container_ctx {
+ unsigned int type;
+#define CDNSP_CTX_TYPE_DEVICE 0x1
+#define CDNSP_CTX_TYPE_INPUT 0x2
+ int size;
+ int ctx_size;
+ dma_addr_t dma;
+ u8 *bytes;
+};
+
+/**
+ * struct cdnsp_slot_ctx
+ * @dev_info: Device speed, and last valid endpoint.
+ * @dev_port: Device port number that is needed to access the USB device.
+ * @int_target: Interrupter target number.
+ * @dev_state: Slot state and device address.
+ *
+ * Slot Context - This assumes the controller uses 32-byte context
+ * structures. If the controller uses 64-byte contexts, there is an additional
+ * 32 bytes reserved at the end of the slot context for controller internal use.
+ */
+struct cdnsp_slot_ctx {
+ __le32 dev_info;
+ __le32 dev_port;
+ __le32 int_target;
+ __le32 dev_state;
+ /* offset 0x10 to 0x1f reserved for controller internal use. */
+ __le32 reserved[4];
+};
+
+/* Bits 20:23 in the Slot Context are the speed for the device. */
+#define SLOT_SPEED_FS (XDEV_FS << 10)
+#define SLOT_SPEED_HS (XDEV_HS << 10)
+#define SLOT_SPEED_SS (XDEV_SS << 10)
+#define SLOT_SPEED_SSP (XDEV_SSP << 10)
+
+/* dev_info bitmasks. */
+/* Device speed - values defined by PORTSC Device Speed field - 20:23. */
+#define DEV_SPEED GENMASK(23, 20)
+#define GET_DEV_SPEED(n) (((n) & DEV_SPEED) >> 20)
+/* Index of the last valid endpoint context in this device context - 27:31. */
+#define LAST_CTX_MASK ((unsigned int)GENMASK(31, 27))
+#define LAST_CTX(p) ((p) << 27)
+#define LAST_CTX_TO_EP_NUM(p) (((p) >> 27) - 1)
+#define SLOT_FLAG BIT(0)
+#define EP0_FLAG BIT(1)
+
+/* dev_port bitmasks */
+/* Device port number that is needed to access the USB device. */
+#define DEV_PORT(p) (((p) & 0xff) << 16)
+
+/* dev_state bitmasks */
+/* USB device address - assigned by the controller. */
+#define DEV_ADDR_MASK GENMASK(7, 0)
+/* Slot state */
+#define SLOT_STATE GENMASK(31, 27)
+#define GET_SLOT_STATE(p) (((p) & SLOT_STATE) >> 27)
+
+#define SLOT_STATE_DISABLED 0
+#define SLOT_STATE_ENABLED SLOT_STATE_DISABLED
+#define SLOT_STATE_DEFAULT 1
+#define SLOT_STATE_ADDRESSED 2
+#define SLOT_STATE_CONFIGURED 3
+
+/**
+ * struct cdnsp_ep_ctx.
+ * @ep_info: Endpoint state, streams, mult, and interval information.
+ * @ep_info2: Information on endpoint type, max packet size, max burst size,
+ * error count, and whether the controller will force an event for
+ * all transactions.
+ * @deq: 64-bit ring dequeue pointer address. If the endpoint only
+ * defines one stream, this points to the endpoint transfer ring.
+ * Otherwise, it points to a stream context array, which has a
+ * ring pointer for each stream.
+ * @tx_info: Average TRB lengths for the endpoint ring and
+ * max payload within an Endpoint Service Interval Time (ESIT).
+ *
+ * Endpoint Context - This assumes the controller uses 32-byte context
+ * structures. If the controller uses 64-byte contexts, there is an additional
+ * 32 bytes reserved at the end of the endpoint context for controller internal
+ * use.
+ */
+struct cdnsp_ep_ctx {
+ __le32 ep_info;
+ __le32 ep_info2;
+ __le64 deq;
+ __le32 tx_info;
+ /* offset 0x14 - 0x1f reserved for controller internal use. */
+ __le32 reserved[3];
+};
+
+/* ep_info bitmasks. */
+/*
+ * Endpoint State - bits 0:2:
+ * 0 - disabled
+ * 1 - running
+ * 2 - halted due to halt condition
+ * 3 - stopped
+ * 4 - TRB error
+ * 5-7 - reserved
+ */
+#define EP_STATE_MASK GENMASK(3, 0)
+#define EP_STATE_DISABLED 0
+#define EP_STATE_RUNNING 1
+#define EP_STATE_HALTED 2
+#define EP_STATE_STOPPED 3
+#define EP_STATE_ERROR 4
+#define GET_EP_CTX_STATE(ctx) (le32_to_cpu((ctx)->ep_info) & EP_STATE_MASK)
+
+/* Mult - Max number of bursts within an interval, in EP companion desc. */
+#define EP_MULT(p) (((p) << 8) & GENMASK(9, 8))
+#define CTX_TO_EP_MULT(p) (((p) & GENMASK(9, 8)) >> 8)
+/* bits 10:14 are Max Primary Streams. */
+/* bit 15 is Linear Stream Array. */
+/* Interval - period between requests to an endpoint - 125us increments. */
+#define EP_INTERVAL(p) (((p) << 16) & GENMASK(23, 16))
+#define EP_INTERVAL_TO_UFRAMES(p) (1 << (((p) & GENMASK(23, 16)) >> 16))
+#define CTX_TO_EP_INTERVAL(p) (((p) & GENMASK(23, 16)) >> 16)
+#define EP_MAXPSTREAMS_MASK GENMASK(14, 10)
+#define EP_MAXPSTREAMS(p) (((p) << 10) & EP_MAXPSTREAMS_MASK)
+#define CTX_TO_EP_MAXPSTREAMS(p) (((p) & EP_MAXPSTREAMS_MASK) >> 10)
+/* Endpoint is set up with a Linear Stream Array (vs. Secondary Stream Array) */
+#define EP_HAS_LSA BIT(15)
+
+/* ep_info2 bitmasks */
+#define ERROR_COUNT(p) (((p) & 0x3) << 1)
+#define CTX_TO_EP_TYPE(p) (((p) >> 3) & 0x7)
+#define EP_TYPE(p) ((p) << 3)
+#define ISOC_OUT_EP 1
+#define BULK_OUT_EP 2
+#define INT_OUT_EP 3
+#define CTRL_EP 4
+#define ISOC_IN_EP 5
+#define BULK_IN_EP 6
+#define INT_IN_EP 7
+/* bit 6 reserved. */
+/* bit 7 is Device Initiate Disable - for disabling stream selection. */
+#define MAX_BURST(p) (((p) << 8) & GENMASK(15, 8))
+#define CTX_TO_MAX_BURST(p) (((p) & GENMASK(15, 8)) >> 8)
+#define MAX_PACKET(p) (((p) << 16) & GENMASK(31, 16))
+#define MAX_PACKET_MASK GENMASK(31, 16)
+#define MAX_PACKET_DECODED(p) (((p) & GENMASK(31, 16)) >> 16)
+
+/* tx_info bitmasks. */
+#define EP_AVG_TRB_LENGTH(p) ((p) & GENMASK(15, 0))
+#define EP_MAX_ESIT_PAYLOAD_LO(p) (((p) << 16) & GENMASK(31, 16))
+#define EP_MAX_ESIT_PAYLOAD_HI(p) ((((p) & GENMASK(23, 16)) >> 16) << 24)
+#define CTX_TO_MAX_ESIT_PAYLOAD_LO(p) (((p) & GENMASK(31, 16)) >> 16)
+#define CTX_TO_MAX_ESIT_PAYLOAD_HI(p) (((p) & GENMASK(31, 24)) >> 24)
+
+/* deq bitmasks. */
+#define EP_CTX_CYCLE_MASK BIT(0)
+#define CTX_DEQ_MASK (~0xfL)
+
+/**
+ * struct cdnsp_input_control_ctx - Input control context.
+ *
+ * @drop_flags: Set the bit of the endpoint context you want to disable.
+ * @add_flags: Set the bit of the endpoint context you want to enable.
+ */
+struct cdnsp_input_control_ctx {
+ __le32 drop_flags;
+ __le32 add_flags;
+ __le32 rsvd2[6];
+};
+
+/**
+ * struct cdnsp_command - represents everything that is needed to issue a
+ * command on the command ring.
+ *
+ * @in_ctx: Pointer to input context structure.
+ * @status: Command Completion Code for last command.
+ * @command_trb: Pointer to command TRB.
+ */
+struct cdnsp_command {
+ /* Input context for changing device state. */
+ struct cdnsp_container_ctx *in_ctx;
+ u32 status;
+ union cdnsp_trb *command_trb;
+};
+
+/**
+ * struct cdnsp_stream_ctx - stream context structure.
+ *
+ * @stream_ring: 64-bit stream ring address, cycle state, and stream type.
+ * @reserved: offset 0x14 - 0x1f reserved for controller internal use.
+ */
+struct cdnsp_stream_ctx {
+ __le64 stream_ring;
+ __le32 reserved[2];
+};
+
+/* Stream Context Types - bits 3:1 of stream ctx deq ptr. */
+#define SCT_FOR_CTX(p) (((p) << 1) & GENMASK(3, 1))
+/* Secondary stream array type, dequeue pointer is to a transfer ring. */
+#define SCT_SEC_TR 0
+/* Primary stream array type, dequeue pointer is to a transfer ring. */
+#define SCT_PRI_TR 1
+
+/**
+ * struct cdnsp_stream_info: Representing everything that is needed to
+ * support stream capable endpoints.
+ * @stream_rings: Array of pointers containing Transfer rings for all
+ * supported streams.
+ * @num_streams: Number of streams, including stream 0.
+ * @stream_ctx_array: The stream context array may be bigger than the number
+ * of streams the driver asked for.
+ * @num_stream_ctxs: Number of stream contexts.
+ * @ctx_array_dma: DMA address of the stream context array.
+ * @trb_address_map: For mapping physical TRB addresses to segments in
+ * stream rings.
+ * @td_count: Number of TDs associated with endpoint.
+ * @first_prime_det: First PRIME packet detected.
+ * @drbls_count: Number of allowed doorbells.
+ */
+struct cdnsp_stream_info {
+ struct cdnsp_ring **stream_rings;
+ unsigned int num_streams;
+ struct cdnsp_stream_ctx *stream_ctx_array;
+ unsigned int num_stream_ctxs;
+ dma_addr_t ctx_array_dma;
+ struct radix_tree_root trb_address_map;
+ int td_count;
+ u8 first_prime_det;
+#define STREAM_DRBL_FIFO_DEPTH 2
+ u8 drbls_count;
+};
+
+#define STREAM_LOG_STREAMS 4
+#define STREAM_NUM_STREAMS BIT(STREAM_LOG_STREAMS)
+
+#if STREAM_LOG_STREAMS > 16 || STREAM_LOG_STREAMS < 1
+#error "Unsupported stream value"
+#endif
+
+/**
+ * struct cdnsp_ep - extended device side representation of USB endpoint.
+ * @endpoint: usb endpoint
+ * @pending_list: List of requests queuing on transfer ring.
+ * @pdev: Device associated with this endpoint.
+ * @number: Endpoint number (1 - 15).
+ * @idx: The device context index (DCI).
+ * @interval: Interval between packets used for ISOC endpoint.
+ * @name: A human readable name e.g. ep1out.
+ * @direction: Endpoint direction.
+ * @buffering: Number of on-chip buffers related to endpoint.
+ * @buffering_period: Number of on-chip buffers related to periodic endpoint.
+ * @in_ctx: Pointer to input endpoint context structure.
+ * @out_ctx: Pointer to output endpoint context structure.
+ * @ring: Pointer to transfer ring.
+ * @stream_info: Hold stream information.
+ * @ep_state: Current state of endpoint.
+ * @skip: Sometimes the controller cannot process isochronous endpoint ring
+ * quickly enough, and it will miss some isoc TDs on the ring and
+ * generate a Missed Service Error Event.
+ * Set the skip flag when a Missed Service Error Event is received and
+ * process the missed TDs on the endpoint ring.
+ */
+struct cdnsp_ep {
+ struct usb_ep endpoint;
+ struct list_head pending_list;
+ struct cdnsp_device *pdev;
+ u8 number;
+ u8 idx;
+ u32 interval;
+ char name[20];
+ u8 direction;
+ u8 buffering;
+ u8 buffering_period;
+ struct cdnsp_ep_ctx *in_ctx;
+ struct cdnsp_ep_ctx *out_ctx;
+ struct cdnsp_ring *ring;
+ struct cdnsp_stream_info stream_info;
+ unsigned int ep_state;
+#define EP_ENABLED BIT(0)
+#define EP_DIS_IN_RROGRESS BIT(1)
+#define EP_HALTED BIT(2)
+#define EP_STOPPED BIT(3)
+#define EP_WEDGE BIT(4)
+#define EP0_HALTED_STATUS BIT(5)
+#define EP_HAS_STREAMS BIT(6)
+
+ bool skip;
+};
+
+/**
+ * struct cdnsp_device_context_array
+ * @dev_context_ptrs: Array of 64-bit DMA addresses for device contexts.
+ * @dma: DMA address for device contexts structure.
+ */
+struct cdnsp_device_context_array {
+ __le64 dev_context_ptrs[CDNSP_DEV_MAX_SLOTS + 1];
+ dma_addr_t dma;
+};
+
+/**
+ * struct cdnsp_transfer_event.
+ * @buffer: 64-bit buffer address, or immediate data.
+ * @transfer_len: Data length transferred.
+ * @flags: Field is interpreted differently based on the type of TRB.
+ */
+struct cdnsp_transfer_event {
+ __le64 buffer;
+ __le32 transfer_len;
+ __le32 flags;
+};
+
+/* Invalidate event after disabling endpoint. */
+#define TRB_EVENT_INVALIDATE 8
+
+/* Transfer event TRB length bit mask. */
+/* bits 0:23 */
+#define EVENT_TRB_LEN(p) ((p) & GENMASK(23, 0))
+/* Completion Code - only applicable for some types of TRBs */
+#define COMP_CODE_MASK (0xff << 24)
+#define GET_COMP_CODE(p) (((p) & COMP_CODE_MASK) >> 24)
+#define COMP_INVALID 0
+#define COMP_SUCCESS 1
+#define COMP_DATA_BUFFER_ERROR 2
+#define COMP_BABBLE_DETECTED_ERROR 3
+#define COMP_TRB_ERROR 5
+#define COMP_RESOURCE_ERROR 7
+#define COMP_NO_SLOTS_AVAILABLE_ERROR 9
+#define COMP_INVALID_STREAM_TYPE_ERROR 10
+#define COMP_SLOT_NOT_ENABLED_ERROR 11
+#define COMP_ENDPOINT_NOT_ENABLED_ERROR 12
+#define COMP_SHORT_PACKET 13
+#define COMP_RING_UNDERRUN 14
+#define COMP_RING_OVERRUN 15
+#define COMP_VF_EVENT_RING_FULL_ERROR 16
+#define COMP_PARAMETER_ERROR 17
+#define COMP_CONTEXT_STATE_ERROR 19
+#define COMP_EVENT_RING_FULL_ERROR 21
+#define COMP_INCOMPATIBLE_DEVICE_ERROR 22
+#define COMP_MISSED_SERVICE_ERROR 23
+#define COMP_COMMAND_RING_STOPPED 24
+#define COMP_COMMAND_ABORTED 25
+#define COMP_STOPPED 26
+#define COMP_STOPPED_LENGTH_INVALID 27
+#define COMP_STOPPED_SHORT_PACKET 28
+#define COMP_MAX_EXIT_LATENCY_TOO_LARGE_ERROR 29
+#define COMP_ISOCH_BUFFER_OVERRUN 31
+#define COMP_EVENT_LOST_ERROR 32
+#define COMP_UNDEFINED_ERROR 33
+#define COMP_INVALID_STREAM_ID_ERROR 34
+
+/* Transfer Event NRDY bit fields. */
+#define TRB_TO_DEV_STREAM(p) ((p) & GENMASK(16, 0))
+#define TRB_TO_HOST_STREAM(p) ((p) & GENMASK(16, 0))
+#define STREAM_PRIME_ACK 0xFFFE
+#define STREAM_REJECTED 0xFFFF
+
+/* Transfer Event bit fields. */
+#define TRB_TO_EP_ID(p) (((p) & GENMASK(20, 16)) >> 16)
+
+/**
+ * struct cdnsp_link_trb
+ * @segment_ptr: 64-bit segment pointer.
+ * @intr_target: Interrupter target.
+ * @control: Flags.
+ */
+struct cdnsp_link_trb {
+ __le64 segment_ptr;
+ __le32 intr_target;
+ __le32 control;
+};
+
+/* control bitfields */
+#define LINK_TOGGLE BIT(1)
+
+/**
+ * struct cdnsp_event_cmd - Command completion event TRB.
+ * @cmd_trb: Pointer to command TRB, or the value passed by the event data TRB.
+ * @status: Command completion parameters and error code.
+ * @flags: Flags.
+ */
+struct cdnsp_event_cmd {
+ __le64 cmd_trb;
+ __le32 status;
+ __le32 flags;
+};
+
+/* flags bitmasks */
+
+/* Address device - disable SetAddress.
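+ *
+ * Illustrative sketch (not part of this patch): an Address Device command
+ * TRB control word might be assembled from the fields defined around here;
+ * presumably SETUP_CONTEXT_ONLY below maps to BSR being set:
+ *
+ *   control = TRB_TYPE(TRB_ADDR_DEV) | SLOT_ID_FOR_TRB(slot_id) | TRB_BSR;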
*/
+#define TRB_BSR BIT(9)
+
+/* Configure Endpoint - Deconfigure. */
+#define TRB_DC BIT(9)
+
+/* Force Header */
+#define TRB_FH_TO_PACKET_TYPE(p) ((p) & GENMASK(4, 0))
+#define TRB_FH_TR_PACKET 0x4
+#define TRB_FH_TO_DEVICE_ADDRESS(p) (((p) << 25) & GENMASK(31, 25))
+#define TRB_FH_TR_PACKET_DEV_NOT 0x6
+#define TRB_FH_TO_NOT_TYPE(p) (((p) << 4) & GENMASK(7, 4))
+#define TRB_FH_TR_PACKET_FUNCTION_WAKE 0x1
+#define TRB_FH_TO_INTERFACE(p) (((p) << 8) & GENMASK(15, 8))
+
+enum cdnsp_setup_dev {
+ SETUP_CONTEXT_ONLY,
+ SETUP_CONTEXT_ADDRESS,
+};
+
+/* bits 24:31 are the slot ID. */
+#define TRB_TO_SLOT_ID(p) (((p) & GENMASK(31, 24)) >> 24)
+#define SLOT_ID_FOR_TRB(p) (((p) << 24) & GENMASK(31, 24))
+
+/* Stop Endpoint TRB - ep_index to endpoint ID for this TRB. */
+#define TRB_TO_EP_INDEX(p) (((p) >> 16) & 0x1f)
+
+#define EP_ID_FOR_TRB(p) ((((p) + 1) << 16) & GENMASK(20, 16))
+
+#define SUSPEND_PORT_FOR_TRB(p) (((p) & 1) << 23)
+#define TRB_TO_SUSPEND_PORT(p) (((p) >> 23) & 0x1)
+#define LAST_EP_INDEX 30
+
+/* Set TR Dequeue Pointer command TRB fields. */
+#define TRB_TO_STREAM_ID(p) ((((p) & GENMASK(31, 16)) >> 16))
+#define STREAM_ID_FOR_TRB(p) ((((p)) << 16) & GENMASK(31, 16))
+#define SCT_FOR_TRB(p) (((p) << 1) & 0x7)
+
+/* Link TRB specific fields. */
+#define TRB_TC BIT(1)
+
+/* Port Status Change Event TRB fields. */
+/* Port ID - bits 31:24. */
+#define GET_PORT_ID(p) (((p) & GENMASK(31, 24)) >> 24)
+#define SET_PORT_ID(p) (((p) << 24) & GENMASK(31, 24))
+#define EVENT_DATA BIT(2)
+
+/* Normal TRB fields. */
+/* transfer_len bitmasks - bits 0:16. */
+#define TRB_LEN(p) ((p) & GENMASK(16, 0))
+/* TD Size, packets remaining in this TD, bits 21:17 (5 bits, so max 31). */
+#define TRB_TD_SIZE(p) (min((p), (u32)31) << 17)
+#define GET_TD_SIZE(p) (((p) & GENMASK(21, 17)) >> 17)
+/*
+ * Controller uses the TD_SIZE field for TBC if Extended TBC
+ * is enabled (ETE).
+ */
+#define TRB_TD_SIZE_TBC(p) (min((p), (u32)31) << 17)
+/* Interrupter Target - which MSI-X vector to target the completion event at. */
+#define TRB_INTR_TARGET(p) (((p) << 22) & GENMASK(31, 22))
+#define GET_INTR_TARGET(p) (((p) & GENMASK(31, 22)) >> 22)
+/*
+ * Total burst count field, Rsvdz on controller with Extended TBC
+ * enabled (ETE).
+ */
+#define TRB_TBC(p) (((p) & 0x3) << 7)
+#define TRB_TLBPC(p) (((p) & 0xf) << 16)
+
+/* Cycle bit - indicates TRB ownership by the controller or the driver. */
+#define TRB_CYCLE BIT(0)
+/*
+ * Force next event data TRB to be evaluated before task switch.
+ * Used to pass OS data back after a TD completes.
+ */
+#define TRB_ENT BIT(1)
+/* Interrupt on short packet. */
+#define TRB_ISP BIT(2)
+/* Set PCIe no snoop attribute. */
+#define TRB_NO_SNOOP BIT(3)
+/* Chain multiple TRBs into a TD. */
+#define TRB_CHAIN BIT(4)
+/* Interrupt on completion. */
+#define TRB_IOC BIT(5)
+/* The buffer pointer contains immediate data. */
+#define TRB_IDT BIT(6)
+/* 0 - NRDY during data stage, 1 - NRDY during status stage (only control). */
+#define TRB_STAT BIT(7)
+/* Block Event Interrupt. */
+#define TRB_BEI BIT(9)
+
+/* Control transfer TRB specific fields.
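+ *
+ * Illustrative sketch (not part of this patch): a Data Stage TRB for the IN
+ * direction could combine the generic flags above with the fields below:
+ *
+ *   field = TRB_TYPE(TRB_DATA) | TRB_DIR_IN | TRB_ISP | TRB_CYCLE;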
*/ +#define TRB_DIR_IN BIT(16) + +/* TRB bit mask in Data Stage TRB */ +#define TRB_SETUPID_BITMASK GENMASK(9, 8) +#define TRB_SETUPID(p) ((p) << 8) +#define TRB_SETUPID_TO_TYPE(p) (((p) & TRB_SETUPID_BITMASK) >> 8) + +#define TRB_SETUP_SPEEDID_USB3 0x1 +#define TRB_SETUP_SPEEDID_USB2 0x0 +#define TRB_SETUP_SPEEDID(p) ((p) & (1 << 7)) + +#define TRB_SETUPSTAT_ACK 0x1 +#define TRB_SETUPSTAT_STALL 0x0 +#define TRB_SETUPSTAT(p) ((p) << 6) + +/* Isochronous TRB specific fields */ +#define TRB_SIA BIT(31) +#define TRB_FRAME_ID(p) (((p) << 20) & GENMASK(30, 20)) + +struct cdnsp_generic_trb { + __le32 field[4]; +}; + +union cdnsp_trb { + struct cdnsp_link_trb link; + struct cdnsp_transfer_event trans_event; + struct cdnsp_event_cmd event_cmd; + struct cdnsp_generic_trb generic; +}; + +/* TRB bit mask. */ +#define TRB_TYPE_BITMASK GENMASK(15, 10) +#define TRB_TYPE(p) ((p) << 10) +#define TRB_FIELD_TO_TYPE(p) (((p) & TRB_TYPE_BITMASK) >> 10) + +/* TRB type IDs. */ +/* bulk, interrupt, isoc scatter/gather, and control data stage. */ +#define TRB_NORMAL 1 +/* Setup Stage for control transfers. */ +#define TRB_SETUP 2 +/* Data Stage for control transfers. */ +#define TRB_DATA 3 +/* Status Stage for control transfers. */ +#define TRB_STATUS 4 +/* ISOC transfers. */ +#define TRB_ISOC 5 +/* TRB for linking ring segments. */ +#define TRB_LINK 6 +#define TRB_EVENT_DATA 7 +/* Transfer Ring No-op (not for the command ring). */ +#define TRB_TR_NOOP 8 + +/* Command TRBs */ +/* Enable Slot Command. */ +#define TRB_ENABLE_SLOT 9 +/* Disable Slot Command. */ +#define TRB_DISABLE_SLOT 10 +/* Address Device Command. */ +#define TRB_ADDR_DEV 11 +/* Configure Endpoint Command. */ +#define TRB_CONFIG_EP 12 +/* Evaluate Context Command. */ +#define TRB_EVAL_CONTEXT 13 +/* Reset Endpoint Command. */ +#define TRB_RESET_EP 14 +/* Stop Transfer Ring Command. */ +#define TRB_STOP_RING 15 +/* Set Transfer Ring Dequeue Pointer Command. */ +#define TRB_SET_DEQ 16 +/* Reset Device Command. */ +#define TRB_RESET_DEV 17 +/* Force Event Command (opt). */ +#define TRB_FORCE_EVENT 18 +/* Force Header Command - generate a transaction or link management packet. */ +#define TRB_FORCE_HEADER 22 +/* No-op Command - not for transfer rings. */ +#define TRB_CMD_NOOP 23 +/* TRB IDs 24-31 reserved. */ + +/* Event TRBS. */ +/* Transfer Event. */ +#define TRB_TRANSFER 32 +/* Command Completion Event. */ +#define TRB_COMPLETION 33 +/* Port Status Change Event. */ +#define TRB_PORT_STATUS 34 +/* Device Controller Event. */ +#define TRB_HC_EVENT 37 +/* MFINDEX Wrap Event - microframe counter wrapped. */ +#define TRB_MFINDEX_WRAP 39 +/* TRB IDs 40-47 reserved. */ +/* Endpoint Not Ready Event. */ +#define TRB_ENDPOINT_NRDY 48 +/* TRB IDs 49-53 reserved. */ +/* Halt Endpoint Command. */ +#define TRB_HALT_ENDPOINT 54 +/* Doorbell Overflow Event. */ +#define TRB_DRB_OVERFLOW 57 +/* Flush Endpoint Command. */ +#define TRB_FLUSH_ENDPOINT 58 + +#define TRB_TYPE_LINK(x) (((x) & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK)) +#define TRB_TYPE_LINK_LE32(x) (((x) & cpu_to_le32(TRB_TYPE_BITMASK)) == \ + cpu_to_le32(TRB_TYPE(TRB_LINK))) +#define TRB_TYPE_NOOP_LE32(x) (((x) & cpu_to_le32(TRB_TYPE_BITMASK)) == \ + cpu_to_le32(TRB_TYPE(TRB_TR_NOOP))) + +/* + * TRBS_PER_SEGMENT must be a multiple of 4. + * The command ring is 64-byte aligned, so it must also be greater than 16. 
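+ *
+ * Worked example for the values below (illustrative): 256 TRBs of 16 bytes
+ * each give a 4096-byte segment, so TRB_SEGMENT_SHIFT is 12, and
+ * TRB_BUFF_LEN_UP_TO_BOUNDARY(0x1f400) == 0xc00, i.e. a buffer starting at
+ * 0x1f400 may carry at most 3 KB in one TRB before the 64 KB boundary.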
+ */
+#define TRBS_PER_SEGMENT 256
+#define TRBS_PER_EVENT_SEGMENT 256
+#define TRBS_PER_EV_DEQ_UPDATE 100
+#define TRB_SEGMENT_SIZE (TRBS_PER_SEGMENT * 16)
+#define TRB_SEGMENT_SHIFT (ilog2(TRB_SEGMENT_SIZE))
+/* TRB buffer pointers can't cross 64KB boundaries. */
+#define TRB_MAX_BUFF_SHIFT 16
+#define TRB_MAX_BUFF_SIZE BIT(TRB_MAX_BUFF_SHIFT)
+/* How much data is left before the 64KB boundary? */
+#define TRB_BUFF_LEN_UP_TO_BOUNDARY(addr) (TRB_MAX_BUFF_SIZE - \
+ ((addr) & (TRB_MAX_BUFF_SIZE - 1)))
+
+/**
+ * struct cdnsp_segment - segment related data.
+ * @trbs: Array of Transfer Request Blocks.
+ * @next: Pointer to the next segment.
+ * @dma: DMA address of current segment.
+ * @bounce_dma: Bounce buffer DMA address.
+ * @bounce_buf: Bounce buffer virtual address.
+ * @bounce_offs: Bounce buffer offset.
+ * @bounce_len: Bounce buffer length.
+ */
+struct cdnsp_segment {
+ union cdnsp_trb *trbs;
+ struct cdnsp_segment *next;
+ dma_addr_t dma;
+ /* Max packet sized bounce buffer for TD fragment alignment */
+ dma_addr_t bounce_dma;
+ void *bounce_buf;
+ unsigned int bounce_offs;
+ unsigned int bounce_len;
+};
+
+/**
+ * struct cdnsp_td - Transfer Descriptor object.
+ * @td_list: Used for binding TD with ep_ring->td_list.
+ * @preq: Request associated with this TD.
+ * @start_seg: Segment containing the first_trb in TD.
+ * @first_trb: First TRB for this TD.
+ * @last_trb: Last TRB related with TD.
+ * @bounce_seg: Bounce segment for this TD.
+ * @request_length_set: actual_length of the request has already been set.
+ * @drbl: TD has been added to HW scheduler - only for stream capable
+ * endpoints.
+ */
+struct cdnsp_td {
+ struct list_head td_list;
+ struct cdnsp_request *preq;
+ struct cdnsp_segment *start_seg;
+ union cdnsp_trb *first_trb;
+ union cdnsp_trb *last_trb;
+ struct cdnsp_segment *bounce_seg;
+ bool request_length_set;
+ bool drbl;
+};
+
+/**
+ * struct cdnsp_dequeue_state - New dequeue pointer for Transfer Ring.
+ * @new_deq_seg: New dequeue segment.
+ * @new_deq_ptr: New dequeue pointer.
+ * @new_cycle_state: New cycle state.
+ * @stream_id: Stream ID for which the new dequeue pointer has been selected.
+ */
+struct cdnsp_dequeue_state {
+ struct cdnsp_segment *new_deq_seg;
+ union cdnsp_trb *new_deq_ptr;
+ int new_cycle_state;
+ unsigned int stream_id;
+};
+
+enum cdnsp_ring_type {
+ TYPE_CTRL = 0,
+ TYPE_ISOC,
+ TYPE_BULK,
+ TYPE_INTR,
+ TYPE_STREAM,
+ TYPE_COMMAND,
+ TYPE_EVENT,
+};
+
+/**
+ * struct cdnsp_ring - information describing transfer, command or event ring.
+ * @first_seg: First segment on transfer ring.
+ * @last_seg: Last segment on transfer ring.
+ * @enqueue: SW enqueue pointer address.
+ * @enq_seg: SW enqueue segment address.
+ * @dequeue: SW dequeue pointer address.
+ * @deq_seg: SW dequeue segment address.
+ * @td_list: Transfer descriptor list associated with this ring.
+ * @cycle_state: Current cycle bit. Write the cycle state into the TRB cycle
+ * field to give ownership of the TRB to the device controller
+ * (if we are the producer) or to check if we own the TRB
+ * (if we are the consumer).
+ * @stream_id: Stream ID.
+ * @stream_active: Stream is active - PRIME packet has been detected.
+ * @stream_rejected: This ring has been rejected by host.
+ * @num_tds: Number of TDs associated with ring.
+ * @num_segs: Number of segments.
+ * @num_trbs_free: Number of free TRBs on the ring.
+ * @bounce_buf_len: Length of bounce buffer.
+ * @type: Ring type - event, transfer, or command ring.
+ * @last_td_was_short: Last TD was a short TD.
+ * @trb_address_map: For mapping physical TRB addresses to segments in
+ * stream rings.
+ */
+struct cdnsp_ring {
+ struct cdnsp_segment *first_seg;
+ struct cdnsp_segment *last_seg;
+ union cdnsp_trb *enqueue;
+ struct cdnsp_segment *enq_seg;
+ union cdnsp_trb *dequeue;
+ struct cdnsp_segment *deq_seg;
+ struct list_head td_list;
+ u32 cycle_state;
+ unsigned int stream_id;
+ unsigned int stream_active;
+ unsigned int stream_rejected;
+ int num_tds;
+ unsigned int num_segs;
+ unsigned int num_trbs_free;
+ unsigned int bounce_buf_len;
+ enum cdnsp_ring_type type;
+ bool last_td_was_short;
+ struct radix_tree_root *trb_address_map;
+};
+
+/**
+ * struct cdnsp_erst_entry - event ring segment table entry object.
+ * @seg_addr: 64-bit event ring segment address.
+ * @seg_size: Number of TRBs in the segment.
+ */
+struct cdnsp_erst_entry {
+ __le64 seg_addr;
+ __le32 seg_size;
+ /* Set to zero */
+ __le32 rsvd;
+};
+
+/**
+ * struct cdnsp_erst - event ring segment table for event ring.
+ * @entries: Array of event ring segments.
+ * @num_entries: Number of segments in entries array.
+ * @erst_dma_addr: DMA address for entries array.
+ */
+struct cdnsp_erst {
+ struct cdnsp_erst_entry *entries;
+ unsigned int num_entries;
+ dma_addr_t erst_dma_addr;
+};
+
+/**
+ * struct cdnsp_request - extended device side representation of usb_request
+ * object.
+ * @td: Transfer descriptor associated with this request.
+ * @request: Generic usb_request object describing single I/O request.
+ * @list: Used to add the request to the endpoint pending_list.
+ * @pep: Extended representation of usb_ep object.
+ * @epnum: Endpoint number associated with usb request.
+ * @direction: Endpoint direction for usb request.
+ */
+struct cdnsp_request {
+ struct cdnsp_td td;
+ struct usb_request request;
+ struct list_head list;
+ struct cdnsp_ep *pep;
+ u8 epnum;
+ unsigned direction:1;
+};
+
+#define ERST_NUM_SEGS 1
+
+/* Stages used during enumeration process. */
+enum cdnsp_ep0_stage {
+ CDNSP_SETUP_STAGE,
+ CDNSP_DATA_STAGE,
+ CDNSP_STATUS_STAGE,
+};
+
+/**
+ * struct cdnsp_port - holds information about detected ports.
+ * @regs: Base address of the port registers.
+ * @port_num: Port number.
+ * @exist: Indicates if the port exists.
+ * @maj_rev: Major revision.
+ * @min_rev: Minor revision.
+ */
+struct cdnsp_port {
+ struct cdnsp_port_regs __iomem *regs;
+ u8 port_num;
+ u8 exist;
+ u8 maj_rev;
+ u8 min_rev;
+};
+
+#define CDNSP_EXT_PORT_MAJOR(x) (((x) >> 24) & 0xff)
+#define CDNSP_EXT_PORT_MINOR(x) (((x) >> 16) & 0xff)
+#define CDNSP_EXT_PORT_OFF(x) ((x) & 0xff)
+#define CDNSP_EXT_PORT_COUNT(x) (((x) >> 8) & 0xff)
+
+/**
+ * struct cdnsp_device - represent USB device.
+ * @dev: Pointer to device structure associated with this controller.
+ * @gadget: Device side representation of the peripheral controller.
+ * @gadget_driver: Pointer to the gadget driver.
+ * @irq: IRQ line number used by device side.
+ * @regs: IO device memory.
+ * @cap_regs: Capability registers.
+ * @op_regs: Operational registers.
+ * @run_regs: Runtime registers.
+ * @dba: Doorbell array registers.
+ * @ir_set: Current interrupter register set.
+ * @port20_regs: Port 2.0 Peripheral Configuration Registers.
+ * @port3x_regs: USB3.x Port Peripheral Configuration Registers.
+ * @rev_cap: Controller Capabilities Registers.
+ * @hcs_params1: Cached register copies of read-only HCSPARAMS1.
+ * @hcs_params3: Cached register copies of read-only HCSPARAMS3.
+ * @hcc_params: Cached register copies of read-only HCCPARAMS1.
+ * @setup: Temporary buffer for setup packet.
+ * @ep0_preq: Internally allocated request used during enumeration.
+ * @ep0_stage: EP0 stage during enumeration process.
+ * @three_stage_setup: Three stage or two stage setup.
+ * @ep0_expect_in: Data IN expected for control transfer.
+ * @setup_id: Setup identifier.
+ * @setup_speed: Speed detected for current SETUP packet.
+ * @setup_buf: Buffer for SETUP packet.
+ * @device_address: Current device address.
+ * @may_wakeup: Remote wakeup enabled/disabled.
+ * @lock: Lock used in interrupt thread context.
+ * @hci_version: Device controller version.
+ * @dcbaa: Device context base address array.
+ * @cmd_ring: Command ring.
+ * @cmd: Represents everything that is needed to issue a command on the
+ * Command Ring.
+ * @event_ring: Event ring.
+ * @erst: Event Ring Segment Table.
+ * @slot_id: Current Slot ID. Should be 0 or 1.
+ * @out_ctx: Output context.
+ * @in_ctx: Input context.
+ * @eps: Array of endpoint objects associated with the device.
+ * @usb2_hw_lpm_capable: Hardware LPM is enabled.
+ * @u1_allowed: Allow device transition to U1 state.
+ * @u2_allowed: Allow device transition to U2 state.
+ * @device_pool: DMA pool for allocating input and output contexts.
+ * @segment_pool: DMA pool for allocating new segments.
+ * @cdnsp_state: Current state of controller.
+ * @link_state: Current link state.
+ * @usb2_port: USB 2.0 port.
+ * @usb3_port: USB 3.0 port.
+ * @active_port: Currently selected port.
+ * @test_mode: Selected Test Mode.
+ */
+struct cdnsp_device {
+ struct device *dev;
+ struct usb_gadget gadget;
+ struct usb_gadget_driver *gadget_driver;
+ unsigned int irq;
+ void __iomem *regs;
+
+ /* Registers map */
+ struct cdnsp_cap_regs __iomem *cap_regs;
+ struct cdnsp_op_regs __iomem *op_regs;
+ struct cdnsp_run_regs __iomem *run_regs;
+ struct cdnsp_doorbell_array __iomem *dba;
+ struct cdnsp_intr_reg __iomem *ir_set;
+ struct cdnsp_20port_cap __iomem *port20_regs;
+ struct cdnsp_3xport_cap __iomem *port3x_regs;
+ struct cdnsp_rev_cap __iomem *rev_cap;
+
+ /* Cached register copies of read-only CDNSP data */
+ __u32 hcs_params1;
+ __u32 hcs_params3;
+ __u32 hcc_params;
+ /* Lock used in interrupt thread context. */
+ spinlock_t lock;
+ struct usb_ctrlrequest setup;
+ struct cdnsp_request ep0_preq;
+ enum cdnsp_ep0_stage ep0_stage;
+ u8 three_stage_setup;
+ u8 ep0_expect_in;
+ u8 setup_id;
+ u8 setup_speed;
+ void *setup_buf;
+ u8 device_address;
+ int may_wakeup;
+ u16 hci_version;
+
+ /* data structures */
+ struct cdnsp_device_context_array *dcbaa;
+ struct cdnsp_ring *cmd_ring;
+ struct cdnsp_command cmd;
+ struct cdnsp_ring *event_ring;
+ struct cdnsp_erst erst;
+ int slot_id;
+
+ /*
+ * Commands to the hardware are passed an "input context" that
+ * tells the hardware what to change in its data structures.
+ * The hardware will return changes in an "output context" that
+ * software must allocate for the hardware.
+ */
+ struct cdnsp_container_ctx out_ctx;
+ struct cdnsp_container_ctx in_ctx;
+ struct cdnsp_ep eps[CDNSP_ENDPOINTS_NUM];
+ u8 usb2_hw_lpm_capable:1;
+ u8 u1_allowed:1;
+ u8 u2_allowed:1;
+
+ /* DMA pools */
+ struct dma_pool *device_pool;
+ struct dma_pool *segment_pool;
+
+#define CDNSP_STATE_HALTED BIT(1)
+#define CDNSP_STATE_DYING BIT(2)
+#define CDNSP_STATE_DISCONNECT_PENDING BIT(3)
+#define CDNSP_WAKEUP_PENDING BIT(4)
+ unsigned int cdnsp_state;
+ unsigned int link_state;
+
+ struct cdnsp_port usb2_port;
+ struct cdnsp_port usb3_port;
+ struct cdnsp_port *active_port;
+ u16 test_mode;
+};
+
+/*
+ * Registers should always be accessed with double word or quad word accesses.
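+ * For example (illustrative only), cdnsp_write_64(val, regs) defined below
+ * boils down to two 32-bit writes, low dword first:
+ *
+ *   writel(lower_32_bits(val), addr);
+ *   writel(upper_32_bits(val), addr + 4);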
+ *
+ * Registers with 64-bit address pointers should be written to with
+ * dword accesses by writing the low dword first (ptr[0]), then the high dword
+ * (ptr[1]) second. Controller implementations that do not support 64-bit
+ * address pointers will ignore the high dword, and write order is irrelevant.
+ */
+static inline u64 cdnsp_read_64(__le64 __iomem *regs)
+{
+ return lo_hi_readq(regs);
+}
+
+static inline void cdnsp_write_64(const u64 val, __le64 __iomem *regs)
+{
+ lo_hi_writeq(val, regs);
+}
+
+/* CDNSP memory management functions. */
+void cdnsp_mem_cleanup(struct cdnsp_device *pdev);
+int cdnsp_mem_init(struct cdnsp_device *pdev);
+int cdnsp_setup_addressable_priv_dev(struct cdnsp_device *pdev);
+void cdnsp_copy_ep0_dequeue_into_input_ctx(struct cdnsp_device *pdev);
+void cdnsp_endpoint_zero(struct cdnsp_device *pdev, struct cdnsp_ep *ep);
+int cdnsp_endpoint_init(struct cdnsp_device *pdev,
+ struct cdnsp_ep *pep,
+ gfp_t mem_flags);
+int cdnsp_ring_expansion(struct cdnsp_device *pdev,
+ struct cdnsp_ring *ring,
+ unsigned int num_trbs, gfp_t flags);
+struct cdnsp_ring *cdnsp_dma_to_transfer_ring(struct cdnsp_ep *ep, u64 address);
+int cdnsp_alloc_stream_info(struct cdnsp_device *pdev,
+ struct cdnsp_ep *pep,
+ unsigned int num_stream_ctxs,
+ unsigned int num_streams);
+int cdnsp_alloc_streams(struct cdnsp_device *pdev, struct cdnsp_ep *pep);
+void cdnsp_free_endpoint_rings(struct cdnsp_device *pdev, struct cdnsp_ep *pep);
+
+/* Device controller glue. */
+int cdnsp_find_next_ext_cap(void __iomem *base, u32 start, int id);
+int cdnsp_halt(struct cdnsp_device *pdev);
+void cdnsp_died(struct cdnsp_device *pdev);
+int cdnsp_reset(struct cdnsp_device *pdev);
+irqreturn_t cdnsp_irq_handler(int irq, void *priv);
+int cdnsp_setup_device(struct cdnsp_device *pdev, enum cdnsp_setup_dev setup);
+void cdnsp_set_usb2_hardware_lpm(struct cdnsp_device *usbsssp_data,
+ struct usb_request *req, int enable);
+irqreturn_t cdnsp_thread_irq_handler(int irq, void *data);
+
+/* Ring, segment, TRB, and TD functions.
*/ +dma_addr_t cdnsp_trb_virt_to_dma(struct cdnsp_segment *seg, + union cdnsp_trb *trb); +bool cdnsp_last_trb_on_seg(struct cdnsp_segment *seg, union cdnsp_trb *trb); +bool cdnsp_last_trb_on_ring(struct cdnsp_ring *ring, + struct cdnsp_segment *seg, + union cdnsp_trb *trb); +int cdnsp_wait_for_cmd_compl(struct cdnsp_device *pdev); +void cdnsp_update_erst_dequeue(struct cdnsp_device *pdev, + union cdnsp_trb *event_ring_deq, + u8 clear_ehb); +void cdnsp_initialize_ring_info(struct cdnsp_ring *ring); +void cdnsp_ring_cmd_db(struct cdnsp_device *pdev); +void cdnsp_queue_slot_control(struct cdnsp_device *pdev, u32 trb_type); +void cdnsp_queue_address_device(struct cdnsp_device *pdev, + dma_addr_t in_ctx_ptr, + enum cdnsp_setup_dev setup); +void cdnsp_queue_stop_endpoint(struct cdnsp_device *pdev, + unsigned int ep_index); +int cdnsp_queue_ctrl_tx(struct cdnsp_device *pdev, struct cdnsp_request *preq); +int cdnsp_queue_bulk_tx(struct cdnsp_device *pdev, struct cdnsp_request *preq); +int cdnsp_queue_isoc_tx_prepare(struct cdnsp_device *pdev, + struct cdnsp_request *preq); +void cdnsp_queue_configure_endpoint(struct cdnsp_device *pdev, + dma_addr_t in_ctx_ptr); +void cdnsp_queue_reset_ep(struct cdnsp_device *pdev, unsigned int ep_index); +void cdnsp_queue_halt_endpoint(struct cdnsp_device *pdev, + unsigned int ep_index); +void cdnsp_queue_flush_endpoint(struct cdnsp_device *pdev, + unsigned int ep_index); +void cdnsp_force_header_wakeup(struct cdnsp_device *pdev, int intf_num); +void cdnsp_queue_reset_device(struct cdnsp_device *pdev); +void cdnsp_queue_new_dequeue_state(struct cdnsp_device *pdev, + struct cdnsp_ep *pep, + struct cdnsp_dequeue_state *deq_state); +void cdnsp_ring_doorbell_for_active_rings(struct cdnsp_device *pdev, + struct cdnsp_ep *pep); +void cdnsp_inc_deq(struct cdnsp_device *pdev, struct cdnsp_ring *ring); +void cdnsp_set_link_state(struct cdnsp_device *pdev, + __le32 __iomem *port_regs, u32 link_state); +u32 cdnsp_port_state_to_neutral(u32 state); + +/* CDNSP device controller contexts. */ +int cdnsp_enable_slot(struct cdnsp_device *pdev); +int cdnsp_disable_slot(struct cdnsp_device *pdev); +struct cdnsp_input_control_ctx + *cdnsp_get_input_control_ctx(struct cdnsp_container_ctx *ctx); +struct cdnsp_slot_ctx *cdnsp_get_slot_ctx(struct cdnsp_container_ctx *ctx); +struct cdnsp_ep_ctx *cdnsp_get_ep_ctx(struct cdnsp_container_ctx *ctx, + unsigned int ep_index); +/* CDNSP gadget interface. */ +void cdnsp_suspend_gadget(struct cdnsp_device *pdev); +void cdnsp_resume_gadget(struct cdnsp_device *pdev); +void cdnsp_disconnect_gadget(struct cdnsp_device *pdev); +void cdnsp_gadget_giveback(struct cdnsp_ep *pep, struct cdnsp_request *preq, + int status); +int cdnsp_ep_enqueue(struct cdnsp_ep *pep, struct cdnsp_request *preq); +int cdnsp_ep_dequeue(struct cdnsp_ep *pep, struct cdnsp_request *preq); +unsigned int cdnsp_port_speed(unsigned int port_status); +void cdnsp_irq_reset(struct cdnsp_device *pdev); +int cdnsp_halt_endpoint(struct cdnsp_device *pdev, + struct cdnsp_ep *pep, int value); +int cdnsp_cmd_stop_ep(struct cdnsp_device *pdev, struct cdnsp_ep *pep); +int cdnsp_cmd_flush_ep(struct cdnsp_device *pdev, struct cdnsp_ep *pep); +void cdnsp_setup_analyze(struct cdnsp_device *pdev); +int cdnsp_status_stage(struct cdnsp_device *pdev); +int cdnsp_reset_device(struct cdnsp_device *pdev); + +/** + * next_request - gets the next request on the given list + * @list: the request list to operate on + * + * Caller should take care of locking. 
This function returns NULL or the first
+ * request available on the list.
+ */
+static inline struct cdnsp_request *next_request(struct list_head *list)
+{
+ return list_first_entry_or_null(list, struct cdnsp_request, list);
+}
+
+#define to_cdnsp_ep(ep) (container_of(ep, struct cdnsp_ep, endpoint))
+#define gadget_to_cdnsp(g) (container_of(g, struct cdnsp_device, gadget))
+#define request_to_cdnsp_request(r) (container_of(r, struct cdnsp_request, \
+ request))
+#define to_cdnsp_request(r) (container_of(r, struct cdnsp_request, request))
+int cdnsp_remove_request(struct cdnsp_device *pdev, struct cdnsp_request *preq,
+ struct cdnsp_ep *pep);
+
+#endif /* __LINUX_CDNSP_GADGET_H */
diff --git a/drivers/usb/cdns3/cdnsp-mem.c b/drivers/usb/cdns3/cdnsp-mem.c
new file mode 100644
index 000000000000..7a84e928710e
--- /dev/null
+++ b/drivers/usb/cdns3/cdnsp-mem.c
@@ -0,0 +1,1336 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Cadence CDNSP DRD Driver.
+ *
+ * Copyright (C) 2020 Cadence.
+ *
+ * Author: Pawel Laszczak <pawell@cadence.com>
+ *
+ * Code based on Linux XHCI driver.
+ * Origin: Copyright (C) 2008 Intel Corp.
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/slab.h>
+#include <linux/usb.h>
+
+#include "cdnsp-gadget.h"
+#include "cdnsp-trace.h"
+
+static void cdnsp_free_stream_info(struct cdnsp_device *pdev,
+ struct cdnsp_ep *pep);
+/*
+ * Allocates a generic ring segment from the ring pool, sets the DMA address,
+ * initializes the segment to zero, and sets the private next pointer to NULL.
+ *
+ * "All components of all Command and Transfer TRBs shall be initialized to '0'"
+ */
+static struct cdnsp_segment *cdnsp_segment_alloc(struct cdnsp_device *pdev,
+ unsigned int cycle_state,
+ unsigned int max_packet,
+ gfp_t flags)
+{
+ struct cdnsp_segment *seg;
+ dma_addr_t dma;
+ int i;
+
+ seg = kzalloc(sizeof(*seg), flags);
+ if (!seg)
+ return NULL;
+
+ seg->trbs = dma_pool_zalloc(pdev->segment_pool, flags, &dma);
+ if (!seg->trbs) {
+ kfree(seg);
+ return NULL;
+ }
+
+ if (max_packet) {
+ seg->bounce_buf = kzalloc(max_packet, flags | GFP_DMA);
+ if (!seg->bounce_buf)
+ goto free_dma;
+ }
+
+ /* If the cycle state is 0, set the cycle bit to 1 for all the TRBs. */
+ if (cycle_state == 0) {
+ for (i = 0; i < TRBS_PER_SEGMENT; i++)
+ seg->trbs[i].link.control |= cpu_to_le32(TRB_CYCLE);
+ }
+ seg->dma = dma;
+ seg->next = NULL;
+
+ return seg;
+
+free_dma:
+ dma_pool_free(pdev->segment_pool, seg->trbs, dma);
+ kfree(seg);
+
+ return NULL;
+}
+
+static void cdnsp_segment_free(struct cdnsp_device *pdev,
+ struct cdnsp_segment *seg)
+{
+ if (seg->trbs)
+ dma_pool_free(pdev->segment_pool, seg->trbs, seg->dma);
+
+ kfree(seg->bounce_buf);
+ kfree(seg);
+}
+
+static void cdnsp_free_segments_for_ring(struct cdnsp_device *pdev,
+ struct cdnsp_segment *first)
+{
+ struct cdnsp_segment *seg;
+
+ seg = first->next;
+
+ while (seg != first) {
+ struct cdnsp_segment *next = seg->next;
+
+ cdnsp_segment_free(pdev, seg);
+ seg = next;
+ }
+
+ cdnsp_segment_free(pdev, first);
+}
+
+/*
+ * Make the prev segment point to the next segment.
+ *
+ * Change the last TRB in the prev segment to be a Link TRB which points to the
+ * DMA address of the next segment. The caller needs to set any Link TRB
+ * related flags, such as End TRB, Toggle Cycle, and no snoop.
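+ *
+ * For example (illustrative, taken from cdnsp_ring_alloc() below), the caller
+ * sets Toggle Cycle on the link TRB of the last segment:
+ *
+ *   ring->last_seg->trbs[TRBS_PER_SEGMENT - 1].link.control |=
+ *           cpu_to_le32(LINK_TOGGLE);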
+ */ +static void cdnsp_link_segments(struct cdnsp_device *pdev, + struct cdnsp_segment *prev, + struct cdnsp_segment *next, + enum cdnsp_ring_type type) +{ + struct cdnsp_link_trb *link; + u32 val; + + if (!prev || !next) + return; + + prev->next = next; + if (type != TYPE_EVENT) { + link = &prev->trbs[TRBS_PER_SEGMENT - 1].link; + link->segment_ptr = cpu_to_le64(next->dma); + + /* + * Set the last TRB in the segment to have a TRB type ID + * of Link TRB + */ + val = le32_to_cpu(link->control); + val &= ~TRB_TYPE_BITMASK; + val |= TRB_TYPE(TRB_LINK); + link->control = cpu_to_le32(val); + } +} + +/* + * Link the ring to the new segments. + * Set Toggle Cycle for the new ring if needed. + */ +static void cdnsp_link_rings(struct cdnsp_device *pdev, + struct cdnsp_ring *ring, + struct cdnsp_segment *first, + struct cdnsp_segment *last, + unsigned int num_segs) +{ + struct cdnsp_segment *next; + + if (!ring || !first || !last) + return; + + next = ring->enq_seg->next; + cdnsp_link_segments(pdev, ring->enq_seg, first, ring->type); + cdnsp_link_segments(pdev, last, next, ring->type); + ring->num_segs += num_segs; + ring->num_trbs_free += (TRBS_PER_SEGMENT - 1) * num_segs; + + if (ring->type != TYPE_EVENT && ring->enq_seg == ring->last_seg) { + ring->last_seg->trbs[TRBS_PER_SEGMENT - 1].link.control &= + ~cpu_to_le32(LINK_TOGGLE); + last->trbs[TRBS_PER_SEGMENT - 1].link.control |= + cpu_to_le32(LINK_TOGGLE); + ring->last_seg = last; + } +} + +/* + * We need a radix tree for mapping physical addresses of TRBs to which stream + * ID they belong to. We need to do this because the device controller won't + * tell us which stream ring the TRB came from. We could store the stream ID + * in an event data TRB, but that doesn't help us for the cancellation case, + * since the endpoint may stop before it reaches that event data TRB. + * + * The radix tree maps the upper portion of the TRB DMA address to a ring + * segment that has the same upper portion of DMA addresses. For example, + * say I have segments of size 1KB, that are always 1KB aligned. A segment may + * start at 0x10c91000 and end at 0x10c913f0. If I use the upper 10 bits, the + * key to the stream ID is 0x43244. I can use the DMA address of the TRB to + * pass the radix tree a key to get the right stream ID: + * + * 0x10c90fff >> 10 = 0x43243 + * 0x10c912c0 >> 10 = 0x43244 + * 0x10c91400 >> 10 = 0x43245 + * + * Obviously, only those TRBs with DMA addresses that are within the segment + * will make the radix tree return the stream ID for that ring. + * + * Caveats for the radix tree: + * + * The radix tree uses an unsigned long as a key pair. On 32-bit systems, an + * unsigned long will be 32-bits; on a 64-bit system an unsigned long will be + * 64-bits. Since we only request 32-bit DMA addresses, we can use that as the + * key on 32-bit or 64-bit systems (it would also be fine if we asked for 64-bit + * PCI DMA addresses on a 64-bit system). There might be a problem on 32-bit + * extended systems (where the DMA address can be bigger than 32-bits), + * if we allow the PCI dma mask to be bigger than 32-bits. So don't do that. + */ +static int cdnsp_insert_segment_mapping(struct radix_tree_root *trb_address_map, + struct cdnsp_ring *ring, + struct cdnsp_segment *seg, + gfp_t mem_flags) +{ + unsigned long key; + int ret; + + key = (unsigned long)(seg->dma >> TRB_SEGMENT_SHIFT); + + /* Skip any segments that were already added. 
*/ + if (radix_tree_lookup(trb_address_map, key)) + return 0; + + ret = radix_tree_maybe_preload(mem_flags); + if (ret) + return ret; + + ret = radix_tree_insert(trb_address_map, key, ring); + radix_tree_preload_end(); + + return ret; +} + +static void cdnsp_remove_segment_mapping(struct radix_tree_root *trb_address_map, + struct cdnsp_segment *seg) +{ + unsigned long key; + + key = (unsigned long)(seg->dma >> TRB_SEGMENT_SHIFT); + if (radix_tree_lookup(trb_address_map, key)) + radix_tree_delete(trb_address_map, key); +} + +static int cdnsp_update_stream_segment_mapping(struct radix_tree_root *trb_address_map, + struct cdnsp_ring *ring, + struct cdnsp_segment *first_seg, + struct cdnsp_segment *last_seg, + gfp_t mem_flags) +{ + struct cdnsp_segment *failed_seg; + struct cdnsp_segment *seg; + int ret; + + seg = first_seg; + do { + ret = cdnsp_insert_segment_mapping(trb_address_map, ring, seg, + mem_flags); + if (ret) + goto remove_streams; + if (seg == last_seg) + return 0; + seg = seg->next; + } while (seg != first_seg); + + return 0; + +remove_streams: + failed_seg = seg; + seg = first_seg; + do { + cdnsp_remove_segment_mapping(trb_address_map, seg); + if (seg == failed_seg) + return ret; + seg = seg->next; + } while (seg != first_seg); + + return ret; +} + +static void cdnsp_remove_stream_mapping(struct cdnsp_ring *ring) +{ + struct cdnsp_segment *seg; + + seg = ring->first_seg; + do { + cdnsp_remove_segment_mapping(ring->trb_address_map, seg); + seg = seg->next; + } while (seg != ring->first_seg); +} + +static int cdnsp_update_stream_mapping(struct cdnsp_ring *ring) +{ + return cdnsp_update_stream_segment_mapping(ring->trb_address_map, ring, + ring->first_seg, ring->last_seg, GFP_ATOMIC); +} + +static void cdnsp_ring_free(struct cdnsp_device *pdev, struct cdnsp_ring *ring) +{ + if (!ring) + return; + + trace_cdnsp_ring_free(ring); + + if (ring->first_seg) { + if (ring->type == TYPE_STREAM) + cdnsp_remove_stream_mapping(ring); + + cdnsp_free_segments_for_ring(pdev, ring->first_seg); + } + + kfree(ring); +} + +void cdnsp_initialize_ring_info(struct cdnsp_ring *ring) +{ + ring->enqueue = ring->first_seg->trbs; + ring->enq_seg = ring->first_seg; + ring->dequeue = ring->enqueue; + ring->deq_seg = ring->first_seg; + + /* + * The ring is initialized to 0. The producer must write 1 to the cycle + * bit to handover ownership of the TRB, so PCS = 1. The consumer must + * compare CCS to the cycle bit to check ownership, so CCS = 1. + * + * New rings are initialized with cycle state equal to 1; if we are + * handling ring expansion, set the cycle state equal to the old ring. + */ + ring->cycle_state = 1; + + /* + * Each segment has a link TRB, and leave an extra TRB for SW + * accounting purpose + */ + ring->num_trbs_free = ring->num_segs * (TRBS_PER_SEGMENT - 1) - 1; +} + +/* Allocate segments and link them for a ring. */ +static int cdnsp_alloc_segments_for_ring(struct cdnsp_device *pdev, + struct cdnsp_segment **first, + struct cdnsp_segment **last, + unsigned int num_segs, + unsigned int cycle_state, + enum cdnsp_ring_type type, + unsigned int max_packet, + gfp_t flags) +{ + struct cdnsp_segment *prev; + + /* Allocate first segment. */ + prev = cdnsp_segment_alloc(pdev, cycle_state, max_packet, flags); + if (!prev) + return -ENOMEM; + + num_segs--; + *first = prev; + + /* Allocate all other segments. 
*/ + while (num_segs > 0) { + struct cdnsp_segment *next; + + next = cdnsp_segment_alloc(pdev, cycle_state, + max_packet, flags); + if (!next) { + cdnsp_free_segments_for_ring(pdev, *first); + return -ENOMEM; + } + + cdnsp_link_segments(pdev, prev, next, type); + + prev = next; + num_segs--; + } + + cdnsp_link_segments(pdev, prev, *first, type); + *last = prev; + + return 0; +} + +/* + * Create a new ring with zero or more segments. + * + * Link each segment together into a ring. + * Set the end flag and the cycle toggle bit on the last segment. + */ +static struct cdnsp_ring *cdnsp_ring_alloc(struct cdnsp_device *pdev, + unsigned int num_segs, + enum cdnsp_ring_type type, + unsigned int max_packet, + gfp_t flags) +{ + struct cdnsp_ring *ring; + int ret; + + ring = kzalloc(sizeof *(ring), flags); + if (!ring) + return NULL; + + ring->num_segs = num_segs; + ring->bounce_buf_len = max_packet; + INIT_LIST_HEAD(&ring->td_list); + ring->type = type; + + if (num_segs == 0) + return ring; + + ret = cdnsp_alloc_segments_for_ring(pdev, &ring->first_seg, + &ring->last_seg, num_segs, + 1, type, max_packet, flags); + if (ret) + goto fail; + + /* Only event ring does not use link TRB. */ + if (type != TYPE_EVENT) + ring->last_seg->trbs[TRBS_PER_SEGMENT - 1].link.control |= + cpu_to_le32(LINK_TOGGLE); + + cdnsp_initialize_ring_info(ring); + trace_cdnsp_ring_alloc(ring); + return ring; +fail: + kfree(ring); + return NULL; +} + +void cdnsp_free_endpoint_rings(struct cdnsp_device *pdev, struct cdnsp_ep *pep) +{ + cdnsp_ring_free(pdev, pep->ring); + pep->ring = NULL; + cdnsp_free_stream_info(pdev, pep); +} + +/* + * Expand an existing ring. + * Allocate a new ring which has same segment numbers and link the two rings. + */ +int cdnsp_ring_expansion(struct cdnsp_device *pdev, + struct cdnsp_ring *ring, + unsigned int num_trbs, + gfp_t flags) +{ + unsigned int num_segs_needed; + struct cdnsp_segment *first; + struct cdnsp_segment *last; + unsigned int num_segs; + int ret; + + num_segs_needed = (num_trbs + (TRBS_PER_SEGMENT - 1) - 1) / + (TRBS_PER_SEGMENT - 1); + + /* Allocate number of segments we needed, or double the ring size. */ + num_segs = max(ring->num_segs, num_segs_needed); + + ret = cdnsp_alloc_segments_for_ring(pdev, &first, &last, num_segs, + ring->cycle_state, ring->type, + ring->bounce_buf_len, flags); + if (ret) + return -ENOMEM; + + if (ring->type == TYPE_STREAM) + ret = cdnsp_update_stream_segment_mapping(ring->trb_address_map, + ring, first, + last, flags); + + if (ret) { + cdnsp_free_segments_for_ring(pdev, first); + + return ret; + } + + cdnsp_link_rings(pdev, ring, first, last, num_segs); + trace_cdnsp_ring_expansion(ring); + + return 0; +} + +static int cdnsp_init_device_ctx(struct cdnsp_device *pdev) +{ + int size = HCC_64BYTE_CONTEXT(pdev->hcc_params) ? 
2048 : 1024;
+
+ pdev->out_ctx.type = CDNSP_CTX_TYPE_DEVICE;
+ pdev->out_ctx.size = size;
+ pdev->out_ctx.ctx_size = CTX_SIZE(pdev->hcc_params);
+ pdev->out_ctx.bytes = dma_pool_zalloc(pdev->device_pool, GFP_ATOMIC,
+ &pdev->out_ctx.dma);
+
+ if (!pdev->out_ctx.bytes)
+ return -ENOMEM;
+
+ pdev->in_ctx.type = CDNSP_CTX_TYPE_INPUT;
+ pdev->in_ctx.ctx_size = pdev->out_ctx.ctx_size;
+ pdev->in_ctx.size = size + pdev->out_ctx.ctx_size;
+ pdev->in_ctx.bytes = dma_pool_zalloc(pdev->device_pool, GFP_ATOMIC,
+ &pdev->in_ctx.dma);
+
+ if (!pdev->in_ctx.bytes) {
+ dma_pool_free(pdev->device_pool, pdev->out_ctx.bytes,
+ pdev->out_ctx.dma);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+struct cdnsp_input_control_ctx
+ *cdnsp_get_input_control_ctx(struct cdnsp_container_ctx *ctx)
+{
+ if (ctx->type != CDNSP_CTX_TYPE_INPUT)
+ return NULL;
+
+ return (struct cdnsp_input_control_ctx *)ctx->bytes;
+}
+
+struct cdnsp_slot_ctx *cdnsp_get_slot_ctx(struct cdnsp_container_ctx *ctx)
+{
+ if (ctx->type == CDNSP_CTX_TYPE_DEVICE)
+ return (struct cdnsp_slot_ctx *)ctx->bytes;
+
+ return (struct cdnsp_slot_ctx *)(ctx->bytes + ctx->ctx_size);
+}
+
+struct cdnsp_ep_ctx *cdnsp_get_ep_ctx(struct cdnsp_container_ctx *ctx,
+ unsigned int ep_index)
+{
+ /* Increment ep index by offset of start of ep ctx array. */
+ ep_index++;
+ if (ctx->type == CDNSP_CTX_TYPE_INPUT)
+ ep_index++;
+
+ return (struct cdnsp_ep_ctx *)(ctx->bytes + (ep_index * ctx->ctx_size));
+}
+
+static void cdnsp_free_stream_ctx(struct cdnsp_device *pdev,
+ struct cdnsp_ep *pep)
+{
+ dma_pool_free(pdev->device_pool, pep->stream_info.stream_ctx_array,
+ pep->stream_info.ctx_array_dma);
+}
+
+/* The stream context array must be a power of 2. */
+static struct cdnsp_stream_ctx
+ *cdnsp_alloc_stream_ctx(struct cdnsp_device *pdev, struct cdnsp_ep *pep)
+{
+ size_t size = sizeof(struct cdnsp_stream_ctx) *
+ pep->stream_info.num_stream_ctxs;
+
+ if (size > CDNSP_CTX_SIZE)
+ return NULL;
+
+ /*
+ * The driver intentionally uses the device_pool to allocate the
+ * stream context array. The device pool has a size of 2048 bytes,
+ * which gives us 128 entries.
+ */
+ return dma_pool_zalloc(pdev->device_pool, GFP_DMA32 | GFP_ATOMIC,
+ &pep->stream_info.ctx_array_dma);
+}
+
+struct cdnsp_ring *cdnsp_dma_to_transfer_ring(struct cdnsp_ep *pep, u64 address)
+{
+ if (pep->ep_state & EP_HAS_STREAMS)
+ return radix_tree_lookup(&pep->stream_info.trb_address_map,
+ address >> TRB_SEGMENT_SHIFT);
+
+ return pep->ring;
+}
+
+/*
+ * Change an endpoint's internal structure so it supports stream IDs.
+ * The number of requested streams includes stream 0, which cannot be used by
+ * the driver.
+ *
+ * The number of stream contexts in the stream context array may be bigger than
+ * the number of streams the driver wants to use. This is because the number of
+ * stream context array entries must be a power of two.
+ */
+int cdnsp_alloc_stream_info(struct cdnsp_device *pdev,
+ struct cdnsp_ep *pep,
+ unsigned int num_stream_ctxs,
+ unsigned int num_streams)
+{
+ struct cdnsp_stream_info *stream_info;
+ struct cdnsp_ring *cur_ring;
+ u32 cur_stream;
+ u64 addr;
+ int ret;
+ int mps;
+
+ stream_info = &pep->stream_info;
+ stream_info->num_streams = num_streams;
+ stream_info->num_stream_ctxs = num_stream_ctxs;
+
+ /* Initialize the array of virtual pointers to stream rings. */
+ stream_info->stream_rings = kcalloc(num_streams,
+ sizeof(struct cdnsp_ring *),
+ GFP_ATOMIC);
+ if (!stream_info->stream_rings)
+ return -ENOMEM;
+
+ /* Initialize the array of DMA addresses for stream rings for the HW.
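+ *
+ * Sizing note (illustrative assumption, the caller-side code is not shown
+ * here): since the context array length must be a power of two, a caller
+ * asking for e.g. 5 streams would typically pass
+ * num_stream_ctxs = roundup_pow_of_two(num_streams), i.e. 8.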
*/ + stream_info->stream_ctx_array = cdnsp_alloc_stream_ctx(pdev, pep); + if (!stream_info->stream_ctx_array) + goto cleanup_stream_rings; + + memset(stream_info->stream_ctx_array, 0, + sizeof(struct cdnsp_stream_ctx) * num_stream_ctxs); + INIT_RADIX_TREE(&stream_info->trb_address_map, GFP_ATOMIC); + mps = usb_endpoint_maxp(pep->endpoint.desc); + + /* + * Allocate rings for all the streams that the driver will use, + * and add their segment DMA addresses to the radix tree. + * Stream 0 is reserved. + */ + for (cur_stream = 1; cur_stream < num_streams; cur_stream++) { + cur_ring = cdnsp_ring_alloc(pdev, 2, TYPE_STREAM, mps, + GFP_ATOMIC); + stream_info->stream_rings[cur_stream] = cur_ring; + + if (!cur_ring) + goto cleanup_rings; + + cur_ring->stream_id = cur_stream; + cur_ring->trb_address_map = &stream_info->trb_address_map; + + /* Set deq ptr, cycle bit, and stream context type. */ + addr = cur_ring->first_seg->dma | SCT_FOR_CTX(SCT_PRI_TR) | + cur_ring->cycle_state; + + stream_info->stream_ctx_array[cur_stream].stream_ring = + cpu_to_le64(addr); + + trace_cdnsp_set_stream_ring(cur_ring); + + ret = cdnsp_update_stream_mapping(cur_ring); + if (ret) + goto cleanup_rings; + } + + return 0; + +cleanup_rings: + for (cur_stream = 1; cur_stream < num_streams; cur_stream++) { + cur_ring = stream_info->stream_rings[cur_stream]; + if (cur_ring) { + cdnsp_ring_free(pdev, cur_ring); + stream_info->stream_rings[cur_stream] = NULL; + } + } + +cleanup_stream_rings: + kfree(pep->stream_info.stream_rings); + + return -ENOMEM; +} + +/* Frees all stream contexts associated with the endpoint. */ +static void cdnsp_free_stream_info(struct cdnsp_device *pdev, + struct cdnsp_ep *pep) +{ + struct cdnsp_stream_info *stream_info = &pep->stream_info; + struct cdnsp_ring *cur_ring; + int cur_stream; + + if (!(pep->ep_state & EP_HAS_STREAMS)) + return; + + for (cur_stream = 1; cur_stream < stream_info->num_streams; + cur_stream++) { + cur_ring = stream_info->stream_rings[cur_stream]; + if (cur_ring) { + cdnsp_ring_free(pdev, cur_ring); + stream_info->stream_rings[cur_stream] = NULL; + } + } + + if (stream_info->stream_ctx_array) + cdnsp_free_stream_ctx(pdev, pep); + + kfree(stream_info->stream_rings); + pep->ep_state &= ~EP_HAS_STREAMS; +} + +/* All the cdnsp_tds in the ring's TD list should be freed at this point.*/ +static void cdnsp_free_priv_device(struct cdnsp_device *pdev) +{ + pdev->dcbaa->dev_context_ptrs[1] = 0; + + cdnsp_free_endpoint_rings(pdev, &pdev->eps[0]); + + if (pdev->in_ctx.bytes) + dma_pool_free(pdev->device_pool, pdev->in_ctx.bytes, + pdev->in_ctx.dma); + + if (pdev->out_ctx.bytes) + dma_pool_free(pdev->device_pool, pdev->out_ctx.bytes, + pdev->out_ctx.dma); + + pdev->in_ctx.bytes = NULL; + pdev->out_ctx.bytes = NULL; +} + +static int cdnsp_alloc_priv_device(struct cdnsp_device *pdev) +{ + int ret = -ENOMEM; + + ret = cdnsp_init_device_ctx(pdev); + if (ret) + return ret; + + /* Allocate endpoint 0 ring. */ + pdev->eps[0].ring = cdnsp_ring_alloc(pdev, 2, TYPE_CTRL, 0, GFP_ATOMIC); + if (!pdev->eps[0].ring) + goto fail; + + /* Point to output device context in dcbaa. 
+	 */
+	pdev->dcbaa->dev_context_ptrs[1] = cpu_to_le64(pdev->out_ctx.dma);
+	pdev->cmd.in_ctx = &pdev->in_ctx;
+
+	trace_cdnsp_alloc_priv_device(pdev);
+	return 0;
+fail:
+	dma_pool_free(pdev->device_pool, pdev->out_ctx.bytes,
+		      pdev->out_ctx.dma);
+	dma_pool_free(pdev->device_pool, pdev->in_ctx.bytes,
+		      pdev->in_ctx.dma);
+
+	return ret;
+}
+
+void cdnsp_copy_ep0_dequeue_into_input_ctx(struct cdnsp_device *pdev)
+{
+	struct cdnsp_ep_ctx *ep0_ctx = pdev->eps[0].in_ctx;
+	struct cdnsp_ring *ep_ring = pdev->eps[0].ring;
+	dma_addr_t dma;
+
+	dma = cdnsp_trb_virt_to_dma(ep_ring->enq_seg, ep_ring->enqueue);
+	ep0_ctx->deq = cpu_to_le64(dma | ep_ring->cycle_state);
+}
+
+/* Set up a controller private device for a Set Address command. */
+int cdnsp_setup_addressable_priv_dev(struct cdnsp_device *pdev)
+{
+	struct cdnsp_slot_ctx *slot_ctx;
+	struct cdnsp_ep_ctx *ep0_ctx;
+	u32 max_packets, port;
+
+	ep0_ctx = cdnsp_get_ep_ctx(&pdev->in_ctx, 0);
+	slot_ctx = cdnsp_get_slot_ctx(&pdev->in_ctx);
+
+	/* Only the control endpoint is valid - one endpoint context. */
+	slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1));
+
+	switch (pdev->gadget.speed) {
+	case USB_SPEED_SUPER_PLUS:
+		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_SSP);
+		max_packets = MAX_PACKET(512);
+		break;
+	case USB_SPEED_SUPER:
+		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_SS);
+		max_packets = MAX_PACKET(512);
+		break;
+	case USB_SPEED_HIGH:
+		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_HS);
+		max_packets = MAX_PACKET(64);
+		break;
+	case USB_SPEED_FULL:
+		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_FS);
+		max_packets = MAX_PACKET(64);
+		break;
+	default:
+		/* Speed was not set, this shouldn't happen. */
+		return -EINVAL;
+	}
+
+	port = DEV_PORT(pdev->active_port->port_num);
+	slot_ctx->dev_port |= cpu_to_le32(port);
+	slot_ctx->dev_state = cpu_to_le32((pdev->device_address &
+					   DEV_ADDR_MASK));
+	ep0_ctx->tx_info = cpu_to_le32(EP_AVG_TRB_LENGTH(0x8));
+	ep0_ctx->ep_info2 = cpu_to_le32(EP_TYPE(CTRL_EP));
+	ep0_ctx->ep_info2 |= cpu_to_le32(MAX_BURST(0) | ERROR_COUNT(3) |
+					 max_packets);
+
+	ep0_ctx->deq = cpu_to_le64(pdev->eps[0].ring->first_seg->dma |
+				   pdev->eps[0].ring->cycle_state);
+
+	trace_cdnsp_setup_addressable_priv_device(pdev);
+
+	return 0;
+}
+
+/*
+ * Convert interval expressed as 2^(bInterval - 1) == interval into
+ * straight exponent value 2^n == interval.
+ */
+static unsigned int cdnsp_parse_exponent_interval(struct usb_gadget *g,
+						  struct cdnsp_ep *pep)
+{
+	unsigned int interval;
+
+	interval = clamp_val(pep->endpoint.desc->bInterval, 1, 16) - 1;
+	if (interval != pep->endpoint.desc->bInterval - 1)
+		dev_warn(&g->dev, "ep %s - rounding interval to %d %sframes\n",
+			 pep->name, 1 << interval,
+			 g->speed == USB_SPEED_FULL ? "" : "micro");
+
+	/*
+	 * Full speed isoc endpoints specify interval in frames,
+	 * not microframes. We are using microframes everywhere,
+	 * so adjust accordingly.
+	 */
+	if (g->speed == USB_SPEED_FULL)
+		interval += 3;	/* 1 frame = 2^3 uframes */
+
+	/* The controller handles only up to 512 ms (2^12). */
+	if (interval > 12)
+		interval = 12;
+
+	return interval;
+}
+
+/*
+ * Convert bInterval expressed in microframes (in 1-255 range) to exponent of
+ * microframes, rounded down to nearest power of 2.
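+ *
+ * (Worked example, values illustrative: desc_interval = 72 microframes
+ * gives fls(72) - 1 = 6, so the endpoint is serviced every 2^6 = 64
+ * microframes, i.e. the requested 9 ms is rounded down to 8 ms.)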
+ */
+static unsigned int cdnsp_microframes_to_exponent(struct usb_gadget *g,
+						  struct cdnsp_ep *pep,
+						  unsigned int desc_interval,
+						  unsigned int min_exponent,
+						  unsigned int max_exponent)
+{
+	unsigned int interval;
+
+	interval = fls(desc_interval) - 1;
+	return clamp_val(interval, min_exponent, max_exponent);
+}
+
+/*
+ * Return the polling interval.
+ *
+ * The polling interval is expressed in "microframes". If the controller's
+ * Interval field is set to N, it will service the endpoint every
+ * 2^(Interval)*125us.
+ */
+static unsigned int cdnsp_get_endpoint_interval(struct usb_gadget *g,
+						struct cdnsp_ep *pep)
+{
+	unsigned int interval = 0;
+
+	switch (g->speed) {
+	case USB_SPEED_HIGH:
+	case USB_SPEED_SUPER_PLUS:
+	case USB_SPEED_SUPER:
+		if (usb_endpoint_xfer_int(pep->endpoint.desc) ||
+		    usb_endpoint_xfer_isoc(pep->endpoint.desc))
+			interval = cdnsp_parse_exponent_interval(g, pep);
+		break;
+	case USB_SPEED_FULL:
+		if (usb_endpoint_xfer_isoc(pep->endpoint.desc)) {
+			interval = cdnsp_parse_exponent_interval(g, pep);
+		} else if (usb_endpoint_xfer_int(pep->endpoint.desc)) {
+			interval = pep->endpoint.desc->bInterval << 3;
+			interval = cdnsp_microframes_to_exponent(g, pep,
+								 interval,
+								 3, 10);
+		}
+
+		break;
+	default:
+		WARN_ON(1);
+	}
+
+	return interval;
+}
+
+/*
+ * The "Mult" field in the endpoint context is only set for SuperSpeed isoc eps.
+ * High speed endpoint descriptors can define "the number of additional
+ * transaction opportunities per microframe", but that goes in the Max Burst
+ * endpoint context field.
+ */
+static u32 cdnsp_get_endpoint_mult(struct usb_gadget *g, struct cdnsp_ep *pep)
+{
+	if (g->speed < USB_SPEED_SUPER ||
+	    !usb_endpoint_xfer_isoc(pep->endpoint.desc))
+		return 0;
+
+	return pep->endpoint.comp_desc->bmAttributes;
+}
+
+static u32 cdnsp_get_endpoint_max_burst(struct usb_gadget *g,
+					struct cdnsp_ep *pep)
+{
+	/* SuperSpeed and SuperSpeedPlus have max burst in ep companion desc. */
+	if (g->speed >= USB_SPEED_SUPER)
+		return pep->endpoint.comp_desc->bMaxBurst;
+
+	if (g->speed == USB_SPEED_HIGH &&
+	    (usb_endpoint_xfer_isoc(pep->endpoint.desc) ||
+	     usb_endpoint_xfer_int(pep->endpoint.desc)))
+		return (usb_endpoint_maxp(pep->endpoint.desc) & 0x1800) >> 11;
+
+	return 0;
+}
+
+static u32 cdnsp_get_endpoint_type(const struct usb_endpoint_descriptor *desc)
+{
+	int in;
+
+	in = usb_endpoint_dir_in(desc);
+
+	switch (usb_endpoint_type(desc)) {
+	case USB_ENDPOINT_XFER_CONTROL:
+		return CTRL_EP;
+	case USB_ENDPOINT_XFER_BULK:
+		return in ? BULK_IN_EP : BULK_OUT_EP;
+	case USB_ENDPOINT_XFER_ISOC:
+		return in ? ISOC_IN_EP : ISOC_OUT_EP;
+	case USB_ENDPOINT_XFER_INT:
+		return in ? INT_IN_EP : INT_OUT_EP;
+	}
+
+	return 0;
+}
+
+/*
+ * Return the maximum endpoint service interval time (ESIT) payload.
+ * Basically, this is the maxpacket size, multiplied by the burst size
+ * and mult size.
+ */
+static u32 cdnsp_get_max_esit_payload(struct usb_gadget *g,
+				      struct cdnsp_ep *pep)
+{
+	int max_packet;
+	int max_burst;
+
+	/* Only applies to interrupt or isochronous endpoints. */
+	if (usb_endpoint_xfer_control(pep->endpoint.desc) ||
+	    usb_endpoint_xfer_bulk(pep->endpoint.desc))
+		return 0;
+
+	/*
+	 * SuperSpeedPlus Isoc ep sending over 48k per ESIT.
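+	 * (Example, values assumed: maxp 1024 * 16 bursts * 3 packets per
+	 * burst = 49152 bytes > 48k, which only the SSP isoc companion's
+	 * wBytesPerInterval can express.)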
*/ + if (g->speed >= USB_SPEED_SUPER_PLUS && + USB_SS_SSP_ISOC_COMP(pep->endpoint.desc->bmAttributes)) + return le16_to_cpu(pep->endpoint.comp_desc->wBytesPerInterval); + /* SuperSpeed or SuperSpeedPlus Isoc ep with less than 48k per esit */ + else if (g->speed >= USB_SPEED_SUPER) + return le16_to_cpu(pep->endpoint.comp_desc->wBytesPerInterval); + + max_packet = usb_endpoint_maxp(pep->endpoint.desc); + max_burst = usb_endpoint_maxp_mult(pep->endpoint.desc); + + /* A 0 in max burst means 1 transfer per ESIT */ + return max_packet * max_burst; +} + +int cdnsp_endpoint_init(struct cdnsp_device *pdev, + struct cdnsp_ep *pep, + gfp_t mem_flags) +{ + enum cdnsp_ring_type ring_type; + struct cdnsp_ep_ctx *ep_ctx; + unsigned int err_count = 0; + unsigned int avg_trb_len; + unsigned int max_packet; + unsigned int max_burst; + unsigned int interval; + u32 max_esit_payload; + unsigned int mult; + u32 endpoint_type; + int ret; + + ep_ctx = pep->in_ctx; + + endpoint_type = cdnsp_get_endpoint_type(pep->endpoint.desc); + if (!endpoint_type) + return -EINVAL; + + ring_type = usb_endpoint_type(pep->endpoint.desc); + + /* + * Get values to fill the endpoint context, mostly from ep descriptor. + * The average TRB buffer length for bulk endpoints is unclear as we + * have no clue on scatter gather list entry size. For Isoc and Int, + * set it to max available. + */ + max_esit_payload = cdnsp_get_max_esit_payload(&pdev->gadget, pep); + interval = cdnsp_get_endpoint_interval(&pdev->gadget, pep); + mult = cdnsp_get_endpoint_mult(&pdev->gadget, pep); + max_packet = usb_endpoint_maxp(pep->endpoint.desc); + max_burst = cdnsp_get_endpoint_max_burst(&pdev->gadget, pep); + avg_trb_len = max_esit_payload; + + /* Allow 3 retries for everything but isoc, set CErr = 3. */ + if (!usb_endpoint_xfer_isoc(pep->endpoint.desc)) + err_count = 3; + if (usb_endpoint_xfer_bulk(pep->endpoint.desc) && + pdev->gadget.speed == USB_SPEED_HIGH) + max_packet = 512; + /* Controller spec indicates that ctrl ep avg TRB Length should be 8. */ + if (usb_endpoint_xfer_control(pep->endpoint.desc)) + avg_trb_len = 8; + + /* Set up the endpoint ring. 
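+	 * (max_packet is passed down so the ring can size bounce buffers
+	 * for TDs that are not TRB-aligned - an assumption based on how
+	 * cdnsp_unmap_td_bounce_buffer() uses ring->bounce_buf_len.)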
+	 */
+	pep->ring = cdnsp_ring_alloc(pdev, 2, ring_type, max_packet, mem_flags);
+	if (!pep->ring)
+		return -ENOMEM;
+
+	pep->skip = false;
+
+	/* Fill the endpoint context. */
+	ep_ctx->ep_info = cpu_to_le32(EP_MAX_ESIT_PAYLOAD_HI(max_esit_payload) |
+				      EP_INTERVAL(interval) | EP_MULT(mult));
+	ep_ctx->ep_info2 = cpu_to_le32(EP_TYPE(endpoint_type) |
+				       MAX_PACKET(max_packet) | MAX_BURST(max_burst) |
+				       ERROR_COUNT(err_count));
+	ep_ctx->deq = cpu_to_le64(pep->ring->first_seg->dma |
+				  pep->ring->cycle_state);
+
+	ep_ctx->tx_info = cpu_to_le32(EP_MAX_ESIT_PAYLOAD_LO(max_esit_payload) |
+				      EP_AVG_TRB_LENGTH(avg_trb_len));
+
+	if (usb_endpoint_xfer_bulk(pep->endpoint.desc) &&
+	    pdev->gadget.speed > USB_SPEED_HIGH) {
+		ret = cdnsp_alloc_streams(pdev, pep);
+		if (ret < 0)
+			return ret;
+	}
+
+	return 0;
+}
+
+void cdnsp_endpoint_zero(struct cdnsp_device *pdev, struct cdnsp_ep *pep)
+{
+	pep->in_ctx->ep_info = 0;
+	pep->in_ctx->ep_info2 = 0;
+	pep->in_ctx->deq = 0;
+	pep->in_ctx->tx_info = 0;
+}
+
+static int cdnsp_alloc_erst(struct cdnsp_device *pdev,
+			    struct cdnsp_ring *evt_ring,
+			    struct cdnsp_erst *erst)
+{
+	struct cdnsp_erst_entry *entry;
+	struct cdnsp_segment *seg;
+	unsigned int val;
+	size_t size;
+
+	size = sizeof(struct cdnsp_erst_entry) * evt_ring->num_segs;
+	erst->entries = dma_alloc_coherent(pdev->dev, size,
+					   &erst->erst_dma_addr, GFP_KERNEL);
+	if (!erst->entries)
+		return -ENOMEM;
+
+	erst->num_entries = evt_ring->num_segs;
+
+	seg = evt_ring->first_seg;
+	for (val = 0; val < evt_ring->num_segs; val++) {
+		entry = &erst->entries[val];
+		entry->seg_addr = cpu_to_le64(seg->dma);
+		entry->seg_size = cpu_to_le32(TRBS_PER_SEGMENT);
+		entry->rsvd = 0;
+		seg = seg->next;
+	}
+
+	return 0;
+}
+
+static void cdnsp_free_erst(struct cdnsp_device *pdev, struct cdnsp_erst *erst)
+{
+	size_t size = sizeof(struct cdnsp_erst_entry) * (erst->num_entries);
+	struct device *dev = pdev->dev;
+
+	if (erst->entries)
+		dma_free_coherent(dev, size, erst->entries,
+				  erst->erst_dma_addr);
+
+	erst->entries = NULL;
+}
+
+void cdnsp_mem_cleanup(struct cdnsp_device *pdev)
+{
+	struct device *dev = pdev->dev;
+
+	cdnsp_free_priv_device(pdev);
+	cdnsp_free_erst(pdev, &pdev->erst);
+
+	if (pdev->event_ring)
+		cdnsp_ring_free(pdev, pdev->event_ring);
+
+	pdev->event_ring = NULL;
+
+	if (pdev->cmd_ring)
+		cdnsp_ring_free(pdev, pdev->cmd_ring);
+
+	pdev->cmd_ring = NULL;
+
+	dma_pool_destroy(pdev->segment_pool);
+	pdev->segment_pool = NULL;
+	dma_pool_destroy(pdev->device_pool);
+	pdev->device_pool = NULL;
+
+	if (pdev->dcbaa)
+		dma_free_coherent(dev, sizeof(*pdev->dcbaa),
+				  pdev->dcbaa, pdev->dcbaa->dma);
+
+	pdev->dcbaa = NULL;
+
+	pdev->usb2_port.exist = 0;
+	pdev->usb3_port.exist = 0;
+	pdev->usb2_port.port_num = 0;
+	pdev->usb3_port.port_num = 0;
+	pdev->active_port = NULL;
+}
+
+static void cdnsp_set_event_deq(struct cdnsp_device *pdev)
+{
+	dma_addr_t deq;
+	u64 temp;
+
+	deq = cdnsp_trb_virt_to_dma(pdev->event_ring->deq_seg,
+				    pdev->event_ring->dequeue);
+
+	/* Update controller event ring dequeue pointer. */
+	temp = cdnsp_read_64(&pdev->ir_set->erst_dequeue);
+	temp &= ERST_PTR_MASK;
+
+	/*
+	 * Don't clear the EHB bit (which is RW1C) because
+	 * there might be more events to service.
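+	 * Masking EHB to 0 in the value written back leaves the hardware
+	 * bit untouched; writing it back as 1 would acknowledge it
+	 * prematurely.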
+ */ + temp &= ~ERST_EHB; + + cdnsp_write_64(((u64)deq & (u64)~ERST_PTR_MASK) | temp, + &pdev->ir_set->erst_dequeue); +} + +static void cdnsp_add_in_port(struct cdnsp_device *pdev, + struct cdnsp_port *port, + __le32 __iomem *addr) +{ + u32 temp, port_offset, port_count; + + temp = readl(addr); + port->maj_rev = CDNSP_EXT_PORT_MAJOR(temp); + port->min_rev = CDNSP_EXT_PORT_MINOR(temp); + + /* Port offset and count in the third dword.*/ + temp = readl(addr + 2); + port_offset = CDNSP_EXT_PORT_OFF(temp); + port_count = CDNSP_EXT_PORT_COUNT(temp); + + trace_cdnsp_port_info(addr, port_offset, port_count, port->maj_rev); + + port->port_num = port_offset; + port->exist = 1; +} + +/* + * Scan the Extended Capabilities for the "Supported Protocol Capabilities" that + * specify what speeds each port is supposed to be. + */ +static int cdnsp_setup_port_arrays(struct cdnsp_device *pdev) +{ + void __iomem *base; + u32 offset; + int i; + + base = &pdev->cap_regs->hc_capbase; + offset = cdnsp_find_next_ext_cap(base, 0, + EXT_CAP_CFG_DEV_20PORT_CAP_ID); + pdev->port20_regs = base + offset; + + offset = cdnsp_find_next_ext_cap(base, 0, D_XEC_CFG_3XPORT_CAP); + pdev->port3x_regs = base + offset; + + offset = 0; + base = &pdev->cap_regs->hc_capbase; + + /* Driver expects max 2 extended protocol capability. */ + for (i = 0; i < 2; i++) { + u32 temp; + + offset = cdnsp_find_next_ext_cap(base, offset, + EXT_CAPS_PROTOCOL); + temp = readl(base + offset); + + if (CDNSP_EXT_PORT_MAJOR(temp) == 0x03 && + !pdev->usb3_port.port_num) + cdnsp_add_in_port(pdev, &pdev->usb3_port, + base + offset); + + if (CDNSP_EXT_PORT_MAJOR(temp) == 0x02 && + !pdev->usb2_port.port_num) + cdnsp_add_in_port(pdev, &pdev->usb2_port, + base + offset); + } + + if (!pdev->usb2_port.exist || !pdev->usb3_port.exist) { + dev_err(pdev->dev, "Error: Only one port detected\n"); + return -ENODEV; + } + + trace_cdnsp_init("Found USB 2.0 ports and USB 3.0 ports."); + + pdev->usb2_port.regs = (struct cdnsp_port_regs __iomem *) + (&pdev->op_regs->port_reg_base + NUM_PORT_REGS * + (pdev->usb2_port.port_num - 1)); + + pdev->usb3_port.regs = (struct cdnsp_port_regs __iomem *) + (&pdev->op_regs->port_reg_base + NUM_PORT_REGS * + (pdev->usb3_port.port_num - 1)); + + return 0; +} + +/* + * Initialize memory for CDNSP (one-time init). + * + * Program the PAGESIZE register, initialize the device context array, create + * device contexts, set up a command ring segment, create event + * ring (one for now). + */ +int cdnsp_mem_init(struct cdnsp_device *pdev) +{ + struct device *dev = pdev->dev; + int ret = -ENOMEM; + unsigned int val; + dma_addr_t dma; + u32 page_size; + u64 val_64; + + /* + * Use 4K pages, since that's common and the minimum the + * controller supports + */ + page_size = 1 << 12; + + val = readl(&pdev->op_regs->config_reg); + val |= ((val & ~MAX_DEVS) | CDNSP_DEV_MAX_SLOTS) | CONFIG_U3E; + writel(val, &pdev->op_regs->config_reg); + + /* + * Doorbell array must be physically contiguous + * and 64-byte (cache line) aligned. + */ + pdev->dcbaa = dma_alloc_coherent(dev, sizeof(*pdev->dcbaa), + &dma, GFP_KERNEL); + if (!pdev->dcbaa) + return -ENOMEM; + + memset(pdev->dcbaa, 0, sizeof(*pdev->dcbaa)); + pdev->dcbaa->dma = dma; + + cdnsp_write_64(dma, &pdev->op_regs->dcbaa_ptr); + + /* + * Initialize the ring segment pool. The ring must be a contiguous + * structure comprised of TRBs. 
The TRBs must be 16-byte aligned,
+	 * however, the command ring segment needs 64-byte aligned segments
+	 * and our use of dma addresses in the trb_address_map radix tree needs
+	 * TRB_SEGMENT_SIZE alignment, so the driver picks the greater
+	 * alignment needed.
+	 */
+	pdev->segment_pool = dma_pool_create("CDNSP ring segments", dev,
+					     TRB_SEGMENT_SIZE, TRB_SEGMENT_SIZE,
+					     page_size);
+	if (!pdev->segment_pool)
+		goto release_dcbaa;
+
+	pdev->device_pool = dma_pool_create("CDNSP input/output contexts", dev,
+					    CDNSP_CTX_SIZE, 64, page_size);
+	if (!pdev->device_pool)
+		goto destroy_segment_pool;
+
+	/* Set up the command ring to have one segment for now. */
+	pdev->cmd_ring = cdnsp_ring_alloc(pdev, 1, TYPE_COMMAND, 0, GFP_KERNEL);
+	if (!pdev->cmd_ring)
+		goto destroy_device_pool;
+
+	/* Set the address in the Command Ring Control register. */
+	val_64 = cdnsp_read_64(&pdev->op_regs->cmd_ring);
+	val_64 = (val_64 & (u64)CMD_RING_RSVD_BITS) |
+		 (pdev->cmd_ring->first_seg->dma & (u64)~CMD_RING_RSVD_BITS) |
+		 pdev->cmd_ring->cycle_state;
+	cdnsp_write_64(val_64, &pdev->op_regs->cmd_ring);
+
+	val = readl(&pdev->cap_regs->db_off);
+	val &= DBOFF_MASK;
+	pdev->dba = (void __iomem *)pdev->cap_regs + val;
+
+	/* Set ir_set to interrupt register set 0. */
+	pdev->ir_set = &pdev->run_regs->ir_set[0];
+
+	/*
+	 * Event ring setup: Allocate a normal ring, but also set up
+	 * the event ring segment table (ERST).
+	 */
+	pdev->event_ring = cdnsp_ring_alloc(pdev, ERST_NUM_SEGS, TYPE_EVENT,
+					    0, GFP_KERNEL);
+	if (!pdev->event_ring)
+		goto free_cmd_ring;
+
+	ret = cdnsp_alloc_erst(pdev, pdev->event_ring, &pdev->erst);
+	if (ret)
+		goto free_event_ring;
+
+	/* Set ERST count with the number of entries in the segment table. */
+	val = readl(&pdev->ir_set->erst_size);
+	val &= ERST_SIZE_MASK;
+	val |= ERST_NUM_SEGS;
+	writel(val, &pdev->ir_set->erst_size);
+
+	/* Set the segment table base address. */
+	val_64 = cdnsp_read_64(&pdev->ir_set->erst_base);
+	val_64 &= ERST_PTR_MASK;
+	val_64 |= (pdev->erst.erst_dma_addr & (u64)~ERST_PTR_MASK);
+	cdnsp_write_64(val_64, &pdev->ir_set->erst_base);
+
+	/* Set the event ring dequeue address. */
+	cdnsp_set_event_deq(pdev);
+
+	ret = cdnsp_setup_port_arrays(pdev);
+	if (ret)
+		goto free_erst;
+
+	ret = cdnsp_alloc_priv_device(pdev);
+	if (ret) {
+		dev_err(pdev->dev,
+			"Could not allocate cdnsp_device data structures\n");
+		goto free_erst;
+	}
+
+	return 0;
+
+free_erst:
+	cdnsp_free_erst(pdev, &pdev->erst);
+free_event_ring:
+	cdnsp_ring_free(pdev, pdev->event_ring);
+free_cmd_ring:
+	cdnsp_ring_free(pdev, pdev->cmd_ring);
+destroy_device_pool:
+	dma_pool_destroy(pdev->device_pool);
+destroy_segment_pool:
+	dma_pool_destroy(pdev->segment_pool);
+release_dcbaa:
+	dma_free_coherent(dev, sizeof(*pdev->dcbaa), pdev->dcbaa,
+			  pdev->dcbaa->dma);
+
+	cdnsp_reset(pdev);
+
+	return ret;
+}
diff --git a/drivers/usb/cdns3/cdnsp-pci.c b/drivers/usb/cdns3/cdnsp-pci.c
new file mode 100644
index 000000000000..fe8a114c586c
--- /dev/null
+++ b/drivers/usb/cdns3/cdnsp-pci.c
@@ -0,0 +1,254 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Cadence PCI Glue driver.
+ *
+ * Copyright (C) 2019 Cadence.
+ *
+ * Author: Pawel Laszczak <pawell@cadence.com>
+ *
+ */
+
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/pci.h>
+
+#include "core.h"
+#include "gadget-export.h"
+
+#define PCI_BAR_HOST		0
+#define PCI_BAR_OTG		0
+#define PCI_BAR_DEV		2
+
+#define PCI_DEV_FN_HOST_DEVICE	0
+#define PCI_DEV_FN_OTG		1
+
+#define PCI_DRIVER_NAME		"cdns-pci-usbssp"
+#define PLAT_DRIVER_NAME	"cdns-usbssp"
+
+#define CDNS_VENDOR_ID		0x17cd
+#define CDNS_DEVICE_ID		0x0100
+#define CDNS_DRD_IF		(PCI_CLASS_SERIAL_USB << 8 | 0x80)
+
+static struct pci_dev *cdnsp_get_second_fun(struct pci_dev *pdev)
+{
+	struct pci_dev *func;
+
+	/*
+	 * Get the second function.
+	 * It's a little tricky, but this platform has two functions.
+	 * The first keeps resources for Host/Device while the second
+	 * keeps resources for DRD/OTG.
+	 */
+	func = pci_get_device(pdev->vendor, pdev->device, NULL);
+	if (!func)
+		return NULL;
+
+	if (func->devfn == pdev->devfn) {
+		func = pci_get_device(pdev->vendor, pdev->device, func);
+		if (!func)
+			return NULL;
+	}
+
+	return func;
+}
+
+static int cdnsp_pci_probe(struct pci_dev *pdev,
+			   const struct pci_device_id *id)
+{
+	struct device *dev = &pdev->dev;
+	struct pci_dev *func;
+	struct resource *res;
+	struct cdns *cdnsp;
+	int ret;
+
+	/*
+	 * For the GADGET/HOST PCI function, (devfn) is 0;
+	 * for the OTG PCI function, (devfn) is 1.
+	 */
+	if (!id || (pdev->devfn != PCI_DEV_FN_HOST_DEVICE &&
+		    pdev->devfn != PCI_DEV_FN_OTG))
+		return -EINVAL;
+
+	func = cdnsp_get_second_fun(pdev);
+	if (!func)
+		return -EINVAL;
+
+	if (func->class == PCI_CLASS_SERIAL_USB_XHCI ||
+	    pdev->class == PCI_CLASS_SERIAL_USB_XHCI) {
+		ret = -EINVAL;
+		goto put_pci;
+	}
+
+	ret = pcim_enable_device(pdev);
+	if (ret) {
+		dev_err(&pdev->dev, "Enabling PCI device has failed %d\n", ret);
+		goto put_pci;
+	}
+
+	pci_set_master(pdev);
+	if (pci_is_enabled(func)) {
+		cdnsp = pci_get_drvdata(func);
+	} else {
+		cdnsp = kzalloc(sizeof(*cdnsp), GFP_KERNEL);
+		if (!cdnsp) {
+			ret = -ENOMEM;
+			goto disable_pci;
+		}
+	}
+
+	/*
+	 * For GADGET, the device function number is 0.
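+	 * (That is, PCI_DEV_FN_HOST_DEVICE; the DRD/OTG resources live on
+	 * function PCI_DEV_FN_OTG and are handled by the else branch below.)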
*/ + if (pdev->devfn == 0) { + resource_size_t rsrc_start, rsrc_len; + + /* Function 0: host(BAR_0) + device(BAR_1).*/ + dev_dbg(dev, "Initialize resources\n"); + rsrc_start = pci_resource_start(pdev, PCI_BAR_DEV); + rsrc_len = pci_resource_len(pdev, PCI_BAR_DEV); + res = devm_request_mem_region(dev, rsrc_start, rsrc_len, "dev"); + if (!res) { + dev_dbg(dev, "controller already in use\n"); + ret = -EBUSY; + goto free_cdnsp; + } + + cdnsp->dev_regs = devm_ioremap(dev, rsrc_start, rsrc_len); + if (!cdnsp->dev_regs) { + dev_dbg(dev, "error mapping memory\n"); + ret = -EFAULT; + goto free_cdnsp; + } + + cdnsp->dev_irq = pdev->irq; + dev_dbg(dev, "USBSS-DEV physical base addr: %pa\n", + &rsrc_start); + + res = &cdnsp->xhci_res[0]; + res->start = pci_resource_start(pdev, PCI_BAR_HOST); + res->end = pci_resource_end(pdev, PCI_BAR_HOST); + res->name = "xhci"; + res->flags = IORESOURCE_MEM; + dev_dbg(dev, "USBSS-XHCI physical base addr: %pa\n", + &res->start); + + /* Interrupt for XHCI, */ + res = &cdnsp->xhci_res[1]; + res->start = pdev->irq; + res->name = "host"; + res->flags = IORESOURCE_IRQ; + } else { + res = &cdnsp->otg_res; + res->start = pci_resource_start(pdev, PCI_BAR_OTG); + res->end = pci_resource_end(pdev, PCI_BAR_OTG); + res->name = "otg"; + res->flags = IORESOURCE_MEM; + dev_dbg(dev, "CDNSP-DRD physical base addr: %pa\n", + &res->start); + + /* Interrupt for OTG/DRD. */ + cdnsp->otg_irq = pdev->irq; + } + + if (pci_is_enabled(func)) { + cdnsp->dev = dev; + cdnsp->gadget_init = cdnsp_gadget_init; + + ret = cdns_init(cdnsp); + if (ret) + goto free_cdnsp; + } + + pci_set_drvdata(pdev, cdnsp); + + device_wakeup_enable(&pdev->dev); + if (pci_dev_run_wake(pdev)) + pm_runtime_put_noidle(&pdev->dev); + + return 0; + +free_cdnsp: + if (!pci_is_enabled(func)) + kfree(cdnsp); + +disable_pci: + pci_disable_device(pdev); + +put_pci: + pci_dev_put(func); + + return ret; +} + +static void cdnsp_pci_remove(struct pci_dev *pdev) +{ + struct cdns *cdnsp; + struct pci_dev *func; + + func = cdnsp_get_second_fun(pdev); + cdnsp = (struct cdns *)pci_get_drvdata(pdev); + + if (pci_dev_run_wake(pdev)) + pm_runtime_get_noresume(&pdev->dev); + + if (!pci_is_enabled(func)) { + kfree(cdnsp); + goto pci_put; + } + + cdns_remove(cdnsp); + +pci_put: + pci_dev_put(func); +} + +static int __maybe_unused cdnsp_pci_suspend(struct device *dev) +{ + struct cdns *cdns = dev_get_drvdata(dev); + + return cdns_suspend(cdns); +} + +static int __maybe_unused cdnsp_pci_resume(struct device *dev) +{ + struct cdns *cdns = dev_get_drvdata(dev); + unsigned long flags; + int ret; + + spin_lock_irqsave(&cdns->lock, flags); + ret = cdns_resume(cdns, 1); + spin_unlock_irqrestore(&cdns->lock, flags); + + return ret; +} + +static const struct dev_pm_ops cdnsp_pci_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(cdnsp_pci_suspend, cdnsp_pci_resume) +}; + +static const struct pci_device_id cdnsp_pci_ids[] = { + { PCI_VENDOR_ID_CDNS, CDNS_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID, + PCI_CLASS_SERIAL_USB_DEVICE, PCI_ANY_ID }, + { PCI_VENDOR_ID_CDNS, CDNS_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID, + CDNS_DRD_IF, PCI_ANY_ID }, + { 0, } +}; + +static struct pci_driver cdnsp_pci_driver = { + .name = "cdnsp-pci", + .id_table = &cdnsp_pci_ids[0], + .probe = cdnsp_pci_probe, + .remove = cdnsp_pci_remove, + .driver = { + .pm = &cdnsp_pci_pm_ops, + } +}; + +module_pci_driver(cdnsp_pci_driver); +MODULE_DEVICE_TABLE(pci, cdnsp_pci_ids); + +MODULE_ALIAS("pci:cdnsp"); +MODULE_AUTHOR("Pawel Laszczak <pawell@cadence.com>"); +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("Cadence 
CDNSP PCI driver"); diff --git a/drivers/usb/cdns3/cdnsp-ring.c b/drivers/usb/cdns3/cdnsp-ring.c new file mode 100644 index 000000000000..f9170d177a89 --- /dev/null +++ b/drivers/usb/cdns3/cdnsp-ring.c @@ -0,0 +1,2438 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Cadence CDNSP DRD Driver. + * + * Copyright (C) 2020 Cadence. + * + * Author: Pawel Laszczak <pawell@cadence.com> + * + * Code based on Linux XHCI driver. + * Origin: Copyright (C) 2008 Intel Corp + */ + +/* + * Ring initialization rules: + * 1. Each segment is initialized to zero, except for link TRBs. + * 2. Ring cycle state = 0. This represents Producer Cycle State (PCS) or + * Consumer Cycle State (CCS), depending on ring function. + * 3. Enqueue pointer = dequeue pointer = address of first TRB in the segment. + * + * Ring behavior rules: + * 1. A ring is empty if enqueue == dequeue. This means there will always be at + * least one free TRB in the ring. This is useful if you want to turn that + * into a link TRB and expand the ring. + * 2. When incrementing an enqueue or dequeue pointer, if the next TRB is a + * link TRB, then load the pointer with the address in the link TRB. If the + * link TRB had its toggle bit set, you may need to update the ring cycle + * state (see cycle bit rules). You may have to do this multiple times + * until you reach a non-link TRB. + * 3. A ring is full if enqueue++ (for the definition of increment above) + * equals the dequeue pointer. + * + * Cycle bit rules: + * 1. When a consumer increments a dequeue pointer and encounters a toggle bit + * in a link TRB, it must toggle the ring cycle state. + * 2. When a producer increments an enqueue pointer and encounters a toggle bit + * in a link TRB, it must toggle the ring cycle state. + * + * Producer rules: + * 1. Check if ring is full before you enqueue. + * 2. Write the ring cycle state to the cycle bit in the TRB you're enqueuing. + * Update enqueue pointer between each write (which may update the ring + * cycle state). + * 3. Notify consumer. If SW is producer, it rings the doorbell for command + * and endpoint rings. If controller is the producer for the event ring, + * and it generates an interrupt according to interrupt modulation rules. + * + * Consumer rules: + * 1. Check if TRB belongs to you. If the cycle bit == your ring cycle state, + * the TRB is owned by the consumer. + * 2. Update dequeue pointer (which may update the ring cycle state) and + * continue processing TRBs until you reach a TRB which is not owned by you. + * 3. Notify the producer. SW is the consumer for the event ring, and it + * updates event ring dequeue pointer. Controller is the consumer for the + * command and endpoint rings; it generates events on the event ring + * for these. + */ + +#include <linux/scatterlist.h> +#include <linux/dma-mapping.h> +#include <linux/delay.h> +#include <linux/slab.h> +#include <linux/irq.h> + +#include "cdnsp-trace.h" +#include "cdnsp-gadget.h" + +/* + * Returns zero if the TRB isn't in this segment, otherwise it returns the DMA + * address of the TRB. 
+ */ +dma_addr_t cdnsp_trb_virt_to_dma(struct cdnsp_segment *seg, + union cdnsp_trb *trb) +{ + unsigned long segment_offset = trb - seg->trbs; + + if (trb < seg->trbs || segment_offset >= TRBS_PER_SEGMENT) + return 0; + + return seg->dma + (segment_offset * sizeof(*trb)); +} + +static bool cdnsp_trb_is_noop(union cdnsp_trb *trb) +{ + return TRB_TYPE_NOOP_LE32(trb->generic.field[3]); +} + +static bool cdnsp_trb_is_link(union cdnsp_trb *trb) +{ + return TRB_TYPE_LINK_LE32(trb->link.control); +} + +bool cdnsp_last_trb_on_seg(struct cdnsp_segment *seg, union cdnsp_trb *trb) +{ + return trb == &seg->trbs[TRBS_PER_SEGMENT - 1]; +} + +bool cdnsp_last_trb_on_ring(struct cdnsp_ring *ring, + struct cdnsp_segment *seg, + union cdnsp_trb *trb) +{ + return cdnsp_last_trb_on_seg(seg, trb) && (seg->next == ring->first_seg); +} + +static bool cdnsp_link_trb_toggles_cycle(union cdnsp_trb *trb) +{ + return le32_to_cpu(trb->link.control) & LINK_TOGGLE; +} + +static void cdnsp_trb_to_noop(union cdnsp_trb *trb, u32 noop_type) +{ + if (cdnsp_trb_is_link(trb)) { + /* Unchain chained link TRBs. */ + trb->link.control &= cpu_to_le32(~TRB_CHAIN); + } else { + trb->generic.field[0] = 0; + trb->generic.field[1] = 0; + trb->generic.field[2] = 0; + /* Preserve only the cycle bit of this TRB. */ + trb->generic.field[3] &= cpu_to_le32(TRB_CYCLE); + trb->generic.field[3] |= cpu_to_le32(TRB_TYPE(noop_type)); + } +} + +/* + * Updates trb to point to the next TRB in the ring, and updates seg if the next + * TRB is in a new segment. This does not skip over link TRBs, and it does not + * effect the ring dequeue or enqueue pointers. + */ +static void cdnsp_next_trb(struct cdnsp_device *pdev, + struct cdnsp_ring *ring, + struct cdnsp_segment **seg, + union cdnsp_trb **trb) +{ + if (cdnsp_trb_is_link(*trb)) { + *seg = (*seg)->next; + *trb = ((*seg)->trbs); + } else { + (*trb)++; + } +} + +/* + * See Cycle bit rules. SW is the consumer for the event ring only. + * Don't make a ring full of link TRBs. That would be dumb and this would loop. + */ +void cdnsp_inc_deq(struct cdnsp_device *pdev, struct cdnsp_ring *ring) +{ + /* event ring doesn't have link trbs, check for last trb. */ + if (ring->type == TYPE_EVENT) { + if (!cdnsp_last_trb_on_seg(ring->deq_seg, ring->dequeue)) { + ring->dequeue++; + goto out; + } + + if (cdnsp_last_trb_on_ring(ring, ring->deq_seg, ring->dequeue)) + ring->cycle_state ^= 1; + + ring->deq_seg = ring->deq_seg->next; + ring->dequeue = ring->deq_seg->trbs; + goto out; + } + + /* All other rings have link trbs. */ + if (!cdnsp_trb_is_link(ring->dequeue)) { + ring->dequeue++; + ring->num_trbs_free++; + } + while (cdnsp_trb_is_link(ring->dequeue)) { + ring->deq_seg = ring->deq_seg->next; + ring->dequeue = ring->deq_seg->trbs; + } +out: + trace_cdnsp_inc_deq(ring); +} + +/* + * See Cycle bit rules. SW is the consumer for the event ring only. + * Don't make a ring full of link TRBs. That would be dumb and this would loop. + * + * If we've just enqueued a TRB that is in the middle of a TD (meaning the + * chain bit is set), then set the chain bit in all the following link TRBs. + * If we've enqueued the last TRB in a TD, make sure the following link TRBs + * have their chain bit cleared (so that each Link TRB is a separate TD). + * + * @more_trbs_coming: Will you enqueue more TRBs before ringing the doorbell. 
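+ *
+ * (Sketch of the effect, assuming a two-segment ring: enqueuing a TD that
+ * crosses the link TRB copies the TD's chain bit into the link TRB and
+ * hands it to hardware by flipping its cycle bit, so the controller
+ * follows the link mid-TD.)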
+ */
+static void cdnsp_inc_enq(struct cdnsp_device *pdev,
+			  struct cdnsp_ring *ring,
+			  bool more_trbs_coming)
+{
+	union cdnsp_trb *next;
+	u32 chain;
+
+	chain = le32_to_cpu(ring->enqueue->generic.field[3]) & TRB_CHAIN;
+
+	/* If this is not the event ring, there is one less usable TRB. */
+	if (!cdnsp_trb_is_link(ring->enqueue))
+		ring->num_trbs_free--;
+	next = ++(ring->enqueue);
+
+	/* Update the enqueue pointer further if that was a link TRB. */
+	while (cdnsp_trb_is_link(next)) {
+		/*
+		 * If the caller doesn't plan on enqueuing more TDs before
+		 * ringing the doorbell, then we don't want to give the link TRB
+		 * to the hardware just yet. We'll give the link TRB back in
+		 * cdnsp_prepare_ring() just before we enqueue the TD at the
+		 * top of the ring.
+		 */
+		if (!chain && !more_trbs_coming)
+			break;
+
+		next->link.control &= cpu_to_le32(~TRB_CHAIN);
+		next->link.control |= cpu_to_le32(chain);
+
+		/* Give this link TRB to the hardware. */
+		wmb();
+		next->link.control ^= cpu_to_le32(TRB_CYCLE);
+
+		/* Toggle the cycle bit after the last ring segment. */
+		if (cdnsp_link_trb_toggles_cycle(next))
+			ring->cycle_state ^= 1;
+
+		ring->enq_seg = ring->enq_seg->next;
+		ring->enqueue = ring->enq_seg->trbs;
+		next = ring->enqueue;
+	}
+
+	trace_cdnsp_inc_enq(ring);
+}
+
+/*
+ * Check to see if there's room to enqueue num_trbs on the ring and make sure
+ * the enqueue pointer will not advance into the dequeue segment.
+ */
+static bool cdnsp_room_on_ring(struct cdnsp_device *pdev,
+			       struct cdnsp_ring *ring,
+			       unsigned int num_trbs)
+{
+	int num_trbs_in_deq_seg;
+
+	if (ring->num_trbs_free < num_trbs)
+		return false;
+
+	if (ring->type != TYPE_COMMAND && ring->type != TYPE_EVENT) {
+		num_trbs_in_deq_seg = ring->dequeue - ring->deq_seg->trbs;
+
+		if (ring->num_trbs_free < num_trbs + num_trbs_in_deq_seg)
+			return false;
+	}
+
+	return true;
+}
+
+/*
+ * Workaround for L1: the controller has an issue with resuming from L1 after
+ * a doorbell for an endpoint is set during the L1 state. This function forces
+ * the resume signal in such a case.
+ */
+static void cdnsp_force_l0_go(struct cdnsp_device *pdev)
+{
+	if (pdev->active_port == &pdev->usb2_port && pdev->gadget.lpm_capable)
+		cdnsp_set_link_state(pdev, &pdev->active_port->regs->portsc, XDEV_U0);
+}
+
+/* Ring the doorbell after placing a command on the ring. */
+void cdnsp_ring_cmd_db(struct cdnsp_device *pdev)
+{
+	writel(DB_VALUE_CMD, &pdev->dba->cmd_db);
+}
+
+/*
+ * Ring the doorbell after placing a transfer on the ring.
+ * Returns true if the doorbell was set, otherwise false.
+ */
+static bool cdnsp_ring_ep_doorbell(struct cdnsp_device *pdev,
+				   struct cdnsp_ep *pep,
+				   unsigned int stream_id)
+{
+	__le32 __iomem *reg_addr = &pdev->dba->ep_db;
+	unsigned int ep_state = pep->ep_state;
+	unsigned int db_value;
+
+	/*
+	 * Don't ring the doorbell for this endpoint if the endpoint is halted
+	 * or disabled.
+	 */
+	if (ep_state & EP_HALTED || !(ep_state & EP_ENABLED))
+		return false;
+
+	/* For stream-capable endpoints the driver can ring the doorbell only twice. */
+	if (pep->ep_state & EP_HAS_STREAMS) {
+		if (pep->stream_info.drbls_count >= 2)
+			return false;
+
+		pep->stream_info.drbls_count++;
+	}
+
+	pep->ep_state &= ~EP_STOPPED;
+
+	if (pep->idx == 0 && pdev->ep0_stage == CDNSP_DATA_STAGE &&
+	    !pdev->ep0_expect_in)
+		db_value = DB_VALUE_EP0_OUT(pep->idx, stream_id);
+	else
+		db_value = DB_VALUE(pep->idx, stream_id);
+
+	trace_cdnsp_tr_drbl(pep, stream_id);
+
+	writel(db_value, reg_addr);
+
+	cdnsp_force_l0_go(pdev);
+
+	/* Doorbell was set. */
+	return true;
+}
+
+/*
+ * Get the right ring for the given pep and stream_id.
+ * If the endpoint supports streams, boundary check the USB request's stream ID.
+ * If the endpoint doesn't support streams, return the singular endpoint ring.
+ */
+static struct cdnsp_ring *cdnsp_get_transfer_ring(struct cdnsp_device *pdev,
+						  struct cdnsp_ep *pep,
+						  unsigned int stream_id)
+{
+	if (!(pep->ep_state & EP_HAS_STREAMS))
+		return pep->ring;
+
+	if (stream_id == 0 || stream_id >= pep->stream_info.num_streams) {
+		dev_err(pdev->dev, "ERR: %s ring doesn't exist for SID: %d.\n",
+			pep->name, stream_id);
+		return NULL;
+	}
+
+	return pep->stream_info.stream_rings[stream_id];
+}
+
+static struct cdnsp_ring *
+	cdnsp_request_to_transfer_ring(struct cdnsp_device *pdev,
+				       struct cdnsp_request *preq)
+{
+	return cdnsp_get_transfer_ring(pdev, preq->pep,
+				       preq->request.stream_id);
+}
+
+/* Ring the doorbell for any rings with pending requests. */
+void cdnsp_ring_doorbell_for_active_rings(struct cdnsp_device *pdev,
+					  struct cdnsp_ep *pep)
+{
+	struct cdnsp_stream_info *stream_info;
+	unsigned int stream_id;
+	int ret;
+
+	if (pep->ep_state & EP_DIS_IN_RROGRESS)
+		return;
+
+	/* A ring has pending requests if its TD list is not empty. */
+	if (!(pep->ep_state & EP_HAS_STREAMS) && pep->number) {
+		if (pep->ring && !list_empty(&pep->ring->td_list))
+			cdnsp_ring_ep_doorbell(pdev, pep, 0);
+		return;
+	}
+
+	stream_info = &pep->stream_info;
+
+	for (stream_id = 1; stream_id < stream_info->num_streams; stream_id++) {
+		struct cdnsp_td *td, *td_temp;
+		struct cdnsp_ring *ep_ring;
+
+		if (stream_info->drbls_count >= 2)
+			return;
+
+		ep_ring = cdnsp_get_transfer_ring(pdev, pep, stream_id);
+		if (!ep_ring)
+			continue;
+
+		if (!ep_ring->stream_active || ep_ring->stream_rejected)
+			continue;
+
+		list_for_each_entry_safe(td, td_temp, &ep_ring->td_list,
+					 td_list) {
+			if (td->drbl)
+				continue;
+
+			ret = cdnsp_ring_ep_doorbell(pdev, pep, stream_id);
+			if (ret)
+				td->drbl = 1;
+		}
+	}
+}
+
+/*
+ * Get the hw dequeue pointer the controller stopped on, either directly from
+ * the endpoint context or, if streams are in use, from the stream context.
+ * The returned hw_dequeue contains the lowest four bits with cycle state
+ * and possible stream context type.
+ */
+static u64 cdnsp_get_hw_deq(struct cdnsp_device *pdev,
+			    unsigned int ep_index,
+			    unsigned int stream_id)
+{
+	struct cdnsp_stream_ctx *st_ctx;
+	struct cdnsp_ep *pep;
+
+	pep = &pdev->eps[ep_index];
+
+	if (pep->ep_state & EP_HAS_STREAMS) {
+		st_ctx = &pep->stream_info.stream_ctx_array[stream_id];
+		return le64_to_cpu(st_ctx->stream_ring);
+	}
+
+	return le64_to_cpu(pep->out_ctx->deq);
+}
+
+/*
+ * Move the controller endpoint ring dequeue pointer past cur_td.
+ * Record the new state of the controller endpoint ring dequeue segment,
+ * dequeue pointer, and new consumer cycle state in state.
+ * Update the internal representation of the ring's dequeue pointer.
+ *
+ * We do this in three jumps:
+ *  - First we update our new ring state to be the same as when the
+ *    controller stopped.
+ *  - Then we traverse the ring to find the segment that contains
+ *    the last TRB in the TD. We toggle the new cycle state
+ *    when we pass any link TRBs with the toggle cycle bit set.
+ *  - Finally we move the dequeue state one TRB further, toggling the cycle bit
+ *    if we've moved it past a link TRB with the toggle cycle bit set.
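+ *
+ * (Illustrative walk: with hw_dequeue in segment A and the TD's last_trb
+ * in segment B, the search matches hw_dequeue first, toggles
+ * new_cycle_state at the A->B link TRB if it carries LINK_TOGGLE, and
+ * ends up pointing at the TRB just past last_trb.)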
+ */ +static void cdnsp_find_new_dequeue_state(struct cdnsp_device *pdev, + struct cdnsp_ep *pep, + unsigned int stream_id, + struct cdnsp_td *cur_td, + struct cdnsp_dequeue_state *state) +{ + bool td_last_trb_found = false; + struct cdnsp_segment *new_seg; + struct cdnsp_ring *ep_ring; + union cdnsp_trb *new_deq; + bool cycle_found = false; + u64 hw_dequeue; + + ep_ring = cdnsp_get_transfer_ring(pdev, pep, stream_id); + if (!ep_ring) + return; + + /* + * Dig out the cycle state saved by the controller during the + * stop endpoint command. + */ + hw_dequeue = cdnsp_get_hw_deq(pdev, pep->idx, stream_id); + new_seg = ep_ring->deq_seg; + new_deq = ep_ring->dequeue; + state->new_cycle_state = hw_dequeue & 0x1; + state->stream_id = stream_id; + + /* + * We want to find the pointer, segment and cycle state of the new trb + * (the one after current TD's last_trb). We know the cycle state at + * hw_dequeue, so walk the ring until both hw_dequeue and last_trb are + * found. + */ + do { + if (!cycle_found && cdnsp_trb_virt_to_dma(new_seg, new_deq) + == (dma_addr_t)(hw_dequeue & ~0xf)) { + cycle_found = true; + + if (td_last_trb_found) + break; + } + + if (new_deq == cur_td->last_trb) + td_last_trb_found = true; + + if (cycle_found && cdnsp_trb_is_link(new_deq) && + cdnsp_link_trb_toggles_cycle(new_deq)) + state->new_cycle_state ^= 0x1; + + cdnsp_next_trb(pdev, ep_ring, &new_seg, &new_deq); + + /* Search wrapped around, bail out. */ + if (new_deq == pep->ring->dequeue) { + dev_err(pdev->dev, + "Error: Failed finding new dequeue state\n"); + state->new_deq_seg = NULL; + state->new_deq_ptr = NULL; + return; + } + + } while (!cycle_found || !td_last_trb_found); + + state->new_deq_seg = new_seg; + state->new_deq_ptr = new_deq; + + trace_cdnsp_new_deq_state(state); +} + +/* + * flip_cycle means flip the cycle bit of all but the first and last TRB. + * (The last TRB actually points to the ring enqueue pointer, which is not part + * of this TD.) This is used to remove partially enqueued isoc TDs from a ring. + */ +static void cdnsp_td_to_noop(struct cdnsp_device *pdev, + struct cdnsp_ring *ep_ring, + struct cdnsp_td *td, + bool flip_cycle) +{ + struct cdnsp_segment *seg = td->start_seg; + union cdnsp_trb *trb = td->first_trb; + + while (1) { + cdnsp_trb_to_noop(trb, TRB_TR_NOOP); + + /* flip cycle if asked to */ + if (flip_cycle && trb != td->first_trb && trb != td->last_trb) + trb->generic.field[3] ^= cpu_to_le32(TRB_CYCLE); + + if (trb == td->last_trb) + break; + + cdnsp_next_trb(pdev, ep_ring, &seg, &trb); + } +} + +/* + * This TD is defined by the TRBs starting at start_trb in start_seg and ending + * at end_trb, which may be in another segment. If the suspect DMA address is a + * TRB in this TD, this function returns that TRB's segment. Otherwise it + * returns 0. 
+ */ +static struct cdnsp_segment *cdnsp_trb_in_td(struct cdnsp_device *pdev, + struct cdnsp_segment *start_seg, + union cdnsp_trb *start_trb, + union cdnsp_trb *end_trb, + dma_addr_t suspect_dma) +{ + struct cdnsp_segment *cur_seg; + union cdnsp_trb *temp_trb; + dma_addr_t end_seg_dma; + dma_addr_t end_trb_dma; + dma_addr_t start_dma; + + start_dma = cdnsp_trb_virt_to_dma(start_seg, start_trb); + cur_seg = start_seg; + + do { + if (start_dma == 0) + return NULL; + + temp_trb = &cur_seg->trbs[TRBS_PER_SEGMENT - 1]; + /* We may get an event for a Link TRB in the middle of a TD */ + end_seg_dma = cdnsp_trb_virt_to_dma(cur_seg, temp_trb); + /* If the end TRB isn't in this segment, this is set to 0 */ + end_trb_dma = cdnsp_trb_virt_to_dma(cur_seg, end_trb); + + trace_cdnsp_looking_trb_in_td(suspect_dma, start_dma, + end_trb_dma, cur_seg->dma, + end_seg_dma); + + if (end_trb_dma > 0) { + /* + * The end TRB is in this segment, so suspect should + * be here + */ + if (start_dma <= end_trb_dma) { + if (suspect_dma >= start_dma && + suspect_dma <= end_trb_dma) { + return cur_seg; + } + } else { + /* + * Case for one segment with a + * TD wrapped around to the top + */ + if ((suspect_dma >= start_dma && + suspect_dma <= end_seg_dma) || + (suspect_dma >= cur_seg->dma && + suspect_dma <= end_trb_dma)) { + return cur_seg; + } + } + + return NULL; + } + + /* Might still be somewhere in this segment */ + if (suspect_dma >= start_dma && suspect_dma <= end_seg_dma) + return cur_seg; + + cur_seg = cur_seg->next; + start_dma = cdnsp_trb_virt_to_dma(cur_seg, &cur_seg->trbs[0]); + } while (cur_seg != start_seg); + + return NULL; +} + +static void cdnsp_unmap_td_bounce_buffer(struct cdnsp_device *pdev, + struct cdnsp_ring *ring, + struct cdnsp_td *td) +{ + struct cdnsp_segment *seg = td->bounce_seg; + struct cdnsp_request *preq; + size_t len; + + if (!seg) + return; + + preq = td->preq; + + trace_cdnsp_bounce_unmap(td->preq, seg->bounce_len, seg->bounce_offs, + seg->bounce_dma, 0); + + if (!preq->direction) { + dma_unmap_single(pdev->dev, seg->bounce_dma, + ring->bounce_buf_len, DMA_TO_DEVICE); + return; + } + + dma_unmap_single(pdev->dev, seg->bounce_dma, ring->bounce_buf_len, + DMA_FROM_DEVICE); + + /* For in transfers we need to copy the data from bounce to sg */ + len = sg_pcopy_from_buffer(preq->request.sg, preq->request.num_sgs, + seg->bounce_buf, seg->bounce_len, + seg->bounce_offs); + if (len != seg->bounce_len) + dev_warn(pdev->dev, "WARN Wrong bounce buffer read length: %zu != %d\n", + len, seg->bounce_len); + + seg->bounce_len = 0; + seg->bounce_offs = 0; +} + +static int cdnsp_cmd_set_deq(struct cdnsp_device *pdev, + struct cdnsp_ep *pep, + struct cdnsp_dequeue_state *deq_state) +{ + struct cdnsp_ring *ep_ring; + int ret; + + if (!deq_state->new_deq_ptr || !deq_state->new_deq_seg) { + cdnsp_ring_doorbell_for_active_rings(pdev, pep); + return 0; + } + + cdnsp_queue_new_dequeue_state(pdev, pep, deq_state); + cdnsp_ring_cmd_db(pdev); + ret = cdnsp_wait_for_cmd_compl(pdev); + + trace_cdnsp_handle_cmd_set_deq(cdnsp_get_slot_ctx(&pdev->out_ctx)); + trace_cdnsp_handle_cmd_set_deq_ep(pep->out_ctx); + + /* + * Update the ring's dequeue segment and dequeue pointer + * to reflect the new position. 
+ */ + ep_ring = cdnsp_get_transfer_ring(pdev, pep, deq_state->stream_id); + + if (cdnsp_trb_is_link(ep_ring->dequeue)) { + ep_ring->deq_seg = ep_ring->deq_seg->next; + ep_ring->dequeue = ep_ring->deq_seg->trbs; + } + + while (ep_ring->dequeue != deq_state->new_deq_ptr) { + ep_ring->num_trbs_free++; + ep_ring->dequeue++; + + if (cdnsp_trb_is_link(ep_ring->dequeue)) { + if (ep_ring->dequeue == deq_state->new_deq_ptr) + break; + + ep_ring->deq_seg = ep_ring->deq_seg->next; + ep_ring->dequeue = ep_ring->deq_seg->trbs; + } + } + + /* + * Probably there was TIMEOUT during handling Set Dequeue Pointer + * command. It's critical error and controller will be stopped. + */ + if (ret) + return -ESHUTDOWN; + + /* Restart any rings with pending requests */ + cdnsp_ring_doorbell_for_active_rings(pdev, pep); + + return 0; +} + +int cdnsp_remove_request(struct cdnsp_device *pdev, + struct cdnsp_request *preq, + struct cdnsp_ep *pep) +{ + struct cdnsp_dequeue_state deq_state; + struct cdnsp_td *cur_td = NULL; + struct cdnsp_ring *ep_ring; + struct cdnsp_segment *seg; + int status = -ECONNRESET; + int ret = 0; + u64 hw_deq; + + memset(&deq_state, 0, sizeof(deq_state)); + + trace_cdnsp_remove_request(pep->out_ctx); + trace_cdnsp_remove_request_td(preq); + + cur_td = &preq->td; + ep_ring = cdnsp_request_to_transfer_ring(pdev, preq); + + /* + * If we stopped on the TD we need to cancel, then we have to + * move the controller endpoint ring dequeue pointer past + * this TD. + */ + hw_deq = cdnsp_get_hw_deq(pdev, pep->idx, preq->request.stream_id); + hw_deq &= ~0xf; + + seg = cdnsp_trb_in_td(pdev, cur_td->start_seg, cur_td->first_trb, + cur_td->last_trb, hw_deq); + + if (seg && (pep->ep_state & EP_ENABLED)) + cdnsp_find_new_dequeue_state(pdev, pep, preq->request.stream_id, + cur_td, &deq_state); + else + cdnsp_td_to_noop(pdev, ep_ring, cur_td, false); + + /* + * The event handler won't see a completion for this TD anymore, + * so remove it from the endpoint ring's TD list. + */ + list_del_init(&cur_td->td_list); + ep_ring->num_tds--; + pep->stream_info.td_count--; + + /* + * During disconnecting all endpoint will be disabled so we don't + * have to worry about updating dequeue pointer. 
+ */ + if (pdev->cdnsp_state & CDNSP_STATE_DISCONNECT_PENDING) { + status = -ESHUTDOWN; + ret = cdnsp_cmd_set_deq(pdev, pep, &deq_state); + } + + cdnsp_unmap_td_bounce_buffer(pdev, ep_ring, cur_td); + cdnsp_gadget_giveback(pep, cur_td->preq, status); + + return ret; +} + +static int cdnsp_update_port_id(struct cdnsp_device *pdev, u32 port_id) +{ + struct cdnsp_port *port = pdev->active_port; + u8 old_port = 0; + + if (port && port->port_num == port_id) + return 0; + + if (port) + old_port = port->port_num; + + if (port_id == pdev->usb2_port.port_num) { + port = &pdev->usb2_port; + } else if (port_id == pdev->usb3_port.port_num) { + port = &pdev->usb3_port; + } else { + dev_err(pdev->dev, "Port event with invalid port ID %d\n", + port_id); + return -EINVAL; + } + + if (port_id != old_port) { + cdnsp_disable_slot(pdev); + pdev->active_port = port; + cdnsp_enable_slot(pdev); + } + + if (port_id == pdev->usb2_port.port_num) + cdnsp_set_usb2_hardware_lpm(pdev, NULL, 1); + else + writel(PORT_U1_TIMEOUT(1) | PORT_U2_TIMEOUT(1), + &pdev->usb3_port.regs->portpmsc); + + return 0; +} + +static void cdnsp_handle_port_status(struct cdnsp_device *pdev, + union cdnsp_trb *event) +{ + struct cdnsp_port_regs __iomem *port_regs; + u32 portsc, cmd_regs; + bool port2 = false; + u32 link_state; + u32 port_id; + + /* Port status change events always have a successful completion code */ + if (GET_COMP_CODE(le32_to_cpu(event->generic.field[2])) != COMP_SUCCESS) + dev_err(pdev->dev, "ERR: incorrect PSC event\n"); + + port_id = GET_PORT_ID(le32_to_cpu(event->generic.field[0])); + + if (cdnsp_update_port_id(pdev, port_id)) + goto cleanup; + + port_regs = pdev->active_port->regs; + + if (port_id == pdev->usb2_port.port_num) + port2 = true; + +new_event: + portsc = readl(&port_regs->portsc); + writel(cdnsp_port_state_to_neutral(portsc) | + (portsc & PORT_CHANGE_BITS), &port_regs->portsc); + + trace_cdnsp_handle_port_status(pdev->active_port->port_num, portsc); + + pdev->gadget.speed = cdnsp_port_speed(portsc); + link_state = portsc & PORT_PLS_MASK; + + /* Port Link State change detected. */ + if ((portsc & PORT_PLC)) { + if (!(pdev->cdnsp_state & CDNSP_WAKEUP_PENDING) && + link_state == XDEV_RESUME) { + cmd_regs = readl(&pdev->op_regs->command); + if (!(cmd_regs & CMD_R_S)) + goto cleanup; + + if (DEV_SUPERSPEED_ANY(portsc)) { + cdnsp_set_link_state(pdev, &port_regs->portsc, + XDEV_U0); + + cdnsp_resume_gadget(pdev); + } + } + + if ((pdev->cdnsp_state & CDNSP_WAKEUP_PENDING) && + link_state == XDEV_U0) { + pdev->cdnsp_state &= ~CDNSP_WAKEUP_PENDING; + + cdnsp_force_header_wakeup(pdev, 1); + cdnsp_ring_cmd_db(pdev); + cdnsp_wait_for_cmd_compl(pdev); + } + + if (link_state == XDEV_U0 && pdev->link_state == XDEV_U3 && + !DEV_SUPERSPEED_ANY(portsc)) + cdnsp_resume_gadget(pdev); + + if (link_state == XDEV_U3 && pdev->link_state != XDEV_U3) + cdnsp_suspend_gadget(pdev); + + pdev->link_state = link_state; + } + + if (portsc & PORT_CSC) { + /* Detach device. */ + if (pdev->gadget.connected && !(portsc & PORT_CONNECT)) + cdnsp_disconnect_gadget(pdev); + + /* Attach device. */ + if (portsc & PORT_CONNECT) { + if (!port2) + cdnsp_irq_reset(pdev); + + usb_gadget_set_state(&pdev->gadget, USB_STATE_ATTACHED); + } + } + + /* Port reset. 
*/ + if ((portsc & (PORT_RC | PORT_WRC)) && (portsc & PORT_CONNECT)) { + cdnsp_irq_reset(pdev); + pdev->u1_allowed = 0; + pdev->u2_allowed = 0; + pdev->may_wakeup = 0; + } + + if (portsc & PORT_CEC) + dev_err(pdev->dev, "Port Over Current detected\n"); + + if (portsc & PORT_CEC) + dev_err(pdev->dev, "Port Configure Error detected\n"); + + if (readl(&port_regs->portsc) & PORT_CHANGE_BITS) + goto new_event; + +cleanup: + cdnsp_inc_deq(pdev, pdev->event_ring); +} + +static void cdnsp_td_cleanup(struct cdnsp_device *pdev, + struct cdnsp_td *td, + struct cdnsp_ring *ep_ring, + int *status) +{ + struct cdnsp_request *preq = td->preq; + + /* if a bounce buffer was used to align this td then unmap it */ + cdnsp_unmap_td_bounce_buffer(pdev, ep_ring, td); + + /* + * If the controller said we transferred more data than the buffer + * length, Play it safe and say we didn't transfer anything. + */ + if (preq->request.actual > preq->request.length) { + preq->request.actual = 0; + *status = 0; + } + + list_del_init(&td->td_list); + ep_ring->num_tds--; + preq->pep->stream_info.td_count--; + + cdnsp_gadget_giveback(preq->pep, preq, *status); +} + +static void cdnsp_finish_td(struct cdnsp_device *pdev, + struct cdnsp_td *td, + struct cdnsp_transfer_event *event, + struct cdnsp_ep *ep, + int *status) +{ + struct cdnsp_ring *ep_ring; + u32 trb_comp_code; + + ep_ring = cdnsp_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer)); + trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len)); + + if (trb_comp_code == COMP_STOPPED_LENGTH_INVALID || + trb_comp_code == COMP_STOPPED || + trb_comp_code == COMP_STOPPED_SHORT_PACKET) { + /* + * The Endpoint Stop Command completion will take care of any + * stopped TDs. A stopped TD may be restarted, so don't update + * the ring dequeue pointer or take this TD off any lists yet. + */ + return; + } + + /* Update ring dequeue pointer */ + while (ep_ring->dequeue != td->last_trb) + cdnsp_inc_deq(pdev, ep_ring); + + cdnsp_inc_deq(pdev, ep_ring); + + cdnsp_td_cleanup(pdev, td, ep_ring, status); +} + +/* sum trb lengths from ring dequeue up to stop_trb, _excluding_ stop_trb */ +static int cdnsp_sum_trb_lengths(struct cdnsp_device *pdev, + struct cdnsp_ring *ring, + union cdnsp_trb *stop_trb) +{ + struct cdnsp_segment *seg = ring->deq_seg; + union cdnsp_trb *trb = ring->dequeue; + u32 sum; + + for (sum = 0; trb != stop_trb; cdnsp_next_trb(pdev, ring, &seg, &trb)) { + if (!cdnsp_trb_is_noop(trb) && !cdnsp_trb_is_link(trb)) + sum += TRB_LEN(le32_to_cpu(trb->generic.field[2])); + } + return sum; +} + +static int cdnsp_giveback_first_trb(struct cdnsp_device *pdev, + struct cdnsp_ep *pep, + unsigned int stream_id, + int start_cycle, + struct cdnsp_generic_trb *start_trb) +{ + /* + * Pass all the TRBs to the hardware at once and make sure this write + * isn't reordered. + */ + wmb(); + + if (start_cycle) + start_trb->field[3] |= cpu_to_le32(start_cycle); + else + start_trb->field[3] &= cpu_to_le32(~TRB_CYCLE); + + if ((pep->ep_state & EP_HAS_STREAMS) && + !pep->stream_info.first_prime_det) { + trace_cdnsp_wait_for_prime(pep, stream_id); + return 0; + } + + return cdnsp_ring_ep_doorbell(pdev, pep, stream_id); +} + +/* + * Process control tds, update USB request status and actual_length. 
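+ *
+ * (For a three-stage control transfer the data-stage event records
+ * actual = length - remaining and sets request_length_set, so the later
+ * status-stage event does not overwrite it.)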
+ */ +static void cdnsp_process_ctrl_td(struct cdnsp_device *pdev, + struct cdnsp_td *td, + union cdnsp_trb *event_trb, + struct cdnsp_transfer_event *event, + struct cdnsp_ep *pep, + int *status) +{ + struct cdnsp_ring *ep_ring; + u32 remaining; + u32 trb_type; + + trb_type = TRB_FIELD_TO_TYPE(le32_to_cpu(event_trb->generic.field[3])); + ep_ring = cdnsp_dma_to_transfer_ring(pep, le64_to_cpu(event->buffer)); + remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)); + + /* + * if on data stage then update the actual_length of the USB + * request and flag it as set, so it won't be overwritten in the event + * for the last TRB. + */ + if (trb_type == TRB_DATA) { + td->request_length_set = true; + td->preq->request.actual = td->preq->request.length - remaining; + } + + /* at status stage */ + if (!td->request_length_set) + td->preq->request.actual = td->preq->request.length; + + if (pdev->ep0_stage == CDNSP_DATA_STAGE && pep->number == 0 && + pdev->three_stage_setup) { + td = list_entry(ep_ring->td_list.next, struct cdnsp_td, + td_list); + pdev->ep0_stage = CDNSP_STATUS_STAGE; + + cdnsp_giveback_first_trb(pdev, pep, 0, ep_ring->cycle_state, + &td->last_trb->generic); + return; + } + + cdnsp_finish_td(pdev, td, event, pep, status); +} + +/* + * Process isochronous tds, update usb request status and actual_length. + */ +static void cdnsp_process_isoc_td(struct cdnsp_device *pdev, + struct cdnsp_td *td, + union cdnsp_trb *ep_trb, + struct cdnsp_transfer_event *event, + struct cdnsp_ep *pep, + int status) +{ + struct cdnsp_request *preq = td->preq; + u32 remaining, requested, ep_trb_len; + bool sum_trbs_for_length = false; + struct cdnsp_ring *ep_ring; + u32 trb_comp_code; + u32 td_length; + + ep_ring = cdnsp_dma_to_transfer_ring(pep, le64_to_cpu(event->buffer)); + trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len)); + remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)); + ep_trb_len = TRB_LEN(le32_to_cpu(ep_trb->generic.field[2])); + + requested = preq->request.length; + + /* handle completion code */ + switch (trb_comp_code) { + case COMP_SUCCESS: + preq->request.status = 0; + break; + case COMP_SHORT_PACKET: + preq->request.status = 0; + sum_trbs_for_length = true; + break; + case COMP_ISOCH_BUFFER_OVERRUN: + case COMP_BABBLE_DETECTED_ERROR: + preq->request.status = -EOVERFLOW; + break; + case COMP_STOPPED: + sum_trbs_for_length = true; + break; + case COMP_STOPPED_SHORT_PACKET: + /* field normally containing residue now contains transferred */ + preq->request.status = 0; + requested = remaining; + break; + case COMP_STOPPED_LENGTH_INVALID: + requested = 0; + remaining = 0; + break; + default: + sum_trbs_for_length = true; + preq->request.status = -1; + break; + } + + if (sum_trbs_for_length) { + td_length = cdnsp_sum_trb_lengths(pdev, ep_ring, ep_trb); + td_length += ep_trb_len - remaining; + } else { + td_length = requested; + } + + td->preq->request.actual += td_length; + + cdnsp_finish_td(pdev, td, event, pep, &status); +} + +static void cdnsp_skip_isoc_td(struct cdnsp_device *pdev, + struct cdnsp_td *td, + struct cdnsp_transfer_event *event, + struct cdnsp_ep *pep, + int status) +{ + struct cdnsp_ring *ep_ring; + + ep_ring = cdnsp_dma_to_transfer_ring(pep, le64_to_cpu(event->buffer)); + td->preq->request.status = -EXDEV; + td->preq->request.actual = 0; + + /* Update ring dequeue pointer */ + while (ep_ring->dequeue != td->last_trb) + cdnsp_inc_deq(pdev, ep_ring); + + cdnsp_inc_deq(pdev, ep_ring); + + cdnsp_td_cleanup(pdev, td, ep_ring, &status); +} + +/* + * 
Process bulk and interrupt tds, update usb request status and actual_length. + */ +static void cdnsp_process_bulk_intr_td(struct cdnsp_device *pdev, + struct cdnsp_td *td, + union cdnsp_trb *ep_trb, + struct cdnsp_transfer_event *event, + struct cdnsp_ep *ep, + int *status) +{ + u32 remaining, requested, ep_trb_len; + struct cdnsp_ring *ep_ring; + u32 trb_comp_code; + + ep_ring = cdnsp_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer)); + trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len)); + remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)); + ep_trb_len = TRB_LEN(le32_to_cpu(ep_trb->generic.field[2])); + requested = td->preq->request.length; + + switch (trb_comp_code) { + case COMP_SUCCESS: + case COMP_SHORT_PACKET: + *status = 0; + break; + case COMP_STOPPED_SHORT_PACKET: + td->preq->request.actual = remaining; + goto finish_td; + case COMP_STOPPED_LENGTH_INVALID: + /* Stopped on ep trb with invalid length, exclude it. */ + ep_trb_len = 0; + remaining = 0; + break; + } + + if (ep_trb == td->last_trb) + ep_trb_len = requested - remaining; + else + ep_trb_len = cdnsp_sum_trb_lengths(pdev, ep_ring, ep_trb) + + ep_trb_len - remaining; + td->preq->request.actual = ep_trb_len; + +finish_td: + ep->stream_info.drbls_count--; + + cdnsp_finish_td(pdev, td, event, ep, status); +} + +static void cdnsp_handle_tx_nrdy(struct cdnsp_device *pdev, + struct cdnsp_transfer_event *event) +{ + struct cdnsp_generic_trb *generic; + struct cdnsp_ring *ep_ring; + struct cdnsp_ep *pep; + int cur_stream; + int ep_index; + int host_sid; + int dev_sid; + + generic = (struct cdnsp_generic_trb *)event; + ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1; + dev_sid = TRB_TO_DEV_STREAM(le32_to_cpu(generic->field[0])); + host_sid = TRB_TO_HOST_STREAM(le32_to_cpu(generic->field[2])); + + pep = &pdev->eps[ep_index]; + + if (!(pep->ep_state & EP_HAS_STREAMS)) + return; + + if (host_sid == STREAM_PRIME_ACK) { + pep->stream_info.first_prime_det = 1; + for (cur_stream = 1; cur_stream < pep->stream_info.num_streams; + cur_stream++) { + ep_ring = pep->stream_info.stream_rings[cur_stream]; + ep_ring->stream_active = 1; + ep_ring->stream_rejected = 0; + } + } + + if (host_sid == STREAM_REJECTED) { + struct cdnsp_td *td, *td_temp; + + pep->stream_info.drbls_count--; + ep_ring = pep->stream_info.stream_rings[dev_sid]; + ep_ring->stream_active = 0; + ep_ring->stream_rejected = 1; + + list_for_each_entry_safe(td, td_temp, &ep_ring->td_list, + td_list) { + td->drbl = 0; + } + } + + cdnsp_ring_doorbell_for_active_rings(pdev, pep); +} + +/* + * If this function returns an error condition, it means it got a Transfer + * event with a corrupted TRB DMA address or endpoint is disabled. 
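+ * Either way the caller sees -EINVAL; on the err_out path below the raw
+ * event TRB is additionally dumped to the log.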
+ */
+static int cdnsp_handle_tx_event(struct cdnsp_device *pdev,
+				 struct cdnsp_transfer_event *event)
+{
+	const struct usb_endpoint_descriptor *desc;
+	bool handling_skipped_tds = false;
+	struct cdnsp_segment *ep_seg;
+	struct cdnsp_ring *ep_ring;
+	int status = -EINPROGRESS;
+	union cdnsp_trb *ep_trb;
+	dma_addr_t ep_trb_dma;
+	struct cdnsp_ep *pep;
+	struct cdnsp_td *td;
+	u32 trb_comp_code;
+	int invalidate;
+	int ep_index;
+
+	invalidate = le32_to_cpu(event->flags) & TRB_EVENT_INVALIDATE;
+	ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
+	trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
+	ep_trb_dma = le64_to_cpu(event->buffer);
+
+	pep = &pdev->eps[ep_index];
+	ep_ring = cdnsp_dma_to_transfer_ring(pep, le64_to_cpu(event->buffer));
+
+	/*
+	 * If the device is disconnected, all requests will be dequeued by the
+	 * upper layers as part of the disconnect sequence. Don't handle such
+	 * an event here, to avoid racing with them.
+	 */
+	if (invalidate || !pdev->gadget.connected)
+		goto cleanup;
+
+	if (GET_EP_CTX_STATE(pep->out_ctx) == EP_STATE_DISABLED) {
+		trace_cdnsp_ep_disabled(pep->out_ctx);
+		goto err_out;
+	}
+
+	/* Some transfer events don't always point to a TRB. */
+	if (!ep_ring) {
+		switch (trb_comp_code) {
+		case COMP_INVALID_STREAM_TYPE_ERROR:
+		case COMP_INVALID_STREAM_ID_ERROR:
+		case COMP_RING_UNDERRUN:
+		case COMP_RING_OVERRUN:
+			goto cleanup;
+		default:
+			dev_err(pdev->dev, "ERROR: %s event for unknown ring\n",
+				pep->name);
+			goto err_out;
+		}
+	}
+
+	/* Look for some error cases that need special treatment. */
+	switch (trb_comp_code) {
+	case COMP_BABBLE_DETECTED_ERROR:
+		status = -EOVERFLOW;
+		break;
+	case COMP_RING_UNDERRUN:
+	case COMP_RING_OVERRUN:
+		/*
+		 * When the Isoch ring is empty, the controller will generate
+		 * a Ring Overrun Event for an IN Isoch endpoint or a Ring
+		 * Underrun Event for an OUT Isoch endpoint.
+		 */
+		goto cleanup;
+	case COMP_MISSED_SERVICE_ERROR:
+		/*
+		 * When a Missed Service Error is encountered, one or more
+		 * isoc TDs may have been missed by the controller.
+		 * Set the skip flag of the endpoint; complete the missed TDs
+		 * as short transfers when processing the ep_ring next time.
+		 */
+		pep->skip = true;
+		break;
+	}
+
+	do {
+		/*
+		 * This TRB should be in the TD at the head of this ring's TD
+		 * list.
+		 */
+		if (list_empty(&ep_ring->td_list)) {
+			/*
+			 * Don't print warnings if it's due to a stopped
+			 * endpoint generating an extra completion event, or
+			 * an event for the last TRB of a short TD we already
+			 * got a short event for.
+			 * The short TD is already removed from the TD list.
+			 */
+			if (!(trb_comp_code == COMP_STOPPED ||
+			      trb_comp_code == COMP_STOPPED_LENGTH_INVALID ||
+			      ep_ring->last_td_was_short))
+				trace_cdnsp_trb_without_td(ep_ring,
+					(struct cdnsp_generic_trb *)event);
+
+			if (pep->skip) {
+				pep->skip = false;
+				trace_cdnsp_ep_list_empty_with_skip(pep, 0);
+			}
+
+			goto cleanup;
+		}
+
+		td = list_entry(ep_ring->td_list.next, struct cdnsp_td,
+				td_list);
+
+		/* Is this a TRB in the currently executing TD? */
+		ep_seg = cdnsp_trb_in_td(pdev, ep_ring->deq_seg,
+					 ep_ring->dequeue, td->last_trb,
+					 ep_trb_dma);
+
+		/*
+		 * Skip the Force Stopped Event. The event_trb (ep_trb_dma) of
+		 * the FSE is not in the current TD pointed to by
+		 * ep_ring->dequeue, because the hardware dequeue pointer is
+		 * still at the previous TRB of the current TD. The previous
+		 * TRB may be a Link TRB or the last TRB of the previous TD.
+		 * The command completion handler will take care of the rest.
+		 */
+		if (!ep_seg && (trb_comp_code == COMP_STOPPED ||
+				trb_comp_code == COMP_STOPPED_LENGTH_INVALID)) {
+			pep->skip = false;
+			goto cleanup;
+		}
+
+		desc = td->preq->pep->endpoint.desc;
+		if (!ep_seg) {
+			if (!pep->skip || !usb_endpoint_xfer_isoc(desc)) {
+				/* Something is busted, give up! */
+				dev_err(pdev->dev,
+					"ERROR Transfer event TRB DMA ptr not "
+					"part of current TD ep_index %d "
+					"comp_code %u\n", ep_index,
+					trb_comp_code);
+				return -EINVAL;
+			}
+
+			cdnsp_skip_isoc_td(pdev, td, event, pep, status);
+			goto cleanup;
+		}
+
+		if (trb_comp_code == COMP_SHORT_PACKET)
+			ep_ring->last_td_was_short = true;
+		else
+			ep_ring->last_td_was_short = false;
+
+		if (pep->skip) {
+			pep->skip = false;
+			cdnsp_skip_isoc_td(pdev, td, event, pep, status);
+			goto cleanup;
+		}
+
+		ep_trb = &ep_seg->trbs[(ep_trb_dma - ep_seg->dma)
+				       / sizeof(*ep_trb)];
+
+		trace_cdnsp_handle_transfer(ep_ring,
+					    (struct cdnsp_generic_trb *)ep_trb);
+
+		if (cdnsp_trb_is_noop(ep_trb))
+			goto cleanup;
+
+		if (usb_endpoint_xfer_control(desc))
+			cdnsp_process_ctrl_td(pdev, td, ep_trb, event, pep,
+					      &status);
+		else if (usb_endpoint_xfer_isoc(desc))
+			cdnsp_process_isoc_td(pdev, td, ep_trb, event, pep,
+					      status);
+		else
+			cdnsp_process_bulk_intr_td(pdev, td, ep_trb, event, pep,
+						   &status);
+cleanup:
+		handling_skipped_tds = pep->skip;
+
+		/*
+		 * Do not update the event ring dequeue pointer if we're in a
+		 * loop processing missed TDs.
+		 */
+		if (!handling_skipped_tds)
+			cdnsp_inc_deq(pdev, pdev->event_ring);
+
+		/*
+		 * If pep->skip is set, it means there are missed TDs on the
+		 * endpoint ring that need to be taken care of.
+		 * Process them as short transfers until we reach the TD
+		 * pointed to by the event.
+		 */
+	} while (handling_skipped_tds);
+	return 0;
+
+err_out:
+	dev_err(pdev->dev, "@%016llx %08x %08x %08x %08x\n",
+		(unsigned long long)
+		cdnsp_trb_virt_to_dma(pdev->event_ring->deq_seg,
+				      pdev->event_ring->dequeue),
+		lower_32_bits(le64_to_cpu(event->buffer)),
+		upper_32_bits(le64_to_cpu(event->buffer)),
+		le32_to_cpu(event->transfer_len),
+		le32_to_cpu(event->flags));
+	return -EINVAL;
+}
+
+/*
+ * This function handles all events on the event ring.
+ * Returns true if there are possibly more events to process (the caller
+ * should call this function again), otherwise false when done.
+ */
+static bool cdnsp_handle_event(struct cdnsp_device *pdev)
+{
+	unsigned int comp_code;
+	union cdnsp_trb *event;
+	bool update_ptrs = true;
+	u32 cycle_bit;
+	int ret = 0;
+	u32 flags;
+
+	event = pdev->event_ring->dequeue;
+	flags = le32_to_cpu(event->event_cmd.flags);
+	cycle_bit = (flags & TRB_CYCLE);
+
+	/* Does the controller or driver own the TRB? */
+	if (cycle_bit != pdev->event_ring->cycle_state)
+		return false;
+
+	trace_cdnsp_handle_event(pdev->event_ring, &event->generic);
+
+	/*
+	 * Barrier between reading the TRB_CYCLE (valid) flag above and any
+	 * reads of the event's flags/data below.
+	 */
+	rmb();
+
+	switch (flags & TRB_TYPE_BITMASK) {
+	case TRB_TYPE(TRB_COMPLETION):
+		/*
+		 * Commands can't be handled in interrupt context, so just
+		 * increment the command ring dequeue pointer here.
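+		 * The initiator of the command (for example
+		 * cdnsp_cmd_stop_ep()) observes the completion from process
+		 * context via cdnsp_wait_for_cmd_compl() after ringing the
+		 * command doorbell.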
+ */ + cdnsp_inc_deq(pdev, pdev->cmd_ring); + break; + case TRB_TYPE(TRB_PORT_STATUS): + cdnsp_handle_port_status(pdev, event); + update_ptrs = false; + break; + case TRB_TYPE(TRB_TRANSFER): + ret = cdnsp_handle_tx_event(pdev, &event->trans_event); + if (ret >= 0) + update_ptrs = false; + break; + case TRB_TYPE(TRB_SETUP): + pdev->ep0_stage = CDNSP_SETUP_STAGE; + pdev->setup_id = TRB_SETUPID_TO_TYPE(flags); + pdev->setup_speed = TRB_SETUP_SPEEDID(flags); + pdev->setup = *((struct usb_ctrlrequest *) + &event->trans_event.buffer); + + cdnsp_setup_analyze(pdev); + break; + case TRB_TYPE(TRB_ENDPOINT_NRDY): + cdnsp_handle_tx_nrdy(pdev, &event->trans_event); + break; + case TRB_TYPE(TRB_HC_EVENT): { + comp_code = GET_COMP_CODE(le32_to_cpu(event->generic.field[2])); + + switch (comp_code) { + case COMP_EVENT_RING_FULL_ERROR: + dev_err(pdev->dev, "Event Ring Full\n"); + break; + default: + dev_err(pdev->dev, "Controller error code 0x%02x\n", + comp_code); + } + + break; + } + case TRB_TYPE(TRB_MFINDEX_WRAP): + case TRB_TYPE(TRB_DRB_OVERFLOW): + break; + default: + dev_warn(pdev->dev, "ERROR unknown event type %ld\n", + TRB_FIELD_TO_TYPE(flags)); + } + + if (update_ptrs) + /* Update SW event ring dequeue pointer. */ + cdnsp_inc_deq(pdev, pdev->event_ring); + + /* + * Caller will call us again to check if there are more items + * on the event ring. + */ + return true; +} + +irqreturn_t cdnsp_thread_irq_handler(int irq, void *data) +{ + struct cdnsp_device *pdev = (struct cdnsp_device *)data; + union cdnsp_trb *event_ring_deq; + int counter = 0; + + spin_lock(&pdev->lock); + + if (pdev->cdnsp_state & (CDNSP_STATE_HALTED | CDNSP_STATE_DYING)) { + cdnsp_died(pdev); + spin_unlock(&pdev->lock); + return IRQ_HANDLED; + } + + event_ring_deq = pdev->event_ring->dequeue; + + while (cdnsp_handle_event(pdev)) { + if (++counter >= TRBS_PER_EV_DEQ_UPDATE) { + cdnsp_update_erst_dequeue(pdev, event_ring_deq, 0); + event_ring_deq = pdev->event_ring->dequeue; + counter = 0; + } + } + + cdnsp_update_erst_dequeue(pdev, event_ring_deq, 1); + + spin_unlock(&pdev->lock); + + return IRQ_HANDLED; +} + +irqreturn_t cdnsp_irq_handler(int irq, void *priv) +{ + struct cdnsp_device *pdev = (struct cdnsp_device *)priv; + u32 irq_pending; + u32 status; + + status = readl(&pdev->op_regs->status); + + if (status == ~(u32)0) { + cdnsp_died(pdev); + return IRQ_HANDLED; + } + + if (!(status & STS_EINT)) + return IRQ_NONE; + + writel(status | STS_EINT, &pdev->op_regs->status); + irq_pending = readl(&pdev->ir_set->irq_pending); + irq_pending |= IMAN_IP; + writel(irq_pending, &pdev->ir_set->irq_pending); + + if (status & STS_FATAL) { + cdnsp_died(pdev); + return IRQ_HANDLED; + } + + return IRQ_WAKE_THREAD; +} + +/* + * Generic function for queuing a TRB on a ring. + * The caller must have checked to make sure there's room on the ring. + * + * @more_trbs_coming: Will you enqueue more TRBs before setting doorbell? + */ +static void cdnsp_queue_trb(struct cdnsp_device *pdev, struct cdnsp_ring *ring, + bool more_trbs_coming, u32 field1, u32 field2, + u32 field3, u32 field4) +{ + struct cdnsp_generic_trb *trb; + + trb = &ring->enqueue->generic; + + trb->field[0] = cpu_to_le32(field1); + trb->field[1] = cpu_to_le32(field2); + trb->field[2] = cpu_to_le32(field3); + trb->field[3] = cpu_to_le32(field4); + + trace_cdnsp_queue_trb(ring, trb); + cdnsp_inc_enq(pdev, ring, more_trbs_coming); +} + +/* + * Does various checks on the endpoint ring, and makes it ready to + * queue num_trbs. 
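+ * Concretely: the endpoint state must allow queuing (stopped, running or
+ * halted), the ring is expanded when num_trbs do not fit in the free
+ * space, and any link TRBs at the enqueue position are given TRB_CHAIN
+ * and a valid cycle bit before being skipped over.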
+ */
+static int cdnsp_prepare_ring(struct cdnsp_device *pdev,
+			      struct cdnsp_ring *ep_ring,
+			      u32 ep_state, unsigned int num_trbs,
+			      gfp_t mem_flags)
+{
+	unsigned int num_trbs_needed;
+
+	/* Make sure the endpoint has been added to the controller's schedule. */
+	switch (ep_state) {
+	case EP_STATE_STOPPED:
+	case EP_STATE_RUNNING:
+	case EP_STATE_HALTED:
+		break;
+	default:
+		dev_err(pdev->dev, "ERROR: incorrect endpoint state\n");
+		return -EINVAL;
+	}
+
+	while (1) {
+		if (cdnsp_room_on_ring(pdev, ep_ring, num_trbs))
+			break;
+
+		trace_cdnsp_no_room_on_ring("try ring expansion");
+
+		num_trbs_needed = num_trbs - ep_ring->num_trbs_free;
+		if (cdnsp_ring_expansion(pdev, ep_ring, num_trbs_needed,
+					 mem_flags)) {
+			dev_err(pdev->dev, "Ring expansion failed\n");
+			return -ENOMEM;
+		}
+	}
+
+	while (cdnsp_trb_is_link(ep_ring->enqueue)) {
+		ep_ring->enqueue->link.control |= cpu_to_le32(TRB_CHAIN);
+		/* The cycle bit must be set as the last operation. */
+		wmb();
+		ep_ring->enqueue->link.control ^= cpu_to_le32(TRB_CYCLE);
+
+		/* Toggle the cycle bit after the last ring segment. */
+		if (cdnsp_link_trb_toggles_cycle(ep_ring->enqueue))
+			ep_ring->cycle_state ^= 1;
+		ep_ring->enq_seg = ep_ring->enq_seg->next;
+		ep_ring->enqueue = ep_ring->enq_seg->trbs;
+	}
+	return 0;
+}
+
+static int cdnsp_prepare_transfer(struct cdnsp_device *pdev,
+				  struct cdnsp_request *preq,
+				  unsigned int num_trbs)
+{
+	struct cdnsp_ring *ep_ring;
+	int ret;
+
+	ep_ring = cdnsp_get_transfer_ring(pdev, preq->pep,
+					  preq->request.stream_id);
+	if (!ep_ring)
+		return -EINVAL;
+
+	ret = cdnsp_prepare_ring(pdev, ep_ring,
+				 GET_EP_CTX_STATE(preq->pep->out_ctx),
+				 num_trbs, GFP_ATOMIC);
+	if (ret)
+		return ret;
+
+	INIT_LIST_HEAD(&preq->td.td_list);
+	preq->td.preq = preq;
+
+	/* Add this TD to the tail of the endpoint ring's TD list. */
+	list_add_tail(&preq->td.td_list, &ep_ring->td_list);
+	ep_ring->num_tds++;
+	preq->pep->stream_info.td_count++;
+
+	preq->td.start_seg = ep_ring->enq_seg;
+	preq->td.first_trb = ep_ring->enqueue;
+
+	return 0;
+}
+
+static unsigned int cdnsp_count_trbs(u64 addr, u64 len)
+{
+	unsigned int num_trbs;
+
+	num_trbs = DIV_ROUND_UP(len + (addr & (TRB_MAX_BUFF_SIZE - 1)),
+				TRB_MAX_BUFF_SIZE);
+	if (num_trbs == 0)
+		num_trbs++;
+
+	return num_trbs;
+}
+
+static unsigned int count_trbs_needed(struct cdnsp_request *preq)
+{
+	return cdnsp_count_trbs(preq->request.dma, preq->request.length);
+}
+
+static unsigned int count_sg_trbs_needed(struct cdnsp_request *preq)
+{
+	unsigned int i, len, full_len, num_trbs = 0;
+	struct scatterlist *sg;
+
+	full_len = preq->request.length;
+
+	for_each_sg(preq->request.sg, sg, preq->request.num_sgs, i) {
+		len = sg_dma_len(sg);
+		num_trbs += cdnsp_count_trbs(sg_dma_address(sg), len);
+		len = min(len, full_len);
+		full_len -= len;
+		if (full_len == 0)
+			break;
+	}
+
+	return num_trbs;
+}
+
+static unsigned int count_isoc_trbs_needed(struct cdnsp_request *preq)
+{
+	return cdnsp_count_trbs(preq->request.dma, preq->request.length);
+}
+
+static void cdnsp_check_trb_math(struct cdnsp_request *preq, int running_total)
+{
+	if (running_total != preq->request.length)
+		dev_err(preq->pep->pdev->dev,
+			"%s - Miscalculated tx length, "
+			"queued %#x, asked for %#x (%d)\n",
+			preq->pep->name, running_total,
+			preq->request.length, preq->request.actual);
+}
+
+/*
+ * TD size is the number of max-packet-sized packets remaining in the TD
+ * (*not* including this TRB).
+ *
+ * Total TD packet count = total_packet_count =
+ *	DIV_ROUND_UP(TD size in bytes / wMaxPacketSize)
+ *
+ * Packets transferred up to and including this TRB = packets_transferred =
+ *	rounddown(total bytes transferred including this TRB / wMaxPacketSize)
+ *
+ * TD size = total_packet_count - packets_transferred
+ *
+ * It must fit in bits 21:17, so it can't be bigger than 31.
+ * This is taken care of in the TRB_TD_SIZE() macro.
+ *
+ * The last TRB in a TD must have the TD size set to zero.
+ */
+static u32 cdnsp_td_remainder(struct cdnsp_device *pdev,
+			      int transferred,
+			      int trb_buff_len,
+			      unsigned int td_total_len,
+			      struct cdnsp_request *preq,
+			      bool more_trbs_coming)
+{
+	u32 maxp, total_packet_count;
+
+	/* One TRB with a zero-length data packet. */
+	if (!more_trbs_coming || (transferred == 0 && trb_buff_len == 0) ||
+	    trb_buff_len == td_total_len)
+		return 0;
+
+	maxp = usb_endpoint_maxp(preq->pep->endpoint.desc);
+	total_packet_count = DIV_ROUND_UP(td_total_len, maxp);
+
+	/* Queuing functions don't count the current TRB into transferred. */
+	return (total_packet_count - ((transferred + trb_buff_len) / maxp));
+}
+
+static int cdnsp_align_td(struct cdnsp_device *pdev,
+			  struct cdnsp_request *preq, u32 enqd_len,
+			  u32 *trb_buff_len, struct cdnsp_segment *seg)
+{
+	struct device *dev = pdev->dev;
+	unsigned int unalign;
+	unsigned int max_pkt;
+	u32 new_buff_len;
+
+	max_pkt = usb_endpoint_maxp(preq->pep->endpoint.desc);
+	unalign = (enqd_len + *trb_buff_len) % max_pkt;
+
+	/* We got lucky; the last normal TRB's data on this segment is packet aligned. */
+	if (unalign == 0)
+		return 0;
+
+	/* Can the last normal TRB be made alignable by splitting it? */
+	if (*trb_buff_len > unalign) {
+		*trb_buff_len -= unalign;
+		trace_cdnsp_bounce_align_td_split(preq, *trb_buff_len,
+						  enqd_len, 0, unalign);
+		return 0;
+	}
+
+	/*
+	 * We want enqd_len + trb_buff_len to sum up to a number which is
+	 * divisible by the endpoint's wMaxPacketSize. IOW:
+	 * (size of currently enqueued TRBs + remainder) % wMaxPacketSize == 0.
+	 */
+	new_buff_len = max_pkt - (enqd_len % max_pkt);
+
+	if (new_buff_len > (preq->request.length - enqd_len))
+		new_buff_len = (preq->request.length - enqd_len);
+
+	/* Create an up to max_pkt sized bounce buffer pointed to by the last TRB. */
+	if (preq->direction) {
+		sg_pcopy_to_buffer(preq->request.sg,
+				   preq->request.num_mapped_sgs,
+				   seg->bounce_buf, new_buff_len, enqd_len);
+		seg->bounce_dma = dma_map_single(dev, seg->bounce_buf,
+						 max_pkt, DMA_TO_DEVICE);
+	} else {
+		seg->bounce_dma = dma_map_single(dev, seg->bounce_buf,
+						 max_pkt, DMA_FROM_DEVICE);
+	}
+
+	if (dma_mapping_error(dev, seg->bounce_dma)) {
+		/* Try without aligning. */
+		dev_warn(pdev->dev,
+			 "Failed mapping bounce buffer, not aligning\n");
+		return 0;
+	}
+
+	*trb_buff_len = new_buff_len;
+	seg->bounce_len = new_buff_len;
+	seg->bounce_offs = enqd_len;
+
+	trace_cdnsp_bounce_map(preq, new_buff_len, enqd_len, seg->bounce_dma,
+			       unalign);
+
+	/*
+	 * The bounce buffer was successfully aligned and seg->bounce_dma will
+	 * be used in the transfer TRB as the new transfer buffer address.
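+	 * Worked example (illustrative numbers): with max_pkt = 1024 and
+	 * enqd_len = 3000, new_buff_len = 1024 - (3000 % 1024) = 72, so the
+	 * final 72 bytes are staged through the bounce buffer and the data
+	 * queued so far ends on a packet boundary.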
+	 */
+	return 1;
+}
+
+int cdnsp_queue_bulk_tx(struct cdnsp_device *pdev, struct cdnsp_request *preq)
+{
+	unsigned int enqd_len, block_len, trb_buff_len, full_len;
+	unsigned int start_cycle, num_sgs = 0;
+	struct cdnsp_generic_trb *start_trb;
+	u32 field, length_field, remainder;
+	struct scatterlist *sg = NULL;
+	bool more_trbs_coming = true;
+	bool need_zero_pkt = false;
+	bool zero_len_trb = false;
+	struct cdnsp_ring *ring;
+	bool first_trb = true;
+	unsigned int num_trbs;
+	struct cdnsp_ep *pep;
+	u64 addr, send_addr;
+	int sent_len, ret;
+
+	ring = cdnsp_request_to_transfer_ring(pdev, preq);
+	if (!ring)
+		return -EINVAL;
+
+	full_len = preq->request.length;
+
+	if (preq->request.num_sgs) {
+		num_sgs = preq->request.num_sgs;
+		sg = preq->request.sg;
+		addr = (u64)sg_dma_address(sg);
+		block_len = sg_dma_len(sg);
+		num_trbs = count_sg_trbs_needed(preq);
+	} else {
+		num_trbs = count_trbs_needed(preq);
+		addr = (u64)preq->request.dma;
+		block_len = full_len;
+	}
+
+	pep = preq->pep;
+
+	/* Deal with request.zero - need one more TD/TRB. */
+	if (preq->request.zero && preq->request.length &&
+	    IS_ALIGNED(full_len, usb_endpoint_maxp(pep->endpoint.desc))) {
+		need_zero_pkt = true;
+		num_trbs++;
+	}
+
+	ret = cdnsp_prepare_transfer(pdev, preq, num_trbs);
+	if (ret)
+		return ret;
+
+	/*
+	 * Don't give the first TRB to the hardware (by toggling the cycle bit)
+	 * until we've finished creating all the other TRBs. The ring's cycle
+	 * state may change as we enqueue the other TRBs, so save it too.
+	 */
+	start_trb = &ring->enqueue->generic;
+	start_cycle = ring->cycle_state;
+	send_addr = addr;
+
+	/* Queue the TRBs, even if they are zero-length. */
+	for (enqd_len = 0; zero_len_trb || first_trb || enqd_len < full_len;
+	     enqd_len += trb_buff_len) {
+		field = TRB_TYPE(TRB_NORMAL);
+
+		/* A TRB buffer should not cross 64KB boundaries. */
+		trb_buff_len = TRB_BUFF_LEN_UP_TO_BOUNDARY(addr);
+		trb_buff_len = min(trb_buff_len, block_len);
+		if (enqd_len + trb_buff_len > full_len)
+			trb_buff_len = full_len - enqd_len;
+
+		/* Don't change the cycle bit of the first TRB until later. */
+		if (first_trb) {
+			first_trb = false;
+			if (start_cycle == 0)
+				field |= TRB_CYCLE;
+		} else {
+			field |= ring->cycle_state;
+		}
+
+		/*
+		 * Chain all the TRBs together; clear the chain bit in the last
+		 * TRB to indicate it's the last TRB in the chain.
+		 */
+		if (enqd_len + trb_buff_len < full_len || need_zero_pkt) {
+			field |= TRB_CHAIN;
+			if (cdnsp_trb_is_link(ring->enqueue + 1)) {
+				if (cdnsp_align_td(pdev, preq, enqd_len,
+						   &trb_buff_len,
+						   ring->enq_seg)) {
+					send_addr = ring->enq_seg->bounce_dma;
+					/* Assuming TD won't span 2 segs */
+					preq->td.bounce_seg = ring->enq_seg;
+				}
+			}
+		}
+
+		if (enqd_len + trb_buff_len >= full_len) {
+			if (need_zero_pkt && !zero_len_trb) {
+				/* Go around once more for the zero-length TRB. */
+				zero_len_trb = true;
+			} else {
+				zero_len_trb = false;
+				field &= ~TRB_CHAIN;
+				field |= TRB_IOC;
+				more_trbs_coming = false;
+				need_zero_pkt = false;
+				preq->td.last_trb = ring->enqueue;
+			}
+		}
+
+		/* Only set interrupt on short packet for OUT endpoints. */
+		if (!preq->direction)
+			field |= TRB_ISP;
+
+		/* Set the TRB length, TD size, and interrupter fields.
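+		 * For example (illustrative numbers): with full_len = 3000
+		 * and maxp = 512 (a 6-packet TD), after a first 1024-byte
+		 * TRB the TD size written here is 6 - 1024/512 = 4 packets
+		 * remaining.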
*/ + remainder = cdnsp_td_remainder(pdev, enqd_len, trb_buff_len, + full_len, preq, + more_trbs_coming); + + length_field = TRB_LEN(trb_buff_len) | TRB_TD_SIZE(remainder) | + TRB_INTR_TARGET(0); + + cdnsp_queue_trb(pdev, ring, more_trbs_coming | need_zero_pkt, + lower_32_bits(send_addr), + upper_32_bits(send_addr), + length_field, + field); + + addr += trb_buff_len; + sent_len = trb_buff_len; + while (sg && sent_len >= block_len) { + /* New sg entry */ + --num_sgs; + sent_len -= block_len; + if (num_sgs != 0) { + sg = sg_next(sg); + block_len = sg_dma_len(sg); + addr = (u64)sg_dma_address(sg); + addr += sent_len; + } + } + block_len -= sent_len; + send_addr = addr; + } + + cdnsp_check_trb_math(preq, enqd_len); + ret = cdnsp_giveback_first_trb(pdev, pep, preq->request.stream_id, + start_cycle, start_trb); + + if (ret) + preq->td.drbl = 1; + + return 0; +} + +int cdnsp_queue_ctrl_tx(struct cdnsp_device *pdev, struct cdnsp_request *preq) +{ + u32 field, length_field, remainder; + struct cdnsp_ep *pep = preq->pep; + struct cdnsp_ring *ep_ring; + int num_trbs; + int ret; + + ep_ring = cdnsp_request_to_transfer_ring(pdev, preq); + if (!ep_ring) + return -EINVAL; + + /* 1 TRB for data, 1 for status */ + num_trbs = (pdev->three_stage_setup) ? 2 : 1; + + ret = cdnsp_prepare_transfer(pdev, preq, num_trbs); + if (ret) + return ret; + + /* If there's data, queue data TRBs */ + if (pdev->ep0_expect_in) + field = TRB_TYPE(TRB_DATA) | TRB_IOC; + else + field = TRB_ISP | TRB_TYPE(TRB_DATA) | TRB_IOC; + + if (preq->request.length > 0) { + remainder = cdnsp_td_remainder(pdev, 0, preq->request.length, + preq->request.length, preq, 1); + + length_field = TRB_LEN(preq->request.length) | + TRB_TD_SIZE(remainder) | TRB_INTR_TARGET(0); + + if (pdev->ep0_expect_in) + field |= TRB_DIR_IN; + + cdnsp_queue_trb(pdev, ep_ring, true, + lower_32_bits(preq->request.dma), + upper_32_bits(preq->request.dma), length_field, + field | ep_ring->cycle_state | + TRB_SETUPID(pdev->setup_id) | + pdev->setup_speed); + + pdev->ep0_stage = CDNSP_DATA_STAGE; + } + + /* Save the DMA address of the last TRB in the TD. */ + preq->td.last_trb = ep_ring->enqueue; + + /* Queue status TRB. 
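+	 * The SETUPSTAT field below selects the handshake for the status
+	 * stage: TRB_SETUPSTAT_STALL when EP0 was halted in response to the
+	 * request, TRB_SETUPSTAT_ACK otherwise.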
*/ + if (preq->request.length == 0) + field = ep_ring->cycle_state; + else + field = (ep_ring->cycle_state ^ 1); + + if (preq->request.length > 0 && pdev->ep0_expect_in) + field |= TRB_DIR_IN; + + if (pep->ep_state & EP0_HALTED_STATUS) { + pep->ep_state &= ~EP0_HALTED_STATUS; + field |= TRB_SETUPSTAT(TRB_SETUPSTAT_STALL); + } else { + field |= TRB_SETUPSTAT(TRB_SETUPSTAT_ACK); + } + + cdnsp_queue_trb(pdev, ep_ring, false, 0, 0, TRB_INTR_TARGET(0), + field | TRB_IOC | TRB_SETUPID(pdev->setup_id) | + TRB_TYPE(TRB_STATUS) | pdev->setup_speed); + + cdnsp_ring_ep_doorbell(pdev, pep, preq->request.stream_id); + + return 0; +} + +int cdnsp_cmd_stop_ep(struct cdnsp_device *pdev, struct cdnsp_ep *pep) +{ + u32 ep_state = GET_EP_CTX_STATE(pep->out_ctx); + int ret = 0; + + if (ep_state == EP_STATE_STOPPED || ep_state == EP_STATE_DISABLED) { + trace_cdnsp_ep_stopped_or_disabled(pep->out_ctx); + goto ep_stopped; + } + + cdnsp_queue_stop_endpoint(pdev, pep->idx); + cdnsp_ring_cmd_db(pdev); + ret = cdnsp_wait_for_cmd_compl(pdev); + + trace_cdnsp_handle_cmd_stop_ep(pep->out_ctx); + +ep_stopped: + pep->ep_state |= EP_STOPPED; + return ret; +} + +int cdnsp_cmd_flush_ep(struct cdnsp_device *pdev, struct cdnsp_ep *pep) +{ + int ret; + + cdnsp_queue_flush_endpoint(pdev, pep->idx); + cdnsp_ring_cmd_db(pdev); + ret = cdnsp_wait_for_cmd_compl(pdev); + + trace_cdnsp_handle_cmd_flush_ep(pep->out_ctx); + + return ret; +} + +/* + * The transfer burst count field of the isochronous TRB defines the number of + * bursts that are required to move all packets in this TD. Only SuperSpeed + * devices can burst up to bMaxBurst number of packets per service interval. + * This field is zero based, meaning a value of zero in the field means one + * burst. Basically, for everything but SuperSpeed devices, this field will be + * zero. + */ +static unsigned int cdnsp_get_burst_count(struct cdnsp_device *pdev, + struct cdnsp_request *preq, + unsigned int total_packet_count) +{ + unsigned int max_burst; + + if (pdev->gadget.speed < USB_SPEED_SUPER) + return 0; + + max_burst = preq->pep->endpoint.comp_desc->bMaxBurst; + return DIV_ROUND_UP(total_packet_count, max_burst + 1) - 1; +} + +/* + * Returns the number of packets in the last "burst" of packets. This field is + * valid for all speeds of devices. USB 2.0 devices can only do one "burst", so + * the last burst packet count is equal to the total number of packets in the + * TD. SuperSpeed endpoints can have up to 3 bursts. All but the last burst + * must contain (bMaxBurst + 1) number of packets, but the last burst can + * contain 1 to (bMaxBurst + 1) packets. + */ +static unsigned int + cdnsp_get_last_burst_packet_count(struct cdnsp_device *pdev, + struct cdnsp_request *preq, + unsigned int total_packet_count) +{ + unsigned int max_burst; + unsigned int residue; + + if (pdev->gadget.speed >= USB_SPEED_SUPER) { + /* bMaxBurst is zero based: 0 means 1 packet per burst. */ + max_burst = preq->pep->endpoint.comp_desc->bMaxBurst; + residue = total_packet_count % (max_burst + 1); + + /* + * If residue is zero, the last burst contains (max_burst + 1) + * number of packets, but the TLBPC field is zero-based. 
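+ * For example (illustrative numbers): total_packet_count = 7 with
+ * bMaxBurst = 2 gives bursts of three packets; 7 % 3 = 1, so the last
+ * burst holds one packet and TLBPC is 0. With total_packet_count = 6,
+ * the residue is 0 and TLBPC is max_burst = 2.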
+		 */
+		if (residue == 0)
+			return max_burst;
+
+		return residue - 1;
+	}
+	if (total_packet_count == 0)
+		return 0;
+
+	return total_packet_count - 1;
+}
+
+/* Queue function for an isoc transfer. */
+static int cdnsp_queue_isoc_tx(struct cdnsp_device *pdev,
+			       struct cdnsp_request *preq)
+{
+	int trb_buff_len, td_len, td_remain_len, ret;
+	unsigned int burst_count, last_burst_pkt;
+	unsigned int total_pkt_count, max_pkt;
+	struct cdnsp_generic_trb *start_trb;
+	bool more_trbs_coming = true;
+	struct cdnsp_ring *ep_ring;
+	int running_total = 0;
+	u32 field, length_field;
+	int start_cycle;
+	int trbs_per_td;
+	u64 addr;
+	int i;
+
+	ep_ring = preq->pep->ring;
+	start_trb = &ep_ring->enqueue->generic;
+	start_cycle = ep_ring->cycle_state;
+	td_len = preq->request.length;
+	addr = (u64)preq->request.dma;
+	td_remain_len = td_len;
+
+	max_pkt = usb_endpoint_maxp(preq->pep->endpoint.desc);
+	total_pkt_count = DIV_ROUND_UP(td_len, max_pkt);
+
+	/* A zero-length transfer still involves at least one packet. */
+	if (total_pkt_count == 0)
+		total_pkt_count++;
+
+	burst_count = cdnsp_get_burst_count(pdev, preq, total_pkt_count);
+	last_burst_pkt = cdnsp_get_last_burst_packet_count(pdev, preq,
+							   total_pkt_count);
+	trbs_per_td = count_isoc_trbs_needed(preq);
+
+	ret = cdnsp_prepare_transfer(pdev, preq, trbs_per_td);
+	if (ret)
+		goto cleanup;
+
+	/*
+	 * Set isoc specific data for the first TRB in a TD.
+	 * Prevent HW from getting the TRBs by keeping the cycle state
+	 * inverted in the first TD's isoc TRB.
+	 */
+	field = TRB_TYPE(TRB_ISOC) | TRB_TLBPC(last_burst_pkt) |
+		TRB_SIA | TRB_TBC(burst_count);
+
+	if (!start_cycle)
+		field |= TRB_CYCLE;
+
+	/* Fill the rest of the TRB fields, and remaining normal TRBs. */
+	for (i = 0; i < trbs_per_td; i++) {
+		u32 remainder;
+
+		/* Calculate the TRB length. */
+		trb_buff_len = TRB_BUFF_LEN_UP_TO_BOUNDARY(addr);
+		if (trb_buff_len > td_remain_len)
+			trb_buff_len = td_remain_len;
+
+		/* Set the TRB length, TD size, & interrupter fields. */
+		remainder = cdnsp_td_remainder(pdev, running_total,
+					       trb_buff_len, td_len, preq,
+					       more_trbs_coming);
+
+		length_field = TRB_LEN(trb_buff_len) | TRB_INTR_TARGET(0);
+
+		/* Only the first TRB is isoc; overwrite otherwise. */
+		if (i) {
+			field = TRB_TYPE(TRB_NORMAL) | ep_ring->cycle_state;
+			length_field |= TRB_TD_SIZE(remainder);
+		} else {
+			length_field |= TRB_TD_SIZE_TBC(burst_count);
+		}
+
+		/* Only set interrupt on short packet for OUT EPs. */
+		if (usb_endpoint_dir_out(preq->pep->endpoint.desc))
+			field |= TRB_ISP;
+
+		/* Set the chain bit for all except the last TRB. */
+		if (i < trbs_per_td - 1) {
+			more_trbs_coming = true;
+			field |= TRB_CHAIN;
+		} else {
+			more_trbs_coming = false;
+			preq->td.last_trb = ep_ring->enqueue;
+			field |= TRB_IOC;
+		}
+
+		cdnsp_queue_trb(pdev, ep_ring, more_trbs_coming,
+				lower_32_bits(addr), upper_32_bits(addr),
+				length_field, field);
+
+		running_total += trb_buff_len;
+		addr += trb_buff_len;
+		td_remain_len -= trb_buff_len;
+	}
+
+	/* Check the TD length. */
+	if (running_total != td_len) {
+		dev_err(pdev->dev, "ISOC TD length mismatch\n");
+		ret = -EINVAL;
+		goto cleanup;
+	}
+
+	cdnsp_giveback_first_trb(pdev, preq->pep, preq->request.stream_id,
+				 start_cycle, start_trb);
+
+	return 0;
+
+cleanup:
+	/* Clean up a partially enqueued isoc transfer. */
+	list_del_init(&preq->td.td_list);
+	ep_ring->num_tds--;
+
+	/*
+	 * Use the first TD as a temporary variable to turn the TDs we've
+	 * queued into No-ops with a software-owned cycle bit.
+	 * That way the hardware won't accidentally start executing bogus TDs
+	 * when we partially overwrite them.
+	 * td->first_trb and td->start_seg are already set.
+	 */
+	preq->td.last_trb = ep_ring->enqueue;
+	/* Every TRB except the first & last will have its cycle bit flipped. */
+	cdnsp_td_to_noop(pdev, ep_ring, &preq->td, true);
+
+	/* Reset the ring enqueue back to the first TRB and its cycle bit. */
+	ep_ring->enqueue = preq->td.first_trb;
+	ep_ring->enq_seg = preq->td.start_seg;
+	ep_ring->cycle_state = start_cycle;
+	return ret;
+}
+
+int cdnsp_queue_isoc_tx_prepare(struct cdnsp_device *pdev,
+				struct cdnsp_request *preq)
+{
+	struct cdnsp_ring *ep_ring;
+	u32 ep_state;
+	int num_trbs;
+	int ret;
+
+	ep_ring = preq->pep->ring;
+	ep_state = GET_EP_CTX_STATE(preq->pep->out_ctx);
+	num_trbs = count_isoc_trbs_needed(preq);
+
+	/*
+	 * Check the ring to guarantee there is enough room for the whole
+	 * request. Do not insert any TD of the USB request into the ring if
+	 * the check fails.
+	 */
+	ret = cdnsp_prepare_ring(pdev, ep_ring, ep_state, num_trbs, GFP_ATOMIC);
+	if (ret)
+		return ret;
+
+	return cdnsp_queue_isoc_tx(pdev, preq);
+}
+
+/**** Command Ring Operations ****/
+/*
+ * Generic function for queuing a command TRB on the command ring.
+ * The driver queues only one command on the ring at a time.
+ */
+static void cdnsp_queue_command(struct cdnsp_device *pdev,
+				u32 field1,
+				u32 field2,
+				u32 field3,
+				u32 field4)
+{
+	cdnsp_prepare_ring(pdev, pdev->cmd_ring, EP_STATE_RUNNING, 1,
+			   GFP_ATOMIC);
+
+	pdev->cmd.command_trb = pdev->cmd_ring->enqueue;
+
+	cdnsp_queue_trb(pdev, pdev->cmd_ring, false, field1, field2,
+			field3, field4 | pdev->cmd_ring->cycle_state);
+}
+
+/* Queue a slot enable or disable request on the command ring. */
+void cdnsp_queue_slot_control(struct cdnsp_device *pdev, u32 trb_type)
+{
+	cdnsp_queue_command(pdev, 0, 0, 0, TRB_TYPE(trb_type) |
+			    SLOT_ID_FOR_TRB(pdev->slot_id));
+}
+
+/* Queue an address device command TRB. */
+void cdnsp_queue_address_device(struct cdnsp_device *pdev,
+				dma_addr_t in_ctx_ptr,
+				enum cdnsp_setup_dev setup)
+{
+	cdnsp_queue_command(pdev, lower_32_bits(in_ctx_ptr),
+			    upper_32_bits(in_ctx_ptr), 0,
+			    TRB_TYPE(TRB_ADDR_DEV) |
+			    SLOT_ID_FOR_TRB(pdev->slot_id) |
+			    (setup == SETUP_CONTEXT_ONLY ? TRB_BSR : 0));
+}
+
+/* Queue a reset device command TRB. */
+void cdnsp_queue_reset_device(struct cdnsp_device *pdev)
+{
+	cdnsp_queue_command(pdev, 0, 0, 0, TRB_TYPE(TRB_RESET_DEV) |
+			    SLOT_ID_FOR_TRB(pdev->slot_id));
+}
+
+/* Queue a configure endpoint command TRB. */
+void cdnsp_queue_configure_endpoint(struct cdnsp_device *pdev,
+				    dma_addr_t in_ctx_ptr)
+{
+	cdnsp_queue_command(pdev, lower_32_bits(in_ctx_ptr),
+			    upper_32_bits(in_ctx_ptr), 0,
+			    TRB_TYPE(TRB_CONFIG_EP) |
+			    SLOT_ID_FOR_TRB(pdev->slot_id));
+}
+
+/*
+ * Suspend is set to indicate "Stop Endpoint Command" is being issued to stop
+ * activity on an endpoint that is about to be suspended.
+ */
+void cdnsp_queue_stop_endpoint(struct cdnsp_device *pdev, unsigned int ep_index)
+{
+	cdnsp_queue_command(pdev, 0, 0, 0, SLOT_ID_FOR_TRB(pdev->slot_id) |
+			    EP_ID_FOR_TRB(ep_index) | TRB_TYPE(TRB_STOP_RING));
+}
+
+/* Set Transfer Ring Dequeue Pointer command.
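+ * For stream endpoints the stream context type in the command must select
+ * the primary transfer ring (SCT_PRI_TR), and the new cycle state is
+ * carried in the low bits of the dequeue pointer field.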
+ */
+void cdnsp_queue_new_dequeue_state(struct cdnsp_device *pdev,
+				   struct cdnsp_ep *pep,
+				   struct cdnsp_dequeue_state *deq_state)
+{
+	u32 trb_stream_id = STREAM_ID_FOR_TRB(deq_state->stream_id);
+	u32 trb_slot_id = SLOT_ID_FOR_TRB(pdev->slot_id);
+	u32 type = TRB_TYPE(TRB_SET_DEQ);
+	u32 trb_sct = 0;
+	dma_addr_t addr;
+
+	addr = cdnsp_trb_virt_to_dma(deq_state->new_deq_seg,
+				     deq_state->new_deq_ptr);
+
+	if (deq_state->stream_id)
+		trb_sct = SCT_FOR_TRB(SCT_PRI_TR);
+
+	cdnsp_queue_command(pdev, lower_32_bits(addr) | trb_sct |
+			    deq_state->new_cycle_state, upper_32_bits(addr),
+			    trb_stream_id, trb_slot_id |
+			    EP_ID_FOR_TRB(pep->idx) | type);
+}
+
+void cdnsp_queue_reset_ep(struct cdnsp_device *pdev, unsigned int ep_index)
+{
+	cdnsp_queue_command(pdev, 0, 0, 0,
+			    SLOT_ID_FOR_TRB(pdev->slot_id) |
+			    EP_ID_FOR_TRB(ep_index) |
+			    TRB_TYPE(TRB_RESET_EP));
+}
+
+/*
+ * Queue a halt endpoint request on the command ring.
+ */
+void cdnsp_queue_halt_endpoint(struct cdnsp_device *pdev, unsigned int ep_index)
+{
+	cdnsp_queue_command(pdev, 0, 0, 0, TRB_TYPE(TRB_HALT_ENDPOINT) |
+			    SLOT_ID_FOR_TRB(pdev->slot_id) |
+			    EP_ID_FOR_TRB(ep_index));
+}
+
+/*
+ * Queue a flush endpoint request on the command ring.
+ */
+void cdnsp_queue_flush_endpoint(struct cdnsp_device *pdev,
+				unsigned int ep_index)
+{
+	cdnsp_queue_command(pdev, 0, 0, 0, TRB_TYPE(TRB_FLUSH_ENDPOINT) |
+			    SLOT_ID_FOR_TRB(pdev->slot_id) |
+			    EP_ID_FOR_TRB(ep_index));
+}
+
+void cdnsp_force_header_wakeup(struct cdnsp_device *pdev, int intf_num)
+{
+	u32 lo, mid;
+
+	lo = TRB_FH_TO_PACKET_TYPE(TRB_FH_TR_PACKET) |
+	     TRB_FH_TO_DEVICE_ADDRESS(pdev->device_address);
+	mid = TRB_FH_TR_PACKET_DEV_NOT |
+	      TRB_FH_TO_NOT_TYPE(TRB_FH_TR_PACKET_FUNCTION_WAKE) |
+	      TRB_FH_TO_INTERFACE(intf_num);
+
+	cdnsp_queue_command(pdev, lo, mid, 0,
+			    TRB_TYPE(TRB_FORCE_HEADER) | SET_PORT_ID(2));
+}
diff --git a/drivers/usb/cdns3/cdnsp-trace.c b/drivers/usb/cdns3/cdnsp-trace.c
new file mode 100644
index 000000000000..e50ab799ad95
--- /dev/null
+++ b/drivers/usb/cdns3/cdnsp-trace.c
@@ -0,0 +1,12 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Cadence CDNSP DRD Driver.
+ *
+ * Copyright (C) 2020 Cadence.
+ *
+ * Author: Pawel Laszczak <pawell@cadence.com>
+ *
+ */
+
+#define CREATE_TRACE_POINTS
+#include "cdnsp-trace.h"
diff --git a/drivers/usb/cdns3/cdnsp-trace.h b/drivers/usb/cdns3/cdnsp-trace.h
new file mode 100644
index 000000000000..5aa88ca012de
--- /dev/null
+++ b/drivers/usb/cdns3/cdnsp-trace.h
@@ -0,0 +1,830 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Cadence CDNSP DRD Driver.
+ * Trace support header file.
+ *
+ * Copyright (C) 2020 Cadence.
+ *
+ * Author: Pawel Laszczak <pawell@cadence.com>
+ *
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM cdnsp-dev
+
+/*
+ * The TRACE_SYSTEM_VAR defaults to TRACE_SYSTEM, but must be a
+ * legitimate C variable. It is not exported to user space.
+ */
+#undef TRACE_SYSTEM_VAR
+#define TRACE_SYSTEM_VAR cdnsp_dev
+
+#if !defined(__CDNSP_DEV_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define __CDNSP_DEV_TRACE_H
+
+#include <linux/tracepoint.h>
+#include "cdnsp-gadget.h"
+#include "cdnsp-debug.h"
+
+/*
+ * There is a limitation on the single buffer size in the TRACEPOINT
+ * subsystem. By default TRACE_BUF_SIZE is 1024, so not all data will be
+ * logged. To show more data, this must be increased. In most cases the
+ * default value is sufficient.
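+ * (TRACE_BUF_SIZE lives in kernel/trace/trace.h, so enlarging it means
+ * rebuilding the kernel; CDNSP_MSG_MAX below only bounds the size of
+ * this driver's decoded strings.)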
+ */
+#define CDNSP_MSG_MAX 500
+
+DECLARE_EVENT_CLASS(cdnsp_log_ep,
+	TP_PROTO(struct cdnsp_ep *pep, u32 stream_id),
+	TP_ARGS(pep, stream_id),
+	TP_STRUCT__entry(
+		__string(name, pep->name)
+		__field(unsigned int, state)
+		__field(u32, stream_id)
+		__field(u8, enabled)
+		__field(unsigned int, num_streams)
+		__field(int, td_count)
+		__field(u8, first_prime_det)
+		__field(u8, drbls_count)
+	),
+	TP_fast_assign(
+		__assign_str(name, pep->name);
+		__entry->state = pep->ep_state;
+		__entry->stream_id = stream_id;
+		__entry->enabled = pep->ep_state & EP_HAS_STREAMS;
+		__entry->num_streams = pep->stream_info.num_streams;
+		__entry->td_count = pep->stream_info.td_count;
+		__entry->first_prime_det = pep->stream_info.first_prime_det;
+		__entry->drbls_count = pep->stream_info.drbls_count;
+	),
+	TP_printk("%s: SID: %08x ep state: %x stream: enabled: %d num %d "
+		  "tds %d, first prime: %d drbls %d",
+		  __get_str(name), __entry->stream_id, __entry->state,
+		  __entry->enabled, __entry->num_streams, __entry->td_count,
+		  __entry->first_prime_det, __entry->drbls_count)
+);
+
+DEFINE_EVENT(cdnsp_log_ep, cdnsp_tr_drbl,
+	TP_PROTO(struct cdnsp_ep *pep, u32 stream_id),
+	TP_ARGS(pep, stream_id)
+);
+
+DEFINE_EVENT(cdnsp_log_ep, cdnsp_wait_for_prime,
+	TP_PROTO(struct cdnsp_ep *pep, u32 stream_id),
+	TP_ARGS(pep, stream_id)
+);
+
+DEFINE_EVENT(cdnsp_log_ep, cdnsp_ep_list_empty_with_skip,
+	TP_PROTO(struct cdnsp_ep *pep, u32 stream_id),
+	TP_ARGS(pep, stream_id)
+);
+
+DEFINE_EVENT(cdnsp_log_ep, cdnsp_ep_enable_end,
+	TP_PROTO(struct cdnsp_ep *pep, u32 stream_id),
+	TP_ARGS(pep, stream_id)
+);
+
+DEFINE_EVENT(cdnsp_log_ep, cdnsp_ep_disable_end,
+	TP_PROTO(struct cdnsp_ep *pep, u32 stream_id),
+	TP_ARGS(pep, stream_id)
+);
+
+DEFINE_EVENT(cdnsp_log_ep, cdnsp_ep_busy_try_halt_again,
+	TP_PROTO(struct cdnsp_ep *pep, u32 stream_id),
+	TP_ARGS(pep, stream_id)
+);
+
+DECLARE_EVENT_CLASS(cdnsp_log_enable_disable,
+	TP_PROTO(int set),
+	TP_ARGS(set),
+	TP_STRUCT__entry(
+		__field(int, set)
+	),
+	TP_fast_assign(
+		__entry->set = set;
+	),
+	TP_printk("%s", __entry->set ?
"enabled" : "disabled") +); + +DEFINE_EVENT(cdnsp_log_enable_disable, cdnsp_pullup, + TP_PROTO(int set), + TP_ARGS(set) +); + +DEFINE_EVENT(cdnsp_log_enable_disable, cdnsp_u1, + TP_PROTO(int set), + TP_ARGS(set) +); + +DEFINE_EVENT(cdnsp_log_enable_disable, cdnsp_u2, + TP_PROTO(int set), + TP_ARGS(set) +); + +DEFINE_EVENT(cdnsp_log_enable_disable, cdnsp_lpm, + TP_PROTO(int set), + TP_ARGS(set) +); + +DEFINE_EVENT(cdnsp_log_enable_disable, cdnsp_may_wakeup, + TP_PROTO(int set), + TP_ARGS(set) +); + +DECLARE_EVENT_CLASS(cdnsp_log_simple, + TP_PROTO(char *msg), + TP_ARGS(msg), + TP_STRUCT__entry( + __string(text, msg) + ), + TP_fast_assign( + __assign_str(text, msg) + ), + TP_printk("%s", __get_str(text)) +); + +DEFINE_EVENT(cdnsp_log_simple, cdnsp_exit, + TP_PROTO(char *msg), + TP_ARGS(msg) +); + +DEFINE_EVENT(cdnsp_log_simple, cdnsp_init, + TP_PROTO(char *msg), + TP_ARGS(msg) +); + +DEFINE_EVENT(cdnsp_log_simple, cdnsp_slot_id, + TP_PROTO(char *msg), + TP_ARGS(msg) +); + +DEFINE_EVENT(cdnsp_log_simple, cdnsp_no_room_on_ring, + TP_PROTO(char *msg), + TP_ARGS(msg) +); + +DEFINE_EVENT(cdnsp_log_simple, cdnsp_ep0_status_stage, + TP_PROTO(char *msg), + TP_ARGS(msg) +); + +DEFINE_EVENT(cdnsp_log_simple, cdnsp_ep0_request, + TP_PROTO(char *msg), + TP_ARGS(msg) +); + +DEFINE_EVENT(cdnsp_log_simple, cdnsp_ep0_set_config, + TP_PROTO(char *msg), + TP_ARGS(msg) +); + +DEFINE_EVENT(cdnsp_log_simple, cdnsp_ep0_halted, + TP_PROTO(char *msg), + TP_ARGS(msg) +); + +DEFINE_EVENT(cdnsp_log_simple, cdnsp_ep_halt, + TP_PROTO(char *msg), + TP_ARGS(msg) +); + +TRACE_EVENT(cdnsp_looking_trb_in_td, + TP_PROTO(dma_addr_t suspect, dma_addr_t trb_start, dma_addr_t trb_end, + dma_addr_t curr_seg, dma_addr_t end_seg), + TP_ARGS(suspect, trb_start, trb_end, curr_seg, end_seg), + TP_STRUCT__entry( + __field(dma_addr_t, suspect) + __field(dma_addr_t, trb_start) + __field(dma_addr_t, trb_end) + __field(dma_addr_t, curr_seg) + __field(dma_addr_t, end_seg) + ), + TP_fast_assign( + __entry->suspect = suspect; + __entry->trb_start = trb_start; + __entry->trb_end = trb_end; + __entry->curr_seg = curr_seg; + __entry->end_seg = end_seg; + ), + TP_printk("DMA: suspect event: %pad, trb-start: %pad, trb-end %pad, " + "seg-start %pad, seg-end %pad", + &__entry->suspect, &__entry->trb_start, &__entry->trb_end, + &__entry->curr_seg, &__entry->end_seg) +); + +TRACE_EVENT(cdnsp_port_info, + TP_PROTO(__le32 __iomem *addr, u32 offset, u32 count, u32 rev), + TP_ARGS(addr, offset, count, rev), + TP_STRUCT__entry( + __field(__le32 __iomem *, addr) + __field(u32, offset) + __field(u32, count) + __field(u32, rev) + ), + TP_fast_assign( + __entry->addr = addr; + __entry->offset = offset; + __entry->count = count; + __entry->rev = rev; + ), + TP_printk("Ext Cap %p, port offset = %u, count = %u, rev = 0x%x", + __entry->addr, __entry->offset, __entry->count, __entry->rev) +); + +DECLARE_EVENT_CLASS(cdnsp_log_deq_state, + TP_PROTO(struct cdnsp_dequeue_state *state), + TP_ARGS(state), + TP_STRUCT__entry( + __field(int, new_cycle_state) + __field(struct cdnsp_segment *, new_deq_seg) + __field(dma_addr_t, deq_seg_dma) + __field(union cdnsp_trb *, new_deq_ptr) + __field(dma_addr_t, deq_ptr_dma) + ), + TP_fast_assign( + __entry->new_cycle_state = state->new_cycle_state; + __entry->new_deq_seg = state->new_deq_seg; + __entry->deq_seg_dma = state->new_deq_seg->dma; + __entry->new_deq_ptr = state->new_deq_ptr, + __entry->deq_ptr_dma = cdnsp_trb_virt_to_dma(state->new_deq_seg, + state->new_deq_ptr); + ), + TP_printk("New cycle state = 0x%x, New dequeue 
segment = %p (0x%pad dma), " + "New dequeue pointer = %p (0x%pad dma)", + __entry->new_cycle_state, __entry->new_deq_seg, + &__entry->deq_seg_dma, __entry->new_deq_ptr, + &__entry->deq_ptr_dma + ) +); + +DEFINE_EVENT(cdnsp_log_deq_state, cdnsp_new_deq_state, + TP_PROTO(struct cdnsp_dequeue_state *state), + TP_ARGS(state) +); + +DECLARE_EVENT_CLASS(cdnsp_log_ctrl, + TP_PROTO(struct usb_ctrlrequest *ctrl), + TP_ARGS(ctrl), + TP_STRUCT__entry( + __field(u8, bRequestType) + __field(u8, bRequest) + __field(u16, wValue) + __field(u16, wIndex) + __field(u16, wLength) + __dynamic_array(char, str, CDNSP_MSG_MAX) + ), + TP_fast_assign( + __entry->bRequestType = ctrl->bRequestType; + __entry->bRequest = ctrl->bRequest; + __entry->wValue = le16_to_cpu(ctrl->wValue); + __entry->wIndex = le16_to_cpu(ctrl->wIndex); + __entry->wLength = le16_to_cpu(ctrl->wLength); + ), + TP_printk("%s", usb_decode_ctrl(__get_str(str), CDNSP_MSG_MAX, + __entry->bRequestType, + __entry->bRequest, __entry->wValue, + __entry->wIndex, __entry->wLength) + ) +); + +DEFINE_EVENT(cdnsp_log_ctrl, cdnsp_ctrl_req, + TP_PROTO(struct usb_ctrlrequest *ctrl), + TP_ARGS(ctrl) +); + +DECLARE_EVENT_CLASS(cdnsp_log_bounce, + TP_PROTO(struct cdnsp_request *preq, u32 new_buf_len, u32 offset, + dma_addr_t dma, unsigned int unalign), + TP_ARGS(preq, new_buf_len, offset, dma, unalign), + TP_STRUCT__entry( + __string(name, preq->pep->name) + __field(u32, new_buf_len) + __field(u32, offset) + __field(dma_addr_t, dma) + __field(unsigned int, unalign) + ), + TP_fast_assign( + __assign_str(name, preq->pep->name); + __entry->new_buf_len = new_buf_len; + __entry->offset = offset; + __entry->dma = dma; + __entry->unalign = unalign; + ), + TP_printk("%s buf len %d, offset %d, dma %pad, unalign %d", + __get_str(name), __entry->new_buf_len, + __entry->offset, &__entry->dma, __entry->unalign + ) +); + +DEFINE_EVENT(cdnsp_log_bounce, cdnsp_bounce_align_td_split, + TP_PROTO(struct cdnsp_request *preq, u32 new_buf_len, u32 offset, + dma_addr_t dma, unsigned int unalign), + TP_ARGS(preq, new_buf_len, offset, dma, unalign) +); + +DEFINE_EVENT(cdnsp_log_bounce, cdnsp_bounce_map, + TP_PROTO(struct cdnsp_request *preq, u32 new_buf_len, u32 offset, + dma_addr_t dma, unsigned int unalign), + TP_ARGS(preq, new_buf_len, offset, dma, unalign) +); + +DEFINE_EVENT(cdnsp_log_bounce, cdnsp_bounce_unmap, + TP_PROTO(struct cdnsp_request *preq, u32 new_buf_len, u32 offset, + dma_addr_t dma, unsigned int unalign), + TP_ARGS(preq, new_buf_len, offset, dma, unalign) +); + +DECLARE_EVENT_CLASS(cdnsp_log_trb, + TP_PROTO(struct cdnsp_ring *ring, struct cdnsp_generic_trb *trb), + TP_ARGS(ring, trb), + TP_STRUCT__entry( + __field(u32, type) + __field(u32, field0) + __field(u32, field1) + __field(u32, field2) + __field(u32, field3) + __field(union cdnsp_trb *, trb) + __field(dma_addr_t, trb_dma) + __dynamic_array(char, str, CDNSP_MSG_MAX) + ), + TP_fast_assign( + __entry->type = ring->type; + __entry->field0 = le32_to_cpu(trb->field[0]); + __entry->field1 = le32_to_cpu(trb->field[1]); + __entry->field2 = le32_to_cpu(trb->field[2]); + __entry->field3 = le32_to_cpu(trb->field[3]); + __entry->trb = (union cdnsp_trb *)trb; + __entry->trb_dma = cdnsp_trb_virt_to_dma(ring->deq_seg, + (union cdnsp_trb *)trb); + + ), + TP_printk("%s: %s trb: %p(%pad)", cdnsp_ring_type_string(__entry->type), + cdnsp_decode_trb(__get_str(str), CDNSP_MSG_MAX, + __entry->field0, __entry->field1, + __entry->field2, __entry->field3), + __entry->trb, &__entry->trb_dma + ) +); + +DEFINE_EVENT(cdnsp_log_trb, 
cdnsp_handle_event, + TP_PROTO(struct cdnsp_ring *ring, struct cdnsp_generic_trb *trb), + TP_ARGS(ring, trb) +); + +DEFINE_EVENT(cdnsp_log_trb, cdnsp_trb_without_td, + TP_PROTO(struct cdnsp_ring *ring, struct cdnsp_generic_trb *trb), + TP_ARGS(ring, trb) +); + +DEFINE_EVENT(cdnsp_log_trb, cdnsp_handle_command, + TP_PROTO(struct cdnsp_ring *ring, struct cdnsp_generic_trb *trb), + TP_ARGS(ring, trb) +); + +DEFINE_EVENT(cdnsp_log_trb, cdnsp_handle_transfer, + TP_PROTO(struct cdnsp_ring *ring, struct cdnsp_generic_trb *trb), + TP_ARGS(ring, trb) +); + +DEFINE_EVENT(cdnsp_log_trb, cdnsp_queue_trb, + TP_PROTO(struct cdnsp_ring *ring, struct cdnsp_generic_trb *trb), + TP_ARGS(ring, trb) +); + +DEFINE_EVENT(cdnsp_log_trb, cdnsp_cmd_wait_for_compl, + TP_PROTO(struct cdnsp_ring *ring, struct cdnsp_generic_trb *trb), + TP_ARGS(ring, trb) +); + +DEFINE_EVENT(cdnsp_log_trb, cdnsp_cmd_timeout, + TP_PROTO(struct cdnsp_ring *ring, struct cdnsp_generic_trb *trb), + TP_ARGS(ring, trb) +); + +DEFINE_EVENT(cdnsp_log_trb, cdnsp_defered_event, + TP_PROTO(struct cdnsp_ring *ring, struct cdnsp_generic_trb *trb), + TP_ARGS(ring, trb) +); + +DECLARE_EVENT_CLASS(cdnsp_log_pdev, + TP_PROTO(struct cdnsp_device *pdev), + TP_ARGS(pdev), + TP_STRUCT__entry( + __field(struct cdnsp_device *, pdev) + __field(struct usb_gadget *, gadget) + __field(dma_addr_t, out_ctx) + __field(dma_addr_t, in_ctx) + __field(u8, port_num) + ), + TP_fast_assign( + __entry->pdev = pdev; + __entry->gadget = &pdev->gadget; + __entry->in_ctx = pdev->in_ctx.dma; + __entry->out_ctx = pdev->out_ctx.dma; + __entry->port_num = pdev->active_port ? + pdev->active_port->port_num : 0xFF; + ), + TP_printk("pdev %p gadget %p ctx %pad | %pad, port %d ", + __entry->pdev, __entry->gadget, &__entry->in_ctx, + &__entry->out_ctx, __entry->port_num + ) +); + +DEFINE_EVENT(cdnsp_log_pdev, cdnsp_alloc_priv_device, + TP_PROTO(struct cdnsp_device *vdev), + TP_ARGS(vdev) +); + +DEFINE_EVENT(cdnsp_log_pdev, cdnsp_free_priv_device, + TP_PROTO(struct cdnsp_device *vdev), + TP_ARGS(vdev) +); + +DEFINE_EVENT(cdnsp_log_pdev, cdnsp_setup_device, + TP_PROTO(struct cdnsp_device *vdev), + TP_ARGS(vdev) +); + +DEFINE_EVENT(cdnsp_log_pdev, cdnsp_setup_addressable_priv_device, + TP_PROTO(struct cdnsp_device *vdev), + TP_ARGS(vdev) +); + +DECLARE_EVENT_CLASS(cdnsp_log_request, + TP_PROTO(struct cdnsp_request *req), + TP_ARGS(req), + TP_STRUCT__entry( + __string(name, req->pep->name) + __field(struct usb_request *, request) + __field(struct cdnsp_request *, preq) + __field(void *, buf) + __field(unsigned int, actual) + __field(unsigned int, length) + __field(int, status) + __field(dma_addr_t, dma) + __field(unsigned int, stream_id) + __field(unsigned int, zero) + __field(unsigned int, short_not_ok) + __field(unsigned int, no_interrupt) + __field(struct scatterlist*, sg) + __field(unsigned int, num_sgs) + __field(unsigned int, num_mapped_sgs) + + ), + TP_fast_assign( + __assign_str(name, req->pep->name); + __entry->request = &req->request; + __entry->preq = req; + __entry->buf = req->request.buf; + __entry->actual = req->request.actual; + __entry->length = req->request.length; + __entry->status = req->request.status; + __entry->dma = req->request.dma; + __entry->stream_id = req->request.stream_id; + __entry->zero = req->request.zero; + __entry->short_not_ok = req->request.short_not_ok; + __entry->no_interrupt = req->request.no_interrupt; + __entry->sg = req->request.sg; + __entry->num_sgs = req->request.num_sgs; + __entry->num_mapped_sgs = req->request.num_mapped_sgs; + ), + 
TP_printk("%s; req U:%p/P:%p, req buf %p, length %u/%u, status %d, " + "buf dma (%pad), SID %u, %s%s%s, sg %p, num_sg %d," + " num_m_sg %d", + __get_str(name), __entry->request, __entry->preq, + __entry->buf, __entry->actual, __entry->length, + __entry->status, &__entry->dma, + __entry->stream_id, __entry->zero ? "Z" : "z", + __entry->short_not_ok ? "S" : "s", + __entry->no_interrupt ? "I" : "i", + __entry->sg, __entry->num_sgs, __entry->num_mapped_sgs + ) +); + +DEFINE_EVENT(cdnsp_log_request, cdnsp_request_enqueue, + TP_PROTO(struct cdnsp_request *req), + TP_ARGS(req) +); + +DEFINE_EVENT(cdnsp_log_request, cdnsp_request_enqueue_busy, + TP_PROTO(struct cdnsp_request *req), + TP_ARGS(req) +); + +DEFINE_EVENT(cdnsp_log_request, cdnsp_request_enqueue_error, + TP_PROTO(struct cdnsp_request *req), + TP_ARGS(req) +); + +DEFINE_EVENT(cdnsp_log_request, cdnsp_request_dequeue, + TP_PROTO(struct cdnsp_request *req), + TP_ARGS(req) +); + +DEFINE_EVENT(cdnsp_log_request, cdnsp_request_giveback, + TP_PROTO(struct cdnsp_request *req), + TP_ARGS(req) +); + +DEFINE_EVENT(cdnsp_log_request, cdnsp_alloc_request, + TP_PROTO(struct cdnsp_request *req), + TP_ARGS(req) +); + +DEFINE_EVENT(cdnsp_log_request, cdnsp_free_request, + TP_PROTO(struct cdnsp_request *req), + TP_ARGS(req) +); + +DECLARE_EVENT_CLASS(cdnsp_log_ep_ctx, + TP_PROTO(struct cdnsp_ep_ctx *ctx), + TP_ARGS(ctx), + TP_STRUCT__entry( + __field(u32, info) + __field(u32, info2) + __field(u64, deq) + __field(u32, tx_info) + __dynamic_array(char, str, CDNSP_MSG_MAX) + ), + TP_fast_assign( + __entry->info = le32_to_cpu(ctx->ep_info); + __entry->info2 = le32_to_cpu(ctx->ep_info2); + __entry->deq = le64_to_cpu(ctx->deq); + __entry->tx_info = le32_to_cpu(ctx->tx_info); + ), + TP_printk("%s", cdnsp_decode_ep_context(__get_str(str), CDNSP_MSG_MAX, + __entry->info, __entry->info2, + __entry->deq, __entry->tx_info) + ) +); + +DEFINE_EVENT(cdnsp_log_ep_ctx, cdnsp_ep_disabled, + TP_PROTO(struct cdnsp_ep_ctx *ctx), + TP_ARGS(ctx) +); + +DEFINE_EVENT(cdnsp_log_ep_ctx, cdnsp_ep_stopped_or_disabled, + TP_PROTO(struct cdnsp_ep_ctx *ctx), + TP_ARGS(ctx) +); + +DEFINE_EVENT(cdnsp_log_ep_ctx, cdnsp_remove_request, + TP_PROTO(struct cdnsp_ep_ctx *ctx), + TP_ARGS(ctx) +); + +DEFINE_EVENT(cdnsp_log_ep_ctx, cdnsp_handle_cmd_stop_ep, + TP_PROTO(struct cdnsp_ep_ctx *ctx), + TP_ARGS(ctx) +); + +DEFINE_EVENT(cdnsp_log_ep_ctx, cdnsp_handle_cmd_flush_ep, + TP_PROTO(struct cdnsp_ep_ctx *ctx), + TP_ARGS(ctx) +); + +DEFINE_EVENT(cdnsp_log_ep_ctx, cdnsp_handle_cmd_set_deq_ep, + TP_PROTO(struct cdnsp_ep_ctx *ctx), + TP_ARGS(ctx) +); + +DEFINE_EVENT(cdnsp_log_ep_ctx, cdnsp_handle_cmd_reset_ep, + TP_PROTO(struct cdnsp_ep_ctx *ctx), + TP_ARGS(ctx) +); + +DEFINE_EVENT(cdnsp_log_ep_ctx, cdnsp_handle_cmd_config_ep, + TP_PROTO(struct cdnsp_ep_ctx *ctx), + TP_ARGS(ctx) +); + +DECLARE_EVENT_CLASS(cdnsp_log_slot_ctx, + TP_PROTO(struct cdnsp_slot_ctx *ctx), + TP_ARGS(ctx), + TP_STRUCT__entry( + __field(u32, info) + __field(u32, info2) + __field(u32, int_target) + __field(u32, state) + ), + TP_fast_assign( + __entry->info = le32_to_cpu(ctx->dev_info); + __entry->info2 = le32_to_cpu(ctx->dev_port); + __entry->int_target = le32_to_cpu(ctx->int_target); + __entry->state = le32_to_cpu(ctx->dev_state); + ), + TP_printk("%s", cdnsp_decode_slot_context(__entry->info, + __entry->info2, + __entry->int_target, + __entry->state) + ) +); + +DEFINE_EVENT(cdnsp_log_slot_ctx, cdnsp_slot_already_in_default, + TP_PROTO(struct cdnsp_slot_ctx *ctx), + TP_ARGS(ctx) +); + +DEFINE_EVENT(cdnsp_log_slot_ctx, 
cdnsp_handle_cmd_enable_slot, + TP_PROTO(struct cdnsp_slot_ctx *ctx), + TP_ARGS(ctx) +); + +DEFINE_EVENT(cdnsp_log_slot_ctx, cdnsp_handle_cmd_disable_slot, + TP_PROTO(struct cdnsp_slot_ctx *ctx), + TP_ARGS(ctx) +); + +DEFINE_EVENT(cdnsp_log_slot_ctx, cdnsp_reset_device, + TP_PROTO(struct cdnsp_slot_ctx *ctx), + TP_ARGS(ctx) +); + +DEFINE_EVENT(cdnsp_log_slot_ctx, cdnsp_setup_device_slot, + TP_PROTO(struct cdnsp_slot_ctx *ctx), + TP_ARGS(ctx) +); + +DEFINE_EVENT(cdnsp_log_slot_ctx, cdnsp_handle_cmd_addr_dev, + TP_PROTO(struct cdnsp_slot_ctx *ctx), + TP_ARGS(ctx) +); + +DEFINE_EVENT(cdnsp_log_slot_ctx, cdnsp_handle_cmd_reset_dev, + TP_PROTO(struct cdnsp_slot_ctx *ctx), + TP_ARGS(ctx) +); + +DEFINE_EVENT(cdnsp_log_slot_ctx, cdnsp_handle_cmd_set_deq, + TP_PROTO(struct cdnsp_slot_ctx *ctx), + TP_ARGS(ctx) +); + +DEFINE_EVENT(cdnsp_log_slot_ctx, cdnsp_configure_endpoint, + TP_PROTO(struct cdnsp_slot_ctx *ctx), + TP_ARGS(ctx) +); + +DECLARE_EVENT_CLASS(cdnsp_log_td_info, + TP_PROTO(struct cdnsp_request *preq), + TP_ARGS(preq), + TP_STRUCT__entry( + __string(name, preq->pep->name) + __field(struct usb_request *, request) + __field(struct cdnsp_request *, preq) + __field(union cdnsp_trb *, first_trb) + __field(union cdnsp_trb *, last_trb) + __field(dma_addr_t, trb_dma) + ), + TP_fast_assign( + __assign_str(name, preq->pep->name); + __entry->request = &preq->request; + __entry->preq = preq; + __entry->first_trb = preq->td.first_trb; + __entry->last_trb = preq->td.last_trb; + __entry->trb_dma = cdnsp_trb_virt_to_dma(preq->td.start_seg, + preq->td.first_trb) + ), + TP_printk("%s req/preq: %p/%p, first trb %p[vir]/%pad(dma), last trb %p", + __get_str(name), __entry->request, __entry->preq, + __entry->first_trb, &__entry->trb_dma, + __entry->last_trb + ) +); + +DEFINE_EVENT(cdnsp_log_td_info, cdnsp_remove_request_td, + TP_PROTO(struct cdnsp_request *preq), + TP_ARGS(preq) +); + +DECLARE_EVENT_CLASS(cdnsp_log_ring, + TP_PROTO(struct cdnsp_ring *ring), + TP_ARGS(ring), + TP_STRUCT__entry( + __field(u32, type) + __field(void *, ring) + __field(dma_addr_t, enq) + __field(dma_addr_t, deq) + __field(dma_addr_t, enq_seg) + __field(dma_addr_t, deq_seg) + __field(unsigned int, num_segs) + __field(unsigned int, stream_id) + __field(unsigned int, cycle_state) + __field(unsigned int, num_trbs_free) + __field(unsigned int, bounce_buf_len) + ), + TP_fast_assign( + __entry->ring = ring; + __entry->type = ring->type; + __entry->num_segs = ring->num_segs; + __entry->stream_id = ring->stream_id; + __entry->enq_seg = ring->enq_seg->dma; + __entry->deq_seg = ring->deq_seg->dma; + __entry->cycle_state = ring->cycle_state; + __entry->num_trbs_free = ring->num_trbs_free; + __entry->bounce_buf_len = ring->bounce_buf_len; + __entry->enq = cdnsp_trb_virt_to_dma(ring->enq_seg, + ring->enqueue); + __entry->deq = cdnsp_trb_virt_to_dma(ring->deq_seg, + ring->dequeue); + ), + TP_printk("%s %p: enq %pad(%pad) deq %pad(%pad) segs %d stream %d" + " free_trbs %d bounce %d cycle %d", + cdnsp_ring_type_string(__entry->type), __entry->ring, + &__entry->enq, &__entry->enq_seg, + &__entry->deq, &__entry->deq_seg, + __entry->num_segs, + __entry->stream_id, + __entry->num_trbs_free, + __entry->bounce_buf_len, + __entry->cycle_state + ) +); + +DEFINE_EVENT(cdnsp_log_ring, cdnsp_ring_alloc, + TP_PROTO(struct cdnsp_ring *ring), + TP_ARGS(ring) +); + +DEFINE_EVENT(cdnsp_log_ring, cdnsp_ring_free, + TP_PROTO(struct cdnsp_ring *ring), + TP_ARGS(ring) +); + +DEFINE_EVENT(cdnsp_log_ring, cdnsp_set_stream_ring, + TP_PROTO(struct cdnsp_ring *ring), + 
TP_ARGS(ring) +); + +DEFINE_EVENT(cdnsp_log_ring, cdnsp_ring_expansion, + TP_PROTO(struct cdnsp_ring *ring), + TP_ARGS(ring) +); + +DEFINE_EVENT(cdnsp_log_ring, cdnsp_inc_enq, + TP_PROTO(struct cdnsp_ring *ring), + TP_ARGS(ring) +); + +DEFINE_EVENT(cdnsp_log_ring, cdnsp_inc_deq, + TP_PROTO(struct cdnsp_ring *ring), + TP_ARGS(ring) +); + +DECLARE_EVENT_CLASS(cdnsp_log_portsc, + TP_PROTO(u32 portnum, u32 portsc), + TP_ARGS(portnum, portsc), + TP_STRUCT__entry( + __field(u32, portnum) + __field(u32, portsc) + __dynamic_array(char, str, CDNSP_MSG_MAX) + ), + TP_fast_assign( + __entry->portnum = portnum; + __entry->portsc = portsc; + ), + TP_printk("port-%d: %s", + __entry->portnum, + cdnsp_decode_portsc(__get_str(str), CDNSP_MSG_MAX, + __entry->portsc) + ) +); + +DEFINE_EVENT(cdnsp_log_portsc, cdnsp_handle_port_status, + TP_PROTO(u32 portnum, u32 portsc), + TP_ARGS(portnum, portsc) +); + +DEFINE_EVENT(cdnsp_log_portsc, cdnsp_link_state_changed, + TP_PROTO(u32 portnum, u32 portsc), + TP_ARGS(portnum, portsc) +); + +TRACE_EVENT(cdnsp_stream_number, + TP_PROTO(struct cdnsp_ep *pep, int num_stream_ctxs, int num_streams), + TP_ARGS(pep, num_stream_ctxs, num_streams), + TP_STRUCT__entry( + __string(name, pep->name) + __field(int, num_stream_ctxs) + __field(int, num_streams) + ), + TP_fast_assign( + __entry->num_stream_ctxs = num_stream_ctxs; + __entry->num_streams = num_streams; + ), + TP_printk("%s Need %u stream ctx entries for %u stream IDs.", + __get_str(name), __entry->num_stream_ctxs, + __entry->num_streams) +); + +#endif /* __CDNSP_TRACE_H */ + +/* this part must be outside header guard */ + +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH . + +#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_FILE cdnsp-trace + +#include <trace/define_trace.h> diff --git a/drivers/usb/cdns3/core.c b/drivers/usb/cdns3/core.c index 1991cb5cf6bf..199713769289 100644 --- a/drivers/usb/cdns3/core.c +++ b/drivers/usb/cdns3/core.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /* - * Cadence USBSS DRD Driver. + * Cadence USBSS and USBSSP DRD Driver. * * Copyright (C) 2018-2019 Cadence. 
* Copyright (C) 2017-2018 NXP @@ -19,15 +19,13 @@ #include <linux/io.h> #include <linux/pm_runtime.h> -#include "gadget.h" #include "core.h" #include "host-export.h" -#include "gadget-export.h" #include "drd.h" -static int cdns3_idle_init(struct cdns3 *cdns); +static int cdns_idle_init(struct cdns *cdns); -static int cdns3_role_start(struct cdns3 *cdns, enum usb_role role) +static int cdns_role_start(struct cdns *cdns, enum usb_role role) { int ret; @@ -41,47 +39,47 @@ static int cdns3_role_start(struct cdns3 *cdns, enum usb_role role) if (!cdns->roles[role]) return -ENXIO; - if (cdns->roles[role]->state == CDNS3_ROLE_STATE_ACTIVE) + if (cdns->roles[role]->state == CDNS_ROLE_STATE_ACTIVE) return 0; mutex_lock(&cdns->mutex); ret = cdns->roles[role]->start(cdns); if (!ret) - cdns->roles[role]->state = CDNS3_ROLE_STATE_ACTIVE; + cdns->roles[role]->state = CDNS_ROLE_STATE_ACTIVE; mutex_unlock(&cdns->mutex); return ret; } -static void cdns3_role_stop(struct cdns3 *cdns) +static void cdns_role_stop(struct cdns *cdns) { enum usb_role role = cdns->role; if (WARN_ON(role > USB_ROLE_DEVICE)) return; - if (cdns->roles[role]->state == CDNS3_ROLE_STATE_INACTIVE) + if (cdns->roles[role]->state == CDNS_ROLE_STATE_INACTIVE) return; mutex_lock(&cdns->mutex); cdns->roles[role]->stop(cdns); - cdns->roles[role]->state = CDNS3_ROLE_STATE_INACTIVE; + cdns->roles[role]->state = CDNS_ROLE_STATE_INACTIVE; mutex_unlock(&cdns->mutex); } -static void cdns3_exit_roles(struct cdns3 *cdns) +static void cdns_exit_roles(struct cdns *cdns) { - cdns3_role_stop(cdns); - cdns3_drd_exit(cdns); + cdns_role_stop(cdns); + cdns_drd_exit(cdns); } /** - * cdns3_core_init_role - initialize role of operation - * @cdns: Pointer to cdns3 structure + * cdns_core_init_role - initialize role of operation + * @cdns: Pointer to cdns structure * * Returns 0 on success otherwise negative errno */ -static int cdns3_core_init_role(struct cdns3 *cdns) +static int cdns_core_init_role(struct cdns *cdns) { struct device *dev = cdns->dev; enum usb_dr_mode best_dr_mode; @@ -97,13 +95,23 @@ static int cdns3_core_init_role(struct cdns3 *cdns) * can be restricted later depending on strap pin configuration. 
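Aside: the start/stop paths above funnel every role through a small driver table. A minimal sketch of what one entry behind cdns->roles[] looks like — example_role_start()/example_role_stop() and the "example" name are hypothetical, but the fields and the inactive initial state match the structures used in this patch:

static int example_role_start(struct cdns *cdns)
{
	/* bring the hardware role up; cdns_role_start() flips state for us */
	return 0;
}

static void example_role_stop(struct cdns *cdns)
{
	/* quiesce the hardware role */
}

static struct cdns_role_driver example_rdrv = {
	.start = example_role_start,
	.stop  = example_role_stop,
	.state = CDNS_ROLE_STATE_INACTIVE,
	.name  = "example",
};

Registration is then a single assignment, e.g. cdns->roles[USB_ROLE_DEVICE] = &example_rdrv;, after which cdns_role_start()/cdns_role_stop() serialize every transition under cdns->mutex.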
*/ if (dr_mode == USB_DR_MODE_UNKNOWN) { - if (IS_ENABLED(CONFIG_USB_CDNS3_HOST) && - IS_ENABLED(CONFIG_USB_CDNS3_GADGET)) - dr_mode = USB_DR_MODE_OTG; - else if (IS_ENABLED(CONFIG_USB_CDNS3_HOST)) - dr_mode = USB_DR_MODE_HOST; - else if (IS_ENABLED(CONFIG_USB_CDNS3_GADGET)) - dr_mode = USB_DR_MODE_PERIPHERAL; + if (cdns->version == CDNSP_CONTROLLER_V2) { + if (IS_ENABLED(CONFIG_USB_CDNSP_HOST) && + IS_ENABLED(CONFIG_USB_CDNSP_GADGET)) + dr_mode = USB_DR_MODE_OTG; + else if (IS_ENABLED(CONFIG_USB_CDNSP_HOST)) + dr_mode = USB_DR_MODE_HOST; + else if (IS_ENABLED(CONFIG_USB_CDNSP_GADGET)) + dr_mode = USB_DR_MODE_PERIPHERAL; + } else { + if (IS_ENABLED(CONFIG_USB_CDNS3_HOST) && + IS_ENABLED(CONFIG_USB_CDNS3_GADGET)) + dr_mode = USB_DR_MODE_OTG; + else if (IS_ENABLED(CONFIG_USB_CDNS3_HOST)) + dr_mode = USB_DR_MODE_HOST; + else if (IS_ENABLED(CONFIG_USB_CDNS3_GADGET)) + dr_mode = USB_DR_MODE_PERIPHERAL; + } } /* @@ -112,7 +120,7 @@ static int cdns3_core_init_role(struct cdns3 *cdns) */ best_dr_mode = cdns->dr_mode; - ret = cdns3_idle_init(cdns); + ret = cdns_idle_init(cdns); if (ret) return ret; @@ -128,7 +136,14 @@ static int cdns3_core_init_role(struct cdns3 *cdns) dr_mode = best_dr_mode; if (dr_mode == USB_DR_MODE_OTG || dr_mode == USB_DR_MODE_HOST) { - ret = cdns3_host_init(cdns); + if ((cdns->version == CDNSP_CONTROLLER_V2 && + IS_ENABLED(CONFIG_USB_CDNSP_HOST)) || + (cdns->version < CDNSP_CONTROLLER_V2 && + IS_ENABLED(CONFIG_USB_CDNS3_HOST))) + ret = cdns_host_init(cdns); + else + ret = -ENXIO; + if (ret) { dev_err(dev, "Host initialization failed with %d\n", ret); @@ -137,7 +152,11 @@ static int cdns3_core_init_role(struct cdns3 *cdns) } if (dr_mode == USB_DR_MODE_OTG || dr_mode == USB_DR_MODE_PERIPHERAL) { - ret = cdns3_gadget_init(cdns); + if (cdns->gadget_init) + ret = cdns->gadget_init(cdns); + else + ret = -ENXIO; + if (ret) { dev_err(dev, "Device initialization failed with %d\n", ret); @@ -147,28 +166,28 @@ static int cdns3_core_init_role(struct cdns3 *cdns) cdns->dr_mode = dr_mode; - ret = cdns3_drd_update_mode(cdns); + ret = cdns_drd_update_mode(cdns); if (ret) goto err; /* Initialize idle role to start with */ - ret = cdns3_role_start(cdns, USB_ROLE_NONE); + ret = cdns_role_start(cdns, USB_ROLE_NONE); if (ret) goto err; switch (cdns->dr_mode) { case USB_DR_MODE_OTG: - ret = cdns3_hw_role_switch(cdns); + ret = cdns_hw_role_switch(cdns); if (ret) goto err; break; case USB_DR_MODE_PERIPHERAL: - ret = cdns3_role_start(cdns, USB_ROLE_DEVICE); + ret = cdns_role_start(cdns, USB_ROLE_DEVICE); if (ret) goto err; break; case USB_DR_MODE_HOST: - ret = cdns3_role_start(cdns, USB_ROLE_HOST); + ret = cdns_role_start(cdns, USB_ROLE_HOST); if (ret) goto err; break; @@ -179,32 +198,32 @@ static int cdns3_core_init_role(struct cdns3 *cdns) return 0; err: - cdns3_exit_roles(cdns); + cdns_exit_roles(cdns); return ret; } /** - * cdns3_hw_role_state_machine - role switch state machine based on hw events. + * cdns_hw_role_state_machine - role switch state machine based on hw events. * @cdns: Pointer to controller structure. * * Returns next role to be entered based on hw events. 
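Note the indirection introduced here: the core no longer calls cdns3_gadget_init() directly but goes through cdns->gadget_init, so each glue layer installs the initializer that matches its IP generation before entering the core. A hedged sketch of that wiring (the glue probe code itself is not part of this hunk; cdns3_gadget_init()/cdnsp_gadget_init() are the entry points declared in gadget-export.h further down):

/* A cdns3 platform glue driver would presumably set: */
cdns->gadget_init = cdns3_gadget_init;

/* ...while a CDNSP glue driver would instead set: */
cdns->gadget_init = cdnsp_gadget_init;

/* either way, the shared core is then entered the same way: */
ret = cdns_init(cdns);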
*/ -static enum usb_role cdns3_hw_role_state_machine(struct cdns3 *cdns) +static enum usb_role cdns_hw_role_state_machine(struct cdns *cdns) { enum usb_role role = USB_ROLE_NONE; int id, vbus; if (cdns->dr_mode != USB_DR_MODE_OTG) { - if (cdns3_is_host(cdns)) + if (cdns_is_host(cdns)) role = USB_ROLE_HOST; - if (cdns3_is_device(cdns)) + if (cdns_is_device(cdns)) role = USB_ROLE_DEVICE; return role; } - id = cdns3_get_id(cdns); - vbus = cdns3_get_vbus(cdns); + id = cdns_get_id(cdns); + vbus = cdns_get_vbus(cdns); /* * Role change state machine @@ -240,28 +259,28 @@ static enum usb_role cdns3_hw_role_state_machine(struct cdns3 *cdns) return role; } -static int cdns3_idle_role_start(struct cdns3 *cdns) +static int cdns_idle_role_start(struct cdns *cdns) { return 0; } -static void cdns3_idle_role_stop(struct cdns3 *cdns) +static void cdns_idle_role_stop(struct cdns *cdns) { /* Program Lane swap and bring PHY out of RESET */ phy_reset(cdns->usb3_phy); } -static int cdns3_idle_init(struct cdns3 *cdns) +static int cdns_idle_init(struct cdns *cdns) { - struct cdns3_role_driver *rdrv; + struct cdns_role_driver *rdrv; rdrv = devm_kzalloc(cdns->dev, sizeof(*rdrv), GFP_KERNEL); if (!rdrv) return -ENOMEM; - rdrv->start = cdns3_idle_role_start; - rdrv->stop = cdns3_idle_role_stop; - rdrv->state = CDNS3_ROLE_STATE_INACTIVE; + rdrv->start = cdns_idle_role_start; + rdrv->stop = cdns_idle_role_stop; + rdrv->state = CDNS_ROLE_STATE_INACTIVE; rdrv->suspend = NULL; rdrv->resume = NULL; rdrv->name = "idle"; @@ -272,10 +291,10 @@ static int cdns3_idle_init(struct cdns3 *cdns) } /** - * cdns3_hw_role_switch - switch roles based on HW state + * cdns_hw_role_switch - switch roles based on HW state * @cdns: controller */ -int cdns3_hw_role_switch(struct cdns3 *cdns) +int cdns_hw_role_switch(struct cdns *cdns) { enum usb_role real_role, current_role; int ret = 0; @@ -287,22 +306,22 @@ int cdns3_hw_role_switch(struct cdns3 *cdns) pm_runtime_get_sync(cdns->dev); current_role = cdns->role; - real_role = cdns3_hw_role_state_machine(cdns); + real_role = cdns_hw_role_state_machine(cdns); /* Do nothing if nothing changed */ if (current_role == real_role) goto exit; - cdns3_role_stop(cdns); + cdns_role_stop(cdns); dev_dbg(cdns->dev, "Switching role %d -> %d", current_role, real_role); - ret = cdns3_role_start(cdns, real_role); + ret = cdns_role_start(cdns, real_role); if (ret) { /* Back to current role */ dev_err(cdns->dev, "set %d has failed, back to %d\n", real_role, current_role); - ret = cdns3_role_start(cdns, current_role); + ret = cdns_role_start(cdns, current_role); if (ret) dev_err(cdns->dev, "back to %d failed too\n", current_role); @@ -319,15 +338,15 @@ exit: * * Returns role */ -static enum usb_role cdns3_role_get(struct usb_role_switch *sw) +static enum usb_role cdns_role_get(struct usb_role_switch *sw) { - struct cdns3 *cdns = usb_role_switch_get_drvdata(sw); + struct cdns *cdns = usb_role_switch_get_drvdata(sw); return cdns->role; } /** - * cdns3_role_set - set current role of controller. + * cdns_role_set - set current role of controller. 
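Stripped of the OTG hysteresis, the decision encoded above reduces to "ID pin selects host, otherwise VBUS presence selects device". A simplified sketch — the real state machine also weighs the current role before switching, and example_pick_role() is illustrative only:

static enum usb_role example_pick_role(struct cdns *cdns)
{
	if (cdns_get_id(cdns) == CDNS3_ID_HOST)
		return USB_ROLE_HOST;
	if (cdns_get_vbus(cdns))
		return USB_ROLE_DEVICE;
	return USB_ROLE_NONE;
}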
* * @sw: pointer to USB role switch structure * @role: the role to be set @@ -335,9 +354,9 @@ static enum usb_role cdns3_role_get(struct usb_role_switch *sw) * - Role switch for dual-role devices * - USB_ROLE_GADGET <--> USB_ROLE_NONE for peripheral-only devices */ -static int cdns3_role_set(struct usb_role_switch *sw, enum usb_role role) +static int cdns_role_set(struct usb_role_switch *sw, enum usb_role role) { - struct cdns3 *cdns = usb_role_switch_get_drvdata(sw); + struct cdns *cdns = usb_role_switch_get_drvdata(sw); int ret = 0; pm_runtime_get_sync(cdns->dev); @@ -365,8 +384,8 @@ static int cdns3_role_set(struct usb_role_switch *sw, enum usb_role role) } } - cdns3_role_stop(cdns); - ret = cdns3_role_start(cdns, role); + cdns_role_stop(cdns); + ret = cdns_role_start(cdns, role); if (ret) dev_err(cdns->dev, "set role %d has failed\n", role); @@ -375,37 +394,17 @@ pm_put: return ret; } -static int set_phy_power_on(struct cdns3 *cdns) -{ - int ret; - - ret = phy_power_on(cdns->usb2_phy); - if (ret) - return ret; - - ret = phy_power_on(cdns->usb3_phy); - if (ret) - phy_power_off(cdns->usb2_phy); - - return ret; -} - -static void set_phy_power_off(struct cdns3 *cdns) -{ - phy_power_off(cdns->usb3_phy); - phy_power_off(cdns->usb2_phy); -} /** - * cdns3_wakeup_irq - interrupt handler for wakeup events - * @irq: irq number for cdns3 core device - * @data: structure of cdns3 + * cdns_wakeup_irq - interrupt handler for wakeup events + * @irq: irq number for cdns3/cdnsp core device + * @data: structure of cdns * * Returns IRQ_HANDLED or IRQ_NONE */ -static irqreturn_t cdns3_wakeup_irq(int irq, void *data) +static irqreturn_t cdns_wakeup_irq(int irq, void *data) { - struct cdns3 *cdns = data; + struct cdns *cdns = data; if (cdns->in_lpm) { disable_irq_nosync(irq); @@ -420,17 +419,14 @@ } /** - * cdns3_probe - probe for cdns3 core device - * @pdev: Pointer to cdns3 core platform device + * cdns_init - probe for cdns3/cdnsp core device + * @cdns: Pointer to cdns structure.
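With set_phy_power_on()/set_phy_power_off() deleted above, powering the PHYs up implicitly becomes the glue layer's job before it ever calls cdns_init(). A minimal ordering sketch using the generic PHY API — example_glue_bringup() is hypothetical and the usb3 pair is elided for brevity:

static int example_glue_bringup(struct cdns *cdns)
{
	int ret;

	ret = phy_init(cdns->usb2_phy);
	if (ret)
		return ret;

	ret = phy_power_on(cdns->usb2_phy);
	if (ret) {
		phy_exit(cdns->usb2_phy);
		return ret;
	}

	/* repeat the init/power_on pair for cdns->usb3_phy, then: */
	return cdns_init(cdns);	/* the core no longer touches PHYs itself */
}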
* * Returns 0 on success otherwise negative errno */ -static int cdns3_probe(struct platform_device *pdev) +int cdns_init(struct cdns *cdns) { - struct device *dev = &pdev->dev; - struct resource *res; - struct cdns3 *cdns; - void __iomem *regs; + struct device *dev = cdns->dev; int ret; ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)); @@ -439,259 +435,78 @@ static int cdns3_probe(struct platform_device *pdev) return ret; } - cdns = devm_kzalloc(dev, sizeof(*cdns), GFP_KERNEL); - if (!cdns) - return -ENOMEM; - - cdns->dev = dev; - cdns->pdata = dev_get_platdata(dev); - - platform_set_drvdata(pdev, cdns); - - res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "host"); - if (!res) { - dev_err(dev, "missing host IRQ\n"); - return -ENODEV; - } - - cdns->xhci_res[0] = *res; - - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "xhci"); - if (!res) { - dev_err(dev, "couldn't get xhci resource\n"); - return -ENXIO; - } - - cdns->xhci_res[1] = *res; - - cdns->dev_irq = platform_get_irq_byname(pdev, "peripheral"); - if (cdns->dev_irq < 0) - return cdns->dev_irq; - - regs = devm_platform_ioremap_resource_byname(pdev, "dev"); - if (IS_ERR(regs)) - return PTR_ERR(regs); - cdns->dev_regs = regs; - - cdns->otg_irq = platform_get_irq_byname(pdev, "otg"); - if (cdns->otg_irq < 0) - return cdns->otg_irq; - - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "otg"); - if (!res) { - dev_err(dev, "couldn't get otg resource\n"); - return -ENXIO; - } - - cdns->phyrst_a_enable = device_property_read_bool(dev, "cdns,phyrst-a-enable"); - - cdns->otg_res = *res; - - cdns->wakeup_irq = platform_get_irq_byname_optional(pdev, "wakeup"); - if (cdns->wakeup_irq == -EPROBE_DEFER) - return cdns->wakeup_irq; - else if (cdns->wakeup_irq == 0) - return -EINVAL; - - if (cdns->wakeup_irq < 0) { - dev_dbg(dev, "couldn't get wakeup irq\n"); - cdns->wakeup_irq = 0x0; - } - mutex_init(&cdns->mutex); - cdns->usb2_phy = devm_phy_optional_get(dev, "cdns3,usb2-phy"); - if (IS_ERR(cdns->usb2_phy)) - return PTR_ERR(cdns->usb2_phy); - - ret = phy_init(cdns->usb2_phy); - if (ret) - return ret; - - cdns->usb3_phy = devm_phy_optional_get(dev, "cdns3,usb3-phy"); - if (IS_ERR(cdns->usb3_phy)) - return PTR_ERR(cdns->usb3_phy); - - ret = phy_init(cdns->usb3_phy); - if (ret) - goto err1; - - ret = set_phy_power_on(cdns); - if (ret) - goto err2; - if (device_property_read_bool(dev, "usb-role-switch")) { struct usb_role_switch_desc sw_desc = { }; - sw_desc.set = cdns3_role_set; - sw_desc.get = cdns3_role_get; + sw_desc.set = cdns_role_set; + sw_desc.get = cdns_role_get; sw_desc.allow_userspace_control = true; sw_desc.driver_data = cdns; sw_desc.fwnode = dev->fwnode; cdns->role_sw = usb_role_switch_register(dev, &sw_desc); if (IS_ERR(cdns->role_sw)) { - ret = PTR_ERR(cdns->role_sw); dev_warn(dev, "Unable to register Role Switch\n"); - goto err3; + return PTR_ERR(cdns->role_sw); } } if (cdns->wakeup_irq) { ret = devm_request_irq(cdns->dev, cdns->wakeup_irq, - cdns3_wakeup_irq, + cdns_wakeup_irq, IRQF_SHARED, dev_name(cdns->dev), cdns); if (ret) { dev_err(cdns->dev, "couldn't register wakeup irq handler\n"); - goto err4; + goto role_switch_unregister; } } - ret = cdns3_drd_init(cdns); + ret = cdns_drd_init(cdns); if (ret) - goto err4; + goto init_failed; - ret = cdns3_core_init_role(cdns); + ret = cdns_core_init_role(cdns); if (ret) - goto err4; + goto init_failed; spin_lock_init(&cdns->lock); - device_set_wakeup_capable(dev, true); - pm_runtime_set_active(dev); - pm_runtime_enable(dev); - if (!(cdns->pdata && 
(cdns->pdata->quirks & CDNS3_DEFAULT_PM_RUNTIME_ALLOW))) - pm_runtime_forbid(dev); - /* - * The controller needs less time between bus and controller suspend, - * and we also needs a small delay to avoid frequently entering low - * power mode. - */ - pm_runtime_set_autosuspend_delay(dev, 20); - pm_runtime_mark_last_busy(dev); - pm_runtime_use_autosuspend(dev); dev_dbg(dev, "Cadence USB3 core: probe succeed\n"); return 0; -err4: - cdns3_drd_exit(cdns); +init_failed: + cdns_drd_exit(cdns); +role_switch_unregister: if (cdns->role_sw) usb_role_switch_unregister(cdns->role_sw); -err3: - set_phy_power_off(cdns); -err2: - phy_exit(cdns->usb3_phy); -err1: - phy_exit(cdns->usb2_phy); return ret; } +EXPORT_SYMBOL_GPL(cdns_init); /** - * cdns3_remove - unbind drd driver and clean up - * @pdev: Pointer to Linux platform device + * cdns_remove - unbind drd driver and clean up + * @cdns: Pointer to cdns structure. * * Returns 0 on success otherwise negative errno */ -static int cdns3_remove(struct platform_device *pdev) +int cdns_remove(struct cdns *cdns) { - struct cdns3 *cdns = platform_get_drvdata(pdev); - - pm_runtime_get_sync(&pdev->dev); - pm_runtime_disable(&pdev->dev); - pm_runtime_put_noidle(&pdev->dev); - cdns3_exit_roles(cdns); + cdns_exit_roles(cdns); usb_role_switch_unregister(cdns->role_sw); - set_phy_power_off(cdns); - phy_exit(cdns->usb2_phy); - phy_exit(cdns->usb3_phy); - return 0; -} - -#ifdef CONFIG_PM - -static int cdns3_set_platform_suspend(struct device *dev, - bool suspend, bool wakeup) -{ - struct cdns3 *cdns = dev_get_drvdata(dev); - int ret = 0; - - if (cdns->pdata && cdns->pdata->platform_suspend) - ret = cdns->pdata->platform_suspend(dev, suspend, wakeup); - - return ret; -} - -static int cdns3_controller_suspend(struct device *dev, pm_message_t msg) -{ - struct cdns3 *cdns = dev_get_drvdata(dev); - bool wakeup; - unsigned long flags; - - if (cdns->in_lpm) - return 0; - - if (PMSG_IS_AUTO(msg)) - wakeup = true; - else - wakeup = device_may_wakeup(dev); - - cdns3_set_platform_suspend(cdns->dev, true, wakeup); - set_phy_power_off(cdns); - spin_lock_irqsave(&cdns->lock, flags); - cdns->in_lpm = true; - spin_unlock_irqrestore(&cdns->lock, flags); - dev_dbg(cdns->dev, "%s ends\n", __func__); return 0; } +EXPORT_SYMBOL_GPL(cdns_remove); -static int cdns3_controller_resume(struct device *dev, pm_message_t msg) -{ - struct cdns3 *cdns = dev_get_drvdata(dev); - int ret; - unsigned long flags; - - if (!cdns->in_lpm) - return 0; - - ret = set_phy_power_on(cdns); - if (ret) - return ret; - - cdns3_set_platform_suspend(cdns->dev, false, false); - - spin_lock_irqsave(&cdns->lock, flags); - if (cdns->roles[cdns->role]->resume && !PMSG_IS_AUTO(msg)) - cdns->roles[cdns->role]->resume(cdns, false); - - cdns->in_lpm = false; - spin_unlock_irqrestore(&cdns->lock, flags); - if (cdns->wakeup_pending) { - cdns->wakeup_pending = false; - enable_irq(cdns->wakeup_irq); - } - dev_dbg(cdns->dev, "%s ends\n", __func__); - - return ret; -} - -static int cdns3_runtime_suspend(struct device *dev) -{ - return cdns3_controller_suspend(dev, PMSG_AUTO_SUSPEND); -} - -static int cdns3_runtime_resume(struct device *dev) -{ - return cdns3_controller_resume(dev, PMSG_AUTO_RESUME); -} #ifdef CONFIG_PM_SLEEP - -static int cdns3_suspend(struct device *dev) +int cdns_suspend(struct cdns *cdns) { - struct cdns3 *cdns = dev_get_drvdata(dev); + struct device *dev = cdns->dev; unsigned long flags; if (pm_runtime_status_suspended(dev)) @@ -703,52 +518,30 @@ static int cdns3_suspend(struct device *dev) 
spin_unlock_irqrestore(&cdns->lock, flags); } - return cdns3_controller_suspend(dev, PMSG_SUSPEND); + return 0; } +EXPORT_SYMBOL_GPL(cdns_suspend); -static int cdns3_resume(struct device *dev) +int cdns_resume(struct cdns *cdns, u8 set_active) { - int ret; + struct device *dev = cdns->dev; - ret = cdns3_controller_resume(dev, PMSG_RESUME); - if (ret) - return ret; + if (cdns->roles[cdns->role]->resume) + cdns->roles[cdns->role]->resume(cdns, false); - pm_runtime_disable(dev); - pm_runtime_set_active(dev); - pm_runtime_enable(dev); + if (set_active) { + pm_runtime_disable(dev); + pm_runtime_set_active(dev); + pm_runtime_enable(dev); + } - return ret; + return 0; } +EXPORT_SYMBOL_GPL(cdns_resume); #endif /* CONFIG_PM_SLEEP */ -#endif /* CONFIG_PM */ - -static const struct dev_pm_ops cdns3_pm_ops = { - SET_SYSTEM_SLEEP_PM_OPS(cdns3_suspend, cdns3_resume) - SET_RUNTIME_PM_OPS(cdns3_runtime_suspend, cdns3_runtime_resume, NULL) -}; - -#ifdef CONFIG_OF -static const struct of_device_id of_cdns3_match[] = { - { .compatible = "cdns,usb3" }, - { }, -}; -MODULE_DEVICE_TABLE(of, of_cdns3_match); -#endif - -static struct platform_driver cdns3_driver = { - .probe = cdns3_probe, - .remove = cdns3_remove, - .driver = { - .name = "cdns-usb3", - .of_match_table = of_match_ptr(of_cdns3_match), - .pm = &cdns3_pm_ops, - }, -}; - -module_platform_driver(cdns3_driver); - -MODULE_ALIAS("platform:cdns3"); + +MODULE_AUTHOR("Peter Chen <peter.chen@nxp.com>"); MODULE_AUTHOR("Pawel Laszczak <pawell@cadence.com>"); -MODULE_LICENSE("GPL v2"); -MODULE_DESCRIPTION("Cadence USB3 DRD Controller Driver"); +MODULE_AUTHOR("Roger Quadros <rogerq@ti.com>"); +MODULE_DESCRIPTION("Cadence USBSS and USBSSP DRD Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/usb/cdns3/core.h b/drivers/usb/cdns3/core.h index 3176f924293a..ab0cb68acd23 100644 --- a/drivers/usb/cdns3/core.h +++ b/drivers/usb/cdns3/core.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* - * Cadence USBSS DRD Header File. + * Cadence USBSS and USBSSP DRD Header File. * * Copyright (C) 2017-2018 NXP * Copyright (C) 2018-2019 Cadence. @@ -14,10 +14,10 @@ #ifndef __LINUX_CDNS3_CORE_H #define __LINUX_CDNS3_CORE_H -struct cdns3; +struct cdns; /** - * struct cdns3_role_driver - host/gadget role driver + * struct cdns_role_driver - host/gadget role driver * @start: start this role * @stop: stop this role * @suspend: suspend callback for this role @@ -26,18 +26,18 @@ struct cdns3; * @name: role name string (host/gadget) * @state: current state */ -struct cdns3_role_driver { - int (*start)(struct cdns3 *cdns); - void (*stop)(struct cdns3 *cdns); - int (*suspend)(struct cdns3 *cdns, bool do_wakeup); - int (*resume)(struct cdns3 *cdns, bool hibernated); +struct cdns_role_driver { + int (*start)(struct cdns *cdns); + void (*stop)(struct cdns *cdns); + int (*suspend)(struct cdns *cdns, bool do_wakeup); + int (*resume)(struct cdns *cdns, bool hibernated); const char *name; -#define CDNS3_ROLE_STATE_INACTIVE 0 -#define CDNS3_ROLE_STATE_ACTIVE 1 +#define CDNS_ROLE_STATE_INACTIVE 0 +#define CDNS_ROLE_STATE_ACTIVE 1 int state; }; -#define CDNS3_XHCI_RESOURCES_NUM 2 +#define CDNS_XHCI_RESOURCES_NUM 2 struct cdns3_platform_data { int (*platform_suspend)(struct device *dev, @@ -47,7 +47,7 @@ struct cdns3_platform_data { }; /** - * struct cdns3 - Representation of Cadence USB3 DRD controller. + * struct cdns - Representation of Cadence USB3 DRD controller. 
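Because cdns_suspend()/cdns_resume() are now exported (with inline stubs when CONFIG_PM_SLEEP is off, per core.h below), system sleep ends up driven from the glue layer's dev_pm_ops. A hedged sketch of that shape — the example_* names are invented, and real glue code also handles PHYs, clocks and platform quirks around these calls:

static int __maybe_unused example_plat_suspend(struct device *dev)
{
	struct cdns *cdns = dev_get_drvdata(dev);

	/* platform-specific quiesce would go here */
	return cdns_suspend(cdns);
}

static int __maybe_unused example_plat_resume(struct device *dev)
{
	struct cdns *cdns = dev_get_drvdata(dev);

	/* second argument asks the core to mark the device active again */
	return cdns_resume(cdns, 1);
}

static SIMPLE_DEV_PM_OPS(example_pm_ops, example_plat_suspend,
			 example_plat_resume);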
* @dev: pointer to Cadence device struct * @xhci_regs: pointer to base of xhci registers * @xhci_res: the resource for xhci @@ -55,14 +55,16 @@ struct cdns3_platform_data { * @otg_res: the resource for otg * @otg_v0_regs: pointer to base of v0 otg registers * @otg_v1_regs: pointer to base of v1 otg registers + * @otg_cdnsp_regs: pointer to base of CDNSP otg registers * @otg_regs: pointer to base of otg registers + * @otg_irq_regs: pointer to interrupt registers * @otg_irq: irq number for otg controller * @dev_irq: irq number for device controller * @wakeup_irq: irq number for wakeup event, it is optional * @roles: array of supported roles for this controller * @role: current role - * @host_dev: the child host device pointer for cdns3 core - * @gadget_dev: the child gadget device pointer for cdns3 core + * @host_dev: the child host device pointer for cdns core + * @gadget_dev: the child gadget device pointer * @usb2_phy: pointer to USB2 PHY * @usb3_phy: pointer to USB3 PHY * @mutex: the mutex for concurrent code at driver @@ -76,29 +78,33 @@ struct cdns3_platform_data { * @pdata: platform data from glue layer * @lock: spinlock structure * @xhci_plat_data: xhci private data structure pointer + * @gadget_init: pointer to gadget initialization function */ -struct cdns3 { +struct cdns { struct device *dev; void __iomem *xhci_regs; - struct resource xhci_res[CDNS3_XHCI_RESOURCES_NUM]; + struct resource xhci_res[CDNS_XHCI_RESOURCES_NUM]; struct cdns3_usb_regs __iomem *dev_regs; - struct resource otg_res; - struct cdns3_otg_legacy_regs *otg_v0_regs; - struct cdns3_otg_regs *otg_v1_regs; - struct cdns3_otg_common_regs *otg_regs; + struct resource otg_res; + struct cdns3_otg_legacy_regs __iomem *otg_v0_regs; + struct cdns3_otg_regs __iomem *otg_v1_regs; + struct cdnsp_otg_regs __iomem *otg_cdnsp_regs; + struct cdns_otg_common_regs __iomem *otg_regs; + struct cdns_otg_irq_regs __iomem *otg_irq_regs; #define CDNS3_CONTROLLER_V0 0 #define CDNS3_CONTROLLER_V1 1 +#define CDNSP_CONTROLLER_V2 2 u32 version; bool phyrst_a_enable; int otg_irq; int dev_irq; int wakeup_irq; - struct cdns3_role_driver *roles[USB_ROLE_DEVICE + 1]; + struct cdns_role_driver *roles[USB_ROLE_DEVICE + 1]; enum usb_role role; struct platform_device *host_dev; - struct cdns3_device *gadget_dev; + void *gadget_dev; struct phy *usb2_phy; struct phy *usb3_phy; /* mutext used in workqueue*/ @@ -110,8 +116,21 @@ struct cdns3 { struct cdns3_platform_data *pdata; spinlock_t lock; struct xhci_plat_priv *xhci_plat_data; + + int (*gadget_init)(struct cdns *cdns); }; -int cdns3_hw_role_switch(struct cdns3 *cdns); +int cdns_hw_role_switch(struct cdns *cdns); +int cdns_init(struct cdns *cdns); +int cdns_remove(struct cdns *cdns); +#ifdef CONFIG_PM_SLEEP +int cdns_resume(struct cdns *cdns, u8 set_active); +int cdns_suspend(struct cdns *cdns); +#else /* CONFIG_PM_SLEEP */ +static inline int cdns_resume(struct cdns *cdns, u8 set_active) +{ return 0; } +static inline int cdns_suspend(struct cdns *cdns) +{ return 0; } +#endif /* CONFIG_PM_SLEEP */ #endif /* __LINUX_CDNS3_CORE_H */ diff --git a/drivers/usb/cdns3/drd.c b/drivers/usb/cdns3/drd.c index 38ccd29e4cde..fa5318ade3e1 100644 --- a/drivers/usb/cdns3/drd.c +++ b/drivers/usb/cdns3/drd.c @@ -1,35 +1,33 @@ // SPDX-License-Identifier: GPL-2.0 /* - * Cadence USBSS DRD Driver. + * Cadence USBSS and USBSSP DRD Driver. * - * Copyright (C) 2018-2019 Cadence. + * Copyright (C) 2018-2020 Cadence. 
* Copyright (C) 2019 Texas Instruments * * Author: Pawel Laszczak <pawell@cadence.com> * Roger Quadros <rogerq@ti.com> * - * */ #include <linux/kernel.h> #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/iopoll.h> #include <linux/usb/otg.h> -#include <linux/phy/phy.h> -#include "gadget.h" #include "drd.h" #include "core.h" /** - * cdns3_set_mode - change mode of OTG Core + * cdns_set_mode - change mode of OTG Core * @cdns: pointer to context structure * @mode: selected mode from cdns_role * * Returns 0 on success otherwise negative errno */ -int cdns3_set_mode(struct cdns3 *cdns, enum usb_dr_mode mode) +static int cdns_set_mode(struct cdns *cdns, enum usb_dr_mode mode) { + void __iomem *override_reg; u32 reg; switch (mode) { @@ -39,11 +37,24 @@ int cdns3_set_mode(struct cdns3 *cdns, enum usb_dr_mode mode) break; case USB_DR_MODE_OTG: dev_dbg(cdns->dev, "Set controller to OTG mode\n"); - if (cdns->version == CDNS3_CONTROLLER_V1) { - reg = readl(&cdns->otg_v1_regs->override); + + if (cdns->version == CDNSP_CONTROLLER_V2) + override_reg = &cdns->otg_cdnsp_regs->override; + else if (cdns->version == CDNS3_CONTROLLER_V1) + override_reg = &cdns->otg_v1_regs->override; + else + override_reg = &cdns->otg_v0_regs->ctrl1; + + reg = readl(override_reg); + + if (cdns->version != CDNS3_CONTROLLER_V0) reg |= OVERRIDE_IDPULLUP; - writel(reg, &cdns->otg_v1_regs->override); + else + reg |= OVERRIDE_IDPULLUP_V0; + + writel(reg, override_reg); + if (cdns->version == CDNS3_CONTROLLER_V1) { /* * Enable work around feature built into the * controller to address issue with RX Sensitivity @@ -55,10 +66,6 @@ int cdns3_set_mode(struct cdns3 *cdns, enum usb_dr_mode mode) reg |= PHYRST_CFG_PHYRST_A_ENABLE; writel(reg, &cdns->otg_v1_regs->phyrst_cfg); } - } else { - reg = readl(&cdns->otg_v0_regs->ctrl1); - reg |= OVERRIDE_IDPULLUP_V0; - writel(reg, &cdns->otg_v0_regs->ctrl1); } /* @@ -76,7 +83,7 @@ int cdns3_set_mode(struct cdns3 *cdns, enum usb_dr_mode mode) return 0; } -int cdns3_get_id(struct cdns3 *cdns) +int cdns_get_id(struct cdns *cdns) { int id; @@ -86,7 +93,7 @@ int cdns3_get_id(struct cdns3 *cdns) return id; } -int cdns3_get_vbus(struct cdns3 *cdns) +int cdns_get_vbus(struct cdns *cdns) { int vbus; @@ -96,64 +103,95 @@ int cdns3_get_vbus(struct cdns3 *cdns) return vbus; } -bool cdns3_is_host(struct cdns3 *cdns) +void cdns_clear_vbus(struct cdns *cdns) +{ + u32 reg; + + if (cdns->version != CDNSP_CONTROLLER_V2) + return; + + reg = readl(&cdns->otg_cdnsp_regs->override); + reg |= OVERRIDE_SESS_VLD_SEL; + writel(reg, &cdns->otg_cdnsp_regs->override); +} +EXPORT_SYMBOL_GPL(cdns_clear_vbus); + +void cdns_set_vbus(struct cdns *cdns) +{ + u32 reg; + + if (cdns->version != CDNSP_CONTROLLER_V2) + return; + + reg = readl(&cdns->otg_cdnsp_regs->override); + reg &= ~OVERRIDE_SESS_VLD_SEL; + writel(reg, &cdns->otg_cdnsp_regs->override); +} +EXPORT_SYMBOL_GPL(cdns_set_vbus); + +bool cdns_is_host(struct cdns *cdns) { if (cdns->dr_mode == USB_DR_MODE_HOST) return true; - else if (cdns3_get_id(cdns) == CDNS3_ID_HOST) + else if (cdns_get_id(cdns) == CDNS3_ID_HOST) return true; return false; } -bool cdns3_is_device(struct cdns3 *cdns) +bool cdns_is_device(struct cdns *cdns) { if (cdns->dr_mode == USB_DR_MODE_PERIPHERAL) return true; else if (cdns->dr_mode == USB_DR_MODE_OTG) - if (cdns3_get_id(cdns) == CDNS3_ID_PERIPHERAL) + if (cdns_get_id(cdns) == CDNS3_ID_PERIPHERAL) return true; return false; } /** - * cdns3_otg_disable_irq - Disable all OTG interrupts + * cdns_otg_disable_irq - Disable all OTG 
interrupts * @cdns: Pointer to controller context structure */ -static void cdns3_otg_disable_irq(struct cdns3 *cdns) +static void cdns_otg_disable_irq(struct cdns *cdns) { - writel(0, &cdns->otg_regs->ien); + writel(0, &cdns->otg_irq_regs->ien); } /** - * cdns3_otg_enable_irq - enable id and sess_valid interrupts + * cdns_otg_enable_irq - enable id and sess_valid interrupts * @cdns: Pointer to controller context structure */ -static void cdns3_otg_enable_irq(struct cdns3 *cdns) +static void cdns_otg_enable_irq(struct cdns *cdns) { writel(OTGIEN_ID_CHANGE_INT | OTGIEN_VBUSVALID_RISE_INT | - OTGIEN_VBUSVALID_FALL_INT, &cdns->otg_regs->ien); + OTGIEN_VBUSVALID_FALL_INT, &cdns->otg_irq_regs->ien); } /** - * cdns3_drd_host_on - start host. + * cdns_drd_host_on - start host. * @cdns: Pointer to controller context structure. * * Returns 0 on success otherwise negative errno. */ -int cdns3_drd_host_on(struct cdns3 *cdns) +int cdns_drd_host_on(struct cdns *cdns) { - u32 val; + u32 val, ready_bit; int ret; /* Enable host mode. */ writel(OTGCMD_HOST_BUS_REQ | OTGCMD_OTG_DIS, &cdns->otg_regs->cmd); + if (cdns->version == CDNSP_CONTROLLER_V2) + ready_bit = OTGSTS_CDNSP_XHCI_READY; + else + ready_bit = OTGSTS_CDNS3_XHCI_READY; + dev_dbg(cdns->dev, "Waiting till Host mode is turned on\n"); ret = readl_poll_timeout_atomic(&cdns->otg_regs->sts, val, - val & OTGSTS_XHCI_READY, 1, 100000); + val & ready_bit, 1, 100000); if (ret) dev_err(cdns->dev, "timeout waiting for xhci_ready\n"); @@ -163,10 +201,10 @@ int cdns3_drd_host_on(struct cdns3 *cdns) } /** - * cdns3_drd_host_off - stop host. + * cdns_drd_host_off - stop host. * @cdns: Pointer to controller context structure. */ -void cdns3_drd_host_off(struct cdns3 *cdns) +void cdns_drd_host_off(struct cdns *cdns) { u32 val; @@ -182,24 +220,29 @@ void cdns3_drd_host_off(struct cdns3 *cdns) } /** - * cdns3_drd_gadget_on - start gadget. + * cdns_drd_gadget_on - start gadget. * @cdns: Pointer to controller context structure. * * Returns 0 on success otherwise negative errno */ -int cdns3_drd_gadget_on(struct cdns3 *cdns) +int cdns_drd_gadget_on(struct cdns *cdns) { - int ret, val; u32 reg = OTGCMD_OTG_DIS; + u32 ready_bit; + int ret, val; /* switch OTG core */ writel(OTGCMD_DEV_BUS_REQ | reg, &cdns->otg_regs->cmd); dev_dbg(cdns->dev, "Waiting till Device mode is turned on\n"); + if (cdns->version == CDNSP_CONTROLLER_V2) + ready_bit = OTGSTS_CDNSP_DEV_READY; + else + ready_bit = OTGSTS_CDNS3_DEV_READY; + ret = readl_poll_timeout_atomic(&cdns->otg_regs->sts, val, - val & OTGSTS_DEV_READY, - 1, 100000); + val & ready_bit, 1, 100000); if (ret) { dev_err(cdns->dev, "timeout waiting for dev_ready\n"); return ret; @@ -208,12 +251,13 @@ int cdns3_drd_gadget_on(struct cdns3 *cdns) phy_set_mode(cdns->usb3_phy, PHY_MODE_USB_DEVICE); return 0; } +EXPORT_SYMBOL_GPL(cdns_drd_gadget_on); /** - * cdns3_drd_gadget_off - stop gadget. + * cdns_drd_gadget_off - stop gadget. * @cdns: Pointer to controller context structure. 
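One detail worth calling out in the helpers above: masking and acknowledging now go through cdns->otg_irq_regs rather than otg_regs, because the ien/ivect pair sits at different offsets on CDNS3 and CDNSP parts. Common code can then service either layout blindly, along these lines (an illustrative restatement of the helpers, not new driver logic):

struct cdns_otg_irq_regs __iomem *irqs = cdns->otg_irq_regs;

writel(0, &irqs->ien);		/* mask every OTG interrupt source */
writel(~0, &irqs->ivect);	/* acknowledge anything still pending */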
*/ -void cdns3_drd_gadget_off(struct cdns3 *cdns) +void cdns_drd_gadget_off(struct cdns *cdns) { u32 val; @@ -231,49 +275,50 @@ void cdns3_drd_gadget_off(struct cdns3 *cdns) 1, 2000000); phy_set_mode(cdns->usb3_phy, PHY_MODE_INVALID); } +EXPORT_SYMBOL_GPL(cdns_drd_gadget_off); /** - * cdns3_init_otg_mode - initialize drd controller + * cdns_init_otg_mode - initialize drd controller * @cdns: Pointer to controller context structure * * Returns 0 on success otherwise negative errno */ -static int cdns3_init_otg_mode(struct cdns3 *cdns) +static int cdns_init_otg_mode(struct cdns *cdns) { int ret; - cdns3_otg_disable_irq(cdns); + cdns_otg_disable_irq(cdns); /* clear all interrupts */ - writel(~0, &cdns->otg_regs->ivect); + writel(~0, &cdns->otg_irq_regs->ivect); - ret = cdns3_set_mode(cdns, USB_DR_MODE_OTG); + ret = cdns_set_mode(cdns, USB_DR_MODE_OTG); if (ret) return ret; - cdns3_otg_enable_irq(cdns); + cdns_otg_enable_irq(cdns); return 0; } /** - * cdns3_drd_update_mode - initialize mode of operation + * cdns_drd_update_mode - initialize mode of operation * @cdns: Pointer to controller context structure * * Returns 0 on success otherwise negative errno */ -int cdns3_drd_update_mode(struct cdns3 *cdns) +int cdns_drd_update_mode(struct cdns *cdns) { int ret; switch (cdns->dr_mode) { case USB_DR_MODE_PERIPHERAL: - ret = cdns3_set_mode(cdns, USB_DR_MODE_PERIPHERAL); + ret = cdns_set_mode(cdns, USB_DR_MODE_PERIPHERAL); break; case USB_DR_MODE_HOST: - ret = cdns3_set_mode(cdns, USB_DR_MODE_HOST); + ret = cdns_set_mode(cdns, USB_DR_MODE_HOST); break; case USB_DR_MODE_OTG: - ret = cdns3_init_otg_mode(cdns); + ret = cdns_init_otg_mode(cdns); break; default: dev_err(cdns->dev, "Unsupported mode of operation %d\n", @@ -284,27 +329,27 @@ int cdns3_drd_update_mode(struct cdns3 *cdns) return ret; } -static irqreturn_t cdns3_drd_thread_irq(int irq, void *data) +static irqreturn_t cdns_drd_thread_irq(int irq, void *data) { - struct cdns3 *cdns = data; + struct cdns *cdns = data; - cdns3_hw_role_switch(cdns); + cdns_hw_role_switch(cdns); return IRQ_HANDLED; } /** - * cdns3_drd_irq - interrupt handler for OTG events + * cdns_drd_irq - interrupt handler for OTG events * - * @irq: irq number for cdns3 core device - * @data: structure of cdns3 + * @irq: irq number for cdns core device + * @data: structure of cdns * * Returns IRQ_HANDLED or IRQ_NONE */ -static irqreturn_t cdns3_drd_irq(int irq, void *data) +static irqreturn_t cdns_drd_irq(int irq, void *data) { irqreturn_t ret = IRQ_NONE; - struct cdns3 *cdns = data; + struct cdns *cdns = data; u32 reg; if (cdns->dr_mode != USB_DR_MODE_OTG) @@ -313,30 +358,30 @@ static irqreturn_t cdns3_drd_irq(int irq, void *data) if (cdns->in_lpm) return ret; - reg = readl(&cdns->otg_regs->ivect); + reg = readl(&cdns->otg_irq_regs->ivect); if (!reg) return IRQ_NONE; if (reg & OTGIEN_ID_CHANGE_INT) { dev_dbg(cdns->dev, "OTG IRQ: new ID: %d\n", - cdns3_get_id(cdns)); + cdns_get_id(cdns)); ret = IRQ_WAKE_THREAD; } if (reg & (OTGIEN_VBUSVALID_RISE_INT | OTGIEN_VBUSVALID_FALL_INT)) { dev_dbg(cdns->dev, "OTG IRQ: new VBUS: %d\n", - cdns3_get_vbus(cdns)); + cdns_get_vbus(cdns)); ret = IRQ_WAKE_THREAD; } - writel(~0, &cdns->otg_regs->ivect); + writel(~0, &cdns->otg_irq_regs->ivect); return ret; } -int cdns3_drd_init(struct cdns3 *cdns) +int cdns_drd_init(struct cdns *cdns) { void __iomem *regs; u32 state; @@ -347,28 +392,43 @@ int cdns3_drd_init(struct cdns3 *cdns) return PTR_ERR(regs); /* Detection of DRD version. Controller has been released - * in two versions. 
Both are similar, but they have same changes - * in register maps. - * The first register in old version is command register and it's read - * only, so driver should read 0 from it. On the other hand, in v1 - * the first register contains device ID number which is not set to 0. - * Driver uses this fact to detect the proper version of + * in three versions. All are very similar and are software compatible, + * but they have some changes in register maps. + * The first register in the oldest version is the command register and + * it's read only, so the driver should read 0 from it. On the other hand, + * in v1 and v2 the first register contains the device ID number, which is + * not set to 0. The driver uses this fact to detect the proper version of * controller. */ cdns->otg_v0_regs = regs; if (!readl(&cdns->otg_v0_regs->cmd)) { cdns->version = CDNS3_CONTROLLER_V0; cdns->otg_v1_regs = NULL; + cdns->otg_cdnsp_regs = NULL; cdns->otg_regs = regs; + cdns->otg_irq_regs = (struct cdns_otg_irq_regs __iomem *) + &cdns->otg_v0_regs->ien; writel(1, &cdns->otg_v0_regs->simulate); dev_dbg(cdns->dev, "DRD version v0 (%08x)\n", readl(&cdns->otg_v0_regs->version)); } else { cdns->otg_v0_regs = NULL; cdns->otg_v1_regs = regs; - cdns->otg_regs = (void *)&cdns->otg_v1_regs->cmd; - cdns->version = CDNS3_CONTROLLER_V1; - writel(1, &cdns->otg_v1_regs->simulate); + cdns->otg_cdnsp_regs = regs; + + cdns->otg_regs = (void __iomem *)&cdns->otg_v1_regs->cmd; + + if (readl(&cdns->otg_cdnsp_regs->did) == OTG_CDNSP_DID) { + cdns->otg_irq_regs = (struct cdns_otg_irq_regs __iomem *) + &cdns->otg_cdnsp_regs->ien; + cdns->version = CDNSP_CONTROLLER_V2; + } else { + cdns->otg_irq_regs = (struct cdns_otg_irq_regs __iomem *) + &cdns->otg_v1_regs->ien; + writel(1, &cdns->otg_v1_regs->simulate); + cdns->version = CDNS3_CONTROLLER_V1; + } + dev_dbg(cdns->dev, "DRD version v1 (ID: %08x, rev: %08x)\n", readl(&cdns->otg_v1_regs->did), readl(&cdns->otg_v1_regs->rid)); @@ -378,17 +438,24 @@ /* Update dr_mode according to STRAP configuration. */ cdns->dr_mode = USB_DR_MODE_OTG; - if (state == OTGSTS_STRAP_HOST) { + + if ((cdns->version == CDNSP_CONTROLLER_V2 && + state == OTGSTS_CDNSP_STRAP_HOST) || + (cdns->version != CDNSP_CONTROLLER_V2 && + state == OTGSTS_STRAP_HOST)) { dev_dbg(cdns->dev, "Controller strapped to HOST\n"); cdns->dr_mode = USB_DR_MODE_HOST; - } else if (state == OTGSTS_STRAP_GADGET) { + } else if ((cdns->version == CDNSP_CONTROLLER_V2 && + state == OTGSTS_CDNSP_STRAP_GADGET) || + (cdns->version != CDNSP_CONTROLLER_V2 && + state == OTGSTS_STRAP_GADGET)) { dev_dbg(cdns->dev, "Controller strapped to PERIPHERAL\n"); cdns->dr_mode = USB_DR_MODE_PERIPHERAL; } ret = devm_request_threaded_irq(cdns->dev, cdns->otg_irq, - cdns3_drd_irq, - cdns3_drd_thread_irq, + cdns_drd_irq, + cdns_drd_thread_irq, IRQF_SHARED, dev_name(cdns->dev), cdns); if (ret) { @@ -405,8 +472,9 @@ return 0; } -int cdns3_drd_exit(struct cdns3 *cdns) +int cdns_drd_exit(struct cdns *cdns) { - cdns3_otg_disable_irq(cdns); + cdns_otg_disable_irq(cdns); + return 0; } diff --git a/drivers/usb/cdns3/drd.h b/drivers/usb/cdns3/drd.h index f1ccae285a16..9724acdecbbb 100644 --- a/drivers/usb/cdns3/drd.h +++ b/drivers/usb/cdns3/drd.h @@ -1,8 +1,8 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* - * Cadence USB3 DRD header file. + * Cadence USB3 and USBSSP DRD header file. * - * Copyright (C) 2018-2019 Cadence. + * Copyright (C) 2018-2020 Cadence.
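The probe-time detection above is worth restating compactly: V0 exposes a read-only command register at offset zero, so reading 0 there identifies it, and otherwise the DID register separates CDNSP from CDNS3 v1. A condensed sketch of the same decision (setup of the per-version register pointers omitted):

if (!readl(&cdns->otg_v0_regs->cmd))
	cdns->version = CDNS3_CONTROLLER_V0;
else if (readl(&cdns->otg_cdnsp_regs->did) == OTG_CDNSP_DID)
	cdns->version = CDNSP_CONTROLLER_V2;
else
	cdns->version = CDNS3_CONTROLLER_V1;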
* * Author: Pawel Laszczak <pawell@cadence.com> */ @@ -10,10 +10,9 @@ #define __LINUX_CDNS3_DRD #include <linux/usb/otg.h> -#include <linux/phy/phy.h> #include "core.h" -/* DRD register interface for version v1. */ +/* DRD register interface for version v1 of cdns3 driver. */ struct cdns3_otg_regs { __le32 did; __le32 rid; @@ -38,7 +37,7 @@ __le32 ctrl2; }; -/* DRD register interface for version v0. */ +/* DRD register interface for version v0 of cdns3 driver. */ struct cdns3_otg_legacy_regs { __le32 cmd; __le32 sts; @@ -57,14 +56,45 @@ __le32 ctrl1; }; +/* DRD register interface for cdnsp driver */ +struct cdnsp_otg_regs { + __le32 did; + __le32 rid; + __le32 cfgs1; + __le32 cfgs2; + __le32 cmd; + __le32 sts; + __le32 state; + __le32 ien; + __le32 ivect; + __le32 tmr; + __le32 simulate; + __le32 adpbc_sts; + __le32 adp_ramp_time; + __le32 adpbc_ctrl1; + __le32 adpbc_ctrl2; + __le32 override; + __le32 vbusvalid_dbnc_cfg; + __le32 sessvalid_dbnc_cfg; + __le32 susp_timing_ctrl; +}; + +#define OTG_CDNSP_DID 0x0004034E + /* - * Common registers interface for both version of DRD. + * Common registers interface for both CDNS3 and CDNSP versions of DRD. */ -struct cdns3_otg_common_regs { +struct cdns_otg_common_regs { __le32 cmd; __le32 sts; __le32 state; - __le32 different1; +}; + +/* + * Interrupt related registers. These registers are mapped at a + * different location on the CDNSP controller. + */ +struct cdns_otg_irq_regs { __le32 ien; __le32 ivect; }; @@ -92,9 +122,9 @@ struct cdns3_otg_common_regs { #define OTGCMD_DEV_BUS_DROP BIT(8) /* Drop the bus for Host mode*/ #define OTGCMD_HOST_BUS_DROP BIT(9) -/* Power Down USBSS-DEV. */ +/* Power Down USBSS-DEV - only for CDNS3. */ #define OTGCMD_DEV_POWER_OFF BIT(11) -/* Power Down CDNSXHCI. */ +/* Power Down CDNSXHCI - only for CDNS3. */ #define OTGCMD_HOST_POWER_OFF BIT(12) /* OTGIEN - bitmasks */ @@ -123,20 +153,31 @@ struct cdns3_otg_common_regs { #define OTGSTS_OTG_NRDY_MASK BIT(11) #define OTGSTS_OTG_NRDY(p) ((p) & OTGSTS_OTG_NRDY_MASK) /* - * Value of the strap pins. + * Value of the strap pins for: + * CDNS3: * 000 - no default configuration * 010 - Controller initially configured as Host * 100 - Controller initially configured as Device + * CDNSP: + * 000 - No default configuration. + * 010 - Controller initially configured as Host. + * 100 - Controller initially configured as Device. */ #define OTGSTS_STRAP(p) (((p) & GENMASK(14, 12)) >> 12) #define OTGSTS_STRAP_NO_DEFAULT_CFG 0x00 #define OTGSTS_STRAP_HOST_OTG 0x01 #define OTGSTS_STRAP_HOST 0x02 #define OTGSTS_STRAP_GADGET 0x04 +#define OTGSTS_CDNSP_STRAP_HOST 0x01 +#define OTGSTS_CDNSP_STRAP_GADGET 0x02 + /* Host mode is turned on. */ -#define OTGSTS_XHCI_READY BIT(26) +#define OTGSTS_CDNS3_XHCI_READY BIT(26) +#define OTGSTS_CDNSP_XHCI_READY BIT(27) + /* Device mode is turned on. */ -#define OTGSTS_DEV_READY BIT(27) +#define OTGSTS_CDNS3_DEV_READY BIT(27) +#define OTGSTS_CDNSP_DEV_READY BIT(26) /* OTGSTATE- bitmasks */ #define OTGSTATE_DEV_STATE_MASK GENMASK(2, 0) @@ -152,6 +193,8 @@ struct cdns3_otg_common_regs { #define OVERRIDE_IDPULLUP BIT(0) /* Only for CDNS3_CONTROLLER_V0 version */ #define OVERRIDE_IDPULLUP_V0 BIT(24) +/* Vbusvalid/Sessvalid override select.
*/ +#define OVERRIDE_SESS_VLD_SEL BIT(10) /* PHYRST_CFG - bitmasks */ #define PHYRST_CFG_PHYRST_A_ENABLE BIT(0) @@ -159,17 +202,18 @@ struct cdns3_otg_common_regs { #define CDNS3_ID_PERIPHERAL 1 #define CDNS3_ID_HOST 0 -bool cdns3_is_host(struct cdns3 *cdns); -bool cdns3_is_device(struct cdns3 *cdns); -int cdns3_get_id(struct cdns3 *cdns); -int cdns3_get_vbus(struct cdns3 *cdns); -int cdns3_drd_init(struct cdns3 *cdns); -int cdns3_drd_exit(struct cdns3 *cdns); -int cdns3_drd_update_mode(struct cdns3 *cdns); -int cdns3_drd_gadget_on(struct cdns3 *cdns); -void cdns3_drd_gadget_off(struct cdns3 *cdns); -int cdns3_drd_host_on(struct cdns3 *cdns); -void cdns3_drd_host_off(struct cdns3 *cdns); -int cdns3_set_mode(struct cdns3 *cdns, enum usb_dr_mode mode); +bool cdns_is_host(struct cdns *cdns); +bool cdns_is_device(struct cdns *cdns); +int cdns_get_id(struct cdns *cdns); +int cdns_get_vbus(struct cdns *cdns); +void cdns_clear_vbus(struct cdns *cdns); +void cdns_set_vbus(struct cdns *cdns); +int cdns_drd_init(struct cdns *cdns); +int cdns_drd_exit(struct cdns *cdns); +int cdns_drd_update_mode(struct cdns *cdns); +int cdns_drd_gadget_on(struct cdns *cdns); +void cdns_drd_gadget_off(struct cdns *cdns); +int cdns_drd_host_on(struct cdns *cdns); +void cdns_drd_host_off(struct cdns *cdns); #endif /* __LINUX_CDNS3_DRD */ diff --git a/drivers/usb/cdns3/gadget-export.h b/drivers/usb/cdns3/gadget-export.h index 702c5a267a92..c37b6269b001 100644 --- a/drivers/usb/cdns3/gadget-export.h +++ b/drivers/usb/cdns3/gadget-export.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* - * Cadence USBSS DRD Driver - Gadget Export APIs. + * Cadence USBSS and USBSSP DRD Driver - Gadget Export APIs. * * Copyright (C) 2017 NXP * Copyright (C) 2017-2018 NXP @@ -10,16 +10,28 @@ #ifndef __LINUX_CDNS3_GADGET_EXPORT #define __LINUX_CDNS3_GADGET_EXPORT -#ifdef CONFIG_USB_CDNS3_GADGET +#if IS_ENABLED(CONFIG_USB_CDNSP_GADGET) -int cdns3_gadget_init(struct cdns3 *cdns); +int cdnsp_gadget_init(struct cdns *cdns); #else -static inline int cdns3_gadget_init(struct cdns3 *cdns) +static inline int cdnsp_gadget_init(struct cdns *cdns) { return -ENXIO; } -#endif +#endif /* CONFIG_USB_CDNSP_GADGET */ + +#if IS_ENABLED(CONFIG_USB_CDNS3_GADGET) + +int cdns3_gadget_init(struct cdns *cdns); +#else + +static inline int cdns3_gadget_init(struct cdns *cdns) +{ + return -ENXIO; +} + +#endif /* CONFIG_USB_CDNS3_GADGET */ #endif /* __LINUX_CDNS3_GADGET_EXPORT */ diff --git a/drivers/usb/cdns3/host-export.h b/drivers/usb/cdns3/host-export.h index 26041718a086..cf92173ecf00 100644 --- a/drivers/usb/cdns3/host-export.h +++ b/drivers/usb/cdns3/host-export.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* - * Cadence USBSS DRD Driver - Host Export APIs + * Cadence USBSS and USBSSP DRD Driver - Host Export APIs * * Copyright (C) 2017-2018 NXP * @@ -9,25 +9,19 @@ #ifndef __LINUX_CDNS3_HOST_EXPORT #define __LINUX_CDNS3_HOST_EXPORT -struct usb_hcd; -#ifdef CONFIG_USB_CDNS3_HOST +#if IS_ENABLED(CONFIG_USB_CDNS_HOST) -int cdns3_host_init(struct cdns3 *cdns); -int xhci_cdns3_suspend_quirk(struct usb_hcd *hcd); +int cdns_host_init(struct cdns *cdns); #else -static inline int cdns3_host_init(struct cdns3 *cdns) +static inline int cdns_host_init(struct cdns *cdns) { return -ENXIO; } -static inline void cdns3_host_exit(struct cdns3 *cdns) { } -static inline int xhci_cdns3_suspend_quirk(struct usb_hcd *hcd) -{ - return 0; -} +static inline void cdns_host_exit(struct cdns *cdns) { } -#endif /* CONFIG_USB_CDNS3_HOST */ +#endif /* USB_CDNS_HOST */ 
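Both export headers above use the usual stub idiom: IS_ENABLED() is true for =y and =m alike, and a compiled-out role decays to a static inline returning -ENXIO, so callers stay free of #ifdef clutter. Caller-side, that looks roughly like the following (the dev_dbg() message is invented for illustration; the core simply propagates the error):

ret = cdns_host_init(cdns);
if (ret == -ENXIO)
	dev_dbg(cdns->dev, "host role not available in this build\n");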
#endif /* __LINUX_CDNS3_HOST_EXPORT */ diff --git a/drivers/usb/cdns3/host.c b/drivers/usb/cdns3/host.c index ec89f2e5430f..84dadfa726aa 100644 --- a/drivers/usb/cdns3/host.c +++ b/drivers/usb/cdns3/host.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /* - * Cadence USBSS DRD Driver - host side + * Cadence USBSS and USBSSP DRD Driver - host side * * Copyright (C) 2018-2019 Cadence Design Systems. * Copyright (C) 2017-2018 NXP @@ -23,18 +23,20 @@ #define CFG_RXDET_P3_EN BIT(15) #define LPM_2_STB_SWITCH_EN BIT(25) +static int xhci_cdns3_suspend_quirk(struct usb_hcd *hcd); + static const struct xhci_plat_priv xhci_plat_cdns3_xhci = { .quirks = XHCI_SKIP_PHY_INIT | XHCI_AVOID_BEI, .suspend_quirk = xhci_cdns3_suspend_quirk, }; -static int __cdns3_host_init(struct cdns3 *cdns) +static int __cdns_host_init(struct cdns *cdns) { struct platform_device *xhci; int ret; struct usb_hcd *hcd; - cdns3_drd_host_on(cdns); + cdns_drd_host_on(cdns); xhci = platform_device_alloc("xhci-hcd", PLATFORM_DEVID_AUTO); if (!xhci) { @@ -46,7 +48,7 @@ static int __cdns3_host_init(struct cdns3 *cdns) cdns->host_dev = xhci; ret = platform_device_add_resources(xhci, cdns->xhci_res, - CDNS3_XHCI_RESOURCES_NUM); + CDNS_XHCI_RESOURCES_NUM); if (ret) { dev_err(cdns->dev, "couldn't add resources to xHCI device\n"); goto err1; @@ -87,7 +89,7 @@ err1: return ret; } -int xhci_cdns3_suspend_quirk(struct usb_hcd *hcd) +static int xhci_cdns3_suspend_quirk(struct usb_hcd *hcd) { struct xhci_hcd *xhci = hcd_to_xhci(hcd); u32 value; @@ -113,25 +115,25 @@ int xhci_cdns3_suspend_quirk(struct usb_hcd *hcd) return 0; } -static void cdns3_host_exit(struct cdns3 *cdns) +static void cdns_host_exit(struct cdns *cdns) { kfree(cdns->xhci_plat_data); platform_device_unregister(cdns->host_dev); cdns->host_dev = NULL; - cdns3_drd_host_off(cdns); + cdns_drd_host_off(cdns); } -int cdns3_host_init(struct cdns3 *cdns) +int cdns_host_init(struct cdns *cdns) { - struct cdns3_role_driver *rdrv; + struct cdns_role_driver *rdrv; rdrv = devm_kzalloc(cdns->dev, sizeof(*rdrv), GFP_KERNEL); if (!rdrv) return -ENOMEM; - rdrv->start = __cdns3_host_init; - rdrv->stop = cdns3_host_exit; - rdrv->state = CDNS3_ROLE_STATE_INACTIVE; + rdrv->start = __cdns_host_init; + rdrv->stop = cdns_host_exit; + rdrv->state = CDNS_ROLE_STATE_INACTIVE; rdrv->name = "host"; cdns->roles[USB_ROLE_HOST] = rdrv; diff --git a/drivers/usb/chipidea/Kconfig b/drivers/usb/chipidea/Kconfig index 8bafcfc6080d..661818e8fed6 100644 --- a/drivers/usb/chipidea/Kconfig +++ b/drivers/usb/chipidea/Kconfig @@ -53,9 +53,8 @@ config USB_CHIPIDEA_GENERIC default USB_CHIPIDEA config USB_CHIPIDEA_TEGRA - tristate "Enable Tegra UDC glue driver" if EMBEDDED + tristate "Enable Tegra USB glue driver" if EMBEDDED depends on OF - depends on USB_CHIPIDEA_UDC default USB_CHIPIDEA endif diff --git a/drivers/usb/chipidea/ci_hdrc_imx.c b/drivers/usb/chipidea/ci_hdrc_imx.c index 9e12152ea46b..8b7bc10b6e8b 100644 --- a/drivers/usb/chipidea/ci_hdrc_imx.c +++ b/drivers/usb/chipidea/ci_hdrc_imx.c @@ -139,9 +139,13 @@ static struct imx_usbmisc_data *usbmisc_get_init_data(struct device *dev) misc_pdev = of_find_device_by_node(args.np); of_node_put(args.np); - if (!misc_pdev || !platform_get_drvdata(misc_pdev)) + if (!misc_pdev) return ERR_PTR(-EPROBE_DEFER); + if (!platform_get_drvdata(misc_pdev)) { + put_device(&misc_pdev->dev); + return ERR_PTR(-EPROBE_DEFER); + } data->dev = &misc_pdev->dev; /* diff --git a/drivers/usb/chipidea/ci_hdrc_tegra.c b/drivers/usb/chipidea/ci_hdrc_tegra.c index 7455df0ede49..90f2a8b786be 
100644 --- a/drivers/usb/chipidea/ci_hdrc_tegra.c +++ b/drivers/usb/chipidea/ci_hdrc_tegra.c @@ -4,57 +4,278 @@ */ #include <linux/clk.h> +#include <linux/io.h> #include <linux/module.h> #include <linux/of_device.h> #include <linux/reset.h> +#include <linux/usb.h> #include <linux/usb/chipidea.h> +#include <linux/usb/hcd.h> +#include <linux/usb/of.h> +#include <linux/usb/phy.h> + +#include "../host/ehci.h" #include "ci.h" -struct tegra_udc { +struct tegra_usb { struct ci_hdrc_platform_data data; struct platform_device *dev; + const struct tegra_usb_soc_info *soc; struct usb_phy *phy; struct clk *clk; + + bool needs_double_reset; }; -struct tegra_udc_soc_info { +struct tegra_usb_soc_info { unsigned long flags; + unsigned int txfifothresh; + enum usb_dr_mode dr_mode; }; -static const struct tegra_udc_soc_info tegra_udc_soc_info = { - .flags = CI_HDRC_REQUIRES_ALIGNED_DMA, +static const struct tegra_usb_soc_info tegra20_ehci_soc_info = { + .flags = CI_HDRC_REQUIRES_ALIGNED_DMA | + CI_HDRC_OVERRIDE_PHY_CONTROL | + CI_HDRC_SUPPORTS_RUNTIME_PM, + .dr_mode = USB_DR_MODE_HOST, + .txfifothresh = 10, }; -static const struct of_device_id tegra_udc_of_match[] = { +static const struct tegra_usb_soc_info tegra30_ehci_soc_info = { + .flags = CI_HDRC_REQUIRES_ALIGNED_DMA | + CI_HDRC_OVERRIDE_PHY_CONTROL | + CI_HDRC_SUPPORTS_RUNTIME_PM, + .dr_mode = USB_DR_MODE_HOST, + .txfifothresh = 16, +}; + +static const struct tegra_usb_soc_info tegra20_udc_soc_info = { + .flags = CI_HDRC_REQUIRES_ALIGNED_DMA | + CI_HDRC_OVERRIDE_PHY_CONTROL | + CI_HDRC_SUPPORTS_RUNTIME_PM, + .dr_mode = USB_DR_MODE_UNKNOWN, + .txfifothresh = 10, +}; + +static const struct tegra_usb_soc_info tegra30_udc_soc_info = { + .flags = CI_HDRC_REQUIRES_ALIGNED_DMA | + CI_HDRC_OVERRIDE_PHY_CONTROL | + CI_HDRC_SUPPORTS_RUNTIME_PM, + .dr_mode = USB_DR_MODE_UNKNOWN, + .txfifothresh = 16, +}; + +static const struct of_device_id tegra_usb_of_match[] = { { + .compatible = "nvidia,tegra20-ehci", + .data = &tegra20_ehci_soc_info, + }, { + .compatible = "nvidia,tegra30-ehci", + .data = &tegra30_ehci_soc_info, + }, { .compatible = "nvidia,tegra20-udc", - .data = &tegra_udc_soc_info, + .data = &tegra20_udc_soc_info, }, { .compatible = "nvidia,tegra30-udc", - .data = &tegra_udc_soc_info, + .data = &tegra30_udc_soc_info, }, { .compatible = "nvidia,tegra114-udc", - .data = &tegra_udc_soc_info, + .data = &tegra30_udc_soc_info, }, { .compatible = "nvidia,tegra124-udc", - .data = &tegra_udc_soc_info, + .data = &tegra30_udc_soc_info, }, { /* sentinel */ } }; -MODULE_DEVICE_TABLE(of, tegra_udc_of_match); +MODULE_DEVICE_TABLE(of, tegra_usb_of_match); + +static int tegra_usb_reset_controller(struct device *dev) +{ + struct reset_control *rst, *rst_utmi; + struct device_node *phy_np; + int err; + + rst = devm_reset_control_get_shared(dev, "usb"); + if (IS_ERR(rst)) { + dev_err(dev, "can't get ehci reset: %pe\n", rst); + return PTR_ERR(rst); + } + + phy_np = of_parse_phandle(dev->of_node, "nvidia,phy", 0); + if (!phy_np) + return -ENOENT; + + /* + * The 1st USB controller contains some UTMI pad registers that are + * global for all the controllers on the chip. Those registers are + * also cleared when reset is asserted to the 1st controller. + */ + rst_utmi = of_reset_control_get_shared(phy_np, "utmi-pads"); + if (IS_ERR(rst_utmi)) { + dev_warn(dev, "can't get utmi-pads reset from the PHY\n"); + dev_warn(dev, "continuing, but please update your DT\n"); + } else { + /* + * PHY driver performs UTMI-pads reset in a case of a + * non-legacy DT. 
+ */ + reset_control_put(rst_utmi); + } + + of_node_put(phy_np); + + /* reset control is shared, hence initialize it first */ + err = reset_control_deassert(rst); + if (err) + return err; + + err = reset_control_assert(rst); + if (err) + return err; + + udelay(1); + + err = reset_control_deassert(rst); + if (err) + return err; + + return 0; +} + +static int tegra_usb_notify_event(struct ci_hdrc *ci, unsigned int event) +{ + struct tegra_usb *usb = dev_get_drvdata(ci->dev->parent); + struct ehci_hcd *ehci; + + switch (event) { + case CI_HDRC_CONTROLLER_RESET_EVENT: + if (ci->hcd) { + ehci = hcd_to_ehci(ci->hcd); + ehci->has_tdi_phy_lpm = false; + ehci_writel(ehci, usb->soc->txfifothresh << 16, + &ehci->regs->txfill_tuning); + } + break; + } + + return 0; +} + +static int tegra_usb_internal_port_reset(struct ehci_hcd *ehci, + u32 __iomem *portsc_reg, + unsigned long *flags) +{ + u32 saved_usbintr, temp; + unsigned int i, tries; + int retval = 0; + + saved_usbintr = ehci_readl(ehci, &ehci->regs->intr_enable); + /* disable USB interrupt */ + ehci_writel(ehci, 0, &ehci->regs->intr_enable); + spin_unlock_irqrestore(&ehci->lock, *flags); + + /* + * Here we have to do Port Reset at most twice for + * Port Enable bit to be set. + */ + for (i = 0; i < 2; i++) { + temp = ehci_readl(ehci, portsc_reg); + temp |= PORT_RESET; + ehci_writel(ehci, temp, portsc_reg); + fsleep(10000); + temp &= ~PORT_RESET; + ehci_writel(ehci, temp, portsc_reg); + fsleep(1000); + tries = 100; + do { + fsleep(1000); + /* + * Up to this point, Port Enable bit is + * expected to be set after 2 ms waiting. + * USB1 usually takes extra 45 ms, for safety, + * we take 100 ms as timeout. + */ + temp = ehci_readl(ehci, portsc_reg); + } while (!(temp & PORT_PE) && tries--); + if (temp & PORT_PE) + break; + } + if (i == 2) + retval = -ETIMEDOUT; + + /* + * Clear Connect Status Change bit if it's set. + * We can't clear PORT_PEC. It will also cause PORT_PE to be cleared. + */ + if (temp & PORT_CSC) + ehci_writel(ehci, PORT_CSC, portsc_reg); + + /* + * Write to clear any interrupt status bits that might be set + * during port reset. + */ + temp = ehci_readl(ehci, &ehci->regs->status); + ehci_writel(ehci, temp, &ehci->regs->status); + + /* restore original interrupt-enable bits */ + spin_lock_irqsave(&ehci->lock, *flags); + ehci_writel(ehci, saved_usbintr, &ehci->regs->intr_enable); + + return retval; +} + +static int tegra_ehci_hub_control(struct ci_hdrc *ci, u16 typeReq, u16 wValue, + u16 wIndex, char *buf, u16 wLength, + bool *done, unsigned long *flags) +{ + struct tegra_usb *usb = dev_get_drvdata(ci->dev->parent); + struct ehci_hcd *ehci = hcd_to_ehci(ci->hcd); + u32 __iomem *status_reg; + int retval = 0; + + status_reg = &ehci->regs->port_status[(wIndex & 0xff) - 1]; + + switch (typeReq) { + case SetPortFeature: + if (wValue != USB_PORT_FEAT_RESET || !usb->needs_double_reset) + break; + + /* for USB1 port we need to issue Port Reset twice internally */ + retval = tegra_usb_internal_port_reset(ehci, status_reg, flags); + *done = true; + break; + } + + return retval; +} -static int tegra_udc_probe(struct platform_device *pdev) +static void tegra_usb_enter_lpm(struct ci_hdrc *ci, bool enable) { - const struct tegra_udc_soc_info *soc; - struct tegra_udc *udc; + /* + * Touching any register which belongs to AHB clock domain will + * hang CPU if USB controller is put into low power mode because + * AHB USB clock is gated on Tegra in the LPM. 
+ * + * Tegra PHY has a separate register for checking the clock status + * and usb_phy_set_suspend() takes care of gating/ungating the clocks + * and restoring the PHY state on Tegra. Hence DEVLC/PORTSC registers + * shouldn't be touched directly by the CI driver. + */ + usb_phy_set_suspend(ci->usb_phy, enable); +} + +static int tegra_usb_probe(struct platform_device *pdev) +{ + const struct tegra_usb_soc_info *soc; + struct tegra_usb *usb; int err; - udc = devm_kzalloc(&pdev->dev, sizeof(*udc), GFP_KERNEL); - if (!udc) + usb = devm_kzalloc(&pdev->dev, sizeof(*usb), GFP_KERNEL); + if (!usb) return -ENOMEM; soc = of_device_get_match_data(&pdev->dev); @@ -63,70 +284,99 @@ static int tegra_udc_probe(struct platform_device *pdev) return -EINVAL; } - udc->phy = devm_usb_get_phy_by_phandle(&pdev->dev, "nvidia,phy", 0); - if (IS_ERR(udc->phy)) { - err = PTR_ERR(udc->phy); + usb->phy = devm_usb_get_phy_by_phandle(&pdev->dev, "nvidia,phy", 0); + if (IS_ERR(usb->phy)) { + err = PTR_ERR(usb->phy); dev_err(&pdev->dev, "failed to get PHY: %d\n", err); return err; } - udc->clk = devm_clk_get(&pdev->dev, NULL); - if (IS_ERR(udc->clk)) { - err = PTR_ERR(udc->clk); + usb->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(usb->clk)) { + err = PTR_ERR(usb->clk); dev_err(&pdev->dev, "failed to get clock: %d\n", err); return err; } - err = clk_prepare_enable(udc->clk); + err = clk_prepare_enable(usb->clk); if (err < 0) { dev_err(&pdev->dev, "failed to enable clock: %d\n", err); return err; } - /* setup and register ChipIdea HDRC device */ - udc->data.name = "tegra-udc"; - udc->data.flags = soc->flags; - udc->data.usb_phy = udc->phy; - udc->data.capoffset = DEF_CAPOFFSET; - - udc->dev = ci_hdrc_add_device(&pdev->dev, pdev->resource, - pdev->num_resources, &udc->data); - if (IS_ERR(udc->dev)) { - err = PTR_ERR(udc->dev); - dev_err(&pdev->dev, "failed to add HDRC device: %d\n", err); + if (device_property_present(&pdev->dev, "nvidia,needs-double-reset")) + usb->needs_double_reset = true; + + err = tegra_usb_reset_controller(&pdev->dev); + if (err) { + dev_err(&pdev->dev, "failed to reset controller: %d\n", err); goto fail_power_off; } - platform_set_drvdata(pdev, udc); + /* + * USB controller registers shouldn't be touched before PHY is + * initialized, otherwise CPU will hang because clocks are gated. + * PHY driver controls gating of internal USB clocks on Tegra. 
+ */ + err = usb_phy_init(usb->phy); + if (err) + goto fail_power_off; + + platform_set_drvdata(pdev, usb); + + /* setup and register ChipIdea HDRC device */ + usb->soc = soc; + usb->data.name = "tegra-usb"; + usb->data.flags = soc->flags; + usb->data.usb_phy = usb->phy; + usb->data.dr_mode = soc->dr_mode; + usb->data.capoffset = DEF_CAPOFFSET; + usb->data.enter_lpm = tegra_usb_enter_lpm; + usb->data.hub_control = tegra_ehci_hub_control; + usb->data.notify_event = tegra_usb_notify_event; + + /* Tegra PHY driver currently doesn't support LPM for ULPI */ + if (of_usb_get_phy_mode(pdev->dev.of_node) == USBPHY_INTERFACE_MODE_ULPI) + usb->data.flags &= ~CI_HDRC_SUPPORTS_RUNTIME_PM; + + usb->dev = ci_hdrc_add_device(&pdev->dev, pdev->resource, + pdev->num_resources, &usb->data); + if (IS_ERR(usb->dev)) { + err = PTR_ERR(usb->dev); + dev_err(&pdev->dev, "failed to add HDRC device: %d\n", err); + goto phy_shutdown; + } return 0; +phy_shutdown: + usb_phy_shutdown(usb->phy); fail_power_off: - clk_disable_unprepare(udc->clk); + clk_disable_unprepare(usb->clk); return err; } -static int tegra_udc_remove(struct platform_device *pdev) +static int tegra_usb_remove(struct platform_device *pdev) { - struct tegra_udc *udc = platform_get_drvdata(pdev); + struct tegra_usb *usb = platform_get_drvdata(pdev); - ci_hdrc_remove_device(udc->dev); - clk_disable_unprepare(udc->clk); + ci_hdrc_remove_device(usb->dev); + usb_phy_shutdown(usb->phy); + clk_disable_unprepare(usb->clk); return 0; } -static struct platform_driver tegra_udc_driver = { +static struct platform_driver tegra_usb_driver = { .driver = { - .name = "tegra-udc", - .of_match_table = tegra_udc_of_match, + .name = "tegra-usb", + .of_match_table = tegra_usb_of_match, }, - .probe = tegra_udc_probe, - .remove = tegra_udc_remove, + .probe = tegra_usb_probe, + .remove = tegra_usb_remove, }; -module_platform_driver(tegra_udc_driver); +module_platform_driver(tegra_usb_driver); -MODULE_DESCRIPTION("NVIDIA Tegra USB device mode driver"); +MODULE_DESCRIPTION("NVIDIA Tegra USB driver"); MODULE_AUTHOR("Thierry Reding <treding@nvidia.com>"); -MODULE_ALIAS("platform:tegra-udc"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c index aa40e510b806..3f6c21406dbd 100644 --- a/drivers/usb/chipidea/core.c +++ b/drivers/usb/chipidea/core.c @@ -195,7 +195,7 @@ static void hw_wait_phy_stable(void) } /* The PHY enters/leaves low power mode */ -static void ci_hdrc_enter_lpm(struct ci_hdrc *ci, bool enable) +static void ci_hdrc_enter_lpm_common(struct ci_hdrc *ci, bool enable) { enum ci_hw_regs reg = ci->hw_bank.lpm ? 
OP_DEVLC : OP_PORTSC; bool lpm = !!(hw_read(ci, reg, PORTSC_PHCD(ci->hw_bank.lpm))); @@ -208,6 +208,11 @@ static void ci_hdrc_enter_lpm(struct ci_hdrc *ci, bool enable) 0); } +static void ci_hdrc_enter_lpm(struct ci_hdrc *ci, bool enable) +{ + return ci->platdata->enter_lpm(ci, enable); +} + static int hw_device_init(struct ci_hdrc *ci, void __iomem *base) { u32 reg; @@ -790,6 +795,9 @@ static int ci_get_platdata(struct device *dev, platdata->pins_device = p; } + if (!platdata->enter_lpm) + platdata->enter_lpm = ci_hdrc_enter_lpm_common; + return 0; } diff --git a/drivers/usb/chipidea/host.c b/drivers/usb/chipidea/host.c index 48e4a5ca1835..67247d2ac07a 100644 --- a/drivers/usb/chipidea/host.c +++ b/drivers/usb/chipidea/host.c @@ -29,6 +29,12 @@ struct ehci_ci_priv { bool enabled; }; +struct ci_hdrc_dma_aligned_buffer { + void *kmalloc_ptr; + void *old_xfer_buffer; + u8 data[0]; +}; + static int ehci_ci_portpower(struct usb_hcd *hcd, int portnum, bool enable) { struct ehci_hcd *ehci = hcd_to_ehci(hcd); @@ -160,14 +166,15 @@ static int host_start(struct ci_hdrc *ci) pinctrl_select_state(ci->platdata->pctl, ci->platdata->pins_host); + ci->hcd = hcd; + ret = usb_add_hcd(hcd, 0, 0); if (ret) { + ci->hcd = NULL; goto disable_reg; } else { struct usb_otg *otg = &ci->otg; - ci->hcd = hcd; - if (ci_otg_is_fsm_mode(ci)) { otg->host = &hcd->self; hcd->self.otg_port = 1; @@ -237,6 +244,7 @@ static int ci_ehci_hub_control( u32 temp; unsigned long flags; int retval = 0; + bool done = false; struct device *dev = hcd->self.controller; struct ci_hdrc *ci = dev_get_drvdata(dev); @@ -244,6 +252,13 @@ static int ci_ehci_hub_control( spin_lock_irqsave(&ehci->lock, flags); + if (ci->platdata->hub_control) { + retval = ci->platdata->hub_control(ci, typeReq, wValue, wIndex, + buf, wLength, &done, &flags); + if (done) + goto done; + } + if (typeReq == SetPortFeature && wValue == USB_PORT_FEAT_SUSPEND) { temp = ehci_readl(ehci, status_reg); if ((temp & PORT_PE) == 0 || (temp & PORT_RESET) != 0) { @@ -349,6 +364,86 @@ static int ci_ehci_bus_suspend(struct usb_hcd *hcd) return 0; } +static void ci_hdrc_free_dma_aligned_buffer(struct urb *urb) +{ + struct ci_hdrc_dma_aligned_buffer *temp; + size_t length; + + if (!(urb->transfer_flags & URB_ALIGNED_TEMP_BUFFER)) + return; + + temp = container_of(urb->transfer_buffer, + struct ci_hdrc_dma_aligned_buffer, data); + + if (usb_urb_dir_in(urb)) { + if (usb_pipeisoc(urb->pipe)) + length = urb->transfer_buffer_length; + else + length = urb->actual_length; + + memcpy(temp->old_xfer_buffer, temp->data, length); + } + urb->transfer_buffer = temp->old_xfer_buffer; + kfree(temp->kmalloc_ptr); + + urb->transfer_flags &= ~URB_ALIGNED_TEMP_BUFFER; +} + +static int ci_hdrc_alloc_dma_aligned_buffer(struct urb *urb, gfp_t mem_flags) +{ + struct ci_hdrc_dma_aligned_buffer *temp, *kmalloc_ptr; + const unsigned int ci_hdrc_usb_dma_align = 32; + size_t kmalloc_size; + + if (urb->num_sgs || urb->sg || urb->transfer_buffer_length == 0 || + !((uintptr_t)urb->transfer_buffer & (ci_hdrc_usb_dma_align - 1))) + return 0; + + /* Allocate a buffer with enough padding for alignment */ + kmalloc_size = urb->transfer_buffer_length + + sizeof(struct ci_hdrc_dma_aligned_buffer) + + ci_hdrc_usb_dma_align - 1; + + kmalloc_ptr = kmalloc(kmalloc_size, mem_flags); + if (!kmalloc_ptr) + return -ENOMEM; + + /* Position our struct dma_aligned_buffer such that data is aligned */ + temp = PTR_ALIGN(kmalloc_ptr + 1, ci_hdrc_usb_dma_align) - 1; + temp->kmalloc_ptr = kmalloc_ptr; + temp->old_xfer_buffer = 
urb->transfer_buffer; + if (usb_urb_dir_out(urb)) + memcpy(temp->data, urb->transfer_buffer, + urb->transfer_buffer_length); + urb->transfer_buffer = temp->data; + + urb->transfer_flags |= URB_ALIGNED_TEMP_BUFFER; + + return 0; +} + +static int ci_hdrc_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb, + gfp_t mem_flags) +{ + int ret; + + ret = ci_hdrc_alloc_dma_aligned_buffer(urb, mem_flags); + if (ret) + return ret; + + ret = usb_hcd_map_urb_for_dma(hcd, urb, mem_flags); + if (ret) + ci_hdrc_free_dma_aligned_buffer(urb); + + return ret; +} + +static void ci_hdrc_unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb) +{ + usb_hcd_unmap_urb_for_dma(hcd, urb); + ci_hdrc_free_dma_aligned_buffer(urb); +} + int ci_hdrc_host_init(struct ci_hdrc *ci) { struct ci_role_driver *rdrv; @@ -366,6 +461,11 @@ int ci_hdrc_host_init(struct ci_hdrc *ci) rdrv->name = "host"; ci->roles[CI_ROLE_HOST] = rdrv; + if (ci->platdata->flags & CI_HDRC_REQUIRES_ALIGNED_DMA) { + ci_ehci_hc_driver.map_urb_for_dma = ci_hdrc_map_urb_for_dma; + ci_ehci_hc_driver.unmap_urb_for_dma = ci_hdrc_unmap_urb_for_dma; + } + return 0; } diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c index f52f1bc0559f..37f824b59daa 100644 --- a/drivers/usb/class/cdc-acm.c +++ b/drivers/usb/class/cdc-acm.c @@ -1895,6 +1895,16 @@ static const struct usb_device_id acm_ids[] = { { USB_DEVICE(0x04d8, 0xfd08), .driver_info = IGNORE_DEVICE, }, + + { USB_DEVICE(0x04d8, 0xf58b), + .driver_info = IGNORE_DEVICE, + }, +#endif + +#if IS_ENABLED(CONFIG_USB_SERIAL_XR) + { USB_DEVICE(0x04e2, 0x1410), /* Ignore XR21V141X USB to Serial converter */ + .driver_info = IGNORE_DEVICE, + }, #endif /*Samsung phone in firmware update mode */ diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c index 02d0cfd23bb2..508b1c3f8b73 100644 --- a/drivers/usb/class/cdc-wdm.c +++ b/drivers/usb/class/cdc-wdm.c @@ -465,13 +465,23 @@ static int service_outstanding_interrupt(struct wdm_device *desc) if (!desc->resp_count || !--desc->resp_count) goto out; + if (test_bit(WDM_DISCONNECTING, &desc->flags)) { + rv = -ENODEV; + goto out; + } + if (test_bit(WDM_RESETTING, &desc->flags)) { + rv = -EIO; + goto out; + } + set_bit(WDM_RESPONDING, &desc->flags); spin_unlock_irq(&desc->iuspin); rv = usb_submit_urb(desc->response, GFP_KERNEL); spin_lock_irq(&desc->iuspin); if (rv) { - dev_err(&desc->intf->dev, - "usb_submit_urb failed with result %d\n", rv); + if (!test_bit(WDM_DISCONNECTING, &desc->flags)) + dev_err(&desc->intf->dev, + "usb_submit_urb failed with result %d\n", rv); /* make sure the next notification trigger a submit */ clear_bit(WDM_RESPONDING, &desc->flags); @@ -1027,9 +1037,9 @@ static void wdm_disconnect(struct usb_interface *intf) wake_up_all(&desc->wait); mutex_lock(&desc->rlock); mutex_lock(&desc->wlock); - kill_urbs(desc); cancel_work_sync(&desc->rxwork); cancel_work_sync(&desc->service_outs_intr); + kill_urbs(desc); mutex_unlock(&desc->wlock); mutex_unlock(&desc->rlock); diff --git a/drivers/usb/class/usblp.c b/drivers/usb/class/usblp.c index 67cbd42421be..c9f6e9758288 100644 --- a/drivers/usb/class/usblp.c +++ b/drivers/usb/class/usblp.c @@ -274,8 +274,25 @@ static int usblp_ctrl_msg(struct usblp *usblp, int request, int type, int dir, i #define usblp_reset(usblp)\ usblp_ctrl_msg(usblp, USBLP_REQ_RESET, USB_TYPE_CLASS, USB_DIR_OUT, USB_RECIP_OTHER, 0, NULL, 0) -#define usblp_hp_channel_change_request(usblp, channel, buffer) \ - usblp_ctrl_msg(usblp, USBLP_REQ_HP_CHANNEL_CHANGE_REQUEST, USB_TYPE_VENDOR, USB_DIR_IN, 
USB_RECIP_INTERFACE, channel, buffer, 1) +static int usblp_hp_channel_change_request(struct usblp *usblp, int channel, u8 *new_channel) +{ + u8 *buf; + int ret; + + buf = kzalloc(1, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + ret = usblp_ctrl_msg(usblp, USBLP_REQ_HP_CHANNEL_CHANGE_REQUEST, + USB_TYPE_VENDOR, USB_DIR_IN, USB_RECIP_INTERFACE, + channel, buf, 1); + if (ret == 0) + *new_channel = buf[0]; + + kfree(buf); + + return ret; +} /* * See the description for usblp_select_alts() below for the usage @@ -1312,14 +1329,17 @@ static int usblp_set_protocol(struct usblp *usblp, int protocol) if (protocol < USBLP_FIRST_PROTOCOL || protocol > USBLP_LAST_PROTOCOL) return -EINVAL; - alts = usblp->protocol[protocol].alt_setting; - if (alts < 0) - return -EINVAL; - r = usb_set_interface(usblp->dev, usblp->ifnum, alts); - if (r < 0) { - printk(KERN_ERR "usblp: can't set desired altsetting %d on interface %d\n", - alts, usblp->ifnum); - return r; + /* Don't unnecessarily set the interface if there's a single alt. */ + if (usblp->intf->num_altsetting > 1) { + alts = usblp->protocol[protocol].alt_setting; + if (alts < 0) + return -EINVAL; + r = usb_set_interface(usblp->dev, usblp->ifnum, alts); + if (r < 0) { + printk(KERN_ERR "usblp: can't set desired altsetting %d on interface %d\n", + alts, usblp->ifnum); + return r; + } } usblp->bidir = (usblp->protocol[protocol].epread != NULL); diff --git a/drivers/usb/class/usbtmc.c b/drivers/usb/class/usbtmc.c index b222b777e6a4..74d5a9c5238a 100644 --- a/drivers/usb/class/usbtmc.c +++ b/drivers/usb/class/usbtmc.c @@ -25,7 +25,7 @@ /* Increment API VERSION when changing tmc.h with new flags or ioctls * or when changing a significant behavior of the driver. */ -#define USBTMC_API_VERSION (2) +#define USBTMC_API_VERSION (3) #define USBTMC_HEADER_SIZE 12 #define USBTMC_MINOR_BASE 176 @@ -475,33 +475,17 @@ static int usbtmc_ioctl_abort_bulk_out(struct usbtmc_device_data *data) return usbtmc_ioctl_abort_bulk_out_tag(data, data->bTag_last_write); } -static int usbtmc488_ioctl_read_stb(struct usbtmc_file_data *file_data, - void __user *arg) +static int usbtmc_get_stb(struct usbtmc_file_data *file_data, __u8 *stb) { struct usbtmc_device_data *data = file_data->data; struct device *dev = &data->intf->dev; - int srq_asserted = 0; u8 *buffer; u8 tag; - __u8 stb; int rv; dev_dbg(dev, "Enter ioctl_read_stb iin_ep_present: %d\n", data->iin_ep_present); - spin_lock_irq(&data->dev_lock); - srq_asserted = atomic_xchg(&file_data->srq_asserted, srq_asserted); - if (srq_asserted) { - /* a STB with SRQ is already received */ - stb = file_data->srq_byte; - spin_unlock_irq(&data->dev_lock); - rv = put_user(stb, (__u8 __user *)arg); - dev_dbg(dev, "stb:0x%02x with srq received %d\n", - (unsigned int)stb, rv); - return rv; - } - spin_unlock_irq(&data->dev_lock); - buffer = kmalloc(8, GFP_KERNEL); if (!buffer) return -ENOMEM; @@ -548,13 +532,12 @@ static int usbtmc488_ioctl_read_stb(struct usbtmc_file_data *file_data, data->iin_bTag, tag); } - stb = data->bNotify2; + *stb = data->bNotify2; } else { - stb = buffer[2]; + *stb = buffer[2]; } - rv = put_user(stb, (__u8 __user *)arg); - dev_dbg(dev, "stb:0x%02x received %d\n", (unsigned int)stb, rv); + dev_dbg(dev, "stb:0x%02x received %d\n", (unsigned int)*stb, rv); exit: /* bump interrupt bTag */ @@ -567,6 +550,53 @@ static int usbtmc488_ioctl_read_stb(struct usbtmc_file_data *file_data, return rv; } +static int usbtmc488_ioctl_read_stb(struct usbtmc_file_data *file_data, + void __user *arg) +{ + int srq_asserted = 0; + __u8 stb; + int 
rv; + + rv = usbtmc_get_stb(file_data, &stb); + + if (rv > 0) { + srq_asserted = atomic_xchg(&file_data->srq_asserted, + srq_asserted); + if (srq_asserted) + stb |= 0x40; /* Set RQS bit */ + + rv = put_user(stb, (__u8 __user *)arg); + } + return rv; + +} + +static int usbtmc_ioctl_get_srq_stb(struct usbtmc_file_data *file_data, + void __user *arg) +{ + struct usbtmc_device_data *data = file_data->data; + struct device *dev = &data->intf->dev; + int srq_asserted = 0; + __u8 stb = 0; + int rv; + + spin_lock_irq(&data->dev_lock); + srq_asserted = atomic_xchg(&file_data->srq_asserted, srq_asserted); + + if (srq_asserted) { + stb = file_data->srq_byte; + spin_unlock_irq(&data->dev_lock); + rv = put_user(stb, (__u8 __user *)arg); + } else { + spin_unlock_irq(&data->dev_lock); + rv = -ENOMSG; + } + + dev_dbg(dev, "stb:0x%02x with srq received %d\n", (unsigned int)stb, rv); + + return rv; +} + static int usbtmc488_ioctl_wait_srq(struct usbtmc_file_data *file_data, __u32 __user *arg) { @@ -2145,6 +2175,17 @@ static long usbtmc_ioctl(struct file *file, unsigned int cmd, unsigned long arg) file_data->auto_abort = !!tmp_byte; break; + case USBTMC_IOCTL_GET_STB: + retval = usbtmc_get_stb(file_data, &tmp_byte); + if (retval > 0) + retval = put_user(tmp_byte, (__u8 __user *)arg); + break; + + case USBTMC_IOCTL_GET_SRQ_STB: + retval = usbtmc_ioctl_get_srq_stb(file_data, + (void __user *)arg); + break; + case USBTMC_IOCTL_CANCEL_IO: retval = usbtmc_ioctl_cancel_io(file_data); break; diff --git a/drivers/usb/common/common.c b/drivers/usb/common/common.c index 1433260d99b4..fc21cf2d36f6 100644 --- a/drivers/usb/common/common.c +++ b/drivers/usb/common/common.c @@ -69,6 +69,13 @@ static const char *const speed_names[] = { [USB_SPEED_SUPER_PLUS] = "super-speed-plus", }; +static const char *const ssp_rate[] = { + [USB_SSP_GEN_UNKNOWN] = "UNKNOWN", + [USB_SSP_GEN_2x1] = "super-speed-plus-gen2x1", + [USB_SSP_GEN_1x2] = "super-speed-plus-gen1x2", + [USB_SSP_GEN_2x2] = "super-speed-plus-gen2x2", +}; + const char *usb_speed_string(enum usb_device_speed speed) { if (speed < 0 || speed >= ARRAY_SIZE(speed_names)) @@ -86,12 +93,29 @@ enum usb_device_speed usb_get_maximum_speed(struct device *dev) if (ret < 0) return USB_SPEED_UNKNOWN; - ret = match_string(speed_names, ARRAY_SIZE(speed_names), maximum_speed); + ret = match_string(ssp_rate, ARRAY_SIZE(ssp_rate), maximum_speed); + if (ret > 0) + return USB_SPEED_SUPER_PLUS; + ret = match_string(speed_names, ARRAY_SIZE(speed_names), maximum_speed); return (ret < 0) ? USB_SPEED_UNKNOWN : ret; } EXPORT_SYMBOL_GPL(usb_get_maximum_speed); +enum usb_ssp_rate usb_get_maximum_ssp_rate(struct device *dev) +{ + const char *maximum_speed; + int ret; + + ret = device_property_read_string(dev, "maximum-speed", &maximum_speed); + if (ret < 0) + return USB_SSP_GEN_UNKNOWN; + + ret = match_string(ssp_rate, ARRAY_SIZE(ssp_rate), maximum_speed); + return (ret < 0) ? 
USB_SSP_GEN_UNKNOWN : ret; +} +EXPORT_SYMBOL_GPL(usb_get_maximum_ssp_rate); + const char *usb_state_string(enum usb_device_state state) { static const char *const names[] = { diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c index 60886a7464c3..ad5a0f405a75 100644 --- a/drivers/usb/core/hcd.c +++ b/drivers/usb/core/hcd.c @@ -1649,14 +1649,12 @@ static void __usb_hcd_giveback_urb(struct urb *urb) urb->status = status; /* * This function can be called in task context inside another remote - * coverage collection section, but KCOV doesn't support that kind of + * coverage collection section, but kcov doesn't support that kind of * recursion yet. Only collect coverage in softirq context for now. */ - if (in_serving_softirq()) - kcov_remote_start_usb((u64)urb->dev->bus->busnum); + kcov_remote_start_usb_softirq((u64)urb->dev->bus->busnum); urb->complete(urb); - if (in_serving_softirq()) - kcov_remote_stop(); + kcov_remote_stop_softirq(); usb_anchor_resume_wakeups(anchor); atomic_dec(&urb->use_count); diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c index 1b4eb7046b07..6ade3daf7858 100644 --- a/drivers/usb/core/quirks.c +++ b/drivers/usb/core/quirks.c @@ -391,6 +391,9 @@ static const struct usb_device_id usb_quirk_list[] = { /* X-Rite/Gretag-Macbeth Eye-One Pro display colorimeter */ { USB_DEVICE(0x0971, 0x2000), .driver_info = USB_QUIRK_NO_SET_INTF }, + /* ELMO L-12F document camera */ + { USB_DEVICE(0x09a1, 0x0028), .driver_info = USB_QUIRK_DELAY_CTRL_MSG }, + /* Broadcom BCM92035DGROM BT dongle */ { USB_DEVICE(0x0a5c, 0x2021), .driver_info = USB_QUIRK_RESET_RESUME }, @@ -415,6 +418,9 @@ static const struct usb_device_id usb_quirk_list[] = { { USB_DEVICE(0x10d6, 0x2200), .driver_info = USB_QUIRK_STRING_FETCH_255 }, + /* novation SoundControl XL */ + { USB_DEVICE(0x1235, 0x0061), .driver_info = USB_QUIRK_RESET_RESUME }, + /* Huawei 4G LTE module */ { USB_DEVICE(0x12d1, 0x15bb), .driver_info = USB_QUIRK_DISCONNECT_SUSPEND }, @@ -495,9 +501,6 @@ static const struct usb_device_id usb_quirk_list[] = { /* INTEL VALUE SSD */ { USB_DEVICE(0x8086, 0xf1a5), .driver_info = USB_QUIRK_RESET_RESUME }, - /* novation SoundControl XL */ - { USB_DEVICE(0x1235, 0x0061), .driver_info = USB_QUIRK_RESET_RESUME }, - { } /* terminating entry must be last */ }; diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c index 0a0d11151cfb..ad4c94366dad 100644 --- a/drivers/usb/dwc2/gadget.c +++ b/drivers/usb/dwc2/gadget.c @@ -1543,7 +1543,6 @@ static void dwc2_hsotg_complete_oursetup(struct usb_ep *ep, static struct dwc2_hsotg_ep *ep_from_windex(struct dwc2_hsotg *hsotg, u32 windex) { - struct dwc2_hsotg_ep *ep; int dir = (windex & USB_DIR_IN) ? 1 : 0; int idx = windex & 0x7F; @@ -1553,12 +1552,7 @@ static struct dwc2_hsotg_ep *ep_from_windex(struct dwc2_hsotg *hsotg, if (idx > hsotg->num_of_eps) return NULL; - ep = index_to_ep(hsotg, idx, dir); - - if (idx && ep->dir_in != dir) - return NULL; - - return ep; + return index_to_ep(hsotg, idx, dir); } /** diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c index e9ac215b9663..fc3269f5faf1 100644 --- a/drivers/usb/dwc2/hcd.c +++ b/drivers/usb/dwc2/hcd.c @@ -1313,19 +1313,20 @@ static void dwc2_hc_start_transfer(struct dwc2_hsotg *hsotg, if (num_packets > max_hc_pkt_count) { num_packets = max_hc_pkt_count; chan->xfer_len = num_packets * chan->max_packet; + } else if (chan->ep_is_in) { + /* + * Always program an integral # of max packets + * for IN transfers. 
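The rounding being introduced here pads every IN transfer to a whole number of max packets, because the device may legally fill the final packet and the host buffer must have room for it. Reduced to a standalone helper (DIV_ROUND_UP is the stock kernel macro; the names are illustrative):

	static u32 in_xfer_len(u32 requested_len, u16 max_packet)
	{
		u32 num_packets = DIV_ROUND_UP(requested_len, max_packet);

		/* Pad to whole packets; the caller must supply a buffer
		 * aligned and sized for the padded length. */
		return num_packets * max_packet;
	}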
+ * Note: This assumes that the input buffer is + * aligned and sized accordingly. + */ + chan->xfer_len = num_packets * chan->max_packet; } } else { /* Need 1 packet for transfer length of 0 */ num_packets = 1; } - if (chan->ep_is_in) - /* - * Always program an integral # of max packets for IN - * transfers - */ - chan->xfer_len = num_packets * chan->max_packet; - if (chan->ep_type == USB_ENDPOINT_XFER_INT || chan->ep_type == USB_ENDPOINT_XFER_ISOC) /* diff --git a/drivers/usb/dwc2/hcd_intr.c b/drivers/usb/dwc2/hcd_intr.c index a052d39b4375..d5f4ec1b73b1 100644 --- a/drivers/usb/dwc2/hcd_intr.c +++ b/drivers/usb/dwc2/hcd_intr.c @@ -500,7 +500,7 @@ static int dwc2_update_urb_state(struct dwc2_hsotg *hsotg, &short_read); if (urb->actual_length + xfer_length > urb->length) { - dev_warn(hsotg->dev, "%s(): trimming xfer length\n", __func__); + dev_dbg(hsotg->dev, "%s(): trimming xfer length\n", __func__); xfer_length = urb->length - urb->actual_length; } @@ -1977,6 +1977,18 @@ error: qtd->error_count++; dwc2_update_urb_state_abn(hsotg, chan, chnum, qtd->urb, qtd, DWC2_HC_XFER_XACT_ERR); + /* + * We can get here after a completed transaction + * (urb->actual_length >= urb->length) which was not reported + * as completed. If that is the case, and we do not abort + * the transfer, a transfer of size 0 will be enqueued + * subsequently. If urb->actual_length is not DMA-aligned, + * the buffer will then point to an unaligned address, and + * the resulting behavior is undefined. Bail out in that + * situation. + */ + if (qtd->urb->actual_length >= qtd->urb->length) + qtd->error_count = 3; dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd); dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_XACT_ERR); } diff --git a/drivers/usb/dwc2/params.c b/drivers/usb/dwc2/params.c index 267543c3dc38..92df3d620f7d 100644 --- a/drivers/usb/dwc2/params.c +++ b/drivers/usb/dwc2/params.c @@ -177,7 +177,10 @@ static void dwc2_set_stm32mp15_fsotg_params(struct dwc2_hsotg *hsotg) p->i2c_enable = false; p->activate_stm_fs_transceiver = true; p->activate_stm_id_vb_detection = true; + p->ahbcfg = GAHBCFG_HBSTLEN_INCR16 << GAHBCFG_HBSTLEN_SHIFT; p->power_down = DWC2_POWER_DOWN_PARAM_NONE; + p->host_support_fs_ls_low_power = true; + p->host_ls_low_power_phy_clk = true; } static void dwc2_set_stm32mp15_hsotg_params(struct dwc2_hsotg *hsotg) @@ -189,7 +192,12 @@ static void dwc2_set_stm32mp15_hsotg_params(struct dwc2_hsotg *hsotg) p->host_rx_fifo_size = 440; p->host_nperio_tx_fifo_size = 256; p->host_perio_tx_fifo_size = 256; + p->ahbcfg = GAHBCFG_HBSTLEN_INCR16 << GAHBCFG_HBSTLEN_SHIFT; p->power_down = DWC2_POWER_DOWN_PARAM_NONE; + p->lpm = false; + p->lpm_clock_gating = false; + p->besl = false; + p->hird_threshold_en = false; } const struct of_device_id dwc2_of_match_table[] = { diff --git a/drivers/usb/dwc2/pci.c b/drivers/usb/dwc2/pci.c index 7afc10872f1f..0000151e3ca9 100644 --- a/drivers/usb/dwc2/pci.c +++ b/drivers/usb/dwc2/pci.c @@ -63,20 +63,6 @@ struct dwc2_pci_glue { struct platform_device *phy; }; -static int dwc2_pci_quirks(struct pci_dev *pdev, struct platform_device *dwc2) -{ - if (pdev->vendor == PCI_VENDOR_ID_SYNOPSYS && - pdev->device == PCI_PRODUCT_ID_HAPS_HSOTG) { - struct property_entry properties[] = { - { }, - }; - - return platform_device_add_properties(dwc2, properties); - } - - return 0; -} - /** * dwc2_pci_probe() - Provides the cleanup entry points for the DWC_otg PCI * driver @@ -143,10 +129,6 @@ static int dwc2_pci_probe(struct pci_dev *pci, dwc2->dev.parent = dev; - ret = dwc2_pci_quirks(pci, 
dwc2); - if (ret) - goto err; - glue = devm_kzalloc(dev, sizeof(*glue), GFP_KERNEL); if (!glue) { ret = -ENOMEM; diff --git a/drivers/usb/dwc3/Kconfig b/drivers/usb/dwc3/Kconfig index 7a2304565a73..2133acf8ee69 100644 --- a/drivers/usb/dwc3/Kconfig +++ b/drivers/usb/dwc3/Kconfig @@ -139,4 +139,14 @@ config USB_DWC3_QCOM for peripheral mode support. Say 'Y' or 'M' if you have one such device. +config USB_DWC3_IMX8MP + tristate "NXP iMX8MP Platform" + depends on OF && COMMON_CLK + depends on (ARCH_MXC && ARM64) || COMPILE_TEST + default USB_DWC3 + help + NXP iMX8M Plus SoC use DesignWare Core IP for USB2/3 + functionality. + Say 'Y' or 'M' if you have one such device. + endif diff --git a/drivers/usb/dwc3/Makefile b/drivers/usb/dwc3/Makefile index ae86da0dc5bd..2259f8876fb2 100644 --- a/drivers/usb/dwc3/Makefile +++ b/drivers/usb/dwc3/Makefile @@ -51,3 +51,4 @@ obj-$(CONFIG_USB_DWC3_MESON_G12A) += dwc3-meson-g12a.o obj-$(CONFIG_USB_DWC3_OF_SIMPLE) += dwc3-of-simple.o obj-$(CONFIG_USB_DWC3_ST) += dwc3-st.o obj-$(CONFIG_USB_DWC3_QCOM) += dwc3-qcom.o +obj-$(CONFIG_USB_DWC3_IMX8MP) += dwc3-imx8mp.o diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c index 841daec70b6e..f2448d0a9d39 100644 --- a/drivers/usb/dwc3/core.c +++ b/drivers/usb/dwc3/core.c @@ -1126,11 +1126,8 @@ static int dwc3_core_get_phy(struct dwc3 *dwc) ret = PTR_ERR(dwc->usb2_phy); if (ret == -ENXIO || ret == -ENODEV) { dwc->usb2_phy = NULL; - } else if (ret == -EPROBE_DEFER) { - return ret; } else { - dev_err(dev, "no usb2 phy configured\n"); - return ret; + return dev_err_probe(dev, ret, "no usb2 phy configured\n"); } } @@ -1138,11 +1135,8 @@ static int dwc3_core_get_phy(struct dwc3 *dwc) ret = PTR_ERR(dwc->usb3_phy); if (ret == -ENXIO || ret == -ENODEV) { dwc->usb3_phy = NULL; - } else if (ret == -EPROBE_DEFER) { - return ret; } else { - dev_err(dev, "no usb3 phy configured\n"); - return ret; + return dev_err_probe(dev, ret, "no usb3 phy configured\n"); } } @@ -1151,11 +1145,8 @@ static int dwc3_core_get_phy(struct dwc3 *dwc) ret = PTR_ERR(dwc->usb2_generic_phy); if (ret == -ENOSYS || ret == -ENODEV) { dwc->usb2_generic_phy = NULL; - } else if (ret == -EPROBE_DEFER) { - return ret; } else { - dev_err(dev, "no usb2 phy configured\n"); - return ret; + return dev_err_probe(dev, ret, "no usb2 phy configured\n"); } } @@ -1164,11 +1155,8 @@ static int dwc3_core_get_phy(struct dwc3 *dwc) ret = PTR_ERR(dwc->usb3_generic_phy); if (ret == -ENOSYS || ret == -ENODEV) { dwc->usb3_generic_phy = NULL; - } else if (ret == -EPROBE_DEFER) { - return ret; } else { - dev_err(dev, "no usb3 phy configured\n"); - return ret; + return dev_err_probe(dev, ret, "no usb3 phy configured\n"); } } @@ -1190,11 +1178,8 @@ static int dwc3_core_init_mode(struct dwc3 *dwc) phy_set_mode(dwc->usb3_generic_phy, PHY_MODE_USB_DEVICE); ret = dwc3_gadget_init(dwc); - if (ret) { - if (ret != -EPROBE_DEFER) - dev_err(dev, "failed to initialize gadget\n"); - return ret; - } + if (ret) + return dev_err_probe(dev, ret, "failed to initialize gadget\n"); break; case USB_DR_MODE_HOST: dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_HOST); @@ -1205,20 +1190,14 @@ static int dwc3_core_init_mode(struct dwc3 *dwc) phy_set_mode(dwc->usb3_generic_phy, PHY_MODE_USB_HOST); ret = dwc3_host_init(dwc); - if (ret) { - if (ret != -EPROBE_DEFER) - dev_err(dev, "failed to initialize host\n"); - return ret; - } + if (ret) + return dev_err_probe(dev, ret, "failed to initialize host\n"); break; case USB_DR_MODE_OTG: INIT_WORK(&dwc->drd_work, __dwc3_set_mode); ret = dwc3_drd_init(dwc); - if (ret) 
{ - if (ret != -EPROBE_DEFER) - dev_err(dev, "failed to initialize dual-role\n"); - return ret; - } + if (ret) + return dev_err_probe(dev, ret, "failed to initialize dual-role\n"); break; default: dev_err(dev, "Unsupported mode of operation %d\n", dwc->dr_mode); @@ -1273,6 +1252,7 @@ static void dwc3_get_properties(struct dwc3 *dwc) hird_threshold = 12; dwc->maximum_speed = usb_get_maximum_speed(dev); + dwc->max_ssp_rate = usb_get_maximum_ssp_rate(dev); dwc->dr_mode = usb_get_dr_mode(dev); dwc->hsphy_mode = of_usb_get_phy_mode(dev->of_node); @@ -1444,6 +1424,42 @@ static void dwc3_check_params(struct dwc3 *dwc) } break; } + + /* + * Currently the controller does not have visibility into the HW + * parameter to determine the maximum number of lanes the HW supports. + * If the number of lanes is not specified in the device property, then + * set the default to support dual-lane for DWC_usb32 and single-lane + * for DWC_usb31 for super-speed-plus. + */ + if (dwc->maximum_speed == USB_SPEED_SUPER_PLUS) { + switch (dwc->max_ssp_rate) { + case USB_SSP_GEN_2x1: + if (hwparam_gen == DWC3_GHWPARAMS3_SSPHY_IFC_GEN1) + dev_warn(dev, "UDC only supports Gen 1\n"); + break; + case USB_SSP_GEN_1x2: + case USB_SSP_GEN_2x2: + if (DWC3_IP_IS(DWC31)) + dev_warn(dev, "UDC only supports single lane\n"); + break; + case USB_SSP_GEN_UNKNOWN: + default: + switch (hwparam_gen) { + case DWC3_GHWPARAMS3_SSPHY_IFC_GEN2: + if (DWC3_IP_IS(DWC32)) + dwc->max_ssp_rate = USB_SSP_GEN_2x2; + else + dwc->max_ssp_rate = USB_SSP_GEN_2x1; + break; + case DWC3_GHWPARAMS3_SSPHY_IFC_GEN1: + if (DWC3_IP_IS(DWC32)) + dwc->max_ssp_rate = USB_SSP_GEN_1x2; + break; + } + break; + } + } } static int dwc3_probe(struct platform_device *pdev) @@ -1490,7 +1506,7 @@ static int dwc3_probe(struct platform_device *pdev) dwc3_get_properties(dwc); - dwc->reset = devm_reset_control_array_get(dev, true, true); + dwc->reset = devm_reset_control_array_get_optional_shared(dev); if (IS_ERR(dwc->reset)) return PTR_ERR(dwc->reset); @@ -1555,8 +1571,7 @@ static int dwc3_probe(struct platform_device *pdev) ret = dwc3_core_init(dwc); if (ret) { - if (ret != -EPROBE_DEFER) - dev_err(dev, "failed to initialize core: %d\n", ret); + dev_err_probe(dev, ret, "failed to initialize core\n"); goto err4; } @@ -1758,7 +1773,7 @@ static int dwc3_resume_common(struct dwc3 *dwc, pm_message_t msg) if (PMSG_IS_AUTO(msg)) break; - ret = dwc3_core_init(dwc); + ret = dwc3_core_init_for_resume(dwc); if (ret) return ret; diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h index 2f95f08ca511..052b20d52651 100644 --- a/drivers/usb/dwc3/core.h +++ b/drivers/usb/dwc3/core.h @@ -285,6 +285,7 @@ /* Global USB2 PHY Vendor Control Register */ #define DWC3_GUSB2PHYACC_NEWREGREQ BIT(25) +#define DWC3_GUSB2PHYACC_DONE BIT(24) #define DWC3_GUSB2PHYACC_BUSY BIT(23) #define DWC3_GUSB2PHYACC_WRITE BIT(22) #define DWC3_GUSB2PHYACC_ADDR(n) (n << 16) @@ -385,6 +386,8 @@ #define DWC3_GUCTL3_SPLITDISABLE BIT(14) /* Device Configuration Register */ +#define DWC3_DCFG_NUMLANES(n) (((n) & 0x3) << 30) /* DWC_usb32 only */ + #define DWC3_DCFG_DEVADDR(addr) ((addr) << 3) #define DWC3_DCFG_DEVADDR_MASK DWC3_DCFG_DEVADDR(0x7f) @@ -458,6 +461,8 @@ #define DWC3_DEVTEN_USBRSTEN BIT(1) #define DWC3_DEVTEN_DISCONNEVTEN BIT(0) +#define DWC3_DSTS_CONNLANES(n) (((n) >> 30) & 0x3) /* DWC_usb32 only */ + /* Device Status Register */ #define DWC3_DSTS_DCNRD BIT(29) @@ -963,6 +968,10 @@ struct dwc3_scratchpad_array { * @nr_scratch: number of scratch buffers * @u1u2: only used on revisions <1.83a for 
workaround * @maximum_speed: maximum speed requested (mainly for testing purposes) + * @max_ssp_rate: SuperSpeed Plus maximum signaling rate and lane count + * @gadget_max_speed: maximum gadget speed requested + * @gadget_ssp_rate: Gadget driver's maximum supported SuperSpeed Plus signaling + * rate and lane count. * @ip: controller's ID * @revision: controller's version of an IP * @version_type: VERSIONTYPE register contents, a sub release of a revision @@ -1125,6 +1134,9 @@ struct dwc3 { u32 nr_scratch; u32 u1u2; u32 maximum_speed; + u32 gadget_max_speed; + enum usb_ssp_rate max_ssp_rate; + enum usb_ssp_rate gadget_ssp_rate; u32 ip; diff --git a/drivers/usb/dwc3/drd.c b/drivers/usb/dwc3/drd.c index 3e1c1aacf002..e2b68bb770d1 100644 --- a/drivers/usb/dwc3/drd.c +++ b/drivers/usb/dwc3/drd.c @@ -441,8 +441,8 @@ static int dwc3_drd_notifier(struct notifier_block *nb, static struct extcon_dev *dwc3_get_extcon(struct dwc3 *dwc) { struct device *dev = dwc->dev; - struct device_node *np_phy, *np_conn; - struct extcon_dev *edev; + struct device_node *np_phy; + struct extcon_dev *edev = NULL; const char *name; if (device_property_read_bool(dev, "extcon")) @@ -462,15 +462,22 @@ static struct extcon_dev *dwc3_get_extcon(struct dwc3 *dwc) return edev; } + /* + * Try to get an extcon device from the USB PHY controller's "port" + * node. Check if it has the "port" node first, to avoid printing the + * error message from underlying code, as it's a valid case: extcon + * device (and "port" node) may be missing in case of "usb-role-switch" + * or OTG mode. + */ np_phy = of_parse_phandle(dev->of_node, "phys", 0); - np_conn = of_graph_get_remote_node(np_phy, -1, -1); + if (of_graph_is_present(np_phy)) { + struct device_node *np_conn; - if (np_conn) - edev = extcon_find_edev_by_node(np_conn); - else - edev = NULL; - - of_node_put(np_conn); + np_conn = of_graph_get_remote_node(np_phy, -1, -1); + if (np_conn) + edev = extcon_find_edev_by_node(np_conn); + of_node_put(np_conn); + } of_node_put(np_phy); return edev; diff --git a/drivers/usb/dwc3/dwc3-haps.c b/drivers/usb/dwc3/dwc3-haps.c index 55b4a901168e..f6e3817fa7af 100644 --- a/drivers/usb/dwc3/dwc3-haps.c +++ b/drivers/usb/dwc3/dwc3-haps.c @@ -33,6 +33,10 @@ static const struct property_entry initial_properties[] = { { }, }; +static const struct software_node dwc3_haps_swnode = { + .properties = initial_properties, +}; + static int dwc3_haps_probe(struct pci_dev *pci, const struct pci_device_id *id) { @@ -77,7 +81,7 @@ static int dwc3_haps_probe(struct pci_dev *pci, dwc->pci = pci; dwc->dwc3->dev.parent = dev; - ret = platform_device_add_properties(dwc->dwc3, initial_properties); + ret = device_add_software_node(&dwc->dwc3->dev, &dwc3_haps_swnode); if (ret) goto err; @@ -91,6 +95,7 @@ static int dwc3_haps_probe(struct pci_dev *pci, return 0; err: + device_remove_software_node(&dwc->dwc3->dev); platform_device_put(dwc->dwc3); return ret; } @@ -99,6 +104,7 @@ static void dwc3_haps_remove(struct pci_dev *pci) { struct dwc3_haps *dwc = pci_get_drvdata(pci); + device_remove_software_node(&dwc->dwc3->dev); platform_device_unregister(dwc->dwc3); } diff --git a/drivers/usb/dwc3/dwc3-imx8mp.c b/drivers/usb/dwc3/dwc3-imx8mp.c new file mode 100644 index 000000000000..75f0042b998b --- /dev/null +++ b/drivers/usb/dwc3/dwc3-imx8mp.c @@ -0,0 +1,363 @@ +// SPDX-License-Identifier: GPL-2.0 +/** + * dwc3-imx8mp.c - NXP imx8mp Specific Glue layer + * + * Copyright (c) 2020 NXP. 
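The dwc3-haps hunk just above shows the conversion this series applies across the glue drivers: a static struct software_node replaces one-shot platform_device_add_properties() calls, so the properties get an explicit lifetime that is torn down with device_remove_software_node() on every exit path. The shape of the pattern, with an illustrative property name:

	static const struct property_entry my_properties[] = {
		PROPERTY_ENTRY_BOOL("linux,sysdev_is_parent"),
		{ }
	};

	static const struct software_node my_swnode = {
		.properties = my_properties,
	};

	/* probe */
	ret = device_add_software_node(&child->dev, &my_swnode);
	if (ret)
		goto err_put;

	/* remove (and probe error paths) */
	device_remove_software_node(&child->dev);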
+ */ + +#include <linux/clk.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/of_platform.h> +#include <linux/platform_device.h> +#include <linux/pm_runtime.h> + +#include "core.h" + +/* USB wakeup registers */ +#define USB_WAKEUP_CTRL 0x00 + +/* Global wakeup interrupt enable, also used to clear interrupt */ +#define USB_WAKEUP_EN BIT(31) +/* Wakeup from connect or disconnect, only for superspeed */ +#define USB_WAKEUP_SS_CONN BIT(5) +/* 0 select vbus_valid, 1 select sessvld */ +#define USB_WAKEUP_VBUS_SRC_SESS_VAL BIT(4) +/* Enable signal for wake up from u3 state */ +#define USB_WAKEUP_U3_EN BIT(3) +/* Enable signal for wake up from id change */ +#define USB_WAKEUP_ID_EN BIT(2) +/* Enable signal for wake up from vbus change */ +#define USB_WAKEUP_VBUS_EN BIT(1) +/* Enable signal for wake up from dp/dm change */ +#define USB_WAKEUP_DPDM_EN BIT(0) + +#define USB_WAKEUP_EN_MASK GENMASK(5, 0) + +struct dwc3_imx8mp { + struct device *dev; + struct platform_device *dwc3; + void __iomem *glue_base; + struct clk *hsio_clk; + struct clk *suspend_clk; + int irq; + bool pm_suspended; + bool wakeup_pending; +}; + +static void dwc3_imx8mp_wakeup_enable(struct dwc3_imx8mp *dwc3_imx) +{ + struct dwc3 *dwc3 = platform_get_drvdata(dwc3_imx->dwc3); + u32 val; + + if (!dwc3) + return; + + val = readl(dwc3_imx->glue_base + USB_WAKEUP_CTRL); + + if ((dwc3->current_dr_role == DWC3_GCTL_PRTCAP_HOST) && dwc3->xhci) + val |= USB_WAKEUP_EN | USB_WAKEUP_SS_CONN | + USB_WAKEUP_U3_EN | USB_WAKEUP_DPDM_EN; + else if (dwc3->current_dr_role == DWC3_GCTL_PRTCAP_DEVICE) + val |= USB_WAKEUP_EN | USB_WAKEUP_VBUS_EN | + USB_WAKEUP_VBUS_SRC_SESS_VAL; + + writel(val, dwc3_imx->glue_base + USB_WAKEUP_CTRL); +} + +static void dwc3_imx8mp_wakeup_disable(struct dwc3_imx8mp *dwc3_imx) +{ + u32 val; + + val = readl(dwc3_imx->glue_base + USB_WAKEUP_CTRL); + val &= ~(USB_WAKEUP_EN | USB_WAKEUP_EN_MASK); + writel(val, dwc3_imx->glue_base + USB_WAKEUP_CTRL); +} + +static irqreturn_t dwc3_imx8mp_interrupt(int irq, void *_dwc3_imx) +{ + struct dwc3_imx8mp *dwc3_imx = _dwc3_imx; + struct dwc3 *dwc = platform_get_drvdata(dwc3_imx->dwc3); + + if (!dwc3_imx->pm_suspended) + return IRQ_HANDLED; + + disable_irq_nosync(dwc3_imx->irq); + dwc3_imx->wakeup_pending = true; + + if ((dwc->current_dr_role == DWC3_GCTL_PRTCAP_HOST) && dwc->xhci) + pm_runtime_resume(&dwc->xhci->dev); + else if (dwc->current_dr_role == DWC3_GCTL_PRTCAP_DEVICE) + pm_runtime_get(dwc->dev); + + return IRQ_HANDLED; +} + +static int dwc3_imx8mp_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct device_node *dwc3_np, *node = dev->of_node; + struct dwc3_imx8mp *dwc3_imx; + int err, irq; + + if (!node) { + dev_err(dev, "device node not found\n"); + return -EINVAL; + } + + dwc3_imx = devm_kzalloc(dev, sizeof(*dwc3_imx), GFP_KERNEL); + if (!dwc3_imx) + return -ENOMEM; + + platform_set_drvdata(pdev, dwc3_imx); + + dwc3_imx->dev = dev; + + dwc3_imx->glue_base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(dwc3_imx->glue_base)) + return PTR_ERR(dwc3_imx->glue_base); + + dwc3_imx->hsio_clk = devm_clk_get(dev, "hsio"); + if (IS_ERR(dwc3_imx->hsio_clk)) { + err = PTR_ERR(dwc3_imx->hsio_clk); + dev_err(dev, "Failed to get hsio clk, err=%d\n", err); + return err; + } + + err = clk_prepare_enable(dwc3_imx->hsio_clk); + if (err) { + dev_err(dev, "Failed to enable hsio clk, err=%d\n", err); + return err; + } + + dwc3_imx->suspend_clk = devm_clk_get(dev, "suspend"); + if 
(IS_ERR(dwc3_imx->suspend_clk)) { + err = PTR_ERR(dwc3_imx->suspend_clk); + dev_err(dev, "Failed to get suspend clk, err=%d\n", err); + goto disable_hsio_clk; + } + + err = clk_prepare_enable(dwc3_imx->suspend_clk); + if (err) { + dev_err(dev, "Failed to enable suspend clk, err=%d\n", err); + goto disable_hsio_clk; + } + + irq = platform_get_irq(pdev, 0); + if (irq < 0) { + err = irq; + goto disable_clks; + } + dwc3_imx->irq = irq; + + err = devm_request_threaded_irq(dev, irq, NULL, dwc3_imx8mp_interrupt, + IRQF_ONESHOT, dev_name(dev), dwc3_imx); + if (err) { + dev_err(dev, "failed to request IRQ #%d --> %d\n", irq, err); + goto disable_clks; + } + + pm_runtime_set_active(dev); + pm_runtime_enable(dev); + err = pm_runtime_get_sync(dev); + if (err < 0) + goto disable_rpm; + + dwc3_np = of_get_child_by_name(node, "dwc3"); + if (!dwc3_np) { + dev_err(dev, "failed to find dwc3 core child\n"); + goto disable_rpm; + } + + err = of_platform_populate(node, NULL, NULL, dev); + if (err) { + dev_err(&pdev->dev, "failed to create dwc3 core\n"); + goto err_node_put; + } + + dwc3_imx->dwc3 = of_find_device_by_node(dwc3_np); + if (!dwc3_imx->dwc3) { + dev_err(dev, "failed to get dwc3 platform device\n"); + err = -ENODEV; + goto depopulate; + } + of_node_put(dwc3_np); + + device_set_wakeup_capable(dev, true); + pm_runtime_put(dev); + + return 0; + +depopulate: + of_platform_depopulate(dev); +err_node_put: + of_node_put(dwc3_np); +disable_rpm: + pm_runtime_disable(dev); + pm_runtime_put_noidle(dev); +disable_clks: + clk_disable_unprepare(dwc3_imx->suspend_clk); +disable_hsio_clk: + clk_disable_unprepare(dwc3_imx->hsio_clk); + + return err; +} + +static int dwc3_imx8mp_remove(struct platform_device *pdev) +{ + struct dwc3_imx8mp *dwc3_imx = platform_get_drvdata(pdev); + struct device *dev = &pdev->dev; + + pm_runtime_get_sync(dev); + of_platform_depopulate(dev); + + clk_disable_unprepare(dwc3_imx->suspend_clk); + clk_disable_unprepare(dwc3_imx->hsio_clk); + + pm_runtime_disable(dev); + pm_runtime_put_noidle(dev); + platform_set_drvdata(pdev, NULL); + + return 0; +} + +static int __maybe_unused dwc3_imx8mp_suspend(struct dwc3_imx8mp *dwc3_imx, + pm_message_t msg) +{ + if (dwc3_imx->pm_suspended) + return 0; + + /* Wakeup enable */ + if (PMSG_IS_AUTO(msg) || device_may_wakeup(dwc3_imx->dev)) + dwc3_imx8mp_wakeup_enable(dwc3_imx); + + dwc3_imx->pm_suspended = true; + + return 0; +} + +static int __maybe_unused dwc3_imx8mp_resume(struct dwc3_imx8mp *dwc3_imx, + pm_message_t msg) +{ + struct dwc3 *dwc = platform_get_drvdata(dwc3_imx->dwc3); + int ret = 0; + + if (!dwc3_imx->pm_suspended) + return 0; + + /* Wakeup disable */ + dwc3_imx8mp_wakeup_disable(dwc3_imx); + dwc3_imx->pm_suspended = false; + + if (dwc3_imx->wakeup_pending) { + dwc3_imx->wakeup_pending = false; + if (dwc->current_dr_role == DWC3_GCTL_PRTCAP_DEVICE) { + pm_runtime_mark_last_busy(dwc->dev); + pm_runtime_put_autosuspend(dwc->dev); + } else { + /* + * Add wait for xhci switch from suspend + * clock to normal clock to detect connection. 
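This wait closes the loop with the wakeup interrupt handler earlier in the file: the handler only acts while pm_suspended is set, quiesces its own line with disable_irq_nosync(), records wakeup_pending, and wakes the consumer (pm_runtime_resume() on the xHCI child in host mode, pm_runtime_get() on the core in device mode); the resume path then drops the device-mode reference, or sleeps roughly 10 ms for the host-mode clock switch, before re-enabling the IRQ. The handler's skeleton, reusing the driver's field names:

	static irqreturn_t wakeup_irq(int irq, void *data)
	{
		struct dwc3_imx8mp *dwc3_imx = data;

		if (!dwc3_imx->pm_suspended)
			return IRQ_HANDLED;	/* no wakeup expected */

		/* Quiesce the line; dwc3_imx8mp_resume() re-enables it. */
		disable_irq_nosync(dwc3_imx->irq);
		dwc3_imx->wakeup_pending = true;

		/* ... then resume the host or gadget consumer, as above ... */

		return IRQ_HANDLED;
	}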
+ */ + usleep_range(9000, 10000); + } + enable_irq(dwc3_imx->irq); + } + + return ret; +} + +static int __maybe_unused dwc3_imx8mp_pm_suspend(struct device *dev) +{ + struct dwc3_imx8mp *dwc3_imx = dev_get_drvdata(dev); + int ret; + + ret = dwc3_imx8mp_suspend(dwc3_imx, PMSG_SUSPEND); + + if (device_may_wakeup(dwc3_imx->dev)) + enable_irq_wake(dwc3_imx->irq); + else + clk_disable_unprepare(dwc3_imx->suspend_clk); + + clk_disable_unprepare(dwc3_imx->hsio_clk); + dev_dbg(dev, "dwc3 imx8mp pm suspend.\n"); + + return ret; +} + +static int __maybe_unused dwc3_imx8mp_pm_resume(struct device *dev) +{ + struct dwc3_imx8mp *dwc3_imx = dev_get_drvdata(dev); + int ret; + + if (device_may_wakeup(dwc3_imx->dev)) { + disable_irq_wake(dwc3_imx->irq); + } else { + ret = clk_prepare_enable(dwc3_imx->suspend_clk); + if (ret) + return ret; + } + + ret = clk_prepare_enable(dwc3_imx->hsio_clk); + if (ret) + return ret; + + ret = dwc3_imx8mp_resume(dwc3_imx, PMSG_RESUME); + + pm_runtime_disable(dev); + pm_runtime_set_active(dev); + pm_runtime_enable(dev); + + dev_dbg(dev, "dwc3 imx8mp pm resume.\n"); + + return ret; +} + +static int __maybe_unused dwc3_imx8mp_runtime_suspend(struct device *dev) +{ + struct dwc3_imx8mp *dwc3_imx = dev_get_drvdata(dev); + + dev_dbg(dev, "dwc3 imx8mp runtime suspend.\n"); + + return dwc3_imx8mp_suspend(dwc3_imx, PMSG_AUTO_SUSPEND); +} + +static int __maybe_unused dwc3_imx8mp_runtime_resume(struct device *dev) +{ + struct dwc3_imx8mp *dwc3_imx = dev_get_drvdata(dev); + + dev_dbg(dev, "dwc3 imx8mp runtime resume.\n"); + + return dwc3_imx8mp_resume(dwc3_imx, PMSG_AUTO_RESUME); +} + +static const struct dev_pm_ops dwc3_imx8mp_dev_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(dwc3_imx8mp_pm_suspend, dwc3_imx8mp_pm_resume) + SET_RUNTIME_PM_OPS(dwc3_imx8mp_runtime_suspend, + dwc3_imx8mp_runtime_resume, NULL) +}; + +static const struct of_device_id dwc3_imx8mp_of_match[] = { + { .compatible = "fsl,imx8mp-dwc3", }, + {}, +}; +MODULE_DEVICE_TABLE(of, dwc3_imx8mp_of_match); + +static struct platform_driver dwc3_imx8mp_driver = { + .probe = dwc3_imx8mp_probe, + .remove = dwc3_imx8mp_remove, + .driver = { + .name = "imx8mp-dwc3", + .pm = &dwc3_imx8mp_dev_pm_ops, + .of_match_table = dwc3_imx8mp_of_match, + }, +}; + +module_platform_driver(dwc3_imx8mp_driver); + +MODULE_ALIAS("platform:imx8mp-dwc3"); +MODULE_AUTHOR("jun.li@nxp.com"); +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("DesignWare USB3 imx8mp Glue Layer"); diff --git a/drivers/usb/dwc3/dwc3-keystone.c b/drivers/usb/dwc3/dwc3-keystone.c index 9a99253d5ba3..057056c0975e 100644 --- a/drivers/usb/dwc3/dwc3-keystone.c +++ b/drivers/usb/dwc3/dwc3-keystone.c @@ -99,13 +99,8 @@ static int kdwc3_probe(struct platform_device *pdev) /* PSC dependency on AM65 needs SERDES0 to be powered before USB0 */ kdwc->usb3_phy = devm_phy_optional_get(dev, "usb3-phy"); - if (IS_ERR(kdwc->usb3_phy)) { - error = PTR_ERR(kdwc->usb3_phy); - if (error != -EPROBE_DEFER) - dev_err(dev, "couldn't get usb3 phy: %d\n", error); - - return error; - } + if (IS_ERR(kdwc->usb3_phy)) + return dev_err_probe(dev, PTR_ERR(kdwc->usb3_phy), "couldn't get usb3 phy\n"); phy_pm_runtime_get_sync(kdwc->usb3_phy); diff --git a/drivers/usb/dwc3/dwc3-meson-g12a.c b/drivers/usb/dwc3/dwc3-meson-g12a.c index 417e05381b5d..bdf1f98dfad8 100644 --- a/drivers/usb/dwc3/dwc3-meson-g12a.c +++ b/drivers/usb/dwc3/dwc3-meson-g12a.c @@ -754,7 +754,7 @@ static int dwc3_meson_g12a_probe(struct platform_device *pdev) ret = priv->drvdata->setup_regmaps(priv, base); if (ret) - return ret; + goto err_disable_clks; 
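That one-line dwc3-meson-g12a fix is the standard probe-unwind rule: once the clocks are enabled, every later failure must leave through the label that disables them rather than returning directly. In miniature, with illustrative names for the clock fields:

	ret = clk_bulk_prepare_enable(priv->num_clks, priv->clks);
	if (ret)
		return ret;

	ret = priv->drvdata->setup_regmaps(priv, base);
	if (ret)
		goto err_disable_clks;	/* was "return ret", leaking the clocks */

	return 0;

err_disable_clks:
	clk_bulk_disable_unprepare(priv->num_clks, priv->clks);
	return ret;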
if (priv->vbus) { ret = regulator_enable(priv->vbus); diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c index bae6a70664c8..3d3918a8d5fb 100644 --- a/drivers/usb/dwc3/dwc3-pci.c +++ b/drivers/usb/dwc3/dwc3-pci.c @@ -40,7 +40,9 @@ #define PCI_DEVICE_ID_INTEL_TGPLP 0xa0ee #define PCI_DEVICE_ID_INTEL_TGPH 0x43ee #define PCI_DEVICE_ID_INTEL_JSP 0x4dee +#define PCI_DEVICE_ID_INTEL_ADLP 0x51ee #define PCI_DEVICE_ID_INTEL_ADLS 0x7ae1 +#define PCI_DEVICE_ID_INTEL_TGL 0x9a15 #define PCI_INTEL_BXT_DSM_GUID "732b85d5-b7a7-4a1b-9ba0-4bbd00ffd511" #define PCI_INTEL_BXT_FUNC_PMU_PWR 4 @@ -142,6 +144,18 @@ static const struct property_entry dwc3_pci_amd_properties[] = { {} }; +static const struct software_node dwc3_pci_intel_swnode = { + .properties = dwc3_pci_intel_properties, +}; + +static const struct software_node dwc3_pci_intel_mrfld_swnode = { + .properties = dwc3_pci_mrfld_properties, +}; + +static const struct software_node dwc3_pci_amd_swnode = { + .properties = dwc3_pci_amd_properties, +}; + static int dwc3_pci_quirks(struct dwc3_pci *dwc) { struct pci_dev *pdev = dwc->pci; @@ -222,7 +236,6 @@ static void dwc3_pci_resume_work(struct work_struct *work) static int dwc3_pci_probe(struct pci_dev *pci, const struct pci_device_id *id) { - struct property_entry *p = (struct property_entry *)id->driver_data; struct dwc3_pci *dwc; struct resource res[2]; int ret; @@ -265,7 +278,7 @@ static int dwc3_pci_probe(struct pci_dev *pci, const struct pci_device_id *id) dwc->dwc3->dev.parent = dev; ACPI_COMPANION_SET(&dwc->dwc3->dev, ACPI_COMPANION(dev)); - ret = platform_device_add_properties(dwc->dwc3, p); + ret = device_add_software_node(&dwc->dwc3->dev, (void *)id->driver_data); if (ret < 0) goto err; @@ -288,6 +301,7 @@ static int dwc3_pci_probe(struct pci_dev *pci, const struct pci_device_id *id) return 0; err: + device_remove_software_node(&dwc->dwc3->dev); platform_device_put(dwc->dwc3); return ret; } @@ -304,75 +318,82 @@ static void dwc3_pci_remove(struct pci_dev *pci) #endif device_init_wakeup(&pci->dev, false); pm_runtime_get(&pci->dev); + device_remove_software_node(&dwc->dwc3->dev); platform_device_unregister(dwc->dwc3); } static const struct pci_device_id dwc3_pci_id_table[] = { { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_BSW), - (kernel_ulong_t) &dwc3_pci_intel_properties }, + (kernel_ulong_t) &dwc3_pci_intel_swnode, }, { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_BYT), - (kernel_ulong_t) &dwc3_pci_intel_properties, }, + (kernel_ulong_t) &dwc3_pci_intel_swnode, }, { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_MRFLD), - (kernel_ulong_t) &dwc3_pci_mrfld_properties, }, + (kernel_ulong_t) &dwc3_pci_intel_mrfld_swnode, }, { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_CMLLP), - (kernel_ulong_t) &dwc3_pci_intel_properties, }, + (kernel_ulong_t) &dwc3_pci_intel_swnode, }, { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_CMLH), - (kernel_ulong_t) &dwc3_pci_intel_properties, }, + (kernel_ulong_t) &dwc3_pci_intel_swnode, }, { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_SPTLP), - (kernel_ulong_t) &dwc3_pci_intel_properties, }, + (kernel_ulong_t) &dwc3_pci_intel_swnode, }, { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_SPTH), - (kernel_ulong_t) &dwc3_pci_intel_properties, }, + (kernel_ulong_t) &dwc3_pci_intel_swnode, }, { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_BXT), - (kernel_ulong_t) &dwc3_pci_intel_properties, }, + (kernel_ulong_t) &dwc3_pci_intel_swnode, }, { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_BXT_M), - (kernel_ulong_t) &dwc3_pci_intel_properties, }, + (kernel_ulong_t) &dwc3_pci_intel_swnode, }, { PCI_VDEVICE(INTEL, 
PCI_DEVICE_ID_INTEL_APL), - (kernel_ulong_t) &dwc3_pci_intel_properties, }, + (kernel_ulong_t) &dwc3_pci_intel_swnode, }, { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_KBP), - (kernel_ulong_t) &dwc3_pci_intel_properties, }, + (kernel_ulong_t) &dwc3_pci_intel_swnode, }, { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_GLK), - (kernel_ulong_t) &dwc3_pci_intel_properties, }, + (kernel_ulong_t) &dwc3_pci_intel_swnode, }, { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_CNPLP), - (kernel_ulong_t) &dwc3_pci_intel_properties, }, + (kernel_ulong_t) &dwc3_pci_intel_swnode, }, { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_CNPH), - (kernel_ulong_t) &dwc3_pci_intel_properties, }, + (kernel_ulong_t) &dwc3_pci_intel_swnode, }, { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_CNPV), - (kernel_ulong_t) &dwc3_pci_intel_properties, }, + (kernel_ulong_t) &dwc3_pci_intel_swnode, }, { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ICLLP), - (kernel_ulong_t) &dwc3_pci_intel_properties, }, + (kernel_ulong_t) &dwc3_pci_intel_swnode, }, { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_EHLLP), - (kernel_ulong_t) &dwc3_pci_intel_properties, }, + (kernel_ulong_t) &dwc3_pci_intel_swnode }, { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TGPLP), - (kernel_ulong_t) &dwc3_pci_intel_properties, }, + (kernel_ulong_t) &dwc3_pci_intel_swnode, }, { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TGPH), - (kernel_ulong_t) &dwc3_pci_intel_properties, }, + (kernel_ulong_t) &dwc3_pci_intel_swnode, }, { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_JSP), - (kernel_ulong_t) &dwc3_pci_intel_properties, }, + (kernel_ulong_t) &dwc3_pci_intel_swnode, }, + + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ADLP), + (kernel_ulong_t) &dwc3_pci_intel_swnode, }, { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ADLS), - (kernel_ulong_t) &dwc3_pci_intel_properties, }, + (kernel_ulong_t) &dwc3_pci_intel_swnode, }, + + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TGL), + (kernel_ulong_t) &dwc3_pci_intel_swnode, }, { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_NL_USB), - (kernel_ulong_t) &dwc3_pci_amd_properties, }, + (kernel_ulong_t) &dwc3_pci_amd_swnode, }, { } /* Terminating Entry */ }; MODULE_DEVICE_TABLE(pci, dwc3_pci_id_table); diff --git a/drivers/usb/dwc3/dwc3-qcom.c b/drivers/usb/dwc3/dwc3-qcom.c index c703d552bbcf..846a47be6df7 100644 --- a/drivers/usb/dwc3/dwc3-qcom.c +++ b/drivers/usb/dwc3/dwc3-qcom.c @@ -60,12 +60,14 @@ struct dwc3_acpi_pdata { int dp_hs_phy_irq_index; int dm_hs_phy_irq_index; int ss_phy_irq_index; + bool is_urs; }; struct dwc3_qcom { struct device *dev; void __iomem *qscratch_base; struct platform_device *dwc3; + struct platform_device *urs_usb; struct clk **clks; int num_clocks; struct reset_control *resets; @@ -429,13 +431,15 @@ static void dwc3_qcom_select_utmi_clk(struct dwc3_qcom *qcom) static int dwc3_qcom_get_irq(struct platform_device *pdev, const char *name, int num) { + struct dwc3_qcom *qcom = platform_get_drvdata(pdev); + struct platform_device *pdev_irq = qcom->urs_usb ? 
qcom->urs_usb : pdev; struct device_node *np = pdev->dev.of_node; int ret; if (np) - ret = platform_get_irq_byname(pdev, name); + ret = platform_get_irq_byname(pdev_irq, name); else - ret = platform_get_irq(pdev, num); + ret = platform_get_irq(pdev_irq, num); return ret; } @@ -563,11 +567,17 @@ static const struct property_entry dwc3_qcom_acpi_properties[] = { {} }; +static const struct software_node dwc3_qcom_swnode = { + .properties = dwc3_qcom_acpi_properties, +}; + static int dwc3_qcom_acpi_register_core(struct platform_device *pdev) { struct dwc3_qcom *qcom = platform_get_drvdata(pdev); struct device *dev = &pdev->dev; struct resource *res, *child_res = NULL; + struct platform_device *pdev_irq = qcom->urs_usb ? qcom->urs_usb : + pdev; int irq; int ret; @@ -597,7 +607,7 @@ static int dwc3_qcom_acpi_register_core(struct platform_device *pdev) child_res[0].end = child_res[0].start + qcom->acpi_pdata->dwc3_core_base_size; - irq = platform_get_irq(pdev, 0); + irq = platform_get_irq(pdev_irq, 0); child_res[1].flags = IORESOURCE_IRQ; child_res[1].start = child_res[1].end = irq; @@ -607,16 +617,17 @@ static int dwc3_qcom_acpi_register_core(struct platform_device *pdev) goto out; } - ret = platform_device_add_properties(qcom->dwc3, - dwc3_qcom_acpi_properties); + ret = device_add_software_node(&qcom->dwc3->dev, &dwc3_qcom_swnode); if (ret < 0) { dev_err(&pdev->dev, "failed to add properties\n"); goto out; } ret = platform_device_add(qcom->dwc3); - if (ret) + if (ret) { dev_err(&pdev->dev, "failed to add device\n"); + device_remove_software_node(&qcom->dwc3->dev); + } out: kfree(child_res); @@ -651,6 +662,33 @@ static int dwc3_qcom_of_register_core(struct platform_device *pdev) return 0; } +static struct platform_device * +dwc3_qcom_create_urs_usb_platdev(struct device *dev) +{ + struct fwnode_handle *fwh; + struct acpi_device *adev; + char name[8]; + int ret; + int id; + + /* Figure out device id */ + ret = sscanf(fwnode_get_name(dev->fwnode), "URS%d", &id); + if (!ret) + return NULL; + + /* Find the child using name */ + snprintf(name, sizeof(name), "USB%d", id); + fwh = fwnode_get_named_child_node(dev->fwnode, name); + if (!fwh) + return NULL; + + adev = to_acpi_device_node(fwh); + if (!adev) + return NULL; + + return acpi_create_platform_device(adev, NULL); +} + static int dwc3_qcom_probe(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; @@ -715,6 +753,14 @@ static int dwc3_qcom_probe(struct platform_device *pdev) qcom->acpi_pdata->qscratch_base_offset; parent_res->end = parent_res->start + qcom->acpi_pdata->qscratch_base_size; + + if (qcom->acpi_pdata->is_urs) { + qcom->urs_usb = dwc3_qcom_create_urs_usb_platdev(dev); + if (!qcom->urs_usb) { + dev_err(dev, "failed to create URS USB platdev\n"); + return -ENODEV; + } + } } qcom->qscratch_base = devm_ioremap_resource(dev, parent_res); @@ -796,6 +842,7 @@ static int dwc3_qcom_remove(struct platform_device *pdev) struct device *dev = &pdev->dev; int i; + device_remove_software_node(&qcom->dwc3->dev); of_platform_depopulate(dev); for (i = qcom->num_clocks - 1; i >= 0; i--) { @@ -877,8 +924,20 @@ static const struct dwc3_acpi_pdata sdm845_acpi_pdata = { .ss_phy_irq_index = 2 }; +static const struct dwc3_acpi_pdata sdm845_acpi_urs_pdata = { + .qscratch_base_offset = SDM845_QSCRATCH_BASE_OFFSET, + .qscratch_base_size = SDM845_QSCRATCH_SIZE, + .dwc3_core_base_size = SDM845_DWC3_CORE_SIZE, + .hs_phy_irq_index = 1, + .dp_hs_phy_irq_index = 4, + .dm_hs_phy_irq_index = 3, + .ss_phy_irq_index = 2, + .is_urs = true, +}; + static 
const struct acpi_device_id dwc3_qcom_acpi_match[] = { { "QCOM2430", (unsigned long)&sdm845_acpi_pdata }, + { "QCOM0304", (unsigned long)&sdm845_acpi_urs_pdata }, { }, }; MODULE_DEVICE_TABLE(acpi, dwc3_qcom_acpi_match); diff --git a/drivers/usb/dwc3/dwc3-st.c b/drivers/usb/dwc3/dwc3-st.c index e733be840545..b06b7092b1a2 100644 --- a/drivers/usb/dwc3/dwc3-st.c +++ b/drivers/usb/dwc3/dwc3-st.c @@ -274,7 +274,7 @@ static int st_dwc3_probe(struct platform_device *pdev) dwc3_data->dr_mode = usb_get_dr_mode(&child_pdev->dev); of_node_put(child); - of_dev_put(child_pdev); + platform_device_put(child_pdev); /* * Configure the USB port as device or host according to the static diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index 78cb4db8a6e4..aebcf8ec0716 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c @@ -605,8 +605,23 @@ static int dwc3_gadget_set_ep_config(struct dwc3_ep *dep, unsigned int action) params.param0 |= DWC3_DEPCFG_FIFO_NUMBER(dep->number >> 1); if (desc->bInterval) { - params.param1 |= DWC3_DEPCFG_BINTERVAL_M1(desc->bInterval - 1); - dep->interval = 1 << (desc->bInterval - 1); + u8 bInterval_m1; + + /* + * Valid range for DEPCFG.bInterval_m1 is from 0 to 13, and it + * must be set to 0 when the controller operates in full-speed. + */ + bInterval_m1 = min_t(u8, desc->bInterval - 1, 13); + if (dwc->gadget->speed == USB_SPEED_FULL) + bInterval_m1 = 0; + + if (usb_endpoint_type(desc) == USB_ENDPOINT_XFER_INT && + dwc->gadget->speed == USB_SPEED_FULL) + dep->interval = desc->bInterval; + else + dep->interval = 1 << (desc->bInterval - 1); + + params.param1 |= DWC3_DEPCFG_BINTERVAL_M1(bInterval_m1); } return dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETEPCONFIG, ¶ms); @@ -1763,6 +1778,8 @@ static int dwc3_gadget_ep_dequeue(struct usb_ep *ep, list_for_each_entry_safe(r, t, &dep->started_list, list) dwc3_gadget_move_cancelled_request(r); + dep->flags &= ~DWC3_EP_WAIT_TRANSFER_COMPLETE; + goto out; } } @@ -2036,6 +2053,102 @@ static void dwc3_stop_active_transfers(struct dwc3 *dwc) } } +static void __dwc3_gadget_set_ssp_rate(struct dwc3 *dwc) +{ + enum usb_ssp_rate ssp_rate = dwc->gadget_ssp_rate; + u32 reg; + + if (ssp_rate == USB_SSP_GEN_UNKNOWN) + ssp_rate = dwc->max_ssp_rate; + + reg = dwc3_readl(dwc->regs, DWC3_DCFG); + reg &= ~DWC3_DCFG_SPEED_MASK; + reg &= ~DWC3_DCFG_NUMLANES(~0); + + if (ssp_rate == USB_SSP_GEN_1x2) + reg |= DWC3_DCFG_SUPERSPEED; + else if (dwc->max_ssp_rate != USB_SSP_GEN_1x2) + reg |= DWC3_DCFG_SUPERSPEED_PLUS; + + if (ssp_rate != USB_SSP_GEN_2x1 && + dwc->max_ssp_rate != USB_SSP_GEN_2x1) + reg |= DWC3_DCFG_NUMLANES(1); + + dwc3_writel(dwc->regs, DWC3_DCFG, reg); +} + +static void __dwc3_gadget_set_speed(struct dwc3 *dwc) +{ + enum usb_device_speed speed; + u32 reg; + + speed = dwc->gadget_max_speed; + if (speed > dwc->maximum_speed) + speed = dwc->maximum_speed; + + if (speed == USB_SPEED_SUPER_PLUS && + DWC3_IP_IS(DWC32)) { + __dwc3_gadget_set_ssp_rate(dwc); + return; + } + + reg = dwc3_readl(dwc->regs, DWC3_DCFG); + reg &= ~(DWC3_DCFG_SPEED_MASK); + + /* + * WORKAROUND: DWC3 revision < 2.20a have an issue + * which would cause metastability state on Run/Stop + * bit if we try to force the IP to USB2-only mode. 
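Stepping back to the bInterval change at the top of this gadget.c hunk: it encodes two constraints. DEPCFG.bInterval_m1 is only valid from 0 to 13 and must be 0 at full speed, and full-speed interrupt endpoints count their interval directly in bInterval frames rather than the 2^(bInterval-1) encoding used otherwise. The bare computation:

	u8 bInterval_m1 = min_t(u8, desc->bInterval - 1, 13);

	if (speed == USB_SPEED_FULL)
		bInterval_m1 = 0;

	if (usb_endpoint_type(desc) == USB_ENDPOINT_XFER_INT &&
	    speed == USB_SPEED_FULL)
		interval = desc->bInterval;		/* whole frames */
	else
		interval = 1 << (desc->bInterval - 1);	/* 2^(n-1) */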
+ * + * Because of that, we cannot configure the IP to any + * speed other than the SuperSpeed + * + * Refers to: + * + * STAR#9000525659: Clock Domain Crossing on DCTL in + * USB 2.0 Mode + */ + if (DWC3_VER_IS_PRIOR(DWC3, 220A) && + !dwc->dis_metastability_quirk) { + reg |= DWC3_DCFG_SUPERSPEED; + } else { + switch (speed) { + case USB_SPEED_LOW: + reg |= DWC3_DCFG_LOWSPEED; + break; + case USB_SPEED_FULL: + reg |= DWC3_DCFG_FULLSPEED; + break; + case USB_SPEED_HIGH: + reg |= DWC3_DCFG_HIGHSPEED; + break; + case USB_SPEED_SUPER: + reg |= DWC3_DCFG_SUPERSPEED; + break; + case USB_SPEED_SUPER_PLUS: + if (DWC3_IP_IS(DWC3)) + reg |= DWC3_DCFG_SUPERSPEED; + else + reg |= DWC3_DCFG_SUPERSPEED_PLUS; + break; + default: + dev_err(dwc->dev, "invalid speed (%d)\n", speed); + + if (DWC3_IP_IS(DWC3)) + reg |= DWC3_DCFG_SUPERSPEED; + else + reg |= DWC3_DCFG_SUPERSPEED_PLUS; + } + } + + if (DWC3_IP_IS(DWC32) && + speed > USB_SPEED_UNKNOWN && + speed < USB_SPEED_SUPER_PLUS) + reg &= ~DWC3_DCFG_NUMLANES(~0); + + dwc3_writel(dwc->regs, DWC3_DCFG, reg); +} + static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on, int suspend) { u32 reg; @@ -2058,6 +2171,7 @@ static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on, int suspend) if (dwc->has_hibernation) reg |= DWC3_DCTL_KEEP_CONNECT; + __dwc3_gadget_set_speed(dwc); dwc->pullups_connected = true; } else { reg &= ~DWC3_DCTL_RUN_STOP; @@ -2083,6 +2197,7 @@ static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on, int suspend) static void dwc3_gadget_disable_irq(struct dwc3 *dwc); static void __dwc3_gadget_stop(struct dwc3 *dwc); +static int __dwc3_gadget_start(struct dwc3 *dwc); static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on) { @@ -2108,6 +2223,17 @@ static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on) } /* + * Check the return value for successful resume, or error. For a + * successful resume, the DWC3 runtime PM resume routine will handle + * the run stop sequence, so avoid duplicate operations here. + */ + ret = pm_runtime_get_sync(dwc->dev); + if (!ret || ret < 0) { + pm_runtime_put(dwc->dev); + return 0; + } + + /* * Synchronize any pending event handling before executing the controller * halt routine. 
*/ @@ -2145,10 +2271,14 @@ static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on) dwc->ev_buf->lpos = (dwc->ev_buf->lpos + count) % dwc->ev_buf->length; } + dwc->connected = false; + } else { + __dwc3_gadget_start(dwc); } ret = dwc3_gadget_run_stop(dwc, is_on, false); spin_unlock_irqrestore(&dwc->lock, flags); + pm_runtime_put(dwc->dev); return ret; } @@ -2158,8 +2288,7 @@ static void dwc3_gadget_enable_irq(struct dwc3 *dwc) u32 reg; /* Enable all but Start and End of Frame IRQs */ - reg = (DWC3_DEVTEN_VNDRDEVTSTRCVEDEN | - DWC3_DEVTEN_EVNTOVERFLOWEN | + reg = (DWC3_DEVTEN_EVNTOVERFLOWEN | DWC3_DEVTEN_CMDCMPLTEN | DWC3_DEVTEN_ERRTICERREN | DWC3_DEVTEN_WKUPEVTEN | @@ -2297,7 +2426,7 @@ static int dwc3_gadget_start(struct usb_gadget *g, { struct dwc3 *dwc = gadget_to_dwc(g); unsigned long flags; - int ret = 0; + int ret; int irq; irq = dwc->irq_gadget; @@ -2306,33 +2435,14 @@ static int dwc3_gadget_start(struct usb_gadget *g, if (ret) { dev_err(dwc->dev, "failed to request irq #%d --> %d\n", irq, ret); - goto err0; + return ret; } spin_lock_irqsave(&dwc->lock, flags); - if (dwc->gadget_driver) { - dev_err(dwc->dev, "%s is already bound to %s\n", - dwc->gadget->name, - dwc->gadget_driver->driver.name); - ret = -EBUSY; - goto err1; - } - dwc->gadget_driver = driver; - - if (pm_runtime_active(dwc->dev)) - __dwc3_gadget_start(dwc); - spin_unlock_irqrestore(&dwc->lock, flags); return 0; - -err1: - spin_unlock_irqrestore(&dwc->lock, flags); - free_irq(irq, dwc); - -err0: - return ret; } static void __dwc3_gadget_stop(struct dwc3 *dwc) @@ -2348,13 +2458,6 @@ static int dwc3_gadget_stop(struct usb_gadget *g) unsigned long flags; spin_lock_irqsave(&dwc->lock, flags); - - if (pm_runtime_suspended(dwc->dev)) - goto out; - - __dwc3_gadget_stop(dwc); - -out: dwc->gadget_driver = NULL; spin_unlock_irqrestore(&dwc->lock, flags); @@ -2407,62 +2510,33 @@ static void dwc3_gadget_set_speed(struct usb_gadget *g, { struct dwc3 *dwc = gadget_to_dwc(g); unsigned long flags; - u32 reg; spin_lock_irqsave(&dwc->lock, flags); - reg = dwc3_readl(dwc->regs, DWC3_DCFG); - reg &= ~(DWC3_DCFG_SPEED_MASK); - - /* - * WORKAROUND: DWC3 revision < 2.20a have an issue - * which would cause metastability state on Run/Stop - * bit if we try to force the IP to USB2-only mode. 
- * - * Because of that, we cannot configure the IP to any - * speed other than the SuperSpeed - * - * Refers to: - * - * STAR#9000525659: Clock Domain Crossing on DCTL in - * USB 2.0 Mode - */ - if (DWC3_VER_IS_PRIOR(DWC3, 220A) && - !dwc->dis_metastability_quirk) { - reg |= DWC3_DCFG_SUPERSPEED; - } else { - switch (speed) { - case USB_SPEED_LOW: - reg |= DWC3_DCFG_LOWSPEED; - break; - case USB_SPEED_FULL: - reg |= DWC3_DCFG_FULLSPEED; - break; - case USB_SPEED_HIGH: - reg |= DWC3_DCFG_HIGHSPEED; - break; - case USB_SPEED_SUPER: - reg |= DWC3_DCFG_SUPERSPEED; - break; - case USB_SPEED_SUPER_PLUS: - if (DWC3_IP_IS(DWC3)) - reg |= DWC3_DCFG_SUPERSPEED; - else - reg |= DWC3_DCFG_SUPERSPEED_PLUS; - break; - default: - dev_err(dwc->dev, "invalid speed (%d)\n", speed); + dwc->gadget_max_speed = speed; + spin_unlock_irqrestore(&dwc->lock, flags); +} - if (DWC3_IP_IS(DWC3)) - reg |= DWC3_DCFG_SUPERSPEED; - else - reg |= DWC3_DCFG_SUPERSPEED_PLUS; - } - } - dwc3_writel(dwc->regs, DWC3_DCFG, reg); +static void dwc3_gadget_set_ssp_rate(struct usb_gadget *g, + enum usb_ssp_rate rate) +{ + struct dwc3 *dwc = gadget_to_dwc(g); + unsigned long flags; + spin_lock_irqsave(&dwc->lock, flags); + dwc->gadget_ssp_rate = rate; spin_unlock_irqrestore(&dwc->lock, flags); } +static int dwc3_gadget_vbus_draw(struct usb_gadget *g, unsigned int mA) +{ + struct dwc3 *dwc = gadget_to_dwc(g); + + if (dwc->usb2_phy) + return usb_phy_set_power(dwc->usb2_phy, mA); + + return 0; +} + static const struct usb_gadget_ops dwc3_gadget_ops = { .get_frame = dwc3_gadget_get_frame, .wakeup = dwc3_gadget_wakeup, @@ -2471,7 +2545,9 @@ static const struct usb_gadget_ops dwc3_gadget_ops = { .udc_start = dwc3_gadget_start, .udc_stop = dwc3_gadget_stop, .udc_set_speed = dwc3_gadget_set_speed, + .udc_set_ssp_rate = dwc3_gadget_set_ssp_rate, .get_config_params = dwc3_gadget_config_params, + .vbus_draw = dwc3_gadget_vbus_draw, }; /* -------------------------------------------------------------------------- */ @@ -3304,12 +3380,18 @@ static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc) struct dwc3_ep *dep; int ret; u32 reg; + u8 lanes = 1; u8 speed; reg = dwc3_readl(dwc->regs, DWC3_DSTS); speed = reg & DWC3_DSTS_CONNECTSPD; dwc->speed = speed; + if (DWC3_IP_IS(DWC32)) + lanes = DWC3_DSTS_CONNLANES(reg) + 1; + + dwc->gadget->ssp_rate = USB_SSP_GEN_UNKNOWN; + /* * RAMClkSel is reset to 0 after USB reset, so it must be reprogrammed * each time on Connect Done. 
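(A side note on the two conndone hunks around this point: they derive the reported gadget speed from the DSTS connect-speed field and, on DWC_usb32 only, an SSP rate from the negotiated lane count. The following is a minimal standalone sketch of that mapping, not part of the patch; the enum and function names are placeholders, and only the DWC3_DSTS semantics are taken from the hunks themselves.)

#include <stdbool.h>

/*
 * Sketch of the speed/lane mapping implemented by
 * dwc3_gadget_conndone_interrupt(); names here are invented,
 * not driver symbols.
 */
enum sketch_rate { RATE_UNKNOWN, RATE_GEN_1x2, RATE_GEN_2x1, RATE_GEN_2x2 };

static enum sketch_rate sketch_ssp_rate(bool gen2, bool gen1, unsigned int lanes)
{
	if (gen2)			/* DWC3_DSTS_SUPERSPEED_PLUS */
		return lanes > 1 ? RATE_GEN_2x2 : RATE_GEN_2x1;
	if (gen1 && lanes > 1)		/* two gen1 lanes are reported upward
					 * as SUPER_PLUS at rate gen1x2 */
		return RATE_GEN_1x2;
	return RATE_UNKNOWN;		/* single-lane gen1 or below keeps the
					 * default set before the switch */
}

(A single gen1 lane thus leaves ssp_rate at the USB_SSP_GEN_UNKNOWN default assigned before the switch, and the device reports plain SuperSpeed.)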
@@ -3324,6 +3406,11 @@ static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc) dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512); dwc->gadget->ep0->maxpacket = 512; dwc->gadget->speed = USB_SPEED_SUPER_PLUS; + + if (lanes > 1) + dwc->gadget->ssp_rate = USB_SSP_GEN_2x2; + else + dwc->gadget->ssp_rate = USB_SSP_GEN_2x1; break; case DWC3_DSTS_SUPERSPEED: /* @@ -3345,6 +3432,11 @@ static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc) dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512); dwc->gadget->ep0->maxpacket = 512; dwc->gadget->speed = USB_SPEED_SUPER; + + if (lanes > 1) { + dwc->gadget->speed = USB_SPEED_SUPER_PLUS; + dwc->gadget->ssp_rate = USB_SSP_GEN_1x2; + } break; case DWC3_DSTS_HIGHSPEED: dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64); @@ -3839,6 +3931,7 @@ int dwc3_gadget_init(struct dwc3 *dwc) dev->platform_data = dwc; dwc->gadget->ops = &dwc3_gadget_ops; dwc->gadget->speed = USB_SPEED_UNKNOWN; + dwc->gadget->ssp_rate = USB_SSP_GEN_UNKNOWN; dwc->gadget->sg_supported = true; dwc->gadget->name = "dwc3-gadget"; dwc->gadget->lpm_capable = true; @@ -3865,6 +3958,7 @@ int dwc3_gadget_init(struct dwc3 *dwc) dwc->revision); dwc->gadget->max_speed = dwc->maximum_speed; + dwc->gadget->max_ssp_rate = dwc->max_ssp_rate; /* * REVISIT: Here we should clear all pending IRQs to be @@ -3881,7 +3975,10 @@ int dwc3_gadget_init(struct dwc3 *dwc) goto err5; } - dwc3_gadget_set_speed(dwc->gadget, dwc->maximum_speed); + if (DWC3_IP_IS(DWC32) && dwc->maximum_speed == USB_SPEED_SUPER_PLUS) + dwc3_gadget_set_ssp_rate(dwc->gadget, dwc->max_ssp_rate); + else + dwc3_gadget_set_speed(dwc->gadget, dwc->maximum_speed); return 0; diff --git a/drivers/usb/dwc3/host.c b/drivers/usb/dwc3/host.c index e195176580de..f29a264635aa 100644 --- a/drivers/usb/dwc3/host.c +++ b/drivers/usb/dwc3/host.c @@ -108,7 +108,7 @@ int dwc3_host_init(struct dwc3 *dwc) props[prop_idx++] = PROPERTY_ENTRY_BOOL("quirk-broken-port-ped"); if (prop_idx) { - ret = platform_device_add_properties(xhci, props); + ret = device_create_managed_software_node(&xhci->dev, props, NULL); if (ret) { dev_err(dwc->dev, "failed to add properties to xHCI\n"); goto err; diff --git a/drivers/usb/dwc3/ulpi.c b/drivers/usb/dwc3/ulpi.c index aa213c9815f6..f23f4c9a557e 100644 --- a/drivers/usb/dwc3/ulpi.c +++ b/drivers/usb/dwc3/ulpi.c @@ -7,6 +7,8 @@ * Author: Heikki Krogerus <heikki.krogerus@linux.intel.com> */ +#include <linux/delay.h> +#include <linux/time64.h> #include <linux/ulpi/regs.h> #include "core.h" @@ -17,14 +19,28 @@ DWC3_GUSB2PHYACC_ADDR(ULPI_ACCESS_EXTENDED) | \ DWC3_GUSB2PHYACC_EXTEND_ADDR(a) : DWC3_GUSB2PHYACC_ADDR(a)) -static int dwc3_ulpi_busyloop(struct dwc3 *dwc) +#define DWC3_ULPI_BASE_DELAY DIV_ROUND_UP(NSEC_PER_SEC, 60000000L) + +static int dwc3_ulpi_busyloop(struct dwc3 *dwc, u8 addr, bool read) { - unsigned int count = 1000; + unsigned long ns = 5L * DWC3_ULPI_BASE_DELAY; + unsigned int count = 10000; u32 reg; + if (addr >= ULPI_EXT_VENDOR_SPECIFIC) + ns += DWC3_ULPI_BASE_DELAY; + + if (read) + ns += DWC3_ULPI_BASE_DELAY; + + reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0)); + if (reg & DWC3_GUSB2PHYCFG_SUSPHY) + usleep_range(1000, 1200); + while (count--) { + ndelay(ns); reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYACC(0)); - if (!(reg & DWC3_GUSB2PHYACC_BUSY)) + if (reg & DWC3_GUSB2PHYACC_DONE) return 0; cpu_relax(); } @@ -38,16 +54,10 @@ static int dwc3_ulpi_read(struct device *dev, u8 addr) u32 reg; int ret; - reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0)); - if (reg & DWC3_GUSB2PHYCFG_SUSPHY) { - reg &= 
~DWC3_GUSB2PHYCFG_SUSPHY; - dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg); - } - reg = DWC3_GUSB2PHYACC_NEWREGREQ | DWC3_ULPI_ADDR(addr); dwc3_writel(dwc->regs, DWC3_GUSB2PHYACC(0), reg); - ret = dwc3_ulpi_busyloop(dwc); + ret = dwc3_ulpi_busyloop(dwc, addr, true); if (ret) return ret; @@ -61,17 +71,11 @@ static int dwc3_ulpi_write(struct device *dev, u8 addr, u8 val) struct dwc3 *dwc = dev_get_drvdata(dev); u32 reg; - reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0)); - if (reg & DWC3_GUSB2PHYCFG_SUSPHY) { - reg &= ~DWC3_GUSB2PHYCFG_SUSPHY; - dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg); - } - reg = DWC3_GUSB2PHYACC_NEWREGREQ | DWC3_ULPI_ADDR(addr); reg |= DWC3_GUSB2PHYACC_WRITE | val; dwc3_writel(dwc->regs, DWC3_GUSB2PHYACC(0), reg); - return dwc3_ulpi_busyloop(dwc); + return dwc3_ulpi_busyloop(dwc, addr, false); } static const struct ulpi_ops dwc3_ulpi_ops = { diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig index 7e47e6223089..2d152571a7de 100644 --- a/drivers/usb/gadget/Kconfig +++ b/drivers/usb/gadget/Kconfig @@ -265,6 +265,7 @@ config USB_CONFIGFS_NCM depends on NET select USB_U_ETHER select USB_F_NCM + select CRC32 help NCM is an advanced protocol for Ethernet encapsulation, allows grouping of several ethernet frames into one USB transfer and @@ -314,6 +315,7 @@ config USB_CONFIGFS_EEM depends on NET select USB_U_ETHER select USB_F_EEM + select CRC32 help CDC EEM is a newer USB standard that is somewhat simpler than CDC ECM and therefore can be supported by more hardware. Technically ECM and diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c index c6d455f2bb92..72a9797dbbae 100644 --- a/drivers/usb/gadget/composite.c +++ b/drivers/usb/gadget/composite.c @@ -13,6 +13,7 @@ #include <linux/module.h> #include <linux/device.h> #include <linux/utsname.h> +#include <linux/bitfield.h> #include <linux/usb/composite.h> #include <linux/usb/otg.h> @@ -392,8 +393,11 @@ int usb_function_deactivate(struct usb_function *function) spin_lock_irqsave(&cdev->lock, flags); - if (cdev->deactivations == 0) + if (cdev->deactivations == 0) { + spin_unlock_irqrestore(&cdev->lock, flags); status = usb_gadget_deactivate(cdev->gadget); + spin_lock_irqsave(&cdev->lock, flags); + } if (status == 0) cdev->deactivations++; @@ -424,8 +428,11 @@ int usb_function_activate(struct usb_function *function) status = -EINVAL; else { cdev->deactivations--; - if (cdev->deactivations == 0) + if (cdev->deactivations == 0) { + spin_unlock_irqrestore(&cdev->lock, flags); status = usb_gadget_activate(cdev->gadget); + spin_lock_irqsave(&cdev->lock, flags); + } } spin_unlock_irqrestore(&cdev->lock, flags); @@ -728,47 +735,77 @@ static int bos_desc(struct usb_composite_dev *cdev) /* The SuperSpeedPlus USB Device Capability descriptor */ if (gadget_is_superspeed_plus(cdev->gadget)) { struct usb_ssp_cap_descriptor *ssp_cap; + u8 ssac = 1; + u8 ssic; + int i; - ssp_cap = cdev->req->buf + le16_to_cpu(bos->wTotalLength); - bos->bNumDeviceCaps++; + if (cdev->gadget->max_ssp_rate == USB_SSP_GEN_2x2) + ssac = 3; /* - * Report typical values. + * Paired RX and TX sublink speed attributes share + * the same SSID. 
*/ + ssic = (ssac + 1) / 2 - 1; + + ssp_cap = cdev->req->buf + le16_to_cpu(bos->wTotalLength); + bos->bNumDeviceCaps++; - le16_add_cpu(&bos->wTotalLength, USB_DT_USB_SSP_CAP_SIZE(1)); - ssp_cap->bLength = USB_DT_USB_SSP_CAP_SIZE(1); + le16_add_cpu(&bos->wTotalLength, USB_DT_USB_SSP_CAP_SIZE(ssac)); + ssp_cap->bLength = USB_DT_USB_SSP_CAP_SIZE(ssac); ssp_cap->bDescriptorType = USB_DT_DEVICE_CAPABILITY; ssp_cap->bDevCapabilityType = USB_SSP_CAP_TYPE; ssp_cap->bReserved = 0; ssp_cap->wReserved = 0; - /* SSAC = 1 (2 attributes) */ - ssp_cap->bmAttributes = cpu_to_le32(1); + ssp_cap->bmAttributes = + cpu_to_le32(FIELD_PREP(USB_SSP_SUBLINK_SPEED_ATTRIBS, ssac) | + FIELD_PREP(USB_SSP_SUBLINK_SPEED_IDS, ssic)); - /* Min RX/TX Lane Count = 1 */ ssp_cap->wFunctionalitySupport = - cpu_to_le16((1 << 8) | (1 << 12)); + cpu_to_le16(FIELD_PREP(USB_SSP_MIN_SUBLINK_SPEED_ATTRIBUTE_ID, 0) | + FIELD_PREP(USB_SSP_MIN_RX_LANE_COUNT, 1) | + FIELD_PREP(USB_SSP_MIN_TX_LANE_COUNT, 1)); /* - * bmSublinkSpeedAttr[0]: - * ST = Symmetric, RX - * LSE = 3 (Gbps) - * LP = 1 (SuperSpeedPlus) - * LSM = 10 (10 Gbps) + * Use 1 SSID if the gadget supports up to gen2x1 or not + * specified: + * - SSID 0 for symmetric RX/TX sublink speed of 10 Gbps. + * + * Use 1 SSID if the gadget supports up to gen1x2: + * - SSID 0 for symmetric RX/TX sublink speed of 5 Gbps. + * + * Use 2 SSIDs if the gadget supports up to gen2x2: + * - SSID 0 for symmetric RX/TX sublink speed of 5 Gbps. + * - SSID 1 for symmetric RX/TX sublink speed of 10 Gbps. */ - ssp_cap->bmSublinkSpeedAttr[0] = - cpu_to_le32((3 << 4) | (1 << 14) | (0xa << 16)); - /* - * bmSublinkSpeedAttr[1] = - * ST = Symmetric, TX - * LSE = 3 (Gbps) - * LP = 1 (SuperSpeedPlus) - * LSM = 10 (10 Gbps) - */ - ssp_cap->bmSublinkSpeedAttr[1] = - cpu_to_le32((3 << 4) | (1 << 14) | - (0xa << 16) | (1 << 7)); + for (i = 0; i < ssac + 1; i++) { + u8 ssid; + u8 mantissa; + u8 type; + + ssid = i >> 1; + + if (cdev->gadget->max_ssp_rate == USB_SSP_GEN_2x1 || + cdev->gadget->max_ssp_rate == USB_SSP_GEN_UNKNOWN) + mantissa = 10; + else + mantissa = 5 << ssid; + + if (i % 2) + type = USB_SSP_SUBLINK_SPEED_ST_SYM_TX; + else + type = USB_SSP_SUBLINK_SPEED_ST_SYM_RX; + + ssp_cap->bmSublinkSpeedAttr[i] = + cpu_to_le32(FIELD_PREP(USB_SSP_SUBLINK_SPEED_SSID, ssid) | + FIELD_PREP(USB_SSP_SUBLINK_SPEED_LSE, + USB_SSP_SUBLINK_SPEED_LSE_GBPS) | + FIELD_PREP(USB_SSP_SUBLINK_SPEED_ST, type) | + FIELD_PREP(USB_SSP_SUBLINK_SPEED_LP, + USB_SSP_SUBLINK_SPEED_LP_SSP) | + FIELD_PREP(USB_SSP_SUBLINK_SPEED_LSM, mantissa)); + } } return le16_to_cpu(bos->wTotalLength); @@ -2036,7 +2073,7 @@ done: return value; } -void composite_disconnect(struct usb_gadget *gadget) +static void __composite_disconnect(struct usb_gadget *gadget) { struct usb_composite_dev *cdev = get_gadget_data(gadget); unsigned long flags; @@ -2053,6 +2090,23 @@ void composite_disconnect(struct usb_gadget *gadget) spin_unlock_irqrestore(&cdev->lock, flags); } +void composite_disconnect(struct usb_gadget *gadget) +{ + usb_gadget_vbus_draw(gadget, 0); + __composite_disconnect(gadget); +} + +void composite_reset(struct usb_gadget *gadget) +{ + /* + * Section 1.4.13 Standard Downstream Port of the USB battery charging + * specification v1.2 states that a device connected on a SDP shall only + * draw at max 100mA while in a connected, but unconfigured state. 
+ */ + usb_gadget_vbus_draw(gadget, 100); + __composite_disconnect(gadget); +} + /*-------------------------------------------------------------------------*/ static ssize_t suspended_show(struct device *dev, struct device_attribute *attr, @@ -2373,7 +2427,7 @@ static const struct usb_gadget_driver composite_driver_template = { .unbind = composite_unbind, .setup = composite_setup, - .reset = composite_disconnect, + .reset = composite_reset, .disconnect = composite_disconnect, .suspend = composite_suspend, diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c index 56051bb97349..0d56f33d63c2 100644 --- a/drivers/usb/gadget/configfs.c +++ b/drivers/usb/gadget/configfs.c @@ -221,9 +221,16 @@ static ssize_t gadget_dev_desc_bcdUSB_store(struct config_item *item, static ssize_t gadget_dev_desc_UDC_show(struct config_item *item, char *page) { - char *udc_name = to_gadget_info(item)->composite.gadget_driver.udc_name; + struct gadget_info *gi = to_gadget_info(item); + char *udc_name; + int ret; - return sprintf(page, "%s\n", udc_name ?: ""); + mutex_lock(&gi->lock); + udc_name = gi->composite.gadget_driver.udc_name; + ret = sprintf(page, "%s\n", udc_name ?: ""); + mutex_unlock(&gi->lock); + + return ret; } static int unregister_gadget(struct gadget_info *gi) @@ -1248,9 +1255,9 @@ static void purge_configs_funcs(struct gadget_info *gi) cfg = container_of(c, struct config_usb_cfg, c); - list_for_each_entry_safe(f, tmp, &c->functions, list) { + list_for_each_entry_safe_reverse(f, tmp, &c->functions, list) { - list_move_tail(&f->list, &cfg->func_list); + list_move(&f->list, &cfg->func_list); if (f->unbind) { dev_dbg(&gi->cdev.gadget->dev, "unbind function '%s'/%p\n", @@ -1481,6 +1488,28 @@ static void configfs_composite_disconnect(struct usb_gadget *gadget) spin_unlock_irqrestore(&gi->spinlock, flags); } +static void configfs_composite_reset(struct usb_gadget *gadget) +{ + struct usb_composite_dev *cdev; + struct gadget_info *gi; + unsigned long flags; + + cdev = get_gadget_data(gadget); + if (!cdev) + return; + + gi = container_of(cdev, struct gadget_info, cdev); + spin_lock_irqsave(&gi->spinlock, flags); + cdev = get_gadget_data(gadget); + if (!cdev || gi->unbind) { + spin_unlock_irqrestore(&gi->spinlock, flags); + return; + } + + composite_reset(gadget); + spin_unlock_irqrestore(&gi->spinlock, flags); +} + static void configfs_composite_suspend(struct usb_gadget *gadget) { struct usb_composite_dev *cdev; @@ -1530,13 +1559,13 @@ static const struct usb_gadget_driver configfs_driver_template = { .unbind = configfs_composite_unbind, .setup = configfs_composite_setup, - .reset = configfs_composite_disconnect, + .reset = configfs_composite_reset, .disconnect = configfs_composite_disconnect, .suspend = configfs_composite_suspend, .resume = configfs_composite_resume, - .max_speed = USB_SPEED_SUPER, + .max_speed = USB_SPEED_SUPER_PLUS, .driver = { .owner = THIS_MODULE, .name = "configfs-gadget", @@ -1576,7 +1605,7 @@ static struct config_group *gadgets_make( gi->composite.unbind = configfs_do_nothing; gi->composite.suspend = NULL; gi->composite.resume = NULL; - gi->composite.max_speed = USB_SPEED_SUPER; + gi->composite.max_speed = USB_SPEED_SUPER_PLUS; spin_lock_init(&gi->spinlock); mutex_init(&gi->lock); diff --git a/drivers/usb/gadget/function/f_midi.c b/drivers/usb/gadget/function/f_midi.c index 8fff995b8dd5..71a1a26e85c7 100644 --- a/drivers/usb/gadget/function/f_midi.c +++ b/drivers/usb/gadget/function/f_midi.c @@ -87,7 +87,7 @@ struct f_midi { struct snd_rawmidi_substream 
*out_substream[MAX_PORTS]; unsigned long out_triggered; - struct tasklet_struct tasklet; + struct work_struct work; unsigned int in_ports; unsigned int out_ports; int index; @@ -698,9 +698,11 @@ drop_out: f_midi_drop_out_substreams(midi); } -static void f_midi_in_tasklet(struct tasklet_struct *t) +static void f_midi_in_work(struct work_struct *work) { - struct f_midi *midi = from_tasklet(midi, t, tasklet); + struct f_midi *midi; + + midi = container_of(work, struct f_midi, work); f_midi_transmit(midi); } @@ -737,7 +739,7 @@ static void f_midi_in_trigger(struct snd_rawmidi_substream *substream, int up) VDBG(midi, "%s() %d\n", __func__, up); midi->in_ports_array[substream->number].active = up; if (up) - tasklet_hi_schedule(&midi->tasklet); + queue_work(system_highpri_wq, &midi->work); } static int f_midi_out_open(struct snd_rawmidi_substream *substream) @@ -875,7 +877,7 @@ static int f_midi_bind(struct usb_configuration *c, struct usb_function *f) int status, n, jack = 1, i = 0, endpoint_descriptor_index = 0; midi->gadget = cdev->gadget; - tasklet_setup(&midi->tasklet, f_midi_in_tasklet); + INIT_WORK(&midi->work, f_midi_in_work); status = f_midi_register_card(midi); if (status < 0) goto fail_register; diff --git a/drivers/usb/gadget/function/f_printer.c b/drivers/usb/gadget/function/f_printer.c index 64a4112068fc..61ce8e68f7a3 100644 --- a/drivers/usb/gadget/function/f_printer.c +++ b/drivers/usb/gadget/function/f_printer.c @@ -51,6 +51,8 @@ #define GET_PORT_STATUS 1 #define SOFT_RESET 2 +#define DEFAULT_Q_LEN 10 /* same as legacy g_printer gadget */ + static int major, minors; static struct class *usb_gadget_class; static DEFINE_IDA(printer_ida); @@ -1162,6 +1164,7 @@ fail_tx_reqs: printer_req_free(dev->in_ep, req); } + usb_free_all_descriptors(f); return ret; } @@ -1363,6 +1366,9 @@ static struct usb_function_instance *gprinter_alloc_inst(void) opts->func_inst.free_func_inst = gprinter_free_inst; ret = &opts->func_inst; + /* Make sure q_len is initialized, otherwise the bound device can't support read/write! 
*/ + opts->q_len = DEFAULT_Q_LEN; + mutex_lock(&printer_ida_lock); if (ida_is_empty(&printer_ida)) { diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c index 3633df6d7610..5d960b6603b6 100644 --- a/drivers/usb/gadget/function/f_uac2.c +++ b/drivers/usb/gadget/function/f_uac2.c @@ -271,7 +271,7 @@ static struct usb_endpoint_descriptor fs_epout_desc = { .bEndpointAddress = USB_DIR_OUT, .bmAttributes = USB_ENDPOINT_XFER_ISOC | USB_ENDPOINT_SYNC_ASYNC, - .wMaxPacketSize = cpu_to_le16(1023), + /* .wMaxPacketSize = DYNAMIC */ .bInterval = 1, }; @@ -280,7 +280,7 @@ static struct usb_endpoint_descriptor hs_epout_desc = { .bDescriptorType = USB_DT_ENDPOINT, .bmAttributes = USB_ENDPOINT_XFER_ISOC | USB_ENDPOINT_SYNC_ASYNC, - .wMaxPacketSize = cpu_to_le16(1024), + /* .wMaxPacketSize = DYNAMIC */ .bInterval = 4, }; @@ -348,7 +348,7 @@ static struct usb_endpoint_descriptor fs_epin_desc = { .bEndpointAddress = USB_DIR_IN, .bmAttributes = USB_ENDPOINT_XFER_ISOC | USB_ENDPOINT_SYNC_ASYNC, - .wMaxPacketSize = cpu_to_le16(1023), + /* .wMaxPacketSize = DYNAMIC */ .bInterval = 1, }; @@ -357,7 +357,7 @@ static struct usb_endpoint_descriptor hs_epin_desc = { .bDescriptorType = USB_DT_ENDPOINT, .bmAttributes = USB_ENDPOINT_XFER_ISOC | USB_ENDPOINT_SYNC_ASYNC, - .wMaxPacketSize = cpu_to_le16(1024), + /* .wMaxPacketSize = DYNAMIC */ .bInterval = 4, }; @@ -444,12 +444,28 @@ struct cntrl_range_lay3 { __le32 dRES; } __packed; -static void set_ep_max_packet_size(const struct f_uac2_opts *uac2_opts, +static int set_ep_max_packet_size(const struct f_uac2_opts *uac2_opts, struct usb_endpoint_descriptor *ep_desc, - unsigned int factor, bool is_playback) + enum usb_device_speed speed, bool is_playback) { int chmask, srate, ssize; - u16 max_packet_size; + u16 max_size_bw, max_size_ep; + unsigned int factor; + + switch (speed) { + case USB_SPEED_FULL: + max_size_ep = 1023; + factor = 1000; + break; + + case USB_SPEED_HIGH: + max_size_ep = 1024; + factor = 8000; + break; + + default: + return -EINVAL; + } if (is_playback) { chmask = uac2_opts->p_chmask; @@ -461,10 +477,12 @@ static void set_ep_max_packet_size(const struct f_uac2_opts *uac2_opts, ssize = uac2_opts->c_ssize; } - max_packet_size = num_channels(chmask) * ssize * + max_size_bw = num_channels(chmask) * ssize * DIV_ROUND_UP(srate, factor / (1 << (ep_desc->bInterval - 1))); - ep_desc->wMaxPacketSize = cpu_to_le16(min_t(u16, max_packet_size, - le16_to_cpu(ep_desc->wMaxPacketSize))); + ep_desc->wMaxPacketSize = cpu_to_le16(min_t(u16, max_size_bw, + max_size_ep)); + + return 0; } /* Use macro to overcome line length limitation */ @@ -670,10 +688,33 @@ afunc_bind(struct usb_configuration *cfg, struct usb_function *fn) } /* Calculate wMaxPacketSize according to audio bandwidth */ - set_ep_max_packet_size(uac2_opts, &fs_epin_desc, 1000, true); - set_ep_max_packet_size(uac2_opts, &fs_epout_desc, 1000, false); - set_ep_max_packet_size(uac2_opts, &hs_epin_desc, 8000, true); - set_ep_max_packet_size(uac2_opts, &hs_epout_desc, 8000, false); + ret = set_ep_max_packet_size(uac2_opts, &fs_epin_desc, USB_SPEED_FULL, + true); + if (ret < 0) { + dev_err(dev, "%s:%d Error!\n", __func__, __LINE__); + return ret; + } + + ret = set_ep_max_packet_size(uac2_opts, &fs_epout_desc, USB_SPEED_FULL, + false); + if (ret < 0) { + dev_err(dev, "%s:%d Error!\n", __func__, __LINE__); + return ret; + } + + ret = set_ep_max_packet_size(uac2_opts, &hs_epin_desc, USB_SPEED_HIGH, + true); + if (ret < 0) { + dev_err(dev, "%s:%d Error!\n", __func__, __LINE__); + return 
ret; + } + + ret = set_ep_max_packet_size(uac2_opts, &hs_epout_desc, USB_SPEED_HIGH, + false); + if (ret < 0) { + dev_err(dev, "%s:%d Error!\n", __func__, __LINE__); + return ret; + } if (EPOUT_EN(uac2_opts)) { agdev->out_ep = usb_ep_autoconfig(gadget, &fs_epout_desc); diff --git a/drivers/usb/gadget/function/u_audio.c b/drivers/usb/gadget/function/u_audio.c index e6d32c536781..265c4d805f81 100644 --- a/drivers/usb/gadget/function/u_audio.c +++ b/drivers/usb/gadget/function/u_audio.c @@ -23,11 +23,6 @@ #define PRD_SIZE_MAX PAGE_SIZE #define MIN_PERIODS 4 -struct uac_req { - struct uac_rtd_params *pp; /* parent param */ - struct usb_request *req; -}; - /* Runtime data params for one stream */ struct uac_rtd_params { struct snd_uac_chip *uac; /* parent chip */ @@ -41,9 +36,8 @@ struct uac_rtd_params { void *rbuf; unsigned int max_psize; /* MaxPacketSize of endpoint */ - struct uac_req *ureq; - spinlock_t lock; + struct usb_request **reqs; }; struct snd_uac_chip { @@ -79,17 +73,20 @@ static const struct snd_pcm_hardware uac_pcm_hardware = { static void u_audio_iso_complete(struct usb_ep *ep, struct usb_request *req) { unsigned int pending; - unsigned long flags, flags2; unsigned int hw_ptr; int status = req->status; - struct uac_req *ur = req->context; struct snd_pcm_substream *substream; struct snd_pcm_runtime *runtime; - struct uac_rtd_params *prm = ur->pp; + struct uac_rtd_params *prm = req->context; struct snd_uac_chip *uac = prm->uac; /* i/f shutting down */ - if (!prm->ep_enabled || req->status == -ESHUTDOWN) + if (!prm->ep_enabled) { + usb_ep_free_request(ep, req); + return; + } + + if (req->status == -ESHUTDOWN) return; /* @@ -106,16 +103,14 @@ static void u_audio_iso_complete(struct usb_ep *ep, struct usb_request *req) if (!substream) goto exit; - snd_pcm_stream_lock_irqsave(substream, flags2); + snd_pcm_stream_lock(substream); runtime = substream->runtime; if (!runtime || !snd_pcm_running(substream)) { - snd_pcm_stream_unlock_irqrestore(substream, flags2); + snd_pcm_stream_unlock(substream); goto exit; } - spin_lock_irqsave(&prm->lock, flags); - if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { /* * For each IN packet, take the quotient of the current data @@ -142,8 +137,6 @@ static void u_audio_iso_complete(struct usb_ep *ep, struct usb_request *req) hw_ptr = prm->hw_ptr; - spin_unlock_irqrestore(&prm->lock, flags); - /* Pack USB load in ALSA ring buffer */ pending = runtime->dma_bytes - hw_ptr; @@ -167,12 +160,10 @@ static void u_audio_iso_complete(struct usb_ep *ep, struct usb_request *req) } } - spin_lock_irqsave(&prm->lock, flags); /* update hw_ptr after data is copied to memory */ prm->hw_ptr = (hw_ptr + req->actual) % runtime->dma_bytes; hw_ptr = prm->hw_ptr; - spin_unlock_irqrestore(&prm->lock, flags); - snd_pcm_stream_unlock_irqrestore(substream, flags2); + snd_pcm_stream_unlock(substream); if ((hw_ptr % snd_pcm_lib_period_bytes(substream)) < req->actual) snd_pcm_period_elapsed(substream); @@ -188,7 +179,6 @@ static int uac_pcm_trigger(struct snd_pcm_substream *substream, int cmd) struct uac_rtd_params *prm; struct g_audio *audio_dev; struct uac_params *params; - unsigned long flags; int err = 0; audio_dev = uac->audio_dev; @@ -199,8 +189,6 @@ static int uac_pcm_trigger(struct snd_pcm_substream *substream, int cmd) else prm = &uac->c_prm; - spin_lock_irqsave(&prm->lock, flags); - /* Reset */ prm->hw_ptr = 0; @@ -217,8 +205,6 @@ static int uac_pcm_trigger(struct snd_pcm_substream *substream, int cmd) err = -EINVAL; } - spin_unlock_irqrestore(&prm->lock, flags); - /* 
Clear buffer after Play stops */ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK && !prm->ss) memset(prm->rbuf, 0, prm->max_psize * params->req_number); @@ -239,6 +225,25 @@ static snd_pcm_uframes_t uac_pcm_pointer(struct snd_pcm_substream *substream) return bytes_to_frames(substream->runtime, prm->hw_ptr); } +static u64 uac_ssize_to_fmt(int ssize) +{ + u64 ret; + + switch (ssize) { + case 3: + ret = SNDRV_PCM_FMTBIT_S24_3LE; + break; + case 4: + ret = SNDRV_PCM_FMTBIT_S32_LE; + break; + default: + ret = SNDRV_PCM_FMTBIT_S16_LE; + break; + } + + return ret; +} + static int uac_pcm_open(struct snd_pcm_substream *substream) { struct snd_uac_chip *uac = snd_pcm_substream_chip(substream); @@ -262,36 +267,14 @@ static int uac_pcm_open(struct snd_pcm_substream *substream) runtime->hw = uac_pcm_hardware; if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { - spin_lock_init(&uac->p_prm.lock); runtime->hw.rate_min = p_srate; - switch (p_ssize) { - case 3: - runtime->hw.formats = SNDRV_PCM_FMTBIT_S24_3LE; - break; - case 4: - runtime->hw.formats = SNDRV_PCM_FMTBIT_S32_LE; - break; - default: - runtime->hw.formats = SNDRV_PCM_FMTBIT_S16_LE; - break; - } + runtime->hw.formats = uac_ssize_to_fmt(p_ssize); runtime->hw.channels_min = num_channels(p_chmask); runtime->hw.period_bytes_min = 2 * uac->p_prm.max_psize / runtime->hw.periods_min; } else { - spin_lock_init(&uac->c_prm.lock); runtime->hw.rate_min = c_srate; - switch (c_ssize) { - case 3: - runtime->hw.formats = SNDRV_PCM_FMTBIT_S24_3LE; - break; - case 4: - runtime->hw.formats = SNDRV_PCM_FMTBIT_S32_LE; - break; - default: - runtime->hw.formats = SNDRV_PCM_FMTBIT_S16_LE; - break; - } + runtime->hw.formats = uac_ssize_to_fmt(c_ssize); runtime->hw.channels_min = num_channels(c_chmask); runtime->hw.period_bytes_min = 2 * uac->c_prm.max_psize / runtime->hw.periods_min; @@ -335,10 +318,16 @@ static inline void free_ep(struct uac_rtd_params *prm, struct usb_ep *ep) params = &audio_dev->params; for (i = 0; i < params->req_number; i++) { - if (prm->ureq[i].req) { - usb_ep_dequeue(ep, prm->ureq[i].req); - usb_ep_free_request(ep, prm->ureq[i].req); - prm->ureq[i].req = NULL; + if (prm->reqs[i]) { + if (usb_ep_dequeue(ep, prm->reqs[i])) + usb_ep_free_request(ep, prm->reqs[i]); + /* + * If usb_ep_dequeue() cannot successfully dequeue the + * request, the request will be freed by the completion + * callback. 
+ */ + + prm->reqs[i] = NULL; } } @@ -367,22 +356,21 @@ int u_audio_start_capture(struct g_audio *audio_dev) usb_ep_enable(ep); for (i = 0; i < params->req_number; i++) { - if (!prm->ureq[i].req) { + if (!prm->reqs[i]) { req = usb_ep_alloc_request(ep, GFP_ATOMIC); if (req == NULL) return -ENOMEM; - prm->ureq[i].req = req; - prm->ureq[i].pp = prm; + prm->reqs[i] = req; req->zero = 0; - req->context = &prm->ureq[i]; + req->context = prm; req->length = req_len; req->complete = u_audio_iso_complete; req->buf = prm->rbuf + i * ep->maxpacket; } - if (usb_ep_queue(ep, prm->ureq[i].req, GFP_ATOMIC)) + if (usb_ep_queue(ep, prm->reqs[i], GFP_ATOMIC)) dev_err(dev, "%s:%d Error!\n", __func__, __LINE__); } @@ -445,22 +433,21 @@ int u_audio_start_playback(struct g_audio *audio_dev) usb_ep_enable(ep); for (i = 0; i < params->req_number; i++) { - if (!prm->ureq[i].req) { + if (!prm->reqs[i]) { req = usb_ep_alloc_request(ep, GFP_ATOMIC); if (req == NULL) return -ENOMEM; - prm->ureq[i].req = req; - prm->ureq[i].pp = prm; + prm->reqs[i] = req; req->zero = 0; - req->context = &prm->ureq[i]; + req->context = prm; req->length = req_len; req->complete = u_audio_iso_complete; req->buf = prm->rbuf + i * ep->maxpacket; } - if (usb_ep_queue(ep, prm->ureq[i].req, GFP_ATOMIC)) + if (usb_ep_queue(ep, prm->reqs[i], GFP_ATOMIC)) dev_err(dev, "%s:%d Error!\n", __func__, __LINE__); } @@ -505,9 +492,10 @@ int g_audio_setup(struct g_audio *g_audio, const char *pcm_name, uac->c_prm.uac = uac; prm->max_psize = g_audio->out_ep_maxpsize; - prm->ureq = kcalloc(params->req_number, sizeof(struct uac_req), - GFP_KERNEL); - if (!prm->ureq) { + prm->reqs = kcalloc(params->req_number, + sizeof(struct usb_request *), + GFP_KERNEL); + if (!prm->reqs) { err = -ENOMEM; goto fail; } @@ -527,9 +515,10 @@ int g_audio_setup(struct g_audio *g_audio, const char *pcm_name, uac->p_prm.uac = uac; prm->max_psize = g_audio->in_ep_maxpsize; - prm->ureq = kcalloc(params->req_number, sizeof(struct uac_req), - GFP_KERNEL); - if (!prm->ureq) { + prm->reqs = kcalloc(params->req_number, + sizeof(struct usb_request *), + GFP_KERNEL); + if (!prm->reqs) { err = -ENOMEM; goto fail; } @@ -582,8 +571,8 @@ int g_audio_setup(struct g_audio *g_audio, const char *pcm_name, snd_fail: snd_card_free(card); fail: - kfree(uac->p_prm.ureq); - kfree(uac->c_prm.ureq); + kfree(uac->p_prm.reqs); + kfree(uac->c_prm.reqs); kfree(uac->p_prm.rbuf); kfree(uac->c_prm.rbuf); kfree(uac); @@ -605,8 +594,8 @@ void g_audio_cleanup(struct g_audio *g_audio) if (card) snd_card_free(card); - kfree(uac->p_prm.ureq); - kfree(uac->c_prm.ureq); + kfree(uac->p_prm.reqs); + kfree(uac->c_prm.reqs); kfree(uac->p_prm.rbuf); kfree(uac->c_prm.rbuf); kfree(uac); diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c index 31ea76adcc0d..d1d044d9f859 100644 --- a/drivers/usb/gadget/function/u_ether.c +++ b/drivers/usb/gadget/function/u_ether.c @@ -45,9 +45,10 @@ #define UETH__VERSION "29-May-2008" /* Experiments show that both Linux and Windows hosts allow up to 16k - * frame sizes. Set the max size to 15k+52 to prevent allocating 32k + * frame sizes. Set the max MTU size to 15k+52 to prevent allocating 32k * blocks and still have efficient handling. 
*/ -#define GETHER_MAX_ETH_FRAME_LEN 15412 +#define GETHER_MAX_MTU_SIZE 15412 +#define GETHER_MAX_ETH_FRAME_LEN (GETHER_MAX_MTU_SIZE + ETH_HLEN) struct eth_dev { /* lock is held while accessing port_usb @@ -79,6 +80,7 @@ struct eth_dev { bool zlp; bool no_skb_reserve; + bool ifname_set; u8 host_mac[ETH_ALEN]; u8 dev_mac[ETH_ALEN]; }; @@ -786,7 +788,7 @@ struct eth_dev *gether_setup_name(struct usb_gadget *g, /* MTU range: 14 - 15412 */ net->min_mtu = ETH_HLEN; - net->max_mtu = GETHER_MAX_ETH_FRAME_LEN; + net->max_mtu = GETHER_MAX_MTU_SIZE; dev->gadget = g; SET_NETDEV_DEV(net, &g->dev); @@ -848,7 +850,7 @@ struct net_device *gether_setup_name_default(const char *netname) /* MTU range: 14 - 15412 */ net->min_mtu = ETH_HLEN; - net->max_mtu = GETHER_MAX_ETH_FRAME_LEN; + net->max_mtu = GETHER_MAX_MTU_SIZE; return net; } @@ -1003,15 +1005,45 @@ EXPORT_SYMBOL_GPL(gether_get_qmult); int gether_get_ifname(struct net_device *net, char *name, int len) { + struct eth_dev *dev = netdev_priv(net); int ret; rtnl_lock(); - ret = scnprintf(name, len, "%s\n", netdev_name(net)); + ret = scnprintf(name, len, "%s\n", + dev->ifname_set ? net->name : netdev_name(net)); rtnl_unlock(); return ret; } EXPORT_SYMBOL_GPL(gether_get_ifname); +int gether_set_ifname(struct net_device *net, const char *name, int len) +{ + struct eth_dev *dev = netdev_priv(net); + char tmp[IFNAMSIZ]; + const char *p; + + if (name[len - 1] == '\n') + len--; + + if (len >= sizeof(tmp)) + return -E2BIG; + + strscpy(tmp, name, len + 1); + if (!dev_valid_name(tmp)) + return -EINVAL; + + /* Require exactly one %d, so binding will not fail with EEXIST. */ + p = strchr(name, '%'); + if (!p || p[1] != 'd' || strchr(p + 2, '%')) + return -EINVAL; + + strncpy(net->name, tmp, sizeof(net->name)); + dev->ifname_set = true; + + return 0; +} +EXPORT_SYMBOL_GPL(gether_set_ifname); + /* * gether_cleanup - remove Ethernet-over-USB device * Context: may sleep diff --git a/drivers/usb/gadget/function/u_ether.h b/drivers/usb/gadget/function/u_ether.h index 10dd640684e2..40144546d1b0 100644 --- a/drivers/usb/gadget/function/u_ether.h +++ b/drivers/usb/gadget/function/u_ether.h @@ -244,6 +244,18 @@ unsigned gether_get_qmult(struct net_device *net); */ int gether_get_ifname(struct net_device *net, char *name, int len); +/** + * gether_set_ifname - set an ethernet-over-usb link interface name + * @net: device representing this link + * @name: new interface name + * @len: length of @name + * + * This sets the interface name of this ethernet-over-usb link. + * A single terminating newline, if any, is ignored. + * Returns zero on success, else negative errno. 
+ */ +int gether_set_ifname(struct net_device *net, const char *name, int len); + void gether_cleanup(struct eth_dev *dev); /* connect/disconnect is handled by individual functions */ diff --git a/drivers/usb/gadget/function/u_ether_configfs.h b/drivers/usb/gadget/function/u_ether_configfs.h index bd92b5703013..3dfb460908fa 100644 --- a/drivers/usb/gadget/function/u_ether_configfs.h +++ b/drivers/usb/gadget/function/u_ether_configfs.h @@ -148,7 +148,20 @@ out: \ return ret; \ } \ \ - CONFIGFS_ATTR_RO(_f_##_opts_, ifname) + static ssize_t _f_##_opts_ifname_store(struct config_item *item, \ + const char *page, size_t len)\ + { \ + struct f_##_f_##_opts *opts = to_f_##_f_##_opts(item); \ + int ret = -EBUSY; \ + \ + mutex_lock(&opts->lock); \ + if (!opts->refcnt) \ + ret = gether_set_ifname(opts->net, page, len); \ + mutex_unlock(&opts->lock); \ + return ret ?: len; \ + } \ + \ + CONFIGFS_ATTR(_f_##_opts_, ifname) #define USB_ETHER_CONFIGFS_ITEM_ATTR_U8_RW(_f_, _n_) \ static ssize_t _f_##_opts_##_n_##_show(struct config_item *item,\ diff --git a/drivers/usb/gadget/function/u_serial.c b/drivers/usb/gadget/function/u_serial.c index 2caccbb6e014..1e59204ec7aa 100644 --- a/drivers/usb/gadget/function/u_serial.c +++ b/drivers/usb/gadget/function/u_serial.c @@ -258,9 +258,7 @@ __acquires(&port->port_lock) list_del(&req->list); req->zero = kfifo_is_empty(&port->port_write_buf); - pr_vdebug("ttyGS%d: tx len=%d, 0x%02x 0x%02x 0x%02x ...\n", - port->port_num, len, *((u8 *)req->buf), - *((u8 *)req->buf+1), *((u8 *)req->buf+2)); + pr_vdebug("ttyGS%d: tx len=%d, %3ph ...\n", port->port_num, len, req->buf); /* Drop lock while we call out of driver; completions * could be issued while we do so. Disconnection may @@ -346,7 +344,7 @@ __acquires(&port->port_lock) } /* - * RX tasklet takes data out of the RX queue and hands it up to the TTY + * RX work takes data out of the RX queue and hands it up to the TTY * layer until it refuses to take any more data (or is throttled back). * Then it issues reads for any further data. * @@ -709,7 +707,7 @@ raced_with_open: /* Iff we're disconnected, there can be no I/O in flight so it's * ok to free the circular buffer; else just scrub it. And don't - * let the push tasklet fire again until we're re-opened. + * let the push async work fire again until we're re-opened. */ if (gser == NULL) kfifo_free(&port->port_write_buf); diff --git a/drivers/usb/gadget/legacy/Kconfig b/drivers/usb/gadget/legacy/Kconfig index f02c38b32a2b..11dd6e8adc8a 100644 --- a/drivers/usb/gadget/legacy/Kconfig +++ b/drivers/usb/gadget/legacy/Kconfig @@ -515,10 +515,15 @@ config USB_G_WEBCAM config USB_RAW_GADGET tristate "USB Raw Gadget" help - USB Raw Gadget is a kernel module that provides a userspace interface - for the USB Gadget subsystem. Essentially it allows to emulate USB - devices from userspace. See Documentation/usb/raw-gadget.rst for - details. + USB Raw Gadget is a gadget driver that gives userspace low-level + control over the gadget's communication process. + + Like any other gadget driver, Raw Gadget implements USB devices via + the USB gadget API. Unlike most gadget drivers, Raw Gadget does not + implement any concrete USB functions itself but requires userspace + to do that. + + See Documentation/usb/raw-gadget.rst for details. Say "y" to link the driver statically, or "m" to build a dynamically linked module called "raw_gadget". 
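(To make the reworded Raw Gadget help text above concrete, here is a minimal user-side sketch of the flow it describes: open the device node, bind to a UDC, then fetch events that userspace, not the kernel, must answer. This assumes the UAPI documented in Documentation/usb/raw-gadget.rst, i.e. /dev/raw-gadget and the USB_RAW_IOCTL_INIT/RUN/EVENT_FETCH ioctls from <linux/usb/raw_gadget.h>; the UDC names below are placeholders for whatever the target system exposes.)

#include <fcntl.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/usb/ch9.h>
#include <linux/usb/raw_gadget.h>

int main(void)
{
	struct usb_raw_init init;
	struct usb_raw_event *ev;
	int fd;

	fd = open("/dev/raw-gadget", O_RDWR);
	if (fd < 0)
		return 1;

	memset(&init, 0, sizeof(init));
	/* Placeholder UDC names; dummy_hcd's UDC is the usual test target. */
	strcpy((char *)init.driver_name, "dummy_udc");
	strcpy((char *)init.device_name, "dummy_udc.0");
	init.speed = USB_SPEED_HIGH;

	if (ioctl(fd, USB_RAW_IOCTL_INIT, &init) < 0 ||
	    ioctl(fd, USB_RAW_IOCTL_RUN, 0) < 0)
		return 1;

	ev = malloc(sizeof(*ev) + 4096);
	if (!ev)
		return 1;
	for (;;) {
		ev->type = 0;
		ev->length = 4096;
		/* Each fetched event (connect, control request, ...) must be
		 * answered by userspace; that is the "concrete USB function"
		 * the kernel side deliberately leaves unimplemented. */
		if (ioctl(fd, USB_RAW_IOCTL_EVENT_FETCH, ev) < 0)
			break;
	}
	free(ev);
	close(fd);
	return 0;
}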
diff --git a/drivers/usb/gadget/legacy/acm_ms.c b/drivers/usb/gadget/legacy/acm_ms.c index 59be2d8417c9..e8033e5f0c18 100644 --- a/drivers/usb/gadget/legacy/acm_ms.c +++ b/drivers/usb/gadget/legacy/acm_ms.c @@ -200,8 +200,10 @@ static int acm_ms_bind(struct usb_composite_dev *cdev) struct usb_descriptor_header *usb_desc; usb_desc = usb_otg_descriptor_alloc(gadget); - if (!usb_desc) + if (!usb_desc) { + status = -ENOMEM; goto fail_string_ids; + } usb_otg_descriptor_init(gadget, usb_desc); otg_desc[0] = usb_desc; otg_desc[1] = NULL; diff --git a/drivers/usb/gadget/legacy/ether.c b/drivers/usb/gadget/legacy/ether.c index 30313b233680..99c7fc0d1d59 100644 --- a/drivers/usb/gadget/legacy/ether.c +++ b/drivers/usb/gadget/legacy/ether.c @@ -403,8 +403,10 @@ static int eth_bind(struct usb_composite_dev *cdev) struct usb_descriptor_header *usb_desc; usb_desc = usb_otg_descriptor_alloc(gadget); - if (!usb_desc) + if (!usb_desc) { + status = -ENOMEM; goto fail1; + } usb_otg_descriptor_init(gadget, usb_desc); otg_desc[0] = usb_desc; otg_desc[1] = NULL; diff --git a/drivers/usb/gadget/legacy/raw_gadget.c b/drivers/usb/gadget/legacy/raw_gadget.c index 062dfac30399..c5a2c734234a 100644 --- a/drivers/usb/gadget/legacy/raw_gadget.c +++ b/drivers/usb/gadget/legacy/raw_gadget.c @@ -3,7 +3,8 @@ * USB Raw Gadget driver. * See Documentation/usb/raw-gadget.rst for more details. * - * Andrey Konovalov <andreyknvl@gmail.com> + * Copyright (c) 2020 Google, Inc. + * Author: Andrey Konovalov <andreyknvl@gmail.com> */ #include <linux/compiler.h> diff --git a/drivers/usb/gadget/udc/Kconfig b/drivers/usb/gadget/udc/Kconfig index 1a12aab208b4..8c614bb86c66 100644 --- a/drivers/usb/gadget/udc/Kconfig +++ b/drivers/usb/gadget/udc/Kconfig @@ -90,7 +90,7 @@ config USB_BCM63XX_UDC config USB_FSL_USB2 tristate "Freescale Highspeed USB DR Peripheral Controller" - depends on FSL_SOC || ARCH_MXC + depends on FSL_SOC help Some of Freescale PowerPC and i.MX processors have a High Speed Dual-Role(DR) USB controller, which supports device mode. 
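(The acm_ms_bind() and eth_bind() hunks above fix the same class of bug: taking the failure goto without first setting the status variable, so a NULL return from usb_otg_descriptor_alloc() could propagate as success. A reduced sketch of the idiom follows; the function is hypothetical and the allocation merely stands in for the descriptor allocation in the real code.)

#include <linux/errno.h>
#include <linux/slab.h>

/* Hypothetical bind-style helper showing the corrected error path. */
static int sketch_bind(void)
{
	void *desc;
	int status = 0;			/* earlier steps succeeded */

	desc = kzalloc(64, GFP_KERNEL);	/* stands in for
					 * usb_otg_descriptor_alloc() */
	if (!desc) {
		status = -ENOMEM;	/* the one-line fix: without this,
					 * "goto fail" returned whatever
					 * stale value status held */
		goto fail;
	}
	kfree(desc);
	return 0;
fail:
	return status;
}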
diff --git a/drivers/usb/gadget/udc/Makefile b/drivers/usb/gadget/udc/Makefile index f5a7ce28aecd..a21f2224e7eb 100644 --- a/drivers/usb/gadget/udc/Makefile +++ b/drivers/usb/gadget/udc/Makefile @@ -23,7 +23,6 @@ obj-$(CONFIG_USB_ATMEL_USBA) += atmel_usba_udc.o obj-$(CONFIG_USB_BCM63XX_UDC) += bcm63xx_udc.o obj-$(CONFIG_USB_FSL_USB2) += fsl_usb2_udc.o fsl_usb2_udc-y := fsl_udc_core.o -fsl_usb2_udc-$(CONFIG_ARCH_MXC) += fsl_mxc_udc.o obj-$(CONFIG_USB_TEGRA_XUDC) += tegra-xudc.o obj-$(CONFIG_USB_M66592) += m66592-udc.o obj-$(CONFIG_USB_R8A66597) += r8a66597-udc.o diff --git a/drivers/usb/gadget/udc/aspeed-vhub/epn.c b/drivers/usb/gadget/udc/aspeed-vhub/epn.c index 0bd6b20435b8..02d8bfae58fb 100644 --- a/drivers/usb/gadget/udc/aspeed-vhub/epn.c +++ b/drivers/usb/gadget/udc/aspeed-vhub/epn.c @@ -420,7 +420,10 @@ static void ast_vhub_stop_active_req(struct ast_vhub_ep *ep, u32 state, reg, loops; /* Stop DMA activity */ - writel(0, ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT); + if (ep->epn.desc_mode) + writel(VHUB_EP_DMA_CTRL_RESET, ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT); + else + writel(0, ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT); /* Wait for it to complete */ for (loops = 0; loops < 1000; loops++) { diff --git a/drivers/usb/gadget/udc/aspeed-vhub/hub.c b/drivers/usb/gadget/udc/aspeed-vhub/hub.c index 6497185ec4e7..bfd8e77788e2 100644 --- a/drivers/usb/gadget/udc/aspeed-vhub/hub.c +++ b/drivers/usb/gadget/udc/aspeed-vhub/hub.c @@ -999,8 +999,10 @@ static int ast_vhub_of_parse_str_desc(struct ast_vhub *vhub, str_array[offset].s = NULL; ret = ast_vhub_str_alloc_add(vhub, &lang_str); - if (ret) + if (ret) { + of_node_put(child); break; + } } return ret; diff --git a/drivers/usb/gadget/udc/bdc/Kconfig b/drivers/usb/gadget/udc/bdc/Kconfig index 3e88c7670b2e..8bedb7f64eba 100644 --- a/drivers/usb/gadget/udc/bdc/Kconfig +++ b/drivers/usb/gadget/udc/bdc/Kconfig @@ -11,14 +11,3 @@ config USB_BDC_UDC Say "y" here to link the driver statically, or "m" to build a dynamically linked module called "bdc". - -if USB_BDC_UDC - -comment "Platform Support" -config USB_BDC_PCI - tristate "BDC support for PCIe based platforms" - depends on USB_PCI - default USB_BDC_UDC - help - Enable support for platforms which have BDC connected through PCIe, such as Lego3 FPGA platform. -endif diff --git a/drivers/usb/gadget/udc/bdc/Makefile b/drivers/usb/gadget/udc/bdc/Makefile index 52cb5ea48bbe..1b21c9518efc 100644 --- a/drivers/usb/gadget/udc/bdc/Makefile +++ b/drivers/usb/gadget/udc/bdc/Makefile @@ -5,5 +5,3 @@ bdc-y := bdc_core.o bdc_cmd.o bdc_ep.o bdc_udc.o ifneq ($(CONFIG_USB_GADGET_VERBOSE),) bdc-y += bdc_dbg.o endif - -obj-$(CONFIG_USB_BDC_PCI) += bdc_pci.o diff --git a/drivers/usb/gadget/udc/bdc/bdc.h b/drivers/usb/gadget/udc/bdc/bdc.h index ac75e25c3b6a..8d00b1239f21 100644 --- a/drivers/usb/gadget/udc/bdc/bdc.h +++ b/drivers/usb/gadget/udc/bdc/bdc.h @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0+ +/* SPDX-License-Identifier: GPL-2.0+ */ /* * bdc.h - header for the BRCM BDC USB3.0 device controller * @@ -35,7 +35,7 @@ /* * Maximum size of ep0 response buffer for ch9 requests, * the set_sel request uses 6 so far, the max. 
-*/ + */ #define EP0_RESPONSE_BUFF 6 /* Start with SS as default */ #define EP0_MAX_PKT_SIZE 512 @@ -86,23 +86,23 @@ #define BDC_EPSTS5 0x74 #define BDC_EPSTS6 0x78 #define BDC_EPSTS7 0x7c -#define BDC_SRRBAL(n) (0x200 + (n * 0x10)) -#define BDC_SRRBAH(n) (0x204 + (n * 0x10)) -#define BDC_SRRINT(n) (0x208 + (n * 0x10)) -#define BDC_INTCTLS(n) (0x20c + (n * 0x10)) +#define BDC_SRRBAL(n) (0x200 + ((n) * 0x10)) +#define BDC_SRRBAH(n) (0x204 + ((n) * 0x10)) +#define BDC_SRRINT(n) (0x208 + ((n) * 0x10)) +#define BDC_INTCTLS(n) (0x20c + ((n) * 0x10)) /* Extended capability regs */ #define BDC_FSCNOC 0xcd4 #define BDC_FSCNIC 0xce4 -#define NUM_NCS(p) (p >> 28) +#define NUM_NCS(p) ((p) >> 28) /* Register bit fields and Masks */ /* BDC Configuration 0 */ #define BDC_PGS(p) (((p) & (0x7 << 8)) >> 8) -#define BDC_SPB(p) (p & 0x7) +#define BDC_SPB(p) ((p) & 0x7) /* BDC Capability1 */ -#define BDC_P64 (1 << 0) +#define BDC_P64 BIT(0) /* BDC Command register */ #define BDC_CMD_FH 0xe @@ -111,9 +111,9 @@ #define BDC_CMD_BLA 0x3 #define BDC_CMD_EPC 0x2 #define BDC_CMD_DVC 0x1 -#define BDC_CMD_CWS (0x1 << 5) +#define BDC_CMD_CWS BIT(5) #define BDC_CMD_CST(p) (((p) & (0xf << 6))>>6) -#define BDC_CMD_EPN(p) ((p & 0x1f) << 10) +#define BDC_CMD_EPN(p) (((p) & 0x1f) << 10) #define BDC_SUB_CMD_ADD (0x1 << 17) #define BDC_SUB_CMD_FWK (0x4 << 17) /* Reset sequence number */ @@ -124,7 +124,7 @@ #define BDC_SUB_CMD_EP_STP (0x2 << 17) #define BDC_SUB_CMD_EP_STL (0x4 << 17) #define BDC_SUB_CMD_EP_RST (0x1 << 17) -#define BDC_CMD_SRD (1 << 27) +#define BDC_CMD_SRD BIT(27) /* CMD completion status */ #define BDC_CMDS_SUCC 0x1 @@ -141,19 +141,19 @@ #define EPM_SHIFT 4 /* BDC USPSC */ -#define BDC_VBC (1 << 31) -#define BDC_PRC (1 << 30) -#define BDC_PCE (1 << 29) -#define BDC_CFC (1 << 28) -#define BDC_PCC (1 << 27) -#define BDC_PSC (1 << 26) -#define BDC_VBS (1 << 25) -#define BDC_PRS (1 << 24) -#define BDC_PCS (1 << 23) +#define BDC_VBC BIT(31) +#define BDC_PRC BIT(30) +#define BDC_PCE BIT(29) +#define BDC_CFC BIT(28) +#define BDC_PCC BIT(27) +#define BDC_PSC BIT(26) +#define BDC_VBS BIT(25) +#define BDC_PRS BIT(24) +#define BDC_PCS BIT(23) #define BDC_PSP(p) (((p) & (0x7 << 20))>>20) -#define BDC_SCN (1 << 8) -#define BDC_SDC (1 << 7) -#define BDC_SWS (1 << 4) +#define BDC_SCN BIT(8) +#define BDC_SDC BIT(7) +#define BDC_SWS BIT(4) #define BDC_USPSC_RW (BDC_SCN|BDC_SDC|BDC_SWS|0xf) #define BDC_PSP(p) (((p) & (0x7 << 20))>>20) @@ -163,21 +163,21 @@ #define BDC_SPEED_HS 0x3 #define BDC_SPEED_SS 0x4 -#define BDC_PST(p) (p & 0xf) +#define BDC_PST(p) ((p) & 0xf) #define BDC_PST_MASK 0xf /* USPPMS */ -#define BDC_U2E (0x1 << 31) -#define BDC_U1E (0x1 << 30) -#define BDC_U2A (0x1 << 29) -#define BDC_PORT_W1S (0x1 << 17) +#define BDC_U2E BIT(31) +#define BDC_U1E BIT(30) +#define BDC_U2A BIT(29) +#define BDC_PORT_W1S BIT(17) #define BDC_U1T(p) ((p) & 0xff) #define BDC_U2T(p) (((p) & 0xff) << 8) #define BDC_U1T_MASK 0xff /* USBPM2 */ /* Hardware LPM Enable */ -#define BDC_HLE (1 << 16) +#define BDC_HLE BIT(16) /* BDC Status and Control */ #define BDC_COP_RST (1 << 29) @@ -186,11 +186,11 @@ #define BDC_COP_MASK (BDC_COP_RST|BDC_COP_RUN|BDC_COP_STP) -#define BDC_COS (1 << 28) +#define BDC_COS BIT(28) #define BDC_CSTS(p) (((p) & (0x7 << 20)) >> 20) -#define BDC_MASK_MCW (1 << 7) -#define BDC_GIE (1 << 1) -#define BDC_GIP (1 << 0) +#define BDC_MASK_MCW BIT(7) +#define BDC_GIE BIT(1) +#define BDC_GIP BIT(0) #define BDC_HLT 1 #define BDC_NOR 2 @@ -201,19 +201,19 @@ #define BD_CHAIN 0xf #define BD_TFS_SHIFT 4 -#define BD_SOT (1 << 26) 
-#define BD_EOT (1 << 27) -#define BD_ISP (1 << 29) -#define BD_IOC (1 << 30) -#define BD_SBF (1 << 31) +#define BD_SOT BIT(26) +#define BD_EOT BIT(27) +#define BD_ISP BIT(29) +#define BD_IOC BIT(30) +#define BD_SBF BIT(31) #define BD_INTR_TARGET(p) (((p) & 0x1f) << 27) -#define BDC_SRR_RWS (1 << 4) -#define BDC_SRR_RST (1 << 3) -#define BDC_SRR_ISR (1 << 2) -#define BDC_SRR_IE (1 << 1) -#define BDC_SRR_IP (1 << 0) +#define BDC_SRR_RWS BIT(4) +#define BDC_SRR_RST BIT(3) +#define BDC_SRR_ISR BIT(2) +#define BDC_SRR_IE BIT(1) +#define BDC_SRR_IP BIT(0) #define BDC_SRR_EPI(p) (((p) & (0xff << 24)) >> 24) #define BDC_SRR_DPI(p) (((p) & (0xff << 16)) >> 16) #define BDC_SRR_DPI_MASK 0x00ff0000 @@ -221,14 +221,14 @@ #define MARK_CHAIN_BD (BD_CHAIN|BD_EOT|BD_SOT) /* Control transfer BD specific fields */ -#define BD_DIR_IN (1 << 25) +#define BD_DIR_IN BIT(25) #define BDC_PTC_MASK 0xf0000000 /* status report defines */ #define SR_XSF 0 #define SR_USPC 4 -#define SR_BD_LEN(p) (p & 0xffffff) +#define SR_BD_LEN(p) ((p) & 0xffffff) #define XSF_SUCC 0x1 #define XSF_SHORT 0x3 @@ -241,13 +241,13 @@ /* Transfer BD fields */ #define BD_LEN(p) ((p) & 0x1ffff) -#define BD_LTF (1 << 25) +#define BD_LTF BIT(25) #define BD_TYPE_DS 0x1 #define BD_TYPE_SS 0x2 -#define BDC_EP_ENABLED (1 << 0) -#define BDC_EP_STALL (1 << 1) -#define BDC_EP_STOP (1 << 2) +#define BDC_EP_ENABLED BIT(0) +#define BDC_EP_STALL BIT(1) +#define BDC_EP_STOP BIT(2) /* One BD can transfer max 65536 bytes */ #define BD_MAX_BUFF_SIZE (1 << 16) @@ -266,9 +266,9 @@ /* FUNCTION WAKE DEV NOTIFICATION interval, USB3 spec table 8.13 */ #define BDC_TNOTIFY 2500 /*in ms*/ /* Devstatus bitfields */ -#define REMOTE_WAKEUP_ISSUED (1 << 16) -#define DEVICE_SUSPENDED (1 << 17) -#define FUNC_WAKE_ISSUED (1 << 18) +#define REMOTE_WAKEUP_ISSUED BIT(16) +#define DEVICE_SUSPENDED BIT(17) +#define FUNC_WAKE_ISSUED BIT(18) #define REMOTE_WAKE_ENABLE (1 << USB_DEVICE_REMOTE_WAKEUP) /* On disconnect, preserve these bits and clear rest */ @@ -466,24 +466,24 @@ static inline void bdc_writel(void __iomem *base, u32 offset, u32 value) } /* Buffer descriptor list operations */ -void bdc_notify_xfr(struct bdc *, u32); -void bdc_softconn(struct bdc *); -void bdc_softdisconn(struct bdc *); -int bdc_run(struct bdc *); -int bdc_stop(struct bdc *); -int bdc_reset(struct bdc *); -int bdc_udc_init(struct bdc *); -void bdc_udc_exit(struct bdc *); -int bdc_reinit(struct bdc *); +void bdc_notify_xfr(struct bdc *bdc, u32 epnum); +void bdc_softconn(struct bdc *bdc); +void bdc_softdisconn(struct bdc *bdc); +int bdc_run(struct bdc *bdc); +int bdc_stop(struct bdc *bdc); +int bdc_reset(struct bdc *bdc); +int bdc_udc_init(struct bdc *bdc); +void bdc_udc_exit(struct bdc *bdc); +int bdc_reinit(struct bdc *bdc); /* Status report handlers */ /* Upstream port status change sr */ -void bdc_sr_uspc(struct bdc *, struct bdc_sr *); +void bdc_sr_uspc(struct bdc *bdc, struct bdc_sr *sreport); /* transfer sr */ -void bdc_sr_xsf(struct bdc *, struct bdc_sr *); +void bdc_sr_xsf(struct bdc *bdc, struct bdc_sr *sreport); /* EP0 XSF handlers */ -void bdc_xsf_ep0_setup_recv(struct bdc *, struct bdc_sr *); -void bdc_xsf_ep0_data_start(struct bdc *, struct bdc_sr *); -void bdc_xsf_ep0_status_start(struct bdc *, struct bdc_sr *); +void bdc_xsf_ep0_setup_recv(struct bdc *bdc, struct bdc_sr *sreport); +void bdc_xsf_ep0_data_start(struct bdc *bdc, struct bdc_sr *sreport); +void bdc_xsf_ep0_status_start(struct bdc *bdc, struct bdc_sr *sreport); #endif /* __LINUX_BDC_H__ */ diff --git 
a/drivers/usb/gadget/udc/bdc/bdc_cmd.c b/drivers/usb/gadget/udc/bdc/bdc_cmd.c index 44c2a5eef785..995f79c79f96 100644 --- a/drivers/usb/gadget/udc/bdc/bdc_cmd.c +++ b/drivers/usb/gadget/udc/bdc/bdc_cmd.c @@ -163,7 +163,7 @@ int bdc_config_ep(struct bdc *bdc, struct bdc_ep *ep) usb_endpoint_xfer_isoc(desc)) { param2 |= si; if (usb_endpoint_xfer_isoc(desc) && comp_desc) - mul = comp_desc->bmAttributes; + mul = comp_desc->bmAttributes; } param2 |= mul << EPM_SHIFT; diff --git a/drivers/usb/gadget/udc/bdc/bdc_cmd.h b/drivers/usb/gadget/udc/bdc/bdc_cmd.h index 29cc988a671a..533ad52fa6d0 100644 --- a/drivers/usb/gadget/udc/bdc/bdc_cmd.h +++ b/drivers/usb/gadget/udc/bdc/bdc_cmd.h @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0+ +/* SPDX-License-Identifier: GPL-2.0+ */ /* * bdc_cmd.h - header for the BDC debug functions * @@ -10,15 +10,14 @@ #define __LINUX_BDC_CMD_H__ /* Command operations */ -int bdc_address_device(struct bdc *, u32); -int bdc_config_ep(struct bdc *, struct bdc_ep *); -int bdc_dconfig_ep(struct bdc *, struct bdc_ep *); -int bdc_stop_ep(struct bdc *, int); -int bdc_ep_set_stall(struct bdc *, int); -int bdc_ep_clear_stall(struct bdc *, int); -int bdc_ep_set_halt(struct bdc_ep *, u32 , int); -int bdc_ep_bla(struct bdc *, struct bdc_ep *, dma_addr_t); -int bdc_function_wake(struct bdc*, u8); -int bdc_function_wake_fh(struct bdc*, u8); +int bdc_address_device(struct bdc *bdc, u32 add); +int bdc_config_ep(struct bdc *bdc, struct bdc_ep *ep); +int bdc_dconfig_ep(struct bdc *bdc, struct bdc_ep *ep); +int bdc_stop_ep(struct bdc *bdc, int epnum); +int bdc_ep_set_stall(struct bdc *bdc, int epnum); +int bdc_ep_clear_stall(struct bdc *bdc, int epnum); +int bdc_ep_bla(struct bdc *bdc, struct bdc_ep *ep, dma_addr_t dma_addr); +int bdc_function_wake(struct bdc *bdc, u8 intf); +int bdc_function_wake_fh(struct bdc *bdc, u8 intf); #endif /* __LINUX_BDC_CMD_H__ */ diff --git a/drivers/usb/gadget/udc/bdc/bdc_dbg.c b/drivers/usb/gadget/udc/bdc/bdc_dbg.c index 7ba7448ad743..9c03e13308ca 100644 --- a/drivers/usb/gadget/udc/bdc/bdc_dbg.c +++ b/drivers/usb/gadget/udc/bdc/bdc_dbg.c @@ -68,7 +68,7 @@ void bdc_dbg_srr(struct bdc *bdc, u32 srr_num) sr = bdc->srr.sr_bds; addr = bdc->srr.dma_addr; - dev_vdbg(bdc->dev, "bdc_dbg_srr sr:%p dqp_index:%d\n", + dev_vdbg(bdc->dev, "%s sr:%p dqp_index:%d\n", __func__, sr, bdc->srr.dqp_index); for (i = 0; i < NUM_SR_ENTRIES; i++) { sr = &bdc->srr.sr_bds[i]; diff --git a/drivers/usb/gadget/udc/bdc/bdc_dbg.h b/drivers/usb/gadget/udc/bdc/bdc_dbg.h index 373d5abffbb8..acd8332f8584 100644 --- a/drivers/usb/gadget/udc/bdc/bdc_dbg.h +++ b/drivers/usb/gadget/udc/bdc/bdc_dbg.h @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0+ +/* SPDX-License-Identifier: GPL-2.0+ */ /* * bdc_dbg.h - header for the BDC debug functions * @@ -12,10 +12,10 @@ #include "bdc.h" #ifdef CONFIG_USB_GADGET_VERBOSE -void bdc_dbg_bd_list(struct bdc *, struct bdc_ep*); -void bdc_dbg_srr(struct bdc *, u32); -void bdc_dbg_regs(struct bdc *); -void bdc_dump_epsts(struct bdc *); +void bdc_dbg_bd_list(struct bdc *bdc, struct bdc_ep *ep); +void bdc_dbg_srr(struct bdc *bdc, u32 srr_num); +void bdc_dbg_regs(struct bdc *bdc); +void bdc_dump_epsts(struct bdc *bdc); #else static inline void bdc_dbg_regs(struct bdc *bdc) { } diff --git a/drivers/usb/gadget/udc/bdc/bdc_ep.c b/drivers/usb/gadget/udc/bdc/bdc_ep.c index fafdc9fdb4a5..8e2f20b12519 100644 --- a/drivers/usb/gadget/udc/bdc/bdc_ep.c +++ b/drivers/usb/gadget/udc/bdc/bdc_ep.c @@ -68,7 +68,7 @@ static void ep_bd_list_free(struct bdc_ep *ep, u32 num_tabs) 
* check if the bd_table struct is allocated ? * if yes, then check if bd memory has been allocated, then * free the dma_pool and also the bd_table struct memory - */ + */ bd_table = bd_list->bd_table_array[index]; dev_dbg(bdc->dev, "bd_table:%p index:%d\n", bd_table, index); if (!bd_table) { @@ -147,7 +147,7 @@ static int ep_bd_list_alloc(struct bdc_ep *ep) /* Allocate memory for each table */ for (index = 0; index < num_tabs; index++) { /* Allocate memory for bd_table structure */ - bd_table = kzalloc(sizeof(struct bd_table), GFP_ATOMIC); + bd_table = kzalloc(sizeof(*bd_table), GFP_ATOMIC); if (!bd_table) goto fail; @@ -275,7 +275,7 @@ static inline int find_end_bdi(struct bdc_ep *ep, int next_hwd_bdi) end_bdi = next_hwd_bdi - 1; if (end_bdi < 0) end_bdi = ep->bd_list.max_bdi - 1; - else if ((end_bdi % (ep->bd_list.num_bds_table-1)) == 0) + else if ((end_bdi % (ep->bd_list.num_bds_table-1)) == 0) end_bdi--; return end_bdi; @@ -756,7 +756,7 @@ static int ep_dequeue(struct bdc_ep *ep, struct bdc_req *req) dev_dbg(bdc->dev, "%s ep:%s start:%d end:%d\n", __func__, ep->name, start_bdi, end_bdi); - dev_dbg(bdc->dev, "ep_dequeue ep=%p ep->desc=%p\n", + dev_dbg(bdc->dev, "%s ep=%p ep->desc=%p\n", __func__, ep, (void *)ep->usb_ep.desc); /* if still connected, stop the ep to see where the HW is ? */ if (!(bdc_readl(bdc->regs, BDC_USPC) & BDC_PST_MASK)) { @@ -795,7 +795,7 @@ static int ep_dequeue(struct bdc_ep *ep, struct bdc_req *req) start_pending = true; end_pending = true; } else if (end_bdi >= curr_hw_dqpi || end_bdi <= eqp_bdi) { - end_pending = true; + end_pending = true; } } else { if (start_bdi >= curr_hw_dqpi) { @@ -1405,7 +1405,7 @@ static int ep0_set_sel(struct bdc *bdc, } /* - * Queue a 0 byte bd only if wLength is more than the length and and length is + * Queue a 0 byte bd only if wLength is more than the length and length is * a multiple of MaxPacket then queue 0 byte BD */ static int ep0_queue_zlp(struct bdc *bdc) @@ -1858,12 +1858,12 @@ static int bdc_gadget_ep_enable(struct usb_ep *_ep, int ret; if (!_ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT) { - pr_debug("bdc_gadget_ep_enable invalid parameters\n"); + pr_debug("%s invalid parameters\n", __func__); return -EINVAL; } if (!desc->wMaxPacketSize) { - pr_debug("bdc_gadget_ep_enable missing wMaxPacketSize\n"); + pr_debug("%s missing wMaxPacketSize\n", __func__); return -EINVAL; } diff --git a/drivers/usb/gadget/udc/bdc/bdc_ep.h b/drivers/usb/gadget/udc/bdc/bdc_ep.h index a37ff8033b4f..4d3affd07a65 100644 --- a/drivers/usb/gadget/udc/bdc/bdc_ep.h +++ b/drivers/usb/gadget/udc/bdc/bdc_ep.h @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0+ +/* SPDX-License-Identifier: GPL-2.0+ */ /* * bdc_ep.h - header for the BDC debug functions * @@ -9,9 +9,9 @@ #ifndef __LINUX_BDC_EP_H__ #define __LINUX_BDC_EP_H__ -int bdc_init_ep(struct bdc *); -int bdc_ep_disable(struct bdc_ep *); -int bdc_ep_enable(struct bdc_ep *); -void bdc_free_ep(struct bdc *); +int bdc_init_ep(struct bdc *bdc); +int bdc_ep_disable(struct bdc_ep *ep); +int bdc_ep_enable(struct bdc_ep *ep); +void bdc_free_ep(struct bdc *bdc); #endif /* __LINUX_BDC_EP_H__ */ diff --git a/drivers/usb/gadget/udc/bdc/bdc_pci.c b/drivers/usb/gadget/udc/bdc/bdc_pci.c deleted file mode 100644 index 6dbc489513cd..000000000000 --- a/drivers/usb/gadget/udc/bdc/bdc_pci.c +++ /dev/null @@ -1,128 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0+ -/* - * bdc_pci.c - BRCM BDC USB3.0 device controller PCI interface file. 
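bdc_pci.c, whose removal starts here, was the usual PCI-to-platform glue: the PCI probe claims the function, then spawns a platform_device carrying the controller's MMIO window and IRQ so the core platform driver can bind to it. A condensed sketch of that pattern, with hypothetical names and the error unwinding trimmed:

#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/string.h>

/* Sketch: hand a PCI function over to an existing platform driver. */
static int foo_pci_probe(struct pci_dev *pci, const struct pci_device_id *id)
{
	struct platform_device *pdev;
	struct resource res[2];
	int ret;

	ret = pcim_enable_device(pci);
	if (ret)
		return ret;
	pci_set_master(pci);

	pdev = platform_device_alloc("foo-core", PLATFORM_DEVID_AUTO);
	if (!pdev)
		return -ENOMEM;

	/* forward BAR 0 and the legacy IRQ to the child device */
	memset(res, 0, sizeof(res));
	res[0].start = pci_resource_start(pci, 0);
	res[0].end = pci_resource_end(pci, 0);
	res[0].flags = IORESOURCE_MEM;
	res[1].start = pci->irq;
	res[1].flags = IORESOURCE_IRQ;

	ret = platform_device_add_resources(pdev, res, ARRAY_SIZE(res));
	if (!ret)
		ret = platform_device_add(pdev);
	if (ret) {
		platform_device_put(pdev);
		return ret;
	}

	pci_set_drvdata(pci, pdev);
	return 0;
}
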
- * - * Copyright (C) 2014 Broadcom Corporation - * - * Author: Ashwini Pahuja - * - * Based on drivers under drivers/usb/ - */ - -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/slab.h> -#include <linux/pci.h> -#include <linux/pci_ids.h> -#include <linux/platform_device.h> - -#include "bdc.h" - -#define BDC_PCI_PID 0x1570 - -struct bdc_pci { - struct device *dev; - struct platform_device *bdc; -}; - -static int bdc_setup_msi(struct pci_dev *pci) -{ - int ret; - - ret = pci_enable_msi(pci); - if (ret) { - pr_err("failed to allocate MSI entry\n"); - return ret; - } - - return ret; -} - -static int bdc_pci_probe(struct pci_dev *pci, const struct pci_device_id *id) -{ - struct resource res[2]; - struct platform_device *bdc; - struct bdc_pci *glue; - int ret = -ENOMEM; - - glue = devm_kzalloc(&pci->dev, sizeof(*glue), GFP_KERNEL); - if (!glue) - return -ENOMEM; - - glue->dev = &pci->dev; - ret = pci_enable_device(pci); - if (ret) { - dev_err(&pci->dev, "failed to enable pci device\n"); - return -ENODEV; - } - pci_set_master(pci); - - bdc = platform_device_alloc(BRCM_BDC_NAME, PLATFORM_DEVID_AUTO); - if (!bdc) - return -ENOMEM; - - memset(res, 0x00, sizeof(struct resource) * ARRAY_SIZE(res)); - bdc_setup_msi(pci); - - res[0].start = pci_resource_start(pci, 0); - res[0].end = pci_resource_end(pci, 0); - res[0].name = BRCM_BDC_NAME; - res[0].flags = IORESOURCE_MEM; - - res[1].start = pci->irq; - res[1].name = BRCM_BDC_NAME; - res[1].flags = IORESOURCE_IRQ; - - ret = platform_device_add_resources(bdc, res, ARRAY_SIZE(res)); - if (ret) { - dev_err(&pci->dev, - "couldn't add resources to bdc device\n"); - platform_device_put(bdc); - return ret; - } - - pci_set_drvdata(pci, glue); - - dma_set_coherent_mask(&bdc->dev, pci->dev.coherent_dma_mask); - - bdc->dev.dma_mask = pci->dev.dma_mask; - bdc->dev.dma_parms = pci->dev.dma_parms; - bdc->dev.parent = &pci->dev; - glue->bdc = bdc; - - ret = platform_device_add(bdc); - if (ret) { - dev_err(&pci->dev, "failed to register bdc device\n"); - platform_device_put(bdc); - return ret; - } - - return 0; -} - -static void bdc_pci_remove(struct pci_dev *pci) -{ - struct bdc_pci *glue = pci_get_drvdata(pci); - - platform_device_unregister(glue->bdc); - pci_disable_msi(pci); -} - -static struct pci_device_id bdc_pci_id_table[] = { - { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BDC_PCI_PID), }, - {} /* Terminating Entry */ -}; - -MODULE_DEVICE_TABLE(pci, bdc_pci_id_table); - -static struct pci_driver bdc_pci_driver = { - .name = "bdc-pci", - .id_table = bdc_pci_id_table, - .probe = bdc_pci_probe, - .remove = bdc_pci_remove, -}; - -MODULE_AUTHOR("Ashwini Pahuja <ashwini.linux@gmail.com>"); -MODULE_LICENSE("GPL"); -MODULE_DESCRIPTION("BRCM BDC USB3 PCI Glue layer"); -module_pci_driver(bdc_pci_driver); diff --git a/drivers/usb/gadget/udc/bdc/bdc_udc.c b/drivers/usb/gadget/udc/bdc/bdc_udc.c index 248426a3e88a..5ac0ef88334e 100644 --- a/drivers/usb/gadget/udc/bdc/bdc_udc.c +++ b/drivers/usb/gadget/udc/bdc/bdc_udc.c @@ -164,7 +164,7 @@ static void bdc_func_wake_timer(struct work_struct *work) /* * Check if host has started transferring on endpoints * FUNC_WAKE_ISSUED is cleared when transfer has started after resume - */ + */ if (bdc->devstatus & FUNC_WAKE_ISSUED) { dev_dbg(bdc->dev, "FUNC_WAKE_ISSUED FLAG IS STILL SET\n"); /* flag is still set, so again send func wake */ @@ -205,7 +205,7 @@ static void handle_link_state_change(struct bdc *bdc, u32 uspc) * if not then send function wake again every * TNotification secs until host initiates * transfer to BDC, 
USB3 spec Table 8.13 - */ + */ schedule_delayed_work( &bdc->func_wake_notify, msecs_to_jiffies(BDC_TNOTIFY)); @@ -379,7 +379,7 @@ static int bdc_udc_start(struct usb_gadget *gadget, * Run the controller from here and when BDC is connected to * Host then driver will receive a USPC SR with VBUS present * and then driver will do a softconnect. - */ + */ ret = bdc_run(bdc); if (ret) { dev_err(bdc->dev, "%s bdc run fail\n", __func__); @@ -530,7 +530,7 @@ int bdc_udc_init(struct bdc *bdc) bdc->gadget.name = BRCM_BDC_NAME; ret = devm_request_irq(bdc->dev, bdc->irq, bdc_udc_interrupt, - IRQF_SHARED , BRCM_BDC_NAME, bdc); + IRQF_SHARED, BRCM_BDC_NAME, bdc); if (ret) { dev_err(bdc->dev, "failed to request irq #%d %d\n", diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c index 5b5cfeb6c14a..493ff93f7dda 100644 --- a/drivers/usb/gadget/udc/core.c +++ b/drivers/usb/gadget/udc/core.c @@ -29,6 +29,7 @@ * @list: for use by the udc class driver * @vbus: for udcs who care about vbus status, this value is real vbus status; * for udcs who do not care about vbus status, this value is always true + * @started: the UDC's started state. True if the UDC had started. * * This represents the internal data structure which is used by the UDC-class * to hold information about udc driver and gadget together. @@ -39,6 +40,7 @@ struct usb_udc { struct device dev; struct list_head list; bool vbus; + bool started; }; static struct class *udc_class; @@ -659,8 +661,7 @@ EXPORT_SYMBOL_GPL(usb_gadget_vbus_disconnect); * * Enables the D+ (or potentially D-) pullup. The host will start * enumerating this gadget when the pullup is active and a VBUS session - * is active (the link is powered). This pullup is always enabled unless - * usb_gadget_disconnect() has been used to disable it. + * is active (the link is powered). * * Returns zero on success, else negative errno. 
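The usb_gadget_udc_start()/usb_gadget_udc_stop() hunks that follow add a started flag so the pair becomes idempotent: a second start is refused and an unbalanced stop becomes a no-op rather than a double call into the gadget driver. A reduced sketch of the guard, on a simplified structure rather than the kernel's struct usb_udc:

#include <linux/errno.h>
#include <linux/types.h>

struct udc_state {
	bool started;
	int (*hw_start)(void *priv);
	void (*hw_stop)(void *priv);
	void *priv;
};

static int udc_state_start(struct udc_state *u)
{
	int ret;

	if (u->started)			/* refuse a second start */
		return -EBUSY;

	ret = u->hw_start(u->priv);
	if (!ret)
		u->started = true;	/* only mark on success */
	return ret;
}

static void udc_state_stop(struct udc_state *u)
{
	if (!u->started)		/* unbalanced stop: do nothing */
		return;

	u->hw_stop(u->priv);
	u->started = false;
}
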
*/ @@ -1083,7 +1084,18 @@ EXPORT_SYMBOL_GPL(usb_gadget_udc_reset); */ static inline int usb_gadget_udc_start(struct usb_udc *udc) { - return udc->gadget->ops->udc_start(udc->gadget, udc->driver); + int ret; + + if (udc->started) { + dev_err(&udc->dev, "UDC had already started\n"); + return -EBUSY; + } + + ret = udc->gadget->ops->udc_start(udc->gadget, udc->driver); + if (!ret) + udc->started = true; + + return ret; } /** @@ -1099,7 +1111,13 @@ static inline int usb_gadget_udc_start(struct usb_udc *udc) */ static inline void usb_gadget_udc_stop(struct usb_udc *udc) { + if (!udc->started) { + dev_err(&udc->dev, "UDC had already stopped\n"); + return; + } + udc->gadget->ops->udc_stop(udc->gadget); + udc->started = false; } /** @@ -1115,12 +1133,18 @@ static inline void usb_gadget_udc_stop(struct usb_udc *udc) static inline void usb_gadget_udc_set_speed(struct usb_udc *udc, enum usb_device_speed speed) { - if (udc->gadget->ops->udc_set_speed) { - enum usb_device_speed s; + struct usb_gadget *gadget = udc->gadget; + enum usb_device_speed s; - s = min(speed, udc->gadget->max_speed); - udc->gadget->ops->udc_set_speed(udc->gadget, s); - } + if (speed == USB_SPEED_UNKNOWN) + s = gadget->max_speed; + else + s = min(speed, gadget->max_speed); + + if (s == USB_SPEED_SUPER_PLUS && gadget->ops->udc_set_ssp_rate) + gadget->ops->udc_set_ssp_rate(gadget, gadget->max_ssp_rate); + else if (gadget->ops->udc_set_speed) + gadget->ops->udc_set_speed(gadget, s); } /** @@ -1223,6 +1247,8 @@ int usb_add_gadget(struct usb_gadget *gadget) udc->gadget = gadget; gadget->udc = udc; + udc->started = false; + mutex_lock(&udc_lock); list_add_tail(&udc->list, &udc_list); @@ -1530,10 +1556,13 @@ static ssize_t soft_connect_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t n) { struct usb_udc *udc = container_of(dev, struct usb_udc, dev); + ssize_t ret; + mutex_lock(&udc_lock); if (!udc->driver) { dev_err(dev, "soft-connect without a gadget driver\n"); - return -EOPNOTSUPP; + ret = -EOPNOTSUPP; + goto out; } if (sysfs_streq(buf, "connect")) { @@ -1544,10 +1573,14 @@ static ssize_t soft_connect_store(struct device *dev, usb_gadget_udc_stop(udc); } else { dev_err(dev, "unsupported command '%s'\n", buf); - return -EINVAL; + ret = -EINVAL; + goto out; } - return n; + ret = n; +out: + mutex_unlock(&udc_lock); + return ret; } static DEVICE_ATTR_WO(soft_connect); diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c index ab5e978b5052..57067763b100 100644 --- a/drivers/usb/gadget/udc/dummy_hcd.c +++ b/drivers/usb/gadget/udc/dummy_hcd.c @@ -2118,9 +2118,21 @@ static int dummy_hub_control( dum_hcd->port_status &= ~USB_PORT_STAT_POWER; set_link_state(dum_hcd); break; - default: + case USB_PORT_FEAT_ENABLE: + case USB_PORT_FEAT_C_ENABLE: + case USB_PORT_FEAT_C_SUSPEND: + /* Not allowed for USB-3 */ + if (hcd->speed == HCD_USB3) + goto error; + fallthrough; + case USB_PORT_FEAT_C_CONNECTION: + case USB_PORT_FEAT_C_RESET: dum_hcd->port_status &= ~(1 << wValue); set_link_state(dum_hcd); + break; + default: + /* Disallow INDICATOR and C_OVER_CURRENT */ + goto error; } break; case GetHubDescriptor: @@ -2258,17 +2270,20 @@ static int dummy_hub_control( } fallthrough; case USB_PORT_FEAT_RESET: + if (!(dum_hcd->port_status & USB_PORT_STAT_CONNECTION)) + break; /* if it's already enabled, disable */ if (hcd->speed == HCD_USB3) { - dum_hcd->port_status = 0; dum_hcd->port_status = (USB_SS_PORT_STAT_POWER | USB_PORT_STAT_CONNECTION | USB_PORT_STAT_RESET); - } else + } else { 
dum_hcd->port_status &= ~(USB_PORT_STAT_ENABLE | USB_PORT_STAT_LOW_SPEED | USB_PORT_STAT_HIGH_SPEED); + dum_hcd->port_status |= USB_PORT_STAT_RESET; + } /* * We want to reset device status. All but the * Self powered feature @@ -2280,19 +2295,19 @@ static int dummy_hub_control( * interval? Is it still 50msec as for HS? */ dum_hcd->re_timeout = jiffies + msecs_to_jiffies(50); - fallthrough; - default: - if (hcd->speed == HCD_USB3) { - if ((dum_hcd->port_status & - USB_SS_PORT_STAT_POWER) != 0) { - dum_hcd->port_status |= (1 << wValue); - } - } else - if ((dum_hcd->port_status & - USB_PORT_STAT_POWER) != 0) { - dum_hcd->port_status |= (1 << wValue); - } set_link_state(dum_hcd); + break; + case USB_PORT_FEAT_C_CONNECTION: + case USB_PORT_FEAT_C_RESET: + case USB_PORT_FEAT_C_ENABLE: + case USB_PORT_FEAT_C_SUSPEND: + /* Not allowed for USB-3, and ignored for USB-2 */ + if (hcd->speed == HCD_USB3) + goto error; + break; + default: + /* Disallow TEST, INDICATOR, and C_OVER_CURRENT */ + goto error; } break; case GetPortErrorCount: diff --git a/drivers/usb/gadget/udc/fsl_mxc_udc.c b/drivers/usb/gadget/udc/fsl_mxc_udc.c deleted file mode 100644 index 5a321992decc..000000000000 --- a/drivers/usb/gadget/udc/fsl_mxc_udc.c +++ /dev/null @@ -1,122 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0+ -/* - * Copyright (C) 2009 - * Guennadi Liakhovetski, DENX Software Engineering, <lg@denx.de> - * - * Description: - * Helper routines for i.MX3x SoCs from Freescale, needed by the fsl_usb2_udc.c - * driver to function correctly on these systems. - */ -#include <linux/clk.h> -#include <linux/delay.h> -#include <linux/err.h> -#include <linux/fsl_devices.h> -#include <linux/mod_devicetable.h> -#include <linux/platform_device.h> -#include <linux/io.h> - -#include "fsl_usb2_udc.h" - -static struct clk *mxc_ahb_clk; -static struct clk *mxc_per_clk; -static struct clk *mxc_ipg_clk; - -/* workaround ENGcm09152 for i.MX35 */ -#define MX35_USBPHYCTRL_OFFSET 0x600 -#define USBPHYCTRL_OTGBASE_OFFSET 0x8 -#define USBPHYCTRL_EVDO (1 << 23) - -int fsl_udc_clk_init(struct platform_device *pdev) -{ - struct fsl_usb2_platform_data *pdata; - unsigned long freq; - int ret; - - pdata = dev_get_platdata(&pdev->dev); - - mxc_ipg_clk = devm_clk_get(&pdev->dev, "ipg"); - if (IS_ERR(mxc_ipg_clk)) { - dev_err(&pdev->dev, "clk_get(\"ipg\") failed\n"); - return PTR_ERR(mxc_ipg_clk); - } - - mxc_ahb_clk = devm_clk_get(&pdev->dev, "ahb"); - if (IS_ERR(mxc_ahb_clk)) { - dev_err(&pdev->dev, "clk_get(\"ahb\") failed\n"); - return PTR_ERR(mxc_ahb_clk); - } - - mxc_per_clk = devm_clk_get(&pdev->dev, "per"); - if (IS_ERR(mxc_per_clk)) { - dev_err(&pdev->dev, "clk_get(\"per\") failed\n"); - return PTR_ERR(mxc_per_clk); - } - - clk_prepare_enable(mxc_ipg_clk); - clk_prepare_enable(mxc_ahb_clk); - clk_prepare_enable(mxc_per_clk); - - /* make sure USB_CLK is running at 60 MHz +/- 1000 Hz */ - if (!strcmp(pdev->id_entry->name, "imx-udc-mx27")) { - freq = clk_get_rate(mxc_per_clk); - if (pdata->phy_mode != FSL_USB2_PHY_ULPI && - (freq < 59999000 || freq > 60001000)) { - dev_err(&pdev->dev, "USB_CLK=%lu, should be 60MHz\n", freq); - ret = -EINVAL; - goto eclkrate; - } - } - - return 0; - -eclkrate: - clk_disable_unprepare(mxc_ipg_clk); - clk_disable_unprepare(mxc_ahb_clk); - clk_disable_unprepare(mxc_per_clk); - mxc_per_clk = NULL; - return ret; -} - -int fsl_udc_clk_finalize(struct platform_device *pdev) -{ - struct fsl_usb2_platform_data *pdata = dev_get_platdata(&pdev->dev); - int ret = 0; - - /* workaround ENGcm09152 for i.MX35 */ - if (pdata->workaround 
& FLS_USB2_WORKAROUND_ENGCM09152) { - unsigned int v; - struct resource *res = platform_get_resource - (pdev, IORESOURCE_MEM, 0); - void __iomem *phy_regs = ioremap(res->start + - MX35_USBPHYCTRL_OFFSET, 512); - if (!phy_regs) { - dev_err(&pdev->dev, "ioremap for phy address fails\n"); - ret = -EINVAL; - goto ioremap_err; - } - - v = readl(phy_regs + USBPHYCTRL_OTGBASE_OFFSET); - writel(v | USBPHYCTRL_EVDO, - phy_regs + USBPHYCTRL_OTGBASE_OFFSET); - - iounmap(phy_regs); - } - - -ioremap_err: - /* ULPI transceivers don't need usbpll */ - if (pdata->phy_mode == FSL_USB2_PHY_ULPI) { - clk_disable_unprepare(mxc_per_clk); - mxc_per_clk = NULL; - } - - return ret; -} - -void fsl_udc_clk_release(void) -{ - if (mxc_per_clk) - clk_disable_unprepare(mxc_per_clk); - clk_disable_unprepare(mxc_ahb_clk); - clk_disable_unprepare(mxc_ipg_clk); -} diff --git a/drivers/usb/gadget/udc/snps_udc_core.c b/drivers/usb/gadget/udc/snps_udc_core.c index 6c726d2e1788..d046c09fa566 100644 --- a/drivers/usb/gadget/udc/snps_udc_core.c +++ b/drivers/usb/gadget/udc/snps_udc_core.c @@ -36,7 +36,6 @@ #include <asm/unaligned.h> #include "amd5536udc.h" -static void udc_tasklet_disconnect(unsigned long); static void udc_setup_endpoints(struct udc *dev); static void udc_soft_reset(struct udc *dev); static struct udc_request *udc_alloc_bna_dummy(struct udc_ep *ep); @@ -95,9 +94,6 @@ static struct timer_list udc_pollstall_timer; static int stop_pollstall_timer; static DECLARE_COMPLETION(on_pollstall_exit); -/* tasklet for usb disconnect */ -static DECLARE_TASKLET_OLD(disconnect_tasklet, udc_tasklet_disconnect); - /* endpoint names used for print */ static const char ep0_string[] = "ep0in"; static const struct { @@ -1637,6 +1633,8 @@ static void usb_connect(struct udc *dev) */ static void usb_disconnect(struct udc *dev) { + u32 tmp; + /* Return if already disconnected */ if (!dev->connected) return; @@ -1648,23 +1646,6 @@ static void usb_disconnect(struct udc *dev) /* mask interrupts */ udc_mask_unused_interrupts(dev); - /* REVISIT there doesn't seem to be a point to having this - * talk to a tasklet ... do it directly, we already hold - * the spinlock needed to process the disconnect. 
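The snps_udc_core rework that continues here folds the old deferred-disconnect path straight into usb_disconnect(), which already runs under the device spinlock. The one subtlety visible in the surviving code is the classic rule of dropping the lock around the gadget driver's ->disconnect() callback, since the callback may sleep or call back into the UDC. Schematically, with the lock and callback as stand-ins:

#include <linux/spinlock.h>

/*
 * Sketch: invoke an external callback from a section that holds a
 * spinlock. The caller owns 'lock' on entry and gets it back locked.
 */
static void notify_disconnect(spinlock_t *lock,
			      void (*disconnect)(void *ctx), void *ctx)
{
	if (!disconnect)
		return;

	spin_unlock(lock);	/* callback may sleep or re-enter us */
	disconnect(ctx);
	spin_lock(lock);
}
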
- */ - - tasklet_schedule(&disconnect_tasklet); -} - -/* Tasklet for disconnect to be outside of interrupt context */ -static void udc_tasklet_disconnect(unsigned long par) -{ - struct udc *dev = udc; - u32 tmp; - - DBG(dev, "Tasklet disconnect\n"); - spin_lock_irq(&dev->lock); - if (dev->driver) { spin_unlock(&dev->lock); dev->driver->disconnect(&dev->gadget); @@ -1673,13 +1654,10 @@ static void udc_tasklet_disconnect(unsigned long par) /* empty queues */ for (tmp = 0; tmp < UDC_EP_NUM; tmp++) empty_req_queue(&dev->ep[tmp]); - } /* disable ep0 */ - ep_init(dev->regs, - &dev->ep[UDC_EP0IN_IX]); - + ep_init(dev->regs, &dev->ep[UDC_EP0IN_IX]); if (!soft_reset_occured) { /* init controller by soft reset */ @@ -1695,8 +1673,6 @@ static void udc_tasklet_disconnect(unsigned long par) tmp = AMD_ADDBITS(tmp, UDC_DEVCFG_SPD_FS, UDC_DEVCFG_SPD); writel(tmp, &dev->regs->cfg); } - - spin_unlock_irq(&dev->lock); } /* Reset the UDC core */ diff --git a/drivers/usb/gadget/udc/udc-xilinx.c b/drivers/usb/gadget/udc/udc-xilinx.c index d5e9d20c097d..72f2ea062d55 100644 --- a/drivers/usb/gadget/udc/udc-xilinx.c +++ b/drivers/usb/gadget/udc/udc-xilinx.c @@ -843,8 +843,8 @@ static int __xudc_ep_enable(struct xusb_ep *ep, break; } - ep->buffer0ready = 0; - ep->buffer1ready = 0; + ep->buffer0ready = false; + ep->buffer1ready = false; ep->curbufnum = 0; ep->rambase = rambase[ep->epnumber]; xudc_epconfig(ep, udc); @@ -868,11 +868,11 @@ static int __xudc_ep_enable(struct xusb_ep *ep, if (ep->epnumber && !ep->is_in) { udc->write_fn(udc->addr, XUSB_BUFFREADY_OFFSET, 1 << ep->epnumber); - ep->buffer0ready = 1; + ep->buffer0ready = true; udc->write_fn(udc->addr, XUSB_BUFFREADY_OFFSET, (1 << (ep->epnumber + XUSB_STATUS_EP_BUFF2_SHIFT))); - ep->buffer1ready = 1; + ep->buffer1ready = true; } return 0; @@ -1954,7 +1954,7 @@ static void xudc_nonctrl_ep_handler(struct xusb_udc *udc, u8 epnum, if (intrstatus & (XUSB_STATUS_EP0_BUFF1_COMP_MASK << epnum)) ep->buffer0ready = 0; if (intrstatus & (XUSB_STATUS_EP0_BUFF2_COMP_MASK << epnum)) - ep->buffer1ready = 0; + ep->buffer1ready = false; if (list_empty(&ep->queue)) return; diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig index 31e59309da1f..b94f2a070c05 100644 --- a/drivers/usb/host/Kconfig +++ b/drivers/usb/host/Kconfig @@ -269,10 +269,14 @@ config USB_EHCI_HCD_AT91 config USB_EHCI_TEGRA tristate "NVIDIA Tegra HCD support" depends on ARCH_TEGRA - select USB_EHCI_ROOT_HUB_TT - select USB_TEGRA_PHY + select USB_CHIPIDEA + select USB_CHIPIDEA_HOST + select USB_CHIPIDEA_TEGRA help - This driver enables support for the internal USB Host Controllers + This option is deprecated now and the driver was removed, use + USB_CHIPIDEA_TEGRA instead. + + Enable support for the internal USB Host Controllers found in NVIDIA Tegra SoCs. The controllers are EHCI compliant. 
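In ehci-hcd.c further down, ehci_run() no longer assumes the controller comes out of halt on its own: after setting the Configure Flag it polls STS_HALT via ehci_handshake() and fails the start cleanly on timeout. The handshake idiom itself is just bounded polling; a rough sketch (register layout abstracted, and the in-tree helper wraps a similar loop in the proper poll primitives):

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/types.h>

/* Sketch: wait until (reg & mask) == done, or give up after 'usec'. */
static int handshake_sketch(void __iomem *reg, u32 mask, u32 done, int usec)
{
	u32 val;

	while (usec-- > 0) {
		val = readl(reg);
		if ((val & mask) == done)
			return 0;
		udelay(1);	/* busy-wait; callers bound the budget */
	}
	return -ETIMEDOUT;
}

ehci_run() calls the real helper with done == 0, i.e. it waits for STS_HALT to clear and the controller to actually be running.
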
config USB_EHCI_HCD_PPC_OF diff --git a/drivers/usb/host/Makefile b/drivers/usb/host/Makefile index c1b08703af10..3e4d298d851f 100644 --- a/drivers/usb/host/Makefile +++ b/drivers/usb/host/Makefile @@ -47,7 +47,6 @@ obj-$(CONFIG_USB_EHCI_HCD_SPEAR) += ehci-spear.o obj-$(CONFIG_USB_EHCI_HCD_STI) += ehci-st.o obj-$(CONFIG_USB_EHCI_EXYNOS) += ehci-exynos.o obj-$(CONFIG_USB_EHCI_HCD_AT91) += ehci-atmel.o -obj-$(CONFIG_USB_EHCI_TEGRA) += ehci-tegra.o obj-$(CONFIG_USB_EHCI_BRCMSTB) += ehci-brcm.o obj-$(CONFIG_USB_OXU210HP_HCD) += oxu210hp-hcd.o diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c index e358ae17d51e..1926b328b6aa 100644 --- a/drivers/usb/host/ehci-hcd.c +++ b/drivers/usb/host/ehci-hcd.c @@ -574,6 +574,7 @@ static int ehci_run (struct usb_hcd *hcd) struct ehci_hcd *ehci = hcd_to_ehci (hcd); u32 temp; u32 hcc_params; + int rc; hcd->uses_new_polling = 1; @@ -629,9 +630,20 @@ static int ehci_run (struct usb_hcd *hcd) down_write(&ehci_cf_port_reset_rwsem); ehci->rh_state = EHCI_RH_RUNNING; ehci_writel(ehci, FLAG_CF, &ehci->regs->configured_flag); + + /* Wait until HC become operational */ ehci_readl(ehci, &ehci->regs->command); /* unblock posted writes */ msleep(5); + rc = ehci_handshake(ehci, &ehci->regs->status, STS_HALT, 0, 100 * 1000); + up_write(&ehci_cf_port_reset_rwsem); + + if (rc) { + ehci_err(ehci, "USB %x.%x, controller refused to start: %d\n", + ((ehci->sbrn & 0xf0)>>4), (ehci->sbrn & 0x0f), rc); + return rc; + } + ehci->last_periodic_enable = ktime_get_real(); temp = HC_VERSION(ehci, ehci_readl(ehci, &ehci->caps->hc_capbase)); diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c index 087402aec5cb..9f9ab5ccea88 100644 --- a/drivers/usb/host/ehci-hub.c +++ b/drivers/usb/host/ehci-hub.c @@ -345,6 +345,9 @@ static int ehci_bus_suspend (struct usb_hcd *hcd) unlink_empty_async_suspended(ehci); + /* Some Synopsys controllers mistakenly leave IAA turned on */ + ehci_writel(ehci, STS_IAA, &ehci->regs->status); + /* Any IAA cycle that started before the suspend is now invalid */ end_iaa_cycle(ehci); ehci_handle_start_intr_unlinks(ehci); diff --git a/drivers/usb/host/ehci-tegra.c b/drivers/usb/host/ehci-tegra.c deleted file mode 100644 index 869d9c4de5fc..000000000000 --- a/drivers/usb/host/ehci-tegra.c +++ /dev/null @@ -1,604 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0+ -/* - * EHCI-compliant USB host controller driver for NVIDIA Tegra SoCs - * - * Copyright (C) 2010 Google, Inc. 
- * Copyright (C) 2009 - 2013 NVIDIA Corporation - */ - -#include <linux/clk.h> -#include <linux/dma-mapping.h> -#include <linux/err.h> -#include <linux/gpio.h> -#include <linux/io.h> -#include <linux/irq.h> -#include <linux/module.h> -#include <linux/of.h> -#include <linux/of_device.h> -#include <linux/of_gpio.h> -#include <linux/platform_device.h> -#include <linux/pm_runtime.h> -#include <linux/reset.h> -#include <linux/slab.h> -#include <linux/usb/ehci_def.h> -#include <linux/usb/tegra_usb_phy.h> -#include <linux/usb.h> -#include <linux/usb/hcd.h> -#include <linux/usb/otg.h> - -#include "ehci.h" - -#define PORT_WAKE_BITS (PORT_WKOC_E|PORT_WKDISC_E|PORT_WKCONN_E) - -#define TEGRA_USB_DMA_ALIGN 32 - -#define DRIVER_DESC "Tegra EHCI driver" -#define DRV_NAME "tegra-ehci" - -static struct hc_driver __read_mostly tegra_ehci_hc_driver; - -struct tegra_ehci_soc_config { - bool has_hostpc; -}; - -struct tegra_ehci_hcd { - struct clk *clk; - struct reset_control *rst; - int port_resuming; - bool needs_double_reset; -}; - -static int tegra_reset_usb_controller(struct platform_device *pdev) -{ - struct device_node *phy_np; - struct usb_hcd *hcd = platform_get_drvdata(pdev); - struct tegra_ehci_hcd *tegra = - (struct tegra_ehci_hcd *)hcd_to_ehci(hcd)->priv; - struct reset_control *rst; - int err; - - phy_np = of_parse_phandle(pdev->dev.of_node, "nvidia,phy", 0); - if (!phy_np) - return -ENOENT; - - /* - * The 1st USB controller contains some UTMI pad registers that are - * global for all the controllers on the chip. Those registers are - * also cleared when reset is asserted to the 1st controller. - */ - rst = of_reset_control_get_shared(phy_np, "utmi-pads"); - if (IS_ERR(rst)) { - dev_warn(&pdev->dev, - "can't get utmi-pads reset from the PHY\n"); - dev_warn(&pdev->dev, - "continuing, but please update your DT\n"); - } else { - /* - * PHY driver performs UTMI-pads reset in a case of - * non-legacy DT. - */ - reset_control_put(rst); - } - - of_node_put(phy_np); - - /* reset control is shared, hence initialize it first */ - err = reset_control_deassert(tegra->rst); - if (err) - return err; - - err = reset_control_assert(tegra->rst); - if (err) - return err; - - udelay(1); - - err = reset_control_deassert(tegra->rst); - if (err) - return err; - - return 0; -} - -static int tegra_ehci_internal_port_reset( - struct ehci_hcd *ehci, - u32 __iomem *portsc_reg -) -{ - u32 temp; - unsigned long flags; - int retval = 0; - int i, tries; - u32 saved_usbintr; - - spin_lock_irqsave(&ehci->lock, flags); - saved_usbintr = ehci_readl(ehci, &ehci->regs->intr_enable); - /* disable USB interrupt */ - ehci_writel(ehci, 0, &ehci->regs->intr_enable); - spin_unlock_irqrestore(&ehci->lock, flags); - - /* - * Here we have to do Port Reset at most twice for - * Port Enable bit to be set. - */ - for (i = 0; i < 2; i++) { - temp = ehci_readl(ehci, portsc_reg); - temp |= PORT_RESET; - ehci_writel(ehci, temp, portsc_reg); - mdelay(10); - temp &= ~PORT_RESET; - ehci_writel(ehci, temp, portsc_reg); - mdelay(1); - tries = 100; - do { - mdelay(1); - /* - * Up to this point, Port Enable bit is - * expected to be set after 2 ms waiting. - * USB1 usually takes extra 45 ms, for safety, - * we take 100 ms as timeout. - */ - temp = ehci_readl(ehci, portsc_reg); - } while (!(temp & PORT_PE) && tries--); - if (temp & PORT_PE) - break; - } - if (i == 2) - retval = -ETIMEDOUT; - - /* - * Clear Connect Status Change bit if it's set. - * We can't clear PORT_PEC. It will also cause PORT_PE to be cleared. 
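The deleted Tegra reset helper below is careful about a detail worth keeping in mind for any port-status register: bits like PORT_CSC are write-one-to-clear, while neighbours such as PORT_PEC must not be written back casually because acknowledging them has side effects (here, clearing PORT_PEC would also clear PORT_PE). A generic sketch of acknowledging a single RW1C event, with a hypothetical register layout:

#include <linux/bits.h>
#include <linux/io.h>

#define EVT_CONNECT_CHANGE	BIT(1)	/* hypothetical RW1C event bit */

/*
 * Sketch: ack exactly one latched event. Writing 0 to the other RW1C
 * bits leaves them alone, so no read-modify-write dance is needed.
 */
static void ack_connect_change(void __iomem *status_reg)
{
	if (readl(status_reg) & EVT_CONNECT_CHANGE)
		writel(EVT_CONNECT_CHANGE, status_reg);
}
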
- */ - if (temp & PORT_CSC) - ehci_writel(ehci, PORT_CSC, portsc_reg); - - /* - * Write to clear any interrupt status bits that might be set - * during port reset. - */ - temp = ehci_readl(ehci, &ehci->regs->status); - ehci_writel(ehci, temp, &ehci->regs->status); - - /* restore original interrupt enable bits */ - ehci_writel(ehci, saved_usbintr, &ehci->regs->intr_enable); - return retval; -} - -static int tegra_ehci_hub_control( - struct usb_hcd *hcd, - u16 typeReq, - u16 wValue, - u16 wIndex, - char *buf, - u16 wLength -) -{ - struct ehci_hcd *ehci = hcd_to_ehci(hcd); - struct tegra_ehci_hcd *tegra = (struct tegra_ehci_hcd *)ehci->priv; - u32 __iomem *status_reg; - u32 temp; - unsigned long flags; - int retval = 0; - - status_reg = &ehci->regs->port_status[(wIndex & 0xff) - 1]; - - spin_lock_irqsave(&ehci->lock, flags); - - if (typeReq == GetPortStatus) { - temp = ehci_readl(ehci, status_reg); - if (tegra->port_resuming && !(temp & PORT_SUSPEND)) { - /* Resume completed, re-enable disconnect detection */ - tegra->port_resuming = 0; - tegra_usb_phy_postresume(hcd->usb_phy); - } - } - - else if (typeReq == SetPortFeature && wValue == USB_PORT_FEAT_SUSPEND) { - temp = ehci_readl(ehci, status_reg); - if ((temp & PORT_PE) == 0 || (temp & PORT_RESET) != 0) { - retval = -EPIPE; - goto done; - } - - temp &= ~(PORT_RWC_BITS | PORT_WKCONN_E); - temp |= PORT_WKDISC_E | PORT_WKOC_E; - ehci_writel(ehci, temp | PORT_SUSPEND, status_reg); - - /* - * If a transaction is in progress, there may be a delay in - * suspending the port. Poll until the port is suspended. - */ - if (ehci_handshake(ehci, status_reg, PORT_SUSPEND, - PORT_SUSPEND, 5000)) - pr_err("%s: timeout waiting for SUSPEND\n", __func__); - - set_bit((wIndex & 0xff) - 1, &ehci->suspended_ports); - goto done; - } - - /* For USB1 port we need to issue Port Reset twice internally */ - if (tegra->needs_double_reset && - (typeReq == SetPortFeature && wValue == USB_PORT_FEAT_RESET)) { - spin_unlock_irqrestore(&ehci->lock, flags); - return tegra_ehci_internal_port_reset(ehci, status_reg); - } - - /* - * Tegra host controller will time the resume operation to clear the bit - * when the port control state switches to HS or FS Idle. This behavior - * is different from EHCI where the host controller driver is required - * to set this bit to a zero after the resume duration is timed in the - * driver. 
- */ - else if (typeReq == ClearPortFeature && - wValue == USB_PORT_FEAT_SUSPEND) { - temp = ehci_readl(ehci, status_reg); - if ((temp & PORT_RESET) || !(temp & PORT_PE)) { - retval = -EPIPE; - goto done; - } - - if (!(temp & PORT_SUSPEND)) - goto done; - - /* Disable disconnect detection during port resume */ - tegra_usb_phy_preresume(hcd->usb_phy); - - ehci->reset_done[wIndex-1] = jiffies + msecs_to_jiffies(25); - - temp &= ~(PORT_RWC_BITS | PORT_WAKE_BITS); - /* start resume signalling */ - ehci_writel(ehci, temp | PORT_RESUME, status_reg); - set_bit(wIndex-1, &ehci->resuming_ports); - - spin_unlock_irqrestore(&ehci->lock, flags); - msleep(20); - spin_lock_irqsave(&ehci->lock, flags); - - /* Poll until the controller clears RESUME and SUSPEND */ - if (ehci_handshake(ehci, status_reg, PORT_RESUME, 0, 2000)) - pr_err("%s: timeout waiting for RESUME\n", __func__); - if (ehci_handshake(ehci, status_reg, PORT_SUSPEND, 0, 2000)) - pr_err("%s: timeout waiting for SUSPEND\n", __func__); - - ehci->reset_done[wIndex-1] = 0; - clear_bit(wIndex-1, &ehci->resuming_ports); - - tegra->port_resuming = 1; - goto done; - } - - spin_unlock_irqrestore(&ehci->lock, flags); - - /* Handle the hub control events here */ - return ehci_hub_control(hcd, typeReq, wValue, wIndex, buf, wLength); - -done: - spin_unlock_irqrestore(&ehci->lock, flags); - return retval; -} - -struct dma_aligned_buffer { - void *kmalloc_ptr; - void *old_xfer_buffer; - u8 data[]; -}; - -static void free_dma_aligned_buffer(struct urb *urb) -{ - struct dma_aligned_buffer *temp; - size_t length; - - if (!(urb->transfer_flags & URB_ALIGNED_TEMP_BUFFER)) - return; - - temp = container_of(urb->transfer_buffer, - struct dma_aligned_buffer, data); - - if (usb_urb_dir_in(urb)) { - if (usb_pipeisoc(urb->pipe)) - length = urb->transfer_buffer_length; - else - length = urb->actual_length; - - memcpy(temp->old_xfer_buffer, temp->data, length); - } - urb->transfer_buffer = temp->old_xfer_buffer; - kfree(temp->kmalloc_ptr); - - urb->transfer_flags &= ~URB_ALIGNED_TEMP_BUFFER; -} - -static int alloc_dma_aligned_buffer(struct urb *urb, gfp_t mem_flags) -{ - struct dma_aligned_buffer *temp, *kmalloc_ptr; - size_t kmalloc_size; - - if (urb->num_sgs || urb->sg || - urb->transfer_buffer_length == 0 || - !((uintptr_t)urb->transfer_buffer & (TEGRA_USB_DMA_ALIGN - 1))) - return 0; - - /* Allocate a buffer with enough padding for alignment */ - kmalloc_size = urb->transfer_buffer_length + - sizeof(struct dma_aligned_buffer) + TEGRA_USB_DMA_ALIGN - 1; - - kmalloc_ptr = kmalloc(kmalloc_size, mem_flags); - if (!kmalloc_ptr) - return -ENOMEM; - - /* Position our struct dma_aligned_buffer such that data is aligned */ - temp = PTR_ALIGN(kmalloc_ptr + 1, TEGRA_USB_DMA_ALIGN) - 1; - temp->kmalloc_ptr = kmalloc_ptr; - temp->old_xfer_buffer = urb->transfer_buffer; - if (usb_urb_dir_out(urb)) - memcpy(temp->data, urb->transfer_buffer, - urb->transfer_buffer_length); - urb->transfer_buffer = temp->data; - - urb->transfer_flags |= URB_ALIGNED_TEMP_BUFFER; - - return 0; -} - -static int tegra_ehci_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb, - gfp_t mem_flags) -{ - int ret; - - ret = alloc_dma_aligned_buffer(urb, mem_flags); - if (ret) - return ret; - - ret = usb_hcd_map_urb_for_dma(hcd, urb, mem_flags); - if (ret) - free_dma_aligned_buffer(urb); - - return ret; -} - -static void tegra_ehci_unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb) -{ - usb_hcd_unmap_urb_for_dma(hcd, urb); - free_dma_aligned_buffer(urb); -} - -static const struct tegra_ehci_soc_config 
tegra30_soc_config = { - .has_hostpc = true, -}; - -static const struct tegra_ehci_soc_config tegra20_soc_config = { - .has_hostpc = false, -}; - -static const struct of_device_id tegra_ehci_of_match[] = { - { .compatible = "nvidia,tegra30-ehci", .data = &tegra30_soc_config }, - { .compatible = "nvidia,tegra20-ehci", .data = &tegra20_soc_config }, - { }, -}; - -static int tegra_ehci_probe(struct platform_device *pdev) -{ - const struct of_device_id *match; - const struct tegra_ehci_soc_config *soc_config; - struct resource *res; - struct usb_hcd *hcd; - struct ehci_hcd *ehci; - struct tegra_ehci_hcd *tegra; - int err = 0; - int irq; - struct usb_phy *u_phy; - - match = of_match_device(tegra_ehci_of_match, &pdev->dev); - if (!match) { - dev_err(&pdev->dev, "Error: No device match found\n"); - return -ENODEV; - } - soc_config = match->data; - - /* Right now device-tree probed devices don't get dma_mask set. - * Since shared usb code relies on it, set it here for now. - * Once we have dma capability bindings this can go away. - */ - err = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); - if (err) - return err; - - hcd = usb_create_hcd(&tegra_ehci_hc_driver, &pdev->dev, - dev_name(&pdev->dev)); - if (!hcd) { - dev_err(&pdev->dev, "Unable to create HCD\n"); - return -ENOMEM; - } - platform_set_drvdata(pdev, hcd); - ehci = hcd_to_ehci(hcd); - tegra = (struct tegra_ehci_hcd *)ehci->priv; - - hcd->has_tt = 1; - - tegra->clk = devm_clk_get(&pdev->dev, NULL); - if (IS_ERR(tegra->clk)) { - dev_err(&pdev->dev, "Can't get ehci clock\n"); - err = PTR_ERR(tegra->clk); - goto cleanup_hcd_create; - } - - tegra->rst = devm_reset_control_get_shared(&pdev->dev, "usb"); - if (IS_ERR(tegra->rst)) { - dev_err(&pdev->dev, "Can't get ehci reset\n"); - err = PTR_ERR(tegra->rst); - goto cleanup_hcd_create; - } - - err = clk_prepare_enable(tegra->clk); - if (err) - goto cleanup_hcd_create; - - err = tegra_reset_usb_controller(pdev); - if (err) { - dev_err(&pdev->dev, "Failed to reset controller\n"); - goto cleanup_clk_en; - } - - u_phy = devm_usb_get_phy_by_phandle(&pdev->dev, "nvidia,phy", 0); - if (IS_ERR(u_phy)) { - err = -EPROBE_DEFER; - goto cleanup_clk_en; - } - hcd->usb_phy = u_phy; - hcd->skip_phy_initialization = 1; - - tegra->needs_double_reset = of_property_read_bool(pdev->dev.of_node, - "nvidia,needs-double-reset"); - - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - hcd->regs = devm_ioremap_resource(&pdev->dev, res); - if (IS_ERR(hcd->regs)) { - err = PTR_ERR(hcd->regs); - goto cleanup_clk_en; - } - hcd->rsrc_start = res->start; - hcd->rsrc_len = resource_size(res); - - ehci->caps = hcd->regs + 0x100; - ehci->has_hostpc = soc_config->has_hostpc; - - err = usb_phy_init(hcd->usb_phy); - if (err) { - dev_err(&pdev->dev, "Failed to initialize phy\n"); - goto cleanup_clk_en; - } - - u_phy->otg = devm_kzalloc(&pdev->dev, sizeof(struct usb_otg), - GFP_KERNEL); - if (!u_phy->otg) { - err = -ENOMEM; - goto cleanup_phy; - } - u_phy->otg->host = hcd_to_bus(hcd); - - irq = platform_get_irq(pdev, 0); - if (irq < 0) { - err = irq; - goto cleanup_phy; - } - - otg_set_host(u_phy->otg, &hcd->self); - - err = usb_add_hcd(hcd, irq, IRQF_SHARED); - if (err) { - dev_err(&pdev->dev, "Failed to add USB HCD\n"); - goto cleanup_otg_set_host; - } - device_wakeup_enable(hcd->self.controller); - - return err; - -cleanup_otg_set_host: - otg_set_host(u_phy->otg, NULL); -cleanup_phy: - usb_phy_shutdown(hcd->usb_phy); -cleanup_clk_en: - clk_disable_unprepare(tegra->clk); -cleanup_hcd_create: - usb_put_hcd(hcd); - 
return err; -} - -static int tegra_ehci_remove(struct platform_device *pdev) -{ - struct usb_hcd *hcd = platform_get_drvdata(pdev); - struct tegra_ehci_hcd *tegra = - (struct tegra_ehci_hcd *)hcd_to_ehci(hcd)->priv; - - usb_remove_hcd(hcd); - otg_set_host(hcd->usb_phy->otg, NULL); - usb_phy_shutdown(hcd->usb_phy); - clk_disable_unprepare(tegra->clk); - usb_put_hcd(hcd); - - return 0; -} - -static void tegra_ehci_hcd_shutdown(struct platform_device *pdev) -{ - struct usb_hcd *hcd = platform_get_drvdata(pdev); - - if (hcd->driver->shutdown) - hcd->driver->shutdown(hcd); -} - -static struct platform_driver tegra_ehci_driver = { - .probe = tegra_ehci_probe, - .remove = tegra_ehci_remove, - .shutdown = tegra_ehci_hcd_shutdown, - .driver = { - .name = DRV_NAME, - .of_match_table = tegra_ehci_of_match, - } -}; - -static int tegra_ehci_reset(struct usb_hcd *hcd) -{ - struct ehci_hcd *ehci = hcd_to_ehci(hcd); - int retval; - int txfifothresh; - - retval = ehci_setup(hcd); - if (retval) - return retval; - - /* - * We should really pull this value out of tegra_ehci_soc_config, but - * to avoid needing access to it, make use of the fact that Tegra20 is - * the only one so far that needs a value of 10, and Tegra20 is the - * only one which doesn't set has_hostpc. - */ - txfifothresh = ehci->has_hostpc ? 0x10 : 10; - ehci_writel(ehci, txfifothresh << 16, &ehci->regs->txfill_tuning); - - return 0; -} - -static const struct ehci_driver_overrides tegra_overrides __initconst = { - .extra_priv_size = sizeof(struct tegra_ehci_hcd), - .reset = tegra_ehci_reset, -}; - -static int __init ehci_tegra_init(void) -{ - if (usb_disabled()) - return -ENODEV; - - pr_info(DRV_NAME ": " DRIVER_DESC "\n"); - - ehci_init_driver(&tegra_ehci_hc_driver, &tegra_overrides); - - /* - * The Tegra HW has some unusual quirks, which require Tegra-specific - * workarounds. We override certain hc_driver functions here to - * achieve that. We explicitly do not enhance ehci_driver_overrides to - * allow this more easily, since this is an unusual case, and we don't - * want to encourage others to override these functions by making it - * too easy. 
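Two of the hc_driver overrides installed above exist only to service Tegra's 32-byte DMA alignment requirement: misaligned URB buffers are bounced through a kmalloc'd copy whose payload is slid up to the boundary. The pointer arithmetic from alloc_dma_aligned_buffer(), reduced to its core with shortened names:

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/types.h>

#define DMA_ALIGN	32	/* TEGRA_USB_DMA_ALIGN in the old driver */

struct bounce {
	void *kmalloc_ptr;	/* raw allocation, for kfree() later */
	u8 data[];		/* payload; must land on DMA_ALIGN */
};

static struct bounce *bounce_alloc(size_t len, gfp_t gfp)
{
	struct bounce *b, *raw;

	/* over-allocate so data[] can always be rounded up in place */
	raw = kmalloc(sizeof(*raw) + len + DMA_ALIGN - 1, gfp);
	if (!raw)
		return NULL;

	/*
	 * Align the address just past the header, then step back one
	 * struct so that b->data sits exactly on the boundary.
	 */
	b = PTR_ALIGN(raw + 1, DMA_ALIGN) - 1;
	b->kmalloc_ptr = raw;
	return b;
}
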
- */ - - tegra_ehci_hc_driver.map_urb_for_dma = tegra_ehci_map_urb_for_dma; - tegra_ehci_hc_driver.unmap_urb_for_dma = tegra_ehci_unmap_urb_for_dma; - tegra_ehci_hc_driver.hub_control = tegra_ehci_hub_control; - - return platform_driver_register(&tegra_ehci_driver); -} -module_init(ehci_tegra_init); - -static void __exit ehci_tegra_cleanup(void) -{ - platform_driver_unregister(&tegra_ehci_driver); -} -module_exit(ehci_tegra_cleanup); - -MODULE_DESCRIPTION(DRIVER_DESC); -MODULE_LICENSE("GPL"); -MODULE_ALIAS("platform:" DRV_NAME); -MODULE_DEVICE_TABLE(of, tegra_ehci_of_match); diff --git a/drivers/usb/host/xhci-ext-caps.c b/drivers/usb/host/xhci-ext-caps.c index 3351d07c431f..7a4c2c4ad50e 100644 --- a/drivers/usb/host/xhci-ext-caps.c +++ b/drivers/usb/host/xhci-ext-caps.c @@ -54,7 +54,8 @@ static int xhci_create_intel_xhci_sw_pdev(struct xhci_hcd *xhci, u32 cap_offset) } if (pci->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI) { - ret = platform_device_add_properties(pdev, role_switch_props); + ret = device_create_managed_software_node(&pdev->dev, role_switch_props, + NULL); if (ret) { dev_err(dev, "failed to register device properties\n"); platform_device_put(pdev); diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c index 3589b49b6c8b..f2c4ee7c4786 100644 --- a/drivers/usb/host/xhci-mem.c +++ b/drivers/usb/host/xhci-mem.c @@ -592,23 +592,6 @@ struct xhci_ring *xhci_dma_to_transfer_ring( return ep->ring; } -struct xhci_ring *xhci_stream_id_to_ring( - struct xhci_virt_device *dev, - unsigned int ep_index, - unsigned int stream_id) -{ - struct xhci_virt_ep *ep = &dev->eps[ep_index]; - - if (stream_id == 0) - return ep->ring; - if (!ep->stream_info) - return NULL; - - if (stream_id >= ep->stream_info->num_streams) - return NULL; - return ep->stream_info->stream_rings[stream_id]; -} - /* * Change an endpoint's internal structure so it supports stream IDs. The * number of requested streams includes stream 0, which cannot be used by device @@ -994,6 +977,8 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id, if (!dev) return 0; + dev->slot_id = slot_id; + /* Allocate the (output) device context that will be used in the HC. 
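The xhci-mem.c hunks around this point also give every allocated object its own identity: the virt device learns its slot_id, and each endpoint records its index plus a back-pointer to the owning device. That is what lets later code (xhci_get_virt_ep() and its callers in xhci-ring.c) validate a reference without threading the indices through every call. In miniature:

#define EPS_PER_DEV	31

struct xvdev;

struct xvep {
	unsigned int ep_index;	/* position in the parent's array */
	struct xvdev *vdev;	/* back-pointer to the owner */
};

struct xvdev {
	int slot_id;
	struct xvep eps[EPS_PER_DEV];
};

/* Sketch: stamp children with their identity at allocation time. */
static void xvdev_init(struct xvdev *dev, int slot_id)
{
	unsigned int i;

	dev->slot_id = slot_id;
	for (i = 0; i < EPS_PER_DEV; i++) {
		dev->eps[i].ep_index = i;
		dev->eps[i].vdev = dev;
	}
}
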
*/ dev->out_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_DEVICE, flags); if (!dev->out_ctx) @@ -1012,6 +997,8 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id, /* Initialize the cancellation list and watchdog timers for each ep */ for (i = 0; i < 31; i++) { + dev->eps[i].ep_index = i; + dev->eps[i].vdev = dev; xhci_init_endpoint_timer(xhci, &dev->eps[i]); INIT_LIST_HEAD(&dev->eps[i].cancelled_td_list); INIT_LIST_HEAD(&dev->eps[i].bw_endpoint_list); diff --git a/drivers/usb/host/xhci-mtk-sch.c b/drivers/usb/host/xhci-mtk-sch.c index 45c54d56ecbd..b45e5bf08997 100644 --- a/drivers/usb/host/xhci-mtk-sch.c +++ b/drivers/usb/host/xhci-mtk-sch.c @@ -200,6 +200,8 @@ static struct mu3h_sch_ep_info *create_sch_ep(struct usb_device *udev, sch_ep->sch_tt = tt; sch_ep->ep = ep; + INIT_LIST_HEAD(&sch_ep->endpoint); + INIT_LIST_HEAD(&sch_ep->tt_endpoint); return sch_ep; } @@ -373,6 +375,7 @@ static void update_bus_bw(struct mu3h_sch_bw_info *sch_bw, sch_ep->bw_budget_table[j]; } } + sch_ep->allocated = used; } static int check_sch_tt(struct usb_device *udev, @@ -541,6 +544,22 @@ static int check_sch_bw(struct usb_device *udev, return 0; } +static void destroy_sch_ep(struct usb_device *udev, + struct mu3h_sch_bw_info *sch_bw, struct mu3h_sch_ep_info *sch_ep) +{ + /* only release ep bw check passed by check_sch_bw() */ + if (sch_ep->allocated) + update_bus_bw(sch_bw, sch_ep, 0); + + list_del(&sch_ep->endpoint); + + if (sch_ep->sch_tt) { + list_del(&sch_ep->tt_endpoint); + drop_tt(udev); + } + kfree(sch_ep); +} + static bool need_bw_sch(struct usb_host_endpoint *ep, enum usb_device_speed speed, int has_tt) { @@ -583,6 +602,8 @@ int xhci_mtk_sch_init(struct xhci_hcd_mtk *mtk) mtk->sch_array = sch_array; + INIT_LIST_HEAD(&mtk->bw_ep_chk_list); + return 0; } EXPORT_SYMBOL_GPL(xhci_mtk_sch_init); @@ -601,19 +622,14 @@ int xhci_mtk_add_ep_quirk(struct usb_hcd *hcd, struct usb_device *udev, struct xhci_ep_ctx *ep_ctx; struct xhci_slot_ctx *slot_ctx; struct xhci_virt_device *virt_dev; - struct mu3h_sch_bw_info *sch_bw; struct mu3h_sch_ep_info *sch_ep; - struct mu3h_sch_bw_info *sch_array; unsigned int ep_index; - int bw_index; - int ret = 0; xhci = hcd_to_xhci(hcd); virt_dev = xhci->devs[udev->slot_id]; ep_index = xhci_get_endpoint_index(&ep->desc); slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx); ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index); - sch_array = mtk->sch_array; xhci_dbg(xhci, "%s() type:%d, speed:%d, mpkt:%d, dir:%d, ep:%p\n", __func__, usb_endpoint_type(&ep->desc), udev->speed, @@ -632,35 +648,13 @@ int xhci_mtk_add_ep_quirk(struct usb_hcd *hcd, struct usb_device *udev, return 0; } - bw_index = get_bw_index(xhci, udev, ep); - sch_bw = &sch_array[bw_index]; - sch_ep = create_sch_ep(udev, ep, ep_ctx); if (IS_ERR_OR_NULL(sch_ep)) return -ENOMEM; setup_sch_info(udev, ep_ctx, sch_ep); - ret = check_sch_bw(udev, sch_bw, sch_ep); - if (ret) { - xhci_err(xhci, "Not enough bandwidth!\n"); - if (is_fs_or_ls(udev->speed)) - drop_tt(udev); - - kfree(sch_ep); - return -ENOSPC; - } - - list_add_tail(&sch_ep->endpoint, &sch_bw->bw_ep_list); - - ep_ctx->reserved[0] |= cpu_to_le32(EP_BPKTS(sch_ep->pkts) - | EP_BCSCOUNT(sch_ep->cs_count) | EP_BBM(sch_ep->burst_mode)); - ep_ctx->reserved[1] |= cpu_to_le32(EP_BOFFSET(sch_ep->offset) - | EP_BREPEAT(sch_ep->repeat)); - - xhci_dbg(xhci, " PKTS:%x, CSCOUNT:%x, BM:%x, OFFSET:%x, REPEAT:%x\n", - sch_ep->pkts, sch_ep->cs_count, sch_ep->burst_mode, - sch_ep->offset, sch_ep->repeat); + list_add_tail(&sch_ep->endpoint, 
&mtk->bw_ep_chk_list); return 0; } @@ -675,7 +669,7 @@ void xhci_mtk_drop_ep_quirk(struct usb_hcd *hcd, struct usb_device *udev, struct xhci_virt_device *virt_dev; struct mu3h_sch_bw_info *sch_array; struct mu3h_sch_bw_info *sch_bw; - struct mu3h_sch_ep_info *sch_ep; + struct mu3h_sch_ep_info *sch_ep, *tmp; int bw_index; xhci = hcd_to_xhci(hcd); @@ -694,17 +688,79 @@ void xhci_mtk_drop_ep_quirk(struct usb_hcd *hcd, struct usb_device *udev, bw_index = get_bw_index(xhci, udev, ep); sch_bw = &sch_array[bw_index]; - list_for_each_entry(sch_ep, &sch_bw->bw_ep_list, endpoint) { + list_for_each_entry_safe(sch_ep, tmp, &sch_bw->bw_ep_list, endpoint) { if (sch_ep->ep == ep) { - update_bus_bw(sch_bw, sch_ep, 0); - list_del(&sch_ep->endpoint); - if (is_fs_or_ls(udev->speed)) { - list_del(&sch_ep->tt_endpoint); - drop_tt(udev); - } - kfree(sch_ep); + destroy_sch_ep(udev, sch_bw, sch_ep); break; } } } EXPORT_SYMBOL_GPL(xhci_mtk_drop_ep_quirk); + +int xhci_mtk_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev) +{ + struct xhci_hcd_mtk *mtk = hcd_to_mtk(hcd); + struct xhci_hcd *xhci = hcd_to_xhci(hcd); + struct xhci_virt_device *virt_dev = xhci->devs[udev->slot_id]; + struct mu3h_sch_bw_info *sch_bw; + struct mu3h_sch_ep_info *sch_ep, *tmp; + int bw_index, ret; + + xhci_dbg(xhci, "%s() udev %s\n", __func__, dev_name(&udev->dev)); + + list_for_each_entry(sch_ep, &mtk->bw_ep_chk_list, endpoint) { + bw_index = get_bw_index(xhci, udev, sch_ep->ep); + sch_bw = &mtk->sch_array[bw_index]; + + ret = check_sch_bw(udev, sch_bw, sch_ep); + if (ret) { + xhci_err(xhci, "Not enough bandwidth!\n"); + return -ENOSPC; + } + } + + list_for_each_entry_safe(sch_ep, tmp, &mtk->bw_ep_chk_list, endpoint) { + struct xhci_ep_ctx *ep_ctx; + struct usb_host_endpoint *ep = sch_ep->ep; + unsigned int ep_index = xhci_get_endpoint_index(&ep->desc); + + bw_index = get_bw_index(xhci, udev, ep); + sch_bw = &mtk->sch_array[bw_index]; + + list_move_tail(&sch_ep->endpoint, &sch_bw->bw_ep_list); + + ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index); + ep_ctx->reserved[0] |= cpu_to_le32(EP_BPKTS(sch_ep->pkts) + | EP_BCSCOUNT(sch_ep->cs_count) + | EP_BBM(sch_ep->burst_mode)); + ep_ctx->reserved[1] |= cpu_to_le32(EP_BOFFSET(sch_ep->offset) + | EP_BREPEAT(sch_ep->repeat)); + + xhci_dbg(xhci, " PKTS:%x, CSCOUNT:%x, BM:%x, OFFSET:%x, REPEAT:%x\n", + sch_ep->pkts, sch_ep->cs_count, sch_ep->burst_mode, + sch_ep->offset, sch_ep->repeat); + } + + return xhci_check_bandwidth(hcd, udev); +} +EXPORT_SYMBOL_GPL(xhci_mtk_check_bandwidth); + +void xhci_mtk_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev) +{ + struct xhci_hcd_mtk *mtk = hcd_to_mtk(hcd); + struct xhci_hcd *xhci = hcd_to_xhci(hcd); + struct mu3h_sch_bw_info *sch_bw; + struct mu3h_sch_ep_info *sch_ep, *tmp; + int bw_index; + + xhci_dbg(xhci, "%s() udev %s\n", __func__, dev_name(&udev->dev)); + + list_for_each_entry_safe(sch_ep, tmp, &mtk->bw_ep_chk_list, endpoint) { + bw_index = get_bw_index(xhci, udev, sch_ep->ep); + sch_bw = &mtk->sch_array[bw_index]; + destroy_sch_ep(udev, sch_bw, sch_ep); + } + + xhci_reset_bandwidth(hcd, udev); +} +EXPORT_SYMBOL_GPL(xhci_mtk_reset_bandwidth); diff --git a/drivers/usb/host/xhci-mtk.c b/drivers/usb/host/xhci-mtk.c index 8f321f39ab96..fe010cc61f19 100644 --- a/drivers/usb/host/xhci-mtk.c +++ b/drivers/usb/host/xhci-mtk.c @@ -347,6 +347,8 @@ static void usb_wakeup_set(struct xhci_hcd_mtk *mtk, bool enable) static int xhci_mtk_setup(struct usb_hcd *hcd); static const struct xhci_driver_overrides xhci_mtk_overrides __initconst = 
{ .reset = xhci_mtk_setup, + .check_bandwidth = xhci_mtk_check_bandwidth, + .reset_bandwidth = xhci_mtk_reset_bandwidth, }; static struct hc_driver __read_mostly xhci_mtk_hc_driver; diff --git a/drivers/usb/host/xhci-mtk.h b/drivers/usb/host/xhci-mtk.h index a93cfe817904..cbb09dfea62e 100644 --- a/drivers/usb/host/xhci-mtk.h +++ b/drivers/usb/host/xhci-mtk.h @@ -59,6 +59,7 @@ struct mu3h_sch_bw_info { * @ep_type: endpoint type * @maxpkt: max packet size of endpoint * @ep: address of usb_host_endpoint struct + * @allocated: the bandwidth is already allocated from bus_bw * @offset: which uframe of the interval that transfer should be * scheduled first time within the interval * @repeat: the time gap between two uframes that transfers are @@ -86,6 +87,7 @@ struct mu3h_sch_ep_info { u32 ep_type; u32 maxpkt; void *ep; + bool allocated; /* * mtk xHCI scheduling information put into reserved DWs * in ep context @@ -131,6 +133,7 @@ struct xhci_hcd_mtk { struct device *dev; struct usb_hcd *hcd; struct mu3h_sch_bw_info *sch_array; + struct list_head bw_ep_chk_list; struct mu3c_ippc_regs __iomem *ippc_regs; bool has_ippc; int num_u2_ports; @@ -166,6 +169,8 @@ int xhci_mtk_add_ep_quirk(struct usb_hcd *hcd, struct usb_device *udev, struct usb_host_endpoint *ep); void xhci_mtk_drop_ep_quirk(struct usb_hcd *hcd, struct usb_device *udev, struct usb_host_endpoint *ep); +int xhci_mtk_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev); +void xhci_mtk_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev); #else static inline int xhci_mtk_add_ep_quirk(struct usb_hcd *hcd, @@ -179,6 +184,16 @@ static inline void xhci_mtk_drop_ep_quirk(struct usb_hcd *hcd, { } +static inline int xhci_mtk_check_bandwidth(struct usb_hcd *hcd, + struct usb_device *udev) +{ + return 0; +} + +static inline void xhci_mtk_reset_bandwidth(struct usb_hcd *hcd, + struct usb_device *udev) +{ +} #endif #endif /* _XHCI_MTK_H_ */ diff --git a/drivers/usb/host/xhci-mvebu.c b/drivers/usb/host/xhci-mvebu.c index 60651a50770f..8ca1a235d164 100644 --- a/drivers/usb/host/xhci-mvebu.c +++ b/drivers/usb/host/xhci-mvebu.c @@ -8,6 +8,7 @@ #include <linux/mbus.h> #include <linux/of.h> #include <linux/platform_device.h> +#include <linux/phy/phy.h> #include <linux/usb.h> #include <linux/usb/hcd.h> @@ -74,6 +75,47 @@ int xhci_mvebu_mbus_init_quirk(struct usb_hcd *hcd) return 0; } +int xhci_mvebu_a3700_plat_setup(struct usb_hcd *hcd) +{ + struct xhci_hcd *xhci = hcd_to_xhci(hcd); + struct device *dev = hcd->self.controller; + struct phy *phy; + int ret; + + /* Old bindings miss the PHY handle */ + phy = of_phy_get(dev->of_node, "usb3-phy"); + if (IS_ERR(phy) && PTR_ERR(phy) == -EPROBE_DEFER) + return -EPROBE_DEFER; + else if (IS_ERR(phy)) + goto phy_out; + + ret = phy_init(phy); + if (ret) + goto phy_put; + + ret = phy_set_mode(phy, PHY_MODE_USB_HOST_SS); + if (ret) + goto phy_exit; + + ret = phy_power_on(phy); + if (ret == -EOPNOTSUPP) { + /* Skip initialization of XHCI PHY when it is unsupported by firmware */ + dev_warn(dev, "PHY unsupported by firmware\n"); + xhci->quirks |= XHCI_SKIP_PHY_INIT; + } + if (ret) + goto phy_exit; + + phy_power_off(phy); +phy_exit: + phy_exit(phy); +phy_put: + of_phy_put(phy); +phy_out: + + return 0; +} + int xhci_mvebu_a3700_init_quirk(struct usb_hcd *hcd) { struct xhci_hcd *xhci = hcd_to_xhci(hcd); diff --git a/drivers/usb/host/xhci-mvebu.h b/drivers/usb/host/xhci-mvebu.h index 3be021793cc8..01bf3fcb3eca 100644 --- a/drivers/usb/host/xhci-mvebu.h +++ b/drivers/usb/host/xhci-mvebu.h @@ -12,6 +12,7 @@ struct
usb_hcd; #if IS_ENABLED(CONFIG_USB_XHCI_MVEBU) int xhci_mvebu_mbus_init_quirk(struct usb_hcd *hcd); +int xhci_mvebu_a3700_plat_setup(struct usb_hcd *hcd); int xhci_mvebu_a3700_init_quirk(struct usb_hcd *hcd); #else static inline int xhci_mvebu_mbus_init_quirk(struct usb_hcd *hcd) @@ -19,6 +20,11 @@ static inline int xhci_mvebu_mbus_init_quirk(struct usb_hcd *hcd) return 0; } +static inline int xhci_mvebu_a3700_plat_setup(struct usb_hcd *hcd) +{ + return 0; +} + static inline int xhci_mvebu_a3700_init_quirk(struct usb_hcd *hcd) { return 0; diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c index 4d34f6005381..c1edcc9b13ce 100644 --- a/drivers/usb/host/xhci-plat.c +++ b/drivers/usb/host/xhci-plat.c @@ -44,6 +44,16 @@ static void xhci_priv_plat_start(struct usb_hcd *hcd) priv->plat_start(hcd); } +static int xhci_priv_plat_setup(struct usb_hcd *hcd) +{ + struct xhci_plat_priv *priv = hcd_to_xhci_priv(hcd); + + if (!priv->plat_setup) + return 0; + + return priv->plat_setup(hcd); +} + static int xhci_priv_init_quirk(struct usb_hcd *hcd) { struct xhci_plat_priv *priv = hcd_to_xhci_priv(hcd); @@ -111,6 +121,7 @@ static const struct xhci_plat_priv xhci_plat_marvell_armada = { }; static const struct xhci_plat_priv xhci_plat_marvell_armada3700 = { + .plat_setup = xhci_mvebu_a3700_plat_setup, .init_quirk = xhci_mvebu_a3700_init_quirk, }; @@ -330,7 +341,14 @@ static int xhci_plat_probe(struct platform_device *pdev) hcd->tpl_support = of_usb_host_tpl_support(sysdev->of_node); xhci->shared_hcd->tpl_support = hcd->tpl_support; - if (priv && (priv->quirks & XHCI_SKIP_PHY_INIT)) + + if (priv) { + ret = xhci_priv_plat_setup(hcd); + if (ret) + goto disable_usb_phy; + } + + if ((xhci->quirks & XHCI_SKIP_PHY_INIT) || (priv && (priv->quirks & XHCI_SKIP_PHY_INIT))) hcd->skip_phy_initialization = 1; if (priv && (priv->quirks & XHCI_SG_TRB_CACHE_SIZE_QUIRK)) diff --git a/drivers/usb/host/xhci-plat.h b/drivers/usb/host/xhci-plat.h index 1fb149d1fbce..561d0b7bce09 100644 --- a/drivers/usb/host/xhci-plat.h +++ b/drivers/usb/host/xhci-plat.h @@ -13,6 +13,7 @@ struct xhci_plat_priv { const char *firmware_name; unsigned long long quirks; + int (*plat_setup)(struct usb_hcd *); void (*plat_start)(struct usb_hcd *); int (*init_quirk)(struct usb_hcd *); int (*suspend_quirk)(struct usb_hcd *); diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index 5677b81c0915..5e548a1c93ab 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c @@ -59,6 +59,10 @@ #include "xhci-trace.h" #include "xhci-mtk.h" +static int queue_command(struct xhci_hcd *xhci, struct xhci_command *cmd, + u32 field1, u32 field2, + u32 field3, u32 field4, bool command_must_succeed); + /* * Returns zero if the TRB isn't in this segment, otherwise it returns the DMA * address of the TRB. @@ -151,10 +155,11 @@ static void next_trb(struct xhci_hcd *xhci, /* * See Cycle bit rules. SW is the consumer for the event ring only. - * Don't make a ring full of link TRBs. That would be dumb and this would loop. 
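The sentence this hunk deletes used to warn against rings made entirely of link TRBs; the new inc_deq()/inc_enq() bodies below enforce the same idea defensively, counting link TRBs as they chase them and bailing out once the count exceeds the number of segments, since that can only mean the links form a cycle. The guard in isolation, on a simplified ring element:

/*
 * Sketch: follow 'next' links, but refuse to loop forever on a
 * corrupted ring whose link elements form a cycle.
 */
struct ring_elem {
	struct ring_elem *next;
	int is_link;		/* stand-in for trb_is_link() */
};

static struct ring_elem *skip_links(struct ring_elem *cur,
				    unsigned int num_segs)
{
	unsigned int link_count = 0;

	while (cur->is_link) {
		cur = cur->next;
		if (link_count++ > num_segs)
			return 0;	/* endless link loop detected */
	}
	return cur;
}
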
*/ void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring) { + unsigned int link_trb_count = 0; + /* event ring doesn't have link trbs, check for last trb */ if (ring->type == TYPE_EVENT) { if (!last_trb_on_seg(ring->deq_seg, ring->dequeue)) { @@ -170,14 +175,23 @@ void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring) /* All other rings have link trbs */ if (!trb_is_link(ring->dequeue)) { - ring->dequeue++; - ring->num_trbs_free++; + if (last_trb_on_seg(ring->deq_seg, ring->dequeue)) { + xhci_warn(xhci, "Missing link TRB at end of segment\n"); + } else { + ring->dequeue++; + ring->num_trbs_free++; + } } + while (trb_is_link(ring->dequeue)) { ring->deq_seg = ring->deq_seg->next; ring->dequeue = ring->deq_seg->trbs; - } + if (link_trb_count++ > ring->num_segs) { + xhci_warn(xhci, "Ring is an endless link TRB loop\n"); + break; + } + } out: trace_xhci_inc_deq(ring); @@ -186,7 +200,6 @@ out: /* * See Cycle bit rules. SW is the consumer for the event ring only. - * Don't make a ring full of link TRBs. That would be dumb and this would loop. * * If we've just enqueued a TRB that is in the middle of a TD (meaning the * chain bit is set), then set the chain bit in all the following link TRBs. @@ -206,11 +219,18 @@ static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring, { u32 chain; union xhci_trb *next; + unsigned int link_trb_count = 0; chain = le32_to_cpu(ring->enqueue->generic.field[3]) & TRB_CHAIN; /* If this is not event ring, there is one less usable TRB */ if (!trb_is_link(ring->enqueue)) ring->num_trbs_free--; + + if (last_trb_on_seg(ring->enq_seg, ring->enqueue)) { + xhci_err(xhci, "Tried to move enqueue past ring segment\n"); + return; + } + next = ++(ring->enqueue); /* Update the dequeue pointer further if that was a link TRB */ @@ -247,6 +267,11 @@ static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring, ring->enq_seg = ring->enq_seg->next; ring->enqueue = ring->enq_seg->trbs; next = ring->enqueue; + + if (link_trb_count++ > ring->num_segs) { + xhci_warn(xhci, "%s: Ring link TRB loop\n", __func__); + break; + } } trace_xhci_inc_enq(ring); @@ -408,9 +433,8 @@ void xhci_ring_ep_doorbell(struct xhci_hcd *xhci, trace_xhci_ring_ep_doorbell(slot_id, DB_VALUE(ep_index, stream_id)); writel(DB_VALUE(ep_index, stream_id), db_addr); - /* The CPU has better things to do at this point than wait for a - * write-posting flush. It'll get there soon enough. 
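The doorbell path below replaces the old "it'll get there soon enough" stance with an explicit read-back: reading an MMIO register after writing it forces the posted write out to the device before the CPU proceeds, trading a little latency for a firm ordering guarantee. The whole idiom:

#include <linux/io.h>
#include <linux/types.h>

/* Sketch: post the doorbell write, then read back to flush it. */
static void ring_doorbell(void __iomem *db_addr, u32 value)
{
	writel(value, db_addr);
	readl(db_addr);		/* read-back flushes the posted write */
}
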
- */ + /* flush the write */ + readl(db_addr); } /* Ring the doorbell for any rings with pending URBs */ @@ -446,6 +470,46 @@ void xhci_ring_doorbell_for_active_rings(struct xhci_hcd *xhci, ring_doorbell_for_active_rings(xhci, slot_id, ep_index); } +static struct xhci_virt_ep *xhci_get_virt_ep(struct xhci_hcd *xhci, + unsigned int slot_id, + unsigned int ep_index) +{ + if (slot_id == 0 || slot_id >= MAX_HC_SLOTS) { + xhci_warn(xhci, "Invalid slot_id %u\n", slot_id); + return NULL; + } + if (ep_index >= EP_CTX_PER_DEV) { + xhci_warn(xhci, "Invalid endpoint index %u\n", ep_index); + return NULL; + } + if (!xhci->devs[slot_id]) { + xhci_warn(xhci, "No xhci virt device for slot_id %u\n", slot_id); + return NULL; + } + + return &xhci->devs[slot_id]->eps[ep_index]; +} + +static struct xhci_ring *xhci_virt_ep_to_ring(struct xhci_hcd *xhci, + struct xhci_virt_ep *ep, + unsigned int stream_id) +{ + /* common case, no streams */ + if (!(ep->ep_state & EP_HAS_STREAMS)) + return ep->ring; + + if (!ep->stream_info) + return NULL; + + if (stream_id == 0 || stream_id >= ep->stream_info->num_streams) { + xhci_warn(xhci, "Invalid stream_id %u request for slot_id %u ep_index %u\n", + stream_id, ep->vdev->slot_id, ep->ep_index); + return NULL; + } + + return ep->stream_info->stream_rings[stream_id]; +} + /* Get the right ring for the given slot_id, ep_index and stream_id. * If the endpoint supports streams, boundary check the URB's stream ID. * If the endpoint doesn't support streams, return the singular endpoint ring. @@ -456,30 +520,11 @@ struct xhci_ring *xhci_triad_to_transfer_ring(struct xhci_hcd *xhci, { struct xhci_virt_ep *ep; - ep = &xhci->devs[slot_id]->eps[ep_index]; - /* Common case: no streams */ - if (!(ep->ep_state & EP_HAS_STREAMS)) - return ep->ring; - - if (stream_id == 0) { - xhci_warn(xhci, - "WARN: Slot ID %u, ep index %u has streams, " - "but URB has no stream ID.\n", - slot_id, ep_index); + ep = xhci_get_virt_ep(xhci, slot_id, ep_index); + if (!ep) return NULL; - } - if (stream_id < ep->stream_info->num_streams) - return ep->stream_info->stream_rings[stream_id]; - - xhci_warn(xhci, - "WARN: Slot ID %u, ep index %u has " - "stream IDs 1 to %u allocated, " - "but stream ID %u is requested.\n", - slot_id, ep_index, - ep->stream_info->num_streams - 1, - stream_id); - return NULL; + return xhci_virt_ep_to_ring(xhci, ep, stream_id); } @@ -506,73 +551,55 @@ static u64 xhci_get_hw_deq(struct xhci_hcd *xhci, struct xhci_virt_device *vdev, return le64_to_cpu(ep_ctx->deq); } -/* - * Move the xHC's endpoint ring dequeue pointer past cur_td. - * Record the new state of the xHC's endpoint ring dequeue segment, - * dequeue pointer, stream id, and new consumer cycle state in state. - * Update our internal representation of the ring's dequeue pointer. - * - * We do this in three jumps: - * - First we update our new ring state to be the same as when the xHC stopped. - * - Then we traverse the ring to find the segment that contains - * the last TRB in the TD. We toggle the xHC's new cycle state when we pass - * any link TRBs with the toggle cycle bit set. - * - Finally we move the dequeue state one TRB further, toggling the cycle bit - * if we've moved it past a link TRB with the toggle cycle bit set. - * - * Some of the uses of xhci_generic_trb are grotty, but if they're done - * with correct __le32 accesses they should work fine. Only users of this are - * in here. 
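xhci_get_virt_ep() and xhci_virt_ep_to_ring() above concentrate the slot_id/ep_index/stream_id range checks that callers used to open-code or skip entirely, which matters because all three indices arrive in hardware-written TRBs. Reduced to a standalone sketch (MAX_SLOTS and EPS_PER_DEV stand in for the driver's MAX_HC_SLOTS and EP_CTX_PER_DEV):

#include <stddef.h>

#define MAX_SLOTS   256
#define EPS_PER_DEV 31

struct vep  { int in_use; };
struct vdev { struct vep eps[EPS_PER_DEV]; };

static struct vdev *devs[MAX_SLOTS];

/* Validate device-table indices before they touch any array, so a
 * malformed event cannot index past the table or a NULL device. */
static struct vep *get_vep(unsigned int slot_id, unsigned int ep_index)
{
        if (slot_id == 0 || slot_id >= MAX_SLOTS)
                return NULL;
        if (ep_index >= EPS_PER_DEV)
                return NULL;
        if (!devs[slot_id])
                return NULL;
        return &devs[slot_id]->eps[ep_index];
}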
- */ -void xhci_find_new_dequeue_state(struct xhci_hcd *xhci, - unsigned int slot_id, unsigned int ep_index, - unsigned int stream_id, struct xhci_td *cur_td, - struct xhci_dequeue_state *state) +static int xhci_move_dequeue_past_td(struct xhci_hcd *xhci, + unsigned int slot_id, unsigned int ep_index, + unsigned int stream_id, struct xhci_td *td) { struct xhci_virt_device *dev = xhci->devs[slot_id]; struct xhci_virt_ep *ep = &dev->eps[ep_index]; struct xhci_ring *ep_ring; + struct xhci_command *cmd; struct xhci_segment *new_seg; union xhci_trb *new_deq; + int new_cycle; dma_addr_t addr; u64 hw_dequeue; bool cycle_found = false; bool td_last_trb_found = false; + u32 trb_sct = 0; + int ret; ep_ring = xhci_triad_to_transfer_ring(xhci, slot_id, ep_index, stream_id); if (!ep_ring) { - xhci_warn(xhci, "WARN can't find new dequeue state " - "for invalid stream ID %u.\n", - stream_id); - return; + xhci_warn(xhci, "WARN can't find new dequeue, invalid stream ID %u\n", + stream_id); + return -ENODEV; } /* * A cancelled TD can complete with a stall if HW cached the trb. - * In this case driver can't find cur_td, but if the ring is empty we + * In this case driver can't find td, but if the ring is empty we * can move the dequeue pointer to the current enqueue position. + * We shouldn't hit this anymore as cached cancelled TRBs are given back + * after clearing the cache, but be on the safe side and keep it anyway */ - if (!cur_td) { + if (!td) { if (list_empty(&ep_ring->td_list)) { - state->new_deq_seg = ep_ring->enq_seg; - state->new_deq_ptr = ep_ring->enqueue; - state->new_cycle_state = ep_ring->cycle_state; - goto done; + new_seg = ep_ring->enq_seg; + new_deq = ep_ring->enqueue; + new_cycle = ep_ring->cycle_state; + xhci_dbg(xhci, "ep ring empty, Set new dequeue = enqueue"); + goto deq_found; } else { - xhci_warn(xhci, "Can't find new dequeue state, missing cur_td\n"); - return; + xhci_warn(xhci, "Can't find new dequeue state, missing td\n"); + return -EINVAL; } } - /* Dig out the cycle state saved by the xHC during the stop ep cmd */ - xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, - "Finding endpoint context"); - hw_dequeue = xhci_get_hw_deq(xhci, dev, ep_index, stream_id); new_seg = ep_ring->deq_seg; new_deq = ep_ring->dequeue; - state->new_cycle_state = hw_dequeue & 0x1; - state->stream_id = stream_id; + new_cycle = hw_dequeue & 0x1; /* * We want to find the pointer, segment and cycle state of the new trb @@ -587,40 +614,71 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci, if (td_last_trb_found) break; } - if (new_deq == cur_td->last_trb) + if (new_deq == td->last_trb) td_last_trb_found = true; if (cycle_found && trb_is_link(new_deq) && link_trb_toggles_cycle(new_deq)) - state->new_cycle_state ^= 0x1; + new_cycle ^= 0x1; next_trb(xhci, ep_ring, &new_seg, &new_deq); /* Search wrapped around, bail out */ if (new_deq == ep->ring->dequeue) { xhci_err(xhci, "Error: Failed finding new dequeue state\n"); - state->new_deq_seg = NULL; - state->new_deq_ptr = NULL; - return; + return -EINVAL; } } while (!cycle_found || !td_last_trb_found); - state->new_deq_seg = new_seg; - state->new_deq_ptr = new_deq; +deq_found: -done: /* Don't update the ring cycle state for the producer (us). 
*/ - xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, - "Cycle state = 0x%x", state->new_cycle_state); + addr = xhci_trb_virt_to_dma(new_seg, new_deq); + if (addr == 0) { + xhci_warn(xhci, "Can't find dma of new dequeue ptr\n"); + xhci_warn(xhci, "deq seg = %p, deq ptr = %p\n", new_seg, new_deq); + return -EINVAL; + } + + if ((ep->ep_state & SET_DEQ_PENDING)) { + xhci_warn(xhci, "Set TR Deq already pending, don't submit for 0x%pad\n", + &addr); + return -EBUSY; + } + + /* This function gets called from contexts where it cannot sleep */ + cmd = xhci_alloc_command(xhci, false, GFP_ATOMIC); + if (!cmd) { + xhci_warn(xhci, "Can't alloc Set TR Deq cmd 0x%pad\n", &addr); + return -ENOMEM; + } + + if (stream_id) + trb_sct = SCT_FOR_TRB(SCT_PRI_TR); + ret = queue_command(xhci, cmd, + lower_32_bits(addr) | trb_sct | new_cycle, + upper_32_bits(addr), + STREAM_ID_FOR_TRB(stream_id), SLOT_ID_FOR_TRB(slot_id) | + EP_ID_FOR_TRB(ep_index) | TRB_TYPE(TRB_SET_DEQ), false); + if (ret < 0) { + xhci_free_command(xhci, cmd); + return ret; + } + ep->queued_deq_seg = new_seg; + ep->queued_deq_ptr = new_deq; xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, - "New dequeue segment = %p (virtual)", - state->new_deq_seg); - addr = xhci_trb_virt_to_dma(state->new_deq_seg, state->new_deq_ptr); - xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, - "New dequeue pointer = 0x%llx (DMA)", - (unsigned long long) addr); + "Set TR Deq ptr 0x%llx, cycle %u\n", addr, new_cycle); + + /* Stop the TD queueing code from ringing the doorbell until + * this command completes. The HC won't set the dequeue pointer + * if the ring is running, and ringing the doorbell starts the + * ring running. + */ + ep->ep_state |= SET_DEQ_PENDING; + xhci_ring_cmd_db(xhci); + return 0; } /* flip_cycle means flip the cycle bit of all but the first and last TRB. @@ -699,159 +757,334 @@ static void xhci_unmap_td_bounce_buffer(struct xhci_hcd *xhci, dma_unmap_single(dev, seg->bounce_dma, ring->bounce_buf_len, DMA_FROM_DEVICE); /* for in tranfers we need to copy the data from bounce to sg */ - len = sg_pcopy_from_buffer(urb->sg, urb->num_sgs, seg->bounce_buf, - seg->bounce_len, seg->bounce_offs); - if (len != seg->bounce_len) - xhci_warn(xhci, "WARN Wrong bounce buffer read length: %zu != %d\n", - len, seg->bounce_len); + if (urb->num_sgs) { + len = sg_pcopy_from_buffer(urb->sg, urb->num_sgs, seg->bounce_buf, + seg->bounce_len, seg->bounce_offs); + if (len != seg->bounce_len) + xhci_warn(xhci, "WARN Wrong bounce buffer read length: %zu != %d\n", + len, seg->bounce_len); + } else { + memcpy(urb->transfer_buffer + seg->bounce_offs, seg->bounce_buf, + seg->bounce_len); + } seg->bounce_len = 0; seg->bounce_offs = 0; } -/* - * When we get a command completion for a Stop Endpoint Command, we need to - * unlink any cancelled TDs from the ring. There are two ways to do that: - * - * 1. If the HW was in the middle of processing the TD that needs to be - * cancelled, then we must move the ring's dequeue pointer past the last TRB - * in the TD with a Set Dequeue Pointer Command. - * 2. Otherwise, we turn all the TRBs in the TD into No-op TRBs (with the chain - * bit cleared) so that the HW will skip over them. 
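xhci_td_cleanup() is hoisted up here so the new cancellation paths can share it, and it keeps the long-standing defensive clamp: urb->actual_length is unsigned, so a controller that reports more bytes than the buffer holds would otherwise hand the URB's owner an enormous length. The check in isolation, as a sketch:

/* Trust the buffer length over the hardware-reported count; an
 * impossible actual_length becomes a zero-byte result instead of a
 * huge unsigned value propagating to the caller. */
static unsigned int clamp_actual_length(unsigned int actual,
                                        unsigned int buf_len)
{
        return (actual > buf_len) ? 0 : actual;
}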
- */ -static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id, - union xhci_trb *trb, struct xhci_event_cmd *event) +static int xhci_td_cleanup(struct xhci_hcd *xhci, struct xhci_td *td, + struct xhci_ring *ep_ring, int status) { - unsigned int ep_index; - struct xhci_ring *ep_ring; - struct xhci_virt_ep *ep; - struct xhci_td *cur_td = NULL; - struct xhci_td *last_unlinked_td; - struct xhci_ep_ctx *ep_ctx; - struct xhci_virt_device *vdev; - u64 hw_deq; - struct xhci_dequeue_state deq_state; + struct urb *urb = NULL; - if (unlikely(TRB_TO_SUSPEND_PORT(le32_to_cpu(trb->generic.field[3])))) { - if (!xhci->devs[slot_id]) - xhci_warn(xhci, "Stop endpoint command " - "completion for disabled slot %u\n", - slot_id); - return; + /* Clean up the endpoint's TD list */ + urb = td->urb; + + /* if a bounce buffer was used to align this td then unmap it */ + xhci_unmap_td_bounce_buffer(xhci, ep_ring, td); + + /* Do one last check of the actual transfer length. + * If the host controller said we transferred more data than the buffer + * length, urb->actual_length will be a very big number (since it's + * unsigned). Play it safe and say we didn't transfer anything. + */ + if (urb->actual_length > urb->transfer_buffer_length) { + xhci_warn(xhci, "URB req %u and actual %u transfer length mismatch\n", + urb->transfer_buffer_length, urb->actual_length); + urb->actual_length = 0; + status = 0; } + /* TD might be removed from td_list if we are giving back a cancelled URB */ + if (!list_empty(&td->td_list)) + list_del_init(&td->td_list); + /* Giving back a cancelled URB, or if a slated TD completed anyway */ + if (!list_empty(&td->cancelled_td_list)) + list_del_init(&td->cancelled_td_list); - memset(&deq_state, 0, sizeof(deq_state)); - ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3])); + inc_td_cnt(urb); + /* Giveback the urb when all the tds are completed */ + if (last_td_in_urb(td)) { + if ((urb->actual_length != urb->transfer_buffer_length && + (urb->transfer_flags & URB_SHORT_NOT_OK)) || + (status != 0 && !usb_endpoint_xfer_isoc(&urb->ep->desc))) + xhci_dbg(xhci, "Giveback URB %p, len = %d, expected = %d, status = %d\n", + urb, urb->actual_length, + urb->transfer_buffer_length, status); - vdev = xhci->devs[slot_id]; - ep_ctx = xhci_get_ep_ctx(xhci, vdev->out_ctx, ep_index); - trace_xhci_handle_cmd_stop_ep(ep_ctx); + /* set isoc urb status to 0 just as EHCI, UHCI, and OHCI */ + if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) + status = 0; + xhci_giveback_urb_in_irq(xhci, td, status); + } - ep = &xhci->devs[slot_id]->eps[ep_index]; - last_unlinked_td = list_last_entry(&ep->cancelled_td_list, - struct xhci_td, cancelled_td_list); + return 0; +} - if (list_empty(&ep->cancelled_td_list)) { - xhci_stop_watchdog_timer_in_irq(xhci, ep); - ring_doorbell_for_active_rings(xhci, slot_id, ep_index); - return; + +/* Complete the cancelled URBs we unlinked from td_list. */ +static void xhci_giveback_invalidated_tds(struct xhci_virt_ep *ep) +{ + struct xhci_ring *ring; + struct xhci_td *td, *tmp_td; + + list_for_each_entry_safe(td, tmp_td, &ep->cancelled_td_list, + cancelled_td_list) { + + /* + * Doesn't matter what we pass for status, since the core will + * just overwrite it (because the URB has been unlinked). 
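The rewritten cancellation machinery pivots on a small per-TD state, td->cancel_status, set when a TD joins cancelled_td_list and consumed by xhci_giveback_invalidated_tds() above and, further down, by xhci_invalidate_cancelled_tds() and the Set TR Deq completion handler. The constants below are the ones this patch uses; the enum tag and the comments are an editorial summary of the transitions, not the header's literal text:

enum cancel_status_sketch {
        TD_DIRTY,               /* URB unlinked; HW may still have the TD cached */
        TD_HALTED,              /* endpoint halted on this TD */
        TD_CLEARING_CACHE,      /* Set TR Deq queued to move HW past the TD */
        TD_CLEARED,             /* no-oped or dequeued past; safe to give back */
};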
+ */ + ring = xhci_urb_to_transfer_ring(ep->xhci, td->urb); + + if (td->cancel_status == TD_CLEARED) + xhci_td_cleanup(ep->xhci, td, ring, 0); + + if (ep->xhci->xhc_state & XHCI_STATE_DYING) + return; + } +} + +static int xhci_reset_halted_ep(struct xhci_hcd *xhci, unsigned int slot_id, + unsigned int ep_index, enum xhci_ep_reset_type reset_type) +{ + struct xhci_command *command; + int ret = 0; + + command = xhci_alloc_command(xhci, false, GFP_ATOMIC); + if (!command) { + ret = -ENOMEM; + goto done; } - /* Fix up the ep ring first, so HW stops executing cancelled TDs. - * We have the xHCI lock, so nothing can modify this list until we drop - * it. We're also in the event handler, so we can't get re-interrupted - * if another Stop Endpoint command completes + ret = xhci_queue_reset_ep(xhci, command, slot_id, ep_index, reset_type); +done: + if (ret) + xhci_err(xhci, "ERROR queuing reset endpoint for slot %d ep_index %d, %d\n", + slot_id, ep_index, ret); + return ret; +} + +static void xhci_handle_halted_endpoint(struct xhci_hcd *xhci, + struct xhci_virt_ep *ep, unsigned int stream_id, + struct xhci_td *td, + enum xhci_ep_reset_type reset_type) +{ + unsigned int slot_id = ep->vdev->slot_id; + int err; + + /* + * Avoid resetting endpoint if link is inactive. Can cause host hang. + * Device will be reset soon to recover the link so don't do anything */ - list_for_each_entry(cur_td, &ep->cancelled_td_list, cancelled_td_list) { + if (ep->vdev->flags & VDEV_PORT_ERROR) + return; + + /* add td to cancelled list and let reset ep handler take care of it */ + if (reset_type == EP_HARD_RESET) { + ep->ep_state |= EP_HARD_CLEAR_TOGGLE; + if (td && list_empty(&td->cancelled_td_list)) { + list_add_tail(&td->cancelled_td_list, &ep->cancelled_td_list); + td->cancel_status = TD_HALTED; + } + } + + if (ep->ep_state & EP_HALTED) { + xhci_dbg(xhci, "Reset ep command already pending\n"); + return; + } + + err = xhci_reset_halted_ep(xhci, slot_id, ep->ep_index, reset_type); + if (err) + return; + + ep->ep_state |= EP_HALTED; + + xhci_ring_cmd_db(xhci); +} + +/* + * Fix up the ep ring first, so HW stops executing cancelled TDs. + * We have the xHCI lock, so nothing can modify this list until we drop it. + * We're also in the event handler, so we can't get re-interrupted if another + * Stop Endpoint command completes. + * + * only call this when ring is not in a running state + */ + +static int xhci_invalidate_cancelled_tds(struct xhci_virt_ep *ep) +{ + struct xhci_hcd *xhci; + struct xhci_td *td = NULL; + struct xhci_td *tmp_td = NULL; + struct xhci_td *cached_td = NULL; + struct xhci_ring *ring; + u64 hw_deq; + unsigned int slot_id = ep->vdev->slot_id; + int err; + + xhci = ep->xhci; + + list_for_each_entry_safe(td, tmp_td, &ep->cancelled_td_list, cancelled_td_list) { xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, "Removing canceled TD starting at 0x%llx (dma).", (unsigned long long)xhci_trb_virt_to_dma( - cur_td->start_seg, cur_td->first_trb)); - ep_ring = xhci_urb_to_transfer_ring(xhci, cur_td->urb); - if (!ep_ring) { - /* This shouldn't happen unless a driver is mucking - * with the stream ID after submission. This will - * leave the TD on the hardware ring, and the hardware - * will try to execute it, and may access a buffer - * that has already been freed. In the best case, the - * hardware will execute it, and the event handler will - * ignore the completion event for that TD, since it was - * removed from the td_list for that endpoint. In - * short, don't muck with the stream ID after - * submission. 
- */ - xhci_warn(xhci, "WARN Cancelled URB %p " - "has invalid stream ID %u.\n", - cur_td->urb, - cur_td->urb->stream_id); - goto remove_finished_td; + td->start_seg, td->first_trb)); + list_del_init(&td->td_list); + ring = xhci_urb_to_transfer_ring(xhci, td->urb); + if (!ring) { + xhci_warn(xhci, "WARN Cancelled URB %p has invalid stream ID %u.\n", + td->urb, td->urb->stream_id); + continue; } /* - * If we stopped on the TD we need to cancel, then we have to + * If ring stopped on the TD we need to cancel, then we have to * move the xHC endpoint ring dequeue pointer past this TD. */ - hw_deq = xhci_get_hw_deq(xhci, vdev, ep_index, - cur_td->urb->stream_id); + hw_deq = xhci_get_hw_deq(xhci, ep->vdev, ep->ep_index, + td->urb->stream_id); hw_deq &= ~0xf; - if (trb_in_td(xhci, cur_td->start_seg, cur_td->first_trb, - cur_td->last_trb, hw_deq, false)) { - xhci_find_new_dequeue_state(xhci, slot_id, ep_index, - cur_td->urb->stream_id, - cur_td, &deq_state); + if (trb_in_td(xhci, td->start_seg, td->first_trb, + td->last_trb, hw_deq, false)) { + switch (td->cancel_status) { + case TD_CLEARED: /* TD is already no-op */ + case TD_CLEARING_CACHE: /* set TR deq command already queued */ + break; + case TD_DIRTY: /* TD is cached, clear it */ + case TD_HALTED: + /* FIXME stream case, several stopped rings */ + cached_td = td; + break; + } } else { - td_to_noop(xhci, ep_ring, cur_td, false); + td_to_noop(xhci, ring, td, false); + td->cancel_status = TD_CLEARED; + } + } + if (cached_td) { + cached_td->cancel_status = TD_CLEARING_CACHE; + + err = xhci_move_dequeue_past_td(xhci, slot_id, ep->ep_index, + cached_td->urb->stream_id, + cached_td); + /* Failed to move past cached td, try just setting it noop */ + if (err) { + td_to_noop(xhci, ring, cached_td, false); + cached_td->cancel_status = TD_CLEARED; } + cached_td = NULL; + } + return 0; +} -remove_finished_td: - /* - * The event handler won't see a completion for this TD anymore, - * so remove it from the endpoint ring's TD list. Keep it in - * the cancelled TD list for URB completion later. - */ - list_del_init(&cur_td->td_list); +/* + * Returns the TD the endpoint ring halted on. + * Only call for non-running rings without streams. + */ +static struct xhci_td *find_halted_td(struct xhci_virt_ep *ep) +{ + struct xhci_td *td; + u64 hw_deq; + + if (!list_empty(&ep->ring->td_list)) { /* Not streams compatible */ + hw_deq = xhci_get_hw_deq(ep->xhci, ep->vdev, ep->ep_index, 0); + hw_deq &= ~0xf; + td = list_first_entry(&ep->ring->td_list, struct xhci_td, td_list); + if (trb_in_td(ep->xhci, td->start_seg, td->first_trb, + td->last_trb, hw_deq, false)) + return td; } + return NULL; +} - xhci_stop_watchdog_timer_in_irq(xhci, ep); +/* + * When we get a command completion for a Stop Endpoint Command, we need to + * unlink any cancelled TDs from the ring. There are two ways to do that: + * + * 1. If the HW was in the middle of processing the TD that needs to be + * cancelled, then we must move the ring's dequeue pointer past the last TRB + * in the TD with a Set Dequeue Pointer Command. + * 2. Otherwise, we turn all the TRBs in the TD into No-op TRBs (with the chain + * bit cleared) so that the HW will skip over them. 
+ */ +static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id, + union xhci_trb *trb, u32 comp_code) +{ + unsigned int ep_index; + struct xhci_virt_ep *ep; + struct xhci_ep_ctx *ep_ctx; + struct xhci_td *td = NULL; + enum xhci_ep_reset_type reset_type; + struct xhci_command *command; - /* If necessary, queue a Set Transfer Ring Dequeue Pointer command */ - if (deq_state.new_deq_ptr && deq_state.new_deq_seg) { - xhci_queue_new_dequeue_state(xhci, slot_id, ep_index, - &deq_state); - xhci_ring_cmd_db(xhci); - } else { - /* Otherwise ring the doorbell(s) to restart queued transfers */ - ring_doorbell_for_active_rings(xhci, slot_id, ep_index); + if (unlikely(TRB_TO_SUSPEND_PORT(le32_to_cpu(trb->generic.field[3])))) { + if (!xhci->devs[slot_id]) + xhci_warn(xhci, "Stop endpoint command completion for disabled slot %u\n", + slot_id); + return; } + ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3])); + ep = xhci_get_virt_ep(xhci, slot_id, ep_index); + if (!ep) + return; + + ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep_index); + + trace_xhci_handle_cmd_stop_ep(ep_ctx); + + if (comp_code == COMP_CONTEXT_STATE_ERROR) { /* - * Drop the lock and complete the URBs in the cancelled TD list. - * New TDs to be cancelled might be added to the end of the list before - * we can complete all the URBs for the TDs we already unlinked. - * So stop when we've completed the URB for the last TD we unlinked. + * If the stop endpoint command raced with a halting endpoint we need to + * reset the host-side endpoint first. + * If the TD we halted on isn't cancelled it should be given back + * with a proper error code, and the ring dequeue moved past the TD. + * In the streams case we can't find hw_deq or the TD we halted on, so + * do a soft reset. + * + * The proper error code is unknown here: it would be -EPIPE if the + * device side of the endpoint halted (a STALL), and -EPROTO if not + * (transaction error). We use -EPROTO; if the device is stalled it + * should return a stall error on the next transfer, which then yields + * -EPIPE, and the device-side stall is noted and cleared by the class + * driver. */ - do { - cur_td = list_first_entry(&ep->cancelled_td_list, - struct xhci_td, cancelled_td_list); - list_del_init(&cur_td->cancelled_td_list); + switch (GET_EP_CTX_STATE(ep_ctx)) { + case EP_STATE_HALTED: + xhci_dbg(xhci, "Stop ep completion raced with stall, reset ep\n"); + if (ep->ep_state & EP_HAS_STREAMS) { + reset_type = EP_SOFT_RESET; + } else { + reset_type = EP_HARD_RESET; + td = find_halted_td(ep); + if (td) + td->status = -EPROTO; + } + /* reset ep, reset handler cleans up cancelled tds */ + xhci_handle_halted_endpoint(xhci, ep, 0, td, reset_type); + xhci_stop_watchdog_timer_in_irq(xhci, ep); + return; + case EP_STATE_RUNNING: + /* Race, HW handled stop ep cmd before ep was running */ + command = xhci_alloc_command(xhci, false, GFP_ATOMIC); + if (!command) + xhci_stop_watchdog_timer_in_irq(xhci, ep); - /* Clean up the cancelled URB */ /* Doesn't matter what we pass for status, since the core will - * just overwrite it (because the URB has been unlinked).
- */ - ep_ring = xhci_urb_to_transfer_ring(xhci, cur_td->urb); - xhci_unmap_td_bounce_buffer(xhci, ep_ring, cur_td); - inc_td_cnt(cur_td->urb); - if (last_td_in_urb(cur_td)) - xhci_giveback_urb_in_irq(xhci, cur_td, 0); + mod_timer(&ep->stop_cmd_timer, + jiffies + XHCI_STOP_EP_CMD_TIMEOUT * HZ); + xhci_queue_stop_endpoint(xhci, command, slot_id, ep_index, 0); + xhci_ring_cmd_db(xhci); - /* Stop processing the cancelled list if the watchdog timer is - * running. - */ - if (xhci->xhc_state & XHCI_STATE_DYING) return; - } while (cur_td != last_unlinked_td); + default: + break; + } + } + /* will queue a set TR deq if stopped on a cancelled, uncleared TD */ + xhci_invalidate_cancelled_tds(ep); + xhci_stop_watchdog_timer_in_irq(xhci, ep); - /* Return to the event handler with xhci->lock re-acquired */ + /* Otherwise ring the doorbell(s) to restart queued transfers */ + xhci_giveback_invalidated_tds(ep); + ring_doorbell_for_active_rings(xhci, slot_id, ep_index); } static void xhci_kill_ring_urbs(struct xhci_hcd *xhci, struct xhci_ring *ring) @@ -1064,17 +1297,18 @@ static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id, unsigned int ep_index; unsigned int stream_id; struct xhci_ring *ep_ring; - struct xhci_virt_device *dev; struct xhci_virt_ep *ep; struct xhci_ep_ctx *ep_ctx; struct xhci_slot_ctx *slot_ctx; + struct xhci_td *td, *tmp_td; ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3])); stream_id = TRB_TO_STREAM_ID(le32_to_cpu(trb->generic.field[2])); - dev = xhci->devs[slot_id]; - ep = &dev->eps[ep_index]; + ep = xhci_get_virt_ep(xhci, slot_id, ep_index); + if (!ep) + return; - ep_ring = xhci_stream_id_to_ring(dev, ep_index, stream_id); + ep_ring = xhci_virt_ep_to_ring(xhci, ep, stream_id); if (!ep_ring) { xhci_warn(xhci, "WARN Set TR deq ptr command for freed stream ID %u\n", stream_id); @@ -1082,8 +1316,8 @@ static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id, goto cleanup; } - ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index); - slot_ctx = xhci_get_slot_ctx(xhci, dev->out_ctx); + ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep_index); + slot_ctx = xhci_get_slot_ctx(xhci, ep->vdev->out_ctx); trace_xhci_handle_cmd_set_deq(slot_ctx); trace_xhci_handle_cmd_set_deq_ep(ep_ctx); @@ -1136,7 +1370,7 @@ static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id, /* Update the ring's dequeue segment and dequeue pointer * to reflect the new position. 
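Note that both giveback paths (xhci_giveback_invalidated_tds() and the Set TR Deq completion loop above) iterate with list_for_each_entry_safe(), because xhci_td_cleanup() unlinks the entry being visited; the "safe" variant simply reads the successor first. The same idiom on a bare singly linked list, for illustration:

struct node { struct node *next; };

/* Fetch the successor before handling the current node, so handle()
 * may free or re-link it without breaking the traversal. */
static void walk_safe(struct node *head, void (*handle)(struct node *))
{
        struct node *n, *next;

        for (n = head; n; n = next) {
                next = n->next;
                handle(n);
        }
}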
*/ - update_ring_for_set_deq_completion(xhci, dev, + update_ring_for_set_deq_completion(xhci, ep->vdev, ep_ring, ep_index); } else { xhci_warn(xhci, "Mismatch between completed Set TR Deq Ptr command & xHCI internal state.\n"); @@ -1144,11 +1378,19 @@ static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id, ep->queued_deq_seg, ep->queued_deq_ptr); } } - + /* HW cached TDs cleared from cache, give them back */ + list_for_each_entry_safe(td, tmp_td, &ep->cancelled_td_list, + cancelled_td_list) { + ep_ring = xhci_urb_to_transfer_ring(ep->xhci, td->urb); + if (td->cancel_status == TD_CLEARING_CACHE) { + td->cancel_status = TD_CLEARED; + xhci_td_cleanup(ep->xhci, td, ep_ring, td->status); + } + } cleanup: - dev->eps[ep_index].ep_state &= ~SET_DEQ_PENDING; - dev->eps[ep_index].queued_deq_seg = NULL; - dev->eps[ep_index].queued_deq_ptr = NULL; + ep->ep_state &= ~SET_DEQ_PENDING; + ep->queued_deq_seg = NULL; + ep->queued_deq_ptr = NULL; /* Restart any rings with pending URBs */ ring_doorbell_for_active_rings(xhci, slot_id, ep_index); } @@ -1156,13 +1398,16 @@ cleanup: static void xhci_handle_cmd_reset_ep(struct xhci_hcd *xhci, int slot_id, union xhci_trb *trb, u32 cmd_comp_code) { - struct xhci_virt_device *vdev; + struct xhci_virt_ep *ep; struct xhci_ep_ctx *ep_ctx; unsigned int ep_index; ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3])); - vdev = xhci->devs[slot_id]; - ep_ctx = xhci_get_ep_ctx(xhci, vdev->out_ctx, ep_index); + ep = xhci_get_virt_ep(xhci, slot_id, ep_index); + if (!ep) + return; + + ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep_index); trace_xhci_handle_cmd_reset_ep(ep_ctx); /* This command will only fail if the endpoint wasn't halted, @@ -1171,27 +1416,15 @@ static void xhci_handle_cmd_reset_ep(struct xhci_hcd *xhci, int slot_id, xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep, "Ignoring reset ep completion code of %u", cmd_comp_code); - /* HW with the reset endpoint quirk needs to have a configure endpoint - * command complete before the endpoint can be used. Queue that here - * because the HW can't handle two commands being queued in a row. - */ - if (xhci->quirks & XHCI_RESET_EP_QUIRK) { - struct xhci_command *command; + /* Cleanup cancelled TDs as ep is stopped. May queue a Set TR Deq cmd */ + xhci_invalidate_cancelled_tds(ep); - command = xhci_alloc_command(xhci, false, GFP_ATOMIC); - if (!command) - return; + if (xhci->quirks & XHCI_RESET_EP_QUIRK) + xhci_dbg(xhci, "Note: Removed workaround to queue config ep for this hw"); + /* Clear our internal halted state */ + ep->ep_state &= ~EP_HALTED; - xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, - "Queueing configure endpoint command"); - xhci_queue_configure_endpoint(xhci, command, - xhci->devs[slot_id]->in_ctx->dma, slot_id, - false); - xhci_ring_cmd_db(xhci); - } else { - /* Clear our internal halted state */ - xhci->devs[slot_id]->eps[ep_index].ep_state &= ~EP_HALTED; - } + xhci_giveback_invalidated_tds(ep); /* if this was a soft reset, then restart */ if ((le32_to_cpu(trb->generic.field[3])) & TRB_TSP) @@ -1226,7 +1459,7 @@ static void xhci_handle_cmd_disable_slot(struct xhci_hcd *xhci, int slot_id) } static void xhci_handle_cmd_config_ep(struct xhci_hcd *xhci, int slot_id, - struct xhci_event_cmd *event, u32 cmd_comp_code) + u32 cmd_comp_code) { struct xhci_virt_device *virt_dev; struct xhci_input_control_ctx *ctrl_ctx; @@ -1244,6 +1477,8 @@ static void xhci_handle_cmd_config_ep(struct xhci_hcd *xhci, int slot_id, * is not waiting on the configure endpoint command. 
*/ virt_dev = xhci->devs[slot_id]; + if (!virt_dev) + return; ctrl_ctx = xhci_get_input_control_ctx(virt_dev->in_ctx); if (!ctrl_ctx) { xhci_warn(xhci, "Could not get input context, bad type.\n"); @@ -1288,24 +1523,27 @@ static void xhci_handle_cmd_addr_dev(struct xhci_hcd *xhci, int slot_id) struct xhci_slot_ctx *slot_ctx; vdev = xhci->devs[slot_id]; + if (!vdev) + return; slot_ctx = xhci_get_slot_ctx(xhci, vdev->out_ctx); trace_xhci_handle_cmd_addr_dev(slot_ctx); } -static void xhci_handle_cmd_reset_dev(struct xhci_hcd *xhci, int slot_id, - struct xhci_event_cmd *event) +static void xhci_handle_cmd_reset_dev(struct xhci_hcd *xhci, int slot_id) { struct xhci_virt_device *vdev; struct xhci_slot_ctx *slot_ctx; vdev = xhci->devs[slot_id]; + if (!vdev) { + xhci_warn(xhci, "Reset device command completion for disabled slot %u\n", + slot_id); + return; + } slot_ctx = xhci_get_slot_ctx(xhci, vdev->out_ctx); trace_xhci_handle_cmd_reset_dev(slot_ctx); xhci_dbg(xhci, "Completed reset device command.\n"); - if (!xhci->devs[slot_id]) - xhci_warn(xhci, "Reset device command completion " - "for disabled slot %u\n", slot_id); } static void xhci_handle_cmd_nec_get_fw(struct xhci_hcd *xhci, @@ -1398,7 +1636,7 @@ time_out_completed: static void handle_cmd_completion(struct xhci_hcd *xhci, struct xhci_event_cmd *event) { - int slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags)); + unsigned int slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags)); u64 cmd_dma; dma_addr_t cmd_dequeue_dma; u32 cmd_comp_code; @@ -1406,6 +1644,11 @@ static void handle_cmd_completion(struct xhci_hcd *xhci, struct xhci_command *cmd; u32 cmd_type; + if (slot_id >= MAX_HC_SLOTS) { + xhci_warn(xhci, "Invalid slot_id %u\n", slot_id); + return; + } + cmd_dma = le64_to_cpu(event->cmd_trb); cmd_trb = xhci->cmd_ring->dequeue; @@ -1466,8 +1709,7 @@ static void handle_cmd_completion(struct xhci_hcd *xhci, break; case TRB_CONFIG_EP: if (!cmd->completion) - xhci_handle_cmd_config_ep(xhci, slot_id, event, - cmd_comp_code); + xhci_handle_cmd_config_ep(xhci, slot_id, cmd_comp_code); break; case TRB_EVAL_CONTEXT: break; @@ -1478,7 +1720,8 @@ static void handle_cmd_completion(struct xhci_hcd *xhci, WARN_ON(slot_id != TRB_TO_SLOT_ID( le32_to_cpu(cmd_trb->generic.field[3]))); if (!cmd->completion) - xhci_handle_cmd_stop_ep(xhci, slot_id, cmd_trb, event); + xhci_handle_cmd_stop_ep(xhci, slot_id, cmd_trb, + cmd_comp_code); break; case TRB_SET_DEQ: WARN_ON(slot_id != TRB_TO_SLOT_ID( @@ -1501,7 +1744,7 @@ static void handle_cmd_completion(struct xhci_hcd *xhci, */ slot_id = TRB_TO_SLOT_ID( le32_to_cpu(cmd_trb->generic.field[3])); - xhci_handle_cmd_reset_dev(xhci, slot_id, event); + xhci_handle_cmd_reset_dev(xhci, slot_id); break; case TRB_NEC_GET_FW: xhci_handle_cmd_nec_get_fw(xhci, event); @@ -1528,11 +1771,8 @@ event_handled: } static void handle_vendor_event(struct xhci_hcd *xhci, - union xhci_trb *event) + union xhci_trb *event, u32 trb_type) { - u32 trb_type; - - trb_type = TRB_FIELD_TO_TYPE(le32_to_cpu(event->generic.field[3])); xhci_dbg(xhci, "Vendor specific event TRB type = %u\n", trb_type); if (trb_type == TRB_NEC_CMD_COMP && (xhci->quirks & XHCI_NEC_HOST)) handle_cmd_completion(xhci, &event->event_cmd); @@ -1849,37 +2089,6 @@ static void xhci_clear_hub_tt_buffer(struct xhci_hcd *xhci, struct xhci_td *td, } } -static void xhci_cleanup_halted_endpoint(struct xhci_hcd *xhci, - unsigned int slot_id, unsigned int ep_index, - unsigned int stream_id, struct xhci_td *td, - enum xhci_ep_reset_type reset_type) -{ - struct xhci_virt_ep *ep = 
&xhci->devs[slot_id]->eps[ep_index]; - struct xhci_command *command; - - /* - * Avoid resetting endpoint if link is inactive. Can cause host hang. - * Device will be reset soon to recover the link so don't do anything - */ - if (xhci->devs[slot_id]->flags & VDEV_PORT_ERROR) - return; - - command = xhci_alloc_command(xhci, false, GFP_ATOMIC); - if (!command) - return; - - ep->ep_state |= EP_HALTED; - - xhci_queue_reset_ep(xhci, command, slot_id, ep_index, reset_type); - - if (reset_type == EP_HARD_RESET) { - ep->ep_state |= EP_HARD_CLEAR_TOGGLE; - xhci_cleanup_stalled_ring(xhci, slot_id, ep_index, stream_id, - td); - } - xhci_ring_cmd_db(xhci); -} - /* Check if an error has halted the endpoint ring. The class driver will * cleanup the halt for a non-default control endpoint if we indicate a stall. * However, a babble and other errors also halt the endpoint ring, and the class @@ -1920,82 +2129,63 @@ int xhci_is_vendor_info_code(struct xhci_hcd *xhci, unsigned int trb_comp_code) return 0; } -static int xhci_td_cleanup(struct xhci_hcd *xhci, struct xhci_td *td, - struct xhci_ring *ep_ring, int *status) -{ - struct urb *urb = NULL; - - /* Clean up the endpoint's TD list */ - urb = td->urb; - - /* if a bounce buffer was used to align this td then unmap it */ - xhci_unmap_td_bounce_buffer(xhci, ep_ring, td); - - /* Do one last check of the actual transfer length. - * If the host controller said we transferred more data than the buffer - * length, urb->actual_length will be a very big number (since it's - * unsigned). Play it safe and say we didn't transfer anything. - */ - if (urb->actual_length > urb->transfer_buffer_length) { - xhci_warn(xhci, "URB req %u and actual %u transfer length mismatch\n", - urb->transfer_buffer_length, urb->actual_length); - urb->actual_length = 0; - *status = 0; - } - list_del_init(&td->td_list); - /* Was this TD slated to be cancelled but completed anyway? 
*/ - if (!list_empty(&td->cancelled_td_list)) - list_del_init(&td->cancelled_td_list); - - inc_td_cnt(urb); - /* Giveback the urb when all the tds are completed */ - if (last_td_in_urb(td)) { - if ((urb->actual_length != urb->transfer_buffer_length && - (urb->transfer_flags & URB_SHORT_NOT_OK)) || - (*status != 0 && !usb_endpoint_xfer_isoc(&urb->ep->desc))) - xhci_dbg(xhci, "Giveback URB %p, len = %d, expected = %d, status = %d\n", - urb, urb->actual_length, - urb->transfer_buffer_length, *status); - - /* set isoc urb status to 0 just as EHCI, UHCI, and OHCI */ - if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) - *status = 0; - xhci_giveback_urb_in_irq(xhci, td, *status); - } - - return 0; -} - static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td, - struct xhci_transfer_event *event, - struct xhci_virt_ep *ep, int *status) + struct xhci_transfer_event *event, struct xhci_virt_ep *ep) { - struct xhci_virt_device *xdev; struct xhci_ep_ctx *ep_ctx; struct xhci_ring *ep_ring; - unsigned int slot_id; u32 trb_comp_code; - int ep_index; - slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags)); - xdev = xhci->devs[slot_id]; - ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1; ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer)); - ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index); + ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep->ep_index); trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len)); - if (trb_comp_code == COMP_STOPPED_LENGTH_INVALID || - trb_comp_code == COMP_STOPPED || - trb_comp_code == COMP_STOPPED_SHORT_PACKET) { - /* The Endpoint Stop Command completion will take care of any - * stopped TDs. A stopped TD may be restarted, so don't update + switch (trb_comp_code) { + case COMP_STOPPED_LENGTH_INVALID: + case COMP_STOPPED_SHORT_PACKET: + case COMP_STOPPED: + /* + * The "Stop Endpoint" completion will take care of any + * stopped TDs. A stopped TD may be restarted, so don't update * the ring dequeue pointer or take this TD off any lists yet. */ return 0; - } - if (trb_comp_code == COMP_STALL_ERROR || - xhci_requires_manual_halt_cleanup(xhci, ep_ctx, - trb_comp_code)) { + case COMP_USB_TRANSACTION_ERROR: + case COMP_BABBLE_DETECTED_ERROR: + case COMP_SPLIT_TRANSACTION_ERROR: + /* + * If the endpoint context state is not halted we might be + * racing with a reset endpoint command issued by an unsuccessful + * stop endpoint completion (context error). In that case the + * TD should be on the cancelled list, and the EP_HALTED flag set. + * + * Or it's not halted at all: the 0.95 spec states that a + * babbling control endpoint should not halt, while the 0.96 spec + * again says it should. Some HW claims to be 0.95 compliant, + * but it halts the control endpoint anyway.
+ */ + if (GET_EP_CTX_STATE(ep_ctx) != EP_STATE_HALTED) { + /* + * If EP_HALTED is set and TD is on the cancelled list + * the TD and dequeue pointer will be handled by reset + * ep command completion + */ + if ((ep->ep_state & EP_HALTED) && + !list_empty(&td->cancelled_td_list)) { + xhci_dbg(xhci, "Already resolving halted ep for 0x%llx\n", + (unsigned long long)xhci_trb_virt_to_dma( + td->start_seg, td->first_trb)); + return 0; + } + /* endpoint not halted, don't reset it */ + break; + } + /* Almost same procedure as for STALL_ERROR below */ + xhci_clear_hub_tt_buffer(xhci, td, ep); + xhci_handle_halted_endpoint(xhci, ep, ep_ring->stream_id, td, + EP_HARD_RESET); + return 0; + case COMP_STALL_ERROR: /* * xhci internal endpoint state will go to a "halt" state for * any stall, including default control pipe protocol stall. @@ -2006,18 +2196,24 @@ static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td, * stall later. Hub TT buffer should only be cleared for FS/LS * devices behind HS hubs for functional stalls. */ - if ((ep_index != 0) || (trb_comp_code != COMP_STALL_ERROR)) + if (ep->ep_index != 0) xhci_clear_hub_tt_buffer(xhci, td, ep); - xhci_cleanup_halted_endpoint(xhci, slot_id, ep_index, - ep_ring->stream_id, td, EP_HARD_RESET); - } else { - /* Update ring dequeue pointer */ - while (ep_ring->dequeue != td->last_trb) - inc_deq(xhci, ep_ring); - inc_deq(xhci, ep_ring); + + xhci_handle_halted_endpoint(xhci, ep, ep_ring->stream_id, td, + EP_HARD_RESET); + + return 0; /* xhci_handle_halted_endpoint marked td cancelled */ + default: + break; } - return xhci_td_cleanup(xhci, td, ep_ring, status); + /* Update ring dequeue pointer */ + ep_ring->dequeue = td->last_trb; + ep_ring->deq_seg = td->last_trb_seg; + ep_ring->num_trbs_free += td->num_trbs - 1; + inc_deq(xhci, ep_ring); + + return xhci_td_cleanup(xhci, td, ep_ring, td->status); } /* sum trb lengths from ring dequeue up to stop_trb, _excluding_ stop_trb */ @@ -2040,21 +2236,15 @@ static int sum_trb_lengths(struct xhci_hcd *xhci, struct xhci_ring *ring, */ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td, union xhci_trb *ep_trb, struct xhci_transfer_event *event, - struct xhci_virt_ep *ep, int *status) + struct xhci_virt_ep *ep) { - struct xhci_virt_device *xdev; - unsigned int slot_id; - int ep_index; struct xhci_ep_ctx *ep_ctx; u32 trb_comp_code; u32 remaining, requested; u32 trb_type; trb_type = TRB_FIELD_TO_TYPE(le32_to_cpu(ep_trb->generic.field[3])); - slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags)); - xdev = xhci->devs[slot_id]; - ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1; - ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index); + ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep->ep_index); trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len)); requested = td->urb->transfer_buffer_length; remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)); @@ -2064,13 +2254,13 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td, if (trb_type != TRB_STATUS) { xhci_warn(xhci, "WARN: Success on ctrl %s TRB without IOC set?\n", (trb_type == TRB_DATA) ? 
"data" : "setup"); - *status = -ESHUTDOWN; + td->status = -ESHUTDOWN; break; } - *status = 0; + td->status = 0; break; case COMP_SHORT_PACKET: - *status = 0; + td->status = 0; break; case COMP_STOPPED_SHORT_PACKET: if (trb_type == TRB_DATA || trb_type == TRB_NORMAL) @@ -2102,7 +2292,7 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td, ep_ctx, trb_comp_code)) break; xhci_dbg(xhci, "TRB error %u, halted endpoint index = %u\n", - trb_comp_code, ep_index); + trb_comp_code, ep->ep_index); fallthrough; case COMP_STALL_ERROR: /* Did we transfer part of the data (middle) phase? */ @@ -2134,7 +2324,7 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td, td->urb->actual_length = requested; finish_td: - return finish_td(xhci, td, event, ep, status); + return finish_td(xhci, td, event, ep); } /* @@ -2142,9 +2332,8 @@ finish_td: */ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td, union xhci_trb *ep_trb, struct xhci_transfer_event *event, - struct xhci_virt_ep *ep, int *status) + struct xhci_virt_ep *ep) { - struct xhci_ring *ep_ring; struct urb_priv *urb_priv; int idx; struct usb_iso_packet_descriptor *frame; @@ -2153,7 +2342,6 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td, u32 remaining, requested, ep_trb_len; int short_framestatus; - ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer)); trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len)); urb_priv = td->urb->hcpriv; idx = urb_priv->num_tds_done; @@ -2214,26 +2402,23 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td, } if (sum_trbs_for_length) - frame->actual_length = sum_trb_lengths(xhci, ep_ring, ep_trb) + + frame->actual_length = sum_trb_lengths(xhci, ep->ring, ep_trb) + ep_trb_len - remaining; else frame->actual_length = requested; td->urb->actual_length += frame->actual_length; - return finish_td(xhci, td, event, ep, status); + return finish_td(xhci, td, event, ep); } static int skip_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td, - struct xhci_transfer_event *event, - struct xhci_virt_ep *ep, int *status) + struct xhci_virt_ep *ep, int status) { - struct xhci_ring *ep_ring; struct urb_priv *urb_priv; struct usb_iso_packet_descriptor *frame; int idx; - ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer)); urb_priv = td->urb->hcpriv; idx = urb_priv->num_tds_done; frame = &td->urb->iso_frame_desc[idx]; @@ -2245,11 +2430,12 @@ static int skip_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td, frame->actual_length = 0; /* Update ring dequeue pointer */ - while (ep_ring->dequeue != td->last_trb) - inc_deq(xhci, ep_ring); - inc_deq(xhci, ep_ring); + ep->ring->dequeue = td->last_trb; + ep->ring->deq_seg = td->last_trb_seg; + ep->ring->num_trbs_free += td->num_trbs - 1; + inc_deq(xhci, ep->ring); - return xhci_td_cleanup(xhci, td, ep_ring, status); + return xhci_td_cleanup(xhci, td, ep->ring, status); } /* @@ -2257,18 +2443,14 @@ static int skip_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td, */ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td, union xhci_trb *ep_trb, struct xhci_transfer_event *event, - struct xhci_virt_ep *ep, int *status) + struct xhci_virt_ep *ep) { struct xhci_slot_ctx *slot_ctx; struct xhci_ring *ep_ring; u32 trb_comp_code; u32 remaining, requested, ep_trb_len; - unsigned int slot_id; - int ep_index; - slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags)); - slot_ctx = xhci_get_slot_ctx(xhci, xhci->devs[slot_id]->out_ctx); - ep_index = 
TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1; + slot_ctx = xhci_get_slot_ctx(xhci, ep->vdev->out_ctx); ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer)); trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len)); remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)); @@ -2285,13 +2467,13 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td, td->urb->ep->desc.bEndpointAddress, requested, remaining); } - *status = 0; + td->status = 0; break; case COMP_SHORT_PACKET: xhci_dbg(xhci, "ep %#x - asked for %d bytes, %d bytes untransferred\n", td->urb->ep->desc.bEndpointAddress, requested, remaining); - *status = 0; + td->status = 0; break; case COMP_STOPPED_SHORT_PACKET: td->urb->actual_length = remaining; @@ -2305,9 +2487,11 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td, if ((ep_ring->err_count++ > MAX_SOFT_RETRY) || le32_to_cpu(slot_ctx->tt_info) & TT_SLOT) break; - *status = 0; - xhci_cleanup_halted_endpoint(xhci, slot_id, ep_index, - ep_ring->stream_id, td, EP_SOFT_RESET); + + td->status = 0; + + xhci_handle_halted_endpoint(xhci, ep, ep_ring->stream_id, td, + EP_SOFT_RESET); return 0; default: /* do nothing */ @@ -2326,7 +2510,7 @@ finish_td: remaining); td->urb->actual_length = 0; } - return finish_td(xhci, td, event, ep, status); + return finish_td(xhci, td, event, ep); } /* @@ -2337,7 +2521,6 @@ finish_td: static int handle_tx_event(struct xhci_hcd *xhci, struct xhci_transfer_event *event) { - struct xhci_virt_device *xdev; struct xhci_virt_ep *ep; struct xhci_ring *ep_ring; unsigned int slot_id; @@ -2358,16 +2541,14 @@ static int handle_tx_event(struct xhci_hcd *xhci, trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len)); ep_trb_dma = le64_to_cpu(event->buffer); - xdev = xhci->devs[slot_id]; - if (!xdev) { - xhci_err(xhci, "ERROR Transfer event pointed to bad slot %u\n", - slot_id); + ep = xhci_get_virt_ep(xhci, slot_id, ep_index); + if (!ep) { + xhci_err(xhci, "ERROR Invalid Transfer event\n"); goto err_out; } - ep = &xdev->eps[ep_index]; ep_ring = xhci_dma_to_transfer_ring(ep, ep_trb_dma); - ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index); + ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep_index); if (GET_EP_CTX_STATE(ep_ctx) == EP_STATE_DISABLED) { xhci_err(xhci, @@ -2383,8 +2564,8 @@ static int handle_tx_event(struct xhci_hcd *xhci, case COMP_USB_TRANSACTION_ERROR: case COMP_INVALID_STREAM_TYPE_ERROR: case COMP_INVALID_STREAM_ID_ERROR: - xhci_cleanup_halted_endpoint(xhci, slot_id, ep_index, 0, - NULL, EP_SOFT_RESET); + xhci_handle_halted_endpoint(xhci, ep, 0, NULL, + EP_SOFT_RESET); goto cleanup; case COMP_RING_UNDERRUN: case COMP_RING_OVERRUN: @@ -2440,7 +2621,6 @@ static int handle_tx_event(struct xhci_hcd *xhci, case COMP_STALL_ERROR: xhci_dbg(xhci, "Stalled endpoint for slot %u ep %u\n", slot_id, ep_index); - ep->ep_state |= EP_HALTED; status = -EPIPE; break; case COMP_SPLIT_TRANSACTION_ERROR: @@ -2568,11 +2748,10 @@ static int handle_tx_event(struct xhci_hcd *xhci, if (trb_comp_code == COMP_STALL_ERROR || xhci_requires_manual_halt_cleanup(xhci, ep_ctx, trb_comp_code)) { - xhci_cleanup_halted_endpoint(xhci, slot_id, - ep_index, - ep_ring->stream_id, - NULL, - EP_HARD_RESET); + xhci_handle_halted_endpoint(xhci, ep, + ep_ring->stream_id, + NULL, + EP_HARD_RESET); } goto cleanup; } @@ -2631,7 +2810,7 @@ static int handle_tx_event(struct xhci_hcd *xhci, return -ESHUTDOWN; } - skip_isoc_td(xhci, td, event, ep, &status); + skip_isoc_td(xhci, td, ep, status); goto cleanup; } if 
(trb_comp_code == COMP_SHORT_PACKET) @@ -2659,25 +2838,26 @@ static int handle_tx_event(struct xhci_hcd *xhci, * endpoint. Otherwise, the endpoint remains stalled * indefinitely. */ + if (trb_is_noop(ep_trb)) { if (trb_comp_code == COMP_STALL_ERROR || xhci_requires_manual_halt_cleanup(xhci, ep_ctx, trb_comp_code)) - xhci_cleanup_halted_endpoint(xhci, slot_id, - ep_index, - ep_ring->stream_id, - td, EP_HARD_RESET); + xhci_handle_halted_endpoint(xhci, ep, + ep_ring->stream_id, + td, EP_HARD_RESET); goto cleanup; } + td->status = status; + /* update the urb's actual_length and give back to the core */ if (usb_endpoint_xfer_control(&td->urb->ep->desc)) - process_ctrl_td(xhci, td, ep_trb, event, ep, &status); + process_ctrl_td(xhci, td, ep_trb, event, ep); else if (usb_endpoint_xfer_isoc(&td->urb->ep->desc)) - process_isoc_td(xhci, td, ep_trb, event, ep, &status); + process_isoc_td(xhci, td, ep_trb, event, ep); else - process_bulk_intr_td(xhci, td, ep_trb, event, ep, - &status); + process_bulk_intr_td(xhci, td, ep_trb, event, ep); cleanup: handling_skipped_tds = ep->skip && trb_comp_code != COMP_MISSED_SERVICE_ERROR && @@ -2722,6 +2902,7 @@ static int xhci_handle_event(struct xhci_hcd *xhci) { union xhci_trb *event; int update_ptrs = 1; + u32 trb_type; int ret; /* Event ring hasn't been allocated yet. */ @@ -2743,31 +2924,30 @@ static int xhci_handle_event(struct xhci_hcd *xhci) * speculative reads of the event's flags/data below. */ rmb(); + trb_type = TRB_FIELD_TO_TYPE(le32_to_cpu(event->event_cmd.flags)); /* FIXME: Handle more event types. */ - switch (le32_to_cpu(event->event_cmd.flags) & TRB_TYPE_BITMASK) { - case TRB_TYPE(TRB_COMPLETION): + + switch (trb_type) { + case TRB_COMPLETION: handle_cmd_completion(xhci, &event->event_cmd); break; - case TRB_TYPE(TRB_PORT_STATUS): + case TRB_PORT_STATUS: handle_port_status(xhci, event); update_ptrs = 0; break; - case TRB_TYPE(TRB_TRANSFER): + case TRB_TRANSFER: ret = handle_tx_event(xhci, &event->trans_event); if (ret >= 0) update_ptrs = 0; break; - case TRB_TYPE(TRB_DEV_NOTE): + case TRB_DEV_NOTE: handle_device_notification(xhci, event); break; default: - if ((le32_to_cpu(event->event_cmd.flags) & TRB_TYPE_BITMASK) >= - TRB_TYPE(48)) - handle_vendor_event(xhci, event); + if (trb_type >= TRB_VENDOR_DEFINED_LOW) + handle_vendor_event(xhci, event, trb_type); else - xhci_warn(xhci, "ERROR unknown event type %d\n", - TRB_FIELD_TO_TYPE( - le32_to_cpu(event->event_cmd.flags))); + xhci_warn(xhci, "ERROR unknown event type %d\n", trb_type); } /* Any of the above functions may drop and re-acquire the lock, so check * to make sure a watchdog timer didn't mark the host as non-responsive. 
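xhci_handle_event() now decodes the TRB type once via TRB_FIELD_TO_TYPE() and switches on the plain value, which also makes the vendor-defined range test (trb_type >= TRB_VENDOR_DEFINED_LOW) legible. The decode itself is a mask and shift over the control word; schematically, assuming the xHCI layout that keeps the 6-bit type in bits 15:10:

#include <stdint.h>

#define TRB_TYPE_SHIFT  10
#define TRB_TYPE_BITS   0x3f

/* Extract the TRB type from a control word already converted to CPU
 * byte order, then dispatch on the small integer. */
static inline uint32_t trb_field_to_type(uint32_t ctrl)
{
        return (ctrl >> TRB_TYPE_SHIFT) & TRB_TYPE_BITS;
}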
@@ -2931,6 +3111,8 @@ static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring, trb->field[0] = cpu_to_le32(field1); trb->field[1] = cpu_to_le32(field2); trb->field[2] = cpu_to_le32(field3); + /* make sure TRB is fully written before giving it to the controller */ + wmb(); trb->field[3] = cpu_to_le32(field4); trace_xhci_queue_trb(ring, trb); @@ -2946,6 +3128,7 @@ static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring, u32 ep_state, unsigned int num_trbs, gfp_t mem_flags) { unsigned int num_trbs_needed; + unsigned int link_trb_count = 0; /* Make sure the endpoint has been added to xHC schedule */ switch (ep_state) { @@ -3017,7 +3200,19 @@ static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring, ep_ring->enq_seg = ep_ring->enq_seg->next; ep_ring->enqueue = ep_ring->enq_seg->trbs; + + /* prevent infinite loop if all first trbs are link trbs */ + if (link_trb_count++ > ep_ring->num_segs) { + xhci_warn(xhci, "Ring is an endless link TRB loop\n"); + return -EINVAL; + } } + + if (last_trb_on_seg(ep_ring->enq_seg, ep_ring->enqueue)) { + xhci_warn(xhci, "Missing link TRB at end of ring segment\n"); + return -EINVAL; + } + return 0; } @@ -3036,7 +3231,8 @@ static int prepare_transfer(struct xhci_hcd *xhci, struct xhci_ring *ep_ring; struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index); - ep_ring = xhci_stream_id_to_ring(xdev, ep_index, stream_id); + ep_ring = xhci_triad_to_transfer_ring(xhci, xdev->slot_id, ep_index, + stream_id); if (!ep_ring) { xhci_dbg(xhci, "Can't prepare ring for bad stream ID %u\n", stream_id); @@ -3275,12 +3471,16 @@ static int xhci_align_td(struct xhci_hcd *xhci, struct urb *urb, u32 enqd_len, /* create a max max_pkt sized bounce buffer pointed to by last trb */ if (usb_urb_dir_out(urb)) { - len = sg_pcopy_to_buffer(urb->sg, urb->num_sgs, - seg->bounce_buf, new_buff_len, enqd_len); - if (len != new_buff_len) - xhci_warn(xhci, - "WARN Wrong bounce buffer write length: %zu != %d\n", - len, new_buff_len); + if (urb->num_sgs) { + len = sg_pcopy_to_buffer(urb->sg, urb->num_sgs, + seg->bounce_buf, new_buff_len, enqd_len); + if (len != new_buff_len) + xhci_warn(xhci, "WARN Wrong bounce buffer write length: %zu != %d\n", + len, new_buff_len); + } else { + memcpy(seg->bounce_buf, urb->transfer_buffer + enqd_len, new_buff_len); + } + seg->bounce_dma = dma_map_single(dev, seg->bounce_buf, max_pkt, DMA_TO_DEVICE); } else { @@ -3401,7 +3601,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, field |= TRB_IOC; more_trbs_coming = false; td->last_trb = ring->enqueue; - + td->last_trb_seg = ring->enq_seg; if (xhci_urb_suitable_for_idt(urb)) { memcpy(&send_addr, urb->transfer_buffer, trb_buff_len); @@ -3427,7 +3627,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, upper_32_bits(send_addr), length_field, field); - + td->num_trbs++; addr += trb_buff_len; sent_len = trb_buff_len; @@ -3451,8 +3651,10 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, ep_index, urb->stream_id, 1, urb, 1, mem_flags); urb_priv->td[1].last_trb = ring->enqueue; + urb_priv->td[1].last_trb_seg = ring->enq_seg; field = TRB_TYPE(TRB_NORMAL) | ring->cycle_state | TRB_IOC; queue_trb(xhci, ring, 0, 0, 0, TRB_INTR_TARGET(0), field); + urb_priv->td[1].num_trbs++; } check_trb_math(urb, enqd_len); @@ -3503,6 +3705,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags, urb_priv = urb->hcpriv; td = &urb_priv->td[0]; + td->num_trbs = num_trbs; /* * Don't give the first TRB to the hardware (by toggling 
the cycle bit) @@ -3575,6 +3778,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags, /* Save the DMA address of the last TRB in the TD */ td->last_trb = ep_ring->enqueue; + td->last_trb_seg = ep_ring->enq_seg; /* Queue status TRB - see Table 7 and sections 4.11.2.2 and 6.4.1.2.3 */ /* If the device sent data, the status stage is an OUT transfer */ @@ -3819,7 +4023,7 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags, goto cleanup; } td = &urb_priv->td[i]; - + td->num_trbs = trbs_per_td; /* use SIA as default, if frame id is used overwrite it */ sia_frame_id = TRB_SIA; if (!(urb->transfer_flags & URB_ISO_ASAP) && @@ -3862,6 +4066,7 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags, } else { more_trbs_coming = false; td->last_trb = ep_ring->enqueue; + td->last_trb_seg = ep_ring->enq_seg; field |= TRB_IOC; if (trb_block_event_intr(xhci, num_tds, i)) field |= TRB_BEI; @@ -4145,71 +4350,6 @@ int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, struct xhci_command *cmd, trb_slot_id | trb_ep_index | type | trb_suspend, false); } -/* Set Transfer Ring Dequeue Pointer command */ -void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci, - unsigned int slot_id, unsigned int ep_index, - struct xhci_dequeue_state *deq_state) -{ - dma_addr_t addr; - u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id); - u32 trb_ep_index = EP_ID_FOR_TRB(ep_index); - u32 trb_stream_id = STREAM_ID_FOR_TRB(deq_state->stream_id); - u32 trb_sct = 0; - u32 type = TRB_TYPE(TRB_SET_DEQ); - struct xhci_virt_ep *ep; - struct xhci_command *cmd; - int ret; - - xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, - "Set TR Deq Ptr cmd, new deq seg = %p (0x%llx dma), new deq ptr = %p (0x%llx dma), new cycle = %u", - deq_state->new_deq_seg, - (unsigned long long)deq_state->new_deq_seg->dma, - deq_state->new_deq_ptr, - (unsigned long long)xhci_trb_virt_to_dma( - deq_state->new_deq_seg, deq_state->new_deq_ptr), - deq_state->new_cycle_state); - - addr = xhci_trb_virt_to_dma(deq_state->new_deq_seg, - deq_state->new_deq_ptr); - if (addr == 0) { - xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n"); - xhci_warn(xhci, "WARN deq seg = %p, deq pt = %p\n", - deq_state->new_deq_seg, deq_state->new_deq_ptr); - return; - } - ep = &xhci->devs[slot_id]->eps[ep_index]; - if ((ep->ep_state & SET_DEQ_PENDING)) { - xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n"); - xhci_warn(xhci, "A Set TR Deq Ptr command is pending.\n"); - return; - } - - /* This function gets called from contexts where it cannot sleep */ - cmd = xhci_alloc_command(xhci, false, GFP_ATOMIC); - if (!cmd) - return; - - ep->queued_deq_seg = deq_state->new_deq_seg; - ep->queued_deq_ptr = deq_state->new_deq_ptr; - if (deq_state->stream_id) - trb_sct = SCT_FOR_TRB(SCT_PRI_TR); - ret = queue_command(xhci, cmd, - lower_32_bits(addr) | trb_sct | deq_state->new_cycle_state, - upper_32_bits(addr), trb_stream_id, - trb_slot_id | trb_ep_index | type, false); - if (ret < 0) { - xhci_free_command(xhci, cmd); - return; - } - - /* Stop the TD queueing code from ringing the doorbell until - * this command completes. The HC won't set the dequeue pointer - * if the ring is running, and ringing the doorbell starts the - * ring running. 
- */ - ep->ep_state |= SET_DEQ_PENDING; -} - int xhci_queue_reset_ep(struct xhci_hcd *xhci, struct xhci_command *cmd, int slot_id, unsigned int ep_index, enum xhci_ep_reset_type reset_type) diff --git a/drivers/usb/host/xhci-tegra.c b/drivers/usb/host/xhci-tegra.c index 934be1686352..50bb91b6a4b8 100644 --- a/drivers/usb/host/xhci-tegra.c +++ b/drivers/usb/host/xhci-tegra.c @@ -623,6 +623,13 @@ static void tegra_xusb_mbox_handle(struct tegra_xusb *tegra, enable); if (err < 0) break; + + /* + * wait 500us for LFPS detector to be disabled before + * sending ACK + */ + if (!enable) + usleep_range(500, 1000); } if (err < 0) { diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index 91ab81c3fc79..bd27bd670104 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -1440,15 +1440,6 @@ static unsigned int xhci_get_endpoint_flag(struct usb_endpoint_descriptor *desc) return 1 << (xhci_get_endpoint_index(desc) + 1); } -/* Find the flag for this endpoint (for use in the control context). Use the - * endpoint index to create a bitmask. The slot context is bit 0, endpoint 0 is - * bit 1, etc. - */ -static unsigned int xhci_get_endpoint_flag_from_index(unsigned int ep_index) -{ - return 1 << (ep_index + 1); -} - /* Compute the last valid endpoint context index. Basically, this is the * endpoint index plus one. For slot contexts with more than valid endpoint, * we find the most significant bit set in the added contexts flags. @@ -1810,7 +1801,12 @@ static int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) for (; i < urb_priv->num_tds; i++) { td = &urb_priv->td[i]; - list_add_tail(&td->cancelled_td_list, &ep->cancelled_td_list); + /* TD can already be on cancelled list if ep halted on it */ + if (list_empty(&td->cancelled_td_list)) { + td->cancel_status = TD_DIRTY; + list_add_tail(&td->cancelled_td_list, + &ep->cancelled_td_list); + } } /* Queue a stop endpoint command, but only if this is @@ -2985,7 +2981,7 @@ static void xhci_check_bw_drop_ep_streams(struct xhci_hcd *xhci, * else should be touching the xhci->devs[slot_id] structure, so we * don't need to take the xhci->lock for manipulating that. 
*/ -static int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev) +int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev) { int i; int ret = 0; @@ -3083,7 +3079,7 @@ command_cleanup: return ret; } -static void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev) +void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev) { struct xhci_hcd *xhci; struct xhci_virt_device *virt_dev; @@ -3119,84 +3115,6 @@ static void xhci_setup_input_ctx_for_config_ep(struct xhci_hcd *xhci, ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG); } -static void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci, - unsigned int slot_id, unsigned int ep_index, - struct xhci_dequeue_state *deq_state) -{ - struct xhci_input_control_ctx *ctrl_ctx; - struct xhci_container_ctx *in_ctx; - struct xhci_ep_ctx *ep_ctx; - u32 added_ctxs; - dma_addr_t addr; - - in_ctx = xhci->devs[slot_id]->in_ctx; - ctrl_ctx = xhci_get_input_control_ctx(in_ctx); - if (!ctrl_ctx) { - xhci_warn(xhci, "%s: Could not get input context, bad type.\n", - __func__); - return; - } - - xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx, - xhci->devs[slot_id]->out_ctx, ep_index); - ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index); - addr = xhci_trb_virt_to_dma(deq_state->new_deq_seg, - deq_state->new_deq_ptr); - if (addr == 0) { - xhci_warn(xhci, "WARN Cannot submit config ep after " - "reset ep command\n"); - xhci_warn(xhci, "WARN deq seg = %p, deq ptr = %p\n", - deq_state->new_deq_seg, - deq_state->new_deq_ptr); - return; - } - ep_ctx->deq = cpu_to_le64(addr | deq_state->new_cycle_state); - - added_ctxs = xhci_get_endpoint_flag_from_index(ep_index); - xhci_setup_input_ctx_for_config_ep(xhci, xhci->devs[slot_id]->in_ctx, - xhci->devs[slot_id]->out_ctx, ctrl_ctx, - added_ctxs, added_ctxs); -} - -void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci, unsigned int slot_id, - unsigned int ep_index, unsigned int stream_id, - struct xhci_td *td) -{ - struct xhci_dequeue_state deq_state; - - xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep, - "Cleaning up stalled endpoint ring"); - /* We need to move the HW's dequeue pointer past this TD, - * or it will attempt to resend it on the next doorbell ring. - */ - xhci_find_new_dequeue_state(xhci, slot_id, ep_index, stream_id, td, - &deq_state); - - if (!deq_state.new_deq_ptr || !deq_state.new_deq_seg) - return; - - /* HW with the reset endpoint quirk will use the saved dequeue state to - * issue a configure endpoint command later. - */ - if (!(xhci->quirks & XHCI_RESET_EP_QUIRK)) { - xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep, - "Queueing new dequeue state"); - xhci_queue_new_dequeue_state(xhci, slot_id, - ep_index, &deq_state); - } else { - /* Better hope no one uses the input context between now and the - * reset endpoint completion! - * XXX: No idea how this hardware will react when stream rings - * are enabled. 
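Dropping the static qualifier from xhci_check_bandwidth() and xhci_reset_bandwidth() pairs with the new check_bandwidth/reset_bandwidth members of struct xhci_driver_overrides added further below: a glue driver can now wrap the generic implementations instead of duplicating them. A hypothetical wrapper, with foo_ names that are illustrative rather than from the patch:

static int foo_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
{
	/* platform-specific bandwidth bookkeeping would go here */
	return xhci_check_bandwidth(hcd, udev);
}

static const struct xhci_driver_overrides foo_overrides = {
	.check_bandwidth = foo_check_bandwidth,
	.reset_bandwidth = xhci_reset_bandwidth,
};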
- */ - xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, - "Setting up input context for " - "configure endpoint command"); - xhci_setup_input_ctx_for_quirk(xhci, slot_id, - ep_index, &deq_state); - } -} - static void xhci_endpoint_disable(struct usb_hcd *hcd, struct usb_host_endpoint *host_ep) { @@ -4770,19 +4688,19 @@ static u16 xhci_calculate_u1_timeout(struct xhci_hcd *xhci, { unsigned long long timeout_ns; + if (xhci->quirks & XHCI_INTEL_HOST) + timeout_ns = xhci_calculate_intel_u1_timeout(udev, desc); + else + timeout_ns = udev->u1_params.sel; + /* Prevent U1 if service interval is shorter than U1 exit latency */ if (usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) { - if (xhci_service_interval_to_ns(desc) <= udev->u1_params.mel) { + if (xhci_service_interval_to_ns(desc) <= timeout_ns) { dev_dbg(&udev->dev, "Disable U1, ESIT shorter than exit latency\n"); return USB3_LPM_DISABLED; } } - if (xhci->quirks & XHCI_INTEL_HOST) - timeout_ns = xhci_calculate_intel_u1_timeout(udev, desc); - else - timeout_ns = udev->u1_params.sel; - /* The U1 timeout is encoded in 1us intervals. * Don't return a timeout of zero, because that's USB3_LPM_DISABLED. */ @@ -4834,19 +4752,19 @@ static u16 xhci_calculate_u2_timeout(struct xhci_hcd *xhci, { unsigned long long timeout_ns; + if (xhci->quirks & XHCI_INTEL_HOST) + timeout_ns = xhci_calculate_intel_u2_timeout(udev, desc); + else + timeout_ns = udev->u2_params.sel; + /* Prevent U2 if service interval is shorter than U2 exit latency */ if (usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) { - if (xhci_service_interval_to_ns(desc) <= udev->u2_params.mel) { + if (xhci_service_interval_to_ns(desc) <= timeout_ns) { dev_dbg(&udev->dev, "Disable U2, ESIT shorter than exit latency\n"); return USB3_LPM_DISABLED; } } - if (xhci->quirks & XHCI_INTEL_HOST) - timeout_ns = xhci_calculate_intel_u2_timeout(udev, desc); - else - timeout_ns = udev->u2_params.sel; - /* The U2 timeout is encoded in 256us intervals */ timeout_ns = DIV_ROUND_UP_ULL(timeout_ns, 256 * 1000); /* If the necessary timeout value is bigger than what we can set in the @@ -5510,6 +5428,10 @@ void xhci_init_driver(struct hc_driver *drv, drv->reset = over->reset; if (over->start) drv->start = over->start; + if (over->check_bandwidth) + drv->check_bandwidth = over->check_bandwidth; + if (over->reset_bandwidth) + drv->reset_bandwidth = over->reset_bandwidth; } } EXPORT_SYMBOL_GPL(xhci_init_driver); diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index 25e57bc9c3cc..d41de5dc0452 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h @@ -918,6 +918,8 @@ struct xhci_bw_info { #define SS_BW_RESERVED 10 struct xhci_virt_ep { + struct xhci_virt_device *vdev; /* parent */ + unsigned int ep_index; struct xhci_ring *ring; /* Related to endpoints that are configured to use stream IDs only */ struct xhci_stream_info *stream_info; @@ -993,8 +995,10 @@ struct xhci_interval_bw_table { unsigned int ss_bw_out; }; +#define EP_CTX_PER_DEV 31 struct xhci_virt_device { + int slot_id; struct usb_device *udev; /* * Commands to the hardware are passed an "input context" that @@ -1007,7 +1011,7 @@ struct xhci_virt_device { struct xhci_container_ctx *out_ctx; /* Used for addressing devices and configuration changes */ struct xhci_container_ctx *in_ctx; - struct xhci_virt_ep eps[31]; + struct xhci_virt_ep eps[EP_CTX_PER_DEV]; u8 fake_port; u8 real_port; struct xhci_interval_bw_table *bw_table; @@ -1415,7 +1419,7 @@ union xhci_trb { /* MFINDEX Wrap Event - microframe counter wrapped */ 
#define TRB_MFINDEX_WRAP 39 /* TRB IDs 40-47 reserved, 48-63 is vendor-defined */ - +#define TRB_VENDOR_DEFINED_LOW 48 /* Nec vendor-specific command completion event. */ #define TRB_NEC_CMD_COMP 48 /* Get NEC firmware revision. */ @@ -1535,16 +1539,27 @@ struct xhci_segment { unsigned int bounce_len; }; +enum xhci_cancelled_td_status { + TD_DIRTY = 0, + TD_HALTED, + TD_CLEARING_CACHE, + TD_CLEARED, +}; + struct xhci_td { struct list_head td_list; struct list_head cancelled_td_list; + int status; + enum xhci_cancelled_td_status cancel_status; struct urb *urb; struct xhci_segment *start_seg; union xhci_trb *first_trb; union xhci_trb *last_trb; + struct xhci_segment *last_trb_seg; struct xhci_segment *bounce_seg; /* actual_length of the URB has already been set */ bool urb_length_set; + unsigned int num_trbs; }; /* xHCI command default timeout value */ @@ -1556,13 +1571,6 @@ struct xhci_cd { union xhci_trb *cmd_trb; }; -struct xhci_dequeue_state { - struct xhci_segment *new_deq_seg; - union xhci_trb *new_deq_ptr; - int new_cycle_state; - unsigned int stream_id; -}; - enum xhci_ring_type { TYPE_CTRL = 0, TYPE_ISOC, @@ -1920,6 +1928,8 @@ struct xhci_driver_overrides { size_t extra_priv_size; int (*reset)(struct usb_hcd *hcd); int (*start)(struct usb_hcd *hcd); + int (*check_bandwidth)(struct usb_hcd *, struct usb_device *); + void (*reset_bandwidth)(struct usb_hcd *, struct usb_device *); }; #define XHCI_CFC_DELAY 10 @@ -2046,10 +2056,6 @@ void xhci_free_device_endpoint_resources(struct xhci_hcd *xhci, struct xhci_ring *xhci_dma_to_transfer_ring( struct xhci_virt_ep *ep, u64 address); -struct xhci_ring *xhci_stream_id_to_ring( - struct xhci_virt_device *dev, - unsigned int ep_index, - unsigned int stream_id); struct xhci_command *xhci_alloc_command(struct xhci_hcd *xhci, bool allocate_completion, gfp_t mem_flags); struct xhci_command *xhci_alloc_command_with_ctx(struct xhci_hcd *xhci, @@ -2074,6 +2080,8 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks); void xhci_shutdown(struct usb_hcd *hcd); void xhci_init_driver(struct hc_driver *drv, const struct xhci_driver_overrides *over); +int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev); +void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev); int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id); int xhci_ext_cap_init(struct xhci_hcd *xhci); @@ -2121,13 +2129,6 @@ int xhci_queue_reset_ep(struct xhci_hcd *xhci, struct xhci_command *cmd, enum xhci_ep_reset_type reset_type); int xhci_queue_reset_device(struct xhci_hcd *xhci, struct xhci_command *cmd, u32 slot_id); -void xhci_find_new_dequeue_state(struct xhci_hcd *xhci, - unsigned int slot_id, unsigned int ep_index, - unsigned int stream_id, struct xhci_td *cur_td, - struct xhci_dequeue_state *state); -void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci, - unsigned int slot_id, unsigned int ep_index, - struct xhci_dequeue_state *deq_state); void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci, unsigned int slot_id, unsigned int ep_index, unsigned int stream_id, struct xhci_td *td); diff --git a/drivers/usb/misc/usb251xb.c b/drivers/usb/misc/usb251xb.c index 29fe5771c21b..507deef1f709 100644 --- a/drivers/usb/misc/usb251xb.c +++ b/drivers/usb/misc/usb251xb.c @@ -396,7 +396,7 @@ static void usb251xb_get_ports_field(struct usb251xb *hub, } static int usb251xb_get_ofdata(struct usb251xb *hub, - struct usb251xb_data *data) + const struct usb251xb_data *data) { struct device *dev = hub->dev; struct device_node *np = dev->of_node; @@ -630,7 
+630,7 @@ static const struct of_device_id usb251xb_of_match[] = { MODULE_DEVICE_TABLE(of, usb251xb_of_match); #else /* CONFIG_OF */ static int usb251xb_get_ofdata(struct usb251xb *hub, - struct usb251xb_data *data) + const struct usb251xb_data *data) { return 0; } @@ -647,13 +647,11 @@ static int usb251xb_probe(struct usb251xb *hub) { struct device *dev = hub->dev; struct device_node *np = dev->of_node; - const struct of_device_id *of_id = of_match_device(usb251xb_of_match, - dev); + const struct usb251xb_data *usb_data = of_device_get_match_data(dev); int err; - if (np && of_id) { - err = usb251xb_get_ofdata(hub, - (struct usb251xb_data *)of_id->data); + if (np && usb_data) { + err = usb251xb_get_ofdata(hub, usb_data); if (err) { dev_err(dev, "failed to get ofdata: %d\n", err); return err; diff --git a/drivers/usb/misc/usb3503.c b/drivers/usb/misc/usb3503.c index 48099c6bf04c..330f494cd158 100644 --- a/drivers/usb/misc/usb3503.c +++ b/drivers/usb/misc/usb3503.c @@ -409,13 +409,18 @@ static int __init usb3503_init(void) int err; err = i2c_add_driver(&usb3503_i2c_driver); - if (err != 0) + if (err) { pr_err("usb3503: Failed to register I2C driver: %d\n", err); + return err; + } err = platform_driver_register(&usb3503_platform_driver); - if (err != 0) + if (err) { pr_err("usb3503: Failed to register platform driver: %d\n", err); + i2c_del_driver(&usb3503_i2c_driver); + return err; + } return 0; } diff --git a/drivers/usb/misc/yurex.c b/drivers/usb/misc/yurex.c index 73ebfa6e9715..c640f98d20c5 100644 --- a/drivers/usb/misc/yurex.c +++ b/drivers/usb/misc/yurex.c @@ -496,6 +496,9 @@ static ssize_t yurex_write(struct file *file, const char __user *user_buffer, timeout = schedule_timeout(YUREX_WRITE_TIMEOUT); finish_wait(&dev->waitq, &wait); + /* make sure URB is idle after timeout or (spurious) CMD_ACK */ + usb_kill_urb(dev->cntl_urb); + mutex_unlock(&dev->io_mutex); if (retval < 0) { diff --git a/drivers/usb/musb/jz4740.c b/drivers/usb/musb/jz4740.c index c4fe1f4cd17a..5b7d576bf6ee 100644 --- a/drivers/usb/musb/jz4740.c +++ b/drivers/usb/musb/jz4740.c @@ -116,13 +116,13 @@ static int jz4740_musb_init(struct musb *musb) if (IS_ERR(musb->xceiv)) { err = PTR_ERR(musb->xceiv); if (err != -EPROBE_DEFER) - dev_err(dev, "No transceiver configured: %d", err); + dev_err(dev, "No transceiver configured: %d\n", err); return err; } glue->role_sw = usb_role_switch_register(dev, &role_sw_desc); if (IS_ERR(glue->role_sw)) { - dev_err(dev, "Failed to register USB role switch"); + dev_err(dev, "Failed to register USB role switch\n"); return PTR_ERR(glue->role_sw); } @@ -205,26 +205,26 @@ static int jz4740_probe(struct platform_device *pdev) pdata = of_device_get_match_data(dev); if (!pdata) { - dev_err(dev, "missing platform data"); + dev_err(dev, "missing platform data\n"); return -EINVAL; } musb = platform_device_alloc("musb-hdrc", PLATFORM_DEVID_AUTO); if (!musb) { - dev_err(dev, "failed to allocate musb device"); + dev_err(dev, "failed to allocate musb device\n"); return -ENOMEM; } clk = devm_clk_get(dev, "udc"); if (IS_ERR(clk)) { - dev_err(dev, "failed to get clock"); + dev_err(dev, "failed to get clock\n"); ret = PTR_ERR(clk); goto err_platform_device_put; } ret = clk_prepare_enable(clk); if (ret) { - dev_err(dev, "failed to enable clock"); + dev_err(dev, "failed to enable clock\n"); goto err_platform_device_put; } @@ -240,19 +240,19 @@ static int jz4740_probe(struct platform_device *pdev) ret = platform_device_add_resources(musb, pdev->resource, pdev->num_resources); if (ret) { - dev_err(dev, "failed 
to add resources"); + dev_err(dev, "failed to add resources\n"); goto err_clk_disable; } ret = platform_device_add_data(musb, pdata, sizeof(*pdata)); if (ret) { - dev_err(dev, "failed to add platform_data"); + dev_err(dev, "failed to add platform_data\n"); goto err_clk_disable; } ret = platform_device_add(musb); if (ret) { - dev_err(dev, "failed to register musb device"); + dev_err(dev, "failed to register musb device\n"); goto err_clk_disable; } diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c index 849e0b770130..1cd87729ba60 100644 --- a/drivers/usb/musb/musb_core.c +++ b/drivers/usb/musb/musb_core.c @@ -2240,32 +2240,35 @@ int musb_queue_resume_work(struct musb *musb, { struct musb_pending_work *w; unsigned long flags; + bool is_suspended; int error; if (WARN_ON(!callback)) return -EINVAL; - if (pm_runtime_active(musb->controller)) - return callback(musb, data); + spin_lock_irqsave(&musb->list_lock, flags); + is_suspended = musb->is_runtime_suspended; + + if (is_suspended) { + w = devm_kzalloc(musb->controller, sizeof(*w), GFP_ATOMIC); + if (!w) { + error = -ENOMEM; + goto out_unlock; + } - w = devm_kzalloc(musb->controller, sizeof(*w), GFP_ATOMIC); - if (!w) - return -ENOMEM; + w->callback = callback; + w->data = data; - w->callback = callback; - w->data = data; - spin_lock_irqsave(&musb->list_lock, flags); - if (musb->is_runtime_suspended) { list_add_tail(&w->node, &musb->pending_list); error = 0; - } else { - dev_err(musb->controller, "could not add resume work %p\n", - callback); - devm_kfree(musb->controller, w); - error = -EINPROGRESS; } + +out_unlock: spin_unlock_irqrestore(&musb->list_lock, flags); + if (!is_suspended) + error = callback(musb, data); + return error; } EXPORT_SYMBOL_GPL(musb_queue_resume_work); diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c index f62ffaede1ab..ef374d4dd94a 100644 --- a/drivers/usb/musb/musb_gadget.c +++ b/drivers/usb/musb/musb_gadget.c @@ -451,7 +451,7 @@ void musb_g_tx(struct musb *musb, u8 epnum) return; } - if (request) { + if (req) { trace_musb_req_tx(req); diff --git a/drivers/usb/musb/musbhsdma.c b/drivers/usb/musb/musbhsdma.c index 0aacfc8be5a1..7acd1635850d 100644 --- a/drivers/usb/musb/musbhsdma.c +++ b/drivers/usb/musb/musbhsdma.c @@ -321,8 +321,6 @@ irqreturn_t dma_controller_irq(int irq, void *private_data) musb_channel->channel.status = MUSB_DMA_STATUS_BUS_ABORT; } else { - u8 devctl; - addr = musb_read_hsdma_addr(mbase, bchannel); channel->actual_len = addr @@ -336,8 +334,6 @@ irqreturn_t dma_controller_irq(int irq, void *private_data) < musb_channel->len) ? 
"=> reconfig 0" : "=> complete"); - devctl = musb_readb(mbase, MUSB_DEVCTL); - channel->status = MUSB_DMA_STATUS_FREE; /* completed */ diff --git a/drivers/usb/phy/phy-mxs-usb.c b/drivers/usb/phy/phy-mxs-usb.c index 67b39dc62b37..8a262c5a0408 100644 --- a/drivers/usb/phy/phy-mxs-usb.c +++ b/drivers/usb/phy/phy-mxs-usb.c @@ -714,14 +714,9 @@ static int mxs_phy_probe(struct platform_device *pdev) struct clk *clk; struct mxs_phy *mxs_phy; int ret; - const struct of_device_id *of_id; struct device_node *np = pdev->dev.of_node; u32 val; - of_id = of_match_device(mxs_phy_dt_ids, &pdev->dev); - if (!of_id) - return -ENODEV; - base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(base)) return PTR_ERR(base); @@ -797,7 +792,7 @@ static int mxs_phy_probe(struct platform_device *pdev) mxs_phy->phy.charger_detect = mxs_phy_charger_detect; mxs_phy->clk = clk; - mxs_phy->data = of_id->data; + mxs_phy->data = of_device_get_match_data(&pdev->dev); platform_set_drvdata(pdev, mxs_phy); diff --git a/drivers/usb/phy/phy-tegra-usb.c b/drivers/usb/phy/phy-tegra-usb.c index 03a333797382..a48452a6172b 100644 --- a/drivers/usb/phy/phy-tegra-usb.c +++ b/drivers/usb/phy/phy-tegra-usb.c @@ -45,6 +45,7 @@ #define TEGRA_PORTSC1_RWC_BITS (PORT_CSC | PORT_PEC | PORT_OCC) #define USB_SUSP_CTRL 0x400 +#define USB_WAKE_ON_RESUME_EN BIT(2) #define USB_WAKE_ON_CNNT_EN_DEV BIT(3) #define USB_WAKE_ON_DISCON_EN_DEV BIT(4) #define USB_SUSP_CLR BIT(5) @@ -56,6 +57,15 @@ #define USB_SUSP_SET BIT(14) #define USB_WAKEUP_DEBOUNCE_COUNT(x) (((x) & 0x7) << 16) +#define USB_PHY_VBUS_SENSORS 0x404 +#define B_SESS_VLD_WAKEUP_EN BIT(6) +#define B_VBUS_VLD_WAKEUP_EN BIT(14) +#define A_SESS_VLD_WAKEUP_EN BIT(22) +#define A_VBUS_VLD_WAKEUP_EN BIT(30) + +#define USB_PHY_VBUS_WAKEUP_ID 0x408 +#define VBUS_WAKEUP_WAKEUP_EN BIT(30) + #define USB1_LEGACY_CTRL 0x410 #define USB1_NO_LEGACY_MODE BIT(0) #define USB1_VBUS_SENSE_CTL_MASK (3 << 1) @@ -334,6 +344,11 @@ static int utmip_pad_power_on(struct tegra_usb_phy *phy) writel_relaxed(val, base + UTMIP_BIAS_CFG0); } + if (phy->pad_wakeup) { + phy->pad_wakeup = false; + utmip_pad_count--; + } + spin_unlock(&utmip_pad_lock); clk_disable_unprepare(phy->pad_clk); @@ -359,6 +374,17 @@ static int utmip_pad_power_off(struct tegra_usb_phy *phy) goto ulock; } + /* + * In accordance to TRM, OTG and Bias pad circuits could be turned off + * to save power if wake is enabled, but the VBUS-change detection + * method is board-specific and these circuits may need to be enabled + * to generate wakeup event, hence we will just keep them both enabled. 
+ */ + if (phy->wakeup_enabled) { + phy->pad_wakeup = true; + utmip_pad_count++; + } + if (--utmip_pad_count == 0) { val = readl_relaxed(base + UTMIP_BIAS_CFG0); val |= UTMIP_OTGPD | UTMIP_BIASPD; @@ -503,11 +529,24 @@ static int utmi_phy_power_on(struct tegra_usb_phy *phy) writel_relaxed(val, base + UTMIP_PLL_CFG1); } + val = readl_relaxed(base + USB_SUSP_CTRL); + val &= ~USB_WAKE_ON_RESUME_EN; + writel_relaxed(val, base + USB_SUSP_CTRL); + if (phy->mode == USB_DR_MODE_PERIPHERAL) { val = readl_relaxed(base + USB_SUSP_CTRL); val &= ~(USB_WAKE_ON_CNNT_EN_DEV | USB_WAKE_ON_DISCON_EN_DEV); writel_relaxed(val, base + USB_SUSP_CTRL); + val = readl_relaxed(base + USB_PHY_VBUS_WAKEUP_ID); + val &= ~VBUS_WAKEUP_WAKEUP_EN; + writel_relaxed(val, base + USB_PHY_VBUS_WAKEUP_ID); + + val = readl_relaxed(base + USB_PHY_VBUS_SENSORS); + val &= ~(A_VBUS_VLD_WAKEUP_EN | A_SESS_VLD_WAKEUP_EN); + val &= ~(B_VBUS_VLD_WAKEUP_EN | B_SESS_VLD_WAKEUP_EN); + writel_relaxed(val, base + USB_PHY_VBUS_SENSORS); + val = readl_relaxed(base + UTMIP_BAT_CHRG_CFG0); val &= ~UTMIP_PD_CHRG; writel_relaxed(val, base + UTMIP_BAT_CHRG_CFG0); @@ -605,31 +644,51 @@ static int utmi_phy_power_off(struct tegra_usb_phy *phy) utmi_phy_clk_disable(phy); - if (phy->mode == USB_DR_MODE_PERIPHERAL) { + /* PHY won't resume if reset is asserted */ + if (!phy->wakeup_enabled) { val = readl_relaxed(base + USB_SUSP_CTRL); - val &= ~USB_WAKEUP_DEBOUNCE_COUNT(~0); - val |= USB_WAKE_ON_CNNT_EN_DEV | USB_WAKEUP_DEBOUNCE_COUNT(5); + val |= UTMIP_RESET; writel_relaxed(val, base + USB_SUSP_CTRL); } - val = readl_relaxed(base + USB_SUSP_CTRL); - val |= UTMIP_RESET; - writel_relaxed(val, base + USB_SUSP_CTRL); - val = readl_relaxed(base + UTMIP_BAT_CHRG_CFG0); val |= UTMIP_PD_CHRG; writel_relaxed(val, base + UTMIP_BAT_CHRG_CFG0); - val = readl_relaxed(base + UTMIP_XCVR_CFG0); - val |= UTMIP_FORCE_PD_POWERDOWN | UTMIP_FORCE_PD2_POWERDOWN | - UTMIP_FORCE_PDZI_POWERDOWN; - writel_relaxed(val, base + UTMIP_XCVR_CFG0); + if (!phy->wakeup_enabled) { + val = readl_relaxed(base + UTMIP_XCVR_CFG0); + val |= UTMIP_FORCE_PD_POWERDOWN | UTMIP_FORCE_PD2_POWERDOWN | + UTMIP_FORCE_PDZI_POWERDOWN; + writel_relaxed(val, base + UTMIP_XCVR_CFG0); + } val = readl_relaxed(base + UTMIP_XCVR_CFG1); val |= UTMIP_FORCE_PDDISC_POWERDOWN | UTMIP_FORCE_PDCHRP_POWERDOWN | UTMIP_FORCE_PDDR_POWERDOWN; writel_relaxed(val, base + UTMIP_XCVR_CFG1); + if (phy->wakeup_enabled) { + val = readl_relaxed(base + USB_SUSP_CTRL); + val &= ~USB_WAKEUP_DEBOUNCE_COUNT(~0); + val |= USB_WAKEUP_DEBOUNCE_COUNT(5); + val |= USB_WAKE_ON_RESUME_EN; + writel_relaxed(val, base + USB_SUSP_CTRL); + + /* + * Ask VBUS sensor to generate wake event once cable is + * connected. + */ + if (phy->mode == USB_DR_MODE_PERIPHERAL) { + val = readl_relaxed(base + USB_PHY_VBUS_WAKEUP_ID); + val |= VBUS_WAKEUP_WAKEUP_EN; + writel_relaxed(val, base + USB_PHY_VBUS_WAKEUP_ID); + + val = readl_relaxed(base + USB_PHY_VBUS_SENSORS); + val |= A_VBUS_VLD_WAKEUP_EN; + writel_relaxed(val, base + USB_PHY_VBUS_SENSORS); + } + } + return utmip_pad_power_off(phy); } @@ -765,6 +824,15 @@ static int ulpi_phy_power_off(struct tegra_usb_phy *phy) usleep_range(5000, 6000); clk_disable_unprepare(phy->clk); + /* + * Wakeup currently unimplemented for ULPI, thus PHY needs to be + * force-resumed. 
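A caller of the ULPI power-off path above sees -EOPNOTSUPP when wakeup was requested, with the PHY already powered back on. A hypothetical controller-side fallback (the foo_ name is illustrative; usb_phy_set_wakeup() and usb_phy_set_suspend() are the generic USB PHY entry points):

static int foo_suspend_phy(struct device *dev, struct usb_phy *u_phy)
{
	int err;

	usb_phy_set_wakeup(u_phy, true);
	err = usb_phy_set_suspend(u_phy, 1);
	if (err == -EOPNOTSUPP) {
		/* ULPI: PHY was force-resumed, so just keep it powered */
		dev_warn(dev, "PHY wakeup unsupported, keeping PHY awake\n");
		return 0;
	}
	return err;
}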
+ */ + if (WARN_ON_ONCE(phy->wakeup_enabled)) { + ulpi_phy_power_on(phy); + return -EOPNOTSUPP; + } + return 0; } @@ -784,6 +852,9 @@ static int tegra_usb_phy_power_on(struct tegra_usb_phy *phy) phy->powered_on = true; + /* Let PHY settle down */ + usleep_range(2000, 2500); + return 0; } @@ -824,6 +895,15 @@ static void tegra_usb_phy_shutdown(struct usb_phy *u_phy) phy->freq = NULL; } +static int tegra_usb_phy_set_wakeup(struct usb_phy *u_phy, bool enable) +{ + struct tegra_usb_phy *phy = to_tegra_usb_phy(u_phy); + + phy->wakeup_enabled = enable; + + return 0; +} + static int tegra_usb_phy_set_suspend(struct usb_phy *u_phy, int suspend) { struct tegra_usb_phy *phy = to_tegra_usb_phy(u_phy); @@ -1195,6 +1275,7 @@ static int tegra_usb_phy_probe(struct platform_device *pdev) tegra_phy->u_phy.dev = &pdev->dev; tegra_phy->u_phy.init = tegra_usb_phy_init; tegra_phy->u_phy.shutdown = tegra_usb_phy_shutdown; + tegra_phy->u_phy.set_wakeup = tegra_usb_phy_set_wakeup; tegra_phy->u_phy.set_suspend = tegra_usb_phy_set_suspend; platform_set_drvdata(pdev, tegra_phy); diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c index ac9a81ae8216..e6fa13701808 100644 --- a/drivers/usb/renesas_usbhs/fifo.c +++ b/drivers/usb/renesas_usbhs/fifo.c @@ -126,6 +126,7 @@ struct usbhs_pkt *usbhs_pkt_pop(struct usbhs_pipe *pipe, struct usbhs_pkt *pkt) } usbhs_pipe_clear_without_sequence(pipe, 0, 0); + usbhs_pipe_running(pipe, 0); __usbhsf_pkt_del(pkt); } diff --git a/drivers/usb/serial/Kconfig b/drivers/usb/serial/Kconfig index a21ff5ab6df9..de5c01257060 100644 --- a/drivers/usb/serial/Kconfig +++ b/drivers/usb/serial/Kconfig @@ -633,6 +633,15 @@ config USB_SERIAL_UPD78F0730 To compile this driver as a module, choose M here: the module will be called upd78f0730. +config USB_SERIAL_XR + tristate "USB MaxLinear/Exar USB to Serial driver" + help + Say Y here if you want to use MaxLinear/Exar USB to Serial converter + devices. + + To compile this driver as a module, choose M here: the + module will be called xr_serial. + config USB_SERIAL_DEBUG tristate "USB Debugging Device" help diff --git a/drivers/usb/serial/Makefile b/drivers/usb/serial/Makefile index 50c53aed787a..c7bb1a88173e 100644 --- a/drivers/usb/serial/Makefile +++ b/drivers/usb/serial/Makefile @@ -61,4 +61,5 @@ obj-$(CONFIG_USB_SERIAL_UPD78F0730) += upd78f0730.o obj-$(CONFIG_USB_SERIAL_VISOR) += visor.o obj-$(CONFIG_USB_SERIAL_WISHBONE) += wishbone-serial.o obj-$(CONFIG_USB_SERIAL_WHITEHEAT) += whiteheat.o +obj-$(CONFIG_USB_SERIAL_XR) += xr_serial.o obj-$(CONFIG_USB_SERIAL_XSENS_MT) += xsens_mt.o diff --git a/drivers/usb/serial/ark3116.c b/drivers/usb/serial/ark3116.c index 71a9206ea1e2..b9bedfe9bd09 100644 --- a/drivers/usb/serial/ark3116.c +++ b/drivers/usb/serial/ark3116.c @@ -178,15 +178,13 @@ static int ark3116_port_probe(struct usb_serial_port *port) return 0; } -static int ark3116_port_remove(struct usb_serial_port *port) +static void ark3116_port_remove(struct usb_serial_port *port) { struct ark3116_private *priv = usb_get_serial_port_data(port); /* device is closed, so URBs and DMA should be down */ mutex_destroy(&priv->hw_lock); kfree(priv); - - return 0; } static void ark3116_set_termios(struct tty_struct *tty, @@ -719,9 +717,10 @@ MODULE_DESCRIPTION(DRIVER_DESC); * hardware bug or something. * * According to a patch provided here - * (http://lkml.org/lkml/2009/7/26/56), the ARK3116 can also be used - * as an IrDA dongle. Since I do not have such a thing, I could not - * investigate that aspect. However, I can speculate ;-). 
+ * https://lore.kernel.org/lkml/200907261419.50702.linux@rainbow-software.org + * the ARK3116 can also be used as an IrDA dongle. Since I do not have + * such a thing, I could not investigate that aspect. However, I can + * speculate ;-). * * - IrDA encodes data differently than RS232. Most likely, one of * the bits in registers 9..E enables the IR ENDEC (encoder/decoder). diff --git a/drivers/usb/serial/belkin_sa.c b/drivers/usb/serial/belkin_sa.c index 9bb123ab9bc9..ed9193f3bb1a 100644 --- a/drivers/usb/serial/belkin_sa.c +++ b/drivers/usb/serial/belkin_sa.c @@ -37,7 +37,7 @@ /* function prototypes for a Belkin USB Serial Adapter F5U103 */ static int belkin_sa_port_probe(struct usb_serial_port *port); -static int belkin_sa_port_remove(struct usb_serial_port *port); +static void belkin_sa_port_remove(struct usb_serial_port *port); static int belkin_sa_open(struct tty_struct *tty, struct usb_serial_port *port); static void belkin_sa_close(struct usb_serial_port *port); @@ -134,14 +134,12 @@ static int belkin_sa_port_probe(struct usb_serial_port *port) return 0; } -static int belkin_sa_port_remove(struct usb_serial_port *port) +static void belkin_sa_port_remove(struct usb_serial_port *port) { struct belkin_sa_private *priv; priv = usb_get_serial_port_data(port); kfree(priv); - - return 0; } static int belkin_sa_open(struct tty_struct *tty, diff --git a/drivers/usb/serial/bus.c b/drivers/usb/serial/bus.c index eb0195cf37dd..7133818a58b9 100644 --- a/drivers/usb/serial/bus.c +++ b/drivers/usb/serial/bus.c @@ -16,19 +16,13 @@ static int usb_serial_device_match(struct device *dev, struct device_driver *drv) { - struct usb_serial_driver *driver; - const struct usb_serial_port *port; + const struct usb_serial_port *port = to_usb_serial_port(dev); + struct usb_serial_driver *driver = to_usb_serial_driver(drv); /* * drivers are already assigned to ports in serial_probe so it's * a simple check here. */ - port = to_usb_serial_port(dev); - if (!port) - return 0; - - driver = to_usb_serial_driver(drv); - if (driver == port->serial->type) return 1; @@ -37,16 +31,12 @@ static int usb_serial_device_match(struct device *dev, static int usb_serial_device_probe(struct device *dev) { + struct usb_serial_port *port = to_usb_serial_port(dev); struct usb_serial_driver *driver; - struct usb_serial_port *port; struct device *tty_dev; int retval = 0; int minor; - port = to_usb_serial_port(dev); - if (!port) - return -ENODEV; - /* make sure suspend/resume doesn't race against port_probe */ retval = usb_autopm_get_interface(port->serial->interface); if (retval) @@ -86,16 +76,11 @@ err_autopm_put: static int usb_serial_device_remove(struct device *dev) { + struct usb_serial_port *port = to_usb_serial_port(dev); struct usb_serial_driver *driver; - struct usb_serial_port *port; - int retval = 0; int minor; int autopm_err; - port = to_usb_serial_port(dev); - if (!port) - return -ENODEV; - /* * Make sure suspend/resume doesn't race against port_remove. 
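Since usb_serial_device_remove() above no longer propagates a status, the port_remove() callback becomes void and the long run of per-driver hunks that follows is a mechanical conversion. The resulting contract, sketched for a hypothetical driver (foo_private is illustrative):

static void foo_port_remove(struct usb_serial_port *port)
{
	struct foo_private *priv = usb_get_serial_port_data(port);

	/* cleanup can no longer fail or veto the unbind */
	kfree(priv);
}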
* @@ -109,7 +94,7 @@ static int usb_serial_device_remove(struct device *dev) driver = port->serial->type; if (driver->port_remove) - retval = driver->port_remove(port); + driver->port_remove(port); dev_info(dev, "%s converter now disconnected from ttyUSB%d\n", driver->description, minor); @@ -117,7 +102,7 @@ static int usb_serial_device_remove(struct device *dev) if (!autopm_err) usb_autopm_put_interface(port->serial->interface); - return retval; + return 0; } static ssize_t new_id_store(struct device_driver *driver, diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c index 28deaaec581f..8d997b71056f 100644 --- a/drivers/usb/serial/ch341.c +++ b/drivers/usb/serial/ch341.c @@ -419,14 +419,12 @@ error: kfree(priv); return r; } -static int ch341_port_remove(struct usb_serial_port *port) +static void ch341_port_remove(struct usb_serial_port *port) { struct ch341_private *priv; priv = usb_get_serial_port_data(port); kfree(priv); - - return 0; } static int ch341_carrier_raised(struct usb_serial_port *port) diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c index fbb10dfc56e3..9e1c609792fb 100644 --- a/drivers/usb/serial/cp210x.c +++ b/drivers/usb/serial/cp210x.c @@ -3,6 +3,7 @@ * Silicon Laboratories CP210x USB to RS232 serial adaptor driver * * Copyright (C) 2005 Craig Shelley (craig@microtron.org.uk) + * Copyright (C) 2010-2021 Johan Hovold (johan@kernel.org) * * Support to set flow control line levels using TIOCMGET and TIOCMSET * thanks to Karl Hiramoto karl@hiramoto.org. RTSCTS hardware flow @@ -16,9 +17,7 @@ #include <linux/tty.h> #include <linux/tty_flip.h> #include <linux/module.h> -#include <linux/moduleparam.h> #include <linux/usb.h> -#include <linux/uaccess.h> #include <linux/usb/serial.h> #include <linux/gpio/driver.h> #include <linux/bitops.h> @@ -45,7 +44,7 @@ static int cp210x_attach(struct usb_serial *); static void cp210x_disconnect(struct usb_serial *); static void cp210x_release(struct usb_serial *); static int cp210x_port_probe(struct usb_serial_port *); -static int cp210x_port_remove(struct usb_serial_port *); +static void cp210x_port_remove(struct usb_serial_port *); static void cp210x_dtr_rts(struct usb_serial_port *port, int on); static void cp210x_process_read_urb(struct urb *urb); static void cp210x_enable_event_mode(struct usb_serial_port *port); @@ -61,6 +60,7 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(0x08e6, 0x5501) }, /* Gemalto Prox-PU/CU contactless smartcard reader */ { USB_DEVICE(0x08FD, 0x000A) }, /* Digianswer A/S , ZigBee/802.15.4 MAC Device */ { USB_DEVICE(0x0908, 0x01FF) }, /* Siemens RUGGEDCOM USB Serial Console */ + { USB_DEVICE(0x0988, 0x0578) }, /* Teraoka AD2000 */ { USB_DEVICE(0x0B00, 0x3070) }, /* Ingenico 3070 */ { USB_DEVICE(0x0BED, 0x1100) }, /* MEI (TM) Cashflow-SC Bill/Voucher Acceptor */ { USB_DEVICE(0x0BED, 0x1101) }, /* MEI series 2000 Combo Acceptor */ @@ -201,6 +201,7 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(0x1901, 0x0194) }, /* GE Healthcare Remote Alarm Box */ { USB_DEVICE(0x1901, 0x0195) }, /* GE B850/B650/B450 CP2104 DP UART interface */ { USB_DEVICE(0x1901, 0x0196) }, /* GE B850 CP2105 DP UART interface */ + { USB_DEVICE(0x199B, 0xBA30) }, /* LORD WSDA-200-USB */ { USB_DEVICE(0x19CF, 0x3000) }, /* Parrot NMEA GPS Flight Recorder */ { USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */ { USB_DEVICE(0x1B1C, 0x1C00) }, /* Corsair USB Dongle */ @@ -266,7 +267,12 @@ struct cp210x_port_private { u8 bInterfaceNumber; bool event_mode; enum 
cp210x_event_state event_state; - u8 lsr; + u8 lsr; + + struct mutex mutex; + bool crtscts; + bool dtr; + bool rts; }; static struct usb_serial_driver cp210x_device = { @@ -377,6 +383,16 @@ static struct usb_serial_driver * const serial_drivers[] = { #define CONTROL_WRITE_DTR 0x0100 #define CONTROL_WRITE_RTS 0x0200 +/* CP210X_(GET|SET)_CHARS */ +struct cp210x_special_chars { + u8 bEofChar; + u8 bErrorChar; + u8 bBreakChar; + u8 bEventChar; + u8 bXonChar; + u8 bXoffChar; +}; + /* CP210X_VENDOR_SPECIFIC values */ #define CP210X_READ_2NCONFIG 0x000E #define CP210X_READ_LATCH 0x00C2 @@ -435,17 +451,14 @@ struct cp210x_flow_ctl { /* cp210x_flow_ctl::ulControlHandshake */ #define CP210X_SERIAL_DTR_MASK GENMASK(1, 0) -#define CP210X_SERIAL_DTR_SHIFT(_mode) (_mode) +#define CP210X_SERIAL_DTR_INACTIVE (0 << 0) +#define CP210X_SERIAL_DTR_ACTIVE (1 << 0) +#define CP210X_SERIAL_DTR_FLOW_CTL (2 << 0) #define CP210X_SERIAL_CTS_HANDSHAKE BIT(3) #define CP210X_SERIAL_DSR_HANDSHAKE BIT(4) #define CP210X_SERIAL_DCD_HANDSHAKE BIT(5) #define CP210X_SERIAL_DSR_SENSITIVITY BIT(6) -/* values for cp210x_flow_ctl::ulControlHandshake::CP210X_SERIAL_DTR_MASK */ -#define CP210X_SERIAL_DTR_INACTIVE 0 -#define CP210X_SERIAL_DTR_ACTIVE 1 -#define CP210X_SERIAL_DTR_FLOW_CTL 2 - /* cp210x_flow_ctl::ulFlowReplace */ #define CP210X_SERIAL_AUTO_TRANSMIT BIT(0) #define CP210X_SERIAL_AUTO_RECEIVE BIT(1) @@ -453,14 +466,11 @@ struct cp210x_flow_ctl { #define CP210X_SERIAL_NULL_STRIPPING BIT(3) #define CP210X_SERIAL_BREAK_CHAR BIT(4) #define CP210X_SERIAL_RTS_MASK GENMASK(7, 6) -#define CP210X_SERIAL_RTS_SHIFT(_mode) (_mode << 6) +#define CP210X_SERIAL_RTS_INACTIVE (0 << 6) +#define CP210X_SERIAL_RTS_ACTIVE (1 << 6) +#define CP210X_SERIAL_RTS_FLOW_CTL (2 << 6) #define CP210X_SERIAL_XOFF_CONTINUE BIT(31) -/* values for cp210x_flow_ctl::ulFlowReplace::CP210X_SERIAL_RTS_MASK */ -#define CP210X_SERIAL_RTS_INACTIVE 0 -#define CP210X_SERIAL_RTS_ACTIVE 1 -#define CP210X_SERIAL_RTS_FLOW_CTL 2 - /* CP210X_VENDOR_SPECIFIC, CP210X_GET_DEVICEMODE call reads these 0x2 bytes. 
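With the pre-shifted field values above replacing the CP210X_SERIAL_DTR_SHIFT()/CP210X_SERIAL_RTS_SHIFT() helpers, updating the two-bit DTR and RTS fields becomes a plain read-modify-write. The resulting idiom, sketched with the values defined above:

ctl_hs &= ~CP210X_SERIAL_DTR_MASK;
ctl_hs |= CP210X_SERIAL_DTR_FLOW_CTL;		/* bits 1..0 = 2 */

flow_repl &= ~CP210X_SERIAL_RTS_MASK;
flow_repl |= CP210X_SERIAL_RTS_FLOW_CTL;	/* bits 7..6 = 2 */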
*/ struct cp210x_pin_mode { u8 eci; @@ -664,16 +674,13 @@ static int cp210x_write_reg_block(struct usb_serial_port *port, u8 req, kfree(dmabuf); - if (result == bufsize) { - result = 0; - } else { + if (result < 0) { dev_err(&port->dev, "failed set req 0x%x size %d status: %d\n", req, bufsize, result); - if (result >= 0) - result = -EIO; + return result; } - return result; + return 0; } /* @@ -710,17 +717,14 @@ static int cp210x_write_vendor_block(struct usb_serial *serial, u8 type, kfree(dmabuf); - if (result == bufsize) { - result = 0; - } else { + if (result < 0) { dev_err(&serial->interface->dev, "failed to set vendor val 0x%04x size %d: %d\n", val, bufsize, result); - if (result >= 0) - result = -EIO; + return result; } - return result; + return 0; } #endif @@ -1074,30 +1078,80 @@ static void cp210x_disable_event_mode(struct usb_serial_port *port) port_priv->event_mode = false; } +static int cp210x_set_chars(struct usb_serial_port *port, + struct cp210x_special_chars *chars) +{ + struct cp210x_port_private *port_priv = usb_get_serial_port_data(port); + struct usb_serial *serial = port->serial; + void *dmabuf; + int result; + + dmabuf = kmemdup(chars, sizeof(*chars), GFP_KERNEL); + if (!dmabuf) + return -ENOMEM; + + result = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0), + CP210X_SET_CHARS, REQTYPE_HOST_TO_INTERFACE, 0, + port_priv->bInterfaceNumber, + dmabuf, sizeof(*chars), USB_CTRL_SET_TIMEOUT); + + kfree(dmabuf); + + if (result < 0) { + dev_err(&port->dev, "failed to set special chars: %d\n", result); + return result; + } + + return 0; +} + static bool cp210x_termios_change(const struct ktermios *a, const struct ktermios *b) { - bool iflag_change; + bool iflag_change, cc_change; - iflag_change = ((a->c_iflag ^ b->c_iflag) & INPCK); + iflag_change = ((a->c_iflag ^ b->c_iflag) & (INPCK | IXON | IXOFF)); + cc_change = a->c_cc[VSTART] != b->c_cc[VSTART] || + a->c_cc[VSTOP] != b->c_cc[VSTOP]; - return tty_termios_hw_change(a, b) || iflag_change; + return tty_termios_hw_change(a, b) || iflag_change || cc_change; } static void cp210x_set_flow_control(struct tty_struct *tty, struct usb_serial_port *port, struct ktermios *old_termios) { + struct cp210x_port_private *port_priv = usb_get_serial_port_data(port); + struct cp210x_special_chars chars; struct cp210x_flow_ctl flow_ctl; u32 flow_repl; u32 ctl_hs; int ret; - if (old_termios && C_CRTSCTS(tty) == (old_termios->c_cflag & CRTSCTS)) + if (old_termios && + C_CRTSCTS(tty) == (old_termios->c_cflag & CRTSCTS) && + I_IXON(tty) == (old_termios->c_iflag & IXON) && + I_IXOFF(tty) == (old_termios->c_iflag & IXOFF) && + START_CHAR(tty) == old_termios->c_cc[VSTART] && + STOP_CHAR(tty) == old_termios->c_cc[VSTOP]) { return; + } + + if (I_IXON(tty) || I_IXOFF(tty)) { + memset(&chars, 0, sizeof(chars)); + + chars.bXonChar = START_CHAR(tty); + chars.bXoffChar = STOP_CHAR(tty); + + ret = cp210x_set_chars(port, &chars); + if (ret) + return; + } + + mutex_lock(&port_priv->mutex); ret = cp210x_read_reg_block(port, CP210X_GET_FLOW, &flow_ctl, sizeof(flow_ctl)); if (ret) - return; + goto out_unlock; ctl_hs = le32_to_cpu(flow_ctl.ulControlHandshake); flow_repl = le32_to_cpu(flow_ctl.ulFlowReplace); @@ -1106,26 +1160,51 @@ static void cp210x_set_flow_control(struct tty_struct *tty, ctl_hs &= ~CP210X_SERIAL_DCD_HANDSHAKE; ctl_hs &= ~CP210X_SERIAL_DSR_SENSITIVITY; ctl_hs &= ~CP210X_SERIAL_DTR_MASK; - ctl_hs |= CP210X_SERIAL_DTR_SHIFT(CP210X_SERIAL_DTR_ACTIVE); + if (port_priv->dtr) + ctl_hs |= CP210X_SERIAL_DTR_ACTIVE; + else + ctl_hs |= 
CP210X_SERIAL_DTR_INACTIVE; + flow_repl &= ~CP210X_SERIAL_RTS_MASK; if (C_CRTSCTS(tty)) { ctl_hs |= CP210X_SERIAL_CTS_HANDSHAKE; - flow_repl &= ~CP210X_SERIAL_RTS_MASK; - flow_repl |= CP210X_SERIAL_RTS_SHIFT(CP210X_SERIAL_RTS_FLOW_CTL); + if (port_priv->rts) + flow_repl |= CP210X_SERIAL_RTS_FLOW_CTL; + else + flow_repl |= CP210X_SERIAL_RTS_INACTIVE; + port_priv->crtscts = true; } else { ctl_hs &= ~CP210X_SERIAL_CTS_HANDSHAKE; - flow_repl &= ~CP210X_SERIAL_RTS_MASK; - flow_repl |= CP210X_SERIAL_RTS_SHIFT(CP210X_SERIAL_RTS_ACTIVE); + if (port_priv->rts) + flow_repl |= CP210X_SERIAL_RTS_ACTIVE; + else + flow_repl |= CP210X_SERIAL_RTS_INACTIVE; + port_priv->crtscts = false; } - dev_dbg(&port->dev, "%s - ulControlHandshake=0x%08x, ulFlowReplace=0x%08x\n", - __func__, ctl_hs, flow_repl); + if (I_IXOFF(tty)) + flow_repl |= CP210X_SERIAL_AUTO_RECEIVE; + else + flow_repl &= ~CP210X_SERIAL_AUTO_RECEIVE; + + if (I_IXON(tty)) + flow_repl |= CP210X_SERIAL_AUTO_TRANSMIT; + else + flow_repl &= ~CP210X_SERIAL_AUTO_TRANSMIT; + + flow_ctl.ulXonLimit = cpu_to_le32(128); + flow_ctl.ulXoffLimit = cpu_to_le32(128); + + dev_dbg(&port->dev, "%s - ctrl = 0x%02x, flow = 0x%02x\n", __func__, + ctl_hs, flow_repl); flow_ctl.ulControlHandshake = cpu_to_le32(ctl_hs); flow_ctl.ulFlowReplace = cpu_to_le32(flow_repl); cp210x_write_reg_block(port, CP210X_SET_FLOW, &flow_ctl, sizeof(flow_ctl)); +out_unlock: + mutex_unlock(&port_priv->mutex); } static void cp210x_set_termios(struct tty_struct *tty, @@ -1210,28 +1289,77 @@ static int cp210x_tiocmset(struct tty_struct *tty, static int cp210x_tiocmset_port(struct usb_serial_port *port, unsigned int set, unsigned int clear) { + struct cp210x_port_private *port_priv = usb_get_serial_port_data(port); + struct cp210x_flow_ctl flow_ctl; + u32 ctl_hs, flow_repl; u16 control = 0; + int ret; + + mutex_lock(&port_priv->mutex); if (set & TIOCM_RTS) { + port_priv->rts = true; control |= CONTROL_RTS; control |= CONTROL_WRITE_RTS; } if (set & TIOCM_DTR) { + port_priv->dtr = true; control |= CONTROL_DTR; control |= CONTROL_WRITE_DTR; } if (clear & TIOCM_RTS) { + port_priv->rts = false; control &= ~CONTROL_RTS; control |= CONTROL_WRITE_RTS; } if (clear & TIOCM_DTR) { + port_priv->dtr = false; control &= ~CONTROL_DTR; control |= CONTROL_WRITE_DTR; } - dev_dbg(&port->dev, "%s - control = 0x%.4x\n", __func__, control); + /* + * Use SET_FLOW to set DTR and enable/disable auto-RTS when hardware + * flow control is enabled. 
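From user space, the auto XON/XOFF support wired up above is driven entirely through termios: the driver programs the device's start/stop characters via CP210X_SET_CHARS and flips the auto-transmit/auto-receive bits to match IXON/IXOFF. A minimal user-space sketch (hypothetical helper name):

#include <termios.h>

static int enable_sw_flow_control(int fd)
{
	struct termios tio;

	if (tcgetattr(fd, &tio))
		return -1;
	tio.c_iflag |= IXON | IXOFF;	/* outbound and inbound XON/XOFF */
	tio.c_cc[VSTART] = 0x11;	/* DC1 */
	tio.c_cc[VSTOP] = 0x13;		/* DC3 */
	return tcsetattr(fd, TCSANOW, &tio);
}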
+ */ + if (port_priv->crtscts && control & CONTROL_WRITE_RTS) { + ret = cp210x_read_reg_block(port, CP210X_GET_FLOW, &flow_ctl, + sizeof(flow_ctl)); + if (ret) + goto out_unlock; + + ctl_hs = le32_to_cpu(flow_ctl.ulControlHandshake); + flow_repl = le32_to_cpu(flow_ctl.ulFlowReplace); + + ctl_hs &= ~CP210X_SERIAL_DTR_MASK; + if (port_priv->dtr) + ctl_hs |= CP210X_SERIAL_DTR_ACTIVE; + else + ctl_hs |= CP210X_SERIAL_DTR_INACTIVE; - return cp210x_write_u16_reg(port, CP210X_SET_MHS, control); + flow_repl &= ~CP210X_SERIAL_RTS_MASK; + if (port_priv->rts) + flow_repl |= CP210X_SERIAL_RTS_FLOW_CTL; + else + flow_repl |= CP210X_SERIAL_RTS_INACTIVE; + + flow_ctl.ulControlHandshake = cpu_to_le32(ctl_hs); + flow_ctl.ulFlowReplace = cpu_to_le32(flow_repl); + + dev_dbg(&port->dev, "%s - ctrl = 0x%02x, flow = 0x%02x\n", + __func__, ctl_hs, flow_repl); + + ret = cp210x_write_reg_block(port, CP210X_SET_FLOW, &flow_ctl, + sizeof(flow_ctl)); + } else { + dev_dbg(&port->dev, "%s - control = 0x%04x\n", __func__, control); + + ret = cp210x_write_u16_reg(port, CP210X_SET_MHS, control); + } +out_unlock: + mutex_unlock(&port_priv->mutex); + + return ret; } static void cp210x_dtr_rts(struct usb_serial_port *port, int on) @@ -1259,7 +1387,7 @@ static int cp210x_tiocmget(struct tty_struct *tty) |((control & CONTROL_RING)? TIOCM_RI : 0) |((control & CONTROL_DCD) ? TIOCM_CD : 0); - dev_dbg(&port->dev, "%s - control = 0x%.2x\n", __func__, control); + dev_dbg(&port->dev, "%s - control = 0x%02x\n", __func__, control); return result; } @@ -1708,20 +1836,19 @@ static int cp210x_port_probe(struct usb_serial_port *port) return -ENOMEM; port_priv->bInterfaceNumber = cp210x_interface_num(serial); + mutex_init(&port_priv->mutex); usb_set_serial_port_data(port, port_priv); return 0; } -static int cp210x_port_remove(struct usb_serial_port *port) +static void cp210x_port_remove(struct usb_serial_port *port) { struct cp210x_port_private *port_priv; port_priv = usb_get_serial_port_data(port); kfree(port_priv); - - return 0; } static void cp210x_init_max_speed(struct usb_serial *serial) diff --git a/drivers/usb/serial/cyberjack.c b/drivers/usb/serial/cyberjack.c index 2e40908963da..cf389224d528 100644 --- a/drivers/usb/serial/cyberjack.c +++ b/drivers/usb/serial/cyberjack.c @@ -47,7 +47,7 @@ /* Function prototypes */ static int cyberjack_port_probe(struct usb_serial_port *port); -static int cyberjack_port_remove(struct usb_serial_port *port); +static void cyberjack_port_remove(struct usb_serial_port *port); static int cyberjack_open(struct tty_struct *tty, struct usb_serial_port *port); static void cyberjack_close(struct usb_serial_port *port); @@ -120,7 +120,7 @@ static int cyberjack_port_probe(struct usb_serial_port *port) return 0; } -static int cyberjack_port_remove(struct usb_serial_port *port) +static void cyberjack_port_remove(struct usb_serial_port *port) { struct cyberjack_private *priv; @@ -128,8 +128,6 @@ static int cyberjack_port_remove(struct usb_serial_port *port) priv = usb_get_serial_port_data(port); kfree(priv); - - return 0; } static int cyberjack_open(struct tty_struct *tty, diff --git a/drivers/usb/serial/cypress_m8.c b/drivers/usb/serial/cypress_m8.c index cc028601c388..166ee2286fda 100644 --- a/drivers/usb/serial/cypress_m8.c +++ b/drivers/usb/serial/cypress_m8.c @@ -115,7 +115,7 @@ struct cypress_private { static int cypress_earthmate_port_probe(struct usb_serial_port *port); static int cypress_hidcom_port_probe(struct usb_serial_port *port); static int cypress_ca42v2_port_probe(struct usb_serial_port *port); 
-static int cypress_port_remove(struct usb_serial_port *port); +static void cypress_port_remove(struct usb_serial_port *port); static int cypress_open(struct tty_struct *tty, struct usb_serial_port *port); static void cypress_close(struct usb_serial_port *port); static void cypress_dtr_rts(struct usb_serial_port *port, int on); @@ -564,7 +564,7 @@ static int cypress_ca42v2_port_probe(struct usb_serial_port *port) return 0; } -static int cypress_port_remove(struct usb_serial_port *port) +static void cypress_port_remove(struct usb_serial_port *port) { struct cypress_private *priv; @@ -572,8 +572,6 @@ static int cypress_port_remove(struct usb_serial_port *port) kfifo_free(&priv->write_fifo); kfree(priv); - - return 0; } static int cypress_open(struct tty_struct *tty, struct usb_serial_port *port) diff --git a/drivers/usb/serial/digi_acceleport.c b/drivers/usb/serial/digi_acceleport.c index 0ecd5316d85f..8b2f06539f2c 100644 --- a/drivers/usb/serial/digi_acceleport.c +++ b/drivers/usb/serial/digi_acceleport.c @@ -233,7 +233,7 @@ static int digi_startup(struct usb_serial *serial); static void digi_disconnect(struct usb_serial *serial); static void digi_release(struct usb_serial *serial); static int digi_port_probe(struct usb_serial_port *port); -static int digi_port_remove(struct usb_serial_port *port); +static void digi_port_remove(struct usb_serial_port *port); static void digi_read_bulk_callback(struct urb *urb); static int digi_read_inb_callback(struct urb *urb); static int digi_read_oob_callback(struct urb *urb); @@ -1281,14 +1281,12 @@ static int digi_port_probe(struct usb_serial_port *port) return digi_port_init(port, port->port_number); } -static int digi_port_remove(struct usb_serial_port *port) +static void digi_port_remove(struct usb_serial_port *port) { struct digi_port *priv; priv = usb_get_serial_port_data(port); kfree(priv); - - return 0; } static void digi_read_bulk_callback(struct urb *urb) diff --git a/drivers/usb/serial/f81232.c b/drivers/usb/serial/f81232.c index 0c7eacc630e0..6a8f39147d8e 100644 --- a/drivers/usb/serial/f81232.c +++ b/drivers/usb/serial/f81232.c @@ -192,13 +192,9 @@ static int f81232_set_register(struct usb_serial_port *port, u16 reg, u8 val) tmp, sizeof(val), USB_CTRL_SET_TIMEOUT); - if (status != sizeof(val)) { + if (status < 0) { dev_err(&port->dev, "%s failed status: %d\n", __func__, status); - - if (status < 0) - status = usb_translate_errors(status); - else - status = -EIO; + status = usb_translate_errors(status); } else { status = 0; } @@ -886,10 +882,6 @@ static int f81534a_ctrl_set_register(struct usb_interface *intf, u16 reg, status = usb_translate_errors(status); if (status == -EIO) continue; - } else if (status != size) { - /* Retry on short transfers */ - status = -EIO; - continue; } else { status = 0; } diff --git a/drivers/usb/serial/f81534.c b/drivers/usb/serial/f81534.c index 5661fd03e545..a763b362f081 100644 --- a/drivers/usb/serial/f81534.c +++ b/drivers/usb/serial/f81534.c @@ -235,11 +235,9 @@ static int f81534_set_register(struct usb_serial *serial, u16 reg, u8 data) USB_TYPE_VENDOR | USB_DIR_OUT, reg, 0, tmp, sizeof(u8), F81534_USB_TIMEOUT); - if (status > 0) { + if (status == sizeof(u8)) { status = 0; break; - } else if (status == 0) { - status = -EIO; } } @@ -1432,12 +1430,11 @@ static int f81534_port_probe(struct usb_serial_port *port) return f81534_set_port_output_pin(port); } -static int f81534_port_remove(struct usb_serial_port *port) +static void f81534_port_remove(struct usb_serial_port *port) { struct f81534_port_private 
*port_priv = usb_get_serial_port_data(port); flush_work(&port_priv->lsr_work); - return 0; } static int f81534_tiocmget(struct tty_struct *tty) diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index 94398f89e600..c867592477c9 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c @@ -1069,7 +1069,7 @@ static const char *ftdi_chip_name[] = { static int ftdi_sio_probe(struct usb_serial *serial, const struct usb_device_id *id); static int ftdi_sio_port_probe(struct usb_serial_port *port); -static int ftdi_sio_port_remove(struct usb_serial_port *port); +static void ftdi_sio_port_remove(struct usb_serial_port *port); static int ftdi_open(struct tty_struct *tty, struct usb_serial_port *port); static void ftdi_dtr_rts(struct usb_serial_port *port, int on); static void ftdi_process_read_urb(struct urb *urb); @@ -1153,13 +1153,13 @@ static unsigned short int ftdi_232am_baud_base_to_divisor(int baud, int base) divisor = divisor3 >> 3; divisor3 &= 0x7; if (divisor3 == 1) - divisor |= 0xc000; + divisor |= 0xc000; /* +0.125 */ else if (divisor3 >= 4) - divisor |= 0x4000; + divisor |= 0x4000; /* +0.5 */ else if (divisor3 != 0) - divisor |= 0x8000; + divisor |= 0x8000; /* +0.25 */ else if (divisor == 1) - divisor = 0; /* special case for maximum baud rate */ + divisor = 0; /* special case for maximum baud rate */ return divisor; } @@ -1177,9 +1177,9 @@ static u32 ftdi_232bm_baud_base_to_divisor(int baud, int base) divisor = divisor3 >> 3; divisor |= (u32)divfrac[divisor3 & 0x7] << 14; /* Deal with special cases for highest baud rates. */ - if (divisor == 1) + if (divisor == 1) /* 1.0 */ divisor = 0; - else if (divisor == 0x4001) + else if (divisor == 0x4001) /* 1.5 */ divisor = 1; return divisor; } @@ -1201,9 +1201,9 @@ static u32 ftdi_2232h_baud_base_to_divisor(int baud, int base) divisor = divisor3 >> 3; divisor |= (u32)divfrac[divisor3 & 0x7] << 14; /* Deal with special cases for highest baud rates. */ - if (divisor == 1) + if (divisor == 1) /* 1.0 */ divisor = 0; - else if (divisor == 0x4001) + else if (divisor == 0x4001) /* 1.5 */ divisor = 1; /* * Set this bit to turn off a divide by 2.5 on baud rate generator @@ -1386,8 +1386,9 @@ static int change_speed(struct tty_struct *tty, struct usb_serial_port *port) index_value = get_ftdi_divisor(tty, port); value = (u16)index_value; index = (u16)(index_value >> 16); - if ((priv->chip_type == FT2232C) || (priv->chip_type == FT2232H) || - (priv->chip_type == FT4232H) || (priv->chip_type == FT232H)) { + if (priv->chip_type == FT2232C || priv->chip_type == FT2232H || + priv->chip_type == FT4232H || priv->chip_type == FT232H || + priv->chip_type == FTX) { /* Probably the BM type needs the MSB of the encoded fractional * divider also moved like for the chips above. Any infos? 
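A worked example for the fractional-divisor annotations above, assuming the 48 MHz base clock these chips use: on the FT232AM path, 9600 baud gives divisor3 = 48000000 / (2 * 9600) = 2500 eighths; the integer part is 2500 >> 3 = 312 and the remaining 4/8 selects the "+0.5" code 0x4000, so the encoded divisor is 0x4000 | 312 = 0x4138, i.e. 3 MHz / 312.5 = exactly 9600 baud. Reduced to code (simplified to this single case, not the driver's full routine):

static u16 ftdi_232am_divisor_example(void)
{
	int divisor3 = DIV_ROUND_CLOSEST(48000000, 2 * 9600);	/* 2500 */
	u16 divisor = divisor3 >> 3;				/* 312 */

	if ((divisor3 & 0x7) == 4)
		divisor |= 0x4000;				/* +0.5 */

	return divisor;						/* 0x4138 */
}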
*/ index = (u16)((index << 8) | priv->interface); @@ -2399,7 +2400,7 @@ static int ftdi_stmclite_probe(struct usb_serial *serial) return 0; } -static int ftdi_sio_port_remove(struct usb_serial_port *port) +static void ftdi_sio_port_remove(struct usb_serial_port *port) { struct ftdi_private *priv = usb_get_serial_port_data(port); @@ -2408,8 +2409,6 @@ static int ftdi_sio_port_remove(struct usb_serial_port *port) remove_sysfs_attrs(port); kfree(priv); - - return 0; } static int ftdi_open(struct tty_struct *tty, struct usb_serial_port *port) diff --git a/drivers/usb/serial/garmin_gps.c b/drivers/usb/serial/garmin_gps.c index c02c19bb1183..50e8bdc77e71 100644 --- a/drivers/usb/serial/garmin_gps.c +++ b/drivers/usb/serial/garmin_gps.c @@ -1401,7 +1401,7 @@ err_free: } -static int garmin_port_remove(struct usb_serial_port *port) +static void garmin_port_remove(struct usb_serial_port *port) { struct garmin_data *garmin_data_p = usb_get_serial_port_data(port); @@ -1409,7 +1409,6 @@ static int garmin_port_remove(struct usb_serial_port *port) usb_kill_urb(port->interrupt_in_urb); del_timer_sync(&garmin_data_p->timer); kfree(garmin_data_p); - return 0; } diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c index ba5d8df69518..a493670c06e6 100644 --- a/drivers/usb/serial/io_edgeport.c +++ b/drivers/usb/serial/io_edgeport.c @@ -293,7 +293,7 @@ static int edge_startup(struct usb_serial *serial); static void edge_disconnect(struct usb_serial *serial); static void edge_release(struct usb_serial *serial); static int edge_port_probe(struct usb_serial_port *port); -static int edge_port_remove(struct usb_serial_port *port); +static void edge_port_remove(struct usb_serial_port *port); /* function prototypes for all of our local functions */ @@ -3078,14 +3078,12 @@ static int edge_port_probe(struct usb_serial_port *port) return 0; } -static int edge_port_remove(struct usb_serial_port *port) +static void edge_port_remove(struct usb_serial_port *port) { struct edgeport_port *edge_port; edge_port = usb_get_serial_port_data(port); kfree(edge_port); - - return 0; } static struct usb_serial_driver edgeport_2port_device = { diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c index c327d4cf7928..e800547be9e0 100644 --- a/drivers/usb/serial/io_ti.c +++ b/drivers/usb/serial/io_ti.c @@ -266,7 +266,7 @@ static int ti_vread_sync(struct usb_device *dev, __u8 request, if (status < 0) return status; if (status != size) { - dev_dbg(&dev->dev, "%s - wanted to write %d, but only wrote %d\n", + dev_dbg(&dev->dev, "%s - wanted to read %d, but only read %d\n", __func__, size, status); return -ECOMM; } @@ -283,11 +283,7 @@ static int ti_vsend_sync(struct usb_device *dev, u8 request, u16 value, value, index, data, size, timeout); if (status < 0) return status; - if (status != size) { - dev_dbg(&dev->dev, "%s - wanted to write %d, but only wrote %d\n", - __func__, size, status); - return -ECOMM; - } + return 0; } @@ -2629,15 +2625,13 @@ err: return ret; } -static int edge_port_remove(struct usb_serial_port *port) +static void edge_port_remove(struct usb_serial_port *port) { struct edgeport_port *edge_port; edge_port = usb_get_serial_port_data(port); edge_remove_sysfs_attrs(port); kfree(edge_port); - - return 0; } /* Sysfs Attributes */ diff --git a/drivers/usb/serial/iuu_phoenix.c b/drivers/usb/serial/iuu_phoenix.c index f1201d4de297..093afd67a664 100644 --- a/drivers/usb/serial/iuu_phoenix.c +++ b/drivers/usb/serial/iuu_phoenix.c @@ -100,7 +100,7 @@ static int iuu_port_probe(struct 
usb_serial_port *port) return 0; } -static int iuu_port_remove(struct usb_serial_port *port) +static void iuu_port_remove(struct usb_serial_port *port) { struct iuu_private *priv = usb_get_serial_port_data(port); @@ -108,8 +108,6 @@ static int iuu_port_remove(struct usb_serial_port *port) kfree(priv->writebuf); kfree(priv->buf); kfree(priv); - - return 0; } static int iuu_tiocmset(struct tty_struct *tty, @@ -532,23 +530,29 @@ static int iuu_uart_flush(struct usb_serial_port *port) struct device *dev = &port->dev; int i; int status; - u8 rxcmd = IUU_UART_RX; + u8 *rxcmd; struct iuu_private *priv = usb_get_serial_port_data(port); if (iuu_led(port, 0xF000, 0, 0, 0xFF) < 0) return -EIO; + rxcmd = kmalloc(1, GFP_KERNEL); + if (!rxcmd) + return -ENOMEM; + + rxcmd[0] = IUU_UART_RX; + for (i = 0; i < 2; i++) { - status = bulk_immediate(port, &rxcmd, 1); + status = bulk_immediate(port, rxcmd, 1); if (status != IUU_OPERATION_OK) { dev_dbg(dev, "%s - uart_flush_write error\n", __func__); - return status; + goto out_free; } status = read_immediate(port, &priv->len, 1); if (status != IUU_OPERATION_OK) { dev_dbg(dev, "%s - uart_flush_read error\n", __func__); - return status; + goto out_free; } if (priv->len > 0) { @@ -556,12 +560,16 @@ static int iuu_uart_flush(struct usb_serial_port *port) status = read_immediate(port, priv->buf, priv->len); if (status != IUU_OPERATION_OK) { dev_dbg(dev, "%s - uart_flush_read error\n", __func__); - return status; + goto out_free; } } } dev_dbg(dev, "%s - uart_flush_read OK!\n", __func__); iuu_led(port, 0, 0xF000, 0, 0xFF); + +out_free: + kfree(rxcmd); + return status; } diff --git a/drivers/usb/serial/keyspan.c b/drivers/usb/serial/keyspan.c index aa3dbce22cfb..622077dcc344 100644 --- a/drivers/usb/serial/keyspan.c +++ b/drivers/usb/serial/keyspan.c @@ -49,7 +49,7 @@ static int keyspan_startup(struct usb_serial *serial); static void keyspan_disconnect(struct usb_serial *serial); static void keyspan_release(struct usb_serial *serial); static int keyspan_port_probe(struct usb_serial_port *port); -static int keyspan_port_remove(struct usb_serial_port *port); +static void keyspan_port_remove(struct usb_serial_port *port); static int keyspan_write_room(struct tty_struct *tty); static int keyspan_write(struct tty_struct *tty, struct usb_serial_port *port, const unsigned char *buf, int count); @@ -2985,7 +2985,7 @@ err_in_buffer: return -ENOMEM; } -static int keyspan_port_remove(struct usb_serial_port *port) +static void keyspan_port_remove(struct usb_serial_port *port) { struct keyspan_port_private *p_priv; int i; @@ -3014,8 +3014,6 @@ static int keyspan_port_remove(struct usb_serial_port *port) kfree(p_priv->in_buffer[i]); kfree(p_priv); - - return 0; } /* Structs for the devices, pre and post renumeration. 
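The iuu_uart_flush() rework above follows the general USB rule that transfer buffers must be DMA-able: data handed to the USB core cannot live on the stack, so the on-stack rxcmd byte becomes a small heap allocation that is reused across the loop and freed on every exit path. The minimal pattern, sketched:

u8 *cmd;

cmd = kmalloc(1, GFP_KERNEL);	/* DMA-able, unlike stack memory */
if (!cmd)
	return -ENOMEM;
cmd[0] = IUU_UART_RX;
status = bulk_immediate(port, cmd, 1);	/* performs the USB transfer */
kfree(cmd);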
*/ diff --git a/drivers/usb/serial/keyspan_pda.c b/drivers/usb/serial/keyspan_pda.c index e6f933e8d25f..39b0f5f344c2 100644 --- a/drivers/usb/serial/keyspan_pda.c +++ b/drivers/usb/serial/keyspan_pda.c @@ -672,14 +672,12 @@ static int keyspan_pda_port_probe(struct usb_serial_port *port) return 0; } -static int keyspan_pda_port_remove(struct usb_serial_port *port) +static void keyspan_pda_port_remove(struct usb_serial_port *port) { struct keyspan_pda_private *priv; priv = usb_get_serial_port_data(port); kfree(priv); - - return 0; } static struct usb_serial_driver keyspan_pda_fake_device = { diff --git a/drivers/usb/serial/kl5kusb105.c b/drivers/usb/serial/kl5kusb105.c index 5f6b82ebccc5..f1e9628a9907 100644 --- a/drivers/usb/serial/kl5kusb105.c +++ b/drivers/usb/serial/kl5kusb105.c @@ -52,7 +52,7 @@ * Function prototypes */ static int klsi_105_port_probe(struct usb_serial_port *port); -static int klsi_105_port_remove(struct usb_serial_port *port); +static void klsi_105_port_remove(struct usb_serial_port *port); static int klsi_105_open(struct tty_struct *tty, struct usb_serial_port *port); static void klsi_105_close(struct usb_serial_port *port); static void klsi_105_set_termios(struct tty_struct *tty, @@ -231,14 +231,12 @@ static int klsi_105_port_probe(struct usb_serial_port *port) return 0; } -static int klsi_105_port_remove(struct usb_serial_port *port) +static void klsi_105_port_remove(struct usb_serial_port *port) { struct klsi_105_private *priv; priv = usb_get_serial_port_data(port); kfree(priv); - - return 0; } static int klsi_105_open(struct tty_struct *tty, struct usb_serial_port *port) diff --git a/drivers/usb/serial/kobil_sct.c b/drivers/usb/serial/kobil_sct.c index 49aacb0a327c..a9bc546626ab 100644 --- a/drivers/usb/serial/kobil_sct.c +++ b/drivers/usb/serial/kobil_sct.c @@ -48,7 +48,7 @@ /* Function prototypes */ static int kobil_port_probe(struct usb_serial_port *probe); -static int kobil_port_remove(struct usb_serial_port *probe); +static void kobil_port_remove(struct usb_serial_port *probe); static int kobil_open(struct tty_struct *tty, struct usb_serial_port *port); static void kobil_close(struct usb_serial_port *port); static int kobil_write(struct tty_struct *tty, struct usb_serial_port *port, @@ -143,14 +143,12 @@ static int kobil_port_probe(struct usb_serial_port *port) } -static int kobil_port_remove(struct usb_serial_port *port) +static void kobil_port_remove(struct usb_serial_port *port) { struct kobil_private *priv; priv = usb_get_serial_port_data(port); kfree(priv); - - return 0; } static void kobil_init_termios(struct tty_struct *tty) diff --git a/drivers/usb/serial/mct_u232.c b/drivers/usb/serial/mct_u232.c index 7887c312d9a9..ecd5b921e374 100644 --- a/drivers/usb/serial/mct_u232.c +++ b/drivers/usb/serial/mct_u232.c @@ -39,7 +39,7 @@ * Function prototypes */ static int mct_u232_port_probe(struct usb_serial_port *port); -static int mct_u232_port_remove(struct usb_serial_port *remove); +static void mct_u232_port_remove(struct usb_serial_port *remove); static int mct_u232_open(struct tty_struct *tty, struct usb_serial_port *port); static void mct_u232_close(struct usb_serial_port *port); static void mct_u232_dtr_rts(struct usb_serial_port *port, int on); @@ -400,14 +400,12 @@ static int mct_u232_port_probe(struct usb_serial_port *port) return 0; } -static int mct_u232_port_remove(struct usb_serial_port *port) +static void mct_u232_port_remove(struct usb_serial_port *port) { struct mct_u232_private *priv; priv = usb_get_serial_port_data(port); kfree(priv); - - 
return 0; } static int mct_u232_open(struct tty_struct *tty, struct usb_serial_port *port) diff --git a/drivers/usb/serial/metro-usb.c b/drivers/usb/serial/metro-usb.c index e63cea02cfd8..0bfe4459c37f 100644 --- a/drivers/usb/serial/metro-usb.c +++ b/drivers/usb/serial/metro-usb.c @@ -256,14 +256,12 @@ static int metrousb_port_probe(struct usb_serial_port *port) return 0; } -static int metrousb_port_remove(struct usb_serial_port *port) +static void metrousb_port_remove(struct usb_serial_port *port) { struct metrousb_private *metro_priv; metro_priv = usb_get_serial_port_data(port); kfree(metro_priv); - - return 0; } static void metrousb_throttle(struct tty_struct *tty) diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c index 41ee2984a0df..701dfb32b129 100644 --- a/drivers/usb/serial/mos7720.c +++ b/drivers/usb/serial/mos7720.c @@ -215,8 +215,10 @@ static int read_mos_reg(struct usb_serial *serial, unsigned int serial_portnum, int status; buf = kmalloc(1, GFP_KERNEL); - if (!buf) + if (!buf) { + *data = 0; return -ENOMEM; + } status = usb_control_msg(usbdev, pipe, request, requesttype, value, index, buf, 1, MOS_WDR_TIMEOUT); @@ -1092,8 +1094,10 @@ static int mos7720_write(struct tty_struct *tty, struct usb_serial_port *port, if (urb->transfer_buffer == NULL) { urb->transfer_buffer = kmalloc(URB_TRANSFER_BUFFER_SIZE, GFP_ATOMIC); - if (!urb->transfer_buffer) + if (!urb->transfer_buffer) { + bytes_sent = -ENOMEM; goto exit; + } } transfer_size = min(count, URB_TRANSFER_BUFFER_SIZE); @@ -1756,14 +1760,12 @@ static int mos7720_port_probe(struct usb_serial_port *port) return 0; } -static int mos7720_port_remove(struct usb_serial_port *port) +static void mos7720_port_remove(struct usb_serial_port *port) { struct moschip_port *mos7720_port; mos7720_port = usb_get_serial_port_data(port); kfree(mos7720_port); - - return 0; } static struct usb_serial_driver moschip7720_2port_driver = { diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c index 23f91d658cb4..1bf0d066f55a 100644 --- a/drivers/usb/serial/mos7840.c +++ b/drivers/usb/serial/mos7840.c @@ -883,8 +883,10 @@ static int mos7840_write(struct tty_struct *tty, struct usb_serial_port *port, if (urb->transfer_buffer == NULL) { urb->transfer_buffer = kmalloc(URB_TRANSFER_BUFFER_SIZE, GFP_ATOMIC); - if (!urb->transfer_buffer) + if (!urb->transfer_buffer) { + bytes_sent = -ENOMEM; goto exit; + } } transfer_size = min(count, URB_TRANSFER_BUFFER_SIZE); @@ -1743,7 +1745,7 @@ error: return status; } -static int mos7840_port_remove(struct usb_serial_port *port) +static void mos7840_port_remove(struct usb_serial_port *port) { struct moschip_port *mos7840_port = usb_get_serial_port_data(port); @@ -1760,8 +1762,6 @@ static int mos7840_port_remove(struct usb_serial_port *port) } kfree(mos7840_port); - - return 0; } static struct usb_serial_driver moschip7840_4port_device = { diff --git a/drivers/usb/serial/mxuport.c b/drivers/usb/serial/mxuport.c index 5d38c2a0f590..eb45a9b0005c 100644 --- a/drivers/usb/serial/mxuport.c +++ b/drivers/usb/serial/mxuport.c @@ -261,13 +261,6 @@ static int mxuport_send_ctrl_data_urb(struct usb_serial *serial, return status; } - if (status != size) { - dev_err(&serial->interface->dev, - "%s - short write (%d / %zd)\n", - __func__, status, size); - return -EIO; - } - return 0; } diff --git a/drivers/usb/serial/omninet.c b/drivers/usb/serial/omninet.c index 5b6e982a9376..83c62f920c50 100644 --- a/drivers/usb/serial/omninet.c +++ b/drivers/usb/serial/omninet.c @@ -36,7 +36,7 @@ static int 
omninet_prepare_write_buffer(struct usb_serial_port *port, static int omninet_calc_num_ports(struct usb_serial *serial, struct usb_serial_endpoints *epds); static int omninet_port_probe(struct usb_serial_port *port); -static int omninet_port_remove(struct usb_serial_port *port); +static void omninet_port_remove(struct usb_serial_port *port); static const struct usb_device_id id_table[] = { { USB_DEVICE(ZYXEL_VENDOR_ID, ZYXEL_OMNINET_ID) }, @@ -121,14 +121,12 @@ static int omninet_port_probe(struct usb_serial_port *port) return 0; } -static int omninet_port_remove(struct usb_serial_port *port) +static void omninet_port_remove(struct usb_serial_port *port) { struct omninet_data *od; od = usb_get_serial_port_data(port); kfree(od); - - return 0; } #define OMNINET_HEADERLEN 4 diff --git a/drivers/usb/serial/opticon.c b/drivers/usb/serial/opticon.c index 0af76800bd78..eecb72aef83e 100644 --- a/drivers/usb/serial/opticon.c +++ b/drivers/usb/serial/opticon.c @@ -385,13 +385,11 @@ static int opticon_port_probe(struct usb_serial_port *port) return 0; } -static int opticon_port_remove(struct usb_serial_port *port) +static void opticon_port_remove(struct usb_serial_port *port) { struct opticon_private *priv = usb_get_serial_port_data(port); kfree(priv); - - return 0; } static struct usb_serial_driver opticon_device = { diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index 2c21e34235bb..c6969ca72839 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -425,6 +425,8 @@ static void option_instat_callback(struct urb *urb); #define CINTERION_PRODUCT_AHXX_2RMNET 0x0084 #define CINTERION_PRODUCT_AHXX_AUDIO 0x0085 #define CINTERION_PRODUCT_CLS8 0x00b0 +#define CINTERION_PRODUCT_MV31_MBIM 0x00b3 +#define CINTERION_PRODUCT_MV31_RMNET 0x00b7 /* Olivetti products */ #define OLIVETTI_VENDOR_ID 0x0b3c @@ -1117,6 +1119,8 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM12, 0xff, 0xff, 0xff), .driver_info = RSVD(1) | RSVD(2) | RSVD(3) | RSVD(4) | NUMEP2 }, { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM12, 0xff, 0, 0) }, + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, 0x0620, 0xff, 0xff, 0x30) }, /* EM160R-GL */ + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, 0x0620, 0xff, 0, 0) }, { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0xff, 0x30) }, { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0, 0) }, { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0xff, 0x10), @@ -1565,7 +1569,8 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1272, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1273, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1274, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1275, 0xff, 0xff, 0xff) }, + { USB_DEVICE(ZTE_VENDOR_ID, 0x1275), /* ZTE P685M */ + .driver_info = RSVD(3) | RSVD(4) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1276, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1277, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1278, 0xff, 0xff, 0xff) }, @@ -1912,6 +1917,10 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC25_MDMNET) }, { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) }, /* HC28 enumerates with 
Siemens or Cinterion VID depending on FW revision */ { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC28_MDMNET) }, + { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV31_MBIM, 0xff), + .driver_info = RSVD(3)}, + { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV31_RMNET, 0xff), + .driver_info = RSVD(0)}, { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100), .driver_info = RSVD(4) }, { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD120), @@ -2057,6 +2066,7 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0105, 0xff), /* Fibocom NL678 series */ .driver_info = RSVD(6) }, { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a0, 0xff) }, /* Fibocom NL668-AM/NL652-EU (laptop MBIM) */ + { USB_DEVICE_INTERFACE_CLASS(0x2df3, 0x9d03, 0xff) }, /* LongSung M5710 */ { USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1404, 0xff) }, /* GosunCn GM500 RNDIS */ { USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1405, 0xff) }, /* GosunCn GM500 MBIM */ { USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1406, 0xff) }, /* GosunCn GM500 ECM/NCM */ diff --git a/drivers/usb/serial/oti6858.c b/drivers/usb/serial/oti6858.c index 8151dd7a45e8..65cd0341fa78 100644 --- a/drivers/usb/serial/oti6858.c +++ b/drivers/usb/serial/oti6858.c @@ -132,7 +132,7 @@ static int oti6858_tiocmget(struct tty_struct *tty); static int oti6858_tiocmset(struct tty_struct *tty, unsigned int set, unsigned int clear); static int oti6858_port_probe(struct usb_serial_port *port); -static int oti6858_port_remove(struct usb_serial_port *port); +static void oti6858_port_remove(struct usb_serial_port *port); /* device info */ static struct usb_serial_driver oti6858_device = { @@ -344,14 +344,12 @@ static int oti6858_port_probe(struct usb_serial_port *port) return 0; } -static int oti6858_port_remove(struct usb_serial_port *port) +static void oti6858_port_remove(struct usb_serial_port *port) { struct oti6858_private *priv; priv = usb_get_serial_port_data(port); kfree(priv); - - return 0; } static int oti6858_write(struct tty_struct *tty, struct usb_serial_port *port, diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c index be8067017eaa..eed9acd1ae08 100644 --- a/drivers/usb/serial/pl2303.c +++ b/drivers/usb/serial/pl2303.c @@ -183,6 +183,7 @@ struct pl2303_type_data { speed_t max_baud_rate; unsigned long quirks; unsigned int no_autoxonxoff:1; + unsigned int no_divisors:1; }; struct pl2303_serial_private { @@ -209,6 +210,7 @@ static const struct pl2303_type_data pl2303_type_data[TYPE_COUNT] = { }, [TYPE_HXN] = { .max_baud_rate = 12000000, + .no_divisors = true, }, }; @@ -448,13 +450,11 @@ static int pl2303_port_probe(struct usb_serial_port *port) return 0; } -static int pl2303_port_remove(struct usb_serial_port *port) +static void pl2303_port_remove(struct usb_serial_port *port) { struct pl2303_private *priv = usb_get_serial_port_data(port); kfree(priv); - - return 0; } static int pl2303_set_control_lines(struct usb_serial_port *port, u8 value) @@ -571,8 +571,12 @@ static void pl2303_encode_baud_rate(struct tty_struct *tty, baud = min_t(speed_t, baud, spriv->type->max_baud_rate); /* * Use direct method for supported baud rates, otherwise use divisors. + * Newer chip types do not support divisor encoding. 
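The new no_divisors flag in the pl2303 hunk here records that HXN-type devices only accept the direct baud-rate encoding and must never fall back to the legacy divisor encoding. A rough sketch of the resulting selection logic, with nearest_direct() standing in for pl2303_get_supported_baud_rate():

    /* Illustrative only: gate the divisor fallback on a per-type flag. */
    static unsigned int pick_supported_baud(unsigned int baud, bool no_divisors,
                                            unsigned int (*nearest_direct)(unsigned int))
    {
            if (no_divisors)
                    return baud;            /* direct encoding only (HXN) */

            return nearest_direct(baud);    /* differing result selects divisors */
    }

When the returned rate differs from the request, the caller takes the divisor path instead, exactly the case the flag forbids on HXN.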
*/ - baud_sup = pl2303_get_supported_baud_rate(baud); + if (spriv->type->no_divisors) + baud_sup = baud; + else + baud_sup = pl2303_get_supported_baud_rate(baud); if (baud == baud_sup) baud = pl2303_encode_baud_rate_direct(buf, baud); diff --git a/drivers/usb/serial/quatech2.c b/drivers/usb/serial/quatech2.c index 872d1bc86ab4..599dcb2e374d 100644 --- a/drivers/usb/serial/quatech2.c +++ b/drivers/usb/serial/quatech2.c @@ -727,7 +727,7 @@ err_buf: return -ENOMEM; } -static int qt2_port_remove(struct usb_serial_port *port) +static void qt2_port_remove(struct usb_serial_port *port) { struct qt2_port_private *port_priv; @@ -735,8 +735,6 @@ static int qt2_port_remove(struct usb_serial_port *port) usb_free_urb(port_priv->write_urb); kfree(port_priv->write_buffer); kfree(port_priv); - - return 0; } static int qt2_tiocmget(struct tty_struct *tty) diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c index 57fc3c31712e..54e16ffc30a0 100644 --- a/drivers/usb/serial/sierra.c +++ b/drivers/usb/serial/sierra.c @@ -901,15 +901,13 @@ static int sierra_port_probe(struct usb_serial_port *port) return 0; } -static int sierra_port_remove(struct usb_serial_port *port) +static void sierra_port_remove(struct usb_serial_port *port) { struct sierra_port_private *portdata; portdata = usb_get_serial_port_data(port); usb_set_serial_port_data(port, NULL); kfree(portdata); - - return 0; } #ifdef CONFIG_PM diff --git a/drivers/usb/serial/spcp8x5.c b/drivers/usb/serial/spcp8x5.c index 3bac55bd9bd9..7039dc918827 100644 --- a/drivers/usb/serial/spcp8x5.c +++ b/drivers/usb/serial/spcp8x5.c @@ -169,14 +169,12 @@ static int spcp8x5_port_probe(struct usb_serial_port *port) return 0; } -static int spcp8x5_port_remove(struct usb_serial_port *port) +static void spcp8x5_port_remove(struct usb_serial_port *port) { struct spcp8x5_private *priv; priv = usb_get_serial_port_data(port); kfree(priv); - - return 0; } static int spcp8x5_set_ctrl_line(struct usb_serial_port *port, u8 mcr) diff --git a/drivers/usb/serial/ssu100.c b/drivers/usb/serial/ssu100.c index 7d39d35e52a1..89fdc5c19285 100644 --- a/drivers/usb/serial/ssu100.c +++ b/drivers/usb/serial/ssu100.c @@ -366,14 +366,12 @@ static int ssu100_port_probe(struct usb_serial_port *port) return 0; } -static int ssu100_port_remove(struct usb_serial_port *port) +static void ssu100_port_remove(struct usb_serial_port *port) { struct ssu100_port_private *priv; priv = usb_get_serial_port_data(port); kfree(priv); - - return 0; } static int ssu100_tiocmget(struct tty_struct *tty) diff --git a/drivers/usb/serial/symbolserial.c b/drivers/usb/serial/symbolserial.c index 6ca24e86f686..d7f73ad6e778 100644 --- a/drivers/usb/serial/symbolserial.c +++ b/drivers/usb/serial/symbolserial.c @@ -160,13 +160,11 @@ static int symbol_port_probe(struct usb_serial_port *port) return 0; } -static int symbol_port_remove(struct usb_serial_port *port) +static void symbol_port_remove(struct usb_serial_port *port) { struct symbol_private *priv = usb_get_serial_port_data(port); kfree(priv); - - return 0; } static struct usb_serial_driver symbol_device = { diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c index 73075b9351c5..7252b0ce75a6 100644 --- a/drivers/usb/serial/ti_usb_3410_5052.c +++ b/drivers/usb/serial/ti_usb_3410_5052.c @@ -303,7 +303,7 @@ struct ti_device { static int ti_startup(struct usb_serial *serial); static void ti_release(struct usb_serial *serial); static int ti_port_probe(struct usb_serial_port *port); -static int ti_port_remove(struct 
usb_serial_port *port); +static void ti_port_remove(struct usb_serial_port *port); static int ti_open(struct tty_struct *tty, struct usb_serial_port *port); static void ti_close(struct usb_serial_port *port); static int ti_write(struct tty_struct *tty, struct usb_serial_port *port, @@ -629,14 +629,12 @@ static int ti_port_probe(struct usb_serial_port *port) return 0; } -static int ti_port_remove(struct usb_serial_port *port) +static void ti_port_remove(struct usb_serial_port *port) { struct ti_port *tport; tport = usb_get_serial_port_data(port); kfree(tport); - - return 0; } static int ti_open(struct tty_struct *tty, struct usb_serial_port *port) diff --git a/drivers/usb/serial/upd78f0730.c b/drivers/usb/serial/upd78f0730.c index 0a2268c479af..26d7b003b7e3 100644 --- a/drivers/usb/serial/upd78f0730.c +++ b/drivers/usb/serial/upd78f0730.c @@ -145,14 +145,11 @@ static int upd78f0730_send_ctl(struct usb_serial_port *port, kfree(buf); - if (res != size) { + if (res < 0) { struct device *dev = &port->dev; dev_err(dev, "failed to send control request %02x: %d\n", *(u8 *)data, res); - /* The maximum expected length of a transfer is 6 bytes */ - if (res >= 0) - res = -EIO; return res; } @@ -174,15 +171,13 @@ static int upd78f0730_port_probe(struct usb_serial_port *port) return 0; } -static int upd78f0730_port_remove(struct usb_serial_port *port) +static void upd78f0730_port_remove(struct usb_serial_port *port) { struct upd78f0730_port_private *private; private = usb_get_serial_port_data(port); mutex_destroy(&private->lock); kfree(private); - - return 0; } static int upd78f0730_tiocmget(struct tty_struct *tty) diff --git a/drivers/usb/serial/usb-wwan.h b/drivers/usb/serial/usb-wwan.h index 934e9361cf6b..79dafd98e0a1 100644 --- a/drivers/usb/serial/usb-wwan.h +++ b/drivers/usb/serial/usb-wwan.h @@ -10,7 +10,7 @@ extern void usb_wwan_dtr_rts(struct usb_serial_port *port, int on); extern int usb_wwan_open(struct tty_struct *tty, struct usb_serial_port *port); extern void usb_wwan_close(struct usb_serial_port *port); extern int usb_wwan_port_probe(struct usb_serial_port *port); -extern int usb_wwan_port_remove(struct usb_serial_port *port); +extern void usb_wwan_port_remove(struct usb_serial_port *port); extern int usb_wwan_write_room(struct tty_struct *tty); extern int usb_wwan_tiocmget(struct tty_struct *tty); extern int usb_wwan_tiocmset(struct tty_struct *tty, diff --git a/drivers/usb/serial/usb_wwan.c b/drivers/usb/serial/usb_wwan.c index 4b9845807bee..46d46a4f99c9 100644 --- a/drivers/usb/serial/usb_wwan.c +++ b/drivers/usb/serial/usb_wwan.c @@ -544,7 +544,7 @@ bail_out_error: } EXPORT_SYMBOL_GPL(usb_wwan_port_probe); -int usb_wwan_port_remove(struct usb_serial_port *port) +void usb_wwan_port_remove(struct usb_serial_port *port) { int i; struct usb_wwan_port_private *portdata; @@ -562,8 +562,6 @@ int usb_wwan_port_remove(struct usb_serial_port *port) } kfree(portdata); - - return 0; } EXPORT_SYMBOL(usb_wwan_port_remove); diff --git a/drivers/usb/serial/whiteheat.c b/drivers/usb/serial/whiteheat.c index ca3bd58f2025..ccfd5ed652cd 100644 --- a/drivers/usb/serial/whiteheat.c +++ b/drivers/usb/serial/whiteheat.c @@ -79,7 +79,7 @@ static int whiteheat_firmware_attach(struct usb_serial *serial); static int whiteheat_attach(struct usb_serial *serial); static void whiteheat_release(struct usb_serial *serial); static int whiteheat_port_probe(struct usb_serial_port *port); -static int whiteheat_port_remove(struct usb_serial_port *port); +static void whiteheat_port_remove(struct usb_serial_port *port); 
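Almost every serial hunk in this stretch, from iuu_phoenix down to whiteheat and usb_wwan, is the same mechanical conversion: the usb-serial core never acted on the int returned by port_remove(), so the callback becomes void and the trailing return 0; disappears. The resulting shape, sketched for a hypothetical driver:

    #include <linux/slab.h>
    #include <linux/usb/serial.h>

    /* Post-conversion shape: nothing to report, just free per-port state. */
    static void foo_port_remove(struct usb_serial_port *port)
    {
            void *priv = usb_get_serial_port_data(port);

            kfree(priv);
    }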
static int whiteheat_open(struct tty_struct *tty, struct usb_serial_port *port); static void whiteheat_close(struct usb_serial_port *port); @@ -345,14 +345,12 @@ static int whiteheat_port_probe(struct usb_serial_port *port) return 0; } -static int whiteheat_port_remove(struct usb_serial_port *port) +static void whiteheat_port_remove(struct usb_serial_port *port) { struct whiteheat_private *info; info = usb_get_serial_port_data(port); kfree(info); - - return 0; } static int whiteheat_open(struct tty_struct *tty, struct usb_serial_port *port) diff --git a/drivers/usb/serial/xr_serial.c b/drivers/usb/serial/xr_serial.c new file mode 100644 index 000000000000..483d07dee19d --- /dev/null +++ b/drivers/usb/serial/xr_serial.c @@ -0,0 +1,611 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * MaxLinear/Exar USB to Serial driver + * + * Copyright (c) 2020 Manivannan Sadhasivam <mani@kernel.org> + * + * Based on the initial driver written by Patong Yang: + * + * https://lore.kernel.org/r/20180404070634.nhspvmxcjwfgjkcv@advantechmxl-desktop + * + * Copyright (c) 2018 Patong Yang <patong.mxl@gmail.com> + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/tty.h> +#include <linux/usb.h> +#include <linux/usb/serial.h> + +struct xr_txrx_clk_mask { + u16 tx; + u16 rx0; + u16 rx1; +}; + +#define XR_INT_OSC_HZ 48000000U +#define XR21V141X_MIN_SPEED 46U +#define XR21V141X_MAX_SPEED XR_INT_OSC_HZ + +/* USB Requests */ +#define XR21V141X_SET_REQ 0 +#define XR21V141X_GET_REQ 1 + +#define XR21V141X_CLOCK_DIVISOR_0 0x04 +#define XR21V141X_CLOCK_DIVISOR_1 0x05 +#define XR21V141X_CLOCK_DIVISOR_2 0x06 +#define XR21V141X_TX_CLOCK_MASK_0 0x07 +#define XR21V141X_TX_CLOCK_MASK_1 0x08 +#define XR21V141X_RX_CLOCK_MASK_0 0x09 +#define XR21V141X_RX_CLOCK_MASK_1 0x0a + +/* XR21V141X register blocks */ +#define XR21V141X_UART_REG_BLOCK 0 +#define XR21V141X_UM_REG_BLOCK 4 +#define XR21V141X_UART_CUSTOM_BLOCK 0x66 + +/* XR21V141X UART Manager Registers */ +#define XR21V141X_UM_FIFO_ENABLE_REG 0x10 +#define XR21V141X_UM_ENABLE_TX_FIFO 0x01 +#define XR21V141X_UM_ENABLE_RX_FIFO 0x02 + +#define XR21V141X_UM_RX_FIFO_RESET 0x18 +#define XR21V141X_UM_TX_FIFO_RESET 0x1c + +#define XR21V141X_UART_ENABLE_TX 0x1 +#define XR21V141X_UART_ENABLE_RX 0x2 + +#define XR21V141X_UART_MODE_RI BIT(0) +#define XR21V141X_UART_MODE_CD BIT(1) +#define XR21V141X_UART_MODE_DSR BIT(2) +#define XR21V141X_UART_MODE_DTR BIT(3) +#define XR21V141X_UART_MODE_CTS BIT(4) +#define XR21V141X_UART_MODE_RTS BIT(5) + +#define XR21V141X_UART_BREAK_ON 0xff +#define XR21V141X_UART_BREAK_OFF 0 + +#define XR21V141X_UART_DATA_MASK GENMASK(3, 0) +#define XR21V141X_UART_DATA_7 0x7 +#define XR21V141X_UART_DATA_8 0x8 + +#define XR21V141X_UART_PARITY_MASK GENMASK(6, 4) +#define XR21V141X_UART_PARITY_SHIFT 4 +#define XR21V141X_UART_PARITY_NONE (0x0 << XR21V141X_UART_PARITY_SHIFT) +#define XR21V141X_UART_PARITY_ODD (0x1 << XR21V141X_UART_PARITY_SHIFT) +#define XR21V141X_UART_PARITY_EVEN (0x2 << XR21V141X_UART_PARITY_SHIFT) +#define XR21V141X_UART_PARITY_MARK (0x3 << XR21V141X_UART_PARITY_SHIFT) +#define XR21V141X_UART_PARITY_SPACE (0x4 << XR21V141X_UART_PARITY_SHIFT) + +#define XR21V141X_UART_STOP_MASK BIT(7) +#define XR21V141X_UART_STOP_SHIFT 7 +#define XR21V141X_UART_STOP_1 (0x0 << XR21V141X_UART_STOP_SHIFT) +#define XR21V141X_UART_STOP_2 (0x1 << XR21V141X_UART_STOP_SHIFT) + +#define XR21V141X_UART_FLOW_MODE_NONE 0x0 +#define XR21V141X_UART_FLOW_MODE_HW 0x1 +#define XR21V141X_UART_FLOW_MODE_SW 0x2 + +#define 
XR21V141X_UART_MODE_GPIO_MASK GENMASK(2, 0) +#define XR21V141X_UART_MODE_RTS_CTS 0x1 +#define XR21V141X_UART_MODE_DTR_DSR 0x2 +#define XR21V141X_UART_MODE_RS485 0x3 +#define XR21V141X_UART_MODE_RS485_ADDR 0x4 + +#define XR21V141X_REG_ENABLE 0x03 +#define XR21V141X_REG_FORMAT 0x0b +#define XR21V141X_REG_FLOW_CTRL 0x0c +#define XR21V141X_REG_XON_CHAR 0x10 +#define XR21V141X_REG_XOFF_CHAR 0x11 +#define XR21V141X_REG_LOOPBACK 0x12 +#define XR21V141X_REG_TX_BREAK 0x14 +#define XR21V141X_REG_RS845_DELAY 0x15 +#define XR21V141X_REG_GPIO_MODE 0x1a +#define XR21V141X_REG_GPIO_DIR 0x1b +#define XR21V141X_REG_GPIO_INT_MASK 0x1c +#define XR21V141X_REG_GPIO_SET 0x1d +#define XR21V141X_REG_GPIO_CLR 0x1e +#define XR21V141X_REG_GPIO_STATUS 0x1f + +static int xr_set_reg(struct usb_serial_port *port, u8 block, u8 reg, u8 val) +{ + struct usb_serial *serial = port->serial; + int ret; + + ret = usb_control_msg(serial->dev, + usb_sndctrlpipe(serial->dev, 0), + XR21V141X_SET_REQ, + USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, + val, reg | (block << 8), NULL, 0, + USB_CTRL_SET_TIMEOUT); + if (ret < 0) { + dev_err(&port->dev, "Failed to set reg 0x%02x: %d\n", reg, ret); + return ret; + } + + return 0; +} + +static int xr_get_reg(struct usb_serial_port *port, u8 block, u8 reg, u8 *val) +{ + struct usb_serial *serial = port->serial; + u8 *dmabuf; + int ret; + + dmabuf = kmalloc(1, GFP_KERNEL); + if (!dmabuf) + return -ENOMEM; + + ret = usb_control_msg(serial->dev, + usb_rcvctrlpipe(serial->dev, 0), + XR21V141X_GET_REQ, + USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, + 0, reg | (block << 8), dmabuf, 1, + USB_CTRL_GET_TIMEOUT); + if (ret == 1) { + *val = *dmabuf; + ret = 0; + } else { + dev_err(&port->dev, "Failed to get reg 0x%02x: %d\n", reg, ret); + if (ret >= 0) + ret = -EIO; + } + + kfree(dmabuf); + + return ret; +} + +static int xr_set_reg_uart(struct usb_serial_port *port, u8 reg, u8 val) +{ + return xr_set_reg(port, XR21V141X_UART_REG_BLOCK, reg, val); +} + +static int xr_get_reg_uart(struct usb_serial_port *port, u8 reg, u8 *val) +{ + return xr_get_reg(port, XR21V141X_UART_REG_BLOCK, reg, val); +} + +static int xr_set_reg_um(struct usb_serial_port *port, u8 reg, u8 val) +{ + return xr_set_reg(port, XR21V141X_UM_REG_BLOCK, reg, val); +} + +/* + * According to datasheet, below is the recommended sequence for enabling UART + * module in XR21V141X: + * + * Enable Tx FIFO + * Enable Tx and Rx + * Enable Rx FIFO + */ +static int xr_uart_enable(struct usb_serial_port *port) +{ + int ret; + + ret = xr_set_reg_um(port, XR21V141X_UM_FIFO_ENABLE_REG, + XR21V141X_UM_ENABLE_TX_FIFO); + if (ret) + return ret; + + ret = xr_set_reg_uart(port, XR21V141X_REG_ENABLE, + XR21V141X_UART_ENABLE_TX | XR21V141X_UART_ENABLE_RX); + if (ret) + return ret; + + ret = xr_set_reg_um(port, XR21V141X_UM_FIFO_ENABLE_REG, + XR21V141X_UM_ENABLE_TX_FIFO | XR21V141X_UM_ENABLE_RX_FIFO); + + if (ret) + xr_set_reg_uart(port, XR21V141X_REG_ENABLE, 0); + + return ret; +} + +static int xr_uart_disable(struct usb_serial_port *port) +{ + int ret; + + ret = xr_set_reg_uart(port, XR21V141X_REG_ENABLE, 0); + if (ret) + return ret; + + ret = xr_set_reg_um(port, XR21V141X_UM_FIFO_ENABLE_REG, 0); + + return ret; +} + +static int xr_tiocmget(struct tty_struct *tty) +{ + struct usb_serial_port *port = tty->driver_data; + u8 status; + int ret; + + ret = xr_get_reg_uart(port, XR21V141X_REG_GPIO_STATUS, &status); + if (ret) + return ret; + + /* + * Modem control pins are active low, so reading '0' means it is active + * and '1' means not active. 
+ */ + ret = ((status & XR21V141X_UART_MODE_DTR) ? 0 : TIOCM_DTR) | + ((status & XR21V141X_UART_MODE_RTS) ? 0 : TIOCM_RTS) | + ((status & XR21V141X_UART_MODE_CTS) ? 0 : TIOCM_CTS) | + ((status & XR21V141X_UART_MODE_DSR) ? 0 : TIOCM_DSR) | + ((status & XR21V141X_UART_MODE_RI) ? 0 : TIOCM_RI) | + ((status & XR21V141X_UART_MODE_CD) ? 0 : TIOCM_CD); + + return ret; +} + +static int xr_tiocmset_port(struct usb_serial_port *port, + unsigned int set, unsigned int clear) +{ + u8 gpio_set = 0; + u8 gpio_clr = 0; + int ret = 0; + + /* Modem control pins are active low, so set & clr are swapped */ + if (set & TIOCM_RTS) + gpio_clr |= XR21V141X_UART_MODE_RTS; + if (set & TIOCM_DTR) + gpio_clr |= XR21V141X_UART_MODE_DTR; + if (clear & TIOCM_RTS) + gpio_set |= XR21V141X_UART_MODE_RTS; + if (clear & TIOCM_DTR) + gpio_set |= XR21V141X_UART_MODE_DTR; + + /* Writing '0' to gpio_{set/clr} bits has no effect, so there is no need for a read-modify-write */ + if (gpio_clr) + ret = xr_set_reg_uart(port, XR21V141X_REG_GPIO_CLR, gpio_clr); + + if (gpio_set) + ret = xr_set_reg_uart(port, XR21V141X_REG_GPIO_SET, gpio_set); + + return ret; +} + +static int xr_tiocmset(struct tty_struct *tty, + unsigned int set, unsigned int clear) +{ + struct usb_serial_port *port = tty->driver_data; + + return xr_tiocmset_port(port, set, clear); +} + +static void xr_dtr_rts(struct usb_serial_port *port, int on) +{ + if (on) + xr_tiocmset_port(port, TIOCM_DTR | TIOCM_RTS, 0); + else + xr_tiocmset_port(port, 0, TIOCM_DTR | TIOCM_RTS); +} + +static void xr_break_ctl(struct tty_struct *tty, int break_state) +{ + struct usb_serial_port *port = tty->driver_data; + u8 state; + + if (break_state == 0) + state = XR21V141X_UART_BREAK_OFF; + else + state = XR21V141X_UART_BREAK_ON; + + dev_dbg(&port->dev, "Turning break %s\n", + state == XR21V141X_UART_BREAK_OFF ? 
"off" : "on"); + xr_set_reg_uart(port, XR21V141X_REG_TX_BREAK, state); +} + +/* Tx and Rx clock mask values obtained from section 3.3.4 of datasheet */ +static const struct xr_txrx_clk_mask xr21v141x_txrx_clk_masks[] = { + { 0x000, 0x000, 0x000 }, + { 0x000, 0x000, 0x000 }, + { 0x100, 0x000, 0x100 }, + { 0x020, 0x400, 0x020 }, + { 0x010, 0x100, 0x010 }, + { 0x208, 0x040, 0x208 }, + { 0x104, 0x820, 0x108 }, + { 0x844, 0x210, 0x884 }, + { 0x444, 0x110, 0x444 }, + { 0x122, 0x888, 0x224 }, + { 0x912, 0x448, 0x924 }, + { 0x492, 0x248, 0x492 }, + { 0x252, 0x928, 0x292 }, + { 0x94a, 0x4a4, 0xa52 }, + { 0x52a, 0xaa4, 0x54a }, + { 0xaaa, 0x954, 0x4aa }, + { 0xaaa, 0x554, 0xaaa }, + { 0x555, 0xad4, 0x5aa }, + { 0xb55, 0xab4, 0x55a }, + { 0x6b5, 0x5ac, 0xb56 }, + { 0x5b5, 0xd6c, 0x6d6 }, + { 0xb6d, 0xb6a, 0xdb6 }, + { 0x76d, 0x6da, 0xbb6 }, + { 0xedd, 0xdda, 0x76e }, + { 0xddd, 0xbba, 0xeee }, + { 0x7bb, 0xf7a, 0xdde }, + { 0xf7b, 0xef6, 0x7de }, + { 0xdf7, 0xbf6, 0xf7e }, + { 0x7f7, 0xfee, 0xefe }, + { 0xfdf, 0xfbe, 0x7fe }, + { 0xf7f, 0xefe, 0xffe }, + { 0xfff, 0xffe, 0xffd }, +}; + +static int xr_set_baudrate(struct tty_struct *tty, + struct usb_serial_port *port) +{ + u32 divisor, baud, idx; + u16 tx_mask, rx_mask; + int ret; + + baud = tty->termios.c_ospeed; + if (!baud) + return 0; + + baud = clamp(baud, XR21V141X_MIN_SPEED, XR21V141X_MAX_SPEED); + divisor = XR_INT_OSC_HZ / baud; + idx = ((32 * XR_INT_OSC_HZ) / baud) & 0x1f; + tx_mask = xr21v141x_txrx_clk_masks[idx].tx; + + if (divisor & 0x01) + rx_mask = xr21v141x_txrx_clk_masks[idx].rx1; + else + rx_mask = xr21v141x_txrx_clk_masks[idx].rx0; + + dev_dbg(&port->dev, "Setting baud rate: %u\n", baud); + /* + * XR21V141X uses fractional baud rate generator with 48MHz internal + * oscillator and 19-bit programmable divisor. So theoretically it can + * generate most commonly used baud rates with high accuracy. + */ + ret = xr_set_reg_uart(port, XR21V141X_CLOCK_DIVISOR_0, + divisor & 0xff); + if (ret) + return ret; + + ret = xr_set_reg_uart(port, XR21V141X_CLOCK_DIVISOR_1, + (divisor >> 8) & 0xff); + if (ret) + return ret; + + ret = xr_set_reg_uart(port, XR21V141X_CLOCK_DIVISOR_2, + (divisor >> 16) & 0xff); + if (ret) + return ret; + + ret = xr_set_reg_uart(port, XR21V141X_TX_CLOCK_MASK_0, + tx_mask & 0xff); + if (ret) + return ret; + + ret = xr_set_reg_uart(port, XR21V141X_TX_CLOCK_MASK_1, + (tx_mask >> 8) & 0xff); + if (ret) + return ret; + + ret = xr_set_reg_uart(port, XR21V141X_RX_CLOCK_MASK_0, + rx_mask & 0xff); + if (ret) + return ret; + + ret = xr_set_reg_uart(port, XR21V141X_RX_CLOCK_MASK_1, + (rx_mask >> 8) & 0xff); + if (ret) + return ret; + + tty_encode_baud_rate(tty, baud, baud); + + return 0; +} + +static void xr_set_flow_mode(struct tty_struct *tty, + struct usb_serial_port *port, + struct ktermios *old_termios) +{ + u8 flow, gpio_mode; + int ret; + + ret = xr_get_reg_uart(port, XR21V141X_REG_GPIO_MODE, &gpio_mode); + if (ret) + return; + + /* Set GPIO mode for controlling the pins manually by default. 
*/ + gpio_mode &= ~XR21V141X_UART_MODE_GPIO_MASK; + + if (C_CRTSCTS(tty) && C_BAUD(tty) != B0) { + dev_dbg(&port->dev, "Enabling hardware flow ctrl\n"); + gpio_mode |= XR21V141X_UART_MODE_RTS_CTS; + flow = XR21V141X_UART_FLOW_MODE_HW; + } else if (I_IXON(tty)) { + u8 start_char = START_CHAR(tty); + u8 stop_char = STOP_CHAR(tty); + + dev_dbg(&port->dev, "Enabling sw flow ctrl\n"); + flow = XR21V141X_UART_FLOW_MODE_SW; + + xr_set_reg_uart(port, XR21V141X_REG_XON_CHAR, start_char); + xr_set_reg_uart(port, XR21V141X_REG_XOFF_CHAR, stop_char); + } else { + dev_dbg(&port->dev, "Disabling flow ctrl\n"); + flow = XR21V141X_UART_FLOW_MODE_NONE; + } + + /* + * As per the datasheet, UART needs to be disabled while writing to + * FLOW_CONTROL register. + */ + xr_uart_disable(port); + xr_set_reg_uart(port, XR21V141X_REG_FLOW_CTRL, flow); + xr_uart_enable(port); + + xr_set_reg_uart(port, XR21V141X_REG_GPIO_MODE, gpio_mode); + + if (C_BAUD(tty) == B0) + xr_dtr_rts(port, 0); + else if (old_termios && (old_termios->c_cflag & CBAUD) == B0) + xr_dtr_rts(port, 1); +} + +static void xr_set_termios(struct tty_struct *tty, + struct usb_serial_port *port, + struct ktermios *old_termios) +{ + struct ktermios *termios = &tty->termios; + u8 bits = 0; + int ret; + + if (!old_termios || (tty->termios.c_ospeed != old_termios->c_ospeed)) + xr_set_baudrate(tty, port); + + switch (C_CSIZE(tty)) { + case CS5: + case CS6: + /* CS5 and CS6 are not supported, so just restore old setting */ + termios->c_cflag &= ~CSIZE; + if (old_termios) + termios->c_cflag |= old_termios->c_cflag & CSIZE; + else + bits |= XR21V141X_UART_DATA_8; + break; + case CS7: + bits |= XR21V141X_UART_DATA_7; + break; + case CS8: + default: + bits |= XR21V141X_UART_DATA_8; + break; + } + + if (C_PARENB(tty)) { + if (C_CMSPAR(tty)) { + if (C_PARODD(tty)) + bits |= XR21V141X_UART_PARITY_MARK; + else + bits |= XR21V141X_UART_PARITY_SPACE; + } else { + if (C_PARODD(tty)) + bits |= XR21V141X_UART_PARITY_ODD; + else + bits |= XR21V141X_UART_PARITY_EVEN; + } + } + + if (C_CSTOPB(tty)) + bits |= XR21V141X_UART_STOP_2; + else + bits |= XR21V141X_UART_STOP_1; + + ret = xr_set_reg_uart(port, XR21V141X_REG_FORMAT, bits); + if (ret) + return; + + xr_set_flow_mode(tty, port, old_termios); +} + +static int xr_open(struct tty_struct *tty, struct usb_serial_port *port) +{ + u8 gpio_dir; + int ret; + + ret = xr_uart_enable(port); + if (ret) { + dev_err(&port->dev, "Failed to enable UART\n"); + return ret; + } + + /* + * Configure DTR and RTS as outputs and RI, CD, DSR and CTS as + * inputs. 
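xr_set_flow_mode() above brackets the FLOW_CTRL write with a full UART disable and re-enable because, per the comment, the device only accepts writes to that register while the UART is off. Factored into a reusable helper (xr_update_guarded_reg() is a hypothetical name; the xr_* calls are the driver's own, defined earlier in this file):

    /* Sketch: write a register that is only writable while the UART is off. */
    static int xr_update_guarded_reg(struct usb_serial_port *port, u8 reg, u8 val)
    {
            int ret;

            ret = xr_uart_disable(port);
            if (ret)
                    return ret;

            ret = xr_set_reg_uart(port, reg, val);

            /* Re-enable unconditionally so the port keeps working. */
            xr_uart_enable(port);

            return ret;
    }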
+ */ + gpio_dir = XR21V141X_UART_MODE_DTR | XR21V141X_UART_MODE_RTS; + xr_set_reg_uart(port, XR21V141X_REG_GPIO_DIR, gpio_dir); + + /* Setup termios */ + if (tty) + xr_set_termios(tty, port, NULL); + + ret = usb_serial_generic_open(tty, port); + if (ret) { + xr_uart_disable(port); + return ret; + } + + return 0; +} + +static void xr_close(struct usb_serial_port *port) +{ + usb_serial_generic_close(port); + + xr_uart_disable(port); +} + +static int xr_probe(struct usb_serial *serial, const struct usb_device_id *id) +{ + struct usb_driver *driver = serial->type->usb_driver; + struct usb_interface *control_interface; + int ret; + + /* Don't bind to control interface */ + if (serial->interface->cur_altsetting->desc.bInterfaceNumber == 0) + return -ENODEV; + + /* But claim the control interface during data interface probe */ + control_interface = usb_ifnum_to_if(serial->dev, 0); + if (!control_interface) + return -ENODEV; + + ret = usb_driver_claim_interface(driver, control_interface, NULL); + if (ret) { + dev_err(&serial->interface->dev, "Failed to claim control interface\n"); + return ret; + } + + return 0; +} + +static void xr_disconnect(struct usb_serial *serial) +{ + struct usb_driver *driver = serial->type->usb_driver; + struct usb_interface *control_interface; + + control_interface = usb_ifnum_to_if(serial->dev, 0); + usb_driver_release_interface(driver, control_interface); +} + +static const struct usb_device_id id_table[] = { + { USB_DEVICE(0x04e2, 0x1410) }, /* XR21V141X */ + { } +}; +MODULE_DEVICE_TABLE(usb, id_table); + +static struct usb_serial_driver xr_device = { + .driver = { + .owner = THIS_MODULE, + .name = "xr_serial", + }, + .id_table = id_table, + .num_ports = 1, + .probe = xr_probe, + .disconnect = xr_disconnect, + .open = xr_open, + .close = xr_close, + .break_ctl = xr_break_ctl, + .set_termios = xr_set_termios, + .tiocmget = xr_tiocmget, + .tiocmset = xr_tiocmset, + .dtr_rts = xr_dtr_rts +}; + +static struct usb_serial_driver * const serial_drivers[] = { + &xr_device, NULL +}; + +module_usb_serial_driver(serial_drivers, id_table); + +MODULE_AUTHOR("Manivannan Sadhasivam <mani@kernel.org>"); +MODULE_DESCRIPTION("MaxLinear/Exar USB to Serial driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h index 870e9cf3d5dc..f9677a5ec31b 100644 --- a/drivers/usb/storage/unusual_uas.h +++ b/drivers/usb/storage/unusual_uas.h @@ -91,6 +91,13 @@ UNUSUAL_DEV(0x152d, 0x0578, 0x0000, 0x9999, US_FL_BROKEN_FUA), /* Reported-by: Thinh Nguyen <thinhn@synopsys.com> */ +UNUSUAL_DEV(0x154b, 0xf00b, 0x0000, 0x9999, + "PNY", + "Pro Elite SSD", + USB_SC_DEVICE, USB_PR_DEVICE, NULL, + US_FL_NO_ATA_1X), + +/* Reported-by: Thinh Nguyen <thinhn@synopsys.com> */ UNUSUAL_DEV(0x154b, 0xf00d, 0x0000, 0x9999, "PNY", "Pro Elite SSD", diff --git a/drivers/usb/typec/altmodes/Kconfig b/drivers/usb/typec/altmodes/Kconfig index 187690fd1a5b..60d375e9c3c7 100644 --- a/drivers/usb/typec/altmodes/Kconfig +++ b/drivers/usb/typec/altmodes/Kconfig @@ -20,6 +20,6 @@ config TYPEC_NVIDIA_ALTMODE to enable support for VirtualLink devices with NVIDIA GPUs. To compile this driver as a module, choose M here: the - module will be called typec_displayport. + module will be called typec_nvidia. 
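xr_probe() and xr_disconnect() above implement a common two-interface idiom: the device enumerates with a CDC-style control interface 0 plus a data interface, the driver refuses to bind interface 0 directly, then claims it from the data-interface probe so no other driver grabs it. Condensed to its two essential calls (claim_sibling() is a hypothetical name; both USB core functions are real):

    #include <linux/usb.h>
    #include <linux/usb/serial.h>

    /* Sketch: keep the sibling control interface bound to this driver too. */
    static int claim_sibling(struct usb_serial *serial, struct usb_driver *drv)
    {
            struct usb_interface *ctrl = usb_ifnum_to_if(serial->dev, 0);

            if (!ctrl)
                    return -ENODEV;

            return usb_driver_claim_interface(drv, ctrl, NULL);
    }

The disconnect path must mirror this with usb_driver_release_interface(), as xr_disconnect() does.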
endmenu diff --git a/drivers/usb/typec/altmodes/displayport.c b/drivers/usb/typec/altmodes/displayport.c index e62e5e3da01e..b7f094435b00 100644 --- a/drivers/usb/typec/altmodes/displayport.c +++ b/drivers/usb/typec/altmodes/displayport.c @@ -15,8 +15,8 @@ #include <linux/usb/typec_dp.h> #include "displayport.h" -#define DP_HEADER(_dp, cmd) (VDO((_dp)->alt->svid, 1, cmd) | \ - VDO_OPOS(USB_TYPEC_DP_MODE)) +#define DP_HEADER(_dp, ver, cmd) (VDO((_dp)->alt->svid, 1, ver, cmd) \ + | VDO_OPOS(USB_TYPEC_DP_MODE)) enum { DP_CONF_USB, @@ -156,9 +156,14 @@ static int dp_altmode_configured(struct dp_altmode *dp) static int dp_altmode_configure_vdm(struct dp_altmode *dp, u32 conf) { - u32 header = DP_HEADER(dp, DP_CMD_CONFIGURE); + int svdm_version = typec_altmode_get_svdm_version(dp->alt); + u32 header; int ret; + if (svdm_version < 0) + return svdm_version; + + header = DP_HEADER(dp, svdm_version, DP_CMD_CONFIGURE); ret = typec_altmode_notify(dp->alt, TYPEC_STATE_SAFE, &dp->data); if (ret) { dev_err(&dp->alt->dev, @@ -181,6 +186,7 @@ static int dp_altmode_configure_vdm(struct dp_altmode *dp, u32 conf) static void dp_altmode_work(struct work_struct *work) { struct dp_altmode *dp = container_of(work, struct dp_altmode, work); + int svdm_version; u32 header; u32 vdo; int ret; @@ -194,7 +200,10 @@ static void dp_altmode_work(struct work_struct *work) dev_err(&dp->alt->dev, "failed to enter mode\n"); break; case DP_STATE_UPDATE: - header = DP_HEADER(dp, DP_CMD_STATUS_UPDATE); + svdm_version = typec_altmode_get_svdm_version(dp->alt); + if (svdm_version < 0) + break; + header = DP_HEADER(dp, svdm_version, DP_CMD_STATUS_UPDATE); vdo = 1; ret = typec_altmode_vdm(dp->alt, header, &vdo, 2); if (ret) diff --git a/drivers/usb/typec/class.c b/drivers/usb/typec/class.c index ebfd3113a9a8..45f0bf65e9ab 100644 --- a/drivers/usb/typec/class.c +++ b/drivers/usb/typec/class.c @@ -27,6 +27,7 @@ struct typec_cable { enum typec_plug_type type; struct usb_pd_identity *identity; unsigned int active:1; + u16 pd_revision; /* 0300H = "3.0" */ }; struct typec_partner { @@ -36,6 +37,8 @@ struct typec_partner { enum typec_accessory accessory; struct ida mode_ids; int num_altmodes; + u16 pd_revision; /* 0300H = "3.0" */ + enum usb_pd_svdm_ver svdm_version; }; struct typec_port { @@ -86,7 +89,7 @@ static const char * const typec_accessory_modes[] = { /* Product types defined in USB PD Specification R3.0 V2.0 */ static const char * const product_type_ufp[8] = { - [IDH_PTYPE_UNDEF] = "undefined", + [IDH_PTYPE_NOT_UFP] = "not_ufp", [IDH_PTYPE_HUB] = "hub", [IDH_PTYPE_PERIPH] = "peripheral", [IDH_PTYPE_PSD] = "psd", @@ -94,17 +97,17 @@ static const char * const product_type_ufp[8] = { }; static const char * const product_type_dfp[8] = { - [IDH_PTYPE_DFP_UNDEF] = "undefined", + [IDH_PTYPE_NOT_DFP] = "not_dfp", [IDH_PTYPE_DFP_HUB] = "hub", [IDH_PTYPE_DFP_HOST] = "host", [IDH_PTYPE_DFP_PB] = "power_brick", - [IDH_PTYPE_DFP_AMC] = "amc", }; static const char * const product_type_cable[8] = { - [IDH_PTYPE_UNDEF] = "undefined", + [IDH_PTYPE_NOT_CABLE] = "not_cable", [IDH_PTYPE_PCABLE] = "passive", [IDH_PTYPE_ACABLE] = "active", + [IDH_PTYPE_VPD] = "vpd", }; static struct usb_pd_identity *get_pd_identity(struct device *dev) @@ -264,6 +267,11 @@ type_show(struct device *dev, struct device_attribute *attr, char *buf) } static DEVICE_ATTR_RO(type); +static ssize_t usb_power_delivery_revision_show(struct device *dev, + struct device_attribute *attr, + char *buf); +static DEVICE_ATTR_RO(usb_power_delivery_revision); + /* 
------------------------------------------------------------------------- */ /* Alternate Modes */ @@ -680,6 +688,7 @@ static struct attribute *typec_partner_attrs[] = { &dev_attr_supports_usb_power_delivery.attr, &dev_attr_number_of_alternate_modes.attr, &dev_attr_type.attr, + &dev_attr_usb_power_delivery_revision.attr, NULL }; @@ -741,6 +750,30 @@ int typec_partner_set_identity(struct typec_partner *partner) EXPORT_SYMBOL_GPL(typec_partner_set_identity); /** + * typec_partner_set_pd_revision - Set the PD revision supported by the partner + * @partner: The partner to be updated. + * @pd_revision: USB Power Delivery Specification Revision supported by partner + * + * This routine is used to report that the PD revision of the port partner has + * become available. + */ +void typec_partner_set_pd_revision(struct typec_partner *partner, u16 pd_revision) +{ + if (partner->pd_revision == pd_revision) + return; + + partner->pd_revision = pd_revision; + sysfs_notify(&partner->dev.kobj, NULL, "usb_power_delivery_revision"); + if (pd_revision != 0 && !partner->usb_pd) { + partner->usb_pd = 1; + sysfs_notify(&partner->dev.kobj, NULL, + "supports_usb_power_delivery"); + } + kobject_uevent(&partner->dev.kobj, KOBJ_CHANGE); +} +EXPORT_SYMBOL_GPL(typec_partner_set_pd_revision); + +/** * typec_partner_set_num_altmodes - Set the number of available partner altmodes * @partner: The partner to be updated. * @num_altmodes: The number of altmodes we want to specify as available. @@ -766,6 +799,7 @@ int typec_partner_set_num_altmodes(struct typec_partner *partner, int num_altmod return ret; sysfs_notify(&partner->dev.kobj, NULL, "number_of_alternate_modes"); + kobject_uevent(&partner->dev.kobj, KOBJ_CHANGE); return 0; } @@ -792,6 +826,20 @@ typec_partner_register_altmode(struct typec_partner *partner, EXPORT_SYMBOL_GPL(typec_partner_register_altmode); /** + * typec_partner_set_svdm_version - Set negotiated Structured VDM (SVDM) Version + * @partner: USB Type-C Partner that supports SVDM + * @svdm_version: Negotiated SVDM Version + * + * This routine is used to save the negotiated SVDM Version. 
+ */ +void typec_partner_set_svdm_version(struct typec_partner *partner, + enum usb_pd_svdm_ver svdm_version) +{ + partner->svdm_version = svdm_version; +} +EXPORT_SYMBOL_GPL(typec_partner_set_svdm_version); + +/** * typec_register_partner - Register a USB Type-C Partner * @port: The USB Type-C Port the partner is connected to * @desc: Description of the partner @@ -814,6 +862,8 @@ struct typec_partner *typec_register_partner(struct typec_port *port, partner->usb_pd = desc->usb_pd; partner->accessory = desc->accessory; partner->num_altmodes = -1; + partner->pd_revision = desc->pd_revision; + partner->svdm_version = port->cap->svdm_version; if (desc->identity) { /* @@ -923,6 +973,7 @@ int typec_plug_set_num_altmodes(struct typec_plug *plug, int num_altmodes) return ret; sysfs_notify(&plug->dev.kobj, NULL, "number_of_alternate_modes"); + kobject_uevent(&plug->dev.kobj, KOBJ_CHANGE); return 0; } @@ -1026,6 +1077,7 @@ static DEVICE_ATTR_RO(plug_type); static struct attribute *typec_cable_attrs[] = { &dev_attr_type.attr, &dev_attr_plug_type.attr, + &dev_attr_usb_power_delivery_revision.attr, NULL }; ATTRIBUTE_GROUPS(typec_cable); @@ -1128,6 +1180,7 @@ struct typec_cable *typec_register_cable(struct typec_port *port, cable->type = desc->type; cable->active = desc->active; + cable->pd_revision = desc->pd_revision; if (desc->identity) { /* @@ -1497,11 +1550,23 @@ static ssize_t usb_power_delivery_revision_show(struct device *dev, struct device_attribute *attr, char *buf) { - struct typec_port *p = to_typec_port(dev); + u16 rev = 0; - return sprintf(buf, "%d\n", (p->cap->pd_revision >> 8) & 0xff); + if (is_typec_partner(dev)) { + struct typec_partner *partner = to_typec_partner(dev); + + rev = partner->pd_revision; + } else if (is_typec_cable(dev)) { + struct typec_cable *cable = to_typec_cable(dev); + + rev = cable->pd_revision; + } else if (is_typec_port(dev)) { + struct typec_port *p = to_typec_port(dev); + + rev = p->cap->pd_revision; + } + return sysfs_emit(buf, "%d.%d\n", (rev >> 8) & 0xff, (rev >> 4) & 0xf); } -static DEVICE_ATTR_RO(usb_power_delivery_revision); static ssize_t orientation_show(struct device *dev, struct device_attribute *attr, @@ -1846,6 +1911,33 @@ EXPORT_SYMBOL_GPL(typec_set_mode); /* --------------------------------------- */ /** + * typec_get_negotiated_svdm_version - Get negotiated SVDM Version + * @port: USB Type-C Port. + * + * Get the negotiated SVDM Version. The Version is set to the port default + * value stored in typec_capability on partner registration, and updated after + * a successful Discover Identity if the negotiated value is less than the + * default value. + * + * Returns usb_pd_svdm_ver if the partner has been registered otherwise -ENODEV. 
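The reworked usb_power_delivery_revision_show() above now serves ports, partners and cables alike, decoding the revision from its binary-coded form, as the '0300H = "3.0"' comments on the new pd_revision fields indicate. The formatting step in isolation (plain arithmetic, shown outside any sysfs context):

    #include <linux/kernel.h>

    /* BCD revision words: 0x0200 -> "2.0", 0x0300 -> "3.0", 0x0310 -> "3.1". */
    static void format_pd_revision(char *buf, size_t len, u16 rev)
    {
            snprintf(buf, len, "%d.%d", (rev >> 8) & 0xff, (rev >> 4) & 0xf);
    }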
+ */ +int typec_get_negotiated_svdm_version(struct typec_port *port) +{ + enum usb_pd_svdm_ver svdm_version; + struct device *partner_dev; + + partner_dev = device_find_child(&port->dev, NULL, partner_match); + if (!partner_dev) + return -ENODEV; + + svdm_version = to_typec_partner(partner_dev)->svdm_version; + put_device(partner_dev); + + return svdm_version; +} +EXPORT_SYMBOL_GPL(typec_get_negotiated_svdm_version); + +/** * typec_get_drvdata - Return private driver data pointer * @port: USB Type-C port */ diff --git a/drivers/usb/typec/mux/intel_pmc_mux.c b/drivers/usb/typec/mux/intel_pmc_mux.c index cf37a59ce130..46a25b8db72e 100644 --- a/drivers/usb/typec/mux/intel_pmc_mux.c +++ b/drivers/usb/typec/mux/intel_pmc_mux.c @@ -207,10 +207,21 @@ static int pmc_usb_mux_dp_hpd(struct pmc_usb_port *port, struct typec_displayport_data *dp) { u8 msg[2] = { }; + int ret; msg[0] = PMC_USB_DP_HPD; msg[0] |= port->usb3_port << PMC_USB_MSG_USB3_PORT_SHIFT; + /* Configure HPD first if HPD,IRQ comes together */ + if (!IOM_PORT_HPD_ASSERTED(port->iom_status) && + dp->status & DP_STATUS_IRQ_HPD && + dp->status & DP_STATUS_HPD_STATE) { + msg[1] = PMC_USB_DP_HPD_LVL; + ret = pmc_usb_command(port, msg, sizeof(msg)); + if (ret) + return ret; + } + if (dp->status & DP_STATUS_IRQ_HPD) msg[1] = PMC_USB_DP_HPD_IRQ; diff --git a/drivers/usb/typec/tcpm/tcpci.c b/drivers/usb/typec/tcpm/tcpci.c index f676abab044b..a27deb0b5f03 100644 --- a/drivers/usb/typec/tcpm/tcpci.c +++ b/drivers/usb/typec/tcpm/tcpci.c @@ -255,6 +255,14 @@ static int tcpci_set_polarity(struct tcpc_dev *tcpc, TCPC_TCPC_CTRL_ORIENTATION : 0); } +static void tcpci_set_partner_usb_comm_capable(struct tcpc_dev *tcpc, bool capable) +{ + struct tcpci *tcpci = tcpc_to_tcpci(tcpc); + + if (tcpci->data->set_partner_usb_comm_capable) + tcpci->data->set_partner_usb_comm_capable(tcpci, tcpci->data, capable); +} + static int tcpci_set_vconn(struct tcpc_dev *tcpc, bool enable) { struct tcpci *tcpci = tcpc_to_tcpci(tcpc); @@ -720,6 +728,7 @@ struct tcpci *tcpci_register_port(struct device *dev, struct tcpci_data *data) tcpci->tcpc.set_bist_data = tcpci_set_bist_data; tcpci->tcpc.enable_frs = tcpci_enable_frs; tcpci->tcpc.frs_sourcing_vbus = tcpci_frs_sourcing_vbus; + tcpci->tcpc.set_partner_usb_comm_capable = tcpci_set_partner_usb_comm_capable; if (tcpci->data->auto_discharge_disconnect) { tcpci->tcpc.enable_auto_vbus_discharge = tcpci_enable_auto_vbus_discharge; diff --git a/drivers/usb/typec/tcpm/tcpci.h b/drivers/usb/typec/tcpm/tcpci.h index c3c7d07d9b4e..57b6e24e0a0c 100644 --- a/drivers/usb/typec/tcpm/tcpci.h +++ b/drivers/usb/typec/tcpm/tcpci.h @@ -161,6 +161,10 @@ struct tcpci; * Optional; Enables TCPC to autonously discharge vbus on disconnect. * @vbus_vsafe0v: * optional; Set when TCPC can detect whether vbus is at VSAFE0V. + * @set_partner_usb_comm_capable: + * Optional; The USB Communications Capable bit indicates if port + * partner is capable of communication over the USB data lines + * (e.g. D+/- or SS Tx/Rx). Called to notify the status of the bit. 
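The new set_partner_usb_comm_capable hook above lets TCPM push the partner's USB Communications Capable bit down to vendor TCPCs that gate their USB data switches on it. For a fixed-supply contract that bit lives in the partner's Fixed Supply PDO; assuming the standard PDO_FIXED_USB_COMM flag from <linux/usb/pd.h> (bit 26 of a Fixed PDO), the check amounts to roughly:

    #include <linux/usb/pd.h>

    /* Sketch: USB comm capability is bit 26 of a Fixed Supply PDO. */
    static bool partner_usb_comm_capable(u32 pdo)
    {
            return pdo_type(pdo) == PDO_TYPE_FIXED && (pdo & PDO_FIXED_USB_COMM);
    }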
*/ struct tcpci_data { struct regmap *regmap; @@ -175,6 +179,8 @@ struct tcpci_data { enum typec_cc_status cc); int (*set_vbus)(struct tcpci *tcpci, struct tcpci_data *data, bool source, bool sink); void (*frs_sourcing_vbus)(struct tcpci *tcpci, struct tcpci_data *data); + void (*set_partner_usb_comm_capable)(struct tcpci *tcpci, struct tcpci_data *data, + bool capable); }; struct tcpci *tcpci_register_port(struct device *dev, struct tcpci_data *data); diff --git a/drivers/usb/typec/tcpm/tcpci_maxim.c b/drivers/usb/typec/tcpm/tcpci_maxim.c index 319266329b42..041a1c393594 100644 --- a/drivers/usb/typec/tcpm/tcpci_maxim.c +++ b/drivers/usb/typec/tcpm/tcpci_maxim.c @@ -5,13 +5,10 @@ * MAXIM TCPCI based TCPC driver */ -#include <linux/gpio.h> -#include <linux/gpio/consumer.h> #include <linux/interrupt.h> #include <linux/i2c.h> #include <linux/kernel.h> #include <linux/module.h> -#include <linux/of_gpio.h> #include <linux/regmap.h> #include <linux/usb/pd.h> #include <linux/usb/tcpm.h> @@ -22,6 +19,9 @@ #define PD_ACTIVITY_TIMEOUT_MS 10000 #define TCPC_VENDOR_ALERT 0x80 +#define TCPC_VENDOR_USBSW_CTRL 0x93 +#define TCPC_VENDOR_USBSW_CTRL_ENABLE_USB_DATA 0x9 +#define TCPC_VENDOR_USBSW_CTRL_DISABLE_USB_DATA 0 #define TCPC_RECEIVE_BUFFER_COUNT_OFFSET 0 #define TCPC_RECEIVE_BUFFER_FRAME_TYPE_OFFSET 1 @@ -158,7 +158,7 @@ static void process_rx(struct max_tcpci_chip *chip, u16 status) */ ret = regmap_raw_read(chip->data.regmap, TCPC_RX_BYTE_CNT, rx_buf, 2); if (ret < 0) { - dev_err(chip->dev, "TCPC_RX_BYTE_CNT read failed ret:%d", ret); + dev_err(chip->dev, "TCPC_RX_BYTE_CNT read failed ret:%d\n", ret); return; } @@ -167,13 +167,13 @@ static void process_rx(struct max_tcpci_chip *chip, u16 status) if (count == 0 || frame_type != TCPC_RX_BUF_FRAME_TYPE_SOP) { max_tcpci_write16(chip, TCPC_ALERT, TCPC_ALERT_RX_STATUS); - dev_err(chip->dev, "%s", count == 0 ? "error: count is 0" : + dev_err(chip->dev, "%s\n", count == 0 ? "error: count is 0" : "error frame_type is not SOP"); return; } if (count > sizeof(struct pd_message) || count + 1 > TCPC_RECEIVE_BUFFER_LEN) { - dev_err(chip->dev, "Invalid TCPC_RX_BYTE_CNT %d", count); + dev_err(chip->dev, "Invalid TCPC_RX_BYTE_CNT %d\n", count); return; } @@ -184,7 +184,7 @@ static void process_rx(struct max_tcpci_chip *chip, u16 status) count += 1; ret = regmap_raw_read(chip->data.regmap, TCPC_RX_BYTE_CNT, rx_buf, count); if (ret < 0) { - dev_err(chip->dev, "Error: TCPC_RX_BYTE_CNT read failed: %d", ret); + dev_err(chip->dev, "Error: TCPC_RX_BYTE_CNT read failed: %d\n", ret); return; } @@ -277,6 +277,21 @@ static void process_tx(struct max_tcpci_chip *chip, u16 status) max_tcpci_init_regs(chip); } +/* Enable USB switches when partner is USB communications capable */ +static void max_tcpci_set_partner_usb_comm_capable(struct tcpci *tcpci, struct tcpci_data *data, + bool capable) +{ + struct max_tcpci_chip *chip = tdata_to_max_tcpci(data); + int ret; + + ret = max_tcpci_write8(chip, TCPC_VENDOR_USBSW_CTRL, capable ? 
+ TCPC_VENDOR_USBSW_CTRL_ENABLE_USB_DATA : + TCPC_VENDOR_USBSW_CTRL_DISABLE_USB_DATA); + + if (ret < 0) + dev_err(chip->dev, "Failed to enable USB switches"); +} + static irqreturn_t _max_tcpci_irq(struct max_tcpci_chip *chip, u16 status) { u16 mask; @@ -317,7 +332,7 @@ static irqreturn_t _max_tcpci_irq(struct max_tcpci_chip *chip, u16 status) return ret; if (reg_status & TCPC_SINK_FAST_ROLE_SWAP) { - dev_info(chip->dev, "FRS Signal"); + dev_info(chip->dev, "FRS Signal\n"); tcpm_sink_frs(chip->port); } } @@ -456,12 +471,12 @@ static int max_tcpci_probe(struct i2c_client *client, const struct i2c_device_id chip->data.frs_sourcing_vbus = max_tcpci_frs_sourcing_vbus; chip->data.auto_discharge_disconnect = true; chip->data.vbus_vsafe0v = true; + chip->data.set_partner_usb_comm_capable = max_tcpci_set_partner_usb_comm_capable; max_tcpci_init_regs(chip); chip->tcpci = tcpci_register_port(chip->dev, &chip->data); if (IS_ERR(chip->tcpci)) { - dev_err(&client->dev, "TCPCI port registration failed"); - ret = PTR_ERR(chip->tcpci); + dev_err(&client->dev, "TCPCI port registration failed\n"); return PTR_ERR(chip->tcpci); } chip->port = tcpci_get_tcpm_port(chip->tcpci); diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c index 22a85b396f69..be0b6469dd3d 100644 --- a/drivers/usb/typec/tcpm/tcpm.c +++ b/drivers/usb/typec/tcpm/tcpm.c @@ -76,6 +76,8 @@ S(SNK_HARD_RESET_SINK_ON), \ \ S(SOFT_RESET), \ + S(SRC_SOFT_RESET_WAIT_SNK_TX), \ + S(SNK_SOFT_RESET), \ S(SOFT_RESET_SEND), \ \ S(DR_SWAP_ACCEPT), \ @@ -139,7 +141,46 @@ \ S(ERROR_RECOVERY), \ S(PORT_RESET), \ - S(PORT_RESET_WAIT_OFF) + S(PORT_RESET_WAIT_OFF), \ + \ + S(AMS_START), \ + S(CHUNK_NOT_SUPP) + +#define FOREACH_AMS(S) \ + S(NONE_AMS), \ + S(POWER_NEGOTIATION), \ + S(GOTOMIN), \ + S(SOFT_RESET_AMS), \ + S(HARD_RESET), \ + S(CABLE_RESET), \ + S(GET_SOURCE_CAPABILITIES), \ + S(GET_SINK_CAPABILITIES), \ + S(POWER_ROLE_SWAP), \ + S(FAST_ROLE_SWAP), \ + S(DATA_ROLE_SWAP), \ + S(VCONN_SWAP), \ + S(SOURCE_ALERT), \ + S(GETTING_SOURCE_EXTENDED_CAPABILITIES),\ + S(GETTING_SOURCE_SINK_STATUS), \ + S(GETTING_BATTERY_CAPABILITIES), \ + S(GETTING_BATTERY_STATUS), \ + S(GETTING_MANUFACTURER_INFORMATION), \ + S(SECURITY), \ + S(FIRMWARE_UPDATE), \ + S(DISCOVER_IDENTITY), \ + S(SOURCE_STARTUP_CABLE_PLUG_DISCOVER_IDENTITY), \ + S(DISCOVER_SVIDS), \ + S(DISCOVER_MODES), \ + S(DFP_TO_UFP_ENTER_MODE), \ + S(DFP_TO_UFP_EXIT_MODE), \ + S(DFP_TO_CABLE_PLUG_ENTER_MODE), \ + S(DFP_TO_CABLE_PLUG_EXIT_MODE), \ + S(ATTENTION), \ + S(BIST), \ + S(UNSTRUCTURED_VDMS), \ + S(STRUCTURED_VDMS), \ + S(COUNTRY_INFO), \ + S(COUNTRY_CODES) #define GENERATE_ENUM(e) e #define GENERATE_STRING(s) #s @@ -152,6 +193,14 @@ static const char * const tcpm_states[] = { FOREACH_STATE(GENERATE_STRING) }; +enum tcpm_ams { + FOREACH_AMS(GENERATE_ENUM) +}; + +static const char * const tcpm_ams_str[] = { + FOREACH_AMS(GENERATE_STRING) +}; + enum vdm_states { VDM_STATE_ERR_BUSY = -3, VDM_STATE_ERR_SEND = -2, @@ -161,6 +210,7 @@ enum vdm_states { VDM_STATE_READY = 1, VDM_STATE_BUSY = 2, VDM_STATE_WAIT_RSP_BUSY = 3, + VDM_STATE_SEND_MESSAGE = 4, }; enum pd_msg_request { @@ -302,6 +352,7 @@ struct tcpm_port { struct hrtimer enable_frs_timer; struct kthread_work enable_frs; bool state_machine_running; + bool vdm_sm_running; struct completion tx_complete; enum tcpm_transmit_status tx_status; @@ -381,6 +432,12 @@ struct tcpm_port { /* Sink caps have been queried */ bool sink_cap_done; + /* Collision Avoidance and Atomic Message Sequence */ + enum tcpm_state upcoming_state; + enum 
tcpm_ams ams; + enum tcpm_ams next_ams; + bool in_ams; + #ifdef CONFIG_DEBUG_FS struct dentry *dentry; struct mutex logbuffer_lock; /* log buffer access lock */ @@ -396,6 +453,12 @@ struct pd_rx_event { struct pd_message msg; }; +static const char * const pd_rev[] = { + [PD_REV10] = "rev1", + [PD_REV20] = "rev2", + [PD_REV30] = "rev3", +}; + #define tcpm_cc_is_sink(cc) \ ((cc) == TYPEC_CC_RP_DEF || (cc) == TYPEC_CC_RP_1_5 || \ (cc) == TYPEC_CC_RP_3_0) @@ -440,6 +503,10 @@ struct pd_rx_event { ((port)->typec_caps.data == TYPEC_PORT_DFP ? \ TYPEC_HOST : TYPEC_DEVICE) +#define tcpm_sink_tx_ok(port) \ + (tcpm_port_is_sink(port) && \ + ((port)->cc1 == TYPEC_CC_RP_3_0 || (port)->cc2 == TYPEC_CC_RP_3_0)) + static enum tcpm_state tcpm_default_state(struct tcpm_port *port) { if (port->port_type == TYPEC_PORT_DRP) { @@ -666,6 +733,67 @@ static void tcpm_debugfs_exit(const struct tcpm_port *port) { } #endif +static void tcpm_set_cc(struct tcpm_port *port, enum typec_cc_status cc) +{ + tcpm_log(port, "cc:=%d", cc); + port->cc_req = cc; + port->tcpc->set_cc(port->tcpc, cc); +} + +/* + * Determine RP value to set based on maximum current supported + * by a port if configured as source. + * Returns CC value to report to link partner. + */ +static enum typec_cc_status tcpm_rp_cc(struct tcpm_port *port) +{ + const u32 *src_pdo = port->src_pdo; + int nr_pdo = port->nr_src_pdo; + int i; + + /* + * Search for first entry with matching voltage. + * It should report the maximum supported current. + */ + for (i = 0; i < nr_pdo; i++) { + const u32 pdo = src_pdo[i]; + + if (pdo_type(pdo) == PDO_TYPE_FIXED && + pdo_fixed_voltage(pdo) == 5000) { + unsigned int curr = pdo_max_current(pdo); + + if (curr >= 3000) + return TYPEC_CC_RP_3_0; + else if (curr >= 1500) + return TYPEC_CC_RP_1_5; + return TYPEC_CC_RP_DEF; + } + } + + return TYPEC_CC_RP_DEF; +} + +static int tcpm_ams_finish(struct tcpm_port *port) +{ + int ret = 0; + + tcpm_log(port, "AMS %s finished", tcpm_ams_str[port->ams]); + + if (port->pd_capable && port->pwr_role == TYPEC_SOURCE) { + if (port->negotiated_rev >= PD_REV30) + tcpm_set_cc(port, SINK_TX_OK); + else + tcpm_set_cc(port, SINK_TX_NG); + } else if (port->pwr_role == TYPEC_SOURCE) { + tcpm_set_cc(port, tcpm_rp_cc(port)); + } + + port->in_ams = false; + port->ams = NONE_AMS; + + return ret; +} + static int tcpm_pd_transmit(struct tcpm_port *port, enum tcpm_transmit_type type, const struct pd_message *msg) @@ -693,13 +821,30 @@ static int tcpm_pd_transmit(struct tcpm_port *port, switch (port->tx_status) { case TCPC_TX_SUCCESS: port->message_id = (port->message_id + 1) & PD_HEADER_ID_MASK; - return 0; + /* + * USB PD rev 2.0, 8.3.2.2.1: + * USB PD rev 3.0, 8.3.2.1.3: + * "... Note that every AMS is Interruptible until the first + * Message in the sequence has been successfully sent (GoodCRC + * Message received)." + */ + if (port->ams != NONE_AMS) + port->in_ams = true; + break; case TCPC_TX_DISCARDED: - return -EAGAIN; + ret = -EAGAIN; + break; case TCPC_TX_FAILED: default: - return -EIO; + ret = -EIO; + break; } + + /* Some AMS don't expect responses. Finish them here. */ + if (port->ams == ATTENTION || port->ams == SOURCE_ALERT) + tcpm_ams_finish(port); + + return ret; } void tcpm_pd_transmit_complete(struct tcpm_port *port, @@ -804,39 +949,6 @@ static int tcpm_set_current_limit(struct tcpm_port *port, u32 max_ma, u32 mv) return ret; } -/* - * Determine RP value to set based on maximum current supported - * by a port if configured as source. - * Returns CC value to report to link partner. 
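The tcpm_sink_tx_ok() macro above encodes PD 3.0 collision avoidance: a source signals whether the sink may begin a new atomic message sequence by presenting Rp 3.0 A (SinkTxOK) or Rp 1.5 A (SinkTxNG) on CC, which is also why tcpm_ams_finish() restores SINK_TX_OK once an AMS completes. Expanded from the macro into a plain function for readability (the port must currently be the sink for the check to apply):

    #include <linux/usb/typec.h>

    /* Sketch: a sink may initiate an AMS only while the source shows Rp 3.0 A. */
    static bool sink_may_start_ams(enum typec_cc_status cc1,
                                   enum typec_cc_status cc2)
    {
            return cc1 == TYPEC_CC_RP_3_0 || cc2 == TYPEC_CC_RP_3_0;
    }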
- */ -static enum typec_cc_status tcpm_rp_cc(struct tcpm_port *port) -{ - const u32 *src_pdo = port->src_pdo; - int nr_pdo = port->nr_src_pdo; - int i; - - /* - * Search for first entry with matching voltage. - * It should report the maximum supported current. - */ - for (i = 0; i < nr_pdo; i++) { - const u32 pdo = src_pdo[i]; - - if (pdo_type(pdo) == PDO_TYPE_FIXED && - pdo_fixed_voltage(pdo) == 5000) { - unsigned int curr = pdo_max_current(pdo); - - if (curr >= 3000) - return TYPEC_CC_RP_3_0; - else if (curr >= 1500) - return TYPEC_CC_RP_1_5; - return TYPEC_CC_RP_DEF; - } - } - - return TYPEC_CC_RP_DEF; -} - static int tcpm_set_attached_state(struct tcpm_port *port, bool attached) { return port->tcpc->set_roles(port->tcpc, attached, port->pwr_role, @@ -911,13 +1023,47 @@ static int tcpm_set_pwr_role(struct tcpm_port *port, enum typec_role role) return 0; } +/* + * Transform the PDO to be compliant to PD rev2.0. + * Return 0 if the PDO type is not defined in PD rev2.0. + * Otherwise, return the converted PDO. + */ +static u32 tcpm_forge_legacy_pdo(struct tcpm_port *port, u32 pdo, enum typec_role role) +{ + switch (pdo_type(pdo)) { + case PDO_TYPE_FIXED: + if (role == TYPEC_SINK) + return pdo & ~PDO_FIXED_FRS_CURR_MASK; + else + return pdo & ~PDO_FIXED_UNCHUNK_EXT; + case PDO_TYPE_VAR: + case PDO_TYPE_BATT: + return pdo; + case PDO_TYPE_APDO: + default: + return 0; + } +} + static int tcpm_pd_send_source_caps(struct tcpm_port *port) { struct pd_message msg; - int i; + u32 pdo; + unsigned int i, nr_pdo = 0; memset(&msg, 0, sizeof(msg)); - if (!port->nr_src_pdo) { + + for (i = 0; i < port->nr_src_pdo; i++) { + if (port->negotiated_rev >= PD_REV30) { + msg.payload[nr_pdo++] = cpu_to_le32(port->src_pdo[i]); + } else { + pdo = tcpm_forge_legacy_pdo(port, port->src_pdo[i], TYPEC_SOURCE); + if (pdo) + msg.payload[nr_pdo++] = cpu_to_le32(pdo); + } + } + + if (!nr_pdo) { /* No source capabilities defined, sink only */ msg.header = PD_HEADER_LE(PD_CTRL_REJECT, port->pwr_role, @@ -930,10 +1076,8 @@ static int tcpm_pd_send_source_caps(struct tcpm_port *port) port->data_role, port->negotiated_rev, port->message_id, - port->nr_src_pdo); + nr_pdo); } - for (i = 0; i < port->nr_src_pdo; i++) - msg.payload[i] = cpu_to_le32(port->src_pdo[i]); return tcpm_pd_transmit(port, TCPC_TX_SOP, &msg); } @@ -941,10 +1085,22 @@ static int tcpm_pd_send_source_caps(struct tcpm_port *port) static int tcpm_pd_send_sink_caps(struct tcpm_port *port) { struct pd_message msg; - int i; + u32 pdo; + unsigned int i, nr_pdo = 0; memset(&msg, 0, sizeof(msg)); - if (!port->nr_snk_pdo) { + + for (i = 0; i < port->nr_snk_pdo; i++) { + if (port->negotiated_rev >= PD_REV30) { + msg.payload[nr_pdo++] = cpu_to_le32(port->snk_pdo[i]); + } else { + pdo = tcpm_forge_legacy_pdo(port, port->snk_pdo[i], TYPEC_SINK); + if (pdo) + msg.payload[nr_pdo++] = cpu_to_le32(pdo); + } + } + + if (!nr_pdo) { /* No sink capabilities defined, source only */ msg.header = PD_HEADER_LE(PD_CTRL_REJECT, port->pwr_role, @@ -957,10 +1113,8 @@ static int tcpm_pd_send_sink_caps(struct tcpm_port *port) port->data_role, port->negotiated_rev, port->message_id, - port->nr_snk_pdo); + nr_pdo); } - for (i = 0; i < port->nr_snk_pdo; i++) - msg.payload[i] = cpu_to_le32(port->snk_pdo[i]); return tcpm_pd_transmit(port, TCPC_TX_SOP, &msg); } @@ -1000,16 +1154,17 @@ static void tcpm_set_state(struct tcpm_port *port, enum tcpm_state state, unsigned int delay_ms) { if (delay_ms) { - tcpm_log(port, "pending state change %s -> %s @ %u ms", - tcpm_states[port->state], 
tcpm_states[state], - delay_ms); + tcpm_log(port, "pending state change %s -> %s @ %u ms [%s %s]", + tcpm_states[port->state], tcpm_states[state], delay_ms, + pd_rev[port->negotiated_rev], tcpm_ams_str[port->ams]); port->delayed_state = state; mod_tcpm_delayed_work(port, delay_ms); port->delayed_runtime = ktime_add(ktime_get(), ms_to_ktime(delay_ms)); port->delay_ms = delay_ms; } else { - tcpm_log(port, "state change %s -> %s", - tcpm_states[port->state], tcpm_states[state]); + tcpm_log(port, "state change %s -> %s [%s %s]", + tcpm_states[port->state], tcpm_states[state], + pd_rev[port->negotiated_rev], tcpm_ams_str[port->ams]); port->delayed_state = INVALID_STATE; port->prev_state = port->state; port->state = state; @@ -1031,10 +1186,11 @@ static void tcpm_set_state_cond(struct tcpm_port *port, enum tcpm_state state, tcpm_set_state(port, state, delay_ms); else tcpm_log(port, - "skipped %sstate change %s -> %s [%u ms], context state %s", + "skipped %sstate change %s -> %s [%u ms], context state %s [%s %s]", delay_ms ? "delayed " : "", tcpm_states[port->state], tcpm_states[state], - delay_ms, tcpm_states[port->enter_state]); + delay_ms, tcpm_states[port->enter_state], + pd_rev[port->negotiated_rev], tcpm_ams_str[port->ams]); } static void tcpm_queue_message(struct tcpm_port *port, @@ -1044,6 +1200,149 @@ static void tcpm_queue_message(struct tcpm_port *port, mod_tcpm_delayed_work(port, 0); } +static bool tcpm_vdm_ams(struct tcpm_port *port) +{ + switch (port->ams) { + case DISCOVER_IDENTITY: + case SOURCE_STARTUP_CABLE_PLUG_DISCOVER_IDENTITY: + case DISCOVER_SVIDS: + case DISCOVER_MODES: + case DFP_TO_UFP_ENTER_MODE: + case DFP_TO_UFP_EXIT_MODE: + case DFP_TO_CABLE_PLUG_ENTER_MODE: + case DFP_TO_CABLE_PLUG_EXIT_MODE: + case ATTENTION: + case UNSTRUCTURED_VDMS: + case STRUCTURED_VDMS: + break; + default: + return false; + } + + return true; +} + +static bool tcpm_ams_interruptible(struct tcpm_port *port) +{ + switch (port->ams) { + /* Interruptible AMS */ + case NONE_AMS: + case SECURITY: + case FIRMWARE_UPDATE: + case DISCOVER_IDENTITY: + case SOURCE_STARTUP_CABLE_PLUG_DISCOVER_IDENTITY: + case DISCOVER_SVIDS: + case DISCOVER_MODES: + case DFP_TO_UFP_ENTER_MODE: + case DFP_TO_UFP_EXIT_MODE: + case DFP_TO_CABLE_PLUG_ENTER_MODE: + case DFP_TO_CABLE_PLUG_EXIT_MODE: + case UNSTRUCTURED_VDMS: + case STRUCTURED_VDMS: + case COUNTRY_INFO: + case COUNTRY_CODES: + break; + /* Non-Interruptible AMS */ + default: + if (port->in_ams) + return false; + break; + } + + return true; +} + +static int tcpm_ams_start(struct tcpm_port *port, enum tcpm_ams ams) +{ + int ret = 0; + + tcpm_log(port, "AMS %s start", tcpm_ams_str[ams]); + + if (!tcpm_ams_interruptible(port) && + !(ams == HARD_RESET || ams == SOFT_RESET_AMS)) { + port->upcoming_state = INVALID_STATE; + tcpm_log(port, "AMS %s not interruptible, aborting", + tcpm_ams_str[port->ams]); + return -EAGAIN; + } + + if (port->pwr_role == TYPEC_SOURCE) { + enum typec_cc_status cc_req = port->cc_req; + + port->ams = ams; + + if (ams == HARD_RESET) { + tcpm_set_cc(port, tcpm_rp_cc(port)); + tcpm_pd_transmit(port, TCPC_TX_HARD_RESET, NULL); + tcpm_set_state(port, HARD_RESET_START, 0); + return ret; + } else if (ams == SOFT_RESET_AMS) { + if (!port->explicit_contract) + tcpm_set_cc(port, tcpm_rp_cc(port)); + tcpm_set_state(port, SOFT_RESET_SEND, 0); + return ret; + } else if (tcpm_vdm_ams(port)) { + /* tSinkTx is enforced in vdm_run_state_machine */ + if (port->negotiated_rev >= PD_REV30) + tcpm_set_cc(port, SINK_TX_NG); + return ret; + } + + if 
(port->negotiated_rev >= PD_REV30) + tcpm_set_cc(port, SINK_TX_NG); + + switch (port->state) { + case SRC_READY: + case SRC_STARTUP: + case SRC_SOFT_RESET_WAIT_SNK_TX: + case SOFT_RESET: + case SOFT_RESET_SEND: + if (port->negotiated_rev >= PD_REV30) + tcpm_set_state(port, AMS_START, + cc_req == SINK_TX_OK ? + PD_T_SINK_TX : 0); + else + tcpm_set_state(port, AMS_START, 0); + break; + default: + if (port->negotiated_rev >= PD_REV30) + tcpm_set_state(port, SRC_READY, + cc_req == SINK_TX_OK ? + PD_T_SINK_TX : 0); + else + tcpm_set_state(port, SRC_READY, 0); + break; + } + } else { + if (port->negotiated_rev >= PD_REV30 && + !tcpm_sink_tx_ok(port) && + ams != SOFT_RESET_AMS && + ams != HARD_RESET) { + port->upcoming_state = INVALID_STATE; + tcpm_log(port, "Sink TX No Go"); + return -EAGAIN; + } + + port->ams = ams; + + if (ams == HARD_RESET) { + tcpm_pd_transmit(port, TCPC_TX_HARD_RESET, NULL); + tcpm_set_state(port, HARD_RESET_START, 0); + return ret; + } else if (tcpm_vdm_ams(port)) { + return ret; + } + + if (port->state == SNK_READY || + port->state == SNK_SOFT_RESET) + tcpm_set_state(port, AMS_START, 0); + else + tcpm_set_state(port, SNK_READY, 0); + } + + return ret; +} + /* * VDM/VDO handling functions */ @@ -1176,8 +1475,10 @@ static int tcpm_pd_svdm(struct tcpm_port *port, struct typec_altmode *adev, const u32 *p, int cnt, u32 *response, enum adev_actions *adev_action) { + struct typec_port *typec = port->typec_port; struct typec_altmode *pdev; struct pd_mode_data *modep; + int svdm_version; int rlen = 0; int cmd_type; int cmd; @@ -1194,14 +1495,33 @@ static int tcpm_pd_svdm(struct tcpm_port *port, struct typec_altmode *adev, pdev = typec_match_altmode(port->partner_altmode, ALTMODE_DISCOVERY_MAX, PD_VDO_VID(p[0]), PD_VDO_OPOS(p[0])); + svdm_version = typec_get_negotiated_svdm_version(typec); + if (svdm_version < 0) + return 0; + switch (cmd_type) { case CMDT_INIT: switch (cmd) { case CMD_DISCOVER_IDENT: + if (PD_VDO_VID(p[0]) != USB_SID_PD) + break; + + if (PD_VDO_SVDM_VER(p[0]) < svdm_version) + typec_partner_set_svdm_version(port->partner, + PD_VDO_SVDM_VER(p[0])); /* 6.4.4.3.1: Only respond as UFP (device) */ if (port->data_role == TYPEC_DEVICE && port->nr_snk_vdo) { - for (i = 0; i < port->nr_snk_vdo; i++) + /* + * Product Type DFP and Connector Type are not defined in SVDM + * version 1.0 and shall be set to zero. 
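The collision-avoidance plumbing above encodes PD 3.0's SinkTxOK/SinkTxNG as Rp values (in this driver SINK_TX_OK is Rp 3.0 A and SINK_TX_NG is Rp 1.5 A): the source drops the CC pull to SinkTxNG before it starts an AMS and restores SinkTxOK in tcpm_ams_finish(). The sink-side gate, sketched after the tcpm_sink_tx_ok() macro introduced earlier (enum reduced to the three Rp levels for brevity):

    #include <stdbool.h>

    /* CC pull levels, reduced to the three Rp values that matter here */
    enum cc_rp { CC_RP_DEF, CC_RP_1_5, CC_RP_3_0 };

    /*
     * A PD 3.0 sink may start a non-reset AMS only while the source
     * advertises SinkTxOK, i.e. while either CC pin sees Rp 3.0 A.
     */
    static bool sink_may_start_ams(enum cc_rp cc1, enum cc_rp cc2)
    {
            return cc1 == CC_RP_3_0 || cc2 == CC_RP_3_0;
    }

This is why the sink path of tcpm_ams_start() logs "Sink TX No Go" and returns -EAGAIN unless the AMS is a hard or soft reset.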
+ */ + if (typec_get_negotiated_svdm_version(typec) < SVDM_VER_2_0) + response[1] = port->snk_vdo[0] & ~IDH_DFP_MASK + & ~IDH_CONN_MASK; + else + response[1] = port->snk_vdo[0]; + for (i = 1; i < port->nr_snk_vdo; i++) response[i + 1] = port->snk_vdo[i]; rlen = port->nr_snk_vdo + 1; } @@ -1230,27 +1550,34 @@ static int tcpm_pd_svdm(struct tcpm_port *port, struct typec_altmode *adev, response[0] = p[0] | VDO_CMDT(CMDT_RSP_BUSY); rlen = 1; } + response[0] = (response[0] & ~VDO_SVDM_VERS_MASK) | + (VDO_SVDM_VERS(typec_get_negotiated_svdm_version(typec))); break; case CMDT_RSP_ACK: /* silently drop message if we are not connected */ if (IS_ERR_OR_NULL(port->partner)) break; + tcpm_ams_finish(port); + switch (cmd) { case CMD_DISCOVER_IDENT: + if (PD_VDO_SVDM_VER(p[0]) < svdm_version) + typec_partner_set_svdm_version(port->partner, + PD_VDO_SVDM_VER(p[0])); /* 6.4.4.3.1 */ svdm_consume_identity(port, p, cnt); - response[0] = VDO(USB_SID_PD, 1, CMD_DISCOVER_SVID); + response[0] = VDO(USB_SID_PD, 1, typec_get_negotiated_svdm_version(typec), + CMD_DISCOVER_SVID); rlen = 1; break; case CMD_DISCOVER_SVID: /* 6.4.4.3.2 */ if (svdm_consume_svids(port, p, cnt)) { - response[0] = VDO(USB_SID_PD, 1, - CMD_DISCOVER_SVID); + response[0] = VDO(USB_SID_PD, 1, svdm_version, CMD_DISCOVER_SVID); rlen = 1; } else if (modep->nsvids && supports_modal(port)) { - response[0] = VDO(modep->svids[0], 1, + response[0] = VDO(modep->svids[0], 1, svdm_version, CMD_DISCOVER_MODES); rlen = 1; } @@ -1261,10 +1588,11 @@ static int tcpm_pd_svdm(struct tcpm_port *port, struct typec_altmode *adev, modep->svid_index++; if (modep->svid_index < modep->nsvids) { u16 svid = modep->svids[modep->svid_index]; - response[0] = VDO(svid, 1, CMD_DISCOVER_MODES); + response[0] = VDO(svid, 1, svdm_version, CMD_DISCOVER_MODES); rlen = 1; } else { tcpm_register_partner_altmodes(port); + port->vdm_sm_running = false; } break; case CMD_ENTER_MODE: @@ -1281,21 +1609,45 @@ static int tcpm_pd_svdm(struct tcpm_port *port, struct typec_altmode *adev, return 0; } break; + case VDO_CMD_VENDOR(0) ... VDO_CMD_VENDOR(15): + break; default: + /* Unrecognized SVDM */ + response[0] = p[0] | VDO_CMDT(CMDT_RSP_NAK); + rlen = 1; + response[0] = (response[0] & ~VDO_SVDM_VERS_MASK) | + (VDO_SVDM_VERS(svdm_version)); break; } break; case CMDT_RSP_NAK: + tcpm_ams_finish(port); switch (cmd) { + case CMD_DISCOVER_IDENT: + case CMD_DISCOVER_SVID: + case CMD_DISCOVER_MODES: + case VDO_CMD_VENDOR(0) ... 
VDO_CMD_VENDOR(15): + break; case CMD_ENTER_MODE: /* Back to USB Operation */ *adev_action = ADEV_NOTIFY_USB_AND_QUEUE_VDM; return 0; default: + /* Unrecognized SVDM */ + response[0] = p[0] | VDO_CMDT(CMDT_RSP_NAK); + rlen = 1; + response[0] = (response[0] & ~VDO_SVDM_VERS_MASK) | + (VDO_SVDM_VERS(svdm_version)); break; } + port->vdm_sm_running = false; break; default: + response[0] = p[0] | VDO_CMDT(CMDT_RSP_NAK); + rlen = 1; + response[0] = (response[0] & ~VDO_SVDM_VERS_MASK) | + (VDO_SVDM_VERS(svdm_version)); + port->vdm_sm_running = false; break; } @@ -1331,8 +1683,12 @@ static void tcpm_handle_vdm_request(struct tcpm_port *port, port->vdm_state = VDM_STATE_DONE; } - if (PD_VDO_SVDM(p[0])) + if (PD_VDO_SVDM(p[0])) { rlen = tcpm_pd_svdm(port, adev, p, cnt, response, &adev_action); + } else { + if (port->negotiated_rev >= PD_REV30) + tcpm_queue_message(port, PD_MSG_CTRL_NOT_SUPP); + } /* * We are done with any state stored in the port struct now, except @@ -1368,7 +1724,13 @@ static void tcpm_handle_vdm_request(struct tcpm_port *port, break; case ADEV_QUEUE_VDM_SEND_EXIT_MODE_ON_FAIL: if (typec_altmode_vdm(adev, p[0], &p[1], cnt)) { - response[0] = VDO(adev->svid, 1, CMD_EXIT_MODE); + int svdm_version = typec_get_negotiated_svdm_version( + port->typec_port); + if (svdm_version < 0) + break; + + response[0] = VDO(adev->svid, 1, svdm_version, + CMD_EXIT_MODE); response[0] |= VDO_OPOS(adev->mode); rlen = 1; } @@ -1395,14 +1757,19 @@ static void tcpm_handle_vdm_request(struct tcpm_port *port, static void tcpm_send_vdm(struct tcpm_port *port, u32 vid, int cmd, const u32 *data, int count) { + int svdm_version = typec_get_negotiated_svdm_version(port->typec_port); u32 header; + if (svdm_version < 0) + return; + if (WARN_ON(count > VDO_MAX_SIZE - 1)) count = VDO_MAX_SIZE - 1; /* set VDM header with VID & CMD */ header = VDO(vid, ((vid & USB_SID_PD) == USB_SID_PD) ? 
- 1 : (PD_VDO_CMD(cmd) <= CMD_ATTENTION), cmd); + 1 : (PD_VDO_CMD(cmd) <= CMD_ATTENTION), + svdm_version, cmd); tcpm_queue_vdm(port, header, data, count); } @@ -1435,7 +1802,8 @@ static unsigned int vdm_ready_timeout(u32 vdm_hdr) static void vdm_run_state_machine(struct tcpm_port *port) { struct pd_message msg; - int i, res; + int i, res = 0; + u32 vdo_hdr = port->vdo_data[0]; switch (port->vdm_state) { case VDM_STATE_READY: @@ -1452,26 +1820,49 @@ static void vdm_run_state_machine(struct tcpm_port *port) if (port->state != SRC_READY && port->state != SNK_READY) break; - /* Prepare and send VDM */ - memset(&msg, 0, sizeof(msg)); - msg.header = PD_HEADER_LE(PD_DATA_VENDOR_DEF, - port->pwr_role, - port->data_role, - port->negotiated_rev, - port->message_id, port->vdo_count); - for (i = 0; i < port->vdo_count; i++) - msg.payload[i] = cpu_to_le32(port->vdo_data[i]); - res = tcpm_pd_transmit(port, TCPC_TX_SOP, &msg); - if (res < 0) { - port->vdm_state = VDM_STATE_ERR_SEND; - } else { - unsigned long timeout; + /* TODO: AMS operation for Unstructured VDM */ + if (PD_VDO_SVDM(vdo_hdr) && PD_VDO_CMDT(vdo_hdr) == CMDT_INIT) { + switch (PD_VDO_CMD(vdo_hdr)) { + case CMD_DISCOVER_IDENT: + res = tcpm_ams_start(port, DISCOVER_IDENTITY); + if (res == 0) + port->send_discover = false; + break; + case CMD_DISCOVER_SVID: + res = tcpm_ams_start(port, DISCOVER_SVIDS); + break; + case CMD_DISCOVER_MODES: + res = tcpm_ams_start(port, DISCOVER_MODES); + break; + case CMD_ENTER_MODE: + res = tcpm_ams_start(port, DFP_TO_UFP_ENTER_MODE); + break; + case CMD_EXIT_MODE: + res = tcpm_ams_start(port, DFP_TO_UFP_EXIT_MODE); + break; + case CMD_ATTENTION: + res = tcpm_ams_start(port, ATTENTION); + break; + case VDO_CMD_VENDOR(0) ... VDO_CMD_VENDOR(15): + res = tcpm_ams_start(port, STRUCTURED_VDMS); + break; + default: + res = -EOPNOTSUPP; + break; + } - port->vdm_retries = 0; - port->vdm_state = VDM_STATE_BUSY; - timeout = vdm_ready_timeout(port->vdo_data[0]); - mod_vdm_delayed_work(port, timeout); + if (res < 0) { + port->vdm_sm_running = false; + return; + } } + + port->vdm_state = VDM_STATE_SEND_MESSAGE; + mod_vdm_delayed_work(port, (port->negotiated_rev >= PD_REV30 && + port->pwr_role == TYPEC_SOURCE && + PD_VDO_SVDM(vdo_hdr) && + PD_VDO_CMDT(vdo_hdr) == CMDT_INIT) ? 
+ PD_T_SINK_TX : 0); break; case VDM_STATE_WAIT_RSP_BUSY: port->vdo_data[0] = port->vdo_retry; @@ -1480,6 +1871,8 @@ static void vdm_run_state_machine(struct tcpm_port *port) break; case VDM_STATE_BUSY: port->vdm_state = VDM_STATE_ERR_TMOUT; + if (port->ams != NONE_AMS) + tcpm_ams_finish(port); break; case VDM_STATE_ERR_SEND: /* @@ -1492,6 +1885,29 @@ static void vdm_run_state_machine(struct tcpm_port *port) tcpm_log(port, "VDM Tx error, retry"); port->vdm_retries++; port->vdm_state = VDM_STATE_READY; + tcpm_ams_finish(port); + } + break; + case VDM_STATE_SEND_MESSAGE: + /* Prepare and send VDM */ + memset(&msg, 0, sizeof(msg)); + msg.header = PD_HEADER_LE(PD_DATA_VENDOR_DEF, + port->pwr_role, + port->data_role, + port->negotiated_rev, + port->message_id, port->vdo_count); + for (i = 0; i < port->vdo_count; i++) + msg.payload[i] = cpu_to_le32(port->vdo_data[i]); + res = tcpm_pd_transmit(port, TCPC_TX_SOP, &msg); + if (res < 0) { + port->vdm_state = VDM_STATE_ERR_SEND; + } else { + unsigned long timeout; + + port->vdm_retries = 0; + port->vdm_state = VDM_STATE_BUSY; + timeout = vdm_ready_timeout(vdo_hdr); + mod_vdm_delayed_work(port, timeout); } break; default: @@ -1514,7 +1930,11 @@ static void vdm_state_machine_work(struct kthread_work *work) prev_state = port->vdm_state; vdm_run_state_machine(port); } while (port->vdm_state != prev_state && - port->vdm_state != VDM_STATE_BUSY); + port->vdm_state != VDM_STATE_BUSY && + port->vdm_state != VDM_STATE_SEND_MESSAGE); + + if (port->vdm_state == VDM_STATE_ERR_TMOUT) + port->vdm_sm_running = false; mutex_unlock(&port->lock); } @@ -1642,9 +2062,14 @@ static int tcpm_validate_caps(struct tcpm_port *port, const u32 *pdo, static int tcpm_altmode_enter(struct typec_altmode *altmode, u32 *vdo) { struct tcpm_port *port = typec_altmode_get_drvdata(altmode); + int svdm_version; u32 header; - header = VDO(altmode->svid, vdo ? 2 : 1, CMD_ENTER_MODE); + svdm_version = typec_get_negotiated_svdm_version(port->typec_port); + if (svdm_version < 0) + return svdm_version; + + header = VDO(altmode->svid, vdo ? 2 : 1, svdm_version, CMD_ENTER_MODE); header |= VDO_OPOS(altmode->mode); tcpm_queue_vdm_unlocked(port, header, vdo, vdo ? 1 : 0); @@ -1654,9 +2079,14 @@ static int tcpm_altmode_enter(struct typec_altmode *altmode, u32 *vdo) static int tcpm_altmode_exit(struct typec_altmode *altmode) { struct tcpm_port *port = typec_altmode_get_drvdata(altmode); + int svdm_version; u32 header; - header = VDO(altmode->svid, 1, CMD_EXIT_MODE); + svdm_version = typec_get_negotiated_svdm_version(port->typec_port); + if (svdm_version < 0) + return svdm_version; + + header = VDO(altmode->svid, 1, svdm_version, CMD_EXIT_MODE); header |= VDO_OPOS(altmode->mode); tcpm_queue_vdm_unlocked(port, header, NULL, 0); @@ -1736,6 +2166,71 @@ static int tcpm_set_auto_vbus_discharge_threshold(struct tcpm_port *port, return ret; } +static void tcpm_pd_handle_state(struct tcpm_port *port, + enum tcpm_state state, + enum tcpm_ams ams, + unsigned int delay_ms) +{ + switch (port->state) { + case SRC_READY: + case SNK_READY: + port->ams = ams; + tcpm_set_state(port, state, delay_ms); + break; + /* 8.3.3.4.1.1 and 6.8.1 power transitioning */ + case SNK_TRANSITION_SINK: + case SNK_TRANSITION_SINK_VBUS: + case SRC_TRANSITION_SUPPLY: + tcpm_set_state(port, HARD_RESET_SEND, 0); + break; + default: + if (!tcpm_ams_interruptible(port)) { + tcpm_set_state(port, port->pwr_role == TYPEC_SOURCE ? 
+ SRC_SOFT_RESET_WAIT_SNK_TX : + SNK_SOFT_RESET, + 0); + } else { + /* process the Message 6.8.1 */ + port->upcoming_state = state; + port->next_ams = ams; + tcpm_set_state(port, ready_state(port), delay_ms); + } + break; + } +} + +static void tcpm_pd_handle_msg(struct tcpm_port *port, + enum pd_msg_request message, + enum tcpm_ams ams) +{ + switch (port->state) { + case SRC_READY: + case SNK_READY: + port->ams = ams; + tcpm_queue_message(port, message); + break; + /* PD 3.0 Spec 8.3.3.4.1.1 and 6.8.1 */ + case SNK_TRANSITION_SINK: + case SNK_TRANSITION_SINK_VBUS: + case SRC_TRANSITION_SUPPLY: + tcpm_set_state(port, HARD_RESET_SEND, 0); + break; + default: + if (!tcpm_ams_interruptible(port)) { + tcpm_set_state(port, port->pwr_role == TYPEC_SOURCE ? + SRC_SOFT_RESET_WAIT_SNK_TX : + SNK_SOFT_RESET, + 0); + } else { + port->next_ams = ams; + tcpm_set_state(port, ready_state(port), 0); + /* 6.8.1 process the Message */ + tcpm_queue_message(port, message); + } + break; + } +} + static void tcpm_pd_data_request(struct tcpm_port *port, const struct pd_message *msg) { @@ -1749,9 +2244,6 @@ static void tcpm_pd_data_request(struct tcpm_port *port, switch (type) { case PD_DATA_SOURCE_CAP: - if (port->pwr_role != TYPEC_SINK) - break; - for (i = 0; i < cnt; i++) port->source_caps[i] = le32_to_cpu(msg->payload[i]); @@ -1767,12 +2259,26 @@ static void tcpm_pd_data_request(struct tcpm_port *port, * to comply with 6.2.1.1.5 of the USB PD 3.0 spec. We don't * support Rev 1.0 so just do nothing in that scenario. */ - if (rev == PD_REV10) + if (rev == PD_REV10) { + if (port->ams == GET_SOURCE_CAPABILITIES) + tcpm_ams_finish(port); break; + } if (rev < PD_MAX_REV) port->negotiated_rev = rev; + if (port->pwr_role == TYPEC_SOURCE) { + if (port->ams == GET_SOURCE_CAPABILITIES) + tcpm_pd_handle_state(port, SRC_READY, NONE_AMS, 0); + /* Unexpected Source Capabilities */ + else + tcpm_pd_handle_msg(port, + port->negotiated_rev < PD_REV30 ? + PD_MSG_CTRL_REJECT : + PD_MSG_CTRL_NOT_SUPP, + NONE_AMS); + } else if (port->state == SNK_WAIT_CAPABILITIES) { /* * This message may be received even if VBUS is not * present. This is quite unexpected; see USB PD @@ -1786,30 +2292,55 @@ static void tcpm_pd_data_request(struct tcpm_port *port, * but be prepared to keep waiting for VBUS after it was * handled. */ - tcpm_set_state(port, SNK_NEGOTIATE_CAPABILITIES, 0); + port->ams = POWER_NEGOTIATION; + port->in_ams = true; + tcpm_set_state(port, SNK_NEGOTIATE_CAPABILITIES, 0); + } else { + if (port->ams == GET_SOURCE_CAPABILITIES) + tcpm_ams_finish(port); + tcpm_pd_handle_state(port, SNK_NEGOTIATE_CAPABILITIES, + POWER_NEGOTIATION, 0); + } break; case PD_DATA_REQUEST: - if (port->pwr_role != TYPEC_SOURCE || - cnt != 1) { - tcpm_queue_message(port, PD_MSG_CTRL_REJECT); - break; - } - /* * Adjust revision in subsequent message headers, as required, * to comply with 6.2.1.1.5 of the USB PD 3.0 spec. We don't * support Rev 1.0 so just reject in that scenario. */ if (rev == PD_REV10) { - tcpm_queue_message(port, PD_MSG_CTRL_REJECT); + tcpm_pd_handle_msg(port, + port->negotiated_rev < PD_REV30 ? + PD_MSG_CTRL_REJECT : + PD_MSG_CTRL_NOT_SUPP, + NONE_AMS); break; } if (rev < PD_MAX_REV) port->negotiated_rev = rev; + if (port->pwr_role != TYPEC_SOURCE || cnt != 1) { + tcpm_pd_handle_msg(port, + port->negotiated_rev < PD_REV30 ? 
+ PD_MSG_CTRL_REJECT : + PD_MSG_CTRL_NOT_SUPP, + NONE_AMS); + break; + } + port->sink_request = le32_to_cpu(msg->payload[0]); - tcpm_set_state(port, SRC_NEGOTIATE_CAPABILITIES, 0); + + if (port->vdm_sm_running && port->explicit_contract) { + tcpm_pd_handle_msg(port, PD_MSG_CTRL_WAIT, port->ams); + break; + } + + if (port->state == SRC_SEND_CAPABILITIES) + tcpm_set_state(port, SRC_NEGOTIATE_CAPABILITIES, 0); + else + tcpm_pd_handle_state(port, SRC_NEGOTIATE_CAPABILITIES, + POWER_NEGOTIATION, 0); break; case PD_DATA_SINK_CAP: /* We don't do anything with this at the moment... */ @@ -1830,16 +2361,22 @@ static void tcpm_pd_data_request(struct tcpm_port *port, port->nr_sink_caps = cnt; port->sink_cap_done = true; - tcpm_set_state(port, SNK_READY, 0); + if (port->ams == GET_SINK_CAPABILITIES) + tcpm_pd_handle_state(port, ready_state(port), NONE_AMS, 0); + /* Unexpected Sink Capabilities */ + else + tcpm_pd_handle_msg(port, + port->negotiated_rev < PD_REV30 ? + PD_MSG_CTRL_REJECT : + PD_MSG_CTRL_NOT_SUPP, + NONE_AMS); break; case PD_DATA_VENDOR_DEF: tcpm_handle_vdm_request(port, msg->payload, cnt); break; case PD_DATA_BIST: - if (port->state == SRC_READY || port->state == SNK_READY) { - port->bist_request = le32_to_cpu(msg->payload[0]); - tcpm_set_state(port, BIST_RX, 0); - } + port->bist_request = le32_to_cpu(msg->payload[0]); + tcpm_pd_handle_state(port, BIST_RX, BIST, 0); break; case PD_DATA_ALERT: tcpm_handle_alert(port, msg->payload, cnt); @@ -1847,10 +2384,17 @@ static void tcpm_pd_data_request(struct tcpm_port *port, case PD_DATA_BATT_STATUS: case PD_DATA_GET_COUNTRY_INFO: /* Currently unsupported */ - tcpm_queue_message(port, PD_MSG_CTRL_NOT_SUPP); + tcpm_pd_handle_msg(port, port->negotiated_rev < PD_REV30 ? + PD_MSG_CTRL_REJECT : + PD_MSG_CTRL_NOT_SUPP, + NONE_AMS); break; default: - tcpm_log(port, "Unhandled data message type %#x", type); + tcpm_pd_handle_msg(port, port->negotiated_rev < PD_REV30 ? + PD_MSG_CTRL_REJECT : + PD_MSG_CTRL_NOT_SUPP, + NONE_AMS); + tcpm_log(port, "Unrecognized data message type %#x", type); break; } } @@ -1875,26 +2419,10 @@ static void tcpm_pd_ctrl_request(struct tcpm_port *port, case PD_CTRL_PING: break; case PD_CTRL_GET_SOURCE_CAP: - switch (port->state) { - case SRC_READY: - case SNK_READY: - tcpm_queue_message(port, PD_MSG_DATA_SOURCE_CAP); - break; - default: - tcpm_queue_message(port, PD_MSG_CTRL_REJECT); - break; - } + tcpm_pd_handle_msg(port, PD_MSG_DATA_SOURCE_CAP, GET_SOURCE_CAPABILITIES); break; case PD_CTRL_GET_SINK_CAP: - switch (port->state) { - case SRC_READY: - case SNK_READY: - tcpm_queue_message(port, PD_MSG_DATA_SINK_CAP); - break; - default: - tcpm_queue_message(port, PD_MSG_CTRL_REJECT); - break; - } + tcpm_pd_handle_msg(port, PD_MSG_DATA_SINK_CAP, GET_SINK_CAPABILITIES); break; case PD_CTRL_GOTO_MIN: break; @@ -1910,6 +2438,10 @@ static void tcpm_pd_ctrl_request(struct tcpm_port *port, TYPEC_PWR_MODE_PD, port->pps_data.active, port->supply_voltage); + /* Set VDM running flag ASAP */ + if (port->data_role == TYPEC_HOST && + port->send_discover) + port->vdm_sm_running = true; tcpm_set_state(port, SNK_READY, 0); } else { /* @@ -1933,6 +2465,11 @@ static void tcpm_pd_ctrl_request(struct tcpm_port *port, tcpm_set_state(port, FR_SWAP_SNK_SRC_NEW_SINK_READY, 0); break; default: + tcpm_pd_handle_state(port, + port->pwr_role == TYPEC_SOURCE ? 
+ SRC_SOFT_RESET_WAIT_SNK_TX : + SNK_SOFT_RESET, + NONE_AMS, 0); break; } break; @@ -1942,10 +2479,14 @@ static void tcpm_pd_ctrl_request(struct tcpm_port *port, switch (port->state) { case SNK_NEGOTIATE_CAPABILITIES: /* USB PD specification, Figure 8-43 */ - if (port->explicit_contract) + if (port->explicit_contract) { next_state = SNK_READY; - else + if (port->data_role == TYPEC_HOST && + port->send_discover) + port->vdm_sm_running = true; + } else { next_state = SNK_WAIT_CAPABILITIES; + } tcpm_set_state(port, next_state, 0); break; case SNK_NEGOTIATE_PPS_CAPABILITIES: @@ -1954,6 +2495,11 @@ static void tcpm_pd_ctrl_request(struct tcpm_port *port, port->pps_data.op_curr = port->current_limit; port->pps_status = (type == PD_CTRL_WAIT ? -EAGAIN : -EOPNOTSUPP); + + if (port->data_role == TYPEC_HOST && + port->send_discover) + port->vdm_sm_running = true; + tcpm_set_state(port, SNK_READY, 0); break; case DR_SWAP_SEND: @@ -1979,6 +2525,11 @@ static void tcpm_pd_ctrl_request(struct tcpm_port *port, tcpm_set_state(port, ready_state(port), 0); break; default: + tcpm_pd_handle_state(port, + port->pwr_role == TYPEC_SOURCE ? + SRC_SOFT_RESET_WAIT_SNK_TX : + SNK_SOFT_RESET, + NONE_AMS, 0); break; } break; @@ -1995,15 +2546,20 @@ static void tcpm_pd_ctrl_request(struct tcpm_port *port, tcpm_set_state(port, SNK_TRANSITION_SINK, 0); break; case SOFT_RESET_SEND: - port->message_id = 0; - port->rx_msgid = -1; - if (port->pwr_role == TYPEC_SOURCE) - next_state = SRC_SEND_CAPABILITIES; - else - next_state = SNK_WAIT_CAPABILITIES; - tcpm_set_state(port, next_state, 0); + if (port->ams == SOFT_RESET_AMS) + tcpm_ams_finish(port); + if (port->pwr_role == TYPEC_SOURCE) { + port->upcoming_state = SRC_SEND_CAPABILITIES; + tcpm_ams_start(port, POWER_NEGOTIATION); + } else { + tcpm_set_state(port, SNK_WAIT_CAPABILITIES, 0); + } break; case DR_SWAP_SEND: + if (port->data_role == TYPEC_DEVICE && + port->send_discover) + port->vdm_sm_running = true; + tcpm_set_state(port, DR_SWAP_CHANGE_DR, 0); break; case PR_SWAP_SEND: @@ -2016,57 +2572,62 @@ static void tcpm_pd_ctrl_request(struct tcpm_port *port, tcpm_set_state(port, FR_SWAP_SNK_SRC_TRANSITION_TO_OFF, 0); break; default: + tcpm_pd_handle_state(port, + port->pwr_role == TYPEC_SOURCE ? + SRC_SOFT_RESET_WAIT_SNK_TX : + SNK_SOFT_RESET, + NONE_AMS, 0); break; } break; case PD_CTRL_SOFT_RESET: + port->ams = SOFT_RESET_AMS; tcpm_set_state(port, SOFT_RESET, 0); break; case PD_CTRL_DR_SWAP: - if (port->typec_caps.data != TYPEC_PORT_DRD) { - tcpm_queue_message(port, PD_MSG_CTRL_REJECT); - break; - } /* * XXX * 6.3.9: If an alternate mode is active, a request to swap * alternate modes shall trigger a port reset. */ - switch (port->state) { - case SRC_READY: - case SNK_READY: - tcpm_set_state(port, DR_SWAP_ACCEPT, 0); - break; - default: - tcpm_queue_message(port, PD_MSG_CTRL_WAIT); - break; + if (port->typec_caps.data != TYPEC_PORT_DRD) { + tcpm_pd_handle_msg(port, + port->negotiated_rev < PD_REV30 ? 
+ PD_MSG_CTRL_REJECT : + PD_MSG_CTRL_NOT_SUPP, + NONE_AMS); + } else { + if (port->vdm_sm_running) { + tcpm_queue_message(port, PD_MSG_CTRL_WAIT); + break; + } + + tcpm_pd_handle_state(port, DR_SWAP_ACCEPT, DATA_ROLE_SWAP, 0); } break; case PD_CTRL_PR_SWAP: if (port->port_type != TYPEC_PORT_DRP) { - tcpm_queue_message(port, PD_MSG_CTRL_REJECT); - break; - } - switch (port->state) { - case SRC_READY: - case SNK_READY: - tcpm_set_state(port, PR_SWAP_ACCEPT, 0); - break; - default: - tcpm_queue_message(port, PD_MSG_CTRL_WAIT); - break; + tcpm_pd_handle_msg(port, + port->negotiated_rev < PD_REV30 ? + PD_MSG_CTRL_REJECT : + PD_MSG_CTRL_NOT_SUPP, + NONE_AMS); + } else { + if (port->vdm_sm_running) { + tcpm_queue_message(port, PD_MSG_CTRL_WAIT); + break; + } + + tcpm_pd_handle_state(port, PR_SWAP_ACCEPT, POWER_ROLE_SWAP, 0); } break; case PD_CTRL_VCONN_SWAP: - switch (port->state) { - case SRC_READY: - case SNK_READY: - tcpm_set_state(port, VCONN_SWAP_ACCEPT, 0); - break; - default: + if (port->vdm_sm_running) { tcpm_queue_message(port, PD_MSG_CTRL_WAIT); break; } + + tcpm_pd_handle_state(port, VCONN_SWAP_ACCEPT, VCONN_SWAP, 0); break; case PD_CTRL_GET_SOURCE_CAP_EXT: case PD_CTRL_GET_STATUS: @@ -2074,10 +2635,19 @@ static void tcpm_pd_ctrl_request(struct tcpm_port *port, case PD_CTRL_GET_PPS_STATUS: case PD_CTRL_GET_COUNTRY_CODES: /* Currently not supported */ - tcpm_queue_message(port, PD_MSG_CTRL_NOT_SUPP); + tcpm_pd_handle_msg(port, + port->negotiated_rev < PD_REV30 ? + PD_MSG_CTRL_REJECT : + PD_MSG_CTRL_NOT_SUPP, + NONE_AMS); break; default: - tcpm_log(port, "Unhandled ctrl message type %#x", type); + tcpm_pd_handle_msg(port, + port->negotiated_rev < PD_REV30 ? + PD_MSG_CTRL_REJECT : + PD_MSG_CTRL_NOT_SUPP, + NONE_AMS); + tcpm_log(port, "Unrecognized ctrl message type %#x", type); break; } } @@ -2089,11 +2659,13 @@ static void tcpm_pd_ext_msg_request(struct tcpm_port *port, unsigned int data_size = pd_ext_header_data_size_le(msg->ext_msg.header); if (!(msg->ext_msg.header & PD_EXT_HDR_CHUNKED)) { + tcpm_pd_handle_msg(port, PD_MSG_CTRL_NOT_SUPP, NONE_AMS); tcpm_log(port, "Unchunked extended messages unsupported"); return; } if (data_size > PD_EXT_MAX_CHUNK_DATA) { + tcpm_pd_handle_state(port, CHUNK_NOT_SUPP, NONE_AMS, PD_T_CHUNK_NOT_SUPP); tcpm_log(port, "Chunk handling not yet supported"); return; } @@ -2106,16 +2678,18 @@ static void tcpm_pd_ext_msg_request(struct tcpm_port *port, */ if (msg->ext_msg.data[USB_PD_EXT_SDB_EVENT_FLAGS] & USB_PD_EXT_SDB_PPS_EVENTS) - tcpm_set_state(port, GET_PPS_STATUS_SEND, 0); + tcpm_pd_handle_state(port, GET_PPS_STATUS_SEND, + GETTING_SOURCE_SINK_STATUS, 0); + else - tcpm_set_state(port, ready_state(port), 0); + tcpm_pd_handle_state(port, ready_state(port), NONE_AMS, 0); break; case PD_EXT_PPS_STATUS: /* * For now the PPS status message is used to clear events * and nothing more. 
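The hunks above and below keep repeating one reply rule for unexpected or unsupported messages: Reject while the negotiated revision predates PD 3.0, Not_Supported from PD 3.0 on. Factored out as a self-contained sketch (revision values per the PD header's Spec Revision field; the enum name is illustrative):

    /* Spec Revision values from the PD message header */
    #define PD_REV10        0x0
    #define PD_REV20        0x1
    #define PD_REV30        0x2

    enum ctrl_reply { CTRL_REJECT, CTRL_NOT_SUPP };

    /* Answer an unexpected or unsupported message per negotiated revision. */
    static enum ctrl_reply unsupported_reply(unsigned int negotiated_rev)
    {
            return negotiated_rev < PD_REV30 ? CTRL_REJECT : CTRL_NOT_SUPP;
    }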
*/ - tcpm_set_state(port, ready_state(port), 0); + tcpm_pd_handle_state(port, ready_state(port), NONE_AMS, 0); break; case PD_EXT_SOURCE_CAP_EXT: case PD_EXT_GET_BATT_CAP: @@ -2129,10 +2703,11 @@ static void tcpm_pd_ext_msg_request(struct tcpm_port *port, case PD_EXT_FW_UPDATE_RESPONSE: case PD_EXT_COUNTRY_INFO: case PD_EXT_COUNTRY_CODES: - tcpm_queue_message(port, PD_MSG_CTRL_NOT_SUPP); + tcpm_pd_handle_msg(port, PD_MSG_CTRL_NOT_SUPP, NONE_AMS); break; default: - tcpm_log(port, "Unhandled extended message type %#x", type); + tcpm_pd_handle_msg(port, PD_MSG_CTRL_NOT_SUPP, NONE_AMS); + tcpm_log(port, "Unrecognized extended message type %#x", type); break; } } @@ -2245,7 +2820,12 @@ static bool tcpm_send_queued_message(struct tcpm_port *port) tcpm_pd_send_control(port, PD_CTRL_NOT_SUPP); break; case PD_MSG_DATA_SINK_CAP: - tcpm_pd_send_sink_caps(port); + ret = tcpm_pd_send_sink_caps(port); + if (ret < 0) { + tcpm_log(port, "Unable to send snk caps, ret=%d", ret); + tcpm_set_state(port, SNK_SOFT_RESET, 0); + } + tcpm_ams_finish(port); break; case PD_MSG_DATA_SOURCE_CAP: ret = tcpm_pd_send_source_caps(port); @@ -2255,8 +2835,11 @@ static bool tcpm_send_queued_message(struct tcpm_port *port) ret); tcpm_set_state(port, SOFT_RESET_SEND, 0); } else if (port->pwr_role == TYPEC_SOURCE) { + tcpm_ams_finish(port); tcpm_set_state(port, HARD_RESET_SEND, PD_T_SENDER_RESPONSE); + } else { + tcpm_ams_finish(port); } break; default: @@ -2776,13 +3359,6 @@ static bool tcpm_start_toggling(struct tcpm_port *port, enum typec_cc_status cc) return ret == 0; } -static void tcpm_set_cc(struct tcpm_port *port, enum typec_cc_status cc) -{ - tcpm_log(port, "cc:=%d", cc); - port->cc_req = cc; - port->tcpc->set_cc(port->tcpc, cc); -} - static int tcpm_init_vbus(struct tcpm_port *port) { int ret; @@ -2904,6 +3480,14 @@ static void tcpm_unregister_altmodes(struct tcpm_port *port) memset(modep, 0, sizeof(*modep)); } +static void tcpm_set_partner_usb_comm_capable(struct tcpm_port *port, bool capable) +{ + tcpm_log(port, "Setting usb_comm capable %s", capable ? 
"true" : "false"); + + if (port->tcpc->set_partner_usb_comm_capable) + port->tcpc->set_partner_usb_comm_capable(port->tcpc, capable); +} + static void tcpm_reset_port(struct tcpm_port *port) { int ret; @@ -2912,11 +3496,15 @@ static void tcpm_reset_port(struct tcpm_port *port) ret = port->tcpc->enable_auto_vbus_discharge(port->tcpc, false); tcpm_log_force(port, "Disable vbus discharge ret:%d", ret); } + port->in_ams = false; + port->ams = NONE_AMS; + port->vdm_sm_running = false; tcpm_unregister_altmodes(port); tcpm_typec_disconnect(port); port->attached = false; port->pd_capable = false; port->pps_data.supported = false; + tcpm_set_partner_usb_comm_capable(port, false); /* * First Rx ID should be 0; set this to a sentinel of -1 so that @@ -3090,6 +3678,7 @@ static void run_state_machine(struct tcpm_port *port) int ret; enum typec_pwr_opmode opmode; unsigned int msecs; + enum tcpm_state upcoming_state; port->enter_state = port->state; switch (port->state) { @@ -3190,7 +3779,12 @@ static void run_state_machine(struct tcpm_port *port) port->message_id = 0; port->rx_msgid = -1; port->explicit_contract = false; - tcpm_set_state(port, SRC_SEND_CAPABILITIES, 0); + /* SNK -> SRC POWER/FAST_ROLE_SWAP finished */ + if (port->ams == POWER_ROLE_SWAP || + port->ams == FAST_ROLE_SWAP) + tcpm_ams_finish(port); + port->upcoming_state = SRC_SEND_CAPABILITIES; + tcpm_ams_start(port, POWER_NEGOTIATION); break; case SRC_SEND_CAPABILITIES: port->caps_count++; @@ -3251,6 +3845,8 @@ static void run_state_machine(struct tcpm_port *port) } } else { tcpm_pd_send_control(port, PD_CTRL_ACCEPT); + tcpm_set_partner_usb_comm_capable(port, + !!(port->sink_request & RDO_USB_COMM)); tcpm_set_state(port, SRC_TRANSITION_SUPPLY, PD_T_SRC_TRANSITION); } @@ -3272,6 +3868,24 @@ static void run_state_machine(struct tcpm_port *port) tcpm_swap_complete(port, 0); tcpm_typec_connect(port); + if (port->ams != NONE_AMS) + tcpm_ams_finish(port); + if (port->next_ams != NONE_AMS) { + port->ams = port->next_ams; + port->next_ams = NONE_AMS; + } + + /* + * If previous AMS is interrupted, switch to the upcoming + * state. 
+ */ + if (port->upcoming_state != INVALID_STATE) { + upcoming_state = port->upcoming_state; + port->upcoming_state = INVALID_STATE; + tcpm_set_state(port, upcoming_state, 0); + break; + } + tcpm_check_send_discover(port); /* * 6.3.5 @@ -3389,6 +4003,12 @@ static void run_state_machine(struct tcpm_port *port) port->message_id = 0; port->rx_msgid = -1; port->explicit_contract = false; + + if (port->ams == POWER_ROLE_SWAP || + port->ams == FAST_ROLE_SWAP) + /* SRC -> SNK POWER/FAST_ROLE_SWAP finished */ + tcpm_ams_finish(port); + tcpm_set_state(port, SNK_DISCOVERY, 0); break; case SNK_DISCOVERY: @@ -3437,7 +4057,7 @@ static void run_state_machine(struct tcpm_port *port) */ if (port->vbus_never_low) { port->vbus_never_low = false; - tcpm_set_state(port, SOFT_RESET_SEND, + tcpm_set_state(port, SNK_SOFT_RESET, PD_T_SINK_WAIT_CAP); } else { tcpm_set_state(port, hard_reset_state(port), @@ -3446,6 +4066,8 @@ static void run_state_machine(struct tcpm_port *port) break; case SNK_NEGOTIATE_CAPABILITIES: port->pd_capable = true; + tcpm_set_partner_usb_comm_capable(port, + !!(port->source_caps[0] & PDO_FIXED_USB_COMM)); port->hard_reset_count = 0; ret = tcpm_pd_send_request(port); if (ret < 0) { @@ -3490,9 +4112,28 @@ static void run_state_machine(struct tcpm_port *port) tcpm_swap_complete(port, 0); tcpm_typec_connect(port); - tcpm_check_send_discover(port); mod_enable_frs_delayed_work(port, 0); tcpm_pps_complete(port, port->pps_status); + + if (port->ams != NONE_AMS) + tcpm_ams_finish(port); + if (port->next_ams != NONE_AMS) { + port->ams = port->next_ams; + port->next_ams = NONE_AMS; + } + + /* + * If previous AMS is interrupted, switch to the upcoming + * state. + */ + if (port->upcoming_state != INVALID_STATE) { + upcoming_state = port->upcoming_state; + port->upcoming_state = INVALID_STATE; + tcpm_set_state(port, upcoming_state, 0); + break; + } + + tcpm_check_send_discover(port); power_supply_changed(port->psy); break; @@ -3513,8 +4154,14 @@ static void run_state_machine(struct tcpm_port *port) /* Hard_Reset states */ case HARD_RESET_SEND: - tcpm_pd_transmit(port, TCPC_TX_HARD_RESET, NULL); - tcpm_set_state(port, HARD_RESET_START, 0); + if (port->ams != NONE_AMS) + tcpm_ams_finish(port); + /* + * State machine will be directed to HARD_RESET_START, + * thus set upcoming_state to INVALID_STATE. + */ + port->upcoming_state = INVALID_STATE; + tcpm_ams_start(port, HARD_RESET); break; case HARD_RESET_START: port->sink_cap_done = false; @@ -3558,6 +4205,8 @@ static void run_state_machine(struct tcpm_port *port) case SRC_HARD_RESET_VBUS_ON: tcpm_set_vconn(port, true); tcpm_set_vbus(port, true); + if (port->ams == HARD_RESET) + tcpm_ams_finish(port); port->tcpc->set_pd_rx(port->tcpc, true); tcpm_set_attached_state(port, true); tcpm_set_state(port, SRC_UNATTACHED, PD_T_PS_SOURCE_ON); @@ -3579,6 +4228,8 @@ static void run_state_machine(struct tcpm_port *port) tcpm_set_state(port, SNK_HARD_RESET_SINK_ON, PD_T_SAFE_0V); break; case SNK_HARD_RESET_WAIT_VBUS: + if (port->ams == HARD_RESET) + tcpm_ams_finish(port); /* Assume we're disconnected if VBUS doesn't come back. 
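A note on the two tcpm_set_partner_usb_comm_capable() feeds in the hunks above: each side reads the "USB communications capable" bit from a different object, the source from the sink's Request Data Object (SRC_NEGOTIATE_CAPABILITIES), the sink from the source's first fixed PDO (SNK_NEGOTIATE_CAPABILITIES). A sketch with the bit positions per the PD spec's RDO and fixed-PDO layouts:

    #include <stdbool.h>
    #include <stdint.h>

    #define RDO_USB_COMM            (1u << 25)      /* Request Data Object, bit 25 */
    #define PDO_FIXED_USB_COMM      (1u << 26)      /* fixed supply PDO, bit 26 */

    /* Source side: did the sink's Request flag USB communication support? */
    static bool sink_usb_capable(uint32_t rdo)
    {
            return rdo & RDO_USB_COMM;
    }

    /* Sink side: does the source's vSafe5V PDO flag USB communication support? */
    static bool source_usb_capable(uint32_t first_pdo)
    {
            return first_pdo & PDO_FIXED_USB_COMM;
    }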
*/ tcpm_set_state(port, SNK_UNATTACHED, PD_T_SRC_RECOVER_MAX + PD_T_SRC_TURN_ON); @@ -3606,6 +4257,8 @@ static void run_state_machine(struct tcpm_port *port) 5000); tcpm_set_charge(port, true); } + if (port->ams == HARD_RESET) + tcpm_ams_finish(port); tcpm_set_attached_state(port, true); tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_USB, false, VSAFE5V); tcpm_set_state(port, SNK_STARTUP, 0); @@ -3616,10 +4269,20 @@ static void run_state_machine(struct tcpm_port *port) port->message_id = 0; port->rx_msgid = -1; tcpm_pd_send_control(port, PD_CTRL_ACCEPT); - if (port->pwr_role == TYPEC_SOURCE) - tcpm_set_state(port, SRC_SEND_CAPABILITIES, 0); - else + tcpm_ams_finish(port); + if (port->pwr_role == TYPEC_SOURCE) { + port->upcoming_state = SRC_SEND_CAPABILITIES; + tcpm_ams_start(port, POWER_NEGOTIATION); + } else { tcpm_set_state(port, SNK_WAIT_CAPABILITIES, 0); + } + break; + case SRC_SOFT_RESET_WAIT_SNK_TX: + case SNK_SOFT_RESET: + if (port->ams != NONE_AMS) + tcpm_ams_finish(port); + port->upcoming_state = SOFT_RESET_SEND; + tcpm_ams_start(port, SOFT_RESET_AMS); break; case SOFT_RESET_SEND: port->message_id = 0; @@ -3639,10 +4302,14 @@ static void run_state_machine(struct tcpm_port *port) break; case DR_SWAP_ACCEPT: tcpm_pd_send_control(port, PD_CTRL_ACCEPT); + /* Set VDM state machine running flag ASAP */ + if (port->data_role == TYPEC_DEVICE && port->send_discover) + port->vdm_sm_running = true; tcpm_set_state_cond(port, DR_SWAP_CHANGE_DR, 0); break; case DR_SWAP_SEND_TIMEOUT: tcpm_swap_complete(port, -ETIMEDOUT); + tcpm_ams_finish(port); tcpm_set_state(port, ready_state(port), 0); break; case DR_SWAP_CHANGE_DR: @@ -3655,6 +4322,7 @@ static void run_state_machine(struct tcpm_port *port) TYPEC_HOST); port->send_discover = true; } + tcpm_ams_finish(port); tcpm_set_state(port, ready_state(port), 0); break; @@ -3782,6 +4450,7 @@ static void run_state_machine(struct tcpm_port *port) case VCONN_SWAP_ACCEPT: tcpm_pd_send_control(port, PD_CTRL_ACCEPT); + tcpm_ams_finish(port); tcpm_set_state(port, VCONN_SWAP_START, 0); break; case VCONN_SWAP_SEND: @@ -3791,6 +4460,8 @@ static void run_state_machine(struct tcpm_port *port) break; case VCONN_SWAP_SEND_TIMEOUT: tcpm_swap_complete(port, -ETIMEDOUT); + if (port->data_role == TYPEC_HOST && port->send_discover) + port->vdm_sm_running = true; tcpm_set_state(port, ready_state(port), 0); break; case VCONN_SWAP_START: @@ -3806,10 +4477,14 @@ static void run_state_machine(struct tcpm_port *port) case VCONN_SWAP_TURN_ON_VCONN: tcpm_set_vconn(port, true); tcpm_pd_send_control(port, PD_CTRL_PS_RDY); + if (port->data_role == TYPEC_HOST && port->send_discover) + port->vdm_sm_running = true; tcpm_set_state(port, ready_state(port), 0); break; case VCONN_SWAP_TURN_OFF_VCONN: tcpm_set_vconn(port, false); + if (port->data_role == TYPEC_HOST && port->send_discover) + port->vdm_sm_running = true; tcpm_set_state(port, ready_state(port), 0); break; @@ -3817,6 +4492,8 @@ static void run_state_machine(struct tcpm_port *port) case PR_SWAP_CANCEL: case VCONN_SWAP_CANCEL: tcpm_swap_complete(port, port->swap_status); + if (port->data_role == TYPEC_HOST && port->send_discover) + port->vdm_sm_running = true; if (port->pwr_role == TYPEC_SOURCE) tcpm_set_state(port, SRC_READY, 0); else @@ -3886,6 +4563,25 @@ static void run_state_machine(struct tcpm_port *port) tcpm_default_state(port), port->vbus_present ? 
PD_T_PS_SOURCE_OFF : 0); break; + + /* AMS intermediate state */ + case AMS_START: + if (port->upcoming_state == INVALID_STATE) { + tcpm_set_state(port, port->pwr_role == TYPEC_SOURCE ? + SRC_READY : SNK_READY, 0); + break; + } + + upcoming_state = port->upcoming_state; + port->upcoming_state = INVALID_STATE; + tcpm_set_state(port, upcoming_state, 0); + break; + + /* Chunk state */ + case CHUNK_NOT_SUPP: + tcpm_pd_send_control(port, PD_CTRL_NOT_SUPP); + tcpm_set_state(port, port->pwr_role == TYPEC_SOURCE ? SRC_READY : SNK_READY, 0); + break; default: WARN(1, "Unexpected port state %d\n", port->state); break; @@ -4127,6 +4823,9 @@ static void _tcpm_pd_vbus_on(struct tcpm_port *port) switch (port->state) { case SNK_TRANSITION_SINK_VBUS: port->explicit_contract = true; + /* Set the VDM flag ASAP */ + if (port->data_role == TYPEC_HOST && port->send_discover) + port->vdm_sm_running = true; tcpm_set_state(port, SNK_READY, 0); break; case SNK_DISCOVERY: @@ -4262,6 +4961,17 @@ static void _tcpm_pd_vbus_off(struct tcpm_port *port) /* Do nothing, waiting for sink detection */ break; + case SRC_STARTUP: + case SRC_SEND_CAPABILITIES: + case SRC_SEND_CAPABILITIES_TIMEOUT: + case SRC_NEGOTIATE_CAPABILITIES: + case SRC_TRANSITION_SUPPLY: + case SRC_READY: + case SRC_WAIT_NEW_CAPABILITIES: + /* Force to unattached state to re-initiate connection */ + tcpm_set_state(port, SRC_UNATTACHED, 0); + break; + case PORT_RESET: /* * State set back to default mode once the timer completes. @@ -4313,6 +5023,10 @@ static void _tcpm_pd_hard_reset(struct tcpm_port *port) if (port->bist_request == BDO_MODE_TESTDATA && port->tcpc->set_bist_data) port->tcpc->set_bist_data(port->tcpc, false); + if (port->ams != NONE_AMS) + port->ams = NONE_AMS; + if (port->hard_reset_count < PD_N_HARD_RESET_COUNT) + port->ams = HARD_RESET; /* * If we keep receiving hard reset requests, executing the hard reset * must have failed. Revert to error recovery if that happens. @@ -4363,10 +5077,16 @@ static void tcpm_pd_event_handler(struct kthread_work *work) _tcpm_cc_change(port, cc1, cc2); } if (events & TCPM_FRS_EVENT) { - if (port->state == SNK_READY) - tcpm_set_state(port, FR_SWAP_SEND, 0); - else + if (port->state == SNK_READY) { + int ret; + + port->upcoming_state = FR_SWAP_SEND; + ret = tcpm_ams_start(port, FAST_ROLE_SWAP); + if (ret == -EAGAIN) + port->upcoming_state = INVALID_STATE; + } else { tcpm_log(port, "Discarding FRS_SIGNAL! 
Not in sink ready"); + } } if (events & TCPM_SOURCING_VBUS) { tcpm_log(port, "sourcing vbus"); @@ -4435,6 +5155,7 @@ EXPORT_SYMBOL_GPL(tcpm_sourcing_vbus); static void tcpm_enable_frs_work(struct kthread_work *work) { struct tcpm_port *port = container_of(work, struct tcpm_port, enable_frs); + int ret; mutex_lock(&port->lock); /* Not FRS capable */ @@ -4449,9 +5170,14 @@ static void tcpm_enable_frs_work(struct kthread_work *work) if (port->state != SNK_READY || port->vdm_state != VDM_STATE_DONE || port->send_discover) goto resched; - tcpm_set_state(port, GET_SINK_CAP, 0); - port->sink_cap_done = true; - + port->upcoming_state = GET_SINK_CAP; + ret = tcpm_ams_start(port, GET_SINK_CAPABILITIES); + if (ret == -EAGAIN) { + port->upcoming_state = INVALID_STATE; + } else { + port->sink_cap_done = true; + goto unlock; + } resched: mod_enable_frs_delayed_work(port, GET_SINK_CAP_RETRY_MS); unlock: @@ -4501,7 +5227,12 @@ static int tcpm_dr_set(struct typec_port *p, enum typec_data_role data) port->non_pd_role_swap = true; tcpm_set_state(port, PORT_RESET, 0); } else { - tcpm_set_state(port, DR_SWAP_SEND, 0); + port->upcoming_state = DR_SWAP_SEND; + ret = tcpm_ams_start(port, DATA_ROLE_SWAP); + if (ret == -EAGAIN) { + port->upcoming_state = INVALID_STATE; + goto port_unlock; + } } port->swap_status = 0; @@ -4547,10 +5278,16 @@ static int tcpm_pr_set(struct typec_port *p, enum typec_role role) goto port_unlock; } + port->upcoming_state = PR_SWAP_SEND; + ret = tcpm_ams_start(port, POWER_ROLE_SWAP); + if (ret == -EAGAIN) { + port->upcoming_state = INVALID_STATE; + goto port_unlock; + } + port->swap_status = 0; port->swap_pending = true; reinit_completion(&port->swap_complete); - tcpm_set_state(port, PR_SWAP_SEND, 0); mutex_unlock(&port->lock); if (!wait_for_completion_timeout(&port->swap_complete, @@ -4586,10 +5323,16 @@ static int tcpm_vconn_set(struct typec_port *p, enum typec_role role) goto port_unlock; } + port->upcoming_state = VCONN_SWAP_SEND; + ret = tcpm_ams_start(port, VCONN_SWAP); + if (ret == -EAGAIN) { + port->upcoming_state = INVALID_STATE; + goto port_unlock; + } + port->swap_status = 0; port->swap_pending = true; reinit_completion(&port->swap_complete); - tcpm_set_state(port, VCONN_SWAP_SEND, 0); mutex_unlock(&port->lock); if (!wait_for_completion_timeout(&port->swap_complete, @@ -4654,6 +5397,13 @@ static int tcpm_pps_set_op_curr(struct tcpm_port *port, u16 op_curr) goto port_unlock; } + port->upcoming_state = SNK_NEGOTIATE_PPS_CAPABILITIES; + ret = tcpm_ams_start(port, POWER_NEGOTIATION); + if (ret == -EAGAIN) { + port->upcoming_state = INVALID_STATE; + goto port_unlock; + } + /* Round down operating current to align with PPS valid steps */ op_curr = op_curr - (op_curr % RDO_PROG_CURR_MA_STEP); @@ -4661,7 +5411,6 @@ static int tcpm_pps_set_op_curr(struct tcpm_port *port, u16 op_curr) port->pps_data.op_curr = op_curr; port->pps_status = 0; port->pps_pending = true; - tcpm_set_state(port, SNK_NEGOTIATE_PPS_CAPABILITIES, 0); mutex_unlock(&port->lock); if (!wait_for_completion_timeout(&port->pps_complete, @@ -4710,6 +5459,13 @@ static int tcpm_pps_set_out_volt(struct tcpm_port *port, u16 out_volt) goto port_unlock; } + port->upcoming_state = SNK_NEGOTIATE_PPS_CAPABILITIES; + ret = tcpm_ams_start(port, POWER_NEGOTIATION); + if (ret == -EAGAIN) { + port->upcoming_state = INVALID_STATE; + goto port_unlock; + } + /* Round down output voltage to align with PPS valid steps */ out_volt = out_volt - (out_volt % RDO_PROG_VOLT_MV_STEP); @@ -4717,7 +5473,6 @@ static int tcpm_pps_set_out_volt(struct 
tcpm_port *port, u16 out_volt) port->pps_data.out_volt = out_volt; port->pps_status = 0; port->pps_pending = true; - tcpm_set_state(port, SNK_NEGOTIATE_PPS_CAPABILITIES, 0); mutex_unlock(&port->lock); if (!wait_for_completion_timeout(&port->pps_complete, @@ -4757,6 +5512,16 @@ static int tcpm_pps_activate(struct tcpm_port *port, bool activate) goto port_unlock; } + if (activate) + port->upcoming_state = SNK_NEGOTIATE_PPS_CAPABILITIES; + else + port->upcoming_state = SNK_NEGOTIATE_CAPABILITIES; + ret = tcpm_ams_start(port, POWER_NEGOTIATION); + if (ret == -EAGAIN) { + port->upcoming_state = INVALID_STATE; + goto port_unlock; + } + reinit_completion(&port->pps_complete); port->pps_status = 0; port->pps_pending = true; @@ -4765,9 +5530,6 @@ static int tcpm_pps_activate(struct tcpm_port *port, bool activate) if (activate) { port->pps_data.out_volt = port->supply_voltage; port->pps_data.op_curr = port->current_limit; - tcpm_set_state(port, SNK_NEGOTIATE_PPS_CAPABILITIES, 0); - } else { - tcpm_set_state(port, SNK_NEGOTIATE_CAPABILITIES, 0); } mutex_unlock(&port->lock); @@ -4960,6 +5722,20 @@ sink: port->new_source_frs_current = frs_current; } + /* sink-vdos is optional */ + ret = fwnode_property_count_u32(fwnode, "sink-vdos"); + if (ret < 0) + ret = 0; + + port->nr_snk_vdo = min(ret, VDO_MAX_OBJECTS); + if (port->nr_snk_vdo) { + ret = fwnode_property_read_u32_array(fwnode, "sink-vdos", + port->snk_vdo, + port->nr_snk_vdo); + if (ret < 0) + return ret; + } + return 0; } @@ -5265,6 +6041,7 @@ struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc) port->typec_caps.fwnode = tcpc->fwnode; port->typec_caps.revision = 0x0120; /* Type-C spec release 1.2 */ port->typec_caps.pd_revision = 0x0300; /* USB-PD spec release 3.0 */ + port->typec_caps.svdm_version = SVDM_VER_2_0; port->typec_caps.driver_data = port; port->typec_caps.ops = &tcpm_ops; port->typec_caps.orientation_aware = 1; diff --git a/drivers/usb/typec/ucsi/Kconfig b/drivers/usb/typec/ucsi/Kconfig index 2192d7c4fec7..5e9b37b3f25e 100644 --- a/drivers/usb/typec/ucsi/Kconfig +++ b/drivers/usb/typec/ucsi/Kconfig @@ -3,6 +3,7 @@ config TYPEC_UCSI tristate "USB Type-C Connector System Software Interface driver" depends on !CPU_BIG_ENDIAN + depends on USB_ROLE_SWITCH || !USB_ROLE_SWITCH help USB Type-C Connector System Software Interface (UCSI) is a specification for an interface that allows the operating system to diff --git a/drivers/usb/typec/ucsi/displayport.c b/drivers/usb/typec/ucsi/displayport.c index 261131c9e37c..73cd5bf35047 100644 --- a/drivers/usb/typec/ucsi/displayport.c +++ b/drivers/usb/typec/ucsi/displayport.c @@ -49,6 +49,7 @@ static int ucsi_displayport_enter(struct typec_altmode *alt, u32 *vdo) { struct ucsi_dp *dp = typec_altmode_get_drvdata(alt); struct ucsi *ucsi = dp->con->ucsi; + int svdm_version; u64 command; u8 cur = 0; int ret; @@ -83,7 +84,13 @@ static int ucsi_displayport_enter(struct typec_altmode *alt, u32 *vdo) * mode, and letting the alt mode driver continue. 
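Both stacks now settle on an SVDM version per partner: each end advertises its version in bits 14:13 of the structured VDM header, and the link operates at the lower of the two. tcpm_pd_svdm() earlier and the ucsi_displayport_vdm() hunk below both downgrade via typec_partner_set_svdm_version(). The rule, distilled into a self-contained sketch:

    #include <stdint.h>

    /* Structured VDM header, bits 14:13: SVDM version */
    #define SVDM_VER_SHIFT  13
    #define SVDM_VER_MASK   (0x3u << SVDM_VER_SHIFT)

    static uint32_t header_svdm_ver(uint32_t vdm_header)
    {
            return (vdm_header & SVDM_VER_MASK) >> SVDM_VER_SHIFT;
    }

    /* Operate at the lower of our SVDM version and the partner's. */
    static uint32_t negotiated_svdm_ver(uint32_t ours, uint32_t vdm_header)
    {
            uint32_t theirs = header_svdm_ver(vdm_header);

            return theirs < ours ? theirs : ours;
    }

This is also why both tcpm_register_port() and ucsi_register_port() start each port at SVDM_VER_2_0: the port only ever moves down from there.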
*/ - dp->header = VDO(USB_TYPEC_DP_SID, 1, CMD_ENTER_MODE); + svdm_version = typec_altmode_get_svdm_version(alt); + if (svdm_version < 0) { + ret = svdm_version; + goto err_unlock; + } + + dp->header = VDO(USB_TYPEC_DP_SID, 1, svdm_version, CMD_ENTER_MODE); dp->header |= VDO_OPOS(USB_TYPEC_DP_MODE); dp->header |= VDO_CMDT(CMDT_RSP_ACK); @@ -101,6 +108,7 @@ err_unlock: static int ucsi_displayport_exit(struct typec_altmode *alt) { struct ucsi_dp *dp = typec_altmode_get_drvdata(alt); + int svdm_version; u64 command; int ret = 0; @@ -120,7 +128,13 @@ static int ucsi_displayport_exit(struct typec_altmode *alt) if (ret < 0) goto out_unlock; - dp->header = VDO(USB_TYPEC_DP_SID, 1, CMD_EXIT_MODE); + svdm_version = typec_altmode_get_svdm_version(alt); + if (svdm_version < 0) { + ret = svdm_version; + goto out_unlock; + } + + dp->header = VDO(USB_TYPEC_DP_SID, 1, svdm_version, CMD_EXIT_MODE); dp->header |= VDO_OPOS(USB_TYPEC_DP_MODE); dp->header |= VDO_CMDT(CMDT_RSP_ACK); @@ -186,6 +200,7 @@ static int ucsi_displayport_vdm(struct typec_altmode *alt, struct ucsi_dp *dp = typec_altmode_get_drvdata(alt); int cmd_type = PD_VDO_CMDT(header); int cmd = PD_VDO_CMD(header); + int svdm_version; mutex_lock(&dp->con->lock); @@ -198,9 +213,20 @@ static int ucsi_displayport_vdm(struct typec_altmode *alt, return -EOPNOTSUPP; } + svdm_version = typec_altmode_get_svdm_version(alt); + if (svdm_version < 0) { + mutex_unlock(&dp->con->lock); + return svdm_version; + } + switch (cmd_type) { case CMDT_INIT: - dp->header = VDO(USB_TYPEC_DP_SID, 1, cmd); + if (PD_VDO_SVDM_VER(header) < svdm_version) { + typec_partner_set_svdm_version(dp->con->partner, PD_VDO_SVDM_VER(header)); + svdm_version = PD_VDO_SVDM_VER(header); + } + + dp->header = VDO(USB_TYPEC_DP_SID, 1, svdm_version, cmd); dp->header |= VDO_OPOS(USB_TYPEC_DP_MODE); switch (cmd) { diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c index f02958927cbd..244270755ae6 100644 --- a/drivers/usb/typec/ucsi/ucsi.c +++ b/drivers/usb/typec/ucsi/ucsi.c @@ -588,6 +588,7 @@ static void ucsi_unregister_partner(struct ucsi_connector *con) static void ucsi_partner_change(struct ucsi_connector *con) { + enum usb_role u_role = USB_ROLE_NONE; int ret; if (!con->partner) @@ -595,11 +596,14 @@ static void ucsi_partner_change(struct ucsi_connector *con) switch (UCSI_CONSTAT_PARTNER_TYPE(con->status.flags)) { case UCSI_CONSTAT_PARTNER_TYPE_UFP: - case UCSI_CONSTAT_PARTNER_TYPE_CABLE: case UCSI_CONSTAT_PARTNER_TYPE_CABLE_AND_UFP: + u_role = USB_ROLE_HOST; + fallthrough; + case UCSI_CONSTAT_PARTNER_TYPE_CABLE: typec_set_data_role(con->port, TYPEC_HOST); break; case UCSI_CONSTAT_PARTNER_TYPE_DFP: + u_role = USB_ROLE_DEVICE; typec_set_data_role(con->port, TYPEC_DEVICE); break; default: @@ -610,6 +614,15 @@ static void ucsi_partner_change(struct ucsi_connector *con) if (!completion_done(&con->complete)) complete(&con->complete); + /* Only notify USB controller if partner supports USB data */ + if (!(UCSI_CONSTAT_PARTNER_FLAGS(con->status.flags) & UCSI_CONSTAT_PARTNER_FLAG_USB)) + u_role = USB_ROLE_NONE; + + ret = usb_role_switch_set_role(con->usb_role_sw, u_role); + if (ret) + dev_err(con->ucsi->dev, "con:%d: failed to set usb role:%d\n", + con->num, u_role); + /* Can't rely on Partner Flags field. Always checking the alt modes. 
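The usb_role_switch notifications being added to ucsi.c (partner change above, connector change and port registration below) all derive the role the same way before calling usb_role_switch_set_role(). Consolidated here into one hypothetical helper, purely for readability; the driver itself repeats the switch inline:

    /*
     * Hypothetical consolidation: UCSI partner type -> USB role,
     * cleared when the partner carries no USB data.
     */
    static enum usb_role ucsi_partner_usb_role(struct ucsi_connector *con)
    {
            enum usb_role role = USB_ROLE_NONE;

            switch (UCSI_CONSTAT_PARTNER_TYPE(con->status.flags)) {
            case UCSI_CONSTAT_PARTNER_TYPE_UFP:
            case UCSI_CONSTAT_PARTNER_TYPE_CABLE_AND_UFP:
                    role = USB_ROLE_HOST;   /* partner acts as the device */
                    break;
            case UCSI_CONSTAT_PARTNER_TYPE_DFP:
                    role = USB_ROLE_DEVICE; /* partner acts as the host */
                    break;
            default:
                    break;
            }

            /* Only notify the controller if the partner supports USB data. */
            if (!(UCSI_CONSTAT_PARTNER_FLAGS(con->status.flags) &
                  UCSI_CONSTAT_PARTNER_FLAG_USB))
                    role = USB_ROLE_NONE;

            return role;
    }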
*/ ret = ucsi_register_altmodes(con, UCSI_RECIPIENT_SOP); if (ret) @@ -628,6 +641,7 @@ static void ucsi_handle_connector_change(struct work_struct *work) struct ucsi_connector_status pre_ack_status; struct ucsi_connector_status post_ack_status; enum typec_role role; + enum usb_role u_role = USB_ROLE_NONE; u16 inferred_changes; u16 changed_flags; u64 command; @@ -753,11 +767,14 @@ static void ucsi_handle_connector_change(struct work_struct *work) switch (UCSI_CONSTAT_PARTNER_TYPE(con->status.flags)) { case UCSI_CONSTAT_PARTNER_TYPE_UFP: - case UCSI_CONSTAT_PARTNER_TYPE_CABLE: case UCSI_CONSTAT_PARTNER_TYPE_CABLE_AND_UFP: + u_role = USB_ROLE_HOST; + fallthrough; + case UCSI_CONSTAT_PARTNER_TYPE_CABLE: typec_set_data_role(con->port, TYPEC_HOST); break; case UCSI_CONSTAT_PARTNER_TYPE_DFP: + u_role = USB_ROLE_DEVICE; typec_set_data_role(con->port, TYPEC_DEVICE); break; default: @@ -770,6 +787,16 @@ static void ucsi_handle_connector_change(struct work_struct *work) ucsi_unregister_partner(con); ucsi_port_psy_changed(con); + + /* Only notify USB controller if partner supports USB data */ + if (!(UCSI_CONSTAT_PARTNER_FLAGS(con->status.flags) & + UCSI_CONSTAT_PARTNER_FLAG_USB)) + u_role = USB_ROLE_NONE; + + ret = usb_role_switch_set_role(con->usb_role_sw, u_role); + if (ret) + dev_err(ucsi->dev, "con:%d: failed to set usb role:%d\n", + con->num, u_role); } if (con->status.change & UCSI_CONSTAT_PARTNER_CHANGE) @@ -988,6 +1015,7 @@ static int ucsi_register_port(struct ucsi *ucsi, int index) struct ucsi_connector *con = &ucsi->connector[index]; struct typec_capability *cap = &con->typec_cap; enum typec_accessory *accessory = cap->accessory; + enum usb_role u_role = USB_ROLE_NONE; u64 command; int ret; @@ -1024,6 +1052,7 @@ static int ucsi_register_port(struct ucsi *ucsi, int index) cap->revision = ucsi->cap.typec_version; cap->pd_revision = ucsi->cap.pd_version; + cap->svdm_version = SVDM_VER_2_0; cap->prefer_role = TYPEC_NO_PREFERRED_ROLE; if (con->cap.op_mode & UCSI_CONCAP_OPMODE_AUDIO_ACCESSORY) @@ -1066,11 +1095,14 @@ static int ucsi_register_port(struct ucsi *ucsi, int index) switch (UCSI_CONSTAT_PARTNER_TYPE(con->status.flags)) { case UCSI_CONSTAT_PARTNER_TYPE_UFP: - case UCSI_CONSTAT_PARTNER_TYPE_CABLE: case UCSI_CONSTAT_PARTNER_TYPE_CABLE_AND_UFP: + u_role = USB_ROLE_HOST; + fallthrough; + case UCSI_CONSTAT_PARTNER_TYPE_CABLE: typec_set_data_role(con->port, TYPEC_HOST); break; case UCSI_CONSTAT_PARTNER_TYPE_DFP: + u_role = USB_ROLE_DEVICE; typec_set_data_role(con->port, TYPEC_DEVICE); break; default: @@ -1086,6 +1118,24 @@ static int ucsi_register_port(struct ucsi *ucsi, int index) ucsi_port_psy_changed(con); } + con->usb_role_sw = fwnode_usb_role_switch_get(cap->fwnode); + if (IS_ERR(con->usb_role_sw)) { + dev_err(ucsi->dev, "con%d: failed to get usb role switch\n", + con->num); + con->usb_role_sw = NULL; + } + + /* Only notify USB controller if partner supports USB data */ + if (!(UCSI_CONSTAT_PARTNER_FLAGS(con->status.flags) & UCSI_CONSTAT_PARTNER_FLAG_USB)) + u_role = USB_ROLE_NONE; + + ret = usb_role_switch_set_role(con->usb_role_sw, u_role); + if (ret) { + dev_err(ucsi->dev, "con:%d: failed to set usb role:%d\n", + con->num, u_role); + ret = 0; + } + if (con->partner) { ret = ucsi_register_altmodes(con, UCSI_RECIPIENT_SOP); if (ret) { diff --git a/drivers/usb/typec/ucsi/ucsi.h b/drivers/usb/typec/ucsi/ucsi.h index dd9ba60ab4a3..3920e20a9e9e 100644 --- a/drivers/usb/typec/ucsi/ucsi.h +++ b/drivers/usb/typec/ucsi/ucsi.h @@ -8,6 +8,7 @@ #include <linux/power_supply.h> #include 
<linux/types.h> #include <linux/usb/typec.h> +#include <linux/usb/role.h> /* -------------------------------------------------------------------------- */ @@ -331,6 +332,8 @@ struct ucsi_connector { u32 rdo; u32 src_pdos[UCSI_MAX_PDOS]; int num_pdos; + + struct usb_role_switch *usb_role_sw; }; int ucsi_send_command(struct ucsi *ucsi, u64 command, diff --git a/drivers/usb/usbip/stub_main.c b/drivers/usb/usbip/stub_main.c index c1c0bbc9f8b1..77a5b3f8736a 100644 --- a/drivers/usb/usbip/stub_main.c +++ b/drivers/usb/usbip/stub_main.c @@ -23,7 +23,7 @@ struct kmem_cache *stub_priv_cache; */ #define MAX_BUSID 16 static struct bus_id_priv busid_table[MAX_BUSID]; -static spinlock_t busid_table_lock; +static DEFINE_SPINLOCK(busid_table_lock); static void init_busid_table(void) { @@ -35,8 +35,6 @@ static void init_busid_table(void) */ memset(busid_table, 0, sizeof(busid_table)); - spin_lock_init(&busid_table_lock); - for (i = 0; i < MAX_BUSID; i++) spin_lock_init(&busid_table[i].busid_lock); } diff --git a/drivers/usb/usbip/usbip_common.h b/drivers/usb/usbip/usbip_common.h index 8be857a4fa13..d60ce17d3dd2 100644 --- a/drivers/usb/usbip/usbip_common.h +++ b/drivers/usb/usbip/usbip_common.h @@ -277,6 +277,10 @@ struct usbip_device { void (*reset)(struct usbip_device *); void (*unusable)(struct usbip_device *); } eh_ops; + +#ifdef CONFIG_KCOV + u64 kcov_handle; +#endif }; #define kthread_get_run(threadfn, data, namefmt, ...) \ @@ -337,4 +341,29 @@ static inline int interface_to_devnum(struct usb_interface *interface) return udev->devnum; } +#ifdef CONFIG_KCOV + +static inline void usbip_kcov_handle_init(struct usbip_device *ud) +{ + ud->kcov_handle = kcov_common_handle(); +} + +static inline void usbip_kcov_remote_start(struct usbip_device *ud) +{ + kcov_remote_start_common(ud->kcov_handle); +} + +static inline void usbip_kcov_remote_stop(void) +{ + kcov_remote_stop(); +} + +#else /* CONFIG_KCOV */ + +static inline void usbip_kcov_handle_init(struct usbip_device *ud) { } +static inline void usbip_kcov_remote_start(struct usbip_device *ud) { } +static inline void usbip_kcov_remote_stop(void) { } + +#endif /* CONFIG_KCOV */ + #endif /* __USBIP_COMMON_H */ diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c index 66cde5e5f796..3209b5ddd30c 100644 --- a/drivers/usb/usbip/vhci_hcd.c +++ b/drivers/usb/usbip/vhci_hcd.c @@ -396,6 +396,8 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, default: usbip_dbg_vhci_rh(" ClearPortFeature: default %x\n", wValue); + if (wValue >= 32) + goto error; vhci_hcd->port_status[rhport] &= ~(1 << wValue); break; } diff --git a/drivers/usb/usbip/vhci_rx.c b/drivers/usb/usbip/vhci_rx.c index 266024cbb64f..7f2d1c241559 100644 --- a/drivers/usb/usbip/vhci_rx.c +++ b/drivers/usb/usbip/vhci_rx.c @@ -261,7 +261,9 @@ int vhci_rx_loop(void *data) if (usbip_event_happened(ud)) break; + usbip_kcov_remote_start(ud); vhci_rx_pdu(ud); + usbip_kcov_remote_stop(); } return 0; diff --git a/drivers/usb/usbip/vhci_sysfs.c b/drivers/usb/usbip/vhci_sysfs.c index be37aec250c2..96e5371dc335 100644 --- a/drivers/usb/usbip/vhci_sysfs.c +++ b/drivers/usb/usbip/vhci_sysfs.c @@ -383,6 +383,7 @@ static ssize_t attach_store(struct device *dev, struct device_attribute *attr, vdev->ud.sockfd = sockfd; vdev->ud.tcp_socket = socket; vdev->ud.status = VDEV_ST_NOTASSIGNED; + usbip_kcov_handle_init(&vdev->ud); spin_unlock(&vdev->ud.lock); spin_unlock_irqrestore(&vhci->lock, flags);
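Closing note on the usbip changes: the KCOV hooks tie remote coverage collected in the vhci_rx kernel thread back to the userspace task that attached the device. The handle is captured once in attach_store() and replayed around every received PDU; condensed from the hunks above, with the usbip_kcov_* wrappers inlined:

    /* attach_store() (vhci_sysfs.c): remember the attaching task's handle */
    vdev->ud.kcov_handle = kcov_common_handle();

    /* vhci_rx_loop() (vhci_rx.c): credit each PDU's coverage to that task */
    while (!kthread_should_stop()) {
            if (usbip_event_happened(ud))
                    break;
            kcov_remote_start_common(ud->kcov_handle);
            vhci_rx_pdu(ud);
            kcov_remote_stop();
    }

Separately, the new bound check in vhci_hub_control() guards the 1 << wValue shift into the u32 port_status word; without it, an out-of-range ClearPortFeature value is an undefined shift.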