author    Eric Anholt <eric@anholt.net>  2015-06-23 17:01:19 -0700
committer Eric Anholt <eric@anholt.net>  2015-06-23 17:01:27 -0700
commit    d0d5beea146588ac94b08bd48496987191d76568 (patch)
tree      719ad1b5af342aec1597ded7465439e70dacc6b4
parent    d9fa11803a0d2e3b1eaf4041ddd0ca5651300d96 (diff)
parent    56d1015c668327beefaf8142b0f83960437052c7 (diff)
download  linux-d0d5beea146588ac94b08bd48496987191d76568.tar.gz
Merge remote-tracking branch 'vc4-kms-v3d' into vc4-kms-v3d-rpi2
-rw-r--r--  Documentation/devicetree/bindings/arm/bcm/brcm,bcm2835.txt | 31
-rw-r--r--  Documentation/devicetree/bindings/arm/bcm/raspberrypi,firmware.txt | 14
-rw-r--r--  Documentation/devicetree/bindings/mailbox/brcm,bcm2835-mbox.txt | 26
-rw-r--r--  Documentation/devicetree/bindings/mailbox/nvidia,tegra124-xusb-mbox.txt | 30
-rw-r--r--  Documentation/devicetree/bindings/vendor-prefixes.txt | 1
-rw-r--r--  MAINTAINERS | 6
-rw-r--r--  arch/arm/boot/dts/bcm2835-rpi-b-plus.dts | 2
-rw-r--r--  arch/arm/boot/dts/bcm2835-rpi-b.dts | 2
-rw-r--r--  arch/arm/boot/dts/bcm2835-rpi.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/bcm2835.dtsi | 63
-rw-r--r--  arch/arm/mach-bcm/board_bcm2835.c | 91
-rw-r--r--  drivers/base/power/domain.c | 2
-rw-r--r--  drivers/firmware/Makefile | 1
-rw-r--r--  drivers/firmware/raspberrypi.c | 230
-rw-r--r--  drivers/gpu/drm/Kconfig | 2
-rw-r--r--  drivers/gpu/drm/Makefile | 1
-rw-r--r--  drivers/gpu/drm/drm_gem_cma_helper.c | 5
-rw-r--r--  drivers/gpu/drm/vc4/Kconfig | 10
-rw-r--r--  drivers/gpu/drm/vc4/Makefile | 24
-rw-r--r--  drivers/gpu/drm/vc4/vc4_bo.c | 431
-rw-r--r--  drivers/gpu/drm/vc4/vc4_crtc.c | 445
-rw-r--r--  drivers/gpu/drm/vc4/vc4_debugfs.c | 40
-rw-r--r--  drivers/gpu/drm/vc4/vc4_drv.c | 280
-rw-r--r--  drivers/gpu/drm/vc4/vc4_drv.h | 448
-rw-r--r--  drivers/gpu/drm/vc4/vc4_gem.c | 653
-rw-r--r--  drivers/gpu/drm/vc4/vc4_hdmi.c | 588
-rw-r--r--  drivers/gpu/drm/vc4/vc4_hvs.c | 159
-rw-r--r--  drivers/gpu/drm/vc4/vc4_irq.c | 213
-rw-r--r--  drivers/gpu/drm/vc4/vc4_kms.c | 80
-rw-r--r--  drivers/gpu/drm/vc4/vc4_packet.h | 367
-rw-r--r--  drivers/gpu/drm/vc4/vc4_plane.c | 299
-rw-r--r--  drivers/gpu/drm/vc4/vc4_qpu_defines.h | 268
-rw-r--r--  drivers/gpu/drm/vc4/vc4_regs.h | 437
-rw-r--r--  drivers/gpu/drm/vc4/vc4_render_cl.c | 448
-rw-r--r--  drivers/gpu/drm/vc4/vc4_v3d.c | 278
-rw-r--r--  drivers/gpu/drm/vc4/vc4_validate.c | 952
-rw-r--r--  drivers/gpu/drm/vc4/vc4_validate_shaders.c | 537
-rw-r--r--  drivers/mailbox/Kconfig | 17
-rw-r--r--  drivers/mailbox/Makefile | 4
-rw-r--r--  drivers/mailbox/arm_mhu.c | 2
-rw-r--r--  drivers/mailbox/bcm2835-mailbox.c | 217
-rw-r--r--  drivers/mailbox/mailbox-altera.c | 2
-rw-r--r--  drivers/mailbox/mailbox.c | 11
-rw-r--r--  drivers/mailbox/omap-mailbox.c | 8
-rw-r--r--  drivers/mailbox/pcc.c | 2
-rw-r--r--  drivers/mailbox/tegra-xusb-mailbox.c | 290
-rw-r--r--  drivers/watchdog/bcm2835_wdt.c | 62
-rw-r--r--  include/drm/drmP.h | 1
-rw-r--r--  include/dt-bindings/pinctrl/bcm2835.h | 27
-rw-r--r--  include/linux/mailbox_controller.h | 2
-rw-r--r--  include/soc/bcm2835/raspberrypi-firmware-property.h | 112
-rw-r--r--  include/soc/tegra/xusb.h | 43
-rw-r--r--  include/uapi/drm/vc4_drm.h | 204
-rw-r--r--  mm/page_alloc.c | 2
54 files changed, 8359 insertions(+), 113 deletions(-)
diff --git a/Documentation/devicetree/bindings/arm/bcm/brcm,bcm2835.txt b/Documentation/devicetree/bindings/arm/bcm/brcm,bcm2835.txt
index ac683480c486..c78576bb7729 100644
--- a/Documentation/devicetree/bindings/arm/bcm/brcm,bcm2835.txt
+++ b/Documentation/devicetree/bindings/arm/bcm/brcm,bcm2835.txt
@@ -1,8 +1,35 @@
Broadcom BCM2835 device tree bindings
-------------------------------------------
-Boards with the BCM2835 SoC shall have the following properties:
+Raspberry Pi Model A
+Required root node properties:
+compatible = "raspberrypi,model-a", "brcm,bcm2835";
-Required root node property:
+Raspberry Pi Model A+
+Required root node properties:
+compatible = "raspberrypi,model-a-plus", "brcm,bcm2835";
+Raspberry Pi Model B
+Required root node properties:
+compatible = "raspberrypi,model-b", "brcm,bcm2835";
+
+Raspberry Pi Model B (no P5)
+early model B with I2C0 rather than I2C1 routed to the expansion header
+Required root node properties:
+compatible = "raspberrypi,model-b-i2c0", "brcm,bcm2835";
+
+Raspberry Pi Model B rev2
+Required root node properties:
+compatible = "raspberrypi,model-b-rev2", "brcm,bcm2835";
+
+Raspberry Pi Model B+
+Required root node properties:
+compatible = "raspberrypi,model-b-plus", "brcm,bcm2835";
+
+Raspberry Pi Compute Module
+Required root node properties:
+compatible = "raspberrypi,compute-module", "brcm,bcm2835";
+
+Generic BCM2835 board
+Required root node properties:
compatible = "brcm,bcm2835";
diff --git a/Documentation/devicetree/bindings/arm/bcm/raspberrypi,firmware.txt b/Documentation/devicetree/bindings/arm/bcm/raspberrypi,firmware.txt
new file mode 100644
index 000000000000..dbf60f90c01b
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/bcm/raspberrypi,firmware.txt
@@ -0,0 +1,14 @@
+Raspberry Pi VideoCore firmware driver
+
+Required properties:
+
+- compatible: Should be "raspberrypi,firmware"
+- mboxes: Phandle to the firmware device's Mailbox.
+ (See: ../mailbox/mailbox.txt for more information)
+
+Example:
+
+firmware {
+ compatible = "raspberrypi,firmware";
+ mboxes = <&mailbox>;
+};
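
A client driver is expected to reach this node through a phandle and hand it to the firmware API added later in this merge (see rpi_firmware_property() in drivers/firmware/raspberrypi.c below). A minimal sketch, assuming the client's own DT node carries a "firmware" phandle, as vc4_drm_load() parses further down:

/* Sketch of the client side: resolve the firmware node via a
 * "firmware" phandle in the client's own DT node. */
#include <linux/device.h>
#include <linux/of.h>

static struct device_node *example_get_fw_node(struct device *dev)
{
	struct device_node *fw_node =
		of_parse_phandle(dev->of_node, "firmware", 0);

	/* NULL means the phandle is missing from the DT.  Callers of
	 * the firmware API should treat an unprobed firmware device as
	 * -EPROBE_DEFER, as vc4_drm_load() does. */
	return fw_node;
}
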
diff --git a/Documentation/devicetree/bindings/mailbox/brcm,bcm2835-mbox.txt b/Documentation/devicetree/bindings/mailbox/brcm,bcm2835-mbox.txt
new file mode 100644
index 000000000000..e893615ef635
--- /dev/null
+++ b/Documentation/devicetree/bindings/mailbox/brcm,bcm2835-mbox.txt
@@ -0,0 +1,26 @@
+Broadcom BCM2835 VideoCore mailbox IPC
+
+Required properties:
+
+- compatible: Should be "brcm,bcm2835-mbox"
+- reg: Specifies base physical address and size of the registers
+- interrupts: The interrupt number
+ See bindings/interrupt-controller/brcm,bcm2835-armctrl-ic.txt
+- #mbox-cells: Specifies the number of cells needed to encode a mailbox
+ channel. The value shall be 0, since there is only one
+ mailbox channel implemented by the device.
+
+Example:
+
+mailbox: mailbox@7e00b880 {
+ compatible = "brcm,bcm2835-mbox";
+ reg = <0x7e00b880 0x40>;
+ interrupts = <0 1>;
+ #mbox-cells = <0>;
+};
+
+firmware: firmware {
+ compatible = "raspberrypi,firmware";
+ mboxes = <&mailbox>;
+ #power-domain-cells = <1>;
+};
diff --git a/Documentation/devicetree/bindings/mailbox/nvidia,tegra124-xusb-mbox.txt b/Documentation/devicetree/bindings/mailbox/nvidia,tegra124-xusb-mbox.txt
new file mode 100644
index 000000000000..9d89afadc070
--- /dev/null
+++ b/Documentation/devicetree/bindings/mailbox/nvidia,tegra124-xusb-mbox.txt
@@ -0,0 +1,30 @@
+NVIDIA Tegra XUSB mailbox
+=========================
+
+The Tegra XUSB mailbox is used by the Tegra xHCI controller's firmware to
+communicate requests to the host and PHY drivers.
+
+Refer to ./mailbox.txt for generic information about mailbox device-tree
+bindings.
+
+Required properties:
+--------------------
+ - compatible: For Tegra124, must contain "nvidia,tegra124-xusb-mbox".
+ Otherwise, must contain '"nvidia,<chip>-xusb-mbox",
+ "nvidia,tegra124-xusb-mbox"' where <chip> is tegra132.
+ - #mbox-cells: Should be 0. There is only one physical channel.
+
+Example:
+--------
+ mailbox {
+ compatible = "nvidia,tegra124-xusb-mbox";
+
+ #mbox-cells = <0>;
+ };
+
+ usb-host {
+ ...
+ mboxes = <&xusb_mbox>;
+ mbox-names = "xusb";
+ ...
+ };
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt
index 80339192c93e..3fc90ac4f801 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.txt
+++ b/Documentation/devicetree/bindings/vendor-prefixes.txt
@@ -159,6 +159,7 @@ radxa Radxa
raidsonic RaidSonic Technology GmbH
ralink Mediatek/Ralink Technology Corp.
ramtron Ramtron International
+raspberrypi Raspberry Pi Foundation
realtek Realtek Semiconductor Corp.
renesas Renesas Electronics Corporation
ricoh Ricoh Co. Ltd.
diff --git a/MAINTAINERS b/MAINTAINERS
index d8afd2953678..7b88c449ad1a 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2153,6 +2153,12 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/rpi/linux-rpi.git
S: Maintained
N: bcm2835
+BROADCOM BCM2835 DRM DRIVER
+M: Eric Anholt <eric@anholt.net>
+T: git git://github.com/anholt/linux
+S: Maintained
+F: drivers/gpu/drm/vc4/*
+
BROADCOM BCM33XX MIPS ARCHITECTURE
M: Kevin Cernekee <cernekee@gmail.com>
L: linux-mips@linux-mips.org
diff --git a/arch/arm/boot/dts/bcm2835-rpi-b-plus.dts b/arch/arm/boot/dts/bcm2835-rpi-b-plus.dts
index b0fb0e8f512f..35f50fcb0c9c 100644
--- a/arch/arm/boot/dts/bcm2835-rpi-b-plus.dts
+++ b/arch/arm/boot/dts/bcm2835-rpi-b-plus.dts
@@ -1,5 +1,5 @@
/dts-v1/;
-/include/ "bcm2835-rpi.dtsi"
+#include "bcm2835-rpi.dtsi"
/ {
compatible = "raspberrypi,model-b-plus", "brcm,bcm2835";
diff --git a/arch/arm/boot/dts/bcm2835-rpi-b.dts b/arch/arm/boot/dts/bcm2835-rpi-b.dts
index b867224027e7..8ac6c2910944 100644
--- a/arch/arm/boot/dts/bcm2835-rpi-b.dts
+++ b/arch/arm/boot/dts/bcm2835-rpi-b.dts
@@ -1,5 +1,5 @@
/dts-v1/;
-/include/ "bcm2835-rpi.dtsi"
+#include "bcm2835-rpi.dtsi"
/ {
compatible = "raspberrypi,model-b", "brcm,bcm2835";
diff --git a/arch/arm/boot/dts/bcm2835-rpi.dtsi b/arch/arm/boot/dts/bcm2835-rpi.dtsi
index 466f02b44685..8498f421b31f 100644
--- a/arch/arm/boot/dts/bcm2835-rpi.dtsi
+++ b/arch/arm/boot/dts/bcm2835-rpi.dtsi
@@ -1,4 +1,4 @@
-/include/ "bcm2835.dtsi"
+#include "bcm2835.dtsi"
/ {
/* This is left here in case u-boot needs it */
diff --git a/arch/arm/boot/dts/bcm2835.dtsi b/arch/arm/boot/dts/bcm2835.dtsi
index 4b6dd65f3464..76c9cc18d911 100644
--- a/arch/arm/boot/dts/bcm2835.dtsi
+++ b/arch/arm/boot/dts/bcm2835.dtsi
@@ -1,4 +1,6 @@
-/include/ "skeleton.dtsi"
+#include <dt-bindings/pinctrl/bcm2835.h>
+#include <dt-bindings/gpio/gpio.h>
+#include "skeleton.dtsi"
/ {
compatible = "brcm,bcm2835";
@@ -119,7 +121,7 @@
status = "disabled";
};
- i2c0: i2c@20205000 {
+ i2c0: i2c@7e205000 {
compatible = "brcm,bcm2835-i2c";
reg = <0x7e205000 0x1000>;
interrupts = <2 21>;
@@ -150,6 +152,15 @@
status = "disabled";
};
+ i2c2: i2c@7e805000 {
+ compatible = "brcm,bcm2835-i2c";
+ reg = <0x7e805000 0x1000>;
+ interrupts = <2 21>;
+ clocks = <&clk_i2c>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
usb: usb@7e980000 {
compatible = "brcm,bcm2835-usb";
reg = <0x7e980000 0x10000>;
@@ -174,6 +185,54 @@
thermal: thermal {
compatible = "brcm,bcm2835-thermal";
};
+
+ v3d: brcm,vc4-v3d@7ec00000 {
+ compatible = "brcm,vc4-v3d";
+ reg = <0x7ec00000 0x1000>;
+ interrupts = <1 10>;
+ };
+
+ hdmi: brcm,vc4-hdmi@7e902000 {
+ compatible = "brcm,vc4-hdmi";
+ reg = <0x7e902000 0x600>,
+ <0x7e808000 0x100>;
+ interrupts = <2 8>, <2 9>;
+ ddc = <&i2c2>;
+ hpd-gpio = <&gpio 46 GPIO_ACTIVE_HIGH>;
+ crtc = <&pv2>;
+ };
+
+ pv0: brcm,vc4-pixelvalve@7e206000 {
+ compatible = "brcm,vc4-pixelvalve";
+ reg = <0x7e206000 0x100>;
+ interrupts = <2 13>; /* pwa2 */
+ };
+
+ pv1: brcm,vc4-pixelvalve@7e207000 {
+ compatible = "brcm,vc4-pixelvalve";
+ reg = <0x7e207000 0x100>;
+ interrupts = <2 14>; /* pwa1 */
+ };
+
+ pv2: brcm,vc4-pixelvalve@7e807000 {
+ compatible = "brcm,vc4-pixelvalve";
+ reg = <0x7e807000 0x100>;
+ interrupts = <2 10>; /* pixelvalve */
+ };
+
+ hvs: brcm,hvs@7e400000 {
+ compatible = "brcm,vc4-hvs";
+ reg = <0x7e400000 0x6000>;
+ };
+
+ vc4: vc4@0x7e4c0000 {
+ compatible = "brcm,vc4";
+
+ gpus = <&v3d>;
+ crtcs = <&pv0>, <&pv1>, <&pv2>;
+ encoders = <&hdmi>;
+ hvss = <&hvs>;
+ };
};
clocks {
diff --git a/arch/arm/mach-bcm/board_bcm2835.c b/arch/arm/mach-bcm/board_bcm2835.c
index f7fdacd826d2..1e6f1cf95bf3 100644
--- a/arch/arm/mach-bcm/board_bcm2835.c
+++ b/arch/arm/mach-bcm/board_bcm2835.c
@@ -12,7 +12,6 @@
* GNU General Public License for more details.
*/
-#include <linux/delay.h>
#include <linux/init.h>
#include <linux/irqchip.h>
#include <linux/of_address.h>
@@ -23,89 +22,6 @@
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
-#define PM_RSTC 0x1c
-#define PM_RSTS 0x20
-#define PM_WDOG 0x24
-
-#define PM_PASSWORD 0x5a000000
-#define PM_RSTC_WRCFG_MASK 0x00000030
-#define PM_RSTC_WRCFG_FULL_RESET 0x00000020
-#define PM_RSTS_HADWRH_SET 0x00000040
-
-#define BCM2835_PERIPH_PHYS 0x20000000
-#define BCM2835_PERIPH_VIRT 0xf0000000
-#define BCM2835_PERIPH_SIZE SZ_16M
-
-static void __iomem *wdt_regs;
-
-/*
- * The machine restart method can be called from an atomic context so we won't
- * be able to ioremap the regs then.
- */
-static void bcm2835_setup_restart(void)
-{
- struct device_node *np = of_find_compatible_node(NULL, NULL,
- "brcm,bcm2835-pm-wdt");
- if (WARN(!np, "unable to setup watchdog restart"))
- return;
-
- wdt_regs = of_iomap(np, 0);
- WARN(!wdt_regs, "failed to remap watchdog regs");
-}
-
-static void bcm2835_restart(enum reboot_mode mode, const char *cmd)
-{
- u32 val;
-
- if (!wdt_regs)
- return;
-
- /* use a timeout of 10 ticks (~150us) */
- writel_relaxed(10 | PM_PASSWORD, wdt_regs + PM_WDOG);
- val = readl_relaxed(wdt_regs + PM_RSTC);
- val &= ~PM_RSTC_WRCFG_MASK;
- val |= PM_PASSWORD | PM_RSTC_WRCFG_FULL_RESET;
- writel_relaxed(val, wdt_regs + PM_RSTC);
-
- /* No sleeping, possibly atomic. */
- mdelay(1);
-}
-
-/*
- * We can't really power off, but if we do the normal reset scheme, and
- * indicate to bootcode.bin not to reboot, then most of the chip will be
- * powered off.
- */
-static void bcm2835_power_off(void)
-{
- u32 val;
-
- /*
- * We set the watchdog hard reset bit here to distinguish this reset
- * from the normal (full) reset. bootcode.bin will not reboot after a
- * hard reset.
- */
- val = readl_relaxed(wdt_regs + PM_RSTS);
- val &= ~PM_RSTC_WRCFG_MASK;
- val |= PM_PASSWORD | PM_RSTS_HADWRH_SET;
- writel_relaxed(val, wdt_regs + PM_RSTS);
-
- /* Continue with normal reset mechanism */
- bcm2835_restart(REBOOT_HARD, "");
-}
-
-static struct map_desc io_map __initdata = {
- .virtual = BCM2835_PERIPH_VIRT,
- .pfn = __phys_to_pfn(BCM2835_PERIPH_PHYS),
- .length = BCM2835_PERIPH_SIZE,
- .type = MT_DEVICE
-};
-
-static void __init bcm2835_map_io(void)
-{
- iotable_init(&io_map, 1);
-}
-
static void __init bcm2835_init(void)
{
struct device_node *np = of_find_node_by_path("/system");
@@ -113,10 +29,6 @@ static void __init bcm2835_init(void)
u64 val64;
int ret;
- bcm2835_setup_restart();
- if (wdt_regs)
- pm_power_off = bcm2835_power_off;
-
bcm2835_init_clocks();
ret = of_platform_populate(NULL, of_default_bus_match_table, NULL,
@@ -138,9 +50,6 @@ static const char * const bcm2835_compat[] = {
};
DT_MACHINE_START(BCM2835, "BCM2835")
- .map_io = bcm2835_map_io,
- .init_irq = irqchip_init,
.init_machine = bcm2835_init,
- .restart = bcm2835_restart,
.dt_compat = bcm2835_compat
MACHINE_END
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 2327613d4539..dd8a86b248b6 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -2091,7 +2091,7 @@ EXPORT_SYMBOL_GPL(of_genpd_del_provider);
struct generic_pm_domain *of_genpd_get_from_provider(
struct of_phandle_args *genpdspec)
{
- struct generic_pm_domain *genpd = ERR_PTR(-ENOENT);
+ struct generic_pm_domain *genpd = ERR_PTR(-EPROBE_DEFER);
struct of_genpd_provider *provider;
mutex_lock(&of_genpd_mutex);
diff --git a/drivers/firmware/Makefile b/drivers/firmware/Makefile
index 3fdd3912709a..41ced2885b46 100644
--- a/drivers/firmware/Makefile
+++ b/drivers/firmware/Makefile
@@ -13,6 +13,7 @@ obj-$(CONFIG_ISCSI_IBFT) += iscsi_ibft.o
obj-$(CONFIG_FIRMWARE_MEMMAP) += memmap.o
obj-$(CONFIG_QCOM_SCM) += qcom_scm.o
CFLAGS_qcom_scm.o :=$(call as-instr,.arch_extension sec,-DREQUIRES_SEC=1)
+obj-$(CONFIG_BCM2835_MBOX) += raspberrypi.o
obj-$(CONFIG_GOOGLE_FIRMWARE) += google/
obj-$(CONFIG_EFI) += efi/
diff --git a/drivers/firmware/raspberrypi.c b/drivers/firmware/raspberrypi.c
new file mode 100644
index 000000000000..d612815fa106
--- /dev/null
+++ b/drivers/firmware/raspberrypi.c
@@ -0,0 +1,230 @@
+/*
+ * Copyright © 2015 Broadcom
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * Defines interfaces for interacting with the Raspberry Pi firmware's
+ * property channel.
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/mailbox_client.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <soc/bcm2835/raspberrypi-firmware-property.h>
+
+#define MBOX_MSG(chan, data28) (((data28) & ~0xf) | ((chan) & 0xf))
+#define MBOX_CHAN(msg) ((msg) & 0xf)
+#define MBOX_DATA28(msg) ((msg) & ~0xf)
+#define MBOX_CHAN_PROPERTY 8
+
+struct rpi_firmware {
+ struct mbox_client cl;
+ struct mbox_chan *chan; /* The property channel. */
+ struct completion c;
+ u32 enabled;
+};
+
+static DEFINE_MUTEX(transaction_lock);
+
+static void response_callback(struct mbox_client *cl, void *msg)
+{
+ struct rpi_firmware *fw = container_of(cl, struct rpi_firmware, cl);
+ complete(&fw->c);
+}
+
+/*
+ * Sends a request to the firmware through the BCM2835 mailbox driver,
+ * and synchronously waits for the reply.
+ */
+static int
+rpi_firmware_transaction(struct rpi_firmware *fw, u32 chan, u32 data)
+{
+ u32 message = MBOX_MSG(chan, data);
+ int ret;
+
+ WARN_ON(data & 0xf);
+
+ mutex_lock(&transaction_lock);
+ reinit_completion(&fw->c);
+ ret = mbox_send_message(fw->chan, &message);
+ if (ret >= 0) {
+ wait_for_completion(&fw->c);
+ ret = 0;
+ } else {
+ dev_err(fw->cl.dev, "mbox_send_message returned %d\n", ret);
+ }
+ mutex_unlock(&transaction_lock);
+
+ return ret;
+}
+
+/**
+ * rpi_firmware_property_list - Submit firmware property list
+ * @of_node: Pointer to the firmware Device Tree node.
+ * @data: Buffer holding tags.
+ * @tag_size: Size of tags buffer.
+ *
+ * Submits a set of concatenated tags to the VPU firmware through the
+ * mailbox property interface.
+ *
+ * The buffer header and the ending tag are added by this function and
+ * don't need to be supplied, just the actual tags for your operation.
+ * See struct rpi_firmware_property_tag_header for the per-tag
+ * structure.
+ */
+int rpi_firmware_property_list(struct device_node *of_node,
+ void *data, size_t tag_size)
+{
+ struct platform_device *pdev = of_find_device_by_node(of_node);
+ struct rpi_firmware *fw = platform_get_drvdata(pdev);
+ size_t size = tag_size + 12;
+ u32 *buf;
+ dma_addr_t bus_addr;
+ int ret = 0;
+
+ if (!fw)
+ return -EPROBE_DEFER;
+
+ /* Packets are processed a dword at a time. */
+ if (size & 3)
+ return -EINVAL;
+
+ buf = dma_alloc_coherent(fw->cl.dev, PAGE_ALIGN(size), &bus_addr,
+ GFP_ATOMIC);
+ if (!buf)
+ return -ENOMEM;
+
+ /* The firmware will error out without parsing in this case. */
+ WARN_ON(size >= 1024 * 1024);
+
+ buf[0] = size;
+ buf[1] = RPI_FIRMWARE_STATUS_REQUEST;
+ memcpy(&buf[2], data, tag_size);
+ buf[size / 4 - 1] = RPI_FIRMWARE_PROPERTY_END;
+ wmb();
+
+ ret = rpi_firmware_transaction(fw, MBOX_CHAN_PROPERTY, bus_addr);
+
+ rmb();
+ memcpy(data, &buf[2], tag_size);
+ if (ret == 0 && buf[1] != RPI_FIRMWARE_STATUS_SUCCESS) {
+ /*
+ * The tag name here might not be the one causing the
+ * error, if there were multiple tags in the request.
+ * But single-tag is the most common, so go with it.
+ */
+ dev_err(fw->cl.dev, "Request 0x%08x returned status 0x%08x\n",
+ buf[2], buf[1]);
+ ret = -EINVAL;
+ }
+
+ dma_free_coherent(fw->cl.dev, PAGE_ALIGN(size), buf, bus_addr);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(rpi_firmware_property_list);
+
+/**
+ * rpi_firmware_property - Submit single firmware property
+ * @of_node: Pointer to the firmware Device Tree node.
+ * @tag: One of the RPI_FIRMWARE_* property tags.
+ * @tag_data: Tag data buffer.
+ * @buf_size: Buffer size.
+ *
+ * Submits a single tag to the VPU firmware through the mailbox
+ * property interface.
+ *
+ * This is a convenience wrapper around
+ * rpi_firmware_property_list() to avoid some of the
+ * boilerplate in property calls.
+ */
+int rpi_firmware_property(struct device_node *of_node,
+ u32 tag, void *tag_data, size_t buf_size)
+{
+ /* Single tags are very small (generally 8 bytes), so the
+ * stack should be safe.
+ */
+ u8 data[buf_size + sizeof(struct rpi_firmware_property_tag_header)];
+ struct rpi_firmware_property_tag_header *header =
+ (struct rpi_firmware_property_tag_header *)data;
+ int ret;
+
+ header->tag = tag;
+ header->buf_size = buf_size;
+ header->req_resp_size = 0;
+ memcpy(data + sizeof(struct rpi_firmware_property_tag_header),
+ tag_data, buf_size);
+
+ ret = rpi_firmware_property_list(of_node, &data, sizeof(data));
+ memcpy(tag_data,
+ data + sizeof(struct rpi_firmware_property_tag_header),
+ buf_size);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(rpi_firmware_property);
+
+static int rpi_firmware_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct rpi_firmware *fw;
+
+ fw = devm_kzalloc(dev, sizeof(*fw), GFP_KERNEL);
+ if (!fw)
+ return -ENOMEM;
+
+ fw->cl.dev = dev;
+ fw->cl.rx_callback = response_callback;
+ fw->cl.tx_block = true;
+
+ fw->chan = mbox_request_channel(&fw->cl, 0);
+ if (IS_ERR(fw->chan)) {
+ int ret = PTR_ERR(fw->chan);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Failed to get mbox channel: %d\n", ret);
+ return ret;
+ }
+
+ init_completion(&fw->c);
+
+ dev_info(dev, "Firmware driver\n");
+ platform_set_drvdata(pdev, fw);
+
+ return 0;
+}
+
+static int rpi_firmware_remove(struct platform_device *pdev)
+{
+ struct rpi_firmware *fw = platform_get_drvdata(pdev);
+
+ mbox_free_channel(fw->chan);
+
+ return 0;
+}
+
+static const struct of_device_id rpi_firmware_of_match[] = {
+ { .compatible = "raspberrypi,firmware", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, rpi_firmware_of_match);
+
+static struct platform_driver rpi_firmware_driver = {
+ .driver = {
+ .name = "raspberrypi-firmware",
+ .owner = THIS_MODULE,
+ .of_match_table = rpi_firmware_of_match,
+ },
+ .probe = rpi_firmware_probe,
+ .remove = rpi_firmware_remove,
+};
+module_platform_driver(rpi_firmware_driver);
+
+MODULE_AUTHOR("Eric Anholt <eric@anholt.net>");
+MODULE_DESCRIPTION("Raspberry Pi firmware driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 47f2ce81b412..3151dc84894f 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -217,3 +217,5 @@ source "drivers/gpu/drm/sti/Kconfig"
source "drivers/gpu/drm/amd/amdkfd/Kconfig"
source "drivers/gpu/drm/imx/Kconfig"
+
+source "drivers/gpu/drm/vc4/Kconfig"
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 7d4944e1a60c..c6b413e7929f 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -43,6 +43,7 @@ obj-$(CONFIG_DRM_MGA) += mga/
obj-$(CONFIG_DRM_I810) += i810/
obj-$(CONFIG_DRM_I915) += i915/
obj-$(CONFIG_DRM_MGAG200) += mgag200/
+obj-$(CONFIG_DRM_VC4) += vc4/
obj-$(CONFIG_DRM_CIRRUS_QEMU) += cirrus/
obj-$(CONFIG_DRM_SIS) += sis/
obj-$(CONFIG_DRM_SAVAGE)+= savage/
diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c
index e419eedf751d..be121c9a8e92 100644
--- a/drivers/gpu/drm/drm_gem_cma_helper.c
+++ b/drivers/gpu/drm/drm_gem_cma_helper.c
@@ -58,8 +58,11 @@ __drm_gem_cma_create(struct drm_device *drm, size_t size)
struct drm_gem_cma_object *cma_obj;
struct drm_gem_object *gem_obj;
int ret;
+ size_t obj_size = (drm->driver->gem_obj_size ?
+ drm->driver->gem_obj_size :
+ sizeof(*cma_obj));
- cma_obj = kzalloc(sizeof(*cma_obj), GFP_KERNEL);
+ cma_obj = kzalloc(obj_size, GFP_KERNEL);
if (!cma_obj)
return ERR_PTR(-ENOMEM);
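
The new gem_obj_size hook lets a driver over-allocate the CMA GEM object so it can wrap it in a larger driver-private struct. A sketch of the pattern, which vc4 itself uses via struct vc4_bo and the .gem_obj_size = sizeof(struct vc4_bo) initializer further down:

/* Sketch: subclassing drm_gem_cma_object through gem_obj_size. */
struct my_bo {
	struct drm_gem_cma_object base;	/* must stay the first member so
					 * pointers can be cast both ways */
	u64 seqno;			/* driver-private per-BO state */
};

/*
 * In the driver's struct drm_driver initializer:
 *	.gem_obj_size = sizeof(struct my_bo),
 *
 * __drm_gem_cma_create() then kzalloc()s sizeof(struct my_bo) rather
 * than sizeof(struct drm_gem_cma_object), and helpers like vc4's
 * to_vc4_bo() recover the wrapper with a cast.
 */
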
diff --git a/drivers/gpu/drm/vc4/Kconfig b/drivers/gpu/drm/vc4/Kconfig
new file mode 100644
index 000000000000..09706c1eeaca
--- /dev/null
+++ b/drivers/gpu/drm/vc4/Kconfig
@@ -0,0 +1,10 @@
+config DRM_VC4
+ tristate "Broadcom VC4 Graphics"
+ depends on ARCH_BCM2835
+ depends on DRM
+ select DRM_KMS_HELPER
+ select DRM_KMS_FB_HELPER
+ select DRM_KMS_CMA_HELPER
+ help
+ Choose this option if you have a system that has a Broadcom
+ VC4 GPU, such as the Raspberry Pi or other BCM2708/BCM2835 boards.
diff --git a/drivers/gpu/drm/vc4/Makefile b/drivers/gpu/drm/vc4/Makefile
new file mode 100644
index 000000000000..cf3898da60f5
--- /dev/null
+++ b/drivers/gpu/drm/vc4/Makefile
@@ -0,0 +1,24 @@
+ccflags-y := -Iinclude/drm
+
+# Please keep these build lists sorted!
+
+# core driver code
+vc4-y := \
+ vc4_bo.o \
+ vc4_crtc.o \
+ vc4_drv.o \
+ vc4_kms.o \
+ vc4_gem.o \
+ vc4_hdmi.o \
+ vc4_hvs.o \
+ vc4_irq.o \
+ vc4_plane.o \
+ vc4_render_cl.o \
+ vc4_v3d.o \
+ vc4_validate.o \
+ vc4_validate_shaders.o \
+ $()
+
+vc4-$(CONFIG_DEBUG_FS) += vc4_debugfs.o
+
+obj-$(CONFIG_DRM_VC4) += vc4.o
diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c
new file mode 100644
index 000000000000..e4f14aa8ca9f
--- /dev/null
+++ b/drivers/gpu/drm/vc4/vc4_bo.c
@@ -0,0 +1,431 @@
+/*
+ * Copyright © 2014-2015 Broadcom
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * VC4 GEM BO management support.
+ *
+ * The VC4 GPU architecture (both scanout and rendering) has direct
+ * access to system memory with no MMU in between. To support it, we
+ * use the GEM CMA helper functions to allocate contiguous ranges of
+ * physical memory for our BOs.
+ */
+
+#include "vc4_drv.h"
+#include "uapi/drm/vc4_drm.h"
+
+static struct {
+ u32 num_allocated;
+ u32 size_allocated;
+ u32 num_cached;
+ u32 size_cached;
+} bo_stats;
+
+static void
+vc4_bo_stats_dump(void)
+{
+ DRM_INFO("num bos allocated: %d\n",
+ bo_stats.num_allocated);
+ DRM_INFO("size bos allocated: %dkb\n",
+ bo_stats.size_allocated / 1024);
+ DRM_INFO("num bos used: %d\n",
+ bo_stats.num_allocated - bo_stats.num_cached);
+ DRM_INFO("size bos used: %dkb\n",
+ (bo_stats.size_allocated - bo_stats.size_cached) / 1024);
+ DRM_INFO("num bos cached: %d\n",
+ bo_stats.num_cached);
+ DRM_INFO("size bos cached: %dkb\n",
+ bo_stats.size_cached / 1024);
+}
+
+static uint32_t
+bo_page_index(size_t size)
+{
+ return (size / PAGE_SIZE) - 1;
+}
+
+static void
+vc4_bo_destroy(struct vc4_bo *bo)
+{
+ bo_stats.num_allocated--;
+ bo_stats.size_allocated -= bo->base.base.size;
+ drm_gem_cma_free_object(&bo->base.base);
+}
+
+static void
+vc4_bo_remove_from_cache(struct vc4_bo *bo)
+{
+ bo_stats.num_cached--;
+ bo_stats.size_cached -= bo->base.base.size;
+
+ list_del(&bo->unref_head);
+ list_del(&bo->size_head);
+}
+
+static struct list_head *
+vc4_get_cache_list_for_size(struct drm_device *dev, size_t size)
+{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+ uint32_t page_index = bo_page_index(size);
+
+ if (vc4->bo_cache.size_list_size <= page_index) {
+ uint32_t new_size = max(vc4->bo_cache.size_list_size * 2,
+ page_index + 1);
+ struct list_head *new_list;
+ uint32_t i;
+
+ new_list = kmalloc(new_size * sizeof(struct list_head),
+ GFP_KERNEL);
+ if (!new_list)
+ return NULL;
+
+ /* Rebase the old cached BO lists to their new list
+ * head locations.
+ */
+ for (i = 0; i < vc4->bo_cache.size_list_size; i++) {
+ struct list_head *old_list = &vc4->bo_cache.size_list[i];
+ if (list_empty(old_list))
+ INIT_LIST_HEAD(&new_list[i]);
+ else
+ list_replace(old_list, &new_list[i]);
+ }
+ /* And initialize the brand new BO list heads. */
+ for (i = vc4->bo_cache.size_list_size; i < new_size; i++)
+ INIT_LIST_HEAD(&new_list[i]);
+
+ kfree(vc4->bo_cache.size_list);
+ vc4->bo_cache.size_list = new_list;
+ vc4->bo_cache.size_list_size = new_size;
+ }
+
+ return &vc4->bo_cache.size_list[page_index];
+}
+
+void
+vc4_bo_cache_purge(struct drm_device *dev)
+{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+
+ while (!list_empty(&vc4->bo_cache.time_list)) {
+ struct vc4_bo *bo = list_last_entry(&vc4->bo_cache.time_list,
+ struct vc4_bo, unref_head);
+ vc4_bo_remove_from_cache(bo);
+ vc4_bo_destroy(bo);
+ }
+}
+
+struct vc4_bo *
+vc4_bo_create(struct drm_device *dev, size_t unaligned_size)
+{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+ uint32_t size = roundup(unaligned_size, PAGE_SIZE);
+ uint32_t page_index = bo_page_index(size);
+ struct drm_gem_cma_object *cma_obj;
+ int pass;
+
+ if (size == 0)
+ return NULL;
+
+ /* First, try to get a vc4_bo from the kernel BO cache. */
+ if (vc4->bo_cache.size_list_size > page_index) {
+ if (!list_empty(&vc4->bo_cache.size_list[page_index])) {
+ struct vc4_bo *bo =
+ list_first_entry(&vc4->bo_cache.size_list[page_index],
+ struct vc4_bo, size_head);
+ vc4_bo_remove_from_cache(bo);
+ kref_init(&bo->base.base.refcount);
+ return bo;
+ }
+ }
+
+ /* Otherwise, make a new BO. */
+ for (pass = 0; ; pass++) {
+ cma_obj = drm_gem_cma_create(dev, size);
+ if (!IS_ERR(cma_obj))
+ break;
+
+ switch (pass) {
+ case 0:
+ /*
+ * If we've run out of CMA memory, kill the cache of
+ * CMA allocations we've got laying around and try again.
+ */
+ vc4_bo_cache_purge(dev);
+ break;
+ case 1:
+ /*
+ * Getting desperate, so try to wait for any
+ * previous rendering to finish, free its
+ * unreferenced BOs to the cache, and then
+ * free the cache.
+ */
+ vc4_wait_for_seqno(dev, vc4->emit_seqno, ~0ull);
+ vc4_job_handle_completed(vc4);
+ vc4_bo_cache_purge(dev);
+ break;
+ case 3:
+ DRM_ERROR("Failed to allocate from CMA:\n");
+ vc4_bo_stats_dump();
+ return NULL;
+ }
+ }
+
+ bo_stats.num_allocated++;
+ bo_stats.size_allocated += size;
+
+ return to_vc4_bo(&cma_obj->base);
+}
+
+int
+vc4_dumb_create(struct drm_file *file_priv,
+ struct drm_device *dev,
+ struct drm_mode_create_dumb *args)
+{
+ int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
+ struct vc4_bo *bo = NULL;
+ int ret;
+
+ if (args->pitch < min_pitch)
+ args->pitch = min_pitch;
+
+ if (args->size < args->pitch * args->height)
+ args->size = args->pitch * args->height;
+
+ mutex_lock(&dev->struct_mutex);
+ bo = vc4_bo_create(dev, roundup(args->size, PAGE_SIZE));
+ mutex_unlock(&dev->struct_mutex);
+ if (!bo)
+ return -ENOMEM;
+
+ ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);
+ drm_gem_object_unreference_unlocked(&bo->base.base);
+
+ return ret;
+}
+
+static void
+vc4_bo_cache_free_old(struct drm_device *dev)
+{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+ unsigned long expire_time = jiffies - msecs_to_jiffies(1000);
+
+ while (!list_empty(&vc4->bo_cache.time_list)) {
+ struct vc4_bo *bo = list_last_entry(&vc4->bo_cache.time_list,
+ struct vc4_bo, unref_head);
+ if (time_before(expire_time, bo->free_time)) {
+ mod_timer(&vc4->bo_cache.time_timer,
+ round_jiffies_up(jiffies +
+ msecs_to_jiffies(1000)));
+ return;
+ }
+
+ vc4_bo_remove_from_cache(bo);
+ vc4_bo_destroy(bo);
+ }
+}
+
+/* Called on the last userspace/kernel unreference of the BO. Returns
+ * it to the BO cache if possible, otherwise frees it.
+ *
+ * Note that this is called with the struct_mutex held.
+ */
+void
+vc4_free_object(struct drm_gem_object *gem_bo)
+{
+ struct drm_device *dev = gem_bo->dev;
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+ struct vc4_bo *bo = to_vc4_bo(gem_bo);
+ struct list_head *cache_list;
+
+ /* If the object references someone else's memory, we can't cache it.
+ */
+ if (gem_bo->import_attach) {
+ vc4_bo_destroy(bo);
+ return;
+ }
+
+ /* Don't cache if it was publicly named. */
+ if (gem_bo->name) {
+ vc4_bo_destroy(bo);
+ return;
+ }
+
+ cache_list = vc4_get_cache_list_for_size(dev, gem_bo->size);
+ if (!cache_list) {
+ vc4_bo_destroy(bo);
+ return;
+ }
+
+ kfree(bo->validated_shader);
+ bo->validated_shader = NULL;
+
+ /* If the BO was exported, and it's made it to this point,
+ * then the dmabuf usage has been completely finished (so it's
+ * safe now to let it turn into a shader again).
+ */
+ bo->dma_buf_import_export = false;
+
+ bo->free_time = jiffies;
+ list_add(&bo->size_head, cache_list);
+ list_add(&bo->unref_head, &vc4->bo_cache.time_list);
+
+ bo_stats.num_cached++;
+ bo_stats.size_cached += gem_bo->size;
+
+ vc4_bo_cache_free_old(dev);
+}
+
+static void
+vc4_bo_cache_time_work(struct work_struct *work)
+{
+ struct vc4_dev *vc4 =
+ container_of(work, struct vc4_dev, bo_cache.time_work);
+ struct drm_device *dev = vc4->dev;
+
+ mutex_lock(&dev->struct_mutex);
+ vc4_bo_cache_free_old(dev);
+ mutex_unlock(&dev->struct_mutex);
+}
+
+static void
+vc4_bo_cache_time_timer(unsigned long data)
+{
+ struct drm_device *dev = (struct drm_device *)data;
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+
+ schedule_work(&vc4->bo_cache.time_work);
+}
+
+void
+vc4_bo_cache_init(struct drm_device *dev)
+{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+
+ INIT_LIST_HEAD(&vc4->bo_cache.time_list);
+
+ INIT_WORK(&vc4->bo_cache.time_work, vc4_bo_cache_time_work);
+ setup_timer(&vc4->bo_cache.time_timer,
+ vc4_bo_cache_time_timer,
+ (unsigned long) dev);
+}
+
+void
+vc4_bo_cache_destroy(struct drm_device *dev)
+{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+
+ del_timer(&vc4->bo_cache.time_timer);
+ cancel_work_sync(&vc4->bo_cache.time_work);
+
+ vc4_bo_cache_purge(dev);
+
+ if (bo_stats.num_allocated) {
+ DRM_ERROR("Destroying BO cache while BOs still allocated:\n");
+ vc4_bo_stats_dump();
+ }
+}
+
+struct drm_gem_object *
+vc4_prime_import(struct drm_device *dev, struct dma_buf *dma_buf)
+{
+ struct drm_gem_object *obj = drm_gem_prime_import(dev, dma_buf);
+
+ if (!IS_ERR_OR_NULL(obj)) {
+ struct vc4_bo *bo = to_vc4_bo(obj);
+ bo->dma_buf_import_export = true;
+ }
+
+ return obj;
+}
+
+struct dma_buf *
+vc4_prime_export(struct drm_device *dev, struct drm_gem_object *obj, int flags)
+{
+ struct vc4_bo *bo = to_vc4_bo(obj);
+
+ mutex_lock(&dev->struct_mutex);
+ if (bo->validated_shader) {
+ mutex_unlock(&dev->struct_mutex);
+ DRM_ERROR("Attempting to export shader BO\n");
+ return ERR_PTR(-EINVAL);
+ }
+ bo->dma_buf_import_export = true;
+ mutex_unlock(&dev->struct_mutex);
+
+ return drm_gem_prime_export(dev, obj, flags);
+}
+
+int
+vc4_create_bo_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_vc4_create_bo *args = data;
+ struct vc4_bo *bo = NULL;
+ int ret;
+
+ args->size = roundup(args->size, PAGE_SIZE);
+ if (args->size == 0)
+ return -EINVAL;
+
+ mutex_lock(&dev->struct_mutex);
+ bo = vc4_bo_create(dev, args->size);
+ mutex_unlock(&dev->struct_mutex);
+ if (!bo)
+ return -ENOMEM;
+
+ ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);
+ drm_gem_object_unreference_unlocked(&bo->base.base);
+
+ return ret;
+}
+
+int
+vc4_mmap_bo_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_vc4_mmap_bo *args = data;
+ struct drm_gem_object *gem_obj;
+
+ gem_obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+ if (!gem_obj) {
+ DRM_ERROR("Failed to look up GEM BO %d\n", args->handle);
+ return -EINVAL;
+ }
+
+ /* The mmap offset was set up at BO allocation time. */
+ args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node);
+
+ drm_gem_object_unreference(gem_obj);
+ return 0;
+}
+
+#ifdef CONFIG_DEBUG_FS
+int vc4_bo_stats_debugfs(struct seq_file *m, void *unused)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+
+ mutex_lock(&dev->struct_mutex);
+
+ seq_printf(m, "num bos allocated: %d\n",
+ bo_stats.num_allocated);
+ seq_printf(m, "size bos allocated: %dkb\n",
+ bo_stats.size_allocated / 1024);
+ seq_printf(m, "num bos used: %d\n",
+ bo_stats.num_allocated - bo_stats.num_cached);
+ seq_printf(m, "size bos used: %dkb\n",
+ (bo_stats.size_allocated - bo_stats.size_cached) / 1024);
+ seq_printf(m, "num bos cached: %d\n",
+ bo_stats.num_cached);
+ seq_printf(m, "size bos cached: %dkb\n",
+ bo_stats.size_cached / 1024);
+
+ mutex_unlock(&dev->struct_mutex);
+
+ return 0;
+}
+#endif
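
Seen from userspace, the two ioctls added here pair up: DRM_IOCTL_VC4_CREATE_BO hands back a GEM handle, and DRM_IOCTL_VC4_MMAP_BO reports the fake mmap offset set up at allocation time. A hedged sketch, assuming the uapi structs in include/uapi/drm/vc4_drm.h carry exactly the size/handle and handle/offset fields the handlers above touch:

/* Userspace sketch: create a vc4 BO, then map it through the DRM fd.
 * Struct field names mirror the kernel handlers above; the precise
 * uapi layout in vc4_drm.h is assumed, not confirmed. */
#include <stdint.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <drm/vc4_drm.h>

static void *example_map_new_bo(int drm_fd, uint64_t size)
{
	struct drm_vc4_create_bo create = { .size = size };
	struct drm_vc4_mmap_bo map = { 0 };

	if (ioctl(drm_fd, DRM_IOCTL_VC4_CREATE_BO, &create) != 0)
		return NULL;

	map.handle = create.handle;
	if (ioctl(drm_fd, DRM_IOCTL_VC4_MMAP_BO, &map) != 0)
		return NULL;

	/* The offset indexes the GEM mmap space of the DRM fd. */
	return mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
		    drm_fd, map.offset);
}
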
diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
new file mode 100644
index 000000000000..d1ded0b5a547
--- /dev/null
+++ b/drivers/gpu/drm/vc4/vc4_crtc.c
@@ -0,0 +1,445 @@
+/*
+ * Copyright (C) 2015 Broadcom
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * Controls the timings of the hardware's pixel valve.
+ */
+
+#include "drm_atomic_helper.h"
+#include "drm_crtc_helper.h"
+#include "linux/component.h"
+#include "vc4_drv.h"
+#include "vc4_regs.h"
+
+#define CRTC_WRITE(offset, val) writel(val, vc4_crtc->regs + (offset))
+#define CRTC_READ(offset) readl(vc4_crtc->regs + (offset))
+
+#define CRTC_REG(reg) { reg, #reg }
+static const struct {
+ u32 reg;
+ const char *name;
+} crtc_regs[] = {
+ CRTC_REG(PV_CONTROL),
+ CRTC_REG(PV_V_CONTROL),
+ CRTC_REG(PV_VSYNCD),
+ CRTC_REG(PV_HORZA),
+ CRTC_REG(PV_HORZB),
+ CRTC_REG(PV_VERTA),
+ CRTC_REG(PV_VERTB),
+ CRTC_REG(PV_VERTA_EVEN),
+ CRTC_REG(PV_VERTB_EVEN),
+ CRTC_REG(PV_INTEN),
+ CRTC_REG(PV_INTSTAT),
+ CRTC_REG(PV_STAT),
+ CRTC_REG(PV_HACT_ACT),
+};
+
+static void
+vc4_crtc_dump_regs(struct vc4_crtc *vc4_crtc)
+{
+ int i;
+
+ rmb();
+ for (i = 0; i < ARRAY_SIZE(crtc_regs); i++) {
+ DRM_INFO("0x%04x (%s): 0x%08x\n",
+ crtc_regs[i].reg, crtc_regs[i].name,
+ CRTC_READ(crtc_regs[i].reg));
+ }
+}
+
+static void vc4_crtc_destroy(struct drm_crtc *crtc)
+{
+ drm_crtc_cleanup(crtc);
+}
+
+static bool vc4_crtc_mode_fixup(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+static u32 vc4_get_fifo_full_level(u32 format)
+{
+ static const u32 fifo_len_bytes = 64;
+ static const u32 hvs_latency_pix = 6;
+
+ switch (format) {
+ case PV_CONTROL_FORMAT_DSIV_16:
+ case PV_CONTROL_FORMAT_DSIC_16:
+ return fifo_len_bytes - 2 * hvs_latency_pix;
+ case PV_CONTROL_FORMAT_DSIV_18:
+ return fifo_len_bytes - 14;
+ case PV_CONTROL_FORMAT_24:
+ case PV_CONTROL_FORMAT_DSIV_24:
+ default:
+ return fifo_len_bytes - 3 * hvs_latency_pix;
+ }
+}
+
+static void vc4_crtc_mode_set_nofb(struct drm_crtc *crtc)
+{
+ struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
+ struct drm_crtc_state *state = crtc->state;
+ struct drm_display_mode *mode = &state->adjusted_mode;
+ u32 vactive = (mode->vdisplay >>
+ ((mode->flags & DRM_MODE_FLAG_INTERLACE) ? 1 : 0));
+ u32 format = PV_CONTROL_FORMAT_24;
+ bool debug_dump_regs = false;
+
+ if (debug_dump_regs) {
+ DRM_INFO("CRTC %d regs before:\n", drm_crtc_index(crtc));
+ vc4_crtc_dump_regs(vc4_crtc);
+ }
+
+ /* Reset the PV fifo. */
+ CRTC_WRITE(PV_CONTROL, 0);
+ CRTC_WRITE(PV_CONTROL, PV_CONTROL_FIFO_CLR | PV_CONTROL_EN);
+ CRTC_WRITE(PV_CONTROL, 0);
+
+ CRTC_WRITE(PV_HORZA,
+ VC4_SET_FIELD(mode->htotal - mode->hsync_end,
+ PV_HORZA_HBP) |
+ VC4_SET_FIELD(mode->hsync_end - mode->hsync_start,
+ PV_HORZA_HSYNC));
+ CRTC_WRITE(PV_HORZB,
+ VC4_SET_FIELD(mode->hsync_start - mode->hdisplay,
+ PV_HORZB_HFP) |
+ VC4_SET_FIELD(mode->hdisplay, PV_HORZB_HACTIVE));
+
+ CRTC_WRITE(PV_VERTA,
+ VC4_SET_FIELD(mode->vtotal - mode->vsync_end,
+ PV_VERTA_VBP) |
+ VC4_SET_FIELD(mode->vsync_end - mode->vsync_start,
+ PV_VERTA_VSYNC));
+ CRTC_WRITE(PV_VERTB,
+ VC4_SET_FIELD(mode->vsync_start - mode->vdisplay,
+ PV_VERTB_VFP) |
+ VC4_SET_FIELD(vactive, PV_VERTB_VACTIVE));
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
+ /* Write PV_VERTA_EVEN/VERTB_EVEN */
+ }
+
+ CRTC_WRITE(PV_HACT_ACT, mode->hdisplay);
+
+ CRTC_WRITE(PV_CONTROL,
+ VC4_SET_FIELD(format, PV_CONTROL_FORMAT) |
+ VC4_SET_FIELD(vc4_get_fifo_full_level(format),
+ PV_CONTROL_FIFO_LEVEL) |
+ PV_CONTROL_CLR_AT_START |
+ PV_CONTROL_TRIGGER_UNDERFLOW |
+ PV_CONTROL_WAIT_HSTART |
+ PV_CONTROL_CLK_MUX_EN |
+ VC4_SET_FIELD(PV_CONTROL_CLK_SELECT_DPI_SMI_HDMI,
+ PV_CONTROL_CLK_SELECT) |
+ PV_CONTROL_FIFO_CLR |
+ PV_CONTROL_EN);
+
+ CRTC_WRITE(PV_V_CONTROL,
+ PV_VCONTROL_CONTINUOUS);
+
+ if (debug_dump_regs) {
+ DRM_INFO("CRTC %d regs after:\n", drm_crtc_index(crtc));
+ vc4_crtc_dump_regs(vc4_crtc);
+ }
+}
+
+static void vc4_crtc_disable(struct drm_crtc *crtc)
+{
+ struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
+
+ CRTC_WRITE(PV_V_CONTROL,
+ CRTC_READ(PV_V_CONTROL) & ~PV_VCONTROL_VIDEN);
+
+ if (drm_crtc_index(crtc) == 0) {
+ do {
+ cpu_relax();
+ } while (CRTC_READ(PV_STAT) & (PV_STAT_RUNNING_MASK));
+ }
+}
+
+static void vc4_crtc_enable(struct drm_crtc *crtc)
+{
+ struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
+
+ CRTC_WRITE(PV_V_CONTROL,
+ CRTC_READ(PV_V_CONTROL) | PV_VCONTROL_VIDEN);
+}
+
+static int vc4_crtc_atomic_check(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
+{
+ struct drm_device *dev = crtc->dev;
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+ struct drm_plane *plane;
+ struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
+ u32 dlist_count = 0;
+
+ drm_atomic_crtc_state_for_each_plane(plane, state) {
+ struct drm_plane_state *plane_state =
+ state->state->plane_states[drm_plane_index(plane)];
+
+ /* plane might not have changed, in which case take
+ * current state:
+ */
+ if (!plane_state)
+ plane_state = plane->state;
+
+ dlist_count += vc4_plane_dlist_size(plane_state);
+ }
+
+ dlist_count++; /* Account for SCALER_CTL0_END. */
+
+ if (!vc4_crtc->dlist || dlist_count > vc4_crtc->dlist_size) {
+ vc4_crtc->dlist = ((u32 __iomem *)vc4->hvs->dlist +
+ HVS_BOOTLOADER_DLIST_END);
+ vc4_crtc->dlist_size = ((SCALER_DLIST_SIZE >> 2) -
+ HVS_BOOTLOADER_DLIST_END);
+
+ if (dlist_count > vc4_crtc->dlist_size) {
+ DRM_DEBUG_KMS("dlist too large for CRTC (%d > %d).\n",
+ dlist_count, vc4_crtc->dlist_size);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static void vc4_crtc_atomic_begin(struct drm_crtc *crtc)
+{
+}
+
+static void vc4_crtc_atomic_flush(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+ struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
+ struct drm_plane *plane;
+ bool debug_dump_regs = false;
+ u32 __iomem *dlist_next = vc4_crtc->dlist;
+
+ if (debug_dump_regs) {
+ DRM_INFO("CRTC %d HVS before:\n", drm_crtc_index(crtc));
+ vc4_hvs_dump_state(dev);
+ }
+
+ /*
+ * Copy all the active planes' dlist contents to the hardware dlist.
+ *
+ * XXX: If the new display list was large enough that it
+ * overlapped a currently-read display list, we need to do
+ * something like disable scanout before putting in the new
+ * list.
+ */
+ drm_atomic_crtc_for_each_plane(plane, crtc) {
+ dlist_next += vc4_plane_write_dlist(plane, dlist_next);
+ }
+
+ if (dlist_next == vc4_crtc->dlist) {
+ /* If no planes were enabled, use the SCALER_CTL0_END
+ * at the start of the display list memory (in the
+ * bootloader section). We'll rewrite that
+ * SCALER_CTL0_END, just in case, though.
+ */
+ writel(SCALER_CTL0_END, vc4->hvs->dlist);
+ HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel), 0);
+ } else {
+ writel(SCALER_CTL0_END, dlist_next);
+ dlist_next++;
+
+ HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel),
+ (u32 *)vc4_crtc->dlist - (u32 *)vc4->hvs->dlist);
+
+ /* Make the next display list start after ours. */
+ vc4_crtc->dlist_size -= (dlist_next - vc4_crtc->dlist);
+ vc4_crtc->dlist = dlist_next;
+ }
+
+ if (debug_dump_regs) {
+ DRM_INFO("CRTC %d HVS after:\n", drm_crtc_index(crtc));
+ vc4_hvs_dump_state(dev);
+ }
+}
+
+int vc4_enable_vblank(struct drm_device *dev, int crtc_id)
+{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+ struct vc4_crtc *vc4_crtc = vc4->crtc[crtc_id];
+
+ CRTC_WRITE(PV_INTEN, PV_INT_VFP_START);
+
+ return 0;
+}
+
+void vc4_disable_vblank(struct drm_device *dev, int crtc_id)
+{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+ struct vc4_crtc *vc4_crtc = vc4->crtc[crtc_id];
+
+ CRTC_WRITE(PV_INTEN, 0);
+}
+
+static irqreturn_t vc4_crtc_irq_handler(int irq, void *data)
+{
+ struct vc4_crtc *vc4_crtc = data;
+ u32 stat = CRTC_READ(PV_INTSTAT);
+ irqreturn_t ret = IRQ_NONE;
+
+ if (stat & PV_INT_VFP_START) {
+ drm_crtc_handle_vblank(&vc4_crtc->base);
+ CRTC_WRITE(PV_INTSTAT, PV_INT_VFP_START);
+ ret = IRQ_HANDLED;
+ }
+
+ return ret;
+}
+
+static const struct drm_crtc_funcs vc4_crtc_funcs = {
+ .set_config = drm_atomic_helper_set_config,
+ .destroy = vc4_crtc_destroy,
+ .page_flip = drm_atomic_helper_page_flip,
+ .set_property = NULL,
+ .cursor_set = NULL, /* handled by drm_mode_cursor_universal */
+ .cursor_move = NULL, /* handled by drm_mode_cursor_universal */
+ .reset = drm_atomic_helper_crtc_reset,
+ .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+};
+
+static const struct drm_crtc_helper_funcs vc4_crtc_helper_funcs = {
+ .mode_fixup = vc4_crtc_mode_fixup,
+ .mode_set_nofb = vc4_crtc_mode_set_nofb,
+ .disable = vc4_crtc_disable,
+ .enable = vc4_crtc_enable,
+ .atomic_check = vc4_crtc_atomic_check,
+ .atomic_begin = vc4_crtc_atomic_begin,
+ .atomic_flush = vc4_crtc_atomic_flush,
+};
+
+static int vc4_crtc_bind(struct device *dev, struct device *master, void *data)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct drm_device *drm = dev_get_drvdata(master);
+ struct vc4_dev *vc4 = to_vc4_dev(drm);
+ struct vc4_crtc *vc4_crtc;
+ struct drm_crtc *crtc;
+ struct drm_plane *primary_plane, *cursor_plane;
+ int ret;
+
+ primary_plane = vc4_plane_init(drm, DRM_PLANE_TYPE_PRIMARY);
+ if (IS_ERR(primary_plane)) {
+ dev_err(dev, "failed to construct primary plane\n");
+ ret = PTR_ERR(primary_plane);
+ goto fail;
+ }
+
+ cursor_plane = vc4_plane_init(drm, DRM_PLANE_TYPE_CURSOR);
+ if (IS_ERR(cursor_plane)) {
+ dev_err(dev, "failed to construct cursor plane\n");
+ ret = PTR_ERR(cursor_plane);
+ goto fail;
+ }
+
+ vc4_crtc = devm_kzalloc(dev, sizeof(*vc4_crtc), GFP_KERNEL);
+ if (!vc4_crtc)
+ return -ENOMEM;
+ crtc = &vc4_crtc->base;
+
+ vc4_crtc->regs = vc4_ioremap_regs(pdev, 0);
+ if (IS_ERR(vc4_crtc->regs))
+ return PTR_ERR(vc4_crtc->regs);
+
+ drm_crtc_init_with_planes(drm, crtc, primary_plane, cursor_plane,
+ &vc4_crtc_funcs);
+ drm_crtc_helper_add(crtc, &vc4_crtc_helper_funcs);
+ primary_plane->crtc = crtc;
+ cursor_plane->crtc = crtc;
+ vc4->crtc[drm_crtc_index(crtc)] = vc4_crtc;
+
+ /* Until we have full scanout setup to route things through to
+ * encoders, line things up like the firmware did.
+ */
+ switch (drm_crtc_index(crtc)) {
+ case 0:
+ vc4_crtc->channel = 0;
+ break;
+ case 1:
+ vc4_crtc->channel = 2;
+ break;
+ default:
+ case 2:
+ vc4_crtc->channel = 1;
+ break;
+ }
+
+ CRTC_WRITE(PV_INTEN, 0);
+ CRTC_WRITE(PV_INTSTAT, PV_INT_VFP_START);
+ ret = devm_request_irq(dev, platform_get_irq(pdev, 0),
+ vc4_crtc_irq_handler, 0, "vc4 crtc", vc4_crtc);
+ if (ret)
+ goto fail;
+
+ platform_set_drvdata(pdev, vc4_crtc);
+
+ return 0;
+
+fail:
+ return ret;
+}
+
+static void vc4_crtc_unbind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct vc4_crtc *vc4_crtc = dev_get_drvdata(dev);
+
+ vc4_crtc_destroy(&vc4_crtc->base);
+
+ CRTC_WRITE(PV_INTEN, 0);
+
+ platform_set_drvdata(pdev, NULL);
+}
+
+static const struct component_ops vc4_crtc_ops = {
+ .bind = vc4_crtc_bind,
+ .unbind = vc4_crtc_unbind,
+};
+
+static int vc4_crtc_dev_probe(struct platform_device *pdev)
+{
+ return component_add(&pdev->dev, &vc4_crtc_ops);
+}
+
+static int vc4_crtc_dev_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &vc4_crtc_ops);
+ return 0;
+}
+
+static const struct of_device_id vc4_crtc_dt_match[] = {
+ { .compatible = "brcm,vc4-pixelvalve" },
+ {}
+};
+
+static struct platform_driver vc4_crtc_driver = {
+ .probe = vc4_crtc_dev_probe,
+ .remove = vc4_crtc_dev_remove,
+ .driver = {
+ .name = "vc4_crtc",
+ .of_match_table = vc4_crtc_dt_match,
+ },
+};
+
+void __init vc4_crtc_register(void)
+{
+ platform_driver_register(&vc4_crtc_driver);
+}
+
+void __exit vc4_crtc_unregister(void)
+{
+ platform_driver_unregister(&vc4_crtc_driver);
+}
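
As a worked example of the register arithmetic in vc4_crtc_mode_set_nofb(), take the standard 640x480@60 DMT mode:

/*
 * Worked example for vc4_crtc_mode_set_nofb(), using 640x480@60
 * (hdisplay 640, hsync_start 656, hsync_end 752, htotal 800;
 *  vdisplay 480, vsync_start 490, vsync_end 492, vtotal 525):
 *
 *   PV_HORZA_HBP     = htotal - hsync_end      = 800 - 752 =  48
 *   PV_HORZA_HSYNC   = hsync_end - hsync_start = 752 - 656 =  96
 *   PV_HORZB_HFP     = hsync_start - hdisplay  = 656 - 640 =  16
 *   PV_HORZB_HACTIVE = hdisplay                            = 640
 *   PV_VERTA_VBP     = vtotal - vsync_end      = 525 - 492 =  33
 *   PV_VERTA_VSYNC   = vsync_end - vsync_start = 492 - 490 =   2
 *   PV_VERTB_VFP     = vsync_start - vdisplay  = 490 - 480 =  10
 *   PV_VERTB_VACTIVE = vactive (progressive)               = 480
 */
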
diff --git a/drivers/gpu/drm/vc4/vc4_debugfs.c b/drivers/gpu/drm/vc4/vc4_debugfs.c
new file mode 100644
index 000000000000..ca4743126736
--- /dev/null
+++ b/drivers/gpu/drm/vc4/vc4_debugfs.c
@@ -0,0 +1,40 @@
+/*
+ * Copyright © 2014 Broadcom
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/seq_file.h>
+#include <linux/circ_buf.h>
+#include <linux/ctype.h>
+#include <linux/debugfs.h>
+#include <drm/drmP.h>
+
+#include "vc4_drv.h"
+#include "vc4_regs.h"
+
+static const struct drm_info_list vc4_debugfs_list[] = {
+ {"bo_stats", vc4_bo_stats_debugfs, 0},
+ {"v3d_ident", vc4_v3d_debugfs_ident, 0},
+ {"v3d_regs", vc4_v3d_debugfs_regs, 0},
+ {"hdmi_regs", vc4_hdmi_debugfs_regs, 0},
+ {"hvs_regs", vc4_hvs_debugfs_regs, 0},
+};
+#define VC4_DEBUGFS_ENTRIES ARRAY_SIZE(vc4_debugfs_list)
+
+int
+vc4_debugfs_init(struct drm_minor *minor)
+{
+ return drm_debugfs_create_files(vc4_debugfs_list,
+ VC4_DEBUGFS_ENTRIES,
+ minor->debugfs_root, minor);
+}
+
+void
+vc4_debugfs_cleanup(struct drm_minor *minor)
+{
+ drm_debugfs_remove_files(vc4_debugfs_list,
+ VC4_DEBUGFS_ENTRIES, minor);
+}
diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c
new file mode 100644
index 000000000000..8d32dca645ad
--- /dev/null
+++ b/drivers/gpu/drm/vc4/vc4_drv.c
@@ -0,0 +1,280 @@
+/*
+ * Copyright (C) 2014-2015 Broadcom
+ * Copyright (C) 2013 Red Hat
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/component.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+
+#include "uapi/drm/vc4_drm.h"
+#include "vc4_drv.h"
+#include "vc4_regs.h"
+
+#define DRIVER_NAME "vc4"
+#define DRIVER_DESC "Broadcom VC4 graphics"
+#define DRIVER_DATE "20140616"
+#define DRIVER_MAJOR 0
+#define DRIVER_MINOR 0
+#define DRIVER_PATCHLEVEL 0
+
+/*
+ * Helper function for mapping the regs on a platform device.
+ *
+ * We assume only one register range per device, so we use index 0.
+ */
+void __iomem *
+vc4_ioremap_regs(struct platform_device *dev, int index)
+{
+ struct resource *res;
+ void __iomem *map;
+
+ res = platform_get_resource(dev, IORESOURCE_MEM, index);
+ map = devm_ioremap_resource(&dev->dev, res);
+ if (IS_ERR(map)) {
+ int ret = PTR_ERR(map);
+ DRM_ERROR("Failed to map registers: %d\n", ret);
+ return map;
+ }
+
+ return map;
+}
+
+static int
+vc4_drm_load(struct drm_device *dev, unsigned long flags)
+{
+ struct platform_device *firmware_pdev;
+ struct vc4_dev *vc4;
+ int ret;
+
+ vc4 = devm_kzalloc(dev->dev, sizeof(*vc4), GFP_KERNEL);
+ if (!vc4)
+ return -ENOMEM;
+
+ vc4->firmware_node = of_parse_phandle(dev->dev->of_node, "firmware", 0);
+ if (!vc4->firmware_node) {
+ DRM_ERROR("Failed to parse firmware node.\n");
+ return -EINVAL;
+ }
+ firmware_pdev = of_find_device_by_node(vc4->firmware_node);
+ if (!firmware_pdev || !platform_get_drvdata(firmware_pdev)) {
+ DRM_DEBUG("firmware device not probed yet.\n");
+ return -EPROBE_DEFER;
+ }
+
+ dev_set_drvdata(dev->dev, dev);
+ vc4->dev = dev;
+ dev->dev_private = vc4;
+
+ drm_mode_config_init(dev);
+
+ vc4_gem_init(dev);
+
+ ret = component_bind_all(dev->dev, dev);
+ if (ret) {
+ vc4_gem_destroy(dev);
+ return ret;
+ }
+
+ vc4_kms_load(dev);
+
+ return 0;
+}
+
+static int vc4_drm_unload(struct drm_device *dev)
+{
+ drm_mode_config_cleanup(dev);
+
+ component_unbind_all(dev->dev, dev);
+
+ return 0;
+}
+
+static const struct file_operations vc4_drm_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .unlocked_ioctl = drm_ioctl,
+ .mmap = drm_gem_cma_mmap,
+ .poll = drm_poll,
+ .read = drm_read,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = drm_compat_ioctl,
+#endif
+ .llseek = noop_llseek,
+};
+
+static const struct drm_ioctl_desc vc4_drm_ioctls[] = {
+ DRM_IOCTL_DEF_DRV(VC4_SUBMIT_CL, vc4_submit_cl_ioctl, 0),
+ DRM_IOCTL_DEF_DRV(VC4_WAIT_SEQNO, vc4_wait_seqno_ioctl, 0),
+ DRM_IOCTL_DEF_DRV(VC4_WAIT_BO, vc4_wait_bo_ioctl, 0),
+ DRM_IOCTL_DEF_DRV(VC4_CREATE_BO, vc4_create_bo_ioctl, 0),
+ DRM_IOCTL_DEF_DRV(VC4_MMAP_BO, vc4_mmap_bo_ioctl, 0),
+};
+
+static struct drm_driver vc4_drm_driver = {
+ .driver_features = (DRIVER_MODESET |
+ DRIVER_GEM |
+ DRIVER_HAVE_IRQ |
+ DRIVER_PRIME),
+ .load = vc4_drm_load,
+ .unload = vc4_drm_unload,
+ .set_busid = drm_platform_set_busid,
+
+ .irq_handler = vc4_irq,
+ .irq_preinstall = vc4_irq_preinstall,
+ .irq_postinstall = vc4_irq_postinstall,
+ .irq_uninstall = vc4_irq_uninstall,
+
+ .enable_vblank = vc4_enable_vblank,
+ .disable_vblank = vc4_disable_vblank,
+ .get_vblank_counter = drm_vblank_count,
+
+#if defined(CONFIG_DEBUG_FS)
+ .debugfs_init = vc4_debugfs_init,
+ .debugfs_cleanup = vc4_debugfs_cleanup,
+#endif
+
+ .gem_free_object = vc4_free_object,
+ .gem_vm_ops = &drm_gem_cma_vm_ops,
+
+ .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+ .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+ .gem_prime_import = vc4_prime_import,
+ .gem_prime_export = vc4_prime_export,
+ .gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
+ .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
+ .gem_prime_vmap = drm_gem_cma_prime_vmap,
+ .gem_prime_vunmap = drm_gem_cma_prime_vunmap,
+ .gem_prime_mmap = drm_gem_cma_prime_mmap,
+
+ .dumb_create = vc4_dumb_create,
+ .dumb_map_offset = drm_gem_cma_dumb_map_offset,
+ .dumb_destroy = drm_gem_dumb_destroy,
+
+ .ioctls = vc4_drm_ioctls,
+ .num_ioctls = ARRAY_SIZE(vc4_drm_ioctls),
+ .fops = &vc4_drm_fops,
+
+ .gem_obj_size = sizeof(struct vc4_bo),
+
+ .name = DRIVER_NAME,
+ .desc = DRIVER_DESC,
+ .date = DRIVER_DATE,
+ .major = DRIVER_MAJOR,
+ .minor = DRIVER_MINOR,
+ .patchlevel = DRIVER_PATCHLEVEL,
+};
+
+static int vc4_drm_bind(struct device *dev)
+{
+ return drm_platform_init(&vc4_drm_driver, to_platform_device(dev));
+}
+
+static void vc4_drm_unbind(struct device *dev)
+{
+ drm_put_dev(platform_get_drvdata(to_platform_device(dev)));
+}
+
+static const struct component_master_ops vc4_drm_ops = {
+ .bind = vc4_drm_bind,
+ .unbind = vc4_drm_unbind,
+};
+
+/* NOTE: the CONFIG_OF case duplicates the same code as exynos or imx
+ * (or probably any other), so there is probably room for some shared
+ * helpers.
+ */
+static int compare_of(struct device *dev, void *data)
+{
+ return dev->of_node == data;
+}
+
+static int add_components(struct device *dev, struct component_match **matchptr,
+ const char *name)
+{
+ struct device_node *np = dev->of_node;
+ unsigned i;
+
+ for (i = 0; ; i++) {
+ struct device_node *node;
+
+ node = of_parse_phandle(np, name, i);
+ if (!node)
+ break;
+
+ component_match_add(dev, matchptr, compare_of, node);
+ }
+
+ return 0;
+}
+
+static int
+vc4_platform_drm_probe(struct platform_device *pdev)
+{
+ struct component_match *match = NULL;
+
+ add_components(&pdev->dev, &match, "gpus");
+ add_components(&pdev->dev, &match, "crtcs");
+ add_components(&pdev->dev, &match, "encoders");
+ add_components(&pdev->dev, &match, "hvss");
+
+ pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+ return component_master_add_with_match(&pdev->dev, &vc4_drm_ops, match);
+}
+
+static int
+vc4_platform_drm_remove(struct platform_device *pdev)
+{
+ drm_put_dev(platform_get_drvdata(pdev));
+
+ return 0;
+}
+
+static const struct of_device_id vc4_of_match[] = {
+ { .compatible = "brcm,vc4", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, vc4_of_match);
+
+static struct platform_driver vc4_platform_driver = {
+ .probe = vc4_platform_drm_probe,
+ .remove = vc4_platform_drm_remove,
+ .driver = {
+ .name = "vc4-drm",
+ .owner = THIS_MODULE,
+ .of_match_table = vc4_of_match,
+ },
+};
+
+static int __init vc4_drm_register(void)
+{
+ vc4_v3d_register();
+ vc4_hdmi_register();
+ vc4_crtc_register();
+ vc4_hvs_register();
+ return platform_driver_register(&vc4_platform_driver);
+}
+
+static void __exit vc4_drm_unregister(void)
+{
+ platform_driver_unregister(&vc4_platform_driver);
+ vc4_hvs_unregister();
+ vc4_crtc_unregister();
+ vc4_hdmi_unregister();
+ vc4_v3d_unregister();
+}
+
+module_init(vc4_drm_register);
+module_exit(vc4_drm_unregister);
+
+MODULE_ALIAS("platform:vc4-drm");
+MODULE_DESCRIPTION("Broadcom VC4 DRM Driver");
+MODULE_AUTHOR("Eric Anholt <eric@anholt.net>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h
new file mode 100644
index 000000000000..9fdddf823114
--- /dev/null
+++ b/drivers/gpu/drm/vc4/vc4_drv.h
@@ -0,0 +1,448 @@
+/*
+ * Copyright (C) 2015 Broadcom
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "drmP.h"
+#include "drm_gem_cma_helper.h"
+
+struct vc4_dev {
+ struct drm_device *dev;
+
+ struct device_node *firmware_node;
+
+ struct vc4_hdmi *hdmi;
+ struct vc4_hvs *hvs;
+ struct vc4_crtc *crtc[3];
+ struct vc4_v3d *v3d;
+
+ /* Sequence number for the last job queued in job_list.
+ * Starts at 0 (no jobs emitted).
+ */
+ uint64_t emit_seqno;
+
+ /* Sequence number for the last completed job on the GPU.
+ * Starts at 0 (no jobs completed).
+ */
+ uint64_t finished_seqno;
+
+ /* List of all struct vc4_exec_info for jobs to be executed.
+ * The first job in the list is the one currently programmed
+ * into ct0ca/ct1ca for execution.
+ */
+ struct list_head job_list;
+ /* List of the finished vc4_exec_infos waiting to be freed by
+ * job_done_work.
+ */
+ struct list_head job_done_list;
+ spinlock_t job_lock;
+ wait_queue_head_t job_wait_queue;
+ struct work_struct job_done_work;
+
+ /* The binner overflow memory that's currently set up in
+ * BPOA/BPOS registers. When overflow occurs and a new one is
+ * allocated, the previous one will be moved to
+ * vc4->current_exec's free list.
+ */
+ struct vc4_bo *overflow_mem;
+ struct work_struct overflow_mem_work;
+
+ /* The kernel-space BO cache. Tracks buffers that have been
+ * unreferenced by all other users (refcounts of 0!) but not
+ * yet freed, so we can do cheap allocations.
+ */
+ struct vc4_bo_cache {
+ /* Array of list heads for entries in the BO cache,
+ * based on number of pages, so we can do O(1) lookups
+ * in the cache when allocating.
+ */
+ struct list_head *size_list;
+ uint32_t size_list_size;
+
+ /* List of all BOs in the cache, ordered by age, so we
+ * can do O(1) lookups when trying to free old
+ * buffers.
+ */
+ struct list_head time_list;
+ struct work_struct time_work;
+ struct timer_list time_timer;
+ } bo_cache;
+
+ struct {
+ uint32_t last_ct0ca, last_ct1ca;
+ struct timer_list timer;
+ struct work_struct reset_work;
+ } hangcheck;
+};
+
+static inline struct vc4_dev *
+to_vc4_dev(struct drm_device *dev)
+{
+ return (struct vc4_dev *)dev->dev_private;
+}
+
+struct vc4_bo {
+ struct drm_gem_cma_object base;
+ /* seqno of the last job to render to this BO. */
+ uint64_t seqno;
+
+ /* List entry for the BO's position in either
+ * vc4_exec_info->unref_list or vc4_dev->bo_cache.time_list
+ */
+ struct list_head unref_head;
+
+ /* Time in jiffies when the BO was put in vc4->bo_cache. */
+ unsigned long free_time;
+
+ /* List entry for the BO's position in vc4_dev->bo_cache.size_list */
+ struct list_head size_head;
+
+ /* Cached state from validation of the shader code. */
+ struct vc4_validated_shader_info *validated_shader;
+
+ /* Set if the buffer has been either imported or exported via
+ * dmabufs. Used for shader mapping security checks.
+ */
+ bool dma_buf_import_export;
+};
+
+static inline struct vc4_bo *
+to_vc4_bo(struct drm_gem_object *bo)
+{
+ return (struct vc4_bo *)bo;
+}
+
+struct vc4_v3d {
+ struct platform_device *pdev;
+ void __iomem *regs;
+};
+
+struct vc4_hvs {
+ struct platform_device *pdev;
+ void __iomem *regs;
+ void __iomem *dlist;
+};
+
+struct vc4_crtc {
+ struct drm_crtc base;
+ void __iomem *regs;
+
+ /* Which HVS channel we're using for our CRTC. */
+ int channel;
+
+ /*
+ * Pointer to the actual hardware display list memory for the
+ * crtc.
+ */
+ u32 __iomem *dlist;
+
+ u32 dlist_size; /* in dwords */
+};
+
+static inline struct vc4_crtc *
+to_vc4_crtc(struct drm_crtc *crtc)
+{
+ return (struct vc4_crtc *)crtc;
+}
+
+struct vc4_plane {
+ struct drm_plane base;
+};
+
+static inline struct vc4_plane *
+to_vc4_plane(struct drm_plane *plane)
+{
+ return (struct vc4_plane *)plane;
+}
+
+#define V3D_READ(offset) readl(vc4->v3d->regs + (offset))
+#define V3D_WRITE(offset, val) writel(val, vc4->v3d->regs + (offset))
+#define HVS_READ(offset) readl(vc4->hvs->regs + (offset))
+#define HVS_WRITE(offset, val) writel(val, vc4->hvs->regs + (offset))
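+
+/* A sketch (not part of this patch) of how these accessors are used:
+ * they assume a "struct vc4_dev *vc4" local variable is in scope in
+ * the calling function, e.g. (V3D_IDENT0 standing in for any register
+ * offset from vc4_regs.h):
+ *
+ *     struct vc4_dev *vc4 = to_vc4_dev(dev);
+ *     uint32_t ident = V3D_READ(V3D_IDENT0);
+ */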
+
+enum vc4_bo_mode {
+ VC4_MODE_UNDECIDED,
+ VC4_MODE_RENDER,
+ VC4_MODE_SHADER,
+};
+
+struct vc4_bo_exec_state {
+ struct drm_gem_cma_object *bo;
+ enum vc4_bo_mode mode;
+};
+
+struct vc4_exec_info {
+ /* Sequence number for this bin/render job. */
+ uint64_t seqno;
+
+ /* Kernel-space copy of the ioctl arguments */
+ struct drm_vc4_submit_cl *args;
+
+ /* This is the array of BOs that were looked up at the start of exec.
+ * Command validation will use indices into this array.
+ */
+ struct vc4_bo_exec_state *bo;
+ uint32_t bo_count;
+
+ /* Pointers for our position in vc4->job_list */
+ struct list_head head;
+
+ /* List of other BOs used in the job that need to be released
+ * once the job is complete.
+ */
+ struct list_head unref_list;
+
+ /* Current unvalidated indices into @bo loaded by the non-hardware
+ * VC4_PACKET_GEM_HANDLES.
+ */
+ uint32_t bo_index[2];
+
+ /* This is the BO where we store the validated command lists, shader
+ * records, and uniforms.
+ */
+ struct drm_gem_cma_object *exec_bo;
+
+ /**
+ * This tracks the per-shader-record state (packet 64) that
+ * determines the length of the shader record and the offset
+ * it's expected to be found at. It gets read in from the
+ * command lists.
+ */
+ struct vc4_shader_state {
+ uint8_t packet;
+ uint32_t addr;
+ /* Maximum vertex index referenced by any primitive using this
+ * shader state.
+ */
+ uint32_t max_index;
+ } *shader_state;
+
+ /** How many shader states the user declared they were using. */
+ uint32_t shader_state_size;
+ /** How many shader state records the validator has seen. */
+ uint32_t shader_state_count;
+
+ bool found_tile_binning_mode_config_packet;
+ bool found_start_tile_binning_packet;
+ bool found_increment_semaphore_packet;
+ uint8_t bin_tiles_x, bin_tiles_y;
+ struct drm_gem_cma_object *tile_bo;
+ uint32_t tile_alloc_offset;
+
+ /**
+ * Computed addresses pointing into exec_bo where we start the
+ * bin thread (ct0) and render thread (ct1).
+ */
+ uint32_t ct0ca, ct0ea;
+ uint32_t ct1ca, ct1ea;
+
+ /* Pointers to the shader recs. These paddr gets incremented as CL
+ * packets are relocated in validate_gl_shader_state, and the vaddrs
+ * (u and v) get incremented and size decremented as the shader recs
+ * themselves are validated.
+ */
+ void *shader_rec_u;
+ void *shader_rec_v;
+ uint32_t shader_rec_p;
+ uint32_t shader_rec_size;
+
+ /* Pointers to the uniform data. These pointers are incremented, and
+ * size decremented, as each batch of uniforms is uploaded.
+ */
+ void *uniforms_u;
+ void *uniforms_v;
+ uint32_t uniforms_p;
+ uint32_t uniforms_size;
+};
+
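+/* Returns the job at the head of job_list (the one currently programmed
+ * into ct0ca/ct1ca), or NULL if the GPU is idle. Callers are expected
+ * to hold job_lock, or to otherwise know that the list can't change
+ * under them.
+ */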
+static inline struct vc4_exec_info *
+vc4_first_job(struct vc4_dev *vc4)
+{
+ if (list_empty(&vc4->job_list))
+ return NULL;
+ return list_first_entry(&vc4->job_list, struct vc4_exec_info, head);
+}
+
+/**
+ * struct vc4_texture_sample_info - saves the offsets into the UBO for texture
+ * setup parameters.
+ *
+ * This will be used at draw time to relocate the reference to the texture
+ * contents in p0, and validate that the offset combined with
+ * width/height/stride/etc. from p1 and p2/p3 doesn't sample outside the BO.
+ * Note that the hardware treats unprovided config parameters as 0, so not all
+ * of them need to be set up for every texture sample, and we'll store ~0 as
+ * the offset to mark the unused ones.
+ *
+ * See the VC4 3D architecture guide page 41 ("Texture and Memory Lookup Unit
+ * Setup") for definitions of the texture parameters.
+ */
+struct vc4_texture_sample_info {
+ bool is_direct;
+ uint32_t p_offset[4];
+};
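+
+/* A sketch (not driver code) of how a consumer would honor the ~0
+ * convention above; validate_tmu_param() is a hypothetical helper:
+ *
+ *     for (i = 0; i < 4; i++) {
+ *             if (sample->p_offset[i] == ~0)
+ *                     continue;
+ *             validate_tmu_param(sample, i);
+ *     }
+ */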
+
+/**
+ * struct vc4_validated_shader_info - information about validated shaders that
+ * needs to be used from command list validation.
+ *
+ * For a given shader, each time a shader state record references it, we need
+ * to verify that the shader doesn't read more uniforms than the shader state
+ * record's uniform BO pointer can provide, and we need to apply relocations
+ * and validate the shader state record's uniforms that define the texture
+ * samples.
+ */
+struct vc4_validated_shader_info {
+ uint32_t uniforms_size;
+ uint32_t uniforms_src_size;
+ uint32_t num_texture_samples;
+ struct vc4_texture_sample_info *texture_samples;
+};
+
+/* vc4_crtc.c */
+void vc4_crtc_register(void);
+void vc4_crtc_unregister(void);
+int vc4_enable_vblank(struct drm_device *dev, int crtc_id);
+void vc4_disable_vblank(struct drm_device *dev, int crtc_id);
+
+/**
+ * _wait_for - magic (register) wait macro
+ *
+ * Does the right thing for modeset paths when run under kgdb or similar atomic
+ * contexts. Note that it's important that we check the condition again after
+ * having timed out, since the timeout could be due to preemption or similar and
+ * we've never had a chance to check the condition before the timeout.
+ */
+#define _wait_for(COND, MS, W) ({ \
+ unsigned long timeout__ = jiffies + msecs_to_jiffies(MS) + 1; \
+ int ret__ = 0; \
+ while (!(COND)) { \
+ if (time_after(jiffies, timeout__)) { \
+ if (!(COND)) \
+ ret__ = -ETIMEDOUT; \
+ break; \
+ } \
+ if (W && drm_can_sleep()) { \
+ msleep(W); \
+ } else { \
+ cpu_relax(); \
+ } \
+ } \
+ ret__; \
+})
+
+#define wait_for(COND, MS) _wait_for(COND, MS, 1)
+
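+/* An illustrative (hypothetical) use of wait_for(): poll for up to
+ * 30ms, sleeping ~1ms between reads, until the HDMI scheduler reports
+ * that HDMI mode is active:
+ *
+ *     ret = wait_for(HDMI_READ(VC4_HDMI_SCHEDULER_CONTROL) &
+ *                    VC4_HDMI_SCHEDULER_CONTROL_HDMI_ACTIVE, 30);
+ *     if (ret == -ETIMEDOUT)
+ *             DRM_ERROR("HDMI state machine failed to activate\n");
+ */
+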
+/* vc4_bo.c */
+void vc4_bo_cache_init(struct drm_device *dev);
+void vc4_bo_cache_destroy(struct drm_device *dev);
+void vc4_free_object(struct drm_gem_object *gem_obj);
+struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t size);
+int vc4_dumb_create(struct drm_file *file_priv,
+ struct drm_device *dev,
+ struct drm_mode_create_dumb *args);
+struct drm_gem_object *vc4_prime_import(struct drm_device *dev,
+ struct dma_buf *dma_buf);
+struct dma_buf *vc4_prime_export(struct drm_device *dev,
+ struct drm_gem_object *obj, int flags);
+int vc4_create_bo_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int vc4_mmap_bo_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int vc4_bo_stats_debugfs(struct seq_file *m, void *unused);
+
+/* vc4_debugfs.c */
+int vc4_debugfs_init(struct drm_minor *minor);
+void vc4_debugfs_cleanup(struct drm_minor *minor);
+
+/* vc4_drv.c */
+void __iomem *vc4_ioremap_regs(struct platform_device *dev, int index);
+
+/* vc4_gem.c */
+void vc4_gem_init(struct drm_device *dev);
+void vc4_gem_destroy(struct drm_device *dev);
+int vc4_submit_cl_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int vc4_wait_seqno_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int vc4_wait_bo_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+void vc4_submit_next_job(struct drm_device *dev);
+int vc4_wait_for_seqno(struct drm_device *dev, uint64_t seqno,
+ uint64_t timeout_ns);
+void vc4_job_handle_completed(struct vc4_dev *vc4);
+
+/* vc4_hdmi.c */
+void vc4_hdmi_register(void);
+void vc4_hdmi_unregister(void);
+struct drm_encoder *vc4_hdmi_encoder_init(struct drm_device *dev);
+struct drm_connector *vc4_hdmi_connector_init(struct drm_device *dev,
+ struct drm_encoder *encoder);
+int vc4_hdmi_debugfs_regs(struct seq_file *m, void *unused);
+
+/* vc4_irq.c */
+irqreturn_t vc4_irq(int irq, void *arg);
+void vc4_irq_preinstall(struct drm_device *dev);
+int vc4_irq_postinstall(struct drm_device *dev);
+void vc4_irq_uninstall(struct drm_device *dev);
+void vc4_irq_reset(struct drm_device *dev);
+
+/* vc4_hvs.c */
+void vc4_hvs_register(void);
+void vc4_hvs_unregister(void);
+void vc4_hvs_dump_state(struct drm_device *dev);
+int vc4_hvs_debugfs_regs(struct seq_file *m, void *unused);
+
+/* vc4_kms.c */
+int vc4_kms_load(struct drm_device *dev);
+
+/* vc4_plane.c */
+struct drm_plane *vc4_plane_init(struct drm_device *dev,
+ enum drm_plane_type type);
+u32 vc4_plane_write_dlist(struct drm_plane *plane, u32 __iomem *dlist);
+u32 vc4_plane_dlist_size(struct drm_plane_state *state);
+
+/* vc4_v3d.c */
+void vc4_v3d_register(void);
+void vc4_v3d_unregister(void);
+int vc4_v3d_debugfs_ident(struct seq_file *m, void *unused);
+int vc4_v3d_debugfs_regs(struct seq_file *m, void *unused);
+int vc4_v3d_set_power(struct vc4_dev *vc4, bool on);
+
+/* vc4_validate.c */
+int
+vc4_validate_bin_cl(struct drm_device *dev,
+ void *validated,
+ void *unvalidated,
+ struct vc4_exec_info *exec);
+
+int
+vc4_validate_shader_recs(struct drm_device *dev, struct vc4_exec_info *exec);
+
+struct vc4_validated_shader_info *
+vc4_validate_shader(struct drm_gem_cma_object *shader_obj);
+
+bool vc4_use_bo(struct vc4_exec_info *exec,
+ uint32_t hindex,
+ enum vc4_bo_mode mode,
+ struct drm_gem_cma_object **obj);
+
+int vc4_get_rcl(struct drm_device *dev, struct vc4_exec_info *exec);
+
+bool vc4_check_tex_size(struct vc4_exec_info *exec,
+ struct drm_gem_cma_object *fbo,
+ uint32_t offset, uint8_t tiling_format,
+ uint32_t width, uint32_t height, uint8_t cpp);
diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c
new file mode 100644
index 000000000000..28c6a9c80fa0
--- /dev/null
+++ b/drivers/gpu/drm/vc4/vc4_gem.c
@@ -0,0 +1,653 @@
+/*
+ * Copyright © 2014 Broadcom
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/device.h>
+#include <linux/io.h>
+
+#include "uapi/drm/vc4_drm.h"
+#include "vc4_drv.h"
+#include "vc4_regs.h"
+
+static void
+vc4_queue_hangcheck(struct drm_device *dev)
+{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+
+ mod_timer(&vc4->hangcheck.timer,
+ round_jiffies_up(jiffies + msecs_to_jiffies(100)));
+}
+
+static void
+vc4_reset(struct drm_device *dev)
+{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+
+ DRM_INFO("Resetting GPU.\n");
+ vc4_v3d_set_power(vc4, false);
+ vc4_v3d_set_power(vc4, true);
+
+ vc4_irq_reset(dev);
+
+ /* Rearm the hangcheck -- another job might have been waiting
+ * for our hung one to get kicked off, and vc4_irq_reset()
+ * would have started it.
+ */
+ vc4_queue_hangcheck(dev);
+}
+
+static void
+vc4_reset_work(struct work_struct *work)
+{
+ struct vc4_dev *vc4 =
+ container_of(work, struct vc4_dev, hangcheck.reset_work);
+
+ vc4_reset(vc4->dev);
+}
+
+static void
+vc4_hangcheck_elapsed(unsigned long data)
+{
+ struct drm_device *dev = (struct drm_device *)data;
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+ uint32_t ct0ca, ct1ca;
+
+ /* If idle, we can stop watching for hangs. */
+ if (list_empty(&vc4->job_list))
+ return;
+
+ ct0ca = V3D_READ(V3D_CTNCA(0));
+ ct1ca = V3D_READ(V3D_CTNCA(1));
+
+ /* If we've made any progress in execution, rearm the timer
+ * and wait.
+ */
+ if (ct0ca != vc4->hangcheck.last_ct0ca ||
+ ct1ca != vc4->hangcheck.last_ct1ca) {
+ vc4->hangcheck.last_ct0ca = ct0ca;
+ vc4->hangcheck.last_ct1ca = ct1ca;
+ vc4_queue_hangcheck(dev);
+ return;
+ }
+
+ /* We've gone too long with no progress, reset. This has to
+ * be done from a work struct, since resetting can sleep and
+ * this timer hook isn't allowed to.
+ */
+ schedule_work(&vc4->hangcheck.reset_work);
+}
+
+static void
+submit_cl(struct drm_device *dev, uint32_t thread, uint32_t start, uint32_t end)
+{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+
+ /* Stop any existing thread and set state to "stopped at halt" */
+ V3D_WRITE(V3D_CTNCS(thread), V3D_CTRUN);
+ barrier();
+
+ V3D_WRITE(V3D_CTNCA(thread), start);
+ barrier();
+
+ /* Set the end address of the control list. Writing this
+ * register is what starts the job.
+ */
+ V3D_WRITE(V3D_CTNEA(thread), end);
+ barrier();
+}
+
+int
+vc4_wait_for_seqno(struct drm_device *dev, uint64_t seqno, uint64_t timeout_ns)
+{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+ int ret = 0;
+ unsigned long timeout_expire;
+ DEFINE_WAIT(wait);
+
+ if (vc4->finished_seqno >= seqno)
+ return 0;
+
+ if (timeout_ns == 0)
+ return -ETIME;
+
+ timeout_expire = jiffies + nsecs_to_jiffies(timeout_ns);
+
+ for (;;) {
+ prepare_to_wait(&vc4->job_wait_queue, &wait,
+ TASK_INTERRUPTIBLE);
+
+ if (signal_pending(current)) {
+ ret = -ERESTARTSYS;
+ break;
+ }
+
+ if (vc4->finished_seqno >= seqno)
+ break;
+
+ if (timeout_ns != ~0ull) {
+ if (time_after_eq(jiffies, timeout_expire)) {
+ ret = -ETIME;
+ break;
+ }
+ schedule_timeout(timeout_expire - jiffies);
+ } else {
+ schedule();
+ }
+ }
+
+ finish_wait(&vc4->job_wait_queue, &wait);
+
+ if (ret && ret != -ERESTARTSYS) {
+ DRM_ERROR("timeout waiting for render thread idle\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static void
+vc4_flush_caches(struct drm_device *dev)
+{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+
+ /* Flush the GPU L2 caches. These caches sit on top of system
+ * L3 (the 128KB or so shared with the CPU), and are
+ * non-allocating in the L3.
+ */
+ V3D_WRITE(V3D_L2CACTL,
+ V3D_L2CACTL_L2CCLR);
+
+ V3D_WRITE(V3D_SLCACTL,
+ VC4_SET_FIELD(0xf, V3D_SLCACTL_T1CC) |
+ VC4_SET_FIELD(0xf, V3D_SLCACTL_T0CC) |
+ VC4_SET_FIELD(0xf, V3D_SLCACTL_UCC) |
+ VC4_SET_FIELD(0xf, V3D_SLCACTL_ICC));
+}
+
+/* Sets the registers for the next job to actually be executed in
+ * the hardware.
+ *
+ * The job_lock should be held during this.
+ */
+void
+vc4_submit_next_job(struct drm_device *dev)
+{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+ struct vc4_exec_info *exec = vc4_first_job(vc4);
+
+ if (!exec)
+ return;
+
+ vc4_flush_caches(dev);
+
+ /* Disable the binner's pre-loaded overflow memory address */
+ V3D_WRITE(V3D_BPOA, 0);
+ V3D_WRITE(V3D_BPOS, 0);
+
+ if (exec->ct0ca != exec->ct0ea)
+ submit_cl(dev, 0, exec->ct0ca, exec->ct0ea);
+ submit_cl(dev, 1, exec->ct1ca, exec->ct1ea);
+}
+
+static void
+vc4_update_bo_seqnos(struct vc4_exec_info *exec, uint64_t seqno)
+{
+ struct vc4_bo *bo;
+ unsigned i;
+
+ for (i = 0; i < exec->bo_count; i++) {
+ bo = to_vc4_bo(&exec->bo[i].bo->base);
+ bo->seqno = seqno;
+ }
+
+ list_for_each_entry(bo, &exec->unref_list, unref_head) {
+ bo->seqno = seqno;
+ }
+}
+
+/* Queues a struct vc4_exec_info for execution. If no job is
+ * currently executing, then submits it.
+ *
+ * Unlike most GPUs, our hardware only handles one command list at a
+ * time. To queue multiple jobs at once, we'd need to edit the
+ * previous command list to have a jump to the new one at the end, and
+ * then bump the end address. That's a change for a later date,
+ * though.
+ */
+static void
+vc4_queue_submit(struct drm_device *dev, struct vc4_exec_info *exec)
+{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+ uint64_t seqno = ++vc4->emit_seqno;
+ unsigned long irqflags;
+
+ exec->seqno = seqno;
+ vc4_update_bo_seqnos(exec, seqno);
+
+ spin_lock_irqsave(&vc4->job_lock, irqflags);
+ list_add_tail(&exec->head, &vc4->job_list);
+
+ /* If no job was executing, kick ours off. Otherwise, it'll
+ * get started when the previous job's frame done interrupt
+ * occurs.
+ */
+ if (vc4_first_job(vc4) == exec) {
+ vc4_submit_next_job(dev);
+ vc4_queue_hangcheck(dev);
+ }
+
+ spin_unlock_irqrestore(&vc4->job_lock, irqflags);
+}
+
+/* Looks up the GEM handles that the job references and stores the BO
+ * array for use by the command validator, which writes relocated
+ * addresses pointing into those BOs.
+ */
+static int
+vc4_cl_lookup_bos(struct drm_device *dev,
+ struct drm_file *file_priv,
+ struct vc4_exec_info *exec)
+{
+ struct drm_vc4_submit_cl *args = exec->args;
+ uint32_t *handles;
+ int ret = 0;
+ int i;
+
+ exec->bo_count = args->bo_handle_count;
+
+ if (!exec->bo_count) {
+ /* See comment on bo_index for why we have to check
+ * this.
+ */
+ DRM_ERROR("Rendering requires BOs to validate\n");
+ return -EINVAL;
+ }
+
+ exec->bo = kcalloc(exec->bo_count, sizeof(struct vc4_bo_exec_state),
+ GFP_KERNEL);
+ if (!exec->bo) {
+ DRM_ERROR("Failed to allocate validated BO pointers\n");
+ return -ENOMEM;
+ }
+
+ handles = drm_malloc_ab(exec->bo_count, sizeof(uint32_t));
+ if (!handles) {
+ ret = -ENOMEM;
+ DRM_ERROR("Failed to allocate incoming GEM handles\n");
+ goto fail;
+ }
+
+ ret = copy_from_user(handles,
+ (void __user *)(uintptr_t)args->bo_handles,
+ exec->bo_count * sizeof(uint32_t));
+ if (ret) {
+ ret = -EFAULT;
+ DRM_ERROR("Failed to copy in GEM handles\n");
+ goto fail;
+ }
+
+ spin_lock(&file_priv->table_lock);
+ for (i = 0; i < exec->bo_count; i++) {
+ struct drm_gem_object *bo = idr_find(&file_priv->object_idr,
+ handles[i]);
+ if (!bo) {
+ DRM_ERROR("Failed to look up GEM BO %d: %d\n",
+ i, handles[i]);
+ ret = -EINVAL;
+ spin_unlock(&file_priv->table_lock);
+ goto fail;
+ }
+ drm_gem_object_reference(bo);
+ exec->bo[i].bo = (struct drm_gem_cma_object *)bo;
+ }
+ spin_unlock(&file_priv->table_lock);
+
+fail:
+ kfree(handles);
+ return ret;
+}
+
+static int
+vc4_get_bcl(struct drm_device *dev, struct vc4_exec_info *exec)
+{
+ struct drm_vc4_submit_cl *args = exec->args;
+ void *temp = NULL;
+ void *bin;
+ int ret = 0;
+ uint32_t bin_offset = 0;
+ uint32_t shader_rec_offset = roundup(bin_offset + args->bin_cl_size,
+ 16);
+ uint32_t uniforms_offset = shader_rec_offset + args->shader_rec_size;
+ uint32_t exec_size = uniforms_offset + args->uniforms_size;
+ uint32_t temp_size = exec_size + (sizeof(struct vc4_shader_state) *
+ args->shader_rec_count);
+ struct vc4_bo *bo;
+
+ if (uniforms_offset < shader_rec_offset ||
+ exec_size < uniforms_offset ||
+ args->shader_rec_count >= (UINT_MAX /
+ sizeof(struct vc4_shader_state)) ||
+ temp_size < exec_size) {
+ DRM_ERROR("overflow in exec arguments\n");
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ /* Allocate space where we'll store the copied in user command lists
+ * and shader records.
+ *
+ * We don't just copy directly into the BOs because we need to
+ * read the contents back for validation, and reads through
+ * bo->vaddr are likely uncached.
+ */
+ temp = kmalloc(temp_size, GFP_KERNEL);
+ if (!temp) {
+ DRM_ERROR("Failed to allocate storage for copying "
+ "in bin/render CLs.\n");
+ ret = -ENOMEM;
+ goto fail;
+ }
+ bin = temp + bin_offset;
+ exec->shader_rec_u = temp + shader_rec_offset;
+ exec->uniforms_u = temp + uniforms_offset;
+ exec->shader_state = temp + exec_size;
+ exec->shader_state_size = args->shader_rec_count;
+
+ ret = copy_from_user(bin,
+ (void __user *)(uintptr_t)args->bin_cl,
+ args->bin_cl_size);
+ if (ret) {
+ ret = -EFAULT;
+ DRM_ERROR("Failed to copy in bin cl\n");
+ goto fail;
+ }
+
+ ret = copy_from_user(exec->shader_rec_u,
+ (void __user *)(uintptr_t)args->shader_rec,
+ args->shader_rec_size);
+ if (ret) {
+ ret = -EFAULT;
+ DRM_ERROR("Failed to copy in shader recs\n");
+ goto fail;
+ }
+
+ ret = copy_from_user(exec->uniforms_u,
+ (void __user *)(uintptr_t)args->uniforms,
+ args->uniforms_size);
+ if (ret) {
+ ret = -EFAULT;
+ DRM_ERROR("Failed to copy in uniforms cl\n");
+ goto fail;
+ }
+
+ bo = vc4_bo_create(dev, exec_size);
+ if (!bo) {
+ DRM_ERROR("Couldn't allocate BO for binning\n");
+ ret = -ENOMEM;
+ goto fail;
+ }
+ exec->exec_bo = &bo->base;
+
+ list_add_tail(&to_vc4_bo(&exec->exec_bo->base)->unref_head,
+ &exec->unref_list);
+
+ exec->ct0ca = exec->exec_bo->paddr + bin_offset;
+
+ exec->shader_rec_v = exec->exec_bo->vaddr + shader_rec_offset;
+ exec->shader_rec_p = exec->exec_bo->paddr + shader_rec_offset;
+ exec->shader_rec_size = args->shader_rec_size;
+
+ exec->uniforms_v = exec->exec_bo->vaddr + uniforms_offset;
+ exec->uniforms_p = exec->exec_bo->paddr + uniforms_offset;
+ exec->uniforms_size = args->uniforms_size;
+
+ ret = vc4_validate_bin_cl(dev,
+ exec->exec_bo->vaddr + bin_offset,
+ bin,
+ exec);
+ if (ret)
+ goto fail;
+
+ ret = vc4_validate_shader_recs(dev, exec);
+
+fail:
+ kfree(temp);
+ return ret;
+}
+
+static void
+vc4_complete_exec(struct vc4_exec_info *exec)
+{
+ unsigned i;
+
+ if (exec->bo) {
+ for (i = 0; i < exec->bo_count; i++)
+ drm_gem_object_unreference(&exec->bo[i].bo->base);
+ kfree(exec->bo);
+ }
+
+ while (!list_empty(&exec->unref_list)) {
+ struct vc4_bo *bo = list_first_entry(&exec->unref_list,
+ struct vc4_bo, unref_head);
+ list_del(&bo->unref_head);
+ drm_gem_object_unreference(&bo->base.base);
+ }
+
+ kfree(exec);
+}
+
+void
+vc4_job_handle_completed(struct vc4_dev *vc4)
+{
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&vc4->job_lock, irqflags);
+ while (!list_empty(&vc4->job_done_list)) {
+ struct vc4_exec_info *exec =
+ list_first_entry(&vc4->job_done_list,
+ struct vc4_exec_info, head);
+ list_del(&exec->head);
+
+ spin_unlock_irqrestore(&vc4->job_lock, irqflags);
+ vc4_complete_exec(exec);
+ spin_lock_irqsave(&vc4->job_lock, irqflags);
+ }
+ spin_unlock_irqrestore(&vc4->job_lock, irqflags);
+}
+
+/* Scheduled when any job has been completed, this walks the list of
+ * jobs that had completed and unrefs their BOs and frees their exec
+ * structs.
+ */
+static void
+vc4_job_done_work(struct work_struct *work)
+{
+ struct vc4_dev *vc4 =
+ container_of(work, struct vc4_dev, job_done_work);
+ struct drm_device *dev = vc4->dev;
+
+ /* Need the struct lock for drm_gem_object_unreference(). */
+ mutex_lock(&dev->struct_mutex);
+ vc4_job_handle_completed(vc4);
+ mutex_unlock(&dev->struct_mutex);
+}
+
+static int
+vc4_wait_for_seqno_ioctl_helper(struct drm_device *dev,
+ uint64_t seqno,
+ uint64_t *timeout_ns)
+{
+ unsigned long start = jiffies;
+ int ret = vc4_wait_for_seqno(dev, seqno, *timeout_ns);
+
+ if ((ret == -EINTR || ret == -ERESTARTSYS) && *timeout_ns != ~0ull) {
+ uint64_t delta = jiffies_to_nsecs(jiffies - start);
+
+ if (*timeout_ns >= delta)
+ *timeout_ns -= delta;
+ }
+
+ return ret;
+}
+
+int
+vc4_wait_seqno_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_vc4_wait_seqno *args = data;
+
+ return vc4_wait_for_seqno_ioctl_helper(dev, args->seqno,
+ &args->timeout_ns);
+}
+
+int
+vc4_wait_bo_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ int ret;
+ struct drm_vc4_wait_bo *args = data;
+ struct drm_gem_object *gem_obj;
+ struct vc4_bo *bo;
+
+ gem_obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+ if (!gem_obj) {
+ DRM_ERROR("Failed to look up GEM BO %d\n", args->handle);
+ return -EINVAL;
+ }
+ bo = to_vc4_bo(gem_obj);
+
+ ret = vc4_wait_for_seqno_ioctl_helper(dev, bo->seqno, &args->timeout_ns);
+
+ drm_gem_object_unreference(gem_obj);
+ return ret;
+}
+
+/**
+ * Submits a command list to the VC4.
+ *
+ * This is what is called batchbuffer emitting on other hardware.
+ */
+int
+vc4_submit_cl_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+ struct drm_vc4_submit_cl *args = data;
+ struct vc4_exec_info *exec;
+ int ret;
+
+ if ((args->flags & ~VC4_SUBMIT_CL_USE_CLEAR_COLOR) != 0) {
+ DRM_ERROR("Unknown flags: 0x%02x\n", args->flags);
+ return -EINVAL;
+ }
+
+ exec = kzalloc(sizeof(*exec), GFP_KERNEL);
+ if (!exec) {
+ DRM_ERROR("malloc failure on exec struct\n");
+ return -ENOMEM;
+ }
+
+ exec->args = args;
+ INIT_LIST_HEAD(&exec->unref_list);
+
+ mutex_lock(&dev->struct_mutex);
+
+ ret = vc4_cl_lookup_bos(dev, file_priv, exec);
+ if (ret)
+ goto fail;
+
+ if (exec->args->bin_cl_size != 0) {
+ ret = vc4_get_bcl(dev, exec);
+ if (ret)
+ goto fail;
+ } else {
+ exec->ct0ca = exec->ct0ea = 0;
+ }
+
+ ret = vc4_get_rcl(dev, exec);
+ if (ret)
+ goto fail;
+
+ /* Clear this out of the struct we'll be putting in the queue,
+ * since it's part of our stack.
+ */
+ exec->args = NULL;
+
+ vc4_queue_submit(dev, exec);
+
+ /* Return the seqno for our job. */
+ args->seqno = vc4->emit_seqno;
+
+ mutex_unlock(&dev->struct_mutex);
+
+ /* To keep any client from getting too far ahead (particularly
+ * a problem when BO caching is involved), we wait on the
+ * previous rendering before returning to userspace.
+ */
+ vc4_wait_for_seqno(dev, args->seqno - 1, ~0ull);
+
+ return 0;
+
+fail:
+ vc4_complete_exec(exec);
+
+ mutex_unlock(&dev->struct_mutex);
+
+ return ret;
+}
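+
+/* An illustrative userspace invocation (a sketch: assumes libdrm's
+ * drmIoctl() and a DRM_IOCTL_VC4_SUBMIT_CL request definition in
+ * uapi/drm/vc4_drm.h):
+ *
+ *     struct drm_vc4_submit_cl submit;
+ *
+ *     memset(&submit, 0, sizeof(submit));
+ *     submit.bo_handles = (uintptr_t)bo_handles;
+ *     submit.bo_handle_count = bo_handle_count;
+ *     submit.bin_cl = (uintptr_t)bin_cl;
+ *     submit.bin_cl_size = bin_cl_size;
+ *     ret = drmIoctl(fd, DRM_IOCTL_VC4_SUBMIT_CL, &submit);
+ */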
+
+void
+vc4_gem_init(struct drm_device *dev)
+{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+
+ INIT_LIST_HEAD(&vc4->job_list);
+ INIT_LIST_HEAD(&vc4->job_done_list);
+ spin_lock_init(&vc4->job_lock);
+
+ INIT_WORK(&vc4->hangcheck.reset_work, vc4_reset_work);
+ setup_timer(&vc4->hangcheck.timer,
+ vc4_hangcheck_elapsed,
+ (unsigned long) dev);
+
+ INIT_WORK(&vc4->job_done_work, vc4_job_done_work);
+
+ vc4_bo_cache_init(dev);
+}
+
+void
+vc4_gem_destroy(struct drm_device *dev)
+{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+
+ /* Waiting for exec to finish would need to be done before
+ * unregistering V3D.
+ */
+ WARN_ON(vc4->emit_seqno != vc4->finished_seqno);
+
+ /* V3D should already have disabled its interrupt and cleared
+ * the overflow allocation registers. Now free the object.
+ */
+ if (vc4->overflow_mem) {
+ drm_gem_object_unreference_unlocked(&vc4->overflow_mem->base.base);
+ vc4->overflow_mem = NULL;
+ }
+
+ vc4_bo_cache_destroy(dev);
+}
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
new file mode 100644
index 000000000000..5529b7bb5503
--- /dev/null
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -0,0 +1,588 @@
+/*
+ * Copyright (c) 2014 The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "drm_atomic_helper.h"
+#include "drm_crtc_helper.h"
+#include "drm_edid.h"
+#include "linux/component.h"
+#include "linux/of_gpio.h"
+#include "linux/of_platform.h"
+#include "soc/bcm2835/raspberrypi-firmware-property.h"
+#include "vc4_drv.h"
+#include "vc4_regs.h"
+
+/* General HDMI hardware state. */
+struct vc4_hdmi {
+ struct platform_device *pdev;
+ struct i2c_adapter *ddc;
+ void __iomem *hdmicore_regs;
+ void __iomem *hd_regs;
+ int hpd_gpio;
+};
+#define HDMI_READ(offset) readl(vc4->hdmi->hdmicore_regs + (offset))
+#define HDMI_WRITE(offset, val) writel(val, vc4->hdmi->hdmicore_regs + (offset))
+#define HD_READ(offset) readl(vc4->hdmi->hd_regs + (offset))
+#define HD_WRITE(offset, val) writel(val, vc4->hdmi->hd_regs + (offset))
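+
+/* As with the V3D/HVS accessors in vc4_drv.h, these assume a
+ * "struct vc4_dev *vc4" local variable is in scope in the caller.
+ */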
+
+/* VC4 HDMI encoder KMS struct */
+struct vc4_hdmi_encoder {
+ struct drm_encoder base;
+ bool hdmi_monitor;
+};
+static inline struct vc4_hdmi_encoder *
+to_vc4_hdmi_encoder(struct drm_encoder *encoder)
+{
+ return container_of(encoder, struct vc4_hdmi_encoder, base);
+}
+
+/* VC4 HDMI connector KMS struct */
+struct vc4_hdmi_connector {
+ struct drm_connector base;
+
+ /*
+ * Since the connector is attached to just the one encoder,
+ * keep a reference to it here so we can implement the
+ * best_encoder() hook.
+ */
+ struct drm_encoder *encoder;
+};
+static inline struct vc4_hdmi_connector *
+to_vc4_hdmi_connector(struct drm_connector *connector)
+{
+ return container_of(connector, struct vc4_hdmi_connector, base);
+}
+
+#define HDMI_REG(reg) { reg, #reg }
+static const struct {
+ u32 reg;
+ const char *name;
+} hdmi_regs[] = {
+ HDMI_REG(VC4_HDMI_CORE_REV),
+ HDMI_REG(VC4_HDMI_SW_RESET_CONTROL),
+ HDMI_REG(VC4_HDMI_HOTPLUG_INT),
+ HDMI_REG(VC4_HDMI_HOTPLUG),
+ HDMI_REG(VC4_HDMI_HORZA),
+ HDMI_REG(VC4_HDMI_HORZB),
+ HDMI_REG(VC4_HDMI_FIFO_CTL),
+ HDMI_REG(VC4_HDMI_SCHEDULER_CONTROL),
+ HDMI_REG(VC4_HDMI_VERTA0),
+ HDMI_REG(VC4_HDMI_VERTA1),
+ HDMI_REG(VC4_HDMI_VERTB0),
+ HDMI_REG(VC4_HDMI_VERTB1),
+ HDMI_REG(VC4_HDMI_TX_PHY_RESET_CTL),
+};
+static const struct {
+ u32 reg;
+ const char *name;
+} hd_regs[] = {
+ HDMI_REG(VC4_HD_M_CTL),
+ HDMI_REG(VC4_HD_MAI_CTL),
+ HDMI_REG(VC4_HD_VID_CTL),
+ HDMI_REG(VC4_HD_CSC_CTL),
+ HDMI_REG(VC4_HD_FRAME_COUNT),
+};
+
+#ifdef CONFIG_DEBUG_FS
+int vc4_hdmi_debugfs_regs(struct seq_file *m, void *unused)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(hdmi_regs); i++) {
+ seq_printf(m, "%s (0x%04x): 0x%08x\n",
+ hdmi_regs[i].name, hdmi_regs[i].reg,
+ HDMI_READ(hdmi_regs[i].reg));
+ }
+
+ for (i = 0; i < ARRAY_SIZE(hd_regs); i++) {
+ seq_printf(m, "%s (0x%04x): 0x%08x\n",
+ hd_regs[i].name, hd_regs[i].reg,
+ HD_READ(hd_regs[i].reg));
+ }
+
+ return 0;
+}
+#endif /* CONFIG_DEBUG_FS */
+
+static void vc4_hdmi_dump_regs(struct drm_device *dev)
+{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+ int i;
+
+ rmb();
+ for (i = 0; i < ARRAY_SIZE(hdmi_regs); i++) {
+ DRM_INFO("0x%04x (%s): 0x%08x\n",
+ hdmi_regs[i].reg, hdmi_regs[i].name,
+ HDMI_READ(hdmi_regs[i].reg));
+ }
+ for (i = 0; i < ARRAY_SIZE(hd_regs); i++) {
+ DRM_INFO("0x%04x (%s): 0x%08x\n",
+ hd_regs[i].reg, hd_regs[i].name,
+ HD_READ(hd_regs[i].reg));
+ }
+}
+
+static enum drm_connector_status
+vc4_hdmi_connector_detect(struct drm_connector *connector, bool force)
+{
+ struct drm_device *dev = connector->dev;
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+
+ if (vc4->hdmi->hpd_gpio) {
+ if (gpio_get_value(vc4->hdmi->hpd_gpio))
+ return connector_status_connected;
+ else
+ return connector_status_disconnected;
+ }
+
+ if (HDMI_READ(VC4_HDMI_HOTPLUG) & VC4_HDMI_HOTPLUG_CONNECTED)
+ return connector_status_connected;
+ else
+ return connector_status_disconnected;
+}
+
+static void vc4_hdmi_connector_destroy(struct drm_connector *connector)
+{
+ drm_connector_unregister(connector);
+ drm_connector_cleanup(connector);
+}
+
+static int vc4_hdmi_connector_get_modes(struct drm_connector *connector)
+{
+ struct vc4_hdmi_connector *vc4_connector =
+ to_vc4_hdmi_connector(connector);
+ struct drm_encoder *encoder = vc4_connector->encoder;
+ struct vc4_hdmi_encoder *vc4_encoder = to_vc4_hdmi_encoder(encoder);
+ struct drm_device *dev = connector->dev;
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+ int ret = 0;
+ struct edid *edid;
+
+ edid = drm_get_edid(connector, vc4->hdmi->ddc);
+ if (!edid)
+ return -ENODEV;
+
+ vc4_encoder->hdmi_monitor = drm_detect_hdmi_monitor(edid);
+ drm_mode_connector_update_edid_property(connector, edid);
+ ret = drm_add_edid_modes(connector, edid);
+
+ return ret;
+}
+
+static struct drm_encoder *
+vc4_hdmi_connector_best_encoder(struct drm_connector *connector)
+{
+ struct vc4_hdmi_connector *hdmi_connector =
+ to_vc4_hdmi_connector(connector);
+ return hdmi_connector->encoder;
+}
+
+static const struct drm_connector_funcs vc4_hdmi_connector_funcs = {
+ .dpms = drm_atomic_helper_connector_dpms,
+ .detect = vc4_hdmi_connector_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = vc4_hdmi_connector_destroy,
+ .reset = drm_atomic_helper_connector_reset,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static const struct drm_connector_helper_funcs vc4_hdmi_connector_helper_funcs = {
+ .get_modes = vc4_hdmi_connector_get_modes,
+ .mode_valid = NULL,
+ .best_encoder = vc4_hdmi_connector_best_encoder,
+};
+
+struct drm_connector *vc4_hdmi_connector_init(struct drm_device *dev,
+ struct drm_encoder *encoder)
+{
+ struct drm_connector *connector = NULL;
+ struct vc4_hdmi_connector *hdmi_connector;
+ int ret = 0;
+
+ hdmi_connector = devm_kzalloc(dev->dev, sizeof(*hdmi_connector),
+ GFP_KERNEL);
+ if (!hdmi_connector) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+ connector = &hdmi_connector->base;
+
+ hdmi_connector->encoder = encoder;
+
+ drm_connector_init(dev, connector, &vc4_hdmi_connector_funcs,
+ DRM_MODE_CONNECTOR_HDMIA);
+ drm_connector_helper_add(connector, &vc4_hdmi_connector_helper_funcs);
+
+ connector->polled = (DRM_CONNECTOR_POLL_CONNECT |
+ DRM_CONNECTOR_POLL_DISCONNECT);
+
+ connector->interlace_allowed = 0;
+ connector->doublescan_allowed = 0;
+
+ drm_connector_register(connector);
+
+ drm_mode_connector_attach_encoder(connector, encoder);
+
+ return connector;
+
+ fail:
+ if (connector)
+ vc4_hdmi_connector_destroy(connector);
+
+ return ERR_PTR(ret);
+}
+
+static void vc4_encoder_destroy(struct drm_encoder *encoder)
+{
+ drm_encoder_cleanup(encoder);
+}
+
+static const struct drm_encoder_funcs vc4_hdmi_encoder_funcs = {
+ .destroy = vc4_encoder_destroy,
+};
+
+static bool vc4_hdmi_encoder_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+/*
+ * Asks the firmware to set the pixel clock.
+ *
+ * This should probably be in a separate firmware clock provider
+ * driver.
+ */
+static void
+vc4_set_pixel_clock(struct vc4_dev *vc4, u32 clock)
+{
+ u32 packet[2];
+ int ret;
+
+ packet[0] = 8; /* Pixel clock. */
+ packet[1] = clock;
+
+ ret = rpi_firmware_property(vc4->firmware_node,
+ RPI_FIRMWARE_SET_CLOCK_RATE,
+ &packet, sizeof(packet));
+
+ if (ret)
+ DRM_ERROR("Failed to set pixel clock: %d\n", ret);
+}
+
+static void vc4_hdmi_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *unadjusted_mode,
+ struct drm_display_mode *mode)
+{
+ struct vc4_hdmi_encoder *vc4_encoder = to_vc4_hdmi_encoder(encoder);
+ struct drm_device *dev = encoder->dev;
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+ bool debug_dump_regs = false;
+ bool hsync_pos = !(mode->flags & DRM_MODE_FLAG_NHSYNC);
+ bool vsync_pos = !(mode->flags & DRM_MODE_FLAG_NVSYNC);
+ u32 vactive = (mode->vdisplay >>
+ ((mode->flags & DRM_MODE_FLAG_INTERLACE) ? 1 : 0));
+ u32 verta = (VC4_SET_FIELD(mode->vsync_end - mode->vsync_start,
+ VC4_HDMI_VERTA_VSP) |
+ VC4_SET_FIELD(mode->vsync_start - mode->vdisplay,
+ VC4_HDMI_VERTA_VFP) |
+ VC4_SET_FIELD(vactive, VC4_HDMI_VERTA_VAL));
+ u32 vertb = (VC4_SET_FIELD(0, VC4_HDMI_VERTB_VSPO) |
+ VC4_SET_FIELD(mode->vtotal - mode->vsync_end,
+ VC4_HDMI_VERTB_VBP));
+
+ if (debug_dump_regs) {
+ DRM_INFO("HDMI regs before:\n");
+ vc4_hdmi_dump_regs(dev);
+ }
+
+ HD_WRITE(VC4_HD_VID_CTL, 0);
+ /* XXX: Set state machine clock. */
+
+ vc4_set_pixel_clock(vc4, mode->clock * 1000);
+
+ HDMI_WRITE(VC4_HDMI_SCHEDULER_CONTROL,
+ HDMI_READ(VC4_HDMI_SCHEDULER_CONTROL) |
+ VC4_HDMI_SCHEDULER_CONTROL_MANUAL_FORMAT |
+ VC4_HDMI_SCHEDULER_CONTROL_IGNORE_VSYNC_PREDICTS);
+
+ HDMI_WRITE(VC4_HDMI_HORZA,
+ (vsync_pos ? VC4_HDMI_HORZA_VPOS : 0) |
+ (hsync_pos ? VC4_HDMI_HORZA_HPOS : 0) |
+ VC4_SET_FIELD(mode->hdisplay, VC4_HDMI_HORZA_HAP));
+
+ HDMI_WRITE(VC4_HDMI_HORZB,
+ VC4_SET_FIELD(mode->htotal - mode->hsync_end,
+ VC4_HDMI_HORZB_HBP) |
+ VC4_SET_FIELD(mode->hsync_end - mode->hsync_start,
+ VC4_HDMI_HORZB_HSP) |
+ VC4_SET_FIELD(mode->hsync_start - mode->hdisplay,
+ VC4_HDMI_HORZB_HFP));
+
+ HDMI_WRITE(VC4_HDMI_VERTA0, verta);
+ HDMI_WRITE(VC4_HDMI_VERTA1, verta);
+
+ HDMI_WRITE(VC4_HDMI_VERTB0, vertb);
+ HDMI_WRITE(VC4_HDMI_VERTB1, vertb);
+
+ HD_WRITE(VC4_HD_VID_CTL,
+ (vsync_pos ? 0 : VC4_HD_VID_CTL_VSYNC_LOW) |
+ (hsync_pos ? 0 : VC4_HD_VID_CTL_HSYNC_LOW));
+
+ /* The RGB order applies even when CSC is disabled. */
+ HD_WRITE(VC4_HD_CSC_CTL, VC4_SET_FIELD(VC4_HD_CSC_CTL_ORDER_BGR,
+ VC4_HD_CSC_CTL_ORDER));
+
+ HDMI_WRITE(VC4_HDMI_FIFO_CTL, VC4_HDMI_FIFO_CTL_MASTER_SLAVE_N);
+
+ /* XXX: Wait for video. */
+
+ HD_WRITE(VC4_HD_VID_CTL,
+ HD_READ(VC4_HD_VID_CTL) |
+ VC4_HD_VID_CTL_ENABLE |
+ VC4_HD_VID_CTL_FRAME_COUNTER_RESET);
+
+ if (vc4_encoder->hdmi_monitor) {
+ HDMI_WRITE(VC4_HDMI_SCHEDULER_CONTROL,
+ HDMI_READ(VC4_HDMI_SCHEDULER_CONTROL) |
+ VC4_HDMI_SCHEDULER_CONTROL_MODE_HDMI);
+
+ while (!(HDMI_READ(VC4_HDMI_SCHEDULER_CONTROL) &
+ VC4_HDMI_SCHEDULER_CONTROL_HDMI_ACTIVE)) {
+ cpu_relax();
+ }
+ } else {
+ HDMI_WRITE(VC4_HDMI_RAM_PACKET_CONFIG,
+ HDMI_READ(VC4_HDMI_RAM_PACKET_CONFIG) &
+ ~(VC4_HDMI_RAM_PACKET_ENABLE));
+ HDMI_WRITE(VC4_HDMI_SCHEDULER_CONTROL,
+ HDMI_READ(VC4_HDMI_SCHEDULER_CONTROL) &
+ ~VC4_HDMI_SCHEDULER_CONTROL_MODE_HDMI);
+
+ while (HDMI_READ(VC4_HDMI_SCHEDULER_CONTROL) &
+ VC4_HDMI_SCHEDULER_CONTROL_HDMI_ACTIVE) {
+ cpu_relax();
+ }
+ }
+
+ if (vc4_encoder->hdmi_monitor) {
+ WARN_ON(!(HDMI_READ(VC4_HDMI_SCHEDULER_CONTROL) &
+ VC4_HDMI_SCHEDULER_CONTROL_HDMI_ACTIVE));
+ HDMI_WRITE(VC4_HDMI_SCHEDULER_CONTROL,
+ HDMI_READ(VC4_HDMI_SCHEDULER_CONTROL) |
+ VC4_HDMI_SCHEDULER_CONTROL_VERT_ALWAYS_KEEPOUT);
+
+ /* XXX: Set up HDMI_RAM_PACKET_CONFIG (1 << 16) and
+ * set up infoframe.
+ */
+
+ /* XXX: Set up drift fifo? */
+ }
+
+ if (debug_dump_regs) {
+ DRM_INFO("HDMI regs after:\n");
+ vc4_hdmi_dump_regs(dev);
+ }
+}
+
+static void vc4_hdmi_encoder_disable(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+
+ HDMI_WRITE(VC4_HDMI_TX_PHY_RESET_CTL, 0xf << 16);
+}
+
+static void vc4_hdmi_encoder_enable(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+
+ HDMI_WRITE(VC4_HDMI_TX_PHY_RESET_CTL, 0);
+}
+
+static const struct drm_encoder_helper_funcs vc4_hdmi_encoder_helper_funcs = {
+ .mode_fixup = vc4_hdmi_encoder_mode_fixup,
+ .mode_set = vc4_hdmi_encoder_mode_set,
+ .disable = vc4_hdmi_encoder_disable,
+ .enable = vc4_hdmi_encoder_enable,
+};
+
+static struct drm_crtc *
+vc4_get_crtc_node(struct platform_device *pdev)
+{
+ struct device_node *crtc_node;
+ struct platform_device *crtc_pdev;
+
+ crtc_node = of_parse_phandle(pdev->dev.of_node, "crtc", 0);
+ if (!crtc_node) {
+ DRM_ERROR("No CRTC for hdmi in DT\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ crtc_pdev = of_find_device_by_node(crtc_node);
+ if (!crtc_pdev) {
+ DRM_ERROR("No CRTC device attached to OF node\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ return platform_get_drvdata(crtc_pdev);
+}
+
+/* initialize encoder */
+struct drm_encoder *vc4_hdmi_encoder_init(struct drm_device *dev)
+{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+ struct drm_encoder *encoder = NULL;
+ struct vc4_hdmi_encoder *vc4_hdmi_encoder;
+ struct drm_crtc *crtc;
+ int ret;
+
+ vc4_hdmi_encoder = devm_kzalloc(dev->dev, sizeof(*vc4_hdmi_encoder),
+ GFP_KERNEL);
+ if (!vc4_hdmi_encoder) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+ encoder = &vc4_hdmi_encoder->base;
+
+ crtc = vc4_get_crtc_node(vc4->hdmi->pdev);
+ if (IS_ERR(crtc)) {
+ /* Nothing to clean up: drm_encoder_init() hasn't run yet. */
+ return ERR_CAST(crtc);
+ }
+
+ drm_encoder_init(dev, encoder, &vc4_hdmi_encoder_funcs,
+ DRM_MODE_ENCODER_TMDS);
+ drm_encoder_helper_add(encoder, &vc4_hdmi_encoder_helper_funcs);
+
+ encoder->possible_crtcs = drm_crtc_mask(crtc);
+
+ return encoder;
+
+fail:
+ if (encoder)
+ vc4_encoder_destroy(encoder);
+
+ return ERR_PTR(ret);
+}
+
+static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct drm_device *drm = dev_get_drvdata(master);
+ struct vc4_dev *vc4 = drm->dev_private;
+ struct vc4_hdmi *hdmi;
+ struct device_node *ddc_node;
+ u32 value;
+
+ hdmi = devm_kzalloc(dev, sizeof(*hdmi), GFP_KERNEL);
+ if (!hdmi)
+ return -ENOMEM;
+
+ hdmi->pdev = pdev;
+ hdmi->hdmicore_regs = vc4_ioremap_regs(pdev, 0);
+ if (IS_ERR(hdmi->hdmicore_regs))
+ return PTR_ERR(hdmi->hdmicore_regs);
+
+ hdmi->hd_regs = vc4_ioremap_regs(pdev, 1);
+ if (IS_ERR(hdmi->hd_regs))
+ return PTR_ERR(hdmi->hd_regs);
+
+ /* DDC i2c driver */
+ ddc_node = of_parse_phandle(dev->of_node, "ddc", 0);
+ if (!ddc_node) {
+ DRM_ERROR("Failed to find ddc node in device tree\n");
+ return -ENODEV;
+ }
+
+ hdmi->ddc = of_find_i2c_adapter_by_node(ddc_node);
+ if (!hdmi->ddc) {
+ DRM_ERROR("Failed to get ddc i2c adapter by node\n");
+ return -EPROBE_DEFER;
+ }
+
+ /* Only use the GPIO HPD pin if present in the DT, otherwise
+ * we'll use the HDMI core's register.
+ */
+ if (of_find_property(dev->of_node, "hpd-gpio", &value)) {
+ hdmi->hpd_gpio = of_get_named_gpio(dev->of_node, "hpd-gpio", 0);
+ if (hdmi->hpd_gpio < 0)
+ return hdmi->hpd_gpio;
+ }
+
+ vc4->hdmi = hdmi;
+
+ return 0;
+}
+
+static void vc4_hdmi_unbind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct drm_device *drm = dev_get_drvdata(master);
+ struct vc4_dev *vc4 = drm->dev_private;
+
+ put_device(&vc4->hdmi->ddc->dev);
+
+ vc4->hdmi = NULL;
+}
+
+static const struct component_ops vc4_hdmi_ops = {
+ .bind = vc4_hdmi_bind,
+ .unbind = vc4_hdmi_unbind,
+};
+
+static int vc4_hdmi_dev_probe(struct platform_device *pdev)
+{
+ return component_add(&pdev->dev, &vc4_hdmi_ops);
+}
+
+static int vc4_hdmi_dev_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &vc4_hdmi_ops);
+ return 0;
+}
+
+static const struct of_device_id vc4_hdmi_dt_match[] = {
+ { .compatible = "brcm,vc4-hdmi" },
+ {}
+};
+
+static struct platform_driver vc4_hdmi_driver = {
+ .probe = vc4_hdmi_dev_probe,
+ .remove = vc4_hdmi_dev_remove,
+ .driver = {
+ .name = "vc4_hdmi",
+ .of_match_table = vc4_hdmi_dt_match,
+ },
+};
+
+void __init vc4_hdmi_register(void)
+{
+ platform_driver_register(&vc4_hdmi_driver);
+}
+
+void __exit vc4_hdmi_unregister(void)
+{
+ platform_driver_unregister(&vc4_hdmi_driver);
+}
diff --git a/drivers/gpu/drm/vc4/vc4_hvs.c b/drivers/gpu/drm/vc4/vc4_hvs.c
new file mode 100644
index 000000000000..4ba37162f39c
--- /dev/null
+++ b/drivers/gpu/drm/vc4/vc4_hvs.c
@@ -0,0 +1,159 @@
+/*
+ * Copyright (C) 2015 Broadcom
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "linux/component.h"
+#include "vc4_drv.h"
+#include "vc4_regs.h"
+
+#define HVS_REG(reg) { reg, #reg }
+static const struct {
+ u32 reg;
+ const char *name;
+} hvs_regs[] = {
+ HVS_REG(SCALER_DISPCTRL),
+ HVS_REG(SCALER_DISPSTAT),
+ HVS_REG(SCALER_DISPID),
+ HVS_REG(SCALER_DISPECTRL),
+ HVS_REG(SCALER_DISPPROF),
+ HVS_REG(SCALER_DISPDITHER),
+ HVS_REG(SCALER_DISPEOLN),
+ HVS_REG(SCALER_DISPLIST0),
+ HVS_REG(SCALER_DISPLIST1),
+ HVS_REG(SCALER_DISPLIST2),
+ HVS_REG(SCALER_DISPLSTAT),
+ HVS_REG(SCALER_DISPLACT0),
+ HVS_REG(SCALER_DISPLACT1),
+ HVS_REG(SCALER_DISPLACT2),
+ HVS_REG(SCALER_DISPCTRL0),
+ HVS_REG(SCALER_DISPBKGND0),
+ HVS_REG(SCALER_DISPSTAT0),
+ HVS_REG(SCALER_DISPBASE0),
+ HVS_REG(SCALER_DISPCTRL1),
+ HVS_REG(SCALER_DISPBKGND1),
+ HVS_REG(SCALER_DISPSTAT1),
+ HVS_REG(SCALER_DISPBASE1),
+ HVS_REG(SCALER_DISPCTRL2),
+ HVS_REG(SCALER_DISPBKGND2),
+ HVS_REG(SCALER_DISPSTAT2),
+ HVS_REG(SCALER_DISPBASE2),
+ HVS_REG(SCALER_DISPALPHA2),
+};
+
+void
+vc4_hvs_dump_state(struct drm_device *dev)
+{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+ int i;
+
+ rmb();
+ for (i = 0; i < ARRAY_SIZE(hvs_regs); i++) {
+ DRM_INFO("0x%04x (%s): 0x%08x\n",
+ hvs_regs[i].reg, hvs_regs[i].name,
+ HVS_READ(hvs_regs[i].reg));
+ }
+
+ DRM_INFO("HVS ctx:\n");
+ for (i = 0; i < 64; i += 4) {
+ DRM_INFO("0x%08x (%s): 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ i * 4, i < HVS_BOOTLOADER_DLIST_END ? "B" : "D",
+ ((uint32_t *)vc4->hvs->dlist)[i + 0],
+ ((uint32_t *)vc4->hvs->dlist)[i + 1],
+ ((uint32_t *)vc4->hvs->dlist)[i + 2],
+ ((uint32_t *)vc4->hvs->dlist)[i + 3]);
+ }
+}
+
+#ifdef CONFIG_DEBUG_FS
+int vc4_hvs_debugfs_regs(struct seq_file *m, void *unused)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(hvs_regs); i++) {
+ seq_printf(m, "%s (0x%04x): 0x%08x\n",
+ hvs_regs[i].name, hvs_regs[i].reg,
+ HVS_READ(hvs_regs[i].reg));
+ }
+
+ return 0;
+}
+#endif
+
+static int vc4_hvs_bind(struct device *dev, struct device *master, void *data)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct drm_device *drm = dev_get_drvdata(master);
+ struct vc4_dev *vc4 = drm->dev_private;
+ struct vc4_hvs *hvs = NULL;
+
+ hvs = devm_kzalloc(&pdev->dev, sizeof(*hvs), GFP_KERNEL);
+ if (!hvs)
+ return -ENOMEM;
+
+ hvs->pdev = pdev;
+
+ hvs->regs = vc4_ioremap_regs(pdev, 0);
+ if (IS_ERR(hvs->regs))
+ return PTR_ERR(hvs->regs);
+
+ hvs->dlist = hvs->regs + SCALER_DLIST_START;
+
+ vc4->hvs = hvs;
+ return 0;
+}
+
+static void vc4_hvs_unbind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct drm_device *drm = dev_get_drvdata(master);
+ struct vc4_dev *vc4 = drm->dev_private;
+
+ vc4->hvs = NULL;
+}
+
+static const struct component_ops vc4_hvs_ops = {
+ .bind = vc4_hvs_bind,
+ .unbind = vc4_hvs_unbind,
+};
+
+static int vc4_hvs_dev_probe(struct platform_device *pdev)
+{
+ return component_add(&pdev->dev, &vc4_hvs_ops);
+}
+
+static int vc4_hvs_dev_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &vc4_hvs_ops);
+ return 0;
+}
+
+static const struct of_device_id vc4_hvs_dt_match[] = {
+ { .compatible = "brcm,vc4-hvs" },
+ {}
+};
+
+static struct platform_driver vc4_hvs_driver = {
+ .probe = vc4_hvs_dev_probe,
+ .remove = vc4_hvs_dev_remove,
+ .driver = {
+ .name = "vc4_hvs",
+ .of_match_table = vc4_hvs_dt_match,
+ },
+};
+
+void __init vc4_hvs_register(void)
+{
+ platform_driver_register(&vc4_hvs_driver);
+}
+
+void __exit vc4_hvs_unregister(void)
+{
+ platform_driver_unregister(&vc4_hvs_driver);
+}
diff --git a/drivers/gpu/drm/vc4/vc4_irq.c b/drivers/gpu/drm/vc4/vc4_irq.c
new file mode 100644
index 000000000000..2d0b91a9bd93
--- /dev/null
+++ b/drivers/gpu/drm/vc4/vc4_irq.c
@@ -0,0 +1,213 @@
+/*
+ * Copyright © 2014 Broadcom
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+/**
+ * DOC: Interrupt management for the V3D engine.
+ *
+ * We have an interrupt status register (V3D_INTCTL) which reports
+ * interrupts, and where writing 1 bits clears those interrupts.
+ * There are also a pair of interrupt registers
+ * (V3D_INTENA/V3D_INTDIS) where writing a 1 to their bits enables or
+ * disables that specific interrupt, and 0s written are ignored
+ * (reading either one returns the set of enabled interrupts).
+ *
+ * When we take a render frame interrupt, we need to wake the
+ * processes waiting for some frame to be done, and get the next frame
+ * submitted ASAP (so the hardware doesn't sit idle when there's work
+ * to do).
+ *
+ * When we take the binner out of memory interrupt, we need to
+ * allocate some new memory and pass it to the binner so that the
+ * current job can make progress.
+ */
+
+#include "vc4_drv.h"
+#include "vc4_regs.h"
+
+#define V3D_DRIVER_IRQS (V3D_INT_OUTOMEM | \
+ V3D_INT_FRDONE)
+
+DECLARE_WAIT_QUEUE_HEAD(render_wait);
+
+static void
+vc4_overflow_mem_work(struct work_struct *work)
+{
+ struct vc4_dev *vc4 =
+ container_of(work, struct vc4_dev, overflow_mem_work);
+ struct drm_device *dev = vc4->dev;
+ struct vc4_bo *bo;
+
+ mutex_lock(&dev->struct_mutex);
+ bo = vc4_bo_create(dev, 256 * 1024);
+ mutex_unlock(&dev->struct_mutex);
+ if (!bo) {
+ DRM_ERROR("Couldn't allocate binner overflow mem\n");
+ return;
+ }
+
+ /* If there's a job executing currently, then our previous
+ * overflow allocation is getting used in that job and we need
+ * to queue it to be released when the job is done. But if no
+ * job is executing at all, then we can free the old overflow
+ * object directly.
+ *
+ * No lock necessary for this pointer since we're the only
+ * ones that update the pointer, and our workqueue won't
+ * reenter.
+ */
+ if (vc4->overflow_mem) {
+ struct vc4_exec_info *current_exec;
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&vc4->job_lock, irqflags);
+ current_exec = vc4_first_job(vc4);
+ if (current_exec) {
+ vc4->overflow_mem->seqno = vc4->finished_seqno + 1;
+ list_add_tail(&vc4->overflow_mem->unref_head,
+ &current_exec->unref_list);
+ vc4->overflow_mem = NULL;
+ }
+ spin_unlock_irqrestore(&vc4->job_lock, irqflags);
+ }
+
+ if (vc4->overflow_mem)
+ drm_gem_object_unreference_unlocked(&vc4->overflow_mem->base.base);
+ vc4->overflow_mem = bo;
+
+ V3D_WRITE(V3D_BPOA, bo->base.paddr);
+ V3D_WRITE(V3D_BPOS, bo->base.base.size);
+ V3D_WRITE(V3D_INTCTL, V3D_INT_OUTOMEM);
+ V3D_WRITE(V3D_INTENA, V3D_INT_OUTOMEM);
+}
+
+static void
+vc4_irq_finish_job(struct drm_device *dev)
+{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+ struct vc4_exec_info *exec = vc4_first_job(vc4);
+
+ if (!exec)
+ return;
+
+ vc4->finished_seqno++;
+ list_move_tail(&exec->head, &vc4->job_done_list);
+ vc4_submit_next_job(dev);
+
+ wake_up_all(&vc4->job_wait_queue);
+ schedule_work(&vc4->job_done_work);
+}
+
+irqreturn_t
+vc4_irq(int irq, void *arg)
+{
+ struct drm_device *dev = arg;
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+ uint32_t intctl;
+ irqreturn_t status = IRQ_NONE;
+
+ barrier();
+ intctl = V3D_READ(V3D_INTCTL);
+
+ /* Acknowledge the interrupts we're handling here. The render
+ * frame done interrupt will be cleared, while OUTOMEM will
+ * stay high until the underlying cause is cleared.
+ */
+ V3D_WRITE(V3D_INTCTL, intctl);
+
+ if (intctl & V3D_INT_OUTOMEM) {
+ /* Disable OUTOMEM until the work is done. */
+ V3D_WRITE(V3D_INTDIS, V3D_INT_OUTOMEM);
+ schedule_work(&vc4->overflow_mem_work);
+ status = IRQ_HANDLED;
+ }
+
+ if (intctl & V3D_INT_FRDONE) {
+ spin_lock(&vc4->job_lock);
+ vc4_irq_finish_job(dev);
+ spin_unlock(&vc4->job_lock);
+ status = IRQ_HANDLED;
+ }
+
+ return status;
+}
+
+void
+vc4_irq_preinstall(struct drm_device *dev)
+{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+
+ init_waitqueue_head(&vc4->job_wait_queue);
+ INIT_WORK(&vc4->overflow_mem_work, vc4_overflow_mem_work);
+
+ /* Clear any pending interrupts someone might have left around
+ * for us.
+ */
+ V3D_WRITE(V3D_INTCTL, V3D_DRIVER_IRQS);
+}
+
+int
+vc4_irq_postinstall(struct drm_device *dev)
+{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+
+ /* Enable both the render done and out of memory interrupts. */
+ V3D_WRITE(V3D_INTENA, V3D_DRIVER_IRQS);
+
+ return 0;
+}
+
+void
+vc4_irq_uninstall(struct drm_device *dev)
+{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+
+ /* Disable sending interrupts for our driver's IRQs. */
+ V3D_WRITE(V3D_INTDIS, V3D_DRIVER_IRQS);
+
+ /* Clear any pending interrupts we might have left. */
+ V3D_WRITE(V3D_INTCTL, V3D_DRIVER_IRQS);
+
+ cancel_work_sync(&vc4->overflow_mem_work);
+}
+
+/** Reinitializes interrupt registers when a GPU reset is performed. */
+void vc4_irq_reset(struct drm_device *dev)
+{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+ unsigned long irqflags;
+
+ /* Acknowledge any stale IRQs. */
+ V3D_WRITE(V3D_INTCTL, V3D_DRIVER_IRQS);
+
+ /*
+ * Turn all our interrupts on. Binner out of memory is the
+ * only one we expect to trigger at this point, since we've
+ * just come from poweron and haven't supplied any overflow
+ * memory yet.
+ */
+ V3D_WRITE(V3D_INTENA, V3D_DRIVER_IRQS);
+
+ spin_lock_irqsave(&vc4->job_lock, irqflags);
+ vc4_irq_finish_job(dev);
+ spin_unlock_irqrestore(&vc4->job_lock, irqflags);
+}
diff --git a/drivers/gpu/drm/vc4/vc4_kms.c b/drivers/gpu/drm/vc4/vc4_kms.c
new file mode 100644
index 000000000000..98b57bd0fc80
--- /dev/null
+++ b/drivers/gpu/drm/vc4/vc4_kms.c
@@ -0,0 +1,80 @@
+/*
+ * Copyright (C) 2015 Broadcom
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "drm_crtc.h"
+#include "drm_atomic_helper.h"
+#include "drm_crtc_helper.h"
+#include "drm_plane_helper.h"
+#include "drm_fb_cma_helper.h"
+#include "vc4_drv.h"
+
+static const struct drm_mode_config_funcs vc4_mode_funcs = {
+ .atomic_check = drm_atomic_helper_check,
+ .atomic_commit = drm_atomic_helper_commit,
+ .fb_create = drm_fb_cma_create,
+};
+
+/*
+ * Calls out to initialize all of the VC4 KMS objects.
+ */
+static int
+vc4_init_modeset_objects(struct drm_device *dev)
+{
+ struct drm_encoder *encoder;
+ struct drm_connector *connector;
+ int ret = 0;
+
+ encoder = vc4_hdmi_encoder_init(dev);
+ if (IS_ERR(encoder)) {
+ dev_err(dev->dev, "failed to construct HDMI encoder\n");
+ ret = PTR_ERR(encoder);
+ goto fail;
+ }
+
+ connector = vc4_hdmi_connector_init(dev, encoder);
+ if (IS_ERR(connector)) {
+ ret = PTR_ERR(connector);
+ dev_err(dev->dev, "failed to initialize HDMI connector\n");
+ goto fail;
+ }
+
+fail:
+ return ret;
+}
+
+int
+vc4_kms_load(struct drm_device *dev)
+{
+ int ret;
+
+ ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
+ if (ret < 0) {
+ dev_err(dev->dev, "failed to initialize vblank\n");
+ return ret;
+ }
+
+ dev->mode_config.max_width = 2048;
+ dev->mode_config.max_height = 2048;
+ dev->mode_config.funcs = &vc4_mode_funcs;
+ dev->mode_config.preferred_depth = 24;
+
+ ret = vc4_init_modeset_objects(dev);
+ if (ret)
+ goto fail;
+
+ drm_mode_config_reset(dev);
+
+ drm_fbdev_cma_init(dev, 32,
+ dev->mode_config.num_crtc,
+ dev->mode_config.num_connector);
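+ /* Note: the struct drm_fbdev_cma returned here is not stored,
+ * so the fbdev emulation set up above cannot be torn down later.
+ */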
+
+ drm_kms_helper_poll_init(dev);
+
+fail:
+ return ret;
+}
diff --git a/drivers/gpu/drm/vc4/vc4_packet.h b/drivers/gpu/drm/vc4/vc4_packet.h
new file mode 100644
index 000000000000..1eb23416311d
--- /dev/null
+++ b/drivers/gpu/drm/vc4/vc4_packet.h
@@ -0,0 +1,367 @@
+/*
+ * Copyright © 2014 Broadcom
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef VC4_PACKET_H
+#define VC4_PACKET_H
+
+#include "vc4_regs.h" /* for VC4_MASK, VC4_GET_FIELD, VC4_SET_FIELD */
+
+enum vc4_packet {
+ VC4_PACKET_HALT = 0,
+ VC4_PACKET_NOP = 1,
+
+ VC4_PACKET_FLUSH = 4,
+ VC4_PACKET_FLUSH_ALL = 5,
+ VC4_PACKET_START_TILE_BINNING = 6,
+ VC4_PACKET_INCREMENT_SEMAPHORE = 7,
+ VC4_PACKET_WAIT_ON_SEMAPHORE = 8,
+
+ VC4_PACKET_BRANCH = 16,
+ VC4_PACKET_BRANCH_TO_SUB_LIST = 17,
+
+ VC4_PACKET_STORE_MS_TILE_BUFFER = 24,
+ VC4_PACKET_STORE_MS_TILE_BUFFER_AND_EOF = 25,
+ VC4_PACKET_STORE_FULL_RES_TILE_BUFFER = 26,
+ VC4_PACKET_LOAD_FULL_RES_TILE_BUFFER = 27,
+ VC4_PACKET_STORE_TILE_BUFFER_GENERAL = 28,
+ VC4_PACKET_LOAD_TILE_BUFFER_GENERAL = 29,
+
+ VC4_PACKET_GL_INDEXED_PRIMITIVE = 32,
+ VC4_PACKET_GL_ARRAY_PRIMITIVE = 33,
+
+ VC4_PACKET_COMPRESSED_PRIMITIVE = 48,
+ VC4_PACKET_CLIPPED_COMPRESSED_PRIMITIVE = 49,
+
+ VC4_PACKET_PRIMITIVE_LIST_FORMAT = 56,
+
+ VC4_PACKET_GL_SHADER_STATE = 64,
+ VC4_PACKET_NV_SHADER_STATE = 65,
+ VC4_PACKET_VG_SHADER_STATE = 66,
+
+ VC4_PACKET_CONFIGURATION_BITS = 96,
+ VC4_PACKET_FLAT_SHADE_FLAGS = 97,
+ VC4_PACKET_POINT_SIZE = 98,
+ VC4_PACKET_LINE_WIDTH = 99,
+ VC4_PACKET_RHT_X_BOUNDARY = 100,
+ VC4_PACKET_DEPTH_OFFSET = 101,
+ VC4_PACKET_CLIP_WINDOW = 102,
+ VC4_PACKET_VIEWPORT_OFFSET = 103,
+ VC4_PACKET_Z_CLIPPING = 104,
+ VC4_PACKET_CLIPPER_XY_SCALING = 105,
+ VC4_PACKET_CLIPPER_Z_SCALING = 106,
+
+ VC4_PACKET_TILE_BINNING_MODE_CONFIG = 112,
+ VC4_PACKET_TILE_RENDERING_MODE_CONFIG = 113,
+ VC4_PACKET_CLEAR_COLORS = 114,
+ VC4_PACKET_TILE_COORDINATES = 115,
+
+ /* Not an actual hardware packet -- this is what we use to put
+ * references to GEM bos in the command stream, since we need the u32
+ * in the actual address packet in order to store the offset from the
+ * start of the BO.
+ */
+ VC4_PACKET_GEM_HANDLES = 254,
+} __attribute__ ((__packed__));
+
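+/* Packet sizes below are in bytes and include the one-byte opcode. */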
+#define VC4_PACKET_HALT_SIZE 1
+#define VC4_PACKET_NOP_SIZE 1
+#define VC4_PACKET_FLUSH_SIZE 1
+#define VC4_PACKET_FLUSH_ALL_SIZE 1
+#define VC4_PACKET_START_TILE_BINNING_SIZE 1
+#define VC4_PACKET_INCREMENT_SEMAPHORE_SIZE 1
+#define VC4_PACKET_WAIT_ON_SEMAPHORE_SIZE 1
+#define VC4_PACKET_BRANCH_TO_SUB_LIST_SIZE 5
+#define VC4_PACKET_STORE_MS_TILE_BUFFER_SIZE 1
+#define VC4_PACKET_STORE_MS_TILE_BUFFER_AND_EOF_SIZE 1
+#define VC4_PACKET_STORE_TILE_BUFFER_GENERAL_SIZE 7
+#define VC4_PACKET_LOAD_TILE_BUFFER_GENERAL_SIZE 7
+#define VC4_PACKET_GL_INDEXED_PRIMITIVE_SIZE 14
+#define VC4_PACKET_GL_ARRAY_PRIMITIVE_SIZE 10
+#define VC4_PACKET_PRIMITIVE_LIST_FORMAT_SIZE 2
+#define VC4_PACKET_GL_SHADER_STATE_SIZE 5
+#define VC4_PACKET_NV_SHADER_STATE_SIZE 5
+#define VC4_PACKET_CONFIGURATION_BITS_SIZE 4
+#define VC4_PACKET_FLAT_SHADE_FLAGS_SIZE 5
+#define VC4_PACKET_POINT_SIZE_SIZE 5
+#define VC4_PACKET_LINE_WIDTH_SIZE 5
+#define VC4_PACKET_RHT_X_BOUNDARY_SIZE 3
+#define VC4_PACKET_DEPTH_OFFSET_SIZE 5
+#define VC4_PACKET_CLIP_WINDOW_SIZE 9
+#define VC4_PACKET_VIEWPORT_OFFSET_SIZE 5
+#define VC4_PACKET_CLIPPER_XY_SCALING_SIZE 9
+#define VC4_PACKET_CLIPPER_Z_SCALING_SIZE 9
+#define VC4_PACKET_TILE_BINNING_MODE_CONFIG_SIZE 16
+#define VC4_PACKET_TILE_RENDERING_MODE_CONFIG_SIZE 11
+#define VC4_PACKET_CLEAR_COLORS_SIZE 14
+#define VC4_PACKET_TILE_COORDINATES_SIZE 3
+#define VC4_PACKET_GEM_HANDLES_SIZE 9
+
+/** @{
+ * Bits used by packets like VC4_PACKET_STORE_TILE_BUFFER_GENERAL and
+ * VC4_PACKET_TILE_RENDERING_MODE_CONFIG.
+ */
+#define VC4_TILING_FORMAT_LINEAR 0
+#define VC4_TILING_FORMAT_T 1
+#define VC4_TILING_FORMAT_LT 2
+/** @} */
+
+/** @{
+ *
+ * byte 2 of VC4_PACKET_STORE_TILE_BUFFER_GENERAL and
+ * VC4_PACKET_LOAD_TILE_BUFFER_GENERAL (low bits of the address)
+ */
+
+#define VC4_LOADSTORE_TILE_BUFFER_EOF (1 << 3)
+#define VC4_LOADSTORE_TILE_BUFFER_DISABLE_FULL_VG_MASK (1 << 2)
+#define VC4_LOADSTORE_TILE_BUFFER_DISABLE_FULL_ZS (1 << 1)
+#define VC4_LOADSTORE_TILE_BUFFER_DISABLE_FULL_COLOR (1 << 0)
+
+/** @} */
+
+/** @{
+ *
+ * byte 0-1 of VC4_PACKET_STORE_TILE_BUFFER_GENERAL and
+ * VC4_PACKET_LOAD_TILE_BUFFER_GENERAL
+ */
+#define VC4_STORE_TILE_BUFFER_DISABLE_VG_MASK_CLEAR (1 << 15)
+#define VC4_STORE_TILE_BUFFER_DISABLE_ZS_CLEAR (1 << 14)
+#define VC4_STORE_TILE_BUFFER_DISABLE_COLOR_CLEAR (1 << 13)
+#define VC4_STORE_TILE_BUFFER_DISABLE_SWAP (1 << 12)
+
+#define VC4_LOADSTORE_TILE_BUFFER_FORMAT_MASK VC4_MASK(9, 8)
+#define VC4_LOADSTORE_TILE_BUFFER_FORMAT_SHIFT 8
+#define VC4_LOADSTORE_TILE_BUFFER_RGBA8888 0
+#define VC4_LOADSTORE_TILE_BUFFER_BGR565_DITHER 1
+#define VC4_LOADSTORE_TILE_BUFFER_BGR565 2
+/** @} */
+
+/** @{
+ *
+ * byte 0 of VC4_PACKET_STORE_TILE_BUFFER_GENERAL and
+ * VC4_PACKET_LOAD_TILE_BUFFER_GENERAL
+ */
+#define VC4_STORE_TILE_BUFFER_MODE_MASK VC4_MASK(7, 6)
+#define VC4_STORE_TILE_BUFFER_MODE_SHIFT 6
+#define VC4_STORE_TILE_BUFFER_MODE_SAMPLE0 (0 << 6)
+#define VC4_STORE_TILE_BUFFER_MODE_DECIMATE_X4 (1 << 6)
+#define VC4_STORE_TILE_BUFFER_MODE_DECIMATE_X16 (2 << 6)
+
+/** The values of the field are VC4_TILING_FORMAT_* */
+#define VC4_LOADSTORE_TILE_BUFFER_TILING_MASK VC4_MASK(5, 4)
+#define VC4_LOADSTORE_TILE_BUFFER_TILING_SHIFT 4
+
+#define VC4_LOADSTORE_TILE_BUFFER_BUFFER_MASK VC4_MASK(2, 0)
+#define VC4_LOADSTORE_TILE_BUFFER_BUFFER_SHIFT 0
+#define VC4_LOADSTORE_TILE_BUFFER_NONE 0
+#define VC4_LOADSTORE_TILE_BUFFER_COLOR 1
+#define VC4_LOADSTORE_TILE_BUFFER_ZS 2
+#define VC4_LOADSTORE_TILE_BUFFER_Z 3
+#define VC4_LOADSTORE_TILE_BUFFER_VG_MASK 4
+#define VC4_LOADSTORE_TILE_BUFFER_FULL 5
+/** @} */
+
+#define VC4_INDEX_BUFFER_U8 (0 << 4)
+#define VC4_INDEX_BUFFER_U16 (1 << 4)
+
+/* This flag is only present in NV shader state. */
+#define VC4_SHADER_FLAG_SHADED_CLIP_COORDS (1 << 3)
+#define VC4_SHADER_FLAG_ENABLE_CLIPPING (1 << 2)
+#define VC4_SHADER_FLAG_VS_POINT_SIZE (1 << 1)
+#define VC4_SHADER_FLAG_FS_SINGLE_THREAD (1 << 0)
+
+/** @{ byte 2 of config bits. */
+#define VC4_CONFIG_BITS_EARLY_Z_UPDATE (1 << 1)
+#define VC4_CONFIG_BITS_EARLY_Z (1 << 0)
+/** @} */
+
+/** @{ byte 1 of config bits. */
+#define VC4_CONFIG_BITS_Z_UPDATE (1 << 7)
+/** same values in this 3-bit field as PIPE_FUNC_* */
+#define VC4_CONFIG_BITS_DEPTH_FUNC_SHIFT 4
+#define VC4_CONFIG_BITS_COVERAGE_READ_LEAVE (1 << 3)
+
+#define VC4_CONFIG_BITS_COVERAGE_UPDATE_NONZERO (0 << 1)
+#define VC4_CONFIG_BITS_COVERAGE_UPDATE_ODD (1 << 1)
+#define VC4_CONFIG_BITS_COVERAGE_UPDATE_OR (2 << 1)
+#define VC4_CONFIG_BITS_COVERAGE_UPDATE_ZERO (3 << 1)
+
+#define VC4_CONFIG_BITS_COVERAGE_PIPE_SELECT (1 << 0)
+/** @} */
+
+/** @{ byte 0 of config bits. */
+#define VC4_CONFIG_BITS_RASTERIZER_OVERSAMPLE_NONE (0 << 6)
+#define VC4_CONFIG_BITS_RASTERIZER_OVERSAMPLE_4X (1 << 6)
+#define VC4_CONFIG_BITS_RASTERIZER_OVERSAMPLE_16X (2 << 6)
+
+#define VC4_CONFIG_BITS_AA_POINTS_AND_LINES (1 << 4)
+#define VC4_CONFIG_BITS_ENABLE_DEPTH_OFFSET (1 << 3)
+#define VC4_CONFIG_BITS_CW_PRIMITIVES (1 << 2)
+#define VC4_CONFIG_BITS_ENABLE_PRIM_BACK (1 << 1)
+#define VC4_CONFIG_BITS_ENABLE_PRIM_FRONT (1 << 0)
+/** @} */
+
+/** @{ bits in the last u8 of VC4_PACKET_TILE_BINNING_MODE_CONFIG */
+#define VC4_BIN_CONFIG_DB_NON_MS (1 << 7)
+
+#define VC4_BIN_CONFIG_ALLOC_BLOCK_SIZE_MASK VC4_MASK(6, 5)
+#define VC4_BIN_CONFIG_ALLOC_BLOCK_SIZE_SHIFT 5
+#define VC4_BIN_CONFIG_ALLOC_BLOCK_SIZE_32 0
+#define VC4_BIN_CONFIG_ALLOC_BLOCK_SIZE_64 1
+#define VC4_BIN_CONFIG_ALLOC_BLOCK_SIZE_128 2
+#define VC4_BIN_CONFIG_ALLOC_BLOCK_SIZE_256 3
+
+#define VC4_BIN_CONFIG_ALLOC_INIT_BLOCK_SIZE_MASK VC4_MASK(4, 3)
+#define VC4_BIN_CONFIG_ALLOC_INIT_BLOCK_SIZE_SHIFT 3
+#define VC4_BIN_CONFIG_ALLOC_INIT_BLOCK_SIZE_32 0
+#define VC4_BIN_CONFIG_ALLOC_INIT_BLOCK_SIZE_64 1
+#define VC4_BIN_CONFIG_ALLOC_INIT_BLOCK_SIZE_128 2
+#define VC4_BIN_CONFIG_ALLOC_INIT_BLOCK_SIZE_256 3
+
+#define VC4_BIN_CONFIG_AUTO_INIT_TSDA (1 << 2)
+#define VC4_BIN_CONFIG_TILE_BUFFER_64BIT (1 << 1)
+#define VC4_BIN_CONFIG_MS_MODE_4X (1 << 0)
+/** @} */
+
+/** @{ bits in the last u16 of VC4_PACKET_TILE_RENDERING_MODE_CONFIG */
+#define VC4_RENDER_CONFIG_DB_NON_MS (1 << 12)
+#define VC4_RENDER_CONFIG_EARLY_Z_COVERAGE_DISABLE (1 << 11)
+#define VC4_RENDER_CONFIG_EARLY_Z_DIRECTION_G (1 << 10)
+#define VC4_RENDER_CONFIG_COVERAGE_MODE (1 << 9)
+#define VC4_RENDER_CONFIG_ENABLE_VG_MASK (1 << 8)
+
+/** The values of the field are VC4_TILING_FORMAT_* */
+#define VC4_RENDER_CONFIG_MEMORY_FORMAT_MASK VC4_MASK(7, 6)
+#define VC4_RENDER_CONFIG_MEMORY_FORMAT_SHIFT 6
+
+#define VC4_RENDER_CONFIG_DECIMATE_MODE_1X (0 << 4)
+#define VC4_RENDER_CONFIG_DECIMATE_MODE_4X (1 << 4)
+#define VC4_RENDER_CONFIG_DECIMATE_MODE_16X (2 << 4)
+
+#define VC4_RENDER_CONFIG_FORMAT_MASK VC4_MASK(3, 2)
+#define VC4_RENDER_CONFIG_FORMAT_SHIFT 2
+#define VC4_RENDER_CONFIG_FORMAT_BGR565_DITHERED 0
+#define VC4_RENDER_CONFIG_FORMAT_RGBA8888 1
+#define VC4_RENDER_CONFIG_FORMAT_BGR565 2
+
+#define VC4_RENDER_CONFIG_TILE_BUFFER_64BIT (1 << 1)
+#define VC4_RENDER_CONFIG_MS_MODE_4X (1 << 0)
+
+#define VC4_PRIMITIVE_LIST_FORMAT_16_INDEX (1 << 4)
+#define VC4_PRIMITIVE_LIST_FORMAT_32_XY (3 << 4)
+#define VC4_PRIMITIVE_LIST_FORMAT_TYPE_POINTS (0 << 0)
+#define VC4_PRIMITIVE_LIST_FORMAT_TYPE_LINES (1 << 0)
+#define VC4_PRIMITIVE_LIST_FORMAT_TYPE_TRIANGLES (2 << 0)
+#define VC4_PRIMITIVE_LIST_FORMAT_TYPE_RHT (3 << 0)
+
+enum vc4_texture_data_type {
+ VC4_TEXTURE_TYPE_RGBA8888 = 0,
+ VC4_TEXTURE_TYPE_RGBX8888 = 1,
+ VC4_TEXTURE_TYPE_RGBA4444 = 2,
+ VC4_TEXTURE_TYPE_RGBA5551 = 3,
+ VC4_TEXTURE_TYPE_RGB565 = 4,
+ VC4_TEXTURE_TYPE_LUMINANCE = 5,
+ VC4_TEXTURE_TYPE_ALPHA = 6,
+ VC4_TEXTURE_TYPE_LUMALPHA = 7,
+ VC4_TEXTURE_TYPE_ETC1 = 8,
+ VC4_TEXTURE_TYPE_S16F = 9,
+ VC4_TEXTURE_TYPE_S8 = 10,
+ VC4_TEXTURE_TYPE_S16 = 11,
+ VC4_TEXTURE_TYPE_BW1 = 12,
+ VC4_TEXTURE_TYPE_A4 = 13,
+ VC4_TEXTURE_TYPE_A1 = 14,
+ VC4_TEXTURE_TYPE_RGBA64 = 15,
+ VC4_TEXTURE_TYPE_RGBA32R = 16,
+ VC4_TEXTURE_TYPE_YUV422R = 17,
+};
+
+#define VC4_TEX_P0_OFFSET_MASK VC4_MASK(31, 12)
+#define VC4_TEX_P0_OFFSET_SHIFT 12
+#define VC4_TEX_P0_CSWIZ_MASK VC4_MASK(11, 10)
+#define VC4_TEX_P0_CSWIZ_SHIFT 10
+#define VC4_TEX_P0_CMMODE_MASK VC4_MASK(9, 9)
+#define VC4_TEX_P0_CMMODE_SHIFT 9
+#define VC4_TEX_P0_FLIPY_MASK VC4_MASK(8, 8)
+#define VC4_TEX_P0_FLIPY_SHIFT 8
+#define VC4_TEX_P0_TYPE_MASK VC4_MASK(7, 4)
+#define VC4_TEX_P0_TYPE_SHIFT 4
+#define VC4_TEX_P0_MIPLVLS_MASK VC4_MASK(3, 0)
+#define VC4_TEX_P0_MIPLVLS_SHIFT 0
+
+#define VC4_TEX_P1_TYPE4_MASK VC4_MASK(31, 31)
+#define VC4_TEX_P1_TYPE4_SHIFT 31
+#define VC4_TEX_P1_HEIGHT_MASK VC4_MASK(30, 20)
+#define VC4_TEX_P1_HEIGHT_SHIFT 20
+#define VC4_TEX_P1_ETCFLIP_MASK VC4_MASK(19, 19)
+#define VC4_TEX_P1_ETCFLIP_SHIFT 19
+#define VC4_TEX_P1_WIDTH_MASK VC4_MASK(18, 8)
+#define VC4_TEX_P1_WIDTH_SHIFT 8
+
+#define VC4_TEX_P1_MAGFILT_MASK VC4_MASK(7, 7)
+#define VC4_TEX_P1_MAGFILT_SHIFT 7
+# define VC4_TEX_P1_MAGFILT_LINEAR 0
+# define VC4_TEX_P1_MAGFILT_NEAREST 1
+
+#define VC4_TEX_P1_MINFILT_MASK VC4_MASK(6, 4)
+#define VC4_TEX_P1_MINFILT_SHIFT 4
+# define VC4_TEX_P1_MINFILT_LINEAR 0
+# define VC4_TEX_P1_MINFILT_NEAREST 1
+# define VC4_TEX_P1_MINFILT_NEAR_MIP_NEAR 2
+# define VC4_TEX_P1_MINFILT_NEAR_MIP_LIN 3
+# define VC4_TEX_P1_MINFILT_LIN_MIP_NEAR 4
+# define VC4_TEX_P1_MINFILT_LIN_MIP_LIN 5
+
+#define VC4_TEX_P1_WRAP_T_MASK VC4_MASK(3, 2)
+#define VC4_TEX_P1_WRAP_T_SHIFT 2
+#define VC4_TEX_P1_WRAP_S_MASK VC4_MASK(1, 0)
+#define VC4_TEX_P1_WRAP_S_SHIFT 0
+# define VC4_TEX_P1_WRAP_REPEAT 0
+# define VC4_TEX_P1_WRAP_CLAMP 1
+# define VC4_TEX_P1_WRAP_MIRROR 2
+# define VC4_TEX_P1_WRAP_BORDER 3
+
+#define VC4_TEX_P2_PTYPE_MASK VC4_MASK(31, 30)
+#define VC4_TEX_P2_PTYPE_SHIFT 30
+# define VC4_TEX_P2_PTYPE_IGNORED 0
+# define VC4_TEX_P2_PTYPE_CUBE_MAP_STRIDE 1
+# define VC4_TEX_P2_PTYPE_CHILD_IMAGE_DIMENSIONS 2
+# define VC4_TEX_P2_PTYPE_CHILD_IMAGE_OFFSETS 3
+
+/* VC4_TEX_P2_PTYPE_CUBE_MAP_STRIDE bits */
+#define VC4_TEX_P2_CMST_MASK VC4_MASK(29, 12)
+#define VC4_TEX_P2_CMST_SHIFT 12
+#define VC4_TEX_P2_BSLOD_MASK VC4_MASK(0, 0)
+#define VC4_TEX_P2_BSLOD_SHIFT 0
+
+/* VC4_TEX_P2_PTYPE_CHILD_IMAGE_DIMENSIONS */
+#define VC4_TEX_P2_CHEIGHT_MASK VC4_MASK(22, 12)
+#define VC4_TEX_P2_CHEIGHT_SHIFT 12
+#define VC4_TEX_P2_CWIDTH_MASK VC4_MASK(10, 0)
+#define VC4_TEX_P2_CWIDTH_SHIFT 0
+
+/* VC4_TEX_P2_PTYPE_CHILD_IMAGE_OFFSETS */
+#define VC4_TEX_P2_CYOFF_MASK VC4_MASK(22, 12)
+#define VC4_TEX_P2_CYOFF_SHIFT 12
+#define VC4_TEX_P2_CXOFF_MASK VC4_MASK(10, 0)
+#define VC4_TEX_P2_CXOFF_SHIFT 0
+
+#endif /* VC4_PACKET_H */
diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
new file mode 100644
index 000000000000..cfae803389d9
--- /dev/null
+++ b/drivers/gpu/drm/vc4/vc4_plane.c
@@ -0,0 +1,299 @@
+/*
+ * Copyright (C) 2015 Broadcom
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * Controls an individual layer of pixels being scanned out by the HVS.
+ */
+
+#include "vc4_drv.h"
+#include "vc4_regs.h"
+#include "drm_atomic_helper.h"
+#include "drm_fb_cma_helper.h"
+#include "drm_plane_helper.h"
+
+struct vc4_plane_state {
+ struct drm_plane_state base;
+ u32 *dlist;
+ u32 dlist_size; /* Number of dwords allocated for the display list. */
+ u32 dlist_count; /* Number of used dwords in the display list. */
+};
+
+static inline struct vc4_plane_state *
+to_vc4_plane_state(struct drm_plane_state *state)
+{
+ return (struct vc4_plane_state *)state;
+}
+
+static const struct hvs_format {
+ u32 drm; /* DRM_FORMAT_* */
+ u32 hvs; /* HVS_FORMAT_* */
+ u32 pixel_order;
+ bool has_alpha;
+} hvs_formats[] = {
+ {
+ .drm = DRM_FORMAT_XRGB8888, .hvs = HVS_PIXEL_FORMAT_RGBA8888,
+ .pixel_order = HVS_PIXEL_ORDER_ABGR, .has_alpha = false,
+ },
+ {
+ .drm = DRM_FORMAT_ARGB8888, .hvs = HVS_PIXEL_FORMAT_RGBA8888,
+ .pixel_order = HVS_PIXEL_ORDER_ABGR, .has_alpha = true,
+ },
+};
+
+static const struct hvs_format *
+vc4_get_hvs_format(u32 drm_format)
+{
+ unsigned i;
+
+ for (i = 0; i < ARRAY_SIZE(hvs_formats); i++) {
+ if (hvs_formats[i].drm == drm_format)
+ return &hvs_formats[i];
+ }
+
+ return NULL;
+}
+
+static bool plane_enabled(struct drm_plane_state *state)
+{
+ return state->fb && state->crtc;
+}
+
+struct drm_plane_state *vc4_plane_duplicate_state(struct drm_plane *plane)
+{
+ struct vc4_plane_state *vc4_state;
+
+ if (WARN_ON(!plane->state))
+ return NULL;
+
+ vc4_state = kmemdup(plane->state, sizeof(*vc4_state), GFP_KERNEL);
+ if (!vc4_state)
+ return NULL;
+
+ __drm_atomic_helper_plane_duplicate_state(plane, &vc4_state->base);
+
+ if (vc4_state->dlist) {
+ vc4_state->dlist = kmemdup(vc4_state->dlist,
+ vc4_state->dlist_count * 4,
+ GFP_KERNEL);
+ if (!vc4_state->dlist) {
+ kfree(vc4_state);
+ return NULL;
+ }
+ vc4_state->dlist_size = vc4_state->dlist_count;
+ }
+
+ return &vc4_state->base;
+}
+
+void vc4_plane_destroy_state(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
+
+ kfree(vc4_state->dlist);
+ __drm_atomic_helper_plane_destroy_state(plane, &vc4_state->base);
+ kfree(state);
+}
+
+/* Called during init to allocate the plane's atomic state. */
+void vc4_plane_reset(struct drm_plane *plane)
+{
+ struct vc4_plane_state *vc4_state;
+
+ WARN_ON(plane->state);
+
+ vc4_state = kzalloc(sizeof(*vc4_state), GFP_KERNEL);
+ if (!vc4_state)
+ return;
+
+ plane->state = &vc4_state->base;
+ vc4_state->base.plane = plane;
+}
+
+static void vc4_dlist_write(struct vc4_plane_state *vc4_state, u32 val)
+{
+ if (vc4_state->dlist_count == vc4_state->dlist_size) {
+ u32 new_size = max(4u, vc4_state->dlist_count * 2);
+ u32 *new_dlist = kmalloc(new_size * 4, GFP_KERNEL);
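+ /* On allocation failure the write is silently dropped and the
+ * dlist keeps its old contents and size.
+ */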
+ if (!new_dlist)
+ return;
+ memcpy(new_dlist, vc4_state->dlist, vc4_state->dlist_count * 4);
+
+ kfree(vc4_state->dlist);
+ vc4_state->dlist = new_dlist;
+ vc4_state->dlist_size = new_size;
+ }
+
+ vc4_state->dlist[vc4_state->dlist_count++] = val;
+}
+
+/*
+ * Writes out a full display list for an active plane to the plane's
+ * private dlist state.
+ */
+static int
+vc4_plane_mode_set(struct drm_plane *plane, struct drm_plane_state *state)
+{
+ struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
+ struct drm_framebuffer *fb = state->fb;
+ struct drm_gem_cma_object *bo = drm_fb_cma_get_gem_obj(fb, 0);
+ u32 ctl0_offset = vc4_state->dlist_count;
+ const struct hvs_format *format = vc4_get_hvs_format(fb->pixel_format);
+
+ vc4_dlist_write(vc4_state,
+ SCALER_CTL0_VALID |
+ (format->pixel_order << SCALER_CTL0_ORDER_SHIFT) |
+ (format->hvs << SCALER_CTL0_PIXEL_FORMAT_SHIFT) |
+ SCALER_CTL0_UNITY);
+
+ /* Position Word 0: Image Positions and Alpha Value */
+ vc4_dlist_write(vc4_state,
+ VC4_SET_FIELD(0xff, SCALER_POS0_FIXED_ALPHA) |
+ VC4_SET_FIELD(state->crtc_x, SCALER_POS0_START_X) |
+ VC4_SET_FIELD(state->crtc_y, SCALER_POS0_START_Y));
+
+ /*
+ * Position Word 1: Scaled Image Dimensions.
+ * Skipped due to SCALER_CTL0_UNITY scaling.
+ */
+
+ /* Position Word 2: Source Image Size, Alpha Mode */
+ vc4_dlist_write(vc4_state,
+ VC4_SET_FIELD(format->has_alpha ?
+ SCALER_POS2_ALPHA_MODE_PIPELINE :
+ SCALER_POS2_ALPHA_MODE_FIXED,
+ SCALER_POS2_ALPHA_MODE) |
+ VC4_SET_FIELD(state->crtc_w, SCALER_POS2_WIDTH) |
+ VC4_SET_FIELD(state->crtc_h, SCALER_POS2_HEIGHT));
+
+ /* Position Word 3: Context. Written by the HVS. */
+ vc4_dlist_write(vc4_state, 0xc0c0c0c0);
+
+ /* Pointer Word 0: RGB / Y Pointer */
+ vc4_dlist_write(vc4_state, bo->paddr + fb->offsets[0]);
+
+ /* Pointer Context Word 0: Written by the HVS */
+ vc4_dlist_write(vc4_state, 0xc0c0c0c0);
+
+ /* Pitch word 0: Pointer 0 Pitch */
+ vc4_dlist_write(vc4_state,
+ VC4_SET_FIELD(fb->pitches[0], SCALER_SRC_PITCH));
+
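+ /* Now that the full dlist has been emitted, patch its length
+ * back into the SIZE field of the CTL0 word written first.
+ */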
+ vc4_state->dlist[ctl0_offset] |=
+ VC4_SET_FIELD(vc4_state->dlist_count, SCALER_CTL0_SIZE);
+
+ return 0;
+}
+
+/*
+ * If a modeset involves changing the setup of a plane, the atomic
+ * infrastructure will call this to validate a proposed plane setup.
+ * However, if a plane isn't getting updated, this (and the
+ * corresponding vc4_plane_atomic_update) won't get called. Thus, we
+ * compute the dlist here and have all active plane dlists get updated
+ * in the CRTC's flush.
+ */
+static int vc4_plane_atomic_check(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
+
+ vc4_state->dlist_count = 0;
+
+ if (plane_enabled(state)) {
+ return vc4_plane_mode_set(plane, state);
+ } else {
+ return 0;
+ }
+}
+
+static void vc4_plane_atomic_update(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ /* No contents here. Since we don't know where in the CRTC's
+ * dlist we should be stored, our dlist is uploaded to the
+ * hardware with vc4_plane_write_dlist() at CRTC atomic_flush
+ * time.
+ */
+}
+
+u32 vc4_plane_write_dlist(struct drm_plane *plane, u32 __iomem *dlist)
+{
+ struct vc4_plane_state *vc4_state = to_vc4_plane_state(plane->state);
+ int i;
+
+ /* Can't memcpy_toio() because it needs to be 32-bit writes. */
+ for (i = 0; i < vc4_state->dlist_count; i++)
+ writel(vc4_state->dlist[i], &dlist[i]);
+
+ return vc4_state->dlist_count;
+}
+
+u32 vc4_plane_dlist_size(struct drm_plane_state *state)
+{
+ struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
+
+ return vc4_state->dlist_count;
+}
+
+static const struct drm_plane_helper_funcs vc4_plane_helper_funcs = {
+ .prepare_fb = NULL,
+ .cleanup_fb = NULL,
+ .atomic_check = vc4_plane_atomic_check,
+ .atomic_update = vc4_plane_atomic_update,
+};
+
+static void vc4_plane_destroy(struct drm_plane *plane)
+{
+ drm_plane_helper_disable(plane);
+ drm_plane_cleanup(plane);
+}
+
+static const struct drm_plane_funcs vc4_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = vc4_plane_destroy,
+ .set_property = NULL,
+ .reset = vc4_plane_reset,
+ .atomic_duplicate_state = vc4_plane_duplicate_state,
+ .atomic_destroy_state = vc4_plane_destroy_state,
+};
+
+struct drm_plane *vc4_plane_init(struct drm_device *dev,
+ enum drm_plane_type type)
+{
+ struct drm_plane *plane = NULL;
+ struct vc4_plane *vc4_plane;
+ u32 formats[ARRAY_SIZE(hvs_formats)];
+ int ret = 0;
+ unsigned i;
+
+ vc4_plane = devm_kzalloc(dev->dev, sizeof(*vc4_plane),
+ GFP_KERNEL);
+ if (!vc4_plane) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(hvs_formats); i++)
+ formats[i] = hvs_formats[i].drm;
+ plane = &vc4_plane->base;
+ ret = drm_universal_plane_init(dev, plane, 0xff,
+ &vc4_plane_funcs,
+ formats, ARRAY_SIZE(formats),
+ type);
+
+ drm_plane_helper_add(plane, &vc4_plane_helper_funcs);
+
+ return plane;
+fail:
+ if (plane)
+ vc4_plane_destroy(plane);
+
+ return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/vc4/vc4_qpu_defines.h b/drivers/gpu/drm/vc4/vc4_qpu_defines.h
new file mode 100644
index 000000000000..e47c994d36bf
--- /dev/null
+++ b/drivers/gpu/drm/vc4/vc4_qpu_defines.h
@@ -0,0 +1,268 @@
+/*
+ * Copyright © 2014 Broadcom
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef VC4_QPU_DEFINES_H
+#define VC4_QPU_DEFINES_H
+
+enum qpu_op_add {
+ QPU_A_NOP,
+ QPU_A_FADD,
+ QPU_A_FSUB,
+ QPU_A_FMIN,
+ QPU_A_FMAX,
+ QPU_A_FMINABS,
+ QPU_A_FMAXABS,
+ QPU_A_FTOI,
+ QPU_A_ITOF,
+ QPU_A_ADD = 12,
+ QPU_A_SUB,
+ QPU_A_SHR,
+ QPU_A_ASR,
+ QPU_A_ROR,
+ QPU_A_SHL,
+ QPU_A_MIN,
+ QPU_A_MAX,
+ QPU_A_AND,
+ QPU_A_OR,
+ QPU_A_XOR,
+ QPU_A_NOT,
+ QPU_A_CLZ,
+ QPU_A_V8ADDS = 30,
+ QPU_A_V8SUBS = 31,
+};
+
+enum qpu_op_mul {
+ QPU_M_NOP,
+ QPU_M_FMUL,
+ QPU_M_MUL24,
+ QPU_M_V8MULD,
+ QPU_M_V8MIN,
+ QPU_M_V8MAX,
+ QPU_M_V8ADDS,
+ QPU_M_V8SUBS,
+};
+
+enum qpu_raddr {
+ QPU_R_FRAG_PAYLOAD_ZW = 15, /* W for A file, Z for B file */
+ /* 0-31 are the plain regfile a or b fields */
+ QPU_R_UNIF = 32,
+ QPU_R_VARY = 35,
+ QPU_R_ELEM_QPU = 38,
+ QPU_R_NOP,
+ QPU_R_XY_PIXEL_COORD = 41,
+ QPU_R_MS_REV_FLAGS = 41,
+ QPU_R_VPM = 48,
+ QPU_R_VPM_LD_BUSY,
+ QPU_R_VPM_LD_WAIT,
+ QPU_R_MUTEX_ACQUIRE,
+};
+
+enum qpu_waddr {
+ /* 0-31 are the plain regfile a or b fields */
+ QPU_W_ACC0 = 32, /* aka r0 */
+ QPU_W_ACC1,
+ QPU_W_ACC2,
+ QPU_W_ACC3,
+ QPU_W_TMU_NOSWAP,
+ QPU_W_ACC5,
+ QPU_W_HOST_INT,
+ QPU_W_NOP,
+ QPU_W_UNIFORMS_ADDRESS,
+ QPU_W_QUAD_XY, /* X for regfile a, Y for regfile b */
+ QPU_W_MS_FLAGS = 42,
+ QPU_W_REV_FLAG = 42,
+ QPU_W_TLB_STENCIL_SETUP = 43,
+ QPU_W_TLB_Z,
+ QPU_W_TLB_COLOR_MS,
+ QPU_W_TLB_COLOR_ALL,
+ QPU_W_TLB_ALPHA_MASK,
+ QPU_W_VPM,
+ QPU_W_VPMVCD_SETUP, /* LD for regfile a, ST for regfile b */
+ QPU_W_VPM_ADDR, /* LD for regfile a, ST for regfile b */
+ QPU_W_MUTEX_RELEASE,
+ QPU_W_SFU_RECIP,
+ QPU_W_SFU_RECIPSQRT,
+ QPU_W_SFU_EXP,
+ QPU_W_SFU_LOG,
+ QPU_W_TMU0_S,
+ QPU_W_TMU0_T,
+ QPU_W_TMU0_R,
+ QPU_W_TMU0_B,
+ QPU_W_TMU1_S,
+ QPU_W_TMU1_T,
+ QPU_W_TMU1_R,
+ QPU_W_TMU1_B,
+};
+
+enum qpu_sig_bits {
+ QPU_SIG_SW_BREAKPOINT,
+ QPU_SIG_NONE,
+ QPU_SIG_THREAD_SWITCH,
+ QPU_SIG_PROG_END,
+ QPU_SIG_WAIT_FOR_SCOREBOARD,
+ QPU_SIG_SCOREBOARD_UNLOCK,
+ QPU_SIG_LAST_THREAD_SWITCH,
+ QPU_SIG_COVERAGE_LOAD,
+ QPU_SIG_COLOR_LOAD,
+ QPU_SIG_COLOR_LOAD_END,
+ QPU_SIG_LOAD_TMU0,
+ QPU_SIG_LOAD_TMU1,
+ QPU_SIG_ALPHA_MASK_LOAD,
+ QPU_SIG_SMALL_IMM,
+ QPU_SIG_LOAD_IMM,
+ QPU_SIG_BRANCH
+};
+
+enum qpu_mux {
+ /* hardware mux values */
+ QPU_MUX_R0,
+ QPU_MUX_R1,
+ QPU_MUX_R2,
+ QPU_MUX_R3,
+ QPU_MUX_R4,
+ QPU_MUX_R5,
+ QPU_MUX_A,
+ QPU_MUX_B,
+
+ /* non-hardware mux values */
+ QPU_MUX_IMM,
+};
+
+enum qpu_cond {
+ QPU_COND_NEVER,
+ QPU_COND_ALWAYS,
+ QPU_COND_ZS,
+ QPU_COND_ZC,
+ QPU_COND_NS,
+ QPU_COND_NC,
+ QPU_COND_CS,
+ QPU_COND_CC,
+};
+
+enum qpu_pack_mul {
+ QPU_PACK_MUL_NOP,
+ QPU_PACK_MUL_8888 = 3, /* replicated to each 8 bits of the 32-bit dst. */
+ QPU_PACK_MUL_8A,
+ QPU_PACK_MUL_8B,
+ QPU_PACK_MUL_8C,
+ QPU_PACK_MUL_8D,
+};
+
+enum qpu_pack_a {
+ QPU_PACK_A_NOP,
+ /* convert to 16 bit float if float input, or to int16. */
+ QPU_PACK_A_16A,
+ QPU_PACK_A_16B,
+ /* replicated to each 8 bits of the 32-bit dst. */
+ QPU_PACK_A_8888,
+ /* Convert to 8-bit unsigned int. */
+ QPU_PACK_A_8A,
+ QPU_PACK_A_8B,
+ QPU_PACK_A_8C,
+ QPU_PACK_A_8D,
+
+ /* Saturating variants of the previous instructions. */
+ QPU_PACK_A_32_SAT, /* int-only */
+ QPU_PACK_A_16A_SAT, /* int or float */
+ QPU_PACK_A_16B_SAT,
+ QPU_PACK_A_8888_SAT,
+ QPU_PACK_A_8A_SAT,
+ QPU_PACK_A_8B_SAT,
+ QPU_PACK_A_8C_SAT,
+ QPU_PACK_A_8D_SAT,
+};
+
+enum qpu_unpack_r4 {
+ QPU_UNPACK_R4_NOP,
+ QPU_UNPACK_R4_F16A_TO_F32,
+ QPU_UNPACK_R4_F16B_TO_F32,
+ QPU_UNPACK_R4_8D_REP,
+ QPU_UNPACK_R4_8A,
+ QPU_UNPACK_R4_8B,
+ QPU_UNPACK_R4_8C,
+ QPU_UNPACK_R4_8D,
+};
+
+#define QPU_MASK(high, low) ((((uint64_t)1<<((high)-(low)+1))-1)<<(low))
+/* Using the GNU statement expression extension */
+#define QPU_SET_FIELD(value, field) \
+ ({ \
+ uint64_t fieldval = (uint64_t)(value) << field ## _SHIFT; \
+ assert((fieldval & ~ field ## _MASK) == 0); \
+ fieldval & field ## _MASK; \
+ })
+
+#define QPU_GET_FIELD(word, field) ((uint32_t)(((word) & field ## _MASK) >> field ## _SHIFT))
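+/* Illustrative use (not part of this patch): pulling the signal bits
+ * out of a 64-bit instruction word:
+ *
+ * uint32_t sig = QPU_GET_FIELD(inst, QPU_SIG);
+ */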
+
+#define QPU_SIG_SHIFT 60
+#define QPU_SIG_MASK QPU_MASK(63, 60)
+
+#define QPU_UNPACK_SHIFT 57
+#define QPU_UNPACK_MASK QPU_MASK(59, 57)
+
+/**
+ * If set, the pack field means PACK_MUL or R4 packing, instead of normal
+ * regfile a packing.
+ */
+#define QPU_PM ((uint64_t)1 << 56)
+
+#define QPU_PACK_SHIFT 52
+#define QPU_PACK_MASK QPU_MASK(55, 52)
+
+#define QPU_COND_ADD_SHIFT 49
+#define QPU_COND_ADD_MASK QPU_MASK(51, 49)
+#define QPU_COND_MUL_SHIFT 46
+#define QPU_COND_MUL_MASK QPU_MASK(48, 46)
+
+#define QPU_SF ((uint64_t)1 << 45)
+
+#define QPU_WADDR_ADD_SHIFT 38
+#define QPU_WADDR_ADD_MASK QPU_MASK(43, 38)
+#define QPU_WADDR_MUL_SHIFT 32
+#define QPU_WADDR_MUL_MASK QPU_MASK(37, 32)
+
+#define QPU_OP_MUL_SHIFT 29
+#define QPU_OP_MUL_MASK QPU_MASK(31, 29)
+
+#define QPU_RADDR_A_SHIFT 18
+#define QPU_RADDR_A_MASK QPU_MASK(23, 18)
+#define QPU_RADDR_B_SHIFT 12
+#define QPU_RADDR_B_MASK QPU_MASK(17, 12)
+#define QPU_SMALL_IMM_SHIFT 12
+#define QPU_SMALL_IMM_MASK QPU_MASK(17, 12)
+
+#define QPU_ADD_A_SHIFT 9
+#define QPU_ADD_A_MASK QPU_MASK(11, 9)
+#define QPU_ADD_B_SHIFT 6
+#define QPU_ADD_B_MASK QPU_MASK(8, 6)
+#define QPU_MUL_A_SHIFT 3
+#define QPU_MUL_A_MASK QPU_MASK(5, 3)
+#define QPU_MUL_B_SHIFT 0
+#define QPU_MUL_B_MASK QPU_MASK(2, 0)
+
+#define QPU_WS ((uint64_t)1 << 44)
+
+#define QPU_OP_ADD_SHIFT 24
+#define QPU_OP_ADD_MASK QPU_MASK(28, 24)
+
+#endif /* VC4_QPU_DEFINES_H */
diff --git a/drivers/gpu/drm/vc4/vc4_regs.h b/drivers/gpu/drm/vc4/vc4_regs.h
new file mode 100644
index 000000000000..6efd872a32dd
--- /dev/null
+++ b/drivers/gpu/drm/vc4/vc4_regs.h
@@ -0,0 +1,437 @@
+/*
+ * Copyright © 2014-2015 Broadcom
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define VC4_MASK(high, low) (((1 << ((high) - (low) + 1)) - 1) << (low))
+/* Using the GNU statement expression extension */
+#define VC4_SET_FIELD(value, field) \
+ ({ \
+ uint32_t fieldval = (value) << field ## _SHIFT; \
+ WARN_ON((fieldval & ~ field ## _MASK) != 0); \
+ fieldval & field ## _MASK; \
+ })
+
+#define VC4_GET_FIELD(word, field) (((word) & field ## _MASK) >> field ## _SHIFT)
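+/* Illustrative use (see vc4_plane.c earlier in this patch):
+ *
+ * VC4_SET_FIELD(state->crtc_w, SCALER_POS2_WIDTH)
+ *
+ * packs the width into bits 11:0 and WARNs if the value overflows
+ * the field.
+ */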
+
+#define V3D_IDENT0 0x00000
+# define V3D_EXPECTED_IDENT0 \
+ ((2 << 24) | \
+ ('V' << 0) | \
+ ('3' << 8) | \
+ ('D' << 16))
+
+#define V3D_IDENT1 0x00004
+/* Multiples of 1kb */
+# define V3D_IDENT1_VPM_SIZE_MASK VC4_MASK(31, 28)
+# define V3D_IDENT1_VPM_SIZE_SHIFT 28
+# define V3D_IDENT1_NSEM_MASK VC4_MASK(23, 16)
+# define V3D_IDENT1_NSEM_SHIFT 16
+# define V3D_IDENT1_TUPS_MASK VC4_MASK(15, 12)
+# define V3D_IDENT1_TUPS_SHIFT 12
+# define V3D_IDENT1_QUPS_MASK VC4_MASK(11, 8)
+# define V3D_IDENT1_QUPS_SHIFT 8
+# define V3D_IDENT1_NSLC_MASK VC4_MASK(7, 4)
+# define V3D_IDENT1_NSLC_SHIFT 4
+# define V3D_IDENT1_REV_MASK VC4_MASK(3, 0)
+# define V3D_IDENT1_REV_SHIFT 0
+
+#define V3D_IDENT2 0x00008
+#define V3D_SCRATCH 0x00010
+#define V3D_L2CACTL 0x00020
+# define V3D_L2CACTL_L2CCLR (1 << 2)
+# define V3D_L2CACTL_L2CDIS (1 << 1)
+# define V3D_L2CACTL_L2CENA (1 << 0)
+
+#define V3D_SLCACTL 0x00024
+# define V3D_SLCACTL_T1CC_MASK VC4_MASK(27, 24)
+# define V3D_SLCACTL_T1CC_SHIFT 24
+# define V3D_SLCACTL_T0CC_MASK VC4_MASK(19, 16)
+# define V3D_SLCACTL_T0CC_SHIFT 16
+# define V3D_SLCACTL_UCC_MASK VC4_MASK(11, 8)
+# define V3D_SLCACTL_UCC_SHIFT 8
+# define V3D_SLCACTL_ICC_MASK VC4_MASK(3, 0)
+# define V3D_SLCACTL_ICC_SHIFT 0
+
+#define V3D_INTCTL 0x00030
+#define V3D_INTENA 0x00034
+#define V3D_INTDIS 0x00038
+# define V3D_INT_SPILLUSE (1 << 3)
+# define V3D_INT_OUTOMEM (1 << 2)
+# define V3D_INT_FLDONE (1 << 1)
+# define V3D_INT_FRDONE (1 << 0)
+
+#define V3D_CT0CS 0x00100
+#define V3D_CT1CS 0x00104
+#define V3D_CTNCS(n) (V3D_CT0CS + 4 * (n))
+# define V3D_CTRSTA (1 << 15)
+# define V3D_CTSEMA (1 << 12)
+# define V3D_CTRTSD (1 << 8)
+# define V3D_CTRUN (1 << 5)
+# define V3D_CTSUBS (1 << 4)
+# define V3D_CTERR (1 << 3)
+# define V3D_CTMODE (1 << 0)
+
+#define V3D_CT0EA 0x00108
+#define V3D_CT1EA 0x0010c
+#define V3D_CTNEA(n) (V3D_CT0EA + 4 * (n))
+#define V3D_CT0CA 0x00110
+#define V3D_CT1CA 0x00114
+#define V3D_CTNCA(n) (V3D_CT0CA + 4 * (n))
+#define V3D_CT00RA0 0x00118
+#define V3D_CT01RA0 0x0011c
+#define V3D_CTNRA0(n) (V3D_CT00RA0 + 4 * (n))
+#define V3D_CT0LC 0x00120
+#define V3D_CT1LC 0x00124
+#define V3D_CTNLC(n) (V3D_CT0LC + 4 * (n))
+#define V3D_CT0PC 0x00128
+#define V3D_CT1PC 0x0012c
+#define V3D_CTNPC(n) (V3D_CT0PC + 4 * (n))
+
+#define V3D_PCS 0x00130
+# define V3D_BMOOM (1 << 8)
+# define V3D_RMBUSY (1 << 3)
+# define V3D_RMACTIVE (1 << 2)
+# define V3D_BMBUSY (1 << 1)
+# define V3D_BMACTIVE (1 << 0)
+
+#define V3D_BFC 0x00134
+#define V3D_RFC 0x00138
+#define V3D_BPCA 0x00300
+#define V3D_BPCS 0x00304
+#define V3D_BPOA 0x00308
+#define V3D_BPOS 0x0030c
+#define V3D_BXCF 0x00310
+#define V3D_SQRSV0 0x00410
+#define V3D_SQRSV1 0x00414
+#define V3D_SQCNTL 0x00418
+#define V3D_SRQPC 0x00430
+#define V3D_SRQUA 0x00434
+#define V3D_SRQUL 0x00438
+#define V3D_SRQCS 0x0043c
+#define V3D_VPACNTL 0x00500
+#define V3D_VPMBASE 0x00504
+#define V3D_PCTRC 0x00670
+#define V3D_PCTRE 0x00674
+#define V3D_PCTR0 0x00680
+#define V3D_PCTRS0 0x00684
+#define V3D_PCTR1 0x00688
+#define V3D_PCTRS1 0x0068c
+#define V3D_PCTR2 0x00690
+#define V3D_PCTRS2 0x00694
+#define V3D_PCTR3 0x00698
+#define V3D_PCTRS3 0x0069c
+#define V3D_PCTR4 0x006a0
+#define V3D_PCTRS4 0x006a4
+#define V3D_PCTR5 0x006a8
+#define V3D_PCTRS5 0x006ac
+#define V3D_PCTR6 0x006b0
+#define V3D_PCTRS6 0x006b4
+#define V3D_PCTR7 0x006b8
+#define V3D_PCTRS7 0x006bc
+#define V3D_PCTR8 0x006c0
+#define V3D_PCTRS8 0x006c4
+#define V3D_PCTR9 0x006c8
+#define V3D_PCTRS9 0x006cc
+#define V3D_PCTR10 0x006d0
+#define V3D_PCTRS10 0x006d4
+#define V3D_PCTR11 0x006d8
+#define V3D_PCTRS11 0x006dc
+#define V3D_PCTR12 0x006e0
+#define V3D_PCTRS12 0x006e4
+#define V3D_PCTR13 0x006e8
+#define V3D_PCTRS13 0x006ec
+#define V3D_PCTR14 0x006f0
+#define V3D_PCTRS14 0x006f4
+#define V3D_PCTR15 0x006f8
+#define V3D_PCTRS15 0x006fc
+#define V3D_BGE 0x00f00
+#define V3D_FDBGO 0x00f04
+#define V3D_FDBGB 0x00f08
+#define V3D_FDBGR 0x00f0c
+#define V3D_FDBGS 0x00f10
+#define V3D_ERRSTAT 0x00f20
+
+/* XXX: mask widths */
+#define PV_CONTROL 0x00
+# define PV_CONTROL_CLK_MUX_EN (1 << 24)
+# define PV_CONTROL_FORMAT_MASK VC4_MASK(23, 21)
+# define PV_CONTROL_FORMAT_SHIFT 21
+# define PV_CONTROL_FORMAT_24 0
+# define PV_CONTROL_FORMAT_DSIV_16 1
+# define PV_CONTROL_FORMAT_DSIC_16 2
+# define PV_CONTROL_FORMAT_DSIV_18 3
+# define PV_CONTROL_FORMAT_DSIV_24 4
+
+# define PV_CONTROL_FIFO_LEVEL_MASK VC4_MASK(20, 15)
+# define PV_CONTROL_FIFO_LEVEL_SHIFT 15
+# define PV_CONTROL_CLR_AT_START (1 << 14)
+# define PV_CONTROL_TRIGGER_UNDERFLOW (1 << 13)
+# define PV_CONTROL_WAIT_HSTART (1 << 12)
+# define PV_CONTROL_CLK_SELECT_DSI 0
+# define PV_CONTROL_CLK_SELECT_DPI_SMI_HDMI 1
+# define PV_CONTROL_CLK_SELECT_MASK VC4_MASK(3, 2)
+# define PV_CONTROL_CLK_SELECT_SHIFT 2
+# define PV_CONTROL_FIFO_CLR (1 << 1)
+# define PV_CONTROL_EN (1 << 0)
+
+#define PV_V_CONTROL 0x04
+# define PV_VCONTROL_CONTINUOUS (1 << 1)
+# define PV_VCONTROL_VIDEN (1 << 0)
+
+#define PV_VSYNCD 0x08
+
+#define PV_HORZA 0x0c
+# define PV_HORZA_HBP_MASK VC4_MASK(31, 16)
+# define PV_HORZA_HBP_SHIFT 16
+# define PV_HORZA_HSYNC_MASK VC4_MASK(15, 0)
+# define PV_HORZA_HSYNC_SHIFT 0
+
+#define PV_HORZB 0x10
+# define PV_HORZB_HFP_MASK VC4_MASK(31, 16)
+# define PV_HORZB_HFP_SHIFT 16
+# define PV_HORZB_HACTIVE_MASK VC4_MASK(15, 0)
+# define PV_HORZB_HACTIVE_SHIFT 0
+
+#define PV_VERTA 0x14
+# define PV_VERTA_VBP_MASK VC4_MASK(31, 16)
+# define PV_VERTA_VBP_SHIFT 16
+# define PV_VERTA_VSYNC_MASK VC4_MASK(15, 0)
+# define PV_VERTA_VSYNC_SHIFT 0
+
+#define PV_VERTB 0x18
+# define PV_VERTB_VFP_MASK VC4_MASK(31, 16)
+# define PV_VERTB_VFP_SHIFT 16
+# define PV_VERTB_VACTIVE_MASK VC4_MASK(15, 0)
+# define PV_VERTB_VACTIVE_SHIFT 0
+
+#define PV_VERTA_EVEN 0x1c
+#define PV_VERTB_EVEN 0x20
+
+#define PV_INTEN 0x24
+#define PV_INTSTAT 0x28
+# define PV_INT_VID_IDLE (1 << 9)
+# define PV_INT_VFP_END (1 << 8)
+# define PV_INT_VFP_START (1 << 7)
+# define PV_INT_VACT_START (1 << 6)
+# define PV_INT_VBP_START (1 << 5)
+# define PV_INT_VSYNC_START (1 << 4)
+# define PV_INT_HFP_START (1 << 3)
+# define PV_INT_HACT_START (1 << 2)
+# define PV_INT_HBP_START (1 << 1)
+# define PV_INT_HSYNC_START (1 << 0)
+
+#define PV_STAT 0x2c
+# define PV_STAT_IDLE (1 << 8)
+# define PV_STAT_RUNNING_MASK VC4_MASK(7, 0)
+
+#define PV_HACT_ACT 0x30
+
+#define SCALER_DISPCTRL 0x00000000
+#define SCALER_DISPSTAT 0x00000004
+#define SCALER_DISPID 0x00000008
+#define SCALER_DISPECTRL 0x0000000c
+#define SCALER_DISPPROF 0x00000010
+#define SCALER_DISPDITHER 0x00000014
+#define SCALER_DISPEOLN 0x00000018
+#define SCALER_DISPLIST0 0x00000020
+#define SCALER_DISPLIST1 0x00000024
+#define SCALER_DISPLIST2 0x00000028
+#define SCALER_DISPLSTAT 0x0000002c
+#define SCALER_DISPLISTX(x) (SCALER_DISPLIST0 + \
+ (x) * (SCALER_DISPLIST1 - \
+ SCALER_DISPLIST0))
+
+#define SCALER_DISPLACT0 0x00000030
+#define SCALER_DISPLACT1 0x00000034
+#define SCALER_DISPLACT2 0x00000038
+#define SCALER_DISPCTRL0 0x00000040
+#define SCALER_DISPBKGND0 0x00000044
+#define SCALER_DISPSTAT0 0x00000048
+#define SCALER_DISPBASE0 0x0000004c
+#define SCALER_DISPCTRL1 0x00000050
+#define SCALER_DISPBKGND1 0x00000054
+#define SCALER_DISPSTAT1 0x00000058
+#define SCALER_DISPBASE1 0x0000005c
+#define SCALER_DISPCTRL2 0x00000060
+#define SCALER_DISPBKGND2 0x00000064
+#define SCALER_DISPSTAT2 0x00000068
+#define SCALER_DISPBASE2 0x0000006c
+#define SCALER_DISPALPHA2 0x00000070
+#define SCALER_GAMADDR 0x00000078
+#define SCALER_GAMDATA 0x000000e0
+#define SCALER_DLIST_START 0x00002000
+#define SCALER_DLIST_SIZE 0x00004000
+
+#define VC4_HDMI_CORE_REV 0x000
+#define VC4_HDMI_SW_RESET_CONTROL 0x004
+#define VC4_HDMI_HOTPLUG_INT 0x008
+
+#define VC4_HDMI_HOTPLUG 0x00c
+# define VC4_HDMI_HOTPLUG_CONNECTED (1 << 0)
+
+#define VC4_HDMI_RAM_PACKET_CONFIG 0x0a0
+# define VC4_HDMI_RAM_PACKET_ENABLE (1 << 16)
+
+#define VC4_HDMI_HORZA 0x0c4
+# define VC4_HDMI_HORZA_VPOS (1 << 14)
+# define VC4_HDMI_HORZA_HPOS (1 << 13)
+/* Horizontal active pixels (hdisplay). */
+# define VC4_HDMI_HORZA_HAP_MASK VC4_MASK(12, 0)
+# define VC4_HDMI_HORZA_HAP_SHIFT 0
+
+#define VC4_HDMI_HORZB 0x0c8
+/* Horizontal back porch (htotal - hsync_end). */
+# define VC4_HDMI_HORZB_HBP_MASK VC4_MASK(29, 20)
+# define VC4_HDMI_HORZB_HBP_SHIFT 20
+/* Horizontal sync pulse (hsync_end - hsync_start). */
+# define VC4_HDMI_HORZB_HSP_MASK VC4_MASK(19, 10)
+# define VC4_HDMI_HORZB_HSP_SHIFT 10
+/* Horizontal front porch (hsync_start - hdisplay). */
+# define VC4_HDMI_HORZB_HFP_MASK VC4_MASK(9, 0)
+# define VC4_HDMI_HORZB_HFP_SHIFT 0
+
+#define VC4_HDMI_FIFO_CTL 0x05c
+# define VC4_HDMI_FIFO_CTL_MASTER_SLAVE_N (1 << 0)
+
+#define VC4_HDMI_SCHEDULER_CONTROL 0x0c0
+# define VC4_HDMI_SCHEDULER_CONTROL_MANUAL_FORMAT (1 << 15)
+# define VC4_HDMI_SCHEDULER_CONTROL_IGNORE_VSYNC_PREDICTS (1 << 5)
+# define VC4_HDMI_SCHEDULER_CONTROL_VERT_ALWAYS_KEEPOUT (1 << 3)
+# define VC4_HDMI_SCHEDULER_CONTROL_HDMI_ACTIVE (1 << 1)
+# define VC4_HDMI_SCHEDULER_CONTROL_MODE_HDMI (1 << 0)
+
+#define VC4_HDMI_VERTA0 0x0cc
+#define VC4_HDMI_VERTA1 0x0d4
+/* Vertical sync pulse (vsync_end - vsync_start). */
+# define VC4_HDMI_VERTA_VSP_MASK VC4_MASK(24, 20)
+# define VC4_HDMI_VERTA_VSP_SHIFT 20
+/* Vertical front porch (vsync_start - vdisplay). */
+# define VC4_HDMI_VERTA_VFP_MASK VC4_MASK(19, 13)
+# define VC4_HDMI_VERTA_VFP_SHIFT 13
+/* Vertical active lines (vdisplay). */
+# define VC4_HDMI_VERTA_VAL_MASK VC4_MASK(12, 0)
+# define VC4_HDMI_VERTA_VAL_SHIFT 0
+
+#define VC4_HDMI_VERTB0 0x0d0
+#define VC4_HDMI_VERTB1 0x0d8
+/* Vertical sync pulse offset (for interlaced) */
+# define VC4_HDMI_VERTB_VSPO_MASK VC4_MASK(21, 9)
+# define VC4_HDMI_VERTB_VSPO_SHIFT 9
+/* Vertical back porch (vtotal - vsync_end). */
+# define VC4_HDMI_VERTB_VBP_MASK VC4_MASK(8, 0)
+# define VC4_HDMI_VERTB_VBP_SHIFT 0
+
+#define VC4_HDMI_TX_PHY_RESET_CTL 0x2c0
+
+#define VC4_HD_M_CTL 0x000
+#define VC4_HD_MAI_CTL 0x014
+
+#define VC4_HD_VID_CTL 0x038
+# define VC4_HD_VID_CTL_ENABLE (1 << 31)
+# define VC4_HD_VID_CTL_UNDERFLOW_ENABLE (1 << 30)
+# define VC4_HD_VID_CTL_FRAME_COUNTER_RESET (1 << 29)
+# define VC4_HD_VID_CTL_VSYNC_LOW (1 << 28)
+# define VC4_HD_VID_CTL_HSYNC_LOW (1 << 27)
+
+#define VC4_HD_CSC_CTL 0x040
+# define VC4_HD_CSC_CTL_ORDER_MASK VC4_MASK(7, 5)
+# define VC4_HD_CSC_CTL_ORDER_SHIFT 5
+# define VC4_HD_CSC_CTL_ORDER_RGB 0
+# define VC4_HD_CSC_CTL_ORDER_BGR 1
+# define VC4_HD_CSC_CTL_ORDER_BRG 2
+# define VC4_HD_CSC_CTL_ORDER_GRB 3
+# define VC4_HD_CSC_CTL_ORDER_GBR 4
+# define VC4_HD_CSC_CTL_ORDER_RBG 5
+# define VC4_HD_CSC_CTL_PADMSB (1 << 4)
+# define VC4_HD_CSC_CTL_MODE_MASK VC4_MASK(3, 2)
+# define VC4_HD_CSC_CTL_MODE_SHIFT 2
+# define VC4_HD_CSC_CTL_MODE_RGB_TO_SD_YPRPB 0
+# define VC4_HD_CSC_CTL_MODE_RGB_TO_HD_YPRPB 1
+# define VC4_HD_CSC_CTL_MODE_CUSTOM 2
+# define VC4_HD_CSC_CTL_RGB2YCC (1 << 1)
+# define VC4_HD_CSC_CTL_ENABLE (1 << 0)
+
+#define VC4_HD_FRAME_COUNT 0x068
+
+/* HVS display list information. */
+#define HVS_BOOTLOADER_DLIST_END 32
+
+enum hvs_pixel_format {
+ /* 8bpp */
+ HVS_PIXEL_FORMAT_RGB332 = 0,
+ /* 16bpp */
+ HVS_PIXEL_FORMAT_RGBA4444 = 1,
+ HVS_PIXEL_FORMAT_RGB555 = 2,
+ HVS_PIXEL_FORMAT_RGBA5551 = 3,
+ HVS_PIXEL_FORMAT_RGB565 = 4,
+ /* 24bpp */
+ HVS_PIXEL_FORMAT_RGB888 = 5,
+ HVS_PIXEL_FORMAT_RGBA6666 = 6,
+ /* 32bpp */
+ HVS_PIXEL_FORMAT_RGBA8888 = 7
+};
+
+/* Note: the LSB is the rightmost character shown. Only valid for
+ * HVS_PIXEL_FORMAT_RGBA8888, not RGB888.
+ */
+#define HVS_PIXEL_ORDER_RGBA 0
+#define HVS_PIXEL_ORDER_BGRA 1
+#define HVS_PIXEL_ORDER_ARGB 2
+#define HVS_PIXEL_ORDER_ABGR 3
+
+#define HVS_PIXEL_ORDER_XBRG 0
+#define HVS_PIXEL_ORDER_XRBG 1
+#define HVS_PIXEL_ORDER_XRGB 2
+#define HVS_PIXEL_ORDER_XBGR 3
+
+#define HVS_PIXEL_ORDER_XYCBCR 0
+#define HVS_PIXEL_ORDER_XYCRCB 1
+#define HVS_PIXEL_ORDER_YXCBCR 2
+#define HVS_PIXEL_ORDER_YXCRCB 3
+
+#define SCALER_CTL0_END (1 << 31)
+#define SCALER_CTL0_VALID (1 << 30)
+
+#define SCALER_CTL0_SIZE_MASK VC4_MASK(29, 24)
+#define SCALER_CTL0_SIZE_SHIFT 24
+
+#define SCALER_CTL0_HFLIP (1 << 16)
+#define SCALER_CTL0_VFLIP (1 << 15)
+
+#define SCALER_CTL0_ORDER_MASK VC4_MASK(14, 13)
+#define SCALER_CTL0_ORDER_SHIFT 13
+
+/* Set to indicate no scaling. */
+#define SCALER_CTL0_UNITY (1 << 4)
+
+#define SCALER_CTL0_PIXEL_FORMAT_MASK VC4_MASK(3, 0)
+#define SCALER_CTL0_PIXEL_FORMAT_SHIFT 0
+
+#define SCALER_POS0_FIXED_ALPHA_MASK VC4_MASK(31, 24)
+#define SCALER_POS0_FIXED_ALPHA_SHIFT 24
+
+#define SCALER_POS0_START_Y_MASK VC4_MASK(23, 12)
+#define SCALER_POS0_START_Y_SHIFT 12
+
+#define SCALER_POS0_START_X_MASK VC4_MASK(11, 0)
+#define SCALER_POS0_START_X_SHIFT 0
+
+#define SCALER_POS2_ALPHA_MODE_MASK VC4_MASK(31, 30)
+#define SCALER_POS2_ALPHA_MODE_SHIFT 30
+#define SCALER_POS2_ALPHA_MODE_PIPELINE 0
+#define SCALER_POS2_ALPHA_MODE_FIXED 1
+#define SCALER_POS2_ALPHA_MODE_FIXED_NONZERO 2
+#define SCALER_POS2_ALPHA_MODE_FIXED_OVER_0x07 3
+
+#define SCALER_POS2_HEIGHT_MASK VC4_MASK(27, 16)
+#define SCALER_POS2_HEIGHT_SHIFT 16
+
+#define SCALER_POS2_WIDTH_MASK VC4_MASK(11, 0)
+#define SCALER_POS2_WIDTH_SHIFT 0
+
+#define SCALER_SRC_PITCH_MASK VC4_MASK(15, 0)
+#define SCALER_SRC_PITCH_SHIFT 0
diff --git a/drivers/gpu/drm/vc4/vc4_render_cl.c b/drivers/gpu/drm/vc4/vc4_render_cl.c
new file mode 100644
index 000000000000..973ffa20ffb6
--- /dev/null
+++ b/drivers/gpu/drm/vc4/vc4_render_cl.c
@@ -0,0 +1,448 @@
+/*
+ * Copyright © 2014-2015 Broadcom
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+/**
+ * DOC: Render command list generation
+ *
+ * In the VC4 driver, render command list generation is performed by the
+ * kernel instead of userspace. We do this because validating a
+ * user-submitted command list is hard to get right and has high CPU overhead,
+ * while the number of valid configurations for render command lists is
+ * actually fairly low.
+ */
+
+#include "uapi/drm/vc4_drm.h"
+#include "vc4_drv.h"
+#include "vc4_packet.h"
+
+struct vc4_rcl_setup {
+ struct drm_gem_cma_object *color_read;
+ struct drm_gem_cma_object *color_ms_write;
+ struct drm_gem_cma_object *zs_read;
+ struct drm_gem_cma_object *zs_write;
+
+ struct drm_gem_cma_object *rcl;
+ u32 next_offset;
+};
+
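+/* Helpers that append a byte/u16/u32 to the render list BO at the
+ * current write offset. The BO is sized up front in
+ * vc4_create_rcl_bo(), so no bounds checking is needed here.
+ */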
+static inline void rcl_u8(struct vc4_rcl_setup *setup, u8 val)
+{
+ *(u8 *)(setup->rcl->vaddr + setup->next_offset) = val;
+ setup->next_offset += 1;
+}
+
+static inline void rcl_u16(struct vc4_rcl_setup *setup, u16 val)
+{
+ *(u16 *)(setup->rcl->vaddr + setup->next_offset) = val;
+ setup->next_offset += 2;
+}
+
+static inline void rcl_u32(struct vc4_rcl_setup *setup, u32 val)
+{
+ *(u32 *)(setup->rcl->vaddr + setup->next_offset) = val;
+ setup->next_offset += 4;
+}
+
+/*
+ * Emits a no-op STORE_TILE_BUFFER_GENERAL.
+ *
+ * If we emit a PACKET_TILE_COORDINATES, it must be followed by a store of
+ * some sort before another load is triggered.
+ */
+static void vc4_store_before_load(struct vc4_rcl_setup *setup)
+{
+ rcl_u8(setup, VC4_PACKET_STORE_TILE_BUFFER_GENERAL);
+ rcl_u16(setup,
+ VC4_SET_FIELD(VC4_LOADSTORE_TILE_BUFFER_NONE,
+ VC4_LOADSTORE_TILE_BUFFER_BUFFER) |
+ VC4_STORE_TILE_BUFFER_DISABLE_COLOR_CLEAR |
+ VC4_STORE_TILE_BUFFER_DISABLE_ZS_CLEAR |
+ VC4_STORE_TILE_BUFFER_DISABLE_VG_MASK_CLEAR);
+ rcl_u32(setup, 0); /* no address, since we're in None mode */
+}
+
+/*
+ * Emits a PACKET_TILE_COORDINATES if one isn't already pending.
+ *
+ * The tile coordinates packet triggers a pending load if there is one, is
+ * used for clipping during rendering, and determines where loads/stores
+ * happen relative to their base address.
+ */
+static void vc4_tile_coordinates(struct vc4_rcl_setup *setup,
+ uint32_t x, uint32_t y)
+{
+ rcl_u8(setup, VC4_PACKET_TILE_COORDINATES);
+ rcl_u8(setup, x);
+ rcl_u8(setup, y);
+}
+
+static void emit_tile(struct vc4_exec_info *exec,
+ struct vc4_rcl_setup *setup,
+ uint8_t x, uint8_t y, bool first, bool last)
+{
+ bool has_bin = exec->args->bin_cl_size != 0;
+
+ /* Note that the load doesn't actually occur until the
+ * tile coords packet is processed, and only one load
+ * may be outstanding at a time.
+ */
+ if (setup->color_read) {
+ rcl_u8(setup, VC4_PACKET_LOAD_TILE_BUFFER_GENERAL);
+ rcl_u16(setup, exec->args->color_read.bits);
+ rcl_u32(setup,
+ setup->color_read->paddr +
+ exec->args->color_read.offset);
+ }
+
+ if (setup->zs_read) {
+ if (setup->color_read) {
+ /* Exec previous load. */
+ vc4_tile_coordinates(setup, x, y);
+ vc4_store_before_load(setup);
+ }
+
+ rcl_u8(setup, VC4_PACKET_LOAD_TILE_BUFFER_GENERAL);
+ rcl_u16(setup, exec->args->zs_read.bits);
+ rcl_u32(setup,
+ setup->zs_read->paddr + exec->args->zs_read.offset);
+ }
+
+ /* Clipping depends on tile coordinates having been
+ * emitted, so we always need one here.
+ */
+ vc4_tile_coordinates(setup, x, y);
+
+ /* Wait for the binner before jumping to the first
+ * tile's lists.
+ */
+ if (first && has_bin)
+ rcl_u8(setup, VC4_PACKET_WAIT_ON_SEMAPHORE);
+
+ if (has_bin) {
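+ /* Branch into the binner's list for this tile; each tile's
+ * list pointer lives at a 32-byte-spaced slot in the tile
+ * allocation BO (presumably matching the initial bin
+ * allocation block size).
+ */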
+ rcl_u8(setup, VC4_PACKET_BRANCH_TO_SUB_LIST);
+ rcl_u32(setup, (exec->tile_bo->paddr +
+ exec->tile_alloc_offset +
+ (y * exec->bin_tiles_x + x) * 32));
+ }
+
+ if (setup->zs_write) {
+ rcl_u8(setup, VC4_PACKET_STORE_TILE_BUFFER_GENERAL);
+ rcl_u16(setup, exec->args->zs_write.bits |
+ (setup->color_ms_write ?
+ VC4_STORE_TILE_BUFFER_DISABLE_COLOR_CLEAR : 0));
+ rcl_u32(setup,
+ (setup->zs_write->paddr + exec->args->zs_write.offset) |
+ ((last && !setup->color_ms_write) ?
+ VC4_LOADSTORE_TILE_BUFFER_EOF : 0));
+ }
+
+ if (setup->color_ms_write) {
+ if (setup->zs_write) {
+ /* Reset after previous store */
+ vc4_tile_coordinates(setup, x, y);
+ }
+
+ if (last)
+ rcl_u8(setup, VC4_PACKET_STORE_MS_TILE_BUFFER_AND_EOF);
+ else
+ rcl_u8(setup, VC4_PACKET_STORE_MS_TILE_BUFFER);
+ }
+}
+
+static int vc4_create_rcl_bo(struct drm_device *dev, struct vc4_exec_info *exec,
+ struct vc4_rcl_setup *setup)
+{
+ bool has_bin = exec->args->bin_cl_size != 0;
+ uint8_t min_x_tile = exec->args->min_x_tile;
+ uint8_t min_y_tile = exec->args->min_y_tile;
+ uint8_t max_x_tile = exec->args->max_x_tile;
+ uint8_t max_y_tile = exec->args->max_y_tile;
+ uint8_t xtiles = max_x_tile - min_x_tile + 1;
+ uint8_t ytiles = max_y_tile - min_y_tile + 1;
+ uint8_t x, y;
+ uint32_t size, loop_body_size;
+
+ size = VC4_PACKET_TILE_RENDERING_MODE_CONFIG_SIZE;
+ loop_body_size = VC4_PACKET_TILE_COORDINATES_SIZE;
+
+ if (exec->args->flags & VC4_SUBMIT_CL_USE_CLEAR_COLOR) {
+ size += VC4_PACKET_CLEAR_COLORS_SIZE +
+ VC4_PACKET_TILE_COORDINATES_SIZE +
+ VC4_PACKET_STORE_TILE_BUFFER_GENERAL_SIZE;
+ }
+
+ if (setup->color_read)
+ loop_body_size += VC4_PACKET_LOAD_TILE_BUFFER_GENERAL_SIZE;
+ if (setup->zs_read) {
+ if (setup->color_read) {
+ loop_body_size += VC4_PACKET_TILE_COORDINATES_SIZE;
+ loop_body_size += VC4_PACKET_STORE_TILE_BUFFER_GENERAL_SIZE;
+ }
+ loop_body_size += VC4_PACKET_LOAD_TILE_BUFFER_GENERAL_SIZE;
+ }
+
+ if (has_bin) {
+ size += VC4_PACKET_WAIT_ON_SEMAPHORE_SIZE;
+ loop_body_size += VC4_PACKET_BRANCH_TO_SUB_LIST_SIZE;
+ }
+
+ if (setup->zs_write)
+ loop_body_size += VC4_PACKET_STORE_TILE_BUFFER_GENERAL_SIZE;
+ if (setup->color_ms_write) {
+ if (setup->zs_write)
+ loop_body_size += VC4_PACKET_TILE_COORDINATES_SIZE;
+ loop_body_size += VC4_PACKET_STORE_MS_TILE_BUFFER_SIZE;
+ }
+ size += xtiles * ytiles * loop_body_size;
+
+ setup->rcl = &vc4_bo_create(dev, size)->base;
+ if (!setup->rcl)
+ return -ENOMEM;
+ list_add_tail(&to_vc4_bo(&setup->rcl->base)->unref_head,
+ &exec->unref_list);
+
+ rcl_u8(setup, VC4_PACKET_TILE_RENDERING_MODE_CONFIG);
+ rcl_u32(setup,
+ (setup->color_ms_write ?
+ (setup->color_ms_write->paddr +
+ exec->args->color_ms_write.offset) :
+ 0));
+ rcl_u16(setup, exec->args->width);
+ rcl_u16(setup, exec->args->height);
+ rcl_u16(setup, exec->args->color_ms_write.bits);
+
+ /* The tile buffer gets cleared when the previous tile is stored. If
+ * the clear values changed between frames, then the tile buffer has
+ * stale clear values in it, so we have to do a store in None mode (no
+ * writes) so that we trigger the tile buffer clear.
+ */
+ if (exec->args->flags & VC4_SUBMIT_CL_USE_CLEAR_COLOR) {
+ rcl_u8(setup, VC4_PACKET_CLEAR_COLORS);
+ rcl_u32(setup, exec->args->clear_color[0]);
+ rcl_u32(setup, exec->args->clear_color[1]);
+ rcl_u32(setup, exec->args->clear_z);
+ rcl_u8(setup, exec->args->clear_s);
+
+ vc4_tile_coordinates(setup, 0, 0);
+
+ rcl_u8(setup, VC4_PACKET_STORE_TILE_BUFFER_GENERAL);
+ rcl_u16(setup, VC4_LOADSTORE_TILE_BUFFER_NONE);
+ rcl_u32(setup, 0); /* no address, since we're in None mode */
+ }
+
+ for (y = min_y_tile; y <= max_y_tile; y++) {
+ for (x = min_x_tile; x <= max_x_tile; x++) {
+ bool first = (x == min_x_tile && y == min_y_tile);
+ bool last = (x == max_x_tile && y == max_y_tile);
+ emit_tile(exec, setup, x, y, first, last);
+ }
+ }
+
+ BUG_ON(setup->next_offset != size);
+ exec->ct1ca = setup->rcl->paddr;
+ exec->ct1ea = setup->rcl->paddr + setup->next_offset;
+
+ return 0;
+}
+
+static int vc4_rcl_surface_setup(struct vc4_exec_info *exec,
+ struct drm_gem_cma_object **obj,
+ struct drm_vc4_submit_rcl_surface *surf)
+{
+ uint8_t tiling = VC4_GET_FIELD(surf->bits,
+ VC4_LOADSTORE_TILE_BUFFER_TILING);
+ uint8_t buffer = VC4_GET_FIELD(surf->bits,
+ VC4_LOADSTORE_TILE_BUFFER_BUFFER);
+ uint8_t format = VC4_GET_FIELD(surf->bits,
+ VC4_LOADSTORE_TILE_BUFFER_FORMAT);
+ int cpp;
+
+ if (surf->pad != 0) {
+ DRM_ERROR("Padding unset\n");
+ return -EINVAL;
+ }
+
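+ /* An hindex of ~0 marks a surface with no buffer attached. */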
+ if (surf->hindex == ~0)
+ return 0;
+
+ if (!vc4_use_bo(exec, surf->hindex, VC4_MODE_RENDER, obj))
+ return -EINVAL;
+
+ if (surf->bits & ~(VC4_LOADSTORE_TILE_BUFFER_TILING_MASK |
+ VC4_LOADSTORE_TILE_BUFFER_BUFFER_MASK |
+ VC4_LOADSTORE_TILE_BUFFER_FORMAT_MASK)) {
+ DRM_ERROR("Unknown bits in load/store: 0x%04x\n",
+ surf->bits);
+ return -EINVAL;
+ }
+
+ if (tiling > VC4_TILING_FORMAT_LT) {
+ DRM_ERROR("Bad tiling format\n");
+ return -EINVAL;
+ }
+
+ if (buffer == VC4_LOADSTORE_TILE_BUFFER_ZS) {
+ if (format != 0) {
+ DRM_ERROR("No color format should be set for ZS\n");
+ return -EINVAL;
+ }
+ cpp = 4;
+ } else if (buffer == VC4_LOADSTORE_TILE_BUFFER_COLOR) {
+ switch (format) {
+ case VC4_LOADSTORE_TILE_BUFFER_BGR565:
+ case VC4_LOADSTORE_TILE_BUFFER_BGR565_DITHER:
+ cpp = 2;
+ break;
+ case VC4_LOADSTORE_TILE_BUFFER_RGBA8888:
+ cpp = 4;
+ break;
+ default:
+ DRM_ERROR("Bad tile buffer format\n");
+ return -EINVAL;
+ }
+ } else {
+ DRM_ERROR("Bad load/store buffer %d.\n", buffer);
+ return -EINVAL;
+ }
+
+ if (surf->offset & 0xf) {
+ DRM_ERROR("load/store buffer must be 16b aligned.\n");
+ return -EINVAL;
+ }
+
+ if (!vc4_check_tex_size(exec, *obj, surf->offset, tiling,
+ exec->args->width, exec->args->height, cpp)) {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+vc4_rcl_ms_surface_setup(struct vc4_exec_info *exec,
+ struct drm_gem_cma_object **obj,
+ struct drm_vc4_submit_rcl_surface *surf)
+{
+ uint8_t tiling = VC4_GET_FIELD(surf->bits,
+ VC4_RENDER_CONFIG_MEMORY_FORMAT);
+ uint8_t format = VC4_GET_FIELD(surf->bits,
+ VC4_RENDER_CONFIG_FORMAT);
+ int cpp;
+
+ if (surf->pad != 0) {
+ DRM_ERROR("Padding unset\n");
+ return -EINVAL;
+ }
+
+ if (surf->bits & ~(VC4_RENDER_CONFIG_MEMORY_FORMAT_MASK |
+ VC4_RENDER_CONFIG_FORMAT_MASK)) {
+ DRM_ERROR("Unknown bits in render config: 0x%04x\n",
+ surf->bits);
+ return -EINVAL;
+ }
+
+ if (surf->hindex == ~0)
+ return 0;
+
+ if (!vc4_use_bo(exec, surf->hindex, VC4_MODE_RENDER, obj))
+ return -EINVAL;
+
+ if (tiling > VC4_TILING_FORMAT_LT) {
+ DRM_ERROR("Bad tiling format\n");
+ return -EINVAL;
+ }
+
+ switch (format) {
+ case VC4_RENDER_CONFIG_FORMAT_BGR565_DITHERED:
+ case VC4_RENDER_CONFIG_FORMAT_BGR565:
+ cpp = 2;
+ break;
+ case VC4_RENDER_CONFIG_FORMAT_RGBA8888:
+ cpp = 4;
+ break;
+ default:
+ DRM_ERROR("Bad tile buffer format\n");
+ return -EINVAL;
+ }
+
+ if (!vc4_check_tex_size(exec, *obj, surf->offset, tiling,
+ exec->args->width, exec->args->height, cpp)) {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int vc4_get_rcl(struct drm_device *dev, struct vc4_exec_info *exec)
+{
+ struct vc4_rcl_setup setup = {0};
+ struct drm_vc4_submit_cl *args = exec->args;
+ bool has_bin = args->bin_cl_size != 0;
+ int ret;
+
+ if (args->min_x_tile > args->max_x_tile ||
+ args->min_y_tile > args->max_y_tile) {
+ DRM_ERROR("Bad render tile set (%d,%d)-(%d,%d)\n",
+ args->min_x_tile, args->min_y_tile,
+ args->max_x_tile, args->max_y_tile);
+ return -EINVAL;
+ }
+
+ if (has_bin &&
+ (args->max_x_tile > exec->bin_tiles_x ||
+ args->max_y_tile > exec->bin_tiles_y)) {
+ DRM_ERROR("Render tiles (%d,%d) outside of bin config (%d,%d)\n",
+ args->max_x_tile, args->max_y_tile,
+ exec->bin_tiles_x, exec->bin_tiles_y);
+ return -EINVAL;
+ }
+
+ ret = vc4_rcl_surface_setup(exec, &setup.color_read, &args->color_read);
+ if (ret)
+ return ret;
+
+ ret = vc4_rcl_ms_surface_setup(exec, &setup.color_ms_write,
+ &args->color_ms_write);
+ if (ret)
+ return ret;
+
+ ret = vc4_rcl_surface_setup(exec, &setup.zs_read, &args->zs_read);
+ if (ret)
+ return ret;
+
+ ret = vc4_rcl_surface_setup(exec, &setup.zs_write, &args->zs_write);
+ if (ret)
+ return ret;
+
+ /* We shouldn't even have the job submitted to us if there's no
+ * surface to write out.
+ */
+ if (!setup.color_ms_write && !setup.zs_write) {
+ DRM_ERROR("RCL requires color or Z/S write\n");
+ return -EINVAL;
+ }
+
+ return vc4_create_rcl_bo(dev, exec, &setup);
+}
diff --git a/drivers/gpu/drm/vc4/vc4_v3d.c b/drivers/gpu/drm/vc4/vc4_v3d.c
new file mode 100644
index 000000000000..8841d8e97695
--- /dev/null
+++ b/drivers/gpu/drm/vc4/vc4_v3d.c
@@ -0,0 +1,278 @@
+/*
+ * Copyright (c) 2014 The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "linux/component.h"
+#include "soc/bcm2835/raspberrypi-firmware-property.h"
+#include "vc4_drv.h"
+#include "vc4_regs.h"
+
+#ifdef CONFIG_DEBUG_FS
+#define REGDEF(reg) { reg, #reg }
+static const struct {
+ uint32_t reg;
+ const char *name;
+} vc4_reg_defs[] = {
+ REGDEF(V3D_IDENT0),
+ REGDEF(V3D_IDENT1),
+ REGDEF(V3D_IDENT2),
+ REGDEF(V3D_SCRATCH),
+ REGDEF(V3D_L2CACTL),
+ REGDEF(V3D_SLCACTL),
+ REGDEF(V3D_INTCTL),
+ REGDEF(V3D_INTENA),
+ REGDEF(V3D_INTDIS),
+ REGDEF(V3D_CT0CS),
+ REGDEF(V3D_CT1CS),
+ REGDEF(V3D_CT0EA),
+ REGDEF(V3D_CT1EA),
+ REGDEF(V3D_CT0CA),
+ REGDEF(V3D_CT1CA),
+ REGDEF(V3D_CT00RA0),
+ REGDEF(V3D_CT01RA0),
+ REGDEF(V3D_CT0LC),
+ REGDEF(V3D_CT1LC),
+ REGDEF(V3D_CT0PC),
+ REGDEF(V3D_CT1PC),
+ REGDEF(V3D_PCS),
+ REGDEF(V3D_BFC),
+ REGDEF(V3D_RFC),
+ REGDEF(V3D_BPCA),
+ REGDEF(V3D_BPCS),
+ REGDEF(V3D_BPOA),
+ REGDEF(V3D_BPOS),
+ REGDEF(V3D_BXCF),
+ REGDEF(V3D_SQRSV0),
+ REGDEF(V3D_SQRSV1),
+ REGDEF(V3D_SQCNTL),
+ REGDEF(V3D_SRQPC),
+ REGDEF(V3D_SRQUA),
+ REGDEF(V3D_SRQUL),
+ REGDEF(V3D_SRQCS),
+ REGDEF(V3D_VPACNTL),
+ REGDEF(V3D_VPMBASE),
+ REGDEF(V3D_PCTRC),
+ REGDEF(V3D_PCTRE),
+ REGDEF(V3D_PCTR0),
+ REGDEF(V3D_PCTRS0),
+ REGDEF(V3D_PCTR1),
+ REGDEF(V3D_PCTRS1),
+ REGDEF(V3D_PCTR2),
+ REGDEF(V3D_PCTRS2),
+ REGDEF(V3D_PCTR3),
+ REGDEF(V3D_PCTRS3),
+ REGDEF(V3D_PCTR4),
+ REGDEF(V3D_PCTRS4),
+ REGDEF(V3D_PCTR5),
+ REGDEF(V3D_PCTRS5),
+ REGDEF(V3D_PCTR6),
+ REGDEF(V3D_PCTRS6),
+ REGDEF(V3D_PCTR7),
+ REGDEF(V3D_PCTRS7),
+ REGDEF(V3D_PCTR8),
+ REGDEF(V3D_PCTRS8),
+ REGDEF(V3D_PCTR9),
+ REGDEF(V3D_PCTRS9),
+ REGDEF(V3D_PCTR10),
+ REGDEF(V3D_PCTRS10),
+ REGDEF(V3D_PCTR11),
+ REGDEF(V3D_PCTRS11),
+ REGDEF(V3D_PCTR12),
+ REGDEF(V3D_PCTRS12),
+ REGDEF(V3D_PCTR13),
+ REGDEF(V3D_PCTRS13),
+ REGDEF(V3D_PCTR14),
+ REGDEF(V3D_PCTRS14),
+ REGDEF(V3D_PCTR15),
+ REGDEF(V3D_PCTRS15),
+ REGDEF(V3D_BGE),
+ REGDEF(V3D_FDBGO),
+ REGDEF(V3D_FDBGB),
+ REGDEF(V3D_FDBGR),
+ REGDEF(V3D_FDBGS),
+ REGDEF(V3D_ERRSTAT),
+};
+
+int vc4_v3d_debugfs_regs(struct seq_file *m, void *unused)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(vc4_reg_defs); i++) {
+ seq_printf(m, "%s (0x%04x): 0x%08x\n",
+ vc4_reg_defs[i].name, vc4_reg_defs[i].reg,
+ V3D_READ(vc4_reg_defs[i].reg));
+ }
+
+ return 0;
+}
+
+int vc4_v3d_debugfs_ident(struct seq_file *m, void *unused)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+ uint32_t ident1 = V3D_READ(V3D_IDENT1);
+ uint32_t nslc = VC4_GET_FIELD(ident1, V3D_IDENT1_NSLC);
+ uint32_t tups = VC4_GET_FIELD(ident1, V3D_IDENT1_TUPS);
+ uint32_t qups = VC4_GET_FIELD(ident1, V3D_IDENT1_QUPS);
+
+ seq_printf(m, "Revision: %d\n", VC4_GET_FIELD(ident1, V3D_IDENT1_REV));
+ seq_printf(m, "Slices: %d\n", nslc);
+ seq_printf(m, "TMUs: %d\n", nslc * tups);
+ seq_printf(m, "QPUs: %d\n", nslc * qups);
+ seq_printf(m, "Semaphores: %d\n", VC4_GET_FIELD(ident1, V3D_IDENT1_NSEM));
+
+ return 0;
+}
+#endif /* CONFIG_DEBUG_FS */
+
+/*
+ * Asks the firmware to turn power to the V3D engine on or off.
+ *
+ * This may be doable with just the clocks interface, though this
+ * packet does some other register setup from the firmware, too.
+ */
+int
+vc4_v3d_set_power(struct vc4_dev *vc4, bool on)
+{
+ u32 packet = on;
+
+ return rpi_firmware_property(vc4->firmware_node,
+ RPI_FIRMWARE_SET_ENABLE_QPU,
+ &packet, sizeof(packet));
+}
+
+static void vc4_v3d_init_hw(struct drm_device *dev)
+{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+
+ /* Take all the memory that would have been reserved for user
+ * QPU programs, since we don't have an interface for running
+ * them anyway.
+ */
+ V3D_WRITE(V3D_VPMBASE, 0);
+}
+
+static int vc4_v3d_bind(struct device *dev, struct device *master, void *data)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct drm_device *drm = dev_get_drvdata(master);
+ struct vc4_dev *vc4 = to_vc4_dev(drm);
+ struct vc4_v3d *v3d = NULL;
+ int ret;
+
+ v3d = devm_kzalloc(&pdev->dev, sizeof(*v3d), GFP_KERNEL);
+ if (!v3d)
+ return -ENOMEM;
+
+ v3d->pdev = pdev;
+
+ v3d->regs = vc4_ioremap_regs(pdev, 0);
+ if (IS_ERR(v3d->regs))
+ return PTR_ERR(v3d->regs);
+
+ vc4->v3d = v3d;
+
+ ret = vc4_v3d_set_power(vc4, true);
+ if (ret)
+ return ret;
+
+ if (V3D_READ(V3D_IDENT0) != V3D_EXPECTED_IDENT0) {
+ DRM_ERROR("V3D_IDENT0 read 0x%08x instead of 0x%08x\n",
+ V3D_READ(V3D_IDENT0), V3D_EXPECTED_IDENT0);
+ return -EINVAL;
+ }
+
+ /* Reset the binner overflow address/size at setup, to be sure
+ * we don't reuse an old one.
+ */
+ V3D_WRITE(V3D_BPOA, 0);
+ V3D_WRITE(V3D_BPOS, 0);
+
+ vc4_v3d_init_hw(drm);
+
+ ret = drm_irq_install(drm, platform_get_irq(pdev, 0));
+ if (ret) {
+ DRM_ERROR("Failed to install IRQ handler\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static void vc4_v3d_unbind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct drm_device *drm = dev_get_drvdata(master);
+ struct vc4_dev *vc4 = to_vc4_dev(drm);
+
+ drm_irq_uninstall(drm);
+
+ /* Disable the binner's overflow memory address, so the next
+ * driver probe (if any) doesn't try to reuse our old
+ * allocation.
+ */
+ V3D_WRITE(V3D_BPOA, 0);
+ V3D_WRITE(V3D_BPOS, 0);
+
+ vc4_v3d_set_power(vc4, false);
+
+ vc4->v3d = NULL;
+}
+
+static const struct component_ops vc4_v3d_ops = {
+ .bind = vc4_v3d_bind,
+ .unbind = vc4_v3d_unbind,
+};
+
+static int vc4_v3d_dev_probe(struct platform_device *pdev)
+{
+ return component_add(&pdev->dev, &vc4_v3d_ops);
+}
+
+static int vc4_v3d_dev_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &vc4_v3d_ops);
+ return 0;
+}
+
+static const struct of_device_id vc4_v3d_dt_match[] = {
+ { .compatible = "brcm,vc4-v3d" },
+ {}
+};
+
+static struct platform_driver vc4_v3d_driver = {
+ .probe = vc4_v3d_dev_probe,
+ .remove = vc4_v3d_dev_remove,
+ .driver = {
+ .name = "vc4_v3d",
+ .of_match_table = vc4_v3d_dt_match,
+ },
+};
+
+void __init vc4_v3d_register(void)
+{
+ platform_driver_register(&vc4_v3d_driver);
+}
+
+void __exit vc4_v3d_unregister(void)
+{
+ platform_driver_unregister(&vc4_v3d_driver);
+}
diff --git a/drivers/gpu/drm/vc4/vc4_validate.c b/drivers/gpu/drm/vc4/vc4_validate.c
new file mode 100644
index 000000000000..9eaa0f9b53f2
--- /dev/null
+++ b/drivers/gpu/drm/vc4/vc4_validate.c
@@ -0,0 +1,952 @@
+/*
+ * Copyright © 2014 Broadcom
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+/**
+ * Command list validator for VC4.
+ *
+ * The VC4 has no IOMMU between it and system memory. So, a user with
+ * access to execute command lists could escalate privilege by
+ * overwriting system memory (drawing to it as a framebuffer) or
+ * reading system memory it shouldn't (reading it as a texture, or
+ * uniform data, or vertex data).
+ *
+ * This validates command lists to ensure that all accesses are within
+ * the bounds of the GEM objects referenced. It explicitly whitelists
+ * packets, and looks at the offsets in any address fields to make
+ * sure they're constrained within the BOs they reference.
+ *
+ * Note that because of the validation that's happening anyway, this
+ * is where GEM relocation processing happens.
+ */
+
+#include "uapi/drm/vc4_drm.h"
+#include "vc4_drv.h"
+#include "vc4_packet.h"
+
+#define VALIDATE_ARGS \
+ struct vc4_exec_info *exec, \
+ void *validated, \
+ void *untrusted
+
+/** Return the width in pixels of a 64-byte microtile. */
+static uint32_t
+utile_width(int cpp)
+{
+ switch (cpp) {
+ case 1:
+ case 2:
+ return 8;
+ case 4:
+ return 4;
+ case 8:
+ return 2;
+ default:
+ DRM_ERROR("unknown cpp: %d\n", cpp);
+ return 1;
+ }
+}
+
+/** Return the height in pixels of a 64-byte microtile. */
+static uint32_t
+utile_height(int cpp)
+{
+ switch (cpp) {
+ case 1:
+ return 8;
+ case 2:
+ case 4:
+ case 8:
+ return 4;
+ default:
+ DRM_ERROR("unknown cpp: %d\n", cpp);
+ return 1;
+ }
+}
+
+/**
+ * The texture unit decides the tiling format of a particular miplevel
+ * using this function, so we lay out our miptrees accordingly.
+ */
+static bool
+size_is_lt(uint32_t width, uint32_t height, int cpp)
+{
+ return (width <= 4 * utile_width(cpp) ||
+ height <= 4 * utile_height(cpp));
+}
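+
+/* A worked example of the layout math above (illustrative, not from
+ * the hardware docs): at cpp == 4 a 64-byte utile is 4x4 pixels, so
+ * size_is_lt() returns true once either dimension is at most 16
+ * pixels; at cpp == 1 the utile is 8x8 pixels and the LT cutoff is
+ * 32 pixels.
+ */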
+
+bool
+vc4_use_bo(struct vc4_exec_info *exec,
+ uint32_t hindex,
+ enum vc4_bo_mode mode,
+ struct drm_gem_cma_object **obj)
+{
+ *obj = NULL;
+
+ if (hindex >= exec->bo_count) {
+ DRM_ERROR("BO index %d greater than BO count %d\n",
+ hindex, exec->bo_count);
+ return false;
+ }
+
+ if (exec->bo[hindex].mode != mode) {
+ if (exec->bo[hindex].mode == VC4_MODE_UNDECIDED) {
+ exec->bo[hindex].mode = mode;
+ } else {
+ DRM_ERROR("BO index %d reused with mode %d vs %d\n",
+ hindex, exec->bo[hindex].mode, mode);
+ return false;
+ }
+ }
+
+ *obj = exec->bo[hindex].bo;
+ return true;
+}
+
+static bool
+vc4_use_handle(struct vc4_exec_info *exec,
+ uint32_t gem_handles_packet_index,
+ enum vc4_bo_mode mode,
+ struct drm_gem_cma_object **obj)
+{
+ return vc4_use_bo(exec, exec->bo_index[gem_handles_packet_index],
+ mode, obj);
+}
+
+static uint32_t
+gl_shader_rec_size(uint32_t pointer_bits)
+{
+ uint32_t attribute_count = pointer_bits & 7;
+ bool extended = pointer_bits & 8;
+
+ if (attribute_count == 0)
+ attribute_count = 8;
+
+ if (extended)
+ return 100 + attribute_count * 4;
+ else
+ return 36 + attribute_count * 8;
+}
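+
+/* Worked example of the encoding above (illustrative): pointer_bits
+ * 0x3 is a non-extended record with 3 attributes (36 + 3 * 8 = 60
+ * bytes), while 0xd is an extended record with 5 attributes
+ * (100 + 5 * 4 = 120 bytes).
+ */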
+
+bool
+vc4_check_tex_size(struct vc4_exec_info *exec, struct drm_gem_cma_object *fbo,
+ uint32_t offset, uint8_t tiling_format,
+ uint32_t width, uint32_t height, uint8_t cpp)
+{
+ uint32_t aligned_width, aligned_height, stride, size;
+ uint32_t utile_w = utile_width(cpp);
+ uint32_t utile_h = utile_height(cpp);
+
+ /* The shaded vertex format stores signed 12.4 fixed point
+ * (-2048,2047) offsets from the viewport center, so we should
+ * never have a render target larger than 4096. The texture
+ * unit can only sample from 2048x2048, so it's even more
+ * restricted. This lets us avoid worrying about overflow in
+ * our math.
+ */
+ if (width > 4096 || height > 4096) {
+ DRM_ERROR("Surface dimesions (%d,%d) too large", width, height);
+ return false;
+ }
+
+ switch (tiling_format) {
+ case VC4_TILING_FORMAT_LINEAR:
+ aligned_width = round_up(width, utile_w);
+ aligned_height = height;
+ break;
+ case VC4_TILING_FORMAT_T:
+ aligned_width = round_up(width, utile_w * 8);
+ aligned_height = round_up(height, utile_h * 8);
+ break;
+ case VC4_TILING_FORMAT_LT:
+ aligned_width = round_up(width, utile_w);
+ aligned_height = round_up(height, utile_h);
+ break;
+ default:
+ DRM_ERROR("buffer tiling %d unsupported\n", tiling_format);
+ return false;
+ }
+
+ stride = aligned_width * cpp;
+ size = stride * aligned_height;
+
+ if (size + offset < size ||
+ size + offset > fbo->base.size) {
+ DRM_ERROR("Overflow in %dx%d (%dx%d) fbo size (%d + %d > %d)\n",
+ width, height,
+ aligned_width, aligned_height,
+ size, offset, fbo->base.size);
+ return false;
+ }
+
+ return true;
+}
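+
+/* Rough example of the bounds check above (illustrative numbers): a
+ * 100x100 T-format surface at cpp == 4 rounds up to 128x128 (4x4
+ * utiles, so 8-utile granularity is 32 pixels), giving a 512-byte
+ * stride and a 64KB size that must fit within the BO at the given
+ * offset.
+ */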
+
+static int
+validate_flush_all(VALIDATE_ARGS)
+{
+ if (exec->found_increment_semaphore_packet) {
+ DRM_ERROR("VC4_PACKET_FLUSH_ALL after "
+ "VC4_PACKET_INCREMENT_SEMAPHORE\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+validate_start_tile_binning(VALIDATE_ARGS)
+{
+ if (exec->found_start_tile_binning_packet) {
+ DRM_ERROR("Duplicate VC4_PACKET_START_TILE_BINNING\n");
+ return -EINVAL;
+ }
+ exec->found_start_tile_binning_packet = true;
+
+ if (!exec->found_tile_binning_mode_config_packet) {
+ DRM_ERROR("missing VC4_PACKET_TILE_BINNING_MODE_CONFIG\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+validate_increment_semaphore(VALIDATE_ARGS)
+{
+ if (exec->found_increment_semaphore_packet) {
+ DRM_ERROR("Duplicate VC4_PACKET_INCREMENT_SEMAPHORE\n");
+ return -EINVAL;
+ }
+ exec->found_increment_semaphore_packet = true;
+
+ /* Once we've found the semaphore increment, there should be one FLUSH
+ * then the end of the command list. The FLUSH actually triggers the
+ * increment, so we only need to make sure no more drawing happens
+ * after this packet.
+ */
+
+ return 0;
+}
+
+static int
+validate_indexed_prim_list(VALIDATE_ARGS)
+{
+ struct drm_gem_cma_object *ib;
+ uint32_t length = *(uint32_t *)(untrusted + 1);
+ uint32_t offset = *(uint32_t *)(untrusted + 5);
+ uint32_t max_index = *(uint32_t *)(untrusted + 9);
+ uint32_t index_size = (*(uint8_t *)(untrusted + 0) >> 4) ? 2 : 1;
+ struct vc4_shader_state *shader_state;
+
+ if (exec->found_increment_semaphore_packet) {
+ DRM_ERROR("Drawing after VC4_PACKET_INCREMENT_SEMAPHORE\n");
+ return -EINVAL;
+ }
+
+ /* Make sure shader state was set up before any primitives. */
+ if (exec->shader_state_count == 0) {
+ DRM_ERROR("shader state must precede primitives\n");
+ return -EINVAL;
+ }
+ shader_state = &exec->shader_state[exec->shader_state_count - 1];
+
+ if (max_index > shader_state->max_index)
+ shader_state->max_index = max_index;
+
+ if (!vc4_use_handle(exec, 0, VC4_MODE_RENDER, &ib))
+ return -EINVAL;
+
+ if (offset > ib->base.size ||
+ (ib->base.size - offset) / index_size < length) {
+ DRM_ERROR("IB access overflow (%d + %d*%d > %d)\n",
+ offset, length, index_size, ib->base.size);
+ return -EINVAL;
+ }
+
+ *(uint32_t *)(validated + 5) = ib->paddr + offset;
+
+ return 0;
+}
+
+static int
+validate_gl_array_primitive(VALIDATE_ARGS)
+{
+ uint32_t length = *(uint32_t *)(untrusted + 1);
+ uint32_t base_index = *(uint32_t *)(untrusted + 5);
+ uint32_t max_index;
+ struct vc4_shader_state *shader_state;
+
+ if (exec->found_increment_semaphore_packet) {
+ DRM_ERROR("Drawing after VC4_PACKET_INCREMENT_SEMAPHORE\n");
+ return -EINVAL;
+ }
+
+ /* Check overflow condition */
+ if (exec->shader_state_count == 0) {
+ DRM_ERROR("shader state must precede primitives\n");
+ return -EINVAL;
+ }
+ shader_state = &exec->shader_state[exec->shader_state_count - 1];
+
+ if (length + base_index < length) {
+ DRM_ERROR("primitive vertex count overflow\n");
+ return -EINVAL;
+ }
+ max_index = length + base_index - 1;
+
+ if (max_index > shader_state->max_index)
+ shader_state->max_index = max_index;
+
+ return 0;
+}
+
+static int
+validate_gl_shader_state(VALIDATE_ARGS)
+{
+ uint32_t i = exec->shader_state_count++;
+
+ if (i >= exec->shader_state_size) {
+ DRM_ERROR("More requests for shader states than declared\n");
+ return -EINVAL;
+ }
+
+ exec->shader_state[i].packet = VC4_PACKET_GL_SHADER_STATE;
+ exec->shader_state[i].addr = *(uint32_t *)untrusted;
+ exec->shader_state[i].max_index = 0;
+
+ if (exec->shader_state[i].addr & ~0xf) {
+ DRM_ERROR("high bits set in GL shader rec reference\n");
+ return -EINVAL;
+ }
+
+ *(uint32_t *)validated = (exec->shader_rec_p +
+ exec->shader_state[i].addr);
+
+ exec->shader_rec_p +=
+ roundup(gl_shader_rec_size(exec->shader_state[i].addr), 16);
+
+ return 0;
+}
+
+static int
+validate_nv_shader_state(VALIDATE_ARGS)
+{
+ uint32_t i = exec->shader_state_count++;
+
+ if (i >= exec->shader_state_size) {
+ DRM_ERROR("More requests for shader states than declared\n");
+ return -EINVAL;
+ }
+
+ exec->shader_state[i].packet = VC4_PACKET_NV_SHADER_STATE;
+ exec->shader_state[i].addr = *(uint32_t *)untrusted;
+
+ if (exec->shader_state[i].addr & 15) {
+ DRM_ERROR("NV shader state address 0x%08x misaligned\n",
+ exec->shader_state[i].addr);
+ return -EINVAL;
+ }
+
+ *(uint32_t *)validated = (exec->shader_state[i].addr +
+ exec->shader_rec_p);
+
+ return 0;
+}
+
+static int
+validate_tile_binning_config(VALIDATE_ARGS)
+{
+ struct drm_device *dev = exec->exec_bo->base.dev;
+ uint8_t flags;
+ uint32_t tile_state_size, tile_alloc_size;
+ uint32_t tile_count;
+ struct vc4_bo *tile_bo;
+
+ if (exec->found_tile_binning_mode_config_packet) {
+ DRM_ERROR("Duplicate VC4_PACKET_TILE_BINNING_MODE_CONFIG\n");
+ return -EINVAL;
+ }
+ exec->found_tile_binning_mode_config_packet = true;
+
+ exec->bin_tiles_x = *(uint8_t *)(untrusted + 12);
+ exec->bin_tiles_y = *(uint8_t *)(untrusted + 13);
+ tile_count = exec->bin_tiles_x * exec->bin_tiles_y;
+ flags = *(uint8_t *)(untrusted + 14);
+
+ if (exec->bin_tiles_x == 0 ||
+ exec->bin_tiles_y == 0) {
+ DRM_ERROR("Tile binning config of %dx%d too small\n",
+ exec->bin_tiles_x, exec->bin_tiles_y);
+ return -EINVAL;
+ }
+
+ if (flags & (VC4_BIN_CONFIG_DB_NON_MS |
+ VC4_BIN_CONFIG_TILE_BUFFER_64BIT |
+ VC4_BIN_CONFIG_MS_MODE_4X)) {
+ DRM_ERROR("unsupported bining config flags 0x%02x\n", flags);
+ return -EINVAL;
+ }
+
+ /* The tile state data array is 48 bytes per tile, and we put it at
+ * the start of a BO containing both it and the tile alloc.
+ */
+ tile_state_size = 48 * tile_count;
+
+ /* Since the tile alloc array will follow us, align. */
+ exec->tile_alloc_offset = roundup(tile_state_size, 4096);
+
+ *(uint8_t *)(validated + 14) =
+ ((flags & ~(VC4_BIN_CONFIG_ALLOC_INIT_BLOCK_SIZE_MASK |
+ VC4_BIN_CONFIG_ALLOC_BLOCK_SIZE_MASK)) |
+ VC4_BIN_CONFIG_AUTO_INIT_TSDA |
+ VC4_SET_FIELD(VC4_BIN_CONFIG_ALLOC_INIT_BLOCK_SIZE_32,
+ VC4_BIN_CONFIG_ALLOC_INIT_BLOCK_SIZE) |
+ VC4_SET_FIELD(VC4_BIN_CONFIG_ALLOC_BLOCK_SIZE_128,
+ VC4_BIN_CONFIG_ALLOC_BLOCK_SIZE));
+
+ /* Initial block size. */
+ tile_alloc_size = 32 * tile_count;
+
+ /*
+ * The initial allocation gets rounded to the next 256 bytes before
+ * the hardware starts fulfilling further allocations.
+ */
+ tile_alloc_size = roundup(tile_alloc_size, 256);
+
+ /* Add space for the extra allocations. This is what gets used first,
+ * before overflow memory. It must have at least 4096 bytes, but we
+ * want to avoid overflow memory usage if possible.
+ */
+ tile_alloc_size += 1024 * 1024;
+
+ tile_bo = vc4_bo_create(dev, exec->tile_alloc_offset + tile_alloc_size);
+ if (!tile_bo)
+ return -ENOMEM;
+ exec->tile_bo = &tile_bo->base;
+ list_add_tail(&tile_bo->unref_head, &exec->unref_list);
+
+ /* tile alloc address. */
+ *(uint32_t *)(validated + 0) = (exec->tile_bo->paddr +
+ exec->tile_alloc_offset);
+ /* tile alloc size. */
+ *(uint32_t *)(validated + 4) = tile_alloc_size;
+ /* tile state address. */
+ *(uint32_t *)(validated + 8) = exec->tile_bo->paddr;
+
+ return 0;
+}
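+
+/* Sizing example for the function above (illustrative, assuming the
+ * usual 64x64 bin tiles): a 1920x1080 frame is 30x17 tiles, so
+ * tile_state_size is 48 * 510 = 24480 bytes, the tile alloc starts at
+ * the next 4096-byte boundary (24576), and the initial allocation of
+ * 32 * 510 = 16320 bytes rounds up to 16384 before the extra 1MB of
+ * allocation space is added.
+ */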
+
+static int
+validate_gem_handles(VALIDATE_ARGS)
+{
+ memcpy(exec->bo_index, untrusted, sizeof(exec->bo_index));
+ return 0;
+}
+
+#define VC4_DEFINE_PACKET(packet, name, func) \
+ [packet] = { packet ## _SIZE, name, func }
+
+static const struct cmd_info {
+ uint16_t len;
+ const char *name;
+ int (*func)(struct vc4_exec_info *exec, void *validated,
+ void *untrusted);
+} cmd_info[] = {
+ VC4_DEFINE_PACKET(VC4_PACKET_HALT, "halt", NULL),
+ VC4_DEFINE_PACKET(VC4_PACKET_NOP, "nop", NULL),
+ VC4_DEFINE_PACKET(VC4_PACKET_FLUSH, "flush", NULL),
+ VC4_DEFINE_PACKET(VC4_PACKET_FLUSH_ALL, "flush all state", validate_flush_all),
+ VC4_DEFINE_PACKET(VC4_PACKET_START_TILE_BINNING, "start tile binning", validate_start_tile_binning),
+ VC4_DEFINE_PACKET(VC4_PACKET_INCREMENT_SEMAPHORE, "increment semaphore", validate_increment_semaphore),
+
+ VC4_DEFINE_PACKET(VC4_PACKET_GL_INDEXED_PRIMITIVE, "Indexed Primitive List", validate_indexed_prim_list),
+
+ VC4_DEFINE_PACKET(VC4_PACKET_GL_ARRAY_PRIMITIVE, "Vertex Array Primitives", validate_gl_array_primitive),
+
+ /* This is only used by clipped primitives (packets 48 and 49), which
+ * we don't support parsing yet.
+ */
+ VC4_DEFINE_PACKET(VC4_PACKET_PRIMITIVE_LIST_FORMAT, "primitive list format", NULL),
+
+ VC4_DEFINE_PACKET(VC4_PACKET_GL_SHADER_STATE, "GL Shader State", validate_gl_shader_state),
+ VC4_DEFINE_PACKET(VC4_PACKET_NV_SHADER_STATE, "NV Shader State", validate_nv_shader_state),
+
+ VC4_DEFINE_PACKET(VC4_PACKET_CONFIGURATION_BITS, "configuration bits", NULL),
+ VC4_DEFINE_PACKET(VC4_PACKET_FLAT_SHADE_FLAGS, "flat shade flags", NULL),
+ VC4_DEFINE_PACKET(VC4_PACKET_POINT_SIZE, "point size", NULL),
+ VC4_DEFINE_PACKET(VC4_PACKET_LINE_WIDTH, "line width", NULL),
+ VC4_DEFINE_PACKET(VC4_PACKET_RHT_X_BOUNDARY, "RHT X boundary", NULL),
+ VC4_DEFINE_PACKET(VC4_PACKET_DEPTH_OFFSET, "Depth Offset", NULL),
+ VC4_DEFINE_PACKET(VC4_PACKET_CLIP_WINDOW, "Clip Window", NULL),
+ VC4_DEFINE_PACKET(VC4_PACKET_VIEWPORT_OFFSET, "Viewport Offset", NULL),
+ VC4_DEFINE_PACKET(VC4_PACKET_CLIPPER_XY_SCALING, "Clipper XY Scaling", NULL),
+ /* Note: The docs say this was also 105, but it was 106 in the
+ * initial userland code drop.
+ */
+ VC4_DEFINE_PACKET(VC4_PACKET_CLIPPER_Z_SCALING, "Clipper Z Scale and Offset", NULL),
+
+ VC4_DEFINE_PACKET(VC4_PACKET_TILE_BINNING_MODE_CONFIG, "tile binning configuration", validate_tile_binning_config),
+
+ VC4_DEFINE_PACKET(VC4_PACKET_GEM_HANDLES, "GEM handles", validate_gem_handles),
+};
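+
+/* The walk below keeps separate source and destination offsets: each
+ * packet is size-checked against the table above and copied, except
+ * VC4_PACKET_GEM_HANDLES, which is consumed by the kernel and emits
+ * no hardware packet, so the validated copy can end up shorter than
+ * the unvalidated one.
+ */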
+
+int
+vc4_validate_bin_cl(struct drm_device *dev,
+ void *validated,
+ void *unvalidated,
+ struct vc4_exec_info *exec)
+{
+ uint32_t len = exec->args->bin_cl_size;
+ uint32_t dst_offset = 0;
+ uint32_t src_offset = 0;
+
+ while (src_offset < len) {
+ void *dst_pkt = validated + dst_offset;
+ void *src_pkt = unvalidated + src_offset;
+ u8 cmd = *(uint8_t *)src_pkt;
+ const struct cmd_info *info;
+
+ if (cmd >= ARRAY_SIZE(cmd_info)) {
+ DRM_ERROR("0x%08x: packet %d out of bounds\n",
+ src_offset, cmd);
+ return -EINVAL;
+ }
+
+ info = &cmd_info[cmd];
+ if (!info->name) {
+ DRM_ERROR("0x%08x: packet %d invalid\n",
+ src_offset, cmd);
+ return -EINVAL;
+ }
+
+#if 0
+ DRM_INFO("0x%08x: packet %d (%s) size %d processing...\n",
+ src_offset, cmd, info->name, info->len);
+#endif
+
+ if (src_offset + info->len > len) {
+ DRM_ERROR("0x%08x: packet %d (%s) length 0x%08x "
+ "exceeds bounds (0x%08x)\n",
+ src_offset, cmd, info->name, info->len,
+ len);
+ return -EINVAL;
+ }
+
+ if (cmd != VC4_PACKET_GEM_HANDLES)
+ memcpy(dst_pkt, src_pkt, info->len);
+
+ if (info->func && info->func(exec,
+ dst_pkt + 1,
+ src_pkt + 1)) {
+ DRM_ERROR("0x%08x: packet %d (%s) failed to "
+ "validate\n",
+ src_offset, cmd, info->name);
+ return -EINVAL;
+ }
+
+ src_offset += info->len;
+ /* GEM handle loading doesn't produce HW packets. */
+ if (cmd != VC4_PACKET_GEM_HANDLES)
+ dst_offset += info->len;
+
+ /* When the CL hits halt, it'll stop reading anything else. */
+ if (cmd == VC4_PACKET_HALT)
+ break;
+ }
+
+ exec->ct0ea = exec->ct0ca + dst_offset;
+
+ if (!exec->found_start_tile_binning_packet) {
+ DRM_ERROR("Bin CL missing VC4_PACKET_START_TILE_BINNING\n");
+ return -EINVAL;
+ }
+
+ if (!exec->found_increment_semaphore_packet) {
+ DRM_ERROR("Bin CL missing VC4_PACKET_INCREMENT_SEMAPHORE\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static bool
+reloc_tex(struct vc4_exec_info *exec,
+ void *uniform_data_u,
+ struct vc4_texture_sample_info *sample,
+ uint32_t texture_handle_index)
+{
+ struct drm_gem_cma_object *tex;
+ uint32_t p0 = *(uint32_t *)(uniform_data_u + sample->p_offset[0]);
+ uint32_t p1 = *(uint32_t *)(uniform_data_u + sample->p_offset[1]);
+ uint32_t p2 = (sample->p_offset[2] != ~0 ?
+ *(uint32_t *)(uniform_data_u + sample->p_offset[2]) : 0);
+ uint32_t p3 = (sample->p_offset[3] != ~0 ?
+ *(uint32_t *)(uniform_data_u + sample->p_offset[3]) : 0);
+ uint32_t *validated_p0 = exec->uniforms_v + sample->p_offset[0];
+ uint32_t offset = p0 & VC4_TEX_P0_OFFSET_MASK;
+ uint32_t miplevels = VC4_GET_FIELD(p0, VC4_TEX_P0_MIPLVLS);
+ uint32_t width = VC4_GET_FIELD(p1, VC4_TEX_P1_WIDTH);
+ uint32_t height = VC4_GET_FIELD(p1, VC4_TEX_P1_HEIGHT);
+ uint32_t cpp, tiling_format, utile_w, utile_h;
+ uint32_t i;
+ uint32_t cube_map_stride = 0;
+ enum vc4_texture_data_type type;
+
+ if (!vc4_use_bo(exec, texture_handle_index, VC4_MODE_RENDER, &tex))
+ return false;
+
+ if (sample->is_direct) {
+ uint32_t remaining_size = tex->base.size - p0;
+ if (p0 > tex->base.size - 4) {
+ DRM_ERROR("UBO offset greater than UBO size\n");
+ return false;
+ }
+ if (p1 > remaining_size - 4) {
+ DRM_ERROR("UBO clamp would allow reads outside of UBO\n");
+ return false;
+ }
+ *validated_p0 = tex->paddr + p0;
+ return true;
+ }
+
+ if (width == 0)
+ width = 2048;
+ if (height == 0)
+ height = 2048;
+
+ if (p0 & VC4_TEX_P0_CMMODE_MASK) {
+ if (VC4_GET_FIELD(p2, VC4_TEX_P2_PTYPE) ==
+ VC4_TEX_P2_PTYPE_CUBE_MAP_STRIDE)
+ cube_map_stride = p2 & VC4_TEX_P2_CMST_MASK;
+ if (VC4_GET_FIELD(p3, VC4_TEX_P2_PTYPE) ==
+ VC4_TEX_P2_PTYPE_CUBE_MAP_STRIDE) {
+ if (cube_map_stride) {
+ DRM_ERROR("Cube map stride set twice\n");
+ return false;
+ }
+
+ cube_map_stride = p3 & VC4_TEX_P2_CMST_MASK;
+ }
+ if (!cube_map_stride) {
+ DRM_ERROR("Cube map stride not set\n");
+ return false;
+ }
+ }
+
+ type = (VC4_GET_FIELD(p0, VC4_TEX_P0_TYPE) |
+ (VC4_GET_FIELD(p1, VC4_TEX_P1_TYPE4) << 4));
+
+ switch (type) {
+ case VC4_TEXTURE_TYPE_RGBA8888:
+ case VC4_TEXTURE_TYPE_RGBX8888:
+ case VC4_TEXTURE_TYPE_RGBA32R:
+ cpp = 4;
+ break;
+ case VC4_TEXTURE_TYPE_RGBA4444:
+ case VC4_TEXTURE_TYPE_RGBA5551:
+ case VC4_TEXTURE_TYPE_RGB565:
+ case VC4_TEXTURE_TYPE_LUMALPHA:
+ case VC4_TEXTURE_TYPE_S16F:
+ case VC4_TEXTURE_TYPE_S16:
+ cpp = 2;
+ break;
+ case VC4_TEXTURE_TYPE_LUMINANCE:
+ case VC4_TEXTURE_TYPE_ALPHA:
+ case VC4_TEXTURE_TYPE_S8:
+ cpp = 1;
+ break;
+ case VC4_TEXTURE_TYPE_ETC1:
+ case VC4_TEXTURE_TYPE_BW1:
+ case VC4_TEXTURE_TYPE_A4:
+ case VC4_TEXTURE_TYPE_A1:
+ case VC4_TEXTURE_TYPE_RGBA64:
+ case VC4_TEXTURE_TYPE_YUV422R:
+ default:
+ DRM_ERROR("Texture format %d unsupported\n", type);
+ return false;
+ }
+ utile_w = utile_width(cpp);
+ utile_h = utile_height(cpp);
+
+ if (type == VC4_TEXTURE_TYPE_RGBA32R) {
+ tiling_format = VC4_TILING_FORMAT_LINEAR;
+ } else {
+ if (size_is_lt(width, height, cpp))
+ tiling_format = VC4_TILING_FORMAT_LT;
+ else
+ tiling_format = VC4_TILING_FORMAT_T;
+ }
+
+ if (!vc4_check_tex_size(exec, tex, offset + cube_map_stride * 5,
+ tiling_format, width, height, cpp)) {
+ return false;
+ }
+
+ /* The mipmap levels are stored before the base of the texture. Make
+ * sure there is actually space in the BO.
+ */
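+ /* For example (illustrative numbers): a 256x256 RGBA8888 texture
+ * with 2 miplevels walks back from the base offset by the aligned
+ * size of the 128x128 level, then of the 64x64 level, checking at
+ * each step that the offset hasn't passed the start of the BO.
+ */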
+ for (i = 1; i <= miplevels; i++) {
+ uint32_t level_width = max(width >> i, 1u);
+ uint32_t level_height = max(height >> i, 1u);
+ uint32_t aligned_width, aligned_height;
+ uint32_t level_size;
+
+ /* Once the levels get small enough, they drop from T to LT. */
+ if (tiling_format == VC4_TILING_FORMAT_T &&
+ size_is_lt(level_width, level_height, cpp)) {
+ tiling_format = VC4_TILING_FORMAT_LT;
+ }
+
+ switch (tiling_format) {
+ case VC4_TILING_FORMAT_T:
+ aligned_width = round_up(level_width, utile_w * 8);
+ aligned_height = round_up(level_height, utile_h * 8);
+ break;
+ case VC4_TILING_FORMAT_LT:
+ aligned_width = round_up(level_width, utile_w);
+ aligned_height = round_up(level_height, utile_h);
+ break;
+ default:
+ aligned_width = round_up(level_width, utile_w);
+ aligned_height = level_height;
+ break;
+ }
+
+ level_size = aligned_width * cpp * aligned_height;
+
+ if (offset < level_size) {
+ DRM_ERROR("Level %d (%dx%d -> %dx%d) size %db "
+ "overflowed buffer bounds (offset %d)\n",
+ i, level_width, level_height,
+ aligned_width, aligned_height,
+ level_size, offset);
+ return false;
+ }
+
+ offset -= level_size;
+ }
+
+ *validated_p0 = tex->paddr + p0;
+
+ return true;
+}
+
+static int
+validate_shader_rec(struct drm_device *dev,
+ struct vc4_exec_info *exec,
+ struct vc4_shader_state *state)
+{
+ uint32_t *src_handles;
+ void *pkt_u, *pkt_v;
+ enum shader_rec_reloc_type {
+ RELOC_CODE,
+ RELOC_VBO,
+ };
+ struct shader_rec_reloc {
+ enum shader_rec_reloc_type type;
+ uint32_t offset;
+ };
+ static const struct shader_rec_reloc gl_relocs[] = {
+ { RELOC_CODE, 4 }, /* fs */
+ { RELOC_CODE, 16 }, /* vs */
+ { RELOC_CODE, 28 }, /* cs */
+ };
+ static const struct shader_rec_reloc nv_relocs[] = {
+ { RELOC_CODE, 4 }, /* fs */
+ { RELOC_VBO, 12 }
+ };
+ const struct shader_rec_reloc *relocs;
+ struct drm_gem_cma_object *bo[ARRAY_SIZE(gl_relocs) + 8];
+ uint32_t nr_attributes = 0, nr_fixed_relocs, nr_relocs, packet_size;
+ int i;
+ struct vc4_validated_shader_info *validated_shader;
+
+ if (state->packet == VC4_PACKET_NV_SHADER_STATE) {
+ relocs = nv_relocs;
+ nr_fixed_relocs = ARRAY_SIZE(nv_relocs);
+
+ packet_size = 16;
+ } else {
+ relocs = gl_relocs;
+ nr_fixed_relocs = ARRAY_SIZE(gl_relocs);
+
+ nr_attributes = state->addr & 0x7;
+ if (nr_attributes == 0)
+ nr_attributes = 8;
+ packet_size = gl_shader_rec_size(state->addr);
+ }
+ nr_relocs = nr_fixed_relocs + nr_attributes;
+
+ if (nr_relocs * 4 > exec->shader_rec_size) {
+ DRM_ERROR("overflowed shader recs reading %d handles "
+ "from %d bytes left\n",
+ nr_relocs, exec->shader_rec_size);
+ return -EINVAL;
+ }
+ src_handles = exec->shader_rec_u;
+ exec->shader_rec_u += nr_relocs * 4;
+ exec->shader_rec_size -= nr_relocs * 4;
+
+ if (packet_size > exec->shader_rec_size) {
+ DRM_ERROR("overflowed shader recs copying %db packet "
+ "from %d bytes left\n",
+ packet_size, exec->shader_rec_size);
+ return -EINVAL;
+ }
+ pkt_u = exec->shader_rec_u;
+ pkt_v = exec->shader_rec_v;
+ memcpy(pkt_v, pkt_u, packet_size);
+ exec->shader_rec_u += packet_size;
+ /* Shader recs have to be aligned to 16 bytes (due to the attribute
+ * flags being in the low bytes), so round the next validated shader
+ * rec address up. This should be safe, since we've got so many
+ * relocations in a shader rec packet.
+ */
+ BUG_ON(roundup(packet_size, 16) - packet_size > nr_relocs * 4);
+ exec->shader_rec_v += roundup(packet_size, 16);
+ exec->shader_rec_size -= packet_size;
+
+ for (i = 0; i < nr_relocs; i++) {
+ enum vc4_bo_mode mode;
+
+ if (i < nr_fixed_relocs && relocs[i].type == RELOC_CODE)
+ mode = VC4_MODE_SHADER;
+ else
+ mode = VC4_MODE_RENDER;
+
+ if (!vc4_use_bo(exec, src_handles[i], mode, &bo[i]))
+ return -EINVAL;
+ }
+
+ for (i = 0; i < nr_fixed_relocs; i++) {
+ uint32_t o = relocs[i].offset;
+ uint32_t src_offset = *(uint32_t *)(pkt_u + o);
+ uint32_t *texture_handles_u;
+ void *uniform_data_u;
+ uint32_t tex;
+
+ *(uint32_t *)(pkt_v + o) = bo[i]->paddr + src_offset;
+
+ switch (relocs[i].type) {
+ case RELOC_CODE:
+ if (src_offset != 0) {
+ DRM_ERROR("Shaders must be at offset 0 of "
+ "the BO.\n");
+ goto fail;
+ }
+
+ validated_shader = vc4_validate_shader(bo[i]);
+ if (!validated_shader)
+ goto fail;
+
+ if (validated_shader->uniforms_src_size >
+ exec->uniforms_size) {
+ DRM_ERROR("Uniforms src buffer overflow\n");
+ goto fail;
+ }
+
+ texture_handles_u = exec->uniforms_u;
+ uniform_data_u = (texture_handles_u +
+ validated_shader->num_texture_samples);
+
+ memcpy(exec->uniforms_v, uniform_data_u,
+ validated_shader->uniforms_size);
+
+ for (tex = 0;
+ tex < validated_shader->num_texture_samples;
+ tex++) {
+ if (!reloc_tex(exec,
+ uniform_data_u,
+ &validated_shader->texture_samples[tex],
+ texture_handles_u[tex])) {
+ goto fail;
+ }
+ }
+
+ *(uint32_t *)(pkt_v + o + 4) = exec->uniforms_p;
+
+ exec->uniforms_u += validated_shader->uniforms_src_size;
+ exec->uniforms_v += validated_shader->uniforms_size;
+ exec->uniforms_p += validated_shader->uniforms_size;
+
+ break;
+
+ case RELOC_VBO:
+ break;
+ }
+ }
+
+ for (i = 0; i < nr_attributes; i++) {
+ struct drm_gem_cma_object *vbo = bo[nr_fixed_relocs + i];
+ uint32_t o = 36 + i * 8;
+ uint32_t offset = *(uint32_t *)(pkt_u + o + 0);
+ uint32_t attr_size = *(uint8_t *)(pkt_u + o + 4) + 1;
+ uint32_t stride = *(uint8_t *)(pkt_u + o + 5);
+ uint32_t max_index;
+
+ if (state->addr & 0x8)
+ stride |= (*(uint32_t *)(pkt_u + 100 + i * 4)) & ~0xff;
+
+ if (vbo->base.size < offset ||
+ vbo->base.size - offset < attr_size) {
+ DRM_ERROR("BO offset overflow (%d + %d > %d)\n",
+ offset, attr_size, vbo->base.size);
+ return -EINVAL;
+ }
+
+ if (stride != 0) {
+ max_index = ((vbo->base.size - offset - attr_size) /
+ stride);
+ if (state->max_index > max_index) {
+ DRM_ERROR("primitives use index %d out of supplied %d\n",
+ state->max_index, max_index);
+ return -EINVAL;
+ }
+ }
+
+ *(uint32_t *)(pkt_v + o) = vbo->paddr + offset;
+ }
+
+ return 0;
+
+fail:
+ return -EINVAL;
+}
+
+int
+vc4_validate_shader_recs(struct drm_device *dev,
+ struct vc4_exec_info *exec)
+{
+ uint32_t i;
+ int ret = 0;
+
+ for (i = 0; i < exec->shader_state_count; i++) {
+ ret = validate_shader_rec(dev, exec, &exec->shader_state[i]);
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
diff --git a/drivers/gpu/drm/vc4/vc4_validate_shaders.c b/drivers/gpu/drm/vc4/vc4_validate_shaders.c
new file mode 100644
index 000000000000..01351874ff8c
--- /dev/null
+++ b/drivers/gpu/drm/vc4/vc4_validate_shaders.c
@@ -0,0 +1,537 @@
+/*
+ * Copyright © 2014 Broadcom
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+/**
+ * DOC: Shader validator for VC4.
+ *
+ * The VC4 has no IOMMU between it and system memory. So, a user with access
+ * to execute shaders could escalate privilege by overwriting system memory
+ * (using the VPM write address register in the general-purpose DMA mode) or
+ * reading system memory it shouldn't (reading it as a texture, or uniform
+ * data, or vertex data).
+ *
+ * This walks over a shader starting from some offset within a BO, ensuring
+ * that its accesses are appropriately bounded, and recording how many texture
+ * accesses are made and where so that we can do relocations for them in the
+ * uniform stream.
+ *
+ * The kernel API has shaders stored in user-mapped BOs. The BOs will be
+ * forcibly unmapped from the process before validation, and any cache of
+ * validated state will be flushed if the mapping is faulted back in.
+ *
+ * Storing the shaders in BOs means that the validation process will be slow
+ * due to uncached reads, but since shaders are long-lived and shader BOs are
+ * never actually modified, this shouldn't be a problem.
+ */
+
+#include "vc4_drv.h"
+#include "vc4_qpu_defines.h"
+
+struct vc4_shader_validation_state {
+ struct vc4_texture_sample_info tmu_setup[2];
+ int tmu_write_count[2];
+
+ /* For registers that were last written to by a MIN instruction with
+ * one argument being a uniform, the address of the uniform.
+ * Otherwise, ~0.
+ *
+ * This is used for the validation of direct address memory reads.
+ */
+ uint32_t live_min_clamp_offsets[32 + 32 + 4];
+ bool live_max_clamp_regs[32 + 32 + 4];
+};
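+
+/* The live-register index space used below packs regfile A into
+ * indices 0-31, regfile B into 32-63, and accumulators r0-r3 into
+ * 64-67, which is why the tracking arrays above have 32 + 32 + 4
+ * entries.
+ */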
+
+static uint32_t
+waddr_to_live_reg_index(uint32_t waddr, bool is_b)
+{
+ if (waddr < 32) {
+ if (is_b)
+ return 32 + waddr;
+ else
+ return waddr;
+ } else if (waddr <= QPU_W_ACC3) {
+ return 64 + waddr - QPU_W_ACC0;
+ } else {
+ return ~0;
+ }
+}
+
+static uint32_t
+raddr_add_a_to_live_reg_index(uint64_t inst)
+{
+ uint32_t sig = QPU_GET_FIELD(inst, QPU_SIG);
+ uint32_t add_a = QPU_GET_FIELD(inst, QPU_ADD_A);
+ uint32_t raddr_a = QPU_GET_FIELD(inst, QPU_RADDR_A);
+ uint32_t raddr_b = QPU_GET_FIELD(inst, QPU_RADDR_B);
+
+ if (add_a == QPU_MUX_A) {
+ return raddr_a;
+ } else if (add_a == QPU_MUX_B && sig != QPU_SIG_SMALL_IMM) {
+ return 32 + raddr_b;
+ } else if (add_a <= QPU_MUX_R3) {
+ return 64 + add_a;
+ } else {
+ return ~0;
+ }
+}
+
+static bool
+is_tmu_submit(uint32_t waddr)
+{
+ return (waddr == QPU_W_TMU0_S ||
+ waddr == QPU_W_TMU1_S);
+}
+
+static bool
+is_tmu_write(uint32_t waddr)
+{
+ return (waddr >= QPU_W_TMU0_S &&
+ waddr <= QPU_W_TMU1_B);
+}
+
+static bool
+record_validated_texture_sample(struct vc4_validated_shader_info *validated_shader,
+ struct vc4_shader_validation_state *validation_state,
+ int tmu)
+{
+ uint32_t s = validated_shader->num_texture_samples;
+ int i;
+ struct vc4_texture_sample_info *temp_samples;
+
+ temp_samples = krealloc(validated_shader->texture_samples,
+ (s + 1) * sizeof(*temp_samples),
+ GFP_KERNEL);
+ if (!temp_samples)
+ return false;
+
+ memcpy(&temp_samples[s],
+ &validation_state->tmu_setup[tmu],
+ sizeof(*temp_samples));
+
+ validated_shader->num_texture_samples = s + 1;
+ validated_shader->texture_samples = temp_samples;
+
+ for (i = 0; i < 4; i++)
+ validation_state->tmu_setup[tmu].p_offset[i] = ~0;
+
+ return true;
+}
+
+static bool
+check_tmu_write(uint64_t inst,
+ struct vc4_validated_shader_info *validated_shader,
+ struct vc4_shader_validation_state *validation_state,
+ bool is_mul)
+{
+ uint32_t waddr = (is_mul ?
+ QPU_GET_FIELD(inst, QPU_WADDR_MUL) :
+ QPU_GET_FIELD(inst, QPU_WADDR_ADD));
+ uint32_t raddr_a = QPU_GET_FIELD(inst, QPU_RADDR_A);
+ uint32_t raddr_b = QPU_GET_FIELD(inst, QPU_RADDR_B);
+ int tmu = waddr > QPU_W_TMU0_B;
+ bool submit = is_tmu_submit(waddr);
+ bool is_direct = submit && validation_state->tmu_write_count[tmu] == 0;
+ uint32_t sig = QPU_GET_FIELD(inst, QPU_SIG);
+
+ if (is_direct) {
+ uint32_t add_b = QPU_GET_FIELD(inst, QPU_ADD_B);
+ uint32_t clamp_reg, clamp_offset;
+
+ if (sig == QPU_SIG_SMALL_IMM) {
+ DRM_ERROR("direct TMU read used small immediate\n");
+ return false;
+ }
+
+ /* Make sure that this texture load is an add of the base
+ * address of the UBO to a clamped offset within the UBO.
+ */
+ if (is_mul ||
+ QPU_GET_FIELD(inst, QPU_OP_ADD) != QPU_A_ADD) {
+ DRM_ERROR("direct TMU load wasn't an add\n");
+ return false;
+ }
+
+ /* We assert that the clamped address is the first
+ * argument, and the UBO base address is the second argument.
+ * This is arbitrary, but simpler than supporting flipping the
+ * two either way.
+ */
+ clamp_reg = raddr_add_a_to_live_reg_index(inst);
+ if (clamp_reg == ~0) {
+ DRM_ERROR("direct TMU load wasn't clamped\n");
+ return false;
+ }
+
+ clamp_offset = validation_state->live_min_clamp_offsets[clamp_reg];
+ if (clamp_offset == ~0) {
+ DRM_ERROR("direct TMU load wasn't clamped\n");
+ return false;
+ }
+
+ /* Store the clamp value's offset in p1 (see reloc_tex() in
+ * vc4_validate.c).
+ */
+ validation_state->tmu_setup[tmu].p_offset[1] =
+ clamp_offset;
+
+ if (!(add_b == QPU_MUX_A && raddr_a == QPU_R_UNIF) &&
+ !(add_b == QPU_MUX_B && raddr_b == QPU_R_UNIF)) {
+ DRM_ERROR("direct TMU load didn't add to a uniform\n");
+ return false;
+ }
+
+ validation_state->tmu_setup[tmu].is_direct = true;
+ } else {
+ if (raddr_a == QPU_R_UNIF || (sig != QPU_SIG_SMALL_IMM &&
+ raddr_b == QPU_R_UNIF)) {
+ DRM_ERROR("uniform read in the same instruction as "
+ "texture setup.\n");
+ return false;
+ }
+ }
+
+ if (validation_state->tmu_write_count[tmu] >= 4) {
+ DRM_ERROR("TMU%d got too many parameters before dispatch\n",
+ tmu);
+ return false;
+ }
+ validation_state->tmu_setup[tmu].p_offset[validation_state->tmu_write_count[tmu]] =
+ validated_shader->uniforms_size;
+ validation_state->tmu_write_count[tmu]++;
+ /* Since direct uses a RADDR uniform reference, it will get counted in
+ * check_instruction_reads()
+ */
+ if (!is_direct)
+ validated_shader->uniforms_size += 4;
+
+ if (submit) {
+ if (!record_validated_texture_sample(validated_shader,
+ validation_state, tmu)) {
+ return false;
+ }
+
+ validation_state->tmu_write_count[tmu] = 0;
+ }
+
+ return true;
+}
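+
+/* The direct-TMU pattern accepted above looks roughly like this
+ * (hypothetical QPU assembly; register names are for illustration):
+ *
+ * max r0, ra0, 0 ; clamp the offset to a minimum of 0
+ * min r1, r0, unif ; clamp it against a bound read from a uniform
+ * add t0s, r1, unif ; add the UBO base uniform and submit to TMU0
+ */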
+
+static bool
+check_register_write(uint64_t inst,
+ struct vc4_validated_shader_info *validated_shader,
+ struct vc4_shader_validation_state *validation_state,
+ bool is_mul)
+{
+ uint32_t waddr = (is_mul ?
+ QPU_GET_FIELD(inst, QPU_WADDR_MUL) :
+ QPU_GET_FIELD(inst, QPU_WADDR_ADD));
+
+ switch (waddr) {
+ case QPU_W_UNIFORMS_ADDRESS:
+ /* XXX: We'll probably need to support this for reladdr, but
+ * it's definitely a security-related one.
+ */
+ DRM_ERROR("uniforms address load unsupported\n");
+ return false;
+
+ case QPU_W_TLB_COLOR_MS:
+ case QPU_W_TLB_COLOR_ALL:
+ case QPU_W_TLB_Z:
+ /* These only interact with the tile buffer, not main memory,
+ * so they're safe.
+ */
+ return true;
+
+ case QPU_W_TMU0_S:
+ case QPU_W_TMU0_T:
+ case QPU_W_TMU0_R:
+ case QPU_W_TMU0_B:
+ case QPU_W_TMU1_S:
+ case QPU_W_TMU1_T:
+ case QPU_W_TMU1_R:
+ case QPU_W_TMU1_B:
+ return check_tmu_write(inst, validated_shader, validation_state,
+ is_mul);
+
+ case QPU_W_HOST_INT:
+ case QPU_W_TMU_NOSWAP:
+ case QPU_W_TLB_ALPHA_MASK:
+ case QPU_W_MUTEX_RELEASE:
+ /* XXX: I haven't thought about these, so don't support them
+ * for now.
+ */
+ DRM_ERROR("Unsupported waddr %d\n", waddr);
+ return false;
+
+ case QPU_W_VPM_ADDR:
+ DRM_ERROR("General VPM DMA unsupported\n");
+ return false;
+
+ case QPU_W_VPM:
+ case QPU_W_VPMVCD_SETUP:
+ /* We allow VPM setup in general, even including VPM DMA
+ * configuration setup, because the (unsafe) DMA can only be
+ * triggered by QPU_W_VPM_ADDR writes.
+ */
+ return true;
+
+ case QPU_W_TLB_STENCIL_SETUP:
+ return true;
+ }
+
+ return true;
+}
+
+static void
+track_live_clamps(uint64_t inst,
+ struct vc4_validated_shader_info *validated_shader,
+ struct vc4_shader_validation_state *validation_state)
+{
+ uint32_t op_add = QPU_GET_FIELD(inst, QPU_OP_ADD);
+ uint32_t waddr_add = QPU_GET_FIELD(inst, QPU_WADDR_ADD);
+ uint32_t waddr_mul = QPU_GET_FIELD(inst, QPU_WADDR_MUL);
+ uint32_t cond_add = QPU_GET_FIELD(inst, QPU_COND_ADD);
+ uint32_t add_a = QPU_GET_FIELD(inst, QPU_ADD_A);
+ uint32_t add_b = QPU_GET_FIELD(inst, QPU_ADD_B);
+ uint32_t raddr_a = QPU_GET_FIELD(inst, QPU_RADDR_A);
+ uint32_t raddr_b = QPU_GET_FIELD(inst, QPU_RADDR_B);
+ uint32_t sig = QPU_GET_FIELD(inst, QPU_SIG);
+ bool ws = inst & QPU_WS;
+ uint32_t lri_add_a, lri_add, lri_mul;
+ bool add_a_is_min_0;
+
+ /* Check whether OP_ADD's A argument comes from a live MAX(x, 0),
+ * before we clear previous live state.
+ */
+ lri_add_a = raddr_add_a_to_live_reg_index(inst);
+ add_a_is_min_0 = (lri_add_a != ~0 &&
+ validation_state->live_max_clamp_regs[lri_add_a]);
+
+ /* Clear live state for registers written by our instruction. */
+ lri_add = waddr_to_live_reg_index(waddr_add, ws);
+ lri_mul = waddr_to_live_reg_index(waddr_mul, !ws);
+ if (lri_mul != ~0) {
+ validation_state->live_max_clamp_regs[lri_mul] = false;
+ validation_state->live_min_clamp_offsets[lri_mul] = ~0;
+ }
+ if (lri_add != ~0) {
+ validation_state->live_max_clamp_regs[lri_add] = false;
+ validation_state->live_min_clamp_offsets[lri_add] = ~0;
+ } else {
+ /* Nothing further to do for live tracking, since only ADDs
+ * generate new live clamp registers.
+ */
+ return;
+ }
+
+ /* Now, handle remaining live clamp tracking for the ADD operation. */
+
+ if (cond_add != QPU_COND_ALWAYS)
+ return;
+
+ if (op_add == QPU_A_MAX) {
+ /* Track live clamps of a value to a minimum of 0 (in either
+ * arg).
+ */
+ if (sig != QPU_SIG_SMALL_IMM || raddr_b != 0 ||
+ (add_a != QPU_MUX_B && add_b != QPU_MUX_B)) {
+ return;
+ }
+
+ validation_state->live_max_clamp_regs[lri_add] = true;
+ } else if (op_add == QPU_A_MIN) {
+ /* Track live clamps of a value clamped to a minimum of 0 and
+ * a maximum of some uniform's offset.
+ */
+ if (!add_a_is_min_0)
+ return;
+
+ if (!(add_b == QPU_MUX_A && raddr_a == QPU_R_UNIF) &&
+ !(add_b == QPU_MUX_B && raddr_b == QPU_R_UNIF &&
+ sig != QPU_SIG_SMALL_IMM)) {
+ return;
+ }
+
+ validation_state->live_min_clamp_offsets[lri_add] =
+ validated_shader->uniforms_size;
+ }
+}
+
+static bool
+check_instruction_writes(uint64_t inst,
+ struct vc4_validated_shader_info *validated_shader,
+ struct vc4_shader_validation_state *validation_state)
+{
+ uint32_t waddr_add = QPU_GET_FIELD(inst, QPU_WADDR_ADD);
+ uint32_t waddr_mul = QPU_GET_FIELD(inst, QPU_WADDR_MUL);
+ bool ok;
+
+ if (is_tmu_write(waddr_add) && is_tmu_write(waddr_mul)) {
+ DRM_ERROR("ADD and MUL both set up textures\n");
+ return false;
+ }
+
+ ok = (check_register_write(inst, validated_shader, validation_state, false) &&
+ check_register_write(inst, validated_shader, validation_state, true));
+
+ track_live_clamps(inst, validated_shader, validation_state);
+
+ return ok;
+}
+
+static bool
+check_instruction_reads(uint64_t inst,
+ struct vc4_validated_shader_info *validated_shader)
+{
+ uint32_t raddr_a = QPU_GET_FIELD(inst, QPU_RADDR_A);
+ uint32_t raddr_b = QPU_GET_FIELD(inst, QPU_RADDR_B);
+ uint32_t sig = QPU_GET_FIELD(inst, QPU_SIG);
+
+ if (raddr_a == QPU_R_UNIF ||
+ (raddr_b == QPU_R_UNIF && sig != QPU_SIG_SMALL_IMM)) {
+ /* This can't overflow the uint32_t, because we consume 8 bytes
+ * of instruction stream for every 4 bytes we add here, so we'd
+ * run out of BO before overflowing.
+ */
+ validated_shader->uniforms_size += 4;
+ }
+
+ return true;
+}
+
+struct vc4_validated_shader_info *
+vc4_validate_shader(struct drm_gem_cma_object *shader_obj)
+{
+ struct vc4_dev *vc4 = to_vc4_dev(shader_obj->base.dev);
+ struct vc4_bo *shader_bo = to_vc4_bo(&shader_obj->base);
+ bool found_shader_end = false;
+ int shader_end_ip = 0;
+ uint32_t ip, max_ip;
+ uint64_t *shader;
+ struct vc4_validated_shader_info *validated_shader;
+ struct vc4_shader_validation_state validation_state;
+ int i;
+
+ if (shader_bo->validated_shader)
+ return shader_bo->validated_shader;
+
+ /* Our validation relies on nothing modifying the shader
+ * contents after us, so just ban sending us busy BOs.
+ */
+ if (shader_bo->seqno > vc4->finished_seqno) {
+ DRM_ERROR("shader BO is currently busy on the GPU.\n");
+ return NULL;
+ }
+
+ if (shader_bo->dma_buf_import_export) {
+ DRM_ERROR("shader BO was exported through dmabuf.\n");
+ return NULL;
+ }
+
+ memset(&validation_state, 0, sizeof(validation_state));
+
+ for (i = 0; i < 8; i++)
+ validation_state.tmu_setup[i / 4].p_offset[i % 4] = ~0;
+ for (i = 0; i < ARRAY_SIZE(validation_state.live_min_clamp_offsets); i++)
+ validation_state.live_min_clamp_offsets[i] = ~0;
+
+ shader = shader_obj->vaddr;
+ max_ip = shader_obj->base.size / sizeof(uint64_t);
+
+ validated_shader = kcalloc(1, sizeof(*validated_shader), GFP_KERNEL);
+ if (!validated_shader)
+ return NULL;
+
+ for (ip = 0; ip < max_ip; ip++) {
+ uint64_t inst = shader[ip];
+ uint32_t sig = QPU_GET_FIELD(inst, QPU_SIG);
+
+ switch (sig) {
+ case QPU_SIG_NONE:
+ case QPU_SIG_WAIT_FOR_SCOREBOARD:
+ case QPU_SIG_SCOREBOARD_UNLOCK:
+ case QPU_SIG_COLOR_LOAD:
+ case QPU_SIG_LOAD_TMU0:
+ case QPU_SIG_LOAD_TMU1:
+ case QPU_SIG_PROG_END:
+ case QPU_SIG_SMALL_IMM:
+ if (!check_instruction_writes(inst, validated_shader,
+ &validation_state)) {
+ DRM_ERROR("Bad write at ip %d\n", ip);
+ goto fail;
+ }
+
+ if (!check_instruction_reads(inst, validated_shader))
+ goto fail;
+
+ if (sig == QPU_SIG_PROG_END) {
+ found_shader_end = true;
+ shader_end_ip = ip;
+ }
+
+ break;
+
+ case QPU_SIG_LOAD_IMM:
+ if (!check_instruction_writes(inst, validated_shader,
+ &validation_state)) {
+ DRM_ERROR("Bad LOAD_IMM write at ip %d\n", ip);
+ goto fail;
+ }
+ break;
+
+ default:
+ DRM_ERROR("Unsupported QPU signal %d at "
+ "instruction %d\n", sig, ip);
+ goto fail;
+ }
+
+ /* There are two delay slots after program end is signaled
+ * that are still executed, then we're finished.
+ */
+ if (found_shader_end && ip == shader_end_ip + 2)
+ break;
+ }
+
+ if (ip == max_ip) {
+ DRM_ERROR("shader failed to terminate before "
+ "shader BO end at %d\n",
+ shader_obj->base.size);
+ goto fail;
+ }
+
+ /* Again, no chance of integer overflow here because the worst case
+ * scenario is 8 bytes of uniforms plus handles per 8-byte
+ * instruction.
+ */
+ validated_shader->uniforms_src_size =
+ (validated_shader->uniforms_size +
+ 4 * validated_shader->num_texture_samples);
+
+ shader_bo->validated_shader = validated_shader;
+ return validated_shader;
+
+fail:
+ kfree(validated_shader);
+ return NULL;
+}
diff --git a/drivers/mailbox/Kconfig b/drivers/mailbox/Kconfig
index 7f19cb434f2b..cc3e89ac6982 100644
--- a/drivers/mailbox/Kconfig
+++ b/drivers/mailbox/Kconfig
@@ -62,8 +62,25 @@ config PCC
config ALTERA_MBOX
tristate "Altera Mailbox"
+ depends on HAS_IOMEM
help
An implementation of the Altera Mailbox soft core. It is used
to send message between processors. Say Y here if you want to use the
Altera mailbox support.
+
+config BCM2835_MBOX
+ tristate "BCM2835 Mailbox"
+ depends on ARCH_BCM2835
+ help
+ An implementation of the BCM2835 Mailbox. It is used to invoke
+ the services of the Videocore. Say Y here if you want to use the
+ BCM2835 Mailbox.
+
+config TEGRA_XUSB_MBOX
+ tristate "NVIDIA Tegra XUSB Mailbox"
+ depends on MFD_TEGRA_XUSB
+ help
+ Mailbox driver for the XUSB complex found on NVIDIA Tegra124 and
+ later SoCs. The XUSB mailbox is used to communicate between the
+ XUSB microcontroller and the host processor.
endif
diff --git a/drivers/mailbox/Makefile b/drivers/mailbox/Makefile
index cecfad323669..95017fc3d3e8 100644
--- a/drivers/mailbox/Makefile
+++ b/drivers/mailbox/Makefile
@@ -13,3 +13,7 @@ obj-$(CONFIG_OMAP2PLUS_MBOX) += omap-mailbox.o
obj-$(CONFIG_PCC) += pcc.o
obj-$(CONFIG_ALTERA_MBOX) += mailbox-altera.o
+
+obj-$(CONFIG_BCM2835_MBOX) += bcm2835-mailbox.o
+
+obj-$(CONFIG_TEGRA_XUSB_MBOX) += tegra-xusb-mailbox.o
diff --git a/drivers/mailbox/arm_mhu.c b/drivers/mailbox/arm_mhu.c
index ac693c635357..d9e99f981aa9 100644
--- a/drivers/mailbox/arm_mhu.c
+++ b/drivers/mailbox/arm_mhu.c
@@ -110,7 +110,7 @@ static void mhu_shutdown(struct mbox_chan *chan)
free_irq(mlink->irq, chan);
}
-static struct mbox_chan_ops mhu_ops = {
+static const struct mbox_chan_ops mhu_ops = {
.send_data = mhu_send_data,
.startup = mhu_startup,
.shutdown = mhu_shutdown,
diff --git a/drivers/mailbox/bcm2835-mailbox.c b/drivers/mailbox/bcm2835-mailbox.c
new file mode 100644
index 000000000000..0b47dd42f3bd
--- /dev/null
+++ b/drivers/mailbox/bcm2835-mailbox.c
@@ -0,0 +1,217 @@
+/*
+ * Copyright (C) 2010,2015 Broadcom
+ * Copyright (C) 2013-2014 Lubomir Rintel
+ * Copyright (C) 2013 Craig McGeachie
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This device provides a mechanism for writing to the mailboxes
+ * that are shared between the ARM and the VideoCore processor.
+ *
+ * Parts of the driver are based on:
+ * - arch/arm/mach-bcm2708/vcio.c file written by Gray Girling that was
+ * obtained from branch "rpi-3.6.y" of git://github.com/raspberrypi/
+ * linux.git
+ * - drivers/mailbox/bcm2835-ipc.c by Lubomir Rintel at
+ * https://github.com/hackerspace/rpi-linux/blob/lr-raspberry-pi/drivers/
+ * mailbox/bcm2835-ipc.c
+ * - documentation available on the following web site:
+ * https://github.com/raspberrypi/firmware/wiki/Mailbox-property-interface
+ */
+
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/mailbox_controller.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+
+/* Mailboxes */
+#define ARM_0_MAIL0 0x00
+#define ARM_0_MAIL1 0x20
+
+/*
+ * Mailbox registers. We basically only support mailbox 0 & 1. We
+ * deliver to the VC in mailbox 1, it delivers to us in mailbox 0. See
+ * BCM2835-ARM-Peripherals.pdf section 1.3 for an explanation about
+ * the placement of memory barriers.
+ */
+#define MAIL0_RD (ARM_0_MAIL0 + 0x00)
+#define MAIL0_POL (ARM_0_MAIL0 + 0x10)
+#define MAIL0_STA (ARM_0_MAIL0 + 0x18)
+#define MAIL0_CNF (ARM_0_MAIL0 + 0x1C)
+#define MAIL1_WRT (ARM_0_MAIL1 + 0x00)
+#define MAIL1_STA (ARM_0_MAIL1 + 0x18)
+
+/* Status register: FIFO state. */
+#define ARM_MS_FULL BIT(31)
+#define ARM_MS_EMPTY BIT(30)
+
+/* Configuration register: Enable interrupts. */
+#define ARM_MC_IHAVEDATAIRQEN BIT(0)
+
+struct bcm2835_mbox {
+ void __iomem *regs;
+ spinlock_t lock;
+ struct mbox_controller controller;
+};
+
+static struct bcm2835_mbox *bcm2835_link_mbox(struct mbox_chan *link)
+{
+ return container_of(link->mbox, struct bcm2835_mbox, controller);
+}
+
+static irqreturn_t bcm2835_mbox_irq(int irq, void *dev_id)
+{
+ struct bcm2835_mbox *mbox = dev_id;
+ struct device *dev = mbox->controller.dev;
+ struct mbox_chan *link = &mbox->controller.chans[0];
+
+ while (!(readl(mbox->regs + MAIL0_STA) & ARM_MS_EMPTY)) {
+ u32 msg = readl(mbox->regs + MAIL0_RD);
+ dev_dbg(dev, "Reply 0x%08X\n", msg);
+ mbox_chan_received_data(link, &msg);
+ }
+ return IRQ_HANDLED;
+}
+
+static int bcm2835_send_data(struct mbox_chan *link, void *data)
+{
+ struct bcm2835_mbox *mbox = bcm2835_link_mbox(link);
+ u32 msg = *(u32 *)data;
+
+ spin_lock(&mbox->lock);
+ writel(msg, mbox->regs + MAIL1_WRT);
+ dev_dbg(mbox->controller.dev, "Request 0x%08X\n", msg);
+ spin_unlock(&mbox->lock);
+ return 0;
+}
+
+static int bcm2835_startup(struct mbox_chan *link)
+{
+ struct bcm2835_mbox *mbox = bcm2835_link_mbox(link);
+
+ /* Enable the interrupt on data reception */
+ writel(ARM_MC_IHAVEDATAIRQEN, mbox->regs + MAIL0_CNF);
+
+ return 0;
+}
+
+static void bcm2835_shutdown(struct mbox_chan *link)
+{
+ struct bcm2835_mbox *mbox = bcm2835_link_mbox(link);
+
+ writel(0, mbox->regs + MAIL0_CNF);
+}
+
+static bool bcm2835_last_tx_done(struct mbox_chan *link)
+{
+ struct bcm2835_mbox *mbox = bcm2835_link_mbox(link);
+ bool ret;
+
+ spin_lock(&mbox->lock);
+ ret = !(readl(mbox->regs + MAIL1_STA) & ARM_MS_FULL);
+ spin_unlock(&mbox->lock);
+ return ret;
+}
+
+static const struct mbox_chan_ops bcm2835_mbox_chan_ops = {
+ .send_data = bcm2835_send_data,
+ .startup = bcm2835_startup,
+ .shutdown = bcm2835_shutdown,
+ .last_tx_done = bcm2835_last_tx_done
+};
+
+static struct mbox_chan *bcm2835_mbox_index_xlate(struct mbox_controller *mbox,
+ const struct of_phandle_args *sp)
+{
+ if (sp->args_count != 0)
+ return ERR_PTR(-EINVAL);
+
+ return &mbox->chans[0];
+}
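+
+/*
+ * This controller exposes a single channel, so a client node would
+ * reference it with an empty specifier, e.g. (hypothetical DT
+ * snippet): mboxes = <&mailbox>;
+ */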
+
+static int bcm2835_mbox_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ int ret = 0;
+ struct resource *iomem;
+ struct bcm2835_mbox *mbox;
+
+ mbox = devm_kzalloc(dev, sizeof(*mbox), GFP_KERNEL);
+ if (mbox == NULL)
+ return -ENOMEM;
+ spin_lock_init(&mbox->lock);
+
+ ret = devm_request_irq(dev, irq_of_parse_and_map(dev->of_node, 0),
+ bcm2835_mbox_irq, 0, dev_name(dev), mbox);
+ if (ret) {
+ dev_err(dev, "Failed to register a mailbox IRQ handler: %d\n",
+ ret);
+ return -ENODEV;
+ }
+
+ iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ mbox->regs = devm_ioremap_resource(&pdev->dev, iomem);
+ if (IS_ERR(mbox->regs)) {
+ ret = PTR_ERR(mbox->regs);
+ dev_err(&pdev->dev, "Failed to remap mailbox regs: %d\n", ret);
+ return ret;
+ }
+
+ mbox->controller.txdone_poll = true;
+ mbox->controller.txpoll_period = 5;
+ mbox->controller.ops = &bcm2835_mbox_chan_ops;
+ mbox->controller.of_xlate = &bcm2835_mbox_index_xlate;
+ mbox->controller.dev = dev;
+ mbox->controller.num_chans = 1;
+ mbox->controller.chans = devm_kzalloc(dev,
+ sizeof(*mbox->controller.chans), GFP_KERNEL);
+ if (!mbox->controller.chans)
+ return -ENOMEM;
+
+ ret = mbox_controller_register(&mbox->controller);
+ if (ret)
+ return ret;
+
+ platform_set_drvdata(pdev, mbox);
+ dev_info(dev, "mailbox enabled\n");
+
+ return ret;
+}
+
+static int bcm2835_mbox_remove(struct platform_device *pdev)
+{
+ struct bcm2835_mbox *mbox = platform_get_drvdata(pdev);
+ mbox_controller_unregister(&mbox->controller);
+ return 0;
+}
+
+static const struct of_device_id bcm2835_mbox_of_match[] = {
+ { .compatible = "brcm,bcm2835-mbox", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, bcm2835_mbox_of_match);
+
+static struct platform_driver bcm2835_mbox_driver = {
+ .driver = {
+ .name = "bcm2835-mbox",
+ .owner = THIS_MODULE,
+ .of_match_table = bcm2835_mbox_of_match,
+ },
+ .probe = bcm2835_mbox_probe,
+ .remove = bcm2835_mbox_remove,
+};
+module_platform_driver(bcm2835_mbox_driver);
+
+MODULE_AUTHOR("Lubomir Rintel <lkundrak@v3.sk>");
+MODULE_DESCRIPTION("BCM2835 mailbox IPC driver");
+MODULE_LICENSE("GPL v2");
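For context, here is a minimal client-side sketch of how a consumer driver might talk to this controller through the generic mailbox API in linux/mailbox_client.h. Nothing below is part of the patch: the probe function, message word, and timeout values are illustrative assumptions.

	/* Hypothetical mailbox client; none of these names appear in the patch. */
	#include <linux/err.h>
	#include <linux/mailbox_client.h>
	#include <linux/module.h>
	#include <linux/platform_device.h>

	static void sketch_rx(struct mbox_client *cl, void *msg)
	{
		/* Replies are delivered from bcm2835_mbox_irq() above. */
		dev_dbg(cl->dev, "reply 0x%08X\n", *(u32 *)msg);
	}

	static int sketch_probe(struct platform_device *pdev)
	{
		struct mbox_client *cl;
		struct mbox_chan *chan;
		u32 msg = 0;	/* illustrative message word */
		int ret;

		cl = devm_kzalloc(&pdev->dev, sizeof(*cl), GFP_KERNEL);
		if (!cl)
			return -ENOMEM;

		cl->dev = &pdev->dev;
		cl->rx_callback = sketch_rx;
		cl->tx_block = true;
		cl->tx_tout = 500;	/* ms */

		/* May return ERR_PTR(-EPROBE_DEFER); see the mailbox.c hunks below. */
		chan = mbox_request_channel(cl, 0);
		if (IS_ERR(chan))
			return PTR_ERR(chan);

		ret = mbox_send_message(chan, &msg);
		mbox_free_channel(chan);
		return ret < 0 ? ret : 0;
	}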
diff --git a/drivers/mailbox/mailbox-altera.c b/drivers/mailbox/mailbox-altera.c
index a266265677d3..bb682c926b0a 100644
--- a/drivers/mailbox/mailbox-altera.c
+++ b/drivers/mailbox/mailbox-altera.c
@@ -285,7 +285,7 @@ static void altera_mbox_shutdown(struct mbox_chan *chan)
}
}
-static struct mbox_chan_ops altera_mbox_ops = {
+static const struct mbox_chan_ops altera_mbox_ops = {
.send_data = altera_mbox_send_data,
.startup = altera_mbox_startup,
.shutdown = altera_mbox_shutdown,
diff --git a/drivers/mailbox/mailbox.c b/drivers/mailbox/mailbox.c
index 19b491d2964f..c3c42d42d017 100644
--- a/drivers/mailbox/mailbox.c
+++ b/drivers/mailbox/mailbox.c
@@ -318,7 +318,7 @@ struct mbox_chan *mbox_request_channel(struct mbox_client *cl, int index)
return ERR_PTR(-ENODEV);
}
- chan = NULL;
+ chan = ERR_PTR(-EPROBE_DEFER);
list_for_each_entry(mbox, &mbox_cons, node)
if (mbox->dev->of_node == spec.np) {
chan = mbox->of_xlate(mbox, &spec);
@@ -327,7 +327,12 @@ struct mbox_chan *mbox_request_channel(struct mbox_client *cl, int index)
of_node_put(spec.np);
- if (!chan || chan->cl || !try_module_get(mbox->dev->driver->owner)) {
+ if (IS_ERR(chan)) {
+ mutex_unlock(&con_mutex);
+ return chan;
+ }
+
+ if (chan->cl || !try_module_get(mbox->dev->driver->owner)) {
dev_dbg(dev, "%s: mailbox not free\n", __func__);
mutex_unlock(&con_mutex);
return ERR_PTR(-EBUSY);
@@ -390,7 +395,7 @@ of_mbox_index_xlate(struct mbox_controller *mbox,
int ind = sp->args[0];
if (ind >= mbox->num_chans)
- return NULL;
+ return ERR_PTR(-EINVAL);
return &mbox->chans[ind];
}
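Taken together, the hunks above change the of_xlate contract: controller callbacks now report failure with ERR_PTR() codes rather than NULL, so mbox_request_channel() can propagate -EPROBE_DEFER when the controller has not registered yet instead of conflating it with a bad request. A hedged sketch of a compliant xlate under the new contract (names illustrative):

	static struct mbox_chan *sketch_of_xlate(struct mbox_controller *mbox,
						 const struct of_phandle_args *sp)
	{
		if (sp->args_count != 1)
			return ERR_PTR(-EINVAL);	/* malformed mboxes property */

		if (sp->args[0] >= mbox->num_chans)
			return ERR_PTR(-EINVAL);	/* channel index out of range */

		return &mbox->chans[sp->args[0]];
	}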
diff --git a/drivers/mailbox/omap-mailbox.c b/drivers/mailbox/omap-mailbox.c
index 0f332c178b07..a3dbfd9c6479 100644
--- a/drivers/mailbox/omap-mailbox.c
+++ b/drivers/mailbox/omap-mailbox.c
@@ -604,7 +604,7 @@ static int omap_mbox_chan_send_data(struct mbox_chan *chan, void *data)
return ret;
}
-static struct mbox_chan_ops omap_mbox_chan_ops = {
+static const struct mbox_chan_ops omap_mbox_chan_ops = {
.startup = omap_mbox_chan_startup,
.send_data = omap_mbox_chan_send_data,
.shutdown = omap_mbox_chan_shutdown,
@@ -639,18 +639,18 @@ static struct mbox_chan *omap_mbox_of_xlate(struct mbox_controller *controller,
mdev = container_of(controller, struct omap_mbox_device, controller);
if (WARN_ON(!mdev))
- return NULL;
+ return ERR_PTR(-EINVAL);
node = of_find_node_by_phandle(phandle);
if (!node) {
pr_err("%s: could not find node phandle 0x%x\n",
__func__, phandle);
- return NULL;
+ return ERR_PTR(-ENODEV);
}
mbox = omap_mbox_device_find(mdev, node->name);
of_node_put(node);
- return mbox ? mbox->chan : NULL;
+ return mbox ? mbox->chan : ERR_PTR(-ENOENT);
}
static int omap_mbox_probe(struct platform_device *pdev)
diff --git a/drivers/mailbox/pcc.c b/drivers/mailbox/pcc.c
index 7e91d68a3ac3..26d121d1d501 100644
--- a/drivers/mailbox/pcc.c
+++ b/drivers/mailbox/pcc.c
@@ -198,7 +198,7 @@ static int pcc_send_data(struct mbox_chan *chan, void *data)
return 0;
}
-static struct mbox_chan_ops pcc_chan_ops = {
+static const struct mbox_chan_ops pcc_chan_ops = {
.send_data = pcc_send_data,
};
diff --git a/drivers/mailbox/tegra-xusb-mailbox.c b/drivers/mailbox/tegra-xusb-mailbox.c
new file mode 100644
index 000000000000..4e2477dd7881
--- /dev/null
+++ b/drivers/mailbox/tegra-xusb-mailbox.c
@@ -0,0 +1,290 @@
+/*
+ * NVIDIA Tegra XUSB mailbox driver
+ *
+ * Copyright (C) 2014 NVIDIA Corporation
+ * Copyright (C) 2014 Google, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/mailbox_controller.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+#include <soc/tegra/xusb.h>
+
+#define XUSB_MBOX_NUM_CHANS 2 /* Host + PHY */
+
+#define XUSB_CFG_ARU_MBOX_CMD 0xe4
+#define MBOX_DEST_FALC BIT(27)
+#define MBOX_DEST_PME BIT(28)
+#define MBOX_DEST_SMI BIT(29)
+#define MBOX_DEST_XHCI BIT(30)
+#define MBOX_INT_EN BIT(31)
+#define XUSB_CFG_ARU_MBOX_DATA_IN 0xe8
+#define CMD_DATA_SHIFT 0
+#define CMD_DATA_MASK 0xffffff
+#define CMD_TYPE_SHIFT 24
+#define CMD_TYPE_MASK 0xff
+#define XUSB_CFG_ARU_MBOX_DATA_OUT 0xec
+#define XUSB_CFG_ARU_MBOX_OWNER 0xf0
+#define MBOX_OWNER_NONE 0
+#define MBOX_OWNER_FW 1
+#define MBOX_OWNER_SW 2
+#define XUSB_CFG_ARU_SMI_INTR 0x428
+#define MBOX_SMI_INTR_FW_HANG BIT(1)
+#define MBOX_SMI_INTR_EN BIT(3)
+
+struct tegra_xusb_mbox {
+ struct mbox_controller mbox;
+ struct regmap *fpci_regs;
+ spinlock_t lock;
+ int irq;
+};
+
+static inline u32 mbox_readl(struct tegra_xusb_mbox *mbox, unsigned long offset)
+{
+ u32 val;
+
+ regmap_read(mbox->fpci_regs, offset, &val);
+
+ return val;
+}
+
+static inline void mbox_writel(struct tegra_xusb_mbox *mbox, u32 val,
+ unsigned long offset)
+{
+ regmap_write(mbox->fpci_regs, offset, val);
+}
+
+static inline struct tegra_xusb_mbox *to_tegra_mbox(struct mbox_controller *c)
+{
+ return container_of(c, struct tegra_xusb_mbox, mbox);
+}
+
+static inline u32 mbox_pack_msg(struct tegra_xusb_mbox_msg *msg)
+{
+ u32 val;
+
+ val = (msg->cmd & CMD_TYPE_MASK) << CMD_TYPE_SHIFT;
+ val |= (msg->data & CMD_DATA_MASK) << CMD_DATA_SHIFT;
+
+ return val;
+}
+
+static inline void mbox_unpack_msg(u32 val, struct tegra_xusb_mbox_msg *msg)
+{
+ msg->cmd = (val >> CMD_TYPE_SHIFT) & CMD_TYPE_MASK;
+ msg->data = (val >> CMD_DATA_SHIFT) & CMD_DATA_MASK;
+}
+
+static bool mbox_cmd_requires_ack(enum tegra_xusb_mbox_cmd cmd)
+{
+ switch (cmd) {
+ case MBOX_CMD_SET_BW:
+ case MBOX_CMD_ACK:
+ case MBOX_CMD_NAK:
+ return false;
+ default:
+ return true;
+ }
+}
+
+static int tegra_xusb_mbox_send_data(struct mbox_chan *chan, void *data)
+{
+ struct tegra_xusb_mbox *mbox = to_tegra_mbox(chan->mbox);
+ struct tegra_xusb_mbox_msg *msg = data;
+ unsigned long flags;
+ u32 reg;
+
+ dev_dbg(mbox->mbox.dev, "TX message %#x:%#x\n", msg->cmd, msg->data);
+
+ spin_lock_irqsave(&mbox->lock, flags);
+ /*
+ * Acquire the mailbox. The firmware still owns the mailbox for
+ * ACK/NAK messages.
+ */
+ if (!(msg->cmd == MBOX_CMD_ACK || msg->cmd == MBOX_CMD_NAK)) {
+ if (mbox_readl(mbox, XUSB_CFG_ARU_MBOX_OWNER) !=
+ MBOX_OWNER_NONE) {
+ dev_err(mbox->mbox.dev, "Mailbox not idle\n");
+ goto busy;
+ }
+
+ mbox_writel(mbox, MBOX_OWNER_SW, XUSB_CFG_ARU_MBOX_OWNER);
+ if (mbox_readl(mbox, XUSB_CFG_ARU_MBOX_OWNER) !=
+ MBOX_OWNER_SW) {
+ dev_err(mbox->mbox.dev, "Failed to acquire mailbox");
+ goto busy;
+ }
+ }
+
+ mbox_writel(mbox, mbox_pack_msg(msg), XUSB_CFG_ARU_MBOX_DATA_IN);
+ reg = mbox_readl(mbox, XUSB_CFG_ARU_MBOX_CMD);
+ reg |= MBOX_INT_EN | MBOX_DEST_FALC;
+ mbox_writel(mbox, reg, XUSB_CFG_ARU_MBOX_CMD);
+
+ spin_unlock_irqrestore(&mbox->lock, flags);
+
+ return 0;
+busy:
+ spin_unlock_irqrestore(&mbox->lock, flags);
+ return -EBUSY;
+}
+
+static int tegra_xusb_mbox_startup(struct mbox_chan *chan)
+{
+ return 0;
+}
+
+static void tegra_xusb_mbox_shutdown(struct mbox_chan *chan)
+{
+}
+
+static bool tegra_xusb_mbox_last_tx_done(struct mbox_chan *chan)
+{
+ struct tegra_xusb_mbox *mbox = to_tegra_mbox(chan->mbox);
+
+ return mbox_readl(mbox, XUSB_CFG_ARU_MBOX_OWNER) == MBOX_OWNER_NONE;
+}
+
+static const struct mbox_chan_ops tegra_xusb_mbox_chan_ops = {
+ .send_data = tegra_xusb_mbox_send_data,
+ .startup = tegra_xusb_mbox_startup,
+ .shutdown = tegra_xusb_mbox_shutdown,
+ .last_tx_done = tegra_xusb_mbox_last_tx_done,
+};
+
+static irqreturn_t tegra_xusb_mbox_irq(int irq, void *p)
+{
+ struct tegra_xusb_mbox *mbox = p;
+ struct tegra_xusb_mbox_msg msg;
+ unsigned int i;
+ u32 reg;
+
+ spin_lock(&mbox->lock);
+
+ /* Clear mbox interrupts */
+ reg = mbox_readl(mbox, XUSB_CFG_ARU_SMI_INTR);
+ if (reg & MBOX_SMI_INTR_FW_HANG)
+ dev_err(mbox->mbox.dev, "Controller firmware hang\n");
+ mbox_writel(mbox, reg, XUSB_CFG_ARU_SMI_INTR);
+
+ reg = mbox_readl(mbox, XUSB_CFG_ARU_MBOX_DATA_OUT);
+ mbox_unpack_msg(reg, &msg);
+
+ reg = mbox_readl(mbox, XUSB_CFG_ARU_MBOX_CMD);
+ reg &= ~MBOX_DEST_SMI;
+ mbox_writel(mbox, reg, XUSB_CFG_ARU_MBOX_CMD);
+
+ /* Clear mailbox owner if no ACK/NAK is required. */
+ if (!mbox_cmd_requires_ack(msg.cmd))
+ mbox_writel(mbox, MBOX_OWNER_NONE, XUSB_CFG_ARU_MBOX_OWNER);
+
+ dev_dbg(mbox->mbox.dev, "RX message %#x:%#x\n", msg.cmd, msg.data);
+ for (i = 0; i < XUSB_MBOX_NUM_CHANS; i++) {
+ if (mbox->mbox.chans[i].cl)
+ mbox_chan_received_data(&mbox->mbox.chans[i], &msg);
+ }
+
+ spin_unlock(&mbox->lock);
+
+ return IRQ_HANDLED;
+}
+
+static struct mbox_chan *tegra_xusb_mbox_of_xlate(struct mbox_controller *ctlr,
+ const struct of_phandle_args *sp)
+{
+ struct tegra_xusb_mbox *mbox = to_tegra_mbox(ctlr);
+ struct mbox_chan *chan = ERR_PTR(-EINVAL);
+ unsigned long flags;
+ unsigned int i;
+
+ /* Pick the first available (virtual) channel. */
+ spin_lock_irqsave(&mbox->lock, flags);
+	for (i = 0; i < XUSB_MBOX_NUM_CHANS; i++) {
+ if (!ctlr->chans[i].cl) {
+ chan = &ctlr->chans[i];
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&mbox->lock, flags);
+
+ return chan;
+}
+
+static const struct of_device_id tegra_xusb_mbox_of_match[] = {
+ { .compatible = "nvidia,tegra124-xusb-mbox" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, tegra_xusb_mbox_of_match);
+
+static int tegra_xusb_mbox_probe(struct platform_device *pdev)
+{
+ struct tegra_xusb_mbox *mbox;
+ int ret;
+
+ mbox = devm_kzalloc(&pdev->dev, sizeof(*mbox), GFP_KERNEL);
+ if (!mbox)
+ return -ENOMEM;
+ platform_set_drvdata(pdev, mbox);
+ spin_lock_init(&mbox->lock);
+ mbox->fpci_regs = dev_get_drvdata(pdev->dev.parent);
+
+ mbox->mbox.dev = &pdev->dev;
+ mbox->mbox.chans = devm_kcalloc(&pdev->dev, XUSB_MBOX_NUM_CHANS,
+ sizeof(*mbox->mbox.chans), GFP_KERNEL);
+ if (!mbox->mbox.chans)
+ return -ENOMEM;
+ mbox->mbox.num_chans = XUSB_MBOX_NUM_CHANS;
+ mbox->mbox.ops = &tegra_xusb_mbox_chan_ops;
+ mbox->mbox.txdone_poll = true;
+ mbox->mbox.txpoll_period = 1;
+ mbox->mbox.of_xlate = tegra_xusb_mbox_of_xlate;
+
+ mbox->irq = platform_get_irq(pdev, 0);
+ if (mbox->irq < 0)
+ return mbox->irq;
+ ret = devm_request_irq(&pdev->dev, mbox->irq, tegra_xusb_mbox_irq, 0,
+ dev_name(&pdev->dev), mbox);
+ if (ret < 0)
+ return ret;
+
+ ret = mbox_controller_register(&mbox->mbox);
+ if (ret < 0)
+ dev_err(&pdev->dev, "failed to register mailbox: %d\n", ret);
+
+ return ret;
+}
+
+static int tegra_xusb_mbox_remove(struct platform_device *pdev)
+{
+ struct tegra_xusb_mbox *mbox = platform_get_drvdata(pdev);
+
+ synchronize_irq(mbox->irq);
+ devm_free_irq(&pdev->dev, mbox->irq, mbox);
+ mbox_controller_unregister(&mbox->mbox);
+
+ return 0;
+}
+
+static struct platform_driver tegra_xusb_mbox_driver = {
+ .probe = tegra_xusb_mbox_probe,
+ .remove = tegra_xusb_mbox_remove,
+ .driver = {
+ .name = "tegra-xusb-mbox",
+ .of_match_table = tegra_xusb_mbox_of_match,
+ },
+};
+module_platform_driver(tegra_xusb_mbox_driver);
+
+MODULE_AUTHOR("Andrew Bresticker <abrestic@chromium.org>");
+MODULE_DESCRIPTION("NVIDIA Tegra XUSB mailbox driver");
+MODULE_LICENSE("GPL v2");
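A hedged sketch of the client side of this controller: struct tegra_xusb_mbox_msg and the MBOX_CMD_* values come from <soc/tegra/xusb.h>, added further down in this diff, and the channel is assumed to have been obtained via mbox_request_channel(). The helper name is an assumption.

	#include <linux/mailbox_client.h>
	#include <soc/tegra/xusb.h>

	/* Hypothetical helper: acknowledge a firmware request on @chan. */
	static int sketch_ack_fw(struct mbox_chan *chan, u32 data)
	{
		struct tegra_xusb_mbox_msg msg = {
			.cmd = MBOX_CMD_ACK,	/* FW keeps mailbox ownership for ACK/NAK */
			.data = data,
		};

		/* send_data() packs this into XUSB_CFG_ARU_MBOX_DATA_IN. */
		return mbox_send_message(chan, &msg);
	}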
diff --git a/drivers/watchdog/bcm2835_wdt.c b/drivers/watchdog/bcm2835_wdt.c
index 2b5a9bbf80b7..7116968dee12 100644
--- a/drivers/watchdog/bcm2835_wdt.c
+++ b/drivers/watchdog/bcm2835_wdt.c
@@ -13,20 +13,25 @@
* option) any later version.
*/
+#include <linux/delay.h>
+#include <linux/reboot.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/watchdog.h>
#include <linux/platform_device.h>
#include <linux/of_address.h>
+#include <linux/of_platform.h>
#define PM_RSTC 0x1c
+#define PM_RSTS 0x20
#define PM_WDOG 0x24
#define PM_PASSWORD 0x5a000000
#define PM_WDOG_TIME_SET 0x000fffff
#define PM_RSTC_WRCFG_CLR 0xffffffcf
+#define PM_RSTS_HADWRH_SET 0x00000040
#define PM_RSTC_WRCFG_SET 0x00000030
#define PM_RSTC_WRCFG_FULL_RESET 0x00000020
#define PM_RSTC_RESET 0x00000102
@@ -37,6 +42,7 @@
struct bcm2835_wdt {
void __iomem *base;
spinlock_t lock;
+ struct notifier_block restart_handler;
};
static unsigned int heartbeat;
@@ -106,6 +112,53 @@ static struct watchdog_device bcm2835_wdt_wdd = {
.timeout = WDOG_TICKS_TO_SECS(PM_WDOG_TIME_SET),
};
+static int
+bcm2835_restart(struct notifier_block *this, unsigned long mode, void *cmd)
+{
+ struct bcm2835_wdt *wdt = container_of(this, struct bcm2835_wdt,
+ restart_handler);
+ u32 val;
+
+ /* use a timeout of 10 ticks (~150us) */
+ writel_relaxed(10 | PM_PASSWORD, wdt->base + PM_WDOG);
+ val = readl_relaxed(wdt->base + PM_RSTC);
+ val &= PM_RSTC_WRCFG_CLR;
+ val |= PM_PASSWORD | PM_RSTC_WRCFG_FULL_RESET;
+ writel_relaxed(val, wdt->base + PM_RSTC);
+
+ /* No sleeping, possibly atomic. */
+ mdelay(1);
+
+ return 0;
+}
+
+/*
+ * We can't really power off, but if we do the normal reset scheme, and
+ * indicate to bootcode.bin not to reboot, then most of the chip will be
+ * powered off.
+ */
+static void bcm2835_power_off(void)
+{
+ struct device_node *np =
+ of_find_compatible_node(NULL, NULL, "brcm,bcm2835-pm-wdt");
+ struct platform_device *pdev = of_find_device_by_node(np);
+ struct bcm2835_wdt *wdt = platform_get_drvdata(pdev);
+ u32 val;
+
+ /*
+ * We set the watchdog hard reset bit here to distinguish this reset
+ * from the normal (full) reset. bootcode.bin will not reboot after a
+ * hard reset.
+ */
+ val = readl_relaxed(wdt->base + PM_RSTS);
+ val &= PM_RSTC_WRCFG_CLR;
+ val |= PM_PASSWORD | PM_RSTS_HADWRH_SET;
+ writel_relaxed(val, wdt->base + PM_RSTS);
+
+ /* Continue with normal reset mechanism */
+ bcm2835_restart(&wdt->restart_handler, REBOOT_HARD, NULL);
+}
+
static int bcm2835_wdt_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -136,6 +189,12 @@ static int bcm2835_wdt_probe(struct platform_device *pdev)
return err;
}
+ wdt->restart_handler.notifier_call = bcm2835_restart;
+ wdt->restart_handler.priority = 128;
+ register_restart_handler(&wdt->restart_handler);
+ if (pm_power_off == NULL)
+ pm_power_off = bcm2835_power_off;
+
dev_info(dev, "Broadcom BCM2835 watchdog timer");
return 0;
}
@@ -144,6 +203,9 @@ static int bcm2835_wdt_remove(struct platform_device *pdev)
{
struct bcm2835_wdt *wdt = platform_get_drvdata(pdev);
+ unregister_restart_handler(&wdt->restart_handler);
+ if (pm_power_off == bcm2835_power_off)
+ pm_power_off = NULL;
watchdog_unregister_device(&bcm2835_wdt_wdd);
iounmap(wdt->base);
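Every register write in the hunks above carries PM_PASSWORD in the top byte; the BCM2835 PM block silently ignores writes without it. A hypothetical helper (not in the patch) that captures the convention:

	static inline void bcm2835_pm_write(struct bcm2835_wdt *wdt, u32 reg, u32 val)
	{
		/* The 0x5a password byte must accompany every PM write. */
		writel_relaxed(PM_PASSWORD | val, wdt->base + reg);
	}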
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index 62c40777c009..f59a177aa344 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -621,6 +621,7 @@ struct drm_driver {
u32 driver_features;
int dev_priv_size;
+ size_t gem_obj_size;
const struct drm_ioctl_desc *ioctls;
int num_ioctls;
const struct file_operations *fops;
diff --git a/include/dt-bindings/pinctrl/bcm2835.h b/include/dt-bindings/pinctrl/bcm2835.h
new file mode 100644
index 000000000000..6f0bc37af39c
--- /dev/null
+++ b/include/dt-bindings/pinctrl/bcm2835.h
@@ -0,0 +1,27 @@
+/*
+ * Header providing constants for bcm2835 pinctrl bindings.
+ *
+ * Copyright (C) 2015 Stefan Wahren <stefan.wahren@i2se.com>
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+#ifndef __DT_BINDINGS_PINCTRL_BCM2835_H__
+#define __DT_BINDINGS_PINCTRL_BCM2835_H__
+
+/* brcm,function property */
+#define BCM2835_FSEL_GPIO_IN 0
+#define BCM2835_FSEL_GPIO_OUT 1
+#define BCM2835_FSEL_ALT5 2
+#define BCM2835_FSEL_ALT4 3
+#define BCM2835_FSEL_ALT0 4
+#define BCM2835_FSEL_ALT1 5
+#define BCM2835_FSEL_ALT2 6
+#define BCM2835_FSEL_ALT3 7
+
+#endif /* __DT_BINDINGS_PINCTRL_BCM2835_H__ */
diff --git a/include/linux/mailbox_controller.h b/include/linux/mailbox_controller.h
index d4cf96f07cfc..68c42454439b 100644
--- a/include/linux/mailbox_controller.h
+++ b/include/linux/mailbox_controller.h
@@ -72,7 +72,7 @@ struct mbox_chan_ops {
*/
struct mbox_controller {
struct device *dev;
- struct mbox_chan_ops *ops;
+ const struct mbox_chan_ops *ops;
struct mbox_chan *chans;
int num_chans;
bool txdone_irq;
diff --git a/include/soc/bcm2835/raspberrypi-firmware-property.h b/include/soc/bcm2835/raspberrypi-firmware-property.h
new file mode 100644
index 000000000000..b007e924d5dc
--- /dev/null
+++ b/include/soc/bcm2835/raspberrypi-firmware-property.h
@@ -0,0 +1,117 @@
+/*
+ * Copyright © 2015 Broadcom
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __SOC_BCM2835_RASPBERRYPI_FIRMWARE_PROPERTY_H__
+#define __SOC_BCM2835_RASPBERRYPI_FIRMWARE_PROPERTY_H__
+
+#include <linux/types.h>
+#include <linux/of_device.h>
+
+enum rpi_firmware_property_status {
+ RPI_FIRMWARE_STATUS_REQUEST = 0,
+ RPI_FIRMWARE_STATUS_SUCCESS = 0x80000000,
+ RPI_FIRMWARE_STATUS_ERROR = 0x80000001,
+};
+
+/**
+ * struct rpi_firmware_property_tag_header - Firmware property tag header
+ * @tag: One of enum rpi_firmware_property_tag.
+ * @buf_size: The number of bytes in the value buffer following this
+ * struct.
+ * @req_resp_size: On submit, the length of the request (though it doesn't
+ * appear to be currently used by the firmware). On return,
+ * the length of the response (always 4 byte aligned), with
+ * the low bit set.
+ */
+struct rpi_firmware_property_tag_header {
+ u32 tag;
+ u32 buf_size;
+ u32 req_resp_size;
+};
+
+enum rpi_firmware_property_tag {
+ RPI_FIRMWARE_PROPERTY_END = 0,
+ RPI_FIRMWARE_GET_FIRMWARE_REVISION = 0x00000001,
+
+ RPI_FIRMWARE_SET_CURSOR_INFO = 0x00008010,
+ RPI_FIRMWARE_SET_CURSOR_STATE = 0x00008011,
+
+ RPI_FIRMWARE_GET_BOARD_MODEL = 0x00010001,
+ RPI_FIRMWARE_GET_BOARD_REVISION = 0x00010002,
+ RPI_FIRMWARE_GET_BOARD_MAC_ADDRESS = 0x00010003,
+ RPI_FIRMWARE_GET_BOARD_SERIAL = 0x00010004,
+ RPI_FIRMWARE_GET_ARM_MEMORY = 0x00010005,
+ RPI_FIRMWARE_GET_VC_MEMORY = 0x00010006,
+ RPI_FIRMWARE_GET_CLOCKS = 0x00010007,
+ RPI_FIRMWARE_GET_POWER_STATE = 0x00020001,
+ RPI_FIRMWARE_GET_TIMING = 0x00020002,
+ RPI_FIRMWARE_SET_POWER_STATE = 0x00028001,
+ RPI_FIRMWARE_GET_CLOCK_STATE = 0x00030001,
+ RPI_FIRMWARE_GET_CLOCK_RATE = 0x00030002,
+ RPI_FIRMWARE_GET_VOLTAGE = 0x00030003,
+ RPI_FIRMWARE_GET_MAX_CLOCK_RATE = 0x00030004,
+ RPI_FIRMWARE_GET_MAX_VOLTAGE = 0x00030005,
+ RPI_FIRMWARE_GET_TEMPERATURE = 0x00030006,
+ RPI_FIRMWARE_GET_MIN_CLOCK_RATE = 0x00030007,
+ RPI_FIRMWARE_GET_MIN_VOLTAGE = 0x00030008,
+ RPI_FIRMWARE_GET_TURBO = 0x00030009,
+ RPI_FIRMWARE_GET_MAX_TEMPERATURE = 0x0003000a,
+ RPI_FIRMWARE_ALLOCATE_MEMORY = 0x0003000c,
+ RPI_FIRMWARE_LOCK_MEMORY = 0x0003000d,
+ RPI_FIRMWARE_UNLOCK_MEMORY = 0x0003000e,
+ RPI_FIRMWARE_RELEASE_MEMORY = 0x0003000f,
+ RPI_FIRMWARE_EXECUTE_CODE = 0x00030010,
+ RPI_FIRMWARE_EXECUTE_QPU = 0x00030011,
+ RPI_FIRMWARE_SET_ENABLE_QPU = 0x00030012,
+ RPI_FIRMWARE_GET_DISPMANX_RESOURCE_MEM_HANDLE = 0x00030014,
+ RPI_FIRMWARE_GET_EDID_BLOCK = 0x00030020,
+ RPI_FIRMWARE_SET_CLOCK_STATE = 0x00038001,
+ RPI_FIRMWARE_SET_CLOCK_RATE = 0x00038002,
+ RPI_FIRMWARE_SET_VOLTAGE = 0x00038003,
+ RPI_FIRMWARE_SET_TURBO = 0x00038009,
+
+ /* Dispmanx TAGS */
+ RPI_FIRMWARE_FRAMEBUFFER_ALLOCATE = 0x00040001,
+ RPI_FIRMWARE_FRAMEBUFFER_BLANK = 0x00040002,
+ RPI_FIRMWARE_FRAMEBUFFER_GET_PHYSICAL_WIDTH_HEIGHT = 0x00040003,
+ RPI_FIRMWARE_FRAMEBUFFER_GET_VIRTUAL_WIDTH_HEIGHT = 0x00040004,
+ RPI_FIRMWARE_FRAMEBUFFER_GET_DEPTH = 0x00040005,
+ RPI_FIRMWARE_FRAMEBUFFER_GET_PIXEL_ORDER = 0x00040006,
+ RPI_FIRMWARE_FRAMEBUFFER_GET_ALPHA_MODE = 0x00040007,
+ RPI_FIRMWARE_FRAMEBUFFER_GET_PITCH = 0x00040008,
+ RPI_FIRMWARE_FRAMEBUFFER_GET_VIRTUAL_OFFSET = 0x00040009,
+ RPI_FIRMWARE_FRAMEBUFFER_GET_OVERSCAN = 0x0004000a,
+ RPI_FIRMWARE_FRAMEBUFFER_GET_PALETTE = 0x0004000b,
+ RPI_FIRMWARE_FRAMEBUFFER_RELEASE = 0x00048001,
+ RPI_FIRMWARE_FRAMEBUFFER_TEST_PHYSICAL_WIDTH_HEIGHT = 0x00044003,
+ RPI_FIRMWARE_FRAMEBUFFER_TEST_VIRTUAL_WIDTH_HEIGHT = 0x00044004,
+ RPI_FIRMWARE_FRAMEBUFFER_TEST_DEPTH = 0x00044005,
+ RPI_FIRMWARE_FRAMEBUFFER_TEST_PIXEL_ORDER = 0x00044006,
+ RPI_FIRMWARE_FRAMEBUFFER_TEST_ALPHA_MODE = 0x00044007,
+ RPI_FIRMWARE_FRAMEBUFFER_TEST_VIRTUAL_OFFSET = 0x00044009,
+ RPI_FIRMWARE_FRAMEBUFFER_TEST_OVERSCAN = 0x0004400a,
+ RPI_FIRMWARE_FRAMEBUFFER_TEST_PALETTE = 0x0004400b,
+ RPI_FIRMWARE_FRAMEBUFFER_SET_PHYSICAL_WIDTH_HEIGHT = 0x00048003,
+ RPI_FIRMWARE_FRAMEBUFFER_SET_VIRTUAL_WIDTH_HEIGHT = 0x00048004,
+ RPI_FIRMWARE_FRAMEBUFFER_SET_DEPTH = 0x00048005,
+ RPI_FIRMWARE_FRAMEBUFFER_SET_PIXEL_ORDER = 0x00048006,
+ RPI_FIRMWARE_FRAMEBUFFER_SET_ALPHA_MODE = 0x00048007,
+ RPI_FIRMWARE_FRAMEBUFFER_SET_VIRTUAL_OFFSET = 0x00048009,
+ RPI_FIRMWARE_FRAMEBUFFER_SET_OVERSCAN = 0x0004800a,
+ RPI_FIRMWARE_FRAMEBUFFER_SET_PALETTE = 0x0004800b,
+
+ RPI_FIRMWARE_GET_COMMAND_LINE = 0x00050001,
+ RPI_FIRMWARE_GET_DMA_CHANNELS = 0x00060001,
+};
+
+int rpi_firmware_property(struct device_node *of_node,
+ u32 tag, void *data, size_t len);
+int rpi_firmware_property_list(struct device_node *of_node,
+			       void *data, size_t tag_size);
+
+#endif /* __SOC_BCM2835_RASPBERRYPI_FIRMWARE_PROPERTY_H__ */
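A hedged sketch of a single-tag query through the interface declared above; the caller is assumed to already hold the firmware device_node (for example from a "firmware" phandle), and the helper name is not part of the patch.

	#include <soc/bcm2835/raspberrypi-firmware-property.h>

	static int sketch_get_fw_revision(struct device_node *fw_node, u32 *rev)
	{
		/*
		 * The single-property call presumably wraps the value in a
		 * struct rpi_firmware_property_tag_header plus an end tag,
		 * so the caller supplies only the tag and its value buffer.
		 */
		return rpi_firmware_property(fw_node,
					     RPI_FIRMWARE_GET_FIRMWARE_REVISION,
					     rev, sizeof(*rev));
	}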
diff --git a/include/soc/tegra/xusb.h b/include/soc/tegra/xusb.h
new file mode 100644
index 000000000000..5ce5e12e99fc
--- /dev/null
+++ b/include/soc/tegra/xusb.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (C) 2014 NVIDIA Corporation
+ * Copyright (C) 2014 Google, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+#ifndef __SOC_TEGRA_XUSB_H__
+#define __SOC_TEGRA_XUSB_H__
+
+/* Command requests from the firmware */
+enum tegra_xusb_mbox_cmd {
+ MBOX_CMD_MSG_ENABLED = 1,
+ MBOX_CMD_INC_FALC_CLOCK,
+ MBOX_CMD_DEC_FALC_CLOCK,
+ MBOX_CMD_INC_SSPI_CLOCK,
+ MBOX_CMD_DEC_SSPI_CLOCK,
+ MBOX_CMD_SET_BW, /* no ACK/NAK required */
+ MBOX_CMD_SET_SS_PWR_GATING,
+ MBOX_CMD_SET_SS_PWR_UNGATING,
+ MBOX_CMD_SAVE_DFE_CTLE_CTX,
+ MBOX_CMD_AIRPLANE_MODE_ENABLED, /* unused */
+ MBOX_CMD_AIRPLANE_MODE_DISABLED, /* unused */
+ MBOX_CMD_START_HSIC_IDLE,
+ MBOX_CMD_STOP_HSIC_IDLE,
+ MBOX_CMD_DBC_WAKE_STACK, /* unused */
+ MBOX_CMD_HSIC_PRETEND_CONNECT,
+
+ MBOX_CMD_MAX,
+
+ /* Response message to above commands */
+ MBOX_CMD_ACK = 128,
+ MBOX_CMD_NAK
+};
+
+struct tegra_xusb_mbox_msg {
+ u32 cmd;
+ u32 data;
+};
+
+#endif /* __SOC_TEGRA_XUSB_H__ */
diff --git a/include/uapi/drm/vc4_drm.h b/include/uapi/drm/vc4_drm.h
new file mode 100644
index 000000000000..4d55307c5107
--- /dev/null
+++ b/include/uapi/drm/vc4_drm.h
@@ -0,0 +1,204 @@
+/*
+ * Copyright © 2014-2015 Broadcom
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef _UAPI_VC4_DRM_H_
+#define _UAPI_VC4_DRM_H_
+
+#include <drm/drm.h>
+
+#define DRM_VC4_SUBMIT_CL 0x00
+#define DRM_VC4_WAIT_SEQNO 0x01
+#define DRM_VC4_WAIT_BO 0x02
+#define DRM_VC4_CREATE_BO 0x03
+#define DRM_VC4_MMAP_BO 0x04
+
+#define DRM_IOCTL_VC4_SUBMIT_CL           DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_SUBMIT_CL, struct drm_vc4_submit_cl)
+#define DRM_IOCTL_VC4_WAIT_SEQNO          DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_WAIT_SEQNO, struct drm_vc4_wait_seqno)
+#define DRM_IOCTL_VC4_WAIT_BO             DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_WAIT_BO, struct drm_vc4_wait_bo)
+#define DRM_IOCTL_VC4_CREATE_BO           DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_CREATE_BO, struct drm_vc4_create_bo)
+#define DRM_IOCTL_VC4_MMAP_BO             DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_MMAP_BO, struct drm_vc4_mmap_bo)
+
+struct drm_vc4_submit_rcl_surface {
+ uint32_t hindex; /* Handle index, or ~0 if not present. */
+ uint32_t offset; /* Offset to start of buffer. */
+ /*
+ * Bits for either render config (color_ms_write) or load/store packet.
+ */
+ uint16_t bits;
+ uint16_t pad;
+};
+
+/**
+ * struct drm_vc4_submit_cl - ioctl argument for submitting commands to the 3D
+ * engine.
+ *
+ * Drivers typically use GPU BOs to store batchbuffers / command lists and
+ * their associated state. However, because the VC4 lacks an MMU, we have to
+ * do validation of memory accesses by the GPU commands. If we were to store
+ * our commands in BOs, we'd need to do uncached readback from them to do the
+ * validation process, which is too expensive. Instead, userspace accumulates
+ * commands and associated state in plain memory, then the kernel copies the
+ * data to its own address space, and then validates and stores it in a GPU
+ * BO.
+ */
+struct drm_vc4_submit_cl {
+ /* Pointer to the binner command list.
+ *
+ * This is the first set of commands executed, which runs the
+ * coordinate shader to determine where primitives land on the screen,
+ * then writes out the state updates and draw calls necessary per tile
+ * to the tile allocation BO.
+ */
+ uint64_t bin_cl;
+
+ /* Pointer to the shader records.
+ *
+ * Shader records are the structures read by the hardware that contain
+ * pointers to uniforms, shaders, and vertex attributes. The
+ * reference to the shader record has enough information to determine
+ * how many pointers are necessary (fixed number for shaders/uniforms,
+ * and an attribute count), so those BO indices into bo_handles are
+ * just stored as uint32_ts before each shader record passed in.
+ */
+ uint64_t shader_rec;
+
+ /* Pointer to uniform data and texture handles for the textures
+ * referenced by the shader.
+ *
+ * For each shader state record, there is a set of uniform data in the
+ * order referenced by the record (FS, VS, then CS). Each set of
+ * uniform data has a uint32_t index into bo_handles per texture
+ * sample operation, in the order the QPU_W_TMUn_S writes appear in
+ * the program. Following the texture BO handle indices is the actual
+ * uniform data.
+ *
+ * The individual uniform state blocks don't have sizes passed in,
+ * because the kernel has to determine the sizes anyway during shader
+ * code validation.
+ */
+ uint64_t uniforms;
+ uint64_t bo_handles;
+
+ /* Size in bytes of the binner command list. */
+ uint32_t bin_cl_size;
+ /* Size in bytes of the set of shader records. */
+ uint32_t shader_rec_size;
+ /* Number of shader records.
+ *
+ * This could just be computed from the contents of shader_records and
+ * the address bits of references to them from the bin CL, but it
+ * keeps the kernel from having to resize some allocations it makes.
+ */
+ uint32_t shader_rec_count;
+ /* Size in bytes of the uniform state. */
+ uint32_t uniforms_size;
+
+ /* Number of BO handles passed in (size is that times 4). */
+ uint32_t bo_handle_count;
+
+ /* RCL setup: */
+ uint16_t width;
+ uint16_t height;
+ uint8_t min_x_tile;
+ uint8_t min_y_tile;
+ uint8_t max_x_tile;
+ uint8_t max_y_tile;
+ struct drm_vc4_submit_rcl_surface color_read;
+ struct drm_vc4_submit_rcl_surface color_ms_write;
+ struct drm_vc4_submit_rcl_surface zs_read;
+ struct drm_vc4_submit_rcl_surface zs_write;
+ uint32_t clear_color[2];
+ uint32_t clear_z;
+ uint8_t clear_s;
+
+ uint32_t pad:24;
+
+#define VC4_SUBMIT_CL_USE_CLEAR_COLOR (1 << 0)
+ uint32_t flags;
+
+ /* Returned value of the seqno of this render job (for the
+ * wait ioctl).
+ */
+ uint64_t seqno;
+};
+
+/**
+ * struct drm_vc4_wait_seqno - ioctl argument for waiting for
+ * DRM_VC4_SUBMIT_CL completion using its returned seqno.
+ *
+ * timeout_ns is the timeout in nanoseconds, where "0" means "don't
+ * block, just return the status."
+ */
+struct drm_vc4_wait_seqno {
+ uint64_t seqno;
+ uint64_t timeout_ns;
+};
+
+/**
+ * struct drm_vc4_wait_bo - ioctl argument for waiting for
+ * completion of the last DRM_VC4_SUBMIT_CL on a BO.
+ *
+ * This is useful for cases where multiple processes might be
+ * rendering to a BO and you want to wait for all rendering to be
+ * completed.
+ */
+struct drm_vc4_wait_bo {
+ uint32_t handle;
+ uint32_t pad;
+ uint64_t timeout_ns;
+};
+
+/**
+ * struct drm_vc4_create_bo - ioctl argument for creating VC4 BOs.
+ *
+ * There are currently no values for the flags argument, but it may be
+ * used in a future extension.
+ */
+struct drm_vc4_create_bo {
+ uint32_t size;
+ uint32_t flags;
+ /** Returned GEM handle for the BO. */
+ uint32_t handle;
+ uint32_t pad;
+};
+
+/**
+ * struct drm_vc4_mmap_bo - ioctl argument for mapping VC4 BOs.
+ *
+ * This doesn't actually perform an mmap. Instead, it returns the
+ * offset you need to use in an mmap on the DRM device node. This
+ * means that tools like valgrind end up knowing about the mapped
+ * memory.
+ *
+ * There are currently no values for the flags argument, but it may be
+ * used in a future extension.
+ */
+struct drm_vc4_mmap_bo {
+ /** Handle for the object being mapped. */
+ uint32_t handle;
+ uint32_t flags;
+ /** offset into the drm node to use for subsequent mmap call. */
+ uint64_t offset;
+};
+
+#endif /* _UAPI_VC4_DRM_H_ */
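A userspace sketch of the CREATE_BO/MMAP_BO flow this header describes. drmIoctl() comes from libdrm's xf86drm.h (plain ioctl() would also work), and the helper name, device handling, and error paths are illustrative.

	#include <stdint.h>
	#include <string.h>
	#include <sys/mman.h>
	#include <xf86drm.h>
	#include "vc4_drm.h"

	static void *sketch_map_new_bo(int fd, uint32_t size, uint32_t *handle)
	{
		struct drm_vc4_create_bo create;
		struct drm_vc4_mmap_bo map;
		void *ptr;

		memset(&create, 0, sizeof(create));
		create.size = size;
		if (drmIoctl(fd, DRM_IOCTL_VC4_CREATE_BO, &create) != 0)
			return NULL;

		memset(&map, 0, sizeof(map));
		map.handle = create.handle;
		if (drmIoctl(fd, DRM_IOCTL_VC4_MMAP_BO, &map) != 0)
			return NULL;

		*handle = create.handle;
		/* MMAP_BO only returns the fake offset; the mapping itself is mmap(). */
		ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
			   fd, (off_t)map.offset);
		return ptr == MAP_FAILED ? NULL : ptr;
	}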
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index ebffa0e4a9c0..997b7e955034 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -6482,8 +6482,6 @@ int alloc_contig_range(unsigned long start, unsigned long end,
/* Make sure the range is really isolated. */
if (test_pages_isolated(outer_start, end, false)) {
- pr_info("%s: [%lx, %lx) PFNs busy\n",
- __func__, outer_start, end);
ret = -EBUSY;
goto done;
}