Diffstat (limited to 'drivers/gpu')
-rw-r--r--  drivers/gpu/drm/arm/malidp_mw.c          |  2
-rw-r--r--  drivers/gpu/drm/drm_dp_mst_topology.c    | 41
-rw-r--r--  drivers/gpu/drm/drm_fb_helper.c          |  7
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_gsc.c  |  1
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_lrc.c      |  8
-rw-r--r--  drivers/gpu/drm/i915/gvt/dmabuf.c        | 64
-rw-r--r--  drivers/gpu/drm/i915/gvt/handlers.c      | 16
-rw-r--r--  drivers/gpu/drm/i915/gvt/hypercall.h     |  2
-rw-r--r--  drivers/gpu/drm/i915/gvt/kvmgt.c         | 23
-rw-r--r--  drivers/gpu/drm/i915/gvt/mpt.h           | 15
-rw-r--r--  drivers/gpu/drm/i915/gvt/vgpu.c          |  4
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_dsi.c       | 67
-rw-r--r--  drivers/gpu/drm/rockchip/cdn-dp-core.h   |  2
-rw-r--r--  drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c   |  2
-rw-r--r--  drivers/gpu/drm/sun4i/sun4i_tcon.c       | 15
-rw-r--r--  drivers/gpu/drm/sun4i/sun4i_tcon.h       |  1
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_plane.c   |  1
17 files changed, 224 insertions, 47 deletions
diff --git a/drivers/gpu/drm/arm/malidp_mw.c b/drivers/gpu/drm/arm/malidp_mw.c
index 875a3a9eabfa..7d0e7b031e44 100644
--- a/drivers/gpu/drm/arm/malidp_mw.c
+++ b/drivers/gpu/drm/arm/malidp_mw.c
@@ -56,7 +56,7 @@ malidp_mw_connector_mode_valid(struct drm_connector *connector,
return MODE_OK;
}
-const struct drm_connector_helper_funcs malidp_mw_connector_helper_funcs = {
+static const struct drm_connector_helper_funcs malidp_mw_connector_helper_funcs = {
.get_modes = malidp_mw_connector_get_modes,
.mode_valid = malidp_mw_connector_mode_valid,
};
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index 021c5a98db09..38bf111e5f9b 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -398,7 +398,7 @@ drm_dp_encode_sideband_req(const struct drm_dp_sideband_msg_req_body *req,
memcpy(&buf[idx], req->u.i2c_read.transactions[i].bytes, req->u.i2c_read.transactions[i].num_bytes);
idx += req->u.i2c_read.transactions[i].num_bytes;
- buf[idx] = (req->u.i2c_read.transactions[i].no_stop_bit & 0x1) << 5;
+ buf[idx] = (req->u.i2c_read.transactions[i].no_stop_bit & 0x1) << 4;
buf[idx] |= (req->u.i2c_read.transactions[i].i2c_transaction_delay & 0xf);
idx++;
}
@@ -1209,6 +1209,8 @@ static int drm_dp_mst_wait_tx_reply(struct drm_dp_mst_branch *mstb,
txmsg->state == DRM_DP_SIDEBAND_TX_SENT) {
mstb->tx_slots[txmsg->seqno] = NULL;
}
+ mgr->is_waiting_for_dwn_reply = false;
+
}
out:
if (unlikely(ret == -EIO) && drm_debug_enabled(DRM_UT_DP)) {
@@ -1218,6 +1220,7 @@ out:
}
mutex_unlock(&mgr->qlock);
+ drm_dp_mst_kick_tx(mgr);
return ret;
}
@@ -2342,7 +2345,7 @@ drm_dp_mst_handle_conn_stat(struct drm_dp_mst_branch *mstb,
{
struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
struct drm_dp_mst_port *port;
- int old_ddps, ret;
+ int old_ddps, old_input, ret, i;
u8 new_pdt;
bool dowork = false, create_connector = false;
@@ -2373,6 +2376,7 @@ drm_dp_mst_handle_conn_stat(struct drm_dp_mst_branch *mstb,
}
old_ddps = port->ddps;
+ old_input = port->input;
port->input = conn_stat->input_port;
port->mcs = conn_stat->message_capability_status;
port->ldps = conn_stat->legacy_device_plug_status;
@@ -2397,6 +2401,28 @@ drm_dp_mst_handle_conn_stat(struct drm_dp_mst_branch *mstb,
dowork = false;
}
+ if (!old_input && old_ddps != port->ddps && !port->ddps) {
+ for (i = 0; i < mgr->max_payloads; i++) {
+ struct drm_dp_vcpi *vcpi = mgr->proposed_vcpis[i];
+ struct drm_dp_mst_port *port_validated;
+
+ if (!vcpi)
+ continue;
+
+ port_validated =
+ container_of(vcpi, struct drm_dp_mst_port, vcpi);
+ port_validated =
+ drm_dp_mst_topology_get_port_validated(mgr, port_validated);
+ if (!port_validated) {
+ mutex_lock(&mgr->payload_lock);
+ vcpi->num_slots = 0;
+ mutex_unlock(&mgr->payload_lock);
+ } else {
+ drm_dp_mst_topology_put_port(port_validated);
+ }
+ }
+ }
+
if (port->connector)
drm_modeset_unlock(&mgr->base.lock);
else if (create_connector)
@@ -2759,9 +2785,11 @@ static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
ret = process_single_tx_qlock(mgr, txmsg, false);
if (ret == 1) {
/* txmsg is sent it should be in the slots now */
+ mgr->is_waiting_for_dwn_reply = true;
list_del(&txmsg->next);
} else if (ret) {
DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);
+ mgr->is_waiting_for_dwn_reply = false;
list_del(&txmsg->next);
if (txmsg->seqno != -1)
txmsg->dst->tx_slots[txmsg->seqno] = NULL;
@@ -2801,7 +2829,8 @@ static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr,
drm_dp_mst_dump_sideband_msg_tx(&p, txmsg);
}
- if (list_is_singular(&mgr->tx_msg_downq))
+ if (list_is_singular(&mgr->tx_msg_downq) &&
+ !mgr->is_waiting_for_dwn_reply)
process_single_down_tx_qlock(mgr);
mutex_unlock(&mgr->qlock);
}
@@ -3756,6 +3785,7 @@ static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
mutex_lock(&mgr->qlock);
txmsg->state = DRM_DP_SIDEBAND_TX_RX;
mstb->tx_slots[slot] = NULL;
+ mgr->is_waiting_for_dwn_reply = false;
mutex_unlock(&mgr->qlock);
wake_up_all(&mgr->tx_waitq);
@@ -3765,6 +3795,9 @@ static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
no_msg:
drm_dp_mst_topology_put_mstb(mstb);
clear_down_rep_recv:
+ mutex_lock(&mgr->qlock);
+ mgr->is_waiting_for_dwn_reply = false;
+ mutex_unlock(&mgr->qlock);
memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
return 0;
@@ -4597,7 +4630,7 @@ static void drm_dp_tx_work(struct work_struct *work)
struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, tx_work);
mutex_lock(&mgr->qlock);
- if (!list_empty(&mgr->tx_msg_downq))
+ if (!list_empty(&mgr->tx_msg_downq) && !mgr->is_waiting_for_dwn_reply)
process_single_down_tx_qlock(mgr);
mutex_unlock(&mgr->qlock);
}
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 57f510687b85..4c7cbce7bae7 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -1267,7 +1267,7 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
* Changes struct fb_var_screeninfo are currently not pushed back
* to KMS, hence fail if different settings are requested.
*/
- if (var->bits_per_pixel != fb->format->cpp[0] * 8 ||
+ if (var->bits_per_pixel > fb->format->cpp[0] * 8 ||
var->xres > fb->width || var->yres > fb->height ||
var->xres_virtual > fb->width || var->yres_virtual > fb->height) {
drm_dbg_kms(dev, "fb requested width/height/bpp can't fit in current fb "
@@ -1293,6 +1293,11 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
}
/*
+ * Likewise, bits_per_pixel should be rounded up to a supported value.
+ */
+ var->bits_per_pixel = fb->format->cpp[0] * 8;
+
+ /*
* drm fbdev emulation doesn't support changing the pixel format at all,
* so reject all pixel format changing requests.
*/
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
index 7ae087b0504d..88b6fcaa20be 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
@@ -1313,6 +1313,7 @@ static int gsc_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
+ component_del(dev, &gsc_component_ops);
pm_runtime_dont_use_autosuspend(dev);
pm_runtime_disable(dev);
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index 9e430590fb3a..0cf0f6fae675 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -2836,6 +2836,14 @@ static u32 *gen9_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch)
/* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt,glk */
batch = gen8_emit_flush_coherentl3_wa(engine, batch);
+ /* WaClearSlmSpaceAtContextSwitch:skl,bxt,kbl,glk,cfl */
+ batch = gen8_emit_pipe_control(batch,
+ PIPE_CONTROL_FLUSH_L3 |
+ PIPE_CONTROL_STORE_DATA_INDEX |
+ PIPE_CONTROL_CS_STALL |
+ PIPE_CONTROL_QW_WRITE,
+ LRC_PPHWSP_SCRATCH_ADDR);
+
batch = emit_lri(batch, lri, ARRAY_SIZE(lri));
/* WaMediaPoolStateCmdInWABB:bxt,glk */
diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.c b/drivers/gpu/drm/i915/gvt/dmabuf.c
index e451298d11c3..2477a1e5a166 100644
--- a/drivers/gpu/drm/i915/gvt/dmabuf.c
+++ b/drivers/gpu/drm/i915/gvt/dmabuf.c
@@ -36,13 +36,32 @@
#define GEN8_DECODE_PTE(pte) (pte & GENMASK_ULL(63, 12))
+static int vgpu_pin_dma_address(struct intel_vgpu *vgpu,
+ unsigned long size,
+ dma_addr_t dma_addr)
+{
+ int ret = 0;
+
+ if (intel_gvt_hypervisor_dma_pin_guest_page(vgpu, dma_addr))
+ ret = -EINVAL;
+
+ return ret;
+}
+
+static void vgpu_unpin_dma_address(struct intel_vgpu *vgpu,
+ dma_addr_t dma_addr)
+{
+ intel_gvt_hypervisor_dma_unmap_guest_page(vgpu, dma_addr);
+}
+
static int vgpu_gem_get_pages(
struct drm_i915_gem_object *obj)
{
struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
+ struct intel_vgpu *vgpu;
struct sg_table *st;
struct scatterlist *sg;
- int i, ret;
+ int i, j, ret;
gen8_pte_t __iomem *gtt_entries;
struct intel_vgpu_fb_info *fb_info;
u32 page_num;
@@ -51,6 +70,10 @@ static int vgpu_gem_get_pages(
if (WARN_ON(!fb_info))
return -ENODEV;
+ vgpu = fb_info->obj->vgpu;
+ if (WARN_ON(!vgpu))
+ return -ENODEV;
+
st = kmalloc(sizeof(*st), GFP_KERNEL);
if (unlikely(!st))
return -ENOMEM;
@@ -64,21 +87,53 @@ static int vgpu_gem_get_pages(
gtt_entries = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm +
(fb_info->start >> PAGE_SHIFT);
for_each_sg(st->sgl, sg, page_num, i) {
+ dma_addr_t dma_addr =
+ GEN8_DECODE_PTE(readq(&gtt_entries[i]));
+ if (vgpu_pin_dma_address(vgpu, PAGE_SIZE, dma_addr)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
sg->offset = 0;
sg->length = PAGE_SIZE;
- sg_dma_address(sg) =
- GEN8_DECODE_PTE(readq(&gtt_entries[i]));
sg_dma_len(sg) = PAGE_SIZE;
+ sg_dma_address(sg) = dma_addr;
}
__i915_gem_object_set_pages(obj, st, PAGE_SIZE);
+out:
+ if (ret) {
+ dma_addr_t dma_addr;
+
+ for_each_sg(st->sgl, sg, i, j) {
+ dma_addr = sg_dma_address(sg);
+ if (dma_addr)
+ vgpu_unpin_dma_address(vgpu, dma_addr);
+ }
+ sg_free_table(st);
+ kfree(st);
+ }
+
+ return ret;
- return 0;
}
static void vgpu_gem_put_pages(struct drm_i915_gem_object *obj,
struct sg_table *pages)
{
+ struct scatterlist *sg;
+
+ if (obj->base.dma_buf) {
+ struct intel_vgpu_fb_info *fb_info = obj->gvt_info;
+ struct intel_vgpu_dmabuf_obj *obj = fb_info->obj;
+ struct intel_vgpu *vgpu = obj->vgpu;
+ int i;
+
+ for_each_sg(pages->sgl, sg, fb_info->size, i)
+ vgpu_unpin_dma_address(vgpu,
+ sg_dma_address(sg));
+ }
+
sg_free_table(pages);
kfree(pages);
}
@@ -163,6 +218,7 @@ static struct drm_i915_gem_object *vgpu_create_gem(struct drm_device *dev,
drm_gem_private_object_init(dev, &obj->base,
roundup(info->size, PAGE_SIZE));
i915_gem_object_init(obj, &intel_vgpu_gem_ops, &lock_class);
+ i915_gem_object_set_readonly(obj);
obj->read_domains = I915_GEM_DOMAIN_GTT;
obj->write_domain = 0;
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 21af822a79e0..6d28d72e6c7e 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -341,6 +341,10 @@ static int gdrst_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
gvt_dbg_mmio("vgpu%d: request VCS2 Reset\n", vgpu->id);
engine_mask |= BIT(VCS1);
}
+ if (data & GEN9_GRDOM_GUC) {
+ gvt_dbg_mmio("vgpu%d: request GUC Reset\n", vgpu->id);
+ vgpu_vreg_t(vgpu, GUC_STATUS) |= GS_MIA_IN_RESET;
+ }
engine_mask &= INTEL_INFO(vgpu->gvt->dev_priv)->engine_mask;
}
@@ -1636,6 +1640,16 @@ static int edp_psr_imr_iir_write(struct intel_vgpu *vgpu,
return 0;
}
+static int guc_status_read(struct intel_vgpu *vgpu,
+ unsigned int offset, void *p_data,
+ unsigned int bytes)
+{
+ /* keep MIA_IN_RESET before clearing */
+ read_vreg(vgpu, offset, p_data, bytes);
+ vgpu_vreg(vgpu, offset) &= ~GS_MIA_IN_RESET;
+ return 0;
+}
+
static int mmio_read_from_hw(struct intel_vgpu *vgpu,
unsigned int offset, void *p_data, unsigned int bytes)
{
@@ -2672,6 +2686,8 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_DH(EDP_PSR_IMR, D_BDW_PLUS, NULL, edp_psr_imr_iir_write);
MMIO_DH(EDP_PSR_IIR, D_BDW_PLUS, NULL, edp_psr_imr_iir_write);
+ MMIO_DH(GUC_STATUS, D_ALL, guc_status_read, NULL);
+
return 0;
}
diff --git a/drivers/gpu/drm/i915/gvt/hypercall.h b/drivers/gpu/drm/i915/gvt/hypercall.h
index 9599c0a762b2..b17c4a1599cd 100644
--- a/drivers/gpu/drm/i915/gvt/hypercall.h
+++ b/drivers/gpu/drm/i915/gvt/hypercall.h
@@ -66,6 +66,8 @@ struct intel_gvt_mpt {
unsigned long size, dma_addr_t *dma_addr);
void (*dma_unmap_guest_page)(unsigned long handle, dma_addr_t dma_addr);
+ int (*dma_pin_guest_page)(unsigned long handle, dma_addr_t dma_addr);
+
int (*map_gfn_to_mfn)(unsigned long handle, unsigned long gfn,
unsigned long mfn, unsigned int nr, bool map);
int (*set_trap_area)(unsigned long handle, u64 start, u64 end,
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index 04a5a0d90823..3259a1fa69e1 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -1916,6 +1916,28 @@ err_unlock:
return ret;
}
+static int kvmgt_dma_pin_guest_page(unsigned long handle, dma_addr_t dma_addr)
+{
+ struct kvmgt_guest_info *info;
+ struct gvt_dma *entry;
+ int ret = 0;
+
+ if (!handle_valid(handle))
+ return -ENODEV;
+
+ info = (struct kvmgt_guest_info *)handle;
+
+ mutex_lock(&info->vgpu->vdev.cache_lock);
+ entry = __gvt_cache_find_dma_addr(info->vgpu, dma_addr);
+ if (entry)
+ kref_get(&entry->ref);
+ else
+ ret = -ENOMEM;
+ mutex_unlock(&info->vgpu->vdev.cache_lock);
+
+ return ret;
+}
+
static void __gvt_dma_release(struct kref *ref)
{
struct gvt_dma *entry = container_of(ref, typeof(*entry), ref);
@@ -2027,6 +2049,7 @@ static struct intel_gvt_mpt kvmgt_mpt = {
.gfn_to_mfn = kvmgt_gfn_to_pfn,
.dma_map_guest_page = kvmgt_dma_map_guest_page,
.dma_unmap_guest_page = kvmgt_dma_unmap_guest_page,
+ .dma_pin_guest_page = kvmgt_dma_pin_guest_page,
.set_opregion = kvmgt_set_opregion,
.set_edid = kvmgt_set_edid,
.get_vfio_device = kvmgt_get_vfio_device,
diff --git a/drivers/gpu/drm/i915/gvt/mpt.h b/drivers/gpu/drm/i915/gvt/mpt.h
index 0f9440128123..9ad224df9c68 100644
--- a/drivers/gpu/drm/i915/gvt/mpt.h
+++ b/drivers/gpu/drm/i915/gvt/mpt.h
@@ -255,6 +255,21 @@ static inline void intel_gvt_hypervisor_dma_unmap_guest_page(
}
/**
+ * intel_gvt_hypervisor_dma_pin_guest_page - pin guest dma buf
+ * @vgpu: a vGPU
+ * @dma_addr: guest dma addr
+ *
+ * Returns:
+ * 0 on success, negative error code if failed.
+ */
+static inline int
+intel_gvt_hypervisor_dma_pin_guest_page(struct intel_vgpu *vgpu,
+ dma_addr_t dma_addr)
+{
+ return intel_gvt_host.mpt->dma_pin_guest_page(vgpu->handle, dma_addr);
+}
+
+/**
* intel_gvt_hypervisor_map_gfn_to_mfn - map a GFN region to MFN
* @vgpu: a vGPU
* @gfn: guest PFN
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
index d5a6e4e3d0fd..85bd9bf4f6ee 100644
--- a/drivers/gpu/drm/i915/gvt/vgpu.c
+++ b/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -212,9 +212,9 @@ static void intel_gvt_update_vgpu_types(struct intel_gvt *gvt)
*/
void intel_gvt_activate_vgpu(struct intel_vgpu *vgpu)
{
- mutex_lock(&vgpu->gvt->lock);
+ mutex_lock(&vgpu->vgpu_lock);
vgpu->active = true;
- mutex_unlock(&vgpu->gvt->lock);
+ mutex_unlock(&vgpu->vgpu_lock);
}
/**
diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c
index 3b5e016d16c4..5fa1073cf26b 100644
--- a/drivers/gpu/drm/mediatek/mtk_dsi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dsi.c
@@ -230,28 +230,25 @@ static void mtk_dsi_mask(struct mtk_dsi *dsi, u32 offset, u32 mask, u32 data)
static void mtk_dsi_phy_timconfig(struct mtk_dsi *dsi)
{
u32 timcon0, timcon1, timcon2, timcon3;
- u32 ui, cycle_time;
+ u32 data_rate_mhz = DIV_ROUND_UP(dsi->data_rate, 1000000);
struct mtk_phy_timing *timing = &dsi->phy_timing;
- ui = DIV_ROUND_UP(1000000000, dsi->data_rate);
- cycle_time = div_u64(8000000000ULL, dsi->data_rate);
+ timing->lpx = (60 * data_rate_mhz / (8 * 1000)) + 1;
+ timing->da_hs_prepare = (80 * data_rate_mhz + 4 * 1000) / 8000;
+ timing->da_hs_zero = (170 * data_rate_mhz + 10 * 1000) / 8000 + 1 -
+ timing->da_hs_prepare;
+ timing->da_hs_trail = timing->da_hs_prepare + 1;
- timing->lpx = NS_TO_CYCLE(60, cycle_time);
- timing->da_hs_prepare = NS_TO_CYCLE(50 + 5 * ui, cycle_time);
- timing->da_hs_zero = NS_TO_CYCLE(110 + 6 * ui, cycle_time);
- timing->da_hs_trail = NS_TO_CYCLE(77 + 4 * ui, cycle_time);
+ timing->ta_go = 4 * timing->lpx - 2;
+ timing->ta_sure = timing->lpx + 2;
+ timing->ta_get = 4 * timing->lpx;
+ timing->da_hs_exit = 2 * timing->lpx + 1;
- timing->ta_go = 4 * timing->lpx;
- timing->ta_sure = 3 * timing->lpx / 2;
- timing->ta_get = 5 * timing->lpx;
- timing->da_hs_exit = 2 * timing->lpx;
-
- timing->clk_hs_zero = NS_TO_CYCLE(336, cycle_time);
- timing->clk_hs_trail = NS_TO_CYCLE(100, cycle_time) + 10;
-
- timing->clk_hs_prepare = NS_TO_CYCLE(64, cycle_time);
- timing->clk_hs_post = NS_TO_CYCLE(80 + 52 * ui, cycle_time);
- timing->clk_hs_exit = 2 * timing->lpx;
+ timing->clk_hs_prepare = 70 * data_rate_mhz / (8 * 1000);
+ timing->clk_hs_post = timing->clk_hs_prepare + 8;
+ timing->clk_hs_trail = timing->clk_hs_prepare;
+ timing->clk_hs_zero = timing->clk_hs_trail * 4;
+ timing->clk_hs_exit = 2 * timing->clk_hs_trail;
timcon0 = timing->lpx | timing->da_hs_prepare << 8 |
timing->da_hs_zero << 16 | timing->da_hs_trail << 24;
@@ -482,27 +479,39 @@ static void mtk_dsi_config_vdo_timing(struct mtk_dsi *dsi)
dsi_tmp_buf_bpp - 10);
data_phy_cycles = timing->lpx + timing->da_hs_prepare +
- timing->da_hs_zero + timing->da_hs_exit + 2;
+ timing->da_hs_zero + timing->da_hs_exit + 3;
if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_BURST) {
- if (vm->hfront_porch * dsi_tmp_buf_bpp >
+ if ((vm->hfront_porch + vm->hback_porch) * dsi_tmp_buf_bpp >
data_phy_cycles * dsi->lanes + 18) {
- horizontal_frontporch_byte = vm->hfront_porch *
- dsi_tmp_buf_bpp -
- data_phy_cycles *
- dsi->lanes - 18;
+ horizontal_frontporch_byte =
+ vm->hfront_porch * dsi_tmp_buf_bpp -
+ (data_phy_cycles * dsi->lanes + 18) *
+ vm->hfront_porch /
+ (vm->hfront_porch + vm->hback_porch);
+
+ horizontal_backporch_byte =
+ horizontal_backporch_byte -
+ (data_phy_cycles * dsi->lanes + 18) *
+ vm->hback_porch /
+ (vm->hfront_porch + vm->hback_porch);
} else {
DRM_WARN("HFP less than d-phy, FPS will under 60Hz\n");
horizontal_frontporch_byte = vm->hfront_porch *
dsi_tmp_buf_bpp;
}
} else {
- if (vm->hfront_porch * dsi_tmp_buf_bpp >
+ if ((vm->hfront_porch + vm->hback_porch) * dsi_tmp_buf_bpp >
data_phy_cycles * dsi->lanes + 12) {
- horizontal_frontporch_byte = vm->hfront_porch *
- dsi_tmp_buf_bpp -
- data_phy_cycles *
- dsi->lanes - 12;
+ horizontal_frontporch_byte =
+ vm->hfront_porch * dsi_tmp_buf_bpp -
+ (data_phy_cycles * dsi->lanes + 12) *
+ vm->hfront_porch /
+ (vm->hfront_porch + vm->hback_porch);
+ horizontal_backporch_byte = horizontal_backporch_byte -
+ (data_phy_cycles * dsi->lanes + 12) *
+ vm->hback_porch /
+ (vm->hfront_porch + vm->hback_porch);
} else {
DRM_WARN("HFP less than d-phy, FPS will under 60Hz\n");
horizontal_frontporch_byte = vm->hfront_porch *
diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.h b/drivers/gpu/drm/rockchip/cdn-dp-core.h
index 83c4586665b4..81ac9b658a70 100644
--- a/drivers/gpu/drm/rockchip/cdn-dp-core.h
+++ b/drivers/gpu/drm/rockchip/cdn-dp-core.h
@@ -95,7 +95,7 @@ struct cdn_dp_device {
struct cdn_dp_port *port[MAX_PHY];
u8 ports;
u8 max_lanes;
- u8 max_rate;
+ unsigned int max_rate;
u8 lanes;
int active_port;
diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
index a7c4654445c7..68d4644ac2dc 100644
--- a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
+++ b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
@@ -685,8 +685,6 @@ static void sun4i_hdmi_unbind(struct device *dev, struct device *master,
struct sun4i_hdmi *hdmi = dev_get_drvdata(dev);
cec_unregister_adapter(hdmi->cec_adap);
- drm_connector_cleanup(&hdmi->connector);
- drm_encoder_cleanup(&hdmi->encoder);
i2c_del_adapter(hdmi->i2c);
i2c_put_adapter(hdmi->ddc_i2c);
clk_disable_unprepare(hdmi->mod_clk);
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c
index 42651d737c55..c81cdce6ed55 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.c
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c
@@ -489,7 +489,7 @@ static void sun4i_tcon0_mode_set_rgb(struct sun4i_tcon *tcon,
WARN_ON(!tcon->quirks->has_channel_0);
- tcon->dclk_min_div = 1;
+ tcon->dclk_min_div = tcon->quirks->dclk_min_div;
tcon->dclk_max_div = 127;
sun4i_tcon0_mode_set_common(tcon, mode);
@@ -1426,12 +1426,14 @@ static int sun8i_r40_tcon_tv_set_mux(struct sun4i_tcon *tcon,
static const struct sun4i_tcon_quirks sun4i_a10_quirks = {
.has_channel_0 = true,
.has_channel_1 = true,
+ .dclk_min_div = 4,
.set_mux = sun4i_a10_tcon_set_mux,
};
static const struct sun4i_tcon_quirks sun5i_a13_quirks = {
.has_channel_0 = true,
.has_channel_1 = true,
+ .dclk_min_div = 4,
.set_mux = sun5i_a13_tcon_set_mux,
};
@@ -1440,6 +1442,7 @@ static const struct sun4i_tcon_quirks sun6i_a31_quirks = {
.has_channel_1 = true,
.has_lvds_alt = true,
.needs_de_be_mux = true,
+ .dclk_min_div = 1,
.set_mux = sun6i_tcon_set_mux,
};
@@ -1447,11 +1450,13 @@ static const struct sun4i_tcon_quirks sun6i_a31s_quirks = {
.has_channel_0 = true,
.has_channel_1 = true,
.needs_de_be_mux = true,
+ .dclk_min_div = 1,
};
static const struct sun4i_tcon_quirks sun7i_a20_quirks = {
.has_channel_0 = true,
.has_channel_1 = true,
+ .dclk_min_div = 4,
/* Same display pipeline structure as A10 */
.set_mux = sun4i_a10_tcon_set_mux,
};
@@ -1459,11 +1464,13 @@ static const struct sun4i_tcon_quirks sun7i_a20_quirks = {
static const struct sun4i_tcon_quirks sun8i_a33_quirks = {
.has_channel_0 = true,
.has_lvds_alt = true,
+ .dclk_min_div = 1,
};
static const struct sun4i_tcon_quirks sun8i_a83t_lcd_quirks = {
.supports_lvds = true,
.has_channel_0 = true,
+ .dclk_min_div = 1,
};
static const struct sun4i_tcon_quirks sun8i_a83t_tv_quirks = {
@@ -1477,11 +1484,13 @@ static const struct sun4i_tcon_quirks sun8i_r40_tv_quirks = {
static const struct sun4i_tcon_quirks sun8i_v3s_quirks = {
.has_channel_0 = true,
+ .dclk_min_div = 1,
};
static const struct sun4i_tcon_quirks sun9i_a80_tcon_lcd_quirks = {
- .has_channel_0 = true,
- .needs_edp_reset = true,
+ .has_channel_0 = true,
+ .needs_edp_reset = true,
+ .dclk_min_div = 1,
};
static const struct sun4i_tcon_quirks sun9i_a80_tcon_tv_quirks = {
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.h b/drivers/gpu/drm/sun4i/sun4i_tcon.h
index f9f1fe80b206..a62ec826ae71 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.h
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.h
@@ -224,6 +224,7 @@ struct sun4i_tcon_quirks {
bool needs_de_be_mux; /* sun6i needs mux to select backend */
bool needs_edp_reset; /* a80 edp reset needed for tcon0 access */
bool supports_lvds; /* Does the TCON support an LVDS output? */
+ u8 dclk_min_div; /* minimum divider for TCON0 DCLK */
/* callback to handle tcon muxing options */
int (*set_mux)(struct sun4i_tcon *, const struct drm_encoder *);
diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c b/drivers/gpu/drm/virtio/virtgpu_plane.c
index ac42c84d2d7f..d1c3f5fbfee4 100644
--- a/drivers/gpu/drm/virtio/virtgpu_plane.c
+++ b/drivers/gpu/drm/virtio/virtgpu_plane.c
@@ -260,6 +260,7 @@ static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
if (!objs)
return;
virtio_gpu_array_add_obj(objs, vgfb->base.obj[0]);
+ virtio_gpu_array_lock_resv(objs);
virtio_gpu_cmd_transfer_to_host_2d
(vgdev, 0,
plane->state->crtc_w,