author	Zhao Yakui <yakui.zhao@intel.com>	2016-06-28 17:38:19 +0800
committer	Xiang, Haihao <haihao.xiang@intel.com>	2018-01-11 08:25:16 +0800
commit	468b09447847a044049bec5cccfaa3baf420effb (patch)
tree	ad5686f74a22d6558069e938056a5c1aa0490832
parent	20e50e3a92c9427556aaa8072ff3b55d41f0fd12 (diff)
download	libva-intel-driver-468b09447847a044049bec5cccfaa3baf420effb.tar.gz
Add initial support for CNL (Cannon Lake)
Signed-off-by: Zhao Yakui <yakui.zhao@intel.com>
Signed-off-by: Xiang, Haihao <haihao.xiang@intel.com>
Signed-off-by: peng.chen <peng.c.chen@intel.com>
-rw-r--r--src/gen75_picture_process.c4
-rw-r--r--src/gen75_vpp_gpe.c12
-rw-r--r--src/gen75_vpp_vebox.c263
-rw-r--r--src/gen75_vpp_vebox.h3
-rw-r--r--src/gen8_mfc.c9
-rw-r--r--src/gen8_post_processing.c6
-rw-r--r--src/gen9_mfd.c82
-rw-r--r--src/i965_device_info.c69
-rw-r--r--src/i965_drv_video.c10
-rw-r--r--src/i965_gpe_utils.c15
-rw-r--r--src/i965_pciids.h15
-rw-r--r--src/i965_post_processing.c5
-rw-r--r--src/intel_batchbuffer.c6
-rw-r--r--src/intel_common_vpp_internal.c3
-rw-r--r--src/intel_driver.c3
-rw-r--r--src/intel_driver.h2
16 files changed, 477 insertions, 30 deletions
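
Most of the hunks below follow one pattern: each existing Gen8/Gen9 capability or dispatch check gains an IS_GEN10() branch, so CNL reuses the Gen9 code paths and only gets dedicated gen10_ helpers where the command layout actually changed (the VEBOX commands in gen75_vpp_vebox.c and HCP_IND_OBJ_BASE_ADDR_STATE in gen9_mfd.c). A minimal sketch of that idiom, using the macros and functions touched by this patch (the wrapper name itself is hypothetical, not part of the change):

static VAStatus
dispatch_vebox_example(VADriverContextP ctx,
                       struct intel_video_process_context *proc_ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);

    /* Gen9 and Gen10 share the VEBOX entry points extended below;
     * anything older or unknown stays unimplemented. */
    if (IS_GEN9(i965->intel.device_info))
        return gen9_vebox_process_picture(ctx, proc_ctx->vpp_vebox_ctx);
    else if (IS_GEN10(i965->intel.device_info))
        return gen10_vebox_process_picture(ctx, proc_ctx->vpp_vebox_ctx);

    return VA_STATUS_ERROR_UNIMPLEMENTED;
}
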
diff --git a/src/gen75_picture_process.c b/src/gen75_picture_process.c
index 5a9ce499..e3ecfdd2 100644
--- a/src/gen75_picture_process.c
+++ b/src/gen75_picture_process.c
@@ -65,7 +65,7 @@ static VAStatus
gen75_vpp_vebox(VADriverContextP ctx,
struct intel_video_process_context* proc_ctx)
{
- VAStatus va_status = VA_STATUS_SUCCESS;
+ VAStatus va_status = VA_STATUS_ERROR_UNIMPLEMENTED;
VAProcPipelineParameterBuffer* pipeline_param = proc_ctx->pipeline_param;
struct i965_driver_data *i965 = i965_driver_data(ctx);
@@ -84,6 +84,8 @@ gen75_vpp_vebox(VADriverContextP ctx,
va_status = gen8_vebox_process_picture(ctx, proc_ctx->vpp_vebox_ctx);
else if (IS_GEN9(i965->intel.device_info))
va_status = gen9_vebox_process_picture(ctx, proc_ctx->vpp_vebox_ctx);
+ else if (IS_GEN10(i965->intel.device_info))
+ va_status = gen10_vebox_process_picture(ctx, proc_ctx->vpp_vebox_ctx);
return va_status;
}
diff --git a/src/gen75_vpp_gpe.c b/src/gen75_vpp_gpe.c
index ad893e83..02dc5769 100644
--- a/src/gen75_vpp_gpe.c
+++ b/src/gen75_vpp_gpe.c
@@ -586,7 +586,8 @@ vpp_gpe_process(VADriverContextP ctx,
if (IS_HASWELL(i965->intel.device_info))
return gen75_gpe_process(ctx, vpp_gpe_ctx);
else if (IS_GEN8(i965->intel.device_info) ||
- IS_GEN9(i965->intel.device_info))
+ IS_GEN9(i965->intel.device_info) ||
+ IS_GEN10(i965->intel.device_info))
return gen8_gpe_process(ctx, vpp_gpe_ctx);
return VA_STATUS_ERROR_UNIMPLEMENTED;
@@ -627,7 +628,8 @@ vpp_gpe_process_sharpening(VADriverContextP ctx,
if (IS_HASWELL(i965->intel.device_info))
vpp_kernels = gen75_vpp_sharpening_kernels;
else if (IS_GEN8(i965->intel.device_info) ||
- IS_GEN9(i965->intel.device_info)) // TODO: build the sharpening kernel for GEN9
+ IS_GEN9(i965->intel.device_info) ||
+ IS_GEN10(i965->intel.device_info))
vpp_kernels = gen8_vpp_sharpening_kernels;
else
return VA_STATUS_ERROR_UNIMPLEMENTED;
@@ -854,7 +856,8 @@ vpp_gpe_context_init(VADriverContextP ctx)
assert(IS_HASWELL(i965->intel.device_info) ||
IS_GEN8(i965->intel.device_info) ||
- IS_GEN9(i965->intel.device_info));
+ IS_GEN9(i965->intel.device_info) ||
+ IS_GEN10(i965->intel.device_info));
vpp_gpe_ctx->surface_tmp = VA_INVALID_ID;
vpp_gpe_ctx->surface_tmp_object = NULL;
@@ -879,7 +882,8 @@ vpp_gpe_context_init(VADriverContextP ctx)
gpe_ctx->idrt.entry_size = ALIGN(sizeof(struct gen6_interface_descriptor_data), 64);
} else if (IS_GEN8(i965->intel.device_info) ||
- IS_GEN9(i965->intel.device_info)) {
+ IS_GEN9(i965->intel.device_info) ||
+ IS_GEN10(i965->intel.device_info)) {
vpp_gpe_ctx->gpe_context_init = gen8_gpe_context_init;
vpp_gpe_ctx->gpe_context_destroy = gen8_gpe_context_destroy;
vpp_gpe_ctx->gpe_load_kernels = gen8_gpe_load_kernels;
diff --git a/src/gen75_vpp_vebox.c b/src/gen75_vpp_vebox.c
index 59d0efa4..8985dcb5 100644
--- a/src/gen75_vpp_vebox.c
+++ b/src/gen75_vpp_vebox.c
@@ -242,7 +242,8 @@ void hsw_veb_dndi_table(VADriverContextP ctx, struct intel_vebox_context *proc_c
7); // chr temp diff low
if (IS_GEN8(i965->intel.device_info) ||
- IS_GEN9(i965->intel.device_info))
+ IS_GEN9(i965->intel.device_info) ||
+ IS_GEN10(i965->intel.device_info))
*p_table ++ = 0; // parameters for hot pixel,
}
@@ -2254,7 +2255,7 @@ void skl_veb_state_table_setup(VADriverContextP ctx, struct intel_vebox_context
dri_bo *iecp_bo = proc_ctx->iecp_state_table.bo;
dri_bo_map(iecp_bo, 1);
proc_ctx->iecp_state_table.ptr = iecp_bo->virtual;
- memset(proc_ctx->iecp_state_table.ptr, 0, 90 * 4);
+ memset(proc_ctx->iecp_state_table.ptr, 0, 2048); // Change the size to 2048 in case a larger table is used in the future
hsw_veb_iecp_std_table(ctx, proc_ctx);
hsw_veb_iecp_ace_table(ctx, proc_ctx);
@@ -2472,3 +2473,261 @@ gen9_vebox_process_picture(VADriverContextP ctx,
return status;
}
+
+void
+cnl_veb_state_command(VADriverContextP ctx, struct intel_vebox_context *proc_ctx)
+{
+ struct intel_batchbuffer *batch = proc_ctx->batch;
+
+ BEGIN_VEB_BATCH(batch, 0x13);
+ OUT_VEB_BATCH(batch, VEB_STATE | (0x13 - 2));
+ OUT_VEB_BATCH(batch,
+ 0 << 25 | // state surface control bits
+ 0 << 23 | // reserved.
+ 0 << 22 | // gamut expansion position
+ 0 << 15 | // reserved.
+ 0 << 14 | // single slice vebox enable
+ 0 << 13 | // hot pixel filter enable
+ 0 << 12 | // alpha plane enable
+ 0 << 11 | // vignette enable
+ 0 << 10 | // demosaic enable
+ proc_ctx->current_output_type << 8 | // DI output frame
+ 1 << 7 | // 444->422 downsample method
+ 1 << 6 | // 422->420 downsample method
+ proc_ctx->is_first_frame << 5 | // DN/DI first frame
+ proc_ctx->is_di_enabled << 4 | // DI enable
+ proc_ctx->is_dn_enabled << 3 | // DN enable
+ proc_ctx->is_iecp_enabled << 2 | // global IECP enabled
+ 0 << 1 | // ColorGamutCompressionEnable
+ 0) ; // ColorGamutExpansionEnable.
+
+ OUT_RELOC64(batch,
+ proc_ctx->dndi_state_table.bo,
+ I915_GEM_DOMAIN_INSTRUCTION, 0, 0); // DW 2-3
+
+ OUT_RELOC64(batch,
+ proc_ctx->iecp_state_table.bo,
+ I915_GEM_DOMAIN_INSTRUCTION, 0, 0); // DW 4-5
+
+ OUT_RELOC64(batch,
+ proc_ctx->gamut_state_table.bo,
+ I915_GEM_DOMAIN_INSTRUCTION, 0, 0); // DW 6-7
+
+ OUT_RELOC64(batch,
+ proc_ctx->vertex_state_table.bo,
+ I915_GEM_DOMAIN_INSTRUCTION, 0, 0); // DW 8-9
+
+ OUT_VEB_BATCH(batch, 0);/*capture pipe state pointer*/
+ OUT_VEB_BATCH(batch, 0);
+
+ OUT_VEB_BATCH(batch, 0);/*lace lut table state pointer*/
+ OUT_VEB_BATCH(batch, 0);
+
+ OUT_VEB_BATCH(batch, 0);/*gamma correction values address*/
+ OUT_VEB_BATCH(batch, 0);
+
+ OUT_VEB_BATCH(batch, 0);
+ OUT_VEB_BATCH(batch, 0);
+ OUT_VEB_BATCH(batch, 0);
+
+
+ ADVANCE_VEB_BATCH(batch);
+}
+
+void cnl_veb_dndi_iecp_command(VADriverContextP ctx, struct intel_vebox_context *proc_ctx)
+{
+ struct intel_batchbuffer *batch = proc_ctx->batch;
+ unsigned char frame_ctrl_bits = 0;
+ struct object_surface *obj_surface = proc_ctx->frame_store[FRAME_IN_CURRENT].obj_surface;
+ unsigned int width64 = ALIGN(proc_ctx->width_input, 64);
+
+ assert(obj_surface);
+ if (width64 > obj_surface->orig_width)
+ width64 = obj_surface->orig_width;
+
+ BEGIN_VEB_BATCH(batch, 0x18);
+ OUT_VEB_BATCH(batch, VEB_DNDI_IECP_STATE | (0x18 - 2));//DWord 0
+ OUT_VEB_BATCH(batch, (width64 - 1));
+
+ OUT_RELOC64(batch,
+ proc_ctx->frame_store[FRAME_IN_CURRENT].obj_surface->bo,
+ I915_GEM_DOMAIN_RENDER, 0, frame_ctrl_bits);//DWord 2-3
+
+ OUT_RELOC64(batch,
+ proc_ctx->frame_store[FRAME_IN_PREVIOUS].obj_surface->bo,
+ I915_GEM_DOMAIN_RENDER, 0, frame_ctrl_bits);//DWord 4-5
+
+ OUT_RELOC64(batch,
+ proc_ctx->frame_store[FRAME_IN_STMM].obj_surface->bo,
+ I915_GEM_DOMAIN_RENDER, 0, frame_ctrl_bits);//DWord 6-7
+
+ OUT_RELOC64(batch,
+ proc_ctx->frame_store[FRAME_OUT_STMM].obj_surface->bo,
+ I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER, frame_ctrl_bits);//DWord 8-9
+
+ OUT_RELOC64(batch,
+ proc_ctx->frame_store[FRAME_OUT_CURRENT_DN].obj_surface->bo,
+ I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER, frame_ctrl_bits);//DWord 10-11
+
+ OUT_RELOC64(batch,
+ proc_ctx->frame_store[FRAME_OUT_CURRENT].obj_surface->bo,
+ I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER, frame_ctrl_bits);//DWord 12-13
+
+ OUT_RELOC64(batch,
+ proc_ctx->frame_store[FRAME_OUT_PREVIOUS].obj_surface->bo,
+ I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER, frame_ctrl_bits);//DWord 14-15
+
+ OUT_RELOC64(batch,
+ proc_ctx->frame_store[FRAME_OUT_STATISTIC].obj_surface->bo,
+ I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER, frame_ctrl_bits);//DWord 16-17
+
+ OUT_VEB_BATCH(batch, 0); //DWord 18
+ OUT_VEB_BATCH(batch, 0); //DWord 19
+
+ OUT_VEB_BATCH(batch, 0); //DWord 20
+ OUT_VEB_BATCH(batch, 0); //DWord 21
+ OUT_VEB_BATCH(batch, 0); //DWord 22
+ OUT_VEB_BATCH(batch, 0); //DWord 23
+
+ ADVANCE_VEB_BATCH(batch);
+}
+
+void cnl_veb_surface_state(VADriverContextP ctx, struct intel_vebox_context *proc_ctx, unsigned int is_output)
+{
+ struct intel_batchbuffer *batch = proc_ctx->batch;
+ unsigned int u_offset_y = 0, v_offset_y = 0;
+ unsigned int is_uv_interleaved = 0, tiling = 0, swizzle = 0;
+ unsigned int surface_format = PLANAR_420_8;
+ struct object_surface* obj_surf = NULL;
+ unsigned int surface_pitch = 0;
+ unsigned int half_pitch_chroma = 0;
+ unsigned int derived_pitch;
+
+ if (is_output) {
+ obj_surf = proc_ctx->frame_store[FRAME_OUT_CURRENT].obj_surface;
+ } else {
+ obj_surf = proc_ctx->frame_store[FRAME_IN_CURRENT].obj_surface;
+ }
+
+ assert(obj_surf->fourcc == VA_FOURCC_NV12 ||
+ obj_surf->fourcc == VA_FOURCC_YUY2 ||
+ obj_surf->fourcc == VA_FOURCC_AYUV ||
+ obj_surf->fourcc == VA_FOURCC_RGBA ||
+ obj_surf->fourcc == VA_FOURCC_P010);
+
+ if (obj_surf->fourcc == VA_FOURCC_NV12) {
+ surface_format = PLANAR_420_8;
+ surface_pitch = obj_surf->width;
+ is_uv_interleaved = 1;
+ half_pitch_chroma = 0;
+ } else if (obj_surf->fourcc == VA_FOURCC_YUY2) {
+ surface_format = YCRCB_NORMAL;
+ surface_pitch = obj_surf->width * 2;
+ is_uv_interleaved = 0;
+ half_pitch_chroma = 0;
+ } else if (obj_surf->fourcc == VA_FOURCC_AYUV) {
+ surface_format = PACKED_444A_8;
+ surface_pitch = obj_surf->width * 4;
+ is_uv_interleaved = 0;
+ half_pitch_chroma = 0;
+ } else if (obj_surf->fourcc == VA_FOURCC_RGBA) {
+ surface_format = R8G8B8A8_UNORM_SRGB;
+ surface_pitch = obj_surf->width * 4;
+ is_uv_interleaved = 0;
+ half_pitch_chroma = 0;
+ } else if (obj_surf->fourcc == VA_FOURCC_P010) {
+ surface_format = PLANAR_420_16;
+ surface_pitch = obj_surf->width;
+ is_uv_interleaved = 1;
+ half_pitch_chroma = 0;
+ }
+
+ derived_pitch = surface_pitch;
+
+ u_offset_y = obj_surf->y_cb_offset;
+ v_offset_y = obj_surf->y_cr_offset;
+
+ dri_bo_get_tiling(obj_surf->bo, &tiling, &swizzle);
+
+ BEGIN_VEB_BATCH(batch, 9);
+ OUT_VEB_BATCH(batch, VEB_SURFACE_STATE | (9 - 2));
+ OUT_VEB_BATCH(batch,
+ 0 << 1 | // reserved
+ is_output); // surface identification.
+
+ OUT_VEB_BATCH(batch,
+ (obj_surf->orig_height - 1) << 18 | // height . w3
+ (obj_surf->orig_width - 1) << 4 | // width
+ 0); // reserved
+
+ OUT_VEB_BATCH(batch,
+ surface_format << 27 | // surface format, YCbCr420. w4
+ is_uv_interleaved << 20 | // interleaved chroma vs. two separate planes
+ (surface_pitch - 1) << 3 | // surface pitch, 64-aligned
+ half_pitch_chroma << 2 | // half pitch for chroma
+ !!tiling << 1 | // tiled surface enable (0 = linear)
+ (tiling == I915_TILING_Y)); // tile walk, ignored for linear surfaces
+
+ OUT_VEB_BATCH(batch,
+ 0 << 16 | // X offset for V(Cb)
+ u_offset_y); // Y offset for V(Cb)
+
+ OUT_VEB_BATCH(batch,
+ 0 << 16 | // X offset for V(Cr)
+ v_offset_y); // Y offset for V(Cr)
+
+ OUT_VEB_BATCH(batch, 0);
+
+ OUT_VEB_BATCH(batch, derived_pitch - 1);
+
+ OUT_VEB_BATCH(batch, 0);
+
+ ADVANCE_VEB_BATCH(batch);
+}
+
+VAStatus
+gen10_vebox_process_picture(VADriverContextP ctx, struct intel_vebox_context *proc_ctx)
+{
+ VAStatus status;
+
+ status = gen75_vebox_init_pipe_params(ctx, proc_ctx);
+ if (status != VA_STATUS_SUCCESS)
+ return status;
+
+ status = gen75_vebox_init_filter_params(ctx, proc_ctx);
+ if (status != VA_STATUS_SUCCESS)
+ return status;
+
+ status = hsw_veb_pre_format_convert(ctx, proc_ctx);
+ if (status != VA_STATUS_SUCCESS)
+ return status;
+
+ status = gen75_vebox_ensure_surfaces(ctx, proc_ctx);
+ if (status != VA_STATUS_SUCCESS)
+ return status;
+
+ status = gen75_vebox_ensure_surfaces_storage(ctx, proc_ctx);
+ if (status != VA_STATUS_SUCCESS)
+ return status;
+
+ if (proc_ctx->filters_mask & VPP_SHARP_MASK) {
+ vpp_sharpness_filtering(ctx, proc_ctx);
+ } else if (proc_ctx->format_convert_flags & POST_COPY_CONVERT) {
+ assert(proc_ctx->is_second_field);
+ /* directly copy the saved frame in the second call */
+ } else {
+ intel_batchbuffer_start_atomic_veb(proc_ctx->batch, 0x1000);
+ intel_batchbuffer_emit_mi_flush(proc_ctx->batch);
+ skl_veb_state_table_setup(ctx, proc_ctx);
+ cnl_veb_state_command(ctx, proc_ctx);
+ cnl_veb_surface_state(ctx, proc_ctx, INPUT_SURFACE);
+ cnl_veb_surface_state(ctx, proc_ctx, OUTPUT_SURFACE);
+ cnl_veb_dndi_iecp_command(ctx, proc_ctx);
+ intel_batchbuffer_end_atomic(proc_ctx->batch);
+ intel_batchbuffer_flush(proc_ctx->batch);
+ }
+
+ status = hsw_veb_post_format_convert(ctx, proc_ctx);
+
+ return status;
+}
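
For context, a hedged sketch of the application-side path that lands in gen10_vebox_process_picture() on CNL (via i965_proc_picture(), which this patch also extends): a VAEntrypointVideoProc context is created and one VAProcPipelineParameterBuffer is submitted per frame. Only standard libva calls are used; dpy, src_surface and dst_surface are assumed to have been created elsewhere, and error checking and teardown are omitted.

#include <va/va.h>
#include <va/va_vpp.h>

static VAStatus
run_vpp_frame(VADisplay dpy, VASurfaceID src_surface, VASurfaceID dst_surface,
              int width, int height)
{
    VAConfigID config;
    VAContextID context;
    VABufferID pipeline_buf;
    VAProcPipelineParameterBuffer pipeline = { 0 };

    /* VAEntrypointVideoProc work is routed by the driver into the
     * VEBOX path added above on CNL. */
    vaCreateConfig(dpy, VAProfileNone, VAEntrypointVideoProc, NULL, 0, &config);
    vaCreateContext(dpy, config, width, height, VA_PROGRESSIVE,
                    &dst_surface, 1, &context);

    pipeline.surface = src_surface;   /* input picture */
    pipeline.filters = NULL;          /* no extra filters: plain CSC/scaling */
    pipeline.num_filters = 0;

    vaCreateBuffer(dpy, context, VAProcPipelineParameterBufferType,
                   sizeof(pipeline), 1, &pipeline, &pipeline_buf);

    vaBeginPicture(dpy, context, dst_surface);
    vaRenderPicture(dpy, context, &pipeline_buf, 1);
    return vaEndPicture(dpy, context);
}
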
diff --git a/src/gen75_vpp_vebox.h b/src/gen75_vpp_vebox.h
index 66cfda86..1394ba29 100644
--- a/src/gen75_vpp_vebox.h
+++ b/src/gen75_vpp_vebox.h
@@ -171,4 +171,7 @@ VAStatus gen8_vebox_process_picture(VADriverContextP ctx,
VAStatus gen9_vebox_process_picture(VADriverContextP ctx,
struct intel_vebox_context *proc_ctx);
+VAStatus gen10_vebox_process_picture(VADriverContextP ctx,
+ struct intel_vebox_context *proc_ctx);
+
#endif
diff --git a/src/gen8_mfc.c b/src/gen8_mfc.c
index 7ab46d7a..6b23f444 100644
--- a/src/gen8_mfc.c
+++ b/src/gen8_mfc.c
@@ -1614,7 +1614,8 @@ gen8_mfc_avc_batchbuffer_pipeline(VADriverContextP ctx,
intel_batchbuffer_start_atomic(batch, 0x4000);
- if (IS_GEN9(i965->intel.device_info))
+ if (IS_GEN9(i965->intel.device_info) ||
+ IS_GEN10(i965->intel.device_info))
gen9_gpe_pipeline_setup(ctx, &mfc_context->gpe_context, batch);
else
gen8_gpe_pipeline_setup(ctx, &mfc_context->gpe_context, batch);
@@ -1640,7 +1641,8 @@ gen8_mfc_avc_batchbuffer_pipeline(VADriverContextP ctx,
mfc_context->aux_batchbuffer = NULL;
}
- if (IS_GEN9(i965->intel.device_info))
+ if (IS_GEN9(i965->intel.device_info) ||
+ IS_GEN10(i965->intel.device_info))
gen9_gpe_pipeline_end(ctx, &mfc_context->gpe_context, batch);
intel_batchbuffer_end_atomic(batch);
@@ -4584,7 +4586,8 @@ Bool gen8_mfc_context_init(VADriverContextP ctx, struct intel_encoder_context *e
mfc_context->gpe_context.vfe_state.urb_entry_size = 59 - 1;
mfc_context->gpe_context.vfe_state.curbe_allocation_size = 37 - 1;
- if (IS_GEN9(i965->intel.device_info)) {
+ if (IS_GEN9(i965->intel.device_info) ||
+ IS_GEN10(i965->intel.device_info)) {
gen8_gpe_load_kernels(ctx,
&mfc_context->gpe_context,
gen9_mfc_kernels,
diff --git a/src/gen8_post_processing.c b/src/gen8_post_processing.c
index 5d42deac..43e787f5 100644
--- a/src/gen8_post_processing.c
+++ b/src/gen8_post_processing.c
@@ -410,7 +410,8 @@ gen8_pp_set_surface_state(VADriverContextP ctx, struct i965_post_processing_cont
ss = (struct gen8_surface_state *)((char *)ss_bo->virtual + SURFACE_STATE_OFFSET(index));
memset(ss, 0, sizeof(*ss));
- if (IS_GEN9(i965->intel.device_info))
+ if (IS_GEN9(i965->intel.device_info) ||
+ IS_GEN10(i965->intel.device_info))
ss->ss1.surface_mocs = GEN9_CACHE_PTE;
ss->ss0.surface_type = I965_SURFACE_2D;
@@ -459,7 +460,8 @@ gen8_pp_set_surface2_state(VADriverContextP ctx, struct i965_post_processing_con
ss2 = (struct gen8_surface_state2 *)((char *)ss2_bo->virtual + SURFACE_STATE_OFFSET(index));
memset(ss2, 0, sizeof(*ss2));
- if (IS_GEN9(i965->intel.device_info))
+ if (IS_GEN9(i965->intel.device_info) ||
+ IS_GEN10(i965->intel.device_info))
ss2->ss5.surface_object_mocs = GEN9_CACHE_PTE;
ss2->ss6.base_addr = surf_bo->offset + surf_bo_offset;
diff --git a/src/gen9_mfd.c b/src/gen9_mfd.c
index 85a14f19..38fde98e 100644
--- a/src/gen9_mfd.c
+++ b/src/gen9_mfd.c
@@ -195,7 +195,8 @@ gen9_hcpd_pipe_mode_select(VADriverContextP ctx,
assert((codec == HCP_CODEC_HEVC) || (codec == HCP_CODEC_VP9));
if (IS_KBL(i965->intel.device_info) ||
- IS_GLK(i965->intel.device_info)) {
+ IS_GLK(i965->intel.device_info) ||
+ IS_GEN10(i965->intel.device_info)) {
BEGIN_BCS_BATCH(batch, 6);
OUT_BCS_BATCH(batch, HCP_PIPE_MODE_SELECT | (6 - 2));
@@ -219,6 +220,9 @@ gen9_hcpd_pipe_mode_select(VADriverContextP ctx,
OUT_BCS_BATCH(batch, 0);
OUT_BCS_BATCH(batch, 0);
+ } else if (IS_GEN10(i965->intel.device_info)) {
+ OUT_BCS_BATCH(batch, 0);
+ OUT_BCS_BATCH(batch, 0);
}
ADVANCE_BCS_BATCH(batch);
@@ -350,6 +354,67 @@ gen9_hcpd_ind_obj_base_addr_state(VADriverContextP ctx,
}
static void
+gen10_hcpd_ind_obj_base_addr_state(VADriverContextP ctx,
+ dri_bo *slice_data_bo,
+ struct gen9_hcpd_context *gen9_hcpd_context)
+{
+ struct intel_batchbuffer *batch = gen9_hcpd_context->base.batch;
+
+ BEGIN_BCS_BATCH(batch, 29);
+
+ OUT_BCS_BATCH(batch, HCP_IND_OBJ_BASE_ADDR_STATE | (29 - 2));
+
+ /* DW1..5 indirect bitstream*/
+ OUT_RELOC64(batch,
+ slice_data_bo,
+ I915_GEM_DOMAIN_INSTRUCTION, 0,
+ 0);
+ OUT_BCS_BATCH(batch, 0);
+ OUT_BCS_RELOC64(batch, slice_data_bo,
+ I915_GEM_DOMAIN_RENDER,
+ 0, ALIGN(slice_data_bo->size, 4096));
+
+ /* DW 6..8 Indirect CU */
+ OUT_BCS_BATCH(batch, 0);
+ OUT_BCS_BATCH(batch, 0);
+ OUT_BCS_BATCH(batch, 0);
+
+ /* DW 9..13 Indirect PAK_PSE */
+ OUT_BCS_BATCH(batch, 0);
+ OUT_BCS_BATCH(batch, 0);
+ OUT_BCS_BATCH(batch, 0);
+ OUT_BCS_BATCH(batch, 0);
+ OUT_BCS_BATCH(batch, 0);
+
+ /* DW 14..16. Compressed_header */
+ OUT_BCS_BATCH(batch, 0);
+ OUT_BCS_BATCH(batch, 0);
+ OUT_BCS_BATCH(batch, 0);
+
+ /* DW 17..19. Prob-counter Stream-out */
+ OUT_BCS_BATCH(batch, 0);
+ OUT_BCS_BATCH(batch, 0);
+ OUT_BCS_BATCH(batch, 0);
+
+ /* DW 20..22. Prob-delta stream-in */
+ OUT_BCS_BATCH(batch, 0);
+ OUT_BCS_BATCH(batch, 0);
+ OUT_BCS_BATCH(batch, 0);
+
+ /* DW 23..25. Tile Record */
+ OUT_BCS_BATCH(batch, 0);
+ OUT_BCS_BATCH(batch, 0);
+ OUT_BCS_BATCH(batch, 0);
+
+ /* DW 26..28. CU level statistics buffer */
+ OUT_BCS_BATCH(batch, 0);
+ OUT_BCS_BATCH(batch, 0);
+ OUT_BCS_BATCH(batch, 0);
+
+ ADVANCE_BCS_BATCH(batch);
+}
+
+static void
gen9_hcpd_qm_state(VADriverContextP ctx,
int size_id,
int color_component,
@@ -943,7 +1008,10 @@ gen9_hcpd_hevc_decode_picture(VADriverContextP ctx,
slice_param = (VASliceParameterBufferHEVC *)decode_state->slice_params[j]->buffer;
slice_data_bo = decode_state->slice_datas[j]->bo;
- gen9_hcpd_ind_obj_base_addr_state(ctx, slice_data_bo, gen9_hcpd_context);
+ if (IS_GEN10(i965->intel.device_info))
+ gen10_hcpd_ind_obj_base_addr_state(ctx, slice_data_bo, gen9_hcpd_context);
+ else
+ gen9_hcpd_ind_obj_base_addr_state(ctx, slice_data_bo, gen9_hcpd_context);
if (j == decode_state->num_slice_params - 1)
next_slice_group_param = NULL;
@@ -1532,7 +1600,8 @@ gen9_hcpd_vp9_pic_state(VADriverContextP ctx,
segmentIDStreamOutEnable << 30 |
pic_param->pic_fields.bits.lossless_flag << 29 |
segmentation_temporal_update << 28 |
- pic_param->pic_fields.bits.segmentation_update_map << 27 |
+ (pic_param->pic_fields.bits.segmentation_enabled &&
+ pic_param->pic_fields.bits.segmentation_update_map) << 27 |
pic_param->pic_fields.bits.segmentation_enabled << 26 |
pic_param->sharpness_level << 23 |
pic_param->filter_level << 17 |
@@ -1735,7 +1804,11 @@ gen9_hcpd_vp9_decode_picture(VADriverContextP ctx,
slice_data_bo = decode_state->slice_datas[0]->bo;
gen9_hcpd_vp9_pipe_buf_addr_state(ctx, decode_state, gen9_hcpd_context);
- gen9_hcpd_ind_obj_base_addr_state(ctx, slice_data_bo, gen9_hcpd_context);
+
+ if (IS_GEN10(i965->intel.device_info))
+ gen10_hcpd_ind_obj_base_addr_state(ctx, slice_data_bo, gen9_hcpd_context);
+ else
+ gen9_hcpd_ind_obj_base_addr_state(ctx, slice_data_bo, gen9_hcpd_context);
//If segmentation is disabled, only SegParam[0] is valid,
//all others should be populated with 0
@@ -1750,7 +1823,6 @@ gen9_hcpd_vp9_decode_picture(VADriverContextP ctx,
}
gen9_hcpd_vp9_pic_state(ctx, decode_state, gen9_hcpd_context);
-
gen9_hcpd_vp9_bsd_object(ctx, pic_param, slice_param, gen9_hcpd_context);
intel_batchbuffer_end_atomic(batch);
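
The gen9_hcpd_vp9_pic_state() hunk above also carries a small correctness fix that is independent of CNL: bit 27 (segmentation update map) is now gated on segmentation_enabled, so a bitstream that disables segmentation but leaves the update flag set no longer programs an inconsistent pic state. A sketch of the resulting bit assembly, using only names visible in that hunk (not a verbatim copy of the function):

uint32_t seg_bits =
    segmentIDStreamOutEnable << 30 |
    pic_param->pic_fields.bits.lossless_flag << 29 |
    segmentation_temporal_update << 28 |
    /* update-map only counts when segmentation itself is enabled */
    (pic_param->pic_fields.bits.segmentation_enabled &&
     pic_param->pic_fields.bits.segmentation_update_map) << 27 |
    pic_param->pic_fields.bits.segmentation_enabled << 26;
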
diff --git a/src/i965_device_info.c b/src/i965_device_info.c
index 3e9485e8..f11264cd 100644
--- a/src/i965_device_info.c
+++ b/src/i965_device_info.c
@@ -641,6 +641,68 @@ static struct hw_codec_info cfl_hw_codec_info = {
},
};
+static struct hw_codec_info cnl_hw_codec_info = {
+ .dec_hw_context_init = gen9_dec_hw_context_init,
+ .enc_hw_context_init = gen9_enc_hw_context_init,
+ .proc_hw_context_init = gen75_proc_context_init,
+ .render_init = gen9_render_init,
+ .post_processing_context_init = gen9_post_processing_context_init,
+ .max_resolution = gen9_max_resolution,
+ .preinit_hw_codec = gen9_hw_codec_preinit,
+
+ .max_width = 4096, /* default. See max_resolution */
+ .max_height = 4096, /* default. See max_resolution */
+ .min_linear_wpitch = 64,
+ .min_linear_hpitch = 16,
+
+ .h264_mvc_dec_profiles = (VA_PROFILE_MASK(H264StereoHigh) | VA_PROFILE_MASK(H264MultiviewHigh)),
+ .vp9_dec_profiles = VP9_PROFILE_MASK(0) | VP9_PROFILE_MASK(2),
+ .vp9_enc_profiles = VP9_PROFILE_MASK(0),
+
+ .h264_dec_chroma_formats = EXTRA_H264_DEC_CHROMA_FORMATS,
+ .jpeg_dec_chroma_formats = EXTRA_JPEG_DEC_CHROMA_FORMATS,
+ .jpeg_enc_chroma_formats = EXTRA_JPEG_ENC_CHROMA_FORMATS,
+ .hevc_dec_chroma_formats = EXTRA_HEVC_DEC_CHROMA_FORMATS,
+ .vp9_dec_chroma_formats = EXTRA_VP9_DEC_CHROMA_FORMATS,
+
+ .has_mpeg2_decoding = 1,
+ .has_mpeg2_encoding = 1,
+ .has_h264_decoding = 1,
+ .has_h264_encoding = 1,
+ .has_vc1_decoding = 1,
+ .has_jpeg_decoding = 1,
+ .has_jpeg_encoding = 1,
+ .has_vpp = 1,
+ .has_accelerated_getimage = 1,
+ .has_accelerated_putimage = 1,
+ .has_tiled_surface = 1,
+ .has_di_motion_adptive = 1,
+ .has_di_motion_compensated = 1,
+ .has_vp8_decoding = 1,
+ .has_vp8_encoding = 0,
+ .has_h264_mvc_encoding = 1,
+ .has_hevc_decoding = 1,
+ .has_hevc_encoding = 0,
+ .has_hevc10_decoding = 1,
+ .has_hevc10_encoding = 0,
+ .has_vp9_decoding = 1,
+ .has_vpp_p010 = 1,
+ .has_vp9_encoding = 0,
+ .has_lp_h264_encoding = 0,
+
+ .lp_h264_brc_mode = VA_RC_CQP,
+ .h264_brc_mode = VA_RC_CQP | VA_RC_CBR | VA_RC_VBR | VA_RC_MB,
+
+ .num_filters = 5,
+ .filters = {
+ { VAProcFilterNoiseReduction, I965_RING_VEBOX },
+ { VAProcFilterDeinterlacing, I965_RING_VEBOX },
+ { VAProcFilterSharpening, I965_RING_NULL },
+ { VAProcFilterColorBalance, I965_RING_VEBOX},
+ { VAProcFilterSkinToneEnhancement, I965_RING_VEBOX},
+ },
+};
+
struct hw_codec_info *
i965_get_codec_info(int devid)
{
@@ -806,6 +868,13 @@ static const struct intel_device_info cfl_device_info = {
.is_cfllake = 1,
};
+static const struct intel_device_info cnl_device_info = {
+ .gen = 10,
+
+ .urb_size = 4096,
+ .max_wm_threads = 64, /* per PSD */
+};
+
const struct intel_device_info *
i965_get_device_info(int devid)
{
diff --git a/src/i965_drv_video.c b/src/i965_drv_video.c
index 7d475299..2c7f8498 100644
--- a/src/i965_drv_video.c
+++ b/src/i965_drv_video.c
@@ -2083,6 +2083,7 @@ i965_guess_surface_format(VADriverContextP ctx,
if (IS_GEN6(i965->intel.device_info) ||
IS_GEN7(i965->intel.device_info) ||
IS_GEN8(i965->intel.device_info) ||
+ IS_GEN10(i965->intel.device_info) ||
IS_GEN9(i965->intel.device_info)) {
*fourcc = VA_FOURCC_NV12;
*is_tiled = 1;
@@ -5783,7 +5784,8 @@ i965_GetSurfaceAttributes(
attrib_list[i].value.value.i = VA_FOURCC_NV12;
} else if (IS_GEN7(i965->intel.device_info) ||
IS_GEN8(i965->intel.device_info) ||
- IS_GEN9(i965->intel.device_info)) {
+ IS_GEN9(i965->intel.device_info) ||
+ IS_GEN10(i965->intel.device_info)) {
if (obj_config->profile == VAProfileJPEGBaseline)
attrib_list[i].value.value.i = 0; /* internal format */
else
@@ -5861,7 +5863,8 @@ i965_GetSurfaceAttributes(
}
} else if (IS_GEN7(i965->intel.device_info) ||
IS_GEN8(i965->intel.device_info) ||
- IS_GEN9(i965->intel.device_info)) {
+ IS_GEN9(i965->intel.device_info) ||
+ IS_GEN10(i965->intel.device_info)) {
if (obj_config->entrypoint == VAEntrypointEncSlice ||
obj_config->entrypoint == VAEntrypointVideoProc ||
obj_config->entrypoint == VAEntrypointEncSliceLP ||
@@ -6176,7 +6179,8 @@ i965_QuerySurfaceAttributes(VADriverContextP ctx,
}
}
} else if (IS_GEN8(i965->intel.device_info) ||
- IS_GEN9(i965->intel.device_info)) {
+ IS_GEN9(i965->intel.device_info) ||
+ IS_GEN10(i965->intel.device_info)) {
if (obj_config->entrypoint == VAEntrypointVLD) { /* decode */
if (obj_config->profile == VAProfileJPEGBaseline) {
attribs[i].type = VASurfaceAttribPixelFormat;
diff --git a/src/i965_gpe_utils.c b/src/i965_gpe_utils.c
index 32c30b4a..fe90c9be 100644
--- a/src/i965_gpe_utils.c
+++ b/src/i965_gpe_utils.c
@@ -748,7 +748,8 @@ gen8_gpe_set_surface2_state(VADriverContextP ctx,
memset(ss, 0, sizeof(*ss));
/* ss0 */
- if (IS_GEN9(i965->intel.device_info))
+ if (IS_GEN9(i965->intel.device_info) ||
+ IS_GEN10(i965->intel.device_info))
ss->ss5.surface_object_mocs = GEN9_CACHE_PTE;
ss->ss6.base_addr = (uint32_t)obj_surface->bo->offset64;
@@ -810,7 +811,8 @@ gen8_gpe_set_media_rw_surface_state(VADriverContextP ctx,
memset(ss, 0, sizeof(*ss));
/* ss0 */
- if (IS_GEN9(i965->intel.device_info))
+ if (IS_GEN9(i965->intel.device_info) ||
+ IS_GEN10(i965->intel.device_info))
ss->ss1.surface_mocs = GEN9_CACHE_PTE;
ss->ss0.surface_type = I965_SURFACE_2D;
@@ -844,7 +846,8 @@ gen8_gpe_set_media_chroma_surface_state(VADriverContextP ctx,
cbcr_offset = obj_surface->height * obj_surface->width;
memset(ss, 0, sizeof(*ss));
/* ss0 */
- if (IS_GEN9(i965->intel.device_info))
+ if (IS_GEN9(i965->intel.device_info) ||
+ IS_GEN10(i965->intel.device_info))
ss->ss1.surface_mocs = GEN9_CACHE_PTE;
ss->ss0.surface_type = I965_SURFACE_2D;
@@ -934,7 +937,8 @@ gen8_gpe_set_buffer_surface_state(VADriverContextP ctx,
memset(ss, 0, sizeof(*ss));
/* ss0 */
ss->ss0.surface_type = I965_SURFACE_BUFFER;
- if (IS_GEN9(i965->intel.device_info))
+ if (IS_GEN9(i965->intel.device_info) ||
+ IS_GEN10(i965->intel.device_info))
ss->ss1.surface_mocs = GEN9_CACHE_PTE;
/* ss1 */
@@ -2809,7 +2813,8 @@ i965_gpe_table_init(VADriverContextP ctx)
gpe->mi_store_register_mem = gen8_gpe_mi_store_register_mem;
gpe->mi_store_data_imm = gen8_gpe_mi_store_data_imm;
gpe->mi_flush_dw = gen8_gpe_mi_flush_dw;
- } else if (IS_GEN9(i965->intel.device_info)) {
+ } else if (IS_GEN9(i965->intel.device_info) ||
+ IS_GEN10(i965->intel.device_info)) {
gpe->context_init = gen8_gpe_context_init;
gpe->context_destroy = gen8_gpe_context_destroy;
gpe->context_add_surface = gen9_gpe_context_add_surface;
diff --git a/src/i965_pciids.h b/src/i965_pciids.h
index f9e8af58..8768532c 100644
--- a/src/i965_pciids.h
+++ b/src/i965_pciids.h
@@ -196,3 +196,18 @@ CHIPSET(0x3E93, cfl, cfl, "Intel(R) Coffee Lake")
CHIPSET(0x3E94, cfl, cfl, "Intel(R) Coffee Lake")
CHIPSET(0x3E96, cfl, cfl, "Intel(R) Coffee Lake")
CHIPSET(0x3E9B, cfl, cfl, "Intel(R) Coffee Lake")
+CHIPSET(0x5A40, cnl, cnl, "Intel(R) Cannonlake")
+CHIPSET(0x5A41, cnl, cnl, "Intel(R) Cannonlake")
+CHIPSET(0x5A42, cnl, cnl, "Intel(R) Cannonlake")
+CHIPSET(0x5A44, cnl, cnl, "Intel(R) Cannonlake")
+CHIPSET(0x5A45, cnl, cnl, "Intel(R) Cannonlake")
+CHIPSET(0x5A49, cnl, cnl, "Intel(R) Cannonlake")
+CHIPSET(0x5A4A, cnl, cnl, "Intel(R) Cannonlake")
+CHIPSET(0x5A50, cnl, cnl, "Intel(R) Cannonlake")
+CHIPSET(0x5A51, cnl, cnl, "Intel(R) Cannonlake")
+CHIPSET(0x5A52, cnl, cnl, "Intel(R) Cannonlake")
+CHIPSET(0x5A54, cnl, cnl, "Intel(R) Cannonlake")
+CHIPSET(0x5A55, cnl, cnl, "Intel(R) Cannonlake")
+CHIPSET(0x5A59, cnl, cnl, "Intel(R) Cannonlake")
+CHIPSET(0x5A5A, cnl, cnl, "Intel(R) Cannonlake")
+CHIPSET(0x5A5C, cnl, cnl, "Intel(R) Cannonlake")
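
How the new device ids take effect: the lookup functions in i965_device_info.c re-include i965_pciids.h with a locally defined CHIPSET(), so each 0x5Axx row above resolves to the cnl_hw_codec_info and cnl_device_info tables added earlier in this patch. The expansion below is a sketch based on how the existing families in this table are consumed, not code introduced by this change:

struct hw_codec_info *
i965_get_codec_info(int devid)
{
    switch (devid) {
#undef CHIPSET
#define CHIPSET(id, family, dev, str) \
    case id: return &dev##_hw_codec_info;
#include "i965_pciids.h"
    default:
        return NULL;
    }
}
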
diff --git a/src/i965_post_processing.c b/src/i965_post_processing.c
index 9d902fb0..cd6b2c03 100644
--- a/src/i965_post_processing.c
+++ b/src/i965_post_processing.c
@@ -5368,6 +5368,8 @@ vebox_processing_simple(VADriverContextP ctx,
if (IS_GEN9(i965->intel.device_info))
status = gen9_vebox_process_picture(ctx, pp_context->vebox_proc_ctx);
+ else if (IS_GEN10(i965->intel.device_info))
+ status = gen10_vebox_process_picture(ctx, pp_context->vebox_proc_ctx);
return status;
}
@@ -6205,7 +6207,8 @@ i965_proc_picture(VADriverContextP ctx,
if (IS_GEN7(i965->intel.device_info) ||
IS_GEN8(i965->intel.device_info) ||
- IS_GEN9(i965->intel.device_info)) {
+ IS_GEN9(i965->intel.device_info) ||
+ IS_GEN10(i965->intel.device_info)) {
unsigned int saved_filter_flag;
struct i965_post_processing_context *i965pp_context = i965->pp_context;
diff --git a/src/intel_batchbuffer.c b/src/intel_batchbuffer.c
index 09a9737d..6d12130f 100644
--- a/src/intel_batchbuffer.c
+++ b/src/intel_batchbuffer.c
@@ -220,9 +220,11 @@ intel_batchbuffer_emit_mi_flush(struct intel_batchbuffer *batch)
if (IS_GEN6(intel->device_info) ||
IS_GEN7(intel->device_info) ||
IS_GEN8(intel->device_info) ||
- IS_GEN9(intel->device_info)) {
+ IS_GEN9(intel->device_info) ||
+ IS_GEN10(intel->device_info)) {
if (ring_flag == I915_EXEC_RENDER) {
- if (IS_GEN8(intel->device_info) || IS_GEN9(intel->device_info)) {
+ if (IS_GEN8(intel->device_info) || IS_GEN9(intel->device_info) ||
+ IS_GEN10(intel->device_info)) {
BEGIN_BATCH(batch, 6);
OUT_BATCH(batch, CMD_PIPE_CONTROL | (6 - 2));
diff --git a/src/intel_common_vpp_internal.c b/src/intel_common_vpp_internal.c
index 20969fb9..a5efcaab 100644
--- a/src/intel_common_vpp_internal.c
+++ b/src/intel_common_vpp_internal.c
@@ -76,7 +76,8 @@ intel_10bit_8bit_scaling_post_processing(VADriverContextP ctx,
struct i965_driver_data *i965 = i965_driver_data(ctx);
VAStatus va_status = VA_STATUS_ERROR_UNIMPLEMENTED;
- if (IS_GEN9(i965->intel.device_info))
+ if (IS_GEN9(i965->intel.device_info) ||
+ IS_GEN10(i965->intel.device_info))
va_status = gen9_10bit_8bit_scaling_post_processing(ctx, pp_context,
src_surface,
src_rect,
diff --git a/src/intel_driver.c b/src/intel_driver.c
index 21b8da68..8e8c9af3 100644
--- a/src/intel_driver.c
+++ b/src/intel_driver.c
@@ -158,7 +158,8 @@ intel_driver_init(VADriverContextP ctx)
#define GEN9_PTE_CACHE 2
- if (IS_GEN9(intel->device_info))
+ if (IS_GEN9(intel->device_info) ||
+ IS_GEN10(intel->device_info))
intel->mocs_state = GEN9_PTE_CACHE;
intel_driver_get_revid(intel, &intel->revision);
diff --git a/src/intel_driver.h b/src/intel_driver.h
index 80643c32..f88dee09 100644
--- a/src/intel_driver.h
+++ b/src/intel_driver.h
@@ -239,4 +239,6 @@ struct intel_region {
#define IS_GLK(device_info) (device_info->is_glklake)
+#define IS_GEN10(device_info) (device_info->gen == 10)
+
#endif /* _INTEL_DRIVER_H_ */