diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 1787e1299b1b2a8ec6647473aa64c4c03efd71f0..68fecf355471474999e9796baf110fd1dc321db4 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -56,6 +56,15 @@ i915-$(CONFIG_COMPAT)   += i915_ioc32.o
 i915-$(CONFIG_DEBUG_FS) += i915_debugfs.o intel_pipe_crc.o
 i915-$(CONFIG_PERF_EVENTS) += i915_pmu.o
 
+# Test that the headers are compilable as standalone units
+i915-$(CONFIG_DRM_I915_WERROR) += \
+	test_i915_active_types_standalone.o \
+	test_i915_gem_context_types_standalone.o \
+	test_i915_timeline_types_standalone.o \
+	test_intel_context_types_standalone.o \
+	test_intel_engine_types_standalone.o \
+	test_intel_workarounds_types_standalone.o
+
 # GEM code
 i915-y += \
 	  i915_active.o \
@@ -77,6 +86,7 @@ i915-y += \
 	  i915_gem_tiling.o \
 	  i915_gem_userptr.o \
 	  i915_gemfs.o \
+	  i915_globals.o \
 	  i915_query.o \
 	  i915_request.o \
 	  i915_scheduler.o \
@@ -84,6 +94,7 @@ i915-y += \
 	  i915_trace_points.o \
 	  i915_vma.o \
 	  intel_breadcrumbs.o \
+	  intel_context.o \
 	  intel_engine_cs.o \
 	  intel_hangcheck.o \
 	  intel_lrc.o \
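
Each of the new test_*_standalone.o units is expected to be nothing more
than a translation unit that includes the header under test, so with
CONFIG_DRM_I915_WERROR any header that is not self-contained fails the
build. A minimal sketch of one such unit (the boilerplate in the real
files may differ):

    /* test_i915_active_types_standalone.c: sketch only */
    #include "i915_active_types.h"
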
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index 35b4ec3f7618b887e5661d0d652cca99b6ed02c6..cf4a1ecf6853659f6c231b07e1d5149c5f480555 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -391,12 +391,12 @@ struct cmd_info {
 #define F_POST_HANDLE	(1<<2)
 	u32 flag;
 
-#define R_RCS	(1 << RCS)
-#define R_VCS1  (1 << VCS)
-#define R_VCS2  (1 << VCS2)
+#define R_RCS	BIT(RCS0)
+#define R_VCS1  BIT(VCS0)
+#define R_VCS2  BIT(VCS1)
 #define R_VCS	(R_VCS1 | R_VCS2)
-#define R_BCS	(1 << BCS)
-#define R_VECS	(1 << VECS)
+#define R_BCS	BIT(BCS0)
+#define R_VECS	BIT(VECS0)
 #define R_ALL (R_RCS | R_VCS | R_BCS | R_VECS)
 	/* rings that support this cmd: BLT/RCS/VCS/VECS */
 	u16 rings;
@@ -558,7 +558,7 @@ static const struct decode_info decode_info_vebox = {
 };
 
 static const struct decode_info *ring_decode_info[I915_NUM_ENGINES][8] = {
-	[RCS] = {
+	[RCS0] = {
 		&decode_info_mi,
 		NULL,
 		NULL,
@@ -569,7 +569,7 @@ static const struct decode_info *ring_decode_info[I915_NUM_ENGINES][8] = {
 		NULL,
 	},
 
-	[VCS] = {
+	[VCS0] = {
 		&decode_info_mi,
 		NULL,
 		NULL,
@@ -580,7 +580,7 @@ static const struct decode_info *ring_decode_info[I915_NUM_ENGINES][8] = {
 		NULL,
 	},
 
-	[BCS] = {
+	[BCS0] = {
 		&decode_info_mi,
 		NULL,
 		&decode_info_2d,
@@ -591,7 +591,7 @@ static const struct decode_info *ring_decode_info[I915_NUM_ENGINES][8] = {
 		NULL,
 	},
 
-	[VECS] = {
+	[VECS0] = {
 		&decode_info_mi,
 		NULL,
 		NULL,
@@ -602,7 +602,7 @@ static const struct decode_info *ring_decode_info[I915_NUM_ENGINES][8] = {
 		NULL,
 	},
 
-	[VCS2] = {
+	[VCS1] = {
 		&decode_info_mi,
 		NULL,
 		NULL,
@@ -631,8 +631,7 @@ static inline const struct cmd_info *find_cmd_entry(struct intel_gvt *gvt,
 	struct cmd_entry *e;
 
 	hash_for_each_possible(gvt->cmd_table, e, hlist, opcode) {
-		if ((opcode == e->info->opcode) &&
-				(e->info->rings & (1 << ring_id)))
+		if (opcode == e->info->opcode && e->info->rings & BIT(ring_id))
 			return e->info;
 	}
 	return NULL;
@@ -943,15 +942,12 @@ static int cmd_handler_lri(struct parser_exec_state *s)
 	struct intel_gvt *gvt = s->vgpu->gvt;
 
 	for (i = 1; i < cmd_len; i += 2) {
-		if (IS_BROADWELL(gvt->dev_priv) &&
-				(s->ring_id != RCS)) {
-			if (s->ring_id == BCS &&
-					cmd_reg(s, i) ==
-					i915_mmio_reg_offset(DERRMR))
+		if (IS_BROADWELL(gvt->dev_priv) && s->ring_id != RCS0) {
+			if (s->ring_id == BCS0 &&
+			    cmd_reg(s, i) == i915_mmio_reg_offset(DERRMR))
 				ret |= 0;
 			else
-				ret |= (cmd_reg_inhibit(s, i)) ?
-					-EBADRQC : 0;
+				ret |= cmd_reg_inhibit(s, i) ? -EBADRQC : 0;
 		}
 		if (ret)
 			break;
@@ -1047,27 +1043,27 @@ struct cmd_interrupt_event {
 };
 
 static struct cmd_interrupt_event cmd_interrupt_events[] = {
-	[RCS] = {
+	[RCS0] = {
 		.pipe_control_notify = RCS_PIPE_CONTROL,
 		.mi_flush_dw = INTEL_GVT_EVENT_RESERVED,
 		.mi_user_interrupt = RCS_MI_USER_INTERRUPT,
 	},
-	[BCS] = {
+	[BCS0] = {
 		.pipe_control_notify = INTEL_GVT_EVENT_RESERVED,
 		.mi_flush_dw = BCS_MI_FLUSH_DW,
 		.mi_user_interrupt = BCS_MI_USER_INTERRUPT,
 	},
-	[VCS] = {
+	[VCS0] = {
 		.pipe_control_notify = INTEL_GVT_EVENT_RESERVED,
 		.mi_flush_dw = VCS_MI_FLUSH_DW,
 		.mi_user_interrupt = VCS_MI_USER_INTERRUPT,
 	},
-	[VCS2] = {
+	[VCS1] = {
 		.pipe_control_notify = INTEL_GVT_EVENT_RESERVED,
 		.mi_flush_dw = VCS2_MI_FLUSH_DW,
 		.mi_user_interrupt = VCS2_MI_USER_INTERRUPT,
 	},
-	[VECS] = {
+	[VECS0] = {
 		.pipe_control_notify = INTEL_GVT_EVENT_RESERVED,
 		.mi_flush_dw = VECS_MI_FLUSH_DW,
 		.mi_user_interrupt = VECS_MI_USER_INTERRUPT,
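
The renames above follow the new <class><instance> engine naming: RCS
becomes RCS0, the second BSD ring VCS2 becomes VCS1, and the open-coded
(1 << id) masks become BIT(id). A hedged sketch of the idea; the real
enum lives in the engine type headers and lists more instances:

    enum intel_engine_id {              /* illustrative subset */
            RCS0 = 0,                   /* render, instance 0 */
            BCS0,                       /* blitter (copy), instance 0 */
            VCS0,                       /* video decode, instance 0 */
            VCS1,                       /* video decode, instance 1; was "VCS2" */
            VECS0,                      /* video enhancement, instance 0 */
    };

    #define R_RCS BIT(RCS0)             /* same bit as (1 << RCS0) */
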
diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.c b/drivers/gpu/drm/i915/gvt/dmabuf.c
index 3e7e2b80c8579017cecdda478bc6166e1f46e061..f27edf17b4ab6ce089db97fa28ec44894f5ed7bc 100644
--- a/drivers/gpu/drm/i915/gvt/dmabuf.c
+++ b/drivers/gpu/drm/i915/gvt/dmabuf.c
@@ -153,7 +153,7 @@ static struct drm_i915_gem_object *vgpu_create_gem(struct drm_device *dev,
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct drm_i915_gem_object *obj;
 
-	obj = i915_gem_object_alloc(dev_priv);
+	obj = i915_gem_object_alloc();
 	if (obj == NULL)
 		return NULL;
 
diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c
index 70494e394d2cbd0c2dceba4a8d466dfb11ec7658..1a93472cb34eff6170643a1aa76cc33943661b5f 100644
--- a/drivers/gpu/drm/i915/gvt/execlist.c
+++ b/drivers/gpu/drm/i915/gvt/execlist.c
@@ -47,17 +47,16 @@
 		((a)->lrca == (b)->lrca))
 
 static int context_switch_events[] = {
-	[RCS] = RCS_AS_CONTEXT_SWITCH,
-	[BCS] = BCS_AS_CONTEXT_SWITCH,
-	[VCS] = VCS_AS_CONTEXT_SWITCH,
-	[VCS2] = VCS2_AS_CONTEXT_SWITCH,
-	[VECS] = VECS_AS_CONTEXT_SWITCH,
+	[RCS0]  = RCS_AS_CONTEXT_SWITCH,
+	[BCS0]  = BCS_AS_CONTEXT_SWITCH,
+	[VCS0]  = VCS_AS_CONTEXT_SWITCH,
+	[VCS1]  = VCS2_AS_CONTEXT_SWITCH,
+	[VECS0] = VECS_AS_CONTEXT_SWITCH,
 };
 
-static int ring_id_to_context_switch_event(int ring_id)
+static int ring_id_to_context_switch_event(unsigned int ring_id)
 {
-	if (WARN_ON(ring_id < RCS ||
-		    ring_id >= ARRAY_SIZE(context_switch_events)))
+	if (WARN_ON(ring_id >= ARRAY_SIZE(context_switch_events)))
 		return -EINVAL;
 
 	return context_switch_events[ring_id];
@@ -411,7 +410,7 @@ static int complete_execlist_workload(struct intel_vgpu_workload *workload)
 	gvt_dbg_el("complete workload %p status %d\n", workload,
 			workload->status);
 
-	if (workload->status || (vgpu->resetting_eng & ENGINE_MASK(ring_id)))
+	if (workload->status || (vgpu->resetting_eng & BIT(ring_id)))
 		goto out;
 
 	if (!list_empty(workload_q_head(vgpu, ring_id))) {
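
Making ring_id unsigned is what allows dropping the lower-bound test:
with RCS0 == 0 and an unsigned type, ring_id < RCS0 can never be true,
and a negative value from a buggy caller wraps around and is still
caught by the remaining range check. A small illustration:

    unsigned int ring_id = (unsigned int)-1;  /* buggy caller passed -1 */

    /* The wrapped value fails the single upper-bound check. */
    WARN_ON(ring_id >= ARRAY_SIZE(context_switch_events));
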
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index bc64b810e0d5300c9cc6ea812f1a0800f3d2f6da..b596cb42e24eb83621fdddf3d7934086824e5e17 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -323,25 +323,25 @@ static int gdrst_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
 	} else {
 		if (data & GEN6_GRDOM_RENDER) {
 			gvt_dbg_mmio("vgpu%d: request RCS reset\n", vgpu->id);
-			engine_mask |= (1 << RCS);
+			engine_mask |= BIT(RCS0);
 		}
 		if (data & GEN6_GRDOM_MEDIA) {
 			gvt_dbg_mmio("vgpu%d: request VCS reset\n", vgpu->id);
-			engine_mask |= (1 << VCS);
+			engine_mask |= BIT(VCS0);
 		}
 		if (data & GEN6_GRDOM_BLT) {
 			gvt_dbg_mmio("vgpu%d: request BCS Reset\n", vgpu->id);
-			engine_mask |= (1 << BCS);
+			engine_mask |= BIT(BCS0);
 		}
 		if (data & GEN6_GRDOM_VECS) {
 			gvt_dbg_mmio("vgpu%d: request VECS Reset\n", vgpu->id);
-			engine_mask |= (1 << VECS);
+			engine_mask |= BIT(VECS0);
 		}
 		if (data & GEN8_GRDOM_MEDIA2) {
 			gvt_dbg_mmio("vgpu%d: request VCS2 Reset\n", vgpu->id);
-			if (HAS_BSD2(vgpu->gvt->dev_priv))
-				engine_mask |= (1 << VCS2);
+			engine_mask |= BIT(VCS1);
 		}
+		engine_mask &= INTEL_INFO(vgpu->gvt->dev_priv)->engine_mask;
 	}
 
 	/* vgpu_lock already hold by emulate mmio r/w */
@@ -1704,7 +1704,7 @@ static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
 			return 0;
 
 		ret = intel_vgpu_select_submission_ops(vgpu,
-			       ENGINE_MASK(ring_id),
+			       BIT(ring_id),
 			       INTEL_VGPU_EXECLIST_SUBMISSION);
 		if (ret)
 			return ret;
@@ -1724,19 +1724,19 @@ static int gvt_reg_tlb_control_handler(struct intel_vgpu *vgpu,
 
 	switch (offset) {
 	case 0x4260:
-		id = RCS;
+		id = RCS0;
 		break;
 	case 0x4264:
-		id = VCS;
+		id = VCS0;
 		break;
 	case 0x4268:
-		id = VCS2;
+		id = VCS1;
 		break;
 	case 0x426c:
-		id = BCS;
+		id = BCS0;
 		break;
 	case 0x4270:
-		id = VECS;
+		id = VECS0;
 		break;
 	default:
 		return -EINVAL;
@@ -1793,7 +1793,7 @@ static int ring_reset_ctl_write(struct intel_vgpu *vgpu,
 	MMIO_F(prefix(BLT_RING_BASE), s, f, am, rm, d, r, w); \
 	MMIO_F(prefix(GEN6_BSD_RING_BASE), s, f, am, rm, d, r, w); \
 	MMIO_F(prefix(VEBOX_RING_BASE), s, f, am, rm, d, r, w); \
-	if (HAS_BSD2(dev_priv)) \
+	if (HAS_ENGINE(dev_priv, VCS1)) \
 		MMIO_F(prefix(GEN8_BSD2_RING_BASE), s, f, am, rm, d, r, w); \
 } while (0)
 
diff --git a/drivers/gpu/drm/i915/gvt/interrupt.c b/drivers/gpu/drm/i915/gvt/interrupt.c
index 67125c5eec6eb59c7502065ae2c4a3698b390822..951681813230fbcd10052a07efe80c51c4ea716c 100644
--- a/drivers/gpu/drm/i915/gvt/interrupt.c
+++ b/drivers/gpu/drm/i915/gvt/interrupt.c
@@ -536,7 +536,7 @@ static void gen8_init_irq(
 	SET_BIT_INFO(irq, 4, VCS_MI_FLUSH_DW, INTEL_GVT_IRQ_INFO_GT1);
 	SET_BIT_INFO(irq, 8, VCS_AS_CONTEXT_SWITCH, INTEL_GVT_IRQ_INFO_GT1);
 
-	if (HAS_BSD2(gvt->dev_priv)) {
+	if (HAS_ENGINE(gvt->dev_priv, VCS1)) {
 		SET_BIT_INFO(irq, 16, VCS2_MI_USER_INTERRUPT,
 			INTEL_GVT_IRQ_INFO_GT1);
 		SET_BIT_INFO(irq, 20, VCS2_MI_FLUSH_DW,
diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c
index 7d84cfb9051ac886579648ac7bb2cc5e2a70b3fa..f64c76dd11d4f61038ac48bf02ceaa3caeaf02c3 100644
--- a/drivers/gpu/drm/i915/gvt/mmio_context.c
+++ b/drivers/gpu/drm/i915/gvt/mmio_context.c
@@ -41,102 +41,102 @@
 
 /* Raw offset is appended to each line for convenience. */
 static struct engine_mmio gen8_engine_mmio_list[] __cacheline_aligned = {
-	{RCS, GFX_MODE_GEN7, 0xffff, false}, /* 0x229c */
-	{RCS, GEN9_CTX_PREEMPT_REG, 0x0, false}, /* 0x2248 */
-	{RCS, HWSTAM, 0x0, false}, /* 0x2098 */
-	{RCS, INSTPM, 0xffff, true}, /* 0x20c0 */
-	{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 0), 0, false}, /* 0x24d0 */
-	{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 1), 0, false}, /* 0x24d4 */
-	{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 2), 0, false}, /* 0x24d8 */
-	{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 3), 0, false}, /* 0x24dc */
-	{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 4), 0, false}, /* 0x24e0 */
-	{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 5), 0, false}, /* 0x24e4 */
-	{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 6), 0, false}, /* 0x24e8 */
-	{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 7), 0, false}, /* 0x24ec */
-	{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 8), 0, false}, /* 0x24f0 */
-	{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 9), 0, false}, /* 0x24f4 */
-	{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 10), 0, false}, /* 0x24f8 */
-	{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 11), 0, false}, /* 0x24fc */
-	{RCS, CACHE_MODE_1, 0xffff, true}, /* 0x7004 */
-	{RCS, GEN7_GT_MODE, 0xffff, true}, /* 0x7008 */
-	{RCS, CACHE_MODE_0_GEN7, 0xffff, true}, /* 0x7000 */
-	{RCS, GEN7_COMMON_SLICE_CHICKEN1, 0xffff, true}, /* 0x7010 */
-	{RCS, HDC_CHICKEN0, 0xffff, true}, /* 0x7300 */
-	{RCS, VF_GUARDBAND, 0xffff, true}, /* 0x83a4 */
-
-	{BCS, RING_GFX_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2229c */
-	{BCS, RING_MI_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2209c */
-	{BCS, RING_INSTPM(BLT_RING_BASE), 0xffff, false}, /* 0x220c0 */
-	{BCS, RING_HWSTAM(BLT_RING_BASE), 0x0, false}, /* 0x22098 */
-	{BCS, RING_EXCC(BLT_RING_BASE), 0x0, false}, /* 0x22028 */
-	{RCS, INVALID_MMIO_REG, 0, false } /* Terminated */
+	{RCS0, GFX_MODE_GEN7, 0xffff, false}, /* 0x229c */
+	{RCS0, GEN9_CTX_PREEMPT_REG, 0x0, false}, /* 0x2248 */
+	{RCS0, HWSTAM, 0x0, false}, /* 0x2098 */
+	{RCS0, INSTPM, 0xffff, true}, /* 0x20c0 */
+	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 0), 0, false}, /* 0x24d0 */
+	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 1), 0, false}, /* 0x24d4 */
+	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 2), 0, false}, /* 0x24d8 */
+	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 3), 0, false}, /* 0x24dc */
+	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 4), 0, false}, /* 0x24e0 */
+	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 5), 0, false}, /* 0x24e4 */
+	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 6), 0, false}, /* 0x24e8 */
+	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 7), 0, false}, /* 0x24ec */
+	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 8), 0, false}, /* 0x24f0 */
+	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 9), 0, false}, /* 0x24f4 */
+	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 10), 0, false}, /* 0x24f8 */
+	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 11), 0, false}, /* 0x24fc */
+	{RCS0, CACHE_MODE_1, 0xffff, true}, /* 0x7004 */
+	{RCS0, GEN7_GT_MODE, 0xffff, true}, /* 0x7008 */
+	{RCS0, CACHE_MODE_0_GEN7, 0xffff, true}, /* 0x7000 */
+	{RCS0, GEN7_COMMON_SLICE_CHICKEN1, 0xffff, true}, /* 0x7010 */
+	{RCS0, HDC_CHICKEN0, 0xffff, true}, /* 0x7300 */
+	{RCS0, VF_GUARDBAND, 0xffff, true}, /* 0x83a4 */
+
+	{BCS0, RING_GFX_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2229c */
+	{BCS0, RING_MI_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2209c */
+	{BCS0, RING_INSTPM(BLT_RING_BASE), 0xffff, false}, /* 0x220c0 */
+	{BCS0, RING_HWSTAM(BLT_RING_BASE), 0x0, false}, /* 0x22098 */
+	{BCS0, RING_EXCC(BLT_RING_BASE), 0x0, false}, /* 0x22028 */
+	{RCS0, INVALID_MMIO_REG, 0, false } /* Terminated */
 };
 
 static struct engine_mmio gen9_engine_mmio_list[] __cacheline_aligned = {
-	{RCS, GFX_MODE_GEN7, 0xffff, false}, /* 0x229c */
-	{RCS, GEN9_CTX_PREEMPT_REG, 0x0, false}, /* 0x2248 */
-	{RCS, HWSTAM, 0x0, false}, /* 0x2098 */
-	{RCS, INSTPM, 0xffff, true}, /* 0x20c0 */
-	{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 0), 0, false}, /* 0x24d0 */
-	{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 1), 0, false}, /* 0x24d4 */
-	{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 2), 0, false}, /* 0x24d8 */
-	{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 3), 0, false}, /* 0x24dc */
-	{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 4), 0, false}, /* 0x24e0 */
-	{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 5), 0, false}, /* 0x24e4 */
-	{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 6), 0, false}, /* 0x24e8 */
-	{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 7), 0, false}, /* 0x24ec */
-	{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 8), 0, false}, /* 0x24f0 */
-	{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 9), 0, false}, /* 0x24f4 */
-	{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 10), 0, false}, /* 0x24f8 */
-	{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 11), 0, false}, /* 0x24fc */
-	{RCS, CACHE_MODE_1, 0xffff, true}, /* 0x7004 */
-	{RCS, GEN7_GT_MODE, 0xffff, true}, /* 0x7008 */
-	{RCS, CACHE_MODE_0_GEN7, 0xffff, true}, /* 0x7000 */
-	{RCS, GEN7_COMMON_SLICE_CHICKEN1, 0xffff, true}, /* 0x7010 */
-	{RCS, HDC_CHICKEN0, 0xffff, true}, /* 0x7300 */
-	{RCS, VF_GUARDBAND, 0xffff, true}, /* 0x83a4 */
-
-	{RCS, GEN8_PRIVATE_PAT_LO, 0, false}, /* 0x40e0 */
-	{RCS, GEN8_PRIVATE_PAT_HI, 0, false}, /* 0x40e4 */
-	{RCS, GEN8_CS_CHICKEN1, 0xffff, true}, /* 0x2580 */
-	{RCS, COMMON_SLICE_CHICKEN2, 0xffff, true}, /* 0x7014 */
-	{RCS, GEN9_CS_DEBUG_MODE1, 0xffff, false}, /* 0x20ec */
-	{RCS, GEN8_L3SQCREG4, 0, false}, /* 0xb118 */
-	{RCS, GEN7_HALF_SLICE_CHICKEN1, 0xffff, true}, /* 0xe100 */
-	{RCS, HALF_SLICE_CHICKEN2, 0xffff, true}, /* 0xe180 */
-	{RCS, HALF_SLICE_CHICKEN3, 0xffff, true}, /* 0xe184 */
-	{RCS, GEN9_HALF_SLICE_CHICKEN5, 0xffff, true}, /* 0xe188 */
-	{RCS, GEN9_HALF_SLICE_CHICKEN7, 0xffff, true}, /* 0xe194 */
-	{RCS, GEN8_ROW_CHICKEN, 0xffff, true}, /* 0xe4f0 */
-	{RCS, TRVATTL3PTRDW(0), 0, false}, /* 0x4de0 */
-	{RCS, TRVATTL3PTRDW(1), 0, false}, /* 0x4de4 */
-	{RCS, TRNULLDETCT, 0, false}, /* 0x4de8 */
-	{RCS, TRINVTILEDETCT, 0, false}, /* 0x4dec */
-	{RCS, TRVADR, 0, false}, /* 0x4df0 */
-	{RCS, TRTTE, 0, false}, /* 0x4df4 */
-
-	{BCS, RING_GFX_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2229c */
-	{BCS, RING_MI_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2209c */
-	{BCS, RING_INSTPM(BLT_RING_BASE), 0xffff, false}, /* 0x220c0 */
-	{BCS, RING_HWSTAM(BLT_RING_BASE), 0x0, false}, /* 0x22098 */
-	{BCS, RING_EXCC(BLT_RING_BASE), 0x0, false}, /* 0x22028 */
-
-	{VCS2, RING_EXCC(GEN8_BSD2_RING_BASE), 0xffff, false}, /* 0x1c028 */
-
-	{VECS, RING_EXCC(VEBOX_RING_BASE), 0xffff, false}, /* 0x1a028 */
-
-	{RCS, GEN8_HDC_CHICKEN1, 0xffff, true}, /* 0x7304 */
-	{RCS, GEN9_CTX_PREEMPT_REG, 0x0, false}, /* 0x2248 */
-	{RCS, GEN7_UCGCTL4, 0x0, false}, /* 0x940c */
-	{RCS, GAMT_CHKN_BIT_REG, 0x0, false}, /* 0x4ab8 */
-
-	{RCS, GEN9_GAMT_ECO_REG_RW_IA, 0x0, false}, /* 0x4ab0 */
-	{RCS, GEN9_CSFE_CHICKEN1_RCS, 0xffff, false}, /* 0x20d4 */
-
-	{RCS, GEN8_GARBCNTL, 0x0, false}, /* 0xb004 */
-	{RCS, GEN7_FF_THREAD_MODE, 0x0, false}, /* 0x20a0 */
-	{RCS, FF_SLICE_CS_CHICKEN2, 0xffff, false}, /* 0x20e4 */
-	{RCS, INVALID_MMIO_REG, 0, false } /* Terminated */
+	{RCS0, GFX_MODE_GEN7, 0xffff, false}, /* 0x229c */
+	{RCS0, GEN9_CTX_PREEMPT_REG, 0x0, false}, /* 0x2248 */
+	{RCS0, HWSTAM, 0x0, false}, /* 0x2098 */
+	{RCS0, INSTPM, 0xffff, true}, /* 0x20c0 */
+	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 0), 0, false}, /* 0x24d0 */
+	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 1), 0, false}, /* 0x24d4 */
+	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 2), 0, false}, /* 0x24d8 */
+	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 3), 0, false}, /* 0x24dc */
+	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 4), 0, false}, /* 0x24e0 */
+	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 5), 0, false}, /* 0x24e4 */
+	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 6), 0, false}, /* 0x24e8 */
+	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 7), 0, false}, /* 0x24ec */
+	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 8), 0, false}, /* 0x24f0 */
+	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 9), 0, false}, /* 0x24f4 */
+	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 10), 0, false}, /* 0x24f8 */
+	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 11), 0, false}, /* 0x24fc */
+	{RCS0, CACHE_MODE_1, 0xffff, true}, /* 0x7004 */
+	{RCS0, GEN7_GT_MODE, 0xffff, true}, /* 0x7008 */
+	{RCS0, CACHE_MODE_0_GEN7, 0xffff, true}, /* 0x7000 */
+	{RCS0, GEN7_COMMON_SLICE_CHICKEN1, 0xffff, true}, /* 0x7010 */
+	{RCS0, HDC_CHICKEN0, 0xffff, true}, /* 0x7300 */
+	{RCS0, VF_GUARDBAND, 0xffff, true}, /* 0x83a4 */
+
+	{RCS0, GEN8_PRIVATE_PAT_LO, 0, false}, /* 0x40e0 */
+	{RCS0, GEN8_PRIVATE_PAT_HI, 0, false}, /* 0x40e4 */
+	{RCS0, GEN8_CS_CHICKEN1, 0xffff, true}, /* 0x2580 */
+	{RCS0, COMMON_SLICE_CHICKEN2, 0xffff, true}, /* 0x7014 */
+	{RCS0, GEN9_CS_DEBUG_MODE1, 0xffff, false}, /* 0x20ec */
+	{RCS0, GEN8_L3SQCREG4, 0, false}, /* 0xb118 */
+	{RCS0, GEN7_HALF_SLICE_CHICKEN1, 0xffff, true}, /* 0xe100 */
+	{RCS0, HALF_SLICE_CHICKEN2, 0xffff, true}, /* 0xe180 */
+	{RCS0, HALF_SLICE_CHICKEN3, 0xffff, true}, /* 0xe184 */
+	{RCS0, GEN9_HALF_SLICE_CHICKEN5, 0xffff, true}, /* 0xe188 */
+	{RCS0, GEN9_HALF_SLICE_CHICKEN7, 0xffff, true}, /* 0xe194 */
+	{RCS0, GEN8_ROW_CHICKEN, 0xffff, true}, /* 0xe4f0 */
+	{RCS0, TRVATTL3PTRDW(0), 0, false}, /* 0x4de0 */
+	{RCS0, TRVATTL3PTRDW(1), 0, false}, /* 0x4de4 */
+	{RCS0, TRNULLDETCT, 0, false}, /* 0x4de8 */
+	{RCS0, TRINVTILEDETCT, 0, false}, /* 0x4dec */
+	{RCS0, TRVADR, 0, false}, /* 0x4df0 */
+	{RCS0, TRTTE, 0, false}, /* 0x4df4 */
+
+	{BCS0, RING_GFX_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2229c */
+	{BCS0, RING_MI_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2209c */
+	{BCS0, RING_INSTPM(BLT_RING_BASE), 0xffff, false}, /* 0x220c0 */
+	{BCS0, RING_HWSTAM(BLT_RING_BASE), 0x0, false}, /* 0x22098 */
+	{BCS0, RING_EXCC(BLT_RING_BASE), 0x0, false}, /* 0x22028 */
+
+	{VCS1, RING_EXCC(GEN8_BSD2_RING_BASE), 0xffff, false}, /* 0x1c028 */
+
+	{VECS0, RING_EXCC(VEBOX_RING_BASE), 0xffff, false}, /* 0x1a028 */
+
+	{RCS0, GEN8_HDC_CHICKEN1, 0xffff, true}, /* 0x7304 */
+	{RCS0, GEN9_CTX_PREEMPT_REG, 0x0, false}, /* 0x2248 */
+	{RCS0, GEN7_UCGCTL4, 0x0, false}, /* 0x940c */
+	{RCS0, GAMT_CHKN_BIT_REG, 0x0, false}, /* 0x4ab8 */
+
+	{RCS0, GEN9_GAMT_ECO_REG_RW_IA, 0x0, false}, /* 0x4ab0 */
+	{RCS0, GEN9_CSFE_CHICKEN1_RCS, 0xffff, false}, /* 0x20d4 */
+
+	{RCS0, GEN8_GARBCNTL, 0x0, false}, /* 0xb004 */
+	{RCS0, GEN7_FF_THREAD_MODE, 0x0, false}, /* 0x20a0 */
+	{RCS0, FF_SLICE_CS_CHICKEN2, 0xffff, false}, /* 0x20e4 */
+	{RCS0, INVALID_MMIO_REG, 0, false } /* Terminated */
 };
 
 static struct {
@@ -149,11 +149,11 @@ static void load_render_mocs(struct drm_i915_private *dev_priv)
 {
 	i915_reg_t offset;
 	u32 regs[] = {
-		[RCS] = 0xc800,
-		[VCS] = 0xc900,
-		[VCS2] = 0xca00,
-		[BCS] = 0xcc00,
-		[VECS] = 0xcb00,
+		[RCS0]  = 0xc800,
+		[VCS0]  = 0xc900,
+		[VCS1]  = 0xca00,
+		[BCS0]  = 0xcc00,
+		[VECS0] = 0xcb00,
 	};
 	int ring_id, i;
 
@@ -301,7 +301,7 @@ int intel_vgpu_restore_inhibit_context(struct intel_vgpu *vgpu,
 		goto out;
 
 	/* no MOCS register in context except render engine */
-	if (req->engine->id != RCS)
+	if (req->engine->id != RCS0)
 		goto out;
 
 	ret = restore_render_mocs_control_for_inhibit(vgpu, req);
@@ -331,11 +331,11 @@ static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id)
 	enum forcewake_domains fw;
 	i915_reg_t reg;
 	u32 regs[] = {
-		[RCS] = 0x4260,
-		[VCS] = 0x4264,
-		[VCS2] = 0x4268,
-		[BCS] = 0x426c,
-		[VECS] = 0x4270,
+		[RCS0]  = 0x4260,
+		[VCS0]  = 0x4264,
+		[VCS1]  = 0x4268,
+		[BCS0]  = 0x426c,
+		[VECS0] = 0x4270,
 	};
 
 	if (WARN_ON(ring_id >= ARRAY_SIZE(regs)))
@@ -353,7 +353,7 @@ static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id)
 	 */
 	fw = intel_uncore_forcewake_for_reg(dev_priv, reg,
 					    FW_REG_READ | FW_REG_WRITE);
-	if (ring_id == RCS && (INTEL_GEN(dev_priv) >= 9))
+	if (ring_id == RCS0 && INTEL_GEN(dev_priv) >= 9)
 		fw |= FORCEWAKE_RENDER;
 
 	intel_uncore_forcewake_get(dev_priv, fw);
@@ -378,11 +378,11 @@ static void switch_mocs(struct intel_vgpu *pre, struct intel_vgpu *next,
 	u32 old_v, new_v;
 
 	u32 regs[] = {
-		[RCS] = 0xc800,
-		[VCS] = 0xc900,
-		[VCS2] = 0xca00,
-		[BCS] = 0xcc00,
-		[VECS] = 0xcb00,
+		[RCS0]  = 0xc800,
+		[VCS0]  = 0xc900,
+		[VCS1]  = 0xca00,
+		[BCS0]  = 0xcc00,
+		[VECS0] = 0xcb00,
 	};
 	int i;
 
@@ -390,8 +390,10 @@ static void switch_mocs(struct intel_vgpu *pre, struct intel_vgpu *next,
 	if (WARN_ON(ring_id >= ARRAY_SIZE(regs)))
 		return;
 
-	if ((IS_KABYLAKE(dev_priv)  || IS_BROXTON(dev_priv)
-		|| IS_COFFEELAKE(dev_priv)) && ring_id == RCS)
+	if (ring_id == RCS0 &&
+	    (IS_KABYLAKE(dev_priv) ||
+	     IS_BROXTON(dev_priv) ||
+	     IS_COFFEELAKE(dev_priv)))
 		return;
 
 	if (!pre && !gen9_render_mocs.initialized)
@@ -414,7 +416,7 @@ static void switch_mocs(struct intel_vgpu *pre, struct intel_vgpu *next,
 		offset.reg += 4;
 	}
 
-	if (ring_id == RCS) {
+	if (ring_id == RCS0) {
 		l3_offset.reg = 0xb020;
 		for (i = 0; i < GEN9_MOCS_SIZE / 2; i++) {
 			if (pre)
@@ -492,7 +494,8 @@ static void switch_mmio(struct intel_vgpu *pre,
 			 * itself.
 			 */
 			if (mmio->in_context &&
-			    !is_inhibit_context(&s->shadow_ctx->__engine[ring_id]))
+			    !is_inhibit_context(intel_context_lookup(s->shadow_ctx,
+								     dev_priv->engine[ring_id])))
 				continue;
 
 			if (mmio->mask)
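
The fixed __engine[ring_id] array embedded in the GEM context is gone;
per-engine state is now fetched through a lookup keyed by the engine, as
in the hunk above. A hedged sketch of the call shape (whether a NULL
result needs handling here depends on is_inhibit_context(), which is
assumed to tolerate it):

    struct intel_context *ce;

    ce = intel_context_lookup(s->shadow_ctx, dev_priv->engine[ring_id]);
    if (mmio->in_context && !is_inhibit_context(ce))
            continue;
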
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 1bb8f936fdaa75f2ee738bdf3235a247fac90fe8..7550e09939aea6d4b757d7f1c6e16338e10f35a1 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -93,7 +93,7 @@ static void sr_oa_regs(struct intel_vgpu_workload *workload,
 		i915_mmio_reg_offset(EU_PERF_CNTL6),
 	};
 
-	if (workload->ring_id != RCS)
+	if (workload->ring_id != RCS0)
 		return;
 
 	if (save) {
@@ -149,7 +149,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
 	COPY_REG_MASKED(ctx_ctrl);
 	COPY_REG(ctx_timestamp);
 
-	if (ring_id == RCS) {
+	if (ring_id == RCS0) {
 		COPY_REG(bb_per_ctx_ptr);
 		COPY_REG(rcs_indirect_ctx);
 		COPY_REG(rcs_indirect_ctx_offset);
@@ -177,7 +177,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
 
 	context_page_num = context_page_num >> PAGE_SHIFT;
 
-	if (IS_BROADWELL(gvt->dev_priv) && ring_id == RCS)
+	if (IS_BROADWELL(gvt->dev_priv) && ring_id == RCS0)
 		context_page_num = 19;
 
 	i = 2;
@@ -440,8 +440,7 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
 	if (ret)
 		goto err_unpin;
 
-	if ((workload->ring_id == RCS) &&
-	    (workload->wa_ctx.indirect_ctx.size != 0)) {
+	if (workload->ring_id == RCS0 && workload->wa_ctx.indirect_ctx.size) {
 		ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx);
 		if (ret)
 			goto err_shadow;
@@ -791,7 +790,7 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
 	context_page_num = rq->engine->context_size;
 	context_page_num = context_page_num >> PAGE_SHIFT;
 
-	if (IS_BROADWELL(gvt->dev_priv) && rq->engine->id == RCS)
+	if (IS_BROADWELL(gvt->dev_priv) && rq->engine->id == RCS0)
 		context_page_num = 19;
 
 	i = 2;
@@ -891,8 +890,8 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
 				workload->status = 0;
 		}
 
-		if (!workload->status && !(vgpu->resetting_eng &
-					   ENGINE_MASK(ring_id))) {
+		if (!workload->status &&
+		    !(vgpu->resetting_eng & BIT(ring_id))) {
 			update_guest_context(workload);
 
 			for_each_set_bit(event, workload->pending_events,
@@ -915,7 +914,7 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
 
 	list_del_init(&workload->list);
 
-	if (workload->status || (vgpu->resetting_eng & ENGINE_MASK(ring_id))) {
+	if (workload->status || vgpu->resetting_eng & BIT(ring_id)) {
 		/* If workload->status is not successful, the HW GPU has
 		 * hit a GPU hang or something went wrong with i915/GVT,
 		 * and GVT won't inject a context switch interrupt to guest.
@@ -929,7 +928,7 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
 		 * cleaned up during the resetting process later, so doing
 		 * the workload clean up here doesn't have any impact.
 		 **/
-		intel_vgpu_clean_workloads(vgpu, ENGINE_MASK(ring_id));
+		intel_vgpu_clean_workloads(vgpu, BIT(ring_id));
 	}
 
 	workload->complete(workload);
@@ -1102,9 +1101,9 @@ i915_context_ppgtt_root_restore(struct intel_vgpu_submission *s)
 	struct i915_hw_ppgtt *i915_ppgtt = s->shadow_ctx->ppgtt;
 	int i;
 
-	if (i915_vm_is_48bit(&i915_ppgtt->vm))
+	if (i915_vm_is_4lvl(&i915_ppgtt->vm)) {
 		px_dma(&i915_ppgtt->pml4) = s->i915_context_pml4;
-	else {
+	} else {
 		for (i = 0; i < GEN8_3LVL_PDPES; i++)
 			px_dma(i915_ppgtt->pdp.page_directory[i]) =
 						s->i915_context_pdps[i];
@@ -1155,7 +1154,7 @@ i915_context_ppgtt_root_save(struct intel_vgpu_submission *s)
 	struct i915_hw_ppgtt *i915_ppgtt = s->shadow_ctx->ppgtt;
 	int i;
 
-	if (i915_vm_is_48bit(&i915_ppgtt->vm))
+	if (i915_vm_is_4lvl(&i915_ppgtt->vm))
 		s->i915_context_pml4 = px_dma(&i915_ppgtt->pml4);
 	else {
 		for (i = 0; i < GEN8_3LVL_PDPES; i++)
@@ -1438,7 +1437,7 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
 	workload->rb_start = start;
 	workload->rb_ctl = ctl;
 
-	if (ring_id == RCS) {
+	if (ring_id == RCS0) {
 		intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
 			RING_CTX_OFF(bb_per_ctx_ptr.val), &per_ctx, 4);
 		intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
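
i915_vm_is_48bit() is renamed i915_vm_is_4lvl(): the same predicate, now
named for the four-level page-table layout it implies rather than an
address width. A sketch of the assumed helper:

    /* True when the address space spans more than 32 bits and thus
     * needs four levels of page tables. */
    static inline bool i915_vm_is_4lvl(const struct i915_address_space *vm)
    {
            return (vm->total - 1) >> 32;
    }
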
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
index 720e2b10adaa10f8ccc4c7472da4e300eb4b296f..314e40121e47e0d770fc0a6039bd8bcf4947baae 100644
--- a/drivers/gpu/drm/i915/gvt/vgpu.c
+++ b/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -44,7 +44,7 @@ void populate_pvinfo_page(struct intel_vgpu *vgpu)
 	vgpu_vreg_t(vgpu, vgtif_reg(display_ready)) = 0;
 	vgpu_vreg_t(vgpu, vgtif_reg(vgt_id)) = vgpu->id;
 
-	vgpu_vreg_t(vgpu, vgtif_reg(vgt_caps)) = VGT_CAPS_FULL_48BIT_PPGTT;
+	vgpu_vreg_t(vgpu, vgtif_reg(vgt_caps)) = VGT_CAPS_FULL_PPGTT;
 	vgpu_vreg_t(vgpu, vgtif_reg(vgt_caps)) |= VGT_CAPS_HWSP_EMULATION;
 	vgpu_vreg_t(vgpu, vgtif_reg(vgt_caps)) |= VGT_CAPS_HUGE_GTT;
 
diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c
index db7bb5bd5adde4863df871b71e1279d633ea74f6..863ae12707bac4b853655ef912da8eb2df920012 100644
--- a/drivers/gpu/drm/i915/i915_active.c
+++ b/drivers/gpu/drm/i915/i915_active.c
@@ -6,6 +6,7 @@
 
 #include "i915_drv.h"
 #include "i915_active.h"
+#include "i915_globals.h"
 
 #define BKL(ref) (&(ref)->i915->drm.struct_mutex)
 
@@ -17,6 +18,7 @@
  * nodes from a local slab cache to hopefully reduce the fragmentation.
  */
 static struct i915_global_active {
+	struct i915_global base;
 	struct kmem_cache *slab_cache;
 } global;
 
@@ -285,16 +287,27 @@ void i915_active_retire_noop(struct i915_active_request *active,
 #include "selftests/i915_active.c"
 #endif
 
+static void i915_global_active_shrink(void)
+{
+	kmem_cache_shrink(global.slab_cache);
+}
+
+static void i915_global_active_exit(void)
+{
+	kmem_cache_destroy(global.slab_cache);
+}
+
+static struct i915_global_active global = { {
+	.shrink = i915_global_active_shrink,
+	.exit = i915_global_active_exit,
+} };
+
 int __init i915_global_active_init(void)
 {
 	global.slab_cache = KMEM_CACHE(active_node, SLAB_HWCACHE_ALIGN);
 	if (!global.slab_cache)
 		return -ENOMEM;
 
+	i915_global_register(&global.base);
 	return 0;
 }
-
-void __exit i915_global_active_exit(void)
-{
-	kmem_cache_destroy(global.slab_cache);
-}
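
This is the new i915_globals pattern: each per-driver slab registers a
shrink/exit pair once at init, and a central list then drives cache
trimming and module-unload teardown instead of per-cache exported exit
functions. A sketch of the assumed interface (field names per this
series):

    struct i915_global {
            struct list_head link;
            void (*shrink)(void);   /* trim the cache under memory pressure */
            void (*exit)(void);     /* destroy the cache at module unload */
    };

    void i915_global_register(struct i915_global *global);
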
diff --git a/drivers/gpu/drm/i915/i915_active.h b/drivers/gpu/drm/i915/i915_active.h
index 12b5c1d287d196de138abbcf7ce473654d7a88f9..7d758719ce39d03dc7b0741786030de8978cebf6 100644
--- a/drivers/gpu/drm/i915/i915_active.h
+++ b/drivers/gpu/drm/i915/i915_active.h
@@ -108,19 +108,6 @@ i915_active_request_set_retire_fn(struct i915_active_request *active,
 	active->retire = fn ?: i915_active_retire_noop;
 }
 
-static inline struct i915_request *
-__i915_active_request_peek(const struct i915_active_request *active)
-{
-	/*
-	 * Inside the error capture (running with the driver in an unknown
-	 * state), we want to bend the rules slightly (a lot).
-	 *
-	 * Work is in progress to make it safer, in the meantime this keeps
-	 * the known issue from spamming the logs.
-	 */
-	return rcu_dereference_protected(active->request, 1);
-}
-
 /**
  * i915_active_request_raw - return the active request
  * @active - the active tracker
@@ -419,7 +406,4 @@ void i915_active_fini(struct i915_active *ref);
 static inline void i915_active_fini(struct i915_active *ref) { }
 #endif
 
-int i915_global_active_init(void);
-void i915_global_active_exit(void);
-
 #endif /* _I915_ACTIVE_H_ */
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index 33e8eed64423af5f00fac38c6ca88dccd7b7911e..503d548a55f7d5003b3f676e12be6da0b81e4721 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -868,8 +868,8 @@ void intel_engine_init_cmd_parser(struct intel_engine_cs *engine)
 	if (!IS_GEN(engine->i915, 7))
 		return;
 
-	switch (engine->id) {
-	case RCS:
+	switch (engine->class) {
+	case RENDER_CLASS:
 		if (IS_HASWELL(engine->i915)) {
 			cmd_tables = hsw_render_ring_cmds;
 			cmd_table_count =
@@ -889,12 +889,12 @@ void intel_engine_init_cmd_parser(struct intel_engine_cs *engine)
 
 		engine->get_cmd_length_mask = gen7_render_get_cmd_length_mask;
 		break;
-	case VCS:
+	case VIDEO_DECODE_CLASS:
 		cmd_tables = gen7_video_cmds;
 		cmd_table_count = ARRAY_SIZE(gen7_video_cmds);
 		engine->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
 		break;
-	case BCS:
+	case COPY_ENGINE_CLASS:
 		if (IS_HASWELL(engine->i915)) {
 			cmd_tables = hsw_blt_ring_cmds;
 			cmd_table_count = ARRAY_SIZE(hsw_blt_ring_cmds);
@@ -913,14 +913,14 @@ void intel_engine_init_cmd_parser(struct intel_engine_cs *engine)
 
 		engine->get_cmd_length_mask = gen7_blt_get_cmd_length_mask;
 		break;
-	case VECS:
+	case VIDEO_ENHANCEMENT_CLASS:
 		cmd_tables = hsw_vebox_cmds;
 		cmd_table_count = ARRAY_SIZE(hsw_vebox_cmds);
 		/* VECS can use the same length_mask function as VCS */
 		engine->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
 		break;
 	default:
-		MISSING_CASE(engine->id);
+		MISSING_CASE(engine->class);
 		return;
 	}
 
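
Keying the command parser on engine->class rather than engine->id means
any additional instance of a class (a second video decode engine, say)
automatically picks up the same command tables. The class values
referenced above, as defined in the engine type headers of this era:

    enum intel_engine_class {
            RENDER_CLASS            = 0,
            COPY_ENGINE_CLASS       = 1,
            VIDEO_DECODE_CLASS      = 2,
            VIDEO_ENHANCEMENT_CLASS = 3,
            OTHER_CLASS             = 4,
    };
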
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 0bd890c04fe4f7c911bd9bde1a79af11af08ff5c..08683dca7775032d2fb204a0163fa7b72b34f238 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -388,12 +388,9 @@ static void print_context_stats(struct seq_file *m,
 	struct i915_gem_context *ctx;
 
 	list_for_each_entry(ctx, &i915->contexts.list, link) {
-		struct intel_engine_cs *engine;
-		enum intel_engine_id id;
-
-		for_each_engine(engine, i915, id) {
-			struct intel_context *ce = to_intel_context(ctx, engine);
+		struct intel_context *ce;
 
+		list_for_each_entry(ce, &ctx->active_engines, active_link) {
 			if (ce->state)
 				per_file_stats(0, ce->state->obj, &kstats);
 			if (ce->ring)
@@ -1281,14 +1278,11 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
 	intel_wakeref_t wakeref;
 	enum intel_engine_id id;
 
+	seq_printf(m, "Reset flags: %lx\n", dev_priv->gpu_error.flags);
 	if (test_bit(I915_WEDGED, &dev_priv->gpu_error.flags))
-		seq_puts(m, "Wedged\n");
+		seq_puts(m, "\tWedged\n");
 	if (test_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags))
-		seq_puts(m, "Reset in progress: struct_mutex backoff\n");
-	if (waitqueue_active(&dev_priv->gpu_error.wait_queue))
-		seq_puts(m, "Waiter holding struct mutex\n");
-	if (waitqueue_active(&dev_priv->gpu_error.reset_queue))
-		seq_puts(m, "struct_mutex blocked for reset\n");
+		seq_puts(m, "\tDevice (global) reset in progress\n");
 
 	if (!i915_modparams.enable_hangcheck) {
 		seq_puts(m, "Hangcheck disabled\n");
@@ -1298,10 +1292,10 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
 	with_intel_runtime_pm(dev_priv, wakeref) {
 		for_each_engine(engine, dev_priv, id) {
 			acthd[id] = intel_engine_get_active_head(engine);
-			seqno[id] = intel_engine_get_seqno(engine);
+			seqno[id] = intel_engine_get_hangcheck_seqno(engine);
 		}
 
-		intel_engine_get_instdone(dev_priv->engine[RCS], &instdone);
+		intel_engine_get_instdone(dev_priv->engine[RCS0], &instdone);
 	}
 
 	if (timer_pending(&dev_priv->gpu_error.hangcheck_work.timer))
@@ -1318,8 +1312,9 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
 	for_each_engine(engine, dev_priv, id) {
 		seq_printf(m, "%s:\n", engine->name);
 		seq_printf(m, "\tseqno = %x [current %x, last %x], %dms ago\n",
-			   engine->hangcheck.seqno, seqno[id],
-			   intel_engine_last_submit(engine),
+			   engine->hangcheck.last_seqno,
+			   seqno[id],
+			   engine->hangcheck.next_seqno,
 			   jiffies_to_msecs(jiffies -
 					    engine->hangcheck.action_timestamp));
 
@@ -1327,7 +1322,7 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
 			   (long long)engine->hangcheck.acthd,
 			   (long long)acthd[id]);
 
-		if (engine->id == RCS) {
+		if (engine->id == RCS0) {
 			seq_puts(m, "\tinstdone read =\n");
 
 			i915_instdone_info(dev_priv, m, &instdone);
@@ -1882,9 +1877,7 @@ static int i915_context_status(struct seq_file *m, void *unused)
 {
 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
 	struct drm_device *dev = &dev_priv->drm;
-	struct intel_engine_cs *engine;
 	struct i915_gem_context *ctx;
-	enum intel_engine_id id;
 	int ret;
 
 	ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -1892,6 +1885,8 @@ static int i915_context_status(struct seq_file *m, void *unused)
 		return ret;
 
 	list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
+		struct intel_context *ce;
+
 		seq_puts(m, "HW context ");
 		if (!list_empty(&ctx->hw_id_link))
 			seq_printf(m, "%x [pin %u]", ctx->hw_id,
@@ -1914,11 +1909,8 @@ static int i915_context_status(struct seq_file *m, void *unused)
 		seq_putc(m, ctx->remap_slice ? 'R' : 'r');
 		seq_putc(m, '\n');
 
-		for_each_engine(engine, dev_priv, id) {
-			struct intel_context *ce =
-				to_intel_context(ctx, engine);
-
-			seq_printf(m, "%s: ", engine->name);
+		list_for_each_entry(ce, &ctx->active_engines, active_link) {
+			seq_printf(m, "%s: ", ce->engine->name);
 			if (ce->state)
 				describe_obj(m, ce->state->obj);
 			if (ce->ring)
@@ -2023,11 +2015,9 @@ static const char *rps_power_to_str(unsigned int power)
 static int i915_rps_boost_info(struct seq_file *m, void *data)
 {
 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
-	struct drm_device *dev = &dev_priv->drm;
 	struct intel_rps *rps = &dev_priv->gt_pm.rps;
 	u32 act_freq = rps->cur_freq;
 	intel_wakeref_t wakeref;
-	struct drm_file *file;
 
 	with_intel_runtime_pm_if_in_use(dev_priv, wakeref) {
 		if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
@@ -2061,22 +2051,7 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
 		   intel_gpu_freq(dev_priv, rps->efficient_freq),
 		   intel_gpu_freq(dev_priv, rps->boost_freq));
 
-	mutex_lock(&dev->filelist_mutex);
-	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
-		struct drm_i915_file_private *file_priv = file->driver_priv;
-		struct task_struct *task;
-
-		rcu_read_lock();
-		task = pid_task(file->pid, PIDTYPE_PID);
-		seq_printf(m, "%s [%d]: %d boosts\n",
-			   task ? task->comm : "<unknown>",
-			   task ? task->pid : -1,
-			   atomic_read(&file_priv->rps_client.boosts));
-		rcu_read_unlock();
-	}
-	seq_printf(m, "Kernel (anonymous) boosts: %d\n",
-		   atomic_read(&rps->boosts));
-	mutex_unlock(&dev->filelist_mutex);
+	seq_printf(m, "Wait boosts: %d\n", atomic_read(&rps->boosts));
 
 	if (INTEL_GEN(dev_priv) >= 6 &&
 	    rps->enabled &&
@@ -2607,7 +2582,6 @@ static int
 i915_edp_psr_debug_set(void *data, u64 val)
 {
 	struct drm_i915_private *dev_priv = data;
-	struct drm_modeset_acquire_ctx ctx;
 	intel_wakeref_t wakeref;
 	int ret;
 
@@ -2618,18 +2592,7 @@ i915_edp_psr_debug_set(void *data, u64 val)
 
 	wakeref = intel_runtime_pm_get(dev_priv);
 
-	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
-
-retry:
-	ret = intel_psr_set_debugfs_mode(dev_priv, &ctx, val);
-	if (ret == -EDEADLK) {
-		ret = drm_modeset_backoff(&ctx);
-		if (!ret)
-			goto retry;
-	}
-
-	drm_modeset_drop_locks(&ctx);
-	drm_modeset_acquire_fini(&ctx);
+	ret = intel_psr_debug_set(dev_priv, val);
 
 	intel_runtime_pm_put(dev_priv, wakeref);
 
@@ -2686,8 +2649,7 @@ static int i915_runtime_pm_status(struct seq_file *m, void *unused)
 	seq_printf(m, "Runtime power status: %s\n",
 		   enableddisabled(!dev_priv->power_domains.wakeref));
 
-	seq_printf(m, "GPU idle: %s (epoch %u)\n",
-		   yesno(!dev_priv->gt.awake), dev_priv->gt.epoch);
+	seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->gt.awake));
 	seq_printf(m, "IRQs disabled: %s\n",
 		   yesno(!intel_irqs_enabled(dev_priv)));
 #ifdef CONFIG_PM
@@ -3123,8 +3085,7 @@ static int i915_engine_info(struct seq_file *m, void *unused)
 
 	wakeref = intel_runtime_pm_get(dev_priv);
 
-	seq_printf(m, "GT awake? %s (epoch %u)\n",
-		   yesno(dev_priv->gt.awake), dev_priv->gt.epoch);
+	seq_printf(m, "GT awake? %s\n", yesno(dev_priv->gt.awake));
 	seq_printf(m, "Global active requests: %d\n",
 		   dev_priv->gt.active_requests);
 	seq_printf(m, "CS timestamp frequency: %u kHz\n",
@@ -3211,7 +3172,7 @@ static int i915_shared_dplls_info(struct seq_file *m, void *unused)
 static int i915_wa_registers(struct seq_file *m, void *unused)
 {
 	struct drm_i915_private *i915 = node_to_i915(m->private);
-	const struct i915_wa_list *wal = &i915->engine[RCS]->ctx_wa_list;
+	const struct i915_wa_list *wal = &i915->engine[RCS0]->ctx_wa_list;
 	struct i915_wa *wa;
 	unsigned int i;
 
@@ -3865,11 +3826,18 @@ static const struct file_operations i915_cur_wm_latency_fops = {
 static int
 i915_wedged_get(void *data, u64 *val)
 {
-	struct drm_i915_private *dev_priv = data;
+	int ret = i915_terminally_wedged(data);
 
-	*val = i915_terminally_wedged(&dev_priv->gpu_error);
-
-	return 0;
+	switch (ret) {
+	case -EIO:
+		*val = 1;
+		return 0;
+	case 0:
+		*val = 0;
+		return 0;
+	default:
+		return ret;
+	}
 }
 
 static int
@@ -3877,16 +3845,9 @@ i915_wedged_set(void *data, u64 val)
 {
 	struct drm_i915_private *i915 = data;
 
-	/*
-	 * There is no safeguard against this debugfs entry colliding
-	 * with the hangcheck calling same i915_handle_error() in
-	 * parallel, causing an explosion. For now we assume that the
-	 * test harness is responsible enough not to inject gpu hangs
-	 * while it is writing to 'i915_wedged'
-	 */
-
-	if (i915_reset_backoff(&i915->gpu_error))
-		return -EAGAIN;
+	/* Flush any previous reset before applying for a new one */
+	wait_event(i915->gpu_error.reset_queue,
+		   !test_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags));
 
 	i915_handle_error(i915, val, I915_ERROR_CAPTURE,
 			  "Manually set wedged engine mask = %llx", val);
@@ -3927,12 +3888,9 @@ static int
 i915_drop_caches_set(void *data, u64 val)
 {
 	struct drm_i915_private *i915 = data;
-	intel_wakeref_t wakeref;
-	int ret = 0;
 
 	DRM_DEBUG("Dropping caches: 0x%08llx [0x%08llx]\n",
 		  val, val & DROP_ALL);
-	wakeref = intel_runtime_pm_get(i915);
 
 	if (val & DROP_RESET_ACTIVE &&
 	    wait_for(intel_engines_are_idle(i915), I915_IDLE_ENGINES_TIMEOUT))
@@ -3941,9 +3899,11 @@ i915_drop_caches_set(void *data, u64 val)
 	/* No need to check and wait for gpu resets, only libdrm auto-restarts
 	 * on ioctls on -EAGAIN. */
 	if (val & (DROP_ACTIVE | DROP_RETIRE | DROP_RESET_SEQNO)) {
+		int ret;
+
 		ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
 		if (ret)
-			goto out;
+			return ret;
 
 		if (val & DROP_ACTIVE)
 			ret = i915_gem_wait_for_idle(i915,
@@ -3957,7 +3917,7 @@ i915_drop_caches_set(void *data, u64 val)
 		mutex_unlock(&i915->drm.struct_mutex);
 	}
 
-	if (val & DROP_RESET_ACTIVE && i915_terminally_wedged(&i915->gpu_error))
+	if (val & DROP_RESET_ACTIVE && i915_terminally_wedged(i915))
 		i915_handle_error(i915, ALL_ENGINES, 0, NULL);
 
 	fs_reclaim_acquire(GFP_KERNEL);
@@ -3982,10 +3942,7 @@ i915_drop_caches_set(void *data, u64 val)
 	if (val & DROP_FREED)
 		i915_gem_drain_freed_objects(i915);
 
-out:
-	intel_runtime_pm_put(i915, wakeref);
-
-	return ret;
+	return 0;
 }
 
 DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops,
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 9df65d386d11b40349df879fec224f0990a40b62..9e380cd317dcd0faf2a7bdfa9ed78316db208969 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -188,6 +188,11 @@ intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id)
 		DRM_DEBUG_KMS("Found Cannon Lake LP PCH (CNP-LP)\n");
 		WARN_ON(!IS_CANNONLAKE(dev_priv) && !IS_COFFEELAKE(dev_priv));
 		return PCH_CNP;
+	case INTEL_PCH_CMP_DEVICE_ID_TYPE:
+		DRM_DEBUG_KMS("Found Comet Lake PCH (CMP)\n");
+		WARN_ON(!IS_COFFEELAKE(dev_priv));
+		/* CometPoint is CNP Compatible */
+		return PCH_CNP;
 	case INTEL_PCH_ICP_DEVICE_ID_TYPE:
 		DRM_DEBUG_KMS("Found Ice Lake PCH\n");
 		WARN_ON(!IS_ICELAKE(dev_priv));
@@ -219,20 +224,20 @@ intel_virt_detect_pch(const struct drm_i915_private *dev_priv)
 	 * make an educated guess as to which PCH is really there.
 	 */
 
-	if (IS_GEN(dev_priv, 5))
-		id = INTEL_PCH_IBX_DEVICE_ID_TYPE;
-	else if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
-		id = INTEL_PCH_CPT_DEVICE_ID_TYPE;
+	if (IS_ICELAKE(dev_priv))
+		id = INTEL_PCH_ICP_DEVICE_ID_TYPE;
+	else if (IS_CANNONLAKE(dev_priv) || IS_COFFEELAKE(dev_priv))
+		id = INTEL_PCH_CNP_DEVICE_ID_TYPE;
+	else if (IS_KABYLAKE(dev_priv) || IS_SKYLAKE(dev_priv))
+		id = INTEL_PCH_SPT_DEVICE_ID_TYPE;
 	else if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv))
 		id = INTEL_PCH_LPT_LP_DEVICE_ID_TYPE;
 	else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
 		id = INTEL_PCH_LPT_DEVICE_ID_TYPE;
-	else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
-		id = INTEL_PCH_SPT_DEVICE_ID_TYPE;
-	else if (IS_COFFEELAKE(dev_priv) || IS_CANNONLAKE(dev_priv))
-		id = INTEL_PCH_CNP_DEVICE_ID_TYPE;
-	else if (IS_ICELAKE(dev_priv))
-		id = INTEL_PCH_ICP_DEVICE_ID_TYPE;
+	else if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
+		id = INTEL_PCH_CPT_DEVICE_ID_TYPE;
+	else if (IS_GEN(dev_priv, 5))
+		id = INTEL_PCH_IBX_DEVICE_ID_TYPE;
 
 	if (id)
 		DRM_DEBUG_KMS("Assuming PCH ID %04x\n", id);
@@ -330,16 +335,16 @@ static int i915_getparam_ioctl(struct drm_device *dev, void *data,
 		value = dev_priv->overlay ? 1 : 0;
 		break;
 	case I915_PARAM_HAS_BSD:
-		value = !!dev_priv->engine[VCS];
+		value = !!dev_priv->engine[VCS0];
 		break;
 	case I915_PARAM_HAS_BLT:
-		value = !!dev_priv->engine[BCS];
+		value = !!dev_priv->engine[BCS0];
 		break;
 	case I915_PARAM_HAS_VEBOX:
-		value = !!dev_priv->engine[VECS];
+		value = !!dev_priv->engine[VECS0];
 		break;
 	case I915_PARAM_HAS_BSD2:
-		value = !!dev_priv->engine[VCS2];
+		value = !!dev_priv->engine[VCS1];
 		break;
 	case I915_PARAM_HAS_LLC:
 		value = HAS_LLC(dev_priv);
@@ -348,10 +353,10 @@ static int i915_getparam_ioctl(struct drm_device *dev, void *data,
 		value = HAS_WT(dev_priv);
 		break;
 	case I915_PARAM_HAS_ALIASING_PPGTT:
-		value = min_t(int, INTEL_PPGTT(dev_priv), I915_GEM_PPGTT_FULL);
+		value = INTEL_PPGTT(dev_priv);
 		break;
 	case I915_PARAM_HAS_SEMAPHORES:
-		value = 0;
+		value = !!(dev_priv->caps.scheduler & I915_SCHEDULER_CAP_SEMAPHORES);
 		break;
 	case I915_PARAM_HAS_SECURE_BATCHES:
 		value = capable(CAP_SYS_ADMIN);
@@ -714,8 +719,7 @@ static int i915_load_modeset_init(struct drm_device *dev)
 	return 0;
 
 cleanup_gem:
-	if (i915_gem_suspend(dev_priv))
-		DRM_ERROR("failed to idle hardware; continuing to unload!\n");
+	i915_gem_suspend(dev_priv);
 	i915_gem_fini(dev_priv);
 cleanup_modeset:
 	intel_modeset_cleanup(dev);
@@ -873,6 +877,7 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv)
 	mutex_init(&dev_priv->av_mutex);
 	mutex_init(&dev_priv->wm.wm_mutex);
 	mutex_init(&dev_priv->pps_mutex);
+	mutex_init(&dev_priv->hdcp_comp_mutex);
 
 	i915_memcpy_init_early(dev_priv);
 	intel_runtime_pm_init_early(dev_priv);
@@ -1034,110 +1039,180 @@ static void intel_sanitize_options(struct drm_i915_private *dev_priv)
 	intel_gvt_sanitize_options(dev_priv);
 }
 
-static enum dram_rank skl_get_dimm_rank(u8 size, u32 rank)
+#define DRAM_TYPE_STR(type) [INTEL_DRAM_ ## type] = #type
+
+static const char *intel_dram_type_str(enum intel_dram_type type)
 {
-	if (size == 0)
-		return I915_DRAM_RANK_INVALID;
-	if (rank == SKL_DRAM_RANK_SINGLE)
-		return I915_DRAM_RANK_SINGLE;
-	else if (rank == SKL_DRAM_RANK_DUAL)
-		return I915_DRAM_RANK_DUAL;
+	static const char * const str[] = {
+		DRAM_TYPE_STR(UNKNOWN),
+		DRAM_TYPE_STR(DDR3),
+		DRAM_TYPE_STR(DDR4),
+		DRAM_TYPE_STR(LPDDR3),
+		DRAM_TYPE_STR(LPDDR4),
+	};
 
-	return I915_DRAM_RANK_INVALID;
+	if (type >= ARRAY_SIZE(str))
+		type = INTEL_DRAM_UNKNOWN;
+
+	return str[type];
 }
 
-static bool
-skl_is_16gb_dimm(enum dram_rank rank, u8 size, u8 width)
+#undef DRAM_TYPE_STR
+
+static int intel_dimm_num_devices(const struct dram_dimm_info *dimm)
 {
-	if (rank == I915_DRAM_RANK_SINGLE && width == 8 && size == 16)
-		return true;
-	else if (rank == I915_DRAM_RANK_DUAL && width == 8 && size == 32)
-		return true;
-	else if (rank == SKL_DRAM_RANK_SINGLE && width == 16 && size == 8)
-		return true;
-	else if (rank == SKL_DRAM_RANK_DUAL && width == 16 && size == 16)
-		return true;
+	return dimm->ranks * 64 / (dimm->width ?: 1);
+}
 
-	return false;
+/* Returns total GB for the whole DIMM */
+static int skl_get_dimm_size(u16 val)
+{
+	return val & SKL_DRAM_SIZE_MASK;
 }
 
-static int
-skl_dram_get_channel_info(struct dram_channel_info *ch, u32 val)
+static int skl_get_dimm_width(u16 val)
 {
-	u32 tmp_l, tmp_s;
-	u32 s_val = val >> SKL_DRAM_S_SHIFT;
+	if (skl_get_dimm_size(val) == 0)
+		return 0;
 
-	if (!val)
-		return -EINVAL;
+	switch (val & SKL_DRAM_WIDTH_MASK) {
+	case SKL_DRAM_WIDTH_X8:
+	case SKL_DRAM_WIDTH_X16:
+	case SKL_DRAM_WIDTH_X32:
+		val = (val & SKL_DRAM_WIDTH_MASK) >> SKL_DRAM_WIDTH_SHIFT;
+		return 8 << val;
+	default:
+		MISSING_CASE(val);
+		return 0;
+	}
+}
+
+static int skl_get_dimm_ranks(u16 val)
+{
+	if (skl_get_dimm_size(val) == 0)
+		return 0;
+
+	val = (val & SKL_DRAM_RANK_MASK) >> SKL_DRAM_RANK_SHIFT;
+
+	return val + 1;
+}
 
-	tmp_l = val & SKL_DRAM_SIZE_MASK;
-	tmp_s = s_val & SKL_DRAM_SIZE_MASK;
+/* Returns total GB for the whole DIMM */
+static int cnl_get_dimm_size(u16 val)
+{
+	return (val & CNL_DRAM_SIZE_MASK) / 2;
+}
+
+static int cnl_get_dimm_width(u16 val)
+{
+	if (cnl_get_dimm_size(val) == 0)
+		return 0;
 
-	if (tmp_l == 0 && tmp_s == 0)
+	switch (val & CNL_DRAM_WIDTH_MASK) {
+	case CNL_DRAM_WIDTH_X8:
+	case CNL_DRAM_WIDTH_X16:
+	case CNL_DRAM_WIDTH_X32:
+		val = (val & CNL_DRAM_WIDTH_MASK) >> CNL_DRAM_WIDTH_SHIFT;
+		return 8 << val;
+	default:
+		MISSING_CASE(val);
+		return 0;
+	}
+}
+
+static int cnl_get_dimm_ranks(u16 val)
+{
+	if (cnl_get_dimm_size(val) == 0)
+		return 0;
+
+	val = (val & CNL_DRAM_RANK_MASK) >> CNL_DRAM_RANK_SHIFT;
+
+	return val + 1;
+}
+
+static bool
+skl_is_16gb_dimm(const struct dram_dimm_info *dimm)
+{
+	/* Convert total GB to Gb per DRAM device */
+	return 8 * dimm->size / (intel_dimm_num_devices(dimm) ?: 1) == 16;
+}
+
+static void
+skl_dram_get_dimm_info(struct drm_i915_private *dev_priv,
+		       struct dram_dimm_info *dimm,
+		       int channel, char dimm_name, u16 val)
+{
+	if (INTEL_GEN(dev_priv) >= 10) {
+		dimm->size = cnl_get_dimm_size(val);
+		dimm->width = cnl_get_dimm_width(val);
+		dimm->ranks = cnl_get_dimm_ranks(val);
+	} else {
+		dimm->size = skl_get_dimm_size(val);
+		dimm->width = skl_get_dimm_width(val);
+		dimm->ranks = skl_get_dimm_ranks(val);
+	}
+
+	DRM_DEBUG_KMS("CH%u DIMM %c size: %u GB, width: X%u, ranks: %u, 16Gb DIMMs: %s\n",
+		      channel, dimm_name, dimm->size, dimm->width, dimm->ranks,
+		      yesno(skl_is_16gb_dimm(dimm)));
+}
+
+static int
+skl_dram_get_channel_info(struct drm_i915_private *dev_priv,
+			  struct dram_channel_info *ch,
+			  int channel, u32 val)
+{
+	skl_dram_get_dimm_info(dev_priv, &ch->dimm_l,
+			       channel, 'L', val & 0xffff);
+	skl_dram_get_dimm_info(dev_priv, &ch->dimm_s,
+			       channel, 'S', val >> 16);
+
+	if (ch->dimm_l.size == 0 && ch->dimm_s.size == 0) {
+		DRM_DEBUG_KMS("CH%u not populated\n", channel);
 		return -EINVAL;
+	}
 
-	ch->l_info.size = tmp_l;
-	ch->s_info.size = tmp_s;
-
-	tmp_l = (val & SKL_DRAM_WIDTH_MASK) >> SKL_DRAM_WIDTH_SHIFT;
-	tmp_s = (s_val & SKL_DRAM_WIDTH_MASK) >> SKL_DRAM_WIDTH_SHIFT;
-	ch->l_info.width = (1 << tmp_l) * 8;
-	ch->s_info.width = (1 << tmp_s) * 8;
-
-	tmp_l = val & SKL_DRAM_RANK_MASK;
-	tmp_s = s_val & SKL_DRAM_RANK_MASK;
-	ch->l_info.rank = skl_get_dimm_rank(ch->l_info.size, tmp_l);
-	ch->s_info.rank = skl_get_dimm_rank(ch->s_info.size, tmp_s);
-
-	if (ch->l_info.rank == I915_DRAM_RANK_DUAL ||
-	    ch->s_info.rank == I915_DRAM_RANK_DUAL)
-		ch->rank = I915_DRAM_RANK_DUAL;
-	else if (ch->l_info.rank == I915_DRAM_RANK_SINGLE &&
-		 ch->s_info.rank == I915_DRAM_RANK_SINGLE)
-		ch->rank = I915_DRAM_RANK_DUAL;
+	if (ch->dimm_l.ranks == 2 || ch->dimm_s.ranks == 2)
+		ch->ranks = 2;
+	else if (ch->dimm_l.ranks == 1 && ch->dimm_s.ranks == 1)
+		ch->ranks = 2;
 	else
-		ch->rank = I915_DRAM_RANK_SINGLE;
+		ch->ranks = 1;
 
-	ch->is_16gb_dimm = skl_is_16gb_dimm(ch->l_info.rank, ch->l_info.size,
-					    ch->l_info.width) ||
-			   skl_is_16gb_dimm(ch->s_info.rank, ch->s_info.size,
-					    ch->s_info.width);
+	ch->is_16gb_dimm =
+		skl_is_16gb_dimm(&ch->dimm_l) ||
+		skl_is_16gb_dimm(&ch->dimm_s);
 
-	DRM_DEBUG_KMS("(size:width:rank) L(%dGB:X%d:%s) S(%dGB:X%d:%s)\n",
-		      ch->l_info.size, ch->l_info.width,
-		      ch->l_info.rank ? "dual" : "single",
-		      ch->s_info.size, ch->s_info.width,
-		      ch->s_info.rank ? "dual" : "single");
+	DRM_DEBUG_KMS("CH%u ranks: %u, 16Gb DIMMs: %s\n",
+		      channel, ch->ranks, yesno(ch->is_16gb_dimm));
 
 	return 0;
 }
 
 static bool
-intel_is_dram_symmetric(u32 val_ch0, u32 val_ch1,
-			struct dram_channel_info *ch0)
+intel_is_dram_symmetric(const struct dram_channel_info *ch0,
+			const struct dram_channel_info *ch1)
 {
-	return (val_ch0 == val_ch1 &&
-		(ch0->s_info.size == 0 ||
-		 (ch0->l_info.size == ch0->s_info.size &&
-		  ch0->l_info.width == ch0->s_info.width &&
-		  ch0->l_info.rank == ch0->s_info.rank)));
+	return !memcmp(ch0, ch1, sizeof(*ch0)) &&
+		(ch0->dimm_s.size == 0 ||
+		 !memcmp(&ch0->dimm_l, &ch0->dimm_s, sizeof(ch0->dimm_l)));
 }
 
 static int
 skl_dram_get_channels_info(struct drm_i915_private *dev_priv)
 {
 	struct dram_info *dram_info = &dev_priv->dram_info;
-	struct dram_channel_info ch0, ch1;
-	u32 val_ch0, val_ch1;
+	struct dram_channel_info ch0 = {}, ch1 = {};
+	u32 val;
 	int ret;
 
-	val_ch0 = I915_READ(SKL_MAD_DIMM_CH0_0_0_0_MCHBAR_MCMAIN);
-	ret = skl_dram_get_channel_info(&ch0, val_ch0);
+	val = I915_READ(SKL_MAD_DIMM_CH0_0_0_0_MCHBAR_MCMAIN);
+	ret = skl_dram_get_channel_info(dev_priv, &ch0, 0, val);
 	if (ret == 0)
 		dram_info->num_channels++;
 
-	val_ch1 = I915_READ(SKL_MAD_DIMM_CH1_0_0_0_MCHBAR_MCMAIN);
-	ret = skl_dram_get_channel_info(&ch1, val_ch1);
+	val = I915_READ(SKL_MAD_DIMM_CH1_0_0_0_MCHBAR_MCMAIN);
+	ret = skl_dram_get_channel_info(dev_priv, &ch1, 1, val);
 	if (ret == 0)
 		dram_info->num_channels++;
 
@@ -1151,28 +1226,47 @@ skl_dram_get_channels_info(struct drm_i915_private *dev_priv)
 	 * will be the same as with single rank memory, so treat it as
 	 * single rank memory.
 	 */
-	if (ch0.rank == I915_DRAM_RANK_SINGLE ||
-	    ch1.rank == I915_DRAM_RANK_SINGLE)
-		dram_info->rank = I915_DRAM_RANK_SINGLE;
+	if (ch0.ranks == 1 || ch1.ranks == 1)
+		dram_info->ranks = 1;
 	else
-		dram_info->rank = max(ch0.rank, ch1.rank);
+		dram_info->ranks = max(ch0.ranks, ch1.ranks);
 
-	if (dram_info->rank == I915_DRAM_RANK_INVALID) {
+	if (dram_info->ranks == 0) {
 		DRM_INFO("couldn't get memory rank information\n");
 		return -EINVAL;
 	}
 
 	dram_info->is_16gb_dimm = ch0.is_16gb_dimm || ch1.is_16gb_dimm;
 
-	dev_priv->dram_info.symmetric_memory = intel_is_dram_symmetric(val_ch0,
-								       val_ch1,
-								       &ch0);
+	dram_info->symmetric_memory = intel_is_dram_symmetric(&ch0, &ch1);
 
-	DRM_DEBUG_KMS("memory configuration is %sSymmetric memory\n",
-		      dev_priv->dram_info.symmetric_memory ? "" : "not ");
+	DRM_DEBUG_KMS("Memory configuration is symmetric? %s\n",
+		      yesno(dram_info->symmetric_memory));
 	return 0;
 }
 
+static enum intel_dram_type
+skl_get_dram_type(struct drm_i915_private *dev_priv)
+{
+	u32 val;
+
+	val = I915_READ(SKL_MAD_INTER_CHANNEL_0_0_0_MCHBAR_MCMAIN);
+
+	switch (val & SKL_DRAM_DDR_TYPE_MASK) {
+	case SKL_DRAM_DDR_TYPE_DDR3:
+		return INTEL_DRAM_DDR3;
+	case SKL_DRAM_DDR_TYPE_DDR4:
+		return INTEL_DRAM_DDR4;
+	case SKL_DRAM_DDR_TYPE_LPDDR3:
+		return INTEL_DRAM_LPDDR3;
+	case SKL_DRAM_DDR_TYPE_LPDDR4:
+		return INTEL_DRAM_LPDDR4;
+	default:
+		MISSING_CASE(val);
+		return INTEL_DRAM_UNKNOWN;
+	}
+}
+
 static int
 skl_get_dram_info(struct drm_i915_private *dev_priv)
 {
@@ -1180,6 +1274,9 @@ skl_get_dram_info(struct drm_i915_private *dev_priv)
 	u32 mem_freq_khz, val;
 	int ret;
 
+	dram_info->type = skl_get_dram_type(dev_priv);
+	DRM_DEBUG_KMS("DRAM type: %s\n", intel_dram_type_str(dram_info->type));
+
 	ret = skl_dram_get_channels_info(dev_priv);
 	if (ret)
 		return ret;
@@ -1200,6 +1297,85 @@ skl_get_dram_info(struct drm_i915_private *dev_priv)
 	return 0;
 }
 
+/* Returns Gb per DRAM device */
+static int bxt_get_dimm_size(u32 val)
+{
+	switch (val & BXT_DRAM_SIZE_MASK) {
+	case BXT_DRAM_SIZE_4GBIT:
+		return 4;
+	case BXT_DRAM_SIZE_6GBIT:
+		return 6;
+	case BXT_DRAM_SIZE_8GBIT:
+		return 8;
+	case BXT_DRAM_SIZE_12GBIT:
+		return 12;
+	case BXT_DRAM_SIZE_16GBIT:
+		return 16;
+	default:
+		MISSING_CASE(val);
+		return 0;
+	}
+}
+
+static int bxt_get_dimm_width(u32 val)
+{
+	if (!bxt_get_dimm_size(val))
+		return 0;
+
+	val = (val & BXT_DRAM_WIDTH_MASK) >> BXT_DRAM_WIDTH_SHIFT;
+
+	return 8 << val;
+}
+
+static int bxt_get_dimm_ranks(u32 val)
+{
+	if (!bxt_get_dimm_size(val))
+		return 0;
+
+	switch (val & BXT_DRAM_RANK_MASK) {
+	case BXT_DRAM_RANK_SINGLE:
+		return 1;
+	case BXT_DRAM_RANK_DUAL:
+		return 2;
+	default:
+		MISSING_CASE(val);
+		return 0;
+	}
+}
+
+static enum intel_dram_type bxt_get_dimm_type(u32 val)
+{
+	if (!bxt_get_dimm_size(val))
+		return INTEL_DRAM_UNKNOWN;
+
+	switch (val & BXT_DRAM_TYPE_MASK) {
+	case BXT_DRAM_TYPE_DDR3:
+		return INTEL_DRAM_DDR3;
+	case BXT_DRAM_TYPE_LPDDR3:
+		return INTEL_DRAM_LPDDR3;
+	case BXT_DRAM_TYPE_DDR4:
+		return INTEL_DRAM_DDR4;
+	case BXT_DRAM_TYPE_LPDDR4:
+		return INTEL_DRAM_LPDDR4;
+	default:
+		MISSING_CASE(val);
+		return INTEL_DRAM_UNKNOWN;
+	}
+}
+
+static void bxt_get_dimm_info(struct dram_dimm_info *dimm,
+			      u32 val)
+{
+	dimm->width = bxt_get_dimm_width(val);
+	dimm->ranks = bxt_get_dimm_ranks(val);
+
+	/*
+	 * Size in register is Gb per DRAM device. Convert to total
+	 * GB to match the way we report this for non-LP platforms.
+	 */
+	dimm->size = bxt_get_dimm_size(val) * intel_dimm_num_devices(dimm) / 8;
+}
+
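
The size conversion in bxt_get_dimm_info() deserves a worked example. Assuming intel_dimm_num_devices() computes the device count for a standard 64-bit channel as ranks * 64 / width (an assumption here; the helper is defined outside this hunk), a dual-rank x16 DIMM built from 8 Gbit devices comes out as 8 GB:

/* Hedged sketch: intel_dimm_num_devices() is assumed to be
 * ranks * 64 / width for a standard 64-bit channel; the real helper
 * is outside this hunk.
 */
#include <assert.h>

struct dimm { int size, width, ranks; };

static int num_devices(const struct dimm *d)
{
	return d->ranks * 64 / d->width;
}

int main(void)
{
	struct dimm d = { .width = 16, .ranks = 2 };
	int gbit_per_device = 8;	/* BXT_DRAM_SIZE_8GBIT */

	/* 2 ranks * 64 / 16 = 8 devices; 8 Gbit * 8 devices / 8 = 8 GB. */
	d.size = gbit_per_device * num_devices(&d) / 8;
	assert(d.size == 8);
	return 0;
}
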
 static int
 bxt_get_dram_info(struct drm_i915_private *dev_priv)
 {
@@ -1228,57 +1404,44 @@ bxt_get_dram_info(struct drm_i915_private *dev_priv)
 	 * Now read each DUNIT8/9/10/11 to check the rank of each dimms.
 	 */
 	for (i = BXT_D_CR_DRP0_DUNIT_START; i <= BXT_D_CR_DRP0_DUNIT_END; i++) {
-		u8 size, width;
-		enum dram_rank rank;
-		u32 tmp;
+		struct dram_dimm_info dimm;
+		enum intel_dram_type type;
 
 		val = I915_READ(BXT_D_CR_DRP0_DUNIT(i));
 		if (val == 0xFFFFFFFF)
 			continue;
 
 		dram_info->num_channels++;
-		tmp = val & BXT_DRAM_RANK_MASK;
-
-		if (tmp == BXT_DRAM_RANK_SINGLE)
-			rank = I915_DRAM_RANK_SINGLE;
-		else if (tmp == BXT_DRAM_RANK_DUAL)
-			rank = I915_DRAM_RANK_DUAL;
-		else
-			rank = I915_DRAM_RANK_INVALID;
-
-		tmp = val & BXT_DRAM_SIZE_MASK;
-		if (tmp == BXT_DRAM_SIZE_4GB)
-			size = 4;
-		else if (tmp == BXT_DRAM_SIZE_6GB)
-			size = 6;
-		else if (tmp == BXT_DRAM_SIZE_8GB)
-			size = 8;
-		else if (tmp == BXT_DRAM_SIZE_12GB)
-			size = 12;
-		else if (tmp == BXT_DRAM_SIZE_16GB)
-			size = 16;
-		else
-			size = 0;
-
-		tmp = (val & BXT_DRAM_WIDTH_MASK) >> BXT_DRAM_WIDTH_SHIFT;
-		width = (1 << tmp) * 8;
-		DRM_DEBUG_KMS("dram size:%dGB width:X%d rank:%s\n", size,
-			      width, rank == I915_DRAM_RANK_SINGLE ? "single" :
-			      rank == I915_DRAM_RANK_DUAL ? "dual" : "unknown");
+
+		bxt_get_dimm_info(&dimm, val);
+		type = bxt_get_dimm_type(val);
+
+		WARN_ON(type != INTEL_DRAM_UNKNOWN &&
+			dram_info->type != INTEL_DRAM_UNKNOWN &&
+			dram_info->type != type);
+
+		DRM_DEBUG_KMS("CH%u DIMM size: %u GB, width: X%u, ranks: %u, type: %s\n",
+			      i - BXT_D_CR_DRP0_DUNIT_START,
+			      dimm.size, dimm.width, dimm.ranks,
+			      intel_dram_type_str(type));
 
 		/*
 		 * If any of the channel is single rank channel,
 		 * worst case output will be same as if single rank
 		 * memory, so consider single rank memory.
 		 */
-		if (dram_info->rank == I915_DRAM_RANK_INVALID)
-			dram_info->rank = rank;
-		else if (rank == I915_DRAM_RANK_SINGLE)
-			dram_info->rank = I915_DRAM_RANK_SINGLE;
+		if (dram_info->ranks == 0)
+			dram_info->ranks = dimm.ranks;
+		else if (dimm.ranks == 1)
+			dram_info->ranks = 1;
+
+		if (type != INTEL_DRAM_UNKNOWN)
+			dram_info->type = type;
 	}
 
-	if (dram_info->rank == I915_DRAM_RANK_INVALID) {
-		DRM_INFO("couldn't get memory rank information\n");
+	if (dram_info->type == INTEL_DRAM_UNKNOWN ||
+	    dram_info->ranks == 0) {
+		DRM_INFO("couldn't get memory information\n");
 		return -EINVAL;
 	}
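
The worst-case rank rule used in the loop above (any populated single-rank channel caps the whole configuration at single rank) reads naturally as a fold over channels; a hedged sketch:

/* Hedged sketch of the rank-aggregation rule above. */
#include <assert.h>

static int combine_ranks(int total, int channel)
{
	if (total == 0)		/* first populated channel */
		return channel;
	if (channel == 1)	/* any single-rank channel wins */
		return 1;
	return total;
}

int main(void)
{
	int channels[] = { 2, 1 };	/* dual-rank, then single-rank */
	int ranks = 0;
	unsigned int i;

	for (i = 0; i < sizeof(channels) / sizeof(*channels); i++)
		ranks = combine_ranks(ranks, channels[i]);

	assert(ranks == 1);	/* worst case: treat as single rank */
	return 0;
}
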
 
@@ -1290,14 +1453,8 @@ static void
 intel_get_dram_info(struct drm_i915_private *dev_priv)
 {
 	struct dram_info *dram_info = &dev_priv->dram_info;
-	char bandwidth_str[32];
 	int ret;
 
-	dram_info->valid = false;
-	dram_info->rank = I915_DRAM_RANK_INVALID;
-	dram_info->bandwidth_kbps = 0;
-	dram_info->num_channels = 0;
-
 	/*
 	 * Assume 16Gb DIMMs are present until proven otherwise.
 	 * This is only used for the level 0 watermark latency
@@ -1305,28 +1462,22 @@ intel_get_dram_info(struct drm_i915_private *dev_priv)
 	 */
 	dram_info->is_16gb_dimm = !IS_GEN9_LP(dev_priv);
 
-	if (INTEL_GEN(dev_priv) < 9 || IS_GEMINILAKE(dev_priv))
+	if (INTEL_GEN(dev_priv) < 9)
 		return;
 
-	/* Need to calculate bandwidth only for Gen9 */
-	if (IS_BROXTON(dev_priv))
+	if (IS_GEN9_LP(dev_priv))
 		ret = bxt_get_dram_info(dev_priv);
-	else if (IS_GEN(dev_priv, 9))
-		ret = skl_get_dram_info(dev_priv);
 	else
-		ret = skl_dram_get_channels_info(dev_priv);
+		ret = skl_get_dram_info(dev_priv);
 	if (ret)
 		return;
 
-	if (dram_info->bandwidth_kbps)
-		sprintf(bandwidth_str, "%d KBps", dram_info->bandwidth_kbps);
-	else
-		sprintf(bandwidth_str, "unknown");
-	DRM_DEBUG_KMS("DRAM bandwidth:%s, total-channels: %u\n",
-		      bandwidth_str, dram_info->num_channels);
-	DRM_DEBUG_KMS("DRAM rank: %s rank 16GB-dimm:%s\n",
-		      (dram_info->rank == I915_DRAM_RANK_DUAL) ?
-		      "dual" : "single", yesno(dram_info->is_16gb_dimm));
+	DRM_DEBUG_KMS("DRAM bandwidth: %u kBps, channels: %u\n",
+		      dram_info->bandwidth_kbps,
+		      dram_info->num_channels);
+
+	DRM_DEBUG_KMS("DRAM ranks: %u, 16Gb DIMMs: %s\n",
+		      dram_info->ranks, yesno(dram_info->is_16gb_dimm));
 }
 
 /**
@@ -1348,7 +1499,7 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
 
 	if (HAS_PPGTT(dev_priv)) {
 		if (intel_vgpu_active(dev_priv) &&
-		    !intel_vgpu_has_full_48bit_ppgtt(dev_priv)) {
+		    !intel_vgpu_has_full_ppgtt(dev_priv)) {
 			i915_report_error(dev_priv,
 					  "incompatible vGPU found, support for isolated ppGTT required\n");
 			return -ENXIO;
@@ -1753,8 +1904,7 @@ void i915_driver_unload(struct drm_device *dev)
 	/* Flush any external code that still may be under the RCU lock */
 	synchronize_rcu();
 
-	if (i915_gem_suspend(dev_priv))
-		DRM_ERROR("failed to idle hardware; continuing to unload!\n");
+	i915_gem_suspend(dev_priv);
 
 	drm_atomic_helper_shutdown(dev);
 
@@ -1862,7 +2012,6 @@ static bool suspend_to_idle(struct drm_i915_private *dev_priv)
 static int i915_drm_prepare(struct drm_device *dev)
 {
 	struct drm_i915_private *i915 = to_i915(dev);
-	int err;
 
 	/*
 	 * NB intel_display_suspend() may issue new requests after we've
@@ -1870,12 +2019,9 @@ static int i915_drm_prepare(struct drm_device *dev)
 	 * split out that work and pull it forward so that after point,
 	 * the GPU is not woken again.
 	 */
-	err = i915_gem_suspend(i915);
-	if (err)
-		dev_err(&i915->drm.pdev->dev,
-			"GEM idle failed, suspend/resume might fail\n");
+	i915_gem_suspend(i915);
 
-	return err;
+	return 0;
 }
 
 static int i915_drm_suspend(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 9adc7bb9e69ccfec96e468f95435b83e084ffcce..363b2d3e4d50b214633f2be811f04c5d8d2d6892 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -55,6 +55,7 @@
 #include <drm/drm_util.h>
 #include <drm/drm_dsc.h>
 #include <drm/drm_connector.h>
+#include <drm/i915_mei_hdcp_interface.h>
 
 #include "i915_fixed.h"
 #include "i915_params.h"
@@ -91,8 +92,8 @@
 
 #define DRIVER_NAME		"i915"
 #define DRIVER_DESC		"Intel Graphics"
-#define DRIVER_DATE		"20190207"
-#define DRIVER_TIMESTAMP	1549572331
+#define DRIVER_DATE		"20190320"
+#define DRIVER_TIMESTAMP	1553069028
 
 /* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and
  * WARN_ON()) for hw state sanity checks to check for unexpected conditions
@@ -217,10 +218,6 @@ struct drm_i915_file_private {
 	} mm;
 	struct idr context_idr;
 
-	struct intel_rps_client {
-		atomic_t boosts;
-	} rps_client;
-
 	unsigned int bsd_engine;
 
 /*
@@ -508,7 +505,7 @@ struct i915_psr {
 
 	u32 debug;
 	bool sink_support;
-	bool prepared, enabled;
+	bool enabled;
 	struct intel_dp *dp;
 	enum pipe pipe;
 	bool active;
@@ -526,16 +523,22 @@ struct i915_psr {
 	u16 su_x_granularity;
 };
 
+/*
+ * Sorted by south display engine compatibility.
+ * If a new PCH comes with a south display engine that is not
+ * inherited from the latest entry, do not add it at the end;
+ * add it right after its "parent" PCH instead.
+ */
 enum intel_pch {
+	PCH_NOP = -1,	/* PCH without south display */
 	PCH_NONE = 0,	/* No PCH present */
 	PCH_IBX,	/* Ibexpeak PCH */
 	PCH_CPT,	/* Cougarpoint/Pantherpoint PCH */
 	PCH_LPT,	/* Lynxpoint/Wildcatpoint PCH */
 	PCH_SPT,        /* Sunrisepoint PCH */
 	PCH_KBP,        /* Kaby Lake PCH */
-	PCH_CNP,        /* Cannon Lake PCH */
+	PCH_CNP,        /* Cannon/Comet Lake PCH */
 	PCH_ICP,	/* Ice Lake PCH */
-	PCH_NOP,	/* PCH without south display */
 };
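
Keeping the enum sorted this way is what lets callers compare PCH generations with a single relational test; a hedged illustration of the idiom (the helper below is an example, not part of the patch):

/* Hedged illustration: PCH_NOP (-1) and PCH_NONE (0) sort below every
 * real PCH, so "at least Sunrisepoint-class south display" becomes a
 * single comparison against the sorted enum.
 */
static bool pch_is_spt_class_or_newer(enum intel_pch pch)
{
	return pch >= PCH_SPT;
}
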
 
 enum intel_sbi_destination {
@@ -1009,6 +1012,7 @@ struct intel_vbt_data {
 		enum psr_lines_to_wait lines_to_wait;
 		int tp1_wakeup_time_us;
 		int tp2_tp3_wakeup_time_us;
+		int psr2_tp2_tp3_wakeup_time_us;
 	} psr;
 
 	struct {
@@ -1130,6 +1134,7 @@ struct skl_wm_level {
 	u16 plane_res_b;
 	u8 plane_res_l;
 	bool plane_en;
+	bool ignore_lines;
 };
 
 /* Stores plane specific WM parameters */
@@ -1200,7 +1205,11 @@ enum intel_pipe_crc_source {
 	INTEL_PIPE_CRC_SOURCE_NONE,
 	INTEL_PIPE_CRC_SOURCE_PLANE1,
 	INTEL_PIPE_CRC_SOURCE_PLANE2,
-	INTEL_PIPE_CRC_SOURCE_PF,
+	INTEL_PIPE_CRC_SOURCE_PLANE3,
+	INTEL_PIPE_CRC_SOURCE_PLANE4,
+	INTEL_PIPE_CRC_SOURCE_PLANE5,
+	INTEL_PIPE_CRC_SOURCE_PLANE6,
+	INTEL_PIPE_CRC_SOURCE_PLANE7,
 	INTEL_PIPE_CRC_SOURCE_PIPE,
 	/* TV/DP on pre-gen5/vlv can't use the pipe source. */
 	INTEL_PIPE_CRC_SOURCE_TV,
@@ -1468,13 +1477,6 @@ struct intel_cdclk_state {
 struct drm_i915_private {
 	struct drm_device drm;
 
-	struct kmem_cache *objects;
-	struct kmem_cache *vmas;
-	struct kmem_cache *luts;
-	struct kmem_cache *requests;
-	struct kmem_cache *dependencies;
-	struct kmem_cache *priorities;
-
 	const struct intel_device_info __info; /* Use INTEL_INFO() to access. */
 	struct intel_runtime_info __runtime; /* Use RUNTIME_INFO() to access. */
 	struct intel_driver_caps caps;
@@ -1831,13 +1833,16 @@ struct drm_i915_private {
 		bool valid;
 		bool is_16gb_dimm;
 		u8 num_channels;
-		enum dram_rank {
-			I915_DRAM_RANK_INVALID = 0,
-			I915_DRAM_RANK_SINGLE,
-			I915_DRAM_RANK_DUAL
-		} rank;
+		u8 ranks;
 		u32 bandwidth_kbps;
 		bool symmetric_memory;
+		enum intel_dram_type {
+			INTEL_DRAM_UNKNOWN,
+			INTEL_DRAM_DDR3,
+			INTEL_DRAM_DDR4,
+			INTEL_DRAM_LPDDR3,
+			INTEL_DRAM_LPDDR4
+		} type;
 	} dram_info;
 
 	struct i915_runtime_pm runtime_pm;
@@ -1997,6 +2002,7 @@ struct drm_i915_private {
 			struct list_head hwsp_free_list;
 		} timelines;
 
+		intel_engine_mask_t active_engines;
 		struct list_head active_rings;
 		struct list_head closed_vma;
 		u32 active_requests;
@@ -2010,12 +2016,6 @@ struct drm_i915_private {
 		 */
 		intel_wakeref_t awake;
 
-		/**
-		 * The number of times we have woken up.
-		 */
-		unsigned int epoch;
-#define I915_EPOCH_INVALID 0
-
 		/**
 		 * We leave the user IRQ off as much as possible,
 		 * but this means that requests will finish and never
@@ -2055,18 +2055,25 @@ struct drm_i915_private {
 
 	struct i915_pmu pmu;
 
+	struct i915_hdcp_comp_master *hdcp_master;
+	bool hdcp_comp_added;
+
+	/* Mutex to protect the above hdcp component related values. */
+	struct mutex hdcp_comp_mutex;
+
 	/*
 	 * NOTE: This is the dri1/ums dungeon, don't add stuff here. Your patch
 	 * will be rejected. Instead look for a better place.
 	 */
 };
 
+struct dram_dimm_info {
+	u8 size, width, ranks;
+};
+
 struct dram_channel_info {
-	struct info {
-		u8 size, width;
-		enum dram_rank rank;
-	} l_info, s_info;
-	enum dram_rank rank;
+	struct dram_dimm_info dimm_l, dimm_s;
+	u8 ranks;
 	bool is_16gb_dimm;
 };
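
A channel is now described by its two DIMM slots plus the derived rank count; a hedged, illustrative initialization of the new layout:

/* Hedged example: a channel with one dual-rank 8 GB x16 DIMM in
 * slot L and an empty slot S; all values are illustrative.
 */
static const struct dram_channel_info example_ch = {
	.dimm_l = { .size = 8, .width = 16, .ranks = 2 },
	.dimm_s = {},		/* slot not populated */
	.ranks = 2,
	.is_16gb_dimm = false,
};
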
 
@@ -2104,7 +2111,7 @@ static inline struct drm_i915_private *huc_to_i915(struct intel_huc *huc)
 
 /* Iterator over subset of engines selected by mask */
 #define for_each_engine_masked(engine__, dev_priv__, mask__, tmp__) \
-	for ((tmp__) = (mask__) & INTEL_INFO(dev_priv__)->ring_mask; \
+	for ((tmp__) = (mask__) & INTEL_INFO(dev_priv__)->engine_mask; \
 	     (tmp__) ? \
 	     ((engine__) = (dev_priv__)->engine[__mask_next_bit(tmp__)]), 1 : \
 	     0;)
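
The iterator's calling convention is unchanged by the ring_mask -> engine_mask rename; a usage sketch (the loop body is illustrative only):

/* Hedged usage sketch for the iterator; the body is illustrative. */
static void log_selected_engines(struct drm_i915_private *i915,
				 intel_engine_mask_t mask)
{
	struct intel_engine_cs *engine;
	intel_engine_mask_t tmp;

	for_each_engine_masked(engine, i915, mask, tmp)
		pr_info("engine %s selected\n", engine->name);
}
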
@@ -2425,24 +2432,8 @@ static inline unsigned int i915_sg_segment_size(void)
 #define IS_GEN9_LP(dev_priv)	(IS_GEN(dev_priv, 9) && IS_LP(dev_priv))
 #define IS_GEN9_BC(dev_priv)	(IS_GEN(dev_priv, 9) && !IS_LP(dev_priv))
 
-#define ENGINE_MASK(id)	BIT(id)
-#define RENDER_RING	ENGINE_MASK(RCS)
-#define BSD_RING	ENGINE_MASK(VCS)
-#define BLT_RING	ENGINE_MASK(BCS)
-#define VEBOX_RING	ENGINE_MASK(VECS)
-#define BSD2_RING	ENGINE_MASK(VCS2)
-#define BSD3_RING	ENGINE_MASK(VCS3)
-#define BSD4_RING	ENGINE_MASK(VCS4)
-#define VEBOX2_RING	ENGINE_MASK(VECS2)
-#define ALL_ENGINES	(~0)
-
-#define HAS_ENGINE(dev_priv, id) \
-	(!!(INTEL_INFO(dev_priv)->ring_mask & ENGINE_MASK(id)))
-
-#define HAS_BSD(dev_priv)	HAS_ENGINE(dev_priv, VCS)
-#define HAS_BSD2(dev_priv)	HAS_ENGINE(dev_priv, VCS2)
-#define HAS_BLT(dev_priv)	HAS_ENGINE(dev_priv, BCS)
-#define HAS_VEBOX(dev_priv)	HAS_ENGINE(dev_priv, VECS)
+#define ALL_ENGINES	(~0u)
+#define HAS_ENGINE(dev_priv, id) (INTEL_INFO(dev_priv)->engine_mask & BIT(id))
 
 #define HAS_LLC(dev_priv)	(INTEL_INFO(dev_priv)->has_llc)
 #define HAS_SNOOP(dev_priv)	(INTEL_INFO(dev_priv)->has_snoop)
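
With the per-engine convenience macros dropped, former HAS_BSD2()-style checks become direct HAS_ENGINE() queries; a sketch, where setup_second_bsd_ring() is a hypothetical helper named only for illustration:

/* Hedged sketch: the old HAS_BSD2(i915) becomes an engine_mask query.
 * setup_second_bsd_ring() is hypothetical.
 */
static void probe_bsd2(struct drm_i915_private *i915)
{
	if (HAS_ENGINE(i915, VCS1))	/* second video engine present? */
		setup_second_bsd_ring(i915);
}
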
@@ -2461,13 +2452,11 @@ static inline unsigned int i915_sg_segment_size(void)
 
 #define HAS_EXECLISTS(dev_priv) HAS_LOGICAL_RING_CONTEXTS(dev_priv)
 
-#define INTEL_PPGTT(dev_priv) (INTEL_INFO(dev_priv)->ppgtt)
+#define INTEL_PPGTT(dev_priv) (INTEL_INFO(dev_priv)->ppgtt_type)
 #define HAS_PPGTT(dev_priv) \
 	(INTEL_PPGTT(dev_priv) != INTEL_PPGTT_NONE)
 #define HAS_FULL_PPGTT(dev_priv) \
 	(INTEL_PPGTT(dev_priv) >= INTEL_PPGTT_FULL)
-#define HAS_FULL_48BIT_PPGTT(dev_priv)	\
-	(INTEL_PPGTT(dev_priv) >= INTEL_PPGTT_FULL_4LVL)
 
 #define HAS_PAGE_SIZES(dev_priv, sizes) ({ \
 	GEM_BUG_ON((sizes) == 0); \
@@ -2511,6 +2500,7 @@ static inline unsigned int i915_sg_segment_size(void)
 #define HAS_DDI(dev_priv)		 (INTEL_INFO(dev_priv)->display.has_ddi)
 #define HAS_FPGA_DBG_UNCLAIMED(dev_priv) (INTEL_INFO(dev_priv)->has_fpga_dbg)
 #define HAS_PSR(dev_priv)		 (INTEL_INFO(dev_priv)->display.has_psr)
+#define HAS_TRANSCODER_EDP(dev_priv)	 (INTEL_INFO(dev_priv)->trans_offsets[TRANSCODER_EDP] != 0)
 
 #define HAS_RC6(dev_priv)		 (INTEL_INFO(dev_priv)->has_rc6)
 #define HAS_RC6p(dev_priv)		 (INTEL_INFO(dev_priv)->has_rc6p)
@@ -2557,6 +2547,7 @@ static inline unsigned int i915_sg_segment_size(void)
 #define INTEL_PCH_KBP_DEVICE_ID_TYPE		0xA280
 #define INTEL_PCH_CNP_DEVICE_ID_TYPE		0xA300
 #define INTEL_PCH_CNP_LP_DEVICE_ID_TYPE		0x9D80
+#define INTEL_PCH_CMP_DEVICE_ID_TYPE		0x0280
 #define INTEL_PCH_ICP_DEVICE_ID_TYPE		0x3480
 #define INTEL_PCH_P2X_DEVICE_ID_TYPE		0x7100
 #define INTEL_PCH_P3X_DEVICE_ID_TYPE		0x7000
@@ -2566,8 +2557,6 @@ static inline unsigned int i915_sg_segment_size(void)
 #define INTEL_PCH_ID(dev_priv) ((dev_priv)->pch_id)
 #define HAS_PCH_ICP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_ICP)
 #define HAS_PCH_CNP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_CNP)
-#define HAS_PCH_CNP_LP(dev_priv) \
-	(INTEL_PCH_ID(dev_priv) == INTEL_PCH_CNP_LP_DEVICE_ID_TYPE)
 #define HAS_PCH_KBP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_KBP)
 #define HAS_PCH_SPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_SPT)
 #define HAS_PCH_LPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_LPT)
@@ -2799,8 +2788,6 @@ void i915_gem_load_init_fences(struct drm_i915_private *dev_priv);
 int i915_gem_freeze(struct drm_i915_private *dev_priv);
 int i915_gem_freeze_late(struct drm_i915_private *dev_priv);
 
-void *i915_gem_object_alloc(struct drm_i915_private *dev_priv);
-void i915_gem_object_free(struct drm_i915_gem_object *obj);
 void i915_gem_object_init(struct drm_i915_gem_object *obj,
 			 const struct drm_i915_gem_object_ops *ops);
 struct drm_i915_gem_object *
@@ -3001,7 +2988,12 @@ i915_gem_obj_finish_shmem_access(struct drm_i915_gem_object *obj)
 	i915_gem_object_unpin_pages(obj);
 }
 
-int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
+static inline int __must_check
+i915_mutex_lock_interruptible(struct drm_device *dev)
+{
+	return mutex_lock_interruptible(&dev->struct_mutex);
+}
+
 int i915_gem_dumb_create(struct drm_file *file_priv,
 			 struct drm_device *dev,
 			 struct drm_mode_create_dumb *args);
@@ -3015,22 +3007,14 @@ void i915_gem_track_fb(struct drm_i915_gem_object *old,
 
 int __must_check i915_gem_set_global_seqno(struct drm_device *dev, u32 seqno);
 
-struct i915_request *
-i915_gem_find_active_request(struct intel_engine_cs *engine);
-
-static inline bool i915_reset_backoff(struct i915_gpu_error *error)
-{
-	return unlikely(test_bit(I915_RESET_BACKOFF, &error->flags));
-}
-
-static inline bool i915_terminally_wedged(struct i915_gpu_error *error)
+static inline bool __i915_wedged(struct i915_gpu_error *error)
 {
 	return unlikely(test_bit(I915_WEDGED, &error->flags));
 }
 
-static inline bool i915_reset_backoff_or_wedged(struct i915_gpu_error *error)
+static inline bool i915_reset_failed(struct drm_i915_private *i915)
 {
-	return i915_reset_backoff(error) | i915_terminally_wedged(error);
+	return __i915_wedged(&i915->gpu_error);
 }
 
 static inline u32 i915_reset_count(struct i915_gpu_error *error)
@@ -3055,14 +3039,13 @@ void i915_gem_fini(struct drm_i915_private *dev_priv);
 void i915_gem_cleanup_engines(struct drm_i915_private *dev_priv);
 int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv,
 			   unsigned int flags, long timeout);
-int __must_check i915_gem_suspend(struct drm_i915_private *dev_priv);
+void i915_gem_suspend(struct drm_i915_private *dev_priv);
 void i915_gem_suspend_late(struct drm_i915_private *dev_priv);
 void i915_gem_resume(struct drm_i915_private *dev_priv);
 vm_fault_t i915_gem_fault(struct vm_fault *vmf);
 int i915_gem_object_wait(struct drm_i915_gem_object *obj,
 			 unsigned int flags,
-			 long timeout,
-			 struct intel_rps_client *rps);
+			 long timeout);
 int i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
 				  unsigned int flags,
 				  const struct i915_sched_attr *attr);
@@ -3105,7 +3088,6 @@ struct drm_i915_fence_reg *
 i915_reserve_fence(struct drm_i915_private *dev_priv);
 void i915_unreserve_fence(struct drm_i915_fence_reg *fence);
 
-void i915_gem_revoke_fences(struct drm_i915_private *dev_priv);
 void i915_gem_restore_fences(struct drm_i915_private *dev_priv);
 
 void i915_gem_detect_bit_6_swizzle(struct drm_i915_private *dev_priv);
@@ -3141,7 +3123,7 @@ int i915_perf_add_config_ioctl(struct drm_device *dev, void *data,
 int i915_perf_remove_config_ioctl(struct drm_device *dev, void *data,
 				  struct drm_file *file);
 void i915_oa_init_reg_state(struct intel_engine_cs *engine,
-			    struct i915_gem_context *ctx,
+			    struct intel_context *ce,
 			    u32 *reg_state);
 
 /* i915_gem_evict.c */
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 8558e81fdc2af85dd52486b7c8d55580fe997373..b7086c8d472639b2f8de608d37a501b8643c2f19 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -42,6 +42,7 @@
 #include "i915_drv.h"
 #include "i915_gem_clflush.h"
 #include "i915_gemfs.h"
+#include "i915_globals.h"
 #include "i915_reset.h"
 #include "i915_trace.h"
 #include "i915_vgpu.h"
@@ -100,48 +101,7 @@ static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
 	spin_unlock(&dev_priv->mm.object_stat_lock);
 }
 
-static int
-i915_gem_wait_for_error(struct i915_gpu_error *error)
-{
-	int ret;
-
-	might_sleep();
-
-	/*
-	 * Only wait 10 seconds for the gpu reset to complete to avoid hanging
-	 * userspace. If it takes that long something really bad is going on and
-	 * we should simply try to bail out and fail as gracefully as possible.
-	 */
-	ret = wait_event_interruptible_timeout(error->reset_queue,
-					       !i915_reset_backoff(error),
-					       I915_RESET_TIMEOUT);
-	if (ret == 0) {
-		DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
-		return -EIO;
-	} else if (ret < 0) {
-		return ret;
-	} else {
-		return 0;
-	}
-}
-
-int i915_mutex_lock_interruptible(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	int ret;
-
-	ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
-	if (ret)
-		return ret;
-
-	ret = mutex_lock_interruptible(&dev->struct_mutex);
-	if (ret)
-		return ret;
-
-	return 0;
-}
-
-static u32 __i915_gem_park(struct drm_i915_private *i915)
+static void __i915_gem_park(struct drm_i915_private *i915)
 {
 	intel_wakeref_t wakeref;
 
@@ -152,9 +112,7 @@ static u32 __i915_gem_park(struct drm_i915_private *i915)
 	GEM_BUG_ON(!list_empty(&i915->gt.active_rings));
 
 	if (!i915->gt.awake)
-		return I915_EPOCH_INVALID;
-
-	GEM_BUG_ON(i915->gt.epoch == I915_EPOCH_INVALID);
+		return;
 
 	/*
 	 * Be paranoid and flush a concurrent interrupt to make sure
@@ -183,7 +141,7 @@ static u32 __i915_gem_park(struct drm_i915_private *i915)
 
 	intel_display_power_put(i915, POWER_DOMAIN_GT_IRQ, wakeref);
 
-	return i915->gt.epoch;
+	i915_globals_park();
 }
 
 void i915_gem_park(struct drm_i915_private *i915)
@@ -225,8 +183,7 @@ void i915_gem_unpark(struct drm_i915_private *i915)
 	i915->gt.awake = intel_display_power_get(i915, POWER_DOMAIN_GT_IRQ);
 	GEM_BUG_ON(!i915->gt.awake);
 
-	if (unlikely(++i915->gt.epoch == 0)) /* keep 0 as invalid */
-		i915->gt.epoch = 1;
+	i915_globals_unpark();
 
 	intel_enable_gt_powersave(i915);
 	i915_update_gfx_val(i915);
@@ -459,8 +416,7 @@ int i915_gem_object_unbind(struct drm_i915_gem_object *obj)
 static long
 i915_gem_object_wait_fence(struct dma_fence *fence,
 			   unsigned int flags,
-			   long timeout,
-			   struct intel_rps_client *rps_client)
+			   long timeout)
 {
 	struct i915_request *rq;
 
@@ -478,27 +434,6 @@ i915_gem_object_wait_fence(struct dma_fence *fence,
 	if (i915_request_completed(rq))
 		goto out;
 
-	/*
-	 * This client is about to stall waiting for the GPU. In many cases
-	 * this is undesirable and limits the throughput of the system, as
-	 * many clients cannot continue processing user input/output whilst
-	 * blocked. RPS autotuning may take tens of milliseconds to respond
-	 * to the GPU load and thus incurs additional latency for the client.
-	 * We can circumvent that by promoting the GPU frequency to maximum
-	 * before we wait. This makes the GPU throttle up much more quickly
-	 * (good for benchmarks and user experience, e.g. window animations),
-	 * but at a cost of spending more power processing the workload
-	 * (bad for battery). Not all clients even want their results
-	 * immediately and for them we should just let the GPU select its own
-	 * frequency to maximise efficiency. To prevent a single client from
-	 * forcing the clocks too high for the whole system, we only allow
-	 * each client to waitboost once in a busy period.
-	 */
-	if (rps_client && !i915_request_started(rq)) {
-		if (INTEL_GEN(rq->i915) >= 6)
-			gen6_rps_boost(rq, rps_client);
-	}
-
 	timeout = i915_request_wait(rq, flags, timeout);
 
 out:
@@ -511,8 +446,7 @@ i915_gem_object_wait_fence(struct dma_fence *fence,
 static long
 i915_gem_object_wait_reservation(struct reservation_object *resv,
 				 unsigned int flags,
-				 long timeout,
-				 struct intel_rps_client *rps_client)
+				 long timeout)
 {
 	unsigned int seq = __read_seqcount_begin(&resv->seq);
 	struct dma_fence *excl;
@@ -530,8 +464,7 @@ i915_gem_object_wait_reservation(struct reservation_object *resv,
 
 		for (i = 0; i < count; i++) {
 			timeout = i915_gem_object_wait_fence(shared[i],
-							     flags, timeout,
-							     rps_client);
+							     flags, timeout);
 			if (timeout < 0)
 				break;
 
@@ -557,8 +490,7 @@ i915_gem_object_wait_reservation(struct reservation_object *resv,
 	}
 
 	if (excl && timeout >= 0)
-		timeout = i915_gem_object_wait_fence(excl, flags, timeout,
-						     rps_client);
+		timeout = i915_gem_object_wait_fence(excl, flags, timeout);
 
 	dma_fence_put(excl);
 
@@ -652,30 +584,19 @@ i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
  * @obj: i915 gem object
  * @flags: how to wait (under a lock, for all rendering or just for writes etc)
  * @timeout: how long to wait
- * @rps_client: client (user process) to charge for any waitboosting
  */
 int
 i915_gem_object_wait(struct drm_i915_gem_object *obj,
 		     unsigned int flags,
-		     long timeout,
-		     struct intel_rps_client *rps_client)
+		     long timeout)
 {
 	might_sleep();
 	GEM_BUG_ON(timeout < 0);
 
-	timeout = i915_gem_object_wait_reservation(obj->resv,
-						   flags, timeout,
-						   rps_client);
+	timeout = i915_gem_object_wait_reservation(obj->resv, flags, timeout);
 	return timeout < 0 ? timeout : 0;
 }
 
-static struct intel_rps_client *to_rps_client(struct drm_file *file)
-{
-	struct drm_i915_file_private *fpriv = file->driver_priv;
-
-	return &fpriv->rps_client;
-}
-
 static int
 i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
 		     struct drm_i915_gem_pwrite *args,
@@ -698,17 +619,6 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
 	return 0;
 }
 
-void *i915_gem_object_alloc(struct drm_i915_private *dev_priv)
-{
-	return kmem_cache_zalloc(dev_priv->objects, GFP_KERNEL);
-}
-
-void i915_gem_object_free(struct drm_i915_gem_object *obj)
-{
-	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
-	kmem_cache_free(dev_priv->objects, obj);
-}
-
 static int
 i915_gem_create(struct drm_file *file,
 		struct drm_i915_private *dev_priv,
@@ -881,8 +791,7 @@ int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
 	ret = i915_gem_object_wait(obj,
 				   I915_WAIT_INTERRUPTIBLE |
 				   I915_WAIT_LOCKED,
-				   MAX_SCHEDULE_TIMEOUT,
-				   NULL);
+				   MAX_SCHEDULE_TIMEOUT);
 	if (ret)
 		return ret;
 
@@ -934,8 +843,7 @@ int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj,
 				   I915_WAIT_INTERRUPTIBLE |
 				   I915_WAIT_LOCKED |
 				   I915_WAIT_ALL,
-				   MAX_SCHEDULE_TIMEOUT,
-				   NULL);
+				   MAX_SCHEDULE_TIMEOUT);
 	if (ret)
 		return ret;
 
@@ -1197,8 +1105,7 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
 
 	ret = i915_gem_object_wait(obj,
 				   I915_WAIT_INTERRUPTIBLE,
-				   MAX_SCHEDULE_TIMEOUT,
-				   to_rps_client(file));
+				   MAX_SCHEDULE_TIMEOUT);
 	if (ret)
 		goto out;
 
@@ -1497,8 +1404,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
 	ret = i915_gem_object_wait(obj,
 				   I915_WAIT_INTERRUPTIBLE |
 				   I915_WAIT_ALL,
-				   MAX_SCHEDULE_TIMEOUT,
-				   to_rps_client(file));
+				   MAX_SCHEDULE_TIMEOUT);
 	if (ret)
 		goto err;
 
@@ -1596,8 +1502,7 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
 				   I915_WAIT_INTERRUPTIBLE |
 				   I915_WAIT_PRIORITY |
 				   (write_domain ? I915_WAIT_ALL : 0),
-				   MAX_SCHEDULE_TIMEOUT,
-				   to_rps_client(file));
+				   MAX_SCHEDULE_TIMEOUT);
 	if (err)
 		goto out;
 
@@ -1891,6 +1796,7 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf)
 	intel_wakeref_t wakeref;
 	struct i915_vma *vma;
 	pgoff_t page_offset;
+	int srcu;
 	int ret;
 
 	/* Sanity check that we allow writing into this object */
@@ -1909,8 +1815,7 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf)
 	 */
 	ret = i915_gem_object_wait(obj,
 				   I915_WAIT_INTERRUPTIBLE,
-				   MAX_SCHEDULE_TIMEOUT,
-				   NULL);
+				   MAX_SCHEDULE_TIMEOUT);
 	if (ret)
 		goto err;
 
@@ -1920,9 +1825,15 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf)
 
 	wakeref = intel_runtime_pm_get(dev_priv);
 
+	srcu = i915_reset_trylock(dev_priv);
+	if (srcu < 0) {
+		ret = srcu;
+		goto err_rpm;
+	}
+
 	ret = i915_mutex_lock_interruptible(dev);
 	if (ret)
-		goto err_rpm;
+		goto err_reset;
 
 	/* Access to snoopable pages through the GTT is incoherent. */
 	if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev_priv)) {
@@ -1930,7 +1841,6 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf)
 		goto err_unlock;
 	}
 
-
 	/* Now pin it into the GTT as needed */
 	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
 				       PIN_MAPPABLE |
@@ -1995,6 +1905,8 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf)
 	__i915_vma_unpin(vma);
 err_unlock:
 	mutex_unlock(&dev->struct_mutex);
+err_reset:
+	i915_reset_unlock(dev_priv, srcu);
 err_rpm:
 	intel_runtime_pm_put(dev_priv, wakeref);
 	i915_gem_object_unpin_pages(obj);
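
The unwind order here mirrors acquisition in reverse: the SRCU read lock from i915_reset_trylock() is released after struct_mutex but before the runtime-pm reference. A stripped-down sketch of that discipline (the hardware-touching body is elided):

/* Hedged sketch of the lock/unwind ordering used in i915_gem_fault();
 * touch_hw() is a stand-in name.
 */
static int touch_hw(struct drm_i915_private *i915)
{
	int srcu, ret;

	srcu = i915_reset_trylock(i915);	/* <0 if interrupted/wedged */
	if (srcu < 0)
		return srcu;

	ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
	if (ret)
		goto err_reset;

	/* ... hardware access that a concurrent reset would invalidate ... */

	mutex_unlock(&i915->drm.struct_mutex);
err_reset:
	i915_reset_unlock(i915, srcu);
	return ret;
}
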
@@ -2007,7 +1919,7 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf)
 		 * fail). But any other -EIO isn't ours (e.g. swap in failure)
 		 * and so needs to be reported.
 		 */
-		if (!i915_terminally_wedged(&dev_priv->gpu_error))
+		if (!i915_terminally_wedged(dev_priv))
 			return VM_FAULT_SIGBUS;
 		/* else: fall through */
 	case -EAGAIN:
@@ -2488,7 +2400,7 @@ static int i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 		do {
 			cond_resched();
 			page = shmem_read_mapping_page_gfp(mapping, i, gfp);
-			if (likely(!IS_ERR(page)))
+			if (!IS_ERR(page))
 				break;
 
 			if (!*s) {
@@ -2895,51 +2807,6 @@ i915_gem_object_pwrite_gtt(struct drm_i915_gem_object *obj,
 	return 0;
 }
 
-static bool match_ring(struct i915_request *rq)
-{
-	struct drm_i915_private *dev_priv = rq->i915;
-	u32 ring = I915_READ(RING_START(rq->engine->mmio_base));
-
-	return ring == i915_ggtt_offset(rq->ring->vma);
-}
-
-struct i915_request *
-i915_gem_find_active_request(struct intel_engine_cs *engine)
-{
-	struct i915_request *request, *active = NULL;
-	unsigned long flags;
-
-	/*
-	 * We are called by the error capture, reset and to dump engine
-	 * state at random points in time. In particular, note that neither is
-	 * crucially ordered with an interrupt. After a hang, the GPU is dead
-	 * and we assume that no more writes can happen (we waited long enough
-	 * for all writes that were in transaction to be flushed) - adding an
-	 * extra delay for a recent interrupt is pointless. Hence, we do
-	 * not need an engine->irq_seqno_barrier() before the seqno reads.
-	 * At all other times, we must assume the GPU is still running, but
-	 * we only care about the snapshot of this moment.
-	 */
-	spin_lock_irqsave(&engine->timeline.lock, flags);
-	list_for_each_entry(request, &engine->timeline.requests, link) {
-		if (i915_request_completed(request))
-			continue;
-
-		if (!i915_request_started(request))
-			break;
-
-		/* More than one preemptible request may match! */
-		if (!match_ring(request))
-			break;
-
-		active = request;
-		break;
-	}
-	spin_unlock_irqrestore(&engine->timeline.lock, flags);
-
-	return active;
-}
-
 static void
 i915_gem_retire_work_handler(struct work_struct *work)
 {
@@ -2964,180 +2831,105 @@ i915_gem_retire_work_handler(struct work_struct *work)
 				   round_jiffies_up_relative(HZ));
 }
 
-static void shrink_caches(struct drm_i915_private *i915)
+static bool switch_to_kernel_context_sync(struct drm_i915_private *i915,
+					  unsigned long mask)
 {
-	/*
-	 * kmem_cache_shrink() discards empty slabs and reorders partially
-	 * filled slabs to prioritise allocating from the mostly full slabs,
-	 * with the aim of reducing fragmentation.
-	 */
-	kmem_cache_shrink(i915->priorities);
-	kmem_cache_shrink(i915->dependencies);
-	kmem_cache_shrink(i915->requests);
-	kmem_cache_shrink(i915->luts);
-	kmem_cache_shrink(i915->vmas);
-	kmem_cache_shrink(i915->objects);
-}
-
-struct sleep_rcu_work {
-	union {
-		struct rcu_head rcu;
-		struct work_struct work;
-	};
-	struct drm_i915_private *i915;
-	unsigned int epoch;
-};
+	bool result = true;
 
-static inline bool
-same_epoch(struct drm_i915_private *i915, unsigned int epoch)
-{
 	/*
-	 * There is a small chance that the epoch wrapped since we started
-	 * sleeping. If we assume that epoch is at least a u32, then it will
-	 * take at least 2^32 * 100ms for it to wrap, or about 326 years.
+	 * Even if we fail to switch, give whatever is running a small chance
+	 * to save itself before we report the failure. Yes, this may be a
+	 * false positive due to e.g. ENOMEM, caveat emptor!
 	 */
-	return epoch == READ_ONCE(i915->gt.epoch);
-}
-
-static void __sleep_work(struct work_struct *work)
-{
-	struct sleep_rcu_work *s = container_of(work, typeof(*s), work);
-	struct drm_i915_private *i915 = s->i915;
-	unsigned int epoch = s->epoch;
-
-	kfree(s);
-	if (same_epoch(i915, epoch))
-		shrink_caches(i915);
-}
+	if (i915_gem_switch_to_kernel_context(i915, mask))
+		result = false;
 
-static void __sleep_rcu(struct rcu_head *rcu)
-{
-	struct sleep_rcu_work *s = container_of(rcu, typeof(*s), rcu);
-	struct drm_i915_private *i915 = s->i915;
-
-	destroy_rcu_head(&s->rcu);
+	if (i915_gem_wait_for_idle(i915,
+				   I915_WAIT_LOCKED |
+				   I915_WAIT_FOR_IDLE_BOOST,
+				   I915_GEM_IDLE_TIMEOUT))
+		result = false;
+
+	if (!result) {
+		if (i915_modparams.reset) { /* XXX hide warning from gem_eio */
+			dev_err(i915->drm.dev,
+				"Failed to idle engines, declaring wedged!\n");
+			GEM_TRACE_DUMP();
+		}
 
-	if (same_epoch(i915, s->epoch)) {
-		INIT_WORK(&s->work, __sleep_work);
-		queue_work(i915->wq, &s->work);
-	} else {
-		kfree(s);
+		/* Forcibly cancel outstanding work and leave the gpu quiet. */
+		i915_gem_set_wedged(i915);
 	}
-}
 
-static inline bool
-new_requests_since_last_retire(const struct drm_i915_private *i915)
-{
-	return (READ_ONCE(i915->gt.active_requests) ||
-		work_pending(&i915->gt.idle_work.work));
+	i915_retire_requests(i915); /* ensure we flush after wedging */
+	return result;
 }
 
-static void assert_kernel_context_is_current(struct drm_i915_private *i915)
+static bool load_power_context(struct drm_i915_private *i915)
 {
-	struct intel_engine_cs *engine;
-	enum intel_engine_id id;
+	/* Force loading the kernel context on all engines */
+	if (!switch_to_kernel_context_sync(i915, ALL_ENGINES))
+		return false;
 
-	if (i915_terminally_wedged(&i915->gpu_error))
-		return;
+	/*
+	 * Immediately park the GPU so that we enable powersaving and
+	 * treat it as idle. The next time we issue a request, we will
+	 * unpark and start using the engine->pinned_default_state, otherwise
+	 * it is in limbo and an early reset may fail.
+	 */
+	__i915_gem_park(i915);
 
-	GEM_BUG_ON(i915->gt.active_requests);
-	for_each_engine(engine, i915, id) {
-		GEM_BUG_ON(__i915_active_request_peek(&engine->timeline.last_request));
-		GEM_BUG_ON(engine->last_retired_context !=
-			   to_intel_context(i915->kernel_context, engine));
-	}
+	return true;
 }
 
 static void
 i915_gem_idle_work_handler(struct work_struct *work)
 {
-	struct drm_i915_private *dev_priv =
-		container_of(work, typeof(*dev_priv), gt.idle_work.work);
-	unsigned int epoch = I915_EPOCH_INVALID;
+	struct drm_i915_private *i915 =
+		container_of(work, typeof(*i915), gt.idle_work.work);
 	bool rearm_hangcheck;
 
-	if (!READ_ONCE(dev_priv->gt.awake))
+	if (!READ_ONCE(i915->gt.awake))
 		return;
 
-	if (READ_ONCE(dev_priv->gt.active_requests))
+	if (READ_ONCE(i915->gt.active_requests))
 		return;
 
-	/*
-	 * Flush out the last user context, leaving only the pinned
-	 * kernel context resident. When we are idling on the kernel_context,
-	 * no more new requests (with a context switch) are emitted and we
-	 * can finally rest. A consequence is that the idle work handler is
-	 * always called at least twice before idling (and if the system is
-	 * idle that implies a round trip through the retire worker).
-	 */
-	mutex_lock(&dev_priv->drm.struct_mutex);
-	i915_gem_switch_to_kernel_context(dev_priv);
-	mutex_unlock(&dev_priv->drm.struct_mutex);
-
-	GEM_TRACE("active_requests=%d (after switch-to-kernel-context)\n",
-		  READ_ONCE(dev_priv->gt.active_requests));
-
-	/*
-	 * Wait for last execlists context complete, but bail out in case a
-	 * new request is submitted. As we don't trust the hardware, we
-	 * continue on if the wait times out. This is necessary to allow
-	 * the machine to suspend even if the hardware dies, and we will
-	 * try to recover in resume (after depriving the hardware of power,
-	 * it may be in a better mmod).
-	 */
-	__wait_for(if (new_requests_since_last_retire(dev_priv)) return,
-		   intel_engines_are_idle(dev_priv),
-		   I915_IDLE_ENGINES_TIMEOUT * 1000,
-		   10, 500);
-
 	rearm_hangcheck =
-		cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
+		cancel_delayed_work_sync(&i915->gpu_error.hangcheck_work);
 
-	if (!mutex_trylock(&dev_priv->drm.struct_mutex)) {
+	if (!mutex_trylock(&i915->drm.struct_mutex)) {
 		/* Currently busy, come back later */
-		mod_delayed_work(dev_priv->wq,
-				 &dev_priv->gt.idle_work,
+		mod_delayed_work(i915->wq,
+				 &i915->gt.idle_work,
 				 msecs_to_jiffies(50));
 		goto out_rearm;
 	}
 
 	/*
-	 * New request retired after this work handler started, extend active
-	 * period until next instance of the work.
+	 * Flush out the last user context, leaving only the pinned
+	 * kernel context resident. Should anything unfortunate happen
+	 * while we are idle (such as the GPU being power cycled), no users
+	 * will be harmed.
 	 */
-	if (new_requests_since_last_retire(dev_priv))
-		goto out_unlock;
+	if (!work_pending(&i915->gt.idle_work.work) &&
+	    !i915->gt.active_requests) {
+		++i915->gt.active_requests; /* don't requeue idle */
 
-	epoch = __i915_gem_park(dev_priv);
+		switch_to_kernel_context_sync(i915, i915->gt.active_engines);
 
-	assert_kernel_context_is_current(dev_priv);
+		if (!--i915->gt.active_requests) {
+			__i915_gem_park(i915);
+			rearm_hangcheck = false;
+		}
+	}
 
-	rearm_hangcheck = false;
-out_unlock:
-	mutex_unlock(&dev_priv->drm.struct_mutex);
+	mutex_unlock(&i915->drm.struct_mutex);
 
 out_rearm:
 	if (rearm_hangcheck) {
-		GEM_BUG_ON(!dev_priv->gt.awake);
-		i915_queue_hangcheck(dev_priv);
-	}
-
-	/*
-	 * When we are idle, it is an opportune time to reap our caches.
-	 * However, we have many objects that utilise RCU and the ordered
-	 * i915->wq that this work is executing on. To try and flush any
-	 * pending frees now we are idle, we first wait for an RCU grace
-	 * period, and then queue a task (that will run last on the wq) to
-	 * shrink and re-optimize the caches.
-	 */
-	if (same_epoch(dev_priv, epoch)) {
-		struct sleep_rcu_work *s = kmalloc(sizeof(*s), GFP_KERNEL);
-		if (s) {
-			init_rcu_head(&s->rcu);
-			s->i915 = dev_priv;
-			s->epoch = epoch;
-			call_rcu(&s->rcu, __sleep_rcu);
-		}
+		GEM_BUG_ON(!i915->gt.awake);
+		i915_queue_hangcheck(i915);
 	}
 }
 
@@ -3171,7 +2963,7 @@ void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
 		list_del(&lut->obj_link);
 		list_del(&lut->ctx_link);
 
-		kmem_cache_free(i915->luts, lut);
+		i915_lut_handle_free(lut);
 		__i915_gem_object_release_unless_active(obj);
 	}
 
@@ -3234,8 +3026,7 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 				   I915_WAIT_INTERRUPTIBLE |
 				   I915_WAIT_PRIORITY |
 				   I915_WAIT_ALL,
-				   to_wait_timeout(args->timeout_ns),
-				   to_rps_client(file));
+				   to_wait_timeout(args->timeout_ns));
 
 	if (args->timeout_ns > 0) {
 		args->timeout_ns -= ktime_to_ns(ktime_sub(ktime_get(), start));
@@ -3304,7 +3095,7 @@ wait_for_timelines(struct drm_i915_private *i915,
 		 * stalls, so allow the gpu to boost to maximum clocks.
 		 */
 		if (flags & I915_WAIT_FOR_IDLE_BOOST)
-			gen6_rps_boost(rq, NULL);
+			gen6_rps_boost(rq);
 
 		timeout = i915_request_wait(rq, flags, timeout);
 		i915_request_put(rq);
@@ -3340,19 +3131,11 @@ int i915_gem_wait_for_idle(struct drm_i915_private *i915,
 
 		lockdep_assert_held(&i915->drm.struct_mutex);
 
-		if (GEM_SHOW_DEBUG() && !timeout) {
-			/* Presume that timeout was non-zero to begin with! */
-			dev_warn(&i915->drm.pdev->dev,
-				 "Missed idle-completion interrupt!\n");
-			GEM_TRACE_DUMP();
-		}
-
 		err = wait_for_engines(i915);
 		if (err)
 			return err;
 
 		i915_retire_requests(i915);
-		GEM_BUG_ON(i915->gt.active_requests);
 	}
 
 	return 0;
@@ -3399,8 +3182,7 @@ i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write)
 				   I915_WAIT_INTERRUPTIBLE |
 				   I915_WAIT_LOCKED |
 				   (write ? I915_WAIT_ALL : 0),
-				   MAX_SCHEDULE_TIMEOUT,
-				   NULL);
+				   MAX_SCHEDULE_TIMEOUT);
 	if (ret)
 		return ret;
 
@@ -3462,8 +3244,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
 				   I915_WAIT_INTERRUPTIBLE |
 				   I915_WAIT_LOCKED |
 				   (write ? I915_WAIT_ALL : 0),
-				   MAX_SCHEDULE_TIMEOUT,
-				   NULL);
+				   MAX_SCHEDULE_TIMEOUT);
 	if (ret)
 		return ret;
 
@@ -3578,8 +3359,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
 					   I915_WAIT_INTERRUPTIBLE |
 					   I915_WAIT_LOCKED |
 					   I915_WAIT_ALL,
-					   MAX_SCHEDULE_TIMEOUT,
-					   NULL);
+					   MAX_SCHEDULE_TIMEOUT);
 		if (ret)
 			return ret;
 
@@ -3717,8 +3497,7 @@ int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
 
 	ret = i915_gem_object_wait(obj,
 				   I915_WAIT_INTERRUPTIBLE,
-				   MAX_SCHEDULE_TIMEOUT,
-				   to_rps_client(file));
+				   MAX_SCHEDULE_TIMEOUT);
 	if (ret)
 		goto out;
 
@@ -3844,8 +3623,7 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
 				   I915_WAIT_INTERRUPTIBLE |
 				   I915_WAIT_LOCKED |
 				   (write ? I915_WAIT_ALL : 0),
-				   MAX_SCHEDULE_TIMEOUT,
-				   NULL);
+				   MAX_SCHEDULE_TIMEOUT);
 	if (ret)
 		return ret;
 
@@ -3891,8 +3669,9 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
 	long ret;
 
 	/* ABI: return -EIO if already wedged */
-	if (i915_terminally_wedged(&dev_priv->gpu_error))
-		return -EIO;
+	ret = i915_terminally_wedged(dev_priv);
+	if (ret)
+		return ret;
 
 	spin_lock(&file_priv->mm.lock);
 	list_for_each_entry(request, &file_priv->mm.request_list, client_link) {
@@ -3968,7 +3747,7 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
 	}
 
 	vma = i915_vma_instance(obj, vm, view);
-	if (unlikely(IS_ERR(vma)))
+	if (IS_ERR(vma))
 		return vma;
 
 	if (i915_vma_misplaced(vma, size, alignment, flags)) {
@@ -4002,20 +3781,17 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
 
 static __always_inline unsigned int __busy_read_flag(unsigned int id)
 {
-	/* Note that we could alias engines in the execbuf API, but
-	 * that would be very unwise as it prevents userspace from
-	 * fine control over engine selection. Ahem.
-	 *
-	 * This should be something like EXEC_MAX_ENGINE instead of
-	 * I915_NUM_ENGINES.
-	 */
-	BUILD_BUG_ON(I915_NUM_ENGINES > 16);
+	if (id == I915_ENGINE_CLASS_INVALID)
+		return 0xffff0000;
+
+	GEM_BUG_ON(id >= 16);
 	return 0x10000 << id;
 }
 
 static __always_inline unsigned int __busy_write_id(unsigned int id)
 {
-	/* The uABI guarantees an active writer is also amongst the read
+	/*
+	 * The uABI guarantees an active writer is also amongst the read
 	 * engines. This would be true if we accessed the activity tracking
 	 * under the lock, but as we perform the lookup of the object and
 	 * its activity locklessly we can not guarantee that the last_write
@@ -4023,16 +3799,20 @@ static __always_inline unsigned int __busy_write_id(unsigned int id)
 	 * last_read - hence we always set both read and write busy for
 	 * last_write.
 	 */
-	return id | __busy_read_flag(id);
+	if (id == I915_ENGINE_CLASS_INVALID)
+		return 0xffffffff;
+
+	return (id + 1) | __busy_read_flag(id);
 }
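
The resulting encoding packs readers into the top 16 bits and the writer's class + 1 into the low word, so userspace can test "busy at all" with one non-zero check. A worked example, with the two helpers restated locally so it compiles on its own:

/* Hedged worked example of the busy-ioctl encoding above. */
#include <assert.h>

static unsigned int busy_read_flag(unsigned int id)
{
	return 0x10000u << id;
}

static unsigned int busy_write_id(unsigned int id)
{
	return (id + 1) | busy_read_flag(id);
}

int main(void)
{
	/* uabi_class 0 (render): reader bit 16, writer id 1 in the low word. */
	assert(busy_read_flag(0) == 0x00010000u);
	/* A writer is always reported as a reader too: 0x00010001. */
	assert(busy_write_id(0) == 0x00010001u);
	return 0;
}
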
 
 static __always_inline unsigned int
 __busy_set_if_active(const struct dma_fence *fence,
 		     unsigned int (*flag)(unsigned int id))
 {
-	struct i915_request *rq;
+	const struct i915_request *rq;
 
-	/* We have to check the current hw status of the fence as the uABI
+	/*
+	 * We have to check the current hw status of the fence as the uABI
 	 * guarantees forward progress. We could rely on the idle worker
 	 * to eventually flush us, but to minimise latency just ask the
 	 * hardware.
@@ -4043,11 +3823,11 @@ __busy_set_if_active(const struct dma_fence *fence,
 		return 0;
 
 	/* opencode to_request() in order to avoid const warnings */
-	rq = container_of(fence, struct i915_request, fence);
+	rq = container_of(fence, const struct i915_request, fence);
 	if (i915_request_completed(rq))
 		return 0;
 
-	return flag(rq->engine->uabi_id);
+	return flag(rq->engine->uabi_class);
 }
 
 static __always_inline unsigned int
@@ -4081,7 +3861,8 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
 	if (!obj)
 		goto out;
 
-	/* A discrepancy here is that we do not report the status of
+	/*
+	 * A discrepancy here is that we do not report the status of
 	 * non-i915 fences, i.e. even though we may report the object as idle,
 	 * a call to set-domain may still stall waiting for foreign rendering.
 	 * This also means that wait-ioctl may report an object as busy,
@@ -4281,7 +4062,7 @@ i915_gem_object_create(struct drm_i915_private *dev_priv, u64 size)
 	if (overflows_type(size, obj->base.size))
 		return ERR_PTR(-E2BIG);
 
-	obj = i915_gem_object_alloc(dev_priv);
+	obj = i915_gem_object_alloc();
 	if (obj == NULL)
 		return ERR_PTR(-ENOMEM);
 
@@ -4545,7 +4326,7 @@ void i915_gem_sanitize(struct drm_i915_private *i915)
 	 * back to defaults, recovering from whatever wedged state we left it
 	 * in and so worth trying to use the device once more.
 	 */
-	if (i915_terminally_wedged(&i915->gpu_error))
+	if (i915_terminally_wedged(i915))
 		i915_gem_unset_wedged(i915);
 
 	/*
@@ -4566,10 +4347,9 @@ void i915_gem_sanitize(struct drm_i915_private *i915)
 	mutex_unlock(&i915->drm.struct_mutex);
 }
 
-int i915_gem_suspend(struct drm_i915_private *i915)
+void i915_gem_suspend(struct drm_i915_private *i915)
 {
 	intel_wakeref_t wakeref;
-	int ret;
 
 	GEM_TRACE("\n");
 
@@ -4589,22 +4369,7 @@ int i915_gem_suspend(struct drm_i915_private *i915)
 	 * state. Fortunately, the kernel_context is disposable and we do
 	 * not rely on its state.
 	 */
-	if (!i915_terminally_wedged(&i915->gpu_error)) {
-		ret = i915_gem_switch_to_kernel_context(i915);
-		if (ret)
-			goto err_unlock;
-
-		ret = i915_gem_wait_for_idle(i915,
-					     I915_WAIT_INTERRUPTIBLE |
-					     I915_WAIT_LOCKED |
-					     I915_WAIT_FOR_IDLE_BOOST,
-					     MAX_SCHEDULE_TIMEOUT);
-		if (ret && ret != -EIO)
-			goto err_unlock;
-
-		assert_kernel_context_is_current(i915);
-	}
-	i915_retire_requests(i915); /* ensure we flush after wedging */
+	switch_to_kernel_context_sync(i915, i915->gt.active_engines);
 
 	mutex_unlock(&i915->drm.struct_mutex);
 	i915_reset_flush(i915);
@@ -4617,23 +4382,13 @@ int i915_gem_suspend(struct drm_i915_private *i915)
 	 */
 	drain_delayed_work(&i915->gt.idle_work);
 
-	intel_uc_suspend(i915);
-
 	/*
 	 * Assert that we successfully flushed all the work and
 	 * reset the GPU back to its idle, low power state.
 	 */
-	WARN_ON(i915->gt.awake);
-	if (WARN_ON(!intel_engines_are_idle(i915)))
-		i915_gem_set_wedged(i915); /* no hope, discard everything */
-
-	intel_runtime_pm_put(i915, wakeref);
-	return 0;
+	GEM_BUG_ON(i915->gt.awake);
 
-err_unlock:
-	mutex_unlock(&i915->drm.struct_mutex);
 	intel_runtime_pm_put(i915, wakeref);
-	return ret;
 }
 
 void i915_gem_suspend_late(struct drm_i915_private *i915)
@@ -4701,7 +4456,7 @@ void i915_gem_resume(struct drm_i915_private *i915)
 	intel_uc_resume(i915);
 
 	/* Always reload a context for powersaving. */
-	if (i915_gem_switch_to_kernel_context(i915))
+	if (!load_power_context(i915))
 		goto err_wedged;
 
 out_unlock:
@@ -4710,8 +4465,9 @@ void i915_gem_resume(struct drm_i915_private *i915)
 	return;
 
 err_wedged:
-	if (!i915_terminally_wedged(&i915->gpu_error)) {
-		DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n");
+	if (!i915_reset_failed(i915)) {
+		dev_err(i915->drm.dev,
+			"Failed to re-initialize GPU, declaring it wedged!\n");
 		i915_gem_set_wedged(i915);
 	}
 	goto out_unlock;
@@ -4781,6 +4537,8 @@ static int __i915_gem_restart_engines(void *data)
 		}
 	}
 
+	intel_engines_set_scheduler_caps(i915);
+
 	return 0;
 }
 
@@ -4816,10 +4574,9 @@ int i915_gem_init_hw(struct drm_i915_private *dev_priv)
 	init_unused_rings(dev_priv);
 
 	BUG_ON(!dev_priv->kernel_context);
-	if (i915_terminally_wedged(&dev_priv->gpu_error)) {
-		ret = -EIO;
+	ret = i915_terminally_wedged(dev_priv);
+	if (ret)
 		goto out;
-	}
 
 	ret = i915_ppgtt_init_hw(dev_priv);
 	if (ret) {
@@ -4864,7 +4621,7 @@ static int __intel_engines_record_defaults(struct drm_i915_private *i915)
 	struct i915_gem_context *ctx;
 	struct intel_engine_cs *engine;
 	enum intel_engine_id id;
-	int err;
+	int err = 0;
 
 	/*
 	 * As we reset the gpu during very early sanitisation, the current
@@ -4897,36 +4654,27 @@ static int __intel_engines_record_defaults(struct drm_i915_private *i915)
 			goto err_active;
 	}
 
-	err = i915_gem_switch_to_kernel_context(i915);
-	if (err)
-		goto err_active;
-
-	if (i915_gem_wait_for_idle(i915, I915_WAIT_LOCKED, HZ / 5)) {
-		i915_gem_set_wedged(i915);
-		err = -EIO; /* Caller will declare us wedged */
+	/* Flush the default context image to memory, and enable powersaving. */
+	if (!load_power_context(i915)) {
+		err = -EIO;
 		goto err_active;
 	}
 
-	assert_kernel_context_is_current(i915);
-
-	/*
-	 * Immediately park the GPU so that we enable powersaving and
-	 * treat it as idle. The next time we issue a request, we will
-	 * unpark and start using the engine->pinned_default_state, otherwise
-	 * it is in limbo and an early reset may fail.
-	 */
-	__i915_gem_park(i915);
-
 	for_each_engine(engine, i915, id) {
+		struct intel_context *ce;
 		struct i915_vma *state;
 		void *vaddr;
 
-		GEM_BUG_ON(to_intel_context(ctx, engine)->pin_count);
+		ce = intel_context_lookup(ctx, engine);
+		if (!ce)
+			continue;
 
-		state = to_intel_context(ctx, engine)->state;
+		state = ce->state;
 		if (!state)
 			continue;
 
+		GEM_BUG_ON(intel_context_is_pinned(ce));
+
 		/*
 		 * As we will hold a reference to the logical state, it will
 		 * not be torn down with the context, and importantly the
@@ -4982,19 +4730,10 @@ static int __intel_engines_record_defaults(struct drm_i915_private *i915)
 err_active:
 	/*
 	 * If we have to abandon now, we expect the engines to be idle
-	 * and ready to be torn-down. First try to flush any remaining
-	 * request, ensure we are pointing at the kernel context and
-	 * then remove it.
+	 * and ready to be torn-down. The quickest way we can accomplish
+	 * this is by declaring ourselves wedged.
 	 */
-	if (WARN_ON(i915_gem_switch_to_kernel_context(i915)))
-		goto out_ctx;
-
-	if (WARN_ON(i915_gem_wait_for_idle(i915,
-					   I915_WAIT_LOCKED,
-					   MAX_SCHEDULE_TIMEOUT)))
-		goto out_ctx;
-
-	i915_gem_contexts_lost(i915);
+	i915_gem_set_wedged(i915);
 	goto out_ctx;
 }
 
@@ -5152,7 +4891,7 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
 err_init_hw:
 	mutex_unlock(&dev_priv->drm.struct_mutex);
 
-	WARN_ON(i915_gem_suspend(dev_priv));
+	i915_gem_suspend(dev_priv);
 	i915_gem_suspend_late(dev_priv);
 
 	i915_gem_drain_workqueue(dev_priv);
@@ -5192,7 +4931,7 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
 		 * wedged. But we only want to do this where the GPU is angry,
 		 * for all other failure, such as an allocation failure, bail.
 		 */
-		if (!i915_terminally_wedged(&dev_priv->gpu_error)) {
+		if (!i915_reset_failed(dev_priv)) {
 			i915_load_error(dev_priv,
 					"Failed to initialize GPU, declaring it wedged!\n");
 			i915_gem_set_wedged(dev_priv);
@@ -5305,36 +5044,7 @@ static void i915_gem_init__mm(struct drm_i915_private *i915)
 
 int i915_gem_init_early(struct drm_i915_private *dev_priv)
 {
-	int err = -ENOMEM;
-
-	dev_priv->objects = KMEM_CACHE(drm_i915_gem_object, SLAB_HWCACHE_ALIGN);
-	if (!dev_priv->objects)
-		goto err_out;
-
-	dev_priv->vmas = KMEM_CACHE(i915_vma, SLAB_HWCACHE_ALIGN);
-	if (!dev_priv->vmas)
-		goto err_objects;
-
-	dev_priv->luts = KMEM_CACHE(i915_lut_handle, 0);
-	if (!dev_priv->luts)
-		goto err_vmas;
-
-	dev_priv->requests = KMEM_CACHE(i915_request,
-					SLAB_HWCACHE_ALIGN |
-					SLAB_RECLAIM_ACCOUNT |
-					SLAB_TYPESAFE_BY_RCU);
-	if (!dev_priv->requests)
-		goto err_luts;
-
-	dev_priv->dependencies = KMEM_CACHE(i915_dependency,
-					    SLAB_HWCACHE_ALIGN |
-					    SLAB_RECLAIM_ACCOUNT);
-	if (!dev_priv->dependencies)
-		goto err_requests;
-
-	dev_priv->priorities = KMEM_CACHE(i915_priolist, SLAB_HWCACHE_ALIGN);
-	if (!dev_priv->priorities)
-		goto err_dependencies;
+	int err;
 
 	INIT_LIST_HEAD(&dev_priv->gt.active_rings);
 	INIT_LIST_HEAD(&dev_priv->gt.closed_vma);
@@ -5348,6 +5058,7 @@ int i915_gem_init_early(struct drm_i915_private *dev_priv)
 	init_waitqueue_head(&dev_priv->gpu_error.wait_queue);
 	init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
 	mutex_init(&dev_priv->gpu_error.wedge_mutex);
+	init_srcu_struct(&dev_priv->gpu_error.reset_backoff_srcu);
 
 	atomic_set(&dev_priv->mm.bsd_engine_dispatch_index, 0);
 
@@ -5358,19 +5069,6 @@ int i915_gem_init_early(struct drm_i915_private *dev_priv)
 		DRM_NOTE("Unable to create a private tmpfs mount, hugepage support will be disabled(%d).\n", err);
 
 	return 0;
-
-err_dependencies:
-	kmem_cache_destroy(dev_priv->dependencies);
-err_requests:
-	kmem_cache_destroy(dev_priv->requests);
-err_luts:
-	kmem_cache_destroy(dev_priv->luts);
-err_vmas:
-	kmem_cache_destroy(dev_priv->vmas);
-err_objects:
-	kmem_cache_destroy(dev_priv->objects);
-err_out:
-	return err;
 }
 
 void i915_gem_cleanup_early(struct drm_i915_private *dev_priv)
@@ -5380,15 +5078,7 @@ void i915_gem_cleanup_early(struct drm_i915_private *dev_priv)
 	GEM_BUG_ON(atomic_read(&dev_priv->mm.free_count));
 	WARN_ON(dev_priv->mm.object_count);
 
-	kmem_cache_destroy(dev_priv->priorities);
-	kmem_cache_destroy(dev_priv->dependencies);
-	kmem_cache_destroy(dev_priv->requests);
-	kmem_cache_destroy(dev_priv->luts);
-	kmem_cache_destroy(dev_priv->vmas);
-	kmem_cache_destroy(dev_priv->objects);
-
-	/* And ensure that our DESTROY_BY_RCU slabs are truly destroyed */
-	rcu_barrier();
+	cleanup_srcu_struct(&dev_priv->gpu_error.reset_backoff_srcu);
 
 	i915_gemfs_fini(dev_priv);
 }
diff --git a/drivers/gpu/drm/i915/i915_gem.h b/drivers/gpu/drm/i915/i915_gem.h
index b0e4b976880c0bf6699197f4f0215e15289c23f4..5c073fe736640bf0902874628cb3072b2a277d19 100644
--- a/drivers/gpu/drm/i915/i915_gem.h
+++ b/drivers/gpu/drm/i915/i915_gem.h
@@ -75,12 +75,14 @@ struct drm_i915_private;
 
 #define I915_NUM_ENGINES 8
 
+#define I915_GEM_IDLE_TIMEOUT (HZ / 5)
+
 void i915_gem_park(struct drm_i915_private *i915);
 void i915_gem_unpark(struct drm_i915_private *i915);
 
 static inline void __tasklet_disable_sync_once(struct tasklet_struct *t)
 {
-	if (atomic_inc_return(&t->count) == 1)
+	if (!atomic_fetch_inc(&t->count))
 		tasklet_unlock_wait(t);
 }
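
atomic_fetch_inc() returns the pre-increment value, so only the first disabler (count 0 -> 1) pays for tasklet_unlock_wait(); nested disables just bump the count. Paired with __tasklet_enable() below, which reports the final 1 -> 0 transition, a hedged usage sketch:

/* Hedged usage sketch: only the first disable waits for a running
 * tasklet; __tasklet_enable() reports when the count returns to zero.
 */
static void pause_then_resume(struct tasklet_struct *t)
{
	__tasklet_disable_sync_once(t);	/* 0 -> 1: waits via tasklet_unlock_wait() */
	__tasklet_disable_sync_once(t);	/* 1 -> 2: returns immediately */

	__tasklet_enable(t);		/* 2 -> 1: false, still disabled */
	if (__tasklet_enable(t))	/* 1 -> 0: true, fully enabled again */
		tasklet_hi_schedule(t);	/* illustrative follow-up */
}
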
 
@@ -89,4 +91,9 @@ static inline bool __tasklet_is_enabled(const struct tasklet_struct *t)
 	return !atomic_read(&t->count);
 }
 
+static inline bool __tasklet_enable(struct tasklet_struct *t)
+{
+	return atomic_dec_and_test(&t->count);
+}
+
 #endif /* __I915_GEM_H__ */
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 280813a4bf82a6fdd223c1b6f8dcbcdbe2980e12..d776d43707e0be72488a7fd1ea314afe53fcac91 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -88,12 +88,28 @@
 #include <linux/log2.h>
 #include <drm/i915_drm.h>
 #include "i915_drv.h"
+#include "i915_globals.h"
 #include "i915_trace.h"
 #include "intel_lrc_reg.h"
 #include "intel_workarounds.h"
 
 #define ALL_L3_SLICES(dev) (1 << NUM_L3_SLICES(dev)) - 1
 
+static struct i915_global_gem_context {
+	struct i915_global base;
+	struct kmem_cache *slab_luts;
+} global;
+
+struct i915_lut_handle *i915_lut_handle_alloc(void)
+{
+	return kmem_cache_alloc(global.slab_luts, GFP_KERNEL);
+}
+
+void i915_lut_handle_free(struct i915_lut_handle *lut)
+{
+	kmem_cache_free(global.slab_luts, lut);
+}
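
The LUT slab now lives in a per-module global rather than in dev_priv, so allocation no longer needs a device pointer. A hedged sketch of how such a global cache is typically brought up and torn down (the hook names are invented; the real registration goes through i915_globals.c, outside this hunk):

/* Hedged sketch: illustrative init/exit for the global LUT slab.
 * lut_slab_init()/lut_slab_exit() are hypothetical names.
 */
static int lut_slab_init(void)
{
	global.slab_luts = KMEM_CACHE(i915_lut_handle, 0);
	return global.slab_luts ? 0 : -ENOMEM;
}

static void lut_slab_exit(void)
{
	kmem_cache_destroy(global.slab_luts);
}
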
+
 static void lut_close(struct i915_gem_context *ctx)
 {
 	struct i915_lut_handle *lut, *ln;
@@ -102,7 +118,7 @@ static void lut_close(struct i915_gem_context *ctx)
 
 	list_for_each_entry_safe(lut, ln, &ctx->handles_list, ctx_link) {
 		list_del(&lut->obj_link);
-		kmem_cache_free(ctx->i915->luts, lut);
+		i915_lut_handle_free(lut);
 	}
 
 	rcu_read_lock();
@@ -206,25 +222,23 @@ static void release_hw_id(struct i915_gem_context *ctx)
 
 static void i915_gem_context_free(struct i915_gem_context *ctx)
 {
-	unsigned int n;
+	struct intel_context *it, *n;
 
 	lockdep_assert_held(&ctx->i915->drm.struct_mutex);
 	GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
+	GEM_BUG_ON(!list_empty(&ctx->active_engines));
 
 	release_hw_id(ctx);
 	i915_ppgtt_put(ctx->ppgtt);
 
-	for (n = 0; n < ARRAY_SIZE(ctx->__engine); n++) {
-		struct intel_context *ce = &ctx->__engine[n];
-
-		if (ce->ops)
-			ce->ops->destroy(ce);
-	}
+	rbtree_postorder_for_each_entry_safe(it, n, &ctx->hw_contexts, node)
+		intel_context_put(it);
 
 	kfree(ctx->name);
 	put_pid(ctx->pid);
 
 	list_del(&ctx->link);
+	mutex_destroy(&ctx->mutex);
 
 	kfree_rcu(ctx, rcu);
 }
@@ -307,7 +321,7 @@ static u32 default_desc_template(const struct drm_i915_private *i915,
 	desc = GEN8_CTX_VALID | GEN8_CTX_PRIVILEGE;
 
 	address_mode = INTEL_LEGACY_32B_CONTEXT;
-	if (ppgtt && i915_vm_is_48bit(&ppgtt->vm))
+	if (ppgtt && i915_vm_is_4lvl(&ppgtt->vm))
 		address_mode = INTEL_LEGACY_64B_CONTEXT;
 	desc |= address_mode << GEN8_CTX_ADDRESSING_MODE_SHIFT;
 
@@ -322,39 +336,13 @@ static u32 default_desc_template(const struct drm_i915_private *i915,
 	return desc;
 }
 
-static void intel_context_retire(struct i915_active_request *active,
-				 struct i915_request *rq)
-{
-	struct intel_context *ce =
-		container_of(active, typeof(*ce), active_tracker);
-
-	intel_context_unpin(ce);
-}
-
-void
-intel_context_init(struct intel_context *ce,
-		   struct i915_gem_context *ctx,
-		   struct intel_engine_cs *engine)
-{
-	ce->gem_context = ctx;
-
-	INIT_LIST_HEAD(&ce->signal_link);
-	INIT_LIST_HEAD(&ce->signals);
-
-	/* Use the whole device by default */
-	ce->sseu = intel_device_default_sseu(ctx->i915);
-
-	i915_active_request_init(&ce->active_tracker,
-				 NULL, intel_context_retire);
-}
-
 static struct i915_gem_context *
 __create_hw_context(struct drm_i915_private *dev_priv,
 		    struct drm_i915_file_private *file_priv)
 {
 	struct i915_gem_context *ctx;
-	unsigned int n;
 	int ret;
+	int i;
 
 	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
 	if (ctx == NULL)
@@ -364,9 +352,11 @@ __create_hw_context(struct drm_i915_private *dev_priv,
 	list_add_tail(&ctx->link, &dev_priv->contexts.list);
 	ctx->i915 = dev_priv;
 	ctx->sched.priority = I915_USER_PRIORITY(I915_PRIORITY_NORMAL);
+	INIT_LIST_HEAD(&ctx->active_engines);
+	mutex_init(&ctx->mutex);
 
-	for (n = 0; n < ARRAY_SIZE(ctx->__engine); n++)
-		intel_context_init(&ctx->__engine[n], ctx, dev_priv->engine[n]);
+	ctx->hw_contexts = RB_ROOT;
+	spin_lock_init(&ctx->hw_contexts_lock);
 
 	INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
 	INIT_LIST_HEAD(&ctx->handles_list);
@@ -401,10 +391,15 @@ __create_hw_context(struct drm_i915_private *dev_priv,
 	ctx->remap_slice = ALL_L3_SLICES(dev_priv);
 
 	i915_gem_context_set_bannable(ctx);
+	i915_gem_context_set_recoverable(ctx);
+
 	ctx->ring_size = 4 * PAGE_SIZE;
 	ctx->desc_template =
 		default_desc_template(dev_priv, dev_priv->mm.aliasing_ppgtt);
 
+	for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp); i++)
+		ctx->hang_timestamp[i] = jiffies - CONTEXT_FAST_HANG_JIFFIES;
+
 	return ctx;
 
 err_pid:
@@ -563,7 +558,7 @@ int i915_gem_contexts_init(struct drm_i915_private *dev_priv)
 	GEM_BUG_ON(dev_priv->kernel_context);
 	GEM_BUG_ON(dev_priv->preempt_context);
 
-	intel_engine_init_ctx_wa(dev_priv->engine[RCS]);
+	intel_engine_init_ctx_wa(dev_priv->engine[RCS0]);
 	init_contexts(dev_priv);
 
 	/* lowest priority; idle task */
@@ -682,81 +677,98 @@ last_request_on_engine(struct i915_timeline *timeline,
 	return NULL;
 }
 
-static bool engine_has_kernel_context_barrier(struct intel_engine_cs *engine)
+struct context_barrier_task {
+	struct i915_active base;
+	void (*task)(void *data);
+	void *data;
+};
+
+static void cb_retire(struct i915_active *base)
 {
-	struct drm_i915_private *i915 = engine->i915;
-	const struct intel_context * const ce =
-		to_intel_context(i915->kernel_context, engine);
-	struct i915_timeline *barrier = ce->ring->timeline;
-	struct intel_ring *ring;
-	bool any_active = false;
+	struct context_barrier_task *cb = container_of(base, typeof(*cb), base);
+
+	if (cb->task)
+		cb->task(cb->data);
+
+	i915_active_fini(&cb->base);
+	kfree(cb);
+}
+
+I915_SELFTEST_DECLARE(static unsigned long context_barrier_inject_fault);
+static int context_barrier_task(struct i915_gem_context *ctx,
+				unsigned long engines,
+				void (*task)(void *data),
+				void *data)
+{
+	struct drm_i915_private *i915 = ctx->i915;
+	struct context_barrier_task *cb;
+	struct intel_context *ce;
+	intel_wakeref_t wakeref;
+	int err = 0;
 
 	lockdep_assert_held(&i915->drm.struct_mutex);
-	list_for_each_entry(ring, &i915->gt.active_rings, active_link) {
-		struct i915_request *rq;
+	GEM_BUG_ON(!task);
 
-		rq = last_request_on_engine(ring->timeline, engine);
-		if (!rq)
-			continue;
+	cb = kmalloc(sizeof(*cb), GFP_KERNEL);
+	if (!cb)
+		return -ENOMEM;
 
-		any_active = true;
+	i915_active_init(i915, &cb->base, cb_retire);
+	i915_active_acquire(&cb->base);
+
+	wakeref = intel_runtime_pm_get(i915);
+	list_for_each_entry(ce, &ctx->active_engines, active_link) {
+		struct intel_engine_cs *engine = ce->engine;
+		struct i915_request *rq;
 
-		if (rq->hw_context == ce)
+		if (!(ce->engine->mask & engines))
 			continue;
 
-		/*
-		 * Was this request submitted after the previous
-		 * switch-to-kernel-context?
-		 */
-		if (!i915_timeline_sync_is_later(barrier, &rq->fence)) {
-			GEM_TRACE("%s needs barrier for %llx:%lld\n",
-				  ring->timeline->name,
-				  rq->fence.context,
-				  rq->fence.seqno);
-			return false;
+		if (I915_SELFTEST_ONLY(context_barrier_inject_fault &
+				       engine->mask)) {
+			err = -ENXIO;
+			break;
+		}
+
+		rq = i915_request_alloc(engine, ctx);
+		if (IS_ERR(rq)) {
+			err = PTR_ERR(rq);
+			break;
 		}
 
-		GEM_TRACE("%s has barrier after %llx:%lld\n",
-			  ring->timeline->name,
-			  rq->fence.context,
-			  rq->fence.seqno);
+		err = i915_active_ref(&cb->base, rq->fence.context, rq);
+		i915_request_add(rq);
+		if (err)
+			break;
 	}
+	intel_runtime_pm_put(i915, wakeref);
 
-	/*
-	 * If any other timeline was still active and behind the last barrier,
-	 * then our last switch-to-kernel-context must still be queued and
-	 * will run last (leaving the engine in the kernel context when it
-	 * eventually idles).
-	 */
-	if (any_active)
-		return true;
+	cb->task = err ? NULL : task; /* caller needs to unwind instead */
+	cb->data = data;
 
-	/* The engine is idle; check that it is idling in the kernel context. */
-	return engine->last_retired_context == ce;
+	i915_active_release(&cb->base);
+
+	return err;
 }
 
-int i915_gem_switch_to_kernel_context(struct drm_i915_private *i915)
+int i915_gem_switch_to_kernel_context(struct drm_i915_private *i915,
+				      unsigned long mask)
 {
 	struct intel_engine_cs *engine;
-	enum intel_engine_id id;
 
 	GEM_TRACE("awake?=%s\n", yesno(i915->gt.awake));
 
 	lockdep_assert_held(&i915->drm.struct_mutex);
 	GEM_BUG_ON(!i915->kernel_context);
 
-	i915_retire_requests(i915);
+	/* Inoperable, so presume the GPU is safely pointing into the void! */
+	if (i915_terminally_wedged(i915))
+		return 0;
 
-	for_each_engine(engine, i915, id) {
+	for_each_engine_masked(engine, i915, mask, mask) {
 		struct intel_ring *ring;
 		struct i915_request *rq;
 
-		GEM_BUG_ON(!to_intel_context(i915->kernel_context, engine));
-		if (engine_has_kernel_context_barrier(engine))
-			continue;
-
-		GEM_TRACE("emit barrier on %s\n", engine->name);
-
 		rq = i915_request_alloc(engine, i915->kernel_context);
 		if (IS_ERR(rq))
 			return PTR_ERR(rq);
@@ -779,7 +791,6 @@ int i915_gem_switch_to_kernel_context(struct drm_i915_private *i915)
 			i915_sw_fence_await_sw_fence_gfp(&rq->submit,
 							 &prev->submit,
 							 I915_FENCE_GFP);
-			i915_timeline_sync_set(rq->timeline, &prev->fence);
 		}
 
 		i915_request_add(rq);
@@ -796,18 +807,22 @@ static bool client_is_banned(struct drm_i915_file_private *file_priv)
 int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
 				  struct drm_file *file)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
 	struct drm_i915_gem_context_create *args = data;
 	struct drm_i915_file_private *file_priv = file->driver_priv;
 	struct i915_gem_context *ctx;
 	int ret;
 
-	if (!DRIVER_CAPS(dev_priv)->has_logical_contexts)
+	if (!DRIVER_CAPS(i915)->has_logical_contexts)
 		return -ENODEV;
 
 	if (args->pad != 0)
 		return -EINVAL;
 
+	ret = i915_terminally_wedged(i915);
+	if (ret)
+		return ret;
+
 	if (client_is_banned(file_priv)) {
 		DRM_DEBUG("client %s[%d] banned from creating ctx\n",
 			  current->comm,
@@ -820,7 +835,7 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
 	if (ret)
 		return ret;
 
-	ctx = i915_gem_create_context(dev_priv, file_priv);
+	ctx = i915_gem_create_context(i915, file_priv);
 	mutex_unlock(&dev->struct_mutex);
 	if (IS_ERR(ctx))
 		return PTR_ERR(ctx);
@@ -869,7 +884,6 @@ static int get_sseu(struct i915_gem_context *ctx,
 	struct drm_i915_gem_context_param_sseu user_sseu;
 	struct intel_engine_cs *engine;
 	struct intel_context *ce;
-	int ret;
 
 	if (args->size == 0)
 		goto out;
@@ -889,19 +903,16 @@ static int get_sseu(struct i915_gem_context *ctx,
 	if (!engine)
 		return -EINVAL;
 
-	/* Only use for mutex here is to serialize get_param and set_param. */
-	ret = mutex_lock_interruptible(&ctx->i915->drm.struct_mutex);
-	if (ret)
-		return ret;
-
-	ce = to_intel_context(ctx, engine);
+	ce = intel_context_pin_lock(ctx, engine); /* serialises with set_sseu */
+	if (IS_ERR(ce))
+		return PTR_ERR(ce);
 
 	user_sseu.slice_mask = ce->sseu.slice_mask;
 	user_sseu.subslice_mask = ce->sseu.subslice_mask;
 	user_sseu.min_eus_per_subslice = ce->sseu.min_eus_per_subslice;
 	user_sseu.max_eus_per_subslice = ce->sseu.max_eus_per_subslice;
 
-	mutex_unlock(&ctx->i915->drm.struct_mutex);
+	intel_context_pin_unlock(ce);
 
 	if (copy_to_user(u64_to_user_ptr(args->value), &user_sseu,
 			 sizeof(user_sseu)))
@@ -951,6 +962,10 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
 		args->size = 0;
 		args->value = i915_gem_context_is_bannable(ctx);
 		break;
+	case I915_CONTEXT_PARAM_RECOVERABLE:
+		args->size = 0;
+		args->value = i915_gem_context_is_recoverable(ctx);
+		break;
 	case I915_CONTEXT_PARAM_PRIORITY:
 		args->size = 0;
 		args->value = ctx->sched.priority >> I915_USER_PRIORITY_SHIFT;
@@ -993,23 +1008,28 @@ static int gen8_emit_rpcs_config(struct i915_request *rq,
 }
 
 static int
-gen8_modify_rpcs_gpu(struct intel_context *ce,
-		     struct intel_engine_cs *engine,
-		     struct intel_sseu sseu)
+gen8_modify_rpcs(struct intel_context *ce, struct intel_sseu sseu)
 {
-	struct drm_i915_private *i915 = engine->i915;
+	struct drm_i915_private *i915 = ce->engine->i915;
 	struct i915_request *rq, *prev;
 	intel_wakeref_t wakeref;
 	int ret;
 
-	GEM_BUG_ON(!ce->pin_count);
+	lockdep_assert_held(&ce->pin_mutex);
 
-	lockdep_assert_held(&i915->drm.struct_mutex);
+	/*
+	 * If the context is not idle, we have to submit an ordered request to
+	 * modify its context image via the kernel context (writing to our own
+	 * image, or into the registers directly, does not stick). Pristine
+	 * and idle contexts will be configured on pinning.
+	 */
+	if (!intel_context_is_pinned(ce))
+		return 0;
 
 	/* Submitting requests etc needs the hw awake. */
 	wakeref = intel_runtime_pm_get(i915);
 
-	rq = i915_request_alloc(engine, i915->kernel_context);
+	rq = i915_request_alloc(ce->engine, i915->kernel_context);
 	if (IS_ERR(rq)) {
 		ret = PTR_ERR(rq);
 		goto out_put;
@@ -1057,27 +1077,26 @@ __i915_gem_context_reconfigure_sseu(struct i915_gem_context *ctx,
 				    struct intel_engine_cs *engine,
 				    struct intel_sseu sseu)
 {
-	struct intel_context *ce = to_intel_context(ctx, engine);
+	struct intel_context *ce;
 	int ret = 0;
 
 	GEM_BUG_ON(INTEL_GEN(ctx->i915) < 8);
-	GEM_BUG_ON(engine->id != RCS);
+	GEM_BUG_ON(engine->id != RCS0);
+
+	ce = intel_context_pin_lock(ctx, engine);
+	if (IS_ERR(ce))
+		return PTR_ERR(ce);
 
 	/* Nothing to do if unmodified. */
 	if (!memcmp(&ce->sseu, &sseu, sizeof(sseu)))
-		return 0;
-
-	/*
-	 * If context is not idle we have to submit an ordered request to modify
-	 * its context image via the kernel context. Pristine and idle contexts
-	 * will be configured on pinning.
-	 */
-	if (ce->pin_count)
-		ret = gen8_modify_rpcs_gpu(ce, engine, sseu);
+		goto unlock;
 
+	ret = gen8_modify_rpcs(ce, sseu);
 	if (!ret)
 		ce->sseu = sseu;
 
+unlock:
+	intel_context_pin_unlock(ce);
 	return ret;
 }
 
@@ -1285,6 +1304,15 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
 			i915_gem_context_clear_bannable(ctx);
 		break;
 
+	case I915_CONTEXT_PARAM_RECOVERABLE:
+		if (args->size)
+			ret = -EINVAL;
+		else if (args->value)
+			i915_gem_context_set_recoverable(ctx);
+		else
+			i915_gem_context_clear_recoverable(ctx);
+		break;
+
 	case I915_CONTEXT_PARAM_PRIORITY:
 		{
 			s64 priority = args->value;
@@ -1385,3 +1413,28 @@ int __i915_gem_context_pin_hw_id(struct i915_gem_context *ctx)
 #include "selftests/mock_context.c"
 #include "selftests/i915_gem_context.c"
 #endif
+
+static void i915_global_gem_context_shrink(void)
+{
+	kmem_cache_shrink(global.slab_luts);
+}
+
+static void i915_global_gem_context_exit(void)
+{
+	kmem_cache_destroy(global.slab_luts);
+}
+
+static struct i915_global_gem_context global = { {
+	.shrink = i915_global_gem_context_shrink,
+	.exit = i915_global_gem_context_exit,
+} };
+
+int __init i915_global_gem_context_init(void)
+{
+	global.slab_luts = KMEM_CACHE(i915_lut_handle, 0);
+	if (!global.slab_luts)
+		return -ENOMEM;
+
+	i915_global_register(&global.base);
+	return 0;
+}
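context_barrier_task() above emits an empty request on each engine the context is currently active on (filtered by the mask), tracks them all in one i915_active, and runs task(data) from cb_retire() once the last request retires; on error the task is dropped and the caller unwinds instead. A hypothetical caller, sketched on the assumption that struct_mutex is already held as the lockdep annotation requires; old_state_release() and replace_context_state() are invented names:

/*
 * Hypothetical use of context_barrier_task(): defer freeing per-context
 * state until every engine the context runs on has passed a barrier.
 */
static void old_state_release(void *data)
{
	kfree(data);	/* runs from cb_retire() on last retirement */
}

static int replace_context_state(struct i915_gem_context *ctx,
				 void *old_state)
{
	int err;

	err = context_barrier_task(ctx, ALL_ENGINES,
				   old_state_release, old_state);
	if (err)
		kfree(old_state);	/* no barrier emitted: unwind here */

	return err;
}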
diff --git a/drivers/gpu/drm/i915/i915_gem_context.h b/drivers/gpu/drm/i915/i915_gem_context.h
index ca150a764c24d48547106108692e1ac5b2e9ed82..5a32c4b4816f87e144300beb14b06458c0445a35 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.h
+++ b/drivers/gpu/drm/i915/i915_gem_context.h
@@ -25,210 +25,16 @@
 #ifndef __I915_GEM_CONTEXT_H__
 #define __I915_GEM_CONTEXT_H__
 
-#include <linux/bitops.h>
-#include <linux/list.h>
-#include <linux/radix-tree.h>
+#include "i915_gem_context_types.h"
 
 #include "i915_gem.h"
 #include "i915_scheduler.h"
+#include "intel_context.h"
 #include "intel_device_info.h"
 
-struct pid;
-
 struct drm_device;
 struct drm_file;
 
-struct drm_i915_private;
-struct drm_i915_file_private;
-struct i915_hw_ppgtt;
-struct i915_request;
-struct i915_vma;
-struct intel_ring;
-
-#define DEFAULT_CONTEXT_HANDLE 0
-
-struct intel_context;
-
-struct intel_context_ops {
-	void (*unpin)(struct intel_context *ce);
-	void (*destroy)(struct intel_context *ce);
-};
-
-/*
- * Powergating configuration for a particular (context,engine).
- */
-struct intel_sseu {
-	u8 slice_mask;
-	u8 subslice_mask;
-	u8 min_eus_per_subslice;
-	u8 max_eus_per_subslice;
-};
-
-/**
- * struct i915_gem_context - client state
- *
- * The struct i915_gem_context represents the combined view of the driver and
- * logical hardware state for a particular client.
- */
-struct i915_gem_context {
-	/** i915: i915 device backpointer */
-	struct drm_i915_private *i915;
-
-	/** file_priv: owning file descriptor */
-	struct drm_i915_file_private *file_priv;
-
-	/**
-	 * @ppgtt: unique address space (GTT)
-	 *
-	 * In full-ppgtt mode, each context has its own address space ensuring
-	 * complete seperation of one client from all others.
-	 *
-	 * In other modes, this is a NULL pointer with the expectation that
-	 * the caller uses the shared global GTT.
-	 */
-	struct i915_hw_ppgtt *ppgtt;
-
-	/**
-	 * @pid: process id of creator
-	 *
-	 * Note that who created the context may not be the principle user,
-	 * as the context may be shared across a local socket. However,
-	 * that should only affect the default context, all contexts created
-	 * explicitly by the client are expected to be isolated.
-	 */
-	struct pid *pid;
-
-	/**
-	 * @name: arbitrary name
-	 *
-	 * A name is constructed for the context from the creator's process
-	 * name, pid and user handle in order to uniquely identify the
-	 * context in messages.
-	 */
-	const char *name;
-
-	/** link: place with &drm_i915_private.context_list */
-	struct list_head link;
-	struct llist_node free_link;
-
-	/**
-	 * @ref: reference count
-	 *
-	 * A reference to a context is held by both the client who created it
-	 * and on each request submitted to the hardware using the request
-	 * (to ensure the hardware has access to the state until it has
-	 * finished all pending writes). See i915_gem_context_get() and
-	 * i915_gem_context_put() for access.
-	 */
-	struct kref ref;
-
-	/**
-	 * @rcu: rcu_head for deferred freeing.
-	 */
-	struct rcu_head rcu;
-
-	/**
-	 * @user_flags: small set of booleans controlled by the user
-	 */
-	unsigned long user_flags;
-#define UCONTEXT_NO_ZEROMAP		0
-#define UCONTEXT_NO_ERROR_CAPTURE	1
-#define UCONTEXT_BANNABLE		2
-
-	/**
-	 * @flags: small set of booleans
-	 */
-	unsigned long flags;
-#define CONTEXT_BANNED			0
-#define CONTEXT_CLOSED			1
-#define CONTEXT_FORCE_SINGLE_SUBMISSION	2
-
-	/**
-	 * @hw_id: - unique identifier for the context
-	 *
-	 * The hardware needs to uniquely identify the context for a few
-	 * functions like fault reporting, PASID, scheduling. The
-	 * &drm_i915_private.context_hw_ida is used to assign a unqiue
-	 * id for the lifetime of the context.
-	 *
-	 * @hw_id_pin_count: - number of times this context had been pinned
-	 * for use (should be, at most, once per engine).
-	 *
-	 * @hw_id_link: - all contexts with an assigned id are tracked
-	 * for possible repossession.
-	 */
-	unsigned int hw_id;
-	atomic_t hw_id_pin_count;
-	struct list_head hw_id_link;
-
-	/**
-	 * @user_handle: userspace identifier
-	 *
-	 * A unique per-file identifier is generated from
-	 * &drm_i915_file_private.contexts.
-	 */
-	u32 user_handle;
-
-	struct i915_sched_attr sched;
-
-	/** engine: per-engine logical HW state */
-	struct intel_context {
-		struct i915_gem_context *gem_context;
-		struct intel_engine_cs *active;
-		struct list_head signal_link;
-		struct list_head signals;
-		struct i915_vma *state;
-		struct intel_ring *ring;
-		u32 *lrc_reg_state;
-		u64 lrc_desc;
-		int pin_count;
-
-		/**
-		 * active_tracker: Active tracker for the external rq activity
-		 * on this intel_context object.
-		 */
-		struct i915_active_request active_tracker;
-
-		const struct intel_context_ops *ops;
-
-		/** sseu: Control eu/slice partitioning */
-		struct intel_sseu sseu;
-	} __engine[I915_NUM_ENGINES];
-
-	/** ring_size: size for allocating the per-engine ring buffer */
-	u32 ring_size;
-	/** desc_template: invariant fields for the HW context descriptor */
-	u32 desc_template;
-
-	/** guilty_count: How many times this context has caused a GPU hang. */
-	atomic_t guilty_count;
-	/**
-	 * @active_count: How many times this context was active during a GPU
-	 * hang, but did not cause it.
-	 */
-	atomic_t active_count;
-
-#define CONTEXT_SCORE_GUILTY		10
-#define CONTEXT_SCORE_BAN_THRESHOLD	40
-	/** ban_score: Accumulated score of all hangs caused by this context. */
-	atomic_t ban_score;
-
-	/** remap_slice: Bitmask of cache lines that need remapping */
-	u8 remap_slice;
-
-	/** handles_vma: rbtree to look up our context specific obj/vma for
-	 * the user handle. (user handles are per fd, but the binding is
-	 * per vm, which may be one per context or shared with the global GTT)
-	 */
-	struct radix_tree_root handles_vma;
-
-	/** handles_list: reverse list of all the rbtree entries in use for
-	 * this context, which allows us to free all the allocations on
-	 * context close.
-	 */
-	struct list_head handles_list;
-};
-
 static inline bool i915_gem_context_is_closed(const struct i915_gem_context *ctx)
 {
 	return test_bit(CONTEXT_CLOSED, &ctx->flags);
@@ -270,6 +76,21 @@ static inline void i915_gem_context_clear_bannable(struct i915_gem_context *ctx)
 	clear_bit(UCONTEXT_BANNABLE, &ctx->user_flags);
 }
 
+static inline bool i915_gem_context_is_recoverable(const struct i915_gem_context *ctx)
+{
+	return test_bit(UCONTEXT_RECOVERABLE, &ctx->user_flags);
+}
+
+static inline void i915_gem_context_set_recoverable(struct i915_gem_context *ctx)
+{
+	set_bit(UCONTEXT_RECOVERABLE, &ctx->user_flags);
+}
+
+static inline void i915_gem_context_clear_recoverable(struct i915_gem_context *ctx)
+{
+	clear_bit(UCONTEXT_RECOVERABLE, &ctx->user_flags);
+}
+
 static inline bool i915_gem_context_is_banned(const struct i915_gem_context *ctx)
 {
 	return test_bit(CONTEXT_BANNED, &ctx->flags);
@@ -315,35 +136,6 @@ static inline bool i915_gem_context_is_kernel(struct i915_gem_context *ctx)
 	return !ctx->file_priv;
 }
 
-static inline struct intel_context *
-to_intel_context(struct i915_gem_context *ctx,
-		 const struct intel_engine_cs *engine)
-{
-	return &ctx->__engine[engine->id];
-}
-
-static inline struct intel_context *
-intel_context_pin(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
-{
-	return engine->context_pin(engine, ctx);
-}
-
-static inline void __intel_context_pin(struct intel_context *ce)
-{
-	GEM_BUG_ON(!ce->pin_count);
-	ce->pin_count++;
-}
-
-static inline void intel_context_unpin(struct intel_context *ce)
-{
-	GEM_BUG_ON(!ce->pin_count);
-	if (--ce->pin_count)
-		return;
-
-	GEM_BUG_ON(!ce->ops);
-	ce->ops->unpin(ce);
-}
-
 /* i915_gem_context.c */
 int __must_check i915_gem_contexts_init(struct drm_i915_private *dev_priv);
 void i915_gem_contexts_lost(struct drm_i915_private *dev_priv);
@@ -354,7 +146,8 @@ int i915_gem_context_open(struct drm_i915_private *i915,
 void i915_gem_context_close(struct drm_file *file);
 
 int i915_switch_context(struct i915_request *rq);
-int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv);
+int i915_gem_switch_to_kernel_context(struct drm_i915_private *i915,
+				      unsigned long engine_mask);
 
 void i915_gem_context_release(struct kref *ctx_ref);
 struct i915_gem_context *
@@ -386,8 +179,7 @@ static inline void i915_gem_context_put(struct i915_gem_context *ctx)
 	kref_put(&ctx->ref, i915_gem_context_release);
 }
 
-void intel_context_init(struct intel_context *ce,
-			struct i915_gem_context *ctx,
-			struct intel_engine_cs *engine);
+struct i915_lut_handle *i915_lut_handle_alloc(void);
+void i915_lut_handle_free(struct i915_lut_handle *lut);
 
 #endif /* !__I915_GEM_CONTEXT_H__ */
diff --git a/drivers/gpu/drm/i915/i915_gem_context_types.h b/drivers/gpu/drm/i915/i915_gem_context_types.h
new file mode 100644
index 0000000000000000000000000000000000000000..2bf19730eaa97086fe718d6bcdbf6fa38e15b572
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_context_types.h
@@ -0,0 +1,182 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __I915_GEM_CONTEXT_TYPES_H__
+#define __I915_GEM_CONTEXT_TYPES_H__
+
+#include <linux/atomic.h>
+#include <linux/list.h>
+#include <linux/llist.h>
+#include <linux/kref.h>
+#include <linux/mutex.h>
+#include <linux/radix-tree.h>
+#include <linux/rbtree.h>
+#include <linux/rcupdate.h>
+#include <linux/types.h>
+
+#include "i915_scheduler.h"
+#include "intel_context_types.h"
+
+struct pid;
+
+struct drm_i915_private;
+struct drm_i915_file_private;
+struct i915_hw_ppgtt;
+struct i915_timeline;
+struct intel_ring;
+
+/**
+ * struct i915_gem_context - client state
+ *
+ * The struct i915_gem_context represents the combined view of the driver and
+ * logical hardware state for a particular client.
+ */
+struct i915_gem_context {
+	/** i915: i915 device backpointer */
+	struct drm_i915_private *i915;
+
+	/** file_priv: owning file descriptor */
+	struct drm_i915_file_private *file_priv;
+
+	/**
+	 * @ppgtt: unique address space (GTT)
+	 *
+	 * In full-ppgtt mode, each context has its own address space ensuring
+	 * complete separation of one client from all others.
+	 *
+	 * In other modes, this is a NULL pointer with the expectation that
+	 * the caller uses the shared global GTT.
+	 */
+	struct i915_hw_ppgtt *ppgtt;
+
+	/**
+	 * @pid: process id of creator
+	 *
+	 * Note that whoever created the context may not be the principal user,
+	 * as the context may be shared across a local socket. However,
+	 * that should only affect the default context, all contexts created
+	 * explicitly by the client are expected to be isolated.
+	 */
+	struct pid *pid;
+
+	/**
+	 * @name: arbitrary name
+	 *
+	 * A name is constructed for the context from the creator's process
+	 * name, pid and user handle in order to uniquely identify the
+	 * context in messages.
+	 */
+	const char *name;
+
+	/** link: place within &drm_i915_private.context_list */
+	struct list_head link;
+	struct llist_node free_link;
+
+	/**
+	 * @ref: reference count
+	 *
+	 * A reference to a context is held by both the client who created it
+	 * and on each request submitted to the hardware using the request
+	 * (to ensure the hardware has access to the state until it has
+	 * finished all pending writes). See i915_gem_context_get() and
+	 * i915_gem_context_put() for access.
+	 */
+	struct kref ref;
+
+	/**
+	 * @rcu: rcu_head for deferred freeing.
+	 */
+	struct rcu_head rcu;
+
+	/**
+	 * @user_flags: small set of booleans controlled by the user
+	 */
+	unsigned long user_flags;
+#define UCONTEXT_NO_ZEROMAP		0
+#define UCONTEXT_NO_ERROR_CAPTURE	1
+#define UCONTEXT_BANNABLE		2
+#define UCONTEXT_RECOVERABLE		3
+
+	/**
+	 * @flags: small set of booleans
+	 */
+	unsigned long flags;
+#define CONTEXT_BANNED			0
+#define CONTEXT_CLOSED			1
+#define CONTEXT_FORCE_SINGLE_SUBMISSION	2
+
+	/**
+	 * @hw_id: - unique identifier for the context
+	 *
+	 * The hardware needs to uniquely identify the context for a few
+	 * functions like fault reporting, PASID, scheduling. The
+	 * &drm_i915_private.context_hw_ida is used to assign a unique
+	 * id for the lifetime of the context.
+	 *
+	 * @hw_id_pin_count: - number of times this context has been pinned
+	 * for use (should be, at most, once per engine).
+	 *
+	 * @hw_id_link: - all contexts with an assigned id are tracked
+	 * for possible repossession.
+	 */
+	unsigned int hw_id;
+	atomic_t hw_id_pin_count;
+	struct list_head hw_id_link;
+
+	struct list_head active_engines;
+	struct mutex mutex;
+
+	/**
+	 * @user_handle: userspace identifier
+	 *
+	 * A unique per-file identifier is generated from
+	 * &drm_i915_file_private.contexts.
+	 */
+	u32 user_handle;
+#define DEFAULT_CONTEXT_HANDLE 0
+
+	struct i915_sched_attr sched;
+
+	/** hw_contexts: per-engine logical HW state */
+	struct rb_root hw_contexts;
+	spinlock_t hw_contexts_lock;
+
+	/** ring_size: size for allocating the per-engine ring buffer */
+	u32 ring_size;
+	/** desc_template: invariant fields for the HW context descriptor */
+	u32 desc_template;
+
+	/** guilty_count: How many times this context has caused a GPU hang. */
+	atomic_t guilty_count;
+	/**
+	 * @active_count: How many times this context was active during a GPU
+	 * hang, but did not cause it.
+	 */
+	atomic_t active_count;
+
+	/**
+	 * @hang_timestamp: The last time(s) this context caused a GPU hang
+	 */
+	unsigned long hang_timestamp[2];
+#define CONTEXT_FAST_HANG_JIFFIES (120 * HZ) /* 3 hangs within 120s? Banned! */
+
+	/** remap_slice: Bitmask of cache lines that need remapping */
+	u8 remap_slice;
+
+	/** handles_vma: rbtree to look up our context specific obj/vma for
+	 * the user handle. (user handles are per fd, but the binding is
+	 * per vm, which may be one per context or shared with the global GTT)
+	 */
+	struct radix_tree_root handles_vma;
+
+	/** handles_list: reverse list of all the rbtree entries in use for
+	 * this context, which allows us to free all the allocations on
+	 * context close.
+	 */
+	struct list_head handles_list;
+};
+
+#endif /* __I915_GEM_CONTEXT_TYPES_H__ */
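With the fixed __engine[I915_NUM_ENGINES] array gone, per-engine state hangs off the hw_contexts rbtree guarded by hw_contexts_lock. A sketch of what a lookup over that tree looks like, assuming it is keyed by engine pointer and that struct intel_context embeds its rb_node as "node" (as the rbtree_postorder walk in i915_gem_context_free() suggests); the real helper is intel_context_lookup(), which lives in intel_context.c and is not part of this excerpt:

/* Illustrative lookup over ctx->hw_contexts; not the driver's helper. */
static struct intel_context *
lookup_hw_context(struct i915_gem_context *ctx,
		  const struct intel_engine_cs *engine)
{
	struct intel_context *ce = NULL;
	struct rb_node *p;

	spin_lock(&ctx->hw_contexts_lock);
	p = ctx->hw_contexts.rb_node;
	while (p) {
		struct intel_context *this =
			rb_entry(p, struct intel_context, node);

		if (this->engine == engine) {
			ce = this;
			break;
		}

		if (this->engine < engine)
			p = p->rb_right;
		else
			p = p->rb_left;
	}
	spin_unlock(&ctx->hw_contexts_lock);

	return ce;
}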
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index 02f7298bfe57cda5ad1d344adb0ff99079267c00..33181678990ec764985dd9593b09f47989455f87 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -300,7 +300,7 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
 
 	get_dma_buf(dma_buf);
 
-	obj = i915_gem_object_alloc(to_i915(dev));
+	obj = i915_gem_object_alloc();
 	if (obj == NULL) {
 		ret = -ENOMEM;
 		goto fail_detach;
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 68d74c50ac392dba58ea511386912790399c2a13..060f5903544a042427f195e95c0bf057b129a029 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -38,31 +38,21 @@ I915_SELFTEST_DECLARE(static struct igt_evict_ctl {
 
 static bool ggtt_is_idle(struct drm_i915_private *i915)
 {
-       struct intel_engine_cs *engine;
-       enum intel_engine_id id;
-
-       if (i915->gt.active_requests)
-	       return false;
-
-       for_each_engine(engine, i915, id) {
-	       if (!intel_engine_has_kernel_context(engine))
-		       return false;
-       }
-
-       return true;
+	return !i915->gt.active_requests;
 }
 
 static int ggtt_flush(struct drm_i915_private *i915)
 {
 	int err;
 
-	/* Not everything in the GGTT is tracked via vma (otherwise we
+	/*
+	 * Not everything in the GGTT is tracked via vma (otherwise we
 	 * could evict as required with minimal stalling) so we are forced
 	 * to idle the GPU and explicitly retire outstanding requests in
 	 * the hopes that we can then remove contexts and the like only
 	 * bound by their active reference.
 	 */
-	err = i915_gem_switch_to_kernel_context(i915);
+	err = i915_gem_switch_to_kernel_context(i915, i915->gt.active_engines);
 	if (err)
 		return err;
 
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 02adcaf6ebea69086aa07be57fed55b347636c94..ee6d301a96277616788dbe94365149d50c5c7338 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -794,8 +794,8 @@ static int eb_wait_for_ring(const struct i915_execbuffer *eb)
 	 * keeping all of their resources pinned.
 	 */
 
-	ce = to_intel_context(eb->ctx, eb->engine);
-	if (!ce->ring) /* first use, assume empty! */
+	ce = intel_context_lookup(eb->ctx, eb->engine);
+	if (!ce || !ce->ring) /* first use, assume empty! */
 		return 0;
 
 	rq = __eb_wait_for_ring(ce->ring);
@@ -849,12 +849,12 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb)
 		}
 
 		vma = i915_vma_instance(obj, eb->vm, NULL);
-		if (unlikely(IS_ERR(vma))) {
+		if (IS_ERR(vma)) {
 			err = PTR_ERR(vma);
 			goto err_obj;
 		}
 
-		lut = kmem_cache_alloc(eb->i915->luts, GFP_KERNEL);
+		lut = i915_lut_handle_alloc();
 		if (unlikely(!lut)) {
 			err = -ENOMEM;
 			goto err_obj;
@@ -862,7 +862,7 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb)
 
 		err = radix_tree_insert(handles_vma, handle, vma);
 		if (unlikely(err)) {
-			kmem_cache_free(eb->i915->luts, lut);
+			i915_lut_handle_free(lut);
 			goto err_obj;
 		}
 
@@ -1957,7 +1957,7 @@ static int i915_reset_gen7_sol_offsets(struct i915_request *rq)
 	u32 *cs;
 	int i;
 
-	if (!IS_GEN(rq->i915, 7) || rq->engine->id != RCS) {
+	if (!IS_GEN(rq->i915, 7) || rq->engine->id != RCS0) {
 		DRM_DEBUG("sol reset is gen7/rcs only\n");
 		return -EINVAL;
 	}
@@ -2082,11 +2082,11 @@ gen8_dispatch_bsd_engine(struct drm_i915_private *dev_priv,
 #define I915_USER_RINGS (4)
 
 static const enum intel_engine_id user_ring_map[I915_USER_RINGS + 1] = {
-	[I915_EXEC_DEFAULT]	= RCS,
-	[I915_EXEC_RENDER]	= RCS,
-	[I915_EXEC_BLT]		= BCS,
-	[I915_EXEC_BSD]		= VCS,
-	[I915_EXEC_VEBOX]	= VECS
+	[I915_EXEC_DEFAULT]	= RCS0,
+	[I915_EXEC_RENDER]	= RCS0,
+	[I915_EXEC_BLT]		= BCS0,
+	[I915_EXEC_BSD]		= VCS0,
+	[I915_EXEC_VEBOX]	= VECS0
 };
 
 static struct intel_engine_cs *
@@ -2109,7 +2109,7 @@ eb_select_engine(struct drm_i915_private *dev_priv,
 		return NULL;
 	}
 
-	if (user_ring_id == I915_EXEC_BSD && HAS_BSD2(dev_priv)) {
+	if (user_ring_id == I915_EXEC_BSD && HAS_ENGINE(dev_priv, VCS1)) {
 		unsigned int bsd_idx = args->flags & I915_EXEC_BSD_MASK;
 
 		if (bsd_idx == I915_EXEC_BSD_DEFAULT) {
@@ -2312,10 +2312,6 @@ i915_gem_do_execbuffer(struct drm_device *dev,
 	if (args->flags & I915_EXEC_IS_PINNED)
 		eb.batch_flags |= I915_DISPATCH_PINNED;
 
-	eb.engine = eb_select_engine(eb.i915, file, args);
-	if (!eb.engine)
-		return -EINVAL;
-
 	if (args->flags & I915_EXEC_FENCE_IN) {
 		in_fence = sync_file_get_fence(lower_32_bits(args->rsvd2));
 		if (!in_fence)
@@ -2340,6 +2336,12 @@ i915_gem_do_execbuffer(struct drm_device *dev,
 	if (unlikely(err))
 		goto err_destroy;
 
+	eb.engine = eb_select_engine(eb.i915, file, args);
+	if (!eb.engine) {
+		err = -EINVAL;
+		goto err_engine;
+	}
+
 	/*
 	 * Take a local wakeref for preparing to dispatch the execbuf as
 	 * we expect to access the hardware fairly frequently in the
@@ -2505,6 +2507,7 @@ i915_gem_do_execbuffer(struct drm_device *dev,
 	mutex_unlock(&dev->struct_mutex);
 err_rpm:
 	intel_runtime_pm_put(eb.i915, wakeref);
+err_engine:
 	i915_gem_context_put(eb.ctx);
 err_destroy:
 	eb_destroy(&eb);
diff --git a/drivers/gpu/drm/i915/i915_gem_fence_reg.c b/drivers/gpu/drm/i915/i915_gem_fence_reg.c
index e037e94792f3530e202e66c8d0b5c915bf49e454..65624b8e4d159a783937190d1ad5d68a368b51ec 100644
--- a/drivers/gpu/drm/i915/i915_gem_fence_reg.c
+++ b/drivers/gpu/drm/i915/i915_gem_fence_reg.c
@@ -210,6 +210,7 @@ static int fence_update(struct drm_i915_fence_reg *fence,
 			struct i915_vma *vma)
 {
 	intel_wakeref_t wakeref;
+	struct i915_vma *old;
 	int ret;
 
 	if (vma) {
@@ -229,49 +230,55 @@ static int fence_update(struct drm_i915_fence_reg *fence,
 			return ret;
 	}
 
-	if (fence->vma) {
-		struct i915_vma *old = fence->vma;
-
+	old = xchg(&fence->vma, NULL);
+	if (old) {
 		ret = i915_active_request_retire(&old->last_fence,
 					     &old->obj->base.dev->struct_mutex);
-		if (ret)
+		if (ret) {
+			fence->vma = old;
 			return ret;
+		}
 
 		i915_vma_flush_writes(old);
-	}
 
-	if (fence->vma && fence->vma != vma) {
-		/* Ensure that all userspace CPU access is completed before
+		/*
+		 * Ensure that all userspace CPU access is completed before
 		 * stealing the fence.
 		 */
-		GEM_BUG_ON(fence->vma->fence != fence);
-		i915_vma_revoke_mmap(fence->vma);
-
-		fence->vma->fence = NULL;
-		fence->vma = NULL;
+		if (old != vma) {
+			GEM_BUG_ON(old->fence != fence);
+			i915_vma_revoke_mmap(old);
+			old->fence = NULL;
+		}
 
 		list_move(&fence->link, &fence->i915->mm.fence_list);
 	}
 
-	/* We only need to update the register itself if the device is awake.
+	/*
+	 * We only need to update the register itself if the device is awake.
 	 * If the device is currently powered down, we will defer the write
 	 * to the runtime resume, see i915_gem_restore_fences().
+	 *
+	 * This only works for removing the fence register; on acquisition
+	 * the caller must hold the rpm wakeref. The fence register must
+	 * be cleared before we can use any other fences, to ensure that
+	 * the new fences do not overlap the elided clears and confuse the HW.
 	 */
 	wakeref = intel_runtime_pm_get_if_in_use(fence->i915);
-	if (wakeref) {
-		fence_write(fence, vma);
-		intel_runtime_pm_put(fence->i915, wakeref);
+	if (!wakeref) {
+		GEM_BUG_ON(vma);
+		return 0;
 	}
 
-	if (vma) {
-		if (fence->vma != vma) {
-			vma->fence = fence;
-			fence->vma = vma;
-		}
+	WRITE_ONCE(fence->vma, vma);
+	fence_write(fence, vma);
 
+	if (vma) {
+		vma->fence = fence;
 		list_move_tail(&fence->link, &fence->i915->mm.fence_list);
 	}
 
+	intel_runtime_pm_put(fence->i915, wakeref);
 	return 0;
 }
 
@@ -435,32 +442,6 @@ void i915_unreserve_fence(struct drm_i915_fence_reg *fence)
 	list_add(&fence->link, &fence->i915->mm.fence_list);
 }
 
-/**
- * i915_gem_revoke_fences - revoke fence state
- * @dev_priv: i915 device private
- *
- * Removes all GTT mmappings via the fence registers. This forces any user
- * of the fence to reacquire that fence before continuing with their access.
- * One use is during GPU reset where the fence register is lost and we need to
- * revoke concurrent userspace access via GTT mmaps until the hardware has been
- * reset and the fence registers have been restored.
- */
-void i915_gem_revoke_fences(struct drm_i915_private *dev_priv)
-{
-	int i;
-
-	lockdep_assert_held(&dev_priv->drm.struct_mutex);
-
-	for (i = 0; i < dev_priv->num_fence_regs; i++) {
-		struct drm_i915_fence_reg *fence = &dev_priv->fence_regs[i];
-
-		GEM_BUG_ON(fence->vma && fence->vma->fence != fence);
-
-		if (fence->vma)
-			i915_vma_revoke_mmap(fence->vma);
-	}
-}
-
 /**
  * i915_gem_restore_fences - restore fence state
  * @dev_priv: i915 device private
@@ -473,9 +454,10 @@ void i915_gem_restore_fences(struct drm_i915_private *dev_priv)
 {
 	int i;
 
+	rcu_read_lock(); /* keep obj alive as we dereference */
 	for (i = 0; i < dev_priv->num_fence_regs; i++) {
 		struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
-		struct i915_vma *vma = reg->vma;
+		struct i915_vma *vma = READ_ONCE(reg->vma);
 
 		GEM_BUG_ON(vma && vma->fence != reg);
 
@@ -483,18 +465,12 @@ void i915_gem_restore_fences(struct drm_i915_private *dev_priv)
 		 * Commit delayed tiling changes if we have an object still
 		 * attached to the fence, otherwise just clear the fence.
 		 */
-		if (vma && !i915_gem_object_is_tiled(vma->obj)) {
-			GEM_BUG_ON(!reg->dirty);
-			GEM_BUG_ON(i915_vma_has_userfault(vma));
-
-			list_move(&reg->link, &dev_priv->mm.fence_list);
-			vma->fence = NULL;
+		if (vma && !i915_gem_object_is_tiled(vma->obj))
 			vma = NULL;
-		}
 
 		fence_write(reg, vma);
-		reg->vma = vma;
 	}
+	rcu_read_unlock();
 }
 
 /**
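fence_update() above claims the old binding with xchg(&fence->vma, NULL) and only republishes it on failure, pairing with the new lockless READ_ONCE() reader in i915_gem_restore_fences(): a concurrent reader sees either the old vma or NULL, never a half-updated binding. A minimal sketch of that ownership-handover pattern (struct slot and both functions are illustrative names, not driver API):

/* Writer claims the pointer; a lockless reader never sees a torn state. */
struct slot {
	void *ptr;
};

static int writer_update(struct slot *s, void *replacement,
			 int (*quiesce)(void *))
{
	void *old = xchg(&s->ptr, NULL);	/* claim exclusive ownership */

	if (old && quiesce(old)) {		/* e.g. wait for last use */
		s->ptr = old;			/* failed: restore old value */
		return -EAGAIN;
	}

	WRITE_ONCE(s->ptr, replacement);	/* publish the new binding */
	return 0;
}

static void *reader_peek(struct slot *s)
{
	return READ_ONCE(s->ptr);	/* old value, new value, or NULL */
}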
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index d646d37eec2f8a751a3a7f1642bae26841d2b24f..b8055c8d4e719a2dca74eba377f740093cdbe3bd 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -584,7 +584,7 @@ setup_scratch_page(struct i915_address_space *vm, gfp_t gfp)
 	 * for all.
 	 */
 	size = I915_GTT_PAGE_SIZE_4K;
-	if (i915_vm_is_48bit(vm) &&
+	if (i915_vm_is_4lvl(vm) &&
 	    HAS_PAGE_SIZES(vm->i915, I915_GTT_PAGE_SIZE_64K)) {
 		size = I915_GTT_PAGE_SIZE_64K;
 		gfp |= __GFP_NOWARN;
@@ -613,7 +613,7 @@ setup_scratch_page(struct i915_address_space *vm, gfp_t gfp)
 
 		vm->scratch_page.page = page;
 		vm->scratch_page.daddr = addr;
-		vm->scratch_page.order = order;
+		vm->scratch_order = order;
 		return 0;
 
 unmap_page:
@@ -632,10 +632,11 @@ setup_scratch_page(struct i915_address_space *vm, gfp_t gfp)
 static void cleanup_scratch_page(struct i915_address_space *vm)
 {
 	struct i915_page_dma *p = &vm->scratch_page;
+	int order = vm->scratch_order;
 
-	dma_unmap_page(vm->dma, p->daddr, BIT(p->order) << PAGE_SHIFT,
+	dma_unmap_page(vm->dma, p->daddr, BIT(order) << PAGE_SHIFT,
 		       PCI_DMA_BIDIRECTIONAL);
-	__free_pages(p->page, p->order);
+	__free_pages(p->page, order);
 }
 
 static struct i915_page_table *alloc_pt(struct i915_address_space *vm)
@@ -726,18 +727,13 @@ static void __pdp_fini(struct i915_page_directory_pointer *pdp)
 	pdp->page_directory = NULL;
 }
 
-static inline bool use_4lvl(const struct i915_address_space *vm)
-{
-	return i915_vm_is_48bit(vm);
-}
-
 static struct i915_page_directory_pointer *
 alloc_pdp(struct i915_address_space *vm)
 {
 	struct i915_page_directory_pointer *pdp;
 	int ret = -ENOMEM;
 
-	GEM_BUG_ON(!use_4lvl(vm));
+	GEM_BUG_ON(!i915_vm_is_4lvl(vm));
 
 	pdp = kzalloc(sizeof(*pdp), GFP_KERNEL);
 	if (!pdp)
@@ -766,7 +762,7 @@ static void free_pdp(struct i915_address_space *vm,
 {
 	__pdp_fini(pdp);
 
-	if (!use_4lvl(vm))
+	if (!i915_vm_is_4lvl(vm))
 		return;
 
 	cleanup_px(vm, pdp);
@@ -791,14 +787,15 @@ static void gen8_initialize_pml4(struct i915_address_space *vm,
 	memset_p((void **)pml4->pdps, vm->scratch_pdp, GEN8_PML4ES_PER_PML4);
 }
 
-/* PDE TLBs are a pain to invalidate on GEN8+. When we modify
+/*
+ * PDE TLBs are a pain to invalidate on GEN8+. When we modify
  * the page table structures, we mark them dirty so that
  * context switching/execlist queuing code takes extra steps
  * to ensure that tlbs are flushed.
  */
 static void mark_tlbs_dirty(struct i915_hw_ppgtt *ppgtt)
 {
-	ppgtt->pd_dirty_rings = INTEL_INFO(ppgtt->vm.i915)->ring_mask;
+	ppgtt->pd_dirty_engines = ALL_ENGINES;
 }
 
 /* Removes entries from a single page table, releasing it if it's empty.
@@ -809,8 +806,6 @@ static bool gen8_ppgtt_clear_pt(const struct i915_address_space *vm,
 				u64 start, u64 length)
 {
 	unsigned int num_entries = gen8_pte_count(start, length);
-	unsigned int pte = gen8_pte_index(start);
-	unsigned int pte_end = pte + num_entries;
 	gen8_pte_t *vaddr;
 
 	GEM_BUG_ON(num_entries > pt->used_ptes);
@@ -820,8 +815,7 @@ static bool gen8_ppgtt_clear_pt(const struct i915_address_space *vm,
 		return true;
 
 	vaddr = kmap_atomic_px(pt);
-	while (pte < pte_end)
-		vaddr[pte++] = vm->scratch_pte;
+	memset64(vaddr + gen8_pte_index(start), vm->scratch_pte, num_entries);
 	kunmap_atomic(vaddr);
 
 	return false;
@@ -872,7 +866,7 @@ static void gen8_ppgtt_set_pdpe(struct i915_address_space *vm,
 	gen8_ppgtt_pdpe_t *vaddr;
 
 	pdp->page_directory[pdpe] = pd;
-	if (!use_4lvl(vm))
+	if (!i915_vm_is_4lvl(vm))
 		return;
 
 	vaddr = kmap_atomic_px(pdp);
@@ -937,7 +931,7 @@ static void gen8_ppgtt_clear_4lvl(struct i915_address_space *vm,
 	struct i915_page_directory_pointer *pdp;
 	unsigned int pml4e;
 
-	GEM_BUG_ON(!use_4lvl(vm));
+	GEM_BUG_ON(!i915_vm_is_4lvl(vm));
 
 	gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
 		GEM_BUG_ON(pdp == vm->scratch_pdp);
@@ -1219,7 +1213,7 @@ static int gen8_init_scratch(struct i915_address_space *vm)
 
 		GEM_BUG_ON(!clone->has_read_only);
 
-		vm->scratch_page.order = clone->scratch_page.order;
+		vm->scratch_order = clone->scratch_order;
 		vm->scratch_pte = clone->scratch_pte;
 		vm->scratch_pt  = clone->scratch_pt;
 		vm->scratch_pd  = clone->scratch_pd;
@@ -1248,7 +1242,7 @@ static int gen8_init_scratch(struct i915_address_space *vm)
 		goto free_pt;
 	}
 
-	if (use_4lvl(vm)) {
+	if (i915_vm_is_4lvl(vm)) {
 		vm->scratch_pdp = alloc_pdp(vm);
 		if (IS_ERR(vm->scratch_pdp)) {
 			ret = PTR_ERR(vm->scratch_pdp);
@@ -1258,7 +1252,7 @@ static int gen8_init_scratch(struct i915_address_space *vm)
 
 	gen8_initialize_pt(vm, vm->scratch_pt);
 	gen8_initialize_pd(vm, vm->scratch_pd);
-	if (use_4lvl(vm))
+	if (i915_vm_is_4lvl(vm))
 		gen8_initialize_pdp(vm, vm->scratch_pdp);
 
 	return 0;
@@ -1280,7 +1274,7 @@ static int gen8_ppgtt_notify_vgt(struct i915_hw_ppgtt *ppgtt, bool create)
 	enum vgt_g2v_type msg;
 	int i;
 
-	if (use_4lvl(vm)) {
+	if (i915_vm_is_4lvl(vm)) {
 		const u64 daddr = px_dma(&ppgtt->pml4);
 
 		I915_WRITE(vgtif_reg(pdp[0].lo), lower_32_bits(daddr));
@@ -1310,7 +1304,7 @@ static void gen8_free_scratch(struct i915_address_space *vm)
 	if (!vm->scratch_page.daddr)
 		return;
 
-	if (use_4lvl(vm))
+	if (i915_vm_is_4lvl(vm))
 		free_pdp(vm, vm->scratch_pdp);
 	free_pd(vm, vm->scratch_pd);
 	free_pt(vm, vm->scratch_pt);
@@ -1356,7 +1350,7 @@ static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
 	if (intel_vgpu_active(dev_priv))
 		gen8_ppgtt_notify_vgt(ppgtt, false);
 
-	if (use_4lvl(vm))
+	if (i915_vm_is_4lvl(vm))
 		gen8_ppgtt_cleanup_4lvl(ppgtt);
 	else
 		gen8_ppgtt_cleanup_3lvl(&ppgtt->vm, &ppgtt->pdp);
@@ -1519,6 +1513,23 @@ static int gen8_preallocate_top_level_pdp(struct i915_hw_ppgtt *ppgtt)
 	return -ENOMEM;
 }
 
+static void ppgtt_init(struct drm_i915_private *i915,
+		       struct i915_hw_ppgtt *ppgtt)
+{
+	kref_init(&ppgtt->ref);
+
+	ppgtt->vm.i915 = i915;
+	ppgtt->vm.dma = &i915->drm.pdev->dev;
+	ppgtt->vm.total = BIT_ULL(INTEL_INFO(i915)->ppgtt_size);
+
+	i915_address_space_init(&ppgtt->vm, VM_CLASS_PPGTT);
+
+	ppgtt->vm.vma_ops.bind_vma    = ppgtt_bind_vma;
+	ppgtt->vm.vma_ops.unbind_vma  = ppgtt_unbind_vma;
+	ppgtt->vm.vma_ops.set_pages   = ppgtt_set_pages;
+	ppgtt->vm.vma_ops.clear_pages = clear_pages;
+}
+
 /*
  * GEN8 legacy ppgtt programming is accomplished through a max 4 PDP registers
  * with a net effect resembling a 2-level page table in normal x86 terms. Each
@@ -1535,20 +1546,11 @@ static struct i915_hw_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915)
 	if (!ppgtt)
 		return ERR_PTR(-ENOMEM);
 
-	kref_init(&ppgtt->ref);
-
-	ppgtt->vm.i915 = i915;
-	ppgtt->vm.dma = &i915->drm.pdev->dev;
-
-	ppgtt->vm.total = HAS_FULL_48BIT_PPGTT(i915) ?
-		1ULL << 48 :
-		1ULL << 32;
+	ppgtt_init(i915, ppgtt);
 
 	/* From bdw, there is support for read-only pages in the PPGTT. */
 	ppgtt->vm.has_read_only = true;
 
-	i915_address_space_init(&ppgtt->vm, VM_CLASS_PPGTT);
-
 	/* There are only few exceptions for gen >=6. chv and bxt.
 	 * And we are not sure about the latter so play safe for now.
 	 */
@@ -1559,7 +1561,7 @@ static struct i915_hw_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915)
 	if (err)
 		goto err_free;
 
-	if (use_4lvl(&ppgtt->vm)) {
+	if (i915_vm_is_4lvl(&ppgtt->vm)) {
 		err = setup_px(&ppgtt->vm, &ppgtt->pml4);
 		if (err)
 			goto err_scratch;
@@ -1592,11 +1594,6 @@ static struct i915_hw_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915)
 
 	ppgtt->vm.cleanup = gen8_ppgtt_cleanup;
 
-	ppgtt->vm.vma_ops.bind_vma    = ppgtt_bind_vma;
-	ppgtt->vm.vma_ops.unbind_vma  = ppgtt_unbind_vma;
-	ppgtt->vm.vma_ops.set_pages   = ppgtt_set_pages;
-	ppgtt->vm.vma_ops.clear_pages = clear_pages;
-
 	return ppgtt;
 
 err_scratch:
@@ -1672,8 +1669,7 @@ static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
 
 	while (num_entries) {
 		struct i915_page_table *pt = ppgtt->base.pd.page_table[pde++];
-		const unsigned int end = min(pte + num_entries, GEN6_PTES);
-		const unsigned int count = end - pte;
+		const unsigned int count = min(num_entries, GEN6_PTES - pte);
 		gen6_pte_t *vaddr;
 
 		GEM_BUG_ON(pt == vm->scratch_pt);
@@ -1693,9 +1689,7 @@ static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
 		 */
 
 		vaddr = kmap_atomic_px(pt);
-		do {
-			vaddr[pte++] = scratch_pte;
-		} while (pte < end);
+		memset32(vaddr + pte, scratch_pte, count);
 		kunmap_atomic(vaddr);
 
 		pte = 0;
@@ -1913,7 +1907,7 @@ static struct i915_vma *pd_vma_create(struct gen6_hw_ppgtt *ppgtt, int size)
 	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
 	GEM_BUG_ON(size > ggtt->vm.total);
 
-	vma = kmem_cache_zalloc(i915->vmas, GFP_KERNEL);
+	vma = i915_vma_alloc();
 	if (!vma)
 		return ERR_PTR(-ENOMEM);
 
@@ -1991,25 +1985,13 @@ static struct i915_hw_ppgtt *gen6_ppgtt_create(struct drm_i915_private *i915)
 	if (!ppgtt)
 		return ERR_PTR(-ENOMEM);
 
-	kref_init(&ppgtt->base.ref);
-
-	ppgtt->base.vm.i915 = i915;
-	ppgtt->base.vm.dma = &i915->drm.pdev->dev;
-
-	ppgtt->base.vm.total = I915_PDES * GEN6_PTES * I915_GTT_PAGE_SIZE;
-
-	i915_address_space_init(&ppgtt->base.vm, VM_CLASS_PPGTT);
+	ppgtt_init(i915, &ppgtt->base);
 
 	ppgtt->base.vm.allocate_va_range = gen6_alloc_va_range;
 	ppgtt->base.vm.clear_range = gen6_ppgtt_clear_range;
 	ppgtt->base.vm.insert_entries = gen6_ppgtt_insert_entries;
 	ppgtt->base.vm.cleanup = gen6_ppgtt_cleanup;
 
-	ppgtt->base.vm.vma_ops.bind_vma    = ppgtt_bind_vma;
-	ppgtt->base.vm.vma_ops.unbind_vma  = ppgtt_unbind_vma;
-	ppgtt->base.vm.vma_ops.set_pages   = ppgtt_set_pages;
-	ppgtt->base.vm.vma_ops.clear_pages = clear_pages;
-
 	ppgtt->base.vm.pte_encode = ggtt->vm.pte_encode;
 
 	err = gen6_ppgtt_init_scratch(ppgtt);
@@ -3701,7 +3683,7 @@ i915_get_ggtt_vma_pages(struct i915_vma *vma)
 	}
 
 	ret = 0;
-	if (unlikely(IS_ERR(vma->pages))) {
+	if (IS_ERR(vma->pages)) {
 		ret = PTR_ERR(vma->pages);
 		vma->pages = NULL;
 		DRM_ERROR("Failed to get pages for VMA view type %u (%d)!\n",
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index 03ade71b8d9a046a5c5581f38eb967018e3ab1a3..35f21a2ae36c6ab7fdc2a20215afeaa1053eff02 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -213,7 +213,6 @@ struct i915_vma;
 
 struct i915_page_dma {
 	struct page *page;
-	int order;
 	union {
 		dma_addr_t daddr;
 
@@ -293,6 +292,7 @@ struct i915_address_space {
 #define VM_CLASS_PPGTT 1
 
 	u64 scratch_pte;
+	int scratch_order;
 	struct i915_page_dma scratch_page;
 	struct i915_page_table *scratch_pt;
 	struct i915_page_directory *scratch_pd;
@@ -348,7 +348,7 @@ struct i915_address_space {
 #define i915_is_ggtt(vm) ((vm)->is_ggtt)
 
 static inline bool
-i915_vm_is_48bit(const struct i915_address_space *vm)
+i915_vm_is_4lvl(const struct i915_address_space *vm)
 {
 	return (vm->total - 1) >> 32;
 }
@@ -356,7 +356,7 @@ i915_vm_is_48bit(const struct i915_address_space *vm)
 static inline bool
 i915_vm_has_scratch_64K(struct i915_address_space *vm)
 {
-	return vm->scratch_page.order == get_order(I915_GTT_PAGE_SIZE_64K);
+	return vm->scratch_order == get_order(I915_GTT_PAGE_SIZE_64K);
 }
 
 /* The Graphics Translation Table is the way in which GEN hardware translates a
@@ -390,7 +390,7 @@ struct i915_hw_ppgtt {
 	struct i915_address_space vm;
 	struct kref ref;
 
-	unsigned long pd_dirty_rings;
+	unsigned long pd_dirty_engines;
 	union {
 		struct i915_pml4 pml4;		/* GEN8+ & 48b PPGTT */
 		struct i915_page_directory_pointer pdp;	/* GEN8+ */
@@ -488,7 +488,7 @@ static inline u32 gen6_pde_index(u32 addr)
 static inline unsigned int
 i915_pdpes_per_pdp(const struct i915_address_space *vm)
 {
-	if (i915_vm_is_48bit(vm))
+	if (i915_vm_is_4lvl(vm))
 		return GEN8_PML4ES_PER_PML4;
 
 	return GEN8_3LVL_PDPES;
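The rename from i915_vm_is_48bit() to i915_vm_is_4lvl() keeps the same test: (vm->total - 1) >> 32 is nonzero exactly when the address space spans more than 32 bits of VA, which on gen8+ is when the fourth page-table level (the PML4) is needed. A worked example of the predicate:

/* i915_vm_is_4lvl() boils down to "is the VA space bigger than 4GiB?". */
static bool needs_4lvl(u64 total)
{
	return (total - 1) >> 32;
}

/* needs_4lvl(1ull << 32) == false: exactly 4GiB fits in 3 levels.  */
/* needs_4lvl(1ull << 48) == true:  full 48b ppgtt needs the PML4.  */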
diff --git a/drivers/gpu/drm/i915/i915_gem_internal.c b/drivers/gpu/drm/i915/i915_gem_internal.c
index fddde1033e747ee65f0914114975f9d8d29c88de..ab627ed1269c701a80670c848825321ee9137bb0 100644
--- a/drivers/gpu/drm/i915/i915_gem_internal.c
+++ b/drivers/gpu/drm/i915/i915_gem_internal.c
@@ -193,7 +193,7 @@ i915_gem_object_create_internal(struct drm_i915_private *i915,
 	if (overflows_type(size, obj->base.size))
 		return ERR_PTR(-E2BIG);
 
-	obj = i915_gem_object_alloc(i915);
+	obj = i915_gem_object_alloc();
 	if (!obj)
 		return ERR_PTR(-ENOMEM);
 
diff --git a/drivers/gpu/drm/i915/i915_gem_object.c b/drivers/gpu/drm/i915/i915_gem_object.c
index aab8cdd80e6d339eb950409a5f235cd9b43a98e4..ac6a5ab8458665ae24c5645188a90be293612516 100644
--- a/drivers/gpu/drm/i915/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/i915_gem_object.c
@@ -24,6 +24,22 @@
 
 #include "i915_drv.h"
 #include "i915_gem_object.h"
+#include "i915_globals.h"
+
+static struct i915_global_object {
+	struct i915_global base;
+	struct kmem_cache *slab_objects;
+} global;
+
+struct drm_i915_gem_object *i915_gem_object_alloc(void)
+{
+	return kmem_cache_zalloc(global.slab_objects, GFP_KERNEL);
+}
+
+void i915_gem_object_free(struct drm_i915_gem_object *obj)
+{
+	return kmem_cache_free(global.slab_objects, obj);
+}
 
 /**
  * Mark up the object's coherency levels for a given cache_level
@@ -46,3 +62,29 @@ void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj,
 	obj->cache_dirty =
 		!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE);
 }
+
+static void i915_global_objects_shrink(void)
+{
+	kmem_cache_shrink(global.slab_objects);
+}
+
+static void i915_global_objects_exit(void)
+{
+	kmem_cache_destroy(global.slab_objects);
+}
+
+static struct i915_global_object global = { {
+	.shrink = i915_global_objects_shrink,
+	.exit = i915_global_objects_exit,
+} };
+
+int __init i915_global_objects_init(void)
+{
+	global.slab_objects =
+		KMEM_CACHE(drm_i915_gem_object, SLAB_HWCACHE_ALIGN);
+	if (!global.slab_objects)
+		return -ENOMEM;
+
+	i915_global_register(&global.base);
+	return 0;
+}
diff --git a/drivers/gpu/drm/i915/i915_gem_object.h b/drivers/gpu/drm/i915/i915_gem_object.h
index fab040331cdb250d44b5791833da357fa5c82206..1a24dc97e4fdb0abe820e5fd3ffcb399bfc1ac2d 100644
--- a/drivers/gpu/drm/i915/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/i915_gem_object.h
@@ -304,6 +304,9 @@ to_intel_bo(struct drm_gem_object *gem)
 	return container_of(gem, struct drm_i915_gem_object, base);
 }
 
+struct drm_i915_gem_object *i915_gem_object_alloc(void);
+void i915_gem_object_free(struct drm_i915_gem_object *obj);
+
 /**
  * i915_gem_object_lookup_rcu - look up a temporary GEM object from its handle
  * @filp: DRM file private date
@@ -500,4 +503,3 @@ void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj,
 void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj);
 
 #endif
-
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c
index 90baf9086d0a49f0fd2667a20ec337f520361051..91196348c68c2b9a267f614c6639b8c2853f4757 100644
--- a/drivers/gpu/drm/i915/i915_gem_render_state.c
+++ b/drivers/gpu/drm/i915/i915_gem_render_state.c
@@ -42,7 +42,7 @@ struct intel_render_state {
 static const struct intel_renderstate_rodata *
 render_state_get_rodata(const struct intel_engine_cs *engine)
 {
-	if (engine->id != RCS)
+	if (engine->id != RCS0)
 		return NULL;
 
 	switch (INTEL_GEN(engine->i915)) {
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index 74a9661479ca54b8c04e6141e14d992585aa0d52..0a8082cfc761a967776b55b140c2800e5f6ce572 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -565,7 +565,7 @@ _i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,
 	struct drm_i915_gem_object *obj;
 	unsigned int cache_level;
 
-	obj = i915_gem_object_alloc(dev_priv);
+	obj = i915_gem_object_alloc();
 	if (obj == NULL)
 		return NULL;
 
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index 1d3f9a31ad61921c17923029712f7a943e664bd6..ad0087127144e6c334d0f66a4c32ccff9a111a11 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -795,7 +795,7 @@ i915_gem_userptr_ioctl(struct drm_device *dev,
 			return -ENODEV;
 	}
 
-	obj = i915_gem_object_alloc(dev_priv);
+	obj = i915_gem_object_alloc();
 	if (obj == NULL)
 		return -ENOMEM;
 
diff --git a/drivers/gpu/drm/i915/i915_globals.c b/drivers/gpu/drm/i915/i915_globals.c
new file mode 100644
index 0000000000000000000000000000000000000000..2f5c72e2a9d15f1281b113ce97e15fd439541f34
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_globals.c
@@ -0,0 +1,135 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+
+#include "i915_active.h"
+#include "i915_gem_context.h"
+#include "i915_gem_object.h"
+#include "i915_globals.h"
+#include "i915_request.h"
+#include "i915_scheduler.h"
+#include "i915_vma.h"
+
+static LIST_HEAD(globals);
+
+void __init i915_global_register(struct i915_global *global)
+{
+	GEM_BUG_ON(!global->shrink);
+	GEM_BUG_ON(!global->exit);
+
+	list_add_tail(&global->link, &globals);
+}
+
+static void __i915_globals_cleanup(void)
+{
+	struct i915_global *global, *next;
+
+	list_for_each_entry_safe_reverse(global, next, &globals, link)
+		global->exit();
+}
+
+static __initconst int (* const initfn[])(void) = {
+	i915_global_active_init,
+	i915_global_context_init,
+	i915_global_gem_context_init,
+	i915_global_objects_init,
+	i915_global_request_init,
+	i915_global_scheduler_init,
+	i915_global_vma_init,
+};
+
+int __init i915_globals_init(void)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(initfn); i++) {
+		int err;
+
+		err = initfn[i]();
+		if (err) {
+			__i915_globals_cleanup();
+			return err;
+		}
+	}
+
+	return 0;
+}
+
+static void i915_globals_shrink(void)
+{
+	struct i915_global *global;
+
+	/*
+	 * kmem_cache_shrink() discards empty slabs and reorders partially
+	 * filled slabs to prioritise allocating from the mostly full slabs,
+	 * with the aim of reducing fragmentation.
+	 */
+	list_for_each_entry(global, &globals, link)
+		global->shrink();
+}
+
+static atomic_t active;
+static atomic_t epoch;
+struct park_work {
+	struct rcu_work work;
+	int epoch;
+};
+
+static void __i915_globals_park(struct work_struct *work)
+{
+	struct park_work *wrk = container_of(work, typeof(*wrk), work.work);
+
+	/* Confirm nothing woke up in the last grace period */
+	if (wrk->epoch == atomic_read(&epoch))
+		i915_globals_shrink();
+
+	kfree(wrk);
+}
+
+void i915_globals_park(void)
+{
+	struct park_work *wrk;
+
+	/*
+	 * Defer shrinking the global slab caches (and other work) until
+	 * after a RCU grace period has completed with no activity. This
+	 * is to try and reduce the latency impact on the consumers caused
+	 * by us shrinking the caches at the same time as they are trying to
+	 * allocate, with the assumption being that if we idle long enough
+	 * for an RCU grace period to elapse since the last use, it is likely
+	 * to be longer until we need the caches again.
+	 */
+	if (!atomic_dec_and_test(&active))
+		return;
+
+	wrk = kmalloc(sizeof(*wrk), GFP_KERNEL);
+	if (!wrk)
+		return;
+
+	wrk->epoch = atomic_inc_return(&epoch);
+	INIT_RCU_WORK(&wrk->work, __i915_globals_park);
+	queue_rcu_work(system_wq, &wrk->work);
+}
+
+void i915_globals_unpark(void)
+{
+	atomic_inc(&epoch);
+	atomic_inc(&active);
+}
+
+void __exit i915_globals_exit(void)
+{
+	/* Flush any residual park_work */
+	rcu_barrier();
+	flush_scheduled_work();
+
+	__i915_globals_cleanup();
+
+	/* And ensure that our DESTROY_BY_RCU slabs are truly destroyed */
+	rcu_barrier();
+}
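The park/unpark pair above uses the epoch counter to cancel stale shrink work: both paths bump the epoch, and the deferred worker only shrinks if the epoch it captured is still current once the RCU grace period ends. A control-flow sketch, assuming the device starts parked (active == 0):

/* Illustration of the epoch check in __i915_globals_park(). */
void globals_park_example(void)
{
	i915_globals_unpark();	/* active: 0 -> 1, epoch bumped */

	i915_globals_park();	/* active: 1 -> 0; queues rcu work
				 * with wrk->epoch = E */

	i915_globals_unpark();	/* epoch becomes E + 1 before the
				 * grace period elapses */

	/*
	 * When __i915_globals_park() finally runs, wrk->epoch (E) no
	 * longer matches atomic_read(&epoch) (E + 1), so the shrink is
	 * skipped and the slab caches stay warm for the active user.
	 */

	i915_globals_park();	/* queues fresh work with epoch E + 2 */
}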
diff --git a/drivers/gpu/drm/i915/i915_globals.h b/drivers/gpu/drm/i915/i915_globals.h
new file mode 100644
index 0000000000000000000000000000000000000000..04c1ce107fc066cde75c2e2cb9bd416006ac2121
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_globals.h
@@ -0,0 +1,35 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef _I915_GLOBALS_H_
+#define _I915_GLOBALS_H_
+
+typedef void (*i915_global_func_t)(void);
+
+struct i915_global {
+	struct list_head link;
+
+	i915_global_func_t shrink;
+	i915_global_func_t exit;
+};
+
+void i915_global_register(struct i915_global *global);
+
+int i915_globals_init(void);
+void i915_globals_park(void);
+void i915_globals_unpark(void);
+void i915_globals_exit(void);
+
+/* constructors */
+int i915_global_active_init(void);
+int i915_global_context_init(void);
+int i915_global_gem_context_init(void);
+int i915_global_objects_init(void);
+int i915_global_request_init(void);
+int i915_global_scheduler_init(void);
+int i915_global_vma_init(void);
+
+#endif /* _I915_GLOBALS_H_ */
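
i915_globals_park()/i915_globals_unpark() form a reference count on GT activity, with the epoch counter invalidating any shrink that was queued before a wakeup. A sketch of the assumed call pattern (the real call sites live in the GT power management code, outside this hunk):

static void gt_unparked(void)
{
	/* Bumps the epoch first, cancelling any pending deferred shrink */
	i915_globals_unpark();
}

static void gt_parked(void)
{
	/* Last reference dropped: shrink after a quiet RCU grace period */
	i915_globals_park();
}
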
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index aa6791255252f1800b2609285fb399653625b000..26bac517e38360ea9f1e4113690b4c41d07045b8 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -380,19 +380,16 @@ static void print_error_buffers(struct drm_i915_error_state_buf *m,
 	err_printf(m, "%s [%d]:\n", name, count);
 
 	while (count--) {
-		err_printf(m, "    %08x_%08x %8u %02x %02x %02x",
+		err_printf(m, "    %08x_%08x %8u %02x %02x",
 			   upper_32_bits(err->gtt_offset),
 			   lower_32_bits(err->gtt_offset),
 			   err->size,
 			   err->read_domains,
-			   err->write_domain,
-			   err->wseqno);
+			   err->write_domain);
 		err_puts(m, tiling_flag(err->tiling));
 		err_puts(m, dirty_flag(err->dirty));
 		err_puts(m, purgeable_flag(err->purgeable));
 		err_puts(m, err->userptr ? " userptr" : "");
-		err_puts(m, err->engine != -1 ? " " : "");
-		err_puts(m, engine_name(m->i915, err->engine));
 		err_puts(m, i915_cache_level_str(m->i915, err->cache_level));
 
 		if (err->name)
@@ -414,7 +411,7 @@ static void error_print_instdone(struct drm_i915_error_state_buf *m,
 	err_printf(m, "  INSTDONE: 0x%08x\n",
 		   ee->instdone.instdone);
 
-	if (ee->engine_id != RCS || INTEL_GEN(m->i915) <= 3)
+	if (ee->engine_id != RCS0 || INTEL_GEN(m->i915) <= 3)
 		return;
 
 	err_printf(m, "  SC_INSTDONE: 0x%08x\n",
@@ -434,11 +431,6 @@ static void error_print_instdone(struct drm_i915_error_state_buf *m,
 			   ee->instdone.row[slice][subslice]);
 }
 
-static const char *bannable(const struct drm_i915_error_context *ctx)
-{
-	return ctx->bannable ? "" : " (unbannable)";
-}
-
 static void error_print_request(struct drm_i915_error_state_buf *m,
 				const char *prefix,
 				const struct drm_i915_error_request *erq,
@@ -447,9 +439,8 @@ static void error_print_request(struct drm_i915_error_state_buf *m,
 	if (!erq->seqno)
 		return;
 
-	err_printf(m, "%s pid %d, ban score %d, seqno %8x:%08x%s%s, prio %d, emitted %dms, start %08x, head %08x, tail %08x\n",
-		   prefix, erq->pid, erq->ban_score,
-		   erq->context, erq->seqno,
+	err_printf(m, "%s pid %d, seqno %8x:%08x%s%s, prio %d, emitted %dms, start %08x, head %08x, tail %08x\n",
+		   prefix, erq->pid, erq->context, erq->seqno,
 		   test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
 			    &erq->flags) ? "!" : "",
 		   test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
@@ -463,10 +454,9 @@ static void error_print_context(struct drm_i915_error_state_buf *m,
 				const char *header,
 				const struct drm_i915_error_context *ctx)
 {
-	err_printf(m, "%s%s[%d] user_handle %d hw_id %d, prio %d, ban score %d%s guilty %d active %d\n",
+	err_printf(m, "%s%s[%d] user_handle %d hw_id %d, prio %d, guilty %d active %d\n",
 		   header, ctx->comm, ctx->pid, ctx->handle, ctx->hw_id,
-		   ctx->sched_attr.priority, ctx->ban_score, bannable(ctx),
-		   ctx->guilty, ctx->active);
+		   ctx->sched_attr.priority, ctx->guilty, ctx->active);
 }
 
 static void error_print_engine(struct drm_i915_error_state_buf *m,
@@ -512,13 +502,6 @@ static void error_print_engine(struct drm_i915_error_state_buf *m,
 	if (INTEL_GEN(m->i915) >= 6) {
 		err_printf(m, "  RC PSMI: 0x%08x\n", ee->rc_psmi);
 		err_printf(m, "  FAULT_REG: 0x%08x\n", ee->fault_reg);
-		err_printf(m, "  SYNC_0: 0x%08x\n",
-			   ee->semaphore_mboxes[0]);
-		err_printf(m, "  SYNC_1: 0x%08x\n",
-			   ee->semaphore_mboxes[1]);
-		if (HAS_VEBOX(m->i915))
-			err_printf(m, "  SYNC_2: 0x%08x\n",
-				   ee->semaphore_mboxes[2]);
 	}
 	if (HAS_PPGTT(m->i915)) {
 		err_printf(m, "  GFX_MODE: 0x%08x\n", ee->vm_info.gfx_mode);
@@ -533,8 +516,6 @@ static void error_print_engine(struct drm_i915_error_state_buf *m,
 				   ee->vm_info.pp_dir_base);
 		}
 	}
-	err_printf(m, "  seqno: 0x%08x\n", ee->seqno);
-	err_printf(m, "  last_seqno: 0x%08x\n", ee->last_seqno);
 	err_printf(m, "  ring->head: 0x%08x\n", ee->cpu_ring_head);
 	err_printf(m, "  ring->tail: 0x%08x\n", ee->cpu_ring_tail);
 	err_printf(m, "  hangcheck timestamp: %dms (%lu%s)\n",
@@ -688,12 +669,10 @@ static void __err_print_to_sgl(struct drm_i915_error_state_buf *m,
 		if (!error->engine[i].context.pid)
 			continue;
 
-		err_printf(m, "Active process (on ring %s): %s [%d], score %d%s\n",
+		err_printf(m, "Active process (on ring %s): %s [%d]\n",
 			   engine_name(m->i915, i),
 			   error->engine[i].context.comm,
-			   error->engine[i].context.pid,
-			   error->engine[i].context.ban_score,
-			   bannable(&error->engine[i].context));
+			   error->engine[i].context.pid);
 	}
 	err_printf(m, "Reset count: %u\n", error->reset_count);
 	err_printf(m, "Suspend count: %u\n", error->suspend_count);
@@ -779,13 +758,11 @@ static void __err_print_to_sgl(struct drm_i915_error_state_buf *m,
 		if (obj) {
 			err_puts(m, m->i915->engine[i]->name);
 			if (ee->context.pid)
-				err_printf(m, " (submitted by %s [%d], ctx %d [%d], score %d%s)",
+				err_printf(m, " (submitted by %s [%d], ctx %d [%d])",
 					   ee->context.comm,
 					   ee->context.pid,
 					   ee->context.handle,
-					   ee->context.hw_id,
-					   ee->context.ban_score,
-					   bannable(&ee->context));
+					   ee->context.hw_id);
 			err_printf(m, " --- gtt_offset = 0x%08x %08x\n",
 				   upper_32_bits(obj->gtt_offset),
 				   lower_32_bits(obj->gtt_offset));
@@ -1061,27 +1038,6 @@ i915_error_object_create(struct drm_i915_private *i915,
 	return dst;
 }
 
-/* The error capture is special as tries to run underneath the normal
- * locking rules - so we use the raw version of the i915_active_request lookup.
- */
-static inline u32
-__active_get_seqno(struct i915_active_request *active)
-{
-	struct i915_request *request;
-
-	request = __i915_active_request_peek(active);
-	return request ? request->global_seqno : 0;
-}
-
-static inline int
-__active_get_engine_id(struct i915_active_request *active)
-{
-	struct i915_request *request;
-
-	request = __i915_active_request_peek(active);
-	return request ? request->engine->id : -1;
-}
-
 static void capture_bo(struct drm_i915_error_buffer *err,
 		       struct i915_vma *vma)
 {
@@ -1090,9 +1046,6 @@ static void capture_bo(struct drm_i915_error_buffer *err,
 	err->size = obj->base.size;
 	err->name = obj->base.name;
 
-	err->wseqno = __active_get_seqno(&obj->frontbuffer_write);
-	err->engine = __active_get_engine_id(&obj->frontbuffer_write);
-
 	err->gtt_offset = vma->node.start;
 	err->read_domains = obj->read_domains;
 	err->write_domain = obj->write_domain;
@@ -1178,18 +1131,6 @@ static void gem_record_fences(struct i915_gpu_state *error)
 	error->nfence = i;
 }
 
-static void gen6_record_semaphore_state(struct intel_engine_cs *engine,
-					struct drm_i915_error_engine *ee)
-{
-	struct drm_i915_private *dev_priv = engine->i915;
-
-	ee->semaphore_mboxes[0] = I915_READ(RING_SYNC_0(engine->mmio_base));
-	ee->semaphore_mboxes[1] = I915_READ(RING_SYNC_1(engine->mmio_base));
-	if (HAS_VEBOX(dev_priv))
-		ee->semaphore_mboxes[2] =
-			I915_READ(RING_SYNC_2(engine->mmio_base));
-}
-
 static void error_record_engine_registers(struct i915_gpu_state *error,
 					  struct intel_engine_cs *engine,
 					  struct drm_i915_error_engine *ee)
@@ -1198,12 +1139,10 @@ static void error_record_engine_registers(struct i915_gpu_state *error,
 
 	if (INTEL_GEN(dev_priv) >= 6) {
 		ee->rc_psmi = I915_READ(RING_PSMI_CTL(engine->mmio_base));
-		if (INTEL_GEN(dev_priv) >= 8) {
+		if (INTEL_GEN(dev_priv) >= 8)
 			ee->fault_reg = I915_READ(GEN8_RING_FAULT_REG);
-		} else {
-			gen6_record_semaphore_state(engine, ee);
+		else
 			ee->fault_reg = I915_READ(RING_FAULT_REG(engine));
-		}
 	}
 
 	if (INTEL_GEN(dev_priv) >= 4) {
@@ -1227,8 +1166,6 @@ static void error_record_engine_registers(struct i915_gpu_state *error,
 
 	ee->instpm = I915_READ(RING_INSTPM(engine->mmio_base));
 	ee->acthd = intel_engine_get_active_head(engine);
-	ee->seqno = intel_engine_get_seqno(engine);
-	ee->last_seqno = intel_engine_last_submit(engine);
 	ee->start = I915_READ_START(engine);
 	ee->head = I915_READ_HEAD(engine);
 	ee->tail = I915_READ_TAIL(engine);
@@ -1242,16 +1179,17 @@ static void error_record_engine_registers(struct i915_gpu_state *error,
 		if (IS_GEN(dev_priv, 7)) {
 			switch (engine->id) {
 			default:
-			case RCS:
+				MISSING_CASE(engine->id);
+			case RCS0:
 				mmio = RENDER_HWS_PGA_GEN7;
 				break;
-			case BCS:
+			case BCS0:
 				mmio = BLT_HWS_PGA_GEN7;
 				break;
-			case VCS:
+			case VCS0:
 				mmio = BSD_HWS_PGA_GEN7;
 				break;
-			case VECS:
+			case VECS0:
 				mmio = VEBOX_HWS_PGA_GEN7;
 				break;
 			}
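
Note the idiom in the hunk above: placing MISSING_CASE() under the default label immediately before case RCS0 means an unexpected engine id warns once and then falls through to the render mapping as a safe default, rather than silently aliasing. Distilled to its shape:

switch (engine->id) {
default:
	MISSING_CASE(engine->id);	/* warn once on an unknown engine */
	/* fall through */		/* ...then use render as the fallback */
case RCS0:
	mmio = RENDER_HWS_PGA_GEN7;
	break;
case BCS0:
	mmio = BLT_HWS_PGA_GEN7;
	break;
}
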
@@ -1299,10 +1237,9 @@ static void record_request(struct i915_request *request,
 	struct i915_gem_context *ctx = request->gem_context;
 
 	erq->flags = request->fence.flags;
-	erq->context = ctx->hw_id;
+	erq->context = request->fence.context;
+	erq->seqno = request->fence.seqno;
 	erq->sched_attr = request->sched.attr;
-	erq->ban_score = atomic_read(&ctx->ban_score);
-	erq->seqno = request->global_seqno;
 	erq->jiffies = request->emitted_jiffies;
 	erq->start = i915_ggtt_offset(request->ring->vma);
 	erq->head = request->head;
@@ -1396,8 +1333,6 @@ static void record_context(struct drm_i915_error_context *e,
 	e->handle = ctx->user_handle;
 	e->hw_id = ctx->hw_id;
 	e->sched_attr = ctx->sched;
-	e->ban_score = atomic_read(&ctx->ban_score);
-	e->bannable = i915_gem_context_is_bannable(ctx);
 	e->guilty = atomic_read(&ctx->guilty_count);
 	e->active = atomic_read(&ctx->active_count);
 }
@@ -1476,7 +1411,7 @@ static void gem_record_rings(struct i915_gpu_state *error)
 		error_record_engine_registers(error, engine, ee);
 		error_record_engine_execlists(engine, ee);
 
-		request = i915_gem_find_active_request(engine);
+		request = intel_engine_find_active_request(engine);
 		if (request) {
 			struct i915_gem_context *ctx = request->gem_context;
 			struct intel_ring *ring;
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h
index 53b1f22dd365689c048df41fcc1ea8a4c148fa06..99d6b7b270c2033bc2d8fa6771c51615482a90bb 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.h
+++ b/drivers/gpu/drm/i915/i915_gpu_error.h
@@ -94,8 +94,6 @@ struct i915_gpu_state {
 		u32 cpu_ring_head;
 		u32 cpu_ring_tail;
 
-		u32 last_seqno;
-
 		/* Register state */
 		u32 start;
 		u32 tail;
@@ -108,13 +106,11 @@ struct i915_gpu_state {
 		u32 bbstate;
 		u32 instpm;
 		u32 instps;
-		u32 seqno;
 		u64 bbaddr;
 		u64 acthd;
 		u32 fault_reg;
 		u64 faddr;
 		u32 rc_psmi; /* sleep state */
-		u32 semaphore_mboxes[I915_NUM_ENGINES - 1];
 		struct intel_instdone instdone;
 
 		struct drm_i915_error_context {
@@ -122,10 +118,8 @@ struct i915_gpu_state {
 			pid_t pid;
 			u32 handle;
 			u32 hw_id;
-			int ban_score;
 			int active;
 			int guilty;
-			bool bannable;
 			struct i915_sched_attr sched_attr;
 		} context;
 
@@ -149,7 +143,6 @@ struct i915_gpu_state {
 			long jiffies;
 			pid_t pid;
 			u32 context;
-			int ban_score;
 			u32 seqno;
 			u32 start;
 			u32 head;
@@ -170,7 +163,6 @@ struct i915_gpu_state {
 	struct drm_i915_error_buffer {
 		u32 size;
 		u32 name;
-		u32 wseqno;
 		u64 gtt_offset;
 		u32 read_domains;
 		u32 write_domain;
@@ -179,7 +171,6 @@ struct i915_gpu_state {
 		u32 dirty:1;
 		u32 purgeable:1;
 		u32 userptr:1;
-		s32 engine:4;
 		u32 cache_level:3;
 	} *active_bo[I915_NUM_ENGINES], *pinned_bo;
 	u32 active_bo_count[I915_NUM_ENGINES], pinned_bo_count;
@@ -204,39 +195,13 @@ struct i915_gpu_error {
 
 	atomic_t pending_fb_pin;
 
-	/**
-	 * State variable controlling the reset flow and count
-	 *
-	 * This is a counter which gets incremented when reset is triggered,
-	 *
-	 * Before the reset commences, the I915_RESET_BACKOFF bit is set
-	 * meaning that any waiters holding onto the struct_mutex should
-	 * relinquish the lock immediately in order for the reset to start.
-	 *
-	 * If reset is not completed successfully, the I915_WEDGE bit is
-	 * set meaning that hardware is terminally sour and there is no
-	 * recovery. All waiters on the reset_queue will be woken when
-	 * that happens.
-	 *
-	 * This counter is used by the wait_seqno code to notice that reset
-	 * event happened and it needs to restart the entire ioctl (since most
-	 * likely the seqno it waited for won't ever signal anytime soon).
-	 *
-	 * This is important for lock-free wait paths, where no contended lock
-	 * naturally enforces the correct ordering between the bail-out of the
-	 * waiter and the gpu reset work code.
-	 */
-	unsigned long reset_count;
-
 	/**
 	 * flags: Control various stages of the GPU reset
 	 *
-	 * #I915_RESET_BACKOFF - When we start a reset, we want to stop any
-	 * other users acquiring the struct_mutex. To do this we set the
-	 * #I915_RESET_BACKOFF bit in the error flags when we detect a reset
-	 * and then check for that bit before acquiring the struct_mutex (in
-	 * i915_mutex_lock_interruptible()?). I915_RESET_BACKOFF serves a
-	 * secondary role in preventing two concurrent global reset attempts.
+	 * #I915_RESET_BACKOFF - When we start a global reset, we need to
+	 * serialise with any other users attempting to do the same, and
+	 * protect any global resources that may be clobbered by the reset
+	 * (such as FENCE registers).
 	 *
 	 * #I915_RESET_ENGINE[num_engines] - Since the driver doesn't need to
 	 * acquire the struct_mutex to reset an engine, we need an explicit
@@ -255,6 +220,9 @@ struct i915_gpu_error {
 #define I915_RESET_ENGINE	2
 #define I915_WEDGED		(BITS_PER_LONG - 1)
 
+	/** Number of times the device has been reset (global) */
+	u32 reset_count;
+
 	/** Number of times an engine has been reset */
 	u32 reset_engine_count[I915_NUM_ENGINES];
 
@@ -272,6 +240,8 @@ struct i915_gpu_error {
 	 */
 	wait_queue_head_t reset_queue;
 
+	struct srcu_struct reset_backoff_srcu;
+
 	struct i915_gpu_restart *restart;
 };
 
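
The new reset_backoff_srcu is only declared here; its intended use (an assumption based on the SRCU API, with the wiring done elsewhere in the series) is to let lock-free readers exclude a global reset cheaply:

/* Reader: a path that must not observe a global reset in progress */
static int reset_backoff_read_lock(struct i915_gpu_error *error)
{
	return srcu_read_lock(&error->reset_backoff_srcu);
}

static void reset_backoff_read_unlock(struct i915_gpu_error *error, int tag)
{
	srcu_read_unlock(&error->reset_backoff_srcu, tag);
}

/* Writer: the global reset path drains all readers before proceeding */
static void reset_backoff_wait(struct i915_gpu_error *error)
{
	synchronize_srcu(&error->reset_backoff_srcu);
}
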
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 441d2674b2725227116b5cd852779e86920f5fa0..82d487189a3406e57ab7865d8fe82d680444f541 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -1288,6 +1288,18 @@ static void gen6_pm_rps_work(struct work_struct *work)
 
 	rps->last_adj = adj;
 
+	/*
+	 * Limit deboosting and boosting to keep ourselves at the extremes
+	 * when in the respective power modes (i.e. slowly decrease frequencies
+	 * while in the HIGH_POWER zone and slowly increase frequencies while
+	 * in the LOW_POWER zone). On idle, we will hit the timeout and drop
+	 * to the next level quickly, and conversely if busy we expect to
+	 * hit a waitboost and rapidly switch into max power.
+	 */
+	if ((adj < 0 && rps->power.mode == HIGH_POWER) ||
+	    (adj > 0 && rps->power.mode == LOW_POWER))
+		rps->last_adj = 0;
+
 	/* sysfs frequency interfaces may have snuck in while servicing the
 	 * interrupt
 	 */
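
In other words, the walk stays exponential only while it moves towards the active power zone; against the zone it restarts from the smallest step each time. A standalone restatement of the policy (names local to this sketch):

enum power_zone { LOW_POWER, BETWEEN, HIGH_POWER };

static int clamp_last_adj(int adj, enum power_zone zone)
{
	/* Fighting the zone: forget the trend, restart from +/-1 next time */
	if ((adj < 0 && zone == HIGH_POWER) ||
	    (adj > 0 && zone == LOW_POWER))
		return 0;

	return adj;	/* with the zone: let successive steps keep growing */
}
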
@@ -1415,20 +1427,20 @@ static void ilk_gt_irq_handler(struct drm_i915_private *dev_priv,
 			       u32 gt_iir)
 {
 	if (gt_iir & GT_RENDER_USER_INTERRUPT)
-		intel_engine_breadcrumbs_irq(dev_priv->engine[RCS]);
+		intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]);
 	if (gt_iir & ILK_BSD_USER_INTERRUPT)
-		intel_engine_breadcrumbs_irq(dev_priv->engine[VCS]);
+		intel_engine_breadcrumbs_irq(dev_priv->engine[VCS0]);
 }
 
 static void snb_gt_irq_handler(struct drm_i915_private *dev_priv,
 			       u32 gt_iir)
 {
 	if (gt_iir & GT_RENDER_USER_INTERRUPT)
-		intel_engine_breadcrumbs_irq(dev_priv->engine[RCS]);
+		intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]);
 	if (gt_iir & GT_BSD_USER_INTERRUPT)
-		intel_engine_breadcrumbs_irq(dev_priv->engine[VCS]);
+		intel_engine_breadcrumbs_irq(dev_priv->engine[VCS0]);
 	if (gt_iir & GT_BLT_USER_INTERRUPT)
-		intel_engine_breadcrumbs_irq(dev_priv->engine[BCS]);
+		intel_engine_breadcrumbs_irq(dev_priv->engine[BCS0]);
 
 	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
 		      GT_BSD_CS_ERROR_INTERRUPT |
@@ -1463,8 +1475,8 @@ static void gen8_gt_irq_ack(struct drm_i915_private *i915,
 
 #define GEN8_GT_IRQS (GEN8_GT_RCS_IRQ | \
 		      GEN8_GT_BCS_IRQ | \
+		      GEN8_GT_VCS0_IRQ | \
 		      GEN8_GT_VCS1_IRQ | \
-		      GEN8_GT_VCS2_IRQ | \
 		      GEN8_GT_VECS_IRQ | \
 		      GEN8_GT_PM_IRQ | \
 		      GEN8_GT_GUC_IRQ)
@@ -1475,7 +1487,7 @@ static void gen8_gt_irq_ack(struct drm_i915_private *i915,
 			raw_reg_write(regs, GEN8_GT_IIR(0), gt_iir[0]);
 	}
 
-	if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
+	if (master_ctl & (GEN8_GT_VCS0_IRQ | GEN8_GT_VCS1_IRQ)) {
 		gt_iir[1] = raw_reg_read(regs, GEN8_GT_IIR(1));
 		if (likely(gt_iir[1]))
 			raw_reg_write(regs, GEN8_GT_IIR(1), gt_iir[1]);
@@ -1498,21 +1510,21 @@ static void gen8_gt_irq_handler(struct drm_i915_private *i915,
 				u32 master_ctl, u32 gt_iir[4])
 {
 	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
-		gen8_cs_irq_handler(i915->engine[RCS],
+		gen8_cs_irq_handler(i915->engine[RCS0],
 				    gt_iir[0] >> GEN8_RCS_IRQ_SHIFT);
-		gen8_cs_irq_handler(i915->engine[BCS],
+		gen8_cs_irq_handler(i915->engine[BCS0],
 				    gt_iir[0] >> GEN8_BCS_IRQ_SHIFT);
 	}
 
-	if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
-		gen8_cs_irq_handler(i915->engine[VCS],
+	if (master_ctl & (GEN8_GT_VCS0_IRQ | GEN8_GT_VCS1_IRQ)) {
+		gen8_cs_irq_handler(i915->engine[VCS0],
+				    gt_iir[1] >> GEN8_VCS0_IRQ_SHIFT);
+		gen8_cs_irq_handler(i915->engine[VCS1],
 				    gt_iir[1] >> GEN8_VCS1_IRQ_SHIFT);
-		gen8_cs_irq_handler(i915->engine[VCS2],
-				    gt_iir[1] >> GEN8_VCS2_IRQ_SHIFT);
 	}
 
 	if (master_ctl & GEN8_GT_VECS_IRQ) {
-		gen8_cs_irq_handler(i915->engine[VECS],
+		gen8_cs_irq_handler(i915->engine[VECS0],
 				    gt_iir[3] >> GEN8_VECS_IRQ_SHIFT);
 	}
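
For readers tracking the renames through this hunk and the rest of the patch, engine instances are now numbered from zero within each class, so the old identifiers map as:

/*
 *   RCS  -> RCS0     BCS  -> BCS0     VCS  -> VCS0
 *   VCS2 -> VCS1     VECS -> VECS0
 */
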
 
@@ -1693,7 +1705,9 @@ static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
 {
 	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
 	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
-	u32 crcs[5];
+	u32 crcs[5] = { crc0, crc1, crc2, crc3, crc4 };
+
+	trace_intel_pipe_crc(crtc, crcs);
 
 	spin_lock(&pipe_crc->lock);
 	/*
@@ -1712,11 +1726,6 @@ static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
 	}
 	spin_unlock(&pipe_crc->lock);
 
-	crcs[0] = crc0;
-	crcs[1] = crc1;
-	crcs[2] = crc2;
-	crcs[3] = crc3;
-	crcs[4] = crc4;
 	drm_crtc_add_crc_entry(&crtc->base, true,
 				drm_crtc_accurate_vblank_count(&crtc->base),
 				crcs);
@@ -1792,13 +1801,11 @@ static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
 	if (INTEL_GEN(dev_priv) >= 8)
 		return;
 
-	if (HAS_VEBOX(dev_priv)) {
-		if (pm_iir & PM_VEBOX_USER_INTERRUPT)
-			intel_engine_breadcrumbs_irq(dev_priv->engine[VECS]);
+	if (pm_iir & PM_VEBOX_USER_INTERRUPT)
+		intel_engine_breadcrumbs_irq(dev_priv->engine[VECS0]);
 
-		if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
-			DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
-	}
+	if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
+		DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
 }
 
 static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 gt_iir)
@@ -2667,6 +2674,25 @@ static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
 		DRM_ERROR("Unexpected DE HPD interrupt 0x%08x\n", iir);
 }
 
+static u32 gen8_de_port_aux_mask(struct drm_i915_private *dev_priv)
+{
+	u32 mask = GEN8_AUX_CHANNEL_A;
+
+	if (INTEL_GEN(dev_priv) >= 9)
+		mask |= GEN9_AUX_CHANNEL_B |
+			GEN9_AUX_CHANNEL_C |
+			GEN9_AUX_CHANNEL_D;
+
+	if (IS_CNL_WITH_PORT_F(dev_priv))
+		mask |= CNL_AUX_CHANNEL_F;
+
+	if (INTEL_GEN(dev_priv) >= 11)
+		mask |= ICL_AUX_CHANNEL_E |
+			CNL_AUX_CHANNEL_F;
+
+	return mask;
+}
+
 static irqreturn_t
 gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
 {
@@ -2722,20 +2748,7 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
 			I915_WRITE(GEN8_DE_PORT_IIR, iir);
 			ret = IRQ_HANDLED;
 
-			tmp_mask = GEN8_AUX_CHANNEL_A;
-			if (INTEL_GEN(dev_priv) >= 9)
-				tmp_mask |= GEN9_AUX_CHANNEL_B |
-					    GEN9_AUX_CHANNEL_C |
-					    GEN9_AUX_CHANNEL_D;
-
-			if (INTEL_GEN(dev_priv) >= 11)
-				tmp_mask |= ICL_AUX_CHANNEL_E;
-
-			if (IS_CNL_WITH_PORT_F(dev_priv) ||
-			    INTEL_GEN(dev_priv) >= 11)
-				tmp_mask |= CNL_AUX_CHANNEL_F;
-
-			if (iir & tmp_mask) {
+			if (iir & gen8_de_port_aux_mask(dev_priv)) {
 				dp_aux_irq_handler(dev_priv);
 				found = true;
 			}
@@ -2816,11 +2829,9 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
 			I915_WRITE(SDEIIR, iir);
 			ret = IRQ_HANDLED;
 
-			if (HAS_PCH_ICP(dev_priv))
+			if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
 				icp_irq_handler(dev_priv, iir);
-			else if (HAS_PCH_SPT(dev_priv) ||
-				 HAS_PCH_KBP(dev_priv) ||
-				 HAS_PCH_CNP(dev_priv))
+			else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
 				spt_irq_handler(dev_priv, iir);
 			else
 				cpt_irq_handler(dev_priv, iir);
@@ -3402,7 +3413,7 @@ static void gen11_irq_reset(struct drm_device *dev)
 	GEN3_IRQ_RESET(GEN11_GU_MISC_);
 	GEN3_IRQ_RESET(GEN8_PCU_);
 
-	if (HAS_PCH_ICP(dev_priv))
+	if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
 		GEN3_IRQ_RESET(SDE);
 }
 
@@ -3583,7 +3594,7 @@ static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv)
 
 	gen11_hpd_detection_setup(dev_priv);
 
-	if (HAS_PCH_ICP(dev_priv))
+	if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
 		icp_hpd_irq_setup(dev_priv);
 }
 
@@ -3767,7 +3778,7 @@ static void gen5_gt_irq_postinstall(struct drm_device *dev)
 		 * RPS interrupts will get enabled/disabled on demand when RPS
 		 * itself is enabled/disabled.
 		 */
-		if (HAS_VEBOX(dev_priv)) {
+		if (HAS_ENGINE(dev_priv, VECS0)) {
 			pm_irqs |= PM_VEBOX_USER_INTERRUPT;
 			dev_priv->pm_ier |= PM_VEBOX_USER_INTERRUPT;
 		}
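
HAS_VEBOX() checks give way to direct engine-mask tests, which work for any engine rather than requiring a per-engine feature flag. The macro is assumed to take the usual shape:

#define HAS_ENGINE(dev_priv, id) \
	(INTEL_INFO(dev_priv)->engine_mask & BIT(id))
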
@@ -3879,18 +3890,21 @@ static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
 {
 	/* These are interrupts we'll toggle with the ring mask register */
 	u32 gt_interrupts[] = {
-		GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
-			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
-			GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
-			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT,
-		GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
-			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
-			GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT |
-			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT,
+		(GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
+		 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
+		 GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
+		 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT),
+
+		(GT_RENDER_USER_INTERRUPT << GEN8_VCS0_IRQ_SHIFT |
+		 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS0_IRQ_SHIFT |
+		 GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
+		 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT),
+
 		0,
-		GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT |
-			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT
-		};
+
+		(GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT |
+		 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT)
+	};
 
 	dev_priv->pm_ier = 0x0;
 	dev_priv->pm_imr = ~dev_priv->pm_ier;
@@ -4036,7 +4050,7 @@ static int gen11_irq_postinstall(struct drm_device *dev)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 gu_misc_masked = GEN11_GU_MISC_GSE;
 
-	if (HAS_PCH_ICP(dev_priv))
+	if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
 		icp_irq_postinstall(dev);
 
 	gen11_gt_irq_postinstall(dev_priv);
@@ -4218,7 +4232,7 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
 		I915_WRITE16(IIR, iir);
 
 		if (iir & I915_USER_INTERRUPT)
-			intel_engine_breadcrumbs_irq(dev_priv->engine[RCS]);
+			intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]);
 
 		if (iir & I915_MASTER_ERROR_INTERRUPT)
 			i8xx_error_irq_handler(dev_priv, eir, eir_stuck);
@@ -4326,7 +4340,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
 		I915_WRITE(IIR, iir);
 
 		if (iir & I915_USER_INTERRUPT)
-			intel_engine_breadcrumbs_irq(dev_priv->engine[RCS]);
+			intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]);
 
 		if (iir & I915_MASTER_ERROR_INTERRUPT)
 			i9xx_error_irq_handler(dev_priv, eir, eir_stuck);
@@ -4471,10 +4485,10 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
 		I915_WRITE(IIR, iir);
 
 		if (iir & I915_USER_INTERRUPT)
-			intel_engine_breadcrumbs_irq(dev_priv->engine[RCS]);
+			intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]);
 
 		if (iir & I915_BSD_USER_INTERRUPT)
-			intel_engine_breadcrumbs_irq(dev_priv->engine[VCS]);
+			intel_engine_breadcrumbs_irq(dev_priv->engine[VCS0]);
 
 		if (iir & I915_MASTER_ERROR_INTERRUPT)
 			i9xx_error_irq_handler(dev_priv, eir, eir_stuck);
@@ -4605,8 +4619,7 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
 		dev->driver->disable_vblank = gen8_disable_vblank;
 		if (IS_GEN9_LP(dev_priv))
 			dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
-		else if (HAS_PCH_SPT(dev_priv) || HAS_PCH_KBP(dev_priv) ||
-			 HAS_PCH_CNP(dev_priv))
+		else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
 			dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
 		else
 			dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index 66f82f3f050f34aa1a6756800b2436ceab50c9ab..7a6054eadb8e99604140d019086ecc5093c3915a 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -28,14 +28,44 @@
 
 #include <drm/drm_drv.h>
 
-#include "i915_active.h"
 #include "i915_drv.h"
+#include "i915_globals.h"
 #include "i915_selftest.h"
 
 #define PLATFORM(x) .platform = (x), .platform_mask = BIT(x)
 #define GEN(x) .gen = (x), .gen_mask = BIT((x) - 1)
 
-#define GEN_DEFAULT_PIPEOFFSETS \
+#define I845_PIPE_OFFSETS \
+	.pipe_offsets = { \
+		[TRANSCODER_A] = PIPE_A_OFFSET,	\
+	}, \
+	.trans_offsets = { \
+		[TRANSCODER_A] = TRANSCODER_A_OFFSET, \
+	}
+
+#define I9XX_PIPE_OFFSETS \
+	.pipe_offsets = { \
+		[TRANSCODER_A] = PIPE_A_OFFSET,	\
+		[TRANSCODER_B] = PIPE_B_OFFSET, \
+	}, \
+	.trans_offsets = { \
+		[TRANSCODER_A] = TRANSCODER_A_OFFSET, \
+		[TRANSCODER_B] = TRANSCODER_B_OFFSET, \
+	}
+
+#define IVB_PIPE_OFFSETS \
+	.pipe_offsets = { \
+		[TRANSCODER_A] = PIPE_A_OFFSET,	\
+		[TRANSCODER_B] = PIPE_B_OFFSET, \
+		[TRANSCODER_C] = PIPE_C_OFFSET, \
+	}, \
+	.trans_offsets = { \
+		[TRANSCODER_A] = TRANSCODER_A_OFFSET, \
+		[TRANSCODER_B] = TRANSCODER_B_OFFSET, \
+		[TRANSCODER_C] = TRANSCODER_C_OFFSET, \
+	}
+
+#define HSW_PIPE_OFFSETS \
 	.pipe_offsets = { \
 		[TRANSCODER_A] = PIPE_A_OFFSET,	\
 		[TRANSCODER_B] = PIPE_B_OFFSET, \
@@ -49,7 +79,7 @@
 		[TRANSCODER_EDP] = TRANSCODER_EDP_OFFSET, \
 	}
 
-#define GEN_CHV_PIPEOFFSETS \
+#define CHV_PIPE_OFFSETS \
 	.pipe_offsets = { \
 		[TRANSCODER_A] = PIPE_A_OFFSET, \
 		[TRANSCODER_B] = PIPE_B_OFFSET, \
@@ -61,11 +91,30 @@
 		[TRANSCODER_C] = CHV_TRANSCODER_C_OFFSET, \
 	}
 
-#define CURSOR_OFFSETS \
-	.cursor_offsets = { CURSOR_A_OFFSET, CURSOR_B_OFFSET, CHV_CURSOR_C_OFFSET }
+#define I845_CURSOR_OFFSETS \
+	.cursor_offsets = { \
+		[PIPE_A] = CURSOR_A_OFFSET, \
+	}
+
+#define I9XX_CURSOR_OFFSETS \
+	.cursor_offsets = { \
+		[PIPE_A] = CURSOR_A_OFFSET, \
+		[PIPE_B] = CURSOR_B_OFFSET, \
+	}
+
+#define CHV_CURSOR_OFFSETS \
+	.cursor_offsets = { \
+		[PIPE_A] = CURSOR_A_OFFSET, \
+		[PIPE_B] = CURSOR_B_OFFSET, \
+		[PIPE_C] = CHV_CURSOR_C_OFFSET, \
+	}
 
 #define IVB_CURSOR_OFFSETS \
-	.cursor_offsets = { CURSOR_A_OFFSET, IVB_CURSOR_B_OFFSET, IVB_CURSOR_C_OFFSET }
+	.cursor_offsets = { \
+		[PIPE_A] = CURSOR_A_OFFSET, \
+		[PIPE_B] = IVB_CURSOR_B_OFFSET, \
+		[PIPE_C] = IVB_CURSOR_C_OFFSET, \
+	}
 
 #define BDW_COLORS \
 	.color = { .degamma_lut_size = 512, .gamma_lut_size = 512 }
@@ -75,7 +124,7 @@
 		   .gamma_lut_tests = DRM_COLOR_LUT_NON_DECREASING, \
 	}
 #define GLK_COLORS \
-	.color = { .degamma_lut_size = 0, .gamma_lut_size = 1024, \
+	.color = { .degamma_lut_size = 33, .gamma_lut_size = 1024, \
 		   .degamma_lut_tests = DRM_COLOR_LUT_NON_DECREASING | \
 					DRM_COLOR_LUT_EQUAL_CHANNELS, \
 	}
@@ -85,7 +134,25 @@
 #define GEN_DEFAULT_PAGE_SIZES \
 	.page_sizes = I915_GTT_PAGE_SIZE_4K
 
-#define GEN2_FEATURES \
+#define I830_FEATURES \
+	GEN(2), \
+	.is_mobile = 1, \
+	.num_pipes = 2, \
+	.display.has_overlay = 1, \
+	.display.cursor_needs_physical = 1, \
+	.display.overlay_needs_physical = 1, \
+	.display.has_gmch = 1, \
+	.gpu_reset_clobbers_display = true, \
+	.hws_needs_physical = 1, \
+	.unfenced_needs_alignment = 1, \
+	.engine_mask = BIT(RCS0), \
+	.has_snoop = true, \
+	.has_coherent_ggtt = false, \
+	I9XX_PIPE_OFFSETS, \
+	I9XX_CURSOR_OFFSETS, \
+	GEN_DEFAULT_PAGE_SIZES
+
+#define I845_FEATURES \
 	GEN(2), \
 	.num_pipes = 1, \
 	.display.has_overlay = 1, \
@@ -94,37 +161,31 @@
 	.gpu_reset_clobbers_display = true, \
 	.hws_needs_physical = 1, \
 	.unfenced_needs_alignment = 1, \
-	.ring_mask = RENDER_RING, \
+	.engine_mask = BIT(RCS0), \
 	.has_snoop = true, \
 	.has_coherent_ggtt = false, \
-	GEN_DEFAULT_PIPEOFFSETS, \
-	GEN_DEFAULT_PAGE_SIZES, \
-	CURSOR_OFFSETS
+	I845_PIPE_OFFSETS, \
+	I845_CURSOR_OFFSETS, \
+	GEN_DEFAULT_PAGE_SIZES
 
 static const struct intel_device_info intel_i830_info = {
-	GEN2_FEATURES,
+	I830_FEATURES,
 	PLATFORM(INTEL_I830),
-	.is_mobile = 1,
-	.display.cursor_needs_physical = 1,
-	.num_pipes = 2, /* legal, last one wins */
 };
 
 static const struct intel_device_info intel_i845g_info = {
-	GEN2_FEATURES,
+	I845_FEATURES,
 	PLATFORM(INTEL_I845G),
 };
 
 static const struct intel_device_info intel_i85x_info = {
-	GEN2_FEATURES,
+	I830_FEATURES,
 	PLATFORM(INTEL_I85X),
-	.is_mobile = 1,
-	.num_pipes = 2, /* legal, last one wins */
-	.display.cursor_needs_physical = 1,
 	.display.has_fbc = 1,
 };
 
 static const struct intel_device_info intel_i865g_info = {
-	GEN2_FEATURES,
+	I845_FEATURES,
 	PLATFORM(INTEL_I865G),
 };
 
@@ -133,12 +194,12 @@ static const struct intel_device_info intel_i865g_info = {
 	.num_pipes = 2, \
 	.display.has_gmch = 1, \
 	.gpu_reset_clobbers_display = true, \
-	.ring_mask = RENDER_RING, \
+	.engine_mask = BIT(RCS0), \
 	.has_snoop = true, \
 	.has_coherent_ggtt = true, \
-	GEN_DEFAULT_PIPEOFFSETS, \
-	GEN_DEFAULT_PAGE_SIZES, \
-	CURSOR_OFFSETS
+	I9XX_PIPE_OFFSETS, \
+	I9XX_CURSOR_OFFSETS, \
+	GEN_DEFAULT_PAGE_SIZES
 
 static const struct intel_device_info intel_i915g_info = {
 	GEN3_FEATURES,
@@ -210,12 +271,12 @@ static const struct intel_device_info intel_pineview_info = {
 	.display.has_hotplug = 1, \
 	.display.has_gmch = 1, \
 	.gpu_reset_clobbers_display = true, \
-	.ring_mask = RENDER_RING, \
+	.engine_mask = BIT(RCS0), \
 	.has_snoop = true, \
 	.has_coherent_ggtt = true, \
-	GEN_DEFAULT_PIPEOFFSETS, \
-	GEN_DEFAULT_PAGE_SIZES, \
-	CURSOR_OFFSETS
+	I9XX_PIPE_OFFSETS, \
+	I9XX_CURSOR_OFFSETS, \
+	GEN_DEFAULT_PAGE_SIZES
 
 static const struct intel_device_info intel_i965g_info = {
 	GEN4_FEATURES,
@@ -239,7 +300,7 @@ static const struct intel_device_info intel_i965gm_info = {
 static const struct intel_device_info intel_g45_info = {
 	GEN4_FEATURES,
 	PLATFORM(INTEL_G45),
-	.ring_mask = RENDER_RING | BSD_RING,
+	.engine_mask = BIT(RCS0) | BIT(VCS0),
 	.gpu_reset_clobbers_display = false,
 };
 
@@ -249,7 +310,7 @@ static const struct intel_device_info intel_gm45_info = {
 	.is_mobile = 1,
 	.display.has_fbc = 1,
 	.display.supports_tv = 1,
-	.ring_mask = RENDER_RING | BSD_RING,
+	.engine_mask = BIT(RCS0) | BIT(VCS0),
 	.gpu_reset_clobbers_display = false,
 };
 
@@ -257,14 +318,14 @@ static const struct intel_device_info intel_gm45_info = {
 	GEN(5), \
 	.num_pipes = 2, \
 	.display.has_hotplug = 1, \
-	.ring_mask = RENDER_RING | BSD_RING, \
+	.engine_mask = BIT(RCS0) | BIT(VCS0), \
 	.has_snoop = true, \
 	.has_coherent_ggtt = true, \
 	/* ilk does support rc6, but we do not implement [power] contexts */ \
 	.has_rc6 = 0, \
-	GEN_DEFAULT_PIPEOFFSETS, \
-	GEN_DEFAULT_PAGE_SIZES, \
-	CURSOR_OFFSETS
+	I9XX_PIPE_OFFSETS, \
+	I9XX_CURSOR_OFFSETS, \
+	GEN_DEFAULT_PAGE_SIZES
 
 static const struct intel_device_info intel_ironlake_d_info = {
 	GEN5_FEATURES,
@@ -283,15 +344,16 @@ static const struct intel_device_info intel_ironlake_m_info = {
 	.num_pipes = 2, \
 	.display.has_hotplug = 1, \
 	.display.has_fbc = 1, \
-	.ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
+	.engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0), \
 	.has_coherent_ggtt = true, \
 	.has_llc = 1, \
 	.has_rc6 = 1, \
 	.has_rc6p = 1, \
-	.ppgtt = INTEL_PPGTT_ALIASING, \
-	GEN_DEFAULT_PIPEOFFSETS, \
-	GEN_DEFAULT_PAGE_SIZES, \
-	CURSOR_OFFSETS
+	.ppgtt_type = INTEL_PPGTT_ALIASING, \
+	.ppgtt_size = 31, \
+	I9XX_PIPE_OFFSETS, \
+	I9XX_CURSOR_OFFSETS, \
+	GEN_DEFAULT_PAGE_SIZES
 
 #define SNB_D_PLATFORM \
 	GEN6_FEATURES, \
@@ -328,15 +390,16 @@ static const struct intel_device_info intel_sandybridge_m_gt2_info = {
 	.num_pipes = 3, \
 	.display.has_hotplug = 1, \
 	.display.has_fbc = 1, \
-	.ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
+	.engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0), \
 	.has_coherent_ggtt = true, \
 	.has_llc = 1, \
 	.has_rc6 = 1, \
 	.has_rc6p = 1, \
-	.ppgtt = INTEL_PPGTT_FULL, \
-	GEN_DEFAULT_PIPEOFFSETS, \
-	GEN_DEFAULT_PAGE_SIZES, \
-	IVB_CURSOR_OFFSETS
+	.ppgtt_type = INTEL_PPGTT_FULL, \
+	.ppgtt_size = 31, \
+	IVB_PIPE_OFFSETS, \
+	IVB_CURSOR_OFFSETS, \
+	GEN_DEFAULT_PAGE_SIZES
 
 #define IVB_D_PLATFORM \
 	GEN7_FEATURES, \
@@ -386,24 +449,26 @@ static const struct intel_device_info intel_valleyview_info = {
 	.has_rc6 = 1,
 	.display.has_gmch = 1,
 	.display.has_hotplug = 1,
-	.ppgtt = INTEL_PPGTT_FULL,
+	.ppgtt_type = INTEL_PPGTT_FULL,
+	.ppgtt_size = 31,
 	.has_snoop = true,
 	.has_coherent_ggtt = false,
-	.ring_mask = RENDER_RING | BSD_RING | BLT_RING,
+	.engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0),
 	.display_mmio_offset = VLV_DISPLAY_BASE,
+	I9XX_PIPE_OFFSETS,
+	I9XX_CURSOR_OFFSETS,
 	GEN_DEFAULT_PAGE_SIZES,
-	GEN_DEFAULT_PIPEOFFSETS,
-	CURSOR_OFFSETS
 };
 
 #define G75_FEATURES  \
 	GEN7_FEATURES, \
-	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, \
+	.engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0), \
 	.display.has_ddi = 1, \
 	.has_fpga_dbg = 1, \
 	.display.has_psr = 1, \
 	.display.has_dp_mst = 1, \
 	.has_rc6p = 0 /* RC6p removed-by HSW */, \
+	HSW_PIPE_OFFSETS, \
 	.has_runtime_pm = 1
 
 #define HSW_PLATFORM \
@@ -433,7 +498,8 @@ static const struct intel_device_info intel_haswell_gt3_info = {
 	.page_sizes = I915_GTT_PAGE_SIZE_4K | \
 		      I915_GTT_PAGE_SIZE_2M, \
 	.has_logical_ring_contexts = 1, \
-	.ppgtt = INTEL_PPGTT_FULL_4LVL, \
+	.ppgtt_type = INTEL_PPGTT_FULL, \
+	.ppgtt_size = 48, \
 	.has_64bit_reloc = 1, \
 	.has_reset_engine = 1
 
@@ -462,7 +528,8 @@ static const struct intel_device_info intel_broadwell_rsvd_info = {
 static const struct intel_device_info intel_broadwell_gt3_info = {
 	BDW_PLATFORM,
 	.gt = 3,
-	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
+	.engine_mask =
+		BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS1),
 };
 
 static const struct intel_device_info intel_cherryview_info = {
@@ -471,21 +538,22 @@ static const struct intel_device_info intel_cherryview_info = {
 	.num_pipes = 3,
 	.display.has_hotplug = 1,
 	.is_lp = 1,
-	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
+	.engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0),
 	.has_64bit_reloc = 1,
 	.has_runtime_pm = 1,
 	.has_rc6 = 1,
 	.has_logical_ring_contexts = 1,
 	.display.has_gmch = 1,
-	.ppgtt = INTEL_PPGTT_FULL,
+	.ppgtt_type = INTEL_PPGTT_FULL,
+	.ppgtt_size = 32,
 	.has_reset_engine = 1,
 	.has_snoop = true,
 	.has_coherent_ggtt = false,
 	.display_mmio_offset = VLV_DISPLAY_BASE,
-	GEN_DEFAULT_PAGE_SIZES,
-	GEN_CHV_PIPEOFFSETS,
-	CURSOR_OFFSETS,
+	CHV_PIPE_OFFSETS,
+	CHV_CURSOR_OFFSETS,
 	CHV_COLORS,
+	GEN_DEFAULT_PAGE_SIZES,
 };
 
 #define GEN9_DEFAULT_PAGE_SIZES \
@@ -521,7 +589,8 @@ static const struct intel_device_info intel_skylake_gt2_info = {
 
 #define SKL_GT3_PLUS_PLATFORM \
 	SKL_PLATFORM, \
-	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING
+	.engine_mask = \
+		BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS1)
 
 
 static const struct intel_device_info intel_skylake_gt3_info = {
@@ -538,7 +607,7 @@ static const struct intel_device_info intel_skylake_gt4_info = {
 	GEN(9), \
 	.is_lp = 1, \
 	.display.has_hotplug = 1, \
-	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, \
+	.engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0), \
 	.num_pipes = 3, \
 	.has_64bit_reloc = 1, \
 	.display.has_ddi = 1, \
@@ -552,15 +621,16 @@ static const struct intel_device_info intel_skylake_gt4_info = {
 	.has_logical_ring_contexts = 1, \
 	.has_logical_ring_preemption = 1, \
 	.has_guc = 1, \
-	.ppgtt = INTEL_PPGTT_FULL_4LVL, \
+	.ppgtt_type = INTEL_PPGTT_FULL, \
+	.ppgtt_size = 48, \
 	.has_reset_engine = 1, \
 	.has_snoop = true, \
 	.has_coherent_ggtt = false, \
 	.display.has_ipc = 1, \
-	GEN9_DEFAULT_PAGE_SIZES, \
-	GEN_DEFAULT_PIPEOFFSETS, \
+	HSW_PIPE_OFFSETS, \
 	IVB_CURSOR_OFFSETS, \
-	BDW_COLORS
+	BDW_COLORS, \
+	GEN9_DEFAULT_PAGE_SIZES
 
 static const struct intel_device_info intel_broxton_info = {
 	GEN9_LP_FEATURES,
@@ -592,7 +662,8 @@ static const struct intel_device_info intel_kabylake_gt2_info = {
 static const struct intel_device_info intel_kabylake_gt3_info = {
 	KBL_PLATFORM,
 	.gt = 3,
-	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
+	.engine_mask =
+		BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS1),
 };
 
 #define CFL_PLATFORM \
@@ -612,7 +683,8 @@ static const struct intel_device_info intel_coffeelake_gt2_info = {
 static const struct intel_device_info intel_coffeelake_gt3_info = {
 	CFL_PLATFORM,
 	.gt = 3,
-	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
+	.engine_mask =
+		BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS1),
 };
 
 #define GEN10_FEATURES \
@@ -648,13 +720,14 @@ static const struct intel_device_info intel_cannonlake_info = {
 	}, \
 	GEN(11), \
 	.ddb_size = 2048, \
-	.has_logical_ring_elsq = 1
+	.has_logical_ring_elsq = 1, \
+	.color = { .degamma_lut_size = 33, .gamma_lut_size = 1024 }
 
 static const struct intel_device_info intel_icelake_11_info = {
 	GEN11_FEATURES,
 	PLATFORM(INTEL_ICELAKE),
-	.is_alpha_support = 1,
-	.ring_mask = RENDER_RING | BLT_RING | VEBOX_RING | BSD_RING | BSD3_RING,
+	.engine_mask =
+		BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS0) | BIT(VCS2),
 };
 
 #undef GEN
@@ -722,6 +795,8 @@ static const struct pci_device_id pciidlist[] = {
 	INTEL_WHL_U_GT2_IDS(&intel_coffeelake_gt2_info),
 	INTEL_AML_CFL_GT2_IDS(&intel_coffeelake_gt2_info),
 	INTEL_WHL_U_GT3_IDS(&intel_coffeelake_gt3_info),
+	INTEL_CML_GT1_IDS(&intel_coffeelake_gt1_info),
+	INTEL_CML_GT2_IDS(&intel_coffeelake_gt2_info),
 	INTEL_CNL_IDS(&intel_cannonlake_info),
 	INTEL_ICL_11_IDS(&intel_icelake_11_info),
 	{0, 0, 0}
@@ -801,7 +876,9 @@ static int __init i915_init(void)
 	bool use_kms = true;
 	int err;
 
-	i915_global_active_init();
+	err = i915_globals_init();
+	if (err)
+		return err;
 
 	err = i915_mock_selftests();
 	if (err)
@@ -834,7 +911,7 @@ static void __exit i915_exit(void)
 		return;
 
 	pci_unregister_driver(&i915_pci_driver);
-	i915_global_active_exit();
+	i915_globals_exit();
 }
 
 module_init(i915_init);
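
Throughout i915_pci.c the ring_mask flag soup becomes an explicit engine_mask, and pipe/cursor/transcoder offsets move to designated initializers, so a missing entry reads as zero instead of silently shifting its neighbours. A hypothetical entry under the new conventions (platform invented for illustration):

static const struct intel_device_info intel_example_info = {
	GEN(9),
	.engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(VCS0) | BIT(VECS0),
	.cursor_offsets = {
		[PIPE_A] = CURSOR_A_OFFSET,
		[PIPE_B] = IVB_CURSOR_B_OFFSET,
		[PIPE_C] = IVB_CURSOR_C_OFFSET,
	},
};
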
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 9ebf99f3d8d3ee524443f3cea90a1ab56e48fe3c..9b0292a38865fd9a76faf7e61924021628e183c8 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -1202,7 +1202,7 @@ static int i915_oa_read(struct i915_perf_stream *stream,
 static struct intel_context *oa_pin_context(struct drm_i915_private *i915,
 					    struct i915_gem_context *ctx)
 {
-	struct intel_engine_cs *engine = i915->engine[RCS];
+	struct intel_engine_cs *engine = i915->engine[RCS0];
 	struct intel_context *ce;
 	int ret;
 
@@ -1629,13 +1629,14 @@ static void hsw_disable_metric_set(struct drm_i915_private *dev_priv)
  * It's fine to put out-of-date values into these per-context registers
  * in the case that the OA unit has been disabled.
  */
-static void gen8_update_reg_state_unlocked(struct i915_gem_context *ctx,
-					   u32 *reg_state,
-					   const struct i915_oa_config *oa_config)
+static void
+gen8_update_reg_state_unlocked(struct intel_context *ce,
+			       u32 *reg_state,
+			       const struct i915_oa_config *oa_config)
 {
-	struct drm_i915_private *dev_priv = ctx->i915;
-	u32 ctx_oactxctrl = dev_priv->perf.oa.ctx_oactxctrl_offset;
-	u32 ctx_flexeu0 = dev_priv->perf.oa.ctx_flexeu0_offset;
+	struct drm_i915_private *i915 = ce->gem_context->i915;
+	u32 ctx_oactxctrl = i915->perf.oa.ctx_oactxctrl_offset;
+	u32 ctx_flexeu0 = i915->perf.oa.ctx_flexeu0_offset;
 	/* The MMIO offsets for Flex EU registers aren't contiguous */
 	i915_reg_t flex_regs[] = {
 		EU_PERF_CNTL0,
@@ -1649,8 +1650,8 @@ static void gen8_update_reg_state_unlocked(struct i915_gem_context *ctx,
 	int i;
 
 	CTX_REG(reg_state, ctx_oactxctrl, GEN8_OACTXCONTROL,
-		(dev_priv->perf.oa.period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) |
-		(dev_priv->perf.oa.periodic ? GEN8_OA_TIMER_ENABLE : 0) |
+		(i915->perf.oa.period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) |
+		(i915->perf.oa.periodic ? GEN8_OA_TIMER_ENABLE : 0) |
 		GEN8_OA_COUNTER_RESUME);
 
 	for (i = 0; i < ARRAY_SIZE(flex_regs); i++) {
@@ -1678,10 +1679,9 @@ static void gen8_update_reg_state_unlocked(struct i915_gem_context *ctx,
 		CTX_REG(reg_state, state_offset, flex_regs[i], value);
 	}
 
-	CTX_REG(reg_state, CTX_R_PWR_CLK_STATE, GEN8_R_PWR_CLK_STATE,
-		gen8_make_rpcs(dev_priv,
-			       &to_intel_context(ctx,
-						 dev_priv->engine[RCS])->sseu));
+	CTX_REG(reg_state,
+		CTX_R_PWR_CLK_STATE, GEN8_R_PWR_CLK_STATE,
+		gen8_make_rpcs(i915, &ce->sseu));
 }
 
 /*
@@ -1711,7 +1711,7 @@ static void gen8_update_reg_state_unlocked(struct i915_gem_context *ctx,
 static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv,
 				       const struct i915_oa_config *oa_config)
 {
-	struct intel_engine_cs *engine = dev_priv->engine[RCS];
+	struct intel_engine_cs *engine = dev_priv->engine[RCS0];
 	unsigned int map_type = i915_coherent_map_type(dev_priv);
 	struct i915_gem_context *ctx;
 	struct i915_request *rq;
@@ -1740,11 +1740,11 @@ static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv,
 
 	/* Update all contexts now that we've stalled the submission. */
 	list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
-		struct intel_context *ce = to_intel_context(ctx, engine);
+		struct intel_context *ce = intel_context_lookup(ctx, engine);
 		u32 *regs;
 
 		/* OA settings will be set upon first use */
-		if (!ce->state)
+		if (!ce || !ce->state)
 			continue;
 
 		regs = i915_gem_object_pin_map(ce->state->obj, map_type);
@@ -1754,7 +1754,7 @@ static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv,
 		ce->state->obj->mm.dirty = true;
 		regs += LRC_STATE_PN * PAGE_SIZE / sizeof(*regs);
 
-		gen8_update_reg_state_unlocked(ctx, regs, oa_config);
+		gen8_update_reg_state_unlocked(ce, regs, oa_config);
 
 		i915_gem_object_unpin_map(ce->state->obj);
 	}
@@ -2138,17 +2138,17 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
 }
 
 void i915_oa_init_reg_state(struct intel_engine_cs *engine,
-			    struct i915_gem_context *ctx,
-			    u32 *reg_state)
+			    struct intel_context *ce,
+			    u32 *regs)
 {
 	struct i915_perf_stream *stream;
 
-	if (engine->id != RCS)
+	if (engine->class != RENDER_CLASS)
 		return;
 
 	stream = engine->i915->perf.oa.exclusive_stream;
 	if (stream)
-		gen8_update_reg_state_unlocked(ctx, reg_state, stream->oa_config);
+		gen8_update_reg_state_unlocked(ce, regs, stream->oa_config);
 }
 
 /**
@@ -2881,12 +2881,24 @@ void i915_perf_register(struct drm_i915_private *dev_priv)
 
 	sysfs_attr_init(&dev_priv->perf.oa.test_config.sysfs_metric_id.attr);
 
-	if (IS_HASWELL(dev_priv)) {
-		i915_perf_load_test_config_hsw(dev_priv);
-	} else if (IS_BROADWELL(dev_priv)) {
-		i915_perf_load_test_config_bdw(dev_priv);
-	} else if (IS_CHERRYVIEW(dev_priv)) {
-		i915_perf_load_test_config_chv(dev_priv);
+	if (INTEL_GEN(dev_priv) >= 11) {
+		i915_perf_load_test_config_icl(dev_priv);
+	} else if (IS_CANNONLAKE(dev_priv)) {
+		i915_perf_load_test_config_cnl(dev_priv);
+	} else if (IS_COFFEELAKE(dev_priv)) {
+		if (IS_CFL_GT2(dev_priv))
+			i915_perf_load_test_config_cflgt2(dev_priv);
+		if (IS_CFL_GT3(dev_priv))
+			i915_perf_load_test_config_cflgt3(dev_priv);
+	} else if (IS_GEMINILAKE(dev_priv)) {
+		i915_perf_load_test_config_glk(dev_priv);
+	} else if (IS_KABYLAKE(dev_priv)) {
+		if (IS_KBL_GT2(dev_priv))
+			i915_perf_load_test_config_kblgt2(dev_priv);
+		else if (IS_KBL_GT3(dev_priv))
+			i915_perf_load_test_config_kblgt3(dev_priv);
+	} else if (IS_BROXTON(dev_priv)) {
+		i915_perf_load_test_config_bxt(dev_priv);
 	} else if (IS_SKYLAKE(dev_priv)) {
 		if (IS_SKL_GT2(dev_priv))
 			i915_perf_load_test_config_sklgt2(dev_priv);
@@ -2894,25 +2906,13 @@ void i915_perf_register(struct drm_i915_private *dev_priv)
 			i915_perf_load_test_config_sklgt3(dev_priv);
 		else if (IS_SKL_GT4(dev_priv))
 			i915_perf_load_test_config_sklgt4(dev_priv);
-	} else if (IS_BROXTON(dev_priv)) {
-		i915_perf_load_test_config_bxt(dev_priv);
-	} else if (IS_KABYLAKE(dev_priv)) {
-		if (IS_KBL_GT2(dev_priv))
-			i915_perf_load_test_config_kblgt2(dev_priv);
-		else if (IS_KBL_GT3(dev_priv))
-			i915_perf_load_test_config_kblgt3(dev_priv);
-	} else if (IS_GEMINILAKE(dev_priv)) {
-		i915_perf_load_test_config_glk(dev_priv);
-	} else if (IS_COFFEELAKE(dev_priv)) {
-		if (IS_CFL_GT2(dev_priv))
-			i915_perf_load_test_config_cflgt2(dev_priv);
-		if (IS_CFL_GT3(dev_priv))
-			i915_perf_load_test_config_cflgt3(dev_priv);
-	} else if (IS_CANNONLAKE(dev_priv)) {
-		i915_perf_load_test_config_cnl(dev_priv);
-	} else if (IS_ICELAKE(dev_priv)) {
-		i915_perf_load_test_config_icl(dev_priv);
-	}
+	} else if (IS_CHERRYVIEW(dev_priv)) {
+		i915_perf_load_test_config_chv(dev_priv);
+	} else if (IS_BROADWELL(dev_priv)) {
+		i915_perf_load_test_config_bdw(dev_priv);
+	} else if (IS_HASWELL(dev_priv)) {
+		i915_perf_load_test_config_hsw(dev_priv);
+	}
 
 	if (dev_priv->perf.oa.test_config.id == 0)
 		goto sysfs_error;
diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
index b745c49a5af620e61adbfe05585f2f8ad7b7ad03..46a52da3db297bec061d874858b8fbb440dc4654 100644
--- a/drivers/gpu/drm/i915/i915_pmu.c
+++ b/drivers/gpu/drm/i915/i915_pmu.c
@@ -102,7 +102,7 @@ static bool pmu_needs_timer(struct drm_i915_private *i915, bool gpu_active)
 	 *
 	 * Use RCS as proxy for all engines.
 	 */
-	else if (intel_engine_supports_stats(i915->engine[RCS]))
+	else if (intel_engine_supports_stats(i915->engine[RCS0]))
 		enable &= ~BIT(I915_SAMPLE_BUSY);
 
 	/*
@@ -149,14 +149,6 @@ void i915_pmu_gt_unparked(struct drm_i915_private *i915)
 	spin_unlock_irq(&i915->pmu.lock);
 }
 
-static bool grab_forcewake(struct drm_i915_private *i915, bool fw)
-{
-	if (!fw)
-		intel_uncore_forcewake_get(i915, FORCEWAKE_ALL);
-
-	return true;
-}
-
 static void
 add_sample(struct i915_pmu_sample *sample, u32 val)
 {
@@ -169,49 +161,48 @@ engines_sample(struct drm_i915_private *dev_priv, unsigned int period_ns)
 	struct intel_engine_cs *engine;
 	enum intel_engine_id id;
 	intel_wakeref_t wakeref;
-	bool fw = false;
+	unsigned long flags;
 
 	if ((dev_priv->pmu.enable & ENGINE_SAMPLE_MASK) == 0)
 		return;
 
-	if (!dev_priv->gt.awake)
-		return;
-
-	wakeref = intel_runtime_pm_get_if_in_use(dev_priv);
+	wakeref = 0;
+	if (READ_ONCE(dev_priv->gt.awake))
+		wakeref = intel_runtime_pm_get_if_in_use(dev_priv);
 	if (!wakeref)
 		return;
 
+	spin_lock_irqsave(&dev_priv->uncore.lock, flags);
 	for_each_engine(engine, dev_priv, id) {
-		u32 current_seqno = intel_engine_get_seqno(engine);
-		u32 last_seqno = intel_engine_last_submit(engine);
+		struct intel_engine_pmu *pmu = &engine->pmu;
+		bool busy;
 		u32 val;
 
-		val = !i915_seqno_passed(current_seqno, last_seqno);
-
-		if (val)
-			add_sample(&engine->pmu.sample[I915_SAMPLE_BUSY],
-				   period_ns);
-
-		if (val && (engine->pmu.enable &
-		    (BIT(I915_SAMPLE_WAIT) | BIT(I915_SAMPLE_SEMA)))) {
-			fw = grab_forcewake(dev_priv, fw);
-
-			val = I915_READ_FW(RING_CTL(engine->mmio_base));
-		} else {
-			val = 0;
-		}
+		val = I915_READ_FW(RING_CTL(engine->mmio_base));
+		if (val == 0) /* powerwell off => engine idle */
+			continue;
 
 		if (val & RING_WAIT)
-			add_sample(&engine->pmu.sample[I915_SAMPLE_WAIT],
-				   period_ns);
-
+			add_sample(&pmu->sample[I915_SAMPLE_WAIT], period_ns);
 		if (val & RING_WAIT_SEMAPHORE)
-			add_sample(&engine->pmu.sample[I915_SAMPLE_SEMA],
-				   period_ns);
-	}
+			add_sample(&pmu->sample[I915_SAMPLE_SEMA], period_ns);
 
-	if (fw)
-		intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+		/*
+		 * While waiting on a semaphore or event, MI_MODE reports the
+		 * ring as idle. However, previously (when using the seqno, and
+		 * with execlists sampling) we accounted for the ring waiting as
+		 * the engine being busy. Therefore, we record the sample as being
+		 * busy if either waiting or !idle.
+		 */
+		busy = val & (RING_WAIT_SEMAPHORE | RING_WAIT);
+		if (!busy) {
+			val = I915_READ_FW(RING_MI_MODE(engine->mmio_base));
+			busy = !(val & MODE_IDLE);
+		}
+		if (busy)
+			add_sample(&pmu->sample[I915_SAMPLE_BUSY], period_ns);
+	}
+	spin_unlock_irqrestore(&dev_priv->uncore.lock, flags);
 
 	intel_runtime_pm_put(dev_priv, wakeref);
 }
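
The sampling loop now derives busyness purely from registers, under a single uncore.lock section instead of per-engine forcewake dances. The decision reduces to:

/* Distilled from the loop above (illustrative helper, not driver code) */
static bool engine_sample_busy(u32 ring_ctl, u32 mi_mode)
{
	if (ring_ctl == 0)
		return false;	/* powerwell off => engine idle */

	if (ring_ctl & (RING_WAIT | RING_WAIT_SEMAPHORE))
		return true;	/* waits are accounted as busy time */

	return !(mi_mode & MODE_IDLE);
}
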
diff --git a/drivers/gpu/drm/i915/i915_pvinfo.h b/drivers/gpu/drm/i915/i915_pvinfo.h
index eeaa3d506d95dc357ec5f317252c7d4bd11a3ed7..969e514916abec2b5a272e459550e6cd120a0558 100644
--- a/drivers/gpu/drm/i915/i915_pvinfo.h
+++ b/drivers/gpu/drm/i915/i915_pvinfo.h
@@ -52,7 +52,7 @@ enum vgt_g2v_type {
 /*
  * VGT capabilities type
  */
-#define VGT_CAPS_FULL_48BIT_PPGTT	BIT(2)
+#define VGT_CAPS_FULL_PPGTT		BIT(2)
 #define VGT_CAPS_HWSP_EMULATION		BIT(3)
 #define VGT_CAPS_HUGE_GTT		BIT(4)
 
diff --git a/drivers/gpu/drm/i915/i915_query.c b/drivers/gpu/drm/i915/i915_query.c
index cbcb957b7141d768541853129cdc029d5f804029..782183b78f49c3ae727ae4dbc8ad75392d229583 100644
--- a/drivers/gpu/drm/i915/i915_query.c
+++ b/drivers/gpu/drm/i915/i915_query.c
@@ -10,12 +10,34 @@
 #include "i915_query.h"
 #include <uapi/drm/i915_drm.h>
 
+static int copy_query_item(void *query_hdr, size_t query_sz,
+			   u32 total_length,
+			   struct drm_i915_query_item *query_item)
+{
+	if (query_item->length == 0)
+		return total_length;
+
+	if (query_item->length < total_length)
+		return -EINVAL;
+
+	if (copy_from_user(query_hdr, u64_to_user_ptr(query_item->data_ptr),
+			   query_sz))
+		return -EFAULT;
+
+	if (!access_ok(u64_to_user_ptr(query_item->data_ptr),
+		       total_length))
+		return -EFAULT;
+
+	return 0;
+}
+
 static int query_topology_info(struct drm_i915_private *dev_priv,
 			       struct drm_i915_query_item *query_item)
 {
 	const struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
 	struct drm_i915_query_topology_info topo;
 	u32 slice_length, subslice_length, eu_length, total_length;
+	int ret;
 
 	if (query_item->flags != 0)
 		return -EINVAL;
@@ -33,23 +55,14 @@ static int query_topology_info(struct drm_i915_private *dev_priv,
 
 	total_length = sizeof(topo) + slice_length + subslice_length + eu_length;
 
-	if (query_item->length == 0)
-		return total_length;
-
-	if (query_item->length < total_length)
-		return -EINVAL;
-
-	if (copy_from_user(&topo, u64_to_user_ptr(query_item->data_ptr),
-			   sizeof(topo)))
-		return -EFAULT;
+	ret = copy_query_item(&topo, sizeof(topo), total_length,
+			      query_item);
+	if (ret != 0)
+		return ret;
 
 	if (topo.flags != 0)
 		return -EINVAL;
 
-	if (!access_ok(u64_to_user_ptr(query_item->data_ptr),
-		       total_length))
-		return -EFAULT;
-
 	memset(&topo, 0, sizeof(topo));
 	topo.max_slices = sseu->max_slices;
 	topo.max_subslices = sseu->max_subslices;
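
copy_query_item() factors out the two-pass uAPI convention: a zero-length item asks for the required size, and a second call with a buffer of at least that size retrieves the data. The userspace side, sketched (error handling trimmed; fd is an open DRM node; needs <stdint.h>, <sys/ioctl.h> and <drm/i915_drm.h>):

static int query_topology_size(int fd)
{
	struct drm_i915_query_item item = {
		.query_id = DRM_I915_QUERY_TOPOLOGY_INFO,
		/* .length == 0: first pass only reports the required size */
	};
	struct drm_i915_query q = {
		.num_items = 1,
		.items_ptr = (uintptr_t)&item,
	};

	if (ioctl(fd, DRM_IOCTL_I915_QUERY, &q))
		return -1;

	return item.length;	/* negative on per-item error, else bytes */
}

A second pass then points item.data_ptr at a buffer of that size and repeats the ioctl.
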
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index cf80c5d8af34295f7b8bca64eef8611e3340f1b3..85e8d1a1f70b26fe8b4a636cc1f8e8e96115f261 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -25,6 +25,9 @@
 #ifndef _I915_REG_H_
 #define _I915_REG_H_
 
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+
 /**
  * DOC: The i915 register macro definition style guide
  *
@@ -59,15 +62,13 @@
  * significant to least significant bit. Indent the register content macros
  * using two extra spaces between ``#define`` and the macro name.
  *
- * For bit fields, define a ``_MASK`` and a ``_SHIFT`` macro. Define bit field
- * contents so that they are already shifted in place, and can be directly
- * OR'd. For convenience, function-like macros may be used to define bit fields,
- * but do note that the macros may be needed to read as well as write the
- * register contents.
+ * Define bit fields using ``REG_GENMASK(h, l)``. Define bit field contents
+ * using ``REG_FIELD_PREP(mask, value)``. This will define the values already
+ * shifted in place, so they can be directly OR'd together. For convenience,
+ * function-like macros may be used to define bit fields, but do note that the
+ * macros may be needed to read as well as write the register contents.
  *
- * Define bits using ``(1 << N)`` instead of ``BIT(N)``. We may change this in
- * the future, but this is the prevailing style. Do **not** add ``_BIT`` suffix
- * to the name.
+ * Define bits using ``REG_BIT(N)``. Do **not** add ``_BIT`` suffix to the name.
  *
  * Group the register and its contents together without blank lines, separate
  * from other registers and their contents with one blank line.
@@ -105,17 +106,78 @@
  *  #define _FOO_A                      0xf000
  *  #define _FOO_B                      0xf001
  *  #define FOO(pipe)                   _MMIO_PIPE(pipe, _FOO_A, _FOO_B)
- *  #define   FOO_ENABLE                (1 << 31)
- *  #define   FOO_MODE_MASK             (0xf << 16)
- *  #define   FOO_MODE_SHIFT            16
- *  #define   FOO_MODE_BAR              (0 << 16)
- *  #define   FOO_MODE_BAZ              (1 << 16)
- *  #define   FOO_MODE_QUX_SNB          (2 << 16)
+ *  #define   FOO_ENABLE                REG_BIT(31)
+ *  #define   FOO_MODE_MASK             REG_GENMASK(19, 16)
+ *  #define   FOO_MODE_BAR              REG_FIELD_PREP(FOO_MODE_MASK, 0)
+ *  #define   FOO_MODE_BAZ              REG_FIELD_PREP(FOO_MODE_MASK, 1)
+ *  #define   FOO_MODE_QUX_SNB          REG_FIELD_PREP(FOO_MODE_MASK, 2)
  *
  *  #define BAR                         _MMIO(0xb000)
  *  #define GEN8_BAR                    _MMIO(0xb888)
  */
 
+/**
+ * REG_BIT() - Prepare a u32 bit value
+ * @__n: 0-based bit number
+ *
+ * Local wrapper for BIT() to force u32, with compile time checks.
+ *
+ * @return: Value with bit @__n set.
+ */
+#define REG_BIT(__n)							\
+	((u32)(BIT(__n) +						\
+	       BUILD_BUG_ON_ZERO(__builtin_constant_p(__n) &&		\
+				 ((__n) < 0 || (__n) > 31))))
+
+/**
+ * REG_GENMASK() - Prepare a continuous u32 bitmask
+ * @__high: 0-based high bit
+ * @__low: 0-based low bit
+ *
+ * Local wrapper for GENMASK() to force u32, with compile time checks.
+ *
+ * @return: Continuous bitmask from @__high to @__low, inclusive.
+ */
+#define REG_GENMASK(__high, __low)					\
+	((u32)(GENMASK(__high, __low) +					\
+	       BUILD_BUG_ON_ZERO(__builtin_constant_p(__high) &&	\
+				 __builtin_constant_p(__low) &&		\
+				 ((__low) < 0 || (__high) > 31 || (__low) > (__high)))))
+
+/*
+ * Local integer constant expression version of is_power_of_2().
+ */
+#define IS_POWER_OF_2(__x)		((__x) && (((__x) & ((__x) - 1)) == 0))
+
+/**
+ * REG_FIELD_PREP() - Prepare a u32 bitfield value
+ * @__mask: shifted mask defining the field's length and position
+ * @__val: value to put in the field
+ *
+ * Local copy of FIELD_PREP() to generate an integer constant expression, force
+ * u32 and for consistency with REG_FIELD_GET(), REG_BIT() and REG_GENMASK().
+ *
+ * @return: @__val masked and shifted into the field defined by @__mask.
+ */
+#define REG_FIELD_PREP(__mask, __val)						\
+	((u32)((((typeof(__mask))(__val) << __bf_shf(__mask)) & (__mask)) +	\
+	       BUILD_BUG_ON_ZERO(!__builtin_constant_p(__mask)) +		\
+	       BUILD_BUG_ON_ZERO((__mask) == 0 || (__mask) > U32_MAX) +		\
+	       BUILD_BUG_ON_ZERO(!IS_POWER_OF_2((__mask) + (1ULL << __bf_shf(__mask)))) + \
+	       BUILD_BUG_ON_ZERO(__builtin_choose_expr(__builtin_constant_p(__val), (~((__mask) >> __bf_shf(__mask)) & (__val)), 0))))
+
+/**
+ * REG_FIELD_GET() - Extract a u32 bitfield value
+ * @__mask: shifted mask defining the field's length and position
+ * @__val: value to extract the bitfield value from
+ *
+ * Local wrapper for FIELD_GET() to force u32 and for consistency with
+ * REG_FIELD_PREP(), REG_BIT() and REG_GENMASK().
+ *
+ * @return: Masked and shifted value of the field defined by @__mask in @__val.
+ */
+#define REG_FIELD_GET(__mask, __val)	((u32)FIELD_GET(__mask, __val))
+
 typedef struct {
 	u32 reg;
 } i915_reg_t;
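
As a quick, hedged illustration of how the new helpers compose (WIDGET_* is a
made-up register for this sketch, and I915_READ()/I915_WRITE() are the
driver's usual mmio accessors, assumed to be used where dev_priv is in scope):

	#define WIDGET_CTL			_MMIO(0x12345)	/* hypothetical */
	#define   WIDGET_ENABLE			REG_BIT(31)
	#define   WIDGET_LEVEL_MASK		REG_GENMASK(7, 4)
	#define   WIDGET_LEVEL(x)		REG_FIELD_PREP(WIDGET_LEVEL_MASK, (x))

	u32 val = I915_READ(WIDGET_CTL);
	u32 level = REG_FIELD_GET(WIDGET_LEVEL_MASK, val);	/* extract bits 7:4 */

	val &= ~WIDGET_LEVEL_MASK;
	val |= WIDGET_LEVEL(level + 1) | WIDGET_ENABLE;		/* prep and set */
	I915_WRITE(WIDGET_CTL, val);

The BUILD_BUG_ON_ZERO() checks above turn an out-of-range bit number, a
non-contiguous mask or an oversized constant value into a compile-time error
instead of a silently truncated register write.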
@@ -210,14 +272,14 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
 
 /* Engine ID */
 
-#define RCS_HW		0
-#define VCS_HW		1
-#define BCS_HW		2
-#define VECS_HW		3
-#define VCS2_HW		4
-#define VCS3_HW		6
-#define VCS4_HW		7
-#define VECS2_HW	12
+#define RCS0_HW		0
+#define VCS0_HW		1
+#define BCS0_HW		2
+#define VECS0_HW	3
+#define VCS1_HW		4
+#define VCS2_HW		6
+#define VCS3_HW		7
+#define VECS1_HW	12
 
 /* Engine class */
 
@@ -1044,7 +1106,32 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
 /* See configdb bunit SB addr map */
 #define BUNIT_REG_BISOC				0x11
 
-#define PUNIT_REG_DSPFREQ			0x36
+/* PUNIT_REG_*SSPM0 */
+#define   _SSPM0_SSC(val)			((val) << 0)
+#define   SSPM0_SSC_MASK			_SSPM0_SSC(0x3)
+#define   SSPM0_SSC_PWR_ON			_SSPM0_SSC(0x0)
+#define   SSPM0_SSC_CLK_GATE			_SSPM0_SSC(0x1)
+#define   SSPM0_SSC_RESET			_SSPM0_SSC(0x2)
+#define   SSPM0_SSC_PWR_GATE			_SSPM0_SSC(0x3)
+#define   _SSPM0_SSS(val)			((val) << 24)
+#define   SSPM0_SSS_MASK			_SSPM0_SSS(0x3)
+#define   SSPM0_SSS_PWR_ON			_SSPM0_SSS(0x0)
+#define   SSPM0_SSS_CLK_GATE			_SSPM0_SSS(0x1)
+#define   SSPM0_SSS_RESET			_SSPM0_SSS(0x2)
+#define   SSPM0_SSS_PWR_GATE			_SSPM0_SSS(0x3)
+
+/* PUNIT_REG_*SSPM1 */
+#define   SSPM1_FREQSTAT_SHIFT			24
+#define   SSPM1_FREQSTAT_MASK			(0x1f << SSPM1_FREQSTAT_SHIFT)
+#define   SSPM1_FREQGUAR_SHIFT			8
+#define   SSPM1_FREQGUAR_MASK			(0x1f << SSPM1_FREQGUAR_SHIFT)
+#define   SSPM1_FREQ_SHIFT			0
+#define   SSPM1_FREQ_MASK			(0x1f << SSPM1_FREQ_SHIFT)
+
+#define PUNIT_REG_VEDSSPM0			0x32
+#define PUNIT_REG_VEDSSPM1			0x33
+
+#define PUNIT_REG_DSPSSPM			0x36
 #define   DSPFREQSTAT_SHIFT_CHV			24
 #define   DSPFREQSTAT_MASK_CHV			(0x1f << DSPFREQSTAT_SHIFT_CHV)
 #define   DSPFREQGUAR_SHIFT_CHV			8
@@ -1069,6 +1156,9 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
 #define   DP_SSS_RESET(pipe)			_DP_SSS(0x2, (pipe))
 #define   DP_SSS_PWR_GATE(pipe)			_DP_SSS(0x3, (pipe))
 
+#define PUNIT_REG_ISPSSPM0			0x39
+#define PUNIT_REG_ISPSSPM1			0x3a
+
 /*
  * i915_power_well_id:
  *
@@ -1860,13 +1950,13 @@ enum i915_power_well_id {
 #define _CNL_PORT_TX_DW4_LN1_AE		0x1624D0
 #define CNL_PORT_TX_DW4_GRP(port)	_MMIO(_CNL_PORT_TX_DW_GRP(4, (port)))
 #define CNL_PORT_TX_DW4_LN0(port)	_MMIO(_CNL_PORT_TX_DW_LN0(4, (port)))
-#define CNL_PORT_TX_DW4_LN(port, ln)   _MMIO(_CNL_PORT_TX_DW_LN0(4, (port)) + \
+#define CNL_PORT_TX_DW4_LN(ln, port)   _MMIO(_CNL_PORT_TX_DW_LN0(4, (port)) + \
 					   ((ln) * (_CNL_PORT_TX_DW4_LN1_AE - \
 						    _CNL_PORT_TX_DW4_LN0_AE)))
 #define ICL_PORT_TX_DW4_AUX(port)	_MMIO(_ICL_PORT_TX_DW_AUX(4, port))
 #define ICL_PORT_TX_DW4_GRP(port)	_MMIO(_ICL_PORT_TX_DW_GRP(4, port))
 #define ICL_PORT_TX_DW4_LN0(port)	_MMIO(_ICL_PORT_TX_DW_LN(4, 0, port))
-#define ICL_PORT_TX_DW4_LN(port, ln)	_MMIO(_ICL_PORT_TX_DW_LN(4, ln, port))
+#define ICL_PORT_TX_DW4_LN(ln, port)	_MMIO(_ICL_PORT_TX_DW_LN(4, ln, port))
 #define   LOADGEN_SELECT		(1 << 31)
 #define   POST_CURSOR_1(x)		((x) << 12)
 #define   POST_CURSOR_1_MASK		(0x3F << 12)
@@ -1893,11 +1983,11 @@ enum i915_power_well_id {
 #define ICL_PORT_TX_DW7_AUX(port)	_MMIO(_ICL_PORT_TX_DW_AUX(7, port))
 #define ICL_PORT_TX_DW7_GRP(port)	_MMIO(_ICL_PORT_TX_DW_GRP(7, port))
 #define ICL_PORT_TX_DW7_LN0(port)	_MMIO(_ICL_PORT_TX_DW_LN(7, 0, port))
-#define ICL_PORT_TX_DW7_LN(port, ln)	_MMIO(_ICL_PORT_TX_DW_LN(7, ln, port))
+#define ICL_PORT_TX_DW7_LN(ln, port)	_MMIO(_ICL_PORT_TX_DW_LN(7, ln, port))
 #define   N_SCALAR(x)			((x) << 24)
 #define   N_SCALAR_MASK			(0x7F << 24)
 
-#define MG_PHY_PORT_LN(port, ln, ln0p1, ln0p2, ln1p1) \
+#define MG_PHY_PORT_LN(ln, port, ln0p1, ln0p2, ln1p1) \
 	_MMIO(_PORT((port) - PORT_C, ln0p1, ln0p2) + (ln) * ((ln1p1) - (ln0p1)))
 
 #define MG_TX_LINK_PARAMS_TX1LN0_PORT1		0x16812C
@@ -1908,8 +1998,8 @@ enum i915_power_well_id {
 #define MG_TX_LINK_PARAMS_TX1LN1_PORT3		0x16A52C
 #define MG_TX_LINK_PARAMS_TX1LN0_PORT4		0x16B12C
 #define MG_TX_LINK_PARAMS_TX1LN1_PORT4		0x16B52C
-#define MG_TX1_LINK_PARAMS(port, ln) \
-	MG_PHY_PORT_LN(port, ln, MG_TX_LINK_PARAMS_TX1LN0_PORT1, \
+#define MG_TX1_LINK_PARAMS(ln, port) \
+	MG_PHY_PORT_LN(ln, port, MG_TX_LINK_PARAMS_TX1LN0_PORT1, \
 				 MG_TX_LINK_PARAMS_TX1LN0_PORT2, \
 				 MG_TX_LINK_PARAMS_TX1LN1_PORT1)
 
@@ -1921,8 +2011,8 @@ enum i915_power_well_id {
 #define MG_TX_LINK_PARAMS_TX2LN1_PORT3		0x16A4AC
 #define MG_TX_LINK_PARAMS_TX2LN0_PORT4		0x16B0AC
 #define MG_TX_LINK_PARAMS_TX2LN1_PORT4		0x16B4AC
-#define MG_TX2_LINK_PARAMS(port, ln) \
-	MG_PHY_PORT_LN(port, ln, MG_TX_LINK_PARAMS_TX2LN0_PORT1, \
+#define MG_TX2_LINK_PARAMS(ln, port) \
+	MG_PHY_PORT_LN(ln, port, MG_TX_LINK_PARAMS_TX2LN0_PORT1, \
 				 MG_TX_LINK_PARAMS_TX2LN0_PORT2, \
 				 MG_TX_LINK_PARAMS_TX2LN1_PORT1)
 #define   CRI_USE_FS32			(1 << 5)
@@ -1935,8 +2025,8 @@ enum i915_power_well_id {
 #define MG_TX_PISO_READLOAD_TX1LN1_PORT3		0x16A54C
 #define MG_TX_PISO_READLOAD_TX1LN0_PORT4		0x16B14C
 #define MG_TX_PISO_READLOAD_TX1LN1_PORT4		0x16B54C
-#define MG_TX1_PISO_READLOAD(port, ln) \
-	MG_PHY_PORT_LN(port, ln, MG_TX_PISO_READLOAD_TX1LN0_PORT1, \
+#define MG_TX1_PISO_READLOAD(ln, port) \
+	MG_PHY_PORT_LN(ln, port, MG_TX_PISO_READLOAD_TX1LN0_PORT1, \
 				 MG_TX_PISO_READLOAD_TX1LN0_PORT2, \
 				 MG_TX_PISO_READLOAD_TX1LN1_PORT1)
 
@@ -1948,8 +2038,8 @@ enum i915_power_well_id {
 #define MG_TX_PISO_READLOAD_TX2LN1_PORT3		0x16A4CC
 #define MG_TX_PISO_READLOAD_TX2LN0_PORT4		0x16B0CC
 #define MG_TX_PISO_READLOAD_TX2LN1_PORT4		0x16B4CC
-#define MG_TX2_PISO_READLOAD(port, ln) \
-	MG_PHY_PORT_LN(port, ln, MG_TX_PISO_READLOAD_TX2LN0_PORT1, \
+#define MG_TX2_PISO_READLOAD(ln, port) \
+	MG_PHY_PORT_LN(ln, port, MG_TX_PISO_READLOAD_TX2LN0_PORT1, \
 				 MG_TX_PISO_READLOAD_TX2LN0_PORT2, \
 				 MG_TX_PISO_READLOAD_TX2LN1_PORT1)
 #define   CRI_CALCINIT					(1 << 1)
@@ -1962,8 +2052,8 @@ enum i915_power_well_id {
 #define MG_TX_SWINGCTRL_TX1LN1_PORT3		0x16A548
 #define MG_TX_SWINGCTRL_TX1LN0_PORT4		0x16B148
 #define MG_TX_SWINGCTRL_TX1LN1_PORT4		0x16B548
-#define MG_TX1_SWINGCTRL(port, ln) \
-	MG_PHY_PORT_LN(port, ln, MG_TX_SWINGCTRL_TX1LN0_PORT1, \
+#define MG_TX1_SWINGCTRL(ln, port) \
+	MG_PHY_PORT_LN(ln, port, MG_TX_SWINGCTRL_TX1LN0_PORT1, \
 				 MG_TX_SWINGCTRL_TX1LN0_PORT2, \
 				 MG_TX_SWINGCTRL_TX1LN1_PORT1)
 
@@ -1975,8 +2065,8 @@ enum i915_power_well_id {
 #define MG_TX_SWINGCTRL_TX2LN1_PORT3		0x16A4C8
 #define MG_TX_SWINGCTRL_TX2LN0_PORT4		0x16B0C8
 #define MG_TX_SWINGCTRL_TX2LN1_PORT4		0x16B4C8
-#define MG_TX2_SWINGCTRL(port, ln) \
-	MG_PHY_PORT_LN(port, ln, MG_TX_SWINGCTRL_TX2LN0_PORT1, \
+#define MG_TX2_SWINGCTRL(ln, port) \
+	MG_PHY_PORT_LN(ln, port, MG_TX_SWINGCTRL_TX2LN0_PORT1, \
 				 MG_TX_SWINGCTRL_TX2LN0_PORT2, \
 				 MG_TX_SWINGCTRL_TX2LN1_PORT1)
 #define   CRI_TXDEEMPH_OVERRIDE_17_12(x)		((x) << 0)
@@ -1990,8 +2080,8 @@ enum i915_power_well_id {
 #define MG_TX_DRVCTRL_TX1LN1_TXPORT3			0x16A544
 #define MG_TX_DRVCTRL_TX1LN0_TXPORT4			0x16B144
 #define MG_TX_DRVCTRL_TX1LN1_TXPORT4			0x16B544
-#define MG_TX1_DRVCTRL(port, ln) \
-	MG_PHY_PORT_LN(port, ln, MG_TX_DRVCTRL_TX1LN0_TXPORT1, \
+#define MG_TX1_DRVCTRL(ln, port) \
+	MG_PHY_PORT_LN(ln, port, MG_TX_DRVCTRL_TX1LN0_TXPORT1, \
 				 MG_TX_DRVCTRL_TX1LN0_TXPORT2, \
 				 MG_TX_DRVCTRL_TX1LN1_TXPORT1)
 
@@ -2003,8 +2093,8 @@ enum i915_power_well_id {
 #define MG_TX_DRVCTRL_TX2LN1_PORT3			0x16A4C4
 #define MG_TX_DRVCTRL_TX2LN0_PORT4			0x16B0C4
 #define MG_TX_DRVCTRL_TX2LN1_PORT4			0x16B4C4
-#define MG_TX2_DRVCTRL(port, ln) \
-	MG_PHY_PORT_LN(port, ln, MG_TX_DRVCTRL_TX2LN0_PORT1, \
+#define MG_TX2_DRVCTRL(ln, port) \
+	MG_PHY_PORT_LN(ln, port, MG_TX_DRVCTRL_TX2LN0_PORT1, \
 				 MG_TX_DRVCTRL_TX2LN0_PORT2, \
 				 MG_TX_DRVCTRL_TX2LN1_PORT1)
 #define   CRI_TXDEEMPH_OVERRIDE_11_6(x)			((x) << 24)
@@ -2023,8 +2113,8 @@ enum i915_power_well_id {
 #define MG_CLKHUB_LN1_PORT3			0x16A79C
 #define MG_CLKHUB_LN0_PORT4			0x16B39C
 #define MG_CLKHUB_LN1_PORT4			0x16B79C
-#define MG_CLKHUB(port, ln) \
-	MG_PHY_PORT_LN(port, ln, MG_CLKHUB_LN0_PORT1, \
+#define MG_CLKHUB(ln, port) \
+	MG_PHY_PORT_LN(ln, port, MG_CLKHUB_LN0_PORT1, \
 				 MG_CLKHUB_LN0_PORT2, \
 				 MG_CLKHUB_LN1_PORT1)
 #define   CFG_LOW_RATE_LKREN_EN				(1 << 11)
@@ -2037,8 +2127,8 @@ enum i915_power_well_id {
 #define MG_TX_DCC_TX1LN1_PORT3			0x16A510
 #define MG_TX_DCC_TX1LN0_PORT4			0x16B110
 #define MG_TX_DCC_TX1LN1_PORT4			0x16B510
-#define MG_TX1_DCC(port, ln) \
-	MG_PHY_PORT_LN(port, ln, MG_TX_DCC_TX1LN0_PORT1, \
+#define MG_TX1_DCC(ln, port) \
+	MG_PHY_PORT_LN(ln, port, MG_TX_DCC_TX1LN0_PORT1, \
 				 MG_TX_DCC_TX1LN0_PORT2, \
 				 MG_TX_DCC_TX1LN1_PORT1)
 #define MG_TX_DCC_TX2LN0_PORT1			0x168090
@@ -2049,8 +2139,8 @@ enum i915_power_well_id {
 #define MG_TX_DCC_TX2LN1_PORT3			0x16A490
 #define MG_TX_DCC_TX2LN0_PORT4			0x16B090
 #define MG_TX_DCC_TX2LN1_PORT4			0x16B490
-#define MG_TX2_DCC(port, ln) \
-	MG_PHY_PORT_LN(port, ln, MG_TX_DCC_TX2LN0_PORT1, \
+#define MG_TX2_DCC(ln, port) \
+	MG_PHY_PORT_LN(ln, port, MG_TX_DCC_TX2LN0_PORT1, \
 				 MG_TX_DCC_TX2LN0_PORT2, \
 				 MG_TX_DCC_TX2LN1_PORT1)
 #define   CFG_AMI_CK_DIV_OVERRIDE_VAL(x)	((x) << 25)
@@ -2065,8 +2155,8 @@ enum i915_power_well_id {
 #define MG_DP_MODE_LN1_ACU_PORT3			0x16A7A0
 #define MG_DP_MODE_LN0_ACU_PORT4			0x16B3A0
 #define MG_DP_MODE_LN1_ACU_PORT4			0x16B7A0
-#define MG_DP_MODE(port, ln)	\
-	MG_PHY_PORT_LN(port, ln, MG_DP_MODE_LN0_ACU_PORT1, \
+#define MG_DP_MODE(ln, port)	\
+	MG_PHY_PORT_LN(ln, port, MG_DP_MODE_LN0_ACU_PORT1, \
 				 MG_DP_MODE_LN0_ACU_PORT2, \
 				 MG_DP_MODE_LN1_ACU_PORT1)
 #define   MG_DP_MODE_CFG_DP_X2_MODE			(1 << 7)
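
The (port, ln) -> (ln, port) argument swap across these MG PHY macros is
mechanical; callers iterate lanes for a fixed port, e.g. (an illustrative
loop, not taken from this patch):

	for (ln = 0; ln < 2; ln++)
		I915_WRITE(MG_DP_MODE(ln, port),
			   I915_READ(MG_DP_MODE(ln, port)) |
			   MG_DP_MODE_CFG_DP_X2_MODE);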
@@ -3989,6 +4079,15 @@ enum {
 /* Pipe A CRC regs */
 #define _PIPE_CRC_CTL_A			0x60050
 #define   PIPE_CRC_ENABLE		(1 << 31)
+/* skl+ source selection */
+#define   PIPE_CRC_SOURCE_PLANE_1_SKL	(0 << 28)
+#define   PIPE_CRC_SOURCE_PLANE_2_SKL	(2 << 28)
+#define   PIPE_CRC_SOURCE_DMUX_SKL	(4 << 28)
+#define   PIPE_CRC_SOURCE_PLANE_3_SKL	(6 << 28)
+#define   PIPE_CRC_SOURCE_PLANE_4_SKL	(7 << 28)
+#define   PIPE_CRC_SOURCE_PLANE_5_SKL	(5 << 28)
+#define   PIPE_CRC_SOURCE_PLANE_6_SKL	(3 << 28)
+#define   PIPE_CRC_SOURCE_PLANE_7_SKL	(1 << 28)
 /* ivb+ source selection */
 #define   PIPE_CRC_SOURCE_PRIMARY_IVB	(0 << 29)
 #define   PIPE_CRC_SOURCE_SPRITE_IVB	(1 << 29)
@@ -4168,6 +4267,7 @@ enum {
 #define   EDP_PSR_TP2_TP3_TIME_100us		(1 << 8)
 #define   EDP_PSR_TP2_TP3_TIME_2500us		(2 << 8)
 #define   EDP_PSR_TP2_TP3_TIME_0us		(3 << 8)
+#define   EDP_PSR_TP4_TIME_0US			(3 << 6) /* ICL+ */
 #define   EDP_PSR_TP1_TIME_500us		(0 << 4)
 #define   EDP_PSR_TP1_TIME_100us		(1 << 4)
 #define   EDP_PSR_TP1_TIME_2500us		(2 << 4)
@@ -4612,13 +4712,14 @@ enum {
 #define   VIDEO_DIP_ENABLE		(1 << 31)
 #define   VIDEO_DIP_PORT(port)		((port) << 29)
 #define   VIDEO_DIP_PORT_MASK		(3 << 29)
-#define   VIDEO_DIP_ENABLE_GCP		(1 << 25)
+#define   VIDEO_DIP_ENABLE_GCP		(1 << 25) /* ilk+ */
 #define   VIDEO_DIP_ENABLE_AVI		(1 << 21)
 #define   VIDEO_DIP_ENABLE_VENDOR	(2 << 21)
-#define   VIDEO_DIP_ENABLE_GAMUT	(4 << 21)
+#define   VIDEO_DIP_ENABLE_GAMUT	(4 << 21) /* ilk+ */
 #define   VIDEO_DIP_ENABLE_SPD		(8 << 21)
 #define   VIDEO_DIP_SELECT_AVI		(0 << 19)
 #define   VIDEO_DIP_SELECT_VENDOR	(1 << 19)
+#define   VIDEO_DIP_SELECT_GAMUT	(2 << 19)
 #define   VIDEO_DIP_SELECT_SPD		(3 << 19)
 #define   VIDEO_DIP_SELECT_MASK		(3 << 19)
 #define   VIDEO_DIP_FREQ_ONCE		(0 << 16)
@@ -4653,18 +4754,17 @@ enum {
 
 #define _PP_STATUS			0x61200
 #define PP_STATUS(pps_idx)		_MMIO_PPS(pps_idx, _PP_STATUS)
-#define   PP_ON				(1 << 31)
+#define   PP_ON				REG_BIT(31)
 
 #define _PP_CONTROL_1			0xc7204
 #define _PP_CONTROL_2			0xc7304
 #define ICP_PP_CONTROL(x)		_MMIO(((x) == 1) ? _PP_CONTROL_1 : \
 					      _PP_CONTROL_2)
-#define  POWER_CYCLE_DELAY_MASK	(0x1f << 4)
-#define  POWER_CYCLE_DELAY_SHIFT	4
-#define  VDD_OVERRIDE_FORCE		(1 << 3)
-#define  BACKLIGHT_ENABLE		(1 << 2)
-#define  PWR_DOWN_ON_RESET		(1 << 1)
-#define  PWR_STATE_TARGET		(1 << 0)
+#define  POWER_CYCLE_DELAY_MASK		REG_GENMASK(8, 4)
+#define  VDD_OVERRIDE_FORCE		REG_BIT(3)
+#define  BACKLIGHT_ENABLE		REG_BIT(2)
+#define  PWR_DOWN_ON_RESET		REG_BIT(1)
+#define  PWR_STATE_TARGET		REG_BIT(0)
 /*
  * Indicates that all dependencies of the panel are on:
  *
@@ -4672,62 +4772,53 @@ enum {
  * - pipe enabled
  * - LVDS/DVOB/DVOC on
  */
-#define   PP_READY			(1 << 30)
-#define   PP_SEQUENCE_NONE		(0 << 28)
-#define   PP_SEQUENCE_POWER_UP		(1 << 28)
-#define   PP_SEQUENCE_POWER_DOWN	(2 << 28)
-#define   PP_SEQUENCE_MASK		(3 << 28)
-#define   PP_SEQUENCE_SHIFT		28
-#define   PP_CYCLE_DELAY_ACTIVE		(1 << 27)
-#define   PP_SEQUENCE_STATE_MASK	0x0000000f
-#define   PP_SEQUENCE_STATE_OFF_IDLE	(0x0 << 0)
-#define   PP_SEQUENCE_STATE_OFF_S0_1	(0x1 << 0)
-#define   PP_SEQUENCE_STATE_OFF_S0_2	(0x2 << 0)
-#define   PP_SEQUENCE_STATE_OFF_S0_3	(0x3 << 0)
-#define   PP_SEQUENCE_STATE_ON_IDLE	(0x8 << 0)
-#define   PP_SEQUENCE_STATE_ON_S1_0	(0x9 << 0)
-#define   PP_SEQUENCE_STATE_ON_S1_2	(0xa << 0)
-#define   PP_SEQUENCE_STATE_ON_S1_3	(0xb << 0)
-#define   PP_SEQUENCE_STATE_RESET	(0xf << 0)
+#define   PP_READY			REG_BIT(30)
+#define   PP_SEQUENCE_MASK		REG_GENMASK(29, 28)
+#define   PP_SEQUENCE_NONE		REG_FIELD_PREP(PP_SEQUENCE_MASK, 0)
+#define   PP_SEQUENCE_POWER_UP		REG_FIELD_PREP(PP_SEQUENCE_MASK, 1)
+#define   PP_SEQUENCE_POWER_DOWN	REG_FIELD_PREP(PP_SEQUENCE_MASK, 2)
+#define   PP_CYCLE_DELAY_ACTIVE		REG_BIT(27)
+#define   PP_SEQUENCE_STATE_MASK	REG_GENMASK(3, 0)
+#define   PP_SEQUENCE_STATE_OFF_IDLE	REG_FIELD_PREP(PP_SEQUENCE_STATE_MASK, 0x0)
+#define   PP_SEQUENCE_STATE_OFF_S0_1	REG_FIELD_PREP(PP_SEQUENCE_STATE_MASK, 0x1)
+#define   PP_SEQUENCE_STATE_OFF_S0_2	REG_FIELD_PREP(PP_SEQUENCE_STATE_MASK, 0x2)
+#define   PP_SEQUENCE_STATE_OFF_S0_3	REG_FIELD_PREP(PP_SEQUENCE_STATE_MASK, 0x3)
+#define   PP_SEQUENCE_STATE_ON_IDLE	REG_FIELD_PREP(PP_SEQUENCE_STATE_MASK, 0x8)
+#define   PP_SEQUENCE_STATE_ON_S1_1	REG_FIELD_PREP(PP_SEQUENCE_STATE_MASK, 0x9)
+#define   PP_SEQUENCE_STATE_ON_S1_2	REG_FIELD_PREP(PP_SEQUENCE_STATE_MASK, 0xa)
+#define   PP_SEQUENCE_STATE_ON_S1_3	REG_FIELD_PREP(PP_SEQUENCE_STATE_MASK, 0xb)
+#define   PP_SEQUENCE_STATE_RESET	REG_FIELD_PREP(PP_SEQUENCE_STATE_MASK, 0xf)
 
 #define _PP_CONTROL			0x61204
 #define PP_CONTROL(pps_idx)		_MMIO_PPS(pps_idx, _PP_CONTROL)
-#define  PANEL_UNLOCK_REGS		(0xabcd << 16)
-#define  PANEL_UNLOCK_MASK		(0xffff << 16)
-#define  BXT_POWER_CYCLE_DELAY_MASK	0x1f0
-#define  BXT_POWER_CYCLE_DELAY_SHIFT	4
-#define  EDP_FORCE_VDD			(1 << 3)
-#define  EDP_BLC_ENABLE			(1 << 2)
-#define  PANEL_POWER_RESET		(1 << 1)
-#define  PANEL_POWER_ON			(1 << 0)
+#define  PANEL_UNLOCK_MASK		REG_GENMASK(31, 16)
+#define  PANEL_UNLOCK_REGS		REG_FIELD_PREP(PANEL_UNLOCK_MASK, 0xabcd)
+#define  BXT_POWER_CYCLE_DELAY_MASK	REG_GENMASK(8, 4)
+#define  EDP_FORCE_VDD			REG_BIT(3)
+#define  EDP_BLC_ENABLE			REG_BIT(2)
+#define  PANEL_POWER_RESET		REG_BIT(1)
+#define  PANEL_POWER_ON			REG_BIT(0)
 
 #define _PP_ON_DELAYS			0x61208
 #define PP_ON_DELAYS(pps_idx)		_MMIO_PPS(pps_idx, _PP_ON_DELAYS)
-#define  PANEL_PORT_SELECT_SHIFT	30
-#define  PANEL_PORT_SELECT_MASK		(3 << 30)
-#define  PANEL_PORT_SELECT_LVDS		(0 << 30)
-#define  PANEL_PORT_SELECT_DPA		(1 << 30)
-#define  PANEL_PORT_SELECT_DPC		(2 << 30)
-#define  PANEL_PORT_SELECT_DPD		(3 << 30)
-#define  PANEL_PORT_SELECT_VLV(port)	((port) << 30)
-#define  PANEL_POWER_UP_DELAY_MASK	0x1fff0000
-#define  PANEL_POWER_UP_DELAY_SHIFT	16
-#define  PANEL_LIGHT_ON_DELAY_MASK	0x1fff
-#define  PANEL_LIGHT_ON_DELAY_SHIFT	0
+#define  PANEL_PORT_SELECT_MASK		REG_GENMASK(31, 30)
+#define  PANEL_PORT_SELECT_LVDS		REG_FIELD_PREP(PANEL_PORT_SELECT_MASK, 0)
+#define  PANEL_PORT_SELECT_DPA		REG_FIELD_PREP(PANEL_PORT_SELECT_MASK, 1)
+#define  PANEL_PORT_SELECT_DPC		REG_FIELD_PREP(PANEL_PORT_SELECT_MASK, 2)
+#define  PANEL_PORT_SELECT_DPD		REG_FIELD_PREP(PANEL_PORT_SELECT_MASK, 3)
+#define  PANEL_PORT_SELECT_VLV(port)	REG_FIELD_PREP(PANEL_PORT_SELECT_MASK, port)
+#define  PANEL_POWER_UP_DELAY_MASK	REG_GENMASK(28, 16)
+#define  PANEL_LIGHT_ON_DELAY_MASK	REG_GENMASK(12, 0)
 
 #define _PP_OFF_DELAYS			0x6120C
 #define PP_OFF_DELAYS(pps_idx)		_MMIO_PPS(pps_idx, _PP_OFF_DELAYS)
-#define  PANEL_POWER_DOWN_DELAY_MASK	0x1fff0000
-#define  PANEL_POWER_DOWN_DELAY_SHIFT	16
-#define  PANEL_LIGHT_OFF_DELAY_MASK	0x1fff
-#define  PANEL_LIGHT_OFF_DELAY_SHIFT	0
+#define  PANEL_POWER_DOWN_DELAY_MASK	REG_GENMASK(28, 16)
+#define  PANEL_LIGHT_OFF_DELAY_MASK	REG_GENMASK(12, 0)
 
 #define _PP_DIVISOR			0x61210
 #define PP_DIVISOR(pps_idx)		_MMIO_PPS(pps_idx, _PP_DIVISOR)
-#define  PP_REFERENCE_DIVIDER_MASK	0xffffff00
-#define  PP_REFERENCE_DIVIDER_SHIFT	8
-#define  PANEL_POWER_CYCLE_DELAY_MASK	0x1f
-#define  PANEL_POWER_CYCLE_DELAY_SHIFT	0
+#define  PP_REFERENCE_DIVIDER_MASK	REG_GENMASK(31, 8)
+#define  PANEL_POWER_CYCLE_DELAY_MASK	REG_GENMASK(4, 0)
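
A minimal consumer-side sketch of the converted fields (pps index 0 is
assumed here purely for illustration):

	u32 status = I915_READ(PP_STATUS(0));
	bool panel_on = status & PP_ON;
	u32 state = status & PP_SEQUENCE_STATE_MASK;	/* one of PP_SEQUENCE_STATE_* */
	u32 cycle_delay = REG_FIELD_GET(PANEL_POWER_CYCLE_DELAY_MASK,
					I915_READ(PP_DIVISOR(0)));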
 
 /* Panel fitting */
 #define PFIT_CONTROL	_MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61230)
@@ -5590,9 +5681,15 @@ enum {
 #define   PIPECONF_SINGLE_WIDE	0
 #define   PIPECONF_PIPE_UNLOCKED 0
 #define   PIPECONF_PIPE_LOCKED	(1 << 25)
-#define   PIPECONF_PALETTE	0
-#define   PIPECONF_GAMMA		(1 << 24)
 #define   PIPECONF_FORCE_BORDER	(1 << 25)
+#define   PIPECONF_GAMMA_MODE_MASK_I9XX	(1 << 24) /* gmch */
+#define   PIPECONF_GAMMA_MODE_MASK_ILK	(3 << 24) /* ilk-ivb */
+#define   PIPECONF_GAMMA_MODE_8BIT	(0 << 24) /* gmch,ilk-ivb */
+#define   PIPECONF_GAMMA_MODE_10BIT	(1 << 24) /* gmch,ilk-ivb */
+#define   PIPECONF_GAMMA_MODE_12BIT	(2 << 24) /* ilk-ivb */
+#define   PIPECONF_GAMMA_MODE_SPLIT	(3 << 24) /* ivb */
+#define   PIPECONF_GAMMA_MODE(x)	((x) << 24) /* pass in GAMMA_MODE_MODE_* */
+#define   PIPECONF_GAMMA_MODE_SHIFT	24
 #define   PIPECONF_INTERLACE_MASK	(7 << 21)
 #define   PIPECONF_INTERLACE_MASK_HSW	(3 << 21)
 /* Note that pre-gen3 does not support interlaced display directly. Panel
@@ -5998,6 +6095,7 @@ enum {
 #define _CUR_WM_TRANS_A_0	0x70168
 #define _CUR_WM_TRANS_B_0	0x71168
 #define   PLANE_WM_EN		(1 << 31)
+#define   PLANE_WM_IGNORE_LINES	(1 << 30)
 #define   PLANE_WM_LINES_SHIFT	14
 #define   PLANE_WM_LINES_MASK	0x1f
 #define   PLANE_WM_BLOCKS_MASK	0x7ff /* skl+: 10 bits, icl+ 11 bits */
@@ -6124,7 +6222,7 @@ enum {
 #define   MCURSOR_PIPE_SELECT_SHIFT	28
 #define   MCURSOR_PIPE_SELECT(pipe)	((pipe) << 28)
 #define   MCURSOR_GAMMA_ENABLE  (1 << 26)
-#define   MCURSOR_PIPE_CSC_ENABLE (1 << 24)
+#define   MCURSOR_PIPE_CSC_ENABLE (1 << 24) /* ilk+ */
 #define   MCURSOR_ROTATE_180	(1 << 15)
 #define   MCURSOR_TRICKLE_FEED_DISABLE	(1 << 14)
 #define _CURABASE		0x70084
@@ -6179,7 +6277,7 @@ enum {
 #define   DISPPLANE_RGBA888			(0xf << 26)
 #define   DISPPLANE_STEREO_ENABLE		(1 << 25)
 #define   DISPPLANE_STEREO_DISABLE		0
-#define   DISPPLANE_PIPE_CSC_ENABLE		(1 << 24)
+#define   DISPPLANE_PIPE_CSC_ENABLE		(1 << 24) /* ilk+ */
 #define   DISPPLANE_SEL_PIPE_SHIFT		24
 #define   DISPPLANE_SEL_PIPE_MASK		(3 << DISPPLANE_SEL_PIPE_SHIFT)
 #define   DISPPLANE_SEL_PIPE(pipe)		((pipe) << DISPPLANE_SEL_PIPE_SHIFT)
@@ -7114,11 +7212,12 @@ enum {
 #define _GAMMA_MODE_A		0x4a480
 #define _GAMMA_MODE_B		0x4ac80
 #define GAMMA_MODE(pipe) _MMIO_PIPE(pipe, _GAMMA_MODE_A, _GAMMA_MODE_B)
-#define GAMMA_MODE_MODE_MASK	(3 << 0)
-#define GAMMA_MODE_MODE_8BIT	(0 << 0)
-#define GAMMA_MODE_MODE_10BIT	(1 << 0)
-#define GAMMA_MODE_MODE_12BIT	(2 << 0)
-#define GAMMA_MODE_MODE_SPLIT	(3 << 0)
+#define  PRE_CSC_GAMMA_ENABLE	(1 << 31)
+#define  POST_CSC_GAMMA_ENABLE	(1 << 30)
+#define  GAMMA_MODE_MODE_8BIT	(0 << 0)
+#define  GAMMA_MODE_MODE_10BIT	(1 << 0)
+#define  GAMMA_MODE_MODE_12BIT	(2 << 0)
+#define  GAMMA_MODE_MODE_SPLIT	(3 << 0)
 
 /* DMC/CSR */
 #define CSR_PROGRAM(i)		_MMIO(0x80000 + (i) * 4)
@@ -7213,8 +7312,8 @@ enum {
 #define  GEN8_GT_VECS_IRQ		(1 << 6)
 #define  GEN8_GT_GUC_IRQ		(1 << 5)
 #define  GEN8_GT_PM_IRQ			(1 << 4)
-#define  GEN8_GT_VCS2_IRQ		(1 << 3)
-#define  GEN8_GT_VCS1_IRQ		(1 << 2)
+#define  GEN8_GT_VCS1_IRQ		(1 << 3) /* NB: VCS2 in bspec! */
+#define  GEN8_GT_VCS0_IRQ		(1 << 2) /* NB: VCS1 in bspec! */
 #define  GEN8_GT_BCS_IRQ		(1 << 1)
 #define  GEN8_GT_RCS_IRQ		(1 << 0)
 
@@ -7235,8 +7334,8 @@ enum {
 
 #define GEN8_RCS_IRQ_SHIFT 0
 #define GEN8_BCS_IRQ_SHIFT 16
-#define GEN8_VCS1_IRQ_SHIFT 0
-#define GEN8_VCS2_IRQ_SHIFT 16
+#define GEN8_VCS0_IRQ_SHIFT 0  /* NB: VCS1 in bspec! */
+#define GEN8_VCS1_IRQ_SHIFT 16 /* NB: VCS2 in bspec! */
 #define GEN8_VECS_IRQ_SHIFT 0
 #define GEN8_WD_IRQ_SHIFT 16
 
@@ -7622,13 +7721,13 @@ enum {
 #define  GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE	(1 << 2)
 
 /*GEN11 chicken */
-#define _PIPEA_CHICKEN			0x70038
-#define _PIPEB_CHICKEN			0x71038
-#define _PIPEC_CHICKEN			0x72038
-#define  PER_PIXEL_ALPHA_BYPASS_EN	(1 << 7)
-#define  PM_FILL_MAINTAIN_DBUF_FULLNESS	(1 << 0)
-#define PIPE_CHICKEN(pipe)		_MMIO_PIPE(pipe, _PIPEA_CHICKEN,\
-						   _PIPEB_CHICKEN)
+#define _PIPEA_CHICKEN				0x70038
+#define _PIPEB_CHICKEN				0x71038
+#define _PIPEC_CHICKEN				0x72038
+#define PIPE_CHICKEN(pipe)			_MMIO_PIPE(pipe, _PIPEA_CHICKEN,\
+							   _PIPEB_CHICKEN)
+#define   PIXEL_ROUNDING_TRUNC_FB_PASSTHRU	(1 << 15)
+#define   PER_PIXEL_ALPHA_BYPASS_EN		(1 << 7)
 
 /* PCH */
 
@@ -8098,10 +8197,11 @@ enum {
 #define _ICL_VIDEO_DIP_PPS_ECC_B	0x613D4
 
 #define HSW_TVIDEO_DIP_CTL(trans)		_MMIO_TRANS2(trans, _HSW_VIDEO_DIP_CTL_A)
+#define HSW_TVIDEO_DIP_GCP(trans)		_MMIO_TRANS2(trans, _HSW_VIDEO_DIP_GCP_A)
 #define HSW_TVIDEO_DIP_AVI_DATA(trans, i)	_MMIO_TRANS2(trans, _HSW_VIDEO_DIP_AVI_DATA_A + (i) * 4)
 #define HSW_TVIDEO_DIP_VS_DATA(trans, i)	_MMIO_TRANS2(trans, _HSW_VIDEO_DIP_VS_DATA_A + (i) * 4)
 #define HSW_TVIDEO_DIP_SPD_DATA(trans, i)	_MMIO_TRANS2(trans, _HSW_VIDEO_DIP_SPD_DATA_A + (i) * 4)
-#define HSW_TVIDEO_DIP_GCP(trans)		_MMIO_TRANS2(trans, _HSW_VIDEO_DIP_GCP_A)
+#define HSW_TVIDEO_DIP_GMP_DATA(trans, i)	_MMIO_TRANS2(trans, _HSW_VIDEO_DIP_GMP_DATA_A + (i) * 4)
 #define HSW_TVIDEO_DIP_VSC_DATA(trans, i)	_MMIO_TRANS2(trans, _HSW_VIDEO_DIP_VSC_DATA_A + (i) * 4)
 #define ICL_VIDEO_DIP_PPS_DATA(trans, i)	_MMIO_TRANS2(trans, _ICL_VIDEO_DIP_PPS_DATA_A + (i) * 4)
 #define ICL_VIDEO_DIP_PPS_ECC(trans, i)		_MMIO_TRANS2(trans, _ICL_VIDEO_DIP_PPS_ECC_A + (i) * 4)
@@ -9750,7 +9850,7 @@ enum skl_power_gate {
 #define  DPLL_CFGCR1_KDIV(x)		((x) << 6)
 #define  DPLL_CFGCR1_KDIV_1		(1 << 6)
 #define  DPLL_CFGCR1_KDIV_2		(2 << 6)
-#define  DPLL_CFGCR1_KDIV_4		(4 << 6)
+#define  DPLL_CFGCR1_KDIV_3		(4 << 6)
 #define  DPLL_CFGCR1_PDIV_MASK		(0xf << 2)
 #define  DPLL_CFGCR1_PDIV_SHIFT		(2)
 #define  DPLL_CFGCR1_PDIV(x)		((x) << 2)
@@ -9819,16 +9919,29 @@ enum skl_power_gate {
 #define  BXT_DRAM_WIDTH_X64			(0x3 << 4)
 #define  BXT_DRAM_SIZE_MASK			(0x7 << 6)
 #define  BXT_DRAM_SIZE_SHIFT			6
-#define  BXT_DRAM_SIZE_4GB			(0x0 << 6)
-#define  BXT_DRAM_SIZE_6GB			(0x1 << 6)
-#define  BXT_DRAM_SIZE_8GB			(0x2 << 6)
-#define  BXT_DRAM_SIZE_12GB			(0x3 << 6)
-#define  BXT_DRAM_SIZE_16GB			(0x4 << 6)
+#define  BXT_DRAM_SIZE_4GBIT			(0x0 << 6)
+#define  BXT_DRAM_SIZE_6GBIT			(0x1 << 6)
+#define  BXT_DRAM_SIZE_8GBIT			(0x2 << 6)
+#define  BXT_DRAM_SIZE_12GBIT			(0x3 << 6)
+#define  BXT_DRAM_SIZE_16GBIT			(0x4 << 6)
+#define  BXT_DRAM_TYPE_MASK			(0x7 << 22)
+#define  BXT_DRAM_TYPE_SHIFT			22
+#define  BXT_DRAM_TYPE_DDR3			(0x0 << 22)
+#define  BXT_DRAM_TYPE_LPDDR3			(0x1 << 22)
+#define  BXT_DRAM_TYPE_LPDDR4			(0x2 << 22)
+#define  BXT_DRAM_TYPE_DDR4			(0x4 << 22)
 
 #define SKL_MEMORY_FREQ_MULTIPLIER_HZ		266666666
 #define SKL_MC_BIOS_DATA_0_0_0_MCHBAR_PCU	_MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5E04)
 #define  SKL_REQ_DATA_MASK			(0xF << 0)
 
+#define SKL_MAD_INTER_CHANNEL_0_0_0_MCHBAR_MCMAIN _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5000)
+#define  SKL_DRAM_DDR_TYPE_MASK			(0x3 << 0)
+#define  SKL_DRAM_DDR_TYPE_DDR4			(0 << 0)
+#define  SKL_DRAM_DDR_TYPE_DDR3			(1 << 0)
+#define  SKL_DRAM_DDR_TYPE_LPDDR3		(2 << 0)
+#define  SKL_DRAM_DDR_TYPE_LPDDR4		(3 << 0)
+
 #define SKL_MAD_DIMM_CH0_0_0_0_MCHBAR_MCMAIN	_MMIO(MCHBAR_MIRROR_BASE_SNB + 0x500C)
 #define SKL_MAD_DIMM_CH1_0_0_0_MCHBAR_MCMAIN	_MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5010)
 #define  SKL_DRAM_S_SHIFT			16
@@ -9840,8 +9953,21 @@ enum skl_power_gate {
 #define  SKL_DRAM_WIDTH_X32			(0x2 << 8)
 #define  SKL_DRAM_RANK_MASK			(0x1 << 10)
 #define  SKL_DRAM_RANK_SHIFT			10
-#define  SKL_DRAM_RANK_SINGLE			(0x0 << 10)
-#define  SKL_DRAM_RANK_DUAL			(0x1 << 10)
+#define  SKL_DRAM_RANK_1			(0x0 << 10)
+#define  SKL_DRAM_RANK_2			(0x1 << 10)
+#define  CNL_DRAM_SIZE_MASK			0x7F
+#define  CNL_DRAM_WIDTH_MASK			(0x3 << 7)
+#define  CNL_DRAM_WIDTH_SHIFT			7
+#define  CNL_DRAM_WIDTH_X8			(0x0 << 7)
+#define  CNL_DRAM_WIDTH_X16			(0x1 << 7)
+#define  CNL_DRAM_WIDTH_X32			(0x2 << 7)
+#define  CNL_DRAM_RANK_MASK			(0x3 << 9)
+#define  CNL_DRAM_RANK_SHIFT			9
+#define  CNL_DRAM_RANK_1			(0x0 << 9)
+#define  CNL_DRAM_RANK_2			(0x1 << 9)
+#define  CNL_DRAM_RANK_3			(0x2 << 9)
+#define  CNL_DRAM_RANK_4			(0x3 << 9)
 
 /* Please see hsw_read_dcomp() and hsw_write_dcomp() before using this register,
  * since on HSW we can't write to it using I915_WRITE. */
@@ -9886,10 +10012,14 @@ enum skl_power_gate {
 #define _PIPE_A_CSC_COEFF_BU	0x4901c
 #define _PIPE_A_CSC_COEFF_RV_GV	0x49020
 #define _PIPE_A_CSC_COEFF_BV	0x49024
+
 #define _PIPE_A_CSC_MODE	0x49028
-#define   CSC_BLACK_SCREEN_OFFSET	(1 << 2)
-#define   CSC_POSITION_BEFORE_GAMMA	(1 << 1)
-#define   CSC_MODE_YUV_TO_RGB		(1 << 0)
+#define  ICL_CSC_ENABLE			(1 << 31)
+#define  ICL_OUTPUT_CSC_ENABLE		(1 << 30)
+#define  CSC_BLACK_SCREEN_OFFSET	(1 << 2)
+#define  CSC_POSITION_BEFORE_GAMMA	(1 << 1)
+#define  CSC_MODE_YUV_TO_RGB		(1 << 0)
+
 #define _PIPE_A_CSC_PREOFF_HI	0x49030
 #define _PIPE_A_CSC_PREOFF_ME	0x49034
 #define _PIPE_A_CSC_PREOFF_LO	0x49038
@@ -9925,6 +10055,70 @@ enum skl_power_gate {
 #define PIPE_CSC_POSTOFF_ME(pipe)	_MMIO_PIPE(pipe, _PIPE_A_CSC_POSTOFF_ME, _PIPE_B_CSC_POSTOFF_ME)
 #define PIPE_CSC_POSTOFF_LO(pipe)	_MMIO_PIPE(pipe, _PIPE_A_CSC_POSTOFF_LO, _PIPE_B_CSC_POSTOFF_LO)
 
+/* Pipe Output CSC */
+#define _PIPE_A_OUTPUT_CSC_COEFF_RY_GY	0x49050
+#define _PIPE_A_OUTPUT_CSC_COEFF_BY	0x49054
+#define _PIPE_A_OUTPUT_CSC_COEFF_RU_GU	0x49058
+#define _PIPE_A_OUTPUT_CSC_COEFF_BU	0x4905c
+#define _PIPE_A_OUTPUT_CSC_COEFF_RV_GV	0x49060
+#define _PIPE_A_OUTPUT_CSC_COEFF_BV	0x49064
+#define _PIPE_A_OUTPUT_CSC_PREOFF_HI	0x49068
+#define _PIPE_A_OUTPUT_CSC_PREOFF_ME	0x4906c
+#define _PIPE_A_OUTPUT_CSC_PREOFF_LO	0x49070
+#define _PIPE_A_OUTPUT_CSC_POSTOFF_HI	0x49074
+#define _PIPE_A_OUTPUT_CSC_POSTOFF_ME	0x49078
+#define _PIPE_A_OUTPUT_CSC_POSTOFF_LO	0x4907c
+
+#define _PIPE_B_OUTPUT_CSC_COEFF_RY_GY	0x49150
+#define _PIPE_B_OUTPUT_CSC_COEFF_BY	0x49154
+#define _PIPE_B_OUTPUT_CSC_COEFF_RU_GU	0x49158
+#define _PIPE_B_OUTPUT_CSC_COEFF_BU	0x4915c
+#define _PIPE_B_OUTPUT_CSC_COEFF_RV_GV	0x49160
+#define _PIPE_B_OUTPUT_CSC_COEFF_BV	0x49164
+#define _PIPE_B_OUTPUT_CSC_PREOFF_HI	0x49168
+#define _PIPE_B_OUTPUT_CSC_PREOFF_ME	0x4916c
+#define _PIPE_B_OUTPUT_CSC_PREOFF_LO	0x49170
+#define _PIPE_B_OUTPUT_CSC_POSTOFF_HI	0x49174
+#define _PIPE_B_OUTPUT_CSC_POSTOFF_ME	0x49178
+#define _PIPE_B_OUTPUT_CSC_POSTOFF_LO	0x4917c
+
+#define PIPE_CSC_OUTPUT_COEFF_RY_GY(pipe)	_MMIO_PIPE(pipe,\
+							   _PIPE_A_OUTPUT_CSC_COEFF_RY_GY,\
+							   _PIPE_B_OUTPUT_CSC_COEFF_RY_GY)
+#define PIPE_CSC_OUTPUT_COEFF_BY(pipe)		_MMIO_PIPE(pipe, \
+							   _PIPE_A_OUTPUT_CSC_COEFF_BY, \
+							   _PIPE_B_OUTPUT_CSC_COEFF_BY)
+#define PIPE_CSC_OUTPUT_COEFF_RU_GU(pipe)	_MMIO_PIPE(pipe, \
+							   _PIPE_A_OUTPUT_CSC_COEFF_RU_GU, \
+							   _PIPE_B_OUTPUT_CSC_COEFF_RU_GU)
+#define PIPE_CSC_OUTPUT_COEFF_BU(pipe)		_MMIO_PIPE(pipe, \
+							   _PIPE_A_OUTPUT_CSC_COEFF_BU, \
+							   _PIPE_B_OUTPUT_CSC_COEFF_BU)
+#define PIPE_CSC_OUTPUT_COEFF_RV_GV(pipe)	_MMIO_PIPE(pipe, \
+							   _PIPE_A_OUTPUT_CSC_COEFF_RV_GV, \
+							   _PIPE_B_OUTPUT_CSC_COEFF_RV_GV)
+#define PIPE_CSC_OUTPUT_COEFF_BV(pipe)		_MMIO_PIPE(pipe, \
+							   _PIPE_A_OUTPUT_CSC_COEFF_BV, \
+							   _PIPE_B_OUTPUT_CSC_COEFF_BV)
+#define PIPE_CSC_OUTPUT_PREOFF_HI(pipe)		_MMIO_PIPE(pipe, \
+							   _PIPE_A_OUTPUT_CSC_PREOFF_HI, \
+							   _PIPE_B_OUTPUT_CSC_PREOFF_HI)
+#define PIPE_CSC_OUTPUT_PREOFF_ME(pipe)		_MMIO_PIPE(pipe, \
+							   _PIPE_A_OUTPUT_CSC_PREOFF_ME, \
+							   _PIPE_B_OUTPUT_CSC_PREOFF_ME)
+#define PIPE_CSC_OUTPUT_PREOFF_LO(pipe)		_MMIO_PIPE(pipe, \
+							   _PIPE_A_OUTPUT_CSC_PREOFF_LO, \
+							   _PIPE_B_OUTPUT_CSC_PREOFF_LO)
+#define PIPE_CSC_OUTPUT_POSTOFF_HI(pipe)	_MMIO_PIPE(pipe, \
+							   _PIPE_A_OUTPUT_CSC_POSTOFF_HI, \
+							   _PIPE_B_OUTPUT_CSC_POSTOFF_HI)
+#define PIPE_CSC_OUTPUT_POSTOFF_ME(pipe)	_MMIO_PIPE(pipe, \
+							   _PIPE_A_OUTPUT_CSC_POSTOFF_ME, \
+							   _PIPE_B_OUTPUT_CSC_POSTOFF_ME)
+#define PIPE_CSC_OUTPUT_POSTOFF_LO(pipe)	_MMIO_PIPE(pipe, \
+							   _PIPE_A_OUTPUT_CSC_POSTOFF_LO, \
+							   _PIPE_B_OUTPUT_CSC_POSTOFF_LO)
+
 /* pipe degamma/gamma LUTs on IVB+ */
 #define _PAL_PREC_INDEX_A	0x4A400
 #define _PAL_PREC_INDEX_B	0x4AC00
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index c2a5c48c7541d6d1bb230933748b210ff036bd78..0a3d94517d0a344543d41d7ab7721be65fe45f3f 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -22,16 +22,31 @@
  *
  */
 
-#include <linux/prefetch.h>
 #include <linux/dma-fence-array.h>
+#include <linux/irq_work.h>
+#include <linux/prefetch.h>
 #include <linux/sched.h>
 #include <linux/sched/clock.h>
 #include <linux/sched/signal.h>
 
 #include "i915_drv.h"
 #include "i915_active.h"
+#include "i915_globals.h"
 #include "i915_reset.h"
 
+struct execute_cb {
+	struct list_head link;
+	struct irq_work work;
+	struct i915_sw_fence *fence;
+};
+
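+/*
+ * Note: a tentative definition; the initialized definition binding the
+ * shrink/exit hooks appears near the end of this file, once those
+ * functions have been defined.
+ */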
+static struct i915_global_request {
+	struct i915_global base;
+	struct kmem_cache *slab_requests;
+	struct kmem_cache *slab_dependencies;
+	struct kmem_cache *slab_execute_cbs;
+} global;
+
 static const char *i915_fence_get_driver_name(struct dma_fence *fence)
 {
 	return "i915";
@@ -68,7 +83,9 @@ static signed long i915_fence_wait(struct dma_fence *fence,
 				   bool interruptible,
 				   signed long timeout)
 {
-	return i915_request_wait(to_request(fence), interruptible, timeout);
+	return i915_request_wait(to_request(fence),
+				 interruptible | I915_WAIT_PRIORITY,
+				 timeout);
 }
 
 static void i915_fence_release(struct dma_fence *fence)
@@ -84,7 +101,7 @@ static void i915_fence_release(struct dma_fence *fence)
 	 */
 	i915_sw_fence_fini(&rq->submit);
 
-	kmem_cache_free(rq->i915->requests, rq);
+	kmem_cache_free(global.slab_requests, rq);
 }
 
 const struct dma_fence_ops i915_fence_ops = {
@@ -177,12 +194,10 @@ static void free_capture_list(struct i915_request *request)
 static void __retire_engine_request(struct intel_engine_cs *engine,
 				    struct i915_request *rq)
 {
-	GEM_TRACE("%s(%s) fence %llx:%lld, global=%d, current %d:%d\n",
+	GEM_TRACE("%s(%s) fence %llx:%lld, current %d\n",
 		  __func__, engine->name,
 		  rq->fence.context, rq->fence.seqno,
-		  rq->global_seqno,
-		  hwsp_seqno(rq),
-		  intel_engine_get_seqno(engine));
+		  hwsp_seqno(rq));
 
 	GEM_BUG_ON(!i915_request_completed(rq));
 
@@ -241,12 +256,10 @@ static void i915_request_retire(struct i915_request *request)
 {
 	struct i915_active_request *active, *next;
 
-	GEM_TRACE("%s fence %llx:%lld, global=%d, current %d:%d\n",
+	GEM_TRACE("%s fence %llx:%lld, current %d\n",
 		  request->engine->name,
 		  request->fence.context, request->fence.seqno,
-		  request->global_seqno,
-		  hwsp_seqno(request),
-		  intel_engine_get_seqno(request->engine));
+		  hwsp_seqno(request));
 
 	lockdep_assert_held(&request->i915->drm.struct_mutex);
 	GEM_BUG_ON(!i915_sw_fence_signaled(&request->submit));
@@ -288,15 +301,13 @@ static void i915_request_retire(struct i915_request *request)
 
 	i915_request_remove_from_client(request);
 
-	/* Retirement decays the ban score as it is a sign of ctx progress */
-	atomic_dec_if_positive(&request->gem_context->ban_score);
 	intel_context_unpin(request->hw_context);
 
 	__retire_engine_upto(request->engine, request);
 
 	unreserve_gt(request->i915);
 
-	i915_sched_node_fini(request->i915, &request->sched);
+	i915_sched_node_fini(&request->sched);
 	i915_request_put(request);
 }
 
@@ -305,12 +316,10 @@ void i915_request_retire_upto(struct i915_request *rq)
 	struct intel_ring *ring = rq->ring;
 	struct i915_request *tmp;
 
-	GEM_TRACE("%s fence %llx:%lld, global=%d, current %d:%d\n",
+	GEM_TRACE("%s fence %llx:%lld, current %d\n",
 		  rq->engine->name,
 		  rq->fence.context, rq->fence.seqno,
-		  rq->global_seqno,
-		  hwsp_seqno(rq),
-		  intel_engine_get_seqno(rq->engine));
+		  hwsp_seqno(rq));
 
 	lockdep_assert_held(&rq->i915->drm.struct_mutex);
 	GEM_BUG_ON(!i915_request_completed(rq));
@@ -326,9 +335,67 @@ void i915_request_retire_upto(struct i915_request *rq)
 	} while (tmp != rq);
 }
 
-static u32 timeline_get_seqno(struct i915_timeline *tl)
+static void irq_execute_cb(struct irq_work *wrk)
+{
+	struct execute_cb *cb = container_of(wrk, typeof(*cb), work);
+
+	i915_sw_fence_complete(cb->fence);
+	kmem_cache_free(global.slab_execute_cbs, cb);
+}
+
+static void __notify_execute_cb(struct i915_request *rq)
+{
+	struct execute_cb *cb;
+
+	lockdep_assert_held(&rq->lock);
+
+	if (list_empty(&rq->execute_cb))
+		return;
+
+	list_for_each_entry(cb, &rq->execute_cb, link)
+		irq_work_queue(&cb->work);
+
+	/*
+	 * XXX Rollback on __i915_request_unsubmit()
+	 *
+	 * In the future, perhaps when we have an active time-slicing scheduler,
+	 * it will be interesting to unsubmit parallel execution and remove
+	 * busywaits from the GPU until their master is restarted. This is
+	 * quite hairy, we have to carefully rollback the fence and do a
+	 * preempt-to-idle cycle on the target engine, all the while the
+	 * master execute_cb may refire.
+	 */
+	INIT_LIST_HEAD(&rq->execute_cb);
+}
+
+static int
+i915_request_await_execution(struct i915_request *rq,
+			     struct i915_request *signal,
+			     gfp_t gfp)
 {
-	return tl->seqno += 1 + tl->has_initial_breadcrumb;
+	struct execute_cb *cb;
+
+	if (i915_request_is_active(signal))
+		return 0;
+
+	cb = kmem_cache_alloc(global.slab_execute_cbs, gfp);
+	if (!cb)
+		return -ENOMEM;
+
+	cb->fence = &rq->submit;
+	i915_sw_fence_await(cb->fence);
+	init_irq_work(&cb->work, irq_execute_cb);
+
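+	/*
+	 * The signaler may have started execution since the unlocked peek
+	 * above; re-check under its lock and either complete our fence
+	 * immediately or queue the callback for __notify_execute_cb().
+	 */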
+	spin_lock_irq(&signal->lock);
+	if (i915_request_is_active(signal)) {
+		i915_sw_fence_complete(cb->fence);
+		kmem_cache_free(global.slab_execute_cbs, cb);
+	} else {
+		list_add_tail(&cb->link, &signal->execute_cb);
+	}
+	spin_unlock_irq(&signal->lock);
+
+	return 0;
 }
 
 static void move_to_timeline(struct i915_request *request,
@@ -342,42 +409,33 @@ static void move_to_timeline(struct i915_request *request,
 	spin_unlock(&request->timeline->lock);
 }
 
-static u32 next_global_seqno(struct i915_timeline *tl)
-{
-	if (!++tl->seqno)
-		++tl->seqno;
-	return tl->seqno;
-}
-
 void __i915_request_submit(struct i915_request *request)
 {
 	struct intel_engine_cs *engine = request->engine;
-	u32 seqno;
 
-	GEM_TRACE("%s fence %llx:%lld -> global=%d, current %d:%d\n",
+	GEM_TRACE("%s fence %llx:%lld -> current %d\n",
 		  engine->name,
 		  request->fence.context, request->fence.seqno,
-		  engine->timeline.seqno + 1,
-		  hwsp_seqno(request),
-		  intel_engine_get_seqno(engine));
+		  hwsp_seqno(request));
 
 	GEM_BUG_ON(!irqs_disabled());
 	lockdep_assert_held(&engine->timeline.lock);
 
-	GEM_BUG_ON(request->global_seqno);
-
-	seqno = next_global_seqno(&engine->timeline);
-	GEM_BUG_ON(!seqno);
-	GEM_BUG_ON(intel_engine_signaled(engine, seqno));
+	if (i915_gem_context_is_banned(request->gem_context))
+		i915_request_skip(request, -EIO);
 
 	/* We may be recursing from the signal callback of another i915 fence */
 	spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);
+
 	GEM_BUG_ON(test_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags));
 	set_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags);
-	request->global_seqno = seqno;
+
 	if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags) &&
 	    !i915_request_enable_breadcrumb(request))
 		intel_engine_queue_breadcrumbs(engine);
+
+	__notify_execute_cb(request);
+
 	spin_unlock(&request->lock);
 
 	engine->emit_fini_breadcrumb(request,
@@ -406,12 +464,10 @@ void __i915_request_unsubmit(struct i915_request *request)
 {
 	struct intel_engine_cs *engine = request->engine;
 
-	GEM_TRACE("%s fence %llx:%lld <- global=%d, current %d:%d\n",
+	GEM_TRACE("%s fence %llx:%lld, current %d\n",
 		  engine->name,
 		  request->fence.context, request->fence.seqno,
-		  request->global_seqno,
-		  hwsp_seqno(request),
-		  intel_engine_get_seqno(engine));
+		  hwsp_seqno(request));
 
 	GEM_BUG_ON(!irqs_disabled());
 	lockdep_assert_held(&engine->timeline.lock);
@@ -420,18 +476,25 @@ void __i915_request_unsubmit(struct i915_request *request)
 	 * Only unwind in reverse order, required so that the per-context list
 	 * is kept in seqno/ring order.
 	 */
-	GEM_BUG_ON(!request->global_seqno);
-	GEM_BUG_ON(request->global_seqno != engine->timeline.seqno);
-	GEM_BUG_ON(intel_engine_has_completed(engine, request->global_seqno));
-	engine->timeline.seqno--;
 
 	/* We may be recursing from the signal callback of another i915 fence */
 	spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);
-	request->global_seqno = 0;
+
+	/*
+	 * As we do not allow WAIT to preempt inflight requests,
+	 * once we have executed a request, along with triggering
+	 * any execution callbacks, we must preserve its ordering
+	 * within the non-preemptible FIFO.
+	 */
+	BUILD_BUG_ON(__NO_PREEMPTION & ~I915_PRIORITY_MASK); /* only internal */
+	request->sched.attr.priority |= __NO_PREEMPTION;
+
 	if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags))
 		i915_request_cancel_breadcrumb(request);
+
 	GEM_BUG_ON(!test_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags));
 	clear_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags);
+
 	spin_unlock(&request->lock);
 
 	/* Transfer back from the global per-engine timeline to per-context */
@@ -518,7 +581,7 @@ i915_request_alloc_slow(struct intel_context *ce)
 	ring_retire_requests(ring);
 
 out:
-	return kmem_cache_alloc(ce->gem_context->i915->requests, GFP_KERNEL);
+	return kmem_cache_alloc(global.slab_requests, GFP_KERNEL);
 }
 
 static int add_timeline_barrier(struct i915_request *rq)
@@ -539,8 +602,10 @@ struct i915_request *
 i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
 {
 	struct drm_i915_private *i915 = engine->i915;
-	struct i915_request *rq;
 	struct intel_context *ce;
+	struct i915_timeline *tl;
+	struct i915_request *rq;
+	u32 seqno;
 	int ret;
 
 	lockdep_assert_held(&i915->drm.struct_mutex);
@@ -556,8 +621,9 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
 	 * ABI: Before userspace accesses the GPU (e.g. execbuffer), report
 	 * EIO if the GPU is already wedged.
 	 */
-	if (i915_terminally_wedged(&i915->gpu_error))
-		return ERR_PTR(-EIO);
+	ret = i915_terminally_wedged(i915);
+	if (ret)
+		return ERR_PTR(ret);
 
 	/*
 	 * Pinning the contexts may generate requests in order to acquire
@@ -569,6 +635,7 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
 		return ERR_CAST(ce);
 
 	reserve_gt(i915);
+	mutex_lock(&ce->ring->timeline->mutex);
 
 	/* Move our oldest request to the slab-cache (if not in use!) */
 	rq = list_first_entry(&ce->ring->request_list, typeof(*rq), ring_link);
@@ -605,7 +672,7 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
 	 *
 	 * Do not use kmem_cache_zalloc() here!
 	 */
-	rq = kmem_cache_alloc(i915->requests,
+	rq = kmem_cache_alloc(global.slab_requests,
 			      GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
 	if (unlikely(!rq)) {
 		rq = i915_request_alloc_slow(ce);
@@ -615,24 +682,28 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
 		}
 	}
 
-	rq->rcustate = get_state_synchronize_rcu();
-
 	INIT_LIST_HEAD(&rq->active_list);
+	INIT_LIST_HEAD(&rq->execute_cb);
+
+	tl = ce->ring->timeline;
+	ret = i915_timeline_get_seqno(tl, rq, &seqno);
+	if (ret)
+		goto err_free;
+
 	rq->i915 = i915;
 	rq->engine = engine;
 	rq->gem_context = ctx;
 	rq->hw_context = ce;
 	rq->ring = ce->ring;
-	rq->timeline = ce->ring->timeline;
+	rq->timeline = tl;
 	GEM_BUG_ON(rq->timeline == &engine->timeline);
-	rq->hwsp_seqno = rq->timeline->hwsp_seqno;
+	rq->hwsp_seqno = tl->hwsp_seqno;
+	rq->hwsp_cacheline = tl->hwsp_cacheline;
+	rq->rcustate = get_state_synchronize_rcu(); /* acts as smp_mb() */
 
 	spin_lock_init(&rq->lock);
-	dma_fence_init(&rq->fence,
-		       &i915_fence_ops,
-		       &rq->lock,
-		       rq->timeline->fence_context,
-		       timeline_get_seqno(rq->timeline));
+	dma_fence_init(&rq->fence, &i915_fence_ops, &rq->lock,
+		       tl->fence_context, seqno);
 
 	/* We bump the ref for the fence chain */
 	i915_sw_fence_init(&i915_request_get(rq)->submit, submit_notify);
@@ -640,7 +711,6 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
 	i915_sched_node_init(&rq->sched);
 
 	/* No zalloc, must clear what we need by hand */
-	rq->global_seqno = 0;
 	rq->file_priv = NULL;
 	rq->batch = NULL;
 	rq->capture_list = NULL;
@@ -693,13 +763,62 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
 	GEM_BUG_ON(!list_empty(&rq->sched.signalers_list));
 	GEM_BUG_ON(!list_empty(&rq->sched.waiters_list));
 
-	kmem_cache_free(i915->requests, rq);
+err_free:
+	kmem_cache_free(global.slab_requests, rq);
 err_unreserve:
+	mutex_unlock(&ce->ring->timeline->mutex);
 	unreserve_gt(i915);
 	intel_context_unpin(ce);
 	return ERR_PTR(ret);
 }
 
+static int
+emit_semaphore_wait(struct i915_request *to,
+		    struct i915_request *from,
+		    gfp_t gfp)
+{
+	u32 hwsp_offset;
+	u32 *cs;
+	int err;
+
+	GEM_BUG_ON(!from->timeline->has_initial_breadcrumb);
+	GEM_BUG_ON(INTEL_GEN(to->i915) < 8);
+
+	/* We need to pin the signaler's HWSP until we are finished reading. */
+	err = i915_timeline_read_hwsp(from, to, &hwsp_offset);
+	if (err)
+		return err;
+
+	/* Only submit our spinner after the signaler is running! */
+	err = i915_request_await_execution(to, from, gfp);
+	if (err)
+		return err;
+
+	cs = intel_ring_begin(to, 4);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
+
+	/*
+	 * Using greater-than-or-equal here means we have to worry
+	 * about seqno wraparound. To side step that issue, we swap
+	 * the timeline HWSP upon wrapping, so that anyone listening
+	 * for the old (pre-wrap) values does not see the much smaller
+	 * (post-wrap) values it was not expecting (and so waits
+	 * forever).
+	 */
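+	/*
+	 * Dword layout: command + flags, the semaphore value to compare
+	 * against, then the 64b GTT address of the signaler's HWSP seqno.
+	 */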
+	*cs++ = MI_SEMAPHORE_WAIT |
+		MI_SEMAPHORE_GLOBAL_GTT |
+		MI_SEMAPHORE_POLL |
+		MI_SEMAPHORE_SAD_GTE_SDD;
+	*cs++ = from->fence.seqno;
+	*cs++ = hwsp_offset;
+	*cs++ = 0;
+
+	intel_ring_advance(to, cs);
+	to->sched.flags |= I915_SCHED_HAS_SEMAPHORE;
+	return 0;
+}
+
 static int
 i915_request_await_request(struct i915_request *to, struct i915_request *from)
 {
@@ -712,9 +831,7 @@ i915_request_await_request(struct i915_request *to, struct i915_request *from)
 		return 0;
 
 	if (to->engine->schedule) {
-		ret = i915_sched_node_add_dependency(to->i915,
-						     &to->sched,
-						     &from->sched);
+		ret = i915_sched_node_add_dependency(&to->sched, &from->sched);
 		if (ret < 0)
 			return ret;
 	}
@@ -723,6 +840,9 @@ i915_request_await_request(struct i915_request *to, struct i915_request *from)
 		ret = i915_sw_fence_await_sw_fence_gfp(&to->submit,
 						       &from->submit,
 						       I915_FENCE_GFP);
+	} else if (intel_engine_has_semaphores(to->engine) &&
+		   to->gem_context->sched.priority >= I915_PRIORITY_NORMAL) {
+		ret = emit_semaphore_wait(to, from, I915_FENCE_GFP);
 	} else {
 		ret = i915_sw_fence_await_dma_fence(&to->submit,
 						    &from->fence, 0,
@@ -889,7 +1009,7 @@ void i915_request_add(struct i915_request *request)
 	GEM_TRACE("%s fence %llx:%lld\n",
 		  engine->name, request->fence.context, request->fence.seqno);
 
-	lockdep_assert_held(&request->i915->drm.struct_mutex);
+	lockdep_assert_held(&request->timeline->mutex);
 	trace_i915_request_add(request);
 
 	/*
@@ -948,6 +1068,7 @@ void i915_request_add(struct i915_request *request)
 		GEM_TRACE("marking %s as active\n", ring->timeline->name);
 		list_add(&ring->active_link, &request->i915->gt.active_rings);
 	}
+	request->i915->gt.active_engines |= request->engine->mask;
 	request->emitted_jiffies = jiffies;
 
 	/*
@@ -966,6 +1087,21 @@ void i915_request_add(struct i915_request *request)
 	if (engine->schedule) {
 		struct i915_sched_attr attr = request->gem_context->sched;
 
+		/*
+		 * Boost actual workloads past semaphores!
+		 *
+		 * With semaphores we spin on one engine waiting for another,
+		 * simply to reduce the latency of starting our work when
+		 * the signaler completes. However, if there is any other
+		 * work that we could be doing on this engine instead, that
+		 * is better utilisation and will reduce the overall duration
+		 * of the current work. To avoid priority-inheritance
+		 * boosting a distant semaphore wait ahead of useful work,
+		 * we keep a history of any semaphore use along our
+		 * dependency chain.
+		 */
+		if (!(request->sched.flags & I915_SCHED_HAS_SEMAPHORE))
+			attr.priority |= I915_PRIORITY_NOSEMAPHORE;
+
 		/*
 		 * Boost priorities to new clients (new request flows).
 		 *
@@ -1000,6 +1136,8 @@ void i915_request_add(struct i915_request *request)
 	 */
 	if (prev && i915_request_completed(prev))
 		i915_request_retire_upto(prev);
+
+	mutex_unlock(&request->timeline->mutex);
 }
 
 static unsigned long local_clock_us(unsigned int *cpu)
@@ -1136,8 +1274,23 @@ long i915_request_wait(struct i915_request *rq,
 	if (__i915_spin_request(rq, state, 5))
 		goto out;
 
-	if (flags & I915_WAIT_PRIORITY)
+	/*
+	 * This client is about to stall waiting for the GPU. In many cases
+	 * this is undesirable and limits the throughput of the system, as
+	 * many clients cannot continue processing user input/output whilst
+	 * blocked. RPS autotuning may take tens of milliseconds to respond
+	 * to the GPU load and thus incurs additional latency for the client.
+	 * We can circumvent that by promoting the GPU frequency to maximum
+	 * before we sleep. This makes the GPU throttle up much more quickly
+	 * (good for benchmarks and user experience, e.g. window animations),
+	 * but at a cost of spending more power processing the workload
+	 * (bad for battery).
+	 */
+	if (flags & I915_WAIT_PRIORITY) {
+		if (!i915_request_started(rq) && INTEL_GEN(rq->i915) >= 6)
+			gen6_rps_boost(rq);
 		i915_schedule_bump_priority(rq, I915_PRIORITY_WAIT);
+	}
 
 	wait.tsk = current;
 	if (dma_fence_add_callback(&rq->fence, &wait.cb, request_wait_wake))
@@ -1179,11 +1332,66 @@ void i915_retire_requests(struct drm_i915_private *i915)
 	if (!i915->gt.active_requests)
 		return;
 
-	list_for_each_entry_safe(ring, tmp, &i915->gt.active_rings, active_link)
+	list_for_each_entry_safe(ring, tmp,
+				 &i915->gt.active_rings, active_link) {
+		intel_ring_get(ring); /* last rq holds reference! */
 		ring_retire_requests(ring);
+		intel_ring_put(ring);
+	}
 }
 
 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
 #include "selftests/mock_request.c"
 #include "selftests/i915_request.c"
 #endif
+
+static void i915_global_request_shrink(void)
+{
+	kmem_cache_shrink(global.slab_dependencies);
+	kmem_cache_shrink(global.slab_execute_cbs);
+	kmem_cache_shrink(global.slab_requests);
+}
+
+static void i915_global_request_exit(void)
+{
+	kmem_cache_destroy(global.slab_dependencies);
+	kmem_cache_destroy(global.slab_execute_cbs);
+	kmem_cache_destroy(global.slab_requests);
+}
+
+static struct i915_global_request global = { {
+	.shrink = i915_global_request_shrink,
+	.exit = i915_global_request_exit,
+} };
+
+int __init i915_global_request_init(void)
+{
+	global.slab_requests = KMEM_CACHE(i915_request,
+					  SLAB_HWCACHE_ALIGN |
+					  SLAB_RECLAIM_ACCOUNT |
+					  SLAB_TYPESAFE_BY_RCU);
+	if (!global.slab_requests)
+		return -ENOMEM;
+
+	global.slab_execute_cbs = KMEM_CACHE(execute_cb,
+					     SLAB_HWCACHE_ALIGN |
+					     SLAB_RECLAIM_ACCOUNT |
+					     SLAB_TYPESAFE_BY_RCU);
+	if (!global.slab_execute_cbs)
+		goto err_requests;
+
+	global.slab_dependencies = KMEM_CACHE(i915_dependency,
+					      SLAB_HWCACHE_ALIGN |
+					      SLAB_RECLAIM_ACCOUNT);
+	if (!global.slab_dependencies)
+		goto err_execute_cbs;
+
+	i915_global_register(&global.base);
+	return 0;
+
+err_execute_cbs:
+	kmem_cache_destroy(global.slab_execute_cbs);
+err_requests:
+	kmem_cache_destroy(global.slab_requests);
+	return -ENOMEM;
+}
diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
index 40f3e8dcbdd51a2f02935092b52cd6c7f9348060..8c8fa50106440922a701936e7b6797fe2f3c59df 100644
--- a/drivers/gpu/drm/i915/i915_request.h
+++ b/drivers/gpu/drm/i915/i915_request.h
@@ -29,6 +29,7 @@
 
 #include "i915_gem.h"
 #include "i915_scheduler.h"
+#include "i915_selftest.h"
 #include "i915_sw_fence.h"
 
 #include <uapi/drm/i915_drm.h>
@@ -37,6 +38,7 @@ struct drm_file;
 struct drm_i915_gem_object;
 struct i915_request;
 struct i915_timeline;
+struct i915_timeline_cacheline;
 
 struct i915_capture_list {
 	struct i915_capture_list *next;
@@ -127,6 +129,7 @@ struct i915_request {
 	 */
 	struct i915_sw_fence submit;
 	wait_queue_entry_t submitq;
+	struct list_head execute_cb;
 
 	/*
 	 * A list of everyone we wait upon, and everyone who waits upon us.
@@ -147,13 +150,15 @@ struct i915_request {
 	 */
 	const u32 *hwsp_seqno;
 
-	/**
-	 * GEM sequence number associated with this request on the
-	 * global execution timeline. It is zero when the request is not
-	 * on the HW queue (i.e. not on the engine timeline list).
-	 * Its value is guarded by the timeline spinlock.
+	/*
+	 * If we need to access the timeline's seqno for this request in
+	 * another request, we need to keep a read reference to this associated
+	 * cacheline, so that we do not free and recycle it before the foreign
+	 * observers have completed. Hence, we keep a pointer to the cacheline
+	 * inside the timeline's HWSP vma, but it is only valid while this
+	 * request has not completed, and is guarded by the timeline mutex.
 	 */
-	u32 global_seqno;
+	struct i915_timeline_cacheline *hwsp_cacheline;
 
 	/** Position in the ring of the start of the request */
 	u32 head;
@@ -204,6 +209,11 @@ struct i915_request {
 	struct drm_i915_file_private *file_priv;
 	/** file_priv list entry for this request */
 	struct list_head client_link;
+
+	I915_SELFTEST_DECLARE(struct {
+		struct list_head link;
+		unsigned long delay;
+	} mock;)
 };
 
 #define I915_FENCE_GFP (GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN)
@@ -247,30 +257,6 @@ i915_request_put(struct i915_request *rq)
 	dma_fence_put(&rq->fence);
 }
 
-/**
- * i915_request_global_seqno - report the current global seqno
- * @request - the request
- *
- * A request is assigned a global seqno only when it is on the hardware
- * execution queue. The global seqno can be used to maintain a list of
- * requests on the same engine in retirement order, for example for
- * constructing a priority queue for waiting. Prior to its execution, or
- * if it is subsequently removed in the event of preemption, its global
- * seqno is zero. As both insertion and removal from the execution queue
- * may operate in IRQ context, it is not guarded by the usual struct_mutex
- * BKL. Instead those relying on the global seqno must be prepared for its
- * value to change between reads. Only when the request is complete can
- * the global seqno be stable (due to the memory barriers on submitting
- * the commands to the hardware to write the breadcrumb, if the HWS shows
- * that it has passed the global seqno and the global seqno is unchanged
- * after the read, it is indeed complete).
- */
-static inline u32
-i915_request_global_seqno(const struct i915_request *request)
-{
-	return READ_ONCE(request->global_seqno);
-}
-
 int i915_request_await_object(struct i915_request *to,
 			      struct drm_i915_gem_object *obj,
 			      bool write);
@@ -358,10 +344,27 @@ static inline bool __i915_request_has_started(const struct i915_request *rq)
  * i915_request_started - check if the request has begun being executed
  * @rq: the request
  *
- * Returns true if the request has been submitted to hardware, and the hardware
- * has advanced passed the end of the previous request and so should be either
- * currently processing the request (though it may be preempted and so
- * not necessarily the next request to complete) or have completed the request.
+ * If the timeline is not using initial breadcrumbs, a request is
+ * considered started if the previous request on its timeline (i.e.
+ * context) has been signaled.
+ *
+ * If the timeline is using semaphores, it will also be emitting an
+ * "initial breadcrumb" after the semaphores are complete and just before
+ * it began executing the user payload. A request can therefore be active
+ * on the HW and not yet started as it is still busywaiting on its
+ * dependencies (via HW semaphores).
+ *
+ * If the request has started, its dependencies will have been signaled
+ * (either by fences or by semaphores) and it will have begun processing
+ * the user payload.
+ *
+ * However, even if a request has started, it may have been preempted and
+ * so no longer active, or it may have already completed.
+ *
+ * See also i915_request_is_active().
+ *
+ * Returns true if the request has begun executing the user payload, or
+ * has completed:
  */
 static inline bool i915_request_started(const struct i915_request *rq)
 {
diff --git a/drivers/gpu/drm/i915/i915_reset.c b/drivers/gpu/drm/i915/i915_reset.c
index 0e0ddf2e681521915b9255cf1a03dfaf79e3578e..fb86d5ca5d8b8c9d3bda5640f0707092cdb75e9b 100644
--- a/drivers/gpu/drm/i915/i915_reset.c
+++ b/drivers/gpu/drm/i915/i915_reset.c
@@ -22,24 +22,15 @@ static void engine_skip_context(struct i915_request *rq)
 {
 	struct intel_engine_cs *engine = rq->engine;
 	struct i915_gem_context *hung_ctx = rq->gem_context;
-	struct i915_timeline *timeline = rq->timeline;
 
 	lockdep_assert_held(&engine->timeline.lock);
-	GEM_BUG_ON(timeline == &engine->timeline);
 
-	spin_lock(&timeline->lock);
-
-	if (i915_request_is_active(rq)) {
-		list_for_each_entry_continue(rq,
-					     &engine->timeline.requests, link)
-			if (rq->gem_context == hung_ctx)
-				i915_request_skip(rq, -EIO);
-	}
-
-	list_for_each_entry(rq, &timeline->requests, link)
-		i915_request_skip(rq, -EIO);
+	if (!i915_request_is_active(rq))
+		return;
 
-	spin_unlock(&timeline->lock);
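+	/*
+	 * Cancel every later request queued on this engine by the hung
+	 * context, so none of its pending work is replayed after reset.
+	 */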
+	list_for_each_entry_continue(rq, &engine->timeline.requests, link)
+		if (rq->gem_context == hung_ctx)
+			i915_request_skip(rq, -EIO);
 }
 
 static void client_mark_guilty(struct drm_i915_file_private *file_priv,
@@ -68,23 +59,29 @@ static void client_mark_guilty(struct drm_i915_file_private *file_priv,
 
 static bool context_mark_guilty(struct i915_gem_context *ctx)
 {
-	unsigned int score;
-	bool banned, bannable;
+	unsigned long prev_hang;
+	bool banned;
+	int i;
 
 	atomic_inc(&ctx->guilty_count);
 
-	bannable = i915_gem_context_is_bannable(ctx);
-	score = atomic_add_return(CONTEXT_SCORE_GUILTY, &ctx->ban_score);
-	banned = score >= CONTEXT_SCORE_BAN_THRESHOLD;
-
-	/* Cool contexts don't accumulate client ban score */
-	if (!bannable)
+	/* Cool contexts are too cool to be banned! (Used for reset testing.) */
+	if (!i915_gem_context_is_bannable(ctx))
 		return false;
 
+	/* Record the timestamp for the last N hangs */
+	prev_hang = ctx->hang_timestamp[0];
+	for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp) - 1; i++)
+		ctx->hang_timestamp[i] = ctx->hang_timestamp[i + 1];
+	ctx->hang_timestamp[i] = jiffies;
+
+	/* If we have hung N+1 times in rapid succession, we ban the context! */
+	banned = !i915_gem_context_is_recoverable(ctx);
+	if (time_before(jiffies, prev_hang + CONTEXT_FAST_HANG_JIFFIES))
+		banned = true;
 	if (banned) {
-		DRM_DEBUG_DRIVER("context %s: guilty %d, score %u, banned\n",
-				 ctx->name, atomic_read(&ctx->guilty_count),
-				 score);
+		DRM_DEBUG_DRIVER("context %s: guilty %d, banned\n",
+				 ctx->name, atomic_read(&ctx->guilty_count));
 		i915_gem_context_set_banned(ctx);
 	}
 
@@ -101,6 +98,12 @@ static void context_mark_innocent(struct i915_gem_context *ctx)
 
 void i915_reset_request(struct i915_request *rq, bool guilty)
 {
+	GEM_TRACE("%s rq=%llx:%lld, guilty? %s\n",
+		  rq->engine->name,
+		  rq->fence.context,
+		  rq->fence.seqno,
+		  yesno(guilty));
+
 	lockdep_assert_held(&rq->engine->timeline.lock);
 	GEM_BUG_ON(i915_request_completed(rq));
 
@@ -119,8 +122,10 @@ static void gen3_stop_engine(struct intel_engine_cs *engine)
 	struct drm_i915_private *dev_priv = engine->i915;
 	const u32 base = engine->mmio_base;
 
+	GEM_TRACE("%s\n", engine->name);
+
 	if (intel_engine_stop_cs(engine))
-		DRM_DEBUG_DRIVER("%s: timed out on STOP_RING\n", engine->name);
+		GEM_TRACE("%s: timed out on STOP_RING\n", engine->name);
 
 	I915_WRITE_FW(RING_HEAD(base), I915_READ_FW(RING_TAIL(base)));
 	POSTING_READ_FW(RING_HEAD(base)); /* paranoia */
@@ -133,9 +138,9 @@ static void gen3_stop_engine(struct intel_engine_cs *engine)
 	I915_WRITE_FW(RING_CTL(base), 0);
 
 	/* Check acts as a post */
-	if (I915_READ_FW(RING_HEAD(base)) != 0)
-		DRM_DEBUG_DRIVER("%s: ring head not parked\n",
-				 engine->name);
+	if (I915_READ_FW(RING_HEAD(base)))
+		GEM_TRACE("%s: ring head [%x] not parked\n",
+			  engine->name, I915_READ_FW(RING_HEAD(base)));
 }
 
 static void i915_stop_engines(struct drm_i915_private *i915,
@@ -298,12 +303,12 @@ static int gen6_reset_engines(struct drm_i915_private *i915,
 			      unsigned int retry)
 {
 	struct intel_engine_cs *engine;
-	const u32 hw_engine_mask[I915_NUM_ENGINES] = {
-		[RCS] = GEN6_GRDOM_RENDER,
-		[BCS] = GEN6_GRDOM_BLT,
-		[VCS] = GEN6_GRDOM_MEDIA,
-		[VCS2] = GEN8_GRDOM_MEDIA2,
-		[VECS] = GEN6_GRDOM_VECS,
+	const u32 hw_engine_mask[] = {
+		[RCS0]  = GEN6_GRDOM_RENDER,
+		[BCS0]  = GEN6_GRDOM_BLT,
+		[VCS0]  = GEN6_GRDOM_MEDIA,
+		[VCS1]  = GEN8_GRDOM_MEDIA2,
+		[VECS0] = GEN6_GRDOM_VECS,
 	};
 	u32 hw_mask;
 
@@ -313,8 +318,10 @@ static int gen6_reset_engines(struct drm_i915_private *i915,
 		unsigned int tmp;
 
 		hw_mask = 0;
-		for_each_engine_masked(engine, i915, engine_mask, tmp)
+		for_each_engine_masked(engine, i915, engine_mask, tmp) {
+			GEM_BUG_ON(engine->id >= ARRAY_SIZE(hw_engine_mask));
 			hw_mask |= hw_engine_mask[engine->id];
+		}
 	}
 
 	return gen6_hw_domain_reset(i915, hw_mask);
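
Dropping the explicit [I915_NUM_ENGINES] bound means the table is now sized by its highest designated index, which is why the GEM_BUG_ON() bounds check is added in the same hunk. A standalone illustration of the idiom, with hypothetical values:

	enum { RCS0, BCS0, VCS0, VCS1, VECS0 };

	static const unsigned int hw_mask[] = {
		[RCS0]  = 0x01,
		[VECS0] = 0x10,	/* highest designated index fixes the size */
	};

	/*
	 * sizeof(hw_mask) / sizeof(hw_mask[0]) == VECS0 + 1, so any engine
	 * id at or beyond VECS0 + 1 must be rejected before indexing --
	 * exactly the job of the GEM_BUG_ON() added above.
	 */
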
@@ -421,28 +428,27 @@ static int gen11_reset_engines(struct drm_i915_private *i915,
 			       unsigned int engine_mask,
 			       unsigned int retry)
 {
-	const u32 hw_engine_mask[I915_NUM_ENGINES] = {
-		[RCS] = GEN11_GRDOM_RENDER,
-		[BCS] = GEN11_GRDOM_BLT,
-		[VCS] = GEN11_GRDOM_MEDIA,
-		[VCS2] = GEN11_GRDOM_MEDIA2,
-		[VCS3] = GEN11_GRDOM_MEDIA3,
-		[VCS4] = GEN11_GRDOM_MEDIA4,
-		[VECS] = GEN11_GRDOM_VECS,
-		[VECS2] = GEN11_GRDOM_VECS2,
+	const u32 hw_engine_mask[] = {
+		[RCS0]  = GEN11_GRDOM_RENDER,
+		[BCS0]  = GEN11_GRDOM_BLT,
+		[VCS0]  = GEN11_GRDOM_MEDIA,
+		[VCS1]  = GEN11_GRDOM_MEDIA2,
+		[VCS2]  = GEN11_GRDOM_MEDIA3,
+		[VCS3]  = GEN11_GRDOM_MEDIA4,
+		[VECS0] = GEN11_GRDOM_VECS,
+		[VECS1] = GEN11_GRDOM_VECS2,
 	};
 	struct intel_engine_cs *engine;
 	unsigned int tmp;
 	u32 hw_mask;
 	int ret;
 
-	BUILD_BUG_ON(VECS2 + 1 != I915_NUM_ENGINES);
-
 	if (engine_mask == ALL_ENGINES) {
 		hw_mask = GEN11_GRDOM_FULL;
 	} else {
 		hw_mask = 0;
 		for_each_engine_masked(engine, i915, engine_mask, tmp) {
+			GEM_BUG_ON(engine->id >= ARRAY_SIZE(hw_engine_mask));
 			hw_mask |= hw_engine_mask[engine->id];
 			hw_mask |= gen11_lock_sfc(i915, engine);
 		}
@@ -532,9 +538,6 @@ typedef int (*reset_func)(struct drm_i915_private *,
 
 static reset_func intel_get_gpu_reset(struct drm_i915_private *i915)
 {
-	if (!i915_modparams.reset)
-		return NULL;
-
 	if (INTEL_GEN(i915) >= 8)
 		return gen8_reset_engines;
 	else if (INTEL_GEN(i915) >= 6)
@@ -582,7 +585,8 @@ int intel_gpu_reset(struct drm_i915_private *i915, unsigned int engine_mask)
 		 *
 		 * FIXME: Wa for more modern gens needs to be validated
 		 */
-		i915_stop_engines(i915, engine_mask);
+		if (retry)
+			i915_stop_engines(i915, engine_mask);
 
 		GEM_TRACE("engine_mask=%x\n", engine_mask);
 		preempt_disable();
@@ -599,6 +603,9 @@ bool intel_has_gpu_reset(struct drm_i915_private *i915)
 	if (USES_GUC(i915))
 		return false;
 
+	if (!i915_modparams.reset)
+		return false;
+
 	return intel_get_gpu_reset(i915);
 }
 
@@ -639,6 +646,32 @@ static void reset_prepare_engine(struct intel_engine_cs *engine)
 	engine->reset.prepare(engine);
 }
 
+static void revoke_mmaps(struct drm_i915_private *i915)
+{
+	int i;
+
+	for (i = 0; i < i915->num_fence_regs; i++) {
+		struct drm_vma_offset_node *node;
+		struct i915_vma *vma;
+		u64 vma_offset;
+
+		vma = READ_ONCE(i915->fence_regs[i].vma);
+		if (!vma)
+			continue;
+
+		if (!i915_vma_has_userfault(vma))
+			continue;
+
+		GEM_BUG_ON(vma->fence != &i915->fence_regs[i]);
+		node = &vma->obj->base.vma_node;
+		vma_offset = vma->ggtt_view.partial.offset << PAGE_SHIFT;
+		unmap_mapping_range(i915->drm.anon_inode->i_mapping,
+				    drm_vma_node_offset_addr(node) + vma_offset,
+				    vma->size,
+				    1);
+	}
+}
+
 static void reset_prepare(struct drm_i915_private *i915)
 {
 	struct intel_engine_cs *engine;
@@ -647,7 +680,12 @@ static void reset_prepare(struct drm_i915_private *i915)
 	for_each_engine(engine, i915, id)
 		reset_prepare_engine(engine);
 
-	intel_uc_sanitize(i915);
+	intel_uc_reset_prepare(i915);
+}
+
+static void gt_revoke(struct drm_i915_private *i915)
+{
+	revoke_mmaps(i915);
 }
 
 static int gt_reset(struct drm_i915_private *i915, unsigned int stalled_mask)
@@ -665,7 +703,7 @@ static int gt_reset(struct drm_i915_private *i915, unsigned int stalled_mask)
 		return err;
 
 	for_each_engine(engine, i915, id)
-		intel_engine_reset(engine, stalled_mask & ENGINE_MASK(id));
+		intel_engine_reset(engine, stalled_mask & engine->mask);
 
 	i915_gem_restore_fences(i915);
 
@@ -722,8 +760,10 @@ static void reset_finish(struct drm_i915_private *i915)
 	struct intel_engine_cs *engine;
 	enum intel_engine_id id;
 
-	for_each_engine(engine, i915, id)
+	for_each_engine(engine, i915, id) {
 		reset_finish_engine(engine);
+		intel_engine_signal_breadcrumbs(engine);
+	}
 }
 
 static void reset_restart(struct drm_i915_private *i915)
@@ -761,23 +801,19 @@ static void nop_submit_request(struct i915_request *request)
 	spin_lock_irqsave(&engine->timeline.lock, flags);
 	__i915_request_submit(request);
 	i915_request_mark_complete(request);
-	intel_engine_write_global_seqno(engine, request->global_seqno);
 	spin_unlock_irqrestore(&engine->timeline.lock, flags);
 
 	intel_engine_queue_breadcrumbs(engine);
 }
 
-void i915_gem_set_wedged(struct drm_i915_private *i915)
+static void __i915_gem_set_wedged(struct drm_i915_private *i915)
 {
 	struct i915_gpu_error *error = &i915->gpu_error;
 	struct intel_engine_cs *engine;
 	enum intel_engine_id id;
 
-	mutex_lock(&error->wedge_mutex);
-	if (test_bit(I915_WEDGED, &error->flags)) {
-		mutex_unlock(&error->wedge_mutex);
+	if (test_bit(I915_WEDGED, &error->flags))
 		return;
-	}
 
 	if (GEM_SHOW_DEBUG() && !intel_engines_are_idle(i915)) {
 		struct drm_printer p = drm_debug_printer(__func__);
@@ -793,11 +829,10 @@ void i915_gem_set_wedged(struct drm_i915_private *i915)
 	 * rolling the global seqno forward (since this would complete requests
 	 * for which we haven't set the fence error to EIO yet).
 	 */
-	for_each_engine(engine, i915, id)
-		reset_prepare_engine(engine);
+	reset_prepare(i915);
 
 	/* Even if the GPU reset fails, it should still stop the engines */
-	if (INTEL_GEN(i915) >= 5)
+	if (!INTEL_INFO(i915)->gpu_reset_clobbers_display)
 		intel_gpu_reset(i915, ALL_ENGINES);
 
 	for_each_engine(engine, i915, id) {
@@ -811,31 +846,35 @@ void i915_gem_set_wedged(struct drm_i915_private *i915)
 	 * either this call here to intel_engine_write_global_seqno, or the one
 	 * in nop_submit_request.
 	 */
-	synchronize_rcu();
+	synchronize_rcu_expedited();
 
 	/* Mark all executing requests as skipped */
 	for_each_engine(engine, i915, id)
 		engine->cancel_requests(engine);
 
-	for_each_engine(engine, i915, id) {
-		reset_finish_engine(engine);
-		intel_engine_signal_breadcrumbs(engine);
-	}
+	reset_finish(i915);
 
 	smp_mb__before_atomic();
 	set_bit(I915_WEDGED, &error->flags);
 
 	GEM_TRACE("end\n");
-	mutex_unlock(&error->wedge_mutex);
+}
 
-	wake_up_all(&error->reset_queue);
+void i915_gem_set_wedged(struct drm_i915_private *i915)
+{
+	struct i915_gpu_error *error = &i915->gpu_error;
+	intel_wakeref_t wakeref;
+
+	mutex_lock(&error->wedge_mutex);
+	with_intel_runtime_pm(i915, wakeref)
+		__i915_gem_set_wedged(i915);
+	mutex_unlock(&error->wedge_mutex);
 }
 
-bool i915_gem_unset_wedged(struct drm_i915_private *i915)
+static bool __i915_gem_unset_wedged(struct drm_i915_private *i915)
 {
 	struct i915_gpu_error *error = &i915->gpu_error;
 	struct i915_timeline *tl;
-	bool ret = false;
 
 	if (!test_bit(I915_WEDGED, &error->flags))
 		return true;
@@ -843,8 +882,6 @@ bool i915_gem_unset_wedged(struct drm_i915_private *i915)
 	if (!i915->gt.scratch) /* Never fully initialised, recovery impossible */
 		return false;
 
-	mutex_lock(&error->wedge_mutex);
-
 	GEM_TRACE("start\n");
 
 	/*
@@ -860,30 +897,20 @@ bool i915_gem_unset_wedged(struct drm_i915_private *i915)
 	mutex_lock(&i915->gt.timelines.mutex);
 	list_for_each_entry(tl, &i915->gt.timelines.active_list, link) {
 		struct i915_request *rq;
-		long timeout;
 
 		rq = i915_active_request_get_unlocked(&tl->last_request);
 		if (!rq)
 			continue;
 
 		/*
-		 * We can't use our normal waiter as we want to
-		 * avoid recursively trying to handle the current
-		 * reset. The basic dma_fence_default_wait() installs
-		 * a callback for dma_fence_signal(), which is
-		 * triggered by our nop handler (indirectly, the
-		 * callback enables the signaler thread which is
-		 * woken by the nop_submit_request() advancing the seqno
-		 * and when the seqno passes the fence, the signaler
-		 * then signals the fence waking us up).
+		 * All internal dependencies (i915_requests) will have
+		 * been flushed by the set-wedge, but we may be stuck waiting
+		 * for external fences. These should all be capped to 10s
+		 * (I915_FENCE_TIMEOUT) so this wait should not be unbounded
+		 * in the worst case.
 		 */
-		timeout = dma_fence_default_wait(&rq->fence, true,
-						 MAX_SCHEDULE_TIMEOUT);
+		dma_fence_default_wait(&rq->fence, false, MAX_SCHEDULE_TIMEOUT);
 		i915_request_put(rq);
-		if (timeout < 0) {
-			mutex_unlock(&i915->gt.timelines.mutex);
-			goto unlock;
-		}
 	}
 	mutex_unlock(&i915->gt.timelines.mutex);
 
@@ -904,57 +931,37 @@ bool i915_gem_unset_wedged(struct drm_i915_private *i915)
 
 	smp_mb__before_atomic(); /* complete takeover before enabling execbuf */
 	clear_bit(I915_WEDGED, &i915->gpu_error.flags);
-	ret = true;
-unlock:
-	mutex_unlock(&i915->gpu_error.wedge_mutex);
 
-	return ret;
+	return true;
 }
 
-struct __i915_reset {
-	struct drm_i915_private *i915;
-	unsigned int stalled_mask;
-};
-
-static int __i915_reset__BKL(void *data)
+bool i915_gem_unset_wedged(struct drm_i915_private *i915)
 {
-	struct __i915_reset *arg = data;
-	int err;
+	struct i915_gpu_error *error = &i915->gpu_error;
+	bool result;
 
-	err = intel_gpu_reset(arg->i915, ALL_ENGINES);
-	if (err)
-		return err;
+	mutex_lock(&error->wedge_mutex);
+	result = __i915_gem_unset_wedged(i915);
+	mutex_unlock(&error->wedge_mutex);
 
-	return gt_reset(arg->i915, arg->stalled_mask);
+	return result;
 }
 
-#if RESET_UNDER_STOP_MACHINE
-/*
- * XXX An alternative to using stop_machine would be to park only the
- * processes that have a GGTT mmap. By remote parking the threads (SIGSTOP)
- * we should be able to prevent their memmory accesses via the lost fence
- * registers over the course of the reset without the potential recursive
- * of mutexes between the pagefault handler and reset.
- *
- * See igt/gem_mmap_gtt/hang
- */
-#define __do_reset(fn, arg) stop_machine(fn, arg, NULL)
-#else
-#define __do_reset(fn, arg) fn(arg)
-#endif
-
 static int do_reset(struct drm_i915_private *i915, unsigned int stalled_mask)
 {
-	struct __i915_reset arg = { i915, stalled_mask };
 	int err, i;
 
-	err = __do_reset(__i915_reset__BKL, &arg);
+	gt_revoke(i915);
+
+	err = intel_gpu_reset(i915, ALL_ENGINES);
 	for (i = 0; err && i < RESET_MAX_RETRIES; i++) {
-		msleep(100);
-		err = __do_reset(__i915_reset__BKL, &arg);
+		msleep(10 * (i + 1));
+		err = intel_gpu_reset(i915, ALL_ENGINES);
 	}
+	if (err)
+		return err;
 
-	return err;
+	return gt_reset(i915, stalled_mask);
 }
 
 /**
@@ -966,8 +973,6 @@ static int do_reset(struct drm_i915_private *i915, unsigned int stalled_mask)
  * Reset the chip.  Useful if a hang is detected. Marks the device as wedged
  * on failure.
  *
- * Caller must hold the struct_mutex.
- *
  * Procedure is fairly simple:
  *   - reset the chip using the reset reg
  *   - re-init context state
@@ -990,7 +995,7 @@ void i915_reset(struct drm_i915_private *i915,
 	GEM_BUG_ON(!test_bit(I915_RESET_BACKOFF, &error->flags));
 
 	/* Clear any previous failed attempts at recovery. Time to try again. */
-	if (!i915_gem_unset_wedged(i915))
+	if (!__i915_gem_unset_wedged(i915))
 		return;
 
 	if (reason)
@@ -1007,11 +1012,17 @@ void i915_reset(struct drm_i915_private *i915,
 		goto error;
 	}
 
+	if (INTEL_INFO(i915)->gpu_reset_clobbers_display)
+		intel_runtime_pm_disable_interrupts(i915);
+
 	if (do_reset(i915, stalled_mask)) {
 		dev_err(i915->drm.dev, "Failed to reset chip\n");
 		goto taint;
 	}
 
+	if (INTEL_INFO(i915)->gpu_reset_clobbers_display)
+		intel_runtime_pm_enable_interrupts(i915);
+
 	intel_overlay_reset(i915);
 
 	/*
@@ -1033,7 +1044,7 @@ void i915_reset(struct drm_i915_private *i915,
 
 finish:
 	reset_finish(i915);
-	if (!i915_terminally_wedged(error))
+	if (!__i915_wedged(error))
 		reset_restart(i915);
 	return;
 
@@ -1052,14 +1063,14 @@ void i915_reset(struct drm_i915_private *i915,
 	 */
 	add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
 error:
-	i915_gem_set_wedged(i915);
+	__i915_gem_set_wedged(i915);
 	goto finish;
 }
 
 static inline int intel_gt_reset_engine(struct drm_i915_private *i915,
 					struct intel_engine_cs *engine)
 {
-	return intel_gpu_reset(i915, intel_engine_flag(engine));
+	return intel_gpu_reset(i915, engine->mask);
 }
 
 /**
@@ -1144,7 +1155,12 @@ static void i915_reset_device(struct drm_i915_private *i915,
 	i915_wedge_on_timeout(&w, i915, 5 * HZ) {
 		intel_prepare_reset(i915);
 
+		/* Flush everyone using a resource about to be clobbered */
+		synchronize_srcu_expedited(&error->reset_backoff_srcu);
+
+		mutex_lock(&error->wedge_mutex);
 		i915_reset(i915, engine_mask, reason);
+		mutex_unlock(&error->wedge_mutex);
 
 		intel_finish_reset(i915);
 	}
@@ -1190,7 +1206,7 @@ void i915_clear_error_registers(struct drm_i915_private *dev_priv)
 				   I915_READ(RING_FAULT_REG(engine)) &
 				   ~RING_FAULT_VALID);
 		}
-		POSTING_READ(RING_FAULT_REG(dev_priv->engine[RCS]));
+		POSTING_READ(RING_FAULT_REG(dev_priv->engine[RCS0]));
 	}
 }
 
@@ -1212,6 +1228,7 @@ void i915_handle_error(struct drm_i915_private *i915,
 		       unsigned long flags,
 		       const char *fmt, ...)
 {
+	struct i915_gpu_error *error = &i915->gpu_error;
 	struct intel_engine_cs *engine;
 	intel_wakeref_t wakeref;
 	unsigned int tmp;
@@ -1237,7 +1254,7 @@ void i915_handle_error(struct drm_i915_private *i915,
 	 */
 	wakeref = intel_runtime_pm_get(i915);
 
-	engine_mask &= INTEL_INFO(i915)->ring_mask;
+	engine_mask &= INTEL_INFO(i915)->engine_mask;
 
 	if (flags & I915_ERROR_CAPTURE) {
 		i915_capture_error_state(i915, engine_mask, msg);
@@ -1248,20 +1265,19 @@ void i915_handle_error(struct drm_i915_private *i915,
 	 * Try engine reset when available. We fall back to full reset if
 	 * single reset fails.
 	 */
-	if (intel_has_reset_engine(i915) &&
-	    !i915_terminally_wedged(&i915->gpu_error)) {
+	if (intel_has_reset_engine(i915) && !__i915_wedged(error)) {
 		for_each_engine_masked(engine, i915, engine_mask, tmp) {
 			BUILD_BUG_ON(I915_RESET_MODESET >= I915_RESET_ENGINE);
 			if (test_and_set_bit(I915_RESET_ENGINE + engine->id,
-					     &i915->gpu_error.flags))
+					     &error->flags))
 				continue;
 
 			if (i915_reset_engine(engine, msg) == 0)
-				engine_mask &= ~intel_engine_flag(engine);
+				engine_mask &= ~engine->mask;
 
 			clear_bit(I915_RESET_ENGINE + engine->id,
-				  &i915->gpu_error.flags);
-			wake_up_bit(&i915->gpu_error.flags,
+				  &error->flags);
+			wake_up_bit(&error->flags,
 				    I915_RESET_ENGINE + engine->id);
 		}
 	}
@@ -1270,18 +1286,20 @@ void i915_handle_error(struct drm_i915_private *i915,
 		goto out;
 
 	/* Full reset needs the mutex, stop any other user trying to do so. */
-	if (test_and_set_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags)) {
-		wait_event(i915->gpu_error.reset_queue,
-			   !test_bit(I915_RESET_BACKOFF,
-				     &i915->gpu_error.flags));
-		goto out;
+	if (test_and_set_bit(I915_RESET_BACKOFF, &error->flags)) {
+		wait_event(error->reset_queue,
+			   !test_bit(I915_RESET_BACKOFF, &error->flags));
+		goto out; /* piggy-back on the other reset */
 	}
 
+	/* Make sure i915_reset_trylock() sees the I915_RESET_BACKOFF */
+	synchronize_rcu_expedited();
+
 	/* Prevent any other reset-engine attempt. */
 	for_each_engine(engine, i915, tmp) {
 		while (test_and_set_bit(I915_RESET_ENGINE + engine->id,
-					&i915->gpu_error.flags))
-			wait_on_bit(&i915->gpu_error.flags,
+					&error->flags))
+			wait_on_bit(&error->flags,
 				    I915_RESET_ENGINE + engine->id,
 				    TASK_UNINTERRUPTIBLE);
 	}
@@ -1290,16 +1308,74 @@ void i915_handle_error(struct drm_i915_private *i915,
 
 	for_each_engine(engine, i915, tmp) {
 		clear_bit(I915_RESET_ENGINE + engine->id,
-			  &i915->gpu_error.flags);
+			  &error->flags);
 	}
 
-	clear_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags);
-	wake_up_all(&i915->gpu_error.reset_queue);
+	clear_bit(I915_RESET_BACKOFF, &error->flags);
+	wake_up_all(&error->reset_queue);
 
 out:
 	intel_runtime_pm_put(i915, wakeref);
 }
 
+int i915_reset_trylock(struct drm_i915_private *i915)
+{
+	struct i915_gpu_error *error = &i915->gpu_error;
+	int srcu;
+
+	might_lock(&error->reset_backoff_srcu);
+	might_sleep();
+
+	rcu_read_lock();
+	while (test_bit(I915_RESET_BACKOFF, &error->flags)) {
+		rcu_read_unlock();
+
+		if (wait_event_interruptible(error->reset_queue,
+					     !test_bit(I915_RESET_BACKOFF,
+						       &error->flags)))
+			return -EINTR;
+
+		rcu_read_lock();
+	}
+	srcu = srcu_read_lock(&error->reset_backoff_srcu);
+	rcu_read_unlock();
+
+	return srcu;
+}
+
+void i915_reset_unlock(struct drm_i915_private *i915, int tag)
+__releases(&i915->gpu_error.reset_backoff_srcu)
+{
+	struct i915_gpu_error *error = &i915->gpu_error;
+
+	srcu_read_unlock(&error->reset_backoff_srcu, tag);
+}
+
+int i915_terminally_wedged(struct drm_i915_private *i915)
+{
+	struct i915_gpu_error *error = &i915->gpu_error;
+
+	might_sleep();
+
+	if (!__i915_wedged(error))
+		return 0;
+
+	/* Reset still in progress? Maybe we will recover? */
+	if (!test_bit(I915_RESET_BACKOFF, &error->flags))
+		return -EIO;
+
+	/* XXX intel_reset_finish() still takes struct_mutex!!! */
+	if (mutex_is_locked(&i915->drm.struct_mutex))
+		return -EAGAIN;
+
+	if (wait_event_interruptible(error->reset_queue,
+				     !test_bit(I915_RESET_BACKOFF,
+					       &error->flags)))
+		return -EINTR;
+
+	return __i915_wedged(error) ? -EIO : 0;
+}
+
 bool i915_reset_flush(struct drm_i915_private *i915)
 {
 	int err;
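
Taken together, revoke_mmaps() and the SRCU pair above form the new reset/pagefault handshake: the reset revokes every userspace GGTT mmap and then flushes readers with the synchronize_srcu_expedited() call in i915_reset_device(), while anyone re-faulting the mapping must first pass through i915_reset_trylock(). A sketch of the intended caller pattern (access_ggtt_mapping() is hypothetical; the real caller is the GEM fault handler):

	int srcu, err;

	srcu = i915_reset_trylock(i915);	/* may sleep out an ongoing reset */
	if (srcu < 0)
		return srcu;			/* -EINTR: wait was interrupted */

	err = access_ggtt_mapping(vma);		/* hypothetical reset-sensitive work */

	i915_reset_unlock(i915, srcu);
	return err;
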
diff --git a/drivers/gpu/drm/i915/i915_reset.h b/drivers/gpu/drm/i915/i915_reset.h
index f2d347f319dfad4733f489968c757f055f0cb2f4..16f2389f656f39a32c7581a444bb13b370a214cb 100644
--- a/drivers/gpu/drm/i915/i915_reset.h
+++ b/drivers/gpu/drm/i915/i915_reset.h
@@ -9,6 +9,7 @@
 
 #include <linux/compiler.h>
 #include <linux/types.h>
+#include <linux/srcu.h>
 
 struct drm_i915_private;
 struct intel_engine_cs;
@@ -32,6 +33,11 @@ int i915_reset_engine(struct intel_engine_cs *engine,
 void i915_reset_request(struct i915_request *rq, bool guilty);
 bool i915_reset_flush(struct drm_i915_private *i915);
 
+int __must_check i915_reset_trylock(struct drm_i915_private *i915);
+void i915_reset_unlock(struct drm_i915_private *i915, int tag);
+
+int i915_terminally_wedged(struct drm_i915_private *i915);
+
 bool intel_has_gpu_reset(struct drm_i915_private *i915);
 bool intel_has_reset_engine(struct drm_i915_private *i915);
 
diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
index 8bc042551692c3db5b0ff30b14bcf46c905ede2d..e0f609d015647a4ee923f8597ec66c409f484c19 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.c
+++ b/drivers/gpu/drm/i915/i915_scheduler.c
@@ -7,9 +7,16 @@
 #include <linux/mutex.h>
 
 #include "i915_drv.h"
+#include "i915_globals.h"
 #include "i915_request.h"
 #include "i915_scheduler.h"
 
+static struct i915_global_scheduler {
+	struct i915_global base;
+	struct kmem_cache *slab_dependencies;
+	struct kmem_cache *slab_priorities;
+} global;
+
 static DEFINE_SPINLOCK(schedule_lock);
 
 static const struct i915_request *
@@ -18,6 +25,11 @@ node_to_request(const struct i915_sched_node *node)
 	return container_of(node, const struct i915_request, sched);
 }
 
+static inline bool node_started(const struct i915_sched_node *node)
+{
+	return i915_request_started(node_to_request(node));
+}
+
 static inline bool node_signaled(const struct i915_sched_node *node)
 {
 	return i915_request_completed(node_to_request(node));
@@ -29,19 +41,19 @@ void i915_sched_node_init(struct i915_sched_node *node)
 	INIT_LIST_HEAD(&node->waiters_list);
 	INIT_LIST_HEAD(&node->link);
 	node->attr.priority = I915_PRIORITY_INVALID;
+	node->flags = 0;
 }
 
 static struct i915_dependency *
-i915_dependency_alloc(struct drm_i915_private *i915)
+i915_dependency_alloc(void)
 {
-	return kmem_cache_alloc(i915->dependencies, GFP_KERNEL);
+	return kmem_cache_alloc(global.slab_dependencies, GFP_KERNEL);
 }
 
 static void
-i915_dependency_free(struct drm_i915_private *i915,
-		     struct i915_dependency *dep)
+i915_dependency_free(struct i915_dependency *dep)
 {
-	kmem_cache_free(i915->dependencies, dep);
+	kmem_cache_free(global.slab_dependencies, dep);
 }
 
 bool __i915_sched_node_add_dependency(struct i915_sched_node *node,
@@ -60,6 +72,11 @@ bool __i915_sched_node_add_dependency(struct i915_sched_node *node,
 		dep->signaler = signal;
 		dep->flags = flags;
 
+		/* Keep track of whether anyone on this chain has a semaphore */
+		if (signal->flags & I915_SCHED_HAS_SEMAPHORE &&
+		    !node_started(signal))
+			node->flags |= I915_SCHED_HAS_SEMAPHORE;
+
 		ret = true;
 	}
 
@@ -68,25 +85,23 @@ bool __i915_sched_node_add_dependency(struct i915_sched_node *node,
 	return ret;
 }
 
-int i915_sched_node_add_dependency(struct drm_i915_private *i915,
-				   struct i915_sched_node *node,
+int i915_sched_node_add_dependency(struct i915_sched_node *node,
 				   struct i915_sched_node *signal)
 {
 	struct i915_dependency *dep;
 
-	dep = i915_dependency_alloc(i915);
+	dep = i915_dependency_alloc();
 	if (!dep)
 		return -ENOMEM;
 
 	if (!__i915_sched_node_add_dependency(node, signal, dep,
 					      I915_DEPENDENCY_ALLOC))
-		i915_dependency_free(i915, dep);
+		i915_dependency_free(dep);
 
 	return 0;
 }
 
-void i915_sched_node_fini(struct drm_i915_private *i915,
-			  struct i915_sched_node *node)
+void i915_sched_node_fini(struct i915_sched_node *node)
 {
 	struct i915_dependency *dep, *tmp;
 
@@ -106,7 +121,7 @@ void i915_sched_node_fini(struct drm_i915_private *i915,
 
 		list_del(&dep->wait_link);
 		if (dep->flags & I915_DEPENDENCY_ALLOC)
-			i915_dependency_free(i915, dep);
+			i915_dependency_free(dep);
 	}
 
 	/* Remove ourselves from everyone who depends upon us */
@@ -116,7 +131,7 @@ void i915_sched_node_fini(struct drm_i915_private *i915,
 
 		list_del(&dep->signal_link);
 		if (dep->flags & I915_DEPENDENCY_ALLOC)
-			i915_dependency_free(i915, dep);
+			i915_dependency_free(dep);
 	}
 
 	spin_unlock(&schedule_lock);
@@ -193,7 +208,7 @@ i915_sched_lookup_priolist(struct intel_engine_cs *engine, int prio)
 	if (prio == I915_PRIORITY_NORMAL) {
 		p = &execlists->default_priolist;
 	} else {
-		p = kmem_cache_alloc(engine->i915->priorities, GFP_ATOMIC);
+		p = kmem_cache_alloc(global.slab_priorities, GFP_ATOMIC);
 		/* Convert an allocation failure to a priority bump */
 		if (unlikely(!p)) {
 			prio = I915_PRIORITY_NORMAL; /* recurses just once */
@@ -301,6 +316,10 @@ static void __i915_schedule(struct i915_request *rq,
 	list_for_each_entry(dep, &dfs, dfs_link) {
 		struct i915_sched_node *node = dep->signaler;
 
+		/* If we are already flying, we know we have no signalers */
+		if (node_started(node))
+			continue;
+
 		/*
 		 * Within an engine, there can be no cycle, but we may
 		 * refer to the same dependency chain multiple times
@@ -313,7 +332,6 @@ static void __i915_schedule(struct i915_request *rq,
 			if (node_signaled(p->signaler))
 				continue;
 
-			GEM_BUG_ON(p->signaler->attr.priority < node->attr.priority);
 			if (prio > READ_ONCE(p->signaler->attr.priority))
 				list_move_tail(&p->dfs_link, &dfs);
 		}
@@ -415,3 +433,45 @@ void i915_schedule_bump_priority(struct i915_request *rq, unsigned int bump)
 
 	spin_unlock_bh(&schedule_lock);
 }
+
+void __i915_priolist_free(struct i915_priolist *p)
+{
+	kmem_cache_free(global.slab_priorities, p);
+}
+
+static void i915_global_scheduler_shrink(void)
+{
+	kmem_cache_shrink(global.slab_dependencies);
+	kmem_cache_shrink(global.slab_priorities);
+}
+
+static void i915_global_scheduler_exit(void)
+{
+	kmem_cache_destroy(global.slab_dependencies);
+	kmem_cache_destroy(global.slab_priorities);
+}
+
+static struct i915_global_scheduler global = { {
+	.shrink = i915_global_scheduler_shrink,
+	.exit = i915_global_scheduler_exit,
+} };
+
+int __init i915_global_scheduler_init(void)
+{
+	global.slab_dependencies = KMEM_CACHE(i915_dependency,
+					      SLAB_HWCACHE_ALIGN);
+	if (!global.slab_dependencies)
+		return -ENOMEM;
+
+	global.slab_priorities = KMEM_CACHE(i915_priolist,
+					    SLAB_HWCACHE_ALIGN);
+	if (!global.slab_priorities)
+		goto err_priorities;
+
+	i915_global_register(&global.base);
+	return 0;
+
+err_priorities:
+	kmem_cache_destroy(global.slab_dependencies);
+	return -ENOMEM;
+}
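
i915_globals.[ch] itself is not part of this excerpt; from the call sites here (and the identical VMA pattern later in the patch) the implied contract is a registry of module-lifetime slab caches, roughly:

	/* Assumed shape, inferred from the initializers above: */
	struct i915_global {
		struct list_head link;	/* assumed registry linkage */
		void (*shrink)(void);	/* trim caches when the driver idles */
		void (*exit)(void);	/* destroy caches at module unload */
	};

	void i915_global_register(struct i915_global *global);

KMEM_CACHE(type, flags) is the stock kernel helper that creates a slab cache named after, and sized for, the given struct type.
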
diff --git a/drivers/gpu/drm/i915/i915_scheduler.h b/drivers/gpu/drm/i915/i915_scheduler.h
index dbe9cb7ecd82928bc83be7b042994a1566724d82..9a1d257f3d6e59e2743036678ae29ab00f16a385 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.h
+++ b/drivers/gpu/drm/i915/i915_scheduler.h
@@ -24,14 +24,17 @@ enum {
 	I915_PRIORITY_INVALID = INT_MIN
 };
 
-#define I915_USER_PRIORITY_SHIFT 2
+#define I915_USER_PRIORITY_SHIFT 3
 #define I915_USER_PRIORITY(x) ((x) << I915_USER_PRIORITY_SHIFT)
 
 #define I915_PRIORITY_COUNT BIT(I915_USER_PRIORITY_SHIFT)
 #define I915_PRIORITY_MASK (I915_PRIORITY_COUNT - 1)
 
-#define I915_PRIORITY_WAIT	((u8)BIT(0))
-#define I915_PRIORITY_NEWCLIENT	((u8)BIT(1))
+#define I915_PRIORITY_WAIT		((u8)BIT(0))
+#define I915_PRIORITY_NEWCLIENT		((u8)BIT(1))
+#define I915_PRIORITY_NOSEMAPHORE	((u8)BIT(2))
+
+#define __NO_PREEMPTION (I915_PRIORITY_WAIT)
 
 struct i915_sched_attr {
 	/**
@@ -72,6 +75,8 @@ struct i915_sched_node {
 	struct list_head waiters_list; /* those after us, they depend upon us */
 	struct list_head link;
 	struct i915_sched_attr attr;
+	unsigned int flags;
+#define I915_SCHED_HAS_SEMAPHORE	BIT(0)
 };
 
 struct i915_dependency {
@@ -83,6 +88,25 @@ struct i915_dependency {
 #define I915_DEPENDENCY_ALLOC BIT(0)
 };
 
+struct i915_priolist {
+	struct list_head requests[I915_PRIORITY_COUNT];
+	struct rb_node node;
+	unsigned long used;
+	int priority;
+};
+
+#define priolist_for_each_request(it, plist, idx) \
+	for (idx = 0; idx < ARRAY_SIZE((plist)->requests); idx++) \
+		list_for_each_entry(it, &(plist)->requests[idx], sched.link)
+
+#define priolist_for_each_request_consume(it, n, plist, idx) \
+	for (; \
+	     (plist)->used ? (idx = __ffs((plist)->used)), 1 : 0; \
+	     (plist)->used &= ~BIT(idx)) \
+		list_for_each_entry_safe(it, n, \
+					 &(plist)->requests[idx], \
+					 sched.link)
+
 void i915_sched_node_init(struct i915_sched_node *node);
 
 bool __i915_sched_node_add_dependency(struct i915_sched_node *node,
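
The consume variant walks plist->used rather than scanning every bucket: __ffs() returns the lowest set bit, i.e. the first non-empty bucket (the driver fills buckets highest-priority-first, so this drains in descending priority), and the bit is cleared once that bucket's list is exhausted. A standalone model of the loop:

	#include <stdio.h>

	int main(void)
	{
		unsigned long used = 0x26;	/* buckets 1, 2 and 5 are non-empty */
		unsigned int idx;

		while (used) {
			idx = __builtin_ctzl(used);	/* lowest set bit, like __ffs() */
			printf("drain bucket %u\n", idx);
			used &= ~(1UL << idx);		/* bucket is now empty */
		}
		return 0;
	}
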
@@ -90,12 +114,10 @@ bool __i915_sched_node_add_dependency(struct i915_sched_node *node,
 				      struct i915_dependency *dep,
 				      unsigned long flags);
 
-int i915_sched_node_add_dependency(struct drm_i915_private *i915,
-				   struct i915_sched_node *node,
+int i915_sched_node_add_dependency(struct i915_sched_node *node,
 				   struct i915_sched_node *signal);
 
-void i915_sched_node_fini(struct drm_i915_private *i915,
-			  struct i915_sched_node *node);
+void i915_sched_node_fini(struct i915_sched_node *node);
 
 void i915_schedule(struct i915_request *request,
 		   const struct i915_sched_attr *attr);
@@ -105,4 +127,11 @@ void i915_schedule_bump_priority(struct i915_request *rq, unsigned int bump);
 struct list_head *
 i915_sched_lookup_priolist(struct intel_engine_cs *engine, int prio);
 
+void __i915_priolist_free(struct i915_priolist *p);
+static inline void i915_priolist_free(struct i915_priolist *p)
+{
+	if (p->priority != I915_PRIORITY_NORMAL)
+		__i915_priolist_free(p);
+}
+
 #endif /* _I915_SCHEDULER_H_ */
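
With the shift bumped from 2 to 3, a packed priority carries the user level in the high bits and up to three internal boost bits below it. A worked example using the macros above:

	/* User level 2, boosted as a new client: */
	int prio = I915_USER_PRIORITY(2) | I915_PRIORITY_NEWCLIENT;

	/*
	 * prio == (2 << 3) | BIT(1) == 0x12. The user level is recovered as
	 * prio >> I915_USER_PRIORITY_SHIFT, the boost bits as
	 * prio & I915_PRIORITY_MASK (mask == BIT(3) - 1 == 7).
	 */
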
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c
index 7c58b049ecb50a47bf06d0322d6077f6e06e955d..8d1400d378d7a7746ec08426d683b52e5ea066f2 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence.c
+++ b/drivers/gpu/drm/i915/i915_sw_fence.c
@@ -192,7 +192,7 @@ static void __i915_sw_fence_complete(struct i915_sw_fence *fence,
 	__i915_sw_fence_notify(fence, FENCE_FREE);
 }
 
-static void i915_sw_fence_complete(struct i915_sw_fence *fence)
+void i915_sw_fence_complete(struct i915_sw_fence *fence)
 {
 	debug_fence_assert(fence);
 
@@ -202,7 +202,7 @@ static void i915_sw_fence_complete(struct i915_sw_fence *fence)
 	__i915_sw_fence_complete(fence, NULL);
 }
 
-static void i915_sw_fence_await(struct i915_sw_fence *fence)
+void i915_sw_fence_await(struct i915_sw_fence *fence)
 {
 	debug_fence_assert(fence);
 	WARN_ON(atomic_inc_return(&fence->pending) <= 1);
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.h b/drivers/gpu/drm/i915/i915_sw_fence.h
index 0e055ea0179f321a4aafc8b95a667f8bbc8543f6..6dec9e1d11029888d11d3e250aa9644c7339430a 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence.h
+++ b/drivers/gpu/drm/i915/i915_sw_fence.h
@@ -79,6 +79,9 @@ int i915_sw_fence_await_reservation(struct i915_sw_fence *fence,
 				    unsigned long timeout,
 				    gfp_t gfp);
 
+void i915_sw_fence_await(struct i915_sw_fence *fence);
+void i915_sw_fence_complete(struct i915_sw_fence *fence);
+
 static inline bool i915_sw_fence_signaled(const struct i915_sw_fence *fence)
 {
 	return atomic_read(&fence->pending) <= 0;
diff --git a/drivers/gpu/drm/i915/i915_timeline.c b/drivers/gpu/drm/i915/i915_timeline.c
index b2202d2e58a26341ab57cdc0fc3c86247d653ac4..8484ba6e51d1caa3afe8d7cf153c983b4eb87272 100644
--- a/drivers/gpu/drm/i915/i915_timeline.c
+++ b/drivers/gpu/drm/i915/i915_timeline.c
@@ -6,19 +6,32 @@
 
 #include "i915_drv.h"
 
-#include "i915_timeline.h"
+#include "i915_active.h"
 #include "i915_syncmap.h"
+#include "i915_timeline.h"
+
+#define ptr_set_bit(ptr, bit) ((typeof(ptr))((unsigned long)(ptr) | BIT(bit)))
+#define ptr_test_bit(ptr, bit) ((unsigned long)(ptr) & BIT(bit))
 
 struct i915_timeline_hwsp {
-	struct i915_vma *vma;
+	struct i915_gt_timelines *gt;
 	struct list_head free_link;
+	struct i915_vma *vma;
 	u64 free_bitmap;
 };
 
-static inline struct i915_timeline_hwsp *
-i915_timeline_hwsp(const struct i915_timeline *tl)
+struct i915_timeline_cacheline {
+	struct i915_active active;
+	struct i915_timeline_hwsp *hwsp;
+	void *vaddr;
+#define CACHELINE_BITS 6
+#define CACHELINE_FREE CACHELINE_BITS
+};
+
+static inline struct drm_i915_private *
+hwsp_to_i915(struct i915_timeline_hwsp *hwsp)
 {
-	return tl->hwsp_ggtt->private;
+	return container_of(hwsp->gt, struct drm_i915_private, gt.timelines);
 }
 
 static struct i915_vma *__hwsp_alloc(struct drm_i915_private *i915)
@@ -71,6 +84,7 @@ hwsp_alloc(struct i915_timeline *timeline, unsigned int *cacheline)
 		vma->private = hwsp;
 		hwsp->vma = vma;
 		hwsp->free_bitmap = ~0ull;
+		hwsp->gt = gt;
 
 		spin_lock(&gt->hwsp_lock);
 		list_add(&hwsp->free_link, &gt->hwsp_free_list);
@@ -88,14 +102,9 @@ hwsp_alloc(struct i915_timeline *timeline, unsigned int *cacheline)
 	return hwsp->vma;
 }
 
-static void hwsp_free(struct i915_timeline *timeline)
+static void __idle_hwsp_free(struct i915_timeline_hwsp *hwsp, int cacheline)
 {
-	struct i915_gt_timelines *gt = &timeline->i915->gt.timelines;
-	struct i915_timeline_hwsp *hwsp;
-
-	hwsp = i915_timeline_hwsp(timeline);
-	if (!hwsp) /* leave global HWSP alone! */
-		return;
+	struct i915_gt_timelines *gt = hwsp->gt;
 
 	spin_lock(&gt->hwsp_lock);
 
@@ -103,7 +112,8 @@ static void hwsp_free(struct i915_timeline *timeline)
 	if (!hwsp->free_bitmap)
 		list_add_tail(&hwsp->free_link, &gt->hwsp_free_list);
 
-	hwsp->free_bitmap |= BIT_ULL(timeline->hwsp_offset / CACHELINE_BYTES);
+	GEM_BUG_ON(cacheline >= BITS_PER_TYPE(hwsp->free_bitmap));
+	hwsp->free_bitmap |= BIT_ULL(cacheline);
 
 	/* And if no one is left using it, give the page back to the system */
 	if (hwsp->free_bitmap == ~0ull) {
@@ -115,6 +125,76 @@ static void hwsp_free(struct i915_timeline *timeline)
 	spin_unlock(&gt->hwsp_lock);
 }
 
+static void __idle_cacheline_free(struct i915_timeline_cacheline *cl)
+{
+	GEM_BUG_ON(!i915_active_is_idle(&cl->active));
+
+	i915_gem_object_unpin_map(cl->hwsp->vma->obj);
+	i915_vma_put(cl->hwsp->vma);
+	__idle_hwsp_free(cl->hwsp, ptr_unmask_bits(cl->vaddr, CACHELINE_BITS));
+
+	i915_active_fini(&cl->active);
+	kfree(cl);
+}
+
+static void __cacheline_retire(struct i915_active *active)
+{
+	struct i915_timeline_cacheline *cl =
+		container_of(active, typeof(*cl), active);
+
+	i915_vma_unpin(cl->hwsp->vma);
+	if (ptr_test_bit(cl->vaddr, CACHELINE_FREE))
+		__idle_cacheline_free(cl);
+}
+
+static struct i915_timeline_cacheline *
+cacheline_alloc(struct i915_timeline_hwsp *hwsp, unsigned int cacheline)
+{
+	struct i915_timeline_cacheline *cl;
+	void *vaddr;
+
+	GEM_BUG_ON(cacheline >= BIT(CACHELINE_BITS));
+
+	cl = kmalloc(sizeof(*cl), GFP_KERNEL);
+	if (!cl)
+		return ERR_PTR(-ENOMEM);
+
+	vaddr = i915_gem_object_pin_map(hwsp->vma->obj, I915_MAP_WB);
+	if (IS_ERR(vaddr)) {
+		kfree(cl);
+		return ERR_CAST(vaddr);
+	}
+
+	i915_vma_get(hwsp->vma);
+	cl->hwsp = hwsp;
+	cl->vaddr = page_pack_bits(vaddr, cacheline);
+
+	i915_active_init(hwsp_to_i915(hwsp), &cl->active, __cacheline_retire);
+
+	return cl;
+}
+
+static void cacheline_acquire(struct i915_timeline_cacheline *cl)
+{
+	if (cl && i915_active_acquire(&cl->active))
+		__i915_vma_pin(cl->hwsp->vma);
+}
+
+static void cacheline_release(struct i915_timeline_cacheline *cl)
+{
+	if (cl)
+		i915_active_release(&cl->active);
+}
+
+static void cacheline_free(struct i915_timeline_cacheline *cl)
+{
+	GEM_BUG_ON(ptr_test_bit(cl->vaddr, CACHELINE_FREE));
+	cl->vaddr = ptr_set_bit(cl->vaddr, CACHELINE_FREE);
+
+	if (i915_active_is_idle(&cl->active))
+		__idle_cacheline_free(cl);
+}
+
 int i915_timeline_init(struct drm_i915_private *i915,
 		       struct i915_timeline *timeline,
 		       const char *name,
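
The cacheline bookkeeping above leans on pointer bit-packing: vaddr is page aligned, so its low bits are free to carry the 6-bit cacheline index (bits 0-5) and the CACHELINE_FREE flag (bit 6). A standalone model of the arithmetic (the helper names mirror the driver's, but this is an illustration only):

	#include <assert.h>
	#include <stdint.h>

	#define CACHELINE_BITS 6
	#define CACHELINE_FREE CACHELINE_BITS	/* flag lives in bit 6 */

	int main(void)
	{
		uintptr_t vaddr = 0x7f2a00001000;	/* page aligned: low 12 bits clear */
		unsigned int line = 37;			/* 0..63 fits below bit 6 */
		uintptr_t packed = vaddr | line;	/* ~ page_pack_bits() */

		assert((packed & ((1u << CACHELINE_BITS) - 1)) == line);	/* ~ ptr_unmask_bits() */
		assert((packed & ~(uintptr_t)0xfff) == vaddr);			/* ~ page_mask_bits() */

		packed |= 1u << CACHELINE_FREE;			/* ~ ptr_set_bit() */
		assert(packed & (1u << CACHELINE_FREE));	/* ~ ptr_test_bit() */
		return 0;
	}
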
@@ -136,32 +216,44 @@ int i915_timeline_init(struct drm_i915_private *i915,
 	timeline->name = name;
 	timeline->pin_count = 0;
 	timeline->has_initial_breadcrumb = !hwsp;
+	timeline->hwsp_cacheline = NULL;
 
-	timeline->hwsp_offset = I915_GEM_HWS_SEQNO_ADDR;
 	if (!hwsp) {
+		struct i915_timeline_cacheline *cl;
 		unsigned int cacheline;
 
 		hwsp = hwsp_alloc(timeline, &cacheline);
 		if (IS_ERR(hwsp))
 			return PTR_ERR(hwsp);
 
+		cl = cacheline_alloc(hwsp->private, cacheline);
+		if (IS_ERR(cl)) {
+			__idle_hwsp_free(hwsp->private, cacheline);
+			return PTR_ERR(cl);
+		}
+
+		timeline->hwsp_cacheline = cl;
 		timeline->hwsp_offset = cacheline * CACHELINE_BYTES;
-	}
-	timeline->hwsp_ggtt = i915_vma_get(hwsp);
 
-	vaddr = i915_gem_object_pin_map(hwsp->obj, I915_MAP_WB);
-	if (IS_ERR(vaddr)) {
-		hwsp_free(timeline);
-		i915_vma_put(hwsp);
-		return PTR_ERR(vaddr);
+		vaddr = page_mask_bits(cl->vaddr);
+	} else {
+		timeline->hwsp_offset = I915_GEM_HWS_SEQNO_ADDR;
+
+		vaddr = i915_gem_object_pin_map(hwsp->obj, I915_MAP_WB);
+		if (IS_ERR(vaddr))
+			return PTR_ERR(vaddr);
 	}
 
 	timeline->hwsp_seqno =
 		memset(vaddr + timeline->hwsp_offset, 0, CACHELINE_BYTES);
 
+	timeline->hwsp_ggtt = i915_vma_get(hwsp);
+	GEM_BUG_ON(timeline->hwsp_offset >= hwsp->size);
+
 	timeline->fence_context = dma_fence_context_alloc(1);
 
 	spin_lock_init(&timeline->lock);
+	mutex_init(&timeline->mutex);
 
 	INIT_ACTIVE_REQUEST(&timeline->barrier);
 	INIT_ACTIVE_REQUEST(&timeline->last_request);
@@ -239,9 +331,12 @@ void i915_timeline_fini(struct i915_timeline *timeline)
 	GEM_BUG_ON(i915_active_request_isset(&timeline->barrier));
 
 	i915_syncmap_free(&timeline->sync);
-	hwsp_free(timeline);
 
-	i915_gem_object_unpin_map(timeline->hwsp_ggtt->obj);
+	if (timeline->hwsp_cacheline)
+		cacheline_free(timeline->hwsp_cacheline);
+	else
+		i915_gem_object_unpin_map(timeline->hwsp_ggtt->obj);
+
 	i915_vma_put(timeline->hwsp_ggtt);
 }
 
@@ -284,6 +379,7 @@ int i915_timeline_pin(struct i915_timeline *tl)
 		i915_ggtt_offset(tl->hwsp_ggtt) +
 		offset_in_page(tl->hwsp_offset);
 
+	cacheline_acquire(tl->hwsp_cacheline);
 	timeline_add_to_active(tl);
 
 	return 0;
@@ -293,6 +389,157 @@ int i915_timeline_pin(struct i915_timeline *tl)
 	return err;
 }
 
+static u32 timeline_advance(struct i915_timeline *tl)
+{
+	GEM_BUG_ON(!tl->pin_count);
+	GEM_BUG_ON(tl->seqno & tl->has_initial_breadcrumb);
+
+	return tl->seqno += 1 + tl->has_initial_breadcrumb;
+}
+
+static void timeline_rollback(struct i915_timeline *tl)
+{
+	tl->seqno -= 1 + tl->has_initial_breadcrumb;
+}
+
+static noinline int
+__i915_timeline_get_seqno(struct i915_timeline *tl,
+			  struct i915_request *rq,
+			  u32 *seqno)
+{
+	struct i915_timeline_cacheline *cl;
+	unsigned int cacheline;
+	struct i915_vma *vma;
+	void *vaddr;
+	int err;
+
+	/*
+	 * If there is an outstanding GPU reference to this cacheline,
+	 * we cannot wrap around our seqno value (the HW semaphore does
+	 * we cannot wraparound our seqno value (the HW semaphore does
+	 * a strict greater-than-or-equals compare, not i915_seqno_passed).
+	 * So if the cacheline is still busy, we must detach ourselves
+	 * from it and leave it inflight alongside its users.
+	 *
+	 * However, if nobody is watching and we can guarantee that nobody
+	 * will, we could simply reuse the same cacheline.
+	 *
+	 * if (i915_active_request_is_signaled(&tl->last_request) &&
+	 *     i915_active_is_signaled(&tl->hwsp_cacheline->active))
+	 *	return 0;
+	 *
+	 * That seems unlikely for a busy timeline that needed to wrap in
+	 * the first place, so just replace the cacheline.
+	 */
+
+	vma = hwsp_alloc(tl, &cacheline);
+	if (IS_ERR(vma)) {
+		err = PTR_ERR(vma);
+		goto err_rollback;
+	}
+
+	err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
+	if (err) {
+		__idle_hwsp_free(vma->private, cacheline);
+		goto err_rollback;
+	}
+
+	cl = cacheline_alloc(vma->private, cacheline);
+	if (IS_ERR(cl)) {
+		err = PTR_ERR(cl);
+		__idle_hwsp_free(vma->private, cacheline);
+		goto err_unpin;
+	}
+	GEM_BUG_ON(cl->hwsp->vma != vma);
+
+	/*
+	 * Attach the old cacheline to the current request, so that we only
+	 * free it after the current request is retired, which ensures that
+	 * all writes into the cacheline from previous requests are complete.
+	 */
+	err = i915_active_ref(&tl->hwsp_cacheline->active,
+			      tl->fence_context, rq);
+	if (err)
+		goto err_cacheline;
+
+	cacheline_release(tl->hwsp_cacheline); /* ownership now xfered to rq */
+	cacheline_free(tl->hwsp_cacheline);
+
+	i915_vma_unpin(tl->hwsp_ggtt); /* binding kept alive by old cacheline */
+	i915_vma_put(tl->hwsp_ggtt);
+
+	tl->hwsp_ggtt = i915_vma_get(vma);
+
+	vaddr = page_mask_bits(cl->vaddr);
+	tl->hwsp_offset = cacheline * CACHELINE_BYTES;
+	tl->hwsp_seqno =
+		memset(vaddr + tl->hwsp_offset, 0, CACHELINE_BYTES);
+
+	tl->hwsp_offset += i915_ggtt_offset(vma);
+
+	cacheline_acquire(cl);
+	tl->hwsp_cacheline = cl;
+
+	*seqno = timeline_advance(tl);
+	GEM_BUG_ON(i915_seqno_passed(*tl->hwsp_seqno, *seqno));
+	return 0;
+
+err_cacheline:
+	cacheline_free(cl);
+err_unpin:
+	i915_vma_unpin(vma);
+err_rollback:
+	timeline_rollback(tl);
+	return err;
+}
+
+int i915_timeline_get_seqno(struct i915_timeline *tl,
+			    struct i915_request *rq,
+			    u32 *seqno)
+{
+	*seqno = timeline_advance(tl);
+
+	/* Replace the HWSP on wraparound for HW semaphores */
+	if (unlikely(!*seqno && tl->hwsp_cacheline))
+		return __i915_timeline_get_seqno(tl, rq, seqno);
+
+	return 0;
+}
+
+static int cacheline_ref(struct i915_timeline_cacheline *cl,
+			 struct i915_request *rq)
+{
+	return i915_active_ref(&cl->active, rq->fence.context, rq);
+}
+
+int i915_timeline_read_hwsp(struct i915_request *from,
+			    struct i915_request *to,
+			    u32 *hwsp)
+{
+	struct i915_timeline_cacheline *cl = from->hwsp_cacheline;
+	struct i915_timeline *tl = from->timeline;
+	int err;
+
+	GEM_BUG_ON(to->timeline == tl);
+
+	mutex_lock_nested(&tl->mutex, SINGLE_DEPTH_NESTING);
+	err = i915_request_completed(from);
+	if (!err)
+		err = cacheline_ref(cl, to);
+	if (!err) {
+		if (likely(cl == tl->hwsp_cacheline)) {
+			*hwsp = tl->hwsp_offset;
+		} else { /* across a seqno wrap, recover the original offset */
+			*hwsp = i915_ggtt_offset(cl->hwsp->vma) +
+				ptr_unmask_bits(cl->vaddr, CACHELINE_BITS) *
+				CACHELINE_BYTES;
+		}
+	}
+	mutex_unlock(&tl->mutex);
+
+	return err;
+}
+
 void i915_timeline_unpin(struct i915_timeline *tl)
 {
 	GEM_BUG_ON(!tl->pin_count);
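
Why wraparound cannot simply reuse the cacheline: a gen8+ semaphore busy-waits with an unsigned greater-or-equal compare against the HWSP, whereas the CPU side uses the wrap-tolerant signed-distance test. A standalone demonstration of the gap:

	#include <assert.h>
	#include <stdbool.h>
	#include <stdint.h>

	/* The driver's CPU-side test: signed distance, tolerant of wrap. */
	static bool i915_seqno_passed(uint32_t seq, uint32_t target)
	{
		return (int32_t)(seq - target) >= 0;
	}

	int main(void)
	{
		uint32_t target = 0xfffffffeu;	/* waiter armed just before the wrap */
		uint32_t hwsp = 3;		/* value written after the wrap */

		assert(i915_seqno_passed(hwsp, target));	/* CPU: 3 is "after" the target */
		assert(!(hwsp >= target));	/* HW-style unsigned >=: never satisfied */
		return 0;
	}

Hence the old cacheline is kept alive via i915_active_ref() for any waiter still sampling it, the timeline restarts on a fresh cacheline, and i915_timeline_read_hwsp() recovers the pre-wrap offset for those stragglers.
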
@@ -300,6 +547,7 @@ void i915_timeline_unpin(struct i915_timeline *tl)
 		return;
 
 	timeline_remove_from_active(tl);
+	cacheline_release(tl->hwsp_cacheline);
 
 	/*
 	 * Since this timeline is idle, all barriers upon which we were waiting
diff --git a/drivers/gpu/drm/i915/i915_timeline.h b/drivers/gpu/drm/i915/i915_timeline.h
index 7bec7d2e45bfa242d30d8a76f38b99ae44226903..9126c82064903a86e67b77fa6f956e973a36fd85 100644
--- a/drivers/gpu/drm/i915/i915_timeline.h
+++ b/drivers/gpu/drm/i915/i915_timeline.h
@@ -25,72 +25,10 @@
 #ifndef I915_TIMELINE_H
 #define I915_TIMELINE_H
 
-#include <linux/list.h>
-#include <linux/kref.h>
+#include <linux/lockdep.h>
 
-#include "i915_active.h"
-#include "i915_request.h"
 #include "i915_syncmap.h"
-#include "i915_utils.h"
-
-struct i915_vma;
-struct i915_timeline_hwsp;
-
-struct i915_timeline {
-	u64 fence_context;
-	u32 seqno;
-
-	spinlock_t lock;
-#define TIMELINE_CLIENT 0 /* default subclass */
-#define TIMELINE_ENGINE 1
-
-	unsigned int pin_count;
-	const u32 *hwsp_seqno;
-	struct i915_vma *hwsp_ggtt;
-	u32 hwsp_offset;
-
-	bool has_initial_breadcrumb;
-
-	/**
-	 * List of breadcrumbs associated with GPU requests currently
-	 * outstanding.
-	 */
-	struct list_head requests;
-
-	/* Contains an RCU guarded pointer to the last request. No reference is
-	 * held to the request, users must carefully acquire a reference to
-	 * the request using i915_active_request_get_request_rcu(), or hold the
-	 * struct_mutex.
-	 */
-	struct i915_active_request last_request;
-
-	/**
-	 * We track the most recent seqno that we wait on in every context so
-	 * that we only have to emit a new await and dependency on a more
-	 * recent sync point. As the contexts may be executed out-of-order, we
-	 * have to track each individually and can not rely on an absolute
-	 * global_seqno. When we know that all tracked fences are completed
-	 * (i.e. when the driver is idle), we know that the syncmap is
-	 * redundant and we can discard it without loss of generality.
-	 */
-	struct i915_syncmap *sync;
-
-	/**
-	 * Barrier provides the ability to serialize ordering between different
-	 * timelines.
-	 *
-	 * Users can call i915_timeline_set_barrier which will make all
-	 * subsequent submissions to this timeline be executed only after the
-	 * barrier has been completed.
-	 */
-	struct i915_active_request barrier;
-
-	struct list_head link;
-	const char *name;
-	struct drm_i915_private *i915;
-
-	struct kref kref;
-};
+#include "i915_timeline_types.h"
 
 int i915_timeline_init(struct drm_i915_private *i915,
 		       struct i915_timeline *tl,
@@ -160,8 +98,15 @@ static inline bool i915_timeline_sync_is_later(struct i915_timeline *tl,
 }
 
 int i915_timeline_pin(struct i915_timeline *tl);
+int i915_timeline_get_seqno(struct i915_timeline *tl,
+			    struct i915_request *rq,
+			    u32 *seqno);
 void i915_timeline_unpin(struct i915_timeline *tl);
 
+int i915_timeline_read_hwsp(struct i915_request *from,
+			    struct i915_request *until,
+			    u32 *hwsp_offset);
+
 void i915_timelines_init(struct drm_i915_private *i915);
 void i915_timelines_park(struct drm_i915_private *i915);
 void i915_timelines_fini(struct drm_i915_private *i915);
diff --git a/drivers/gpu/drm/i915/i915_timeline_types.h b/drivers/gpu/drm/i915/i915_timeline_types.h
new file mode 100644
index 0000000000000000000000000000000000000000..8ff146dc05ba271a6e4deba5266bf3cce41f6e4d
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_timeline_types.h
@@ -0,0 +1,80 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2016 Intel Corporation
+ */
+
+#ifndef __I915_TIMELINE_TYPES_H__
+#define __I915_TIMELINE_TYPES_H__
+
+#include <linux/list.h>
+#include <linux/kref.h>
+#include <linux/types.h>
+
+#include "i915_active.h"
+
+struct drm_i915_private;
+struct i915_vma;
+struct i915_timeline_cacheline;
+struct i915_syncmap;
+
+struct i915_timeline {
+	u64 fence_context;
+	u32 seqno;
+
+	spinlock_t lock;
+#define TIMELINE_CLIENT 0 /* default subclass */
+#define TIMELINE_ENGINE 1
+	struct mutex mutex; /* protects the flow of requests */
+
+	unsigned int pin_count;
+	const u32 *hwsp_seqno;
+	struct i915_vma *hwsp_ggtt;
+	u32 hwsp_offset;
+
+	struct i915_timeline_cacheline *hwsp_cacheline;
+
+	bool has_initial_breadcrumb;
+
+	/**
+	 * List of breadcrumbs associated with GPU requests currently
+	 * outstanding.
+	 */
+	struct list_head requests;
+
+	/* Contains an RCU guarded pointer to the last request. No reference is
+	 * held to the request, users must carefully acquire a reference to
+	 * the request using i915_active_request_get_request_rcu(), or hold the
+	 * struct_mutex.
+	 */
+	struct i915_active_request last_request;
+
+	/**
+	 * We track the most recent seqno that we wait on in every context so
+	 * that we only have to emit a new await and dependency on a more
+	 * recent sync point. As the contexts may be executed out-of-order, we
+	 * have to track each individually and can not rely on an absolute
+	 * global_seqno. When we know that all tracked fences are completed
+	 * (i.e. when the driver is idle), we know that the syncmap is
+	 * redundant and we can discard it without loss of generality.
+	 */
+	struct i915_syncmap *sync;
+
+	/**
+	 * Barrier provides the ability to serialize ordering between different
+	 * timelines.
+	 *
+	 * Users can call i915_timeline_set_barrier which will make all
+	 * subsequent submissions to this timeline be executed only after the
+	 * barrier has been completed.
+	 */
+	struct i915_active_request barrier;
+
+	struct list_head link;
+	const char *name;
+	struct drm_i915_private *i915;
+
+	struct kref kref;
+};
+
+#endif /* __I915_TIMELINE_TYPES_H__ */
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index eab313c3163c91e97e6d1e4449e12d963ca6773f..12893304c8f895e96c44e0349dc2149a63c53721 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -18,6 +18,87 @@
 
 /* watermark/fifo updates */
 
+TRACE_EVENT(intel_pipe_enable,
+	    TP_PROTO(struct drm_i915_private *dev_priv, enum pipe pipe),
+	    TP_ARGS(dev_priv, pipe),
+
+	    TP_STRUCT__entry(
+			     __array(u32, frame, 3)
+			     __array(u32, scanline, 3)
+			     __field(enum pipe, pipe)
+			     ),
+
+	    TP_fast_assign(
+			   enum pipe _pipe;
+			   for_each_pipe(dev_priv, _pipe) {
+				   __entry->frame[_pipe] =
+					   dev_priv->drm.driver->get_vblank_counter(&dev_priv->drm, _pipe);
+				   __entry->scanline[_pipe] =
+					   intel_get_crtc_scanline(intel_get_crtc_for_pipe(dev_priv, _pipe));
+			   }
+			   __entry->pipe = pipe;
+			   ),
+
+	    TP_printk("pipe %c enable, pipe A: frame=%u, scanline=%u, pipe B: frame=%u, scanline=%u, pipe C: frame=%u, scanline=%u",
+		      pipe_name(__entry->pipe),
+		      __entry->frame[PIPE_A], __entry->scanline[PIPE_A],
+		      __entry->frame[PIPE_B], __entry->scanline[PIPE_B],
+		      __entry->frame[PIPE_C], __entry->scanline[PIPE_C])
+);
+
+TRACE_EVENT(intel_pipe_disable,
+	    TP_PROTO(struct drm_i915_private *dev_priv, enum pipe pipe),
+	    TP_ARGS(dev_priv, pipe),
+
+	    TP_STRUCT__entry(
+			     __array(u32, frame, 3)
+			     __array(u32, scanline, 3)
+			     __field(enum pipe, pipe)
+			     ),
+
+	    TP_fast_assign(
+			   enum pipe _pipe;
+			   for_each_pipe(dev_priv, _pipe) {
+				   __entry->frame[_pipe] =
+					   dev_priv->drm.driver->get_vblank_counter(&dev_priv->drm, _pipe);
+				   __entry->scanline[_pipe] =
+					   intel_get_crtc_scanline(intel_get_crtc_for_pipe(dev_priv, _pipe));
+			   }
+			   __entry->pipe = pipe;
+			   ),
+
+	    TP_printk("pipe %c disable, pipe A: frame=%u, scanline=%u, pipe B: frame=%u, scanline=%u, pipe C: frame=%u, scanline=%u",
+		      pipe_name(__entry->pipe),
+		      __entry->frame[PIPE_A], __entry->scanline[PIPE_A],
+		      __entry->frame[PIPE_B], __entry->scanline[PIPE_B],
+		      __entry->frame[PIPE_C], __entry->scanline[PIPE_C])
+);
+
+TRACE_EVENT(intel_pipe_crc,
+	    TP_PROTO(struct intel_crtc *crtc, const u32 *crcs),
+	    TP_ARGS(crtc, crcs),
+
+	    TP_STRUCT__entry(
+			     __field(enum pipe, pipe)
+			     __field(u32, frame)
+			     __field(u32, scanline)
+			     __array(u32, crcs, 5)
+			     ),
+
+	    TP_fast_assign(
+			   __entry->pipe = crtc->pipe;
+			   __entry->frame = crtc->base.dev->driver->get_vblank_counter(crtc->base.dev,
+										       crtc->pipe);
+			   __entry->scanline = intel_get_crtc_scanline(crtc);
+			   memcpy(__entry->crcs, crcs, sizeof(__entry->crcs));
+			   ),
+
+	    TP_printk("pipe %c, frame=%u, scanline=%u crc=%08x %08x %08x %08x %08x",
+		      pipe_name(__entry->pipe), __entry->frame, __entry->scanline,
+		      __entry->crcs[0], __entry->crcs[1], __entry->crcs[2],
+		      __entry->crcs[3], __entry->crcs[4])
+);
+
 TRACE_EVENT(intel_cpu_fifo_underrun,
 	    TP_PROTO(struct drm_i915_private *dev_priv, enum pipe pipe),
 	    TP_ARGS(dev_priv, pipe),
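
Like the existing i915 tracepoints in this file, the three added above can be toggled at runtime through tracefs, e.g. events/i915/intel_pipe_crc/enable under /sys/kernel/tracing (assuming the usual TRACE_SYSTEM naming for this header).
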
@@ -627,7 +708,6 @@ DECLARE_EVENT_CLASS(i915_request,
 			     __field(u16, class)
 			     __field(u16, instance)
 			     __field(u32, seqno)
-			     __field(u32, global)
 			     ),
 
 	    TP_fast_assign(
@@ -637,13 +717,11 @@ DECLARE_EVENT_CLASS(i915_request,
 			   __entry->instance = rq->engine->instance;
 			   __entry->ctx = rq->fence.context;
 			   __entry->seqno = rq->fence.seqno;
-			   __entry->global = rq->global_seqno;
 			   ),
 
-	    TP_printk("dev=%u, engine=%u:%u, hw_id=%u, ctx=%llu, seqno=%u, global=%u",
+	    TP_printk("dev=%u, engine=%u:%u, hw_id=%u, ctx=%llu, seqno=%u",
 		      __entry->dev, __entry->class, __entry->instance,
-		      __entry->hw_id, __entry->ctx, __entry->seqno,
-		      __entry->global)
+		      __entry->hw_id, __entry->ctx, __entry->seqno)
 );
 
 DEFINE_EVENT(i915_request, i915_request_add,
@@ -673,7 +751,6 @@ TRACE_EVENT(i915_request_in,
 			     __field(u16, class)
 			     __field(u16, instance)
 			     __field(u32, seqno)
-			     __field(u32, global_seqno)
 			     __field(u32, port)
 			     __field(u32, prio)
 			    ),
@@ -685,15 +762,14 @@ TRACE_EVENT(i915_request_in,
 			   __entry->instance = rq->engine->instance;
 			   __entry->ctx = rq->fence.context;
 			   __entry->seqno = rq->fence.seqno;
-			   __entry->global_seqno = rq->global_seqno;
 			   __entry->prio = rq->sched.attr.priority;
 			   __entry->port = port;
 			   ),
 
-	    TP_printk("dev=%u, engine=%u:%u, hw_id=%u, ctx=%llu, seqno=%u, prio=%u, global=%u, port=%u",
+	    TP_printk("dev=%u, engine=%u:%u, hw_id=%u, ctx=%llu, seqno=%u, prio=%u, port=%u",
 		      __entry->dev, __entry->class, __entry->instance,
 		      __entry->hw_id, __entry->ctx, __entry->seqno,
-		      __entry->prio, __entry->global_seqno, __entry->port)
+		      __entry->prio, __entry->port)
 );
 
 TRACE_EVENT(i915_request_out,
@@ -707,7 +783,6 @@ TRACE_EVENT(i915_request_out,
 			     __field(u16, class)
 			     __field(u16, instance)
 			     __field(u32, seqno)
-			     __field(u32, global_seqno)
 			     __field(u32, completed)
 			    ),
 
@@ -718,14 +793,13 @@ TRACE_EVENT(i915_request_out,
 			   __entry->instance = rq->engine->instance;
 			   __entry->ctx = rq->fence.context;
 			   __entry->seqno = rq->fence.seqno;
-			   __entry->global_seqno = rq->global_seqno;
 			   __entry->completed = i915_request_completed(rq);
 			   ),
 
-		    TP_printk("dev=%u, engine=%u:%u, hw_id=%u, ctx=%llu, seqno=%u, global=%u, completed?=%u",
+		    TP_printk("dev=%u, engine=%u:%u, hw_id=%u, ctx=%llu, seqno=%u, completed?=%u",
 			      __entry->dev, __entry->class, __entry->instance,
 			      __entry->hw_id, __entry->ctx, __entry->seqno,
-			      __entry->global_seqno, __entry->completed)
+			      __entry->completed)
 );
 
 #else
@@ -768,7 +842,6 @@ TRACE_EVENT(i915_request_wait_begin,
 			     __field(u16, class)
 			     __field(u16, instance)
 			     __field(u32, seqno)
-			     __field(u32, global)
 			     __field(unsigned int, flags)
 			     ),
 
@@ -785,14 +858,13 @@ TRACE_EVENT(i915_request_wait_begin,
 			   __entry->instance = rq->engine->instance;
 			   __entry->ctx = rq->fence.context;
 			   __entry->seqno = rq->fence.seqno;
-			   __entry->global = rq->global_seqno;
 			   __entry->flags = flags;
 			   ),
 
-	    TP_printk("dev=%u, engine=%u:%u, hw_id=%u, ctx=%llu, seqno=%u, global=%u, blocking=%u, flags=0x%x",
+	    TP_printk("dev=%u, engine=%u:%u, hw_id=%u, ctx=%llu, seqno=%u, blocking=%u, flags=0x%x",
 		      __entry->dev, __entry->class, __entry->instance,
 		      __entry->hw_id, __entry->ctx, __entry->seqno,
-		      __entry->global, !!(__entry->flags & I915_WAIT_LOCKED),
+		      !!(__entry->flags & I915_WAIT_LOCKED),
 		      __entry->flags)
 );
 
diff --git a/drivers/gpu/drm/i915/i915_vgpu.c b/drivers/gpu/drm/i915/i915_vgpu.c
index 869cf4a3b6de75fee593c0f66c953cc1035434a6..3b2d83f704e30778aff104df58eee026f8b7364a 100644
--- a/drivers/gpu/drm/i915/i915_vgpu.c
+++ b/drivers/gpu/drm/i915/i915_vgpu.c
@@ -81,9 +81,9 @@ void i915_check_vgpu(struct drm_i915_private *dev_priv)
 	DRM_INFO("Virtual GPU for Intel GVT-g detected.\n");
 }
 
-bool intel_vgpu_has_full_48bit_ppgtt(struct drm_i915_private *dev_priv)
+bool intel_vgpu_has_full_ppgtt(struct drm_i915_private *dev_priv)
 {
-	return dev_priv->vgpu.caps & VGT_CAPS_FULL_48BIT_PPGTT;
+	return dev_priv->vgpu.caps & VGT_CAPS_FULL_PPGTT;
 }
 
 struct _balloon_info_ {
diff --git a/drivers/gpu/drm/i915/i915_vgpu.h b/drivers/gpu/drm/i915/i915_vgpu.h
index 551acc3900464bb7d4aee586ba7792155631bef5..ebe1b7bced980707040b74eee88e2c9693f8f38f 100644
--- a/drivers/gpu/drm/i915/i915_vgpu.h
+++ b/drivers/gpu/drm/i915/i915_vgpu.h
@@ -28,7 +28,7 @@
 
 void i915_check_vgpu(struct drm_i915_private *dev_priv);
 
-bool intel_vgpu_has_full_48bit_ppgtt(struct drm_i915_private *dev_priv);
+bool intel_vgpu_has_full_ppgtt(struct drm_i915_private *dev_priv);
 
 static inline bool
 intel_vgpu_has_hwsp_emulation(struct drm_i915_private *dev_priv)
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index b713bed20c3880c088a45c49e8a787c786f6e971..36726392e737759c4ffc52840de6e745f2454082 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -25,11 +25,27 @@
 #include "i915_vma.h"
 
 #include "i915_drv.h"
+#include "i915_globals.h"
 #include "intel_ringbuffer.h"
 #include "intel_frontbuffer.h"
 
 #include <drm/drm_gem.h>
 
+static struct i915_global_vma {
+	struct i915_global base;
+	struct kmem_cache *slab_vmas;
+} global;
+
+struct i915_vma *i915_vma_alloc(void)
+{
+	return kmem_cache_zalloc(global.slab_vmas, GFP_KERNEL);
+}
+
+void i915_vma_free(struct i915_vma *vma)
+{
+	return kmem_cache_free(global.slab_vmas, vma);
+}
+
 #if IS_ENABLED(CONFIG_DRM_I915_ERRLOG_GEM) && IS_ENABLED(CONFIG_DRM_DEBUG_MM)
 
 #include <linux/stackdepot.h>
@@ -115,7 +131,7 @@ vma_create(struct drm_i915_gem_object *obj,
 	/* The aliasing_ppgtt should never be used directly! */
 	GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->vm);
 
-	vma = kmem_cache_zalloc(vm->i915->vmas, GFP_KERNEL);
+	vma = i915_vma_alloc();
 	if (vma == NULL)
 		return ERR_PTR(-ENOMEM);
 
@@ -190,7 +206,7 @@ vma_create(struct drm_i915_gem_object *obj,
 		cmp = i915_vma_compare(pos, vm, view);
 		if (cmp == 0) {
 			spin_unlock(&obj->vma.lock);
-			kmem_cache_free(vm->i915->vmas, vma);
+			i915_vma_free(vma);
 			return pos;
 		}
 
@@ -222,7 +238,7 @@ vma_create(struct drm_i915_gem_object *obj,
 	return vma;
 
 err_vma:
-	kmem_cache_free(vm->i915->vmas, vma);
+	i915_vma_free(vma);
 	return ERR_PTR(-E2BIG);
 }
 
@@ -803,8 +819,6 @@ void i915_vma_reopen(struct i915_vma *vma)
 
 static void __i915_vma_destroy(struct i915_vma *vma)
 {
-	struct drm_i915_private *i915 = vma->vm->i915;
-
 	GEM_BUG_ON(vma->node.allocated);
 	GEM_BUG_ON(vma->fence);
 
@@ -825,7 +839,7 @@ static void __i915_vma_destroy(struct i915_vma *vma)
 
 	i915_active_fini(&vma->active);
 
-	kmem_cache_free(i915->vmas, vma);
+	i915_vma_free(vma);
 }
 
 void i915_vma_destroy(struct i915_vma *vma)
@@ -1041,3 +1055,28 @@ int i915_vma_unbind(struct i915_vma *vma)
 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
 #include "selftests/i915_vma.c"
 #endif
+
+static void i915_global_vma_shrink(void)
+{
+	kmem_cache_shrink(global.slab_vmas);
+}
+
+static void i915_global_vma_exit(void)
+{
+	kmem_cache_destroy(global.slab_vmas);
+}
+
+static struct i915_global_vma global = { {
+	.shrink = i915_global_vma_shrink,
+	.exit = i915_global_vma_exit,
+} };
+
+int __init i915_global_vma_init(void)
+{
+	global.slab_vmas = KMEM_CACHE(i915_vma, SLAB_HWCACHE_ALIGN);
+	if (!global.slab_vmas)
+		return -ENOMEM;
+
+	i915_global_register(&global.base);
+	return 0;
+}
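
The VMA slab now lives behind the i915_globals interface instead of hanging
off dev_priv. A minimal sketch of the consumer side of that interface,
assuming i915_globals.c keeps a list of registered entries and fans out to
their hooks (only i915_global_register() and the shrink/exit hook signatures
appear in this patch; the list walk and the link member are assumptions):

	#include <linux/list.h>

	static LIST_HEAD(globals);

	void i915_global_register(struct i915_global *global)
	{
		/* assumed: struct i915_global carries a list_head named link */
		list_add_tail(&global->link, &globals);
	}

	static void i915_globals_shrink(void)
	{
		struct i915_global *global;

		/* e.g. invoked when the GPU idles, to trim the slab caches */
		list_for_each_entry(global, &globals, link)
			global->shrink();
	}
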
diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h
index 7c742027f8661479d9079c71909bd9875d7ede3c..6eab70953a57f7c254da4f386da1a15de055a81f 100644
--- a/drivers/gpu/drm/i915/i915_vma.h
+++ b/drivers/gpu/drm/i915/i915_vma.h
@@ -440,4 +440,7 @@ void i915_vma_parked(struct drm_i915_private *i915);
 	list_for_each_entry(V, &(OBJ)->vma.list, obj_link)		\
 		for_each_until(!i915_vma_is_ggtt(V))
 
+struct i915_vma *i915_vma_alloc(void);
+void i915_vma_free(struct i915_vma *vma);
+
 #endif
diff --git a/drivers/gpu/drm/i915/icl_dsi.c b/drivers/gpu/drm/i915/icl_dsi.c
index 73a7bee24a663faa672ade21cfe7ea7cb1bc4b46..beb30d9a855c87ff791fc7e66b505a11bb42d7af 100644
--- a/drivers/gpu/drm/i915/icl_dsi.c
+++ b/drivers/gpu/drm/i915/icl_dsi.c
@@ -246,13 +246,13 @@ static void dsi_program_swing_and_deemphasis(struct intel_encoder *encoder)
 
 		for (lane = 0; lane <= 3; lane++) {
 			/* Bspec: must not use GRP register for write */
-			tmp = I915_READ(ICL_PORT_TX_DW4_LN(port, lane));
+			tmp = I915_READ(ICL_PORT_TX_DW4_LN(lane, port));
 			tmp &= ~(POST_CURSOR_1_MASK | POST_CURSOR_2_MASK |
 				 CURSOR_COEFF_MASK);
 			tmp |= POST_CURSOR_1(0x0);
 			tmp |= POST_CURSOR_2(0x0);
 			tmp |= CURSOR_COEFF(0x3f);
-			I915_WRITE(ICL_PORT_TX_DW4_LN(port, lane), tmp);
+			I915_WRITE(ICL_PORT_TX_DW4_LN(lane, port), tmp);
 		}
 	}
 }
@@ -390,11 +390,11 @@ static void gen11_dsi_config_phy_lanes_sequence(struct intel_encoder *encoder)
 		tmp &= ~LOADGEN_SELECT;
 		I915_WRITE(ICL_PORT_TX_DW4_AUX(port), tmp);
 		for (lane = 0; lane <= 3; lane++) {
-			tmp = I915_READ(ICL_PORT_TX_DW4_LN(port, lane));
+			tmp = I915_READ(ICL_PORT_TX_DW4_LN(lane, port));
 			tmp &= ~LOADGEN_SELECT;
 			if (lane != 2)
 				tmp |= LOADGEN_SELECT;
-			I915_WRITE(ICL_PORT_TX_DW4_LN(port, lane), tmp);
+			I915_WRITE(ICL_PORT_TX_DW4_LN(lane, port), tmp);
 		}
 	}
 
diff --git a/drivers/gpu/drm/i915/intel_atomic_plane.c b/drivers/gpu/drm/i915/intel_atomic_plane.c
index dd6c096992373af6724e07d9a98725464dacc6d0..9d32a6fcf8408a07e9e0f33295485cac03d5491b 100644
--- a/drivers/gpu/drm/i915/intel_atomic_plane.c
+++ b/drivers/gpu/drm/i915/intel_atomic_plane.c
@@ -121,6 +121,7 @@ int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_
 
 	new_crtc_state->active_planes &= ~BIT(plane->id);
 	new_crtc_state->nv12_planes &= ~BIT(plane->id);
+	new_crtc_state->c8_planes &= ~BIT(plane->id);
 	new_plane_state->base.visible = false;
 
 	if (!new_plane_state->base.crtc && !old_plane_state->base.crtc)
@@ -138,6 +139,10 @@ int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_
 	    is_planar_yuv_format(new_plane_state->base.fb->format->format))
 		new_crtc_state->nv12_planes |= BIT(plane->id);
 
+	if (new_plane_state->base.visible &&
+	    new_plane_state->base.fb->format->format == DRM_FORMAT_C8)
+		new_crtc_state->c8_planes |= BIT(plane->id);
+
 	if (new_plane_state->base.visible || old_plane_state->base.visible)
 		new_crtc_state->update_planes |= BIT(plane->id);
 
@@ -214,6 +219,35 @@ skl_next_plane_to_commit(struct intel_atomic_state *state,
 	return NULL;
 }
 
+void intel_update_plane(struct intel_plane *plane,
+			const struct intel_crtc_state *crtc_state,
+			const struct intel_plane_state *plane_state)
+{
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+
+	trace_intel_update_plane(&plane->base, crtc);
+	plane->update_plane(plane, crtc_state, plane_state);
+}
+
+void intel_update_slave(struct intel_plane *plane,
+			const struct intel_crtc_state *crtc_state,
+			const struct intel_plane_state *plane_state)
+{
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+
+	trace_intel_update_plane(&plane->base, crtc);
+	plane->update_slave(plane, crtc_state, plane_state);
+}
+
+void intel_disable_plane(struct intel_plane *plane,
+			 const struct intel_crtc_state *crtc_state)
+{
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+
+	trace_intel_disable_plane(&plane->base, crtc);
+	plane->disable_plane(plane, crtc_state);
+}
+
 void skl_update_planes_on_crtc(struct intel_atomic_state *state,
 			       struct intel_crtc *crtc)
 {
@@ -238,8 +272,7 @@ void skl_update_planes_on_crtc(struct intel_atomic_state *state,
 			intel_atomic_get_new_plane_state(state, plane);
 
 		if (new_plane_state->base.visible) {
-			trace_intel_update_plane(&plane->base, crtc);
-			plane->update_plane(plane, new_crtc_state, new_plane_state);
+			intel_update_plane(plane, new_crtc_state, new_plane_state);
 		} else if (new_plane_state->slave) {
 			struct intel_plane *master =
 				new_plane_state->linked_plane;
@@ -256,11 +289,9 @@ void skl_update_planes_on_crtc(struct intel_atomic_state *state,
 			new_plane_state =
 				intel_atomic_get_new_plane_state(state, master);
 
-			trace_intel_update_plane(&plane->base, crtc);
-			plane->update_slave(plane, new_crtc_state, new_plane_state);
+			intel_update_slave(plane, new_crtc_state, new_plane_state);
 		} else {
-			trace_intel_disable_plane(&plane->base, crtc);
-			plane->disable_plane(plane, new_crtc_state);
+			intel_disable_plane(plane, new_crtc_state);
 		}
 	}
 }
@@ -280,13 +311,10 @@ void i9xx_update_planes_on_crtc(struct intel_atomic_state *state,
 		    !(update_mask & BIT(plane->id)))
 			continue;
 
-		if (new_plane_state->base.visible) {
-			trace_intel_update_plane(&plane->base, crtc);
-			plane->update_plane(plane, new_crtc_state, new_plane_state);
-		} else {
-			trace_intel_disable_plane(&plane->base, crtc);
-			plane->disable_plane(plane, new_crtc_state);
-		}
+		if (new_plane_state->base.visible)
+			intel_update_plane(plane, new_crtc_state, new_plane_state);
+		else
+			intel_disable_plane(plane, new_crtc_state);
 	}
 }
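
These wrappers fold the tracepoint into the vfunc call, so any caller picks
up tracing for free. A sketch of how a hypothetical additional caller would
use them (the function below is illustrative, not part of this patch):

	static void example_commit_plane(struct intel_plane *plane,
					 const struct intel_crtc_state *crtc_state,
					 const struct intel_plane_state *plane_state)
	{
		if (plane_state->base.visible)
			intel_update_plane(plane, crtc_state, plane_state);
		else
			intel_disable_plane(plane, crtc_state);
	}
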
 
diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c
index 5104c6bbd66fa087d606cb19e21e2f3db7a4312a..502b57ce72aba9839b7d739197fb43c881a919e3 100644
--- a/drivers/gpu/drm/i915/intel_audio.c
+++ b/drivers/gpu/drm/i915/intel_audio.c
@@ -741,27 +741,31 @@ void intel_init_audio_hooks(struct drm_i915_private *dev_priv)
 	}
 }
 
-static void i915_audio_component_get_power(struct device *kdev)
+static unsigned long i915_audio_component_get_power(struct device *kdev)
 {
-	intel_display_power_get(kdev_to_i915(kdev), POWER_DOMAIN_AUDIO);
+	/* Catch potential impedance mismatches before they occur! */
+	BUILD_BUG_ON(sizeof(intel_wakeref_t) > sizeof(unsigned long));
+
+	return intel_display_power_get(kdev_to_i915(kdev), POWER_DOMAIN_AUDIO);
 }
 
-static void i915_audio_component_put_power(struct device *kdev)
+static void i915_audio_component_put_power(struct device *kdev,
+					   unsigned long cookie)
 {
-	intel_display_power_put_unchecked(kdev_to_i915(kdev),
-					  POWER_DOMAIN_AUDIO);
+	intel_display_power_put(kdev_to_i915(kdev), POWER_DOMAIN_AUDIO, cookie);
 }
 
 static void i915_audio_component_codec_wake_override(struct device *kdev,
 						     bool enable)
 {
 	struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
+	unsigned long cookie;
 	u32 tmp;
 
 	if (!IS_GEN(dev_priv, 9))
 		return;
 
-	i915_audio_component_get_power(kdev);
+	cookie = i915_audio_component_get_power(kdev);
 
 	/*
 	 * Enable/disable generating the codec wake signal, overriding the
@@ -779,7 +783,7 @@ static void i915_audio_component_codec_wake_override(struct device *kdev,
 		usleep_range(1000, 1500);
 	}
 
-	i915_audio_component_put_power(kdev);
+	i915_audio_component_put_power(kdev, cookie);
 }
 
 /* Get CDCLK in kHz  */
@@ -850,12 +854,13 @@ static int i915_audio_component_sync_audio_rate(struct device *kdev, int port,
 	struct i915_audio_component *acomp = dev_priv->audio_component;
 	struct intel_encoder *encoder;
 	struct intel_crtc *crtc;
+	unsigned long cookie;
 	int err = 0;
 
 	if (!HAS_DDI(dev_priv))
 		return 0;
 
-	i915_audio_component_get_power(kdev);
+	cookie = i915_audio_component_get_power(kdev);
 	mutex_lock(&dev_priv->av_mutex);
 
 	/* 1. get the pipe */
@@ -875,7 +880,7 @@ static int i915_audio_component_sync_audio_rate(struct device *kdev, int port,
 
  unlock:
 	mutex_unlock(&dev_priv->av_mutex);
-	i915_audio_component_put_power(kdev);
+	i915_audio_component_put_power(kdev, cookie);
 	return err;
 }
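
With the cookie threaded through, every power get is paired with the exact
wakeref it acquired, which is what the wakeref-tracking debug machinery
expects. The pattern on the consumer side, sketched with the component-ops
indirection elided:

	unsigned long cookie;

	cookie = i915_audio_component_get_power(kdev);

	/* ... program the display audio registers ... */

	i915_audio_component_put_power(kdev, cookie);
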
 
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 4364f42cac6b88cfd8eef1f82783a863e483884d..64f20175a6dcccf2a1aa6086a20e5e31b9fccbf9 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -760,6 +760,31 @@ parse_psr(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
 		dev_priv->vbt.psr.tp1_wakeup_time_us = psr_table->tp1_wakeup_time * 100;
 		dev_priv->vbt.psr.tp2_tp3_wakeup_time_us = psr_table->tp2_tp3_wakeup_time * 100;
 	}
+
+	if (bdb->version >= 226) {
+		u32 wakeup_time = psr_table->psr2_tp2_tp3_wakeup_time;
+
+		wakeup_time = (wakeup_time >> (2 * panel_type)) & 0x3;
+		switch (wakeup_time) {
+		case 0:
+			wakeup_time = 500;
+			break;
+		case 1:
+			wakeup_time = 100;
+			break;
+		case 3:
+			wakeup_time = 50;
+			break;
+		default:
+		case 2:
+			wakeup_time = 2500;
+			break;
+		}
+		dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us = wakeup_time;
+	} else {
+		/* Reusing PSR1 wakeup time for PSR2 in older VBTs */
+		dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us =
+			dev_priv->vbt.psr.tp2_tp3_wakeup_time_us;
+	}
 }
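
The new VBT field packs one 2-bit code per panel, hence the (2 * panel_type)
shift before masking. A worked example of the decode, assuming panel_type is
2 and a raw field value of 0x34 (both values are illustrative):

	u32 wakeup_time = 0x34;	/* example raw VBT field */
	int panel_type = 2;	/* example panel index */

	wakeup_time = (wakeup_time >> (2 * panel_type)) & 0x3;
	/* 0x34 >> 4 == 0x3, which the switch above maps to 50 us */
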
 
 static void parse_dsi_backlight_ports(struct drm_i915_private *dev_priv,
@@ -2094,8 +2119,8 @@ bool intel_bios_is_dsi_present(struct drm_i915_private *dev_priv,
 		dvo_port = child->dvo_port;
 
 		if (dvo_port == DVO_PORT_MIPIA ||
-		    (dvo_port == DVO_PORT_MIPIB && IS_ICELAKE(dev_priv)) ||
-		    (dvo_port == DVO_PORT_MIPIC && !IS_ICELAKE(dev_priv))) {
+		    (dvo_port == DVO_PORT_MIPIB && INTEL_GEN(dev_priv) >= 11) ||
+		    (dvo_port == DVO_PORT_MIPIC && INTEL_GEN(dev_priv) < 11)) {
 			if (port)
 				*port = dvo_port - DVO_PORT_MIPIA;
 			return true;
diff --git a/drivers/gpu/drm/i915/intel_cdclk.c b/drivers/gpu/drm/i915/intel_cdclk.c
index 15ba950dee00e789738e4362a7e3050ef9caf1d4..21fb4e0d6c4e65753ed8c34b91a502dc9e286841 100644
--- a/drivers/gpu/drm/i915/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/intel_cdclk.c
@@ -468,7 +468,7 @@ static void vlv_get_cdclk(struct drm_i915_private *dev_priv,
 					       cdclk_state->vco);
 
 	mutex_lock(&dev_priv->pcu_lock);
-	val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
+	val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
 	mutex_unlock(&dev_priv->pcu_lock);
 
 	if (IS_VALLEYVIEW(dev_priv))
@@ -543,11 +543,11 @@ static void vlv_set_cdclk(struct drm_i915_private *dev_priv,
 	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);
 
 	mutex_lock(&dev_priv->pcu_lock);
-	val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
+	val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
 	val &= ~DSPFREQGUAR_MASK;
 	val |= (cmd << DSPFREQGUAR_SHIFT);
-	vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);
-	if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) &
+	vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, val);
+	if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) &
 		      DSPFREQSTAT_MASK) == (cmd << DSPFREQSTAT_SHIFT),
 		     50)) {
 		DRM_ERROR("timed out waiting for CDclk change\n");
@@ -624,11 +624,11 @@ static void chv_set_cdclk(struct drm_i915_private *dev_priv,
 	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);
 
 	mutex_lock(&dev_priv->pcu_lock);
-	val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
+	val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
 	val &= ~DSPFREQGUAR_MASK_CHV;
 	val |= (cmd << DSPFREQGUAR_SHIFT_CHV);
-	vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);
-	if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) &
+	vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, val);
+	if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) &
 		      DSPFREQSTAT_MASK_CHV) == (cmd << DSPFREQSTAT_SHIFT_CHV),
 		     50)) {
 		DRM_ERROR("timed out waiting for CDclk change\n");
@@ -2560,7 +2560,7 @@ static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv)
  */
 void intel_update_max_cdclk(struct drm_i915_private *dev_priv)
 {
-	if (IS_ICELAKE(dev_priv)) {
+	if (INTEL_GEN(dev_priv) >= 11) {
 		if (dev_priv->cdclk.hw.ref == 24000)
 			dev_priv->max_cdclk_freq = 648000;
 		else
@@ -2668,7 +2668,7 @@ static int cnp_rawclk(struct drm_i915_private *dev_priv)
 
 		rawclk |= CNP_RAWCLK_DEN(DIV_ROUND_CLOSEST(numerator * 1000,
 							   fraction) - 1);
-		if (HAS_PCH_ICP(dev_priv))
+		if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
 			rawclk |= ICP_RAWCLK_NUM(numerator);
 	}
 
@@ -2723,7 +2723,7 @@ static int g4x_hrawclk(struct drm_i915_private *dev_priv)
  */
 void intel_update_rawclk(struct drm_i915_private *dev_priv)
 {
-	if (HAS_PCH_CNP(dev_priv) || HAS_PCH_ICP(dev_priv))
+	if (INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
 		dev_priv->rawclk_freq = cnp_rawclk(dev_priv);
 	else if (HAS_PCH_SPLIT(dev_priv))
 		dev_priv->rawclk_freq = pch_rawclk(dev_priv);
@@ -2744,18 +2744,13 @@ void intel_update_rawclk(struct drm_i915_private *dev_priv)
  */
 void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv)
 {
-	if (IS_CHERRYVIEW(dev_priv)) {
-		dev_priv->display.set_cdclk = chv_set_cdclk;
-		dev_priv->display.modeset_calc_cdclk =
-			vlv_modeset_calc_cdclk;
-	} else if (IS_VALLEYVIEW(dev_priv)) {
-		dev_priv->display.set_cdclk = vlv_set_cdclk;
-		dev_priv->display.modeset_calc_cdclk =
-			vlv_modeset_calc_cdclk;
-	} else if (IS_BROADWELL(dev_priv)) {
-		dev_priv->display.set_cdclk = bdw_set_cdclk;
+	if (INTEL_GEN(dev_priv) >= 11) {
+		dev_priv->display.set_cdclk = icl_set_cdclk;
+		dev_priv->display.modeset_calc_cdclk = icl_modeset_calc_cdclk;
+	} else if (IS_CANNONLAKE(dev_priv)) {
+		dev_priv->display.set_cdclk = cnl_set_cdclk;
 		dev_priv->display.modeset_calc_cdclk =
-			bdw_modeset_calc_cdclk;
+			cnl_modeset_calc_cdclk;
 	} else if (IS_GEN9_LP(dev_priv)) {
 		dev_priv->display.set_cdclk = bxt_set_cdclk;
 		dev_priv->display.modeset_calc_cdclk =
@@ -2764,23 +2759,28 @@ void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv)
 		dev_priv->display.set_cdclk = skl_set_cdclk;
 		dev_priv->display.modeset_calc_cdclk =
 			skl_modeset_calc_cdclk;
-	} else if (IS_CANNONLAKE(dev_priv)) {
-		dev_priv->display.set_cdclk = cnl_set_cdclk;
+	} else if (IS_BROADWELL(dev_priv)) {
+		dev_priv->display.set_cdclk = bdw_set_cdclk;
 		dev_priv->display.modeset_calc_cdclk =
-			cnl_modeset_calc_cdclk;
-	} else if (IS_ICELAKE(dev_priv)) {
-		dev_priv->display.set_cdclk = icl_set_cdclk;
-		dev_priv->display.modeset_calc_cdclk = icl_modeset_calc_cdclk;
+			bdw_modeset_calc_cdclk;
+	} else if (IS_CHERRYVIEW(dev_priv)) {
+		dev_priv->display.set_cdclk = chv_set_cdclk;
+		dev_priv->display.modeset_calc_cdclk =
+			vlv_modeset_calc_cdclk;
+	} else if (IS_VALLEYVIEW(dev_priv)) {
+		dev_priv->display.set_cdclk = vlv_set_cdclk;
+		dev_priv->display.modeset_calc_cdclk =
+			vlv_modeset_calc_cdclk;
 	}
 
-	if (IS_ICELAKE(dev_priv))
+	if (INTEL_GEN(dev_priv) >= 11)
 		dev_priv->display.get_cdclk = icl_get_cdclk;
 	else if (IS_CANNONLAKE(dev_priv))
 		dev_priv->display.get_cdclk = cnl_get_cdclk;
-	else if (IS_GEN9_BC(dev_priv))
-		dev_priv->display.get_cdclk = skl_get_cdclk;
 	else if (IS_GEN9_LP(dev_priv))
 		dev_priv->display.get_cdclk = bxt_get_cdclk;
+	else if (IS_GEN9_BC(dev_priv))
+		dev_priv->display.get_cdclk = skl_get_cdclk;
 	else if (IS_BROADWELL(dev_priv))
 		dev_priv->display.get_cdclk = bdw_get_cdclk;
 	else if (IS_HASWELL(dev_priv))
diff --git a/drivers/gpu/drm/i915/intel_color.c b/drivers/gpu/drm/i915/intel_color.c
index 71a1f12c6b2a5016a5846bacfb6d89b4960e4371..467fd1a1630c200115324cd2572a35f3b7ae0667 100644
--- a/drivers/gpu/drm/i915/intel_color.c
+++ b/drivers/gpu/drm/i915/intel_color.c
@@ -40,23 +40,6 @@
 #define CTM_COEFF_ABS(coeff)		((coeff) & (CTM_COEFF_SIGN - 1))
 
 #define LEGACY_LUT_LENGTH		256
-
-/* Post offset values for RGB->YCBCR conversion */
-#define POSTOFF_RGB_TO_YUV_HI 0x800
-#define POSTOFF_RGB_TO_YUV_ME 0x100
-#define POSTOFF_RGB_TO_YUV_LO 0x800
-
-/*
- * These values are direct register values specified in the Bspec,
- * for RGB->YUV conversion matrix (colorspace BT709)
- */
-#define CSC_RGB_TO_YUV_RU_GU 0x2ba809d8
-#define CSC_RGB_TO_YUV_BU 0x37e80000
-#define CSC_RGB_TO_YUV_RY_GY 0x1e089cc0
-#define CSC_RGB_TO_YUV_BY 0xb5280000
-#define CSC_RGB_TO_YUV_RV_GV 0xbce89ad8
-#define CSC_RGB_TO_YUV_BV 0x1e080000
-
 /*
  * Extract the CSC coefficient from a CTM coefficient (in U32.32 fixed point
  * format). This macro takes the coefficient we want transformed and the
@@ -69,10 +52,45 @@
 #define ILK_CSC_COEFF_FP(coeff, fbits)	\
 	(clamp_val(((coeff) >> (32 - (fbits) - 3)) + 4, 0, 0xfff) & 0xff8)
 
-#define ILK_CSC_COEFF_LIMITED_RANGE	\
-	ILK_CSC_COEFF_FP(CTM_COEFF_LIMITED_RANGE, 9)
-#define ILK_CSC_COEFF_1_0		\
-	((7 << 12) | ILK_CSC_COEFF_FP(CTM_COEFF_1_0, 8))
+#define ILK_CSC_COEFF_LIMITED_RANGE 0x0dc0
+#define ILK_CSC_COEFF_1_0 0x7800
+
+#define ILK_CSC_POSTOFF_LIMITED_RANGE (16 * (1 << 12) / 255)
+
+static const u16 ilk_csc_off_zero[3] = {};
+
+static const u16 ilk_csc_coeff_identity[9] = {
+	ILK_CSC_COEFF_1_0, 0, 0,
+	0, ILK_CSC_COEFF_1_0, 0,
+	0, 0, ILK_CSC_COEFF_1_0,
+};
+
+static const u16 ilk_csc_postoff_limited_range[3] = {
+	ILK_CSC_POSTOFF_LIMITED_RANGE,
+	ILK_CSC_POSTOFF_LIMITED_RANGE,
+	ILK_CSC_POSTOFF_LIMITED_RANGE,
+};
+
+static const u16 ilk_csc_coeff_limited_range[9] = {
+	ILK_CSC_COEFF_LIMITED_RANGE, 0, 0,
+	0, ILK_CSC_COEFF_LIMITED_RANGE, 0,
+	0, 0, ILK_CSC_COEFF_LIMITED_RANGE,
+};
+
+/*
+ * These values are direct register values specified in the Bspec,
+ * for RGB->YUV conversion matrix (colorspace BT709)
+ */
+static const u16 ilk_csc_coeff_rgb_to_ycbcr[9] = {
+	0x1e08, 0x9cc0, 0xb528,
+	0x2ba8, 0x09d8, 0x37e8,
+	0xbce8, 0x9ad8, 0x1e08,
+};
+
+/* Post offset values for RGB->YCBCR conversion */
+static const u16 ilk_csc_postoff_rgb_to_ycbcr[3] = {
+	0x0800, 0x0100, 0x0800,
+};
 
 static bool lut_is_legacy(const struct drm_property_blob *lut)
 {
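
The hard-coded constants replace the macro arithmetic this hunk removes; the
coefficient registers use a 3-bit exponent code in bits 14:12 and a 9-bit
mantissa in bits 11:3 (hence the trailing & 0xff8 in ILK_CSC_COEFF_FP). Two
sanity checks derived from the removed macros:

	/*
	 * 1.0 is 1ull << 32 in S31.32; with fbits = 8 the mantissa is
	 * ((1ull << 32) >> (32 - 8 - 3)) + 4 = 0x804, masked to 0x800,
	 * and exponent code 7 yields (7 << 12) | 0x800 == 0x7800.
	 *
	 * The limited-range post offset is 16/255 in U0.12 fixed point:
	 * 16 * (1 << 12) / 255 == 257 == 0x101.
	 */
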
@@ -113,145 +131,180 @@ static u64 *ctm_mult_by_limited(u64 *result, const u64 *input)
 	return result;
 }
 
-static void ilk_load_ycbcr_conversion_matrix(struct intel_crtc *crtc)
+static void ilk_update_pipe_csc(struct intel_crtc *crtc,
+				const u16 preoff[3],
+				const u16 coeff[9],
+				const u16 postoff[3])
 {
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	enum pipe pipe = crtc->pipe;
 
-	I915_WRITE(PIPE_CSC_PREOFF_HI(pipe), 0);
-	I915_WRITE(PIPE_CSC_PREOFF_ME(pipe), 0);
-	I915_WRITE(PIPE_CSC_PREOFF_LO(pipe), 0);
+	I915_WRITE(PIPE_CSC_PREOFF_HI(pipe), preoff[0]);
+	I915_WRITE(PIPE_CSC_PREOFF_ME(pipe), preoff[1]);
+	I915_WRITE(PIPE_CSC_PREOFF_LO(pipe), preoff[2]);
 
-	I915_WRITE(PIPE_CSC_COEFF_RU_GU(pipe), CSC_RGB_TO_YUV_RU_GU);
-	I915_WRITE(PIPE_CSC_COEFF_BU(pipe), CSC_RGB_TO_YUV_BU);
+	I915_WRITE(PIPE_CSC_COEFF_RY_GY(pipe), coeff[0] << 16 | coeff[1]);
+	I915_WRITE(PIPE_CSC_COEFF_BY(pipe), coeff[2] << 16);
 
-	I915_WRITE(PIPE_CSC_COEFF_RY_GY(pipe), CSC_RGB_TO_YUV_RY_GY);
-	I915_WRITE(PIPE_CSC_COEFF_BY(pipe), CSC_RGB_TO_YUV_BY);
+	I915_WRITE(PIPE_CSC_COEFF_RU_GU(pipe), coeff[3] << 16 | coeff[4]);
+	I915_WRITE(PIPE_CSC_COEFF_BU(pipe), coeff[5] << 16);
 
-	I915_WRITE(PIPE_CSC_COEFF_RV_GV(pipe), CSC_RGB_TO_YUV_RV_GV);
-	I915_WRITE(PIPE_CSC_COEFF_BV(pipe), CSC_RGB_TO_YUV_BV);
+	I915_WRITE(PIPE_CSC_COEFF_RV_GV(pipe), coeff[6] << 16 | coeff[7]);
+	I915_WRITE(PIPE_CSC_COEFF_BV(pipe), coeff[8] << 16);
 
-	I915_WRITE(PIPE_CSC_POSTOFF_HI(pipe), POSTOFF_RGB_TO_YUV_HI);
-	I915_WRITE(PIPE_CSC_POSTOFF_ME(pipe), POSTOFF_RGB_TO_YUV_ME);
-	I915_WRITE(PIPE_CSC_POSTOFF_LO(pipe), POSTOFF_RGB_TO_YUV_LO);
-	I915_WRITE(PIPE_CSC_MODE(pipe), 0);
+	if (INTEL_GEN(dev_priv) >= 7) {
+		I915_WRITE(PIPE_CSC_POSTOFF_HI(pipe), postoff[0]);
+		I915_WRITE(PIPE_CSC_POSTOFF_ME(pipe), postoff[1]);
+		I915_WRITE(PIPE_CSC_POSTOFF_LO(pipe), postoff[2]);
+	}
 }
 
-static void ilk_load_csc_matrix(const struct intel_crtc_state *crtc_state)
+static void icl_update_output_csc(struct intel_crtc *crtc,
+				  const u16 preoff[3],
+				  const u16 coeff[9],
+				  const u16 postoff[3])
 {
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-	bool limited_color_range = false;
 	enum pipe pipe = crtc->pipe;
-	u16 coeffs[9] = {};
-	int i;
+
+	I915_WRITE(PIPE_CSC_OUTPUT_PREOFF_HI(pipe), preoff[0]);
+	I915_WRITE(PIPE_CSC_OUTPUT_PREOFF_ME(pipe), preoff[1]);
+	I915_WRITE(PIPE_CSC_OUTPUT_PREOFF_LO(pipe), preoff[2]);
+
+	I915_WRITE(PIPE_CSC_OUTPUT_COEFF_RY_GY(pipe), coeff[0] << 16 | coeff[1]);
+	I915_WRITE(PIPE_CSC_OUTPUT_COEFF_BY(pipe), coeff[2]);
+
+	I915_WRITE(PIPE_CSC_OUTPUT_COEFF_RU_GU(pipe), coeff[3] << 16 | coeff[4]);
+	I915_WRITE(PIPE_CSC_OUTPUT_COEFF_BU(pipe), coeff[5]);
+
+	I915_WRITE(PIPE_CSC_OUTPUT_COEFF_RV_GV(pipe), coeff[6] << 16 | coeff[7]);
+	I915_WRITE(PIPE_CSC_OUTPUT_COEFF_BV(pipe), coeff[8]);
+
+	I915_WRITE(PIPE_CSC_OUTPUT_POSTOFF_HI(pipe), postoff[0]);
+	I915_WRITE(PIPE_CSC_OUTPUT_POSTOFF_ME(pipe), postoff[1]);
+	I915_WRITE(PIPE_CSC_OUTPUT_POSTOFF_LO(pipe), postoff[2]);
+}
+
+static bool ilk_csc_limited_range(const struct intel_crtc_state *crtc_state)
+{
+	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
 
 	/*
 	 * FIXME if there's a gamma LUT after the CSC, we should
 	 * do the range compression using the gamma LUT instead.
 	 */
-	if (INTEL_GEN(dev_priv) >= 8 || IS_HASWELL(dev_priv))
-		limited_color_range = crtc_state->limited_color_range;
+	return crtc_state->limited_color_range &&
+		(IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv) ||
+		 IS_GEN_RANGE(dev_priv, 9, 10));
+}
 
-	if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
-	    crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444) {
-		ilk_load_ycbcr_conversion_matrix(crtc);
-		return;
-	} else if (crtc_state->base.ctm) {
-		struct drm_color_ctm *ctm = crtc_state->base.ctm->data;
-		const u64 *input;
-		u64 temp[9];
+static void ilk_csc_convert_ctm(const struct intel_crtc_state *crtc_state,
+				u16 coeffs[9])
+{
+	const struct drm_color_ctm *ctm = crtc_state->base.ctm->data;
+	const u64 *input;
+	u64 temp[9];
+	int i;
 
-		if (limited_color_range)
-			input = ctm_mult_by_limited(temp, ctm->matrix);
-		else
-			input = ctm->matrix;
+	if (ilk_csc_limited_range(crtc_state))
+		input = ctm_mult_by_limited(temp, ctm->matrix);
+	else
+		input = ctm->matrix;
 
-		/*
-		 * Convert fixed point S31.32 input to format supported by the
-		 * hardware.
-		 */
-		for (i = 0; i < ARRAY_SIZE(coeffs); i++) {
-			u64 abs_coeff = ((1ULL << 63) - 1) & input[i];
+	/*
+	 * Convert fixed point S31.32 input to format supported by the
+	 * hardware.
+	 */
+	for (i = 0; i < 9; i++) {
+		u64 abs_coeff = ((1ULL << 63) - 1) & input[i];
 
-			/*
-			 * Clamp input value to min/max supported by
-			 * hardware.
-			 */
-			abs_coeff = clamp_val(abs_coeff, 0, CTM_COEFF_4_0 - 1);
-
-			/* sign bit */
-			if (CTM_COEFF_NEGATIVE(input[i]))
-				coeffs[i] |= 1 << 15;
-
-			if (abs_coeff < CTM_COEFF_0_125)
-				coeffs[i] |= (3 << 12) |
-					ILK_CSC_COEFF_FP(abs_coeff, 12);
-			else if (abs_coeff < CTM_COEFF_0_25)
-				coeffs[i] |= (2 << 12) |
-					ILK_CSC_COEFF_FP(abs_coeff, 11);
-			else if (abs_coeff < CTM_COEFF_0_5)
-				coeffs[i] |= (1 << 12) |
-					ILK_CSC_COEFF_FP(abs_coeff, 10);
-			else if (abs_coeff < CTM_COEFF_1_0)
-				coeffs[i] |= ILK_CSC_COEFF_FP(abs_coeff, 9);
-			else if (abs_coeff < CTM_COEFF_2_0)
-				coeffs[i] |= (7 << 12) |
-					ILK_CSC_COEFF_FP(abs_coeff, 8);
-			else
-				coeffs[i] |= (6 << 12) |
-					ILK_CSC_COEFF_FP(abs_coeff, 7);
-		}
-	} else {
 		/*
-		 * Load an identity matrix if no coefficients are provided.
-		 *
-		 * TODO: Check what kind of values actually come out of the
-		 * pipe with these coeff/postoff values and adjust to get the
-		 * best accuracy. Perhaps we even need to take the bpc value
-		 * into consideration.
+		 * Clamp input value to min/max supported by
+		 * hardware.
 		 */
-		for (i = 0; i < 3; i++) {
-			if (limited_color_range)
-				coeffs[i * 3 + i] =
-					ILK_CSC_COEFF_LIMITED_RANGE;
-			else
-				coeffs[i * 3 + i] = ILK_CSC_COEFF_1_0;
-		}
+		abs_coeff = clamp_val(abs_coeff, 0, CTM_COEFF_4_0 - 1);
+
+		coeffs[i] = 0;
+
+		/* sign bit */
+		if (CTM_COEFF_NEGATIVE(input[i]))
+			coeffs[i] |= 1 << 15;
+
+		if (abs_coeff < CTM_COEFF_0_125)
+			coeffs[i] |= (3 << 12) |
+				ILK_CSC_COEFF_FP(abs_coeff, 12);
+		else if (abs_coeff < CTM_COEFF_0_25)
+			coeffs[i] |= (2 << 12) |
+				ILK_CSC_COEFF_FP(abs_coeff, 11);
+		else if (abs_coeff < CTM_COEFF_0_5)
+			coeffs[i] |= (1 << 12) |
+				ILK_CSC_COEFF_FP(abs_coeff, 10);
+		else if (abs_coeff < CTM_COEFF_1_0)
+			coeffs[i] |= ILK_CSC_COEFF_FP(abs_coeff, 9);
+		else if (abs_coeff < CTM_COEFF_2_0)
+			coeffs[i] |= (7 << 12) |
+				ILK_CSC_COEFF_FP(abs_coeff, 8);
+		else
+			coeffs[i] |= (6 << 12) |
+				ILK_CSC_COEFF_FP(abs_coeff, 7);
 	}
+}
 
-	I915_WRITE(PIPE_CSC_COEFF_RY_GY(pipe), coeffs[0] << 16 | coeffs[1]);
-	I915_WRITE(PIPE_CSC_COEFF_BY(pipe), coeffs[2] << 16);
-
-	I915_WRITE(PIPE_CSC_COEFF_RU_GU(pipe), coeffs[3] << 16 | coeffs[4]);
-	I915_WRITE(PIPE_CSC_COEFF_BU(pipe), coeffs[5] << 16);
-
-	I915_WRITE(PIPE_CSC_COEFF_RV_GV(pipe), coeffs[6] << 16 | coeffs[7]);
-	I915_WRITE(PIPE_CSC_COEFF_BV(pipe), coeffs[8] << 16);
-
-	I915_WRITE(PIPE_CSC_PREOFF_HI(pipe), 0);
-	I915_WRITE(PIPE_CSC_PREOFF_ME(pipe), 0);
-	I915_WRITE(PIPE_CSC_PREOFF_LO(pipe), 0);
+static void ilk_load_csc_matrix(const struct intel_crtc_state *crtc_state)
+{
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	bool limited_color_range = ilk_csc_limited_range(crtc_state);
 
-	if (INTEL_GEN(dev_priv) > 6) {
-		u16 postoff = 0;
+	if (crtc_state->base.ctm) {
+		u16 coeff[9];
+
+		ilk_csc_convert_ctm(crtc_state, coeff);
+		ilk_update_pipe_csc(crtc, ilk_csc_off_zero, coeff,
+				    limited_color_range ?
+				    ilk_csc_postoff_limited_range :
+				    ilk_csc_off_zero);
+	} else if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB) {
+		ilk_update_pipe_csc(crtc, ilk_csc_off_zero,
+				    ilk_csc_coeff_rgb_to_ycbcr,
+				    ilk_csc_postoff_rgb_to_ycbcr);
+	} else if (limited_color_range) {
+		ilk_update_pipe_csc(crtc, ilk_csc_off_zero,
+				    ilk_csc_coeff_limited_range,
+				    ilk_csc_postoff_limited_range);
+	} else if (crtc_state->csc_enable) {
+		ilk_update_pipe_csc(crtc, ilk_csc_off_zero,
+				    ilk_csc_coeff_identity,
+				    ilk_csc_off_zero);
+	}
 
-		if (limited_color_range)
-			postoff = (16 * (1 << 12) / 255) & 0x1fff;
+	I915_WRITE(PIPE_CSC_MODE(crtc->pipe), crtc_state->csc_mode);
+}
 
-		I915_WRITE(PIPE_CSC_POSTOFF_HI(pipe), postoff);
-		I915_WRITE(PIPE_CSC_POSTOFF_ME(pipe), postoff);
-		I915_WRITE(PIPE_CSC_POSTOFF_LO(pipe), postoff);
+static void icl_load_csc_matrix(const struct intel_crtc_state *crtc_state)
+{
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 
-		I915_WRITE(PIPE_CSC_MODE(pipe), 0);
-	} else {
-		u32 mode = CSC_MODE_YUV_TO_RGB;
+	if (crtc_state->base.ctm) {
+		u16 coeff[9];
 
-		if (limited_color_range)
-			mode |= CSC_BLACK_SCREEN_OFFSET;
+		ilk_csc_convert_ctm(crtc_state, coeff);
+		ilk_update_pipe_csc(crtc, ilk_csc_off_zero,
+				    coeff, ilk_csc_off_zero);
+	}
 
-		I915_WRITE(PIPE_CSC_MODE(pipe), mode);
+	if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB) {
+		icl_update_output_csc(crtc, ilk_csc_off_zero,
+				      ilk_csc_coeff_rgb_to_ycbcr,
+				      ilk_csc_postoff_rgb_to_ycbcr);
+	} else if (crtc_state->limited_color_range) {
+		icl_update_output_csc(crtc, ilk_csc_off_zero,
+				      ilk_csc_coeff_limited_range,
+				      ilk_csc_postoff_limited_range);
 	}
+
+	I915_WRITE(PIPE_CSC_MODE(crtc->pipe), crtc_state->csc_mode);
 }
 
 /*
@@ -262,7 +315,6 @@ static void cherryview_load_csc_matrix(const struct intel_crtc_state *crtc_state
 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	enum pipe pipe = crtc->pipe;
-	u32 mode;
 
 	if (crtc_state->base.ctm) {
 		const struct drm_color_ctm *ctm = crtc_state->base.ctm->data;
@@ -296,12 +348,7 @@ static void cherryview_load_csc_matrix(const struct intel_crtc_state *crtc_state
 		I915_WRITE(CGM_PIPE_CSC_COEFF8(pipe), coeffs[8]);
 	}
 
-	mode = (crtc_state->base.ctm ? CGM_PIPE_MODE_CSC : 0);
-	if (!crtc_state_is_legacy_gamma(crtc_state)) {
-		mode |= (crtc_state->base.degamma_lut ? CGM_PIPE_MODE_DEGAMMA : 0) |
-			(crtc_state->base.gamma_lut ? CGM_PIPE_MODE_GAMMA : 0);
-	}
-	I915_WRITE(CGM_PIPE_MODE(pipe), mode);
+	I915_WRITE(CGM_PIPE_MODE(pipe), crtc_state->cgm_mode);
 }
 
 /* Loads the legacy palette/gamma unit for the CRTC. */
@@ -351,6 +398,32 @@ static void i9xx_load_luts(const struct intel_crtc_state *crtc_state)
 	i9xx_load_luts_internal(crtc_state, crtc_state->base.gamma_lut);
 }
 
+static void i9xx_color_commit(const struct intel_crtc_state *crtc_state)
+{
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	enum pipe pipe = crtc->pipe;
+	u32 val;
+
+	val = I915_READ(PIPECONF(pipe));
+	val &= ~PIPECONF_GAMMA_MODE_MASK_I9XX;
+	val |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);
+	I915_WRITE(PIPECONF(pipe), val);
+}
+
+static void ilk_color_commit(const struct intel_crtc_state *crtc_state)
+{
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	enum pipe pipe = crtc->pipe;
+	u32 val;
+
+	val = I915_READ(PIPECONF(pipe));
+	val &= ~PIPECONF_GAMMA_MODE_MASK_ILK;
+	val |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);
+	I915_WRITE(PIPECONF(pipe), val);
+}
+
 static void hsw_color_commit(const struct intel_crtc_state *crtc_state)
 {
 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
@@ -361,6 +434,32 @@ static void hsw_color_commit(const struct intel_crtc_state *crtc_state)
 	ilk_load_csc_matrix(crtc_state);
 }
 
+static void skl_color_commit(const struct intel_crtc_state *crtc_state)
+{
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	enum pipe pipe = crtc->pipe;
+	u32 val = 0;
+
+	/*
+	 * We don't (yet) allow userspace to control the pipe background color,
+	 * so force it to black, but apply pipe gamma and CSC appropriately
+	 * so that its handling will match how we program our planes.
+	 */
+	if (crtc_state->gamma_enable)
+		val |= SKL_BOTTOM_COLOR_GAMMA_ENABLE;
+	if (crtc_state->csc_enable)
+		val |= SKL_BOTTOM_COLOR_CSC_ENABLE;
+	I915_WRITE(SKL_BOTTOM_COLOR(pipe), val);
+
+	I915_WRITE(GAMMA_MODE(crtc->pipe), crtc_state->gamma_mode);
+
+	if (INTEL_GEN(dev_priv) >= 11)
+		icl_load_csc_matrix(crtc_state);
+	else
+		ilk_load_csc_matrix(crtc_state);
+}
+
 static void bdw_load_degamma_lut(const struct intel_crtc_state *crtc_state)
 {
 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
@@ -440,6 +539,12 @@ static void bdw_load_gamma_lut(const struct intel_crtc_state *crtc_state, u32 of
 		I915_WRITE(PREC_PAL_GC_MAX(pipe, 1), (1 << 16) - 1);
 		I915_WRITE(PREC_PAL_GC_MAX(pipe, 2), (1 << 16) - 1);
 	}
+
+	/*
+	 * Reset the index, otherwise it prevents the legacy palette from
+	 * being written properly.
+	 */
+	I915_WRITE(PREC_PAL_INDEX(pipe), 0);
 }
 
 /* Loads the palette/gamma unit for the CRTC on Broadwell+. */
@@ -447,7 +552,6 @@ static void broadwell_load_luts(const struct intel_crtc_state *crtc_state)
 {
 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-	enum pipe pipe = crtc->pipe;
 
 	if (crtc_state_is_legacy_gamma(crtc_state)) {
 		i9xx_load_luts(crtc_state);
@@ -455,12 +559,6 @@ static void broadwell_load_luts(const struct intel_crtc_state *crtc_state)
 		bdw_load_degamma_lut(crtc_state);
 		bdw_load_gamma_lut(crtc_state,
 				   INTEL_INFO(dev_priv)->color.degamma_lut_size);
-
-		/*
-		 * Reset the index, otherwise it prevents the legacy palette to be
-		 * written properly.
-		 */
-		I915_WRITE(PREC_PAL_INDEX(pipe), 0);
 	}
 }
 
@@ -469,7 +567,7 @@ static void glk_load_degamma_lut(const struct intel_crtc_state *crtc_state)
 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	enum pipe pipe = crtc->pipe;
-	const u32 lut_size = 33;
+	const u32 lut_size = INTEL_INFO(dev_priv)->color.degamma_lut_size;
 	u32 i;
 
 	/*
@@ -480,14 +578,32 @@ static void glk_load_degamma_lut(const struct intel_crtc_state *crtc_state)
 	I915_WRITE(PRE_CSC_GAMC_INDEX(pipe), 0);
 	I915_WRITE(PRE_CSC_GAMC_INDEX(pipe), PRE_CSC_GAMC_AUTO_INCREMENT);
 
-	/*
-	 *  FIXME: The pipe degamma table in geminilake doesn't support
-	 *  different values per channel, so this just loads a linear table.
-	 */
-	for (i = 0; i < lut_size; i++) {
-		u32 v = (i * (1 << 16)) / (lut_size - 1);
+	if (crtc_state->base.degamma_lut) {
+		struct drm_color_lut *lut = crtc_state->base.degamma_lut->data;
+
+		for (i = 0; i < lut_size; i++) {
+			/*
+			 * The first 33 entries represent the range 0 to 1.0;
+			 * the 34th and 35th entries represent the extended
+			 * range inputs 3.0 and 7.0 respectively, currently
+			 * clamped at 1.0. Since the precision is 16 bit, the
+			 * user value can be written directly to the register.
+			 * The pipe degamma table from GLK onwards doesn't
+			 * support different values per channel, so this just
+			 * programs the green value, which is taken to equal
+			 * red and blue, into the LUT registers.
+			 * ToDo: extend to the max of 7.0; this needs 32 bit
+			 * input values instead of just 16.
+			 */
+			I915_WRITE(PRE_CSC_GAMC_DATA(pipe), lut[i].green);
+		}
+	} else {
+		/* load a linear table. */
+		for (i = 0; i < lut_size; i++) {
+			u32 v = (i * (1 << 16)) / (lut_size - 1);
 
-		I915_WRITE(PRE_CSC_GAMC_DATA(pipe), v);
+			I915_WRITE(PRE_CSC_GAMC_DATA(pipe), v);
+		}
 	}
 
 	/* Clamp values > 1.0. */
@@ -497,23 +613,23 @@ static void glk_load_degamma_lut(const struct intel_crtc_state *crtc_state)
 
 static void glk_load_luts(const struct intel_crtc_state *crtc_state)
 {
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-	enum pipe pipe = crtc->pipe;
-
 	glk_load_degamma_lut(crtc_state);
 
-	if (crtc_state_is_legacy_gamma(crtc_state)) {
+	if (crtc_state_is_legacy_gamma(crtc_state))
 		i9xx_load_luts(crtc_state);
-	} else {
+	else
 		bdw_load_gamma_lut(crtc_state, 0);
+}
 
-		/*
-		 * Reset the index, otherwise it prevents the legacy palette to be
-		 * written properly.
-		 */
-		I915_WRITE(PREC_PAL_INDEX(pipe), 0);
-	}
+static void icl_load_luts(const struct intel_crtc_state *crtc_state)
+{
+	glk_load_degamma_lut(crtc_state);
+
+	if (crtc_state_is_legacy_gamma(crtc_state))
+		i9xx_load_luts(crtc_state);
+	else
+		/* ToDo: Add support for multi-segment gamma LUT */
+		bdw_load_gamma_lut(crtc_state, 0);
 }
 
 static void cherryview_load_luts(const struct intel_crtc_state *crtc_state)
@@ -585,8 +701,57 @@ void intel_color_commit(const struct intel_crtc_state *crtc_state)
 {
 	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
 
-	if (dev_priv->display.color_commit)
-		dev_priv->display.color_commit(crtc_state);
+	dev_priv->display.color_commit(crtc_state);
+}
+
+static bool need_plane_update(struct intel_plane *plane,
+			      const struct intel_crtc_state *crtc_state)
+{
+	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+
+	/*
+	 * On pre-SKL the pipe gamma enable and pipe csc enable for
+	 * the pipe bottom color are configured via the primary plane.
+	 * We have to reconfigure that even if the plane is inactive.
+	 */
+	return crtc_state->active_planes & BIT(plane->id) ||
+		(INTEL_GEN(dev_priv) < 9 &&
+		 plane->id == PLANE_PRIMARY);
+}
+
+static int
+intel_color_add_affected_planes(struct intel_crtc_state *new_crtc_state)
+{
+	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct intel_atomic_state *state =
+		to_intel_atomic_state(new_crtc_state->base.state);
+	const struct intel_crtc_state *old_crtc_state =
+		intel_atomic_get_old_crtc_state(state, crtc);
+	struct intel_plane *plane;
+
+	if (!new_crtc_state->base.active ||
+	    drm_atomic_crtc_needs_modeset(&new_crtc_state->base))
+		return 0;
+
+	if (new_crtc_state->gamma_enable == old_crtc_state->gamma_enable &&
+	    new_crtc_state->csc_enable == old_crtc_state->csc_enable)
+		return 0;
+
+	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
+		struct intel_plane_state *plane_state;
+
+		if (!need_plane_update(plane, new_crtc_state))
+			continue;
+
+		plane_state = intel_atomic_get_plane_state(state, plane);
+		if (IS_ERR(plane_state))
+			return PTR_ERR(plane_state);
+
+		new_crtc_state->update_planes |= BIT(plane->id);
+	}
+
+	return 0;
 }
 
 static int check_lut_size(const struct drm_property_blob *lut, int expected)
@@ -606,22 +771,70 @@ static int check_lut_size(const struct drm_property_blob *lut, int expected)
 	return 0;
 }
 
+static u32 chv_cgm_mode(const struct intel_crtc_state *crtc_state)
+{
+	u32 cgm_mode = 0;
+
+	if (crtc_state_is_legacy_gamma(crtc_state))
+		return 0;
+
+	if (crtc_state->base.degamma_lut)
+		cgm_mode |= CGM_PIPE_MODE_DEGAMMA;
+	if (crtc_state->base.ctm)
+		cgm_mode |= CGM_PIPE_MODE_CSC;
+	if (crtc_state->base.gamma_lut)
+		cgm_mode |= CGM_PIPE_MODE_GAMMA;
+
+	return cgm_mode;
+}
+
 int intel_color_check(struct intel_crtc_state *crtc_state)
 {
 	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
 	const struct drm_property_blob *gamma_lut = crtc_state->base.gamma_lut;
 	const struct drm_property_blob *degamma_lut = crtc_state->base.degamma_lut;
+	bool limited_color_range = false;
 	int gamma_length, degamma_length;
 	u32 gamma_tests, degamma_tests;
+	int ret;
 
 	degamma_length = INTEL_INFO(dev_priv)->color.degamma_lut_size;
 	gamma_length = INTEL_INFO(dev_priv)->color.gamma_lut_size;
 	degamma_tests = INTEL_INFO(dev_priv)->color.degamma_lut_tests;
 	gamma_tests = INTEL_INFO(dev_priv)->color.gamma_lut_tests;
 
+	/* C8 needs the legacy LUT all to itself */
+	if (crtc_state->c8_planes &&
+	    !crtc_state_is_legacy_gamma(crtc_state))
+		return -EINVAL;
+
+	crtc_state->gamma_enable = (gamma_lut || degamma_lut) &&
+		!crtc_state->c8_planes;
+
+	if (INTEL_GEN(dev_priv) >= 9 ||
+	    IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
+		limited_color_range = crtc_state->limited_color_range;
+
+	crtc_state->csc_enable =
+		crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB ||
+		crtc_state->base.ctm || limited_color_range;
+
+	ret = intel_color_add_affected_planes(crtc_state);
+	if (ret)
+		return ret;
+
+	crtc_state->csc_mode = 0;
+
+	if (IS_CHERRYVIEW(dev_priv))
+		crtc_state->cgm_mode = chv_cgm_mode(crtc_state);
+
 	/* Always allow legacy gamma LUT with no further checking. */
-	if (crtc_state_is_legacy_gamma(crtc_state)) {
+	if (!crtc_state->gamma_enable ||
+	    crtc_state_is_legacy_gamma(crtc_state)) {
 		crtc_state->gamma_mode = GAMMA_MODE_MODE_8BIT;
+		if (INTEL_GEN(dev_priv) >= 11 &&
+		    crtc_state->gamma_enable)
+			crtc_state->gamma_mode |= POST_CSC_GAMMA_ENABLE;
 		return 0;
 	}
 
@@ -633,13 +846,26 @@ int intel_color_check(struct intel_crtc_state *crtc_state)
 	    drm_color_lut_check(gamma_lut, gamma_tests))
 		return -EINVAL;
 
-	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
+	if (INTEL_GEN(dev_priv) >= 11)
+		crtc_state->gamma_mode = GAMMA_MODE_MODE_10BIT |
+					 PRE_CSC_GAMMA_ENABLE |
+					 POST_CSC_GAMMA_ENABLE;
+	else if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
 		crtc_state->gamma_mode = GAMMA_MODE_MODE_10BIT;
 	else if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
 		crtc_state->gamma_mode = GAMMA_MODE_MODE_SPLIT;
 	else
 		crtc_state->gamma_mode = GAMMA_MODE_MODE_8BIT;
 
+	if (INTEL_GEN(dev_priv) >= 11) {
+		if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB ||
+		    crtc_state->limited_color_range)
+			crtc_state->csc_mode |= ICL_OUTPUT_CSC_ENABLE;
+
+		if (crtc_state->base.ctm)
+			crtc_state->csc_mode |= ICL_CSC_ENABLE;
+	}
+
 	return 0;
 }
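
To make the new state bits concrete: on gen11 with a YCbCr444 output, a CTM
attached and a full 10-bit gamma LUT (i.e. not the legacy path), the checks
above resolve as in this worked trace:

	/*
	 * output_format != INTEL_OUTPUT_FORMAT_RGB  ->  csc_enable = true
	 * gamma_lut set, no C8 planes               ->  gamma_enable = true
	 * gamma_mode = GAMMA_MODE_MODE_10BIT |
	 *              PRE_CSC_GAMMA_ENABLE | POST_CSC_GAMMA_ENABLE
	 * csc_mode   = ICL_OUTPUT_CSC_ENABLE        (non-RGB output)
	 *            | ICL_CSC_ENABLE               (CTM present)
	 */
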
 
@@ -649,20 +875,29 @@ void intel_color_init(struct intel_crtc *crtc)
 
 	drm_mode_crtc_set_gamma_size(&crtc->base, 256);
 
-	if (IS_CHERRYVIEW(dev_priv)) {
-		dev_priv->display.load_luts = cherryview_load_luts;
-	} else if (IS_HASWELL(dev_priv)) {
-		dev_priv->display.load_luts = i9xx_load_luts;
-		dev_priv->display.color_commit = hsw_color_commit;
-	} else if (IS_BROADWELL(dev_priv) || IS_GEN9_BC(dev_priv) ||
-		   IS_BROXTON(dev_priv)) {
-		dev_priv->display.load_luts = broadwell_load_luts;
-		dev_priv->display.color_commit = hsw_color_commit;
-	} else if (IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) {
-		dev_priv->display.load_luts = glk_load_luts;
-		dev_priv->display.color_commit = hsw_color_commit;
+	if (HAS_GMCH(dev_priv)) {
+		if (IS_CHERRYVIEW(dev_priv))
+			dev_priv->display.load_luts = cherryview_load_luts;
+		else
+			dev_priv->display.load_luts = i9xx_load_luts;
+
+		dev_priv->display.color_commit = i9xx_color_commit;
 	} else {
-		dev_priv->display.load_luts = i9xx_load_luts;
+		if (INTEL_GEN(dev_priv) >= 11)
+			dev_priv->display.load_luts = icl_load_luts;
+		else if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv))
+			dev_priv->display.load_luts = glk_load_luts;
+		else if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
+			dev_priv->display.load_luts = broadwell_load_luts;
+		else
+			dev_priv->display.load_luts = i9xx_load_luts;
+
+		if (INTEL_GEN(dev_priv) >= 9)
+			dev_priv->display.color_commit = skl_color_commit;
+		else if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
+			dev_priv->display.color_commit = hsw_color_commit;
+		else
+			dev_priv->display.color_commit = ilk_color_commit;
 	}
 
 	/* Enable color management support when we have degamma & gamma LUTs. */
diff --git a/drivers/gpu/drm/i915/intel_connector.c b/drivers/gpu/drm/i915/intel_connector.c
index 8352d0bd8813110787e89149325fbeb27ada30c0..848dd9e728d83f9427241e70bb53d6258886fb3c 100644
--- a/drivers/gpu/drm/i915/intel_connector.c
+++ b/drivers/gpu/drm/i915/intel_connector.c
@@ -88,6 +88,8 @@ void intel_connector_destroy(struct drm_connector *connector)
 
 	kfree(intel_connector->detect_edid);
 
+	intel_hdcp_cleanup(intel_connector);
+
 	if (!IS_ERR_OR_NULL(intel_connector->edid))
 		kfree(intel_connector->edid);
 
diff --git a/drivers/gpu/drm/i915/intel_context.c b/drivers/gpu/drm/i915/intel_context.c
new file mode 100644
index 0000000000000000000000000000000000000000..8931e0fee873d48c8adbdba8c951f0b90ec85cc8
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_context.c
@@ -0,0 +1,269 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "i915_drv.h"
+#include "i915_gem_context.h"
+#include "i915_globals.h"
+#include "intel_context.h"
+#include "intel_ringbuffer.h"
+
+static struct i915_global_context {
+	struct i915_global base;
+	struct kmem_cache *slab_ce;
+} global;
+
+struct intel_context *intel_context_alloc(void)
+{
+	return kmem_cache_zalloc(global.slab_ce, GFP_KERNEL);
+}
+
+void intel_context_free(struct intel_context *ce)
+{
+	kmem_cache_free(global.slab_ce, ce);
+}
+
+struct intel_context *
+intel_context_lookup(struct i915_gem_context *ctx,
+		     struct intel_engine_cs *engine)
+{
+	struct intel_context *ce = NULL;
+	struct rb_node *p;
+
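+	/* The tree is keyed by engine pointer, so the walk is O(log n). */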
+	spin_lock(&ctx->hw_contexts_lock);
+	p = ctx->hw_contexts.rb_node;
+	while (p) {
+		struct intel_context *this =
+			rb_entry(p, struct intel_context, node);
+
+		if (this->engine == engine) {
+			GEM_BUG_ON(this->gem_context != ctx);
+			ce = this;
+			break;
+		}
+
+		if (this->engine < engine)
+			p = p->rb_right;
+		else
+			p = p->rb_left;
+	}
+	spin_unlock(&ctx->hw_contexts_lock);
+
+	return ce;
+}
+
+struct intel_context *
+__intel_context_insert(struct i915_gem_context *ctx,
+		       struct intel_engine_cs *engine,
+		       struct intel_context *ce)
+{
+	struct rb_node **p, *parent;
+	int err = 0;
+
+	spin_lock(&ctx->hw_contexts_lock);
+
+	parent = NULL;
+	p = &ctx->hw_contexts.rb_node;
+	while (*p) {
+		struct intel_context *this;
+
+		parent = *p;
+		this = rb_entry(parent, struct intel_context, node);
+
+		if (this->engine == engine) {
+			err = -EEXIST;
+			ce = this;
+			break;
+		}
+
+		if (this->engine < engine)
+			p = &parent->rb_right;
+		else
+			p = &parent->rb_left;
+	}
+	if (!err) {
+		rb_link_node(&ce->node, parent, p);
+		rb_insert_color(&ce->node, &ctx->hw_contexts);
+	}
+
+	spin_unlock(&ctx->hw_contexts_lock);
+
+	return ce;
+}
+
+void __intel_context_remove(struct intel_context *ce)
+{
+	struct i915_gem_context *ctx = ce->gem_context;
+
+	spin_lock(&ctx->hw_contexts_lock);
+	rb_erase(&ce->node, &ctx->hw_contexts);
+	spin_unlock(&ctx->hw_contexts_lock);
+}
+
+static struct intel_context *
+intel_context_instance(struct i915_gem_context *ctx,
+		       struct intel_engine_cs *engine)
+{
+	struct intel_context *ce, *pos;
+
+	ce = intel_context_lookup(ctx, engine);
+	if (likely(ce))
+		return ce;
+
+	ce = intel_context_alloc();
+	if (!ce)
+		return ERR_PTR(-ENOMEM);
+
+	intel_context_init(ce, ctx, engine);
+
+	pos = __intel_context_insert(ctx, engine, ce);
+	if (unlikely(pos != ce)) /* Beaten! Use their HW context instead */
+		intel_context_free(ce);
+
+	GEM_BUG_ON(intel_context_lookup(ctx, engine) != pos);
+	return pos;
+}
+
+struct intel_context *
+intel_context_pin_lock(struct i915_gem_context *ctx,
+		       struct intel_engine_cs *engine)
+	__acquires(ce->pin_mutex)
+{
+	struct intel_context *ce;
+
+	ce = intel_context_instance(ctx, engine);
+	if (IS_ERR(ce))
+		return ce;
+
+	if (mutex_lock_interruptible(&ce->pin_mutex))
+		return ERR_PTR(-EINTR);
+
+	return ce;
+}
+
+struct intel_context *
+intel_context_pin(struct i915_gem_context *ctx,
+		  struct intel_engine_cs *engine)
+{
+	struct intel_context *ce;
+	int err;
+
+	ce = intel_context_instance(ctx, engine);
+	if (IS_ERR(ce))
+		return ce;
+
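+	/* Fast path: already pinned, take another pin without the mutex. */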
+	if (likely(atomic_inc_not_zero(&ce->pin_count)))
+		return ce;
+
+	if (mutex_lock_interruptible(&ce->pin_mutex))
+		return ERR_PTR(-EINTR);
+
+	if (likely(!atomic_read(&ce->pin_count))) {
+		err = ce->ops->pin(ce);
+		if (err)
+			goto err;
+
+		i915_gem_context_get(ctx);
+		GEM_BUG_ON(ce->gem_context != ctx);
+
+		mutex_lock(&ctx->mutex);
+		list_add(&ce->active_link, &ctx->active_engines);
+		mutex_unlock(&ctx->mutex);
+
+		intel_context_get(ce);
+		smp_mb__before_atomic(); /* flush pin before it is visible */
+	}
+
+	atomic_inc(&ce->pin_count);
+	GEM_BUG_ON(!intel_context_is_pinned(ce)); /* no overflow! */
+
+	mutex_unlock(&ce->pin_mutex);
+	return ce;
+
+err:
+	mutex_unlock(&ce->pin_mutex);
+	return ERR_PTR(err);
+}
+
+void intel_context_unpin(struct intel_context *ce)
+{
+	if (likely(atomic_add_unless(&ce->pin_count, -1, 1)))
+		return;
+
+	/* We may be called from inside intel_context_pin() to evict another */
+	intel_context_get(ce);
+	mutex_lock_nested(&ce->pin_mutex, SINGLE_DEPTH_NESTING);
+
+	if (likely(atomic_dec_and_test(&ce->pin_count))) {
+		ce->ops->unpin(ce);
+
+		mutex_lock(&ce->gem_context->mutex);
+		list_del(&ce->active_link);
+		mutex_unlock(&ce->gem_context->mutex);
+
+		i915_gem_context_put(ce->gem_context);
+		intel_context_put(ce);
+	}
+
+	mutex_unlock(&ce->pin_mutex);
+	intel_context_put(ce);
+}
+
+static void intel_context_retire(struct i915_active_request *active,
+				 struct i915_request *rq)
+{
+	struct intel_context *ce =
+		container_of(active, typeof(*ce), active_tracker);
+
+	intel_context_unpin(ce);
+}
+
+void
+intel_context_init(struct intel_context *ce,
+		   struct i915_gem_context *ctx,
+		   struct intel_engine_cs *engine)
+{
+	kref_init(&ce->ref);
+
+	ce->gem_context = ctx;
+	ce->engine = engine;
+	ce->ops = engine->cops;
+
+	INIT_LIST_HEAD(&ce->signal_link);
+	INIT_LIST_HEAD(&ce->signals);
+
+	mutex_init(&ce->pin_mutex);
+
+	/* Use the whole device by default */
+	ce->sseu = intel_device_default_sseu(ctx->i915);
+
+	i915_active_request_init(&ce->active_tracker,
+				 NULL, intel_context_retire);
+}
+
+static void i915_global_context_shrink(void)
+{
+	kmem_cache_shrink(global.slab_ce);
+}
+
+static void i915_global_context_exit(void)
+{
+	kmem_cache_destroy(global.slab_ce);
+}
+
+static struct i915_global_context global = { {
+	.shrink = i915_global_context_shrink,
+	.exit = i915_global_context_exit,
+} };
+
+int __init i915_global_context_init(void)
+{
+	global.slab_ce = KMEM_CACHE(intel_context, SLAB_HWCACHE_ALIGN);
+	if (!global.slab_ce)
+		return -ENOMEM;
+
+	i915_global_register(&global.base);
+	return 0;
+}
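
Taken together, the expected calling pattern for the new interface looks
roughly like this (a sketch only; the real call sites are converted
elsewhere in the series):

	struct intel_context *ce;

	ce = intel_context_pin(ctx, engine); /* may allocate and pin HW state */
	if (IS_ERR(ce))
		return PTR_ERR(ce);

	/* ... build and submit requests against ce->ring ... */

	intel_context_unpin(ce); /* the final unpin calls ce->ops->unpin() */
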
diff --git a/drivers/gpu/drm/i915/intel_context.h b/drivers/gpu/drm/i915/intel_context.h
new file mode 100644
index 0000000000000000000000000000000000000000..ebc861b1a49e2bcf91a4c8b68ce7867b1cc3e42b
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_context.h
@@ -0,0 +1,87 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_CONTEXT_H__
+#define __INTEL_CONTEXT_H__
+
+#include <linux/lockdep.h>
+
+#include "intel_context_types.h"
+#include "intel_engine_types.h"
+
+struct intel_context *intel_context_alloc(void);
+void intel_context_free(struct intel_context *ce);
+
+void intel_context_init(struct intel_context *ce,
+			struct i915_gem_context *ctx,
+			struct intel_engine_cs *engine);
+
+/**
+ * intel_context_lookup - Find the matching HW context for this (ctx, engine)
+ * @ctx: the parent GEM context
+ * @engine: the target HW engine
+ *
+ * May return NULL if the HW context hasn't been instantiated (i.e. unused).
+ */
+struct intel_context *
+intel_context_lookup(struct i915_gem_context *ctx,
+		     struct intel_engine_cs *engine);
+
+/**
+ * intel_context_pin_lock - Stabilises the 'pinned' status of the HW context
+ * @ctx: the parent GEM context
+ * @engine: the target HW engine
+ *
+ * Acquire a lock on the pinned status of the HW context, such that the context
+ * can neither be bound to the GPU nor unbound whilst the lock is held, i.e.
+ * intel_context_is_pinned() remains stable.
+ */
+struct intel_context *
+intel_context_pin_lock(struct i915_gem_context *ctx,
+		       struct intel_engine_cs *engine);
+
+static inline bool
+intel_context_is_pinned(struct intel_context *ce)
+{
+	return atomic_read(&ce->pin_count);
+}
+
+static inline void intel_context_pin_unlock(struct intel_context *ce)
+__releases(ce->pin_mutex)
+{
+	mutex_unlock(&ce->pin_mutex);
+}
+
+struct intel_context *
+__intel_context_insert(struct i915_gem_context *ctx,
+		       struct intel_engine_cs *engine,
+		       struct intel_context *ce);
+void
+__intel_context_remove(struct intel_context *ce);
+
+struct intel_context *
+intel_context_pin(struct i915_gem_context *ctx, struct intel_engine_cs *engine);
+
+static inline void __intel_context_pin(struct intel_context *ce)
+{
+	GEM_BUG_ON(!intel_context_is_pinned(ce));
+	atomic_inc(&ce->pin_count);
+}
+
+void intel_context_unpin(struct intel_context *ce);
+
+static inline struct intel_context *intel_context_get(struct intel_context *ce)
+{
+	kref_get(&ce->ref);
+	return ce;
+}
+
+static inline void intel_context_put(struct intel_context *ce)
+{
+	kref_put(&ce->ref, ce->ops->destroy);
+}
+
+#endif /* __INTEL_CONTEXT_H__ */
diff --git a/drivers/gpu/drm/i915/intel_context_types.h b/drivers/gpu/drm/i915/intel_context_types.h
new file mode 100644
index 0000000000000000000000000000000000000000..624729a3587514d3198b7f86c7b65cf5950dd7de
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_context_types.h
@@ -0,0 +1,73 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_CONTEXT_TYPES__
+#define __INTEL_CONTEXT_TYPES__
+
+#include <linux/kref.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/rbtree.h>
+#include <linux/types.h>
+
+#include "i915_active_types.h"
+
+struct i915_gem_context;
+struct i915_vma;
+struct intel_context;
+struct intel_ring;
+
+struct intel_context_ops {
+	int (*pin)(struct intel_context *ce);
+	void (*unpin)(struct intel_context *ce);
+
+	void (*destroy)(struct kref *kref);
+};
+
+/*
+ * Powergating configuration for a particular (context,engine).
+ */
+struct intel_sseu {
+	u8 slice_mask;
+	u8 subslice_mask;
+	u8 min_eus_per_subslice;
+	u8 max_eus_per_subslice;
+};
+
+struct intel_context {
+	struct kref ref;
+
+	struct i915_gem_context *gem_context;
+	struct intel_engine_cs *engine;
+	struct intel_engine_cs *active;
+
+	struct list_head active_link;
+	struct list_head signal_link;
+	struct list_head signals;
+
+	struct i915_vma *state;
+	struct intel_ring *ring;
+
+	u32 *lrc_reg_state;
+	u64 lrc_desc;
+
+	atomic_t pin_count;
+	struct mutex pin_mutex; /* guards pinning and the on-GPU transition */
+
+	/**
+	 * active_tracker: Active tracker for the external rq activity
+	 * on this intel_context object.
+	 */
+	struct i915_active_request active_tracker;
+
+	const struct intel_context_ops *ops;
+	struct rb_node node;
+
+	/** sseu: Control eu/slice partitioning */
+	struct intel_sseu sseu;
+};
+
+#endif /* __INTEL_CONTEXT_TYPES__ */
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 14d580cdefd3e875e08b7af0be350d4f877fb7ef..933df3a57a8abbffe668d4583dd5c87650b662b7 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -851,7 +851,7 @@ static int intel_ddi_hdmi_level(struct drm_i915_private *dev_priv, enum port por
 
 	level = dev_priv->vbt.ddi_port_info[port].hdmi_level_shift;
 
-	if (IS_ICELAKE(dev_priv)) {
+	if (INTEL_GEN(dev_priv) >= 11) {
 		if (intel_port_is_combophy(dev_priv, port))
 			icl_get_combo_buf_trans(dev_priv, port, INTEL_OUTPUT_HDMI,
 						0, &n_entries);
@@ -1349,8 +1349,8 @@ int cnl_calc_wrpll_link(struct drm_i915_private *dev_priv,
 	case DPLL_CFGCR1_KDIV_2:
 		p2 = 2;
 		break;
-	case DPLL_CFGCR1_KDIV_4:
-		p2 = 4;
+	case DPLL_CFGCR1_KDIV_3:
+		p2 = 3;
 		break;
 	}
 
@@ -1477,11 +1477,7 @@ static void icl_ddi_clock_get(struct intel_encoder *encoder,
 
 	pll_id = intel_get_shared_dpll_id(dev_priv, pipe_config->shared_dpll);
 	if (intel_port_is_combophy(dev_priv, port)) {
-		if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_HDMI))
-			link_clock = cnl_calc_wrpll_link(dev_priv, pll_id);
-		else
-			link_clock = icl_calc_dp_combo_pll_link(dev_priv,
-								pll_id);
+		link_clock = cnl_calc_wrpll_link(dev_priv, pll_id);
 	} else {
 		if (pll_id == DPLL_ID_ICL_TBTPLL)
 			link_clock = icl_calc_tbt_pll_link(dev_priv, port);
@@ -1678,7 +1674,7 @@ static void intel_ddi_clock_get(struct intel_encoder *encoder,
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 
-	if (IS_ICELAKE(dev_priv))
+	if (INTEL_GEN(dev_priv) >= 11)
 		icl_ddi_clock_get(encoder, pipe_config);
 	else if (IS_CANNONLAKE(dev_priv))
 		cnl_ddi_clock_get(encoder, pipe_config);
@@ -1911,7 +1907,7 @@ bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector)
 		goto out;
 	}
 
-	if (port == PORT_A)
+	if (HAS_TRANSCODER_EDP(dev_priv) && port == PORT_A)
 		cpu_transcoder = TRANSCODER_EDP;
 	else
 		cpu_transcoder = (enum transcoder) pipe;
@@ -1973,7 +1969,7 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder,
 	if (!(tmp & DDI_BUF_CTL_ENABLE))
 		goto out;
 
-	if (port == PORT_A) {
+	if (HAS_TRANSCODER_EDP(dev_priv) && port == PORT_A) {
 		tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
 
 		switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
@@ -2225,7 +2221,7 @@ u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder)
 	enum port port = encoder->port;
 	int n_entries;
 
-	if (IS_ICELAKE(dev_priv)) {
+	if (INTEL_GEN(dev_priv) >= 11) {
 		if (intel_port_is_combophy(dev_priv, port))
 			icl_get_combo_buf_trans(dev_priv, port, encoder->type,
 						intel_dp->link_rate, &n_entries);
@@ -2317,13 +2313,13 @@ static void cnl_ddi_vswing_program(struct intel_encoder *encoder,
 	/* Program PORT_TX_DW4 */
 	/* We cannot write to GRP. It would overwrite individual loadgen */
 	for (ln = 0; ln < 4; ln++) {
-		val = I915_READ(CNL_PORT_TX_DW4_LN(port, ln));
+		val = I915_READ(CNL_PORT_TX_DW4_LN(ln, port));
 		val &= ~(POST_CURSOR_1_MASK | POST_CURSOR_2_MASK |
 			 CURSOR_COEFF_MASK);
 		val |= POST_CURSOR_1(ddi_translations[level].dw4_post_cursor_1);
 		val |= POST_CURSOR_2(ddi_translations[level].dw4_post_cursor_2);
 		val |= CURSOR_COEFF(ddi_translations[level].dw4_cursor_coeff);
-		I915_WRITE(CNL_PORT_TX_DW4_LN(port, ln), val);
+		I915_WRITE(CNL_PORT_TX_DW4_LN(ln, port), val);
 	}
 
 	/* Program PORT_TX_DW5 */
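The hunks in this region only swap macro arguments into a consistent
(lane, port) order. Both parameters are plain integers, so the compiler
cannot catch a swapped call site; a single canonical order is the only
guard. A hypothetical per-lane register macro, purely for illustration
(the real definitions live in i915_reg.h and differ):

    /* hypothetical layout: PHY base + per-port bank + per-lane stride */
    #define EX_PHY_BASE             0x162000
    #define EX_PORT_STRIDE          0x1000
    #define EX_LANE_STRIDE          0x100
    #define EX_PORT_TX_DW4(ln, port) \
            (EX_PHY_BASE + (port) * EX_PORT_STRIDE + \
             (ln) * EX_LANE_STRIDE + 0x110)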
@@ -2379,14 +2375,14 @@ static void cnl_ddi_vswing_sequence(struct intel_encoder *encoder,
 	 * > 6 GHz (LN0=0, LN1=0, LN2=0, LN3=0)
 	 */
 	for (ln = 0; ln <= 3; ln++) {
-		val = I915_READ(CNL_PORT_TX_DW4_LN(port, ln));
+		val = I915_READ(CNL_PORT_TX_DW4_LN(ln, port));
 		val &= ~LOADGEN_SELECT;
 
 		if ((rate <= 600000 && width == 4 && ln >= 1)  ||
 		    (rate <= 600000 && width < 4 && (ln == 1 || ln == 2))) {
 			val |= LOADGEN_SELECT;
 		}
-		I915_WRITE(CNL_PORT_TX_DW4_LN(port, ln), val);
+		I915_WRITE(CNL_PORT_TX_DW4_LN(ln, port), val);
 	}
 
 	/* 3. Set PORT_CL_DW5 SUS Clock Config to 11b */
@@ -2448,13 +2444,13 @@ static void icl_ddi_combo_vswing_program(struct drm_i915_private *dev_priv,
 	/* Program PORT_TX_DW4 */
 	/* We cannot write to GRP. It would overwrite individual loadgen. */
 	for (ln = 0; ln <= 3; ln++) {
-		val = I915_READ(ICL_PORT_TX_DW4_LN(port, ln));
+		val = I915_READ(ICL_PORT_TX_DW4_LN(ln, port));
 		val &= ~(POST_CURSOR_1_MASK | POST_CURSOR_2_MASK |
 			 CURSOR_COEFF_MASK);
 		val |= POST_CURSOR_1(ddi_translations[level].dw4_post_cursor_1);
 		val |= POST_CURSOR_2(ddi_translations[level].dw4_post_cursor_2);
 		val |= CURSOR_COEFF(ddi_translations[level].dw4_cursor_coeff);
-		I915_WRITE(ICL_PORT_TX_DW4_LN(port, ln), val);
+		I915_WRITE(ICL_PORT_TX_DW4_LN(ln, port), val);
 	}
 
 	/* Program PORT_TX_DW7 */
@@ -2505,14 +2501,14 @@ static void icl_combo_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
 	 * > 6 GHz (LN0=0, LN1=0, LN2=0, LN3=0)
 	 */
 	for (ln = 0; ln <= 3; ln++) {
-		val = I915_READ(ICL_PORT_TX_DW4_LN(port, ln));
+		val = I915_READ(ICL_PORT_TX_DW4_LN(ln, port));
 		val &= ~LOADGEN_SELECT;
 
 		if ((rate <= 600000 && width == 4 && ln >= 1) ||
 		    (rate <= 600000 && width < 4 && (ln == 1 || ln == 2))) {
 			val |= LOADGEN_SELECT;
 		}
-		I915_WRITE(ICL_PORT_TX_DW4_LN(port, ln), val);
+		I915_WRITE(ICL_PORT_TX_DW4_LN(ln, port), val);
 	}
 
 	/* 3. Set PORT_CL_DW5 SUS Clock Config to 11b */
@@ -2555,33 +2551,33 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
 
 	/* Set MG_TX_LINK_PARAMS cri_use_fs32 to 0. */
 	for (ln = 0; ln < 2; ln++) {
-		val = I915_READ(MG_TX1_LINK_PARAMS(port, ln));
+		val = I915_READ(MG_TX1_LINK_PARAMS(ln, port));
 		val &= ~CRI_USE_FS32;
-		I915_WRITE(MG_TX1_LINK_PARAMS(port, ln), val);
+		I915_WRITE(MG_TX1_LINK_PARAMS(ln, port), val);
 
-		val = I915_READ(MG_TX2_LINK_PARAMS(port, ln));
+		val = I915_READ(MG_TX2_LINK_PARAMS(ln, port));
 		val &= ~CRI_USE_FS32;
-		I915_WRITE(MG_TX2_LINK_PARAMS(port, ln), val);
+		I915_WRITE(MG_TX2_LINK_PARAMS(ln, port), val);
 	}
 
 	/* Program MG_TX_SWINGCTRL with values from vswing table */
 	for (ln = 0; ln < 2; ln++) {
-		val = I915_READ(MG_TX1_SWINGCTRL(port, ln));
+		val = I915_READ(MG_TX1_SWINGCTRL(ln, port));
 		val &= ~CRI_TXDEEMPH_OVERRIDE_17_12_MASK;
 		val |= CRI_TXDEEMPH_OVERRIDE_17_12(
 			ddi_translations[level].cri_txdeemph_override_17_12);
-		I915_WRITE(MG_TX1_SWINGCTRL(port, ln), val);
+		I915_WRITE(MG_TX1_SWINGCTRL(ln, port), val);
 
-		val = I915_READ(MG_TX2_SWINGCTRL(port, ln));
+		val = I915_READ(MG_TX2_SWINGCTRL(ln, port));
 		val &= ~CRI_TXDEEMPH_OVERRIDE_17_12_MASK;
 		val |= CRI_TXDEEMPH_OVERRIDE_17_12(
 			ddi_translations[level].cri_txdeemph_override_17_12);
-		I915_WRITE(MG_TX2_SWINGCTRL(port, ln), val);
+		I915_WRITE(MG_TX2_SWINGCTRL(ln, port), val);
 	}
 
 	/* Program MG_TX_DRVCTRL with values from vswing table */
 	for (ln = 0; ln < 2; ln++) {
-		val = I915_READ(MG_TX1_DRVCTRL(port, ln));
+		val = I915_READ(MG_TX1_DRVCTRL(ln, port));
 		val &= ~(CRI_TXDEEMPH_OVERRIDE_11_6_MASK |
 			 CRI_TXDEEMPH_OVERRIDE_5_0_MASK);
 		val |= CRI_TXDEEMPH_OVERRIDE_5_0(
@@ -2589,9 +2585,9 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
 			CRI_TXDEEMPH_OVERRIDE_11_6(
 				ddi_translations[level].cri_txdeemph_override_11_6) |
 			CRI_TXDEEMPH_OVERRIDE_EN;
-		I915_WRITE(MG_TX1_DRVCTRL(port, ln), val);
+		I915_WRITE(MG_TX1_DRVCTRL(ln, port), val);
 
-		val = I915_READ(MG_TX2_DRVCTRL(port, ln));
+		val = I915_READ(MG_TX2_DRVCTRL(ln, port));
 		val &= ~(CRI_TXDEEMPH_OVERRIDE_11_6_MASK |
 			 CRI_TXDEEMPH_OVERRIDE_5_0_MASK);
 		val |= CRI_TXDEEMPH_OVERRIDE_5_0(
@@ -2599,7 +2595,7 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
 			CRI_TXDEEMPH_OVERRIDE_11_6(
 				ddi_translations[level].cri_txdeemph_override_11_6) |
 			CRI_TXDEEMPH_OVERRIDE_EN;
-		I915_WRITE(MG_TX2_DRVCTRL(port, ln), val);
+		I915_WRITE(MG_TX2_DRVCTRL(ln, port), val);
 
 		/* FIXME: Program CRI_LOADGEN_SEL after the spec is updated */
 	}
@@ -2610,17 +2606,17 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
 	 * values from table for which TX1 and TX2 enabled.
 	 */
 	for (ln = 0; ln < 2; ln++) {
-		val = I915_READ(MG_CLKHUB(port, ln));
+		val = I915_READ(MG_CLKHUB(ln, port));
 		if (link_clock < 300000)
 			val |= CFG_LOW_RATE_LKREN_EN;
 		else
 			val &= ~CFG_LOW_RATE_LKREN_EN;
-		I915_WRITE(MG_CLKHUB(port, ln), val);
+		I915_WRITE(MG_CLKHUB(ln, port), val);
 	}
 
 	/* Program the MG_TX_DCC<LN, port being used> based on the link frequency */
 	for (ln = 0; ln < 2; ln++) {
-		val = I915_READ(MG_TX1_DCC(port, ln));
+		val = I915_READ(MG_TX1_DCC(ln, port));
 		val &= ~CFG_AMI_CK_DIV_OVERRIDE_VAL_MASK;
 		if (link_clock <= 500000) {
 			val &= ~CFG_AMI_CK_DIV_OVERRIDE_EN;
@@ -2628,9 +2624,9 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
 			val |= CFG_AMI_CK_DIV_OVERRIDE_EN |
 				CFG_AMI_CK_DIV_OVERRIDE_VAL(1);
 		}
-		I915_WRITE(MG_TX1_DCC(port, ln), val);
+		I915_WRITE(MG_TX1_DCC(ln, port), val);
 
-		val = I915_READ(MG_TX2_DCC(port, ln));
+		val = I915_READ(MG_TX2_DCC(ln, port));
 		val &= ~CFG_AMI_CK_DIV_OVERRIDE_VAL_MASK;
 		if (link_clock <= 500000) {
 			val &= ~CFG_AMI_CK_DIV_OVERRIDE_EN;
@@ -2638,18 +2634,18 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
 			val |= CFG_AMI_CK_DIV_OVERRIDE_EN |
 				CFG_AMI_CK_DIV_OVERRIDE_VAL(1);
 		}
-		I915_WRITE(MG_TX2_DCC(port, ln), val);
+		I915_WRITE(MG_TX2_DCC(ln, port), val);
 	}
 
 	/* Program MG_TX_PISO_READLOAD with values from vswing table */
 	for (ln = 0; ln < 2; ln++) {
-		val = I915_READ(MG_TX1_PISO_READLOAD(port, ln));
+		val = I915_READ(MG_TX1_PISO_READLOAD(ln, port));
 		val |= CRI_CALCINIT;
-		I915_WRITE(MG_TX1_PISO_READLOAD(port, ln), val);
+		I915_WRITE(MG_TX1_PISO_READLOAD(ln, port), val);
 
-		val = I915_READ(MG_TX2_PISO_READLOAD(port, ln));
+		val = I915_READ(MG_TX2_PISO_READLOAD(ln, port));
 		val |= CRI_CALCINIT;
-		I915_WRITE(MG_TX2_PISO_READLOAD(port, ln), val);
+		I915_WRITE(MG_TX2_PISO_READLOAD(ln, port), val);
 	}
 }
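Every loop in this function is the same read-modify-write shape: read
the lane register, clear the field, or in the new value, write it back.
Sketched with hypothetical accessors (i915 later wrapped this idiom in
helpers such as intel_de_rmw()):

    u32 val;

    val = read_reg(reg);            /* read current value */
    val &= ~FIELD_MASK;             /* clear the field being updated */
    val |= new_field_value;         /* merge in the new setting */
    write_reg(reg, val);            /* write back */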
 
@@ -2698,7 +2694,7 @@ u32 bxt_signal_levels(struct intel_dp *intel_dp)
 	struct intel_encoder *encoder = &dport->base;
 	int level = intel_ddi_dp_level(intel_dp);
 
-	if (IS_ICELAKE(dev_priv))
+	if (INTEL_GEN(dev_priv) >= 11)
 		icl_ddi_vswing_sequence(encoder, intel_dp->link_rate,
 					level, encoder->type);
 	else if (IS_CANNONLAKE(dev_priv))
@@ -2867,7 +2863,7 @@ static void intel_ddi_clk_select(struct intel_encoder *encoder,
 
 	mutex_lock(&dev_priv->dpll_lock);
 
-	if (IS_ICELAKE(dev_priv)) {
+	if (INTEL_GEN(dev_priv) >= 11) {
 		if (!intel_port_is_combophy(dev_priv, port))
 			I915_WRITE(DDI_CLK_SEL(port),
 				   icl_pll_to_ddi_clk_sel(encoder, crtc_state));
@@ -2909,7 +2905,7 @@ static void intel_ddi_clk_disable(struct intel_encoder *encoder)
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	enum port port = encoder->port;
 
-	if (IS_ICELAKE(dev_priv)) {
+	if (INTEL_GEN(dev_priv) >= 11) {
 		if (!intel_port_is_combophy(dev_priv, port))
 			I915_WRITE(DDI_CLK_SEL(port), DDI_CLK_SEL_NONE);
 	} else if (IS_CANNONLAKE(dev_priv)) {
@@ -2928,7 +2924,7 @@ static void icl_enable_phy_clock_gating(struct intel_digital_port *dig_port)
 	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
 	enum port port = dig_port->base.port;
 	enum tc_port tc_port = intel_port_to_tc(dev_priv, port);
-	i915_reg_t mg_regs[2] = { MG_DP_MODE(port, 0), MG_DP_MODE(port, 1) };
+	i915_reg_t mg_regs[2] = { MG_DP_MODE(0, port), MG_DP_MODE(1, port) };
 	u32 val;
 	int i;
 
@@ -2999,8 +2995,8 @@ static void icl_program_mg_dp_mode(struct intel_digital_port *intel_dig_port)
 	if (tc_port == PORT_TC_NONE || intel_dig_port->tc_type == TC_PORT_TBT)
 		return;
 
-	ln0 = I915_READ(MG_DP_MODE(port, 0));
-	ln1 = I915_READ(MG_DP_MODE(port, 1));
+	ln0 = I915_READ(MG_DP_MODE(0, port));
+	ln1 = I915_READ(MG_DP_MODE(1, port));
 
 	switch (intel_dig_port->tc_type) {
 	case TC_PORT_TYPEC:
@@ -3050,8 +3046,8 @@ static void icl_program_mg_dp_mode(struct intel_digital_port *intel_dig_port)
 		return;
 	}
 
-	I915_WRITE(MG_DP_MODE(port, 0), ln0);
-	I915_WRITE(MG_DP_MODE(port, 1), ln1);
+	I915_WRITE(MG_DP_MODE(0, port), ln0);
+	I915_WRITE(MG_DP_MODE(1, port), ln1);
 }
 
 static void intel_dp_sink_set_fec_ready(struct intel_dp *intel_dp,
@@ -3126,7 +3122,7 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
 	icl_program_mg_dp_mode(dig_port);
 	icl_disable_phy_clock_gating(dig_port);
 
-	if (IS_ICELAKE(dev_priv))
+	if (INTEL_GEN(dev_priv) >= 11)
 		icl_ddi_vswing_sequence(encoder, crtc_state->port_clock,
 					level, encoder->type);
 	else if (IS_CANNONLAKE(dev_priv))
@@ -3175,7 +3171,7 @@ static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder,
 	icl_program_mg_dp_mode(dig_port);
 	icl_disable_phy_clock_gating(dig_port);
 
-	if (IS_ICELAKE(dev_priv))
+	if (INTEL_GEN(dev_priv) >= 11)
 		icl_ddi_vswing_sequence(encoder, crtc_state->port_clock,
 					level, INTEL_OUTPUT_HDMI);
 	else if (IS_CANNONLAKE(dev_priv))
@@ -3556,7 +3552,7 @@ static void intel_ddi_update_pipe_dp(struct intel_encoder *encoder,
 {
 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
 
-	intel_psr_enable(intel_dp, crtc_state);
+	intel_psr_update(intel_dp, crtc_state);
 	intel_edp_drrs_enable(intel_dp, crtc_state);
 
 	intel_panel_update_backlight(encoder, crtc_state, conn_state);
@@ -3711,7 +3707,7 @@ static bool intel_ddi_is_audio_enabled(struct drm_i915_private *dev_priv,
 void intel_ddi_compute_min_voltage_level(struct drm_i915_private *dev_priv,
 					 struct intel_crtc_state *crtc_state)
 {
-	if (IS_ICELAKE(dev_priv) && crtc_state->port_clock > 594000)
+	if (INTEL_GEN(dev_priv) >= 11 && crtc_state->port_clock > 594000)
 		crtc_state->min_voltage_level = 1;
 	else if (IS_CANNONLAKE(dev_priv) && crtc_state->port_clock > 594000)
 		crtc_state->min_voltage_level = 2;
@@ -3764,7 +3760,10 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
 		pipe_config->has_hdmi_sink = true;
 		intel_dig_port = enc_to_dig_port(&encoder->base);
 
-		if (intel_dig_port->infoframe_enabled(encoder, pipe_config))
+		pipe_config->infoframes.enable |=
+			intel_hdmi_infoframes_enabled(encoder, pipe_config);
+
+		if (pipe_config->infoframes.enable)
 			pipe_config->has_infoframe = true;
 
 		if (temp & TRANS_DDI_HDMI_SCRAMBLING)
@@ -3828,6 +3827,18 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
 			bxt_ddi_phy_get_lane_lat_optim_mask(encoder);
 
 	intel_ddi_compute_min_voltage_level(dev_priv, pipe_config);
+
+	intel_hdmi_read_gcp_infoframe(encoder, pipe_config);
+
+	intel_read_infoframe(encoder, pipe_config,
+			     HDMI_INFOFRAME_TYPE_AVI,
+			     &pipe_config->infoframes.avi);
+	intel_read_infoframe(encoder, pipe_config,
+			     HDMI_INFOFRAME_TYPE_SPD,
+			     &pipe_config->infoframes.spd);
+	intel_read_infoframe(encoder, pipe_config,
+			     HDMI_INFOFRAME_TYPE_VENDOR,
+			     &pipe_config->infoframes.hdmi);
 }
 
 static enum intel_output_type
@@ -3856,7 +3867,7 @@ static int intel_ddi_compute_config(struct intel_encoder *encoder,
 	enum port port = encoder->port;
 	int ret;
 
-	if (port == PORT_A)
+	if (HAS_TRANSCODER_EDP(dev_priv) && port == PORT_A)
 		pipe_config->cpu_transcoder = TRANSCODER_EDP;
 
 	if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_HDMI))
@@ -3958,15 +3969,7 @@ static int modeset_pipe(struct drm_crtc *crtc,
 		goto out;
 	}
 
-	crtc_state->mode_changed = true;
-
-	ret = drm_atomic_add_affected_connectors(state, crtc);
-	if (ret)
-		goto out;
-
-	ret = drm_atomic_add_affected_planes(state, crtc);
-	if (ret)
-		goto out;
+	crtc_state->connectors_changed = true;
 
 	ret = drm_atomic_commit(state);
 out:
diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c
index 855a5074ad775c2314dbf1e645008ee8709d9bb9..eddf83807957efc19f806bd7316bb1c60f2b0778 100644
--- a/drivers/gpu/drm/i915/intel_device_info.c
+++ b/drivers/gpu/drm/i915/intel_device_info.c
@@ -738,9 +738,9 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
 		runtime->num_scalers[PIPE_C] = 1;
 	}
 
-	BUILD_BUG_ON(I915_NUM_ENGINES > BITS_PER_TYPE(intel_ring_mask_t));
+	BUILD_BUG_ON(BITS_PER_TYPE(intel_engine_mask_t) < I915_NUM_ENGINES);
 
-	if (IS_GEN(dev_priv, 11))
+	if (INTEL_GEN(dev_priv) >= 11)
 		for_each_pipe(dev_priv, pipe)
 			runtime->num_sprites[pipe] = 6;
 	else if (IS_GEN(dev_priv, 10) || IS_GEMINILAKE(dev_priv))
@@ -844,7 +844,7 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
 
 	if (IS_GEN(dev_priv, 6) && intel_vtd_active()) {
 		DRM_INFO("Disabling ppGTT for VT-d support\n");
-		info->ppgtt = INTEL_PPGTT_NONE;
+		info->ppgtt_type = INTEL_PPGTT_NONE;
 	}
 
 	/* Initialize command stream timestamp frequency */
@@ -887,7 +887,7 @@ void intel_device_info_init_mmio(struct drm_i915_private *dev_priv)
 			continue;
 
 		if (!(BIT(i) & RUNTIME_INFO(dev_priv)->vdbox_enable)) {
-			info->ring_mask &= ~ENGINE_MASK(_VCS(i));
+			info->engine_mask &= ~BIT(_VCS(i));
 			DRM_DEBUG_DRIVER("vcs%u fused off\n", i);
 			continue;
 		}
@@ -906,7 +906,7 @@ void intel_device_info_init_mmio(struct drm_i915_private *dev_priv)
 			continue;
 
 		if (!(BIT(i) & RUNTIME_INFO(dev_priv)->vebox_enable)) {
-			info->ring_mask &= ~ENGINE_MASK(_VECS(i));
+			info->engine_mask &= ~BIT(_VECS(i));
 			DRM_DEBUG_DRIVER("vecs%u fused off\n", i);
 		}
 	}
diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h
index e8b8661df746c3317926275310632c0bf05a98b9..6234570a9b1796bf4dd93e1e361d204662d6fc3b 100644
--- a/drivers/gpu/drm/i915/intel_device_info.h
+++ b/drivers/gpu/drm/i915/intel_device_info.h
@@ -76,11 +76,10 @@ enum intel_platform {
 	INTEL_MAX_PLATFORMS
 };
 
-enum intel_ppgtt {
+enum intel_ppgtt_type {
 	INTEL_PPGTT_NONE = I915_GEM_PPGTT_NONE,
 	INTEL_PPGTT_ALIASING = I915_GEM_PPGTT_ALIASING,
 	INTEL_PPGTT_FULL = I915_GEM_PPGTT_FULL,
-	INTEL_PPGTT_FULL_4LVL,
 };
 
 #define DEV_INFO_FOR_EACH_FLAG(func) \
@@ -150,19 +149,21 @@ struct sseu_dev_info {
 	u8 eu_mask[GEN_MAX_SLICES * GEN_MAX_SUBSLICES];
 };
 
-typedef u8 intel_ring_mask_t;
+typedef u8 intel_engine_mask_t;
 
 struct intel_device_info {
 	u16 gen_mask;
 
 	u8 gen;
 	u8 gt; /* GT number, 0 if undefined */
-	intel_ring_mask_t ring_mask; /* Rings supported by the HW */
+	intel_engine_mask_t engine_mask; /* Engines supported by the HW */
 
 	enum intel_platform platform;
 	u32 platform_mask;
 
-	enum intel_ppgtt ppgtt;
+	enum intel_ppgtt_type ppgtt_type;
+	unsigned int ppgtt_size; /* log2, e.g. 31/32/48 bits */
+
 	unsigned int page_sizes; /* page sizes supported by the HW */
 
 	u32 display_mmio_offset;
@@ -200,7 +201,7 @@ struct intel_runtime_info {
 	u8 num_sprites[I915_MAX_PIPES];
 	u8 num_scalers[I915_MAX_PIPES];
 
-	u8 num_rings;
+	u8 num_engines;
 
 	/* Slice/subslice/EU info */
 	struct sseu_dev_info sseu;
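The ring->engine rename is kept honest by the BUILD_BUG_ON updated in
intel_device_info.c above: if I915_NUM_ENGINES ever outgrows the u8
typedef, the build fails instead of silently truncating the mask. The
same guard, sketched with hypothetical names:

    #include <linux/bits.h>
    #include <linux/build_bug.h>

    typedef u8 example_engine_mask_t;       /* one bit per engine */
    #define EXAMPLE_NUM_ENGINES 8

    static void example_fuse_off(example_engine_mask_t *mask,
                                 unsigned int engine)
    {
            BUILD_BUG_ON(BITS_PER_TYPE(example_engine_mask_t) <
                         EXAMPLE_NUM_ENGINES);
            *mask &= ~BIT(engine);          /* engine fused off in HW */
    }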
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 94496488641cb988809efb31003c281b1898faff..8574c212b37c4a8b85f537e7511dfdb5af05932e 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -595,7 +595,7 @@ i9xx_select_p2_div(const struct intel_limit *limit,
 		   const struct intel_crtc_state *crtc_state,
 		   int target)
 {
-	struct drm_device *dev = crtc_state->base.crtc->dev;
+	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
 
 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
 		/*
@@ -603,7 +603,7 @@ i9xx_select_p2_div(const struct intel_limit *limit,
 		 * We haven't figured out how to reliably set up different
 		 * single/dual channel state, if we even can.
 		 */
-		if (intel_is_dual_link_lvds(dev))
+		if (intel_is_dual_link_lvds(dev_priv))
 			return limit->p2.p2_fast;
 		else
 			return limit->p2.p2_slow;
@@ -951,14 +951,15 @@ chv_find_best_dpll(const struct intel_limit *limit,
 	return found;
 }
 
-bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, int target_clock,
+bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state,
 			struct dpll *best_clock)
 {
 	int refclk = 100000;
 	const struct intel_limit *limit = &intel_limits_bxt;
 
 	return chv_find_best_dpll(limit, crtc_state,
-				  target_clock, refclk, NULL, best_clock);
+				  crtc_state->port_clock, refclk,
+				  NULL, best_clock);
 }
 
 bool intel_crtc_active(struct intel_crtc *crtc)
@@ -1441,19 +1442,6 @@ static void chv_enable_pll(struct intel_crtc *crtc,
 	}
 }
 
-static int intel_num_dvo_pipes(struct drm_i915_private *dev_priv)
-{
-	struct intel_crtc *crtc;
-	int count = 0;
-
-	for_each_intel_crtc(&dev_priv->drm, crtc) {
-		count += crtc->base.state->active &&
-			intel_crtc_has_type(crtc->config, INTEL_OUTPUT_DVO);
-	}
-
-	return count;
-}
-
 static void i9xx_enable_pll(struct intel_crtc *crtc,
 			    const struct intel_crtc_state *crtc_state)
 {
@@ -1468,26 +1456,12 @@ static void i9xx_enable_pll(struct intel_crtc *crtc,
 	if (IS_MOBILE(dev_priv) && !IS_I830(dev_priv))
 		assert_panel_unlocked(dev_priv, crtc->pipe);
 
-	/* Enable DVO 2x clock on both PLLs if necessary */
-	if (IS_I830(dev_priv) && intel_num_dvo_pipes(dev_priv) > 0) {
-		/*
-		 * It appears to be important that we don't enable this
-		 * for the current pipe before otherwise configuring the
-		 * PLL. No idea how this should be handled if multiple
-		 * DVO outputs are enabled simultaneosly.
-		 */
-		dpll |= DPLL_DVO_2X_MODE;
-		I915_WRITE(DPLL(!crtc->pipe),
-			   I915_READ(DPLL(!crtc->pipe)) | DPLL_DVO_2X_MODE);
-	}
-
 	/*
 	 * Apparently we need to have VGA mode enabled prior to changing
 	 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
 	 * dividers, even though the register value does change.
 	 */
-	I915_WRITE(reg, 0);
-
+	I915_WRITE(reg, dpll & ~DPLL_VGA_MODE_DIS);
 	I915_WRITE(reg, dpll);
 
 	/* Wait for the clocks to stabilize. */
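Per the comment above, the dividers only latch while VGA mode is still
enabled, so the first write is now the final value with
DPLL_VGA_MODE_DIS cleared rather than a bare 0. Presumably this also
avoids momentarily dropping bits such as DPLL_DVO_2X_MODE, which the
Almador errata quoted later in this patch wants held high in both PLLs
on i830. The sequence, sketched with a hypothetical accessor:

    write_dpll(reg, dpll & ~DPLL_VGA_MODE_DIS); /* latch P1/P2 dividers */
    write_dpll(reg, dpll);                      /* final value */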
@@ -1520,16 +1494,6 @@ static void i9xx_disable_pll(const struct intel_crtc_state *crtc_state)
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	enum pipe pipe = crtc->pipe;
 
-	/* Disable DVO 2x clock on both PLLs if necessary */
-	if (IS_I830(dev_priv) &&
-	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO) &&
-	    !intel_num_dvo_pipes(dev_priv)) {
-		I915_WRITE(DPLL(PIPE_B),
-			   I915_READ(DPLL(PIPE_B)) & ~DPLL_DVO_2X_MODE);
-		I915_WRITE(DPLL(PIPE_A),
-			   I915_READ(DPLL(PIPE_A)) & ~DPLL_DVO_2X_MODE);
-	}
-
 	/* Don't disable pipe or pipe PLLs if needed */
 	if (IS_I830(dev_priv))
 		return;
@@ -1658,14 +1622,15 @@ static void ironlake_enable_pch_transcoder(const struct intel_crtc_state *crtc_s
 	}
 
 	val &= ~TRANS_INTERLACE_MASK;
-	if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK)
+	if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK) {
 		if (HAS_PCH_IBX(dev_priv) &&
 		    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
 			val |= TRANS_LEGACY_INTERLACED_ILK;
 		else
 			val |= TRANS_INTERLACED;
-	else
+	} else {
 		val |= TRANS_PROGRESSIVE;
+	}
 
 	I915_WRITE(reg, val | TRANS_ENABLE);
 	if (intel_wait_for_register(dev_priv,
@@ -1830,6 +1795,8 @@ static void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state)
 		/* FIXME: assert CPU port conditions for SNB+ */
 	}
 
+	trace_intel_pipe_enable(dev_priv, pipe);
+
 	reg = PIPECONF(cpu_transcoder);
 	val = I915_READ(reg);
 	if (val & PIPECONF_ENABLE) {
@@ -1869,6 +1836,8 @@ static void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state)
 	 */
 	assert_planes_disabled(crtc);
 
+	trace_intel_pipe_disable(dev_priv, pipe);
+
 	reg = PIPECONF(cpu_transcoder);
 	val = I915_READ(reg);
 	if ((val & PIPECONF_ENABLE) == 0)
@@ -2855,8 +2824,7 @@ static void intel_plane_disable_noatomic(struct intel_crtc *crtc,
 	if (plane->id == PLANE_PRIMARY)
 		intel_pre_disable_primary_noatomic(&crtc->base);
 
-	trace_intel_disable_plane(&plane->base, crtc);
-	plane->disable_plane(plane, crtc_state);
+	intel_disable_plane(plane, crtc_state);
 }
 
 static void
@@ -3260,9 +3228,10 @@ static u32 i9xx_plane_ctl_crtc(const struct intel_crtc_state *crtc_state)
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	u32 dspcntr = 0;
 
-	dspcntr |= DISPPLANE_GAMMA_ENABLE;
+	if (crtc_state->gamma_enable)
+		dspcntr |= DISPPLANE_GAMMA_ENABLE;
 
-	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
+	if (crtc_state->csc_enable)
 		dspcntr |= DISPPLANE_PIPE_CSC_ENABLE;
 
 	if (INTEL_GEN(dev_priv) < 5)
@@ -3489,7 +3458,7 @@ static void i9xx_disable_plane(struct intel_plane *plane,
 	 *
 	 * On pre-g4x there is no way to gamma correct the
 	 * pipe bottom color but we'll keep on doing this
-	 * anyway.
+	 * anyway so that the crtc state readout works correctly.
 	 */
 	dspcntr = i9xx_plane_ctl_crtc(crtc_state);
 
@@ -3764,8 +3733,11 @@ u32 skl_plane_ctl_crtc(const struct intel_crtc_state *crtc_state)
 	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
 		return plane_ctl;
 
-	plane_ctl |= PLANE_CTL_PIPE_GAMMA_ENABLE;
-	plane_ctl |= PLANE_CTL_PIPE_CSC_ENABLE;
+	if (crtc_state->gamma_enable)
+		plane_ctl |= PLANE_CTL_PIPE_GAMMA_ENABLE;
+
+	if (crtc_state->csc_enable)
+		plane_ctl |= PLANE_CTL_PIPE_CSC_ENABLE;
 
 	return plane_ctl;
 }
@@ -3817,8 +3789,11 @@ u32 glk_plane_color_ctl_crtc(const struct intel_crtc_state *crtc_state)
 	if (INTEL_GEN(dev_priv) >= 11)
 		return plane_color_ctl;
 
-	plane_color_ctl |= PLANE_COLOR_PIPE_GAMMA_ENABLE;
-	plane_color_ctl |= PLANE_COLOR_PIPE_CSC_ENABLE;
+	if (crtc_state->gamma_enable)
+		plane_color_ctl |= PLANE_COLOR_PIPE_GAMMA_ENABLE;
+
+	if (crtc_state->csc_enable)
+		plane_color_ctl |= PLANE_COLOR_PIPE_CSC_ENABLE;
 
 	return plane_color_ctl;
 }
@@ -3977,9 +3952,6 @@ void intel_finish_reset(struct drm_i915_private *dev_priv)
 		 * The display has been reset as well,
 		 * so need a full re-initialization.
 		 */
-		intel_runtime_pm_disable_interrupts(dev_priv);
-		intel_runtime_pm_enable_interrupts(dev_priv);
-
 		intel_pps_unlock_regs_wa(dev_priv);
 		intel_modeset_init_hw(dev);
 		intel_init_clock_gating(dev_priv);
@@ -4019,13 +3991,13 @@ static void icl_set_pipe_chicken(struct intel_crtc *crtc)
 	 * and rounding for per-pixel values 00 and 0xff
 	 */
 	tmp |= PER_PIXEL_ALPHA_BYPASS_EN;
-
 	/*
-	 * W/A for underruns with linear/X-tiled with
-	 * WM1+ disabled.
+	 * Display WA # 1605353570: icl
+	 * Set the pixel rounding bit to 1 to allow passthrough
+	 * of framebuffer pixels unmodified across the pipe.
 	 */
-	tmp |= PM_FILL_MAINTAIN_DBUF_FULLNESS;
-
+	tmp |= PIXEL_ROUNDING_TRUNC_FB_PASSTHRU;
 	I915_WRITE(PIPE_CHICKEN(pipe), tmp);
 }
 
@@ -4064,16 +4036,6 @@ static void intel_update_pipe_config(const struct intel_crtc_state *old_crtc_sta
 			ironlake_pfit_disable(old_crtc_state);
 	}
 
-	/*
-	 * We don't (yet) allow userspace to control the pipe background color,
-	 * so force it to black, but apply pipe gamma and CSC so that its
-	 * handling will match how we program our planes.
-	 */
-	if (INTEL_GEN(dev_priv) >= 9)
-		I915_WRITE(SKL_BOTTOM_COLOR(crtc->pipe),
-			   SKL_BOTTOM_COLOR_GAMMA_ENABLE |
-			   SKL_BOTTOM_COLOR_CSC_ENABLE);
-
 	if (INTEL_GEN(dev_priv) >= 11)
 		icl_set_pipe_chicken(crtc);
 }
@@ -5101,10 +5063,10 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
 	/* range checks */
 	if (src_w < SKL_MIN_SRC_W || src_h < SKL_MIN_SRC_H ||
 	    dst_w < SKL_MIN_DST_W || dst_h < SKL_MIN_DST_H ||
-	    (IS_GEN(dev_priv, 11) &&
+	    (INTEL_GEN(dev_priv) >= 11 &&
 	     (src_w > ICL_MAX_SRC_W || src_h > ICL_MAX_SRC_H ||
 	      dst_w > ICL_MAX_DST_W || dst_h > ICL_MAX_DST_H)) ||
-	    (!IS_GEN(dev_priv, 11) &&
+	    (INTEL_GEN(dev_priv) < 11 &&
 	     (src_w > SKL_MAX_SRC_W || src_h > SKL_MAX_SRC_H ||
 	      dst_w > SKL_MAX_DST_W || dst_h > SKL_MAX_DST_H)))	{
 		DRM_DEBUG_KMS("scaler_user index %u.%u: src %ux%u dst %ux%u "
@@ -5673,7 +5635,7 @@ static void intel_crtc_disable_planes(struct intel_atomic_state *state,
 		    !(update_mask & BIT(plane->id)))
 			continue;
 
-		plane->disable_plane(plane, new_crtc_state);
+		intel_disable_plane(plane, new_crtc_state);
 
 		if (old_plane_state->base.visible)
 			fb_bits |= plane->frontbuffer_bit;
@@ -5824,6 +5786,14 @@ static void intel_encoders_update_pipe(struct drm_crtc *crtc,
 	}
 }
 
+static void intel_disable_primary_plane(const struct intel_crtc_state *crtc_state)
+{
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct intel_plane *plane = to_intel_plane(crtc->base.primary);
+
+	plane->disable_plane(plane, crtc_state);
+}
+
 static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
 				 struct drm_atomic_state *old_state)
 {
@@ -5889,6 +5859,8 @@ static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
 	 */
 	intel_color_load_luts(pipe_config);
 	intel_color_commit(pipe_config);
+	/* update DSPCNTR to configure gamma for pipe bottom color */
+	intel_disable_primary_plane(pipe_config);
 
 	if (dev_priv->display.initial_watermarks != NULL)
 		dev_priv->display.initial_watermarks(old_intel_state, pipe_config);
@@ -6017,6 +5989,9 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
 	 */
 	intel_color_load_luts(pipe_config);
 	intel_color_commit(pipe_config);
+	/* update DSPCNTR to configure gamma/csc for pipe bottom color */
+	if (INTEL_GEN(dev_priv) < 9)
+		intel_disable_primary_plane(pipe_config);
 
 	if (INTEL_GEN(dev_priv) >= 11)
 		icl_set_pipe_chicken(intel_crtc);
@@ -6197,7 +6172,7 @@ bool intel_port_is_combophy(struct drm_i915_private *dev_priv, enum port port)
 	if (port == PORT_NONE)
 		return false;
 
-	if (IS_ICELAKE(dev_priv))
+	if (INTEL_GEN(dev_priv) >= 11)
 		return port <= PORT_B;
 
 	return false;
@@ -6205,7 +6180,7 @@ bool intel_port_is_combophy(struct drm_i915_private *dev_priv, enum port port)
 
 bool intel_port_is_tc(struct drm_i915_private *dev_priv, enum port port)
 {
-	if (IS_ICELAKE(dev_priv))
+	if (INTEL_GEN(dev_priv) >= 11)
 		return port >= PORT_C && port <= PORT_F;
 
 	return false;
@@ -6374,6 +6349,8 @@ static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config,
 
 	intel_color_load_luts(pipe_config);
 	intel_color_commit(pipe_config);
+	/* update DSPCNTR to configure gamma for pipe bottom color */
+	intel_disable_primary_plane(pipe_config);
 
 	dev_priv->display.initial_watermarks(old_intel_state,
 					     pipe_config);
@@ -6431,6 +6408,8 @@ static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config,
 
 	intel_color_load_luts(pipe_config);
 	intel_color_commit(pipe_config);
+	/* update DSPCNTR to configure gamma for pipe bottom color */
+	intel_disable_primary_plane(pipe_config);
 
 	if (dev_priv->display.initial_watermarks != NULL)
 		dev_priv->display.initial_watermarks(old_intel_state,
@@ -6813,7 +6792,13 @@ static bool hsw_compute_ips_config(struct intel_crtc_state *crtc_state)
 	if (!hsw_crtc_state_ips_capable(crtc_state))
 		return false;
 
-	if (crtc_state->ips_force_disable)
+	/*
+	 * When IPS gets enabled, the pipe CRC changes. Since IPS gets
+	 * enabled and disabled dynamically based on package C states,
+	 * user space can't make reliable use of the CRCs, so let's just
+	 * completely disable it.
+	 */
+	if (crtc_state->crc_enabled)
 		return false;
 
 	/* IPS should be fine as long as at least one plane is enabled. */
@@ -6888,8 +6873,7 @@ static void intel_crtc_compute_pixel_rate(struct intel_crtc_state *crtc_state)
 static int intel_crtc_compute_config(struct intel_crtc *crtc,
 				     struct intel_crtc_state *pipe_config)
 {
-	struct drm_device *dev = crtc->base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
 	int clock_limit = dev_priv->max_dotclk_freq;
 
@@ -6939,7 +6923,7 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
 		}
 
 		if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_LVDS) &&
-		    intel_is_dual_link_lvds(dev)) {
+		    intel_is_dual_link_lvds(dev_priv)) {
 			DRM_DEBUG_KMS("Odd pipe source width not supported with dual link LVDS\n");
 			return -EINVAL;
 		}
@@ -7556,7 +7540,19 @@ static void i8xx_compute_dpll(struct intel_crtc *crtc,
 			dpll |= PLL_P2_DIVIDE_BY_4;
 	}
 
-	if (!IS_I830(dev_priv) &&
+	/*
+	 * Bspec:
+	 * "[Almador Errata}: For the correct operation of the muxed DVO pins
+	 *  (GDEVSELB/I2Cdata, GIRDBY/I2CClk) and (GFRAMEB/DVI_Data,
+	 *  GTRDYB/DVI_Clk): Bit 31 (DPLL VCO Enable) and Bit 30 (2X Clock
+	 *  Enable) must be set to “1” in both the DPLL A Control Register
+	 *  (06014h-06017h) and DPLL B Control Register (06018h-0601Bh)."
+	 *
+	 * For simplicity we keep both bits always enabled in both
+	 * DPLLs. The spec says we should disable the DVO 2X clock
+	 * when not needed, but this seems to work fine in practice.
+	 */
+	if (IS_I830(dev_priv) ||
 	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO))
 		dpll |= DPLL_DVO_2X_MODE;
 
@@ -7764,13 +7760,16 @@ static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
 			pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
 		else
 			pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
-	} else
+	} else {
 		pipeconf |= PIPECONF_PROGRESSIVE;
+	}
 
 	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
 	     crtc_state->limited_color_range)
 		pipeconf |= PIPECONF_COLOR_RANGE_SELECT;
 
+	pipeconf |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);
+
 	I915_WRITE(PIPECONF(crtc->pipe), pipeconf);
 	POSTING_READ(PIPECONF(crtc->pipe));
 }
@@ -7814,8 +7813,7 @@ static int i8xx_crtc_compute_clock(struct intel_crtc *crtc,
 static int g4x_crtc_compute_clock(struct intel_crtc *crtc,
 				  struct intel_crtc_state *crtc_state)
 {
-	struct drm_device *dev = crtc->base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	const struct intel_limit *limit;
 	int refclk = 96000;
 
@@ -7828,7 +7826,7 @@ static int g4x_crtc_compute_clock(struct intel_crtc *crtc,
 			DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
 		}
 
-		if (intel_is_dual_link_lvds(dev))
+		if (intel_is_dual_link_lvds(dev_priv))
 			limit = &intel_limits_g4x_dual_channel_lvds;
 		else
 			limit = &intel_limits_g4x_single_channel_lvds;
@@ -8178,6 +8176,24 @@ static void intel_get_crtc_ycbcr_config(struct intel_crtc *crtc,
 	pipe_config->output_format = output;
 }
 
+static void i9xx_get_pipe_color_config(struct intel_crtc_state *crtc_state)
+{
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct intel_plane *plane = to_intel_plane(crtc->base.primary);
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
+	u32 tmp;
+
+	tmp = I915_READ(DSPCNTR(i9xx_plane));
+
+	if (tmp & DISPPLANE_GAMMA_ENABLE)
+		crtc_state->gamma_enable = true;
+
+	if (!HAS_GMCH(dev_priv) &&
+	    tmp & DISPPLANE_PIPE_CSC_ENABLE)
+		crtc_state->csc_enable = true;
+}
+
 static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
 				 struct intel_crtc_state *pipe_config)
 {
@@ -8223,6 +8239,14 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
 	    (tmp & PIPECONF_COLOR_RANGE_SELECT))
 		pipe_config->limited_color_range = true;
 
+	pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_I9XX) >>
+		PIPECONF_GAMMA_MODE_SHIFT;
+
+	if (IS_CHERRYVIEW(dev_priv))
+		pipe_config->cgm_mode = I915_READ(CGM_PIPE_MODE(crtc->pipe));
+
+	i9xx_get_pipe_color_config(pipe_config);
+
 	if (INTEL_GEN(dev_priv) < 4)
 		pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;
 
@@ -8255,14 +8279,6 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
 	}
 	pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(crtc->pipe));
 	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
-		/*
-		 * DPLL_DVO_2X_MODE must be enabled for both DPLLs
-		 * on 830. Filter it out here so that we don't
-		 * report errors due to that.
-		 */
-		if (IS_I830(dev_priv))
-			pipe_config->dpll_hw_state.dpll &= ~DPLL_DVO_2X_MODE;
-
 		pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(crtc->pipe));
 		pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(crtc->pipe));
 	} else {
@@ -8762,6 +8778,8 @@ static void ironlake_set_pipeconf(const struct intel_crtc_state *crtc_state)
 	if (crtc_state->limited_color_range)
 		val |= PIPECONF_COLOR_RANGE_SELECT;
 
+	val |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);
+
 	I915_WRITE(PIPECONF(pipe), val);
 	POSTING_READ(PIPECONF(pipe));
 }
@@ -8842,13 +8860,11 @@ static bool ironlake_needs_fb_cb_tune(struct dpll *dpll, int factor)
 	return i9xx_dpll_compute_m(dpll) < factor * dpll->n;
 }
 
-static void ironlake_compute_dpll(struct intel_crtc *intel_crtc,
+static void ironlake_compute_dpll(struct intel_crtc *crtc,
 				  struct intel_crtc_state *crtc_state,
 				  struct dpll *reduced_clock)
 {
-	struct drm_crtc *crtc = &intel_crtc->base;
-	struct drm_device *dev = crtc->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	u32 dpll, fp, fp2;
 	int factor;
 
@@ -8857,10 +8873,12 @@ static void ironlake_compute_dpll(struct intel_crtc *intel_crtc,
 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
 		if ((intel_panel_use_ssc(dev_priv) &&
 		     dev_priv->vbt.lvds_ssc_freq == 100000) ||
-		    (HAS_PCH_IBX(dev_priv) && intel_is_dual_link_lvds(dev)))
+		    (HAS_PCH_IBX(dev_priv) &&
+		     intel_is_dual_link_lvds(dev_priv)))
 			factor = 25;
-	} else if (crtc_state->sdvo_tv_clock)
+	} else if (crtc_state->sdvo_tv_clock) {
 		factor = 20;
+	}
 
 	fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
 
@@ -8947,8 +8965,7 @@ static void ironlake_compute_dpll(struct intel_crtc *intel_crtc,
 static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
 				       struct intel_crtc_state *crtc_state)
 {
-	struct drm_device *dev = crtc->base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	const struct intel_limit *limit;
 	int refclk = 120000;
 
@@ -8966,7 +8983,7 @@ static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
 			refclk = dev_priv->vbt.lvds_ssc_freq;
 		}
 
-		if (intel_is_dual_link_lvds(dev)) {
+		if (intel_is_dual_link_lvds(dev_priv)) {
 			if (refclk == 100000)
 				limit = &intel_limits_ironlake_dual_lvds_100m;
 			else
@@ -8990,7 +9007,7 @@ static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
 
 	ironlake_compute_dpll(crtc, crtc_state, NULL);
 
-	if (!intel_get_shared_dpll(crtc, crtc_state, NULL)) {
+	if (!intel_get_shared_dpll(crtc_state, NULL)) {
 		DRM_DEBUG_KMS("failed to find PLL for pipe %c\n",
 			      pipe_name(crtc->pipe));
 		return -EINVAL;
@@ -9296,6 +9313,13 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
 	if (tmp & PIPECONF_COLOR_RANGE_SELECT)
 		pipe_config->limited_color_range = true;
 
+	pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_ILK) >>
+		PIPECONF_GAMMA_MODE_SHIFT;
+
+	pipe_config->csc_mode = I915_READ(PIPE_CSC_MODE(crtc->pipe));
+
+	i9xx_get_pipe_color_config(pipe_config);
+
 	if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
 		struct intel_shared_dpll *pll;
 		enum intel_dpll_id pll_id;
@@ -9580,11 +9604,11 @@ static int haswell_crtc_compute_clock(struct intel_crtc *crtc,
 		to_intel_atomic_state(crtc_state->base.state);
 
 	if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI) ||
-	    IS_ICELAKE(dev_priv)) {
+	    INTEL_GEN(dev_priv) >= 11) {
 		struct intel_encoder *encoder =
 			intel_get_crtc_new_encoder(state, crtc_state);
 
-		if (!intel_get_shared_dpll(crtc, crtc_state, encoder)) {
+		if (!intel_get_shared_dpll(crtc_state, encoder)) {
 			DRM_DEBUG_KMS("failed to find PLL for pipe %c\n",
 				      pipe_name(crtc->pipe));
 			return -EINVAL;
@@ -9622,9 +9646,6 @@ static void icelake_get_ddi_pll(struct drm_i915_private *dev_priv,
 		temp = I915_READ(DPCLKA_CFGCR0_ICL) &
 		       DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
 		id = temp >> DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port);
-
-		if (WARN_ON(!intel_dpll_is_combophy(id)))
-			return;
 	} else if (intel_port_is_tc(dev_priv, port)) {
 		id = icl_tc_port_to_pll_id(intel_port_to_tc(dev_priv, port));
 	} else {
@@ -9718,15 +9739,18 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
 	struct drm_device *dev = crtc->base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	enum intel_display_power_domain power_domain;
-	unsigned long panel_transcoder_mask = BIT(TRANSCODER_EDP);
+	unsigned long panel_transcoder_mask = 0;
 	unsigned long enabled_panel_transcoders = 0;
 	enum transcoder panel_transcoder;
 	u32 tmp;
 
-	if (IS_ICELAKE(dev_priv))
+	if (INTEL_GEN(dev_priv) >= 11)
 		panel_transcoder_mask |=
 			BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1);
 
+	if (HAS_TRANSCODER_EDP(dev_priv))
+		panel_transcoder_mask |= BIT(TRANSCODER_EDP);
+
 	/*
 	 * The pipe->transcoder mapping is fixed with the exception of the eDP
 	 * and DSI transcoders handled below.
@@ -9856,7 +9880,7 @@ static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
 
 	port = (tmp & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT;
 
-	if (IS_ICELAKE(dev_priv))
+	if (INTEL_GEN(dev_priv) >= 11)
 		icelake_get_ddi_pll(dev_priv, port, pipe_config);
 	else if (IS_CANNONLAKE(dev_priv))
 		cannonlake_get_ddi_pll(dev_priv, port, pipe_config);
@@ -9919,7 +9943,7 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
 		goto out;
 
 	if (!transcoder_is_dsi(pipe_config->cpu_transcoder) ||
-	    IS_ICELAKE(dev_priv)) {
+	    INTEL_GEN(dev_priv) >= 11) {
 		haswell_get_ddi_port_state(crtc, pipe_config);
 		intel_get_pipe_timings(crtc, pipe_config);
 	}
@@ -9927,8 +9951,21 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
 	intel_get_pipe_src_size(crtc, pipe_config);
 	intel_get_crtc_ycbcr_config(crtc, pipe_config);
 
-	pipe_config->gamma_mode =
-		I915_READ(GAMMA_MODE(crtc->pipe)) & GAMMA_MODE_MODE_MASK;
+	pipe_config->gamma_mode = I915_READ(GAMMA_MODE(crtc->pipe));
+
+	pipe_config->csc_mode = I915_READ(PIPE_CSC_MODE(crtc->pipe));
+
+	if (INTEL_GEN(dev_priv) >= 9) {
+		u32 tmp = I915_READ(SKL_BOTTOM_COLOR(crtc->pipe));
+
+		if (tmp & SKL_BOTTOM_COLOR_GAMMA_ENABLE)
+			pipe_config->gamma_enable = true;
+
+		if (tmp & SKL_BOTTOM_COLOR_CSC_ENABLE)
+			pipe_config->csc_enable = true;
+	} else {
+		i9xx_get_pipe_color_config(pipe_config);
+	}
 
 	power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
 	if (intel_display_power_get_if_enabled(dev_priv, power_domain)) {
@@ -10100,7 +10137,12 @@ i845_cursor_max_stride(struct intel_plane *plane,
 
 static u32 i845_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state)
 {
-	return CURSOR_GAMMA_ENABLE;
+	u32 cntl = 0;
+
+	if (crtc_state->gamma_enable)
+		cntl |= CURSOR_GAMMA_ENABLE;
+
+	return cntl;
 }
 
 static u32 i845_cursor_ctl(const struct intel_crtc_state *crtc_state,
@@ -10254,9 +10296,10 @@ static u32 i9xx_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state)
 	if (INTEL_GEN(dev_priv) >= 11)
 		return cntl;
 
-	cntl |= MCURSOR_GAMMA_ENABLE;
+	if (crtc_state->gamma_enable)
+		cntl |= MCURSOR_GAMMA_ENABLE;
 
-	if (HAS_DDI(dev_priv))
+	if (crtc_state->csc_enable)
 		cntl |= MCURSOR_PIPE_CSC_ENABLE;
 
 	if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
@@ -11245,16 +11288,11 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc,
 			return ret;
 	}
 
-	if (mode_changed || crtc_state->color_mgmt_changed) {
+	if (mode_changed || pipe_config->update_pipe ||
+	    crtc_state->color_mgmt_changed) {
 		ret = intel_color_check(pipe_config);
 		if (ret)
 			return ret;
-
-		/*
-		 * Changing color management on Intel hardware is
-		 * handled as part of planes update.
-		 */
-		crtc_state->planes_changed = true;
 	}
 
 	ret = 0;
@@ -11425,6 +11463,16 @@ intel_dump_m_n_config(struct intel_crtc_state *pipe_config, char *id,
 		      m_n->link_m, m_n->link_n, m_n->tu);
 }
 
+static void
+intel_dump_infoframe(struct drm_i915_private *dev_priv,
+		     const union hdmi_infoframe *frame)
+{
+	if ((drm_debug & DRM_UT_KMS) == 0)
+		return;
+
+	hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, frame);
+}
+
 #define OUTPUT_TYPE(x) [INTEL_OUTPUT_ ## x] = #x
 
 static const char * const output_type_str[] = {
@@ -11528,6 +11576,22 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
 	DRM_DEBUG_KMS("audio: %i, infoframes: %i\n",
 		      pipe_config->has_audio, pipe_config->has_infoframe);
 
+	DRM_DEBUG_KMS("infoframes enabled: 0x%x\n",
+		      pipe_config->infoframes.enable);
+
+	if (pipe_config->infoframes.enable &
+	    intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GENERAL_CONTROL))
+		DRM_DEBUG_KMS("GCP: 0x%x\n", pipe_config->infoframes.gcp);
+	if (pipe_config->infoframes.enable &
+	    intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI))
+		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.avi);
+	if (pipe_config->infoframes.enable &
+	    intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_SPD))
+		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.spd);
+	if (pipe_config->infoframes.enable &
+	    intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_VENDOR))
+		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.hdmi);
+
 	DRM_DEBUG_KMS("requested mode:\n");
 	drm_mode_debug_printmodeline(&pipe_config->base.mode);
 	DRM_DEBUG_KMS("adjusted mode:\n");
@@ -11676,7 +11740,7 @@ clear_intel_crtc_state(struct intel_crtc_state *crtc_state)
 	saved_state->shared_dpll = crtc_state->shared_dpll;
 	saved_state->dpll_hw_state = crtc_state->dpll_hw_state;
 	saved_state->pch_pfit.force_thru = crtc_state->pch_pfit.force_thru;
-	saved_state->ips_force_disable = crtc_state->ips_force_disable;
+	saved_state->crc_enabled = crtc_state->crc_enabled;
 	if (IS_G4X(dev_priv) ||
 	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
 		saved_state->wm = crtc_state->wm;
@@ -11895,6 +11959,37 @@ intel_compare_link_m_n(const struct intel_link_m_n *m_n,
 	return false;
 }
 
+static bool
+intel_compare_infoframe(const union hdmi_infoframe *a,
+			const union hdmi_infoframe *b)
+{
+	return memcmp(a, b, sizeof(*a)) == 0;
+}
+
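memcmp() also compares padding bytes, so a bytewise comparison is only
sound if both infoframes were built in zero-initialized storage; that
presumably holds here because the crtc state is cleared before either
side is filled in. The assumption, sketched:

    union hdmi_infoframe a = {}, b = {};    /* zeroes padding as well */
    /* ... unpack HW readout into a, computed state into b ... */
    bool equal = memcmp(&a, &b, sizeof(a)) == 0;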
+static void
+pipe_config_infoframe_err(struct drm_i915_private *dev_priv,
+			  bool adjust, const char *name,
+			  const union hdmi_infoframe *a,
+			  const union hdmi_infoframe *b)
+{
+	if (adjust) {
+		if ((drm_debug & DRM_UT_KMS) == 0)
+			return;
+
+		drm_dbg(DRM_UT_KMS, "mismatch in %s infoframe", name);
+		drm_dbg(DRM_UT_KMS, "expected:");
+		hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, a);
+		drm_dbg(DRM_UT_KMS, "found");
+		hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, b);
+	} else {
+		drm_err("mismatch in %s infoframe", name);
+		drm_err("expected:");
+		hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, a);
+		drm_err("found");
+		hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, b);
+	}
+}
+
 static void __printf(3, 4)
 pipe_config_err(bool adjust, const char *name, const char *format, ...)
 {
@@ -12078,7 +12173,17 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv,
 	} \
 } while (0)
 
-#define PIPE_CONF_QUIRK(quirk)	\
+#define PIPE_CONF_CHECK_INFOFRAME(name) do { \
+	if (!intel_compare_infoframe(&current_config->infoframes.name, \
+				     &pipe_config->infoframes.name)) { \
+		pipe_config_infoframe_err(dev_priv, adjust, __stringify(name), \
+					  &current_config->infoframes.name, \
+					  &pipe_config->infoframes.name); \
+		ret = false; \
+	} \
+} while (0)
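The do { ... } while (0) wrapper makes the multi-statement macro expand
as exactly one statement, so it composes safely with an unbraced
if/else. A minimal sketch of the idiom:

    #define CHECK(cond) do { \
            if (!(cond)) \
                    ret = false; \
    } while (0)

    if (adjust)
            CHECK(a == b);  /* expands safely; the ';' is consumed */
    else
            ret = true;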
+
+#define PIPE_CONF_QUIRK(quirk) \
 	((current_config->quirks | pipe_config->quirks) & (quirk))
 
 	PIPE_CONF_CHECK_I(cpu_transcoder);
@@ -12159,6 +12264,14 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv,
 
 		PIPE_CONF_CHECK_I(scaler_state.scaler_id);
 		PIPE_CONF_CHECK_CLOCK_FUZZY(pixel_rate);
+
+		PIPE_CONF_CHECK_X(gamma_mode);
+		if (IS_CHERRYVIEW(dev_priv))
+			PIPE_CONF_CHECK_X(cgm_mode);
+		else
+			PIPE_CONF_CHECK_X(csc_mode);
+		PIPE_CONF_CHECK_BOOL(gamma_enable);
+		PIPE_CONF_CHECK_BOOL(csc_enable);
 	}
 
 	PIPE_CONF_CHECK_BOOL(double_wide);
@@ -12207,6 +12320,12 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv,
 
 	PIPE_CONF_CHECK_I(min_voltage_level);
 
+	PIPE_CONF_CHECK_X(infoframes.enable);
+	PIPE_CONF_CHECK_X(infoframes.gcp);
+	PIPE_CONF_CHECK_INFOFRAME(avi);
+	PIPE_CONF_CHECK_INFOFRAME(spd);
+	PIPE_CONF_CHECK_INFOFRAME(hdmi);
+
 #undef PIPE_CONF_CHECK_X
 #undef PIPE_CONF_CHECK_I
 #undef PIPE_CONF_CHECK_BOOL
@@ -12241,12 +12360,15 @@ static void verify_wm_state(struct drm_crtc *crtc,
 			    struct drm_crtc_state *new_state)
 {
 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
-	struct skl_ddb_allocation hw_ddb, *sw_ddb;
-	struct skl_pipe_wm hw_wm, *sw_wm;
-	struct skl_plane_wm *hw_plane_wm, *sw_plane_wm;
+	struct skl_hw_state {
+		struct skl_ddb_entry ddb_y[I915_MAX_PLANES];
+		struct skl_ddb_entry ddb_uv[I915_MAX_PLANES];
+		struct skl_ddb_allocation ddb;
+		struct skl_pipe_wm wm;
+	} *hw;
+	struct skl_ddb_allocation *sw_ddb;
+	struct skl_pipe_wm *sw_wm;
 	struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
-	struct skl_ddb_entry hw_ddb_y[I915_MAX_PLANES];
-	struct skl_ddb_entry hw_ddb_uv[I915_MAX_PLANES];
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	const enum pipe pipe = intel_crtc->pipe;
 	int plane, level, max_level = ilk_wm_max_level(dev_priv);
@@ -12254,22 +12376,29 @@ static void verify_wm_state(struct drm_crtc *crtc,
 	if (INTEL_GEN(dev_priv) < 9 || !new_state->active)
 		return;
 
-	skl_pipe_wm_get_hw_state(intel_crtc, &hw_wm);
+	hw = kzalloc(sizeof(*hw), GFP_KERNEL);
+	if (!hw)
+		return;
+
+	skl_pipe_wm_get_hw_state(intel_crtc, &hw->wm);
 	sw_wm = &to_intel_crtc_state(new_state)->wm.skl.optimal;
 
-	skl_pipe_ddb_get_hw_state(intel_crtc, hw_ddb_y, hw_ddb_uv);
+	skl_pipe_ddb_get_hw_state(intel_crtc, hw->ddb_y, hw->ddb_uv);
 
-	skl_ddb_get_hw_state(dev_priv, &hw_ddb);
+	skl_ddb_get_hw_state(dev_priv, &hw->ddb);
 	sw_ddb = &dev_priv->wm.skl_hw.ddb;
 
-	if (INTEL_GEN(dev_priv) >= 11)
-		if (hw_ddb.enabled_slices != sw_ddb->enabled_slices)
-			DRM_ERROR("mismatch in DBUF Slices (expected %u, got %u)\n",
-				  sw_ddb->enabled_slices,
-				  hw_ddb.enabled_slices);
+	if (INTEL_GEN(dev_priv) >= 11 &&
+	    hw->ddb.enabled_slices != sw_ddb->enabled_slices)
+		DRM_ERROR("mismatch in DBUF Slices (expected %u, got %u)\n",
+			  sw_ddb->enabled_slices,
+			  hw->ddb.enabled_slices);
+
 	/* planes */
 	for_each_universal_plane(dev_priv, pipe, plane) {
-		hw_plane_wm = &hw_wm.planes[plane];
+		struct skl_plane_wm *hw_plane_wm, *sw_plane_wm;
+
+		hw_plane_wm = &hw->wm.planes[plane];
 		sw_plane_wm = &sw_wm->planes[plane];
 
 		/* Watermarks */
@@ -12301,7 +12430,7 @@ static void verify_wm_state(struct drm_crtc *crtc,
 		}
 
 		/* DDB */
-		hw_ddb_entry = &hw_ddb_y[plane];
+		hw_ddb_entry = &hw->ddb_y[plane];
 		sw_ddb_entry = &to_intel_crtc_state(new_state)->wm.skl.plane_ddb_y[plane];
 
 		if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
@@ -12319,7 +12448,9 @@ static void verify_wm_state(struct drm_crtc *crtc,
 	 * once the plane becomes visible, we can skip this check
 	 */
 	if (1) {
-		hw_plane_wm = &hw_wm.planes[PLANE_CURSOR];
+		struct skl_plane_wm *hw_plane_wm, *sw_plane_wm;
+
+		hw_plane_wm = &hw->wm.planes[PLANE_CURSOR];
 		sw_plane_wm = &sw_wm->planes[PLANE_CURSOR];
 
 		/* Watermarks */
@@ -12351,7 +12482,7 @@ static void verify_wm_state(struct drm_crtc *crtc,
 		}
 
 		/* DDB */
-		hw_ddb_entry = &hw_ddb_y[PLANE_CURSOR];
+		hw_ddb_entry = &hw->ddb_y[PLANE_CURSOR];
 		sw_ddb_entry = &to_intel_crtc_state(new_state)->wm.skl.plane_ddb_y[PLANE_CURSOR];
 
 		if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
@@ -12361,6 +12492,8 @@ static void verify_wm_state(struct drm_crtc *crtc,
 				  hw_ddb_entry->start, hw_ddb_entry->end);
 		}
 	}
+
+	kfree(hw);
 }
 
 static void
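Gathering the skl_pipe_wm and skl_ddb_entry scratch into one kzalloc'd
struct moves several KiB off the kernel stack, which is only a few
pages deep. The shape of the fix, sketched:

    #include <linux/slab.h>

    struct big_scratch {
            u64 data[512];          /* ~4 KiB: too large for the stack */
    };

    static void example(void)
    {
            struct big_scratch *s = kzalloc(sizeof(*s), GFP_KERNEL);

            if (!s)
                    return;
            /* ... fill with HW state and compare ... */
            kfree(s);
    }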
@@ -12517,7 +12650,8 @@ intel_verify_planes(struct intel_atomic_state *state)
 
 	for_each_new_intel_plane_in_state(state, plane,
 					  plane_state, i)
-		assert_plane(plane, plane_state->base.visible);
+		assert_plane(plane, plane_state->slave ||
+			     plane_state->base.visible);
 }
 
 static void
@@ -13576,7 +13710,7 @@ static int do_rps_boost(struct wait_queue_entry *_wait,
 	 * vblank without our intervention, so leave RPS alone.
 	 */
 	if (!i915_request_started(rq))
-		gen6_rps_boost(rq, NULL);
+		gen6_rps_boost(rq);
 	i915_request_put(rq);
 
 	drm_crtc_vblank_put(wait->crtc);
@@ -14109,14 +14243,11 @@ intel_legacy_cursor_update(struct drm_plane *plane,
 	 */
 	crtc_state->active_planes = new_crtc_state->active_planes;
 
-	if (plane->state->visible) {
-		trace_intel_update_plane(plane, to_intel_crtc(crtc));
-		intel_plane->update_plane(intel_plane, crtc_state,
-					  to_intel_plane_state(plane->state));
-	} else {
-		trace_intel_disable_plane(plane, to_intel_crtc(crtc));
-		intel_plane->disable_plane(intel_plane, crtc_state);
-	}
+	if (plane->state->visible)
+		intel_update_plane(intel_plane, crtc_state,
+				   to_intel_plane_state(plane->state));
+	else
+		intel_disable_plane(intel_plane, crtc_state);
 
 	intel_plane_unpin_fb(to_intel_plane_state(old_plane_state));
 
@@ -14566,7 +14697,7 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
 	if (!HAS_DISPLAY(dev_priv))
 		return;
 
-	if (IS_ICELAKE(dev_priv)) {
+	if (INTEL_GEN(dev_priv) >= 11) {
 		intel_ddi_init(dev_priv, PORT_A);
 		intel_ddi_init(dev_priv, PORT_B);
 		intel_ddi_init(dev_priv, PORT_C);
@@ -15467,6 +15598,8 @@ int intel_modeset_init(struct drm_device *dev)
 	intel_update_czclk(dev_priv);
 	intel_modeset_init_hw(dev);
 
+	intel_hdcp_component_init(dev_priv);
+
 	if (dev_priv->max_cdclk_freq == 0)
 		intel_update_max_cdclk(dev_priv);
 
@@ -15542,7 +15675,7 @@ void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
 		      pipe_name(pipe), clock.vco, clock.dot);
 
 	fp = i9xx_dpll_compute_fp(&clock);
-	dpll = (I915_READ(DPLL(pipe)) & DPLL_DVO_2X_MODE) |
+	dpll = DPLL_DVO_2X_MODE |
 		DPLL_VGA_MODE_DIS |
 		((clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT) |
 		PLL_P2_DIVIDE_BY_4 |
@@ -16328,6 +16461,8 @@ void intel_modeset_cleanup(struct drm_device *dev)
 	/* flush any delayed tasks or pending work */
 	flush_scheduled_work();
 
+	intel_hdcp_component_fini(dev_priv);
+
 	drm_mode_config_cleanup(dev);
 
 	intel_overlay_cleanup(dev_priv);
@@ -16374,8 +16509,6 @@ struct intel_display_error_state {
 
 	u32 power_well_driver;
 
-	int num_transcoders;
-
 	struct intel_cursor_error_state {
 		u32 control;
 		u32 position;
@@ -16400,6 +16533,7 @@ struct intel_display_error_state {
 	} plane[I915_MAX_PIPES];
 
 	struct intel_transcoder_error_state {
+		bool available;
 		bool power_domain_on;
 		enum transcoder cpu_transcoder;
 
@@ -16426,6 +16560,8 @@ intel_display_capture_error_state(struct drm_i915_private *dev_priv)
 	};
 	int i;
 
+	BUILD_BUG_ON(ARRAY_SIZE(transcoders) != ARRAY_SIZE(error->transcoder));
+
 	if (!HAS_DISPLAY(dev_priv))
 		return NULL;
 
@@ -16466,14 +16602,13 @@ intel_display_capture_error_state(struct drm_i915_private *dev_priv)
 			error->pipe[i].stat = I915_READ(PIPESTAT(i));
 	}
 
-	/* Note: this does not include DSI transcoders. */
-	error->num_transcoders = INTEL_INFO(dev_priv)->num_pipes;
-	if (HAS_DDI(dev_priv))
-		error->num_transcoders++; /* Account for eDP. */
-
-	for (i = 0; i < error->num_transcoders; i++) {
+	for (i = 0; i < ARRAY_SIZE(error->transcoder); i++) {
 		enum transcoder cpu_transcoder = transcoders[i];
 
+		if (!INTEL_INFO(dev_priv)->trans_offsets[cpu_transcoder])
+			continue;
+
+		error->transcoder[i].available = true;
 		error->transcoder[i].power_domain_on =
 			__intel_display_power_is_enabled(dev_priv,
 				POWER_DOMAIN_TRANSCODER(cpu_transcoder));
@@ -16537,7 +16672,10 @@ intel_display_print_error_state(struct drm_i915_error_state_buf *m,
 		err_printf(m, "  BASE: %08x\n", error->cursor[i].base);
 	}
 
-	for (i = 0; i < error->num_transcoders; i++) {
+	for (i = 0; i < ARRAY_SIZE(error->transcoder); i++) {
+		if (!error->transcoder[i].available)
+			continue;
+
 		err_printf(m, "CPU transcoder: %s\n",
 			   transcoder_name(error->transcoder[i].cpu_transcoder));
 		err_printf(m, "  Power: %s\n",
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index cf709835fb9a9eece3c0761c21c53c34a25b7e22..7c043e8f6298f3a4f994ae39d626601558fcb6c7 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -949,8 +949,11 @@ static void intel_pps_get_registers(struct intel_dp *intel_dp,
 	regs->pp_stat = PP_STATUS(pps_idx);
 	regs->pp_on = PP_ON_DELAYS(pps_idx);
 	regs->pp_off = PP_OFF_DELAYS(pps_idx);
-	if (!IS_GEN9_LP(dev_priv) && !HAS_PCH_CNP(dev_priv) &&
-	    !HAS_PCH_ICP(dev_priv))
+
+	/* Cycle delay moved from PP_DIVISOR to PP_CONTROL */
+	if (IS_GEN9_LP(dev_priv) || INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
+		regs->pp_div = INVALID_MMIO_REG;
+	else
 		regs->pp_div = PP_DIVISOR(pps_idx);
 }
 
@@ -4780,7 +4783,7 @@ static void intel_dp_check_service_irq(struct intel_dp *intel_dp)
 		intel_dp_handle_test_request(intel_dp);
 
 	if (val & DP_CP_IRQ)
-		intel_hdcp_check_link(intel_dp->attached_connector);
+		intel_hdcp_handle_cp_irq(intel_dp->attached_connector);
 
 	if (val & DP_SINK_SPECIFIC_IRQ)
 		DRM_DEBUG_DRIVER("Sink specific irq unhandled\n");
@@ -5623,6 +5626,18 @@ void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
 		edp_panel_vdd_off_sync(intel_dp);
 }
 
+static void intel_dp_hdcp_wait_for_cp_irq(struct intel_hdcp *hdcp, int timeout)
+{
+	long ret;
+
+#define C (hdcp->cp_irq_count_cached != atomic_read(&hdcp->cp_irq_count))
+	ret = wait_event_interruptible_timeout(hdcp->cp_irq_queue, C,
+					       msecs_to_jiffies(timeout));
+
+	if (!ret)
+		DRM_DEBUG_KMS("Timedout at waiting for CP_IRQ\n");
+}
+
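wait_event_interruptible_timeout() returns the remaining jiffies (> 0)
when the condition became true, 0 on timeout with the condition still
false, and -ERESTARTSYS when interrupted by a signal; only the timeout
case is logged above. The contract, sketched:

    long ret = wait_event_interruptible_timeout(wq, done,
                                                msecs_to_jiffies(50));
    if (ret == 0)
            pr_debug("timed out\n");        /* condition never came true */
    else if (ret < 0)
            pr_debug("interrupted\n");      /* signal: -ERESTARTSYS */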
 static
 int intel_dp_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port,
 				u8 *an)
@@ -5847,6 +5862,336 @@ int intel_dp_hdcp_capable(struct intel_digital_port *intel_dig_port,
 	return 0;
 }
 
+struct hdcp2_dp_errata_stream_type {
+	u8	msg_id;
+	u8	stream_type;
+} __packed;
+
+static struct hdcp2_dp_msg_data {
+	u8 msg_id;
+	u32 offset;
+	bool msg_detectable;
+	u32 timeout;
+	u32 timeout2; /* Added for the non-paired situation */
+} hdcp2_msg_data[] = {
+	{ HDCP_2_2_AKE_INIT, DP_HDCP_2_2_AKE_INIT_OFFSET, false, 0, 0 },
+	{ HDCP_2_2_AKE_SEND_CERT, DP_HDCP_2_2_AKE_SEND_CERT_OFFSET,
+	  false, HDCP_2_2_CERT_TIMEOUT_MS, 0 },
+	{ HDCP_2_2_AKE_NO_STORED_KM, DP_HDCP_2_2_AKE_NO_STORED_KM_OFFSET,
+	  false, 0, 0 },
+	{ HDCP_2_2_AKE_STORED_KM, DP_HDCP_2_2_AKE_STORED_KM_OFFSET,
+	  false, 0, 0 },
+	{ HDCP_2_2_AKE_SEND_HPRIME, DP_HDCP_2_2_AKE_SEND_HPRIME_OFFSET,
+	  true, HDCP_2_2_HPRIME_PAIRED_TIMEOUT_MS,
+	  HDCP_2_2_HPRIME_NO_PAIRED_TIMEOUT_MS },
+	{ HDCP_2_2_AKE_SEND_PAIRING_INFO,
+	  DP_HDCP_2_2_AKE_SEND_PAIRING_INFO_OFFSET,
+	  true, HDCP_2_2_PAIRING_TIMEOUT_MS, 0 },
+	{ HDCP_2_2_LC_INIT, DP_HDCP_2_2_LC_INIT_OFFSET, false, 0, 0 },
+	{ HDCP_2_2_LC_SEND_LPRIME, DP_HDCP_2_2_LC_SEND_LPRIME_OFFSET,
+	  false, HDCP_2_2_DP_LPRIME_TIMEOUT_MS, 0 },
+	{ HDCP_2_2_SKE_SEND_EKS, DP_HDCP_2_2_SKE_SEND_EKS_OFFSET,
+	  false, 0, 0 },
+	{ HDCP_2_2_REP_SEND_RECVID_LIST,
+	  DP_HDCP_2_2_REP_SEND_RECVID_LIST_OFFSET,
+	  true, HDCP_2_2_RECVID_LIST_TIMEOUT_MS, 0 },
+	{ HDCP_2_2_REP_SEND_ACK, DP_HDCP_2_2_REP_SEND_ACK_OFFSET,
+	  false, 0, 0 },
+	{ HDCP_2_2_REP_STREAM_MANAGE,
+	  DP_HDCP_2_2_REP_STREAM_MANAGE_OFFSET,
+	  false, 0, 0 },
+	{ HDCP_2_2_REP_STREAM_READY, DP_HDCP_2_2_REP_STREAM_READY_OFFSET,
+	  false, HDCP_2_2_STREAM_READY_TIMEOUT_MS, 0 },
+/* local define to shovel this through the write_2_2 interface */
+#define HDCP_2_2_ERRATA_DP_STREAM_TYPE	50
+	{ HDCP_2_2_ERRATA_DP_STREAM_TYPE,
+	  DP_HDCP_2_2_REG_STREAM_TYPE_OFFSET,
+	  false, 0, 0 },
+};
+
+static inline
+int intel_dp_hdcp2_read_rx_status(struct intel_digital_port *intel_dig_port,
+				  u8 *rx_status)
+{
+	ssize_t ret;
+
+	ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux,
+			       DP_HDCP_2_2_REG_RXSTATUS_OFFSET, rx_status,
+			       HDCP_2_2_DP_RXSTATUS_LEN);
+	if (ret != HDCP_2_2_DP_RXSTATUS_LEN) {
+		DRM_DEBUG_KMS("Read bstatus from DP/AUX failed (%zd)\n", ret);
+		return ret >= 0 ? -EIO : ret;
+	}
+
+	return 0;
+}
+
+static
+int hdcp2_detect_msg_availability(struct intel_digital_port *intel_dig_port,
+				  u8 msg_id, bool *msg_ready)
+{
+	u8 rx_status;
+	int ret;
+
+	*msg_ready = false;
+	ret = intel_dp_hdcp2_read_rx_status(intel_dig_port, &rx_status);
+	if (ret < 0)
+		return ret;
+
+	switch (msg_id) {
+	case HDCP_2_2_AKE_SEND_HPRIME:
+		if (HDCP_2_2_DP_RXSTATUS_H_PRIME(rx_status))
+			*msg_ready = true;
+		break;
+	case HDCP_2_2_AKE_SEND_PAIRING_INFO:
+		if (HDCP_2_2_DP_RXSTATUS_PAIRING(rx_status))
+			*msg_ready = true;
+		break;
+	case HDCP_2_2_REP_SEND_RECVID_LIST:
+		if (HDCP_2_2_DP_RXSTATUS_READY(rx_status))
+			*msg_ready = true;
+		break;
+	default:
+		DRM_ERROR("Unidentified msg_id: %d\n", msg_id);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static ssize_t
+intel_dp_hdcp2_wait_for_msg(struct intel_digital_port *intel_dig_port,
+			    struct hdcp2_dp_msg_data *hdcp2_msg_data)
+{
+	struct intel_dp *dp = &intel_dig_port->dp;
+	struct intel_hdcp *hdcp = &dp->attached_connector->hdcp;
+	u8 msg_id = hdcp2_msg_data->msg_id;
+	int ret, timeout;
+	bool msg_ready = false;
+
+	if (msg_id == HDCP_2_2_AKE_SEND_HPRIME && !hdcp->is_paired)
+		timeout = hdcp2_msg_data->timeout2;
+	else
+		timeout = hdcp2_msg_data->timeout;
+
+	/*
+	 * There is no way to detect the CERT, LPRIME and STREAM_READY
+	 * availability, so just wait out the timeout and then read the msg.
+	 */
+	if (!hdcp2_msg_data->msg_detectable) {
+		msleep(timeout);
+		ret = 0;
+	} else {
+		/*
+		 * We only care whether the msg is available once the timeout
+		 * has elapsed, so ignore the result of the CP_IRQ wait itself.
+		 */
+		intel_dp_hdcp_wait_for_cp_irq(hdcp, timeout);
+		ret = hdcp2_detect_msg_availability(intel_dig_port,
+						    msg_id, &msg_ready);
+		if (!msg_ready)
+			ret = -ETIMEDOUT;
+	}
+
+	if (ret)
+		DRM_DEBUG_KMS("msg_id %d, ret %d, timeout(mSec): %d\n",
+			      hdcp2_msg_data->msg_id, ret, timeout);
+
+	return ret;
+}
+
+static struct hdcp2_dp_msg_data *get_hdcp2_dp_msg_data(u8 msg_id)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(hdcp2_msg_data); i++)
+		if (hdcp2_msg_data[i].msg_id == msg_id)
+			return &hdcp2_msg_data[i];
+
+	return NULL;
+}
+
+static
+int intel_dp_hdcp2_write_msg(struct intel_digital_port *intel_dig_port,
+			     void *buf, size_t size)
+{
+	struct intel_dp *dp = &intel_dig_port->dp;
+	struct intel_hdcp *hdcp = &dp->attached_connector->hdcp;
+	unsigned int offset;
+	u8 *byte = buf;
+	ssize_t ret, bytes_to_write, len;
+	struct hdcp2_dp_msg_data *hdcp2_msg_data;
+
+	hdcp2_msg_data = get_hdcp2_dp_msg_data(*byte);
+	if (!hdcp2_msg_data)
+		return -EINVAL;
+
+	offset = hdcp2_msg_data->offset;
+
+	/* No msg_id in DP HDCP2.2 msgs */
+	bytes_to_write = size - 1;
+	byte++;
+
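+	/* Cache the CP_IRQ count so the reply wait only sees new interrupts */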
+	hdcp->cp_irq_count_cached = atomic_read(&hdcp->cp_irq_count);
+
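+	/* An AUX transaction moves at most DP_AUX_MAX_PAYLOAD_BYTES (16) */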
+	while (bytes_to_write) {
+		len = bytes_to_write > DP_AUX_MAX_PAYLOAD_BYTES ?
+				DP_AUX_MAX_PAYLOAD_BYTES : bytes_to_write;
+
+		ret = drm_dp_dpcd_write(&intel_dig_port->dp.aux,
+					offset, (void *)byte, len);
+		if (ret < 0)
+			return ret;
+
+		bytes_to_write -= ret;
+		byte += ret;
+		offset += ret;
+	}
+
+	return size;
+}
+
+static
+ssize_t get_receiver_id_list_size(struct intel_digital_port *intel_dig_port)
+{
+	u8 rx_info[HDCP_2_2_RXINFO_LEN];
+	u32 dev_cnt;
+	ssize_t ret;
+
+	ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux,
+			       DP_HDCP_2_2_REG_RXINFO_OFFSET,
+			       (void *)rx_info, HDCP_2_2_RXINFO_LEN);
+	if (ret != HDCP_2_2_RXINFO_LEN)
+		return ret >= 0 ? -EIO : ret;
+
+	dev_cnt = (HDCP_2_2_DEV_COUNT_HI(rx_info[0]) << 4 |
+		   HDCP_2_2_DEV_COUNT_LO(rx_info[1]));
+
+	if (dev_cnt > HDCP_2_2_MAX_DEVICE_COUNT)
+		dev_cnt = HDCP_2_2_MAX_DEVICE_COUNT;
+
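+	/* Trim the nominal msg size down to the receiver ids actually sent */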
+	ret = sizeof(struct hdcp2_rep_send_receiverid_list) -
+		HDCP_2_2_RECEIVER_IDS_MAX_LEN +
+		(dev_cnt * HDCP_2_2_RECEIVER_ID_LEN);
+
+	return ret;
+}
+
+static
+int intel_dp_hdcp2_read_msg(struct intel_digital_port *intel_dig_port,
+			    u8 msg_id, void *buf, size_t size)
+{
+	unsigned int offset;
+	u8 *byte = buf;
+	ssize_t ret, bytes_to_recv, len;
+	struct hdcp2_dp_msg_data *hdcp2_msg_data;
+
+	hdcp2_msg_data = get_hdcp2_dp_msg_data(msg_id);
+	if (!hdcp2_msg_data)
+		return -EINVAL;
+	offset = hdcp2_msg_data->offset;
+
+	ret = intel_dp_hdcp2_wait_for_msg(intel_dig_port, hdcp2_msg_data);
+	if (ret < 0)
+		return ret;
+
+	if (msg_id == HDCP_2_2_REP_SEND_RECVID_LIST) {
+		ret = get_receiver_id_list_size(intel_dig_port);
+		if (ret < 0)
+			return ret;
+
+		size = ret;
+	}
+
+	/* DP adaptation msgs have no msg_id byte, so skip over it */
+	bytes_to_recv = size - 1;
+	byte++;
+
+	while (bytes_to_recv) {
+		len = bytes_to_recv > DP_AUX_MAX_PAYLOAD_BYTES ?
+		      DP_AUX_MAX_PAYLOAD_BYTES : bytes_to_recv;
+
+		ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, offset,
+				       (void *)byte, len);
+		if (ret < 0) {
+			DRM_DEBUG_KMS("msg_id %d, ret %zd\n", msg_id, ret);
+			return ret;
+		}
+
+		bytes_to_recv -= ret;
+		byte += ret;
+		offset += ret;
+	}
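+
+	/* Put back the msg_id byte that DP strips from the wire format */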
+	byte = buf;
+	*byte = msg_id;
+
+	return size;
+}
+
+static
+int intel_dp_hdcp2_config_stream_type(struct intel_digital_port *intel_dig_port,
+				      bool is_repeater, u8 content_type)
+{
+	struct hdcp2_dp_errata_stream_type stream_type_msg;
+
+	if (is_repeater)
+		return 0;
+
+	/*
+	 * Errata for DP: As the stream type is used for encryption, the
+	 * receiver must be told the stream type so it can decrypt the
+	 * content. A repeater is told the stream type later, as part of
+	 * its authentication.
+	 */
+	stream_type_msg.msg_id = HDCP_2_2_ERRATA_DP_STREAM_TYPE;
+	stream_type_msg.stream_type = content_type;
+
+	return intel_dp_hdcp2_write_msg(intel_dig_port, &stream_type_msg,
+					sizeof(stream_type_msg));
+}
+
+static
+int intel_dp_hdcp2_check_link(struct intel_digital_port *intel_dig_port)
+{
+	u8 rx_status;
+	int ret;
+
+	ret = intel_dp_hdcp2_read_rx_status(intel_dig_port, &rx_status);
+	if (ret)
+		return ret;
+
+	if (HDCP_2_2_DP_RXSTATUS_REAUTH_REQ(rx_status))
+		ret = HDCP_REAUTH_REQUEST;
+	else if (HDCP_2_2_DP_RXSTATUS_LINK_FAILED(rx_status))
+		ret = HDCP_LINK_INTEGRITY_FAILURE;
+	else if (HDCP_2_2_DP_RXSTATUS_READY(rx_status))
+		ret = HDCP_TOPOLOGY_CHANGE;
+
+	return ret;
+}
+
+static
+int intel_dp_hdcp2_capable(struct intel_digital_port *intel_dig_port,
+			   bool *capable)
+{
+	u8 rx_caps[3];
+	int ret;
+
+	*capable = false;
+	ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux,
+			       DP_HDCP_2_2_REG_RX_CAPS_OFFSET,
+			       rx_caps, HDCP_2_2_RXCAPS_LEN);
+	if (ret != HDCP_2_2_RXCAPS_LEN)
+		return ret >= 0 ? -EIO : ret;
+
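+	/* HDCP2.2 capable receivers advertise version 0x02 in RxCaps */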
+	if (rx_caps[0] == HDCP_2_2_RX_CAPS_VERSION_VAL &&
+	    HDCP_2_2_DP_HDCP_CAPABLE(rx_caps[2]))
+		*capable = true;
+
+	return 0;
+}
+
 static const struct intel_hdcp_shim intel_dp_hdcp_shim = {
 	.write_an_aksv = intel_dp_hdcp_write_an_aksv,
 	.read_bksv = intel_dp_hdcp_read_bksv,
@@ -5859,6 +6204,12 @@ static const struct intel_hdcp_shim intel_dp_hdcp_shim = {
 	.toggle_signalling = intel_dp_hdcp_toggle_signalling,
 	.check_link = intel_dp_hdcp_check_link,
 	.hdcp_capable = intel_dp_hdcp_capable,
+	.write_2_2_msg = intel_dp_hdcp2_write_msg,
+	.read_2_2_msg = intel_dp_hdcp2_read_msg,
+	.config_stream_type = intel_dp_hdcp2_config_stream_type,
+	.check_2_2_link = intel_dp_hdcp2_check_link,
+	.hdcp_2_2_capable = intel_dp_hdcp2_capable,
+	.protocol = HDCP_PROTOCOL_DP,
 };
 
 static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
@@ -6072,43 +6423,34 @@ static void
 intel_pps_readout_hw_state(struct intel_dp *intel_dp, struct edp_power_seq *seq)
 {
 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-	u32 pp_on, pp_off, pp_div = 0, pp_ctl = 0;
+	u32 pp_on, pp_off, pp_ctl;
 	struct pps_registers regs;
 
 	intel_pps_get_registers(intel_dp, &regs);
 
-	/* Workaround: Need to write PP_CONTROL with the unlock key as
-	 * the very first thing. */
 	pp_ctl = ironlake_get_pp_control(intel_dp);
 
+	/* Ensure PPS is unlocked */
+	if (!HAS_DDI(dev_priv))
+		I915_WRITE(regs.pp_ctrl, pp_ctl);
+
 	pp_on = I915_READ(regs.pp_on);
 	pp_off = I915_READ(regs.pp_off);
-	if (!IS_GEN9_LP(dev_priv) && !HAS_PCH_CNP(dev_priv) &&
-	    !HAS_PCH_ICP(dev_priv)) {
-		I915_WRITE(regs.pp_ctrl, pp_ctl);
-		pp_div = I915_READ(regs.pp_div);
-	}
 
 	/* Pull timing values out of registers */
-	seq->t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
-		     PANEL_POWER_UP_DELAY_SHIFT;
-
-	seq->t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
-		  PANEL_LIGHT_ON_DELAY_SHIFT;
+	seq->t1_t3 = REG_FIELD_GET(PANEL_POWER_UP_DELAY_MASK, pp_on);
+	seq->t8 = REG_FIELD_GET(PANEL_LIGHT_ON_DELAY_MASK, pp_on);
+	seq->t9 = REG_FIELD_GET(PANEL_LIGHT_OFF_DELAY_MASK, pp_off);
+	seq->t10 = REG_FIELD_GET(PANEL_POWER_DOWN_DELAY_MASK, pp_off);
 
-	seq->t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
-		  PANEL_LIGHT_OFF_DELAY_SHIFT;
+	if (i915_mmio_reg_valid(regs.pp_div)) {
+		u32 pp_div;
 
-	seq->t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
-		   PANEL_POWER_DOWN_DELAY_SHIFT;
+		pp_div = I915_READ(regs.pp_div);
 
-	if (IS_GEN9_LP(dev_priv) || HAS_PCH_CNP(dev_priv) ||
-	    HAS_PCH_ICP(dev_priv)) {
-		seq->t11_t12 = ((pp_ctl & BXT_POWER_CYCLE_DELAY_MASK) >>
-				BXT_POWER_CYCLE_DELAY_SHIFT) * 1000;
+		seq->t11_t12 = REG_FIELD_GET(PANEL_POWER_CYCLE_DELAY_MASK, pp_div) * 1000;
 	} else {
-		seq->t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
-		       PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
+		seq->t11_t12 = REG_FIELD_GET(BXT_POWER_CYCLE_DELAY_MASK, pp_ctl) * 1000;
 	}
 }
 
@@ -6233,7 +6575,7 @@ intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
 					      bool force_disable_vdd)
 {
 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-	u32 pp_on, pp_off, pp_div, port_sel = 0;
+	u32 pp_on, pp_off, port_sel = 0;
 	int div = dev_priv->rawclk_freq / 1000;
 	struct pps_registers regs;
 	enum port port = dp_to_dig_port(intel_dp)->base.port;
@@ -6268,23 +6610,10 @@ intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
 		I915_WRITE(regs.pp_ctrl, pp);
 	}
 
-	pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
-		(seq->t8 << PANEL_LIGHT_ON_DELAY_SHIFT);
-	pp_off = (seq->t9 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
-		 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
-	/* Compute the divisor for the pp clock, simply match the Bspec
-	 * formula. */
-	if (IS_GEN9_LP(dev_priv) || HAS_PCH_CNP(dev_priv) ||
-	    HAS_PCH_ICP(dev_priv)) {
-		pp_div = I915_READ(regs.pp_ctrl);
-		pp_div &= ~BXT_POWER_CYCLE_DELAY_MASK;
-		pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
-				<< BXT_POWER_CYCLE_DELAY_SHIFT);
-	} else {
-		pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
-		pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
-				<< PANEL_POWER_CYCLE_DELAY_SHIFT);
-	}
+	pp_on = REG_FIELD_PREP(PANEL_POWER_UP_DELAY_MASK, seq->t1_t3) |
+		REG_FIELD_PREP(PANEL_LIGHT_ON_DELAY_MASK, seq->t8);
+	pp_off = REG_FIELD_PREP(PANEL_LIGHT_OFF_DELAY_MASK, seq->t9) |
+		REG_FIELD_PREP(PANEL_POWER_DOWN_DELAY_MASK, seq->t10);
 
 	/* Haswell doesn't have any port selection bits for the panel
 	 * power sequencer any more. */
@@ -6311,19 +6640,29 @@ intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
 
 	I915_WRITE(regs.pp_on, pp_on);
 	I915_WRITE(regs.pp_off, pp_off);
-	if (IS_GEN9_LP(dev_priv) || HAS_PCH_CNP(dev_priv) ||
-	    HAS_PCH_ICP(dev_priv))
-		I915_WRITE(regs.pp_ctrl, pp_div);
-	else
-		I915_WRITE(regs.pp_div, pp_div);
+
+	/*
+	 * Compute the divisor for the pp clock, simply match the Bspec formula.
+	 */
+	if (i915_mmio_reg_valid(regs.pp_div)) {
+		I915_WRITE(regs.pp_div,
+			   REG_FIELD_PREP(PP_REFERENCE_DIVIDER_MASK, (100 * div) / 2 - 1) |
+			   REG_FIELD_PREP(PANEL_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000)));
+	} else {
+		u32 pp_ctl;
+
+		pp_ctl = I915_READ(regs.pp_ctrl);
+		pp_ctl &= ~BXT_POWER_CYCLE_DELAY_MASK;
+		pp_ctl |= REG_FIELD_PREP(BXT_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000));
+		I915_WRITE(regs.pp_ctrl, pp_ctl);
+	}
 
 	DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
 		      I915_READ(regs.pp_on),
 		      I915_READ(regs.pp_off),
-		      (IS_GEN9_LP(dev_priv) || HAS_PCH_CNP(dev_priv)  ||
-		       HAS_PCH_ICP(dev_priv)) ?
-		      (I915_READ(regs.pp_ctrl) & BXT_POWER_CYCLE_DELAY_MASK) :
-		      I915_READ(regs.pp_div));
+		      i915_mmio_reg_valid(regs.pp_div) ?
+		      I915_READ(regs.pp_div) :
+		      (I915_READ(regs.pp_ctrl) & BXT_POWER_CYCLE_DELAY_MASK));
 }
 
 static void intel_dp_pps_init(struct intel_dp *intel_dp)
@@ -6734,7 +7073,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
 	 * eDP and LVDS bail out early in this case to prevent interfering
 	 * with an already powered-on LVDS power sequencer.
 	 */
-	if (intel_get_lvds_encoder(&dev_priv->drm)) {
+	if (intel_get_lvds_encoder(dev_priv)) {
 		WARN_ON(!(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)));
 		DRM_INFO("LVDS was detected, not registering eDP\n");
 
diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.c b/drivers/gpu/drm/i915/intel_dpll_mgr.c
index 0a42d11c4c3377e36886442a1748b72e167ddb85..dfe6a7114d563fe29697b2ee3db1873176ef4caf 100644
--- a/drivers/gpu/drm/i915/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/intel_dpll_mgr.c
@@ -241,11 +241,11 @@ void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state)
 }
 
 static struct intel_shared_dpll *
-intel_find_shared_dpll(struct intel_crtc *crtc,
-		       struct intel_crtc_state *crtc_state,
+intel_find_shared_dpll(struct intel_crtc_state *crtc_state,
 		       enum intel_dpll_id range_min,
 		       enum intel_dpll_id range_max)
 {
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	struct intel_shared_dpll *pll, *unused_pll = NULL;
 	struct intel_shared_dpll_state *shared_dpll;
@@ -420,9 +420,10 @@ static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
 }
 
 static struct intel_shared_dpll *
-ibx_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
+ibx_get_dpll(struct intel_crtc_state *crtc_state,
 	     struct intel_encoder *encoder)
 {
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	struct intel_shared_dpll *pll;
 	enum intel_dpll_id i;
@@ -436,7 +437,7 @@ ibx_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
 			      crtc->base.base.id, crtc->base.name,
 			      pll->info->name);
 	} else {
-		pll = intel_find_shared_dpll(crtc, crtc_state,
+		pll = intel_find_shared_dpll(crtc_state,
 					     DPLL_ID_PCH_PLL_A,
 					     DPLL_ID_PCH_PLL_B);
 	}
@@ -764,15 +765,13 @@ hsw_ddi_calculate_wrpll(int clock /* in Hz */,
 	*r2_out = best.r2;
 }
 
-static struct intel_shared_dpll *hsw_ddi_hdmi_get_dpll(int clock,
-						       struct intel_crtc *crtc,
-						       struct intel_crtc_state *crtc_state)
+static struct intel_shared_dpll *hsw_ddi_hdmi_get_dpll(struct intel_crtc_state *crtc_state)
 {
 	struct intel_shared_dpll *pll;
 	u32 val;
 	unsigned int p, n2, r2;
 
-	hsw_ddi_calculate_wrpll(clock * 1000, &r2, &n2, &p);
+	hsw_ddi_calculate_wrpll(crtc_state->port_clock * 1000, &r2, &n2, &p);
 
 	val = WRPLL_PLL_ENABLE | WRPLL_PLL_LCPLL |
 	      WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
@@ -780,7 +779,7 @@ static struct intel_shared_dpll *hsw_ddi_hdmi_get_dpll(int clock,
 
 	crtc_state->dpll_hw_state.wrpll = val;
 
-	pll = intel_find_shared_dpll(crtc, crtc_state,
+	pll = intel_find_shared_dpll(crtc_state,
 				     DPLL_ID_WRPLL1, DPLL_ID_WRPLL2);
 
 	if (!pll)
@@ -790,11 +789,12 @@ static struct intel_shared_dpll *hsw_ddi_hdmi_get_dpll(int clock,
 }
 
 static struct intel_shared_dpll *
-hsw_ddi_dp_get_dpll(struct intel_encoder *encoder, int clock)
+hsw_ddi_dp_get_dpll(struct intel_crtc_state *crtc_state)
 {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
 	struct intel_shared_dpll *pll;
 	enum intel_dpll_id pll_id;
+	int clock = crtc_state->port_clock;
 
 	switch (clock / 2) {
 	case 81000:
@@ -820,19 +820,18 @@ hsw_ddi_dp_get_dpll(struct intel_encoder *encoder, int clock)
 }
 
 static struct intel_shared_dpll *
-hsw_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
+hsw_get_dpll(struct intel_crtc_state *crtc_state,
 	     struct intel_encoder *encoder)
 {
 	struct intel_shared_dpll *pll;
-	int clock = crtc_state->port_clock;
 
 	memset(&crtc_state->dpll_hw_state, 0,
 	       sizeof(crtc_state->dpll_hw_state));
 
 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
-		pll = hsw_ddi_hdmi_get_dpll(clock, crtc, crtc_state);
+		pll = hsw_ddi_hdmi_get_dpll(crtc_state);
 	} else if (intel_crtc_has_dp_encoder(crtc_state)) {
-		pll = hsw_ddi_dp_get_dpll(encoder, clock);
+		pll = hsw_ddi_dp_get_dpll(crtc_state);
 	} else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
 		if (WARN_ON(crtc_state->port_clock / 2 != 135000))
 			return NULL;
@@ -840,7 +839,7 @@ hsw_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
 		crtc_state->dpll_hw_state.spll =
 			SPLL_PLL_ENABLE | SPLL_PLL_FREQ_1350MHz | SPLL_PLL_SSC;
 
-		pll = intel_find_shared_dpll(crtc, crtc_state,
+		pll = intel_find_shared_dpll(crtc_state,
 					     DPLL_ID_SPLL, DPLL_ID_SPLL);
 	} else {
 		return NULL;
@@ -1308,9 +1307,7 @@ skl_ddi_calculate_wrpll(int clock /* in Hz */,
 	return true;
 }
 
-static bool skl_ddi_hdmi_pll_dividers(struct intel_crtc *crtc,
-				      struct intel_crtc_state *crtc_state,
-				      int clock)
+static bool skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
 {
 	u32 ctrl1, cfgcr1, cfgcr2;
 	struct skl_wrpll_params wrpll_params = { 0, };
@@ -1323,7 +1320,8 @@ static bool skl_ddi_hdmi_pll_dividers(struct intel_crtc *crtc,
 
 	ctrl1 |= DPLL_CTRL1_HDMI_MODE(0);
 
-	if (!skl_ddi_calculate_wrpll(clock * 1000, &wrpll_params))
+	if (!skl_ddi_calculate_wrpll(crtc_state->port_clock * 1000,
+				     &wrpll_params))
 		return false;
 
 	cfgcr1 = DPLL_CFGCR1_FREQ_ENABLE |
@@ -1346,8 +1344,7 @@ static bool skl_ddi_hdmi_pll_dividers(struct intel_crtc *crtc,
 }
 
 static bool
-skl_ddi_dp_set_dpll_hw_state(int clock,
-			     struct intel_dpll_hw_state *dpll_hw_state)
+skl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
 {
 	u32 ctrl1;
 
@@ -1356,7 +1353,7 @@ skl_ddi_dp_set_dpll_hw_state(int clock,
 	 * as the DPLL id in this function.
 	 */
 	ctrl1 = DPLL_CTRL1_OVERRIDE(0);
-	switch (clock / 2) {
+	switch (crtc_state->port_clock / 2) {
 	case 81000:
 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, 0);
 		break;
@@ -1378,44 +1375,43 @@ skl_ddi_dp_set_dpll_hw_state(int clock,
 		break;
 	}
 
-	dpll_hw_state->ctrl1 = ctrl1;
+	memset(&crtc_state->dpll_hw_state, 0,
+	       sizeof(crtc_state->dpll_hw_state));
+
+	crtc_state->dpll_hw_state.ctrl1 = ctrl1;
+
 	return true;
 }
 
 static struct intel_shared_dpll *
-skl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
+skl_get_dpll(struct intel_crtc_state *crtc_state,
 	     struct intel_encoder *encoder)
 {
 	struct intel_shared_dpll *pll;
-	int clock = crtc_state->port_clock;
 	bool bret;
-	struct intel_dpll_hw_state dpll_hw_state;
-
-	memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));
 
 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
-		bret = skl_ddi_hdmi_pll_dividers(crtc, crtc_state, clock);
+		bret = skl_ddi_hdmi_pll_dividers(crtc_state);
 		if (!bret) {
 			DRM_DEBUG_KMS("Could not get HDMI pll dividers.\n");
 			return NULL;
 		}
 	} else if (intel_crtc_has_dp_encoder(crtc_state)) {
-		bret = skl_ddi_dp_set_dpll_hw_state(clock, &dpll_hw_state);
+		bret = skl_ddi_dp_set_dpll_hw_state(crtc_state);
 		if (!bret) {
 			DRM_DEBUG_KMS("Could not set DP dpll HW state.\n");
 			return NULL;
 		}
-		crtc_state->dpll_hw_state = dpll_hw_state;
 	} else {
 		return NULL;
 	}
 
 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
-		pll = intel_find_shared_dpll(crtc, crtc_state,
+		pll = intel_find_shared_dpll(crtc_state,
 					     DPLL_ID_SKL_DPLL0,
 					     DPLL_ID_SKL_DPLL0);
 	else
-		pll = intel_find_shared_dpll(crtc, crtc_state,
+		pll = intel_find_shared_dpll(crtc_state,
 					     DPLL_ID_SKL_DPLL1,
 					     DPLL_ID_SKL_DPLL3);
 	if (!pll)
@@ -1692,10 +1688,10 @@ static const struct bxt_clk_div bxt_dp_clk_val[] = {
 };
 
 static bool
-bxt_ddi_hdmi_pll_dividers(struct intel_crtc *intel_crtc,
-			  struct intel_crtc_state *crtc_state, int clock,
+bxt_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state,
 			  struct bxt_clk_div *clk_div)
 {
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
 	struct dpll best_clock;
 
 	/* Calculate HDMI div */
@@ -1703,9 +1699,10 @@ bxt_ddi_hdmi_pll_dividers(struct intel_crtc *intel_crtc,
 	 * FIXME: tie the following calculation into
 	 * i9xx_crtc_compute_clock
 	 */
-	if (!bxt_find_best_dpll(crtc_state, clock, &best_clock)) {
+	if (!bxt_find_best_dpll(crtc_state, &best_clock)) {
 		DRM_DEBUG_DRIVER("no PLL dividers found for clock %d pipe %c\n",
-				 clock, pipe_name(intel_crtc->pipe));
+				 crtc_state->port_clock,
+				 pipe_name(crtc->pipe));
 		return false;
 	}
 
@@ -1722,8 +1719,10 @@ bxt_ddi_hdmi_pll_dividers(struct intel_crtc *intel_crtc,
 	return true;
 }
 
-static void bxt_ddi_dp_pll_dividers(int clock, struct bxt_clk_div *clk_div)
+static void bxt_ddi_dp_pll_dividers(struct intel_crtc_state *crtc_state,
+				    struct bxt_clk_div *clk_div)
 {
+	int clock = crtc_state->port_clock;
 	int i;
 
 	*clk_div = bxt_dp_clk_val[0];
@@ -1737,14 +1736,17 @@ static void bxt_ddi_dp_pll_dividers(int clock, struct bxt_clk_div *clk_div)
 	clk_div->vco = clock * 10 / 2 * clk_div->p1 * clk_div->p2;
 }
 
-static bool bxt_ddi_set_dpll_hw_state(int clock,
-			  struct bxt_clk_div *clk_div,
-			  struct intel_dpll_hw_state *dpll_hw_state)
+static bool bxt_ddi_set_dpll_hw_state(struct intel_crtc_state *crtc_state,
+				      const struct bxt_clk_div *clk_div)
 {
+	struct intel_dpll_hw_state *dpll_hw_state = &crtc_state->dpll_hw_state;
+	int clock = crtc_state->port_clock;
 	int vco = clk_div->vco;
 	u32 prop_coef, int_coef, gain_ctl, targ_cnt;
 	u32 lanestagger;
 
+	memset(dpll_hw_state, 0, sizeof(*dpll_hw_state));
+
 	if (vco >= 6200000 && vco <= 6700000) {
 		prop_coef = 4;
 		int_coef = 9;
@@ -1804,55 +1806,45 @@ static bool bxt_ddi_set_dpll_hw_state(int clock,
 }
 
 static bool
-bxt_ddi_dp_set_dpll_hw_state(int clock,
-			     struct intel_dpll_hw_state *dpll_hw_state)
+bxt_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
 {
-	struct bxt_clk_div clk_div = {0};
+	struct bxt_clk_div clk_div = {};
 
-	bxt_ddi_dp_pll_dividers(clock, &clk_div);
+	bxt_ddi_dp_pll_dividers(crtc_state, &clk_div);
 
-	return bxt_ddi_set_dpll_hw_state(clock, &clk_div, dpll_hw_state);
+	return bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
 }
 
 static bool
-bxt_ddi_hdmi_set_dpll_hw_state(struct intel_crtc *intel_crtc,
-			       struct intel_crtc_state *crtc_state, int clock,
-			       struct intel_dpll_hw_state *dpll_hw_state)
+bxt_ddi_hdmi_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
 {
-	struct bxt_clk_div clk_div = { };
+	struct bxt_clk_div clk_div = {};
 
-	bxt_ddi_hdmi_pll_dividers(intel_crtc, crtc_state, clock, &clk_div);
+	bxt_ddi_hdmi_pll_dividers(crtc_state, &clk_div);
 
-	return bxt_ddi_set_dpll_hw_state(clock, &clk_div, dpll_hw_state);
+	return bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
 }
 
 static struct intel_shared_dpll *
-bxt_get_dpll(struct intel_crtc *crtc,
-		struct intel_crtc_state *crtc_state,
-		struct intel_encoder *encoder)
+bxt_get_dpll(struct intel_crtc_state *crtc_state,
+	     struct intel_encoder *encoder)
 {
-	struct intel_dpll_hw_state dpll_hw_state = { };
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	struct intel_shared_dpll *pll;
-	int i, clock = crtc_state->port_clock;
+	enum intel_dpll_id id;
 
 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) &&
-	    !bxt_ddi_hdmi_set_dpll_hw_state(crtc, crtc_state, clock,
-					    &dpll_hw_state))
+	    !bxt_ddi_hdmi_set_dpll_hw_state(crtc_state))
 		return NULL;
 
 	if (intel_crtc_has_dp_encoder(crtc_state) &&
-	    !bxt_ddi_dp_set_dpll_hw_state(clock, &dpll_hw_state))
+	    !bxt_ddi_dp_set_dpll_hw_state(crtc_state))
 		return NULL;
 
-	memset(&crtc_state->dpll_hw_state, 0,
-	       sizeof(crtc_state->dpll_hw_state));
-
-	crtc_state->dpll_hw_state = dpll_hw_state;
-
 	/* 1:1 mapping between ports and PLLs */
-	i = (enum intel_dpll_id) encoder->port;
-	pll = intel_get_shared_dpll_by_id(dev_priv, i);
+	id = (enum intel_dpll_id) encoder->port;
+	pll = intel_get_shared_dpll_by_id(dev_priv, id);
 
 	DRM_DEBUG_KMS("[CRTC:%d:%s] using pre-allocated %s\n",
 		      crtc->base.base.id, crtc->base.name, pll->info->name);
@@ -1911,8 +1903,7 @@ static void intel_ddi_pll_init(struct drm_device *dev)
 struct intel_dpll_mgr {
 	const struct dpll_info *dpll_info;
 
-	struct intel_shared_dpll *(*get_dpll)(struct intel_crtc *crtc,
-					      struct intel_crtc_state *crtc_state,
+	struct intel_shared_dpll *(*get_dpll)(struct intel_crtc_state *crtc_state,
 					      struct intel_encoder *encoder);
 
 	void (*dump_hw_state)(struct drm_i915_private *dev_priv,
@@ -2242,11 +2233,11 @@ int cnl_hdmi_pll_ref_clock(struct drm_i915_private *dev_priv)
 }
 
 static bool
-cnl_ddi_calculate_wrpll(int clock,
-			struct drm_i915_private *dev_priv,
+cnl_ddi_calculate_wrpll(struct intel_crtc_state *crtc_state,
 			struct skl_wrpll_params *wrpll_params)
 {
-	u32 afe_clock = clock * 5;
+	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+	u32 afe_clock = crtc_state->port_clock * 5;
 	u32 ref_clock;
 	u32 dco_min = 7998000;
 	u32 dco_max = 10000000;
@@ -2282,23 +2273,20 @@ cnl_ddi_calculate_wrpll(int clock,
 
 	ref_clock = cnl_hdmi_pll_ref_clock(dev_priv);
 
-	cnl_wrpll_params_populate(wrpll_params, best_dco, ref_clock, pdiv, qdiv,
-				  kdiv);
+	cnl_wrpll_params_populate(wrpll_params, best_dco, ref_clock,
+				  pdiv, qdiv, kdiv);
 
 	return true;
 }
 
-static bool cnl_ddi_hdmi_pll_dividers(struct intel_crtc *crtc,
-				      struct intel_crtc_state *crtc_state,
-				      int clock)
+static bool cnl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
 {
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	u32 cfgcr0, cfgcr1;
 	struct skl_wrpll_params wrpll_params = { 0, };
 
 	cfgcr0 = DPLL_CFGCR0_HDMI_MODE;
 
-	if (!cnl_ddi_calculate_wrpll(clock, dev_priv, &wrpll_params))
+	if (!cnl_ddi_calculate_wrpll(crtc_state, &wrpll_params))
 		return false;
 
 	cfgcr0 |= DPLL_CFGCR0_DCO_FRACTION(wrpll_params.dco_fraction) |
@@ -2319,14 +2307,13 @@ static bool cnl_ddi_hdmi_pll_dividers(struct intel_crtc *crtc,
 }
 
 static bool
-cnl_ddi_dp_set_dpll_hw_state(int clock,
-			     struct intel_dpll_hw_state *dpll_hw_state)
+cnl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
 {
 	u32 cfgcr0;
 
 	cfgcr0 = DPLL_CFGCR0_SSC_ENABLE;
 
-	switch (clock / 2) {
+	switch (crtc_state->port_clock / 2) {
 	case 81000:
 		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_810;
 		break;
@@ -2356,41 +2343,40 @@ cnl_ddi_dp_set_dpll_hw_state(int clock,
 		break;
 	}
 
-	dpll_hw_state->cfgcr0 = cfgcr0;
+	memset(&crtc_state->dpll_hw_state, 0,
+	       sizeof(crtc_state->dpll_hw_state));
+
+	crtc_state->dpll_hw_state.cfgcr0 = cfgcr0;
+
 	return true;
 }
 
 static struct intel_shared_dpll *
-cnl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
+cnl_get_dpll(struct intel_crtc_state *crtc_state,
 	     struct intel_encoder *encoder)
 {
 	struct intel_shared_dpll *pll;
-	int clock = crtc_state->port_clock;
 	bool bret;
-	struct intel_dpll_hw_state dpll_hw_state;
-
-	memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));
 
 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
-		bret = cnl_ddi_hdmi_pll_dividers(crtc, crtc_state, clock);
+		bret = cnl_ddi_hdmi_pll_dividers(crtc_state);
 		if (!bret) {
 			DRM_DEBUG_KMS("Could not get HDMI pll dividers.\n");
 			return NULL;
 		}
 	} else if (intel_crtc_has_dp_encoder(crtc_state)) {
-		bret = cnl_ddi_dp_set_dpll_hw_state(clock, &dpll_hw_state);
+		bret = cnl_ddi_dp_set_dpll_hw_state(crtc_state);
 		if (!bret) {
 			DRM_DEBUG_KMS("Could not set DP dpll HW state.\n");
 			return NULL;
 		}
-		crtc_state->dpll_hw_state = dpll_hw_state;
 	} else {
 		DRM_DEBUG_KMS("Skip DPLL setup for output_types 0x%x\n",
 			      crtc_state->output_types);
 		return NULL;
 	}
 
-	pll = intel_find_shared_dpll(crtc, crtc_state,
+	pll = intel_find_shared_dpll(crtc_state,
 				     DPLL_ID_SKL_DPLL0,
 				     DPLL_ID_SKL_DPLL2);
 	if (!pll) {
@@ -2431,47 +2417,69 @@ static const struct intel_dpll_mgr cnl_pll_mgr = {
 	.dump_hw_state = cnl_dump_hw_state,
 };
 
+struct icl_combo_pll_params {
+	int clock;
+	struct skl_wrpll_params wrpll;
+};
+
 /*
  * These values are already adjusted: they're the bits we write to the
  * registers, not the logical values.
  */
-static const struct skl_wrpll_params icl_dp_combo_pll_24MHz_values[] = {
-	{ .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [0]: 5.4 */
-	  .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0},
-	{ .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [1]: 2.7 */
-	  .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0},
-	{ .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [2]: 1.62 */
-	  .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0},
-	{ .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [3]: 3.24 */
-	  .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0},
-	{ .dco_integer = 0x168, .dco_fraction = 0x0000,		/* [4]: 2.16 */
-	  .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2},
-	{ .dco_integer = 0x168, .dco_fraction = 0x0000,		/* [5]: 4.32 */
-	  .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0},
-	{ .dco_integer = 0x195, .dco_fraction = 0x0000,		/* [6]: 6.48 */
-	  .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0},
-	{ .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [7]: 8.1 */
-	  .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0},
+static const struct icl_combo_pll_params icl_dp_combo_pll_24MHz_values[] = {
+	{ 540000,
+	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [0]: 5.4 */
+	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
+	{ 270000,
+	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [1]: 2.7 */
+	    .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
+	{ 162000,
+	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [2]: 1.62 */
+	    .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
+	{ 324000,
+	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [3]: 3.24 */
+	    .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
+	{ 216000,
+	  { .dco_integer = 0x168, .dco_fraction = 0x0000,		/* [4]: 2.16 */
+	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, },
+	{ 432000,
+	  { .dco_integer = 0x168, .dco_fraction = 0x0000,		/* [5]: 4.32 */
+	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
+	{ 648000,
+	  { .dco_integer = 0x195, .dco_fraction = 0x0000,		/* [6]: 6.48 */
+	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
+	{ 810000,
+	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [7]: 8.1 */
+	    .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
 };
 
 /* Also used for 38.4 MHz values. */
-static const struct skl_wrpll_params icl_dp_combo_pll_19_2MHz_values[] = {
-	{ .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [0]: 5.4 */
-	  .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0},
-	{ .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [1]: 2.7 */
-	  .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0},
-	{ .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [2]: 1.62 */
-	  .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0},
-	{ .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [3]: 3.24 */
-	  .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0},
-	{ .dco_integer = 0x1C2, .dco_fraction = 0x0000,		/* [4]: 2.16 */
-	  .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2},
-	{ .dco_integer = 0x1C2, .dco_fraction = 0x0000,		/* [5]: 4.32 */
-	  .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0},
-	{ .dco_integer = 0x1FA, .dco_fraction = 0x2000,		/* [6]: 6.48 */
-	  .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0},
-	{ .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [7]: 8.1 */
-	  .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0},
+static const struct icl_combo_pll_params icl_dp_combo_pll_19_2MHz_values[] = {
+	{ 540000,
+	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [0]: 5.4 */
+	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
+	{ 270000,
+	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [1]: 2.7 */
+	    .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
+	{ 162000,
+	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [2]: 1.62 */
+	    .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
+	{ 324000,
+	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [3]: 3.24 */
+	    .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
+	{ 216000,
+	  { .dco_integer = 0x1C2, .dco_fraction = 0x0000,		/* [4]: 2.16 */
+	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, },
+	{ 432000,
+	  { .dco_integer = 0x1C2, .dco_fraction = 0x0000,		/* [5]: 4.32 */
+	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
+	{ 648000,
+	  { .dco_integer = 0x1FA, .dco_fraction = 0x2000,		/* [6]: 6.48 */
+	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
+	{ 810000,
+	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [7]: 8.1 */
+	    .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
 };
 
 static const struct skl_wrpll_params icl_tbt_pll_24MHz_values = {
@@ -2484,72 +2492,53 @@ static const struct skl_wrpll_params icl_tbt_pll_19_2MHz_values = {
 	.pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
 };
 
-static bool icl_calc_dp_combo_pll(struct drm_i915_private *dev_priv, int clock,
+static bool icl_calc_dp_combo_pll(struct intel_crtc_state *crtc_state,
 				  struct skl_wrpll_params *pll_params)
 {
-	const struct skl_wrpll_params *params;
-
-	params = dev_priv->cdclk.hw.ref == 24000 ?
-			icl_dp_combo_pll_24MHz_values :
-			icl_dp_combo_pll_19_2MHz_values;
+	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+	const struct icl_combo_pll_params *params =
+		dev_priv->cdclk.hw.ref == 24000 ?
+		icl_dp_combo_pll_24MHz_values :
+		icl_dp_combo_pll_19_2MHz_values;
+	int clock = crtc_state->port_clock;
+	int i;
 
-	switch (clock) {
-	case 540000:
-		*pll_params = params[0];
-		break;
-	case 270000:
-		*pll_params = params[1];
-		break;
-	case 162000:
-		*pll_params = params[2];
-		break;
-	case 324000:
-		*pll_params = params[3];
-		break;
-	case 216000:
-		*pll_params = params[4];
-		break;
-	case 432000:
-		*pll_params = params[5];
-		break;
-	case 648000:
-		*pll_params = params[6];
-		break;
-	case 810000:
-		*pll_params = params[7];
-		break;
-	default:
-		MISSING_CASE(clock);
-		return false;
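+	/*
+	 * Both refclk tables carry the same number of entries, so one
+	 * ARRAY_SIZE() works for either.
+	 */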
+	for (i = 0; i < ARRAY_SIZE(icl_dp_combo_pll_24MHz_values); i++) {
+		if (clock == params[i].clock) {
+			*pll_params = params[i].wrpll;
+			return true;
+		}
 	}
 
-	return true;
+	MISSING_CASE(clock);
+	return false;
 }
 
-static bool icl_calc_tbt_pll(struct drm_i915_private *dev_priv, int clock,
+static bool icl_calc_tbt_pll(struct intel_crtc_state *crtc_state,
 			     struct skl_wrpll_params *pll_params)
 {
+	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+
 	*pll_params = dev_priv->cdclk.hw.ref == 24000 ?
 			icl_tbt_pll_24MHz_values : icl_tbt_pll_19_2MHz_values;
 	return true;
 }
 
 static bool icl_calc_dpll_state(struct intel_crtc_state *crtc_state,
-				struct intel_encoder *encoder, int clock,
-				struct intel_dpll_hw_state *pll_state)
+				struct intel_encoder *encoder)
 {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
 	u32 cfgcr0, cfgcr1;
 	struct skl_wrpll_params pll_params = { 0 };
 	bool ret;
 
 	if (intel_port_is_tc(dev_priv, encoder->port))
-		ret = icl_calc_tbt_pll(dev_priv, clock, &pll_params);
+		ret = icl_calc_tbt_pll(crtc_state, &pll_params);
 	else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
 		 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
-		ret = cnl_ddi_calculate_wrpll(clock, dev_priv, &pll_params);
+		ret = cnl_ddi_calculate_wrpll(crtc_state, &pll_params);
 	else
-		ret = icl_calc_dp_combo_pll(dev_priv, clock, &pll_params);
+		ret = icl_calc_dp_combo_pll(crtc_state, &pll_params);
 
 	if (!ret)
 		return false;
@@ -2563,82 +2552,16 @@ static bool icl_calc_dpll_state(struct intel_crtc_state *crtc_state,
 		 DPLL_CFGCR1_PDIV(pll_params.pdiv) |
 		 DPLL_CFGCR1_CENTRAL_FREQ_8400;
 
-	pll_state->cfgcr0 = cfgcr0;
-	pll_state->cfgcr1 = cfgcr1;
-	return true;
-}
-
-int icl_calc_dp_combo_pll_link(struct drm_i915_private *dev_priv,
-			       u32 pll_id)
-{
-	u32 cfgcr0, cfgcr1;
-	u32 pdiv, kdiv, qdiv_mode, qdiv_ratio, dco_integer, dco_fraction;
-	const struct skl_wrpll_params *params;
-	int index, n_entries, link_clock;
-
-	/* Read back values from DPLL CFGCR registers */
-	cfgcr0 = I915_READ(ICL_DPLL_CFGCR0(pll_id));
-	cfgcr1 = I915_READ(ICL_DPLL_CFGCR1(pll_id));
-
-	dco_integer = cfgcr0 & DPLL_CFGCR0_DCO_INTEGER_MASK;
-	dco_fraction = (cfgcr0 & DPLL_CFGCR0_DCO_FRACTION_MASK) >>
-		DPLL_CFGCR0_DCO_FRACTION_SHIFT;
-	pdiv = (cfgcr1 & DPLL_CFGCR1_PDIV_MASK) >> DPLL_CFGCR1_PDIV_SHIFT;
-	kdiv = (cfgcr1 & DPLL_CFGCR1_KDIV_MASK) >> DPLL_CFGCR1_KDIV_SHIFT;
-	qdiv_mode = (cfgcr1 & DPLL_CFGCR1_QDIV_MODE(1)) >>
-		DPLL_CFGCR1_QDIV_MODE_SHIFT;
-	qdiv_ratio = (cfgcr1 & DPLL_CFGCR1_QDIV_RATIO_MASK) >>
-		DPLL_CFGCR1_QDIV_RATIO_SHIFT;
-
-	params = dev_priv->cdclk.hw.ref == 24000 ?
-		icl_dp_combo_pll_24MHz_values :
-		icl_dp_combo_pll_19_2MHz_values;
-	n_entries = ARRAY_SIZE(icl_dp_combo_pll_24MHz_values);
-
-	for (index = 0; index < n_entries; index++) {
-		if (dco_integer == params[index].dco_integer &&
-		    dco_fraction == params[index].dco_fraction &&
-		    pdiv == params[index].pdiv &&
-		    kdiv == params[index].kdiv &&
-		    qdiv_mode == params[index].qdiv_mode &&
-		    qdiv_ratio == params[index].qdiv_ratio)
-			break;
-	}
+	memset(&crtc_state->dpll_hw_state, 0,
+	       sizeof(crtc_state->dpll_hw_state));
 
-	/* Map PLL Index to Link Clock */
-	switch (index) {
-	default:
-		MISSING_CASE(index);
-		/* fall through */
-	case 0:
-		link_clock = 540000;
-		break;
-	case 1:
-		link_clock = 270000;
-		break;
-	case 2:
-		link_clock = 162000;
-		break;
-	case 3:
-		link_clock = 324000;
-		break;
-	case 4:
-		link_clock = 216000;
-		break;
-	case 5:
-		link_clock = 432000;
-		break;
-	case 6:
-		link_clock = 648000;
-		break;
-	case 7:
-		link_clock = 810000;
-		break;
-	}
+	crtc_state->dpll_hw_state.cfgcr0 = cfgcr0;
+	crtc_state->dpll_hw_state.cfgcr1 = cfgcr1;
 
-	return link_clock;
+	return true;
 }
 
 static enum tc_port icl_pll_id_to_tc_port(enum intel_dpll_id id)
 {
 	return id - DPLL_ID_ICL_MGPLL1;
@@ -2649,11 +2572,6 @@ enum intel_dpll_id icl_tc_port_to_pll_id(enum tc_port tc_port)
 	return tc_port + DPLL_ID_ICL_MGPLL1;
 }
 
-bool intel_dpll_is_combophy(enum intel_dpll_id id)
-{
-	return id == DPLL_ID_ICL_DPLL0 || id == DPLL_ID_ICL_DPLL1;
-}
-
 static bool icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc,
 				     u32 *target_dco_khz,
 				     struct intel_dpll_hw_state *state)
@@ -2728,12 +2646,12 @@ static bool icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc,
  * The specification for this function uses real numbers, so the math had to be
  * adapted to integer-only calculation, that's why it looks so different.
  */
-static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
-				  struct intel_encoder *encoder, int clock,
-				  struct intel_dpll_hw_state *pll_state)
+static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state)
 {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+	struct intel_dpll_hw_state *pll_state = &crtc_state->dpll_hw_state;
 	int refclk_khz = dev_priv->cdclk.hw.ref;
+	int clock = crtc_state->port_clock;
 	u32 dco_khz, m1div, m2div_int, m2div_rem, m2div_frac;
 	u32 iref_ndiv, iref_trim, iref_pulse_w;
 	u32 prop_coeff, int_coeff;
@@ -2743,6 +2661,8 @@ static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
 	bool use_ssc = false;
 	bool is_dp = !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI);
 
+	memset(pll_state, 0, sizeof(*pll_state));
+
 	if (!icl_mg_pll_find_divisors(clock, is_dp, use_ssc, &dco_khz,
 				      pll_state)) {
 		DRM_DEBUG_KMS("Failed to find divisors for clock %d\n", clock);
@@ -2892,23 +2812,20 @@ static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
 }
 
 static struct intel_shared_dpll *
-icl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
+icl_get_dpll(struct intel_crtc_state *crtc_state,
 	     struct intel_encoder *encoder)
 {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
 	struct intel_digital_port *intel_dig_port;
 	struct intel_shared_dpll *pll;
-	struct intel_dpll_hw_state pll_state = {};
 	enum port port = encoder->port;
 	enum intel_dpll_id min, max;
-	int clock = crtc_state->port_clock;
 	bool ret;
 
 	if (intel_port_is_combophy(dev_priv, port)) {
 		min = DPLL_ID_ICL_DPLL0;
 		max = DPLL_ID_ICL_DPLL1;
-		ret = icl_calc_dpll_state(crtc_state, encoder, clock,
-					  &pll_state);
+		ret = icl_calc_dpll_state(crtc_state, encoder);
 	} else if (intel_port_is_tc(dev_priv, port)) {
 		if (encoder->type == INTEL_OUTPUT_DP_MST) {
 			struct intel_dp_mst_encoder *mst_encoder;
@@ -2922,16 +2839,14 @@ icl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
 		if (intel_dig_port->tc_type == TC_PORT_TBT) {
 			min = DPLL_ID_ICL_TBTPLL;
 			max = min;
-			ret = icl_calc_dpll_state(crtc_state, encoder, clock,
-						  &pll_state);
+			ret = icl_calc_dpll_state(crtc_state, encoder);
 		} else {
 			enum tc_port tc_port;
 
 			tc_port = intel_port_to_tc(dev_priv, port);
 			min = icl_tc_port_to_pll_id(tc_port);
 			max = min;
-			ret = icl_calc_mg_pll_state(crtc_state, encoder, clock,
-						    &pll_state);
+			ret = icl_calc_mg_pll_state(crtc_state);
 		}
 	} else {
 		MISSING_CASE(port);
@@ -2943,9 +2858,8 @@ icl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
 		return NULL;
 	}
 
-	crtc_state->dpll_hw_state = pll_state;
 
-	pll = intel_find_shared_dpll(crtc, crtc_state, min, max);
+	pll = intel_find_shared_dpll(crtc_state, min, max);
 	if (!pll) {
 		DRM_DEBUG_KMS("No PLL selected\n");
 		return NULL;
@@ -2956,19 +2870,72 @@ icl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
 	return pll;
 }
 
-static i915_reg_t icl_pll_id_to_enable_reg(enum intel_dpll_id id)
+static bool mg_pll_get_hw_state(struct drm_i915_private *dev_priv,
+				struct intel_shared_dpll *pll,
+				struct intel_dpll_hw_state *hw_state)
 {
-	if (intel_dpll_is_combophy(id))
-		return CNL_DPLL_ENABLE(id);
-	else if (id == DPLL_ID_ICL_TBTPLL)
-		return TBT_PLL_ENABLE;
+	const enum intel_dpll_id id = pll->info->id;
+	enum tc_port tc_port = icl_pll_id_to_tc_port(id);
+	intel_wakeref_t wakeref;
+	bool ret = false;
+	u32 val;
+
+	wakeref = intel_display_power_get_if_enabled(dev_priv,
+						     POWER_DOMAIN_PLLS);
+	if (!wakeref)
+		return false;
 
-	return MG_PLL_ENABLE(icl_pll_id_to_tc_port(id));
+	val = I915_READ(MG_PLL_ENABLE(tc_port));
+	if (!(val & PLL_ENABLE))
+		goto out;
+
+	hw_state->mg_refclkin_ctl = I915_READ(MG_REFCLKIN_CTL(tc_port));
+	hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;
+
+	hw_state->mg_clktop2_coreclkctl1 =
+		I915_READ(MG_CLKTOP2_CORECLKCTL1(tc_port));
+	hw_state->mg_clktop2_coreclkctl1 &=
+		MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
+
+	hw_state->mg_clktop2_hsclkctl =
+		I915_READ(MG_CLKTOP2_HSCLKCTL(tc_port));
+	hw_state->mg_clktop2_hsclkctl &=
+		MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
+		MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
+		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
+		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;
+
+	hw_state->mg_pll_div0 = I915_READ(MG_PLL_DIV0(tc_port));
+	hw_state->mg_pll_div1 = I915_READ(MG_PLL_DIV1(tc_port));
+	hw_state->mg_pll_lf = I915_READ(MG_PLL_LF(tc_port));
+	hw_state->mg_pll_frac_lock = I915_READ(MG_PLL_FRAC_LOCK(tc_port));
+	hw_state->mg_pll_ssc = I915_READ(MG_PLL_SSC(tc_port));
+
+	hw_state->mg_pll_bias = I915_READ(MG_PLL_BIAS(tc_port));
+	hw_state->mg_pll_tdc_coldst_bias =
+		I915_READ(MG_PLL_TDC_COLDST_BIAS(tc_port));
+
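+	/*
+	 * With a 38.4 MHz refclk, presumably only some of the bias bits read
+	 * back reliably, hence the reduced comparison masks below.
+	 */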
+	if (dev_priv->cdclk.hw.ref == 38400) {
+		hw_state->mg_pll_tdc_coldst_bias_mask = MG_PLL_TDC_COLDST_COLDSTART;
+		hw_state->mg_pll_bias_mask = 0;
+	} else {
+		hw_state->mg_pll_tdc_coldst_bias_mask = -1U;
+		hw_state->mg_pll_bias_mask = -1U;
+	}
+
+	hw_state->mg_pll_tdc_coldst_bias &= hw_state->mg_pll_tdc_coldst_bias_mask;
+	hw_state->mg_pll_bias &= hw_state->mg_pll_bias_mask;
+
+	ret = true;
+out:
+	intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS, wakeref);
+	return ret;
 }
 
 static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv,
 				 struct intel_shared_dpll *pll,
-				 struct intel_dpll_hw_state *hw_state)
+				 struct intel_dpll_hw_state *hw_state,
+				 i915_reg_t enable_reg)
 {
 	const enum intel_dpll_id id = pll->info->id;
 	intel_wakeref_t wakeref;
@@ -2980,54 +2947,12 @@ static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv,
 	if (!wakeref)
 		return false;
 
-	val = I915_READ(icl_pll_id_to_enable_reg(id));
+	val = I915_READ(enable_reg);
 	if (!(val & PLL_ENABLE))
 		goto out;
 
-	if (intel_dpll_is_combophy(id) ||
-	    id == DPLL_ID_ICL_TBTPLL) {
-		hw_state->cfgcr0 = I915_READ(ICL_DPLL_CFGCR0(id));
-		hw_state->cfgcr1 = I915_READ(ICL_DPLL_CFGCR1(id));
-	} else {
-		enum tc_port tc_port = icl_pll_id_to_tc_port(id);
-
-		hw_state->mg_refclkin_ctl = I915_READ(MG_REFCLKIN_CTL(tc_port));
-		hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;
-
-		hw_state->mg_clktop2_coreclkctl1 =
-			I915_READ(MG_CLKTOP2_CORECLKCTL1(tc_port));
-		hw_state->mg_clktop2_coreclkctl1 &=
-			MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
-
-		hw_state->mg_clktop2_hsclkctl =
-			I915_READ(MG_CLKTOP2_HSCLKCTL(tc_port));
-		hw_state->mg_clktop2_hsclkctl &=
-			MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
-			MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
-			MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
-			MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;
-
-		hw_state->mg_pll_div0 = I915_READ(MG_PLL_DIV0(tc_port));
-		hw_state->mg_pll_div1 = I915_READ(MG_PLL_DIV1(tc_port));
-		hw_state->mg_pll_lf = I915_READ(MG_PLL_LF(tc_port));
-		hw_state->mg_pll_frac_lock = I915_READ(MG_PLL_FRAC_LOCK(tc_port));
-		hw_state->mg_pll_ssc = I915_READ(MG_PLL_SSC(tc_port));
-
-		hw_state->mg_pll_bias = I915_READ(MG_PLL_BIAS(tc_port));
-		hw_state->mg_pll_tdc_coldst_bias =
-			I915_READ(MG_PLL_TDC_COLDST_BIAS(tc_port));
-
-		if (dev_priv->cdclk.hw.ref == 38400) {
-			hw_state->mg_pll_tdc_coldst_bias_mask = MG_PLL_TDC_COLDST_COLDSTART;
-			hw_state->mg_pll_bias_mask = 0;
-		} else {
-			hw_state->mg_pll_tdc_coldst_bias_mask = -1U;
-			hw_state->mg_pll_bias_mask = -1U;
-		}
-
-		hw_state->mg_pll_tdc_coldst_bias &= hw_state->mg_pll_tdc_coldst_bias_mask;
-		hw_state->mg_pll_bias &= hw_state->mg_pll_bias_mask;
-	}
+	hw_state->cfgcr0 = I915_READ(ICL_DPLL_CFGCR0(id));
+	hw_state->cfgcr1 = I915_READ(ICL_DPLL_CFGCR1(id));
 
 	ret = true;
 out:
@@ -3035,6 +2960,21 @@ static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv,
 	return ret;
 }
 
+static bool combo_pll_get_hw_state(struct drm_i915_private *dev_priv,
+				   struct intel_shared_dpll *pll,
+				   struct intel_dpll_hw_state *hw_state)
+{
+	return icl_pll_get_hw_state(dev_priv, pll, hw_state,
+				    CNL_DPLL_ENABLE(pll->info->id));
+}
+
+static bool tbt_pll_get_hw_state(struct drm_i915_private *dev_priv,
+				 struct intel_shared_dpll *pll,
+				 struct intel_dpll_hw_state *hw_state)
+{
+	return icl_pll_get_hw_state(dev_priv, pll, hw_state, TBT_PLL_ENABLE);
+}
+
 static void icl_dpll_write(struct drm_i915_private *dev_priv,
 			   struct intel_shared_dpll *pll)
 {
@@ -3096,11 +3036,10 @@ static void icl_mg_pll_write(struct drm_i915_private *dev_priv,
 	POSTING_READ(MG_PLL_TDC_COLDST_BIAS(tc_port));
 }
 
-static void icl_pll_enable(struct drm_i915_private *dev_priv,
-			   struct intel_shared_dpll *pll)
+static void icl_pll_power_enable(struct drm_i915_private *dev_priv,
+				 struct intel_shared_dpll *pll,
+				 i915_reg_t enable_reg)
 {
-	const enum intel_dpll_id id = pll->info->id;
-	i915_reg_t enable_reg = icl_pll_id_to_enable_reg(id);
 	u32 val;
 
 	val = I915_READ(enable_reg);
@@ -3113,35 +3052,88 @@ static void icl_pll_enable(struct drm_i915_private *dev_priv,
 	 */
 	if (intel_wait_for_register(dev_priv, enable_reg, PLL_POWER_STATE,
 				    PLL_POWER_STATE, 1))
-		DRM_ERROR("PLL %d Power not enabled\n", id);
+		DRM_ERROR("PLL %d Power not enabled\n", pll->info->id);
+}
 
-	if (intel_dpll_is_combophy(id) || id == DPLL_ID_ICL_TBTPLL)
-		icl_dpll_write(dev_priv, pll);
-	else
-		icl_mg_pll_write(dev_priv, pll);
+static void icl_pll_enable(struct drm_i915_private *dev_priv,
+			   struct intel_shared_dpll *pll,
+			   i915_reg_t enable_reg)
+{
+	u32 val;
+
+	val = I915_READ(enable_reg);
+	val |= PLL_ENABLE;
+	I915_WRITE(enable_reg, val);
+
+	/* Timeout is actually 600us. */
+	if (intel_wait_for_register(dev_priv, enable_reg, PLL_LOCK, PLL_LOCK,
+				    1))
+		DRM_ERROR("PLL %d not locked\n", pll->info->id);
+}
+
+static void combo_pll_enable(struct drm_i915_private *dev_priv,
+			     struct intel_shared_dpll *pll)
+{
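+	/* ICL combo PHY PLLs reuse the CNL DPLL enable registers */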
+	i915_reg_t enable_reg = CNL_DPLL_ENABLE(pll->info->id);
+
+	icl_pll_power_enable(dev_priv, pll, enable_reg);
+
+	icl_dpll_write(dev_priv, pll);
 
 	/*
 	 * DVFS pre sequence would be here, but in our driver the cdclk code
 	 * paths should already be setting the appropriate voltage, hence we do
-	 * nothign here.
+	 * nothing here.
 	 */
 
-	val = I915_READ(enable_reg);
-	val |= PLL_ENABLE;
-	I915_WRITE(enable_reg, val);
+	icl_pll_enable(dev_priv, pll, enable_reg);
 
-	if (intel_wait_for_register(dev_priv, enable_reg, PLL_LOCK, PLL_LOCK,
-				    1)) /* 600us actually. */
-		DRM_ERROR("PLL %d not locked\n", id);
+	/* DVFS post sequence would be here. See the comment above. */
+}
+
+static void tbt_pll_enable(struct drm_i915_private *dev_priv,
+			   struct intel_shared_dpll *pll)
+{
+	icl_pll_power_enable(dev_priv, pll, TBT_PLL_ENABLE);
+
+	icl_dpll_write(dev_priv, pll);
+
+	/*
+	 * DVFS pre sequence would be here, but in our driver the cdclk code
+	 * paths should already be setting the appropriate voltage, hence we do
+	 * nothing here.
+	 */
+
+	icl_pll_enable(dev_priv, pll, TBT_PLL_ENABLE);
+
+	/* DVFS post sequence would be here. See the comment above. */
+}
+
+static void mg_pll_enable(struct drm_i915_private *dev_priv,
+			  struct intel_shared_dpll *pll)
+{
+	i915_reg_t enable_reg =
+		MG_PLL_ENABLE(icl_pll_id_to_tc_port(pll->info->id));
+
+	icl_pll_power_enable(dev_priv, pll, enable_reg);
+
+	icl_mg_pll_write(dev_priv, pll);
+
+	/*
+	 * DVFS pre sequence would be here, but in our driver the cdclk code
+	 * paths should already be setting the appropriate voltage, hence we do
+	 * nothing here.
+	 */
+
+	icl_pll_enable(dev_priv, pll, enable_reg);
 
 	/* DVFS post sequence would be here. See the comment above. */
 }
 
 static void icl_pll_disable(struct drm_i915_private *dev_priv,
-			    struct intel_shared_dpll *pll)
+			    struct intel_shared_dpll *pll,
+			    i915_reg_t enable_reg)
 {
-	const enum intel_dpll_id id = pll->info->id;
-	i915_reg_t enable_reg = icl_pll_id_to_enable_reg(id);
 	u32 val;
 
 	/* The first steps are done by intel_ddi_post_disable(). */
@@ -3158,7 +3150,7 @@ static void icl_pll_disable(struct drm_i915_private *dev_priv,
 
 	/* Timeout is actually 1us. */
 	if (intel_wait_for_register(dev_priv, enable_reg, PLL_LOCK, 0, 1))
-		DRM_ERROR("PLL %d locked\n", id);
+		DRM_ERROR("PLL %d locked\n", pll->info->id);
 
 	/* DVFS post sequence would be here. See the comment above. */
 
@@ -3172,7 +3164,28 @@ static void icl_pll_disable(struct drm_i915_private *dev_priv,
 	 */
 	if (intel_wait_for_register(dev_priv, enable_reg, PLL_POWER_STATE, 0,
 				    1))
-		DRM_ERROR("PLL %d Power not disabled\n", id);
+		DRM_ERROR("PLL %d Power not disabled\n", pll->info->id);
+}
+
+static void combo_pll_disable(struct drm_i915_private *dev_priv,
+			      struct intel_shared_dpll *pll)
+{
+	icl_pll_disable(dev_priv, pll, CNL_DPLL_ENABLE(pll->info->id));
+}
+
+static void tbt_pll_disable(struct drm_i915_private *dev_priv,
+			    struct intel_shared_dpll *pll)
+{
+	icl_pll_disable(dev_priv, pll, TBT_PLL_ENABLE);
+}
+
+static void mg_pll_disable(struct drm_i915_private *dev_priv,
+			   struct intel_shared_dpll *pll)
+{
+	i915_reg_t enable_reg =
+		MG_PLL_ENABLE(icl_pll_id_to_tc_port(pll->info->id));
+
+	icl_pll_disable(dev_priv, pll, enable_reg);
 }
 
 static void icl_dump_hw_state(struct drm_i915_private *dev_priv,
@@ -3197,20 +3210,32 @@ static void icl_dump_hw_state(struct drm_i915_private *dev_priv,
 		      hw_state->mg_pll_tdc_coldst_bias);
 }
 
-static const struct intel_shared_dpll_funcs icl_pll_funcs = {
-	.enable = icl_pll_enable,
-	.disable = icl_pll_disable,
-	.get_hw_state = icl_pll_get_hw_state,
+static const struct intel_shared_dpll_funcs combo_pll_funcs = {
+	.enable = combo_pll_enable,
+	.disable = combo_pll_disable,
+	.get_hw_state = combo_pll_get_hw_state,
+};
+
+static const struct intel_shared_dpll_funcs tbt_pll_funcs = {
+	.enable = tbt_pll_enable,
+	.disable = tbt_pll_disable,
+	.get_hw_state = tbt_pll_get_hw_state,
+};
+
+static const struct intel_shared_dpll_funcs mg_pll_funcs = {
+	.enable = mg_pll_enable,
+	.disable = mg_pll_disable,
+	.get_hw_state = mg_pll_get_hw_state,
 };
 
 static const struct dpll_info icl_plls[] = {
-	{ "DPLL 0",   &icl_pll_funcs, DPLL_ID_ICL_DPLL0,  0 },
-	{ "DPLL 1",   &icl_pll_funcs, DPLL_ID_ICL_DPLL1,  0 },
-	{ "TBT PLL",  &icl_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
-	{ "MG PLL 1", &icl_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 },
-	{ "MG PLL 2", &icl_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 },
-	{ "MG PLL 3", &icl_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 },
-	{ "MG PLL 4", &icl_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 },
+	{ "DPLL 0",   &combo_pll_funcs, DPLL_ID_ICL_DPLL0,  0 },
+	{ "DPLL 1",   &combo_pll_funcs, DPLL_ID_ICL_DPLL1,  0 },
+	{ "TBT PLL",  &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
+	{ "MG PLL 1", &mg_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 },
+	{ "MG PLL 2", &mg_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 },
+	{ "MG PLL 3", &mg_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 },
+	{ "MG PLL 4", &mg_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 },
 	{ },
 };
 
@@ -3233,7 +3258,7 @@ void intel_shared_dpll_init(struct drm_device *dev)
 	const struct dpll_info *dpll_info;
 	int i;
 
-	if (IS_ICELAKE(dev_priv))
+	if (INTEL_GEN(dev_priv) >= 11)
 		dpll_mgr = &icl_pll_mgr;
 	else if (IS_CANNONLAKE(dev_priv))
 		dpll_mgr = &cnl_pll_mgr;
@@ -3271,31 +3296,29 @@ void intel_shared_dpll_init(struct drm_device *dev)
 
 /**
  * intel_get_shared_dpll - get a shared DPLL for CRTC and encoder combination
- * @crtc: CRTC
- * @crtc_state: atomic state for @crtc
+ * @crtc_state: atomic state for the crtc
  * @encoder: encoder
  *
  * Find an appropriate DPLL for the given CRTC and encoder combination. A
- * reference from the @crtc to the returned pll is registered in the atomic
- * state. That configuration is made effective by calling
+ * reference from the @crtc_state to the returned pll is registered in the
+ * atomic state. That configuration is made effective by calling
  * intel_shared_dpll_swap_state(). The reference should be released by calling
  * intel_release_shared_dpll().
  *
  * Returns:
- * A shared DPLL to be used by @crtc and @encoder with the given @crtc_state.
+ * A shared DPLL to be used with the given @crtc_state and @encoder.
  */
 struct intel_shared_dpll *
-intel_get_shared_dpll(struct intel_crtc *crtc,
-		      struct intel_crtc_state *crtc_state,
+intel_get_shared_dpll(struct intel_crtc_state *crtc_state,
 		      struct intel_encoder *encoder)
 {
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
 	const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll_mgr;
 
 	if (WARN_ON(!dpll_mgr))
 		return NULL;
 
-	return dpll_mgr->get_dpll(crtc, crtc_state, encoder);
+	return dpll_mgr->get_dpll(crtc_state, encoder);
 }
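+
+/*
+ * Illustrative use of the API described above (a sketch, not code from this
+ * patch; the drm_atomic_state variable and error handling are assumed):
+ *
+ *	pll = intel_get_shared_dpll(crtc_state, encoder);
+ *	if (!pll)
+ *		return -EINVAL;
+ *
+ *	...build the rest of the atomic state...
+ *
+ *	intel_shared_dpll_swap_state(state);	make the reference effective
+ *	...
+ *	intel_release_shared_dpll(pll, crtc, state);	on teardown
+ */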
 
 /**
diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.h b/drivers/gpu/drm/i915/intel_dpll_mgr.h
index 40e8391a92f24dd2ec116e4950a01ceb962905b3..bd8124cc81edff33e15244b936c6e77ba92a12cb 100644
--- a/drivers/gpu/drm/i915/intel_dpll_mgr.h
+++ b/drivers/gpu/drm/i915/intel_dpll_mgr.h
@@ -327,8 +327,7 @@ void assert_shared_dpll(struct drm_i915_private *dev_priv,
 			bool state);
 #define assert_shared_dpll_enabled(d, p) assert_shared_dpll(d, p, true)
 #define assert_shared_dpll_disabled(d, p) assert_shared_dpll(d, p, false)
-struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc,
-						struct intel_crtc_state *state,
+struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc_state *state,
 						struct intel_encoder *encoder);
 void intel_release_shared_dpll(struct intel_shared_dpll *dpll,
 			       struct intel_crtc *crtc,
@@ -341,8 +340,6 @@ void intel_shared_dpll_init(struct drm_device *dev);
 
 void intel_dpll_dump_hw_state(struct drm_i915_private *dev_priv,
 			      struct intel_dpll_hw_state *hw_state);
-int icl_calc_dp_combo_pll_link(struct drm_i915_private *dev_priv,
-			       u32 pll_id);
 int cnl_hdmi_pll_ref_clock(struct drm_i915_private *dev_priv);
 enum intel_dpll_id icl_tc_port_to_pll_id(enum tc_port tc_port);
 bool intel_dpll_is_combophy(enum intel_dpll_id id);
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 375f51d14dda344d9256429249c1bbb601bb5fd3..4d7ae579fc92d2dfd3e32ec1ab065ed69a1e6943 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -41,6 +41,7 @@
 #include <drm/drm_rect.h>
 #include <drm/drm_vblank.h>
 #include <drm/drm_atomic.h>
+#include <drm/i915_mei_hdcp_interface.h>
 #include <media/cec-notifier.h>
 
 struct drm_printer;
@@ -323,6 +324,13 @@ struct intel_panel {
 
 struct intel_digital_port;
 
+enum check_link_response {
+	HDCP_LINK_PROTECTED	= 0,
+	HDCP_TOPOLOGY_CHANGE,
+	HDCP_LINK_INTEGRITY_FAILURE,
+	HDCP_REAUTH_REQUEST
+};
+
 /*
  * This structure serves as a translation layer between the generic HDCP code
  * and the bus-specific code. What that means is that HDCP over HDMI differs
@@ -395,6 +403,32 @@ struct intel_hdcp_shim {
 	/* Detects panel's hdcp capability. This is optional for HDMI. */
 	int (*hdcp_capable)(struct intel_digital_port *intel_dig_port,
 			    bool *hdcp_capable);
+
+	/* HDCP adaptation (DP/HDMI) required on the port */
+	enum hdcp_wired_protocol protocol;
+
+	/* Detects whether sink is HDCP2.2 capable */
+	int (*hdcp_2_2_capable)(struct intel_digital_port *intel_dig_port,
+				bool *capable);
+
+	/* Write HDCP2.2 messages */
+	int (*write_2_2_msg)(struct intel_digital_port *intel_dig_port,
+			     void *buf, size_t size);
+
+	/* Read HDCP2.2 messages */
+	int (*read_2_2_msg)(struct intel_digital_port *intel_dig_port,
+			    u8 msg_id, void *buf, size_t size);
+
+	/*
+	 * Implementation of the DP HDCP2.2 errata for communicating the
+	 * stream type to receivers. In DP HDCP2.2 the stream type is one of
+	 * the inputs to the HDCP2.2 cipher for encryption/decryption. Not
+	 * applicable for HDMI.
+	 */
+	int (*config_stream_type)(struct intel_digital_port *intel_dig_port,
+				  bool is_repeater, u8 type);
+
+	/* HDCP2.2 Link Integrity Check */
+	int (*check_2_2_link)(struct intel_digital_port *intel_dig_port);
 };
 
 struct intel_hdcp {
@@ -404,6 +438,50 @@ struct intel_hdcp {
 	u64 value;
 	struct delayed_work check_work;
 	struct work_struct prop_work;
+
+	/* HDCP1.4 Encryption status */
+	bool hdcp_encrypted;
+
+	/* HDCP2.2 related definitions */
+	/* Flag indicates whether this connector supports HDCP2.2 or not. */
+	bool hdcp2_supported;
+
+	/* HDCP2.2 Encryption status */
+	bool hdcp2_encrypted;
+
+	/*
+	 * Content Stream Type defined by the content owner. TYPE0 (0x0)
+	 * content can flow over a link protected by HDCP2.2 or HDCP1.4,
+	 * whereas TYPE1 (0x1) content can flow only over a link protected by
+	 * HDCP2.2.
+	 */
+	u8 content_type;
+	struct hdcp_port_data port_data;
+
+	bool is_paired;
+	bool is_repeater;
+
+	/*
+	 * Count of ReceiverID_Lists received. Initialized to 0 at AKE_INIT.
+	 * Incremented after processing a RepeaterAuth_Send_ReceiverID_List.
+	 * When it rolls over, re-auth has to be triggered.
+	 */
+	u32 seq_num_v;
+
+	/*
+	 * Count of RepeaterAuth_Stream_Manage messages propagated.
+	 * Initialized to 0 at AKE_INIT. Incremented after every successful
+	 * transmission of a RepeaterAuth_Stream_Manage message. When it rolls
+	 * over, re-auth has to be triggered.
+	 */
+	u32 seq_num_m;
+
+	/*
+	 * Wait queue to signal the CP_IRQ. Used by the waiters to read the
+	 * information made available by the HDCP DP sink.
+	 */
+	wait_queue_head_t cp_irq_queue;
+	atomic_t cp_irq_count;
+	int cp_irq_count_cached;
 };
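+
+/*
+ * A sketch of the rules described in the comments above (the helper names
+ * here are hypothetical; only the struct intel_hdcp fields are real):
+ *
+ *	TYPE1 (0x1) content must never flow over anything weaker than
+ *	HDCP2.2:
+ *
+ *		if (hdcp->content_type == 0x1 && !hdcp->hdcp2_encrypted)
+ *			disable_stream();
+ *
+ *	and a roll-over of either sequence counter forces re-authentication:
+ *
+ *		if (++hdcp->seq_num_m == 0)
+ *			trigger_reauth();
+ */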
 
 struct intel_connector {
@@ -921,7 +999,8 @@ struct intel_crtc_state {
 	struct intel_link_m_n fdi_m_n;
 
 	bool ips_enabled;
-	bool ips_force_disable;
+
+	bool crc_enabled;
 
 	bool enable_fbc;
 
@@ -942,13 +1021,30 @@ struct intel_crtc_state {
 	/* Gamma mode programmed on the pipe */
 	u32 gamma_mode;
 
+	union {
+		/* CSC mode programmed on the pipe */
+		u32 csc_mode;
+
+		/* CHV CGM mode */
+		u32 cgm_mode;
+	};
+
 	/* bitmask of visible planes (enum plane_id) */
 	u8 active_planes;
 	u8 nv12_planes;
+	u8 c8_planes;
 
 	/* bitmask of planes that will be updated during the commit */
 	u8 update_planes;
 
+	struct {
+		u32 enable;
+		u32 gcp;
+		union hdmi_infoframe avi;
+		union hdmi_infoframe spd;
+		union hdmi_infoframe hdmi;
+	} infoframes;
+
 	/* HDMI scrambling status */
 	bool hdmi_scrambling;
 
@@ -961,6 +1057,12 @@ struct intel_crtc_state {
 	/* Output down scaling is done in LSPCON device */
 	bool lspcon_downsampling;
 
+	/* enable pipe gamma? */
+	bool gamma_enable;
+
+	/* enable pipe csc? */
+	bool csc_enable;
+
 	/* Display Stream compression state */
 	struct {
 		bool compression_enable;
@@ -989,9 +1091,6 @@ struct intel_crtc {
 
 	struct intel_crtc_state *config;
 
-	/* global reset count when the last flip was submitted */
-	unsigned int reset_count;
-
 	/* Access to these should be protected by dev_priv->irq_lock. */
 	bool cpu_fifo_underrun_disabled;
 	bool pch_fifo_underrun_disabled;
@@ -1260,11 +1359,15 @@ struct intel_digital_port {
 				const struct intel_crtc_state *crtc_state,
 				unsigned int type,
 				const void *frame, ssize_t len);
+	void (*read_infoframe)(struct intel_encoder *encoder,
+			       const struct intel_crtc_state *crtc_state,
+			       unsigned int type,
+			       void *frame, ssize_t len);
 	void (*set_infoframes)(struct intel_encoder *encoder,
 			       bool enable,
 			       const struct intel_crtc_state *crtc_state,
 			       const struct drm_connector_state *conn_state);
-	bool (*infoframe_enabled)(struct intel_encoder *encoder,
+	u32 (*infoframes_enabled)(struct intel_encoder *encoder,
 				  const struct intel_crtc_state *pipe_config);
 };
 
@@ -1738,7 +1841,7 @@ void intel_dp_get_m_n(struct intel_crtc *crtc,
 void intel_dp_set_m_n(const struct intel_crtc_state *crtc_state,
 		      enum link_m_n_set m_n);
 int intel_dotclock_calculate(int link_freq, const struct intel_link_m_n *m_n);
-bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, int target_clock,
+bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state,
 			struct dpll *best_clock);
 int chv_calc_dpll_params(int refclk, struct dpll *pll_clock);
 
@@ -2000,13 +2103,22 @@ bool intel_hdmi_handle_sink_scrambling(struct intel_encoder *encoder,
 				       bool scrambling);
 void intel_dp_dual_mode_set_tmds_output(struct intel_hdmi *hdmi, bool enable);
 void intel_infoframe_init(struct intel_digital_port *intel_dig_port);
+u32 intel_hdmi_infoframes_enabled(struct intel_encoder *encoder,
+				  const struct intel_crtc_state *crtc_state);
+u32 intel_hdmi_infoframe_enable(unsigned int type);
+void intel_hdmi_read_gcp_infoframe(struct intel_encoder *encoder,
+				   struct intel_crtc_state *crtc_state);
+void intel_read_infoframe(struct intel_encoder *encoder,
+			  const struct intel_crtc_state *crtc_state,
+			  enum hdmi_infoframe_type type,
+			  union hdmi_infoframe *frame);
 
 /* intel_lvds.c */
 bool intel_lvds_port_enabled(struct drm_i915_private *dev_priv,
 			     i915_reg_t lvds_reg, enum pipe *pipe);
 void intel_lvds_init(struct drm_i915_private *dev_priv);
-struct intel_encoder *intel_get_lvds_encoder(struct drm_device *dev);
-bool intel_is_dual_link_lvds(struct drm_device *dev);
+struct intel_encoder *intel_get_lvds_encoder(struct drm_i915_private *dev_priv);
+bool intel_is_dual_link_lvds(struct drm_i915_private *dev_priv);
 
 /* intel_overlay.c */
 void intel_overlay_setup(struct drm_i915_private *dev_priv);
@@ -2068,9 +2180,12 @@ int intel_hdcp_init(struct intel_connector *connector,
 		    const struct intel_hdcp_shim *hdcp_shim);
 int intel_hdcp_enable(struct intel_connector *connector);
 int intel_hdcp_disable(struct intel_connector *connector);
-int intel_hdcp_check_link(struct intel_connector *connector);
 bool is_hdcp_supported(struct drm_i915_private *dev_priv, enum port port);
 bool intel_hdcp_capable(struct intel_connector *connector);
+void intel_hdcp_component_init(struct drm_i915_private *dev_priv);
+void intel_hdcp_component_fini(struct drm_i915_private *dev_priv);
+void intel_hdcp_cleanup(struct intel_connector *connector);
+void intel_hdcp_handle_cp_irq(struct intel_connector *connector);
 
 /* intel_psr.c */
 #define CAN_PSR(dev_priv) (HAS_PSR(dev_priv) && dev_priv->psr.sink_support)
@@ -2079,9 +2194,9 @@ void intel_psr_enable(struct intel_dp *intel_dp,
 		      const struct intel_crtc_state *crtc_state);
 void intel_psr_disable(struct intel_dp *intel_dp,
 		      const struct intel_crtc_state *old_crtc_state);
-int intel_psr_set_debugfs_mode(struct drm_i915_private *dev_priv,
-			       struct drm_modeset_acquire_ctx *ctx,
-			       u64 value);
+void intel_psr_update(struct intel_dp *intel_dp,
+		      const struct intel_crtc_state *crtc_state);
+int intel_psr_debug_set(struct drm_i915_private *dev_priv, u64 value);
 void intel_psr_invalidate(struct drm_i915_private *dev_priv,
 			  unsigned frontbuffer_bits,
 			  enum fb_op_origin origin);
@@ -2261,7 +2376,7 @@ void intel_suspend_gt_powersave(struct drm_i915_private *dev_priv);
 void gen6_rps_busy(struct drm_i915_private *dev_priv);
 void gen6_rps_reset_ei(struct drm_i915_private *dev_priv);
 void gen6_rps_idle(struct drm_i915_private *dev_priv);
-void gen6_rps_boost(struct i915_request *rq, struct intel_rps_client *rps);
+void gen6_rps_boost(struct i915_request *rq);
 void g4x_wm_get_hw_state(struct drm_i915_private *dev_priv);
 void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv);
 void ilk_wm_get_hw_state(struct drm_i915_private *dev_priv);
@@ -2375,6 +2490,14 @@ int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv,
 			       struct intel_crtc_state *crtc_state);
 
 /* intel_atomic_plane.c */
+void intel_update_plane(struct intel_plane *plane,
+			const struct intel_crtc_state *crtc_state,
+			const struct intel_plane_state *plane_state);
+void intel_update_slave(struct intel_plane *plane,
+			const struct intel_crtc_state *crtc_state,
+			const struct intel_plane_state *plane_state);
+void intel_disable_plane(struct intel_plane *plane,
+			 const struct intel_crtc_state *crtc_state);
 struct intel_plane *intel_plane_alloc(void);
 void intel_plane_free(struct intel_plane *plane);
 struct drm_plane_state *intel_plane_duplicate_state(struct drm_plane *plane);
@@ -2404,11 +2527,15 @@ void lspcon_write_infoframe(struct intel_encoder *encoder,
 			    const struct intel_crtc_state *crtc_state,
 			    unsigned int type,
 			    const void *buf, ssize_t len);
+void lspcon_read_infoframe(struct intel_encoder *encoder,
+			   const struct intel_crtc_state *crtc_state,
+			   unsigned int type,
+			   void *frame, ssize_t len);
 void lspcon_set_infoframes(struct intel_encoder *encoder,
 			   bool enable,
 			   const struct intel_crtc_state *crtc_state,
 			   const struct drm_connector_state *conn_state);
-bool lspcon_infoframe_enabled(struct intel_encoder *encoder,
+u32 lspcon_infoframes_enabled(struct intel_encoder *encoder,
 			      const struct intel_crtc_state *pipe_config);
 void lspcon_ycbcr420_config(struct drm_connector *connector,
 			    struct intel_crtc_state *crtc_state);
diff --git a/drivers/gpu/drm/i915/intel_dsi_vbt.c b/drivers/gpu/drm/i915/intel_dsi_vbt.c
index 06a11c35a784fc7957227792fd46474d960cfc81..d1e00e4c772663047ab52a986d628d8c89d325fe 100644
--- a/drivers/gpu/drm/i915/intel_dsi_vbt.c
+++ b/drivers/gpu/drm/i915/intel_dsi_vbt.c
@@ -194,7 +194,7 @@ static const u8 *mipi_exec_send_packet(struct intel_dsi *intel_dsi,
 		break;
 	}
 
-	if (!IS_ICELAKE(dev_priv))
+	if (INTEL_GEN(dev_priv) < 11)
 		vlv_dsi_wait_for_fifo_empty(intel_dsi, port);
 
 out:
@@ -365,7 +365,7 @@ static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
 	/* pull up/down */
 	value = *data++ & 1;
 
-	if (IS_ICELAKE(dev_priv))
+	if (INTEL_GEN(dev_priv) >= 11)
 		icl_exec_gpio(dev_priv, gpio_source, gpio_index, value);
 	else if (IS_VALLEYVIEW(dev_priv))
 		vlv_exec_gpio(dev_priv, gpio_source, gpio_number, value);
@@ -890,7 +890,7 @@ bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id)
 
 	intel_dsi->burst_mode_ratio = burst_mode_ratio;
 
-	if (IS_ICELAKE(dev_priv))
+	if (INTEL_GEN(dev_priv) >= 11)
 		icl_dphy_param_init(intel_dsi);
 	else
 		vlv_dphy_param_init(intel_dsi);
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index 49fa43ff02ba09dc63db471b4219e6b4db385cbf..652c1b3ba19009ba587c644f5d4fe6e8e71d76d9 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -84,7 +84,6 @@ static const struct engine_class_info intel_engine_classes[] = {
 #define MAX_MMIO_BASES 3
 struct engine_info {
 	unsigned int hw_id;
-	unsigned int uabi_id;
 	u8 class;
 	u8 instance;
 	/* mmio bases table *must* be sorted in reverse gen order */
@@ -95,27 +94,24 @@ struct engine_info {
 };
 
 static const struct engine_info intel_engines[] = {
-	[RCS] = {
-		.hw_id = RCS_HW,
-		.uabi_id = I915_EXEC_RENDER,
+	[RCS0] = {
+		.hw_id = RCS0_HW,
 		.class = RENDER_CLASS,
 		.instance = 0,
 		.mmio_bases = {
 			{ .gen = 1, .base = RENDER_RING_BASE }
 		},
 	},
-	[BCS] = {
-		.hw_id = BCS_HW,
-		.uabi_id = I915_EXEC_BLT,
+	[BCS0] = {
+		.hw_id = BCS0_HW,
 		.class = COPY_ENGINE_CLASS,
 		.instance = 0,
 		.mmio_bases = {
 			{ .gen = 6, .base = BLT_RING_BASE }
 		},
 	},
-	[VCS] = {
-		.hw_id = VCS_HW,
-		.uabi_id = I915_EXEC_BSD,
+	[VCS0] = {
+		.hw_id = VCS0_HW,
 		.class = VIDEO_DECODE_CLASS,
 		.instance = 0,
 		.mmio_bases = {
@@ -124,9 +120,8 @@ static const struct engine_info intel_engines[] = {
 			{ .gen = 4, .base = BSD_RING_BASE }
 		},
 	},
-	[VCS2] = {
-		.hw_id = VCS2_HW,
-		.uabi_id = I915_EXEC_BSD,
+	[VCS1] = {
+		.hw_id = VCS1_HW,
 		.class = VIDEO_DECODE_CLASS,
 		.instance = 1,
 		.mmio_bases = {
@@ -134,27 +129,24 @@ static const struct engine_info intel_engines[] = {
 			{ .gen = 8, .base = GEN8_BSD2_RING_BASE }
 		},
 	},
-	[VCS3] = {
-		.hw_id = VCS3_HW,
-		.uabi_id = I915_EXEC_BSD,
+	[VCS2] = {
+		.hw_id = VCS2_HW,
 		.class = VIDEO_DECODE_CLASS,
 		.instance = 2,
 		.mmio_bases = {
 			{ .gen = 11, .base = GEN11_BSD3_RING_BASE }
 		},
 	},
-	[VCS4] = {
-		.hw_id = VCS4_HW,
-		.uabi_id = I915_EXEC_BSD,
+	[VCS3] = {
+		.hw_id = VCS3_HW,
 		.class = VIDEO_DECODE_CLASS,
 		.instance = 3,
 		.mmio_bases = {
 			{ .gen = 11, .base = GEN11_BSD4_RING_BASE }
 		},
 	},
-	[VECS] = {
-		.hw_id = VECS_HW,
-		.uabi_id = I915_EXEC_VEBOX,
+	[VECS0] = {
+		.hw_id = VECS0_HW,
 		.class = VIDEO_ENHANCEMENT_CLASS,
 		.instance = 0,
 		.mmio_bases = {
@@ -162,9 +154,8 @@ static const struct engine_info intel_engines[] = {
 			{ .gen = 7, .base = VEBOX_RING_BASE }
 		},
 	},
-	[VECS2] = {
-		.hw_id = VECS2_HW,
-		.uabi_id = I915_EXEC_VEBOX,
+	[VECS1] = {
+		.hw_id = VECS1_HW,
 		.class = VIDEO_ENHANCEMENT_CLASS,
 		.instance = 1,
 		.mmio_bases = {
@@ -313,7 +304,10 @@ intel_engine_setup(struct drm_i915_private *dev_priv,
 	if (!engine)
 		return -ENOMEM;
 
+	BUILD_BUG_ON(BITS_PER_TYPE(engine->mask) < I915_NUM_ENGINES);
+
 	engine->id = id;
+	engine->mask = BIT(id);
 	engine->i915 = dev_priv;
 	__sprint_engine_name(engine->name, info);
 	engine->hw_id = engine->guc_id = info->hw_id;
@@ -321,7 +315,6 @@ intel_engine_setup(struct drm_i915_private *dev_priv,
 	engine->class = info->class;
 	engine->instance = info->instance;
 
-	engine->uabi_id = info->uabi_id;
 	engine->uabi_class = intel_engine_classes[info->class].uabi_class;
 
 	engine->context_size = __intel_engine_context_size(dev_priv,
@@ -355,15 +348,15 @@ intel_engine_setup(struct drm_i915_private *dev_priv,
 int intel_engines_init_mmio(struct drm_i915_private *dev_priv)
 {
 	struct intel_device_info *device_info = mkwrite_device_info(dev_priv);
-	const unsigned int ring_mask = INTEL_INFO(dev_priv)->ring_mask;
+	const unsigned int engine_mask = INTEL_INFO(dev_priv)->engine_mask;
 	struct intel_engine_cs *engine;
 	enum intel_engine_id id;
 	unsigned int mask = 0;
 	unsigned int i;
 	int err;
 
-	WARN_ON(ring_mask == 0);
-	WARN_ON(ring_mask &
+	WARN_ON(engine_mask == 0);
+	WARN_ON(engine_mask &
 		GENMASK(BITS_PER_TYPE(mask) - 1, I915_NUM_ENGINES));
 
 	if (i915_inject_load_failure())
@@ -377,7 +370,7 @@ int intel_engines_init_mmio(struct drm_i915_private *dev_priv)
 		if (err)
 			goto cleanup;
 
-		mask |= ENGINE_MASK(i);
+		mask |= BIT(i);
 	}
 
 	/*
@@ -385,16 +378,16 @@ int intel_engines_init_mmio(struct drm_i915_private *dev_priv)
 	 * are added to the driver by a warning and disabling the forgotten
 	 * engines.
 	 */
-	if (WARN_ON(mask != ring_mask))
-		device_info->ring_mask = mask;
+	if (WARN_ON(mask != engine_mask))
+		device_info->engine_mask = mask;
 
 	/* We always presume we have at least RCS available for later probing */
-	if (WARN_ON(!HAS_ENGINE(dev_priv, RCS))) {
+	if (WARN_ON(!HAS_ENGINE(dev_priv, RCS0))) {
 		err = -ENODEV;
 		goto cleanup;
 	}
 
-	RUNTIME_INFO(dev_priv)->num_rings = hweight32(mask);
+	RUNTIME_INFO(dev_priv)->num_engines = hweight32(mask);
 
 	i915_check_and_clear_faults(dev_priv);
 
@@ -455,12 +448,6 @@ int intel_engines_init(struct drm_i915_private *dev_priv)
 	return err;
 }
 
-void intel_engine_write_global_seqno(struct intel_engine_cs *engine, u32 seqno)
-{
-	intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno);
-	GEM_BUG_ON(intel_engine_get_seqno(engine) != seqno);
-}
-
 static void intel_engine_init_batch_pool(struct intel_engine_cs *engine)
 {
 	i915_gem_batch_pool_init(&engine->batch_pool, engine);
@@ -614,10 +601,44 @@ int intel_engine_setup_common(struct intel_engine_cs *engine)
 	return err;
 }
 
-static void __intel_context_unpin(struct i915_gem_context *ctx,
-				  struct intel_engine_cs *engine)
+void intel_engines_set_scheduler_caps(struct drm_i915_private *i915)
 {
-	intel_context_unpin(to_intel_context(ctx, engine));
+	static const struct {
+		u8 engine;
+		u8 sched;
+	} map[] = {
+#define MAP(x, y) { ilog2(I915_ENGINE_HAS_##x), ilog2(I915_SCHEDULER_CAP_##y) }
+		MAP(PREEMPTION, PREEMPTION),
+		MAP(SEMAPHORES, SEMAPHORES),
+#undef MAP
+	};
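+
+	/*
+	 * MAP() stores bit indices (hence the ilog2() on the BIT()-style
+	 * masks), so the loop below can translate each engine flag into
+	 * its scheduler capability with plain BIT(map[i].engine) and
+	 * BIT(map[i].sched) tests.
+	 */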
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
+	u32 enabled, disabled;
+
+	enabled = 0;
+	disabled = 0;
+	for_each_engine(engine, i915, id) { /* all engines must agree! */
+		int i;
+
+		if (engine->schedule)
+			enabled |= (I915_SCHEDULER_CAP_ENABLED |
+				    I915_SCHEDULER_CAP_PRIORITY);
+		else
+			disabled |= (I915_SCHEDULER_CAP_ENABLED |
+				     I915_SCHEDULER_CAP_PRIORITY);
+
+		for (i = 0; i < ARRAY_SIZE(map); i++) {
+			if (engine->flags & BIT(map[i].engine))
+				enabled |= BIT(map[i].sched);
+			else
+				disabled |= BIT(map[i].sched);
+		}
+	}
+
+	i915->caps.scheduler = enabled & ~disabled;
+	if (!(i915->caps.scheduler & I915_SCHEDULER_CAP_ENABLED))
+		i915->caps.scheduler = 0;
 }
 
 struct measure_breadcrumb {
@@ -670,6 +691,20 @@ static int measure_breadcrumb_dw(struct intel_engine_cs *engine)
 	return dw;
 }
 
+static int pin_context(struct i915_gem_context *ctx,
+		       struct intel_engine_cs *engine,
+		       struct intel_context **out)
+{
+	struct intel_context *ce;
+
+	ce = intel_context_pin(ctx, engine);
+	if (IS_ERR(ce))
+		return PTR_ERR(ce);
+
+	*out = ce;
+	return 0;
+}
+
 /**
  * intel_engine_init_common - initialize engine state which might require hw access
  * @engine: Engine to initialize.
@@ -684,11 +719,8 @@ static int measure_breadcrumb_dw(struct intel_engine_cs *engine)
 int intel_engine_init_common(struct intel_engine_cs *engine)
 {
 	struct drm_i915_private *i915 = engine->i915;
-	struct intel_context *ce;
 	int ret;
 
-	engine->set_default_submission(engine);
-
 	/* We may need to do things with the shrinker which
 	 * require us to immediately switch back to the default
 	 * context. This can cause a problem as pinning the
@@ -696,36 +728,34 @@ int intel_engine_init_common(struct intel_engine_cs *engine)
 	 * be available. To avoid this we always pin the default
 	 * context.
 	 */
-	ce = intel_context_pin(i915->kernel_context, engine);
-	if (IS_ERR(ce))
-		return PTR_ERR(ce);
+	ret = pin_context(i915->kernel_context, engine,
+			  &engine->kernel_context);
+	if (ret)
+		return ret;
 
 	/*
 	 * Similarly the preempt context must always be available so that
-	 * we can interrupt the engine at any time.
+	 * we can interrupt the engine at any time. However, as preemption
+	 * is optional, we allow it to fail.
 	 */
-	if (i915->preempt_context) {
-		ce = intel_context_pin(i915->preempt_context, engine);
-		if (IS_ERR(ce)) {
-			ret = PTR_ERR(ce);
-			goto err_unpin_kernel;
-		}
-	}
+	if (i915->preempt_context)
+		pin_context(i915->preempt_context, engine,
+			    &engine->preempt_context);
 
 	ret = measure_breadcrumb_dw(engine);
 	if (ret < 0)
-		goto err_unpin_preempt;
+		goto err_unpin;
 
 	engine->emit_fini_breadcrumb_dw = ret;
 
-	return 0;
+	engine->set_default_submission(engine);
 
-err_unpin_preempt:
-	if (i915->preempt_context)
-		__intel_context_unpin(i915->preempt_context, engine);
+	return 0;
 
-err_unpin_kernel:
-	__intel_context_unpin(i915->kernel_context, engine);
+err_unpin:
+	if (engine->preempt_context)
+		intel_context_unpin(engine->preempt_context);
+	intel_context_unpin(engine->kernel_context);
 	return ret;
 }
 
@@ -738,8 +768,6 @@ int intel_engine_init_common(struct intel_engine_cs *engine)
  */
 void intel_engine_cleanup_common(struct intel_engine_cs *engine)
 {
-	struct drm_i915_private *i915 = engine->i915;
-
 	cleanup_status_page(engine);
 
 	intel_engine_fini_breadcrumbs(engine);
@@ -749,9 +777,9 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine)
 	if (engine->default_state)
 		i915_gem_object_put(engine->default_state);
 
-	if (i915->preempt_context)
-		__intel_context_unpin(i915->preempt_context, engine);
-	__intel_context_unpin(i915->kernel_context, engine);
+	if (engine->preempt_context)
+		intel_context_unpin(engine->preempt_context);
+	intel_context_unpin(engine->kernel_context);
 
 	i915_timeline_fini(&engine->timeline);
 
@@ -930,7 +958,7 @@ void intel_engine_get_instdone(struct intel_engine_cs *engine,
 	default:
 		instdone->instdone = I915_READ(RING_INSTDONE(mmio_base));
 
-		if (engine->id != RCS)
+		if (engine->id != RCS0)
 			break;
 
 		instdone->slice_common = I915_READ(GEN7_SC_INSTDONE);
@@ -946,7 +974,7 @@ void intel_engine_get_instdone(struct intel_engine_cs *engine,
 	case 7:
 		instdone->instdone = I915_READ(RING_INSTDONE(mmio_base));
 
-		if (engine->id != RCS)
+		if (engine->id != RCS0)
 			break;
 
 		instdone->slice_common = I915_READ(GEN7_SC_INSTDONE);
@@ -959,7 +987,7 @@ void intel_engine_get_instdone(struct intel_engine_cs *engine,
 	case 4:
 		instdone->instdone = I915_READ(RING_INSTDONE(mmio_base));
 
-		if (engine->id == RCS)
+		if (engine->id == RCS0)
 			/* HACK: Using the wrong struct member */
 			instdone->slice_common = I915_READ(GEN4_INSTDONE1);
 		break;
@@ -1007,16 +1035,10 @@ static bool ring_is_idle(struct intel_engine_cs *engine)
  */
 bool intel_engine_is_idle(struct intel_engine_cs *engine)
 {
-	struct drm_i915_private *dev_priv = engine->i915;
-
 	/* More white lies, if wedged, hw state is inconsistent */
-	if (i915_terminally_wedged(&dev_priv->gpu_error))
+	if (i915_reset_failed(engine->i915))
 		return true;
 
-	/* Any inflight/incomplete requests? */
-	if (!intel_engine_signaled(engine, intel_engine_last_submit(engine)))
-		return false;
-
 	/* Waiting to drain ELSP? */
 	if (READ_ONCE(engine->execlists.active)) {
 		struct tasklet_struct *t = &engine->execlists.tasklet;
@@ -1045,7 +1067,7 @@ bool intel_engine_is_idle(struct intel_engine_cs *engine)
 	return ring_is_idle(engine);
 }
 
-bool intel_engines_are_idle(struct drm_i915_private *dev_priv)
+bool intel_engines_are_idle(struct drm_i915_private *i915)
 {
 	struct intel_engine_cs *engine;
 	enum intel_engine_id id;
@@ -1054,10 +1076,14 @@ bool intel_engines_are_idle(struct drm_i915_private *dev_priv)
 	 * If the driver is wedged, HW state may be very inconsistent and
 	 * report that it is still busy, even though we have stopped using it.
 	 */
-	if (i915_terminally_wedged(&dev_priv->gpu_error))
+	if (i915_reset_failed(i915))
 		return true;
 
-	for_each_engine(engine, dev_priv, id) {
+	/* Already parked (and passed an idleness test); must still be idle */
+	if (!READ_ONCE(i915->gt.awake))
+		return true;
+
+	for_each_engine(engine, i915, id) {
 		if (!intel_engine_is_idle(engine))
 			return false;
 	}
@@ -1065,34 +1091,6 @@ bool intel_engines_are_idle(struct drm_i915_private *dev_priv)
 	return true;
 }
 
-/**
- * intel_engine_has_kernel_context:
- * @engine: the engine
- *
- * Returns true if the last context to be executed on this engine, or has been
- * executed if the engine is already idle, is the kernel context
- * (#i915.kernel_context).
- */
-bool intel_engine_has_kernel_context(const struct intel_engine_cs *engine)
-{
-	const struct intel_context *kernel_context =
-		to_intel_context(engine->i915->kernel_context, engine);
-	struct i915_request *rq;
-
-	lockdep_assert_held(&engine->i915->drm.struct_mutex);
-
-	/*
-	 * Check the last context seen by the engine. If active, it will be
-	 * the last request that remains in the timeline. When idle, it is
-	 * the last executed context as tracked by retirement.
-	 */
-	rq = __i915_active_request_peek(&engine->timeline.last_request);
-	if (rq)
-		return rq->hw_context == kernel_context;
-	else
-		return engine->last_retired_context == kernel_context;
-}
-
 void intel_engines_reset_default_submission(struct drm_i915_private *i915)
 {
 	struct intel_engine_cs *engine;
@@ -1180,6 +1178,8 @@ void intel_engines_park(struct drm_i915_private *i915)
 		i915_gem_batch_pool_fini(&engine->batch_pool);
 		engine->execlists.no_priolist = false;
 	}
+
+	i915->gt.active_engines = 0;
 }
 
 /**
@@ -1283,15 +1283,14 @@ static void print_request(struct drm_printer *m,
 
 	x = print_sched_attr(rq->i915, &rq->sched.attr, buf, x, sizeof(buf));
 
-	drm_printf(m, "%s%x%s%s [%llx:%llx]%s @ %dms: %s\n",
+	drm_printf(m, "%s %llx:%llx%s%s %s @ %dms: %s\n",
 		   prefix,
-		   rq->global_seqno,
+		   rq->fence.context, rq->fence.seqno,
 		   i915_request_completed(rq) ? "!" :
 		   i915_request_started(rq) ? "*" :
 		   "",
 		   test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
 			    &rq->fence.flags) ?  "+" : "",
-		   rq->fence.context, rq->fence.seqno,
 		   buf,
 		   jiffies_to_msecs(jiffies - rq->emitted_jiffies),
 		   name);
@@ -1334,7 +1333,7 @@ static void intel_engine_print_registers(const struct intel_engine_cs *engine,
 		&engine->execlists;
 	u64 addr;
 
-	if (engine->id == RCS && IS_GEN_RANGE(dev_priv, 4, 7))
+	if (engine->id == RCS0 && IS_GEN_RANGE(dev_priv, 4, 7))
 		drm_printf(m, "\tCCID: 0x%08x\n", I915_READ(CCID));
 	drm_printf(m, "\tRING_START: 0x%08x\n",
 		   I915_READ(RING_START(engine->mmio_base)));
@@ -1425,10 +1424,11 @@ static void intel_engine_print_registers(const struct intel_engine_cs *engine,
 				char hdr[80];
 
 				snprintf(hdr, sizeof(hdr),
-					 "\t\tELSP[%d] count=%d, ring:{start:%08x, hwsp:%08x}, rq: ",
+					 "\t\tELSP[%d] count=%d, ring:{start:%08x, hwsp:%08x, seqno:%08x}, rq: ",
 					 idx, count,
 					 i915_ggtt_offset(rq->ring->vma),
-					 rq->timeline->hwsp_offset);
+					 rq->timeline->hwsp_offset,
+					 hwsp_seqno(rq));
 				print_request(m, rq, hdr);
 			} else {
 				drm_printf(m, "\t\tELSP[%d] idle\n", idx);
@@ -1495,13 +1495,12 @@ void intel_engine_dump(struct intel_engine_cs *engine,
 		va_end(ap);
 	}
 
-	if (i915_terminally_wedged(&engine->i915->gpu_error))
+	if (i915_reset_failed(engine->i915))
 		drm_printf(m, "*** WEDGED ***\n");
 
-	drm_printf(m, "\tcurrent seqno %x, last %x, hangcheck %x [%d ms]\n",
-		   intel_engine_get_seqno(engine),
-		   intel_engine_last_submit(engine),
-		   engine->hangcheck.seqno,
+	drm_printf(m, "\tHangcheck %x:%x [%d ms]\n",
+		   engine->hangcheck.last_seqno,
+		   engine->hangcheck.next_seqno,
 		   jiffies_to_msecs(jiffies - engine->hangcheck.action_timestamp));
 	drm_printf(m, "\tReset count: %d (global %d)\n",
 		   i915_reset_engine_count(error, engine),
@@ -1521,7 +1520,7 @@ void intel_engine_dump(struct intel_engine_cs *engine,
 	if (&rq->link != &engine->timeline.requests)
 		print_request(m, rq, "\t\tlast   ");
 
-	rq = i915_gem_find_active_request(engine);
+	rq = intel_engine_find_active_request(engine);
 	if (rq) {
 		print_request(m, rq, "\t\tactive ");
 
@@ -1688,6 +1687,51 @@ void intel_disable_engine_stats(struct intel_engine_cs *engine)
 	write_sequnlock_irqrestore(&engine->stats.lock, flags);
 }
 
+static bool match_ring(struct i915_request *rq)
+{
+	struct drm_i915_private *dev_priv = rq->i915;
+	u32 ring = I915_READ(RING_START(rq->engine->mmio_base));
+
+	return ring == i915_ggtt_offset(rq->ring->vma);
+}
+
+struct i915_request *
+intel_engine_find_active_request(struct intel_engine_cs *engine)
+{
+	struct i915_request *request, *active = NULL;
+	unsigned long flags;
+
+	/*
+	 * We are called by error capture, reset and engine-state dumping at
+	 * random points in time. In particular, note that none of these
+	 * callers is crucially ordered with an interrupt. After a hang, the
+	 * GPU is dead and we assume that no more writes can happen (we waited
+	 * long enough for all in-flight writes to be flushed) - adding an
+	 * extra delay for a recent interrupt is pointless. Hence, we do not
+	 * need an engine->irq_seqno_barrier() before the seqno reads. At all
+	 * other times, we must assume the GPU is still running, but we only
+	 * care about the snapshot at this moment.
+	 */
+	spin_lock_irqsave(&engine->timeline.lock, flags);
+	list_for_each_entry(request, &engine->timeline.requests, link) {
+		if (i915_request_completed(request))
+			continue;
+
+		if (!i915_request_started(request))
+			break;
+
+		/* More than one preemptible request may match! */
+		if (!match_ring(request))
+			break;
+
+		active = request;
+		break;
+	}
+	spin_unlock_irqrestore(&engine->timeline.lock, flags);
+
+	return active;
+}
+
 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
 #include "selftests/mock_engine.c"
 #include "selftests/intel_engine_cs.c"
diff --git a/drivers/gpu/drm/i915/intel_engine_types.h b/drivers/gpu/drm/i915/intel_engine_types.h
new file mode 100644
index 0000000000000000000000000000000000000000..88ed7ba8886fcf0de4250b6be798fb8e5f8c36a1
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_engine_types.h
@@ -0,0 +1,525 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_ENGINE_TYPES__
+#define __INTEL_ENGINE_TYPES__
+
+#include <linux/hashtable.h>
+#include <linux/irq_work.h>
+#include <linux/kref.h>
+#include <linux/list.h>
+#include <linux/types.h>
+
+#include "i915_timeline_types.h"
+#include "intel_device_info.h"
+#include "intel_workarounds_types.h"
+
+#include "i915_gem_batch_pool.h"
+#include "i915_pmu.h"
+
+#define I915_MAX_SLICES	3
+#define I915_MAX_SUBSLICES 8
+
+#define I915_CMD_HASH_ORDER 9
+
+struct drm_i915_reg_table;
+struct i915_gem_context;
+struct i915_request;
+struct i915_sched_attr;
+
+struct intel_hw_status_page {
+	struct i915_vma *vma;
+	u32 *addr;
+};
+
+struct intel_instdone {
+	u32 instdone;
+	/* The following exist only in the RCS engine */
+	u32 slice_common;
+	u32 sampler[I915_MAX_SLICES][I915_MAX_SUBSLICES];
+	u32 row[I915_MAX_SLICES][I915_MAX_SUBSLICES];
+};
+
+struct intel_engine_hangcheck {
+	u64 acthd;
+	u32 last_seqno;
+	u32 next_seqno;
+	unsigned long action_timestamp;
+	struct intel_instdone instdone;
+};
+
+struct intel_ring {
+	struct kref ref;
+	struct i915_vma *vma;
+	void *vaddr;
+
+	struct i915_timeline *timeline;
+	struct list_head request_list;
+	struct list_head active_link;
+
+	u32 head;
+	u32 tail;
+	u32 emit;
+
+	u32 space;
+	u32 size;
+	u32 effective_size;
+};
+
+/*
+ * We use a single page to load ctx workarounds, so all of these
+ * values are expressed in dwords.
+ *
+ * struct i915_wa_ctx_bb:
+ *  offset: specifies the batch starting position; also useful if we
+ *    want to have multiple batches at different offsets based on some
+ *    criteria. It is not a requirement at the moment but provides an
+ *    option for future use.
+ *  size: size of the batch in dwords
+ */
+struct i915_ctx_workarounds {
+	struct i915_wa_ctx_bb {
+		u32 offset;
+		u32 size;
+	} indirect_ctx, per_ctx;
+	struct i915_vma *vma;
+};
+
+#define I915_MAX_VCS	4
+#define I915_MAX_VECS	2
+
+/*
+ * Engine ID definitions.
+ * Keep instances of the same type engine together.
+ */
+enum intel_engine_id {
+	RCS0 = 0,
+	BCS0,
+	VCS0,
+	VCS1,
+	VCS2,
+	VCS3,
+#define _VCS(n) (VCS0 + (n))
+	VECS0,
+	VECS1
+#define _VECS(n) (VECS0 + (n))
+};
+
+struct st_preempt_hang {
+	struct completion completion;
+	unsigned int count;
+	bool inject_hang;
+};
+
+/**
+ * struct intel_engine_execlists - execlist submission queue and port state
+ *
+ * The struct intel_engine_execlists represents the combined logical state
+ * of the driver and the hardware for the execlist mode of submission.
+ */
+struct intel_engine_execlists {
+	/**
+	 * @tasklet: softirq tasklet for bottom handler
+	 */
+	struct tasklet_struct tasklet;
+
+	/**
+	 * @default_priolist: priority list for I915_PRIORITY_NORMAL
+	 */
+	struct i915_priolist default_priolist;
+
+	/**
+	 * @no_priolist: priority lists disabled
+	 */
+	bool no_priolist;
+
+	/**
+	 * @submit_reg: gen-specific execlist submission register
+	 * set to the ExecList Submission Port (elsp) register pre-Gen11 and to
+	 * the ExecList Submission Queue Contents register array for Gen11+
+	 */
+	u32 __iomem *submit_reg;
+
+	/**
+	 * @ctrl_reg: the enhanced execlists control register, used to load the
+	 * submit queue on the HW and to request preemptions to idle
+	 */
+	u32 __iomem *ctrl_reg;
+
+	/**
+	 * @port: execlist port states
+	 *
+	 * For each hardware ELSP (ExecList Submission Port) we keep
+	 * track of the last request and the number of times we submitted
+	 * that port to hw. We then count the number of times the hw reports
+	 * a context completion or preemption. As only one context can
+	 * be active on hw, we limit resubmission of a context to port[0].
+	 * This is called a Lite Restore of the context.
+	 */
+	struct execlist_port {
+		/**
+		 * @request_count: combined request and submission count
+		 */
+		struct i915_request *request_count;
+#define EXECLIST_COUNT_BITS 2
+#define port_request(p) ptr_mask_bits((p)->request_count, EXECLIST_COUNT_BITS)
+#define port_count(p) ptr_unmask_bits((p)->request_count, EXECLIST_COUNT_BITS)
+#define port_pack(rq, count) ptr_pack_bits(rq, count, EXECLIST_COUNT_BITS)
+#define port_unpack(p, count) ptr_unpack_bits((p)->request_count, count, EXECLIST_COUNT_BITS)
+#define port_set(p, packed) ((p)->request_count = (packed))
+#define port_isset(p) ((p)->request_count)
+#define port_index(p, execlists) ((p) - (execlists)->port)
+
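+		/*
+		 * Example of the packing macros above (a sketch, not code
+		 * from this patch): resubmitting the same request bumps the
+		 * submission count held in the low bits of the pointer.
+		 *
+		 *	unsigned int count;
+		 *	struct i915_request *rq;
+		 *
+		 *	rq = port_unpack(port, &count);
+		 *	port_set(port, port_pack(rq, count + 1));
+		 */
+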
+		/**
+		 * @context_id: context ID for port
+		 */
+		GEM_DEBUG_DECL(u32 context_id);
+
+#define EXECLIST_MAX_PORTS 2
+	} port[EXECLIST_MAX_PORTS];
+
+	/**
+	 * @active: is the HW active? We consider the HW as active after
+	 * submitting any context for execution and until we have seen the
+	 * last context completion event. After that, we do not expect any
+	 * more events until we submit, and so can park the HW.
+	 *
+	 * As we have a small number of different sources from which we feed
+	 * the HW, we track the state of each inside a single bitfield.
+	 */
+	unsigned int active;
+#define EXECLISTS_ACTIVE_USER 0
+#define EXECLISTS_ACTIVE_PREEMPT 1
+#define EXECLISTS_ACTIVE_HWACK 2
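+	/*
+	 * These are bit indices; elsewhere in the driver they are
+	 * manipulated with helpers along the lines of (illustration only):
+	 *
+	 *	__set_bit(EXECLISTS_ACTIVE_USER,
+	 *		  (unsigned long *)&execlists->active);
+	 */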
+
+	/**
+	 * @port_mask: number of execlist ports - 1
+	 */
+	unsigned int port_mask;
+
+	/**
+	 * @queue_priority_hint: Highest pending priority.
+	 *
+	 * When we add requests into the queue, or adjust the priority of
+	 * executing requests, we compute the maximum priority of those
+	 * pending requests. We can then use this value to determine if
+	 * we need to preempt the executing requests to service the queue.
+	 * However, since we may have recorded the priority of an inflight
+	 * request we wanted to preempt but that has since completed, by the
+	 * time of dequeuing the priority hint may no longer match the highest
+	 * available request priority.
+	 */
+	int queue_priority_hint;
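+
+	/*
+	 * For illustration (a sketch, not code from this patch), the
+	 * submission side can then decide whether a new request warrants
+	 * kicking the tasklet:
+	 *
+	 *	if (rq_prio(rq) > execlists->queue_priority_hint)
+	 *		tasklet_hi_schedule(&execlists->tasklet);
+	 */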
+
+	/**
+	 * @queue: queue of requests, in priority lists
+	 */
+	struct rb_root_cached queue;
+
+	/**
+	 * @csb_write: control register for Context Switch buffer
+	 *
+	 * Note this register may be either mmio or HWSP shadow.
+	 */
+	u32 *csb_write;
+
+	/**
+	 * @csb_status: status array for Context Switch buffer
+	 *
+	 * Note these registers may be either mmio or HWSP shadow.
+	 */
+	u32 *csb_status;
+
+	/**
+	 * @preempt_complete_status: expected CSB upon completing preemption
+	 */
+	u32 preempt_complete_status;
+
+	/**
+	 * @csb_head: context status buffer head
+	 */
+	u8 csb_head;
+
+	I915_SELFTEST_DECLARE(struct st_preempt_hang preempt_hang;)
+};
+
+#define INTEL_ENGINE_CS_MAX_NAME 8
+
+struct intel_engine_cs {
+	struct drm_i915_private *i915;
+	char name[INTEL_ENGINE_CS_MAX_NAME];
+
+	enum intel_engine_id id;
+	unsigned int hw_id;
+	unsigned int guc_id;
+	intel_engine_mask_t mask;
+
+	u8 uabi_class;
+
+	u8 class;
+	u8 instance;
+	u32 context_size;
+	u32 mmio_base;
+
+	struct intel_ring *buffer;
+
+	struct i915_timeline timeline;
+
+	struct intel_context *kernel_context; /* pinned */
+	struct intel_context *preempt_context; /* pinned; optional */
+
+	struct drm_i915_gem_object *default_state;
+	void *pinned_default_state;
+
+	/* Rather than have every client wait upon all user interrupts,
+	 * with the herd waking after every interrupt and each doing the
+	 * heavyweight seqno dance, we delegate the task (of being the
+	 * bottom-half of the user interrupt) to the first client. After
+	 * every interrupt, we wake up one client, who does the heavyweight
+	 * coherent seqno read and either goes back to sleep (if incomplete),
+	 * or wakes up all the completed clients in parallel, before then
+	 * transferring the bottom-half status to the next client in the queue.
+	 *
+	 * Compared to walking the entire list of waiters in a single dedicated
+	 * bottom-half, we reduce the latency of the first waiter by avoiding
+	 * a context switch, but incur additional coherent seqno reads when
+	 * following the chain of request breadcrumbs. Since it is most likely
+	 * that we have a single client waiting on each seqno, then reducing
+	 * the overhead of waking that client is much preferred.
+	 */
+	struct intel_breadcrumbs {
+		spinlock_t irq_lock;
+		struct list_head signalers;
+
+		struct irq_work irq_work; /* for use from inside irq_lock */
+
+		unsigned int irq_enabled;
+
+		bool irq_armed;
+	} breadcrumbs;
+
+	struct intel_engine_pmu {
+		/**
+		 * @enable: Bitmask of enable sample events on this engine.
+		 *
+		 * Bits correspond to sample event types, for instance
+		 * I915_SAMPLE_QUEUED is bit 0 etc.
+		 */
+		u32 enable;
+		/**
+		 * @enable_count: Reference count for the enabled samplers.
+		 *
+		 * Index number corresponds to @enum drm_i915_pmu_engine_sample.
+		 */
+		unsigned int enable_count[I915_ENGINE_SAMPLE_COUNT];
+		/**
+		 * @sample: Counter values for sampling events.
+		 *
+		 * Our internal timer stores the current counters in this field.
+		 *
+		 * Index number corresponds to @enum drm_i915_pmu_engine_sample.
+		 */
+		struct i915_pmu_sample sample[I915_ENGINE_SAMPLE_COUNT];
+	} pmu;
+
+	/*
+	 * A pool of objects to use as shadow copies of client batch buffers
+	 * when the command parser is enabled. Prevents the client from
+	 * modifying the batch contents after software parsing.
+	 */
+	struct i915_gem_batch_pool batch_pool;
+
+	struct intel_hw_status_page status_page;
+	struct i915_ctx_workarounds wa_ctx;
+	struct i915_wa_list ctx_wa_list;
+	struct i915_wa_list wa_list;
+	struct i915_wa_list whitelist;
+
+	u32             irq_keep_mask; /* always keep these interrupts */
+	u32		irq_enable_mask; /* bitmask to enable ring interrupt */
+	void		(*irq_enable)(struct intel_engine_cs *engine);
+	void		(*irq_disable)(struct intel_engine_cs *engine);
+
+	int		(*init_hw)(struct intel_engine_cs *engine);
+
+	struct {
+		void (*prepare)(struct intel_engine_cs *engine);
+		void (*reset)(struct intel_engine_cs *engine, bool stalled);
+		void (*finish)(struct intel_engine_cs *engine);
+	} reset;
+
+	void		(*park)(struct intel_engine_cs *engine);
+	void		(*unpark)(struct intel_engine_cs *engine);
+
+	void		(*set_default_submission)(struct intel_engine_cs *engine);
+
+	const struct intel_context_ops *cops;
+
+	int		(*request_alloc)(struct i915_request *rq);
+	int		(*init_context)(struct i915_request *rq);
+
+	int		(*emit_flush)(struct i915_request *request, u32 mode);
+#define EMIT_INVALIDATE	BIT(0)
+#define EMIT_FLUSH	BIT(1)
+#define EMIT_BARRIER	(EMIT_INVALIDATE | EMIT_FLUSH)
+	int		(*emit_bb_start)(struct i915_request *rq,
+					 u64 offset, u32 length,
+					 unsigned int dispatch_flags);
+#define I915_DISPATCH_SECURE BIT(0)
+#define I915_DISPATCH_PINNED BIT(1)
+	int		 (*emit_init_breadcrumb)(struct i915_request *rq);
+	u32		*(*emit_fini_breadcrumb)(struct i915_request *rq,
+						 u32 *cs);
+	unsigned int	emit_fini_breadcrumb_dw;
+
+	/* Pass the request to the hardware queue (e.g. directly into
+	 * the legacy ringbuffer or to the end of an execlist).
+	 *
+	 * This is called from an atomic context with irqs disabled; must
+	 * be irq safe.
+	 */
+	void		(*submit_request)(struct i915_request *rq);
+
+	/*
+	 * Call when the priority on a request has changed and it and its
+	 * dependencies may need rescheduling. Note the request itself may
+	 * not be ready to run!
+	 */
+	void		(*schedule)(struct i915_request *request,
+				    const struct i915_sched_attr *attr);
+
+	/*
+	 * Cancel all requests on the hardware, or queued for execution.
+	 * This should only cancel the ready requests that have been
+	 * submitted to the engine (via the engine->submit_request callback).
+	 * This is called when marking the device as wedged.
+	 */
+	void		(*cancel_requests)(struct intel_engine_cs *engine);
+
+	void		(*cleanup)(struct intel_engine_cs *engine);
+
+	struct intel_engine_execlists execlists;
+
+	/* Contexts are pinned whilst they are active on the GPU. The last
+	 * context executed remains active whilst the GPU is idle - the
+	 * switch away and write to the context object only occurs on the
+	 * next execution.  Contexts are only unpinned on retirement of the
+	 * following request ensuring that we can always write to the object
+	 * on the context switch even after idling. Across suspend, we switch
+	 * to the kernel context and trash it as the save may not happen
+	 * before the hardware is powered down.
+	 */
+	struct intel_context *last_retired_context;
+
+	/* status_notifier: list of callbacks for context-switch changes */
+	struct atomic_notifier_head context_status_notifier;
+
+	struct intel_engine_hangcheck hangcheck;
+
+#define I915_ENGINE_NEEDS_CMD_PARSER BIT(0)
+#define I915_ENGINE_SUPPORTS_STATS   BIT(1)
+#define I915_ENGINE_HAS_PREEMPTION   BIT(2)
+#define I915_ENGINE_HAS_SEMAPHORES   BIT(3)
+	unsigned int flags;
+
+	/*
+	 * Table of commands the command parser needs to know about
+	 * for this engine.
+	 */
+	DECLARE_HASHTABLE(cmd_hash, I915_CMD_HASH_ORDER);
+
+	/*
+	 * Table of registers allowed in commands that read/write registers.
+	 */
+	const struct drm_i915_reg_table *reg_tables;
+	int reg_table_count;
+
+	/*
+	 * Returns the bitmask for the length field of the specified command.
+	 * Return 0 for an unrecognized/invalid command.
+	 *
+	 * If the command parser finds an entry for a command in the engine's
+	 * cmd_tables, it gets the command's length based on the table entry.
+	 * If not, it calls this function to determine the per-engine length
+	 * field encoding for the command (i.e. different opcode ranges use
+	 * certain bits to encode the command length in the header).
+	 */
+	u32 (*get_cmd_length_mask)(u32 cmd_header);
+
+	struct {
+		/**
+		 * @lock: Lock protecting the below fields.
+		 */
+		seqlock_t lock;
+		/**
+		 * @enabled: Reference count indicating number of listeners.
+		 */
+		unsigned int enabled;
+		/**
+		 * @active: Number of contexts currently scheduled in.
+		 */
+		unsigned int active;
+		/**
+		 * @enabled_at: Timestamp when busy stats were enabled.
+		 */
+		ktime_t enabled_at;
+		/**
+		 * @start: Timestamp of the last idle to active transition.
+		 *
+		 * Idle is defined as active == 0, active is active > 0.
+		 */
+		ktime_t start;
+		/**
+		 * @total: Total time this engine was busy.
+		 *
+		 * Accumulated time not counting the most recent block in cases
+		 * where engine is currently busy (active > 0).
+		 */
+		ktime_t total;
+	} stats;
+};
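+
+/*
+ * A sketch of how a reader derives the current busy time from the stats
+ * block above (illustration only; locking via stats.lock elided):
+ *
+ *	ktime_t busy = engine->stats.total;
+ *
+ *	if (engine->stats.active)
+ *		busy = ktime_add(busy, ktime_sub(ktime_get(),
+ *						 engine->stats.start));
+ */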
+
+static inline bool
+intel_engine_needs_cmd_parser(const struct intel_engine_cs *engine)
+{
+	return engine->flags & I915_ENGINE_NEEDS_CMD_PARSER;
+}
+
+static inline bool
+intel_engine_supports_stats(const struct intel_engine_cs *engine)
+{
+	return engine->flags & I915_ENGINE_SUPPORTS_STATS;
+}
+
+static inline bool
+intel_engine_has_preemption(const struct intel_engine_cs *engine)
+{
+	return engine->flags & I915_ENGINE_HAS_PREEMPTION;
+}
+
+static inline bool
+intel_engine_has_semaphores(const struct intel_engine_cs *engine)
+{
+	return engine->flags & I915_ENGINE_HAS_SEMAPHORES;
+}
+
+#define instdone_slice_mask(dev_priv__) \
+	(IS_GEN(dev_priv__, 7) ? \
+	 1 : RUNTIME_INFO(dev_priv__)->sseu.slice_mask)
+
+#define instdone_subslice_mask(dev_priv__) \
+	(IS_GEN(dev_priv__, 7) ? \
+	 1 : RUNTIME_INFO(dev_priv__)->sseu.subslice_mask[0])
+
+#define for_each_instdone_slice_subslice(dev_priv__, slice__, subslice__) \
+	for ((slice__) = 0, (subslice__) = 0; \
+	     (slice__) < I915_MAX_SLICES; \
+	     (subslice__) = ((subslice__) + 1) < I915_MAX_SUBSLICES ? (subslice__) + 1 : 0, \
+	       (slice__) += ((subslice__) == 0)) \
+		for_each_if((BIT(slice__) & instdone_slice_mask(dev_priv__)) && \
+			    (BIT(subslice__) & instdone_subslice_mask(dev_priv__)))
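+
+/*
+ * Example use of the iterator above (a sketch; read_subslice_reg() stands
+ * in for whatever per-subslice accessor the caller has):
+ *
+ *	int slice, subslice;
+ *
+ *	for_each_instdone_slice_subslice(dev_priv, slice, subslice)
+ *		instdone->sampler[slice][subslice] =
+ *			read_subslice_reg(dev_priv, slice, subslice,
+ *					  GEN7_SAMPLER_INSTDONE);
+ */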
+
+#endif /* __INTEL_ENGINE_TYPES__ */
diff --git a/drivers/gpu/drm/i915/intel_gpu_commands.h b/drivers/gpu/drm/i915/intel_gpu_commands.h
index b96a31bc10809c44ecb17414174991758603dc7b..a34ece53a7712a6efe3e1b1db268238a09ece7f4 100644
--- a/drivers/gpu/drm/i915/intel_gpu_commands.h
+++ b/drivers/gpu/drm/i915/intel_gpu_commands.h
@@ -105,8 +105,13 @@
 #define MI_SEMAPHORE_SIGNAL	MI_INSTR(0x1b, 0) /* GEN8+ */
 #define   MI_SEMAPHORE_TARGET(engine)	((engine)<<15)
 #define MI_SEMAPHORE_WAIT	MI_INSTR(0x1c, 2) /* GEN8+ */
-#define   MI_SEMAPHORE_POLL		(1<<15)
-#define   MI_SEMAPHORE_SAD_GTE_SDD	(1<<12)
+#define   MI_SEMAPHORE_POLL		(1 << 15)
+#define   MI_SEMAPHORE_SAD_GT_SDD	(0 << 12)
+#define   MI_SEMAPHORE_SAD_GTE_SDD	(1 << 12)
+#define   MI_SEMAPHORE_SAD_LT_SDD	(2 << 12)
+#define   MI_SEMAPHORE_SAD_LTE_SDD	(3 << 12)
+#define   MI_SEMAPHORE_SAD_EQ_SDD	(4 << 12)
+#define   MI_SEMAPHORE_SAD_NEQ_SDD	(5 << 12)
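+/*
+ * Illustrative MI_SEMAPHORE_WAIT emission (a sketch, not from this patch):
+ * poll until the dword at "addr" (the SAD) is >= the inline value (the
+ * SDD). The 4-dword form follows from MI_INSTR(0x1c, 2) above.
+ *
+ *	*cs++ = MI_SEMAPHORE_WAIT | MI_SEMAPHORE_POLL |
+ *		MI_SEMAPHORE_SAD_GTE_SDD;
+ *	*cs++ = seqno;
+ *	*cs++ = lower_32_bits(addr);
+ *	*cs++ = upper_32_bits(addr);
+ */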
 #define MI_STORE_DWORD_IMM	MI_INSTR(0x20, 1)
 #define MI_STORE_DWORD_IMM_GEN4	MI_INSTR(0x20, 2)
 #define   MI_MEM_VIRTUAL	(1 << 22) /* 945,g33,965 */
diff --git a/drivers/gpu/drm/i915/intel_guc.c b/drivers/gpu/drm/i915/intel_guc.c
index 8660af3fd75566468651d03dd1eecca4429abbe1..8ecb4708745704f5cf38b78cd890f9f595c31c50 100644
--- a/drivers/gpu/drm/i915/intel_guc.c
+++ b/drivers/gpu/drm/i915/intel_guc.c
@@ -203,11 +203,19 @@ int intel_guc_init(struct intel_guc *guc)
 		goto err_log;
 	GEM_BUG_ON(!guc->ads_vma);
 
+	if (HAS_GUC_CT(dev_priv)) {
+		ret = intel_guc_ct_init(&guc->ct);
+		if (ret)
+			goto err_ads;
+	}
+
 	/* We need to notify the guc whenever we change the GGTT */
 	i915_ggtt_enable_guc(dev_priv);
 
 	return 0;
 
+err_ads:
+	intel_guc_ads_destroy(guc);
 err_log:
 	intel_guc_log_destroy(&guc->log);
 err_shared:
@@ -222,6 +230,10 @@ void intel_guc_fini(struct intel_guc *guc)
 	struct drm_i915_private *dev_priv = guc_to_i915(guc);
 
 	i915_ggtt_disable_guc(dev_priv);
+
+	if (HAS_GUC_CT(dev_priv))
+		intel_guc_ct_fini(&guc->ct);
+
 	intel_guc_ads_destroy(guc);
 	intel_guc_log_destroy(&guc->log);
 	guc_shared_data_destroy(guc);
diff --git a/drivers/gpu/drm/i915/intel_guc.h b/drivers/gpu/drm/i915/intel_guc.h
index 744220296653202fcc40c76c809c86fbf2d55c77..77ec1bd4df5ab504e11fdb4e51d57c5e9cdf9bcb 100644
--- a/drivers/gpu/drm/i915/intel_guc.h
+++ b/drivers/gpu/drm/i915/intel_guc.h
@@ -32,6 +32,7 @@
 #include "intel_guc_log.h"
 #include "intel_guc_reg.h"
 #include "intel_uc_fw.h"
+#include "i915_utils.h"
 #include "i915_vma.h"
 
 struct guc_preempt_work {
diff --git a/drivers/gpu/drm/i915/intel_guc_ads.c b/drivers/gpu/drm/i915/intel_guc_ads.c
index f0db62887f5098f3a72433b6f838cb0e2744babd..bec62f34b15a19c92d76998ce156e32386a2baa5 100644
--- a/drivers/gpu/drm/i915/intel_guc_ads.c
+++ b/drivers/gpu/drm/i915/intel_guc_ads.c
@@ -121,8 +121,7 @@ int intel_guc_ads_create(struct intel_guc *guc)
 	 * to find it. Note that we have to skip our header (1 page),
 	 * because our GuC shared data is there.
 	 */
-	kernel_ctx_vma = to_intel_context(dev_priv->kernel_context,
-					  dev_priv->engine[RCS])->state;
+	kernel_ctx_vma = dev_priv->engine[RCS0]->kernel_context->state;
 	blob->ads.golden_context_lrca =
 		intel_guc_ggtt_offset(guc, kernel_ctx_vma) + skipped_offset;
 
diff --git a/drivers/gpu/drm/i915/intel_guc_ct.c b/drivers/gpu/drm/i915/intel_guc_ct.c
index a52883e9146f273237bf9c259c3bcfa6f224a3ea..79ddb80883116a66e51aec5686f4c9690776c5a1 100644
--- a/drivers/gpu/drm/i915/intel_guc_ct.c
+++ b/drivers/gpu/drm/i915/intel_guc_ct.c
@@ -140,11 +140,6 @@ static int guc_action_deregister_ct_buffer(struct intel_guc *guc,
 	return err;
 }
 
-static bool ctch_is_open(struct intel_guc_ct_channel *ctch)
-{
-	return ctch->vma != NULL;
-}
-
 static int ctch_init(struct intel_guc *guc,
 		     struct intel_guc_ct_channel *ctch)
 {
@@ -214,25 +209,21 @@ static int ctch_init(struct intel_guc *guc,
 static void ctch_fini(struct intel_guc *guc,
 		      struct intel_guc_ct_channel *ctch)
 {
+	GEM_BUG_ON(ctch->enabled);
+
 	i915_vma_unpin_and_release(&ctch->vma, I915_VMA_RELEASE_MAP);
 }
 
-static int ctch_open(struct intel_guc *guc,
-		     struct intel_guc_ct_channel *ctch)
+static int ctch_enable(struct intel_guc *guc,
+		       struct intel_guc_ct_channel *ctch)
 {
 	u32 base;
 	int err;
 	int i;
 
-	CT_DEBUG_DRIVER("CT: channel %d reopen=%s\n",
-			ctch->owner, yesno(ctch_is_open(ctch)));
+	GEM_BUG_ON(!ctch->vma);
 
-	if (!ctch->vma) {
-		err = ctch_init(guc, ctch);
-		if (unlikely(err))
-			goto err_out;
-		GEM_BUG_ON(!ctch->vma);
-	}
+	GEM_BUG_ON(ctch->enabled);
 
 	/* vma should be already allocated and map'ed */
 	base = intel_guc_ggtt_offset(guc, ctch->vma);
@@ -255,7 +246,7 @@ static int ctch_open(struct intel_guc *guc,
 					    base + PAGE_SIZE/4 * CTB_RECV,
 					    INTEL_GUC_CT_BUFFER_TYPE_RECV);
 	if (unlikely(err))
-		goto err_fini;
+		goto err_out;
 
 	err = guc_action_register_ct_buffer(guc,
 					    base + PAGE_SIZE/4 * CTB_SEND,
@@ -263,23 +254,25 @@ static int ctch_open(struct intel_guc *guc,
 	if (unlikely(err))
 		goto err_deregister;
 
+	ctch->enabled = true;
+
 	return 0;
 
 err_deregister:
 	guc_action_deregister_ct_buffer(guc,
 					ctch->owner,
 					INTEL_GUC_CT_BUFFER_TYPE_RECV);
-err_fini:
-	ctch_fini(guc, ctch);
 err_out:
 	DRM_ERROR("CT: can't open channel %d; err=%d\n", ctch->owner, err);
 	return err;
 }
 
-static void ctch_close(struct intel_guc *guc,
-		       struct intel_guc_ct_channel *ctch)
+static void ctch_disable(struct intel_guc *guc,
+			 struct intel_guc_ct_channel *ctch)
 {
-	GEM_BUG_ON(!ctch_is_open(ctch));
+	GEM_BUG_ON(!ctch->enabled);
+
+	ctch->enabled = false;
 
 	guc_action_deregister_ct_buffer(guc,
 					ctch->owner,
@@ -287,7 +280,6 @@ static void ctch_close(struct intel_guc *guc,
 	guc_action_deregister_ct_buffer(guc,
 					ctch->owner,
 					INTEL_GUC_CT_BUFFER_TYPE_RECV);
-	ctch_fini(guc, ctch);
 }
 
 static u32 ctch_get_next_fence(struct intel_guc_ct_channel *ctch)
@@ -481,7 +473,7 @@ static int ctch_send(struct intel_guc_ct *ct,
 	u32 fence;
 	int err;
 
-	GEM_BUG_ON(!ctch_is_open(ctch));
+	GEM_BUG_ON(!ctch->enabled);
 	GEM_BUG_ON(!len);
 	GEM_BUG_ON(len & ~GUC_CT_MSG_LEN_MASK);
 	GEM_BUG_ON(!response_buf && response_buf_size);
@@ -817,7 +809,7 @@ static void ct_process_host_channel(struct intel_guc_ct *ct)
 	u32 msg[GUC_CT_MSG_LEN_MASK + 1]; /* one extra dw for the header */
 	int err = 0;
 
-	if (!ctch_is_open(ctch))
+	if (!ctch->enabled)
 		return;
 
 	do {
@@ -848,6 +840,51 @@ static void intel_guc_to_host_event_handler_ct(struct intel_guc *guc)
 	ct_process_host_channel(ct);
 }
 
+/**
+ * intel_guc_ct_init - Init CT communication
+ * @ct: pointer to CT struct
+ *
+ * Allocate memory required for communication via
+ * the CT channel.
+ *
+ * Shall only be called for platforms with HAS_GUC_CT.
+ *
+ * Return: 0 on success, a negative errno code on failure.
+ */
+int intel_guc_ct_init(struct intel_guc_ct *ct)
+{
+	struct intel_guc *guc = ct_to_guc(ct);
+	struct intel_guc_ct_channel *ctch = &ct->host_channel;
+	int err;
+
+	err = ctch_init(guc, ctch);
+	if (unlikely(err)) {
+		DRM_ERROR("CT: can't open channel %d; err=%d\n",
+			  ctch->owner, err);
+		return err;
+	}
+
+	GEM_BUG_ON(!ctch->vma);
+	return 0;
+}
+
+/**
+ * intel_guc_ct_fini - Fini CT communication
+ * @ct: pointer to CT struct
+ *
+ * Deallocate memory required for communication via
+ * the CT channel.
+ *
+ * Shall only be called for platforms with HAS_GUC_CT.
+ */
+void intel_guc_ct_fini(struct intel_guc_ct *ct)
+{
+	struct intel_guc *guc = ct_to_guc(ct);
+	struct intel_guc_ct_channel *ctch = &ct->host_channel;
+
+	ctch_fini(guc, ctch);
+}
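+
+/*
+ * Sketch of the resulting lifecycle, as implied by the pairing above and
+ * the callers added in intel_guc.c (illustration only):
+ *
+ *	intel_guc_ct_init(ct);		once, at GuC init
+ *	intel_guc_ct_enable(ct);	on each GuC (re)load
+ *	...
+ *	intel_guc_ct_disable(ct);
+ *	intel_guc_ct_fini(ct);		once, at GuC fini
+ */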
+
 /**
  * intel_guc_ct_enable - Enable buffer based command transport.
  * @ct: pointer to CT struct
@@ -865,7 +902,10 @@ int intel_guc_ct_enable(struct intel_guc_ct *ct)
 
 	GEM_BUG_ON(!HAS_GUC_CT(i915));
 
-	err = ctch_open(guc, ctch);
+	if (ctch->enabled)
+		return 0;
+
+	err = ctch_enable(guc, ctch);
 	if (unlikely(err))
 		return err;
 
@@ -890,10 +930,10 @@ void intel_guc_ct_disable(struct intel_guc_ct *ct)
 
 	GEM_BUG_ON(!HAS_GUC_CT(i915));
 
-	if (!ctch_is_open(ctch))
+	if (!ctch->enabled)
 		return;
 
-	ctch_close(guc, ctch);
+	ctch_disable(guc, ctch);
 
 	/* Disable send */
 	guc->send = intel_guc_send_nop;
diff --git a/drivers/gpu/drm/i915/intel_guc_ct.h b/drivers/gpu/drm/i915/intel_guc_ct.h
index d774895ab143cd56c08a6f13b350abb7245812a6..f5e7f066330427275990807c90e88958a8ab28fd 100644
--- a/drivers/gpu/drm/i915/intel_guc_ct.h
+++ b/drivers/gpu/drm/i915/intel_guc_ct.h
@@ -66,6 +66,7 @@ struct intel_guc_ct_channel {
 	struct intel_guc_ct_buffer ctbs[2];
 	u32 owner;
 	u32 next_fence;
+	bool enabled;
 };
 
 /** Holds all command transport channels.
@@ -90,6 +91,8 @@ struct intel_guc_ct {
 };
 
 void intel_guc_ct_init_early(struct intel_guc_ct *ct);
+int intel_guc_ct_init(struct intel_guc_ct *ct);
+void intel_guc_ct_fini(struct intel_guc_ct *ct);
 int intel_guc_ct_enable(struct intel_guc_ct *ct);
 void intel_guc_ct_disable(struct intel_guc_ct *ct);
 
diff --git a/drivers/gpu/drm/i915/intel_guc_log.c b/drivers/gpu/drm/i915/intel_guc_log.c
index 806fdfd7c78a7acf9777aa88cb72d1beb9c09fd4..7146524264dd3e35bd761270eafdabd985ab0865 100644
--- a/drivers/gpu/drm/i915/intel_guc_log.c
+++ b/drivers/gpu/drm/i915/intel_guc_log.c
@@ -620,7 +620,12 @@ void intel_guc_log_relay_flush(struct intel_guc_log *log)
 
 void intel_guc_log_relay_close(struct intel_guc_log *log)
 {
+	struct intel_guc *guc = log_to_guc(log);
+	struct drm_i915_private *i915 = guc_to_i915(guc);
+
 	guc_log_disable_flush_events(log);
+	synchronize_irq(i915->drm.irq);
+
 	flush_work(&log->relay.flush_work);
 
 	mutex_lock(&log->relay.lock);
diff --git a/drivers/gpu/drm/i915/intel_guc_submission.c b/drivers/gpu/drm/i915/intel_guc_submission.c
index 8bc8aa54aa358b335e1a1e98d74f9d3d59323259..c4ad73980988165e4e0d6d57e4353a32c946ed02 100644
--- a/drivers/gpu/drm/i915/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/intel_guc_submission.c
@@ -382,7 +382,7 @@ static void guc_stage_desc_init(struct intel_guc_client *client)
 	desc->db_id = client->doorbell_id;
 
 	for_each_engine_masked(engine, dev_priv, client->engines, tmp) {
-		struct intel_context *ce = to_intel_context(ctx, engine);
+		struct intel_context *ce = intel_context_lookup(ctx, engine);
 		u32 guc_engine_id = engine->guc_id;
 		struct guc_execlist_context *lrc = &desc->lrc[guc_engine_id];
 
@@ -393,7 +393,7 @@ static void guc_stage_desc_init(struct intel_guc_client *client)
 		 * for now who owns a GuC client. But for future owner of GuC
 		 * client, need to make sure lrc is pinned prior to enter here.
 		 */
-		if (!ce->state)
+		if (!ce || !ce->state)
 			break;	/* XXX: continue? */
 
 		/*
@@ -535,7 +535,7 @@ static void guc_add_request(struct intel_guc *guc, struct i915_request *rq)
 	spin_lock(&client->wq_lock);
 
 	guc_wq_item_append(client, engine->guc_id, ctx_desc,
-			   ring_tail, rq->global_seqno);
+			   ring_tail, rq->fence.seqno);
 	guc_ring_doorbell(client);
 
 	client->submissions[engine->id] += 1;
@@ -567,7 +567,7 @@ static void inject_preempt_context(struct work_struct *work)
 					     preempt_work[engine->id]);
 	struct intel_guc_client *client = guc->preempt_client;
 	struct guc_stage_desc *stage_desc = __get_stage_desc(client);
-	struct intel_context *ce = to_intel_context(client->owner, engine);
+	struct intel_context *ce = intel_context_lookup(client->owner, engine);
 	u32 data[7];
 
 	if (!ce->ring->emit) { /* recreate upon load/resume */
@@ -575,7 +575,7 @@ static void inject_preempt_context(struct work_struct *work)
 		u32 *cs;
 
 		cs = ce->ring->vaddr;
-		if (engine->id == RCS) {
+		if (engine->class == RENDER_CLASS) {
 			cs = gen8_emit_ggtt_write_rcs(cs,
 						      GUC_PREEMPT_FINISHED,
 						      addr,
@@ -583,7 +583,8 @@ static void inject_preempt_context(struct work_struct *work)
 		} else {
 			cs = gen8_emit_ggtt_write(cs,
 						  GUC_PREEMPT_FINISHED,
-						  addr);
+						  addr,
+						  0);
 			*cs++ = MI_NOOP;
 			*cs++ = MI_NOOP;
 		}
@@ -720,7 +721,7 @@ static inline int rq_prio(const struct i915_request *rq)
 
 static inline int port_prio(const struct execlist_port *port)
 {
-	return rq_prio(port_request(port));
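+	/* Inflight requests are not preemptible by others of equal priority */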
+	return rq_prio(port_request(port)) | __NO_PREEMPTION;
 }
 
 static bool __guc_dequeue(struct intel_engine_cs *engine)
@@ -781,8 +782,7 @@ static bool __guc_dequeue(struct intel_engine_cs *engine)
 		}
 
 		rb_erase_cached(&p->node, &execlists->queue);
-		if (p->priority != I915_PRIORITY_NORMAL)
-			kmem_cache_free(engine->i915->priorities, p);
+		i915_priolist_free(p);
 	}
 done:
 	execlists->queue_priority_hint =
@@ -1031,7 +1031,7 @@ static int guc_clients_create(struct intel_guc *guc)
 	GEM_BUG_ON(guc->preempt_client);
 
 	client = guc_client_alloc(dev_priv,
-				  INTEL_INFO(dev_priv)->ring_mask,
+				  INTEL_INFO(dev_priv)->engine_mask,
 				  GUC_CLIENT_PRIORITY_KMD_NORMAL,
 				  dev_priv->kernel_context);
 	if (IS_ERR(client)) {
@@ -1042,7 +1042,7 @@ static int guc_clients_create(struct intel_guc *guc)
 
 	if (dev_priv->preempt_context) {
 		client = guc_client_alloc(dev_priv,
-					  INTEL_INFO(dev_priv)->ring_mask,
+					  INTEL_INFO(dev_priv)->engine_mask,
 					  GUC_CLIENT_PRIORITY_KMD_HIGH,
 					  dev_priv->preempt_context);
 		if (IS_ERR(client)) {
diff --git a/drivers/gpu/drm/i915/intel_hangcheck.c b/drivers/gpu/drm/i915/intel_hangcheck.c
index a219c796e56d9c1f718e5e4d434ede8225a4823c..57ed49dc19c4f88ab0d4e24cb239dc367cfe08d7 100644
--- a/drivers/gpu/drm/i915/intel_hangcheck.c
+++ b/drivers/gpu/drm/i915/intel_hangcheck.c
@@ -56,7 +56,7 @@ static bool subunits_stuck(struct intel_engine_cs *engine)
 	int slice;
 	int subslice;
 
-	if (engine->id != RCS)
+	if (engine->id != RCS0)
 		return true;
 
 	intel_engine_get_instdone(engine, &instdone);
@@ -120,7 +120,7 @@ engine_stuck(struct intel_engine_cs *engine, u64 acthd)
 	 */
 	tmp = I915_READ_CTL(engine);
 	if (tmp & RING_WAIT) {
-		i915_handle_error(dev_priv, BIT(engine->id), 0,
+		i915_handle_error(dev_priv, engine->mask, 0,
 				  "stuck wait on %s", engine->name);
 		I915_WRITE_CTL(engine, tmp);
 		return ENGINE_WAIT_KICK;
@@ -133,21 +133,21 @@ static void hangcheck_load_sample(struct intel_engine_cs *engine,
 				  struct hangcheck *hc)
 {
 	hc->acthd = intel_engine_get_active_head(engine);
-	hc->seqno = intel_engine_get_seqno(engine);
+	hc->seqno = intel_engine_get_hangcheck_seqno(engine);
 }
 
 static void hangcheck_store_sample(struct intel_engine_cs *engine,
 				   const struct hangcheck *hc)
 {
 	engine->hangcheck.acthd = hc->acthd;
-	engine->hangcheck.seqno = hc->seqno;
+	engine->hangcheck.last_seqno = hc->seqno;
 }
 
 static enum intel_engine_hangcheck_action
 hangcheck_get_action(struct intel_engine_cs *engine,
 		     const struct hangcheck *hc)
 {
-	if (engine->hangcheck.seqno != hc->seqno)
+	if (engine->hangcheck.last_seqno != hc->seqno)
 		return ENGINE_ACTIVE_SEQNO;
 
 	if (intel_engine_is_idle(engine))
@@ -263,7 +263,7 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
 	if (!READ_ONCE(dev_priv->gt.awake))
 		return;
 
-	if (i915_terminally_wedged(&dev_priv->gpu_error))
+	if (i915_terminally_wedged(dev_priv))
 		return;
 
 	/* As enabling the GPU requires fairly extensive mmio access,
@@ -282,13 +282,13 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
 		hangcheck_store_sample(engine, &hc);
 
 		if (hc.stalled) {
-			hung |= intel_engine_flag(engine);
+			hung |= engine->mask;
 			if (hc.action != ENGINE_DEAD)
-				stuck |= intel_engine_flag(engine);
+				stuck |= engine->mask;
 		}
 
 		if (hc.wedged)
-			wedged |= intel_engine_flag(engine);
+			wedged |= engine->mask;
 	}
 
 	if (GEM_SHOW_DEBUG() && (hung | stuck)) {
diff --git a/drivers/gpu/drm/i915/intel_hdcp.c b/drivers/gpu/drm/i915/intel_hdcp.c
index ce7ba3a9c0002c7fd65b5378e7d82be4fc7afd92..9ce09f67776d1f3c2a3dd0ec95316c8d3c02f7a1 100644
--- a/drivers/gpu/drm/i915/intel_hdcp.c
+++ b/drivers/gpu/drm/i915/intel_hdcp.c
@@ -7,14 +7,17 @@
  */
 
 #include <drm/drm_hdcp.h>
+#include <drm/i915_component.h>
 #include <linux/i2c.h>
 #include <linux/random.h>
+#include <linux/component.h>
 
 #include "intel_drv.h"
 #include "i915_reg.h"
 
 #define KEY_LOAD_TRIES	5
 #define ENCRYPT_STATUS_CHANGE_TIMEOUT_MS	50
+#define HDCP2_LC_RETRY_CNT			3
 
 static
 bool intel_hdcp_is_ksv_valid(u8 *ksv)
@@ -72,6 +75,52 @@ bool intel_hdcp_capable(struct intel_connector *connector)
 	return capable;
 }
 
+/* Is HDCP2.2 supported by both the platform and the sink? */
+static bool intel_hdcp2_capable(struct intel_connector *connector)
+{
+	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+	struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
+	struct intel_hdcp *hdcp = &connector->hdcp;
+	bool capable = false;
+
+	/* I915 support for HDCP2.2 */
+	if (!hdcp->hdcp2_supported)
+		return false;
+
+	/* The MEI component must be bound and its ops available */
+	mutex_lock(&dev_priv->hdcp_comp_mutex);
+	if (!dev_priv->hdcp_comp_added || !dev_priv->hdcp_master) {
+		mutex_unlock(&dev_priv->hdcp_comp_mutex);
+		return false;
+	}
+	mutex_unlock(&dev_priv->hdcp_comp_mutex);
+
+	/* Sink's capability for HDCP2.2 */
+	hdcp->shim->hdcp_2_2_capable(intel_dig_port, &capable);
+
+	return capable;
+}
+
+static inline bool intel_hdcp_in_use(struct intel_connector *connector)
+{
+	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+	enum port port = connector->encoder->port;
+	u32 reg;
+
+	reg = I915_READ(PORT_HDCP_STATUS(port));
+	return reg & HDCP_STATUS_ENC;
+}
+
+static inline bool intel_hdcp2_in_use(struct intel_connector *connector)
+{
+	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+	enum port port = connector->encoder->port;
+	u32 reg;
+
+	reg = I915_READ(HDCP2_STATUS_DDI(port));
+	return reg & LINK_ENCRYPTION_STATUS;
+}
+
 static int intel_hdcp_poll_ksv_fifo(struct intel_digital_port *intel_dig_port,
 				    const struct intel_hdcp_shim *shim)
 {
@@ -666,6 +715,7 @@ static int _intel_hdcp_disable(struct intel_connector *connector)
 	DRM_DEBUG_KMS("[%s:%d] HDCP is being disabled...\n",
 		      connector->base.name, connector->base.base.id);
 
+	hdcp->hdcp_encrypted = false;
 	I915_WRITE(PORT_HDCP_CONF(port), 0);
 	if (intel_wait_for_register(dev_priv, PORT_HDCP_STATUS(port), ~0, 0,
 				    ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
@@ -711,8 +761,10 @@ static int _intel_hdcp_enable(struct intel_connector *connector)
 	/* In case of authentication failure, the HDCP spec expects reauth. */
 	for (i = 0; i < tries; i++) {
 		ret = intel_hdcp_auth(conn_to_dig_port(connector), hdcp->shim);
-		if (!ret)
+		if (!ret) {
+			hdcp->hdcp_encrypted = true;
 			return 0;
+		}
 
 		DRM_DEBUG_KMS("HDCP Auth failure (%d)\n", ret);
 
@@ -730,16 +782,64 @@ struct intel_connector *intel_hdcp_to_connector(struct intel_hdcp *hdcp)
 	return container_of(hdcp, struct intel_connector, hdcp);
 }
 
-static void intel_hdcp_check_work(struct work_struct *work)
+/* Implements Part 3 of the HDCP authorization procedure */
+static int intel_hdcp_check_link(struct intel_connector *connector)
 {
-	struct intel_hdcp *hdcp = container_of(to_delayed_work(work),
-					       struct intel_hdcp,
-					       check_work);
-	struct intel_connector *connector = intel_hdcp_to_connector(hdcp);
+	struct intel_hdcp *hdcp = &connector->hdcp;
+	struct drm_i915_private *dev_priv = connector->base.dev->dev_private;
+	struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
+	enum port port = intel_dig_port->base.port;
+	int ret = 0;
 
-	if (!intel_hdcp_check_link(connector))
-		schedule_delayed_work(&hdcp->check_work,
-				      DRM_HDCP_CHECK_PERIOD_MS);
+	mutex_lock(&hdcp->mutex);
+
+	/* The link check is valid only while HDCP1.4 encryption is enabled */
+	if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED ||
+	    !hdcp->hdcp_encrypted) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (WARN_ON(!intel_hdcp_in_use(connector))) {
+		DRM_ERROR("%s:%d HDCP link stopped encryption,%x\n",
+			  connector->base.name, connector->base.base.id,
+			  I915_READ(PORT_HDCP_STATUS(port)));
+		ret = -ENXIO;
+		hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+		schedule_work(&hdcp->prop_work);
+		goto out;
+	}
+
+	if (hdcp->shim->check_link(intel_dig_port)) {
+		if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
+			hdcp->value = DRM_MODE_CONTENT_PROTECTION_ENABLED;
+			schedule_work(&hdcp->prop_work);
+		}
+		goto out;
+	}
+
+	DRM_DEBUG_KMS("[%s:%d] HDCP link failed, retrying authentication\n",
+		      connector->base.name, connector->base.base.id);
+
+	ret = _intel_hdcp_disable(connector);
+	if (ret) {
+		DRM_ERROR("Failed to disable hdcp (%d)\n", ret);
+		hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+		schedule_work(&hdcp->prop_work);
+		goto out;
+	}
+
+	ret = _intel_hdcp_enable(connector);
+	if (ret) {
+		DRM_ERROR("Failed to enable hdcp (%d)\n", ret);
+		hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+		schedule_work(&hdcp->prop_work);
+		goto out;
+	}
+
+out:
+	mutex_unlock(&hdcp->mutex);
+	return ret;
 }
 
 static void intel_hdcp_prop_work(struct work_struct *work)
@@ -773,155 +873,1071 @@ bool is_hdcp_supported(struct drm_i915_private *dev_priv, enum port port)
 	return INTEL_GEN(dev_priv) >= 9 && port < PORT_E;
 }
 
-int intel_hdcp_init(struct intel_connector *connector,
-		    const struct intel_hdcp_shim *shim)
+static int
+hdcp2_prepare_ake_init(struct intel_connector *connector,
+		       struct hdcp2_ake_init *ake_data)
 {
-	struct intel_hdcp *hdcp = &connector->hdcp;
+	struct hdcp_port_data *data = &connector->hdcp.port_data;
+	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+	struct i915_hdcp_comp_master *comp;
 	int ret;
 
-	ret = drm_connector_attach_content_protection_property(
-			&connector->base);
+	mutex_lock(&dev_priv->hdcp_comp_mutex);
+	comp = dev_priv->hdcp_master;
+
+	if (!comp || !comp->ops) {
+		mutex_unlock(&dev_priv->hdcp_comp_mutex);
+		return -EINVAL;
+	}
+
+	ret = comp->ops->initiate_hdcp2_session(comp->mei_dev, data, ake_data);
 	if (ret)
-		return ret;
+		DRM_DEBUG_KMS("Prepare_ake_init failed. %d\n", ret);
+	mutex_unlock(&dev_priv->hdcp_comp_mutex);
 
-	hdcp->shim = shim;
-	mutex_init(&hdcp->mutex);
-	INIT_DELAYED_WORK(&hdcp->check_work, intel_hdcp_check_work);
-	INIT_WORK(&hdcp->prop_work, intel_hdcp_prop_work);
-	return 0;
+	return ret;
 }
 
-int intel_hdcp_enable(struct intel_connector *connector)
+static int
+hdcp2_verify_rx_cert_prepare_km(struct intel_connector *connector,
+				struct hdcp2_ake_send_cert *rx_cert,
+				bool *paired,
+				struct hdcp2_ake_no_stored_km *ek_pub_km,
+				size_t *msg_sz)
 {
-	struct intel_hdcp *hdcp = &connector->hdcp;
+	struct hdcp_port_data *data = &connector->hdcp.port_data;
+	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+	struct i915_hdcp_comp_master *comp;
 	int ret;
 
-	if (!hdcp->shim)
-		return -ENOENT;
+	mutex_lock(&dev_priv->hdcp_comp_mutex);
+	comp = dev_priv->hdcp_master;
 
-	mutex_lock(&hdcp->mutex);
+	if (!comp || !comp->ops) {
+		mutex_unlock(&dev_priv->hdcp_comp_mutex);
+		return -EINVAL;
+	}
 
-	ret = _intel_hdcp_enable(connector);
-	if (ret)
-		goto out;
+	ret = comp->ops->verify_receiver_cert_prepare_km(comp->mei_dev, data,
+							 rx_cert, paired,
+							 ek_pub_km, msg_sz);
+	if (ret < 0)
+		DRM_DEBUG_KMS("Verify rx_cert failed. %d\n", ret);
+	mutex_unlock(&dev_priv->hdcp_comp_mutex);
 
-	hdcp->value = DRM_MODE_CONTENT_PROTECTION_ENABLED;
-	schedule_work(&hdcp->prop_work);
-	schedule_delayed_work(&hdcp->check_work,
-			      DRM_HDCP_CHECK_PERIOD_MS);
-out:
-	mutex_unlock(&hdcp->mutex);
 	return ret;
 }
 
-int intel_hdcp_disable(struct intel_connector *connector)
+static int hdcp2_verify_hprime(struct intel_connector *connector,
+			       struct hdcp2_ake_send_hprime *rx_hprime)
 {
-	struct intel_hdcp *hdcp = &connector->hdcp;
-	int ret = 0;
+	struct hdcp_port_data *data = &connector->hdcp.port_data;
+	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+	struct i915_hdcp_comp_master *comp;
+	int ret;
 
-	if (!hdcp->shim)
-		return -ENOENT;
+	mutex_lock(&dev_priv->hdcp_comp_mutex);
+	comp = dev_priv->hdcp_master;
 
-	mutex_lock(&hdcp->mutex);
+	if (!comp || !comp->ops) {
+		mutex_unlock(&dev_priv->hdcp_comp_mutex);
+		return -EINVAL;
+	}
 
-	if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
-		hdcp->value = DRM_MODE_CONTENT_PROTECTION_UNDESIRED;
-		ret = _intel_hdcp_disable(connector);
+	ret = comp->ops->verify_hprime(comp->mei_dev, data, rx_hprime);
+	if (ret < 0)
+		DRM_DEBUG_KMS("Verify hprime failed. %d\n", ret);
+	mutex_unlock(&dev_priv->hdcp_comp_mutex);
+
+	return ret;
+}
+
+static int
+hdcp2_store_pairing_info(struct intel_connector *connector,
+			 struct hdcp2_ake_send_pairing_info *pairing_info)
+{
+	struct hdcp_port_data *data = &connector->hdcp.port_data;
+	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+	struct i915_hdcp_comp_master *comp;
+	int ret;
+
+	mutex_lock(&dev_priv->hdcp_comp_mutex);
+	comp = dev_priv->hdcp_master;
+
+	if (!comp || !comp->ops) {
+		mutex_unlock(&dev_priv->hdcp_comp_mutex);
+		return -EINVAL;
 	}
 
-	mutex_unlock(&hdcp->mutex);
-	cancel_delayed_work_sync(&hdcp->check_work);
+	ret = comp->ops->store_pairing_info(comp->mei_dev, data, pairing_info);
+	if (ret < 0)
+		DRM_DEBUG_KMS("Store pairing info failed. %d\n", ret);
+	mutex_unlock(&dev_priv->hdcp_comp_mutex);
+
 	return ret;
 }
 
-void intel_hdcp_atomic_check(struct drm_connector *connector,
-			     struct drm_connector_state *old_state,
-			     struct drm_connector_state *new_state)
+static int
+hdcp2_prepare_lc_init(struct intel_connector *connector,
+		      struct hdcp2_lc_init *lc_init)
 {
-	u64 old_cp = old_state->content_protection;
-	u64 new_cp = new_state->content_protection;
-	struct drm_crtc_state *crtc_state;
+	struct hdcp_port_data *data = &connector->hdcp.port_data;
+	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+	struct i915_hdcp_comp_master *comp;
+	int ret;
 
-	if (!new_state->crtc) {
-		/*
-		 * If the connector is being disabled with CP enabled, mark it
-		 * desired so it's re-enabled when the connector is brought back
-		 */
-		if (old_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED)
-			new_state->content_protection =
-				DRM_MODE_CONTENT_PROTECTION_DESIRED;
-		return;
+	mutex_lock(&dev_priv->hdcp_comp_mutex);
+	comp = dev_priv->hdcp_master;
+
+	if (!comp || !comp->ops) {
+		mutex_unlock(&dev_priv->hdcp_comp_mutex);
+		return -EINVAL;
 	}
 
-	/*
-	 * Nothing to do if the state didn't change, or HDCP was activated since
-	 * the last commit
-	 */
-	if (old_cp == new_cp ||
-	    (old_cp == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
-	     new_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED))
-		return;
+	ret = comp->ops->initiate_locality_check(comp->mei_dev, data, lc_init);
+	if (ret < 0)
+		DRM_DEBUG_KMS("Prepare lc_init failed. %d\n", ret);
+	mutex_unlock(&dev_priv->hdcp_comp_mutex);
 
-	crtc_state = drm_atomic_get_new_crtc_state(new_state->state,
-						   new_state->crtc);
-	crtc_state->mode_changed = true;
+	return ret;
 }
 
-/* Implements Part 3 of the HDCP authorization procedure */
-int intel_hdcp_check_link(struct intel_connector *connector)
+static int
+hdcp2_verify_lprime(struct intel_connector *connector,
+		    struct hdcp2_lc_send_lprime *rx_lprime)
 {
-	struct intel_hdcp *hdcp = &connector->hdcp;
-	struct drm_i915_private *dev_priv = connector->base.dev->dev_private;
-	struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
-	enum port port = intel_dig_port->base.port;
-	int ret = 0;
+	struct hdcp_port_data *data = &connector->hdcp.port_data;
+	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+	struct i915_hdcp_comp_master *comp;
+	int ret;
 
-	if (!hdcp->shim)
-		return -ENOENT;
+	mutex_lock(&dev_priv->hdcp_comp_mutex);
+	comp = dev_priv->hdcp_master;
 
-	mutex_lock(&hdcp->mutex);
+	if (!comp || !comp->ops) {
+		mutex_unlock(&dev_priv->hdcp_comp_mutex);
+		return -EINVAL;
+	}
 
-	if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
-		goto out;
+	ret = comp->ops->verify_lprime(comp->mei_dev, data, rx_lprime);
+	if (ret < 0)
+		DRM_DEBUG_KMS("Verify L_Prime failed. %d\n", ret);
+	mutex_unlock(&dev_priv->hdcp_comp_mutex);
 
-	if (!(I915_READ(PORT_HDCP_STATUS(port)) & HDCP_STATUS_ENC)) {
-		DRM_ERROR("%s:%d HDCP check failed: link is not encrypted,%x\n",
-			  connector->base.name, connector->base.base.id,
-			  I915_READ(PORT_HDCP_STATUS(port)));
-		ret = -ENXIO;
-		hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
-		schedule_work(&hdcp->prop_work);
-		goto out;
+	return ret;
+}
+
+static int hdcp2_prepare_skey(struct intel_connector *connector,
+			      struct hdcp2_ske_send_eks *ske_data)
+{
+	struct hdcp_port_data *data = &connector->hdcp.port_data;
+	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+	struct i915_hdcp_comp_master *comp;
+	int ret;
+
+	mutex_lock(&dev_priv->hdcp_comp_mutex);
+	comp = dev_priv->hdcp_master;
+
+	if (!comp || !comp->ops) {
+		mutex_unlock(&dev_priv->hdcp_comp_mutex);
+		return -EINVAL;
 	}
 
-	if (hdcp->shim->check_link(intel_dig_port)) {
-		if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
-			hdcp->value = DRM_MODE_CONTENT_PROTECTION_ENABLED;
-			schedule_work(&hdcp->prop_work);
-		}
-		goto out;
+	ret = comp->ops->get_session_key(comp->mei_dev, data, ske_data);
+	if (ret < 0)
+		DRM_DEBUG_KMS("Get session key failed. %d\n", ret);
+	mutex_unlock(&dev_priv->hdcp_comp_mutex);
+
+	return ret;
+}
+
+static int
+hdcp2_verify_rep_topology_prepare_ack(struct intel_connector *connector,
+				      struct hdcp2_rep_send_receiverid_list
+								*rep_topology,
+				      struct hdcp2_rep_send_ack *rep_send_ack)
+{
+	struct hdcp_port_data *data = &connector->hdcp.port_data;
+	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+	struct i915_hdcp_comp_master *comp;
+	int ret;
+
+	mutex_lock(&dev_priv->hdcp_comp_mutex);
+	comp = dev_priv->hdcp_master;
+
+	if (!comp || !comp->ops) {
+		mutex_unlock(&dev_priv->hdcp_comp_mutex);
+		return -EINVAL;
 	}
 
-	DRM_DEBUG_KMS("[%s:%d] HDCP link failed, retrying authentication\n",
-		      connector->base.name, connector->base.base.id);
+	ret = comp->ops->repeater_check_flow_prepare_ack(comp->mei_dev, data,
+							 rep_topology,
+							 rep_send_ack);
+	if (ret < 0)
+		DRM_DEBUG_KMS("Verify rep topology failed. %d\n", ret);
+	mutex_unlock(&dev_priv->hdcp_comp_mutex);
 
-	ret = _intel_hdcp_disable(connector);
-	if (ret) {
-		DRM_ERROR("Failed to disable hdcp (%d)\n", ret);
-		hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
-		schedule_work(&hdcp->prop_work);
-		goto out;
+	return ret;
+}
+
+static int
+hdcp2_verify_mprime(struct intel_connector *connector,
+		    struct hdcp2_rep_stream_ready *stream_ready)
+{
+	struct hdcp_port_data *data = &connector->hdcp.port_data;
+	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+	struct i915_hdcp_comp_master *comp;
+	int ret;
+
+	mutex_lock(&dev_priv->hdcp_comp_mutex);
+	comp = dev_priv->hdcp_master;
+
+	if (!comp || !comp->ops) {
+		mutex_unlock(&dev_priv->hdcp_comp_mutex);
+		return -EINVAL;
 	}
 
-	ret = _intel_hdcp_enable(connector);
-	if (ret) {
-		DRM_DEBUG_KMS("Failed to enable hdcp (%d)\n", ret);
-		hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
-		schedule_work(&hdcp->prop_work);
-		goto out;
+	ret = comp->ops->verify_mprime(comp->mei_dev, data, stream_ready);
+	if (ret < 0)
+		DRM_DEBUG_KMS("Verify mprime failed. %d\n", ret);
+	mutex_unlock(&dev_priv->hdcp_comp_mutex);
+
+	return ret;
+}
+
+static int hdcp2_authenticate_port(struct intel_connector *connector)
+{
+	struct hdcp_port_data *data = &connector->hdcp.port_data;
+	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+	struct i915_hdcp_comp_master *comp;
+	int ret;
+
+	mutex_lock(&dev_priv->hdcp_comp_mutex);
+	comp = dev_priv->hdcp_master;
+
+	if (!comp || !comp->ops) {
+		mutex_unlock(&dev_priv->hdcp_comp_mutex);
+		return -EINVAL;
 	}
 
-out:
-	mutex_unlock(&hdcp->mutex);
+	ret = comp->ops->enable_hdcp_authentication(comp->mei_dev, data);
+	if (ret < 0)
+		DRM_DEBUG_KMS("Enable hdcp auth failed. %d\n", ret);
+	mutex_unlock(&dev_priv->hdcp_comp_mutex);
+
+	return ret;
+}
+
+static int hdcp2_close_mei_session(struct intel_connector *connector)
+{
+	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+	struct i915_hdcp_comp_master *comp;
+	int ret;
+
+	mutex_lock(&dev_priv->hdcp_comp_mutex);
+	comp = dev_priv->hdcp_master;
+
+	if (!comp || !comp->ops) {
+		mutex_unlock(&dev_priv->hdcp_comp_mutex);
+		return -EINVAL;
+	}
+
+	ret = comp->ops->close_hdcp_session(comp->mei_dev,
+					     &connector->hdcp.port_data);
+	mutex_unlock(&dev_priv->hdcp_comp_mutex);
+
 	return ret;
 }
+
+static int hdcp2_deauthenticate_port(struct intel_connector *connector)
+{
+	return hdcp2_close_mei_session(connector);
+}
+
+/* Authentication flow starts from here */
+static int hdcp2_authentication_key_exchange(struct intel_connector *connector)
+{
+	struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
+	struct intel_hdcp *hdcp = &connector->hdcp;
+	union {
+		struct hdcp2_ake_init ake_init;
+		struct hdcp2_ake_send_cert send_cert;
+		struct hdcp2_ake_no_stored_km no_stored_km;
+		struct hdcp2_ake_send_hprime send_hprime;
+		struct hdcp2_ake_send_pairing_info pairing_info;
+	} msgs;
+	const struct intel_hdcp_shim *shim = hdcp->shim;
+	size_t size;
+	int ret;
+
+	/* Init for seq_num */
+	hdcp->seq_num_v = 0;
+	hdcp->seq_num_m = 0;
+
+	ret = hdcp2_prepare_ake_init(connector, &msgs.ake_init);
+	if (ret < 0)
+		return ret;
+
+	ret = shim->write_2_2_msg(intel_dig_port, &msgs.ake_init,
+				  sizeof(msgs.ake_init));
+	if (ret < 0)
+		return ret;
+
+	ret = shim->read_2_2_msg(intel_dig_port, HDCP_2_2_AKE_SEND_CERT,
+				 &msgs.send_cert, sizeof(msgs.send_cert));
+	if (ret < 0)
+		return ret;
+
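+	/* The sink must advertise HDCP2.2 support in its RxCaps */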
+	if (msgs.send_cert.rx_caps[0] != HDCP_2_2_RX_CAPS_VERSION_VAL)
+		return -EINVAL;
+
+	hdcp->is_repeater = HDCP_2_2_RX_REPEATER(msgs.send_cert.rx_caps[2]);
+
+	/*
+	 * msgs.no_stored_km also holds the message used when km is
+	 * already stored on the sink.
+	 */
+	ret = hdcp2_verify_rx_cert_prepare_km(connector, &msgs.send_cert,
+					      &hdcp->is_paired,
+					      &msgs.no_stored_km, &size);
+	if (ret < 0)
+		return ret;
+
+	ret = shim->write_2_2_msg(intel_dig_port, &msgs.no_stored_km, size);
+	if (ret < 0)
+		return ret;
+
+	ret = shim->read_2_2_msg(intel_dig_port, HDCP_2_2_AKE_SEND_HPRIME,
+				 &msgs.send_hprime, sizeof(msgs.send_hprime));
+	if (ret < 0)
+		return ret;
+
+	ret = hdcp2_verify_hprime(connector, &msgs.send_hprime);
+	if (ret < 0)
+		return ret;
+
+	if (!hdcp->is_paired) {
+		/* Pairing is required */
+		ret = shim->read_2_2_msg(intel_dig_port,
+					 HDCP_2_2_AKE_SEND_PAIRING_INFO,
+					 &msgs.pairing_info,
+					 sizeof(msgs.pairing_info));
+		if (ret < 0)
+			return ret;
+
+		ret = hdcp2_store_pairing_info(connector, &msgs.pairing_info);
+		if (ret < 0)
+			return ret;
+		hdcp->is_paired = true;
+	}
+
+	return 0;
+}
+
+static int hdcp2_locality_check(struct intel_connector *connector)
+{
+	struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
+	struct intel_hdcp *hdcp = &connector->hdcp;
+	union {
+		struct hdcp2_lc_init lc_init;
+		struct hdcp2_lc_send_lprime send_lprime;
+	} msgs;
+	const struct intel_hdcp_shim *shim = hdcp->shim;
+	int tries = HDCP2_LC_RETRY_CNT, ret, i;
+
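+	/* The locality check may fail transiently, so retry a few times */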
+	for (i = 0; i < tries; i++) {
+		ret = hdcp2_prepare_lc_init(connector, &msgs.lc_init);
+		if (ret < 0)
+			continue;
+
+		ret = shim->write_2_2_msg(intel_dig_port, &msgs.lc_init,
+					  sizeof(msgs.lc_init));
+		if (ret < 0)
+			continue;
+
+		ret = shim->read_2_2_msg(intel_dig_port,
+					 HDCP_2_2_LC_SEND_LPRIME,
+					 &msgs.send_lprime,
+					 sizeof(msgs.send_lprime));
+		if (ret < 0)
+			continue;
+
+		ret = hdcp2_verify_lprime(connector, &msgs.send_lprime);
+		if (!ret)
+			break;
+	}
+
+	return ret;
+}
+
+static int hdcp2_session_key_exchange(struct intel_connector *connector)
+{
+	struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
+	struct intel_hdcp *hdcp = &connector->hdcp;
+	struct hdcp2_ske_send_eks send_eks;
+	int ret;
+
+	ret = hdcp2_prepare_skey(connector, &send_eks);
+	if (ret < 0)
+		return ret;
+
+	ret = hdcp->shim->write_2_2_msg(intel_dig_port, &send_eks,
+					sizeof(send_eks));
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+
+static
+int hdcp2_propagate_stream_management_info(struct intel_connector *connector)
+{
+	struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
+	struct intel_hdcp *hdcp = &connector->hdcp;
+	union {
+		struct hdcp2_rep_stream_manage stream_manage;
+		struct hdcp2_rep_stream_ready stream_ready;
+	} msgs;
+	const struct intel_hdcp_shim *shim = hdcp->shim;
+	int ret;
+
+	/* Prepare RepeaterAuth_Stream_Manage msg */
+	msgs.stream_manage.msg_id = HDCP_2_2_REP_STREAM_MANAGE;
+	drm_hdcp2_u32_to_seq_num(msgs.stream_manage.seq_num_m, hdcp->seq_num_m);
+
+	/* K, the number of streams, is fixed at 1 and stored big-endian. */
+	msgs.stream_manage.k = cpu_to_be16(1);
+
+	/* The stream ID is always 0x0 for both HDMI and DP SST. */
+	msgs.stream_manage.streams[0].stream_id = 0;
+	msgs.stream_manage.streams[0].stream_type = hdcp->content_type;
+
+	/* Send it to Repeater */
+	ret = shim->write_2_2_msg(intel_dig_port, &msgs.stream_manage,
+				  sizeof(msgs.stream_manage));
+	if (ret < 0)
+		return ret;
+
+	ret = shim->read_2_2_msg(intel_dig_port, HDCP_2_2_REP_STREAM_READY,
+				 &msgs.stream_ready, sizeof(msgs.stream_ready));
+	if (ret < 0)
+		return ret;
+
+	hdcp->port_data.seq_num_m = hdcp->seq_num_m;
+	hdcp->port_data.streams[0].stream_type = hdcp->content_type;
+
+	ret = hdcp2_verify_mprime(connector, &msgs.stream_ready);
+	if (ret < 0)
+		return ret;
+
+	hdcp->seq_num_m++;
+
+	if (hdcp->seq_num_m > HDCP_2_2_SEQ_NUM_MAX) {
+		DRM_DEBUG_KMS("seq_num_m roll over.\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static
+int hdcp2_authenticate_repeater_topology(struct intel_connector *connector)
+{
+	struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
+	struct intel_hdcp *hdcp = &connector->hdcp;
+	union {
+		struct hdcp2_rep_send_receiverid_list recvid_list;
+		struct hdcp2_rep_send_ack rep_ack;
+	} msgs;
+	const struct intel_hdcp_shim *shim = hdcp->shim;
+	u8 *rx_info;
+	u32 seq_num_v;
+	int ret;
+
+	ret = shim->read_2_2_msg(intel_dig_port, HDCP_2_2_REP_SEND_RECVID_LIST,
+				 &msgs.recvid_list, sizeof(msgs.recvid_list));
+	if (ret < 0)
+		return ret;
+
+	rx_info = msgs.recvid_list.rx_info;
+
+	if (HDCP_2_2_MAX_CASCADE_EXCEEDED(rx_info[1]) ||
+	    HDCP_2_2_MAX_DEVS_EXCEEDED(rx_info[1])) {
+		DRM_DEBUG_KMS("Topology Max Size Exceeded\n");
+		return -EINVAL;
+	}
+
+	/* Convert seq_num_v to a u32 and keep it in a local variable */
+	seq_num_v = drm_hdcp2_seq_num_to_u32(msgs.recvid_list.seq_num_v);
+
+	if (seq_num_v < hdcp->seq_num_v) {
+		/* Roll over of the seq_num_v from repeater. Reauthenticate. */
+		DRM_DEBUG_KMS("Seq_num_v roll over.\n");
+		return -EINVAL;
+	}
+
+	ret = hdcp2_verify_rep_topology_prepare_ack(connector,
+						    &msgs.recvid_list,
+						    &msgs.rep_ack);
+	if (ret < 0)
+		return ret;
+
+	hdcp->seq_num_v = seq_num_v;
+	ret = shim->write_2_2_msg(intel_dig_port, &msgs.rep_ack,
+				  sizeof(msgs.rep_ack));
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+
+static int hdcp2_authenticate_repeater(struct intel_connector *connector)
+{
+	int ret;
+
+	ret = hdcp2_authenticate_repeater_topology(connector);
+	if (ret < 0)
+		return ret;
+
+	return hdcp2_propagate_stream_management_info(connector);
+}
+
+static int hdcp2_authenticate_sink(struct intel_connector *connector)
+{
+	struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
+	struct intel_hdcp *hdcp = &connector->hdcp;
+	const struct intel_hdcp_shim *shim = hdcp->shim;
+	int ret;
+
+	ret = hdcp2_authentication_key_exchange(connector);
+	if (ret < 0) {
+		DRM_DEBUG_KMS("AKE Failed. Err : %d\n", ret);
+		return ret;
+	}
+
+	ret = hdcp2_locality_check(connector);
+	if (ret < 0) {
+		DRM_DEBUG_KMS("Locality Check failed. Err : %d\n", ret);
+		return ret;
+	}
+
+	ret = hdcp2_session_key_exchange(connector);
+	if (ret < 0) {
+		DRM_DEBUG_KMS("SKE Failed. Err : %d\n", ret);
+		return ret;
+	}
+
+	if (shim->config_stream_type) {
+		ret = shim->config_stream_type(intel_dig_port,
+					       hdcp->is_repeater,
+					       hdcp->content_type);
+		if (ret < 0)
+			return ret;
+	}
+
+	if (hdcp->is_repeater) {
+		ret = hdcp2_authenticate_repeater(connector);
+		if (ret < 0) {
+			DRM_DEBUG_KMS("Repeater Auth Failed. Err: %d\n", ret);
+			return ret;
+		}
+	}
+
+	hdcp->port_data.streams[0].stream_type = hdcp->content_type;
+	return hdcp2_authenticate_port(connector);
+}
+
+static int hdcp2_enable_encryption(struct intel_connector *connector)
+{
+	struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
+	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+	struct intel_hdcp *hdcp = &connector->hdcp;
+	enum port port = connector->encoder->port;
+	int ret;
+
+	WARN_ON(I915_READ(HDCP2_STATUS_DDI(port)) & LINK_ENCRYPTION_STATUS);
+
+	if (hdcp->shim->toggle_signalling) {
+		ret = hdcp->shim->toggle_signalling(intel_dig_port, true);
+		if (ret) {
+			DRM_ERROR("Failed to enable HDCP signalling. %d\n",
+				  ret);
+			return ret;
+		}
+	}
+
+	if (I915_READ(HDCP2_STATUS_DDI(port)) & LINK_AUTH_STATUS) {
+		/* Link is Authenticated. Now set for Encryption */
+		I915_WRITE(HDCP2_CTL_DDI(port),
+			   I915_READ(HDCP2_CTL_DDI(port)) |
+			   CTL_LINK_ENCRYPTION_REQ);
+	}
+
+	ret = intel_wait_for_register(dev_priv, HDCP2_STATUS_DDI(port),
+				      LINK_ENCRYPTION_STATUS,
+				      LINK_ENCRYPTION_STATUS,
+				      ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);
+
+	return ret;
+}
+
+static int hdcp2_disable_encryption(struct intel_connector *connector)
+{
+	struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
+	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+	struct intel_hdcp *hdcp = &connector->hdcp;
+	enum port port = connector->encoder->port;
+	int ret;
+
+	WARN_ON(!(I915_READ(HDCP2_STATUS_DDI(port)) & LINK_ENCRYPTION_STATUS));
+
+	I915_WRITE(HDCP2_CTL_DDI(port),
+		   I915_READ(HDCP2_CTL_DDI(port)) & ~CTL_LINK_ENCRYPTION_REQ);
+
+	ret = intel_wait_for_register(dev_priv, HDCP2_STATUS_DDI(port),
+				      LINK_ENCRYPTION_STATUS, 0x0,
+				      ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);
+	if (ret == -ETIMEDOUT)
+		DRM_DEBUG_KMS("Disable encryption timed out\n");
+
+	if (hdcp->shim->toggle_signalling) {
+		ret = hdcp->shim->toggle_signalling(intel_dig_port, false);
+		if (ret) {
+			DRM_ERROR("Failed to disable HDCP signalling. %d\n",
+				  ret);
+			return ret;
+		}
+	}
+
+	return ret;
+}
+
+static int hdcp2_authenticate_and_encrypt(struct intel_connector *connector)
+{
+	int ret, i, tries = 3;
+
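+	/* Retry the full authentication flow a few times before giving up */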
+	for (i = 0; i < tries; i++) {
+		ret = hdcp2_authenticate_sink(connector);
+		if (!ret)
+			break;
+
+		/* Clearing the mei hdcp session */
+		DRM_DEBUG_KMS("HDCP2.2 Auth %d of %d Failed.(%d)\n",
+			      i + 1, tries, ret);
+		if (hdcp2_deauthenticate_port(connector) < 0)
+			DRM_DEBUG_KMS("Port deauth failed.\n");
+	}
+
+	if (i != tries) {
+		/*
+		 * Ensure the required minimum 200ms interval between
+		 * Session Key Exchange and enabling encryption.
+		 */
+		msleep(HDCP_2_2_DELAY_BEFORE_ENCRYPTION_EN);
+		ret = hdcp2_enable_encryption(connector);
+		if (ret < 0) {
+			DRM_DEBUG_KMS("Encryption Enable Failed.(%d)\n", ret);
+			if (hdcp2_deauthenticate_port(connector) < 0)
+				DRM_DEBUG_KMS("Port deauth failed.\n");
+		}
+	}
+
+	return ret;
+}
+
+static int _intel_hdcp2_enable(struct intel_connector *connector)
+{
+	struct intel_hdcp *hdcp = &connector->hdcp;
+	int ret;
+
+	DRM_DEBUG_KMS("[%s:%d] HDCP2.2 is being enabled. Type: %d\n",
+		      connector->base.name, connector->base.base.id,
+		      hdcp->content_type);
+
+	ret = hdcp2_authenticate_and_encrypt(connector);
+	if (ret) {
+		DRM_DEBUG_KMS("HDCP2 Type%d enabling failed (%d)\n",
+			      hdcp->content_type, ret);
+		return ret;
+	}
+
+	DRM_DEBUG_KMS("[%s:%d] HDCP2.2 is enabled. Type %d\n",
+		      connector->base.name, connector->base.base.id,
+		      hdcp->content_type);
+
+	hdcp->hdcp2_encrypted = true;
+	return 0;
+}
+
+static int _intel_hdcp2_disable(struct intel_connector *connector)
+{
+	int ret;
+
+	DRM_DEBUG_KMS("[%s:%d] HDCP2.2 is being disabled\n",
+		      connector->base.name, connector->base.base.id);
+
+	ret = hdcp2_disable_encryption(connector);
+
+	if (hdcp2_deauthenticate_port(connector) < 0)
+		DRM_DEBUG_KMS("Port deauth failed.\n");
+
+	connector->hdcp.hdcp2_encrypted = false;
+
+	return ret;
+}
+
+/* Implements the Link Integrity Check for HDCP2.2 */
+static int intel_hdcp2_check_link(struct intel_connector *connector)
+{
+	struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
+	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+	struct intel_hdcp *hdcp = &connector->hdcp;
+	enum port port = connector->encoder->port;
+	int ret = 0;
+
+	mutex_lock(&hdcp->mutex);
+
+	/* hdcp2_check_link is expected only when HDCP2.2 is Enabled */
+	if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED ||
+	    !hdcp->hdcp2_encrypted) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (WARN_ON(!intel_hdcp2_in_use(connector))) {
+		DRM_ERROR("HDCP2.2 link stopped the encryption, %x\n",
+			  I915_READ(HDCP2_STATUS_DDI(port)));
+		ret = -ENXIO;
+		hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+		schedule_work(&hdcp->prop_work);
+		goto out;
+	}
+
+	ret = hdcp->shim->check_2_2_link(intel_dig_port);
+	if (ret == HDCP_LINK_PROTECTED) {
+		if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
+			hdcp->value = DRM_MODE_CONTENT_PROTECTION_ENABLED;
+			schedule_work(&hdcp->prop_work);
+		}
+		goto out;
+	}
+
+	if (ret == HDCP_TOPOLOGY_CHANGE) {
+		if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
+			goto out;
+
+		DRM_DEBUG_KMS("HDCP2.2 Downstream topology change\n");
+		ret = hdcp2_authenticate_repeater_topology(connector);
+		if (!ret) {
+			hdcp->value = DRM_MODE_CONTENT_PROTECTION_ENABLED;
+			schedule_work(&hdcp->prop_work);
+			goto out;
+		}
+		DRM_DEBUG_KMS("[%s:%d] Repeater topology auth failed.(%d)\n",
+			      connector->base.name, connector->base.base.id,
+			      ret);
+	} else {
+		DRM_DEBUG_KMS("[%s:%d] HDCP2.2 link failed, retrying auth\n",
+			      connector->base.name, connector->base.base.id);
+	}
+
+	ret = _intel_hdcp2_disable(connector);
+	if (ret) {
+		DRM_ERROR("[%s:%d] Failed to disable hdcp2.2 (%d)\n",
+			  connector->base.name, connector->base.base.id, ret);
+		hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+		schedule_work(&hdcp->prop_work);
+		goto out;
+	}
+
+	ret = _intel_hdcp2_enable(connector);
+	if (ret) {
+		DRM_DEBUG_KMS("[%s:%d] Failed to enable hdcp2.2 (%d)\n",
+			      connector->base.name, connector->base.base.id,
+			      ret);
+		hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+		schedule_work(&hdcp->prop_work);
+		goto out;
+	}
+
+out:
+	mutex_unlock(&hdcp->mutex);
+	return ret;
+}
+
+static void intel_hdcp_check_work(struct work_struct *work)
+{
+	struct intel_hdcp *hdcp = container_of(to_delayed_work(work),
+					       struct intel_hdcp,
+					       check_work);
+	struct intel_connector *connector = intel_hdcp_to_connector(hdcp);
+
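+	/* Try the HDCP2.2 link check first; fall back to HDCP1.4 if unused */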
+	if (!intel_hdcp2_check_link(connector))
+		schedule_delayed_work(&hdcp->check_work,
+				      DRM_HDCP2_CHECK_PERIOD_MS);
+	else if (!intel_hdcp_check_link(connector))
+		schedule_delayed_work(&hdcp->check_work,
+				      DRM_HDCP_CHECK_PERIOD_MS);
+}
+
+static int i915_hdcp_component_bind(struct device *i915_kdev,
+				    struct device *mei_kdev, void *data)
+{
+	struct drm_i915_private *dev_priv = kdev_to_i915(i915_kdev);
+
+	DRM_DEBUG("I915 HDCP comp bind\n");
+	mutex_lock(&dev_priv->hdcp_comp_mutex);
+	dev_priv->hdcp_master = (struct i915_hdcp_comp_master *)data;
+	dev_priv->hdcp_master->mei_dev = mei_kdev;
+	mutex_unlock(&dev_priv->hdcp_comp_mutex);
+
+	return 0;
+}
+
+static void i915_hdcp_component_unbind(struct device *i915_kdev,
+				       struct device *mei_kdev, void *data)
+{
+	struct drm_i915_private *dev_priv = kdev_to_i915(i915_kdev);
+
+	DRM_DEBUG("I915 HDCP comp unbind\n");
+	mutex_lock(&dev_priv->hdcp_comp_mutex);
+	dev_priv->hdcp_master = NULL;
+	mutex_unlock(&dev_priv->hdcp_comp_mutex);
+}
+
+static const struct component_ops i915_hdcp_component_ops = {
+	.bind   = i915_hdcp_component_bind,
+	.unbind = i915_hdcp_component_unbind,
+};
+
+static inline int initialize_hdcp_port_data(struct intel_connector *connector)
+{
+	struct intel_hdcp *hdcp = &connector->hdcp;
+	struct hdcp_port_data *data = &hdcp->port_data;
+
+	data->port = connector->encoder->port;
+	data->port_type = (u8)HDCP_PORT_TYPE_INTEGRATED;
+	data->protocol = (u8)hdcp->shim->protocol;
+
+	data->k = 1;
+	if (!data->streams)
+		data->streams = kcalloc(data->k,
+					sizeof(struct hdcp2_streamid_type),
+					GFP_KERNEL);
+	if (!data->streams) {
+		DRM_ERROR("Out of Memory\n");
+		return -ENOMEM;
+	}
+
+	data->streams[0].stream_id = 0;
+	data->streams[0].stream_type = hdcp->content_type;
+
+	return 0;
+}
+
+static bool is_hdcp2_supported(struct drm_i915_private *dev_priv)
+{
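+	/* HDCP2.2 is driven through the MEI firmware interface */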
+	if (!IS_ENABLED(CONFIG_INTEL_MEI_HDCP))
+		return false;
+
+	return (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv) ||
+		IS_KABYLAKE(dev_priv));
+}
+
+void intel_hdcp_component_init(struct drm_i915_private *dev_priv)
+{
+	int ret;
+
+	if (!is_hdcp2_supported(dev_priv))
+		return;
+
+	mutex_lock(&dev_priv->hdcp_comp_mutex);
+	WARN_ON(dev_priv->hdcp_comp_added);
+
+	dev_priv->hdcp_comp_added = true;
+	mutex_unlock(&dev_priv->hdcp_comp_mutex);
+	ret = component_add_typed(dev_priv->drm.dev, &i915_hdcp_component_ops,
+				  I915_COMPONENT_HDCP);
+	if (ret < 0) {
+		DRM_DEBUG_KMS("Failed at component add(%d)\n", ret);
+		mutex_lock(&dev_priv->hdcp_comp_mutex);
+		dev_priv->hdcp_comp_added = false;
+		mutex_unlock(&dev_priv->hdcp_comp_mutex);
+		return;
+	}
+}
+
+static void intel_hdcp2_init(struct intel_connector *connector)
+{
+	struct intel_hdcp *hdcp = &connector->hdcp;
+	int ret;
+
+	ret = initialize_hdcp_port_data(connector);
+	if (ret) {
+		DRM_DEBUG_KMS("Mei hdcp data init failed\n");
+		return;
+	}
+
+	hdcp->hdcp2_supported = true;
+}
+
+int intel_hdcp_init(struct intel_connector *connector,
+		    const struct intel_hdcp_shim *shim)
+{
+	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+	struct intel_hdcp *hdcp = &connector->hdcp;
+	int ret;
+
+	if (!shim)
+		return -EINVAL;
+
+	ret = drm_connector_attach_content_protection_property(&connector->base);
+	if (ret)
+		return ret;
+
+	hdcp->shim = shim;
+	mutex_init(&hdcp->mutex);
+	INIT_DELAYED_WORK(&hdcp->check_work, intel_hdcp_check_work);
+	INIT_WORK(&hdcp->prop_work, intel_hdcp_prop_work);
+
+	if (is_hdcp2_supported(dev_priv))
+		intel_hdcp2_init(connector);
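+	/* DP sinks signal new HDCP messages via CP_IRQ; waiters sleep here */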
+	init_waitqueue_head(&hdcp->cp_irq_queue);
+
+	return 0;
+}
+
+int intel_hdcp_enable(struct intel_connector *connector)
+{
+	struct intel_hdcp *hdcp = &connector->hdcp;
+	unsigned long check_link_interval = DRM_HDCP_CHECK_PERIOD_MS;
+	int ret = -EINVAL;
+
+	if (!hdcp->shim)
+		return -ENOENT;
+
+	mutex_lock(&hdcp->mutex);
+	WARN_ON(hdcp->value == DRM_MODE_CONTENT_PROTECTION_ENABLED);
+
+	/*
+	 * HDCP2.2 is more secure than HDCP1.4, so it is preferred whenever
+	 * the setup is capable of it.
+	 */
+	if (intel_hdcp2_capable(connector)) {
+		ret = _intel_hdcp2_enable(connector);
+		if (!ret)
+			check_link_interval = DRM_HDCP2_CHECK_PERIOD_MS;
+	}
+
+	/* When HDCP2.2 fails, HDCP1.4 will be attempted */
+	if (ret && intel_hdcp_capable(connector))
+		ret = _intel_hdcp_enable(connector);
+
+	if (!ret) {
+		schedule_delayed_work(&hdcp->check_work, check_link_interval);
+		hdcp->value = DRM_MODE_CONTENT_PROTECTION_ENABLED;
+		schedule_work(&hdcp->prop_work);
+	}
+
+	mutex_unlock(&hdcp->mutex);
+	return ret;
+}
+
+int intel_hdcp_disable(struct intel_connector *connector)
+{
+	struct intel_hdcp *hdcp = &connector->hdcp;
+	int ret = 0;
+
+	if (!hdcp->shim)
+		return -ENOENT;
+
+	mutex_lock(&hdcp->mutex);
+
+	if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
+		hdcp->value = DRM_MODE_CONTENT_PROTECTION_UNDESIRED;
+		if (hdcp->hdcp2_encrypted)
+			ret = _intel_hdcp2_disable(connector);
+		else if (hdcp->hdcp_encrypted)
+			ret = _intel_hdcp_disable(connector);
+	}
+
+	mutex_unlock(&hdcp->mutex);
+	cancel_delayed_work_sync(&hdcp->check_work);
+	return ret;
+}
+
+void intel_hdcp_component_fini(struct drm_i915_private *dev_priv)
+{
+	mutex_lock(&dev_priv->hdcp_comp_mutex);
+	if (!dev_priv->hdcp_comp_added) {
+		mutex_unlock(&dev_priv->hdcp_comp_mutex);
+		return;
+	}
+
+	dev_priv->hdcp_comp_added = false;
+	mutex_unlock(&dev_priv->hdcp_comp_mutex);
+
+	component_del(dev_priv->drm.dev, &i915_hdcp_component_ops);
+}
+
+void intel_hdcp_cleanup(struct intel_connector *connector)
+{
+	if (!connector->hdcp.shim)
+		return;
+
+	mutex_lock(&connector->hdcp.mutex);
+	kfree(connector->hdcp.port_data.streams);
+	mutex_unlock(&connector->hdcp.mutex);
+}
+
+void intel_hdcp_atomic_check(struct drm_connector *connector,
+			     struct drm_connector_state *old_state,
+			     struct drm_connector_state *new_state)
+{
+	u64 old_cp = old_state->content_protection;
+	u64 new_cp = new_state->content_protection;
+	struct drm_crtc_state *crtc_state;
+
+	if (!new_state->crtc) {
+		/*
+		 * If the connector is being disabled with CP enabled, mark it
+		 * desired so it's re-enabled when the connector is brought back
+		 */
+		if (old_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED)
+			new_state->content_protection =
+				DRM_MODE_CONTENT_PROTECTION_DESIRED;
+		return;
+	}
+
+	/*
+	 * Nothing to do if the state didn't change, or HDCP was activated since
+	 * the last commit
+	 */
+	if (old_cp == new_cp ||
+	    (old_cp == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
+	     new_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED))
+		return;
+
+	crtc_state = drm_atomic_get_new_crtc_state(new_state->state,
+						   new_state->crtc);
+	crtc_state->mode_changed = true;
+}
+
+/* Handles the CP_IRQ raised from the DP HDCP sink */
+void intel_hdcp_handle_cp_irq(struct intel_connector *connector)
+{
+	struct intel_hdcp *hdcp = &connector->hdcp;
+
+	if (!hdcp->shim)
+		return;
+
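+	/* Wake anyone waiting on a sink message and kick off a link check */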
+	atomic_inc(&connector->hdcp.cp_irq_count);
+	wake_up_all(&connector->hdcp.cp_irq_queue);
+
+	schedule_delayed_work(&hdcp->check_work, 0);
+}
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 765718b606d857a9993a91bfff92365452599f1e..26767785f14aa2d9120e53641f5f03752c90731a 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -82,6 +82,8 @@ static struct intel_hdmi *intel_attached_hdmi(struct drm_connector *connector)
 static u32 g4x_infoframe_index(unsigned int type)
 {
 	switch (type) {
+	case HDMI_PACKET_TYPE_GAMUT_METADATA:
+		return VIDEO_DIP_SELECT_GAMUT;
 	case HDMI_INFOFRAME_TYPE_AVI:
 		return VIDEO_DIP_SELECT_AVI;
 	case HDMI_INFOFRAME_TYPE_SPD:
@@ -97,6 +99,12 @@ static u32 g4x_infoframe_index(unsigned int type)
 static u32 g4x_infoframe_enable(unsigned int type)
 {
 	switch (type) {
+	case HDMI_PACKET_TYPE_GENERAL_CONTROL:
+		return VIDEO_DIP_ENABLE_GCP;
+	case HDMI_PACKET_TYPE_GAMUT_METADATA:
+		return VIDEO_DIP_ENABLE_GAMUT;
+	case DP_SDP_VSC:
+		return 0;
 	case HDMI_INFOFRAME_TYPE_AVI:
 		return VIDEO_DIP_ENABLE_AVI;
 	case HDMI_INFOFRAME_TYPE_SPD:
@@ -112,6 +120,10 @@ static u32 g4x_infoframe_enable(unsigned int type)
 static u32 hsw_infoframe_enable(unsigned int type)
 {
 	switch (type) {
+	case HDMI_PACKET_TYPE_GENERAL_CONTROL:
+		return VIDEO_DIP_ENABLE_GCP_HSW;
+	case HDMI_PACKET_TYPE_GAMUT_METADATA:
+		return VIDEO_DIP_ENABLE_GMP_HSW;
 	case DP_SDP_VSC:
 		return VIDEO_DIP_ENABLE_VSC_HSW;
 	case DP_SDP_PPS:
@@ -135,6 +147,8 @@ hsw_dip_data_reg(struct drm_i915_private *dev_priv,
 		 int i)
 {
 	switch (type) {
+	case HDMI_PACKET_TYPE_GAMUT_METADATA:
+		return HSW_TVIDEO_DIP_GMP_DATA(cpu_transcoder, i);
 	case DP_SDP_VSC:
 		return HSW_TVIDEO_DIP_VSC_DATA(cpu_transcoder, i);
 	case DP_SDP_PPS:
@@ -200,17 +214,37 @@ static void g4x_write_infoframe(struct intel_encoder *encoder,
 	POSTING_READ(VIDEO_DIP_CTL);
 }
 
-static bool g4x_infoframe_enabled(struct intel_encoder *encoder,
+static void g4x_read_infoframe(struct intel_encoder *encoder,
+			       const struct intel_crtc_state *crtc_state,
+			       unsigned int type,
+			       void *frame, ssize_t len)
+{
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	u32 val, *data = frame;
+	int i;
+
+	val = I915_READ(VIDEO_DIP_CTL);
+
+	val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
+	val |= g4x_infoframe_index(type);
+
+	I915_WRITE(VIDEO_DIP_CTL, val);
+
+	for (i = 0; i < len; i += 4)
+		*data++ = I915_READ(VIDEO_DIP_DATA);
+}
+
+static u32 g4x_infoframes_enabled(struct intel_encoder *encoder,
 				  const struct intel_crtc_state *pipe_config)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	u32 val = I915_READ(VIDEO_DIP_CTL);
 
 	if ((val & VIDEO_DIP_ENABLE) == 0)
-		return false;
+		return 0;
 
 	if ((val & VIDEO_DIP_PORT_MASK) != VIDEO_DIP_PORT(encoder->port))
-		return false;
+		return 0;
 
 	return val & (VIDEO_DIP_ENABLE_AVI |
 		      VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_SPD);
@@ -255,7 +289,28 @@ static void ibx_write_infoframe(struct intel_encoder *encoder,
 	POSTING_READ(reg);
 }
 
-static bool ibx_infoframe_enabled(struct intel_encoder *encoder,
+static void ibx_read_infoframe(struct intel_encoder *encoder,
+			       const struct intel_crtc_state *crtc_state,
+			       unsigned int type,
+			       void *frame, ssize_t len)
+{
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	u32 val, *data = frame;
+	int i;
+
+	val = I915_READ(TVIDEO_DIP_CTL(crtc->pipe));
+
+	val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
+	val |= g4x_infoframe_index(type);
+
+	I915_WRITE(TVIDEO_DIP_CTL(crtc->pipe), val);
+
+	for (i = 0; i < len; i += 4)
+		*data++ = I915_READ(TVIDEO_DIP_DATA(crtc->pipe));
+}
+
+static u32 ibx_infoframes_enabled(struct intel_encoder *encoder,
 				  const struct intel_crtc_state *pipe_config)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
@@ -264,10 +319,10 @@ static bool ibx_infoframe_enabled(struct intel_encoder *encoder,
 	u32 val = I915_READ(reg);
 
 	if ((val & VIDEO_DIP_ENABLE) == 0)
-		return false;
+		return 0;
 
 	if ((val & VIDEO_DIP_PORT_MASK) != VIDEO_DIP_PORT(encoder->port))
-		return false;
+		return 0;
 
 	return val & (VIDEO_DIP_ENABLE_AVI |
 		      VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
@@ -316,7 +371,28 @@ static void cpt_write_infoframe(struct intel_encoder *encoder,
 	POSTING_READ(reg);
 }
 
-static bool cpt_infoframe_enabled(struct intel_encoder *encoder,
+static void cpt_read_infoframe(struct intel_encoder *encoder,
+			       const struct intel_crtc_state *crtc_state,
+			       unsigned int type,
+			       void *frame, ssize_t len)
+{
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	u32 val, *data = frame;
+	int i;
+
+	val = I915_READ(TVIDEO_DIP_CTL(crtc->pipe));
+
+	val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
+	val |= g4x_infoframe_index(type);
+
+	I915_WRITE(TVIDEO_DIP_CTL(crtc->pipe), val);
+
+	for (i = 0; i < len; i += 4)
+		*data++ = I915_READ(TVIDEO_DIP_DATA(crtc->pipe));
+}
+
+static u32 cpt_infoframes_enabled(struct intel_encoder *encoder,
 				  const struct intel_crtc_state *pipe_config)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
@@ -324,7 +400,7 @@ static bool cpt_infoframe_enabled(struct intel_encoder *encoder,
 	u32 val = I915_READ(TVIDEO_DIP_CTL(pipe));
 
 	if ((val & VIDEO_DIP_ENABLE) == 0)
-		return false;
+		return 0;
 
 	return val & (VIDEO_DIP_ENABLE_AVI |
 		      VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
@@ -370,7 +446,28 @@ static void vlv_write_infoframe(struct intel_encoder *encoder,
 	POSTING_READ(reg);
 }
 
-static bool vlv_infoframe_enabled(struct intel_encoder *encoder,
+static void vlv_read_infoframe(struct intel_encoder *encoder,
+			       const struct intel_crtc_state *crtc_state,
+			       unsigned int type,
+			       void *frame, ssize_t len)
+{
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	u32 val, *data = frame;
+	int i;
+
+	val = I915_READ(VLV_TVIDEO_DIP_CTL(crtc->pipe));
+
+	val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
+	val |= g4x_infoframe_index(type);
+
+	I915_WRITE(VLV_TVIDEO_DIP_CTL(crtc->pipe), val);
+
+	for (i = 0; i < len; i += 4)
+		*data++ = I915_READ(VLV_TVIDEO_DIP_DATA(crtc->pipe));
+}
+
+static u32 vlv_infoframes_enabled(struct intel_encoder *encoder,
 				  const struct intel_crtc_state *pipe_config)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
@@ -378,10 +475,10 @@ static bool vlv_infoframe_enabled(struct intel_encoder *encoder,
 	u32 val = I915_READ(VLV_TVIDEO_DIP_CTL(pipe));
 
 	if ((val & VIDEO_DIP_ENABLE) == 0)
-		return false;
+		return 0;
 
 	if ((val & VIDEO_DIP_PORT_MASK) != VIDEO_DIP_PORT(encoder->port))
-		return false;
+		return 0;
 
 	return val & (VIDEO_DIP_ENABLE_AVI |
 		      VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
@@ -423,7 +520,24 @@ static void hsw_write_infoframe(struct intel_encoder *encoder,
 	POSTING_READ(ctl_reg);
 }
 
-static bool hsw_infoframe_enabled(struct intel_encoder *encoder,
+static void hsw_read_infoframe(struct intel_encoder *encoder,
+			       const struct intel_crtc_state *crtc_state,
+			       unsigned int type,
+			       void *frame, ssize_t len)
+{
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+	u32 val, *data = frame;
+	int i;
+
+	val = I915_READ(HSW_TVIDEO_DIP_CTL(cpu_transcoder));
+
+	for (i = 0; i < len; i += 4)
+		*data++ = I915_READ(hsw_dip_data_reg(dev_priv, cpu_transcoder,
+						     type, i >> 2));
+}
+
+static u32 hsw_infoframes_enabled(struct intel_encoder *encoder,
 				  const struct intel_crtc_state *pipe_config)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
@@ -434,6 +548,53 @@ static bool hsw_infoframe_enabled(struct intel_encoder *encoder,
 		      VIDEO_DIP_ENABLE_GMP_HSW | VIDEO_DIP_ENABLE_SPD_HSW);
 }
 
+static const u8 infoframe_type_to_idx[] = {
+	HDMI_PACKET_TYPE_GENERAL_CONTROL,
+	HDMI_PACKET_TYPE_GAMUT_METADATA,
+	DP_SDP_VSC,
+	HDMI_INFOFRAME_TYPE_AVI,
+	HDMI_INFOFRAME_TYPE_SPD,
+	HDMI_INFOFRAME_TYPE_VENDOR,
+};
+
+u32 intel_hdmi_infoframe_enable(unsigned int type)
+{
+	int i;
+
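+	/* The bit position mirrors the type's index in infoframe_type_to_idx[] */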
+	for (i = 0; i < ARRAY_SIZE(infoframe_type_to_idx); i++) {
+		if (infoframe_type_to_idx[i] == type)
+			return BIT(i);
+	}
+
+	return 0;
+}
+
+u32 intel_hdmi_infoframes_enabled(struct intel_encoder *encoder,
+				  const struct intel_crtc_state *crtc_state)
+{
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
+	u32 val, ret = 0;
+	int i;
+
+	val = dig_port->infoframes_enabled(encoder, crtc_state);
+
+	/* map from hardware bits to dip idx */
+	for (i = 0; i < ARRAY_SIZE(infoframe_type_to_idx); i++) {
+		unsigned int type = infoframe_type_to_idx[i];
+
+		if (HAS_DDI(dev_priv)) {
+			if (val & hsw_infoframe_enable(type))
+				ret |= BIT(i);
+		} else {
+			if (val & g4x_infoframe_enable(type))
+				ret |= BIT(i);
+		}
+	}
+
+	return ret;
+}
+
 /*
  * The data we write to the DIP data buffer registers is 1 byte bigger than the
  * HDMI infoframe size because of an ECC/reserved byte at position 3 (starting
@@ -453,15 +614,23 @@ static bool hsw_infoframe_enabled(struct intel_encoder *encoder,
  */
 static void intel_write_infoframe(struct intel_encoder *encoder,
 				  const struct intel_crtc_state *crtc_state,
-				  union hdmi_infoframe *frame)
+				  enum hdmi_infoframe_type type,
+				  const union hdmi_infoframe *frame)
 {
 	struct intel_digital_port *intel_dig_port = enc_to_dig_port(&encoder->base);
 	u8 buffer[VIDEO_DIP_DATA_SIZE];
 	ssize_t len;
 
+	if ((crtc_state->infoframes.enable &
+	     intel_hdmi_infoframe_enable(type)) == 0)
+		return;
+
+	if (WARN_ON(frame->any.type != type))
+		return;
+
 	/* see comment above for the reason for this offset */
-	len = hdmi_infoframe_pack(frame, buffer + 1, sizeof(buffer) - 1);
-	if (len < 0)
+	len = hdmi_infoframe_pack_only(frame, buffer + 1, sizeof(buffer) - 1);
+	if (WARN_ON(len < 0))
 		return;
 
 	/* Insert the 'hole' (see big comment above) at position 3 */
@@ -469,86 +638,143 @@ static void intel_write_infoframe(struct intel_encoder *encoder,
 	buffer[3] = 0;
 	len++;
 
-	intel_dig_port->write_infoframe(encoder,
-					crtc_state,
-					frame->any.type, buffer, len);
+	intel_dig_port->write_infoframe(encoder, crtc_state, type, buffer, len);
 }
 
-static void intel_hdmi_set_avi_infoframe(struct intel_encoder *encoder,
-					 const struct intel_crtc_state *crtc_state,
-					 const struct drm_connector_state *conn_state)
+void intel_read_infoframe(struct intel_encoder *encoder,
+			  const struct intel_crtc_state *crtc_state,
+			  enum hdmi_infoframe_type type,
+			  union hdmi_infoframe *frame)
 {
+	struct intel_digital_port *intel_dig_port = enc_to_dig_port(&encoder->base);
+	u8 buffer[VIDEO_DIP_DATA_SIZE];
+	int ret;
+
+	if ((crtc_state->infoframes.enable &
+	     intel_hdmi_infoframe_enable(type)) == 0)
+		return;
+
+	intel_dig_port->read_infoframe(encoder, crtc_state,
+				       type, buffer, sizeof(buffer));
+
+	/* Fill the 'hole' (see big comment above) at position 3 */
+	memmove(&buffer[1], &buffer[0], 3);
+
+	/* see comment above for the reason for this offset */
+	ret = hdmi_infoframe_unpack(frame, buffer + 1, sizeof(buffer) - 1);
+	if (ret) {
+		DRM_DEBUG_KMS("Failed to unpack infoframe type 0x%02x\n", type);
+		return;
+	}
+
+	if (frame->any.type != type)
+		DRM_DEBUG_KMS("Found the wrong infoframe type 0x%x (expected 0x%02x)\n",
+			      frame->any.type, type);
+}
+
+static bool
+intel_hdmi_compute_avi_infoframe(struct intel_encoder *encoder,
+				 struct intel_crtc_state *crtc_state,
+				 struct drm_connector_state *conn_state)
+{
+	struct hdmi_avi_infoframe *frame = &crtc_state->infoframes.avi.avi;
 	const struct drm_display_mode *adjusted_mode =
 		&crtc_state->base.adjusted_mode;
-	union hdmi_infoframe frame;
+	struct drm_connector *connector = conn_state->connector;
 	int ret;
 
-	ret = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi,
-						       conn_state->connector,
+	if (!crtc_state->has_infoframe)
+		return true;
+
+	crtc_state->infoframes.enable |=
+		intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI);
+
+	ret = drm_hdmi_avi_infoframe_from_display_mode(frame, connector,
 						       adjusted_mode);
-	if (ret < 0) {
-		DRM_ERROR("couldn't fill AVI infoframe\n");
-		return;
-	}
+	if (ret)
+		return false;
 
 	if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
-		frame.avi.colorspace = HDMI_COLORSPACE_YUV420;
+		frame->colorspace = HDMI_COLORSPACE_YUV420;
 	else if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444)
-		frame.avi.colorspace = HDMI_COLORSPACE_YUV444;
+		frame->colorspace = HDMI_COLORSPACE_YUV444;
 	else
-		frame.avi.colorspace = HDMI_COLORSPACE_RGB;
+		frame->colorspace = HDMI_COLORSPACE_RGB;
 
-	drm_hdmi_avi_infoframe_colorspace(&frame.avi, conn_state);
+	drm_hdmi_avi_infoframe_colorspace(frame, conn_state);
 
-	drm_hdmi_avi_infoframe_quant_range(&frame.avi,
-					   conn_state->connector,
+	drm_hdmi_avi_infoframe_quant_range(frame, connector,
 					   adjusted_mode,
 					   crtc_state->limited_color_range ?
 					   HDMI_QUANTIZATION_RANGE_LIMITED :
 					   HDMI_QUANTIZATION_RANGE_FULL);
 
-	drm_hdmi_avi_infoframe_content_type(&frame.avi,
-					    conn_state);
+	drm_hdmi_avi_infoframe_content_type(frame, conn_state);
 
 	/* TODO: handle pixel repetition for YCBCR420 outputs */
-	intel_write_infoframe(encoder, crtc_state,
-			      &frame);
+
+	ret = hdmi_avi_infoframe_check(frame);
+	if (WARN_ON(ret))
+		return false;
+
+	return true;
 }
 
-static void intel_hdmi_set_spd_infoframe(struct intel_encoder *encoder,
-					 const struct intel_crtc_state *crtc_state)
+static bool
+intel_hdmi_compute_spd_infoframe(struct intel_encoder *encoder,
+				 struct intel_crtc_state *crtc_state,
+				 struct drm_connector_state *conn_state)
 {
-	union hdmi_infoframe frame;
+	struct hdmi_spd_infoframe *frame = &crtc_state->infoframes.spd.spd;
 	int ret;
 
-	ret = hdmi_spd_infoframe_init(&frame.spd, "Intel", "Integrated gfx");
-	if (ret < 0) {
-		DRM_ERROR("couldn't fill SPD infoframe\n");
-		return;
-	}
+	if (!crtc_state->has_infoframe)
+		return true;
 
-	frame.spd.sdi = HDMI_SPD_SDI_PC;
+	crtc_state->infoframes.enable |=
+		intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_SPD);
 
-	intel_write_infoframe(encoder, crtc_state,
-			      &frame);
+	ret = hdmi_spd_infoframe_init(frame, "Intel", "Integrated gfx");
+	if (WARN_ON(ret))
+		return false;
+
+	frame->sdi = HDMI_SPD_SDI_PC;
+
+	ret = hdmi_spd_infoframe_check(frame);
+	if (WARN_ON(ret))
+		return false;
+
+	return true;
 }
 
-static void
-intel_hdmi_set_hdmi_infoframe(struct intel_encoder *encoder,
-			      const struct intel_crtc_state *crtc_state,
-			      const struct drm_connector_state *conn_state)
-{
-	union hdmi_infoframe frame;
+static bool
+intel_hdmi_compute_hdmi_infoframe(struct intel_encoder *encoder,
+				  struct intel_crtc_state *crtc_state,
+				  struct drm_connector_state *conn_state)
+{
+	struct hdmi_vendor_infoframe *frame =
+		&crtc_state->infoframes.hdmi.vendor.hdmi;
+	const struct drm_display_info *info =
+		&conn_state->connector->display_info;
 	int ret;
 
-	ret = drm_hdmi_vendor_infoframe_from_display_mode(&frame.vendor.hdmi,
+	if (!crtc_state->has_infoframe || !info->has_hdmi_infoframe)
+		return true;
+
+	crtc_state->infoframes.enable |=
+		intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_VENDOR);
+
+	ret = drm_hdmi_vendor_infoframe_from_display_mode(frame,
 							  conn_state->connector,
 							  &crtc_state->base.adjusted_mode);
-	if (ret < 0)
-		return;
+	if (WARN_ON(ret))
+		return false;
 
-	intel_write_infoframe(encoder, crtc_state,
-			      &frame);
+	ret = hdmi_vendor_infoframe_check(frame);
+	if (WARN_ON(ret))
+		return false;
+
+	return true;
 }
 
 static void g4x_set_infoframes(struct intel_encoder *encoder,
@@ -608,9 +834,15 @@ static void g4x_set_infoframes(struct intel_encoder *encoder,
 	I915_WRITE(reg, val);
 	POSTING_READ(reg);
 
-	intel_hdmi_set_avi_infoframe(encoder, crtc_state, conn_state);
-	intel_hdmi_set_spd_infoframe(encoder, crtc_state);
-	intel_hdmi_set_hdmi_infoframe(encoder, crtc_state, conn_state);
+	intel_write_infoframe(encoder, crtc_state,
+			      HDMI_INFOFRAME_TYPE_AVI,
+			      &crtc_state->infoframes.avi);
+	intel_write_infoframe(encoder, crtc_state,
+			      HDMI_INFOFRAME_TYPE_SPD,
+			      &crtc_state->infoframes.spd);
+	intel_write_infoframe(encoder, crtc_state,
+			      HDMI_INFOFRAME_TYPE_VENDOR,
+			      &crtc_state->infoframes.hdmi);
 }
 
 static bool hdmi_sink_is_deep_color(const struct drm_connector_state *conn_state)
@@ -676,7 +908,10 @@ static bool intel_hdmi_set_gcp_infoframe(struct intel_encoder *encoder,
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
 	i915_reg_t reg;
-	u32 val = 0;
+
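+	/*
+	 * Nothing to write unless the GCP infoframe was enabled during
+	 * state computation; see intel_hdmi_compute_gcp_infoframe().
+	 */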
+	if ((crtc_state->infoframes.enable &
+	     intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GENERAL_CONTROL)) == 0)
+		return false;
 
 	if (HAS_DDI(dev_priv))
 		reg = HSW_TVIDEO_DIP_GCP(crtc_state->cpu_transcoder);
@@ -687,18 +922,54 @@ static bool intel_hdmi_set_gcp_infoframe(struct intel_encoder *encoder,
 	else
 		return false;
 
+	I915_WRITE(reg, crtc_state->infoframes.gcp);
+
+	return true;
+}
+
+void intel_hdmi_read_gcp_infoframe(struct intel_encoder *encoder,
+				   struct intel_crtc_state *crtc_state)
+{
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	i915_reg_t reg;
+
+	if ((crtc_state->infoframes.enable &
+	     intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GENERAL_CONTROL)) == 0)
+		return;
+
+	if (HAS_DDI(dev_priv))
+		reg = HSW_TVIDEO_DIP_GCP(crtc_state->cpu_transcoder);
+	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+		reg = VLV_TVIDEO_DIP_GCP(crtc->pipe);
+	else if (HAS_PCH_SPLIT(dev_priv))
+		reg = TVIDEO_DIP_GCP(crtc->pipe);
+	else
+		return;
+
+	crtc_state->infoframes.gcp = I915_READ(reg);
+}
+
+static void intel_hdmi_compute_gcp_infoframe(struct intel_encoder *encoder,
+					     struct intel_crtc_state *crtc_state,
+					     struct drm_connector_state *conn_state)
+{
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+
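+	/* G4x has no GCP register */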
+	if (IS_G4X(dev_priv) || !crtc_state->has_infoframe)
+		return;
+
+	crtc_state->infoframes.enable |=
+		intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GENERAL_CONTROL);
+
 	/* Indicate color depth whenever the sink supports deep color */
 	if (hdmi_sink_is_deep_color(conn_state))
-		val |= GCP_COLOR_INDICATION;
+		crtc_state->infoframes.gcp |= GCP_COLOR_INDICATION;
 
 	/* Enable default_phase whenever the display mode is suitably aligned */
 	if (gcp_default_phase_possible(crtc_state->pipe_bpp,
 				       &crtc_state->base.adjusted_mode))
-		val |= GCP_DEFAULT_PHASE_ENABLE;
-
-	I915_WRITE(reg, val);
-
-	return val != 0;
+		crtc_state->infoframes.gcp |= GCP_DEFAULT_PHASE_ENABLE;
 }
 
 static void ibx_set_infoframes(struct intel_encoder *encoder,
@@ -749,9 +1020,15 @@ static void ibx_set_infoframes(struct intel_encoder *encoder,
 	I915_WRITE(reg, val);
 	POSTING_READ(reg);
 
-	intel_hdmi_set_avi_infoframe(encoder, crtc_state, conn_state);
-	intel_hdmi_set_spd_infoframe(encoder, crtc_state);
-	intel_hdmi_set_hdmi_infoframe(encoder, crtc_state, conn_state);
+	intel_write_infoframe(encoder, crtc_state,
+			      HDMI_INFOFRAME_TYPE_AVI,
+			      &crtc_state->infoframes.avi);
+	intel_write_infoframe(encoder, crtc_state,
+			      HDMI_INFOFRAME_TYPE_SPD,
+			      &crtc_state->infoframes.spd);
+	intel_write_infoframe(encoder, crtc_state,
+			      HDMI_INFOFRAME_TYPE_VENDOR,
+			      &crtc_state->infoframes.hdmi);
 }
 
 static void cpt_set_infoframes(struct intel_encoder *encoder,
@@ -792,9 +1069,15 @@ static void cpt_set_infoframes(struct intel_encoder *encoder,
 	I915_WRITE(reg, val);
 	POSTING_READ(reg);
 
-	intel_hdmi_set_avi_infoframe(encoder, crtc_state, conn_state);
-	intel_hdmi_set_spd_infoframe(encoder, crtc_state);
-	intel_hdmi_set_hdmi_infoframe(encoder, crtc_state, conn_state);
+	intel_write_infoframe(encoder, crtc_state,
+			      HDMI_INFOFRAME_TYPE_AVI,
+			      &crtc_state->infoframes.avi);
+	intel_write_infoframe(encoder, crtc_state,
+			      HDMI_INFOFRAME_TYPE_SPD,
+			      &crtc_state->infoframes.spd);
+	intel_write_infoframe(encoder, crtc_state,
+			      HDMI_INFOFRAME_TYPE_VENDOR,
+			      &crtc_state->infoframes.hdmi);
 }
 
 static void vlv_set_infoframes(struct intel_encoder *encoder,
@@ -844,9 +1127,15 @@ static void vlv_set_infoframes(struct intel_encoder *encoder,
 	I915_WRITE(reg, val);
 	POSTING_READ(reg);
 
-	intel_hdmi_set_avi_infoframe(encoder, crtc_state, conn_state);
-	intel_hdmi_set_spd_infoframe(encoder, crtc_state);
-	intel_hdmi_set_hdmi_infoframe(encoder, crtc_state, conn_state);
+	intel_write_infoframe(encoder, crtc_state,
+			      HDMI_INFOFRAME_TYPE_AVI,
+			      &crtc_state->infoframes.avi);
+	intel_write_infoframe(encoder, crtc_state,
+			      HDMI_INFOFRAME_TYPE_SPD,
+			      &crtc_state->infoframes.spd);
+	intel_write_infoframe(encoder, crtc_state,
+			      HDMI_INFOFRAME_TYPE_VENDOR,
+			      &crtc_state->infoframes.hdmi);
 }
 
 static void hsw_set_infoframes(struct intel_encoder *encoder,
@@ -877,9 +1166,15 @@ static void hsw_set_infoframes(struct intel_encoder *encoder,
 	I915_WRITE(reg, val);
 	POSTING_READ(reg);
 
-	intel_hdmi_set_avi_infoframe(encoder, crtc_state, conn_state);
-	intel_hdmi_set_spd_infoframe(encoder, crtc_state);
-	intel_hdmi_set_hdmi_infoframe(encoder, crtc_state, conn_state);
+	intel_write_infoframe(encoder, crtc_state,
+			      HDMI_INFOFRAME_TYPE_AVI,
+			      &crtc_state->infoframes.avi);
+	intel_write_infoframe(encoder, crtc_state,
+			      HDMI_INFOFRAME_TYPE_SPD,
+			      &crtc_state->infoframes.spd);
+	intel_write_infoframe(encoder, crtc_state,
+			      HDMI_INFOFRAME_TYPE_VENDOR,
+			      &crtc_state->infoframes.hdmi);
 }
 
 void intel_dp_dual_mode_set_tmds_output(struct intel_hdmi *hdmi, bool enable)
@@ -1085,10 +1380,44 @@ int intel_hdmi_hdcp_read_v_prime_part(struct intel_digital_port *intel_dig_port,
 	return ret;
 }
 
+static int kbl_repositioning_enc_en_signal(struct intel_connector *connector)
+{
+	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+	struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
+	struct drm_crtc *crtc = connector->base.state->crtc;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	u32 scanline;
+	int ret;
+
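+	/*
+	 * Wait for the pipe to scan out an early scanline so that the
+	 * enc_en toggle below lands inside the window of opportunity
+	 * described in intel_hdmi_hdcp_toggle_signalling().
+	 */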
+	for (;;) {
+		scanline = I915_READ(PIPEDSL(intel_crtc->pipe));
+		if (scanline > 100 && scanline < 200)
+			break;
+		usleep_range(25, 50);
+	}
+
+	ret = intel_ddi_toggle_hdcp_signalling(&intel_dig_port->base, false);
+	if (ret) {
+		DRM_ERROR("Disable HDCP signalling failed (%d)\n", ret);
+		return ret;
+	}
+	ret = intel_ddi_toggle_hdcp_signalling(&intel_dig_port->base, true);
+	if (ret) {
+		DRM_ERROR("Enable HDCP signalling failed (%d)\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+
 static
 int intel_hdmi_hdcp_toggle_signalling(struct intel_digital_port *intel_dig_port,
 				      bool enable)
 {
+	struct intel_hdmi *hdmi = &intel_dig_port->hdmi;
+	struct intel_connector *connector = hdmi->attached_connector;
+	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
 	int ret;
 
 	if (!enable)
@@ -1100,6 +1429,14 @@ int intel_hdmi_hdcp_toggle_signalling(struct intel_digital_port *intel_dig_port,
 			  enable ? "Enable" : "Disable", ret);
 		return ret;
 	}
+
+	/*
+	 * WA: KABYLAKE positions the window of opportunity and the
+	 * enc_en signalling incorrectly; reposition the enc_en signal
+	 * by toggling HDCP signalling off and on again.
+	 */
+	if (IS_KABYLAKE(dev_priv) && enable)
+		return kbl_repositioning_enc_en_signal(connector);
+
 	return 0;
 }
 
@@ -1131,6 +1468,190 @@ bool intel_hdmi_hdcp_check_link(struct intel_digital_port *intel_dig_port)
 	return true;
 }
 
+static struct hdcp2_hdmi_msg_data {
+	u8 msg_id;
+	u32 timeout;
+	u32 timeout2;
+} hdcp2_msg_data[] = {
+	{HDCP_2_2_AKE_INIT, 0, 0},
+	{HDCP_2_2_AKE_SEND_CERT, HDCP_2_2_CERT_TIMEOUT_MS, 0},
+	{HDCP_2_2_AKE_NO_STORED_KM, 0, 0},
+	{HDCP_2_2_AKE_STORED_KM, 0, 0},
+	{HDCP_2_2_AKE_SEND_HPRIME, HDCP_2_2_HPRIME_PAIRED_TIMEOUT_MS,
+	 HDCP_2_2_HPRIME_NO_PAIRED_TIMEOUT_MS},
+	{HDCP_2_2_AKE_SEND_PAIRING_INFO, HDCP_2_2_PAIRING_TIMEOUT_MS, 0},
+	{HDCP_2_2_LC_INIT, 0, 0},
+	{HDCP_2_2_LC_SEND_LPRIME, HDCP_2_2_HDMI_LPRIME_TIMEOUT_MS, 0},
+	{HDCP_2_2_SKE_SEND_EKS, 0, 0},
+	{HDCP_2_2_REP_SEND_RECVID_LIST, HDCP_2_2_RECVID_LIST_TIMEOUT_MS, 0},
+	{HDCP_2_2_REP_SEND_ACK, 0, 0},
+	{HDCP_2_2_REP_STREAM_MANAGE, 0, 0},
+	{HDCP_2_2_REP_STREAM_READY, HDCP_2_2_STREAM_READY_TIMEOUT_MS, 0},
+};
+
+static
+int intel_hdmi_hdcp2_read_rx_status(struct intel_digital_port *intel_dig_port,
+				    u8 *rx_status)
+{
+	return intel_hdmi_hdcp_read(intel_dig_port,
+				    HDCP_2_2_HDMI_REG_RXSTATUS_OFFSET,
+				    rx_status,
+				    HDCP_2_2_HDMI_RXSTATUS_LEN);
+}
+
+static int get_hdcp2_msg_timeout(u8 msg_id, bool is_paired)
+{
+	int i;
+
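+	/*
+	 * AKE_Send_H_prime is the only message with two timeouts: timeout
+	 * applies when pairing info is already stored, timeout2 when it
+	 * is not.
+	 */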
+	for (i = 0; i < ARRAY_SIZE(hdcp2_msg_data); i++)
+		if (hdcp2_msg_data[i].msg_id == msg_id &&
+		    (msg_id != HDCP_2_2_AKE_SEND_HPRIME || is_paired))
+			return hdcp2_msg_data[i].timeout;
+		else if (hdcp2_msg_data[i].msg_id == msg_id)
+			return hdcp2_msg_data[i].timeout2;
+
+	return -EINVAL;
+}
+
+static inline
+int hdcp2_detect_msg_availability(struct intel_digital_port *intel_digital_port,
+				  u8 msg_id, bool *msg_ready,
+				  ssize_t *msg_sz)
+{
+	u8 rx_status[HDCP_2_2_HDMI_RXSTATUS_LEN];
+	int ret;
+
+	ret = intel_hdmi_hdcp2_read_rx_status(intel_digital_port, rx_status);
+	if (ret < 0) {
+		DRM_DEBUG_KMS("rx_status read failed. Err %d\n", ret);
+		return ret;
+	}
+
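+	/*
+	 * RxStatus reports the pending message size split across its two
+	 * bytes: the low 8 bits in byte 0, the high bits in byte 1.
+	 */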
+	*msg_sz = (HDCP_2_2_HDMI_RXSTATUS_MSG_SZ_HI(rx_status[1]) << 8) |
+		  rx_status[0];
+
+	if (msg_id == HDCP_2_2_REP_SEND_RECVID_LIST)
+		*msg_ready = (HDCP_2_2_HDMI_RXSTATUS_READY(rx_status[1]) &&
+			     *msg_sz);
+	else
+		*msg_ready = *msg_sz;
+
+	return 0;
+}
+
+static ssize_t
+intel_hdmi_hdcp2_wait_for_msg(struct intel_digital_port *intel_dig_port,
+			      u8 msg_id, bool paired)
+{
+	bool msg_ready = false;
+	int timeout, ret;
+	ssize_t msg_sz = 0;
+
+	timeout = get_hdcp2_msg_timeout(msg_id, paired);
+	if (timeout < 0)
+		return timeout;
+
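+	/*
+	 * Poll RxStatus, backing off between 1ms and 5ms, until the
+	 * message is ready or the timeout (in ms) expires.
+	 */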
+	ret = __wait_for(ret = hdcp2_detect_msg_availability(intel_dig_port,
+							     msg_id, &msg_ready,
+							     &msg_sz),
+			 !ret && msg_ready && msg_sz, timeout * 1000,
+			 1000, 5 * 1000);
+	if (ret)
+		DRM_DEBUG_KMS("msg_id: %d, ret: %d, timeout: %d\n",
+			      msg_id, ret, timeout);
+
+	return ret ? ret : msg_sz;
+}
+
+static
+int intel_hdmi_hdcp2_write_msg(struct intel_digital_port *intel_dig_port,
+			       void *buf, size_t size)
+{
+	unsigned int offset;
+
+	offset = HDCP_2_2_HDMI_REG_WR_MSG_OFFSET;
+	return intel_hdmi_hdcp_write(intel_dig_port, offset, buf, size);
+}
+
+static
+int intel_hdmi_hdcp2_read_msg(struct intel_digital_port *intel_dig_port,
+			      u8 msg_id, void *buf, size_t size)
+{
+	struct intel_hdmi *hdmi = &intel_dig_port->hdmi;
+	struct intel_hdcp *hdcp = &hdmi->attached_connector->hdcp;
+	unsigned int offset;
+	ssize_t ret;
+
+	ret = intel_hdmi_hdcp2_wait_for_msg(intel_dig_port, msg_id,
+					    hdcp->is_paired);
+	if (ret < 0)
+		return ret;
+
+	/*
+	 * The available message size must not exceed the caller's buffer.
+	 */
+	if (ret > size) {
+		DRM_DEBUG_KMS("msg_sz(%zd) is more than exp size(%zu)\n",
+			      ret, size);
+		return -EINVAL;
+	}
+
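+	/* ret still holds the message size reported by RxStatus */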
+	offset = HDCP_2_2_HDMI_REG_RD_MSG_OFFSET;
+	ret = intel_hdmi_hdcp_read(intel_dig_port, offset, buf, ret);
+	if (ret)
+		DRM_DEBUG_KMS("Failed to read msg_id: %d(%zd)\n", msg_id, ret);
+
+	return ret;
+}
+
+static
+int intel_hdmi_hdcp2_check_link(struct intel_digital_port *intel_dig_port)
+{
+	u8 rx_status[HDCP_2_2_HDMI_RXSTATUS_LEN];
+	int ret;
+
+	ret = intel_hdmi_hdcp2_read_rx_status(intel_dig_port, rx_status);
+	if (ret)
+		return ret;
+
+	/*
+	 * Re-auth requests and link integrity failures are reported via
+	 * the same bit, i.e. reauth_req.
+	 */
+	if (HDCP_2_2_HDMI_RXSTATUS_REAUTH_REQ(rx_status[1]))
+		ret = HDCP_REAUTH_REQUEST;
+	else if (HDCP_2_2_HDMI_RXSTATUS_READY(rx_status[1]))
+		ret = HDCP_TOPOLOGY_CHANGE;
+
+	return ret;
+}
+
+static
+int intel_hdmi_hdcp2_capable(struct intel_digital_port *intel_dig_port,
+			     bool *capable)
+{
+	u8 hdcp2_version;
+	int ret;
+
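+	/* The sink advertises HDCP2.2 support in its HDCP2Version register */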
+	*capable = false;
+	ret = intel_hdmi_hdcp_read(intel_dig_port, HDCP_2_2_HDMI_REG_VER_OFFSET,
+				   &hdcp2_version, sizeof(hdcp2_version));
+	if (!ret && hdcp2_version & HDCP_2_2_HDMI_SUPPORT_MASK)
+		*capable = true;
+
+	return ret;
+}
+
+static inline
+enum hdcp_wired_protocol intel_hdmi_hdcp2_protocol(void)
+{
+	return HDCP_PROTOCOL_HDMI;
+}
+
 static const struct intel_hdcp_shim intel_hdmi_hdcp_shim = {
 	.write_an_aksv = intel_hdmi_hdcp_write_an_aksv,
 	.read_bksv = intel_hdmi_hdcp_read_bksv,
@@ -1142,6 +1663,11 @@ static const struct intel_hdcp_shim intel_hdmi_hdcp_shim = {
 	.read_v_prime_part = intel_hdmi_hdcp_read_v_prime_part,
 	.toggle_signalling = intel_hdmi_hdcp_toggle_signalling,
 	.check_link = intel_hdmi_hdcp_check_link,
+	.write_2_2_msg = intel_hdmi_hdcp2_write_msg,
+	.read_2_2_msg = intel_hdmi_hdcp2_read_msg,
+	.check_2_2_link	= intel_hdmi_hdcp2_check_link,
+	.hdcp_2_2_capable = intel_hdmi_hdcp2_capable,
+	.protocol = HDCP_PROTOCOL_HDMI,
 };
 
 static void intel_hdmi_prepare(struct intel_encoder *encoder,
@@ -1207,7 +1733,6 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
 				  struct intel_crtc_state *pipe_config)
 {
 	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
-	struct intel_digital_port *intel_dig_port = hdmi_to_dig_port(intel_hdmi);
 	struct drm_device *dev = encoder->base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	u32 tmp, flags = 0;
@@ -1230,7 +1755,10 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
 	if (tmp & HDMI_MODE_SELECT_HDMI)
 		pipe_config->has_hdmi_sink = true;
 
-	if (intel_dig_port->infoframe_enabled(encoder, pipe_config))
+	pipe_config->infoframes.enable |=
+		intel_hdmi_infoframes_enabled(encoder, pipe_config);
+
+	if (pipe_config->infoframes.enable)
 		pipe_config->has_infoframe = true;
 
 	if (tmp & SDVO_AUDIO_ENABLE)
@@ -1253,6 +1781,18 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
 	pipe_config->base.adjusted_mode.crtc_clock = dotclock;
 
 	pipe_config->lane_count = 4;
+
+	intel_hdmi_read_gcp_infoframe(encoder, pipe_config);
+
+	intel_read_infoframe(encoder, pipe_config,
+			     HDMI_INFOFRAME_TYPE_AVI,
+			     &pipe_config->infoframes.avi);
+	intel_read_infoframe(encoder, pipe_config,
+			     HDMI_INFOFRAME_TYPE_SPD,
+			     &pipe_config->infoframes.spd);
+	intel_read_infoframe(encoder, pipe_config,
+			     HDMI_INFOFRAME_TYPE_VENDOR,
+			     &pipe_config->infoframes.hdmi);
 }
 
 static void intel_enable_hdmi_audio(struct intel_encoder *encoder,
@@ -1666,7 +2206,7 @@ static bool hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state,
 
 	/* Display Wa_1405510057:icl */
 	if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 &&
-	    bpc == 10 && IS_ICELAKE(dev_priv) &&
+	    bpc == 10 && INTEL_GEN(dev_priv) >= 11 &&
 	    (adjusted_mode->crtc_hblank_end -
 	     adjusted_mode->crtc_hblank_start) % 8 == 2)
 		return false;
@@ -1824,6 +2364,23 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder,
 		}
 	}
 
+	intel_hdmi_compute_gcp_infoframe(encoder, pipe_config, conn_state);
+
+	if (!intel_hdmi_compute_avi_infoframe(encoder, pipe_config, conn_state)) {
+		DRM_DEBUG_KMS("bad AVI infoframe\n");
+		return -EINVAL;
+	}
+
+	if (!intel_hdmi_compute_spd_infoframe(encoder, pipe_config, conn_state)) {
+		DRM_DEBUG_KMS("bad SPD infoframe\n");
+		return -EINVAL;
+	}
+
+	if (!intel_hdmi_compute_hdmi_infoframe(encoder, pipe_config, conn_state)) {
+		DRM_DEBUG_KMS("bad HDMI infoframe\n");
+		return -EINVAL;
+	}
+
 	return 0;
 }
 
@@ -1943,7 +2500,7 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
 
 	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
 
-	if (IS_ICELAKE(dev_priv) &&
+	if (INTEL_GEN(dev_priv) >= 11 &&
 	    !intel_digital_port_connected(encoder))
 		goto out;
 
@@ -2344,14 +2901,14 @@ static u8 intel_hdmi_ddc_pin(struct drm_i915_private *dev_priv,
 		return info->alternate_ddc_pin;
 	}
 
-	if (IS_CHERRYVIEW(dev_priv))
-		ddc_pin = chv_port_to_ddc_pin(dev_priv, port);
-	else if (IS_GEN9_LP(dev_priv))
-		ddc_pin = bxt_port_to_ddc_pin(dev_priv, port);
+	if (HAS_PCH_ICP(dev_priv))
+		ddc_pin = icl_port_to_ddc_pin(dev_priv, port);
 	else if (HAS_PCH_CNP(dev_priv))
 		ddc_pin = cnp_port_to_ddc_pin(dev_priv, port);
-	else if (HAS_PCH_ICP(dev_priv))
-		ddc_pin = icl_port_to_ddc_pin(dev_priv, port);
+	else if (IS_GEN9_LP(dev_priv))
+		ddc_pin = bxt_port_to_ddc_pin(dev_priv, port);
+	else if (IS_CHERRYVIEW(dev_priv))
+		ddc_pin = chv_port_to_ddc_pin(dev_priv, port);
 	else
 		ddc_pin = g4x_port_to_ddc_pin(dev_priv, port);
 
@@ -2368,33 +2925,36 @@ void intel_infoframe_init(struct intel_digital_port *intel_dig_port)
 
 	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
 		intel_dig_port->write_infoframe = vlv_write_infoframe;
+		intel_dig_port->read_infoframe = vlv_read_infoframe;
 		intel_dig_port->set_infoframes = vlv_set_infoframes;
-		intel_dig_port->infoframe_enabled = vlv_infoframe_enabled;
+		intel_dig_port->infoframes_enabled = vlv_infoframes_enabled;
 	} else if (IS_G4X(dev_priv)) {
 		intel_dig_port->write_infoframe = g4x_write_infoframe;
+		intel_dig_port->read_infoframe = g4x_read_infoframe;
 		intel_dig_port->set_infoframes = g4x_set_infoframes;
-		intel_dig_port->infoframe_enabled = g4x_infoframe_enabled;
+		intel_dig_port->infoframes_enabled = g4x_infoframes_enabled;
 	} else if (HAS_DDI(dev_priv)) {
 		if (intel_dig_port->lspcon.active) {
-			intel_dig_port->write_infoframe =
-					lspcon_write_infoframe;
+			intel_dig_port->write_infoframe = lspcon_write_infoframe;
+			intel_dig_port->read_infoframe = lspcon_read_infoframe;
 			intel_dig_port->set_infoframes = lspcon_set_infoframes;
-			intel_dig_port->infoframe_enabled =
-						lspcon_infoframe_enabled;
+			intel_dig_port->infoframes_enabled = lspcon_infoframes_enabled;
 		} else {
-			intel_dig_port->set_infoframes = hsw_set_infoframes;
-			intel_dig_port->infoframe_enabled =
-						hsw_infoframe_enabled;
 			intel_dig_port->write_infoframe = hsw_write_infoframe;
+			intel_dig_port->read_infoframe = hsw_read_infoframe;
+			intel_dig_port->set_infoframes = hsw_set_infoframes;
+			intel_dig_port->infoframes_enabled = hsw_infoframes_enabled;
 		}
 	} else if (HAS_PCH_IBX(dev_priv)) {
 		intel_dig_port->write_infoframe = ibx_write_infoframe;
+		intel_dig_port->read_infoframe = ibx_read_infoframe;
 		intel_dig_port->set_infoframes = ibx_set_infoframes;
-		intel_dig_port->infoframe_enabled = ibx_infoframe_enabled;
+		intel_dig_port->infoframes_enabled = ibx_infoframes_enabled;
 	} else {
 		intel_dig_port->write_infoframe = cpt_write_infoframe;
+		intel_dig_port->read_infoframe = cpt_read_infoframe;
 		intel_dig_port->set_infoframes = cpt_set_infoframes;
-		intel_dig_port->infoframe_enabled = cpt_infoframe_enabled;
+		intel_dig_port->infoframes_enabled = cpt_infoframes_enabled;
 	}
 }
 
@@ -2440,6 +3000,9 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
 
 	intel_hdmi_add_properties(intel_hdmi, connector);
 
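+	/* The HDCP shim uses attached_connector, so set it up before intel_hdcp_init() */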
+	intel_connector_attach_encoder(intel_connector, intel_encoder);
+	intel_hdmi->attached_connector = intel_connector;
+
 	if (is_hdcp_supported(dev_priv, port)) {
 		int ret = intel_hdcp_init(intel_connector,
 					  &intel_hdmi_hdcp_shim);
@@ -2447,9 +3010,6 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
 			DRM_DEBUG_KMS("HDCP init failed, skipping.\n");
 	}
 
-	intel_connector_attach_encoder(intel_connector, intel_encoder);
-	intel_hdmi->attached_connector = intel_connector;
-
 	/* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
 	 * 0xd.  Failure to do so will result in spurious interrupts being
 	 * generated on the port when a cable is not attached.
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 5e98fd79bd9df4a04950bdae683f8b67e2c4e7dd..51c2ea164b36f99c834843d472b5358c55085cf9 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -164,20 +164,15 @@
 #define WA_TAIL_DWORDS 2
 #define WA_TAIL_BYTES (sizeof(u32) * WA_TAIL_DWORDS)
 
-static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
-					    struct intel_engine_cs *engine,
-					    struct intel_context *ce);
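+/*
+ * Priority bumps applied once a request becomes the active request on an
+ * engine; see effective_prio() and __unwind_incomplete_requests().
+ */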
+#define ACTIVE_PRIORITY (I915_PRIORITY_NEWCLIENT | I915_PRIORITY_NOSEMAPHORE)
+
+static int execlists_context_deferred_alloc(struct intel_context *ce,
+					    struct intel_engine_cs *engine);
 static void execlists_init_reg_state(u32 *reg_state,
-				     struct i915_gem_context *ctx,
+				     struct intel_context *ce,
 				     struct intel_engine_cs *engine,
 				     struct intel_ring *ring);
 
-static inline u32 intel_hws_seqno_address(struct intel_engine_cs *engine)
-{
-	return (i915_ggtt_offset(engine->status_page.vma) +
-		I915_GEM_HWS_INDEX_ADDR);
-}
-
 static inline struct i915_priolist *to_priolist(struct rb_node *rb)
 {
 	return rb_entry(rb, struct i915_priolist, node);
@@ -188,6 +183,34 @@ static inline int rq_prio(const struct i915_request *rq)
 	return rq->sched.attr.priority;
 }
 
+static int effective_prio(const struct i915_request *rq)
+{
+	int prio = rq_prio(rq);
+
+	/*
+	 * On unwinding the active request, we give it a priority bump
+	 * equivalent to a freshly submitted request. This protects it from
+	 * being gazumped again, but it would be preferable if we didn't
+	 * let it be gazumped in the first place!
+	 *
+	 * See __unwind_incomplete_requests()
+	 */
+	if (~prio & ACTIVE_PRIORITY && __i915_request_has_started(rq)) {
+		/*
+		 * After preemption, we insert the active request at the
+		 * end of the new priority level. This means that we will be
+		 * _lower_ priority than the preemptee all things equal (and
+		 * so the preemption is valid), so adjust our comparison
+		 * accordingly.
+		 */
+		prio |= ACTIVE_PRIORITY;
+		prio--;
+	}
+
+	/* Restrict mere WAIT boosts from triggering preemption */
+	return prio | __NO_PREEMPTION;
+}
+
 static int queue_prio(const struct intel_engine_execlists *execlists)
 {
 	struct i915_priolist *p;
@@ -208,7 +231,7 @@ static int queue_prio(const struct intel_engine_execlists *execlists)
 static inline bool need_preempt(const struct intel_engine_cs *engine,
 				const struct i915_request *rq)
 {
-	const int last_prio = rq_prio(rq);
+	int last_prio;
 
 	if (!intel_engine_has_preemption(engine))
 		return false;
@@ -228,6 +251,7 @@ static inline bool need_preempt(const struct intel_engine_cs *engine,
 	 * preempt. If that hint is stale or we may be trying to preempt
 	 * ourselves, ignore the request.
 	 */
+	last_prio = effective_prio(rq);
 	if (!__execlists_need_preempt(engine->execlists.queue_priority_hint,
 				      last_prio))
 		return false;
@@ -254,12 +278,11 @@ static inline bool need_preempt(const struct intel_engine_cs *engine,
 }
 
 __maybe_unused static inline bool
-assert_priority_queue(const struct intel_engine_execlists *execlists,
-		      const struct i915_request *prev,
+assert_priority_queue(const struct i915_request *prev,
 		      const struct i915_request *next)
 {
-	if (!prev)
-		return true;
+	const struct intel_engine_execlists *execlists =
+		&prev->engine->execlists;
 
 	/*
 	 * Without preemption, the prev may refer to the still active element
@@ -300,11 +323,10 @@ assert_priority_queue(const struct intel_engine_execlists *execlists,
  * engine info, SW context ID and SW counter need to form a unique number
  * (Context ID) per lrc.
  */
-static void
-intel_lr_context_descriptor_update(struct i915_gem_context *ctx,
-				   struct intel_engine_cs *engine,
-				   struct intel_context *ce)
+static u64
+lrc_descriptor(struct intel_context *ce, struct intel_engine_cs *engine)
 {
+	struct i915_gem_context *ctx = ce->gem_context;
 	u64 desc;
 
 	BUILD_BUG_ON(MAX_CONTEXT_HW_ID > (BIT(GEN8_CTX_ID_WIDTH)));
@@ -322,7 +344,7 @@ intel_lr_context_descriptor_update(struct i915_gem_context *ctx,
 	 * Consider updating oa_get_render_ctx_id in i915_perf.c when changing
 	 * anything below.
 	 */
-	if (INTEL_GEN(ctx->i915) >= 11) {
+	if (INTEL_GEN(engine->i915) >= 11) {
 		GEM_BUG_ON(ctx->hw_id >= BIT(GEN11_SW_CTX_ID_WIDTH));
 		desc |= (u64)ctx->hw_id << GEN11_SW_CTX_ID_SHIFT;
 								/* bits 37-47 */
@@ -339,7 +361,7 @@ intel_lr_context_descriptor_update(struct i915_gem_context *ctx,
 		desc |= (u64)ctx->hw_id << GEN8_CTX_ID_SHIFT;	/* bits 32-52 */
 	}
 
-	ce->lrc_desc = desc;
+	return desc;
 }
 
 static void unwind_wa_tail(struct i915_request *rq)
@@ -353,7 +375,7 @@ __unwind_incomplete_requests(struct intel_engine_cs *engine)
 {
 	struct i915_request *rq, *rn, *active = NULL;
 	struct list_head *uninitialized_var(pl);
-	int prio = I915_PRIORITY_INVALID | I915_PRIORITY_NEWCLIENT;
+	int prio = I915_PRIORITY_INVALID | ACTIVE_PRIORITY;
 
 	lockdep_assert_held(&engine->timeline.lock);
 
@@ -384,9 +406,21 @@ __unwind_incomplete_requests(struct intel_engine_cs *engine)
 	 * The active request is now effectively the start of a new client
 	 * stream, so give it the equivalent small priority bump to prevent
 	 * it being gazumped a second time by another peer.
+	 *
+	 * Note we have to be careful not to apply a priority boost to a request
+	 * still spinning on its semaphores. If the request hasn't started, that
+	 * means it is still waiting for its dependencies to be signaled, and
+	 * if we apply a priority boost to this request, we will boost it past
+	 * its signalers and so break PI.
+	 *
+	 * One consequence of this preemption boost is that we may jump
+	 * over lesser priorities (such as I915_PRIORITY_WAIT), effectively
+	 * making those priorities non-preemptible. They will be moved forward
+	 * in the priority queue, but they will not gain immediate access to
+	 * the GPU.
 	 */
-	if (!(prio & I915_PRIORITY_NEWCLIENT)) {
-		prio |= I915_PRIORITY_NEWCLIENT;
+	if (~prio & ACTIVE_PRIORITY && __i915_request_has_started(active)) {
+		prio |= ACTIVE_PRIORITY;
 		active->sched.attr.priority = prio;
 		list_move_tail(&active->sched.link,
 			       i915_sched_lookup_priolist(engine, prio));
@@ -523,13 +557,11 @@ static void execlists_submit_ports(struct intel_engine_cs *engine)
 			desc = execlists_update_context(rq);
 			GEM_DEBUG_EXEC(port[n].context_id = upper_32_bits(desc));
 
-			GEM_TRACE("%s in[%d]:  ctx=%d.%d, global=%d (fence %llx:%lld) (current %d:%d), prio=%d\n",
+			GEM_TRACE("%s in[%d]:  ctx=%d.%d, fence %llx:%lld (current %d), prio=%d\n",
 				  engine->name, n,
 				  port[n].context_id, count,
-				  rq->global_seqno,
 				  rq->fence.context, rq->fence.seqno,
 				  hwsp_seqno(rq),
-				  intel_engine_get_seqno(engine),
 				  rq_prio(rq));
 		} else {
 			GEM_BUG_ON(!n);
@@ -564,6 +596,17 @@ static bool can_merge_ctx(const struct intel_context *prev,
 	return true;
 }
 
+static bool can_merge_rq(const struct i915_request *prev,
+			 const struct i915_request *next)
+{
+	GEM_BUG_ON(!assert_priority_queue(prev, next));
+
+	if (!can_merge_ctx(prev->hw_context, next->hw_context))
+		return false;
+
+	return true;
+}
+
 static void port_assign(struct execlist_port *port, struct i915_request *rq)
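+	/*
+	 * Nothing to write unless this infoframe was enabled during
+	 * state computation.
+	 */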
 {
 	GEM_BUG_ON(rq == port_request(port));
@@ -577,8 +620,7 @@ static void port_assign(struct execlist_port *port, struct i915_request *rq)
 static void inject_preempt_context(struct intel_engine_cs *engine)
 {
 	struct intel_engine_execlists *execlists = &engine->execlists;
-	struct intel_context *ce =
-		to_intel_context(engine->i915->preempt_context, engine);
+	struct intel_context *ce = engine->preempt_context;
 	unsigned int n;
 
 	GEM_BUG_ON(execlists->preempt_complete_status !=
@@ -716,8 +758,6 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 		int i;
 
 		priolist_for_each_request_consume(rq, rn, p, i) {
-			GEM_BUG_ON(!assert_priority_queue(execlists, last, rq));
-
 			/*
 			 * Can we combine this request with the current port?
 			 * It has to be the same context/ringbuffer and not
@@ -729,8 +769,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 			 * second request, and so we never need to tell the
 			 * hardware about the first.
 			 */
-			if (last &&
-			    !can_merge_ctx(rq->hw_context, last->hw_context)) {
+			if (last && !can_merge_rq(last, rq)) {
 				/*
 				 * If we are on the second port and cannot
 				 * combine this request with the last, then we
@@ -739,6 +778,14 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 				if (port == last_port)
 					goto done;
 
+				/*
+				 * We must not populate both ELSP[] with the
+				 * same LRCA, i.e. we must submit 2 different
+				 * contexts if we submit 2 ELSP.
+				 */
+				if (last->hw_context == rq->hw_context)
+					goto done;
+
 				/*
 				 * If GVT overrides us we only ever submit
 				 * port[0], leaving port[1] empty. Note that we
@@ -750,7 +797,6 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 				    ctx_single_port_submission(rq->hw_context))
 					goto done;
 
-				GEM_BUG_ON(last->hw_context == rq->hw_context);
-
 				if (submit)
 					port_assign(port, last);
@@ -769,8 +815,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 		}
 
 		rb_erase_cached(&p->node, &execlists->queue);
-		if (p->priority != I915_PRIORITY_NORMAL)
-			kmem_cache_free(engine->i915->priorities, p);
+		i915_priolist_free(p);
 	}
 
 done:
@@ -790,8 +835,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 	 * request triggering preemption on the next dequeue (or subsequent
 	 * interrupt for secondary ports).
 	 */
-	execlists->queue_priority_hint =
-		port != execlists->port ? rq_prio(last) : INT_MIN;
+	execlists->queue_priority_hint = queue_prio(execlists);
 
 	if (submit) {
 		port_assign(port, last);
@@ -821,13 +865,11 @@ execlists_cancel_port_requests(struct intel_engine_execlists * const execlists)
 	while (num_ports-- && port_isset(port)) {
 		struct i915_request *rq = port_request(port);
 
-		GEM_TRACE("%s:port%u global=%d (fence %llx:%lld), (current %d:%d)\n",
+		GEM_TRACE("%s:port%u fence %llx:%lld, (current %d)\n",
 			  rq->engine->name,
 			  (unsigned int)(port - execlists->port),
-			  rq->global_seqno,
 			  rq->fence.context, rq->fence.seqno,
-			  hwsp_seqno(rq),
-			  intel_engine_get_seqno(rq->engine));
+			  hwsp_seqno(rq));
 
 		GEM_BUG_ON(!execlists->active);
 		execlists_context_schedule_out(rq,
@@ -883,8 +925,7 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
 	struct rb_node *rb;
 	unsigned long flags;
 
-	GEM_TRACE("%s current %d\n",
-		  engine->name, intel_engine_get_seqno(engine));
+	GEM_TRACE("%s\n", engine->name);
 
 	/*
 	 * Before we call engine->cancel_requests(), we should have exclusive
@@ -908,8 +949,6 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
 
 	/* Mark all executing requests as skipped. */
 	list_for_each_entry(rq, &engine->timeline.requests, link) {
-		GEM_BUG_ON(!rq->global_seqno);
-
 		if (!i915_request_signaled(rq))
 			dma_fence_set_error(&rq->fence, -EIO);
 
@@ -929,14 +968,9 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
 		}
 
 		rb_erase_cached(&p->node, &execlists->queue);
-		if (p->priority != I915_PRIORITY_NORMAL)
-			kmem_cache_free(engine->i915->priorities, p);
+		i915_priolist_free(p);
 	}
 
-	intel_write_status_page(engine,
-				I915_GEM_HWS_INDEX,
-				intel_engine_last_submit(engine));
-
 	/* Remaining _unready_ requests will be nop'ed when submitted */
 
 	execlists->queue_priority_hint = INT_MIN;
@@ -1052,14 +1086,12 @@ static void process_csb(struct intel_engine_cs *engine)
 						EXECLISTS_ACTIVE_USER));
 
 		rq = port_unpack(port, &count);
-		GEM_TRACE("%s out[0]: ctx=%d.%d, global=%d (fence %llx:%lld) (current %d:%d), prio=%d\n",
+		GEM_TRACE("%s out[0]: ctx=%d.%d, fence %llx:%lld (current %d), prio=%d\n",
 			  engine->name,
 			  port->context_id, count,
-			  rq ? rq->global_seqno : 0,
 			  rq ? rq->fence.context : 0,
 			  rq ? rq->fence.seqno : 0,
 			  rq ? hwsp_seqno(rq) : 0,
-			  intel_engine_get_seqno(engine),
 			  rq ? rq_prio(rq) : 0);
 
 		/* Check the context/desc id for this event matches */
@@ -1196,19 +1228,26 @@ static void execlists_submit_request(struct i915_request *request)
 	spin_unlock_irqrestore(&engine->timeline.lock, flags);
 }
 
-static void execlists_context_destroy(struct intel_context *ce)
+static void __execlists_context_fini(struct intel_context *ce)
 {
-	GEM_BUG_ON(ce->pin_count);
-
-	if (!ce->state)
-		return;
-
-	intel_ring_free(ce->ring);
+	intel_ring_put(ce->ring);
 
 	GEM_BUG_ON(i915_gem_object_is_active(ce->state->obj));
 	i915_gem_object_put(ce->state->obj);
 }
 
+static void execlists_context_destroy(struct kref *kref)
+{
+	struct intel_context *ce = container_of(kref, typeof(*ce), ref);
+
+	GEM_BUG_ON(intel_context_is_pinned(ce));
+
+	if (ce->state)
+		__execlists_context_fini(ce);
+
+	intel_context_free(ce);
+}
+
 static void execlists_context_unpin(struct intel_context *ce)
 {
 	struct intel_engine_cs *engine;
@@ -1240,11 +1279,9 @@ static void execlists_context_unpin(struct intel_context *ce)
 	ce->state->obj->pin_global--;
 	i915_gem_object_unpin_map(ce->state->obj);
 	i915_vma_unpin(ce->state);
-
-	i915_gem_context_put(ce->gem_context);
 }
 
-static int __context_pin(struct i915_gem_context *ctx, struct i915_vma *vma)
+static int __context_pin(struct i915_vma *vma)
 {
 	unsigned int flags;
 	int err;
@@ -1267,11 +1304,14 @@ static int __context_pin(struct i915_gem_context *ctx, struct i915_vma *vma)
 }
 
 static void
-__execlists_update_reg_state(struct intel_engine_cs *engine,
-			     struct intel_context *ce)
+__execlists_update_reg_state(struct intel_context *ce,
+			     struct intel_engine_cs *engine)
 {
-	u32 *regs = ce->lrc_reg_state;
 	struct intel_ring *ring = ce->ring;
+	u32 *regs = ce->lrc_reg_state;
+
+	GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->head));
+	GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->tail));
 
 	regs[CTX_RING_BUFFER_START + 1] = i915_ggtt_offset(ring->vma);
 	regs[CTX_RING_HEAD + 1] = ring->head;
@@ -1279,29 +1319,30 @@ __execlists_update_reg_state(struct intel_engine_cs *engine,
 
 	/* RPCS */
 	if (engine->class == RENDER_CLASS)
-		regs[CTX_R_PWR_CLK_STATE + 1] = gen8_make_rpcs(engine->i915,
-							       &ce->sseu);
+		regs[CTX_R_PWR_CLK_STATE + 1] =
+			gen8_make_rpcs(engine->i915, &ce->sseu);
 }
 
-static struct intel_context *
-__execlists_context_pin(struct intel_engine_cs *engine,
-			struct i915_gem_context *ctx,
-			struct intel_context *ce)
+static int
+__execlists_context_pin(struct intel_context *ce,
+			struct intel_engine_cs *engine)
 {
 	void *vaddr;
 	int ret;
 
-	ret = execlists_context_deferred_alloc(ctx, engine, ce);
+	GEM_BUG_ON(!ce->gem_context->ppgtt);
+
+	ret = execlists_context_deferred_alloc(ce, engine);
 	if (ret)
 		goto err;
 	GEM_BUG_ON(!ce->state);
 
-	ret = __context_pin(ctx, ce->state);
+	ret = __context_pin(ce->state);
 	if (ret)
 		goto err;
 
 	vaddr = i915_gem_object_pin_map(ce->state->obj,
-					i915_coherent_map_type(ctx->i915) |
+					i915_coherent_map_type(engine->i915) |
 					I915_MAP_OVERRIDE);
 	if (IS_ERR(vaddr)) {
 		ret = PTR_ERR(vaddr);
@@ -1312,21 +1353,16 @@ __execlists_context_pin(struct intel_engine_cs *engine,
 	if (ret)
 		goto unpin_map;
 
-	ret = i915_gem_context_pin_hw_id(ctx);
+	ret = i915_gem_context_pin_hw_id(ce->gem_context);
 	if (ret)
 		goto unpin_ring;
 
-	intel_lr_context_descriptor_update(ctx, engine, ce);
-
-	GEM_BUG_ON(!intel_ring_offset_valid(ce->ring, ce->ring->head));
-
+	ce->lrc_desc = lrc_descriptor(ce, engine);
 	ce->lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
-
-	__execlists_update_reg_state(engine, ce);
+	__execlists_update_reg_state(ce, engine);
 
 	ce->state->obj->pin_global++;
-	i915_gem_context_get(ctx);
-	return ce;
+	return 0;
 
 unpin_ring:
 	intel_ring_unpin(ce->ring);
@@ -1335,33 +1371,20 @@ __execlists_context_pin(struct intel_engine_cs *engine,
 unpin_vma:
 	__i915_vma_unpin(ce->state);
 err:
-	ce->pin_count = 0;
-	return ERR_PTR(ret);
+	return ret;
+}
+
+static int execlists_context_pin(struct intel_context *ce)
+{
+	return __execlists_context_pin(ce, ce->engine);
 }
 
 static const struct intel_context_ops execlists_context_ops = {
+	.pin = execlists_context_pin,
 	.unpin = execlists_context_unpin,
 	.destroy = execlists_context_destroy,
 };
 
-static struct intel_context *
-execlists_context_pin(struct intel_engine_cs *engine,
-		      struct i915_gem_context *ctx)
-{
-	struct intel_context *ce = to_intel_context(ctx, engine);
-
-	lockdep_assert_held(&ctx->i915->drm.struct_mutex);
-	GEM_BUG_ON(!ctx->ppgtt);
-
-	if (likely(ce->pin_count++))
-		return ce;
-	GEM_BUG_ON(!ce->pin_count); /* no overflow please! */
-
-	ce->ops = &execlists_context_ops;
-
-	return __execlists_context_pin(engine, ctx, ce);
-}
-
 static int gen8_emit_init_breadcrumb(struct i915_request *rq)
 {
 	u32 *cs;
@@ -1387,6 +1410,10 @@ static int gen8_emit_init_breadcrumb(struct i915_request *rq)
 	*cs++ = rq->fence.seqno - 1;
 
 	intel_ring_advance(rq, cs);
+
+	/* Record the updated position of the request's payload */
+	rq->infix = intel_ring_offset(rq, cs);
+
 	return 0;
 }
 
@@ -1447,7 +1474,7 @@ static int execlists_request_alloc(struct i915_request *request)
 {
 	int ret;
 
-	GEM_BUG_ON(!request->hw_context->pin_count);
+	GEM_BUG_ON(!intel_context_is_pinned(request->hw_context));
 
 	/*
 	 * Flush enough space to reduce the likelihood of waiting after
@@ -1465,7 +1492,7 @@ static int execlists_request_alloc(struct i915_request *request)
 	 */
 
 	/* Unconditionally invalidate GPU caches and TLBs. */
-	if (i915_vm_is_48bit(&request->gem_context->ppgtt->vm))
+	if (i915_vm_is_4lvl(&request->gem_context->ppgtt->vm))
 		ret = request->engine->emit_flush(request, EMIT_INVALIDATE);
 	else
 		ret = emit_pdps(request);
@@ -1732,7 +1759,7 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine)
 	unsigned int i;
 	int ret;
 
-	if (GEM_DEBUG_WARN_ON(engine->id != RCS))
+	if (GEM_DEBUG_WARN_ON(engine->id != RCS0))
 		return -EINVAL;
 
 	switch (INTEL_GEN(engine->i915)) {
@@ -1872,12 +1899,31 @@ static void execlists_reset_prepare(struct intel_engine_cs *engine)
 	__tasklet_disable_sync_once(&execlists->tasklet);
 	GEM_BUG_ON(!reset_in_progress(execlists));
 
+	intel_engine_stop_cs(engine);
+
 	/* And flush any current direct submission. */
 	spin_lock_irqsave(&engine->timeline.lock, flags);
 	process_csb(engine); /* drain preemption events */
 	spin_unlock_irqrestore(&engine->timeline.lock, flags);
 }
 
+static bool lrc_regs_ok(const struct i915_request *rq)
+{
+	const struct intel_ring *ring = rq->ring;
+	const u32 *regs = rq->hw_context->lrc_reg_state;
+
+	/* Quick spot check for the common signs of context corruption */
+
+	if (regs[CTX_RING_BUFFER_CONTROL + 1] !=
+	    (RING_CTL_SIZE(ring->size) | RING_VALID))
+		return false;
+
+	if (regs[CTX_RING_BUFFER_START + 1] != i915_ggtt_offset(ring->vma))
+		return false;
+
+	return true;
+}
+
 static void execlists_reset(struct intel_engine_cs *engine, bool stalled)
 {
 	struct intel_engine_execlists * const execlists = &engine->execlists;
@@ -1904,14 +1950,24 @@ static void execlists_reset(struct intel_engine_cs *engine, bool stalled)
 	/* Following the reset, we need to reload the CSB read/write pointers */
 	reset_csb_pointers(&engine->execlists);
 
-	GEM_TRACE("%s seqno=%d, current=%d, stalled? %s\n",
-		  engine->name,
-		  rq ? rq->global_seqno : 0,
-		  intel_engine_get_seqno(engine),
-		  yesno(stalled));
 	if (!rq)
 		goto out_unlock;
 
+	/*
+	 * If this request hasn't started yet, e.g. it is waiting on a
+	 * semaphore, we need to avoid skipping the request or else we
+	 * break the signaling chain. However, if the context is corrupt
+	 * the request will not restart and we will be stuck with a wedged
+	 * device. It is quite often the case that if we issue a reset
+	 * while the GPU is loading the context image, that the context
+	 * image becomes corrupt.
+	 *
+	 * Otherwise, if we have not started yet, the request should replay
+	 * perfectly and we do not need to flag the result as being erroneous.
+	 */
+	if (!i915_request_started(rq) && lrc_regs_ok(rq))
+		goto out_unlock;
+
 	/*
 	 * If the request was innocent, we leave the request in the ELSP
 	 * and will try to replay it on restarting. The context image may
@@ -1924,7 +1980,7 @@ static void execlists_reset(struct intel_engine_cs *engine, bool stalled)
 	 * image back to the expected values to skip over the guilty request.
 	 */
 	i915_reset_request(rq, stalled);
-	if (!stalled)
+	if (!stalled && lrc_regs_ok(rq))
 		goto out_unlock;
 
 	/*
@@ -1942,12 +1998,12 @@ static void execlists_reset(struct intel_engine_cs *engine, bool stalled)
 		       engine->context_size - PAGE_SIZE);
 	}
 
-	/* Move the RING_HEAD onto the breadcrumb, past the hanging batch */
-	rq->ring->head = intel_ring_wrap(rq->ring, rq->postfix);
+	/* Rerun the request; its payload has been neutered (if guilty). */
+	rq->ring->head = intel_ring_wrap(rq->ring, rq->head);
 	intel_ring_update_space(rq->ring);
 
-	execlists_init_reg_state(regs, rq->gem_context, engine, rq->ring);
-	__execlists_update_reg_state(engine, rq->hw_context);
+	execlists_init_reg_state(regs, rq->hw_context, engine, rq->ring);
+	__execlists_update_reg_state(rq->hw_context, engine);
 
 out_unlock:
 	spin_unlock_irqrestore(&engine->timeline.lock, flags);
@@ -1961,13 +2017,14 @@ static void execlists_reset_finish(struct intel_engine_cs *engine)
 	 * After a GPU reset, we may have requests to replay. Do so now while
 	 * we still have the forcewake to be sure that the GPU is not allowed
 	 * to sleep before we restart and reload a context.
-	 *
 	 */
 	GEM_BUG_ON(!reset_in_progress(execlists));
 	if (!RB_EMPTY_ROOT(&execlists->queue.rb_root))
 		execlists->tasklet.func(execlists->tasklet.data);
 
-	tasklet_enable(&execlists->tasklet);
+	if (__tasklet_enable(&execlists->tasklet))
+		/* And kick in case we missed a new request submission. */
+		tasklet_hi_schedule(&execlists->tasklet);
 	GEM_TRACE("%s: depth->%d\n", engine->name,
 		  atomic_read(&execlists->tasklet.count));
 }
@@ -2148,16 +2205,16 @@ static u32 *gen8_emit_wa_tail(struct i915_request *request, u32 *cs)
 
 static u32 *gen8_emit_fini_breadcrumb(struct i915_request *request, u32 *cs)
 {
-	/* w/a: bit 5 needs to be zero for MI_FLUSH_DW address. */
-	BUILD_BUG_ON(I915_GEM_HWS_INDEX_ADDR & (1 << 5));
-
 	cs = gen8_emit_ggtt_write(cs,
 				  request->fence.seqno,
-				  request->timeline->hwsp_offset);
+				  request->timeline->hwsp_offset,
+				  0);
 
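+	/*
+	 * Also report progress into the per-engine hangcheck slot of the
+	 * HWSP via a store-index write.
+	 */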
 	cs = gen8_emit_ggtt_write(cs,
-				  request->global_seqno,
-				  intel_hws_seqno_address(request->engine));
+				  intel_engine_next_hangcheck_seqno(request->engine),
+				  I915_GEM_HWS_HANGCHECK_ADDR,
+				  MI_FLUSH_DW_STORE_INDEX);
 
 	*cs++ = MI_USER_INTERRUPT;
 	*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
@@ -2180,9 +2237,9 @@ static u32 *gen8_emit_fini_breadcrumb_rcs(struct i915_request *request, u32 *cs)
 				      PIPE_CONTROL_CS_STALL);
 
 	cs = gen8_emit_ggtt_write_rcs(cs,
-				      request->global_seqno,
-				      intel_hws_seqno_address(request->engine),
-				      PIPE_CONTROL_CS_STALL);
+				      intel_engine_next_hangcheck_seqno(request->engine),
+				      I915_GEM_HWS_HANGCHECK_ADDR,
+				      PIPE_CONTROL_STORE_DATA_INDEX);
 
 	*cs++ = MI_USER_INTERRUPT;
 	*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
@@ -2258,15 +2315,10 @@ void intel_execlists_set_default_submission(struct intel_engine_cs *engine)
 	engine->park = NULL;
 	engine->unpark = NULL;
 
+	engine->flags |= I915_ENGINE_HAS_SEMAPHORES;
 	engine->flags |= I915_ENGINE_SUPPORTS_STATS;
-	if (engine->i915->preempt_context)
+	if (engine->preempt_context)
 		engine->flags |= I915_ENGINE_HAS_PREEMPTION;
-
-	engine->i915->caps.scheduler =
-		I915_SCHEDULER_CAP_ENABLED |
-		I915_SCHEDULER_CAP_PRIORITY;
-	if (intel_engine_has_preemption(engine))
-		engine->i915->caps.scheduler |= I915_SCHEDULER_CAP_PREEMPTION;
 }
 
 static void
@@ -2279,7 +2331,7 @@ logical_ring_default_vfuncs(struct intel_engine_cs *engine)
 	engine->reset.reset = execlists_reset;
 	engine->reset.finish = execlists_reset_finish;
 
-	engine->context_pin = execlists_context_pin;
+	engine->cops = &execlists_context_ops;
 	engine->request_alloc = execlists_request_alloc;
 
 	engine->emit_flush = gen8_emit_flush;
@@ -2309,11 +2361,11 @@ logical_ring_default_irqs(struct intel_engine_cs *engine)
 
 	if (INTEL_GEN(engine->i915) < 11) {
 		const u8 irq_shifts[] = {
-			[RCS]  = GEN8_RCS_IRQ_SHIFT,
-			[BCS]  = GEN8_BCS_IRQ_SHIFT,
-			[VCS]  = GEN8_VCS1_IRQ_SHIFT,
-			[VCS2] = GEN8_VCS2_IRQ_SHIFT,
-			[VECS] = GEN8_VECS_IRQ_SHIFT,
+			[RCS0]  = GEN8_RCS_IRQ_SHIFT,
+			[BCS0]  = GEN8_BCS_IRQ_SHIFT,
+			[VCS0]  = GEN8_VCS0_IRQ_SHIFT,
+			[VCS1]  = GEN8_VCS1_IRQ_SHIFT,
+			[VECS0] = GEN8_VECS_IRQ_SHIFT,
 		};
 
 		shift = irq_shifts[engine->id];
@@ -2367,13 +2419,9 @@ static int logical_ring_init(struct intel_engine_cs *engine)
 	}
 
 	execlists->preempt_complete_status = ~0u;
-	if (i915->preempt_context) {
-		struct intel_context *ce =
-			to_intel_context(i915->preempt_context, engine);
-
+	if (engine->preempt_context)
 		execlists->preempt_complete_status =
-			upper_32_bits(ce->lrc_desc);
-	}
+			upper_32_bits(engine->preempt_context->lrc_desc);
 
 	execlists->csb_status =
 		&engine->status_page.addr[I915_HWS_CSB_BUF0_INDEX];
@@ -2592,13 +2640,13 @@ static u32 intel_lr_indirect_ctx_offset(struct intel_engine_cs *engine)
 }
 
 static void execlists_init_reg_state(u32 *regs,
-				     struct i915_gem_context *ctx,
+				     struct intel_context *ce,
 				     struct intel_engine_cs *engine,
 				     struct intel_ring *ring)
 {
-	struct drm_i915_private *dev_priv = engine->i915;
-	u32 base = engine->mmio_base;
+	struct i915_hw_ppgtt *ppgtt = ce->gem_context->ppgtt;
 	bool rcs = engine->class == RENDER_CLASS;
+	u32 base = engine->mmio_base;
 
 	/* A context is actually a big batch buffer with several
 	 * MI_LOAD_REGISTER_IMM commands followed by (reg, value) pairs. The
@@ -2613,7 +2661,7 @@ static void execlists_init_reg_state(u32 *regs,
 	CTX_REG(regs, CTX_CONTEXT_CONTROL, RING_CONTEXT_CONTROL(engine),
 		_MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT) |
 		_MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH));
-	if (INTEL_GEN(dev_priv) < 11) {
+	if (INTEL_GEN(engine->i915) < 11) {
 		regs[CTX_CONTEXT_CONTROL + 1] |=
 			_MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT |
 					    CTX_CTRL_RS_CTX_ENABLE);
@@ -2668,33 +2716,33 @@ static void execlists_init_reg_state(u32 *regs,
 	CTX_REG(regs, CTX_PDP0_UDW, GEN8_RING_PDP_UDW(engine, 0), 0);
 	CTX_REG(regs, CTX_PDP0_LDW, GEN8_RING_PDP_LDW(engine, 0), 0);
 
-	if (i915_vm_is_48bit(&ctx->ppgtt->vm)) {
+	if (i915_vm_is_4lvl(&ppgtt->vm)) {
 		/* 64b PPGTT (48bit canonical)
 		 * PDP0_DESCRIPTOR contains the base address to PML4 and
 		 * other PDP Descriptors are ignored.
 		 */
-		ASSIGN_CTX_PML4(ctx->ppgtt, regs);
+		ASSIGN_CTX_PML4(ppgtt, regs);
 	} else {
-		ASSIGN_CTX_PDP(ctx->ppgtt, regs, 3);
-		ASSIGN_CTX_PDP(ctx->ppgtt, regs, 2);
-		ASSIGN_CTX_PDP(ctx->ppgtt, regs, 1);
-		ASSIGN_CTX_PDP(ctx->ppgtt, regs, 0);
+		ASSIGN_CTX_PDP(ppgtt, regs, 3);
+		ASSIGN_CTX_PDP(ppgtt, regs, 2);
+		ASSIGN_CTX_PDP(ppgtt, regs, 1);
+		ASSIGN_CTX_PDP(ppgtt, regs, 0);
 	}
 
 	if (rcs) {
 		regs[CTX_LRI_HEADER_2] = MI_LOAD_REGISTER_IMM(1);
 		CTX_REG(regs, CTX_R_PWR_CLK_STATE, GEN8_R_PWR_CLK_STATE, 0);
 
-		i915_oa_init_reg_state(engine, ctx, regs);
+		i915_oa_init_reg_state(engine, ce, regs);
 	}
 
 	regs[CTX_END] = MI_BATCH_BUFFER_END;
-	if (INTEL_GEN(dev_priv) >= 10)
+	if (INTEL_GEN(engine->i915) >= 10)
 		regs[CTX_END] |= BIT(0);
 }
 
 static int
-populate_lr_context(struct i915_gem_context *ctx,
+populate_lr_context(struct intel_context *ce,
 		    struct drm_i915_gem_object *ctx_obj,
 		    struct intel_engine_cs *engine,
 		    struct intel_ring *ring)
@@ -2740,11 +2788,12 @@ populate_lr_context(struct i915_gem_context *ctx,
 	/* The second page of the context object contains some fields which must
 	 * be set up prior to the first execution. */
 	regs = vaddr + LRC_STATE_PN * PAGE_SIZE;
-	execlists_init_reg_state(regs, ctx, engine, ring);
+	execlists_init_reg_state(regs, ce, engine, ring);
 	if (!engine->default_state)
 		regs[CTX_CONTEXT_CONTROL + 1] |=
 			_MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
-	if (ctx == ctx->i915->preempt_context && INTEL_GEN(engine->i915) < 11)
+	if (ce->gem_context == engine->i915->preempt_context &&
+	    INTEL_GEN(engine->i915) < 11)
 		regs[CTX_CONTEXT_CONTROL + 1] |=
 			_MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT |
 					   CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT);
@@ -2754,9 +2803,13 @@ populate_lr_context(struct i915_gem_context *ctx,
 	return ret;
 }
 
-static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
-					    struct intel_engine_cs *engine,
-					    struct intel_context *ce)
+static struct i915_timeline *get_timeline(struct i915_gem_context *ctx)
+{
+	return i915_timeline_create(ctx->i915, ctx->name, NULL);
+}
+
+static int execlists_context_deferred_alloc(struct intel_context *ce,
+					    struct intel_engine_cs *engine)
 {
 	struct drm_i915_gem_object *ctx_obj;
 	struct i915_vma *vma;
@@ -2776,30 +2829,32 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
 	 */
 	context_size += LRC_HEADER_PAGES * PAGE_SIZE;
 
-	ctx_obj = i915_gem_object_create(ctx->i915, context_size);
+	ctx_obj = i915_gem_object_create(engine->i915, context_size);
 	if (IS_ERR(ctx_obj))
 		return PTR_ERR(ctx_obj);
 
-	vma = i915_vma_instance(ctx_obj, &ctx->i915->ggtt.vm, NULL);
+	vma = i915_vma_instance(ctx_obj, &engine->i915->ggtt.vm, NULL);
 	if (IS_ERR(vma)) {
 		ret = PTR_ERR(vma);
 		goto error_deref_obj;
 	}
 
-	timeline = i915_timeline_create(ctx->i915, ctx->name, NULL);
+	timeline = get_timeline(ce->gem_context);
 	if (IS_ERR(timeline)) {
 		ret = PTR_ERR(timeline);
 		goto error_deref_obj;
 	}
 
-	ring = intel_engine_create_ring(engine, timeline, ctx->ring_size);
+	ring = intel_engine_create_ring(engine,
+					timeline,
+					ce->gem_context->ring_size);
 	i915_timeline_put(timeline);
 	if (IS_ERR(ring)) {
 		ret = PTR_ERR(ring);
 		goto error_deref_obj;
 	}
 
-	ret = populate_lr_context(ctx, ctx_obj, engine, ring);
+	ret = populate_lr_context(ce, ctx_obj, engine, ring);
 	if (ret) {
 		DRM_DEBUG_DRIVER("Failed to populate LRC: %d\n", ret);
 		goto error_ring_free;
@@ -2811,7 +2866,7 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
 	return 0;
 
 error_ring_free:
-	intel_ring_free(ring);
+	intel_ring_put(ring);
 error_deref_obj:
 	i915_gem_object_put(ctx_obj);
 	return ret;
@@ -2819,9 +2874,8 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
 
 void intel_lr_context_resume(struct drm_i915_private *i915)
 {
-	struct intel_engine_cs *engine;
 	struct i915_gem_context *ctx;
-	enum intel_engine_id id;
+	struct intel_context *ce;
 
 	/*
 	 * Because we emit WA_TAIL_DWORDS there may be a disparity
@@ -2835,17 +2889,10 @@ void intel_lr_context_resume(struct drm_i915_private *i915)
 	 * simplicity, we just zero everything out.
 	 */
 	list_for_each_entry(ctx, &i915->contexts.list, link) {
-		for_each_engine(engine, i915, id) {
-			struct intel_context *ce =
-				to_intel_context(ctx, engine);
-
-			if (!ce->state)
-				continue;
-
+		list_for_each_entry(ce, &ctx->active_engines, active_link) {
+			GEM_BUG_ON(!ce->ring);
 			intel_ring_reset(ce->ring, 0);
-
-			if (ce->pin_count) /* otherwise done in context_pin */
-				__execlists_update_reg_state(engine, ce);
+			__execlists_update_reg_state(ce, ce->engine);
 		}
 	}
 }
diff --git a/drivers/gpu/drm/i915/intel_lspcon.c b/drivers/gpu/drm/i915/intel_lspcon.c
index 322bdddda164db496a4190323004ebf2ed916e86..8d202b13e24f302f432d982ef4984d1b238a8548 100644
--- a/drivers/gpu/drm/i915/intel_lspcon.c
+++ b/drivers/gpu/drm/i915/intel_lspcon.c
@@ -452,6 +452,14 @@ void lspcon_write_infoframe(struct intel_encoder *encoder,
 	DRM_DEBUG_DRIVER("AVI infoframes updated successfully\n");
 }
 
+void lspcon_read_infoframe(struct intel_encoder *encoder,
+			   const struct intel_crtc_state *crtc_state,
+			   unsigned int type,
+			   void *frame, ssize_t len)
+{
+	/* FIXME implement this */
+}
+
 void lspcon_set_infoframes(struct intel_encoder *encoder,
 			   bool enable,
 			   const struct intel_crtc_state *crtc_state,
@@ -470,6 +478,8 @@ void lspcon_set_infoframes(struct intel_encoder *encoder,
 		return;
 	}
 
+	/* FIXME precompute infoframes */
+
 	ret = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi,
 						       conn_state->connector,
 						       adjusted_mode);
@@ -504,9 +514,10 @@ void lspcon_set_infoframes(struct intel_encoder *encoder,
 				  buf, ret);
 }
 
-bool lspcon_infoframe_enabled(struct intel_encoder *encoder,
+u32 lspcon_infoframes_enabled(struct intel_encoder *encoder,
 			      const struct intel_crtc_state *pipe_config)
 {
+	/* FIXME actually read this from the hw */
 	return enc_to_intel_lspcon(&encoder->base)->active;
 }
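
With the bool -> u32 change, lspcon_infoframes_enabled() is meant to return a mask of enabled infoframe types once the FIXME is resolved; a hypothetical caller-side check (intel_hdmi_infoframe_enable() is assumed here to map an infoframe type to its enable bit):

	u32 active = lspcon_infoframes_enabled(encoder, pipe_config);

	/* Test one infoframe type in the returned mask */
	if (active & intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI))
		DRM_DEBUG_KMS("AVI infoframe enabled\n");
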
 
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index b4aa49768e90fb27997692d38e36824ac81327aa..845792aa0abee89d8cfb8ee7f2947230aaa0bbb7 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -152,24 +152,17 @@ static void intel_lvds_pps_get_hw_state(struct drm_i915_private *dev_priv,
 	pps->powerdown_on_reset = I915_READ(PP_CONTROL(0)) & PANEL_POWER_RESET;
 
 	val = I915_READ(PP_ON_DELAYS(0));
-	pps->port = (val & PANEL_PORT_SELECT_MASK) >>
-		    PANEL_PORT_SELECT_SHIFT;
-	pps->t1_t2 = (val & PANEL_POWER_UP_DELAY_MASK) >>
-		     PANEL_POWER_UP_DELAY_SHIFT;
-	pps->t5 = (val & PANEL_LIGHT_ON_DELAY_MASK) >>
-		  PANEL_LIGHT_ON_DELAY_SHIFT;
+	pps->port = REG_FIELD_GET(PANEL_PORT_SELECT_MASK, val);
+	pps->t1_t2 = REG_FIELD_GET(PANEL_POWER_UP_DELAY_MASK, val);
+	pps->t5 = REG_FIELD_GET(PANEL_LIGHT_ON_DELAY_MASK, val);
 
 	val = I915_READ(PP_OFF_DELAYS(0));
-	pps->t3 = (val & PANEL_POWER_DOWN_DELAY_MASK) >>
-		  PANEL_POWER_DOWN_DELAY_SHIFT;
-	pps->tx = (val & PANEL_LIGHT_OFF_DELAY_MASK) >>
-		  PANEL_LIGHT_OFF_DELAY_SHIFT;
+	pps->t3 = REG_FIELD_GET(PANEL_POWER_DOWN_DELAY_MASK, val);
+	pps->tx = REG_FIELD_GET(PANEL_LIGHT_OFF_DELAY_MASK, val);
 
 	val = I915_READ(PP_DIVISOR(0));
-	pps->divider = (val & PP_REFERENCE_DIVIDER_MASK) >>
-		       PP_REFERENCE_DIVIDER_SHIFT;
-	val = (val & PANEL_POWER_CYCLE_DELAY_MASK) >>
-	      PANEL_POWER_CYCLE_DELAY_SHIFT;
+	pps->divider = REG_FIELD_GET(PP_REFERENCE_DIVIDER_MASK, val);
+	val = REG_FIELD_GET(PANEL_POWER_CYCLE_DELAY_MASK, val);
 	/*
 	 * Remove the BSpec specified +1 (100ms) offset that accounts for a
 	 * too short power-cycle delay due to the asynchronous programming of
@@ -209,16 +202,19 @@ static void intel_lvds_pps_init_hw(struct drm_i915_private *dev_priv,
 		val |= PANEL_POWER_RESET;
 	I915_WRITE(PP_CONTROL(0), val);
 
-	I915_WRITE(PP_ON_DELAYS(0), (pps->port << PANEL_PORT_SELECT_SHIFT) |
-				    (pps->t1_t2 << PANEL_POWER_UP_DELAY_SHIFT) |
-				    (pps->t5 << PANEL_LIGHT_ON_DELAY_SHIFT));
-	I915_WRITE(PP_OFF_DELAYS(0), (pps->t3 << PANEL_POWER_DOWN_DELAY_SHIFT) |
-				     (pps->tx << PANEL_LIGHT_OFF_DELAY_SHIFT));
+	I915_WRITE(PP_ON_DELAYS(0),
+		   REG_FIELD_PREP(PANEL_PORT_SELECT_MASK, pps->port) |
+		   REG_FIELD_PREP(PANEL_POWER_UP_DELAY_MASK, pps->t1_t2) |
+		   REG_FIELD_PREP(PANEL_LIGHT_ON_DELAY_MASK, pps->t5));
 
-	val = pps->divider << PP_REFERENCE_DIVIDER_SHIFT;
-	val |= (DIV_ROUND_UP(pps->t4, 1000) + 1) <<
-	       PANEL_POWER_CYCLE_DELAY_SHIFT;
-	I915_WRITE(PP_DIVISOR(0), val);
+	I915_WRITE(PP_OFF_DELAYS(0),
+		   REG_FIELD_PREP(PANEL_POWER_DOWN_DELAY_MASK, pps->t3) |
+		   REG_FIELD_PREP(PANEL_LIGHT_OFF_DELAY_MASK, pps->tx));
+
+	I915_WRITE(PP_DIVISOR(0),
+		   REG_FIELD_PREP(PP_REFERENCE_DIVIDER_MASK, pps->divider) |
+		   REG_FIELD_PREP(PANEL_POWER_CYCLE_DELAY_MASK,
+				  DIV_ROUND_UP(pps->t4, 1000) + 1));
 }
 
 static void intel_pre_enable_lvds(struct intel_encoder *encoder,
@@ -746,20 +742,21 @@ static const struct dmi_system_id intel_dual_link_lvds[] = {
 	{ }	/* terminating entry */
 };
 
-struct intel_encoder *intel_get_lvds_encoder(struct drm_device *dev)
+struct intel_encoder *intel_get_lvds_encoder(struct drm_i915_private *dev_priv)
 {
-	struct intel_encoder *intel_encoder;
+	struct intel_encoder *encoder;
 
-	for_each_intel_encoder(dev, intel_encoder)
-		if (intel_encoder->type == INTEL_OUTPUT_LVDS)
-			return intel_encoder;
+	for_each_intel_encoder(&dev_priv->drm, encoder) {
+		if (encoder->type == INTEL_OUTPUT_LVDS)
+			return encoder;
+	}
 
 	return NULL;
 }
 
-bool intel_is_dual_link_lvds(struct drm_device *dev)
+bool intel_is_dual_link_lvds(struct drm_i915_private *dev_priv)
 {
-	struct intel_encoder *encoder = intel_get_lvds_encoder(dev);
+	struct intel_encoder *encoder = intel_get_lvds_encoder(dev_priv);
 
 	return encoder && to_lvds_encoder(&encoder->base)->is_dual_link;
 }
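
The conversions above rely on REG_FIELD_GET()/REG_FIELD_PREP() deriving the shift from the mask, which is why the *_SHIFT definitions can be dropped. A rough, hypothetical equivalent of the two helpers (the real macros in i915_reg.h add compile-time mask and range checking):

	/* Sketch only: the shift is the position of the mask's lowest set bit */
	#define SKETCH_SHIFT(mask)	(__builtin_ffs(mask) - 1)
	#define SKETCH_FIELD_GET(mask, val) \
		(((val) & (mask)) >> SKETCH_SHIFT(mask))
	#define SKETCH_FIELD_PREP(mask, val) \
		(((u32)(val) << SKETCH_SHIFT(mask)) & (mask))
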
diff --git a/drivers/gpu/drm/i915/intel_mocs.c b/drivers/gpu/drm/i915/intel_mocs.c
index 331e7a678fb700ccd5ca4a07303382ccdf291086..274ba78500c06156373dfc6fa89681a72a7eca29 100644
--- a/drivers/gpu/drm/i915/intel_mocs.c
+++ b/drivers/gpu/drm/i915/intel_mocs.c
@@ -252,7 +252,7 @@ static bool get_mocs_settings(struct drm_i915_private *dev_priv,
 {
 	bool result = false;
 
-	if (IS_ICELAKE(dev_priv)) {
+	if (INTEL_GEN(dev_priv) >= 11) {
 		table->size  = ARRAY_SIZE(icelake_mocs_table);
 		table->table = icelake_mocs_table;
 		table->n_entries = GEN11_NUM_MOCS_ENTRIES;
@@ -288,17 +288,17 @@ static bool get_mocs_settings(struct drm_i915_private *dev_priv,
 static i915_reg_t mocs_register(enum intel_engine_id engine_id, int index)
 {
 	switch (engine_id) {
-	case RCS:
+	case RCS0:
 		return GEN9_GFX_MOCS(index);
-	case VCS:
+	case VCS0:
 		return GEN9_MFX0_MOCS(index);
-	case BCS:
+	case BCS0:
 		return GEN9_BLT_MOCS(index);
-	case VECS:
+	case VECS0:
 		return GEN9_VEBOX_MOCS(index);
-	case VCS2:
+	case VCS1:
 		return GEN9_MFX1_MOCS(index);
-	case VCS3:
+	case VCS2:
 		return GEN11_MFX2_MOCS(index);
 	default:
 		MISSING_CASE(engine_id);
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index c0df1dbb0069e805face7c130cd9866791e2b77f..a882b8d42bd95de493c13727186af26f16f221ec 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -236,7 +236,7 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
 static struct i915_request *alloc_request(struct intel_overlay *overlay)
 {
 	struct drm_i915_private *dev_priv = overlay->i915;
-	struct intel_engine_cs *engine = dev_priv->engine[RCS];
+	struct intel_engine_cs *engine = dev_priv->engine[RCS0];
 
 	return i915_request_alloc(engine, dev_priv->kernel_context);
 }
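
The RCS -> RCS0 style renames here and throughout the series follow the new instance-numbered engine naming; a sketch of the ids as implied by the users above (the real enum is assumed to move to intel_engine_types.h):

	enum intel_engine_id {
		RCS0 = 0,
		BCS0,
		VCS0,
		VCS1,
		VCS2,
		VCS3,
	#define _VCS(n) (VCS0 + (n))
		VECS0,
		VECS1,
	#define _VECS(n) (VECS0 + (n))
	};
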
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index beca98d2b035b2a84f3425a674cfbb4a4966aa13..edd5540639b0e7aca4e0a3d28bcd5c529aa05c05 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -1894,15 +1894,14 @@ intel_panel_init_backlight_funcs(struct intel_panel *panel)
 		panel->backlight.set = bxt_set_backlight;
 		panel->backlight.get = bxt_get_backlight;
 		panel->backlight.hz_to_pwm = bxt_hz_to_pwm;
-	} else if (HAS_PCH_CNP(dev_priv) || HAS_PCH_ICP(dev_priv)) {
+	} else if (INTEL_PCH_TYPE(dev_priv) >= PCH_CNP) {
 		panel->backlight.setup = cnp_setup_backlight;
 		panel->backlight.enable = cnp_enable_backlight;
 		panel->backlight.disable = cnp_disable_backlight;
 		panel->backlight.set = bxt_set_backlight;
 		panel->backlight.get = bxt_get_backlight;
 		panel->backlight.hz_to_pwm = cnp_hz_to_pwm;
-	} else if (HAS_PCH_LPT(dev_priv) || HAS_PCH_SPT(dev_priv) ||
-		   HAS_PCH_KBP(dev_priv)) {
+	} else if (INTEL_PCH_TYPE(dev_priv) >= PCH_LPT) {
 		panel->backlight.setup = lpt_setup_backlight;
 		panel->backlight.enable = lpt_enable_backlight;
 		panel->backlight.disable = lpt_disable_backlight;
diff --git a/drivers/gpu/drm/i915/intel_pipe_crc.c b/drivers/gpu/drm/i915/intel_pipe_crc.c
index a8554dc4f196fb7a626173a4e8714e4ca4fe78f3..0b1378f0bff76aaf0597dea0f736502d2cbec465 100644
--- a/drivers/gpu/drm/i915/intel_pipe_crc.c
+++ b/drivers/gpu/drm/i915/intel_pipe_crc.c
@@ -31,16 +31,20 @@
 #include "intel_drv.h"
 
 static const char * const pipe_crc_sources[] = {
-	"none",
-	"plane1",
-	"plane2",
-	"pf",
-	"pipe",
-	"TV",
-	"DP-B",
-	"DP-C",
-	"DP-D",
-	"auto",
+	[INTEL_PIPE_CRC_SOURCE_NONE] = "none",
+	[INTEL_PIPE_CRC_SOURCE_PLANE1] = "plane1",
+	[INTEL_PIPE_CRC_SOURCE_PLANE2] = "plane2",
+	[INTEL_PIPE_CRC_SOURCE_PLANE3] = "plane3",
+	[INTEL_PIPE_CRC_SOURCE_PLANE4] = "plane4",
+	[INTEL_PIPE_CRC_SOURCE_PLANE5] = "plane5",
+	[INTEL_PIPE_CRC_SOURCE_PLANE6] = "plane6",
+	[INTEL_PIPE_CRC_SOURCE_PLANE7] = "plane7",
+	[INTEL_PIPE_CRC_SOURCE_PIPE] = "pipe",
+	[INTEL_PIPE_CRC_SOURCE_TV] = "TV",
+	[INTEL_PIPE_CRC_SOURCE_DP_B] = "DP-B",
+	[INTEL_PIPE_CRC_SOURCE_DP_C] = "DP-C",
+	[INTEL_PIPE_CRC_SOURCE_DP_D] = "DP-D",
+	[INTEL_PIPE_CRC_SOURCE_AUTO] = "auto",
 };
 
 static int i8xx_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
@@ -192,8 +196,6 @@ static int i9xx_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
 				 enum intel_pipe_crc_source *source,
 				 u32 *val)
 {
-	bool need_stable_symbols = false;
-
 	if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) {
 		int ret = i9xx_pipe_crc_auto_source(dev_priv, pipe, source);
 		if (ret)
@@ -209,56 +211,23 @@ static int i9xx_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
 			return -EINVAL;
 		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_TV_PRE;
 		break;
-	case INTEL_PIPE_CRC_SOURCE_DP_B:
-		if (!IS_G4X(dev_priv))
-			return -EINVAL;
-		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_B_G4X;
-		need_stable_symbols = true;
-		break;
-	case INTEL_PIPE_CRC_SOURCE_DP_C:
-		if (!IS_G4X(dev_priv))
-			return -EINVAL;
-		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_C_G4X;
-		need_stable_symbols = true;
-		break;
-	case INTEL_PIPE_CRC_SOURCE_DP_D:
-		if (!IS_G4X(dev_priv))
-			return -EINVAL;
-		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_D_G4X;
-		need_stable_symbols = true;
-		break;
 	case INTEL_PIPE_CRC_SOURCE_NONE:
 		*val = 0;
 		break;
 	default:
+		/*
+		 * The DP CRC source doesn't work on g4x.
+		 * It can be made to work to some degree by selecting
+		 * the correct CRC source before the port is enabled,
+		 * and not touching the CRC source bits again until
+		 * the port is disabled. But even then the bits
+		 * eventually get stuck and a reboot is needed to get
+		 * working CRCs on the pipe again. Let's simply
+		 * refuse to use DP CRCs on g4x.
+		 */
 		return -EINVAL;
 	}
 
-	/*
-	 * When the pipe CRC tap point is after the transcoders we need
-	 * to tweak symbol-level features to produce a deterministic series of
-	 * symbols for a given frame. We need to reset those features only once
-	 * a frame (instead of every nth symbol):
-	 *   - DC-balance: used to ensure a better clock recovery from the data
-	 *     link (SDVO)
-	 *   - DisplayPort scrambling: used for EMI reduction
-	 */
-	if (need_stable_symbols) {
-		u32 tmp = I915_READ(PORT_DFT2_G4X);
-
-		WARN_ON(!IS_G4X(dev_priv));
-
-		I915_WRITE(PORT_DFT_I9XX,
-			   I915_READ(PORT_DFT_I9XX) | DC_BALANCE_RESET);
-
-		if (pipe == PIPE_A)
-			tmp |= PIPE_A_SCRAMBLE_RESET;
-		else
-			tmp |= PIPE_B_SCRAMBLE_RESET;
-
-		I915_WRITE(PORT_DFT2_G4X, tmp);
-	}
-
 	return 0;
 }
 
@@ -283,24 +252,6 @@ static void vlv_undo_pipe_scramble_reset(struct drm_i915_private *dev_priv,
 	if (!(tmp & PIPE_SCRAMBLE_RESET_MASK))
 		tmp &= ~DC_BALANCE_RESET_VLV;
 	I915_WRITE(PORT_DFT2_G4X, tmp);
-
-}
-
-static void g4x_undo_pipe_scramble_reset(struct drm_i915_private *dev_priv,
-					 enum pipe pipe)
-{
-	u32 tmp = I915_READ(PORT_DFT2_G4X);
-
-	if (pipe == PIPE_A)
-		tmp &= ~PIPE_A_SCRAMBLE_RESET;
-	else
-		tmp &= ~PIPE_B_SCRAMBLE_RESET;
-	I915_WRITE(PORT_DFT2_G4X, tmp);
-
-	if (!(tmp & PIPE_SCRAMBLE_RESET_MASK)) {
-		I915_WRITE(PORT_DFT_I9XX,
-			   I915_READ(PORT_DFT_I9XX) & ~DC_BALANCE_RESET);
-	}
 }
 
 static int ilk_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
@@ -329,19 +280,18 @@ static int ilk_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
 	return 0;
 }
 
-static void hsw_pipe_A_crc_wa(struct drm_i915_private *dev_priv,
-			      bool enable)
+static void
+intel_crtc_crc_setup_workarounds(struct intel_crtc *crtc, bool enable)
 {
-	struct drm_device *dev = &dev_priv->drm;
-	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_A);
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	struct intel_crtc_state *pipe_config;
 	struct drm_atomic_state *state;
 	struct drm_modeset_acquire_ctx ctx;
-	int ret = 0;
+	int ret;
 
 	drm_modeset_acquire_init(&ctx, 0);
 
-	state = drm_atomic_state_alloc(dev);
+	state = drm_atomic_state_alloc(&dev_priv->drm);
 	if (!state) {
 		ret = -ENOMEM;
 		goto unlock;
@@ -356,17 +306,10 @@ static void hsw_pipe_A_crc_wa(struct drm_i915_private *dev_priv,
 		goto put_state;
 	}
 
-	if (HAS_IPS(dev_priv)) {
-		/*
-		 * When IPS gets enabled, the pipe CRC changes. Since IPS gets
-		 * enabled and disabled dynamically based on package C states,
-		 * user space can't make reliable use of the CRCs, so let's just
-		 * completely disable it.
-		 */
-		pipe_config->ips_force_disable = enable;
-	}
+	pipe_config->base.mode_changed = pipe_config->has_psr;
+	pipe_config->crc_enabled = enable;
 
-	if (IS_HASWELL(dev_priv)) {
+	if (IS_HASWELL(dev_priv) && crtc->pipe == PIPE_A) {
 		pipe_config->pch_pfit.force_thru = enable;
 		if (pipe_config->cpu_transcoder == TRANSCODER_EDP &&
 		    pipe_config->pch_pfit.enabled != enable)
@@ -392,11 +335,10 @@ static void hsw_pipe_A_crc_wa(struct drm_i915_private *dev_priv,
 static int ivb_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
 				enum pipe pipe,
 				enum intel_pipe_crc_source *source,
-				u32 *val,
-				bool set_wa)
+				u32 *val)
 {
 	if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
-		*source = INTEL_PIPE_CRC_SOURCE_PF;
+		*source = INTEL_PIPE_CRC_SOURCE_PIPE;
 
 	switch (*source) {
 	case INTEL_PIPE_CRC_SOURCE_PLANE1:
@@ -405,11 +347,7 @@ static int ivb_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
 	case INTEL_PIPE_CRC_SOURCE_PLANE2:
 		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_IVB;
 		break;
-	case INTEL_PIPE_CRC_SOURCE_PF:
-		if (set_wa && (IS_HASWELL(dev_priv) ||
-		     IS_BROADWELL(dev_priv)) && pipe == PIPE_A)
-			hsw_pipe_A_crc_wa(dev_priv, true);
-
+	case INTEL_PIPE_CRC_SOURCE_PIPE:
 		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PF_IVB;
 		break;
 	case INTEL_PIPE_CRC_SOURCE_NONE:
@@ -422,10 +360,52 @@ static int ivb_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
 	return 0;
 }
 
+static int skl_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
+				enum pipe pipe,
+				enum intel_pipe_crc_source *source,
+				u32 *val)
+{
+	if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
+		*source = INTEL_PIPE_CRC_SOURCE_PIPE;
+
+	switch (*source) {
+	case INTEL_PIPE_CRC_SOURCE_PLANE1:
+		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PLANE_1_SKL;
+		break;
+	case INTEL_PIPE_CRC_SOURCE_PLANE2:
+		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PLANE_2_SKL;
+		break;
+	case INTEL_PIPE_CRC_SOURCE_PLANE3:
+		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PLANE_3_SKL;
+		break;
+	case INTEL_PIPE_CRC_SOURCE_PLANE4:
+		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PLANE_4_SKL;
+		break;
+	case INTEL_PIPE_CRC_SOURCE_PLANE5:
+		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PLANE_5_SKL;
+		break;
+	case INTEL_PIPE_CRC_SOURCE_PLANE6:
+		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PLANE_6_SKL;
+		break;
+	case INTEL_PIPE_CRC_SOURCE_PLANE7:
+		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PLANE_7_SKL;
+		break;
+	case INTEL_PIPE_CRC_SOURCE_PIPE:
+		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DMUX_SKL;
+		break;
+	case INTEL_PIPE_CRC_SOURCE_NONE:
+		*val = 0;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
 static int get_new_crc_ctl_reg(struct drm_i915_private *dev_priv,
 			       enum pipe pipe,
-			       enum intel_pipe_crc_source *source, u32 *val,
-			       bool set_wa)
+			       enum intel_pipe_crc_source *source, u32 *val)
 {
 	if (IS_GEN(dev_priv, 2))
 		return i8xx_pipe_crc_ctl_reg(source, val);
@@ -435,8 +415,10 @@ static int get_new_crc_ctl_reg(struct drm_i915_private *dev_priv,
 		return vlv_pipe_crc_ctl_reg(dev_priv, pipe, source, val);
 	else if (IS_GEN_RANGE(dev_priv, 5, 6))
 		return ilk_pipe_crc_ctl_reg(source, val);
+	else if (INTEL_GEN(dev_priv) < 9)
+		return ivb_pipe_crc_ctl_reg(dev_priv, pipe, source, val);
 	else
-		return ivb_pipe_crc_ctl_reg(dev_priv, pipe, source, val, set_wa);
+		return skl_pipe_crc_ctl_reg(dev_priv, pipe, source, val);
 }
 
 static int
@@ -486,9 +468,6 @@ static int i9xx_crc_source_valid(struct drm_i915_private *dev_priv,
 	switch (source) {
 	case INTEL_PIPE_CRC_SOURCE_PIPE:
 	case INTEL_PIPE_CRC_SOURCE_TV:
-	case INTEL_PIPE_CRC_SOURCE_DP_B:
-	case INTEL_PIPE_CRC_SOURCE_DP_C:
-	case INTEL_PIPE_CRC_SOURCE_DP_D:
 	case INTEL_PIPE_CRC_SOURCE_NONE:
 		return 0;
 	default:
@@ -532,7 +511,25 @@ static int ivb_crc_source_valid(struct drm_i915_private *dev_priv,
 	case INTEL_PIPE_CRC_SOURCE_PIPE:
 	case INTEL_PIPE_CRC_SOURCE_PLANE1:
 	case INTEL_PIPE_CRC_SOURCE_PLANE2:
-	case INTEL_PIPE_CRC_SOURCE_PF:
+	case INTEL_PIPE_CRC_SOURCE_NONE:
+		return 0;
+	default:
+		return -EINVAL;
+	}
+}
+
+static int skl_crc_source_valid(struct drm_i915_private *dev_priv,
+				const enum intel_pipe_crc_source source)
+{
+	switch (source) {
+	case INTEL_PIPE_CRC_SOURCE_PIPE:
+	case INTEL_PIPE_CRC_SOURCE_PLANE1:
+	case INTEL_PIPE_CRC_SOURCE_PLANE2:
+	case INTEL_PIPE_CRC_SOURCE_PLANE3:
+	case INTEL_PIPE_CRC_SOURCE_PLANE4:
+	case INTEL_PIPE_CRC_SOURCE_PLANE5:
+	case INTEL_PIPE_CRC_SOURCE_PLANE6:
+	case INTEL_PIPE_CRC_SOURCE_PLANE7:
 	case INTEL_PIPE_CRC_SOURCE_NONE:
 		return 0;
 	default:
@@ -552,8 +549,10 @@ intel_is_valid_crc_source(struct drm_i915_private *dev_priv,
 		return vlv_crc_source_valid(dev_priv, source);
 	else if (IS_GEN_RANGE(dev_priv, 5, 6))
 		return ilk_crc_source_valid(dev_priv, source);
-	else
+	else if (INTEL_GEN(dev_priv) < 9)
 		return ivb_crc_source_valid(dev_priv, source);
+	else
+		return skl_crc_source_valid(dev_priv, source);
 }
 
 const char *const *intel_crtc_get_crc_sources(struct drm_crtc *crtc,
@@ -592,6 +591,7 @@ int intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name)
 	intel_wakeref_t wakeref;
 	u32 val = 0; /* shut up gcc */
 	int ret = 0;
+	bool enable;
 
 	if (display_crc_ctl_parse_source(source_name, &source) < 0) {
 		DRM_DEBUG_DRIVER("unknown source %s\n", source_name);
@@ -605,7 +605,11 @@ int intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name)
 		return -EIO;
 	}
 
-	ret = get_new_crc_ctl_reg(dev_priv, crtc->index, &source, &val, true);
+	enable = source != INTEL_PIPE_CRC_SOURCE_NONE;
+	if (enable)
+		intel_crtc_crc_setup_workarounds(to_intel_crtc(crtc), true);
+
+	ret = get_new_crc_ctl_reg(dev_priv, crtc->index, &source, &val);
 	if (ret != 0)
 		goto out;
 
@@ -614,18 +618,16 @@ int intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name)
 	POSTING_READ(PIPE_CRC_CTL(crtc->index));
 
 	if (!source) {
-		if (IS_G4X(dev_priv))
-			g4x_undo_pipe_scramble_reset(dev_priv, crtc->index);
-		else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+		if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
 			vlv_undo_pipe_scramble_reset(dev_priv, crtc->index);
-		else if ((IS_HASWELL(dev_priv) ||
-			  IS_BROADWELL(dev_priv)) && crtc->index == PIPE_A)
-			hsw_pipe_A_crc_wa(dev_priv, false);
 	}
 
 	pipe_crc->skipped = 0;
 
 out:
+	if (!enable)
+		intel_crtc_crc_setup_workarounds(to_intel_crtc(crtc), false);
+
 	intel_display_power_put(dev_priv, power_domain, wakeref);
 
 	return ret;
@@ -641,7 +643,7 @@ void intel_crtc_enable_pipe_crc(struct intel_crtc *intel_crtc)
 	if (!crtc->crc.opened)
 		return;
 
-	if (get_new_crc_ctl_reg(dev_priv, crtc->index, &pipe_crc->source, &val, false) < 0)
+	if (get_new_crc_ctl_reg(dev_priv, crtc->index, &pipe_crc->source, &val) < 0)
 		return;
 
 	/* Don't need pipe_crc->lock here, IRQs are not generated. */
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 14ac31888c67d42316fc1eaa367b06260c6759f7..d73b13ca57a03985023da56ba8672adcee249c45 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -338,12 +338,12 @@ static void chv_set_memory_pm5(struct drm_i915_private *dev_priv, bool enable)
 
 	mutex_lock(&dev_priv->pcu_lock);
 
-	val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
+	val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
 	if (enable)
 		val |= DSP_MAXFIFO_PM5_ENABLE;
 	else
 		val &= ~DSP_MAXFIFO_PM5_ENABLE;
-	vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);
+	vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, val);
 
 	mutex_unlock(&dev_priv->pcu_lock);
 }
@@ -3624,7 +3624,12 @@ static u8 intel_enabled_dbuf_slices_num(struct drm_i915_private *dev_priv)
 	if (INTEL_GEN(dev_priv) < 11)
 		return enabled_slices;
 
-	if (I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE)
+	/*
+	 * FIXME: for now we'll only ever use 1 slice; pretend that we have
+	 * only that 1 slice enabled until we have a proper way for on-demand
+	 * toggling of the second slice.
+	 */
+	if (0 && I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE)
 		enabled_slices++;
 
 	return enabled_slices;
@@ -4466,6 +4471,17 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
 		for_each_plane_id_on_crtc(intel_crtc, plane_id) {
 			wm = &cstate->wm.skl.optimal.planes[plane_id];
 			memset(&wm->wm[level], 0, sizeof(wm->wm[level]));
+
+			/*
+			 * Wa_1408961008:icl
+			 * Underruns with WM1+ disabled
+			 */
+			if (IS_ICELAKE(dev_priv) &&
+			    level == 1 && wm->wm[0].plane_en) {
+				wm->wm[level].plane_res_b = wm->wm[0].plane_res_b;
+				wm->wm[level].plane_res_l = wm->wm[0].plane_res_l;
+				wm->wm[level].ignore_lines = wm->wm[0].ignore_lines;
+			}
 		}
 	}
 
@@ -5056,11 +5072,12 @@ static void skl_write_wm_level(struct drm_i915_private *dev_priv,
 {
 	u32 val = 0;
 
-	if (level->plane_en) {
+	if (level->plane_en)
 		val |= PLANE_WM_EN;
-		val |= level->plane_res_b;
-		val |= level->plane_res_l << PLANE_WM_LINES_SHIFT;
-	}
+	if (level->ignore_lines)
+		val |= PLANE_WM_IGNORE_LINES;
+	val |= level->plane_res_b;
+	val |= level->plane_res_l << PLANE_WM_LINES_SHIFT;
 
 	I915_WRITE_FW(reg, val);
 }
@@ -5126,6 +5143,7 @@ bool skl_wm_level_equals(const struct skl_wm_level *l1,
 			 const struct skl_wm_level *l2)
 {
 	return l1->plane_en == l2->plane_en &&
+		l1->ignore_lines == l2->ignore_lines &&
 		l1->plane_res_l == l2->plane_res_l &&
 		l1->plane_res_b == l2->plane_res_b;
 }
@@ -5269,6 +5287,11 @@ skl_compute_ddb(struct intel_atomic_state *state)
 	return 0;
 }
 
+static char enast(bool enable)
+{
+	return enable ? '*' : ' ';
+}
+
 static void
 skl_print_wm_changes(struct intel_atomic_state *state)
 {
@@ -5279,8 +5302,16 @@ skl_print_wm_changes(struct intel_atomic_state *state)
 	struct intel_crtc *crtc;
 	int i;
 
+	if ((drm_debug & DRM_UT_KMS) == 0)
+		return;
+
 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
 					    new_crtc_state, i) {
+		const struct skl_pipe_wm *old_pipe_wm, *new_pipe_wm;
+
+		old_pipe_wm = &old_crtc_state->wm.skl.optimal;
+		new_pipe_wm = &new_crtc_state->wm.skl.optimal;
+
 		for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
 			enum plane_id plane_id = plane->id;
 			const struct skl_ddb_entry *old, *new;
@@ -5291,10 +5322,86 @@ skl_print_wm_changes(struct intel_atomic_state *state)
 			if (skl_ddb_entry_equal(old, new))
 				continue;
 
-			DRM_DEBUG_KMS("[PLANE:%d:%s] ddb (%d - %d) -> (%d - %d)\n",
+			DRM_DEBUG_KMS("[PLANE:%d:%s] ddb (%4d - %4d) -> (%4d - %4d), size %4d -> %4d\n",
+				      plane->base.base.id, plane->base.name,
+				      old->start, old->end, new->start, new->end,
+				      skl_ddb_entry_size(old), skl_ddb_entry_size(new));
+		}
+
+		for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
+			enum plane_id plane_id = plane->id;
+			const struct skl_plane_wm *old_wm, *new_wm;
+
+			old_wm = &old_pipe_wm->planes[plane_id];
+			new_wm = &new_pipe_wm->planes[plane_id];
+
+			if (skl_plane_wm_equals(dev_priv, old_wm, new_wm))
+				continue;
+
+			DRM_DEBUG_KMS("[PLANE:%d:%s]   level %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm"
+				      " -> %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm\n",
+				      plane->base.base.id, plane->base.name,
+				      enast(old_wm->wm[0].plane_en), enast(old_wm->wm[1].plane_en),
+				      enast(old_wm->wm[2].plane_en), enast(old_wm->wm[3].plane_en),
+				      enast(old_wm->wm[4].plane_en), enast(old_wm->wm[5].plane_en),
+				      enast(old_wm->wm[6].plane_en), enast(old_wm->wm[7].plane_en),
+				      enast(old_wm->trans_wm.plane_en),
+				      enast(new_wm->wm[0].plane_en), enast(new_wm->wm[1].plane_en),
+				      enast(new_wm->wm[2].plane_en), enast(new_wm->wm[3].plane_en),
+				      enast(new_wm->wm[4].plane_en), enast(new_wm->wm[5].plane_en),
+				      enast(new_wm->wm[6].plane_en), enast(new_wm->wm[7].plane_en),
+				      enast(new_wm->trans_wm.plane_en));
+
+			DRM_DEBUG_KMS("[PLANE:%d:%s]   lines %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d"
+				      " -> %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d\n",
+				      plane->base.base.id, plane->base.name,
+				      enast(old_wm->wm[0].ignore_lines), old_wm->wm[0].plane_res_l,
+				      enast(old_wm->wm[1].ignore_lines), old_wm->wm[1].plane_res_l,
+				      enast(old_wm->wm[2].ignore_lines), old_wm->wm[2].plane_res_l,
+				      enast(old_wm->wm[3].ignore_lines), old_wm->wm[3].plane_res_l,
+				      enast(old_wm->wm[4].ignore_lines), old_wm->wm[4].plane_res_l,
+				      enast(old_wm->wm[5].ignore_lines), old_wm->wm[5].plane_res_l,
+				      enast(old_wm->wm[6].ignore_lines), old_wm->wm[6].plane_res_l,
+				      enast(old_wm->wm[7].ignore_lines), old_wm->wm[7].plane_res_l,
+				      enast(old_wm->trans_wm.ignore_lines), old_wm->trans_wm.plane_res_l,
+
+				      enast(new_wm->wm[0].ignore_lines), new_wm->wm[0].plane_res_l,
+				      enast(new_wm->wm[1].ignore_lines), new_wm->wm[1].plane_res_l,
+				      enast(new_wm->wm[2].ignore_lines), new_wm->wm[2].plane_res_l,
+				      enast(new_wm->wm[3].ignore_lines), new_wm->wm[3].plane_res_l,
+				      enast(new_wm->wm[4].ignore_lines), new_wm->wm[4].plane_res_l,
+				      enast(new_wm->wm[5].ignore_lines), new_wm->wm[5].plane_res_l,
+				      enast(new_wm->wm[6].ignore_lines), new_wm->wm[6].plane_res_l,
+				      enast(new_wm->wm[7].ignore_lines), new_wm->wm[7].plane_res_l,
+				      enast(new_wm->trans_wm.ignore_lines), new_wm->trans_wm.plane_res_l);
+
+			DRM_DEBUG_KMS("[PLANE:%d:%s]  blocks %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d"
+				      " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d\n",
+				      plane->base.base.id, plane->base.name,
+				      old_wm->wm[0].plane_res_b, old_wm->wm[1].plane_res_b,
+				      old_wm->wm[2].plane_res_b, old_wm->wm[3].plane_res_b,
+				      old_wm->wm[4].plane_res_b, old_wm->wm[5].plane_res_b,
+				      old_wm->wm[6].plane_res_b, old_wm->wm[7].plane_res_b,
+				      old_wm->trans_wm.plane_res_b,
+				      new_wm->wm[0].plane_res_b, new_wm->wm[1].plane_res_b,
+				      new_wm->wm[2].plane_res_b, new_wm->wm[3].plane_res_b,
+				      new_wm->wm[4].plane_res_b, new_wm->wm[5].plane_res_b,
+				      new_wm->wm[6].plane_res_b, new_wm->wm[7].plane_res_b,
+				      new_wm->trans_wm.plane_res_b);
+
+			DRM_DEBUG_KMS("[PLANE:%d:%s] min_ddb %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d"
+				      " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d\n",
 				      plane->base.base.id, plane->base.name,
-				      old->start, old->end,
-				      new->start, new->end);
+				      old_wm->wm[0].min_ddb_alloc, old_wm->wm[1].min_ddb_alloc,
+				      old_wm->wm[2].min_ddb_alloc, old_wm->wm[3].min_ddb_alloc,
+				      old_wm->wm[4].min_ddb_alloc, old_wm->wm[5].min_ddb_alloc,
+				      old_wm->wm[6].min_ddb_alloc, old_wm->wm[7].min_ddb_alloc,
+				      old_wm->trans_wm.min_ddb_alloc,
+				      new_wm->wm[0].min_ddb_alloc, new_wm->wm[1].min_ddb_alloc,
+				      new_wm->wm[2].min_ddb_alloc, new_wm->wm[3].min_ddb_alloc,
+				      new_wm->wm[4].min_ddb_alloc, new_wm->wm[5].min_ddb_alloc,
+				      new_wm->wm[6].min_ddb_alloc, new_wm->wm[7].min_ddb_alloc,
+				      new_wm->trans_wm.min_ddb_alloc);
 		}
 	}
 }
@@ -5609,6 +5716,7 @@ static inline void skl_wm_level_from_reg_val(u32 val,
 					     struct skl_wm_level *level)
 {
 	level->plane_en = val & PLANE_WM_EN;
+	level->ignore_lines = val & PLANE_WM_IGNORE_LINES;
 	level->plane_res_b = val & PLANE_WM_BLOCKS_MASK;
 	level->plane_res_l = (val >> PLANE_WM_LINES_SHIFT) &
 		PLANE_WM_LINES_MASK;
@@ -5986,7 +6094,7 @@ void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv)
 	if (IS_CHERRYVIEW(dev_priv)) {
 		mutex_lock(&dev_priv->pcu_lock);
 
-		val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
+		val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
 		if (val & DSP_MAXFIFO_PM5_ENABLE)
 			wm->level = VLV_WM_LEVEL_PM5;
 
@@ -6691,8 +6799,7 @@ void gen6_rps_idle(struct drm_i915_private *dev_priv)
 	mutex_unlock(&dev_priv->pcu_lock);
 }
 
-void gen6_rps_boost(struct i915_request *rq,
-		    struct intel_rps_client *rps_client)
+void gen6_rps_boost(struct i915_request *rq)
 {
 	struct intel_rps *rps = &rq->i915->gt_pm.rps;
 	unsigned long flags;
@@ -6721,7 +6828,7 @@ void gen6_rps_boost(struct i915_request *rq,
 	if (READ_ONCE(rps->cur_freq) < rps->boost_freq)
 		schedule_work(&rps->work);
 
-	atomic_inc(rps_client ? &rps_client->boosts : &rps->boosts);
+	atomic_inc(&rps->boosts);
 }
 
 int intel_set_rps(struct drm_i915_private *dev_priv, u8 val)
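
With enast() marking enabled watermark levels with '*', a line from the new skl_print_wm_changes() logging would render along these lines (plane name and level states hypothetical, reconstructed from the format string above):

	[PLANE:31:plane 1A]   level *wm0,*wm1, wm2, wm3, wm4, wm5, wm6, wm7, twm -> *wm0, wm1, wm2, wm3, wm4, wm5, wm6, wm7, twm
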
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
index 84a0fb98156135839b5bc966dfcd09ea81f98556..605fe8fc85cca18f774d4e0a42e20f55b2d6f18c 100644
--- a/drivers/gpu/drm/i915/intel_psr.c
+++ b/drivers/gpu/drm/i915/intel_psr.c
@@ -51,6 +51,7 @@
  * must be correctly synchronized/cancelled when shutting down the pipe."
  */
 
+#include <drm/drm_atomic_helper.h>
 
 #include "intel_drv.h"
 #include "i915_drv.h"
@@ -78,9 +79,6 @@ static bool intel_psr2_enabled(struct drm_i915_private *dev_priv,
 	case I915_PSR_DEBUG_DISABLE:
 	case I915_PSR_DEBUG_FORCE_PSR1:
 		return false;
-	case I915_PSR_DEBUG_DEFAULT:
-		if (i915_modparams.enable_psr <= 0)
-			return false;
 	default:
 		return crtc_state->has_psr2;
 	}
@@ -435,32 +433,16 @@ static void intel_psr_enable_sink(struct intel_dp *intel_dp)
 	drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
 }
 
-static void hsw_activate_psr1(struct intel_dp *intel_dp)
+static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp)
 {
 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-	u32 max_sleep_time = 0x1f;
-	u32 val = EDP_PSR_ENABLE;
+	u32 val = 0;
 
-	/* Let's use 6 as the minimum to cover all known cases including the
-	 * off-by-one issue that HW has in some cases.
-	 */
-	int idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
-
-	/* sink_sync_latency of 8 means source has to wait for more than 8
-	 * frames, we'll go with 9 frames for now
-	 */
-	idle_frames = max(idle_frames, dev_priv->psr.sink_sync_latency + 1);
-	val |= idle_frames << EDP_PSR_IDLE_FRAME_SHIFT;
-
-	val |= max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT;
-	if (IS_HASWELL(dev_priv))
-		val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
-
-	if (dev_priv->psr.link_standby)
-		val |= EDP_PSR_LINK_STANDBY;
+	if (INTEL_GEN(dev_priv) >= 11)
+		val |= EDP_PSR_TP4_TIME_0US;
 
 	if (dev_priv->vbt.psr.tp1_wakeup_time_us == 0)
-		val |=  EDP_PSR_TP1_TIME_0us;
+		val |= EDP_PSR_TP1_TIME_0us;
 	else if (dev_priv->vbt.psr.tp1_wakeup_time_us <= 100)
 		val |= EDP_PSR_TP1_TIME_100us;
 	else if (dev_priv->vbt.psr.tp1_wakeup_time_us <= 500)
@@ -469,7 +451,7 @@ static void hsw_activate_psr1(struct intel_dp *intel_dp)
 		val |= EDP_PSR_TP1_TIME_2500us;
 
 	if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us == 0)
-		val |=  EDP_PSR_TP2_TP3_TIME_0us;
+		val |= EDP_PSR_TP2_TP3_TIME_0us;
 	else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 100)
 		val |= EDP_PSR_TP2_TP3_TIME_100us;
 	else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 500)
@@ -483,6 +465,35 @@ static void hsw_activate_psr1(struct intel_dp *intel_dp)
 	else
 		val |= EDP_PSR_TP1_TP2_SEL;
 
+	return val;
+}
+
+static void hsw_activate_psr1(struct intel_dp *intel_dp)
+{
+	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+	u32 max_sleep_time = 0x1f;
+	u32 val = EDP_PSR_ENABLE;
+
+	/* Let's use 6 as the minimum to cover all known cases including the
+	 * off-by-one issue that HW has in some cases.
+	 */
+	int idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
+
+	/* sink_sync_latency of 8 means the source has to wait for more than 8
+	 * frames; we'll go with 9 frames for now.
+	 */
+	idle_frames = max(idle_frames, dev_priv->psr.sink_sync_latency + 1);
+	val |= idle_frames << EDP_PSR_IDLE_FRAME_SHIFT;
+
+	val |= max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT;
+	if (IS_HASWELL(dev_priv))
+		val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
+
+	if (dev_priv->psr.link_standby)
+		val |= EDP_PSR_LINK_STANDBY;
+
+	val |= intel_psr1_get_tp_time(intel_dp);
+
 	if (INTEL_GEN(dev_priv) >= 8)
 		val |= EDP_PSR_CRC_ENABLE;
 
@@ -509,16 +520,24 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
 
 	val |= EDP_PSR2_FRAME_BEFORE_SU(dev_priv->psr.sink_sync_latency + 1);
 
-	if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us >= 0 &&
-	    dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 50)
+	if (dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us >= 0 &&
+	    dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 50)
 		val |= EDP_PSR2_TP2_TIME_50us;
-	else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 100)
+	else if (dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 100)
 		val |= EDP_PSR2_TP2_TIME_100us;
-	else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 500)
+	else if (dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 500)
 		val |= EDP_PSR2_TP2_TIME_500us;
 	else
 		val |= EDP_PSR2_TP2_TIME_2500us;
 
+	/*
+	 * FIXME: There is probably an issue in the DMC firmware
+	 * (icl_dmc_ver1_07.bin and kbl_dmc_ver1_04.bin at least) that causes
+	 * PSR2 SU to fail after exiting DC6 if EDP_PSR_TP1_TP3_SEL is kept in
+	 * PSR_CTL; work around it for now by clearing PSR_CTL before enabling
+	 * PSR2.
+	 */
+	I915_WRITE(EDP_PSR_CTL, 0);
+
 	I915_WRITE(EDP_PSR2_CTL, val);
 }
 
@@ -530,11 +549,6 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
 	int crtc_vdisplay = crtc_state->base.adjusted_mode.crtc_vdisplay;
 	int psr_max_h = 0, psr_max_v = 0;
 
-	/*
-	 * FIXME psr2_support is messed up. It's both computed
-	 * dynamically during PSR enable, and extracted from sink
-	 * caps during eDP detection.
-	 */
 	if (!dev_priv->psr.sink_psr2_support)
 		return false;
 
@@ -575,6 +589,11 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
 		return false;
 	}
 
+	if (crtc_state->crc_enabled) {
+		DRM_DEBUG_KMS("PSR2 not enabled because it would inhibit pipe CRC calculation\n");
+		return false;
+	}
+
 	return true;
 }
 
@@ -718,8 +737,11 @@ static void intel_psr_enable_locked(struct drm_i915_private *dev_priv,
 {
 	struct intel_dp *intel_dp = dev_priv->psr.dp;
 
-	if (dev_priv->psr.enabled)
-		return;
+	WARN_ON(dev_priv->psr.enabled);
+
+	dev_priv->psr.psr2_enabled = intel_psr2_enabled(dev_priv, crtc_state);
+	dev_priv->psr.busy_frontbuffer_bits = 0;
+	dev_priv->psr.pipe = to_intel_crtc(crtc_state->base.crtc)->pipe;
 
 	DRM_DEBUG_KMS("Enabling PSR%s\n",
 		      dev_priv->psr.psr2_enabled ? "2" : "1");
@@ -752,20 +774,13 @@ void intel_psr_enable(struct intel_dp *intel_dp,
 	WARN_ON(dev_priv->drrs.dp);
 
 	mutex_lock(&dev_priv->psr.lock);
-	if (dev_priv->psr.prepared) {
-		DRM_DEBUG_KMS("PSR already in use\n");
+
+	if (!psr_global_enabled(dev_priv->psr.debug)) {
+		DRM_DEBUG_KMS("PSR disabled by flag\n");
 		goto unlock;
 	}
 
-	dev_priv->psr.psr2_enabled = intel_psr2_enabled(dev_priv, crtc_state);
-	dev_priv->psr.busy_frontbuffer_bits = 0;
-	dev_priv->psr.prepared = true;
-	dev_priv->psr.pipe = to_intel_crtc(crtc_state->base.crtc)->pipe;
-
-	if (psr_global_enabled(dev_priv->psr.debug))
-		intel_psr_enable_locked(dev_priv, crtc_state);
-	else
-		DRM_DEBUG_KMS("PSR disabled by flag\n");
+	intel_psr_enable_locked(dev_priv, crtc_state);
 
 unlock:
 	mutex_unlock(&dev_priv->psr.lock);
@@ -848,18 +863,69 @@ void intel_psr_disable(struct intel_dp *intel_dp,
 		return;
 
 	mutex_lock(&dev_priv->psr.lock);
-	if (!dev_priv->psr.prepared) {
-		mutex_unlock(&dev_priv->psr.lock);
-		return;
-	}
 
 	intel_psr_disable_locked(intel_dp);
 
-	dev_priv->psr.prepared = false;
 	mutex_unlock(&dev_priv->psr.lock);
 	cancel_work_sync(&dev_priv->psr.work);
 }
 
+static void psr_force_hw_tracking_exit(struct drm_i915_private *dev_priv)
+{
+	/*
+	 * Display WA #0884: all
+	 * This documented WA for bxt can be safely applied
+	 * broadly so we can force HW tracking to exit PSR
+	 * instead of disabling and re-enabling.
+	 * The workaround tells us to write 0 to CUR_SURFLIVE_A,
+	 * but it makes more sense to write to the currently
+	 * active pipe.
+	 */
+	I915_WRITE(CURSURFLIVE(dev_priv->psr.pipe), 0);
+}
+
+/**
+ * intel_psr_update - Update PSR state
+ * @intel_dp: Intel DP
+ * @crtc_state: new CRTC state
+ *
+ * This function updates the PSR state, disabling, enabling or switching the
+ * PSR version when executing fastsets. For full modesets, intel_psr_disable()
+ * and intel_psr_enable() should be called instead.
+ */
+void intel_psr_update(struct intel_dp *intel_dp,
+		      const struct intel_crtc_state *crtc_state)
+{
+	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+	struct i915_psr *psr = &dev_priv->psr;
+	bool enable, psr2_enable;
+
+	if (!CAN_PSR(dev_priv) || READ_ONCE(psr->dp) != intel_dp)
+		return;
+
+	mutex_lock(&dev_priv->psr.lock);
+
+	enable = crtc_state->has_psr && psr_global_enabled(psr->debug);
+	psr2_enable = intel_psr2_enabled(dev_priv, crtc_state);
+
+	if (enable == psr->enabled && psr2_enable == psr->psr2_enabled) {
+		/* Force a PSR exit when enabling CRC to avoid CRC timeouts */
+		if (crtc_state->crc_enabled && psr->enabled)
+			psr_force_hw_tracking_exit(dev_priv);
+
+		goto unlock;
+	}
+
+	if (psr->enabled)
+		intel_psr_disable_locked(intel_dp);
+
+	if (enable)
+		intel_psr_enable_locked(dev_priv, crtc_state);
+
+unlock:
+	mutex_unlock(&dev_priv->psr.lock);
+}
+
 /**
  * intel_psr_wait_for_idle - wait for PSR1 to idle
  * @new_crtc_state: new CRTC state
@@ -924,36 +990,63 @@ static bool __psr_wait_for_idle_locked(struct drm_i915_private *dev_priv)
 	return err == 0 && dev_priv->psr.enabled;
 }
 
-static bool switching_psr(struct drm_i915_private *dev_priv,
-			  struct intel_crtc_state *crtc_state,
-			  u32 mode)
+static int intel_psr_fastset_force(struct drm_i915_private *dev_priv)
 {
-	/* Can't switch psr state anyway if PSR2 is not supported. */
-	if (!crtc_state || !crtc_state->has_psr2)
-		return false;
+	struct drm_device *dev = &dev_priv->drm;
+	struct drm_modeset_acquire_ctx ctx;
+	struct drm_atomic_state *state;
+	struct drm_crtc *crtc;
+	int err;
 
-	if (dev_priv->psr.psr2_enabled && mode == I915_PSR_DEBUG_FORCE_PSR1)
-		return true;
+	state = drm_atomic_state_alloc(dev);
+	if (!state)
+		return -ENOMEM;
 
-	if (!dev_priv->psr.psr2_enabled && mode != I915_PSR_DEBUG_FORCE_PSR1)
-		return true;
+	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
+	state->acquire_ctx = &ctx;
+
+retry:
+	drm_for_each_crtc(crtc, dev) {
+		struct drm_crtc_state *crtc_state;
+		struct intel_crtc_state *intel_crtc_state;
+
+		crtc_state = drm_atomic_get_crtc_state(state, crtc);
+		if (IS_ERR(crtc_state)) {
+			err = PTR_ERR(crtc_state);
+			goto error;
+		}
+
+		intel_crtc_state = to_intel_crtc_state(crtc_state);
 
-	return false;
+		if (crtc_state->active && intel_crtc_state->has_psr) {
+			/* Mark mode as changed to trigger a pipe->update() */
+			crtc_state->mode_changed = true;
+			break;
+		}
+	}
+
+	err = drm_atomic_commit(state);
+
+error:
+	if (err == -EDEADLK) {
+		drm_atomic_state_clear(state);
+		err = drm_modeset_backoff(&ctx);
+		if (!err)
+			goto retry;
+	}
+
+	drm_modeset_drop_locks(&ctx);
+	drm_modeset_acquire_fini(&ctx);
+	drm_atomic_state_put(state);
+
+	return err;
 }
 
-int intel_psr_set_debugfs_mode(struct drm_i915_private *dev_priv,
-			       struct drm_modeset_acquire_ctx *ctx,
-			       u64 val)
+int intel_psr_debug_set(struct drm_i915_private *dev_priv, u64 val)
 {
-	struct drm_device *dev = &dev_priv->drm;
-	struct drm_connector_state *conn_state;
-	struct intel_crtc_state *crtc_state = NULL;
-	struct drm_crtc_commit *commit;
-	struct drm_crtc *crtc;
-	struct intel_dp *dp;
+	const u32 mode = val & I915_PSR_DEBUG_MODE_MASK;
+	u32 old_mode;
 	int ret;
-	bool enable;
-	u32 mode = val & I915_PSR_DEBUG_MODE_MASK;
 
 	if (val & ~(I915_PSR_DEBUG_IRQ | I915_PSR_DEBUG_MODE_MASK) ||
 	    mode > I915_PSR_DEBUG_FORCE_PSR1) {
@@ -961,49 +1054,19 @@ int intel_psr_set_debugfs_mode(struct drm_i915_private *dev_priv,
 		return -EINVAL;
 	}
 
-	ret = drm_modeset_lock(&dev->mode_config.connection_mutex, ctx);
-	if (ret)
-		return ret;
-
-	/* dev_priv->psr.dp should be set once and then never touched again. */
-	dp = READ_ONCE(dev_priv->psr.dp);
-	conn_state = dp->attached_connector->base.state;
-	crtc = conn_state->crtc;
-	if (crtc) {
-		ret = drm_modeset_lock(&crtc->mutex, ctx);
-		if (ret)
-			return ret;
-
-		crtc_state = to_intel_crtc_state(crtc->state);
-		commit = crtc_state->base.commit;
-	} else {
-		commit = conn_state->commit;
-	}
-	if (commit) {
-		ret = wait_for_completion_interruptible(&commit->hw_done);
-		if (ret)
-			return ret;
-	}
-
 	ret = mutex_lock_interruptible(&dev_priv->psr.lock);
 	if (ret)
 		return ret;
 
-	enable = psr_global_enabled(val);
-
-	if (!enable || switching_psr(dev_priv, crtc_state, mode))
-		intel_psr_disable_locked(dev_priv->psr.dp);
-
+	old_mode = dev_priv->psr.debug & I915_PSR_DEBUG_MODE_MASK;
 	dev_priv->psr.debug = val;
-	if (crtc)
-		dev_priv->psr.psr2_enabled = intel_psr2_enabled(dev_priv, crtc_state);
-
 	intel_psr_irq_control(dev_priv, dev_priv->psr.debug);
 
-	if (dev_priv->psr.prepared && enable)
-		intel_psr_enable_locked(dev_priv, crtc_state);
-
 	mutex_unlock(&dev_priv->psr.lock);
+
+	if (old_mode != mode)
+		ret = intel_psr_fastset_force(dev_priv);
+
 	return ret;
 }
 
@@ -1121,18 +1184,8 @@ void intel_psr_flush(struct drm_i915_private *dev_priv,
 	dev_priv->psr.busy_frontbuffer_bits &= ~frontbuffer_bits;
 
 	/* By definition flush = invalidate + flush */
-	if (frontbuffer_bits) {
-		/*
-		 * Display WA #0884: all
-		 * This documented WA for bxt can be safely applied
-		 * broadly so we can force HW tracking to exit PSR
-		 * instead of disabling and re-enabling.
-		 * Workaround tells us to write 0 to CUR_SURFLIVE_A,
-		 * but it makes more sense write to the current active
-		 * pipe.
-		 */
-		I915_WRITE(CURSURFLIVE(dev_priv->psr.pipe), 0);
-	}
+	if (frontbuffer_bits)
+		psr_force_hw_tracking_exit(dev_priv);
 
 	if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
 		schedule_work(&dev_priv->psr.work);
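
Unlike the old intel_psr_set_debugfs_mode(), intel_psr_debug_set() no longer needs the caller to hold modeset locks, since the forced fastset handles its own locking; a hypothetical debugfs hook wiring it up (the real hook would live in i915_debugfs.c):

	static int i915_edp_psr_debug_set(void *data, u64 val)
	{
		struct drm_i915_private *dev_priv = data;

		/* Takes psr.lock itself and forces a fastset on mode change */
		return intel_psr_debug_set(dev_priv, val);
	}
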
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 7f841dba87b3026893b2cb330168cb74246d5804..9e7ad17b5250d24d725375ea7973128371eec7c2 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -43,12 +43,6 @@
  */
 #define LEGACY_REQUEST_SIZE 200
 
-static inline u32 intel_hws_seqno_address(struct intel_engine_cs *engine)
-{
-	return (i915_ggtt_offset(engine->status_page.vma) +
-		I915_GEM_HWS_INDEX_ADDR);
-}
-
 unsigned int intel_ring_update_space(struct intel_ring *ring)
 {
 	unsigned int space;
@@ -317,9 +311,9 @@ static u32 *gen6_rcs_emit_breadcrumb(struct i915_request *rq, u32 *cs)
 	*cs++ = rq->fence.seqno;
 
 	*cs++ = GFX_OP_PIPE_CONTROL(4);
-	*cs++ = PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_CS_STALL;
-	*cs++ = intel_hws_seqno_address(rq->engine) | PIPE_CONTROL_GLOBAL_GTT;
-	*cs++ = rq->global_seqno;
+	*cs++ = PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_STORE_DATA_INDEX;
+	*cs++ = I915_GEM_HWS_HANGCHECK_ADDR | PIPE_CONTROL_GLOBAL_GTT;
+	*cs++ = intel_engine_next_hangcheck_seqno(rq->engine);
 
 	*cs++ = MI_USER_INTERRUPT;
 	*cs++ = MI_NOOP;
@@ -424,10 +418,10 @@ static u32 *gen7_rcs_emit_breadcrumb(struct i915_request *rq, u32 *cs)
 
 	*cs++ = GFX_OP_PIPE_CONTROL(4);
 	*cs++ = (PIPE_CONTROL_QW_WRITE |
-		 PIPE_CONTROL_GLOBAL_GTT_IVB |
-		 PIPE_CONTROL_CS_STALL);
-	*cs++ = intel_hws_seqno_address(rq->engine);
-	*cs++ = rq->global_seqno;
+		 PIPE_CONTROL_STORE_DATA_INDEX |
+		 PIPE_CONTROL_GLOBAL_GTT_IVB);
+	*cs++ = I915_GEM_HWS_HANGCHECK_ADDR;
+	*cs++ = intel_engine_next_hangcheck_seqno(rq->engine);
 
 	*cs++ = MI_USER_INTERRUPT;
 	*cs++ = MI_NOOP;
@@ -448,8 +442,8 @@ static u32 *gen6_xcs_emit_breadcrumb(struct i915_request *rq, u32 *cs)
 	*cs++ = rq->fence.seqno;
 
 	*cs++ = MI_FLUSH_DW | MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_DW_STORE_INDEX;
-	*cs++ = I915_GEM_HWS_INDEX_ADDR | MI_FLUSH_DW_USE_GTT;
-	*cs++ = rq->global_seqno;
+	*cs++ = I915_GEM_HWS_HANGCHECK_ADDR | MI_FLUSH_DW_USE_GTT;
+	*cs++ = intel_engine_next_hangcheck_seqno(rq->engine);
 
 	*cs++ = MI_USER_INTERRUPT;
 	*cs++ = MI_NOOP;
@@ -473,8 +467,8 @@ static u32 *gen7_xcs_emit_breadcrumb(struct i915_request *rq, u32 *cs)
 	*cs++ = rq->fence.seqno;
 
 	*cs++ = MI_FLUSH_DW | MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_DW_STORE_INDEX;
-	*cs++ = I915_GEM_HWS_INDEX_ADDR | MI_FLUSH_DW_USE_GTT;
-	*cs++ = rq->global_seqno;
+	*cs++ = I915_GEM_HWS_HANGCHECK_ADDR | MI_FLUSH_DW_USE_GTT;
+	*cs++ = intel_engine_next_hangcheck_seqno(rq->engine);
 
 	for (i = 0; i < GEN7_XCS_WA; i++) {
 		*cs++ = MI_STORE_DWORD_INDEX;
@@ -554,16 +548,17 @@ static void set_hwsp(struct intel_engine_cs *engine, u32 offset)
 		 */
 		default:
 			GEM_BUG_ON(engine->id);
-		case RCS:
+			/* fallthrough */
+		case RCS0:
 			hwsp = RENDER_HWS_PGA_GEN7;
 			break;
-		case BCS:
+		case BCS0:
 			hwsp = BLT_HWS_PGA_GEN7;
 			break;
-		case VCS:
+		case VCS0:
 			hwsp = BSD_HWS_PGA_GEN7;
 			break;
-		case VECS:
+		case VECS0:
 			hwsp = VEBOX_HWS_PGA_GEN7;
 			break;
 		}
@@ -758,11 +753,6 @@ static void reset_ring(struct intel_engine_cs *engine, bool stalled)
 		}
 	}
 
-	GEM_TRACE("%s seqno=%d, current=%d, stalled? %s\n",
-		  engine->name,
-		  rq ? rq->global_seqno : 0,
-		  intel_engine_get_seqno(engine),
-		  yesno(stalled));
 	/*
 	 * The guilty request will get skipped on a hung engine.
 	 *
@@ -892,18 +882,12 @@ static void cancel_requests(struct intel_engine_cs *engine)
 
 	/* Mark all submitted requests as skipped. */
 	list_for_each_entry(request, &engine->timeline.requests, link) {
-		GEM_BUG_ON(!request->global_seqno);
-
 		if (!i915_request_signaled(request))
 			dma_fence_set_error(&request->fence, -EIO);
 
 		i915_request_mark_complete(request);
 	}
 
-	intel_write_status_page(engine,
-				I915_GEM_HWS_INDEX,
-				intel_engine_last_submit(engine));
-
 	/* Remaining _unready_ requests will be nop'ed when submitted */
 
 	spin_unlock_irqrestore(&engine->timeline.lock, flags);
@@ -931,8 +915,8 @@ static u32 *i9xx_emit_breadcrumb(struct i915_request *rq, u32 *cs)
 	*cs++ = rq->fence.seqno;
 
 	*cs++ = MI_STORE_DWORD_INDEX;
-	*cs++ = I915_GEM_HWS_INDEX_ADDR;
-	*cs++ = rq->global_seqno;
+	*cs++ = I915_GEM_HWS_HANGCHECK_ADDR;
+	*cs++ = intel_engine_next_hangcheck_seqno(rq->engine);
 
 	*cs++ = MI_USER_INTERRUPT;
 
@@ -953,14 +937,14 @@ static u32 *gen5_emit_breadcrumb(struct i915_request *rq, u32 *cs)
 	*cs++ = MI_FLUSH;
 
 	*cs++ = MI_STORE_DWORD_INDEX;
-	*cs++ = I915_GEM_HWS_SEQNO_ADDR;
-	*cs++ = rq->fence.seqno;
+	*cs++ = I915_GEM_HWS_HANGCHECK_ADDR;
+	*cs++ = intel_engine_next_hangcheck_seqno(rq->engine);
 
 	BUILD_BUG_ON(GEN5_WA_STORES < 1);
 	for (i = 0; i < GEN5_WA_STORES; i++) {
 		*cs++ = MI_STORE_DWORD_INDEX;
-		*cs++ = I915_GEM_HWS_INDEX_ADDR;
-		*cs++ = rq->global_seqno;
+		*cs++ = I915_GEM_HWS_SEQNO_ADDR;
+		*cs++ = rq->fence.seqno;
 	}
 
 	*cs++ = MI_USER_INTERRUPT;
@@ -1323,6 +1307,7 @@ intel_engine_create_ring(struct intel_engine_cs *engine,
 	if (!ring)
 		return ERR_PTR(-ENOMEM);
 
+	kref_init(&ring->ref);
 	INIT_LIST_HEAD(&ring->request_list);
 	ring->timeline = i915_timeline_get(timeline);
 
@@ -1347,9 +1332,9 @@ intel_engine_create_ring(struct intel_engine_cs *engine,
 	return ring;
 }
 
-void
-intel_ring_free(struct intel_ring *ring)
+void intel_ring_free(struct kref *ref)
 {
+	struct intel_ring *ring = container_of(ref, typeof(*ring), ref);
 	struct drm_i915_gem_object *obj = ring->vma->obj;
 
 	i915_vma_close(ring->vma);
@@ -1359,17 +1344,24 @@ intel_ring_free(struct intel_ring *ring)
 	kfree(ring);
 }
 
-static void intel_ring_context_destroy(struct intel_context *ce)
+static void __ring_context_fini(struct intel_context *ce)
 {
-	GEM_BUG_ON(ce->pin_count);
-
-	if (!ce->state)
-		return;
-
 	GEM_BUG_ON(i915_gem_object_is_active(ce->state->obj));
 	i915_gem_object_put(ce->state->obj);
 }
 
+static void ring_context_destroy(struct kref *ref)
+{
+	struct intel_context *ce = container_of(ref, typeof(*ce), ref);
+
+	GEM_BUG_ON(intel_context_is_pinned(ce));
+
+	if (ce->state)
+		__ring_context_fini(ce);
+
+	intel_context_free(ce);
+}
+
 static int __context_pin_ppgtt(struct i915_gem_context *ctx)
 {
 	struct i915_hw_ppgtt *ppgtt;
@@ -1436,12 +1428,10 @@ static void __context_unpin(struct intel_context *ce)
 	i915_vma_unpin(vma);
 }
 
-static void intel_ring_context_unpin(struct intel_context *ce)
+static void ring_context_unpin(struct intel_context *ce)
 {
 	__context_unpin_ppgtt(ce->gem_context);
 	__context_unpin(ce);
-
-	i915_gem_context_put(ce->gem_context);
 }
 
 static struct i915_vma *
@@ -1513,70 +1503,46 @@ alloc_context_vma(struct intel_engine_cs *engine)
 	return ERR_PTR(err);
 }
 
-static struct intel_context *
-__ring_context_pin(struct intel_engine_cs *engine,
-		   struct i915_gem_context *ctx,
-		   struct intel_context *ce)
+static int ring_context_pin(struct intel_context *ce)
 {
+	struct intel_engine_cs *engine = ce->engine;
 	int err;
 
+	/* One ringbuffer to rule them all */
+	GEM_BUG_ON(!engine->buffer);
+	ce->ring = engine->buffer;
+
 	if (!ce->state && engine->context_size) {
 		struct i915_vma *vma;
 
 		vma = alloc_context_vma(engine);
-		if (IS_ERR(vma)) {
-			err = PTR_ERR(vma);
-			goto err;
-		}
+		if (IS_ERR(vma))
+			return PTR_ERR(vma);
 
 		ce->state = vma;
 	}
 
 	err = __context_pin(ce);
 	if (err)
-		goto err;
+		return err;
 
 	err = __context_pin_ppgtt(ce->gem_context);
 	if (err)
 		goto err_unpin;
 
-	i915_gem_context_get(ctx);
-
-	/* One ringbuffer to rule them all */
-	GEM_BUG_ON(!engine->buffer);
-	ce->ring = engine->buffer;
-
-	return ce;
+	return 0;
 
 err_unpin:
 	__context_unpin(ce);
-err:
-	ce->pin_count = 0;
-	return ERR_PTR(err);
+	return err;
 }
 
 static const struct intel_context_ops ring_context_ops = {
-	.unpin = intel_ring_context_unpin,
-	.destroy = intel_ring_context_destroy,
+	.pin = ring_context_pin,
+	.unpin = ring_context_unpin,
+	.destroy = ring_context_destroy,
 };
 
-static struct intel_context *
-intel_ring_context_pin(struct intel_engine_cs *engine,
-		       struct i915_gem_context *ctx)
-{
-	struct intel_context *ce = to_intel_context(ctx, engine);
-
-	lockdep_assert_held(&ctx->i915->drm.struct_mutex);
-
-	if (likely(ce->pin_count++))
-		return ce;
-	GEM_BUG_ON(!ce->pin_count); /* no overflow please! */
-
-	ce->ops = &ring_context_ops;
-
-	return __ring_context_pin(engine, ctx, ce);
-}
-
 static int intel_init_ring_buffer(struct intel_engine_cs *engine)
 {
 	struct i915_timeline *timeline;
@@ -1621,7 +1587,7 @@ static int intel_init_ring_buffer(struct intel_engine_cs *engine)
 err_unpin:
 	intel_ring_unpin(ring);
 err_ring:
-	intel_ring_free(ring);
+	intel_ring_put(ring);
 err:
 	intel_engine_cleanup_common(engine);
 	return err;
@@ -1635,7 +1601,7 @@ void intel_engine_cleanup(struct intel_engine_cs *engine)
 		(I915_READ_MODE(engine) & MODE_IDLE) == 0);
 
 	intel_ring_unpin(engine->buffer);
-	intel_ring_free(engine->buffer);
+	intel_ring_put(engine->buffer);
 
 	if (engine->cleanup)
 		engine->cleanup(engine);
@@ -1703,8 +1669,8 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags)
 	struct drm_i915_private *i915 = rq->i915;
 	struct intel_engine_cs *engine = rq->engine;
 	enum intel_engine_id id;
-	const int num_rings =
-		IS_HSW_GT1(i915) ? RUNTIME_INFO(i915)->num_rings - 1 : 0;
+	const int num_engines =
+		IS_HSW_GT1(i915) ? RUNTIME_INFO(i915)->num_engines - 1 : 0;
 	bool force_restore = false;
 	int len;
 	u32 *cs;
@@ -1718,7 +1684,7 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags)
 
 	len = 4;
 	if (IS_GEN(i915, 7))
-		len += 2 + (num_rings ? 4*num_rings + 6 : 0);
+		len += 2 + (num_engines ? 4 * num_engines + 6 : 0);
 	if (flags & MI_FORCE_RESTORE) {
 		GEM_BUG_ON(flags & MI_RESTORE_INHIBIT);
 		flags &= ~MI_FORCE_RESTORE;
@@ -1733,10 +1699,10 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags)
 	/* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
 	if (IS_GEN(i915, 7)) {
 		*cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
-		if (num_rings) {
+		if (num_engines) {
 			struct intel_engine_cs *signaller;
 
-			*cs++ = MI_LOAD_REGISTER_IMM(num_rings);
+			*cs++ = MI_LOAD_REGISTER_IMM(num_engines);
 			for_each_engine(signaller, i915, id) {
 				if (signaller == engine)
 					continue;
@@ -1763,8 +1729,7 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags)
 		 * placeholder we use to flush other contexts.
 		 */
 		*cs++ = MI_SET_CONTEXT;
-		*cs++ = i915_ggtt_offset(to_intel_context(i915->kernel_context,
-							  engine)->state) |
+		*cs++ = i915_ggtt_offset(engine->kernel_context->state) |
 			MI_MM_SPACE_GTT |
 			MI_RESTORE_INHIBIT;
 	}
@@ -1779,11 +1744,11 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags)
 	*cs++ = MI_NOOP;
 
 	if (IS_GEN(i915, 7)) {
-		if (num_rings) {
+		if (num_engines) {
 			struct intel_engine_cs *signaller;
 			i915_reg_t last_reg = {}; /* keep gcc quiet */
 
-			*cs++ = MI_LOAD_REGISTER_IMM(num_rings);
+			*cs++ = MI_LOAD_REGISTER_IMM(num_engines);
 			for_each_engine(signaller, i915, id) {
 				if (signaller == engine)
 					continue;
@@ -1861,7 +1826,7 @@ static int switch_context(struct i915_request *rq)
 		 * explanation.
 		 */
 		loops = 1;
-		if (engine->id == BCS && IS_VALLEYVIEW(engine->i915))
+		if (engine->id == BCS0 && IS_VALLEYVIEW(engine->i915))
 			loops = 32;
 
 		do {
@@ -1870,15 +1835,15 @@ static int switch_context(struct i915_request *rq)
 				goto err;
 		} while (--loops);
 
-		if (intel_engine_flag(engine) & ppgtt->pd_dirty_rings) {
-			unwind_mm = intel_engine_flag(engine);
-			ppgtt->pd_dirty_rings &= ~unwind_mm;
+		if (ppgtt->pd_dirty_engines & engine->mask) {
+			unwind_mm = engine->mask;
+			ppgtt->pd_dirty_engines &= ~unwind_mm;
 			hw_flags = MI_FORCE_RESTORE;
 		}
 	}
 
 	if (rq->hw_context->state) {
-		GEM_BUG_ON(engine->id != RCS);
+		GEM_BUG_ON(engine->id != RCS0);
 
 		/*
 		 * The kernel context(s) is treated as pure scratch and is not
@@ -1938,7 +1903,7 @@ static int switch_context(struct i915_request *rq)
 
 err_mm:
 	if (unwind_mm)
-		ppgtt->pd_dirty_rings |= unwind_mm;
+		ppgtt->pd_dirty_engines |= unwind_mm;
 err:
 	return ret;
 }
@@ -1947,7 +1912,7 @@ static int ring_request_alloc(struct i915_request *request)
 {
 	int ret;
 
-	GEM_BUG_ON(!request->hw_context->pin_count);
+	GEM_BUG_ON(!intel_context_is_pinned(request->hw_context));
 	GEM_BUG_ON(request->timeline->has_initial_breadcrumb);
 
 	/*
@@ -2282,7 +2247,7 @@ static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv,
 	engine->reset.reset = reset_ring;
 	engine->reset.finish = reset_finish;
 
-	engine->context_pin = intel_ring_context_pin;
+	engine->cops = &ring_context_ops;
 	engine->request_alloc = ring_request_alloc;
 
 	/*
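
Since intel_ring_free() is now a kref release function, the get/put side is expected to reduce to trivial wrappers; a sketch of the assumed helpers (presumably static inlines alongside the intel_ring declaration):

	static inline struct intel_ring *intel_ring_get(struct intel_ring *ring)
	{
		kref_get(&ring->ref);
		return ring;
	}

	static inline void intel_ring_put(struct intel_ring *ring)
	{
		/* intel_ring_free() runs when the last reference is dropped */
		kref_put(&ring->ref, intel_ring_free);
	}
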
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 710ffb2217753b85026c92996f092890fa434ebf..a02c92dac5da1a85fa5b6575923a9f99085cb4c3 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -6,22 +6,20 @@
 
 #include <linux/hashtable.h>
 #include <linux/irq_work.h>
+#include <linux/random.h>
 #include <linux/seqlock.h>
 
 #include "i915_gem_batch_pool.h"
-
-#include "i915_reg.h"
 #include "i915_pmu.h"
+#include "i915_reg.h"
 #include "i915_request.h"
 #include "i915_selftest.h"
 #include "i915_timeline.h"
+#include "intel_engine_types.h"
 #include "intel_gpu_commands.h"
 #include "intel_workarounds.h"
 
 struct drm_printer;
-struct i915_sched_attr;
-
-#define I915_CMD_HASH_ORDER 9
 
 /* Early gen2 devices have a cacheline of just 32 bytes, using 64 is overkill,
  * but keeps the logic simple. Indeed, the whole purpose of this macro is just
@@ -31,11 +29,6 @@ struct i915_sched_attr;
 #define CACHELINE_BYTES 64
 #define CACHELINE_DWORDS (CACHELINE_BYTES / sizeof(u32))
 
-struct intel_hw_status_page {
-	struct i915_vma *vma;
-	u32 *addr;
-};
-
 #define I915_READ_TAIL(engine) I915_READ(RING_TAIL((engine)->mmio_base))
 #define I915_WRITE_TAIL(engine, val) I915_WRITE(RING_TAIL((engine)->mmio_base), val)
 
@@ -90,506 +83,7 @@ hangcheck_action_to_str(const enum intel_engine_hangcheck_action a)
 	return "unknown";
 }
 
-#define I915_MAX_SLICES	3
-#define I915_MAX_SUBSLICES 8
-
-#define instdone_slice_mask(dev_priv__) \
-	(IS_GEN(dev_priv__, 7) ? \
-	 1 : RUNTIME_INFO(dev_priv__)->sseu.slice_mask)
-
-#define instdone_subslice_mask(dev_priv__) \
-	(IS_GEN(dev_priv__, 7) ? \
-	 1 : RUNTIME_INFO(dev_priv__)->sseu.subslice_mask[0])
-
-#define for_each_instdone_slice_subslice(dev_priv__, slice__, subslice__) \
-	for ((slice__) = 0, (subslice__) = 0; \
-	     (slice__) < I915_MAX_SLICES; \
-	     (subslice__) = ((subslice__) + 1) < I915_MAX_SUBSLICES ? (subslice__) + 1 : 0, \
-	       (slice__) += ((subslice__) == 0)) \
-		for_each_if((BIT(slice__) & instdone_slice_mask(dev_priv__)) && \
-			    (BIT(subslice__) & instdone_subslice_mask(dev_priv__)))
-
-struct intel_instdone {
-	u32 instdone;
-	/* The following exist only in the RCS engine */
-	u32 slice_common;
-	u32 sampler[I915_MAX_SLICES][I915_MAX_SUBSLICES];
-	u32 row[I915_MAX_SLICES][I915_MAX_SUBSLICES];
-};
-
-struct intel_engine_hangcheck {
-	u64 acthd;
-	u32 seqno;
-	unsigned long action_timestamp;
-	struct intel_instdone instdone;
-};
-
-struct intel_ring {
-	struct i915_vma *vma;
-	void *vaddr;
-
-	struct i915_timeline *timeline;
-	struct list_head request_list;
-	struct list_head active_link;
-
-	u32 head;
-	u32 tail;
-	u32 emit;
-
-	u32 space;
-	u32 size;
-	u32 effective_size;
-};
-
-struct i915_gem_context;
-struct drm_i915_reg_table;
-
-/*
- * we use a single page to load ctx workarounds so all of these
- * values are referred in terms of dwords
- *
- * struct i915_wa_ctx_bb:
- *  offset: specifies batch starting position, also helpful in case
- *    if we want to have multiple batches at different offsets based on
- *    some criteria. It is not a requirement at the moment but provides
- *    an option for future use.
- *  size: size of the batch in DWORDS
- */
-struct i915_ctx_workarounds {
-	struct i915_wa_ctx_bb {
-		u32 offset;
-		u32 size;
-	} indirect_ctx, per_ctx;
-	struct i915_vma *vma;
-};
-
-struct i915_request;
-
-#define I915_MAX_VCS	4
-#define I915_MAX_VECS	2
-
-/*
- * Engine IDs definitions.
- * Keep instances of the same type engine together.
- */
-enum intel_engine_id {
-	RCS = 0,
-	BCS,
-	VCS,
-	VCS2,
-	VCS3,
-	VCS4,
-#define _VCS(n) (VCS + (n))
-	VECS,
-	VECS2
-#define _VECS(n) (VECS + (n))
-};
-
-struct i915_priolist {
-	struct list_head requests[I915_PRIORITY_COUNT];
-	struct rb_node node;
-	unsigned long used;
-	int priority;
-};
-
-#define priolist_for_each_request(it, plist, idx) \
-	for (idx = 0; idx < ARRAY_SIZE((plist)->requests); idx++) \
-		list_for_each_entry(it, &(plist)->requests[idx], sched.link)
-
-#define priolist_for_each_request_consume(it, n, plist, idx) \
-	for (; (idx = ffs((plist)->used)); (plist)->used &= ~BIT(idx - 1)) \
-		list_for_each_entry_safe(it, n, \
-					 &(plist)->requests[idx - 1], \
-					 sched.link)
-
-struct st_preempt_hang {
-	struct completion completion;
-	unsigned int count;
-	bool inject_hang;
-};
-
-/**
- * struct intel_engine_execlists - execlist submission queue and port state
- *
- * The struct intel_engine_execlists represents the combined logical state of
- * driver and the hardware state for execlist mode of submission.
- */
-struct intel_engine_execlists {
-	/**
-	 * @tasklet: softirq tasklet for bottom handler
-	 */
-	struct tasklet_struct tasklet;
-
-	/**
-	 * @default_priolist: priority list for I915_PRIORITY_NORMAL
-	 */
-	struct i915_priolist default_priolist;
-
-	/**
-	 * @no_priolist: priority lists disabled
-	 */
-	bool no_priolist;
-
-	/**
-	 * @submit_reg: gen-specific execlist submission register
-	 * set to the ExecList Submission Port (elsp) register pre-Gen11 and to
-	 * the ExecList Submission Queue Contents register array for Gen11+
-	 */
-	u32 __iomem *submit_reg;
-
-	/**
-	 * @ctrl_reg: the enhanced execlists control register, used to load the
-	 * submit queue on the HW and to request preemptions to idle
-	 */
-	u32 __iomem *ctrl_reg;
-
-	/**
-	 * @port: execlist port states
-	 *
-	 * For each hardware ELSP (ExecList Submission Port) we keep
-	 * track of the last request and the number of times we submitted
-	 * that port to hw. We then count the number of times the hw reports
-	 * a context completion or preemption. As only one context can
-	 * be active on hw, we limit resubmission of context to port[0]. This
-	 * is called Lite Restore, of the context.
-	 */
-	struct execlist_port {
-		/**
-		 * @request_count: combined request and submission count
-		 */
-		struct i915_request *request_count;
-#define EXECLIST_COUNT_BITS 2
-#define port_request(p) ptr_mask_bits((p)->request_count, EXECLIST_COUNT_BITS)
-#define port_count(p) ptr_unmask_bits((p)->request_count, EXECLIST_COUNT_BITS)
-#define port_pack(rq, count) ptr_pack_bits(rq, count, EXECLIST_COUNT_BITS)
-#define port_unpack(p, count) ptr_unpack_bits((p)->request_count, count, EXECLIST_COUNT_BITS)
-#define port_set(p, packed) ((p)->request_count = (packed))
-#define port_isset(p) ((p)->request_count)
-#define port_index(p, execlists) ((p) - (execlists)->port)
-
-		/**
-		 * @context_id: context ID for port
-		 */
-		GEM_DEBUG_DECL(u32 context_id);
-
-#define EXECLIST_MAX_PORTS 2
-	} port[EXECLIST_MAX_PORTS];
-
-	/**
-	 * @active: is the HW active? We consider the HW as active after
-	 * submitting any context for execution and until we have seen the
-	 * last context completion event. After that, we do not expect any
-	 * more events until we submit, and so can park the HW.
-	 *
-	 * As we have a small number of different sources from which we feed
-	 * the HW, we track the state of each inside a single bitfield.
-	 */
-	unsigned int active;
-#define EXECLISTS_ACTIVE_USER 0
-#define EXECLISTS_ACTIVE_PREEMPT 1
-#define EXECLISTS_ACTIVE_HWACK 2
-
-	/**
-	 * @port_mask: number of execlist ports - 1
-	 */
-	unsigned int port_mask;
-
-	/**
-	 * @queue_priority_hint: Highest pending priority.
-	 *
-	 * When we add requests into the queue, or adjust the priority of
-	 * executing requests, we compute the maximum priority of those
-	 * pending requests. We can then use this value to determine if
-	 * we need to preempt the executing requests to service the queue.
-	 * However, since the we may have recorded the priority of an inflight
-	 * request we wanted to preempt but since completed, at the time of
-	 * dequeuing the priority hint may no longer may match the highest
-	 * available request priority.
-	 */
-	int queue_priority_hint;
-
-	/**
-	 * @queue: queue of requests, in priority lists
-	 */
-	struct rb_root_cached queue;
-
-	/**
-	 * @csb_write: control register for Context Switch buffer
-	 *
-	 * Note this register may be either mmio or HWSP shadow.
-	 */
-	u32 *csb_write;
-
-	/**
-	 * @csb_status: status array for Context Switch buffer
-	 *
-	 * Note these register may be either mmio or HWSP shadow.
-	 */
-	u32 *csb_status;
-
-	/**
-	 * @preempt_complete_status: expected CSB upon completing preemption
-	 */
-	u32 preempt_complete_status;
-
-	/**
-	 * @csb_head: context status buffer head
-	 */
-	u8 csb_head;
-
-	I915_SELFTEST_DECLARE(struct st_preempt_hang preempt_hang;)
-};
-
-#define INTEL_ENGINE_CS_MAX_NAME 8
-
-struct intel_engine_cs {
-	struct drm_i915_private *i915;
-	char name[INTEL_ENGINE_CS_MAX_NAME];
-
-	enum intel_engine_id id;
-	unsigned int hw_id;
-	unsigned int guc_id;
-
-	u8 uabi_id;
-	u8 uabi_class;
-
-	u8 class;
-	u8 instance;
-	u32 context_size;
-	u32 mmio_base;
-
-	struct intel_ring *buffer;
-
-	struct i915_timeline timeline;
-
-	struct drm_i915_gem_object *default_state;
-	void *pinned_default_state;
-
-	/* Rather than have every client wait upon all user interrupts,
-	 * with the herd waking after every interrupt and each doing the
-	 * heavyweight seqno dance, we delegate the task (of being the
-	 * bottom-half of the user interrupt) to the first client. After
-	 * every interrupt, we wake up one client, who does the heavyweight
-	 * coherent seqno read and either goes back to sleep (if incomplete),
-	 * or wakes up all the completed clients in parallel, before then
-	 * transferring the bottom-half status to the next client in the queue.
-	 *
-	 * Compared to walking the entire list of waiters in a single dedicated
-	 * bottom-half, we reduce the latency of the first waiter by avoiding
-	 * a context switch, but incur additional coherent seqno reads when
-	 * following the chain of request breadcrumbs. Since it is most likely
-	 * that we have a single client waiting on each seqno, then reducing
-	 * the overhead of waking that client is much preferred.
-	 */
-	struct intel_breadcrumbs {
-		spinlock_t irq_lock;
-		struct list_head signalers;
-
-		struct irq_work irq_work; /* for use from inside irq_lock */
-
-		unsigned int irq_enabled;
-
-		bool irq_armed;
-	} breadcrumbs;
-
-	struct {
-		/**
-		 * @enable: Bitmask of enable sample events on this engine.
-		 *
-		 * Bits correspond to sample event types, for instance
-		 * I915_SAMPLE_QUEUED is bit 0 etc.
-		 */
-		u32 enable;
-		/**
-		 * @enable_count: Reference count for the enabled samplers.
-		 *
-		 * Index number corresponds to @enum drm_i915_pmu_engine_sample.
-		 */
-		unsigned int enable_count[I915_ENGINE_SAMPLE_COUNT];
-		/**
-		 * @sample: Counter values for sampling events.
-		 *
-		 * Our internal timer stores the current counters in this field.
-		 *
-		 * Index number corresponds to @enum drm_i915_pmu_engine_sample.
-		 */
-		struct i915_pmu_sample sample[I915_ENGINE_SAMPLE_COUNT];
-	} pmu;
-
-	/*
-	 * A pool of objects to use as shadow copies of client batch buffers
-	 * when the command parser is enabled. Prevents the client from
-	 * modifying the batch contents after software parsing.
-	 */
-	struct i915_gem_batch_pool batch_pool;
-
-	struct intel_hw_status_page status_page;
-	struct i915_ctx_workarounds wa_ctx;
-	struct i915_wa_list ctx_wa_list;
-	struct i915_wa_list wa_list;
-	struct i915_wa_list whitelist;
-
-	u32             irq_keep_mask; /* always keep these interrupts */
-	u32		irq_enable_mask; /* bitmask to enable ring interrupt */
-	void		(*irq_enable)(struct intel_engine_cs *engine);
-	void		(*irq_disable)(struct intel_engine_cs *engine);
-
-	int		(*init_hw)(struct intel_engine_cs *engine);
-
-	struct {
-		void (*prepare)(struct intel_engine_cs *engine);
-		void (*reset)(struct intel_engine_cs *engine, bool stalled);
-		void (*finish)(struct intel_engine_cs *engine);
-	} reset;
-
-	void		(*park)(struct intel_engine_cs *engine);
-	void		(*unpark)(struct intel_engine_cs *engine);
-
-	void		(*set_default_submission)(struct intel_engine_cs *engine);
-
-	struct intel_context *(*context_pin)(struct intel_engine_cs *engine,
-					     struct i915_gem_context *ctx);
-
-	int		(*request_alloc)(struct i915_request *rq);
-	int		(*init_context)(struct i915_request *rq);
-
-	int		(*emit_flush)(struct i915_request *request, u32 mode);
-#define EMIT_INVALIDATE	BIT(0)
-#define EMIT_FLUSH	BIT(1)
-#define EMIT_BARRIER	(EMIT_INVALIDATE | EMIT_FLUSH)
-	int		(*emit_bb_start)(struct i915_request *rq,
-					 u64 offset, u32 length,
-					 unsigned int dispatch_flags);
-#define I915_DISPATCH_SECURE BIT(0)
-#define I915_DISPATCH_PINNED BIT(1)
-	int		 (*emit_init_breadcrumb)(struct i915_request *rq);
-	u32		*(*emit_fini_breadcrumb)(struct i915_request *rq,
-						 u32 *cs);
-	unsigned int	emit_fini_breadcrumb_dw;
-
-	/* Pass the request to the hardware queue (e.g. directly into
-	 * the legacy ringbuffer or to the end of an execlist).
-	 *
-	 * This is called from an atomic context with irqs disabled; must
-	 * be irq safe.
-	 */
-	void		(*submit_request)(struct i915_request *rq);
-
-	/*
-	 * Call when the priority on a request has changed and it and its
-	 * dependencies may need rescheduling. Note the request itself may
-	 * not be ready to run!
-	 */
-	void		(*schedule)(struct i915_request *request,
-				    const struct i915_sched_attr *attr);
-
-	/*
-	 * Cancel all requests on the hardware, or queued for execution.
-	 * This should only cancel the ready requests that have been
-	 * submitted to the engine (via the engine->submit_request callback).
-	 * This is called when marking the device as wedged.
-	 */
-	void		(*cancel_requests)(struct intel_engine_cs *engine);
-
-	void		(*cleanup)(struct intel_engine_cs *engine);
-
-	struct intel_engine_execlists execlists;
-
-	/* Contexts are pinned whilst they are active on the GPU. The last
-	 * context executed remains active whilst the GPU is idle - the
-	 * switch away and write to the context object only occurs on the
-	 * next execution.  Contexts are only unpinned on retirement of the
-	 * following request ensuring that we can always write to the object
-	 * on the context switch even after idling. Across suspend, we switch
-	 * to the kernel context and trash it as the save may not happen
-	 * before the hardware is powered down.
-	 */
-	struct intel_context *last_retired_context;
-
-	/* status_notifier: list of callbacks for context-switch changes */
-	struct atomic_notifier_head context_status_notifier;
-
-	struct intel_engine_hangcheck hangcheck;
-
-#define I915_ENGINE_NEEDS_CMD_PARSER BIT(0)
-#define I915_ENGINE_SUPPORTS_STATS   BIT(1)
-#define I915_ENGINE_HAS_PREEMPTION   BIT(2)
-	unsigned int flags;
-
-	/*
-	 * Table of commands the command parser needs to know about
-	 * for this engine.
-	 */
-	DECLARE_HASHTABLE(cmd_hash, I915_CMD_HASH_ORDER);
-
-	/*
-	 * Table of registers allowed in commands that read/write registers.
-	 */
-	const struct drm_i915_reg_table *reg_tables;
-	int reg_table_count;
-
-	/*
-	 * Returns the bitmask for the length field of the specified command.
-	 * Return 0 for an unrecognized/invalid command.
-	 *
-	 * If the command parser finds an entry for a command in the engine's
-	 * cmd_tables, it gets the command's length based on the table entry.
-	 * If not, it calls this function to determine the per-engine length
-	 * field encoding for the command (i.e. different opcode ranges use
-	 * certain bits to encode the command length in the header).
-	 */
-	u32 (*get_cmd_length_mask)(u32 cmd_header);
-
-	struct {
-		/**
-		 * @lock: Lock protecting the below fields.
-		 */
-		seqlock_t lock;
-		/**
-		 * @enabled: Reference count indicating number of listeners.
-		 */
-		unsigned int enabled;
-		/**
-		 * @active: Number of contexts currently scheduled in.
-		 */
-		unsigned int active;
-		/**
-		 * @enabled_at: Timestamp when busy stats were enabled.
-		 */
-		ktime_t enabled_at;
-		/**
-		 * @start: Timestamp of the last idle to active transition.
-		 *
-		 * Idle is defined as active == 0, active is active > 0.
-		 */
-		ktime_t start;
-		/**
-		 * @total: Total time this engine was busy.
-		 *
-		 * Accumulated time not counting the most recent block in cases
-		 * where engine is currently busy (active > 0).
-		 */
-		ktime_t total;
-	} stats;
-};
-
-static inline bool
-intel_engine_needs_cmd_parser(const struct intel_engine_cs *engine)
-{
-	return engine->flags & I915_ENGINE_NEEDS_CMD_PARSER;
-}
-
-static inline bool
-intel_engine_supports_stats(const struct intel_engine_cs *engine)
-{
-	return engine->flags & I915_ENGINE_SUPPORTS_STATS;
-}
-
-static inline bool
-intel_engine_has_preemption(const struct intel_engine_cs *engine)
-{
-	return engine->flags & I915_ENGINE_HAS_PREEMPTION;
-}
+void intel_engines_set_scheduler_caps(struct drm_i915_private *i915);
 
 static inline bool __execlists_need_preempt(int prio, int last)
 {
@@ -674,12 +168,6 @@ execlists_port_complete(struct intel_engine_execlists * const execlists,
 	return port;
 }
 
-static inline unsigned int
-intel_engine_flag(const struct intel_engine_cs *engine)
-{
-	return BIT(engine->id);
-}
-
 static inline u32
 intel_read_status_page(const struct intel_engine_cs *engine, int reg)
 {
@@ -722,10 +210,10 @@ intel_write_status_page(struct intel_engine_cs *engine, int reg, u32 value)
  *
  * The area from dword 0x30 to 0x3ff is available for driver usage.
  */
-#define I915_GEM_HWS_INDEX		0x30
-#define I915_GEM_HWS_INDEX_ADDR		(I915_GEM_HWS_INDEX * sizeof(u32))
 #define I915_GEM_HWS_PREEMPT		0x32
 #define I915_GEM_HWS_PREEMPT_ADDR	(I915_GEM_HWS_PREEMPT * sizeof(u32))
+#define I915_GEM_HWS_HANGCHECK		0x34
+#define I915_GEM_HWS_HANGCHECK_ADDR	(I915_GEM_HWS_HANGCHECK * sizeof(u32))
 #define I915_GEM_HWS_SEQNO		0x40
 #define I915_GEM_HWS_SEQNO_ADDR		(I915_GEM_HWS_SEQNO * sizeof(u32))
 #define I915_GEM_HWS_SCRATCH		0x80
@@ -743,7 +231,18 @@ int intel_ring_pin(struct intel_ring *ring);
 void intel_ring_reset(struct intel_ring *ring, u32 tail);
 unsigned int intel_ring_update_space(struct intel_ring *ring);
 void intel_ring_unpin(struct intel_ring *ring);
-void intel_ring_free(struct intel_ring *ring);
+void intel_ring_free(struct kref *ref);
+
+static inline struct intel_ring *intel_ring_get(struct intel_ring *ring)
+{
+	kref_get(&ring->ref);
+	return ring;
+}
+
+static inline void intel_ring_put(struct intel_ring *ring)
+{
+	kref_put(&ring->ref, intel_ring_free);
+}
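+
+/*
+ * struct intel_ring is now reference counted, with intel_ring_free() recast
+ * as the kref release callback that runs on the final intel_ring_put(). A
+ * minimal userspace sketch of the same get/put pattern, using C11 atomics in
+ * place of the kernel's kref (all names hypothetical):
+ *
+ *	#include <stdatomic.h>
+ *	#include <stdio.h>
+ *	#include <stdlib.h>
+ *
+ *	struct ring {
+ *		atomic_int refcount;
+ *		void *vaddr;
+ *	};
+ *
+ *	static struct ring *ring_get(struct ring *r)
+ *	{
+ *		atomic_fetch_add(&r->refcount, 1);	// kref_get()
+ *		return r;
+ *	}
+ *
+ *	static void ring_free(struct ring *r)		// the release callback
+ *	{
+ *		free(r->vaddr);
+ *		free(r);
+ *	}
+ *
+ *	static void ring_put(struct ring *r)
+ *	{
+ *		// kref_put(): only the 1 -> 0 transition frees
+ *		if (atomic_fetch_sub(&r->refcount, 1) == 1)
+ *			ring_free(r);
+ *	}
+ *
+ *	int main(void)
+ *	{
+ *		struct ring *r = calloc(1, sizeof(*r));
+ *
+ *		atomic_init(&r->refcount, 1);	// creator owns a reference
+ *		ring_get(r);			// a second user takes one
+ *		ring_put(r);			// ...and drops it
+ *		ring_put(r);			// last ref: ring_free() runs
+ *		printf("done\n");
+ *		return 0;
+ *	}
+ *
+ * The release-on-1-to-0 transition is what makes the pattern safe for any
+ * holder: whichever put drops the last reference does the freeing.
+ */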
 
 void intel_engine_stop(struct intel_engine_cs *engine);
 void intel_engine_cleanup(struct intel_engine_cs *engine);
@@ -844,8 +343,6 @@ __intel_ring_space(unsigned int head, unsigned int tail, unsigned int size)
 	return (head - tail - CACHELINE_BYTES) & (size - 1);
 }
 
-void intel_engine_write_global_seqno(struct intel_engine_cs *engine, u32 seqno);
-
 int intel_engine_setup_common(struct intel_engine_cs *engine);
 int intel_engine_init_common(struct intel_engine_cs *engine);
 void intel_engine_cleanup_common(struct intel_engine_cs *engine);
@@ -863,44 +360,6 @@ void intel_engine_set_hwsp_writemask(struct intel_engine_cs *engine, u32 mask);
 u64 intel_engine_get_active_head(const struct intel_engine_cs *engine);
 u64 intel_engine_get_last_batch_head(const struct intel_engine_cs *engine);
 
-static inline u32 intel_engine_last_submit(struct intel_engine_cs *engine)
-{
-	/*
-	 * We are only peeking at the tail of the submit queue (and not the
-	 * queue itself) in order to gain a hint as to the current active
-	 * state of the engine. Callers are not expected to be taking
-	 * engine->timeline->lock, nor are they expected to be concerned
-	 * wtih serialising this hint with anything, so document it as
-	 * a hint and nothing more.
-	 */
-	return READ_ONCE(engine->timeline.seqno);
-}
-
-static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine)
-{
-	return intel_read_status_page(engine, I915_GEM_HWS_INDEX);
-}
-
-static inline bool intel_engine_signaled(struct intel_engine_cs *engine,
-					 u32 seqno)
-{
-	return i915_seqno_passed(intel_engine_get_seqno(engine), seqno);
-}
-
-static inline bool intel_engine_has_completed(struct intel_engine_cs *engine,
-					      u32 seqno)
-{
-	GEM_BUG_ON(!seqno);
-	return intel_engine_signaled(engine, seqno);
-}
-
-static inline bool intel_engine_has_started(struct intel_engine_cs *engine,
-					    u32 seqno)
-{
-	GEM_BUG_ON(!seqno);
-	return intel_engine_signaled(engine, seqno - 1);
-}
-
 void intel_engine_get_instdone(struct intel_engine_cs *engine,
 			       struct intel_instdone *instdone);
 
@@ -960,14 +419,14 @@ gen8_emit_ggtt_write_rcs(u32 *cs, u32 value, u32 gtt_offset, u32 flags)
 }
 
 static inline u32 *
-gen8_emit_ggtt_write(u32 *cs, u32 value, u32 gtt_offset)
+gen8_emit_ggtt_write(u32 *cs, u32 value, u32 gtt_offset, u32 flags)
 {
 	/* w/a: bit 5 needs to be zero for MI_FLUSH_DW address. */
 	GEM_BUG_ON(gtt_offset & (1 << 5));
 	/* Offset should be aligned to 8 bytes for both (QW/DW) write types */
 	GEM_BUG_ON(!IS_ALIGNED(gtt_offset, 8));
 
-	*cs++ = (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW;
+	*cs++ = (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW | flags;
 	*cs++ = gtt_offset | MI_FLUSH_DW_USE_GTT;
 	*cs++ = 0;
 	*cs++ = value;
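
gen8_emit_ggtt_write() now takes a flags argument OR'd into the MI_FLUSH_DW header dword, so callers can add qualifiers to the post-sync write. A standalone sketch of the *cs++ emission idiom (the opcode and bit values below are illustrative, not lifted from the hardware spec):

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative opcode/bit values, not taken from the hardware spec. */
    #define MI_FLUSH_DW             (0x26u << 23)
    #define MI_FLUSH_DW_STOREDW     (1u << 14)
    #define MI_FLUSH_DW_USE_GTT     (1u << 2)

    static uint32_t *emit_ggtt_write(uint32_t *cs, uint32_t value,
                                     uint32_t gtt_offset, uint32_t flags)
    {
            *cs++ = (MI_FLUSH_DW + 1) | MI_FLUSH_DW_STOREDW | flags;
            *cs++ = gtt_offset | MI_FLUSH_DW_USE_GTT;
            *cs++ = 0;                      /* upper address dword */
            *cs++ = value;                  /* payload of the post-sync write */
            return cs;                      /* caller advances its ring cursor */
    }

    int main(void)
    {
            uint32_t ring[4];
            uint32_t *cs = emit_ggtt_write(ring, 0xdeadbeef, 0x40 * 4, 0);
            int i;

            for (i = 0; i < cs - ring; i++)
                    printf("dw%d: 0x%08x\n", i, (unsigned)ring[i]);
            return 0;
    }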
@@ -987,7 +446,6 @@ void intel_engines_sanitize(struct drm_i915_private *i915, bool force);
 bool intel_engine_is_idle(struct intel_engine_cs *engine);
 bool intel_engines_are_idle(struct drm_i915_private *dev_priv);
 
-bool intel_engine_has_kernel_context(const struct intel_engine_cs *engine);
 void intel_engine_lost_context(struct intel_engine_cs *engine);
 
 void intel_engines_park(struct drm_i915_private *i915);
@@ -1066,6 +524,9 @@ void intel_disable_engine_stats(struct intel_engine_cs *engine);
 
 ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine);
 
+struct i915_request *
+intel_engine_find_active_request(struct intel_engine_cs *engine);
+
 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
 
 static inline bool inject_preempt_hang(struct intel_engine_execlists *execlists)
@@ -1086,4 +547,17 @@ static inline bool inject_preempt_hang(struct intel_engine_execlists *execlists)
 
 #endif
 
+static inline u32
+intel_engine_next_hangcheck_seqno(struct intel_engine_cs *engine)
+{
+	return engine->hangcheck.next_seqno =
+		next_pseudo_random32(engine->hangcheck.next_seqno);
+}
+
+static inline u32
+intel_engine_get_hangcheck_seqno(struct intel_engine_cs *engine)
+{
+	return intel_read_status_page(engine, I915_GEM_HWS_HANGCHECK);
+}
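+
+/*
+ * These helpers implement the new hangcheck scheme: each pass emits a fresh
+ * pseudo-random seqno to the I915_GEM_HWS_HANGCHECK dword of the HWSP (hence
+ * the <linux/random.h> include added above) and the next pass reads it back
+ * to see whether the engine advanced. As far as I know,
+ * next_pseudo_random32() is the kernel's plain linear congruential
+ * generator; a standalone sketch under that assumption:
+ *
+ *	#include <stdint.h>
+ *	#include <stdio.h>
+ *
+ *	// Assumed to match next_pseudo_random32(): an LCG with the
+ *	// classic Numerical Recipes constants.
+ *	static uint32_t next_pseudo_random32(uint32_t seed)
+ *	{
+ *		return seed * 1664525u + 1013904223u;
+ *	}
+ *
+ *	int main(void)
+ *	{
+ *		uint32_t seqno = 0;
+ *		int i;
+ *
+ *		// Each hangcheck pass wants a fresh, hard-to-collide value.
+ *		for (i = 0; i < 4; i++) {
+ *			seqno = next_pseudo_random32(seqno);
+ *			printf("hangcheck seqno %d: 0x%08x\n", i, (unsigned)seqno);
+ *		}
+ *		return 0;
+ *	}
+ */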
+
 #endif /* _INTEL_RINGBUFFER_H_ */
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index a017a4232c0fae4580da8b0a59bf54a9ba7fdecb..676a89bb8194387e7848b70e07c8bc1985bc7dd3 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -1760,7 +1760,7 @@ static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
 
 	mutex_lock(&dev_priv->pcu_lock);
 
-	state = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe);
+	state = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe);
 	/*
 	 * We only ever set the power-on and power-gate states, anything
 	 * else is unexpected.
@@ -1772,7 +1772,7 @@ static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
 	 * A transient state at this point would mean some unexpected party
 	 * is poking at the power controls too.
 	 */
-	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSC_MASK(pipe);
+	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSC_MASK(pipe);
 	WARN_ON(ctrl << 16 != state);
 
 	mutex_unlock(&dev_priv->pcu_lock);
@@ -1793,20 +1793,20 @@ static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
 	mutex_lock(&dev_priv->pcu_lock);
 
 #define COND \
-	((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe)) == state)
+	((vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe)) == state)
 
 	if (COND)
 		goto out;
 
-	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
+	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
 	ctrl &= ~DP_SSC_MASK(pipe);
 	ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe);
-	vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, ctrl);
+	vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, ctrl);
 
 	if (wait_for(COND, 100))
 		DRM_ERROR("timeout setting power well state %08x (%08x)\n",
 			  state,
-			  vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ));
+			  vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM));
 
 #undef COND
 
@@ -3576,7 +3576,11 @@ static void icl_dbuf_enable(struct drm_i915_private *dev_priv)
 	    !(I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE))
 		DRM_ERROR("DBuf power enable timeout\n");
 	else
-		dev_priv->wm.skl_hw.ddb.enabled_slices = 2;
+		/*
+		 * FIXME: for now pretend that we only have 1 slice, see
+		 * intel_enabled_dbuf_slices_num().
+		 */
+		dev_priv->wm.skl_hw.ddb.enabled_slices = 1;
 }
 
 static void icl_dbuf_disable(struct drm_i915_private *dev_priv)
@@ -3591,7 +3595,11 @@ static void icl_dbuf_disable(struct drm_i915_private *dev_priv)
 	    (I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE))
 		DRM_ERROR("DBuf power disable timeout!\n");
 	else
-		dev_priv->wm.skl_hw.ddb.enabled_slices = 0;
+		/*
+		 * FIXME: for now pretend that the first slice is always
+		 * enabled, see intel_enabled_dbuf_slices_num().
+		 */
+		dev_priv->wm.skl_hw.ddb.enabled_slices = 1;
 }
 
 static void icl_mbus_init(struct drm_i915_private *dev_priv)
@@ -3993,6 +4001,36 @@ static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
 	cmn->desc->ops->disable(dev_priv, cmn);
 }
 
+static bool vlv_punit_is_power_gated(struct drm_i915_private *dev_priv, u32 reg0)
+{
+	bool ret;
+
+	mutex_lock(&dev_priv->pcu_lock);
+	ret = (vlv_punit_read(dev_priv, reg0) & SSPM0_SSC_MASK) == SSPM0_SSC_PWR_GATE;
+	mutex_unlock(&dev_priv->pcu_lock);
+
+	return ret;
+}
+
+static void assert_ved_power_gated(struct drm_i915_private *dev_priv)
+{
+	WARN(!vlv_punit_is_power_gated(dev_priv, PUNIT_REG_VEDSSPM0),
+	     "VED not power gated\n");
+}
+
+static void assert_isp_power_gated(struct drm_i915_private *dev_priv)
+{
+	static const struct pci_device_id isp_ids[] = {
+		{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0f38)},
+		{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x22b8)},
+		{}
+	};
+
+	WARN(!pci_dev_present(isp_ids) &&
+	     !vlv_punit_is_power_gated(dev_priv, PUNIT_REG_ISPSSPM0),
+	     "ISP not power gated\n");
+}
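+
+/*
+ * assert_isp_power_gated() deliberately skips the warning when an ISP PCI
+ * function is present, since a bound ISP driver may legitimately be holding
+ * the power well up; the two device IDs appear to be the Baytrail (0x0f38)
+ * and Cherrytrail (0x22b8) ISP functions. A userspace sketch of the
+ * pci_dev_present()-style table walk (hypothetical data, not the real PCI
+ * core):
+ *
+ *	#include <stdbool.h>
+ *	#include <stdio.h>
+ *
+ *	struct pci_id { unsigned short vendor, device; };
+ *
+ *	static const struct pci_id isp_ids[] = {
+ *		{ 0x8086, 0x0f38 },
+ *		{ 0x8086, 0x22b8 },
+ *		{ 0, 0 }			// table terminator
+ *	};
+ *
+ *	// Hypothetical "bus": the devices enumeration found.
+ *	static const struct pci_id enumerated[] = {
+ *		{ 0x8086, 0x0f31 },
+ *		{ 0x8086, 0x0f38 },		// ISP function exposed
+ *	};
+ *
+ *	static bool pci_dev_present(const struct pci_id *ids)
+ *	{
+ *		unsigned int i;
+ *
+ *		for (; ids->vendor; ids++)
+ *			for (i = 0; i < sizeof(enumerated) / sizeof(enumerated[0]); i++)
+ *				if (enumerated[i].vendor == ids->vendor &&
+ *				    enumerated[i].device == ids->device)
+ *					return true;
+ *		return false;
+ *	}
+ *
+ *	int main(void)
+ *	{
+ *		// Only assert ISP power gating when no ISP driver can own it.
+ *		printf("skip the assert: %s\n",
+ *		       pci_dev_present(isp_ids) ? "yes" : "no");
+ *		return 0;
+ *	}
+ */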
+
 static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv);
 
 /**
@@ -4029,10 +4067,13 @@ void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume)
 		mutex_lock(&power_domains->lock);
 		chv_phy_control_init(i915);
 		mutex_unlock(&power_domains->lock);
+		assert_isp_power_gated(i915);
 	} else if (IS_VALLEYVIEW(i915)) {
 		mutex_lock(&power_domains->lock);
 		vlv_cmnlane_wa(i915);
 		mutex_unlock(&power_domains->lock);
+		assert_ved_power_gated(i915);
+		assert_isp_power_gated(i915);
 	} else if (IS_IVYBRIDGE(i915) || INTEL_GEN(i915) >= 7) {
 		intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915));
 	}
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index e7b0884ba5a57f825a4d3b722e7a9c8b33496551..68f497493d439a31105cef7aeb99edc3588af9c4 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -978,34 +978,109 @@ static bool intel_sdvo_write_infoframe(struct intel_sdvo *intel_sdvo,
 				    &tx_rate, 1);
 }
 
-static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo,
-					 const struct intel_crtc_state *pipe_config,
-					 const struct drm_connector_state *conn_state)
+static ssize_t intel_sdvo_read_infoframe(struct intel_sdvo *intel_sdvo,
+					 unsigned int if_index,
+					 u8 *data, unsigned int length)
+{
+	u8 set_buf_index[2] = { if_index, 0 };
+	u8 hbuf_size, tx_rate, av_split;
+	int i;
+
+	if (!intel_sdvo_get_value(intel_sdvo,
+				  SDVO_CMD_GET_HBUF_AV_SPLIT,
+				  &av_split, 1))
+		return -ENXIO;
+
+	if (av_split < if_index)
+		return 0;
+
+	if (!intel_sdvo_get_value(intel_sdvo,
+				  SDVO_CMD_GET_HBUF_TXRATE,
+				  &tx_rate, 1))
+		return -ENXIO;
+
+	if (tx_rate == SDVO_HBUF_TX_DISABLED)
+		return 0;
+
+	if (!intel_sdvo_set_value(intel_sdvo,
+				  SDVO_CMD_SET_HBUF_INDEX,
+				  set_buf_index, 2))
+		return -ENXIO;
+
+	if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HBUF_INFO,
+				  &hbuf_size, 1))
+		return -ENXIO;
+
+	/* Buffer size is 0 based, hooray! */
+	hbuf_size++;
+
+	DRM_DEBUG_KMS("reading sdvo hbuf: %i, length %u, hbuf_size: %i\n",
+		      if_index, length, hbuf_size);
+
+	hbuf_size = min_t(unsigned int, length, hbuf_size);
+
+	for (i = 0; i < hbuf_size; i += 8) {
+		if (!intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_GET_HBUF_DATA, NULL, 0))
+			return -ENXIO;
+		if (!intel_sdvo_read_response(intel_sdvo, &data[i],
+					      min_t(unsigned int, 8, hbuf_size - i)))
+			return -ENXIO;
+	}
+
+	return hbuf_size;
+}
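
The read path mirrors intel_sdvo_write_infoframe(): the hardware buffer is transferred 8 bytes per SDVO transaction, with min_t() clamping first the caller's buffer and then the final partial chunk. A standalone sketch of the chunked-read loop (the device side is a hypothetical stand-in for SDVO_CMD_GET_HBUF_DATA):

    #include <stdio.h>
    #include <string.h>

    #define CHUNK 8                             /* bytes per SDVO transaction */

    /* Hypothetical device buffer, standing in for SDVO_CMD_GET_HBUF_DATA. */
    static const unsigned char hbuf[17] = "AVI infoframe!!";

    static int read_chunk(unsigned int off, unsigned char *dst, unsigned int len)
    {
            memcpy(dst, hbuf + off, len);
            return 0;
    }

    static int read_infoframe(unsigned char *data, unsigned int length)
    {
            unsigned int hbuf_size = sizeof(hbuf);  /* as GET_HBUF_INFO reports */
            unsigned int i, n;

            if (length < hbuf_size)                 /* min_t(unsigned int, ...) */
                    hbuf_size = length;

            for (i = 0; i < hbuf_size; i += CHUNK) {
                    n = hbuf_size - i < CHUNK ? hbuf_size - i : CHUNK;
                    if (read_chunk(i, data + i, n))
                            return -1;
            }
            return (int)hbuf_size;
    }

    int main(void)
    {
            unsigned char buf[32] = { 0 };
            int len = read_infoframe(buf, sizeof(buf));

            printf("read %d bytes: %s\n", len, (const char *)buf);
            return 0;
    }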
+
+static bool intel_sdvo_compute_avi_infoframe(struct intel_sdvo *intel_sdvo,
+					     struct intel_crtc_state *crtc_state,
+					     struct drm_connector_state *conn_state)
 {
+	struct hdmi_avi_infoframe *frame = &crtc_state->infoframes.avi.avi;
 	const struct drm_display_mode *adjusted_mode =
-		&pipe_config->base.adjusted_mode;
-	u8 sdvo_data[HDMI_INFOFRAME_SIZE(AVI)];
-	union hdmi_infoframe frame;
+		&crtc_state->base.adjusted_mode;
 	int ret;
-	ssize_t len;
 
-	ret = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi,
+	if (!crtc_state->has_hdmi_sink)
+		return true;
+
+	crtc_state->infoframes.enable |=
+		intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI);
+
+	ret = drm_hdmi_avi_infoframe_from_display_mode(frame,
 						       conn_state->connector,
 						       adjusted_mode);
-	if (ret < 0) {
-		DRM_ERROR("couldn't fill AVI infoframe\n");
+	if (ret)
 		return false;
-	}
 
-	drm_hdmi_avi_infoframe_quant_range(&frame.avi,
+	drm_hdmi_avi_infoframe_quant_range(frame,
 					   conn_state->connector,
 					   adjusted_mode,
-					   pipe_config->limited_color_range ?
+					   crtc_state->limited_color_range ?
 					   HDMI_QUANTIZATION_RANGE_LIMITED :
 					   HDMI_QUANTIZATION_RANGE_FULL);
 
-	len = hdmi_infoframe_pack(&frame, sdvo_data, sizeof(sdvo_data));
-	if (len < 0)
+	ret = hdmi_avi_infoframe_check(frame);
+	if (WARN_ON(ret))
+		return false;
+
+	return true;
+}
+
+static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo,
+					 const struct intel_crtc_state *crtc_state)
+{
+	u8 sdvo_data[HDMI_INFOFRAME_SIZE(AVI)];
+	const union hdmi_infoframe *frame = &crtc_state->infoframes.avi;
+	ssize_t len;
+
+	if ((crtc_state->infoframes.enable &
+	     intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI)) == 0)
+		return true;
+
+	if (WARN_ON(frame->any.type != HDMI_INFOFRAME_TYPE_AVI))
+		return false;
+
+	len = hdmi_infoframe_pack_only(frame, sdvo_data, sizeof(sdvo_data));
+	if (WARN_ON(len < 0))
 		return false;
 
 	return intel_sdvo_write_infoframe(intel_sdvo, SDVO_HBUF_INDEX_AVI_IF,
@@ -1013,6 +1088,40 @@ static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo,
 					  sdvo_data, sizeof(sdvo_data));
 }
 
+static void intel_sdvo_get_avi_infoframe(struct intel_sdvo *intel_sdvo,
+					 struct intel_crtc_state *crtc_state)
+{
+	u8 sdvo_data[HDMI_INFOFRAME_SIZE(AVI)];
+	union hdmi_infoframe *frame = &crtc_state->infoframes.avi;
+	ssize_t len;
+	int ret;
+
+	if (!crtc_state->has_hdmi_sink)
+		return;
+
+	len = intel_sdvo_read_infoframe(intel_sdvo, SDVO_HBUF_INDEX_AVI_IF,
+					sdvo_data, sizeof(sdvo_data));
+	if (len < 0) {
+		DRM_DEBUG_KMS("failed to read AVI infoframe\n");
+		return;
+	} else if (len == 0) {
+		return;
+	}
+
+	crtc_state->infoframes.enable |=
+		intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI);
+
+	ret = hdmi_infoframe_unpack(frame, sdvo_data, sizeof(sdvo_data));
+	if (ret) {
+		DRM_DEBUG_KMS("Failed to unpack AVI infoframe\n");
+		return;
+	}
+
+	if (frame->any.type != HDMI_INFOFRAME_TYPE_AVI)
+		DRM_DEBUG_KMS("Found the wrong infoframe type 0x%x (expected 0x%02x)\n",
+			      frame->any.type, HDMI_INFOFRAME_TYPE_AVI);
+}
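+
+/*
+ * Readout is the inverse of the commit path: fetch the raw bytes, unpack
+ * them, validate the frame type, and record the enable bit so the state
+ * checker can compare hardware against software state. A minimal sketch of
+ * a pack/unpack round trip with the HDMI-style checksum, where every byte
+ * including the checksum sums to zero mod 256 (sizes and payload
+ * illustrative):
+ *
+ *	#include <stdint.h>
+ *	#include <stdio.h>
+ *	#include <string.h>
+ *
+ *	#define FRAME_TYPE_AVI 0x82		// AVI infoframe type code
+ *
+ *	static void pack(uint8_t *buf, const uint8_t *payload, uint8_t len)
+ *	{
+ *		uint8_t sum = FRAME_TYPE_AVI + 2 + len;
+ *		uint8_t i;
+ *
+ *		buf[0] = FRAME_TYPE_AVI;
+ *		buf[1] = 2;			// version
+ *		buf[2] = len;
+ *		memcpy(buf + 4, payload, len);
+ *		for (i = 0; i < len; i++)
+ *			sum += payload[i];
+ *		buf[3] = 0x100 - sum;		// all bytes sum to 0 mod 256
+ *	}
+ *
+ *	static int unpack(const uint8_t *buf, uint8_t len)
+ *	{
+ *		uint8_t sum = 0;
+ *		unsigned int i;
+ *
+ *		if (buf[0] != FRAME_TYPE_AVI)
+ *			return -1;		// wrong infoframe type
+ *		for (i = 0; i < 4u + len; i++)
+ *			sum += buf[i];
+ *		return sum ? -1 : 0;		// checksum covers every byte
+ *	}
+ *
+ *	int main(void)
+ *	{
+ *		uint8_t payload[13] = { 0x00, 0x28 };
+ *		uint8_t buf[17];
+ *
+ *		pack(buf, payload, sizeof(payload));
+ *		printf("unpack: %d\n", unpack(buf, sizeof(payload)));
+ *		return 0;
+ *	}
+ */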
+
 static bool intel_sdvo_set_tv_format(struct intel_sdvo *intel_sdvo,
 				     const struct drm_connector_state *conn_state)
 {
@@ -1193,6 +1302,12 @@ static int intel_sdvo_compute_config(struct intel_encoder *encoder,
 	if (intel_sdvo_connector->is_hdmi)
 		adjusted_mode->picture_aspect_ratio = conn_state->picture_aspect_ratio;
 
+	if (!intel_sdvo_compute_avi_infoframe(intel_sdvo,
+					      pipe_config, conn_state)) {
+		DRM_DEBUG_KMS("bad AVI infoframe\n");
+		return -EINVAL;
+	}
+
 	return 0;
 }
 
@@ -1315,8 +1430,7 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder,
 		intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_HDMI);
 		intel_sdvo_set_colorimetry(intel_sdvo,
 					   SDVO_COLORIMETRY_RGB256);
-		intel_sdvo_set_avi_infoframe(intel_sdvo,
-					     crtc_state, conn_state);
+		intel_sdvo_set_avi_infoframe(intel_sdvo, crtc_state);
 	} else
 		intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_DVI);
 
@@ -1507,6 +1621,10 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
 		}
 	}
 
+	WARN(encoder_pixel_multiplier != pipe_config->pixel_multiplier,
+	     "SDVO pixel multiplier mismatch, port: %i, encoder: %i\n",
+	     pipe_config->pixel_multiplier, encoder_pixel_multiplier);
+
 	if (sdvox & HDMI_COLOR_RANGE_16_235)
 		pipe_config->limited_color_range = true;
 
@@ -1519,9 +1637,7 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
 			pipe_config->has_hdmi_sink = true;
 	}
 
-	WARN(encoder_pixel_multiplier != pipe_config->pixel_multiplier,
-	     "SDVO pixel multiplier mismatch, port: %i, encoder: %i\n",
-	     pipe_config->pixel_multiplier, encoder_pixel_multiplier);
+	intel_sdvo_get_avi_infoframe(intel_sdvo, pipe_config);
 }
 
 static void intel_disable_sdvo(struct intel_encoder *encoder,
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 53174d5795741947b609d9c269ba03ed29e675a0..3f2055f70d05123313e769b7e328ed4615973047 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -622,6 +622,9 @@ skl_disable_plane(struct intel_plane *plane,
 
 	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
 
+	if (icl_is_hdr_plane(dev_priv, plane_id))
+		I915_WRITE_FW(PLANE_CUS_CTL(pipe, plane_id), 0);
+
 	skl_write_plane_wm(plane, crtc_state);
 
 	I915_WRITE_FW(PLANE_CTL(pipe, plane_id), 0);
@@ -754,7 +757,12 @@ vlv_update_clrc(const struct intel_plane_state *plane_state)
 
 static u32 vlv_sprite_ctl_crtc(const struct intel_crtc_state *crtc_state)
 {
-	return SP_GAMMA_ENABLE;
+	u32 sprctl = 0;
+
+	if (crtc_state->gamma_enable)
+		sprctl |= SP_GAMMA_ENABLE;
+
+	return sprctl;
 }
 
 static u32 vlv_sprite_ctl(const struct intel_crtc_state *crtc_state,
@@ -929,12 +937,12 @@ vlv_plane_get_hw_state(struct intel_plane *plane,
 
 static u32 ivb_sprite_ctl_crtc(const struct intel_crtc_state *crtc_state)
 {
-	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
 	u32 sprctl = 0;
 
-	sprctl |= SPRITE_GAMMA_ENABLE;
+	if (crtc_state->gamma_enable)
+		sprctl |= SPRITE_GAMMA_ENABLE;
 
-	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
+	if (crtc_state->csc_enable)
 		sprctl |= SPRITE_PIPE_CSC_ENABLE;
 
 	return sprctl;
@@ -1120,7 +1128,15 @@ g4x_sprite_max_stride(struct intel_plane *plane,
 
 static u32 g4x_sprite_ctl_crtc(const struct intel_crtc_state *crtc_state)
 {
-	return DVS_GAMMA_ENABLE;
+	u32 dvscntr = 0;
+
+	if (crtc_state->gamma_enable)
+		dvscntr |= DVS_GAMMA_ENABLE;
+
+	if (crtc_state->csc_enable)
+		dvscntr |= DVS_PIPE_CSC_ENABLE;
+
+	return dvscntr;
 }
 
 static u32 g4x_sprite_ctl(const struct intel_crtc_state *crtc_state,
@@ -1805,7 +1821,7 @@ static const u32 skl_plane_formats[] = {
 	DRM_FORMAT_VYUY,
 };
 
-static const uint32_t icl_plane_formats[] = {
+static const u32 icl_plane_formats[] = {
 	DRM_FORMAT_C8,
 	DRM_FORMAT_RGB565,
 	DRM_FORMAT_XRGB8888,
@@ -1826,7 +1842,7 @@ static const uint32_t icl_plane_formats[] = {
 	DRM_FORMAT_XVYU16161616,
 };
 
-static const uint32_t icl_hdr_plane_formats[] = {
+static const u32 icl_hdr_plane_formats[] = {
 	DRM_FORMAT_C8,
 	DRM_FORMAT_RGB565,
 	DRM_FORMAT_XRGB8888,
@@ -1867,7 +1883,7 @@ static const u32 skl_planar_formats[] = {
 	DRM_FORMAT_NV12,
 };
 
-static const uint32_t glk_planar_formats[] = {
+static const u32 glk_planar_formats[] = {
 	DRM_FORMAT_C8,
 	DRM_FORMAT_RGB565,
 	DRM_FORMAT_XRGB8888,
@@ -1886,7 +1902,7 @@ static const uint32_t glk_planar_formats[] = {
 	DRM_FORMAT_P016,
 };
 
-static const uint32_t icl_planar_formats[] = {
+static const u32 icl_planar_formats[] = {
 	DRM_FORMAT_C8,
 	DRM_FORMAT_RGB565,
 	DRM_FORMAT_XRGB8888,
@@ -1911,7 +1927,7 @@ static const uint32_t icl_planar_formats[] = {
 	DRM_FORMAT_XVYU16161616,
 };
 
-static const uint32_t icl_hdr_planar_formats[] = {
+static const u32 icl_hdr_planar_formats[] = {
 	DRM_FORMAT_C8,
 	DRM_FORMAT_RGB565,
 	DRM_FORMAT_XRGB8888,
diff --git a/drivers/gpu/drm/i915/intel_uc.c b/drivers/gpu/drm/i915/intel_uc.c
index e711eb3268bccb39f631e6018567fe09b9e053f0..2d360d53757fe6d3c8167cf59b9196b33d8820f4 100644
--- a/drivers/gpu/drm/i915/intel_uc.c
+++ b/drivers/gpu/drm/i915/intel_uc.c
@@ -332,8 +332,6 @@ void intel_uc_sanitize(struct drm_i915_private *i915)
 
 	GEM_BUG_ON(!HAS_GUC(i915));
 
-	guc_disable_communication(guc);
-
 	intel_huc_sanitize(huc);
 	intel_guc_sanitize(guc);
 
@@ -451,6 +449,23 @@ void intel_uc_fini_hw(struct drm_i915_private *i915)
 	guc_disable_communication(guc);
 }
 
+/**
+ * intel_uc_reset_prepare - Prepare for reset
+ * @i915: device private
+ *
+ * Disable GuC communication and sanitize the uC state ahead of a full
+ * GPU reset.
+ */
+void intel_uc_reset_prepare(struct drm_i915_private *i915)
+{
+	struct intel_guc *guc = &i915->guc;
+
+	if (!USES_GUC(i915))
+		return;
+
+	guc_disable_communication(guc);
+	intel_uc_sanitize(i915);
+}
+
 int intel_uc_suspend(struct drm_i915_private *i915)
 {
 	struct intel_guc *guc = &i915->guc;
@@ -468,7 +483,7 @@ int intel_uc_suspend(struct drm_i915_private *i915)
 		return err;
 	}
 
-	gen9_disable_guc_interrupts(i915);
+	guc_disable_communication(guc);
 
 	return 0;
 }
@@ -484,7 +499,7 @@ int intel_uc_resume(struct drm_i915_private *i915)
 	if (guc->fw.load_status != INTEL_UC_FIRMWARE_SUCCESS)
 		return 0;
 
-	gen9_enable_guc_interrupts(i915);
+	guc_enable_communication(guc);
 
 	err = intel_guc_resume(guc);
 	if (err) {
diff --git a/drivers/gpu/drm/i915/intel_uc.h b/drivers/gpu/drm/i915/intel_uc.h
index 870faf9011b979c4d450146d241e1c8a2f1dce56..c14729786652783d0649ecdc039b7d4d30169010 100644
--- a/drivers/gpu/drm/i915/intel_uc.h
+++ b/drivers/gpu/drm/i915/intel_uc.h
@@ -38,6 +38,7 @@ int intel_uc_init_hw(struct drm_i915_private *dev_priv);
 void intel_uc_fini_hw(struct drm_i915_private *dev_priv);
 int intel_uc_init(struct drm_i915_private *dev_priv);
 void intel_uc_fini(struct drm_i915_private *dev_priv);
+void intel_uc_reset_prepare(struct drm_i915_private *i915);
 int intel_uc_suspend(struct drm_i915_private *dev_priv);
 int intel_uc_resume(struct drm_i915_private *dev_priv);
 
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 75646a1e0051c336b386c60057abaa9807c9c57e..7129eebc333bca7a87bb003385920e417521a944 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -58,8 +58,11 @@ intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id)
 	return "unknown";
 }
 
+#define fw_ack(d) readl((d)->reg_ack)
+#define fw_set(d, val) writel((val), (d)->reg_set)
+
 static inline void
-fw_domain_reset(struct drm_i915_private *i915,
+fw_domain_reset(const struct intel_uncore *uncore,
 		const struct intel_uncore_forcewake_domain *d)
 {
 	/*
@@ -67,7 +70,7 @@ fw_domain_reset(struct drm_i915_private *i915,
 	 * trying to reset here does exist at this point (engines could be fused
 	 * off in ICL+), so no waiting for acks
 	 */
-	__raw_i915_write32(i915, d->reg_set, i915->uncore.fw_reset);
+	fw_set(d, uncore->fw_reset);
 }
 
 static inline void
@@ -81,36 +84,32 @@ fw_domain_arm_timer(struct intel_uncore_forcewake_domain *d)
 }
 
 static inline int
-__wait_for_ack(const struct drm_i915_private *i915,
-	       const struct intel_uncore_forcewake_domain *d,
+__wait_for_ack(const struct intel_uncore_forcewake_domain *d,
 	       const u32 ack,
 	       const u32 value)
 {
-	return wait_for_atomic((__raw_i915_read32(i915, d->reg_ack) & ack) == value,
+	return wait_for_atomic((fw_ack(d) & ack) == value,
 			       FORCEWAKE_ACK_TIMEOUT_MS);
 }
 
 static inline int
-wait_ack_clear(const struct drm_i915_private *i915,
-	       const struct intel_uncore_forcewake_domain *d,
+wait_ack_clear(const struct intel_uncore_forcewake_domain *d,
 	       const u32 ack)
 {
-	return __wait_for_ack(i915, d, ack, 0);
+	return __wait_for_ack(d, ack, 0);
 }
 
 static inline int
-wait_ack_set(const struct drm_i915_private *i915,
-	     const struct intel_uncore_forcewake_domain *d,
+wait_ack_set(const struct intel_uncore_forcewake_domain *d,
 	     const u32 ack)
 {
-	return __wait_for_ack(i915, d, ack, ack);
+	return __wait_for_ack(d, ack, ack);
 }
 
 static inline void
-fw_domain_wait_ack_clear(const struct drm_i915_private *i915,
-			 const struct intel_uncore_forcewake_domain *d)
+fw_domain_wait_ack_clear(const struct intel_uncore_forcewake_domain *d)
 {
-	if (wait_ack_clear(i915, d, FORCEWAKE_KERNEL))
+	if (wait_ack_clear(d, FORCEWAKE_KERNEL))
 		DRM_ERROR("%s: timed out waiting for forcewake ack to clear.\n",
 			  intel_uncore_forcewake_domain_to_str(d->id));
 }
@@ -121,8 +120,7 @@ enum ack_type {
 };
 
 static int
-fw_domain_wait_ack_with_fallback(const struct drm_i915_private *i915,
-				 const struct intel_uncore_forcewake_domain *d,
+fw_domain_wait_ack_with_fallback(const struct intel_uncore_forcewake_domain *d,
 				 const enum ack_type type)
 {
 	const u32 ack_bit = FORCEWAKE_KERNEL;
@@ -146,141 +144,140 @@ fw_domain_wait_ack_with_fallback(const struct drm_i915_private *i915,
 
 	pass = 1;
 	do {
-		wait_ack_clear(i915, d, FORCEWAKE_KERNEL_FALLBACK);
+		wait_ack_clear(d, FORCEWAKE_KERNEL_FALLBACK);
 
-		__raw_i915_write32(i915, d->reg_set,
-				   _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL_FALLBACK));
+		fw_set(d, _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL_FALLBACK));
 		/* Give gt some time to relax before the polling frenzy */
 		udelay(10 * pass);
-		wait_ack_set(i915, d, FORCEWAKE_KERNEL_FALLBACK);
+		wait_ack_set(d, FORCEWAKE_KERNEL_FALLBACK);
 
-		ack_detected = (__raw_i915_read32(i915, d->reg_ack) & ack_bit) == value;
+		ack_detected = (fw_ack(d) & ack_bit) == value;
 
-		__raw_i915_write32(i915, d->reg_set,
-				   _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL_FALLBACK));
+		fw_set(d, _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL_FALLBACK));
 	} while (!ack_detected && pass++ < 10);
 
 	DRM_DEBUG_DRIVER("%s had to use fallback to %s ack, 0x%x (passes %u)\n",
 			 intel_uncore_forcewake_domain_to_str(d->id),
 			 type == ACK_SET ? "set" : "clear",
-			 __raw_i915_read32(i915, d->reg_ack),
+			 fw_ack(d),
 			 pass);
 
 	return ack_detected ? 0 : -ETIMEDOUT;
 }
 
 static inline void
-fw_domain_wait_ack_clear_fallback(const struct drm_i915_private *i915,
-				  const struct intel_uncore_forcewake_domain *d)
+fw_domain_wait_ack_clear_fallback(const struct intel_uncore_forcewake_domain *d)
 {
-	if (likely(!wait_ack_clear(i915, d, FORCEWAKE_KERNEL)))
+	if (likely(!wait_ack_clear(d, FORCEWAKE_KERNEL)))
 		return;
 
-	if (fw_domain_wait_ack_with_fallback(i915, d, ACK_CLEAR))
-		fw_domain_wait_ack_clear(i915, d);
+	if (fw_domain_wait_ack_with_fallback(d, ACK_CLEAR))
+		fw_domain_wait_ack_clear(d);
 }
 
 static inline void
-fw_domain_get(struct drm_i915_private *i915,
+fw_domain_get(const struct intel_uncore *uncore,
 	      const struct intel_uncore_forcewake_domain *d)
 {
-	__raw_i915_write32(i915, d->reg_set, i915->uncore.fw_set);
+	fw_set(d, uncore->fw_set);
 }
 
 static inline void
-fw_domain_wait_ack_set(const struct drm_i915_private *i915,
-		       const struct intel_uncore_forcewake_domain *d)
+fw_domain_wait_ack_set(const struct intel_uncore_forcewake_domain *d)
 {
-	if (wait_ack_set(i915, d, FORCEWAKE_KERNEL))
+	if (wait_ack_set(d, FORCEWAKE_KERNEL))
 		DRM_ERROR("%s: timed out waiting for forcewake ack request.\n",
 			  intel_uncore_forcewake_domain_to_str(d->id));
 }
 
 static inline void
-fw_domain_wait_ack_set_fallback(const struct drm_i915_private *i915,
-				const struct intel_uncore_forcewake_domain *d)
+fw_domain_wait_ack_set_fallback(const struct intel_uncore_forcewake_domain *d)
 {
-	if (likely(!wait_ack_set(i915, d, FORCEWAKE_KERNEL)))
+	if (likely(!wait_ack_set(d, FORCEWAKE_KERNEL)))
 		return;
 
-	if (fw_domain_wait_ack_with_fallback(i915, d, ACK_SET))
-		fw_domain_wait_ack_set(i915, d);
+	if (fw_domain_wait_ack_with_fallback(d, ACK_SET))
+		fw_domain_wait_ack_set(d);
 }
 
 static inline void
-fw_domain_put(const struct drm_i915_private *i915,
+fw_domain_put(const struct intel_uncore *uncore,
 	      const struct intel_uncore_forcewake_domain *d)
 {
-	__raw_i915_write32(i915, d->reg_set, i915->uncore.fw_clear);
+	fw_set(d, uncore->fw_clear);
 }
 
 static void
 fw_domains_get(struct drm_i915_private *i915, enum forcewake_domains fw_domains)
 {
+	struct intel_uncore *uncore = &i915->uncore;
 	struct intel_uncore_forcewake_domain *d;
 	unsigned int tmp;
 
-	GEM_BUG_ON(fw_domains & ~i915->uncore.fw_domains);
+	GEM_BUG_ON(fw_domains & ~uncore->fw_domains);
 
 	for_each_fw_domain_masked(d, fw_domains, i915, tmp) {
-		fw_domain_wait_ack_clear(i915, d);
-		fw_domain_get(i915, d);
+		fw_domain_wait_ack_clear(d);
+		fw_domain_get(uncore, d);
 	}
 
 	for_each_fw_domain_masked(d, fw_domains, i915, tmp)
-		fw_domain_wait_ack_set(i915, d);
+		fw_domain_wait_ack_set(d);
 
-	i915->uncore.fw_domains_active |= fw_domains;
+	uncore->fw_domains_active |= fw_domains;
 }
 
 static void
 fw_domains_get_with_fallback(struct drm_i915_private *i915,
 			     enum forcewake_domains fw_domains)
 {
+	struct intel_uncore *uncore = &i915->uncore;
 	struct intel_uncore_forcewake_domain *d;
 	unsigned int tmp;
 
-	GEM_BUG_ON(fw_domains & ~i915->uncore.fw_domains);
+	GEM_BUG_ON(fw_domains & ~uncore->fw_domains);
 
 	for_each_fw_domain_masked(d, fw_domains, i915, tmp) {
-		fw_domain_wait_ack_clear_fallback(i915, d);
-		fw_domain_get(i915, d);
+		fw_domain_wait_ack_clear_fallback(d);
+		fw_domain_get(uncore, d);
 	}
 
 	for_each_fw_domain_masked(d, fw_domains, i915, tmp)
-		fw_domain_wait_ack_set_fallback(i915, d);
+		fw_domain_wait_ack_set_fallback(d);
 
-	i915->uncore.fw_domains_active |= fw_domains;
+	uncore->fw_domains_active |= fw_domains;
 }
 
 static void
 fw_domains_put(struct drm_i915_private *i915, enum forcewake_domains fw_domains)
 {
+	struct intel_uncore *uncore = &i915->uncore;
 	struct intel_uncore_forcewake_domain *d;
 	unsigned int tmp;
 
-	GEM_BUG_ON(fw_domains & ~i915->uncore.fw_domains);
+	GEM_BUG_ON(fw_domains & ~uncore->fw_domains);
 
 	for_each_fw_domain_masked(d, fw_domains, i915, tmp)
-		fw_domain_put(i915, d);
+		fw_domain_put(uncore, d);
 
-	i915->uncore.fw_domains_active &= ~fw_domains;
+	uncore->fw_domains_active &= ~fw_domains;
 }
 
 static void
 fw_domains_reset(struct drm_i915_private *i915,
 		 enum forcewake_domains fw_domains)
 {
+	struct intel_uncore *uncore = &i915->uncore;
 	struct intel_uncore_forcewake_domain *d;
 	unsigned int tmp;
 
 	if (!fw_domains)
 		return;
 
-	GEM_BUG_ON(fw_domains & ~i915->uncore.fw_domains);
+	GEM_BUG_ON(fw_domains & ~uncore->fw_domains);
 
 	for_each_fw_domain_masked(d, fw_domains, i915, tmp)
-		fw_domain_reset(i915, d);
+		fw_domain_reset(uncore, d);
 }
 
 static inline u32 gt_thread_status(struct drm_i915_private *dev_priv)
@@ -1337,12 +1334,13 @@ static void fw_domain_init(struct drm_i915_private *dev_priv,
 			   i915_reg_t reg_set,
 			   i915_reg_t reg_ack)
 {
+	struct intel_uncore *uncore = &dev_priv->uncore;
 	struct intel_uncore_forcewake_domain *d;
 
 	if (WARN_ON(domain_id >= FW_DOMAIN_ID_COUNT))
 		return;
 
-	d = &dev_priv->uncore.fw_domain[domain_id];
+	d = &uncore->fw_domain[domain_id];
 
 	WARN_ON(d->wake_count);
 
@@ -1350,8 +1348,8 @@ static void fw_domain_init(struct drm_i915_private *dev_priv,
 	WARN_ON(!i915_mmio_reg_valid(reg_ack));
 
 	d->wake_count = 0;
-	d->reg_set = reg_set;
-	d->reg_ack = reg_ack;
+	d->reg_set = dev_priv->regs + i915_mmio_reg_offset(reg_set);
+	d->reg_ack = dev_priv->regs + i915_mmio_reg_offset(reg_ack);
 
 	d->id = domain_id;
 
@@ -1371,9 +1369,9 @@ static void fw_domain_init(struct drm_i915_private *dev_priv,
 	hrtimer_init(&d->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 	d->timer.function = intel_uncore_fw_release_timer;
 
-	dev_priv->uncore.fw_domains |= BIT(domain_id);
+	uncore->fw_domains |= BIT(domain_id);
 
-	fw_domain_reset(dev_priv, d);
+	fw_domain_reset(uncore, d);
 }
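
With reg_set/reg_ack now stored as precomputed iomem pointers (dev_priv->regs plus the register offset), the fw_ack()/fw_set() macros added earlier in this file collapse to bare readl()/writel() and the wait/ack helpers shed their dev_priv argument entirely. A userspace sketch of the precompute-once pattern (offsets illustrative):

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t fake_bar[0x40];         /* stands in for the mapped MMIO BAR */

    struct fw_domain {
            volatile uint32_t *reg_set;     /* precomputed: regs + offset */
            volatile uint32_t *reg_ack;
    };

    #define fw_ack(d)       (*(d)->reg_ack)         /* readl((d)->reg_ack) */
    #define fw_set(d, v)    (*(d)->reg_set = (v))   /* writel(v, (d)->reg_set) */

    static void fw_domain_init(struct fw_domain *d, volatile uint32_t *regs,
                               unsigned int set, unsigned int ack)
    {
            d->reg_set = regs + set;        /* resolved once at init... */
            d->reg_ack = regs + ack;        /* ...not on every access */
    }

    int main(void)
    {
            struct fw_domain d;

            fw_domain_init(&d, fake_bar, 0x10, 0x14);   /* illustrative offsets */
            fw_set(&d, 0x00010001);
            *d.reg_ack = 1;                 /* pretend the hardware acked */
            printf("set=0x%08x ack=%u\n",
                   (unsigned)*d.reg_set, (unsigned)fw_ack(&d));
            return 0;
    }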
 
 static void fw_domain_fini(struct drm_i915_private *dev_priv,
diff --git a/drivers/gpu/drm/i915/intel_uncore.h b/drivers/gpu/drm/i915/intel_uncore.h
index e5e157d288de4b9fa06e9add47facaa28925e4e9..b0a95469babff93ed37d62a8cddd0d73487068e1 100644
--- a/drivers/gpu/drm/i915/intel_uncore.h
+++ b/drivers/gpu/drm/i915/intel_uncore.h
@@ -116,8 +116,8 @@ struct intel_uncore {
 		unsigned int wake_count;
 		bool active;
 		struct hrtimer timer;
-		i915_reg_t reg_set;
-		i915_reg_t reg_ack;
+		u32 __iomem *reg_set;
+		u32 __iomem *reg_ack;
 	} fw_domain[FW_DOMAIN_ID_COUNT];
 
 	struct {
@@ -138,6 +138,11 @@ struct intel_uncore {
 #define for_each_fw_domain(domain__, dev_priv__, tmp__) \
 	for_each_fw_domain_masked(domain__, (dev_priv__)->uncore.fw_domains, dev_priv__, tmp__)
 
+static inline struct intel_uncore *
+forcewake_domain_to_uncore(const struct intel_uncore_forcewake_domain *d)
+{
+	return container_of(d, struct intel_uncore, fw_domain[d->id]);
+}
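+
+/*
+ * forcewake_domain_to_uncore() recovers the owning intel_uncore from a bare
+ * domain pointer via container_of(), using the id stored in the domain to
+ * identify its fw_domain[] slot. A standard-C sketch of the same trick (the
+ * kernel version indexes the member directly through a GNU extension; names
+ * here are hypothetical):
+ *
+ *	#include <stddef.h>
+ *	#include <stdio.h>
+ *
+ *	#define container_of(ptr, type, member) \
+ *		((type *)((char *)(ptr) - offsetof(type, member)))
+ *
+ *	struct domain { int id; };		// stand-in for a fw domain
+ *
+ *	struct uncore {
+ *		const char *name;
+ *		struct domain fw_domain[4];
+ *	};
+ *
+ *	static struct uncore *domain_to_uncore(struct domain *d)
+ *	{
+ *		// d->id records the array slot, so step back to slot 0
+ *		// first, then subtract the offset of the array itself.
+ *		return container_of(d - d->id, struct uncore, fw_domain[0]);
+ *	}
+ *
+ *	int main(void)
+ *	{
+ *		struct uncore u = { .name = "uncore" };
+ *		int i;
+ *
+ *		for (i = 0; i < 4; i++)
+ *			u.fw_domain[i].id = i;
+ *
+ *		printf("%s\n", domain_to_uncore(&u.fw_domain[2])->name);
+ *		return 0;
+ *	}
+ */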
 
 void intel_uncore_sanitize(struct drm_i915_private *dev_priv);
 void intel_uncore_init(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/intel_vbt_defs.h b/drivers/gpu/drm/i915/intel_vbt_defs.h
index bf3662ad5fed1ebcb3a49a870ccdd2de4de3fbeb..fdbbb9a538041844ed86720e2df34c672e3b7619 100644
--- a/drivers/gpu/drm/i915/intel_vbt_defs.h
+++ b/drivers/gpu/drm/i915/intel_vbt_defs.h
@@ -772,6 +772,9 @@ struct psr_table {
 	/* TP wake up time in multiple of 100 */
 	u16 tp1_wakeup_time;
 	u16 tp2_tp3_wakeup_time;
+
+	/* PSR2 TP2/TP3 wakeup time, 2 bits per panel, for all 16 panels */
+	u32 psr2_tp2_tp3_wakeup_time;
 } __packed;
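
The new field appears to pack one wakeup-time value per panel, 2 bits each, for the 16 possible VBT panel types (hence the u32), with __packed keeping the struct byte-exact against the BIOS table layout. A sketch of decoding such a field under that assumption:

    #include <stdint.h>
    #include <stdio.h>

    /* Assumption: one 2-bit value per panel, 16 panels in a u32; the VBT
     * panel_type selects the slot. */
    static unsigned int get_wakeup_field(uint32_t packed, int panel_type)
    {
            return (packed >> (2 * panel_type)) & 0x3;
    }

    int main(void)
    {
            uint32_t psr2_tp2_tp3_wakeup_time = 0x000000e4; /* panels 0..3 -> 0,1,2,3 */
            int panel;

            for (panel = 0; panel < 4; panel++)
                    printf("panel %d: field %u\n", panel,
                           get_wakeup_field(psr2_tp2_tp3_wakeup_time, panel));
            return 0;
    }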
 
 struct bdb_psr {
diff --git a/drivers/gpu/drm/i915/intel_workarounds.c b/drivers/gpu/drm/i915/intel_workarounds.c
index 15f4a6dee5aad20e80aae26435a3e7baeb435132..283e9a4ef3ca59147f7eb83c33ef05ae68544f0d 100644
--- a/drivers/gpu/drm/i915/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/intel_workarounds.c
@@ -555,6 +555,11 @@ static void icl_ctx_workarounds_init(struct intel_engine_cs *engine)
 			   GEN10_CACHE_MODE_SS,
 			   0, /* write-only, so skip validation */
 			   _MASKED_BIT_ENABLE(FLOAT_BLEND_OPTIMIZATION_ENABLE));
+
+	/* WaDisableGPGPUMidThreadPreemption:icl */
+	WA_SET_FIELD_MASKED(GEN8_CS_CHICKEN1,
+			    GEN9_PREEMPT_GPGPU_LEVEL_MASK,
+			    GEN9_PREEMPT_GPGPU_THREAD_GROUP_LEVEL);
 }
 
 void intel_engine_init_ctx_wa(struct intel_engine_cs *engine)
@@ -564,26 +569,26 @@ void intel_engine_init_ctx_wa(struct intel_engine_cs *engine)
 
 	wa_init_start(wal, "context");
 
-	if (INTEL_GEN(i915) < 8)
-		return;
-	else if (IS_BROADWELL(i915))
-		bdw_ctx_workarounds_init(engine);
-	else if (IS_CHERRYVIEW(i915))
-		chv_ctx_workarounds_init(engine);
-	else if (IS_SKYLAKE(i915))
-		skl_ctx_workarounds_init(engine);
-	else if (IS_BROXTON(i915))
-		bxt_ctx_workarounds_init(engine);
-	else if (IS_KABYLAKE(i915))
-		kbl_ctx_workarounds_init(engine);
-	else if (IS_GEMINILAKE(i915))
-		glk_ctx_workarounds_init(engine);
-	else if (IS_COFFEELAKE(i915))
-		cfl_ctx_workarounds_init(engine);
+	if (IS_ICELAKE(i915))
+		icl_ctx_workarounds_init(engine);
 	else if (IS_CANNONLAKE(i915))
 		cnl_ctx_workarounds_init(engine);
-	else if (IS_ICELAKE(i915))
-		icl_ctx_workarounds_init(engine);
+	else if (IS_COFFEELAKE(i915))
+		cfl_ctx_workarounds_init(engine);
+	else if (IS_GEMINILAKE(i915))
+		glk_ctx_workarounds_init(engine);
+	else if (IS_KABYLAKE(i915))
+		kbl_ctx_workarounds_init(engine);
+	else if (IS_BROXTON(i915))
+		bxt_ctx_workarounds_init(engine);
+	else if (IS_SKYLAKE(i915))
+		skl_ctx_workarounds_init(engine);
+	else if (IS_CHERRYVIEW(i915))
+		chv_ctx_workarounds_init(engine);
+	else if (IS_BROADWELL(i915))
+		bdw_ctx_workarounds_init(engine);
+	else if (INTEL_GEN(i915) < 8)
+		return;
 	else
 		MISSING_CASE(INTEL_GEN(i915));
 
@@ -862,26 +867,22 @@ icl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
 static void
 gt_init_workarounds(struct drm_i915_private *i915, struct i915_wa_list *wal)
 {
-	if (INTEL_GEN(i915) < 8)
-		return;
-	else if (IS_BROADWELL(i915))
-		return;
-	else if (IS_CHERRYVIEW(i915))
-		return;
-	else if (IS_SKYLAKE(i915))
-		skl_gt_workarounds_init(i915, wal);
-	else if (IS_BROXTON(i915))
-		bxt_gt_workarounds_init(i915, wal);
-	else if (IS_KABYLAKE(i915))
-		kbl_gt_workarounds_init(i915, wal);
-	else if (IS_GEMINILAKE(i915))
-		glk_gt_workarounds_init(i915, wal);
-	else if (IS_COFFEELAKE(i915))
-		cfl_gt_workarounds_init(i915, wal);
+	if (IS_ICELAKE(i915))
+		icl_gt_workarounds_init(i915, wal);
 	else if (IS_CANNONLAKE(i915))
 		cnl_gt_workarounds_init(i915, wal);
-	else if (IS_ICELAKE(i915))
-		icl_gt_workarounds_init(i915, wal);
+	else if (IS_COFFEELAKE(i915))
+		cfl_gt_workarounds_init(i915, wal);
+	else if (IS_GEMINILAKE(i915))
+		glk_gt_workarounds_init(i915, wal);
+	else if (IS_KABYLAKE(i915))
+		kbl_gt_workarounds_init(i915, wal);
+	else if (IS_BROXTON(i915))
+		bxt_gt_workarounds_init(i915, wal);
+	else if (IS_SKYLAKE(i915))
+		skl_gt_workarounds_init(i915, wal);
+	else if (INTEL_GEN(i915) <= 8)
+		return;
 	else
 		MISSING_CASE(INTEL_GEN(i915));
 }
@@ -1059,30 +1060,26 @@ void intel_engine_init_whitelist(struct intel_engine_cs *engine)
 	struct drm_i915_private *i915 = engine->i915;
 	struct i915_wa_list *w = &engine->whitelist;
 
-	GEM_BUG_ON(engine->id != RCS);
+	GEM_BUG_ON(engine->id != RCS0);
 
 	wa_init_start(w, "whitelist");
 
-	if (INTEL_GEN(i915) < 8)
-		return;
-	else if (IS_BROADWELL(i915))
-		return;
-	else if (IS_CHERRYVIEW(i915))
-		return;
-	else if (IS_SKYLAKE(i915))
-		skl_whitelist_build(w);
-	else if (IS_BROXTON(i915))
-		bxt_whitelist_build(w);
-	else if (IS_KABYLAKE(i915))
-		kbl_whitelist_build(w);
-	else if (IS_GEMINILAKE(i915))
-		glk_whitelist_build(w);
-	else if (IS_COFFEELAKE(i915))
-		cfl_whitelist_build(w);
+	if (IS_ICELAKE(i915))
+		icl_whitelist_build(w);
 	else if (IS_CANNONLAKE(i915))
 		cnl_whitelist_build(w);
-	else if (IS_ICELAKE(i915))
-		icl_whitelist_build(w);
+	else if (IS_COFFEELAKE(i915))
+		cfl_whitelist_build(w);
+	else if (IS_GEMINILAKE(i915))
+		glk_whitelist_build(w);
+	else if (IS_KABYLAKE(i915))
+		kbl_whitelist_build(w);
+	else if (IS_BROXTON(i915))
+		bxt_whitelist_build(w);
+	else if (IS_SKYLAKE(i915))
+		skl_whitelist_build(w);
+	else if (INTEL_GEN(i915) <= 8)
+		return;
 	else
 		MISSING_CASE(INTEL_GEN(i915));
 
@@ -1170,8 +1167,8 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
 				    GEN7_DISABLE_SAMPLER_PREFETCH);
 	}
 
-	if (IS_GEN(i915, 9) || IS_CANNONLAKE(i915)) {
-		/* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl,cfl,cnl */
+	if (IS_GEN_RANGE(i915, 9, 11)) {
+		/* FtrPerCtxtPreemptionGranularityControl:skl,bxt,kbl,cfl,cnl,icl */
 		wa_masked_en(wal,
 			     GEN7_FF_SLICE_CS_CHICKEN1,
 			     GEN9_FFSC_PERCTX_PREEMPT_CTRL);
@@ -1236,7 +1233,7 @@ engine_init_workarounds(struct intel_engine_cs *engine, struct i915_wa_list *wal
 	if (I915_SELFTEST_ONLY(INTEL_GEN(engine->i915) < 8))
 		return;
 
-	if (engine->id == RCS)
+	if (engine->id == RCS0)
 		rcs_engine_wa_init(engine, wal);
 	else
 		xcs_engine_wa_init(engine, wal);
diff --git a/drivers/gpu/drm/i915/intel_workarounds.h b/drivers/gpu/drm/i915/intel_workarounds.h
index 7c734714b05edc51027e269197602abefc541813..a1bf51c611a9233d1fb9b1bd62775c7a69b84fb4 100644
--- a/drivers/gpu/drm/i915/intel_workarounds.h
+++ b/drivers/gpu/drm/i915/intel_workarounds.h
@@ -9,18 +9,7 @@
 
 #include <linux/slab.h>
 
-struct i915_wa {
-	i915_reg_t	  reg;
-	u32		  mask;
-	u32		  val;
-};
-
-struct i915_wa_list {
-	const char	*name;
-	struct i915_wa	*list;
-	unsigned int	count;
-	unsigned int	wa_count;
-};
+#include "intel_workarounds_types.h"
 
 static inline void intel_wa_list_free(struct i915_wa_list *wal)
 {
diff --git a/drivers/gpu/drm/i915/intel_workarounds_types.h b/drivers/gpu/drm/i915/intel_workarounds_types.h
new file mode 100644
index 0000000000000000000000000000000000000000..30918da180ff2257e09b70e55b6c9f8beb75f788
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_workarounds_types.h
@@ -0,0 +1,27 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2014-2018 Intel Corporation
+ */
+
+#ifndef __INTEL_WORKAROUNDS_TYPES_H__
+#define __INTEL_WORKAROUNDS_TYPES_H__
+
+#include <linux/types.h>
+
+#include "i915_reg.h"
+
+struct i915_wa {
+	i915_reg_t	  reg;
+	u32		  mask;
+	u32		  val;
+};
+
+struct i915_wa_list {
+	const char	*name;
+	struct i915_wa	*list;
+	unsigned int	count;
+	unsigned int	wa_count;
+};
+
+#endif /* __INTEL_WORKAROUNDS_TYPES_H__ */
diff --git a/drivers/gpu/drm/i915/selftests/huge_gem_object.c b/drivers/gpu/drm/i915/selftests/huge_gem_object.c
index 391f3d9ffdf1ab652530121cb6d439b987039a8a..419fd4d6a8f04a5ab01de08cbfeca3f78cc3f3c5 100644
--- a/drivers/gpu/drm/i915/selftests/huge_gem_object.c
+++ b/drivers/gpu/drm/i915/selftests/huge_gem_object.c
@@ -122,7 +122,7 @@ huge_gem_object(struct drm_i915_private *i915,
 	if (overflows_type(dma_size, obj->base.size))
 		return ERR_PTR(-E2BIG);
 
-	obj = i915_gem_object_alloc(i915);
+	obj = i915_gem_object_alloc();
 	if (!obj)
 		return ERR_PTR(-ENOMEM);
 
diff --git a/drivers/gpu/drm/i915/selftests/huge_pages.c b/drivers/gpu/drm/i915/selftests/huge_pages.c
index a9a2fa35876fe5537db2e4693334097aae4f7def..2e1db30af477b0300ef6d00d4bb4b7454a832188 100644
--- a/drivers/gpu/drm/i915/selftests/huge_pages.c
+++ b/drivers/gpu/drm/i915/selftests/huge_pages.c
@@ -171,7 +171,7 @@ huge_pages_object(struct drm_i915_private *i915,
 	if (overflows_type(size, obj->base.size))
 		return ERR_PTR(-E2BIG);
 
-	obj = i915_gem_object_alloc(i915);
+	obj = i915_gem_object_alloc();
 	if (!obj)
 		return ERR_PTR(-ENOMEM);
 
@@ -320,7 +320,7 @@ fake_huge_pages_object(struct drm_i915_private *i915, u64 size, bool single)
 	if (overflows_type(size, obj->base.size))
 		return ERR_PTR(-E2BIG);
 
-	obj = i915_gem_object_alloc(i915);
+	obj = i915_gem_object_alloc();
 	if (!obj)
 		return ERR_PTR(-ENOMEM);
 
@@ -1449,7 +1449,7 @@ static int igt_ppgtt_pin_update(void *arg)
 	 * huge-gtt-pages.
 	 */
 
-	if (!ppgtt || !i915_vm_is_48bit(&ppgtt->vm)) {
+	if (!ppgtt || !i915_vm_is_4lvl(&ppgtt->vm)) {
 		pr_info("48b PPGTT not supported, skipping\n");
 		return 0;
 	}
@@ -1535,7 +1535,7 @@ static int igt_ppgtt_pin_update(void *arg)
 	 * land in the now stale 2M page.
 	 */
 
-	err = gpu_write(vma, ctx, dev_priv->engine[RCS], 0, 0xdeadbeaf);
+	err = gpu_write(vma, ctx, dev_priv->engine[RCS0], 0, 0xdeadbeaf);
 	if (err)
 		goto out_unpin;
 
@@ -1653,7 +1653,7 @@ static int igt_shrink_thp(void *arg)
 	if (err)
 		goto out_unpin;
 
-	err = gpu_write(vma, ctx, i915->engine[RCS], 0, 0xdeadbeaf);
+	err = gpu_write(vma, ctx, i915->engine[RCS0], 0, 0xdeadbeaf);
 	if (err)
 		goto out_unpin;
 
@@ -1709,7 +1709,8 @@ int i915_gem_huge_page_mock_selftests(void)
 		return -ENOMEM;
 
 	/* Pretend to be a device which supports the 48b PPGTT */
-	mkwrite_device_info(dev_priv)->ppgtt = INTEL_PPGTT_FULL_4LVL;
+	mkwrite_device_info(dev_priv)->ppgtt_type = INTEL_PPGTT_FULL;
+	mkwrite_device_info(dev_priv)->ppgtt_size = 48;
 
 	mutex_lock(&dev_priv->drm.struct_mutex);
 	ppgtt = i915_ppgtt_create(dev_priv, ERR_PTR(-ENODEV));
@@ -1718,7 +1719,7 @@ int i915_gem_huge_page_mock_selftests(void)
 		goto out_unlock;
 	}
 
-	if (!i915_vm_is_48bit(&ppgtt->vm)) {
+	if (!i915_vm_is_4lvl(&ppgtt->vm)) {
 		pr_err("failed to create 48b PPGTT\n");
 		err = -EINVAL;
 		goto out_close;
@@ -1764,7 +1765,7 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *dev_priv)
 		return 0;
 	}
 
-	if (i915_terminally_wedged(&dev_priv->gpu_error))
+	if (i915_terminally_wedged(dev_priv))
 		return 0;
 
 	file = mock_file(dev_priv);
diff --git a/drivers/gpu/drm/i915/selftests/i915_active.c b/drivers/gpu/drm/i915/selftests/i915_active.c
index 337b1f98b9230fe2caa6866927debd8fd7ecb98e..27d8f853111be92fa340a2404224f5dba48cf57c 100644
--- a/drivers/gpu/drm/i915/selftests/i915_active.c
+++ b/drivers/gpu/drm/i915/selftests/i915_active.c
@@ -150,7 +150,7 @@ int i915_active_live_selftests(struct drm_i915_private *i915)
 		SUBTEST(live_active_retire),
 	};
 
-	if (i915_terminally_wedged(&i915->gpu_error))
+	if (i915_terminally_wedged(i915))
 		return 0;
 
 	return i915_subtests(tests, i915);
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem.c b/drivers/gpu/drm/i915/selftests/i915_gem.c
index e77b7ed449ae8ec5380ffbcd18184056c6757efa..50bb7bbd26d30a6031f02cd1da21f2dda1ac5340 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem.c
@@ -84,14 +84,9 @@ static void simulate_hibernate(struct drm_i915_private *i915)
 
 static int pm_prepare(struct drm_i915_private *i915)
 {
-	int err = 0;
-
-	if (i915_gem_suspend(i915)) {
-		pr_err("i915_gem_suspend failed\n");
-		err = -EINVAL;
-	}
+	i915_gem_suspend(i915);
 
-	return err;
+	return 0;
 }
 
 static void pm_suspend(struct drm_i915_private *i915)
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c b/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c
index fd89a5a33c1a0b1719c8bbbeb354c55ec498484f..e43630b40fce4c463714c4df75aaa3ceaa8e2080 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c
@@ -202,7 +202,7 @@ static int gpu_set(struct drm_i915_gem_object *obj,
 	if (IS_ERR(vma))
 		return PTR_ERR(vma);
 
-	rq = i915_request_alloc(i915->engine[RCS], i915->kernel_context);
+	rq = i915_request_alloc(i915->engine[RCS0], i915->kernel_context);
 	if (IS_ERR(rq)) {
 		i915_vma_unpin(vma);
 		return PTR_ERR(rq);
@@ -248,15 +248,15 @@ static bool always_valid(struct drm_i915_private *i915)
 
 static bool needs_fence_registers(struct drm_i915_private *i915)
 {
-	return !i915_terminally_wedged(&i915->gpu_error);
+	return !i915_terminally_wedged(i915);
 }
 
 static bool needs_mi_store_dword(struct drm_i915_private *i915)
 {
-	if (i915_terminally_wedged(&i915->gpu_error))
+	if (i915_terminally_wedged(i915))
 		return false;
 
-	return intel_engine_can_store_dword(i915->engine[RCS]);
+	return intel_engine_can_store_dword(i915->engine[RCS0]);
 }
 
 static const struct igt_coherency_mode {
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
index 7eb58a9d1319f9c70f146e5d53e57a699d3b3272..4399ef9ebf1583c480a5f84a8170f2fe191fc8ce 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
@@ -556,7 +556,7 @@ static int igt_ctx_exec(void *arg)
 		ncontexts++;
 	}
 	pr_info("Submitted %lu contexts (across %u engines), filling %lu dwords\n",
-		ncontexts, RUNTIME_INFO(i915)->num_rings, ndwords);
+		ncontexts, RUNTIME_INFO(i915)->num_engines, ndwords);
 
 	dw = 0;
 	list_for_each_entry(obj, &objects, st_link) {
@@ -923,7 +923,7 @@ __igt_ctx_sseu(struct drm_i915_private *i915,
 	       unsigned int flags)
 {
 	struct intel_sseu default_sseu = intel_device_default_sseu(i915);
-	struct intel_engine_cs *engine = i915->engine[RCS];
+	struct intel_engine_cs *engine = i915->engine[RCS0];
 	struct drm_i915_gem_object *obj;
 	struct i915_gem_context *ctx;
 	struct intel_sseu pg_sseu;
@@ -967,6 +967,7 @@ __igt_ctx_sseu(struct drm_i915_private *i915,
 		ret = PTR_ERR(ctx);
 		goto out_unlock;
 	}
+	i915_gem_context_clear_bannable(ctx); /* to reset and beyond! */
 
 	obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
 	if (IS_ERR(obj)) {
@@ -1125,7 +1126,7 @@ static int igt_ctx_readonly(void *arg)
 		}
 	}
 	pr_info("Submitted %lu dwords (across %u engines)\n",
-		ndwords, RUNTIME_INFO(i915)->num_rings);
+		ndwords, RUNTIME_INFO(i915)->num_engines);
 
 	dw = 0;
 	list_for_each_entry(obj, &objects, st_link) {
@@ -1432,7 +1433,7 @@ static int igt_vm_isolation(void *arg)
 
 			div64_u64_rem(i915_prandom_u64_state(&prng),
 				      vm_total, &offset);
-			offset &= ~sizeof(u32);
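+			/* round the offset down to the start of a dword */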
+			offset &= -sizeof(u32);
 			offset += I915_GTT_PAGE_SIZE;
 
 			err = write_to_scratch(ctx_a, engine,
@@ -1458,7 +1459,7 @@ static int igt_vm_isolation(void *arg)
 		count += this;
 	}
 	pr_info("Checked %lu scratch offsets across %d engines\n",
-		count, RUNTIME_INFO(i915)->num_rings);
+		count, RUNTIME_INFO(i915)->num_engines);
 
 out_rpm:
 	intel_runtime_pm_put(i915, wakeref);
@@ -1492,63 +1493,56 @@ static int __igt_switch_to_kernel_context(struct drm_i915_private *i915,
 {
 	struct intel_engine_cs *engine;
 	unsigned int tmp;
-	int err;
+	int pass;
 
 	GEM_TRACE("Testing %s\n", __engine_name(i915, engines));
-	for_each_engine_masked(engine, i915, engines, tmp) {
-		struct i915_request *rq;
+	for (pass = 0; pass < 4; pass++) { /* Once busy; once idle; repeat */
+		bool from_idle = pass & 1;
+		int err;
 
-		rq = i915_request_alloc(engine, ctx);
-		if (IS_ERR(rq))
-			return PTR_ERR(rq);
+		if (!from_idle) {
+			for_each_engine_masked(engine, i915, engines, tmp) {
+				struct i915_request *rq;
 
-		i915_request_add(rq);
-	}
-
-	err = i915_gem_switch_to_kernel_context(i915);
-	if (err)
-		return err;
+				rq = i915_request_alloc(engine, ctx);
+				if (IS_ERR(rq))
+					return PTR_ERR(rq);
 
-	for_each_engine_masked(engine, i915, engines, tmp) {
-		if (!engine_has_kernel_context_barrier(engine)) {
-			pr_err("kernel context not last on engine %s!\n",
-			       engine->name);
-			return -EINVAL;
+				i915_request_add(rq);
+			}
 		}
-	}
 
-	err = i915_gem_wait_for_idle(i915,
-				     I915_WAIT_LOCKED,
-				     MAX_SCHEDULE_TIMEOUT);
-	if (err)
-		return err;
+		err = i915_gem_switch_to_kernel_context(i915,
+							i915->gt.active_engines);
+		if (err)
+			return err;
+
+		if (!from_idle) {
+			err = i915_gem_wait_for_idle(i915,
+						     I915_WAIT_LOCKED,
+						     MAX_SCHEDULE_TIMEOUT);
+			if (err)
+				return err;
+		}
 
-	GEM_BUG_ON(i915->gt.active_requests);
-	for_each_engine_masked(engine, i915, engines, tmp) {
-		if (engine->last_retired_context->gem_context != i915->kernel_context) {
-			pr_err("engine %s not idling in kernel context!\n",
-			       engine->name);
+		if (i915->gt.active_requests) {
+			pr_err("%d active requests remain after switching to kernel context, pass %d (%s) on %s engine%s\n",
+			       i915->gt.active_requests,
+			       pass, from_idle ? "idle" : "busy",
+			       __engine_name(i915, engines),
+			       is_power_of_2(engines) ? "" : "s");
 			return -EINVAL;
 		}
-	}
 
-	err = i915_gem_switch_to_kernel_context(i915);
-	if (err)
-		return err;
+		/* XXX Bonus points for proving we are the kernel context! */
 
-	if (i915->gt.active_requests) {
-		pr_err("switch-to-kernel-context emitted %d requests even though it should already be idling in the kernel context\n",
-		       i915->gt.active_requests);
-		return -EINVAL;
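+		/* Let the idle worker run so the "idle" passes start truly idle */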
+		mutex_unlock(&i915->drm.struct_mutex);
+		drain_delayed_work(&i915->gt.idle_work);
+		mutex_lock(&i915->drm.struct_mutex);
 	}
 
-	for_each_engine_masked(engine, i915, engines, tmp) {
-		if (!intel_engine_has_kernel_context(engine)) {
-			pr_err("kernel context not last on engine %s!\n",
-			       engine->name);
-			return -EINVAL;
-		}
-	}
+	if (igt_flush_test(i915, I915_WAIT_LOCKED))
+		return -EIO;
 
 	return 0;
 }
@@ -1592,8 +1586,6 @@ static int igt_switch_to_kernel_context(void *arg)
 
 out_unlock:
 	GEM_TRACE_DUMP_ON(err);
-	if (igt_flush_test(i915, I915_WAIT_LOCKED))
-		err = -EIO;
 
 	intel_runtime_pm_put(i915, wakeref);
 	mutex_unlock(&i915->drm.struct_mutex);
@@ -1602,10 +1594,116 @@ static int igt_switch_to_kernel_context(void *arg)
 	return err;
 }
 
+static void mock_barrier_task(void *data)
+{
+	unsigned int *counter = data;
+
+	++*counter;
+}
+
+static int mock_context_barrier(void *arg)
+{
+#undef pr_fmt
+#define pr_fmt(x) "context_barrier_task():" # x
+	struct drm_i915_private *i915 = arg;
+	struct i915_gem_context *ctx;
+	struct i915_request *rq;
+	intel_wakeref_t wakeref;
+	unsigned int counter;
+	int err;
+
+	/*
+	 * The context barrier provides us with a callback after it emits
+	 * a request; useful for retiring old state after loading new.
+	 */
+
+	mutex_lock(&i915->drm.struct_mutex);
+
+	ctx = mock_context(i915, "mock");
+	if (IS_ERR(ctx)) {
+		err = PTR_ERR(ctx);
+		goto unlock;
+	}
+
+	counter = 0;
+	err = context_barrier_task(ctx, 0, mock_barrier_task, &counter);
+	if (err) {
+		pr_err("Failed at line %d, err=%d\n", __LINE__, err);
+		goto out;
+	}
+	if (counter == 0) {
+		pr_err("Did not retire immediately with 0 engines\n");
+		err = -EINVAL;
+		goto out;
+	}
+
+	counter = 0;
+	err = context_barrier_task(ctx,
+				   ALL_ENGINES, mock_barrier_task, &counter);
+	if (err) {
+		pr_err("Failed at line %d, err=%d\n", __LINE__, err);
+		goto out;
+	}
+	if (counter == 0) {
+		pr_err("Did not retire immediately for all inactive engines\n");
+		err = -EINVAL;
+		goto out;
+	}
+
+	rq = ERR_PTR(-ENODEV);
+	with_intel_runtime_pm(i915, wakeref)
+		rq = i915_request_alloc(i915->engine[RCS0], ctx);
+	if (IS_ERR(rq)) {
+		pr_err("Request allocation failed!\n");
+		err = PTR_ERR(rq);
+		goto out;
+	}
+	i915_request_add(rq);
+	GEM_BUG_ON(list_empty(&ctx->active_engines));
+
+	counter = 0;
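+	/* Inject a fault on rcs0: the barrier must fail and skip the callback */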
+	context_barrier_inject_fault = BIT(RCS0);
+	err = context_barrier_task(ctx,
+				   ALL_ENGINES, mock_barrier_task, &counter);
+	context_barrier_inject_fault = 0;
+	if (err == -ENXIO)
+		err = 0;
+	else
+		pr_err("Did not hit fault injection!\n");
+	if (counter != 0) {
+		pr_err("Invoked callback on error!\n");
+		err = -EIO;
+	}
+	if (err)
+		goto out;
+
+	counter = 0;
+	err = context_barrier_task(ctx,
+				   ALL_ENGINES, mock_barrier_task, &counter);
+	if (err) {
+		pr_err("Failed at line %d, err=%d\n", __LINE__, err);
+		goto out;
+	}
+	mock_device_flush(i915);
+	if (counter == 0) {
+		pr_err("Did not retire on each active engine\n");
+		err = -EINVAL;
+		goto out;
+	}
+
+out:
+	mock_context_close(ctx);
+unlock:
+	mutex_unlock(&i915->drm.struct_mutex);
+	return err;
+#undef pr_fmt
+#define pr_fmt(x) x
+}
+
 int i915_gem_context_mock_selftests(void)
 {
 	static const struct i915_subtest tests[] = {
 		SUBTEST(igt_switch_to_kernel_context),
+		SUBTEST(mock_context_barrier),
 	};
 	struct drm_i915_private *i915;
 	int err;
@@ -1631,7 +1729,7 @@ int i915_gem_context_live_selftests(struct drm_i915_private *dev_priv)
 		SUBTEST(igt_vm_isolation),
 	};
 
-	if (i915_terminally_wedged(&dev_priv->gpu_error))
+	if (i915_terminally_wedged(dev_priv))
 		return 0;
 
 	return i915_subtests(tests, dev_priv);
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
index 32dce7176f6381dc2a0429691dccc2eafc7fe360..b270eab1cad167253fe12f6f025aebca40f057a1 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
@@ -547,7 +547,7 @@ int i915_gem_evict_live_selftests(struct drm_i915_private *i915)
 		SUBTEST(igt_evict_contexts),
 	};
 
-	if (i915_terminally_wedged(&i915->gpu_error))
+	if (i915_terminally_wedged(i915))
 		return 0;
 
 	return i915_subtests(tests, i915);
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
index 3850ef4a5ec89240883c5fcb074acd4ab78be235..826fd51c331ee6deaae48e3b4bb9be39d5e52886 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
@@ -120,7 +120,7 @@ fake_dma_object(struct drm_i915_private *i915, u64 size)
 	if (overflows_type(size, obj->base.size))
 		return ERR_PTR(-E2BIG);
 
-	obj = i915_gem_object_alloc(i915);
+	obj = i915_gem_object_alloc();
 	if (!obj)
 		goto err;
 
@@ -1681,25 +1681,31 @@ int i915_gem_gtt_mock_selftests(void)
 		SUBTEST(igt_gtt_insert),
 	};
 	struct drm_i915_private *i915;
-	struct i915_ggtt ggtt;
+	struct i915_ggtt *ggtt;
 	int err;
 
 	i915 = mock_gem_device();
 	if (!i915)
 		return -ENOMEM;
 
-	mock_init_ggtt(i915, &ggtt);
+	ggtt = kmalloc(sizeof(*ggtt), GFP_KERNEL);
+	if (!ggtt) {
+		err = -ENOMEM;
+		goto out_put;
+	}
+	mock_init_ggtt(i915, ggtt);
 
 	mutex_lock(&i915->drm.struct_mutex);
-	err = i915_subtests(tests, &ggtt);
+	err = i915_subtests(tests, ggtt);
 	mock_device_flush(i915);
 	mutex_unlock(&i915->drm.struct_mutex);
 
 	i915_gem_drain_freed_objects(i915);
 
-	mock_fini_ggtt(&ggtt);
+	mock_fini_ggtt(ggtt);
+	kfree(ggtt);
+out_put:
 	drm_dev_put(&i915->drm);
-
 	return err;
 }
 
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_object.c b/drivers/gpu/drm/i915/selftests/i915_gem_object.c
index 395ae878e0f7c3529ede2201b3b388a0a46dc785..971148fbe6f50ba17894fcf996dffa94bdf2614d 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_object.c
@@ -468,7 +468,7 @@ static int make_obj_busy(struct drm_i915_gem_object *obj)
 	if (err)
 		return err;
 
-	rq = i915_request_alloc(i915->engine[RCS], i915->kernel_context);
+	rq = i915_request_alloc(i915->engine[RCS0], i915->kernel_context);
 	if (IS_ERR(rq)) {
 		i915_vma_unpin(vma);
 		return PTR_ERR(rq);
@@ -583,7 +583,7 @@ static int igt_mmap_offset_exhaustion(void *arg)
 	for (loop = 0; loop < 3; loop++) {
 		intel_wakeref_t wakeref;
 
-		if (i915_terminally_wedged(&i915->gpu_error))
+		if (i915_terminally_wedged(i915))
 			break;
 
 		obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c
index 6733dc5b6b4c84e19f89ca0ea24be967dc69a625..3eb6a6b075abb5aca06ee80b4a76ca5af1e6a0e1 100644
--- a/drivers/gpu/drm/i915/selftests/i915_request.c
+++ b/drivers/gpu/drm/i915/selftests/i915_request.c
@@ -42,7 +42,7 @@ static int igt_add_request(void *arg)
 	/* Basic preliminary test to create a request and let it loose! */
 
 	mutex_lock(&i915->drm.struct_mutex);
-	request = mock_request(i915->engine[RCS],
+	request = mock_request(i915->engine[RCS0],
 			       i915->kernel_context,
 			       HZ / 10);
 	if (!request)
@@ -66,7 +66,7 @@ static int igt_wait_request(void *arg)
 	/* Submit a request, then wait upon it */
 
 	mutex_lock(&i915->drm.struct_mutex);
-	request = mock_request(i915->engine[RCS], i915->kernel_context, T);
+	request = mock_request(i915->engine[RCS0], i915->kernel_context, T);
 	if (!request) {
 		err = -ENOMEM;
 		goto out_unlock;
@@ -136,19 +136,17 @@ static int igt_fence_wait(void *arg)
 	/* Submit a request, treat it as a fence and wait upon it */
 
 	mutex_lock(&i915->drm.struct_mutex);
-	request = mock_request(i915->engine[RCS], i915->kernel_context, T);
+	request = mock_request(i915->engine[RCS0], i915->kernel_context, T);
 	if (!request) {
 		err = -ENOMEM;
 		goto out_locked;
 	}
-	mutex_unlock(&i915->drm.struct_mutex); /* safe as we are single user */
 
 	if (dma_fence_wait_timeout(&request->fence, false, T) != -ETIME) {
 		pr_err("fence wait success before submit (expected timeout)!\n");
-		goto out_device;
+		goto out_locked;
 	}
 
-	mutex_lock(&i915->drm.struct_mutex);
 	i915_request_add(request);
 	mutex_unlock(&i915->drm.struct_mutex);
 
@@ -195,7 +193,7 @@ static int igt_request_rewind(void *arg)
 
 	mutex_lock(&i915->drm.struct_mutex);
 	ctx[0] = mock_context(i915, "A");
-	request = mock_request(i915->engine[RCS], ctx[0], 2 * HZ);
+	request = mock_request(i915->engine[RCS0], ctx[0], 2 * HZ);
 	if (!request) {
 		err = -ENOMEM;
 		goto err_context_0;
@@ -205,7 +203,7 @@ static int igt_request_rewind(void *arg)
 	i915_request_add(request);
 
 	ctx[1] = mock_context(i915, "B");
-	vip = mock_request(i915->engine[RCS], ctx[1], 0);
+	vip = mock_request(i915->engine[RCS0], ctx[1], 0);
 	if (!vip) {
 		err = -ENOMEM;
 		goto err_context_1;
@@ -226,8 +224,7 @@ static int igt_request_rewind(void *arg)
 	mutex_unlock(&i915->drm.struct_mutex);
 
 	if (i915_request_wait(vip, 0, HZ) == -ETIME) {
-		pr_err("timed out waiting for high priority request, vip.seqno=%d, current seqno=%d\n",
-		       vip->global_seqno, intel_engine_get_seqno(i915->engine[RCS]));
+		pr_err("timed out waiting for high priority request\n");
 		goto err;
 	}
 
@@ -418,7 +415,7 @@ static int mock_breadcrumbs_smoketest(void *arg)
 {
 	struct drm_i915_private *i915 = arg;
 	struct smoketest t = {
-		.engine = i915->engine[RCS],
+		.engine = i915->engine[RCS0],
 		.ncontexts = 1024,
 		.max_batch = 1024,
 		.request_alloc = __mock_request_alloc
@@ -1219,7 +1216,7 @@ static int live_breadcrumbs_smoketest(void *arg)
 		num_fences += atomic_long_read(&t[id].num_fences);
 	}
 	pr_info("Completed %lu waits for %lu fences across %d engines and %d cpus\n",
-		num_waits, num_fences, RUNTIME_INFO(i915)->num_rings, ncpus);
+		num_waits, num_fences, RUNTIME_INFO(i915)->num_engines, ncpus);
 
 	mutex_lock(&i915->drm.struct_mutex);
 	ret = igt_live_test_end(&live) ?: ret;
@@ -1246,7 +1243,7 @@ int i915_request_live_selftests(struct drm_i915_private *i915)
 		SUBTEST(live_breadcrumbs_smoketest),
 	};
 
-	if (i915_terminally_wedged(&i915->gpu_error))
+	if (i915_terminally_wedged(i915))
 		return 0;
 
 	return i915_subtests(tests, i915);
diff --git a/drivers/gpu/drm/i915/selftests/i915_selftest.c b/drivers/gpu/drm/i915/selftests/i915_selftest.c
index 10ef0e636a247c985db45409f158800f310ad3cb..b18eaefef7982f465fa7648bf7148775e5aeeb53 100644
--- a/drivers/gpu/drm/i915/selftests/i915_selftest.c
+++ b/drivers/gpu/drm/i915/selftests/i915_selftest.c
@@ -133,7 +133,7 @@ static int __run_selftests(const char *name,
 		if (signal_pending(current))
 			return -EINTR;
 
-		pr_debug(DRIVER_NAME ": Running %s\n", st->name);
+		pr_info(DRIVER_NAME ": Running %s\n", st->name);
 		if (data)
 			err = st->live(data);
 		else
@@ -255,7 +255,7 @@ int __i915_subtests(const char *caller,
 		if (!apply_subtest_filter(caller, st->name))
 			continue;
 
-		pr_debug(DRIVER_NAME ": Running %s/%s\n", caller, st->name);
+		pr_info(DRIVER_NAME ": Running %s/%s\n", caller, st->name);
 		GEM_TRACE("Running %s/%s\n", caller, st->name);
 
 		err = st->func(data);
diff --git a/drivers/gpu/drm/i915/selftests/i915_sw_fence.c b/drivers/gpu/drm/i915/selftests/i915_sw_fence.c
index cdbc8f134e5e47a60e021be30f5aa9160283b947..cbf45d85cbff4ae0c83b6feafa40b1b7ef2f0139 100644
--- a/drivers/gpu/drm/i915/selftests/i915_sw_fence.c
+++ b/drivers/gpu/drm/i915/selftests/i915_sw_fence.c
@@ -571,21 +571,27 @@ static int test_timer(void *arg)
 	unsigned long target, delay;
 	struct timed_fence tf;
 
+	preempt_disable();
 	timed_fence_init(&tf, target = jiffies);
 	if (!i915_sw_fence_done(&tf.fence)) {
 		pr_err("Fence with immediate expiration not signaled\n");
 		goto err;
 	}
+	preempt_enable();
 	timed_fence_fini(&tf);
 
 	for_each_prime_number(delay, i915_selftest.timeout_jiffies/2) {
+		preempt_disable();
 		timed_fence_init(&tf, target = jiffies + delay);
 		if (i915_sw_fence_done(&tf.fence)) {
 			pr_err("Fence with future expiration (%lu jiffies) already signaled\n", delay);
 			goto err;
 		}
+		preempt_enable();
 
 		i915_sw_fence_wait(&tf.fence);
+
+		preempt_disable();
 		if (!i915_sw_fence_done(&tf.fence)) {
 			pr_err("Fence not signaled after wait\n");
 			goto err;
@@ -595,13 +601,14 @@ static int test_timer(void *arg)
 			       target, jiffies);
 			goto err;
 		}
-
+		preempt_enable();
 		timed_fence_fini(&tf);
 	}
 
 	return 0;
 
 err:
+	preempt_enable();
 	timed_fence_fini(&tf);
 	return -EINVAL;
 }
diff --git a/drivers/gpu/drm/i915/selftests/i915_timeline.c b/drivers/gpu/drm/i915/selftests/i915_timeline.c
index 12ea69b1a1e570d993f81e5fab2ca8f615e68071..844701759ffc14f27f3573a28738387a6e531b72 100644
--- a/drivers/gpu/drm/i915/selftests/i915_timeline.c
+++ b/drivers/gpu/drm/i915/selftests/i915_timeline.c
@@ -641,6 +641,118 @@ static int live_hwsp_alternate(void *arg)
 #undef NUM_TIMELINES
 }
 
+static int live_hwsp_wrap(void *arg)
+{
+	struct drm_i915_private *i915 = arg;
+	struct intel_engine_cs *engine;
+	struct i915_timeline *tl;
+	enum intel_engine_id id;
+	intel_wakeref_t wakeref;
+	int err = 0;
+
+	/*
+	 * Across a seqno wrap, we need to keep the old cacheline alive for
+	 * foreign GPU references.
+	 */
+
+	mutex_lock(&i915->drm.struct_mutex);
+	wakeref = intel_runtime_pm_get(i915);
+
+	tl = i915_timeline_create(i915, __func__, NULL);
+	if (IS_ERR(tl)) {
+		err = PTR_ERR(tl);
+		goto out_rpm;
+	}
+	if (!tl->has_initial_breadcrumb || !tl->hwsp_cacheline)
+		goto out_free;
+
+	err = i915_timeline_pin(tl);
+	if (err)
+		goto out_free;
+
+	for_each_engine(engine, i915, id) {
+		const u32 *hwsp_seqno[2];
+		struct i915_request *rq;
+		u32 seqno[2];
+
+		if (!intel_engine_can_store_dword(engine))
+			continue;
+
+		rq = i915_request_alloc(engine, i915->kernel_context);
+		if (IS_ERR(rq)) {
+			err = PTR_ERR(rq);
+			goto out;
+		}
+
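+		/* Leave the timeline on the brink of wrapping its u32 seqno */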
+		tl->seqno = -4u;
+
+		err = i915_timeline_get_seqno(tl, rq, &seqno[0]);
+		if (err) {
+			i915_request_add(rq);
+			goto out;
+		}
+		pr_debug("seqno[0]:%08x, hwsp_offset:%08x\n",
+			 seqno[0], tl->hwsp_offset);
+
+		err = emit_ggtt_store_dw(rq, tl->hwsp_offset, seqno[0]);
+		if (err) {
+			i915_request_add(rq);
+			goto out;
+		}
+		hwsp_seqno[0] = tl->hwsp_seqno;
+
+		err = i915_timeline_get_seqno(tl, rq, &seqno[1]);
+		if (err) {
+			i915_request_add(rq);
+			goto out;
+		}
+		pr_debug("seqno[1]:%08x, hwsp_offset:%08x\n",
+			 seqno[1], tl->hwsp_offset);
+
+		err = emit_ggtt_store_dw(rq, tl->hwsp_offset, seqno[1]);
+		if (err) {
+			i915_request_add(rq);
+			goto out;
+		}
+		hwsp_seqno[1] = tl->hwsp_seqno;
+
+		/* With a wrap there should come a new hwsp cacheline */
+		GEM_BUG_ON(seqno[1] >= seqno[0]);
+		GEM_BUG_ON(hwsp_seqno[0] == hwsp_seqno[1]);
+
+		i915_request_add(rq);
+
+		if (i915_request_wait(rq, I915_WAIT_LOCKED, HZ / 5) < 0) {
+			pr_err("Wait for timeline writes timed out!\n");
+			err = -EIO;
+			goto out;
+		}
+
+		if (*hwsp_seqno[0] != seqno[0] || *hwsp_seqno[1] != seqno[1]) {
+			pr_err("Bad timeline values: found (%x, %x), expected (%x, %x)\n",
+			       *hwsp_seqno[0], *hwsp_seqno[1],
+			       seqno[0], seqno[1]);
+			err = -EINVAL;
+			goto out;
+		}
+
+		i915_retire_requests(i915); /* recycle HWSP */
+	}
+
+out:
+	if (igt_flush_test(i915, I915_WAIT_LOCKED))
+		err = -EIO;
+
+	i915_timeline_unpin(tl);
+out_free:
+	i915_timeline_put(tl);
+out_rpm:
+	intel_runtime_pm_put(i915, wakeref);
+	mutex_unlock(&i915->drm.struct_mutex);
+
+	return err;
+}
+
 static int live_hwsp_recycle(void *arg)
 {
 	struct drm_i915_private *i915 = arg;
@@ -723,6 +835,7 @@ int i915_timeline_live_selftests(struct drm_i915_private *i915)
 		SUBTEST(live_hwsp_recycle),
 		SUBTEST(live_hwsp_engine),
 		SUBTEST(live_hwsp_alternate),
+		SUBTEST(live_hwsp_wrap),
 	};
 
 	return i915_subtests(tests, i915);
diff --git a/drivers/gpu/drm/i915/selftests/i915_vma.c b/drivers/gpu/drm/i915/selftests/i915_vma.c
index cf1de82741fa7c32bcb523f51dacfd46bd4a1ea9..fc594b030f5a71029cfdef8b134cffc5ec564abb 100644
--- a/drivers/gpu/drm/i915/selftests/i915_vma.c
+++ b/drivers/gpu/drm/i915/selftests/i915_vma.c
@@ -725,24 +725,30 @@ int i915_vma_mock_selftests(void)
 		SUBTEST(igt_vma_partial),
 	};
 	struct drm_i915_private *i915;
-	struct i915_ggtt ggtt;
+	struct i915_ggtt *ggtt;
 	int err;
 
 	i915 = mock_gem_device();
 	if (!i915)
 		return -ENOMEM;
 
-	mock_init_ggtt(i915, &ggtt);
+	ggtt = kmalloc(sizeof(*ggtt), GFP_KERNEL);
+	if (!ggtt) {
+		err = -ENOMEM;
+		goto out_put;
+	}
+	mock_init_ggtt(i915, ggtt);
 
 	mutex_lock(&i915->drm.struct_mutex);
-	err = i915_subtests(tests, &ggtt);
+	err = i915_subtests(tests, ggtt);
 	mock_device_flush(i915);
 	mutex_unlock(&i915->drm.struct_mutex);
 
 	i915_gem_drain_freed_objects(i915);
 
-	mock_fini_ggtt(&ggtt);
+	mock_fini_ggtt(ggtt);
+	kfree(ggtt);
+out_put:
 	drm_dev_put(&i915->drm);
-
 	return err;
 }
diff --git a/drivers/gpu/drm/i915/selftests/igt_flush_test.c b/drivers/gpu/drm/i915/selftests/igt_flush_test.c
index af66e3d4e23a421521bbbfc27093d84dd2a4d8ea..94aee4071a66f4d43d5f428cfacdbe49cb7fe605 100644
--- a/drivers/gpu/drm/i915/selftests/igt_flush_test.c
+++ b/drivers/gpu/drm/i915/selftests/igt_flush_test.c
@@ -14,7 +14,7 @@ int igt_flush_test(struct drm_i915_private *i915, unsigned int flags)
 	cond_resched();
 
 	if (flags & I915_WAIT_LOCKED &&
-	    i915_gem_switch_to_kernel_context(i915)) {
+	    i915_gem_switch_to_kernel_context(i915, i915->gt.active_engines)) {
 		pr_err("Failed to switch back to kernel context; declaring wedged\n");
 		i915_gem_set_wedged(i915);
 	}
@@ -29,5 +29,5 @@ int igt_flush_test(struct drm_i915_private *i915, unsigned int flags)
 		i915_gem_set_wedged(i915);
 	}
 
-	return i915_terminally_wedged(&i915->gpu_error) ? -EIO : 0;
+	return i915_terminally_wedged(i915);
 }
diff --git a/drivers/gpu/drm/i915/selftests/igt_spinner.c b/drivers/gpu/drm/i915/selftests/igt_spinner.c
index 9ebd9225684e8743f2b8db772de4c9d9f7f45465..d0b93a3fbc5455b43b09e0721f7dbe1f2ca3698e 100644
--- a/drivers/gpu/drm/i915/selftests/igt_spinner.c
+++ b/drivers/gpu/drm/i915/selftests/igt_spinner.c
@@ -144,6 +144,13 @@ igt_spinner_create_request(struct igt_spinner *spin,
 
 	i915_gem_chipset_flush(spin->i915);
 
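+	/* Mark the request as started before the spinner payload runs */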
+	if (engine->emit_init_breadcrumb &&
+	    rq->timeline->has_initial_breadcrumb) {
+		err = engine->emit_init_breadcrumb(rq);
+		if (err)
+			goto cancel_rq;
+	}
+
 	err = engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, 0);
 
 cancel_rq:
diff --git a/drivers/gpu/drm/i915/selftests/intel_guc.c b/drivers/gpu/drm/i915/selftests/intel_guc.c
index c5e0a0e98fcb16d499b082357f99a48a27359011..b05a21eaa8f451ab8aba9ad80abf1d590b909422 100644
--- a/drivers/gpu/drm/i915/selftests/intel_guc.c
+++ b/drivers/gpu/drm/i915/selftests/intel_guc.c
@@ -111,7 +111,7 @@ static int validate_client(struct intel_guc_client *client,
 			dev_priv->preempt_context : dev_priv->kernel_context;
 
 	if (client->owner != ctx_owner ||
-	    client->engines != INTEL_INFO(dev_priv)->ring_mask ||
+	    client->engines != INTEL_INFO(dev_priv)->engine_mask ||
 	    client->priority != client_priority ||
 	    client->doorbell_id == GUC_DOORBELL_INVALID)
 		return -EINVAL;
@@ -261,7 +261,7 @@ static int igt_guc_doorbells(void *arg)
 
 	for (i = 0; i < ATTEMPTS; i++) {
 		clients[i] = guc_client_alloc(dev_priv,
-					      INTEL_INFO(dev_priv)->ring_mask,
+					      INTEL_INFO(dev_priv)->engine_mask,
 					      i % GUC_CLIENT_PRIORITY_NUM,
 					      dev_priv->kernel_context);
 
diff --git a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
index 7b6f3bea9ef8cec72c2d27de337313b7a7cdbcc8..b5e35b2a925f2c8331198ea0fa7202fe19f4bf37 100644
--- a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
+++ b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
@@ -56,6 +56,8 @@ static int hang_init(struct hang *h, struct drm_i915_private *i915)
 	if (IS_ERR(h->ctx))
 		return PTR_ERR(h->ctx);
 
+	GEM_BUG_ON(i915_gem_context_is_bannable(h->ctx));
+
 	h->hws = i915_gem_object_create_internal(i915, PAGE_SIZE);
 	if (IS_ERR(h->hws)) {
 		err = PTR_ERR(h->hws);
@@ -242,6 +244,12 @@ hang_create_request(struct hang *h, struct intel_engine_cs *engine)
 	*batch++ = MI_BATCH_BUFFER_END; /* not reached */
 	i915_gem_chipset_flush(h->i915);
 
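+	/* Emit the initial breadcrumb so the hanging request reads as started */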
+	if (rq->engine->emit_init_breadcrumb) {
+		err = rq->engine->emit_init_breadcrumb(rq);
+		if (err)
+			goto cancel_rq;
+	}
+
 	flags = 0;
 	if (INTEL_GEN(vm->i915) <= 5)
 		flags |= I915_DISPATCH_SECURE;
@@ -334,7 +342,7 @@ static int igt_hang_sanitycheck(void *arg)
 			timeout = i915_request_wait(rq,
 						    I915_WAIT_LOCKED,
 						    MAX_SCHEDULE_TIMEOUT);
-		if (i915_terminally_wedged(&i915->gpu_error))
+		if (i915_reset_failed(i915))
 			timeout = -EIO;
 
 		i915_request_put(rq);
@@ -375,7 +383,7 @@ static int igt_global_reset(void *arg)
 
 	igt_global_reset_unlock(i915);
 
-	if (i915_terminally_wedged(&i915->gpu_error))
+	if (i915_reset_failed(i915))
 		err = -EIO;
 
 	return err;
@@ -393,15 +401,13 @@ static int igt_wedged_reset(void *arg)
 
 	i915_gem_set_wedged(i915);
 
-	mutex_lock(&i915->drm.struct_mutex);
-	GEM_BUG_ON(!i915_terminally_wedged(&i915->gpu_error));
+	GEM_BUG_ON(!i915_reset_failed(i915));
 	i915_reset(i915, ALL_ENGINES, NULL);
-	mutex_unlock(&i915->drm.struct_mutex);
 
 	intel_runtime_pm_put(i915, wakeref);
 	igt_global_reset_unlock(i915);
 
-	return i915_terminally_wedged(&i915->gpu_error) ? -EIO : 0;
+	return i915_reset_failed(i915) ? -EIO : 0;
 }
 
 static bool wait_for_idle(struct intel_engine_cs *engine)
@@ -409,6 +415,222 @@ static bool wait_for_idle(struct intel_engine_cs *engine)
 	return wait_for(intel_engine_is_idle(engine), IGT_IDLE_TIMEOUT) == 0;
 }
 
+static int igt_reset_nop(void *arg)
+{
+	struct drm_i915_private *i915 = arg;
+	struct intel_engine_cs *engine;
+	struct i915_gem_context *ctx;
+	unsigned int reset_count, count;
+	enum intel_engine_id id;
+	intel_wakeref_t wakeref;
+	struct drm_file *file;
+	IGT_TIMEOUT(end_time);
+	int err = 0;
+
+	/* Check that we can reset during non-user portions of requests */
+
+	file = mock_file(i915);
+	if (IS_ERR(file))
+		return PTR_ERR(file);
+
+	mutex_lock(&i915->drm.struct_mutex);
+	ctx = live_context(i915, file);
+	mutex_unlock(&i915->drm.struct_mutex);
+	if (IS_ERR(ctx)) {
+		err = PTR_ERR(ctx);
+		goto out;
+	}
+
+	i915_gem_context_clear_bannable(ctx);
+	wakeref = intel_runtime_pm_get(i915);
+	reset_count = i915_reset_count(&i915->gpu_error);
+	count = 0;
+	do {
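+		/* Submit a burst of trivial requests to every engine, then reset */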
+		mutex_lock(&i915->drm.struct_mutex);
+		for_each_engine(engine, i915, id) {
+			int i;
+
+			for (i = 0; i < 16; i++) {
+				struct i915_request *rq;
+
+				rq = i915_request_alloc(engine, ctx);
+				if (IS_ERR(rq)) {
+					err = PTR_ERR(rq);
+					break;
+				}
+
+				i915_request_add(rq);
+			}
+		}
+		mutex_unlock(&i915->drm.struct_mutex);
+
+		igt_global_reset_lock(i915);
+		i915_reset(i915, ALL_ENGINES, NULL);
+		igt_global_reset_unlock(i915);
+		if (i915_reset_failed(i915)) {
+			err = -EIO;
+			break;
+		}
+
+		if (i915_reset_count(&i915->gpu_error) !=
+		    reset_count + ++count) {
+			pr_err("Full GPU reset not recorded!\n");
+			err = -EINVAL;
+			break;
+		}
+
+		if (!i915_reset_flush(i915)) {
+			struct drm_printer p =
+				drm_info_printer(i915->drm.dev);
+
+			pr_err("%s failed to idle after reset\n",
+			       engine->name);
+			intel_engine_dump(engine, &p,
+					  "%s\n", engine->name);
+
+			err = -EIO;
+			break;
+		}
+
+		err = igt_flush_test(i915, 0);
+		if (err)
+			break;
+	} while (time_before(jiffies, end_time));
+	pr_info("%s: %d resets\n", __func__, count);
+
+	mutex_lock(&i915->drm.struct_mutex);
+	err = igt_flush_test(i915, I915_WAIT_LOCKED);
+	mutex_unlock(&i915->drm.struct_mutex);
+
+	intel_runtime_pm_put(i915, wakeref);
+
+out:
+	mock_file_free(i915, file);
+	if (i915_reset_failed(i915))
+		err = -EIO;
+	return err;
+}
+
+static int igt_reset_nop_engine(void *arg)
+{
+	struct drm_i915_private *i915 = arg;
+	struct intel_engine_cs *engine;
+	struct i915_gem_context *ctx;
+	enum intel_engine_id id;
+	intel_wakeref_t wakeref;
+	struct drm_file *file;
+	int err = 0;
+
+	/* Check that we can engine-reset during non-user portions */
+
+	if (!intel_has_reset_engine(i915))
+		return 0;
+
+	file = mock_file(i915);
+	if (IS_ERR(file))
+		return PTR_ERR(file);
+
+	mutex_lock(&i915->drm.struct_mutex);
+	ctx = live_context(i915, file);
+	mutex_unlock(&i915->drm.struct_mutex);
+	if (IS_ERR(ctx)) {
+		err = PTR_ERR(ctx);
+		goto out;
+	}
+
+	i915_gem_context_clear_bannable(ctx);
+	wakeref = intel_runtime_pm_get(i915);
+	for_each_engine(engine, i915, id) {
+		unsigned int reset_count, reset_engine_count;
+		unsigned int count;
+		IGT_TIMEOUT(end_time);
+
+		reset_count = i915_reset_count(&i915->gpu_error);
+		reset_engine_count = i915_reset_engine_count(&i915->gpu_error,
+							     engine);
+		count = 0;
+
+		set_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);
+		do {
+			int i;
+
+			if (!wait_for_idle(engine)) {
+				pr_err("%s failed to idle before reset\n",
+				       engine->name);
+				err = -EIO;
+				break;
+			}
+
+			mutex_lock(&i915->drm.struct_mutex);
+			for (i = 0; i < 16; i++) {
+				struct i915_request *rq;
+
+				rq = i915_request_alloc(engine, ctx);
+				if (IS_ERR(rq)) {
+					err = PTR_ERR(rq);
+					break;
+				}
+
+				i915_request_add(rq);
+			}
+			mutex_unlock(&i915->drm.struct_mutex);
+
+			err = i915_reset_engine(engine, NULL);
+			if (err) {
+				pr_err("i915_reset_engine failed\n");
+				break;
+			}
+
+			if (i915_reset_count(&i915->gpu_error) != reset_count) {
+				pr_err("Full GPU reset recorded! (engine reset expected)\n");
+				err = -EINVAL;
+				break;
+			}
+
+			if (i915_reset_engine_count(&i915->gpu_error, engine) !=
+			    reset_engine_count + ++count) {
+				pr_err("%s engine reset not recorded!\n",
+				       engine->name);
+				err = -EINVAL;
+				break;
+			}
+
+			if (!i915_reset_flush(i915)) {
+				struct drm_printer p =
+					drm_info_printer(i915->drm.dev);
+
+				pr_err("%s failed to idle after reset\n",
+				       engine->name);
+				intel_engine_dump(engine, &p,
+						  "%s\n", engine->name);
+
+				err = -EIO;
+				break;
+			}
+		} while (time_before(jiffies, end_time));
+		clear_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);
+		pr_info("%s(%s): %d resets\n", __func__, engine->name, count);
+
+		if (err)
+			break;
+
+		err = igt_flush_test(i915, 0);
+		if (err)
+			break;
+	}
+
+	mutex_lock(&i915->drm.struct_mutex);
+	err = igt_flush_test(i915, I915_WAIT_LOCKED);
+	mutex_unlock(&i915->drm.struct_mutex);
+
+	intel_runtime_pm_put(i915, wakeref);
+out:
+	mock_file_free(i915, file);
+	if (i915_reset_failed(i915))
+		err = -EIO;
+	return err;
+}
+
 static int __igt_reset_engine(struct drm_i915_private *i915, bool active)
 {
 	struct intel_engine_cs *engine;
@@ -523,7 +745,7 @@ static int __igt_reset_engine(struct drm_i915_private *i915, bool active)
 			break;
 	}
 
-	if (i915_terminally_wedged(&i915->gpu_error))
+	if (i915_reset_failed(i915))
 		err = -EIO;
 
 	if (active) {
@@ -565,11 +787,10 @@ static int active_request_put(struct i915_request *rq)
 		return 0;
 
 	if (i915_request_wait(rq, 0, 5 * HZ) < 0) {
-		GEM_TRACE("%s timed out waiting for completion of fence %llx:%lld, seqno %d.\n",
+		GEM_TRACE("%s timed out waiting for completion of fence %llx:%lld\n",
 			  rq->engine->name,
 			  rq->fence.context,
-			  rq->fence.seqno,
-			  i915_request_global_seqno(rq));
+			  rq->fence.seqno);
 		GEM_TRACE_DUMP();
 
 		i915_gem_set_wedged(rq->i915);
@@ -762,7 +983,23 @@ static int __igt_reset_engines(struct drm_i915_private *i915,
 			count++;
 
 			if (rq) {
-				i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT);
+				if (i915_request_wait(rq, 0, HZ / 5) < 0) {
+					struct drm_printer p =
+						drm_info_printer(i915->drm.dev);
+
+					pr_err("i915_reset_engine(%s:%s):"
+					       " failed to complete request after reset\n",
+					       engine->name, test_name);
+					intel_engine_dump(engine, &p,
+							  "%s\n", engine->name);
+					i915_request_put(rq);
+
+					GEM_TRACE_DUMP();
+					i915_gem_set_wedged(i915);
+					err = -EIO;
+					break;
+				}
+
 				i915_request_put(rq);
 			}
 
@@ -837,7 +1074,7 @@ static int __igt_reset_engines(struct drm_i915_private *i915,
 			break;
 	}
 
-	if (i915_terminally_wedged(&i915->gpu_error))
+	if (i915_reset_failed(i915))
 		err = -EIO;
 
 	if (flags & TEST_ACTIVE) {
@@ -905,7 +1142,7 @@ static int igt_reset_wait(void *arg)
 	long timeout;
 	int err;
 
-	if (!intel_engine_can_store_dword(i915->engine[RCS]))
+	if (!intel_engine_can_store_dword(i915->engine[RCS0]))
 		return 0;
 
 	/* Check that we detect a stuck waiter and issue a reset */
@@ -917,7 +1154,7 @@ static int igt_reset_wait(void *arg)
 	if (err)
 		goto unlock;
 
-	rq = hang_create_request(&h, i915->engine[RCS]);
+	rq = hang_create_request(&h, i915->engine[RCS0]);
 	if (IS_ERR(rq)) {
 		err = PTR_ERR(rq);
 		goto fini;
@@ -963,7 +1200,7 @@ static int igt_reset_wait(void *arg)
 	mutex_unlock(&i915->drm.struct_mutex);
 	igt_global_reset_unlock(i915);
 
-	if (i915_terminally_wedged(&i915->gpu_error))
+	if (i915_reset_failed(i915))
 		return -EIO;
 
 	return err;
@@ -1034,13 +1271,11 @@ static int __igt_reset_evict_vma(struct drm_i915_private *i915,
 	struct hang h;
 	int err;
 
-	if (!intel_engine_can_store_dword(i915->engine[RCS]))
+	if (!intel_engine_can_store_dword(i915->engine[RCS0]))
 		return 0;
 
 	/* Check that we can recover an unbind stuck on a hanging request */
 
-	igt_global_reset_lock(i915);
-
 	mutex_lock(&i915->drm.struct_mutex);
 	err = hang_init(&h, i915);
 	if (err)
@@ -1066,7 +1301,7 @@ static int __igt_reset_evict_vma(struct drm_i915_private *i915,
 		goto out_obj;
 	}
 
-	rq = hang_create_request(&h, i915->engine[RCS]);
+	rq = hang_create_request(&h, i915->engine[RCS0]);
 	if (IS_ERR(rq)) {
 		err = PTR_ERR(rq);
 		goto out_obj;
@@ -1138,7 +1373,9 @@ static int __igt_reset_evict_vma(struct drm_i915_private *i915,
 	}
 
 out_reset:
-	fake_hangcheck(rq->i915, intel_engine_flag(rq->engine));
+	igt_global_reset_lock(i915);
+	fake_hangcheck(rq->i915, rq->engine->mask);
+	igt_global_reset_unlock(i915);
 
 	if (tsk) {
 		struct igt_wedge_me w;
@@ -1159,9 +1396,8 @@ static int __igt_reset_evict_vma(struct drm_i915_private *i915,
 	hang_fini(&h);
 unlock:
 	mutex_unlock(&i915->drm.struct_mutex);
-	igt_global_reset_unlock(i915);
 
-	if (i915_terminally_wedged(&i915->gpu_error))
+	if (i915_reset_failed(i915))
 		return -EIO;
 
 	return err;
@@ -1317,7 +1553,7 @@ static int igt_reset_queue(void *arg)
 				goto fini;
 			}
 
-			reset_count = fake_hangcheck(i915, ENGINE_MASK(id));
+			reset_count = fake_hangcheck(i915, BIT(id));
 
 			if (prev->fence.error != -EIO) {
 				pr_err("GPU reset not recorded on hanging request [fence.error=%d]!\n",
@@ -1367,7 +1603,7 @@ static int igt_reset_queue(void *arg)
 	mutex_unlock(&i915->drm.struct_mutex);
 	igt_global_reset_unlock(i915);
 
-	if (i915_terminally_wedged(&i915->gpu_error))
+	if (i915_reset_failed(i915))
 		return -EIO;
 
 	return err;
@@ -1376,7 +1612,7 @@ static int igt_reset_queue(void *arg)
 static int igt_handle_error(void *arg)
 {
 	struct drm_i915_private *i915 = arg;
-	struct intel_engine_cs *engine = i915->engine[RCS];
+	struct intel_engine_cs *engine = i915->engine[RCS0];
 	struct hang h;
 	struct i915_request *rq;
 	struct i915_gpu_state *error;
@@ -1423,7 +1659,7 @@ static int igt_handle_error(void *arg)
 	/* Temporarily disable error capture */
 	error = xchg(&i915->gpu_error.first_error, (void *)-1);
 
-	i915_handle_error(i915, ENGINE_MASK(engine->id), 0, NULL);
+	i915_handle_error(i915, engine->mask, 0, NULL);
 
 	xchg(&i915->gpu_error.first_error, error);
 
@@ -1547,7 +1783,7 @@ static int igt_atomic_reset_engine(struct intel_engine_cs *engine,
 			i915_request_wait(rq,
 					  I915_WAIT_LOCKED,
 					  MAX_SCHEDULE_TIMEOUT);
-		if (i915_terminally_wedged(&i915->gpu_error))
+		if (i915_reset_failed(i915))
 			err = -EIO;
 	}
 
@@ -1586,7 +1822,7 @@ static int igt_atomic_reset(void *arg)
 
 	/* Flush any requests before we get started and check basics */
 	force_reset(i915);
-	if (i915_terminally_wedged(&i915->gpu_error))
+	if (i915_reset_failed(i915))
 		goto unlock;
 
 	if (intel_has_gpu_reset(i915)) {
@@ -1642,6 +1878,8 @@ int intel_hangcheck_live_selftests(struct drm_i915_private *i915)
 		SUBTEST(igt_global_reset), /* attempt to recover GPU first */
 		SUBTEST(igt_wedged_reset),
 		SUBTEST(igt_hang_sanitycheck),
+		SUBTEST(igt_reset_nop),
+		SUBTEST(igt_reset_nop_engine),
 		SUBTEST(igt_reset_idle_engine),
 		SUBTEST(igt_reset_active_engine),
 		SUBTEST(igt_reset_engines),
@@ -1660,7 +1898,7 @@ int intel_hangcheck_live_selftests(struct drm_i915_private *i915)
 	if (!intel_has_gpu_reset(i915))
 		return 0;
 
-	if (i915_terminally_wedged(&i915->gpu_error))
+	if (i915_terminally_wedged(i915))
 		return -EIO; /* we're long past hope of a successful reset */
 
 	wakeref = intel_runtime_pm_get(i915);
diff --git a/drivers/gpu/drm/i915/selftests/intel_lrc.c b/drivers/gpu/drm/i915/selftests/intel_lrc.c
index 58144e024751fced7c19649b2569f72ab350c310..d61520ea03c13536ba5d36aff9f993fc49e0e0a9 100644
--- a/drivers/gpu/drm/i915/selftests/intel_lrc.c
+++ b/drivers/gpu/drm/i915/selftests/intel_lrc.c
@@ -88,6 +88,9 @@ static int live_preempt(void *arg)
 	if (!HAS_LOGICAL_RING_PREEMPTION(i915))
 		return 0;
 
+	if (!(i915->caps.scheduler & I915_SCHEDULER_CAP_PREEMPTION))
+		pr_err("Logical preemption supported, but not exposed\n");
+
 	mutex_lock(&i915->drm.struct_mutex);
 	wakeref = intel_runtime_pm_get(i915);
 
@@ -112,6 +115,9 @@ static int live_preempt(void *arg)
 	for_each_engine(engine, i915, id) {
 		struct i915_request *rq;
 
+		if (!intel_engine_has_preemption(engine))
+			continue;
+
 		rq = igt_spinner_create_request(&spin_lo, ctx_lo, engine,
 						MI_ARB_CHECK);
 		if (IS_ERR(rq)) {
@@ -203,6 +209,9 @@ static int live_late_preempt(void *arg)
 	for_each_engine(engine, i915, id) {
 		struct i915_request *rq;
 
+		if (!intel_engine_has_preemption(engine))
+			continue;
+
 		rq = igt_spinner_create_request(&spin_lo, ctx_lo, engine,
 						MI_ARB_CHECK);
 		if (IS_ERR(rq)) {
@@ -335,6 +344,9 @@ static int live_suppress_self_preempt(void *arg)
 		struct i915_request *rq_a, *rq_b;
 		int depth;
 
+		if (!intel_engine_has_preemption(engine))
+			continue;
+
 		engine->execlists.preempt_hang.count = 0;
 
 		rq_a = igt_spinner_create_request(&a.spin,
@@ -407,6 +419,171 @@ static int live_suppress_self_preempt(void *arg)
 	goto err_client_b;
 }
 
+static int __i915_sw_fence_call
+dummy_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
+{
+	return NOTIFY_DONE;
+}
+
+static struct i915_request *dummy_request(struct intel_engine_cs *engine)
+{
+	struct i915_request *rq;
+
+	rq = kzalloc(sizeof(*rq), GFP_KERNEL);
+	if (!rq)
+		return NULL;
+
+	INIT_LIST_HEAD(&rq->active_list);
+	rq->engine = engine;
+
+	i915_sched_node_init(&rq->sched);
+
+	/* mark this request as permanently incomplete */
+	rq->fence.seqno = 1;
+	BUILD_BUG_ON(sizeof(rq->fence.seqno) != 8); /* upper 32b == 0 */
+	rq->hwsp_seqno = (u32 *)&rq->fence.seqno + 1;
+	GEM_BUG_ON(i915_request_completed(rq));
+
+	i915_sw_fence_init(&rq->submit, dummy_notify);
+	i915_sw_fence_commit(&rq->submit);
+
+	return rq;
+}
+
+static void dummy_request_free(struct i915_request *dummy)
+{
+	i915_request_mark_complete(dummy);
+	i915_sched_node_fini(&dummy->sched);
+	i915_sw_fence_fini(&dummy->submit);
+
+	dma_fence_free(&dummy->fence);
+}
+
+static int live_suppress_wait_preempt(void *arg)
+{
+	struct drm_i915_private *i915 = arg;
+	struct preempt_client client[4];
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
+	intel_wakeref_t wakeref;
+	int err = -ENOMEM;
+	int i;
+
+	/*
+	 * Waiters are given a little priority nudge, but not enough
+	 * to actually cause any preemption. Double check that we do
+	 * not needlessly generate preempt-to-idle cycles.
+	 */
+
+	if (!HAS_LOGICAL_RING_PREEMPTION(i915))
+		return 0;
+
+	mutex_lock(&i915->drm.struct_mutex);
+	wakeref = intel_runtime_pm_get(i915);
+
+	if (preempt_client_init(i915, &client[0])) /* ELSP[0] */
+		goto err_unlock;
+	if (preempt_client_init(i915, &client[1])) /* ELSP[1] */
+		goto err_client_0;
+	if (preempt_client_init(i915, &client[2])) /* head of queue */
+		goto err_client_1;
+	if (preempt_client_init(i915, &client[3])) /* bystander */
+		goto err_client_2;
+
+	for_each_engine(engine, i915, id) {
+		int depth;
+
+		if (!intel_engine_has_preemption(engine))
+			continue;
+
+		if (!engine->emit_init_breadcrumb)
+			continue;
+
+		for (depth = 0; depth < ARRAY_SIZE(client); depth++) {
+			struct i915_request *rq[ARRAY_SIZE(client)];
+			struct i915_request *dummy;
+
+			engine->execlists.preempt_hang.count = 0;
+
+			dummy = dummy_request(engine);
+			if (!dummy)
+				goto err_client_3;
+
+			for (i = 0; i < ARRAY_SIZE(client); i++) {
+				rq[i] = igt_spinner_create_request(&client[i].spin,
+								   client[i].ctx, engine,
+								   MI_NOOP);
+				if (IS_ERR(rq[i])) {
+					err = PTR_ERR(rq[i]);
+					goto err_wedged;
+				}
+
+				/* Disable NEWCLIENT promotion */
+				__i915_active_request_set(&rq[i]->timeline->last_request,
+							  dummy);
+				i915_request_add(rq[i]);
+			}
+
+			dummy_request_free(dummy);
+
+			GEM_BUG_ON(i915_request_completed(rq[0]));
+			if (!igt_wait_for_spinner(&client[0].spin, rq[0])) {
+				pr_err("%s: First client failed to start\n",
+				       engine->name);
+				goto err_wedged;
+			}
+			GEM_BUG_ON(!i915_request_started(rq[0]));
+
+			if (i915_request_wait(rq[depth],
+					      I915_WAIT_LOCKED |
+					      I915_WAIT_PRIORITY,
+					      1) != -ETIME) {
+				pr_err("%s: Waiter depth:%d completed!\n",
+				       engine->name, depth);
+				goto err_wedged;
+			}
+
+			for (i = 0; i < ARRAY_SIZE(client); i++)
+				igt_spinner_end(&client[i].spin);
+
+			if (igt_flush_test(i915, I915_WAIT_LOCKED))
+				goto err_wedged;
+
+			if (engine->execlists.preempt_hang.count) {
+				pr_err("%s: Preemption recorded x%d, depth %d; should have been suppressed!\n",
+				       engine->name,
+				       engine->execlists.preempt_hang.count,
+				       depth);
+				err = -EINVAL;
+				goto err_client_3;
+			}
+		}
+	}
+
+	err = 0;
+err_client_3:
+	preempt_client_fini(&client[3]);
+err_client_2:
+	preempt_client_fini(&client[2]);
+err_client_1:
+	preempt_client_fini(&client[1]);
+err_client_0:
+	preempt_client_fini(&client[0]);
+err_unlock:
+	if (igt_flush_test(i915, I915_WAIT_LOCKED))
+		err = -EIO;
+	intel_runtime_pm_put(i915, wakeref);
+	mutex_unlock(&i915->drm.struct_mutex);
+	return err;
+
+err_wedged:
+	for (i = 0; i < ARRAY_SIZE(client); i++)
+		igt_spinner_end(&client[i].spin);
+	i915_gem_set_wedged(i915);
+	err = -EIO;
+	goto err_client_3;
+}
+
 static int live_chain_preempt(void *arg)
 {
 	struct drm_i915_private *i915 = arg;
@@ -440,6 +617,9 @@ static int live_chain_preempt(void *arg)
 		};
 		int count, i;
 
+		if (!intel_engine_has_preemption(engine))
+			continue;
+
 		for_each_prime_number_from(count, 1, 32) { /* must fit ring! */
 			struct i915_request *rq;
 
@@ -767,7 +947,7 @@ static int smoke_crescendo(struct preempt_smoke *smoke, unsigned int flags)
 
 	pr_info("Submitted %lu crescendo:%x requests across %d engines and %d contexts\n",
 		count, flags,
-		RUNTIME_INFO(smoke->i915)->num_rings, smoke->ncontext);
+		RUNTIME_INFO(smoke->i915)->num_engines, smoke->ncontext);
 	return 0;
 }
 
@@ -795,7 +975,7 @@ static int smoke_random(struct preempt_smoke *smoke, unsigned int flags)
 
 	pr_info("Submitted %lu random:%x requests across %d engines and %d contexts\n",
 		count, flags,
-		RUNTIME_INFO(smoke->i915)->num_rings, smoke->ncontext);
+		RUNTIME_INFO(smoke->i915)->num_engines, smoke->ncontext);
 	return 0;
 }
 
@@ -887,6 +1067,7 @@ int intel_execlists_live_selftests(struct drm_i915_private *i915)
 		SUBTEST(live_preempt),
 		SUBTEST(live_late_preempt),
 		SUBTEST(live_suppress_self_preempt),
+		SUBTEST(live_suppress_wait_preempt),
 		SUBTEST(live_chain_preempt),
 		SUBTEST(live_preempt_hang),
 		SUBTEST(live_preempt_smoke),
@@ -895,7 +1076,7 @@ int intel_execlists_live_selftests(struct drm_i915_private *i915)
 	if (!HAS_EXECLISTS(i915))
 		return 0;
 
-	if (i915_terminally_wedged(&i915->gpu_error))
+	if (i915_terminally_wedged(i915))
 		return 0;
 
 	return i915_subtests(tests, i915);
diff --git a/drivers/gpu/drm/i915/selftests/intel_workarounds.c b/drivers/gpu/drm/i915/selftests/intel_workarounds.c
index b15c4f26c5933c32a086d8b4441ccbac1846d424..f2a2b51a4662b6e412d052b6dbad30d0fae39e67 100644
--- a/drivers/gpu/drm/i915/selftests/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/selftests/intel_workarounds.c
@@ -12,6 +12,14 @@
 #include "igt_spinner.h"
 #include "igt_wedge_me.h"
 #include "mock_context.h"
+#include "mock_drm.h"
+
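+/* Write-only registers cannot be read back, so skip them when verifying */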
+static const struct wo_register {
+	enum intel_platform platform;
+	u32 reg;
+} wo_registers[] = {
+	{ INTEL_GEMINILAKE, 0x731c }
+};
 
 #define REF_NAME_MAX (INTEL_ENGINE_CS_MAX_NAME + 4)
 struct wa_lists {
@@ -74,7 +82,7 @@ read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
 	if (IS_ERR(result))
 		return result;
 
-	i915_gem_object_set_cache_level(result, I915_CACHE_LLC);
+	i915_gem_object_set_cache_coherency(result, I915_CACHE_LLC);
 
 	cs = i915_gem_object_pin_map(result, I915_MAP_WB);
 	if (IS_ERR(cs)) {
@@ -181,7 +189,7 @@ static int check_whitelist(struct i915_gem_context *ctx,
 	err = 0;
 	igt_wedge_on_timeout(&wedge, ctx->i915, HZ / 5) /* a safety net! */
 		err = i915_gem_object_set_to_cpu_domain(results, false);
-	if (i915_terminally_wedged(&ctx->i915->gpu_error))
+	if (i915_terminally_wedged(ctx->i915))
 		err = -EIO;
 	if (err)
 		goto out_put;
@@ -214,7 +222,7 @@ static int check_whitelist(struct i915_gem_context *ctx,
 
 static int do_device_reset(struct intel_engine_cs *engine)
 {
-	i915_reset(engine->i915, ENGINE_MASK(engine->id), "live_workarounds");
+	i915_reset(engine->i915, engine->mask, "live_workarounds");
 	return 0;
 }
 
@@ -236,15 +244,11 @@ switch_to_scratch_context(struct intel_engine_cs *engine,
 	if (IS_ERR(ctx))
 		return PTR_ERR(ctx);
 
+	GEM_BUG_ON(i915_gem_context_is_bannable(ctx));
+
 	rq = ERR_PTR(-ENODEV);
-	with_intel_runtime_pm(engine->i915, wakeref) {
-		if (spin)
-			rq = igt_spinner_create_request(spin,
-							ctx, engine,
-							MI_NOOP);
-		else
-			rq = i915_request_alloc(engine, ctx);
-	}
+	with_intel_runtime_pm(engine->i915, wakeref)
+		rq = igt_spinner_create_request(spin, ctx, engine, MI_NOOP);
 
 	kernel_context_close(ctx);
 
@@ -273,7 +277,6 @@ static int check_whitelist_across_reset(struct intel_engine_cs *engine,
 					const char *name)
 {
 	struct drm_i915_private *i915 = engine->i915;
-	bool want_spin = reset == do_engine_reset;
 	struct i915_gem_context *ctx;
 	struct igt_spinner spin;
 	intel_wakeref_t wakeref;
@@ -282,11 +285,9 @@ static int check_whitelist_across_reset(struct intel_engine_cs *engine,
 	pr_info("Checking %d whitelisted registers (RING_NONPRIV) [%s]\n",
 		engine->whitelist.count, name);
 
-	if (want_spin) {
-		err = igt_spinner_init(&spin, i915);
-		if (err)
-			return err;
-	}
+	err = igt_spinner_init(&spin, i915);
+	if (err)
+		return err;
 
 	ctx = kernel_context(i915);
 	if (IS_ERR(ctx))
@@ -298,17 +299,15 @@ static int check_whitelist_across_reset(struct intel_engine_cs *engine,
 		goto out;
 	}
 
-	err = switch_to_scratch_context(engine, want_spin ? &spin : NULL);
+	err = switch_to_scratch_context(engine, &spin);
 	if (err)
 		goto out;
 
 	with_intel_runtime_pm(i915, wakeref)
 		err = reset(engine);
 
-	if (want_spin) {
-		igt_spinner_end(&spin);
-		igt_spinner_fini(&spin);
-	}
+	igt_spinner_end(&spin);
+	igt_spinner_fini(&spin);
 
 	if (err) {
 		pr_err("%s reset failed\n", name);
@@ -340,10 +339,377 @@ static int check_whitelist_across_reset(struct intel_engine_cs *engine,
 	return err;
 }
 
+static struct i915_vma *create_scratch(struct i915_gem_context *ctx)
+{
+	struct drm_i915_gem_object *obj;
+	struct i915_vma *vma;
+	void *ptr;
+	int err;
+
+	obj = i915_gem_object_create_internal(ctx->i915, PAGE_SIZE);
+	if (IS_ERR(obj))
+		return ERR_CAST(obj);
+
+	i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);
+
+	ptr = i915_gem_object_pin_map(obj, I915_MAP_WB);
+	if (IS_ERR(ptr)) {
+		err = PTR_ERR(ptr);
+		goto err_obj;
+	}
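+	/* Poison the scratch page so unwritten results stand out */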
+	memset(ptr, 0xc5, PAGE_SIZE);
+	i915_gem_object_unpin_map(obj);
+
+	vma = i915_vma_instance(obj, &ctx->ppgtt->vm, NULL);
+	if (IS_ERR(vma)) {
+		err = PTR_ERR(vma);
+		goto err_obj;
+	}
+
+	err = i915_vma_pin(vma, 0, 0, PIN_USER);
+	if (err)
+		goto err_obj;
+
+	err = i915_gem_object_set_to_cpu_domain(obj, false);
+	if (err)
+		goto err_obj;
+
+	return vma;
+
+err_obj:
+	i915_gem_object_put(obj);
+	return ERR_PTR(err);
+}
+
+static struct i915_vma *create_batch(struct i915_gem_context *ctx)
+{
+	struct drm_i915_gem_object *obj;
+	struct i915_vma *vma;
+	int err;
+
+	obj = i915_gem_object_create_internal(ctx->i915, 16 * PAGE_SIZE);
+	if (IS_ERR(obj))
+		return ERR_CAST(obj);
+
+	vma = i915_vma_instance(obj, &ctx->ppgtt->vm, NULL);
+	if (IS_ERR(vma)) {
+		err = PTR_ERR(vma);
+		goto err_obj;
+	}
+
+	err = i915_vma_pin(vma, 0, 0, PIN_USER);
+	if (err)
+		goto err_obj;
+
+	err = i915_gem_object_set_to_wc_domain(obj, true);
+	if (err)
+		goto err_obj;
+
+	return vma;
+
+err_obj:
+	i915_gem_object_put(obj);
+	return ERR_PTR(err);
+}
+
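+/*
+ * Model the expected readback after writing 'new' to a whitelisted
+ * register: a register that reads back 0x0000ffff after a write of ~0u
+ * uses its upper 16 bits as a write-enable mask for the lower 16;
+ * otherwise only the bits set in 'rsvd' accept writes.
+ */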
+static u32 reg_write(u32 old, u32 new, u32 rsvd)
+{
+	if (rsvd == 0x0000ffff) {
+		old &= ~(new >> 16);
+		old |= new & (new >> 16);
+	} else {
+		old &= ~rsvd;
+		old |= new & rsvd;
+	}
+
+	return old;
+}
+
+static bool wo_register(struct intel_engine_cs *engine, u32 reg)
+{
+	enum intel_platform platform = INTEL_INFO(engine->i915)->platform;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(wo_registers); i++) {
+		if (wo_registers[i].platform == platform &&
+		    wo_registers[i].reg == reg)
+			return true;
+	}
+
+	return false;
+}
+
+static int check_dirty_whitelist(struct i915_gem_context *ctx,
+				 struct intel_engine_cs *engine)
+{
+	const u32 values[] = {
+		0x00000000,
+		0x01010101,
+		0x10100101,
+		0x03030303,
+		0x30300303,
+		0x05050505,
+		0x50500505,
+		0x0f0f0f0f,
+		0xf00ff00f,
+		0x10101010,
+		0xf0f01010,
+		0x30303030,
+		0xa0a03030,
+		0x50505050,
+		0xc0c05050,
+		0xf0f0f0f0,
+		0x11111111,
+		0x33333333,
+		0x55555555,
+		0x0000ffff,
+		0x00ff00ff,
+		0xff0000ff,
+		0xffff00ff,
+		0xffffffff,
+	};
+	struct i915_vma *scratch;
+	struct i915_vma *batch;
+	int err = 0, i, v;
+	u32 *cs, *results;
+
+	scratch = create_scratch(ctx);
+	if (IS_ERR(scratch))
+		return PTR_ERR(scratch);
+
+	batch = create_batch(ctx);
+	if (IS_ERR(batch)) {
+		err = PTR_ERR(batch);
+		goto out_scratch;
+	}
+
+	for (i = 0; i < engine->whitelist.count; i++) {
+		u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);
+		u64 addr = scratch->node.start;
+		struct i915_request *rq;
+		u32 srm, lrm, rsvd;
+		u32 expect;
+		int idx;
+
+		if (wo_register(engine, reg))
+			continue;
+
+		srm = MI_STORE_REGISTER_MEM;
+		lrm = MI_LOAD_REGISTER_MEM;
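+		/* gen8+ SRM/LRM take a 64b address, adding one dword to the command */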
+		if (INTEL_GEN(ctx->i915) >= 8)
+			lrm++, srm++;
+
+		pr_debug("%s: Writing garbage to %x\n",
+			 engine->name, reg);
+
+		cs = i915_gem_object_pin_map(batch->obj, I915_MAP_WC);
+		if (IS_ERR(cs)) {
+			err = PTR_ERR(cs);
+			goto out_batch;
+		}
+
+		/* SRM original */
+		*cs++ = srm;
+		*cs++ = reg;
+		*cs++ = lower_32_bits(addr);
+		*cs++ = upper_32_bits(addr);
+
+		idx = 1;
+		for (v = 0; v < ARRAY_SIZE(values); v++) {
+			/* LRI garbage */
+			*cs++ = MI_LOAD_REGISTER_IMM(1);
+			*cs++ = reg;
+			*cs++ = values[v];
+
+			/* SRM result */
+			*cs++ = srm;
+			*cs++ = reg;
+			*cs++ = lower_32_bits(addr + sizeof(u32) * idx);
+			*cs++ = upper_32_bits(addr + sizeof(u32) * idx);
+			idx++;
+		}
+		for (v = 0; v < ARRAY_SIZE(values); v++) {
+			/* LRI garbage */
+			*cs++ = MI_LOAD_REGISTER_IMM(1);
+			*cs++ = reg;
+			*cs++ = ~values[v];
+
+			/* SRM result */
+			*cs++ = srm;
+			*cs++ = reg;
+			*cs++ = lower_32_bits(addr + sizeof(u32) * idx);
+			*cs++ = upper_32_bits(addr + sizeof(u32) * idx);
+			idx++;
+		}
+		GEM_BUG_ON(idx * sizeof(u32) > scratch->size);
+
+		/* LRM original -- don't leave garbage in the context! */
+		*cs++ = lrm;
+		*cs++ = reg;
+		*cs++ = lower_32_bits(addr);
+		*cs++ = upper_32_bits(addr);
+
+		*cs++ = MI_BATCH_BUFFER_END;
+
+		i915_gem_object_unpin_map(batch->obj);
+		i915_gem_chipset_flush(ctx->i915);
+
+		rq = i915_request_alloc(engine, ctx);
+		if (IS_ERR(rq)) {
+			err = PTR_ERR(rq);
+			goto out_batch;
+		}
+
+		if (engine->emit_init_breadcrumb) { /* Be nice if we hang */
+			err = engine->emit_init_breadcrumb(rq);
+			if (err)
+				goto err_request;
+		}
+
+		err = engine->emit_bb_start(rq,
+					    batch->node.start, PAGE_SIZE,
+					    0);
+
+err_request:
+		i915_request_add(rq);
+		if (err)
+			goto out_batch;
+
+		if (i915_request_wait(rq, I915_WAIT_LOCKED, HZ / 5) < 0) {
+			pr_err("%s: Futzing %x timed out; cancelling test\n",
+			       engine->name, reg);
+			i915_gem_set_wedged(ctx->i915);
+			err = -EIO;
+			goto out_batch;
+		}
+
+		results = i915_gem_object_pin_map(scratch->obj, I915_MAP_WB);
+		if (IS_ERR(results)) {
+			err = PTR_ERR(results);
+			goto out_batch;
+		}
+
+		GEM_BUG_ON(values[ARRAY_SIZE(values) - 1] != 0xffffffff);
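+		/*
+		 * The last value written was ~0; its readback exposes
+		 * which bits accepted the write.
+		 */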
+		rsvd = results[ARRAY_SIZE(values)]; /* detect write masking */
+		if (!rsvd) {
+			pr_err("%s: Unable to write to whitelisted register %x\n",
+			       engine->name, reg);
+			err = -EINVAL;
+			goto out_unpin;
+		}
+
+		expect = results[0];
+		idx = 1;
+		for (v = 0; v < ARRAY_SIZE(values); v++) {
+			expect = reg_write(expect, values[v], rsvd);
+			if (results[idx] != expect)
+				err++;
+			idx++;
+		}
+		for (v = 0; v < ARRAY_SIZE(values); v++) {
+			expect = reg_write(expect, ~values[v], rsvd);
+			if (results[idx] != expect)
+				err++;
+			idx++;
+		}
+		if (err) {
+			pr_err("%s: %d mismatches between values written to whitelisted register [%x] and the values read back!\n",
+			       engine->name, err, reg);
+
+			pr_info("%s: Whitelisted register: %x, original value %08x, rsvd %08x\n",
+				engine->name, reg, results[0], rsvd);
+
+			expect = results[0];
+			idx = 1;
+			for (v = 0; v < ARRAY_SIZE(values); v++) {
+				u32 w = values[v];
+
+				expect = reg_write(expect, w, rsvd);
+				pr_info("Wrote %08x, read %08x, expect %08x\n",
+					w, results[idx], expect);
+				idx++;
+			}
+			for (v = 0; v < ARRAY_SIZE(values); v++) {
+				u32 w = ~values[v];
+
+				expect = reg_write(expect, w, rsvd);
+				pr_info("Wrote %08x, read %08x, expect %08x\n",
+					w, results[idx], expect);
+				idx++;
+			}
+
+			err = -EINVAL;
+		}
+out_unpin:
+		i915_gem_object_unpin_map(scratch->obj);
+		if (err)
+			break;
+	}
+
+	if (igt_flush_test(ctx->i915, I915_WAIT_LOCKED))
+		err = -EIO;
+out_batch:
+	i915_vma_unpin_and_release(&batch, 0);
+out_scratch:
+	i915_vma_unpin_and_release(&scratch, 0);
+	return err;
+}
+
+static int live_dirty_whitelist(void *arg)
+{
+	struct drm_i915_private *i915 = arg;
+	struct intel_engine_cs *engine;
+	struct i915_gem_context *ctx;
+	enum intel_engine_id id;
+	intel_wakeref_t wakeref;
+	struct drm_file *file;
+	int err = 0;
+
+	/* Can the user write to the whitelisted registers? */
+
+	if (INTEL_GEN(i915) < 7) /* minimum requirement for LRI, SRM, LRM */
+		return 0;
+
+	wakeref = intel_runtime_pm_get(i915);
+
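+	/* mock_file() needs to take struct_mutex itself, so drop ours */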
+	mutex_unlock(&i915->drm.struct_mutex);
+	file = mock_file(i915);
+	mutex_lock(&i915->drm.struct_mutex);
+	if (IS_ERR(file)) {
+		err = PTR_ERR(file);
+		goto out_rpm;
+	}
+
+	ctx = live_context(i915, file);
+	if (IS_ERR(ctx)) {
+		err = PTR_ERR(ctx);
+		goto out_file;
+	}
+
+	for_each_engine(engine, i915, id) {
+		if (engine->whitelist.count == 0)
+			continue;
+
+		err = check_dirty_whitelist(ctx, engine);
+		if (err)
+			goto out_file;
+	}
+
+out_file:
+	mutex_unlock(&i915->drm.struct_mutex);
+	mock_file_free(i915, file);
+	mutex_lock(&i915->drm.struct_mutex);
+out_rpm:
+	intel_runtime_pm_put(i915, wakeref);
+	return err;
+}
+
 static int live_reset_whitelist(void *arg)
 {
 	struct drm_i915_private *i915 = arg;
-	struct intel_engine_cs *engine = i915->engine[RCS];
+	struct intel_engine_cs *engine = i915->engine[RCS0];
 	int err = 0;
 
 	/* If we reset the gpu, we should not lose the RING_NONPRIV */
@@ -513,13 +879,14 @@ live_engine_reset_gt_engine_workarounds(void *arg)
 int intel_workarounds_live_selftests(struct drm_i915_private *i915)
 {
 	static const struct i915_subtest tests[] = {
+		SUBTEST(live_dirty_whitelist),
 		SUBTEST(live_reset_whitelist),
 		SUBTEST(live_gpu_reset_gt_engine_workarounds),
 		SUBTEST(live_engine_reset_gt_engine_workarounds),
 	};
 	int err;
 
-	if (i915_terminally_wedged(&i915->gpu_error))
+	if (i915_terminally_wedged(i915))
 		return 0;
 
 	mutex_lock(&i915->drm.struct_mutex);
diff --git a/drivers/gpu/drm/i915/selftests/mock_context.c b/drivers/gpu/drm/i915/selftests/mock_context.c
index b646cdcdd6029a95bace9ed5f537abcf28464abb..8efa6892c6cd96481d31effb5296e4e41d320962 100644
--- a/drivers/gpu/drm/i915/selftests/mock_context.c
+++ b/drivers/gpu/drm/i915/selftests/mock_context.c
@@ -30,7 +30,6 @@ mock_context(struct drm_i915_private *i915,
 	     const char *name)
 {
 	struct i915_gem_context *ctx;
-	unsigned int n;
 	int ret;
 
 	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
@@ -41,12 +40,14 @@ mock_context(struct drm_i915_private *i915,
 	INIT_LIST_HEAD(&ctx->link);
 	ctx->i915 = i915;
 
+	ctx->hw_contexts = RB_ROOT;
+	spin_lock_init(&ctx->hw_contexts_lock);
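+	/* per-engine HW contexts are tracked in an rbtree under the lock */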
+
 	INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
 	INIT_LIST_HEAD(&ctx->handles_list);
 	INIT_LIST_HEAD(&ctx->hw_id_link);
-
-	for (n = 0; n < ARRAY_SIZE(ctx->__engine); n++)
-		intel_context_init(&ctx->__engine[n], ctx, i915->engine[n]);
+	INIT_LIST_HEAD(&ctx->active_engines);
+	mutex_init(&ctx->mutex);
 
 	ret = i915_gem_context_pin_hw_id(ctx);
 	if (ret < 0)
diff --git a/drivers/gpu/drm/i915/selftests/mock_engine.c b/drivers/gpu/drm/i915/selftests/mock_engine.c
index 08f0cab02e0f331d9ecfbb03e25cc5f7b241852d..639d36eb904a802cdcb7b6cbe42b084bf84233e9 100644
--- a/drivers/gpu/drm/i915/selftests/mock_engine.c
+++ b/drivers/gpu/drm/i915/selftests/mock_engine.c
@@ -57,6 +57,7 @@ static struct intel_ring *mock_ring(struct intel_engine_cs *engine)
 		return NULL;
 	}
 
+	kref_init(&ring->base.ref);
 	ring->base.size = sz;
 	ring->base.effective_size = sz;
 	ring->base.vaddr = (void *)(ring + 1);
@@ -76,28 +77,26 @@ static void mock_ring_free(struct intel_ring *base)
 	kfree(ring);
 }
 
-static struct mock_request *first_request(struct mock_engine *engine)
+static struct i915_request *first_request(struct mock_engine *engine)
 {
 	return list_first_entry_or_null(&engine->hw_queue,
-					struct mock_request,
-					link);
+					struct i915_request,
+					mock.link);
 }
 
-static void advance(struct mock_request *request)
+static void advance(struct i915_request *request)
 {
-	list_del_init(&request->link);
-	intel_engine_write_global_seqno(request->base.engine,
-					request->base.global_seqno);
-	i915_request_mark_complete(&request->base);
-	GEM_BUG_ON(!i915_request_completed(&request->base));
+	list_del_init(&request->mock.link);
+	i915_request_mark_complete(request);
+	GEM_BUG_ON(!i915_request_completed(request));
 
-	intel_engine_queue_breadcrumbs(request->base.engine);
+	intel_engine_queue_breadcrumbs(request->engine);
 }
 
 static void hw_delay_complete(struct timer_list *t)
 {
 	struct mock_engine *engine = from_timer(engine, t, hw_delay);
-	struct mock_request *request;
+	struct i915_request *request;
 	unsigned long flags;
 
 	spin_lock_irqsave(&engine->hw_lock, flags);
@@ -112,8 +111,9 @@ static void hw_delay_complete(struct timer_list *t)
 	 * requeue the timer for the next delayed request.
 	 */
 	while ((request = first_request(engine))) {
-		if (request->delay) {
-			mod_timer(&engine->hw_delay, jiffies + request->delay);
+		if (request->mock.delay) {
+			mod_timer(&engine->hw_delay,
+				  jiffies + request->mock.delay);
 			break;
 		}
 
@@ -126,55 +126,43 @@ static void hw_delay_complete(struct timer_list *t)
 static void mock_context_unpin(struct intel_context *ce)
 {
 	mock_timeline_unpin(ce->ring->timeline);
-	i915_gem_context_put(ce->gem_context);
 }
 
-static void mock_context_destroy(struct intel_context *ce)
+static void mock_context_destroy(struct kref *ref)
 {
-	GEM_BUG_ON(ce->pin_count);
+	struct intel_context *ce = container_of(ref, typeof(*ce), ref);
+
+	GEM_BUG_ON(intel_context_is_pinned(ce));
 
 	if (ce->ring)
 		mock_ring_free(ce->ring);
-}
 
-static const struct intel_context_ops mock_context_ops = {
-	.unpin = mock_context_unpin,
-	.destroy = mock_context_destroy,
-};
+	intel_context_free(ce);
+}
 
-static struct intel_context *
-mock_context_pin(struct intel_engine_cs *engine,
-		 struct i915_gem_context *ctx)
+static int mock_context_pin(struct intel_context *ce)
 {
-	struct intel_context *ce = to_intel_context(ctx, engine);
-	int err = -ENOMEM;
-
-	if (ce->pin_count++)
-		return ce;
-
 	if (!ce->ring) {
-		ce->ring = mock_ring(engine);
+		ce->ring = mock_ring(ce->engine);
 		if (!ce->ring)
-			goto err;
+			return -ENOMEM;
 	}
 
 	mock_timeline_pin(ce->ring->timeline);
+	return 0;
+}
 
-	ce->ops = &mock_context_ops;
-	i915_gem_context_get(ctx);
-	return ce;
+static const struct intel_context_ops mock_context_ops = {
+	.pin = mock_context_pin,
+	.unpin = mock_context_unpin,
 
-err:
-	ce->pin_count = 0;
-	return ERR_PTR(err);
-}
+	.destroy = mock_context_destroy,
+};
 
 static int mock_request_alloc(struct i915_request *request)
 {
-	struct mock_request *mock = container_of(request, typeof(*mock), base);
-
-	INIT_LIST_HEAD(&mock->link);
-	mock->delay = 0;
+	INIT_LIST_HEAD(&request->mock.link);
+	request->mock.delay = 0;
 
 	return 0;
 }
@@ -192,21 +180,20 @@ static u32 *mock_emit_breadcrumb(struct i915_request *request, u32 *cs)
 
 static void mock_submit_request(struct i915_request *request)
 {
-	struct mock_request *mock = container_of(request, typeof(*mock), base);
 	struct mock_engine *engine =
 		container_of(request->engine, typeof(*engine), base);
 	unsigned long flags;
 
 	i915_request_submit(request);
-	GEM_BUG_ON(!request->global_seqno);
 
 	spin_lock_irqsave(&engine->hw_lock, flags);
-	list_add_tail(&mock->link, &engine->hw_queue);
-	if (mock->link.prev == &engine->hw_queue) {
-		if (mock->delay)
-			mod_timer(&engine->hw_delay, jiffies + mock->delay);
+	list_add_tail(&request->mock.link, &engine->hw_queue);
+	if (list_is_first(&request->mock.link, &engine->hw_queue)) {
+		if (request->mock.delay)
+			mod_timer(&engine->hw_delay,
+				  jiffies + request->mock.delay);
 		else
-			advance(mock);
+			advance(request);
 	}
 	spin_unlock_irqrestore(&engine->hw_lock, flags);
 }
@@ -227,9 +214,10 @@ struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,
 	engine->base.i915 = i915;
 	snprintf(engine->base.name, sizeof(engine->base.name), "%s", name);
 	engine->base.id = id;
+	engine->base.mask = BIT(id);
 	engine->base.status_page.addr = (void *)(engine + 1);
 
-	engine->base.context_pin = mock_context_pin;
+	engine->base.cops = &mock_context_ops;
 	engine->base.request_alloc = mock_request_alloc;
 	engine->base.emit_flush = mock_emit_flush;
 	engine->base.emit_fini_breadcrumb = mock_emit_breadcrumb;
@@ -249,7 +237,8 @@ struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,
 	timer_setup(&engine->hw_delay, hw_delay_complete, 0);
 	INIT_LIST_HEAD(&engine->hw_queue);
 
-	if (IS_ERR(intel_context_pin(i915->kernel_context, &engine->base)))
+	if (pin_context(i915->kernel_context, &engine->base,
+			&engine->base.kernel_context))
 		goto err_breadcrumbs;
 
 	return &engine->base;
@@ -266,19 +255,18 @@ void mock_engine_flush(struct intel_engine_cs *engine)
 {
 	struct mock_engine *mock =
 		container_of(engine, typeof(*mock), base);
-	struct mock_request *request, *rn;
+	struct i915_request *request, *rn;
 
 	del_timer_sync(&mock->hw_delay);
 
 	spin_lock_irq(&mock->hw_lock);
-	list_for_each_entry_safe(request, rn, &mock->hw_queue, link)
+	list_for_each_entry_safe(request, rn, &mock->hw_queue, mock.link)
 		advance(request);
 	spin_unlock_irq(&mock->hw_lock);
 }
 
 void mock_engine_reset(struct intel_engine_cs *engine)
 {
-	intel_engine_write_global_seqno(engine, 0);
 }
 
 void mock_engine_free(struct intel_engine_cs *engine)
@@ -293,7 +281,7 @@ void mock_engine_free(struct intel_engine_cs *engine)
 	if (ce)
 		intel_context_unpin(ce);
 
-	__intel_context_unpin(engine->i915->kernel_context, engine);
+	intel_context_unpin(engine->kernel_context);
 
 	intel_engine_fini_breadcrumbs(engine);
 	i915_timeline_fini(&engine->timeline);
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
index 14ae46fda49f1c816fa2d5bb3e88a847eddcd57d..54cfb611c0aa3a772d4c53f2683d9f9f547d3bfe 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
@@ -79,12 +79,6 @@ static void mock_device_release(struct drm_device *dev)
 
 	destroy_workqueue(i915->wq);
 
-	kmem_cache_destroy(i915->priorities);
-	kmem_cache_destroy(i915->dependencies);
-	kmem_cache_destroy(i915->requests);
-	kmem_cache_destroy(i915->vmas);
-	kmem_cache_destroy(i915->objects);
-
 	i915_gemfs_fini(i915);
 
 	drm_mode_config_cleanup(&i915->drm);
@@ -115,6 +109,10 @@ static void mock_retire_work_handler(struct work_struct *work)
 
 static void mock_idle_work_handler(struct work_struct *work)
 {
+	struct drm_i915_private *i915 =
+		container_of(work, typeof(*i915), gt.idle_work.work);
+
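+	/* on idling, the mock device simply forgets which engines were awake */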
+	i915->gt.active_engines = 0;
 }
 
 static int pm_domain_resume(struct device *dev)
@@ -189,6 +187,7 @@ struct drm_i915_private *mock_gem_device(void)
 
 	init_waitqueue_head(&i915->gpu_error.wait_queue);
 	init_waitqueue_head(&i915->gpu_error.reset_queue);
+	init_srcu_struct(&i915->gpu_error.reset_backoff_srcu);
 	mutex_init(&i915->gpu_error.wedge_mutex);
 
 	i915->wq = alloc_ordered_workqueue("mock", 0);
@@ -202,31 +201,6 @@ struct drm_i915_private *mock_gem_device(void)
 
 	i915->gt.awake = true;
 
-	i915->objects = KMEM_CACHE(mock_object, SLAB_HWCACHE_ALIGN);
-	if (!i915->objects)
-		goto err_wq;
-
-	i915->vmas = KMEM_CACHE(i915_vma, SLAB_HWCACHE_ALIGN);
-	if (!i915->vmas)
-		goto err_objects;
-
-	i915->requests = KMEM_CACHE(mock_request,
-				    SLAB_HWCACHE_ALIGN |
-				    SLAB_RECLAIM_ACCOUNT |
-				    SLAB_TYPESAFE_BY_RCU);
-	if (!i915->requests)
-		goto err_vmas;
-
-	i915->dependencies = KMEM_CACHE(i915_dependency,
-					SLAB_HWCACHE_ALIGN |
-					SLAB_RECLAIM_ACCOUNT);
-	if (!i915->dependencies)
-		goto err_requests;
-
-	i915->priorities = KMEM_CACHE(i915_priolist, SLAB_HWCACHE_ALIGN);
-	if (!i915->priorities)
-		goto err_dependencies;
-
 	i915_timelines_init(i915);
 
 	INIT_LIST_HEAD(&i915->gt.active_rings);
@@ -236,13 +210,13 @@ struct drm_i915_private *mock_gem_device(void)
 
 	mock_init_ggtt(i915, &i915->ggtt);
 
-	mkwrite_device_info(i915)->ring_mask = BIT(0);
+	mkwrite_device_info(i915)->engine_mask = BIT(0);
 	i915->kernel_context = mock_context(i915, NULL);
 	if (!i915->kernel_context)
 		goto err_unlock;
 
-	i915->engine[RCS] = mock_engine(i915, "mock", RCS);
-	if (!i915->engine[RCS])
+	i915->engine[RCS0] = mock_engine(i915, "mock", RCS0);
+	if (!i915->engine[RCS0])
 		goto err_context;
 
 	mutex_unlock(&i915->drm.struct_mutex);
@@ -256,16 +230,6 @@ struct drm_i915_private *mock_gem_device(void)
 err_unlock:
 	mutex_unlock(&i915->drm.struct_mutex);
 	i915_timelines_fini(i915);
-	kmem_cache_destroy(i915->priorities);
-err_dependencies:
-	kmem_cache_destroy(i915->dependencies);
-err_requests:
-	kmem_cache_destroy(i915->requests);
-err_vmas:
-	kmem_cache_destroy(i915->vmas);
-err_objects:
-	kmem_cache_destroy(i915->objects);
-err_wq:
 	destroy_workqueue(i915->wq);
 err_drv:
 	drm_mode_config_cleanup(&i915->drm);
diff --git a/drivers/gpu/drm/i915/selftests/mock_request.c b/drivers/gpu/drm/i915/selftests/mock_request.c
index 0dc29e24259725f8f64983600fd395481ace8387..d1a7c9608712894dc4eee47d6d14b3e2fe16d711 100644
--- a/drivers/gpu/drm/i915/selftests/mock_request.c
+++ b/drivers/gpu/drm/i915/selftests/mock_request.c
@@ -31,29 +31,25 @@ mock_request(struct intel_engine_cs *engine,
 	     unsigned long delay)
 {
 	struct i915_request *request;
-	struct mock_request *mock;
 
-	/* NB the i915->requests slab cache is enlarged to fit mock_request */
+	/* NB the mock state is embedded in struct i915_request for selftests */
 	request = i915_request_alloc(engine, context);
 	if (IS_ERR(request))
 		return NULL;
 
-	mock = container_of(request, typeof(*mock), base);
-	mock->delay = delay;
-
-	return &mock->base;
+	request->mock.delay = delay;
+	return request;
 }
 
 bool mock_cancel_request(struct i915_request *request)
 {
-	struct mock_request *mock = container_of(request, typeof(*mock), base);
 	struct mock_engine *engine =
 		container_of(request->engine, typeof(*engine), base);
 	bool was_queued;
 
 	spin_lock_irq(&engine->hw_lock);
-	was_queued = !list_empty(&mock->link);
-	list_del_init(&mock->link);
+	was_queued = !list_empty(&request->mock.link);
+	list_del_init(&request->mock.link);
 	spin_unlock_irq(&engine->hw_lock);
 
 	if (was_queued)
diff --git a/drivers/gpu/drm/i915/selftests/mock_request.h b/drivers/gpu/drm/i915/selftests/mock_request.h
index 995fb728380c83ef973e8087e830887d426857e6..4acf0211df200fa34e58c85034bac0e6f8b7ae2b 100644
--- a/drivers/gpu/drm/i915/selftests/mock_request.h
+++ b/drivers/gpu/drm/i915/selftests/mock_request.h
@@ -29,13 +29,6 @@
 
 #include "../i915_request.h"
 
-struct mock_request {
-	struct i915_request base;
-
-	struct list_head link;
-	unsigned long delay;
-};
-
 struct i915_request *
 mock_request(struct intel_engine_cs *engine,
 	     struct i915_gem_context *context,
diff --git a/drivers/gpu/drm/i915/selftests/mock_timeline.c b/drivers/gpu/drm/i915/selftests/mock_timeline.c
index d2de9ece211820e320c067832f346ad3c2cab180..416d85233263dbecf04229b3e6cf51558a9aed41 100644
--- a/drivers/gpu/drm/i915/selftests/mock_timeline.c
+++ b/drivers/gpu/drm/i915/selftests/mock_timeline.c
@@ -14,6 +14,7 @@ void mock_timeline_init(struct i915_timeline *timeline, u64 context)
 	timeline->fence_context = context;
 
 	spin_lock_init(&timeline->lock);
+	mutex_init(&timeline->mutex);
 
 	INIT_ACTIVE_REQUEST(&timeline->barrier);
 	INIT_ACTIVE_REQUEST(&timeline->last_request);
diff --git a/drivers/gpu/drm/i915/test_i915_active_types_standalone.c b/drivers/gpu/drm/i915/test_i915_active_types_standalone.c
new file mode 100644
index 0000000000000000000000000000000000000000..144ebd153e570b72def50b3370d0d14ee8e4a3ff
--- /dev/null
+++ b/drivers/gpu/drm/i915/test_i915_active_types_standalone.c
@@ -0,0 +1,7 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "i915_active_types.h"
diff --git a/drivers/gpu/drm/i915/test_i915_gem_context_types_standalone.c b/drivers/gpu/drm/i915/test_i915_gem_context_types_standalone.c
new file mode 100644
index 0000000000000000000000000000000000000000..4e4da4860bc271ba0537ef5c75d87b25b39a1975
--- /dev/null
+++ b/drivers/gpu/drm/i915/test_i915_gem_context_types_standalone.c
@@ -0,0 +1,7 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "i915_gem_context_types.h"
diff --git a/drivers/gpu/drm/i915/test_i915_timeline_types_standalone.c b/drivers/gpu/drm/i915/test_i915_timeline_types_standalone.c
new file mode 100644
index 0000000000000000000000000000000000000000..f58e148e89461d67670a379dae37eadaef44e8e1
--- /dev/null
+++ b/drivers/gpu/drm/i915/test_i915_timeline_types_standalone.c
@@ -0,0 +1,7 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "i915_timeline_types.h"
diff --git a/drivers/gpu/drm/i915/test_intel_context_types_standalone.c b/drivers/gpu/drm/i915/test_intel_context_types_standalone.c
new file mode 100644
index 0000000000000000000000000000000000000000..b39e3c4e6551841994e5726aeaa353dcaf5d57a0
--- /dev/null
+++ b/drivers/gpu/drm/i915/test_intel_context_types_standalone.c
@@ -0,0 +1,7 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "intel_context_types.h"
diff --git a/drivers/gpu/drm/i915/test_intel_engine_types_standalone.c b/drivers/gpu/drm/i915/test_intel_engine_types_standalone.c
new file mode 100644
index 0000000000000000000000000000000000000000..d05e4cdcbcf91b023c032ac740a8560b0953a8a9
--- /dev/null
+++ b/drivers/gpu/drm/i915/test_intel_engine_types_standalone.c
@@ -0,0 +1,7 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "intel_engine_types.h"
diff --git a/drivers/gpu/drm/i915/test_intel_workarounds_types_standalone.c b/drivers/gpu/drm/i915/test_intel_workarounds_types_standalone.c
new file mode 100644
index 0000000000000000000000000000000000000000..4f658bb0082500571d8a0944213bda5759574a11
--- /dev/null
+++ b/drivers/gpu/drm/i915/test_intel_workarounds_types_standalone.c
@@ -0,0 +1,7 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "intel_workarounds_types.h"
diff --git a/include/drm/drm_audio_component.h b/include/drm/drm_audio_component.h
index 93a386be38fa1155b95fab99123ad8f1c4dc1d7b..a45f93487039520c92b4daebabf410ecbd792cde 100644
--- a/include/drm/drm_audio_component.h
+++ b/include/drm/drm_audio_component.h
@@ -19,14 +19,17 @@ struct drm_audio_component_ops {
 	 * @get_power: get the POWER_DOMAIN_AUDIO power well
 	 *
 	 * Request the power well to be turned on.
+	 *
+	 * Returns a wakeref cookie to be passed back to the corresponding
+	 * call to @put_power.
 	 */
-	void (*get_power)(struct device *);
+	unsigned long (*get_power)(struct device *);
 	/**
 	 * @put_power: put the POWER_DOMAIN_AUDIO power well
 	 *
 	 * Allow the power well to be turned off.
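+	 *
+	 * Takes the wakeref cookie returned by the matching call to
+	 * @get_power.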
 	 */
-	void (*put_power)(struct device *);
+	void (*put_power)(struct device *, unsigned long);
 	/**
 	 * @codec_wake_override: Enable/disable codec wake signal
 	 */
diff --git a/include/drm/drm_hdcp.h b/include/drm/drm_hdcp.h
index 7260b31af276baac935f7c7ee0271bd27c433555..f243408ecf2671524598797bd2d9b55f437a17a6 100644
--- a/include/drm/drm_hdcp.h
+++ b/include/drm/drm_hdcp.h
@@ -13,6 +13,7 @@
 
 /* Period of hdcp checks (to ensure we're still authenticated) */
 #define DRM_HDCP_CHECK_PERIOD_MS		(128 * 16)
+#define DRM_HDCP2_CHECK_PERIOD_MS		500
 
 /* Shared lengths/masks between HDMI/DVI/DisplayPort */
 #define DRM_HDCP_AN_LEN				8
@@ -68,7 +69,6 @@
 #define HDCP_2_2_REP_SEND_ACK			15
 #define HDCP_2_2_REP_STREAM_MANAGE		16
 #define HDCP_2_2_REP_STREAM_READY		17
-#define HDCP_2_2_ERRATA_DP_STREAM_TYPE		50
 
 #define HDCP_2_2_RTX_LEN			8
 #define HDCP_2_2_RRX_LEN			8
@@ -219,11 +219,6 @@ struct hdcp2_rep_stream_ready {
 	u8	m_prime[HDCP_2_2_MPRIME_LEN];
 } __packed;
 
-struct hdcp2_dp_errata_stream_type {
-	u8	msg_id;
-	u8	stream_type;
-} __packed;
-
 /* HDCP2.2 TIMEOUTs in mSec */
 #define HDCP_2_2_CERT_TIMEOUT_MS		100
 #define HDCP_2_2_HPRIME_NO_PAIRED_TIMEOUT_MS	1000
diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h
index d2fad7b0fcf65a7334cf074c40ab2ce738301dbb..291b5e3fa59ce1189a2502417a2723d92579f4ce 100644
--- a/include/drm/i915_pciids.h
+++ b/include/drm/i915_pciids.h
@@ -373,6 +373,30 @@
 #define INTEL_AML_CFL_GT2_IDS(info) \
 	INTEL_VGA_DEVICE(0x87CA, info)
 
+/* CML GT1 */
+#define INTEL_CML_GT1_IDS(info)	\
+	INTEL_VGA_DEVICE(0x9B21, info), \
+	INTEL_VGA_DEVICE(0x9BAA, info), \
+	INTEL_VGA_DEVICE(0x9BAB, info), \
+	INTEL_VGA_DEVICE(0x9BAC, info), \
+	INTEL_VGA_DEVICE(0x9BA0, info), \
+	INTEL_VGA_DEVICE(0x9BA5, info), \
+	INTEL_VGA_DEVICE(0x9BA8, info), \
+	INTEL_VGA_DEVICE(0x9BA4, info), \
+	INTEL_VGA_DEVICE(0x9BA2, info)
+
+/* CML GT2 */
+#define INTEL_CML_GT2_IDS(info)	\
+	INTEL_VGA_DEVICE(0x9B41, info), \
+	INTEL_VGA_DEVICE(0x9BCA, info), \
+	INTEL_VGA_DEVICE(0x9BCB, info), \
+	INTEL_VGA_DEVICE(0x9BCC, info), \
+	INTEL_VGA_DEVICE(0x9BC0, info), \
+	INTEL_VGA_DEVICE(0x9BC5, info), \
+	INTEL_VGA_DEVICE(0x9BC8, info), \
+	INTEL_VGA_DEVICE(0x9BC4, info), \
+	INTEL_VGA_DEVICE(0x9BC2, info)
+
 #define INTEL_KBL_IDS(info) \
 	INTEL_KBL_GT1_IDS(info), \
 	INTEL_KBL_GT2_IDS(info), \
@@ -436,7 +460,9 @@
 	INTEL_WHL_U_GT1_IDS(info), \
 	INTEL_WHL_U_GT2_IDS(info), \
 	INTEL_WHL_U_GT3_IDS(info), \
-	INTEL_AML_CFL_GT2_IDS(info)
+	INTEL_AML_CFL_GT2_IDS(info), \
+	INTEL_CML_GT1_IDS(info), \
+	INTEL_CML_GT2_IDS(info)
 
 /* CNL */
 #define INTEL_CNL_IDS(info) \
@@ -469,6 +495,7 @@
 	INTEL_VGA_DEVICE(0x8A57, info), \
 	INTEL_VGA_DEVICE(0x8A56, info), \
 	INTEL_VGA_DEVICE(0x8A71, info), \
-	INTEL_VGA_DEVICE(0x8A70, info)
+	INTEL_VGA_DEVICE(0x8A70, info), \
+	INTEL_VGA_DEVICE(0x8A53, info)
 
 #endif /* _I915_PCIIDS_H */
diff --git a/include/sound/hdaudio.h b/include/sound/hdaudio.h
index 45f944d57982800cc176ed782aedbf9b1d30f1fe..06f504c10b80ec84136a159d7156876006c44985 100644
--- a/include/sound/hdaudio.h
+++ b/include/sound/hdaudio.h
@@ -367,7 +367,7 @@ struct hdac_bus {
 	/* DRM component interface */
 	struct drm_audio_component *audio_component;
 	long display_power_status;
-	bool display_power_active;
+	unsigned long display_power_active;
 
 	/* parameters required for enhanced capabilities */
 	int num_streams;
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
index 397810fa2d33c95f69770bdf3563ea44213b40c6..aa2d4c73a97dd4f0b3881b1376a05dd23c5c8b22 100644
--- a/include/uapi/drm/i915_drm.h
+++ b/include/uapi/drm/i915_drm.h
@@ -99,6 +99,8 @@ enum drm_i915_gem_engine_class {
 	I915_ENGINE_CLASS_VIDEO		= 2,
 	I915_ENGINE_CLASS_VIDEO_ENHANCE	= 3,
 
+	/* should be kept compact */
+
 	I915_ENGINE_CLASS_INVALID	= -1
 };
 
@@ -319,6 +321,7 @@ typedef struct _drm_i915_sarea {
 #define DRM_I915_PERF_ADD_CONFIG	0x37
 #define DRM_I915_PERF_REMOVE_CONFIG	0x38
 #define DRM_I915_QUERY			0x39
+/* Must be kept compact -- no holes */
 
 #define DRM_IOCTL_I915_INIT		DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
 #define DRM_IOCTL_I915_FLUSH		DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
@@ -476,6 +479,7 @@ typedef struct drm_i915_irq_wait {
 #define   I915_SCHEDULER_CAP_ENABLED	(1ul << 0)
 #define   I915_SCHEDULER_CAP_PRIORITY	(1ul << 1)
 #define   I915_SCHEDULER_CAP_PREEMPTION	(1ul << 2)
+#define   I915_SCHEDULER_CAP_SEMAPHORES	(1ul << 3)
 
 #define I915_PARAM_HUC_STATUS		 42
 
@@ -559,6 +563,8 @@ typedef struct drm_i915_irq_wait {
  */
 #define I915_PARAM_MMAP_GTT_COHERENT	52
 
+/* Must be kept compact -- no holes and well documented */
+
 typedef struct drm_i915_getparam {
 	__s32 param;
 	/*
@@ -574,6 +580,7 @@ typedef struct drm_i915_getparam {
 #define I915_SETPARAM_TEX_LRU_LOG_GRANULARITY             2
 #define I915_SETPARAM_ALLOW_BATCHBUFFER                   3
 #define I915_SETPARAM_NUM_USED_FENCES                     4
+/* Must be kept compact -- no holes */
 
 typedef struct drm_i915_setparam {
 	int param;
@@ -972,7 +979,7 @@ struct drm_i915_gem_execbuffer2 {
 	 * struct drm_i915_gem_exec_fence *fences.
 	 */
 	__u64 cliprects_ptr;
-#define I915_EXEC_RING_MASK              (7<<0)
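+/* widened from 3 to 6 bits so more than 8 engines can be addressed */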
+#define I915_EXEC_RING_MASK              (0x3f)
 #define I915_EXEC_DEFAULT                (0<<0)
 #define I915_EXEC_RENDER                 (1<<0)
 #define I915_EXEC_BSD                    (2<<0)
@@ -1120,32 +1127,34 @@ struct drm_i915_gem_busy {
 	 * as busy may become idle before the ioctl is completed.
 	 *
 	 * Furthermore, if the object is busy, which engine is busy is only
-	 * provided as a guide. There are race conditions which prevent the
-	 * report of which engines are busy from being always accurate.
-	 * However, the converse is not true. If the object is idle, the
-	 * result of the ioctl, that all engines are idle, is accurate.
+	 * provided as a guide and only indirectly by reporting its class
+	 * (there may be more than one engine in each class). There are race
+	 * conditions which prevent the report of which engines are busy from
+	 * being always accurate. However, the converse is not true. If the
+	 * object is idle, the result of the ioctl, that all engines are idle,
+	 * is accurate.
 	 *
 	 * The returned dword is split into two fields to indicate both
-	 * the engines on which the object is being read, and the
-	 * engine on which it is currently being written (if any).
+	 * the engine classes on which the object is being read, and the
+	 * engine class on which it is currently being written (if any).
 	 *
 	 * The low word (bits 0:15) indicate if the object is being written
 	 * to by any engine (there can only be one, as the GEM implicit
 	 * synchronisation rules force writes to be serialised). Only the
-	 * engine for the last write is reported.
+	 * engine class (offset by 1, I915_ENGINE_CLASS_RENDER is reported as
+	 * 1 not 0 etc) for the last write is reported.
 	 *
-	 * The high word (bits 16:31) are a bitmask of which engines are
-	 * currently reading from the object. Multiple engines may be
+	 * The high word (bits 16:31) is a bitmask of which engine classes
+	 * are currently reading from the object. Multiple engines may be
 	 * reading from the object simultaneously.
 	 *
-	 * The value of each engine is the same as specified in the
-	 * EXECBUFFER2 ioctl, i.e. I915_EXEC_RENDER, I915_EXEC_BSD etc.
-	 * Note I915_EXEC_DEFAULT is a symbolic value and is mapped to
-	 * the I915_EXEC_RENDER engine for execution, and so it is never
+	 * The value of each engine class is the same as specified in the
+	 * I915_CONTEXT_SET_ENGINES parameter and via perf, i.e.
+	 * I915_ENGINE_CLASS_RENDER, I915_ENGINE_CLASS_COPY, etc.
-	 * reported as active itself. Some hardware may have parallel
+	 * Some hardware may have parallel
 	 * execution engines, e.g. multiple media engines, which are
-	 * mapped to the same identifier in the EXECBUFFER2 ioctl and
-	 * so are not separately reported for busyness.
+	 * mapped to the same class identifier and so are not separately
+	 * reported for busyness.
 	 *
 	 * Caveat emptor:
 	 * Only the boolean result of this query is reliable; that is whether
@@ -1491,6 +1500,27 @@ struct drm_i915_gem_context_param {
 	 * drm_i915_gem_context_param_sseu.
 	 */
 #define I915_CONTEXT_PARAM_SSEU		0x7
+
+/*
+ * Not all clients may want to attempt automatic recovery of a context after
+ * a hang (for example, some clients may only submit very small incremental
+ * batches relying on known logical state of previous batches which will never
+ * recover correctly and each attempt will hang), and so would prefer that
+ * the context is forever banned instead.
+ *
+ * If set to false (0), after a reset, subsequent (and in flight) rendering
+ * from this context is discarded, and the client will need to create a new
+ * context to use instead.
+ *
+ * If set to true (1), the kernel will automatically attempt to recover the
+ * context by skipping the hanging batch and executing the next batch starting
+ * from the default context state (discarding the incomplete logical context
+ * state lost due to the reset).
+ *
+ * On creation, all new contexts are marked as recoverable.
+ */
+#define I915_CONTEXT_PARAM_RECOVERABLE	0x8
+/* Must be kept compact -- no holes and well documented */
 	__u64 value;
 };
 
@@ -1714,6 +1744,7 @@ struct drm_i915_perf_oa_config {
 struct drm_i915_query_item {
 	__u64 query_id;
 #define DRM_I915_QUERY_TOPOLOGY_INFO    1
+/* Must be kept compact -- no holes and well documented */
 
 	/*
 	 * When set to zero by userspace, this is filled with the size of the
diff --git a/sound/hda/hdac_component.c b/sound/hda/hdac_component.c
index 5c95933e739a43bc5cd30829e43c0381cff1e989..13915fdc6a54c0335bc2ad6af283f6be97cb9f39 100644
--- a/sound/hda/hdac_component.c
+++ b/sound/hda/hdac_component.c
@@ -79,17 +79,23 @@ void snd_hdac_display_power(struct hdac_bus *bus, unsigned int idx, bool enable)
 
 	if (bus->display_power_status) {
 		if (!bus->display_power_active) {
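+			/*
+			 * Default the cookie to -1 so display_power_active
+			 * stays non-zero even without a get_power() hook.
+			 */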
+			unsigned long cookie = -1;
+
 			if (acomp->ops->get_power)
-				acomp->ops->get_power(acomp->dev);
+				cookie = acomp->ops->get_power(acomp->dev);
+
 			snd_hdac_set_codec_wakeup(bus, true);
 			snd_hdac_set_codec_wakeup(bus, false);
-			bus->display_power_active = true;
+			bus->display_power_active = cookie;
 		}
 	} else {
 		if (bus->display_power_active) {
+			unsigned long cookie = bus->display_power_active;
+
 			if (acomp->ops->put_power)
-				acomp->ops->put_power(acomp->dev);
-			bus->display_power_active = false;
+				acomp->ops->put_power(acomp->dev, cookie);
+
+			bus->display_power_active = 0;
 		}
 	}
 }
@@ -325,9 +331,9 @@ int snd_hdac_acomp_exit(struct hdac_bus *bus)
 		return 0;
 
 	if (WARN_ON(bus->display_power_active) && acomp->ops)
-		acomp->ops->put_power(acomp->dev);
+		acomp->ops->put_power(acomp->dev, bus->display_power_active);
 
-	bus->display_power_active = false;
+	bus->display_power_active = 0;
 	bus->display_power_status = 0;
 
 	component_master_del(dev, &hdac_component_master_ops);