diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 6294316f24c7a14305b401c853e6554939d5cdb5..14398f55f602f2454547c5fe3d50e680fe50647b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -436,6 +436,12 @@ struct amdgpu_cs_chunk {
 	void			*kdata;
 };
 
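+/*
+ * Post-submission dependency: a syncobj to signal with the job's fence.
+ * For timeline syncobjs, @chain is a pre-allocated chain node and @point
+ * the timeline point the fence gets attached to.
+ */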
+struct amdgpu_cs_post_dep {
+	struct drm_syncobj *syncobj;
+	struct dma_fence_chain *chain;
+	u64 point;
+};
+
 struct amdgpu_cs_parser {
 	struct amdgpu_device	*adev;
 	struct drm_file		*filp;
@@ -465,8 +471,8 @@ struct amdgpu_cs_parser {
 	/* user fence */
 	struct amdgpu_bo_list_entry	uf_entry;
 
-	unsigned num_post_dep_syncobjs;
-	struct drm_syncobj **post_dep_syncobjs;
+	unsigned			num_post_deps;
+	struct amdgpu_cs_post_dep	*post_deps;
 };
 
 static inline u32 amdgpu_get_ib_value(struct amdgpu_cs_parser *p,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 52a5e4fdc95b4604a36dc39921137030edf26d56..2f6239b6be6fec001f33f141ae4662b44226a31e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -215,6 +215,8 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, union drm_amdgpu_cs
 		case AMDGPU_CHUNK_ID_SYNCOBJ_IN:
 		case AMDGPU_CHUNK_ID_SYNCOBJ_OUT:
 		case AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES:
+		case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT:
+		case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL:
 			break;
 
 		default:
@@ -804,9 +806,11 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error,
 		ttm_eu_backoff_reservation(&parser->ticket,
 					   &parser->validated);
 
-	for (i = 0; i < parser->num_post_dep_syncobjs; i++)
-		drm_syncobj_put(parser->post_dep_syncobjs[i]);
-	kfree(parser->post_dep_syncobjs);
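+	/*
+	 * Chain nodes handed over to drm_syncobj_add_point() are cleared in
+	 * amdgpu_cs_post_dependencies(); only unused ones are freed here.
+	 */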
+	for (i = 0; i < parser->num_post_deps; i++) {
+		drm_syncobj_put(parser->post_deps[i].syncobj);
+		kfree(parser->post_deps[i].chain);
+	}
+	kfree(parser->post_deps);
 
 	dma_fence_put(parser->fence);
 
@@ -1117,13 +1121,18 @@ static int amdgpu_cs_process_fence_dep(struct amdgpu_cs_parser *p,
 }
 
 static int amdgpu_syncobj_lookup_and_add_to_sync(struct amdgpu_cs_parser *p,
-						 uint32_t handle)
+						 uint32_t handle, u64 point,
+						 u64 flags)
 {
-	int r;
 	struct dma_fence *fence;
-	r = drm_syncobj_find_fence(p->filp, handle, 0, 0, &fence);
-	if (r)
+	int r;
+
+	r = drm_syncobj_find_fence(p->filp, handle, point, flags, &fence);
+	if (r) {
+		DRM_ERROR("syncobj %u failed to find fence @ %llu (%d)!\n",
+			  handle, point, r);
 		return r;
+	}
 
 	r = amdgpu_sync_fence(p->adev, &p->job->sync, fence, true);
 	dma_fence_put(fence);
@@ -1134,46 +1143,118 @@ static int amdgpu_syncobj_lookup_and_add_to_sync(struct amdgpu_cs_parser *p,
 static int amdgpu_cs_process_syncobj_in_dep(struct amdgpu_cs_parser *p,
 					    struct amdgpu_cs_chunk *chunk)
 {
+	struct drm_amdgpu_cs_chunk_sem *deps;
 	unsigned num_deps;
 	int i, r;
-	struct drm_amdgpu_cs_chunk_sem *deps;
 
 	deps = (struct drm_amdgpu_cs_chunk_sem *)chunk->kdata;
 	num_deps = chunk->length_dw * 4 /
 		sizeof(struct drm_amdgpu_cs_chunk_sem);
+	for (i = 0; i < num_deps; ++i) {
+		r = amdgpu_syncobj_lookup_and_add_to_sync(p, deps[i].handle,
+							  0, 0);
+		if (r)
+			return r;
+	}
+
+	return 0;
+}
 
+static int amdgpu_cs_process_syncobj_timeline_in_dep(struct amdgpu_cs_parser *p,
+						     struct amdgpu_cs_chunk *chunk)
+{
+	struct drm_amdgpu_cs_chunk_syncobj *syncobj_deps;
+	unsigned num_deps;
+	int i, r;
+
+	syncobj_deps = (struct drm_amdgpu_cs_chunk_syncobj *)chunk->kdata;
+	num_deps = chunk->length_dw * 4 /
+		sizeof(struct drm_amdgpu_cs_chunk_syncobj);
 	for (i = 0; i < num_deps; ++i) {
-		r = amdgpu_syncobj_lookup_and_add_to_sync(p, deps[i].handle);
+		r = amdgpu_syncobj_lookup_and_add_to_sync(p,
+							  syncobj_deps[i].handle,
+							  syncobj_deps[i].point,
+							  syncobj_deps[i].flags);
 		if (r)
 			return r;
 	}
+
 	return 0;
 }
 
 static int amdgpu_cs_process_syncobj_out_dep(struct amdgpu_cs_parser *p,
 					     struct amdgpu_cs_chunk *chunk)
 {
+	struct drm_amdgpu_cs_chunk_sem *deps;
 	unsigned num_deps;
 	int i;
-	struct drm_amdgpu_cs_chunk_sem *deps;
+
 	deps = (struct drm_amdgpu_cs_chunk_sem *)chunk->kdata;
 	num_deps = chunk->length_dw * 4 /
 		sizeof(struct drm_amdgpu_cs_chunk_sem);
 
-	p->post_dep_syncobjs = kmalloc_array(num_deps,
-					     sizeof(struct drm_syncobj *),
-					     GFP_KERNEL);
-	p->num_post_dep_syncobjs = 0;
+	p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps),
+				     GFP_KERNEL);
+	p->num_post_deps = 0;
 
-	if (!p->post_dep_syncobjs)
+	if (!p->post_deps)
 		return -ENOMEM;
 
 	for (i = 0; i < num_deps; ++i) {
-		p->post_dep_syncobjs[i] = drm_syncobj_find(p->filp, deps[i].handle);
-		if (!p->post_dep_syncobjs[i])
+		p->post_deps[i].syncobj =
+			drm_syncobj_find(p->filp, deps[i].handle);
+		if (!p->post_deps[i].syncobj)
 			return -EINVAL;
-		p->num_post_dep_syncobjs++;
+		p->post_deps[i].chain = NULL;
+		p->post_deps[i].point = 0;
+		p->num_post_deps++;
 	}
+
+	return 0;
+}
+
+static int amdgpu_cs_process_syncobj_timeline_out_dep(struct amdgpu_cs_parser *p,
+						      struct amdgpu_cs_chunk *chunk)
+{
+	struct drm_amdgpu_cs_chunk_syncobj *syncobj_deps;
+	unsigned num_deps;
+	int i;
+
+	syncobj_deps = (struct drm_amdgpu_cs_chunk_syncobj *)chunk->kdata;
+	num_deps = chunk->length_dw * 4 /
+		sizeof(struct drm_amdgpu_cs_chunk_syncobj);
+
+	p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps),
+				     GFP_KERNEL);
+	p->num_post_deps = 0;
+
+	if (!p->post_deps)
+		return -ENOMEM;
+
+	for (i = 0; i < num_deps; ++i) {
+		struct amdgpu_cs_post_dep *dep = &p->post_deps[i];
+
+		dep->chain = NULL;
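+		/*
+		 * Pre-allocate the chain node for timeline points so that
+		 * signalling at submission time cannot fail on allocation.
+		 */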
+		if (syncobj_deps[i].point) {
+			dep->chain = kmalloc(sizeof(*dep->chain), GFP_KERNEL);
+			if (!dep->chain)
+				return -ENOMEM;
+		}
+
+		dep->syncobj = drm_syncobj_find(p->filp,
+						syncobj_deps[i].handle);
+		if (!dep->syncobj) {
+			kfree(dep->chain);
+			return -EINVAL;
+		}
+		dep->point = syncobj_deps[i].point;
+		p->num_post_deps++;
+	}
+
 	return 0;
 }
 
@@ -1187,19 +1268,33 @@ static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
 
 		chunk = &p->chunks[i];
 
-		if (chunk->chunk_id == AMDGPU_CHUNK_ID_DEPENDENCIES ||
-		    chunk->chunk_id == AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES) {
+		switch (chunk->chunk_id) {
+		case AMDGPU_CHUNK_ID_DEPENDENCIES:
+		case AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES:
 			r = amdgpu_cs_process_fence_dep(p, chunk);
 			if (r)
 				return r;
-		} else if (chunk->chunk_id == AMDGPU_CHUNK_ID_SYNCOBJ_IN) {
+			break;
+		case AMDGPU_CHUNK_ID_SYNCOBJ_IN:
 			r = amdgpu_cs_process_syncobj_in_dep(p, chunk);
 			if (r)
 				return r;
-		} else if (chunk->chunk_id == AMDGPU_CHUNK_ID_SYNCOBJ_OUT) {
+			break;
+		case AMDGPU_CHUNK_ID_SYNCOBJ_OUT:
 			r = amdgpu_cs_process_syncobj_out_dep(p, chunk);
 			if (r)
 				return r;
+			break;
+		case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT:
+			r = amdgpu_cs_process_syncobj_timeline_in_dep(p, chunk);
+			if (r)
+				return r;
+			break;
+		case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL:
+			r = amdgpu_cs_process_syncobj_timeline_out_dep(p, chunk);
+			if (r)
+				return r;
+			break;
 		}
 	}
 
@@ -1210,8 +1305,17 @@ static void amdgpu_cs_post_dependencies(struct amdgpu_cs_parser *p)
 {
 	int i;
 
-	for (i = 0; i < p->num_post_dep_syncobjs; ++i)
-		drm_syncobj_replace_fence(p->post_dep_syncobjs[i], p->fence);
+	for (i = 0; i < p->num_post_deps; ++i) {
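+		/*
+		 * Timeline point: hand the pre-allocated chain node to the
+		 * syncobj and clear it so parser_fini() won't free it.
+		 * Binary syncobjs (or point 0) just get the fence replaced.
+		 */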
+		if (p->post_deps[i].chain && p->post_deps[i].point) {
+			drm_syncobj_add_point(p->post_deps[i].syncobj,
+					      p->post_deps[i].chain,
+					      p->fence, p->post_deps[i].point);
+			p->post_deps[i].chain = NULL;
+		} else {
+			drm_syncobj_replace_fence(p->post_deps[i].syncobj,
+						  p->fence);
+		}
+	}
 }
 
 static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index f34e3ab5a9f3fcc04c289ba6fe2a0d4b6c57bc39..1e2cc9d68a05def7aa26cc02a754482bd89594fe 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -75,9 +75,10 @@
  * - 3.29.0 - Add AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID
  * - 3.30.0 - Add AMDGPU_SCHED_OP_CONTEXT_PRIORITY_OVERRIDE.
  * - 3.31.0 - Add support for per-flip tiling attribute changes with DC
+ * - 3.32.0 - Add syncobj timeline support to AMDGPU_CS.
  */
 #define KMS_DRIVER_MAJOR	3
-#define KMS_DRIVER_MINOR	31
+#define KMS_DRIVER_MINOR	32
 #define KMS_DRIVER_PATCHLEVEL	0
 
 int amdgpu_vram_limit = 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index ee47c11e92ce7f021e7ce87d5613c7a9543eb900..4dee2326b29c3d8a88455c8b521d8fb9a61d312f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -136,8 +136,9 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f,
 {
 	struct amdgpu_device *adev = ring->adev;
 	struct amdgpu_fence *fence;
-	struct dma_fence *old, **ptr;
+	struct dma_fence __rcu **ptr;
 	uint32_t seq;
+	int r;
 
 	fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_KERNEL);
 	if (fence == NULL)
@@ -153,15 +154,24 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f,
 			       seq, flags | AMDGPU_FENCE_FLAG_INT);
 
 	ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
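+	/*
+	 * The slot may still hold an earlier, possibly unsignaled fence;
+	 * wait for it before reusing the slot and propagate any wait error.
+	 */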
+	if (unlikely(rcu_dereference_protected(*ptr, 1))) {
+		struct dma_fence *old;
+
+		rcu_read_lock();
+		old = dma_fence_get_rcu_safe(ptr);
+		rcu_read_unlock();
+
+		if (old) {
+			r = dma_fence_wait(old, false);
+			dma_fence_put(old);
+			if (r)
+				return r;
+		}
+	}
+
 	/* This function can't be called concurrently anyway, otherwise
 	 * emitting the fence would mess up the hardware ring buffer.
 	 */
-	old = rcu_dereference_protected(*ptr, 1);
-	if (old && !dma_fence_is_signaled(old)) {
-		DRM_INFO("rcu slot is busy\n");
-		dma_fence_wait(old, false);
-	}
-
 	rcu_assign_pointer(*ptr, dma_fence_get(&fence->base));
 
 	*f = &fence->base;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
index 336834797af3d9e20bb656730e0ff0c5d6ca57cd..a48c84c51775bed4467058dedea3b8a2de8ea5aa 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
@@ -24,6 +24,7 @@
 #include <linux/list.h>
 #include "amdgpu.h"
 #include "amdgpu_xgmi.h"
+#include "amdgpu_smu.h"
 
 
 static DEFINE_MUTEX(xgmi_mutex);
@@ -216,7 +217,17 @@ int amdgpu_xgmi_set_pstate(struct amdgpu_device *adev, int pstate)
 
 	if (hive->pstate == pstate)
 		return 0;
-	/* Todo : sent the message to SMU for pstate change */
+
+	dev_dbg(adev->dev, "Set xgmi pstate %d.\n", pstate);
+
+	if (is_support_sw_smu(adev))
+		ret = smu_set_xgmi_pstate(&adev->smu, pstate);
+	if (ret)
+		dev_err(adev->dev,
+			"XGMI: Set pstate failure on device %llx, hive %llx, ret %d\n",
+			adev->gmc.xgmi.node_id,
+			adev->gmc.xgmi.hive_id, ret);
+
 	return ret;
 }
 
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 9f7d399b73d4f281027598de3744e4c6e50dff54..923cb5ca5a57cba08c7c06a6b68d4b84382956e7 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -2085,7 +2085,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
 		if (!plane->blends_with_above || !plane->blends_with_below)
 			continue;
 
-		if (!plane->supports_argb8888)
+		if (!plane->pixel_format_support.argb8888)
 			continue;
 
 		if (initialize_plane(dm, NULL, primary_planes + i,
@@ -2385,56 +2385,63 @@ static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
 	.destroy = amdgpu_dm_encoder_destroy,
 };
 
-static bool fill_rects_from_plane_state(const struct drm_plane_state *state,
-					struct dc_plane_state *plane_state)
+
+static int fill_dc_scaling_info(const struct drm_plane_state *state,
+				struct dc_scaling_info *scaling_info)
 {
-	plane_state->src_rect.x = state->src_x >> 16;
-	plane_state->src_rect.y = state->src_y >> 16;
-	/* we ignore the mantissa for now and do not deal with floating pixels :( */
-	plane_state->src_rect.width = state->src_w >> 16;
+	int scale_w, scale_h;
 
-	if (plane_state->src_rect.width == 0)
-		return false;
+	memset(scaling_info, 0, sizeof(*scaling_info));
 
-	plane_state->src_rect.height = state->src_h >> 16;
-	if (plane_state->src_rect.height == 0)
-		return false;
+	/* Source is 16.16 fixed point; ignore the fractional part for now. */
+	scaling_info->src_rect.x = state->src_x >> 16;
+	scaling_info->src_rect.y = state->src_y >> 16;
 
-	plane_state->dst_rect.x = state->crtc_x;
-	plane_state->dst_rect.y = state->crtc_y;
+	scaling_info->src_rect.width = state->src_w >> 16;
+	if (scaling_info->src_rect.width == 0)
+		return -EINVAL;
+
+	scaling_info->src_rect.height = state->src_h >> 16;
+	if (scaling_info->src_rect.height == 0)
+		return -EINVAL;
+
+	scaling_info->dst_rect.x = state->crtc_x;
+	scaling_info->dst_rect.y = state->crtc_y;
 
 	if (state->crtc_w == 0)
-		return false;
+		return -EINVAL;
 
-	plane_state->dst_rect.width = state->crtc_w;
+	scaling_info->dst_rect.width = state->crtc_w;
 
 	if (state->crtc_h == 0)
-		return false;
+		return -EINVAL;
 
-	plane_state->dst_rect.height = state->crtc_h;
+	scaling_info->dst_rect.height = state->crtc_h;
 
-	plane_state->clip_rect = plane_state->dst_rect;
+	/* DRM doesn't specify clipping on destination output. */
+	scaling_info->clip_rect = scaling_info->dst_rect;
 
-	switch (state->rotation & DRM_MODE_ROTATE_MASK) {
-	case DRM_MODE_ROTATE_0:
-		plane_state->rotation = ROTATION_ANGLE_0;
-		break;
-	case DRM_MODE_ROTATE_90:
-		plane_state->rotation = ROTATION_ANGLE_90;
-		break;
-	case DRM_MODE_ROTATE_180:
-		plane_state->rotation = ROTATION_ANGLE_180;
-		break;
-	case DRM_MODE_ROTATE_270:
-		plane_state->rotation = ROTATION_ANGLE_270;
-		break;
-	default:
-		plane_state->rotation = ROTATION_ANGLE_0;
-		break;
-	}
+	/* TODO: Validate scaling per-format with DC plane caps */
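+	/* Ratios are in thousandths; reject anything outside 0.25x..16x. */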
+	scale_w = scaling_info->dst_rect.width * 1000 /
+		  scaling_info->src_rect.width;
 
-	return true;
+	if (scale_w < 250 || scale_w > 16000)
+		return -EINVAL;
+
+	scale_h = scaling_info->dst_rect.height * 1000 /
+		  scaling_info->src_rect.height;
+
+	if (scale_h < 250 || scale_h > 16000)
+		return -EINVAL;
+
+	/*
+	 * The "scaling_quality" can be ignored for now; with quality = 0, DC
+	 * assumes reasonable defaults based on the format.
+	 */
+
+	return 0;
 }
+
 static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
 		       uint64_t *tiling_flags)
 {
@@ -2463,12 +2470,16 @@ static inline uint64_t get_dcc_address(uint64_t address, uint64_t tiling_flags)
 	return offset ? (address + offset * 256) : 0;
 }
 
-static int fill_plane_dcc_attributes(struct amdgpu_device *adev,
-				      const struct amdgpu_framebuffer *afb,
-				      const struct dc_plane_state *plane_state,
-				      struct dc_plane_dcc_param *dcc,
-				      struct dc_plane_address *address,
-				      uint64_t info)
+static int
+fill_plane_dcc_attributes(struct amdgpu_device *adev,
+			  const struct amdgpu_framebuffer *afb,
+			  const enum surface_pixel_format format,
+			  const enum dc_rotation_angle rotation,
+			  const union plane_size *plane_size,
+			  const union dc_tiling_info *tiling_info,
+			  const uint64_t info,
+			  struct dc_plane_dcc_param *dcc,
+			  struct dc_plane_address *address)
 {
 	struct dc *dc = adev->dm.dc;
 	struct dc_dcc_surface_param input;
@@ -2483,24 +2494,20 @@ static int fill_plane_dcc_attributes(struct amdgpu_device *adev,
 	if (!offset)
 		return 0;
 
-	if (plane_state->address.type != PLN_ADDR_TYPE_GRAPHICS)
+	if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
 		return 0;
 
 	if (!dc->cap_funcs.get_dcc_compression_cap)
 		return -EINVAL;
 
-	input.format = plane_state->format;
-	input.surface_size.width =
-		plane_state->plane_size.grph.surface_size.width;
-	input.surface_size.height =
-		plane_state->plane_size.grph.surface_size.height;
-	input.swizzle_mode = plane_state->tiling_info.gfx9.swizzle;
+	input.format = format;
+	input.surface_size.width = plane_size->grph.surface_size.width;
+	input.surface_size.height = plane_size->grph.surface_size.height;
+	input.swizzle_mode = tiling_info->gfx9.swizzle;
 
-	if (plane_state->rotation == ROTATION_ANGLE_0 ||
-	    plane_state->rotation == ROTATION_ANGLE_180)
+	if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
 		input.scan = SCAN_DIRECTION_HORIZONTAL;
-	else if (plane_state->rotation == ROTATION_ANGLE_90 ||
-		 plane_state->rotation == ROTATION_ANGLE_270)
+	else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
 		input.scan = SCAN_DIRECTION_VERTICAL;
 
 	if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
@@ -2525,28 +2532,54 @@ static int fill_plane_dcc_attributes(struct amdgpu_device *adev,
 }
 
 static int
-fill_plane_tiling_attributes(struct amdgpu_device *adev,
+fill_plane_buffer_attributes(struct amdgpu_device *adev,
 			     const struct amdgpu_framebuffer *afb,
-			     const struct dc_plane_state *plane_state,
+			     const enum surface_pixel_format format,
+			     const enum dc_rotation_angle rotation,
+			     const uint64_t tiling_flags,
 			     union dc_tiling_info *tiling_info,
+			     union plane_size *plane_size,
 			     struct dc_plane_dcc_param *dcc,
-			     struct dc_plane_address *address,
-			     uint64_t tiling_flags)
+			     struct dc_plane_address *address)
 {
+	const struct drm_framebuffer *fb = &afb->base;
 	int ret;
 
 	memset(tiling_info, 0, sizeof(*tiling_info));
+	memset(plane_size, 0, sizeof(*plane_size));
 	memset(dcc, 0, sizeof(*dcc));
 	memset(address, 0, sizeof(*address));
 
-	if (plane_state->format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
+	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
+		plane_size->grph.surface_size.x = 0;
+		plane_size->grph.surface_size.y = 0;
+		plane_size->grph.surface_size.width = fb->width;
+		plane_size->grph.surface_size.height = fb->height;
+		plane_size->grph.surface_pitch =
+			fb->pitches[0] / fb->format->cpp[0];
+
 		address->type = PLN_ADDR_TYPE_GRAPHICS;
 		address->grph.addr.low_part = lower_32_bits(afb->address);
 		address->grph.addr.high_part = upper_32_bits(afb->address);
 	} else {
-		const struct drm_framebuffer *fb = &afb->base;
 		uint64_t chroma_addr = afb->address + fb->offsets[1];
 
+		plane_size->video.luma_size.x = 0;
+		plane_size->video.luma_size.y = 0;
+		plane_size->video.luma_size.width = fb->width;
+		plane_size->video.luma_size.height = fb->height;
+		plane_size->video.luma_pitch =
+			fb->pitches[0] / fb->format->cpp[0];
+
+		plane_size->video.chroma_size.x = 0;
+		plane_size->video.chroma_size.y = 0;
+		/* TODO: set these based on surface format */
+		plane_size->video.chroma_size.width = fb->width / 2;
+		plane_size->video.chroma_size.height = fb->height / 2;
+
+		plane_size->video.chroma_pitch =
+			fb->pitches[1] / fb->format->cpp[1];
+
 		address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
 		address->video_progressive.luma_addr.low_part =
 			lower_32_bits(afb->address);
@@ -2607,8 +2640,9 @@ fill_plane_tiling_attributes(struct amdgpu_device *adev,
 			AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE);
 		tiling_info->gfx9.shaderEnable = 1;
 
-		ret = fill_plane_dcc_attributes(adev, afb, plane_state, dcc,
-						address, tiling_flags);
+		ret = fill_plane_dcc_attributes(adev, afb, format, rotation,
+						plane_size, tiling_info,
+						tiling_flags, dcc, address);
 		if (ret)
 			return ret;
 	}
@@ -2616,112 +2650,8 @@ fill_plane_tiling_attributes(struct amdgpu_device *adev,
 	return 0;
 }
 
-static int fill_plane_attributes_from_fb(struct amdgpu_device *adev,
-					 struct dc_plane_state *plane_state,
-					 const struct amdgpu_framebuffer *amdgpu_fb)
-{
-	uint64_t tiling_flags;
-	const struct drm_framebuffer *fb = &amdgpu_fb->base;
-	int ret = 0;
-	struct drm_format_name_buf format_name;
-
-	ret = get_fb_info(
-		amdgpu_fb,
-		&tiling_flags);
-
-	if (ret)
-		return ret;
-
-	switch (fb->format->format) {
-	case DRM_FORMAT_C8:
-		plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
-		break;
-	case DRM_FORMAT_RGB565:
-		plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
-		break;
-	case DRM_FORMAT_XRGB8888:
-	case DRM_FORMAT_ARGB8888:
-		plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
-		break;
-	case DRM_FORMAT_XRGB2101010:
-	case DRM_FORMAT_ARGB2101010:
-		plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
-		break;
-	case DRM_FORMAT_XBGR2101010:
-	case DRM_FORMAT_ABGR2101010:
-		plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
-		break;
-	case DRM_FORMAT_XBGR8888:
-	case DRM_FORMAT_ABGR8888:
-		plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
-		break;
-	case DRM_FORMAT_NV21:
-		plane_state->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
-		break;
-	case DRM_FORMAT_NV12:
-		plane_state->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
-		break;
-	default:
-		DRM_ERROR("Unsupported screen format %s\n",
-			  drm_get_format_name(fb->format->format, &format_name));
-		return -EINVAL;
-	}
-
-	memset(&plane_state->address, 0, sizeof(plane_state->address));
-
-	if (plane_state->format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
-		plane_state->plane_size.grph.surface_size.x = 0;
-		plane_state->plane_size.grph.surface_size.y = 0;
-		plane_state->plane_size.grph.surface_size.width = fb->width;
-		plane_state->plane_size.grph.surface_size.height = fb->height;
-		plane_state->plane_size.grph.surface_pitch =
-				fb->pitches[0] / fb->format->cpp[0];
-		/* TODO: unhardcode */
-		plane_state->color_space = COLOR_SPACE_SRGB;
-
-	} else {
-		plane_state->plane_size.video.luma_size.x = 0;
-		plane_state->plane_size.video.luma_size.y = 0;
-		plane_state->plane_size.video.luma_size.width = fb->width;
-		plane_state->plane_size.video.luma_size.height = fb->height;
-		plane_state->plane_size.video.luma_pitch =
-			fb->pitches[0] / fb->format->cpp[0];
-
-		plane_state->plane_size.video.chroma_size.x = 0;
-		plane_state->plane_size.video.chroma_size.y = 0;
-		/* TODO: set these based on surface format */
-		plane_state->plane_size.video.chroma_size.width = fb->width / 2;
-		plane_state->plane_size.video.chroma_size.height = fb->height / 2;
-
-		plane_state->plane_size.video.chroma_pitch =
-			fb->pitches[1] / fb->format->cpp[1];
-
-		/* TODO: unhardcode */
-		plane_state->color_space = COLOR_SPACE_YCBCR709;
-	}
-
-	fill_plane_tiling_attributes(adev, amdgpu_fb, plane_state,
-				     &plane_state->tiling_info,
-				     &plane_state->dcc,
-				     &plane_state->address,
-				     tiling_flags);
-
-	plane_state->visible = true;
-	plane_state->scaling_quality.h_taps_c = 0;
-	plane_state->scaling_quality.v_taps_c = 0;
-
-	/* is this needed? is plane_state zeroed at allocation? */
-	plane_state->scaling_quality.h_taps = 0;
-	plane_state->scaling_quality.v_taps = 0;
-	plane_state->stereo_format = PLANE_STEREO_FORMAT_NONE;
-
-	return ret;
-
-}
-
 static void
-fill_blending_from_plane_state(struct drm_plane_state *plane_state,
-			       const struct dc_plane_state *dc_plane_state,
+fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
 			       bool *per_pixel_alpha, bool *global_alpha,
 			       int *global_alpha_value)
 {
@@ -2757,7 +2687,7 @@ fill_blending_from_plane_state(struct drm_plane_state *plane_state,
 
 static int
 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
-			    const struct dc_plane_state *dc_plane_state,
+			    const enum surface_pixel_format format,
 			    enum dc_color_space *color_space)
 {
 	bool full_range;
@@ -2765,7 +2695,7 @@ fill_plane_color_attributes(const struct drm_plane_state *plane_state,
 	*color_space = COLOR_SPACE_SRGB;
 
 	/* DRM color properties only affect non-RGB formats. */
-	if (dc_plane_state->format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
+	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
 		return 0;
 
 	full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
@@ -2799,32 +2729,144 @@ fill_plane_color_attributes(const struct drm_plane_state *plane_state,
 	return 0;
 }
 
-static int fill_plane_attributes(struct amdgpu_device *adev,
-				 struct dc_plane_state *dc_plane_state,
-				 struct drm_plane_state *plane_state,
-				 struct drm_crtc_state *crtc_state)
+static int
+fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
+			    const struct drm_plane_state *plane_state,
+			    const uint64_t tiling_flags,
+			    struct dc_plane_info *plane_info,
+			    struct dc_plane_address *address)
+{
+	const struct drm_framebuffer *fb = plane_state->fb;
+	const struct amdgpu_framebuffer *afb =
+		to_amdgpu_framebuffer(plane_state->fb);
+	struct drm_format_name_buf format_name;
+	int ret;
+
+	memset(plane_info, 0, sizeof(*plane_info));
+
+	switch (fb->format->format) {
+	case DRM_FORMAT_C8:
+		plane_info->format =
+			SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
+		break;
+	case DRM_FORMAT_RGB565:
+		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
+		break;
+	case DRM_FORMAT_XRGB8888:
+	case DRM_FORMAT_ARGB8888:
+		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
+		break;
+	case DRM_FORMAT_XRGB2101010:
+	case DRM_FORMAT_ARGB2101010:
+		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
+		break;
+	case DRM_FORMAT_XBGR2101010:
+	case DRM_FORMAT_ABGR2101010:
+		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
+		break;
+	case DRM_FORMAT_XBGR8888:
+	case DRM_FORMAT_ABGR8888:
+		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
+		break;
+	case DRM_FORMAT_NV21:
+		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
+		break;
+	case DRM_FORMAT_NV12:
+		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
+		break;
+	default:
+		DRM_ERROR(
+			"Unsupported screen format %s\n",
+			drm_get_format_name(fb->format->format, &format_name));
+		return -EINVAL;
+	}
+
+	switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
+	case DRM_MODE_ROTATE_0:
+		plane_info->rotation = ROTATION_ANGLE_0;
+		break;
+	case DRM_MODE_ROTATE_90:
+		plane_info->rotation = ROTATION_ANGLE_90;
+		break;
+	case DRM_MODE_ROTATE_180:
+		plane_info->rotation = ROTATION_ANGLE_180;
+		break;
+	case DRM_MODE_ROTATE_270:
+		plane_info->rotation = ROTATION_ANGLE_270;
+		break;
+	default:
+		plane_info->rotation = ROTATION_ANGLE_0;
+		break;
+	}
+
+	plane_info->visible = true;
+	plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
+
+	ret = fill_plane_color_attributes(plane_state, plane_info->format,
+					  &plane_info->color_space);
+	if (ret)
+		return ret;
+
+	ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
+					   plane_info->rotation, tiling_flags,
+					   &plane_info->tiling_info,
+					   &plane_info->plane_size,
+					   &plane_info->dcc, address);
+	if (ret)
+		return ret;
+
+	fill_blending_from_plane_state(
+		plane_state, &plane_info->per_pixel_alpha,
+		&plane_info->global_alpha, &plane_info->global_alpha_value);
+
+	return 0;
+}
+
+static int fill_dc_plane_attributes(struct amdgpu_device *adev,
+				    struct dc_plane_state *dc_plane_state,
+				    struct drm_plane_state *plane_state,
+				    struct drm_crtc_state *crtc_state)
 {
 	const struct amdgpu_framebuffer *amdgpu_fb =
 		to_amdgpu_framebuffer(plane_state->fb);
-	const struct drm_crtc *crtc = plane_state->crtc;
-	int ret = 0;
+	struct dc_scaling_info scaling_info;
+	struct dc_plane_info plane_info;
+	uint64_t tiling_flags;
+	int ret;
 
-	if (!fill_rects_from_plane_state(plane_state, dc_plane_state))
-		return -EINVAL;
+	ret = fill_dc_scaling_info(plane_state, &scaling_info);
+	if (ret)
+		return ret;
 
-	ret = fill_plane_attributes_from_fb(
-		crtc->dev->dev_private,
-		dc_plane_state,
-		amdgpu_fb);
+	dc_plane_state->src_rect = scaling_info.src_rect;
+	dc_plane_state->dst_rect = scaling_info.dst_rect;
+	dc_plane_state->clip_rect = scaling_info.clip_rect;
+	dc_plane_state->scaling_quality = scaling_info.scaling_quality;
 
+	ret = get_fb_info(amdgpu_fb, &tiling_flags);
 	if (ret)
 		return ret;
 
-	ret = fill_plane_color_attributes(plane_state, dc_plane_state,
-					  &dc_plane_state->color_space);
+	ret = fill_dc_plane_info_and_addr(adev, plane_state, tiling_flags,
+					  &plane_info,
+					  &dc_plane_state->address);
 	if (ret)
 		return ret;
 
+	dc_plane_state->format = plane_info.format;
+	dc_plane_state->color_space = plane_info.color_space;
+	dc_plane_state->plane_size = plane_info.plane_size;
+	dc_plane_state->rotation = plane_info.rotation;
+	dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
+	dc_plane_state->stereo_format = plane_info.stereo_format;
+	dc_plane_state->tiling_info = plane_info.tiling_info;
+	dc_plane_state->visible = plane_info.visible;
+	dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
+	dc_plane_state->global_alpha = plane_info.global_alpha;
+	dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
+	dc_plane_state->dcc = plane_info.dcc;
+
 	/*
 	 * Always set input transfer function, since plane state is refreshed
 	 * every time.
@@ -2835,11 +2877,6 @@ static int fill_plane_attributes(struct amdgpu_device *adev,
 		dc_plane_state->in_transfer_func = NULL;
 	}
 
-	fill_blending_from_plane_state(plane_state, dc_plane_state,
-				       &dc_plane_state->per_pixel_alpha,
-				       &dc_plane_state->global_alpha,
-				       &dc_plane_state->global_alpha_value);
-
 	return ret;
 }
 
@@ -3825,6 +3862,38 @@ static void dm_crtc_helper_disable(struct drm_crtc *crtc)
 {
 }
 
+static bool does_crtc_have_active_plane(struct drm_crtc_state *new_crtc_state)
+{
+	struct drm_atomic_state *state = new_crtc_state->state;
+	struct drm_plane *plane;
+	int num_active = 0;
+
+	drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
+		struct drm_plane_state *new_plane_state;
+
+		/* Cursor planes are "fake". */
+		if (plane->type == DRM_PLANE_TYPE_CURSOR)
+			continue;
+
+		new_plane_state = drm_atomic_get_new_plane_state(state, plane);
+
+		if (!new_plane_state) {
+			/*
+			 * The plane is enable on the CRTC and hasn't changed
+			 * The plane is enabled on the CRTC and hasn't changed
+			 * validation and is therefore enabled.
+			 */
+			num_active += 1;
+			continue;
+		}
+
+		/* We need a framebuffer to be considered enabled. */
+		num_active += (new_plane_state->fb != NULL);
+	}
+
+	return num_active > 0;
+}
+
 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
 				       struct drm_crtc_state *state)
 {
@@ -3843,6 +3912,11 @@ static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
 	if (!dm_crtc_state->stream)
 		return 0;
 
+	/* We want at least one hardware plane enabled to use the stream. */
+	if (state->enable && state->active &&
+	    !does_crtc_have_active_plane(state))
+		return -EINVAL;
+
 	if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
 		return 0;
 
@@ -3994,9 +4068,11 @@ static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
 			dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
 		struct dc_plane_state *plane_state = dm_plane_state_new->dc_state;
 
-		fill_plane_tiling_attributes(
-			adev, afb, plane_state, &plane_state->tiling_info,
-			&plane_state->dcc, &plane_state->address, tiling_flags);
+		fill_plane_buffer_attributes(
+			adev, afb, plane_state->format, plane_state->rotation,
+			tiling_flags, &plane_state->tiling_info,
+			&plane_state->plane_size, &plane_state->dcc,
+			&plane_state->address);
 	}
 
 	return 0;
@@ -4028,13 +4104,18 @@ static int dm_plane_atomic_check(struct drm_plane *plane,
 {
 	struct amdgpu_device *adev = plane->dev->dev_private;
 	struct dc *dc = adev->dm.dc;
-	struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
+	struct dm_plane_state *dm_plane_state;
+	struct dc_scaling_info scaling_info;
+	int ret;
+
+	dm_plane_state = to_dm_plane_state(state);
 
 	if (!dm_plane_state->dc_state)
 		return 0;
 
-	if (!fill_rects_from_plane_state(state, dm_plane_state->dc_state))
-		return -EINVAL;
+	ret = fill_dc_scaling_info(state, &scaling_info);
+	if (ret)
+		return ret;
 
 	if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
 		return 0;
@@ -4121,46 +4202,71 @@ static const u32 cursor_formats[] = {
 	DRM_FORMAT_ARGB8888
 };
 
-static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
-				struct drm_plane *plane,
-				unsigned long possible_crtcs,
-				const struct dc_plane_cap *plane_cap)
+static int get_plane_formats(const struct drm_plane *plane,
+			     const struct dc_plane_cap *plane_cap,
+			     uint32_t *formats, int max_formats)
 {
-	int res = -EPERM;
+	int i, num_formats = 0;
+
+	/*
+	 * TODO: Query support for each group of formats directly from
+	 * DC plane caps. This will require adding more formats to the
+	 * caps list.
+	 */
 
 	switch (plane->type) {
 	case DRM_PLANE_TYPE_PRIMARY:
-		res = drm_universal_plane_init(
-				dm->adev->ddev,
-				plane,
-				possible_crtcs,
-				&dm_plane_funcs,
-				rgb_formats,
-				ARRAY_SIZE(rgb_formats),
-				NULL, plane->type, NULL);
+		for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
+			if (num_formats >= max_formats)
+				break;
+
+			formats[num_formats++] = rgb_formats[i];
+		}
+
+		if (plane_cap && plane_cap->pixel_format_support.nv12)
+			formats[num_formats++] = DRM_FORMAT_NV12;
 		break;
+
 	case DRM_PLANE_TYPE_OVERLAY:
-		res = drm_universal_plane_init(
-				dm->adev->ddev,
-				plane,
-				possible_crtcs,
-				&dm_plane_funcs,
-				overlay_formats,
-				ARRAY_SIZE(overlay_formats),
-				NULL, plane->type, NULL);
+		for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
+			if (num_formats >= max_formats)
+				break;
+
+			formats[num_formats++] = overlay_formats[i];
+		}
 		break;
+
 	case DRM_PLANE_TYPE_CURSOR:
-		res = drm_universal_plane_init(
-				dm->adev->ddev,
-				plane,
-				possible_crtcs,
-				&dm_plane_funcs,
-				cursor_formats,
-				ARRAY_SIZE(cursor_formats),
-				NULL, plane->type, NULL);
+		for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
+			if (num_formats >= max_formats)
+				break;
+
+			formats[num_formats++] = cursor_formats[i];
+		}
 		break;
 	}
 
+	return num_formats;
+}
+
+static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
+				struct drm_plane *plane,
+				unsigned long possible_crtcs,
+				const struct dc_plane_cap *plane_cap)
+{
+	uint32_t formats[32];
+	int num_formats;
+	int res = -EPERM;
+
+	num_formats = get_plane_formats(plane, plane_cap, formats,
+					ARRAY_SIZE(formats));
+
+	res = drm_universal_plane_init(dm->adev->ddev, plane, possible_crtcs,
+				       &dm_plane_funcs, formats, num_formats,
+				       NULL, plane->type, NULL);
+	if (res)
+		return res;
+
 	if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
 	    plane_cap && plane_cap->per_pixel_alpha) {
 		unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
@@ -4170,14 +4276,25 @@ static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
 		drm_plane_create_blend_mode_property(plane, blend_caps);
 	}
 
+	if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
+	    plane_cap && plane_cap->pixel_format_support.nv12) {
+		/* This only affects YUV formats. */
+		drm_plane_create_color_properties(
+			plane,
+			BIT(DRM_COLOR_YCBCR_BT601) |
+			BIT(DRM_COLOR_YCBCR_BT709),
+			BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
+			BIT(DRM_COLOR_YCBCR_FULL_RANGE),
+			DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
+	}
+
 	drm_plane_helper_add(plane, &dm_plane_helper_funcs);
 
 	/* Create (reset) the plane state */
 	if (plane->funcs->reset)
 		plane->funcs->reset(plane);
 
-
-	return res;
+	return 0;
 }
 
 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
@@ -4769,9 +4886,13 @@ static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
 
 	x = plane->state->crtc_x;
 	y = plane->state->crtc_y;
-	/* avivo cursor are offset into the total surface */
-	x += crtc->primary->state->src_x >> 16;
-	y += crtc->primary->state->src_y >> 16;
+
+	if (crtc->primary->state) {
+		/* avivo cursor are offset into the total surface */
+		x += crtc->primary->state->src_x >> 16;
+		y += crtc->primary->state->src_y >> 16;
+	}
+
 	if (x < 0) {
 		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
 		x = 0;
@@ -5046,7 +5167,6 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
 		struct drm_crtc *crtc = new_plane_state->crtc;
 		struct drm_crtc_state *new_crtc_state;
 		struct drm_framebuffer *fb = new_plane_state->fb;
-		struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
 		bool plane_needs_flip;
 		struct dc_plane_state *dc_plane;
 		struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
@@ -5070,29 +5190,11 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
 			bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
 		}
 
+		fill_dc_scaling_info(new_plane_state,
+				     &bundle->scaling_infos[planes_count]);
 
-		bundle->scaling_infos[planes_count].scaling_quality = dc_plane->scaling_quality;
-		bundle->scaling_infos[planes_count].src_rect = dc_plane->src_rect;
-		bundle->scaling_infos[planes_count].dst_rect = dc_plane->dst_rect;
-		bundle->scaling_infos[planes_count].clip_rect = dc_plane->clip_rect;
-		bundle->surface_updates[planes_count].scaling_info = &bundle->scaling_infos[planes_count];
-
-		fill_plane_color_attributes(
-			new_plane_state, dc_plane,
-			&bundle->plane_infos[planes_count].color_space);
-
-		bundle->plane_infos[planes_count].format = dc_plane->format;
-		bundle->plane_infos[planes_count].plane_size = dc_plane->plane_size;
-		bundle->plane_infos[planes_count].rotation = dc_plane->rotation;
-		bundle->plane_infos[planes_count].horizontal_mirror = dc_plane->horizontal_mirror;
-		bundle->plane_infos[planes_count].stereo_format = dc_plane->stereo_format;
-		bundle->plane_infos[planes_count].tiling_info = dc_plane->tiling_info;
-		bundle->plane_infos[planes_count].visible = dc_plane->visible;
-		bundle->plane_infos[planes_count].global_alpha = dc_plane->global_alpha;
-		bundle->plane_infos[planes_count].global_alpha_value = dc_plane->global_alpha_value;
-		bundle->plane_infos[planes_count].per_pixel_alpha = dc_plane->per_pixel_alpha;
-		bundle->plane_infos[planes_count].dcc = dc_plane->dcc;
-		bundle->surface_updates[planes_count].plane_info = &bundle->plane_infos[planes_count];
+		bundle->surface_updates[planes_count].scaling_info =
+			&bundle->scaling_infos[planes_count];
 
 		plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
 
@@ -5124,11 +5226,13 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
 
 		amdgpu_bo_unreserve(abo);
 
-		fill_plane_tiling_attributes(dm->adev, afb, dc_plane,
-			&bundle->plane_infos[planes_count].tiling_info,
-			&bundle->plane_infos[planes_count].dcc,
-			&bundle->flip_addrs[planes_count].address,
-			tiling_flags);
+		fill_dc_plane_info_and_addr(
+			dm->adev, new_plane_state, tiling_flags,
+			&bundle->plane_infos[planes_count],
+			&bundle->flip_addrs[planes_count].address);
+
+		bundle->surface_updates[planes_count].plane_info =
+			&bundle->plane_infos[planes_count];
 
 		bundle->flip_addrs[planes_count].flip_immediate =
 				(crtc->state->pageflip_flags & DRM_MODE_PAGE_FLIP_ASYNC) != 0;
@@ -5812,21 +5916,12 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
 	struct amdgpu_dm_connector *aconnector = NULL;
 	struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
 	struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
-	struct drm_plane_state *new_plane_state = NULL;
 
 	new_stream = NULL;
 
 	dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
 	dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
 	acrtc = to_amdgpu_crtc(crtc);
-
-	new_plane_state = drm_atomic_get_new_plane_state(state, new_crtc_state->crtc->primary);
-
-	if (new_crtc_state->enable && new_plane_state && !new_plane_state->fb) {
-		ret = -EINVAL;
-		goto fail;
-	}
-
 	aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
 
 	/* TODO This hack should go away */
@@ -6016,6 +6111,69 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
 	return ret;
 }
 
+static bool should_reset_plane(struct drm_atomic_state *state,
+			       struct drm_plane *plane,
+			       struct drm_plane_state *old_plane_state,
+			       struct drm_plane_state *new_plane_state)
+{
+	struct drm_plane *other;
+	struct drm_plane_state *old_other_state, *new_other_state;
+	struct drm_crtc_state *new_crtc_state;
+	int i;
+
+	/*
+	 * TODO: Remove this hack once the checks below are sufficient
+	 * to determine when we need to reset all the planes on
+	 * the stream.
+	 */
+	if (state->allow_modeset)
+		return true;
+
+	/* Exit early if we know that we're adding or removing the plane. */
+	if (old_plane_state->crtc != new_plane_state->crtc)
+		return true;
+
+	/* old crtc == new_crtc == NULL, plane not in context. */
+	if (!new_plane_state->crtc)
+		return false;
+
+	new_crtc_state =
+		drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
+
+	if (!new_crtc_state)
+		return true;
+
+	if (drm_atomic_crtc_needs_modeset(new_crtc_state))
+		return true;
+
+	/*
+	 * If there are any new primary or overlay planes being added or
+	 * removed then the z-order can potentially change. To ensure
+	 * correct z-order and pipe acquisition the current DC architecture
+	 * requires us to remove and recreate all existing planes.
+	 *
+	 * TODO: Come up with a more elegant solution for this.
+	 */
+	for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
+		if (other->type == DRM_PLANE_TYPE_CURSOR)
+			continue;
+
+		if (old_other_state->crtc != new_plane_state->crtc &&
+		    new_other_state->crtc != new_plane_state->crtc)
+			continue;
+
+		if (old_other_state->crtc != new_other_state->crtc)
+			return true;
+
+		/* TODO: Remove this once we can handle fast format changes. */
+		if (old_other_state->fb && new_other_state->fb &&
+		    old_other_state->fb->format != new_other_state->fb->format)
+			return true;
+	}
+
+	return false;
+}
+
 static int dm_update_plane_state(struct dc *dc,
 				 struct drm_atomic_state *state,
 				 struct drm_plane *plane,
@@ -6030,8 +6188,7 @@ static int dm_update_plane_state(struct dc *dc,
 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
 	struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
 	struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
-	/* TODO return page_flip_needed() function */
-	bool pflip_needed  = !state->allow_modeset;
+	bool needs_reset;
 	int ret = 0;
 
 
@@ -6044,10 +6201,12 @@ static int dm_update_plane_state(struct dc *dc,
 	if (plane->type == DRM_PLANE_TYPE_CURSOR)
 		return 0;
 
+	needs_reset = should_reset_plane(state, plane, old_plane_state,
+					 new_plane_state);
+
 	/* Remove any changed/removed planes */
 	if (!enable) {
-		if (pflip_needed &&
-		    plane->type != DRM_PLANE_TYPE_OVERLAY)
+		if (!needs_reset)
 			return 0;
 
 		if (!old_plane_crtc)
@@ -6098,7 +6257,7 @@ static int dm_update_plane_state(struct dc *dc,
 		if (!dm_new_crtc_state->stream)
 			return 0;
 
-		if (pflip_needed && plane->type != DRM_PLANE_TYPE_OVERLAY)
+		if (!needs_reset)
 			return 0;
 
 		WARN_ON(dm_new_plane_state->dc_state);
@@ -6110,7 +6269,7 @@ static int dm_update_plane_state(struct dc *dc,
 		DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
 				plane->base.id, new_plane_crtc->base.id);
 
-		ret = fill_plane_attributes(
+		ret = fill_dc_plane_attributes(
 			new_plane_crtc->dev->dev_private,
 			dc_new_plane_state,
 			new_plane_state,
@@ -6158,10 +6317,11 @@ static int dm_update_plane_state(struct dc *dc,
 }
 
 static int
-dm_determine_update_type_for_commit(struct dc *dc,
+dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm,
 				    struct drm_atomic_state *state,
 				    enum surface_update_type *out_type)
 {
+	struct dc *dc = dm->dc;
 	struct dm_atomic_state *dm_state = NULL, *old_dm_state = NULL;
 	int i, j, num_plane, ret = 0;
 	struct drm_plane_state *old_plane_state, *new_plane_state;
@@ -6175,20 +6335,19 @@ dm_determine_update_type_for_commit(struct dc *dc,
 	struct dc_stream_status *status = NULL;
 
 	struct dc_surface_update *updates;
-	struct dc_plane_state *surface;
 	enum surface_update_type update_type = UPDATE_TYPE_FAST;
 
 	updates = kcalloc(MAX_SURFACES, sizeof(*updates), GFP_KERNEL);
-	surface = kcalloc(MAX_SURFACES, sizeof(*surface), GFP_KERNEL);
 
-	if (!updates || !surface) {
-		DRM_ERROR("Plane or surface update failed to allocate");
+	if (!updates) {
+		DRM_ERROR("Failed to allocate plane updates\n");
 		/* Set type to FULL to avoid crashing in DC*/
 		update_type = UPDATE_TYPE_FULL;
 		goto cleanup;
 	}
 
 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+		struct dc_scaling_info scaling_info;
 		struct dc_stream_update stream_update;
 
 		memset(&stream_update, 0, sizeof(stream_update));
@@ -6219,23 +6378,12 @@ dm_determine_update_type_for_commit(struct dc *dc,
 				goto cleanup;
 			}
 
-			if (!state->allow_modeset)
-				continue;
-
 			if (crtc != new_plane_crtc)
 				continue;
 
-			updates[num_plane].surface = &surface[num_plane];
+			updates[num_plane].surface = new_dm_plane_state->dc_state;
 
 			if (new_crtc_state->mode_changed) {
-				updates[num_plane].surface->src_rect =
-						new_dm_plane_state->dc_state->src_rect;
-				updates[num_plane].surface->dst_rect =
-						new_dm_plane_state->dc_state->dst_rect;
-				updates[num_plane].surface->rotation =
-						new_dm_plane_state->dc_state->rotation;
-				updates[num_plane].surface->in_transfer_func =
-						new_dm_plane_state->dc_state->in_transfer_func;
 				stream_update.dst = new_dm_crtc_state->stream->dst;
 				stream_update.src = new_dm_crtc_state->stream->src;
 			}
@@ -6251,6 +6399,13 @@ dm_determine_update_type_for_commit(struct dc *dc,
 						new_dm_crtc_state->stream->out_transfer_func;
 			}
 
+			ret = fill_dc_scaling_info(new_plane_state,
+						   &scaling_info);
+			if (ret)
+				goto cleanup;
+
+			updates[num_plane].scaling_info = &scaling_info;
+
 			num_plane++;
 		}
 
@@ -6270,8 +6425,14 @@ dm_determine_update_type_for_commit(struct dc *dc,
 		status = dc_stream_get_status_from_state(old_dm_state->context,
 							 new_dm_crtc_state->stream);
 
+		/*
+		 * TODO: DC modifies the surface during this call so we need
+		 * to lock here - find a way to do this without locking.
+		 */
+		mutex_lock(&dm->dc_lock);
 		update_type = dc_check_update_surfaces_for_stream(dc, updates, num_plane,
 								  &stream_update, status);
+		mutex_unlock(&dm->dc_lock);
 
 		if (update_type > UPDATE_TYPE_MED) {
 			update_type = UPDATE_TYPE_FULL;
@@ -6281,7 +6442,6 @@ dm_determine_update_type_for_commit(struct dc *dc,
 
 cleanup:
 	kfree(updates);
-	kfree(surface);
 
 	*out_type = update_type;
 	return ret;
@@ -6465,7 +6625,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
 		lock_and_validation_needed = true;
 	}
 
-	ret = dm_determine_update_type_for_commit(dc, state, &update_type);
+	ret = dm_determine_update_type_for_commit(&adev->dm, state, &update_type);
 	if (ret)
 		goto fail;
 
@@ -6480,9 +6640,6 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
 	 */
 	if (lock_and_validation_needed && overall_update_type <= UPDATE_TYPE_FAST)
 		WARN(1, "Global lock should be Set, overall_update_type should be UPDATE_TYPE_MED or UPDATE_TYPE_FULL");
-	else if (!lock_and_validation_needed && overall_update_type > UPDATE_TYPE_FAST)
-		WARN(1, "Global lock should NOT be set, overall_update_type should be UPDATE_TYPE_FAST");
-
 
 	if (overall_update_type > UPDATE_TYPE_FAST) {
 		ret = dm_atomic_get_state(state, &dm_state);
@@ -6493,7 +6650,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
 		if (ret)
 			goto fail;
 
-		if (dc_validate_global_state(dc, dm_state->context) != DC_OK) {
+		if (dc_validate_global_state(dc, dm_state->context, false) != DC_OK) {
 			ret = -EINVAL;
 			goto fail;
 		}
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
index 8843361e842d5b06befc3972382436ea60bf418a..1b4b51657f5e3dfa309db62d0b13c6d391e9051f 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
@@ -701,8 +701,15 @@ static void hack_bounding_box(struct dcn_bw_internal_vars *v,
 
 bool dcn_validate_bandwidth(
 		struct dc *dc,
-		struct dc_state *context)
+		struct dc_state *context,
+		bool fast_validate)
 {
+	/*
+	 * we want a breakdown of the various stages of validation, which the
+	 * perf_trace macro doesn't support
+	 */
+	BW_VAL_TRACE_SETUP();
+
 	const struct resource_pool *pool = dc->res_pool;
 	struct dcn_bw_internal_vars *v = &context->dcn_bw_vars;
 	int i, input_idx;
@@ -711,6 +718,9 @@ bool dcn_validate_bandwidth(
 	float bw_limit;
 
 	PERFORMANCE_TRACE_START();
+
+	BW_VAL_TRACE_COUNT();
+
 	if (dcn_bw_apply_registry_override(dc))
 		dcn_bw_sync_calcs_and_dml(dc);
 
@@ -1013,8 +1023,11 @@ bool dcn_validate_bandwidth(
 		mode_support_and_system_configuration(v);
 	}
 
-	if (v->voltage_level != 5) {
+	BW_VAL_TRACE_END_VOLTAGE_LEVEL();
+
+	if (v->voltage_level != number_of_states_plus_one && !fast_validate) {
 		float bw_consumed = v->total_bandwidth_consumed_gbyte_per_second;
+
 		if (bw_consumed < v->fabric_and_dram_bandwidth_vmin0p65)
 			bw_consumed = v->fabric_and_dram_bandwidth_vmin0p65;
 		else if (bw_consumed < v->fabric_and_dram_bandwidth_vmid0p72)
@@ -1087,6 +1100,8 @@ bool dcn_validate_bandwidth(
 			break;
 		}
 
+		BW_VAL_TRACE_END_WATERMARKS();
+
 		for (i = 0, input_idx = 0; i < pool->pipe_count; i++) {
 			struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
 
@@ -1177,6 +1192,10 @@ bool dcn_validate_bandwidth(
 
 			input_idx++;
 		}
+	} else if (v->voltage_level == number_of_states_plus_one) {
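+		/* Mode not supported at any voltage level: record the failure. */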
+		BW_VAL_TRACE_SKIP(fail);
+	} else if (fast_validate) {
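+		/* Caller only needs a yes/no answer: skip the detailed calculations. */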
+		BW_VAL_TRACE_SKIP(fast);
 	}
 
 	if (v->voltage_level == 0) {
@@ -1196,6 +1215,7 @@ bool dcn_validate_bandwidth(
 	kernel_fpu_end();
 
 	PERFORMANCE_TRACE_END();
+	BW_VAL_TRACE_FINISH();
 
 	if (bw_limit_pass && v->voltage_level != 5)
 		return true;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 4887d06110019fbee7d6de1ed89c93e8989aa51f..dda10b1f8574bffe42b154db6ac09517422da639 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -597,7 +597,7 @@ uint32_t dc_link_bandwidth_kbps(
 
 }
 
-const struct dc_link_settings *dc_link_get_verified_link_cap(
+const struct dc_link_settings *dc_link_get_link_cap(
 		const struct dc_link *link)
 {
 	if (link->preferred_link_setting.lane_count != LANE_COUNT_UNKNOWN &&
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index acb4f829e042e1a8cd350cdb3513ab5115454306..a6424c70f4c5a3e6425d07adf1ccfa46c78bf497 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -1548,8 +1548,7 @@ bool dp_validate_mode_timing(
 		timing->v_addressable == (uint32_t) 480)
 		return true;
 
-	/* We always use verified link settings */
-	link_setting = dc_link_get_verified_link_cap(link);
+	link_setting = dc_link_get_link_cap(link);
 
 	/* TODO: DYNAMIC_VALIDATION needs to be implemented */
 	/*if (flags.DYNAMIC_VALIDATION == 1 &&
@@ -2587,6 +2586,9 @@ void detect_edp_sink_caps(struct dc_link *link)
 	uint32_t entry;
 	uint32_t link_rate_in_khz;
 	enum dc_link_rate link_rate = LINK_RATE_UNKNOWN;
+	union lane_count_set lane_count_set = { {0} };
+	uint8_t link_bw_set;
+	uint8_t link_rate_set;
 
 	retrieve_link_cap(link);
 	link->dpcd_caps.edp_supported_link_rates_count = 0;
@@ -2612,6 +2614,33 @@ void detect_edp_sink_caps(struct dc_link *link)
 		}
 	}
 	link->verified_link_cap = link->reported_link_cap;
+
+	// Read DPCD 00101h to find out the number of lanes currently set
+	core_link_read_dpcd(link, DP_LANE_COUNT_SET,
+			&lane_count_set.raw, sizeof(lane_count_set));
+	link->cur_link_settings.lane_count = lane_count_set.bits.LANE_COUNT_SET;
+
+	// Read DPCD 00100h to find if standard link rates are set
+	core_link_read_dpcd(link, DP_LINK_BW_SET,
+			&link_bw_set, sizeof(link_bw_set));
+
+	if (link_bw_set == 0) {
+		/* If standard link rates are not being used,
+		 * Read DPCD 00115h to find the link rate set used
+		 * read DPCD 00115h to find the link rate set used.
+		core_link_read_dpcd(link, DP_LINK_RATE_SET,
+				&link_rate_set, sizeof(link_rate_set));
+
+		if (link_rate_set < link->dpcd_caps.edp_supported_link_rates_count) {
+			link->cur_link_settings.link_rate =
+				link->dpcd_caps.edp_supported_link_rates[link_rate_set];
+			link->cur_link_settings.link_rate_set = link_rate_set;
+			link->cur_link_settings.use_link_rate_set = true;
+		}
+	} else {
+		link->cur_link_settings.link_rate = link_bw_set;
+		link->cur_link_settings.use_link_rate_set = false;
+	}
 }
 
 void dc_link_dp_enable_hpd(const struct dc_link *link)
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index 3830e6ce13558bac96d34bd684c40c19eacfe5bd..eac7186e4f0841fdb771dd722b59dc7e819afb16 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -2067,12 +2067,14 @@ void dc_resource_state_construct(
  * Checks HW resource availability and bandwidth requirement.
  * @dc: dc struct for this driver
  * @new_ctx: state to be validated
+ * @fast_validate: set to true if only a yes/no answer on support is needed
  *
  * Return: DC_OK if the result can be programmed.  Otherwise, an error code.
  */
 enum dc_status dc_validate_global_state(
 		struct dc *dc,
-		struct dc_state *new_ctx)
+		struct dc_state *new_ctx,
+		bool fast_validate)
 {
 	enum dc_status result = DC_ERROR_UNEXPECTED;
 	int i, j;
@@ -2127,7 +2129,7 @@ enum dc_status dc_validate_global_state(
 	result = resource_build_scaling_params_for_context(dc, new_ctx);
 
 	if (result == DC_OK)
-		if (!dc->res_pool->funcs->validate_bandwidth(dc, new_ctx))
+		if (!dc->res_pool->funcs->validate_bandwidth(dc, new_ctx, fast_validate))
 			result = DC_FAIL_BANDWIDTH_VALIDATE;
 
 	return result;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
index f7a2939022347730877b272c87ed745999ed5c29..e10479d58c11ecae0c988044f4d1258690a62e4b 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
@@ -211,7 +211,8 @@ static void delay_cursor_until_vupdate(struct pipe_ctx *pipe_ctx, struct dc *dc)
 			ASIC_REV_IS_RAVEN(stream->ctx->asic_id.hw_internal_rev)) {
 
 		vupdate_line = get_vupdate_offset_from_vsync(pipe_ctx);
-		dc_stream_get_crtc_position(dc, &stream, 1, &vpos, &nvpos);
+		if (!dc_stream_get_crtc_position(dc, &stream, 1, &vpos, &nvpos))
+			return;
 
 		if (vpos >= vupdate_line)
 			return;
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index dc6a2c236ba7ba14f75072dda0595fa226108cb9..3459e39714bcea0949dd2fab69e3638e86e9a16f 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -39,7 +39,7 @@
 #include "inc/hw/dmcu.h"
 #include "dml/display_mode_lib.h"
 
-#define DC_VER "3.2.25"
+#define DC_VER "3.2.26"
 
 #define MAX_SURFACES 3
 #define MAX_PLANES 6
@@ -66,8 +66,27 @@ struct dc_plane_cap {
 	uint32_t blends_with_above : 1;
 	uint32_t blends_with_below : 1;
 	uint32_t per_pixel_alpha : 1;
-	uint32_t supports_argb8888 : 1;
-	uint32_t supports_nv12 : 1;
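+	// pixel formats the plane hardware can scan out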
+	struct {
+		uint32_t argb8888 : 1;
+		uint32_t nv12 : 1;
+		uint32_t fp16 : 1;
+	} pixel_format_support;
+	// max upscaling factor x1000
+	// upscaling factors are always >= 1
+	// for example, 1080p -> 8K is 4.0, or 4000 raw value
+	struct {
+		uint32_t argb8888;
+		uint32_t nv12;
+		uint32_t fp16;
+	} max_upscale_factor;
+	// max downscale factor x1000
+	// downscale factors are always <= 1
+	// for example, 8K -> 1080p is 0.25, or 250 raw value
+	struct {
+		uint32_t argb8888;
+		uint32_t nv12;
+		uint32_t fp16;
+	} max_downscale_factor;
 };
 
 struct dc_caps {
@@ -183,6 +202,7 @@ struct dc_config {
 	bool disable_disp_pll_sharing;
 	bool fbc_support;
 	bool optimize_edp_link_rate;
+	bool disable_fractional_pwm;
 	bool allow_seamless_boot_optimization;
 };
 
@@ -226,6 +246,57 @@ struct dc_clocks {
 	bool p_state_change_support;
 };
 
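+/*
+ * Optional profiling of bandwidth validation: accumulates per-stage tick
+ * counts and tracks how often the full pass was skipped or failed.
+ */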
+struct dc_bw_validation_profile {
+	bool enable;
+
+	unsigned long long total_ticks;
+	unsigned long long voltage_level_ticks;
+	unsigned long long watermark_ticks;
+	unsigned long long rq_dlg_ticks;
+
+	unsigned long long total_count;
+	unsigned long long skip_fast_count;
+	unsigned long long skip_pass_count;
+	unsigned long long skip_fail_count;
+};
+
+#define BW_VAL_TRACE_SETUP() \
+		unsigned long long end_tick = 0; \
+		unsigned long long voltage_level_tick = 0; \
+		unsigned long long watermark_tick = 0; \
+		unsigned long long start_tick = dc->debug.bw_val_profile.enable ? \
+				dm_get_timestamp(dc->ctx) : 0
+
+#define BW_VAL_TRACE_COUNT() \
+		if (dc->debug.bw_val_profile.enable) \
+			dc->debug.bw_val_profile.total_count++
+
+#define BW_VAL_TRACE_SKIP(status) \
+		if (dc->debug.bw_val_profile.enable) { \
+			if (!voltage_level_tick) \
+				voltage_level_tick = dm_get_timestamp(dc->ctx); \
+			dc->debug.bw_val_profile.skip_ ## status ## _count++; \
+		}
+
+#define BW_VAL_TRACE_END_VOLTAGE_LEVEL() \
+		if (dc->debug.bw_val_profile.enable) \
+			voltage_level_tick = dm_get_timestamp(dc->ctx)
+
+#define BW_VAL_TRACE_END_WATERMARKS() \
+		if (dc->debug.bw_val_profile.enable) \
+			watermark_tick = dm_get_timestamp(dc->ctx)
+
+#define BW_VAL_TRACE_FINISH() \
+		if (dc->debug.bw_val_profile.enable) { \
+			end_tick = dm_get_timestamp(dc->ctx); \
+			dc->debug.bw_val_profile.total_ticks += end_tick - start_tick; \
+			dc->debug.bw_val_profile.voltage_level_ticks += voltage_level_tick - start_tick; \
+			if (watermark_tick) { \
+				dc->debug.bw_val_profile.watermark_ticks += watermark_tick - voltage_level_tick; \
+				dc->debug.bw_val_profile.rq_dlg_ticks += end_tick - watermark_tick; \
+			} \
+		}
+
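
A minimal sketch of how a validate_bandwidth() implementation could weave the tracing
macros around its voltage-level and watermark phases; the skeleton body and the early
return on fast_validate are assumptions, not taken from any specific implementation:

static bool example_validate_bandwidth(struct dc *dc, struct dc_state *context,
				       bool fast_validate)
{
	BW_VAL_TRACE_SETUP();

	BW_VAL_TRACE_COUNT();

	/* ... derive the required voltage level for the new context ... */
	BW_VAL_TRACE_END_VOLTAGE_LEVEL();

	if (fast_validate) {
		BW_VAL_TRACE_SKIP(fast);
		BW_VAL_TRACE_FINISH();
		return true;
	}

	/* ... compute watermarks and rq/dlg parameters ... */
	BW_VAL_TRACE_END_WATERMARKS();

	BW_VAL_TRACE_FINISH();

	return true;
}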
 struct dc_debug_options {
 	enum visual_confirm visual_confirm;
 	bool sanity_checks;
@@ -279,6 +350,7 @@ struct dc_debug_options {
 	unsigned int force_odm_combine; //bit vector based on otg inst
 	unsigned int force_fclk_khz;
 	bool disable_tri_buf;
+	struct dc_bw_validation_profile bw_val_profile;
 };
 
 struct dc_debug_data {
@@ -638,9 +710,14 @@ enum dc_status dc_validate_plane(struct dc *dc, const struct dc_plane_state *pla
 
 void get_clock_requirements_for_state(struct dc_state *state, struct AsicStateEx *info);
 
+/*
+ * fast_validate: we return after determining if we can support the new state,
+ * but before we populate the programming info
+ */
 enum dc_status dc_validate_global_state(
 		struct dc *dc,
-		struct dc_state *new_ctx);
+		struct dc_state *new_ctx,
+		bool fast_validate);
 
 
 void dc_resource_state_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h
index 4e26d6e93b315fc61d4eee5dd83452f05872f4ba..cc7ffac64c9611b00c254300e47c923501bdaacc 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_link.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_link.h
@@ -250,7 +250,7 @@ uint32_t dc_link_bandwidth_kbps(
 	const struct dc_link *link,
 	const struct dc_link_settings *link_setting);
 
-const struct dc_link_settings *dc_link_get_verified_link_cap(
+const struct dc_link_settings *dc_link_get_link_cap(
 		const struct dc_link *link);
 
 bool dc_submit_i2c(
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
index da96229db53a76a91e739d3f85bbadc8df78f689..855360b1414f466ca48c5cf0166a8ce54f2f5ce9 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
@@ -50,6 +50,7 @@
 #define MCP_ABM_LEVEL_SET 0x65
 #define MCP_ABM_PIPE_SET 0x66
 #define MCP_BL_SET 0x67
+#define MCP_BL_SET_PWM_FRAC     0x6A  /* Enable or disable Fractional PWM */
 
 #define MCP_DISABLE_ABM_IMMEDIATELY 255
 
@@ -390,6 +391,23 @@ static bool dce_abm_init_backlight(struct abm *abm)
 	REG_UPDATE(BL_PWM_GRP1_REG_LOCK,
 			BL_PWM_GRP1_REG_LOCK, 0);
 
+	/* Wait until microcontroller is ready to process interrupt */
+	REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0, 100, 800);
+
+	/* Set PWM fractional enable/disable */
+	value = (abm->ctx->dc->config.disable_fractional_pwm == false) ? 1 : 0;
+	REG_WRITE(MASTER_COMM_DATA_REG1, value);
+
+	/* Set command to enable or disable fractional PWM microcontroller */
+	REG_UPDATE(MASTER_COMM_CMD_REG, MASTER_COMM_CMD_REG_BYTE0,
+			MCP_BL_SET_PWM_FRAC);
+
+	/* Notify microcontroller of new command */
+	REG_UPDATE(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 1);
+
+	/* Ensure command has been executed before continuing */
+	REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0, 100, 800);
+
 	return true;
 }
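
The new MCP command only takes effect if the config flag is set before the ABM/backlight
init runs; a minimal sketch, assuming the flag is simply toggled on the dc config by the
display manager (the helper name is illustrative, and the default keeps fractional PWM
enabled):

static void example_configure_pwm(struct dc *dc, bool want_fractional_pwm)
{
	/* read by dce_abm_init_backlight() when it issues MCP_BL_SET_PWM_FRAC */
	dc->config.disable_fractional_pwm = !want_fractional_pwm;
}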
 
diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
index 767d37c6d942bf80242d80e28dab623d1c85c320..e938bf9986d38fa4e2e91243a1d93363d8407ccc 100644
--- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
@@ -380,7 +380,24 @@ static const struct resource_caps res_cap = {
 
 static const struct dc_plane_cap plane_cap = {
 	.type = DC_PLANE_TYPE_DCE_RGB,
-	.supports_argb8888 = true,
+
+	.pixel_format_support = {
+			.argb8888 = true,
+			.nv12 = false,
+			.fp16 = false
+	},
+
+	.max_upscale_factor = {
+			.argb8888 = 16000,
+			.nv12 = 1,
+			.fp16 = 1
+	},
+
+	.max_downscale_factor = {
+			.argb8888 = 250,
+			.nv12 = 1,
+			.fp16 = 1
+	}
 };
 
 #define CTX  ctx
@@ -761,7 +778,8 @@ static enum dc_status build_mapped_resource(
 
 bool dce100_validate_bandwidth(
 	struct dc  *dc,
-	struct dc_state *context)
+	struct dc_state *context,
+	bool fast_validate)
 {
 	int i;
 	bool at_least_one_pipe = false;
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
index 7c4914b2b524c98b9d6fabdf1fb429e382ebab57..dcd04e9ea76bda85bc44db181cbf62ae163966c6 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
@@ -397,14 +397,48 @@ static const struct dc_plane_cap plane_cap = {
 		.blends_with_below = true,
 		.blends_with_above = true,
 		.per_pixel_alpha = 1,
-		.supports_argb8888 = true,
+
+		.pixel_format_support = {
+				.argb8888 = true,
+				.nv12 = false,
+				.fp16 = false
+		},
+
+		.max_upscale_factor = {
+				.argb8888 = 16000,
+				.nv12 = 1,
+				.fp16 = 1
+		},
+
+		.max_downscale_factor = {
+				.argb8888 = 250,
+				.nv12 = 1,
+				.fp16 = 1
+		}
 };
 
 static const struct dc_plane_cap underlay_plane_cap = {
 		.type = DC_PLANE_TYPE_DCE_UNDERLAY,
 		.blends_with_above = true,
 		.per_pixel_alpha = 1,
-		.supports_nv12 = true
+
+		.pixel_format_support = {
+				.argb8888 = false,
+				.nv12 = true,
+				.fp16 = false
+		},
+
+		.max_upscale_factor = {
+				.argb8888 = 1,
+				.nv12 = 16000,
+				.fp16 = 1
+		},
+
+		.max_downscale_factor = {
+				.argb8888 = 1,
+				.nv12 = 250,
+				.fp16 = 1
+		}
 };
 
 #define CTX  ctx
@@ -869,7 +903,8 @@ static enum dc_status build_mapped_resource(
 
 static bool dce110_validate_bandwidth(
 	struct dc *dc,
-	struct dc_state *context)
+	struct dc_state *context,
+	bool fast_validate)
 {
 	bool result = false;
 
diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
index 2f28a74383f55d8387ee58ab25b0a6d5aba45ec9..a480b15f688591603dc5ad0212cfa8603232b019 100644
--- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
@@ -399,7 +399,24 @@ static const struct resource_caps polaris_11_resource_cap = {
 
 static const struct dc_plane_cap plane_cap = {
 	.type = DC_PLANE_TYPE_DCE_RGB,
-	.supports_argb8888 = true,
+
+	.pixel_format_support = {
+			.argb8888 = true,
+			.nv12 = false,
+			.fp16 = false
+	},
+
+	.max_upscale_factor = {
+			.argb8888 = 16000,
+			.nv12 = 1,
+			.fp16 = 1
+	},
+
+	.max_downscale_factor = {
+			.argb8888 = 250,
+			.nv12 = 1,
+			.fp16 = 1
+	}
 };
 
 #define CTX  ctx
@@ -809,7 +826,8 @@ static enum dc_status build_mapped_resource(
 
 bool dce112_validate_bandwidth(
 	struct dc *dc,
-	struct dc_state *context)
+	struct dc_state *context,
+	bool fast_validate)
 {
 	bool result = false;
 
diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.h b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.h
index 95a4033962196e96995069b202102ca9450c412a..1f57ebc6f9b440f69c0d778eb6735aad2b3085dd 100644
--- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.h
@@ -44,7 +44,8 @@ enum dc_status dce112_validate_with_context(
 
 bool dce112_validate_bandwidth(
 	struct dc *dc,
-	struct dc_state *context);
+	struct dc_state *context,
+	bool fast_validate);
 
 enum dc_status dce112_add_stream_to_ctx(
 		struct dc *dc,
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
index 01ea503faa12c2ba94b20513d9b5faf7bcd75c7e..6d49c7143c672f995f59f741be9d6c1c35780eb6 100644
--- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
@@ -456,7 +456,24 @@ static const struct resource_caps res_cap = {
 
 static const struct dc_plane_cap plane_cap = {
 	.type = DC_PLANE_TYPE_DCE_RGB,
-	.supports_argb8888 = true,
+
+	.pixel_format_support = {
+			.argb8888 = true,
+			.nv12 = false,
+			.fp16 = false
+	},
+
+	.max_upscale_factor = {
+			.argb8888 = 16000,
+			.nv12 = 1,
+			.fp16 = 1
+	},
+
+	.max_downscale_factor = {
+			.argb8888 = 250,
+			.nv12 = 1,
+			.fp16 = 1
+	}
 };
 
 static const struct dc_debug_options debug_defaults = {
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
index c7899ec96287d6dab9ed2a56dabdd59b9879ecb5..27d0cc39496386a15db49e334c8c17919af58b02 100644
--- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
@@ -389,7 +389,24 @@ static const struct resource_caps res_cap_83 = {
 
 static const struct dc_plane_cap plane_cap = {
 	.type = DC_PLANE_TYPE_DCE_RGB,
-	.supports_argb8888 = true,
+
+	.pixel_format_support = {
+			.argb8888 = true,
+			.nv12 = false,
+			.fp16 = false
+	},
+
+	.max_upscale_factor = {
+			.argb8888 = 16000,
+			.nv12 = 1,
+			.fp16 = 1
+	},
+
+	.max_downscale_factor = {
+			.argb8888 = 250,
+			.nv12 = 1,
+			.fp16 = 1
+	}
 };
 
 static const struct dce_dmcu_registers dmcu_regs = {
@@ -795,7 +812,8 @@ static void destruct(struct dce110_resource_pool *pool)
 
 bool dce80_validate_bandwidth(
 	struct dc *dc,
-	struct dc_state *context)
+	struct dc_state *context,
+	bool fast_validate)
 {
 	int i;
 	bool at_least_one_pipe = false;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
index 3268ab089363ba2626bc7a8910ca01652728f838..db98ba361686a8d51b9d3f44fac5b2f958758265 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
@@ -247,7 +247,7 @@
 	.field_name = reg_name ## __ ## field_name ## post_fix
 
 /* Mask/shift struct generation macro for all ASICs (including those with reduced functionality) */
-#define HUBP_MASK_SH_LIST_DCN(mask_sh)\
+#define HUBP_MASK_SH_LIST_DCN_COMMON(mask_sh)\
 	HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_BLANK_EN, mask_sh),\
 	HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_TTU_DISABLE, mask_sh),\
 	HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_UNDERFLOW_STATUS, mask_sh),\
@@ -331,7 +331,6 @@
 	HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, META_CHUNK_SIZE, mask_sh),\
 	HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, MIN_META_CHUNK_SIZE, mask_sh),\
 	HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, DPTE_GROUP_SIZE, mask_sh),\
-	HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, MPTE_GROUP_SIZE, mask_sh),\
 	HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, SWATH_HEIGHT, mask_sh),\
 	HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, PTE_ROW_HEIGHT_LINEAR, mask_sh),\
 	HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG_C, CHUNK_SIZE_C, mask_sh),\
@@ -339,7 +338,6 @@
 	HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG_C, META_CHUNK_SIZE_C, mask_sh),\
 	HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG_C, MIN_META_CHUNK_SIZE_C, mask_sh),\
 	HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG_C, DPTE_GROUP_SIZE_C, mask_sh),\
-	HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG_C, MPTE_GROUP_SIZE_C, mask_sh),\
 	HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG_C, SWATH_HEIGHT_C, mask_sh),\
 	HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG_C, PTE_ROW_HEIGHT_LINEAR_C, mask_sh),\
 	HUBP_SF(HUBPREQ0_BLANK_OFFSET_0, REFCYC_H_BLANK_END, mask_sh),\
@@ -373,6 +371,11 @@
 	HUBP_SF(HUBPREQ0_DCN_SURF0_TTU_CNTL1, REFCYC_PER_REQ_DELIVERY_PRE, mask_sh),\
 	HUBP_SF(HUBP0_HUBP_CLK_CNTL, HUBP_CLOCK_ENABLE, mask_sh)
 
+#define HUBP_MASK_SH_LIST_DCN(mask_sh)\
+	HUBP_MASK_SH_LIST_DCN_COMMON(mask_sh),\
+	HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, MPTE_GROUP_SIZE, mask_sh),\
+	HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG_C, MPTE_GROUP_SIZE_C, mask_sh)
+
 /* Mask/shift struct generation macro for ASICs with VM */
 #define HUBP_MASK_SH_LIST_DCN_VM(mask_sh)\
 	HUBP_SF(HUBPREQ0_NOM_PARAMETERS_0, DST_Y_PER_PTE_ROW_NOM_L, mask_sh),\
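
With the list split, an ASIC variant that lacks the MPTE_GROUP_SIZE fields can build its
mask/shift list from the common macro alone; a minimal sketch, where the macro name below
is purely illustrative:

#define HUBP_MASK_SH_LIST_EXAMPLE_ASIC(mask_sh)\
	HUBP_MASK_SH_LIST_DCN_COMMON(mask_sh),\
	HUBP_MASK_SH_LIST_DCN_VM(mask_sh)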
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
index 79f4fbb8a14532794922872acfe1a14cb5fde8ca..7eccb54c421d9090c9fdbc5a0b45f8c1cf4b7454 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
@@ -521,8 +521,24 @@ static const struct dc_plane_cap plane_cap = {
 	.blends_with_above = true,
 	.blends_with_below = true,
 	.per_pixel_alpha = true,
-	.supports_argb8888 = true,
-	.supports_nv12 = true
+
+	.pixel_format_support = {
+			.argb8888 = true,
+			.nv12 = true,
+			.fp16 = true
+	},
+
+	.max_upscale_factor = {
+			.argb8888 = 16000,
+			.nv12 = 16000,
+			.fp16 = 1
+	},
+
+	.max_downscale_factor = {
+			.argb8888 = 250,
+			.nv12 = 250,
+			.fp16 = 1
+	}
 };
 
 static const struct dc_debug_options debug_defaults_drv = {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
index 827541e34ee2a97343cd08dd9f6975fd2382e7b4..88a82a23d259087e27a795b1eb470ee5c2339765 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
@@ -97,7 +97,8 @@ struct resource_funcs {
 			const struct encoder_init_data *init);
 	bool (*validate_bandwidth)(
 					struct dc *dc,
-					struct dc_state *context);
+					struct dc_state *context,
+					bool fast_validate);
 
 	enum dc_status (*validate_global)(
 		struct dc *dc,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h b/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h
index 86ec3f69c7652ac06bfca24e53a36a09d2368fad..263c09630c06dcc96e3c2a2e21e8e34971d23ed9 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h
@@ -621,7 +621,8 @@ extern const struct dcn_ip_params dcn10_ip_defaults;
 
 bool dcn_validate_bandwidth(
 		struct dc *dc,
-		struct dc_state *context);
+		struct dc_state *context,
+		bool fast_validate);
 
 unsigned int dcn_find_dcfclk_suits_all(
 	const struct dc *dc,
diff --git a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
index 26a7d2c7f4fab2cedc1bb3d41ab27eb0e52842fe..c8b168b3413b6d529d892d116b77edddbbb22132 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
@@ -540,6 +540,8 @@ struct smu_funcs
 	int (*get_fan_speed_percent)(struct smu_context *smu, uint32_t *speed);
 	int (*set_fan_speed_percent)(struct smu_context *smu, uint32_t speed);
 	int (*set_fan_speed_rpm)(struct smu_context *smu, uint32_t speed);
+	int (*set_xgmi_pstate)(struct smu_context *smu, uint32_t pstate);
+
 };
 
 #define smu_init_microcode(smu) \
@@ -723,6 +725,8 @@ struct smu_funcs
 	((smu)->funcs->get_sclk ? (smu)->funcs->get_sclk((smu), (low)) : 0)
 #define smu_get_mclk(smu, low) \
 	((smu)->funcs->get_mclk ? (smu)->funcs->get_mclk((smu), (low)) : 0)
+#define smu_set_xgmi_pstate(smu, pstate) \
+		((smu)->funcs->set_xgmi_pstate ? (smu)->funcs->set_xgmi_pstate((smu), (pstate)) : 0)
 
 
 extern int smu_get_atom_data_table(struct smu_context *smu, uint32_t table,
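
A minimal sketch of a caller using the new wrapper; because the macro checks for a NULL
->set_xgmi_pstate, the call degrades to a no-op returning 0 on SMU implementations that do
not provide the hook (the function name and pstate value are illustrative):

static int example_raise_xgmi_pstate(struct smu_context *smu)
{
	/* returns 0 without side effects if the backend lacks ->set_xgmi_pstate */
	return smu_set_xgmi_pstate(smu, 1);
}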
diff --git a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
index d2e2a4e2d0eb160c0a7987420efa4805db970353..c478b38662d09946fef1ff66b27db81d89550f7f 100644
--- a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
+++ b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
@@ -1893,6 +1893,13 @@ static int smu_v11_0_set_fan_speed_rpm(struct smu_context *smu,
 	return ret;
 }
 
+static int smu_v11_0_set_xgmi_pstate(struct smu_context *smu,
+				     uint32_t pstate)
+{
+	/* TODO: send the message to the SMU to set the XGMI pstate */
+	return 0;
+}
+
 static const struct smu_funcs smu_v11_0_funcs = {
 	.init_microcode = smu_v11_0_init_microcode,
 	.load_microcode = smu_v11_0_load_microcode,
@@ -1947,6 +1954,7 @@ static const struct smu_funcs smu_v11_0_funcs = {
 	.get_fan_speed_percent = smu_v11_0_get_fan_speed_percent,
 	.set_fan_speed_percent = smu_v11_0_set_fan_speed_percent,
 	.set_fan_speed_rpm = smu_v11_0_set_fan_speed_rpm,
+	.set_xgmi_pstate = smu_v11_0_set_xgmi_pstate,
 };
 
 void smu_v11_0_set_smu_funcs(struct smu_context *smu)
@@ -1954,7 +1962,6 @@ void smu_v11_0_set_smu_funcs(struct smu_context *smu)
 	struct amdgpu_device *adev = smu->adev;
 
 	smu->funcs = &smu_v11_0_funcs;
-
 	switch (adev->asic_type) {
 	case CHIP_VEGA20:
 		vega20_set_ppt_funcs(smu);
diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h
index e3a97da4add98f50997af529653559a04b462012..4788730dbe78430ed75005253d92063131fa0625 100644
--- a/include/uapi/drm/amdgpu_drm.h
+++ b/include/uapi/drm/amdgpu_drm.h
@@ -528,6 +528,8 @@ struct drm_amdgpu_gem_va {
 #define AMDGPU_CHUNK_ID_SYNCOBJ_OUT     0x05
 #define AMDGPU_CHUNK_ID_BO_HANDLES      0x06
 #define AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES	0x07
+#define AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT    0x08
+#define AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL  0x09
 
 struct drm_amdgpu_cs_chunk {
 	__u32		chunk_id;
@@ -608,6 +610,12 @@ struct drm_amdgpu_cs_chunk_sem {
 	__u32 handle;
 };
 
+struct drm_amdgpu_cs_chunk_syncobj {
+	__u32 handle;
+	__u32 flags;
+	__u64 point;
+};
+
 #define AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ	0
 #define AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ_FD	1
 #define AMDGPU_FENCE_TO_HANDLE_GET_SYNC_FILE_FD	2
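
A minimal sketch of how userspace might populate one of the new timeline chunks for the CS
ioctl, using the struct added above; the handle and point values are placeholders and the
helper name is an assumption:

#include <stdint.h>
#include <drm/amdgpu_drm.h>

static void example_fill_timeline_signal_chunk(struct drm_amdgpu_cs_chunk *chunk,
					       struct drm_amdgpu_cs_chunk_syncobj *dep,
					       uint32_t syncobj_handle, uint64_t point)
{
	dep->handle = syncobj_handle;
	dep->flags = 0;
	dep->point = point;

	chunk->chunk_id = AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL;
	chunk->length_dw = sizeof(*dep) / 4;	/* chunk length in dwords */
	chunk->chunk_data = (uintptr_t)dep;
}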