diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 92d415a4ee47e2a712a8ca76376b5c32a7279d47..3913a75924c623982ebad68b6760ebe5b0ebb4ff 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -702,7 +702,7 @@ MODULE_PARM_DESC(hws_gws_support, "MEC FW support gws barriers (false = not supp
   * DOC: queue_preemption_timeout_ms (int)
   * queue preemption timeout in ms (1 = Minimum, 9000 = default)
   */
-int queue_preemption_timeout_ms;
+int queue_preemption_timeout_ms = 9000;
 module_param(queue_preemption_timeout_ms, int, 0644);
 MODULE_PARM_DESC(queue_preemption_timeout_ms, "queue preemption timeout in ms (1 = Minimum, 9000 = default)");
 #endif
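
The amdgpu_drv.c hunk above gives queue_preemption_timeout_ms an explicit initializer; without one, the file-scope int is zero-initialized, so the effective default was 0 ms rather than the documented 9000 ms. A minimal sketch of the module-parameter pattern involved (every name below is made up for illustration; this is not code from the patch):

/*
 * Illustrative sketch, not driver code: a module parameter only gets a
 * non-zero default if its backing variable is initialised, since a plain
 * "int foo;" at file scope is zero-initialised.
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/printk.h>

static int demo_timeout_ms = 9000;	/* default used when not overridden */
module_param(demo_timeout_ms, int, 0644);
MODULE_PARM_DESC(demo_timeout_ms, "demo timeout in ms (9000 = default)");

static int __init demo_init(void)
{
	pr_info("demo_timeout_ms = %d\n", demo_timeout_ms);
	return 0;
}

static void __exit demo_exit(void)
{
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
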
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index 704c7034da09df319700f781b78f6cecb6caad2a..193d53720d9baeca7800e376bed88d9cc2273542 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -2886,7 +2886,8 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
 		return ret;
 	}
 	/* APU does not have its own dedicated memory */
-	if (!(adev->flags & AMD_IS_APU)) {
+	if (!(adev->flags & AMD_IS_APU) &&
+	     (adev->asic_type != CHIP_VEGA10)) {
 		ret = device_create_file(adev->dev,
 				&dev_attr_mem_busy_percent);
 		if (ret) {
@@ -2966,7 +2967,8 @@ void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev)
 		device_remove_file(adev->dev,
 				&dev_attr_pp_od_clk_voltage);
 	device_remove_file(adev->dev, &dev_attr_gpu_busy_percent);
-	if (!(adev->flags & AMD_IS_APU))
+	if (!(adev->flags & AMD_IS_APU) &&
+	     (adev->asic_type != CHIP_VEGA10))
 		device_remove_file(adev->dev, &dev_attr_mem_busy_percent);
 	if (!(adev->flags & AMD_IS_APU))
 		device_remove_file(adev->dev, &dev_attr_pcie_bw);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index 0061a0e8ab78c402ee831a9d4f76e98c932f79d2..2932ade7dbd055f46565218b2d614bd7b16a16ae 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -2624,9 +2624,6 @@ static int gfx_v10_0_cp_gfx_resume(struct amdgpu_device *adev)
 	rb_bufsz = order_base_2(ring->ring_size / 8);
 	tmp = REG_SET_FIELD(0, CP_RB1_CNTL, RB_BUFSZ, rb_bufsz);
 	tmp = REG_SET_FIELD(tmp, CP_RB1_CNTL, RB_BLKSZ, rb_bufsz - 2);
-#ifdef __BIG_ENDIAN
-	tmp = REG_SET_FIELD(tmp, CP_RB1_CNTL, BUF_SWAP, 1);
-#endif
 	WREG32_SOC15(GC, 0, mmCP_RB1_CNTL, tmp);
 	/* Initialize the ring buffer's write pointers */
 	ring->wptr = 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
index c441e6ce95ecbb6e57e990eec7cc3affb7547b85..988c0adaca916e797d42b236ad39a487adf013a4 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
@@ -1010,8 +1010,8 @@ static int vcn_v2_0_start_dpg_mode(struct amdgpu_device *adev, bool indirect)
 
 	if (indirect)
 		psp_update_vcn_sram(adev, 0, adev->vcn.dpg_sram_gpu_addr,
-			(uint32_t)((uint64_t)adev->vcn.dpg_sram_curr_addr -
-			(uint64_t)adev->vcn.dpg_sram_cpu_addr));
+				    (uint32_t)((uintptr_t)adev->vcn.dpg_sram_curr_addr -
+					       (uintptr_t)adev->vcn.dpg_sram_cpu_addr));
 
 	/* force RBC into idle state */
 	rb_bufsz = order_base_2(ring->ring_size);
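
The vcn_v2_0.c change above only alters the casts used when computing how many bytes of indirect DPG SRAM programming have been written: the difference between the current and base CPU pointers is now taken through uintptr_t, which matches the pointer width on both 32-bit and 64-bit builds, instead of forcing the pointers through uint64_t. A minimal, self-contained sketch of that arithmetic (all names here are illustrative, not taken from the driver):

/*
 * Illustrative sketch: computing a byte offset between two pointers into
 * the same buffer.  Casting through uintptr_t keeps the conversion
 * well-defined for the platform's pointer width, whereas casting a
 * pointer straight to uint64_t can trigger pointer-to-integer-size
 * warnings on 32-bit builds.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t sram[64];
	uint32_t *base = sram;
	uint32_t *curr = sram + 10;	/* pretend 10 dwords were written */

	uint32_t byte_offset =
		(uint32_t)((uintptr_t)curr - (uintptr_t)base);

	printf("bytes written: %u\n", byte_offset);	/* prints 40 */
	return 0;
}
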
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index 26ea46de37229e2cc504e20109a405dd695303a7..2e2f7241f3819aa9a29a4fdd7a6977a592221da9 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -738,7 +738,6 @@ int kgd2kfd_post_reset(struct kfd_dev *kfd)
 	if (ret)
 		return ret;
 	count = atomic_dec_return(&kfd_locked);
-	WARN_ONCE(count != 0, "KFD reset ref. error");
 
 	atomic_set(&kfd->sram_ecc_flag, 0);
 
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c
index be49fc7f4abe7b350d9840c52cbfae7d305f6fcf..ffd0014ec3b501c7b7aea8aa8749cb9584d95020 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c
@@ -126,7 +126,7 @@ static void dsc2_get_enc_caps(struct dsc_enc_caps *dsc_enc_caps, int pixel_clock
 
 	/* Maximum total throughput with all the slices combined. This is different from how DP spec specifies it.
 	 * Our decoder's total throughput in Pix/s is equal to DISPCLK. This is then shared between slices.
-	 * The value below is the absolute maximum value. The actual througput may be lower, but it'll always
+	 * The value below is the absolute maximum value. The actual throughput may be lower, but it'll always
 	 * be sufficient to process the input pixel rate fed into a single DSC engine.
 	 */
 	dsc_enc_caps->max_total_throughput_mps = DCN20_MAX_DISPLAY_CLOCK_Mhz;
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
index 77e7a0f8a527e3d3450db6f2def4096ffb0d7f2c..ef5f84a144c35d7ad449a9b173367929ce9fdf2e 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
+++ b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
@@ -47,7 +47,7 @@ static bool dsc_buff_block_size_from_dpcd(int dpcd_buff_block_size, int *buff_bl
 		*buff_block_size = 64 * 1024;
 		break;
 	default: {
-			dm_error("%s: DPCD DSC buffer size not recoginzed.\n", __func__);
+			dm_error("%s: DPCD DSC buffer size not recognized.\n", __func__);
 			return false;
 		}
 	}
@@ -63,7 +63,7 @@ static bool dsc_line_buff_depth_from_dpcd(int dpcd_line_buff_bit_depth, int *lin
 	else if (dpcd_line_buff_bit_depth == 8)
 		*line_buff_bit_depth = 8;
 	else {
-		dm_error("%s: DPCD DSC buffer depth not recoginzed.\n", __func__);
+		dm_error("%s: DPCD DSC buffer depth not recognized.\n", __func__);
 		return false;
 	}
 
@@ -123,7 +123,7 @@ static bool dsc_throughput_from_dpcd(int dpcd_throughput, int *throughput)
 		*throughput = 1000;
 		break;
 	default: {
-			dm_error("%s: DPCD DSC througput mode not recoginzed.\n", __func__);
+			dm_error("%s: DPCD DSC throughput mode not recognized.\n", __func__);
 			return false;
 		}
 	}
@@ -152,7 +152,7 @@ static bool dsc_bpp_increment_div_from_dpcd(int bpp_increment_dpcd, uint32_t *bp
 		*bpp_increment_div = 1;
 		break;
 	default: {
-		dm_error("%s: DPCD DSC bits-per-pixel increment not recoginzed.\n", __func__);
+		dm_error("%s: DPCD DSC bits-per-pixel increment not recognized.\n", __func__);
 		return false;
 	}
 	}
diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
index a0fb36360bea3a915716f15d95055136a3e31337..31152d495f695fbc7884203a601813da079245f7 100644
--- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
@@ -820,6 +820,10 @@ static int smu_smc_table_hw_init(struct smu_context *smu,
 		if (ret)
 			return ret;
 
+		ret = smu_get_clk_info_from_vbios(smu);
+		if (ret)
+			return ret;
+
 		/*
 		 * check if the format_revision in vbios is up to pptable header
 		 * version, and the structure size is not 0.
diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
index 527f7fa442efc6af68ce9abb23d67a90e56cf49e..27e5c8088f1bfb6790acfa83ded4bd6233088ef8 100644
--- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
@@ -574,15 +574,19 @@ static int navi10_dpm_set_uvd_enable(struct smu_context *smu, bool enable)
 	struct smu_power_gate *power_gate = &smu_power->power_gate;
 
 	if (enable && power_gate->uvd_gated) {
-		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 1);
-		if (ret)
-			return ret;
+		if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UVD_BIT)) {
+			ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 1);
+			if (ret)
+				return ret;
+		}
 		power_gate->uvd_gated = false;
 	} else {
 		if (!enable && !power_gate->uvd_gated) {
-			ret = smu_send_smc_msg(smu, SMU_MSG_PowerDownVcn);
-			if (ret)
-				return ret;
+			if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UVD_BIT)) {
+				ret = smu_send_smc_msg(smu, SMU_MSG_PowerDownVcn);
+				if (ret)
+					return ret;
+			}
 			power_gate->uvd_gated = true;
 		}
 	}
@@ -1300,6 +1304,169 @@ static int navi10_get_uclk_dpm_states(struct smu_context *smu, uint32_t *clocks_
 	return 0;
 }
 
+static int navi10_get_ppfeature_status(struct smu_context *smu,
+				       char *buf)
+{
+	static const char *ppfeature_name[] = {
+				"DPM_PREFETCHER",
+				"DPM_GFXCLK",
+				"DPM_GFX_PACE",
+				"DPM_UCLK",
+				"DPM_SOCCLK",
+				"DPM_MP0CLK",
+				"DPM_LINK",
+				"DPM_DCEFCLK",
+				"MEM_VDDCI_SCALING",
+				"MEM_MVDD_SCALING",
+				"DS_GFXCLK",
+				"DS_SOCCLK",
+				"DS_LCLK",
+				"DS_DCEFCLK",
+				"DS_UCLK",
+				"GFX_ULV",
+				"FW_DSTATE",
+				"GFXOFF",
+				"BACO",
+				"VCN_PG",
+				"JPEG_PG",
+				"USB_PG",
+				"RSMU_SMN_CG",
+				"PPT",
+				"TDC",
+				"GFX_EDC",
+				"APCC_PLUS",
+				"GTHR",
+				"ACDC",
+				"VR0HOT",
+				"VR1HOT",
+				"FW_CTF",
+				"FAN_CONTROL",
+				"THERMAL",
+				"GFX_DCS",
+				"RM",
+				"LED_DISPLAY",
+				"GFX_SS",
+				"OUT_OF_BAND_MONITOR",
+				"TEMP_DEPENDENT_VMIN",
+				"MMHUB_PG",
+				"ATHUB_PG"};
+	static const char *output_title[] = {
+				"FEATURES",
+				"BITMASK",
+				"ENABLEMENT"};
+	uint64_t features_enabled;
+	uint32_t feature_mask[2];
+	int i;
+	int ret = 0;
+	int size = 0;
+
+	ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
+	PP_ASSERT_WITH_CODE(!ret,
+			"[GetPPfeatureStatus] Failed to get enabled smc features!",
+			return ret);
+	features_enabled = (uint64_t)feature_mask[0] |
+			   (uint64_t)feature_mask[1] << 32;
+
+	size += sprintf(buf + size, "Current ppfeatures: 0x%016llx\n", features_enabled);
+	size += sprintf(buf + size, "%-19s %-22s %s\n",
+				output_title[0],
+				output_title[1],
+				output_title[2]);
+	for (i = 0; i < (sizeof(ppfeature_name) / sizeof(ppfeature_name[0])); i++) {
+		size += sprintf(buf + size, "%-19s 0x%016llx %6s\n",
+					ppfeature_name[i],
+					1ULL << i,
+					(features_enabled & (1ULL << i)) ? "Y" : "N");
+	}
+
+	return size;
+}
+
+static int navi10_enable_smc_features(struct smu_context *smu,
+				      bool enabled,
+				      uint64_t feature_masks)
+{
+	struct smu_feature *feature = &smu->smu_feature;
+	uint32_t feature_low, feature_high;
+	uint32_t feature_mask[2];
+	int ret = 0;
+
+	feature_low = (uint32_t)(feature_masks & 0xFFFFFFFF);
+	feature_high = (uint32_t)((feature_masks & 0xFFFFFFFF00000000ULL) >> 32);
+
+	if (enabled) {
+		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnableSmuFeaturesLow,
+						  feature_low);
+		if (ret)
+			return ret;
+		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnableSmuFeaturesHigh,
+						  feature_high);
+		if (ret)
+			return ret;
+	} else {
+		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DisableSmuFeaturesLow,
+						  feature_low);
+		if (ret)
+			return ret;
+		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DisableSmuFeaturesHigh,
+						  feature_high);
+		if (ret)
+			return ret;
+	}
+
+	ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
+	if (ret)
+		return ret;
+
+	mutex_lock(&feature->mutex);
+	bitmap_copy(feature->enabled, (unsigned long *)&feature_mask,
+		    feature->feature_num);
+	mutex_unlock(&feature->mutex);
+
+	return 0;
+}
+
+static int navi10_set_ppfeature_status(struct smu_context *smu,
+				       uint64_t new_ppfeature_masks)
+{
+	uint64_t features_enabled;
+	uint32_t feature_mask[2];
+	uint64_t features_to_enable;
+	uint64_t features_to_disable;
+	int ret = 0;
+
+	ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
+	PP_ASSERT_WITH_CODE(!ret,
+			"[SetPPfeatureStatus] Failed to get enabled smc features!",
+			return ret);
+	features_enabled = (uint64_t)feature_mask[0] |
+			   (uint64_t)feature_mask[1] << 32;
+
+	features_to_disable =
+		features_enabled & ~new_ppfeature_masks;
+	features_to_enable =
+		~features_enabled & new_ppfeature_masks;
+
+	pr_debug("features_to_disable 0x%llx\n", features_to_disable);
+	pr_debug("features_to_enable 0x%llx\n", features_to_enable);
+
+	if (features_to_disable) {
+		ret = navi10_enable_smc_features(smu, false, features_to_disable);
+		PP_ASSERT_WITH_CODE(!ret,
+				"[SetPPfeatureStatus] Failed to disable smc features!",
+				return ret);
+	}
+
+	if (features_to_enable) {
+		ret = navi10_enable_smc_features(smu, true, features_to_enable);
+		PP_ASSERT_WITH_CODE(!ret,
+				"[SetPPfeatureStatus] Failed to enable smc features!",
+				return ret);
+	}
+
+	return 0;
+}
+
 static const struct pptable_funcs navi10_ppt_funcs = {
 	.tables_init = navi10_tables_init,
 	.alloc_dpm_context = navi10_allocate_dpm_context,
@@ -1333,6 +1500,8 @@ static const struct pptable_funcs navi10_ppt_funcs = {
 	.set_watermarks_table = navi10_set_watermarks_table,
 	.read_sensor = navi10_read_sensor,
 	.get_uclk_dpm_states = navi10_get_uclk_dpm_states,
+	.get_ppfeature_status = navi10_get_ppfeature_status,
+	.set_ppfeature_status = navi10_set_ppfeature_status,
 };
 
 void navi10_set_ppt_funcs(struct smu_context *smu)
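
The navi10_ppt.c additions above expose the SMU feature bitmap through get/set ppfeature callbacks: the firmware reports the enabled features as two 32-bit words, navi10_get_ppfeature_status() merges them into one 64-bit mask for printing, and navi10_set_ppfeature_status() diffs the current mask against the requested one to decide which features to disable and which to enable before handing the low and high halves back to the SMU messages. A minimal, self-contained sketch of just that mask arithmetic (standalone C with made-up values, not driver code):

/*
 * Illustrative sketch of the mask arithmetic used by the
 * navi10_set_ppfeature_status() addition above.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Pretend values for the two 32-bit words returned by the SMC. */
	uint32_t feature_mask[2] = { 0x000000ffu, 0x00000001u };

	uint64_t features_enabled = (uint64_t)feature_mask[0] |
				    (uint64_t)feature_mask[1] << 32;

	/* Requested state: drop bit 0, add bit 40. */
	uint64_t new_masks = (features_enabled & ~1ULL) | (1ULL << 40);

	uint64_t to_disable = features_enabled & ~new_masks;
	uint64_t to_enable  = ~features_enabled & new_masks;

	/* Split back into the low/high words the SMU messages expect. */
	uint32_t enable_low  = (uint32_t)(to_enable & 0xFFFFFFFF);
	uint32_t enable_high = (uint32_t)(to_enable >> 32);

	printf("to_disable 0x%016" PRIx64 "\n", to_disable);
	printf("to_enable  0x%016" PRIx64 "\n", to_enable);
	printf("enable low 0x%08" PRIx32 " high 0x%08" PRIx32 "\n",
	       enable_low, enable_high);
	return 0;
}
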
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 6953dd264172c3e5f3bd5aa5a44415252ee157ce..a7fd5a4955c969fe8842ee067ac1b52b62a6567a 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -827,7 +827,7 @@ static int ttm_mem_evict_wait_busy(struct ttm_buffer_object *busy_bo,
 	if (!r)
 		reservation_object_unlock(busy_bo->resv);
 
-	return r == -EDEADLK ? -EAGAIN : r;
+	return r == -EDEADLK ? -EBUSY : r;
 }
 
 static int ttm_mem_evict_first(struct ttm_bo_device *bdev,