diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c
index 8902720b4e49ab29bb97abb3c63bfc444ebf15a8..331b8d558791d3c36d605669cee6658fac22f937 100644
--- a/drivers/net/wireless/ath/ath10k/htc.c
+++ b/drivers/net/wireless/ath/ath10k/htc.c
@@ -274,7 +274,7 @@ ath10k_htc_process_lookahead_bundle(struct ath10k_htc *htc,
 	struct ath10k *ar = htc->ar;
 	int bundle_cnt = len / sizeof(*report);
 
-	if (!bundle_cnt || (bundle_cnt > HTC_HOST_MAX_MSG_PER_BUNDLE)) {
+	if (!bundle_cnt || (bundle_cnt > HTC_HOST_MAX_MSG_PER_RX_BUNDLE)) {
 		ath10k_warn(ar, "Invalid lookahead bundle count: %d\n",
 			    bundle_cnt);
 		return -EINVAL;
@@ -655,7 +655,7 @@ int ath10k_htc_wait_target(struct ath10k_htc *htc)
 	    sizeof(msg->hdr) + sizeof(msg->ready_ext)) {
 		htc->max_msgs_per_htc_bundle =
 			min_t(u8, msg->ready_ext.max_msgs_per_htc_bundle,
-			      HTC_HOST_MAX_MSG_PER_BUNDLE);
+			      HTC_HOST_MAX_MSG_PER_RX_BUNDLE);
 		ath10k_dbg(ar, ATH10K_DBG_HTC,
 			   "Extended ready message. RX bundle size: %d\n",
 			   htc->max_msgs_per_htc_bundle);
diff --git a/drivers/net/wireless/ath/ath10k/htc.h b/drivers/net/wireless/ath/ath10k/htc.h
index 34877597dd6a88f4b5ebdebd561d1feda027782b..51fda6c23f6939661f179eaac3090b9e3a40df5a 100644
--- a/drivers/net/wireless/ath/ath10k/htc.h
+++ b/drivers/net/wireless/ath/ath10k/htc.h
@@ -50,7 +50,8 @@ struct ath10k;
  * 4-byte aligned.
  */
 
-#define HTC_HOST_MAX_MSG_PER_BUNDLE        8
+#define HTC_HOST_MAX_MSG_PER_RX_BUNDLE        8
+#define HTC_HOST_MAX_MSG_PER_TX_BUNDLE        16
 
 enum ath10k_htc_tx_flags {
 	ATH10K_HTC_FLAG_NEED_CREDIT_UPDATE = 0x01,
@@ -58,6 +59,7 @@ enum ath10k_htc_tx_flags {
 };
 
 enum ath10k_htc_rx_flags {
+	ATH10K_HTC_FLAGS_RECV_1MORE_BLOCK = 0x01,
 	ATH10K_HTC_FLAG_TRAILER_PRESENT = 0x02,
 	ATH10K_HTC_FLAG_BUNDLE_MASK     = 0xF0
 };
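
The htc.h hunk above splits the old single bundle limit into separate RX (8) and TX (16) maxima and adds the ATH10K_HTC_FLAGS_RECV_1MORE_BLOCK bit next to the existing bundle-count nibble. A minimal standalone sketch of how the host clamps the firmware-advertised RX bundle size (as the min_t() in ath10k_htc_wait_target() does) and how the flag bits decode; the firmware value 32 and the flags byte 0xF3 are purely illustrative:

#include <stdio.h>
#include <stdint.h>

#define HTC_HOST_MAX_MSG_PER_RX_BUNDLE    8
#define ATH10K_HTC_FLAGS_RECV_1MORE_BLOCK 0x01
#define ATH10K_HTC_FLAG_TRAILER_PRESENT   0x02
#define ATH10K_HTC_FLAG_BUNDLE_MASK       0xF0

/* clamp as done with min_t() in ath10k_htc_wait_target() */
static uint8_t clamp_rx_bundle(uint8_t fw_max)
{
	return fw_max < HTC_HOST_MAX_MSG_PER_RX_BUNDLE ?
	       fw_max : HTC_HOST_MAX_MSG_PER_RX_BUNDLE;
}

int main(void)
{
	uint8_t flags = 0xF3;	/* illustrative HTC header flags */

	printf("rx bundle size: %u\n", clamp_rx_bundle(32));
	printf("one more block: %d\n",
	       !!(flags & ATH10K_HTC_FLAGS_RECV_1MORE_BLOCK));
	printf("trailer present: %d\n",
	       !!(flags & ATH10K_HTC_FLAG_TRAILER_PRESENT));
	/* the bundle count lives in the upper nibble, as FIELD_GET() extracts */
	printf("bundled msgs: %u\n",
	       (flags & ATH10K_HTC_FLAG_BUNDLE_MASK) >> 4);
	return 0;
}
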
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
index c72d8af122a28e19048c40f7072b4a9064d43e0b..4d1cd90d6d27c3d299095626efd80888eb9e840c 100644
--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
@@ -268,11 +268,12 @@ int ath10k_htt_rx_ring_refill(struct ath10k *ar)
 	spin_lock_bh(&htt->rx_ring.lock);
 	ret = ath10k_htt_rx_ring_fill_n(htt, (htt->rx_ring.fill_level -
 					      htt->rx_ring.fill_cnt));
-	spin_unlock_bh(&htt->rx_ring.lock);
 
 	if (ret)
 		ath10k_htt_rx_ring_free(htt);
 
+	spin_unlock_bh(&htt->rx_ring.lock);
+
 	return ret;
 }
 
@@ -284,7 +285,9 @@ void ath10k_htt_rx_free(struct ath10k_htt *htt)
 	skb_queue_purge(&htt->rx_in_ord_compl_q);
 	skb_queue_purge(&htt->tx_fetch_ind_q);
 
+	spin_lock_bh(&htt->rx_ring.lock);
 	ath10k_htt_rx_ring_free(htt);
+	spin_unlock_bh(&htt->rx_ring.lock);
 
 	dma_free_coherent(htt->ar->dev,
 			  ath10k_htt_get_rx_ring_size(htt),
@@ -1089,7 +1092,7 @@ static void ath10k_htt_rx_h_queue_msdu(struct ath10k *ar,
 	status = IEEE80211_SKB_RXCB(skb);
 	*status = *rx_status;
 
-	__skb_queue_tail(&ar->htt.rx_msdus_q, skb);
+	skb_queue_tail(&ar->htt.rx_msdus_q, skb);
 }
 
 static void ath10k_process_rx(struct ath10k *ar, struct sk_buff *skb)
@@ -2810,7 +2813,7 @@ bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
 		break;
 	}
 	case HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND: {
-		__skb_queue_tail(&htt->rx_in_ord_compl_q, skb);
+		skb_queue_tail(&htt->rx_in_ord_compl_q, skb);
 		return false;
 	}
 	case HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND:
@@ -2874,7 +2877,7 @@ static int ath10k_htt_rx_deliver_msdu(struct ath10k *ar, int quota, int budget)
 		if (skb_queue_empty(&ar->htt.rx_msdus_q))
 			break;
 
-		skb = __skb_dequeue(&ar->htt.rx_msdus_q);
+		skb = skb_dequeue(&ar->htt.rx_msdus_q);
 		if (!skb)
 			break;
 		ath10k_process_rx(ar, skb);
@@ -2905,7 +2908,7 @@ int ath10k_htt_txrx_compl_task(struct ath10k *ar, int budget)
 		goto exit;
 	}
 
-	while ((skb = __skb_dequeue(&htt->rx_in_ord_compl_q))) {
+	while ((skb = skb_dequeue(&htt->rx_in_ord_compl_q))) {
 		spin_lock_bh(&htt->rx_ring.lock);
 		ret = ath10k_htt_rx_in_ord_ind(ar, skb);
 		spin_unlock_bh(&htt->rx_ring.lock);
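
The htt_rx.c hunks switch from the __skb_queue_*() helpers, which require the caller to already hold the queue's own lock, to the locking skb_queue_*() variants, and pull ath10k_htt_rx_ring_free() under rx_ring.lock so the ring cannot be refilled and freed concurrently. A minimal kernel-style sketch of the locked-queue pattern (names are illustrative, not the driver's):

#include <linux/skbuff.h>

static struct sk_buff_head example_rx_q;	/* skb_queue_head_init() at setup */

/* producer side, e.g. an indication handler */
static void example_enqueue(struct sk_buff *skb)
{
	skb_queue_tail(&example_rx_q, skb);	/* takes example_rx_q.lock itself */
}

/* consumer side, e.g. NAPI poll */
static void example_drain(void)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&example_rx_q)))	/* also internally locked */
		dev_kfree_skb_any(skb);
}
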
diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c
index 89157c5b5e5fd0771d5be58bb81cba168ffd59aa..be5b52aaffa61c2aa1f2ebaf0ec74042cc5d81c9 100644
--- a/drivers/net/wireless/ath/ath10k/htt_tx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_tx.c
@@ -1056,7 +1056,7 @@ static u8 ath10k_htt_tx_get_tid(struct sk_buff *skb, bool is_eth)
 	if (!is_eth && ieee80211_is_mgmt(hdr->frame_control))
 		return HTT_DATA_TX_EXT_TID_MGMT;
 	else if (cb->flags & ATH10K_SKB_F_QOS)
-		return skb->priority % IEEE80211_QOS_CTL_TID_MASK;
+		return skb->priority & IEEE80211_QOS_CTL_TID_MASK;
 	else
 		return HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST;
 }
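
The htt_tx.c fix replaces a modulo with a bitwise AND when deriving the TID from skb->priority. With the 4-bit TID mask (IEEE80211_QOS_CTL_TID_MASK, 0x000f) the two agree for small priorities but diverge from 15 upward, and the AND is the correct field extraction. A quick standalone check:

#include <stdio.h>

#define IEEE80211_QOS_CTL_TID_MASK 0x000f	/* TID is the low 4 bits */

int main(void)
{
	unsigned int prio;

	/* modulo folds priority 15 (and 30, ...) onto TID 0; the mask does not */
	for (prio = 14; prio <= 16; prio++)
		printf("prio %2u -> mod %2u, mask %2u\n", prio,
		       prio % IEEE80211_QOS_CTL_TID_MASK,
		       prio & IEEE80211_QOS_CTL_TID_MASK);
	return 0;
}
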
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index 541bc1c4b2f710526c8cb8470bd46f0c1134cddf..95243b48a179ecd043c8b28be2ba31305414289e 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -4026,7 +4026,7 @@ void ath10k_mac_tx_push_pending(struct ath10k *ar)
 				   drv_priv);
 
 		/* Prevent aggressive sta/tid taking over tx queue */
-		max = 16;
+		max = HTC_HOST_MAX_MSG_PER_TX_BUNDLE;
 		ret = 0;
 		while (ath10k_mac_tx_can_push(hw, txq) && max--) {
 			ret = ath10k_mac_tx_push_txq(hw, txq);
@@ -4047,6 +4047,7 @@ void ath10k_mac_tx_push_pending(struct ath10k *ar)
 	rcu_read_unlock();
 	spin_unlock_bh(&ar->txqs_lock);
 }
+EXPORT_SYMBOL(ath10k_mac_tx_push_pending);
 
 /************/
 /* Scanning */
@@ -4287,7 +4288,7 @@ static void ath10k_mac_op_wake_tx_queue(struct ieee80211_hw *hw,
 	struct ieee80211_txq *f_txq;
 	struct ath10k_txq *f_artxq;
 	int ret = 0;
-	int max = 16;
+	int max = HTC_HOST_MAX_MSG_PER_TX_BUNDLE;
 
 	spin_lock_bh(&ar->txqs_lock);
 	if (list_empty(&artxq->list))
diff --git a/drivers/net/wireless/ath/ath10k/sdio.c b/drivers/net/wireless/ath/ath10k/sdio.c
index d612ce8c9cffeefb927218977daebdaada99435c..7f61591ce0de6b5c904fde8261c9a6689a587f5a 100644
--- a/drivers/net/wireless/ath/ath10k/sdio.c
+++ b/drivers/net/wireless/ath/ath10k/sdio.c
@@ -30,6 +30,7 @@
 #include "debug.h"
 #include "hif.h"
 #include "htc.h"
+#include "mac.h"
 #include "targaddrs.h"
 #include "trace.h"
 #include "sdio.h"
@@ -396,6 +397,7 @@ static int ath10k_sdio_mbox_rx_process_packet(struct ath10k *ar,
 	int ret;
 
 	payload_len = le16_to_cpu(htc_hdr->len);
+	skb->len = payload_len + sizeof(struct ath10k_htc_hdr);
 
 	if (trailer_present) {
 		trailer = skb->data + sizeof(*htc_hdr) +
@@ -434,12 +436,14 @@ static int ath10k_sdio_mbox_rx_process_packets(struct ath10k *ar,
 	enum ath10k_htc_ep_id id;
 	int ret, i, *n_lookahead_local;
 	u32 *lookaheads_local;
+	int lookahead_idx = 0;
 
 	for (i = 0; i < ar_sdio->n_rx_pkts; i++) {
 		lookaheads_local = lookaheads;
 		n_lookahead_local = n_lookahead;
 
-		id = ((struct ath10k_htc_hdr *)&lookaheads[i])->eid;
+		id = ((struct ath10k_htc_hdr *)
+		      &lookaheads[lookahead_idx++])->eid;
 
 		if (id >= ATH10K_HTC_EP_COUNT) {
 			ath10k_warn(ar, "invalid endpoint in look-ahead: %d\n",
@@ -462,6 +466,7 @@ static int ath10k_sdio_mbox_rx_process_packets(struct ath10k *ar,
 			/* Only read lookahead's from RX trailers
 			 * for the last packet in a bundle.
 			 */
+			lookahead_idx--;
 			lookaheads_local = NULL;
 			n_lookahead_local = NULL;
 		}
@@ -505,11 +510,11 @@ static int ath10k_sdio_mbox_alloc_pkt_bundle(struct ath10k *ar,
 
 	*bndl_cnt = FIELD_GET(ATH10K_HTC_FLAG_BUNDLE_MASK, htc_hdr->flags);
 
-	if (*bndl_cnt > HTC_HOST_MAX_MSG_PER_BUNDLE) {
+	if (*bndl_cnt > HTC_HOST_MAX_MSG_PER_RX_BUNDLE) {
 		ath10k_warn(ar,
 			    "HTC bundle length %u exceeds maximum %u\n",
 			    le16_to_cpu(htc_hdr->len),
-			    HTC_HOST_MAX_MSG_PER_BUNDLE);
+			    HTC_HOST_MAX_MSG_PER_RX_BUNDLE);
 		return -ENOMEM;
 	}
 
@@ -600,6 +605,9 @@ static int ath10k_sdio_mbox_rx_alloc(struct ath10k *ar,
 		 * ATH10K_HTC_FLAG_BUNDLE_MASK flag set, all bundled
 		 * packet skb's have been allocated in the previous step.
 		 */
+		if (htc_hdr->flags & ATH10K_HTC_FLAGS_RECV_1MORE_BLOCK)
+			full_len += ATH10K_HIF_MBOX_BLOCK_SIZE;
+
 		ret = ath10k_sdio_mbox_alloc_rx_pkt(&ar_sdio->rx_pkts[i],
 						    act_len,
 						    full_len,
@@ -1342,6 +1350,8 @@ static void ath10k_sdio_irq_handler(struct sdio_func *func)
 			break;
 	} while (time_before(jiffies, timeout) && !done);
 
+	ath10k_mac_tx_push_pending(ar);
+
 	sdio_claim_host(ar_sdio->func);
 
 	if (ret && ret != -ECANCELED)
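
When the HTC header carries ATH10K_HTC_FLAGS_RECV_1MORE_BLOCK, the firmware pads the SDIO transfer by one extra mbox block, so the RX skb must be allocated one block larger, as the sdio.c hunk above does. A rough standalone model of that length computation; the 256-byte block size stands in for ATH10K_HIF_MBOX_BLOCK_SIZE and the round-up only approximates the driver's padded-length helper:

#include <stdio.h>

#define MBOX_BLOCK_SIZE  256	/* stand-in for ATH10K_HIF_MBOX_BLOCK_SIZE */
#define RECV_1MORE_BLOCK 0x01	/* ATH10K_HTC_FLAGS_RECV_1MORE_BLOCK */

static unsigned int round_up_block(unsigned int len)
{
	return (len + MBOX_BLOCK_SIZE - 1) & ~(MBOX_BLOCK_SIZE - 1);
}

int main(void)
{
	unsigned int act_len = 1500;		/* illustrative payload length */
	unsigned int full_len = round_up_block(act_len);
	unsigned int flags = RECV_1MORE_BLOCK;	/* from the HTC header */

	if (flags & RECV_1MORE_BLOCK)
		full_len += MBOX_BLOCK_SIZE;	/* mirrors the hunk above */

	printf("act_len %u -> full_len %u\n", act_len, full_len);
	return 0;
}
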
diff --git a/drivers/net/wireless/ath/ath10k/sdio.h b/drivers/net/wireless/ath/ath10k/sdio.h
index 4ff7b545293be871bbf4275145ae78777e303086..453eb626314342c277adf17e49fd6266013d18f6 100644
--- a/drivers/net/wireless/ath/ath10k/sdio.h
+++ b/drivers/net/wireless/ath/ath10k/sdio.h
@@ -96,14 +96,14 @@
  * way:
  *
  * Let's assume that each packet in a bundle of the maximum bundle size
- * (HTC_HOST_MAX_MSG_PER_BUNDLE) has the HTC header bundle count set
- * to the maximum value (HTC_HOST_MAX_MSG_PER_BUNDLE).
+ * (HTC_HOST_MAX_MSG_PER_RX_BUNDLE) has the HTC header bundle count set
+ * to the maximum value (HTC_HOST_MAX_MSG_PER_RX_BUNDLE).
  *
  * in this case the driver must allocate
- * (HTC_HOST_MAX_MSG_PER_BUNDLE * HTC_HOST_MAX_MSG_PER_BUNDLE) skb's.
+ * (HTC_HOST_MAX_MSG_PER_RX_BUNDLE * HTC_HOST_MAX_MSG_PER_RX_BUNDLE) skb's.
  */
 #define ATH10K_SDIO_MAX_RX_MSGS \
-	(HTC_HOST_MAX_MSG_PER_BUNDLE * HTC_HOST_MAX_MSG_PER_BUNDLE)
+	(HTC_HOST_MAX_MSG_PER_RX_BUNDLE * HTC_HOST_MAX_MSG_PER_RX_BUNDLE)
 
 #define ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL   0x00000868u
 #define ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL_DISABLE_SLEEP_OFF 0xFFFEFFFF
diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
index 8c49a26fc57120bfa22acc0e435a2bf1b07004e8..b04f86f8038af793068cad3a881897cb5b7c772e 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c
+++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
@@ -1076,6 +1076,8 @@ static int ath10k_wmi_tlv_op_pull_svc_rdy_ev(struct ath10k *ar,
 	arg->phy_capab = ev->phy_capability;
 	arg->num_rf_chains = ev->num_rf_chains;
 	arg->eeprom_rd = reg->eeprom_rd;
+	arg->low_5ghz_chan = reg->low_5ghz_chan;
+	arg->high_5ghz_chan = reg->high_5ghz_chan;
 	arg->num_mem_reqs = ev->num_mem_reqs;
 	arg->service_map = svc_bmap;
 	arg->service_map_len = ath10k_wmi_tlv_len(svc_bmap);
@@ -1614,10 +1616,10 @@ ath10k_wmi_tlv_op_gen_start_scan(struct ath10k *ar,
 	bssid_len = arg->n_bssids * sizeof(struct wmi_mac_addr);
 	ie_len = roundup(arg->ie_len, 4);
 	len = (sizeof(*tlv) + sizeof(*cmd)) +
-	      (arg->n_channels ? sizeof(*tlv) + chan_len : 0) +
-	      (arg->n_ssids ? sizeof(*tlv) + ssid_len : 0) +
-	      (arg->n_bssids ? sizeof(*tlv) + bssid_len : 0) +
-	      (arg->ie_len ? sizeof(*tlv) + ie_len : 0);
+	      sizeof(*tlv) + chan_len +
+	      sizeof(*tlv) + ssid_len +
+	      sizeof(*tlv) + bssid_len +
+	      sizeof(*tlv) + ie_len;
 
 	skb = ath10k_wmi_alloc_skb(ar, len);
 	if (!skb)
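
The start_scan change always reserves room for the four optional TLV sections, even when their element counts are zero, because the command is expected to carry empty TLV headers rather than omit them. A standalone sketch of the length math after the patch; TLV_HDR_LEN and the argument values are illustrative placeholders for sizeof(*tlv) and the real section sizes:

#include <stdio.h>

#define TLV_HDR_LEN 8	/* stand-in for sizeof(struct wmi_tlv) */

static unsigned int scan_cmd_len(unsigned int cmd_len, unsigned int chan_len,
				 unsigned int ssid_len, unsigned int bssid_len,
				 unsigned int ie_len)
{
	/* every section contributes a TLV header, even if its payload is empty */
	return TLV_HDR_LEN + cmd_len +
	       TLV_HDR_LEN + chan_len +
	       TLV_HDR_LEN + ssid_len +
	       TLV_HDR_LEN + bssid_len +
	       TLV_HDR_LEN + ie_len;
}

int main(void)
{
	/* a scan with no SSIDs/BSSIDs/IEs still carries their empty TLVs */
	printf("len = %u\n", scan_cmd_len(64, 4 * 8, 0, 0, 0));
	return 0;
}
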
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index ef0de4f1312c369ed47dffac19f4681375e1e912..21ba20981a80bfd4b96133c0b0a9ecd6c51f6c41 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -342,7 +342,7 @@ struct ath_chanctx {
 
 	struct ath_beacon_config beacon;
 	struct ath9k_hw_cal_data caldata;
-	struct timespec tsf_ts;
+	struct timespec64 tsf_ts;
 	u64 tsf_val;
 	u32 last_beacon;
 
@@ -1021,7 +1021,7 @@ struct ath_softc {
 	struct ath_offchannel offchannel;
 	struct ath_chanctx *next_chan;
 	struct completion go_beacon;
-	struct timespec last_event_time;
+	struct timespec64 last_event_time;
 #endif
 
 	unsigned long driver_data;
diff --git a/drivers/net/wireless/ath/ath9k/channel.c b/drivers/net/wireless/ath/ath9k/channel.c
index 1b05b5d7a03867ecc299f9941dbab3466159e9c9..fd61ae4782b63632c7f8a7ccc9a107e7274bd587 100644
--- a/drivers/net/wireless/ath/ath9k/channel.c
+++ b/drivers/net/wireless/ath/ath9k/channel.c
@@ -233,9 +233,9 @@ static const char *chanctx_state_string(enum ath_chanctx_state state)
 static u32 chanctx_event_delta(struct ath_softc *sc)
 {
 	u64 ms;
-	struct timespec ts, *old;
+	struct timespec64 ts, *old;
 
-	getrawmonotonic(&ts);
+	ktime_get_raw_ts64(&ts);
 	old = &sc->last_event_time;
 	ms = ts.tv_sec * 1000 + ts.tv_nsec / 1000000;
 	ms -= old->tv_sec * 1000 + old->tv_nsec / 1000000;
@@ -334,7 +334,7 @@ ath_chanctx_get_next(struct ath_softc *sc, struct ath_chanctx *ctx)
 static void ath_chanctx_adjust_tbtt_delta(struct ath_softc *sc)
 {
 	struct ath_chanctx *prev, *cur;
-	struct timespec ts;
+	struct timespec64 ts;
 	u32 cur_tsf, prev_tsf, beacon_int;
 	s32 offset;
 
@@ -346,7 +346,7 @@ static void ath_chanctx_adjust_tbtt_delta(struct ath_softc *sc)
 	if (!prev->switch_after_beacon)
 		return;
 
-	getrawmonotonic(&ts);
+	ktime_get_raw_ts64(&ts);
 	cur_tsf = (u32) cur->tsf_val +
 		  ath9k_hw_get_tsf_offset(&cur->tsf_ts, &ts);
 
@@ -1230,7 +1230,7 @@ void ath_chanctx_set_next(struct ath_softc *sc, bool force)
 {
 	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
 	struct ath_chanctx *old_ctx;
-	struct timespec ts;
+	struct timespec64 ts;
 	bool measure_time = false;
 	bool send_ps = false;
 	bool queues_stopped = false;
@@ -1260,7 +1260,7 @@ void ath_chanctx_set_next(struct ath_softc *sc, bool force)
 		spin_unlock_bh(&sc->chan_lock);
 
 		if (sc->next_chan == &sc->offchannel.chan) {
-			getrawmonotonic(&ts);
+			ktime_get_raw_ts64(&ts);
 			measure_time = true;
 		}
 
@@ -1277,7 +1277,7 @@ void ath_chanctx_set_next(struct ath_softc *sc, bool force)
 		spin_lock_bh(&sc->chan_lock);
 
 		if (sc->cur_chan != &sc->offchannel.chan) {
-			getrawmonotonic(&sc->cur_chan->tsf_ts);
+			ktime_get_raw_ts64(&sc->cur_chan->tsf_ts);
 			sc->cur_chan->tsf_val = ath9k_hw_gettsf64(sc->sc_ah);
 		}
 	}
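
The ath9k channel-context code moves from the deprecated getrawmonotonic()/struct timespec pair to ktime_get_raw_ts64()/struct timespec64, keeping the TSF bookkeeping correct past 2038 on 32-bit hosts. The millisecond delta computed in chanctx_event_delta() can be modelled in userspace with the raw monotonic clock:

#include <stdio.h>
#include <stdint.h>
#include <time.h>

/* same arithmetic as chanctx_event_delta(), on CLOCK_MONOTONIC_RAW */
static uint64_t ms_since(const struct timespec *old)
{
	struct timespec now;

	clock_gettime(CLOCK_MONOTONIC_RAW, &now);
	return (uint64_t)now.tv_sec * 1000 + now.tv_nsec / 1000000 -
	       ((uint64_t)old->tv_sec * 1000 + old->tv_nsec / 1000000);
}

int main(void)
{
	struct timespec start;

	clock_gettime(CLOCK_MONOTONIC_RAW, &start);
	/* ... some work ... */
	printf("elapsed: %llu ms\n", (unsigned long long)ms_since(&start));
	return 0;
}
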
diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c
index cb0eef13af1c8d3791832d78d70e5546b2ade346..fb649d85b8fc73fbaa44dcfacb231674593f776c 100644
--- a/drivers/net/wireless/ath/ath9k/hif_usb.c
+++ b/drivers/net/wireless/ath/ath9k/hif_usb.c
@@ -138,6 +138,7 @@ static void hif_usb_mgmt_cb(struct urb *urb)
 {
 	struct cmd_buf *cmd = (struct cmd_buf *)urb->context;
 	struct hif_device_usb *hif_dev;
+	unsigned long flags;
 	bool txok = true;
 
 	if (!cmd || !cmd->skb || !cmd->hif_dev)
@@ -158,14 +159,14 @@ static void hif_usb_mgmt_cb(struct urb *urb)
 		 * If the URBs are being flushed, no need to complete
 		 * this packet.
 		 */
-		spin_lock(&hif_dev->tx.tx_lock);
+		spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);
 		if (hif_dev->tx.flags & HIF_USB_TX_FLUSH) {
-			spin_unlock(&hif_dev->tx.tx_lock);
+			spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
 			dev_kfree_skb_any(cmd->skb);
 			kfree(cmd);
 			return;
 		}
-		spin_unlock(&hif_dev->tx.tx_lock);
+		spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
 
 		break;
 	default:
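
hif_usb_mgmt_cb() is a URB completion callback and may run in hard-interrupt context, so the plain spin_lock()/spin_unlock() pair is upgraded to the IRQ-saving variants here; the same treatment is applied to ath9k_wmi_ctrl_rx() further down. A minimal kernel-style sketch of the pattern, with illustrative names:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);
static unsigned int example_flags_word;

/* safe to call from process, softirq or hard-irq context */
static bool example_is_flushing(void)
{
	unsigned long flags;
	bool flushing;

	spin_lock_irqsave(&example_lock, flags);
	flushing = example_flags_word & 0x1;
	spin_unlock_irqrestore(&example_lock, flags);

	return flushing;
}
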
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
index 585736a837ed8df30c84702fadc861384e17a418..799010ed04e0eedbde2be23824a9cb6b513984bf 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
@@ -1107,25 +1107,26 @@ void ath9k_htc_rxep(void *drv_priv, struct sk_buff *skb,
 	struct ath_hw *ah = priv->ah;
 	struct ath_common *common = ath9k_hw_common(ah);
 	struct ath9k_htc_rxbuf *rxbuf = NULL, *tmp_buf = NULL;
+	unsigned long flags;
 
-	spin_lock(&priv->rx.rxbuflock);
+	spin_lock_irqsave(&priv->rx.rxbuflock, flags);
 	list_for_each_entry(tmp_buf, &priv->rx.rxbuf, list) {
 		if (!tmp_buf->in_process) {
 			rxbuf = tmp_buf;
 			break;
 		}
 	}
-	spin_unlock(&priv->rx.rxbuflock);
+	spin_unlock_irqrestore(&priv->rx.rxbuflock, flags);
 
 	if (rxbuf == NULL) {
 		ath_dbg(common, ANY, "No free RX buffer\n");
 		goto err;
 	}
 
-	spin_lock(&priv->rx.rxbuflock);
+	spin_lock_irqsave(&priv->rx.rxbuflock, flags);
 	rxbuf->skb = skb;
 	rxbuf->in_process = true;
-	spin_unlock(&priv->rx.rxbuflock);
+	spin_unlock_irqrestore(&priv->rx.rxbuflock, flags);
 
 	tasklet_schedule(&priv->rx_tasklet);
 	return;
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 1665066f4e242a5a0b2441c7d74d591234906d02..32fb85e076d647899a4f884c2ee858a5101df8c1 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -1835,13 +1835,13 @@ static int ath9k_hw_do_fastcc(struct ath_hw *ah, struct ath9k_channel *chan)
 	return -EINVAL;
 }
 
-u32 ath9k_hw_get_tsf_offset(struct timespec *last, struct timespec *cur)
+u32 ath9k_hw_get_tsf_offset(struct timespec64 *last, struct timespec64 *cur)
 {
-	struct timespec ts;
+	struct timespec64 ts;
 	s64 usec;
 
 	if (!cur) {
-		getrawmonotonic(&ts);
+		ktime_get_raw_ts64(&ts);
 		cur = &ts;
 	}
 
@@ -1859,7 +1859,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
 	u32 saveLedState;
 	u32 saveDefAntenna;
 	u32 macStaId1;
-	struct timespec tsf_ts;
+	struct timespec64 tsf_ts;
 	u32 tsf_offset;
 	u64 tsf = 0;
 	int r;
@@ -1905,7 +1905,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
 	macStaId1 = REG_READ(ah, AR_STA_ID1) & AR_STA_ID1_BASE_RATE_11B;
 
 	/* Save TSF before chip reset, a cold reset clears it */
-	getrawmonotonic(&tsf_ts);
+	ktime_get_raw_ts64(&tsf_ts);
 	tsf = ath9k_hw_gettsf64(ah);
 
 	saveLedState = REG_READ(ah, AR_CFG_LED) &
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index 9804a24a2dc0949539fb58f8981086b69dad9ff5..68956cdc8c9ae21c0d974466ced9c57b85294ce9 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -1060,7 +1060,7 @@ u32 ath9k_hw_gettsf32(struct ath_hw *ah);
 u64 ath9k_hw_gettsf64(struct ath_hw *ah);
 void ath9k_hw_settsf64(struct ath_hw *ah, u64 tsf64);
 void ath9k_hw_reset_tsf(struct ath_hw *ah);
-u32 ath9k_hw_get_tsf_offset(struct timespec *last, struct timespec *cur);
+u32 ath9k_hw_get_tsf_offset(struct timespec64 *last, struct timespec64 *cur);
 void ath9k_hw_set_tsfadjust(struct ath_hw *ah, bool set);
 void ath9k_hw_init_global_settings(struct ath_hw *ah);
 u32 ar9003_get_pll_sqsum_dvc(struct ath_hw *ah);
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 5eb1c0aea41d7d8b5706c8c4be0e881f28fe0954..1049773378f274e2f8c4ccf1050cb58f50858366 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -1865,7 +1865,7 @@ static void ath9k_set_tsf(struct ieee80211_hw *hw,
 	mutex_lock(&sc->mutex);
 	ath9k_ps_wakeup(sc);
 	tsf -= le64_to_cpu(avp->tsf_adjust);
-	getrawmonotonic(&avp->chanctx->tsf_ts);
+	ktime_get_raw_ts64(&avp->chanctx->tsf_ts);
 	if (sc->cur_chan == avp->chanctx)
 		ath9k_hw_settsf64(sc->sc_ah, tsf);
 	avp->chanctx->tsf_val = tsf;
@@ -1881,7 +1881,7 @@ static void ath9k_reset_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
 	mutex_lock(&sc->mutex);
 
 	ath9k_ps_wakeup(sc);
-	getrawmonotonic(&avp->chanctx->tsf_ts);
+	ktime_get_raw_ts64(&avp->chanctx->tsf_ts);
 	if (sc->cur_chan == avp->chanctx)
 		ath9k_hw_reset_tsf(sc->sc_ah);
 	avp->chanctx->tsf_val = 0;
diff --git a/drivers/net/wireless/ath/ath9k/wmi.c b/drivers/net/wireless/ath/ath9k/wmi.c
index b0b5579b756077aead50de45c017b75a393066e4..d1f6710ca63bdab330f09634ea9bdb3f3b4b02d8 100644
--- a/drivers/net/wireless/ath/ath9k/wmi.c
+++ b/drivers/net/wireless/ath/ath9k/wmi.c
@@ -209,6 +209,7 @@ static void ath9k_wmi_ctrl_rx(void *priv, struct sk_buff *skb,
 {
 	struct wmi *wmi = priv;
 	struct wmi_cmd_hdr *hdr;
+	unsigned long flags;
 	u16 cmd_id;
 
 	if (unlikely(wmi->stopped))
@@ -218,20 +219,20 @@ static void ath9k_wmi_ctrl_rx(void *priv, struct sk_buff *skb,
 	cmd_id = be16_to_cpu(hdr->command_id);
 
 	if (cmd_id & 0x1000) {
-		spin_lock(&wmi->wmi_lock);
+		spin_lock_irqsave(&wmi->wmi_lock, flags);
 		__skb_queue_tail(&wmi->wmi_event_queue, skb);
-		spin_unlock(&wmi->wmi_lock);
+		spin_unlock_irqrestore(&wmi->wmi_lock, flags);
 		tasklet_schedule(&wmi->wmi_event_tasklet);
 		return;
 	}
 
 	/* Check if there has been a timeout. */
-	spin_lock(&wmi->wmi_lock);
+	spin_lock_irqsave(&wmi->wmi_lock, flags);
 	if (be16_to_cpu(hdr->seq_no) != wmi->last_seq_id) {
-		spin_unlock(&wmi->wmi_lock);
+		spin_unlock_irqrestore(&wmi->wmi_lock, flags);
 		goto free_skb;
 	}
-	spin_unlock(&wmi->wmi_lock);
+	spin_unlock_irqrestore(&wmi->wmi_lock, flags);
 
 	/* WMI command response */
 	ath9k_wmi_rsp_callback(wmi, skb);
diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c
index aeb5e6e806beab897026133f1b55bb53f20d73cd..79998a3ddb7afaf4df76449e5567e59aaf69863f 100644
--- a/drivers/net/wireless/ath/wcn36xx/main.c
+++ b/drivers/net/wireless/ath/wcn36xx/main.c
@@ -493,7 +493,7 @@ static int wcn36xx_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
 {
 	struct wcn36xx *wcn = hw->priv;
 	struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
-	struct wcn36xx_sta *sta_priv = wcn36xx_sta_to_priv(sta);
+	struct wcn36xx_sta *sta_priv = sta ? wcn36xx_sta_to_priv(sta) : NULL;
 	int ret = 0;
 	u8 key[WLAN_MAX_KEY_LEN];
 
@@ -512,7 +512,7 @@ static int wcn36xx_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
 		vif_priv->encrypt_type = WCN36XX_HAL_ED_WEP40;
 		break;
 	case WLAN_CIPHER_SUITE_WEP104:
-		vif_priv->encrypt_type = WCN36XX_HAL_ED_WEP40;
+		vif_priv->encrypt_type = WCN36XX_HAL_ED_WEP104;
 		break;
 	case WLAN_CIPHER_SUITE_CCMP:
 		vif_priv->encrypt_type = WCN36XX_HAL_ED_CCMP;
@@ -567,15 +567,19 @@ static int wcn36xx_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
 				key_conf->keyidx,
 				key_conf->keylen,
 				key);
+
 			if ((WLAN_CIPHER_SUITE_WEP40 == key_conf->cipher) ||
 			    (WLAN_CIPHER_SUITE_WEP104 == key_conf->cipher)) {
-				sta_priv->is_data_encrypted = true;
-				wcn36xx_smd_set_stakey(wcn,
-					vif_priv->encrypt_type,
-					key_conf->keyidx,
-					key_conf->keylen,
-					key,
-					get_sta_index(vif, sta_priv));
+				list_for_each_entry(sta_priv,
+						    &vif_priv->sta_list, list) {
+					sta_priv->is_data_encrypted = true;
+					wcn36xx_smd_set_stakey(wcn,
+						vif_priv->encrypt_type,
+						key_conf->keyidx,
+						key_conf->keylen,
+						key,
+						get_sta_index(vif, sta_priv));
+				}
 			}
 		}
 		break;
@@ -984,6 +988,7 @@ static int wcn36xx_add_interface(struct ieee80211_hw *hw,
 	mutex_lock(&wcn->conf_mutex);
 
 	vif_priv->bss_index = WCN36XX_HAL_BSS_INVALID_IDX;
+	INIT_LIST_HEAD(&vif_priv->sta_list);
 	list_add(&vif_priv->list, &wcn->vif_list);
 	wcn36xx_smd_add_sta_self(wcn, vif);
 
@@ -1005,6 +1010,8 @@ static int wcn36xx_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
 
 	spin_lock_init(&sta_priv->ampdu_lock);
 	sta_priv->vif = vif_priv;
+	list_add(&sta_priv->list, &vif_priv->sta_list);
+
 	/*
 	 * For STA mode HW will be configured on BSS_CHANGED_ASSOC because
 	 * at this stage AID is not available yet.
@@ -1032,6 +1039,7 @@ static int wcn36xx_sta_remove(struct ieee80211_hw *hw,
 
 	mutex_lock(&wcn->conf_mutex);
 
+	list_del(&sta_priv->list);
 	wcn36xx_smd_delete_sta(wcn, sta_priv->sta_index);
 	sta_priv->vif = NULL;
 
@@ -1153,8 +1161,6 @@ static const struct ieee80211_ops wcn36xx_ops = {
 
 static int wcn36xx_init_ieee80211(struct wcn36xx *wcn)
 {
-	int ret = 0;
-
 	static const u32 cipher_suites[] = {
 		WLAN_CIPHER_SUITE_WEP40,
 		WLAN_CIPHER_SUITE_WEP104,
@@ -1201,7 +1207,7 @@ static int wcn36xx_init_ieee80211(struct wcn36xx *wcn)
 	wiphy_ext_feature_set(wcn->hw->wiphy,
 			      NL80211_EXT_FEATURE_CQM_RSSI_LIST);
 
-	return ret;
+	return 0;
 }
 
 static int wcn36xx_platform_get_resources(struct wcn36xx *wcn,
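
wcn36xx now keeps a per-vif list of associated stations so that a WEP group key can be pushed to every peer, rather than only to the (NULL for group keys) station passed to set_key. A minimal kernel-style sketch of the list lifecycle used above; the structure names are abbreviated for illustration and are not the driver's:

#include <linux/types.h>
#include <linux/list.h>

struct example_sta {
	struct list_head list;
	u8 sta_index;
};

struct example_vif {
	struct list_head sta_list;	/* INIT_LIST_HEAD() in add_interface */
};

static void example_push_key(struct example_sta *sta)
{
	/* one wcn36xx_smd_set_stakey()-style HAL request per peer goes here */
}

static void example_sta_add(struct example_vif *vif, struct example_sta *sta)
{
	list_add(&sta->list, &vif->sta_list);	/* mirrors wcn36xx_sta_add() */
}

static void example_sta_remove(struct example_sta *sta)
{
	list_del(&sta->list);			/* mirrors wcn36xx_sta_remove() */
}

static void example_set_key_all(struct example_vif *vif)
{
	struct example_sta *sta;

	list_for_each_entry(sta, &vif->sta_list, list)
		example_push_key(sta);
}
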
diff --git a/drivers/net/wireless/ath/wcn36xx/smd.c b/drivers/net/wireless/ath/wcn36xx/smd.c
index b4dadf75d565eb4172551f952fa5a6368a79e65d..00098f24116dea042ad5e57526e4a4cf6b64183d 100644
--- a/drivers/net/wireless/ath/wcn36xx/smd.c
+++ b/drivers/net/wireless/ath/wcn36xx/smd.c
@@ -250,7 +250,7 @@ static void wcn36xx_smd_set_sta_params(struct wcn36xx *wcn,
 
 static int wcn36xx_smd_send_and_wait(struct wcn36xx *wcn, size_t len)
 {
-	int ret = 0;
+	int ret;
 	unsigned long start;
 	struct wcn36xx_hal_msg_header *hdr =
 		(struct wcn36xx_hal_msg_header *)wcn->hal_buf;
@@ -446,7 +446,7 @@ static int wcn36xx_smd_start_rsp(struct wcn36xx *wcn, void *buf, size_t len)
 int wcn36xx_smd_start(struct wcn36xx *wcn)
 {
 	struct wcn36xx_hal_mac_start_req_msg msg_body, *body;
-	int ret = 0;
+	int ret;
 	int i;
 	size_t len;
 
@@ -493,7 +493,7 @@ int wcn36xx_smd_start(struct wcn36xx *wcn)
 int wcn36xx_smd_stop(struct wcn36xx *wcn)
 {
 	struct wcn36xx_hal_mac_stop_req_msg msg_body;
-	int ret = 0;
+	int ret;
 
 	mutex_lock(&wcn->hal_mutex);
 	INIT_HAL_MSG(msg_body, WCN36XX_HAL_STOP_REQ);
@@ -520,7 +520,7 @@ int wcn36xx_smd_stop(struct wcn36xx *wcn)
 int wcn36xx_smd_init_scan(struct wcn36xx *wcn, enum wcn36xx_hal_sys_mode mode)
 {
 	struct wcn36xx_hal_init_scan_req_msg msg_body;
-	int ret = 0;
+	int ret;
 
 	mutex_lock(&wcn->hal_mutex);
 	INIT_HAL_MSG(msg_body, WCN36XX_HAL_INIT_SCAN_REQ);
@@ -549,7 +549,7 @@ int wcn36xx_smd_init_scan(struct wcn36xx *wcn, enum wcn36xx_hal_sys_mode mode)
 int wcn36xx_smd_start_scan(struct wcn36xx *wcn, u8 scan_channel)
 {
 	struct wcn36xx_hal_start_scan_req_msg msg_body;
-	int ret = 0;
+	int ret;
 
 	mutex_lock(&wcn->hal_mutex);
 	INIT_HAL_MSG(msg_body, WCN36XX_HAL_START_SCAN_REQ);
@@ -579,7 +579,7 @@ int wcn36xx_smd_start_scan(struct wcn36xx *wcn, u8 scan_channel)
 int wcn36xx_smd_end_scan(struct wcn36xx *wcn, u8 scan_channel)
 {
 	struct wcn36xx_hal_end_scan_req_msg msg_body;
-	int ret = 0;
+	int ret;
 
 	mutex_lock(&wcn->hal_mutex);
 	INIT_HAL_MSG(msg_body, WCN36XX_HAL_END_SCAN_REQ);
@@ -610,7 +610,7 @@ int wcn36xx_smd_finish_scan(struct wcn36xx *wcn,
 			    enum wcn36xx_hal_sys_mode mode)
 {
 	struct wcn36xx_hal_finish_scan_req_msg msg_body;
-	int ret = 0;
+	int ret;
 
 	mutex_lock(&wcn->hal_mutex);
 	INIT_HAL_MSG(msg_body, WCN36XX_HAL_FINISH_SCAN_REQ);
@@ -732,7 +732,7 @@ int wcn36xx_smd_stop_hw_scan(struct wcn36xx *wcn)
 static int wcn36xx_smd_switch_channel_rsp(void *buf, size_t len)
 {
 	struct wcn36xx_hal_switch_channel_rsp_msg *rsp;
-	int ret = 0;
+	int ret;
 
 	ret = wcn36xx_smd_rsp_status_check(buf, len);
 	if (ret)
@@ -747,7 +747,7 @@ int wcn36xx_smd_switch_channel(struct wcn36xx *wcn,
 			       struct ieee80211_vif *vif, int ch)
 {
 	struct wcn36xx_hal_switch_channel_req_msg msg_body;
-	int ret = 0;
+	int ret;
 
 	mutex_lock(&wcn->hal_mutex);
 	INIT_HAL_MSG(msg_body, WCN36XX_HAL_CH_SWITCH_REQ);
@@ -860,7 +860,7 @@ int wcn36xx_smd_update_scan_params(struct wcn36xx *wcn,
 				   u8 *channels, size_t channel_count)
 {
 	struct wcn36xx_hal_update_scan_params_req_ex msg_body;
-	int ret = 0;
+	int ret;
 
 	mutex_lock(&wcn->hal_mutex);
 	INIT_HAL_MSG(msg_body, WCN36XX_HAL_UPDATE_SCAN_PARAM_REQ);
@@ -931,7 +931,7 @@ static int wcn36xx_smd_add_sta_self_rsp(struct wcn36xx *wcn,
 int wcn36xx_smd_add_sta_self(struct wcn36xx *wcn, struct ieee80211_vif *vif)
 {
 	struct wcn36xx_hal_add_sta_self_req msg_body;
-	int ret = 0;
+	int ret;
 
 	mutex_lock(&wcn->hal_mutex);
 	INIT_HAL_MSG(msg_body, WCN36XX_HAL_ADD_STA_SELF_REQ);
@@ -965,7 +965,7 @@ int wcn36xx_smd_add_sta_self(struct wcn36xx *wcn, struct ieee80211_vif *vif)
 int wcn36xx_smd_delete_sta_self(struct wcn36xx *wcn, u8 *addr)
 {
 	struct wcn36xx_hal_del_sta_self_req_msg msg_body;
-	int ret = 0;
+	int ret;
 
 	mutex_lock(&wcn->hal_mutex);
 	INIT_HAL_MSG(msg_body, WCN36XX_HAL_DEL_STA_SELF_REQ);
@@ -993,7 +993,7 @@ int wcn36xx_smd_delete_sta_self(struct wcn36xx *wcn, u8 *addr)
 int wcn36xx_smd_delete_sta(struct wcn36xx *wcn, u8 sta_index)
 {
 	struct wcn36xx_hal_delete_sta_req_msg msg_body;
-	int ret = 0;
+	int ret;
 
 	mutex_lock(&wcn->hal_mutex);
 	INIT_HAL_MSG(msg_body, WCN36XX_HAL_DELETE_STA_REQ);
@@ -1040,7 +1040,7 @@ static int wcn36xx_smd_join_rsp(void *buf, size_t len)
 int wcn36xx_smd_join(struct wcn36xx *wcn, const u8 *bssid, u8 *vif, u8 ch)
 {
 	struct wcn36xx_hal_join_req_msg msg_body;
-	int ret = 0;
+	int ret;
 
 	mutex_lock(&wcn->hal_mutex);
 	INIT_HAL_MSG(msg_body, WCN36XX_HAL_JOIN_REQ);
@@ -1089,7 +1089,7 @@ int wcn36xx_smd_set_link_st(struct wcn36xx *wcn, const u8 *bssid,
 			    enum wcn36xx_hal_link_state state)
 {
 	struct wcn36xx_hal_set_link_state_req_msg msg_body;
-	int ret = 0;
+	int ret;
 
 	mutex_lock(&wcn->hal_mutex);
 	INIT_HAL_MSG(msg_body, WCN36XX_HAL_SET_LINK_ST_REQ);
@@ -1215,7 +1215,7 @@ int wcn36xx_smd_config_sta(struct wcn36xx *wcn, struct ieee80211_vif *vif,
 {
 	struct wcn36xx_hal_config_sta_req_msg msg;
 	struct wcn36xx_hal_config_sta_params *sta_params;
-	int ret = 0;
+	int ret;
 
 	mutex_lock(&wcn->hal_mutex);
 	INIT_HAL_MSG(msg, WCN36XX_HAL_CONFIG_STA_REQ);
@@ -1414,7 +1414,7 @@ int wcn36xx_smd_config_bss(struct wcn36xx *wcn, struct ieee80211_vif *vif,
 	struct wcn36xx_hal_config_bss_params *bss;
 	struct wcn36xx_hal_config_sta_params *sta_params;
 	struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
-	int ret = 0;
+	int ret;
 
 	mutex_lock(&wcn->hal_mutex);
 	INIT_HAL_MSG(msg, WCN36XX_HAL_CONFIG_BSS_REQ);
@@ -1579,7 +1579,7 @@ int wcn36xx_smd_send_beacon(struct wcn36xx *wcn, struct ieee80211_vif *vif,
 			    u16 p2p_off)
 {
 	struct wcn36xx_hal_send_beacon_req_msg msg_body;
-	int ret = 0, pad, pvm_len;
+	int ret, pad, pvm_len;
 
 	mutex_lock(&wcn->hal_mutex);
 	INIT_HAL_MSG(msg_body, WCN36XX_HAL_SEND_BEACON_REQ);
@@ -1653,7 +1653,7 @@ int wcn36xx_smd_update_proberesp_tmpl(struct wcn36xx *wcn,
 				      struct sk_buff *skb)
 {
 	struct wcn36xx_hal_send_probe_resp_req_msg msg;
-	int ret = 0;
+	int ret;
 
 	mutex_lock(&wcn->hal_mutex);
 	INIT_HAL_MSG(msg, WCN36XX_HAL_UPDATE_PROBE_RSP_TEMPLATE_REQ);
@@ -1700,7 +1700,7 @@ int wcn36xx_smd_set_stakey(struct wcn36xx *wcn,
 			   u8 sta_index)
 {
 	struct wcn36xx_hal_set_sta_key_req_msg msg_body;
-	int ret = 0;
+	int ret;
 
 	mutex_lock(&wcn->hal_mutex);
 	INIT_HAL_MSG(msg_body, WCN36XX_HAL_SET_STAKEY_REQ);
@@ -1708,12 +1708,20 @@ int wcn36xx_smd_set_stakey(struct wcn36xx *wcn,
 	msg_body.set_sta_key_params.sta_index = sta_index;
 	msg_body.set_sta_key_params.enc_type = enc_type;
 
-	msg_body.set_sta_key_params.key[0].id = keyidx;
-	msg_body.set_sta_key_params.key[0].unicast = 1;
-	msg_body.set_sta_key_params.key[0].direction = WCN36XX_HAL_TX_RX;
-	msg_body.set_sta_key_params.key[0].pae_role = 0;
-	msg_body.set_sta_key_params.key[0].length = keylen;
-	memcpy(msg_body.set_sta_key_params.key[0].key, key, keylen);
+	if (enc_type == WCN36XX_HAL_ED_WEP104 ||
+	    enc_type == WCN36XX_HAL_ED_WEP40) {
+		/* Use bss key for wep (static) */
+		msg_body.set_sta_key_params.def_wep_idx = keyidx;
+		msg_body.set_sta_key_params.wep_type = 0;
+	} else {
+		msg_body.set_sta_key_params.key[0].id = keyidx;
+		msg_body.set_sta_key_params.key[0].unicast = 1;
+		msg_body.set_sta_key_params.key[0].direction = WCN36XX_HAL_TX_RX;
+		msg_body.set_sta_key_params.key[0].pae_role = 0;
+		msg_body.set_sta_key_params.key[0].length = keylen;
+		memcpy(msg_body.set_sta_key_params.key[0].key, key, keylen);
+	}
+
 	msg_body.set_sta_key_params.single_tid_rc = 1;
 
 	PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
@@ -1741,7 +1749,7 @@ int wcn36xx_smd_set_bsskey(struct wcn36xx *wcn,
 			   u8 *key)
 {
 	struct wcn36xx_hal_set_bss_key_req_msg msg_body;
-	int ret = 0;
+	int ret;
 
 	mutex_lock(&wcn->hal_mutex);
 	INIT_HAL_MSG(msg_body, WCN36XX_HAL_SET_BSSKEY_REQ);
@@ -1778,7 +1786,7 @@ int wcn36xx_smd_remove_stakey(struct wcn36xx *wcn,
 			      u8 sta_index)
 {
 	struct wcn36xx_hal_remove_sta_key_req_msg msg_body;
-	int ret = 0;
+	int ret;
 
 	mutex_lock(&wcn->hal_mutex);
 	INIT_HAL_MSG(msg_body, WCN36XX_HAL_RMV_STAKEY_REQ);
@@ -1810,7 +1818,7 @@ int wcn36xx_smd_remove_bsskey(struct wcn36xx *wcn,
 			      u8 keyidx)
 {
 	struct wcn36xx_hal_remove_bss_key_req_msg msg_body;
-	int ret = 0;
+	int ret;
 
 	mutex_lock(&wcn->hal_mutex);
 	INIT_HAL_MSG(msg_body, WCN36XX_HAL_RMV_BSSKEY_REQ);
@@ -1839,7 +1847,7 @@ int wcn36xx_smd_enter_bmps(struct wcn36xx *wcn, struct ieee80211_vif *vif)
 {
 	struct wcn36xx_hal_enter_bmps_req_msg msg_body;
 	struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
-	int ret = 0;
+	int ret;
 
 	mutex_lock(&wcn->hal_mutex);
 	INIT_HAL_MSG(msg_body, WCN36XX_HAL_ENTER_BMPS_REQ);
@@ -1869,7 +1877,7 @@ int wcn36xx_smd_exit_bmps(struct wcn36xx *wcn, struct ieee80211_vif *vif)
 {
 	struct wcn36xx_hal_exit_bmps_req_msg msg_body;
 	struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
-	int ret = 0;
+	int ret;
 
 	mutex_lock(&wcn->hal_mutex);
 	INIT_HAL_MSG(msg_body, WCN36XX_HAL_EXIT_BMPS_REQ);
@@ -1895,7 +1903,7 @@ int wcn36xx_smd_exit_bmps(struct wcn36xx *wcn, struct ieee80211_vif *vif)
 int wcn36xx_smd_set_power_params(struct wcn36xx *wcn, bool ignore_dtim)
 {
 	struct wcn36xx_hal_set_power_params_req_msg msg_body;
-	int ret = 0;
+	int ret;
 
 	mutex_lock(&wcn->hal_mutex);
 	INIT_HAL_MSG(msg_body, WCN36XX_HAL_SET_POWER_PARAMS_REQ);
@@ -1930,7 +1938,7 @@ int wcn36xx_smd_keep_alive_req(struct wcn36xx *wcn,
 {
 	struct wcn36xx_hal_keep_alive_req_msg msg_body;
 	struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
-	int ret = 0;
+	int ret;
 
 	mutex_lock(&wcn->hal_mutex);
 	INIT_HAL_MSG(msg_body, WCN36XX_HAL_KEEP_ALIVE_REQ);
@@ -1968,7 +1976,7 @@ int wcn36xx_smd_dump_cmd_req(struct wcn36xx *wcn, u32 arg1, u32 arg2,
 			     u32 arg3, u32 arg4, u32 arg5)
 {
 	struct wcn36xx_hal_dump_cmd_req_msg msg_body;
-	int ret = 0;
+	int ret;
 
 	mutex_lock(&wcn->hal_mutex);
 	INIT_HAL_MSG(msg_body, WCN36XX_HAL_DUMP_COMMAND_REQ);
@@ -2013,7 +2021,6 @@ void set_feat_caps(u32 *bitmap, enum place_holder_in_cap_bitmap cap)
 int get_feat_caps(u32 *bitmap, enum place_holder_in_cap_bitmap cap)
 {
 	int arr_idx, bit_idx;
-	int ret = 0;
 
 	if (cap < 0 || cap > 127) {
 		wcn36xx_warn("error cap idx %d\n", cap);
@@ -2022,8 +2029,8 @@ int get_feat_caps(u32 *bitmap, enum place_holder_in_cap_bitmap cap)
 
 	arr_idx = cap / 32;
 	bit_idx = cap % 32;
-	ret = (bitmap[arr_idx] & (1 << bit_idx)) ? 1 : 0;
-	return ret;
+
+	return (bitmap[arr_idx] & (1 << bit_idx)) ? 1 : 0;
 }
 
 void clear_feat_caps(u32 *bitmap, enum place_holder_in_cap_bitmap cap)
@@ -2043,7 +2050,7 @@ void clear_feat_caps(u32 *bitmap, enum place_holder_in_cap_bitmap cap)
 int wcn36xx_smd_feature_caps_exchange(struct wcn36xx *wcn)
 {
 	struct wcn36xx_hal_feat_caps_msg msg_body, *rsp;
-	int ret = 0, i;
+	int ret, i;
 
 	mutex_lock(&wcn->hal_mutex);
 	INIT_HAL_MSG(msg_body, WCN36XX_HAL_FEATURE_CAPS_EXCHANGE_REQ);
@@ -2079,7 +2086,7 @@ int wcn36xx_smd_add_ba_session(struct wcn36xx *wcn,
 		u8 sta_index)
 {
 	struct wcn36xx_hal_add_ba_session_req_msg msg_body;
-	int ret = 0;
+	int ret;
 
 	mutex_lock(&wcn->hal_mutex);
 	INIT_HAL_MSG(msg_body, WCN36XX_HAL_ADD_BA_SESSION_REQ);
@@ -2117,7 +2124,7 @@ int wcn36xx_smd_add_ba_session(struct wcn36xx *wcn,
 int wcn36xx_smd_add_ba(struct wcn36xx *wcn)
 {
 	struct wcn36xx_hal_add_ba_req_msg msg_body;
-	int ret = 0;
+	int ret;
 
 	mutex_lock(&wcn->hal_mutex);
 	INIT_HAL_MSG(msg_body, WCN36XX_HAL_ADD_BA_REQ);
@@ -2145,7 +2152,7 @@ int wcn36xx_smd_add_ba(struct wcn36xx *wcn)
 int wcn36xx_smd_del_ba(struct wcn36xx *wcn, u16 tid, u8 sta_index)
 {
 	struct wcn36xx_hal_del_ba_req_msg msg_body;
-	int ret = 0;
+	int ret;
 
 	mutex_lock(&wcn->hal_mutex);
 	INIT_HAL_MSG(msg_body, WCN36XX_HAL_DEL_BA_REQ);
@@ -2185,7 +2192,7 @@ int wcn36xx_smd_trigger_ba(struct wcn36xx *wcn, u8 sta_index)
 {
 	struct wcn36xx_hal_trigger_ba_req_msg msg_body;
 	struct wcn36xx_hal_trigger_ba_req_candidate *candidate;
-	int ret = 0;
+	int ret;
 
 	mutex_lock(&wcn->hal_mutex);
 	INIT_HAL_MSG(msg_body, WCN36XX_HAL_TRIGGER_BA_REQ);
@@ -2364,7 +2371,7 @@ int wcn36xx_smd_update_cfg(struct wcn36xx *wcn, u32 cfg_id, u32 value)
 {
 	struct wcn36xx_hal_update_cfg_req_msg msg_body, *body;
 	size_t len;
-	int ret = 0;
+	int ret;
 
 	mutex_lock(&wcn->hal_mutex);
 	INIT_HAL_MSG(msg_body, WCN36XX_HAL_UPDATE_CFG_REQ);
@@ -2399,7 +2406,7 @@ int wcn36xx_smd_set_mc_list(struct wcn36xx *wcn,
 {
 	struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
 	struct wcn36xx_hal_rcv_flt_pkt_set_mc_list_req_msg *msg_body = NULL;
-	int ret = 0;
+	int ret;
 
 	mutex_lock(&wcn->hal_mutex);
 
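get_feat_caps() above now returns the bit test directly; the capability index maps to a 32-bit word and a bit within it as cap / 32 and cap % 32. A tiny standalone version of that lookup:

#include <stdio.h>
#include <stdint.h>

static int feat_cap_is_set(const uint32_t *bitmap, unsigned int cap)
{
	if (cap > 127)
		return -1;
	return (bitmap[cap / 32] >> (cap % 32)) & 1;
}

int main(void)
{
	uint32_t caps[4] = { 0 };

	caps[65 / 32] |= 1u << (65 % 32);	/* set capability 65 */
	printf("cap 65: %d, cap 66: %d\n",
	       feat_cap_is_set(caps, 65), feat_cap_is_set(caps, 66));
	return 0;
}
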
diff --git a/drivers/net/wireless/ath/wcn36xx/wcn36xx.h b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h
index 11e74015c79af02d47039efd171bec340438fe4c..a58f313983b904ae65342fcfd904cf0018dfe436 100644
--- a/drivers/net/wireless/ath/wcn36xx/wcn36xx.h
+++ b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h
@@ -129,6 +129,8 @@ struct wcn36xx_vif {
 	u8 self_sta_index;
 	u8 self_dpu_desc_index;
 	u8 self_ucast_dpu_sign;
+
+	struct list_head sta_list;
 };
 
 /**
@@ -154,6 +156,7 @@ struct wcn36xx_vif {
  * |______________|_____________|_______________|
  */
 struct wcn36xx_sta {
+	struct list_head list;
 	struct wcn36xx_vif *vif;
 	u16 aid;
 	u16 tid;
diff --git a/drivers/net/wireless/ath/wil6210/Makefile b/drivers/net/wireless/ath/wil6210/Makefile
index 398edd2a7f2b4b2957e83ce1e19d13e7b75eecdd..d3d61ae459e25307e15112538d8abadea347f415 100644
--- a/drivers/net/wireless/ath/wil6210/Makefile
+++ b/drivers/net/wireless/ath/wil6210/Makefile
@@ -9,6 +9,7 @@ wil6210-$(CONFIG_WIL6210_DEBUGFS) += debugfs.o
 wil6210-y += wmi.o
 wil6210-y += interrupt.o
 wil6210-y += txrx.o
+wil6210-y += txrx_edma.o
 wil6210-y += debug.o
 wil6210-y += rx_reorder.o
 wil6210-y += fw.o
diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c
index 013d056a7a4cb4b5c56991a2b256825dad0e6e56..e63b07830f2cf86fcbb16f2d352f85c23deb50d9 100644
--- a/drivers/net/wireless/ath/wil6210/cfg80211.c
+++ b/drivers/net/wireless/ath/wil6210/cfg80211.c
@@ -1726,7 +1726,7 @@ static int wil_cfg80211_change_station(struct wiphy *wiphy,
 	struct wil6210_priv *wil = wiphy_to_wil(wiphy);
 	int authorize;
 	int cid, i;
-	struct vring_tx_data *txdata = NULL;
+	struct wil_ring_tx_data *txdata = NULL;
 
 	wil_dbg_misc(wil, "change station %pM mask 0x%x set 0x%x mid %d\n",
 		     mac, params->sta_flags_mask, params->sta_flags_set,
@@ -1746,20 +1746,20 @@ static int wil_cfg80211_change_station(struct wiphy *wiphy,
 		return -ENOLINK;
 	}
 
-	for (i = 0; i < ARRAY_SIZE(wil->vring2cid_tid); i++)
-		if (wil->vring2cid_tid[i][0] == cid) {
-			txdata = &wil->vring_tx_data[i];
+	for (i = 0; i < ARRAY_SIZE(wil->ring2cid_tid); i++)
+		if (wil->ring2cid_tid[i][0] == cid) {
+			txdata = &wil->ring_tx_data[i];
 			break;
 		}
 
 	if (!txdata) {
-		wil_err(wil, "vring data not found\n");
+		wil_err(wil, "ring data not found\n");
 		return -ENOLINK;
 	}
 
 	authorize = params->sta_flags_set & BIT(NL80211_STA_FLAG_AUTHORIZED);
 	txdata->dot1x_open = authorize ? 1 : 0;
-	wil_dbg_misc(wil, "cid %d vring %d authorize %d\n", cid, i,
+	wil_dbg_misc(wil, "cid %d ring %d authorize %d\n", cid, i,
 		     txdata->dot1x_open);
 
 	return 0;
diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c
index ebfdff4d328cbc66217222d1c1df8329e2586b52..58ce044b1130df227fc331734f951888cbfcb5ba 100644
--- a/drivers/net/wireless/ath/wil6210/debugfs.c
+++ b/drivers/net/wireless/ath/wil6210/debugfs.c
@@ -29,7 +29,10 @@
 /* Nasty hack. Better have per device instances */
 static u32 mem_addr;
 static u32 dbg_txdesc_index;
-static u32 dbg_vring_index; /* 24+ for Rx, 0..23 for Tx */
+static u32 dbg_ring_index; /* 24+ for Rx, 0..23 for Tx */
+static u32 dbg_status_msg_index;
+/* 0..wil->num_rx_status_rings-1 for Rx, wil->tx_sring_idx for Tx */
+static u32 dbg_sring_index;
 
 enum dbg_off_type {
 	doff_u32 = 0,
@@ -47,20 +50,53 @@ struct dbg_off {
 	enum dbg_off_type type;
 };
 
-static void wil_print_vring(struct seq_file *s, struct wil6210_priv *wil,
-			    const char *name, struct vring *vring,
-			    char _s, char _h)
+static void wil_print_desc_edma(struct seq_file *s, struct wil6210_priv *wil,
+				struct wil_ring *ring,
+				char _s, char _h, int idx)
 {
-	void __iomem *x = wmi_addr(wil, vring->hwtail);
+	u8 num_of_descs;
+	bool has_skb = false;
+
+	if (ring->is_rx) {
+		struct wil_rx_enhanced_desc *rx_d =
+			(struct wil_rx_enhanced_desc *)
+			&ring->va[idx].rx.enhanced;
+		u16 buff_id = le16_to_cpu(rx_d->mac.buff_id);
+
+		has_skb = wil->rx_buff_mgmt.buff_arr[buff_id].skb;
+		seq_printf(s, "%c", (has_skb) ? _h : _s);
+	} else {
+		struct wil_tx_enhanced_desc *d =
+			(struct wil_tx_enhanced_desc *)
+			&ring->va[idx].tx.enhanced;
+
+		num_of_descs = (u8)d->mac.d[2];
+		has_skb = ring->ctx[idx].skb;
+		if (num_of_descs >= 1)
+			seq_printf(s, "%c", ring->ctx[idx].skb ? _h : _s);
+		else
+			/* num_of_descs == 0, it's a frag in a list of descs */
+			seq_printf(s, "%c", has_skb ? 'h' : _s);
+	}
+}
+
+static void wil_print_ring(struct seq_file *s, struct wil6210_priv *wil,
+			   const char *name, struct wil_ring *ring,
+			   char _s, char _h)
+{
+	void __iomem *x = wmi_addr(wil, ring->hwtail);
 	u32 v;
 
-	seq_printf(s, "VRING %s = {\n", name);
-	seq_printf(s, "  pa     = %pad\n", &vring->pa);
-	seq_printf(s, "  va     = 0x%p\n", vring->va);
-	seq_printf(s, "  size   = %d\n", vring->size);
-	seq_printf(s, "  swtail = %d\n", vring->swtail);
-	seq_printf(s, "  swhead = %d\n", vring->swhead);
-	seq_printf(s, "  hwtail = [0x%08x] -> ", vring->hwtail);
+	seq_printf(s, "RING %s = {\n", name);
+	seq_printf(s, "  pa     = %pad\n", &ring->pa);
+	seq_printf(s, "  va     = 0x%p\n", ring->va);
+	seq_printf(s, "  size   = %d\n", ring->size);
+	if (wil->use_enhanced_dma_hw && ring->is_rx)
+		seq_printf(s, "  swtail = %u\n", *ring->edma_rx_swtail.va);
+	else
+		seq_printf(s, "  swtail = %d\n", ring->swtail);
+	seq_printf(s, "  swhead = %d\n", ring->swhead);
+	seq_printf(s, "  hwtail = [0x%08x] -> ", ring->hwtail);
 	if (x) {
 		v = readl(x);
 		seq_printf(s, "0x%08x = %d\n", v, v);
@@ -68,41 +104,45 @@ static void wil_print_vring(struct seq_file *s, struct wil6210_priv *wil,
 		seq_puts(s, "???\n");
 	}
 
-	if (vring->va && (vring->size <= (1 << WIL_RING_SIZE_ORDER_MAX))) {
+	if (ring->va && (ring->size <= (1 << WIL_RING_SIZE_ORDER_MAX))) {
 		uint i;
 
-		for (i = 0; i < vring->size; i++) {
-			volatile struct vring_tx_desc *d = &vring->va[i].tx;
-
-			if ((i % 128) == 0 && (i != 0))
+		for (i = 0; i < ring->size; i++) {
+			if ((i % 128) == 0 && i != 0)
 				seq_puts(s, "\n");
-			seq_printf(s, "%c", (d->dma.status & BIT(0)) ?
-					_s : (vring->ctx[i].skb ? _h : 'h'));
+			if (wil->use_enhanced_dma_hw) {
+				wil_print_desc_edma(s, wil, ring, _s, _h, i);
+			} else {
+				volatile struct vring_tx_desc *d =
+					&ring->va[i].tx.legacy;
+				seq_printf(s, "%c", (d->dma.status & BIT(0)) ?
+					   _s : (ring->ctx[i].skb ? _h : 'h'));
+			}
 		}
 		seq_puts(s, "\n");
 	}
 	seq_puts(s, "}\n");
 }
 
-static int wil_vring_debugfs_show(struct seq_file *s, void *data)
+static int wil_ring_debugfs_show(struct seq_file *s, void *data)
 {
 	uint i;
 	struct wil6210_priv *wil = s->private;
 
-	wil_print_vring(s, wil, "rx", &wil->vring_rx, 'S', '_');
+	wil_print_ring(s, wil, "rx", &wil->ring_rx, 'S', '_');
 
-	for (i = 0; i < ARRAY_SIZE(wil->vring_tx); i++) {
-		struct vring *vring = &wil->vring_tx[i];
-		struct vring_tx_data *txdata = &wil->vring_tx_data[i];
+	for (i = 0; i < ARRAY_SIZE(wil->ring_tx); i++) {
+		struct wil_ring *ring = &wil->ring_tx[i];
+		struct wil_ring_tx_data *txdata = &wil->ring_tx_data[i];
 
-		if (vring->va) {
-			int cid = wil->vring2cid_tid[i][0];
-			int tid = wil->vring2cid_tid[i][1];
-			u32 swhead = vring->swhead;
-			u32 swtail = vring->swtail;
-			int used = (vring->size + swhead - swtail)
-				   % vring->size;
-			int avail = vring->size - used - 1;
+		if (ring->va) {
+			int cid = wil->ring2cid_tid[i][0];
+			int tid = wil->ring2cid_tid[i][1];
+			u32 swhead = ring->swhead;
+			u32 swtail = ring->swtail;
+			int used = (ring->size + swhead - swtail)
+				   % ring->size;
+			int avail = ring->size - used - 1;
 			char name[10];
 			char sidle[10];
 			/* performance monitoring */
@@ -137,20 +177,88 @@ static int wil_vring_debugfs_show(struct seq_file *s, void *data)
 					   txdata->dot1x_open ? "+" : "-",
 					   used, avail, sidle);
 
-			wil_print_vring(s, wil, name, vring, '_', 'H');
+			wil_print_ring(s, wil, name, ring, '_', 'H');
+		}
+	}
+
+	return 0;
+}
+
+static int wil_ring_seq_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, wil_ring_debugfs_show, inode->i_private);
+}
+
+static const struct file_operations fops_ring = {
+	.open		= wil_ring_seq_open,
+	.release	= single_release,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+};
+
+static void wil_print_sring(struct seq_file *s, struct wil6210_priv *wil,
+			    struct wil_status_ring *sring)
+{
+	void __iomem *x = wmi_addr(wil, sring->hwtail);
+	int sring_idx = sring - wil->srings;
+	u32 v;
+
+	seq_printf(s, "Status Ring %s [ %d ] = {\n",
+		   sring->is_rx ? "RX" : "TX", sring_idx);
+	seq_printf(s, "  pa     = %pad\n", &sring->pa);
+	seq_printf(s, "  va     = 0x%pK\n", sring->va);
+	seq_printf(s, "  size   = %d\n", sring->size);
+	seq_printf(s, "  elem_size   = %zu\n", sring->elem_size);
+	seq_printf(s, "  swhead = %d\n", sring->swhead);
+	seq_printf(s, "  hwtail = [0x%08x] -> ", sring->hwtail);
+	if (x) {
+		v = readl_relaxed(x);
+		seq_printf(s, "0x%08x = %d\n", v, v);
+	} else {
+		seq_puts(s, "???\n");
+	}
+	seq_printf(s, "  desc_rdy_pol   = %d\n", sring->desc_rdy_pol);
+
+	if (sring->va && (sring->size <= (1 << WIL_RING_SIZE_ORDER_MAX))) {
+		uint i;
+
+		for (i = 0; i < sring->size; i++) {
+			u32 *sdword_0 =
+				(u32 *)(sring->va + (sring->elem_size * i));
+
+			if ((i % 128) == 0 && i != 0)
+				seq_puts(s, "\n");
+			if (i == sring->swhead)
+				seq_printf(s, "%c", (*sdword_0 & BIT(31)) ?
+					   'X' : 'x');
+			else
+				seq_printf(s, "%c", (*sdword_0 & BIT(31)) ?
+					   '1' : '0');
 		}
+		seq_puts(s, "\n");
 	}
+	seq_puts(s, "}\n");
+}
+
+static int wil_srings_debugfs_show(struct seq_file *s, void *data)
+{
+	struct wil6210_priv *wil = s->private;
+	int i = 0;
+
+	for (i = 0; i < WIL6210_MAX_STATUS_RINGS; i++)
+		if (wil->srings[i].va)
+			wil_print_sring(s, wil, &wil->srings[i]);
 
 	return 0;
 }
 
-static int wil_vring_seq_open(struct inode *inode, struct file *file)
+static int wil_srings_seq_open(struct inode *inode, struct file *file)
 {
-	return single_open(file, wil_vring_debugfs_show, inode->i_private);
+	return single_open(file, wil_srings_debugfs_show, inode->i_private);
 }
 
-static const struct file_operations fops_vring = {
-	.open		= wil_vring_seq_open,
+static const struct file_operations fops_srings = {
+	.open		= wil_srings_seq_open,
 	.release	= single_release,
 	.read		= seq_read,
 	.llseek		= seq_lseek,
@@ -162,8 +270,8 @@ static void wil_seq_hexdump(struct seq_file *s, void *p, int len,
 	seq_hex_dump(s, prefix, DUMP_PREFIX_NONE, 16, 1, p, len, false);
 }
 
-static void wil_print_ring(struct seq_file *s, const char *prefix,
-			   void __iomem *off)
+static void wil_print_mbox_ring(struct seq_file *s, const char *prefix,
+				void __iomem *off)
 {
 	struct wil6210_priv *wil = s->private;
 	struct wil6210_mbox_ring r;
@@ -249,9 +357,9 @@ static int wil_mbox_debugfs_show(struct seq_file *s, void *data)
 	if (ret < 0)
 		return ret;
 
-	wil_print_ring(s, "tx", wil->csr + HOST_MBOX +
+	wil_print_mbox_ring(s, "tx", wil->csr + HOST_MBOX +
 		       offsetof(struct wil6210_mbox_ctl, tx));
-	wil_print_ring(s, "rx", wil->csr + HOST_MBOX +
+	wil_print_mbox_ring(s, "rx", wil->csr + HOST_MBOX +
 		       offsetof(struct wil6210_mbox_ctl, rx));
 
 	wil_pm_runtime_put(wil);
@@ -719,13 +827,13 @@ static ssize_t wil_write_back(struct file *file, const char __user *buf,
 
 	if ((strcmp(cmd, "add") == 0) ||
 	    (strcmp(cmd, "del_tx") == 0)) {
-		struct vring_tx_data *txdata;
+		struct wil_ring_tx_data *txdata;
 
 		if (p1 < 0 || p1 >= WIL6210_MAX_TX_RINGS) {
 			wil_err(wil, "BACK: invalid ring id %d\n", p1);
 			return -EINVAL;
 		}
-		txdata = &wil->vring_tx_data[p1];
+		txdata = &wil->ring_tx_data[p1];
 		if (strcmp(cmd, "add") == 0) {
 			if (rc < 3) {
 				wil_err(wil, "BACK: add require at least 2 params\n");
@@ -972,54 +1080,93 @@ static void wil_seq_print_skb(struct seq_file *s, struct sk_buff *skb)
 static int wil_txdesc_debugfs_show(struct seq_file *s, void *data)
 {
 	struct wil6210_priv *wil = s->private;
-	struct vring *vring;
-	bool tx = (dbg_vring_index < WIL6210_MAX_TX_RINGS);
+	struct wil_ring *ring;
+	bool tx;
+	int ring_idx = dbg_ring_index;
+	int txdesc_idx = dbg_txdesc_index;
+	volatile struct vring_tx_desc *d;
+	volatile u32 *u;
+	struct sk_buff *skb;
+
+	if (wil->use_enhanced_dma_hw) {
+		/* RX ring index == 0 */
+		if (ring_idx >= WIL6210_MAX_TX_RINGS) {
+			seq_printf(s, "invalid ring index %d\n", ring_idx);
+			return 0;
+		}
+		tx = ring_idx > 0; /* desc ring 0 is reserved for RX */
+	} else {
+		/* RX ring index == WIL6210_MAX_TX_RINGS */
+		if (ring_idx > WIL6210_MAX_TX_RINGS) {
+			seq_printf(s, "invalid ring index %d\n", ring_idx);
+			return 0;
+		}
+		tx = (ring_idx < WIL6210_MAX_TX_RINGS);
+	}
 
-	vring = tx ? &wil->vring_tx[dbg_vring_index] : &wil->vring_rx;
+	ring = tx ? &wil->ring_tx[ring_idx] : &wil->ring_rx;
 
-	if (!vring->va) {
+	if (!ring->va) {
 		if (tx)
-			seq_printf(s, "No Tx[%2d] VRING\n", dbg_vring_index);
+			seq_printf(s, "No Tx[%2d] RING\n", ring_idx);
 		else
-			seq_puts(s, "No Rx VRING\n");
+			seq_puts(s, "No Rx RING\n");
 		return 0;
 	}
 
-	if (dbg_txdesc_index < vring->size) {
-		/* use struct vring_tx_desc for Rx as well,
-		 * only field used, .dma.length, is the same
-		 */
-		volatile struct vring_tx_desc *d =
-				&vring->va[dbg_txdesc_index].tx;
-		volatile u32 *u = (volatile u32 *)d;
-		struct sk_buff *skb = vring->ctx[dbg_txdesc_index].skb;
-
+	if (txdesc_idx >= ring->size) {
 		if (tx)
-			seq_printf(s, "Tx[%2d][%3d] = {\n", dbg_vring_index,
-				   dbg_txdesc_index);
+			seq_printf(s, "[%2d] TxDesc index (%d) >= size (%d)\n",
+				   ring_idx, txdesc_idx, ring->size);
 		else
-			seq_printf(s, "Rx[%3d] = {\n", dbg_txdesc_index);
-		seq_printf(s, "  MAC = 0x%08x 0x%08x 0x%08x 0x%08x\n",
-			   u[0], u[1], u[2], u[3]);
-		seq_printf(s, "  DMA = 0x%08x 0x%08x 0x%08x 0x%08x\n",
-			   u[4], u[5], u[6], u[7]);
-		seq_printf(s, "  SKB = 0x%p\n", skb);
+			seq_printf(s, "RxDesc index (%d) >= size (%d)\n",
+				   txdesc_idx, ring->size);
+		return 0;
+	}
 
-		if (skb) {
-			skb_get(skb);
-			wil_seq_print_skb(s, skb);
-			kfree_skb(skb);
+	/* use struct vring_tx_desc for Rx as well,
+	 * only field used, .dma.length, is the same
+	 */
+	d = &ring->va[txdesc_idx].tx.legacy;
+	u = (volatile u32 *)d;
+	skb = NULL;
+
+	if (wil->use_enhanced_dma_hw) {
+		if (tx) {
+			skb = ring->ctx[txdesc_idx].skb;
+		} else {
+			struct wil_rx_enhanced_desc *rx_d =
+				(struct wil_rx_enhanced_desc *)
+				&ring->va[txdesc_idx].rx.enhanced;
+			u16 buff_id = le16_to_cpu(rx_d->mac.buff_id);
+
+			if (!wil_val_in_range(buff_id, 0,
+					      wil->rx_buff_mgmt.size)) {
+				seq_printf(s, "invalid buff_id %d\n", buff_id);
+				return 0;
+			}
+			skb = wil->rx_buff_mgmt.buff_arr[buff_id].skb;
 		}
-		seq_puts(s, "}\n");
 	} else {
-		if (tx)
-			seq_printf(s, "[%2d] TxDesc index (%d) >= size (%d)\n",
-				   dbg_vring_index, dbg_txdesc_index,
-				   vring->size);
-		else
-			seq_printf(s, "RxDesc index (%d) >= size (%d)\n",
-				   dbg_txdesc_index, vring->size);
+		skb = ring->ctx[txdesc_idx].skb;
 	}
+	if (tx)
+		seq_printf(s, "Tx[%2d][%3d] = {\n", ring_idx,
+			   txdesc_idx);
+	else
+		seq_printf(s, "Rx[%3d] = {\n", txdesc_idx);
+	seq_printf(s, "  MAC = 0x%08x 0x%08x 0x%08x 0x%08x\n",
+		   u[0], u[1], u[2], u[3]);
+	seq_printf(s, "  DMA = 0x%08x 0x%08x 0x%08x 0x%08x\n",
+		   u[4], u[5], u[6], u[7]);
+	seq_printf(s, "  SKB = 0x%p\n", skb);
+
+	if (skb) {
+		skb_get(skb);
+		wil_seq_print_skb(s, skb);
+		kfree_skb(skb);
+	}
+	seq_puts(s, "}\n");
 
 	return 0;
 }
@@ -1036,6 +1183,115 @@ static const struct file_operations fops_txdesc = {
 	.llseek		= seq_lseek,
 };
 
+/*---------Tx/Rx status message------------*/
+static int wil_status_msg_debugfs_show(struct seq_file *s, void *data)
+{
+	struct wil6210_priv *wil = s->private;
+	int sring_idx = dbg_sring_index;
+	struct wil_status_ring *sring;
+	bool tx = (sring_idx == wil->tx_sring_idx);
+	u32 status_msg_idx = dbg_status_msg_index;
+	u32 *u;
+
+	if (sring_idx >= WIL6210_MAX_STATUS_RINGS) {
+		seq_printf(s, "invalid status ring index %d\n", sring_idx);
+		return 0;
+	}
+
+	sring = &wil->srings[sring_idx];
+
+	if (!sring->va) {
+		seq_printf(s, "No %cX status ring\n", tx ? 'T' : 'R');
+		return 0;
+	}
+
+	if (status_msg_idx >= sring->size) {
+		seq_printf(s, "%cxDesc index (%d) >= size (%d)\n",
+			   tx ? 'T' : 'R', status_msg_idx, sring->size);
+		return 0;
+	}
+
+	u = sring->va + (sring->elem_size * status_msg_idx);
+
+	seq_printf(s, "%cx[%d][%3d] = {\n",
+		   tx ? 'T' : 'R', sring_idx, status_msg_idx);
+
+	seq_printf(s, "  0x%08x 0x%08x 0x%08x 0x%08x\n",
+		   u[0], u[1], u[2], u[3]);
+	if (!tx && !wil->use_compressed_rx_status)
+		seq_printf(s, "  0x%08x 0x%08x 0x%08x 0x%08x\n",
+			   u[4], u[5], u[6], u[7]);
+
+	seq_puts(s, "}\n");
+
+	return 0;
+}
+
+static int wil_status_msg_seq_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, wil_status_msg_debugfs_show,
+			   inode->i_private);
+}
+
+static const struct file_operations fops_status_msg = {
+	.open		= wil_status_msg_seq_open,
+	.release	= single_release,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+};
+
+static int wil_print_rx_buff(struct seq_file *s, struct list_head *lh)
+{
+	struct wil_rx_buff *it;
+	int i = 0;
+
+	list_for_each_entry(it, lh, list) {
+		if ((i % 16) == 0 && i != 0)
+			seq_puts(s, "\n    ");
+		seq_printf(s, "[%4d] ", it->id);
+		i++;
+	}
+	seq_printf(s, "\nNumber of buffers: %u\n", i);
+
+	return i;
+}
+
+static int wil_rx_buff_mgmt_debugfs_show(struct seq_file *s, void *data)
+{
+	struct wil6210_priv *wil = s->private;
+	struct wil_rx_buff_mgmt *rbm = &wil->rx_buff_mgmt;
+	int num_active;
+	int num_free;
+
+	seq_printf(s, "  size = %zu\n", rbm->size);
+	seq_printf(s, "  free_list_empty_cnt = %lu\n",
+		   rbm->free_list_empty_cnt);
+
+	/* Print active list */
+	seq_puts(s, "  Active list:\n");
+	num_active = wil_print_rx_buff(s, &rbm->active);
+	seq_puts(s, "\n  Free list:\n");
+	num_free = wil_print_rx_buff(s, &rbm->free);
+
+	seq_printf(s, "  Total number of buffers: %u\n",
+		   num_active + num_free);
+
+	return 0;
+}
+
+static int wil_rx_buff_mgmt_seq_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, wil_rx_buff_mgmt_debugfs_show,
+			   inode->i_private);
+}
+
+static const struct file_operations fops_rx_buff_mgmt = {
+	.open		= wil_rx_buff_mgmt_seq_open,
+	.release	= single_release,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+};
+
 /*---------beamforming------------*/
 static char *wil_bfstatus_str(u32 status)
 {
@@ -1478,6 +1734,13 @@ __acquires(&p->tid_rx_lock) __releases(&p->tid_rx_lock)
 				   p->stats.rx_large_frame,
 				   p->stats.rx_replay);
 
+			if (wil->use_enhanced_dma_hw)
+				seq_printf(s,
+					   "mic error  %lu, key error %lu, amsdu error %lu\n",
+					   p->stats.rx_mic_error,
+					   p->stats.rx_key_error,
+					   p->stats.rx_amsdu_error);
+
 			seq_puts(s, "Rx/MCS:");
 			for (mcs = 0; mcs < ARRAY_SIZE(p->stats.rx_per_mcs);
 			     mcs++)
@@ -1760,6 +2023,60 @@ static const struct file_operations fops_suspend_stats = {
 	.open  = simple_open,
 };
 
+/*---------compressed_rx_status---------*/
+static ssize_t wil_compressed_rx_status_write(struct file *file,
+					      const char __user *buf,
+					      size_t len, loff_t *ppos)
+{
+	struct seq_file *s = file->private_data;
+	struct wil6210_priv *wil = s->private;
+	int compressed_rx_status;
+	int rc;
+
+	rc = kstrtoint_from_user(buf, len, 0, &compressed_rx_status);
+	if (rc) {
+		wil_err(wil, "Invalid argument\n");
+		return rc;
+	}
+
+	if (wil_has_active_ifaces(wil, true, false)) {
+		wil_err(wil, "cannot change edma config after iface is up\n");
+		return -EPERM;
+	}
+
+	wil_info(wil, "%sable compressed_rx_status\n",
+		 compressed_rx_status ? "En" : "Dis");
+
+	wil->use_compressed_rx_status = compressed_rx_status;
+
+	return len;
+}
+
+static int
+wil_compressed_rx_status_show(struct seq_file *s, void *data)
+{
+	struct wil6210_priv *wil = s->private;
+
+	seq_printf(s, "%d\n", wil->use_compressed_rx_status);
+
+	return 0;
+}
+
+static int
+wil_compressed_rx_status_seq_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, wil_compressed_rx_status_show,
+			   inode->i_private);
+}
+
+static const struct file_operations fops_compressed_rx_status = {
+	.open  = wil_compressed_rx_status_seq_open,
+	.release = single_release,
+	.read = seq_read,
+	.write = wil_compressed_rx_status_write,
+	.llseek	= seq_lseek,
+};
+
 /*----------------*/
 static void wil6210_debugfs_init_blobs(struct wil6210_priv *wil,
 				       struct dentry *dbg)
@@ -1790,7 +2107,7 @@ static const struct {
 	const struct file_operations *fops;
 } dbg_files[] = {
 	{"mbox",	0444,		&fops_mbox},
-	{"vrings",	0444,		&fops_vring},
+	{"rings",	0444,		&fops_ring},
 	{"stations", 0444,		&fops_sta},
 	{"mids",	0444,		&fops_mids},
 	{"desc",	0444,		&fops_txdesc},
@@ -1813,6 +2130,10 @@ static const struct {
 	{"fw_capabilities",	0444,	&fops_fw_capabilities},
 	{"fw_version",	0444,		&fops_fw_version},
 	{"suspend_stats",	0644,	&fops_suspend_stats},
+	{"compressed_rx_status", 0644,	&fops_compressed_rx_status},
+	{"srings",	0444,		&fops_srings},
+	{"status_msg",	0444,		&fops_status_msg},
+	{"rx_buff_mgmt",	0444,	&fops_rx_buff_mgmt},
 };
 
 static void wil6210_debugfs_init_files(struct wil6210_priv *wil,
@@ -1858,7 +2179,12 @@ static const struct dbg_off dbg_wil_off[] = {
 	WIL_FIELD(chip_revision, 0444,	doff_u8),
 	WIL_FIELD(abft_len, 0644,		doff_u8),
 	WIL_FIELD(wakeup_trigger, 0644,		doff_u8),
-	WIL_FIELD(vring_idle_trsh, 0644,	doff_u32),
+	WIL_FIELD(ring_idle_trsh, 0644,	doff_u32),
+	WIL_FIELD(num_rx_status_rings, 0644,	doff_u8),
+	WIL_FIELD(rx_status_ring_order, 0644,	doff_u32),
+	WIL_FIELD(tx_status_ring_order, 0644,	doff_u32),
+	WIL_FIELD(rx_buff_id_count, 0644,	doff_u32),
+	WIL_FIELD(amsdu_en, 0644,	doff_u8),
 	{},
 };
 
@@ -1872,9 +2198,11 @@ static const struct dbg_off dbg_wil_regs[] = {
 /* static parameters */
 static const struct dbg_off dbg_statics[] = {
 	{"desc_index",	0644, (ulong)&dbg_txdesc_index, doff_u32},
-	{"vring_index",	0644, (ulong)&dbg_vring_index, doff_u32},
+	{"ring_index",	0644, (ulong)&dbg_ring_index, doff_u32},
 	{"mem_addr",	0644, (ulong)&mem_addr, doff_u32},
 	{"led_polarity", 0644, (ulong)&led_polarity, doff_u8},
+	{"status_index", 0644, (ulong)&dbg_status_msg_index, doff_u32},
+	{"sring_index",	0644, (ulong)&dbg_sring_index, doff_u32},
 	{},
 };
 
diff --git a/drivers/net/wireless/ath/wil6210/ethtool.c b/drivers/net/wireless/ath/wil6210/ethtool.c
index e7ff41e623d2b1a2c14311e1150552e025519305..a04c87ffd37bc6b87c16d289c6417dcc1aaba8e3 100644
--- a/drivers/net/wireless/ath/wil6210/ethtool.c
+++ b/drivers/net/wireless/ath/wil6210/ethtool.c
@@ -101,7 +101,7 @@ static int wil_ethtoolops_set_coalesce(struct net_device *ndev,
 	if (ret < 0)
 		return ret;
 
-	wil_configure_interrupt_moderation(wil);
+	wil->txrx_ops.configure_interrupt_moderation(wil);
 
 	wil_pm_runtime_put(wil);
 
diff --git a/drivers/net/wireless/ath/wil6210/interrupt.c b/drivers/net/wireless/ath/wil6210/interrupt.c
index 84e9840c175282f319c221ffa1aec585304a5aa9..d7e112da6a8d3ba520aa9ed232053cd9481f16a8 100644
--- a/drivers/net/wireless/ath/wil6210/interrupt.c
+++ b/drivers/net/wireless/ath/wil6210/interrupt.c
@@ -44,6 +44,8 @@
 				    (~(BIT_DMA_EP_RX_ICR_RX_HTRSH)))
 #define WIL6210_IMC_TX		(BIT_DMA_EP_TX_ICR_TX_DONE | \
 				BIT_DMA_EP_TX_ICR_TX_DONE_N(0))
+#define WIL6210_IMC_TX_EDMA		BIT_TX_STATUS_IRQ
+#define WIL6210_IMC_RX_EDMA		BIT_RX_STATUS_IRQ
 #define WIL6210_IMC_MISC_NO_HALP	(ISR_MISC_FW_READY | \
 					 ISR_MISC_MBOX_EVT | \
 					 ISR_MISC_FW_ERROR)
@@ -87,12 +89,24 @@ static void wil6210_mask_irq_tx(struct wil6210_priv *wil)
 	      WIL6210_IRQ_DISABLE);
 }
 
+static void wil6210_mask_irq_tx_edma(struct wil6210_priv *wil)
+{
+	wil_w(wil, RGF_INT_GEN_TX_ICR + offsetof(struct RGF_ICR, IMS),
+	      WIL6210_IRQ_DISABLE);
+}
+
 static void wil6210_mask_irq_rx(struct wil6210_priv *wil)
 {
 	wil_w(wil, RGF_DMA_EP_RX_ICR + offsetof(struct RGF_ICR, IMS),
 	      WIL6210_IRQ_DISABLE);
 }
 
+static void wil6210_mask_irq_rx_edma(struct wil6210_priv *wil)
+{
+	wil_w(wil, RGF_INT_GEN_RX_ICR + offsetof(struct RGF_ICR, IMS),
+	      WIL6210_IRQ_DISABLE);
+}
+
 static void wil6210_mask_irq_misc(struct wil6210_priv *wil, bool mask_halp)
 {
 	wil_dbg_irq(wil, "mask_irq_misc: mask_halp(%s)\n",
@@ -125,6 +139,12 @@ void wil6210_unmask_irq_tx(struct wil6210_priv *wil)
 	      WIL6210_IMC_TX);
 }
 
+void wil6210_unmask_irq_tx_edma(struct wil6210_priv *wil)
+{
+	wil_w(wil, RGF_INT_GEN_TX_ICR + offsetof(struct RGF_ICR, IMC),
+	      WIL6210_IMC_TX_EDMA);
+}
+
 void wil6210_unmask_irq_rx(struct wil6210_priv *wil)
 {
 	bool unmask_rx_htrsh = atomic_read(&wil->connected_vifs) > 0;
@@ -133,6 +153,12 @@ void wil6210_unmask_irq_rx(struct wil6210_priv *wil)
 	      unmask_rx_htrsh ? WIL6210_IMC_RX : WIL6210_IMC_RX_NO_RX_HTRSH);
 }
 
+void wil6210_unmask_irq_rx_edma(struct wil6210_priv *wil)
+{
+	wil_w(wil, RGF_INT_GEN_RX_ICR + offsetof(struct RGF_ICR, IMC),
+	      WIL6210_IMC_RX_EDMA);
+}
+
 static void wil6210_unmask_irq_misc(struct wil6210_priv *wil, bool unmask_halp)
 {
 	wil_dbg_irq(wil, "unmask_irq_misc: unmask_halp(%s)\n",
@@ -164,7 +190,9 @@ void wil_mask_irq(struct wil6210_priv *wil)
 	wil_dbg_irq(wil, "mask_irq\n");
 
 	wil6210_mask_irq_tx(wil);
+	wil6210_mask_irq_tx_edma(wil);
 	wil6210_mask_irq_rx(wil);
+	wil6210_mask_irq_rx_edma(wil);
 	wil6210_mask_irq_misc(wil, true);
 	wil6210_mask_irq_pseudo(wil);
 }
@@ -179,13 +207,43 @@ void wil_unmask_irq(struct wil6210_priv *wil)
 	      WIL_ICR_ICC_VALUE);
 	wil_w(wil, RGF_DMA_EP_MISC_ICR + offsetof(struct RGF_ICR, ICC),
 	      WIL_ICR_ICC_MISC_VALUE);
+	wil_w(wil, RGF_INT_GEN_TX_ICR + offsetof(struct RGF_ICR, ICC),
+	      WIL_ICR_ICC_VALUE);
+	wil_w(wil, RGF_INT_GEN_RX_ICR + offsetof(struct RGF_ICR, ICC),
+	      WIL_ICR_ICC_VALUE);
 
 	wil6210_unmask_irq_pseudo(wil);
-	wil6210_unmask_irq_tx(wil);
-	wil6210_unmask_irq_rx(wil);
+	if (wil->use_enhanced_dma_hw) {
+		wil6210_unmask_irq_tx_edma(wil);
+		wil6210_unmask_irq_rx_edma(wil);
+	} else {
+		wil6210_unmask_irq_tx(wil);
+		wil6210_unmask_irq_rx(wil);
+	}
 	wil6210_unmask_irq_misc(wil, true);
 }
 
+void wil_configure_interrupt_moderation_edma(struct wil6210_priv *wil)
+{
+	u32 moderation;
+
+	wil_s(wil, RGF_INT_GEN_IDLE_TIME_LIMIT, WIL_EDMA_IDLE_TIME_LIMIT_USEC);
+
+	wil_s(wil, RGF_INT_GEN_TIME_UNIT_LIMIT, WIL_EDMA_TIME_UNIT_CLK_CYCLES);
+
+	/* Update RX and TX moderation */
+	moderation = wil->rx_max_burst_duration |
+		(WIL_EDMA_AGG_WATERMARK << WIL_EDMA_AGG_WATERMARK_POS);
+	wil_w(wil, RGF_INT_CTRL_INT_GEN_CFG_0, moderation);
+	wil_w(wil, RGF_INT_CTRL_INT_GEN_CFG_1, moderation);
+
+	/* Treat special events as regular
+	 * (set bit 0 to 0x1 and clear bits 1-8)
+	 */
+	wil_c(wil, RGF_INT_COUNT_ON_SPECIAL_EVT, 0x1FE);
+	wil_s(wil, RGF_INT_COUNT_ON_SPECIAL_EVT, 0x1);
+}
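
Two bits of arithmetic above are easy to misread: the moderation word packs rx_max_burst_duration in the low bits with the aggregation watermark shifted above it, and the special-event fixup clears bits 1..8 while setting bit 0 (wil_c() clears a mask, wil_s() sets one). The standalone program below reruns that arithmetic with placeholder values for the WIL_EDMA_* constants, whose real definitions live in the driver headers.

/* Quick check of the bit arithmetic used above; constants are placeholders. */
#include <stdio.h>
#include <stdint.h>

#define AGG_WATERMARK      0x3ff    /* placeholder for WIL_EDMA_AGG_WATERMARK */
#define AGG_WATERMARK_POS  16       /* placeholder for WIL_EDMA_AGG_WATERMARK_POS */

int main(void)
{
	uint32_t rx_max_burst_duration = 0x40;  /* example value */
	uint32_t moderation, evt = 0xabcd;      /* pretend register content */

	/* RGF_INT_CTRL_INT_GEN_CFG_x: burst duration low, watermark above it */
	moderation = rx_max_burst_duration | (AGG_WATERMARK << AGG_WATERMARK_POS);
	printf("moderation  = 0x%08x\n", moderation);

	/* RGF_INT_COUNT_ON_SPECIAL_EVT: clear bits 1..8, set bit 0 */
	evt &= ~0x1FEu;
	evt |= 0x1u;
	printf("special evt = 0x%08x\n", evt);  /* 0xabcd -> 0xaa01 */
	return 0;
}
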
+
 void wil_configure_interrupt_moderation(struct wil6210_priv *wil)
 {
 	struct wireless_dev *wdev = wil->main_ndev->ieee80211_ptr;
@@ -294,6 +352,97 @@ static irqreturn_t wil6210_irq_rx(int irq, void *cookie)
 	return IRQ_HANDLED;
 }
 
+static irqreturn_t wil6210_irq_rx_edma(int irq, void *cookie)
+{
+	struct wil6210_priv *wil = cookie;
+	u32 isr = wil_ioread32_and_clear(wil->csr +
+					 HOSTADDR(RGF_INT_GEN_RX_ICR) +
+					 offsetof(struct RGF_ICR, ICR));
+	bool need_unmask = true;
+
+	trace_wil6210_irq_rx(isr);
+	wil_dbg_irq(wil, "ISR RX 0x%08x\n", isr);
+
+	if (unlikely(!isr)) {
+		wil_err(wil, "spurious IRQ: RX\n");
+		return IRQ_NONE;
+	}
+
+	wil6210_mask_irq_rx_edma(wil);
+
+	if (likely(isr & BIT_RX_STATUS_IRQ)) {
+		wil_dbg_irq(wil, "RX status ring\n");
+		isr &= ~BIT_RX_STATUS_IRQ;
+		if (likely(test_bit(wil_status_fwready, wil->status))) {
+			if (likely(test_bit(wil_status_napi_en, wil->status))) {
+				wil_dbg_txrx(wil, "NAPI(Rx) schedule\n");
+				need_unmask = false;
+				napi_schedule(&wil->napi_rx);
+			} else {
+				wil_err(wil,
+					"Got Rx interrupt while stopping interface\n");
+			}
+		} else {
+			wil_err(wil, "Got Rx interrupt while in reset\n");
+		}
+	}
+
+	if (unlikely(isr))
+		wil_err(wil, "un-handled RX ISR bits 0x%08x\n", isr);
+
+	/* Rx IRQ will be enabled when NAPI processing finished */
+
+	atomic_inc(&wil->isr_count_rx);
+
+	if (unlikely(need_unmask))
+		wil6210_unmask_irq_rx_edma(wil);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t wil6210_irq_tx_edma(int irq, void *cookie)
+{
+	struct wil6210_priv *wil = cookie;
+	u32 isr = wil_ioread32_and_clear(wil->csr +
+					 HOSTADDR(RGF_INT_GEN_TX_ICR) +
+					 offsetof(struct RGF_ICR, ICR));
+	bool need_unmask = true;
+
+	trace_wil6210_irq_tx(isr);
+	wil_dbg_irq(wil, "ISR TX 0x%08x\n", isr);
+
+	if (unlikely(!isr)) {
+		wil_err(wil, "spurious IRQ: TX\n");
+		return IRQ_NONE;
+	}
+
+	wil6210_mask_irq_tx_edma(wil);
+
+	if (likely(isr & BIT_TX_STATUS_IRQ)) {
+		wil_dbg_irq(wil, "TX status ring\n");
+		isr &= ~BIT_TX_STATUS_IRQ;
+		if (likely(test_bit(wil_status_fwready, wil->status))) {
+			wil_dbg_txrx(wil, "NAPI(Tx) schedule\n");
+			need_unmask = false;
+			napi_schedule(&wil->napi_tx);
+		} else {
+			wil_err(wil, "Got Tx status ring IRQ while in reset\n");
+		}
+	}
+
+	if (unlikely(isr))
+		wil_err(wil, "un-handled TX ISR bits 0x%08x\n", isr);
+
+	/* Tx IRQ will be enabled when NAPI processing finished */
+
+	atomic_inc(&wil->isr_count_tx);
+
+	if (unlikely(need_unmask))
+		wil6210_unmask_irq_tx_edma(wil);
+
+	return IRQ_HANDLED;
+}
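
Both edma handlers above share one shape: read-and-clear the ICR, mask the interrupt source, hand the work to NAPI, and unmask here only when NAPI was not scheduled (otherwise the NAPI poll re-enables the interrupt when it completes). The stub-only sketch below traces that control flow; none of the helpers are real kernel or driver functions.

/* Control-flow sketch of the handlers above; stubs only, not kernel code. */
#include <stdbool.h>
#include <stdio.h>

#define BIT_STATUS_IRQ 0x1

static void mask_irq(void)   { puts("mask"); }
static void unmask_irq(void) { puts("unmask"); }
static bool napi_kick(void)  { puts("napi_schedule"); return true; }

static int irq_handler(unsigned int isr, bool fw_ready)
{
	bool need_unmask = true;

	if (!isr)
		return 0;                  /* spurious, nothing claimed */

	mask_irq();

	if (isr & BIT_STATUS_IRQ) {
		isr &= ~BIT_STATUS_IRQ;
		if (fw_ready && napi_kick())
			need_unmask = false;  /* NAPI will unmask when done */
	}

	if (isr)
		printf("unhandled bits 0x%x\n", isr);

	if (need_unmask)
		unmask_irq();
	return 1;
}

int main(void)
{
	irq_handler(BIT_STATUS_IRQ, true);   /* normal case: stays masked */
	irq_handler(BIT_STATUS_IRQ, false);  /* in reset: unmask again */
	return 0;
}
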
+
 static irqreturn_t wil6210_irq_tx(int irq, void *cookie)
 {
 	struct wil6210_priv *wil = cookie;
@@ -510,30 +659,53 @@ static irqreturn_t wil6210_thread_irq(int irq, void *cookie)
  */
 static int wil6210_debug_irq_mask(struct wil6210_priv *wil, u32 pseudo_cause)
 {
+	u32 icm_rx, icr_rx, imv_rx;
+	u32 icm_tx, icr_tx, imv_tx;
+	u32 icm_misc, icr_misc, imv_misc;
+
 	if (!test_bit(wil_status_irqen, wil->status)) {
-		u32 icm_rx = wil_ioread32_and_clear(wil->csr +
-				HOSTADDR(RGF_DMA_EP_RX_ICR) +
-				offsetof(struct RGF_ICR, ICM));
-		u32 icr_rx = wil_ioread32_and_clear(wil->csr +
-				HOSTADDR(RGF_DMA_EP_RX_ICR) +
-				offsetof(struct RGF_ICR, ICR));
-		u32 imv_rx = wil_r(wil, RGF_DMA_EP_RX_ICR +
+		if (wil->use_enhanced_dma_hw) {
+			icm_rx = wil_ioread32_and_clear(wil->csr +
+					HOSTADDR(RGF_INT_GEN_RX_ICR) +
+					offsetof(struct RGF_ICR, ICM));
+			icr_rx = wil_ioread32_and_clear(wil->csr +
+					HOSTADDR(RGF_INT_GEN_RX_ICR) +
+					offsetof(struct RGF_ICR, ICR));
+			imv_rx = wil_r(wil, RGF_INT_GEN_RX_ICR +
 				   offsetof(struct RGF_ICR, IMV));
-		u32 icm_tx = wil_ioread32_and_clear(wil->csr +
-				HOSTADDR(RGF_DMA_EP_TX_ICR) +
-				offsetof(struct RGF_ICR, ICM));
-		u32 icr_tx = wil_ioread32_and_clear(wil->csr +
-				HOSTADDR(RGF_DMA_EP_TX_ICR) +
-				offsetof(struct RGF_ICR, ICR));
-		u32 imv_tx = wil_r(wil, RGF_DMA_EP_TX_ICR +
+			icm_tx = wil_ioread32_and_clear(wil->csr +
+					HOSTADDR(RGF_INT_GEN_TX_ICR) +
+					offsetof(struct RGF_ICR, ICM));
+			icr_tx = wil_ioread32_and_clear(wil->csr +
+					HOSTADDR(RGF_INT_GEN_TX_ICR) +
+					offsetof(struct RGF_ICR, ICR));
+			imv_tx = wil_r(wil, RGF_INT_GEN_TX_ICR +
+					   offsetof(struct RGF_ICR, IMV));
+		} else {
+			icm_rx = wil_ioread32_and_clear(wil->csr +
+					HOSTADDR(RGF_DMA_EP_RX_ICR) +
+					offsetof(struct RGF_ICR, ICM));
+			icr_rx = wil_ioread32_and_clear(wil->csr +
+					HOSTADDR(RGF_DMA_EP_RX_ICR) +
+					offsetof(struct RGF_ICR, ICR));
+			imv_rx = wil_r(wil, RGF_DMA_EP_RX_ICR +
 				   offsetof(struct RGF_ICR, IMV));
-		u32 icm_misc = wil_ioread32_and_clear(wil->csr +
+			icm_tx = wil_ioread32_and_clear(wil->csr +
+					HOSTADDR(RGF_DMA_EP_TX_ICR) +
+					offsetof(struct RGF_ICR, ICM));
+			icr_tx = wil_ioread32_and_clear(wil->csr +
+					HOSTADDR(RGF_DMA_EP_TX_ICR) +
+					offsetof(struct RGF_ICR, ICR));
+			imv_tx = wil_r(wil, RGF_DMA_EP_TX_ICR +
+					   offsetof(struct RGF_ICR, IMV));
+		}
+		icm_misc = wil_ioread32_and_clear(wil->csr +
 				HOSTADDR(RGF_DMA_EP_MISC_ICR) +
 				offsetof(struct RGF_ICR, ICM));
-		u32 icr_misc = wil_ioread32_and_clear(wil->csr +
+		icr_misc = wil_ioread32_and_clear(wil->csr +
 				HOSTADDR(RGF_DMA_EP_MISC_ICR) +
 				offsetof(struct RGF_ICR, ICR));
-		u32 imv_misc = wil_r(wil, RGF_DMA_EP_MISC_ICR +
+		imv_misc = wil_r(wil, RGF_DMA_EP_MISC_ICR +
 				     offsetof(struct RGF_ICR, IMV));
 
 		/* HALP interrupt can be unmasked when misc interrupts are
@@ -592,11 +764,11 @@ static irqreturn_t wil6210_hardirq(int irq, void *cookie)
 	 * voting for wake thread - need at least 1 vote
 	 */
 	if ((pseudo_cause & BIT_DMA_PSEUDO_CAUSE_RX) &&
-	    (wil6210_irq_rx(irq, cookie) == IRQ_WAKE_THREAD))
+	    (wil->txrx_ops.irq_rx(irq, cookie) == IRQ_WAKE_THREAD))
 		rc = IRQ_WAKE_THREAD;
 
 	if ((pseudo_cause & BIT_DMA_PSEUDO_CAUSE_TX) &&
-	    (wil6210_irq_tx(irq, cookie) == IRQ_WAKE_THREAD))
+	    (wil->txrx_ops.irq_tx(irq, cookie) == IRQ_WAKE_THREAD))
 		rc = IRQ_WAKE_THREAD;
 
 	if ((pseudo_cause & BIT_DMA_PSEUDO_CAUSE_MISC) &&
@@ -624,6 +796,10 @@ void wil6210_clear_irq(struct wil6210_priv *wil)
 		    offsetof(struct RGF_ICR, ICR));
 	wil_clear32(wil->csr + HOSTADDR(RGF_DMA_EP_TX_ICR) +
 		    offsetof(struct RGF_ICR, ICR));
+	wil_clear32(wil->csr + HOSTADDR(RGF_INT_GEN_RX_ICR) +
+		    offsetof(struct RGF_ICR, ICR));
+	wil_clear32(wil->csr + HOSTADDR(RGF_INT_GEN_TX_ICR) +
+		    offsetof(struct RGF_ICR, ICR));
 	wil_clear32(wil->csr + HOSTADDR(RGF_DMA_EP_MISC_ICR) +
 		    offsetof(struct RGF_ICR, ICR));
 	wmb(); /* make sure write completed */
@@ -652,6 +828,13 @@ int wil6210_init_irq(struct wil6210_priv *wil, int irq, bool use_msi)
 
 	wil_dbg_misc(wil, "init_irq: %s\n", use_msi ? "MSI" : "INTx");
 
+	if (wil->use_enhanced_dma_hw) {
+		wil->txrx_ops.irq_tx = wil6210_irq_tx_edma;
+		wil->txrx_ops.irq_rx = wil6210_irq_rx_edma;
+	} else {
+		wil->txrx_ops.irq_tx = wil6210_irq_tx;
+		wil->txrx_ops.irq_rx = wil6210_irq_rx;
+	}
 	rc = request_threaded_irq(irq, wil6210_hardirq,
 				  wil6210_thread_irq,
 				  use_msi ? 0 : IRQF_SHARED,
diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c
index e7006c2428a0aa54b4cea133d91ffadb23987440..4de19bd40a589d8389e330aa1f09c15b861f7a8a 100644
--- a/drivers/net/wireless/ath/wil6210/main.c
+++ b/drivers/net/wireless/ath/wil6210/main.c
@@ -21,11 +21,13 @@
 
 #include "wil6210.h"
 #include "txrx.h"
+#include "txrx_edma.h"
 #include "wmi.h"
 #include "boot_loader.h"
 
 #define WAIT_FOR_HALP_VOTE_MS 100
 #define WAIT_FOR_SCAN_ABORT_MS 1000
+#define WIL_DEFAULT_NUM_RX_STATUS_RINGS 1
 
 bool debug_fw; /* = false; */
 module_param(debug_fw, bool, 0444);
@@ -110,9 +112,29 @@ MODULE_PARM_DESC(tx_ring_order, " Tx ring order; size = 1 << order");
 module_param_cb(bcast_ring_order, &ring_order_ops, &bcast_ring_order, 0444);
 MODULE_PARM_DESC(bcast_ring_order, " Bcast ring order; size = 1 << order");
 
-#define RST_DELAY (20) /* msec, for loop in @wil_target_reset */
+enum {
+	WIL_BOOT_ERR,
+	WIL_BOOT_VANILLA,
+	WIL_BOOT_PRODUCTION,
+	WIL_BOOT_DEVELOPMENT,
+};
+
+enum {
+	WIL_SIG_STATUS_VANILLA = 0x0,
+	WIL_SIG_STATUS_DEVELOPMENT = 0x1,
+	WIL_SIG_STATUS_PRODUCTION = 0x2,
+	WIL_SIG_STATUS_CORRUPTED_PRODUCTION = 0x3,
+};
+
+#define RST_DELAY (20) /* msec, for loop in @wil_wait_device_ready */
 #define RST_COUNT (1 + 1000/RST_DELAY) /* round up to be above 1 sec total */
 
+#define PMU_READY_DELAY_MS (4) /* ms, for sleep in @wil_wait_device_ready */
+
+#define OTP_HW_DELAY (200) /* usec, loop in @wil_wait_device_ready_talyn_mb */
+/* round up to be above 2 ms total */
+#define OTP_HW_COUNT (1 + 2000 / OTP_HW_DELAY)
+
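
For reference, the polling budgets these macros encode work out as follows (derived directly from the definitions above).

/* The polling budgets implied by the macros, spelled out. */
#include <stdio.h>

#define RST_DELAY    20                        /* msec per iteration */
#define RST_COUNT    (1 + 1000 / RST_DELAY)    /* 51 -> above 1 sec total */
#define OTP_HW_DELAY 200                       /* usec per iteration */
#define OTP_HW_COUNT (1 + 2000 / OTP_HW_DELAY) /* 11 -> above 2 msec total */

int main(void)
{
	printf("boot-loader poll: up to %d x %d ms = %d ms\n",
	       RST_COUNT, RST_DELAY, RST_COUNT * RST_DELAY);
	printf("OTP HW poll:      up to %d x %d us = %d us\n",
	       OTP_HW_COUNT, OTP_HW_DELAY, OTP_HW_COUNT * OTP_HW_DELAY);
	return 0;
}
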
 /*
  * Due to a hardware issue,
  * one has to read/write to/from NIC in 32-bit chunks;
@@ -160,6 +182,37 @@ void wil_memcpy_toio_32(volatile void __iomem *dst, const void *src,
 	}
 }
 
+static void wil_ring_fini_tx(struct wil6210_priv *wil, int id)
+{
+	struct wil_ring *ring = &wil->ring_tx[id];
+	struct wil_ring_tx_data *txdata = &wil->ring_tx_data[id];
+
+	lockdep_assert_held(&wil->mutex);
+
+	if (!ring->va)
+		return;
+
+	wil_dbg_misc(wil, "vring_fini_tx: id=%d\n", id);
+
+	spin_lock_bh(&txdata->lock);
+	txdata->dot1x_open = false;
+	txdata->mid = U8_MAX;
+	txdata->enabled = 0; /* no Tx can be in progress or start anew */
+	spin_unlock_bh(&txdata->lock);
+	/* napi_synchronize waits for completion of the current NAPI but will
+	 * not prevent the next NAPI run.
+	 * Add a memory barrier to guarantee that txdata->enabled is zeroed
+	 * before napi_synchronize so that the next scheduled NAPI will not
+	 * handle this vring
+	 */
+	wmb();
+	/* make sure NAPI won't touch this vring */
+	if (test_bit(wil_status_napi_en, wil->status))
+		napi_synchronize(&wil->napi_tx);
+
+	wil->txrx_ops.ring_fini_tx(wil, ring);
+}
+
 static void wil_disconnect_cid(struct wil6210_vif *vif, int cid,
 			       u16 reason_code, bool from_event)
 __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
@@ -219,9 +272,9 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
 	memset(sta->tid_crypto_rx, 0, sizeof(sta->tid_crypto_rx));
 	memset(&sta->group_crypto_rx, 0, sizeof(sta->group_crypto_rx));
 	/* release vrings */
-	for (i = 0; i < ARRAY_SIZE(wil->vring_tx); i++) {
-		if (wil->vring2cid_tid[i][0] == cid)
-			wil_vring_fini_tx(wil, i);
+	for (i = 0; i < ARRAY_SIZE(wil->ring_tx); i++) {
+		if (wil->ring2cid_tid[i][0] == cid)
+			wil_ring_fini_tx(wil, i);
 	}
 	/* statistics */
 	memset(&sta->stats, 0, sizeof(sta->stats));
@@ -453,18 +506,19 @@ static void wil_fw_error_worker(struct work_struct *work)
 	mutex_unlock(&wil->mutex);
 }
 
-static int wil_find_free_vring(struct wil6210_priv *wil)
+static int wil_find_free_ring(struct wil6210_priv *wil)
 {
 	int i;
+	int min_ring_id = wil_get_min_tx_ring_id(wil);
 
-	for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) {
-		if (!wil->vring_tx[i].va)
+	for (i = min_ring_id; i < WIL6210_MAX_TX_RINGS; i++) {
+		if (!wil->ring_tx[i].va)
 			return i;
 	}
 	return -EINVAL;
 }
 
-int wil_tx_init(struct wil6210_vif *vif, int cid)
+int wil_ring_init_tx(struct wil6210_vif *vif, int cid)
 {
 	struct wil6210_priv *wil = vif_to_wil(vif);
 	int rc = -EINVAL, ringid;
@@ -473,16 +527,17 @@ int wil_tx_init(struct wil6210_vif *vif, int cid)
 		wil_err(wil, "No connection pending\n");
 		goto out;
 	}
-	ringid = wil_find_free_vring(wil);
+	ringid = wil_find_free_ring(wil);
 	if (ringid < 0) {
 		wil_err(wil, "No free vring found\n");
 		goto out;
 	}
 
-	wil_dbg_wmi(wil, "Configure for connection CID %d MID %d vring %d\n",
+	wil_dbg_wmi(wil, "Configure for connection CID %d MID %d ring %d\n",
 		    cid, vif->mid, ringid);
 
-	rc = wil_vring_init_tx(vif, ringid, 1 << tx_ring_order, cid, 0);
+	rc = wil->txrx_ops.ring_init_tx(vif, ringid, 1 << tx_ring_order,
+					cid, 0);
 	if (rc)
 		wil_err(wil, "init TX for CID %d MID %d vring %d failed\n",
 			cid, vif->mid, ringid);
@@ -494,19 +549,19 @@ int wil_tx_init(struct wil6210_vif *vif, int cid)
 int wil_bcast_init(struct wil6210_vif *vif)
 {
 	struct wil6210_priv *wil = vif_to_wil(vif);
-	int ri = vif->bcast_vring, rc;
+	int ri = vif->bcast_ring, rc;
 
-	if ((ri >= 0) && wil->vring_tx[ri].va)
+	if (ri >= 0 && wil->ring_tx[ri].va)
 		return 0;
 
-	ri = wil_find_free_vring(wil);
+	ri = wil_find_free_ring(wil);
 	if (ri < 0)
 		return ri;
 
-	vif->bcast_vring = ri;
-	rc = wil_vring_init_bcast(vif, ri, 1 << bcast_ring_order);
+	vif->bcast_ring = ri;
+	rc = wil->txrx_ops.ring_init_bcast(vif, ri, 1 << bcast_ring_order);
 	if (rc)
-		vif->bcast_vring = -1;
+		vif->bcast_ring = -1;
 
 	return rc;
 }
@@ -514,13 +569,13 @@ int wil_bcast_init(struct wil6210_vif *vif)
 void wil_bcast_fini(struct wil6210_vif *vif)
 {
 	struct wil6210_priv *wil = vif_to_wil(vif);
-	int ri = vif->bcast_vring;
+	int ri = vif->bcast_ring;
 
 	if (ri < 0)
 		return;
 
-	vif->bcast_vring = -1;
-	wil_vring_fini_tx(wil, ri);
+	vif->bcast_ring = -1;
+	wil_ring_fini_tx(wil, ri);
 }
 
 void wil_bcast_fini_all(struct wil6210_priv *wil)
@@ -548,7 +603,7 @@ int wil_priv_init(struct wil6210_priv *wil)
 	}
 
 	for (i = 0; i < WIL6210_MAX_TX_RINGS; i++)
-		spin_lock_init(&wil->vring_tx_data[i].lock);
+		spin_lock_init(&wil->ring_tx_data[i].lock);
 
 	mutex_init(&wil->mutex);
 	mutex_init(&wil->vif_mutex);
@@ -589,11 +644,30 @@ int wil_priv_init(struct wil6210_priv *wil)
 	wil->wakeup_trigger = WMI_WAKEUP_TRIGGER_UCAST |
 			      WMI_WAKEUP_TRIGGER_BCAST;
 	memset(&wil->suspend_stats, 0, sizeof(wil->suspend_stats));
-	wil->vring_idle_trsh = 16;
+	wil->ring_idle_trsh = 16;
 
 	wil->reply_mid = U8_MAX;
 	wil->max_vifs = 1;
 
+	/* edma configuration can be updated via debugfs before allocation */
+	wil->num_rx_status_rings = WIL_DEFAULT_NUM_RX_STATUS_RINGS;
+	wil->use_compressed_rx_status = true;
+	wil->use_rx_hw_reordering = true;
+	wil->tx_status_ring_order = WIL_TX_SRING_SIZE_ORDER_DEFAULT;
+
+	/* Rx status ring size should be bigger than the number of RX buffers
+	 * in order to prevent backpressure on the status ring, which may
+	 * cause HW freeze.
+	 */
+	wil->rx_status_ring_order = WIL_RX_SRING_SIZE_ORDER_DEFAULT;
+	/* The number of RX buffer IDs should be bigger than the RX descriptor
+	 * ring size because, in the HW reorder flow, the HW can consume
+	 * additional buffers before releasing the previous ones.
+	 */
+	wil->rx_buff_id_count = WIL_RX_BUFF_ARR_SIZE_DEFAULT;
+
+	wil->amsdu_en = 1;
+
 	return 0;
 
 out_wmi_wq:
@@ -736,14 +810,24 @@ static void wil_bl_prepare_halt(struct wil6210_priv *wil)
 
 static inline void wil_halt_cpu(struct wil6210_priv *wil)
 {
-	wil_w(wil, RGF_USER_USER_CPU_0, BIT_USER_USER_CPU_MAN_RST);
-	wil_w(wil, RGF_USER_MAC_CPU_0,  BIT_USER_MAC_CPU_MAN_RST);
+	if (wil->hw_version >= HW_VER_TALYN_MB) {
+		wil_w(wil, RGF_USER_USER_CPU_0_TALYN_MB,
+		      BIT_USER_USER_CPU_MAN_RST);
+		wil_w(wil, RGF_USER_MAC_CPU_0_TALYN_MB,
+		      BIT_USER_MAC_CPU_MAN_RST);
+	} else {
+		wil_w(wil, RGF_USER_USER_CPU_0, BIT_USER_USER_CPU_MAN_RST);
+		wil_w(wil, RGF_USER_MAC_CPU_0,  BIT_USER_MAC_CPU_MAN_RST);
+	}
 }
 
 static inline void wil_release_cpu(struct wil6210_priv *wil)
 {
 	/* Start CPU */
-	wil_w(wil, RGF_USER_USER_CPU_0, 1);
+	if (wil->hw_version >= HW_VER_TALYN_MB)
+		wil_w(wil, RGF_USER_USER_CPU_0_TALYN_MB, 1);
+	else
+		wil_w(wil, RGF_USER_USER_CPU_0, 1);
 }
 
 static void wil_set_oob_mode(struct wil6210_priv *wil, u8 mode)
@@ -767,11 +851,146 @@ static void wil_set_oob_mode(struct wil6210_priv *wil, u8 mode)
 	}
 }
 
-static int wil_target_reset(struct wil6210_priv *wil, int no_flash)
+static int wil_wait_device_ready(struct wil6210_priv *wil, int no_flash)
 {
 	int delay = 0;
 	u32 x, x1 = 0;
 
+	/* wait until device ready. */
+	if (no_flash) {
+		msleep(PMU_READY_DELAY_MS);
+
+		wil_dbg_misc(wil, "Reset completed\n");
+	} else {
+		do {
+			msleep(RST_DELAY);
+			x = wil_r(wil, RGF_USER_BL +
+				  offsetof(struct bl_dedicated_registers_v0,
+					   boot_loader_ready));
+			if (x1 != x) {
+				wil_dbg_misc(wil, "BL.ready 0x%08x => 0x%08x\n",
+					     x1, x);
+				x1 = x;
+			}
+			if (delay++ > RST_COUNT) {
+				wil_err(wil, "Reset not completed, bl.ready 0x%08x\n",
+					x);
+				return -ETIME;
+			}
+		} while (x != BL_READY);
+
+		wil_dbg_misc(wil, "Reset completed in %d ms\n",
+			     delay * RST_DELAY);
+	}
+
+	return 0;
+}
+
+static int wil_wait_device_ready_talyn_mb(struct wil6210_priv *wil)
+{
+	u32 otp_hw;
+	u8 signature_status;
+	bool otp_signature_err;
+	bool hw_section_done;
+	u32 otp_qc_secured;
+	int delay = 0;
+
+	/* Wait for OTP signature test to complete */
+	usleep_range(2000, 2200);
+
+	wil->boot_config = WIL_BOOT_ERR;
+
+	/* Poll until OTP signature status is valid.
+	 * In vanilla and development modes, when signature test is complete
+	 * HW sets BIT_OTP_SIGNATURE_ERR_TALYN_MB.
+	 * In production mode BIT_OTP_SIGNATURE_ERR_TALYN_MB remains 0, poll
+	 * for signature status change to 2 or 3.
+	 */
+	do {
+		otp_hw = wil_r(wil, RGF_USER_OTP_HW_RD_MACHINE_1);
+		signature_status = WIL_GET_BITS(otp_hw, 8, 9);
+		otp_signature_err = otp_hw & BIT_OTP_SIGNATURE_ERR_TALYN_MB;
+
+		if (otp_signature_err &&
+		    signature_status == WIL_SIG_STATUS_VANILLA) {
+			wil->boot_config = WIL_BOOT_VANILLA;
+			break;
+		}
+		if (otp_signature_err &&
+		    signature_status == WIL_SIG_STATUS_DEVELOPMENT) {
+			wil->boot_config = WIL_BOOT_DEVELOPMENT;
+			break;
+		}
+		if (!otp_signature_err &&
+		    signature_status == WIL_SIG_STATUS_PRODUCTION) {
+			wil->boot_config = WIL_BOOT_PRODUCTION;
+			break;
+		}
+		if (!otp_signature_err &&
+		    signature_status ==
+		    WIL_SIG_STATUS_CORRUPTED_PRODUCTION) {
+			/* Unrecognized OTP signature found. Possibly a
+			 * corrupted production signature, access control
+			 * is applied as in production mode, therefore
+			 * do not fail
+			 */
+			wil->boot_config = WIL_BOOT_PRODUCTION;
+			break;
+		}
+		if (delay++ > OTP_HW_COUNT)
+			break;
+
+		usleep_range(OTP_HW_DELAY, OTP_HW_DELAY + 10);
+	} while (!otp_signature_err && signature_status == 0);
+
+	if (wil->boot_config == WIL_BOOT_ERR) {
+		wil_err(wil,
+			"invalid boot config, signature_status %d otp_signature_err %d\n",
+			signature_status, otp_signature_err);
+		return -ETIME;
+	}
+
+	wil_dbg_misc(wil,
+		     "signature test done in %d usec, otp_hw 0x%x, boot_config %d\n",
+		     delay * OTP_HW_DELAY, otp_hw, wil->boot_config);
+
+	if (wil->boot_config == WIL_BOOT_VANILLA)
+		/* Assuming not SPI boot (currently not supported) */
+		goto out;
+
+	hw_section_done = otp_hw & BIT_OTP_HW_SECTION_DONE_TALYN_MB;
+	delay = 0;
+
+	while (!hw_section_done) {
+		msleep(RST_DELAY);
+
+		otp_hw = wil_r(wil, RGF_USER_OTP_HW_RD_MACHINE_1);
+		hw_section_done = otp_hw & BIT_OTP_HW_SECTION_DONE_TALYN_MB;
+
+		if (delay++ > RST_COUNT) {
+			wil_err(wil, "TO waiting for hw_section_done\n");
+			return -ETIME;
+		}
+	}
+
+	wil_dbg_misc(wil, "HW section done in %d ms\n", delay * RST_DELAY);
+
+	otp_qc_secured = wil_r(wil, RGF_OTP_QC_SECURED);
+	wil->secured_boot = otp_qc_secured & BIT_BOOT_FROM_ROM ? 1 : 0;
+	wil_dbg_misc(wil, "secured boot is %sabled\n",
+		     wil->secured_boot ? "en" : "dis");
+
+out:
+	wil_dbg_misc(wil, "Reset completed\n");
+
+	return 0;
+}
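
The loop above classifies the boot mode from two fields of the OTP status word: the signature-error bit and a two-bit status at bits 8..9. The sketch below re-implements that decode, assuming WIL_GET_BITS(x, b0, b1) extracts bits b0..b1 inclusive (consistent with its use here) and using a placeholder position for BIT_OTP_SIGNATURE_ERR_TALYN_MB.

/* Decode sketch for the OTP status word; positions/macros are assumptions. */
#include <stdio.h>

#define GET_BITS(x, b0, b1) (((x) >> (b0)) & ((1u << ((b1) - (b0) + 1)) - 1))
#define SIGNATURE_ERR_BIT   (1u << 0)   /* placeholder bit position */

enum { BOOT_ERR, BOOT_VANILLA, BOOT_PRODUCTION, BOOT_DEVELOPMENT };

static int classify(unsigned int otp_hw)
{
	unsigned int status = GET_BITS(otp_hw, 8, 9);   /* 0..3 */
	int err = !!(otp_hw & SIGNATURE_ERR_BIT);

	if (err && status == 0x0)
		return BOOT_VANILLA;
	if (err && status == 0x1)
		return BOOT_DEVELOPMENT;
	if (!err && (status == 0x2 || status == 0x3))
		return BOOT_PRODUCTION;  /* 0x3 = corrupted, production rules apply */
	return BOOT_ERR;
}

int main(void)
{
	printf("status=2, no err -> %d (production)\n", classify(0x2u << 8));
	printf("status=1, err    -> %d (development)\n",
	       classify((0x1u << 8) | SIGNATURE_ERR_BIT));
	return 0;
}
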
+
+static int wil_target_reset(struct wil6210_priv *wil, int no_flash)
+{
+	u32 x;
+	int rc;
+
 	wil_dbg_misc(wil, "Resetting \"%s\"...\n", wil->hw_name);
 
 	/* Clear MAC link up */
@@ -811,10 +1030,17 @@ static int wil_target_reset(struct wil6210_priv *wil, int no_flash)
 	wil_w(wil, RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_0, 0x3ff81f);
 	wil_w(wil, RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_1, 0xf);
 
-	wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0xFE000000);
-	wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_1, 0x0000003F);
-	wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0x000000f0);
-	wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0xFFE7FE00);
+	if (wil->hw_version >= HW_VER_TALYN_MB) {
+		wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0x7e000000);
+		wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_1, 0x0000003f);
+		wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0xc00000f0);
+		wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0xffe7fe00);
+	} else {
+		wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0xfe000000);
+		wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_1, 0x0000003f);
+		wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0x000000f0);
+		wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0xffe7fe00);
+	}
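
The Talyn-MB reset-vector values differ from the older chips only in the top bits; the quick XOR below (a readability aid, nothing more) shows exactly which registers and bits change.

/* Which reset-vector bits change for Talyn-MB vs. older chips. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const uint32_t legacy[4] = { 0xfe000000, 0x0000003f, 0x000000f0, 0xffe7fe00 };
	const uint32_t talyn[4]  = { 0x7e000000, 0x0000003f, 0xc00000f0, 0xffe7fe00 };
	const char *reg[4] = { "VEC_2", "VEC_1", "VEC_3", "VEC_0" };

	for (int i = 0; i < 4; i++)
		printf("%s: differs in 0x%08x\n", reg[i], legacy[i] ^ talyn[i]);
	/* VEC_2: 0x80000000 (bit 31); VEC_3: 0xc0000000 (bits 30-31) */
	return 0;
}
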
 
 	wil_w(wil, RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_0, 0x0);
 	wil_w(wil, RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_1, 0x0);
@@ -830,34 +1056,12 @@ static int wil_target_reset(struct wil6210_priv *wil, int no_flash)
 
 	wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0);
 
-	/* wait until device ready. typical time is 20..80 msec */
-	if (no_flash)
-		do {
-			msleep(RST_DELAY);
-			x = wil_r(wil, USER_EXT_USER_PMU_3);
-			if (delay++ > RST_COUNT) {
-				wil_err(wil, "Reset not completed, PMU_3 0x%08x\n",
-					x);
-				return -ETIME;
-			}
-		} while ((x & BIT_PMU_DEVICE_RDY) == 0);
+	if (wil->hw_version == HW_VER_TALYN_MB)
+		rc = wil_wait_device_ready_talyn_mb(wil);
 	else
-		do {
-			msleep(RST_DELAY);
-			x = wil_r(wil, RGF_USER_BL +
-				  offsetof(struct bl_dedicated_registers_v0,
-					   boot_loader_ready));
-			if (x1 != x) {
-				wil_dbg_misc(wil, "BL.ready 0x%08x => 0x%08x\n",
-					     x1, x);
-				x1 = x;
-			}
-			if (delay++ > RST_COUNT) {
-				wil_err(wil, "Reset not completed, bl.ready 0x%08x\n",
-					x);
-				return -ETIME;
-			}
-		} while (x != BL_READY);
+		rc = wil_wait_device_ready(wil, no_flash);
+	if (rc)
+		return rc;
 
 	wil_c(wil, RGF_USER_CLKS_CTL_0, BIT_USER_CLKS_RST_PWGD);
 
@@ -865,7 +1069,7 @@ static int wil_target_reset(struct wil6210_priv *wil, int no_flash)
 	wil_s(wil, RGF_DMA_OFUL_NID_0, BIT_DMA_OFUL_NID_0_RX_EXT_TR_EN |
 	      BIT_DMA_OFUL_NID_0_RX_EXT_A3_SRC);
 
-	if (no_flash) {
+	if (wil->hw_version < HW_VER_TALYN_MB && no_flash) {
 		/* Reset OTP HW vectors to fit 40MHz */
 		wil_w(wil, RGF_USER_XPM_IFC_RD_TIME1, 0x60001);
 		wil_w(wil, RGF_USER_XPM_IFC_RD_TIME2, 0x20027);
@@ -880,7 +1084,6 @@ static int wil_target_reset(struct wil6210_priv *wil, int no_flash)
 		wil_w(wil, RGF_USER_XPM_RD_DOUT_SAMPLE_TIME, 0x57);
 	}
 
-	wil_dbg_misc(wil, "Reset completed in %d ms\n", delay * RST_DELAY);
 	return 0;
 }
 
@@ -1042,8 +1245,14 @@ static int wil_get_otp_info(struct wil6210_priv *wil)
 	struct net_device *ndev = wil->main_ndev;
 	struct wiphy *wiphy = wil_to_wiphy(wil);
 	u8 mac[8];
+	int mac_addr;
+
+	if (wil->hw_version >= HW_VER_TALYN_MB)
+		mac_addr = RGF_OTP_MAC_TALYN_MB;
+	else
+		mac_addr = RGF_OTP_MAC;
 
-	wil_memcpy_fromio_32(mac, wil->csr + HOSTADDR(RGF_OTP_MAC),
+	wil_memcpy_fromio_32(mac, wil->csr + HOSTADDR(mac_addr),
 			     sizeof(mac));
 	if (!is_valid_ether_addr(mac)) {
 		wil_err(wil, "Invalid MAC %pM\n", mac);
@@ -1147,8 +1356,13 @@ static void wil_pre_fw_config(struct wil6210_priv *wil)
 	/* it is W1C, clear by writing back same value */
 	wil_s(wil, RGF_CAF_ICR + offsetof(struct RGF_ICR, ICR), 0);
 	wil_w(wil, RGF_CAF_ICR + offsetof(struct RGF_ICR, IMV), ~0);
-	/* clear PAL_UNIT_ICR (potential D0->D3 leftover) */
-	wil_s(wil, RGF_PAL_UNIT_ICR + offsetof(struct RGF_ICR, ICR), 0);
+	/* clear PAL_UNIT_ICR (potential D0->D3 leftover)
+	 * In Talyn-MB host cannot access this register due to
+	 * access control, hence PAL_UNIT_ICR is cleared by the FW
+	 */
+	if (wil->hw_version < HW_VER_TALYN_MB)
+		wil_s(wil, RGF_PAL_UNIT_ICR + offsetof(struct RGF_ICR, ICR),
+		      0);
 
 	if (wil->fw_calib_result > 0) {
 		__le32 val = cpu_to_le32(wil->fw_calib_result |
@@ -1284,7 +1498,8 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
 	rc = wil_target_reset(wil, no_flash);
 	wil6210_clear_irq(wil);
 	wil_enable_irq(wil);
-	wil_rx_fini(wil);
+	wil->txrx_ops.rx_fini(wil);
+	wil->txrx_ops.tx_fini(wil);
 	if (rc) {
 		if (!no_flash)
 			wil_bl_crash_info(wil, true);
@@ -1337,7 +1552,6 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
 	clear_bit(wil_status_resetting, wil->status);
 
 	if (load_fw) {
-		wil_configure_interrupt_moderation(wil);
 		wil_unmask_irq(wil);
 
 		/* we just started MAC, wait for FW ready */
@@ -1352,6 +1566,8 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
 			return rc;
 		}
 
+		wil->txrx_ops.configure_interrupt_moderation(wil);
+
 		rc = wil_restore_vifs(wil);
 		if (rc) {
 			wil_err(wil, "failed to restore vifs, rc %d\n", rc);
@@ -1406,8 +1622,12 @@ int __wil_up(struct wil6210_priv *wil)
 	if (rc)
 		return rc;
 
-	/* Rx VRING. After MAC and beacon */
-	rc = wil_rx_init(wil, 1 << rx_ring_order);
+	/* Rx RING. After MAC and beacon */
+	rc = wil->txrx_ops.rx_init(wil, 1 << rx_ring_order);
+	if (rc)
+		return rc;
+
+	rc = wil->txrx_ops.tx_init(wil);
 	if (rc)
 		return rc;
 
@@ -1568,3 +1788,11 @@ void wil_halp_unvote(struct wil6210_priv *wil)
 
 	mutex_unlock(&wil->halp.lock);
 }
+
+void wil_init_txrx_ops(struct wil6210_priv *wil)
+{
+	if (wil->use_enhanced_dma_hw)
+		wil_init_txrx_ops_edma(wil);
+	else
+		wil_init_txrx_ops_legacy_dma(wil);
+}
diff --git a/drivers/net/wireless/ath/wil6210/netdev.c b/drivers/net/wireless/ath/wil6210/netdev.c
index eb6c14ed65a417127a7a72b9347f91cb9f19ebbd..7a78a06bd3560bd73f65842d464b48feec7cbbac 100644
--- a/drivers/net/wireless/ath/wil6210/netdev.c
+++ b/drivers/net/wireless/ath/wil6210/netdev.c
@@ -120,6 +120,27 @@ static int wil6210_netdev_poll_rx(struct napi_struct *napi, int budget)
 	return done;
 }
 
+static int wil6210_netdev_poll_rx_edma(struct napi_struct *napi, int budget)
+{
+	struct wil6210_priv *wil = container_of(napi, struct wil6210_priv,
+						napi_rx);
+	int quota = budget;
+	int done;
+
+	wil_rx_handle_edma(wil, &quota);
+	done = budget - quota;
+
+	if (done < budget) {
+		napi_complete_done(napi, done);
+		wil6210_unmask_irq_rx_edma(wil);
+		wil_dbg_txrx(wil, "NAPI RX complete\n");
+	}
+
+	wil_dbg_txrx(wil, "NAPI RX poll(%d) done %d\n", budget, done);
+
+	return done;
+}
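
The poll above follows the usual NAPI contract: the rx path consumes from quota, done is budget minus what remains, and only when done stays below budget may the poll complete and re-arm the RX interrupt. A small arithmetic sketch (poll() and its packet counts are invented for illustration):

/* NAPI budget/quota arithmetic, illustrative only. */
#include <stdio.h>
#include <stdbool.h>

static int poll(int budget, int pkts_pending)
{
	int quota = budget;
	int handled = pkts_pending < quota ? pkts_pending : quota;
	int done;
	bool complete;

	quota -= handled;           /* what wil_rx_handle_edma() would consume */
	done = budget - quota;
	complete = done < budget;   /* drained: complete NAPI, unmask RX IRQ */

	printf("budget %d, pending %d -> done %d, %s\n",
	       budget, pkts_pending, done,
	       complete ? "complete+unmask" : "poll again");
	return done;
}

int main(void)
{
	poll(64, 10);    /* light load: complete and re-enable the interrupt */
	poll(64, 200);   /* busy: report full budget, NAPI will poll again */
	return 0;
}
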
+
 static int wil6210_netdev_poll_tx(struct napi_struct *napi, int budget)
 {
 	struct wil6210_priv *wil = container_of(napi, struct wil6210_priv,
@@ -129,11 +150,11 @@ static int wil6210_netdev_poll_tx(struct napi_struct *napi, int budget)
 
 	/* always process ALL Tx complete, regardless budget - it is fast */
 	for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) {
-		struct vring *vring = &wil->vring_tx[i];
-		struct vring_tx_data *txdata = &wil->vring_tx_data[i];
+		struct wil_ring *ring = &wil->ring_tx[i];
+		struct wil_ring_tx_data *txdata = &wil->ring_tx_data[i];
 		struct wil6210_vif *vif;
 
-		if (!vring->va || !txdata->enabled ||
+		if (!ring->va || !txdata->enabled ||
 		    txdata->mid >= wil->max_vifs)
 			continue;
 
@@ -157,6 +178,30 @@ static int wil6210_netdev_poll_tx(struct napi_struct *napi, int budget)
 	return min(tx_done, budget);
 }
 
+static int wil6210_netdev_poll_tx_edma(struct napi_struct *napi, int budget)
+{
+	struct wil6210_priv *wil = container_of(napi, struct wil6210_priv,
+						napi_tx);
+	int tx_done;
+	/* There is only one status TX ring */
+	struct wil_status_ring *sring = &wil->srings[wil->tx_sring_idx];
+
+	if (!sring->va)
+		return 0;
+
+	tx_done = wil_tx_sring_handler(wil, sring);
+
+	if (tx_done < budget) {
+		napi_complete(napi);
+		wil6210_unmask_irq_tx_edma(wil);
+		wil_dbg_txrx(wil, "NAPI TX complete\n");
+	}
+
+	wil_dbg_txrx(wil, "NAPI TX poll(%d) done %d\n", budget, tx_done);
+
+	return min(tx_done, budget);
+}
+
 static void wil_dev_setup(struct net_device *dev)
 {
 	ether_setup(dev);
@@ -228,7 +273,7 @@ static void wil_p2p_discovery_timer_fn(struct timer_list *t)
 
 static void wil_vif_init(struct wil6210_vif *vif)
 {
-	vif->bcast_vring = -1;
+	vif->bcast_ring = -1;
 
 	mutex_init(&vif->probe_client_mutex);
 
@@ -418,11 +463,21 @@ int wil_if_add(struct wil6210_priv *wil)
 	}
 
 	init_dummy_netdev(&wil->napi_ndev);
-	netif_napi_add(&wil->napi_ndev, &wil->napi_rx, wil6210_netdev_poll_rx,
-		       WIL6210_NAPI_BUDGET);
-	netif_tx_napi_add(&wil->napi_ndev,
-			  &wil->napi_tx, wil6210_netdev_poll_tx,
-			  WIL6210_NAPI_BUDGET);
+	if (wil->use_enhanced_dma_hw) {
+		netif_napi_add(&wil->napi_ndev, &wil->napi_rx,
+			       wil6210_netdev_poll_rx_edma,
+			       WIL6210_NAPI_BUDGET);
+		netif_tx_napi_add(&wil->napi_ndev,
+				  &wil->napi_tx, wil6210_netdev_poll_tx_edma,
+				  WIL6210_NAPI_BUDGET);
+	} else {
+		netif_napi_add(&wil->napi_ndev, &wil->napi_rx,
+			       wil6210_netdev_poll_rx,
+			       WIL6210_NAPI_BUDGET);
+		netif_tx_napi_add(&wil->napi_ndev,
+				  &wil->napi_tx, wil6210_netdev_poll_tx,
+				  WIL6210_NAPI_BUDGET);
+	}
 
 	wil_update_net_queues_bh(wil, vif, NULL, true);
 
diff --git a/drivers/net/wireless/ath/wil6210/pcie_bus.c b/drivers/net/wireless/ath/wil6210/pcie_bus.c
index 19cbc6add6377286dbb094e90b7644c8e15e59c4..8b148cb91372f4fac59aa3cac56469f809c1145e 100644
--- a/drivers/net/wireless/ath/wil6210/pcie_bus.c
+++ b/drivers/net/wireless/ath/wil6210/pcie_bus.c
@@ -85,7 +85,7 @@ int wil_set_capabilities(struct wil6210_priv *wil)
 		wil->rgf_ucode_assert_code_addr = SPARROW_RGF_UCODE_ASSERT_CODE;
 		break;
 	case JTAG_DEV_ID_TALYN:
-		wil->hw_name = "Talyn";
+		wil->hw_name = "Talyn-MA";
 		wil->hw_version = HW_VER_TALYN;
 		memcpy(fw_mapping, talyn_fw_mapping, sizeof(talyn_fw_mapping));
 		wil->rgf_fw_assert_code_addr = TALYN_RGF_FW_ASSERT_CODE;
@@ -94,6 +94,17 @@ int wil_set_capabilities(struct wil6210_priv *wil)
 		    BIT_NO_FLASH_INDICATION)
 			set_bit(hw_capa_no_flash, wil->hw_capa);
 		break;
+	case JTAG_DEV_ID_TALYN_MB:
+		wil->hw_name = "Talyn-MB";
+		wil->hw_version = HW_VER_TALYN_MB;
+		memcpy(fw_mapping, talyn_mb_fw_mapping,
+		       sizeof(talyn_mb_fw_mapping));
+		wil->rgf_fw_assert_code_addr = TALYN_RGF_FW_ASSERT_CODE;
+		wil->rgf_ucode_assert_code_addr = TALYN_RGF_UCODE_ASSERT_CODE;
+		set_bit(hw_capa_no_flash, wil->hw_capa);
+		wil->use_enhanced_dma_hw = true;
+		wil->use_rx_hw_reordering = true;
+		break;
 	default:
 		wil_err(wil, "Unknown board hardware, chip_id 0x%08x, chip_revision 0x%08x\n",
 			jtag_id, chip_revision);
@@ -102,6 +113,8 @@ int wil_set_capabilities(struct wil6210_priv *wil)
 		return -EINVAL;
 	}
 
+	wil_init_txrx_ops(wil);
+
 	iccm_section = wil_find_fw_mapping("fw_code");
 	if (!iccm_section) {
 		wil_err(wil, "fw_code section not found in fw_mapping\n");
@@ -257,8 +270,8 @@ static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 		.fw_recovery = wil_platform_rop_fw_recovery,
 	};
 	u32 bar_size = pci_resource_len(pdev, 0);
-	int dma_addr_size[] = {48, 40, 32}; /* keep descending order */
-	int i;
+	int dma_addr_size[] = {64, 48, 40, 32}; /* keep descending order */
+	int i, start_idx;
 
 	/* check HW */
 	dev_info(&pdev->dev, WIL_NAME
@@ -293,24 +306,6 @@ static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 		goto if_free;
 	}
 	/* rollback to err_plat */
-
-	/* device supports >32bit addresses */
-	for (i = 0; i < ARRAY_SIZE(dma_addr_size); i++) {
-		rc = dma_set_mask_and_coherent(dev,
-					       DMA_BIT_MASK(dma_addr_size[i]));
-		if (rc) {
-			dev_err(dev, "dma_set_mask_and_coherent(%d) failed: %d\n",
-				dma_addr_size[i], rc);
-			continue;
-		}
-		dev_info(dev, "using dma mask %d", dma_addr_size[i]);
-		wil->dma_addr_size = dma_addr_size[i];
-		break;
-	}
-
-	if (wil->dma_addr_size == 0)
-		goto err_plat;
-
 	rc = pci_enable_device(pdev);
 	if (rc && pdev->msi_enabled == 0) {
 		wil_err(wil,
@@ -350,6 +345,28 @@ static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 		wil_err(wil, "wil_set_capabilities failed, rc %d\n", rc);
 		goto err_iounmap;
 	}
+
+	/* device supports >32bit addresses.
+	 * for legacy DMA start from 48 bit.
+	 */
+	start_idx = wil->use_enhanced_dma_hw ? 0 : 1;
+
+	for (i = start_idx; i < ARRAY_SIZE(dma_addr_size); i++) {
+		rc = dma_set_mask_and_coherent(dev,
+					       DMA_BIT_MASK(dma_addr_size[i]));
+		if (rc) {
+			dev_err(dev, "dma_set_mask_and_coherent(%d) failed: %d\n",
+				dma_addr_size[i], rc);
+			continue;
+		}
+		dev_info(dev, "using dma mask %d", dma_addr_size[i]);
+		wil->dma_addr_size = dma_addr_size[i];
+		break;
+	}
+
+	if (wil->dma_addr_size == 0)
+		goto err_iounmap;
+
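
Moving the DMA-mask selection after wil_set_capabilities() lets the loop start from the 64-bit entry only when enhanced DMA is in use, while legacy DMA keeps starting from 48 bits. The stub sketch below mirrors that fallback; accept_mask() merely stands in for dma_set_mask_and_coherent().

/* Stub sketch of the descending DMA-mask fallback; not driver code. */
#include <stdio.h>
#include <stdbool.h>

static bool accept_mask(int bits)
{
	return bits <= 40;            /* pretend the platform tops out at 40 bits */
}

static int pick_dma_mask(bool enhanced_dma)
{
	const int sizes[] = { 64, 48, 40, 32 };  /* keep descending order */
	int start = enhanced_dma ? 0 : 1;        /* legacy DMA starts from 48 */

	for (int i = start; i < 4; i++)
		if (accept_mask(sizes[i]))
			return sizes[i];
	return 0;                                 /* probe would fail here */
}

int main(void)
{
	printf("enhanced: %d-bit mask\n", pick_dma_mask(true));
	printf("legacy:   %d-bit mask\n", pick_dma_mask(false));
	return 0;
}
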
 	wil6210_clear_irq(wil);
 
 	/* FW should raise IRQ when ready */
diff --git a/drivers/net/wireless/ath/wil6210/pm.c b/drivers/net/wireless/ath/wil6210/pm.c
index ba81fb3ac96f3bc5a6ca90f2ee98574ba258f959..3a4194779ddf6371da82b934565bbf6c69eb2c27 100644
--- a/drivers/net/wireless/ath/wil6210/pm.c
+++ b/drivers/net/wireless/ath/wil6210/pm.c
@@ -211,7 +211,7 @@ static int wil_suspend_keep_radio_on(struct wil6210_priv *wil)
 		goto reject_suspend;
 	}
 
-	if (!wil_is_rx_idle(wil)) {
+	if (!wil->txrx_ops.is_rx_idle(wil)) {
 		wil_dbg_pm(wil, "Pending RX data, reject suspend\n");
 		wil->suspend_stats.rejected_by_host++;
 		goto reject_suspend;
@@ -235,9 +235,9 @@ static int wil_suspend_keep_radio_on(struct wil6210_priv *wil)
 	start = jiffies;
 	data_comp_to = jiffies + msecs_to_jiffies(WIL_DATA_COMPLETION_TO_MS);
 	if (test_bit(wil_status_napi_en, wil->status)) {
-		while (!wil_is_rx_idle(wil)) {
+		while (!wil->txrx_ops.is_rx_idle(wil)) {
 			if (time_after(jiffies, data_comp_to)) {
-				if (wil_is_rx_idle(wil))
+				if (wil->txrx_ops.is_rx_idle(wil))
 					break;
 				wil_err(wil,
 					"TO waiting for idle RX, suspend failed\n");
diff --git a/drivers/net/wireless/ath/wil6210/rx_reorder.c b/drivers/net/wireless/ath/wil6210/rx_reorder.c
index 76f8084c1fd81acdce116f6a88901c79976e6852..22475a1ddb7f0129139a08956f46cdfd2f4e7fbe 100644
--- a/drivers/net/wireless/ath/wil6210/rx_reorder.c
+++ b/drivers/net/wireless/ath/wil6210/rx_reorder.c
@@ -95,17 +95,17 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
 {
 	struct wil6210_vif *vif;
 	struct net_device *ndev;
-	struct vring_rx_desc *d = wil_skb_rxdesc(skb);
-	int tid = wil_rxdesc_tid(d);
-	int cid = wil_rxdesc_cid(d);
-	int mid = wil_rxdesc_mid(d);
-	u16 seq = wil_rxdesc_seq(d);
-	int mcast = wil_rxdesc_mcast(d);
-	struct wil_sta_info *sta = &wil->sta[cid];
+	int tid, cid, mid, mcast;
+	u16 seq;
+	struct wil_sta_info *sta;
 	struct wil_tid_ampdu_rx *r;
 	u16 hseq;
 	int index;
 
+	wil->txrx_ops.get_reorder_params(wil, skb, &tid, &cid, &mid, &seq,
+					 &mcast);
+	sta = &wil->sta[cid];
+
 	wil_dbg_txrx(wil, "MID %d CID %d TID %d Seq 0x%03x mcast %01x\n",
 		     mid, cid, tid, seq, mcast);
 
@@ -315,7 +315,10 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
 	 * bits 6..15: buffer size
 	 */
 	u16 req_agg_wsize = WIL_GET_BITS(param_set, 6, 15);
-	bool agg_amsdu = !!(param_set & BIT(0));
+	bool agg_amsdu = wil->use_enhanced_dma_hw &&
+		wil->use_rx_hw_reordering &&
+		test_bit(WMI_FW_CAPABILITY_AMSDU, wil->fw_capabilities) &&
+		wil->amsdu_en && (param_set & BIT(0));
 	int ba_policy = param_set & BIT(1);
 	u16 status = WLAN_STATUS_SUCCESS;
 	u16 ssn = seq_ctrl >> 4;
@@ -360,8 +363,9 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
 		}
 	}
 
-	rc = wmi_addba_rx_resp(wil, mid, cid, tid, dialog_token, status,
-			       agg_amsdu, agg_wsize, agg_timeout);
+	rc = wil->txrx_ops.wmi_addba_rx_resp(wil, mid, cid, tid, dialog_token,
+					     status, agg_amsdu, agg_wsize,
+					     agg_timeout);
 	if (rc || (status != WLAN_STATUS_SUCCESS)) {
 		wil_err(wil, "do not apply ba, rc(%d), status(%d)\n", rc,
 			status);
@@ -384,7 +388,7 @@ int wil_addba_tx_request(struct wil6210_priv *wil, u8 ringid, u16 wsize)
 {
 	u8 agg_wsize = wil_agg_size(wil, wsize);
 	u16 agg_timeout = 0;
-	struct vring_tx_data *txdata = &wil->vring_tx_data[ringid];
+	struct wil_ring_tx_data *txdata = &wil->ring_tx_data[ringid];
 	int rc = 0;
 
 	if (txdata->addba_in_progress) {
diff --git a/drivers/net/wireless/ath/wil6210/trace.h b/drivers/net/wireless/ath/wil6210/trace.h
index c4db2a9d9f7f6c8338e5f35d331069e93bccf89d..853abc3a73e457dbad915b3f679a66b57d96d972 100644
--- a/drivers/net/wireless/ath/wil6210/trace.h
+++ b/drivers/net/wireless/ath/wil6210/trace.h
@@ -187,6 +187,40 @@ TRACE_EVENT(wil6210_rx,
 		  __entry->seq, __entry->type, __entry->subtype)
 );
 
+TRACE_EVENT(wil6210_rx_status,
+	    TP_PROTO(struct wil6210_priv *wil, u8 use_compressed, u16 buff_id,
+		     void *msg),
+	    TP_ARGS(wil, use_compressed, buff_id, msg),
+	    TP_STRUCT__entry(__field(u8, use_compressed)
+			     __field(u16, buff_id)
+			     __field(unsigned int, len)
+			     __field(u8, mid)
+			     __field(u8, cid)
+			     __field(u8, tid)
+			     __field(u8, type)
+			     __field(u8, subtype)
+			     __field(u16, seq)
+			     __field(u8, mcs)
+	    ),
+	    TP_fast_assign(__entry->use_compressed = use_compressed;
+			   __entry->buff_id = buff_id;
+			   __entry->len = wil_rx_status_get_length(msg);
+			   __entry->mid = wil_rx_status_get_mid(msg);
+			   __entry->cid = wil_rx_status_get_cid(msg);
+			   __entry->tid = wil_rx_status_get_tid(msg);
+			   __entry->type = wil_rx_status_get_frame_type(wil,
+									msg);
+			   __entry->subtype = wil_rx_status_get_fc1(wil, msg);
+			   __entry->seq = wil_rx_status_get_seq(wil, msg);
+			   __entry->mcs = wil_rx_status_get_mcs(msg);
+	    ),
+	    TP_printk(
+		      "compressed %d buff_id %d len %d mid %d cid %d tid %d mcs %d seq 0x%03x type 0x%1x subtype 0x%1x",
+		      __entry->use_compressed, __entry->buff_id, __entry->len,
+		      __entry->mid, __entry->cid, __entry->tid, __entry->mcs,
+		      __entry->seq, __entry->type, __entry->subtype)
+);
+
 TRACE_EVENT(wil6210_tx,
 	TP_PROTO(u8 vring, u16 index, unsigned int len, u8 frags),
 	TP_ARGS(vring, index, len, frags),
@@ -226,6 +260,31 @@ TRACE_EVENT(wil6210_tx_done,
 		  __entry->err)
 );
 
+TRACE_EVENT(wil6210_tx_status,
+	    TP_PROTO(struct wil_ring_tx_status *msg, u16 index,
+		     unsigned int len),
+	    TP_ARGS(msg, index, len),
+	    TP_STRUCT__entry(__field(u16, index)
+			     __field(unsigned int, len)
+			     __field(u8, num_descs)
+			     __field(u8, ring_id)
+			     __field(u8, status)
+			     __field(u8, mcs)
+
+	    ),
+	    TP_fast_assign(__entry->index = index;
+			   __entry->len = len;
+			   __entry->num_descs = msg->num_descriptors;
+			   __entry->ring_id = msg->ring_id;
+			   __entry->status = msg->status;
+			   __entry->mcs = wil_tx_status_get_mcs(msg);
+	    ),
+	    TP_printk(
+		      "ring_id %d swtail 0x%x len %d num_descs %d status 0x%x mcs %d",
+		      __entry->ring_id, __entry->index, __entry->len,
+		      __entry->num_descs, __entry->status, __entry->mcs)
+);
+
 #endif /* WIL6210_TRACE_H || TRACE_HEADER_MULTI_READ*/
 
 #if defined(CONFIG_WIL6210_TRACING) && !defined(__CHECKER__)
diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c
index b9a9fa8289618f6c4a0f08c2f053268ad1c390e6..2098f3cc1cec598a20035f62e90efa3813f4d899 100644
--- a/drivers/net/wireless/ath/wil6210/txrx.c
+++ b/drivers/net/wireless/ath/wil6210/txrx.c
@@ -28,6 +28,7 @@
 #include "wmi.h"
 #include "txrx.h"
 #include "trace.h"
+#include "txrx_edma.h"
 
 static bool rtap_include_phy_info;
 module_param(rtap_include_phy_info, bool, 0444);
@@ -47,62 +48,28 @@ static inline uint wil_rx_snaplen(void)
 	return rx_align_2 ? 6 : 0;
 }
 
-static inline int wil_vring_is_empty(struct vring *vring)
+/* wil_ring_wmark_low - low watermark for available descriptor space */
+static inline int wil_ring_wmark_low(struct wil_ring *ring)
 {
-	return vring->swhead == vring->swtail;
+	return ring->size / 8;
 }
 
-static inline u32 wil_vring_next_tail(struct vring *vring)
+/* wil_ring_wmark_high - high watermark for available descriptor space */
+static inline int wil_ring_wmark_high(struct wil_ring *ring)
 {
-	return (vring->swtail + 1) % vring->size;
-}
-
-static inline void wil_vring_advance_head(struct vring *vring, int n)
-{
-	vring->swhead = (vring->swhead + n) % vring->size;
-}
-
-static inline int wil_vring_is_full(struct vring *vring)
-{
-	return wil_vring_next_tail(vring) == vring->swhead;
-}
-
-/* Used space in Tx Vring */
-static inline int wil_vring_used_tx(struct vring *vring)
-{
-	u32 swhead = vring->swhead;
-	u32 swtail = vring->swtail;
-	return (vring->size + swhead - swtail) % vring->size;
-}
-
-/* Available space in Tx Vring */
-static inline int wil_vring_avail_tx(struct vring *vring)
-{
-	return vring->size - wil_vring_used_tx(vring) - 1;
-}
-
-/* wil_vring_wmark_low - low watermark for available descriptor space */
-static inline int wil_vring_wmark_low(struct vring *vring)
-{
-	return vring->size/8;
-}
-
-/* wil_vring_wmark_high - high watermark for available descriptor space */
-static inline int wil_vring_wmark_high(struct vring *vring)
-{
-	return vring->size/4;
+	return ring->size / 4;
 }
 
 /* returns true if num avail descriptors is lower than wmark_low */
-static inline int wil_vring_avail_low(struct vring *vring)
+static inline int wil_ring_avail_low(struct wil_ring *ring)
 {
-	return wil_vring_avail_tx(vring) < wil_vring_wmark_low(vring);
+	return wil_ring_avail_tx(ring) < wil_ring_wmark_low(ring);
 }
 
 /* returns true if num avail descriptors is higher than wmark_high */
-static inline int wil_vring_avail_high(struct vring *vring)
+static inline int wil_ring_avail_high(struct wil_ring *ring)
 {
-	return wil_vring_avail_tx(vring) > wil_vring_wmark_high(vring);
+	return wil_ring_avail_tx(ring) > wil_ring_wmark_high(ring);
 }
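
The watermark helpers above still rest on the same ring accounting the removed inlines expressed: used space is (size + head - tail) mod size, available space leaves one slot unused, and the low/high watermarks sit at size/8 and size/4. A worked example with concrete numbers:

/* Worked example of the ring accounting behind the watermark helpers. */
#include <stdio.h>

struct ring { unsigned int size, swhead, swtail; };

static int ring_used(struct ring *r)
{
	return (r->size + r->swhead - r->swtail) % r->size;
}

static int ring_avail(struct ring *r)
{
	return r->size - ring_used(r) - 1;   /* one slot is kept unused */
}

int main(void)
{
	struct ring r = { .size = 1024, .swhead = 1000, .swtail = 100 };
	int avail = ring_avail(&r);

	printf("used %d, avail %d, wmark_low %u, wmark_high %u\n",
	       ring_used(&r), avail, r.size / 8, r.size / 4);
	printf("avail_low: %d, avail_high: %d\n",
	       avail < (int)(r.size / 8), avail > (int)(r.size / 4));
	return 0;
}
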
 
 /* returns true when all tx vrings are empty */
@@ -112,9 +79,10 @@ bool wil_is_tx_idle(struct wil6210_priv *wil)
 	unsigned long data_comp_to;
 
 	for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) {
-		struct vring *vring = &wil->vring_tx[i];
-		int vring_index = vring - wil->vring_tx;
-		struct vring_tx_data *txdata = &wil->vring_tx_data[vring_index];
+		struct wil_ring *vring = &wil->ring_tx[i];
+		int vring_index = vring - wil->ring_tx;
+		struct wil_ring_tx_data *txdata =
+			&wil->ring_tx_data[vring_index];
 
 		spin_lock(&txdata->lock);
 
@@ -126,7 +94,7 @@ bool wil_is_tx_idle(struct wil6210_priv *wil)
 		data_comp_to = jiffies + msecs_to_jiffies(
 					WIL_DATA_COMPLETION_TO_MS);
 		if (test_bit(wil_status_napi_en, wil->status)) {
-			while (!wil_vring_is_empty(vring)) {
+			while (!wil_ring_is_empty(vring)) {
 				if (time_after(jiffies, data_comp_to)) {
 					wil_dbg_pm(wil,
 						   "TO waiting for idle tx\n");
@@ -150,13 +118,7 @@ bool wil_is_tx_idle(struct wil6210_priv *wil)
 	return true;
 }
 
-/* wil_val_in_range - check if value in [min,max) */
-static inline bool wil_val_in_range(int val, int min, int max)
-{
-	return val >= min && val < max;
-}
-
-static int wil_vring_alloc(struct wil6210_priv *wil, struct vring *vring)
+static int wil_vring_alloc(struct wil6210_priv *wil, struct wil_ring *vring)
 {
 	struct device *dev = wil_to_dev(wil);
 	size_t sz = vring->size * sizeof(vring->va[0]);
@@ -205,7 +167,8 @@ static int wil_vring_alloc(struct wil6210_priv *wil, struct vring *vring)
 	 * we can use any
 	 */
 	for (i = 0; i < vring->size; i++) {
-		volatile struct vring_tx_desc *_d = &vring->va[i].tx;
+		volatile struct vring_tx_desc *_d =
+			&vring->va[i].tx.legacy;
 
 		_d->dma.status = TX_DMA_STATUS_DU;
 	}
@@ -216,9 +179,10 @@ static int wil_vring_alloc(struct wil6210_priv *wil, struct vring *vring)
 	return 0;
 }
 
-static void wil_txdesc_unmap(struct device *dev, struct vring_tx_desc *d,
+static void wil_txdesc_unmap(struct device *dev, union wil_tx_desc *desc,
 			     struct wil_ctx *ctx)
 {
+	struct vring_tx_desc *d = &desc->legacy;
 	dma_addr_t pa = wil_desc_addr(&d->dma.addr);
 	u16 dmalen = le16_to_cpu(d->dma.length);
 
@@ -234,15 +198,14 @@ static void wil_txdesc_unmap(struct device *dev, struct vring_tx_desc *d,
 	}
 }
 
-static void wil_vring_free(struct wil6210_priv *wil, struct vring *vring,
-			   int tx)
+static void wil_vring_free(struct wil6210_priv *wil, struct wil_ring *vring)
 {
 	struct device *dev = wil_to_dev(wil);
 	size_t sz = vring->size * sizeof(vring->va[0]);
 
 	lockdep_assert_held(&wil->mutex);
-	if (tx) {
-		int vring_index = vring - wil->vring_tx;
+	if (!vring->is_rx) {
+		int vring_index = vring - wil->ring_tx;
 
 		wil_dbg_misc(wil, "free Tx vring %d [%d] 0x%p:%pad 0x%p\n",
 			     vring_index, vring->size, vring->va,
@@ -253,33 +216,33 @@ static void wil_vring_free(struct wil6210_priv *wil, struct vring *vring,
 			     &vring->pa, vring->ctx);
 	}
 
-	while (!wil_vring_is_empty(vring)) {
+	while (!wil_ring_is_empty(vring)) {
 		dma_addr_t pa;
 		u16 dmalen;
 		struct wil_ctx *ctx;
 
-		if (tx) {
+		if (!vring->is_rx) {
 			struct vring_tx_desc dd, *d = &dd;
 			volatile struct vring_tx_desc *_d =
-					&vring->va[vring->swtail].tx;
+					&vring->va[vring->swtail].tx.legacy;
 
 			ctx = &vring->ctx[vring->swtail];
 			if (!ctx) {
 				wil_dbg_txrx(wil,
 					     "ctx(%d) was already completed\n",
 					     vring->swtail);
-				vring->swtail = wil_vring_next_tail(vring);
+				vring->swtail = wil_ring_next_tail(vring);
 				continue;
 			}
 			*d = *_d;
-			wil_txdesc_unmap(dev, d, ctx);
+			wil_txdesc_unmap(dev, (union wil_tx_desc *)d, ctx);
 			if (ctx->skb)
 				dev_kfree_skb_any(ctx->skb);
-			vring->swtail = wil_vring_next_tail(vring);
+			vring->swtail = wil_ring_next_tail(vring);
 		} else { /* rx */
 			struct vring_rx_desc dd, *d = &dd;
 			volatile struct vring_rx_desc *_d =
-					&vring->va[vring->swhead].rx;
+				&vring->va[vring->swhead].rx.legacy;
 
 			ctx = &vring->ctx[vring->swhead];
 			*d = *_d;
@@ -287,7 +250,7 @@ static void wil_vring_free(struct wil6210_priv *wil, struct vring *vring,
 			dmalen = le16_to_cpu(d->dma.length);
 			dma_unmap_single(dev, pa, dmalen, DMA_FROM_DEVICE);
 			kfree_skb(ctx->skb);
-			wil_vring_advance_head(vring, 1);
+			wil_ring_advance_head(vring, 1);
 		}
 	}
 	dma_free_coherent(dev, sz, (void *)vring->va, vring->pa);
@@ -302,13 +265,13 @@ static void wil_vring_free(struct wil6210_priv *wil, struct vring *vring,
  *
  * Safe to call from IRQ
  */
-static int wil_vring_alloc_skb(struct wil6210_priv *wil, struct vring *vring,
+static int wil_vring_alloc_skb(struct wil6210_priv *wil, struct wil_ring *vring,
 			       u32 i, int headroom)
 {
 	struct device *dev = wil_to_dev(wil);
 	unsigned int sz = wil->rx_buf_len + ETH_HLEN + wil_rx_snaplen();
 	struct vring_rx_desc dd, *d = &dd;
-	volatile struct vring_rx_desc *_d = &vring->va[i].rx;
+	volatile struct vring_rx_desc *_d = &vring->va[i].rx.legacy;
 	dma_addr_t pa;
 	struct sk_buff *skb = dev_alloc_skb(sz + headroom);
 
@@ -445,19 +408,12 @@ static void wil_rx_add_radiotap_header(struct wil6210_priv *wil,
 	}
 }
 
-/* similar to ieee80211_ version, but FC contain only 1-st byte */
-static inline int wil_is_back_req(u8 fc)
-{
-	return (fc & (IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
-	       (IEEE80211_FTYPE_CTL | IEEE80211_STYPE_BACK_REQ);
-}
-
-bool wil_is_rx_idle(struct wil6210_priv *wil)
+static bool wil_is_rx_idle(struct wil6210_priv *wil)
 {
 	struct vring_rx_desc *_d;
-	struct vring *vring = &wil->vring_rx;
+	struct wil_ring *ring = &wil->ring_rx;
 
-	_d = (struct vring_rx_desc *)&vring->va[vring->swhead].rx;
+	_d = (struct vring_rx_desc *)&ring->va[ring->swhead].rx.legacy;
 	if (_d->dma.status & RX_DMA_STATUS_DU)
 		return false;
 
@@ -472,7 +428,7 @@ bool wil_is_rx_idle(struct wil6210_priv *wil)
  * Safe to call from IRQ
  */
 static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil,
-					 struct vring *vring)
+					 struct wil_ring *vring)
 {
 	struct device *dev = wil_to_dev(wil);
 	struct wil6210_vif *vif;
@@ -492,11 +448,11 @@ static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil,
 	BUILD_BUG_ON(sizeof(struct vring_rx_desc) > sizeof(skb->cb));
 
 again:
-	if (unlikely(wil_vring_is_empty(vring)))
+	if (unlikely(wil_ring_is_empty(vring)))
 		return NULL;
 
 	i = (int)vring->swhead;
-	_d = &vring->va[i].rx;
+	_d = &vring->va[i].rx.legacy;
 	if (unlikely(!(_d->dma.status & RX_DMA_STATUS_DU))) {
 		/* it is not error, we just reached end of Rx done area */
 		return NULL;
@@ -504,7 +460,7 @@ static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil,
 
 	skb = vring->ctx[i].skb;
 	vring->ctx[i].skb = NULL;
-	wil_vring_advance_head(vring, 1);
+	wil_ring_advance_head(vring, 1);
 	if (!skb) {
 		wil_err(wil, "No Rx skb at [%d]\n", i);
 		goto again;
@@ -641,15 +597,15 @@ static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil,
 static int wil_rx_refill(struct wil6210_priv *wil, int count)
 {
 	struct net_device *ndev = wil->main_ndev;
-	struct vring *v = &wil->vring_rx;
+	struct wil_ring *v = &wil->ring_rx;
 	u32 next_tail;
 	int rc = 0;
 	int headroom = ndev->type == ARPHRD_IEEE80211_RADIOTAP ?
 			WIL6210_RTAP_SIZE : 0;
 
-	for (; next_tail = wil_vring_next_tail(v),
-			(next_tail != v->swhead) && (count-- > 0);
-			v->swtail = next_tail) {
+	for (; next_tail = wil_ring_next_tail(v),
+	     (next_tail != v->swhead) && (count-- > 0);
+	     v->swtail = next_tail) {
 		rc = wil_vring_alloc_skb(wil, v, v->swtail, headroom);
 		if (unlikely(rc)) {
 			wil_err_ratelimited(wil, "Error %d in rx refill[%d]\n",
@@ -677,7 +633,7 @@ static int wil_rx_refill(struct wil6210_priv *wil, int count)
  * Cut'n'paste from original memcmp (see lib/string.c)
  * with minimal modifications
  */
-static int reverse_memcmp(const void *cs, const void *ct, size_t count)
+int reverse_memcmp(const void *cs, const void *ct, size_t count)
 {
 	const unsigned char *su1, *su2;
 	int res = 0;
@@ -722,6 +678,15 @@ static int wil_rx_crypto_check(struct wil6210_priv *wil, struct sk_buff *skb)
 	return 0;
 }
 
+static void wil_get_netif_rx_params(struct sk_buff *skb, int *cid,
+				    int *security)
+{
+	struct vring_rx_desc *d = wil_skb_rxdesc(skb);
+
+	*cid = wil_rxdesc_cid(d); /* always 0..7, no need to check */
+	*security = wil_rxdesc_security(d);
+}
+
 /*
  * Pass Rx packet to the netif. Update statistics.
  * Called in softirq context (NAPI poll).
@@ -733,15 +698,14 @@ void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev)
 	struct wil6210_priv *wil = ndev_to_wil(ndev);
 	struct wireless_dev *wdev = vif_to_wdev(vif);
 	unsigned int len = skb->len;
-	struct vring_rx_desc *d = wil_skb_rxdesc(skb);
-	int cid = wil_rxdesc_cid(d); /* always 0..7, no need to check */
-	int security = wil_rxdesc_security(d);
+	int cid;
+	int security;
 	struct ethhdr *eth = (void *)skb->data;
 	/* here looking for DA, not A1, thus Rxdesc's 'mcast' indication
 	 * is not suitable, need to look at data
 	 */
 	int mcast = is_multicast_ether_addr(eth->h_dest);
-	struct wil_net_stats *stats = &wil->sta[cid].stats;
+	struct wil_net_stats *stats;
 	struct sk_buff *xmit_skb = NULL;
 	static const char * const gro_res_str[] = {
 		[GRO_MERGED]		= "GRO_MERGED",
@@ -751,6 +715,10 @@ void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev)
 		[GRO_DROP]		= "GRO_DROP",
 	};
 
+	wil->txrx_ops.get_netif_rx_params(skb, &cid, &security);
+
+	stats = &wil->sta[cid].stats;
+
 	if (ndev->features & NETIF_F_RXHASH)
 		/* fake L4 to ensure it won't be re-calculated later
 		 * set hash to any non-zero value to activate rps
@@ -761,7 +729,7 @@ void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev)
 
 	skb_orphan(skb);
 
-	if (security && (wil_rx_crypto_check(wil, skb) != 0)) {
+	if (security && (wil->txrx_ops.rx_crypto_check(wil, skb) != 0)) {
 		rc = GRO_DROP;
 		dev_kfree_skb(skb);
 		stats->rx_replay++;
@@ -835,7 +803,7 @@ void wil_rx_handle(struct wil6210_priv *wil, int *quota)
 {
 	struct net_device *ndev = wil->main_ndev;
 	struct wireless_dev *wdev = ndev->ieee80211_ptr;
-	struct vring *v = &wil->vring_rx;
+	struct wil_ring *v = &wil->ring_rx;
 	struct sk_buff *skb;
 
 	if (unlikely(!v->va)) {
@@ -875,9 +843,9 @@ static void wil_rx_buf_len_init(struct wil6210_priv *wil)
 	}
 }
 
-int wil_rx_init(struct wil6210_priv *wil, u16 size)
+static int wil_rx_init(struct wil6210_priv *wil, u16 size)
 {
-	struct vring *vring = &wil->vring_rx;
+	struct wil_ring *vring = &wil->ring_rx;
 	int rc;
 
 	wil_dbg_misc(wil, "rx_init\n");
@@ -890,6 +858,7 @@ int wil_rx_init(struct wil6210_priv *wil, u16 size)
 	wil_rx_buf_len_init(wil);
 
 	vring->size = size;
+	vring->is_rx = true;
 	rc = wil_vring_alloc(wil, vring);
 	if (rc)
 		return rc;
@@ -904,22 +873,46 @@ int wil_rx_init(struct wil6210_priv *wil, u16 size)
 
 	return 0;
  err_free:
-	wil_vring_free(wil, vring, 0);
+	wil_vring_free(wil, vring);
 
 	return rc;
 }
 
-void wil_rx_fini(struct wil6210_priv *wil)
+static void wil_rx_fini(struct wil6210_priv *wil)
 {
-	struct vring *vring = &wil->vring_rx;
+	struct wil_ring *vring = &wil->ring_rx;
 
 	wil_dbg_misc(wil, "rx_fini\n");
 
 	if (vring->va)
-		wil_vring_free(wil, vring, 0);
+		wil_vring_free(wil, vring);
 }
 
-static inline void wil_tx_data_init(struct vring_tx_data *txdata)
+static int wil_tx_desc_map(union wil_tx_desc *desc, dma_addr_t pa,
+			   u32 len, int vring_index)
+{
+	struct vring_tx_desc *d = &desc->legacy;
+
+	wil_desc_addr_set(&d->dma.addr, pa);
+	d->dma.ip_length = 0;
+	/* 0..6: mac_length; 7:ip_version 0-IP6 1-IP4*/
+	d->dma.b11 = 0/*14 | BIT(7)*/;
+	d->dma.error = 0;
+	d->dma.status = 0; /* BIT(0) should be 0 for HW_OWNED */
+	d->dma.length = cpu_to_le16((u16)len);
+	d->dma.d0 = (vring_index << DMA_CFG_DESC_TX_0_QID_POS);
+	d->mac.d[0] = 0;
+	d->mac.d[1] = 0;
+	d->mac.d[2] = 0;
+	d->mac.ucode_cmd = 0;
+	/* translation type:  0 - bypass; 1 - 802.3; 2 - native wifi */
+	d->mac.d[2] = BIT(MAC_CFG_DESC_TX_2_SNAP_HDR_INSERTION_EN_POS) |
+		      (1 << MAC_CFG_DESC_TX_2_L2_TRANSLATION_TYPE_POS);
+
+	return 0;
+}
+
+void wil_tx_data_init(struct wil_ring_tx_data *txdata)
 {
 	spin_lock_bh(&txdata->lock);
 	txdata->dot1x_open = 0;
@@ -935,8 +928,8 @@ static inline void wil_tx_data_init(struct vring_tx_data *txdata)
 	spin_unlock_bh(&txdata->lock);
 }
 
-int wil_vring_init_tx(struct wil6210_vif *vif, int id, int size,
-		      int cid, int tid)
+static int wil_vring_init_tx(struct wil6210_vif *vif, int id, int size,
+			     int cid, int tid)
 {
 	struct wil6210_priv *wil = vif_to_wil(vif);
 	int rc;
@@ -966,8 +959,8 @@ int wil_vring_init_tx(struct wil6210_vif *vif, int id, int size,
 	} __packed reply = {
 		.cmd = {.status = WMI_FW_STATUS_FAILURE},
 	};
-	struct vring *vring = &wil->vring_tx[id];
-	struct vring_tx_data *txdata = &wil->vring_tx_data[id];
+	struct wil_ring *vring = &wil->ring_tx[id];
+	struct wil_ring_tx_data *txdata = &wil->ring_tx_data[id];
 
 	wil_dbg_misc(wil, "vring_init_tx: max_mpdu_size %d\n",
 		     cmd.vring_cfg.tx_sw_ring.max_mpdu_size);
@@ -980,13 +973,14 @@ int wil_vring_init_tx(struct wil6210_vif *vif, int id, int size,
 	}
 
 	wil_tx_data_init(txdata);
+	vring->is_rx = false;
 	vring->size = size;
 	rc = wil_vring_alloc(wil, vring);
 	if (rc)
 		goto out;
 
-	wil->vring2cid_tid[id][0] = cid;
-	wil->vring2cid_tid[id][1] = tid;
+	wil->ring2cid_tid[id][0] = cid;
+	wil->ring2cid_tid[id][1] = tid;
 
 	cmd.vring_cfg.tx_sw_ring.ring_mem_base = cpu_to_le64(vring->pa);
 
@@ -1019,9 +1013,9 @@ int wil_vring_init_tx(struct wil6210_vif *vif, int id, int size,
 	txdata->dot1x_open = false;
 	txdata->enabled = 0;
 	spin_unlock_bh(&txdata->lock);
-	wil_vring_free(wil, vring, 1);
-	wil->vring2cid_tid[id][0] = WIL6210_MAX_CID;
-	wil->vring2cid_tid[id][1] = 0;
+	wil_vring_free(wil, vring);
+	wil->ring2cid_tid[id][0] = WIL6210_MAX_CID;
+	wil->ring2cid_tid[id][1] = 0;
 
  out:
 
@@ -1050,8 +1044,8 @@ int wil_vring_init_bcast(struct wil6210_vif *vif, int id, int size)
 	} __packed reply = {
 		.cmd = {.status = WMI_FW_STATUS_FAILURE},
 	};
-	struct vring *vring = &wil->vring_tx[id];
-	struct vring_tx_data *txdata = &wil->vring_tx_data[id];
+	struct wil_ring *vring = &wil->ring_tx[id];
+	struct wil_ring_tx_data *txdata = &wil->ring_tx_data[id];
 
 	wil_dbg_misc(wil, "vring_init_bcast: max_mpdu_size %d\n",
 		     cmd.vring_cfg.tx_sw_ring.max_mpdu_size);
@@ -1064,13 +1058,14 @@ int wil_vring_init_bcast(struct wil6210_vif *vif, int id, int size)
 	}
 
 	wil_tx_data_init(txdata);
+	vring->is_rx = false;
 	vring->size = size;
 	rc = wil_vring_alloc(wil, vring);
 	if (rc)
 		goto out;
 
-	wil->vring2cid_tid[id][0] = WIL6210_MAX_CID; /* CID */
-	wil->vring2cid_tid[id][1] = 0; /* TID */
+	wil->ring2cid_tid[id][0] = WIL6210_MAX_CID; /* CID */
+	wil->ring2cid_tid[id][1] = 0; /* TID */
 
 	cmd.vring_cfg.tx_sw_ring.ring_mem_base = cpu_to_le64(vring->pa);
 
@@ -1101,62 +1096,32 @@ int wil_vring_init_bcast(struct wil6210_vif *vif, int id, int size)
 	txdata->enabled = 0;
 	txdata->dot1x_open = false;
 	spin_unlock_bh(&txdata->lock);
-	wil_vring_free(wil, vring, 1);
+	wil_vring_free(wil, vring);
  out:
 
 	return rc;
 }
 
-void wil_vring_fini_tx(struct wil6210_priv *wil, int id)
-{
-	struct vring *vring = &wil->vring_tx[id];
-	struct vring_tx_data *txdata = &wil->vring_tx_data[id];
-
-	lockdep_assert_held(&wil->mutex);
-
-	if (!vring->va)
-		return;
-
-	wil_dbg_misc(wil, "vring_fini_tx: id=%d\n", id);
-
-	spin_lock_bh(&txdata->lock);
-	txdata->dot1x_open = false;
-	txdata->mid = U8_MAX;
-	txdata->enabled = 0; /* no Tx can be in progress or start anew */
-	spin_unlock_bh(&txdata->lock);
-	/* napi_synchronize waits for completion of the current NAPI but will
-	 * not prevent the next NAPI run.
-	 * Add a memory barrier to guarantee that txdata->enabled is zeroed
-	 * before napi_synchronize so that the next scheduled NAPI will not
-	 * handle this vring
-	 */
-	wmb();
-	/* make sure NAPI won't touch this vring */
-	if (test_bit(wil_status_napi_en, wil->status))
-		napi_synchronize(&wil->napi_tx);
-
-	wil_vring_free(wil, vring, 1);
-}
-
-static struct vring *wil_find_tx_ucast(struct wil6210_priv *wil,
-				       struct wil6210_vif *vif,
-				       struct sk_buff *skb)
+static struct wil_ring *wil_find_tx_ucast(struct wil6210_priv *wil,
+					  struct wil6210_vif *vif,
+					  struct sk_buff *skb)
 {
 	int i;
 	struct ethhdr *eth = (void *)skb->data;
 	int cid = wil_find_cid(wil, vif->mid, eth->h_dest);
+	int min_ring_id = wil_get_min_tx_ring_id(wil);
 
 	if (cid < 0)
 		return NULL;
 
 	/* TODO: fix for multiple TID */
-	for (i = 0; i < ARRAY_SIZE(wil->vring2cid_tid); i++) {
-		if (!wil->vring_tx_data[i].dot1x_open &&
-		    (skb->protocol != cpu_to_be16(ETH_P_PAE)))
+	for (i = min_ring_id; i < ARRAY_SIZE(wil->ring2cid_tid); i++) {
+		if (!wil->ring_tx_data[i].dot1x_open &&
+		    skb->protocol != cpu_to_be16(ETH_P_PAE))
 			continue;
-		if (wil->vring2cid_tid[i][0] == cid) {
-			struct vring *v = &wil->vring_tx[i];
-			struct vring_tx_data *txdata = &wil->vring_tx_data[i];
+		if (wil->ring2cid_tid[i][0] == cid) {
+			struct wil_ring *v = &wil->ring_tx[i];
+			struct wil_ring_tx_data *txdata = &wil->ring_tx_data[i];
 
 			wil_dbg_txrx(wil, "find_tx_ucast: (%pM) -> [%d]\n",
 				     eth->h_dest, i);
@@ -1174,42 +1139,43 @@ static struct vring *wil_find_tx_ucast(struct wil6210_priv *wil,
 	return NULL;
 }
 
-static int wil_tx_vring(struct wil6210_priv *wil, struct wil6210_vif *vif,
-			struct vring *vring, struct sk_buff *skb);
+static int wil_tx_ring(struct wil6210_priv *wil, struct wil6210_vif *vif,
+		       struct wil_ring *ring, struct sk_buff *skb);
 
-static struct vring *wil_find_tx_vring_sta(struct wil6210_priv *wil,
-					   struct wil6210_vif *vif,
-					   struct sk_buff *skb)
+static struct wil_ring *wil_find_tx_ring_sta(struct wil6210_priv *wil,
+					     struct wil6210_vif *vif,
+					     struct sk_buff *skb)
 {
-	struct vring *v;
+	struct wil_ring *ring;
 	int i;
 	u8 cid;
-	struct vring_tx_data *txdata;
+	struct wil_ring_tx_data *txdata;
+	int min_ring_id = wil_get_min_tx_ring_id(wil);
 
 	/* In the STA mode, it is expected to have only 1 VRING
 	 * for the AP we connected to.
 	 * find 1-st vring eligible for this skb and use it.
 	 */
-	for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) {
-		v = &wil->vring_tx[i];
-		txdata = &wil->vring_tx_data[i];
-		if (!v->va || !txdata->enabled || txdata->mid != vif->mid)
+	for (i = min_ring_id; i < WIL6210_MAX_TX_RINGS; i++) {
+		ring = &wil->ring_tx[i];
+		txdata = &wil->ring_tx_data[i];
+		if (!ring->va || !txdata->enabled || txdata->mid != vif->mid)
 			continue;
 
-		cid = wil->vring2cid_tid[i][0];
+		cid = wil->ring2cid_tid[i][0];
 		if (cid >= WIL6210_MAX_CID) /* skip BCAST */
 			continue;
 
-		if (!wil->vring_tx_data[i].dot1x_open &&
-		    (skb->protocol != cpu_to_be16(ETH_P_PAE)))
+		if (!wil->ring_tx_data[i].dot1x_open &&
+		    skb->protocol != cpu_to_be16(ETH_P_PAE))
 			continue;
 
 		wil_dbg_txrx(wil, "Tx -> ring %d\n", i);
 
-		return v;
+		return ring;
 	}
 
-	wil_dbg_txrx(wil, "Tx while no vrings active?\n");
+	wil_dbg_txrx(wil, "Tx while no rings active?\n");
 
 	return NULL;
 }
@@ -1225,22 +1191,22 @@ static struct vring *wil_find_tx_vring_sta(struct wil6210_priv *wil,
  * Use old strategy when new is not supported yet:
  *  - for PBSS
  */
-static struct vring *wil_find_tx_bcast_1(struct wil6210_priv *wil,
-					 struct wil6210_vif *vif,
-					 struct sk_buff *skb)
+static struct wil_ring *wil_find_tx_bcast_1(struct wil6210_priv *wil,
+					    struct wil6210_vif *vif,
+					    struct sk_buff *skb)
 {
-	struct vring *v;
-	struct vring_tx_data *txdata;
-	int i = vif->bcast_vring;
+	struct wil_ring *v;
+	struct wil_ring_tx_data *txdata;
+	int i = vif->bcast_ring;
 
 	if (i < 0)
 		return NULL;
-	v = &wil->vring_tx[i];
-	txdata = &wil->vring_tx_data[i];
+	v = &wil->ring_tx[i];
+	txdata = &wil->ring_tx_data[i];
 	if (!v->va || !txdata->enabled)
 		return NULL;
-	if (!wil->vring_tx_data[i].dot1x_open &&
-	    (skb->protocol != cpu_to_be16(ETH_P_PAE)))
+	if (!wil->ring_tx_data[i].dot1x_open &&
+	    skb->protocol != cpu_to_be16(ETH_P_PAE))
 		return NULL;
 
 	return v;
@@ -1250,35 +1216,36 @@ static void wil_set_da_for_vring(struct wil6210_priv *wil,
 				 struct sk_buff *skb, int vring_index)
 {
 	struct ethhdr *eth = (void *)skb->data;
-	int cid = wil->vring2cid_tid[vring_index][0];
+	int cid = wil->ring2cid_tid[vring_index][0];
 
 	ether_addr_copy(eth->h_dest, wil->sta[cid].addr);
 }
 
-static struct vring *wil_find_tx_bcast_2(struct wil6210_priv *wil,
-					 struct wil6210_vif *vif,
-					 struct sk_buff *skb)
+static struct wil_ring *wil_find_tx_bcast_2(struct wil6210_priv *wil,
+					    struct wil6210_vif *vif,
+					    struct sk_buff *skb)
 {
-	struct vring *v, *v2;
+	struct wil_ring *v, *v2;
 	struct sk_buff *skb2;
 	int i;
 	u8 cid;
 	struct ethhdr *eth = (void *)skb->data;
 	char *src = eth->h_source;
-	struct vring_tx_data *txdata, *txdata2;
+	struct wil_ring_tx_data *txdata, *txdata2;
+	int min_ring_id = wil_get_min_tx_ring_id(wil);
 
 	/* find 1-st vring eligible for data */
-	for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) {
-		v = &wil->vring_tx[i];
-		txdata = &wil->vring_tx_data[i];
+	for (i = min_ring_id; i < WIL6210_MAX_TX_RINGS; i++) {
+		v = &wil->ring_tx[i];
+		txdata = &wil->ring_tx_data[i];
 		if (!v->va || !txdata->enabled || txdata->mid != vif->mid)
 			continue;
 
-		cid = wil->vring2cid_tid[i][0];
+		cid = wil->ring2cid_tid[i][0];
 		if (cid >= WIL6210_MAX_CID) /* skip BCAST */
 			continue;
-		if (!wil->vring_tx_data[i].dot1x_open &&
-		    (skb->protocol != cpu_to_be16(ETH_P_PAE)))
+		if (!wil->ring_tx_data[i].dot1x_open &&
+		    skb->protocol != cpu_to_be16(ETH_P_PAE))
 			continue;
 
 		/* don't Tx back to source when re-routing Rx->Tx at the AP */
@@ -1298,15 +1265,15 @@ static struct vring *wil_find_tx_bcast_2(struct wil6210_priv *wil,
 
 	/* find other active vrings and duplicate skb for each */
 	for (i++; i < WIL6210_MAX_TX_RINGS; i++) {
-		v2 = &wil->vring_tx[i];
-		txdata2 = &wil->vring_tx_data[i];
+		v2 = &wil->ring_tx[i];
+		txdata2 = &wil->ring_tx_data[i];
 		if (!v2->va || txdata2->mid != vif->mid)
 			continue;
-		cid = wil->vring2cid_tid[i][0];
+		cid = wil->ring2cid_tid[i][0];
 		if (cid >= WIL6210_MAX_CID) /* skip BCAST */
 			continue;
-		if (!wil->vring_tx_data[i].dot1x_open &&
-		    (skb->protocol != cpu_to_be16(ETH_P_PAE)))
+		if (!wil->ring_tx_data[i].dot1x_open &&
+		    skb->protocol != cpu_to_be16(ETH_P_PAE))
 			continue;
 
 		if (0 == memcmp(wil->sta[cid].addr, src, ETH_ALEN))
@@ -1316,7 +1283,7 @@ static struct vring *wil_find_tx_bcast_2(struct wil6210_priv *wil,
 		if (skb2) {
 			wil_dbg_txrx(wil, "BCAST DUP -> ring %d\n", i);
 			wil_set_da_for_vring(wil, skb2, i);
-			wil_tx_vring(wil, vif, v2, skb2);
+			wil_tx_ring(wil, vif, v2, skb2);
 		} else {
 			wil_err(wil, "skb_copy failed\n");
 		}
@@ -1325,28 +1292,6 @@ static struct vring *wil_find_tx_bcast_2(struct wil6210_priv *wil,
 	return v;
 }
 
-static int wil_tx_desc_map(struct vring_tx_desc *d, dma_addr_t pa, u32 len,
-			   int vring_index)
-{
-	wil_desc_addr_set(&d->dma.addr, pa);
-	d->dma.ip_length = 0;
-	/* 0..6: mac_length; 7:ip_version 0-IP6 1-IP4*/
-	d->dma.b11 = 0/*14 | BIT(7)*/;
-	d->dma.error = 0;
-	d->dma.status = 0; /* BIT(0) should be 0 for HW_OWNED */
-	d->dma.length = cpu_to_le16((u16)len);
-	d->dma.d0 = (vring_index << DMA_CFG_DESC_TX_0_QID_POS);
-	d->mac.d[0] = 0;
-	d->mac.d[1] = 0;
-	d->mac.d[2] = 0;
-	d->mac.ucode_cmd = 0;
-	/* translation type:  0 - bypass; 1 - 802.3; 2 - native wifi */
-	d->mac.d[2] = BIT(MAC_CFG_DESC_TX_2_SNAP_HDR_INSERTION_EN_POS) |
-		      (1 << MAC_CFG_DESC_TX_2_L2_TRANSLATION_TYPE_POS);
-
-	return 0;
-}
-
 static inline
 void wil_tx_desc_set_nr_frags(struct vring_tx_desc *d, int nr_frags)
 {
@@ -1454,7 +1399,7 @@ static inline void wil_set_tx_desc_last_tso(volatile struct vring_tx_desc *d)
 }
 
 static int __wil_tx_vring_tso(struct wil6210_priv *wil, struct wil6210_vif *vif,
-			      struct vring *vring, struct sk_buff *skb)
+			      struct wil_ring *vring, struct sk_buff *skb)
 {
 	struct device *dev = wil_to_dev(wil);
 
@@ -1474,13 +1419,13 @@ static int __wil_tx_vring_tso(struct wil6210_priv *wil, struct wil6210_vif *vif,
 	int sg_desc_cnt = 0; /* number of descriptors for current mss*/
 
 	u32 swhead = vring->swhead;
-	int used, avail = wil_vring_avail_tx(vring);
+	int used, avail = wil_ring_avail_tx(vring);
 	int nr_frags = skb_shinfo(skb)->nr_frags;
 	int min_desc_required = nr_frags + 1;
 	int mss = skb_shinfo(skb)->gso_size;	/* payload size w/o headers */
 	int f, len, hdrlen, headlen;
-	int vring_index = vring - wil->vring_tx;
-	struct vring_tx_data *txdata = &wil->vring_tx_data[vring_index];
+	int vring_index = vring - wil->ring_tx;
+	struct wil_ring_tx_data *txdata = &wil->ring_tx_data[vring_index];
 	uint i = swhead;
 	dma_addr_t pa;
 	const skb_frag_t *frag = NULL;
@@ -1548,7 +1493,7 @@ static int __wil_tx_vring_tso(struct wil6210_priv *wil, struct wil6210_vif *vif,
 	tcp_hdr_len = tcp_hdrlen(skb);
 	skb_net_hdr_len = skb_network_header_len(skb);
 
-	_hdr_desc = &vring->va[i].tx;
+	_hdr_desc = &vring->va[i].tx.legacy;
 
 	pa = dma_map_single(dev, skb->data, hdrlen, DMA_TO_DEVICE);
 	if (unlikely(dma_mapping_error(dev, pa))) {
@@ -1556,7 +1501,8 @@ static int __wil_tx_vring_tso(struct wil6210_priv *wil, struct wil6210_vif *vif,
 		goto err_exit;
 	}
 
-	wil_tx_desc_map(hdr_desc, pa, hdrlen, vring_index);
+	wil->txrx_ops.tx_desc_map((union wil_tx_desc *)hdr_desc, pa,
+				  hdrlen, vring_index);
 	wil_tx_desc_offload_setup_tso(hdr_desc, skb, wil_tso_type_hdr, is_ipv4,
 				      tcp_hdr_len, skb_net_hdr_len);
 	wil_tx_last_desc(hdr_desc);
@@ -1613,7 +1559,7 @@ static int __wil_tx_vring_tso(struct wil6210_priv *wil, struct wil6210_vif *vif,
 				goto mem_error;
 			}
 
-			_desc = &vring->va[i].tx;
+			_desc = &vring->va[i].tx.legacy;
 
 			if (!_first_desc) {
 				_first_desc = _desc;
@@ -1623,7 +1569,8 @@ static int __wil_tx_vring_tso(struct wil6210_priv *wil, struct wil6210_vif *vif,
 				d = &desc_mem;
 			}
 
-			wil_tx_desc_map(d, pa, lenmss, vring_index);
+			wil->txrx_ops.tx_desc_map((union wil_tx_desc *)d,
+						  pa, lenmss, vring_index);
 			wil_tx_desc_offload_setup_tso(d, skb, desc_tso_type,
 						      is_ipv4, tcp_hdr_len,
 						      skb_net_hdr_len);
@@ -1701,8 +1648,8 @@ static int __wil_tx_vring_tso(struct wil6210_priv *wil, struct wil6210_vif *vif,
 	vring->ctx[i].skb = skb_get(skb);
 
 	/* performance monitoring */
-	used = wil_vring_used_tx(vring);
-	if (wil_val_in_range(wil->vring_idle_trsh,
+	used = wil_ring_used_tx(vring);
+	if (wil_val_in_range(wil->ring_idle_trsh,
 			     used, used + descs_used)) {
 		txdata->idle += get_cycles() - txdata->last_idle;
 		wil_dbg_txrx(wil,  "Ring[%2d] not idle %d -> %d\n",
@@ -1717,7 +1664,7 @@ static int __wil_tx_vring_tso(struct wil6210_priv *wil, struct wil6210_vif *vif,
 	wmb();
 
 	/* advance swhead */
-	wil_vring_advance_head(vring, descs_used);
+	wil_ring_advance_head(vring, descs_used);
 	wil_dbg_txrx(wil, "TSO: Tx swhead %d -> %d\n", swhead, vring->swhead);
 
 	/* make sure all writes to descriptors (shared memory) are done before
@@ -1733,12 +1680,12 @@ static int __wil_tx_vring_tso(struct wil6210_priv *wil, struct wil6210_vif *vif,
 		struct wil_ctx *ctx;
 
 		i = (swhead + descs_used - 1) % vring->size;
-		d = (struct vring_tx_desc *)&vring->va[i].tx;
-		_desc = &vring->va[i].tx;
+		d = (struct vring_tx_desc *)&vring->va[i].tx.legacy;
+		_desc = &vring->va[i].tx.legacy;
 		*d = *_desc;
 		_desc->dma.status = TX_DMA_STATUS_DU;
 		ctx = &vring->ctx[i];
-		wil_txdesc_unmap(dev, d, ctx);
+		wil_txdesc_unmap(dev, (union wil_tx_desc *)d, ctx);
 		memset(ctx, 0, sizeof(*ctx));
 		descs_used--;
 	}
@@ -1746,26 +1693,26 @@ static int __wil_tx_vring_tso(struct wil6210_priv *wil, struct wil6210_vif *vif,
 	return rc;
 }
 
-static int __wil_tx_vring(struct wil6210_priv *wil, struct wil6210_vif *vif,
-			  struct vring *vring, struct sk_buff *skb)
+static int __wil_tx_ring(struct wil6210_priv *wil, struct wil6210_vif *vif,
+			 struct wil_ring *ring, struct sk_buff *skb)
 {
 	struct device *dev = wil_to_dev(wil);
 	struct vring_tx_desc dd, *d = &dd;
 	volatile struct vring_tx_desc *_d;
-	u32 swhead = vring->swhead;
-	int avail = wil_vring_avail_tx(vring);
+	u32 swhead = ring->swhead;
+	int avail = wil_ring_avail_tx(ring);
 	int nr_frags = skb_shinfo(skb)->nr_frags;
 	uint f = 0;
-	int vring_index = vring - wil->vring_tx;
-	struct vring_tx_data *txdata = &wil->vring_tx_data[vring_index];
+	int ring_index = ring - wil->ring_tx;
+	struct wil_ring_tx_data *txdata = &wil->ring_tx_data[ring_index];
 	uint i = swhead;
 	dma_addr_t pa;
 	int used;
-	bool mcast = (vring_index == vif->bcast_vring);
+	bool mcast = (ring_index == vif->bcast_ring);
 	uint len = skb_headlen(skb);
 
-	wil_dbg_txrx(wil, "tx_vring: %d bytes to vring %d\n", skb->len,
-		     vring_index);
+	wil_dbg_txrx(wil, "tx_ring: %d bytes to ring %d, nr_frags %d\n",
+		     skb->len, ring_index, nr_frags);
 
 	if (unlikely(!txdata->enabled))
 		return -EINVAL;
@@ -1773,23 +1720,24 @@ static int __wil_tx_vring(struct wil6210_priv *wil, struct wil6210_vif *vif,
 	if (unlikely(avail < 1 + nr_frags)) {
 		wil_err_ratelimited(wil,
 				    "Tx ring[%2d] full. No space for %d fragments\n",
-				    vring_index, 1 + nr_frags);
+				    ring_index, 1 + nr_frags);
 		return -ENOMEM;
 	}
-	_d = &vring->va[i].tx;
+	_d = &ring->va[i].tx.legacy;
 
 	pa = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
 
-	wil_dbg_txrx(wil, "Tx[%2d] skb %d bytes 0x%p -> %pad\n", vring_index,
+	wil_dbg_txrx(wil, "Tx[%2d] skb %d bytes 0x%p -> %pad\n", ring_index,
 		     skb_headlen(skb), skb->data, &pa);
 	wil_hex_dump_txrx("Tx ", DUMP_PREFIX_OFFSET, 16, 1,
 			  skb->data, skb_headlen(skb), false);
 
 	if (unlikely(dma_mapping_error(dev, pa)))
 		return -EINVAL;
-	vring->ctx[i].mapped_as = wil_mapped_as_single;
+	ring->ctx[i].mapped_as = wil_mapped_as_single;
 	/* 1-st segment */
-	wil_tx_desc_map(d, pa, len, vring_index);
+	wil->txrx_ops.tx_desc_map((union wil_tx_desc *)d, pa, len,
+				   ring_index);
 	if (unlikely(mcast)) {
 		d->mac.d[0] |= BIT(MAC_CFG_DESC_TX_0_MCS_EN_POS); /* MCS 0 */
 		if (unlikely(len > WIL_BCAST_MCS0_LIMIT)) /* set MCS 1 */
@@ -1798,11 +1746,11 @@ static int __wil_tx_vring(struct wil6210_priv *wil, struct wil6210_vif *vif,
 	/* Process TCP/UDP checksum offloading */
 	if (unlikely(wil_tx_desc_offload_setup(d, skb))) {
 		wil_err(wil, "Tx[%2d] Failed to set cksum, drop packet\n",
-			vring_index);
+			ring_index);
 		goto dma_error;
 	}
 
-	vring->ctx[i].nr_frags = nr_frags;
+	ring->ctx[i].nr_frags = nr_frags;
 	wil_tx_desc_set_nr_frags(d, nr_frags + 1);
 
 	/* middle segments */
@@ -1812,20 +1760,21 @@ static int __wil_tx_vring(struct wil6210_priv *wil, struct wil6210_vif *vif,
 		int len = skb_frag_size(frag);
 
 		*_d = *d;
-		wil_dbg_txrx(wil, "Tx[%2d] desc[%4d]\n", vring_index, i);
+		wil_dbg_txrx(wil, "Tx[%2d] desc[%4d]\n", ring_index, i);
 		wil_hex_dump_txrx("TxD ", DUMP_PREFIX_NONE, 32, 4,
 				  (const void *)d, sizeof(*d), false);
-		i = (swhead + f + 1) % vring->size;
-		_d = &vring->va[i].tx;
+		i = (swhead + f + 1) % ring->size;
+		_d = &ring->va[i].tx.legacy;
 		pa = skb_frag_dma_map(dev, frag, 0, skb_frag_size(frag),
 				      DMA_TO_DEVICE);
 		if (unlikely(dma_mapping_error(dev, pa))) {
 			wil_err(wil, "Tx[%2d] failed to map fragment\n",
-				vring_index);
+				ring_index);
 			goto dma_error;
 		}
-		vring->ctx[i].mapped_as = wil_mapped_as_page;
-		wil_tx_desc_map(d, pa, len, vring_index);
+		ring->ctx[i].mapped_as = wil_mapped_as_page;
+		wil->txrx_ops.tx_desc_map((union wil_tx_desc *)d,
+					   pa, len, ring_index);
 		/* no need to check return code -
 		 * if it succeeded for 1-st descriptor,
 		 * it will succeed here too
@@ -1837,7 +1786,7 @@ static int __wil_tx_vring(struct wil6210_priv *wil, struct wil6210_vif *vif,
 	d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_MARK_WB_POS);
 	d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_DMA_IT_POS);
 	*_d = *d;
-	wil_dbg_txrx(wil, "Tx[%2d] desc[%4d]\n", vring_index, i);
+	wil_dbg_txrx(wil, "Tx[%2d] desc[%4d]\n", ring_index, i);
 	wil_hex_dump_txrx("TxD ", DUMP_PREFIX_NONE, 32, 4,
 			  (const void *)d, sizeof(*d), false);
 
@@ -1845,15 +1794,15 @@ static int __wil_tx_vring(struct wil6210_priv *wil, struct wil6210_vif *vif,
 	 * to prevent skb release before accounting
 	 * in case of immediate "tx done"
 	 */
-	vring->ctx[i].skb = skb_get(skb);
+	ring->ctx[i].skb = skb_get(skb);
 
 	/* performance monitoring */
-	used = wil_vring_used_tx(vring);
-	if (wil_val_in_range(wil->vring_idle_trsh,
+	used = wil_ring_used_tx(ring);
+	if (wil_val_in_range(wil->ring_idle_trsh,
 			     used, used + nr_frags + 1)) {
 		txdata->idle += get_cycles() - txdata->last_idle;
 		wil_dbg_txrx(wil,  "Ring[%2d] not idle %d -> %d\n",
-			     vring_index, used, used + nr_frags + 1);
+			     ring_index, used, used + nr_frags + 1);
 	}
 
 	/* Make sure to advance the head only after descriptor update is done.
@@ -1864,17 +1813,17 @@ static int __wil_tx_vring(struct wil6210_priv *wil, struct wil6210_vif *vif,
 	wmb();
 
 	/* advance swhead */
-	wil_vring_advance_head(vring, nr_frags + 1);
-	wil_dbg_txrx(wil, "Tx[%2d] swhead %d -> %d\n", vring_index, swhead,
-		     vring->swhead);
-	trace_wil6210_tx(vring_index, swhead, skb->len, nr_frags);
+	wil_ring_advance_head(ring, nr_frags + 1);
+	wil_dbg_txrx(wil, "Tx[%2d] swhead %d -> %d\n", ring_index, swhead,
+		     ring->swhead);
+	trace_wil6210_tx(ring_index, swhead, skb->len, nr_frags);
 
 	/* make sure all writes to descriptors (shared memory) are done before
 	 * committing them to HW
 	 */
 	wmb();
 
-	wil_w(wil, vring->hwtail, vring->swhead);
+	wil_w(wil, ring->hwtail, ring->swhead);
 
 	return 0;
  dma_error:
@@ -1883,12 +1832,14 @@ static int __wil_tx_vring(struct wil6210_priv *wil, struct wil6210_vif *vif,
 	for (f = 0; f < nr_frags; f++) {
 		struct wil_ctx *ctx;
 
-		i = (swhead + f) % vring->size;
-		ctx = &vring->ctx[i];
-		_d = &vring->va[i].tx;
+		i = (swhead + f) % ring->size;
+		ctx = &ring->ctx[i];
+		_d = &ring->va[i].tx.legacy;
 		*d = *_d;
 		_d->dma.status = TX_DMA_STATUS_DU;
-		wil_txdesc_unmap(dev, d, ctx);
+		wil->txrx_ops.tx_desc_unmap(dev,
+					    (union wil_tx_desc *)d,
+					    ctx);
 
 		memset(ctx, 0, sizeof(*ctx));
 	}
@@ -1896,11 +1847,11 @@ static int __wil_tx_vring(struct wil6210_priv *wil, struct wil6210_vif *vif,
 	return -EINVAL;
 }
 
-static int wil_tx_vring(struct wil6210_priv *wil, struct wil6210_vif *vif,
-			struct vring *vring, struct sk_buff *skb)
+static int wil_tx_ring(struct wil6210_priv *wil, struct wil6210_vif *vif,
+		       struct wil_ring *ring, struct sk_buff *skb)
 {
-	int vring_index = vring - wil->vring_tx;
-	struct vring_tx_data *txdata = &wil->vring_tx_data[vring_index];
+	int ring_index = ring - wil->ring_tx;
+	struct wil_ring_tx_data *txdata = &wil->ring_tx_data[ring_index];
 	int rc;
 
 	spin_lock(&txdata->lock);
@@ -1914,8 +1865,8 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct wil6210_vif *vif,
 		return -EINVAL;
 	}
 
-	rc = (skb_is_gso(skb) ? __wil_tx_vring_tso : __wil_tx_vring)
-	     (wil, vif, vring, skb);
+	rc = (skb_is_gso(skb) ? wil->txrx_ops.tx_ring_tso : __wil_tx_ring)
+	     (wil, vif, ring, skb);
 
 	spin_unlock(&txdata->lock);
 
@@ -1941,7 +1892,7 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct wil6210_vif *vif,
  */
 static inline void __wil_update_net_queues(struct wil6210_priv *wil,
 					   struct wil6210_vif *vif,
-					   struct vring *vring,
+					   struct wil_ring *ring,
 					   bool check_stop)
 {
 	int i;
@@ -1949,9 +1900,9 @@ static inline void __wil_update_net_queues(struct wil6210_priv *wil,
 	if (unlikely(!vif))
 		return;
 
-	if (vring)
+	if (ring)
 		wil_dbg_txrx(wil, "vring %d, mid %d, check_stop=%d, stopped=%d",
-			     (int)(vring - wil->vring_tx), vif->mid, check_stop,
+			     (int)(ring - wil->ring_tx), vif->mid, check_stop,
 			     vif->net_queue_stopped);
 	else
 		wil_dbg_txrx(wil, "check_stop=%d, mid=%d, stopped=%d",
@@ -1962,7 +1913,7 @@ static inline void __wil_update_net_queues(struct wil6210_priv *wil,
 		return;
 
 	if (check_stop) {
-		if (!vring || unlikely(wil_vring_avail_low(vring))) {
+		if (!ring || unlikely(wil_ring_avail_low(ring))) {
 			/* not enough room in the vring */
 			netif_tx_stop_all_queues(vif_to_ndev(vif));
 			vif->net_queue_stopped = true;
@@ -1978,22 +1929,22 @@ static inline void __wil_update_net_queues(struct wil6210_priv *wil,
 
 	/* check wake */
 	for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) {
-		struct vring *cur_vring = &wil->vring_tx[i];
-		struct vring_tx_data *txdata = &wil->vring_tx_data[i];
+		struct wil_ring *cur_ring = &wil->ring_tx[i];
+		struct wil_ring_tx_data *txdata = &wil->ring_tx_data[i];
 
-		if (txdata->mid != vif->mid || !cur_vring->va ||
-		    !txdata->enabled || cur_vring == vring)
+		if (txdata->mid != vif->mid || !cur_ring->va ||
+		    !txdata->enabled || cur_ring == ring)
 			continue;
 
-		if (wil_vring_avail_low(cur_vring)) {
-			wil_dbg_txrx(wil, "vring %d full, can't wake\n",
-				     (int)(cur_vring - wil->vring_tx));
+		if (wil_ring_avail_low(cur_ring)) {
+			wil_dbg_txrx(wil, "ring %d full, can't wake\n",
+				     (int)(cur_ring - wil->ring_tx));
 			return;
 		}
 	}
 
-	if (!vring || wil_vring_avail_high(vring)) {
-		/* enough room in the vring */
+	if (!ring || wil_ring_avail_high(ring)) {
+		/* enough room in the ring */
 		wil_dbg_txrx(wil, "calling netif_tx_wake\n");
 		netif_tx_wake_all_queues(vif_to_ndev(vif));
 		vif->net_queue_stopped = false;
@@ -2001,18 +1952,18 @@ static inline void __wil_update_net_queues(struct wil6210_priv *wil,
 }
 
 void wil_update_net_queues(struct wil6210_priv *wil, struct wil6210_vif *vif,
-			   struct vring *vring, bool check_stop)
+			   struct wil_ring *ring, bool check_stop)
 {
 	spin_lock(&wil->net_queue_lock);
-	__wil_update_net_queues(wil, vif, vring, check_stop);
+	__wil_update_net_queues(wil, vif, ring, check_stop);
 	spin_unlock(&wil->net_queue_lock);
 }
 
 void wil_update_net_queues_bh(struct wil6210_priv *wil, struct wil6210_vif *vif,
-			      struct vring *vring, bool check_stop)
+			      struct wil_ring *ring, bool check_stop)
 {
 	spin_lock_bh(&wil->net_queue_lock);
-	__wil_update_net_queues(wil, vif, vring, check_stop);
+	__wil_update_net_queues(wil, vif, ring, check_stop);
 	spin_unlock_bh(&wil->net_queue_lock);
 }
 
@@ -2022,7 +1973,7 @@ netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 	struct wil6210_priv *wil = vif_to_wil(vif);
 	struct ethhdr *eth = (void *)skb->data;
 	bool bcast = is_multicast_ether_addr(eth->h_dest);
-	struct vring *vring;
+	struct wil_ring *ring;
 	static bool pr_once_fw;
 	int rc;
 
@@ -2048,36 +1999,36 @@ netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 	/* find vring */
 	if (vif->wdev.iftype == NL80211_IFTYPE_STATION && !vif->pbss) {
 		/* in STA mode (ESS), all to same VRING (to AP) */
-		vring = wil_find_tx_vring_sta(wil, vif, skb);
+		ring = wil_find_tx_ring_sta(wil, vif, skb);
 	} else if (bcast) {
 		if (vif->pbss)
 			/* in pbss, no bcast VRING - duplicate skb in
 			 * all stations VRINGs
 			 */
-			vring = wil_find_tx_bcast_2(wil, vif, skb);
+			ring = wil_find_tx_bcast_2(wil, vif, skb);
 		else if (vif->wdev.iftype == NL80211_IFTYPE_AP)
 			/* AP has a dedicated bcast VRING */
-			vring = wil_find_tx_bcast_1(wil, vif, skb);
+			ring = wil_find_tx_bcast_1(wil, vif, skb);
 		else
 			/* unexpected combination, fallback to duplicating
 			 * the skb in all stations VRINGs
 			 */
-			vring = wil_find_tx_bcast_2(wil, vif, skb);
+			ring = wil_find_tx_bcast_2(wil, vif, skb);
 	} else {
 		/* unicast, find specific VRING by dest. address */
-		vring = wil_find_tx_ucast(wil, vif, skb);
+		ring = wil_find_tx_ucast(wil, vif, skb);
 	}
-	if (unlikely(!vring)) {
-		wil_dbg_txrx(wil, "No Tx VRING found for %pM\n", eth->h_dest);
+	if (unlikely(!ring)) {
+		wil_dbg_txrx(wil, "No Tx RING found for %pM\n", eth->h_dest);
 		goto drop;
 	}
 	/* set up vring entry */
-	rc = wil_tx_vring(wil, vif, vring, skb);
+	rc = wil_tx_ring(wil, vif, ring, skb);
 
 	switch (rc) {
 	case 0:
 		/* shall we stop net queues? */
-		wil_update_net_queues_bh(wil, vif, vring, true);
+		wil_update_net_queues_bh(wil, vif, ring, true);
 		/* statistics will be updated on the tx_complete */
 		dev_kfree_skb_any(skb);
 		return NETDEV_TX_OK;
@@ -2093,22 +2044,6 @@ netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 	return NET_XMIT_DROP;
 }
 
-static inline bool wil_need_txstat(struct sk_buff *skb)
-{
-	struct ethhdr *eth = (void *)skb->data;
-
-	return is_unicast_ether_addr(eth->h_dest) && skb->sk &&
-	       (skb_shinfo(skb)->tx_flags & SKBTX_WIFI_STATUS);
-}
-
-static inline void wil_consume_skb(struct sk_buff *skb, bool acked)
-{
-	if (unlikely(wil_need_txstat(skb)))
-		skb_complete_wifi_ack(skb, acked);
-	else
-		acked ? dev_consume_skb_any(skb) : dev_kfree_skb_any(skb);
-}
-
 /**
  * Clean up transmitted skb's from the Tx VRING
  *
@@ -2121,10 +2056,10 @@ int wil_tx_complete(struct wil6210_vif *vif, int ringid)
 	struct wil6210_priv *wil = vif_to_wil(vif);
 	struct net_device *ndev = vif_to_ndev(vif);
 	struct device *dev = wil_to_dev(wil);
-	struct vring *vring = &wil->vring_tx[ringid];
-	struct vring_tx_data *txdata = &wil->vring_tx_data[ringid];
+	struct wil_ring *vring = &wil->ring_tx[ringid];
+	struct wil_ring_tx_data *txdata = &wil->ring_tx_data[ringid];
 	int done = 0;
-	int cid = wil->vring2cid_tid[ringid][0];
+	int cid = wil->ring2cid_tid[ringid][0];
 	struct wil_net_stats *stats = NULL;
 	volatile struct vring_tx_desc *_d;
 	int used_before_complete;
@@ -2142,12 +2077,12 @@ int wil_tx_complete(struct wil6210_vif *vif, int ringid)
 
 	wil_dbg_txrx(wil, "tx_complete: (%d)\n", ringid);
 
-	used_before_complete = wil_vring_used_tx(vring);
+	used_before_complete = wil_ring_used_tx(vring);
 
 	if (cid < WIL6210_MAX_CID)
 		stats = &wil->sta[cid].stats;
 
-	while (!wil_vring_is_empty(vring)) {
+	while (!wil_ring_is_empty(vring)) {
 		int new_swtail;
 		struct wil_ctx *ctx = &vring->ctx[vring->swtail];
 		/**
@@ -2158,7 +2093,7 @@ int wil_tx_complete(struct wil6210_vif *vif, int ringid)
 		int lf = (vring->swtail + ctx->nr_frags) % vring->size;
 		/* TODO: check we are not past head */
 
-		_d = &vring->va[lf].tx;
+		_d = &vring->va[lf].tx.legacy;
 		if (unlikely(!(_d->dma.status & TX_DMA_STATUS_DU)))
 			break;
 
@@ -2170,7 +2105,7 @@ int wil_tx_complete(struct wil6210_vif *vif, int ringid)
 
 			ctx = &vring->ctx[vring->swtail];
 			skb = ctx->skb;
-			_d = &vring->va[vring->swtail].tx;
+			_d = &vring->va[vring->swtail].tx.legacy;
 
 			*d = *_d;
 
@@ -2184,7 +2119,9 @@ int wil_tx_complete(struct wil6210_vif *vif, int ringid)
 			wil_hex_dump_txrx("TxCD ", DUMP_PREFIX_NONE, 32, 4,
 					  (const void *)d, sizeof(*d), false);
 
-			wil_txdesc_unmap(dev, d, ctx);
+			wil->txrx_ops.tx_desc_unmap(dev,
+						    (union wil_tx_desc *)d,
+						    ctx);
 
 			if (skb) {
 				if (likely(d->dma.error == 0)) {
@@ -2203,7 +2140,7 @@ int wil_tx_complete(struct wil6210_vif *vif, int ringid)
 			}
 			memset(ctx, 0, sizeof(*ctx));
 			/* Make sure the ctx is zeroed before updating the tail
-			 * to prevent a case where wil_tx_vring will see
+			 * to prevent a case where wil_tx_ring will see
 			 * this descriptor as used and handle it before ctx zero
 			 * is completed.
 			 */
@@ -2213,14 +2150,14 @@ int wil_tx_complete(struct wil6210_vif *vif, int ringid)
 			 *   so hardware will not try to process this desc.,
 			 * - rest of descriptor will be initialized on Tx.
 			 */
-			vring->swtail = wil_vring_next_tail(vring);
+			vring->swtail = wil_ring_next_tail(vring);
 			done++;
 		}
 	}
 
 	/* performance monitoring */
-	used_new = wil_vring_used_tx(vring);
-	if (wil_val_in_range(wil->vring_idle_trsh,
+	used_new = wil_ring_used_tx(vring);
+	if (wil_val_in_range(wil->ring_idle_trsh,
 			     used_new, used_before_complete)) {
 		wil_dbg_txrx(wil, "Ring[%2d] idle %d -> %d\n",
 			     ringid, used_before_complete, used_new);
@@ -2233,3 +2170,47 @@ int wil_tx_complete(struct wil6210_vif *vif, int ringid)
 
 	return done;
 }
+
+static inline int wil_tx_init(struct wil6210_priv *wil)
+{
+	return 0;
+}
+
+static inline void wil_tx_fini(struct wil6210_priv *wil) {}
+
+static void wil_get_reorder_params(struct wil6210_priv *wil,
+				   struct sk_buff *skb, int *tid, int *cid,
+				   int *mid, u16 *seq, int *mcast)
+{
+	struct vring_rx_desc *d = wil_skb_rxdesc(skb);
+
+	*tid = wil_rxdesc_tid(d);
+	*cid = wil_rxdesc_cid(d);
+	*mid = wil_rxdesc_mid(d);
+	*seq = wil_rxdesc_seq(d);
+	*mcast = wil_rxdesc_mcast(d);
+}
+
+void wil_init_txrx_ops_legacy_dma(struct wil6210_priv *wil)
+{
+	wil->txrx_ops.configure_interrupt_moderation =
+		wil_configure_interrupt_moderation;
+	/* TX ops */
+	wil->txrx_ops.tx_desc_map = wil_tx_desc_map;
+	wil->txrx_ops.tx_desc_unmap = wil_txdesc_unmap;
+	wil->txrx_ops.tx_ring_tso = __wil_tx_vring_tso;
+	wil->txrx_ops.ring_init_tx = wil_vring_init_tx;
+	wil->txrx_ops.ring_fini_tx = wil_vring_free;
+	wil->txrx_ops.ring_init_bcast = wil_vring_init_bcast;
+	wil->txrx_ops.tx_init = wil_tx_init;
+	wil->txrx_ops.tx_fini = wil_tx_fini;
+	/* RX ops */
+	wil->txrx_ops.rx_init = wil_rx_init;
+	wil->txrx_ops.wmi_addba_rx_resp = wmi_addba_rx_resp;
+	wil->txrx_ops.get_reorder_params = wil_get_reorder_params;
+	wil->txrx_ops.get_netif_rx_params =
+		wil_get_netif_rx_params;
+	wil->txrx_ops.rx_crypto_check = wil_rx_crypto_check;
+	wil->txrx_ops.is_rx_idle = wil_is_rx_idle;
+	wil->txrx_ops.rx_fini = wil_rx_fini;
+}
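
wil_init_txrx_ops_legacy_dma() above is the legacy half of a function-pointer dispatch layer: the hot paths (wil_start_xmit(), wil_netif_rx_any(), the TSO path) now call through wil->txrx_ops, and either this legacy-DMA set or the enhanced-DMA set from txrx_edma.c below is installed once at device init. A minimal sketch of the pattern, using hypothetical names rather than the driver's actual ops structure:

#include <stdbool.h>

struct sk_buff;
struct my_priv;

/* One set of entry points, two back-ends (legacy DMA vs. enhanced DMA) */
struct my_txrx_ops {
	int  (*rx_init)(struct my_priv *priv, unsigned short ring_size);
	void (*rx_fini)(struct my_priv *priv);
	int  (*rx_crypto_check)(struct my_priv *priv, struct sk_buff *skb);
	bool (*is_rx_idle)(struct my_priv *priv);
};

struct my_priv {
	bool use_enhanced_dma_hw;	/* set from the detected HW generation */
	struct my_txrx_ops txrx_ops;	/* resolved once, called on hot paths */
};

/* Each back-end provides its own table; the choice is made exactly once,
 * so per-packet code never branches on the hardware generation.
 */
void my_init_txrx_ops(struct my_priv *priv,
		      const struct my_txrx_ops *legacy,
		      const struct my_txrx_ops *edma)
{
	priv->txrx_ops = priv->use_enhanced_dma_hw ? *edma : *legacy;
}
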
diff --git a/drivers/net/wireless/ath/wil6210/txrx.h b/drivers/net/wireless/ath/wil6210/txrx.h
index 5f07717acc2c796df2d33ab4c283df02a9110a00..f361423628f54352780aa682e88709c5043d394d 100644
--- a/drivers/net/wireless/ath/wil6210/txrx.h
+++ b/drivers/net/wireless/ath/wil6210/txrx.h
@@ -18,6 +18,9 @@
 #ifndef WIL6210_TXRX_H
 #define WIL6210_TXRX_H
 
+#include "wil6210.h"
+#include "txrx_edma.h"
+
 #define BUF_SW_OWNED    (1)
 #define BUF_HW_OWNED    (0)
 
@@ -29,19 +32,13 @@
 
 /* Tx/Rx path */
 
-/* Common representation of physical address in Vring */
-struct vring_dma_addr {
-	__le32 addr_low;
-	__le16 addr_high;
-} __packed;
-
-static inline dma_addr_t wil_desc_addr(struct vring_dma_addr *addr)
+static inline dma_addr_t wil_desc_addr(struct wil_ring_dma_addr *addr)
 {
 	return le32_to_cpu(addr->addr_low) |
 			   ((u64)le16_to_cpu(addr->addr_high) << 32);
 }
 
-static inline void wil_desc_addr_set(struct vring_dma_addr *addr,
+static inline void wil_desc_addr_set(struct wil_ring_dma_addr *addr,
 				     dma_addr_t pa)
 {
 	addr->addr_low = cpu_to_le32(lower_32_bits(pa));
@@ -294,7 +291,7 @@ struct vring_tx_mac {
  */
 struct vring_tx_dma {
 	u32 d0;
-	struct vring_dma_addr addr;
+	struct wil_ring_dma_addr addr;
 	u8  ip_length;
 	u8  b11;       /* 0..6: mac_length; 7:ip_version */
 	u8  error;     /* 0..2: err; 3..7: reserved; */
@@ -428,7 +425,7 @@ struct vring_rx_mac {
 
 struct vring_rx_dma {
 	u32 d0;
-	struct vring_dma_addr addr;
+	struct wil_ring_dma_addr addr;
 	u8  ip_length;
 	u8  b11;
 	u8  error;
@@ -441,14 +438,24 @@ struct vring_tx_desc {
 	struct vring_tx_dma dma;
 } __packed;
 
+union wil_tx_desc {
+	struct vring_tx_desc legacy;
+	struct wil_tx_enhanced_desc enhanced;
+} __packed;
+
 struct vring_rx_desc {
 	struct vring_rx_mac mac;
 	struct vring_rx_dma dma;
 } __packed;
 
-union vring_desc {
-	struct vring_tx_desc tx;
-	struct vring_rx_desc rx;
+union wil_rx_desc {
+	struct vring_rx_desc legacy;
+	struct wil_rx_enhanced_desc enhanced;
+} __packed;
+
+union wil_ring_desc {
+	union wil_tx_desc tx;
+	union wil_rx_desc rx;
 } __packed;
 
 static inline int wil_rxdesc_tid(struct vring_rx_desc *d)
@@ -528,6 +535,76 @@ static inline struct vring_rx_desc *wil_skb_rxdesc(struct sk_buff *skb)
 	return (void *)skb->cb;
 }
 
+static inline int wil_ring_is_empty(struct wil_ring *ring)
+{
+	return ring->swhead == ring->swtail;
+}
+
+static inline u32 wil_ring_next_tail(struct wil_ring *ring)
+{
+	return (ring->swtail + 1) % ring->size;
+}
+
+static inline void wil_ring_advance_head(struct wil_ring *ring, int n)
+{
+	ring->swhead = (ring->swhead + n) % ring->size;
+}
+
+static inline int wil_ring_is_full(struct wil_ring *ring)
+{
+	return wil_ring_next_tail(ring) == ring->swhead;
+}
+
+static inline bool wil_need_txstat(struct sk_buff *skb)
+{
+	struct ethhdr *eth = (void *)skb->data;
+
+	return is_unicast_ether_addr(eth->h_dest) && skb->sk &&
+	       (skb_shinfo(skb)->tx_flags & SKBTX_WIFI_STATUS);
+}
+
+static inline void wil_consume_skb(struct sk_buff *skb, bool acked)
+{
+	if (unlikely(wil_need_txstat(skb)))
+		skb_complete_wifi_ack(skb, acked);
+	else
+		acked ? dev_consume_skb_any(skb) : dev_kfree_skb_any(skb);
+}
+
+/* Used space in Tx ring */
+static inline int wil_ring_used_tx(struct wil_ring *ring)
+{
+	u32 swhead = ring->swhead;
+	u32 swtail = ring->swtail;
+
+	return (ring->size + swhead - swtail) % ring->size;
+}
+
+/* Available space in Tx ring */
+static inline int wil_ring_avail_tx(struct wil_ring *ring)
+{
+	return ring->size - wil_ring_used_tx(ring) - 1;
+}
+
+static inline int wil_get_min_tx_ring_id(struct wil6210_priv *wil)
+{
+	/* In Enhanced DMA ring 0 is reserved for RX */
+	return wil->use_enhanced_dma_hw ? 1 : 0;
+}
+
+/* similar to ieee80211_ version, but FC contain only 1-st byte */
+static inline int wil_is_back_req(u8 fc)
+{
+	return (fc & (IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
+	       (IEEE80211_FTYPE_CTL | IEEE80211_STYPE_BACK_REQ);
+}
+
+/* wil_val_in_range - check if value in [min,max) */
+static inline bool wil_val_in_range(int val, int min, int max)
+{
+	return val >= min && val < max;
+}
+
 void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev);
 void wil_rx_reorder(struct wil6210_priv *wil, struct sk_buff *skb);
 void wil_rx_bar(struct wil6210_priv *wil, struct wil6210_vif *vif,
@@ -536,5 +613,7 @@ struct wil_tid_ampdu_rx *wil_tid_ampdu_rx_alloc(struct wil6210_priv *wil,
 						int size, u16 ssn);
 void wil_tid_ampdu_rx_free(struct wil6210_priv *wil,
 			   struct wil_tid_ampdu_rx *r);
+void wil_tx_data_init(struct wil_ring_tx_data *txdata);
+void wil_init_txrx_ops_legacy_dma(struct wil6210_priv *wil);
 
 #endif /* WIL6210_TXRX_H */
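
The wil_ring_used_tx()/wil_ring_avail_tx() inlines added above implement standard circular-ring bookkeeping: swhead is where the driver writes the next descriptor, swtail is where completions are reclaimed, the ring is empty when the two are equal, and one slot is intentionally left unused so a full ring never looks empty. A standalone worked example (hypothetical ring_t, not the driver's struct wil_ring):

#include <assert.h>

struct ring_t {
	unsigned int size;	/* number of descriptor slots */
	unsigned int swhead;	/* producer index (next descriptor to write) */
	unsigned int swtail;	/* consumer index (next completion to reclaim) */
};

/* Descriptors currently owned by hardware / awaiting completion */
static unsigned int ring_used(const struct ring_t *r)
{
	return (r->size + r->swhead - r->swtail) % r->size;
}

/* Free slots; one slot stays empty so swhead == swtail always means empty */
static unsigned int ring_avail(const struct ring_t *r)
{
	return r->size - ring_used(r) - 1;
}

int main(void)
{
	struct ring_t r = { .size = 8, .swhead = 2, .swtail = 6 };

	assert(ring_used(&r) == 4);	/* slots 6, 7, 0, 1 are in flight */
	assert(ring_avail(&r) == 3);	/* 8 - 4 - 1: slots 2, 3, 4 free */
	return 0;
}
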
diff --git a/drivers/net/wireless/ath/wil6210/txrx_edma.c b/drivers/net/wireless/ath/wil6210/txrx_edma.c
new file mode 100644
index 0000000000000000000000000000000000000000..95f38e65d969a6ae86dcaec6ccc94bdb21805dd7
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/txrx_edma.c
@@ -0,0 +1,1598 @@
+/*
+ * Copyright (c) 2012-2018 The Linux Foundation. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/moduleparam.h>
+#include <linux/prefetch.h>
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include "wil6210.h"
+#include "txrx_edma.h"
+#include "txrx.h"
+#include "trace.h"
+
+#define WIL_EDMA_MAX_DATA_OFFSET (2)
+
+static void wil_tx_desc_unmap_edma(struct device *dev,
+				   union wil_tx_desc *desc,
+				   struct wil_ctx *ctx)
+{
+	struct wil_tx_enhanced_desc *d = (struct wil_tx_enhanced_desc *)desc;
+	dma_addr_t pa = wil_tx_desc_get_addr_edma(&d->dma);
+	u16 dmalen = le16_to_cpu(d->dma.length);
+
+	switch (ctx->mapped_as) {
+	case wil_mapped_as_single:
+		dma_unmap_single(dev, pa, dmalen, DMA_TO_DEVICE);
+		break;
+	case wil_mapped_as_page:
+		dma_unmap_page(dev, pa, dmalen, DMA_TO_DEVICE);
+		break;
+	default:
+		break;
+	}
+}
+
+static int wil_find_free_sring(struct wil6210_priv *wil)
+{
+	int i;
+
+	for (i = 0; i < WIL6210_MAX_STATUS_RINGS; i++) {
+		if (!wil->srings[i].va)
+			return i;
+	}
+
+	return -EINVAL;
+}
+
+static void wil_sring_free(struct wil6210_priv *wil,
+			   struct wil_status_ring *sring)
+{
+	struct device *dev = wil_to_dev(wil);
+	size_t sz;
+
+	if (!sring || !sring->va)
+		return;
+
+	sz = sring->elem_size * sring->size;
+
+	wil_dbg_misc(wil, "status_ring_free, size(bytes)=%zu, 0x%p:%pad\n",
+		     sz, sring->va, &sring->pa);
+
+	dma_free_coherent(dev, sz, (void *)sring->va, sring->pa);
+	sring->pa = 0;
+	sring->va = NULL;
+}
+
+static int wil_sring_alloc(struct wil6210_priv *wil,
+			   struct wil_status_ring *sring)
+{
+	struct device *dev = wil_to_dev(wil);
+	size_t sz = sring->elem_size * sring->size;
+
+	wil_dbg_misc(wil, "status_ring_alloc: size=%zu\n", sz);
+
+	if (sz == 0) {
+		wil_err(wil, "Cannot allocate a zero size status ring\n");
+		return -EINVAL;
+	}
+
+	sring->swhead = 0;
+
+	/* Status messages are allocated and initialized to 0. This is necessary
+	 * since DR bit should be initialized to 0.
+	 */
+	sring->va = dma_zalloc_coherent(dev, sz, &sring->pa, GFP_KERNEL);
+	if (!sring->va)
+		return -ENOMEM;
+
+	wil_dbg_misc(wil, "status_ring[%d] 0x%p:%pad\n", sring->size, sring->va,
+		     &sring->pa);
+
+	return 0;
+}
+
+static int wil_tx_init_edma(struct wil6210_priv *wil)
+{
+	int ring_id = wil_find_free_sring(wil);
+	struct wil_status_ring *sring;
+	int rc;
+	u16 status_ring_size;
+
+	if (wil->tx_status_ring_order < WIL_SRING_SIZE_ORDER_MIN ||
+	    wil->tx_status_ring_order > WIL_SRING_SIZE_ORDER_MAX)
+		wil->tx_status_ring_order = WIL_TX_SRING_SIZE_ORDER_DEFAULT;
+
+	status_ring_size = 1 << wil->tx_status_ring_order;
+
+	wil_dbg_misc(wil, "init TX sring: size=%u, ring_id=%u\n",
+		     status_ring_size, ring_id);
+
+	if (ring_id < 0)
+		return ring_id;
+
+	/* Allocate Tx status ring. Tx descriptor rings will be
+	 * allocated on WMI connect event
+	 */
+	sring = &wil->srings[ring_id];
+
+	sring->is_rx = false;
+	sring->size = status_ring_size;
+	sring->elem_size = sizeof(struct wil_ring_tx_status);
+	rc = wil_sring_alloc(wil, sring);
+	if (rc)
+		return rc;
+
+	rc = wil_wmi_tx_sring_cfg(wil, ring_id);
+	if (rc)
+		goto out_free;
+
+	sring->desc_rdy_pol = 1;
+	wil->tx_sring_idx = ring_id;
+
+	return 0;
+out_free:
+	wil_sring_free(wil, sring);
+	return rc;
+}
+
+/**
+ * Allocate one skb for Rx descriptor RING
+ */
+static int wil_ring_alloc_skb_edma(struct wil6210_priv *wil,
+				   struct wil_ring *ring, u32 i)
+{
+	struct device *dev = wil_to_dev(wil);
+	unsigned int sz = wil->rx_buf_len + ETH_HLEN +
+		WIL_EDMA_MAX_DATA_OFFSET;
+	dma_addr_t pa;
+	u16 buff_id;
+	struct list_head *active = &wil->rx_buff_mgmt.active;
+	struct list_head *free = &wil->rx_buff_mgmt.free;
+	struct wil_rx_buff *rx_buff;
+	struct wil_rx_buff *buff_arr = wil->rx_buff_mgmt.buff_arr;
+	struct sk_buff *skb;
+	struct wil_rx_enhanced_desc dd, *d = &dd;
+	struct wil_rx_enhanced_desc *_d = (struct wil_rx_enhanced_desc *)
+		&ring->va[i].rx.enhanced;
+
+	if (unlikely(list_empty(free))) {
+		wil->rx_buff_mgmt.free_list_empty_cnt++;
+		return -EAGAIN;
+	}
+
+	skb = dev_alloc_skb(sz);
+	if (unlikely(!skb))
+		return -ENOMEM;
+
+	skb_put(skb, sz);
+
+	pa = dma_map_single(dev, skb->data, skb->len, DMA_FROM_DEVICE);
+	if (unlikely(dma_mapping_error(dev, pa))) {
+		kfree_skb(skb);
+		return -ENOMEM;
+	}
+
+	/* Get the buffer ID - the index of the rx buffer in the buff_arr */
+	rx_buff = list_first_entry(free, struct wil_rx_buff, list);
+	buff_id = rx_buff->id;
+
+	/* Move a buffer from the free list to the active list */
+	list_move(&rx_buff->list, active);
+
+	buff_arr[buff_id].skb = skb;
+
+	wil_desc_set_addr_edma(&d->dma.addr, &d->dma.addr_high_high, pa);
+	d->dma.length = cpu_to_le16(sz);
+	d->mac.buff_id = cpu_to_le16(buff_id);
+	*_d = *d;
+
+	/* Save the physical address in skb->cb for later use in dma_unmap */
+	memcpy(skb->cb, &pa, sizeof(pa));
+
+	return 0;
+}
+
+static inline
+void wil_get_next_rx_status_msg(struct wil_status_ring *sring, void *msg)
+{
+	memcpy(msg, (void *)(sring->va + (sring->elem_size * sring->swhead)),
+	       sring->elem_size);
+}
+
+static inline void wil_sring_advance_swhead(struct wil_status_ring *sring)
+{
+	sring->swhead = (sring->swhead + 1) % sring->size;
+	if (sring->swhead == 0)
+		sring->desc_rdy_pol = 1 - sring->desc_rdy_pol;
+}
+
+static int wil_rx_refill_edma(struct wil6210_priv *wil)
+{
+	struct wil_ring *ring = &wil->ring_rx;
+	u32 next_head;
+	int rc = 0;
+	u32 swtail = *ring->edma_rx_swtail.va;
+
+	for (; next_head = wil_ring_next_head(ring), (next_head != swtail);
+	     ring->swhead = next_head) {
+		rc = wil_ring_alloc_skb_edma(wil, ring, ring->swhead);
+		if (unlikely(rc)) {
+			if (rc == -EAGAIN)
+				wil_dbg_txrx(wil, "No free buffer ID found\n");
+			else
+				wil_err_ratelimited(wil,
+						    "Error %d in refill desc[%d]\n",
+						    rc, ring->swhead);
+			break;
+		}
+	}
+
+	/* make sure all writes to descriptors (shared memory) are done before
+	 * committing them to HW
+	 */
+	wmb();
+
+	wil_w(wil, ring->hwtail, ring->swhead);
+
+	return rc;
+}
+
+static void wil_move_all_rx_buff_to_free_list(struct wil6210_priv *wil,
+					      struct wil_ring *ring)
+{
+	struct device *dev = wil_to_dev(wil);
+	u32 next_tail;
+	u32 swhead = (ring->swhead + 1) % ring->size;
+	dma_addr_t pa;
+	u16 dmalen;
+
+	for (; next_tail = wil_ring_next_tail(ring), (next_tail != swhead);
+	     ring->swtail = next_tail) {
+		struct wil_rx_enhanced_desc dd, *d = &dd;
+		struct wil_rx_enhanced_desc *_d =
+			(struct wil_rx_enhanced_desc *)
+			&ring->va[ring->swtail].rx.enhanced;
+		struct sk_buff *skb;
+		u16 buff_id;
+
+		*d = *_d;
+		pa = wil_rx_desc_get_addr_edma(&d->dma);
+		dmalen = le16_to_cpu(d->dma.length);
+		dma_unmap_single(dev, pa, dmalen, DMA_FROM_DEVICE);
+
+		/* Extract the SKB from the rx_buff management array */
+		buff_id = __le16_to_cpu(d->mac.buff_id);
+		if (buff_id >= wil->rx_buff_mgmt.size) {
+			wil_err(wil, "invalid buff_id %d\n", buff_id);
+			continue;
+		}
+		skb = wil->rx_buff_mgmt.buff_arr[buff_id].skb;
+		wil->rx_buff_mgmt.buff_arr[buff_id].skb = NULL;
+		if (unlikely(!skb))
+			wil_err(wil, "No Rx skb at buff_id %d\n", buff_id);
+		else
+			kfree_skb(skb);
+
+		/* Move the buffer from the active to the free list */
+		list_move(&wil->rx_buff_mgmt.buff_arr[buff_id].list,
+			  &wil->rx_buff_mgmt.free);
+	}
+}
+
+static void wil_free_rx_buff_arr(struct wil6210_priv *wil)
+{
+	struct wil_ring *ring = &wil->ring_rx;
+
+	if (!wil->rx_buff_mgmt.buff_arr)
+		return;
+
+	/* Move all the buffers to the free list in case active list is
+	 * not empty in order to release all SKBs before deleting the array
+	 */
+	wil_move_all_rx_buff_to_free_list(wil, ring);
+
+	kfree(wil->rx_buff_mgmt.buff_arr);
+	wil->rx_buff_mgmt.buff_arr = NULL;
+}
+
+static int wil_init_rx_buff_arr(struct wil6210_priv *wil,
+				size_t size)
+{
+	struct wil_rx_buff *buff_arr;
+	struct list_head *active = &wil->rx_buff_mgmt.active;
+	struct list_head *free = &wil->rx_buff_mgmt.free;
+	int i;
+
+	wil->rx_buff_mgmt.buff_arr = kcalloc(size, sizeof(struct wil_rx_buff),
+					     GFP_KERNEL);
+	if (!wil->rx_buff_mgmt.buff_arr)
+		return -ENOMEM;
+
+	/* Set list heads */
+	INIT_LIST_HEAD(active);
+	INIT_LIST_HEAD(free);
+
+	/* Linkify the list */
+	buff_arr = wil->rx_buff_mgmt.buff_arr;
+	for (i = 0; i < size; i++) {
+		list_add(&buff_arr[i].list, free);
+		buff_arr[i].id = i;
+	}
+
+	wil->rx_buff_mgmt.size = size;
+
+	return 0;
+}
+
+static int wil_init_rx_sring(struct wil6210_priv *wil,
+			     u16 status_ring_size,
+			     size_t elem_size,
+			     u16 ring_id)
+{
+	struct wil_status_ring *sring = &wil->srings[ring_id];
+	int rc;
+
+	wil_dbg_misc(wil, "init RX sring: size=%u, ring_id=%u\n", sring->size,
+		     ring_id);
+
+	memset(&sring->rx_data, 0, sizeof(sring->rx_data));
+
+	sring->is_rx = true;
+	sring->size = status_ring_size;
+	sring->elem_size = elem_size;
+	rc = wil_sring_alloc(wil, sring);
+	if (rc)
+		return rc;
+
+	rc = wil_wmi_rx_sring_add(wil, ring_id);
+	if (rc)
+		goto out_free;
+
+	sring->desc_rdy_pol = 1;
+
+	return 0;
+out_free:
+	wil_sring_free(wil, sring);
+	return rc;
+}
+
+static int wil_ring_alloc_desc_ring(struct wil6210_priv *wil,
+				    struct wil_ring *ring)
+{
+	struct device *dev = wil_to_dev(wil);
+	size_t sz = ring->size * sizeof(ring->va[0]);
+
+	wil_dbg_misc(wil, "alloc_desc_ring:\n");
+
+	BUILD_BUG_ON(sizeof(ring->va[0]) != 32);
+
+	ring->swhead = 0;
+	ring->swtail = 0;
+	ring->ctx = kcalloc(ring->size, sizeof(ring->ctx[0]), GFP_KERNEL);
+	if (!ring->ctx)
+		goto err;
+
+	ring->va = dma_zalloc_coherent(dev, sz, &ring->pa, GFP_KERNEL);
+	if (!ring->va)
+		goto err_free_ctx;
+
+	if (ring->is_rx) {
+		sz = sizeof(*ring->edma_rx_swtail.va);
+		ring->edma_rx_swtail.va =
+			dma_zalloc_coherent(dev, sz, &ring->edma_rx_swtail.pa,
+					    GFP_KERNEL);
+		if (!ring->edma_rx_swtail.va)
+			goto err_free_va;
+	}
+
+	wil_dbg_misc(wil, "%s ring[%d] 0x%p:%pad 0x%p\n",
+		     ring->is_rx ? "RX" : "TX",
+		     ring->size, ring->va, &ring->pa, ring->ctx);
+
+	return 0;
+err_free_va:
+	dma_free_coherent(dev, ring->size * sizeof(ring->va[0]),
+			  (void *)ring->va, ring->pa);
+	ring->va = NULL;
+err_free_ctx:
+	kfree(ring->ctx);
+	ring->ctx = NULL;
+err:
+	return -ENOMEM;
+}
+
+static void wil_ring_free_edma(struct wil6210_priv *wil, struct wil_ring *ring)
+{
+	struct device *dev = wil_to_dev(wil);
+	size_t sz;
+	int ring_index = 0;
+
+	if (!ring->va)
+		return;
+
+	sz = ring->size * sizeof(ring->va[0]);
+
+	lockdep_assert_held(&wil->mutex);
+	if (ring->is_rx) {
+		wil_dbg_misc(wil, "free Rx ring [%d] 0x%p:%pad 0x%p\n",
+			     ring->size, ring->va,
+			     &ring->pa, ring->ctx);
+
+		wil_move_all_rx_buff_to_free_list(wil, ring);
+		goto out;
+	}
+
+	/* TX ring */
+	ring_index = ring - wil->ring_tx;
+
+	wil_dbg_misc(wil, "free Tx ring %d [%d] 0x%p:%pad 0x%p\n",
+		     ring_index, ring->size, ring->va,
+		     &ring->pa, ring->ctx);
+
+	while (!wil_ring_is_empty(ring)) {
+		struct wil_ctx *ctx;
+
+		struct wil_tx_enhanced_desc dd, *d = &dd;
+		struct wil_tx_enhanced_desc *_d =
+			(struct wil_tx_enhanced_desc *)
+			&ring->va[ring->swtail].tx.enhanced;
+
+		ctx = &ring->ctx[ring->swtail];
+		if (!ctx) {
+			wil_dbg_txrx(wil,
+				     "ctx(%d) was already completed\n",
+				     ring->swtail);
+			ring->swtail = wil_ring_next_tail(ring);
+			continue;
+		}
+		*d = *_d;
+		wil_tx_desc_unmap_edma(dev, (union wil_tx_desc *)d, ctx);
+		if (ctx->skb)
+			dev_kfree_skb_any(ctx->skb);
+		ring->swtail = wil_ring_next_tail(ring);
+	}
+
+out:
+	dma_free_coherent(dev, sz, (void *)ring->va, ring->pa);
+	kfree(ring->ctx);
+	ring->pa = 0;
+	ring->va = NULL;
+	ring->ctx = NULL;
+}
+
+static int wil_init_rx_desc_ring(struct wil6210_priv *wil, u16 desc_ring_size,
+				 int status_ring_id)
+{
+	struct wil_ring *ring = &wil->ring_rx;
+	int rc;
+
+	wil_dbg_misc(wil, "init RX desc ring\n");
+
+	ring->size = desc_ring_size;
+	ring->is_rx = true;
+	rc = wil_ring_alloc_desc_ring(wil, ring);
+	if (rc)
+		return rc;
+
+	rc = wil_wmi_rx_desc_ring_add(wil, status_ring_id);
+	if (rc)
+		goto out_free;
+
+	return 0;
+out_free:
+	wil_ring_free_edma(wil, ring);
+	return rc;
+}
+
+static void wil_get_reorder_params_edma(struct wil6210_priv *wil,
+					struct sk_buff *skb, int *tid,
+					int *cid, int *mid, u16 *seq,
+					int *mcast)
+{
+	struct wil_rx_status_extended *s = wil_skb_rxstatus(skb);
+
+	*tid = wil_rx_status_get_tid(s);
+	*cid = wil_rx_status_get_cid(s);
+	*mid = wil_rx_status_get_mid(s);
+	*seq = le16_to_cpu(wil_rx_status_get_seq(wil, s));
+	*mcast = wil_rx_status_get_mcast(s);
+}
+
+static void wil_get_netif_rx_params_edma(struct sk_buff *skb, int *cid,
+					 int *security)
+{
+	struct wil_rx_status_extended *s = wil_skb_rxstatus(skb);
+
+	*cid = wil_rx_status_get_cid(s);
+	*security = wil_rx_status_get_security(s);
+}
+
+static int wil_rx_crypto_check_edma(struct wil6210_priv *wil,
+				    struct sk_buff *skb)
+{
+	struct wil_rx_status_extended *st;
+	int cid, tid, key_id, mc;
+	struct wil_sta_info *s;
+	struct wil_tid_crypto_rx *c;
+	struct wil_tid_crypto_rx_single *cc;
+	const u8 *pn;
+
+	/* In HW reorder, HW is responsible for crypto check */
+	if (wil->use_rx_hw_reordering)
+		return 0;
+
+	st = wil_skb_rxstatus(skb);
+
+	cid = wil_rx_status_get_cid(st);
+	tid = wil_rx_status_get_tid(st);
+	key_id = wil_rx_status_get_key_id(st);
+	mc = wil_rx_status_get_mcast(st);
+	s = &wil->sta[cid];
+	c = mc ? &s->group_crypto_rx : &s->tid_crypto_rx[tid];
+	cc = &c->key_id[key_id];
+	pn = (u8 *)&st->ext.pn_15_0;
+
+	if (!cc->key_set) {
+		wil_err_ratelimited(wil,
+				    "Key missing. CID %d TID %d MCast %d KEY_ID %d\n",
+				    cid, tid, mc, key_id);
+		return -EINVAL;
+	}
+
+	if (reverse_memcmp(pn, cc->pn, IEEE80211_GCMP_PN_LEN) <= 0) {
+		wil_err_ratelimited(wil,
+				    "Replay attack. CID %d TID %d MCast %d KEY_ID %d PN %6phN last %6phN\n",
+				    cid, tid, mc, key_id, pn, cc->pn);
+		return -EINVAL;
+	}
+	memcpy(cc->pn, pn, IEEE80211_GCMP_PN_LEN);
+
+	return 0;
+}
+
+static bool wil_is_rx_idle_edma(struct wil6210_priv *wil)
+{
+	struct wil_status_ring *sring;
+	struct wil_rx_status_extended msg1;
+	void *msg = &msg1;
+	u8 dr_bit;
+	int i;
+
+	for (i = 0; i < wil->num_rx_status_rings; i++) {
+		sring = &wil->srings[i];
+		if (!sring->va)
+			continue;
+
+		wil_get_next_rx_status_msg(sring, msg);
+		dr_bit = wil_rx_status_get_desc_rdy_bit(msg);
+
+		/* Check if there are unhandled RX status messages */
+		if (dr_bit == sring->desc_rdy_pol)
+			return false;
+	}
+
+	return true;
+}
+
+static void wil_rx_buf_len_init_edma(struct wil6210_priv *wil)
+{
+	wil->rx_buf_len = rx_large_buf ?
+		WIL_MAX_ETH_MTU : TXRX_BUF_LEN_DEFAULT - WIL_MAX_MPDU_OVERHEAD;
+}
+
+static int wil_rx_init_edma(struct wil6210_priv *wil, u16 desc_ring_size)
+{
+	u16 status_ring_size;
+	struct wil_ring *ring = &wil->ring_rx;
+	int rc;
+	size_t elem_size = wil->use_compressed_rx_status ?
+		sizeof(struct wil_rx_status_compressed) :
+		sizeof(struct wil_rx_status_extended);
+	int i;
+	u16 max_rx_pl_per_desc;
+
+	/* In SW reorder one must use extended status messages */
+	if (wil->use_compressed_rx_status && !wil->use_rx_hw_reordering) {
+		wil_err(wil,
+			"compressed RX status cannot be used with SW reorder\n");
+		return -EINVAL;
+	}
+
+	if (wil->rx_status_ring_order < WIL_SRING_SIZE_ORDER_MIN ||
+	    wil->rx_status_ring_order > WIL_SRING_SIZE_ORDER_MAX)
+		wil->rx_status_ring_order = WIL_RX_SRING_SIZE_ORDER_DEFAULT;
+
+	status_ring_size = 1 << wil->rx_status_ring_order;
+
+	wil_dbg_misc(wil,
+		     "rx_init, desc_ring_size=%u, status_ring_size=%u, elem_size=%zu\n",
+		     desc_ring_size, status_ring_size, elem_size);
+
+	wil_rx_buf_len_init_edma(wil);
+
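+	/* Maximum payload size per RX descriptor: buffer length plus the
+	 * Ethernet header and the worst-case HW data offset
+	 */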
+	max_rx_pl_per_desc = wil->rx_buf_len + ETH_HLEN +
+		WIL_EDMA_MAX_DATA_OFFSET;
+
+	/* Use debugfs dbg_num_rx_srings if set, reserve one sring for TX */
+	if (wil->num_rx_status_rings > WIL6210_MAX_STATUS_RINGS - 1)
+		wil->num_rx_status_rings = WIL6210_MAX_STATUS_RINGS - 1;
+
+	wil_dbg_misc(wil, "rx_init: allocate %d status rings\n",
+		     wil->num_rx_status_rings);
+
+	rc = wil_wmi_cfg_def_rx_offload(wil, max_rx_pl_per_desc);
+	if (rc)
+		return rc;
+
+	/* Allocate status ring */
+	for (i = 0; i < wil->num_rx_status_rings; i++) {
+		int sring_id = wil_find_free_sring(wil);
+
+		if (sring_id < 0) {
+			rc = -EFAULT;
+			goto err_free_status;
+		}
+		rc = wil_init_rx_sring(wil, status_ring_size, elem_size,
+				       sring_id);
+		if (rc)
+			goto err_free_status;
+	}
+
+	/* Allocate descriptor ring */
+	rc = wil_init_rx_desc_ring(wil, desc_ring_size,
+				   WIL_DEFAULT_RX_STATUS_RING_ID);
+	if (rc)
+		goto err_free_status;
+
+	if (wil->rx_buff_id_count >= status_ring_size) {
+		wil_info(wil,
+			 "rx_buff_id_count %d exceeds sring_size %d. set it to %d\n",
+			 wil->rx_buff_id_count, status_ring_size,
+			 status_ring_size - 1);
+		wil->rx_buff_id_count = status_ring_size - 1;
+	}
+
+	/* Allocate Rx buffer array */
+	rc = wil_init_rx_buff_arr(wil, wil->rx_buff_id_count);
+	if (rc)
+		goto err_free_desc;
+
+	/* Fill descriptor ring with credits */
+	rc = wil_rx_refill_edma(wil);
+	if (rc)
+		goto err_free_rx_buff_arr;
+
+	return 0;
+err_free_rx_buff_arr:
+	wil_free_rx_buff_arr(wil);
+err_free_desc:
+	wil_ring_free_edma(wil, ring);
+err_free_status:
+	for (i = 0; i < wil->num_rx_status_rings; i++)
+		wil_sring_free(wil, &wil->srings[i]);
+
+	return rc;
+}
+
+static int wil_ring_init_tx_edma(struct wil6210_vif *vif, int ring_id,
+				 int size, int cid, int tid)
+{
+	struct wil6210_priv *wil = vif_to_wil(vif);
+	int rc;
+	struct wil_ring *ring = &wil->ring_tx[ring_id];
+	struct wil_ring_tx_data *txdata = &wil->ring_tx_data[ring_id];
+
+	lockdep_assert_held(&wil->mutex);
+
+	wil_dbg_misc(wil,
+		     "init TX ring: ring_id=%u, cid=%u, tid=%u, sring_id=%u\n",
+		     ring_id, cid, tid, wil->tx_sring_idx);
+
+	wil_tx_data_init(txdata);
+	ring->size = size;
+	rc = wil_ring_alloc_desc_ring(wil, ring);
+	if (rc)
+		goto out;
+
+	wil->ring2cid_tid[ring_id][0] = cid;
+	wil->ring2cid_tid[ring_id][1] = tid;
+	if (!vif->privacy)
+		txdata->dot1x_open = true;
+
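+	/* Register the new TX descriptor ring with the FW */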
+	rc = wil_wmi_tx_desc_ring_add(vif, ring_id, cid, tid);
+	if (rc) {
+		wil_err(wil, "WMI_TX_DESC_RING_ADD_CMD failed\n");
+		goto out_free;
+	}
+
+	if (txdata->dot1x_open && agg_wsize >= 0)
+		wil_addba_tx_request(wil, ring_id, agg_wsize);
+
+	return 0;
+ out_free:
+	spin_lock_bh(&txdata->lock);
+	txdata->dot1x_open = false;
+	txdata->enabled = 0;
+	spin_unlock_bh(&txdata->lock);
+	wil_ring_free_edma(wil, ring);
+	wil->ring2cid_tid[ring_id][0] = WIL6210_MAX_CID;
+	wil->ring2cid_tid[ring_id][1] = 0;
+
+ out:
+	return rc;
+}
+
+/* This function is used only for RX SW reorder */
+static int wil_check_bar(struct wil6210_priv *wil, void *msg, int cid,
+			 struct sk_buff *skb, struct wil_net_stats *stats)
+{
+	u8 ftype;
+	u8 fc1;
+	int mid;
+	int tid;
+	u16 seq;
+	struct wil6210_vif *vif;
+
+	ftype = wil_rx_status_get_frame_type(wil, msg);
+	if (ftype == IEEE80211_FTYPE_DATA)
+		return 0;
+
+	fc1 = wil_rx_status_get_fc1(wil, msg);
+	mid = wil_rx_status_get_mid(msg);
+	tid = wil_rx_status_get_tid(msg);
+	seq = le16_to_cpu(wil_rx_status_get_seq(wil, msg));
+	vif = wil->vifs[mid];
+
+	if (unlikely(!vif)) {
+		wil_dbg_txrx(wil, "RX descriptor with invalid mid %d", mid);
+		return -EAGAIN;
+	}
+
+	wil_dbg_txrx(wil,
+		     "Non-data frame FC[7:0] 0x%02x MID %d CID %d TID %d Seq 0x%03x\n",
+		     fc1, mid, cid, tid, seq);
+	if (stats)
+		stats->rx_non_data_frame++;
+	if (wil_is_back_req(fc1)) {
+		wil_dbg_txrx(wil,
+			     "BAR: MID %d CID %d TID %d Seq 0x%03x\n",
+			     mid, cid, tid, seq);
+		wil_rx_bar(wil, vif, cid, tid, seq);
+	} else {
+		u32 sz = wil->use_compressed_rx_status ?
+			sizeof(struct wil_rx_status_compressed) :
+			sizeof(struct wil_rx_status_extended);
+
+		/* Print all the info again. This print alone can be
+		 * enabled without the overhead of printing every Rx frame
+		 */
+		wil_dbg_txrx(wil,
+			     "Unhandled non-data frame FC[7:0] 0x%02x MID %d CID %d TID %d Seq 0x%03x\n",
+			     fc1, mid, cid, tid, seq);
+		wil_hex_dump_txrx("RxS ", DUMP_PREFIX_NONE, 32, 4,
+				  (const void *)msg, sz, false);
+		wil_hex_dump_txrx("Rx ", DUMP_PREFIX_OFFSET, 16, 1,
+				  skb->data, skb_headlen(skb), false);
+	}
+
+	return -EAGAIN;
+}
+
+static int wil_rx_edma_check_errors(struct wil6210_priv *wil, void *msg,
+				    struct wil_net_stats *stats,
+				    struct sk_buff *skb)
+{
+	int error;
+	int l2_rx_status;
+	int l3_rx_status;
+	int l4_rx_status;
+
+	error = wil_rx_status_get_error(msg);
+	if (!error) {
+		skb->ip_summed = CHECKSUM_UNNECESSARY;
+		return 0;
+	}
+
+	l2_rx_status = wil_rx_status_get_l2_rx_status(msg);
+	if (l2_rx_status != 0) {
+		wil_dbg_txrx(wil, "L2 RX error, l2_rx_status=0x%x\n",
+			     l2_rx_status);
+		/* Due to HW issue, KEY error will trigger a MIC error */
+		if (l2_rx_status & WIL_RX_EDMA_ERROR_MIC) {
+			wil_dbg_txrx(wil,
+				     "L2 MIC/KEY error, dropping packet\n");
+			stats->rx_mic_error++;
+		}
+		if (l2_rx_status & WIL_RX_EDMA_ERROR_KEY) {
+			wil_dbg_txrx(wil, "L2 KEY error, dropping packet\n");
+			stats->rx_key_error++;
+		}
+		if (l2_rx_status & WIL_RX_EDMA_ERROR_REPLAY) {
+			wil_dbg_txrx(wil,
+				     "L2 REPLAY error, dropping packet\n");
+			stats->rx_replay++;
+		}
+		if (l2_rx_status & WIL_RX_EDMA_ERROR_AMSDU) {
+			wil_dbg_txrx(wil,
+				     "L2 AMSDU error, dropping packet\n");
+			stats->rx_amsdu_error++;
+		}
+		return -EFAULT;
+	}
+
+	l3_rx_status = wil_rx_status_get_l3_rx_status(msg);
+	l4_rx_status = wil_rx_status_get_l4_rx_status(msg);
+	if (!l3_rx_status && !l4_rx_status)
+		skb->ip_summed = CHECKSUM_UNNECESSARY;
+	/* If HW reports a bad checksum, let the IP stack re-check it.
+	 * For example, HW does not understand the Microsoft IP stack,
+	 * which mis-calculates the TCP checksum - when it should be 0x0,
+	 * it writes 0xffff, in violation of RFC 1624
+	 */
+
+	return 0;
+}
+
+static struct sk_buff *wil_sring_reap_rx_edma(struct wil6210_priv *wil,
+					      struct wil_status_ring *sring)
+{
+	struct device *dev = wil_to_dev(wil);
+	struct wil_rx_status_extended msg1;
+	void *msg = &msg1;
+	u16 buff_id;
+	struct sk_buff *skb;
+	dma_addr_t pa;
+	struct wil_ring_rx_data *rxdata = &sring->rx_data;
+	unsigned int sz = wil->rx_buf_len + ETH_HLEN +
+		WIL_EDMA_MAX_DATA_OFFSET;
+	struct wil_net_stats *stats = NULL;
+	u16 dmalen;
+	int cid;
+	int rc;
+	bool eop, headstolen;
+	int delta;
+	u8 dr_bit;
+	u8 data_offset;
+	struct wil_rx_status_extended *s;
+	u16 sring_idx = sring - wil->srings;
+
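+	/* The RX status message is later copied into skb->cb, so it must
+	 * fit there
+	 */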
+	BUILD_BUG_ON(sizeof(struct wil_rx_status_extended) > sizeof(skb->cb));
+
+again:
+	wil_get_next_rx_status_msg(sring, msg);
+	dr_bit = wil_rx_status_get_desc_rdy_bit(msg);
+
+	/* Completed handling all the ready status messages */
+	if (dr_bit != sring->desc_rdy_pol)
+		return NULL;
+
+	/* Extract the buffer ID from the status message */
+	buff_id = le16_to_cpu(wil_rx_status_get_buff_id(msg));
+	if (unlikely(!wil_val_in_range(buff_id, 0, wil->rx_buff_mgmt.size))) {
+		wil_err(wil, "Corrupt buff_id=%d, sring->swhead=%d\n",
+			buff_id, sring->swhead);
+		wil_sring_advance_swhead(sring);
+		goto again;
+	}
+
+	wil_sring_advance_swhead(sring);
+
+	/* Extract the SKB from the rx_buff management array */
+	skb = wil->rx_buff_mgmt.buff_arr[buff_id].skb;
+	wil->rx_buff_mgmt.buff_arr[buff_id].skb = NULL;
+	if (!skb) {
+		wil_err(wil, "No Rx skb at buff_id %d\n", buff_id);
+		goto again;
+	}
+
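+	/* skb->cb holds the DMA address saved when the buffer was mapped */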
+	memcpy(&pa, skb->cb, sizeof(pa));
+	dma_unmap_single(dev, pa, sz, DMA_FROM_DEVICE);
+	dmalen = le16_to_cpu(wil_rx_status_get_length(msg));
+
+	trace_wil6210_rx_status(wil, wil->use_compressed_rx_status, buff_id,
+				msg);
+	wil_dbg_txrx(wil, "Rx, buff_id=%u, sring_idx=%u, dmalen=%u bytes\n",
+		     buff_id, sring_idx, dmalen);
+	wil_hex_dump_txrx("RxS ", DUMP_PREFIX_NONE, 32, 4,
+			  (const void *)msg, wil->use_compressed_rx_status ?
+			  sizeof(struct wil_rx_status_compressed) :
+			  sizeof(struct wil_rx_status_extended), false);
+
+	/* Move the buffer from the active list to the free list */
+	list_move(&wil->rx_buff_mgmt.buff_arr[buff_id].list,
+		  &wil->rx_buff_mgmt.free);
+
+	eop = wil_rx_status_get_eop(msg);
+
+	cid = wil_rx_status_get_cid(msg);
+	if (unlikely(!wil_val_in_range(cid, 0, WIL6210_MAX_CID))) {
+		wil_err(wil, "Corrupt cid=%d, sring->swhead=%d\n",
+			cid, sring->swhead);
+		rxdata->skipping = true;
+		goto skipping;
+	}
+	stats = &wil->sta[cid].stats;
+
+	if (unlikely(skb->len < ETH_HLEN)) {
+		wil_dbg_txrx(wil, "Short frame, len = %d\n", skb->len);
+		stats->rx_short_frame++;
+		rxdata->skipping = true;
+		goto skipping;
+	}
+
+	/* Check and treat errors reported by HW */
+	rc = wil_rx_edma_check_errors(wil, msg, stats, skb);
+	if (rc) {
+		rxdata->skipping = true;
+		goto skipping;
+	}
+
+	if (unlikely(dmalen > sz)) {
+		wil_err(wil, "Rx size too large: %d bytes!\n", dmalen);
+		stats->rx_large_frame++;
+		rxdata->skipping = true;
+	}
+
+skipping:
+	/* "skipping" indicates whether a certain SKB should be dropped.
+	 * It is set when there is an error on the current SKB or during
+	 * RX chaining: as long as we manage to merge the SKBs it stays
+	 * false. Once we hit a bad SKB or fail to merge SKBs, it is set
+	 * to the !EOP value of the current SKB, which guarantees that all
+	 * the following SKBs, up to and including EOP, also get dropped.
+	 */
+	if (unlikely(rxdata->skipping)) {
+		kfree_skb(skb);
+		if (rxdata->skb) {
+			kfree_skb(rxdata->skb);
+			rxdata->skb = NULL;
+		}
+		rxdata->skipping = !eop;
+		goto again;
+	}
+
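+	/* Trim the skb to the number of bytes actually written by the HW */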
+	skb_trim(skb, dmalen);
+
+	prefetch(skb->data);
+
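+	/* RX chaining: accumulate the buffers of a multi-buffer MSDU in
+	 * rxdata->skb until the EOP buffer arrives
+	 */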
+	if (!rxdata->skb) {
+		rxdata->skb = skb;
+	} else {
+		if (likely(skb_try_coalesce(rxdata->skb, skb, &headstolen,
+					    &delta))) {
+			kfree_skb_partial(skb, headstolen);
+		} else {
+			wil_err(wil, "failed to merge skbs!\n");
+			kfree_skb(skb);
+			kfree_skb(rxdata->skb);
+			rxdata->skb = NULL;
+			rxdata->skipping = !eop;
+			goto again;
+		}
+	}
+
+	if (!eop)
+		goto again;
+
+	/* When we reach here, rxdata->skb always contains a full packet */
+	skb = rxdata->skb;
+	rxdata->skb = NULL;
+	rxdata->skipping = false;
+
+	if (stats) {
+		stats->last_mcs_rx = wil_rx_status_get_mcs(msg);
+		if (stats->last_mcs_rx < ARRAY_SIZE(stats->rx_per_mcs))
+			stats->rx_per_mcs[stats->last_mcs_rx]++;
+	}
+
+	if (!wil->use_rx_hw_reordering && !wil->use_compressed_rx_status &&
+	    wil_check_bar(wil, msg, cid, skb, stats) == -EAGAIN) {
+		kfree_skb(skb);
+		goto again;
+	}
+
+	/* Compensate for the HW data alignment according to the status
+	 * message
+	 */
+	data_offset = wil_rx_status_get_data_offset(msg);
+	if (data_offset == 0xFF ||
+	    data_offset > WIL_EDMA_MAX_DATA_OFFSET) {
+		wil_err(wil, "Unexpected data offset %d\n", data_offset);
+		kfree_skb(skb);
+		goto again;
+	}
+
+	skb_pull(skb, data_offset);
+
+	wil_hex_dump_txrx("Rx ", DUMP_PREFIX_OFFSET, 16, 1,
+			  skb->data, skb_headlen(skb), false);
+
+	/* Has to be done after dma_unmap_single as skb->cb is also
+	 * used for holding the pa
+	 */
+	s = wil_skb_rxstatus(skb);
+	memcpy(s, msg, sring->elem_size);
+
+	return skb;
+}
+
+void wil_rx_handle_edma(struct wil6210_priv *wil, int *quota)
+{
+	struct net_device *ndev;
+	struct wil_ring *ring = &wil->ring_rx;
+	struct wil_status_ring *sring;
+	struct sk_buff *skb;
+	int i;
+
+	if (unlikely(!ring->va)) {
+		wil_err(wil, "Rx IRQ while Rx not yet initialized\n");
+		return;
+	}
+	wil_dbg_txrx(wil, "rx_handle\n");
+
+	for (i = 0; i < wil->num_rx_status_rings; i++) {
+		sring = &wil->srings[i];
+		if (unlikely(!sring->va)) {
+			wil_err(wil,
+				"Rx IRQ while Rx status ring %d not yet initialized\n",
+				i);
+			continue;
+		}
+
+		while ((*quota > 0) &&
+		       (NULL != (skb =
+			wil_sring_reap_rx_edma(wil, sring)))) {
+			(*quota)--;
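+			/* With HW reordering the frame goes straight to the
+			 * network stack; otherwise it passes through the SW
+			 * reorder buffer first
+			 */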
+			if (wil->use_rx_hw_reordering) {
+				void *msg = wil_skb_rxstatus(skb);
+				int mid = wil_rx_status_get_mid(msg);
+				struct wil6210_vif *vif = wil->vifs[mid];
+
+				if (unlikely(!vif)) {
+					wil_dbg_txrx(wil,
+						     "RX desc invalid mid %d",
+						     mid);
+					kfree_skb(skb);
+					continue;
+				}
+				ndev = vif_to_ndev(vif);
+				wil_netif_rx_any(skb, ndev);
+			} else {
+				wil_rx_reorder(wil, skb);
+			}
+		}
+
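+		/* Update the HW tail ptr (RD ptr) for this status ring */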
+		wil_w(wil, sring->hwtail, (sring->swhead - 1) % sring->size);
+	}
+
+	wil_rx_refill_edma(wil);
+}
+
+static int wil_tx_desc_map_edma(union wil_tx_desc *desc,
+				dma_addr_t pa,
+				u32 len,
+				int ring_index)
+{
+	struct wil_tx_enhanced_desc *d =
+		(struct wil_tx_enhanced_desc *)&desc->enhanced;
+
+	memset(d, 0, sizeof(struct wil_tx_enhanced_desc));
+
+	wil_desc_set_addr_edma(&d->dma.addr, &d->dma.addr_high_high, pa);
+
+	/* 0..6: mac_length; 7: ip_version, 0 - IPv6, 1 - IPv4 */
+	d->dma.length = cpu_to_le16((u16)len);
+	d->mac.d[0] = (ring_index << WIL_EDMA_DESC_TX_MAC_CFG_0_QID_POS);
+	/* translation type:  0 - bypass; 1 - 802.3; 2 - native wifi;
+	 * 3 - eth mode
+	 */
+	d->mac.d[2] = BIT(MAC_CFG_DESC_TX_2_SNAP_HDR_INSERTION_EN_POS) |
+		      (0x3 << MAC_CFG_DESC_TX_2_L2_TRANSLATION_TYPE_POS);
+
+	return 0;
+}
+
+static inline void
+wil_get_next_tx_status_msg(struct wil_status_ring *sring,
+			   struct wil_ring_tx_status *msg)
+{
+	struct wil_ring_tx_status *_msg = (struct wil_ring_tx_status *)
+		(sring->va + (sring->elem_size * sring->swhead));
+
+	*msg = *_msg;
+}
+
+/**
+ * Clean up transmitted skbs from the TX descriptor ring.
+ * Return the number of descriptors cleared.
+ */
+int wil_tx_sring_handler(struct wil6210_priv *wil,
+			 struct wil_status_ring *sring)
+{
+	struct net_device *ndev;
+	struct device *dev = wil_to_dev(wil);
+	struct wil_ring *ring = NULL;
+	struct wil_ring_tx_data *txdata;
+	/* Total number of completed descriptors in all descriptor rings */
+	int desc_cnt = 0;
+	int cid;
+	struct wil_net_stats *stats = NULL;
+	struct wil_tx_enhanced_desc *_d;
+	unsigned int ring_id;
+	unsigned int num_descs;
+	int i;
+	u8 dr_bit; /* Descriptor Ready bit */
+	struct wil_ring_tx_status msg;
+	struct wil6210_vif *vif;
+	int used_before_complete;
+	int used_new;
+
+	wil_get_next_tx_status_msg(sring, &msg);
+	dr_bit = msg.desc_ready >> TX_STATUS_DESC_READY_POS;
+
+	/* Process completion messages while DR bit has the expected polarity */
+	while (dr_bit == sring->desc_rdy_pol) {
+		num_descs = msg.num_descriptors;
+		if (!num_descs) {
+			wil_err(wil, "invalid num_descs 0\n");
+			goto again;
+		}
+
+		/* Find the corresponding descriptor ring */
+		ring_id = msg.ring_id;
+
+		if (unlikely(ring_id >= WIL6210_MAX_TX_RINGS)) {
+			wil_err(wil, "invalid ring id %d\n", ring_id);
+			goto again;
+		}
+		ring = &wil->ring_tx[ring_id];
+		if (unlikely(!ring->va)) {
+			wil_err(wil, "Tx irq[%d]: ring not initialized\n",
+				ring_id);
+			goto again;
+		}
+		txdata = &wil->ring_tx_data[ring_id];
+		if (unlikely(!txdata->enabled)) {
+			wil_info(wil, "Tx irq[%d]: ring disabled\n", ring_id);
+			goto again;
+		}
+		vif = wil->vifs[txdata->mid];
+		if (unlikely(!vif)) {
+			wil_dbg_txrx(wil, "invalid MID %d for ring %d\n",
+				     txdata->mid, ring_id);
+			goto again;
+		}
+
+		ndev = vif_to_ndev(vif);
+
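+		/* Map the ring back to its peer (CID) for per-STA stats */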
+		cid = wil->ring2cid_tid[ring_id][0];
+		if (cid < WIL6210_MAX_CID)
+			stats = &wil->sta[cid].stats;
+
+		wil_dbg_txrx(wil,
+			     "tx_status: completed desc_ring (%d), num_descs (%d)\n",
+			     ring_id, num_descs);
+
+		used_before_complete = wil_ring_used_tx(ring);
+
+		for (i = 0 ; i < num_descs; ++i) {
+			struct wil_ctx *ctx = &ring->ctx[ring->swtail];
+			struct wil_tx_enhanced_desc dd, *d = &dd;
+			u16 dmalen;
+			struct sk_buff *skb = ctx->skb;
+
+			_d = (struct wil_tx_enhanced_desc *)
+				&ring->va[ring->swtail].tx.enhanced;
+			*d = *_d;
+
+			dmalen = le16_to_cpu(d->dma.length);
+			trace_wil6210_tx_status(&msg, ring->swtail, dmalen);
+			wil_dbg_txrx(wil,
+				     "TxC[%2d][%3d] : %d bytes, status 0x%02x\n",
+				     ring_id, ring->swtail, dmalen,
+				     msg.status);
+			wil_hex_dump_txrx("TxS ", DUMP_PREFIX_NONE, 32, 4,
+					  (const void *)&msg, sizeof(msg),
+					  false);
+
+			wil_tx_desc_unmap_edma(dev,
+					       (union wil_tx_desc *)d,
+					       ctx);
+
+			if (skb) {
+				if (likely(msg.status == 0)) {
+					ndev->stats.tx_packets++;
+					ndev->stats.tx_bytes += skb->len;
+					if (stats) {
+						stats->tx_packets++;
+						stats->tx_bytes += skb->len;
+					}
+				} else {
+					ndev->stats.tx_errors++;
+					if (stats)
+						stats->tx_errors++;
+				}
+				wil_consume_skb(skb, msg.status == 0);
+			}
+			memset(ctx, 0, sizeof(*ctx));
+			/* Make sure the ctx is zeroed before updating the tail
+			 * to prevent a case where wil_tx_ring will see
+			 * this descriptor as used and handle it before ctx zero
+			 * is completed.
+			 */
+			wmb();
+
+			ring->swtail = wil_ring_next_tail(ring);
+
+			desc_cnt++;
+		}
+
+		/* performance monitoring */
+		used_new = wil_ring_used_tx(ring);
+		if (wil_val_in_range(wil->ring_idle_trsh,
+				     used_new, used_before_complete)) {
+			wil_dbg_txrx(wil, "Ring[%2d] idle %d -> %d\n",
+				     ring_id, used_before_complete, used_new);
+			txdata->last_idle = get_cycles();
+		}
+
+again:
+		wil_sring_advance_swhead(sring);
+
+		wil_get_next_tx_status_msg(sring, &msg);
+		dr_bit = msg.desc_ready >> TX_STATUS_DESC_READY_POS;
+	}
+
+	/* shall we wake net queues? */
+	if (desc_cnt)
+		wil_update_net_queues(wil, vif, NULL, false);
+
+	/* Update the HW tail ptr (RD ptr) */
+	wil_w(wil, sring->hwtail, (sring->swhead - 1) % sring->size);
+
+	return desc_cnt;
+}
+
+/**
+ * Sets the descriptor @d up for csum and/or TSO offloading. The corresponding
+ * @skb is used to obtain the protocol and header lengths.
+ * @tso_desc_type is the TSO descriptor type: 0 - header, 1 - first data,
+ * 2 - middle, 3 - last descriptor.
+ */
+static void wil_tx_desc_offload_setup_tso_edma(struct wil_tx_enhanced_desc *d,
+					       int tso_desc_type, bool is_ipv4,
+					       int tcp_hdr_len,
+					       int skb_net_hdr_len,
+					       int mss)
+{
+	/* Number of descriptors */
+	d->mac.d[2] |= 1;
+	/* Maximum Segment Size */
+	d->mac.tso_mss |= cpu_to_le16(mss >> 2);
+	/* L4 header len: TCP header length */
+	d->dma.l4_hdr_len |= tcp_hdr_len & DMA_CFG_DESC_TX_0_L4_LENGTH_MSK;
+	/* EOP, TSO desc type, Segmentation enable,
+	 * Insert IPv4 and TCP / UDP Checksum
+	 */
+	d->dma.cmd |= BIT(WIL_EDMA_DESC_TX_CFG_EOP_POS) |
+		      tso_desc_type << WIL_EDMA_DESC_TX_CFG_TSO_DESC_TYPE_POS |
+		      BIT(WIL_EDMA_DESC_TX_CFG_SEG_EN_POS) |
+		      BIT(WIL_EDMA_DESC_TX_CFG_INSERT_IP_CHKSUM_POS) |
+		      BIT(WIL_EDMA_DESC_TX_CFG_INSERT_TCP_CHKSUM_POS);
+	/* Calculate pseudo-header */
+	d->dma.w1 |= BIT(WIL_EDMA_DESC_TX_CFG_PSEUDO_HEADER_CALC_EN_POS) |
+		     BIT(WIL_EDMA_DESC_TX_CFG_L4_TYPE_POS);
+	/* IP Header Length */
+	d->dma.ip_length |= skb_net_hdr_len;
+	/* MAC header length and IP address family */
+	d->dma.b11 |= ETH_HLEN |
+		      is_ipv4 << DMA_CFG_DESC_TX_OFFLOAD_CFG_L3T_IPV4_POS;
+}
+
+static int wil_tx_tso_gen_desc(struct wil6210_priv *wil, void *buff_addr,
+			       int len, uint i, int tso_desc_type,
+			       skb_frag_t *frag, struct wil_ring *ring,
+			       struct sk_buff *skb, bool is_ipv4,
+			       int tcp_hdr_len, int skb_net_hdr_len,
+			       int mss, int *descs_used)
+{
+	struct device *dev = wil_to_dev(wil);
+	struct wil_tx_enhanced_desc *_desc = (struct wil_tx_enhanced_desc *)
+		&ring->va[i].tx.enhanced;
+	struct wil_tx_enhanced_desc desc_mem, *d = &desc_mem;
+	int ring_index = ring - wil->ring_tx;
+	dma_addr_t pa;
+
+	if (len == 0)
+		return 0;
+
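+	/* Map either the linear buffer or an skb fragment for DMA */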
+	if (!frag) {
+		pa = dma_map_single(dev, buff_addr, len, DMA_TO_DEVICE);
+		ring->ctx[i].mapped_as = wil_mapped_as_single;
+	} else {
+		pa = skb_frag_dma_map(dev, frag, 0, len, DMA_TO_DEVICE);
+		ring->ctx[i].mapped_as = wil_mapped_as_page;
+	}
+	if (unlikely(dma_mapping_error(dev, pa))) {
+		wil_err(wil, "TSO: Skb DMA map error\n");
+		return -EINVAL;
+	}
+
+	wil->txrx_ops.tx_desc_map((union wil_tx_desc *)d, pa,
+				   len, ring_index);
+	wil_tx_desc_offload_setup_tso_edma(d, tso_desc_type, is_ipv4,
+					   tcp_hdr_len,
+					   skb_net_hdr_len, mss);
+
+	/* hold reference to skb
+	 * to prevent skb release before accounting
+	 * in case of immediate "tx done"
+	 */
+	if (tso_desc_type == wil_tso_type_lst)
+		ring->ctx[i].skb = skb_get(skb);
+
+	wil_hex_dump_txrx("TxD ", DUMP_PREFIX_NONE, 32, 4,
+			  (const void *)d, sizeof(*d), false);
+
+	*_desc = *d;
+	(*descs_used)++;
+
+	return 0;
+}
+
+static int __wil_tx_ring_tso_edma(struct wil6210_priv *wil,
+				  struct wil6210_vif *vif,
+				  struct wil_ring *ring,
+				  struct sk_buff *skb)
+{
+	int ring_index = ring - wil->ring_tx;
+	struct wil_ring_tx_data *txdata = &wil->ring_tx_data[ring_index];
+	int nr_frags = skb_shinfo(skb)->nr_frags;
+	int min_desc_required = nr_frags + 2; /* Headers, Head, Fragments */
+	int used, avail = wil_ring_avail_tx(ring);
+	int f, hdrlen, headlen;
+	int gso_type;
+	bool is_ipv4;
+	u32 swhead = ring->swhead;
+	int descs_used = 0; /* total number of used descriptors */
+	int rc = -EINVAL;
+	int tcp_hdr_len;
+	int skb_net_hdr_len;
+	int mss = skb_shinfo(skb)->gso_size;
+
+	wil_dbg_txrx(wil, "tx_ring_tso: %d bytes to ring %d\n", skb->len,
+		     ring_index);
+
+	if (unlikely(!txdata->enabled))
+		return -EINVAL;
+
+	if (unlikely(avail < min_desc_required)) {
+		wil_err_ratelimited(wil,
+				    "TSO: Tx ring[%2d] full. No space for %d fragments\n",
+				    ring_index, min_desc_required);
+		return -ENOMEM;
+	}
+
+	gso_type = skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV6 | SKB_GSO_TCPV4);
+	switch (gso_type) {
+	case SKB_GSO_TCPV4:
+		is_ipv4 = true;
+		break;
+	case SKB_GSO_TCPV6:
+		is_ipv4 = false;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if (skb->ip_summed != CHECKSUM_PARTIAL)
+		return -EINVAL;
+
+	/* The TCP header length and the skb network header length are fixed
+	 * for all of the packet's descriptors - read them once here
+	 */
+	tcp_hdr_len = tcp_hdrlen(skb);
+	skb_net_hdr_len = skb_network_header_len(skb);
+
+	/* First descriptor must contain the header only
+	 * Header Length = MAC header len + IP header len + TCP header len
+	 */
+	hdrlen = ETH_HLEN + tcp_hdr_len + skb_net_hdr_len;
+	wil_dbg_txrx(wil, "TSO: process header descriptor, hdrlen %u\n",
+		     hdrlen);
+	rc = wil_tx_tso_gen_desc(wil, skb->data, hdrlen, swhead,
+				 wil_tso_type_hdr, NULL, ring, skb,
+				 is_ipv4, tcp_hdr_len, skb_net_hdr_len,
+				 mss, &descs_used);
+	if (rc)
+		return -EINVAL;
+
+	/* Second descriptor contains the head */
+	headlen = skb_headlen(skb) - hdrlen;
+	wil_dbg_txrx(wil, "TSO: process skb head, headlen %u\n", headlen);
+	rc = wil_tx_tso_gen_desc(wil, skb->data + hdrlen, headlen,
+				 (swhead + descs_used) % ring->size,
+				 (nr_frags != 0) ? wil_tso_type_first :
+				 wil_tso_type_lst, NULL, ring, skb,
+				 is_ipv4, tcp_hdr_len, skb_net_hdr_len,
+				 mss, &descs_used);
+	if (rc)
+		goto mem_error;
+
+	/* Rest of the descriptors are from the SKB fragments */
+	for (f = 0; f < nr_frags; f++) {
+		skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
+		int len = frag->size;
+
+		wil_dbg_txrx(wil, "TSO: frag[%d]: len %u, descs_used %d\n", f,
+			     len, descs_used);
+
+		rc = wil_tx_tso_gen_desc(wil, NULL, len,
+					 (swhead + descs_used) % ring->size,
+					 (f != nr_frags - 1) ?
+					 wil_tso_type_mid : wil_tso_type_lst,
+					 frag, ring, skb, is_ipv4,
+					 tcp_hdr_len, skb_net_hdr_len,
+					 mss, &descs_used);
+		if (rc)
+			goto mem_error;
+	}
+
+	/* performance monitoring */
+	used = wil_ring_used_tx(ring);
+	if (wil_val_in_range(wil->ring_idle_trsh,
+			     used, used + descs_used)) {
+		txdata->idle += get_cycles() - txdata->last_idle;
+		wil_dbg_txrx(wil,  "Ring[%2d] not idle %d -> %d\n",
+			     ring_index, used, used + descs_used);
+	}
+
+	/* advance swhead */
+	wil_ring_advance_head(ring, descs_used);
+	wil_dbg_txrx(wil, "TSO: Tx swhead %d -> %d\n", swhead, ring->swhead);
+
+	/* make sure all writes to descriptors (shared memory) are done before
+	 * committing them to HW
+	 */
+	wmb();
+
+	wil_w(wil, ring->hwtail, ring->swhead);
+
+	return 0;
+
+mem_error:
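+	/* Unwind: unmap and clear every descriptor prepared so far */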
+	while (descs_used > 0) {
+		struct device *dev = wil_to_dev(wil);
+		struct wil_ctx *ctx;
+		int i = (swhead + descs_used - 1) % ring->size;
+		struct wil_tx_enhanced_desc dd, *d = &dd;
+		struct wil_tx_enhanced_desc *_desc =
+			(struct wil_tx_enhanced_desc *)
+			&ring->va[i].tx.enhanced;
+
+		*d = *_desc;
+		ctx = &ring->ctx[i];
+		wil_tx_desc_unmap_edma(dev, (union wil_tx_desc *)d, ctx);
+		memset(ctx, 0, sizeof(*ctx));
+		descs_used--;
+	}
+	return rc;
+}
+
+static int wil_ring_init_bcast_edma(struct wil6210_vif *vif, int ring_id,
+				    int size)
+{
+	struct wil6210_priv *wil = vif_to_wil(vif);
+	struct wil_ring *ring = &wil->ring_tx[ring_id];
+	int rc;
+	struct wil_ring_tx_data *txdata = &wil->ring_tx_data[ring_id];
+
+	wil_dbg_misc(wil, "init bcast: ring_id=%d, sring_id=%d\n",
+		     ring_id, wil->tx_sring_idx);
+
+	lockdep_assert_held(&wil->mutex);
+
+	wil_tx_data_init(txdata);
+	ring->size = size;
+	ring->is_rx = false;
+	rc = wil_ring_alloc_desc_ring(wil, ring);
+	if (rc)
+		goto out;
+
+	wil->ring2cid_tid[ring_id][0] = WIL6210_MAX_CID; /* CID */
+	wil->ring2cid_tid[ring_id][1] = 0; /* TID */
+	if (!vif->privacy)
+		txdata->dot1x_open = true;
+
+	rc = wil_wmi_bcast_desc_ring_add(vif, ring_id);
+	if (rc)
+		goto out_free;
+
+	return 0;
+
+ out_free:
+	spin_lock_bh(&txdata->lock);
+	txdata->enabled = 0;
+	txdata->dot1x_open = false;
+	spin_unlock_bh(&txdata->lock);
+	wil_ring_free_edma(wil, ring);
+
+out:
+	return rc;
+}
+
+static void wil_tx_fini_edma(struct wil6210_priv *wil)
+{
+	struct wil_status_ring *sring = &wil->srings[wil->tx_sring_idx];
+
+	wil_dbg_misc(wil, "free TX sring\n");
+
+	wil_sring_free(wil, sring);
+}
+
+static void wil_rx_data_free(struct wil_status_ring *sring)
+{
+	if (!sring)
+		return;
+
+	kfree_skb(sring->rx_data.skb);
+	sring->rx_data.skb = NULL;
+}
+
+static void wil_rx_fini_edma(struct wil6210_priv *wil)
+{
+	struct wil_ring *ring = &wil->ring_rx;
+	int i;
+
+	wil_dbg_misc(wil, "rx_fini_edma\n");
+
+	wil_ring_free_edma(wil, ring);
+
+	for (i = 0; i < wil->num_rx_status_rings; i++) {
+		wil_rx_data_free(&wil->srings[i]);
+		wil_sring_free(wil, &wil->srings[i]);
+	}
+
+	wil_free_rx_buff_arr(wil);
+}
+
+void wil_init_txrx_ops_edma(struct wil6210_priv *wil)
+{
+	wil->txrx_ops.configure_interrupt_moderation =
+		wil_configure_interrupt_moderation_edma;
+	/* TX ops */
+	wil->txrx_ops.ring_init_tx = wil_ring_init_tx_edma;
+	wil->txrx_ops.ring_fini_tx = wil_ring_free_edma;
+	wil->txrx_ops.ring_init_bcast = wil_ring_init_bcast_edma;
+	wil->txrx_ops.tx_init = wil_tx_init_edma;
+	wil->txrx_ops.tx_fini = wil_tx_fini_edma;
+	wil->txrx_ops.tx_desc_map = wil_tx_desc_map_edma;
+	wil->txrx_ops.tx_desc_unmap = wil_tx_desc_unmap_edma;
+	wil->txrx_ops.tx_ring_tso = __wil_tx_ring_tso_edma;
+	/* RX ops */
+	wil->txrx_ops.rx_init = wil_rx_init_edma;
+	wil->txrx_ops.wmi_addba_rx_resp = wmi_addba_rx_resp_edma;
+	wil->txrx_ops.get_reorder_params = wil_get_reorder_params_edma;
+	wil->txrx_ops.get_netif_rx_params = wil_get_netif_rx_params_edma;
+	wil->txrx_ops.rx_crypto_check = wil_rx_crypto_check_edma;
+	wil->txrx_ops.is_rx_idle = wil_is_rx_idle_edma;
+	wil->txrx_ops.rx_fini = wil_rx_fini_edma;
+}
+
diff --git a/drivers/net/wireless/ath/wil6210/txrx_edma.h b/drivers/net/wireless/ath/wil6210/txrx_edma.h
new file mode 100644
index 0000000000000000000000000000000000000000..e86fc2dc0ce010915d84aa439da441eac44297d5
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/txrx_edma.h
@@ -0,0 +1,562 @@
+/*
+ * Copyright (c) 2012-2016,2018, The Linux Foundation. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef WIL6210_TXRX_EDMA_H
+#define WIL6210_TXRX_EDMA_H
+
+#include "wil6210.h"
+
+/* limit the status ring size order to the range [min order..max order] */
+#define WIL_SRING_SIZE_ORDER_MIN	(WIL_RING_SIZE_ORDER_MIN)
+#define WIL_SRING_SIZE_ORDER_MAX	(WIL_RING_SIZE_ORDER_MAX)
+/* RX sring order should be bigger than RX ring order */
+#define WIL_RX_SRING_SIZE_ORDER_DEFAULT	(11)
+#define WIL_TX_SRING_SIZE_ORDER_DEFAULT	(12)
+#define WIL_RX_BUFF_ARR_SIZE_DEFAULT (1536)
+
+#define WIL_DEFAULT_RX_STATUS_RING_ID 0
+#define WIL_RX_DESC_RING_ID 0
+#define WIL_RX_STATUS_IRQ_IDX 0
+#define WIL_TX_STATUS_IRQ_IDX 1
+
+#define WIL_EDMA_AGG_WATERMARK (0xffff)
+#define WIL_EDMA_AGG_WATERMARK_POS (16)
+
+#define WIL_EDMA_IDLE_TIME_LIMIT_USEC (50)
+#define WIL_EDMA_TIME_UNIT_CLK_CYCLES (330) /* fits 1 usec */
+
+/* Error field */
+#define WIL_RX_EDMA_ERROR_MIC	(1)
+#define WIL_RX_EDMA_ERROR_KEY	(2) /* Key missing */
+#define WIL_RX_EDMA_ERROR_REPLAY	(3)
+#define WIL_RX_EDMA_ERROR_AMSDU	(4)
+#define WIL_RX_EDMA_ERROR_FCS	(7)
+
+#define WIL_RX_EDMA_ERROR_L3_ERR	(BIT(0) | BIT(1))
+#define WIL_RX_EDMA_ERROR_L4_ERR	(BIT(0) | BIT(1))
+
+#define WIL_RX_EDMA_DLPF_LU_MISS_BIT		BIT(11)
+#define WIL_RX_EDMA_DLPF_LU_MISS_CID_TID_MASK	0x7
+#define WIL_RX_EDMA_DLPF_LU_HIT_CID_TID_MASK	0xf
+
+#define WIL_RX_EDMA_DLPF_LU_MISS_CID_POS	2
+#define WIL_RX_EDMA_DLPF_LU_HIT_CID_POS		4
+
+#define WIL_RX_EDMA_DLPF_LU_MISS_TID_POS	5
+
+#define WIL_RX_EDMA_MID_VALID_BIT		BIT(22)
+
+#define WIL_EDMA_DESC_TX_MAC_CFG_0_QID_POS 16
+#define WIL_EDMA_DESC_TX_MAC_CFG_0_QID_LEN 6
+
+#define WIL_EDMA_DESC_TX_CFG_EOP_POS 0
+#define WIL_EDMA_DESC_TX_CFG_EOP_LEN 1
+
+#define WIL_EDMA_DESC_TX_CFG_TSO_DESC_TYPE_POS 3
+#define WIL_EDMA_DESC_TX_CFG_TSO_DESC_TYPE_LEN 2
+
+#define WIL_EDMA_DESC_TX_CFG_SEG_EN_POS 5
+#define WIL_EDMA_DESC_TX_CFG_SEG_EN_LEN 1
+
+#define WIL_EDMA_DESC_TX_CFG_INSERT_IP_CHKSUM_POS 6
+#define WIL_EDMA_DESC_TX_CFG_INSERT_IP_CHKSUM_LEN 1
+
+#define WIL_EDMA_DESC_TX_CFG_INSERT_TCP_CHKSUM_POS 7
+#define WIL_EDMA_DESC_TX_CFG_INSERT_TCP_CHKSUM_LEN 1
+
+#define WIL_EDMA_DESC_TX_CFG_L4_TYPE_POS 15
+#define WIL_EDMA_DESC_TX_CFG_L4_TYPE_LEN 1
+
+#define WIL_EDMA_DESC_TX_CFG_PSEUDO_HEADER_CALC_EN_POS 5
+#define WIL_EDMA_DESC_TX_CFG_PSEUDO_HEADER_CALC_EN_LEN 1
+
+/* Enhanced Rx descriptor - MAC part
+ * [dword 0] : Reserved
+ * [dword 1] : Reserved
+ * [dword 2] : Reserved
+ * [dword 3]
+ *	bit  0..15 : Buffer ID
+ *	bit 16..31 : Reserved
+ */
+struct wil_ring_rx_enhanced_mac {
+	u32 d[3];
+	__le16 buff_id;
+	u16 reserved;
+} __packed;
+
+/* Enhanced Rx descriptor - DMA part
+ * [dword 0] - Reserved
+ * [dword 1]
+ *	bit  0..31 : addr_low:32 The payload buffer address, bits 0-31
+ * [dword 2]
+ *	bit  0..15 : addr_high_low:16 The payload buffer address, bits 32-47
+ *	bit 16..31 : Reserved
+ * [dword 3]
+ *	bit  0..15 : addr_high_high:16 The payload buffer address, bits 48-63
+ *	bit 16..31 : length
+ */
+struct wil_ring_rx_enhanced_dma {
+	u32 d0;
+	struct wil_ring_dma_addr addr;
+	u16 w5;
+	__le16 addr_high_high;
+	__le16 length;
+} __packed;
+
+struct wil_rx_enhanced_desc {
+	struct wil_ring_rx_enhanced_mac mac;
+	struct wil_ring_rx_enhanced_dma dma;
+} __packed;
+
+/* Enhanced Tx descriptor - DMA part
+ * [dword 0]
+ *	Same as legacy
+ * [dword 1]
+ * bit  0..31 : addr_low:32 The payload buffer address, bits 0-31
+ * [dword 2]
+ * bit  0..15 : addr_high_low:16 The payload buffer address, bits 32-47
+ * bit 16..23 : ip_length:8 The IP header length for the TX IP checksum
+ *		offload feature
+ * bit 24..30 : mac_length:7
+ * bit     31 : ip_version:1 1 - IPv4, 0 - IPv6
+ * [dword 3]
+ * bit  0..15 : addr_high_high:16 The payload buffer address, bits 48-63
+ * bit 16..31 : length
+ */
+struct wil_ring_tx_enhanced_dma {
+	u8 l4_hdr_len;
+	u8 cmd;
+	u16 w1;
+	struct wil_ring_dma_addr addr;
+	u8  ip_length;
+	u8  b11;       /* 0..6: mac_length; 7:ip_version */
+	__le16 addr_high_high;
+	__le16 length;
+} __packed;
+
+/* Enhanced Tx descriptor - MAC part
+ * [dword 0]
+ * bit  0.. 9 : lifetime_expiry_value:10
+ * bit     10 : interrupt_en:1
+ * bit     11 : status_en:1
+ * bit 12..13 : txss_override:2
+ * bit     14 : timestamp_insertion:1
+ * bit     15 : duration_preserve:1
+ * bit 16..21 : reserved0:6
+ * bit 22..26 : mcs_index:5
+ * bit     27 : mcs_en:1
+ * bit 28..30 : reserved1:3
+ * bit     31 : sn_preserved:1
+ * [dword 1]
+ * bit  0.. 3 : pkt_mode:4
+ * bit      4 : pkt_mode_en:1
+ * bit  5..14 : reserved0:10
+ * bit     15 : ack_policy_en:1
+ * bit 16..19 : dst_index:4
+ * bit     20 : dst_index_en:1
+ * bit 21..22 : ack_policy:2
+ * bit     23 : lifetime_en:1
+ * bit 24..30 : max_retry:7
+ * bit     31 : max_retry_en:1
+ * [dword 2]
+ * bit  0.. 7 : num_of_descriptors:8
+ * bit  8..17 : reserved:10
+ * bit 18..19 : l2_translation_type:2 00 - bypass, 01 - 802.3, 10 - 802.11
+ * bit     20 : snap_hdr_insertion_en:1
+ * bit     21 : vlan_removal_en:1
+ * bit 22..23 : reserved0:2
+ * bit	   24 : Dest ID extension:1
+ * bit 25..31 : reserved0:7
+ * [dword 3]
+ * bit  0..15 : tso_mss:16
+ * bit 16..31 : descriptor_scratchpad:16 - mailbox between driver and ucode
+ */
+struct wil_ring_tx_enhanced_mac {
+	u32 d[3];
+	__le16 tso_mss;
+	u16 scratchpad;
+} __packed;
+
+struct wil_tx_enhanced_desc {
+	struct wil_ring_tx_enhanced_mac mac;
+	struct wil_ring_tx_enhanced_dma dma;
+} __packed;
+
+#define TX_STATUS_DESC_READY_POS 7
+
+/* Enhanced TX status message
+ * [dword 0]
+ *	bit  0.. 7 : Number of Descriptors:8 - The number of descriptors that
+ *		     are used to form the packet. It is needed for WB when
+ *		     releasing the packet
+ *	bit  8..15 : tx_ring_id:8 The transmission ring ID that is related to
+ *		     the message
+ *	bit 16..23 : Status:8 - The TX status Code
+ *		0x0 - A successful transmission
+ *		0x1 - Retry expired
+ *		0x2 - Lifetime Expired
+ *		0x3 - Released
+ *		0x4-0xFF - Reserved
+ *	bit 24..30 : Reserved:7
+ *	bit     31 : Descriptor Ready bit:1 - It is initialized to
+ *		zero by the driver when the ring is created. It is set by the HW
+ *		to one for each completed status message. On each wrap-around,
+ *		the DR bit value is flipped.
+ * [dword 1]
+ *	bit 0..31  : timestamp:32 - Set when MPDU is transmitted.
+ * [dword 2]
+ *	bit  0.. 4 : MCS:5 - The transmitted MCS value
+ *	bit      5 : Reserved:1
+ *	bit  6.. 7 : CB mode:2 - 0-DMG 1-EDMG 2-Wide
+ *	bit  8..12 : QID:5 - The QID that was used for the transmission
+ *	bit 13..15 : Reserved:3
+ *	bit 16..20 : Num of MSDUs:5 - Number of MSDUs in the aggregation
+ *	bit 21..22 : Reserved:2
+ *	bit     23 : Retry:1 - An indication that the transmission was retried
+ *	bit 24..31 : TX-Sector:8 - the antenna sector that was used for
+ *		     transmission
+ * [dword 3]
+ *	bit  0..11 : Sequence number:12 - The Sequence Number that was used
+ *		     for the MPDU transmission
+ *	bit 12..31 : Reserved:20
+ */
+struct wil_ring_tx_status {
+	u8 num_descriptors;
+	u8 ring_id;
+	u8 status;
+	u8 desc_ready; /* Only the last bit should be set */
+	u32 timestamp;
+	u32 d2;
+	u16 seq_number; /* Only the first 12 bits */
+	u16 w7;
+} __packed;
+
+/* Enhanced Rx status message - compressed part
+ * [dword 0]
+ *	bit  0.. 2 : L2 Rx Status:3 - The L2 packet reception Status
+ *		     0-Success, 1-MIC Error, 2-Key Error, 3-Replay Error,
+ *		     4-A-MSDU Error, 5-Reserved, 6-Reserved, 7-FCS Error
+ *	bit  3.. 4 : L3 Rx Status:2 - Bit0 - L3I - L3 identified and checksum
+ *		     calculated, Bit1- L3Err - IPv4 Checksum Error
+ *	bit  5.. 6 : L4 Rx Status:2 - Bit0 - L4I - L4 identified and checksum
+ *		     calculated, Bit1- L4Err - TCP/UDP Checksum Error
+ *	bit      7 : Reserved:1
+ *	bit  8..19 : Flow ID:12 - MSDU flow ID
+ *	bit 20..21 : MID:2 - The MAC ID
+ *	bit     22 : MID_V:1 - The MAC ID field is valid
+ *	bit     23 : L3T:1 - IP types: 0-IPv6, 1-IPv4
+ *	bit     24 : L4T:1 - Layer 4 Type: 0-UDP, 1-TCP
+ *	bit     25 : BC:1 - The received MPDU is broadcast
+ *	bit     26 : MC:1 - The received MPDU is multicast
+ *	bit     27 : Raw:1 - The MPDU received with no translation
+ *	bit     28 : Sec:1 - The FC control (b14) - Frame Protected
+ *	bit     29 : Error:1 - An error is set when (L2 status != 0) ||
+ *		(L3 status == 3) || (L4 status == 3)
+ *	bit     30 : EOP:1 - End of MSDU signaling. It is set to mark the end
+ *		     of the transfer, otherwise the status indicates buffer
+ *		     only completion.
+ *	bit     31 : Descriptor Ready bit:1 - It is initialized to
+ *		     zero by the driver when the ring is created. It is set
+ *		     by the HW to one for each completed status message.
+ *		     On each wrap-around, the DR bit value is flipped.
+ * [dword 1]
+ *	bit  0.. 5 : MAC Len:6 - The number of bytes that are used for L2 header
+ *	bit  6..11 : IPLEN:6 - The number of DW that are used for L3 header
+ *	bit 12..15 : I4Len:4 - The number of DW that are used for L4 header
+ *	bit 16..21 : MCS:6 - The received MCS field from the PLCP Header
+ *	bit 22..23 : CB mode:2 - The CB Mode: 0-DMG, 1-EDMG, 2-Wide
+ *	bit 24..27 : Data Offset:4 - The data offset, a code that describe the
+ *		     payload shift from the beginning of the buffer:
+ *		     0 - 0 Bytes, 3 - 2 Bytes
+ *	bit     28 : A-MSDU Present:1 - The QoS (b7) A-MSDU present field
+ *	bit     29 : A-MSDU Type:1 The QoS (b8) A-MSDU Type field
+ *	bit     30 : A-MPDU:1 - Packet is part of aggregated MPDU
+ *	bit     31 : Key ID:1 - The extracted Key ID from the encryption header
+ * [dword 2]
+ *	bit  0..15 : Buffer ID:16 - The Buffer Identifier
+ *	bit 16..31 : Length:16 - It indicates the valid bytes that are stored
+ *		     in the current descriptor buffer. For multiple buffer
+ *		     descriptor, SW need to sum the total descriptor length
+ *		     in all buffers to produce the packet length
+ * [dword 3]
+ *	bit  0..31  : timestamp:32 - The MPDU Timestamp.
+ */
+struct wil_rx_status_compressed {
+	u32 d0;
+	u32 d1;
+	__le16 buff_id;
+	__le16 length;
+	u32 timestamp;
+} __packed;
+
+/* Enhanced Rx status message - extension part
+ * [dword 0]
+ *	bit  0.. 4 : QID:5 - The Queue Identifier that the packet is received
+ *		     from
+ *	bit  5.. 7 : Reserved:3
+ *	bit  8..11 : TID:4 - The QoS (b3-0) TID Field
+ *	bit 12..15 : Source index:4 - The Source index that was found
+ *		     during parsing of the TA. This field is used to define
+ *		     the source of the packet
+ *	bit 16..18 : Destination index:3 - The Destination index that
+ *		     was found during parsing of the RA.
+ *	bit 19..20 : DS Type:2 - The FC Control (b9-8) - From / To DS
+ *	bit 21..22 : MIC ICR:2 - this signal tells the DMA to assert an
+ *		     interrupt after it writes the packet
+ *	bit     23 : ESOP:1 - The QoS (b4) ESOP field
+ *	bit     24 : RDG:1
+ *	bit 25..31 : Reserved:7
+ * [dword 1]
+ *	bit  0.. 1 : Frame Type:2 - The FC Control (b3-2) - MPDU Type
+ *		     (management, data, control and extension)
+ *	bit  2.. 5 : Sub type:4 - The FC Control (b7-4) - Frame Subtype
+ *	bit  6..11 : Ext sub type:6 - The FC Control (b11-8) - Frame Extended
+ *                   Subtype
+ *	bit 12..13 : ACK Policy:2 - The QoS (b6-5) ACK Policy fields
+ *	bit 14     : DECRYPT_BYP:1 - The MPDU is bypassed by the decryption unit
+ *	bit 15..23 : Reserved:9
+ *	bit 24..31 : RSSI/SNR:8 - The RSSI / SNR measurement for the received
+ *                   MPDU
+ * [dword 2]
+ *	bit  0..11 : SN:12 - The received Sequence number field
+ *	bit 12..15 : Reserved:4
+ *	bit 16..31 : PN bits [15:0]:16
+ * [dword 3]
+ *	bit  0..31 : PN bits [47:16]:32
+ */
+struct wil_rx_status_extension {
+	u32 d0;
+	u32 d1;
+	__le16 seq_num; /* only lower 12 bits */
+	u16 pn_15_0;
+	u32 pn_47_16;
+} __packed;
+
+struct wil_rx_status_extended {
+	struct wil_rx_status_compressed comp;
+	struct wil_rx_status_extension ext;
+} __packed;
+
+static inline void *wil_skb_rxstatus(struct sk_buff *skb)
+{
+	return (void *)skb->cb;
+}
+
+static inline __le16 wil_rx_status_get_length(void *msg)
+{
+	return ((struct wil_rx_status_compressed *)msg)->length;
+}
+
+static inline u8 wil_rx_status_get_mcs(void *msg)
+{
+	return WIL_GET_BITS(((struct wil_rx_status_compressed *)msg)->d1,
+			    16, 21);
+}
+
+static inline u16 wil_rx_status_get_flow_id(void *msg)
+{
+	return WIL_GET_BITS(((struct wil_rx_status_compressed *)msg)->d0,
+			    8, 19);
+}
+
+static inline u8 wil_rx_status_get_mcast(void *msg)
+{
+	return WIL_GET_BITS(((struct wil_rx_status_compressed *)msg)->d0,
+			    26, 26);
+}
+
+/**
+ * In case of a DLPF miss, the flow ID is parsed as follows:
+ * dest_id:2
+ * src_id :3 - cid
+ * tid:3
+ * Otherwise:
+ * tid:4
+ * cid:4
+ */
+
+static inline u8 wil_rx_status_get_cid(void *msg)
+{
+	u16 val = wil_rx_status_get_flow_id(msg);
+
+	if (val & WIL_RX_EDMA_DLPF_LU_MISS_BIT)
+		/* CID is in bits 2..4 */
+		return (val >> WIL_RX_EDMA_DLPF_LU_MISS_CID_POS) &
+			WIL_RX_EDMA_DLPF_LU_MISS_CID_TID_MASK;
+	else
+		/* CID is in bits 4..7 */
+		return (val >> WIL_RX_EDMA_DLPF_LU_HIT_CID_POS) &
+			WIL_RX_EDMA_DLPF_LU_HIT_CID_TID_MASK;
+}
+
+static inline u8 wil_rx_status_get_tid(void *msg)
+{
+	u16 val = wil_rx_status_get_flow_id(msg);
+
+	if (val & WIL_RX_EDMA_DLPF_LU_MISS_BIT)
+		/* TID is in bits 5..7 */
+		return (val >> WIL_RX_EDMA_DLPF_LU_MISS_TID_POS) &
+			WIL_RX_EDMA_DLPF_LU_MISS_CID_TID_MASK;
+	else
+		/* TID is in bits 0..3 */
+		return val & WIL_RX_EDMA_DLPF_LU_MISS_CID_TID_MASK;
+}
+
+static inline int wil_rx_status_get_desc_rdy_bit(void *msg)
+{
+	return WIL_GET_BITS(((struct wil_rx_status_compressed *)msg)->d0,
+			    31, 31);
+}
+
+static inline int wil_rx_status_get_eop(void *msg) /* EoP = End of Packet */
+{
+	return WIL_GET_BITS(((struct wil_rx_status_compressed *)msg)->d0,
+			    30, 30);
+}
+
+static inline __le16 wil_rx_status_get_buff_id(void *msg)
+{
+	return ((struct wil_rx_status_compressed *)msg)->buff_id;
+}
+
+static inline u8 wil_rx_status_get_data_offset(void *msg)
+{
+	u8 val = WIL_GET_BITS(((struct wil_rx_status_compressed *)msg)->d1,
+			      24, 27);
+
+	switch (val) {
+	case 0: return 0;
+	case 3: return 2;
+	default: return 0xFF;
+	}
+}
+
+static inline int wil_rx_status_get_frame_type(struct wil6210_priv *wil,
+					       void *msg)
+{
+	if (wil->use_compressed_rx_status)
+		return IEEE80211_FTYPE_DATA;
+
+	return WIL_GET_BITS(((struct wil_rx_status_extended *)msg)->ext.d1,
+			    0, 1) << 2;
+}
+
+static inline int wil_rx_status_get_fc1(struct wil6210_priv *wil, void *msg)
+{
+	if (wil->use_compressed_rx_status)
+		return 0;
+
+	return WIL_GET_BITS(((struct wil_rx_status_extended *)msg)->ext.d1,
+			    0, 5) << 2;
+}
+
+static inline __le16 wil_rx_status_get_seq(struct wil6210_priv *wil, void *msg)
+{
+	if (wil->use_compressed_rx_status)
+		return 0;
+
+	return ((struct wil_rx_status_extended *)msg)->ext.seq_num;
+}
+
+static inline int wil_rx_status_get_mid(void *msg)
+{
+	if (!(((struct wil_rx_status_compressed *)msg)->d0 &
+	    WIL_RX_EDMA_MID_VALID_BIT))
+		return 0; /* use the default MID */
+
+	return WIL_GET_BITS(((struct wil_rx_status_compressed *)msg)->d0,
+			    20, 21);
+}
+
+static inline int wil_rx_status_get_error(void *msg)
+{
+	return WIL_GET_BITS(((struct wil_rx_status_compressed *)msg)->d0,
+			    29, 29);
+}
+
+static inline int wil_rx_status_get_l2_rx_status(void *msg)
+{
+	return WIL_GET_BITS(((struct wil_rx_status_compressed *)msg)->d0,
+			    0, 2);
+}
+
+static inline int wil_rx_status_get_l3_rx_status(void *msg)
+{
+	return WIL_GET_BITS(((struct wil_rx_status_compressed *)msg)->d0,
+			    3, 4);
+}
+
+static inline int wil_rx_status_get_l4_rx_status(void *msg)
+{
+	return WIL_GET_BITS(((struct wil_rx_status_compressed *)msg)->d0,
+			    5, 6);
+}
+
+static inline int wil_rx_status_get_security(void *msg)
+{
+	return WIL_GET_BITS(((struct wil_rx_status_compressed *)msg)->d0,
+			    28, 28);
+}
+
+static inline u8 wil_rx_status_get_key_id(void *msg)
+{
+	return WIL_GET_BITS(((struct wil_rx_status_compressed *)msg)->d1,
+			    31, 31);
+}
+
+static inline u8 wil_tx_status_get_mcs(struct wil_ring_tx_status *msg)
+{
+	return WIL_GET_BITS(msg->d2, 0, 4);
+}
+
+static inline u32 wil_ring_next_head(struct wil_ring *ring)
+{
+	return (ring->swhead + 1) % ring->size;
+}
+
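+/* Write a DMA address into the enhanced descriptor address fields:
+ * bits 0..31 go to addr_low, bits 32..47 to addr_high and bits 48..63
+ * to addr_high_high
+ */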
+static inline void wil_desc_set_addr_edma(struct wil_ring_dma_addr *addr,
+					  __le16 *addr_high_high,
+					  dma_addr_t pa)
+{
+	addr->addr_low = cpu_to_le32(lower_32_bits(pa));
+	addr->addr_high = cpu_to_le16((u16)upper_32_bits(pa));
+	*addr_high_high = cpu_to_le16((u16)(upper_32_bits(pa) >> 16));
+}
+
+static inline
+dma_addr_t wil_tx_desc_get_addr_edma(struct wil_ring_tx_enhanced_dma *dma)
+{
+	return le32_to_cpu(dma->addr.addr_low) |
+			   ((u64)le16_to_cpu(dma->addr.addr_high) << 32) |
+			   ((u64)le16_to_cpu(dma->addr_high_high) << 48);
+}
+
+static inline
+dma_addr_t wil_rx_desc_get_addr_edma(struct wil_ring_rx_enhanced_dma *dma)
+{
+	return le32_to_cpu(dma->addr.addr_low) |
+			   ((u64)le16_to_cpu(dma->addr.addr_high) << 32) |
+			   ((u64)le16_to_cpu(dma->addr_high_high) << 48);
+}
+
+void wil_configure_interrupt_moderation_edma(struct wil6210_priv *wil);
+int wil_tx_sring_handler(struct wil6210_priv *wil,
+			 struct wil_status_ring *sring);
+void wil_rx_handle_edma(struct wil6210_priv *wil, int *quota);
+void wil_init_txrx_ops_edma(struct wil6210_priv *wil);
+
+#endif /* WIL6210_TXRX_EDMA_H */
+
diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h
index b623510c6f6c02d71724ca82b591c0465be4d8f8..d963c76b679ed95efce68ddc2ffffd8d80da0ccd 100644
--- a/drivers/net/wireless/ath/wil6210/wil6210.h
+++ b/drivers/net/wireless/ath/wil6210/wil6210.h
@@ -24,6 +24,7 @@
 #include <net/cfg80211.h>
 #include <linux/timex.h>
 #include <linux/types.h>
+#include <linux/irqreturn.h>
 #include "wmi.h"
 #include "wil_platform.h"
 #include "fw.h"
@@ -37,6 +38,10 @@ extern bool rx_large_buf;
 extern bool debug_fw;
 extern bool disable_ap_sme;
 
+struct wil6210_priv;
+struct wil6210_vif;
+union wil_tx_desc;
+
 #define WIL_NAME "wil6210"
 
 #define WIL_FW_NAME_DEFAULT "wil6210.fw"
@@ -80,6 +85,8 @@ static inline u32 WIL_GET_BITS(u32 x, int b0, int b1)
 #define WIL6210_NAPI_BUDGET	(16) /* arbitrary */
 #define WIL_MAX_AMPDU_SIZE	(64 * 1024) /* FW/HW limit */
 #define WIL_MAX_AGG_WSIZE	(32) /* FW/HW limit */
+#define WIL6210_MAX_STATUS_RINGS	(8)
+
 /* Hardware offload block adds the following:
  * 26 bytes - 3-address QoS data header
  *  8 bytes - IV + EIV (for GCMP)
@@ -203,7 +210,9 @@ struct RGF_ICR {
 #define RGF_USER_SPARROW_M_4			(0x880c50) /* Sparrow */
 	#define BIT_SPARROW_M_4_SEL_SLEEP_OR_REF	BIT(2)
 #define RGF_USER_OTP_HW_RD_MACHINE_1	(0x880ce0)
-	#define BIT_NO_FLASH_INDICATION		BIT(8)
+	#define BIT_OTP_SIGNATURE_ERR_TALYN_MB		BIT(0)
+	#define BIT_OTP_HW_SECTION_DONE_TALYN_MB	BIT(2)
+	#define BIT_NO_FLASH_INDICATION			BIT(8)
 #define RGF_USER_XPM_IFC_RD_TIME1	(0x880cec)
 #define RGF_USER_XPM_IFC_RD_TIME2	(0x880cf0)
 #define RGF_USER_XPM_IFC_RD_TIME3	(0x880cf4)
@@ -305,20 +314,49 @@ struct RGF_ICR {
 #define RGF_CAF_PLL_LOCK_STATUS		(0x88afec)
 	#define BIT_CAF_OSC_DIG_XTAL_STABLE	BIT(0)
 
+#define RGF_OTP_QC_SECURED		(0x8a0038)
+	#define BIT_BOOT_FROM_ROM		BIT(31)
+
+/* eDMA */
+#define RGF_INT_COUNT_ON_SPECIAL_EVT	(0x8b62d8)
+
+#define RGF_INT_CTRL_INT_GEN_CFG_0	(0x8bc000)
+#define RGF_INT_CTRL_INT_GEN_CFG_1	(0x8bc004)
+#define RGF_INT_GEN_TIME_UNIT_LIMIT	(0x8bc0c8)
+
+#define RGF_INT_GEN_CTRL		(0x8bc0ec)
+	#define BIT_CONTROL_0			BIT(0)
+
+/* eDMA status interrupts */
+#define RGF_INT_GEN_RX_ICR		(0x8bc0f4)
+	#define BIT_RX_STATUS_IRQ BIT(WIL_RX_STATUS_IRQ_IDX)
+#define RGF_INT_GEN_TX_ICR		(0x8bc110)
+	#define BIT_TX_STATUS_IRQ BIT(WIL_TX_STATUS_IRQ_IDX)
+#define RGF_INT_CTRL_RX_INT_MASK	(0x8bc12c)
+#define RGF_INT_CTRL_TX_INT_MASK	(0x8bc130)
+
+#define RGF_INT_GEN_IDLE_TIME_LIMIT	(0x8bc134)
+
 #define USER_EXT_USER_PMU_3		(0x88d00c)
 	#define BIT_PMU_DEVICE_RDY		BIT(0)
 
 #define RGF_USER_JTAG_DEV_ID	(0x880b34) /* device ID */
 	#define JTAG_DEV_ID_SPARROW	(0x2632072f)
 	#define JTAG_DEV_ID_TALYN	(0x7e0e1)
+	#define JTAG_DEV_ID_TALYN_MB	(0x1007e0e1)
 
 #define RGF_USER_REVISION_ID		(0x88afe4)
 #define RGF_USER_REVISION_ID_MASK	(3)
 	#define REVISION_ID_SPARROW_B0	(0x0)
 	#define REVISION_ID_SPARROW_D0	(0x3)
 
+#define RGF_OTP_MAC_TALYN_MB		(0x8a0304)
 #define RGF_OTP_MAC			(0x8a0620)
 
+/* Talyn-MB */
+#define RGF_USER_USER_CPU_0_TALYN_MB	(0x8c0138)
+#define RGF_USER_MAC_CPU_0_TALYN_MB	(0x8c0154)
+
 /* crash codes for FW/Ucode stored here */
 
 /* ASSERT RGFs */
@@ -332,6 +370,7 @@ enum {
 	HW_VER_SPARROW_B0, /* REVISION_ID_SPARROW_B0 */
 	HW_VER_SPARROW_D0, /* REVISION_ID_SPARROW_D0 */
 	HW_VER_TALYN,	/* JTAG_DEV_ID_TALYN */
+	HW_VER_TALYN_MB	/* JTAG_DEV_ID_TALYN_MB */
 };
 
 /* popular locations */
@@ -349,7 +388,14 @@ enum {
 /* Hardware definitions end */
 #define SPARROW_FW_MAPPING_TABLE_SIZE 10
 #define TALYN_FW_MAPPING_TABLE_SIZE 13
-#define MAX_FW_MAPPING_TABLE_SIZE 13
+#define TALYN_MB_FW_MAPPING_TABLE_SIZE 19
+#define MAX_FW_MAPPING_TABLE_SIZE 19
+
+/* Common representation of physical address in wil ring */
+struct wil_ring_dma_addr {
+	__le32 addr_low;
+	__le16 addr_high;
+} __packed;
 
 struct fw_map {
 	u32 from; /* linker address - from, inclusive */
@@ -357,12 +403,14 @@ struct fw_map {
 	u32 host; /* PCI/Host address - BAR0 + 0x880000 */
 	const char *name; /* for debugfs */
 	bool fw; /* true if FW mapping, false if UCODE mapping */
+	bool crash_dump; /* true if should be dumped during crash dump */
 };
 
 /* array size should be in sync with actual definition in the wmi.c */
 extern const struct fw_map sparrow_fw_mapping[SPARROW_FW_MAPPING_TABLE_SIZE];
 extern const struct fw_map sparrow_d0_mac_rgf_ext;
 extern const struct fw_map talyn_fw_mapping[TALYN_FW_MAPPING_TABLE_SIZE];
+extern const struct fw_map talyn_mb_fw_mapping[TALYN_MB_FW_MAPPING_TABLE_SIZE];
 extern struct fw_map fw_mapping[MAX_FW_MAPPING_TABLE_SIZE];
 
 /**
@@ -438,7 +486,7 @@ enum { /* for wil_ctx.mapped_as */
 };
 
 /**
- * struct wil_ctx - software context for Vring descriptor
+ * struct wil_ctx - software context for ring descriptor
  */
 struct wil_ctx {
 	struct sk_buff *skb;
@@ -446,22 +494,96 @@ struct wil_ctx {
 	u8 mapped_as;
 };
 
-union vring_desc;
+struct wil_desc_ring_rx_swtail { /* relevant for enhanced DMA only */
+	u32 *va;
+	dma_addr_t pa;
+};
 
-struct vring {
+/**
+ * A general ring structure, used for RX and TX.
+ * In legacy DMA it represents the vring,
+ * In enahnced DMA it represents the descriptor ring (vrings are handled by FW)
+ */
+struct wil_ring {
 	dma_addr_t pa;
-	volatile union vring_desc *va; /* vring_desc[size], WriteBack by DMA */
-	u16 size; /* number of vring_desc elements */
+	volatile union wil_ring_desc *va;
+	u16 size; /* number of wil_ring_desc elements */
 	u32 swtail;
 	u32 swhead;
 	u32 hwtail; /* write here to inform hw */
 	struct wil_ctx *ctx; /* ctx[size] - software context */
+	struct wil_desc_ring_rx_swtail edma_rx_swtail;
+	bool is_rx;
 };
 
 /**
- * Additional data for Tx Vring
+ * Additional data for Rx ring.
+ * Used for enhanced DMA RX chaining.
  */
-struct vring_tx_data {
+struct wil_ring_rx_data {
+	/* the skb being assembled */
+	struct sk_buff *skb;
+	/* true if we are skipping a bad fragmented packet */
+	bool skipping;
+	u16 buff_size;
+};
+
+/**
+ * Status ring structure, used for enhanced DMA completions for RX and TX.
+ */
+struct wil_status_ring {
+	dma_addr_t pa;
+	void *va; /* pointer to ring_[tr]x_status elements */
+	u16 size; /* number of status elements */
+	size_t elem_size; /* status element size in bytes */
+	u32 swhead;
+	u32 hwtail; /* write here to inform hw */
+	bool is_rx;
+	u8 desc_rdy_pol; /* Expected descriptor ready bit polarity */
+	struct wil_ring_rx_data rx_data;
+};
+
+/**
+ * struct wil_txrx_ops - different TX/RX ops for the legacy and enhanced
+ * DMA flows
+ */
+struct wil_txrx_ops {
+	void (*configure_interrupt_moderation)(struct wil6210_priv *wil);
+	/* TX ops */
+	int (*ring_init_tx)(struct wil6210_vif *vif, int ring_id,
+			    int size, int cid, int tid);
+	void (*ring_fini_tx)(struct wil6210_priv *wil, struct wil_ring *ring);
+	int (*ring_init_bcast)(struct wil6210_vif *vif, int id, int size);
+	int (*tx_init)(struct wil6210_priv *wil);
+	void (*tx_fini)(struct wil6210_priv *wil);
+	int (*tx_desc_map)(union wil_tx_desc *desc, dma_addr_t pa,
+			   u32 len, int ring_index);
+	void (*tx_desc_unmap)(struct device *dev,
+			      union wil_tx_desc *desc,
+			      struct wil_ctx *ctx);
+	int (*tx_ring_tso)(struct wil6210_priv *wil, struct wil6210_vif *vif,
+			   struct wil_ring *ring, struct sk_buff *skb);
+	irqreturn_t (*irq_tx)(int irq, void *cookie);
+	/* RX ops */
+	int (*rx_init)(struct wil6210_priv *wil, u16 ring_size);
+	void (*rx_fini)(struct wil6210_priv *wil);
+	int (*wmi_addba_rx_resp)(struct wil6210_priv *wil, u8 mid, u8 cid,
+				 u8 tid, u8 token, u16 status, bool amsdu,
+				 u16 agg_wsize, u16 timeout);
+	void (*get_reorder_params)(struct wil6210_priv *wil,
+				   struct sk_buff *skb, int *tid, int *cid,
+				   int *mid, u16 *seq, int *mcast);
+	void (*get_netif_rx_params)(struct sk_buff *skb,
+				    int *cid, int *security);
+	int (*rx_crypto_check)(struct wil6210_priv *wil, struct sk_buff *skb);
+	bool (*is_rx_idle)(struct wil6210_priv *wil);
+	irqreturn_t (*irq_rx)(int irq, void *cookie);
+};
+
+/**
+ * Additional data for Tx ring
+ */
+struct wil_ring_tx_data {
 	bool dot1x_open;
 	int enabled;
 	cycles_t idle, last_idle, begin;
@@ -564,6 +686,9 @@ struct wil_net_stats {
 	unsigned long	rx_short_frame;
 	unsigned long	rx_large_frame;
 	unsigned long	rx_replay;
+	unsigned long	rx_mic_error; /* eDMA specific */
+	unsigned long	rx_key_error; /* eDMA specific */
+	unsigned long	rx_amsdu_error; /* eDMA specific */
 	u16 last_mcs_rx;
 	u64 rx_per_mcs[WIL_MCS_MAX + 1];
 };
@@ -681,7 +806,7 @@ struct wil6210_vif {
 	u8 hidden_ssid; /* relevant in AP mode */
 	u32 ap_isolate; /* no intra-BSS communication */
 	bool pbss;
-	int bcast_vring;
+	int bcast_ring;
 	struct cfg80211_bss *bss; /* connected bss, relevant in STA mode */
 	int locally_generated_disc; /* relevant in STA mode */
 	struct timer_list connect_timer;
@@ -697,6 +822,31 @@ struct wil6210_vif {
 	int net_queue_stopped; /* netif_tx_stop_all_queues invoked */
 };
 
+/**
+ * RX buffer allocated for enhanced DMA RX descriptors
+ */
+struct wil_rx_buff {
+	struct sk_buff *skb;
+	struct list_head list;
+	int id;
+};
+
+/**
+ * During Rx completion processing, the driver extracts a buffer ID which
+ * is used as an index into the rx_buff_mgmt.buff_arr array; the SKB is
+ * then given to the network stack and the buffer is moved from the
+ * 'active' list to the 'free' list.
+ * During Rx refill, SKBs are attached to free buffers and moved to the
+ * 'active' list.
+ */
+struct wil_rx_buff_mgmt {
+	struct wil_rx_buff *buff_arr;
+	size_t size; /* number of items in buff_arr */
+	struct list_head active;
+	struct list_head free;
+	unsigned long free_list_empty_cnt; /* statistics */
+};
+
 struct wil6210_priv {
 	struct pci_dev *pdev;
 	u32 bar_size;
@@ -761,14 +911,20 @@ struct wil6210_priv {
 	struct net_device napi_ndev; /* dummy net_device serving all VIFs */
 
 	/* DMA related */
-	struct vring vring_rx;
+	struct wil_ring ring_rx;
 	unsigned int rx_buf_len;
-	struct vring vring_tx[WIL6210_MAX_TX_RINGS];
-	struct vring_tx_data vring_tx_data[WIL6210_MAX_TX_RINGS];
-	u8 vring2cid_tid[WIL6210_MAX_TX_RINGS][2]; /* [0] - CID, [1] - TID */
+	struct wil_ring ring_tx[WIL6210_MAX_TX_RINGS];
+	struct wil_ring_tx_data ring_tx_data[WIL6210_MAX_TX_RINGS];
+	struct wil_status_ring srings[WIL6210_MAX_STATUS_RINGS];
+	u8 num_rx_status_rings;
+	int tx_sring_idx;
+	u8 ring2cid_tid[WIL6210_MAX_TX_RINGS][2]; /* [0] - CID, [1] - TID */
 	struct wil_sta_info sta[WIL6210_MAX_CID];
-	u32 vring_idle_trsh; /* HW fetches up to 16 descriptors at once  */
+	u32 ring_idle_trsh; /* HW fetches up to 16 descriptors at once  */
 	u32 dma_addr_size; /* indicates dma addr size */
+	struct wil_rx_buff_mgmt rx_buff_mgmt;
+	bool use_enhanced_dma_hw;
+	struct wil_txrx_ops txrx_ops;
 
 	struct mutex mutex; /* for wil6210_priv access in wil_{up|down} */
 	/* statistics */
@@ -811,6 +967,16 @@ struct wil6210_priv {
 	u32 rgf_fw_assert_code_addr;
 	u32 rgf_ucode_assert_code_addr;
 	u32 iccm_base;
+
+	/* relevant only for eDMA */
+	bool use_compressed_rx_status;
+	u32 rx_status_ring_order;
+	u32 tx_status_ring_order;
+	u32 rx_buff_id_count;
+	bool amsdu_en;
+	bool use_rx_hw_reordering;
+	bool secured_boot;
+	u8 boot_config;
 };
 
 #define wil_to_wiphy(i) (i->wiphy)
@@ -990,7 +1156,7 @@ int wmi_add_cipher_key(struct wil6210_vif *vif, u8 key_index,
 		       int key_usage);
 int wmi_echo(struct wil6210_priv *wil);
 int wmi_set_ie(struct wil6210_vif *vif, u8 type, u16 ie_len, const void *ie);
-int wmi_rx_chain_add(struct wil6210_priv *wil, struct vring *vring);
+int wmi_rx_chain_add(struct wil6210_priv *wil, struct wil_ring *vring);
 int wmi_rxon(struct wil6210_priv *wil, bool on);
 int wmi_get_temperature(struct wil6210_priv *wil, u32 *t_m, u32 *t_r);
 int wmi_disconnect_sta(struct wil6210_vif *vif, const u8 *mac,
@@ -1083,30 +1249,28 @@ void wil_probe_client_flush(struct wil6210_vif *vif);
 void wil_probe_client_worker(struct work_struct *work);
 void wil_disconnect_worker(struct work_struct *work);
 
-int wil_rx_init(struct wil6210_priv *wil, u16 size);
-void wil_rx_fini(struct wil6210_priv *wil);
+void wil_init_txrx_ops(struct wil6210_priv *wil);
 
 /* TX API */
-int wil_vring_init_tx(struct wil6210_vif *vif, int id, int size,
-		      int cid, int tid);
-void wil_vring_fini_tx(struct wil6210_priv *wil, int id);
-int wil_tx_init(struct wil6210_vif *vif, int cid);
+int wil_ring_init_tx(struct wil6210_vif *vif, int cid);
 int wil_vring_init_bcast(struct wil6210_vif *vif, int id, int size);
 int wil_bcast_init(struct wil6210_vif *vif);
 void wil_bcast_fini(struct wil6210_vif *vif);
 void wil_bcast_fini_all(struct wil6210_priv *wil);
 
 void wil_update_net_queues(struct wil6210_priv *wil, struct wil6210_vif *vif,
-			   struct vring *vring, bool should_stop);
+			   struct wil_ring *ring, bool should_stop);
 void wil_update_net_queues_bh(struct wil6210_priv *wil, struct wil6210_vif *vif,
-			      struct vring *vring, bool check_stop);
+			      struct wil_ring *ring, bool check_stop);
 netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev);
 int wil_tx_complete(struct wil6210_vif *vif, int ringid);
 void wil6210_unmask_irq_tx(struct wil6210_priv *wil);
+void wil6210_unmask_irq_tx_edma(struct wil6210_priv *wil);
 
 /* RX API */
 void wil_rx_handle(struct wil6210_priv *wil, int *quota);
 void wil6210_unmask_irq_rx(struct wil6210_priv *wil);
+void wil6210_unmask_irq_rx_edma(struct wil6210_priv *wil);
 
 int wil_iftype_nl2wmi(enum nl80211_iftype type);
 
@@ -1127,7 +1291,6 @@ bool wil_is_wmi_idle(struct wil6210_priv *wil);
 int wmi_resume(struct wil6210_priv *wil);
 int wmi_suspend(struct wil6210_priv *wil);
 bool wil_is_tx_idle(struct wil6210_priv *wil);
-bool wil_is_rx_idle(struct wil6210_priv *wil);
 
 int wil_fw_copy_crash_dump(struct wil6210_priv *wil, void *dest, u32 size);
 void wil_fw_core_dump(struct wil6210_priv *wil);
@@ -1142,4 +1305,19 @@ int wmi_start_sched_scan(struct wil6210_priv *wil,
 int wmi_stop_sched_scan(struct wil6210_priv *wil);
 int wmi_mgmt_tx(struct wil6210_vif *vif, const u8 *buf, size_t len);
 
+int reverse_memcmp(const void *cs, const void *ct, size_t count);
+
+/* WMI for enhanced DMA */
+int wil_wmi_tx_sring_cfg(struct wil6210_priv *wil, int ring_id);
+int wil_wmi_cfg_def_rx_offload(struct wil6210_priv *wil,
+			       u16 max_rx_pl_per_desc);
+int wil_wmi_rx_sring_add(struct wil6210_priv *wil, u16 ring_id);
+int wil_wmi_rx_desc_ring_add(struct wil6210_priv *wil, int status_ring_id);
+int wil_wmi_tx_desc_ring_add(struct wil6210_vif *vif, int ring_id, int cid,
+			     int tid);
+int wil_wmi_bcast_desc_ring_add(struct wil6210_vif *vif, int ring_id);
+int wmi_addba_rx_resp_edma(struct wil6210_priv *wil, u8 mid, u8 cid,
+			   u8 tid, u8 token, u16 status, bool amsdu,
+			   u16 agg_wsize, u16 timeout);
+
 #endif /* __WIL6210_H__ */
diff --git a/drivers/net/wireless/ath/wil6210/wil_crash_dump.c b/drivers/net/wireless/ath/wil6210/wil_crash_dump.c
index 1ed330674d9b8d5aa17e9647cf9a80c8787c2151..dc33a0b4c3fac55c67371aa7ef762a2c90320636 100644
--- a/drivers/net/wireless/ath/wil6210/wil_crash_dump.c
+++ b/drivers/net/wireless/ath/wil6210/wil_crash_dump.c
@@ -1,5 +1,6 @@
 /*
  * Copyright (c) 2015,2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -36,7 +37,7 @@ static int wil_fw_get_crash_dump_bounds(struct wil6210_priv *wil,
 	for (i = 1; i < ARRAY_SIZE(fw_mapping); i++) {
 		map = &fw_mapping[i];
 
-		if (!map->fw)
+		if (!map->crash_dump)
 			continue;
 
 		if (map->host < host_min)
@@ -85,7 +86,7 @@ int wil_fw_copy_crash_dump(struct wil6210_priv *wil, void *dest, u32 size)
 	for (i = 0; i < ARRAY_SIZE(fw_mapping); i++) {
 		map = &fw_mapping[i];
 
-		if (!map->fw)
+		if (!map->crash_dump)
 			continue;
 
 		data = (void * __force)wil->csr + HOSTADDR(map->host);
diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c
index 5d991243cdb59c341d5637dc20a82cbbc9f9f389..71056c834fff339eb5239134061033dc6b5fa436 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.c
+++ b/drivers/net/wireless/ath/wil6210/wmi.c
@@ -89,28 +89,28 @@ MODULE_PARM_DESC(led_id,
  */
 const struct fw_map sparrow_fw_mapping[] = {
 	/* FW code RAM 256k */
-	{0x000000, 0x040000, 0x8c0000, "fw_code", true},
+	{0x000000, 0x040000, 0x8c0000, "fw_code", true, true},
 	/* FW data RAM 32k */
-	{0x800000, 0x808000, 0x900000, "fw_data", true},
+	{0x800000, 0x808000, 0x900000, "fw_data", true, true},
 	/* periph data 128k */
-	{0x840000, 0x860000, 0x908000, "fw_peri", true},
+	{0x840000, 0x860000, 0x908000, "fw_peri", true, true},
 	/* various RGF 40k */
-	{0x880000, 0x88a000, 0x880000, "rgf", true},
+	{0x880000, 0x88a000, 0x880000, "rgf", true, true},
 	/* AGC table   4k */
-	{0x88a000, 0x88b000, 0x88a000, "AGC_tbl", true},
+	{0x88a000, 0x88b000, 0x88a000, "AGC_tbl", true, true},
 	/* Pcie_ext_rgf 4k */
-	{0x88b000, 0x88c000, 0x88b000, "rgf_ext", true},
+	{0x88b000, 0x88c000, 0x88b000, "rgf_ext", true, true},
 	/* mac_ext_rgf 512b */
-	{0x88c000, 0x88c200, 0x88c000, "mac_rgf_ext", true},
+	{0x88c000, 0x88c200, 0x88c000, "mac_rgf_ext", true, true},
 	/* upper area 548k */
-	{0x8c0000, 0x949000, 0x8c0000, "upper", true},
+	{0x8c0000, 0x949000, 0x8c0000, "upper", true, true},
 	/* UCODE areas - accessible by debugfs blobs but not by
 	 * wmi_addr_remap. UCODE areas MUST be added AFTER FW areas!
 	 */
 	/* ucode code RAM 128k */
-	{0x000000, 0x020000, 0x920000, "uc_code", false},
+	{0x000000, 0x020000, 0x920000, "uc_code", false, false},
 	/* ucode data RAM 16k */
-	{0x800000, 0x804000, 0x940000, "uc_data", false},
+	{0x800000, 0x804000, 0x940000, "uc_data", false, false},
 };
 
 /**
@@ -118,7 +118,7 @@ const struct fw_map sparrow_fw_mapping[] = {
  * it is a bit larger to support extra features
  */
 const struct fw_map sparrow_d0_mac_rgf_ext = {
-	0x88c000, 0x88c500, 0x88c000, "mac_rgf_ext", true
+	0x88c000, 0x88c500, 0x88c000, "mac_rgf_ext", true, true
 };
 
 /**
@@ -134,34 +134,89 @@ const struct fw_map sparrow_d0_mac_rgf_ext = {
  */
 const struct fw_map talyn_fw_mapping[] = {
 	/* FW code RAM 1M */
-	{0x000000, 0x100000, 0x900000, "fw_code", true},
+	{0x000000, 0x100000, 0x900000, "fw_code", true, true},
 	/* FW data RAM 128k */
-	{0x800000, 0x820000, 0xa00000, "fw_data", true},
+	{0x800000, 0x820000, 0xa00000, "fw_data", true, true},
 	/* periph. data RAM 96k */
-	{0x840000, 0x858000, 0xa20000, "fw_peri", true},
+	{0x840000, 0x858000, 0xa20000, "fw_peri", true, true},
 	/* various RGF 40k */
-	{0x880000, 0x88a000, 0x880000, "rgf", true},
+	{0x880000, 0x88a000, 0x880000, "rgf", true, true},
 	/* AGC table 4k */
-	{0x88a000, 0x88b000, 0x88a000, "AGC_tbl", true},
+	{0x88a000, 0x88b000, 0x88a000, "AGC_tbl", true, true},
 	/* Pcie_ext_rgf 4k */
-	{0x88b000, 0x88c000, 0x88b000, "rgf_ext", true},
+	{0x88b000, 0x88c000, 0x88b000, "rgf_ext", true, true},
 	/* mac_ext_rgf 1344b */
-	{0x88c000, 0x88c540, 0x88c000, "mac_rgf_ext", true},
+	{0x88c000, 0x88c540, 0x88c000, "mac_rgf_ext", true, true},
 	/* ext USER RGF 4k */
-	{0x88d000, 0x88e000, 0x88d000, "ext_user_rgf", true},
+	{0x88d000, 0x88e000, 0x88d000, "ext_user_rgf", true, true},
 	/* OTP 4k */
-	{0x8a0000, 0x8a1000, 0x8a0000, "otp", true},
+	{0x8a0000, 0x8a1000, 0x8a0000, "otp", true, false},
 	/* DMA EXT RGF 64k */
-	{0x8b0000, 0x8c0000, 0x8b0000, "dma_ext_rgf", true},
+	{0x8b0000, 0x8c0000, 0x8b0000, "dma_ext_rgf", true, true},
 	/* upper area 1536k */
-	{0x900000, 0xa80000, 0x900000, "upper", true},
+	{0x900000, 0xa80000, 0x900000, "upper", true, true},
 	/* UCODE areas - accessible by debugfs blobs but not by
 	 * wmi_addr_remap. UCODE areas MUST be added AFTER FW areas!
 	 */
 	/* ucode code RAM 256k */
-	{0x000000, 0x040000, 0xa38000, "uc_code", false},
+	{0x000000, 0x040000, 0xa38000, "uc_code", false, false},
 	/* ucode data RAM 32k */
-	{0x800000, 0x808000, 0xa78000, "uc_data", false},
+	{0x800000, 0x808000, 0xa78000, "uc_data", false, false},
+};
+
+/**
+ * @talyn_mb_fw_mapping provides the memory remapping table for Talyn-MB
+ *
+ * The array size should be kept in sync with the declaration in wil6210.h
+ *
+ * Talyn MB memory mapping:
+ * Linker address         PCI/Host address
+ *                        0x880000 .. 0xc80000  4Mb BAR0
+ * 0x800000 .. 0x820000   0xa00000 .. 0xa20000  128k DCCM
+ * 0x840000 .. 0x858000   0xa20000 .. 0xa38000  96k PERIPH
+ */
+const struct fw_map talyn_mb_fw_mapping[] = {
+	/* FW code RAM 768k */
+	{0x000000, 0x0c0000, 0x900000, "fw_code", true, true},
+	/* FW data RAM 128k */
+	{0x800000, 0x820000, 0xa00000, "fw_data", true, true},
+	/* periph. data RAM 96k */
+	{0x840000, 0x858000, 0xa20000, "fw_peri", true, true},
+	/* various RGF 40k */
+	{0x880000, 0x88a000, 0x880000, "rgf", true, true},
+	/* AGC table 4k */
+	{0x88a000, 0x88b000, 0x88a000, "AGC_tbl", true, true},
+	/* Pcie_ext_rgf 4k */
+	{0x88b000, 0x88c000, 0x88b000, "rgf_ext", true, true},
+	/* mac_ext_rgf 2256b */
+	{0x88c000, 0x88c8d0, 0x88c000, "mac_rgf_ext", true, true},
+	/* ext USER RGF 4k */
+	{0x88d000, 0x88e000, 0x88d000, "ext_user_rgf", true, true},
+	/* SEC PKA 16k */
+	{0x890000, 0x894000, 0x890000, "sec_pka", true, true},
+	/* SEC KDF RGF 3096b */
+	{0x898000, 0x898c18, 0x898000, "sec_kdf_rgf", true, true},
+	/* SEC MAIN 2124b */
+	{0x89a000, 0x89a84c, 0x89a000, "sec_main", true, true},
+	/* OTP 4k */
+	{0x8a0000, 0x8a1000, 0x8a0000, "otp", true, false},
+	/* DMA EXT RGF 64k */
+	{0x8b0000, 0x8c0000, 0x8b0000, "dma_ext_rgf", true, true},
+	/* DUM USER RGF 528b */
+	{0x8c0000, 0x8c0210, 0x8c0000, "dum_user_rgf", true, true},
+	/* DMA OFU 296b */
+	{0x8c2000, 0x8c2128, 0x8c2000, "dma_ofu", true, true},
+	/* ucode debug 4k */
+	{0x8c3000, 0x8c4000, 0x8c3000, "ucode_debug", true, true},
+	/* upper area 1536k */
+	{0x900000, 0xa80000, 0x900000, "upper", true, true},
+	/* UCODE areas - accessible by debugfs blobs but not by
+	 * wmi_addr_remap. UCODE areas MUST be added AFTER FW areas!
+	 */
+	/* ucode code RAM 256k */
+	{0x000000, 0x040000, 0xa38000, "uc_code", false, false},
+	/* ucode data RAM 32k */
+	{0x800000, 0x808000, 0xa78000, "uc_data", false, false},
 };
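The table entries follow the same {linker from, linker to, host, name, fw, crash_dump} layout as the other fw_map arrays in this file, so the linker-to-host mapping documented in the comment reduces to simple offset arithmetic. A sketch, assuming those field names; talyn_mb_linker_to_host() is a hypothetical helper, not part of the patch:

static u32 talyn_mb_linker_to_host(u32 linker_addr)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(talyn_mb_fw_mapping); i++) {
		const struct fw_map *map = &talyn_mb_fw_mapping[i];

		/* each entry maps [from, to) onto the window at 'host',
		 * e.g. fw_code 0x000000..0x0c0000 -> 0x900000..0x9c0000;
		 * FW areas come first, so an overlapping UCODE window
		 * never shadows them
		 */
		if (linker_addr >= map->from && linker_addr < map->to)
			return linker_addr - map->from + map->host;
	}

	return 0; /* not covered by any window */
}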
 
 struct fw_map fw_mapping[MAX_FW_MAPPING_TABLE_SIZE];
@@ -365,14 +420,16 @@ static const char *cmdid2name(u16 cmdid)
 		return "WMI_DEL_STA_CMD";
 	case WMI_DISCONNECT_STA_CMDID:
 		return "WMI_DISCONNECT_STA_CMD";
-	case WMI_VRING_BA_EN_CMDID:
-		return "WMI_VRING_BA_EN_CMD";
-	case WMI_VRING_BA_DIS_CMDID:
-		return "WMI_VRING_BA_DIS_CMD";
+	case WMI_RING_BA_EN_CMDID:
+		return "WMI_RING_BA_EN_CMD";
+	case WMI_RING_BA_DIS_CMDID:
+		return "WMI_RING_BA_DIS_CMD";
 	case WMI_RCP_DELBA_CMDID:
 		return "WMI_RCP_DELBA_CMD";
 	case WMI_RCP_ADDBA_RESP_CMDID:
 		return "WMI_RCP_ADDBA_RESP_CMD";
+	case WMI_RCP_ADDBA_RESP_EDMA_CMDID:
+		return "WMI_RCP_ADDBA_RESP_EDMA_CMD";
 	case WMI_PS_DEV_PROFILE_CFG_CMDID:
 		return "WMI_PS_DEV_PROFILE_CFG_CMD";
 	case WMI_SET_MGMT_RETRY_LIMIT_CMDID:
@@ -395,6 +452,18 @@ static const char *cmdid2name(u16 cmdid)
 		return "WMI_START_SCHED_SCAN_CMD";
 	case WMI_STOP_SCHED_SCAN_CMDID:
 		return "WMI_STOP_SCHED_SCAN_CMD";
+	case WMI_TX_STATUS_RING_ADD_CMDID:
+		return "WMI_TX_STATUS_RING_ADD_CMD";
+	case WMI_RX_STATUS_RING_ADD_CMDID:
+		return "WMI_RX_STATUS_RING_ADD_CMD";
+	case WMI_TX_DESC_RING_ADD_CMDID:
+		return "WMI_TX_DESC_RING_ADD_CMD";
+	case WMI_RX_DESC_RING_ADD_CMDID:
+		return "WMI_RX_DESC_RING_ADD_CMD";
+	case WMI_BCAST_DESC_RING_ADD_CMDID:
+		return "WMI_BCAST_DESC_RING_ADD_CMD";
+	case WMI_CFG_DEF_RX_OFFLOAD_CMDID:
+		return "WMI_CFG_DEF_RX_OFFLOAD_CMD";
 	default:
 		return "Untracked CMD";
 	}
@@ -449,8 +518,8 @@ static const char *eventid2name(u16 eventid)
 		return "WMI_RCP_ADDBA_REQ_EVENT";
 	case WMI_DELBA_EVENTID:
 		return "WMI_DELBA_EVENT";
-	case WMI_VRING_EN_EVENTID:
-		return "WMI_VRING_EN_EVENT";
+	case WMI_RING_EN_EVENTID:
+		return "WMI_RING_EN_EVENT";
 	case WMI_DATA_PORT_OPEN_EVENTID:
 		return "WMI_DATA_PORT_OPEN_EVENT";
 	case WMI_AOA_MEAS_EVENTID:
@@ -519,6 +588,16 @@ static const char *eventid2name(u16 eventid)
 		return "WMI_STOP_SCHED_SCAN_EVENT";
 	case WMI_SCHED_SCAN_RESULT_EVENTID:
 		return "WMI_SCHED_SCAN_RESULT_EVENT";
+	case WMI_TX_STATUS_RING_CFG_DONE_EVENTID:
+		return "WMI_TX_STATUS_RING_CFG_DONE_EVENT";
+	case WMI_RX_STATUS_RING_CFG_DONE_EVENTID:
+		return "WMI_RX_STATUS_RING_CFG_DONE_EVENT";
+	case WMI_TX_DESC_RING_CFG_DONE_EVENTID:
+		return "WMI_TX_DESC_RING_CFG_DONE_EVENT";
+	case WMI_RX_DESC_RING_CFG_DONE_EVENTID:
+		return "WMI_RX_DESC_RING_CFG_DONE_EVENT";
+	case WMI_CFG_DEF_RX_OFFLOAD_DONE_EVENTID:
+		return "WMI_CFG_DEF_RX_OFFLOAD_DONE_EVENT";
 	default:
 		return "Untracked EVENT";
 	}
@@ -906,7 +985,7 @@ static void wmi_evt_connect(struct wil6210_vif *vif, int id, void *d, int len)
 	wil->sta[evt->cid].mid = vif->mid;
 	wil->sta[evt->cid].status = wil_sta_conn_pending;
 
-	rc = wil_tx_init(vif, evt->cid);
+	rc = wil_ring_init_tx(vif, evt->cid);
 	if (rc) {
 		wil_err(wil, "config tx vring failed for CID %d, rc (%d)\n",
 			evt->cid, rc);
@@ -1063,16 +1142,16 @@ static void wmi_evt_eapol_rx(struct wil6210_vif *vif, int id, void *d, int len)
 	}
 }
 
-static void wmi_evt_vring_en(struct wil6210_vif *vif, int id, void *d, int len)
+static void wmi_evt_ring_en(struct wil6210_vif *vif, int id, void *d, int len)
 {
 	struct wil6210_priv *wil = vif_to_wil(vif);
-	struct wmi_vring_en_event *evt = d;
-	u8 vri = evt->vring_index;
+	struct wmi_ring_en_event *evt = d;
+	u8 vri = evt->ring_index;
 	struct wireless_dev *wdev = vif_to_wdev(vif);
 
 	wil_dbg_wmi(wil, "Enable vring %d MID %d\n", vri, vif->mid);
 
-	if (vri >= ARRAY_SIZE(wil->vring_tx)) {
+	if (vri >= ARRAY_SIZE(wil->ring_tx)) {
 		wil_err(wil, "Enable for invalid vring %d\n", vri);
 		return;
 	}
@@ -1081,8 +1160,8 @@ static void wmi_evt_vring_en(struct wil6210_vif *vif, int id, void *d, int len)
 		/* in AP mode with disable_ap_sme, this is done by
 		 * wil_cfg80211_change_station()
 		 */
-		wil->vring_tx_data[vri].dot1x_open = true;
-	if (vri == vif->bcast_vring) /* no BA for bcast */
+		wil->ring_tx_data[vri].dot1x_open = true;
+	if (vri == vif->bcast_ring) /* no BA for bcast */
 		return;
 	if (agg_wsize >= 0)
 		wil_addba_tx_request(wil, vri, agg_wsize);
@@ -1093,7 +1172,7 @@ static void wmi_evt_ba_status(struct wil6210_vif *vif, int id,
 {
 	struct wil6210_priv *wil = vif_to_wil(vif);
 	struct wmi_ba_status_event *evt = d;
-	struct vring_tx_data *txdata;
+	struct wil_ring_tx_data *txdata;
 
 	wil_dbg_wmi(wil, "BACK[%d] %s {%d} timeout %d AMSDU%s\n",
 		    evt->ringid,
@@ -1112,7 +1191,7 @@ static void wmi_evt_ba_status(struct wil6210_vif *vif, int id,
 		evt->amsdu = 0;
 	}
 
-	txdata = &wil->vring_tx_data[evt->ringid];
+	txdata = &wil->ring_tx_data[evt->ringid];
 
 	txdata->agg_timeout = le16_to_cpu(evt->ba_timeout);
 	txdata->agg_wsize = evt->agg_wsize;
@@ -1150,11 +1229,11 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
 	if (!evt->from_initiator) {
 		int i;
 		/* find Tx vring it belongs to */
-		for (i = 0; i < ARRAY_SIZE(wil->vring2cid_tid); i++) {
-			if ((wil->vring2cid_tid[i][0] == cid) &&
-			    (wil->vring2cid_tid[i][1] == tid)) {
-				struct vring_tx_data *txdata =
-					&wil->vring_tx_data[i];
+		for (i = 0; i < ARRAY_SIZE(wil->ring2cid_tid); i++) {
+			if (wil->ring2cid_tid[i][0] == cid &&
+			    wil->ring2cid_tid[i][1] == tid) {
+				struct wil_ring_tx_data *txdata =
+					&wil->ring_tx_data[i];
 
 				wil_dbg_wmi(wil, "DELBA Tx vring %d\n", i);
 				txdata->agg_timeout = 0;
@@ -1164,7 +1243,7 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
 				break; /* max. 1 matching ring */
 			}
 		}
-		if (i >= ARRAY_SIZE(wil->vring2cid_tid))
+		if (i >= ARRAY_SIZE(wil->ring2cid_tid))
 			wil_err(wil, "DELBA: unable to find Tx vring\n");
 		return;
 	}
@@ -1277,7 +1356,7 @@ static const struct {
 	{WMI_BA_STATUS_EVENTID,		wmi_evt_ba_status},
 	{WMI_RCP_ADDBA_REQ_EVENTID,	wmi_evt_addba_rx_req},
 	{WMI_DELBA_EVENTID,		wmi_evt_delba},
-	{WMI_VRING_EN_EVENTID,		wmi_evt_vring_en},
+	{WMI_RING_EN_EVENTID,		wmi_evt_ring_en},
 	{WMI_DATA_PORT_OPEN_EVENTID,		wmi_evt_ignore},
 	{WMI_SCHED_SCAN_RESULT_EVENTID,		wmi_evt_sched_scan_result},
 };
@@ -1909,7 +1988,7 @@ int wmi_rxon(struct wil6210_priv *wil, bool on)
 	return rc;
 }
 
-int wmi_rx_chain_add(struct wil6210_priv *wil, struct vring *vring)
+int wmi_rx_chain_add(struct wil6210_priv *wil, struct wil_ring *vring)
 {
 	struct net_device *ndev = wil->main_ndev;
 	struct wireless_dev *wdev = ndev->ieee80211_ptr;
@@ -2063,29 +2142,32 @@ int wmi_disconnect_sta(struct wil6210_vif *vif, const u8 *mac,
 int wmi_addba(struct wil6210_priv *wil, u8 mid,
 	      u8 ringid, u8 size, u16 timeout)
 {
-	struct wmi_vring_ba_en_cmd cmd = {
-		.ringid = ringid,
+	u8 amsdu = wil->use_enhanced_dma_hw && wil->use_rx_hw_reordering &&
+		test_bit(WMI_FW_CAPABILITY_AMSDU, wil->fw_capabilities) &&
+		wil->amsdu_en;
+	struct wmi_ring_ba_en_cmd cmd = {
+		.ring_id = ringid,
 		.agg_max_wsize = size,
 		.ba_timeout = cpu_to_le16(timeout),
-		.amsdu = 0,
+		.amsdu = amsdu,
 	};
 
-	wil_dbg_wmi(wil, "addba: (ring %d size %d timeout %d)\n", ringid, size,
-		    timeout);
+	wil_dbg_wmi(wil, "addba: (ring %d size %d timeout %d amsdu %d)\n",
+		    ringid, size, timeout, amsdu);
 
-	return wmi_send(wil, WMI_VRING_BA_EN_CMDID, mid, &cmd, sizeof(cmd));
+	return wmi_send(wil, WMI_RING_BA_EN_CMDID, mid, &cmd, sizeof(cmd));
 }
 
 int wmi_delba_tx(struct wil6210_priv *wil, u8 mid, u8 ringid, u16 reason)
 {
-	struct wmi_vring_ba_dis_cmd cmd = {
-		.ringid = ringid,
+	struct wmi_ring_ba_dis_cmd cmd = {
+		.ring_id = ringid,
 		.reason = cpu_to_le16(reason),
 	};
 
 	wil_dbg_wmi(wil, "delba_tx: (ring %d reason %d)\n", ringid, reason);
 
-	return wmi_send(wil, WMI_VRING_BA_DIS_CMDID, mid, &cmd, sizeof(cmd));
+	return wmi_send(wil, WMI_RING_BA_DIS_CMDID, mid, &cmd, sizeof(cmd));
 }
 
 int wmi_delba_rx(struct wil6210_priv *wil, u8 mid, u8 cidxtid, u16 reason)
@@ -2146,6 +2228,54 @@ int wmi_addba_rx_resp(struct wil6210_priv *wil,
 	return rc;
 }
 
+int wmi_addba_rx_resp_edma(struct wil6210_priv *wil, u8 mid, u8 cid, u8 tid,
+			   u8 token, u16 status, bool amsdu, u16 agg_wsize,
+			   u16 timeout)
+{
+	int rc;
+	struct wmi_rcp_addba_resp_edma_cmd cmd = {
+		.cid = cid,
+		.tid = tid,
+		.dialog_token = token,
+		.status_code = cpu_to_le16(status),
+		/* bit 0: A-MSDU supported
+		 * bit 1: policy (should be 0 for us)
+		 * bits 2..5: TID
+		 * bits 6..15: buffer size
+		 */
+		.ba_param_set = cpu_to_le16((amsdu ? 1 : 0) | (tid << 2) |
+					    (agg_wsize << 6)),
+		.ba_timeout = cpu_to_le16(timeout),
+		/* route all the connections to status ring 0 */
+		.status_ring_id = WIL_DEFAULT_RX_STATUS_RING_ID,
+	};
+	struct {
+		struct wmi_cmd_hdr wmi;
+		struct wmi_rcp_addba_resp_sent_event evt;
+	} __packed reply = {
+		.evt = {.status = cpu_to_le16(WMI_FW_STATUS_FAILURE)},
+	};
+
+	wil_dbg_wmi(wil,
+		    "ADDBA response for CID %d TID %d size %d timeout %d status %d AMSDU%s, sring_id %d\n",
+		    cid, tid, agg_wsize, timeout, status, amsdu ? "+" : "-",
+		    WIL_DEFAULT_RX_STATUS_RING_ID);
+
+	rc = wmi_call(wil, WMI_RCP_ADDBA_RESP_EDMA_CMDID, mid, &cmd,
+		      sizeof(cmd), WMI_RCP_ADDBA_RESP_SENT_EVENTID, &reply,
+		      sizeof(reply), WIL_WMI_CALL_GENERAL_TO_MS);
+	if (rc)
+		return rc;
+
+	if (reply.evt.status) {
+		wil_err(wil, "ADDBA response failed with status %d\n",
+			le16_to_cpu(reply.evt.status));
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
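For reference, a tiny illustrative mirror of the ba_param_set packing done inline above (hypothetical helper, not part of the patch): with amsdu = true, tid = 3 and agg_wsize = 64 the field works out to 0x1 | 0xc | 0x1000 = 0x100d before the cpu_to_le16() conversion.

static u16 wil_ba_param_set(bool amsdu, u8 tid, u16 agg_wsize)
{
	/* bit 0: A-MSDU supported, bit 1: policy (0 for us),
	 * bits 2..5: TID, bits 6..15: buffer size
	 */
	return (amsdu ? 1 : 0) | (tid << 2) | (agg_wsize << 6);
}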
+
 int wmi_ps_dev_profile_cfg(struct wil6210_priv *wil,
 			   enum wmi_ps_profile_type ps_profile)
 {
@@ -2852,3 +2982,263 @@ int wmi_mgmt_tx(struct wil6210_vif *vif, const u8 *buf, size_t len)
 
 	return rc;
 }
+
+int wil_wmi_tx_sring_cfg(struct wil6210_priv *wil, int ring_id)
+{
+	int rc;
+	struct wil6210_vif *vif = ndev_to_vif(wil->main_ndev);
+	struct wil_status_ring *sring = &wil->srings[ring_id];
+	struct wmi_tx_status_ring_add_cmd cmd = {
+		.ring_cfg = {
+			.ring_size = cpu_to_le16(sring->size),
+		},
+		.irq_index = WIL_TX_STATUS_IRQ_IDX
+	};
+	struct {
+		struct wmi_cmd_hdr hdr;
+		struct wmi_tx_status_ring_cfg_done_event evt;
+	} __packed reply = {
+		.evt = {.status = WMI_FW_STATUS_FAILURE},
+	};
+
+	cmd.ring_cfg.ring_id = ring_id;
+
+	cmd.ring_cfg.ring_mem_base = cpu_to_le64(sring->pa);
+	rc = wmi_call(wil, WMI_TX_STATUS_RING_ADD_CMDID, vif->mid, &cmd,
+		      sizeof(cmd), WMI_TX_STATUS_RING_CFG_DONE_EVENTID,
+		      &reply, sizeof(reply), WIL_WMI_CALL_GENERAL_TO_MS);
+	if (rc) {
+		wil_err(wil, "TX_STATUS_RING_ADD_CMD failed, rc %d\n", rc);
+		return rc;
+	}
+
+	if (reply.evt.status != WMI_FW_STATUS_SUCCESS) {
+		wil_err(wil, "TX_STATUS_RING_ADD_CMD failed, status %d\n",
+			reply.evt.status);
+		return -EINVAL;
+	}
+
+	sring->hwtail = le32_to_cpu(reply.evt.ring_tail_ptr);
+
+	return 0;
+}
+
+int wil_wmi_cfg_def_rx_offload(struct wil6210_priv *wil, u16 max_rx_pl_per_desc)
+{
+	struct net_device *ndev = wil->main_ndev;
+	struct wil6210_vif *vif = ndev_to_vif(ndev);
+	int rc;
+	struct wmi_cfg_def_rx_offload_cmd cmd = {
+		.max_msdu_size = cpu_to_le16(wil_mtu2macbuf(WIL_MAX_ETH_MTU)),
+		.max_rx_pl_per_desc = cpu_to_le16(max_rx_pl_per_desc),
+		.decap_trans_type = WMI_DECAP_TYPE_802_3,
+		.l2_802_3_offload_ctrl = 0,
+		.l3_l4_ctrl = 1 << L3_L4_CTRL_TCPIP_CHECKSUM_EN_POS,
+	};
+	struct {
+		struct wmi_cmd_hdr hdr;
+		struct wmi_cfg_def_rx_offload_done_event evt;
+	} __packed reply = {
+		.evt = {.status = WMI_FW_STATUS_FAILURE},
+	};
+
+	rc = wmi_call(wil, WMI_CFG_DEF_RX_OFFLOAD_CMDID, vif->mid, &cmd,
+		      sizeof(cmd), WMI_CFG_DEF_RX_OFFLOAD_DONE_EVENTID, &reply,
+		      sizeof(reply), WIL_WMI_CALL_GENERAL_TO_MS);
+	if (rc) {
+		wil_err(wil, "WMI_CFG_DEF_RX_OFFLOAD_CMD failed, rc %d\n", rc);
+		return rc;
+	}
+
+	if (reply.evt.status != WMI_FW_STATUS_SUCCESS) {
+		wil_err(wil, "WMI_CFG_DEF_RX_OFFLOAD_CMD failed, status %d\n",
+			reply.evt.status);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+int wil_wmi_rx_sring_add(struct wil6210_priv *wil, u16 ring_id)
+{
+	struct net_device *ndev = wil->main_ndev;
+	struct wil6210_vif *vif = ndev_to_vif(ndev);
+	struct wil_status_ring *sring = &wil->srings[ring_id];
+	int rc;
+	struct wmi_rx_status_ring_add_cmd cmd = {
+		.ring_cfg = {
+			.ring_size = cpu_to_le16(sring->size),
+			.ring_id = ring_id,
+		},
+		.rx_msg_type = wil->use_compressed_rx_status ?
+			WMI_RX_MSG_TYPE_COMPRESSED :
+			WMI_RX_MSG_TYPE_EXTENDED,
+		.irq_index = WIL_RX_STATUS_IRQ_IDX,
+	};
+	struct {
+		struct wmi_cmd_hdr hdr;
+		struct wmi_rx_status_ring_cfg_done_event evt;
+	} __packed reply = {
+		.evt = {.status = WMI_FW_STATUS_FAILURE},
+	};
+
+	cmd.ring_cfg.ring_mem_base = cpu_to_le64(sring->pa);
+	rc = wmi_call(wil, WMI_RX_STATUS_RING_ADD_CMDID, vif->mid, &cmd,
+		      sizeof(cmd), WMI_RX_STATUS_RING_CFG_DONE_EVENTID, &reply,
+		      sizeof(reply), WIL_WMI_CALL_GENERAL_TO_MS);
+	if (rc) {
+		wil_err(wil, "RX_STATUS_RING_ADD_CMD failed, rc %d\n", rc);
+		return rc;
+	}
+
+	if (reply.evt.status != WMI_FW_STATUS_SUCCESS) {
+		wil_err(wil, "RX_STATUS_RING_ADD_CMD failed, status %d\n",
+			reply.evt.status);
+		return -EINVAL;
+	}
+
+	sring->hwtail = le32_to_cpu(reply.evt.ring_tail_ptr);
+
+	return 0;
+}
+
+int wil_wmi_rx_desc_ring_add(struct wil6210_priv *wil, int status_ring_id)
+{
+	struct net_device *ndev = wil->main_ndev;
+	struct wil6210_vif *vif = ndev_to_vif(ndev);
+	struct wil_ring *ring = &wil->ring_rx;
+	int rc;
+	struct wmi_rx_desc_ring_add_cmd cmd = {
+		.ring_cfg = {
+			.ring_size = cpu_to_le16(ring->size),
+			.ring_id = WIL_RX_DESC_RING_ID,
+		},
+		.status_ring_id = status_ring_id,
+		.irq_index = WIL_RX_STATUS_IRQ_IDX,
+	};
+	struct {
+		struct wmi_cmd_hdr hdr;
+		struct wmi_rx_desc_ring_cfg_done_event evt;
+	} __packed reply = {
+		.evt = {.status = WMI_FW_STATUS_FAILURE},
+	};
+
+	cmd.ring_cfg.ring_mem_base = cpu_to_le64(ring->pa);
+	cmd.sw_tail_host_addr = cpu_to_le64(ring->edma_rx_swtail.pa);
+	rc = wmi_call(wil, WMI_RX_DESC_RING_ADD_CMDID, vif->mid, &cmd,
+		      sizeof(cmd), WMI_RX_DESC_RING_CFG_DONE_EVENTID, &reply,
+		      sizeof(reply), WIL_WMI_CALL_GENERAL_TO_MS);
+	if (rc) {
+		wil_err(wil, "WMI_RX_DESC_RING_ADD_CMD failed, rc %d\n", rc);
+		return rc;
+	}
+
+	if (reply.evt.status != WMI_FW_STATUS_SUCCESS) {
+		wil_err(wil, "WMI_RX_DESC_RING_ADD_CMD failed, status %d\n",
+			reply.evt.status);
+		return -EINVAL;
+	}
+
+	ring->hwtail = le32_to_cpu(reply.evt.ring_tail_ptr);
+
+	return 0;
+}
+
+int wil_wmi_tx_desc_ring_add(struct wil6210_vif *vif, int ring_id, int cid,
+			     int tid)
+{
+	struct wil6210_priv *wil = vif_to_wil(vif);
+	int sring_id = wil->tx_sring_idx; /* there is only one TX sring */
+	int rc;
+	struct wil_ring *ring = &wil->ring_tx[ring_id];
+	struct wil_ring_tx_data *txdata = &wil->ring_tx_data[ring_id];
+	struct wmi_tx_desc_ring_add_cmd cmd = {
+		.ring_cfg = {
+			.ring_size = cpu_to_le16(ring->size),
+			.ring_id = ring_id,
+		},
+		.status_ring_id = sring_id,
+		.cid = cid,
+		.tid = tid,
+		.encap_trans_type = WMI_VRING_ENC_TYPE_802_3,
+		.max_msdu_size = cpu_to_le16(wil_mtu2macbuf(mtu_max)),
+		.schd_params = {
+			.priority = cpu_to_le16(0),
+			.timeslot_us = cpu_to_le16(0xfff),
+		}
+	};
+	struct {
+		struct wmi_cmd_hdr hdr;
+		struct wmi_tx_desc_ring_cfg_done_event evt;
+	} __packed reply = {
+		.evt = {.status = WMI_FW_STATUS_FAILURE},
+	};
+
+	cmd.ring_cfg.ring_mem_base = cpu_to_le64(ring->pa);
+	rc = wmi_call(wil, WMI_TX_DESC_RING_ADD_CMDID, vif->mid, &cmd,
+		      sizeof(cmd), WMI_TX_DESC_RING_CFG_DONE_EVENTID, &reply,
+		      sizeof(reply), WIL_WMI_CALL_GENERAL_TO_MS);
+	if (rc) {
+		wil_err(wil, "WMI_TX_DESC_RING_ADD_CMD failed, rc %d\n", rc);
+		return rc;
+	}
+
+	if (reply.evt.status != WMI_FW_STATUS_SUCCESS) {
+		wil_err(wil, "WMI_TX_DESC_RING_ADD_CMD failed, status %d\n",
+			reply.evt.status);
+		return -EINVAL;
+	}
+
+	spin_lock_bh(&txdata->lock);
+	ring->hwtail = le32_to_cpu(reply.evt.ring_tail_ptr);
+	txdata->mid = vif->mid;
+	txdata->enabled = 1;
+	spin_unlock_bh(&txdata->lock);
+
+	return 0;
+}
+
+int wil_wmi_bcast_desc_ring_add(struct wil6210_vif *vif, int ring_id)
+{
+	struct wil6210_priv *wil = vif_to_wil(vif);
+	struct wil_ring *ring = &wil->ring_tx[ring_id];
+	int rc;
+	struct wmi_bcast_desc_ring_add_cmd cmd = {
+		.ring_cfg = {
+			.ring_size = cpu_to_le16(ring->size),
+			.ring_id = ring_id,
+		},
+		.status_ring_id = wil->tx_sring_idx,
+		.encap_trans_type = WMI_VRING_ENC_TYPE_802_3,
+	};
+	struct {
+		struct wmi_cmd_hdr hdr;
+		struct wmi_rx_desc_ring_cfg_done_event evt;
+	} __packed reply = {
+		.evt = {.status = WMI_FW_STATUS_FAILURE},
+	};
+	struct wil_ring_tx_data *txdata = &wil->ring_tx_data[ring_id];
+
+	cmd.ring_cfg.ring_mem_base = cpu_to_le64(ring->pa);
+	rc = wmi_call(wil, WMI_BCAST_DESC_RING_ADD_CMDID, vif->mid, &cmd,
+		      sizeof(cmd), WMI_TX_DESC_RING_CFG_DONE_EVENTID, &reply,
+		      sizeof(reply), WIL_WMI_CALL_GENERAL_TO_MS);
+	if (rc) {
+		wil_err(wil, "WMI_BCAST_DESC_RING_ADD_CMD failed, rc %d\n", rc);
+		return rc;
+	}
+
+	if (reply.evt.status != WMI_FW_STATUS_SUCCESS) {
+		wil_err(wil, "Broadcast Tx config failed, status %d\n",
+			reply.evt.status);
+		return -EINVAL;
+	}
+
+	spin_lock_bh(&txdata->lock);
+	ring->hwtail = le32_to_cpu(reply.evt.ring_tail_ptr);
+	txdata->mid = vif->mid;
+	txdata->enabled = 1;
+	spin_unlock_bh(&txdata->lock);
+
+	return 0;
+}
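The six WMI helpers added above all follow the same wmi_call() pattern: build the ring-add command, preset the reply status to WMI_FW_STATUS_FAILURE, and latch the returned ring_tail_ptr on success. One plausible bring-up ordering that composes them is sketched below; the sequence, the ring/CID/TID numbers, and the use of rx_buf_len as the per-descriptor payload limit are assumptions, not taken from the driver:

static int wil_edma_rings_bringup_example(struct wil6210_priv *wil,
					  struct wil6210_vif *vif)
{
	int rc;

	/* single TX status ring shared by all TX descriptor rings */
	rc = wil_wmi_tx_sring_cfg(wil, wil->tx_sring_idx);
	if (rc)
		return rc;

	/* RX status ring, default RX offloads, then the RX desc ring */
	rc = wil_wmi_rx_sring_add(wil, WIL_DEFAULT_RX_STATUS_RING_ID);
	if (rc)
		return rc;

	/* payload per descriptor assumed to track the RX buffer length */
	rc = wil_wmi_cfg_def_rx_offload(wil, wil->rx_buf_len);
	if (rc)
		return rc;

	rc = wil_wmi_rx_desc_ring_add(wil, WIL_DEFAULT_RX_STATUS_RING_ID);
	if (rc)
		return rc;

	/* example per-connection TX ring: ring 1 for CID 0, TID 0 */
	return wil_wmi_tx_desc_ring_add(vif, 1, 0, 0);
}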
diff --git a/drivers/net/wireless/ath/wil6210/wmi.h b/drivers/net/wireless/ath/wil6210/wmi.h
index dc503d903786bd7d2516f31d5d7b57ba33a3579e..abf6f05c4801b9d80b2cfd1e974b9e3dcaa605c0 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.h
+++ b/drivers/net/wireless/ath/wil6210/wmi.h
@@ -86,6 +86,7 @@ enum wmi_fw_capability {
 	WMI_FW_CAPABILITY_PNO				= 15,
 	WMI_FW_CAPABILITY_REF_CLOCK_CONTROL		= 18,
 	WMI_FW_CAPABILITY_AP_SME_OFFLOAD_NONE		= 19,
+	WMI_FW_CAPABILITY_AMSDU				= 23,
 	WMI_FW_CAPABILITY_MAX,
 };
 
@@ -148,8 +149,8 @@ enum wmi_command_id {
 	WMI_CFG_RX_CHAIN_CMDID				= 0x820,
 	WMI_VRING_CFG_CMDID				= 0x821,
 	WMI_BCAST_VRING_CFG_CMDID			= 0x822,
-	WMI_VRING_BA_EN_CMDID				= 0x823,
-	WMI_VRING_BA_DIS_CMDID				= 0x824,
+	WMI_RING_BA_EN_CMDID				= 0x823,
+	WMI_RING_BA_DIS_CMDID				= 0x824,
 	WMI_RCP_ADDBA_RESP_CMDID			= 0x825,
 	WMI_RCP_DELBA_CMDID				= 0x826,
 	WMI_SET_SSID_CMDID				= 0x827,
@@ -163,6 +164,7 @@ enum wmi_command_id {
 	WMI_BF_SM_MGMT_CMDID				= 0x838,
 	WMI_BF_RXSS_MGMT_CMDID				= 0x839,
 	WMI_BF_TRIG_CMDID				= 0x83A,
+	WMI_RCP_ADDBA_RESP_EDMA_CMDID			= 0x83B,
 	WMI_LINK_MAINTAIN_CFG_WRITE_CMDID		= 0x842,
 	WMI_LINK_MAINTAIN_CFG_READ_CMDID		= 0x843,
 	WMI_SET_SECTORS_CMDID				= 0x849,
@@ -235,6 +237,12 @@ enum wmi_command_id {
 	WMI_PRIO_TX_SECTORS_NUMBER_CMDID		= 0x9A6,
 	WMI_PRIO_TX_SECTORS_SET_DEFAULT_CFG_CMDID	= 0x9A7,
 	WMI_BF_CONTROL_CMDID				= 0x9AA,
+	WMI_TX_STATUS_RING_ADD_CMDID			= 0x9C0,
+	WMI_RX_STATUS_RING_ADD_CMDID			= 0x9C1,
+	WMI_TX_DESC_RING_ADD_CMDID			= 0x9C2,
+	WMI_RX_DESC_RING_ADD_CMDID			= 0x9C3,
+	WMI_BCAST_DESC_RING_ADD_CMDID			= 0x9C4,
+	WMI_CFG_DEF_RX_OFFLOAD_CMDID			= 0x9C5,
 	WMI_SCHEDULING_SCHEME_CMDID			= 0xA01,
 	WMI_FIXED_SCHEDULING_CONFIG_CMDID		= 0xA02,
 	WMI_ENABLE_FIXED_SCHEDULING_CMDID		= 0xA03,
@@ -781,18 +789,90 @@ struct wmi_lo_power_calib_from_otp_event {
 	u8 reserved[3];
 } __packed;
 
-/* WMI_VRING_BA_EN_CMDID */
-struct wmi_vring_ba_en_cmd {
-	u8 ringid;
+struct wmi_edma_ring_cfg {
+	__le64 ring_mem_base;
+	/* size in number of items */
+	__le16 ring_size;
+	u8 ring_id;
+	u8 reserved;
+} __packed;
+
+enum wmi_rx_msg_type {
+	WMI_RX_MSG_TYPE_COMPRESSED	= 0x00,
+	WMI_RX_MSG_TYPE_EXTENDED	= 0x01,
+};
+
+struct wmi_tx_status_ring_add_cmd {
+	struct wmi_edma_ring_cfg ring_cfg;
+	u8 irq_index;
+	u8 reserved[3];
+} __packed;
+
+struct wmi_rx_status_ring_add_cmd {
+	struct wmi_edma_ring_cfg ring_cfg;
+	u8 irq_index;
+	/* wmi_rx_msg_type */
+	u8 rx_msg_type;
+	u8 reserved[2];
+} __packed;
+
+struct wmi_cfg_def_rx_offload_cmd {
+	__le16 max_msdu_size;
+	__le16 max_rx_pl_per_desc;
+	u8 decap_trans_type;
+	u8 l2_802_3_offload_ctrl;
+	u8 l2_nwifi_offload_ctrl;
+	u8 vlan_id;
+	u8 nwifi_ds_trans_type;
+	u8 l3_l4_ctrl;
+	u8 reserved[6];
+} __packed;
+
+struct wmi_tx_desc_ring_add_cmd {
+	struct wmi_edma_ring_cfg ring_cfg;
+	__le16 max_msdu_size;
+	/* Correlated status ring (0-63) */
+	u8 status_ring_id;
+	u8 cid;
+	u8 tid;
+	u8 encap_trans_type;
+	u8 mac_ctrl;
+	u8 to_resolution;
+	u8 agg_max_wsize;
+	u8 reserved[3];
+	struct wmi_vring_cfg_schd schd_params;
+} __packed;
+
+struct wmi_rx_desc_ring_add_cmd {
+	struct wmi_edma_ring_cfg ring_cfg;
+	u8 irq_index;
+	/* 0-63 status rings */
+	u8 status_ring_id;
+	u8 reserved[2];
+	__le64 sw_tail_host_addr;
+} __packed;
+
+struct wmi_bcast_desc_ring_add_cmd {
+	struct wmi_edma_ring_cfg ring_cfg;
+	__le16 max_msdu_size;
+	/* Correlated status ring (0-63) */
+	u8 status_ring_id;
+	u8 encap_trans_type;
+	u8 reserved[4];
+} __packed;
+
+/* WMI_RING_BA_EN_CMDID */
+struct wmi_ring_ba_en_cmd {
+	u8 ring_id;
 	u8 agg_max_wsize;
 	__le16 ba_timeout;
 	u8 amsdu;
 	u8 reserved[3];
 } __packed;
 
-/* WMI_VRING_BA_DIS_CMDID */
-struct wmi_vring_ba_dis_cmd {
-	u8 ringid;
+/* WMI_RING_BA_DIS_CMDID */
+struct wmi_ring_ba_dis_cmd {
+	u8 ring_id;
 	u8 reserved;
 	__le16 reason;
 } __packed;
@@ -950,6 +1030,21 @@ struct wmi_rcp_addba_resp_cmd {
 	u8 reserved[2];
 } __packed;
 
+/* WMI_RCP_ADDBA_RESP_EDMA_CMDID */
+struct wmi_rcp_addba_resp_edma_cmd {
+	u8 cid;
+	u8 tid;
+	u8 dialog_token;
+	u8 reserved;
+	__le16 status_code;
+	/* ieee80211_ba_parameterset field to send */
+	__le16 ba_param_set;
+	__le16 ba_timeout;
+	u8 status_ring_id;
+	/* wmi_cfg_rx_chain_cmd_reorder_type */
+	u8 reorder_type;
+} __packed;
+
 /* WMI_RCP_DELBA_CMDID */
 struct wmi_rcp_delba_cmd {
 	/* Used for cid less than 8. For higher cid set
@@ -1535,7 +1630,7 @@ enum wmi_event_id {
 	WMI_BF_CTRL_DONE_EVENTID			= 0x1862,
 	WMI_NOTIFY_REQ_DONE_EVENTID			= 0x1863,
 	WMI_GET_STATUS_DONE_EVENTID			= 0x1864,
-	WMI_VRING_EN_EVENTID				= 0x1865,
+	WMI_RING_EN_EVENTID				= 0x1865,
 	WMI_GET_RF_STATUS_EVENTID			= 0x1866,
 	WMI_GET_BASEBAND_TYPE_EVENTID			= 0x1867,
 	WMI_VRING_SWITCH_TIMING_CONFIG_EVENTID		= 0x1868,
@@ -1587,6 +1682,11 @@ enum wmi_event_id {
 	WMI_PRIO_TX_SECTORS_NUMBER_EVENTID		= 0x19A6,
 	WMI_PRIO_TX_SECTORS_SET_DEFAULT_CFG_EVENTID	= 0x19A7,
 	WMI_BF_CONTROL_EVENTID				= 0x19AA,
+	WMI_TX_STATUS_RING_CFG_DONE_EVENTID		= 0x19C0,
+	WMI_RX_STATUS_RING_CFG_DONE_EVENTID		= 0x19C1,
+	WMI_TX_DESC_RING_CFG_DONE_EVENTID		= 0x19C2,
+	WMI_RX_DESC_RING_CFG_DONE_EVENTID		= 0x19C3,
+	WMI_CFG_DEF_RX_OFFLOAD_DONE_EVENTID		= 0x19C5,
 	WMI_SCHEDULING_SCHEME_EVENTID			= 0x1A01,
 	WMI_FIXED_SCHEDULING_CONFIG_COMPLETE_EVENTID	= 0x1A02,
 	WMI_ENABLE_FIXED_SCHEDULING_COMPLETE_EVENTID	= 0x1A03,
@@ -1997,6 +2097,49 @@ struct wmi_rcp_addba_resp_sent_event {
 	u8 reserved2[2];
 } __packed;
 
+/* WMI_TX_STATUS_RING_CFG_DONE_EVENTID */
+struct wmi_tx_status_ring_cfg_done_event {
+	u8 ring_id;
+	/* wmi_fw_status */
+	u8 status;
+	u8 reserved[2];
+	__le32 ring_tail_ptr;
+} __packed;
+
+/* WMI_RX_STATUS_RING_CFG_DONE_EVENTID */
+struct wmi_rx_status_ring_cfg_done_event {
+	u8 ring_id;
+	/* wmi_fw_status */
+	u8 status;
+	u8 reserved[2];
+	__le32 ring_tail_ptr;
+} __packed;
+
+/* WMI_CFG_DEF_RX_OFFLOAD_DONE_EVENTID */
+struct wmi_cfg_def_rx_offload_done_event {
+	/* wmi_fw_status */
+	u8 status;
+	u8 reserved[3];
+} __packed;
+
+/* WMI_TX_DESC_RING_CFG_DONE_EVENTID */
+struct wmi_tx_desc_ring_cfg_done_event {
+	u8 ring_id;
+	/* wmi_fw_status */
+	u8 status;
+	u8 reserved[2];
+	__le32 ring_tail_ptr;
+} __packed;
+
+/* WMI_RX_DESC_RING_CFG_DONE_EVENTID */
+struct wmi_rx_desc_ring_cfg_done_event {
+	u8 ring_id;
+	/* wmi_fw_status */
+	u8 status;
+	u8 reserved[2];
+	__le32 ring_tail_ptr;
+} __packed;
+
 /* WMI_RCP_ADDBA_REQ_EVENTID */
 struct wmi_rcp_addba_req_event {
 	/* Used for cid less than 8. For higher cid set
@@ -2047,9 +2190,9 @@ struct wmi_data_port_open_event {
 	u8 reserved[3];
 } __packed;
 
-/* WMI_VRING_EN_EVENTID */
-struct wmi_vring_en_event {
-	u8 vring_index;
+/* WMI_RING_EN_EVENTID */
+struct wmi_ring_en_event {
+	u8 ring_index;
 	u8 reserved[3];
 } __packed;
 
diff --git a/drivers/net/wireless/atmel/atmel.c b/drivers/net/wireless/atmel/atmel.c
index 3ed3d9f6aae91b5a76d6caee9ac714b1824db6d5..74538085cfb792f27692730b51510136051875d3 100644
--- a/drivers/net/wireless/atmel/atmel.c
+++ b/drivers/net/wireless/atmel/atmel.c
@@ -1399,6 +1399,7 @@ static int atmel_validate_channel(struct atmel_private *priv, int channel)
 	return 0;
 }
 
+#ifdef CONFIG_PROC_FS
 static int atmel_proc_show(struct seq_file *m, void *v)
 {
 	struct atmel_private *priv = m->private;
@@ -1481,6 +1482,7 @@ static int atmel_proc_show(struct seq_file *m, void *v)
 	seq_printf(m, "Current state:\t\t%s\n", s);
 	return 0;
 }
+#endif
 
 static const struct net_device_ops atmel_netdev_ops = {
 	.ndo_open 		= atmel_open,
@@ -3675,7 +3677,7 @@ static int probe_atmel_card(struct net_device *dev)
 		atmel_write16(dev, GCR, 0x0060);
 
 	atmel_write16(dev, GCR, 0x0040);
-	mdelay(500);
+	msleep(500);
 
 	if (atmel_read16(dev, MR2) == 0) {
 		/* No stored firmware so load a small stub which just
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
index 4db4d444407afa6a48806193cbbeb596b3863533..8347da632a5b0de85f9a68d9eedfd0436d2da915 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
@@ -93,6 +93,42 @@ static int brcmf_feat_debugfs_read(struct seq_file *seq, void *data)
 }
 #endif /* DEBUG */
 
+struct brcmf_feat_fwfeat {
+	const char * const fwid;
+	u32 feat_flags;
+};
+
+static const struct brcmf_feat_fwfeat brcmf_feat_fwfeat_map[] = {
+	/* brcmfmac43602-pcie.ap.bin from linux-firmware.git commit ea1178515b88 */
+	{ "01-6cb8e269", BIT(BRCMF_FEAT_MONITOR) },
+	/* brcmfmac4366b-pcie.bin from linux-firmware.git commit 52442afee990 */
+	{ "01-c47a91a4", BIT(BRCMF_FEAT_MONITOR) },
+};
+
+static void brcmf_feat_firmware_overrides(struct brcmf_pub *drv)
+{
+	const struct brcmf_feat_fwfeat *e;
+	u32 feat_flags = 0;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(brcmf_feat_fwfeat_map); i++) {
+		e = &brcmf_feat_fwfeat_map[i];
+		if (!strcmp(e->fwid, drv->fwver)) {
+			feat_flags = e->feat_flags;
+			break;
+		}
+	}
+
+	if (!feat_flags)
+		return;
+
+	for (i = 0; i < BRCMF_FEAT_LAST; i++)
+		if (feat_flags & BIT(i))
+			brcmf_dbg(INFO, "enabling firmware feature: %s\n",
+				  brcmf_feat_names[i]);
+	drv->feat_flags |= feat_flags;
+}
+
 /**
  * brcmf_feat_iovar_int_get() - determine feature through iovar query.
  *
@@ -253,6 +289,8 @@ void brcmf_feat_attach(struct brcmf_pub *drvr)
 	}
 	brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_FWSUP, "sup_wpa");
 
+	brcmf_feat_firmware_overrides(drvr);
+
 	/* set chip related quirks */
 	switch (drvr->bus_if->chip) {
 	case BRCM_CC_43236_CHIP_ID:
diff --git a/drivers/net/wireless/cisco/airo.c b/drivers/net/wireless/cisco/airo.c
index 72046e182745dc72d00462285c4d8e55d42a74e5..04dd7a936593826928cf11559b3b362d0ed43383 100644
--- a/drivers/net/wireless/cisco/airo.c
+++ b/drivers/net/wireless/cisco/airo.c
@@ -3419,7 +3419,7 @@ static void airo_handle_rx(struct airo_info *ai)
 
 static void airo_handle_tx(struct airo_info *ai, u16 status)
 {
-	int i, len = 0, index = -1;
+	int i, index = -1;
 	u16 fid;
 
 	if (test_bit(FLAG_MPI, &ai->flags)) {
@@ -3443,11 +3443,9 @@ static void airo_handle_tx(struct airo_info *ai, u16 status)
 
 	fid = IN4500(ai, TXCOMPLFID);
 
-	for(i = 0; i < MAX_FIDS; i++) {
-		if ((ai->fids[i] & 0xffff) == fid) {
-			len = ai->fids[i] >> 16;
+	for (i = 0; i < MAX_FIDS; i++) {
+		if ((ai->fids[i] & 0xffff) == fid)
 			index = i;
-		}
 	}
 
 	if (index != -1) {
diff --git a/drivers/net/wireless/cisco/airo_cs.c b/drivers/net/wireless/cisco/airo_cs.c
index d9ed22b4cc6b60dfe0a59d32ed44b78923ec483f..3718f958c0fca44cfd56f556b73c3a19ba8ee99f 100644
--- a/drivers/net/wireless/cisco/airo_cs.c
+++ b/drivers/net/wireless/cisco/airo_cs.c
@@ -102,11 +102,8 @@ static int airo_cs_config_check(struct pcmcia_device *p_dev, void *priv_data)
 
 static int airo_config(struct pcmcia_device *link)
 {
-	struct local_info *dev;
 	int ret;
 
-	dev = link->priv;
-
 	dev_dbg(&link->dev, "airo_config\n");
 
 	link->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_VPP |
diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2100.c b/drivers/net/wireless/intel/ipw2x00/ipw2100.c
index 1ad83ef5f202e0f2d7ae6f1919d2b11aa21b52b2..910db46db6a12ea2c74cf3b9e42a7a42ea9a7351 100644
--- a/drivers/net/wireless/intel/ipw2x00/ipw2100.c
+++ b/drivers/net/wireless/intel/ipw2x00/ipw2100.c
@@ -5112,11 +5112,9 @@ static int ipw2100_disassociate_bssid(struct ipw2100_priv *priv)
 		.host_command_length = ETH_ALEN
 	};
 	int err;
-	int len;
 
 	IPW_DEBUG_HC("DISASSOCIATION_BSSID\n");
 
-	len = ETH_ALEN;
 	/* The Firmware currently ignores the BSSID and just disassociates from
 	 * the currently associated AP -- but in the off chance that a future
 	 * firmware does use the BSSID provided here, we go ahead and try and
@@ -7723,7 +7721,6 @@ static int ipw2100_wx_get_auth(struct net_device *dev,
 	struct libipw_device *ieee = priv->ieee;
 	struct lib80211_crypt_data *crypt;
 	struct iw_param *param = &wrqu->param;
-	int ret = 0;
 
 	switch (param->flags & IW_AUTH_INDEX) {
 	case IW_AUTH_WPA_VERSION:
@@ -7733,7 +7730,6 @@ static int ipw2100_wx_get_auth(struct net_device *dev,
 		/*
 		 * wpa_supplicant will control these internally
 		 */
-		ret = -EOPNOTSUPP;
 		break;
 
 	case IW_AUTH_TKIP_COUNTERMEASURES:
@@ -7801,9 +7797,6 @@ static int ipw2100_wx_set_mlme(struct net_device *dev,
 {
 	struct ipw2100_priv *priv = libipw_priv(dev);
 	struct iw_mlme *mlme = (struct iw_mlme *)extra;
-	__le16 reason;
-
-	reason = cpu_to_le16(mlme->reason_code);
 
 	switch (mlme->cmd) {
 	case IW_MLME_DEAUTH:
diff --git a/drivers/net/wireless/intel/ipw2x00/libipw_wx.c b/drivers/net/wireless/intel/ipw2x00/libipw_wx.c
index dd29f46d086b2bf2f60586655fb59ff9647c0b0c..d32d39fa268625097aafb993c4aa44fdf6909b6c 100644
--- a/drivers/net/wireless/intel/ipw2x00/libipw_wx.c
+++ b/drivers/net/wireless/intel/ipw2x00/libipw_wx.c
@@ -479,7 +479,6 @@ int libipw_wx_get_encode(struct libipw_device *ieee,
 {
 	struct iw_point *erq = &(wrqu->encoding);
 	int len, key;
-	struct lib80211_crypt_data *crypt;
 	struct libipw_security *sec = &ieee->sec;
 
 	LIBIPW_DEBUG_WX("GET_ENCODE\n");
@@ -492,7 +491,6 @@ int libipw_wx_get_encode(struct libipw_device *ieee,
 	} else
 		key = ieee->crypt_info.tx_keyidx;
 
-	crypt = ieee->crypt_info.crypt[key];
 	erq->flags = key + 1;
 
 	if (!sec->enabled) {
diff --git a/drivers/net/wireless/intel/iwlegacy/3945-mac.c b/drivers/net/wireless/intel/iwlegacy/3945-mac.c
index 62a9794f952b5175da68828a83809eeb47c8b4cb..57e3b6cca2341442a8ca23052defbe84f3e3899d 100644
--- a/drivers/net/wireless/intel/iwlegacy/3945-mac.c
+++ b/drivers/net/wireless/intel/iwlegacy/3945-mac.c
@@ -476,8 +476,6 @@ il3945_tx_skb(struct il_priv *il,
 	int txq_id = skb_get_queue_mapping(skb);
 	u16 len, idx, hdr_len;
 	u16 firstlen, secondlen;
-	u8 id;
-	u8 unicast;
 	u8 sta_id;
 	u8 tid = 0;
 	__le16 fc;
@@ -496,9 +494,6 @@ il3945_tx_skb(struct il_priv *il,
 		goto drop_unlock;
 	}
 
-	unicast = !is_multicast_ether_addr(hdr->addr1);
-	id = 0;
-
 	fc = hdr->frame_control;
 
 #ifdef CONFIG_IWLEGACY_DEBUG
@@ -957,10 +952,8 @@ il3945_rx_queue_restock(struct il_priv *il)
 	struct list_head *element;
 	struct il_rx_buf *rxb;
 	unsigned long flags;
-	int write;
 
 	spin_lock_irqsave(&rxq->lock, flags);
-	write = rxq->write & ~0x7;
 	while (il_rx_queue_space(rxq) > 0 && rxq->free_count) {
 		/* Get next free Rx buffer, remove from free list */
 		element = rxq->rx_free.next;
@@ -2725,7 +2718,6 @@ void
 il3945_post_associate(struct il_priv *il)
 {
 	int rc = 0;
-	struct ieee80211_conf *conf = NULL;
 
 	if (!il->vif || !il->is_open)
 		return;
@@ -2738,8 +2730,6 @@ il3945_post_associate(struct il_priv *il)
 
 	il_scan_cancel_timeout(il, 200);
 
-	conf = &il->hw->conf;
-
 	il->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
 	il3945_commit_rxon(il);
 
diff --git a/drivers/net/wireless/intel/iwlegacy/3945.c b/drivers/net/wireless/intel/iwlegacy/3945.c
index dbf164d48ed3c12b611dbfb66a21476c163e522c..3e568ce2fb20838f090b9aa4716495a9cb51c52f 100644
--- a/drivers/net/wireless/intel/iwlegacy/3945.c
+++ b/drivers/net/wireless/intel/iwlegacy/3945.c
@@ -1634,7 +1634,6 @@ il3945_hw_reg_set_txpower(struct il_priv *il, s8 power)
 {
 	struct il_channel_info *ch_info;
 	s8 max_power;
-	u8 a_band;
 	u8 i;
 
 	if (il->tx_power_user_lmt == power) {
@@ -1650,7 +1649,6 @@ il3945_hw_reg_set_txpower(struct il_priv *il, s8 power)
 
 	for (i = 0; i < il->channel_count; i++) {
 		ch_info = &il->channel_info[i];
-		a_band = il_is_channel_a_band(ch_info);
 
 		/* find minimum power of all user and regulatory constraints
 		 *    (does not consider h/w clipping limitations) */
diff --git a/drivers/net/wireless/intel/iwlegacy/4965-mac.c b/drivers/net/wireless/intel/iwlegacy/4965-mac.c
index 562e94870a9c66f6a94e37c0bda696ae49c01c3a..280cd8ae1696db3204fa54729b8870efe9115346 100644
--- a/drivers/net/wireless/intel/iwlegacy/4965-mac.c
+++ b/drivers/net/wireless/intel/iwlegacy/4965-mac.c
@@ -1338,15 +1338,12 @@ il4965_accumulative_stats(struct il_priv *il, __le32 * stats)
 	u32 *accum_stats;
 	u32 *delta, *max_delta;
 	struct stats_general_common *general, *accum_general;
-	struct stats_tx *tx, *accum_tx;
 
 	prev_stats = (__le32 *) &il->_4965.stats;
 	accum_stats = (u32 *) &il->_4965.accum_stats;
 	size = sizeof(struct il_notif_stats);
 	general = &il->_4965.stats.general.common;
 	accum_general = &il->_4965.accum_stats.general.common;
-	tx = &il->_4965.stats.tx;
-	accum_tx = &il->_4965.accum_stats.tx;
 	delta = (u32 *) &il->_4965.delta_stats;
 	max_delta = (u32 *) &il->_4965.max_delta;
 
@@ -4784,7 +4781,6 @@ static void
 il4965_ucode_callback(const struct firmware *ucode_raw, void *context)
 {
 	struct il_priv *il = context;
-	struct il_ucode_header *ucode;
 	int err;
 	struct il4965_firmware_pieces pieces;
 	const unsigned int api_max = il->cfg->ucode_api_max;
@@ -4814,8 +4810,6 @@ il4965_ucode_callback(const struct firmware *ucode_raw, void *context)
 	}
 
 	/* Data from ucode file:  header followed by uCode images */
-	ucode = (struct il_ucode_header *)ucode_raw->data;
-
 	err = il4965_load_firmware(il, ucode_raw, &pieces);
 
 	if (err)
diff --git a/drivers/net/wireless/intel/iwlwifi/Makefile b/drivers/net/wireless/intel/iwlwifi/Makefile
index 4d08d78c6b71a8df7c49eaa5fd8180b4f470180b..04e376cc898c1864ea868a40cb1e289acd1309fb 100644
--- a/drivers/net/wireless/intel/iwlwifi/Makefile
+++ b/drivers/net/wireless/intel/iwlwifi/Makefile
@@ -7,13 +7,13 @@ iwlwifi-objs		+= iwl-debug.o
 iwlwifi-objs		+= iwl-eeprom-read.o iwl-eeprom-parse.o
 iwlwifi-objs		+= iwl-phy-db.o iwl-nvm-parse.o
 iwlwifi-objs		+= pcie/drv.o pcie/rx.o pcie/tx.o pcie/trans.o
-iwlwifi-objs		+= pcie/ctxt-info.o pcie/trans-gen2.o pcie/tx-gen2.o
+iwlwifi-objs		+= pcie/ctxt-info.o pcie/ctxt-info-gen3.o
+iwlwifi-objs		+= pcie/trans-gen2.o pcie/tx-gen2.o
 iwlwifi-$(CONFIG_IWLDVM) += cfg/1000.o cfg/2000.o cfg/5000.o cfg/6000.o
 iwlwifi-$(CONFIG_IWLMVM) += cfg/7000.o cfg/8000.o cfg/9000.o cfg/22000.o
 iwlwifi-objs		+= iwl-trans.o
 iwlwifi-objs		+= fw/notif-wait.o
 iwlwifi-$(CONFIG_IWLMVM) += fw/paging.o fw/smem.o fw/init.o fw/dbg.o
-iwlwifi-$(CONFIG_IWLMVM) += fw/common_rx.o
 iwlwifi-$(CONFIG_ACPI) += fw/acpi.o
 iwlwifi-$(CONFIG_IWLWIFI_DEBUGFS) += fw/debugfs.o
 
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/2000.c b/drivers/net/wireless/intel/iwlwifi/cfg/2000.c
index a63ca8820568ac6daf8d0bfbdf23c5e44d77b85b..fedb108db68f1b4f0ea6f5123e27d52e8b1d06c6 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/2000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/2000.c
@@ -63,6 +63,7 @@
 static const struct iwl_base_params iwl2000_base_params = {
 	.eeprom_size = OTP_LOW_IMAGE_SIZE,
 	.num_of_queues = IWLAGN_NUM_QUEUES,
+	.max_tfd_queue_size = 256,
 	.max_ll_items = OTP_MAX_LL_ITEMS_2x00,
 	.shadow_ram_support = true,
 	.led_compensation = 51,
@@ -76,6 +77,7 @@ static const struct iwl_base_params iwl2000_base_params = {
 static const struct iwl_base_params iwl2030_base_params = {
 	.eeprom_size = OTP_LOW_IMAGE_SIZE,
 	.num_of_queues = IWLAGN_NUM_QUEUES,
+	.max_tfd_queue_size = 256,
 	.max_ll_items = OTP_MAX_LL_ITEMS_2x00,
 	.shadow_ram_support = true,
 	.led_compensation = 57,
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
index d4ba66aecdc94ada5b66545afcde6ff88a8756b0..91ca77c7571cebb2a6dd3265b1713fb6d3202bda 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
@@ -59,7 +59,7 @@
 #define IWL_22000_UCODE_API_MAX	38
 
 /* Lowest firmware API version supported */
-#define IWL_22000_UCODE_API_MIN	24
+#define IWL_22000_UCODE_API_MIN	39
 
 /* NVM versions */
 #define IWL_22000_NVM_VERSION		0x0a1d
@@ -73,29 +73,48 @@
 #define IWL_22000_SMEM_OFFSET		0x400000
 #define IWL_22000_SMEM_LEN		0xD0000
 
-#define IWL_22000_JF_FW_PRE	"iwlwifi-Qu-a0-jf-b0-"
-#define IWL_22000_HR_FW_PRE	"iwlwifi-Qu-a0-hr-a0-"
-#define IWL_22000_HR_CDB_FW_PRE	"iwlwifi-QuIcp-z0-hrcdb-a0-"
-#define IWL_22000_HR_F0_FW_PRE	"iwlwifi-QuQnj-f0-hr-a0-"
-#define IWL_22000_JF_B0_FW_PRE	"iwlwifi-QuQnj-a0-jf-b0-"
-#define IWL_22000_HR_A0_FW_PRE	"iwlwifi-QuQnj-a0-hr-a0-"
+#define IWL_22000_JF_FW_PRE		"iwlwifi-Qu-a0-jf-b0-"
+#define IWL_22000_HR_FW_PRE		"iwlwifi-Qu-a0-hr-a0-"
+#define IWL_22000_HR_CDB_FW_PRE		"iwlwifi-QuIcp-z0-hrcdb-a0-"
+#define IWL_22000_HR_A_F0_FW_PRE	"iwlwifi-QuQnj-f0-hr-a0-"
+#define IWL_22000_HR_B_FW_PRE		"iwlwifi-Qu-b0-hr-b0-"
+#define IWL_22000_JF_B0_FW_PRE		"iwlwifi-QuQnj-a0-jf-b0-"
+#define IWL_22000_HR_A0_FW_PRE		"iwlwifi-QuQnj-a0-hr-a0-"
+#define IWL_22000_SU_Z0_FW_PRE		"iwlwifi-su-z0-"
 
 #define IWL_22000_HR_MODULE_FIRMWARE(api) \
 	IWL_22000_HR_FW_PRE __stringify(api) ".ucode"
 #define IWL_22000_JF_MODULE_FIRMWARE(api) \
 	IWL_22000_JF_FW_PRE __stringify(api) ".ucode"
-#define IWL_22000_HR_F0_QNJ_MODULE_FIRMWARE(api) \
-	IWL_22000_HR_F0_FW_PRE __stringify(api) ".ucode"
+#define IWL_22000_HR_A_F0_QNJ_MODULE_FIRMWARE(api) \
+	IWL_22000_HR_A_F0_FW_PRE __stringify(api) ".ucode"
+#define IWL_22000_HR_B_QNJ_MODULE_FIRMWARE(api) \
+	IWL_22000_HR_B_FW_PRE __stringify(api) ".ucode"
 #define IWL_22000_JF_B0_QNJ_MODULE_FIRMWARE(api) \
 	IWL_22000_JF_B0_FW_PRE __stringify(api) ".ucode"
 #define IWL_22000_HR_A0_QNJ_MODULE_FIRMWARE(api) \
 	IWL_22000_HR_A0_FW_PRE __stringify(api) ".ucode"
+#define IWL_22000_SU_Z0_MODULE_FIRMWARE(api) \
+	IWL_22000_SU_Z0_FW_PRE __stringify(api) ".ucode"
 
 #define NVM_HW_SECTION_NUM_FAMILY_22000		10
 
 static const struct iwl_base_params iwl_22000_base_params = {
 	.eeprom_size = OTP_LOW_IMAGE_SIZE_FAMILY_22000,
 	.num_of_queues = 512,
+	.max_tfd_queue_size = 256,
+	.shadow_ram_support = true,
+	.led_compensation = 57,
+	.wd_timeout = IWL_LONG_WD_TIMEOUT,
+	.max_event_log_size = 512,
+	.shadow_reg_enable = true,
+	.pcie_l1_allowed = true,
+};
+
+static const struct iwl_base_params iwl_22560_base_params = {
+	.eeprom_size = OTP_LOW_IMAGE_SIZE_FAMILY_22000,
+	.num_of_queues = 512,
+	.max_tfd_queue_size = 65536,
 	.shadow_ram_support = true,
 	.led_compensation = 57,
 	.wd_timeout = IWL_LONG_WD_TIMEOUT,
@@ -110,11 +129,9 @@ static const struct iwl_ht_params iwl_22000_ht_params = {
 	.ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ),
 };
 
-#define IWL_DEVICE_22000						\
+#define IWL_DEVICE_22000_COMMON						\
 	.ucode_api_max = IWL_22000_UCODE_API_MAX,			\
 	.ucode_api_min = IWL_22000_UCODE_API_MIN,			\
-	.device_family = IWL_DEVICE_FAMILY_22000,			\
-	.base_params = &iwl_22000_base_params,				\
 	.led_mode = IWL_LED_RF_STATE,					\
 	.nvm_hw_section_num = NVM_HW_SECTION_NUM_FAMILY_22000,		\
 	.non_shared_ant = ANT_A,					\
@@ -129,6 +146,10 @@ static const struct iwl_ht_params iwl_22000_ht_params = {
 	.mq_rx_supported = true,					\
 	.vht_mu_mimo_supported = true,					\
 	.mac_addr_from_csr = true,					\
+	.ht_params = &iwl_22000_ht_params,				\
+	.nvm_ver = IWL_22000_NVM_VERSION,				\
+	.nvm_calib_ver = IWL_22000_TX_POWER_VERSION,			\
+	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,		\
 	.use_tfh = true,						\
 	.rf_id = true,							\
 	.gen2 = true,							\
@@ -136,86 +157,114 @@ static const struct iwl_ht_params iwl_22000_ht_params = {
 	.dbgc_supported = true,						\
 	.min_umac_error_event_table = 0x400000
 
+#define IWL_DEVICE_22500						\
+	IWL_DEVICE_22000_COMMON,					\
+	.device_family = IWL_DEVICE_FAMILY_22000,			\
+	.base_params = &iwl_22000_base_params,				\
+	.csr = &iwl_csr_v1
+
+#define IWL_DEVICE_22560						\
+	IWL_DEVICE_22000_COMMON,					\
+	.device_family = IWL_DEVICE_FAMILY_22560,			\
+	.base_params = &iwl_22560_base_params,				\
+	.csr = &iwl_csr_v2
+
 const struct iwl_cfg iwl22000_2ac_cfg_hr = {
 	.name = "Intel(R) Dual Band Wireless AC 22000",
 	.fw_name_pre = IWL_22000_HR_FW_PRE,
-	IWL_DEVICE_22000,
-	.csr = &iwl_csr_v1,
-	.ht_params = &iwl_22000_ht_params,
-	.nvm_ver = IWL_22000_NVM_VERSION,
-	.nvm_calib_ver = IWL_22000_TX_POWER_VERSION,
-	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+	IWL_DEVICE_22500,
 };
 
 const struct iwl_cfg iwl22000_2ac_cfg_hr_cdb = {
 	.name = "Intel(R) Dual Band Wireless AC 22000",
 	.fw_name_pre = IWL_22000_HR_CDB_FW_PRE,
-	IWL_DEVICE_22000,
-	.csr = &iwl_csr_v1,
-	.ht_params = &iwl_22000_ht_params,
-	.nvm_ver = IWL_22000_NVM_VERSION,
-	.nvm_calib_ver = IWL_22000_TX_POWER_VERSION,
-	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+	IWL_DEVICE_22500,
 	.cdb = true,
 };
 
 const struct iwl_cfg iwl22000_2ac_cfg_jf = {
 	.name = "Intel(R) Dual Band Wireless AC 22000",
 	.fw_name_pre = IWL_22000_JF_FW_PRE,
-	IWL_DEVICE_22000,
-	.csr = &iwl_csr_v1,
-	.ht_params = &iwl_22000_ht_params,
-	.nvm_ver = IWL_22000_NVM_VERSION,
-	.nvm_calib_ver = IWL_22000_TX_POWER_VERSION,
-	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+	IWL_DEVICE_22500,
 };
 
 const struct iwl_cfg iwl22000_2ax_cfg_hr = {
 	.name = "Intel(R) Dual Band Wireless AX 22000",
 	.fw_name_pre = IWL_22000_HR_FW_PRE,
-	IWL_DEVICE_22000,
-	.csr = &iwl_csr_v1,
-	.ht_params = &iwl_22000_ht_params,
-	.nvm_ver = IWL_22000_NVM_VERSION,
-	.nvm_calib_ver = IWL_22000_TX_POWER_VERSION,
-	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+	IWL_DEVICE_22500,
+	/*
+	 * This device doesn't support receiving BlockAck with a large bitmap
+	 * so we need to restrict the size of transmitted aggregation to the
+	 * HT size; mac80211 would otherwise pick the HE max (256) by default.
+	 */
+	.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
 };
 
-const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_f0 = {
+const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_a0_f0 = {
 	.name = "Intel(R) Dual Band Wireless AX 22000",
-	.fw_name_pre = IWL_22000_HR_F0_FW_PRE,
-	IWL_DEVICE_22000,
-	.csr = &iwl_csr_v1,
-	.ht_params = &iwl_22000_ht_params,
-	.nvm_ver = IWL_22000_NVM_VERSION,
-	.nvm_calib_ver = IWL_22000_TX_POWER_VERSION,
-	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+	.fw_name_pre = IWL_22000_HR_A_F0_FW_PRE,
+	IWL_DEVICE_22500,
+	/*
+	 * This device doesn't support receiving BlockAck with a large bitmap
+	 * so we need to restrict the size of transmitted aggregation to the
+	 * HT size; mac80211 would otherwise pick the HE max (256) by default.
+	 */
+	.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+};
+
+const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_b0 = {
+	.name = "Intel(R) Dual Band Wireless AX 22000",
+	.fw_name_pre = IWL_22000_HR_B_FW_PRE,
+	IWL_DEVICE_22500,
+	/*
+	 * This device doesn't support receiving BlockAck with a large bitmap
+	 * so we need to restrict the size of transmitted aggregation to the
+	 * HT size; mac80211 would otherwise pick the HE max (256) by default.
+	 */
+	.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
 };
 
 const struct iwl_cfg iwl22000_2ax_cfg_qnj_jf_b0 = {
 	.name = "Intel(R) Dual Band Wireless AX 22000",
 	.fw_name_pre = IWL_22000_JF_B0_FW_PRE,
-	IWL_DEVICE_22000,
-	.csr = &iwl_csr_v1,
-	.ht_params = &iwl_22000_ht_params,
-	.nvm_ver = IWL_22000_NVM_VERSION,
-	.nvm_calib_ver = IWL_22000_TX_POWER_VERSION,
-	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+	IWL_DEVICE_22500,
+	/*
+	 * This device doesn't support receiving BlockAck with a large bitmap
+	 * so we need to restrict the size of transmitted aggregation to the
+	 * HT size; mac80211 would otherwise pick the HE max (256) by default.
+	 */
+	.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
 };
 
 const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_a0 = {
 	.name = "Intel(R) Dual Band Wireless AX 22000",
 	.fw_name_pre = IWL_22000_HR_A0_FW_PRE,
-	IWL_DEVICE_22000,
-	.csr = &iwl_csr_v1,
-	.ht_params = &iwl_22000_ht_params,
-	.nvm_ver = IWL_22000_NVM_VERSION,
-	.nvm_calib_ver = IWL_22000_TX_POWER_VERSION,
-	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+	IWL_DEVICE_22500,
+	/*
+	 * This device doesn't support receiving BlockAck with a large bitmap
+	 * so we need to restrict the size of transmitted aggregation to the
+	 * HT size; mac80211 would otherwise pick the HE max (256) by default.
+	 */
+	.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+};
+
+const struct iwl_cfg iwl22560_2ax_cfg_su_cdb = {
+	.name = "Intel(R) Dual Band Wireless AX 22560",
+	.fw_name_pre = IWL_22000_SU_Z0_FW_PRE,
+	IWL_DEVICE_22560,
+	.cdb = true,
+	/*
+	 * This device doesn't support receiving BlockAck with a large bitmap
+	 * so we need to restrict the size of transmitted aggregation to the
+	 * HT size; mac80211 would otherwise pick the HE max (256) by default.
+	 */
+	.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
 };
 
 MODULE_FIRMWARE(IWL_22000_HR_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
 MODULE_FIRMWARE(IWL_22000_JF_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
-MODULE_FIRMWARE(IWL_22000_HR_F0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_22000_HR_A_F0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_22000_HR_B_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
 MODULE_FIRMWARE(IWL_22000_JF_B0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
 MODULE_FIRMWARE(IWL_22000_HR_A0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_22000_SU_Z0_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/5000.c b/drivers/net/wireless/intel/iwlwifi/cfg/5000.c
index a224f1be1ec2cd614d947a26d8e35ed1fa917e27..36151e61a26f018f6c0ee1bc13a5931b08a6cffb 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/5000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/5000.c
@@ -53,6 +53,7 @@
 static const struct iwl_base_params iwl5000_base_params = {
 	.eeprom_size = IWLAGN_EEPROM_IMG_SIZE,
 	.num_of_queues = IWLAGN_NUM_QUEUES,
+	.max_tfd_queue_size = 256,
 	.pll_cfg = true,
 	.led_compensation = 51,
 	.wd_timeout = IWL_WATCHDOG_DISABLED,
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/6000.c b/drivers/net/wireless/intel/iwlwifi/cfg/6000.c
index dbcec7ce7863074f8cd2ca76df9dde4a67a7cc7b..b5d8274761d8d776613376034232b87f9467e385 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/6000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/6000.c
@@ -72,6 +72,7 @@
 static const struct iwl_base_params iwl6000_base_params = {
 	.eeprom_size = OTP_LOW_IMAGE_SIZE,
 	.num_of_queues = IWLAGN_NUM_QUEUES,
+	.max_tfd_queue_size = 256,
 	.max_ll_items = OTP_MAX_LL_ITEMS_6x00,
 	.shadow_ram_support = true,
 	.led_compensation = 51,
@@ -84,6 +85,7 @@ static const struct iwl_base_params iwl6000_base_params = {
 static const struct iwl_base_params iwl6050_base_params = {
 	.eeprom_size = OTP_LOW_IMAGE_SIZE,
 	.num_of_queues = IWLAGN_NUM_QUEUES,
+	.max_tfd_queue_size = 256,
 	.max_ll_items = OTP_MAX_LL_ITEMS_6x50,
 	.shadow_ram_support = true,
 	.led_compensation = 51,
@@ -96,6 +98,7 @@ static const struct iwl_base_params iwl6050_base_params = {
 static const struct iwl_base_params iwl6000_g2_base_params = {
 	.eeprom_size = OTP_LOW_IMAGE_SIZE,
 	.num_of_queues = IWLAGN_NUM_QUEUES,
+	.max_tfd_queue_size = 256,
 	.max_ll_items = OTP_MAX_LL_ITEMS_6x00,
 	.shadow_ram_support = true,
 	.led_compensation = 57,
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/7000.c b/drivers/net/wireless/intel/iwlwifi/cfg/7000.c
index 69bfa827e82adf6512f0e5b868150840ba636481..a62c8346f13a47691a6dfe5bff657a90c2440f1c 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/7000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/7000.c
@@ -123,6 +123,7 @@
 static const struct iwl_base_params iwl7000_base_params = {
 	.eeprom_size = OTP_LOW_IMAGE_SIZE_FAMILY_7000,
 	.num_of_queues = 31,
+	.max_tfd_queue_size = 256,
 	.shadow_ram_support = true,
 	.led_compensation = 57,
 	.wd_timeout = IWL_LONG_WD_TIMEOUT,
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/8000.c b/drivers/net/wireless/intel/iwlwifi/cfg/8000.c
index 7262e973e0d6483f03f60b0d321693654f669040..c46fa712985b7e70568461736957c7e09b063a20 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/8000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/8000.c
@@ -104,6 +104,7 @@
 static const struct iwl_base_params iwl8000_base_params = {
 	.eeprom_size = OTP_LOW_IMAGE_SIZE_FAMILY_8000,
 	.num_of_queues = 31,
+	.max_tfd_queue_size = 256,
 	.shadow_ram_support = true,
 	.led_compensation = 57,
 	.wd_timeout = IWL_LONG_WD_TIMEOUT,
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c
index c8ea63d02619c345a8260a316665a40f03ac391c..24b2f7cbb308c95ca5ea378151d36e12f3c9b892 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c
@@ -95,6 +95,7 @@
 static const struct iwl_base_params iwl9000_base_params = {
 	.eeprom_size = OTP_LOW_IMAGE_SIZE_FAMILY_9000,
 	.num_of_queues = 31,
+	.max_tfd_queue_size = 256,
 	.shadow_ram_support = true,
 	.led_compensation = 57,
 	.wd_timeout = IWL_LONG_WD_TIMEOUT,
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h b/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h
index 007bfe7656a4ae704722b3eaf2b29fe33b03238b..08d3d8a190f6b9f0ce75cbc39754247b2fc4d1cf 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h
@@ -8,6 +8,7 @@
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright (C) 2018 Intel Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -30,6 +31,7 @@
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright (C) 2018 Intel Corporation
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -187,20 +189,4 @@ struct iwl_card_state_notif {
 	__le32 flags;
 } __packed; /* CARD_STATE_NTFY_API_S_VER_1 */
 
-/**
- * struct iwl_fseq_ver_mismatch_nty - Notification about version
- *
- * This notification does not have a direct impact on the init flow.
- * It means that another core (not WiFi) has initiated the FSEQ flow
- * and updated the FSEQ version.  The driver only prints an error when
- * this occurs.
- *
- * @aux_read_fseq_ver: auxiliary read FSEQ version
- * @wifi_fseq_ver: FSEQ version (embedded in WiFi)
- */
-struct iwl_fseq_ver_mismatch_ntf {
-	__le32 aux_read_fseq_ver;
-	__le32 wifi_fseq_ver;
-} __packed; /* FSEQ_VER_MISMATCH_NTFY_API_S_VER_1 */
-
 #endif /* __iwl_fw_api_alive_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h b/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h
index f285bacc8726523b8b3b38b44da3d019f8c61435..6dad748e5cdc9538d1d3c84442f3dea2e227c349 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h
@@ -193,7 +193,8 @@ enum iwl_legacy_cmds {
 	FW_GET_ITEM_CMD = 0x1a,
 
 	/**
-	 * @TX_CMD: uses &struct iwl_tx_cmd or &struct iwl_tx_cmd_gen2,
+	 * @TX_CMD: uses &struct iwl_tx_cmd or &struct iwl_tx_cmd_gen2 or
+	 *	&struct iwl_tx_cmd_gen3,
 	 *	response in &struct iwl_mvm_tx_resp or
 	 *	&struct iwl_mvm_tx_resp_v3
 	 */
@@ -646,13 +647,6 @@ enum iwl_system_subcmd_ids {
 	 * @INIT_EXTENDED_CFG_CMD: &struct iwl_init_extended_cfg_cmd
 	 */
 	INIT_EXTENDED_CFG_CMD = 0x03,
-
-	/**
-	 * @FSEQ_VER_MISMATCH_NTF: Notification about fseq version
-	 *	mismatch during init.  The format is specified in
-	 *	&struct iwl_fseq_ver_mismatch_ntf.
-	 */
-	FSEQ_VER_MISMATCH_NTF = 0xFF,
 };
 
 #endif /* __iwl_fw_api_commands_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h b/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h
index 5f6e855006dd9359ec7ebfd32f37798bff4c60d4..59b3c6e8f37b609cf269c8915cdbe3bf7867079b 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h
@@ -8,6 +8,7 @@
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright (C) 2018 Intel Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -30,6 +31,7 @@
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright (C) 2018 Intel Corporation
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -82,6 +84,16 @@ enum iwl_data_path_subcmd_ids {
 	 */
 	TRIGGER_RX_QUEUES_NOTIF_CMD = 0x2,
 
+	/**
+	 * @STA_HE_CTXT_CMD: &struct iwl_he_sta_context_cmd
+	 */
+	STA_HE_CTXT_CMD = 0x7,
+
+	/**
+	 * @RFH_QUEUE_CONFIG_CMD: &struct iwl_rfh_queue_config
+	 */
+	RFH_QUEUE_CONFIG_CMD = 0xD,
+
 	/**
 	 * @TLC_MNG_CONFIG_CMD: &struct iwl_tlc_config_cmd
 	 */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h b/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h
index f2e31e040a7b51334c8407b8e35fa83b441ab27c..55594c93b014cfe9c94ed689dd7482d835217bfe 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h
@@ -7,6 +7,7 @@
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2017        Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -28,6 +29,7 @@
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2017        Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -279,6 +281,10 @@ enum iwl_mac_filter_flags {
 	MAC_FILTER_OUT_BCAST		= BIT(8),
 	MAC_FILTER_IN_CRC32		= BIT(11),
 	MAC_FILTER_IN_PROBE_REQUEST	= BIT(12),
+	/**
+	 * @MAC_FILTER_IN_11AX: mark BSS as supporting 802.11ax
+	 */
+	MAC_FILTER_IN_11AX		= BIT(14),
 };
 
 /**
@@ -406,4 +412,170 @@ struct iwl_missed_beacons_notif {
 	__le32 num_recvd_beacons;
 } __packed; /* MISSED_BEACON_NTFY_API_S_VER_3 */
 
+/**
+ * struct iwl_he_backoff_conf - used for backoff configuration
+ * Per-AC backoff configuration for the trigger-based TX FIFOs (TXF5..TXF8),
+ * set by the MU EDCA Parameter Set information element.
+ * The MU-TIMER is reloaded w/ MU_TIME each time a frame from the AC is sent via
+ * trigger-based TX.
+ * @cwmin: CW min
+ * @cwmax: CW max
+ * @aifsn: AIFSN
+ *	AIFSN=0 means that no backoff from the specified trigger-based AC is
+ *	allowed until the MU-TIMER reaches 0
+ * @mu_time: MU time in 8TU units
+ */
+struct iwl_he_backoff_conf {
+	__le16 cwmin;
+	__le16 cwmax;
+	__le16 aifsn;
+	__le16 mu_time;
+} __packed; /* AC_QOS_DOT11AX_API_S */
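A minimal sketch of how one trigger-based AC entry could be filled from the MU EDCA Parameter Set record that mac80211 exposes; the ieee80211_he_mu_edca_param_ac_rec field names are assumptions based on the element layout, not part of this patch:

/* Hedged sketch: convert one MU EDCA AC record into iwl_he_backoff_conf.
 * The ieee80211_he_mu_edca_param_ac_rec layout (aifsn, ecw_min_max,
 * mu_edca_timer) is assumed here and not defined by this patch.
 */
static void iwl_fill_he_backoff(struct iwl_he_backoff_conf *trig,
				const struct ieee80211_he_mu_edca_param_ac_rec *ac)
{
	u8 ecw_min = ac->ecw_min_max & 0x0f;
	u8 ecw_max = (ac->ecw_min_max & 0xf0) >> 4;

	/* CW = 2^ECW - 1, as encoded in the MU EDCA Parameter Set element */
	trig->cwmin = cpu_to_le16(BIT(ecw_min) - 1);
	trig->cwmax = cpu_to_le16(BIT(ecw_max) - 1);
	trig->aifsn = cpu_to_le16(ac->aifsn);
	/* mu_time is in units of 8 TUs, same unit as the element's timer */
	trig->mu_time = cpu_to_le16(ac->mu_edca_timer);
}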
+
+#define MAX_HE_SUPP_NSS	2
+#define MAX_HE_CHANNEL_BW_INDX	4
+
+/**
+ * struct iwl_he_pkt_ext - QAM thresholds
+ * The required PPE is set via HE Capabilities IE, per Nss x BW x MCS
+ * The IE is organized in the following way:
+ * Support for Nss x BW (or RU) matrix:
+ *	(0=SISO, 1=MIMO2) x (0-20MHz, 1-40MHz, 2-80MHz, 3-160MHz)
+ * Each entry contains 2 QAM thresholds for 8us and 16us:
+ *	0=BPSK, 1=QPSK, 2=16QAM, 3=64QAM, 4=256QAM, 5=1024QAM, 6/7=RES
+ * i.e. QAM_th1 < QAM_th2 such that if TX uses QAM_tx:
+ *	QAM_tx < QAM_th1            --> PPE=0us
+ *	QAM_th1 <= QAM_tx < QAM_th2 --> PPE=8us
+ *	QAM_th2 <= QAM_tx           --> PPE=16us
+ * @pkt_ext_qam_th: QAM thresholds
+ *	For each Nss/BW define 2 QAM thresholds (0..5)
+ *	For rates below the low_th, no need for PPE
+ *	For rates between low_th and high_th, need 8us PPE
+ *	For rates equal to or higher than the high_th, need 16us PPE
+ *	Nss (0-siso, 1-mimo2) x BW (0-20MHz, 1-40MHz, 2-80MHz, 3-160MHz) x
+ *		(0-low_th, 1-high_th)
+ */
+struct iwl_he_pkt_ext {
+	u8 pkt_ext_qam_th[MAX_HE_SUPP_NSS][MAX_HE_CHANNEL_BW_INDX][2];
+} __packed; /* PKT_EXT_DOT11AX_API_S */
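The threshold rule above maps directly to a lookup; a minimal sketch (helper name and microsecond return encoding are illustrative only):

/* Hedged sketch: pick the nominal packet extension (in microseconds) for a
 * transmit constellation qam_tx (0=BPSK .. 5=1024QAM) using the two
 * thresholds stored for a given Nss/BW entry.
 */
static u8 iwl_he_ppe_duration_us(const struct iwl_he_pkt_ext *pkt_ext,
				 int nss, int bw, u8 qam_tx)
{
	u8 low_th = pkt_ext->pkt_ext_qam_th[nss][bw][0];
	u8 high_th = pkt_ext->pkt_ext_qam_th[nss][bw][1];

	if (qam_tx < low_th)
		return 0;	/* no packet extension needed */
	if (qam_tx < high_th)
		return 8;	/* 8 us PPE */
	return 16;		/* 16 us PPE */
}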
+
+/**
+ * enum iwl_he_sta_ctxt_flags - HE STA context flags
+ * @STA_CTXT_HE_REF_BSSID_VALID: ref bssid addr valid (for receiving specific
+ *	control frames such as TRIG, NDPA, BACK)
+ * @STA_CTXT_HE_BSS_COLOR_DIS: BSS color disable, don't use the BSS
+ *	color for RX filter but use MAC header
+ * @STA_CTXT_HE_PARTIAL_BSS_COLOR: partial BSS color allocation
+ * @STA_CTXT_HE_32BIT_BA_BITMAP: indicates the receiver supports BA bitmap
+ *	of 32-bits
+ * @STA_CTXT_HE_PACKET_EXT: indicates that the packet-extension info is valid
+ *	and should be used
+ * @STA_CTXT_HE_TRIG_RND_ALLOC: indicates that trigger based random allocation
+ *	is enabled according to UORA element existence
+ * @STA_CTXT_HE_CONST_TRIG_RND_ALLOC: used for AV testing
+ * @STA_CTXT_HE_ACK_ENABLED: indicates that the AP supports receiving ACK-
+ *	enabled AGG, i.e. both BACK and non-BACK frames in a single AGG
+ * @STA_CTXT_HE_MU_EDCA_CW: indicates that there is an element of MU EDCA
+ *	parameter set, i.e. the backoff counters for trig-based ACs
+ */
+enum iwl_he_sta_ctxt_flags {
+	STA_CTXT_HE_REF_BSSID_VALID		= BIT(4),
+	STA_CTXT_HE_BSS_COLOR_DIS		= BIT(5),
+	STA_CTXT_HE_PARTIAL_BSS_COLOR		= BIT(6),
+	STA_CTXT_HE_32BIT_BA_BITMAP		= BIT(7),
+	STA_CTXT_HE_PACKET_EXT			= BIT(8),
+	STA_CTXT_HE_TRIG_RND_ALLOC		= BIT(9),
+	STA_CTXT_HE_CONST_TRIG_RND_ALLOC	= BIT(10),
+	STA_CTXT_HE_ACK_ENABLED			= BIT(11),
+	STA_CTXT_HE_MU_EDCA_CW			= BIT(12),
+};
+
+/**
+ * enum iwl_he_htc_flags - HE HTC support flags
+ * @IWL_HE_HTC_SUPPORT: HE-HTC support
+ * @IWL_HE_HTC_UL_MU_RESP_SCHED: HE UL MU response schedule
+ *	support via A-control field
+ * @IWL_HE_HTC_BSR_SUPP: BSR support in A-control field
+ * @IWL_HE_HTC_OMI_SUPP: A-OMI support in A-control field
+ * @IWL_HE_HTC_BQR_SUPP: A-BQR support in A-control field
+ */
+enum iwl_he_htc_flags {
+	IWL_HE_HTC_SUPPORT			= BIT(0),
+	IWL_HE_HTC_UL_MU_RESP_SCHED		= BIT(3),
+	IWL_HE_HTC_BSR_SUPP			= BIT(4),
+	IWL_HE_HTC_OMI_SUPP			= BIT(5),
+	IWL_HE_HTC_BQR_SUPP			= BIT(6),
+};
+
+/*
+ * @IWL_HE_HTC_LINK_ADAP_NO_FEEDBACK: the STA does not provide HE MFB
+ * @IWL_HE_HTC_LINK_ADAP_UNSOLICITED: the STA provides only unsolicited HE MFB
+ * @IWL_HE_HTC_LINK_ADAP_BOTH: the STA is capable of providing HE MFB in
+ *      response to HE MRQ and also provides unsolicited HE MFB
+ */
+#define IWL_HE_HTC_LINK_ADAP_POS		(1)
+#define IWL_HE_HTC_LINK_ADAP_NO_FEEDBACK	(0)
+#define IWL_HE_HTC_LINK_ADAP_UNSOLICITED	(2 << IWL_HE_HTC_LINK_ADAP_POS)
+#define IWL_HE_HTC_LINK_ADAP_BOTH		(3 << IWL_HE_HTC_LINK_ADAP_POS)
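These link-adaptation values occupy a small field inside the same htc_flags word as the enum iwl_he_htc_flags bits; a minimal sketch of composing one value (which bits to set must really come from the peer's HE MAC capabilities, the selection here is purely illustrative):

/* Hedged sketch: advertise HTC support with BSR and both solicited and
 * unsolicited HE MFB. Illustrative only; not a real capability mapping.
 */
__le32 htc_flags = cpu_to_le32(IWL_HE_HTC_SUPPORT |
			       IWL_HE_HTC_BSR_SUPP |
			       IWL_HE_HTC_LINK_ADAP_BOTH);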
+
+/**
+ * struct iwl_he_sta_context_cmd - configure FW to work with HE AP
+ * @sta_id: STA id
+ * @tid_limit: max num of TIDs in TX HE-SU multi-TID agg
+ *	0 - bad value, 1 - multi-tid not supported, 2..8 - tid limit
+ * @reserved1: reserved byte for future use
+ * @reserved2: reserved byte for future use
+ * @flags: see &enum iwl_he_sta_ctxt_flags
+ * @ref_bssid_addr: reference BSSID used by the AP
+ * @reserved0: reserved 2 bytes for aligning the ref_bssid_addr field to 8 bytes
+ * @htc_flags: which features are supported in HTC
+ * @frag_flags: frag support in A-MSDU
+ * @frag_level: frag support level
+ * @frag_max_num: max num of "open" MSDUs in the receiver (in power of 2)
+ * @frag_min_size: min frag size (except last frag)
+ * @pkt_ext: optional, exists according to PPE-present bit in the HE-PHY capa
+ * @bss_color: 11ax AP ID that is used in the HE SIG-A to mark inter-BSS frames
+ * @htc_trig_based_pkt_ext: default PE in 4us units
+ * @frame_time_rts_th: HE duration RTS threshold, in units of 32us
+ * @rand_alloc_ecwmin: random CWmin = 2**ECWmin-1
+ * @rand_alloc_ecwmax: random CWmax = 2**ECWmax-1
+ * @reserved3: reserved byte for future use
+ * @trig_based_txf: MU EDCA Parameter set for the trigger based traffic queues
+ */
+struct iwl_he_sta_context_cmd {
+	u8 sta_id;
+	u8 tid_limit;
+	u8 reserved1;
+	u8 reserved2;
+	__le32 flags;
+
+	/* The below fields are set via Multiple BSSID IE */
+	u8 ref_bssid_addr[6];
+	__le16 reserved0;
+
+	/* The below fields are set via HE-capabilities IE */
+	__le32 htc_flags;
+
+	u8 frag_flags;
+	u8 frag_level;
+	u8 frag_max_num;
+	u8 frag_min_size;
+
+	/* The below fields are set via PPE thresholds element */
+	struct iwl_he_pkt_ext pkt_ext;
+
+	/* The below fields are set via HE-Operation IE */
+	u8 bss_color;
+	u8 htc_trig_based_pkt_ext;
+	__le16 frame_time_rts_th;
+
+	/* Random access parameter set (i.e. RAPS) */
+	u8 rand_alloc_ecwmin;
+	u8 rand_alloc_ecwmax;
+	__le16 reserved3;
+
+	/* The below fields are set via MU EDCA parameter set element */
+	struct iwl_he_backoff_conf trig_based_txf[AC_NUM];
+} __packed; /* STA_CONTEXT_DOT11AX_API_S */
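A minimal sketch of populating and sending this command on the data-path group; the iwl_mvm_send_cmd_pdu()/WIDE_ID() usage follows existing driver conventions and is assumed here rather than introduced by this patch:

/* Hedged sketch: send a minimally populated HE STA context command.
 * Real code would fill htc_flags, pkt_ext, bss_color, etc. from the
 * peer's HE capabilities and the HE Operation element.
 */
static int iwl_sta_he_ctxt_example(struct iwl_mvm *mvm, u8 sta_id)
{
	struct iwl_he_sta_context_cmd cmd = {
		.sta_id = sta_id,
		.tid_limit = 8,		/* full multi-TID aggregation */
		.flags = cpu_to_le32(STA_CTXT_HE_PACKET_EXT),
	};

	return iwl_mvm_send_cmd_pdu(mvm,
				    WIDE_ID(DATA_PATH_GROUP, STA_HE_CTXT_CMD),
				    0, sizeof(cmd), &cmd);
}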
+
 #endif /* __iwl_fw_api_mac_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h b/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h
index 8d6dc918998537a53543e968012f72714f8786a8..6c53383647942bc915bf09766a24276f4f22b5e2 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h
@@ -195,7 +195,6 @@ struct iwl_nvm_get_info_general {
  * @NVM_MAC_SKU_FLAGS_BAND_5_2_ENABLED: true if 5.2 band enabled
  * @NVM_MAC_SKU_FLAGS_802_11N_ENABLED: true if 11n enabled
  * @NVM_MAC_SKU_FLAGS_802_11AC_ENABLED: true if 11ac enabled
- * @NVM_MAC_SKU_FLAGS_802_11AX_ENABLED: true if 11ax enabled
  * @NVM_MAC_SKU_FLAGS_MIMO_DISABLED: true if MIMO disabled
  * @NVM_MAC_SKU_FLAGS_WAPI_ENABLED: true if WAPI enabled
  * @NVM_MAC_SKU_FLAGS_REG_CHECK_ENABLED: true if regulatory checker enabled
@@ -206,6 +205,9 @@ enum iwl_nvm_mac_sku_flags {
 	NVM_MAC_SKU_FLAGS_BAND_5_2_ENABLED	= BIT(1),
 	NVM_MAC_SKU_FLAGS_802_11N_ENABLED	= BIT(2),
 	NVM_MAC_SKU_FLAGS_802_11AC_ENABLED	= BIT(3),
+	/**
+	 * @NVM_MAC_SKU_FLAGS_802_11AX_ENABLED: true if 11ax enabled
+	 */
 	NVM_MAC_SKU_FLAGS_802_11AX_ENABLED	= BIT(4),
 	NVM_MAC_SKU_FLAGS_MIMO_DISABLED		= BIT(5),
 	NVM_MAC_SKU_FLAGS_WAPI_ENABLED		= BIT(8),
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h b/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h
index 21e13a315421ae972e057a99916291592c810326..087fae91baeff2fcbaff253e33f04ace4b07c91b 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h
@@ -314,8 +314,11 @@ enum {
 	IWL_RATE_MCS_8_INDEX,
 	IWL_RATE_MCS_9_INDEX,
 	IWL_LAST_VHT_RATE = IWL_RATE_MCS_9_INDEX,
+	IWL_RATE_MCS_10_INDEX,
+	IWL_RATE_MCS_11_INDEX,
+	IWL_LAST_HE_RATE = IWL_RATE_MCS_11_INDEX,
 	IWL_RATE_COUNT_LEGACY = IWL_LAST_NON_HT_RATE + 1,
-	IWL_RATE_COUNT = IWL_LAST_VHT_RATE + 1,
+	IWL_RATE_COUNT = IWL_LAST_HE_RATE + 1,
 };
 
 #define IWL_RATE_BIT_MSK(r) BIT(IWL_RATE_##r##M_INDEX)
@@ -440,8 +443,8 @@ enum {
 #define RATE_LEGACY_RATE_MSK 0xff
 
 /* Bit 10 - OFDM HE */
-#define RATE_MCS_OFDM_HE_POS		10
-#define RATE_MCS_OFDM_HE_MSK		BIT(RATE_MCS_OFDM_HE_POS)
+#define RATE_MCS_HE_POS		10
+#define RATE_MCS_HE_MSK		BIT(RATE_MCS_HE_POS)
 
 /*
  * Bit 11-12: (0) 20MHz, (1) 40MHz, (2) 80MHz, (3) 160MHz
@@ -482,15 +485,33 @@ enum {
 #define RATE_MCS_BF_MSK			(1 << RATE_MCS_BF_POS)
 
 /*
- * Bit 20-21: HE guard interval and LTF type.
- * (0) 1xLTF+1.6us, (1) 2xLTF+0.8us,
- * (2) 2xLTF+1.6us, (3) 4xLTF+3.2us
+ * Bit 20-21: HE LTF type and guard interval
+ * HE (ext) SU:
+ *	0			1xLTF+0.8us
+ *	1			2xLTF+0.8us
+ *	2			2xLTF+1.6us
+ *	3 & SGI (bit 13) clear	4xLTF+3.2us
+ *	3 & SGI (bit 13) set	4xLTF+0.8us
+ * HE MU:
+ *	0			4xLTF+0.8us
+ *	1			2xLTF+0.8us
+ *	2			2xLTF+1.6us
+ *	3			4xLTF+3.2us
+ * HE TRIG:
+ *	0			1xLTF+1.6us
+ *	1			2xLTF+1.6us
+ *	2			4xLTF+3.2us
+ *	3			(does not occur)
  */
 #define RATE_MCS_HE_GI_LTF_POS		20
 #define RATE_MCS_HE_GI_LTF_MSK		(3 << RATE_MCS_HE_GI_LTF_POS)
 
 /* Bit 22-23: HE type. (0) SU, (1) SU_EXT, (2) MU, (3) trigger based */
 #define RATE_MCS_HE_TYPE_POS		22
+#define RATE_MCS_HE_TYPE_SU		(0 << RATE_MCS_HE_TYPE_POS)
+#define RATE_MCS_HE_TYPE_EXT_SU		(1 << RATE_MCS_HE_TYPE_POS)
+#define RATE_MCS_HE_TYPE_MU		(2 << RATE_MCS_HE_TYPE_POS)
+#define RATE_MCS_HE_TYPE_TRIG		(3 << RATE_MCS_HE_TYPE_POS)
 #define RATE_MCS_HE_TYPE_MSK		(3 << RATE_MCS_HE_TYPE_POS)
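The GI/LTF selector only has meaning together with the HE type (and, for extended SU, the SGI bit), per the table above; a minimal sketch of extracting and classifying the raw fields (helper name is illustrative):

/* Hedged sketch: check whether rate_n_flags describes a trigger-based
 * HE PPDU whose GI/LTF selector means 4xLTF + 3.2us.
 */
static bool iwl_he_rate_is_trig_4xltf(u32 rate_n_flags)
{
	u32 he_type = rate_n_flags & RATE_MCS_HE_TYPE_MSK;
	u32 gi_ltf = (rate_n_flags & RATE_MCS_HE_GI_LTF_MSK) >>
		     RATE_MCS_HE_GI_LTF_POS;

	/* for trigger-based PPDUs, selector 2 means 4xLTF + 3.2us */
	return he_type == RATE_MCS_HE_TYPE_TRIG && gi_ltf == 2;
}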
 
 /* Bit 24-25: (0) 20MHz (no dup), (1) 2x20MHz, (2) 4x20MHz, 3 8x20MHz */
@@ -501,6 +522,9 @@ enum {
 #define RATE_MCS_LDPC_POS		27
 #define RATE_MCS_LDPC_MSK		(1 << RATE_MCS_LDPC_POS)
 
+/* Bit 28: (1) 106-tone RX (8 MHz RU), (0) normal bandwidth */
+#define RATE_MCS_HE_106T_POS		28
+#define RATE_MCS_HE_106T_MSK		(1 << RATE_MCS_HE_106T_POS)
 
 /* Link Quality definitions */
 
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h b/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h
index 7e570c4a9df0ed0d396165ca804d7896088d07e6..2f599353c8856b4c9604e4953722771c7fef4837 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h
@@ -8,6 +8,7 @@
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -30,6 +31,7 @@
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -343,6 +345,169 @@ enum iwl_rx_mpdu_mac_info {
 	IWL_RX_MPDU_PHY_PHY_INDEX_MASK		= 0xf0,
 };
 
+/*
+ * enum iwl_rx_he_phy - HE PHY data
+ */
+enum iwl_rx_he_phy {
+	IWL_RX_HE_PHY_BEAM_CHNG			= BIT(0),
+	IWL_RX_HE_PHY_UPLINK			= BIT(1),
+	IWL_RX_HE_PHY_BSS_COLOR_MASK		= 0xfc,
+	IWL_RX_HE_PHY_SPATIAL_REUSE_MASK	= 0xf00,
+	IWL_RX_HE_PHY_SU_EXT_BW10		= BIT(12),
+	IWL_RX_HE_PHY_TXOP_DUR_MASK		= 0xfe000,
+	IWL_RX_HE_PHY_LDPC_EXT_SYM		= BIT(20),
+	IWL_RX_HE_PHY_PRE_FEC_PAD_MASK		= 0x600000,
+	IWL_RX_HE_PHY_PE_DISAMBIG		= BIT(23),
+	IWL_RX_HE_PHY_DOPPLER			= BIT(24),
+	/* 6 bits reserved */
+	IWL_RX_HE_PHY_DELIM_EOF			= BIT(31),
+
+	/* second dword - MU data */
+	IWL_RX_HE_PHY_SIGB_COMPRESSION		= BIT_ULL(32 + 0),
+	IWL_RX_HE_PHY_SIBG_SYM_OR_USER_NUM_MASK	= 0x1e00000000ULL,
+	IWL_RX_HE_PHY_HE_LTF_NUM_MASK		= 0xe000000000ULL,
+	IWL_RX_HE_PHY_RU_ALLOC_SEC80		= BIT_ULL(32 + 8),
+	/* trigger encoded */
+	IWL_RX_HE_PHY_RU_ALLOC_MASK		= 0xfe0000000000ULL,
+	IWL_RX_HE_PHY_SIGB_MCS_MASK		= 0xf000000000000ULL,
+	/* 1 bit reserved */
+	IWL_RX_HE_PHY_SIGB_DCM			= BIT_ULL(32 + 21),
+	IWL_RX_HE_PHY_PREAMBLE_PUNC_TYPE_MASK	= 0xc0000000000000ULL,
+	/* 8 bits reserved */
+};
+
+/**
+ * struct iwl_rx_mpdu_desc_v1 - RX MPDU descriptor
+ */
+struct iwl_rx_mpdu_desc_v1 {
+	/* DW7 - carries rss_hash only when rpa_en == 1 */
+	/**
+	 * @rss_hash: RSS hash value
+	 */
+	__le32 rss_hash;
+	/* DW8 - carries filter_match only when rpa_en == 1 */
+	/**
+	 * @filter_match: filter match value
+	 */
+	__le32 filter_match;
+	/* DW9 */
+	/**
+	 * @rate_n_flags: RX rate/flags encoding
+	 */
+	__le32 rate_n_flags;
+	/* DW10 */
+	/**
+	 * @energy_a: energy chain A
+	 */
+	u8 energy_a;
+	/**
+	 * @energy_b: energy chain B
+	 */
+	u8 energy_b;
+	/**
+	 * @channel: channel number
+	 */
+	u8 channel;
+	/**
+	 * @mac_context: MAC context mask
+	 */
+	u8 mac_context;
+	/* DW11 */
+	/**
+	 * @gp2_on_air_rise: GP2 timer value on air rise (INA)
+	 */
+	__le32 gp2_on_air_rise;
+	/* DW12 & DW13 */
+	union {
+		/**
+		 * @tsf_on_air_rise:
+		 * TSF value on air rise (INA), only valid if
+		 * %IWL_RX_MPDU_PHY_TSF_OVERLOAD isn't set
+		 */
+		__le64 tsf_on_air_rise;
+		/**
+		 * @he_phy_data:
+		 * HE PHY data, see &enum iwl_rx_he_phy, valid
+		 * only if %IWL_RX_MPDU_PHY_TSF_OVERLOAD is set
+		 */
+		__le64 he_phy_data;
+	};
+} __packed;
+
+/**
+ * struct iwl_rx_mpdu_desc_v3 - RX MPDU descriptor
+ */
+struct iwl_rx_mpdu_desc_v3 {
+	/* DW7 - carries filter_match only when rpa_en == 1 */
+	/**
+	 * @filter_match: filter match value
+	 */
+	__le32 filter_match;
+	/* DW8 - carries rss_hash only when rpa_en == 1 */
+	/**
+	 * @rss_hash: RSS hash value
+	 */
+	__le32 rss_hash;
+	/* DW9 */
+	/**
+	 * @partial_hash: 31:0 ip/tcp header hash
+	 *	w/o some fields (such as IP SRC addr)
+	 */
+	__le32 partial_hash;
+	/* DW10 */
+	/**
+	 * @raw_xsum: raw xsum value
+	 */
+	__le32 raw_xsum;
+	/* DW11 */
+	/**
+	 * @rate_n_flags: RX rate/flags encoding
+	 */
+	__le32 rate_n_flags;
+	/* DW12 */
+	/**
+	 * @energy_a: energy chain A
+	 */
+	u8 energy_a;
+	/**
+	 * @energy_b: energy chain B
+	 */
+	u8 energy_b;
+	/**
+	 * @channel: channel number
+	 */
+	u8 channel;
+	/**
+	 * @mac_context: MAC context mask
+	 */
+	u8 mac_context;
+	/* DW13 */
+	/**
+	 * @gp2_on_air_rise: GP2 timer value on air rise (INA)
+	 */
+	__le32 gp2_on_air_rise;
+	/* DW14 & DW15 */
+	union {
+		/**
+		 * @tsf_on_air_rise:
+		 * TSF value on air rise (INA), only valid if
+		 * %IWL_RX_MPDU_PHY_TSF_OVERLOAD isn't set
+		 */
+		__le64 tsf_on_air_rise;
+		/**
+		 * @he_phy_data:
+		 * HE PHY data, see &enum iwl_rx_he_phy, valid
+		 * only if %IWL_RX_MPDU_PHY_TSF_OVERLOAD is set
+		 */
+		__le64 he_phy_data;
+	};
+	/* DW16 & DW17 */
+	/**
+	 * @reserved: reserved
+	 */
+	__le32 reserved[2];
+} __packed; /* RX_MPDU_RES_START_API_S_VER_3 */
+
 /**
  * struct iwl_rx_mpdu_desc - RX MPDU descriptor
  */
@@ -400,51 +565,14 @@ struct iwl_rx_mpdu_desc {
 	 * @reorder_data: &enum iwl_rx_mpdu_reorder_data
 	 */
 	__le32 reorder_data;
-	/* DW7 - carries rss_hash only when rpa_en == 1 */
-	/**
-	 * @rss_hash: RSS hash value
-	 */
-	__le32 rss_hash;
-	/* DW8 - carries filter_match only when rpa_en == 1 */
-	/**
-	 * @filter_match: filter match value
-	 */
-	__le32 filter_match;
-	/* DW9 */
-	/**
-	 * @rate_n_flags: RX rate/flags encoding
-	 */
-	__le32 rate_n_flags;
-	/* DW10 */
-	/**
-	 * @energy_a: energy chain A
-	 */
-	u8 energy_a;
-	/**
-	 * @energy_b: energy chain B
-	 */
-	u8 energy_b;
-	/**
-	 * @channel: channel number
-	 */
-	u8 channel;
-	/**
-	 * @mac_context: MAC context mask
-	 */
-	u8 mac_context;
-	/* DW11 */
-	/**
-	 * @gp2_on_air_rise: GP2 timer value on air rise (INA)
-	 */
-	__le32 gp2_on_air_rise;
-	/* DW12 & DW13 */
-	/**
-	 * @tsf_on_air_rise:
-	 * TSF value on air rise (INA), only valid if
-	 * %IWL_RX_MPDU_PHY_TSF_OVERLOAD isn't set
-	 */
-	__le64 tsf_on_air_rise;
-} __packed;
+
+	union {
+		struct iwl_rx_mpdu_desc_v1 v1;
+		struct iwl_rx_mpdu_desc_v3 v3;
+	};
+} __packed; /* RX_MPDU_RES_START_API_S_VER_3 */
+
+#define IWL_RX_DESC_SIZE_V1 offsetofend(struct iwl_rx_mpdu_desc, v1)
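IWL_RX_DESC_SIZE_V1 lets callers treat the common prefix plus the v1 union member as the whole descriptor on older layouts; a minimal sketch, assuming the split follows the 22560 family introduced elsewhere in this patch (the exact family check is an assumption):

/* Hedged sketch: size of the RX MPDU descriptor the device actually
 * provides. Older devices only fill the v1 layout.
 */
static inline size_t iwl_rx_mpdu_desc_size(const struct iwl_cfg *cfg)
{
	if (cfg->device_family >= IWL_DEVICE_FAMILY_22560)
		return sizeof(struct iwl_rx_mpdu_desc);	/* v3 layout */
	return IWL_RX_DESC_SIZE_V1;	/* offsetofend(..., v1) */
}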
 
 struct iwl_frame_release {
 	u8 baid;
@@ -587,4 +715,36 @@ struct iwl_ba_window_status_notif {
 	__le16 mpdu_rx_count[BA_WINDOW_STREAMS_MAX];
 } __packed; /* BA_WINDOW_STATUS_NTFY_API_S_VER_1 */
 
+/**
+ * struct iwl_rfh_queue_data - RX queue configuration data
+ * @q_num: Q num
+ * @enable: enable queue
+ * @reserved: alignment
+ * @urbd_stts_wrptr: DMA address of urbd_stts_wrptr
+ * @fr_bd_cb: DMA address of the free RB table
+ * @ur_bd_cb: DMA address of used RB table
+ * @fr_bd_wid: Initial index of the free table
+ */
+struct iwl_rfh_queue_data {
+	u8 q_num;
+	u8 enable;
+	__le16 reserved;
+	__le64 urbd_stts_wrptr;
+	__le64 fr_bd_cb;
+	__le64 ur_bd_cb;
+	__le32 fr_bd_wid;
+} __packed; /* RFH_QUEUE_CONFIG_S_VER_1 */
+
+/**
+ * struct iwl_rfh_queue_config - RX queue configuration
+ * @num_queues: number of queues configured
+ * @reserved: alignment
+ * @data: DMA addresses per-queue
+ */
+struct iwl_rfh_queue_config {
+	u8 num_queues;
+	u8 reserved[3];
+	struct iwl_rfh_queue_data data[];
+} __packed; /* RFH_QUEUE_CONFIG_API_S_VER_1 */
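Because data[] is a flexible array, the command length depends on the number of RX queues; a minimal sketch of building it with struct_size() (the per-queue DMA sources are placeholders, not real driver fields):

/* Hedged sketch: allocate and partially fill the variable-length RX queue
 * configuration command for n queues. DMA addresses and error paths are
 * omitted; freeing the buffer after sending is the caller's job.
 */
static struct iwl_rfh_queue_config *iwl_alloc_rfh_cmd(int n)
{
	struct iwl_rfh_queue_config *cmd;
	int i;

	cmd = kzalloc(struct_size(cmd, data, n), GFP_KERNEL);
	if (!cmd)
		return NULL;

	cmd->num_queues = n;
	for (i = 0; i < n; i++) {
		cmd->data[i].q_num = i;
		cmd->data[i].enable = 1;
		/* fr_bd_cb, ur_bd_cb and urbd_stts_wrptr would be set to the
		 * per-queue DMA addresses here */
	}
	return cmd;
}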
+
 #endif /* __iwl_fw_api_rx_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h b/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h
index a2a40b515a3c8301848462275cb79a5caba3c95b..514b86123d3d366fd368e9d49f7d10222f77bae5 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h
@@ -7,6 +7,7 @@
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -28,6 +29,7 @@
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -320,6 +322,29 @@ struct iwl_tx_cmd_gen2 {
 	struct ieee80211_hdr hdr[0];
 } __packed; /* TX_CMD_API_S_VER_7 */
 
+/**
+ * struct iwl_tx_cmd_gen3 - TX command struct to FW for 22560 devices
+ * ( TX_CMD = 0x1c )
+ * @len: in bytes of the payload, see below for details
+ * @flags: combination of &enum iwl_tx_cmd_flags
+ * @offload_assist: TX offload configuration
+ * @dram_info: FW internal DRAM storage
+ * @rate_n_flags: rate for *all* Tx attempts, if TX_CMD_FLG_STA_RATE_MSK is
+ *	cleared. Combination of RATE_MCS_*
+ * @ttl: time to live - packet lifetime limit. The FW should drop the frame
+ *	if this limit has passed.
+ * @hdr: 802.11 header
+ */
+struct iwl_tx_cmd_gen3 {
+	__le16 len;
+	__le16 flags;
+	__le32 offload_assist;
+	struct iwl_dram_sec_info dram_info;
+	__le32 rate_n_flags;
+	__le64 ttl;
+	struct ieee80211_hdr hdr[0];
+} __packed; /* TX_CMD_API_S_VER_8 */
+
 /*
  * TX response related data
  */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/common_rx.c b/drivers/net/wireless/intel/iwlwifi/fw/common_rx.c
deleted file mode 100644
index 6f75985eea66fe69076b4601e47a57296f2681b1..0000000000000000000000000000000000000000
--- a/drivers/net/wireless/intel/iwlwifi/fw/common_rx.c
+++ /dev/null
@@ -1,88 +0,0 @@
-/******************************************************************************
- *
- * This file is provided under a dual BSD/GPLv2 license.  When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2017 Intel Deutschland GmbH
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * The full GNU General Public License is included in this distribution
- * in the file called COPYING.
- *
- * Contact Information:
- *  Intel Linux Wireless <linuxwifi@intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- * BSD LICENSE
- *
- * Copyright(c) 2017 Intel Deutschland GmbH
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- *  * Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- *  * Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in
- *    the documentation and/or other materials provided with the
- *    distribution.
- *  * Neither the name Intel Corporation nor the names of its
- *    contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *****************************************************************************/
-#include "iwl-drv.h"
-#include "runtime.h"
-#include "fw/api/commands.h"
-#include "fw/api/alive.h"
-
-static void iwl_fwrt_fseq_ver_mismatch(struct iwl_fw_runtime *fwrt,
-				       struct iwl_rx_cmd_buffer *rxb)
-{
-	struct iwl_rx_packet *pkt = rxb_addr(rxb);
-	struct iwl_fseq_ver_mismatch_ntf *fseq = (void *)pkt->data;
-
-	IWL_ERR(fwrt, "FSEQ version mismatch (aux: %d, wifi: %d)\n",
-		__le32_to_cpu(fseq->aux_read_fseq_ver),
-		__le32_to_cpu(fseq->wifi_fseq_ver));
-}
-
-void iwl_fwrt_handle_notification(struct iwl_fw_runtime *fwrt,
-				  struct iwl_rx_cmd_buffer *rxb)
-{
-	struct iwl_rx_packet *pkt = rxb_addr(rxb);
-	u32 cmd = WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd);
-
-	switch (cmd) {
-	case WIDE_ID(SYSTEM_GROUP, FSEQ_VER_MISMATCH_NTF):
-		iwl_fwrt_fseq_ver_mismatch(fwrt, rxb);
-		break;
-	default:
-		break;
-	}
-}
-IWL_EXPORT_SYMBOL(iwl_fwrt_handle_notification);
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
index fa283285fcbed08e7df15ba3d1d5c513b8083b58..a31a42e673c46fffec58ac95acbda96a442d803c 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
@@ -243,39 +243,47 @@ static void iwl_fw_dump_fifos(struct iwl_fw_runtime *fwrt,
 	if (!iwl_trans_grab_nic_access(fwrt->trans, &flags))
 		return;
 
-	/* Pull RXF1 */
-	iwl_fwrt_dump_rxf(fwrt, dump_data, cfg->lmac[0].rxfifo1_size, 0, 0);
-	/* Pull RXF2 */
-	iwl_fwrt_dump_rxf(fwrt, dump_data, cfg->rxfifo2_size,
-			  RXF_DIFF_FROM_PREV, 1);
-	/* Pull LMAC2 RXF1 */
-	if (fwrt->smem_cfg.num_lmacs > 1)
-		iwl_fwrt_dump_rxf(fwrt, dump_data, cfg->lmac[1].rxfifo1_size,
-				  LMAC2_PRPH_OFFSET, 2);
-
-	/* Pull TXF data from LMAC1 */
-	for (i = 0; i < fwrt->smem_cfg.num_txfifo_entries; i++) {
-		/* Mark the number of TXF we're pulling now */
-		iwl_trans_write_prph(fwrt->trans, TXF_LARC_NUM, i);
-		iwl_fwrt_dump_txf(fwrt, dump_data, cfg->lmac[0].txfifo_size[i],
-				  0, i);
+	if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_RXF)) {
+		/* Pull RXF1 */
+		iwl_fwrt_dump_rxf(fwrt, dump_data,
+				  cfg->lmac[0].rxfifo1_size, 0, 0);
+		/* Pull RXF2 */
+		iwl_fwrt_dump_rxf(fwrt, dump_data, cfg->rxfifo2_size,
+				  RXF_DIFF_FROM_PREV, 1);
+		/* Pull LMAC2 RXF1 */
+		if (fwrt->smem_cfg.num_lmacs > 1)
+			iwl_fwrt_dump_rxf(fwrt, dump_data,
+					  cfg->lmac[1].rxfifo1_size,
+					  LMAC2_PRPH_OFFSET, 2);
 	}
 
-	/* Pull TXF data from LMAC2 */
-	if (fwrt->smem_cfg.num_lmacs > 1) {
+	if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_TXF)) {
+		/* Pull TXF data from LMAC1 */
 		for (i = 0; i < fwrt->smem_cfg.num_txfifo_entries; i++) {
 			/* Mark the number of TXF we're pulling now */
-			iwl_trans_write_prph(fwrt->trans,
-					     TXF_LARC_NUM + LMAC2_PRPH_OFFSET,
-					     i);
+			iwl_trans_write_prph(fwrt->trans, TXF_LARC_NUM, i);
 			iwl_fwrt_dump_txf(fwrt, dump_data,
-					  cfg->lmac[1].txfifo_size[i],
-					  LMAC2_PRPH_OFFSET,
-					  i + cfg->num_txfifo_entries);
+					  cfg->lmac[0].txfifo_size[i], 0, i);
+		}
+
+		/* Pull TXF data from LMAC2 */
+		if (fwrt->smem_cfg.num_lmacs > 1) {
+			for (i = 0; i < fwrt->smem_cfg.num_txfifo_entries;
+			     i++) {
+				/* Mark the number of TXF we're pulling now */
+				iwl_trans_write_prph(fwrt->trans,
+						     TXF_LARC_NUM +
+						     LMAC2_PRPH_OFFSET, i);
+				iwl_fwrt_dump_txf(fwrt, dump_data,
+						  cfg->lmac[1].txfifo_size[i],
+						  LMAC2_PRPH_OFFSET,
+						  i + cfg->num_txfifo_entries);
+			}
 		}
 	}
 
-	if (fw_has_capa(&fwrt->fw->ucode_capa,
+	if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_INTERNAL_TXF) &&
+	    fw_has_capa(&fwrt->fw->ucode_capa,
 			IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) {
 		/* Pull UMAC internal TXF data from all TXFs */
 		for (i = 0;
@@ -600,42 +608,54 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
 	if (test_bit(STATUS_FW_ERROR, &fwrt->trans->status)) {
 		fifo_data_len = 0;
 
-		/* Count RXF2 size */
-		if (mem_cfg->rxfifo2_size) {
-			/* Add header info */
-			fifo_data_len += mem_cfg->rxfifo2_size +
-					 sizeof(*dump_data) +
-					 sizeof(struct iwl_fw_error_dump_fifo);
-		}
-
-		/* Count RXF1 sizes */
-		for (i = 0; i < mem_cfg->num_lmacs; i++) {
-			if (!mem_cfg->lmac[i].rxfifo1_size)
-				continue;
-
-			/* Add header info */
-			fifo_data_len += mem_cfg->lmac[i].rxfifo1_size +
-					 sizeof(*dump_data) +
-					 sizeof(struct iwl_fw_error_dump_fifo);
-		}
+		if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_RXF)) {
 
-		/* Count TXF sizes */
-		for (i = 0; i < mem_cfg->num_lmacs; i++) {
-			int j;
+			/* Count RXF2 size */
+			if (mem_cfg->rxfifo2_size) {
+				/* Add header info */
+				fifo_data_len +=
+					mem_cfg->rxfifo2_size +
+					sizeof(*dump_data) +
+					sizeof(struct iwl_fw_error_dump_fifo);
+			}
 
-			for (j = 0; j < mem_cfg->num_txfifo_entries; j++) {
-				if (!mem_cfg->lmac[i].txfifo_size[j])
+			/* Count RXF1 sizes */
+			for (i = 0; i < mem_cfg->num_lmacs; i++) {
+				if (!mem_cfg->lmac[i].rxfifo1_size)
 					continue;
 
 				/* Add header info */
 				fifo_data_len +=
-					mem_cfg->lmac[i].txfifo_size[j] +
+					mem_cfg->lmac[i].rxfifo1_size +
 					sizeof(*dump_data) +
 					sizeof(struct iwl_fw_error_dump_fifo);
 			}
 		}
 
-		if (fw_has_capa(&fwrt->fw->ucode_capa,
+		if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_TXF)) {
+			size_t fifo_const_len = sizeof(*dump_data) +
+				sizeof(struct iwl_fw_error_dump_fifo);
+
+			/* Count TXF sizes */
+			for (i = 0; i < mem_cfg->num_lmacs; i++) {
+				int j;
+
+				for (j = 0; j < mem_cfg->num_txfifo_entries;
+				     j++) {
+					if (!mem_cfg->lmac[i].txfifo_size[j])
+						continue;
+
+					/* Add header info */
+					fifo_data_len +=
+						fifo_const_len +
+						mem_cfg->lmac[i].txfifo_size[j];
+				}
+			}
+		}
+
+		if ((fwrt->fw->dbg_dump_mask &
+		    BIT(IWL_FW_ERROR_DUMP_INTERNAL_TXF)) &&
+		    fw_has_capa(&fwrt->fw->ucode_capa,
 				IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) {
 			for (i = 0;
 			     i < ARRAY_SIZE(mem_cfg->internal_txfifo_size);
@@ -652,7 +672,8 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
 		}
 
 		/* Make room for PRPH registers */
-		if (!fwrt->trans->cfg->gen2) {
+		if (!fwrt->trans->cfg->gen2 &&
+		    fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_PRPH)) {
 			for (i = 0; i < ARRAY_SIZE(iwl_prph_dump_addr_comm);
 			     i++) {
 				/* The range includes both boundaries */
@@ -667,7 +688,8 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
 		}
 
 		if (!fwrt->trans->cfg->gen2 &&
-		    fwrt->trans->cfg->mq_rx_supported) {
+		    fwrt->trans->cfg->mq_rx_supported &&
+		    fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_PRPH)) {
 			for (i = 0; i <
 				ARRAY_SIZE(iwl_prph_dump_addr_9000); i++) {
 				/* The range includes both boundaries */
@@ -681,34 +703,42 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
 			}
 		}
 
-		if (fwrt->trans->cfg->device_family == IWL_DEVICE_FAMILY_7000)
+		if (fwrt->trans->cfg->device_family == IWL_DEVICE_FAMILY_7000 &&
+		    fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_RADIO_REG))
 			radio_len = sizeof(*dump_data) + RADIO_REG_MAX_READ;
 	}
 
 	file_len = sizeof(*dump_file) +
-		   sizeof(*dump_data) * 3 +
-		   sizeof(*dump_smem_cfg) +
 		   fifo_data_len +
 		   prph_len +
-		   radio_len +
-		   sizeof(*dump_info);
-
-	/* Make room for the SMEM, if it exists */
-	if (smem_len)
-		file_len += sizeof(*dump_data) + sizeof(*dump_mem) + smem_len;
-
-	/* Make room for the secondary SRAM, if it exists */
-	if (sram2_len)
-		file_len += sizeof(*dump_data) + sizeof(*dump_mem) + sram2_len;
-
-	/* Make room for MEM segments */
-	for (i = 0; i < fwrt->fw->n_dbg_mem_tlv; i++) {
-		file_len += sizeof(*dump_data) + sizeof(*dump_mem) +
-			    le32_to_cpu(fw_dbg_mem[i].len);
+		   radio_len;
+
+	if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_DEV_FW_INFO))
+		file_len += sizeof(*dump_data) + sizeof(*dump_info);
+	if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM_CFG))
+		file_len += sizeof(*dump_data) + sizeof(*dump_smem_cfg);
+
+	if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM)) {
+		/* Make room for the SMEM, if it exists */
+		if (smem_len)
+			file_len += sizeof(*dump_data) + sizeof(*dump_mem) +
+				smem_len;
+
+		/* Make room for the secondary SRAM, if it exists */
+		if (sram2_len)
+			file_len += sizeof(*dump_data) + sizeof(*dump_mem) +
+				sram2_len;
+
+		/* Make room for MEM segments */
+		for (i = 0; i < fwrt->fw->n_dbg_mem_tlv; i++) {
+			file_len += sizeof(*dump_data) + sizeof(*dump_mem) +
+				    le32_to_cpu(fw_dbg_mem[i].len);
+		}
 	}
 
 	/* Make room for fw's virtual image pages, if it exists */
-	if (!fwrt->trans->cfg->gen2 &&
+	if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING) &&
+	    !fwrt->trans->cfg->gen2 &&
 	    fwrt->fw->img[fwrt->cur_fw_img].paging_mem_size &&
 	    fwrt->fw_paging_db[0].fw_paging_block)
 		file_len += fwrt->num_of_paging_blk *
@@ -722,12 +752,14 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
 			   sizeof(*dump_info) + sizeof(*dump_smem_cfg);
 	}
 
-	if (fwrt->dump.desc)
+	if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_ERROR_INFO) &&
+	    fwrt->dump.desc)
 		file_len += sizeof(*dump_data) + sizeof(*dump_trig) +
 			    fwrt->dump.desc->len;
 
-	if (!fwrt->fw->n_dbg_mem_tlv)
-		file_len += sram_len + sizeof(*dump_mem);
+	if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM) &&
+	    !fwrt->fw->n_dbg_mem_tlv)
+		file_len += sizeof(*dump_data) + sram_len + sizeof(*dump_mem);
 
 	dump_file = vzalloc(file_len);
 	if (!dump_file) {
@@ -740,48 +772,56 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
 	dump_file->barker = cpu_to_le32(IWL_FW_ERROR_DUMP_BARKER);
 	dump_data = (void *)dump_file->data;
 
-	dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_DEV_FW_INFO);
-	dump_data->len = cpu_to_le32(sizeof(*dump_info));
-	dump_info = (void *)dump_data->data;
-	dump_info->device_family =
-		fwrt->trans->cfg->device_family == IWL_DEVICE_FAMILY_7000 ?
-			cpu_to_le32(IWL_FW_ERROR_DUMP_FAMILY_7) :
-			cpu_to_le32(IWL_FW_ERROR_DUMP_FAMILY_8);
-	dump_info->hw_step = cpu_to_le32(CSR_HW_REV_STEP(fwrt->trans->hw_rev));
-	memcpy(dump_info->fw_human_readable, fwrt->fw->human_readable,
-	       sizeof(dump_info->fw_human_readable));
-	strncpy(dump_info->dev_human_readable, fwrt->trans->cfg->name,
-		sizeof(dump_info->dev_human_readable));
-	strncpy(dump_info->bus_human_readable, fwrt->dev->bus->name,
-		sizeof(dump_info->bus_human_readable));
-
-	dump_data = iwl_fw_error_next_data(dump_data);
-
-	/* Dump shared memory configuration */
-	dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_CFG);
-	dump_data->len = cpu_to_le32(sizeof(*dump_smem_cfg));
-	dump_smem_cfg = (void *)dump_data->data;
-	dump_smem_cfg->num_lmacs = cpu_to_le32(mem_cfg->num_lmacs);
-	dump_smem_cfg->num_txfifo_entries =
-		cpu_to_le32(mem_cfg->num_txfifo_entries);
-	for (i = 0; i < MAX_NUM_LMAC; i++) {
-		int j;
-
-		for (j = 0; j < TX_FIFO_MAX_NUM; j++)
-			dump_smem_cfg->lmac[i].txfifo_size[j] =
-				cpu_to_le32(mem_cfg->lmac[i].txfifo_size[j]);
-		dump_smem_cfg->lmac[i].rxfifo1_size =
-			cpu_to_le32(mem_cfg->lmac[i].rxfifo1_size);
-	}
-	dump_smem_cfg->rxfifo2_size = cpu_to_le32(mem_cfg->rxfifo2_size);
-	dump_smem_cfg->internal_txfifo_addr =
-		cpu_to_le32(mem_cfg->internal_txfifo_addr);
-	for (i = 0; i < TX_FIFO_INTERNAL_MAX_NUM; i++) {
-		dump_smem_cfg->internal_txfifo_size[i] =
-			cpu_to_le32(mem_cfg->internal_txfifo_size[i]);
+	if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_DEV_FW_INFO)) {
+		dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_DEV_FW_INFO);
+		dump_data->len = cpu_to_le32(sizeof(*dump_info));
+		dump_info = (void *)dump_data->data;
+		dump_info->device_family =
+			fwrt->trans->cfg->device_family ==
+			IWL_DEVICE_FAMILY_7000 ?
+				cpu_to_le32(IWL_FW_ERROR_DUMP_FAMILY_7) :
+				cpu_to_le32(IWL_FW_ERROR_DUMP_FAMILY_8);
+		dump_info->hw_step =
+			cpu_to_le32(CSR_HW_REV_STEP(fwrt->trans->hw_rev));
+		memcpy(dump_info->fw_human_readable, fwrt->fw->human_readable,
+		       sizeof(dump_info->fw_human_readable));
+		strncpy(dump_info->dev_human_readable, fwrt->trans->cfg->name,
+			sizeof(dump_info->dev_human_readable) - 1);
+		strncpy(dump_info->bus_human_readable, fwrt->dev->bus->name,
+			sizeof(dump_info->bus_human_readable) - 1);
+
+		dump_data = iwl_fw_error_next_data(dump_data);
 	}
 
-	dump_data = iwl_fw_error_next_data(dump_data);
+	if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM_CFG)) {
+		/* Dump shared memory configuration */
+		dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_CFG);
+		dump_data->len = cpu_to_le32(sizeof(*dump_smem_cfg));
+		dump_smem_cfg = (void *)dump_data->data;
+		dump_smem_cfg->num_lmacs = cpu_to_le32(mem_cfg->num_lmacs);
+		dump_smem_cfg->num_txfifo_entries =
+			cpu_to_le32(mem_cfg->num_txfifo_entries);
+		for (i = 0; i < MAX_NUM_LMAC; i++) {
+			int j;
+			u32 *txf_size = mem_cfg->lmac[i].txfifo_size;
+
+			for (j = 0; j < TX_FIFO_MAX_NUM; j++)
+				dump_smem_cfg->lmac[i].txfifo_size[j] =
+					cpu_to_le32(txf_size[j]);
+			dump_smem_cfg->lmac[i].rxfifo1_size =
+				cpu_to_le32(mem_cfg->lmac[i].rxfifo1_size);
+		}
+		dump_smem_cfg->rxfifo2_size =
+			cpu_to_le32(mem_cfg->rxfifo2_size);
+		dump_smem_cfg->internal_txfifo_addr =
+			cpu_to_le32(mem_cfg->internal_txfifo_addr);
+		for (i = 0; i < TX_FIFO_INTERNAL_MAX_NUM; i++) {
+			dump_smem_cfg->internal_txfifo_size[i] =
+				cpu_to_le32(mem_cfg->internal_txfifo_size[i]);
+		}
+
+		dump_data = iwl_fw_error_next_data(dump_data);
+	}
 
 	/* We only dump the FIFOs if the FW is in error state */
 	if (test_bit(STATUS_FW_ERROR, &fwrt->trans->status)) {
@@ -790,7 +830,8 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
 			iwl_read_radio_regs(fwrt, &dump_data);
 	}
 
-	if (fwrt->dump.desc) {
+	if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_ERROR_INFO) &&
+	    fwrt->dump.desc) {
 		dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_ERROR_INFO);
 		dump_data->len = cpu_to_le32(sizeof(*dump_trig) +
 					     fwrt->dump.desc->len);
@@ -805,7 +846,8 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
 	if (monitor_dump_only)
 		goto dump_trans_data;
 
-	if (!fwrt->fw->n_dbg_mem_tlv) {
+	if (!fwrt->fw->n_dbg_mem_tlv &&
+	    fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM)) {
 		dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
 		dump_data->len = cpu_to_le32(sram_len + sizeof(*dump_mem));
 		dump_mem = (void *)dump_data->data;
@@ -821,6 +863,9 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
 		u32 ofs = le32_to_cpu(fw_dbg_mem[i].ofs);
 		bool success;
 
+		if (!(fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM)))
+			break;
+
 		dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
 		dump_data->len = cpu_to_le32(len + sizeof(*dump_mem));
 		dump_mem = (void *)dump_data->data;
@@ -854,7 +899,7 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
 			dump_data = iwl_fw_error_next_data(dump_data);
 	}
 
-	if (smem_len) {
+	if (smem_len && fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM)) {
 		IWL_DEBUG_INFO(fwrt, "WRT SMEM dump\n");
 		dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
 		dump_data->len = cpu_to_le32(smem_len + sizeof(*dump_mem));
@@ -867,7 +912,7 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
 		dump_data = iwl_fw_error_next_data(dump_data);
 	}
 
-	if (sram2_len) {
+	if (sram2_len && fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM)) {
 		IWL_DEBUG_INFO(fwrt, "WRT SRAM dump\n");
 		dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
 		dump_data->len = cpu_to_le32(sram2_len + sizeof(*dump_mem));
@@ -881,7 +926,8 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
 	}
 
 	/* Dump fw's virtual image */
-	if (!fwrt->trans->cfg->gen2 &&
+	if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING) &&
+	    !fwrt->trans->cfg->gen2 &&
 	    fwrt->fw->img[fwrt->cur_fw_img].paging_mem_size &&
 	    fwrt->fw_paging_db[0].fw_paging_block) {
 		IWL_DEBUG_INFO(fwrt, "WRT paging dump\n");
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/file.h b/drivers/net/wireless/intel/iwlwifi/fw/file.h
index 9d939cbaf6c6ef84751b2a5aece20ea4b2069b22..bbf2b265a06a649f1e19157861bc375ca7c4c705 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/file.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/file.h
@@ -146,6 +146,9 @@ enum iwl_ucode_tlv_type {
 	IWL_UCODE_TLV_FW_GSCAN_CAPA	= 50,
 	IWL_UCODE_TLV_FW_MEM_SEG	= 51,
 	IWL_UCODE_TLV_IML		= 52,
+
+	/* TLVs 0x1000-0x2000 are for internal driver usage */
+	IWL_UCODE_TLV_FW_DBG_DUMP_LST	= 0x1000,
 };
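The new driver-internal TLV carries the dump mask consumed by the dbg.c changes earlier in this series; a minimal sketch of folding its payload into the firmware descriptor (helper name and surrounding parser context are assumptions):

/* Hedged sketch: validate and store the dump-list TLV payload. In the real
 * parser this would sit in the TLV switch; a default mask would be applied
 * when the TLV is absent.
 */
static int iwl_parse_dbg_dump_lst(struct iwl_fw *fw, const __le32 *tlv_data,
				  u32 tlv_len)
{
	if (tlv_len != sizeof(*tlv_data))
		return -EINVAL;		/* malformed TLV, skip it */

	fw->dbg_dump_mask = le32_to_cpup(tlv_data);
	return 0;
}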
 
 struct iwl_ucode_tlv {
@@ -318,7 +321,7 @@ typedef unsigned int __bitwise iwl_ucode_tlv_capa_t;
  *	IWL_UCODE_TLV_API_WIFI_MCC_UPDATE. When either is set, multi-source LAR
  *	is supported.
  * @IWL_UCODE_TLV_CAPA_BT_COEX_RRC: supports BT Coex RRC
- * @IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT: supports gscan
+ * @IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT: supports gscan (no longer used)
  * @IWL_UCODE_TLV_CAPA_STA_PM_NOTIF: firmware will send STA PM notification
  * @IWL_UCODE_TLV_CAPA_TLC_OFFLOAD: firmware implements rate scaling algorithm
  * @IWL_UCODE_TLV_CAPA_DYNAMIC_QUOTA: firmware implements quota related
@@ -889,39 +892,4 @@ struct iwl_fw_dbg_conf_tlv {
 	struct iwl_fw_dbg_conf_hcmd hcmd;
 } __packed;
 
-/**
- * struct iwl_fw_gscan_capabilities - gscan capabilities supported by FW
- * @max_scan_cache_size: total space allocated for scan results (in bytes).
- * @max_scan_buckets: maximum number of channel buckets.
- * @max_ap_cache_per_scan: maximum number of APs that can be stored per scan.
- * @max_rssi_sample_size: number of RSSI samples used for averaging RSSI.
- * @max_scan_reporting_threshold: max possible report threshold. in percentage.
- * @max_hotlist_aps: maximum number of entries for hotlist APs.
- * @max_significant_change_aps: maximum number of entries for significant
- *	change APs.
- * @max_bssid_history_entries: number of BSSID/RSSI entries that the device can
- *	hold.
- * @max_hotlist_ssids: maximum number of entries for hotlist SSIDs.
- * @max_number_epno_networks: max number of epno entries.
- * @max_number_epno_networks_by_ssid: max number of epno entries if ssid is
- *	specified.
- * @max_number_of_white_listed_ssid: max number of white listed SSIDs.
- * @max_number_of_black_listed_ssid: max number of black listed SSIDs.
- */
-struct iwl_fw_gscan_capabilities {
-	__le32 max_scan_cache_size;
-	__le32 max_scan_buckets;
-	__le32 max_ap_cache_per_scan;
-	__le32 max_rssi_sample_size;
-	__le32 max_scan_reporting_threshold;
-	__le32 max_hotlist_aps;
-	__le32 max_significant_change_aps;
-	__le32 max_bssid_history_entries;
-	__le32 max_hotlist_ssids;
-	__le32 max_number_epno_networks;
-	__le32 max_number_epno_networks_by_ssid;
-	__le32 max_number_of_white_listed_ssid;
-	__le32 max_number_of_black_listed_ssid;
-} __packed;
-
 #endif  /* __iwl_fw_file_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/img.h b/drivers/net/wireless/intel/iwlwifi/fw/img.h
index f4912382b6af39418eb84a7033ccc537b7f6e0a8..0861b97c4233c8a7eeb1294f5ca0c9042c0b1f8c 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/img.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/img.h
@@ -192,41 +192,6 @@ struct iwl_fw_cscheme_list {
 	struct iwl_fw_cipher_scheme cs[];
 } __packed;
 
-/**
- * struct iwl_gscan_capabilities - gscan capabilities supported by FW
- * @max_scan_cache_size: total space allocated for scan results (in bytes).
- * @max_scan_buckets: maximum number of channel buckets.
- * @max_ap_cache_per_scan: maximum number of APs that can be stored per scan.
- * @max_rssi_sample_size: number of RSSI samples used for averaging RSSI.
- * @max_scan_reporting_threshold: max possible report threshold. in percentage.
- * @max_hotlist_aps: maximum number of entries for hotlist APs.
- * @max_significant_change_aps: maximum number of entries for significant
- *	change APs.
- * @max_bssid_history_entries: number of BSSID/RSSI entries that the device can
- *	hold.
- * @max_hotlist_ssids: maximum number of entries for hotlist SSIDs.
- * @max_number_epno_networks: max number of epno entries.
- * @max_number_epno_networks_by_ssid: max number of epno entries if ssid is
- *	specified.
- * @max_number_of_white_listed_ssid: max number of white listed SSIDs.
- * @max_number_of_black_listed_ssid: max number of black listed SSIDs.
- */
-struct iwl_gscan_capabilities {
-	u32 max_scan_cache_size;
-	u32 max_scan_buckets;
-	u32 max_ap_cache_per_scan;
-	u32 max_rssi_sample_size;
-	u32 max_scan_reporting_threshold;
-	u32 max_hotlist_aps;
-	u32 max_significant_change_aps;
-	u32 max_bssid_history_entries;
-	u32 max_hotlist_ssids;
-	u32 max_number_epno_networks;
-	u32 max_number_epno_networks_by_ssid;
-	u32 max_number_of_white_listed_ssid;
-	u32 max_number_of_black_listed_ssid;
-};
-
 /**
  * enum iwl_fw_type - iwlwifi firmware type
  * @IWL_FW_DVM: DVM firmware
@@ -298,7 +263,7 @@ struct iwl_fw {
 	size_t n_dbg_mem_tlv;
 	size_t dbg_trigger_tlv_len[FW_DBG_TRIGGER_MAX];
 	u8 dbg_dest_reg_num;
-	struct iwl_gscan_capabilities gscan_capa;
+	u32 dbg_dump_mask;
 };
 
 static inline const char *get_fw_dbg_mode_string(int mode)
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
index d8db1dd100b08dc54ac5c219bc5e172f8a68873c..ed23367f7088d5f5b72fb827b3539934b54fe9a8 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
@@ -168,7 +168,4 @@ void iwl_free_fw_paging(struct iwl_fw_runtime *fwrt);
 
 void iwl_get_shared_mem_conf(struct iwl_fw_runtime *fwrt);
 
-void iwl_fwrt_handle_notification(struct iwl_fw_runtime *fwrt,
-				  struct iwl_rx_cmd_buffer *rxb);
-
 #endif /* __iwl_fw_runtime_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/smem.c b/drivers/net/wireless/intel/iwlwifi/fw/smem.c
index fb4b6442b4d7d4b9b381194c5d5c03c93c98bfa0..ff85d69c2a8cb3703bdc1bebe3148435a4cf2e43 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/smem.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/smem.c
@@ -8,6 +8,7 @@
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -30,6 +31,7 @@
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -143,7 +145,7 @@ void iwl_get_shared_mem_conf(struct iwl_fw_runtime *fwrt)
 		return;
 
 	pkt = cmd.resp_pkt;
-	if (fwrt->trans->cfg->device_family == IWL_DEVICE_FAMILY_22000)
+	if (fwrt->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22000)
 		iwl_parse_shared_mem_22000(fwrt, pkt);
 	else
 		iwl_parse_shared_mem(fwrt, pkt);
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
index 84a81680972351180bfc1e48f361ede22050280b..12fddcf15bab394521122a2fecaab3d2dcd83579 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
@@ -93,6 +93,7 @@ enum iwl_device_family {
 	IWL_DEVICE_FAMILY_8000,
 	IWL_DEVICE_FAMILY_9000,
 	IWL_DEVICE_FAMILY_22000,
+	IWL_DEVICE_FAMILY_22560,
 };
 
 /*
@@ -176,6 +177,7 @@ static inline u8 num_of_ant(u8 mask)
  * @apmg_wake_up_wa: should the MAC access REQ be asserted when a command
  *	is in flight. This is due to a HW bug in 7260, 3160 and 7265.
  * @scd_chain_ext_wa: should the chain extension feature in SCD be disabled.
+ * @max_tfd_queue_size: max number of entries in tfd queue.
  */
 struct iwl_base_params {
 	unsigned int wd_timeout;
@@ -191,6 +193,7 @@ struct iwl_base_params {
 	   scd_chain_ext_wa:1;
 
 	u16 num_of_queues;	/* def: HW dependent */
+	u32 max_tfd_queue_size;	/* def: HW dependent */
 
 	u8 max_ll_items;
 	u8 led_compensation;
@@ -571,9 +574,11 @@ extern const struct iwl_cfg iwl22000_2ac_cfg_hr;
 extern const struct iwl_cfg iwl22000_2ac_cfg_hr_cdb;
 extern const struct iwl_cfg iwl22000_2ac_cfg_jf;
 extern const struct iwl_cfg iwl22000_2ax_cfg_hr;
-extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_f0;
+extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_a0_f0;
+extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_b0;
 extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_jf_b0;
 extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_a0;
+extern const struct iwl_cfg iwl22560_2ax_cfg_su_cdb;
 #endif /* CONFIG_IWLMVM */
 
 #endif /* __IWL_CONFIG_H__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h b/drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h
new file mode 100644
index 0000000000000000000000000000000000000000..ebea99189ca9f0bf342ab144c8c0b8b3d525aa68
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h
@@ -0,0 +1,286 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2018 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2018 Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#ifndef __iwl_context_info_file_gen3_h__
+#define __iwl_context_info_file_gen3_h__
+
+#include "iwl-context-info.h"
+
+#define CSR_CTXT_INFO_BOOT_CTRL         0x0
+#define CSR_CTXT_INFO_ADDR              0x118
+#define CSR_IML_DATA_ADDR               0x120
+#define CSR_IML_SIZE_ADDR               0x128
+#define CSR_IML_RESP_ADDR               0x12c
+
+/* Set bit for enabling automatic function boot */
+#define CSR_AUTO_FUNC_BOOT_ENA          BIT(1)
+/* Set bit for initiating function boot */
+#define CSR_AUTO_FUNC_INIT              BIT(7)
+
+/**
+ * enum iwl_prph_scratch_mtr_format - tfd size configuration
+ * @IWL_PRPH_MTR_FORMAT_16B: 16 bit tfd
+ * @IWL_PRPH_MTR_FORMAT_32B: 32 bit tfd
+ * @IWL_PRPH_MTR_FORMAT_64B: 64 bit tfd
+ * @IWL_PRPH_MTR_FORMAT_256B: 256 bit tfd
+ */
+enum iwl_prph_scratch_mtr_format {
+	IWL_PRPH_MTR_FORMAT_16B = 0x0,
+	IWL_PRPH_MTR_FORMAT_32B = 0x40000,
+	IWL_PRPH_MTR_FORMAT_64B = 0x80000,
+	IWL_PRPH_MTR_FORMAT_256B = 0xC0000,
+};
+
+/**
+ * enum iwl_prph_scratch_flags - PRPH scratch control flags
+ * @IWL_PRPH_SCRATCH_EARLY_DEBUG_EN: enable early debug conf
+ * @IWL_PRPH_SCRATCH_EDBG_DEST_DRAM: use DRAM, with size allocated
+ *	in hwm config.
+ * @IWL_PRPH_SCRATCH_EDBG_DEST_INTERNAL: use buffer on SRAM
+ * @IWL_PRPH_SCRATCH_EDBG_DEST_ST_ARBITER: use st arbiter, mainly for
+ *	multicomm.
+ * @IWL_PRPH_SCRATCH_EDBG_DEST_TB22DTF: route debug data to SoC HW
+ * @IWL_PRPH_SCRATCH_RB_SIZE_4K: Use 4K RB size (the default is 2K)
+ * @IWL_PRPH_SCRATCH_MTR_MODE: completion descriptor format: 0 for
+ *	completion descriptor, 1 for responses (legacy)
+ * @IWL_PRPH_SCRATCH_MTR_FORMAT: a mask for the size of the tfd.
+ *	There are 4 possible values: 0: 16 bit, 1: 32 bit, 2: 64 bit,
+ *	3: 256 bit.
+ */
+enum iwl_prph_scratch_flags {
+	IWL_PRPH_SCRATCH_EARLY_DEBUG_EN		= BIT(4),
+	IWL_PRPH_SCRATCH_EDBG_DEST_DRAM		= BIT(8),
+	IWL_PRPH_SCRATCH_EDBG_DEST_INTERNAL	= BIT(9),
+	IWL_PRPH_SCRATCH_EDBG_DEST_ST_ARBITER	= BIT(10),
+	IWL_PRPH_SCRATCH_EDBG_DEST_TB22DTF	= BIT(11),
+	IWL_PRPH_SCRATCH_RB_SIZE_4K		= BIT(16),
+	IWL_PRPH_SCRATCH_MTR_MODE		= BIT(17),
+	IWL_PRPH_SCRATCH_MTR_FORMAT		= BIT(18) | BIT(19),
+};
+
+/*
+ * struct iwl_prph_scratch_version - version structure
+ * @mac_id: SKU and revision id
+ * @version: prph scratch information version id
+ * @size: the size of the context information in DWs
+ * @reserved: reserved
+ */
+struct iwl_prph_scratch_version {
+	__le16 mac_id;
+	__le16 version;
+	__le16 size;
+	__le16 reserved;
+} __packed; /* PERIPH_SCRATCH_VERSION_S */
+
+/*
+ * struct iwl_prph_scratch_control - control structure
+ * @control_flags: context information flags see &enum iwl_prph_scratch_flags
+ * @reserved: reserved
+ */
+struct iwl_prph_scratch_control {
+	__le32 control_flags;
+	__le32 reserved;
+} __packed; /* PERIPH_SCRATCH_CONTROL_S */
+
+/*
+ * struct iwl_prph_scratch_ror_cfg - ror config
+ * @ror_base_addr: ror start address
+ * @ror_size: ror size in DWs
+ * @reserved: reserved
+ */
+struct iwl_prph_scratch_ror_cfg {
+	__le64 ror_base_addr;
+	__le32 ror_size;
+	__le32 reserved;
+} __packed; /* PERIPH_SCRATCH_ROR_CFG_S */
+
+/*
+ * struct iwl_prph_scratch_hwm_cfg - hwm config
+ * @hwm_base_addr: hwm start address
+ * @hwm_size: hwm size in DWs
+ * @reserved: reserved
+ */
+struct iwl_prph_scratch_hwm_cfg {
+	__le64 hwm_base_addr;
+	__le32 hwm_size;
+	__le32 reserved;
+} __packed; /* PERIPH_SCRATCH_HWM_CFG_S */
+
+/*
+ * struct iwl_prph_scratch_rbd_cfg - RBDs configuration
+ * @free_rbd_addr: default queue free RB CB base address
+ * @reserved: reserved
+ */
+struct iwl_prph_scratch_rbd_cfg {
+	__le64 free_rbd_addr;
+	__le32 reserved;
+} __packed; /* PERIPH_SCRATCH_RBD_CFG_S */
+
+/*
+ * struct iwl_prph_scratch_ctrl_cfg - prph scratch ctrl and config
+ * @version: version information of context info and HW
+ * @control: control flags of FH configurations
+ * @ror_cfg: ror configuration
+ * @hwm_cfg: hwm configuration
+ * @rbd_cfg: default RX queue configuration
+ */
+struct iwl_prph_scratch_ctrl_cfg {
+	struct iwl_prph_scratch_version version;
+	struct iwl_prph_scratch_control control;
+	struct iwl_prph_scratch_ror_cfg ror_cfg;
+	struct iwl_prph_scratch_hwm_cfg hwm_cfg;
+	struct iwl_prph_scratch_rbd_cfg rbd_cfg;
+} __packed; /* PERIPH_SCRATCH_CTRL_CFG_S */
+
+/*
+ * struct iwl_prph_scratch - peripheral scratch mapping
+ * @ctrl_cfg: control and configuration of prph scratch
+ * @dram: firmware images addresses in DRAM
+ * @reserved: reserved
+ */
+struct iwl_prph_scratch {
+	struct iwl_prph_scratch_ctrl_cfg ctrl_cfg;
+	__le32 reserved[16];
+	struct iwl_context_info_dram dram;
+} __packed; /* PERIPH_SCRATCH_S */
+
+/*
+ * struct iwl_prph_info - peripheral information
+ * @boot_stage_mirror: reflects the value in the Boot Stage CSR register
+ * @ipc_status_mirror: reflects the value in the IPC Status CSR register
+ * @sleep_notif: indicates the peripheral sleep status
+ * @reserved: reserved
+ */
+struct iwl_prph_info {
+	__le32 boot_stage_mirror;
+	__le32 ipc_status_mirror;
+	__le32 sleep_notif;
+	__le32 reserved;
+} __packed; /* PERIPH_INFO_S */
+
+/*
+ * struct iwl_context_info_gen3 - device INIT configuration
+ * @version: version of the context information
+ * @size: size of context information in DWs
+ * @config: context in which the peripheral would execute - a subset of
+ *	capability csr register published by the peripheral
+ * @prph_info_base_addr: the peripheral information structure start address
+ * @cr_head_idx_arr_base_addr: the completion ring head index array
+ *	start address
+ * @tr_tail_idx_arr_base_addr: the transfer ring tail index array
+ *	start address
+ * @cr_tail_idx_arr_base_addr: the completion ring tail index array
+ *	start address
+ * @tr_head_idx_arr_base_addr: the transfer ring head index array
+ *	start address
+ * @cr_idx_arr_size: number of entries in the completion ring index array
+ * @tr_idx_arr_size: number of entries in the transfer ring index array
+ * @mtr_base_addr: the message transfer ring start address
+ * @mcr_base_addr: the message completion ring start address
+ * @mtr_size: number of entries which the message transfer ring can hold
+ * @mcr_size: number of entries which the message completion ring can hold
+ * @mtr_doorbell_vec: the doorbell vector associated with the message
+ *	transfer ring
+ * @mcr_doorbell_vec: the doorbell vector associated with the message
+ *	completion ring
+ * @mtr_msi_vec: the MSI which shall be generated by the peripheral after
+ *	completing a transfer descriptor in the message transfer ring
+ * @mcr_msi_vec: the MSI which shall be generated by the peripheral after
+ *	completing a completion descriptor in the message completion ring
+ * @mtr_opt_header_size: the size of the optional header in the transfer
+ *	descriptor associated with the message transfer ring in DWs
+ * @mtr_opt_footer_size: the size of the optional footer in the transfer
+ *	descriptor associated with the message transfer ring in DWs
+ * @mcr_opt_header_size: the size of the optional header in the completion
+ *	descriptor associated with the message completion ring in DWs
+ * @mcr_opt_footer_size: the size of the optional footer in the completion
+ *	descriptor associated with the message completion ring in DWs
+ * @msg_rings_ctrl_flags: message rings control flags
+ * @prph_info_msi_vec: the MSI which shall be generated by the peripheral
+ *	after updating the Peripheral Information structure
+ * @prph_scratch_base_addr: the peripheral scratch structure start address
+ * @prph_scratch_size: the size of the peripheral scratch structure in DWs
+ * @reserved: reserved
+ */
+struct iwl_context_info_gen3 {
+	__le16 version;
+	__le16 size;
+	__le32 config;
+	__le64 prph_info_base_addr;
+	__le64 cr_head_idx_arr_base_addr;
+	__le64 tr_tail_idx_arr_base_addr;
+	__le64 cr_tail_idx_arr_base_addr;
+	__le64 tr_head_idx_arr_base_addr;
+	__le16 cr_idx_arr_size;
+	__le16 tr_idx_arr_size;
+	__le64 mtr_base_addr;
+	__le64 mcr_base_addr;
+	__le16 mtr_size;
+	__le16 mcr_size;
+	__le16 mtr_doorbell_vec;
+	__le16 mcr_doorbell_vec;
+	__le16 mtr_msi_vec;
+	__le16 mcr_msi_vec;
+	u8 mtr_opt_header_size;
+	u8 mtr_opt_footer_size;
+	u8 mcr_opt_header_size;
+	u8 mcr_opt_footer_size;
+	__le16 msg_rings_ctrl_flags;
+	__le16 prph_info_msi_vec;
+	__le64 prph_scratch_base_addr;
+	__le32 prph_scratch_size;
+	__le32 reserved;
+} __packed; /* IPC_CONTEXT_INFO_S */
+
+int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
+				 const struct fw_img *fw);
+void iwl_pcie_ctxt_info_gen3_free(struct iwl_trans *trans);
+
+#endif /* __iwl_context_info_file_gen3_h__ */
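
For illustration only (not part of the patch): a sketch of how the control word in struct iwl_prph_scratch_control could be composed from the flags above. The particular choice of 4K RBs plus the 256-bit TFD format is an assumption for the example, not necessarily what the transport programs.

#include <stdint.h>

#define BIT(n)	(1U << (n))

/* values copied from the header above */
#define IWL_PRPH_SCRATCH_EARLY_DEBUG_EN	BIT(4)
#define IWL_PRPH_SCRATCH_RB_SIZE_4K	BIT(16)
#define IWL_PRPH_MTR_FORMAT_256B	0xC0000	/* fills the two MTR_FORMAT bits */

uint32_t build_control_flags(int early_debug)
{
	uint32_t flags = IWL_PRPH_SCRATCH_RB_SIZE_4K | IWL_PRPH_MTR_FORMAT_256B;

	if (early_debug)
		flags |= IWL_PRPH_SCRATCH_EARLY_DEBUG_EN;

	/* the driver would store cpu_to_le32(flags) in
	 * ctrl_cfg.control.control_flags */
	return flags;
}
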
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-context-info.h b/drivers/net/wireless/intel/iwlwifi/iwl-context-info.h
index b870c09867449ed899f9886d5d88f13a06386e09..4b6fdf3b15fbe4e0d4f096e128b788e518966919 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-context-info.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-context-info.h
@@ -6,6 +6,7 @@
  * GPL LICENSE SUMMARY
  *
  * Copyright(c) 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -19,6 +20,7 @@
  * BSD LICENSE
  *
  * Copyright(c) 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -199,5 +201,8 @@ struct iwl_context_info {
 int iwl_pcie_ctxt_info_init(struct iwl_trans *trans, const struct fw_img *fw);
 void iwl_pcie_ctxt_info_free(struct iwl_trans *trans);
 void iwl_pcie_ctxt_info_free_paging(struct iwl_trans *trans);
+int iwl_pcie_init_fw_sec(struct iwl_trans *trans,
+			 const struct fw_img *fw,
+			 struct iwl_context_info_dram *ctxt_dram);
 
 #endif /* __iwl_context_info_file_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
index ba971d3946e2cce79ef6dd0630ac554a5fd635f6..9019de99f077f1f069bcb37d8cbc5e51fe110c65 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
@@ -339,6 +339,9 @@ enum {
 /* HW_RF CHIP ID  */
 #define CSR_HW_RF_ID_TYPE_CHIP_ID(_val) (((_val) >> 12) & 0xFFF)
 
+/* HW_RF CHIP STEP  */
+#define CSR_HW_RF_STEP(_val) (((_val) >> 8) & 0xF)
+
 /* EEPROM REG */
 #define CSR_EEPROM_REG_READ_VALID_MSK	(0x00000001)
 #define CSR_EEPROM_REG_BIT_CMD		(0x00000002)
@@ -592,6 +595,8 @@ enum msix_fh_int_causes {
 enum msix_hw_int_causes {
 	MSIX_HW_INT_CAUSES_REG_ALIVE		= BIT(0),
 	MSIX_HW_INT_CAUSES_REG_WAKEUP		= BIT(1),
+	MSIX_HW_INT_CAUSES_REG_IPC		= BIT(1),
+	MSIX_HW_INT_CAUSES_REG_SW_ERR_V2	= BIT(5),
 	MSIX_HW_INT_CAUSES_REG_CT_KILL		= BIT(6),
 	MSIX_HW_INT_CAUSES_REG_RF_KILL		= BIT(7),
 	MSIX_HW_INT_CAUSES_REG_PERIODIC		= BIT(8),
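
A standalone sketch (not part of the patch) of how the new CSR_HW_RF_STEP helper decodes a hw_rf_id value next to the existing chip-id macro; the register value used here is made up purely for illustration:

#include <stdio.h>
#include <stdint.h>

/* copied from the header above */
#define CSR_HW_RF_ID_TYPE_CHIP_ID(_val)	(((_val) >> 12) & 0xFFF)
#define CSR_HW_RF_STEP(_val)		(((_val) >> 8) & 0xF)

int main(void)
{
	uint32_t hw_rf_id = 0x00123456;	/* made-up register value */

	printf("chip id: 0x%03x\n", (unsigned)CSR_HW_RF_ID_TYPE_CHIP_ID(hw_rf_id));	/* 0x123 */
	printf("rf step: 0x%x\n", (unsigned)CSR_HW_RF_STEP(hw_rf_id));			/* 0x4 */
	return 0;
}
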
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
index c59ce4f8a5edbdc2451d28f392dfbd38fba1a248..c0631255aee7ca3dedf6a5168856f34d3a20401d 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
@@ -402,35 +402,6 @@ static int iwl_store_cscheme(struct iwl_fw *fw, const u8 *data, const u32 len)
 	return 0;
 }
 
-static void iwl_store_gscan_capa(struct iwl_fw *fw, const u8 *data,
-				 const u32 len)
-{
-	struct iwl_fw_gscan_capabilities *fw_capa = (void *)data;
-	struct iwl_gscan_capabilities *capa = &fw->gscan_capa;
-
-	capa->max_scan_cache_size = le32_to_cpu(fw_capa->max_scan_cache_size);
-	capa->max_scan_buckets = le32_to_cpu(fw_capa->max_scan_buckets);
-	capa->max_ap_cache_per_scan =
-		le32_to_cpu(fw_capa->max_ap_cache_per_scan);
-	capa->max_rssi_sample_size = le32_to_cpu(fw_capa->max_rssi_sample_size);
-	capa->max_scan_reporting_threshold =
-		le32_to_cpu(fw_capa->max_scan_reporting_threshold);
-	capa->max_hotlist_aps = le32_to_cpu(fw_capa->max_hotlist_aps);
-	capa->max_significant_change_aps =
-		le32_to_cpu(fw_capa->max_significant_change_aps);
-	capa->max_bssid_history_entries =
-		le32_to_cpu(fw_capa->max_bssid_history_entries);
-	capa->max_hotlist_ssids = le32_to_cpu(fw_capa->max_hotlist_ssids);
-	capa->max_number_epno_networks =
-		le32_to_cpu(fw_capa->max_number_epno_networks);
-	capa->max_number_epno_networks_by_ssid =
-		le32_to_cpu(fw_capa->max_number_epno_networks_by_ssid);
-	capa->max_number_of_white_listed_ssid =
-		le32_to_cpu(fw_capa->max_number_of_white_listed_ssid);
-	capa->max_number_of_black_listed_ssid =
-		le32_to_cpu(fw_capa->max_number_of_black_listed_ssid);
-}
-
 /*
  * Gets uCode section from tlv.
  */
@@ -644,7 +615,6 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
 	u32 build, paging_mem_size;
 	int num_of_cpus;
 	bool usniffer_req = false;
-	bool gscan_capa = false;
 
 	if (len < sizeof(*ucode)) {
 		IWL_ERR(drv, "uCode has invalid length: %zd\n", len);
@@ -1043,6 +1013,17 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
 			pieces->dbg_trigger_tlv_len[trigger_id] = tlv_len;
 			break;
 			}
+		case IWL_UCODE_TLV_FW_DBG_DUMP_LST: {
+			if (tlv_len != sizeof(u32)) {
+				IWL_ERR(drv,
+					"dbg lst mask size incorrect, skip\n");
+				break;
+			}
+
+			drv->fw.dbg_dump_mask =
+				le32_to_cpup((__le32 *)tlv_data);
+			break;
+			}
 		case IWL_UCODE_TLV_SEC_RT_USNIFFER:
 			*usniffer_images = true;
 			iwl_store_ucode_sec(pieces, tlv_data,
@@ -1079,16 +1060,7 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
 				paging_mem_size;
 			break;
 		case IWL_UCODE_TLV_FW_GSCAN_CAPA:
-			/*
-			 * Don't return an error in case of a shorter tlv_len
-			 * to enable loading of FW that has an old format
-			 * of GSCAN capabilities TLV.
-			 */
-			if (tlv_len < sizeof(struct iwl_fw_gscan_capabilities))
-				break;
-
-			iwl_store_gscan_capa(&drv->fw, tlv_data, tlv_len);
-			gscan_capa = true;
+			/* ignored */
 			break;
 		case IWL_UCODE_TLV_FW_MEM_SEG: {
 			struct iwl_fw_dbg_mem_seg_tlv *dbg_mem =
@@ -1153,19 +1125,6 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
 		return -EINVAL;
 	}
 
-	/*
-	 * If ucode advertises that it supports GSCAN but GSCAN
-	 * capabilities TLV is not present, or if it has an old format,
-	 * warn and continue without GSCAN.
-	 */
-	if (fw_has_capa(capa, IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT) &&
-	    !gscan_capa) {
-		IWL_DEBUG_INFO(drv,
-			       "GSCAN is supported but capabilities TLV is unavailable\n");
-		__clear_bit((__force long)IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT,
-			    capa->_capa);
-	}
-
 	return 0;
 
  invalid_tlv_len:
@@ -1316,6 +1275,8 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
 	fw->ucode_capa.standard_phy_calibration_size =
 			IWL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE;
 	fw->ucode_capa.n_scan_channels = IWL_DEFAULT_SCAN_CHANNELS;
+	/* dump all fw memory areas by default */
+	fw->dbg_dump_mask = 0xffffffff;
 
 	pieces = kzalloc(sizeof(*pieces), GFP_KERNEL);
 	if (!pieces)
@@ -1787,7 +1748,8 @@ MODULE_PARM_DESC(11n_disable,
 	"disable 11n functionality, bitmap: 1: full, 2: disable agg TX, 4: disable agg RX, 8 enable agg TX");
 module_param_named(amsdu_size, iwlwifi_mod_params.amsdu_size, int, 0444);
 MODULE_PARM_DESC(amsdu_size,
-		 "amsdu size 0: 12K for multi Rx queue devices, 4K for other devices 1:4K 2:8K 3:12K (default 0)");
+		 "amsdu size 0: 12K for multi Rx queue devices, 2K for 22560 devices, "
+		 "4K for other devices 1:4K 2:8K 3:12K 4: 2K (default 0)");
 module_param_named(fw_restart, iwlwifi_mod_params.fw_restart, bool, 0444);
 MODULE_PARM_DESC(fw_restart, "restart firmware in case of error (default true)");
 
@@ -1856,3 +1818,7 @@ module_param_named(remove_when_gone,
 		   0444);
 MODULE_PARM_DESC(remove_when_gone,
 		 "Remove dev from PCIe bus if it is deemed inaccessible (default: false)");
+
+module_param_named(disable_11ax, iwlwifi_mod_params.disable_11ax, bool,
+		   0444);
+MODULE_PARM_DESC(disable_11ax, "Disable HE capabilities (default: false)");
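
The new IWL_UCODE_TLV_FW_DBG_DUMP_LST TLV above carries a 32-bit mask the dump code can consult before emitting each memory area; with no TLV present, the default of 0xffffffff keeps everything enabled. A minimal sketch of such a gate follows; the per-area bit numbering here is an assumption for the example, not the driver's actual encoding:

#include <stdint.h>
#include <stdbool.h>

/* hypothetical area indices, purely for illustration */
enum dump_area_sketch {
	DUMP_AREA_SRAM,
	DUMP_AREA_PERIPHERY,
	DUMP_AREA_FW_MONITOR,
};

bool dump_area_enabled(uint32_t dbg_dump_mask, enum dump_area_sketch area)
{
	return dbg_dump_mask & (UINT32_C(1) << area);
}
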
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c
index 777f5df8a0c6402cd69b55610fdf49e08f619745..a4c96215933ba589d2bbbc272728c901c80a4214 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c
@@ -7,6 +7,7 @@
  *
  * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2018 Intel Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -18,9 +19,7 @@
  * General Public License for more details.
  *
  * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
+ * along with this program.
  *
  * The full GNU General Public License is included in this distribution
  * in the file called COPYING.
@@ -33,6 +32,7 @@
  *
  * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2018 Intel Corporation
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -767,7 +767,7 @@ void iwl_init_ht_hw_capab(const struct iwl_cfg *cfg,
 		ht_info->cap |= IEEE80211_HT_CAP_LDPC_CODING;
 
 	if ((cfg->mq_rx_supported &&
-	     iwlwifi_mod_params.amsdu_size != IWL_AMSDU_4K) ||
+	     iwlwifi_mod_params.amsdu_size == IWL_AMSDU_DEF) ||
 	     iwlwifi_mod_params.amsdu_size >= IWL_AMSDU_8K)
 		ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU;
 
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-fh.h b/drivers/net/wireless/intel/iwlwifi/iwl-fh.h
index 11789ffb6512037b6b4cdc4c8a997bf7e3225f45..df0e9ffff7067e86c158e587028f6e250ba6643a 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-fh.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-fh.h
@@ -7,6 +7,7 @@
  *
  * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -18,9 +19,7 @@
  * General Public License for more details.
  *
  * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
+ * along with this program.
  *
  * The full GNU General Public License is included in this distribution
  * in the file called COPYING.
@@ -33,6 +32,7 @@
  *
  * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -434,13 +434,15 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(struct iwl_trans *trans,
  * RXF to DRAM.
  * Once the RXF-to-DRAM DMA is active, this flag is immediately turned off.
  */
-#define RFH_GEN_STATUS 0xA09808
+#define RFH_GEN_STATUS		0xA09808
+#define RFH_GEN_STATUS_GEN3	0xA07824
 #define RBD_FETCH_IDLE	BIT(29)
 #define SRAM_DMA_IDLE	BIT(30)
 #define RXF_DMA_IDLE	BIT(31)
 
 /* DMA configuration */
-#define RFH_RXF_DMA_CFG 0xA09820
+#define RFH_RXF_DMA_CFG		0xA09820
+#define RFH_RXF_DMA_CFG_GEN3	0xA07880
 /* RB size */
 #define RFH_RXF_DMA_RB_SIZE_MASK (0x000F0000) /* bits 16-19 */
 #define RFH_RXF_DMA_RB_SIZE_POS 16
@@ -643,10 +645,13 @@ struct iwl_rb_status {
 
 
 #define TFD_QUEUE_SIZE_MAX      (256)
+#define TFD_QUEUE_SIZE_MAX_GEN3 (65536)
 /* cb size is the exponent - 3 */
 #define TFD_QUEUE_CB_SIZE(x)	(ilog2(x) - 3)
 #define TFD_QUEUE_SIZE_BC_DUP	(64)
 #define TFD_QUEUE_BC_SIZE	(TFD_QUEUE_SIZE_MAX + TFD_QUEUE_SIZE_BC_DUP)
+#define TFD_QUEUE_BC_SIZE_GEN3	(TFD_QUEUE_SIZE_MAX_GEN3 + \
+				 TFD_QUEUE_SIZE_BC_DUP)
 #define IWL_TX_DMA_MASK        DMA_BIT_MASK(36)
 #define IWL_NUM_OF_TBS		20
 #define IWL_TFH_NUM_TBS		25
@@ -753,7 +758,7 @@ struct iwl_tfh_tfd {
  * For devices up to 22000:
  * @tfd_offset  0-12 - tx command byte count
  *		12-16 - station index
- * For 22000 and on:
+ * For 22000:
  * @tfd_offset  0-12 - tx command byte count
  *		12-13 - number of 64 byte chunks
  *		14-16 - reserved
@@ -762,4 +767,15 @@ struct iwlagn_scd_bc_tbl {
 	__le16 tfd_offset[TFD_QUEUE_BC_SIZE];
 } __packed;
 
+/**
+ * struct iwl_gen3_bc_tbl - scheduler byte count table gen3
+ * For 22560 and on:
+ * @tfd_offset: 0-12 - tx command byte count
+ *		12-13 - number of 64 byte chunks
+ *		14-16 - reserved
+ */
+struct iwl_gen3_bc_tbl {
+	__le16 tfd_offset[TFD_QUEUE_BC_SIZE_GEN3];
+} __packed;
+
 #endif /* !__iwl_fh_h__ */
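
A quick standalone check (not part of the patch) of what the new gen3 definitions imply for the scheduler byte-count tables: each entry is a __le16, so the table grows from 640 bytes to 131200 bytes when the TFD queue depth goes from 256 to 65536 entries (plus the 64 duplicate entries). That size jump is why the patch adds a separate struct iwl_gen3_bc_tbl instead of growing the existing one.

#include <stdio.h>
#include <stdint.h>

#define TFD_QUEUE_SIZE_MAX	256
#define TFD_QUEUE_SIZE_MAX_GEN3	65536
#define TFD_QUEUE_SIZE_BC_DUP	64

int main(void)
{
	size_t gen2 = sizeof(uint16_t) * (TFD_QUEUE_SIZE_MAX + TFD_QUEUE_SIZE_BC_DUP);
	size_t gen3 = sizeof(uint16_t) * (TFD_QUEUE_SIZE_MAX_GEN3 + TFD_QUEUE_SIZE_BC_DUP);

	printf("iwlagn_scd_bc_tbl: %zu bytes\n", gen2);	/* 640 */
	printf("iwl_gen3_bc_tbl:   %zu bytes\n", gen3);	/* 131200 */
	return 0;
}
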
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h b/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h
index a7dd8a8cddf92c0fa039fdd53466666222de61ea..97072cf75bca5a7d95a88dab4bf69c5d963b083a 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h
@@ -6,6 +6,7 @@
  * GPL LICENSE SUMMARY
  *
  * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2018 Intel Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -17,9 +18,7 @@
  * General Public License for more details.
  *
  * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
+ * along with this program.
  *
  * The full GNU General Public License is included in this distribution
  * in the file called COPYING.
@@ -31,6 +30,7 @@
  * BSD LICENSE
  *
  * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2018 Intel Corporation
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -90,6 +90,8 @@ enum iwl_amsdu_size {
 	IWL_AMSDU_4K = 1,
 	IWL_AMSDU_8K = 2,
 	IWL_AMSDU_12K = 3,
+	/* Add 2K at the end to avoid breaking current API */
+	IWL_AMSDU_2K = 4,
 };
 
 enum iwl_uapsd_disable {
@@ -144,6 +146,10 @@ struct iwl_mod_params {
 	bool lar_disable;
 	bool fw_monitor;
 	bool disable_11ac;
+	/**
+	 * @disable_11ax: disable HE capabilities, default = false
+	 */
+	bool disable_11ax;
 	bool remove_when_gone;
 };
 
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
index b815ba38dbdb177e6a2fe2e235c2a4f5cac8d725..b4c3a957c102d2463fde9d4987dcdf3764e2a694 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
@@ -430,6 +430,13 @@ static void iwl_init_vht_hw_capab(const struct iwl_cfg *cfg,
 		else
 			vht_cap->cap |= IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895;
 		break;
+	case IWL_AMSDU_2K:
+		if (cfg->mq_rx_supported)
+			vht_cap->cap |=
+				IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454;
+		else
+			WARN(1, "RB size of 2K is not supported by this device\n");
+		break;
 	case IWL_AMSDU_4K:
 		vht_cap->cap |= IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895;
 		break;
@@ -463,6 +470,101 @@ static void iwl_init_vht_hw_capab(const struct iwl_cfg *cfg,
 	vht_cap->vht_mcs.tx_mcs_map = vht_cap->vht_mcs.rx_mcs_map;
 }
 
+static struct ieee80211_sband_iftype_data iwl_he_capa = {
+	.types_mask = BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_AP),
+	.he_cap = {
+		.has_he = true,
+		.he_cap_elem = {
+			.mac_cap_info[0] =
+				IEEE80211_HE_MAC_CAP0_HTC_HE,
+			.mac_cap_info[1] =
+				IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US |
+				IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_8,
+			.mac_cap_info[2] =
+				IEEE80211_HE_MAC_CAP2_32BIT_BA_BITMAP |
+				IEEE80211_HE_MAC_CAP2_ACK_EN,
+			.mac_cap_info[3] =
+				IEEE80211_HE_MAC_CAP3_GRP_ADDR_MULTI_STA_BA_DL_MU |
+				IEEE80211_HE_MAC_CAP3_MAX_A_AMPDU_LEN_EXP_VHT_2,
+			.mac_cap_info[4] = IEEE80211_HE_MAC_CAP4_AMDSU_IN_AMPDU,
+			.phy_cap_info[0] =
+				IEEE80211_HE_PHY_CAP0_DUAL_BAND |
+				IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G |
+				IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G |
+				IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G,
+			.phy_cap_info[1] =
+				IEEE80211_HE_PHY_CAP1_DEVICE_CLASS_A |
+				IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD |
+				IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_MAX_NSTS,
+			.phy_cap_info[2] =
+				IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US |
+				IEEE80211_HE_PHY_CAP2_STBC_TX_UNDER_80MHZ |
+				IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ,
+			.phy_cap_info[3] =
+				IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_BPSK |
+				IEEE80211_HE_PHY_CAP3_DCM_MAX_TX_NSS_1 |
+				IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_BPSK |
+				IEEE80211_HE_PHY_CAP3_DCM_MAX_RX_NSS_1,
+			.phy_cap_info[4] =
+				IEEE80211_HE_PHY_CAP4_SU_BEAMFORMEE |
+				IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_8 |
+				IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_8,
+			.phy_cap_info[5] =
+				IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_2 |
+				IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_2,
+			.phy_cap_info[6] =
+				IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT,
+			.phy_cap_info[7] =
+				IEEE80211_HE_PHY_CAP7_POWER_BOOST_FACTOR_AR |
+				IEEE80211_HE_PHY_CAP7_HE_SU_MU_PPDU_4XLTF_AND_08_US_GI |
+				IEEE80211_HE_PHY_CAP7_MAX_NC_7,
+			.phy_cap_info[8] =
+				IEEE80211_HE_PHY_CAP8_HE_ER_SU_PPDU_4XLTF_AND_08_US_GI |
+				IEEE80211_HE_PHY_CAP8_20MHZ_IN_40MHZ_HE_PPDU_IN_2G |
+				IEEE80211_HE_PHY_CAP8_20MHZ_IN_160MHZ_HE_PPDU |
+				IEEE80211_HE_PHY_CAP8_80MHZ_IN_160MHZ_HE_PPDU,
+		},
+		/*
+		 * Set default Tx/Rx HE MCS NSS Support field. Indicate support
+		 * for up to 2 spatial streams and all MCS, without any special
+		 * cases
+		 */
+		.he_mcs_nss_supp = {
+			.rx_mcs_80 = cpu_to_le16(0xfffa),
+			.tx_mcs_80 = cpu_to_le16(0xfffa),
+			.rx_mcs_160 = cpu_to_le16(0xfffa),
+			.tx_mcs_160 = cpu_to_le16(0xfffa),
+			.rx_mcs_80p80 = cpu_to_le16(0xffff),
+			.tx_mcs_80p80 = cpu_to_le16(0xffff),
+		},
+		/*
+		 * Set default PPE thresholds, with PPET16 set to 0, PPET8 set
+		 * to 7
+		 */
+		.ppe_thres = {0x61, 0x1c, 0xc7, 0x71},
+	},
+};
+
+static void iwl_init_he_hw_capab(struct ieee80211_supported_band *sband,
+				 u8 tx_chains, u8 rx_chains)
+{
+	if (sband->band != NL80211_BAND_2GHZ &&
+	    sband->band != NL80211_BAND_5GHZ)
+		return;
+
+	sband->iftype_data = &iwl_he_capa;
+
+	sband->n_iftype_data = 1;
+
+	/* If not 2x2, we need to indicate 1x1 in the Midamble RX Max NSTS */
+	if ((tx_chains & rx_chains) != ANT_AB) {
+		iwl_he_capa.he_cap.he_cap_elem.phy_cap_info[1] &=
+			~IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_MAX_NSTS;
+		iwl_he_capa.he_cap.he_cap_elem.phy_cap_info[2] &=
+			~IEEE80211_HE_PHY_CAP2_MIDAMBLE_RX_MAX_NSTS;
+	}
+}
+
 static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
 			    struct iwl_nvm_data *data,
 			    const __le16 *nvm_ch_flags, u8 tx_chains,
@@ -483,6 +585,9 @@ static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
 	iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, NL80211_BAND_2GHZ,
 			     tx_chains, rx_chains);
 
+	if (data->sku_cap_11ax_enable && !iwlwifi_mod_params.disable_11ax)
+		iwl_init_he_hw_capab(sband, tx_chains, rx_chains);
+
 	sband = &data->bands[NL80211_BAND_5GHZ];
 	sband->band = NL80211_BAND_5GHZ;
 	sband->bitrates = &iwl_cfg80211_rates[RATES_52_OFFS];
@@ -495,6 +600,9 @@ static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
 		iwl_init_vht_hw_capab(cfg, data, &sband->vht_cap,
 				      tx_chains, rx_chains);
 
+	if (data->sku_cap_11ax_enable && !iwlwifi_mod_params.disable_11ax)
+		iwl_init_he_hw_capab(sband, tx_chains, rx_chains);
+
 	if (n_channels != n_used)
 		IWL_ERR_DEV(dev, "NVM: used only %d of %d channels\n",
 			    n_used, n_channels);
@@ -1293,6 +1401,8 @@ struct iwl_nvm_data *iwl_get_nvm(struct iwl_trans *trans,
 		!!(mac_flags & NVM_MAC_SKU_FLAGS_802_11AC_ENABLED);
 	nvm->sku_cap_11n_enable =
 		!!(mac_flags & NVM_MAC_SKU_FLAGS_802_11N_ENABLED);
+	nvm->sku_cap_11ax_enable =
+		!!(mac_flags & NVM_MAC_SKU_FLAGS_802_11AX_ENABLED);
 	nvm->sku_cap_band_24ghz_enable =
 		!!(mac_flags & NVM_MAC_SKU_FLAGS_BAND_2_4_ENABLED);
 	nvm->sku_cap_band_52ghz_enable =
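
For reference (not part of the patch): the 0xfffa value used for the default HE MCS maps above decodes as two spatial streams supporting MCS 0-11 with the remaining six marked not supported; each stream takes two bits (0 = MCS 0-7, 1 = MCS 0-9, 2 = MCS 0-11, 3 = not supported). A standalone decoder sketch:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint16_t mcs_map = 0xfffa;	/* value used for the default maps above */
	int nss;

	for (nss = 0; nss < 8; nss++) {
		unsigned int v = (mcs_map >> (2 * nss)) & 0x3;

		printf("NSS %d: %s\n", nss + 1,
		       v == 0 ? "MCS 0-7" :
		       v == 1 ? "MCS 0-9" :
		       v == 2 ? "MCS 0-11" : "not supported");
	}
	return 0;
}
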
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
index 1b9c627ee34d9ca2ba96a7d217d4fe5fdae8e3bd..279dd7b7a3fb921c615dcab4fe769f926f7e896d 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
@@ -350,6 +350,8 @@ static inline int
 iwl_trans_get_rb_size_order(enum iwl_amsdu_size rb_size)
 {
 	switch (rb_size) {
+	case IWL_AMSDU_2K:
+		return get_order(2 * 1024);
 	case IWL_AMSDU_4K:
 		return get_order(4 * 1024);
 	case IWL_AMSDU_8K:
@@ -437,6 +439,20 @@ struct iwl_trans_txq_scd_cfg {
 	int frame_limit;
 };
 
+/**
+ * struct iwl_trans_rxq_dma_data - RX queue DMA data
+ * @fr_bd_cb: DMA address of free BD cyclic buffer
+ * @fr_bd_wid: Initial write index of the free BD cyclic buffer
+ * @urbd_stts_wrptr: DMA address of urbd_stts_wrptr
+ * @ur_bd_cb: DMA address of used BD cyclic buffer
+ */
+struct iwl_trans_rxq_dma_data {
+	u64 fr_bd_cb;
+	u32 fr_bd_wid;
+	u64 urbd_stts_wrptr;
+	u64 ur_bd_cb;
+};
+
 /**
  * struct iwl_trans_ops - transport specific operations
  *
@@ -557,6 +573,8 @@ struct iwl_trans_ops {
 			 int cmd_id, int size,
 			 unsigned int queue_wdg_timeout);
 	void (*txq_free)(struct iwl_trans *trans, int queue);
+	int (*rxq_dma_data)(struct iwl_trans *trans, int queue,
+			    struct iwl_trans_rxq_dma_data *data);
 
 	void (*txq_set_shared_mode)(struct iwl_trans *trans, u32 txq_id,
 				    bool shared);
@@ -753,6 +771,7 @@ struct iwl_trans {
 	const struct iwl_fw_dbg_dest_tlv_v1 *dbg_dest_tlv;
 	const struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_CONF_MAX];
 	struct iwl_fw_dbg_trigger_tlv * const *dbg_trigger_tlv;
+	u32 dbg_dump_mask;
 	u8 dbg_dest_reg_num;
 
 	enum iwl_plat_pm_mode system_pm_mode;
@@ -945,6 +964,16 @@ iwl_trans_txq_enable_cfg(struct iwl_trans *trans, int queue, u16 ssn,
 				      cfg, queue_wdg_timeout);
 }
 
+static inline int
+iwl_trans_get_rxq_dma_data(struct iwl_trans *trans, int queue,
+			   struct iwl_trans_rxq_dma_data *data)
+{
+	if (WARN_ON_ONCE(!trans->ops->rxq_dma_data))
+		return -ENOTSUPP;
+
+	return trans->ops->rxq_dma_data(trans, queue, data);
+}
+
 static inline void
 iwl_trans_txq_free(struct iwl_trans *trans, int queue)
 {
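
The new IWL_AMSDU_2K case in iwl_trans_get_rb_size_order() still resolves to allocation order 0 on a 4 KiB-page system, the same as 4K buffers; the separate enum value matters because the RB size programmed into the RX hardware differs even when the page order does not. A standalone sketch of the mapping, using a local stand-in for the kernel's get_order():

#include <stdio.h>

#define PAGE_SZ 4096	/* assume 4 KiB pages */

static int get_order_sketch(unsigned long size)
{
	int order = 0;

	/* smallest order such that PAGE_SZ << order covers the request */
	while (((unsigned long)PAGE_SZ << order) < size)
		order++;
	return order;
}

int main(void)
{
	printf("2K  -> order %d\n", get_order_sketch(2 * 1024));	/* 0 */
	printf("4K  -> order %d\n", get_order_sketch(4 * 1024));	/* 0 */
	printf("8K  -> order %d\n", get_order_sketch(8 * 1024));	/* 1 */
	printf("12K -> order %d\n", get_order_sketch(12 * 1024));	/* 2 */
	return 0;
}
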
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
index 3fcf489f3120f19db48f38d4eaf39baddd17ca70..79bdae994822844bcc07bb8b63b617f59982d042 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
@@ -1037,6 +1037,13 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
 			cpu_to_le32(IWL_WAKEUP_D3_CONFIG_FW_ERROR);
 #endif
 
+	/*
+	 * TODO: this is needed because the firmware is not stopping
+	 * the recording automatically before entering D3.  This can
+	 * be removed once the FW starts doing that.
+	 */
+	iwl_fw_dbg_stop_recording(&mvm->fwrt);
+
 	/* must be last -- this switches firmware state */
 	ret = iwl_mvm_send_cmd(mvm, &d3_cfg_cmd);
 	if (ret)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
index 1c4178f204412cbb2042e778e20ff6f58d618f4a..05b77419953ce6250f77f573b173b92660d5bc33 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
@@ -1150,6 +1150,10 @@ static ssize_t iwl_dbgfs_inject_packet_write(struct iwl_mvm *mvm,
 	struct iwl_rx_mpdu_desc *desc;
 	int bin_len = count / 2;
 	int ret = -EINVAL;
+	size_t mpdu_cmd_hdr_size =
+		(mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) ?
+		sizeof(struct iwl_rx_mpdu_desc) :
+		IWL_RX_DESC_SIZE_V1;
 
 	if (!iwl_mvm_firmware_running(mvm))
 		return -EIO;
@@ -1168,7 +1172,7 @@ static ssize_t iwl_dbgfs_inject_packet_write(struct iwl_mvm *mvm,
 		goto out;
 
 	/* avoid invalid memory access */
-	if (bin_len < sizeof(*pkt) + sizeof(*desc))
+	if (bin_len < sizeof(*pkt) + mpdu_cmd_hdr_size)
 		goto out;
 
 	/* check this is RX packet */
@@ -1179,7 +1183,7 @@ static ssize_t iwl_dbgfs_inject_packet_write(struct iwl_mvm *mvm,
 	/* check the length in metadata matches actual received length */
 	desc = (void *)pkt->data;
 	if (le16_to_cpu(desc->mpdu_len) !=
-	    (bin_len - sizeof(*desc) - sizeof(*pkt)))
+	    (bin_len - mpdu_cmd_hdr_size - sizeof(*pkt)))
 		goto out;
 
 	local_bh_disable();
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
index 866c91c923be5688ee498bca12710d4bd98c2100..6bb1a99a197a22981f29962ab487cccb76dd0eb8 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
@@ -130,6 +130,41 @@ static int iwl_send_rss_cfg_cmd(struct iwl_mvm *mvm)
 	return iwl_mvm_send_cmd_pdu(mvm, RSS_CONFIG_CMD, 0, sizeof(cmd), &cmd);
 }
 
+static int iwl_configure_rxq(struct iwl_mvm *mvm)
+{
+	int i, num_queues, size, ret;
+	struct iwl_rfh_queue_config *cmd;
+
+	/* Do not configure default queue, it is configured via context info */
+	num_queues = mvm->trans->num_rx_queues - 1;
+
+	size = sizeof(*cmd) + num_queues * sizeof(struct iwl_rfh_queue_data);
+
+	cmd = kzalloc(size, GFP_KERNEL);
+	if (!cmd)
+		return -ENOMEM;
+
+	cmd->num_queues = num_queues;
+
+	for (i = 0; i < num_queues; i++) {
+		struct iwl_trans_rxq_dma_data data;
+
+		cmd->data[i].q_num = i + 1;
+		iwl_trans_get_rxq_dma_data(mvm->trans, i + 1, &data);
+
+		cmd->data[i].fr_bd_cb = cpu_to_le64(data.fr_bd_cb);
+		cmd->data[i].urbd_stts_wrptr =
+			cpu_to_le64(data.urbd_stts_wrptr);
+		cmd->data[i].ur_bd_cb = cpu_to_le64(data.ur_bd_cb);
+		cmd->data[i].fr_bd_wid = cpu_to_le32(data.fr_bd_wid);
+	}
+
+	ret = iwl_mvm_send_cmd_pdu(mvm,
+				   WIDE_ID(DATA_PATH_GROUP,
+					   RFH_QUEUE_CONFIG_CMD),
+				   0, size, cmd);
+	/* the payload is copied by the transport, so free our buffer */
+	kfree(cmd);
+
+	return ret;
+}
+
 static int iwl_mvm_send_dqa_cmd(struct iwl_mvm *mvm)
 {
 	struct iwl_dqa_enable_cmd dqa_cmd = {
@@ -301,7 +336,7 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
 	if (ret) {
 		struct iwl_trans *trans = mvm->trans;
 
-		if (trans->cfg->device_family == IWL_DEVICE_FAMILY_22000)
+		if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22000)
 			IWL_ERR(mvm,
 				"SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
 				iwl_read_prph(trans, UMAG_SB_CPU_1_STATUS),
@@ -1007,9 +1042,16 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
 		goto error;
 
 	/* Init RSS configuration */
-	/* TODO - remove 22000 disablement when we have RXQ config API */
-	if (iwl_mvm_has_new_rx_api(mvm) &&
-	    mvm->trans->cfg->device_family != IWL_DEVICE_FAMILY_22000) {
+	if (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22000) {
+		ret = iwl_configure_rxq(mvm);
+		if (ret) {
+			IWL_ERR(mvm, "Failed to configure RX queues: %d\n",
+				ret);
+			goto error;
+		}
+	}
+
+	if (iwl_mvm_has_new_rx_api(mvm)) {
 		ret = iwl_send_rss_cfg_cmd(mvm);
 		if (ret) {
 			IWL_ERR(mvm, "Failed to configure RSS queues: %d\n",
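
For illustration only: the command sizing in iwl_configure_rxq() above is a fixed header plus one per-queue record, mirroring the firmware's flexible-array layout. The struct definitions below are simplified stand-ins, not the real iwl_rfh_queue_config/iwl_rfh_queue_data ABI:

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

/* simplified stand-ins for iwl_rfh_queue_data / iwl_rfh_queue_config */
struct rxq_data_sketch {
	uint64_t fr_bd_cb;
	uint32_t fr_bd_wid;
	uint64_t urbd_stts_wrptr;
	uint64_t ur_bd_cb;
};

struct rxq_config_sketch {
	uint32_t num_queues;
	struct rxq_data_sketch data[];	/* one entry per non-default queue */
};

int main(void)
{
	unsigned int num_queues = 3;	/* e.g. 4 RX queues, default queue excluded */
	size_t size = sizeof(struct rxq_config_sketch) +
		      num_queues * sizeof(struct rxq_data_sketch);
	struct rxq_config_sketch *cmd = calloc(1, size);

	if (!cmd)
		return 1;

	cmd->num_queues = num_queues;
	printf("RFH_QUEUE_CONFIG_CMD payload: %zu bytes\n", size);
	free(cmd);
	return 0;
}
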
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
index 8ba16fc24e3af0bd6bc07b6de7195e375d3f0cb1..b3fd20502abb3c604352fbc9d5aa84256f40a4f6 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
@@ -780,6 +780,10 @@ static int iwl_mvm_mac_ctxt_cmd_sta(struct iwl_mvm *mvm,
 	if (vif->probe_req_reg && vif->bss_conf.assoc && vif->p2p)
 		cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_PROBE_REQUEST);
 
+	if (vif->bss_conf.assoc && vif->bss_conf.he_support &&
+	    !iwlwifi_mod_params.disable_11ax)
+		cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_11AX);
+
 	return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd);
 }
 
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index 26021bc55e981cf727c5682b36f8e3057c05d0a8..b15b0d84bb7ea18a06933d455e495248c7bcd32c 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -36,6 +36,7 @@
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018        Intel Corporation
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -914,7 +915,7 @@ static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw,
 	enum ieee80211_ampdu_mlme_action action = params->action;
 	u16 tid = params->tid;
 	u16 *ssn = &params->ssn;
-	u8 buf_size = params->buf_size;
+	u16 buf_size = params->buf_size;
 	bool amsdu = params->amsdu;
 	u16 timeout = params->timeout;
 
@@ -1897,6 +1898,194 @@ void iwl_mvm_mu_mimo_grp_notif(struct iwl_mvm *mvm,
 			iwl_mvm_mu_mimo_iface_iterator, notif);
 }
 
+static u8 iwl_mvm_he_get_ppe_val(u8 *ppe, u8 ppe_pos_bit)
+{
+	u8 byte_num = ppe_pos_bit / 8;
+	u8 bit_num = ppe_pos_bit % 8;
+	u8 residue_bits;
+	u8 res;
+
+	if (bit_num <= 5)
+		return (ppe[byte_num] >> bit_num) &
+		       (BIT(IEEE80211_PPE_THRES_INFO_PPET_SIZE) - 1);
+
+	/*
+	 * If bit_num > 5, we have to combine bits with next byte.
+	 * Calculate how many bits we need to take from current byte (called
+	 * here "residue_bits"), and add them to bits from next byte.
+	 */
+
+	residue_bits = 8 - bit_num;
+
+	res = (ppe[byte_num + 1] &
+	       (BIT(IEEE80211_PPE_THRES_INFO_PPET_SIZE - residue_bits) - 1)) <<
+	      residue_bits;
+	res += (ppe[byte_num] >> bit_num) & (BIT(residue_bits) - 1);
+
+	return res;
+}
+
+static void iwl_mvm_cfg_he_sta(struct iwl_mvm *mvm,
+			       struct ieee80211_vif *vif, u8 sta_id)
+{
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	struct iwl_he_sta_context_cmd sta_ctxt_cmd = {
+		.sta_id = sta_id,
+		.tid_limit = IWL_MAX_TID_COUNT,
+		.bss_color = vif->bss_conf.bss_color,
+		.htc_trig_based_pkt_ext = vif->bss_conf.htc_trig_based_pkt_ext,
+		.frame_time_rts_th =
+			cpu_to_le16(vif->bss_conf.frame_time_rts_th),
+	};
+	struct ieee80211_sta *sta;
+	u32 flags;
+	int i;
+
+	rcu_read_lock();
+
+	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_ctxt_cmd.sta_id]);
+	if (IS_ERR(sta)) {
+		rcu_read_unlock();
+		WARN(1, "Can't find STA to configure HE\n");
+		return;
+	}
+
+	if (!sta->he_cap.has_he) {
+		rcu_read_unlock();
+		return;
+	}
+
+	flags = 0;
+
+	/* HTC flags */
+	if (sta->he_cap.he_cap_elem.mac_cap_info[0] &
+	    IEEE80211_HE_MAC_CAP0_HTC_HE)
+		sta_ctxt_cmd.htc_flags |= cpu_to_le32(IWL_HE_HTC_SUPPORT);
+	if ((sta->he_cap.he_cap_elem.mac_cap_info[1] &
+	      IEEE80211_HE_MAC_CAP1_LINK_ADAPTATION) ||
+	    (sta->he_cap.he_cap_elem.mac_cap_info[2] &
+	      IEEE80211_HE_MAC_CAP2_LINK_ADAPTATION)) {
+		u8 link_adap =
+			((sta->he_cap.he_cap_elem.mac_cap_info[2] &
+			  IEEE80211_HE_MAC_CAP2_LINK_ADAPTATION) << 1) +
+			 (sta->he_cap.he_cap_elem.mac_cap_info[1] &
+			  IEEE80211_HE_MAC_CAP1_LINK_ADAPTATION);
+
+		if (link_adap == 2)
+			sta_ctxt_cmd.htc_flags |=
+				cpu_to_le32(IWL_HE_HTC_LINK_ADAP_UNSOLICITED);
+		else if (link_adap == 3)
+			sta_ctxt_cmd.htc_flags |=
+				cpu_to_le32(IWL_HE_HTC_LINK_ADAP_BOTH);
+	}
+	if (sta->he_cap.he_cap_elem.mac_cap_info[2] &
+	    IEEE80211_HE_MAC_CAP2_UL_MU_RESP_SCHED)
+		sta_ctxt_cmd.htc_flags |=
+			cpu_to_le32(IWL_HE_HTC_UL_MU_RESP_SCHED);
+	if (sta->he_cap.he_cap_elem.mac_cap_info[2] & IEEE80211_HE_MAC_CAP2_BSR)
+		sta_ctxt_cmd.htc_flags |= cpu_to_le32(IWL_HE_HTC_BSR_SUPP);
+	if (sta->he_cap.he_cap_elem.mac_cap_info[3] &
+	    IEEE80211_HE_MAC_CAP3_OMI_CONTROL)
+		sta_ctxt_cmd.htc_flags |= cpu_to_le32(IWL_HE_HTC_OMI_SUPP);
+	if (sta->he_cap.he_cap_elem.mac_cap_info[4] & IEEE80211_HE_MAC_CAP4_BQR)
+		sta_ctxt_cmd.htc_flags |= cpu_to_le32(IWL_HE_HTC_BQR_SUPP);
+
+	/* If PPE Thresholds exist, parse them into a FW-familiar format */
+	if (sta->he_cap.he_cap_elem.phy_cap_info[6] &
+	    IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) {
+		u8 nss = (sta->he_cap.ppe_thres[0] &
+			  IEEE80211_PPE_THRES_NSS_MASK) + 1;
+		u8 ru_index_bitmap =
+			(sta->he_cap.ppe_thres[0] &
+			 IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK) >>
+			IEEE80211_PPE_THRES_RU_INDEX_BITMASK_POS;
+		u8 *ppe = &sta->he_cap.ppe_thres[0];
+		u8 ppe_pos_bit = 7; /* Starting after PPE header */
+
+		/*
+		 * FW currently supports only nss == MAX_HE_SUPP_NSS
+		 *
+		 * If nss > MAX: we can ignore values we don't support
+		 * If nss < MAX: we can set zeros in other streams
+		 */
+		if (nss > MAX_HE_SUPP_NSS) {
+			IWL_INFO(mvm, "Got NSS = %d - trimming to %d\n", nss,
+				 MAX_HE_SUPP_NSS);
+			nss = MAX_HE_SUPP_NSS;
+		}
+
+		for (i = 0; i < nss; i++) {
+			u8 ru_index_tmp = ru_index_bitmap << 1;
+			u8 bw;
+
+			for (bw = 0; bw < MAX_HE_CHANNEL_BW_INDX; bw++) {
+				ru_index_tmp >>= 1;
+				if (!(ru_index_tmp & 1))
+					continue;
+
+				sta_ctxt_cmd.pkt_ext.pkt_ext_qam_th[i][bw][1] =
+					iwl_mvm_he_get_ppe_val(ppe,
+							       ppe_pos_bit);
+				ppe_pos_bit +=
+					IEEE80211_PPE_THRES_INFO_PPET_SIZE;
+				sta_ctxt_cmd.pkt_ext.pkt_ext_qam_th[i][bw][0] =
+					iwl_mvm_he_get_ppe_val(ppe,
+							       ppe_pos_bit);
+				ppe_pos_bit +=
+					IEEE80211_PPE_THRES_INFO_PPET_SIZE;
+			}
+		}
+
+		flags |= STA_CTXT_HE_PACKET_EXT;
+	}
+	rcu_read_unlock();
+
+	/* Mark MU EDCA as enabled, unless none detected on some AC */
+	flags |= STA_CTXT_HE_MU_EDCA_CW;
+	for (i = 0; i < AC_NUM; i++) {
+		struct ieee80211_he_mu_edca_param_ac_rec *mu_edca =
+			&mvmvif->queue_params[i].mu_edca_param_rec;
+
+		if (!mvmvif->queue_params[i].mu_edca) {
+			flags &= ~STA_CTXT_HE_MU_EDCA_CW;
+			break;
+		}
+
+		sta_ctxt_cmd.trig_based_txf[i].cwmin =
+			cpu_to_le16(mu_edca->ecw_min_max & 0xf);
+		sta_ctxt_cmd.trig_based_txf[i].cwmax =
+			cpu_to_le16((mu_edca->ecw_min_max & 0xf0) >> 4);
+		sta_ctxt_cmd.trig_based_txf[i].aifsn =
+			cpu_to_le16(mu_edca->aifsn);
+		sta_ctxt_cmd.trig_based_txf[i].mu_time =
+			cpu_to_le16(mu_edca->mu_edca_timer);
+	}
+
+	if (vif->bss_conf.multi_sta_back_32bit)
+		flags |= STA_CTXT_HE_32BIT_BA_BITMAP;
+
+	if (vif->bss_conf.ack_enabled)
+		flags |= STA_CTXT_HE_ACK_ENABLED;
+
+	if (vif->bss_conf.uora_exists) {
+		flags |= STA_CTXT_HE_TRIG_RND_ALLOC;
+
+		sta_ctxt_cmd.rand_alloc_ecwmin =
+			vif->bss_conf.uora_ocw_range & 0x7;
+		sta_ctxt_cmd.rand_alloc_ecwmax =
+			(vif->bss_conf.uora_ocw_range >> 3) & 0x7;
+	}
+
+	/* TODO: support Multi BSSID IE */
+
+	sta_ctxt_cmd.flags = cpu_to_le32(flags);
+
+	if (iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(STA_HE_CTXT_CMD,
+						 DATA_PATH_GROUP, 0),
+				 0, sizeof(sta_ctxt_cmd), &sta_ctxt_cmd))
+		IWL_ERR(mvm, "Failed to configure FW to work in HE mode\n");
+}
+
 static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
 					     struct ieee80211_vif *vif,
 					     struct ieee80211_bss_conf *bss_conf,
@@ -1910,8 +2099,13 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
 	 * beacon interval, which was not known when the station interface was
 	 * added.
 	 */
-	if (changes & BSS_CHANGED_ASSOC && bss_conf->assoc)
+	if (changes & BSS_CHANGED_ASSOC && bss_conf->assoc) {
+		if (vif->bss_conf.he_support &&
+		    !iwlwifi_mod_params.disable_11ax)
+			iwl_mvm_cfg_he_sta(mvm, vif, mvmvif->ap_sta_id);
+
 		iwl_mvm_mac_ctxt_recalc_tsf_id(mvm, vif);
+	}
 
 	/*
 	 * If we're not associated yet, take the (new) BSSID before associating
@@ -4364,13 +4558,6 @@ void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm,
 		atomic_set(&mvm->queue_sync_counter,
 			   mvm->trans->num_rx_queues);
 
-	/* TODO - remove this when we have RXQ config API */
-	if (mvm->trans->cfg->device_family == IWL_DEVICE_FAMILY_22000) {
-		qmask = BIT(0);
-		if (notif->sync)
-			atomic_set(&mvm->queue_sync_counter, 1);
-	}
-
 	ret = iwl_mvm_notify_rx_queue(mvm, qmask, (u8 *)notif, size);
 	if (ret) {
 		IWL_ERR(mvm, "Failed to trigger RX queues sync (%d)\n", ret);
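
A standalone sketch (not part of the patch) of the cross-byte PPE threshold extraction done by iwl_mvm_he_get_ppe_val() above, taking IEEE80211_PPE_THRES_INFO_PPET_SIZE as 3 bits (its value in the 802.11ax PPE Thresholds field):

#include <stdio.h>
#include <stdint.h>

#define PPET_SIZE 3	/* IEEE80211_PPE_THRES_INFO_PPET_SIZE */

static uint8_t get_ppe_val(const uint8_t *ppe, uint8_t ppe_pos_bit)
{
	uint8_t byte_num = ppe_pos_bit / 8;
	uint8_t bit_num = ppe_pos_bit % 8;
	uint8_t residue_bits, res;

	if (bit_num <= 5)
		return (ppe[byte_num] >> bit_num) & ((1 << PPET_SIZE) - 1);

	/* field straddles a byte boundary: low bits come from this byte,
	 * the remaining high bits from the next one */
	residue_bits = 8 - bit_num;
	res = (ppe[byte_num + 1] & ((1 << (PPET_SIZE - residue_bits)) - 1))
		<< residue_bits;
	res += (ppe[byte_num] >> bit_num) & ((1 << residue_bits) - 1);
	return res;
}

int main(void)
{
	/* bit 7 = 1, bit 8 = 0, bit 9 = 1 -> threshold value 0b101 = 5 */
	const uint8_t ppe[] = { 0x80, 0x02, 0x00 };

	printf("PPET at bit offset 7: %u\n", get_ppe_val(ppe, 7));
	return 0;
}
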
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index 6a4ba160c59ed4accd1a56136e5b5d9ebdbd8061..b3987a0a70181ad2dc6775d044ccf90a3bbd8cf8 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -654,7 +654,7 @@ struct iwl_mvm_tcm {
 struct iwl_mvm_reorder_buffer {
 	u16 head_sn;
 	u16 num_stored;
-	u8 buf_size;
+	u16 buf_size;
 	int queue;
 	u16 last_amsdu;
 	u8 last_sub_index;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
index ff1e518096c511297c107ccf83d685d9ea81bf0e..0e26619fb330b8282f5c923aae61adf971c4a119 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -448,6 +448,8 @@ static const struct iwl_hcmd_names iwl_mvm_data_path_names[] = {
 	HCMD_NAME(DQA_ENABLE_CMD),
 	HCMD_NAME(UPDATE_MU_GROUPS_CMD),
 	HCMD_NAME(TRIGGER_RX_QUEUES_NOTIF_CMD),
+	HCMD_NAME(STA_HE_CTXT_CMD),
+	HCMD_NAME(RFH_QUEUE_CONFIG_CMD),
 	HCMD_NAME(STA_PM_NOTIF),
 	HCMD_NAME(MU_GROUP_MGMT_NOTIF),
 	HCMD_NAME(RX_QUEUES_NOTIFICATION),
@@ -620,7 +622,11 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
 
 	if (iwl_mvm_has_new_rx_api(mvm)) {
 		op_mode->ops = &iwl_mvm_ops_mq;
-		trans->rx_mpdu_cmd_hdr_size = sizeof(struct iwl_rx_mpdu_desc);
+		trans->rx_mpdu_cmd_hdr_size =
+			(trans->cfg->device_family >=
+			 IWL_DEVICE_FAMILY_22560) ?
+			sizeof(struct iwl_rx_mpdu_desc) :
+			IWL_RX_DESC_SIZE_V1;
 	} else {
 		op_mode->ops = &iwl_mvm_ops;
 		trans->rx_mpdu_cmd_hdr_size =
@@ -703,11 +709,17 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
 	}
 
 	/* the hardware splits the A-MSDU */
-	if (mvm->cfg->mq_rx_supported)
+	if (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
+		trans_cfg.rx_buf_size = IWL_AMSDU_2K;
+		/* TODO: remove when balanced power mode is fw supported */
+		iwlmvm_mod_params.power_scheme = IWL_POWER_SCHEME_CAM;
+	} else if (mvm->cfg->mq_rx_supported) {
 		trans_cfg.rx_buf_size = IWL_AMSDU_4K;
+	}
 
 	trans->wide_cmd_header = true;
-	trans_cfg.bc_table_dword = true;
+	trans_cfg.bc_table_dword =
+		mvm->trans->cfg->device_family < IWL_DEVICE_FAMILY_22560;
 
 	trans_cfg.command_groups = iwl_mvm_groups;
 	trans_cfg.command_groups_size = ARRAY_SIZE(iwl_mvm_groups);
@@ -738,6 +750,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
 	memcpy(trans->dbg_conf_tlv, mvm->fw->dbg_conf_tlv,
 	       sizeof(trans->dbg_conf_tlv));
 	trans->dbg_trigger_tlv = mvm->fw->dbg_trigger_tlv;
+	trans->dbg_dump_mask = mvm->fw->dbg_dump_mask;
 
 	trans->iml = mvm->fw->iml;
 	trans->iml_len = mvm->fw->iml_len;
@@ -1003,10 +1016,8 @@ static void iwl_mvm_rx_common(struct iwl_mvm *mvm,
 		list_add_tail(&entry->list, &mvm->async_handlers_list);
 		spin_unlock(&mvm->async_handlers_lock);
 		schedule_work(&mvm->async_handlers_wk);
-		return;
+		break;
 	}
-
-	iwl_fwrt_handle_notification(&mvm->fwrt, rxb);
 }
 
 static void iwl_mvm_rx(struct iwl_op_mode *op_mode,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
index b8b2b819e8e7ea0203d162235f00eccc39959f13..8169d1450b3b9b3954ff45e4b32d44fd3062788b 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
@@ -183,6 +183,43 @@ rs_fw_vht_set_enabled_rates(const struct ieee80211_sta *sta,
 	}
 }
 
+static u16 rs_fw_he_ieee80211_mcs_to_rs_mcs(u16 mcs)
+{
+	switch (mcs) {
+	case IEEE80211_HE_MCS_SUPPORT_0_7:
+		return BIT(IWL_TLC_MNG_HT_RATE_MCS7 + 1) - 1;
+	case IEEE80211_HE_MCS_SUPPORT_0_9:
+		return BIT(IWL_TLC_MNG_HT_RATE_MCS9 + 1) - 1;
+	case IEEE80211_HE_MCS_SUPPORT_0_11:
+		return BIT(IWL_TLC_MNG_HT_RATE_MCS11 + 1) - 1;
+	case IEEE80211_HE_MCS_NOT_SUPPORTED:
+		return 0;
+	}
+
+	WARN(1, "invalid HE MCS %d\n", mcs);
+	return 0;
+}
+
+static void
+rs_fw_he_set_enabled_rates(const struct ieee80211_sta *sta,
+			   const struct ieee80211_sta_he_cap *he_cap,
+			   struct iwl_tlc_config_cmd *cmd)
+{
+	u16 mcs_160 = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_160);
+	u16 mcs_80 = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_80);
+	int i;
+
+	for (i = 0; i < sta->rx_nss && i < MAX_NSS; i++) {
+		u16 _mcs_160 = (mcs_160 >> (2 * i)) & 0x3;
+		u16 _mcs_80 = (mcs_80 >> (2 * i)) & 0x3;
+
+		cmd->ht_rates[i][0] =
+			cpu_to_le16(rs_fw_he_ieee80211_mcs_to_rs_mcs(_mcs_80));
+		cmd->ht_rates[i][1] =
+			cpu_to_le16(rs_fw_he_ieee80211_mcs_to_rs_mcs(_mcs_160));
+	}
+}
+
 static void rs_fw_set_supp_rates(struct ieee80211_sta *sta,
 				 struct ieee80211_supported_band *sband,
 				 struct iwl_tlc_config_cmd *cmd)
@@ -192,6 +229,7 @@ static void rs_fw_set_supp_rates(struct ieee80211_sta *sta,
 	unsigned long supp; /* must be unsigned long for for_each_set_bit */
 	const struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
 	const struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
+	const struct ieee80211_sta_he_cap *he_cap = &sta->he_cap;
 
 	/* non HT rates */
 	supp = 0;
@@ -202,7 +240,11 @@ static void rs_fw_set_supp_rates(struct ieee80211_sta *sta,
 	cmd->non_ht_rates = cpu_to_le16(supp);
 	cmd->mode = IWL_TLC_MNG_MODE_NON_HT;
 
-	if (vht_cap && vht_cap->vht_supported) {
+	/* HT/VHT rates */
+	if (he_cap && he_cap->has_he) {
+		cmd->mode = IWL_TLC_MNG_MODE_HE;
+		rs_fw_he_set_enabled_rates(sta, he_cap, cmd);
+	} else if (vht_cap && vht_cap->vht_supported) {
 		cmd->mode = IWL_TLC_MNG_MODE_VHT;
 		rs_fw_vht_set_enabled_rates(sta, vht_cap, cmd);
 	} else if (ht_cap && ht_cap->ht_supported) {
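
A standalone sketch (not part of the patch) of the mapping rs_fw_he_ieee80211_mcs_to_rs_mcs() performs, assuming the IWL_TLC_MNG_HT_RATE_MCSn constants equal n, so each HE "highest supported MCS" code expands to a contiguous rate bitmap:

#include <stdio.h>
#include <stdint.h>

/* HE "highest supported MCS" codes from the 802.11ax MCS map */
enum { HE_MCS_0_7 = 0, HE_MCS_0_9 = 1, HE_MCS_0_11 = 2, HE_MCS_NONE = 3 };

static uint16_t he_mcs_to_bitmap(unsigned int mcs_support)
{
	switch (mcs_support) {
	case HE_MCS_0_7:  return (1U << 8) - 1;		/* 0x00ff */
	case HE_MCS_0_9:  return (1U << 10) - 1;	/* 0x03ff */
	case HE_MCS_0_11: return (1U << 12) - 1;	/* 0x0fff */
	default:          return 0;			/* not supported */
	}
}

int main(void)
{
	unsigned int v;

	for (v = 0; v < 4; v++)
		printf("support code %u -> rate bitmap 0x%04x\n",
		       v, he_mcs_to_bitmap(v));
	return 0;
}
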
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
index 642da10b0b7fdd448fff8db0ef3d4f82c85308a2..30cfd7d50bc939fae1d0111c2c4694f755c9c947 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
@@ -363,7 +363,8 @@ static int iwl_hwrate_to_plcp_idx(u32 rate_n_flags)
 			idx += 1;
 		if ((idx >= IWL_FIRST_HT_RATE) && (idx <= IWL_LAST_HT_RATE))
 			return idx;
-	} else if (rate_n_flags & RATE_MCS_VHT_MSK) {
+	} else if (rate_n_flags & RATE_MCS_VHT_MSK ||
+		   rate_n_flags & RATE_MCS_HE_MSK) {
 		idx = rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK;
 		idx += IWL_RATE_MCS_0_INDEX;
 
@@ -372,6 +373,9 @@ static int iwl_hwrate_to_plcp_idx(u32 rate_n_flags)
 			idx++;
 		if ((idx >= IWL_FIRST_VHT_RATE) && (idx <= IWL_LAST_VHT_RATE))
 			return idx;
+		if ((rate_n_flags & RATE_MCS_HE_MSK) &&
+		    (idx <= IWL_LAST_HE_RATE))
+			return idx;
 	} else {
 		/* legacy rate format, search for match in table */
 
@@ -516,6 +520,8 @@ static const char *rs_pretty_lq_type(enum iwl_table_type type)
 		[LQ_HT_MIMO2] = "HT MIMO",
 		[LQ_VHT_SISO] = "VHT SISO",
 		[LQ_VHT_MIMO2] = "VHT MIMO",
+		[LQ_HE_SISO] = "HE SISO",
+		[LQ_HE_MIMO2] = "HE MIMO",
 	};
 
 	if (type < LQ_NONE || type >= LQ_MAX)
@@ -900,7 +906,8 @@ static int rs_rate_from_ucode_rate(const u32 ucode_rate,
 
 	/* Legacy */
 	if (!(ucode_rate & RATE_MCS_HT_MSK) &&
-	    !(ucode_rate & RATE_MCS_VHT_MSK)) {
+	    !(ucode_rate & RATE_MCS_VHT_MSK) &&
+	    !(ucode_rate & RATE_MCS_HE_MSK)) {
 		if (num_of_ant == 1) {
 			if (band == NL80211_BAND_5GHZ)
 				rate->type = LQ_LEGACY_A;
@@ -911,7 +918,7 @@ static int rs_rate_from_ucode_rate(const u32 ucode_rate,
 		return 0;
 	}
 
-	/* HT or VHT */
+	/* HT, VHT or HE */
 	if (ucode_rate & RATE_MCS_SGI_MSK)
 		rate->sgi = true;
 	if (ucode_rate & RATE_MCS_LDPC_MSK)
@@ -953,10 +960,24 @@ static int rs_rate_from_ucode_rate(const u32 ucode_rate,
 		} else {
 			WARN_ON_ONCE(1);
 		}
+	} else if (ucode_rate & RATE_MCS_HE_MSK) {
+		nss = ((ucode_rate & RATE_VHT_MCS_NSS_MSK) >>
+		      RATE_VHT_MCS_NSS_POS) + 1;
+
+		if (nss == 1) {
+			rate->type = LQ_HE_SISO;
+			WARN_ONCE(!rate->stbc && !rate->bfer && num_of_ant != 1,
+				  "stbc %d bfer %d", rate->stbc, rate->bfer);
+		} else if (nss == 2) {
+			rate->type = LQ_HE_MIMO2;
+			WARN_ON_ONCE(num_of_ant != 2);
+		} else {
+			WARN_ON_ONCE(1);
+		}
 	}
 
 	WARN_ON_ONCE(rate->bw == RATE_MCS_CHAN_WIDTH_80 &&
-		     !is_vht(rate));
+		     !is_he(rate) && !is_vht(rate));
 
 	return 0;
 }
@@ -3606,7 +3627,8 @@ int rs_pretty_print_rate(char *buf, int bufsz, const u32 rate)
 	u8 ant = (rate & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS;
 
 	if (!(rate & RATE_MCS_HT_MSK) &&
-	    !(rate & RATE_MCS_VHT_MSK)) {
+	    !(rate & RATE_MCS_VHT_MSK) &&
+	    !(rate & RATE_MCS_HE_MSK)) {
 		int index = iwl_hwrate_to_plcp_idx(rate);
 
 		return scnprintf(buf, bufsz, "Legacy | ANT: %s Rate: %s Mbps\n",
@@ -3625,6 +3647,11 @@ int rs_pretty_print_rate(char *buf, int bufsz, const u32 rate)
 		mcs = rate & RATE_HT_MCS_INDEX_MSK;
 		nss = ((rate & RATE_HT_MCS_NSS_MSK)
 		       >> RATE_HT_MCS_NSS_POS) + 1;
+	} else if (rate & RATE_MCS_HE_MSK) {
+		type = "HE";
+		mcs = rate & RATE_VHT_MCS_RATE_CODE_MSK;
+		nss = ((rate & RATE_VHT_MCS_NSS_MSK)
+		       >> RATE_VHT_MCS_NSS_POS) + 1;
 	} else {
 		type = "Unknown"; /* shouldn't happen */
 	}
@@ -3886,6 +3913,8 @@ static ssize_t rs_sta_dbgfs_drv_tx_stats_read(struct file *file,
 		[IWL_RATE_MCS_7_INDEX] = "MCS7",
 		[IWL_RATE_MCS_8_INDEX] = "MCS8",
 		[IWL_RATE_MCS_9_INDEX] = "MCS9",
+		[IWL_RATE_MCS_10_INDEX] = "MCS10",
+		[IWL_RATE_MCS_11_INDEX] = "MCS11",
 	};
 
 	char *buff, *pos, *endpos;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.h b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h
index cffb8c852934b2aef96dde32e967b7a21b612d67..d2cf484e2b73be77bd61006d0aeb77683baec58d 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h
@@ -144,8 +144,13 @@ enum {
 
 #define LINK_QUAL_AGG_FRAME_LIMIT_DEF	(63)
 #define LINK_QUAL_AGG_FRAME_LIMIT_MAX	(63)
-#define LINK_QUAL_AGG_FRAME_LIMIT_GEN2_DEF	(64)
-#define LINK_QUAL_AGG_FRAME_LIMIT_GEN2_MAX	(64)
+/*
+ * FIXME - various places in firmware API still use u8,
+ * e.g. LQ command and SCD config command.
+ * This should be 256 instead.
+ */
+#define LINK_QUAL_AGG_FRAME_LIMIT_GEN2_DEF	(255)
+#define LINK_QUAL_AGG_FRAME_LIMIT_GEN2_MAX	(255)
 #define LINK_QUAL_AGG_FRAME_LIMIT_MIN	(0)
 
 #define LQ_SIZE		2	/* 2 mode tables:  "Active" and "Search" */
@@ -162,6 +167,8 @@ enum iwl_table_type {
 	LQ_HT_MIMO2,
 	LQ_VHT_SISO,    /* VHT types */
 	LQ_VHT_MIMO2,
+	LQ_HE_SISO,     /* HE types */
+	LQ_HE_MIMO2,
 	LQ_MAX,
 };
 
@@ -183,11 +190,16 @@ struct rs_rate {
 #define is_type_ht_mimo2(type) ((type) == LQ_HT_MIMO2)
 #define is_type_vht_siso(type) ((type) == LQ_VHT_SISO)
 #define is_type_vht_mimo2(type) ((type) == LQ_VHT_MIMO2)
-#define is_type_siso(type) (is_type_ht_siso(type) || is_type_vht_siso(type))
-#define is_type_mimo2(type) (is_type_ht_mimo2(type) || is_type_vht_mimo2(type))
+#define is_type_he_siso(type) ((type) == LQ_HE_SISO)
+#define is_type_he_mimo2(type) ((type) == LQ_HE_MIMO2)
+#define is_type_siso(type) (is_type_ht_siso(type) || is_type_vht_siso(type) || \
+			    is_type_he_siso(type))
+#define is_type_mimo2(type) (is_type_ht_mimo2(type) || \
+			     is_type_vht_mimo2(type) || is_type_he_mimo2(type))
 #define is_type_mimo(type) (is_type_mimo2(type))
 #define is_type_ht(type) (is_type_ht_siso(type) || is_type_ht_mimo2(type))
 #define is_type_vht(type) (is_type_vht_siso(type) || is_type_vht_mimo2(type))
+#define is_type_he(type) (is_type_he_siso(type) || is_type_he_mimo2(type))
 #define is_type_a_band(type) ((type) == LQ_LEGACY_A)
 #define is_type_g_band(type) ((type) == LQ_LEGACY_G)
 
@@ -201,6 +213,7 @@ struct rs_rate {
 #define is_mimo(rate)         is_type_mimo((rate)->type)
 #define is_ht(rate)           is_type_ht((rate)->type)
 #define is_vht(rate)          is_type_vht((rate)->type)
+#define is_he(rate)           is_type_he((rate)->type)
 #define is_a_band(rate)       is_type_a_band((rate)->type)
 #define is_g_band(rate)       is_type_g_band((rate)->type)
 
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
index 129c4c09648dcb35972eecbb344a25ad51339870..b53148f972a4a4e5c6e04d34ed7c052144235282 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
@@ -8,6 +8,7 @@
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -30,6 +31,7 @@
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -196,22 +198,31 @@ static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm,
 					    struct sk_buff *skb, int queue,
 					    struct ieee80211_sta *sta)
 {
-	if (iwl_mvm_check_pn(mvm, skb, queue, sta))
+	struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb);
+
+	if (iwl_mvm_check_pn(mvm, skb, queue, sta)) {
 		kfree_skb(skb);
-	else
+	} else {
+		unsigned int radiotap_len = 0;
+
+		if (rx_status->flag & RX_FLAG_RADIOTAP_HE)
+			radiotap_len += sizeof(struct ieee80211_radiotap_he);
+		if (rx_status->flag & RX_FLAG_RADIOTAP_HE_MU)
+			radiotap_len += sizeof(struct ieee80211_radiotap_he_mu);
+		__skb_push(skb, radiotap_len);
 		ieee80211_rx_napi(mvm->hw, sta, skb, napi);
+	}
 }
 
 static void iwl_mvm_get_signal_strength(struct iwl_mvm *mvm,
-					struct iwl_rx_mpdu_desc *desc,
-					struct ieee80211_rx_status *rx_status)
+					struct ieee80211_rx_status *rx_status,
+					u32 rate_n_flags, int energy_a,
+					int energy_b)
 {
-	int energy_a, energy_b, max_energy;
-	u32 rate_flags = le32_to_cpu(desc->rate_n_flags);
+	int max_energy;
+	u32 rate_flags = rate_n_flags;
 
-	energy_a = desc->energy_a;
 	energy_a = energy_a ? -energy_a : S8_MIN;
-	energy_b = desc->energy_b;
 	energy_b = energy_b ? -energy_b : S8_MIN;
 	max_energy = max(energy_a, energy_b);
 
@@ -356,7 +367,8 @@ static bool iwl_mvm_is_dup(struct ieee80211_sta *sta, int queue,
 		tid = IWL_MAX_TID_COUNT;
 
 	/* If this wasn't a part of an A-MSDU the sub-frame index will be 0 */
-	sub_frame_idx = desc->amsdu_info & IWL_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK;
+	sub_frame_idx = desc->amsdu_info &
+		IWL_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK;
 
 	if (unlikely(ieee80211_has_retry(hdr->frame_control) &&
 		     dup_data->last_seq[tid] == hdr->seq_ctrl &&
@@ -850,17 +862,41 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
 	struct ieee80211_rx_status *rx_status;
 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	struct iwl_rx_mpdu_desc *desc = (void *)pkt->data;
-	struct ieee80211_hdr *hdr = (void *)(pkt->data + sizeof(*desc));
+	struct ieee80211_hdr *hdr;
 	u32 len = le16_to_cpu(desc->mpdu_len);
-	u32 rate_n_flags = le32_to_cpu(desc->rate_n_flags);
+	u32 rate_n_flags, gp2_on_air_rise;
 	u16 phy_info = le16_to_cpu(desc->phy_info);
 	struct ieee80211_sta *sta = NULL;
 	struct sk_buff *skb;
-	u8 crypt_len = 0;
+	u8 crypt_len = 0, channel, energy_a, energy_b;
+	struct ieee80211_radiotap_he *he = NULL;
+	struct ieee80211_radiotap_he_mu *he_mu = NULL;
+	u32 he_type = 0xffffffff;
+	/* this value is invalid, e.g. the puncture type doesn't allow 0b11 */
+#define HE_PHY_DATA_INVAL ((u64)-1)
+	u64 he_phy_data = HE_PHY_DATA_INVAL;
+	size_t desc_size;
 
 	if (unlikely(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)))
 		return;
 
+	if (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
+		rate_n_flags = le32_to_cpu(desc->v3.rate_n_flags);
+		channel = desc->v3.channel;
+		gp2_on_air_rise = le32_to_cpu(desc->v3.gp2_on_air_rise);
+		energy_a = desc->v3.energy_a;
+		energy_b = desc->v3.energy_b;
+		desc_size = sizeof(*desc);
+	} else {
+		rate_n_flags = le32_to_cpu(desc->v1.rate_n_flags);
+		channel = desc->v1.channel;
+		gp2_on_air_rise = le32_to_cpu(desc->v1.gp2_on_air_rise);
+		energy_a = desc->v1.energy_a;
+		energy_b = desc->v1.energy_b;
+		desc_size = IWL_RX_DESC_SIZE_V1;
+	}
+
+	hdr = (void *)(pkt->data + desc_size);
 	/* Dont use dev_alloc_skb(), we'll have enough headroom once
 	 * ieee80211_hdr pulled.
 	 */
@@ -882,6 +918,51 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
 
 	rx_status = IEEE80211_SKB_RXCB(skb);
 
+	if (rate_n_flags & RATE_MCS_HE_MSK) {
+		static const struct ieee80211_radiotap_he known = {
+			.data1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN |
+					     IEEE80211_RADIOTAP_HE_DATA1_DATA_DCM_KNOWN |
+					     IEEE80211_RADIOTAP_HE_DATA1_STBC_KNOWN |
+					     IEEE80211_RADIOTAP_HE_DATA1_CODING_KNOWN),
+			.data2 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_GI_KNOWN |
+					     IEEE80211_RADIOTAP_HE_DATA2_TXBF_KNOWN),
+		};
+		static const struct ieee80211_radiotap_he_mu mu_known = {
+			.flags1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_MCS_KNOWN |
+					      IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_DCM_KNOWN |
+					      IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_SYMS_USERS_KNOWN |
+					      IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_COMP_KNOWN),
+			.flags2 = cpu_to_le16(IEEE80211_RADIOTAP_HE_MU_FLAGS2_PUNC_FROM_SIG_A_BW_KNOWN),
+		};
+		unsigned int radiotap_len = 0;
+
+		he = skb_put_data(skb, &known, sizeof(known));
+		radiotap_len += sizeof(known);
+		rx_status->flag |= RX_FLAG_RADIOTAP_HE;
+
+		he_type = rate_n_flags & RATE_MCS_HE_TYPE_MSK;
+
+		if (phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD) {
+			if (mvm->trans->cfg->device_family >=
+			    IWL_DEVICE_FAMILY_22560)
+				he_phy_data = le64_to_cpu(desc->v3.he_phy_data);
+			else
+				he_phy_data = le64_to_cpu(desc->v1.he_phy_data);
+
+			if (he_type == RATE_MCS_HE_TYPE_MU) {
+				he_mu = skb_put_data(skb, &mu_known,
+						     sizeof(mu_known));
+				radiotap_len += sizeof(mu_known);
+				rx_status->flag |= RX_FLAG_RADIOTAP_HE_MU;
+			}
+		}
+
+		/* temporarily hide the radiotap data */
+		__skb_pull(skb, radiotap_len);
+	}
+
+	rx_status = IEEE80211_SKB_RXCB(skb);
+
 	if (iwl_mvm_rx_crypto(mvm, hdr, rx_status, phy_info, desc,
 			      le32_to_cpu(pkt->len_n_flags), queue,
 			      &crypt_len)) {
@@ -904,20 +985,80 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
 		rx_status->enc_flags |= RX_ENC_FLAG_SHORTPRE;
 
 	if (likely(!(phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD))) {
-		rx_status->mactime = le64_to_cpu(desc->tsf_on_air_rise);
+		u64 tsf_on_air_rise;
+
+		if (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
+			tsf_on_air_rise = le64_to_cpu(desc->v3.tsf_on_air_rise);
+		else
+			tsf_on_air_rise = le64_to_cpu(desc->v1.tsf_on_air_rise);
+
+		rx_status->mactime = tsf_on_air_rise;
 		/* TSF as indicated by the firmware is at INA time */
 		rx_status->flag |= RX_FLAG_MACTIME_PLCP_START;
+	} else if (he_type == RATE_MCS_HE_TYPE_SU) {
+		u64 he_phy_data;
+
+		if (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
+			he_phy_data = le64_to_cpu(desc->v3.he_phy_data);
+		else
+			he_phy_data = le64_to_cpu(desc->v1.he_phy_data);
+
+		he->data1 |=
+			cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_UL_DL_KNOWN);
+		if (FIELD_GET(IWL_RX_HE_PHY_UPLINK,
+			      he_phy_data))
+			he->data3 |=
+				cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA3_UL_DL);
+
+		if (!queue && !(phy_info & IWL_RX_MPDU_PHY_AMPDU)) {
+			rx_status->ampdu_reference = mvm->ampdu_ref;
+			mvm->ampdu_ref++;
+
+			rx_status->flag |= RX_FLAG_AMPDU_DETAILS;
+			rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT_KNOWN;
+			if (FIELD_GET(IWL_RX_HE_PHY_DELIM_EOF,
+				      he_phy_data))
+				rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT;
+		}
+	} else if (he_mu && he_phy_data != HE_PHY_DATA_INVAL) {
+		he_mu->flags2 |=
+			le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_SIBG_SYM_OR_USER_NUM_MASK,
+						   he_phy_data),
+					 IEEE80211_RADIOTAP_HE_MU_FLAGS2_SIG_B_SYMS_USERS);
+		he_mu->flags1 |=
+			le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_SIGB_DCM,
+						   he_phy_data),
+					 IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_DCM);
+		he_mu->flags1 |=
+			le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_SIGB_MCS_MASK,
+						   he_phy_data),
+					 IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_MCS);
+		he_mu->flags2 |=
+			le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_SIGB_COMPRESSION,
+						   he_phy_data),
+					 IEEE80211_RADIOTAP_HE_MU_FLAGS2_SIG_B_COMP);
+		he_mu->flags2 |=
+			le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_PREAMBLE_PUNC_TYPE_MASK,
+						   he_phy_data),
+					 IEEE80211_RADIOTAP_HE_MU_FLAGS2_PUNC_FROM_SIG_A_BW);
 	}
-	rx_status->device_timestamp = le32_to_cpu(desc->gp2_on_air_rise);
-	rx_status->band = desc->channel > 14 ? NL80211_BAND_5GHZ :
-					       NL80211_BAND_2GHZ;
-	rx_status->freq = ieee80211_channel_to_frequency(desc->channel,
+	rx_status->device_timestamp = gp2_on_air_rise;
+	rx_status->band = channel > 14 ? NL80211_BAND_5GHZ :
+		NL80211_BAND_2GHZ;
+	rx_status->freq = ieee80211_channel_to_frequency(channel,
 							 rx_status->band);
-	iwl_mvm_get_signal_strength(mvm, desc, rx_status);
+	iwl_mvm_get_signal_strength(mvm, rx_status, rate_n_flags, energy_a,
+				    energy_b);
 
 	/* update aggregation data for monitor sake on default queue */
 	if (!queue && (phy_info & IWL_RX_MPDU_PHY_AMPDU)) {
 		bool toggle_bit = phy_info & IWL_RX_MPDU_PHY_AMPDU_TOGGLE;
+		u64 he_phy_data;
+
+		if (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
+			he_phy_data = le64_to_cpu(desc->v3.he_phy_data);
+		else
+			he_phy_data = le64_to_cpu(desc->v1.he_phy_data);
 
 		rx_status->flag |= RX_FLAG_AMPDU_DETAILS;
 		rx_status->ampdu_reference = mvm->ampdu_ref;
@@ -925,6 +1066,15 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
 		if (toggle_bit != mvm->ampdu_toggle) {
 			mvm->ampdu_ref++;
 			mvm->ampdu_toggle = toggle_bit;
+
+			if (he_phy_data != HE_PHY_DATA_INVAL &&
+			    he_type == RATE_MCS_HE_TYPE_MU) {
+				rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT_KNOWN;
+				if (FIELD_GET(IWL_RX_HE_PHY_DELIM_EOF,
+					      he_phy_data))
+					rx_status->flag |=
+						RX_FLAG_AMPDU_EOF_BIT;
+			}
 		}
 	}
 
@@ -1033,7 +1183,6 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
 		}
 	}
 
-	/* Set up the HT phy flags */
 	switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) {
 	case RATE_MCS_CHAN_WIDTH_20:
 		break;
@@ -1048,6 +1197,70 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
 		break;
 	}
 
+	if (he_type == RATE_MCS_HE_TYPE_EXT_SU &&
+	    rate_n_flags & RATE_MCS_HE_106T_MSK) {
+		rx_status->bw = RATE_INFO_BW_HE_RU;
+		rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_106;
+	}
+
+	if (rate_n_flags & RATE_MCS_HE_MSK &&
+	    phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD &&
+	    he_type == RATE_MCS_HE_TYPE_MU) {
+		/*
+		 * Unfortunately, we have to leave the mac80211 data
+		 * incorrect for the case that we receive an HE-MU
+		 * transmission and *don't* have the he_mu pointer,
+		 * i.e. we don't have the phy data (due to the bits
+		 * being used for TSF). This shouldn't happen, though,
+		 * as management frames, where we need the TSF/timers,
+		 * are not transmitted in HE-MU, I think.
+		 */
+		u8 ru = FIELD_GET(IWL_RX_HE_PHY_RU_ALLOC_MASK, he_phy_data);
+		u8 offs = 0;
+
+		rx_status->bw = RATE_INFO_BW_HE_RU;
+
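+		/*
+		 * The RU allocation index encodes both the RU size
+		 * (26/52/106/242/484/996/2x996 tones) and its position;
+		 * subtracting the first index of each range gives the
+		 * RU offset reported in radiotap.
+		 */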
+		switch (ru) {
+		case 0 ... 36:
+			rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_26;
+			offs = ru;
+			break;
+		case 37 ... 52:
+			rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_52;
+			offs = ru - 37;
+			break;
+		case 53 ... 60:
+			rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_106;
+			offs = ru - 53;
+			break;
+		case 61 ... 64:
+			rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_242;
+			offs = ru - 61;
+			break;
+		case 65 ... 66:
+			rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_484;
+			offs = ru - 65;
+			break;
+		case 67:
+			rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_996;
+			break;
+		case 68:
+			rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_2x996;
+			break;
+		}
+		he->data2 |=
+			le16_encode_bits(offs,
+					 IEEE80211_RADIOTAP_HE_DATA2_RU_OFFSET);
+		he->data2 |=
+			cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_PRISEC_80_KNOWN);
+		if (he_phy_data & IWL_RX_HE_PHY_RU_ALLOC_SEC80)
+			he->data2 |=
+				cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_PRISEC_80_SEC);
+	} else if (he) {
+		he->data1 |=
+			cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN);
+	}
+
 	if (!(rate_n_flags & RATE_MCS_CCK_MSK) &&
 	    rate_n_flags & RATE_MCS_SGI_MSK)
 		rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
@@ -1072,6 +1285,119 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
 		rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT;
 		if (rate_n_flags & RATE_MCS_BF_MSK)
 			rx_status->enc_flags |= RX_ENC_FLAG_BF;
+	} else if (he) {
+		u8 stbc = (rate_n_flags & RATE_MCS_STBC_MSK) >>
+				RATE_MCS_STBC_POS;
+		rx_status->nss =
+			((rate_n_flags & RATE_VHT_MCS_NSS_MSK) >>
+						RATE_VHT_MCS_NSS_POS) + 1;
+		rx_status->rate_idx = rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK;
+		rx_status->encoding = RX_ENC_HE;
+		rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT;
+		if (rate_n_flags & RATE_MCS_BF_MSK)
+			rx_status->enc_flags |= RX_ENC_FLAG_BF;
+
+		rx_status->he_dcm =
+			!!(rate_n_flags & RATE_HE_DUAL_CARRIER_MODE_MSK);
+
+#define CHECK_TYPE(F)							\
+	BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_DATA1_FORMAT_ ## F !=	\
+		     (RATE_MCS_HE_TYPE_ ## F >> RATE_MCS_HE_TYPE_POS))
+
+		CHECK_TYPE(SU);
+		CHECK_TYPE(EXT_SU);
+		CHECK_TYPE(MU);
+		CHECK_TYPE(TRIG);
+
+		he->data1 |= cpu_to_le16(he_type >> RATE_MCS_HE_TYPE_POS);
+
+		if (rate_n_flags & RATE_MCS_BF_MSK)
+			he->data5 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA5_TXBF);
+
+		switch ((rate_n_flags & RATE_MCS_HE_GI_LTF_MSK) >>
+			RATE_MCS_HE_GI_LTF_POS) {
+		case 0:
+			rx_status->he_gi = NL80211_RATE_INFO_HE_GI_0_8;
+			break;
+		case 1:
+			rx_status->he_gi = NL80211_RATE_INFO_HE_GI_0_8;
+			break;
+		case 2:
+			rx_status->he_gi = NL80211_RATE_INFO_HE_GI_1_6;
+			break;
+		case 3:
+			if (rate_n_flags & RATE_MCS_SGI_MSK)
+				rx_status->he_gi = NL80211_RATE_INFO_HE_GI_0_8;
+			else
+				rx_status->he_gi = NL80211_RATE_INFO_HE_GI_3_2;
+			break;
+		}
+
+		switch (he_type) {
+		case RATE_MCS_HE_TYPE_SU: {
+			u16 val;
+
+			/* LTF syms correspond to streams */
+			he->data2 |=
+				cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_NUM_LTF_SYMS_KNOWN);
+			switch (rx_status->nss) {
+			case 1:
+				val = 0;
+				break;
+			case 2:
+				val = 1;
+				break;
+			case 3:
+			case 4:
+				val = 2;
+				break;
+			case 5:
+			case 6:
+				val = 3;
+				break;
+			case 7:
+			case 8:
+				val = 4;
+				break;
+			default:
+				WARN_ONCE(1, "invalid nss: %d\n",
+					  rx_status->nss);
+				val = 0;
+			}
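+			/*
+			 * val is the radiotap encoding of the HE-LTF
+			 * symbol count implied by the stream count
+			 * mapping above.
+			 */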
+			he->data5 |=
+				le16_encode_bits(val,
+						 IEEE80211_RADIOTAP_HE_DATA5_NUM_LTF_SYMS);
+			}
+			break;
+		case RATE_MCS_HE_TYPE_MU: {
+			u16 val;
+			u64 he_phy_data;
+
+			if (mvm->trans->cfg->device_family >=
+			    IWL_DEVICE_FAMILY_22560)
+				he_phy_data = le64_to_cpu(desc->v3.he_phy_data);
+			else
+				he_phy_data = le64_to_cpu(desc->v1.he_phy_data);
+
+			if (he_phy_data == HE_PHY_DATA_INVAL)
+				break;
+
+			val = FIELD_GET(IWL_RX_HE_PHY_HE_LTF_NUM_MASK,
+					he_phy_data);
+
+			he->data2 |=
+				cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_NUM_LTF_SYMS_KNOWN);
+			he->data5 |=
+				cpu_to_le16(FIELD_PREP(
+					IEEE80211_RADIOTAP_HE_DATA5_NUM_LTF_SYMS,
+					val));
+			}
+			break;
+		case RATE_MCS_HE_TYPE_EXT_SU:
+		case RATE_MCS_HE_TYPE_TRIG:
+			/* not supported yet */
+			break;
+		}
 	} else {
 		int rate = iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags,
 							       rx_status->band);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index 9263b9aa8b72fff47dcdee806ac45d8b1fc9253b..18db1ed92d9b09741e0fec60e66389bd5af18dea 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -2184,7 +2184,7 @@ static void iwl_mvm_free_reorder(struct iwl_mvm *mvm,
 
 static void iwl_mvm_init_reorder_buffer(struct iwl_mvm *mvm,
 					struct iwl_mvm_baid_data *data,
-					u16 ssn, u8 buf_size)
+					u16 ssn, u16 buf_size)
 {
 	int i;
 
@@ -2211,7 +2211,7 @@ static void iwl_mvm_init_reorder_buffer(struct iwl_mvm *mvm,
 }
 
 int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
-		       int tid, u16 ssn, bool start, u8 buf_size, u16 timeout)
+		       int tid, u16 ssn, bool start, u16 buf_size, u16 timeout)
 {
 	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
 	struct iwl_mvm_add_sta_cmd cmd = {};
@@ -2273,7 +2273,7 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
 	if (start) {
 		cmd.add_immediate_ba_tid = (u8) tid;
 		cmd.add_immediate_ba_ssn = cpu_to_le16(ssn);
-		cmd.rx_ba_window = cpu_to_le16((u16)buf_size);
+		cmd.rx_ba_window = cpu_to_le16(buf_size);
 	} else {
 		cmd.remove_immediate_ba_tid = (u8) tid;
 	}
@@ -2559,7 +2559,7 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 }
 
 int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
-			    struct ieee80211_sta *sta, u16 tid, u8 buf_size,
+			    struct ieee80211_sta *sta, u16 tid, u16 buf_size,
 			    bool amsdu)
 {
 	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
index 1c43ea8dd8cc15c2ae226c001161f767beba9a8a..0fc211108149ffcf400d1a64a59b421642147910 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
@@ -412,7 +412,7 @@ struct iwl_mvm_sta {
 	u32 tfd_queue_msk;
 	u32 mac_id_n_color;
 	u16 tid_disable_agg;
-	u8 max_agg_bufsize;
+	u16 max_agg_bufsize;
 	enum iwl_sta_type sta_type;
 	enum ieee80211_sta_state sta_state;
 	bool bt_reduced_txpower;
@@ -518,11 +518,11 @@ void iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm,
 
 /* AMPDU */
 int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
-		       int tid, u16 ssn, bool start, u8 buf_size, u16 timeout);
+		       int tid, u16 ssn, bool start, u16 buf_size, u16 timeout);
 int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 			struct ieee80211_sta *sta, u16 tid, u16 *ssn);
 int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
-			    struct ieee80211_sta *sta, u16 tid, u8 buf_size,
+			    struct ieee80211_sta *sta, u16 tid, u16 buf_size,
 			    bool amsdu);
 int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 			    struct ieee80211_sta *sta, u16 tid);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
index cf2591f2ac236ad9d03f54bffab350391750c21c..ff193dca2020c99987ca91df4a2d07ef6f58dbfd 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -484,13 +484,15 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
 
 	/* Make sure we zero enough of dev_cmd */
 	BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen2) > sizeof(*tx_cmd));
+	BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen3) > sizeof(*tx_cmd));
 
 	memset(dev_cmd, 0, sizeof(dev_cmd->hdr) + sizeof(*tx_cmd));
 	dev_cmd->hdr.cmd = TX_CMD;
 
 	if (iwl_mvm_has_new_tx_api(mvm)) {
-		struct iwl_tx_cmd_gen2 *cmd = (void *)dev_cmd->payload;
 		u16 offload_assist = 0;
+		u32 rate_n_flags = 0;
+		u16 flags = 0;
 
 		if (ieee80211_is_data_qos(hdr->frame_control)) {
 			u8 *qc = ieee80211_get_qos_ctl(hdr);
@@ -507,25 +509,43 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
 		    !(offload_assist & BIT(TX_CMD_OFFLD_AMSDU)))
 			offload_assist |= BIT(TX_CMD_OFFLD_PAD);
 
-		cmd->offload_assist |= cpu_to_le16(offload_assist);
+		if (!info->control.hw_key)
+			flags |= IWL_TX_FLAGS_ENCRYPT_DIS;
 
-		/* Total # bytes to be transmitted */
-		cmd->len = cpu_to_le16((u16)skb->len);
+		/* For data packets rate info comes from the fw */
+		if (!(ieee80211_is_data(hdr->frame_control) && sta)) {
+			flags |= IWL_TX_FLAGS_CMD_RATE;
+			rate_n_flags = iwl_mvm_get_tx_rate(mvm, info, sta);
+		}
 
-		/* Copy MAC header from skb into command buffer */
-		memcpy(cmd->hdr, hdr, hdrlen);
+		if (mvm->trans->cfg->device_family >=
+		    IWL_DEVICE_FAMILY_22560) {
+			struct iwl_tx_cmd_gen3 *cmd = (void *)dev_cmd->payload;
 
-		if (!info->control.hw_key)
-			cmd->flags |= cpu_to_le32(IWL_TX_FLAGS_ENCRYPT_DIS);
+			cmd->offload_assist |= cpu_to_le32(offload_assist);
 
-		/* For data packets rate info comes from the fw */
-		if (ieee80211_is_data(hdr->frame_control) && sta)
-			goto out;
+			/* Total # bytes to be transmitted */
+			cmd->len = cpu_to_le16((u16)skb->len);
 
-		cmd->flags |= cpu_to_le32(IWL_TX_FLAGS_CMD_RATE);
-		cmd->rate_n_flags =
-			cpu_to_le32(iwl_mvm_get_tx_rate(mvm, info, sta));
+			/* Copy MAC header from skb into command buffer */
+			memcpy(cmd->hdr, hdr, hdrlen);
 
+			cmd->flags = cpu_to_le16(flags);
+			cmd->rate_n_flags = cpu_to_le32(rate_n_flags);
+		} else {
+			struct iwl_tx_cmd_gen2 *cmd = (void *)dev_cmd->payload;
+
+			cmd->offload_assist |= cpu_to_le16(offload_assist);
+
+			/* Total # bytes to be transmitted */
+			cmd->len = cpu_to_le16((u16)skb->len);
+
+			/* Copy MAC header from skb into command buffer */
+			memcpy(cmd->hdr, hdr, hdrlen);
+
+			cmd->flags = cpu_to_le32(flags);
+			cmd->rate_n_flags = cpu_to_le32(rate_n_flags);
+		}
 		goto out;
 	}
 
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
new file mode 100644
index 0000000000000000000000000000000000000000..2146fda8da2fdbdece661ceb3e177ab7c5c2b83e
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
@@ -0,0 +1,207 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2018 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2018 Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#include "iwl-trans.h"
+#include "iwl-fh.h"
+#include "iwl-context-info-gen3.h"
+#include "internal.h"
+#include "iwl-prph.h"
+
+int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
+				 const struct fw_img *fw)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct iwl_context_info_gen3 *ctxt_info_gen3;
+	struct iwl_prph_scratch *prph_scratch;
+	struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl;
+	struct iwl_prph_info *prph_info;
+	void *iml_img;
+	u32 control_flags = 0;
+	int ret;
+
+	/* Allocate prph scratch */
+	prph_scratch = dma_alloc_coherent(trans->dev, sizeof(*prph_scratch),
+					  &trans_pcie->prph_scratch_dma_addr,
+					  GFP_KERNEL);
+	if (!prph_scratch)
+		return -ENOMEM;
+
+	prph_sc_ctrl = &prph_scratch->ctrl_cfg;
+
+	prph_sc_ctrl->version.version = 0;
+	prph_sc_ctrl->version.mac_id =
+		cpu_to_le16((u16)iwl_read32(trans, CSR_HW_REV));
+	prph_sc_ctrl->version.size = cpu_to_le16(sizeof(*prph_scratch) / 4);
+
+	control_flags = IWL_PRPH_SCRATCH_RB_SIZE_4K |
+			IWL_PRPH_SCRATCH_MTR_MODE |
+			(IWL_PRPH_MTR_FORMAT_256B &
+			 IWL_PRPH_SCRATCH_MTR_FORMAT) |
+			IWL_PRPH_SCRATCH_EARLY_DEBUG_EN |
+			IWL_PRPH_SCRATCH_EDBG_DEST_DRAM;
+	prph_sc_ctrl->control.control_flags = cpu_to_le32(control_flags);
+
+	/* initialize RX default queue */
+	prph_sc_ctrl->rbd_cfg.free_rbd_addr =
+		cpu_to_le64(trans_pcie->rxq->bd_dma);
+
+	/* Configure debug, for integration */
+	iwl_pcie_alloc_fw_monitor(trans, 0);
+	prph_sc_ctrl->hwm_cfg.hwm_base_addr =
+		cpu_to_le64(trans_pcie->fw_mon_phys);
+	prph_sc_ctrl->hwm_cfg.hwm_size =
+		cpu_to_le32(trans_pcie->fw_mon_size);
+
+	/* allocate ucode sections in dram and set addresses */
+	ret = iwl_pcie_init_fw_sec(trans, fw, &prph_scratch->dram);
+	if (ret) {
+		dma_free_coherent(trans->dev,
+				  sizeof(*prph_scratch),
+				  prph_scratch,
+				  trans_pcie->prph_scratch_dma_addr);
+		return ret;
+	}
+
+	/* Allocate prph information; currently we don't assign anything
+	 * to it, but it will be assigned later.
+	 */
+	prph_info = dma_alloc_coherent(trans->dev, sizeof(*prph_info),
+				       &trans_pcie->prph_info_dma_addr,
+				       GFP_KERNEL);
+	if (!prph_info)
+		return -ENOMEM;
+
+	/* Allocate context info */
+	ctxt_info_gen3 = dma_alloc_coherent(trans->dev,
+					    sizeof(*ctxt_info_gen3),
+					    &trans_pcie->ctxt_info_dma_addr,
+					    GFP_KERNEL);
+	if (!ctxt_info_gen3)
+		return -ENOMEM;
+
+	ctxt_info_gen3->prph_info_base_addr =
+		cpu_to_le64(trans_pcie->prph_info_dma_addr);
+	ctxt_info_gen3->prph_scratch_base_addr =
+		cpu_to_le64(trans_pcie->prph_scratch_dma_addr);
+	ctxt_info_gen3->prph_scratch_size =
+		cpu_to_le32(sizeof(*prph_scratch));
+	ctxt_info_gen3->cr_head_idx_arr_base_addr =
+		cpu_to_le64(trans_pcie->rxq->rb_stts_dma);
+	ctxt_info_gen3->tr_tail_idx_arr_base_addr =
+		cpu_to_le64(trans_pcie->rxq->tr_tail_dma);
+	ctxt_info_gen3->cr_tail_idx_arr_base_addr =
+		cpu_to_le64(trans_pcie->rxq->cr_tail_dma);
+	ctxt_info_gen3->cr_idx_arr_size =
+		cpu_to_le16(IWL_NUM_OF_COMPLETION_RINGS);
+	ctxt_info_gen3->tr_idx_arr_size =
+		cpu_to_le16(IWL_NUM_OF_TRANSFER_RINGS);
+	ctxt_info_gen3->mtr_base_addr =
+		cpu_to_le64(trans_pcie->txq[trans_pcie->cmd_queue]->dma_addr);
+	ctxt_info_gen3->mcr_base_addr =
+		cpu_to_le64(trans_pcie->rxq->used_bd_dma);
+	ctxt_info_gen3->mtr_size =
+		cpu_to_le16(TFD_QUEUE_CB_SIZE(TFD_CMD_SLOTS));
+	ctxt_info_gen3->mcr_size =
+		cpu_to_le16(RX_QUEUE_CB_SIZE(MQ_RX_TABLE_SIZE));
+
+	trans_pcie->ctxt_info_gen3 = ctxt_info_gen3;
+	trans_pcie->prph_info = prph_info;
+	trans_pcie->prph_scratch = prph_scratch;
+
+	/* Allocate IML */
+	iml_img = dma_alloc_coherent(trans->dev, trans->iml_len,
+				     &trans_pcie->iml_dma_addr, GFP_KERNEL);
+	if (!iml_img)
+		return -ENOMEM;
+
+	memcpy(iml_img, trans->iml, trans->iml_len);
+
+	iwl_enable_interrupts(trans);
+
+	/* kick FW self load */
+	iwl_write64(trans, CSR_CTXT_INFO_ADDR,
+		    trans_pcie->ctxt_info_dma_addr);
+	iwl_write64(trans, CSR_IML_DATA_ADDR,
+		    trans_pcie->iml_dma_addr);
+	iwl_write32(trans, CSR_IML_SIZE_ADDR, trans->iml_len);
+	iwl_set_bit(trans, CSR_CTXT_INFO_BOOT_CTRL, CSR_AUTO_FUNC_BOOT_ENA);
+	iwl_set_bit(trans, CSR_GP_CNTRL, CSR_AUTO_FUNC_INIT);
+
+	return 0;
+}
+
+void iwl_pcie_ctxt_info_gen3_free(struct iwl_trans *trans)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+	if (!trans_pcie->ctxt_info_gen3)
+		return;
+
+	dma_free_coherent(trans->dev, sizeof(*trans_pcie->ctxt_info_gen3),
+			  trans_pcie->ctxt_info_gen3,
+			  trans_pcie->ctxt_info_dma_addr);
+	trans_pcie->ctxt_info_dma_addr = 0;
+	trans_pcie->ctxt_info_gen3 = NULL;
+
+	iwl_pcie_ctxt_info_free_fw_img(trans);
+
+	dma_free_coherent(trans->dev, sizeof(*trans_pcie->prph_scratch),
+			  trans_pcie->prph_scratch,
+			  trans_pcie->prph_scratch_dma_addr);
+	trans_pcie->prph_scratch_dma_addr = 0;
+	trans_pcie->prph_scratch = NULL;
+
+	dma_free_coherent(trans->dev, sizeof(*trans_pcie->prph_info),
+			  trans_pcie->prph_info,
+			  trans_pcie->prph_info_dma_addr);
+	trans_pcie->prph_info_dma_addr = 0;
+	trans_pcie->prph_info = NULL;
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c
index 3fc4343581eee27c2234909a0554aac67a2b3c93..b2cd7ef5fc3a9ba3b37351745d6fdedfda985cf9 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c
@@ -6,6 +6,7 @@
  * GPL LICENSE SUMMARY
  *
  * Copyright(c) 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -19,6 +20,7 @@
  * BSD LICENSE
  *
  * Copyright(c) 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -55,57 +57,6 @@
 #include "internal.h"
 #include "iwl-prph.h"
 
-static int iwl_pcie_get_num_sections(const struct fw_img *fw,
-				     int start)
-{
-	int i = 0;
-
-	while (start < fw->num_sec &&
-	       fw->sec[start].offset != CPU1_CPU2_SEPARATOR_SECTION &&
-	       fw->sec[start].offset != PAGING_SEPARATOR_SECTION) {
-		start++;
-		i++;
-	}
-
-	return i;
-}
-
-static int iwl_pcie_ctxt_info_alloc_dma(struct iwl_trans *trans,
-					const struct fw_desc *sec,
-					struct iwl_dram_data *dram)
-{
-	dram->block = dma_alloc_coherent(trans->dev, sec->len,
-					 &dram->physical,
-					 GFP_KERNEL);
-	if (!dram->block)
-		return -ENOMEM;
-
-	dram->size = sec->len;
-	memcpy(dram->block, sec->data, sec->len);
-
-	return 0;
-}
-
-static void iwl_pcie_ctxt_info_free_fw_img(struct iwl_trans *trans)
-{
-	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	struct iwl_self_init_dram *dram = &trans_pcie->init_dram;
-	int i;
-
-	if (!dram->fw) {
-		WARN_ON(dram->fw_cnt);
-		return;
-	}
-
-	for (i = 0; i < dram->fw_cnt; i++)
-		dma_free_coherent(trans->dev, dram->fw[i].size,
-				  dram->fw[i].block, dram->fw[i].physical);
-
-	kfree(dram->fw);
-	dram->fw_cnt = 0;
-	dram->fw = NULL;
-}
-
 void iwl_pcie_ctxt_info_free_paging(struct iwl_trans *trans)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -128,13 +79,12 @@ void iwl_pcie_ctxt_info_free_paging(struct iwl_trans *trans)
 	dram->paging = NULL;
 }
 
-static int iwl_pcie_ctxt_info_init_fw_sec(struct iwl_trans *trans,
-					  const struct fw_img *fw,
-					  struct iwl_context_info *ctxt_info)
+int iwl_pcie_init_fw_sec(struct iwl_trans *trans,
+			 const struct fw_img *fw,
+			 struct iwl_context_info_dram *ctxt_dram)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	struct iwl_self_init_dram *dram = &trans_pcie->init_dram;
-	struct iwl_context_info_dram *ctxt_dram = &ctxt_info->dram;
 	int i, ret, lmac_cnt, umac_cnt, paging_cnt;
 
 	if (WARN(dram->paging,
@@ -247,7 +197,7 @@ int iwl_pcie_ctxt_info_init(struct iwl_trans *trans,
 		TFD_QUEUE_CB_SIZE(TFD_CMD_SLOTS);
 
 	/* allocate ucode sections in dram and set addresses */
-	ret = iwl_pcie_ctxt_info_init_fw_sec(trans, fw, ctxt_info);
+	ret = iwl_pcie_init_fw_sec(trans, fw, &ctxt_info->dram);
 	if (ret) {
 		dma_free_coherent(trans->dev, sizeof(*trans_pcie->ctxt_info),
 				  ctxt_info, trans_pcie->ctxt_info_dma_addr);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
index 8520523b91b40b5a0f6a00b0fe02c8ba33254aa4..562cc79288a677a537856bbd3f92575b049e4cbd 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
@@ -828,19 +828,32 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
 	{IWL_PCI_DEVICE(0xA370, 0x42A4, iwl9462_2ac_cfg_soc)},
 
 /* 22000 Series */
-	{IWL_PCI_DEVICE(0x2720, 0x0A10, iwl22000_2ac_cfg_hr_cdb)},
-	{IWL_PCI_DEVICE(0x34F0, 0x0310, iwl22000_2ac_cfg_jf)},
 	{IWL_PCI_DEVICE(0x2720, 0x0000, iwl22000_2ax_cfg_hr)},
-	{IWL_PCI_DEVICE(0x34F0, 0x0070, iwl22000_2ax_cfg_hr)},
+	{IWL_PCI_DEVICE(0x2720, 0x0040, iwl22000_2ax_cfg_hr)},
 	{IWL_PCI_DEVICE(0x2720, 0x0078, iwl22000_2ax_cfg_hr)},
 	{IWL_PCI_DEVICE(0x2720, 0x0070, iwl22000_2ac_cfg_hr_cdb)},
 	{IWL_PCI_DEVICE(0x2720, 0x0030, iwl22000_2ac_cfg_hr_cdb)},
 	{IWL_PCI_DEVICE(0x2720, 0x1080, iwl22000_2ax_cfg_hr)},
 	{IWL_PCI_DEVICE(0x2720, 0x0090, iwl22000_2ac_cfg_hr_cdb)},
 	{IWL_PCI_DEVICE(0x2720, 0x0310, iwl22000_2ac_cfg_hr_cdb)},
-	{IWL_PCI_DEVICE(0x40C0, 0x0000, iwl22000_2ax_cfg_hr)},
-	{IWL_PCI_DEVICE(0x40C0, 0x0A10, iwl22000_2ax_cfg_hr)},
+	{IWL_PCI_DEVICE(0x34F0, 0x0040, iwl22000_2ax_cfg_hr)},
+	{IWL_PCI_DEVICE(0x34F0, 0x0070, iwl22000_2ax_cfg_hr)},
+	{IWL_PCI_DEVICE(0x34F0, 0x0078, iwl22000_2ax_cfg_hr)},
+	{IWL_PCI_DEVICE(0x34F0, 0x0310, iwl22000_2ac_cfg_jf)},
+	{IWL_PCI_DEVICE(0x40C0, 0x0000, iwl22560_2ax_cfg_su_cdb)},
+	{IWL_PCI_DEVICE(0x40C0, 0x0010, iwl22560_2ax_cfg_su_cdb)},
+	{IWL_PCI_DEVICE(0x40c0, 0x0090, iwl22560_2ax_cfg_su_cdb)},
+	{IWL_PCI_DEVICE(0x40C0, 0x0310, iwl22560_2ax_cfg_su_cdb)},
+	{IWL_PCI_DEVICE(0x40C0, 0x0A10, iwl22560_2ax_cfg_su_cdb)},
+	{IWL_PCI_DEVICE(0x43F0, 0x0040, iwl22000_2ax_cfg_hr)},
+	{IWL_PCI_DEVICE(0x43F0, 0x0070, iwl22000_2ax_cfg_hr)},
+	{IWL_PCI_DEVICE(0x43F0, 0x0078, iwl22000_2ax_cfg_hr)},
 	{IWL_PCI_DEVICE(0xA0F0, 0x0000, iwl22000_2ax_cfg_hr)},
+	{IWL_PCI_DEVICE(0xA0F0, 0x0040, iwl22000_2ax_cfg_hr)},
+	{IWL_PCI_DEVICE(0xA0F0, 0x0070, iwl22000_2ax_cfg_hr)},
+	{IWL_PCI_DEVICE(0xA0F0, 0x0078, iwl22000_2ax_cfg_hr)},
+	{IWL_PCI_DEVICE(0xA0F0, 0x00B0, iwl22000_2ax_cfg_hr)},
+	{IWL_PCI_DEVICE(0xA0F0, 0x0A10, iwl22000_2ax_cfg_hr)},
 
 #endif /* CONFIG_IWLMVM */
 
@@ -1003,6 +1016,10 @@ static int iwl_pci_resume(struct device *device)
 	if (!trans->op_mode)
 		return 0;
 
+	/* In WOWLAN, let iwl_trans_pcie_d3_resume do the rest of the work */
+	if (test_bit(STATUS_DEVICE_ENABLED, &trans->status))
+		return 0;
+
 	/* reconfigure the MSI-X mapping to get the correct IRQ for rfkill */
 	iwl_pcie_conf_msix_hw(trans_pcie);
 
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
index 45ea32796cdaa7b030f55267f434f0084d1d8c44..b63d44b7cd7c7be1e6a0a3fe0081edc5ea4820b7 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
@@ -3,6 +3,7 @@
  * Copyright(c) 2003 - 2015 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
  *
  * Portions of this file are derived from the ipw3945 project, as well
  * as portions of the ieee80211 subsystem header files.
@@ -17,8 +18,7 @@
  * more details.
  *
  * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ * this program.
  *
  * The full GNU General Public License is included in this distribution in the
  * file called LICENSE.
@@ -45,6 +45,7 @@
 #include "iwl-debug.h"
 #include "iwl-io.h"
 #include "iwl-op-mode.h"
+#include "iwl-drv.h"
 
 /* We need 2 entries for the TX command and header, and another one might
  * be needed for potential data in the SKB's head. The remaining ones can
@@ -59,6 +60,7 @@
 #define RX_POST_REQ_ALLOC 2
 #define RX_CLAIM_REQ_ALLOC 8
 #define RX_PENDING_WATERMARK 16
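+/* Base index used together with the RX queue id to address a HW RX
+ * queue when updating its write pointer on 22560-family devices
+ * (see iwl_pcie_rxq_inc_wr_ptr()).
+ */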
+#define FIRST_RX_QUEUE 512
 
 struct iwl_host_cmd;
 
@@ -71,6 +73,7 @@ struct iwl_host_cmd;
  * @page: driver's pointer to the rxb page
  * @invalid: rxb is in driver ownership - not owned by HW
  * @vid: index of this rxb in the global table
+ * @size: size used from the buffer
  */
 struct iwl_rx_mem_buffer {
 	dma_addr_t page_dma;
@@ -78,6 +81,7 @@ struct iwl_rx_mem_buffer {
 	u16 vid;
 	bool invalid;
 	struct list_head list;
+	u32 size;
 };
 
 /**
@@ -98,14 +102,121 @@ struct isr_statistics {
 	u32 unhandled;
 };
 
+#define IWL_CD_STTS_OPTIMIZED_POS	0
+#define IWL_CD_STTS_OPTIMIZED_MSK	0x01
+#define IWL_CD_STTS_TRANSFER_STATUS_POS	1
+#define IWL_CD_STTS_TRANSFER_STATUS_MSK	0x0E
+#define IWL_CD_STTS_WIFI_STATUS_POS	4
+#define IWL_CD_STTS_WIFI_STATUS_MSK	0xF0
+
+/**
+ * enum iwl_completion_desc_transfer_status - transfer status (bits 1-3)
+ * @IWL_CD_STTS_END_TRANSFER: successful transfer complete.
+ *	In sniffer mode, when split is used, set in last CD completion. (RX)
+ * @IWL_CD_STTS_OVERFLOW: In sniffer mode, when split is used, set in
+ *	all CD completions. (RX)
+ * @IWL_CD_STTS_ABORTED: CR abort / close flow. (RX)
+ */
+enum iwl_completion_desc_transfer_status {
+	IWL_CD_STTS_UNUSED,
+	IWL_CD_STTS_UNUSED_2,
+	IWL_CD_STTS_END_TRANSFER,
+	IWL_CD_STTS_OVERFLOW,
+	IWL_CD_STTS_ABORTED,
+	IWL_CD_STTS_ERROR,
+};
+
+/**
+ * enum iwl_completion_desc_wifi_status - wifi status (bits 4-7)
+ * @IWL_CD_STTS_VALID: the packet is valid (RX)
+ * @IWL_CD_STTS_FCS_ERR: frame check sequence error (RX)
+ * @IWL_CD_STTS_SEC_KEY_ERR: error handling the security key of rx (RX)
+ * @IWL_CD_STTS_DECRYPTION_ERR: error decrypting the frame (RX)
+ * @IWL_CD_STTS_DUP: duplicate packet (RX)
+ * @IWL_CD_STTS_ICV_MIC_ERR: MIC error (RX)
+ * @IWL_CD_STTS_INTERNAL_SNAP_ERR: problems removing the snap (RX)
+ * @IWL_CD_STTS_SEC_PORT_FAIL: security port fail (RX)
+ * @IWL_CD_STTS_BA_OLD_SN: block ack received old SN (RX)
+ * @IWL_CD_STTS_QOS_NULL: QoS null packet (RX)
+ * @IWL_CD_STTS_MAC_HDR_ERR: MAC header conversion error (RX)
+ * @IWL_CD_STTS_MAX_RETRANS: reached max number of retransmissions (TX)
+ * @IWL_CD_STTS_EX_LIFETIME: exceeded lifetime (TX)
+ * @IWL_CD_STTS_NOT_USED: completed but not used (RX)
+ * @IWL_CD_STTS_REPLAY_ERR: pn check failed, replay error (RX)
+ */
+enum iwl_completion_desc_wifi_status {
+	IWL_CD_STTS_VALID,
+	IWL_CD_STTS_FCS_ERR,
+	IWL_CD_STTS_SEC_KEY_ERR,
+	IWL_CD_STTS_DECRYPTION_ERR,
+	IWL_CD_STTS_DUP,
+	IWL_CD_STTS_ICV_MIC_ERR,
+	IWL_CD_STTS_INTERNAL_SNAP_ERR,
+	IWL_CD_STTS_SEC_PORT_FAIL,
+	IWL_CD_STTS_BA_OLD_SN,
+	IWL_CD_STTS_QOS_NULL,
+	IWL_CD_STTS_MAC_HDR_ERR,
+	IWL_CD_STTS_MAX_RETRANS,
+	IWL_CD_STTS_EX_LIFETIME,
+	IWL_CD_STTS_NOT_USED,
+	IWL_CD_STTS_REPLAY_ERR,
+};
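+
+/*
+ * Illustrative decode of a completion descriptor status byte, using the
+ * IWL_CD_STTS_* masks above:
+ *	wifi_status = (status & IWL_CD_STTS_WIFI_STATUS_MSK) >>
+ *		      IWL_CD_STTS_WIFI_STATUS_POS;
+ */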
+
+#define IWL_RX_TD_TYPE_MSK	0xff000000
+#define IWL_RX_TD_SIZE_MSK	0x00ffffff
+#define IWL_RX_TD_SIZE_2K	BIT(11)
+#define IWL_RX_TD_TYPE		0
+
+/**
+ * struct iwl_rx_transfer_desc - transfer descriptor
+ * @type_n_size: buffer type (bit 0: external buff valid,
+ *	bit 1: optional footer valid, bits 2-7: reserved)
+ *	and buffer size
+ * @addr: ptr to free buffer start address
+ * @rbid: unique tag of the buffer
+ * @reserved: reserved
+ */
+struct iwl_rx_transfer_desc {
+	__le32 type_n_size;
+	__le64 addr;
+	__le16 rbid;
+	__le16 reserved;
+} __packed;
+
+#define IWL_RX_CD_SIZE		0xffffff00
+
+/**
+ * struct iwl_rx_completion_desc - completion descriptor
+ * @type: buffer type (bit 0: external buff valid,
+ *	bit 1: optional footer valid, bits 2-7: reserved)
+ * @status: status of the completion
+ * @reserved1: reserved
+ * @rbid: unique tag of the received buffer
+ * @size: buffer size, masked by IWL_RX_CD_SIZE
+ * @reserved2: reserved
+ */
+struct iwl_rx_completion_desc {
+	u8 type;
+	u8 status;
+	__le16 reserved1;
+	__le16 rbid;
+	__le32 size;
+	u8 reserved2[22];
+} __packed;
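+
+/*
+ * Illustrative use, per the @size documentation above:
+ *	len = le32_to_cpu(cd->size) & IWL_RX_CD_SIZE;
+ */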
+
 /**
  * struct iwl_rxq - Rx queue
  * @id: queue index
  * @bd: driver's pointer to buffer of receive buffer descriptors (rbd).
  *	Address size is 32 bit in pre-9000 devices and 64 bit in 9000 devices.
+ *	In 22560 devices it is a pointer to a list of iwl_rx_transfer_desc's
  * @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
  * @ubd: driver's pointer to buffer of used receive buffer descriptors (rbd)
  * @ubd_dma: physical address of buffer of used receive buffer descriptors (rbd)
+ * @tr_tail: driver's pointer to the transmission ring tail buffer
+ * @tr_tail_dma: physical address of the buffer for the transmission ring tail
+ * @cr_tail: driver's pointer to the completion ring tail buffer
+ * @cr_tail_dma: physical address of the buffer for the completion ring tail
  * @read: Shared index to newest available Rx buffer
  * @write: Shared index to oldest written Rx packet
  * @free_count: Number of pre-allocated buffers in rx_free
@@ -125,8 +236,16 @@ struct iwl_rxq {
 	int id;
 	void *bd;
 	dma_addr_t bd_dma;
-	__le32 *used_bd;
+	union {
+		void *used_bd;
+		__le32 *bd_32;
+		struct iwl_rx_completion_desc *cd;
+	};
 	dma_addr_t used_bd_dma;
+	__le16 *tr_tail;
+	dma_addr_t tr_tail_dma;
+	__le16 *cr_tail;
+	dma_addr_t cr_tail_dma;
 	u32 read;
 	u32 write;
 	u32 free_count;
@@ -136,7 +255,7 @@ struct iwl_rxq {
 	struct list_head rx_free;
 	struct list_head rx_used;
 	bool need_update;
-	struct iwl_rb_status *rb_stts;
+	void *rb_stts;
 	dma_addr_t rb_stts_dma;
 	spinlock_t lock;
 	struct napi_struct napi;
@@ -175,18 +294,36 @@ struct iwl_dma_ptr {
  * iwl_queue_inc_wrap - increment queue index, wrap back to beginning
  * @index -- current index
  */
-static inline int iwl_queue_inc_wrap(int index)
+static inline int iwl_queue_inc_wrap(struct iwl_trans *trans, int index)
 {
-	return ++index & (TFD_QUEUE_SIZE_MAX - 1);
+	return ++index & (trans->cfg->base_params->max_tfd_queue_size - 1);
+}
+
+/**
+ * iwl_get_closed_rb_stts - get closed rb stts from different structs
+ * @rxq - the rxq to get the rb stts from
+ */
+static inline __le16 iwl_get_closed_rb_stts(struct iwl_trans *trans,
+					    struct iwl_rxq *rxq)
+{
+	if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
+		__le16 *rb_stts = rxq->rb_stts;
+
+		return READ_ONCE(*rb_stts);
+	} else {
+		struct iwl_rb_status *rb_stts = rxq->rb_stts;
+
+		return READ_ONCE(rb_stts->closed_rb_num);
+	}
 }
 
 /**
  * iwl_queue_dec_wrap - decrement queue index, wrap back to end
  * @index -- current index
  */
-static inline int iwl_queue_dec_wrap(int index)
+static inline int iwl_queue_dec_wrap(struct iwl_trans *trans, int index)
 {
-	return --index & (TFD_QUEUE_SIZE_MAX - 1);
+	return --index & (trans->cfg->base_params->max_tfd_queue_size - 1);
 }
 
 struct iwl_cmd_meta {
@@ -314,6 +451,18 @@ enum iwl_shared_irq_flags {
 	IWL_SHARED_IRQ_FIRST_RSS	= BIT(1),
 };
 
+/**
+ * enum iwl_image_response_code - image response values
+ * @IWL_IMAGE_RESP_DEF: the default value of the register
+ * @IWL_IMAGE_RESP_SUCCESS: iml was read successfully
+ * @IWL_IMAGE_RESP_FAIL: iml reading failed
+ */
+enum iwl_image_response_code {
+	IWL_IMAGE_RESP_DEF		= 0,
+	IWL_IMAGE_RESP_SUCCESS		= 1,
+	IWL_IMAGE_RESP_FAIL		= 2,
+};
+
 /**
  * struct iwl_dram_data
  * @physical: page phy pointer
@@ -347,6 +496,12 @@ struct iwl_self_init_dram {
  * @global_table: table mapping received VID from hw to rxb
  * @rba: allocator for RX replenishing
  * @ctxt_info: context information for FW self init
+ * @ctxt_info_gen3: context information for gen3 devices
+ * @prph_info: prph info for self init
+ * @prph_scratch: prph scratch for self init
+ * @prph_info_dma_addr: dma addr of prph info
+ * @prph_scratch_dma_addr: dma addr of prph scratch
  * @ctxt_info_dma_addr: dma addr of context information
  * @init_dram: DRAM data of firmware image (including paging).
  *	Context information addresses will be taken from here.
@@ -391,8 +546,16 @@ struct iwl_trans_pcie {
 	struct iwl_rx_mem_buffer rx_pool[RX_POOL_SIZE];
 	struct iwl_rx_mem_buffer *global_table[RX_POOL_SIZE];
 	struct iwl_rb_allocator rba;
-	struct iwl_context_info *ctxt_info;
+	union {
+		struct iwl_context_info *ctxt_info;
+		struct iwl_context_info_gen3 *ctxt_info_gen3;
+	};
+	struct iwl_prph_info *prph_info;
+	struct iwl_prph_scratch *prph_scratch;
 	dma_addr_t ctxt_info_dma_addr;
+	dma_addr_t prph_info_dma_addr;
+	dma_addr_t prph_scratch_dma_addr;
+	dma_addr_t iml_dma_addr;
 	struct iwl_self_init_dram init_dram;
 	struct iwl_trans *trans;
 
@@ -477,6 +640,20 @@ IWL_TRANS_GET_PCIE_TRANS(struct iwl_trans *trans)
 	return (void *)trans->trans_specific;
 }
 
+static inline void iwl_pcie_clear_irq(struct iwl_trans *trans,
+				      struct msix_entry *entry)
+{
+	/*
+	 * Before sending the interrupt the HW disables it to prevent
+	 * a nested interrupt. This is done by writing 1 to the corresponding
+	 * bit in the mask register. After handling the interrupt, it should be
+	 * re-enabled by clearing this bit. The register is defined as a
+	 * write-1-clear (W1C) register, meaning that it is cleared by
+	 * writing 1 to the bit.
+	 */
+	iwl_write32(trans, CSR_MSIX_AUTOMASK_ST_AD, BIT(entry->entry));
+}
+
 static inline struct iwl_trans *
 iwl_trans_pcie_get_trans(struct iwl_trans_pcie *trans_pcie)
 {
@@ -504,6 +681,11 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id);
 irqreturn_t iwl_pcie_irq_rx_msix_handler(int irq, void *dev_id);
 int iwl_pcie_rx_stop(struct iwl_trans *trans);
 void iwl_pcie_rx_free(struct iwl_trans *trans);
+void iwl_pcie_free_rbs_pool(struct iwl_trans *trans);
+void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq);
+int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget);
+void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority,
+			    struct iwl_rxq *rxq);
 
 /*****************************************************
 * ICT - interrupt handling
@@ -588,6 +770,60 @@ static inline void _iwl_disable_interrupts(struct iwl_trans *trans)
 	IWL_DEBUG_ISR(trans, "Disabled interrupts\n");
 }
 
+#define IWL_NUM_OF_COMPLETION_RINGS	31
+#define IWL_NUM_OF_TRANSFER_RINGS	527
+
+static inline int iwl_pcie_get_num_sections(const struct fw_img *fw,
+					    int start)
+{
+	int i = 0;
+
+	while (start < fw->num_sec &&
+	       fw->sec[start].offset != CPU1_CPU2_SEPARATOR_SECTION &&
+	       fw->sec[start].offset != PAGING_SEPARATOR_SECTION) {
+		start++;
+		i++;
+	}
+
+	return i;
+}
+
+static inline int iwl_pcie_ctxt_info_alloc_dma(struct iwl_trans *trans,
+					       const struct fw_desc *sec,
+					       struct iwl_dram_data *dram)
+{
+	dram->block = dma_alloc_coherent(trans->dev, sec->len,
+					 &dram->physical,
+					 GFP_KERNEL);
+	if (!dram->block)
+		return -ENOMEM;
+
+	dram->size = sec->len;
+	memcpy(dram->block, sec->data, sec->len);
+
+	return 0;
+}
+
+static inline void iwl_pcie_ctxt_info_free_fw_img(struct iwl_trans *trans)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct iwl_self_init_dram *dram = &trans_pcie->init_dram;
+	int i;
+
+	if (!dram->fw) {
+		WARN_ON(dram->fw_cnt);
+		return;
+	}
+
+	for (i = 0; i < dram->fw_cnt; i++)
+		dma_free_coherent(trans->dev, dram->fw[i].size,
+				  dram->fw[i].block, dram->fw[i].physical);
+
+	kfree(dram->fw);
+	dram->fw_cnt = 0;
+	dram->fw = NULL;
+}
+
 static inline void iwl_disable_interrupts(struct iwl_trans *trans)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -660,7 +896,7 @@ static inline void iwl_enable_fw_load_int(struct iwl_trans *trans)
 	}
 }
 
-static inline u8 iwl_pcie_get_cmd_index(struct iwl_txq *q, u32 index)
+static inline u16 iwl_pcie_get_cmd_index(const struct iwl_txq *q, u32 index)
 {
 	return index & (q->n_window - 1);
 }
@@ -676,6 +912,29 @@ static inline void *iwl_pcie_get_tfd(struct iwl_trans *trans,
 	return txq->tfds + trans_pcie->tfd_size * idx;
 }
 
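+/* Build the IRQ name for MSI-X vector @i: vector 0 is either the shared
+ * IRQ or the default queue, the last vector (when not shared) is the
+ * exception vector, and the rest are per-queue vectors.  Names are
+ * allocated with devm_kasprintf() so they live as long as the device.
+ */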
+static inline const char *queue_name(struct device *dev,
+				     struct iwl_trans_pcie *trans_p, int i)
+{
+	if (trans_p->shared_vec_mask) {
+		int vec = trans_p->shared_vec_mask &
+			  IWL_SHARED_IRQ_FIRST_RSS ? 1 : 0;
+
+		if (i == 0)
+			return DRV_NAME ": shared IRQ";
+
+		return devm_kasprintf(dev, GFP_KERNEL,
+				      DRV_NAME ": queue %d", i + vec);
+	}
+	if (i == 0)
+		return DRV_NAME ": default queue";
+
+	if (i == trans_p->alloc_vecs - 1)
+		return DRV_NAME ": exception";
+
+	return devm_kasprintf(dev, GFP_KERNEL,
+			      DRV_NAME  ": queue %d", i);
+}
+
 static inline void iwl_enable_rfkill_int(struct iwl_trans *trans)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -730,9 +989,13 @@ static inline void iwl_stop_queue(struct iwl_trans *trans,
 
 static inline bool iwl_queue_used(const struct iwl_txq *q, int i)
 {
-	return q->write_ptr >= q->read_ptr ?
-		(i >= q->read_ptr && i < q->write_ptr) :
-		!(i < q->read_ptr && i >= q->write_ptr);
+	int index = iwl_pcie_get_cmd_index(q, i);
+	int r = iwl_pcie_get_cmd_index(q, q->read_ptr);
+	int w = iwl_pcie_get_cmd_index(q, q->write_ptr);
+
+	return w >= r ?
+		(index >= r && index < w) :
+		!(index < r && index >= w);
 }
 
 static inline bool iwl_is_rfkill_set(struct iwl_trans *trans)
@@ -801,7 +1064,7 @@ bool iwl_pcie_check_hw_rf_kill(struct iwl_trans *trans);
 void iwl_trans_pcie_handle_stop_rfkill(struct iwl_trans *trans,
 				       bool was_in_rfkill);
 void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq);
-int iwl_queue_space(const struct iwl_txq *q);
+int iwl_queue_space(struct iwl_trans *trans, const struct iwl_txq *q);
 void iwl_pcie_apm_stop_master(struct iwl_trans *trans);
 void iwl_pcie_conf_msix_hw(struct iwl_trans_pcie *trans_pcie);
 int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
@@ -818,6 +1081,9 @@ void iwl_pcie_free_tso_page(struct iwl_trans_pcie *trans_pcie,
 struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len);
 #endif
 
+/* common functions that are used by gen3 transport */
+void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power);
+
 /* transport gen 2 exported functions */
 int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans,
 				 const struct fw_img *fw, bool run_in_rfkill);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
index d15f5ba2dc777b29576945ec75e3956ad6e1ab35..d017aa2a0a8bd7bed6c75f5f20da83460cca1c7a 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
@@ -18,8 +18,7 @@
  * more details.
  *
  * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ * this program.
  *
  * The full GNU General Public License is included in this distribution in the
  * file called LICENSE.
@@ -37,6 +36,7 @@
 #include "iwl-io.h"
 #include "internal.h"
 #include "iwl-op-mode.h"
+#include "iwl-context-info-gen3.h"
 
 /******************************************************************************
  *
@@ -167,7 +167,12 @@ static inline __le32 iwl_pcie_dma_addr2rbd_ptr(dma_addr_t dma_addr)
  */
 int iwl_pcie_rx_stop(struct iwl_trans *trans)
 {
-	if (trans->cfg->mq_rx_supported) {
+	if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
+		/* TODO: remove this for 22560 once fw does it */
+		iwl_write_prph(trans, RFH_RXF_DMA_CFG_GEN3, 0);
+		return iwl_poll_prph_bit(trans, RFH_GEN_STATUS_GEN3,
+					 RXF_DMA_IDLE, RXF_DMA_IDLE, 1000);
+	} else if (trans->cfg->mq_rx_supported) {
 		iwl_write_prph(trans, RFH_RXF_DMA_CFG, 0);
 		return iwl_poll_prph_bit(trans, RFH_GEN_STATUS,
 					   RXF_DMA_IDLE, RXF_DMA_IDLE, 1000);
@@ -209,7 +214,11 @@ static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans,
 	}
 
 	rxq->write_actual = round_down(rxq->write, 8);
-	if (trans->cfg->mq_rx_supported)
+	if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
+		iwl_write32(trans, HBUS_TARG_WRPTR,
+			    (rxq->write_actual |
+			     ((FIRST_RX_QUEUE + rxq->id) << 16)));
+	else if (trans->cfg->mq_rx_supported)
 		iwl_write32(trans, RFH_Q_FRBDCB_WIDX_TRG(rxq->id),
 			    rxq->write_actual);
 	else
@@ -233,6 +242,25 @@ static void iwl_pcie_rxq_check_wrptr(struct iwl_trans *trans)
 	}
 }
 
+static void iwl_pcie_restock_bd(struct iwl_trans *trans,
+				struct iwl_rxq *rxq,
+				struct iwl_rx_mem_buffer *rxb)
+{
+	if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
+		struct iwl_rx_transfer_desc *bd = rxq->bd;
+
+		bd[rxq->write].type_n_size =
+			cpu_to_le32((IWL_RX_TD_TYPE & IWL_RX_TD_TYPE_MSK) |
+			((IWL_RX_TD_SIZE_2K >> 8) & IWL_RX_TD_SIZE_MSK));
+		bd[rxq->write].addr = cpu_to_le64(rxb->page_dma);
+		bd[rxq->write].rbid = cpu_to_le16(rxb->vid);
+	} else {
+		__le64 *bd = rxq->bd;
+
+		bd[rxq->write] = cpu_to_le64(rxb->page_dma | rxb->vid);
+	}
+}
+
 /*
  * iwl_pcie_rxmq_restock - restock implementation for multi-queue rx
  */
@@ -254,8 +282,6 @@ static void iwl_pcie_rxmq_restock(struct iwl_trans *trans,
 
 	spin_lock(&rxq->lock);
 	while (rxq->free_count) {
-		__le64 *bd = (__le64 *)rxq->bd;
-
 		/* Get next free Rx buffer, remove from free list */
 		rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer,
 				       list);
@@ -264,7 +290,7 @@ static void iwl_pcie_rxmq_restock(struct iwl_trans *trans,
 		/* 12 first bits are expected to be empty */
 		WARN_ON(rxb->page_dma & DMA_BIT_MASK(12));
 		/* Point to Rx buffer via next RBD in circular buffer */
-		bd[rxq->write] = cpu_to_le64(rxb->page_dma | rxb->vid);
+		iwl_pcie_restock_bd(trans, rxq, rxb);
 		rxq->write = (rxq->write + 1) & MQ_RX_TABLE_MASK;
 		rxq->free_count--;
 	}
@@ -391,8 +417,8 @@ static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans,
  * iwl_pcie_rxq_restock. The latter function will update the HW to use the newly
  * allocated buffers.
  */
-static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority,
-				   struct iwl_rxq *rxq)
+void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority,
+			    struct iwl_rxq *rxq)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	struct iwl_rx_mem_buffer *rxb;
@@ -448,7 +474,7 @@ static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority,
 	}
 }
 
-static void iwl_pcie_free_rbs_pool(struct iwl_trans *trans)
+void iwl_pcie_free_rbs_pool(struct iwl_trans *trans)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	int i;
@@ -608,89 +634,174 @@ void iwl_pcie_rx_allocator_work(struct work_struct *data)
 	iwl_pcie_rx_allocator(trans_pcie->trans);
 }
 
-static int iwl_pcie_rx_alloc(struct iwl_trans *trans)
+static int iwl_pcie_free_bd_size(struct iwl_trans *trans, bool use_rx_td)
 {
-	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	struct iwl_rb_allocator *rba = &trans_pcie->rba;
-	struct device *dev = trans->dev;
-	int i;
-	int free_size = trans->cfg->mq_rx_supported ? sizeof(__le64) :
-						      sizeof(__le32);
+	struct iwl_rx_transfer_desc *rx_td;
 
-	if (WARN_ON(trans_pcie->rxq))
-		return -EINVAL;
+	if (use_rx_td)
+		return sizeof(*rx_td);
+	else
+		return trans->cfg->mq_rx_supported ? sizeof(__le64) :
+			sizeof(__le32);
+}
 
-	trans_pcie->rxq = kcalloc(trans->num_rx_queues, sizeof(struct iwl_rxq),
-				  GFP_KERNEL);
-	if (!trans_pcie->rxq)
-		return -EINVAL;
+static void iwl_pcie_free_rxq_dma(struct iwl_trans *trans,
+				  struct iwl_rxq *rxq)
+{
+	struct device *dev = trans->dev;
+	bool use_rx_td = (trans->cfg->device_family >=
+			  IWL_DEVICE_FAMILY_22560);
+	int free_size = iwl_pcie_free_bd_size(trans, use_rx_td);
+
+	if (rxq->bd)
+		dma_free_coherent(trans->dev,
+				  free_size * rxq->queue_size,
+				  rxq->bd, rxq->bd_dma);
+	rxq->bd_dma = 0;
+	rxq->bd = NULL;
+
+	if (rxq->rb_stts)
+		dma_free_coherent(trans->dev,
+				  use_rx_td ? sizeof(__le16) :
+				  sizeof(struct iwl_rb_status),
+				  rxq->rb_stts, rxq->rb_stts_dma);
+	rxq->rb_stts_dma = 0;
+	rxq->rb_stts = NULL;
+
+	if (rxq->used_bd)
+		dma_free_coherent(trans->dev,
+				  (use_rx_td ? sizeof(*rxq->cd) :
+				   sizeof(__le32)) * rxq->queue_size,
+				  rxq->used_bd, rxq->used_bd_dma);
+	rxq->used_bd_dma = 0;
+	rxq->used_bd = NULL;
+
+	if (trans->cfg->device_family < IWL_DEVICE_FAMILY_22560)
+		return;
 
-	spin_lock_init(&rba->lock);
+	if (rxq->tr_tail)
+		dma_free_coherent(dev, sizeof(__le16),
+				  rxq->tr_tail, rxq->tr_tail_dma);
+	rxq->tr_tail_dma = 0;
+	rxq->tr_tail = NULL;
+
+	if (rxq->cr_tail)
+		dma_free_coherent(dev, sizeof(__le16),
+				  rxq->cr_tail, rxq->cr_tail_dma);
+	rxq->cr_tail_dma = 0;
+	rxq->cr_tail = NULL;
+}
 
-	for (i = 0; i < trans->num_rx_queues; i++) {
-		struct iwl_rxq *rxq = &trans_pcie->rxq[i];
+static int iwl_pcie_alloc_rxq_dma(struct iwl_trans *trans,
+				  struct iwl_rxq *rxq)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct device *dev = trans->dev;
+	int i;
+	int free_size;
+	bool use_rx_td = (trans->cfg->device_family >=
+			  IWL_DEVICE_FAMILY_22560);
 
-		spin_lock_init(&rxq->lock);
-		if (trans->cfg->mq_rx_supported)
-			rxq->queue_size = MQ_RX_TABLE_SIZE;
-		else
-			rxq->queue_size = RX_QUEUE_SIZE;
+	spin_lock_init(&rxq->lock);
+	if (trans->cfg->mq_rx_supported)
+		rxq->queue_size = MQ_RX_TABLE_SIZE;
+	else
+		rxq->queue_size = RX_QUEUE_SIZE;
 
-		/*
-		 * Allocate the circular buffer of Read Buffer Descriptors
-		 * (RBDs)
-		 */
-		rxq->bd = dma_zalloc_coherent(dev,
-					     free_size * rxq->queue_size,
-					     &rxq->bd_dma, GFP_KERNEL);
-		if (!rxq->bd)
-			goto err;
+	free_size = iwl_pcie_free_bd_size(trans, use_rx_td);
 
-		if (trans->cfg->mq_rx_supported) {
-			rxq->used_bd = dma_zalloc_coherent(dev,
-							   sizeof(__le32) *
-							   rxq->queue_size,
-							   &rxq->used_bd_dma,
-							   GFP_KERNEL);
-			if (!rxq->used_bd)
-				goto err;
-		}
+	/*
+	 * Allocate the circular buffer of Read Buffer Descriptors
+	 * (RBDs)
+	 */
+	rxq->bd = dma_zalloc_coherent(dev,
+				      free_size * rxq->queue_size,
+				      &rxq->bd_dma, GFP_KERNEL);
+	if (!rxq->bd)
+		goto err;
 
-		/*Allocate the driver's pointer to receive buffer status */
-		rxq->rb_stts = dma_zalloc_coherent(dev, sizeof(*rxq->rb_stts),
-						   &rxq->rb_stts_dma,
+	if (trans->cfg->mq_rx_supported) {
+		rxq->used_bd = dma_zalloc_coherent(dev,
+						   (use_rx_td ?
+						   sizeof(*rxq->cd) :
+						   sizeof(__le32)) *
+						   rxq->queue_size,
+						   &rxq->used_bd_dma,
 						   GFP_KERNEL);
-		if (!rxq->rb_stts)
+		if (!rxq->used_bd)
 			goto err;
 	}
+
+	/* Allocate the driver's pointer to receive buffer status */
+	rxq->rb_stts = dma_zalloc_coherent(dev, use_rx_td ?
+					   sizeof(__le16) :
+					   sizeof(struct iwl_rb_status),
+					   &rxq->rb_stts_dma,
+					   GFP_KERNEL);
+	if (!rxq->rb_stts)
+		goto err;
+
+	if (!use_rx_td)
+		return 0;
+
+	/* Allocate the driver's pointer to TR tail */
+	rxq->tr_tail = dma_zalloc_coherent(dev, sizeof(__le16),
+					   &rxq->tr_tail_dma,
+					   GFP_KERNEL);
+	if (!rxq->tr_tail)
+		goto err;
+
+	/* Allocate the driver's pointer to CR tail */
+	rxq->cr_tail = dma_zalloc_coherent(dev, sizeof(__le16),
+					   &rxq->cr_tail_dma,
+					   GFP_KERNEL);
+	if (!rxq->cr_tail)
+		goto err;
+	/*
+	 * W/A: 22560 device step Z0 HW bug - the CR tail must be non-zero
+	 * TODO: remove this when Z0 is no longer supported
+	 */
+	*rxq->cr_tail = cpu_to_le16(500);
+
 	return 0;
 
 err:
 	for (i = 0; i < trans->num_rx_queues; i++) {
 		struct iwl_rxq *rxq = &trans_pcie->rxq[i];
 
-		if (rxq->bd)
-			dma_free_coherent(dev, free_size * rxq->queue_size,
-					  rxq->bd, rxq->bd_dma);
-		rxq->bd_dma = 0;
-		rxq->bd = NULL;
-
-		if (rxq->rb_stts)
-			dma_free_coherent(trans->dev,
-					  sizeof(struct iwl_rb_status),
-					  rxq->rb_stts, rxq->rb_stts_dma);
-
-		if (rxq->used_bd)
-			dma_free_coherent(dev, sizeof(__le32) * rxq->queue_size,
-					  rxq->used_bd, rxq->used_bd_dma);
-		rxq->used_bd_dma = 0;
-		rxq->used_bd = NULL;
+		iwl_pcie_free_rxq_dma(trans, rxq);
 	}
 	kfree(trans_pcie->rxq);
 
 	return -ENOMEM;
 }
 
+static int iwl_pcie_rx_alloc(struct iwl_trans *trans)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct iwl_rb_allocator *rba = &trans_pcie->rba;
+	int i, ret;
+
+	if (WARN_ON(trans_pcie->rxq))
+		return -EINVAL;
+
+	trans_pcie->rxq = kcalloc(trans->num_rx_queues, sizeof(struct iwl_rxq),
+				  GFP_KERNEL);
+	if (!trans_pcie->rxq)
+		return -EINVAL;
+
+	spin_lock_init(&rba->lock);
+
+	for (i = 0; i < trans->num_rx_queues; i++) {
+		struct iwl_rxq *rxq = &trans_pcie->rxq[i];
+
+		ret = iwl_pcie_alloc_rxq_dma(trans, rxq);
+		if (ret)
+			return ret;
+	}
+	return 0;
+}
+
 static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -792,6 +903,9 @@ static void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans)
 	int i;
 
 	switch (trans_pcie->rx_buf_size) {
+	case IWL_AMSDU_2K:
+		rb_size = RFH_RXF_DMA_RB_SIZE_2K;
+		break;
 	case IWL_AMSDU_4K:
 		rb_size = RFH_RXF_DMA_RB_SIZE_4K;
 		break;
@@ -872,7 +986,7 @@ static void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans)
 	iwl_pcie_enable_rx_wake(trans, true);
 }
 
-static void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq)
+void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq)
 {
 	lockdep_assert_held(&rxq->lock);
 
@@ -882,7 +996,7 @@ static void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq)
 	rxq->used_count = 0;
 }
 
-static int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget)
+int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget)
 {
 	WARN_ON(1);
 	return 0;
@@ -931,7 +1045,9 @@ static int _iwl_pcie_rx_init(struct iwl_trans *trans)
 		rxq->read = 0;
 		rxq->write = 0;
 		rxq->write_actual = 0;
-		memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts));
+		memset(rxq->rb_stts, 0,
+		       (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) ?
+		       sizeof(__le16) : sizeof(struct iwl_rb_status));
 
 		iwl_pcie_rx_init_rxb_lists(rxq);
 
@@ -1002,8 +1118,6 @@ void iwl_pcie_rx_free(struct iwl_trans *trans)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	struct iwl_rb_allocator *rba = &trans_pcie->rba;
-	int free_size = trans->cfg->mq_rx_supported ? sizeof(__le64) :
-					      sizeof(__le32);
 	int i;
 
 	/*
@@ -1022,27 +1136,7 @@ void iwl_pcie_rx_free(struct iwl_trans *trans)
 	for (i = 0; i < trans->num_rx_queues; i++) {
 		struct iwl_rxq *rxq = &trans_pcie->rxq[i];
 
-		if (rxq->bd)
-			dma_free_coherent(trans->dev,
-					  free_size * rxq->queue_size,
-					  rxq->bd, rxq->bd_dma);
-		rxq->bd_dma = 0;
-		rxq->bd = NULL;
-
-		if (rxq->rb_stts)
-			dma_free_coherent(trans->dev,
-					  sizeof(struct iwl_rb_status),
-					  rxq->rb_stts, rxq->rb_stts_dma);
-		else
-			IWL_DEBUG_INFO(trans,
-				       "Free rxq->rb_stts which is NULL\n");
-
-		if (rxq->used_bd)
-			dma_free_coherent(trans->dev,
-					  sizeof(__le32) * rxq->queue_size,
-					  rxq->used_bd, rxq->used_bd_dma);
-		rxq->used_bd_dma = 0;
-		rxq->used_bd = NULL;
+		iwl_pcie_free_rxq_dma(trans, rxq);
 
 		if (rxq->napi.poll)
 			netif_napi_del(&rxq->napi);
@@ -1202,6 +1296,8 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
 		}
 
 		page_stolen |= rxcb._page_stolen;
+		if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
+			break;
 		offset += ALIGN(len, FH_RSCSR_FRAME_ALIGN);
 	}
 
@@ -1236,6 +1332,45 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
 		iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency);
 }
 
+static struct iwl_rx_mem_buffer *iwl_pcie_get_rxb(struct iwl_trans *trans,
+						  struct iwl_rxq *rxq, int i)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct iwl_rx_mem_buffer *rxb;
+	u16 vid;
+
+	if (!trans->cfg->mq_rx_supported) {
+		rxb = rxq->queue[i];
+		rxq->queue[i] = NULL;
+		return rxb;
+	}
+
+	/* used_bd is 16/32 bits wide, but only 12 bits carry the vid */
+	if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
+		vid = le16_to_cpu(rxq->cd[i].rbid) & 0x0FFF;
+	else
+		vid = le32_to_cpu(rxq->bd_32[i]) & 0x0FFF;
+
+	if (!vid || vid > ARRAY_SIZE(trans_pcie->global_table))
+		goto out_err;
+
+	rxb = trans_pcie->global_table[vid - 1];
+	if (rxb->invalid)
+		goto out_err;
+
+	if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
+		rxb->size = le32_to_cpu(rxq->cd[i].size) & IWL_RX_CD_SIZE;
+
+	rxb->invalid = true;
+
+	return rxb;
+
+out_err:
+	WARN(1, "Invalid rxb from HW %u\n", (u32)vid);
+	iwl_force_nmi(trans);
+	return NULL;
+}
+
 /*
  * iwl_pcie_rx_handle - Main entry function for receiving responses from fw
  */
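
A minimal user-space sketch of the vid-based lookup that iwl_pcie_get_rxb() factors out above, under the assumption of a fixed-size table and 1-based ids; the names and table size are made up for illustration.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TABLE_SIZE 512			/* assumed size of the buffer table */

struct rx_buf {
	bool invalid;			/* true once handed back to software */
	/* page pointer, DMA address, size, ... */
};

static struct rx_buf table[TABLE_SIZE];

static struct rx_buf *lookup_rxb(uint32_t raw)
{
	uint16_t vid = raw & 0x0FFF;	/* only 12 bits carry the id */

	if (!vid || vid > TABLE_SIZE)
		return NULL;		/* HW handed us garbage */

	struct rx_buf *rxb = &table[vid - 1];	/* ids are 1-based */

	if (rxb->invalid)
		return NULL;		/* already consumed: HW replay or bug */

	rxb->invalid = true;		/* owned by software until restocked */
	return rxb;
}

int main(void)
{
	printf("vid 0:       %p (rejected)\n", (void *)lookup_rxb(0));
	printf("vid 7:       %p\n", (void *)lookup_rxb(7));
	printf("vid 7 again: %p (rejected, already claimed)\n",
	       (void *)lookup_rxb(7));
	return 0;
}

Marking the buffer as consumed the moment it is claimed is what lets the real function turn a repeated or out-of-range vid into a single WARN plus NMI instead of silently reusing a buffer.
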
@@ -1250,7 +1385,7 @@ static void iwl_pcie_rx_handle(struct iwl_trans *trans, int queue)
 	spin_lock(&rxq->lock);
 	/* uCode's read index (stored in shared DRAM) indicates the last Rx
 	 * buffer that the driver may process (last buffer filled by ucode). */
-	r = le16_to_cpu(READ_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF;
+	r = le16_to_cpu(iwl_get_closed_rb_stts(trans, rxq)) & 0x0FFF;
 	i = rxq->read;
 
 	/* W/A 9000 device step A0 wrap-around bug */
@@ -1266,30 +1401,9 @@ static void iwl_pcie_rx_handle(struct iwl_trans *trans, int queue)
 		if (unlikely(rxq->used_count == rxq->queue_size / 2))
 			emergency = true;
 
-		if (trans->cfg->mq_rx_supported) {
-			/*
-			 * used_bd is a 32 bit but only 12 are used to retrieve
-			 * the vid
-			 */
-			u16 vid = le32_to_cpu(rxq->used_bd[i]) & 0x0FFF;
-
-			if (WARN(!vid ||
-				 vid > ARRAY_SIZE(trans_pcie->global_table),
-				 "Invalid rxb index from HW %u\n", (u32)vid)) {
-				iwl_force_nmi(trans);
-				goto out;
-			}
-			rxb = trans_pcie->global_table[vid - 1];
-			if (WARN(rxb->invalid,
-				 "Invalid rxb from HW %u\n", (u32)vid)) {
-				iwl_force_nmi(trans);
-				goto out;
-			}
-			rxb->invalid = true;
-		} else {
-			rxb = rxq->queue[i];
-			rxq->queue[i] = NULL;
-		}
+		rxb = iwl_pcie_get_rxb(trans, rxq, i);
+		if (!rxb)
+			goto out;
 
 		IWL_DEBUG_RX(trans, "Q %d: HW = %d, SW = %d\n", rxq->id, r, i);
 		iwl_pcie_rx_handle_rb(trans, rxq, rxb, emergency);
@@ -1331,6 +1445,9 @@ static void iwl_pcie_rx_handle(struct iwl_trans *trans, int queue)
 out:
 	/* Backtrack one entry */
 	rxq->read = i;
+	/* update cr tail with the rxq read pointer */
+	if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
+		*rxq->cr_tail = cpu_to_le16(r);
 	spin_unlock(&rxq->lock);
 
 	/*
@@ -1362,20 +1479,6 @@ static struct iwl_trans_pcie *iwl_pcie_get_trans_pcie(struct msix_entry *entry)
 	return container_of(entries, struct iwl_trans_pcie, msix_entries[0]);
 }
 
-static inline void iwl_pcie_clear_irq(struct iwl_trans *trans,
-				      struct msix_entry *entry)
-{
-	/*
-	 * Before sending the interrupt the HW disables it to prevent
-	 * a nested interrupt. This is done by writing 1 to the corresponding
-	 * bit in the mask register. After handling the interrupt, it should be
-	 * re-enabled by clearing this bit. This register is defined as
-	 * write 1 clear (W1C) register, meaning that it's being clear
-	 * by writing 1 to the bit.
-	 */
-	iwl_write32(trans, CSR_MSIX_AUTOMASK_ST_AD, BIT(entry->entry));
-}
-
 /*
  * iwl_pcie_rx_msix_handle - Main entry function for receiving responses from fw
  * This interrupt handler should be used with RSS queue only.
@@ -1970,7 +2073,8 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
 
 	/* Error detected by uCode */
 	if ((inta_fh & MSIX_FH_INT_CAUSES_FH_ERR) ||
-	    (inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR)) {
+	    (inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR) ||
+	    (inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR_V2)) {
 		IWL_ERR(trans,
 			"Microcode SW error detected. Restarting 0x%X.\n",
 			inta_fh);
@@ -1995,8 +2099,18 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
 		}
 	}
 
-	/* uCode wakes up after power-down sleep */
-	if (inta_hw & MSIX_HW_INT_CAUSES_REG_WAKEUP) {
+	if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560 &&
+	    inta_hw & MSIX_HW_INT_CAUSES_REG_IPC) {
+		/* Reflect IML transfer status */
+		int res = iwl_read32(trans, CSR_IML_RESP_ADDR);
+
+		IWL_DEBUG_ISR(trans, "IML transfer status: %d\n", res);
+		if (res == IWL_IMAGE_RESP_FAIL) {
+			isr_stats->sw++;
+			iwl_pcie_irq_handle_error(trans);
+		}
+	} else if (inta_hw & MSIX_HW_INT_CAUSES_REG_WAKEUP) {
+		/* uCode wakes up after power-down sleep */
 		IWL_DEBUG_ISR(trans, "Wakeup interrupt\n");
 		iwl_pcie_rxq_check_wrptr(trans);
 		iwl_pcie_txq_check_wrptrs(trans);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
index b8e8dac2895de3d2e513f13ca5cac9d30f1fa963..2bc67219ed3efadd09597a28fe18e39ad5f1d736 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
@@ -53,6 +53,7 @@
 #include "iwl-trans.h"
 #include "iwl-prph.h"
 #include "iwl-context-info.h"
+#include "iwl-context-info-gen3.h"
 #include "internal.h"
 
 /*
@@ -188,7 +189,10 @@ void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans, bool low_power)
 	}
 
 	iwl_pcie_ctxt_info_free_paging(trans);
-	iwl_pcie_ctxt_info_free(trans);
+	if (trans->cfg->device_family == IWL_DEVICE_FAMILY_22560)
+		iwl_pcie_ctxt_info_gen3_free(trans);
+	else
+		iwl_pcie_ctxt_info_free(trans);
 
 	/* Make sure (redundant) we've released our request to stay awake */
 	iwl_clear_bit(trans, CSR_GP_CNTRL,
@@ -346,7 +350,10 @@ int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans,
 		goto out;
 	}
 
-	ret = iwl_pcie_ctxt_info_init(trans, fw);
+	if (trans->cfg->device_family == IWL_DEVICE_FAMILY_22560)
+		ret = iwl_pcie_ctxt_info_gen3_init(trans, fw);
+	else
+		ret = iwl_pcie_ctxt_info_init(trans, fw);
 	if (ret)
 		goto out;
 
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index 7229991ae70d4ff1d98ee9779b97ba60fd12087d..7d319b6863feb2e14e8702907603c3f13b43de53 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -84,6 +84,7 @@
 #include "iwl-scd.h"
 #include "iwl-agn-hw.h"
 #include "fw/error-dump.h"
+#include "fw/dbg.h"
 #include "internal.h"
 #include "iwl-fh.h"
 
@@ -203,7 +204,7 @@ static void iwl_pcie_free_fw_monitor(struct iwl_trans *trans)
 	trans_pcie->fw_mon_size = 0;
 }
 
-static void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power)
+void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	struct page *page = NULL;
@@ -1132,21 +1133,44 @@ static struct iwl_causes_list causes_list[] = {
 	{MSIX_HW_INT_CAUSES_REG_HAP,		CSR_MSIX_HW_INT_MASK_AD, 0x2E},
 };
 
+static struct iwl_causes_list causes_list_v2[] = {
+	{MSIX_FH_INT_CAUSES_D2S_CH0_NUM,	CSR_MSIX_FH_INT_MASK_AD, 0},
+	{MSIX_FH_INT_CAUSES_D2S_CH1_NUM,	CSR_MSIX_FH_INT_MASK_AD, 0x1},
+	{MSIX_FH_INT_CAUSES_S2D,		CSR_MSIX_FH_INT_MASK_AD, 0x3},
+	{MSIX_FH_INT_CAUSES_FH_ERR,		CSR_MSIX_FH_INT_MASK_AD, 0x5},
+	{MSIX_HW_INT_CAUSES_REG_ALIVE,		CSR_MSIX_HW_INT_MASK_AD, 0x10},
+	{MSIX_HW_INT_CAUSES_REG_IPC,		CSR_MSIX_HW_INT_MASK_AD, 0x11},
+	{MSIX_HW_INT_CAUSES_REG_SW_ERR_V2,	CSR_MSIX_HW_INT_MASK_AD, 0x15},
+	{MSIX_HW_INT_CAUSES_REG_CT_KILL,	CSR_MSIX_HW_INT_MASK_AD, 0x16},
+	{MSIX_HW_INT_CAUSES_REG_RF_KILL,	CSR_MSIX_HW_INT_MASK_AD, 0x17},
+	{MSIX_HW_INT_CAUSES_REG_PERIODIC,	CSR_MSIX_HW_INT_MASK_AD, 0x18},
+	{MSIX_HW_INT_CAUSES_REG_SCD,		CSR_MSIX_HW_INT_MASK_AD, 0x2A},
+	{MSIX_HW_INT_CAUSES_REG_FH_TX,		CSR_MSIX_HW_INT_MASK_AD, 0x2B},
+	{MSIX_HW_INT_CAUSES_REG_HW_ERR,		CSR_MSIX_HW_INT_MASK_AD, 0x2D},
+	{MSIX_HW_INT_CAUSES_REG_HAP,		CSR_MSIX_HW_INT_MASK_AD, 0x2E},
+};
+
 static void iwl_pcie_map_non_rx_causes(struct iwl_trans *trans)
 {
 	struct iwl_trans_pcie *trans_pcie =  IWL_TRANS_GET_PCIE_TRANS(trans);
 	int val = trans_pcie->def_irq | MSIX_NON_AUTO_CLEAR_CAUSE;
-	int i;
+	int i, arr_size =
+		(trans->cfg->device_family < IWL_DEVICE_FAMILY_22560) ?
+		ARRAY_SIZE(causes_list) : ARRAY_SIZE(causes_list_v2);
 
 	/*
 	 * Access all non RX causes and map them to the default irq.
 	 * In case we are missing at least one interrupt vector,
 	 * the first interrupt vector will serve non-RX and FBQ causes.
 	 */
-	for (i = 0; i < ARRAY_SIZE(causes_list); i++) {
-		iwl_write8(trans, CSR_MSIX_IVAR(causes_list[i].addr), val);
-		iwl_clear_bit(trans, causes_list[i].mask_reg,
-			      causes_list[i].cause_num);
+	for (i = 0; i < arr_size; i++) {
+		struct iwl_causes_list *causes =
+			(trans->cfg->device_family < IWL_DEVICE_FAMILY_22560) ?
+			causes_list : causes_list_v2;
+
+		iwl_write8(trans, CSR_MSIX_IVAR(causes[i].addr), val);
+		iwl_clear_bit(trans, causes[i].mask_reg,
+			      causes[i].cause_num);
 	}
 }
 
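The causes_list/causes_list_v2 change above is a table-driven register setup: pick a table by device family, then walk it writing one IVAR entry and clearing one mask bit per cause. A standalone sketch of the pattern with stubbed register accessors and made-up offsets:

#include <stddef.h>
#include <stdio.h>

struct cause {
	unsigned int cause_num;		/* bit to clear in the mask register */
	unsigned int mask_reg;		/* which mask register it lives in */
	unsigned char addr;		/* offset into the IVAR table */
};

/* two generations of the same table -- values here are made up */
static const struct cause causes_v1[] = {
	{ 0x1, 0x800, 0x10 },
	{ 0x2, 0x800, 0x16 },
};

static const struct cause causes_v2[] = {
	{ 0x1, 0x800, 0x10 },
	{ 0x4, 0x800, 0x11 },		/* e.g. a cause only newer HW raises */
	{ 0x2, 0x800, 0x16 },
};

static void write8(unsigned int reg, unsigned char val)
{
	printf("write8(%#x) <- %#x\n", reg, (unsigned int)val);
}

static void clear_bit(unsigned int reg, unsigned int bit)
{
	printf("clear (%#x) bit %#x\n", reg, bit);
}

static void map_non_rx_causes(int new_family, unsigned char def_irq)
{
	const struct cause *causes = new_family ? causes_v2 : causes_v1;
	size_t n = new_family ? sizeof(causes_v2) / sizeof(causes_v2[0]) :
				sizeof(causes_v1) / sizeof(causes_v1[0]);
	size_t i;

	for (i = 0; i < n; i++) {
		write8(0x880 + causes[i].addr, def_irq);
		clear_bit(causes[i].mask_reg, causes[i].cause_num);
	}
}

int main(void)
{
	map_non_rx_causes(1, 0);
	return 0;
}

One small difference: the sketch hoists the family check out of the loop, whereas the hunk re-evaluates the ternary on every iteration; the behaviour is the same either way.
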
@@ -1539,18 +1563,6 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
 
 	iwl_pcie_enable_rx_wake(trans, true);
 
-	/*
-	 * Reconfigure IVAR table in case of MSIX or reset ict table in
-	 * MSI mode since HW reset erased it.
-	 * Also enables interrupts - none will happen as
-	 * the device doesn't know we're waking it up, only when
-	 * the opmode actually tells it after this call.
-	 */
-	iwl_pcie_conf_msix_hw(trans_pcie);
-	if (!trans_pcie->msix_enabled)
-		iwl_pcie_reset_ict(trans);
-	iwl_enable_interrupts(trans);
-
 	iwl_set_bit(trans, CSR_GP_CNTRL,
 		    BIT(trans->cfg->csr->flag_mac_access_req));
 	iwl_set_bit(trans, CSR_GP_CNTRL,
@@ -1568,6 +1580,18 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
 		return ret;
 	}
 
+	/*
+	 * Reconfigure IVAR table in case of MSIX or reset ict table in
+	 * MSI mode since HW reset erased it.
+	 * Also enables interrupts - none will happen as
+	 * the device doesn't know we're waking it up, only when
+	 * the opmode actually tells it after this call.
+	 */
+	iwl_pcie_conf_msix_hw(trans_pcie);
+	if (!trans_pcie->msix_enabled)
+		iwl_pcie_reset_ict(trans);
+	iwl_enable_interrupts(trans);
+
 	iwl_pcie_set_pwr(trans, false);
 
 	if (!reset) {
@@ -1685,29 +1709,6 @@ static void iwl_pcie_irq_set_affinity(struct iwl_trans *trans)
 	}
 }
 
-static const char *queue_name(struct device *dev,
-			      struct iwl_trans_pcie *trans_p, int i)
-{
-	if (trans_p->shared_vec_mask) {
-		int vec = trans_p->shared_vec_mask &
-			  IWL_SHARED_IRQ_FIRST_RSS ? 1 : 0;
-
-		if (i == 0)
-			return DRV_NAME ": shared IRQ";
-
-		return devm_kasprintf(dev, GFP_KERNEL,
-				      DRV_NAME ": queue %d", i + vec);
-	}
-	if (i == 0)
-		return DRV_NAME ": default queue";
-
-	if (i == trans_p->alloc_vecs - 1)
-		return DRV_NAME ": exception";
-
-	return devm_kasprintf(dev, GFP_KERNEL,
-			      DRV_NAME  ": queue %d", i);
-}
-
 static int iwl_pcie_init_msix_handler(struct pci_dev *pdev,
 				      struct iwl_trans_pcie *trans_pcie)
 {
@@ -2236,12 +2237,28 @@ void iwl_trans_pcie_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq)
 		jiffies_to_msecs(txq->wd_timeout),
 		txq->read_ptr, txq->write_ptr,
 		iwl_read_prph(trans, SCD_QUEUE_RDPTR(txq_id)) &
-			(TFD_QUEUE_SIZE_MAX - 1),
+			(trans->cfg->base_params->max_tfd_queue_size - 1),
 		iwl_read_prph(trans, SCD_QUEUE_WRPTR(txq_id)) &
-			(TFD_QUEUE_SIZE_MAX - 1),
+			(trans->cfg->base_params->max_tfd_queue_size - 1),
 		iwl_read_direct32(trans, FH_TX_TRB_REG(fifo)));
 }
 
+static int iwl_trans_pcie_rxq_dma_data(struct iwl_trans *trans, int queue,
+				       struct iwl_trans_rxq_dma_data *data)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+	if (queue >= trans->num_rx_queues || !trans_pcie->rxq)
+		return -EINVAL;
+
+	data->fr_bd_cb = trans_pcie->rxq[queue].bd_dma;
+	data->urbd_stts_wrptr = trans_pcie->rxq[queue].rb_stts_dma;
+	data->ur_bd_cb = trans_pcie->rxq[queue].used_bd_dma;
+	data->fr_bd_wid = 0;
+
+	return 0;
+}
+
 static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, int txq_idx)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -2522,10 +2539,11 @@ static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
 		pos += scnprintf(buf + pos, bufsz - pos, "\tfree_count: %u\n",
 				 rxq->free_count);
 		if (rxq->rb_stts) {
+			u32 r = __le16_to_cpu(iwl_get_closed_rb_stts(trans,
+								     rxq));
 			pos += scnprintf(buf + pos, bufsz - pos,
 					 "\tclosed_rb_num: %u\n",
-					 le16_to_cpu(rxq->rb_stts->closed_rb_num) &
-					 0x0FFF);
+					 r & 0x0FFF);
 		} else {
 			pos += scnprintf(buf + pos, bufsz - pos,
 					 "\tclosed_rb_num: Not Allocated\n");
@@ -2731,7 +2749,7 @@ static u32 iwl_trans_pcie_dump_rbs(struct iwl_trans *trans,
 
 	spin_lock(&rxq->lock);
 
-	r = le16_to_cpu(READ_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF;
+	r = le16_to_cpu(iwl_get_closed_rb_stts(trans, rxq)) & 0x0FFF;
 
 	for (i = rxq->read, j = 0;
 	     i != r && j < allocated_rb_nums;
@@ -2934,11 +2952,12 @@ static struct iwl_trans_dump_data
 	struct iwl_txq *cmdq = trans_pcie->txq[trans_pcie->cmd_queue];
 	struct iwl_fw_error_dump_txcmd *txcmd;
 	struct iwl_trans_dump_data *dump_data;
-	u32 len, num_rbs;
+	u32 len, num_rbs = 0;
 	u32 monitor_len;
 	int i, ptr;
 	bool dump_rbs = test_bit(STATUS_FW_ERROR, &trans->status) &&
-			!trans->cfg->mq_rx_supported;
+			!trans->cfg->mq_rx_supported &&
+			trans->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_RB);
 
 	/* transport dump header */
 	len = sizeof(*dump_data);
@@ -2990,6 +3009,10 @@ static struct iwl_trans_dump_data
 	}
 
 	if (trigger && (trigger->mode & IWL_FW_DBG_TRIGGER_MONITOR_ONLY)) {
+		if (!(trans->dbg_dump_mask &
+		      BIT(IWL_FW_ERROR_DUMP_FW_MONITOR)))
+			return NULL;
+
 		dump_data = vzalloc(len);
 		if (!dump_data)
 			return NULL;
@@ -3002,22 +3025,28 @@ static struct iwl_trans_dump_data
 	}
 
 	/* CSR registers */
-	len += sizeof(*data) + IWL_CSR_TO_DUMP;
+	if (trans->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_CSR))
+		len += sizeof(*data) + IWL_CSR_TO_DUMP;
 
 	/* FH registers */
-	if (trans->cfg->gen2)
-		len += sizeof(*data) +
-		       (FH_MEM_UPPER_BOUND_GEN2 - FH_MEM_LOWER_BOUND_GEN2);
-	else
-		len += sizeof(*data) +
-		       (FH_MEM_UPPER_BOUND - FH_MEM_LOWER_BOUND);
+	if (trans->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_FH_REGS)) {
+		if (trans->cfg->gen2)
+			len += sizeof(*data) +
+			       (FH_MEM_UPPER_BOUND_GEN2 -
+				FH_MEM_LOWER_BOUND_GEN2);
+		else
+			len += sizeof(*data) +
+			       (FH_MEM_UPPER_BOUND -
+				FH_MEM_LOWER_BOUND);
+	}
 
 	if (dump_rbs) {
 		/* Dump RBs is supported only for pre-9000 devices (1 queue) */
 		struct iwl_rxq *rxq = &trans_pcie->rxq[0];
 		/* RBs */
-		num_rbs = le16_to_cpu(READ_ONCE(rxq->rb_stts->closed_rb_num))
-				      & 0x0FFF;
+		num_rbs =
+			le16_to_cpu(iwl_get_closed_rb_stts(trans, rxq))
+			& 0x0FFF;
 		num_rbs = (num_rbs - rxq->read) & RX_QUEUE_MASK;
 		len += num_rbs * (sizeof(*data) +
 				  sizeof(struct iwl_fw_error_dump_rb) +
@@ -3025,7 +3054,8 @@ static struct iwl_trans_dump_data
 	}
 
 	/* Paged memory for gen2 HW */
-	if (trans->cfg->gen2)
+	if (trans->cfg->gen2 &&
+	    trans->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING))
 		for (i = 0; i < trans_pcie->init_dram.paging_cnt; i++)
 			len += sizeof(*data) +
 			       sizeof(struct iwl_fw_error_dump_paging) +
@@ -3037,41 +3067,51 @@ static struct iwl_trans_dump_data
 
 	len = 0;
 	data = (void *)dump_data->data;
-	data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXCMD);
-	txcmd = (void *)data->data;
-	spin_lock_bh(&cmdq->lock);
-	ptr = cmdq->write_ptr;
-	for (i = 0; i < cmdq->n_window; i++) {
-		u8 idx = iwl_pcie_get_cmd_index(cmdq, ptr);
-		u32 caplen, cmdlen;
-
-		cmdlen = iwl_trans_pcie_get_cmdlen(trans, cmdq->tfds +
-						   trans_pcie->tfd_size * ptr);
-		caplen = min_t(u32, TFD_MAX_PAYLOAD_SIZE, cmdlen);
-
-		if (cmdlen) {
-			len += sizeof(*txcmd) + caplen;
-			txcmd->cmdlen = cpu_to_le32(cmdlen);
-			txcmd->caplen = cpu_to_le32(caplen);
-			memcpy(txcmd->data, cmdq->entries[idx].cmd, caplen);
-			txcmd = (void *)((u8 *)txcmd->data + caplen);
+
+	if (trans->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_TXCMD)) {
+		u16 tfd_size = trans_pcie->tfd_size;
+
+		data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXCMD);
+		txcmd = (void *)data->data;
+		spin_lock_bh(&cmdq->lock);
+		ptr = cmdq->write_ptr;
+		for (i = 0; i < cmdq->n_window; i++) {
+			u8 idx = iwl_pcie_get_cmd_index(cmdq, ptr);
+			u32 caplen, cmdlen;
+
+			cmdlen = iwl_trans_pcie_get_cmdlen(trans,
+							   cmdq->tfds +
+							   tfd_size * ptr);
+			caplen = min_t(u32, TFD_MAX_PAYLOAD_SIZE, cmdlen);
+
+			if (cmdlen) {
+				len += sizeof(*txcmd) + caplen;
+				txcmd->cmdlen = cpu_to_le32(cmdlen);
+				txcmd->caplen = cpu_to_le32(caplen);
+				memcpy(txcmd->data, cmdq->entries[idx].cmd,
+				       caplen);
+				txcmd = (void *)((u8 *)txcmd->data + caplen);
+			}
+
+			ptr = iwl_queue_dec_wrap(trans, ptr);
 		}
+		spin_unlock_bh(&cmdq->lock);
 
-		ptr = iwl_queue_dec_wrap(ptr);
+		data->len = cpu_to_le32(len);
+		len += sizeof(*data);
+		data = iwl_fw_error_next_data(data);
 	}
-	spin_unlock_bh(&cmdq->lock);
 
-	data->len = cpu_to_le32(len);
-	len += sizeof(*data);
-	data = iwl_fw_error_next_data(data);
-
-	len += iwl_trans_pcie_dump_csr(trans, &data);
-	len += iwl_trans_pcie_fh_regs_dump(trans, &data);
+	if (trans->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_CSR))
+		len += iwl_trans_pcie_dump_csr(trans, &data);
+	if (trans->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_FH_REGS))
+		len += iwl_trans_pcie_fh_regs_dump(trans, &data);
 	if (dump_rbs)
 		len += iwl_trans_pcie_dump_rbs(trans, &data, num_rbs);
 
 	/* Paged memory for gen2 HW */
-	if (trans->cfg->gen2) {
+	if (trans->cfg->gen2 &&
+	    trans->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING)) {
 		for (i = 0; i < trans_pcie->init_dram.paging_cnt; i++) {
 			struct iwl_fw_error_dump_paging *paging;
 			dma_addr_t addr =
@@ -3091,8 +3131,8 @@ static struct iwl_trans_dump_data
 			len += sizeof(*data) + sizeof(*paging) + page_len;
 		}
 	}
-
-	len += iwl_trans_pcie_dump_monitor(trans, &data, monitor_len);
+	if (trans->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_FW_MONITOR))
+		len += iwl_trans_pcie_dump_monitor(trans, &data, monitor_len);
 
 	dump_data->len = len;
 
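The dbg_dump_mask changes above follow a two-pass pattern: the first pass adds an optional section's size to the total only when its bit is set, and the second pass emits the same sections under exactly the same conditions, so the allocated buffer is sized precisely. A standalone sketch with invented section ids and sizes:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

enum { DUMP_TXCMD, DUMP_CSR, DUMP_FH_REGS, DUMP_FW_MONITOR, DUMP_MAX };

static size_t section_len(int type)
{
	static const size_t lens[DUMP_MAX] = { 64, 32, 128, 256 };

	return lens[type];
}

int main(void)
{
	uint32_t dump_mask = (1u << DUMP_CSR) | (1u << DUMP_FW_MONITOR);
	size_t len = 0, off = 0;
	uint8_t *buf;
	int type;

	/* pass 1: account for each optional section only if its bit is set */
	for (type = 0; type < DUMP_MAX; type++)
		if (dump_mask & (1u << type))
			len += section_len(type);

	buf = calloc(1, len);
	if (!buf)
		return 1;

	/* pass 2: emit the sections under exactly the same conditions */
	for (type = 0; type < DUMP_MAX; type++) {
		if (!(dump_mask & (1u << type)))
			continue;
		memset(buf + off, type, section_len(type));
		off += section_len(type);
	}

	printf("dumped %zu of %zu bytes\n", off, len);
	free(buf);
	return 0;
}

The property that matters is that both passes test the same mask bits; a mismatch would either overrun or waste the buffer.
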
@@ -3187,6 +3227,7 @@ static const struct iwl_trans_ops trans_ops_pcie_gen2 = {
 	.txq_alloc = iwl_trans_pcie_dyn_txq_alloc,
 	.txq_free = iwl_trans_pcie_dyn_txq_free,
 	.wait_txq_empty = iwl_trans_pcie_wait_txq_empty,
+	.rxq_dma_data = iwl_trans_pcie_rxq_dma_data,
 };
 
 struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
@@ -3349,14 +3390,26 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
 
 #if IS_ENABLED(CONFIG_IWLMVM)
 	trans->hw_rf_id = iwl_read32(trans, CSR_HW_RF_ID);
-	if (trans->hw_rf_id == CSR_HW_RF_ID_TYPE_HR) {
+
+	if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
+	    CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR)) {
 		u32 hw_status;
 
 		hw_status = iwl_read_prph(trans, UMAG_GEN_HW_STATUS);
-		if (hw_status & UMAG_GEN_HW_IS_FPGA)
-			trans->cfg = &iwl22000_2ax_cfg_qnj_hr_f0;
-		else
+		if (CSR_HW_RF_STEP(trans->hw_rf_id) == SILICON_B_STEP)
+			/*
+			 * B step FW is the same for the physical card and FPGA
+			 */
+			trans->cfg = &iwl22000_2ax_cfg_qnj_hr_b0;
+		else if ((hw_status & UMAG_GEN_HW_IS_FPGA) &&
+			 CSR_HW_RF_STEP(trans->hw_rf_id) == SILICON_A_STEP) {
+			trans->cfg = &iwl22000_2ax_cfg_qnj_hr_a0_f0;
+		} else {
+			/*
+			 * A step, no FPGA
+			 */
 			trans->cfg = &iwl22000_2ac_cfg_hr;
+		}
 	}
 #endif
 
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
index 48890a1c825fd9ded157f57d12599601e4c8d6e7..b99f33ff912306f5638bb95f9578df88b82aadc4 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
@@ -6,6 +6,7 @@
  * GPL LICENSE SUMMARY
  *
  * Copyright(c) 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018        Intel Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -19,6 +20,7 @@
  * BSD LICENSE
  *
  * Copyright(c) 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018        Intel Corporation
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -50,6 +52,7 @@
  *****************************************************************************/
 #include <linux/pm_runtime.h>
 #include <net/tso.h>
+#include <linux/tcp.h>
 
 #include "iwl-debug.h"
 #include "iwl-csr.h"
@@ -84,16 +87,20 @@ void iwl_pcie_gen2_tx_stop(struct iwl_trans *trans)
 /*
  * iwl_pcie_txq_update_byte_tbl - Set up entry in Tx byte-count array
  */
-static void iwl_pcie_gen2_update_byte_tbl(struct iwl_txq *txq, u16 byte_cnt,
+static void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans_pcie *trans_pcie,
+					  struct iwl_txq *txq, u16 byte_cnt,
 					  int num_tbs)
 {
 	struct iwlagn_scd_bc_tbl *scd_bc_tbl = txq->bc_tbl.addr;
+	struct iwl_trans *trans = iwl_trans_pcie_get_trans(trans_pcie);
+	struct iwl_gen3_bc_tbl *scd_bc_tbl_gen3 = txq->bc_tbl.addr;
 	int idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
 	u8 filled_tfd_size, num_fetch_chunks;
 	u16 len = byte_cnt;
 	__le16 bc_ent;
 
-	len = DIV_ROUND_UP(len, 4);
+	if (trans_pcie->bc_table_dword)
+		len = DIV_ROUND_UP(len, 4);
 
 	if (WARN_ON(len > 0xFFF || idx >= txq->n_window))
 		return;
@@ -111,7 +118,10 @@ static void iwl_pcie_gen2_update_byte_tbl(struct iwl_txq *txq, u16 byte_cnt,
 	num_fetch_chunks = DIV_ROUND_UP(filled_tfd_size, 64) - 1;
 
 	bc_ent = cpu_to_le16(len | (num_fetch_chunks << 12));
-	scd_bc_tbl->tfd_offset[idx] = bc_ent;
+	if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
+		scd_bc_tbl_gen3->tfd_offset[idx] = bc_ent;
+	else
+		scd_bc_tbl->tfd_offset[idx] = bc_ent;
 }
 
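A standalone sketch of the byte-count entry packing used by iwl_pcie_gen2_update_byte_tbl() above: the low 12 bits hold the (optionally dword-scaled) length and the top 4 bits hold the number of 64-byte fetch chunks minus one. The bit positions mirror the hunk; everything else is illustrative.

#include <stdint.h>
#include <stdio.h>

static uint16_t pack_bc_entry(uint16_t byte_cnt, unsigned int filled_tfd_size,
			      int bc_table_dword)
{
	unsigned int len = byte_cnt;
	unsigned int num_fetch_chunks;

	if (bc_table_dword)
		len = (len + 3) / 4;	/* DIV_ROUND_UP(len, 4) */

	if (len > 0xFFF)		/* would overflow the 12-bit field */
		return 0;

	num_fetch_chunks = (filled_tfd_size + 63) / 64 - 1;

	return (uint16_t)(len | (num_fetch_chunks << 12));
}

int main(void)
{
	printf("dword-scaled: %#x\n", pack_bc_entry(1500, 80, 1));
	printf("byte-counted: %#x\n", pack_bc_entry(1500, 80, 0));
	return 0;
}

The 12-bit length field is also why the real code WARNs when len exceeds 0xFFF.
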
 /*
@@ -355,52 +365,89 @@ static int iwl_pcie_gen2_build_amsdu(struct iwl_trans *trans,
 	return -EINVAL;
 }
 
-static
-struct iwl_tfh_tfd *iwl_pcie_gen2_build_tfd(struct iwl_trans *trans,
-					    struct iwl_txq *txq,
-					    struct iwl_device_cmd *dev_cmd,
-					    struct sk_buff *skb,
-					    struct iwl_cmd_meta *out_meta)
+static struct
+iwl_tfh_tfd *iwl_pcie_gen2_build_tx_amsdu(struct iwl_trans *trans,
+					  struct iwl_txq *txq,
+					  struct iwl_device_cmd *dev_cmd,
+					  struct sk_buff *skb,
+					  struct iwl_cmd_meta *out_meta,
+					  int hdr_len,
+					  int tx_cmd_len)
 {
-	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
 	int idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
 	struct iwl_tfh_tfd *tfd = iwl_pcie_get_tfd(trans, txq, idx);
 	dma_addr_t tb_phys;
-	bool amsdu;
-	int i, len, tb1_len, tb2_len, hdr_len;
+	int len;
 	void *tb1_addr;
 
-	memset(tfd, 0, sizeof(*tfd));
+	tb_phys = iwl_pcie_get_first_tb_dma(txq, idx);
 
-	amsdu = ieee80211_is_data_qos(hdr->frame_control) &&
-		(*ieee80211_get_qos_ctl(hdr) &
-		 IEEE80211_QOS_CTL_A_MSDU_PRESENT);
+	iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE);
+
+	/*
+	 * The second TB (tb1) points to the remainder of the TX command
+	 * and the 802.11 header - dword aligned size
+	 * (This calculation modifies the TX command, so do it before the
+	 * setup of the first TB)
+	 */
+	len = tx_cmd_len + sizeof(struct iwl_cmd_header) + hdr_len -
+	      IWL_FIRST_TB_SIZE;
+
+	/* do not align A-MSDU to dword as the subframe header aligns it */
+
+	/* map the data for TB1 */
+	tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
+	tb_phys = dma_map_single(trans->dev, tb1_addr, len, DMA_TO_DEVICE);
+	if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
+		goto out_err;
+	iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, len);
+
+	if (iwl_pcie_gen2_build_amsdu(trans, skb, tfd,
+				      len + IWL_FIRST_TB_SIZE,
+				      hdr_len, dev_cmd))
+		goto out_err;
+
+	/* building the A-MSDU might have changed this data, memcpy it now */
+	memcpy(&txq->first_tb_bufs[idx], &dev_cmd->hdr, IWL_FIRST_TB_SIZE);
+	return tfd;
+
+out_err:
+	iwl_pcie_gen2_tfd_unmap(trans, out_meta, tfd);
+	return NULL;
+}
+
+static struct
+iwl_tfh_tfd *iwl_pcie_gen2_build_tx(struct iwl_trans *trans,
+				    struct iwl_txq *txq,
+				    struct iwl_device_cmd *dev_cmd,
+				    struct sk_buff *skb,
+				    struct iwl_cmd_meta *out_meta,
+				    int hdr_len,
+				    int tx_cmd_len)
+{
+	int idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
+	struct iwl_tfh_tfd *tfd = iwl_pcie_get_tfd(trans, txq, idx);
+	dma_addr_t tb_phys;
+	int i, len, tb1_len, tb2_len;
+	void *tb1_addr;
 
 	tb_phys = iwl_pcie_get_first_tb_dma(txq, idx);
+
 	/* The first TB points to bi-directional DMA data */
-	if (!amsdu)
-		memcpy(&txq->first_tb_bufs[idx], &dev_cmd->hdr,
-		       IWL_FIRST_TB_SIZE);
+	memcpy(&txq->first_tb_bufs[idx], &dev_cmd->hdr, IWL_FIRST_TB_SIZE);
 
 	iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE);
 
-	/* there must be data left over for TB1 or this code must be changed */
-	BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen2) < IWL_FIRST_TB_SIZE);
-
 	/*
 	 * The second TB (tb1) points to the remainder of the TX command
 	 * and the 802.11 header - dword aligned size
 	 * (This calculation modifies the TX command, so do it before the
 	 * setup of the first TB)
 	 */
-	len = sizeof(struct iwl_tx_cmd_gen2) + sizeof(struct iwl_cmd_header) +
-	      ieee80211_hdrlen(hdr->frame_control) - IWL_FIRST_TB_SIZE;
+	len = tx_cmd_len + sizeof(struct iwl_cmd_header) + hdr_len -
+	      IWL_FIRST_TB_SIZE;
 
-	/* do not align A-MSDU to dword as the subframe header aligns it */
-	if (amsdu)
-		tb1_len = len;
-	else
-		tb1_len = ALIGN(len, 4);
+	tb1_len = ALIGN(len, 4);
 
 	/* map the data for TB1 */
 	tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
@@ -409,23 +456,6 @@ struct iwl_tfh_tfd *iwl_pcie_gen2_build_tfd(struct iwl_trans *trans,
 		goto out_err;
 	iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb1_len);
 
-	hdr_len = ieee80211_hdrlen(hdr->frame_control);
-
-	if (amsdu) {
-		if (iwl_pcie_gen2_build_amsdu(trans, skb, tfd,
-					      tb1_len + IWL_FIRST_TB_SIZE,
-					      hdr_len, dev_cmd))
-			goto out_err;
-
-		/*
-		 * building the A-MSDU might have changed this data, so memcpy
-		 * it now
-		 */
-		memcpy(&txq->first_tb_bufs[idx], &dev_cmd->hdr,
-		       IWL_FIRST_TB_SIZE);
-		return tfd;
-	}
-
 	/* set up TFD's third entry to point to remainder of skb's head */
 	tb2_len = skb_headlen(skb) - hdr_len;
 
@@ -467,13 +497,50 @@ struct iwl_tfh_tfd *iwl_pcie_gen2_build_tfd(struct iwl_trans *trans,
 	return NULL;
 }
 
+static
+struct iwl_tfh_tfd *iwl_pcie_gen2_build_tfd(struct iwl_trans *trans,
+					    struct iwl_txq *txq,
+					    struct iwl_device_cmd *dev_cmd,
+					    struct sk_buff *skb,
+					    struct iwl_cmd_meta *out_meta)
+{
+	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+	int idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
+	struct iwl_tfh_tfd *tfd = iwl_pcie_get_tfd(trans, txq, idx);
+	int len, hdr_len;
+	bool amsdu;
+
+	/* There must be data left over for TB1 or this code must be changed */
+	BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen2) < IWL_FIRST_TB_SIZE);
+
+	memset(tfd, 0, sizeof(*tfd));
+
+	if (trans->cfg->device_family < IWL_DEVICE_FAMILY_22560)
+		len = sizeof(struct iwl_tx_cmd_gen2);
+	else
+		len = sizeof(struct iwl_tx_cmd_gen3);
+
+	amsdu = ieee80211_is_data_qos(hdr->frame_control) &&
+			(*ieee80211_get_qos_ctl(hdr) &
+			 IEEE80211_QOS_CTL_A_MSDU_PRESENT);
+
+	hdr_len = ieee80211_hdrlen(hdr->frame_control);
+
+	if (amsdu)
+		return iwl_pcie_gen2_build_tx_amsdu(trans, txq, dev_cmd, skb,
+						    out_meta, hdr_len, len);
+
+	return iwl_pcie_gen2_build_tx(trans, txq, dev_cmd, skb, out_meta,
+				      hdr_len, len);
+}
+
 int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
 			   struct iwl_device_cmd *dev_cmd, int txq_id)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	struct iwl_tx_cmd_gen2 *tx_cmd = (void *)dev_cmd->payload;
 	struct iwl_cmd_meta *out_meta;
 	struct iwl_txq *txq = trans_pcie->txq[txq_id];
+	u16 cmd_len;
 	int idx;
 	void *tfd;
 
@@ -488,11 +555,23 @@ int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
 
 	spin_lock(&txq->lock);
 
-	if (iwl_queue_space(txq) < txq->high_mark) {
+	if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
+		struct iwl_tx_cmd_gen3 *tx_cmd_gen3 =
+			(void *)dev_cmd->payload;
+
+		cmd_len = le16_to_cpu(tx_cmd_gen3->len);
+	} else {
+		struct iwl_tx_cmd_gen2 *tx_cmd_gen2 =
+			(void *)dev_cmd->payload;
+
+		cmd_len = le16_to_cpu(tx_cmd_gen2->len);
+	}
+
+	if (iwl_queue_space(trans, txq) < txq->high_mark) {
 		iwl_stop_queue(trans, txq);
 
 		/* don't put the packet on the ring, if there is no room */
-		if (unlikely(iwl_queue_space(txq) < 3)) {
+		if (unlikely(iwl_queue_space(trans, txq) < 3)) {
 			struct iwl_device_cmd **dev_cmd_ptr;
 
 			dev_cmd_ptr = (void *)((u8 *)skb->cb +
@@ -526,7 +605,7 @@ int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
 	}
 
 	/* Set up entry for this TFD in Tx byte-count array */
-	iwl_pcie_gen2_update_byte_tbl(txq, le16_to_cpu(tx_cmd->len),
+	iwl_pcie_gen2_update_byte_tbl(trans_pcie, txq, cmd_len,
 				      iwl_pcie_gen2_get_num_tbs(trans, tfd));
 
 	/* start timer if queue currently empty */
@@ -538,7 +617,7 @@ int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
 	}
 
 	/* Tell device the write index *just past* this latest filled TFD */
-	txq->write_ptr = iwl_queue_inc_wrap(txq->write_ptr);
+	txq->write_ptr = iwl_queue_inc_wrap(trans, txq->write_ptr);
 	iwl_pcie_gen2_txq_inc_wr_ptr(trans, txq);
 	/*
 	 * At this point the frame is "transmitted" successfully
@@ -650,7 +729,7 @@ static int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
 	tfd = iwl_pcie_get_tfd(trans, txq, txq->write_ptr);
 	memset(tfd, 0, sizeof(*tfd));
 
-	if (iwl_queue_space(txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
+	if (iwl_queue_space(trans, txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
 		spin_unlock_bh(&txq->lock);
 
 		IWL_ERR(trans, "No space in command queue\n");
@@ -787,7 +866,7 @@ static int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
 		iwl_trans_ref(trans);
 	}
 	/* Increment and update queue's write index */
-	txq->write_ptr = iwl_queue_inc_wrap(txq->write_ptr);
+	txq->write_ptr = iwl_queue_inc_wrap(trans, txq->write_ptr);
 	iwl_pcie_gen2_txq_inc_wr_ptr(trans, txq);
 	spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
 
@@ -954,7 +1033,7 @@ void iwl_pcie_gen2_txq_unmap(struct iwl_trans *trans, int txq_id)
 			iwl_pcie_free_tso_page(trans_pcie, skb);
 		}
 		iwl_pcie_gen2_free_tfd(trans, txq);
-		txq->read_ptr = iwl_queue_inc_wrap(txq->read_ptr);
+		txq->read_ptr = iwl_queue_inc_wrap(trans, txq->read_ptr);
 
 		if (txq->read_ptr == txq->write_ptr) {
 			unsigned long flags;
@@ -1062,6 +1141,9 @@ int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans,
 	if (!txq)
 		return -ENOMEM;
 	ret = iwl_pcie_alloc_dma_ptr(trans, &txq->bc_tbl,
+				     (trans->cfg->device_family >=
+				      IWL_DEVICE_FAMILY_22560) ?
+				     sizeof(struct iwl_gen3_bc_tbl) :
 				     sizeof(struct iwlagn_scd_bc_tbl));
 	if (ret) {
 		IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
@@ -1113,7 +1195,7 @@ int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans,
 
 	txq->id = qid;
 	trans_pcie->txq[qid] = txq;
-	wr_ptr &= (TFD_QUEUE_SIZE_MAX - 1);
+	wr_ptr &= (trans->cfg->base_params->max_tfd_queue_size - 1);
 
 	/* Place first TFD at index corresponding to start sequence number */
 	txq->read_ptr = wr_ptr;
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
index 473fe7ccb07c40fe70ebe966bd8dd5d22c2e2d9f..93f0d387688a1314a54a8f17f4c02153dadd802d 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
@@ -71,27 +71,28 @@
  *
  ***************************************************/
 
-int iwl_queue_space(const struct iwl_txq *q)
+int iwl_queue_space(struct iwl_trans *trans, const struct iwl_txq *q)
 {
 	unsigned int max;
 	unsigned int used;
 
 	/*
 	 * To avoid ambiguity between empty and completely full queues, there
-	 * should always be less than TFD_QUEUE_SIZE_MAX elements in the queue.
-	 * If q->n_window is smaller than TFD_QUEUE_SIZE_MAX, there is no need
+	 * should always be less than max_tfd_queue_size elements in the queue.
+	 * If q->n_window is smaller than max_tfd_queue_size, there is no need
 	 * to reserve any queue entries for this purpose.
 	 */
-	if (q->n_window < TFD_QUEUE_SIZE_MAX)
+	if (q->n_window < trans->cfg->base_params->max_tfd_queue_size)
 		max = q->n_window;
 	else
-		max = TFD_QUEUE_SIZE_MAX - 1;
+		max = trans->cfg->base_params->max_tfd_queue_size - 1;
 
 	/*
-	 * TFD_QUEUE_SIZE_MAX is a power of 2, so the following is equivalent to
-	 * modulo by TFD_QUEUE_SIZE_MAX and is well defined.
+	 * max_tfd_queue_size is a power of 2, so the following is equivalent to
+	 * modulo by max_tfd_queue_size and is well defined.
 	 */
-	used = (q->write_ptr - q->read_ptr) & (TFD_QUEUE_SIZE_MAX - 1);
+	used = (q->write_ptr - q->read_ptr) &
+		(trans->cfg->base_params->max_tfd_queue_size - 1);
 
 	if (WARN_ON(used > max))
 		return 0;
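
The rewritten iwl_queue_space() relies on the ring size being a power of two so that subtract-and-mask gives a well-defined modulo even after the write pointer wraps. A minimal user-space sketch of that arithmetic, with made-up pointer values:

#include <stdio.h>

static unsigned int queue_space(unsigned int write, unsigned int read,
				unsigned int queue_size, unsigned int n_window)
{
	/* keep one slot unused so read == write always means "empty" */
	unsigned int max = (n_window < queue_size) ? n_window : queue_size - 1;
	/* power-of-two size makes this a well-defined modulo, even wrapped */
	unsigned int used = (write - read) & (queue_size - 1);

	return (used > max) ? 0 : max - used;
}

int main(void)
{
	/* 256-entry ring that has wrapped: write=3, read=250 -> 9 in flight,
	 * so 246 of the 255 usable slots are still free */
	printf("free slots: %u\n", queue_space(3, 250, 256, 256));
	return 0;
}

Reserving one slot (max = size - 1) when n_window is not smaller than the ring is what keeps an equal read and write pointer unambiguous as the empty state.
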
@@ -489,7 +490,8 @@ int iwl_pcie_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq,
 		       int slots_num, bool cmd_queue)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	size_t tfd_sz = trans_pcie->tfd_size * TFD_QUEUE_SIZE_MAX;
+	size_t tfd_sz = trans_pcie->tfd_size *
+		trans->cfg->base_params->max_tfd_queue_size;
 	size_t tb0_buf_sz;
 	int i;
 
@@ -555,12 +557,16 @@ int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
 		      int slots_num, bool cmd_queue)
 {
 	int ret;
+	u32 tfd_queue_max_size = trans->cfg->base_params->max_tfd_queue_size;
 
 	txq->need_update = false;
 
-	/* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
+	/* max_tfd_queue_size must be power-of-two size, otherwise
 	 * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
-	BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));
+	if (WARN_ONCE(tfd_queue_max_size & (tfd_queue_max_size - 1),
+		      "Max tfd queue size must be a power of two, but is %d",
+		      tfd_queue_max_size))
+		return -EINVAL;
 
 	/* Initialize queue's high/low-water marks, and head/tail indexes */
 	ret = iwl_queue_init(txq, slots_num);
@@ -637,7 +643,7 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
 			iwl_pcie_free_tso_page(trans_pcie, skb);
 		}
 		iwl_pcie_txq_free_tfd(trans, txq);
-		txq->read_ptr = iwl_queue_inc_wrap(txq->read_ptr);
+		txq->read_ptr = iwl_queue_inc_wrap(trans, txq->read_ptr);
 
 		if (txq->read_ptr == txq->write_ptr) {
 			unsigned long flags;
@@ -696,7 +702,8 @@ static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id)
 	/* De-alloc circular buffer of TFDs */
 	if (txq->tfds) {
 		dma_free_coherent(dev,
-				  trans_pcie->tfd_size * TFD_QUEUE_SIZE_MAX,
+				  trans_pcie->tfd_size *
+				  trans->cfg->base_params->max_tfd_queue_size,
 				  txq->tfds, txq->dma_addr);
 		txq->dma_addr = 0;
 		txq->tfds = NULL;
@@ -916,9 +923,11 @@ static int iwl_pcie_tx_alloc(struct iwl_trans *trans)
 	int ret;
 	int txq_id, slots_num;
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	u16 bc_tbls_size = trans->cfg->base_params->num_of_queues;
 
-	u16 scd_bc_tbls_size = trans->cfg->base_params->num_of_queues *
-			sizeof(struct iwlagn_scd_bc_tbl);
+	bc_tbls_size *= (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) ?
+		sizeof(struct iwl_gen3_bc_tbl) :
+		sizeof(struct iwlagn_scd_bc_tbl);
 
 	/*It is not allowed to alloc twice, so warn when this happens.
 	 * We cannot rely on the previous allocation, so free and fail */
@@ -928,7 +937,7 @@ static int iwl_pcie_tx_alloc(struct iwl_trans *trans)
 	}
 
 	ret = iwl_pcie_alloc_dma_ptr(trans, &trans_pcie->scd_bc_tbls,
-				   scd_bc_tbls_size);
+				     bc_tbls_size);
 	if (ret) {
 		IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
 		goto error;
@@ -1064,7 +1073,8 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	struct iwl_txq *txq = trans_pcie->txq[txq_id];
-	int tfd_num = ssn & (TFD_QUEUE_SIZE_MAX - 1);
+	int tfd_num = iwl_pcie_get_cmd_index(txq, ssn);
+	int read_ptr = iwl_pcie_get_cmd_index(txq, txq->read_ptr);
 	int last_to_free;
 
 	/* This function is not meant to release cmd queue*/
@@ -1079,7 +1089,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
 		goto out;
 	}
 
-	if (txq->read_ptr == tfd_num)
+	if (read_ptr == tfd_num)
 		goto out;
 
 	IWL_DEBUG_TX_REPLY(trans, "[Q %d] %d -> %d (%d)\n",
@@ -1087,12 +1097,13 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
 
 	/*Since we free until index _not_ inclusive, the one before index is
 	 * the last we will free. This one must be used */
-	last_to_free = iwl_queue_dec_wrap(tfd_num);
+	last_to_free = iwl_queue_dec_wrap(trans, tfd_num);
 
 	if (!iwl_queue_used(txq, last_to_free)) {
 		IWL_ERR(trans,
 			"%s: Read index for DMA queue txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n",
-			__func__, txq_id, last_to_free, TFD_QUEUE_SIZE_MAX,
+			__func__, txq_id, last_to_free,
+			trans->cfg->base_params->max_tfd_queue_size,
 			txq->write_ptr, txq->read_ptr);
 		goto out;
 	}
@@ -1101,10 +1112,10 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
 		goto out;
 
 	for (;
-	     txq->read_ptr != tfd_num;
-	     txq->read_ptr = iwl_queue_inc_wrap(txq->read_ptr)) {
-		int idx = iwl_pcie_get_cmd_index(txq, txq->read_ptr);
-		struct sk_buff *skb = txq->entries[idx].skb;
+	     read_ptr != tfd_num;
+	     txq->read_ptr = iwl_queue_inc_wrap(trans, txq->read_ptr),
+	     read_ptr = iwl_pcie_get_cmd_index(txq, txq->read_ptr)) {
+		struct sk_buff *skb = txq->entries[read_ptr].skb;
 
 		if (WARN_ON_ONCE(!skb))
 			continue;
@@ -1113,7 +1124,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
 
 		__skb_queue_tail(skbs, skb);
 
-		txq->entries[idx].skb = NULL;
+		txq->entries[read_ptr].skb = NULL;
 
 		if (!trans->cfg->use_tfh)
 			iwl_pcie_txq_inval_byte_cnt_tbl(trans, txq);
@@ -1123,7 +1134,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
 
 	iwl_pcie_txq_progress(txq);
 
-	if (iwl_queue_space(txq) > txq->low_mark &&
+	if (iwl_queue_space(trans, txq) > txq->low_mark &&
 	    test_bit(txq_id, trans_pcie->queue_stopped)) {
 		struct sk_buff_head overflow_skbs;
 
@@ -1155,7 +1166,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
 		}
 		spin_lock_bh(&txq->lock);
 
-		if (iwl_queue_space(txq) > txq->low_mark)
+		if (iwl_queue_space(trans, txq) > txq->low_mark)
 			iwl_wake_queue(trans, txq);
 	}
 
@@ -1225,23 +1236,30 @@ static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx)
 	struct iwl_txq *txq = trans_pcie->txq[txq_id];
 	unsigned long flags;
 	int nfreed = 0;
+	u16 r;
 
 	lockdep_assert_held(&txq->lock);
 
-	if ((idx >= TFD_QUEUE_SIZE_MAX) || (!iwl_queue_used(txq, idx))) {
+	idx = iwl_pcie_get_cmd_index(txq, idx);
+	r = iwl_pcie_get_cmd_index(txq, txq->read_ptr);
+
+	if (idx >= trans->cfg->base_params->max_tfd_queue_size ||
+	    (!iwl_queue_used(txq, idx))) {
 		IWL_ERR(trans,
 			"%s: Read index for DMA queue txq id (%d), index %d is out of range [0-%d] %d %d.\n",
-			__func__, txq_id, idx, TFD_QUEUE_SIZE_MAX,
+			__func__, txq_id, idx,
+			trans->cfg->base_params->max_tfd_queue_size,
 			txq->write_ptr, txq->read_ptr);
 		return;
 	}
 
-	for (idx = iwl_queue_inc_wrap(idx); txq->read_ptr != idx;
-	     txq->read_ptr = iwl_queue_inc_wrap(txq->read_ptr)) {
+	for (idx = iwl_queue_inc_wrap(trans, idx); r != idx;
+	     r = iwl_queue_inc_wrap(trans, r)) {
+		txq->read_ptr = iwl_queue_inc_wrap(trans, txq->read_ptr);
 
 		if (nfreed++ > 0) {
 			IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n",
-				idx, txq->write_ptr, txq->read_ptr);
+				idx, txq->write_ptr, r);
 			iwl_force_nmi(trans);
 		}
 	}
@@ -1555,7 +1573,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
 
 	spin_lock_bh(&txq->lock);
 
-	if (iwl_queue_space(txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
+	if (iwl_queue_space(trans, txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
 		spin_unlock_bh(&txq->lock);
 
 		IWL_ERR(trans, "No space in command queue\n");
@@ -1711,7 +1729,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
 	}
 
 	/* Increment and update queue's write index */
-	txq->write_ptr = iwl_queue_inc_wrap(txq->write_ptr);
+	txq->write_ptr = iwl_queue_inc_wrap(trans, txq->write_ptr);
 	iwl_pcie_txq_inc_wr_ptr(trans, txq);
 
 	spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
@@ -2311,11 +2329,11 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
 
 	spin_lock(&txq->lock);
 
-	if (iwl_queue_space(txq) < txq->high_mark) {
+	if (iwl_queue_space(trans, txq) < txq->high_mark) {
 		iwl_stop_queue(trans, txq);
 
 		/* don't put the packet on the ring, if there is no room */
-		if (unlikely(iwl_queue_space(txq) < 3)) {
+		if (unlikely(iwl_queue_space(trans, txq) < 3)) {
 			struct iwl_device_cmd **dev_cmd_ptr;
 
 			dev_cmd_ptr = (void *)((u8 *)skb->cb +
@@ -2444,7 +2462,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
 	}
 
 	/* Tell device the write index *just past* this latest filled TFD */
-	txq->write_ptr = iwl_queue_inc_wrap(txq->write_ptr);
+	txq->write_ptr = iwl_queue_inc_wrap(trans, txq->write_ptr);
 	if (!wait_write_ptr)
 		iwl_pcie_txq_inc_wr_ptr(trans, txq);
 
diff --git a/drivers/net/wireless/intersil/hostap/hostap_ap.c b/drivers/net/wireless/intersil/hostap/hostap_ap.c
index d1884b8913e7276e42be9f6a91b16e023482f05e..0094b1d2b5770568fa1993474fe96f701debcb04 100644
--- a/drivers/net/wireless/intersil/hostap/hostap_ap.c
+++ b/drivers/net/wireless/intersil/hostap/hostap_ap.c
@@ -66,7 +66,7 @@ static void prism2_send_mgmt(struct net_device *dev,
 #endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
 
 
-#ifndef PRISM2_NO_PROCFS_DEBUG
+#if !defined(PRISM2_NO_PROCFS_DEBUG) && defined(CONFIG_PROC_FS)
 static int ap_debug_proc_show(struct seq_file *m, void *v)
 {
 	struct ap_data *ap = PDE_DATA(file_inode(m->file));
@@ -81,8 +81,7 @@ static int ap_debug_proc_show(struct seq_file *m, void *v)
 	seq_printf(m, "tx_drop_nonassoc=%u\n", ap->tx_drop_nonassoc);
 	return 0;
 }
-#endif /* PRISM2_NO_PROCFS_DEBUG */
-
+#endif
 
 static void ap_sta_hash_add(struct ap_data *ap, struct sta_info *sta)
 {
@@ -990,7 +989,7 @@ static void prism2_send_mgmt(struct net_device *dev,
 }
 #endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
 
-
+#ifdef CONFIG_PROC_FS
 static int prism2_sta_proc_show(struct seq_file *m, void *v)
 {
 	struct sta_info *sta = m->private;
@@ -1059,6 +1058,7 @@ static int prism2_sta_proc_show(struct seq_file *m, void *v)
 
 	return 0;
 }
+#endif
 
 static void handle_add_proc_queue(struct work_struct *work)
 {
diff --git a/drivers/net/wireless/intersil/hostap/hostap_hw.c b/drivers/net/wireless/intersil/hostap/hostap_hw.c
index 2720aa39f530fc25ee30455c654335639e2d0770..ad1aa65fee7f7c4515dba38291c0503a647a3152 100644
--- a/drivers/net/wireless/intersil/hostap/hostap_hw.c
+++ b/drivers/net/wireless/intersil/hostap/hostap_hw.c
@@ -151,13 +151,6 @@ static int prism2_get_ram_size(local_info_t *local);
 #define HFA384X_MAGIC 0x8A32
 #endif
 
-
-static u16 hfa384x_read_reg(struct net_device *dev, u16 reg)
-{
-	return HFA384X_INW(reg);
-}
-
-
 static void hfa384x_read_regs(struct net_device *dev,
 			      struct hfa384x_regs *regs)
 {
@@ -2897,7 +2890,12 @@ static void hostap_tick_timer(struct timer_list *t)
 }
 
 
-#ifndef PRISM2_NO_PROCFS_DEBUG
+#if !defined(PRISM2_NO_PROCFS_DEBUG) && defined(CONFIG_PROC_FS)
+static u16 hfa384x_read_reg(struct net_device *dev, u16 reg)
+{
+	return HFA384X_INW(reg);
+}
+
 static int prism2_registers_proc_show(struct seq_file *m, void *v)
 {
 	local_info_t *local = m->private;
@@ -2951,8 +2949,7 @@ static int prism2_registers_proc_show(struct seq_file *m, void *v)
 
 	return 0;
 }
-#endif /* PRISM2_NO_PROCFS_DEBUG */
-
+#endif
 
 struct set_tim_data {
 	struct list_head list;
diff --git a/drivers/net/wireless/intersil/hostap/hostap_proc.c b/drivers/net/wireless/intersil/hostap/hostap_proc.c
index 5b33ccab91887b029086fd946275a69a1ac3be2f..703d74cea3c20e3cbd48849eb591df52c673ae02 100644
--- a/drivers/net/wireless/intersil/hostap/hostap_proc.c
+++ b/drivers/net/wireless/intersil/hostap/hostap_proc.c
@@ -11,8 +11,7 @@
 
 #define PROC_LIMIT (PAGE_SIZE - 80)
 
-
-#ifndef PRISM2_NO_PROCFS_DEBUG
+#if !defined(PRISM2_NO_PROCFS_DEBUG) && defined(CONFIG_PROC_FS)
 static int prism2_debug_proc_show(struct seq_file *m, void *v)
 {
 	local_info_t *local = m->private;
@@ -43,9 +42,9 @@ static int prism2_debug_proc_show(struct seq_file *m, void *v)
 
 	return 0;
 }
-#endif /* PRISM2_NO_PROCFS_DEBUG */
-
+#endif
 
+#ifdef CONFIG_PROC_FS
 static int prism2_stats_proc_show(struct seq_file *m, void *v)
 {
 	local_info_t *local = m->private;
@@ -82,6 +81,7 @@ static int prism2_stats_proc_show(struct seq_file *m, void *v)
 
 	return 0;
 }
+#endif
 
 static int prism2_wds_proc_show(struct seq_file *m, void *v)
 {
@@ -174,6 +174,7 @@ static const struct seq_operations prism2_bss_list_proc_seqops = {
 	.show	= prism2_bss_list_proc_show,
 };
 
+#ifdef CONFIG_PROC_FS
 static int prism2_crypt_proc_show(struct seq_file *m, void *v)
 {
 	local_info_t *local = m->private;
@@ -190,6 +191,7 @@ static int prism2_crypt_proc_show(struct seq_file *m, void *v)
 	}
 	return 0;
 }
+#endif
 
 static ssize_t prism2_pda_proc_read(struct file *file, char __user *buf,
 				    size_t count, loff_t *_pos)
diff --git a/drivers/net/wireless/marvell/mwifiex/11n.c b/drivers/net/wireless/marvell/mwifiex/11n.c
index 5d75c971004b4e480737aa0fe13627afb1e58cb7..e2addd8b878b290bbcb6656f6baeedb258400964 100644
--- a/drivers/net/wireless/marvell/mwifiex/11n.c
+++ b/drivers/net/wireless/marvell/mwifiex/11n.c
@@ -696,10 +696,11 @@ void mwifiex_11n_delba(struct mwifiex_private *priv, int tid)
 				"Send delba to tid=%d, %pM\n",
 				tid, rx_reor_tbl_ptr->ta);
 			mwifiex_send_delba(priv, tid, rx_reor_tbl_ptr->ta, 0);
-			goto exit;
+			spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
+					       flags);
+			return;
 		}
 	}
-exit:
 	spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
 }
 
diff --git a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
index 7ab44cd32a9d506b9dcef96c1f81c359123356dd..8e63d14c1e1c57b4a93d430f5174c245f6a69b08 100644
--- a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
+++ b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
@@ -103,6 +103,8 @@ static int mwifiex_11n_dispatch_pkt(struct mwifiex_private *priv, void *payload)
  * There could be holes in the buffer, which are skipped by the function.
  * Since the buffer is linear, the function uses rotation to simulate
  * circular buffer.
+ *
+ * The caller must hold rx_reorder_tbl_lock spinlock.
  */
 static void
 mwifiex_11n_dispatch_pkt_until_start_win(struct mwifiex_private *priv,
@@ -111,25 +113,21 @@ mwifiex_11n_dispatch_pkt_until_start_win(struct mwifiex_private *priv,
 {
 	int pkt_to_send, i;
 	void *rx_tmp_ptr;
-	unsigned long flags;
 
 	pkt_to_send = (start_win > tbl->start_win) ?
 		      min((start_win - tbl->start_win), tbl->win_size) :
 		      tbl->win_size;
 
 	for (i = 0; i < pkt_to_send; ++i) {
-		spin_lock_irqsave(&priv->rx_pkt_lock, flags);
 		rx_tmp_ptr = NULL;
 		if (tbl->rx_reorder_ptr[i]) {
 			rx_tmp_ptr = tbl->rx_reorder_ptr[i];
 			tbl->rx_reorder_ptr[i] = NULL;
 		}
-		spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);
 		if (rx_tmp_ptr)
 			mwifiex_11n_dispatch_pkt(priv, rx_tmp_ptr);
 	}
 
-	spin_lock_irqsave(&priv->rx_pkt_lock, flags);
 	/*
 	 * We don't have a circular buffer, hence use rotation to simulate
 	 * circular buffer
@@ -140,7 +138,6 @@ mwifiex_11n_dispatch_pkt_until_start_win(struct mwifiex_private *priv,
 	}
 
 	tbl->start_win = start_win;
-	spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);
 }
 
 /*
@@ -150,6 +147,8 @@ mwifiex_11n_dispatch_pkt_until_start_win(struct mwifiex_private *priv,
  * The start window is adjusted automatically when a hole is located.
  * Since the buffer is linear, the function uses rotation to simulate
  * circular buffer.
+ *
+ * The caller must hold rx_reorder_tbl_lock spinlock.
  */
 static void
 mwifiex_11n_scan_and_dispatch(struct mwifiex_private *priv,
@@ -157,21 +156,15 @@ mwifiex_11n_scan_and_dispatch(struct mwifiex_private *priv,
 {
 	int i, j, xchg;
 	void *rx_tmp_ptr;
-	unsigned long flags;
 
 	for (i = 0; i < tbl->win_size; ++i) {
-		spin_lock_irqsave(&priv->rx_pkt_lock, flags);
-		if (!tbl->rx_reorder_ptr[i]) {
-			spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);
+		if (!tbl->rx_reorder_ptr[i])
 			break;
-		}
 		rx_tmp_ptr = tbl->rx_reorder_ptr[i];
 		tbl->rx_reorder_ptr[i] = NULL;
-		spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);
 		mwifiex_11n_dispatch_pkt(priv, rx_tmp_ptr);
 	}
 
-	spin_lock_irqsave(&priv->rx_pkt_lock, flags);
 	/*
 	 * We don't have a circular buffer, hence use rotation to simulate
 	 * circular buffer
@@ -184,7 +177,6 @@ mwifiex_11n_scan_and_dispatch(struct mwifiex_private *priv,
 		}
 	}
 	tbl->start_win = (tbl->start_win + i) & (MAX_TID_VALUE - 1);
-	spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);
 }
 
 /*
@@ -192,6 +184,8 @@ mwifiex_11n_scan_and_dispatch(struct mwifiex_private *priv,
  *
  * The function stops the associated timer and dispatches all the
  * pending packets in the Rx reorder table before deletion.
+ *
+ * The caller must hold rx_reorder_tbl_lock spinlock.
  */
 static void
 mwifiex_del_rx_reorder_entry(struct mwifiex_private *priv,
@@ -217,11 +211,7 @@ mwifiex_del_rx_reorder_entry(struct mwifiex_private *priv,
 
 	del_timer_sync(&tbl->timer_context.timer);
 	tbl->timer_context.timer_is_set = false;
-
-	spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
 	list_del(&tbl->list);
-	spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
-
 	kfree(tbl->rx_reorder_ptr);
 	kfree(tbl);
 
@@ -234,22 +224,17 @@ mwifiex_del_rx_reorder_entry(struct mwifiex_private *priv,
 /*
  * This function returns the pointer to an entry in Rx reordering
  * table which matches the given TA/TID pair.
+ *
+ * The caller must hold rx_reorder_tbl_lock spinlock.
  */
 struct mwifiex_rx_reorder_tbl *
 mwifiex_11n_get_rx_reorder_tbl(struct mwifiex_private *priv, int tid, u8 *ta)
 {
 	struct mwifiex_rx_reorder_tbl *tbl;
-	unsigned long flags;
 
-	spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
-	list_for_each_entry(tbl, &priv->rx_reorder_tbl_ptr, list) {
-		if (!memcmp(tbl->ta, ta, ETH_ALEN) && tbl->tid == tid) {
-			spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
-					       flags);
+	list_for_each_entry(tbl, &priv->rx_reorder_tbl_ptr, list)
+		if (!memcmp(tbl->ta, ta, ETH_ALEN) && tbl->tid == tid)
 			return tbl;
-		}
-	}
-	spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
 
 	return NULL;
 }
@@ -266,14 +251,9 @@ void mwifiex_11n_del_rx_reorder_tbl_by_ta(struct mwifiex_private *priv, u8 *ta)
 		return;
 
 	spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
-	list_for_each_entry_safe(tbl, tmp, &priv->rx_reorder_tbl_ptr, list) {
-		if (!memcmp(tbl->ta, ta, ETH_ALEN)) {
-			spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
-					       flags);
+	list_for_each_entry_safe(tbl, tmp, &priv->rx_reorder_tbl_ptr, list)
+		if (!memcmp(tbl->ta, ta, ETH_ALEN))
 			mwifiex_del_rx_reorder_entry(priv, tbl);
-			spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
-		}
-	}
 	spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
 
 	return;
@@ -282,24 +262,18 @@ void mwifiex_11n_del_rx_reorder_tbl_by_ta(struct mwifiex_private *priv, u8 *ta)
 /*
  * This function finds the last sequence number used in the packets
  * buffered in Rx reordering table.
+ *
+ * The caller must hold rx_reorder_tbl_lock spinlock.
  */
 static int
 mwifiex_11n_find_last_seq_num(struct reorder_tmr_cnxt *ctx)
 {
 	struct mwifiex_rx_reorder_tbl *rx_reorder_tbl_ptr = ctx->ptr;
-	struct mwifiex_private *priv = ctx->priv;
-	unsigned long flags;
 	int i;
 
-	spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
-	for (i = rx_reorder_tbl_ptr->win_size - 1; i >= 0; --i) {
-		if (rx_reorder_tbl_ptr->rx_reorder_ptr[i]) {
-			spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
-					       flags);
+	for (i = rx_reorder_tbl_ptr->win_size - 1; i >= 0; --i)
+		if (rx_reorder_tbl_ptr->rx_reorder_ptr[i])
 			return i;
-		}
-	}
-	spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
 
 	return -1;
 }
@@ -317,17 +291,22 @@ mwifiex_flush_data(struct timer_list *t)
 	struct reorder_tmr_cnxt *ctx =
 		from_timer(ctx, t, timer);
 	int start_win, seq_num;
+	unsigned long flags;
 
 	ctx->timer_is_set = false;
+	spin_lock_irqsave(&ctx->priv->rx_reorder_tbl_lock, flags);
 	seq_num = mwifiex_11n_find_last_seq_num(ctx);
 
-	if (seq_num < 0)
+	if (seq_num < 0) {
+		spin_unlock_irqrestore(&ctx->priv->rx_reorder_tbl_lock, flags);
 		return;
+	}
 
 	mwifiex_dbg(ctx->priv->adapter, INFO, "info: flush data %d\n", seq_num);
 	start_win = (ctx->ptr->start_win + seq_num + 1) & (MAX_TID_VALUE - 1);
 	mwifiex_11n_dispatch_pkt_until_start_win(ctx->priv, ctx->ptr,
 						 start_win);
+	spin_unlock_irqrestore(&ctx->priv->rx_reorder_tbl_lock, flags);
 }
 
 /*
@@ -354,11 +333,14 @@ mwifiex_11n_create_rx_reorder_tbl(struct mwifiex_private *priv, u8 *ta,
	 * If we get a TID, ta pair which is already present, dispatch all
	 * the packets and move the window size until the ssn
 	 */
+	spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
 	tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid, ta);
 	if (tbl) {
 		mwifiex_11n_dispatch_pkt_until_start_win(priv, tbl, seq_num);
+		spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
 		return;
 	}
+	spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
 	/* if !tbl then create one */
 	new_node = kzalloc(sizeof(struct mwifiex_rx_reorder_tbl), GFP_KERNEL);
 	if (!new_node)
@@ -569,16 +551,20 @@ int mwifiex_11n_rx_reorder_pkt(struct mwifiex_private *priv,
 	int prev_start_win, start_win, end_win, win_size;
 	u16 pkt_index;
 	bool init_window_shift = false;
+	unsigned long flags;
 	int ret = 0;
 
+	spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
 	tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid, ta);
 	if (!tbl) {
+		spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
 		if (pkt_type != PKT_TYPE_BAR)
 			mwifiex_11n_dispatch_pkt(priv, payload);
 		return ret;
 	}
 
 	if ((pkt_type == PKT_TYPE_AMSDU) && !tbl->amsdu) {
+		spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
 		mwifiex_11n_dispatch_pkt(priv, payload);
 		return ret;
 	}
@@ -665,6 +651,8 @@ int mwifiex_11n_rx_reorder_pkt(struct mwifiex_private *priv,
 	if (!tbl->timer_context.timer_is_set ||
 	    prev_start_win != tbl->start_win)
 		mwifiex_11n_rxreorder_timer_restart(tbl);
+
+	spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
 	return ret;
 }
 
@@ -693,14 +681,18 @@ mwifiex_del_ba_tbl(struct mwifiex_private *priv, int tid, u8 *peer_mac,
 		    peer_mac, tid, initiator);
 
 	if (cleanup_rx_reorder_tbl) {
+		spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
 		tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid,
 								 peer_mac);
 		if (!tbl) {
+			spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
+					       flags);
 			mwifiex_dbg(priv->adapter, EVENT,
 				    "event: TID, TA not found in table\n");
 			return;
 		}
 		mwifiex_del_rx_reorder_entry(priv, tbl);
+		spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
 	} else {
 		ptx_tbl = mwifiex_get_ba_tbl(priv, tid, peer_mac);
 		if (!ptx_tbl) {
@@ -734,6 +726,7 @@ int mwifiex_ret_11n_addba_resp(struct mwifiex_private *priv,
 	int tid, win_size;
 	struct mwifiex_rx_reorder_tbl *tbl;
 	uint16_t block_ack_param_set;
+	unsigned long flags;
 
 	block_ack_param_set = le16_to_cpu(add_ba_rsp->block_ack_param_set);
 
@@ -747,17 +740,20 @@ int mwifiex_ret_11n_addba_resp(struct mwifiex_private *priv,
 		mwifiex_dbg(priv->adapter, ERROR, "ADDBA RSP: failed %pM tid=%d)\n",
 			    add_ba_rsp->peer_mac_addr, tid);
 
+		spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
 		tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid,
 						     add_ba_rsp->peer_mac_addr);
 		if (tbl)
 			mwifiex_del_rx_reorder_entry(priv, tbl);
 
+		spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
 		return 0;
 	}
 
 	win_size = (block_ack_param_set & IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK)
 		    >> BLOCKACKPARAM_WINSIZE_POS;
 
+	spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
 	tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid,
 					     add_ba_rsp->peer_mac_addr);
 	if (tbl) {
@@ -768,6 +764,7 @@ int mwifiex_ret_11n_addba_resp(struct mwifiex_private *priv,
 		else
 			tbl->amsdu = false;
 	}
+	spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
 
 	mwifiex_dbg(priv->adapter, CMD,
 		    "cmd: ADDBA RSP: %pM tid=%d ssn=%d win_size=%d\n",
@@ -807,11 +804,8 @@ void mwifiex_11n_cleanup_reorder_tbl(struct mwifiex_private *priv)
 
 	spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
 	list_for_each_entry_safe(del_tbl_ptr, tmp_node,
-				 &priv->rx_reorder_tbl_ptr, list) {
-		spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
+				 &priv->rx_reorder_tbl_ptr, list)
 		mwifiex_del_rx_reorder_entry(priv, del_tbl_ptr);
-		spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
-	}
 	INIT_LIST_HEAD(&priv->rx_reorder_tbl_ptr);
 	spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
 
@@ -935,6 +929,7 @@ void mwifiex_11n_rxba_sync_event(struct mwifiex_private *priv,
 	int tlv_buf_left = len;
 	int ret;
 	u8 *tmp;
+	unsigned long flags;
 
 	mwifiex_dbg_dump(priv->adapter, EVT_D, "RXBA_SYNC event:",
 			 event_buf, len);
@@ -954,14 +949,18 @@ void mwifiex_11n_rxba_sync_event(struct mwifiex_private *priv,
 			    tlv_rxba->mac, tlv_rxba->tid, tlv_seq_num,
 			    tlv_bitmap_len);
 
+		spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
 		rx_reor_tbl_ptr =
 			mwifiex_11n_get_rx_reorder_tbl(priv, tlv_rxba->tid,
 						       tlv_rxba->mac);
 		if (!rx_reor_tbl_ptr) {
+			spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
+					       flags);
 			mwifiex_dbg(priv->adapter, ERROR,
 				    "Can not find rx_reorder_tbl!");
 			return;
 		}
+		spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
 
 		for (i = 0; i < tlv_bitmap_len; i++) {
 			for (j = 0 ; j < 8; j++) {
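
The hunks above move rx_reorder_tbl_lock out of the helpers and into their callers. A minimal sketch of the resulting calling convention (example_del_entry is a hypothetical caller, not part of the patch; it mirrors what mwifiex_del_ba_tbl now does):

/* Sketch only: the lookup and delete helpers assume the caller holds
 * rx_reorder_tbl_lock for as long as the returned entry is used.
 */
static void example_del_entry(struct mwifiex_private *priv, int tid, u8 *ta)
{
	struct mwifiex_rx_reorder_tbl *tbl;
	unsigned long flags;

	spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
	tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid, ta);
	if (tbl)
		mwifiex_del_rx_reorder_entry(priv, tbl);
	spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
}
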
diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
index c02e02c17c9cf5d2393f85c25f77d2c9900912eb..adc88433faa85a006b558c9f0e359327284c8854 100644
--- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
@@ -2322,7 +2322,8 @@ mwifiex_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
 	if (priv->scan_block)
 		priv->scan_block = false;
 
-	if (adapter->surprise_removed || adapter->is_cmd_timedout) {
+	if (test_bit(MWIFIEX_SURPRISE_REMOVED, &adapter->work_flags) ||
+	    test_bit(MWIFIEX_IS_CMD_TIMEDOUT, &adapter->work_flags)) {
 		mwifiex_dbg(adapter, ERROR,
 			    "%s: Ignore connection.\t"
 			    "Card removed or FW in bad state\n",
diff --git a/drivers/net/wireless/marvell/mwifiex/cmdevt.c b/drivers/net/wireless/marvell/mwifiex/cmdevt.c
index 9cfcdf6bec52c1b24fe67001b77a369a5f7a7ad2..60db2b969e20d7d8f5b7119ed6636ccda132cdfd 100644
--- a/drivers/net/wireless/marvell/mwifiex/cmdevt.c
+++ b/drivers/net/wireless/marvell/mwifiex/cmdevt.c
@@ -372,7 +372,7 @@ static int mwifiex_dnld_sleep_confirm_cmd(struct mwifiex_adapter *adapter)
 		adapter->ps_state = PS_STATE_SLEEP_CFM;
 
 	if (!le16_to_cpu(sleep_cfm_buf->resp_ctrl) &&
-	    (adapter->is_hs_configured &&
+	    (test_bit(MWIFIEX_IS_HS_CONFIGURED, &adapter->work_flags) &&
 	     !adapter->sleep_period.period)) {
 		adapter->pm_wakeup_card_req = true;
 		mwifiex_hs_activated_event(mwifiex_get_priv
@@ -564,25 +564,26 @@ int mwifiex_send_cmd(struct mwifiex_private *priv, u16 cmd_no,
 		return -1;
 	}
 
-	if (adapter->is_suspended) {
+	if (test_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags)) {
 		mwifiex_dbg(adapter, ERROR,
 			    "PREP_CMD: device in suspended state\n");
 		return -1;
 	}
 
-	if (adapter->hs_enabling && cmd_no != HostCmd_CMD_802_11_HS_CFG_ENH) {
+	if (test_bit(MWIFIEX_IS_HS_ENABLING, &adapter->work_flags) &&
+	    cmd_no != HostCmd_CMD_802_11_HS_CFG_ENH) {
 		mwifiex_dbg(adapter, ERROR,
 			    "PREP_CMD: host entering sleep state\n");
 		return -1;
 	}
 
-	if (adapter->surprise_removed) {
+	if (test_bit(MWIFIEX_SURPRISE_REMOVED, &adapter->work_flags)) {
 		mwifiex_dbg(adapter, ERROR,
 			    "PREP_CMD: card is removed\n");
 		return -1;
 	}
 
-	if (adapter->is_cmd_timedout) {
+	if (test_bit(MWIFIEX_IS_CMD_TIMEDOUT, &adapter->work_flags)) {
 		mwifiex_dbg(adapter, ERROR,
 			    "PREP_CMD: FW is in bad state\n");
 		return -1;
@@ -789,7 +790,8 @@ int mwifiex_exec_next_cmd(struct mwifiex_adapter *adapter)
 	if (priv && (host_cmd->command !=
 	     cpu_to_le16(HostCmd_CMD_802_11_HS_CFG_ENH))) {
 		if (adapter->hs_activated) {
-			adapter->is_hs_configured = false;
+			clear_bit(MWIFIEX_IS_HS_CONFIGURED,
+				  &adapter->work_flags);
 			mwifiex_hs_activated_event(priv, false);
 		}
 	}
@@ -825,7 +827,7 @@ int mwifiex_process_cmdresp(struct mwifiex_adapter *adapter)
 		return -1;
 	}
 
-	adapter->is_cmd_timedout = 0;
+	clear_bit(MWIFIEX_IS_CMD_TIMEDOUT, &adapter->work_flags);
 
 	resp = (struct host_cmd_ds_command *) adapter->curr_cmd->resp_skb->data;
 	if (adapter->curr_cmd->cmd_flag & CMD_F_HOSTCMD) {
@@ -927,7 +929,7 @@ mwifiex_cmd_timeout_func(struct timer_list *t)
 	struct mwifiex_adapter *adapter = from_timer(adapter, t, cmd_timer);
 	struct cmd_ctrl_node *cmd_node;
 
-	adapter->is_cmd_timedout = 1;
+	set_bit(MWIFIEX_IS_CMD_TIMEDOUT, &adapter->work_flags);
 	if (!adapter->curr_cmd) {
 		mwifiex_dbg(adapter, ERROR,
 			    "cmd: empty curr_cmd\n");
@@ -953,7 +955,8 @@ mwifiex_cmd_timeout_func(struct timer_list *t)
 
 		mwifiex_dbg(adapter, MSG,
 			    "is_cmd_timedout = %d\n",
-			    adapter->is_cmd_timedout);
+			    test_bit(MWIFIEX_IS_CMD_TIMEDOUT,
+				     &adapter->work_flags));
 		mwifiex_dbg(adapter, MSG,
 			    "num_tx_timeout = %d\n",
 			    adapter->dbg.num_tx_timeout);
@@ -1135,7 +1138,8 @@ void
 mwifiex_hs_activated_event(struct mwifiex_private *priv, u8 activated)
 {
 	if (activated) {
-		if (priv->adapter->is_hs_configured) {
+		if (test_bit(MWIFIEX_IS_HS_CONFIGURED,
+			     &priv->adapter->work_flags)) {
 			priv->adapter->hs_activated = true;
 			mwifiex_update_rxreor_flags(priv->adapter,
 						    RXREOR_FORCE_NO_DROP);
@@ -1186,11 +1190,11 @@ int mwifiex_ret_802_11_hs_cfg(struct mwifiex_private *priv,
 			    phs_cfg->params.hs_config.gap);
 	}
 	if (conditions != HS_CFG_CANCEL) {
-		adapter->is_hs_configured = true;
+		set_bit(MWIFIEX_IS_HS_CONFIGURED, &adapter->work_flags);
 		if (adapter->iface_type == MWIFIEX_USB)
 			mwifiex_hs_activated_event(priv, true);
 	} else {
-		adapter->is_hs_configured = false;
+		clear_bit(MWIFIEX_IS_HS_CONFIGURED, &adapter->work_flags);
 		if (adapter->hs_activated)
 			mwifiex_hs_activated_event(priv, false);
 	}
@@ -1212,8 +1216,8 @@ mwifiex_process_hs_config(struct mwifiex_adapter *adapter)
 
 	adapter->if_ops.wakeup(adapter);
 	adapter->hs_activated = false;
-	adapter->is_hs_configured = false;
-	adapter->is_suspended = false;
+	clear_bit(MWIFIEX_IS_HS_CONFIGURED, &adapter->work_flags);
+	clear_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags);
 	mwifiex_hs_activated_event(mwifiex_get_priv(adapter,
 						    MWIFIEX_BSS_ROLE_ANY),
 				   false);
@@ -1273,7 +1277,7 @@ mwifiex_process_sleep_confirm_resp(struct mwifiex_adapter *adapter,
 		return;
 	}
 	adapter->pm_wakeup_card_req = true;
-	if (adapter->is_hs_configured)
+	if (test_bit(MWIFIEX_IS_HS_CONFIGURED, &adapter->work_flags))
 		mwifiex_hs_activated_event(mwifiex_get_priv
 						(adapter, MWIFIEX_BSS_ROLE_ANY),
 					   true);
diff --git a/drivers/net/wireless/marvell/mwifiex/debugfs.c b/drivers/net/wireless/marvell/mwifiex/debugfs.c
index 07453932f7038ca76bc089aa04b8ef5f12a8fec8..cce70252fd96b5b57791f89f5ca902ce5b00bb0b 100644
--- a/drivers/net/wireless/marvell/mwifiex/debugfs.c
+++ b/drivers/net/wireless/marvell/mwifiex/debugfs.c
@@ -813,7 +813,7 @@ mwifiex_hscfg_write(struct file *file, const char __user *ubuf,
 			      MWIFIEX_SYNC_CMD, &hscfg);
 
 	mwifiex_enable_hs(priv->adapter);
-	priv->adapter->hs_enabling = false;
+	clear_bit(MWIFIEX_IS_HS_ENABLING, &priv->adapter->work_flags);
 	ret = count;
 done:
 	kfree(buf);
diff --git a/drivers/net/wireless/marvell/mwifiex/ie.c b/drivers/net/wireless/marvell/mwifiex/ie.c
index b10baacb51c990a47baf396fc606b4f922273a0e..75cbd609d60619c9429093083cb8b3d477c26e41 100644
--- a/drivers/net/wireless/marvell/mwifiex/ie.c
+++ b/drivers/net/wireless/marvell/mwifiex/ie.c
@@ -355,8 +355,14 @@ static int mwifiex_uap_parse_tail_ies(struct mwifiex_private *priv,
 		case WLAN_EID_HT_OPERATION:
 		case WLAN_EID_VHT_CAPABILITY:
 		case WLAN_EID_VHT_OPERATION:
-		case WLAN_EID_VENDOR_SPECIFIC:
 			break;
+		case WLAN_EID_VENDOR_SPECIFIC:
+			/* Skip only Microsoft WMM IE */
+			if (cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
+						    WLAN_OUI_TYPE_MICROSOFT_WMM,
+						    (const u8 *)hdr,
+						    hdr->len + sizeof(struct ieee_types_header)))
+				break;
 		default:
 			memcpy(gen_ie->ie_buffer + ie_len, hdr,
 			       hdr->len + sizeof(struct ieee_types_header));
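
For reference, cfg80211_find_vendor_ie() returns a pointer to the matching element or NULL, so the new case copies every vendor-specific IE except Microsoft's WMM one. A hedged sketch of the lookup on its own, where ies/ies_len are placeholders for an arbitrary element buffer:

/* Sketch: "ies" and "ies_len" stand in for the buffer being scanned;
 * the OUI constants come from <linux/ieee80211.h>.
 */
const u8 *wmm_ie;

wmm_ie = cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
				 WLAN_OUI_TYPE_MICROSOFT_WMM,
				 ies, ies_len);
if (wmm_ie)
	pr_debug("WMM IE present, not copying it to the firmware buffer\n");
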
diff --git a/drivers/net/wireless/marvell/mwifiex/init.c b/drivers/net/wireless/marvell/mwifiex/init.c
index d239e9248c05cfeaa056ad53cfb57a8ed693c417..673e89dff0b5435cc0712a51316102f1e923fadb 100644
--- a/drivers/net/wireless/marvell/mwifiex/init.c
+++ b/drivers/net/wireless/marvell/mwifiex/init.c
@@ -233,7 +233,7 @@ static void mwifiex_init_adapter(struct mwifiex_adapter *adapter)
 	adapter->event_received = false;
 	adapter->data_received = false;
 
-	adapter->surprise_removed = false;
+	clear_bit(MWIFIEX_SURPRISE_REMOVED, &adapter->work_flags);
 
 	adapter->hw_status = MWIFIEX_HW_STATUS_INITIALIZING;
 
@@ -270,7 +270,7 @@ static void mwifiex_init_adapter(struct mwifiex_adapter *adapter)
 
 	adapter->curr_tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K;
 
-	adapter->is_hs_configured = false;
+	clear_bit(MWIFIEX_IS_HS_CONFIGURED, &adapter->work_flags);
 	adapter->hs_cfg.conditions = cpu_to_le32(HS_CFG_COND_DEF);
 	adapter->hs_cfg.gpio = HS_CFG_GPIO_DEF;
 	adapter->hs_cfg.gap = HS_CFG_GAP_DEF;
@@ -439,7 +439,6 @@ int mwifiex_init_lock_list(struct mwifiex_adapter *adapter)
 	for (i = 0; i < adapter->priv_num; i++) {
 		if (adapter->priv[i]) {
 			priv = adapter->priv[i];
-			spin_lock_init(&priv->rx_pkt_lock);
 			spin_lock_init(&priv->wmm.ra_list_spinlock);
 			spin_lock_init(&priv->curr_bcn_buf_lock);
 			spin_lock_init(&priv->sta_list_spinlock);
diff --git a/drivers/net/wireless/marvell/mwifiex/main.c b/drivers/net/wireless/marvell/mwifiex/main.c
index fa3e8ddfe9a93f78382aaa5031fcd15a29b7be09..20cee5c397fb6f4d72dc6378401ee9bcf2af6708 100644
--- a/drivers/net/wireless/marvell/mwifiex/main.c
+++ b/drivers/net/wireless/marvell/mwifiex/main.c
@@ -404,7 +404,8 @@ int mwifiex_main_process(struct mwifiex_adapter *adapter)
 		    !skb_queue_empty(&adapter->tx_data_q)) {
 			mwifiex_process_tx_queue(adapter);
 			if (adapter->hs_activated) {
-				adapter->is_hs_configured = false;
+				clear_bit(MWIFIEX_IS_HS_CONFIGURED,
+					  &adapter->work_flags);
 				mwifiex_hs_activated_event
 					(mwifiex_get_priv
 					(adapter, MWIFIEX_BSS_ROLE_ANY),
@@ -420,7 +421,8 @@ int mwifiex_main_process(struct mwifiex_adapter *adapter)
 			(mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA))) {
 			mwifiex_process_bypass_tx(adapter);
 			if (adapter->hs_activated) {
-				adapter->is_hs_configured = false;
+				clear_bit(MWIFIEX_IS_HS_CONFIGURED,
+					  &adapter->work_flags);
 				mwifiex_hs_activated_event
 					(mwifiex_get_priv
 					 (adapter, MWIFIEX_BSS_ROLE_ANY),
@@ -435,7 +437,8 @@ int mwifiex_main_process(struct mwifiex_adapter *adapter)
 			(mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA))) {
 			mwifiex_wmm_process_tx(adapter);
 			if (adapter->hs_activated) {
-				adapter->is_hs_configured = false;
+				clear_bit(MWIFIEX_IS_HS_CONFIGURED,
+					  &adapter->work_flags);
 				mwifiex_hs_activated_event
 					(mwifiex_get_priv
 					 (adapter, MWIFIEX_BSS_ROLE_ANY),
@@ -647,7 +650,7 @@ static int _mwifiex_fw_dpc(const struct firmware *firmware, void *context)
 	if (adapter->if_ops.unregister_dev)
 		adapter->if_ops.unregister_dev(adapter);
 
-	adapter->surprise_removed = true;
+	set_bit(MWIFIEX_SURPRISE_REMOVED, &adapter->work_flags);
 	mwifiex_terminate_workqueue(adapter);
 
 	if (adapter->hw_status == MWIFIEX_HW_STATUS_READY) {
@@ -870,7 +873,7 @@ mwifiex_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		    "data: %lu BSS(%d-%d): Data <= kernel\n",
 		    jiffies, priv->bss_type, priv->bss_num);
 
-	if (priv->adapter->surprise_removed) {
+	if (test_bit(MWIFIEX_SURPRISE_REMOVED, &priv->adapter->work_flags)) {
 		kfree_skb(skb);
 		priv->stats.tx_dropped++;
 		return 0;
@@ -1372,7 +1375,7 @@ static void mwifiex_rx_work_queue(struct work_struct *work)
 	struct mwifiex_adapter *adapter =
 		container_of(work, struct mwifiex_adapter, rx_work);
 
-	if (adapter->surprise_removed)
+	if (test_bit(MWIFIEX_SURPRISE_REMOVED, &adapter->work_flags))
 		return;
 	mwifiex_process_rx(adapter);
 }
@@ -1388,7 +1391,7 @@ static void mwifiex_main_work_queue(struct work_struct *work)
 	struct mwifiex_adapter *adapter =
 		container_of(work, struct mwifiex_adapter, main_work);
 
-	if (adapter->surprise_removed)
+	if (test_bit(MWIFIEX_SURPRISE_REMOVED, &adapter->work_flags))
 		return;
 	mwifiex_main_process(adapter);
 }
@@ -1405,7 +1408,7 @@ static void mwifiex_uninit_sw(struct mwifiex_adapter *adapter)
 	if (adapter->if_ops.disable_int)
 		adapter->if_ops.disable_int(adapter);
 
-	adapter->surprise_removed = true;
+	set_bit(MWIFIEX_SURPRISE_REMOVED, &adapter->work_flags);
 	mwifiex_terminate_workqueue(adapter);
 	adapter->int_status = 0;
 
@@ -1493,11 +1496,11 @@ mwifiex_reinit_sw(struct mwifiex_adapter *adapter)
 		adapter->if_ops.up_dev(adapter);
 
 	adapter->hw_status = MWIFIEX_HW_STATUS_INITIALIZING;
-	adapter->surprise_removed = false;
+	clear_bit(MWIFIEX_SURPRISE_REMOVED, &adapter->work_flags);
 	init_waitqueue_head(&adapter->init_wait_q);
-	adapter->is_suspended = false;
+	clear_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags);
 	adapter->hs_activated = false;
-	adapter->is_cmd_timedout = 0;
+	clear_bit(MWIFIEX_IS_CMD_TIMEDOUT, &adapter->work_flags);
 	init_waitqueue_head(&adapter->hs_activate_wait_q);
 	init_waitqueue_head(&adapter->cmd_wait_q.wait);
 	adapter->cmd_wait_q.status = 0;
@@ -1552,7 +1555,7 @@ mwifiex_reinit_sw(struct mwifiex_adapter *adapter)
 		adapter->if_ops.unregister_dev(adapter);
 
 err_kmalloc:
-	adapter->surprise_removed = true;
+	set_bit(MWIFIEX_SURPRISE_REMOVED, &adapter->work_flags);
 	mwifiex_terminate_workqueue(adapter);
 	if (adapter->hw_status == MWIFIEX_HW_STATUS_READY) {
 		mwifiex_dbg(adapter, ERROR,
@@ -1649,9 +1652,9 @@ mwifiex_add_card(void *card, struct completion *fw_done,
 	adapter->fw_done = fw_done;
 
 	adapter->hw_status = MWIFIEX_HW_STATUS_INITIALIZING;
-	adapter->surprise_removed = false;
+	clear_bit(MWIFIEX_SURPRISE_REMOVED, &adapter->work_flags);
 	init_waitqueue_head(&adapter->init_wait_q);
-	adapter->is_suspended = false;
+	clear_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags);
 	adapter->hs_activated = false;
 	init_waitqueue_head(&adapter->hs_activate_wait_q);
 	init_waitqueue_head(&adapter->cmd_wait_q.wait);
@@ -1699,7 +1702,7 @@ mwifiex_add_card(void *card, struct completion *fw_done,
 	if (adapter->if_ops.unregister_dev)
 		adapter->if_ops.unregister_dev(adapter);
 err_registerdev:
-	adapter->surprise_removed = true;
+	set_bit(MWIFIEX_SURPRISE_REMOVED, &adapter->work_flags);
 	mwifiex_terminate_workqueue(adapter);
 	if (adapter->hw_status == MWIFIEX_HW_STATUS_READY) {
 		pr_debug("info: %s: shutdown mwifiex\n", __func__);
diff --git a/drivers/net/wireless/marvell/mwifiex/main.h b/drivers/net/wireless/marvell/mwifiex/main.h
index 69ac0a22c28c4c8ca99ad15e40a18994401645dc..b025ba164412813f6b675b9f096969898c940af2 100644
--- a/drivers/net/wireless/marvell/mwifiex/main.h
+++ b/drivers/net/wireless/marvell/mwifiex/main.h
@@ -517,6 +517,14 @@ enum mwifiex_iface_work_flags {
 	MWIFIEX_IFACE_WORK_CARD_RESET,
 };
 
+enum mwifiex_adapter_work_flags {
+	MWIFIEX_SURPRISE_REMOVED,
+	MWIFIEX_IS_CMD_TIMEDOUT,
+	MWIFIEX_IS_SUSPENDED,
+	MWIFIEX_IS_HS_CONFIGURED,
+	MWIFIEX_IS_HS_ENABLING,
+};
+
 struct mwifiex_band_config {
 	u8 chan_band:2;
 	u8 chan_width:2;
@@ -616,9 +624,6 @@ struct mwifiex_private {
 	struct list_head rx_reorder_tbl_ptr;
 	/* spin lock for rx_reorder_tbl_ptr queue */
 	spinlock_t rx_reorder_tbl_lock;
-	/* spin lock for Rx packets */
-	spinlock_t rx_pkt_lock;
-
 #define MWIFIEX_ASSOC_RSP_BUF_SIZE  500
 	u8 assoc_rsp_buf[MWIFIEX_ASSOC_RSP_BUF_SIZE];
 	u32 assoc_rsp_size;
@@ -875,7 +880,7 @@ struct mwifiex_adapter {
 	struct device *dev;
 	struct wiphy *wiphy;
 	u8 perm_addr[ETH_ALEN];
-	bool surprise_removed;
+	unsigned long work_flags;
 	u32 fw_release_number;
 	u8 intf_hdr_len;
 	u16 init_wait_q_woken;
@@ -929,7 +934,6 @@ struct mwifiex_adapter {
 	struct cmd_ctrl_node *curr_cmd;
 	/* spin lock for command */
 	spinlock_t mwifiex_cmd_lock;
-	u8 is_cmd_timedout;
 	u16 last_init_cmd;
 	struct timer_list cmd_timer;
 	struct list_head cmd_free_q;
@@ -979,13 +983,10 @@ struct mwifiex_adapter {
 	u16 pps_uapsd_mode;
 	u32 pm_wakeup_fw_try;
 	struct timer_list wakeup_timer;
-	u8 is_hs_configured;
 	struct mwifiex_hs_config_param hs_cfg;
 	u8 hs_activated;
 	u16 hs_activate_wait_q_woken;
 	wait_queue_head_t hs_activate_wait_q;
-	bool is_suspended;
-	bool hs_enabling;
 	u8 event_body[MAX_EVENT_SIZE];
 	u32 hw_dot_11n_dev_cap;
 	u8 hw_dev_mcs_support;
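
The removed booleans (surprise_removed, is_cmd_timedout, is_suspended, is_hs_configured, hs_enabling) collapse into the single work_flags bitmap defined above. A short sketch of the replacement idiom used throughout the rest of this series:

/* Sketch: set_bit()/clear_bit()/test_bit() are atomic, so the flags can
 * be updated from timer, workqueue and interrupt context without an
 * extra lock.
 */
set_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags);	/* was: adapter->is_suspended = true;  */
clear_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags);	/* was: adapter->is_suspended = false; */

if (test_bit(MWIFIEX_SURPRISE_REMOVED, &adapter->work_flags))
	return -1;					/* was: if (adapter->surprise_removed) */
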
diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c
index 0c42b7296ddd15d89d58f8e1a0470447f4c1e3d0..3fe81b2a929ac13797d976015adff288683f3566 100644
--- a/drivers/net/wireless/marvell/mwifiex/pcie.c
+++ b/drivers/net/wireless/marvell/mwifiex/pcie.c
@@ -170,7 +170,7 @@ static int mwifiex_pcie_suspend(struct device *dev)
 	if (!mwifiex_enable_hs(adapter)) {
 		mwifiex_dbg(adapter, ERROR,
 			    "cmd: failed to suspend\n");
-		adapter->hs_enabling = false;
+		clear_bit(MWIFIEX_IS_HS_ENABLING, &adapter->work_flags);
 		mwifiex_disable_wake(adapter);
 		return -EFAULT;
 	}
@@ -178,8 +178,8 @@ static int mwifiex_pcie_suspend(struct device *dev)
 	flush_workqueue(adapter->workqueue);
 
 	/* Indicate device suspended */
-	adapter->is_suspended = true;
-	adapter->hs_enabling = false;
+	set_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags);
+	clear_bit(MWIFIEX_IS_HS_ENABLING, &adapter->work_flags);
 
 	return 0;
 }
@@ -207,13 +207,13 @@ static int mwifiex_pcie_resume(struct device *dev)
 
 	adapter = card->adapter;
 
-	if (!adapter->is_suspended) {
+	if (!test_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags)) {
 		mwifiex_dbg(adapter, WARN,
 			    "Device already resumed\n");
 		return 0;
 	}
 
-	adapter->is_suspended = false;
+	clear_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags);
 
 	mwifiex_cancel_hs(mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA),
 			  MWIFIEX_ASYNC_CMD);
@@ -2430,7 +2430,7 @@ static irqreturn_t mwifiex_pcie_interrupt(int irq, void *context)
 	}
 	adapter = card->adapter;
 
-	if (adapter->surprise_removed)
+	if (test_bit(MWIFIEX_SURPRISE_REMOVED, &adapter->work_flags))
 		goto exit;
 
 	if (card->msix_enable)
diff --git a/drivers/net/wireless/marvell/mwifiex/scan.c b/drivers/net/wireless/marvell/mwifiex/scan.c
index 895b806cdb03314b3996bff1c06f826fc151f855..8e483b0bc3b173e218057adf4952be3cc38ce79d 100644
--- a/drivers/net/wireless/marvell/mwifiex/scan.c
+++ b/drivers/net/wireless/marvell/mwifiex/scan.c
@@ -1495,7 +1495,8 @@ int mwifiex_scan_networks(struct mwifiex_private *priv,
 		return -EBUSY;
 	}
 
-	if (adapter->surprise_removed || adapter->is_cmd_timedout) {
+	if (test_bit(MWIFIEX_SURPRISE_REMOVED, &adapter->work_flags) ||
+	    test_bit(MWIFIEX_IS_CMD_TIMEDOUT, &adapter->work_flags)) {
 		mwifiex_dbg(adapter, ERROR,
 			    "Ignore scan. Card removed or firmware in bad state\n");
 		return -EFAULT;
diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.c b/drivers/net/wireless/marvell/mwifiex/sdio.c
index dfdcbc4f141af6dbbcfe144965e1642a7916fdfb..d49fbd58afa7fefa7a740a5efe98893fa82edb22 100644
--- a/drivers/net/wireless/marvell/mwifiex/sdio.c
+++ b/drivers/net/wireless/marvell/mwifiex/sdio.c
@@ -181,13 +181,13 @@ static int mwifiex_sdio_resume(struct device *dev)
 
 	adapter = card->adapter;
 
-	if (!adapter->is_suspended) {
+	if (!test_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags)) {
 		mwifiex_dbg(adapter, WARN,
 			    "device already resumed\n");
 		return 0;
 	}
 
-	adapter->is_suspended = false;
+	clear_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags);
 
 	/* Disable Host Sleep */
 	mwifiex_cancel_hs(mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA),
@@ -260,7 +260,7 @@ mwifiex_write_data_sync(struct mwifiex_adapter *adapter,
 				MWIFIEX_SDIO_BLOCK_SIZE) : pkt_len;
 	u32 ioport = (port & MWIFIEX_SDIO_IO_PORT_MASK);
 
-	if (adapter->is_suspended) {
+	if (test_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags)) {
 		mwifiex_dbg(adapter, ERROR,
 			    "%s: not allowed while suspended\n", __func__);
 		return -1;
@@ -450,7 +450,7 @@ static int mwifiex_sdio_suspend(struct device *dev)
 	if (!mwifiex_enable_hs(adapter)) {
 		mwifiex_dbg(adapter, ERROR,
 			    "cmd: failed to suspend\n");
-		adapter->hs_enabling = false;
+		clear_bit(MWIFIEX_IS_HS_ENABLING, &adapter->work_flags);
 		mwifiex_disable_wake(adapter);
 		return -EFAULT;
 	}
@@ -460,8 +460,8 @@ static int mwifiex_sdio_suspend(struct device *dev)
 	ret = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);
 
 	/* Indicate device suspended */
-	adapter->is_suspended = true;
-	adapter->hs_enabling = false;
+	set_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags);
+	clear_bit(MWIFIEX_IS_HS_ENABLING, &adapter->work_flags);
 
 	return ret;
 }
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_event.c b/drivers/net/wireless/marvell/mwifiex/sta_event.c
index 03a6492662cad6a2311f5a13d21f312f45646141..a327fc5b36e3e2d7971e3409f99b9b2c205a073c 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_event.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_event.c
@@ -224,7 +224,8 @@ void mwifiex_reset_connect_state(struct mwifiex_private *priv, u16 reason_code,
 	adapter->tx_lock_flag = false;
 	adapter->pps_uapsd_mode = false;
 
-	if (adapter->is_cmd_timedout && adapter->curr_cmd)
+	if (test_bit(MWIFIEX_IS_CMD_TIMEDOUT, &adapter->work_flags) &&
+	    adapter->curr_cmd)
 		return;
 	priv->media_connected = false;
 	mwifiex_dbg(adapter, MSG,
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
index 5414b755cf8225829e86b0bb26390e7635fadf73..b454b5f855034be5667985b4037aeb86ff68dea8 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
@@ -419,7 +419,8 @@ int mwifiex_set_hs_params(struct mwifiex_private *priv, u16 action,
 		}
 		if (hs_cfg->is_invoke_hostcmd) {
 			if (hs_cfg->conditions == HS_CFG_CANCEL) {
-				if (!adapter->is_hs_configured)
+				if (!test_bit(MWIFIEX_IS_HS_CONFIGURED,
+					      &adapter->work_flags))
 					/* Already cancelled */
 					break;
 				/* Save previous condition */
@@ -535,7 +536,7 @@ int mwifiex_enable_hs(struct mwifiex_adapter *adapter)
 	memset(&hscfg, 0, sizeof(hscfg));
 	hscfg.is_invoke_hostcmd = true;
 
-	adapter->hs_enabling = true;
+	set_bit(MWIFIEX_IS_HS_ENABLING, &adapter->work_flags);
 	mwifiex_cancel_all_pending_cmd(adapter);
 
 	if (mwifiex_set_hs_params(mwifiex_get_priv(adapter,
@@ -601,7 +602,8 @@ int mwifiex_get_bss_info(struct mwifiex_private *priv,
 	else
 		info->wep_status = false;
 
-	info->is_hs_configured = adapter->is_hs_configured;
+	info->is_hs_configured = test_bit(MWIFIEX_IS_HS_CONFIGURED,
+					  &adapter->work_flags);
 	info->is_deep_sleep = adapter->is_deep_sleep;
 
 	return 0;
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_tx.c b/drivers/net/wireless/marvell/mwifiex/sta_tx.c
index 620f8650a74200290a7fd98561b3f0b6ec5afc4a..37c24b95e64226bad12984a34921f3301b6f9a8c 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_tx.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_tx.c
@@ -143,7 +143,7 @@ int mwifiex_send_null_packet(struct mwifiex_private *priv, u8 flags)
 	int ret;
 	struct mwifiex_txinfo *tx_info = NULL;
 
-	if (adapter->surprise_removed)
+	if (test_bit(MWIFIEX_SURPRISE_REMOVED, &adapter->work_flags))
 		return -1;
 
 	if (!priv->media_connected)
diff --git a/drivers/net/wireless/marvell/mwifiex/uap_txrx.c b/drivers/net/wireless/marvell/mwifiex/uap_txrx.c
index 5ce85d5727e4b882ebc37372f03bb49003d1a0c9..a83c5afc256abcb9f3eca164dd84ce323e3b0eaf 100644
--- a/drivers/net/wireless/marvell/mwifiex/uap_txrx.c
+++ b/drivers/net/wireless/marvell/mwifiex/uap_txrx.c
@@ -421,12 +421,15 @@ int mwifiex_process_uap_rx_packet(struct mwifiex_private *priv,
 		spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
 	}
 
+	spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
 	if (!priv->ap_11n_enabled ||
 	    (!mwifiex_11n_get_rx_reorder_tbl(priv, uap_rx_pd->priority, ta) &&
 	    (le16_to_cpu(uap_rx_pd->rx_pkt_type) != PKT_TYPE_AMSDU))) {
 		ret = mwifiex_handle_uap_rx_forward(priv, skb);
+		spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
 		return ret;
 	}
+	spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
 
 	/* Reorder and send to kernel */
 	pkt_type = (u8)le16_to_cpu(uap_rx_pd->rx_pkt_type);
diff --git a/drivers/net/wireless/marvell/mwifiex/usb.c b/drivers/net/wireless/marvell/mwifiex/usb.c
index 88f4c89f89ba85f5ff64085f8f29abf20722d5ea..433c6a16870b6b5ef92b782dd13a3cf95f7a520e 100644
--- a/drivers/net/wireless/marvell/mwifiex/usb.c
+++ b/drivers/net/wireless/marvell/mwifiex/usb.c
@@ -181,7 +181,8 @@ static void mwifiex_usb_rx_complete(struct urb *urb)
 		atomic_dec(&card->rx_data_urb_pending);
 
 	if (recv_length) {
-		if (urb->status || (adapter->surprise_removed)) {
+		if (urb->status ||
+		    test_bit(MWIFIEX_SURPRISE_REMOVED, &adapter->work_flags)) {
 			mwifiex_dbg(adapter, ERROR,
 				    "URB status is failed: %d\n", urb->status);
 			/* Do not free skb in case of command ep */
@@ -218,10 +219,10 @@ static void mwifiex_usb_rx_complete(struct urb *urb)
 				dev_kfree_skb_any(skb);
 		}
 	} else if (urb->status) {
-		if (!adapter->is_suspended) {
+		if (!test_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags)) {
 			mwifiex_dbg(adapter, FATAL,
 				    "Card is removed: %d\n", urb->status);
-			adapter->surprise_removed = true;
+			set_bit(MWIFIEX_SURPRISE_REMOVED, &adapter->work_flags);
 		}
 		dev_kfree_skb_any(skb);
 		return;
@@ -529,7 +530,7 @@ static int mwifiex_usb_suspend(struct usb_interface *intf, pm_message_t message)
 		return 0;
 	}
 
-	if (unlikely(adapter->is_suspended))
+	if (unlikely(test_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags)))
 		mwifiex_dbg(adapter, WARN,
 			    "Device already suspended\n");
 
@@ -537,19 +538,19 @@ static int mwifiex_usb_suspend(struct usb_interface *intf, pm_message_t message)
 	if (!mwifiex_enable_hs(adapter)) {
 		mwifiex_dbg(adapter, ERROR,
 			    "cmd: failed to suspend\n");
-		adapter->hs_enabling = false;
+		clear_bit(MWIFIEX_IS_HS_ENABLING, &adapter->work_flags);
 		return -EFAULT;
 	}
 
 
-	/* 'is_suspended' flag indicates device is suspended.
+	/* 'MWIFIEX_IS_SUSPENDED' bit indicates device is suspended.
 	 * It must be set here before the usb_kill_urb() calls. Reason
 	 * is in the complete handlers, urb->status(= -ENOENT) and
 	 * this flag is used in combination to distinguish between a
 	 * 'suspended' state and a 'disconnect' one.
 	 */
-	adapter->is_suspended = true;
-	adapter->hs_enabling = false;
+	set_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags);
+	clear_bit(MWIFIEX_IS_HS_ENABLING, &adapter->work_flags);
 
 	if (atomic_read(&card->rx_cmd_urb_pending) && card->rx_cmd.urb)
 		usb_kill_urb(card->rx_cmd.urb);
@@ -593,7 +594,7 @@ static int mwifiex_usb_resume(struct usb_interface *intf)
 	}
 	adapter = card->adapter;
 
-	if (unlikely(!adapter->is_suspended)) {
+	if (unlikely(!test_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags))) {
 		mwifiex_dbg(adapter, WARN,
 			    "Device already resumed\n");
 		return 0;
@@ -602,7 +603,7 @@ static int mwifiex_usb_resume(struct usb_interface *intf)
 	/* Indicate device resumed. The netdev queue will be resumed only
 	 * after the urbs have been re-submitted
 	 */
-	adapter->is_suspended = false;
+	clear_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags);
 
 	if (!atomic_read(&card->rx_data_urb_pending))
 		for (i = 0; i < MWIFIEX_RX_DATA_URB; i++)
@@ -1158,13 +1159,13 @@ static int mwifiex_usb_host_to_card(struct mwifiex_adapter *adapter, u8 ep,
 	unsigned long flags;
 	int idx, ret;
 
-	if (adapter->is_suspended) {
+	if (test_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags)) {
 		mwifiex_dbg(adapter, ERROR,
 			    "%s: not allowed while suspended\n", __func__);
 		return -1;
 	}
 
-	if (adapter->surprise_removed) {
+	if (test_bit(MWIFIEX_SURPRISE_REMOVED, &adapter->work_flags)) {
 		mwifiex_dbg(adapter, ERROR, "%s: device removed\n", __func__);
 		return -1;
 	}
diff --git a/drivers/net/wireless/marvell/mwifiex/util.c b/drivers/net/wireless/marvell/mwifiex/util.c
index 6dd212898117697f53fad22b20eb831ee6de6ab0..f9b71539d33e684963ed2cdc65d14c9feee9b8c8 100644
--- a/drivers/net/wireless/marvell/mwifiex/util.c
+++ b/drivers/net/wireless/marvell/mwifiex/util.c
@@ -197,9 +197,11 @@ int mwifiex_get_debug_info(struct mwifiex_private *priv,
 		info->is_deep_sleep = adapter->is_deep_sleep;
 		info->pm_wakeup_card_req = adapter->pm_wakeup_card_req;
 		info->pm_wakeup_fw_try = adapter->pm_wakeup_fw_try;
-		info->is_hs_configured = adapter->is_hs_configured;
+		info->is_hs_configured = test_bit(MWIFIEX_IS_HS_CONFIGURED,
+						  &adapter->work_flags);
 		info->hs_activated = adapter->hs_activated;
-		info->is_cmd_timedout = adapter->is_cmd_timedout;
+		info->is_cmd_timedout = test_bit(MWIFIEX_IS_CMD_TIMEDOUT,
+						 &adapter->work_flags);
 		info->num_cmd_host_to_card_failure
 				= adapter->dbg.num_cmd_host_to_card_failure;
 		info->num_cmd_sleep_cfm_host_to_card_failure
diff --git a/drivers/net/wireless/marvell/mwifiex/wmm.c b/drivers/net/wireless/marvell/mwifiex/wmm.c
index 936a0a841af884f441fa7f58f96f8797a1495fe0..407b9932ca4d7a2295de0f8216b053690dcc746f 100644
--- a/drivers/net/wireless/marvell/mwifiex/wmm.c
+++ b/drivers/net/wireless/marvell/mwifiex/wmm.c
@@ -599,7 +599,7 @@ mwifiex_clean_txrx(struct mwifiex_private *priv)
 	memcpy(tos_to_tid, ac_to_tid, sizeof(tos_to_tid));
 
 	if (priv->adapter->if_ops.clean_pcie_ring &&
-	    !priv->adapter->surprise_removed)
+	    !test_bit(MWIFIEX_SURPRISE_REMOVED, &priv->adapter->work_flags))
 		priv->adapter->if_ops.clean_pcie_ring(priv->adapter);
 	spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
 
diff --git a/drivers/net/wireless/mediatek/mt76/Kconfig b/drivers/net/wireless/mediatek/mt76/Kconfig
index fc05d79c80d08ff2f46a36e181340f8700d7e9d5..850611ad347a532612d72d4d300b67379bdb1218 100644
--- a/drivers/net/wireless/mediatek/mt76/Kconfig
+++ b/drivers/net/wireless/mediatek/mt76/Kconfig
@@ -1,10 +1,36 @@
 config MT76_CORE
 	tristate
 
+config MT76_USB
+	tristate
+	depends on MT76_CORE
+
+config MT76x2_COMMON
+	tristate
+	depends on MT76_CORE
+
+config MT76x0U
+	tristate "MediaTek MT76x0U (USB) support"
+	depends on MAC80211
+	depends on USB
+	help
+	  This adds support for MT7610U-based wireless USB dongles.
+
 config MT76x2E
 	tristate "MediaTek MT76x2E (PCIe) support"
 	select MT76_CORE
+	select MT76x2_COMMON
 	depends on MAC80211
 	depends on PCI
 	---help---
 	  This adds support for MT7612/MT7602/MT7662-based wireless PCIe devices.
+
+config MT76x2U
+	tristate "MediaTek MT76x2U (USB) support"
+	select MT76_CORE
+	select MT76_USB
+	select MT76x2_COMMON
+	depends on MAC80211
+	depends on USB
+	help
+	  This adds support for MT7612U-based wireless USB dongles.
diff --git a/drivers/net/wireless/mediatek/mt76/Makefile b/drivers/net/wireless/mediatek/mt76/Makefile
index a0156bc01dea63dac3819dc686ad9fb88a8c91d2..158d10d2716c4e0afd3d3926445083d6377680d0 100644
--- a/drivers/net/wireless/mediatek/mt76/Makefile
+++ b/drivers/net/wireless/mediatek/mt76/Makefile
@@ -1,15 +1,31 @@
 obj-$(CONFIG_MT76_CORE) += mt76.o
+obj-$(CONFIG_MT76_USB) += mt76-usb.o
+obj-$(CONFIG_MT76x0U) += mt76x0/
+obj-$(CONFIG_MT76x2_COMMON) += mt76x2-common.o
 obj-$(CONFIG_MT76x2E) += mt76x2e.o
+obj-$(CONFIG_MT76x2U) += mt76x2u.o
 
 mt76-y := \
 	mmio.o util.o trace.o dma.o mac80211.o debugfs.o eeprom.o tx.o agg-rx.o
 
+mt76-usb-y := usb.o usb_trace.o usb_mcu.o
+
 CFLAGS_trace.o := -I$(src)
+CFLAGS_usb_trace.o := -I$(src)
+
+mt76x2-common-y := \
+	mt76x2_eeprom.o mt76x2_tx_common.o mt76x2_mac_common.o \
+	mt76x2_init_common.o mt76x2_common.o mt76x2_phy_common.o \
+	mt76x2_debugfs.o
 
 mt76x2e-y := \
 	mt76x2_pci.o mt76x2_dma.o \
-	mt76x2_main.o mt76x2_init.o mt76x2_debugfs.o mt76x2_tx.o \
-	mt76x2_core.o mt76x2_mac.o mt76x2_eeprom.o mt76x2_mcu.o mt76x2_phy.o \
+	mt76x2_main.o mt76x2_init.o mt76x2_tx.o \
+	mt76x2_core.o mt76x2_mac.o mt76x2_mcu.o mt76x2_phy.o \
 	mt76x2_dfs.o mt76x2_trace.o
 
+mt76x2u-y := \
+	mt76x2_usb.o mt76x2u_init.o mt76x2u_main.o mt76x2u_mac.o \
+	mt76x2u_mcu.o mt76x2u_phy.o mt76x2u_core.o
+
 CFLAGS_mt76x2_trace.o := -I$(src)
diff --git a/drivers/net/wireless/mediatek/mt76/agg-rx.c b/drivers/net/wireless/mediatek/mt76/agg-rx.c
index 1e8cdce919d9a41122adc6774a6cbe2340a3ed03..73c8b2805c978cb688e73bbd72156078eb98a492 100644
--- a/drivers/net/wireless/mediatek/mt76/agg-rx.c
+++ b/drivers/net/wireless/mediatek/mt76/agg-rx.c
@@ -113,7 +113,7 @@ mt76_rx_aggr_reorder_work(struct work_struct *work)
 	if (nframes)
 		ieee80211_queue_delayed_work(tid->dev->hw, &tid->reorder_work,
 					     REORDER_TIMEOUT);
-	mt76_rx_complete(dev, &frames, -1);
+	mt76_rx_complete(dev, &frames, NULL);
 
 	rcu_read_unlock();
 	local_bh_enable();
diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c
index 3dbedcedc2c4da58c783754792e549602040119f..c51da2205b938b87d5fb4b5c4ef0c15c1a5adaa4 100644
--- a/drivers/net/wireless/mediatek/mt76/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/dma.c
@@ -239,6 +239,80 @@ mt76_dma_kick_queue(struct mt76_dev *dev, struct mt76_queue *q)
 	iowrite32(q->head, &q->regs->cpu_idx);
 }
 
+int mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
+			  struct sk_buff *skb, struct mt76_wcid *wcid,
+			  struct ieee80211_sta *sta)
+{
+	struct mt76_queue_entry e;
+	struct mt76_txwi_cache *t;
+	struct mt76_queue_buf buf[32];
+	struct sk_buff *iter;
+	dma_addr_t addr;
+	int len;
+	u32 tx_info = 0;
+	int n, ret;
+
+	t = mt76_get_txwi(dev);
+	if (!t) {
+		ieee80211_free_txskb(dev->hw, skb);
+		return -ENOMEM;
+	}
+
+	dma_sync_single_for_cpu(dev->dev, t->dma_addr, sizeof(t->txwi),
+				DMA_TO_DEVICE);
+	ret = dev->drv->tx_prepare_skb(dev, &t->txwi, skb, q, wcid, sta,
+				       &tx_info);
+	dma_sync_single_for_device(dev->dev, t->dma_addr, sizeof(t->txwi),
+				   DMA_TO_DEVICE);
+	if (ret < 0)
+		goto free;
+
+	len = skb->len - skb->data_len;
+	addr = dma_map_single(dev->dev, skb->data, len, DMA_TO_DEVICE);
+	if (dma_mapping_error(dev->dev, addr)) {
+		ret = -ENOMEM;
+		goto free;
+	}
+
+	n = 0;
+	buf[n].addr = t->dma_addr;
+	buf[n++].len = dev->drv->txwi_size;
+	buf[n].addr = addr;
+	buf[n++].len = len;
+
+	skb_walk_frags(skb, iter) {
+		if (n == ARRAY_SIZE(buf))
+			goto unmap;
+
+		addr = dma_map_single(dev->dev, iter->data, iter->len,
+				      DMA_TO_DEVICE);
+		if (dma_mapping_error(dev->dev, addr))
+			goto unmap;
+
+		buf[n].addr = addr;
+		buf[n++].len = iter->len;
+	}
+
+	if (q->queued + (n + 1) / 2 >= q->ndesc - 1)
+		goto unmap;
+
+	return dev->queue_ops->add_buf(dev, q, buf, n, tx_info, skb, t);
+
+unmap:
+	ret = -ENOMEM;
+	for (n--; n > 0; n--)
+		dma_unmap_single(dev->dev, buf[n].addr, buf[n].len,
+				 DMA_TO_DEVICE);
+
+free:
+	e.skb = skb;
+	e.txwi = t;
+	dev->drv->tx_complete_skb(dev, q, &e, true);
+	mt76_put_txwi(dev, t);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(mt76_dma_tx_queue_skb);
+
 static int
 mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q, bool napi)
 {
@@ -400,7 +474,7 @@ mt76_dma_rx_poll(struct napi_struct *napi, int budget)
 
 	do {
 		cur = mt76_dma_rx_process(dev, &dev->q_rx[qid], budget - done);
-		mt76_rx_poll_complete(dev, qid);
+		mt76_rx_poll_complete(dev, qid, napi);
 		done += cur;
 	} while (cur && done < budget);
 
@@ -436,6 +510,7 @@ static const struct mt76_queue_ops mt76_dma_ops = {
 	.init = mt76_dma_init,
 	.alloc = mt76_dma_alloc_queue,
 	.add_buf = mt76_dma_add_buf,
+	.tx_queue_skb = mt76_dma_tx_queue_skb,
 	.tx_cleanup = mt76_dma_tx_cleanup,
 	.rx_reset = mt76_dma_rx_reset,
 	.kick = mt76_dma_kick_queue,
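
The new mt76_dma_tx_queue_skb() assembles a small scatter list before handing it to add_buf(). A sketch of the layout for an skb with two fragments (each mt76_desc carries two buffers, which is where the (n + 1) / 2 descriptor check comes from):

/* Sketch of the buf[] array built above:
 *   buf[0] = TXWI        (t->dma_addr, dev->drv->txwi_size bytes)
 *   buf[1] = skb head    (skb->len - skb->data_len bytes)
 *   buf[2] = fragment 0
 *   buf[3] = fragment 1  -> n = 4, i.e. (4 + 1) / 2 = 2 descriptors
 * The unmap path walks back from buf[n - 1] down to buf[1]; buf[0] is
 * the preallocated TXWI and is not unmapped here.
 */
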
diff --git a/drivers/net/wireless/mediatek/mt76/dma.h b/drivers/net/wireless/mediatek/mt76/dma.h
index 1dad39697929659db6ea4b0e8bdbc961f401afce..27248e24a19b1b82b08a99e894642e39afc91b93 100644
--- a/drivers/net/wireless/mediatek/mt76/dma.h
+++ b/drivers/net/wireless/mediatek/mt76/dma.h
@@ -25,6 +25,39 @@
 #define MT_DMA_CTL_LAST_SEC0		BIT(30)
 #define MT_DMA_CTL_DMA_DONE		BIT(31)
 
+#define MT_TXD_INFO_LEN			GENMASK(15, 0)
+#define MT_TXD_INFO_NEXT_VLD		BIT(16)
+#define MT_TXD_INFO_TX_BURST		BIT(17)
+#define MT_TXD_INFO_80211		BIT(19)
+#define MT_TXD_INFO_TSO			BIT(20)
+#define MT_TXD_INFO_CSO			BIT(21)
+#define MT_TXD_INFO_WIV			BIT(24)
+#define MT_TXD_INFO_QSEL		GENMASK(26, 25)
+#define MT_TXD_INFO_DPORT		GENMASK(29, 27)
+#define MT_TXD_INFO_TYPE		GENMASK(31, 30)
+
+#define MT_RX_FCE_INFO_LEN		GENMASK(13, 0)
+#define MT_RX_FCE_INFO_SELF_GEN		BIT(15)
+#define MT_RX_FCE_INFO_CMD_SEQ		GENMASK(19, 16)
+#define MT_RX_FCE_INFO_EVT_TYPE		GENMASK(23, 20)
+#define MT_RX_FCE_INFO_PCIE_INTR	BIT(24)
+#define MT_RX_FCE_INFO_QSEL		GENMASK(26, 25)
+#define MT_RX_FCE_INFO_D_PORT		GENMASK(29, 27)
+#define MT_RX_FCE_INFO_TYPE		GENMASK(31, 30)
+
+/* MCU request message header  */
+#define MT_MCU_MSG_LEN			GENMASK(15, 0)
+#define MT_MCU_MSG_CMD_SEQ		GENMASK(19, 16)
+#define MT_MCU_MSG_CMD_TYPE		GENMASK(26, 20)
+#define MT_MCU_MSG_PORT			GENMASK(29, 27)
+#define MT_MCU_MSG_TYPE			GENMASK(31, 30)
+#define MT_MCU_MSG_TYPE_CMD		BIT(30)
+
+#define MT_DMA_HDR_LEN			4
+#define MT_RX_INFO_LEN			4
+#define MT_FCE_INFO_LEN			4
+#define MT_RX_RXWI_LEN			32
+
 struct mt76_desc {
 	__le32 buf0;
 	__le32 ctrl;
@@ -32,6 +65,16 @@ struct mt76_desc {
 	__le32 info;
 } __packed __aligned(4);
 
+enum dma_msg_port {
+	WLAN_PORT,
+	CPU_RX_PORT,
+	CPU_TX_PORT,
+	HOST_PORT,
+	VIRTUAL_CPU_RX_PORT,
+	VIRTUAL_CPU_TX_PORT,
+	DISCARD,
+};
+
 int mt76_dma_attach(struct mt76_dev *dev);
 void mt76_dma_cleanup(struct mt76_dev *dev);
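
The new info-word layouts above are plain GENMASK()/BIT() definitions, so values are expected to be packed and unpacked with the standard bitfield helpers. A minimal sketch, assuming <linux/bitfield.h>; "len" and the QSEL value of 2 are arbitrary placeholders:

#include <linux/bitfield.h>

u32 info = FIELD_PREP(MT_TXD_INFO_LEN, len) |
	   FIELD_PREP(MT_TXD_INFO_QSEL, 2) |
	   MT_TXD_INFO_80211;
u32 qsel = FIELD_GET(MT_TXD_INFO_QSEL, info);	/* -> 2 */
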
 
diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c
index d62e34e7eadfe9dfce468dad87bf630c4d82c3f3..029d54bce9e81e160fc6ad06a1192552ec817eca 100644
--- a/drivers/net/wireless/mediatek/mt76/mac80211.c
+++ b/drivers/net/wireless/mediatek/mt76/mac80211.c
@@ -303,14 +303,6 @@ int mt76_register_device(struct mt76_dev *dev, bool vht,
 	SET_IEEE80211_DEV(hw, dev->dev);
 	SET_IEEE80211_PERM_ADDR(hw, dev->macaddr);
 
-	wiphy->interface_modes =
-		BIT(NL80211_IFTYPE_STATION) |
-		BIT(NL80211_IFTYPE_AP) |
-#ifdef CONFIG_MAC80211_MESH
-		BIT(NL80211_IFTYPE_MESH_POINT) |
-#endif
-		BIT(NL80211_IFTYPE_ADHOC);
-
 	wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR;
 
 	wiphy->available_antennas_tx = dev->antenna_mask;
@@ -591,15 +583,11 @@ mt76_check_ps(struct mt76_dev *dev, struct sk_buff *skb)
 }
 
 void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
-		      int queue)
+		      struct napi_struct *napi)
 {
-	struct napi_struct *napi = NULL;
 	struct ieee80211_sta *sta;
 	struct sk_buff *skb;
 
-	if (queue >= 0)
-	    napi = &dev->napi[queue];
-
 	spin_lock(&dev->rx_lock);
 	while ((skb = __skb_dequeue(frames)) != NULL) {
 		if (mt76_check_ccmp_pn(skb)) {
@@ -613,7 +601,8 @@ void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
 	spin_unlock(&dev->rx_lock);
 }
 
-void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q)
+void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q,
+			   struct napi_struct *napi)
 {
 	struct sk_buff_head frames;
 	struct sk_buff *skb;
@@ -625,5 +614,6 @@ void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q)
 		mt76_rx_aggr_reorder(skb, &frames);
 	}
 
-	mt76_rx_complete(dev, &frames, q);
+	mt76_rx_complete(dev, &frames, napi);
 }
+EXPORT_SYMBOL_GPL(mt76_rx_poll_complete);
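
With the queue index replaced by an explicit napi pointer, callers that own a NAPI context pass it through and everything else passes NULL; both forms appear in this series:

/* DMA poll loop (see dma.c above): hand the NAPI instance through */
mt76_rx_poll_complete(dev, qid, napi);

/* Reorder work / USB paths without a NAPI context: pass NULL and let
 * mac80211 fall back to the non-NAPI receive path.
 */
mt76_rx_complete(dev, &frames, NULL);
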
diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h
index 96e9798bb8a02e549bfed971198b8fa98d064d8f..2eab358791633e80e8e0f3c4b10c8d37d0839fa9 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76.h
@@ -22,6 +22,7 @@
 #include <linux/spinlock.h>
 #include <linux/skbuff.h>
 #include <linux/leds.h>
+#include <linux/usb.h>
 #include <net/mac80211.h>
 #include "util.h"
 
@@ -30,6 +31,7 @@
 #define MT_RX_BUF_SIZE      2048
 
 struct mt76_dev;
+struct mt76_wcid;
 
 struct mt76_bus_ops {
 	u32 (*rr)(struct mt76_dev *dev, u32 offset);
@@ -62,12 +64,22 @@ struct mt76_queue_buf {
 	int len;
 };
 
+struct mt76u_buf {
+	struct mt76_dev *dev;
+	struct urb *urb;
+	size_t len;
+	bool done;
+};
+
 struct mt76_queue_entry {
 	union {
 		void *buf;
 		struct sk_buff *skb;
 	};
-	struct mt76_txwi_cache *txwi;
+	union {
+		struct mt76_txwi_cache *txwi;
+		struct mt76u_buf ubuf;
+	};
 	bool schedule;
 };
 
@@ -88,6 +100,7 @@ struct mt76_queue {
 	struct list_head swq;
 	int swq_queued;
 
+	u16 first;
 	u16 head;
 	u16 tail;
 	int ndesc;
@@ -110,6 +123,10 @@ struct mt76_queue_ops {
 		       struct mt76_queue_buf *buf, int nbufs, u32 info,
 		       struct sk_buff *skb, void *txwi);
 
+	int (*tx_queue_skb)(struct mt76_dev *dev, struct mt76_queue *q,
+			    struct sk_buff *skb, struct mt76_wcid *wcid,
+			    struct ieee80211_sta *sta);
+
 	void *(*dequeue)(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
 			 int *len, u32 *info, bool *more);
 
@@ -187,9 +204,13 @@ struct mt76_rx_tid {
 enum {
 	MT76_STATE_INITIALIZED,
 	MT76_STATE_RUNNING,
+	MT76_STATE_MCU_RUNNING,
 	MT76_SCANNING,
 	MT76_RESET,
 	MT76_OFFCHANNEL,
+	MT76_REMOVED,
+	MT76_READING_STATS,
+	MT76_MORE_STATS,
 };
 
 struct mt76_hw_cap {
@@ -210,6 +231,8 @@ struct mt76_driver_ops {
 	void (*tx_complete_skb)(struct mt76_dev *dev, struct mt76_queue *q,
 				struct mt76_queue_entry *e, bool flush);
 
+	bool (*tx_status_data)(struct mt76_dev *dev, u8 *update);
+
 	void (*rx_skb)(struct mt76_dev *dev, enum mt76_rxq_id q,
 		       struct sk_buff *skb);
 
@@ -229,6 +252,64 @@ struct mt76_sband {
 	struct mt76_channel_state *chan;
 };
 
+/* addr req mask */
+#define MT_VEND_TYPE_EEPROM	BIT(31)
+#define MT_VEND_TYPE_CFG	BIT(30)
+#define MT_VEND_TYPE_MASK	(MT_VEND_TYPE_EEPROM | MT_VEND_TYPE_CFG)
+
+#define MT_VEND_ADDR(type, n)	(MT_VEND_TYPE_##type | (n))
+enum mt_vendor_req {
+	MT_VEND_DEV_MODE =	0x1,
+	MT_VEND_WRITE =		0x2,
+	MT_VEND_MULTI_WRITE =	0x6,
+	MT_VEND_MULTI_READ =	0x7,
+	MT_VEND_READ_EEPROM =	0x9,
+	MT_VEND_WRITE_FCE =	0x42,
+	MT_VEND_WRITE_CFG =	0x46,
+	MT_VEND_READ_CFG =	0x47,
+};
+
+enum mt76u_in_ep {
+	MT_EP_IN_PKT_RX,
+	MT_EP_IN_CMD_RESP,
+	__MT_EP_IN_MAX,
+};
+
+enum mt76u_out_ep {
+	MT_EP_OUT_INBAND_CMD,
+	MT_EP_OUT_AC_BK,
+	MT_EP_OUT_AC_BE,
+	MT_EP_OUT_AC_VI,
+	MT_EP_OUT_AC_VO,
+	MT_EP_OUT_HCCA,
+	__MT_EP_OUT_MAX,
+};
+
+#define MT_SG_MAX_SIZE		8
+#define MT_NUM_TX_ENTRIES	256
+#define MT_NUM_RX_ENTRIES	128
+#define MCU_RESP_URB_SIZE	1024
+struct mt76_usb {
+	struct mutex usb_ctrl_mtx;
+	u8 data[32];
+
+	struct tasklet_struct rx_tasklet;
+	struct tasklet_struct tx_tasklet;
+	struct delayed_work stat_work;
+
+	u8 out_ep[__MT_EP_OUT_MAX];
+	u16 out_max_packet;
+	u8 in_ep[__MT_EP_IN_MAX];
+	u16 in_max_packet;
+
+	struct mt76u_mcu {
+		struct mutex mutex;
+		struct completion cmpl;
+		struct mt76u_buf res;
+		u32 msg_seq;
+	} mcu;
+};
+
 struct mt76_dev {
 	struct ieee80211_hw *hw;
 	struct cfg80211_chan_def chandef;
@@ -271,6 +352,8 @@ struct mt76_dev {
 	char led_name[32];
 	bool led_al;
 	u8 led_pin;
+
+	struct mt76_usb usb;
 };
 
 enum mt76_phy_type {
@@ -402,6 +485,14 @@ static inline int mt76_decr(int val, int size)
 	return (val - 1) & (size - 1);
 }
 
+/* Hardware uses mirrored order of queues with Q3
+ * having the highest priority
+ */
+static inline u8 q2hwq(u8 q)
+{
+	return q ^ 0x3;
+}
+
 static inline struct ieee80211_txq *
 mtxq_to_txq(struct mt76_txq *mtxq)
 {
@@ -421,9 +512,9 @@ wcid_to_sta(struct mt76_wcid *wcid)
 	return container_of(ptr, struct ieee80211_sta, drv_priv);
 }
 
-int mt76_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
-		      struct sk_buff *skb, struct mt76_wcid *wcid,
-		      struct ieee80211_sta *sta);
+int mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
+			  struct sk_buff *skb, struct mt76_wcid *wcid,
+			  struct ieee80211_sta *sta);
 
 void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb);
 void mt76_tx(struct mt76_dev *dev, struct ieee80211_sta *sta,
@@ -454,10 +545,69 @@ void mt76_wcid_key_setup(struct mt76_dev *dev, struct mt76_wcid *wcid,
 
 /* internal */
 void mt76_tx_free(struct mt76_dev *dev);
+struct mt76_txwi_cache *mt76_get_txwi(struct mt76_dev *dev);
 void mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t);
 void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
-		      int queue);
-void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q);
+		      struct napi_struct *napi);
+void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q,
+			   struct napi_struct *napi);
 void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames);
 
+/* usb */
+static inline bool mt76u_urb_error(struct urb *urb)
+{
+	return urb->status &&
+	       urb->status != -ECONNRESET &&
+	       urb->status != -ESHUTDOWN &&
+	       urb->status != -ENOENT;
+}
+
+/* Map hardware queues to usb endpoints */
+static inline u8 q2ep(u8 qid)
+{
+	/* TODO: take management packets to queue 5 */
+	return qid + 1;
+}
+
+static inline bool mt76u_check_sg(struct mt76_dev *dev)
+{
+	struct usb_interface *intf = to_usb_interface(dev->dev);
+	struct usb_device *udev = interface_to_usbdev(intf);
+
+	return (udev->bus->sg_tablesize > 0 &&
+		(udev->bus->no_sg_constraint ||
+		 udev->speed == USB_SPEED_WIRELESS));
+}
+
+int mt76u_vendor_request(struct mt76_dev *dev, u8 req,
+			 u8 req_type, u16 val, u16 offset,
+			 void *buf, size_t len);
+void mt76u_single_wr(struct mt76_dev *dev, const u8 req,
+		     const u16 offset, const u32 val);
+u32 mt76u_rr(struct mt76_dev *dev, u32 addr);
+void mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val);
+int mt76u_init(struct mt76_dev *dev, struct usb_interface *intf);
+void mt76u_deinit(struct mt76_dev *dev);
+int mt76u_buf_alloc(struct mt76_dev *dev, struct mt76u_buf *buf,
+		    int nsgs, int len, int sglen, gfp_t gfp);
+void mt76u_buf_free(struct mt76u_buf *buf);
+int mt76u_submit_buf(struct mt76_dev *dev, int dir, int index,
+		     struct mt76u_buf *buf, gfp_t gfp,
+		     usb_complete_t complete_fn, void *context);
+int mt76u_submit_rx_buffers(struct mt76_dev *dev);
+int mt76u_alloc_queues(struct mt76_dev *dev);
+void mt76u_stop_queues(struct mt76_dev *dev);
+void mt76u_stop_stat_wk(struct mt76_dev *dev);
+void mt76u_queues_deinit(struct mt76_dev *dev);
+int mt76u_skb_dma_info(struct sk_buff *skb, int port, u32 flags);
+
+int mt76u_mcu_fw_send_data(struct mt76_dev *dev, const void *data,
+			   int data_len, u32 max_payload, u32 offset);
+void mt76u_mcu_complete_urb(struct urb *urb);
+struct sk_buff *mt76u_mcu_msg_alloc(const void *data, int len);
+int mt76u_mcu_send_msg(struct mt76_dev *dev, struct sk_buff *skb,
+		       int cmd, bool wait_resp);
+void mt76u_mcu_fw_reset(struct mt76_dev *dev);
+int mt76u_mcu_init_rx(struct mt76_dev *dev);
+
 #endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/Makefile b/drivers/net/wireless/mediatek/mt76/mt76x0/Makefile
new file mode 100644
index 0000000000000000000000000000000000000000..7843908261ba0659157b895be8fbe14baf2a16bb
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/Makefile
@@ -0,0 +1,7 @@
+obj-$(CONFIG_MT76x0U)    += mt76x0.o
+
+mt76x0-objs	= \
+	usb.o init.o main.o mcu.o trace.o dma.o eeprom.o phy.o \
+	mac.o util.o debugfs.o tx.o core.o
+# ccflags-y := -DDEBUG
+CFLAGS_trace.o := -I$(src)
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/core.c b/drivers/net/wireless/mediatek/mt76/mt76x0/core.c
new file mode 100644
index 0000000000000000000000000000000000000000..892803fce8420c09c311740c898ca6b25e9099d6
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/core.c
@@ -0,0 +1,34 @@
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "mt76x0.h"
+
+int mt76x0_wait_asic_ready(struct mt76x0_dev *dev)
+{
+	int i = 100;
+	u32 val;
+
+	do {
+		if (test_bit(MT76_REMOVED, &dev->mt76.state))
+			return -EIO;
+
+		val = mt76_rr(dev, MT_MAC_CSR0);
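+		/* CSR0 reads back as all-zeros or all-ones until the ASIC is up */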
+		if (val && ~val)
+			return 0;
+
+		udelay(10);
+	} while (i--);
+
+	return -EIO;
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt76x0/debugfs.c
new file mode 100644
index 0000000000000000000000000000000000000000..e7a77a88606806fc9da59be52af0994fa9fb872c
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/debugfs.c
@@ -0,0 +1,166 @@
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/debugfs.h>
+
+#include "mt76x0.h"
+#include "eeprom.h"
+
+static int
+mt76_reg_set(void *data, u64 val)
+{
+	struct mt76x0_dev *dev = data;
+
+	mt76_wr(dev, dev->debugfs_reg, val);
+	return 0;
+}
+
+static int
+mt76_reg_get(void *data, u64 *val)
+{
+	struct mt76x0_dev *dev = data;
+
+	*val = mt76_rr(dev, dev->debugfs_reg);
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(fops_regval, mt76_reg_get, mt76_reg_set, "0x%08llx\n");
+
+static int
+mt76x0_ampdu_stat_read(struct seq_file *file, void *data)
+{
+	struct mt76x0_dev *dev = file->private;
+	int i, j;
+
+#define stat_printf(grp, off, name)					\
+	seq_printf(file, #name ":\t%llu\n", dev->stats.grp[off])
+
+	stat_printf(rx_stat, 0, rx_crc_err);
+	stat_printf(rx_stat, 1, rx_phy_err);
+	stat_printf(rx_stat, 2, rx_false_cca);
+	stat_printf(rx_stat, 3, rx_plcp_err);
+	stat_printf(rx_stat, 4, rx_fifo_overflow);
+	stat_printf(rx_stat, 5, rx_duplicate);
+
+	stat_printf(tx_stat, 0, tx_fail_cnt);
+	stat_printf(tx_stat, 1, tx_bcn_cnt);
+	stat_printf(tx_stat, 2, tx_success);
+	stat_printf(tx_stat, 3, tx_retransmit);
+	stat_printf(tx_stat, 4, tx_zero_len);
+	stat_printf(tx_stat, 5, tx_underflow);
+
+	stat_printf(aggr_stat, 0, non_aggr_tx);
+	stat_printf(aggr_stat, 1, aggr_tx);
+
+	stat_printf(zero_len_del, 0, tx_zero_len_del);
+	stat_printf(zero_len_del, 1, rx_zero_len_del);
+#undef stat_printf
+
+	seq_puts(file, "Aggregation stats:\n");
+	for (i = 0; i < 4; i++) {
+		for (j = 0; j < 8; j++)
+			seq_printf(file, "%08llx ",
+				   dev->stats.aggr_n[i * 8 + j]);
+		seq_putc(file, '\n');
+	}
+
+	seq_printf(file, "recent average AMPDU len: %d\n",
+		   atomic_read(&dev->avg_ampdu_len));
+
+	return 0;
+}
+
+static int
+mt76x0_ampdu_stat_open(struct inode *inode, struct file *f)
+{
+	return single_open(f, mt76x0_ampdu_stat_read, inode->i_private);
+}
+
+static const struct file_operations fops_ampdu_stat = {
+	.open = mt76x0_ampdu_stat_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+static int
+mt76x0_eeprom_param_read(struct seq_file *file, void *data)
+{
+	struct mt76x0_dev *dev = file->private;
+	int i;
+
+	seq_printf(file, "RF freq offset: %hhx\n", dev->ee->rf_freq_off);
+	seq_printf(file, "RSSI offset 2GHz: %hhx %hhx\n",
+		   dev->ee->rssi_offset_2ghz[0], dev->ee->rssi_offset_2ghz[1]);
+	seq_printf(file, "RSSI offset 5GHz: %hhx %hhx %hhx\n",
+		   dev->ee->rssi_offset_5ghz[0], dev->ee->rssi_offset_5ghz[1],
+		   dev->ee->rssi_offset_5ghz[2]);
+	seq_printf(file, "Temperature offset: %hhx\n", dev->ee->temp_off);
+	seq_printf(file, "LNA gain 2Ghz: %hhx\n", dev->ee->lna_gain_2ghz);
+	seq_printf(file, "LNA gain 5Ghz: %hhx %hhx %hhx\n",
+		   dev->ee->lna_gain_5ghz[0], dev->ee->lna_gain_5ghz[1],
+		   dev->ee->lna_gain_5ghz[2]);
+	seq_printf(file, "Power Amplifier type %hhx\n", dev->ee->pa_type);
+	seq_printf(file, "Reg channels: %hhu-%hhu\n", dev->ee->reg.start,
+		   dev->ee->reg.start + dev->ee->reg.num - 1);
+
+	seq_puts(file, "Per channel power:\n");
+	for (i = 0; i < 58; i++)
+		seq_printf(file, "\t%d chan:%d pwr:%d\n", i, i,
+			   dev->ee->tx_pwr_per_chan[i]);
+
+	seq_puts(file, "Per rate power 2GHz:\n");
+	for (i = 0; i < 5; i++)
+		seq_printf(file, "\t %d bw20:%d bw40:%d\n",
+			   i, dev->ee->tx_pwr_cfg_2g[i][0],
+			      dev->ee->tx_pwr_cfg_2g[i][1]);
+
+	seq_puts(file, "Per rate power 5GHz:\n");
+	for (i = 0; i < 5; i++)
+		seq_printf(file, "\t %d bw20:%d bw40:%d\n",
+			   i, dev->ee->tx_pwr_cfg_5g[i][0],
+			      dev->ee->tx_pwr_cfg_5g[i][1]);
+
+	return 0;
+}
+
+static int
+mt76x0_eeprom_param_open(struct inode *inode, struct file *f)
+{
+	return single_open(f, mt76x0_eeprom_param_read, inode->i_private);
+}
+
+static const struct file_operations fops_eeprom_param = {
+	.open = mt76x0_eeprom_param_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+void mt76x0_init_debugfs(struct mt76x0_dev *dev)
+{
+	struct dentry *dir;
+
+	dir = debugfs_create_dir("mt76x0", dev->mt76.hw->wiphy->debugfsdir);
+	if (!dir)
+		return;
+
+	debugfs_create_u32("regidx", S_IRUSR | S_IWUSR, dir, &dev->debugfs_reg);
+	debugfs_create_file("regval", S_IRUSR | S_IWUSR, dir, dev,
+			    &fops_regval);
+	debugfs_create_file("ampdu_stat", S_IRUSR, dir, dev, &fops_ampdu_stat);
+	debugfs_create_file("eeprom_param", S_IRUSR, dir, dev,
+			    &fops_eeprom_param);
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/dma.c b/drivers/net/wireless/mediatek/mt76/mt76x0/dma.c
new file mode 100644
index 0000000000000000000000000000000000000000..e2efb430419b6d77b2669c51476b4900cded71e6
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/dma.c
@@ -0,0 +1,522 @@
+/*
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "mt76x0.h"
+#include "dma.h"
+#include "usb.h"
+#include "trace.h"
+
+static int mt76x0_submit_rx_buf(struct mt76x0_dev *dev,
+				 struct mt76x0_dma_buf_rx *e, gfp_t gfp);
+
+static unsigned int ieee80211_get_hdrlen_from_buf(const u8 *data, unsigned len)
+{
+	const struct ieee80211_hdr *hdr = (const struct ieee80211_hdr *)data;
+	unsigned int hdrlen;
+
+	if (unlikely(len < 10))
+		return 0;
+	hdrlen = ieee80211_hdrlen(hdr->frame_control);
+	if (unlikely(hdrlen > len))
+		return 0;
+	return hdrlen;
+}
+
+static struct sk_buff *
+mt76x0_rx_skb_from_seg(struct mt76x0_dev *dev, struct mt76x0_rxwi *rxwi,
+			void *data, u32 seg_len, u32 truesize, struct page *p)
+{
+	struct sk_buff *skb;
+	u32 true_len, hdr_len = 0, copy, frag;
+
+	skb = alloc_skb(p ? 128 : seg_len, GFP_ATOMIC);
+	if (!skb)
+		return NULL;
+
+	true_len = mt76x0_mac_process_rx(dev, skb, data, rxwi);
+	if (!true_len || true_len > seg_len)
+		goto bad_frame;
+
+	hdr_len = ieee80211_get_hdrlen_from_buf(data, true_len);
+	if (!hdr_len)
+		goto bad_frame;
+
+	if (rxwi->rxinfo & cpu_to_le32(MT_RXINFO_L2PAD)) {
+		memcpy(skb_put(skb, hdr_len), data, hdr_len);
+
+		data += hdr_len + 2;
+		true_len -= hdr_len;
+		hdr_len = 0;
+	}
+
+	/* If not doing paged RX, the allocated skb will always have enough space */
+	copy = (true_len <= skb_tailroom(skb)) ? true_len : hdr_len + 8;
+	frag = true_len - copy;
+
+	memcpy(skb_put(skb, copy), data, copy);
+	data += copy;
+
+	if (frag) {
+		skb_add_rx_frag(skb, 0, p, data - page_address(p),
+				frag, truesize);
+		get_page(p);
+	}
+
+	return skb;
+
+bad_frame:
+	dev_err_ratelimited(dev->mt76.dev, "Error: incorrect frame len:%u hdr:%u\n",
+			    true_len, hdr_len);
+	dev_kfree_skb(skb);
+	return NULL;
+}
+
+static void mt76x0_rx_process_seg(struct mt76x0_dev *dev, u8 *data,
+				   u32 seg_len, struct page *p)
+{
+	struct sk_buff *skb;
+	struct mt76x0_rxwi *rxwi;
+	u32 fce_info, truesize = seg_len;
+
+	/* DMA_INFO field at the beginning of the segment contains only some of
+	 * the information, we need to read the FCE descriptor from the end.
+	 */
+	fce_info = get_unaligned_le32(data + seg_len - MT_FCE_INFO_LEN);
+	seg_len -= MT_FCE_INFO_LEN;
+
+	data += MT_DMA_HDR_LEN;
+	seg_len -= MT_DMA_HDR_LEN;
+
+	rxwi = (struct mt76x0_rxwi *) data;
+	data += sizeof(struct mt76x0_rxwi);
+	seg_len -= sizeof(struct mt76x0_rxwi);
+
+	if (unlikely(FIELD_GET(MT_RXD_INFO_TYPE, fce_info)))
+		dev_err_once(dev->mt76.dev, "Error: RX path seen a non-pkt urb\n");
+
+	trace_mt76x0_rx(&dev->mt76, rxwi, fce_info);
+
+	skb = mt76x0_rx_skb_from_seg(dev, rxwi, data, seg_len, truesize, p);
+	if (!skb)
+		return;
+
+	spin_lock(&dev->mac_lock);
+	ieee80211_rx(dev->mt76.hw, skb);
+	spin_unlock(&dev->mac_lock);
+}
+
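+/* An RX URB may aggregate several DMA segments; each segment starts with a
+ * 16-bit little-endian DMA length and must be 4-byte aligned. Returns the
+ * full segment length including the DMA and RX info headers, or 0 when no
+ * valid segment is left in the buffer.
+ */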
+static u16 mt76x0_rx_next_seg_len(u8 *data, u32 data_len)
+{
+	u32 min_seg_len = MT_DMA_HDR_LEN + MT_RX_INFO_LEN +
+		sizeof(struct mt76x0_rxwi) + MT_FCE_INFO_LEN;
+	u16 dma_len = get_unaligned_le16(data);
+
+	if (data_len < min_seg_len ||
+	    WARN_ON(!dma_len) ||
+	    WARN_ON(dma_len + MT_DMA_HDRS > data_len) ||
+	    WARN_ON(dma_len & 0x3))
+		return 0;
+
+	return MT_DMA_HDRS + dma_len;
+}
+
+static void
+mt76x0_rx_process_entry(struct mt76x0_dev *dev, struct mt76x0_dma_buf_rx *e)
+{
+	u32 seg_len, data_len = e->urb->actual_length;
+	u8 *data = page_address(e->p);
+	struct page *new_p = NULL;
+	int cnt = 0;
+
+	if (!test_bit(MT76_STATE_INITIALIZED, &dev->mt76.state))
+		return;
+
+	/* Copy the data out when the buffer holds only a little of it;
+	 * otherwise allocate a replacement page and attach chunks of the
+	 * current one as skb fragments.
+	 */
+	if (data_len > 512)
+		new_p = dev_alloc_pages(MT_RX_ORDER);
+
+	while ((seg_len = mt76x0_rx_next_seg_len(data, data_len))) {
+		mt76x0_rx_process_seg(dev, data, seg_len, new_p ? e->p : NULL);
+
+		data_len -= seg_len;
+		data += seg_len;
+		cnt++;
+	}
+
+	if (cnt > 1)
+		trace_mt76x0_rx_dma_aggr(&dev->mt76, cnt, !!new_p);
+
+	if (new_p) {
+		/* we have one extra ref from the allocator */
+		__free_pages(e->p, MT_RX_ORDER);
+
+		e->p = new_p;
+	}
+}
+
+static struct mt76x0_dma_buf_rx *
+mt76x0_rx_get_pending_entry(struct mt76x0_dev *dev)
+{
+	struct mt76x0_rx_queue *q = &dev->rx_q;
+	struct mt76x0_dma_buf_rx *buf = NULL;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->rx_lock, flags);
+
+	if (!q->pending)
+		goto out;
+
+	buf = &q->e[q->start];
+	q->pending--;
+	q->start = (q->start + 1) % q->entries;
+out:
+	spin_unlock_irqrestore(&dev->rx_lock, flags);
+
+	return buf;
+}
+
+static void mt76x0_complete_rx(struct urb *urb)
+{
+	struct mt76x0_dev *dev = urb->context;
+	struct mt76x0_rx_queue *q = &dev->rx_q;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->rx_lock, flags);
+
+	if (mt76x0_urb_has_error(urb))
+		dev_err(dev->mt76.dev, "Error: RX urb failed:%d\n", urb->status);
+	if (WARN_ONCE(q->e[q->end].urb != urb, "RX urb mismatch"))
+		goto out;
+
+	q->end = (q->end + 1) % q->entries;
+	q->pending++;
+	tasklet_schedule(&dev->rx_tasklet);
+out:
+	spin_unlock_irqrestore(&dev->rx_lock, flags);
+}
+
+static void mt76x0_rx_tasklet(unsigned long data)
+{
+	struct mt76x0_dev *dev = (struct mt76x0_dev *) data;
+	struct mt76x0_dma_buf_rx *e;
+
+	while ((e = mt76x0_rx_get_pending_entry(dev))) {
+		if (e->urb->status)
+			continue;
+
+		mt76x0_rx_process_entry(dev, e);
+		mt76x0_submit_rx_buf(dev, e, GFP_ATOMIC);
+	}
+}
+
+static void mt76x0_complete_tx(struct urb *urb)
+{
+	struct mt76x0_tx_queue *q = urb->context;
+	struct mt76x0_dev *dev = q->dev;
+	struct sk_buff *skb;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->tx_lock, flags);
+
+	if (mt76x0_urb_has_error(urb))
+		dev_err(dev->mt76.dev, "Error: TX urb failed:%d\n", urb->status);
+	if (WARN_ONCE(q->e[q->start].urb != urb, "TX urb mismatch"))
+		goto out;
+
+	skb = q->e[q->start].skb;
+	trace_mt76x0_tx_dma_done(&dev->mt76, skb);
+
+	__skb_queue_tail(&dev->tx_skb_done, skb);
+	tasklet_schedule(&dev->tx_tasklet);
+
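+	/* Wake the mac80211 queue once the ring has drained back to 7/8 of
+	 * its capacity after having been stopped when full.
+	 */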
+	if (q->used == q->entries - q->entries / 8)
+		ieee80211_wake_queue(dev->mt76.hw, skb_get_queue_mapping(skb));
+
+	q->start = (q->start + 1) % q->entries;
+	q->used--;
+out:
+	spin_unlock_irqrestore(&dev->tx_lock, flags);
+}
+
+static void mt76x0_tx_tasklet(unsigned long data)
+{
+	struct mt76x0_dev *dev = (struct mt76x0_dev *) data;
+	struct sk_buff_head skbs;
+	unsigned long flags;
+
+	__skb_queue_head_init(&skbs);
+
+	spin_lock_irqsave(&dev->tx_lock, flags);
+
+	set_bit(MT76_MORE_STATS, &dev->mt76.state);
+	if (!test_and_set_bit(MT76_READING_STATS, &dev->mt76.state))
+		queue_delayed_work(dev->stat_wq, &dev->stat_work,
+				   msecs_to_jiffies(10));
+
+	skb_queue_splice_init(&dev->tx_skb_done, &skbs);
+
+	spin_unlock_irqrestore(&dev->tx_lock, flags);
+
+	while (!skb_queue_empty(&skbs)) {
+		struct sk_buff *skb = __skb_dequeue(&skbs);
+
+		mt76x0_tx_status(dev, skb);
+	}
+}
+
+static int mt76x0_dma_submit_tx(struct mt76x0_dev *dev,
+				 struct sk_buff *skb, u8 ep)
+{
+	struct usb_device *usb_dev = mt76x0_to_usb_dev(dev);
+	unsigned snd_pipe = usb_sndbulkpipe(usb_dev, dev->out_ep[ep]);
+	struct mt76x0_dma_buf_tx *e;
+	struct mt76x0_tx_queue *q = &dev->tx_q[ep];
+	unsigned long flags;
+	int ret;
+
+	spin_lock_irqsave(&dev->tx_lock, flags);
+
+	if (WARN_ON_ONCE(q->entries <= q->used)) {
+		ret = -ENOSPC;
+		goto out;
+	}
+
+	e = &q->e[q->end];
+	e->skb = skb;
+	usb_fill_bulk_urb(e->urb, usb_dev, snd_pipe, skb->data, skb->len,
+			  mt76x0_complete_tx, q);
+	ret = usb_submit_urb(e->urb, GFP_ATOMIC);
+	if (ret) {
+		/* Special-handle ENODEV from TX urb submission because it will
+		 * often be the first ENODEV we see after the device is removed.
+		 */
+		if (ret == -ENODEV)
+			set_bit(MT76_REMOVED, &dev->mt76.state);
+		else
+			dev_err(dev->mt76.dev, "Error: TX urb submit failed:%d\n",
+				ret);
+		goto out;
+	}
+
+	q->end = (q->end + 1) % q->entries;
+	q->used++;
+
+	if (q->used >= q->entries)
+		ieee80211_stop_queue(dev->mt76.hw, skb_get_queue_mapping(skb));
+out:
+	spin_unlock_irqrestore(&dev->tx_lock, flags);
+
+	return ret;
+}
+
+/* Map USB endpoint number to Q id in the DMA engine */
+static enum mt76_qsel ep2dmaq(u8 ep)
+{
+	if (ep == 5)
+		return MT_QSEL_MGMT;
+	return MT_QSEL_EDCA;
+}
+
+int mt76x0_dma_enqueue_tx(struct mt76x0_dev *dev, struct sk_buff *skb,
+			   struct mt76_wcid *wcid, int hw_q)
+{
+	u8 ep = q2ep(hw_q);
+	u32 dma_flags;
+	int ret;
+
+	dma_flags = MT_TXD_PKT_INFO_80211;
+	if (wcid->hw_key_idx == 0xff)
+		dma_flags |= MT_TXD_PKT_INFO_WIV;
+
+	ret = mt76x0_dma_skb_wrap_pkt(skb, ep2dmaq(ep), dma_flags);
+	if (ret)
+		return ret;
+
+	ret = mt76x0_dma_submit_tx(dev, skb, ep);
+
+	if (ret) {
+		ieee80211_free_txskb(dev->mt76.hw, skb);
+		return ret;
+	}
+
+	return 0;
+}
+
+static void mt76x0_kill_rx(struct mt76x0_dev *dev)
+{
+	int i;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->rx_lock, flags);
+
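+	/* usb_poison_urb() may sleep, so drop the lock around each call */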
+	for (i = 0; i < dev->rx_q.entries; i++) {
+		int next = dev->rx_q.end;
+
+		spin_unlock_irqrestore(&dev->rx_lock, flags);
+		usb_poison_urb(dev->rx_q.e[next].urb);
+		spin_lock_irqsave(&dev->rx_lock, flags);
+	}
+
+	spin_unlock_irqrestore(&dev->rx_lock, flags);
+}
+
+static int mt76x0_submit_rx_buf(struct mt76x0_dev *dev,
+				 struct mt76x0_dma_buf_rx *e, gfp_t gfp)
+{
+	struct usb_device *usb_dev = mt76x0_to_usb_dev(dev);
+	u8 *buf = page_address(e->p);
+	unsigned pipe;
+	int ret;
+
+	pipe = usb_rcvbulkpipe(usb_dev, dev->in_ep[MT_EP_IN_PKT_RX]);
+
+	usb_fill_bulk_urb(e->urb, usb_dev, pipe, buf, MT_RX_URB_SIZE,
+			  mt76x0_complete_rx, dev);
+
+	trace_mt76x0_submit_urb(&dev->mt76, e->urb);
+	ret = usb_submit_urb(e->urb, gfp);
+	if (ret)
+		dev_err(dev->mt76.dev, "Error: submit RX URB failed:%d\n", ret);
+
+	return ret;
+}
+
+static int mt76x0_submit_rx(struct mt76x0_dev *dev)
+{
+	int i, ret;
+
+	for (i = 0; i < dev->rx_q.entries; i++) {
+		ret = mt76x0_submit_rx_buf(dev, &dev->rx_q.e[i], GFP_KERNEL);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+static void mt76x0_free_rx(struct mt76x0_dev *dev)
+{
+	int i;
+
+	for (i = 0; i < dev->rx_q.entries; i++) {
+		__free_pages(dev->rx_q.e[i].p, MT_RX_ORDER);
+		usb_free_urb(dev->rx_q.e[i].urb);
+	}
+}
+
+static int mt76x0_alloc_rx(struct mt76x0_dev *dev)
+{
+	int i;
+
+	memset(&dev->rx_q, 0, sizeof(dev->rx_q));
+	dev->rx_q.dev = dev;
+	dev->rx_q.entries = N_RX_ENTRIES;
+
+	for (i = 0; i < N_RX_ENTRIES; i++) {
+		dev->rx_q.e[i].urb = usb_alloc_urb(0, GFP_KERNEL);
+		dev->rx_q.e[i].p = dev_alloc_pages(MT_RX_ORDER);
+
+		if (!dev->rx_q.e[i].urb || !dev->rx_q.e[i].p)
+			return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static void mt76x0_free_tx_queue(struct mt76x0_tx_queue *q)
+{
+	int i;
+
+	WARN_ON(q->used);
+
+	for (i = 0; i < q->entries; i++)  {
+		usb_poison_urb(q->e[i].urb);
+		usb_free_urb(q->e[i].urb);
+	}
+}
+
+static void mt76x0_free_tx(struct mt76x0_dev *dev)
+{
+	int i;
+
+	for (i = 0; i < __MT_EP_OUT_MAX; i++)
+		mt76x0_free_tx_queue(&dev->tx_q[i]);
+}
+
+static int mt76x0_alloc_tx_queue(struct mt76x0_dev *dev,
+				  struct mt76x0_tx_queue *q)
+{
+	int i;
+
+	q->dev = dev;
+	q->entries = N_TX_ENTRIES;
+
+	for (i = 0; i < N_TX_ENTRIES; i++) {
+		q->e[i].urb = usb_alloc_urb(0, GFP_KERNEL);
+		if (!q->e[i].urb)
+			return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static int mt76x0_alloc_tx(struct mt76x0_dev *dev)
+{
+	int i;
+
+	dev->tx_q = devm_kcalloc(dev->mt76.dev, __MT_EP_OUT_MAX,
+				 sizeof(*dev->tx_q), GFP_KERNEL);
+	if (!dev->tx_q)
+		return -ENOMEM;
+
+	for (i = 0; i < __MT_EP_OUT_MAX; i++)
+		if (mt76x0_alloc_tx_queue(dev, &dev->tx_q[i]))
+			return -ENOMEM;
+
+	return 0;
+}
+
+int mt76x0_dma_init(struct mt76x0_dev *dev)
+{
+	int ret = -ENOMEM;
+
+	tasklet_init(&dev->tx_tasklet, mt76x0_tx_tasklet, (unsigned long) dev);
+	tasklet_init(&dev->rx_tasklet, mt76x0_rx_tasklet, (unsigned long) dev);
+
+	ret = mt76x0_alloc_tx(dev);
+	if (ret)
+		goto err;
+	ret = mt76x0_alloc_rx(dev);
+	if (ret)
+		goto err;
+
+	ret = mt76x0_submit_rx(dev);
+	if (ret)
+		goto err;
+
+	return 0;
+err:
+	mt76x0_dma_cleanup(dev);
+	return ret;
+}
+
+void mt76x0_dma_cleanup(struct mt76x0_dev *dev)
+{
+	mt76x0_kill_rx(dev);
+
+	tasklet_kill(&dev->rx_tasklet);
+
+	mt76x0_free_rx(dev);
+	mt76x0_free_tx(dev);
+
+	tasklet_kill(&dev->tx_tasklet);
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/dma.h b/drivers/net/wireless/mediatek/mt76/mt76x0/dma.h
new file mode 100644
index 0000000000000000000000000000000000000000..891ce1c3461f57bfef9444fc27399545fb17fa1b
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/dma.h
@@ -0,0 +1,126 @@
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MT76X0U_DMA_H
+#define __MT76X0U_DMA_H
+
+#include <asm/unaligned.h>
+#include <linux/skbuff.h>
+
+#define MT_DMA_HDR_LEN			4
+#define MT_RX_INFO_LEN			4
+#define MT_FCE_INFO_LEN			4
+#define MT_DMA_HDRS			(MT_DMA_HDR_LEN + MT_RX_INFO_LEN)
+
+/* Common Tx DMA descriptor fields */
+#define MT_TXD_INFO_LEN		GENMASK(15, 0)
+#define MT_TXD_INFO_D_PORT	GENMASK(29, 27)
+#define MT_TXD_INFO_TYPE	GENMASK(31, 30)
+
+/* Tx DMA MCU command specific flags */
+#define MT_TXD_CMD_SEQ		GENMASK(19, 16)
+#define MT_TXD_CMD_TYPE		GENMASK(26, 20)
+
+enum mt76_msg_port {
+	WLAN_PORT,
+	CPU_RX_PORT,
+	CPU_TX_PORT,
+	HOST_PORT,
+	VIRTUAL_CPU_RX_PORT,
+	VIRTUAL_CPU_TX_PORT,
+	DISCARD,
+};
+
+enum mt76_info_type {
+	DMA_PACKET,
+	DMA_COMMAND,
+};
+
+/* Tx DMA packet specific flags */
+#define MT_TXD_PKT_INFO_NEXT_VLD	BIT(16)
+#define MT_TXD_PKT_INFO_TX_BURST	BIT(17)
+#define MT_TXD_PKT_INFO_80211		BIT(19)
+#define MT_TXD_PKT_INFO_TSO		BIT(20)
+#define MT_TXD_PKT_INFO_CSO		BIT(21)
+#define MT_TXD_PKT_INFO_WIV		BIT(24)
+#define MT_TXD_PKT_INFO_QSEL		GENMASK(26, 25)
+
+enum mt76_qsel {
+	MT_QSEL_MGMT,
+	MT_QSEL_HCCA,
+	MT_QSEL_EDCA,
+	MT_QSEL_EDCA_2,
+};
+
+static inline int mt76x0_dma_skb_wrap(struct sk_buff *skb,
+				       enum mt76_msg_port d_port,
+				       enum mt76_info_type type, u32 flags)
+{
+	u32 info;
+
+	/* Buffer layout:
+	 *	|   4B   | xfer len |      pad       |  4B  |
+	 *	| TXINFO | pkt/cmd  | zero pad to 4B | zero |
+	 *
+	 * length field of TXINFO should be set to 'xfer len'.
+	 */
+
+	info = flags |
+		FIELD_PREP(MT_TXD_INFO_LEN, round_up(skb->len, 4)) |
+		FIELD_PREP(MT_TXD_INFO_D_PORT, d_port) |
+		FIELD_PREP(MT_TXD_INFO_TYPE, type);
+
+	put_unaligned_le32(info, skb_push(skb, sizeof(info)));
+	return skb_put_padto(skb, round_up(skb->len, 4) + 4);
+}
+
+static inline int
+mt76x0_dma_skb_wrap_pkt(struct sk_buff *skb, enum mt76_qsel qsel, u32 flags)
+{
+	flags |= FIELD_PREP(MT_TXD_PKT_INFO_QSEL, qsel);
+	return mt76x0_dma_skb_wrap(skb, WLAN_PORT, DMA_PACKET, flags);
+}
+
+/* Common Rx DMA descriptor fields */
+#define MT_RXD_INFO_LEN			GENMASK(13, 0)
+#define MT_RXD_INFO_PCIE_INTR		BIT(24)
+#define MT_RXD_INFO_QSEL		GENMASK(26, 25)
+#define MT_RXD_INFO_PORT		GENMASK(29, 27)
+#define MT_RXD_INFO_TYPE		GENMASK(31, 30)
+
+/* Rx DMA packet specific flags */
+#define MT_RXD_PKT_INFO_UDP_ERR		BIT(16)
+#define MT_RXD_PKT_INFO_TCP_ERR		BIT(17)
+#define MT_RXD_PKT_INFO_IP_ERR		BIT(18)
+#define MT_RXD_PKT_INFO_PKT_80211	BIT(19)
+#define MT_RXD_PKT_INFO_L3L4_DONE	BIT(20)
+#define MT_RXD_PKT_INFO_MAC_LEN		GENMASK(23, 21)
+
+/* Rx DMA MCU command specific flags */
+#define MT_RXD_CMD_INFO_SELF_GEN	BIT(15)
+#define MT_RXD_CMD_INFO_CMD_SEQ		GENMASK(19, 16)
+#define MT_RXD_CMD_INFO_EVT_TYPE	GENMASK(23, 20)
+
+enum mt76_evt_type {
+	CMD_DONE,
+	CMD_ERROR,
+	CMD_RETRY,
+	EVENT_PWR_RSP,
+	EVENT_WOW_RSP,
+	EVENT_CARRIER_DETECT_RSP,
+	EVENT_DFS_DETECT_RSP,
+};
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c
new file mode 100644
index 0000000000000000000000000000000000000000..1ecd018f12b858551b8e681fa207c4ffef9924a2
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c
@@ -0,0 +1,445 @@
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/of.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/etherdevice.h>
+#include <asm/unaligned.h>
+#include "mt76x0.h"
+#include "eeprom.h"
+
+static bool
+field_valid(u8 val)
+{
+	return val != 0xff;
+}
+
+static s8
+field_validate(u8 val)
+{
+	if (!field_valid(val))
+		return 0;
+
+	return val;
+}
+
+static inline int
+sign_extend(u32 val, unsigned int size)
+{
+	bool sign = val & BIT(size - 1);
+
+	val &= BIT(size - 1) - 1;
+
+	return sign ? val : -val;
+}
+
+static int
+mt76x0_efuse_read(struct mt76x0_dev *dev, u16 addr, u8 *data,
+		   enum mt76x0_eeprom_access_modes mode)
+{
+	u32 val;
+	int i;
+
+	val = mt76_rr(dev, MT_EFUSE_CTRL);
+	val &= ~(MT_EFUSE_CTRL_AIN |
+		 MT_EFUSE_CTRL_MODE);
+	val |= FIELD_PREP(MT_EFUSE_CTRL_AIN, addr & ~0xf) |
+	       FIELD_PREP(MT_EFUSE_CTRL_MODE, mode) |
+	       MT_EFUSE_CTRL_KICK;
+	mt76_wr(dev, MT_EFUSE_CTRL, val);
+
+	if (!mt76_poll(dev, MT_EFUSE_CTRL, MT_EFUSE_CTRL_KICK, 0, 1000))
+		return -ETIMEDOUT;
+
+	val = mt76_rr(dev, MT_EFUSE_CTRL);
+	if ((val & MT_EFUSE_CTRL_AOUT) == MT_EFUSE_CTRL_AOUT) {
+		/* Parts of the EEPROM not in the usage map (0x80-0xc0, 0xf0)
+		 * will not return valid data, but that's OK.
+		 */
+		memset(data, 0xff, 16);
+		return 0;
+	}
+
+	for (i = 0; i < 4; i++) {
+		val = mt76_rr(dev, MT_EFUSE_DATA(i));
+		put_unaligned_le32(val, data + 4 * i);
+	}
+
+	return 0;
+}
+
+static int
+mt76x0_efuse_physical_size_check(struct mt76x0_dev *dev)
+{
+	const int map_reads = DIV_ROUND_UP(MT_EFUSE_USAGE_MAP_SIZE, 16);
+	u8 data[map_reads * 16];
+	int ret, i;
+	u32 start = 0, end = 0, cnt_free;
+
+	for (i = 0; i < map_reads; i++) {
+		ret = mt76x0_efuse_read(dev, MT_EE_USAGE_MAP_START + i * 16,
+					 data + i * 16, MT_EE_PHYSICAL_READ);
+		if (ret)
+			return ret;
+	}
+
+	for (i = 0; i < MT_EFUSE_USAGE_MAP_SIZE; i++)
+		if (!data[i]) {
+			if (!start)
+				start = MT_EE_USAGE_MAP_START + i;
+			end = MT_EE_USAGE_MAP_START + i;
+		}
+	cnt_free = end - start + 1;
+
+	if (MT_EFUSE_USAGE_MAP_SIZE - cnt_free < 5) {
+		dev_err(dev->mt76.dev, "Error: this device needs a default EEPROM file, which this driver does not support!\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static void
+mt76x0_set_chip_cap(struct mt76x0_dev *dev, u8 *eeprom)
+{
+	enum mt76x2_board_type { BOARD_TYPE_2GHZ = 1, BOARD_TYPE_5GHZ = 2 };
+	u16 nic_conf0 = get_unaligned_le16(eeprom + MT_EE_NIC_CONF_0);
+	u16 nic_conf1 = get_unaligned_le16(eeprom + MT_EE_NIC_CONF_1);
+
+	dev_dbg(dev->mt76.dev, "NIC_CONF0: %04x NIC_CONF1: %04x\n", nic_conf0, nic_conf1);
+
+	switch (FIELD_GET(MT_EE_NIC_CONF_0_BOARD_TYPE, nic_conf0)) {
+	case BOARD_TYPE_5GHZ:
+		dev->ee->has_5ghz = true;
+		break;
+	case BOARD_TYPE_2GHZ:
+		dev->ee->has_2ghz = true;
+		break;
+	default:
+		dev->ee->has_2ghz = true;
+		dev->ee->has_5ghz = true;
+		break;
+	}
+
+	dev_dbg(dev->mt76.dev, "Has 2GHZ %d 5GHZ %d\n", dev->ee->has_2ghz, dev->ee->has_5ghz);
+
+	if (!field_valid(nic_conf1 & 0xff))
+		nic_conf1 &= 0xff00;
+
+	if (nic_conf1 & MT_EE_NIC_CONF_1_HW_RF_CTRL)
+		dev_err(dev->mt76.dev,
+			"Error: this driver does not support HW RF ctrl\n");
+
+	if (!field_valid(nic_conf0 >> 8))
+		return;
+
+	if (FIELD_GET(MT_EE_NIC_CONF_0_RX_PATH, nic_conf0) > 1 ||
+	    FIELD_GET(MT_EE_NIC_CONF_0_TX_PATH, nic_conf0) > 1)
+		dev_err(dev->mt76.dev,
+			"Error: device has more than 1 RX/TX stream!\n");
+
+	dev->ee->pa_type = FIELD_GET(MT_EE_NIC_CONF_0_PA_TYPE, nic_conf0);
+	dev_dbg(dev->mt76.dev, "PA Type %d\n", dev->ee->pa_type);
+}
+
+static int
+mt76x0_set_macaddr(struct mt76x0_dev *dev, const u8 *eeprom)
+{
+	const void *src = eeprom + MT_EE_MAC_ADDR;
+
+	ether_addr_copy(dev->macaddr, src);
+
+	if (!is_valid_ether_addr(dev->macaddr)) {
+		eth_random_addr(dev->macaddr);
+		dev_info(dev->mt76.dev,
+			 "Invalid MAC address, using random address %pM\n",
+			 dev->macaddr);
+	}
+
+	mt76_wr(dev, MT_MAC_ADDR_DW0, get_unaligned_le32(dev->macaddr));
+	mt76_wr(dev, MT_MAC_ADDR_DW1, get_unaligned_le16(dev->macaddr + 4) |
+		FIELD_PREP(MT_MAC_ADDR_DW1_U2ME_MASK, 0xff));
+
+	return 0;
+}
+
+static void
+mt76x0_set_temp_offset(struct mt76x0_dev *dev, u8 *eeprom)
+{
+	u8 temp = eeprom[MT_EE_TEMP_OFFSET];
+
+	if (field_valid(temp))
+		dev->ee->temp_off = sign_extend(temp, 8);
+	else
+		dev->ee->temp_off = -10;
+}
+
+static void
+mt76x0_set_country_reg(struct mt76x0_dev *dev, u8 *eeprom)
+{
+	/* Note: - region 31 is not valid for mt76x0 (see rtmp_init.c)
+	 *	 - comments in rtmp_def.h are incorrect (see rt_channel.c)
+	 */
+	static const struct reg_channel_bounds chan_bounds[] = {
+		/* EEPROM country regions 0 - 7 */
+		{  1, 11 },	{  1, 13 },	{ 10,  2 },	{ 10,  4 },
+		{ 14,  1 },	{  1, 14 },	{  3,  7 },	{  5,  9 },
+		/* EEPROM country regions 32 - 33 */
+		{  1, 11 },	{  1, 14 }
+	};
+	u8 val = eeprom[MT_EE_COUNTRY_REGION_2GHZ];
+	int idx = -1;
+
+	dev_dbg(dev->mt76.dev, "REG 2GHZ %u REG 5GHZ %u\n", val, eeprom[MT_EE_COUNTRY_REGION_5GHZ]);
+	if (val < 8)
+		idx = val;
+	if (val > 31 && val < 33)
+		idx = val - 32 + 8;
+
+	if (idx != -1)
+		dev_info(dev->mt76.dev,
+			 "EEPROM country region %02hhx (channels %hhd-%hhd)\n",
+			 val, chan_bounds[idx].start,
+			 chan_bounds[idx].start + chan_bounds[idx].num - 1);
+	else
+		idx = 5; /* channels 1 - 14 */
+
+	dev->ee->reg = chan_bounds[idx];
+
+	/* TODO: country region 33 is special - phy should be set to B-mode
+	 *	 before entering channel 14 (see sta/connect.c)
+	 */
+}
+
+static void
+mt76x0_set_rf_freq_off(struct mt76x0_dev *dev, u8 *eeprom)
+{
+	u8 comp;
+
+	dev->ee->rf_freq_off = field_validate(eeprom[MT_EE_FREQ_OFFSET]);
+	comp = field_validate(eeprom[MT_EE_FREQ_OFFSET_COMPENSATION]);
+
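+	/* Bit 7 of the compensation byte selects the sign of the adjustment;
+	 * the low 7 bits hold the magnitude.
+	 */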
+	if (comp & BIT(7))
+		dev->ee->rf_freq_off -= comp & 0x7f;
+	else
+		dev->ee->rf_freq_off += comp;
+}
+
+static void
+mt76x0_set_lna_gain(struct mt76x0_dev *dev, u8 *eeprom)
+{
+	s8 gain;
+
+	dev->ee->lna_gain_2ghz = eeprom[MT_EE_LNA_GAIN_2GHZ];
+	dev->ee->lna_gain_5ghz[0] = eeprom[MT_EE_LNA_GAIN_5GHZ_0];
+
+	gain = eeprom[MT_EE_LNA_GAIN_5GHZ_1];
+	if (gain == 0xff || gain == 0)
+		dev->ee->lna_gain_5ghz[1] = dev->ee->lna_gain_5ghz[0];
+	else
+		dev->ee->lna_gain_5ghz[1] = gain;
+
+	gain = eeprom[MT_EE_LNA_GAIN_5GHZ_2];
+	if (gain == 0xff || gain == 0)
+		dev->ee->lna_gain_5ghz[2] = dev->ee->lna_gain_5ghz[0];
+	else
+		dev->ee->lna_gain_5ghz[2] = gain;
+}
+
+static void
+mt76x0_set_rssi_offset(struct mt76x0_dev *dev, u8 *eeprom)
+{
+	int i;
+	s8 *rssi_offset = dev->ee->rssi_offset_2ghz;
+
+	for (i = 0; i < 2; i++) {
+		rssi_offset[i] = eeprom[MT_EE_RSSI_OFFSET + i];
+
+		if (rssi_offset[i] < -10 || rssi_offset[i] > 10) {
+			dev_warn(dev->mt76.dev,
+				 "Warning: EEPROM RSSI is invalid %02hhx\n",
+				 rssi_offset[i]);
+			rssi_offset[i] = 0;
+		}
+	}
+
+	rssi_offset = dev->ee->rssi_offset_5ghz;
+
+	for (i = 0; i < 3; i++) {
+		rssi_offset[i] = eeprom[MT_EE_RSSI_OFFSET_5GHZ + i];
+
+		if (rssi_offset[i] < -10 || rssi_offset[i] > 10) {
+			dev_warn(dev->mt76.dev,
+				 "Warning: EEPROM RSSI is invalid %02hhx\n",
+				 rssi_offset[i]);
+			rssi_offset[i] = 0;
+		}
+	}
+}
+
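+/* Apply the BW40 delta to each of the four s6 per-rate power values packed
+ * into a 32-bit EEPROM word.
+ */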
+static u32
+calc_bw40_power_rate(u32 value, int delta)
+{
+	u32 ret = 0;
+	int i, tmp;
+
+	for (i = 0; i < 4; i++) {
+		tmp = s6_to_int((value >> i*8) & 0xff) + delta;
+		ret |= (u32)(int_to_s6(tmp)) << i*8;
+	}
+
+	return ret;
+}
+
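+/* A BW40 power delta byte is valid only when bit 7 is set; bits 0-4 hold the
+ * magnitude (capped at 8) and bit 6 selects the sign.
+ */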
+static s8
+get_delta(u8 val)
+{
+	s8 ret;
+
+	if (!field_valid(val) || !(val & BIT(7)))
+		return 0;
+
+	ret = val & 0x1f;
+	if (ret > 8)
+		ret = 8;
+	if (val & BIT(6))
+		ret = -ret;
+
+	return ret;
+}
+
+static void
+mt76x0_set_tx_power_per_rate(struct mt76x0_dev *dev, u8 *eeprom)
+{
+	s8 bw40_delta_2g, bw40_delta_5g;
+	u32 val;
+	int i;
+
+	bw40_delta_2g = get_delta(eeprom[MT_EE_TX_POWER_DELTA_BW40]);
+	bw40_delta_5g = get_delta(eeprom[MT_EE_TX_POWER_DELTA_BW40 + 1]);
+
+	for (i = 0; i < 5; i++) {
+		val = get_unaligned_le32(eeprom + MT_EE_TX_POWER_BYRATE(i));
+
+		/* The upper 16 bits of the last word are not used. */
+		if (i == 4)
+			val &= 0x0000ffff;
+
+		dev->ee->tx_pwr_cfg_2g[i][0] = val;
+		dev->ee->tx_pwr_cfg_2g[i][1] = calc_bw40_power_rate(val, bw40_delta_2g);
+	}
+
+	/* Reading per-rate TX power for the 5 GHz band is a bit more complex:
+	 * we mix 16-bit and 32-bit reads and sometimes shift values into place.
+	 */
+	val = get_unaligned_le16(eeprom + 0x120);
+	val <<= 16;
+	dev->ee->tx_pwr_cfg_5g[0][0] = val;
+	dev->ee->tx_pwr_cfg_5g[0][1] = calc_bw40_power_rate(val, bw40_delta_5g);
+
+	val = get_unaligned_le32(eeprom + 0x122);
+	dev->ee->tx_pwr_cfg_5g[1][0] = val;
+	dev->ee->tx_pwr_cfg_5g[1][1] = calc_bw40_power_rate(val, bw40_delta_5g);
+
+	val = get_unaligned_le16(eeprom + 0x126);
+	dev->ee->tx_pwr_cfg_5g[2][0] = val;
+	dev->ee->tx_pwr_cfg_5g[2][1] = calc_bw40_power_rate(val, bw40_delta_5g);
+
+	val = get_unaligned_le16(eeprom + 0xec);
+	val <<= 16;
+	dev->ee->tx_pwr_cfg_5g[3][0] = val;
+	dev->ee->tx_pwr_cfg_5g[3][1] = calc_bw40_power_rate(val, bw40_delta_5g);
+
+	val = get_unaligned_le16(eeprom + 0xee);
+	dev->ee->tx_pwr_cfg_5g[4][0] = val;
+	dev->ee->tx_pwr_cfg_5g[4][1] = calc_bw40_power_rate(val, bw40_delta_5g);
+}
+
+static void
+mt76x0_set_tx_power_per_chan(struct mt76x0_dev *dev, u8 *eeprom)
+{
+	int i;
+	u8 tx_pwr;
+
+	for (i = 0; i < 14; i++) {
+		tx_pwr = eeprom[MT_EE_TX_POWER_OFFSET_2GHZ + i];
+		if (tx_pwr <= 0x3f && tx_pwr > 0)
+			dev->ee->tx_pwr_per_chan[i] = tx_pwr;
+		else
+			dev->ee->tx_pwr_per_chan[i] = 5;
+	}
+
+	for (i = 0; i < 40; i++) {
+		tx_pwr = eeprom[MT_EE_TX_POWER_OFFSET_5GHZ + i];
+		if (tx_pwr <= 0x3f && tx_pwr > 0)
+			dev->ee->tx_pwr_per_chan[14 + i] = tx_pwr;
+		else
+			dev->ee->tx_pwr_per_chan[14 + i] = 5;
+	}
+
+	dev->ee->tx_pwr_per_chan[54] = dev->ee->tx_pwr_per_chan[22];
+	dev->ee->tx_pwr_per_chan[55] = dev->ee->tx_pwr_per_chan[28];
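+	/* With L2 padding the hardware inserts 2 bytes between the 802.11
+	 * header and the payload; copy the header out first and skip the pad.
+	 */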
+	dev->ee->tx_pwr_per_chan[56] = dev->ee->tx_pwr_per_chan[34];
+	dev->ee->tx_pwr_per_chan[57] = dev->ee->tx_pwr_per_chan[44];
+}
+
+int
+mt76x0_eeprom_init(struct mt76x0_dev *dev)
+{
+	u8 *eeprom;
+	int i, ret;
+
+	ret = mt76x0_efuse_physical_size_check(dev);
+	if (ret)
+		return ret;
+
+	dev->ee = devm_kzalloc(dev->mt76.dev, sizeof(*dev->ee), GFP_KERNEL);
+	if (!dev->ee)
+		return -ENOMEM;
+
+	eeprom = kmalloc(MT76X0_EEPROM_SIZE, GFP_KERNEL);
+	if (!eeprom)
+		return -ENOMEM;
+
+	for (i = 0; i + 16 <= MT76X0_EEPROM_SIZE; i += 16) {
+		ret = mt76x0_efuse_read(dev, i, eeprom + i, MT_EE_READ);
+		if (ret)
+			goto out;
+	}
+
+	if (eeprom[MT_EE_VERSION_EE] > MT76X0U_EE_MAX_VER)
+		dev_warn(dev->mt76.dev,
+			 "Warning: unsupported EEPROM version %02hhx\n",
+			 eeprom[MT_EE_VERSION_EE]);
+	dev_info(dev->mt76.dev, "EEPROM ver:%02hhx fae:%02hhx\n",
+		 eeprom[MT_EE_VERSION_EE], eeprom[MT_EE_VERSION_FAE]);
+
+	mt76x0_set_macaddr(dev, eeprom);
+	mt76x0_set_chip_cap(dev, eeprom);
+	/* The DMA_INFO field at the beginning of the segment contains only part
+	 * of the information; the FCE descriptor has to be read from the end.
+	 */
+	mt76x0_set_lna_gain(dev, eeprom);
+	mt76x0_set_rssi_offset(dev, eeprom);
+	dev->chainmask = 0x0101;
+
+	mt76x0_set_tx_power_per_rate(dev, eeprom);
+	mt76x0_set_tx_power_per_chan(dev, eeprom);
+
+out:
+	kfree(eeprom);
+	return ret;
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.h b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.h
new file mode 100644
index 0000000000000000000000000000000000000000..e37b573aed7b88e339d9b22e89cb20f9fa734110
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.h
@@ -0,0 +1,149 @@
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MT76X0U_EEPROM_H
+#define __MT76X0U_EEPROM_H
+
+struct mt76x0_dev;
+
+#define MT76X0U_EE_MAX_VER			0x0c
+#define MT76X0_EEPROM_SIZE			512
+
+#define MT76X0U_DEFAULT_TX_POWER		6
+
+enum mt76_eeprom_field {
+	MT_EE_CHIP_ID =				0x00,
+	MT_EE_VERSION_FAE =			0x02,
+	MT_EE_VERSION_EE =			0x03,
+	MT_EE_MAC_ADDR =			0x04,
+	MT_EE_NIC_CONF_0 =			0x34,
+	MT_EE_NIC_CONF_1 =			0x36,
+	MT_EE_COUNTRY_REGION_5GHZ =		0x38,
+	MT_EE_COUNTRY_REGION_2GHZ =		0x39,
+	MT_EE_FREQ_OFFSET =			0x3a,
+	MT_EE_NIC_CONF_2 =			0x42,
+
+	MT_EE_LNA_GAIN_2GHZ =			0x44,
+	MT_EE_LNA_GAIN_5GHZ_0 =			0x45,
+	MT_EE_RSSI_OFFSET =			0x46,
+	MT_EE_RSSI_OFFSET_5GHZ =		0x4a,
+	MT_EE_LNA_GAIN_5GHZ_1 =			0x49,
+	MT_EE_LNA_GAIN_5GHZ_2 =			0x4d,
+
+	MT_EE_TX_POWER_DELTA_BW40 =		0x50,
+
+	MT_EE_TX_POWER_OFFSET_2GHZ =		0x52,
+
+	MT_EE_TX_TSSI_SLOPE =			0x6e,
+	MT_EE_TX_TSSI_OFFSET_GROUP =		0x6f,
+	MT_EE_TX_TSSI_OFFSET =			0x76,
+
+	MT_EE_TX_POWER_OFFSET_5GHZ =		0x78,
+
+	MT_EE_TEMP_OFFSET =			0xd1,
+	MT_EE_FREQ_OFFSET_COMPENSATION =	0xdb,
+	MT_EE_TX_POWER_BYRATE_BASE =		0xde,
+
+	MT_EE_TX_POWER_BYRATE_BASE_5GHZ =	0x120,
+
+	MT_EE_USAGE_MAP_START =			0x1e0,
+	MT_EE_USAGE_MAP_END =			0x1fc,
+};
+
+#define MT_EE_NIC_CONF_0_RX_PATH		GENMASK(3, 0)
+#define MT_EE_NIC_CONF_0_TX_PATH		GENMASK(7, 4)
+#define MT_EE_NIC_CONF_0_PA_TYPE		GENMASK(9, 8)
+#define MT_EE_NIC_CONF_0_BOARD_TYPE		GENMASK(13, 12)
+
+#define MT_EE_NIC_CONF_1_HW_RF_CTRL		BIT(0)
+#define MT_EE_NIC_CONF_1_TEMP_TX_ALC		BIT(1)
+#define MT_EE_NIC_CONF_1_LNA_EXT_2G		BIT(2)
+#define MT_EE_NIC_CONF_1_LNA_EXT_5G		BIT(3)
+#define MT_EE_NIC_CONF_1_TX_ALC_EN		BIT(13)
+
+#define MT_EE_NIC_CONF_2_RX_STREAM		GENMASK(3, 0)
+#define MT_EE_NIC_CONF_2_TX_STREAM		GENMASK(7, 4)
+#define MT_EE_NIC_CONF_2_HW_ANTDIV		BIT(8)
+#define MT_EE_NIC_CONF_2_XTAL_OPTION		GENMASK(10, 9)
+#define MT_EE_NIC_CONF_2_TEMP_DISABLE		BIT(11)
+#define MT_EE_NIC_CONF_2_COEX_METHOD		GENMASK(15, 13)
+
+#define MT_EE_TX_POWER_BYRATE(i)		(MT_EE_TX_POWER_BYRATE_BASE + \
+						 (i) * 4)
+
+#define MT_EFUSE_USAGE_MAP_SIZE			(MT_EE_USAGE_MAP_END -	\
+						 MT_EE_USAGE_MAP_START + 1)
+
+enum mt76x0_eeprom_access_modes {
+	MT_EE_READ = 0,
+	MT_EE_PHYSICAL_READ = 1,
+};
+
+struct reg_channel_bounds {
+	u8 start;
+	u8 num;
+};
+
+struct mt76x0_eeprom_params {
+	u8 rf_freq_off;
+	s16 temp_off;
+	s8 rssi_offset_2ghz[2];
+	s8 rssi_offset_5ghz[3];
+	s8 lna_gain_2ghz;
+	s8 lna_gain_5ghz[3];
+	u8 pa_type;
+
+	/* TX_PWR_CFG_* values from EEPROM for 20 and 40 Mhz bandwidths. */
+	u32 tx_pwr_cfg_2g[5][2];
+	u32 tx_pwr_cfg_5g[5][2];
+
+	u8 tx_pwr_per_chan[58];
+
+	struct reg_channel_bounds reg;
+
+	bool has_2ghz;
+	bool has_5ghz;
+};
+
+int mt76x0_eeprom_init(struct mt76x0_dev *dev);
+
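+/* Helpers for the 6-bit signed (s6) power values used in TX_PWR_CFG_* words */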
+static inline u32 s6_validate(u32 reg)
+{
+	WARN_ON(reg & ~GENMASK(5, 0));
+	return reg & GENMASK(5, 0);
+}
+
+static inline int s6_to_int(u32 reg)
+{
+	int s6;
+
+	s6 = s6_validate(reg);
+	if (s6 & BIT(5))
+		s6 -= BIT(6);
+
+	return s6;
+}
+
+static inline u32 int_to_s6(int val)
+{
+	if (val < -0x20)
+		return 0x20;
+	if (val > 0x1f)
+		return 0x1f;
+
+	return val & 0x3f;
+}
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/init.c b/drivers/net/wireless/mediatek/mt76/mt76x0/init.c
new file mode 100644
index 0000000000000000000000000000000000000000..7cdb3e740522b72001866d4cfe56d3140531a7c2
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/init.c
@@ -0,0 +1,720 @@
+/*
+ * (c) Copyright 2002-2010, Ralink Technology, Inc.
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "mt76x0.h"
+#include "eeprom.h"
+#include "trace.h"
+#include "mcu.h"
+#include "usb.h"
+
+#include "initvals.h"
+
+static void
+mt76x0_set_wlan_state(struct mt76x0_dev *dev, u32 val, bool enable)
+{
+	int i;
+
+	/* Note: we don't turn off WLAN_CLK because that makes the device
+	 *	 not respond properly on the probe path.
+	 *	 In case anyone (PSM?) wants to use this function we can
+	 *	 bring the clock stuff back and fixup the probe path.
+	 */
+
+	if (enable)
+		val |= (MT_WLAN_FUN_CTRL_WLAN_EN |
+			MT_WLAN_FUN_CTRL_WLAN_CLK_EN);
+	else
+		val &= ~(MT_WLAN_FUN_CTRL_WLAN_EN);
+
+	mt76_wr(dev, MT_WLAN_FUN_CTRL, val);
+	udelay(20);
+
+	if (!enable)
+		return;
+
+	for (i = 200; i; i--) {
+		val = mt76_rr(dev, MT_CMB_CTRL);
+
+		if (val & MT_CMB_CTRL_XTAL_RDY && val & MT_CMB_CTRL_PLL_LD)
+			break;
+
+		udelay(20);
+	}
+
+	/* Note: vendor driver tries to disable/enable wlan here and retry
+	 *       but the code which does it is so buggy it must have never
+	 *       triggered, so don't bother.
+	 */
+	if (!i)
+		dev_err(dev->mt76.dev, "Error: PLL and XTAL check failed!\n");
+}
+
+void mt76x0_chip_onoff(struct mt76x0_dev *dev, bool enable, bool reset)
+{
+	u32 val;
+
+	mutex_lock(&dev->hw_atomic_mutex);
+
+	val = mt76_rr(dev, MT_WLAN_FUN_CTRL);
+
+	if (reset) {
+		val |= MT_WLAN_FUN_CTRL_GPIO_OUT_EN;
+		val &= ~MT_WLAN_FUN_CTRL_FRC_WL_ANT_SEL;
+
+		if (val & MT_WLAN_FUN_CTRL_WLAN_EN) {
+			val |= (MT_WLAN_FUN_CTRL_WLAN_RESET |
+				MT_WLAN_FUN_CTRL_WLAN_RESET_RF);
+			mt76_wr(dev, MT_WLAN_FUN_CTRL, val);
+			udelay(20);
+
+			val &= ~(MT_WLAN_FUN_CTRL_WLAN_RESET |
+				 MT_WLAN_FUN_CTRL_WLAN_RESET_RF);
+		}
+	}
+
+	mt76_wr(dev, MT_WLAN_FUN_CTRL, val);
+	udelay(20);
+
+	mt76x0_set_wlan_state(dev, val, enable);
+
+	mutex_unlock(&dev->hw_atomic_mutex);
+}
+
+static void mt76x0_reset_csr_bbp(struct mt76x0_dev *dev)
+{
+	u32 val;
+
+	val = mt76_rr(dev, MT_PBF_SYS_CTRL);
+	val &= ~0x2000;
+	mt76_wr(dev, MT_PBF_SYS_CTRL, val);
+
+	mt76_wr(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_RESET_CSR |
+					 MT_MAC_SYS_CTRL_RESET_BBP);
+
+	msleep(200);
+}
+
+static void mt76x0_init_usb_dma(struct mt76x0_dev *dev)
+{
+	u32 val;
+
+	val = mt76_rr(dev, MT_USB_DMA_CFG);
+
+	val |= FIELD_PREP(MT_USB_DMA_CFG_RX_BULK_AGG_TOUT, MT_USB_AGGR_TIMEOUT) |
+	       FIELD_PREP(MT_USB_DMA_CFG_RX_BULK_AGG_LMT, MT_USB_AGGR_SIZE_LIMIT) |
+	       MT_USB_DMA_CFG_RX_BULK_EN |
+	       MT_USB_DMA_CFG_TX_BULK_EN;
+	if (dev->in_max_packet == 512)
+		val |= MT_USB_DMA_CFG_RX_BULK_AGG_EN;
+	mt76_wr(dev, MT_USB_DMA_CFG, val);
+
+	val = mt76_rr(dev, MT_COM_REG0);
+	if (val & 1)
+		dev_dbg(dev->mt76.dev, "MCU not ready\n");
+
+	val = mt76_rr(dev, MT_USB_DMA_CFG);
+
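+	/* Pulse RX_DROP_OR_PADDING (set, then clear), presumably to flush any
+	 * stale data left in the RX path.
+	 */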
+	val |= MT_USB_DMA_CFG_RX_DROP_OR_PADDING;
+	mt76_wr(dev, MT_USB_DMA_CFG, val);
+	val &= ~MT_USB_DMA_CFG_RX_DROP_OR_PADDING;
+	mt76_wr(dev, MT_USB_DMA_CFG, val);
+}
+
+#define RANDOM_WRITE(dev, tab) \
+	mt76x0_write_reg_pairs(dev, MT_MCU_MEMMAP_WLAN, tab, ARRAY_SIZE(tab))
+
+static int mt76x0_init_bbp(struct mt76x0_dev *dev)
+{
+	int ret, i;
+
+	ret = mt76x0_wait_bbp_ready(dev);
+	if (ret)
+		return ret;
+
+	RANDOM_WRITE(dev, mt76x0_bbp_init_tab);
+
+	for (i = 0; i < ARRAY_SIZE(mt76x0_bbp_switch_tab); i++) {
+		const struct mt76x0_bbp_switch_item *item = &mt76x0_bbp_switch_tab[i];
+		const struct mt76_reg_pair *pair = &item->reg_pair;
+
+		if (((RF_G_BAND | RF_BW_20) & item->bw_band) == (RF_G_BAND | RF_BW_20))
+			mt76_wr(dev, pair->reg, pair->value);
+	}
+
+	RANDOM_WRITE(dev, mt76x0_dcoc_tab);
+
+	return 0;
+}
+
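+/* Program the beacon SRAM offsets: each slot is stored as a 64-byte unit
+ * offset from MT_BEACON_BASE, four slots packed per register.
+ */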
+static void
+mt76_init_beacon_offsets(struct mt76x0_dev *dev)
+{
+	u16 base = MT_BEACON_BASE;
+	u32 regs[4] = {};
+	int i;
+
+	for (i = 0; i < 16; i++) {
+		u16 addr = dev->beacon_offsets[i];
+
+		regs[i / 4] |= ((addr - base) / 64) << (8 * (i % 4));
+	}
+
+	for (i = 0; i < 4; i++)
+		mt76_wr(dev, MT_BCN_OFFSET(i), regs[i]);
+}
+
+static void mt76x0_init_mac_registers(struct mt76x0_dev *dev)
+{
+	u32 reg;
+
+	RANDOM_WRITE(dev, common_mac_reg_table);
+
+	mt76_init_beacon_offsets(dev);
+
+	/* Enable PBF and MAC clock SYS_CTRL[11:10] = 0x3 */
+	RANDOM_WRITE(dev, mt76x0_mac_reg_table);
+
+	/* Release BBP and MAC reset MAC_SYS_CTRL[1:0] = 0x0 */
+	reg = mt76_rr(dev, MT_MAC_SYS_CTRL);
+	reg &= ~0x3;
+	mt76_wr(dev, MT_MAC_SYS_CTRL, reg);
+
+	if (is_mt7610e(dev)) {
+		/* Disable COEX_EN */
+		reg = mt76_rr(dev, MT_COEXCFG0);
+		reg &= 0xFFFFFFFE;
+		mt76_wr(dev, MT_COEXCFG0, reg);
+	}
+
+	/* Set 0x141C[15:12]=0xF */
+	reg = mt76_rr(dev, MT_EXT_CCA_CFG);
+	reg |= 0x0000F000;
+	mt76_wr(dev, MT_EXT_CCA_CFG, reg);
+
+	mt76_clear(dev, MT_FCE_L2_STUFF, MT_FCE_L2_STUFF_WR_MPDU_LEN_EN);
+
+	/* TxRing 9 is for Mgmt frames.
+	 * TxRing 8 is for in-band command frames.
+	 * WMM_RG0_TXQMA: FCE rule defining TxRing 9.
+	 * WMM_RG1_TXQMA: FCE rule defining TxRing 8.
+	 */
+	reg = mt76_rr(dev, MT_WMM_CTRL);
+	reg &= ~0x000003FF;
+	reg |= 0x00000201;
+	mt76_wr(dev, MT_WMM_CTRL, reg);
+
+	/* TODO: Probably not needed */
+	mt76_wr(dev, 0x7028, 0);
+	mt76_wr(dev, 0x7010, 0);
+	mt76_wr(dev, 0x7024, 0);
+	msleep(10);
+}
+
+static int mt76x0_init_wcid_mem(struct mt76x0_dev *dev)
+{
+	u32 *vals;
+	int i, ret;
+
+	vals = kmalloc_array(N_WCIDS * 2, sizeof(*vals), GFP_KERNEL);
+	if (!vals)
+		return -ENOMEM;
+
+	for (i = 0; i < N_WCIDS; i++)  {
+		vals[i * 2] = 0xffffffff;
+		vals[i * 2 + 1] = 0x00ffffff;
+	}
+
+	ret = mt76x0_burst_write_regs(dev, MT_WCID_ADDR_BASE,
+				      vals, N_WCIDS * 2);
+	kfree(vals);
+
+	return ret;
+}
+
+static int mt76x0_init_key_mem(struct mt76x0_dev *dev)
+{
+	u32 vals[4] = {};
+
+	return mt76x0_burst_write_regs(dev, MT_SKEY_MODE_BASE_0,
+					vals, ARRAY_SIZE(vals));
+}
+
+static int mt76x0_init_wcid_attr_mem(struct mt76x0_dev *dev)
+{
+	u32 *vals;
+	int i, ret;
+
+	vals = kmalloc_array(N_WCIDS * 2, sizeof(*vals), GFP_KERNEL);
+	if (!vals)
+		return -ENOMEM;
+
+	for (i = 0; i < N_WCIDS * 2; i++)
+		vals[i] = 1;
+
+	ret = mt76x0_burst_write_regs(dev, MT_WCID_ATTR_BASE,
+				      vals, N_WCIDS * 2);
+	kfree(vals);
+
+	return ret;
+}
+
+static void mt76x0_reset_counters(struct mt76x0_dev *dev)
+{
+	mt76_rr(dev, MT_RX_STA_CNT0);
+	mt76_rr(dev, MT_RX_STA_CNT1);
+	mt76_rr(dev, MT_RX_STA_CNT2);
+	mt76_rr(dev, MT_TX_STA_CNT0);
+	mt76_rr(dev, MT_TX_STA_CNT1);
+	mt76_rr(dev, MT_TX_STA_CNT2);
+}
+
+int mt76x0_mac_start(struct mt76x0_dev *dev)
+{
+	mt76_wr(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_ENABLE_TX);
+
+	if (!mt76_poll(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_TX_DMA_BUSY |
+		       MT_WPDMA_GLO_CFG_RX_DMA_BUSY, 0, 200000))
+		return -ETIMEDOUT;
+
+	dev->rxfilter = MT_RX_FILTR_CFG_CRC_ERR |
+		MT_RX_FILTR_CFG_PHY_ERR | MT_RX_FILTR_CFG_PROMISC |
+		MT_RX_FILTR_CFG_VER_ERR | MT_RX_FILTR_CFG_DUP |
+		MT_RX_FILTR_CFG_CFACK | MT_RX_FILTR_CFG_CFEND |
+		MT_RX_FILTR_CFG_ACK | MT_RX_FILTR_CFG_CTS |
+		MT_RX_FILTR_CFG_RTS | MT_RX_FILTR_CFG_PSPOLL |
+		MT_RX_FILTR_CFG_BA | MT_RX_FILTR_CFG_CTRL_RSV;
+	mt76_wr(dev, MT_RX_FILTR_CFG, dev->rxfilter);
+
+	mt76_wr(dev, MT_MAC_SYS_CTRL,
+		   MT_MAC_SYS_CTRL_ENABLE_TX | MT_MAC_SYS_CTRL_ENABLE_RX);
+
+	if (!mt76_poll(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_TX_DMA_BUSY |
+		       MT_WPDMA_GLO_CFG_RX_DMA_BUSY, 0, 50))
+		return -ETIMEDOUT;
+
+	return 0;
+}
+
+static void mt76x0_mac_stop_hw(struct mt76x0_dev *dev)
+{
+	int i, ok;
+
+	if (test_bit(MT76_REMOVED, &dev->mt76.state))
+		return;
+
+	mt76_clear(dev, MT_BEACON_TIME_CFG, MT_BEACON_TIME_CFG_TIMER_EN |
+		   MT_BEACON_TIME_CFG_SYNC_MODE | MT_BEACON_TIME_CFG_TBTT_EN |
+		   MT_BEACON_TIME_CFG_BEACON_TX);
+
+	if (!mt76_poll(dev, MT_USB_DMA_CFG, MT_USB_DMA_CFG_TX_BUSY, 0, 1000))
+		dev_warn(dev->mt76.dev, "Warning: TX DMA did not stop!\n");
+
+	/* Page count on TxQ */
+	i = 200;
+	while (i-- && ((mt76_rr(dev, 0x0438) & 0xffffffff) ||
+		       (mt76_rr(dev, 0x0a30) & 0x000000ff) ||
+		       (mt76_rr(dev, 0x0a34) & 0x00ff00ff)))
+		msleep(10);
+
+	if (!mt76_poll(dev, MT_MAC_STATUS, MT_MAC_STATUS_TX, 0, 1000))
+		dev_warn(dev->mt76.dev, "Warning: MAC TX did not stop!\n");
+
+	mt76_clear(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_ENABLE_RX |
+					 MT_MAC_SYS_CTRL_ENABLE_TX);
+
+	/* Page count on RxQ */
+	ok = 0;
+	i = 200;
+	while (i--) {
+		if (!(mt76_rr(dev, MT_RXQ_STA) & 0x00ff0000) &&
+		    !mt76_rr(dev, 0x0a30) &&
+		    !mt76_rr(dev, 0x0a34)) {
+			if (ok++ > 5)
+				break;
+			continue;
+		}
+		msleep(1);
+	}
+
+	if (!mt76_poll(dev, MT_MAC_STATUS, MT_MAC_STATUS_RX, 0, 1000))
+		dev_warn(dev->mt76.dev, "Warning: MAC RX did not stop!\n");
+
+	if (!mt76_poll(dev, MT_USB_DMA_CFG, MT_USB_DMA_CFG_RX_BUSY, 0, 1000))
+		dev_warn(dev->mt76.dev, "Warning: RX DMA did not stop!\n");
+}
+
+void mt76x0_mac_stop(struct mt76x0_dev *dev)
+{
+	mt76x0_mac_stop_hw(dev);
+	flush_delayed_work(&dev->stat_work);
+	cancel_delayed_work_sync(&dev->stat_work);
+}
+
+static void mt76x0_stop_hardware(struct mt76x0_dev *dev)
+{
+	mt76x0_chip_onoff(dev, false, false);
+}
+
+int mt76x0_init_hardware(struct mt76x0_dev *dev)
+{
+	static const u16 beacon_offsets[16] = {
+		/* 512 byte per beacon */
+		0xc000,	0xc200,	0xc400,	0xc600,
+		0xc800,	0xca00,	0xcc00,	0xce00,
+		0xd000,	0xd200,	0xd400,	0xd600,
+		0xd800,	0xda00,	0xdc00,	0xde00
+	};
+	int ret;
+
+	dev->beacon_offsets = beacon_offsets;
+
+	mt76x0_chip_onoff(dev, true, true);
+
+	ret = mt76x0_wait_asic_ready(dev);
+	if (ret)
+		goto err;
+	ret = mt76x0_mcu_init(dev);
+	if (ret)
+		goto err;
+
+	if (!mt76_poll_msec(dev, MT_WPDMA_GLO_CFG,
+			    MT_WPDMA_GLO_CFG_TX_DMA_BUSY |
+			    MT_WPDMA_GLO_CFG_RX_DMA_BUSY, 0, 100)) {
+		ret = -EIO;
+		goto err;
+	}
+
+	/* Wait for ASIC ready after FW load. */
+	ret = mt76x0_wait_asic_ready(dev);
+	if (ret)
+		goto err;
+
+	mt76x0_reset_csr_bbp(dev);
+	mt76x0_init_usb_dma(dev);
+
+	mt76_wr(dev, MT_HEADER_TRANS_CTRL_REG, 0x0);
+	mt76_wr(dev, MT_TSO_CTRL, 0x0);
+
+	ret = mt76x0_mcu_cmd_init(dev);
+	if (ret)
+		goto err;
+	ret = mt76x0_dma_init(dev);
+	if (ret)
+		goto err_mcu;
+
+	mt76x0_init_mac_registers(dev);
+
+	if (!mt76_poll_msec(dev, MT_MAC_STATUS,
+			    MT_MAC_STATUS_TX | MT_MAC_STATUS_RX, 0, 1000)) {
+		ret = -EIO;
+		goto err_rx;
+	}
+
+	ret = mt76x0_init_bbp(dev);
+	if (ret)
+		goto err_rx;
+
+	ret = mt76x0_init_wcid_mem(dev);
+	if (ret)
+		goto err_rx;
+	ret = mt76x0_init_key_mem(dev);
+	if (ret)
+		goto err_rx;
+	ret = mt76x0_init_wcid_attr_mem(dev);
+	if (ret)
+		goto err_rx;
+
+	mt76_clear(dev, MT_BEACON_TIME_CFG, (MT_BEACON_TIME_CFG_TIMER_EN |
+					     MT_BEACON_TIME_CFG_SYNC_MODE |
+					     MT_BEACON_TIME_CFG_TBTT_EN |
+					     MT_BEACON_TIME_CFG_BEACON_TX));
+
+	mt76x0_reset_counters(dev);
+
+	mt76_rmw(dev, MT_US_CYC_CFG, MT_US_CYC_CNT, 0x1e);
+
+	mt76_wr(dev, MT_TXOP_CTRL_CFG,
+		   FIELD_PREP(MT_TXOP_TRUN_EN, 0x3f) |
+		   FIELD_PREP(MT_TXOP_EXT_CCA_DLY, 0x58));
+
+	ret = mt76x0_eeprom_init(dev);
+	if (ret)
+		goto err_rx;
+
+	mt76x0_phy_init(dev);
+	return 0;
+
+err_rx:
+	mt76x0_dma_cleanup(dev);
+err_mcu:
+	mt76x0_mcu_cmd_deinit(dev);
+err:
+	mt76x0_chip_onoff(dev, false, false);
+	return ret;
+}
+
+void mt76x0_cleanup(struct mt76x0_dev *dev)
+{
+	if (!test_and_clear_bit(MT76_STATE_INITIALIZED, &dev->mt76.state))
+		return;
+
+	mt76x0_stop_hardware(dev);
+	mt76x0_dma_cleanup(dev);
+	mt76x0_mcu_cmd_deinit(dev);
+}
+
+struct mt76x0_dev *mt76x0_alloc_device(struct device *pdev)
+{
+	struct ieee80211_hw *hw;
+	struct mt76x0_dev *dev;
+
+	hw = ieee80211_alloc_hw(sizeof(*dev), &mt76x0_ops);
+	if (!hw)
+		return NULL;
+
+	dev = hw->priv;
+	dev->mt76.dev = pdev;
+	dev->mt76.hw = hw;
+	mutex_init(&dev->usb_ctrl_mtx);
+	mutex_init(&dev->reg_atomic_mutex);
+	mutex_init(&dev->hw_atomic_mutex);
+	mutex_init(&dev->mutex);
+	spin_lock_init(&dev->tx_lock);
+	spin_lock_init(&dev->rx_lock);
+	spin_lock_init(&dev->mt76.lock);
+	spin_lock_init(&dev->mac_lock);
+	spin_lock_init(&dev->con_mon_lock);
+	atomic_set(&dev->avg_ampdu_len, 1);
+	skb_queue_head_init(&dev->tx_skb_done);
+
+	dev->stat_wq = alloc_workqueue("mt76x0", WQ_UNBOUND, 0);
+	if (!dev->stat_wq) {
+		ieee80211_free_hw(hw);
+		return NULL;
+	}
+
+	return dev;
+}
+
+#define CHAN2G(_idx, _freq) {			\
+	.band = NL80211_BAND_2GHZ,		\
+	.center_freq = (_freq),			\
+	.hw_value = (_idx),			\
+	.max_power = 30,			\
+}
+
+static const struct ieee80211_channel mt76_channels_2ghz[] = {
+	CHAN2G(1, 2412),
+	CHAN2G(2, 2417),
+	CHAN2G(3, 2422),
+	CHAN2G(4, 2427),
+	CHAN2G(5, 2432),
+	CHAN2G(6, 2437),
+	CHAN2G(7, 2442),
+	CHAN2G(8, 2447),
+	CHAN2G(9, 2452),
+	CHAN2G(10, 2457),
+	CHAN2G(11, 2462),
+	CHAN2G(12, 2467),
+	CHAN2G(13, 2472),
+	CHAN2G(14, 2484),
+};
+
+#define CHAN5G(_idx, _freq) {			\
+	.band = NL80211_BAND_5GHZ,		\
+	.center_freq = (_freq),			\
+	.hw_value = (_idx),			\
+	.max_power = 30,			\
+}
+
+static const struct ieee80211_channel mt76_channels_5ghz[] = {
+	CHAN5G(36, 5180),
+	CHAN5G(40, 5200),
+	CHAN5G(44, 5220),
+	CHAN5G(46, 5230),
+	CHAN5G(48, 5240),
+	CHAN5G(52, 5260),
+	CHAN5G(56, 5280),
+	CHAN5G(60, 5300),
+	CHAN5G(64, 5320),
+
+	CHAN5G(100, 5500),
+	CHAN5G(104, 5520),
+	CHAN5G(108, 5540),
+	CHAN5G(112, 5560),
+	CHAN5G(116, 5580),
+	CHAN5G(120, 5600),
+	CHAN5G(124, 5620),
+	CHAN5G(128, 5640),
+	CHAN5G(132, 5660),
+	CHAN5G(136, 5680),
+	CHAN5G(140, 5700),
+};
+
+#define CCK_RATE(_idx, _rate) {					\
+	.bitrate = _rate,					\
+	.flags = IEEE80211_RATE_SHORT_PREAMBLE,			\
+	.hw_value = (MT_PHY_TYPE_CCK << 8) | _idx,		\
+	.hw_value_short = (MT_PHY_TYPE_CCK << 8) | (8 + _idx),	\
+}
+
+#define OFDM_RATE(_idx, _rate) {				\
+	.bitrate = _rate,					\
+	.hw_value = (MT_PHY_TYPE_OFDM << 8) | _idx,		\
+	.hw_value_short = (MT_PHY_TYPE_OFDM << 8) | _idx,	\
+}
+
+static struct ieee80211_rate mt76_rates[] = {
+	CCK_RATE(0, 10),
+	CCK_RATE(1, 20),
+	CCK_RATE(2, 55),
+	CCK_RATE(3, 110),
+	OFDM_RATE(0, 60),
+	OFDM_RATE(1, 90),
+	OFDM_RATE(2, 120),
+	OFDM_RATE(3, 180),
+	OFDM_RATE(4, 240),
+	OFDM_RATE(5, 360),
+	OFDM_RATE(6, 480),
+	OFDM_RATE(7, 540),
+};
+
+static int
+mt76_init_sband(struct mt76x0_dev *dev, struct ieee80211_supported_band *sband,
+		const struct ieee80211_channel *chan, int n_chan,
+		struct ieee80211_rate *rates, int n_rates)
+{
+	struct ieee80211_sta_ht_cap *ht_cap;
+	void *chanlist;
+	int size;
+
+	size = n_chan * sizeof(*chan);
+	chanlist = devm_kmemdup(dev->mt76.dev, chan, size, GFP_KERNEL);
+	if (!chanlist)
+		return -ENOMEM;
+
+	sband->channels = chanlist;
+	sband->n_channels = n_chan;
+	sband->bitrates = rates;
+	sband->n_bitrates = n_rates;
+
+	ht_cap = &sband->ht_cap;
+	ht_cap->ht_supported = true;
+	ht_cap->cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
+		      IEEE80211_HT_CAP_GRN_FLD |
+		      IEEE80211_HT_CAP_SGI_20 |
+		      IEEE80211_HT_CAP_SGI_40 |
+		      (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT);
+
+	ht_cap->mcs.rx_mask[0] = 0xff;
+	ht_cap->mcs.rx_mask[4] = 0x1;
+	ht_cap->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
+	ht_cap->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
+	ht_cap->ampdu_density = IEEE80211_HT_MPDU_DENSITY_2;
+
+	return 0;
+}
+
+static int
+mt76_init_sband_2g(struct mt76x0_dev *dev)
+{
+	dev->mt76.hw->wiphy->bands[NL80211_BAND_2GHZ] = &dev->mt76.sband_2g.sband;
+
+	WARN_ON(dev->ee->reg.start - 1 + dev->ee->reg.num >
+		ARRAY_SIZE(mt76_channels_2ghz));
+
+	return mt76_init_sband(dev, &dev->mt76.sband_2g.sband,
+			       mt76_channels_2ghz, ARRAY_SIZE(mt76_channels_2ghz),
+			       mt76_rates, ARRAY_SIZE(mt76_rates));
+}
+
+static int
+mt76_init_sband_5g(struct mt76x0_dev *dev)
+{
+	dev->mt76.hw->wiphy->bands[NL80211_BAND_5GHZ] = &dev->mt76.sband_5g.sband;
+
+	return mt76_init_sband(dev, &dev->mt76.sband_5g.sband,
+			       mt76_channels_5ghz, ARRAY_SIZE(mt76_channels_5ghz),
+			       mt76_rates + 4, ARRAY_SIZE(mt76_rates) - 4);
+}
+
+int mt76x0_register_device(struct mt76x0_dev *dev)
+{
+	struct ieee80211_hw *hw = dev->mt76.hw;
+	struct wiphy *wiphy = hw->wiphy;
+	int ret;
+
+	/* Reserve WCID 0 for multicast - this way the AP's WCID ends up in
+	 * entry no. 1, as it does in the vendor driver.
+	 */
+	dev->wcid_mask[0] |= 1;
+
+	/* init fake wcid for monitor interfaces */
+	dev->mon_wcid = devm_kmalloc(dev->mt76.dev, sizeof(*dev->mon_wcid),
+				     GFP_KERNEL);
+	if (!dev->mon_wcid)
+		return -ENOMEM;
+	dev->mon_wcid->idx = 0xff;
+	dev->mon_wcid->hw_key_idx = -1;
+
+	SET_IEEE80211_DEV(hw, dev->mt76.dev);
+
+	hw->queues = 4;
+	ieee80211_hw_set(hw, SIGNAL_DBM);
+	ieee80211_hw_set(hw, PS_NULLFUNC_STACK);
+	ieee80211_hw_set(hw, SUPPORTS_HT_CCK_RATES);
+	ieee80211_hw_set(hw, AMPDU_AGGREGATION);
+	ieee80211_hw_set(hw, SUPPORTS_RC_TABLE);
+	hw->max_rates = 1;
+	hw->max_report_rates = 7;
+	hw->max_rate_tries = 1;
+
+	hw->sta_data_size = sizeof(struct mt76_sta);
+	hw->vif_data_size = sizeof(struct mt76_vif);
+
+	SET_IEEE80211_PERM_ADDR(hw, dev->macaddr);
+
+	wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR;
+	wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
+
+	if (dev->ee->has_2ghz) {
+		ret = mt76_init_sband_2g(dev);
+		if (ret)
+			return ret;
+	}
+
+	if (dev->ee->has_5ghz) {
+		ret = mt76_init_sband_5g(dev);
+		if (ret)
+			return ret;
+	}
+
+	dev->mt76.chandef.chan = &dev->mt76.sband_2g.sband.channels[0];
+
+	INIT_DELAYED_WORK(&dev->mac_work, mt76x0_mac_work);
+	INIT_DELAYED_WORK(&dev->stat_work, mt76x0_tx_stat);
+
+	ret = ieee80211_register_hw(hw);
+	if (ret)
+		return ret;
+
+	mt76x0_init_debugfs(dev);
+
+	return 0;
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/initvals.h b/drivers/net/wireless/mediatek/mt76/mt76x0/initvals.h
new file mode 100644
index 0000000000000000000000000000000000000000..24afcfd94b4e63f934259463e410ff627adbdaea
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/initvals.h
@@ -0,0 +1,282 @@
+/*
+ * (c) Copyright 2002-2010, Ralink Technology, Inc.
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MT76X0U_INITVALS_H
+#define __MT76X0U_INITVALS_H
+
+#include "phy.h"
+
+static const struct mt76_reg_pair common_mac_reg_table[] = {
+	{MT_BCN_OFFSET(0),			0xf8f0e8e0}, /* 0x3800(e0), 0x3A00(e8), 0x3C00(f0), 0x3E00(f8), 512B for each beacon */
+	{MT_BCN_OFFSET(1),			0x6f77d0c8}, /* 0x3200(c8), 0x3400(d0), 0x1DC0(77), 0x1BC0(6f), 512B for each beacon */
+
+	{MT_LEGACY_BASIC_RATE,		0x0000013f}, /* Basic rate set bitmap */
+	{MT_HT_BASIC_RATE,		0x00008003}, /* Basic HT rate set, 20M, MCS=3, MM. Format is the same as in TXWI. */
+	{MT_MAC_SYS_CTRL,		0x00}, /* 0x1004, default: disable RX */
+	{MT_RX_FILTR_CFG,		0x17f97}, /* 0x1400, RX filter control */
+	{MT_BKOFF_SLOT_CFG,	0x209}, /* default: set short slot time, CC_DELAY_TIME should be 2 */
+	/*{TX_SW_CFG0,		0x40a06},  Gary,2006-08-23 */
+	{MT_TX_SW_CFG0,		0x0}, 		/* Gary,2008-05-21 for CWC test */
+	{MT_TX_SW_CFG1,		0x80606}, /* Gary,2006-08-23 */
+	{MT_TX_LINK_CFG,		0x1020},		/* Gary,2006-08-23 */
+	/*{TX_TIMEOUT_CFG,	0x00182090},	 CCK has some problem. So increase timeout value. 2006-10-09 MArvek RT*/
+	{MT_TX_TIMEOUT_CFG,	0x000a2090},	/* CCK has some problem. So increase timeout value. 2006-10-09 MArvek RT, modified for 2860E, 2007-08-01 */
+	{MT_MAX_LEN_CFG,		0xa0fff | 0x00001000},	/* 0x3018, MAX frame length. Max PSDU = 16kbytes.*/
+	{MT_LED_CFG,		0x7f031e46}, /* Gary, 2006-08-23*/
+
+	{MT_PBF_TX_MAX_PCNT,		0x1fbf1f1f /*0xbfbf3f1f*/},
+	{MT_PBF_RX_MAX_PCNT,		0x9f},
+
+	/*{TX_RTY_CFG,			0x6bb80408},	 Jan, 2006/11/16*/
+/* WMM_ACM_SUPPORT */
+/*	{TX_RTY_CFG,			0x6bb80101},	 sample*/
+	{MT_TX_RETRY_CFG,			0x47d01f0f},	/* Jan, 2006/11/16, Set TxWI->ACK =0 in Probe Rsp Modify for 2860E ,2007-08-03*/
+
+	{MT_AUTO_RSP_CFG,			0x00000013},	/* Initial Auto_Responder, because QA will turn off Auto-Responder*/
+	{MT_CCK_PROT_CFG,			0x05740003 /*0x01740003*/},	/* Initial Auto_Responder, because QA will turn off Auto-Responder. And RTS threshold is enabled. */
+	{MT_OFDM_PROT_CFG,			0x05740003 /*0x01740003*/},	/* Initial Auto_Responder, because QA will turn off Auto-Responder. And RTS threshold is enabled. */
+	{MT_PBF_CFG, 				0xf40006}, 		/* Only enable Queue 2*/
+	{MT_MM40_PROT_CFG,			0x3F44084},		/* Initial Auto_Responder, because QA will turn off Auto-Responder*/
+	{MT_WPDMA_GLO_CFG,			0x00000030},
+	{MT_GF20_PROT_CFG,			0x01744004},    /* set 19:18 --> Short NAV for MIMO PS*/
+	{MT_GF40_PROT_CFG,			0x03F44084},
+	{MT_MM20_PROT_CFG,			0x01744004},
+	{MT_TXOP_CTRL_CFG,			0x0000583f, /*0x0000243f*/ /*0x000024bf*/},	/*Extension channel backoff.*/
+	{MT_TX_RTS_CFG,			0x00092b20},
+
+	{MT_EXP_ACK_TIME,			0x002400ca},	/* default value */
+	{MT_TXOP_HLDR_ET, 			0x00000002},
+
+	/* Jerry comments 2008/01/16: we use SIFS = 10us for CCK by default, but it seems that 10us
+		is too small for the INTEL 2200bg card, so in MBSS mode, the delta time between beacon0
+		and beacon1 is SIFS (10us), so if an INTEL 2200bg card connects to BSS0, the ping
+		will always be lost. So we change the SIFS of CCK from 10us to 16us. */
+	{MT_XIFS_TIME_CFG,			0x33a41010},
+	{MT_PWR_PIN_CFG,			0x00000000},
+};
+
+static const struct mt76_reg_pair mt76x0_mac_reg_table[] = {
+	/* {MT_IOCFG_6,		0xA0040080 }, */
+	{MT_PBF_SYS_CTRL,	0x00080c00 },
+	{MT_PBF_CFG,		0x77723c1f },
+	{MT_FCE_PSE_CTRL,	0x00000001 },
+
+	{MT_AMPDU_MAX_LEN_20M1S,	0xBAA99887 },
+
+	/* Delay bb_tx_pe for proper tx_mcs_pwr update */
+	{MT_TX_SW_CFG0,		0x00000601 },
+
+	/* Set rf_tx_pe deassert time to 1us by Chee's comment @MT7650_CR_setting_1018.xlsx */
+	{MT_TX_SW_CFG1,		0x00040000 },
+	{MT_TX_SW_CFG2,		0x00000000 },
+
+	/* disable Tx info report */
+	{0xa44,		0x0000000 },
+
+	{MT_HEADER_TRANS_CTRL_REG, 0x0},
+	{MT_TSO_CTRL,		0x0},
+
+	/* BB_PA_MODE_CFG0(0x1214) Keep default value @20120903 */
+	{MT_BB_PA_MODE_CFG1,	0x00500055},
+
+	/* RF_PA_MODE_CFG0(0x121C) Keep default value @20120903 */
+	{MT_RF_PA_MODE_CFG1,	0x00500055},
+
+	{MT_TX_ALC_CFG_0,	0x2F2F000C},
+	{MT_TX0_BB_GAIN_ATTEN,  0x00000000}, /* set BBP atten gain = 0 */
+
+	{MT_TX_PWR_CFG_0, 0x3A3A3A3A},
+	{MT_TX_PWR_CFG_1, 0x3A3A3A3A},
+	{MT_TX_PWR_CFG_2, 0x3A3A3A3A},
+	{MT_TX_PWR_CFG_3, 0x3A3A3A3A},
+	{MT_TX_PWR_CFG_4, 0x3A3A3A3A},
+	{MT_TX_PWR_CFG_7, 0x3A3A3A3A},
+	{MT_TX_PWR_CFG_8, 0x3A},
+	{MT_TX_PWR_CFG_9, 0x3A},
+	/* Enable Tx length > 4095 byte */
+	{0x150C,		0x00000002},
+
+	/* Disable bt_abort_tx_en(0x1238[21] = 0) which is not used at MT7650 */
+	{0x1238, 		0x001700C8},
+	/* PMU_OCLEVEL<5:1> from default <5'b10010> to <5'b11011> for normal driver */
+	/* {MT_LDO_CTRL_0,		0x00A647B6}, */
+
+	/* Default LDO_DIG supply 1.26V, change to 1.2V */
+	{MT_LDO_CTRL_1,		0x6B006464 },
+/*
+	{MT_HT_BASIC_RATE,	0x00004003 },
+	{MT_HT_CTRL_CFG,	0x000001FF },
+*/
+};
+
+static const struct mt76_reg_pair mt76x0_bbp_init_tab[] = {
+	{MT_BBP(CORE, 1), 0x00000002},
+	{MT_BBP(CORE, 4), 0x00000000},
+	{MT_BBP(CORE, 24), 0x00000000},
+	{MT_BBP(CORE, 32), 0x4003000a},
+	{MT_BBP(CORE, 42), 0x00000000},
+	{MT_BBP(CORE, 44), 0x00000000},
+
+	{MT_BBP(IBI, 11), 0x00000080},
+
+	/*
+		0x2300[5] Default Antenna:
+		0 for WIFI main antenna
+		1 for WIFI aux antenna
+	*/
+	{MT_BBP(AGC, 0), 0x00021400},
+	{MT_BBP(AGC, 1), 0x00000003},
+	{MT_BBP(AGC, 2), 0x003A6464},
+	{MT_BBP(AGC, 15), 0x88A28CB8},
+	{MT_BBP(AGC, 22), 0x00001E21},
+	{MT_BBP(AGC, 23), 0x0000272C},
+	{MT_BBP(AGC, 24), 0x00002F3A},
+	{MT_BBP(AGC, 25), 0x8000005A},
+	{MT_BBP(AGC, 26), 0x007C2005},
+	{MT_BBP(AGC, 34), 0x000A0C0C},
+	{MT_BBP(AGC, 37), 0x2121262C},
+	{MT_BBP(AGC, 41), 0x38383E45},
+	{MT_BBP(AGC, 57), 0x00001010},
+	{MT_BBP(AGC, 59), 0xBAA20E96},
+	{MT_BBP(AGC, 63), 0x00000001},
+
+	{MT_BBP(TXC, 0), 0x00280403},
+	{MT_BBP(TXC, 1), 0x00000000},
+
+	{MT_BBP(RXC, 1), 0x00000012},
+	{MT_BBP(RXC, 2), 0x00000011},
+	{MT_BBP(RXC, 3), 0x00000005},
+	{MT_BBP(RXC, 4), 0x00000000},
+	{MT_BBP(RXC, 5), 0xF977C4EC},
+	{MT_BBP(RXC, 7), 0x00000090},
+
+	{MT_BBP(TXO, 8), 0x00000000},
+
+	{MT_BBP(TXBE, 0), 0x00000000},
+	{MT_BBP(TXBE, 4), 0x00000004},
+	{MT_BBP(TXBE, 6), 0x00000000},
+	{MT_BBP(TXBE, 8), 0x00000014},
+	{MT_BBP(TXBE, 9), 0x20000000},
+	{MT_BBP(TXBE, 10), 0x00000000},
+	{MT_BBP(TXBE, 12), 0x00000000},
+	{MT_BBP(TXBE, 13), 0x00000000},
+	{MT_BBP(TXBE, 14), 0x00000000},
+	{MT_BBP(TXBE, 15), 0x00000000},
+	{MT_BBP(TXBE, 16), 0x00000000},
+	{MT_BBP(TXBE, 17), 0x00000000},
+
+	{MT_BBP(RXFE, 1), 0x00008800}, /* Add for E3 */
+	{MT_BBP(RXFE, 3), 0x00000000},
+	{MT_BBP(RXFE, 4), 0x00000000},
+
+	{MT_BBP(RXO, 13), 0x00000092},
+	{MT_BBP(RXO, 14), 0x00060612},
+	{MT_BBP(RXO, 15), 0xC8321B18},
+	{MT_BBP(RXO, 16), 0x0000001E},
+	{MT_BBP(RXO, 17), 0x00000000},
+	{MT_BBP(RXO, 18), 0xCC00A993},
+	{MT_BBP(RXO, 19), 0xB9CB9CB9},
+	{MT_BBP(RXO, 20), 0x26c00057},
+	{MT_BBP(RXO, 21), 0x00000001},
+	{MT_BBP(RXO, 24), 0x00000006},
+};
+
+static const struct mt76x0_bbp_switch_item mt76x0_bbp_switch_tab[] = {
+	{RF_G_BAND | RF_BW_20 | RF_BW_40,		{MT_BBP(AGC, 8), 0x0E344EF0}},
+	{RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80,	{MT_BBP(AGC, 8), 0x122C54F2}},
+
+	{RF_G_BAND | RF_BW_20 | RF_BW_40,		{MT_BBP(AGC, 14), 0x310F2E39}},
+	{RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80,	{MT_BBP(AGC, 14), 0x310F2A3F}},
+
+	{RF_G_BAND | RF_BW_20 | RF_BW_40,		{MT_BBP(AGC, 32), 0x00003230}},
+	{RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80,	{MT_BBP(AGC, 32), 0x0000181C}},
+
+	{RF_G_BAND | RF_BW_20 | RF_BW_40,		{MT_BBP(AGC, 33), 0x00003240}},
+	{RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80,	{MT_BBP(AGC, 33), 0x00003218}},
+
+	{RF_G_BAND | RF_BW_20 | RF_BW_40,		{MT_BBP(AGC, 35), 0x11112016}},
+	{RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80,	{MT_BBP(AGC, 35), 0x11112016}},
+
+	{RF_G_BAND | RF_BW_20 | RF_BW_40,		{MT_BBP(RXO, 28), 0x0000008A}},
+	{RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80,	{MT_BBP(RXO, 28), 0x0000008A}},
+
+	{RF_G_BAND | RF_BW_20 | RF_BW_40,		{MT_BBP(AGC, 4), 0x1FEDA049}},
+	{RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80,	{MT_BBP(AGC, 4), 0x1FECA054}},
+
+	{RF_G_BAND | RF_BW_20 | RF_BW_40,		{MT_BBP(AGC, 6), 0x00000045}},
+	{RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80,	{MT_BBP(AGC, 6), 0x0000000A}},
+
+	{RF_G_BAND | RF_BW_20,				{MT_BBP(AGC, 12), 0x05052879}},
+	{RF_G_BAND | RF_BW_40,				{MT_BBP(AGC, 12), 0x050528F9}},
+	{RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80,	{MT_BBP(AGC, 12), 0x050528F9}},
+
+	{RF_G_BAND | RF_BW_20 | RF_BW_40,		{MT_BBP(AGC, 13), 0x35050004}},
+	{RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80,	{MT_BBP(AGC, 13), 0x2C3A0406}},
+
+	{RF_G_BAND | RF_BW_20 | RF_BW_40,		{MT_BBP(AGC, 27), 0x000000E1}},
+	{RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80,	{MT_BBP(AGC, 27), 0x000000EC}},
+
+	{RF_G_BAND | RF_BW_20,				{MT_BBP(AGC, 28), 0x00060806}},
+	{RF_G_BAND | RF_BW_40,				{MT_BBP(AGC, 28), 0x00050806}},
+	{RF_A_BAND | RF_BW_40,				{MT_BBP(AGC, 28), 0x00060801}},
+	{RF_A_BAND | RF_BW_20 | RF_BW_80,		{MT_BBP(AGC, 28), 0x00060806}},
+
+	{RF_G_BAND | RF_BW_20 | RF_BW_40,		{MT_BBP(AGC, 31), 0x00000F23}},
+	{RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80,	{MT_BBP(AGC, 31), 0x00000F13}},
+
+	{RF_G_BAND | RF_BW_20,				{MT_BBP(AGC, 39), 0x2A2A3036}},
+	{RF_G_BAND | RF_BW_40,				{MT_BBP(AGC, 39), 0x2A2A2C36}},
+	{RF_A_BAND | RF_BW_20 | RF_BW_40,		{MT_BBP(AGC, 39), 0x2A2A3036}},
+	{RF_A_BAND | RF_BW_80,				{MT_BBP(AGC, 39), 0x2A2A2A36}},
+
+	{RF_G_BAND | RF_BW_20,				{MT_BBP(AGC, 43), 0x27273438}},
+	{RF_G_BAND | RF_BW_40,				{MT_BBP(AGC, 43), 0x27272D38}},
+	{RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80,	{MT_BBP(AGC, 43), 0x27272B30}},
+
+	{RF_G_BAND | RF_BW_20 | RF_BW_40,		{MT_BBP(AGC, 51), 0x17171C1C}},
+	{RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80,	{MT_BBP(AGC, 51), 0xFFFFFFFF}},
+
+	{RF_G_BAND | RF_BW_20,				{MT_BBP(AGC, 53), 0x26262A2F}},
+	{RF_G_BAND | RF_BW_40,				{MT_BBP(AGC, 53), 0x2626322F}},
+	{RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80,	{MT_BBP(AGC, 53), 0xFFFFFFFF}},
+
+	{RF_G_BAND | RF_BW_20,				{MT_BBP(AGC, 55), 0x40404E58}},
+	{RF_G_BAND | RF_BW_40,				{MT_BBP(AGC, 55), 0x40405858}},
+	{RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80,	{MT_BBP(AGC, 55), 0xFFFFFFFF}},
+
+	{RF_G_BAND | RF_BW_20 | RF_BW_40,		{MT_BBP(AGC, 58), 0x00001010}},
+	{RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80,	{MT_BBP(AGC, 58), 0x00000000}},
+
+	{RF_G_BAND | RF_BW_20 | RF_BW_40,		{MT_BBP(RXFE, 0), 0x3D5000E0}},
+	{RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80,	{MT_BBP(RXFE, 0), 0x895000E0}},
+};
+
+static const struct mt76_reg_pair mt76x0_dcoc_tab[] = {
+	{MT_BBP(CAL, 47), 0x000010F0 },
+	{MT_BBP(CAL, 48), 0x00008080 },
+	{MT_BBP(CAL, 49), 0x00000F07 },
+	{MT_BBP(CAL, 50), 0x00000040 },
+	{MT_BBP(CAL, 51), 0x00000404 },
+	{MT_BBP(CAL, 52), 0x00080803 },
+	{MT_BBP(CAL, 53), 0x00000704 },
+	{MT_BBP(CAL, 54), 0x00002828 },
+	{MT_BBP(CAL, 55), 0x00005050 },
+};
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/initvals_phy.h b/drivers/net/wireless/mediatek/mt76/mt76x0/initvals_phy.h
new file mode 100644
index 0000000000000000000000000000000000000000..95d43efc1f3d4d225826670033415ddb54bbad5c
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/initvals_phy.h
@@ -0,0 +1,772 @@
+/*
+ * (c) Copyright 2002-2010, Ralink Technology, Inc.
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MT76X0U_PHY_INITVALS_H
+#define __MT76X0U_PHY_INITVALS_H
+
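+/* pack the RF bank (upper 16 bits) and register offset into one address word */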
+#define RF_REG_PAIR(bank, reg, value)				\
+	{ (bank) << 16 | (reg), value }
+
+static const struct mt76_reg_pair mt76x0_rf_central_tab[] = {
+/*
+	Bank 0 - For central blocks: BG, PLL, XTAL, LO, ADC/DAC
+*/
+	{ MT_RF(0, 1), 0x01},
+	{ MT_RF(0, 2), 0x11},
+
+	/*
+		R3 ~ R7: VCO Cal.
+	*/
+	{ MT_RF(0, 3), 0x73}, /* VCO Freq Cal - No Bypass, VCO Amp Cal - No Bypass */
+	{ MT_RF(0, 4), 0x30}, /* R4 b<7>=1, VCO cal */
+	{ MT_RF(0, 5), 0x00},
+	{ MT_RF(0, 6), 0x41}, /* Set the open loop amplitude to middle since bypassing amplitude calibration */
+	{ MT_RF(0, 7), 0x00},
+
+	/*
+		XO
+	*/
+	{ MT_RF(0,  8), 0x00},
+	{ MT_RF(0,  9), 0x00},
+	{ MT_RF(0, 10), 0x0C},
+	{ MT_RF(0, 11), 0x00},
+	{ MT_RF(0, 12), 0x00},
+
+	/*
+		BG
+	*/
+	{ MT_RF(0, 13), 0x00},
+	{ MT_RF(0, 14), 0x00},
+	{ MT_RF(0, 15), 0x00},
+
+	/*
+		LDO
+	*/
+	{ MT_RF(0, 19), 0x20},
+	/*
+		XO
+	*/
+	{ MT_RF(0, 20), 0x22},
+	{ MT_RF(0, 21), 0x12},
+	{ MT_RF(0, 23), 0x00},
+	{ MT_RF(0, 24), 0x33}, /* See band selection for R24<1:0> */
+	{ MT_RF(0, 25), 0x00},
+
+	/*
+		PLL, See Freq Selection
+	*/
+	{ MT_RF(0, 26), 0x00},
+	{ MT_RF(0, 27), 0x00},
+	{ MT_RF(0, 28), 0x00},
+	{ MT_RF(0, 29), 0x00},
+	{ MT_RF(0, 30), 0x00},
+	{ MT_RF(0, 31), 0x00},
+	{ MT_RF(0, 32), 0x00},
+	{ MT_RF(0, 33), 0x00},
+	{ MT_RF(0, 34), 0x00},
+	{ MT_RF(0, 35), 0x00},
+	{ MT_RF(0, 36), 0x00},
+	{ MT_RF(0, 37), 0x00},
+
+	/*
+		LO Buffer
+	*/
+	{ MT_RF(0, 38), 0x2F},
+
+	/*
+		Test Ports
+	*/
+	{ MT_RF(0, 64), 0x00},
+	{ MT_RF(0, 65), 0x80},
+	{ MT_RF(0, 66), 0x01},
+	{ MT_RF(0, 67), 0x04},
+
+	/*
+		ADC/DAC
+	*/
+	{ MT_RF(0, 68), 0x00},
+	{ MT_RF(0, 69), 0x08},
+	{ MT_RF(0, 70), 0x08},
+	{ MT_RF(0, 71), 0x40},
+	{ MT_RF(0, 72), 0xD0},
+	{ MT_RF(0, 73), 0x93},
+};
+
+static const struct mt76_reg_pair mt76x0_rf_2g_channel_0_tab[] = {
+/*
+	Bank 5 - Channel 0 2G RF registers
+*/
+	/*
+		RX logic operation
+	*/
+	/* RF_R00 Change in SelectBand6590 */
+
+	{ MT_RF(5, 2), 0x0C}, /* 5G+2G (MT7610U) */
+	{ MT_RF(5, 3), 0x00},
+
+	/*
+		TX logic operation
+	*/
+	{ MT_RF(5, 4), 0x00},
+	{ MT_RF(5, 5), 0x84},
+	{ MT_RF(5, 6), 0x02},
+
+	/*
+		LDO
+	*/
+	{ MT_RF(5, 7), 0x00},
+	{ MT_RF(5, 8), 0x00},
+	{ MT_RF(5, 9), 0x00},
+
+	/*
+		RX
+	*/
+	{ MT_RF(5, 10), 0x51},
+	{ MT_RF(5, 11), 0x22},
+	{ MT_RF(5, 12), 0x22},
+	{ MT_RF(5, 13), 0x0F},
+	{ MT_RF(5, 14), 0x47}, /* Increase mixer current for more gain */
+	{ MT_RF(5, 15), 0x25},
+	{ MT_RF(5, 16), 0xC7}, /* Tune LNA2 tank */
+	{ MT_RF(5, 17), 0x00},
+	{ MT_RF(5, 18), 0x00},
+	{ MT_RF(5, 19), 0x30}, /* Improve max Pin */
+	{ MT_RF(5, 20), 0x33},
+	{ MT_RF(5, 21), 0x02},
+	{ MT_RF(5, 22), 0x32}, /* Tune LNA1 tank */
+	{ MT_RF(5, 23), 0x00},
+	{ MT_RF(5, 24), 0x25},
+	{ MT_RF(5, 26), 0x00},
+	{ MT_RF(5, 27), 0x12},
+	{ MT_RF(5, 28), 0x0F},
+	{ MT_RF(5, 29), 0x00},
+
+	/*
+		LOGEN
+	*/
+	{ MT_RF(5, 30), 0x51}, /* Tune LOGEN tank */
+	{ MT_RF(5, 31), 0x35},
+	{ MT_RF(5, 32), 0x31},
+	{ MT_RF(5, 33), 0x31},
+	{ MT_RF(5, 34), 0x34},
+	{ MT_RF(5, 35), 0x03},
+	{ MT_RF(5, 36), 0x00},
+
+	/*
+		TX
+	*/
+	{ MT_RF(5, 37), 0xDD}, /* Improve 3.2GHz spur */
+	{ MT_RF(5, 38), 0xB3},
+	{ MT_RF(5, 39), 0x33},
+	{ MT_RF(5, 40), 0xB1},
+	{ MT_RF(5, 41), 0x71},
+	{ MT_RF(5, 42), 0xF2},
+	{ MT_RF(5, 43), 0x47},
+	{ MT_RF(5, 44), 0x77},
+	{ MT_RF(5, 45), 0x0E},
+	{ MT_RF(5, 46), 0x10},
+	{ MT_RF(5, 47), 0x00},
+	{ MT_RF(5, 48), 0x53},
+	{ MT_RF(5, 49), 0x03},
+	{ MT_RF(5, 50), 0xEF},
+	{ MT_RF(5, 51), 0xC7},
+	{ MT_RF(5, 52), 0x62},
+	{ MT_RF(5, 53), 0x62},
+	{ MT_RF(5, 54), 0x00},
+	{ MT_RF(5, 55), 0x00},
+	{ MT_RF(5, 56), 0x0F},
+	{ MT_RF(5, 57), 0x0F},
+	{ MT_RF(5, 58), 0x16},
+	{ MT_RF(5, 59), 0x16},
+	{ MT_RF(5, 60), 0x10},
+	{ MT_RF(5, 61), 0x10},
+	{ MT_RF(5, 62), 0xD0},
+	{ MT_RF(5, 63), 0x6C},
+	{ MT_RF(5, 64), 0x58},
+	{ MT_RF(5, 65), 0x58},
+	{ MT_RF(5, 66), 0xF2},
+	{ MT_RF(5, 67), 0xE8},
+	{ MT_RF(5, 68), 0xF0},
+	{ MT_RF(5, 69), 0xF0},
+	{ MT_RF(5, 127), 0x04},
+};
+
+static const struct mt76_reg_pair mt76x0_rf_5g_channel_0_tab[] = {
+/*
+	Bank 6 - Channel 0 5G RF registers
+*/
+	/*
+		RX logic operation
+	*/
+	/* RF_R00 Change in SelectBandmt76x0 */
+
+	{ MT_RF(6, 2), 0x0C},
+	{ MT_RF(6, 3), 0x00},
+
+	/*
+		TX logic operation
+	*/
+	{ MT_RF(6, 4), 0x00},
+	{ MT_RF(6, 5), 0x84},
+	{ MT_RF(6, 6), 0x02},
+
+	/*
+		LDO
+	*/
+	{ MT_RF(6, 7), 0x00},
+	{ MT_RF(6, 8), 0x00},
+	{ MT_RF(6, 9), 0x00},
+
+	/*
+		RX
+	*/
+	{ MT_RF(6, 10), 0x00},
+	{ MT_RF(6, 11), 0x01},
+
+	{ MT_RF(6, 13), 0x23},
+	{ MT_RF(6, 14), 0x00},
+	{ MT_RF(6, 15), 0x04},
+	{ MT_RF(6, 16), 0x22},
+
+	{ MT_RF(6, 18), 0x08},
+	{ MT_RF(6, 19), 0x00},
+	{ MT_RF(6, 20), 0x00},
+	{ MT_RF(6, 21), 0x00},
+	{ MT_RF(6, 22), 0xFB},
+
+	/*
+		LOGEN5G
+	*/
+	{ MT_RF(6, 25), 0x76},
+	{ MT_RF(6, 26), 0x24},
+	{ MT_RF(6, 27), 0x04},
+	{ MT_RF(6, 28), 0x00},
+	{ MT_RF(6, 29), 0x00},
+
+	/*
+		TX
+	*/
+	{ MT_RF(6, 37), 0xBB},
+	{ MT_RF(6, 38), 0xB3},
+
+	{ MT_RF(6, 40), 0x33},
+	{ MT_RF(6, 41), 0x33},
+
+	{ MT_RF(6, 43), 0x03},
+	{ MT_RF(6, 44), 0xB3},
+
+	{ MT_RF(6, 46), 0x17},
+	{ MT_RF(6, 47), 0x0E},
+	{ MT_RF(6, 48), 0x10},
+	{ MT_RF(6, 49), 0x07},
+
+	{ MT_RF(6, 62), 0x00},
+	{ MT_RF(6, 63), 0x00},
+	{ MT_RF(6, 64), 0xF1},
+	{ MT_RF(6, 65), 0x0F},
+};
+
+static const struct mt76_reg_pair mt76x0_rf_vga_channel_0_tab[] = {
+/*
+	Bank 7 - Channel 0 VGA RF registers
+*/
+	/* E3 CR */
+	{ MT_RF(7, 0), 0x47}, /* Allow BBP/MAC to do calibration */
+	{ MT_RF(7, 1), 0x00},
+	{ MT_RF(7, 2), 0x00},
+	{ MT_RF(7, 3), 0x00},
+	{ MT_RF(7, 4), 0x00},
+
+	{ MT_RF(7, 10), 0x13},
+	{ MT_RF(7, 11), 0x0F},
+	{ MT_RF(7, 12), 0x13}, /* For dcoc */
+	{ MT_RF(7, 13), 0x13}, /* For dcoc */
+	{ MT_RF(7, 14), 0x13}, /* For dcoc */
+	{ MT_RF(7, 15), 0x20}, /* For dcoc */
+	{ MT_RF(7, 16), 0x22}, /* For dcoc */
+
+	{ MT_RF(7, 17), 0x7C},
+
+	{ MT_RF(7, 18), 0x00},
+	{ MT_RF(7, 19), 0x00},
+	{ MT_RF(7, 20), 0x00},
+	{ MT_RF(7, 21), 0xF1},
+	{ MT_RF(7, 22), 0x11},
+	{ MT_RF(7, 23), 0xC2},
+	{ MT_RF(7, 24), 0x41},
+	{ MT_RF(7, 25), 0x20},
+	{ MT_RF(7, 26), 0x40},
+	{ MT_RF(7, 27), 0xD7},
+	{ MT_RF(7, 28), 0xA2},
+	{ MT_RF(7, 29), 0x60},
+	{ MT_RF(7, 30), 0x49},
+	{ MT_RF(7, 31), 0x20},
+	{ MT_RF(7, 32), 0x44},
+	{ MT_RF(7, 33), 0xC1},
+	{ MT_RF(7, 34), 0x60},
+	{ MT_RF(7, 35), 0xC0},
+
+	{ MT_RF(7, 61), 0x01},
+
+	{ MT_RF(7, 72), 0x3C},
+	{ MT_RF(7, 73), 0x34},
+	{ MT_RF(7, 74), 0x00},
+};
+
+static const struct mt76x0_rf_switch_item mt76x0_rf_bw_switch_tab[] = {
+	/*   Bank, 		Register,	Bw/Band, 	Value */
+	{ MT_RF(0, 17),		RF_G_BAND | RF_BW_20,	0x00},
+	{ MT_RF(0, 17),		RF_G_BAND | RF_BW_40,	0x00},
+	{ MT_RF(0, 17),		RF_A_BAND | RF_BW_20,	0x00},
+	{ MT_RF(0, 17),		RF_A_BAND | RF_BW_40,	0x00},
+	{ MT_RF(0, 17),		RF_A_BAND | RF_BW_80,	0x00},
+
+	/* TODO: need to check B7.R6 & B7.R7 setting for 2.4G again @20121112 */
+	{ MT_RF(7,  6),		RF_G_BAND | RF_BW_20,	0x40},
+	{ MT_RF(7,  6),		RF_G_BAND | RF_BW_40,	0x1C},
+	{ MT_RF(7,  6),		RF_A_BAND | RF_BW_20,	0x40},
+	{ MT_RF(7,  6),		RF_A_BAND | RF_BW_40,	0x20},
+	{ MT_RF(7,  6),		RF_A_BAND | RF_BW_80,	0x10},
+
+	{ MT_RF(7,  7),		RF_G_BAND | RF_BW_20,	0x40},
+	{ MT_RF(7,  7),		RF_G_BAND | RF_BW_40,	0x20},
+	{ MT_RF(7,  7),		RF_A_BAND | RF_BW_20,	0x40},
+	{ MT_RF(7,  7),		RF_A_BAND | RF_BW_40,	0x20},
+	{ MT_RF(7,  7),		RF_A_BAND | RF_BW_80,	0x10},
+
+	{ MT_RF(7,  8),		RF_G_BAND | RF_BW_20,	0x03},
+	{ MT_RF(7,  8),		RF_G_BAND | RF_BW_40,	0x01},
+	{ MT_RF(7,  8),		RF_A_BAND | RF_BW_20,	0x03},
+	{ MT_RF(7,  8),		RF_A_BAND | RF_BW_40,	0x01},
+	{ MT_RF(7,  8),		RF_A_BAND | RF_BW_80,	0x00},
+
+	/* TODO: need to check B7.R58 & B7.R59 setting for 2.4G again @20121112 */
+	{ MT_RF(7, 58),		RF_G_BAND | RF_BW_20,	0x40},
+	{ MT_RF(7, 58),		RF_G_BAND | RF_BW_40,	0x40},
+	{ MT_RF(7, 58),		RF_A_BAND | RF_BW_20,	0x40},
+	{ MT_RF(7, 58),		RF_A_BAND | RF_BW_40,	0x40},
+	{ MT_RF(7, 58),		RF_A_BAND | RF_BW_80,	0x10},
+
+	{ MT_RF(7, 59),		RF_G_BAND | RF_BW_20,	0x40},
+	{ MT_RF(7, 59),		RF_G_BAND | RF_BW_40,	0x40},
+	{ MT_RF(7, 59),		RF_A_BAND | RF_BW_20,	0x40},
+	{ MT_RF(7, 59),		RF_A_BAND | RF_BW_40,	0x40},
+	{ MT_RF(7, 59),		RF_A_BAND | RF_BW_80,	0x10},
+
+	{ MT_RF(7, 60),		RF_G_BAND | RF_BW_20,	0xAA},
+	{ MT_RF(7, 60),		RF_G_BAND | RF_BW_40,	0xAA},
+	{ MT_RF(7, 60),		RF_A_BAND | RF_BW_20,	0xAA},
+	{ MT_RF(7, 60),		RF_A_BAND | RF_BW_40,	0xAA},
+	{ MT_RF(7, 60),		RF_A_BAND | RF_BW_80,	0xAA},
+
+	{ MT_RF(7, 76),		RF_BW_20,	0x40},
+	{ MT_RF(7, 76),		RF_BW_40,	0x40},
+	{ MT_RF(7, 76),		RF_BW_80,	0x10},
+
+	{ MT_RF(7, 77),		RF_BW_20,	0x40},
+	{ MT_RF(7, 77),		RF_BW_40,	0x40},
+	{ MT_RF(7, 77),		RF_BW_80,	0x10},
+};
+
+static const struct mt76x0_rf_switch_item mt76x0_rf_band_switch_tab[] = {
+	/*   Bank, 		Register,	Bw/Band, 		Value */
+	{ MT_RF(0, 16),		RF_G_BAND,		0x20},
+	{ MT_RF(0, 16),		RF_A_BAND,		0x20},
+
+	{ MT_RF(0, 18),		RF_G_BAND,		0x00},
+	{ MT_RF(0, 18),		RF_A_BAND,		0x00},
+
+	{ MT_RF(0, 39),		RF_G_BAND,		0x36},
+	{ MT_RF(0, 39),		RF_A_BAND_LB,	0x34},
+	{ MT_RF(0, 39),		RF_A_BAND_MB,	0x33},
+	{ MT_RF(0, 39),		RF_A_BAND_HB,	0x31},
+	{ MT_RF(0, 39),		RF_A_BAND_11J,	0x36},
+
+	{ MT_RF(6, 12),		RF_A_BAND_LB,	0x44},
+	{ MT_RF(6, 12),		RF_A_BAND_MB,	0x44},
+	{ MT_RF(6, 12),		RF_A_BAND_HB,	0x55},
+	{ MT_RF(6, 12),		RF_A_BAND_11J,	0x44},
+
+	{ MT_RF(6, 17),		RF_A_BAND_LB,	0x02},
+	{ MT_RF(6, 17),		RF_A_BAND_MB,	0x00},
+	{ MT_RF(6, 17),		RF_A_BAND_HB,	0x00},
+	{ MT_RF(6, 17),		RF_A_BAND_11J,	0x05},
+
+	{ MT_RF(6, 24),		RF_A_BAND_LB,	0xA1},
+	{ MT_RF(6, 24),		RF_A_BAND_MB,	0x41},
+	{ MT_RF(6, 24),		RF_A_BAND_HB,	0x21},
+	{ MT_RF(6, 24),		RF_A_BAND_11J,	0xE1},
+
+	{ MT_RF(6, 39),		RF_A_BAND_LB,	0x36},
+	{ MT_RF(6, 39),		RF_A_BAND_MB,	0x34},
+	{ MT_RF(6, 39),		RF_A_BAND_HB,	0x32},
+	{ MT_RF(6, 39),		RF_A_BAND_11J,	0x37},
+
+	{ MT_RF(6, 42),		RF_A_BAND_LB,	0xFB},
+	{ MT_RF(6, 42),		RF_A_BAND_MB,	0xF3},
+	{ MT_RF(6, 42),		RF_A_BAND_HB,	0xEB},
+	{ MT_RF(6, 42),		RF_A_BAND_11J,	0xEB},
+
+	/* Move R6-R45, R50~R59 to mt76x0_RF_INT_PA_5G_Channel_0_RegTb/mt76x0_RF_EXT_PA_5G_Channel_0_RegTb */
+
+	{ MT_RF(6, 127),	RF_G_BAND,		0x84},
+	{ MT_RF(6, 127),	RF_A_BAND,		0x04},
+
+	{ MT_RF(7, 5),		RF_G_BAND,		0x40},
+	{ MT_RF(7, 5),		RF_A_BAND,		0x00},
+
+	{ MT_RF(7, 9),		RF_G_BAND,		0x00},
+	{ MT_RF(7, 9),		RF_A_BAND,		0x00},
+
+	{ MT_RF(7, 70),		RF_G_BAND,		0x00},
+	{ MT_RF(7, 70),		RF_A_BAND,		0x6D},
+
+	{ MT_RF(7, 71),		RF_G_BAND,		0x00},
+	{ MT_RF(7, 71),		RF_A_BAND,		0xB0},
+
+	{ MT_RF(7, 78),		RF_G_BAND,		0x00},
+	{ MT_RF(7, 78),		RF_A_BAND,		0x55},
+
+	{ MT_RF(7, 79),		RF_G_BAND,		0x00},
+	{ MT_RF(7, 79),		RF_A_BAND,		0x55},
+};
+
+static const struct mt76x0_freq_item mt76x0_frequency_plan[] = {
+	{1,	RF_G_BAND,	0x02, 0x3F, 0x28, 0xDD, 0xE2, 0x40, 0x02, 0x40, 0x02, 0, 0, 1, 0x28, 0, 0x30, 0, 0, 0x3}, /* Freq 2412 */
+	{2, 	RF_G_BAND,	0x02, 0x3F, 0x3C, 0xDD, 0xE4, 0x40, 0x07, 0x40, 0x02, 0, 0, 1, 0xA1, 0, 0x30, 0, 0, 0x1}, /* Freq 2417 */
+	{3, 	RF_G_BAND,	0x02, 0x3F, 0x3C, 0xDD, 0xE2, 0x40, 0x07, 0x40, 0x0B, 0, 0, 1, 0x50, 0, 0x30, 0, 0, 0x0}, /* Freq 2422 */
+	{4, 	RF_G_BAND,	0x02, 0x3F, 0x28, 0xDD, 0xD4, 0x40, 0x02, 0x40, 0x09, 0, 0, 1, 0x50, 0, 0x30, 0, 0, 0x0}, /* Freq 2427 */
+	{5, 	RF_G_BAND,	0x02, 0x3F, 0x3C, 0xDD, 0xD4, 0x40, 0x07, 0x40, 0x02, 0, 0, 1, 0xA2, 0, 0x30, 0, 0, 0x1}, /* Freq 2432 */
+	{6, 	RF_G_BAND,	0x02, 0x3F, 0x3C, 0xDD, 0xD4, 0x40, 0x07, 0x40, 0x07, 0, 0, 1, 0xA2, 0, 0x30, 0, 0, 0x1}, /* Freq 2437 */
+	{7, 	RF_G_BAND,	0x02, 0x3F, 0x28, 0xDD, 0xE2, 0x40, 0x02, 0x40, 0x07, 0, 0, 1, 0x28, 0, 0x30, 0, 0, 0x3}, /* Freq 2442 */
+	{8, 	RF_G_BAND,	0x02, 0x3F, 0x3C, 0xDD, 0xD4, 0x40, 0x07, 0x40, 0x02, 0, 0, 1, 0xA3, 0, 0x30, 0, 0, 0x1}, /* Freq 2447 */
+	{9, 	RF_G_BAND,	0x02, 0x3F, 0x3C, 0xDD, 0xF2, 0x40, 0x07, 0x40, 0x0D, 0, 0, 1, 0x28, 0, 0x30, 0, 0, 0x3}, /* Freq 2452 */
+	{10, 	RF_G_BAND,	0x02, 0x3F, 0x28, 0xDD, 0xD4, 0x40, 0x02, 0x40, 0x09, 0, 0, 1, 0x51, 0, 0x30, 0, 0, 0x0}, /* Freq 2457 */
+	{11, 	RF_G_BAND,	0x02, 0x3F, 0x3C, 0xDD, 0xD4, 0x40, 0x07, 0x40, 0x02, 0, 0, 1, 0xA4, 0, 0x30, 0, 0, 0x1}, /* Freq 2462 */
+	{12, 	RF_G_BAND,	0x02, 0x3F, 0x3C, 0xDD, 0xD4, 0x40, 0x07, 0x40, 0x07, 0, 0, 1, 0xA4, 0, 0x30, 0, 0, 0x1}, /* Freq 2467 */
+	{13, 	RF_G_BAND,	0x02, 0x3F, 0x28, 0xDD, 0xF2, 0x40, 0x02, 0x40, 0x02, 0, 0, 1, 0x29, 0, 0x30, 0, 0, 0x3}, /* Freq 2472 */
+	{14, 	RF_G_BAND,	0x02, 0x3F, 0x28, 0xDD, 0xF2, 0x40, 0x02, 0x40, 0x04, 0, 0, 1, 0x29, 0, 0x30, 0, 0, 0x3}, /* Freq 2484 */
+
+	{183, 	(RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x70, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x17, 0, 0, 1, 0x28, 0, 0x30, 0, 0, 0x3}, /* Freq 4915 */
+	{184, 	(RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x00, 0, 0, 1, 0x29, 0, 0x30, 0, 0, 0x3}, /* Freq 4920 */
+	{185, 	(RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x01, 0, 0, 1, 0x29, 0, 0x30, 0, 0, 0x3}, /* Freq 4925 */
+	{187, 	(RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x03, 0, 0, 1, 0x29, 0, 0x30, 0, 0, 0x3}, /* Freq 4935 */
+	{188, 	(RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x02, 0, 0, 1, 0x29, 0, 0x30, 0, 0, 0x3}, /* Freq 4940 */
+	{189, 	(RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x05, 0, 0, 1, 0x29, 0, 0x30, 0, 0, 0x3}, /* Freq 4945 */
+	{192, 	(RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x04, 0, 0, 1, 0x29, 0, 0x30, 0, 0, 0x3}, /* Freq 4960 */
+	{196, 	(RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x06, 0, 0, 1, 0x29, 0, 0x30, 0, 0, 0x3}, /* Freq 4980 */
+
+	{36, 	(RF_A_BAND | RF_A_BAND_LB),	 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x02, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5180 */
+	{37, 	(RF_A_BAND | RF_A_BAND_LB),	 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x05, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5185 */
+	{38, 	(RF_A_BAND | RF_A_BAND_LB),	 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x03, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5190 */
+	{39, 	(RF_A_BAND | RF_A_BAND_LB),	 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x07, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5195 */
+	{40, 	(RF_A_BAND | RF_A_BAND_LB),	 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x04, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5200 */
+	{41, 	(RF_A_BAND | RF_A_BAND_LB),	 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x09, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5205 */
+	{42, 	(RF_A_BAND | RF_A_BAND_LB),	 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x05, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5210 */
+	{43, 	(RF_A_BAND | RF_A_BAND_LB),	 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0B, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5215 */
+	{44, 	(RF_A_BAND | RF_A_BAND_LB),	 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x06, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5220 */
+	{45, 	(RF_A_BAND | RF_A_BAND_LB),	 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0D, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5225 */
+	{46, 	(RF_A_BAND | RF_A_BAND_LB),	 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x07, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5230 */
+	{47, 	(RF_A_BAND | RF_A_BAND_LB),	 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0F, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5235 */
+	{48, 	(RF_A_BAND | RF_A_BAND_LB),	 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x08, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5240 */
+	{49, 	(RF_A_BAND | RF_A_BAND_LB),	 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x11, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5245 */
+	{50, 	(RF_A_BAND | RF_A_BAND_LB),	 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x09, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5250 */
+	{51, 	(RF_A_BAND | RF_A_BAND_LB),	 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x13, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5255 */
+	{52, 	(RF_A_BAND | RF_A_BAND_LB),	 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x0A, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5260 */
+	{53, 	(RF_A_BAND | RF_A_BAND_LB),	 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x15, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5265 */
+	{54, 	(RF_A_BAND | RF_A_BAND_LB),	 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x0B, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5270 */
+	{55, 	(RF_A_BAND | RF_A_BAND_LB),	 0x02, 0x3F, 0x70, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x17, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5275 */
+	{56, 	(RF_A_BAND | RF_A_BAND_LB),	 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x00, 0, 0, 1, 0x2C, 0, 0x30, 0, 0, 0x3}, /* Freq 5280 */
+	{57, 	(RF_A_BAND | RF_A_BAND_LB),	 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x01, 0, 0, 1, 0x2C, 0, 0x30, 0, 0, 0x3}, /* Freq 5285 */
+	{58, 	(RF_A_BAND | RF_A_BAND_LB),	 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x01, 0, 0, 1, 0x2C, 0, 0x30, 0, 0, 0x3}, /* Freq 5290 */
+	{59, 	(RF_A_BAND | RF_A_BAND_LB),	 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x03, 0, 0, 1, 0x2C, 0, 0x30, 0, 0, 0x3}, /* Freq 5295 */
+	{60, 	(RF_A_BAND | RF_A_BAND_LB),	 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x02, 0, 0, 1, 0x2C, 0, 0x30, 0, 0, 0x3}, /* Freq 5300 */
+	{61, 	(RF_A_BAND | RF_A_BAND_LB),	 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x05, 0, 0, 1, 0x2C, 0, 0x30, 0, 0, 0x3}, /* Freq 5305 */
+	{62, 	(RF_A_BAND | RF_A_BAND_LB),	 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x03, 0, 0, 1, 0x2C, 0, 0x30, 0, 0, 0x3}, /* Freq 5310 */
+	{63, 	(RF_A_BAND | RF_A_BAND_LB),	 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x07, 0, 0, 1, 0x2C, 0, 0x30, 0, 0, 0x3}, /* Freq 5315 */
+	{64, 	(RF_A_BAND | RF_A_BAND_LB),	 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x04, 0, 0, 1, 0x2C, 0, 0x30, 0, 0, 0x3}, /* Freq 5320 */
+
+	{100, 	(RF_A_BAND | RF_A_BAND_MB),	 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x0A, 0, 0, 1, 0x2D, 0, 0x30, 0, 0, 0x3}, /* Freq 5500 */
+	{101, 	(RF_A_BAND | RF_A_BAND_MB),	 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x15, 0, 0, 1, 0x2D, 0, 0x30, 0, 0, 0x3}, /* Freq 5505 */
+	{102, 	(RF_A_BAND | RF_A_BAND_MB),	 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x0B, 0, 0, 1, 0x2D, 0, 0x30, 0, 0, 0x3}, /* Freq 5510 */
+	{103, 	(RF_A_BAND | RF_A_BAND_MB),	 0x02, 0x3F, 0x70, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x17, 0, 0, 1, 0x2D, 0, 0x30, 0, 0, 0x3}, /* Freq 5515 */
+	{104, 	(RF_A_BAND | RF_A_BAND_MB),	 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x00, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5520 */
+	{105, 	(RF_A_BAND | RF_A_BAND_MB),	 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x01, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5525 */
+	{106, 	(RF_A_BAND | RF_A_BAND_MB),	 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x01, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5530 */
+	{107, 	(RF_A_BAND | RF_A_BAND_MB),	 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x03, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5535 */
+	{108, 	(RF_A_BAND | RF_A_BAND_MB),	 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x02, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5540 */
+	{109, 	(RF_A_BAND | RF_A_BAND_MB),	 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x05, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5545 */
+	{110, 	(RF_A_BAND | RF_A_BAND_MB),	 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x03, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5550 */
+	{111, 	(RF_A_BAND | RF_A_BAND_MB),	 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x07, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5555 */
+	{112, 	(RF_A_BAND | RF_A_BAND_MB),	 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x04, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5560 */
+	{113, 	(RF_A_BAND | RF_A_BAND_MB),	 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x09, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5565 */
+	{114, 	(RF_A_BAND | RF_A_BAND_MB),	 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x05, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5570 */
+	{115, 	(RF_A_BAND | RF_A_BAND_MB),	 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0B, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5575 */
+	{116, 	(RF_A_BAND | RF_A_BAND_MB),	 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x06, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5580 */
+	{117, 	(RF_A_BAND | RF_A_BAND_MB),	 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0D, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5585 */
+	{118, 	(RF_A_BAND | RF_A_BAND_MB),	 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x07, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5590 */
+	{119, 	(RF_A_BAND | RF_A_BAND_MB),	 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0F, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5595 */
+	{120, 	(RF_A_BAND | RF_A_BAND_MB),	 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x08, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5600 */
+	{121, 	(RF_A_BAND | RF_A_BAND_MB),	 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x11, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5605 */
+	{122, 	(RF_A_BAND | RF_A_BAND_MB),	 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x09, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5610 */
+	{123, 	(RF_A_BAND | RF_A_BAND_MB),	 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x13, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5615 */
+	{124, 	(RF_A_BAND | RF_A_BAND_MB),	 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x0A, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5620 */
+	{125, 	(RF_A_BAND | RF_A_BAND_MB),	 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x15, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5625 */
+	{126, 	(RF_A_BAND | RF_A_BAND_MB),	 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x0B, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5630 */
+	{127, 	(RF_A_BAND | RF_A_BAND_MB),	 0x02, 0x3F, 0x70, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x17, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5635 */
+	{128, 	(RF_A_BAND | RF_A_BAND_MB),	 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x00, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5640 */
+	{129, 	(RF_A_BAND | RF_A_BAND_MB),	 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x01, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5645 */
+	{130, 	(RF_A_BAND | RF_A_BAND_MB),	 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x01, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5650 */
+	{131, 	(RF_A_BAND | RF_A_BAND_MB),	 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x03, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5655 */
+	{132, 	(RF_A_BAND | RF_A_BAND_MB),	 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x02, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5660 */
+	{133, 	(RF_A_BAND | RF_A_BAND_MB),	 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x05, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5665 */
+	{134, 	(RF_A_BAND | RF_A_BAND_MB),	 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x03, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5670 */
+	{135, 	(RF_A_BAND | RF_A_BAND_MB),	 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x07, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5675 */
+	{136, 	(RF_A_BAND | RF_A_BAND_MB),	 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x04, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5680 */
+
+	{137, 	(RF_A_BAND | RF_A_BAND_HB),	 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x09, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5685 */
+	{138, 	(RF_A_BAND | RF_A_BAND_HB),	 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x05, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5690 */
+	{139, 	(RF_A_BAND | RF_A_BAND_HB),	 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0B, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5695 */
+	{140, 	(RF_A_BAND | RF_A_BAND_HB),	 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x06, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5700 */
+	{141, 	(RF_A_BAND | RF_A_BAND_HB),	 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0D, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5705 */
+	{142, 	(RF_A_BAND | RF_A_BAND_HB),	 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x07, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5710 */
+	{143, 	(RF_A_BAND | RF_A_BAND_HB),	 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0F, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5715 */
+	{144, 	(RF_A_BAND | RF_A_BAND_HB),	 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x08, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5720 */
+	{145, 	(RF_A_BAND | RF_A_BAND_HB),	 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x11, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5725 */
+	{146, 	(RF_A_BAND | RF_A_BAND_HB),	 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x09, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5730 */
+	{147, 	(RF_A_BAND | RF_A_BAND_HB),	 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x13, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5735 */
+	{148, 	(RF_A_BAND | RF_A_BAND_HB),	 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x0A, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5740 */
+	{149, 	(RF_A_BAND | RF_A_BAND_HB),	 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x15, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5745 */
+	{150, 	(RF_A_BAND | RF_A_BAND_HB),	 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x0B, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5750 */
+	{151, 	(RF_A_BAND | RF_A_BAND_HB),	 0x02, 0x3F, 0x70, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x17, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5755 */
+	{152, 	(RF_A_BAND | RF_A_BAND_HB),	 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x00, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5760 */
+	{153, 	(RF_A_BAND | RF_A_BAND_HB),	 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x01, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5765 */
+	{154, 	(RF_A_BAND | RF_A_BAND_HB),	 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x01, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5770 */
+	{155, 	(RF_A_BAND | RF_A_BAND_HB),	 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x03, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5775 */
+	{156, 	(RF_A_BAND | RF_A_BAND_HB),	 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x02, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5780 */
+	{157, 	(RF_A_BAND | RF_A_BAND_HB),	 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x05, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5785 */
+	{158, 	(RF_A_BAND | RF_A_BAND_HB),	 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x03, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5790 */
+	{159, 	(RF_A_BAND | RF_A_BAND_HB),	 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x07, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5795 */
+	{160, 	(RF_A_BAND | RF_A_BAND_HB),	 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x04, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5800 */
+	{161, 	(RF_A_BAND | RF_A_BAND_HB),	 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x09, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5805 */
+	{162, 	(RF_A_BAND | RF_A_BAND_HB),	 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x05, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5810 */
+	{163, 	(RF_A_BAND | RF_A_BAND_HB),	 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0B, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5815 */
+	{164, 	(RF_A_BAND | RF_A_BAND_HB),	 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x06, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5820 */
+	{165, 	(RF_A_BAND | RF_A_BAND_HB),	 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0D, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5825 */
+	{166, 	(RF_A_BAND | RF_A_BAND_HB),	 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x07, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5830 */
+	{167, 	(RF_A_BAND | RF_A_BAND_HB),	 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0F, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5835 */
+	{168, 	(RF_A_BAND | RF_A_BAND_HB),	 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x08, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5840 */
+	{169, 	(RF_A_BAND | RF_A_BAND_HB),	 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x11, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5845 */
+	{170, 	(RF_A_BAND | RF_A_BAND_HB),	 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x09, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5850 */
+	{171, 	(RF_A_BAND | RF_A_BAND_HB),	 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x13, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5855 */
+	{172, 	(RF_A_BAND | RF_A_BAND_HB),	 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x0A, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5860 */
+	{173, 	(RF_A_BAND | RF_A_BAND_HB),	 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x15, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5865 */
+};
+
+static const struct mt76x0_freq_item mt76x0_sdm_frequency_plan[] = {
+	{1,	RF_G_BAND,	0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x28, 0, 0x0, 0x8, 0xCCCC,  0x3}, /* Freq 2412 */
+	{2, 	RF_G_BAND,	0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x28, 0, 0x0, 0x8, 0x12222, 0x3}, /* Freq 2417 */
+	{3, 	RF_G_BAND,	0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x28, 0, 0x0, 0x8, 0x17777, 0x3}, /* Freq 2422 */
+	{4, 	RF_G_BAND,	0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x28, 0, 0x0, 0x8, 0x1CCCC, 0x3}, /* Freq 2427 */
+	{5, 	RF_G_BAND,	0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x28, 0, 0x0, 0x8, 0x22222, 0x3}, /* Freq 2432 */
+	{6, 	RF_G_BAND,	0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x28, 0, 0x0, 0x8, 0x27777, 0x3}, /* Freq 2437 */
+	{7, 	RF_G_BAND,	0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x28, 0, 0x0, 0x8, 0x2CCCC, 0x3}, /* Freq 2442 */
+	{8, 	RF_G_BAND,	0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x28, 0, 0x0, 0x8, 0x32222, 0x3}, /* Freq 2447 */
+	{9, 	RF_G_BAND,	0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x28, 0, 0x0, 0x8, 0x37777, 0x3}, /* Freq 2452 */
+	{10, 	RF_G_BAND,	0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x28, 0, 0x0, 0x8, 0x3CCCC, 0x3}, /* Freq 2457 */
+	{11, 	RF_G_BAND,	0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x29, 0, 0x0, 0x8, 0x2222, 0x3}, /* Freq 2462 */
+	{12, 	RF_G_BAND,	0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x29, 0, 0x0, 0x8, 0x7777, 0x3}, /* Freq 2467 */
+	{13, 	RF_G_BAND,	0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x29, 0, 0x0, 0x8, 0xCCCC, 0x3}, /* Freq 2472 */
+	{14, 	RF_G_BAND,	0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x29, 0, 0x0, 0x8, 0x19999, 0x3}, /* Freq 2484 */
+
+	{183, 	(RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x28, 0, 0x0, 0x8, 0x3D555, 0x3}, /* Freq 4915 */
+	{184, 	(RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x29, 0, 0x0, 0x8, 0x0,     0x3}, /* Freq 4920 */
+	{185, 	(RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x29, 0, 0x0, 0x8, 0x2AAA,  0x3}, /* Freq 4925 */
+	{187, 	(RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x29, 0, 0x0, 0x8, 0x8000,  0x3}, /* Freq 4935 */
+	{188, 	(RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x29, 0, 0x0, 0x8, 0xAAAA,  0x3}, /* Freq 4940 */
+	{189, 	(RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x29, 0, 0x0, 0x8, 0xD555,  0x3}, /* Freq 4945 */
+	{192, 	(RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x29, 0, 0x0, 0x8, 0x15555, 0x3}, /* Freq 4960 */
+	{196, 	(RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x29, 0, 0x0, 0x8, 0x20000, 0x3}, /* Freq 4980 */
+
+	{36, 	(RF_A_BAND | RF_A_BAND_LB),	 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0xAAAA,  0x3}, /* Freq 5180 */
+	{37, 	(RF_A_BAND | RF_A_BAND_LB),	 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0xD555,  0x3}, /* Freq 5185 */
+	{38, 	(RF_A_BAND | RF_A_BAND_LB),	 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x10000, 0x3}, /* Freq 5190 */
+	{39, 	(RF_A_BAND | RF_A_BAND_LB),	 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x12AAA, 0x3}, /* Freq 5195 */
+	{40, 	(RF_A_BAND | RF_A_BAND_LB),	 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x15555, 0x3}, /* Freq 5200 */
+	{41, 	(RF_A_BAND | RF_A_BAND_LB),	 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x18000, 0x3}, /* Freq 5205 */
+	{42, 	(RF_A_BAND | RF_A_BAND_LB),	 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x1AAAA, 0x3}, /* Freq 5210 */
+	{43, 	(RF_A_BAND | RF_A_BAND_LB),	 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x1D555, 0x3}, /* Freq 5215 */
+	{44, 	(RF_A_BAND | RF_A_BAND_LB),	 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x20000, 0x3}, /* Freq 5220 */
+	{45, 	(RF_A_BAND | RF_A_BAND_LB),	 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x22AAA, 0x3}, /* Freq 5225 */
+	{46, 	(RF_A_BAND | RF_A_BAND_LB),	 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x25555, 0x3}, /* Freq 5230 */
+	{47, 	(RF_A_BAND | RF_A_BAND_LB),	 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x28000, 0x3}, /* Freq 5235 */
+	{48, 	(RF_A_BAND | RF_A_BAND_LB),	 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x2AAAA, 0x3}, /* Freq 5240 */
+	{49, 	(RF_A_BAND | RF_A_BAND_LB),	 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x2D555, 0x3}, /* Freq 5245 */
+	{50, 	(RF_A_BAND | RF_A_BAND_LB),	 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x30000, 0x3}, /* Freq 5250 */
+	{51, 	(RF_A_BAND | RF_A_BAND_LB),	 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x32AAA, 0x3}, /* Freq 5255 */
+	{52, 	(RF_A_BAND | RF_A_BAND_LB),	 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x35555, 0x3}, /* Freq 5260 */
+	{53, 	(RF_A_BAND | RF_A_BAND_LB),	 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x38000, 0x3}, /* Freq 5265 */
+	{54, 	(RF_A_BAND | RF_A_BAND_LB),	 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x3AAAA, 0x3}, /* Freq 5270 */
+	{55, 	(RF_A_BAND | RF_A_BAND_LB),	 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x3D555, 0x3}, /* Freq 5275 */
+	{56, 	(RF_A_BAND | RF_A_BAND_LB),	 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2C, 0, 0x0, 0x8, 0x00000, 0x3}, /* Freq 5280 */
+	{57, 	(RF_A_BAND | RF_A_BAND_LB),	 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2C, 0, 0x0, 0x8, 0x02AAA, 0x3}, /* Freq 5285 */
+	{58, 	(RF_A_BAND | RF_A_BAND_LB),	 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2C, 0, 0x0, 0x8, 0x05555, 0x3}, /* Freq 5290 */
+	{59, 	(RF_A_BAND | RF_A_BAND_LB),	 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2C, 0, 0x0, 0x8, 0x08000, 0x3}, /* Freq 5295 */
+	{60, 	(RF_A_BAND | RF_A_BAND_LB),	 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2C, 0, 0x0, 0x8, 0x0AAAA, 0x3}, /* Freq 5300 */
+	{61, 	(RF_A_BAND | RF_A_BAND_LB),	 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2C, 0, 0x0, 0x8, 0x0D555, 0x3}, /* Freq 5305 */
+	{62, 	(RF_A_BAND | RF_A_BAND_LB),	 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2C, 0, 0x0, 0x8, 0x10000, 0x3}, /* Freq 5310 */
+	{63, 	(RF_A_BAND | RF_A_BAND_LB),	 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2C, 0, 0x0, 0x8, 0x12AAA, 0x3}, /* Freq 5315 */
+	{64, 	(RF_A_BAND | RF_A_BAND_LB),	 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2C, 0, 0x0, 0x8, 0x15555, 0x3}, /* Freq 5320 */
+
+	{100, 	(RF_A_BAND | RF_A_BAND_MB),	 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2D, 0, 0x0, 0x8, 0x35555, 0x3}, /* Freq 5500 */
+	{101, 	(RF_A_BAND | RF_A_BAND_MB),	 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2D, 0, 0x0, 0x8, 0x38000, 0x3}, /* Freq 5505 */
+	{102, 	(RF_A_BAND | RF_A_BAND_MB),	 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2D, 0, 0x0, 0x8, 0x3AAAA, 0x3}, /* Freq 5510 */
+	{103, 	(RF_A_BAND | RF_A_BAND_MB),	 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2D, 0, 0x0, 0x8, 0x3D555, 0x3}, /* Freq 5515 */
+	{104, 	(RF_A_BAND | RF_A_BAND_MB),	 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x00000, 0x3}, /* Freq 5520 */
+	{105, 	(RF_A_BAND | RF_A_BAND_MB),	 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x02AAA, 0x3}, /* Freq 5525 */
+	{106, 	(RF_A_BAND | RF_A_BAND_MB),	 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x05555, 0x3}, /* Freq 5530 */
+	{107, 	(RF_A_BAND | RF_A_BAND_MB),	 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x08000, 0x3}, /* Freq 5535 */
+	{108, 	(RF_A_BAND | RF_A_BAND_MB),	 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x0AAAA, 0x3}, /* Freq 5540 */
+	{109, 	(RF_A_BAND | RF_A_BAND_MB),	 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x0D555, 0x3}, /* Freq 5545 */
+	{110, 	(RF_A_BAND | RF_A_BAND_MB),	 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x10000, 0x3}, /* Freq 5550 */
+	{111, 	(RF_A_BAND | RF_A_BAND_MB),	 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x12AAA, 0x3}, /* Freq 5555 */
+	{112, 	(RF_A_BAND | RF_A_BAND_MB),	 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x15555, 0x3}, /* Freq 5560 */
+	{113, 	(RF_A_BAND | RF_A_BAND_MB),	 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x18000, 0x3}, /* Freq 5565 */
+	{114, 	(RF_A_BAND | RF_A_BAND_MB),	 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x1AAAA, 0x3}, /* Freq 5570 */
+	{115, 	(RF_A_BAND | RF_A_BAND_MB),	 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x1D555, 0x3}, /* Freq 5575 */
+	{116, 	(RF_A_BAND | RF_A_BAND_MB),	 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x20000, 0x3}, /* Freq 5580 */
+	{117, 	(RF_A_BAND | RF_A_BAND_MB),	 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x22AAA, 0x3}, /* Freq 5585 */
+	{118, 	(RF_A_BAND | RF_A_BAND_MB),	 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x25555, 0x3}, /* Freq 5590 */
+	{119, 	(RF_A_BAND | RF_A_BAND_MB),	 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x28000, 0x3}, /* Freq 5595 */
+	{120, 	(RF_A_BAND | RF_A_BAND_MB),	 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x2AAAA, 0x3}, /* Freq 5600 */
+	{121, 	(RF_A_BAND | RF_A_BAND_MB),	 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x2D555, 0x3}, /* Freq 5605 */
+	{122, 	(RF_A_BAND | RF_A_BAND_MB),	 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x30000, 0x3}, /* Freq 5610 */
+	{123, 	(RF_A_BAND | RF_A_BAND_MB),	 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x32AAA, 0x3}, /* Freq 5615 */
+	{124, 	(RF_A_BAND | RF_A_BAND_MB),	 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x35555, 0x3}, /* Freq 5620 */
+	{125, 	(RF_A_BAND | RF_A_BAND_MB),	 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x38000, 0x3}, /* Freq 5625 */
+	{126, 	(RF_A_BAND | RF_A_BAND_MB),	 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x3AAAA, 0x3}, /* Freq 5630 */
+	{127, 	(RF_A_BAND | RF_A_BAND_MB),	 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x3D555, 0x3}, /* Freq 5635 */
+	{128, 	(RF_A_BAND | RF_A_BAND_MB),	 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x00000, 0x3}, /* Freq 5640 */
+	{129, 	(RF_A_BAND | RF_A_BAND_MB),	 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x02AAA, 0x3}, /* Freq 5645 */
+	{130, 	(RF_A_BAND | RF_A_BAND_MB),	 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x05555, 0x3}, /* Freq 5650 */
+	{131, 	(RF_A_BAND | RF_A_BAND_MB),	 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x08000, 0x3}, /* Freq 5655 */
+	{132, 	(RF_A_BAND | RF_A_BAND_MB),	 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x0AAAA, 0x3}, /* Freq 5660 */
+	{133, 	(RF_A_BAND | RF_A_BAND_MB),	 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x0D555, 0x3}, /* Freq 5665 */
+	{134, 	(RF_A_BAND | RF_A_BAND_MB),	 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x10000, 0x3}, /* Freq 5670 */
+	{135, 	(RF_A_BAND | RF_A_BAND_MB),	 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x12AAA, 0x3}, /* Freq 5675 */
+	{136, 	(RF_A_BAND | RF_A_BAND_MB),	 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x15555, 0x3}, /* Freq 5680 */
+
+	{137, 	(RF_A_BAND | RF_A_BAND_HB),	 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x18000, 0x3}, /* Freq 5685 */
+	{138, 	(RF_A_BAND | RF_A_BAND_HB),	 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x1AAAA, 0x3}, /* Freq 5690 */
+	{139, 	(RF_A_BAND | RF_A_BAND_HB),	 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x1D555, 0x3}, /* Freq 5695 */
+	{140, 	(RF_A_BAND | RF_A_BAND_HB),	 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x20000, 0x3}, /* Freq 5700 */
+	{141, 	(RF_A_BAND | RF_A_BAND_HB),	 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x22AAA, 0x3}, /* Freq 5705 */
+	{142, 	(RF_A_BAND | RF_A_BAND_HB),	 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x25555, 0x3}, /* Freq 5710 */
+	{143, 	(RF_A_BAND | RF_A_BAND_HB),	 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x28000, 0x3}, /* Freq 5715 */
+	{144, 	(RF_A_BAND | RF_A_BAND_HB),	 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x2AAAA, 0x3}, /* Freq 5720 */
+	{145, 	(RF_A_BAND | RF_A_BAND_HB),	 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x2D555, 0x3}, /* Freq 5725 */
+	{146, 	(RF_A_BAND | RF_A_BAND_HB),	 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x30000, 0x3}, /* Freq 5730 */
+	{147, 	(RF_A_BAND | RF_A_BAND_HB),	 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x32AAA, 0x3}, /* Freq 5735 */
+	{148, 	(RF_A_BAND | RF_A_BAND_HB),	 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x35555, 0x3}, /* Freq 5740 */
+	{149, 	(RF_A_BAND | RF_A_BAND_HB),	 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x38000, 0x3}, /* Freq 5745 */
+	{150, 	(RF_A_BAND | RF_A_BAND_HB),	 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x3AAAA, 0x3}, /* Freq 5750 */
+	{151, 	(RF_A_BAND | RF_A_BAND_HB),	 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x3D555, 0x3}, /* Freq 5755 */
+	{152, 	(RF_A_BAND | RF_A_BAND_HB),	 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x00000, 0x3}, /* Freq 5760 */
+	{153, 	(RF_A_BAND | RF_A_BAND_HB),	 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x02AAA, 0x3}, /* Freq 5765 */
+	{154, 	(RF_A_BAND | RF_A_BAND_HB),	 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x05555, 0x3}, /* Freq 5770 */
+	{155, 	(RF_A_BAND | RF_A_BAND_HB),	 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x08000, 0x3}, /* Freq 5775 */
+	{156, 	(RF_A_BAND | RF_A_BAND_HB),	 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x0AAAA, 0x3}, /* Freq 5780 */
+	{157, 	(RF_A_BAND | RF_A_BAND_HB),	 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x0D555, 0x3}, /* Freq 5785 */
+	{158, 	(RF_A_BAND | RF_A_BAND_HB),	 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x10000, 0x3}, /* Freq 5790 */
+	{159, 	(RF_A_BAND | RF_A_BAND_HB),	 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x12AAA, 0x3}, /* Freq 5795 */
+	{160, 	(RF_A_BAND | RF_A_BAND_HB),	 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x15555, 0x3}, /* Freq 5800 */
+	{161, 	(RF_A_BAND | RF_A_BAND_HB),	 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x18000, 0x3}, /* Freq 5805 */
+	{162, 	(RF_A_BAND | RF_A_BAND_HB),	 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x1AAAA, 0x3}, /* Freq 5810 */
+	{163, 	(RF_A_BAND | RF_A_BAND_HB),	 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x1D555, 0x3}, /* Freq 5815 */
+	{164, 	(RF_A_BAND | RF_A_BAND_HB),	 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x20000, 0x3}, /* Freq 5820 */
+	{165, 	(RF_A_BAND | RF_A_BAND_HB),	 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x22AAA, 0x3}, /* Freq 5825 */
+	{166, 	(RF_A_BAND | RF_A_BAND_HB),	 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x25555, 0x3}, /* Freq 5830 */
+	{167, 	(RF_A_BAND | RF_A_BAND_HB),	 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x28000, 0x3}, /* Freq 5835 */
+	{168, 	(RF_A_BAND | RF_A_BAND_HB),	 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x2AAAA, 0x3}, /* Freq 5840 */
+	{169, 	(RF_A_BAND | RF_A_BAND_HB),	 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x2D555, 0x3}, /* Freq 5845 */
+	{170, 	(RF_A_BAND | RF_A_BAND_HB),	 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x30000, 0x3}, /* Freq 5850 */
+	{171, 	(RF_A_BAND | RF_A_BAND_HB),	 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x32AAA, 0x3}, /* Freq 5855 */
+	{172, 	(RF_A_BAND | RF_A_BAND_HB),	 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x35555, 0x3}, /* Freq 5860 */
+	{173, 	(RF_A_BAND | RF_A_BAND_HB),	 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x38000, 0x3}, /* Freq 5865 */
+};
+
+static const u8 mt76x0_sdm_channel[] = {
+	183, 185, 43, 45, 54, 55, 57, 58, 102, 103, 105, 106, 115, 117, 126, 127, 129, 130, 139, 141, 150, 151, 153, 154, 163, 165
+};
+
+static const struct mt76x0_rf_switch_item mt76x0_rf_ext_pa_tab[] = {
+	{ MT_RF(6, 45),		RF_A_BAND_LB,	0x63},
+	{ MT_RF(6, 45),		RF_A_BAND_MB,	0x43},
+	{ MT_RF(6, 45),		RF_A_BAND_HB,	0x33},
+	{ MT_RF(6, 45),		RF_A_BAND_11J,	0x73},
+
+	{ MT_RF(6, 50),		RF_A_BAND_LB,	0x02},
+	{ MT_RF(6, 50),		RF_A_BAND_MB,	0x02},
+	{ MT_RF(6, 50),		RF_A_BAND_HB,	0x02},
+	{ MT_RF(6, 50),		RF_A_BAND_11J,	0x02},
+
+	{ MT_RF(6, 51),		RF_A_BAND_LB,	0x02},
+	{ MT_RF(6, 51),		RF_A_BAND_MB,	0x02},
+	{ MT_RF(6, 51),		RF_A_BAND_HB,	0x02},
+	{ MT_RF(6, 51),		RF_A_BAND_11J,	0x02},
+
+	{ MT_RF(6, 52),		RF_A_BAND_LB,	0x08},
+	{ MT_RF(6, 52),		RF_A_BAND_MB,	0x08},
+	{ MT_RF(6, 52),		RF_A_BAND_HB,	0x08},
+	{ MT_RF(6, 52),		RF_A_BAND_11J,	0x08},
+
+	{ MT_RF(6, 53),		RF_A_BAND_LB,	0x08},
+	{ MT_RF(6, 53),		RF_A_BAND_MB,	0x08},
+	{ MT_RF(6, 53),		RF_A_BAND_HB,	0x08},
+	{ MT_RF(6, 53),		RF_A_BAND_11J,	0x08},
+
+	{ MT_RF(6, 54),		RF_A_BAND_LB,	0x0A},
+	{ MT_RF(6, 54),		RF_A_BAND_MB,	0x0A},
+	{ MT_RF(6, 54),		RF_A_BAND_HB,	0x0A},
+	{ MT_RF(6, 54),		RF_A_BAND_11J,	0x0A},
+
+	{ MT_RF(6, 55),		RF_A_BAND_LB,	0x0A},
+	{ MT_RF(6, 55),		RF_A_BAND_MB,	0x0A},
+	{ MT_RF(6, 55),		RF_A_BAND_HB,	0x0A},
+	{ MT_RF(6, 55),		RF_A_BAND_11J,	0x0A},
+
+	{ MT_RF(6, 56),		RF_A_BAND_LB,	0x05},
+	{ MT_RF(6, 56),		RF_A_BAND_MB,	0x05},
+	{ MT_RF(6, 56),		RF_A_BAND_HB,	0x05},
+	{ MT_RF(6, 56),		RF_A_BAND_11J,	0x05},
+
+	{ MT_RF(6, 57),		RF_A_BAND_LB,	0x05},
+	{ MT_RF(6, 57),		RF_A_BAND_MB,	0x05},
+	{ MT_RF(6, 57),		RF_A_BAND_HB,	0x05},
+	{ MT_RF(6, 57),		RF_A_BAND_11J,	0x05},
+
+	{ MT_RF(6, 58),		RF_A_BAND_LB,	0x05},
+	{ MT_RF(6, 58),		RF_A_BAND_MB,	0x03},
+	{ MT_RF(6, 58),		RF_A_BAND_HB,	0x02},
+	{ MT_RF(6, 58),		RF_A_BAND_11J,	0x07},
+
+	{ MT_RF(6, 59),		RF_A_BAND_LB,	0x05},
+	{ MT_RF(6, 59),		RF_A_BAND_MB,	0x03},
+	{ MT_RF(6, 59),		RF_A_BAND_HB,	0x02},
+	{ MT_RF(6, 59),		RF_A_BAND_11J,	0x07},
+};
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/mac.c b/drivers/net/wireless/mediatek/mt76/mt76x0/mac.c
new file mode 100644
index 0000000000000000000000000000000000000000..95f28492a843c4282bd8d08102be456daeee66d1
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/mac.c
@@ -0,0 +1,660 @@
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "mt76x0.h"
+#include "trace.h"
+#include <linux/etherdevice.h>
+
+static void
+mt76_mac_process_tx_rate(struct ieee80211_tx_rate *txrate, u16 rate,
+			 enum nl80211_band band)
+{
+	u8 idx = FIELD_GET(MT_RXWI_RATE_INDEX, rate);
+
+	txrate->idx = 0;
+	txrate->flags = 0;
+	txrate->count = 1;
+
+	switch (FIELD_GET(MT_RXWI_RATE_PHY, rate)) {
+	case MT_PHY_TYPE_OFDM:
+		if (band == NL80211_BAND_2GHZ)
+			idx += 4;
+
+		txrate->idx = idx;
+		return;
+	case MT_PHY_TYPE_CCK:
+		if (idx >= 8)
+			idx -= 8;
+
+		txrate->idx = idx;
+		return;
+	case MT_PHY_TYPE_HT_GF:
+		txrate->flags |= IEEE80211_TX_RC_GREEN_FIELD;
+		/* fall through */
+	case MT_PHY_TYPE_HT:
+		txrate->flags |= IEEE80211_TX_RC_MCS;
+		txrate->idx = idx;
+		break;
+	case MT_PHY_TYPE_VHT:
+		txrate->flags |= IEEE80211_TX_RC_VHT_MCS;
+		txrate->idx = idx;
+		break;
+	default:
+		WARN_ON(1);
+		return;
+	}
+
+	switch (FIELD_GET(MT_RXWI_RATE_BW, rate)) {
+	case MT_PHY_BW_20:
+		break;
+	case MT_PHY_BW_40:
+		txrate->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
+		break;
+	case MT_PHY_BW_80:
+		txrate->flags |= IEEE80211_TX_RC_80_MHZ_WIDTH;
+		break;
+	default:
+		WARN_ON(1);
+		return;
+	}
+
+	if (rate & MT_RXWI_RATE_SGI)
+		txrate->flags |= IEEE80211_TX_RC_SHORT_GI;
+}
+
+static void
+mt76_mac_fill_tx_status(struct mt76x0_dev *dev, struct ieee80211_tx_info *info,
+			struct mt76_tx_status *st, int n_frames)
+{
+	struct ieee80211_tx_rate *rate = info->status.rates;
+	int cur_idx, last_rate;
+	int i;
+
+	if (!n_frames)
+		return;
+
+	last_rate = min_t(int, st->retry, IEEE80211_TX_MAX_RATES - 1);
+	mt76_mac_process_tx_rate(&rate[last_rate], st->rate,
+				 dev->mt76.chandef.chan->band);
+	if (last_rate < IEEE80211_TX_MAX_RATES - 1)
+		rate[last_rate + 1].idx = -1;
+
+	cur_idx = rate[last_rate].idx + last_rate;
+	for (i = 0; i <= last_rate; i++) {
+		rate[i].flags = rate[last_rate].flags;
+		rate[i].idx = max_t(int, 0, cur_idx - i);
+		rate[i].count = 1;
+	}
+
+	if (last_rate > 0)
+		rate[last_rate - 1].count = st->retry + 1 - last_rate;
+
+	info->status.ampdu_len = n_frames;
+	info->status.ampdu_ack_len = st->success ? n_frames : 0;
+
+	if (st->pktid & MT_TXWI_PKTID_PROBE)
+		info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE;
+
+	if (st->aggr)
+		info->flags |= IEEE80211_TX_CTL_AMPDU |
+			       IEEE80211_TX_STAT_AMPDU;
+
+	if (!st->ack_req)
+		info->flags |= IEEE80211_TX_CTL_NO_ACK;
+	else if (st->success)
+		info->flags |= IEEE80211_TX_STAT_ACK;
+}
+
+u16 mt76x0_mac_tx_rate_val(struct mt76x0_dev *dev,
+			 const struct ieee80211_tx_rate *rate, u8 *nss_val)
+{
+	u16 rateval;
+	u8 phy, rate_idx;
+	u8 nss = 1;
+	u8 bw = 0;
+
+	if (rate->flags & IEEE80211_TX_RC_VHT_MCS) {
+		rate_idx = rate->idx;
+		nss = 1 + (rate->idx >> 4);
+		phy = MT_PHY_TYPE_VHT;
+		if (rate->flags & IEEE80211_TX_RC_80_MHZ_WIDTH)
+			bw = 2;
+		else if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
+			bw = 1;
+	} else if (rate->flags & IEEE80211_TX_RC_MCS) {
+		rate_idx = rate->idx;
+		nss = 1 + (rate->idx >> 3);
+		phy = MT_PHY_TYPE_HT;
+		if (rate->flags & IEEE80211_TX_RC_GREEN_FIELD)
+			phy = MT_PHY_TYPE_HT_GF;
+		if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
+			bw = 1;
+	} else {
+		const struct ieee80211_rate *r;
+		int band = dev->mt76.chandef.chan->band;
+		u16 val;
+
+		r = &dev->mt76.hw->wiphy->bands[band]->bitrates[rate->idx];
+		if (rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
+			val = r->hw_value_short;
+		else
+			val = r->hw_value;
+
+		phy = val >> 8;
+		rate_idx = val & 0xff;
+		bw = 0;
+	}
+
+	rateval = FIELD_PREP(MT_RXWI_RATE_INDEX, rate_idx);
+	rateval |= FIELD_PREP(MT_RXWI_RATE_PHY, phy);
+	rateval |= FIELD_PREP(MT_RXWI_RATE_BW, bw);
+	if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
+		rateval |= MT_RXWI_RATE_SGI;
+
+	*nss_val = nss;
+	return cpu_to_le16(rateval);
+}
+
+void mt76x0_mac_wcid_set_rate(struct mt76x0_dev *dev, struct mt76_wcid *wcid,
+			    const struct ieee80211_tx_rate *rate)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->mt76.lock, flags);
+	wcid->tx_rate = mt76x0_mac_tx_rate_val(dev, rate, &wcid->tx_rate_nss);
+	wcid->tx_rate_set = true;
+	spin_unlock_irqrestore(&dev->mt76.lock, flags);
+}
+
+struct mt76_tx_status mt76x0_mac_fetch_tx_status(struct mt76x0_dev *dev)
+{
+	struct mt76_tx_status stat = {};
+	u32 stat2, stat1;
+
+	stat2 = mt76_rr(dev, MT_TX_STAT_FIFO_EXT);
+	stat1 = mt76_rr(dev, MT_TX_STAT_FIFO);
+
+	stat.valid = !!(stat1 & MT_TX_STAT_FIFO_VALID);
+	stat.success = !!(stat1 & MT_TX_STAT_FIFO_SUCCESS);
+	stat.aggr = !!(stat1 & MT_TX_STAT_FIFO_AGGR);
+	stat.ack_req = !!(stat1 & MT_TX_STAT_FIFO_ACKREQ);
+	stat.wcid = FIELD_GET(MT_TX_STAT_FIFO_WCID, stat1);
+	stat.rate = FIELD_GET(MT_TX_STAT_FIFO_RATE, stat1);
+
+	stat.retry = FIELD_GET(MT_TX_STAT_FIFO_EXT_RETRY, stat2);
+	stat.pktid = FIELD_GET(MT_TX_STAT_FIFO_EXT_PKTID, stat2);
+
+	return stat;
+}
+
+void mt76x0_send_tx_status(struct mt76x0_dev *dev, struct mt76_tx_status *stat, u8 *update)
+{
+	struct ieee80211_tx_info info = {};
+	struct ieee80211_sta *sta = NULL;
+	struct mt76_wcid *wcid = NULL;
+	struct mt76_sta *msta = NULL;
+
+	rcu_read_lock();
+	if (stat->wcid < ARRAY_SIZE(dev->wcid))
+		wcid = rcu_dereference(dev->wcid[stat->wcid]);
+
+	if (wcid) {
+		void *priv;
+
+		priv = msta = container_of(wcid, struct mt76_sta, wcid);
+		sta = container_of(priv, struct ieee80211_sta, drv_priv);
+	}
+
+	if (msta && stat->aggr) {
+		u32 stat_val, stat_cache;
+
+		stat_val = stat->rate;
+		stat_val |= ((u32) stat->retry) << 16;
+		stat_cache = msta->status.rate;
+		stat_cache |= ((u32) msta->status.retry) << 16;
+
+		if (*update == 0 && stat_val == stat_cache &&
+		    stat->wcid == msta->status.wcid && msta->n_frames < 32) {
+			msta->n_frames++;
+			goto out;
+		}
+
+		mt76_mac_fill_tx_status(dev, &info, &msta->status,
+					msta->n_frames);
+		msta->status = *stat;
+		msta->n_frames = 1;
+		*update = 0;
+	} else {
+		mt76_mac_fill_tx_status(dev, &info, stat, 1);
+		*update = 1;
+	}
+
+	spin_lock_bh(&dev->mac_lock);
+	ieee80211_tx_status_noskb(dev->mt76.hw, sta, &info);
+	spin_unlock_bh(&dev->mac_lock);
+out:
+	rcu_read_unlock();
+}
+
+void mt76x0_mac_set_protection(struct mt76x0_dev *dev, bool legacy_prot,
+				int ht_mode)
+{
+	int mode = ht_mode & IEEE80211_HT_OP_MODE_PROTECTION;
+	bool non_gf = !!(ht_mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
+	u32 prot[6];
+	bool ht_rts[4] = {};
+	int i;
+
+	prot[0] = MT_PROT_NAV_SHORT |
+		  MT_PROT_TXOP_ALLOW_ALL |
+		  MT_PROT_RTS_THR_EN;
+	prot[1] = prot[0];
+	if (legacy_prot)
+		prot[1] |= MT_PROT_CTRL_CTS2SELF;
+
+	prot[2] = prot[4] = MT_PROT_NAV_SHORT | MT_PROT_TXOP_ALLOW_BW20;
+	prot[3] = prot[5] = MT_PROT_NAV_SHORT | MT_PROT_TXOP_ALLOW_ALL;
+
+	if (legacy_prot) {
+		prot[2] |= MT_PROT_RATE_CCK_11;
+		prot[3] |= MT_PROT_RATE_CCK_11;
+		prot[4] |= MT_PROT_RATE_CCK_11;
+		prot[5] |= MT_PROT_RATE_CCK_11;
+	} else {
+		prot[2] |= MT_PROT_RATE_OFDM_24;
+		prot[3] |= MT_PROT_RATE_DUP_OFDM_24;
+		prot[4] |= MT_PROT_RATE_OFDM_24;
+		prot[5] |= MT_PROT_RATE_DUP_OFDM_24;
+	}
+
+	switch (mode) {
+	case IEEE80211_HT_OP_MODE_PROTECTION_NONE:
+		break;
+
+	case IEEE80211_HT_OP_MODE_PROTECTION_NONMEMBER:
+		ht_rts[0] = ht_rts[1] = ht_rts[2] = ht_rts[3] = true;
+		break;
+
+	case IEEE80211_HT_OP_MODE_PROTECTION_20MHZ:
+		ht_rts[1] = ht_rts[3] = true;
+		break;
+
+	case IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED:
+		ht_rts[0] = ht_rts[1] = ht_rts[2] = ht_rts[3] = true;
+		break;
+	}
+
+	if (non_gf)
+		ht_rts[2] = ht_rts[3] = true;
+
+	for (i = 0; i < 4; i++)
+		if (ht_rts[i])
+			prot[i + 2] |= MT_PROT_CTRL_RTS_CTS;
+
+	for (i = 0; i < 6; i++)
+		mt76_wr(dev, MT_CCK_PROT_CFG + i * 4, prot[i]);
+}
+
+void mt76x0_mac_set_short_preamble(struct mt76x0_dev *dev, bool short_preamb)
+{
+	if (short_preamb)
+		mt76_set(dev, MT_AUTO_RSP_CFG, MT_AUTO_RSP_PREAMB_SHORT);
+	else
+		mt76_clear(dev, MT_AUTO_RSP_CFG, MT_AUTO_RSP_PREAMB_SHORT);
+}
+
+void mt76x0_mac_config_tsf(struct mt76x0_dev *dev, bool enable, int interval)
+{
+	u32 val = mt76_rr(dev, MT_BEACON_TIME_CFG);
+
+	val &= ~(MT_BEACON_TIME_CFG_TIMER_EN |
+		 MT_BEACON_TIME_CFG_SYNC_MODE |
+		 MT_BEACON_TIME_CFG_TBTT_EN);
+
+	if (!enable) {
+		mt76_wr(dev, MT_BEACON_TIME_CFG, val);
+		return;
+	}
+
+	val &= ~MT_BEACON_TIME_CFG_INTVAL;
+	val |= FIELD_PREP(MT_BEACON_TIME_CFG_INTVAL, interval << 4) |
+		MT_BEACON_TIME_CFG_TIMER_EN |
+		MT_BEACON_TIME_CFG_SYNC_MODE |
+		MT_BEACON_TIME_CFG_TBTT_EN;
+	mt76_wr(dev, MT_BEACON_TIME_CFG, val);
+}
+
+static void mt76x0_check_mac_err(struct mt76x0_dev *dev)
+{
+	u32 val = mt76_rr(dev, 0x10f4);
+
+	if (!(val & BIT(29)) || !(val & (BIT(7) | BIT(5))))
+		return;
+
+	dev_err(dev->mt76.dev, "Error: MAC specific condition occurred\n");
+
+	mt76_set(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_RESET_CSR);
+	udelay(10);
+	mt76_clear(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_RESET_CSR);
+}
+
+void mt76x0_mac_work(struct work_struct *work)
+{
+	struct mt76x0_dev *dev = container_of(work, struct mt76x0_dev,
+					       mac_work.work);
+	struct {
+		u32 addr_base;
+		u32 span;
+		u64 *stat_base;
+	} spans[] = {
+		{ MT_RX_STA_CNT0,	3,	dev->stats.rx_stat },
+		{ MT_TX_STA_CNT0,	3,	dev->stats.tx_stat },
+		{ MT_TX_AGG_STAT,	1,	dev->stats.aggr_stat },
+		{ MT_MPDU_DENSITY_CNT,	1,	dev->stats.zero_len_del },
+		{ MT_TX_AGG_CNT_BASE0,	8,	&dev->stats.aggr_n[0] },
+		{ MT_TX_AGG_CNT_BASE1,	8,	&dev->stats.aggr_n[16] },
+	};
+	u32 sum, n;
+	int i, j, k;
+
+	/* Note: using MCU_RANDOM_READ is actually slower than reading all the
+	 *	 registers by hand.  The MCU takes ca. 20ms to complete a read
+	 *	 of 24 registers, while reading them one by one takes roughly
+	 *	 24*200us =~ 5ms.
+	 */
+
+	k = 0;
+	n = 0;
+	sum = 0;
+	for (i = 0; i < ARRAY_SIZE(spans); i++)
+		for (j = 0; j < spans[i].span; j++) {
+			u32 val = mt76_rr(dev, spans[i].addr_base + j * 4);
+
+			spans[i].stat_base[j * 2] += val & 0xffff;
+			spans[i].stat_base[j * 2 + 1] += val >> 16;
+
+			/* Calculate average AMPDU length */
+			if (spans[i].addr_base != MT_TX_AGG_CNT_BASE0 &&
+			    spans[i].addr_base != MT_TX_AGG_CNT_BASE1)
+				continue;
+
+			n += (val >> 16) + (val & 0xffff);
+			sum += (val & 0xffff) * (1 + k * 2) +
+				(val >> 16) * (2 + k * 2);
+			k++;
+		}
+
+	atomic_set(&dev->avg_ampdu_len, n ? DIV_ROUND_CLOSEST(sum, n) : 1);
+
+	mt76x0_check_mac_err(dev);
+
+	ieee80211_queue_delayed_work(dev->mt76.hw, &dev->mac_work, 10 * HZ);
+}
+
+void
+mt76x0_mac_wcid_setup(struct mt76x0_dev *dev, u8 idx, u8 vif_idx, u8 *mac)
+{
+	u8 zmac[ETH_ALEN] = {};
+	u32 attr;
+
+	attr = FIELD_PREP(MT_WCID_ATTR_BSS_IDX, vif_idx & 7) |
+	       FIELD_PREP(MT_WCID_ATTR_BSS_IDX_EXT, !!(vif_idx & 8));
+
+	mt76_wr(dev, MT_WCID_ATTR(idx), attr);
+
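+	/* If no MAC is given, write an all-zero address to clear the entry. */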
+	if (mac)
+		memcpy(zmac, mac, sizeof(zmac));
+
+	mt76x0_addr_wr(dev, MT_WCID_ADDR(idx), zmac);
+}
+
+void mt76x0_mac_set_ampdu_factor(struct mt76x0_dev *dev)
+{
+	struct ieee80211_sta *sta;
+	struct mt76_wcid *wcid;
+	void *msta;
+	u8 min_factor = 3;
+	int i;
+
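+	/* Note: AMPDU factor tuning is disabled for now, the early return
+	 * below makes the rest of this function unreachable.
+	 */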
+	return;
+
+	rcu_read_lock();
+	for (i = 0; i < ARRAY_SIZE(dev->wcid); i++) {
+		wcid = rcu_dereference(dev->wcid[i]);
+		if (!wcid)
+			continue;
+
+		msta = container_of(wcid, struct mt76_sta, wcid);
+		sta = container_of(msta, struct ieee80211_sta, drv_priv);
+
+		min_factor = min(min_factor, sta->ht_cap.ampdu_factor);
+	}
+	rcu_read_unlock();
+
+	mt76_wr(dev, MT_MAX_LEN_CFG, 0xa0fff |
+		   FIELD_PREP(MT_MAX_LEN_CFG_AMPDU, min_factor));
+}
+
+static void
+mt76_mac_process_rate(struct ieee80211_rx_status *status, u16 rate)
+{
+	u8 idx = FIELD_GET(MT_RXWI_RATE_INDEX, rate);
+
+	switch (FIELD_GET(MT_RXWI_RATE_PHY, rate)) {
+	case MT_PHY_TYPE_OFDM:
+		if (idx >= 8)
+			idx = 0;
+
+		if (status->band == NL80211_BAND_2GHZ)
+			idx += 4;
+
+		status->rate_idx = idx;
+		return;
+	case MT_PHY_TYPE_CCK:
+		if (idx >= 8) {
+			idx -= 8;
+			status->enc_flags |= RX_ENC_FLAG_SHORTPRE;
+		}
+
+		if (idx >= 4)
+			idx = 0;
+
+		status->rate_idx = idx;
+		return;
+	case MT_PHY_TYPE_HT_GF:
+		status->enc_flags |= RX_ENC_FLAG_HT_GF;
+		/* fall through */
+	case MT_PHY_TYPE_HT:
+		status->encoding = RX_ENC_HT;
+		status->rate_idx = idx;
+		break;
+	case MT_PHY_TYPE_VHT:
+		status->encoding = RX_ENC_VHT;
+		status->rate_idx = FIELD_GET(MT_RATE_INDEX_VHT_IDX, idx);
+		status->nss = FIELD_GET(MT_RATE_INDEX_VHT_NSS, idx) + 1;
+		break;
+	default:
+		WARN_ON(1);
+		return;
+	}
+
+	if (rate & MT_RXWI_RATE_LDPC)
+		status->enc_flags |= RX_ENC_FLAG_LDPC;
+
+	if (rate & MT_RXWI_RATE_SGI)
+		status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
+
+	if (rate & MT_RXWI_RATE_STBC)
+		status->enc_flags |= 1 << RX_ENC_FLAG_STBC_SHIFT;
+
+	switch (FIELD_GET(MT_RXWI_RATE_BW, rate)) {
+	case MT_PHY_BW_20:
+		break;
+	case MT_PHY_BW_40:
+		status->bw = RATE_INFO_BW_40;
+		break;
+	case MT_PHY_BW_80:
+		status->bw = RATE_INFO_BW_80;
+		break;
+	default:
+		WARN_ON(1);
+		break;
+	}
+}
+
+static void
+mt76x0_rx_monitor_beacon(struct mt76x0_dev *dev, struct mt76x0_rxwi *rxwi,
+			  u16 rate, int rssi)
+{
+	dev->bcn_phy_mode = FIELD_GET(MT_RXWI_RATE_PHY, rate);
+	dev->avg_rssi = ((dev->avg_rssi * 15) / 16 + (rssi << 8)) / 256;
+}
+
+static int
+mt76x0_rx_is_our_beacon(struct mt76x0_dev *dev, u8 *data)
+{
+	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)data;
+
+	return ieee80211_is_beacon(hdr->frame_control) &&
+		ether_addr_equal(hdr->addr2, dev->ap_bssid);
+}
+
+u32 mt76x0_mac_process_rx(struct mt76x0_dev *dev, struct sk_buff *skb,
+			u8 *data, void *rxi)
+{
+	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
+	struct mt76x0_rxwi *rxwi = rxi;
+	u32 len, ctl = le32_to_cpu(rxwi->ctl);
+	u16 rate = le16_to_cpu(rxwi->rate);
+	int rssi;
+
+	len = FIELD_GET(MT_RXWI_CTL_MPDU_LEN, ctl);
+	if (WARN_ON(len < 10))
+		return 0;
+
+	if (rxwi->rxinfo & cpu_to_le32(MT_RXINFO_DECRYPT)) {
+		status->flag |= RX_FLAG_DECRYPTED;
+		status->flag |= RX_FLAG_IV_STRIPPED | RX_FLAG_MMIC_STRIPPED;
+	}
+
+	status->chains = BIT(0);
+	rssi = mt76x0_phy_get_rssi(dev, rxwi);
+	status->chain_signal[0] = status->signal = rssi;
+	status->freq = dev->mt76.chandef.chan->center_freq;
+	status->band = dev->mt76.chandef.chan->band;
+
+	mt76_mac_process_rate(status, rate);
+
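+	/* Track the PHY mode and RSSI of our AP's beacons for connection
+	 * monitoring.
+	 */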
+	spin_lock_bh(&dev->con_mon_lock);
+	if (mt76x0_rx_is_our_beacon(dev, data)) {
+		mt76x0_rx_monitor_beacon(dev, rxwi, rate, rssi);
+	} else if (rxwi->rxinfo & cpu_to_le32(MT_RXINFO_U2M)) {
+		if (dev->avg_rssi == 0)
+			dev->avg_rssi = rssi;
+		else
+			dev->avg_rssi = (dev->avg_rssi * 15) / 16 + rssi / 16;
+
+	}
+	spin_unlock_bh(&dev->con_mon_lock);
+
+	return len;
+}
+
+static enum mt76_cipher_type
+mt76_mac_get_key_info(struct ieee80211_key_conf *key, u8 *key_data)
+{
+	memset(key_data, 0, 32);
+	if (!key)
+		return MT_CIPHER_NONE;
+
+	if (key->keylen > 32)
+		return MT_CIPHER_NONE;
+
+	memcpy(key_data, key->key, key->keylen);
+
+	switch (key->cipher) {
+	case WLAN_CIPHER_SUITE_WEP40:
+		return MT_CIPHER_WEP40;
+	case WLAN_CIPHER_SUITE_WEP104:
+		return MT_CIPHER_WEP104;
+	case WLAN_CIPHER_SUITE_TKIP:
+		return MT_CIPHER_TKIP;
+	case WLAN_CIPHER_SUITE_CCMP:
+		return MT_CIPHER_AES_CCMP;
+	default:
+		return MT_CIPHER_NONE;
+	}
+}
+
+int mt76x0_mac_wcid_set_key(struct mt76x0_dev *dev, u8 idx,
+			  struct ieee80211_key_conf *key)
+{
+	enum mt76_cipher_type cipher;
+	u8 key_data[32];
+	u8 iv_data[8];
+	u32 val;
+
+	cipher = mt76_mac_get_key_info(key, key_data);
+	if (cipher == MT_CIPHER_NONE && key)
+		return -EINVAL;
+
+	trace_mt76x0_set_key(&dev->mt76, idx);
+
+	mt76_wr_copy(dev, MT_WCID_KEY(idx), key_data, sizeof(key_data));
+
+	memset(iv_data, 0, sizeof(iv_data));
+	if (key) {
+		iv_data[3] = key->keyidx << 6;
+		if (cipher >= MT_CIPHER_TKIP) {
+			/* Note: start with 1 to comply with spec,
+			 *	 (see comment on common/cmm_wpa.c:4291).
+			 */
+			iv_data[0] |= 1;
+			iv_data[3] |= 0x20;
+		}
+	}
+	mt76_wr_copy(dev, MT_WCID_IV(idx), iv_data, sizeof(iv_data));
+
+	val = mt76_rr(dev, MT_WCID_ATTR(idx));
+	val &= ~MT_WCID_ATTR_PKEY_MODE & ~MT_WCID_ATTR_PKEY_MODE_EXT;
+	val |= FIELD_PREP(MT_WCID_ATTR_PKEY_MODE, cipher & 7) |
+	       FIELD_PREP(MT_WCID_ATTR_PKEY_MODE_EXT, cipher >> 3);
+	val &= ~MT_WCID_ATTR_PAIRWISE;
+	val |= MT_WCID_ATTR_PAIRWISE *
+		!!(key && key->flags & IEEE80211_KEY_FLAG_PAIRWISE);
+	mt76_wr(dev, MT_WCID_ATTR(idx), val);
+
+	return 0;
+}
+
+int mt76x0_mac_shared_key_setup(struct mt76x0_dev *dev, u8 vif_idx, u8 key_idx,
+			      struct ieee80211_key_conf *key)
+{
+	enum mt76_cipher_type cipher;
+	u8 key_data[32];
+	u32 val;
+
+	cipher = mt76_mac_get_key_info(key, key_data);
+	if (cipher == MT_CIPHER_NONE && key)
+		return -EINVAL;
+
+	trace_mt76x0_set_shared_key(&dev->mt76, vif_idx, key_idx);
+
+	mt76_wr_copy(dev, MT_SKEY(vif_idx, key_idx),
+			key_data, sizeof(key_data));
+
+	val = mt76_rr(dev, MT_SKEY_MODE(vif_idx));
+	val &= ~(MT_SKEY_MODE_MASK << MT_SKEY_MODE_SHIFT(vif_idx, key_idx));
+	val |= cipher << MT_SKEY_MODE_SHIFT(vif_idx, key_idx);
+	mt76_wr(dev, MT_SKEY_MODE(vif_idx), val);
+
+	return 0;
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/mac.h b/drivers/net/wireless/mediatek/mt76/mt76x0/mac.h
new file mode 100644
index 0000000000000000000000000000000000000000..bea067b71c13e4d508dc754982140d012706977d
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/mac.h
@@ -0,0 +1,154 @@
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MT76_MAC_H
+#define __MT76_MAC_H
+
+/* Note: the fields called "RSSI" and "SNR" in the original headers do not
+ *	 actually hold what their names suggest on MT76X0U; the names used by
+ *	 this driver are educated guesses (see vendor mac/ral_omac.c).
+ */
+struct mt76x0_rxwi {
+	__le32 rxinfo;
+
+	__le32 ctl;
+
+	__le16 tid_sn;
+	__le16 rate;
+
+	s8 rssi[4];
+
+	__le32 bbp_rxinfo[4];
+} __packed __aligned(4);
+
+#define MT_RXINFO_BA			BIT(0)
+#define MT_RXINFO_DATA			BIT(1)
+#define MT_RXINFO_NULL			BIT(2)
+#define MT_RXINFO_FRAG			BIT(3)
+#define MT_RXINFO_U2M			BIT(4)
+#define MT_RXINFO_MULTICAST		BIT(5)
+#define MT_RXINFO_BROADCAST		BIT(6)
+#define MT_RXINFO_MYBSS			BIT(7)
+#define MT_RXINFO_CRCERR		BIT(8)
+#define MT_RXINFO_ICVERR		BIT(9)
+#define MT_RXINFO_MICERR		BIT(10)
+#define MT_RXINFO_AMSDU			BIT(11)
+#define MT_RXINFO_HTC			BIT(12)
+#define MT_RXINFO_RSSI			BIT(13)
+#define MT_RXINFO_L2PAD			BIT(14)
+#define MT_RXINFO_AMPDU			BIT(15)
+#define MT_RXINFO_DECRYPT		BIT(16)
+#define MT_RXINFO_BSSIDX3		BIT(17)
+#define MT_RXINFO_WAPI_KEY		BIT(18)
+#define MT_RXINFO_PN_LEN		GENMASK(21, 19)
+#define MT_RXINFO_SW_PKT_80211		BIT(22)
+#define MT_RXINFO_TCP_SUM_BYPASS	BIT(28)
+#define MT_RXINFO_IP_SUM_BYPASS		BIT(29)
+#define MT_RXINFO_TCP_SUM_ERR		BIT(30)
+#define MT_RXINFO_IP_SUM_ERR		BIT(31)
+
+#define MT_RXWI_CTL_WCID		GENMASK(7, 0)
+#define MT_RXWI_CTL_KEY_IDX		GENMASK(9, 8)
+#define MT_RXWI_CTL_BSS_IDX		GENMASK(12, 10)
+#define MT_RXWI_CTL_UDF			GENMASK(15, 13)
+#define MT_RXWI_CTL_MPDU_LEN		GENMASK(27, 16)
+#define MT_RXWI_CTL_TID			GENMASK(31, 28)
+
+#define MT_RXWI_FRAG			GENMASK(3, 0)
+#define MT_RXWI_SN			GENMASK(15, 4)
+
+#define MT_RXWI_RATE_INDEX		GENMASK(5, 0)
+#define MT_RXWI_RATE_LDPC		BIT(6)
+#define MT_RXWI_RATE_BW			GENMASK(8, 7)
+#define MT_RXWI_RATE_SGI		BIT(9)
+#define MT_RXWI_RATE_STBC		BIT(10)
+#define MT_RXWI_RATE_LDPC_ETXBF		BIT(11)
+#define MT_RXWI_RATE_SND		BIT(12)
+#define MT_RXWI_RATE_PHY		GENMASK(15, 13)
+
+#define MT_RATE_INDEX_VHT_IDX		GENMASK(3, 0)
+#define MT_RATE_INDEX_VHT_NSS		GENMASK(5, 4)
+
+#define MT_RXWI_GAIN_RSSI_VAL		GENMASK(5, 0)
+#define MT_RXWI_GAIN_RSSI_LNA_ID	GENMASK(7, 6)
+#define MT_RXWI_ANT_AUX_LNA		BIT(7)
+
+#define MT_RXWI_EANT_ENC_ANT_ID		GENMASK(7, 0)
+
+enum mt76_phy_bandwidth {
+	MT_PHY_BW_20,
+	MT_PHY_BW_40,
+	MT_PHY_BW_80,
+};
+
+struct mt76_txwi {
+	__le16 flags;
+	__le16 rate_ctl;
+	u8 ack_ctl;
+	u8 wcid;
+	__le16 len_ctl;
+	__le32 iv;
+	__le32 eiv;
+	u8 aid;
+	u8 txstream;
+	u8 ctl2;
+	u8 pktid;
+} __packed __aligned(4);
+
+#define MT_TXWI_FLAGS_FRAG		BIT(0)
+#define MT_TXWI_FLAGS_MMPS		BIT(1)
+#define MT_TXWI_FLAGS_CFACK		BIT(2)
+#define MT_TXWI_FLAGS_TS		BIT(3)
+#define MT_TXWI_FLAGS_AMPDU		BIT(4)
+#define MT_TXWI_FLAGS_MPDU_DENSITY	GENMASK(7, 5)
+#define MT_TXWI_FLAGS_TXOP		GENMASK(9, 8)
+#define MT_TXWI_FLAGS_CWMIN		GENMASK(12, 10)
+#define MT_TXWI_FLAGS_NO_RATE_FALLBACK	BIT(13)
+#define MT_TXWI_FLAGS_TX_RPT		BIT(14)
+#define MT_TXWI_FLAGS_TX_RATE_LUT	BIT(15)
+
+#define MT_TXWI_RATE_MCS		GENMASK(6, 0)
+#define MT_TXWI_RATE_BW			BIT(7)
+#define MT_TXWI_RATE_SGI		BIT(8)
+#define MT_TXWI_RATE_STBC		GENMASK(10, 9)
+#define MT_TXWI_RATE_PHY_MODE		GENMASK(15, 14)
+
+#define MT_TXWI_ACK_CTL_REQ		BIT(0)
+#define MT_TXWI_ACK_CTL_NSEQ		BIT(1)
+#define MT_TXWI_ACK_CTL_BA_WINDOW	GENMASK(7, 2)
+
+#define MT_TXWI_LEN_BYTE_CNT		GENMASK(11, 0)
+
+#define MT_TXWI_CTL_TX_POWER_ADJ	GENMASK(3, 0)
+#define MT_TXWI_CTL_CHAN_CHECK_PKT	BIT(4)
+#define MT_TXWI_CTL_PIFS_REV		BIT(6)
+
+#define MT_TXWI_PKTID_PROBE             BIT(7)
+
+u32 mt76x0_mac_process_rx(struct mt76x0_dev *dev, struct sk_buff *skb,
+			u8 *data, void *rxi);
+int mt76x0_mac_wcid_set_key(struct mt76x0_dev *dev, u8 idx,
+			  struct ieee80211_key_conf *key);
+void mt76x0_mac_wcid_set_rate(struct mt76x0_dev *dev, struct mt76_wcid *wcid,
+			    const struct ieee80211_tx_rate *rate);
+
+int mt76x0_mac_shared_key_setup(struct mt76x0_dev *dev, u8 vif_idx, u8 key_idx,
+			      struct ieee80211_key_conf *key);
+u16 mt76x0_mac_tx_rate_val(struct mt76x0_dev *dev,
+			 const struct ieee80211_tx_rate *rate, u8 *nss_val);
+struct mt76_tx_status
+mt76x0_mac_fetch_tx_status(struct mt76x0_dev *dev);
+void mt76x0_send_tx_status(struct mt76x0_dev *dev, struct mt76_tx_status *stat, u8 *update);
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/main.c b/drivers/net/wireless/mediatek/mt76/mt76x0/main.c
new file mode 100644
index 0000000000000000000000000000000000000000..cf6ffb1ba4a290e1561374c68af9b947215af0ea
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/main.c
@@ -0,0 +1,403 @@
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "mt76x0.h"
+#include "mac.h"
+#include <linux/etherdevice.h>
+
+static int mt76x0_start(struct ieee80211_hw *hw)
+{
+	struct mt76x0_dev *dev = hw->priv;
+	int ret;
+
+	mutex_lock(&dev->mutex);
+
+	ret = mt76x0_mac_start(dev);
+	if (ret)
+		goto out;
+
+	ieee80211_queue_delayed_work(dev->mt76.hw, &dev->mac_work,
+				     MT_CALIBRATE_INTERVAL);
+	ieee80211_queue_delayed_work(dev->mt76.hw, &dev->cal_work,
+				     MT_CALIBRATE_INTERVAL);
+out:
+	mutex_unlock(&dev->mutex);
+	return ret;
+}
+
+static void mt76x0_stop(struct ieee80211_hw *hw)
+{
+	struct mt76x0_dev *dev = hw->priv;
+
+	mutex_lock(&dev->mutex);
+
+	cancel_delayed_work_sync(&dev->cal_work);
+	cancel_delayed_work_sync(&dev->mac_work);
+	mt76x0_mac_stop(dev);
+
+	mutex_unlock(&dev->mutex);
+}
+
+
+static int mt76x0_add_interface(struct ieee80211_hw *hw,
+				 struct ieee80211_vif *vif)
+{
+	struct mt76x0_dev *dev = hw->priv;
+	struct mt76_vif *mvif = (struct mt76_vif *) vif->drv_priv;
+	unsigned int idx;
+
+	idx = ffs(~dev->vif_mask);
+	if (!idx || idx > 8)
+		return -ENOSPC;
+
+	idx--;
+	dev->vif_mask |= BIT(idx);
+
+	mvif->idx = idx;
+	mvif->group_wcid.idx = GROUP_WCID(idx);
+	mvif->group_wcid.hw_key_idx = -1;
+
+	return 0;
+}
+
+static void mt76x0_remove_interface(struct ieee80211_hw *hw,
+				     struct ieee80211_vif *vif)
+{
+	struct mt76x0_dev *dev = hw->priv;
+	struct mt76_vif *mvif = (struct mt76_vif *) vif->drv_priv;
+	unsigned int wcid = mvif->group_wcid.idx;
+
+	dev->wcid_mask[wcid / BITS_PER_LONG] &= ~BIT(wcid % BITS_PER_LONG);
+	dev->vif_mask &= ~BIT(mvif->idx);
+}
+
+static int mt76x0_config(struct ieee80211_hw *hw, u32 changed)
+{
+	struct mt76x0_dev *dev = hw->priv;
+	int ret = 0;
+
+	mutex_lock(&dev->mutex);
+
+	if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
+		if (!(hw->conf.flags & IEEE80211_CONF_MONITOR))
+			dev->rxfilter |= MT_RX_FILTR_CFG_PROMISC;
+		else
+			dev->rxfilter &= ~MT_RX_FILTR_CFG_PROMISC;
+
+		mt76_wr(dev, MT_RX_FILTR_CFG, dev->rxfilter);
+	}
+
+	if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
+		ieee80211_stop_queues(hw);
+		ret = mt76x0_phy_set_channel(dev, &hw->conf.chandef);
+		ieee80211_wake_queues(hw);
+	}
+
+	mutex_unlock(&dev->mutex);
+
+	return ret;
+}
+
+static void
+mt76_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
+		      unsigned int *total_flags, u64 multicast)
+{
+	struct mt76x0_dev *dev = hw->priv;
+	u32 flags = 0;
+
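+/* A set hardware filter bit drops the matching frames, so each bit is set
+ * only when the corresponding FIF_* flag is *not* requested.
+ */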
+#define MT76_FILTER(_flag, _hw) do { \
+		flags |= *total_flags & FIF_##_flag;			\
+		dev->rxfilter &= ~(_hw);				\
+		dev->rxfilter |= !(flags & FIF_##_flag) * (_hw);	\
+	} while (0)
+
+	mutex_lock(&dev->mutex);
+
+	dev->rxfilter &= ~MT_RX_FILTR_CFG_OTHER_BSS;
+
+	MT76_FILTER(FCSFAIL, MT_RX_FILTR_CFG_CRC_ERR);
+	MT76_FILTER(PLCPFAIL, MT_RX_FILTR_CFG_PHY_ERR);
+	MT76_FILTER(CONTROL, MT_RX_FILTR_CFG_ACK |
+			     MT_RX_FILTR_CFG_CTS |
+			     MT_RX_FILTR_CFG_CFEND |
+			     MT_RX_FILTR_CFG_CFACK |
+			     MT_RX_FILTR_CFG_BA |
+			     MT_RX_FILTR_CFG_CTRL_RSV);
+	MT76_FILTER(PSPOLL, MT_RX_FILTR_CFG_PSPOLL);
+
+	*total_flags = flags;
+	mt76_wr(dev, MT_RX_FILTR_CFG, dev->rxfilter);
+
+	mutex_unlock(&dev->mutex);
+}
+
+static void
+mt76x0_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+			 struct ieee80211_bss_conf *info, u32 changed)
+{
+	struct mt76x0_dev *dev = hw->priv;
+
+	mutex_lock(&dev->mutex);
+
+	if (changed & BSS_CHANGED_ASSOC)
+		mt76x0_phy_con_cal_onoff(dev, info);
+
+	if (changed & BSS_CHANGED_BSSID) {
+		mt76x0_addr_wr(dev, MT_MAC_BSSID_DW0, info->bssid);
+
+		/* Note: this is a hack because beacon_int is not changed
+		 *	 on leave nor is any more appropriate event generated.
+		 *	 rt2x00 doesn't seem to be bothered though.
+		 */
+		if (is_zero_ether_addr(info->bssid))
+			mt76x0_mac_config_tsf(dev, false, 0);
+	}
+
+	if (changed & BSS_CHANGED_BASIC_RATES) {
+		mt76_wr(dev, MT_LEGACY_BASIC_RATE, info->basic_rates);
+		mt76_wr(dev, MT_HT_FBK_CFG0, 0x65432100);
+		mt76_wr(dev, MT_HT_FBK_CFG1, 0xedcba980);
+		mt76_wr(dev, MT_LG_FBK_CFG0, 0xedcba988);
+		mt76_wr(dev, MT_LG_FBK_CFG1, 0x00002100);
+	}
+
+	if (changed & BSS_CHANGED_BEACON_INT)
+		mt76x0_mac_config_tsf(dev, true, info->beacon_int);
+
+	if (changed & BSS_CHANGED_HT || changed & BSS_CHANGED_ERP_CTS_PROT)
+		mt76x0_mac_set_protection(dev, info->use_cts_prot,
+					   info->ht_operation_mode);
+
+	if (changed & BSS_CHANGED_ERP_PREAMBLE)
+		mt76x0_mac_set_short_preamble(dev, info->use_short_preamble);
+
+	if (changed & BSS_CHANGED_ERP_SLOT) {
+		int slottime = info->use_short_slot ? 9 : 20;
+
+		mt76_rmw_field(dev, MT_BKOFF_SLOT_CFG,
+			       MT_BKOFF_SLOT_CFG_SLOTTIME, slottime);
+	}
+
+	if (changed & BSS_CHANGED_ASSOC)
+		mt76x0_phy_recalibrate_after_assoc(dev);
+
+	mutex_unlock(&dev->mutex);
+}
+
+static int
+mt76x0_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+		struct ieee80211_sta *sta)
+{
+	struct mt76x0_dev *dev = hw->priv;
+	struct mt76_sta *msta = (struct mt76_sta *) sta->drv_priv;
+	struct mt76_vif *mvif = (struct mt76_vif *) vif->drv_priv;
+	int ret = 0;
+	int idx = 0;
+
+	mutex_lock(&dev->mutex);
+
+	idx = mt76_wcid_alloc(dev->wcid_mask, ARRAY_SIZE(dev->wcid));
+	if (idx < 0) {
+		ret = -ENOSPC;
+		goto out;
+	}
+
+	msta->wcid.idx = idx;
+	msta->wcid.hw_key_idx = -1;
+	mt76x0_mac_wcid_setup(dev, idx, mvif->idx, sta->addr);
+	mt76_clear(dev, MT_WCID_DROP(idx), MT_WCID_DROP_MASK(idx));
+	rcu_assign_pointer(dev->wcid[idx], &msta->wcid);
+	mt76x0_mac_set_ampdu_factor(dev);
+
+out:
+	mutex_unlock(&dev->mutex);
+
+	return ret;
+}
+
+static int
+mt76x0_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+		   struct ieee80211_sta *sta)
+{
+	struct mt76x0_dev *dev = hw->priv;
+	struct mt76_sta *msta = (struct mt76_sta *) sta->drv_priv;
+	int idx = msta->wcid.idx;
+
+	mutex_lock(&dev->mutex);
+	rcu_assign_pointer(dev->wcid[idx], NULL);
+	mt76_set(dev, MT_WCID_DROP(idx), MT_WCID_DROP_MASK(idx));
+	dev->wcid_mask[idx / BITS_PER_LONG] &= ~BIT(idx % BITS_PER_LONG);
+	mt76x0_mac_wcid_setup(dev, idx, 0, NULL);
+	mt76x0_mac_set_ampdu_factor(dev);
+	mutex_unlock(&dev->mutex);
+
+	return 0;
+}
+
+static void
+mt76x0_sta_notify(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+		   enum sta_notify_cmd cmd, struct ieee80211_sta *sta)
+{
+}
+
+static void
+mt76x0_sw_scan(struct ieee80211_hw *hw,
+		struct ieee80211_vif *vif,
+		const u8 *mac_addr)
+{
+	struct mt76x0_dev *dev = hw->priv;
+
+	cancel_delayed_work_sync(&dev->cal_work);
+	mt76x0_agc_save(dev);
+	set_bit(MT76_SCANNING, &dev->mt76.state);
+}
+
+static void
+mt76x0_sw_scan_complete(struct ieee80211_hw *hw,
+			 struct ieee80211_vif *vif)
+{
+	struct mt76x0_dev *dev = hw->priv;
+
+	mt76x0_agc_restore(dev);
+	clear_bit(MT76_SCANNING, &dev->mt76.state);
+
+	ieee80211_queue_delayed_work(dev->mt76.hw, &dev->cal_work,
+				     MT_CALIBRATE_INTERVAL);
+}
+
+static int
+mt76x0_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+		struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+		struct ieee80211_key_conf *key)
+{
+	struct mt76x0_dev *dev = hw->priv;
+	struct mt76_vif *mvif = (struct mt76_vif *) vif->drv_priv;
+	struct mt76_sta *msta = sta ? (struct mt76_sta *) sta->drv_priv : NULL;
+	struct mt76_wcid *wcid = msta ? &msta->wcid : &mvif->group_wcid;
+	int idx = key->keyidx;
+	int ret;
+
+	if (cmd == SET_KEY) {
+		key->hw_key_idx = wcid->idx;
+		wcid->hw_key_idx = idx;
+	} else {
+		if (idx == wcid->hw_key_idx)
+			wcid->hw_key_idx = -1;
+
+		key = NULL;
+	}
+
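+	/* Without a station this is a per-vif group key: program both the
+	 * group WCID and the shared key slot.
+	 */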
+	if (!msta) {
+		if (key || wcid->hw_key_idx == idx) {
+			ret = mt76x0_mac_wcid_set_key(dev, wcid->idx, key);
+			if (ret)
+				return ret;
+		}
+
+		return mt76x0_mac_shared_key_setup(dev, mvif->idx, idx, key);
+	}
+
+	return mt76x0_mac_wcid_set_key(dev, msta->wcid.idx, key);
+}
+
+static int mt76x0_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
+{
+	struct mt76x0_dev *dev = hw->priv;
+
+	mt76_rmw_field(dev, MT_TX_RTS_CFG, MT_TX_RTS_CFG_THRESH, value);
+
+	return 0;
+}
+
+static int
+mt76_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+		  struct ieee80211_ampdu_params *params)
+{
+	struct mt76x0_dev *dev = hw->priv;
+	struct ieee80211_sta *sta = params->sta;
+	enum ieee80211_ampdu_mlme_action action = params->action;
+	u16 tid = params->tid;
+	u16 *ssn = &params->ssn;
+	struct mt76_sta *msta = (struct mt76_sta *) sta->drv_priv;
+
+	WARN_ON(msta->wcid.idx > N_WCIDS);
+
+	switch (action) {
+	case IEEE80211_AMPDU_RX_START:
+		mt76_set(dev, MT_WCID_ADDR(msta->wcid.idx) + 4, BIT(16 + tid));
+		break;
+	case IEEE80211_AMPDU_RX_STOP:
+		mt76_clear(dev, MT_WCID_ADDR(msta->wcid.idx) + 4, BIT(16 + tid));
+		break;
+	case IEEE80211_AMPDU_TX_OPERATIONAL:
+		ieee80211_send_bar(vif, sta->addr, tid, msta->agg_ssn[tid]);
+		break;
+	case IEEE80211_AMPDU_TX_STOP_FLUSH:
+	case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
+		break;
+	case IEEE80211_AMPDU_TX_START:
+		msta->agg_ssn[tid] = *ssn << 4;
+		ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+		break;
+	case IEEE80211_AMPDU_TX_STOP_CONT:
+		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+		break;
+	}
+
+	return 0;
+}
+
+static void
+mt76_sta_rate_tbl_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+			 struct ieee80211_sta *sta)
+{
+	struct mt76x0_dev *dev = hw->priv;
+	struct mt76_sta *msta = (struct mt76_sta *) sta->drv_priv;
+	struct ieee80211_sta_rates *rates;
+	struct ieee80211_tx_rate rate = {};
+
+	rcu_read_lock();
+	rates = rcu_dereference(sta->rates);
+
+	if (!rates)
+		goto out;
+
+	rate.idx = rates->rate[0].idx;
+	rate.flags = rates->rate[0].flags;
+	mt76x0_mac_wcid_set_rate(dev, &msta->wcid, &rate);
+
+out:
+	rcu_read_unlock();
+}
+
+const struct ieee80211_ops mt76x0_ops = {
+	.tx = mt76x0_tx,
+	.start = mt76x0_start,
+	.stop = mt76x0_stop,
+	.add_interface = mt76x0_add_interface,
+	.remove_interface = mt76x0_remove_interface,
+	.config = mt76x0_config,
+	.configure_filter = mt76_configure_filter,
+	.bss_info_changed = mt76x0_bss_info_changed,
+	.sta_add = mt76x0_sta_add,
+	.sta_remove = mt76x0_sta_remove,
+	.sta_notify = mt76x0_sta_notify,
+	.set_key = mt76x0_set_key,
+	.conf_tx = mt76x0_conf_tx,
+	.sw_scan_start = mt76x0_sw_scan,
+	.sw_scan_complete = mt76x0_sw_scan_complete,
+	.ampdu_action = mt76_ampdu_action,
+	.sta_rate_tbl_update = mt76_sta_rate_tbl_update,
+	.set_rts_threshold = mt76x0_set_rts_threshold,
+};
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x0/mcu.c
new file mode 100644
index 0000000000000000000000000000000000000000..8affacbab90ad05e8673a18214156bbbfa6b7204
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/mcu.c
@@ -0,0 +1,656 @@
+/*
+ * (c) Copyright 2002-2010, Ralink Technology, Inc.
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/firmware.h>
+#include <linux/delay.h>
+#include <linux/usb.h>
+#include <linux/skbuff.h>
+
+#include "mt76x0.h"
+#include "dma.h"
+#include "mcu.h"
+#include "usb.h"
+#include "trace.h"
+
+#define MCU_FW_URB_MAX_PAYLOAD		0x38f8
+#define MCU_FW_URB_SIZE			(MCU_FW_URB_MAX_PAYLOAD + 12)
+#define MCU_RESP_URB_SIZE		1024
+
+static inline int firmware_running(struct mt76x0_dev *dev)
+{
+	return mt76_rr(dev, MT_MCU_COM_REG0) == 1;
+}
+
+static inline void skb_put_le32(struct sk_buff *skb, u32 val)
+{
+	put_unaligned_le32(val, skb_put(skb, 4));
+}
+
+static inline void mt76x0_dma_skb_wrap_cmd(struct sk_buff *skb,
+					    u8 seq, enum mcu_cmd cmd)
+{
+	WARN_ON(mt76x0_dma_skb_wrap(skb, CPU_TX_PORT, DMA_COMMAND,
+				     FIELD_PREP(MT_TXD_CMD_SEQ, seq) |
+				     FIELD_PREP(MT_TXD_CMD_TYPE, cmd)));
+}
+
+static inline void trace_mt76x0_mcu_msg_send_cs(struct mt76_dev *dev,
+					    struct sk_buff *skb, bool need_resp)
+{
+	u32 i, csum = 0;
+
+	for (i = 0; i < skb->len / 4; i++)
+		csum ^= get_unaligned_le32(skb->data + i * 4);
+
+	trace_mt76x0_mcu_msg_send(dev, skb, csum, need_resp);
+}
+
+static struct sk_buff *
+mt76x0_mcu_msg_alloc(struct mt76x0_dev *dev, const void *data, int len)
+{
+	struct sk_buff *skb;
+
+	WARN_ON(len % 4); /* if length is not divisible by 4 we need to pad */
+
+	skb = alloc_skb(len + MT_DMA_HDR_LEN + 4, GFP_KERNEL);
+	if (skb) {
+		skb_reserve(skb, MT_DMA_HDR_LEN);
+		memcpy(skb_put(skb, len), data, len);
+	}
+	return skb;
+}
+
+static void mt76x0_read_resp_regs(struct mt76x0_dev *dev, int len)
+{
+	int i;
+	int n = dev->mcu.reg_pairs_len;
+	u8 *buf = dev->mcu.resp.buf;
+
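+	/* Skip the leading rxfce word; the payload length also excludes a
+	 * 4-byte trailer that carries no register data.
+	 */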
+	buf += 4;
+	len -= 8;
+
+	if (dev->mcu.burst_read) {
+		u32 reg = dev->mcu.reg_pairs[0].reg - dev->mcu.reg_base;
+
+		WARN_ON_ONCE(len/4 != n);
+		for (i = 0; i < n; i++) {
+			u32 val = get_unaligned_le32(buf + 4*i);
+
+			dev->mcu.reg_pairs[i].reg = reg++;
+			dev->mcu.reg_pairs[i].value = val;
+		}
+	} else {
+		WARN_ON_ONCE(len/8 != n);
+		for (i = 0; i < n; i++) {
+			u32 reg = get_unaligned_le32(buf + 8*i) - dev->mcu.reg_base;
+			u32 val = get_unaligned_le32(buf + 8*i + 4);
+
+			WARN_ON_ONCE(dev->mcu.reg_pairs[i].reg != reg);
+			dev->mcu.reg_pairs[i].value = val;
+		}
+	}
+}
+
+static int mt76x0_mcu_wait_resp(struct mt76x0_dev *dev, u8 seq)
+{
+	struct urb *urb = dev->mcu.resp.urb;
+	u32 rxfce;
+	int urb_status, ret, try = 5;
+
+	while (try--) {
+		if (!wait_for_completion_timeout(&dev->mcu.resp_cmpl,
+						 msecs_to_jiffies(300))) {
+			dev_warn(dev->mt76.dev, "Warning: %s retrying\n", __func__);
+			continue;
+		}
+
+		/* Make copies of important data before reusing the urb */
+		rxfce = get_unaligned_le32(dev->mcu.resp.buf);
+		urb_status = urb->status * mt76x0_urb_has_error(urb);
+
+		if (urb_status == 0 && dev->mcu.reg_pairs)
+			mt76x0_read_resp_regs(dev, urb->actual_length);
+
+		ret = mt76x0_usb_submit_buf(dev, USB_DIR_IN, MT_EP_IN_CMD_RESP,
+					     &dev->mcu.resp, GFP_KERNEL,
+					     mt76x0_complete_urb,
+					     &dev->mcu.resp_cmpl);
+		if (ret)
+			return ret;
+
+		if (urb_status)
+			dev_err(dev->mt76.dev, "Error: MCU resp urb failed:%d\n",
+				urb_status);
+
+		if (FIELD_GET(MT_RXD_CMD_INFO_CMD_SEQ, rxfce) == seq &&
+		    FIELD_GET(MT_RXD_CMD_INFO_EVT_TYPE, rxfce) == CMD_DONE)
+			return 0;
+
+		dev_err(dev->mt76.dev, "Error: MCU resp evt:%lx seq:%hhx-%lx!\n",
+			FIELD_GET(MT_RXD_CMD_INFO_EVT_TYPE, rxfce),
+			seq, FIELD_GET(MT_RXD_CMD_INFO_CMD_SEQ, rxfce));
+	}
+
+	dev_err(dev->mt76.dev, "Error: %s timed out\n", __func__);
+	return -ETIMEDOUT;
+}
+
+static int
+__mt76x0_mcu_msg_send(struct mt76x0_dev *dev, struct sk_buff *skb,
+		      enum mcu_cmd cmd, bool wait_resp)
+{
+	struct usb_device *usb_dev = mt76x0_to_usb_dev(dev);
+	unsigned cmd_pipe = usb_sndbulkpipe(usb_dev,
+					    dev->out_ep[MT_EP_OUT_INBAND_CMD]);
+	int sent, ret;
+	u8 seq = 0;
+
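+	/* Commands that expect a response get a non-zero sequence number,
+	 * zero is left for fire-and-forget commands.
+	 */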
+	if (wait_resp)
+		while (!seq)
+			seq = ++dev->mcu.msg_seq & 0xf;
+
+	mt76x0_dma_skb_wrap_cmd(skb, seq, cmd);
+
+	if (dev->mcu.resp_cmpl.done)
+		dev_err(dev->mt76.dev, "Error: MCU response pre-completed!\n");
+
+	trace_mt76x0_mcu_msg_send_cs(&dev->mt76, skb, wait_resp);
+	trace_mt76x0_submit_urb_sync(&dev->mt76, cmd_pipe, skb->len);
+
+	ret = usb_bulk_msg(usb_dev, cmd_pipe, skb->data, skb->len, &sent, 500);
+	if (ret) {
+		dev_err(dev->mt76.dev, "Error: send MCU cmd failed:%d\n", ret);
+		goto out;
+	}
+	if (sent != skb->len)
+		dev_err(dev->mt76.dev, "Error: %s sent != skb->len\n", __func__);
+
+	if (wait_resp)
+		ret = mt76x0_mcu_wait_resp(dev, seq);
+
+out:
+	return ret;
+}
+
+static int
+mt76x0_mcu_msg_send(struct mt76x0_dev *dev, struct sk_buff *skb,
+		     enum mcu_cmd cmd, bool wait_resp)
+{
+	int ret;
+
+	if (test_bit(MT76_REMOVED, &dev->mt76.state))
+		return 0;
+
+	mutex_lock(&dev->mcu.mutex);
+	ret = __mt76x0_mcu_msg_send(dev, skb, cmd, wait_resp);
+	mutex_unlock(&dev->mcu.mutex);
+
+	consume_skb(skb);
+
+	return ret;
+}
+
+int mt76x0_mcu_function_select(struct mt76x0_dev *dev,
+			       enum mcu_function func, u32 val)
+{
+	struct sk_buff *skb;
+	struct {
+		__le32 id;
+		__le32 value;
+	} __packed __aligned(4) msg = {
+		.id = cpu_to_le32(func),
+		.value = cpu_to_le32(val),
+	};
+
+	skb = mt76x0_mcu_msg_alloc(dev, &msg, sizeof(msg));
+	if (!skb)
+		return -ENOMEM;
+	return mt76x0_mcu_msg_send(dev, skb, CMD_FUN_SET_OP,
+				   func == ATOMIC_TSSI_SETTING);
+}
+
+int
+mt76x0_mcu_calibrate(struct mt76x0_dev *dev, enum mcu_calibrate cal, u32 val)
+{
+	struct sk_buff *skb;
+	struct {
+		__le32 id;
+		__le32 value;
+	} __packed __aligned(4) msg = {
+		.id = cpu_to_le32(cal),
+		.value = cpu_to_le32(val),
+	};
+
+	skb = mt76x0_mcu_msg_alloc(dev, &msg, sizeof(msg));
+	if (!skb)
+		return -ENOMEM;
+	return mt76x0_mcu_msg_send(dev, skb, CMD_CALIBRATION_OP, true);
+}
+
+int mt76x0_write_reg_pairs(struct mt76x0_dev *dev, u32 base,
+			   const struct mt76_reg_pair *data, int n)
+{
+	const int max_vals_per_cmd = INBAND_PACKET_MAX_LEN / 8;
+	struct sk_buff *skb;
+	int cnt, i, ret;
+
+	if (!n)
+		return 0;
+
+	cnt = min(max_vals_per_cmd, n);
+
+	skb = alloc_skb(cnt * 8 + MT_DMA_HDR_LEN + 4, GFP_KERNEL);
+	if (!skb)
+		return -ENOMEM;
+	skb_reserve(skb, MT_DMA_HDR_LEN);
+
+	for (i = 0; i < cnt; i++) {
+		skb_put_le32(skb, base + data[i].reg);
+		skb_put_le32(skb, data[i].value);
+	}
+
+	ret = mt76x0_mcu_msg_send(dev, skb, CMD_RANDOM_WRITE, cnt == n);
+	if (ret)
+		return ret;
+
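+	/* Recurse for any register pairs that did not fit in this command. */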
+	return mt76x0_write_reg_pairs(dev, base, data + cnt, n - cnt);
+}
+
+int mt76x0_read_reg_pairs(struct mt76x0_dev *dev, u32 base,
+			  struct mt76_reg_pair *data, int n)
+{
+	const int max_vals_per_cmd = INBAND_PACKET_MAX_LEN / 8;
+	struct sk_buff *skb;
+	int cnt, i, ret;
+
+	if (!n)
+		return 0;
+
+	cnt = min(max_vals_per_cmd, n);
+	if (cnt != n)
+		return -EINVAL;
+
+	skb = alloc_skb(cnt * 8 + MT_DMA_HDR_LEN + 4, GFP_KERNEL);
+	if (!skb)
+		return -ENOMEM;
+	skb_reserve(skb, MT_DMA_HDR_LEN);
+
+	for (i = 0; i < cnt; i++) {
+		skb_put_le32(skb, base + data[i].reg);
+		skb_put_le32(skb, data[i].value);
+	}
+
+	mutex_lock(&dev->mcu.mutex);
+
+	dev->mcu.reg_pairs = data;
+	dev->mcu.reg_pairs_len = n;
+	dev->mcu.reg_base = base;
+	dev->mcu.burst_read = false;
+
+	ret = __mt76x0_mcu_msg_send(dev, skb, CMD_RANDOM_READ, true);
+
+	dev->mcu.reg_pairs = NULL;
+
+	mutex_unlock(&dev->mcu.mutex);
+
+	consume_skb(skb);
+
+	return ret;
+}
+
+int mt76x0_burst_write_regs(struct mt76x0_dev *dev, u32 offset,
+			     const u32 *data, int n)
+{
+	const int max_regs_per_cmd = INBAND_PACKET_MAX_LEN / 4 - 1;
+	struct sk_buff *skb;
+	int cnt, i, ret;
+
+	if (!n)
+		return 0;
+
+	cnt = min(max_regs_per_cmd, n);
+
+	skb = alloc_skb(cnt * 4 + MT_DMA_HDR_LEN + 4, GFP_KERNEL);
+	if (!skb)
+		return -ENOMEM;
+	skb_reserve(skb, MT_DMA_HDR_LEN);
+
+	skb_put_le32(skb, MT_MCU_MEMMAP_WLAN + offset);
+	for (i = 0; i < cnt; i++)
+		skb_put_le32(skb, data[i]);
+
+	ret = mt76x0_mcu_msg_send(dev, skb, CMD_BURST_WRITE, cnt == n);
+	if (ret)
+		return ret;
+
+	return mt76x0_burst_write_regs(dev, offset + cnt * 4,
+					data + cnt, n - cnt);
+}
+
+#if 0
+static int mt76x0_burst_read_regs(struct mt76x0_dev *dev, u32 base,
+				  struct mt76_reg_pair *data, int n)
+{
+	const int max_vals_per_cmd = INBAND_PACKET_MAX_LEN / 4 - 1;
+	struct sk_buff *skb;
+	int cnt, ret;
+
+	if (!n)
+		return 0;
+
+	cnt = min(max_vals_per_cmd, n);
+	if (cnt != n)
+		return -EINVAL;
+
+	skb = alloc_skb(cnt * 4 + MT_DMA_HDR_LEN + 4, GFP_KERNEL);
+	if (!skb)
+		return -ENOMEM;
+	skb_reserve(skb, MT_DMA_HDR_LEN);
+
+	skb_put_le32(skb, base + data[0].reg);
+	skb_put_le32(skb, n);
+
+	mutex_lock(&dev->mcu.mutex);
+
+	dev->mcu.reg_pairs = data;
+	dev->mcu.reg_pairs_len = n;
+	dev->mcu.reg_base = base;
+	dev->mcu.burst_read = true;
+
+	ret = __mt76x0_mcu_msg_send(dev, skb, CMD_BURST_READ, true);
+
+	dev->mcu.reg_pairs = NULL;
+
+	mutex_unlock(&dev->mcu.mutex);
+
+	consume_skb(skb);
+
+	return ret;
+}
+#endif
+
+struct mt76_fw_header {
+	__le32 ilm_len;
+	__le32 dlm_len;
+	__le16 build_ver;
+	__le16 fw_ver;
+	u8 pad[4];
+	char build_time[16];
+};
+
+struct mt76_fw {
+	struct mt76_fw_header hdr;
+	u8 ivb[MT_MCU_IVB_SIZE];
+	u8 ilm[];
+};
+
+static int __mt76x0_dma_fw(struct mt76x0_dev *dev,
+			    const struct mt76x0_dma_buf *dma_buf,
+			    const void *data, u32 len, u32 dst_addr)
+{
+	DECLARE_COMPLETION_ONSTACK(cmpl);
+	struct mt76x0_dma_buf buf = *dma_buf; /* we need to fake length */
+	__le32 reg;
+	u32 val;
+	int ret;
+
+	reg = cpu_to_le32(FIELD_PREP(MT_TXD_INFO_TYPE, DMA_COMMAND) |
+			  FIELD_PREP(MT_TXD_INFO_D_PORT, CPU_TX_PORT) |
+			  FIELD_PREP(MT_TXD_INFO_LEN, len));
+	memcpy(buf.buf, &reg, sizeof(reg));
+	memcpy(buf.buf + sizeof(reg), data, len);
+	memset(buf.buf + sizeof(reg) + len, 0, 8);
+
+	ret = mt76x0_vendor_single_wr(dev, MT_VEND_WRITE_FCE,
+				       MT_FCE_DMA_ADDR, dst_addr);
+	if (ret)
+		return ret;
+	len = roundup(len, 4);
+	ret = mt76x0_vendor_single_wr(dev, MT_VEND_WRITE_FCE,
+				       MT_FCE_DMA_LEN, len << 16);
+	if (ret)
+		return ret;
+
+	buf.len = MT_DMA_HDR_LEN + len + 4;
+	ret = mt76x0_usb_submit_buf(dev, USB_DIR_OUT, MT_EP_OUT_INBAND_CMD,
+				     &buf, GFP_KERNEL,
+				     mt76x0_complete_urb, &cmpl);
+	if (ret)
+		return ret;
+
+	if (!wait_for_completion_timeout(&cmpl, msecs_to_jiffies(1000))) {
+		dev_err(dev->mt76.dev, "Error: firmware upload timed out\n");
+		usb_kill_urb(buf.urb);
+		return -ETIMEDOUT;
+	}
+	if (mt76x0_urb_has_error(buf.urb)) {
+		dev_err(dev->mt76.dev, "Error: firmware upload urb failed:%d\n",
+			buf.urb->status);
+		return buf.urb->status;
+	}
+
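+	/* Advance the CPU descriptor index to signal that another chunk has
+	 * been queued for the FCE.
+	 */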
+	val = mt76_rr(dev, MT_TX_CPU_FROM_FCE_CPU_DESC_IDX);
+	val++;
+	mt76_wr(dev, MT_TX_CPU_FROM_FCE_CPU_DESC_IDX, val);
+
+	msleep(5);
+
+	return 0;
+}
+
+static int
+mt76x0_dma_fw(struct mt76x0_dev *dev, struct mt76x0_dma_buf *dma_buf,
+	       const void *data, int len, u32 dst_addr)
+{
+	int n, ret;
+
+	if (len == 0)
+		return 0;
+
+	n = min(MCU_FW_URB_MAX_PAYLOAD, len);
+	ret = __mt76x0_dma_fw(dev, dma_buf, data, n, dst_addr);
+	if (ret)
+		return ret;
+
+#if 0
+	if (!mt76_poll_msec(dev, MT_MCU_COM_REG1, BIT(31), BIT(31), 500))
+		return -ETIMEDOUT;
+#endif
+
+	return mt76x0_dma_fw(dev, dma_buf, data + n, len - n, dst_addr + n);
+}
+
+static int
+mt76x0_upload_firmware(struct mt76x0_dev *dev, const struct mt76_fw *fw)
+{
+	struct mt76x0_dma_buf dma_buf;
+	void *ivb;
+	u32 ilm_len, dlm_len;
+	int i, ret;
+
+	ivb = kmemdup(fw->ivb, sizeof(fw->ivb), GFP_KERNEL);
+	if (!ivb)
+		return -ENOMEM;
+	if (mt76x0_usb_alloc_buf(dev, MCU_FW_URB_SIZE, &dma_buf)) {
+		ret = -ENOMEM;
+		goto error;
+	}
+
+	ilm_len = le32_to_cpu(fw->hdr.ilm_len) - sizeof(fw->ivb);
+	dev_dbg(dev->mt76.dev, "loading FW - ILM %u + IVB %zu\n",
+		ilm_len, sizeof(fw->ivb));
+	ret = mt76x0_dma_fw(dev, &dma_buf, fw->ilm, ilm_len, sizeof(fw->ivb));
+	if (ret)
+		goto error;
+
+	dlm_len = le32_to_cpu(fw->hdr.dlm_len);
+	dev_dbg(dev->mt76.dev, "loading FW - DLM %u\n", dlm_len);
+	ret = mt76x0_dma_fw(dev, &dma_buf, fw->ilm + ilm_len,
+			     dlm_len, MT_MCU_DLM_OFFSET);
+	if (ret)
+		goto error;
+
+	ret = mt76x0_vendor_request(dev, MT_VEND_DEV_MODE, USB_DIR_OUT,
+				     0x12, 0, ivb, sizeof(fw->ivb));
+	if (ret < 0)
+		goto error;
+	ret = 0;
+
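+	/* Wait up to one second (100 x 10 ms) for the firmware to come up. */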
+	for (i = 100; i && !firmware_running(dev); i--)
+		msleep(10);
+	if (!i) {
+		ret = -ETIMEDOUT;
+		goto error;
+	}
+
+	dev_dbg(dev->mt76.dev, "Firmware running!\n");
+error:
+	kfree(ivb);
+	mt76x0_usb_free_buf(dev, &dma_buf);
+
+	return ret;
+}
+
+static int mt76x0_load_firmware(struct mt76x0_dev *dev)
+{
+	const struct firmware *fw;
+	const struct mt76_fw_header *hdr;
+	int len, ret;
+	u32 val;
+
+	mt76_wr(dev, MT_USB_DMA_CFG, (MT_USB_DMA_CFG_RX_BULK_EN |
+					 MT_USB_DMA_CFG_TX_BULK_EN));
+
+	if (firmware_running(dev))
+		return 0;
+
+	ret = request_firmware(&fw, MT7610_FIRMWARE, dev->mt76.dev);
+	if (ret)
+		return ret;
+
+	if (!fw || !fw->data || fw->size < sizeof(*hdr))
+		goto err_inv_fw;
+
+	hdr = (const struct mt76_fw_header *) fw->data;
+
+	if (le32_to_cpu(hdr->ilm_len) <= MT_MCU_IVB_SIZE)
+		goto err_inv_fw;
+
+	len = sizeof(*hdr);
+	len += le32_to_cpu(hdr->ilm_len);
+	len += le32_to_cpu(hdr->dlm_len);
+
+	if (fw->size != len)
+		goto err_inv_fw;
+
+	val = le16_to_cpu(hdr->fw_ver);
+	dev_dbg(dev->mt76.dev,
+		 "Firmware Version: %d.%d.%02d Build: %x Build time: %.16s\n",
+		 (val >> 12) & 0xf, (val >> 8) & 0xf, val & 0xf,
+		 le16_to_cpu(hdr->build_ver), hdr->build_time);
+
+	len = le32_to_cpu(hdr->ilm_len);
+
+	mt76_wr(dev, 0x1004, 0x2c);
+
+	mt76_set(dev, MT_USB_DMA_CFG, (MT_USB_DMA_CFG_RX_BULK_EN |
+				       MT_USB_DMA_CFG_TX_BULK_EN) |
+				       FIELD_PREP(MT_USB_DMA_CFG_RX_BULK_AGG_TOUT, 0x20));
+	mt76x0_vendor_reset(dev);
+	msleep(5);
+/*
+	mt76x0_rmw(dev, MT_PBF_CFG, 0, (MT_PBF_CFG_TX0Q_EN |
+					 MT_PBF_CFG_TX1Q_EN |
+					 MT_PBF_CFG_TX2Q_EN |
+					 MT_PBF_CFG_TX3Q_EN));
+*/
+
+	mt76_wr(dev, MT_FCE_PSE_CTRL, 1);
+
+	/* FCE tx_fs_base_ptr */
+	mt76_wr(dev, MT_TX_CPU_FROM_FCE_BASE_PTR, 0x400230);
+	/* FCE tx_fs_max_cnt */
+	mt76_wr(dev, MT_TX_CPU_FROM_FCE_MAX_COUNT, 1);
+	/* FCE pdma enable */
+	mt76_wr(dev, MT_FCE_PDMA_GLOBAL_CONF, 0x44);
+	/* FCE skip_fs_en */
+	mt76_wr(dev, MT_FCE_SKIP_FS, 3);
+
+	val = mt76_rr(dev, MT_USB_DMA_CFG);
+	val |= MT_USB_DMA_CFG_TX_WL_DROP;
+	mt76_wr(dev, MT_USB_DMA_CFG, val);
+	val &= ~MT_USB_DMA_CFG_TX_WL_DROP;
+	mt76_wr(dev, MT_USB_DMA_CFG, val);
+
+	ret = mt76x0_upload_firmware(dev, (const struct mt76_fw *)fw->data);
+	release_firmware(fw);
+
+	mt76_wr(dev, MT_FCE_PSE_CTRL, 1);
+
+	return ret;
+
+err_inv_fw:
+	dev_err(dev->mt76.dev, "Invalid firmware image\n");
+	release_firmware(fw);
+	return -ENOENT;
+}
+
+int mt76x0_mcu_init(struct mt76x0_dev *dev)
+{
+	int ret;
+
+	mutex_init(&dev->mcu.mutex);
+
+	ret = mt76x0_load_firmware(dev);
+	if (ret)
+		return ret;
+
+	set_bit(MT76_STATE_MCU_RUNNING, &dev->mt76.state);
+
+	return 0;
+}
+
+int mt76x0_mcu_cmd_init(struct mt76x0_dev *dev)
+{
+	int ret;
+
+	ret = mt76x0_mcu_function_select(dev, Q_SELECT, 1);
+	if (ret)
+		return ret;
+
+	init_completion(&dev->mcu.resp_cmpl);
+	if (mt76x0_usb_alloc_buf(dev, MCU_RESP_URB_SIZE, &dev->mcu.resp)) {
+		mt76x0_usb_free_buf(dev, &dev->mcu.resp);
+		return -ENOMEM;
+	}
+
+	ret = mt76x0_usb_submit_buf(dev, USB_DIR_IN, MT_EP_IN_CMD_RESP,
+				     &dev->mcu.resp, GFP_KERNEL,
+				     mt76x0_complete_urb, &dev->mcu.resp_cmpl);
+	if (ret) {
+		mt76x0_usb_free_buf(dev, &dev->mcu.resp);
+		return ret;
+	}
+
+	return 0;
+}
+
+void mt76x0_mcu_cmd_deinit(struct mt76x0_dev *dev)
+{
+	usb_kill_urb(dev->mcu.resp.urb);
+	mt76x0_usb_free_buf(dev, &dev->mcu.resp);
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/mcu.h b/drivers/net/wireless/mediatek/mt76/mt76x0/mcu.h
new file mode 100644
index 0000000000000000000000000000000000000000..8c2f77f4c3f577daf989e1392571518554fa05fe
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/mcu.h
@@ -0,0 +1,101 @@
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MT76X0U_MCU_H
+#define __MT76X0U_MCU_H
+
+struct mt76x0_dev;
+
+/* Register definitions */
+#define MT_MCU_RESET_CTL		0x070C
+#define MT_MCU_INT_LEVEL		0x0718
+#define MT_MCU_COM_REG0			0x0730
+#define MT_MCU_COM_REG1			0x0734
+#define MT_MCU_COM_REG2			0x0738
+#define MT_MCU_COM_REG3			0x073C
+
+#define MT_MCU_IVB_SIZE			0x40
+#define MT_MCU_DLM_OFFSET		0x80000
+
+#define MT_MCU_MEMMAP_WLAN		0x00410000
+/* We use same space for BBP as for MAC regs
+ * #define MT_MCU_MEMMAP_BBP		0x40000000
+ */
+#define MT_MCU_MEMMAP_RF		0x80000000
+
+#define INBAND_PACKET_MAX_LEN		192
+
+enum mcu_cmd {
+	CMD_FUN_SET_OP = 1,
+	CMD_LOAD_CR = 2,
+	CMD_INIT_GAIN_OP = 3,
+	CMD_DYNC_VGA_OP = 6,
+	CMD_TDLS_CH_SW = 7,
+	CMD_BURST_WRITE = 8,
+	CMD_READ_MODIFY_WRITE = 9,
+	CMD_RANDOM_READ = 10,
+	CMD_BURST_READ = 11,
+	CMD_RANDOM_WRITE = 12,
+	CMD_LED_MODE_OP = 16,
+	CMD_POWER_SAVING_OP = 20,
+	CMD_WOW_CONFIG = 21,
+	CMD_WOW_QUERY = 22,
+	CMD_WOW_FEATURE = 24,
+	CMD_CARRIER_DETECT_OP = 28,
+	CMD_RADOR_DETECT_OP = 29,
+	CMD_SWITCH_CHANNEL_OP = 30,
+	CMD_CALIBRATION_OP = 31,
+	CMD_BEACON_OP = 32,
+	CMD_ANTENNA_OP = 33,
+};
+
+enum mcu_function {
+	Q_SELECT = 1,
+	BW_SETTING = 2,
+	ATOMIC_TSSI_SETTING = 5,
+};
+
+enum mcu_power_mode {
+	RADIO_OFF = 0x30,
+	RADIO_ON = 0x31,
+	RADIO_OFF_AUTO_WAKEUP = 0x32,
+	RADIO_OFF_ADVANCE = 0x33,
+	RADIO_ON_ADVANCE = 0x34,
+};
+
+enum mcu_calibrate {
+	MCU_CAL_R = 1,
+	MCU_CAL_RXDCOC,
+	MCU_CAL_LC,
+	MCU_CAL_LOFT,
+	MCU_CAL_TXIQ,
+	MCU_CAL_BW,
+	MCU_CAL_DPD,
+	MCU_CAL_RXIQ,
+	MCU_CAL_TXDCOC,
+	MCU_CAL_RX_GROUP_DELAY,
+	MCU_CAL_TX_GROUP_DELAY,
+};
+
+int mt76x0_mcu_init(struct mt76x0_dev *dev);
+int mt76x0_mcu_cmd_init(struct mt76x0_dev *dev);
+void mt76x0_mcu_cmd_deinit(struct mt76x0_dev *dev);
+
+int
+mt76x0_mcu_calibrate(struct mt76x0_dev *dev, enum mcu_calibrate cal, u32 val);
+
+int
+mt76x0_mcu_function_select(struct mt76x0_dev *dev, enum mcu_function func, u32 val);
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h b/drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h
new file mode 100644
index 0000000000000000000000000000000000000000..fc9857f61771ccbc9d712c94402c455f498b8853
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h
@@ -0,0 +1,330 @@
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MT76X0U_H
+#define MT76X0U_H
+
+#include <linux/bitfield.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/mutex.h>
+#include <linux/usb.h>
+#include <linux/completion.h>
+#include <net/mac80211.h>
+#include <linux/debugfs.h>
+
+#include "../mt76.h"
+#include "regs.h"
+
+#define MT_CALIBRATE_INTERVAL		(4 * HZ)
+
+#define MT_FREQ_CAL_INIT_DELAY		(30 * HZ)
+#define MT_FREQ_CAL_CHECK_INTERVAL	(10 * HZ)
+#define MT_FREQ_CAL_ADJ_INTERVAL	(HZ / 2)
+
+#define MT_BBP_REG_VERSION		0x00
+
+#define MT_USB_AGGR_SIZE_LIMIT		21 /* in units of 1024 B */
+#define MT_USB_AGGR_TIMEOUT		0x80 /* in units of 33 ns */
+#define MT_RX_ORDER			3
+#define MT_RX_URB_SIZE			(PAGE_SIZE << MT_RX_ORDER)
+
+struct mt76x0_dma_buf {
+	struct urb *urb;
+	void *buf;
+	dma_addr_t dma;
+	size_t len;
+};
+
+struct mt76x0_mcu {
+	struct mutex mutex;
+
+	u8 msg_seq;
+
+	struct mt76x0_dma_buf resp;
+	struct completion resp_cmpl;
+
+	struct mt76_reg_pair *reg_pairs;
+	unsigned int reg_pairs_len;
+	u32 reg_base;
+	bool burst_read;
+};
+
+struct mac_stats {
+	u64 rx_stat[6];
+	u64 tx_stat[6];
+	u64 aggr_stat[2];
+	u64 aggr_n[32];
+	u64 zero_len_del[2];
+};
+
+#define N_RX_ENTRIES	16
+struct mt76x0_rx_queue {
+	struct mt76x0_dev *dev;
+
+	struct mt76x0_dma_buf_rx {
+		struct urb *urb;
+		struct page *p;
+	} e[N_RX_ENTRIES];
+
+	unsigned int start;
+	unsigned int end;
+	unsigned int entries;
+	unsigned int pending;
+};
+
+#define N_TX_ENTRIES	64
+
+struct mt76x0_tx_queue {
+	struct mt76x0_dev *dev;
+
+	struct mt76x0_dma_buf_tx {
+		struct urb *urb;
+		struct sk_buff *skb;
+	} e[N_TX_ENTRIES];
+
+	unsigned int start;
+	unsigned int end;
+	unsigned int entries;
+	unsigned int used;
+	unsigned int fifo_seq;
+};
+
+/* WCID allocation:
+ *     0: mcast wcid
+ *     1: bssid wcid
+ *  1...: STAs
+ * ...7e: group wcids
+ *    7f: reserved
+ */
+#define N_WCIDS		128
+#define GROUP_WCID(idx)	(254 - (idx))
+
+struct mt76x0_eeprom_params;
+
+#define MT_EE_TEMPERATURE_SLOPE		39
+#define MT_FREQ_OFFSET_INVALID		-128
+
+/* addr req mask */
+#define MT_VEND_TYPE_EEPROM	BIT(31)
+#define MT_VEND_TYPE_CFG	BIT(30)
+#define MT_VEND_TYPE_MASK	(MT_VEND_TYPE_EEPROM | MT_VEND_TYPE_CFG)
+
+#define MT_VEND_ADDR(type, n)	(MT_VEND_TYPE_##type | (n))
+
+enum mt_bw {
+	MT_BW_20,
+	MT_BW_40,
+};
+
+/**
+ * struct mt76x0_dev - adapter structure
+ * @lock:		protects @wcid->tx_rate.
+ * @mac_lock:		locks out mac80211's tx status and rx paths.
+ * @tx_lock:		protects @tx_q and changes of MT76_STATE_*_STATS
+ *			flags in @state.
+ * @rx_lock:		protects @rx_q.
+ * @con_mon_lock:	protects @ap_bssid, @bcn_*, @avg_rssi.
+ * @mutex:		ensures exclusive access from mac80211 callbacks.
+ * @reg_atomic_mutex:	ensures atomicity of indirect register accesses
+ *			(accesses to RF and BBP).
+ * @hw_atomic_mutex:	ensures exclusive access to HW during critical
+ *			operations (power management, channel switch).
+ */
+struct mt76x0_dev {
+	struct mt76_dev mt76; /* must be first */
+
+	struct mutex mutex;
+
+	struct mutex usb_ctrl_mtx;
+	u8 data[32];
+
+	struct tasklet_struct rx_tasklet;
+	struct tasklet_struct tx_tasklet;
+
+	u8 out_ep[__MT_EP_OUT_MAX];
+	u16 out_max_packet;
+	u8 in_ep[__MT_EP_IN_MAX];
+	u16 in_max_packet;
+
+	unsigned long wcid_mask[DIV_ROUND_UP(N_WCIDS, BITS_PER_LONG)];
+	unsigned long vif_mask;
+
+	struct mt76x0_mcu mcu;
+
+	struct delayed_work cal_work;
+	struct delayed_work mac_work;
+
+	struct workqueue_struct *stat_wq;
+	struct delayed_work stat_work;
+
+	struct mt76_wcid *mon_wcid;
+	struct mt76_wcid __rcu *wcid[N_WCIDS];
+
+	spinlock_t mac_lock;
+
+	const u16 *beacon_offsets;
+
+	u8 macaddr[ETH_ALEN];
+	struct mt76x0_eeprom_params *ee;
+
+	struct mutex reg_atomic_mutex;
+	struct mutex hw_atomic_mutex;
+
+	u32 rxfilter;
+	u32 debugfs_reg;
+
+	/* TX */
+	spinlock_t tx_lock;
+	struct mt76x0_tx_queue *tx_q;
+	struct sk_buff_head tx_skb_done;
+
+	atomic_t avg_ampdu_len;
+
+	/* RX */
+	spinlock_t rx_lock;
+	struct mt76x0_rx_queue rx_q;
+
+	/* Connection monitoring things */
+	spinlock_t con_mon_lock;
+	u8 ap_bssid[ETH_ALEN];
+
+	s8 bcn_freq_off;
+	u8 bcn_phy_mode;
+
+	int avg_rssi; /* starts at 0 and converges */
+
+	u8 agc_save;
+	u16 chainmask;
+
+	struct mac_stats stats;
+};
+
+struct mt76x0_wcid {
+	u8 idx;
+	u8 hw_key_idx;
+
+	u16 tx_rate;
+	bool tx_rate_set;
+	u8 tx_rate_nss;
+};
+
+struct mt76_vif {
+	u8 idx;
+
+	struct mt76_wcid group_wcid;
+};
+
+struct mt76_tx_status {
+	u8 valid:1;
+	u8 success:1;
+	u8 aggr:1;
+	u8 ack_req:1;
+	u8 is_probe:1;
+	u8 wcid;
+	u8 pktid;
+	u8 retry;
+	u16 rate;
+} __packed __aligned(2);
+
+struct mt76_sta {
+	struct mt76_wcid wcid;
+	struct mt76_tx_status status;
+	int n_frames;
+	u16 agg_ssn[IEEE80211_NUM_TIDS];
+};
+
+struct mt76_reg_pair {
+	u32 reg;
+	u32 value;
+};
+
+struct mt76x0_rxwi;
+
+extern const struct ieee80211_ops mt76x0_ops;
+
+static inline bool is_mt7610e(struct mt76x0_dev *dev)
+{
+	/* TODO */
+	return false;
+}
+
+void mt76x0_init_debugfs(struct mt76x0_dev *dev);
+
+int mt76x0_wait_asic_ready(struct mt76x0_dev *dev);
+
+/* Compatibility with mt76 */
+#define mt76_rmw_field(_dev, _reg, _field, _val)	\
+	mt76_rmw(_dev, _reg, _field, FIELD_PREP(_field, _val))
+
+int mt76x0_write_reg_pairs(struct mt76x0_dev *dev, u32 base,
+			    const struct mt76_reg_pair *data, int len);
+int mt76x0_read_reg_pairs(struct mt76x0_dev *dev, u32 base,
+			  struct mt76_reg_pair *data, int len);
+int mt76x0_burst_write_regs(struct mt76x0_dev *dev, u32 offset,
+			     const u32 *data, int n);
+void mt76x0_addr_wr(struct mt76x0_dev *dev, const u32 offset, const u8 *addr);
+
+/* Init */
+struct mt76x0_dev *mt76x0_alloc_device(struct device *dev);
+int mt76x0_init_hardware(struct mt76x0_dev *dev);
+int mt76x0_register_device(struct mt76x0_dev *dev);
+void mt76x0_cleanup(struct mt76x0_dev *dev);
+void mt76x0_chip_onoff(struct mt76x0_dev *dev, bool enable, bool reset);
+
+int mt76x0_mac_start(struct mt76x0_dev *dev);
+void mt76x0_mac_stop(struct mt76x0_dev *dev);
+
+/* PHY */
+void mt76x0_phy_init(struct mt76x0_dev *dev);
+int mt76x0_wait_bbp_ready(struct mt76x0_dev *dev);
+void mt76x0_agc_save(struct mt76x0_dev *dev);
+void mt76x0_agc_restore(struct mt76x0_dev *dev);
+int mt76x0_phy_set_channel(struct mt76x0_dev *dev,
+			    struct cfg80211_chan_def *chandef);
+void mt76x0_phy_recalibrate_after_assoc(struct mt76x0_dev *dev);
+int mt76x0_phy_get_rssi(struct mt76x0_dev *dev, struct mt76x0_rxwi *rxwi);
+void mt76x0_phy_con_cal_onoff(struct mt76x0_dev *dev,
+			       struct ieee80211_bss_conf *info);
+
+/* MAC */
+void mt76x0_mac_work(struct work_struct *work);
+void mt76x0_mac_set_protection(struct mt76x0_dev *dev, bool legacy_prot,
+				int ht_mode);
+void mt76x0_mac_set_short_preamble(struct mt76x0_dev *dev, bool short_preamb);
+void mt76x0_mac_config_tsf(struct mt76x0_dev *dev, bool enable, int interval);
+void
+mt76x0_mac_wcid_setup(struct mt76x0_dev *dev, u8 idx, u8 vif_idx, u8 *mac);
+void mt76x0_mac_set_ampdu_factor(struct mt76x0_dev *dev);
+
+/* TX */
+void mt76x0_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
+		struct sk_buff *skb);
+int mt76x0_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+		    u16 queue, const struct ieee80211_tx_queue_params *params);
+void mt76x0_tx_status(struct mt76x0_dev *dev, struct sk_buff *skb);
+void mt76x0_tx_stat(struct work_struct *work);
+
+/* util */
+void mt76x0_remove_hdr_pad(struct sk_buff *skb);
+int mt76x0_insert_hdr_pad(struct sk_buff *skb);
+
+int mt76x0_dma_init(struct mt76x0_dev *dev);
+void mt76x0_dma_cleanup(struct mt76x0_dev *dev);
+
+int mt76x0_dma_enqueue_tx(struct mt76x0_dev *dev, struct sk_buff *skb,
+			   struct mt76_wcid *wcid, int hw_q);
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c b/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c
new file mode 100644
index 0000000000000000000000000000000000000000..5da7bfbe907ff65c8ac3b871a468ee4e287bba5c
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c
@@ -0,0 +1,1008 @@
+/*
+ * (c) Copyright 2002-2010, Ralink Technology, Inc.
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "mt76x0.h"
+#include "mcu.h"
+#include "eeprom.h"
+#include "trace.h"
+#include "phy.h"
+#include "initvals.h"
+#include "initvals_phy.h"
+
+#include <linux/etherdevice.h>
+
+static int
+mt76x0_rf_csr_wr(struct mt76x0_dev *dev, u32 offset, u8 value)
+{
+	int ret = 0;
+	u8 bank, reg;
+
+	if (test_bit(MT76_REMOVED, &dev->mt76.state))
+		return -ENODEV;
+
+	bank = MT_RF_BANK(offset);
+	reg = MT_RF_REG(offset);
+
+	if (WARN_ON_ONCE(reg > 64) || WARN_ON_ONCE(bank > 8))
+		return -EINVAL;
+
+	mutex_lock(&dev->reg_atomic_mutex);
+
+	if (!mt76_poll(dev, MT_RF_CSR_CFG, MT_RF_CSR_CFG_KICK, 0, 100)) {
+		ret = -ETIMEDOUT;
+		goto out;
+	}
+
+	mt76_wr(dev, MT_RF_CSR_CFG,
+		   FIELD_PREP(MT_RF_CSR_CFG_DATA, value) |
+		   FIELD_PREP(MT_RF_CSR_CFG_REG_BANK, bank) |
+		   FIELD_PREP(MT_RF_CSR_CFG_REG_ID, reg) |
+		   MT_RF_CSR_CFG_WR |
+		   MT_RF_CSR_CFG_KICK);
+	trace_mt76x0_rf_write(&dev->mt76, bank, offset, value);
+out:
+	mutex_unlock(&dev->reg_atomic_mutex);
+
+	if (ret < 0)
+		dev_err(dev->mt76.dev, "Error: RF write %d:%d failed:%d!!\n",
+			bank, reg, ret);
+
+	return ret;
+}
+
+static int
+mt76x0_rf_csr_rr(struct mt76x0_dev *dev, u32 offset)
+{
+	int ret = -ETIMEDOUT;
+	u32 val;
+	u8 bank, reg;
+
+	if (test_bit(MT76_REMOVED, &dev->mt76.state))
+		return -ENODEV;
+
+	bank = MT_RF_BANK(offset);
+	reg = MT_RF_REG(offset);
+
+	if (WARN_ON_ONCE(reg > 64) || WARN_ON_ONCE(bank > 8))
+		return -EINVAL;
+
+	mutex_lock(&dev->reg_atomic_mutex);
+
+	if (!mt76_poll(dev, MT_RF_CSR_CFG, MT_RF_CSR_CFG_KICK, 0, 100))
+		goto out;
+
+	mt76_wr(dev, MT_RF_CSR_CFG,
+		   FIELD_PREP(MT_RF_CSR_CFG_REG_BANK, bank) |
+		   FIELD_PREP(MT_RF_CSR_CFG_REG_ID, reg) |
+		   MT_RF_CSR_CFG_KICK);
+
+	if (!mt76_poll(dev, MT_RF_CSR_CFG, MT_RF_CSR_CFG_KICK, 0, 100))
+		goto out;
+
+	val = mt76_rr(dev, MT_RF_CSR_CFG);
+	if (FIELD_GET(MT_RF_CSR_CFG_REG_ID, val) == reg &&
+	    FIELD_GET(MT_RF_CSR_CFG_REG_BANK, val) == bank) {
+		ret = FIELD_GET(MT_RF_CSR_CFG_DATA, val);
+		trace_mt76x0_rf_read(&dev->mt76, bank, offset, ret);
+	}
+out:
+	mutex_unlock(&dev->reg_atomic_mutex);
+
+	if (ret < 0)
+		dev_err(dev->mt76.dev, "Error: RF read %d:%d failed:%d!!\n",
+			bank, reg, ret);
+
+	return ret;
+}
+
+static int
+rf_wr(struct mt76x0_dev *dev, u32 offset, u8 val)
+{
+	if (test_bit(MT76_STATE_MCU_RUNNING, &dev->mt76.state)) {
+		struct mt76_reg_pair pair = {
+			.reg = offset,
+			.value = val,
+		};
+
+		return mt76x0_write_reg_pairs(dev, MT_MCU_MEMMAP_RF, &pair, 1);
+	} else {
+		WARN_ON_ONCE(1);
+		return mt76x0_rf_csr_wr(dev, offset, val);
+	}
+}
+
+static int
+rf_rr(struct mt76x0_dev *dev, u32 offset)
+{
+	int ret;
+	u32 val;
+
+	if (test_bit(MT76_STATE_MCU_RUNNING, &dev->mt76.state)) {
+		struct mt76_reg_pair pair = {
+			.reg = offset,
+		};
+
+		ret = mt76x0_read_reg_pairs(dev, MT_MCU_MEMMAP_RF, &pair, 1);
+		val = pair.value;
+	} else {
+		WARN_ON_ONCE(1);
+		ret = val = mt76x0_rf_csr_rr(dev, offset);
+	}
+
+	return (ret < 0) ? ret : val;
+}
+
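+/* RMW an RF register; returns the new value or a negative error code */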
+static int
+rf_rmw(struct mt76x0_dev *dev, u32 offset, u8 mask, u8 val)
+{
+	int ret;
+
+	ret = rf_rr(dev, offset);
+	if (ret < 0)
+		return ret;
+	val |= ret & ~mask;
+	ret = rf_wr(dev, offset, val);
+	if (ret)
+		return ret;
+
+	return val;
+}
+
+static int
+rf_set(struct mt76x0_dev *dev, u32 offset, u8 val)
+{
+	return rf_rmw(dev, offset, 0, val);
+}
+
+#if 0
+static int
+rf_clear(struct mt76x0_dev *dev, u32 offset, u8 mask)
+{
+	return rf_rmw(dev, offset, mask, 0);
+}
+#endif
+
+#define RF_RANDOM_WRITE(dev, tab) \
+	mt76x0_write_reg_pairs(dev, MT_MCU_MEMMAP_RF, tab, ARRAY_SIZE(tab))
+
+int mt76x0_wait_bbp_ready(struct mt76x0_dev *dev)
+{
+	int i = 20;
+	u32 val;
+
+	do {
+		val = mt76_rr(dev, MT_BBP(CORE, 0));
+		dev_dbg(dev->mt76.dev, "BBP version %08x\n", val);
+		if (val && ~val)
+			break;
+	} while (--i);
+
+	if (!i) {
+		dev_err(dev->mt76.dev, "Error: BBP is not ready\n");
+		return -EIO;
+	}
+
+	return 0;
+}
+
+static void
+mt76x0_bbp_set_ctrlch(struct mt76x0_dev *dev, enum nl80211_chan_width width,
+		      u8 ctrl)
+{
+	int core_val, agc_val;
+
+	switch (width) {
+	case NL80211_CHAN_WIDTH_80:
+		core_val = 3;
+		agc_val = 7;
+		break;
+	case NL80211_CHAN_WIDTH_40:
+		core_val = 2;
+		agc_val = 3;
+		break;
+	default:
+		core_val = 0;
+		agc_val = 1;
+		break;
+	}
+
+	mt76_rmw_field(dev, MT_BBP(CORE, 1), MT_BBP_CORE_R1_BW, core_val);
+	mt76_rmw_field(dev, MT_BBP(AGC, 0), MT_BBP_AGC_R0_BW, agc_val);
+	mt76_rmw_field(dev, MT_BBP(AGC, 0), MT_BBP_AGC_R0_CTRL_CHAN, ctrl);
+	mt76_rmw_field(dev, MT_BBP(TXBE, 0), MT_BBP_TXBE_R0_CTRL_CHAN, ctrl);
+}
+
+int mt76x0_phy_get_rssi(struct mt76x0_dev *dev, struct mt76x0_rxwi *rxwi)
+{
+	s8 lna_gain, rssi_offset;
+	int val;
+
+	if (dev->mt76.chandef.chan->band == NL80211_BAND_2GHZ) {
+		lna_gain = dev->ee->lna_gain_2ghz;
+		rssi_offset = dev->ee->rssi_offset_2ghz[0];
+	} else {
+		lna_gain = dev->ee->lna_gain_5ghz[0];
+		rssi_offset = dev->ee->rssi_offset_5ghz[0];
+	}
+
+	val = rxwi->rssi[0] + rssi_offset - lna_gain;
+
+	return val;
+}
+
+static void mt76x0_vco_cal(struct mt76x0_dev *dev, u8 channel)
+{
+	u8 val;
+
+	val = rf_rr(dev, MT_RF(0, 4));
+	if ((val & 0x70) != 0x30)
+		return;
+
+	/*
+	 * Calibration Mode - Open loop, closed loop, and amplitude:
+	 * B0.R06.[0]: 1
+	 * B0.R06.[3:1] bp_close_code: 100
+	 * B0.R05.[7:0] bp_open_code: 0x0
+	 * B0.R04.[2:0] cal_bits: 000
+	 * B0.R03.[2:0] startup_time: 011
+	 * B0.R03.[6:4] settle_time:
+	 *  80MHz channel: 110
+	 *  40MHz channel: 101
+	 *  20MHz channel: 100
+	 */
+	val = rf_rr(dev, MT_RF(0, 6));
+	val &= ~0xf;
+	val |= 0x09;
+	rf_wr(dev, MT_RF(0, 6), val);
+
+	val = rf_rr(dev, MT_RF(0, 5));
+	if (val != 0)
+		rf_wr(dev, MT_RF(0, 5), 0x0);
+
+	val = rf_rr(dev, MT_RF(0, 4));
+	val &= ~0x07;
+	rf_wr(dev, MT_RF(0, 4), val);
+
+	val = rf_rr(dev, MT_RF(0, 3));
+	val &= ~0x77;
+	if (channel == 1 || channel == 7 || channel == 9 || channel >= 13) {
+		val |= 0x63;
+	} else if (channel == 3 || channel == 4 || channel == 10) {
+		val |= 0x53;
+	} else if (channel == 2 || channel == 5 || channel == 6 ||
+		   channel == 8 || channel == 11 || channel == 12) {
+		val |= 0x43;
+	} else {
+		WARN(1, "Unknown channel %u\n", channel);
+		return;
+	}
+	rf_wr(dev, MT_RF(0, 3), val);
+
+	/* TODO replace by mt76x0_rf_set(dev, MT_RF(0, 4), BIT(7)); */
+	val = rf_rr(dev, MT_RF(0, 4));
+	val = ((val & ~(0x80)) | 0x80);
+	rf_wr(dev, MT_RF(0, 4), val);
+
+	msleep(2);
+}
+
+static void
+mt76x0_mac_set_ctrlch(struct mt76x0_dev *dev, bool primary_upper)
+{
+	mt76_rmw_field(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_UPPER_40M,
+		       primary_upper);
+}
+
+static void
+mt76x0_phy_set_band(struct mt76x0_dev *dev, enum nl80211_band band)
+{
+	switch (band) {
+	case NL80211_BAND_2GHZ:
+		RF_RANDOM_WRITE(dev, mt76x0_rf_2g_channel_0_tab);
+
+		rf_wr(dev, MT_RF(5, 0), 0x45);
+		rf_wr(dev, MT_RF(6, 0), 0x44);
+
+		mt76_set(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_2G);
+		mt76_clear(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_5G);
+
+		mt76_wr(dev, MT_TX_ALC_VGA3, 0x00050007);
+		mt76_wr(dev, MT_TX0_RF_GAIN_CORR, 0x003E0002);
+		break;
+	case NL80211_BAND_5GHZ:
+		RF_RANDOM_WRITE(dev, mt76x0_rf_5g_channel_0_tab);
+
+		rf_wr(dev, MT_RF(5, 0), 0x44);
+		rf_wr(dev, MT_RF(6, 0), 0x45);
+
+		mt76_clear(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_2G);
+		mt76_set(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_5G);
+
+		mt76_wr(dev, MT_TX_ALC_VGA3, 0x00000005);
+		mt76_wr(dev, MT_TX0_RF_GAIN_CORR, 0x01010102);
+		break;
+	default:
+		break;
+	}
+}
+
+#define EXT_PA_2G_5G            0x0
+#define EXT_PA_5G_ONLY          0x1
+#define EXT_PA_2G_ONLY          0x2
+#define INT_PA_2G_5G            0x3
+
+static void
+mt76x0_phy_set_chan_rf_params(struct mt76x0_dev *dev, u8 channel, u16 rf_bw_band)
+{
+	u16 rf_band = rf_bw_band & 0xff00;
+	u16 rf_bw = rf_bw_band & 0x00ff;
+	u32 mac_reg;
+	u8 rf_val;
+	int i;
+	bool bSDM = false;
+	const struct mt76x0_freq_item *freq_item;
+
+	for (i = 0; i < ARRAY_SIZE(mt76x0_sdm_channel); i++) {
+		if (channel == mt76x0_sdm_channel[i]) {
+			bSDM = true;
+			break;
+		}
+	}
+
+	for (i = 0; i < ARRAY_SIZE(mt76x0_frequency_plan); i++) {
+		if (channel == mt76x0_frequency_plan[i].channel) {
+			rf_band = mt76x0_frequency_plan[i].band;
+
+			if (bSDM)
+				freq_item = &(mt76x0_sdm_frequency_plan[i]);
+			else
+				freq_item = &(mt76x0_frequency_plan[i]);
+
+			rf_wr(dev, MT_RF(0, 37), freq_item->pllR37);
+			rf_wr(dev, MT_RF(0, 36), freq_item->pllR36);
+			rf_wr(dev, MT_RF(0, 35), freq_item->pllR35);
+			rf_wr(dev, MT_RF(0, 34), freq_item->pllR34);
+			rf_wr(dev, MT_RF(0, 33), freq_item->pllR33);
+
+			rf_val = rf_rr(dev, MT_RF(0, 32));
+			rf_val &= ~0xE0;
+			rf_val |= freq_item->pllR32_b7b5;
+			rf_wr(dev, MT_RF(0, 32), rf_val);
+
+			/* R32<4:0> pll_den: (Denominator - 8) */
+			rf_val = rf_rr(dev, MT_RF(0, 32));
+			rf_val &= ~0x1F;
+			rf_val |= freq_item->pllR32_b4b0;
+			rf_wr(dev, MT_RF(0, 32), rf_val);
+
+			/* R31<7:5> */
+			rf_val = rf_rr(dev, MT_RF(0, 31));
+			rf_val &= ~0xE0;
+			rf_val |= freq_item->pllR31_b7b5;
+			rf_wr(dev, MT_RF(0, 31), rf_val);
+
+			/* R31<4:0> pll_k (Numerator) */
+			rf_val = rf_rr(dev, MT_RF(0, 31));
+			rf_val &= ~0x1F;
+			rf_val |= freq_item->pllR31_b4b0;
+			rf_wr(dev, MT_RF(0, 31), rf_val);
+
+			/* R30<7> sdm_reset_n */
+			rf_val = rf_rr(dev, MT_RF(0, 30));
+			rf_val &= ~0x80;
+			if (bSDM) {
+				rf_wr(dev, MT_RF(0, 30), rf_val);
+				rf_val |= 0x80;
+				rf_wr(dev, MT_RF(0, 30), rf_val);
+			} else {
+				rf_val |= freq_item->pllR30_b7;
+				rf_wr(dev, MT_RF(0, 30), rf_val);
+			}
+
+			/* R30<6:2> sdmmash_prbs,sin */
+			rf_val = rf_rr(dev, MT_RF(0, 30));
+			rf_val &= ~0x7C;
+			rf_val |= freq_item->pllR30_b6b2;
+			rf_wr(dev, MT_RF(0, 30), rf_val);
+
+			/* R30<1> sdm_bp */
+			rf_val = rf_rr(dev, MT_RF(0, 30));
+			rf_val &= ~0x02;
+			rf_val |= (freq_item->pllR30_b1 << 1);
+			rf_wr(dev, MT_RF(0, 30), rf_val);
+
+			/* R30<0> R29<7:0> (hex) pll_n */
+			rf_val = freq_item->pll_n & 0x00FF;
+			rf_wr(dev, MT_RF(0, 29), rf_val);
+
+			rf_val = rf_rr(dev, MT_RF(0, 30));
+			rf_val &= ~0x1;
+			rf_val |= ((freq_item->pll_n >> 8) & 0x0001);
+			rf_wr(dev, MT_RF(0, 30), rf_val);
+
+			/* R28<7:6> isi_iso */
+			rf_val = rf_rr(dev, MT_RF(0, 28));
+			rf_val &= ~0xC0;
+			rf_val |= freq_item->pllR28_b7b6;
+			rf_wr(dev, MT_RF(0, 28), rf_val);
+
+			/* R28<5:4> pfd_dly */
+			rf_val = rf_rr(dev, MT_RF(0, 28));
+			rf_val &= ~0x30;
+			rf_val |= freq_item->pllR28_b5b4;
+			rf_wr(dev, MT_RF(0, 28), rf_val);
+
+			/* R28<3:2> clksel option */
+			rf_val = rf_rr(dev, MT_RF(0, 28));
+			rf_val &= ~0x0C;
+			rf_val |= freq_item->pllR28_b3b2;
+			rf_wr(dev, MT_RF(0, 28), rf_val);
+
+			/* R28<1:0> R27<7:0> R26<7:0> (hex) sdm_k */
+			rf_val = freq_item->pll_sdm_k & 0x000000FF;
+			rf_wr(dev, MT_RF(0, 26), rf_val);
+
+			rf_val = ((freq_item->pll_sdm_k >> 8) & 0x000000FF);
+			rf_wr(dev, MT_RF(0, 27), rf_val);
+
+			rf_val = rf_rr(dev, MT_RF(0, 28));
+			rf_val &= ~0x3;
+			rf_val |= ((freq_item->pll_sdm_k >> 16) & 0x0003);
+			rf_wr(dev, MT_RF(0, 28), rf_val);
+
+			/* R24<1:0> xo_div */
+			rf_val = rf_rr(dev, MT_RF(0, 24));
+			rf_val &= ~0x3;
+			rf_val |= freq_item->pllR24_b1b0;
+			rf_wr(dev, MT_RF(0, 24), rf_val);
+
+			break;
+		}
+	}
+
+	for (i = 0; i < ARRAY_SIZE(mt76x0_rf_bw_switch_tab); i++) {
+		if (rf_bw == mt76x0_rf_bw_switch_tab[i].bw_band) {
+			rf_wr(dev, mt76x0_rf_bw_switch_tab[i].rf_bank_reg,
+				   mt76x0_rf_bw_switch_tab[i].value);
+		} else if ((rf_bw == (mt76x0_rf_bw_switch_tab[i].bw_band & 0xFF)) &&
+			   (rf_band & mt76x0_rf_bw_switch_tab[i].bw_band)) {
+			rf_wr(dev, mt76x0_rf_bw_switch_tab[i].rf_bank_reg,
+				   mt76x0_rf_bw_switch_tab[i].value);
+		}
+	}
+
+	for (i = 0; i < ARRAY_SIZE(mt76x0_rf_band_switch_tab); i++) {
+		if (mt76x0_rf_band_switch_tab[i].bw_band & rf_band) {
+			rf_wr(dev, mt76x0_rf_band_switch_tab[i].rf_bank_reg,
+				   mt76x0_rf_band_switch_tab[i].value);
+		}
+	}
+
+	mac_reg = mt76_rr(dev, MT_RF_MISC);
+	mac_reg &= ~0xC; /* Clear 0x518[3:2] */
+	mt76_wr(dev, MT_RF_MISC, mac_reg);
+
+	if (dev->ee->pa_type == INT_PA_2G_5G ||
+	    (dev->ee->pa_type == EXT_PA_5G_ONLY && (rf_band & RF_G_BAND)) ||
+	    (dev->ee->pa_type == EXT_PA_2G_ONLY && (rf_band & RF_A_BAND))) {
+		; /* Internal PA - nothing to do. */
+	} else {
+		/*
+		 * MT_RF_MISC (offset: 0x0518)
+		 * [2] 1'b1: enable external A band PA, 1'b0: disable external A band PA
+		 * [3] 1'b1: enable external G band PA, 1'b0: disable external G band PA
+		 */
+		if (rf_band & RF_A_BAND) {
+			mac_reg = mt76_rr(dev, MT_RF_MISC);
+			mac_reg |= 0x4;
+			mt76_wr(dev, MT_RF_MISC, mac_reg);
+		} else {
+			mac_reg = mt76_rr(dev, MT_RF_MISC);
+			mac_reg |= 0x8;
+			mt76_wr(dev, MT_RF_MISC, mac_reg);
+		}
+
+		/* External PA */
+		for (i = 0; i < ARRAY_SIZE(mt76x0_rf_ext_pa_tab); i++)
+			if (mt76x0_rf_ext_pa_tab[i].bw_band & rf_band)
+				rf_wr(dev, mt76x0_rf_ext_pa_tab[i].rf_bank_reg,
+					   mt76x0_rf_ext_pa_tab[i].value);
+	}
+
+	if (rf_band & RF_G_BAND) {
+		mt76_wr(dev, MT_TX0_RF_GAIN_ATTEN, 0x63707400);
+		/* Set Atten mode = 2 For G band, Disable Tx Inc dcoc. */
+		mac_reg = mt76_rr(dev, MT_TX_ALC_CFG_1);
+		mac_reg &= 0x896400FF;
+		mt76_wr(dev, MT_TX_ALC_CFG_1, mac_reg);
+	} else {
+		mt76_wr(dev, MT_TX0_RF_GAIN_ATTEN, 0x686A7800);
+		/* Set Atten mode = 0 For Ext A band, Disable Tx Inc dcoc Cal. */
+		mac_reg = mt76_rr(dev, MT_TX_ALC_CFG_1);
+		mac_reg &= 0x890400FF;
+		mt76_wr(dev, MT_TX_ALC_CFG_1, mac_reg);
+	}
+}
+
+static void
+mt76x0_phy_set_chan_bbp_params(struct mt76x0_dev *dev, u8 channel, u16 rf_bw_band)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(mt76x0_bbp_switch_tab); i++) {
+		const struct mt76x0_bbp_switch_item *item = &mt76x0_bbp_switch_tab[i];
+		const struct mt76_reg_pair *pair = &item->reg_pair;
+
+		if ((rf_bw_band & item->bw_band) != rf_bw_band)
+			continue;
+
+		if (pair->reg == MT_BBP(AGC, 8)) {
+			u32 val = pair->value;
+			u8 gain = FIELD_GET(MT_BBP_AGC_GAIN, val);
+
+			if (channel > 14) {
+				if (channel < 100)
+					gain -= dev->ee->lna_gain_5ghz[0] * 2;
+				else if (channel < 137)
+					gain -= dev->ee->lna_gain_5ghz[1] * 2;
+				else
+					gain -= dev->ee->lna_gain_5ghz[2] * 2;
+			} else {
+				gain -= dev->ee->lna_gain_2ghz * 2;
+			}
+
+			val &= ~MT_BBP_AGC_GAIN;
+			val |= FIELD_PREP(MT_BBP_AGC_GAIN, gain);
+			mt76_wr(dev, pair->reg, val);
+		} else {
+			mt76_wr(dev, pair->reg, pair->value);
+		}
+	}
+}
+
+#if 0
+static void
+mt76x0_extra_power_over_mac(struct mt76x0_dev *dev)
+{
+	u32 val;
+
+	val = ((mt76_rr(dev, MT_TX_PWR_CFG_1) & 0x00003f00) >> 8);
+	val |= ((mt76_rr(dev, MT_TX_PWR_CFG_2) & 0x00003f00) << 8);
+	mt76_wr(dev, MT_TX_PWR_CFG_7, val);
+
+	/* TODO: fix VHT */
+	val = ((mt76_rr(dev, MT_TX_PWR_CFG_3) & 0x0000ff00) >> 8);
+	mt76_wr(dev, MT_TX_PWR_CFG_8, val);
+
+	val = ((mt76_rr(dev, MT_TX_PWR_CFG_4) & 0x0000ff00) >> 8);
+	mt76_wr(dev, MT_TX_PWR_CFG_9, val);
+}
+
+static void
+mt76x0_phy_set_tx_power(struct mt76x0_dev *dev, u8 channel, u8 rf_bw_band)
+{
+	u32 val;
+	int i;
+	int bw = (rf_bw_band & RF_BW_20) ? 0 : 1;
+
+	for (i = 0; i < 4; i++) {
+		if (channel <= 14)
+			val = dev->ee->tx_pwr_cfg_2g[i][bw];
+		else
+			val = dev->ee->tx_pwr_cfg_5g[i][bw];
+
+		mt76_wr(dev, MT_TX_PWR_CFG_0 + 4*i, val);
+	}
+
+	mt76x0_extra_power_over_mac(dev);
+}
+#endif
+
+static void
+mt76x0_bbp_set_bw(struct mt76x0_dev *dev, enum nl80211_chan_width width)
+{
+	enum { BW_20 = 0, BW_40 = 1, BW_80 = 2, BW_10 = 4 };
+	int bw;
+
+	switch (width) {
+	default:
+	case NL80211_CHAN_WIDTH_20_NOHT:
+	case NL80211_CHAN_WIDTH_20:
+		bw = BW_20;
+		break;
+	case NL80211_CHAN_WIDTH_40:
+		bw = BW_40;
+		break;
+	case NL80211_CHAN_WIDTH_80:
+		bw = BW_80;
+		break;
+	case NL80211_CHAN_WIDTH_10:
+		bw = BW_10;
+		break;
+	case NL80211_CHAN_WIDTH_80P80:
+	case NL80211_CHAN_WIDTH_160:
+	case NL80211_CHAN_WIDTH_5:
+		/* TODO error */
+		return;
+	}
+
+	mt76x0_mcu_function_select(dev, BW_SETTING, bw);
+}
+
+static void
+mt76x0_phy_set_chan_pwr(struct mt76x0_dev *dev, u8 channel)
+{
+	static const int mt76x0_tx_pwr_ch_list[] = {
+		1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
+		36, 38, 40, 44, 46, 48, 52, 54, 56, 60, 62, 64,
+		100, 102, 104, 108, 110, 112, 116, 118, 120, 124, 126, 128,
+		132, 134, 136, 140,
+		149, 151, 153, 157, 159, 161, 165, 167, 169, 171, 173,
+		42, 58, 106, 122, 155
+	};
+	int i;
+	u32 val;
+
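+	/* dev->ee->tx_pwr_per_chan[] is indexed by the position in this list */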
+	for (i = 0; i < ARRAY_SIZE(mt76x0_tx_pwr_ch_list); i++)
+		if (mt76x0_tx_pwr_ch_list[i] == channel)
+			break;
+
+	if (WARN_ON(i == ARRAY_SIZE(mt76x0_tx_pwr_ch_list)))
+		return;
+
+	val = mt76_rr(dev, MT_TX_ALC_CFG_0);
+	val &= ~0x3f3f;
+	val |= dev->ee->tx_pwr_per_chan[i];
+	val |= 0x2f2f << 16;
+	mt76_wr(dev, MT_TX_ALC_CFG_0, val);
+}
+
+static int
+__mt76x0_phy_set_channel(struct mt76x0_dev *dev,
+		       struct cfg80211_chan_def *chandef)
+{
+	u32 ext_cca_chan[4] = {
+		[0] = FIELD_PREP(MT_EXT_CCA_CFG_CCA0, 0) |
+		      FIELD_PREP(MT_EXT_CCA_CFG_CCA1, 1) |
+		      FIELD_PREP(MT_EXT_CCA_CFG_CCA2, 2) |
+		      FIELD_PREP(MT_EXT_CCA_CFG_CCA3, 3) |
+		      FIELD_PREP(MT_EXT_CCA_CFG_CCA_MASK, BIT(0)),
+		[1] = FIELD_PREP(MT_EXT_CCA_CFG_CCA0, 1) |
+		      FIELD_PREP(MT_EXT_CCA_CFG_CCA1, 0) |
+		      FIELD_PREP(MT_EXT_CCA_CFG_CCA2, 2) |
+		      FIELD_PREP(MT_EXT_CCA_CFG_CCA3, 3) |
+		      FIELD_PREP(MT_EXT_CCA_CFG_CCA_MASK, BIT(1)),
+		[2] = FIELD_PREP(MT_EXT_CCA_CFG_CCA0, 2) |
+		      FIELD_PREP(MT_EXT_CCA_CFG_CCA1, 3) |
+		      FIELD_PREP(MT_EXT_CCA_CFG_CCA2, 1) |
+		      FIELD_PREP(MT_EXT_CCA_CFG_CCA3, 0) |
+		      FIELD_PREP(MT_EXT_CCA_CFG_CCA_MASK, BIT(2)),
+		[3] = FIELD_PREP(MT_EXT_CCA_CFG_CCA0, 3) |
+		      FIELD_PREP(MT_EXT_CCA_CFG_CCA1, 2) |
+		      FIELD_PREP(MT_EXT_CCA_CFG_CCA2, 1) |
+		      FIELD_PREP(MT_EXT_CCA_CFG_CCA3, 0) |
+		      FIELD_PREP(MT_EXT_CCA_CFG_CCA_MASK, BIT(3)),
+	};
+	bool scan = test_bit(MT76_SCANNING, &dev->mt76.state);
+	int ch_group_index, freq, freq1;
+	u8 channel;
+	u32 val;
+	u16 rf_bw_band;
+
+	freq = chandef->chan->center_freq;
+	freq1 = chandef->center_freq1;
+	channel = chandef->chan->hw_value;
+	rf_bw_band = (channel <= 14) ? RF_G_BAND : RF_A_BAND;
+
+	switch (chandef->width) {
+	case NL80211_CHAN_WIDTH_40:
+		if (freq1 > freq)
+			ch_group_index = 0;
+		else
+			ch_group_index = 1;
+		channel += 2 - ch_group_index * 4;
+		rf_bw_band |= RF_BW_40;
+		break;
+	case NL80211_CHAN_WIDTH_80:
+		ch_group_index = (freq - freq1 + 30) / 20;
+		if (WARN_ON(ch_group_index < 0 || ch_group_index > 3))
+			ch_group_index = 0;
+		channel += 6 - ch_group_index * 4;
+		rf_bw_band |= RF_BW_80;
+		break;
+	default:
+		ch_group_index = 0;
+		rf_bw_band |= RF_BW_20;
+		break;
+	}
+
+	mt76x0_bbp_set_bw(dev, chandef->width);
+	mt76x0_bbp_set_ctrlch(dev, chandef->width, ch_group_index);
+	mt76x0_mac_set_ctrlch(dev, ch_group_index & 1);
+
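+	/* Program the ext CCA mapping for this control channel position */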
+	mt76_rmw(dev, MT_EXT_CCA_CFG,
+		 (MT_EXT_CCA_CFG_CCA0 |
+		  MT_EXT_CCA_CFG_CCA1 |
+		  MT_EXT_CCA_CFG_CCA2 |
+		  MT_EXT_CCA_CFG_CCA3 |
+		  MT_EXT_CCA_CFG_CCA_MASK),
+		 ext_cca_chan[ch_group_index]);
+
+	mt76x0_phy_set_band(dev, chandef->chan->band);
+	mt76x0_phy_set_chan_rf_params(dev, channel, rf_bw_band);
+
+	/* set Japan Tx filter at channel 14 */
+	val = mt76_rr(dev, MT_BBP(CORE, 1));
+	if (channel == 14)
+		val |= 0x20;
+	else
+		val &= ~0x20;
+	mt76_wr(dev, MT_BBP(CORE, 1), val);
+
+	mt76x0_phy_set_chan_bbp_params(dev, channel, rf_bw_band);
+
+	/* The vendor driver doesn't do this */
+	/* mt76x0_phy_set_tx_power(dev, channel, rf_bw_band); */
+
+	if (scan)
+		mt76x0_vco_cal(dev, channel);
+
+	mt76x0_mcu_calibrate(dev, MCU_CAL_RXDCOC, 1);
+	mt76x0_phy_set_chan_pwr(dev, channel);
+
+	dev->mt76.chandef = *chandef;
+	return 0;
+}
+
+int mt76x0_phy_set_channel(struct mt76x0_dev *dev,
+			   struct cfg80211_chan_def *chandef)
+{
+	int ret;
+
+	mutex_lock(&dev->hw_atomic_mutex);
+	ret = __mt76x0_phy_set_channel(dev, chandef);
+	mutex_unlock(&dev->hw_atomic_mutex);
+
+	return ret;
+}
+
+void mt76x0_phy_recalibrate_after_assoc(struct mt76x0_dev *dev)
+{
+	u32 tx_alc, reg_val;
+	u8 channel = dev->mt76.chandef.chan->hw_value;
+	int is_5ghz = (dev->mt76.chandef.chan->band == NL80211_BAND_5GHZ) ? 1 : 0;
+
+	mt76x0_mcu_calibrate(dev, MCU_CAL_R, 0);
+
+	mt76x0_vco_cal(dev, channel);
+
+	tx_alc = mt76_rr(dev, MT_TX_ALC_CFG_0);
+	mt76_wr(dev, MT_TX_ALC_CFG_0, 0);
+	usleep_range(500, 700);
+
+	reg_val = mt76_rr(dev, 0x2124);
+	reg_val &= 0xffffff7e;
+	mt76_wr(dev, 0x2124, reg_val);
+
+	mt76x0_mcu_calibrate(dev, MCU_CAL_RXDCOC, 0);
+
+	mt76x0_mcu_calibrate(dev, MCU_CAL_LC, is_5ghz);
+	mt76x0_mcu_calibrate(dev, MCU_CAL_LOFT, is_5ghz);
+	mt76x0_mcu_calibrate(dev, MCU_CAL_TXIQ, is_5ghz);
+	mt76x0_mcu_calibrate(dev, MCU_CAL_TX_GROUP_DELAY, is_5ghz);
+	mt76x0_mcu_calibrate(dev, MCU_CAL_RXIQ, is_5ghz);
+	mt76x0_mcu_calibrate(dev, MCU_CAL_RX_GROUP_DELAY, is_5ghz);
+
+	mt76_wr(dev, 0x2124, reg_val);
+	mt76_wr(dev, MT_TX_ALC_CFG_0, tx_alc);
+	msleep(100);
+
+	mt76x0_mcu_calibrate(dev, MCU_CAL_RXDCOC, 1);
+}
+
+void mt76x0_agc_save(struct mt76x0_dev *dev)
+{
+	/* Only one RX path */
+	dev->agc_save = FIELD_GET(MT_BBP_AGC_GAIN, mt76_rr(dev, MT_BBP(AGC, 8)));
+}
+
+void mt76x0_agc_restore(struct mt76x0_dev *dev)
+{
+	mt76_rmw_field(dev, MT_BBP(AGC, 8), MT_BBP_AGC_GAIN, dev->agc_save);
+}
+
+static void mt76x0_temp_sensor(struct mt76x0_dev *dev)
+{
+	u8 rf_b7_73, rf_b0_66, rf_b0_67;
+	int cycle, temp;
+	u32 val;
+	s32 sval;
+
+	rf_b7_73 = rf_rr(dev, MT_RF(7, 73));
+	rf_b0_66 = rf_rr(dev, MT_RF(0, 66));
+	rf_b0_67 = rf_rr(dev, MT_RF(0, 73));
+
+	rf_wr(dev, MT_RF(7, 73), 0x02);
+	rf_wr(dev, MT_RF(0, 66), 0x23);
+	rf_wr(dev, MT_RF(0, 73), 0x01);
+
+	mt76_wr(dev, MT_BBP(CORE, 34), 0x00080055);
+
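+	/* Wait for the busy bit (0x10) in MT_BBP(CORE, 34) to clear */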
+	for (cycle = 0; cycle < 2000; cycle++) {
+		val = mt76_rr(dev, MT_BBP(CORE, 34));
+		if (!(val & 0x10))
+			break;
+		udelay(3);
+	}
+
+	if (cycle >= 2000) {
+		val &= 0x10;
+		mt76_wr(dev, MT_BBP(CORE, 34), val);
+		goto done;
+	}
+
+	sval = mt76_rr(dev, MT_BBP(CORE, 35)) & 0xff;
+	if (!(sval & 0x80))
+		sval &= 0x7f; /* Positive */
+	else
+		sval |= 0xffffff00; /* Negative */
+
+	temp = (35 * (sval - dev->ee->temp_off)) / 10 + 25;
+
+done:
+	rf_wr(dev, MT_RF(7, 73), rf_b7_73);
+	rf_wr(dev, MT_RF(0, 66), rf_b0_66);
+	rf_wr(dev, MT_RF(0, 73), rf_b0_67);
+}
+
+static void mt76x0_dynamic_vga_tuning(struct mt76x0_dev *dev)
+{
+	u32 val, init_vga;
+
+	init_vga = (dev->mt76.chandef.chan->band == NL80211_BAND_5GHZ) ? 0x54 : 0x4E;
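+	/* Back off the initial VGA gain when the average RSSI is high */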
+	if (dev->avg_rssi > -60)
+		init_vga -= 0x20;
+	else if (dev->avg_rssi > -70)
+		init_vga -= 0x10;
+
+	val = mt76_rr(dev, MT_BBP(AGC, 8));
+	val &= 0xFFFF80FF;
+	val |= init_vga << 8;
+	mt76_wr(dev, MT_BBP(AGC, 8), val);
+}
+
+static void mt76x0_phy_calibrate(struct work_struct *work)
+{
+	struct mt76x0_dev *dev = container_of(work, struct mt76x0_dev,
+					    cal_work.work);
+
+	mt76x0_dynamic_vga_tuning(dev);
+	mt76x0_temp_sensor(dev);
+
+	ieee80211_queue_delayed_work(dev->mt76.hw, &dev->cal_work,
+				     MT_CALIBRATE_INTERVAL);
+}
+
+void mt76x0_phy_con_cal_onoff(struct mt76x0_dev *dev,
+			       struct ieee80211_bss_conf *info)
+{
+	/* Start/stop collecting beacon data */
+	spin_lock_bh(&dev->con_mon_lock);
+	ether_addr_copy(dev->ap_bssid, info->bssid);
+	dev->avg_rssi = 0;
+	dev->bcn_freq_off = MT_FREQ_OFFSET_INVALID;
+	spin_unlock_bh(&dev->con_mon_lock);
+}
+
+static void
+mt76x0_set_rx_chains(struct mt76x0_dev *dev)
+{
+	u32 val;
+
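+	/* Clear both RX chain bits, then re-enable the second chain if present */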
+	val = mt76_rr(dev, MT_BBP(AGC, 0));
+	val &= ~(BIT(3) | BIT(4));
+
+	if (dev->chainmask & BIT(1))
+		val |= BIT(3);
+
+	mt76_wr(dev, MT_BBP(AGC, 0), val);
+
+	mb();
+	val = mt76_rr(dev, MT_BBP(AGC, 0));
+}
+
+static void
+mt76x0_set_tx_dac(struct mt76x0_dev *dev)
+{
+	if (dev->chainmask & BIT(1))
+		mt76_set(dev, MT_BBP(TXBE, 5), 3);
+	else
+		mt76_clear(dev, MT_BBP(TXBE, 5), 3);
+}
+
+static void
+mt76x0_rf_init(struct mt76x0_dev *dev)
+{
+	int i;
+	u8 val;
+
+	RF_RANDOM_WRITE(dev, mt76x0_rf_central_tab);
+	RF_RANDOM_WRITE(dev, mt76x0_rf_2g_channel_0_tab);
+	RF_RANDOM_WRITE(dev, mt76x0_rf_5g_channel_0_tab);
+	RF_RANDOM_WRITE(dev, mt76x0_rf_vga_channel_0_tab);
+
+	for (i = 0; i < ARRAY_SIZE(mt76x0_rf_bw_switch_tab); i++) {
+		const struct mt76x0_rf_switch_item *item = &mt76x0_rf_bw_switch_tab[i];
+
+		if (item->bw_band == RF_BW_20)
+			rf_wr(dev, item->rf_bank_reg, item->value);
+		else if (((RF_G_BAND | RF_BW_20) & item->bw_band) == (RF_G_BAND | RF_BW_20))
+			rf_wr(dev, item->rf_bank_reg, item->value);
+	}
+
+	for (i = 0; i < ARRAY_SIZE(mt76x0_rf_band_switch_tab); i++) {
+		if (mt76x0_rf_band_switch_tab[i].bw_band & RF_G_BAND) {
+			rf_wr(dev,
+			      mt76x0_rf_band_switch_tab[i].rf_bank_reg,
+			      mt76x0_rf_band_switch_tab[i].value);
+		}
+	}
+
+	/*
+	 * Frequency calibration:
+	 * E1: B0.R22<6:0>: xo_cxo<6:0>
+	 * E2: B0.R21<0>: xo_cxo<0>, B0.R22<7:0>: xo_cxo<8:1>
+	 */
+	rf_wr(dev, MT_RF(0, 22), min_t(u8, dev->ee->rf_freq_off, 0xBF));
+	val = rf_rr(dev, MT_RF(0, 22));
+
+	/*
+	 * Reset the DAC during power up:
+	 * set B0.R73<7> = 1, clear it, then set it again.
+	 */
+	val = rf_rr(dev, MT_RF(0, 73));
+	val |= 0x80;
+	rf_wr(dev, MT_RF(0, 73), val);
+	val &= ~0x80;
+	rf_wr(dev, MT_RF(0, 73), val);
+	val |= 0x80;
+	rf_wr(dev, MT_RF(0, 73), val);
+
+	/*
+	 * vcocal_en: initiate VCO calibration (self-clears on completion).
+	 * It must be done at the end of the RF configuration.
+	 */
+	rf_set(dev, MT_RF(0, 4), 0x80);
+}
+
+static void mt76x0_ant_select(struct mt76x0_dev *dev)
+{
+	/* Single antenna mode. */
+	mt76_rmw(dev, MT_WLAN_FUN_CTRL, BIT(5), BIT(6));
+	mt76_clear(dev, MT_CMB_CTRL, BIT(14) | BIT(12));
+	mt76_clear(dev, MT_COEXCFG0, BIT(2));
+	mt76_rmw(dev, MT_COEXCFG3, BIT(5) | BIT(4) | BIT(3) | BIT(2), BIT(1));
+}
+
+void mt76x0_phy_init(struct mt76x0_dev *dev)
+{
+	INIT_DELAYED_WORK(&dev->cal_work, mt76x0_phy_calibrate);
+
+	mt76x0_ant_select(dev);
+
+	mt76x0_rf_init(dev);
+
+	mt76x0_set_rx_chains(dev);
+	mt76x0_set_tx_dac(dev);
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/phy.h b/drivers/net/wireless/mediatek/mt76/mt76x0/phy.h
new file mode 100644
index 0000000000000000000000000000000000000000..2880a43c3cb0b32159eec9d53467505362f518fc
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/phy.h
@@ -0,0 +1,81 @@
+/*
+ * (c) Copyright 2002-2010, Ralink Technology, Inc.
+ * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef _MT76X0_PHY_H_
+#define _MT76X0_PHY_H_
+
+#define RF_G_BAND	0x0100
+#define RF_A_BAND	0x0200
+#define RF_A_BAND_LB	0x0400
+#define RF_A_BAND_MB	0x0800
+#define RF_A_BAND_HB	0x1000
+#define RF_A_BAND_11J	0x2000
+
+#define RF_BW_20        1
+#define RF_BW_40        2
+#define RF_BW_10        4
+#define RF_BW_80        8
+
+#define MT_RF(bank, reg) ((bank) << 16 | (reg))
+#define MT_RF_BANK(offset) ((offset) >> 16)
+#define MT_RF_REG(offset) ((offset) & 0xff)
+
+struct mt76x0_bbp_switch_item {
+	u16 bw_band;
+	struct mt76_reg_pair reg_pair;
+};
+
+struct mt76x0_rf_switch_item {
+	u32 rf_bank_reg;
+	u16 bw_band;
+	u8 value;
+};
+
+struct mt76x0_freq_item {
+	u8 channel;
+	u32 band;
+	u8 pllR37;
+	u8 pllR36;
+	u8 pllR35;
+	u8 pllR34;
+	u8 pllR33;
+	u8 pllR32_b7b5;
+	u8 pllR32_b4b0; /* PLL_DEN (Denominator - 8) */
+	u8 pllR31_b7b5;
+	u8 pllR31_b4b0; /* PLL_K (Numerator) */
+	u8 pllR30_b7;	/* sdm_reset_n */
+	u8 pllR30_b6b2; /* sdmmash_prbs,sin */
+	u8 pllR30_b1;	/* sdm_bp */
+	u16 pll_n;	/* R30<0>, R29<7:0> (hex) */
+	u8 pllR28_b7b6; /* isi,iso */
+	u8 pllR28_b5b4;	/* pfd_dly */
+	u8 pllR28_b3b2;	/* clksel option */
+	u32 pll_sdm_k;	/* R28<1:0>, R27<7:0>, R26<7:0> (hex) SDM_k */
+	u8 pllR24_b1b0;	/* xo_div */
+};
+
+struct mt76x0_rate_pwr_item {
+	s8 mcs_power;
+	u8 rf_pa_mode;
+};
+
+struct mt76x0_rate_pwr_tab {
+	struct mt76x0_rate_pwr_item cck[4];
+	struct mt76x0_rate_pwr_item ofdm[8];
+	struct mt76x0_rate_pwr_item ht[8];
+	struct mt76x0_rate_pwr_item vht[10];
+	struct mt76x0_rate_pwr_item stbc[8];
+	struct mt76x0_rate_pwr_item mcs32;
+};
+
+#endif /* _MT76X0_PHY_H_ */
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/regs.h b/drivers/net/wireless/mediatek/mt76/mt76x0/regs.h
new file mode 100644
index 0000000000000000000000000000000000000000..16bed4aaa2429ff54216642d77833f8d8b2284a8
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/regs.h
@@ -0,0 +1,651 @@
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MT76_REGS_H
+#define __MT76_REGS_H
+
+#include <linux/bitops.h>
+
+#define MT_ASIC_VERSION			0x0000
+
+#define MT76XX_REV_E3		0x22
+#define MT76XX_REV_E4		0x33
+
+#define MT_CMB_CTRL			0x0020
+#define MT_CMB_CTRL_XTAL_RDY		BIT(22)
+#define MT_CMB_CTRL_PLL_LD		BIT(23)
+
+#define MT_EFUSE_CTRL			0x0024
+#define MT_EFUSE_CTRL_AOUT		GENMASK(5, 0)
+#define MT_EFUSE_CTRL_MODE		GENMASK(7, 6)
+#define MT_EFUSE_CTRL_LDO_OFF_TIME	GENMASK(13, 8)
+#define MT_EFUSE_CTRL_LDO_ON_TIME	GENMASK(15, 14)
+#define MT_EFUSE_CTRL_AIN		GENMASK(25, 16)
+#define MT_EFUSE_CTRL_KICK		BIT(30)
+#define MT_EFUSE_CTRL_SEL		BIT(31)
+
+#define MT_EFUSE_DATA_BASE		0x0028
+#define MT_EFUSE_DATA(_n)		(MT_EFUSE_DATA_BASE + ((_n) << 2))
+
+#define MT_COEXCFG0			0x0040
+#define MT_COEXCFG0_COEX_EN		BIT(0)
+
+#define MT_COEXCFG3			0x004c
+
+#define MT_LDO_CTRL_0			0x006c
+#define MT_LDO_CTRL_1			0x0070
+
+#define MT_WLAN_FUN_CTRL		0x0080
+#define MT_WLAN_FUN_CTRL_WLAN_EN	BIT(0)
+#define MT_WLAN_FUN_CTRL_WLAN_CLK_EN	BIT(1)
+#define MT_WLAN_FUN_CTRL_WLAN_RESET_RF	BIT(2)
+
+#define MT_WLAN_FUN_CTRL_WLAN_RESET	BIT(3) /* MT76x0 */
+#define MT_WLAN_FUN_CTRL_CSR_F20M_CKEN	BIT(3) /* MT76x2 */
+
+#define MT_WLAN_FUN_CTRL_PCIE_CLK_REQ	BIT(4)
+#define MT_WLAN_FUN_CTRL_FRC_WL_ANT_SEL	BIT(5)
+#define MT_WLAN_FUN_CTRL_INV_ANT_SEL	BIT(6)
+#define MT_WLAN_FUN_CTRL_WAKE_HOST	BIT(7)
+
+#define MT_WLAN_FUN_CTRL_THERM_RST	BIT(8) /* MT76x2 */
+#define MT_WLAN_FUN_CTRL_THERM_CKEN	BIT(9) /* MT76x2 */
+
+#define MT_WLAN_FUN_CTRL_GPIO_IN	GENMASK(15, 8) /* MT76x0 */
+#define MT_WLAN_FUN_CTRL_GPIO_OUT	GENMASK(23, 16) /* MT76x0 */
+#define MT_WLAN_FUN_CTRL_GPIO_OUT_EN	GENMASK(31, 24) /* MT76x0 */
+
+#define MT_XO_CTRL0			0x0100
+#define MT_XO_CTRL1			0x0104
+#define MT_XO_CTRL2			0x0108
+#define MT_XO_CTRL3			0x010c
+#define MT_XO_CTRL4			0x0110
+
+#define MT_XO_CTRL5			0x0114
+#define MT_XO_CTRL5_C2_VAL		GENMASK(14, 8)
+
+#define MT_XO_CTRL6			0x0118
+#define MT_XO_CTRL6_C2_CTRL		GENMASK(14, 8)
+
+#define MT_XO_CTRL7			0x011c
+
+#define MT_IOCFG_6			0x0124
+#define MT_WLAN_MTC_CTRL		0x10148
+#define MT_WLAN_MTC_CTRL_MTCMOS_PWR_UP	BIT(0)
+#define MT_WLAN_MTC_CTRL_PWR_ACK	BIT(12)
+#define MT_WLAN_MTC_CTRL_PWR_ACK_S	BIT(13)
+#define MT_WLAN_MTC_CTRL_BBP_MEM_PD	GENMASK(19, 16)
+#define MT_WLAN_MTC_CTRL_PBF_MEM_PD	BIT(20)
+#define MT_WLAN_MTC_CTRL_FCE_MEM_PD	BIT(21)
+#define MT_WLAN_MTC_CTRL_TSO_MEM_PD	BIT(22)
+#define MT_WLAN_MTC_CTRL_BBP_MEM_RB	BIT(24)
+#define MT_WLAN_MTC_CTRL_PBF_MEM_RB	BIT(25)
+#define MT_WLAN_MTC_CTRL_FCE_MEM_RB	BIT(26)
+#define MT_WLAN_MTC_CTRL_TSO_MEM_RB	BIT(27)
+#define MT_WLAN_MTC_CTRL_STATE_UP	BIT(28)
+
+#define MT_INT_SOURCE_CSR		0x0200
+#define MT_INT_MASK_CSR			0x0204
+
+#define MT_INT_RX_DONE(_n)		BIT(_n)
+#define MT_INT_RX_DONE_ALL		GENMASK(1, 0)
+#define MT_INT_TX_DONE_ALL		GENMASK(13, 4)
+#define MT_INT_TX_DONE(_n)		BIT((_n) + 4)
+#define MT_INT_RX_COHERENT		BIT(16)
+#define MT_INT_TX_COHERENT		BIT(17)
+#define MT_INT_ANY_COHERENT		BIT(18)
+#define MT_INT_MCU_CMD			BIT(19)
+#define MT_INT_TBTT			BIT(20)
+#define MT_INT_PRE_TBTT			BIT(21)
+#define MT_INT_TX_STAT			BIT(22)
+#define MT_INT_AUTO_WAKEUP		BIT(23)
+#define MT_INT_GPTIMER			BIT(24)
+#define MT_INT_RXDELAYINT		BIT(26)
+#define MT_INT_TXDELAYINT		BIT(27)
+
+#define MT_WPDMA_GLO_CFG		0x0208
+#define MT_WPDMA_GLO_CFG_TX_DMA_EN	BIT(0)
+#define MT_WPDMA_GLO_CFG_TX_DMA_BUSY	BIT(1)
+#define MT_WPDMA_GLO_CFG_RX_DMA_EN	BIT(2)
+#define MT_WPDMA_GLO_CFG_RX_DMA_BUSY	BIT(3)
+#define MT_WPDMA_GLO_CFG_DMA_BURST_SIZE	GENMASK(5, 4)
+#define MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE	BIT(6)
+#define MT_WPDMA_GLO_CFG_BIG_ENDIAN	BIT(7)
+#define MT_WPDMA_GLO_CFG_HDR_SEG_LEN	GENMASK(15, 8)
+#define MT_WPDMA_GLO_CFG_CLK_GATE_DIS	BIT(30)
+#define MT_WPDMA_GLO_CFG_RX_2B_OFFSET	BIT(31)
+
+#define MT_WPDMA_RST_IDX		0x020c
+
+#define MT_WPDMA_DELAY_INT_CFG		0x0210
+
+#define MT_WMM_AIFSN		0x0214
+#define MT_WMM_AIFSN_MASK		GENMASK(3, 0)
+#define MT_WMM_AIFSN_SHIFT(_n)		((_n) * 4)
+
+#define MT_WMM_CWMIN		0x0218
+#define MT_WMM_CWMIN_MASK		GENMASK(3, 0)
+#define MT_WMM_CWMIN_SHIFT(_n)		((_n) * 4)
+
+#define MT_WMM_CWMAX		0x021c
+#define MT_WMM_CWMAX_MASK		GENMASK(3, 0)
+#define MT_WMM_CWMAX_SHIFT(_n)		((_n) * 4)
+
+#define MT_WMM_TXOP_BASE		0x0220
+#define MT_WMM_TXOP(_n)			(MT_WMM_TXOP_BASE + (((_n) / 2) << 2))
+#define MT_WMM_TXOP_SHIFT(_n)		((_n & 1) * 16)
+#define MT_WMM_TXOP_MASK		GENMASK(15, 0)
+
+#define MT_WMM_CTRL			0x0230 /* MT76x0 */
+
+#define MT_FCE_DMA_ADDR			0x0230
+#define MT_FCE_DMA_LEN			0x0234
+
+#define MT_USB_DMA_CFG			0x238
+#define MT_USB_DMA_CFG_RX_BULK_AGG_TOUT	GENMASK(7, 0)
+#define MT_USB_DMA_CFG_RX_BULK_AGG_LMT	GENMASK(15, 8)
+#define MT_USB_DMA_CFG_TX_WL_DROP	BIT(16)
+#define MT_USB_DMA_CFG_WAKEUP_EN	BIT(17)
+#define MT_USB_DMA_CFG_RX_DROP_OR_PADDING	BIT(18)
+#define MT_USB_DMA_CFG_TX_CLR		BIT(19)
+#define MT_USB_DMA_CFG_WL_LPK_EN	BIT(20)
+#define MT_USB_DMA_CFG_RX_BULK_AGG_EN	BIT(21)
+#define MT_USB_DMA_CFG_RX_BULK_EN	BIT(22)
+#define MT_USB_DMA_CFG_TX_BULK_EN	BIT(23)
+#define MT_USB_DMA_CFG_EP_OUT_VALID	GENMASK(29, 24)
+#define MT_USB_DMA_CFG_RX_BUSY		BIT(30)
+#define MT_USB_DMA_CFG_TX_BUSY		BIT(31)
+#if 0
+#define MT_USB_DMA_CFG_TX_CLR		BIT(19)
+#define MT_USB_DMA_CFG_TXOP_HALT	BIT(20)
+#define MT_USB_DMA_CFG_RX_BULK_AGG_EN	BIT(21)
+#define MT_USB_DMA_CFG_RX_BULK_EN	BIT(22)
+#define MT_USB_DMA_CFG_TX_BULK_EN	BIT(23)
+#define MT_USB_DMA_CFG_UDMA_RX_WL_DROP	BIT(25)
+#endif
+
+#define MT_TSO_CTRL			0x0250
+#define MT_HEADER_TRANS_CTRL_REG	0x0260
+
+#define MT_US_CYC_CFG			0x02a4
+#define MT_US_CYC_CNT			GENMASK(7, 0)
+
+#define MT_TX_RING_BASE			0x0300
+#define MT_RX_RING_BASE			0x03c0
+#define MT_RING_SIZE			0x10
+
+#define MT_TX_HW_QUEUE_MCU		8
+#define MT_TX_HW_QUEUE_MGMT		9
+
+#define MT_PBF_SYS_CTRL			0x0400
+#define MT_PBF_SYS_CTRL_MCU_RESET	BIT(0)
+#define MT_PBF_SYS_CTRL_DMA_RESET	BIT(1)
+#define MT_PBF_SYS_CTRL_MAC_RESET	BIT(2)
+#define MT_PBF_SYS_CTRL_PBF_RESET	BIT(3)
+#define MT_PBF_SYS_CTRL_ASY_RESET	BIT(4)
+
+#define MT_PBF_CFG			0x0404
+#define MT_PBF_CFG_TX0Q_EN		BIT(0)
+#define MT_PBF_CFG_TX1Q_EN		BIT(1)
+#define MT_PBF_CFG_TX2Q_EN		BIT(2)
+#define MT_PBF_CFG_TX3Q_EN		BIT(3)
+#define MT_PBF_CFG_RX0Q_EN		BIT(4)
+#define MT_PBF_CFG_RX_DROP_EN		BIT(8)
+
+#define MT_PBF_TX_MAX_PCNT		0x0408
+#define MT_PBF_RX_MAX_PCNT		0x040c
+
+#define MT_BCN_OFFSET_BASE		0x041c
+#define MT_BCN_OFFSET(_n)		(MT_BCN_OFFSET_BASE + ((_n) << 2))
+
+#define MT_RXQ_STA			0x0430
+#define MT_TXQ_STA			0x0434
+#define	MT_RF_CSR_CFG			0x0500
+#define MT_RF_CSR_CFG_DATA		GENMASK(7, 0)
+#define MT_RF_CSR_CFG_REG_ID		GENMASK(13, 8)
+#define MT_RF_CSR_CFG_REG_BANK		GENMASK(17, 14)
+#define MT_RF_CSR_CFG_WR		BIT(30)
+#define MT_RF_CSR_CFG_KICK		BIT(31)
+
+#define MT_RF_BYPASS_0			0x0504
+#define MT_RF_BYPASS_1			0x0508
+#define MT_RF_SETTING_0			0x050c
+
+#define MT_RF_MISC			0x0518
+#define MT_RF_DATA_WRITE		0x0524
+
+#define MT_RF_CTRL			0x0528
+#define MT_RF_CTRL_ADDR			GENMASK(11, 0)
+#define MT_RF_CTRL_WRITE		BIT(12)
+#define MT_RF_CTRL_BUSY			BIT(13)
+#define MT_RF_CTRL_IDX			BIT(16)
+
+#define MT_RF_DATA_READ			0x052c
+
+#define MT_COM_REG0			0x0730
+#define MT_COM_REG1			0x0734
+#define MT_COM_REG2			0x0738
+#define MT_COM_REG3			0x073C
+
+#define MT_FCE_PSE_CTRL			0x0800
+#define MT_FCE_PARAMETERS		0x0804
+#define MT_FCE_CSO			0x0808
+
+#define MT_FCE_L2_STUFF			0x080c
+#define MT_FCE_L2_STUFF_HT_L2_EN	BIT(0)
+#define MT_FCE_L2_STUFF_QOS_L2_EN	BIT(1)
+#define MT_FCE_L2_STUFF_RX_STUFF_EN	BIT(2)
+#define MT_FCE_L2_STUFF_TX_STUFF_EN	BIT(3)
+#define MT_FCE_L2_STUFF_WR_MPDU_LEN_EN	BIT(4)
+#define MT_FCE_L2_STUFF_MVINV_BSWAP	BIT(5)
+#define MT_FCE_L2_STUFF_TS_CMD_QSEL_EN	GENMASK(15, 8)
+#define MT_FCE_L2_STUFF_TS_LEN_EN	GENMASK(23, 16)
+#define MT_FCE_L2_STUFF_OTHER_PORT	GENMASK(25, 24)
+
+#define MT_FCE_WLAN_FLOW_CONTROL1	0x0824
+
+#define MT_TX_CPU_FROM_FCE_BASE_PTR	0x09a0
+#define MT_TX_CPU_FROM_FCE_MAX_COUNT	0x09a4
+#define MT_TX_CPU_FROM_FCE_CPU_DESC_IDX	0x09a8
+
+#define MT_FCE_PDMA_GLOBAL_CONF		0x09c4
+
+#define MT_PAUSE_ENABLE_CONTROL1	0x0a38
+
+#define MT_FCE_SKIP_FS			0x0a6c
+
+#define MT_MAC_CSR0			0x1000
+#define MT_MAC_SYS_CTRL			0x1004
+#define MT_MAC_SYS_CTRL_RESET_CSR	BIT(0)
+#define MT_MAC_SYS_CTRL_RESET_BBP	BIT(1)
+#define MT_MAC_SYS_CTRL_ENABLE_TX	BIT(2)
+#define MT_MAC_SYS_CTRL_ENABLE_RX	BIT(3)
+
+#define MT_MAC_ADDR_DW0			0x1008
+#define MT_MAC_ADDR_DW1			0x100c
+#define MT_MAC_ADDR_DW1_U2ME_MASK	GENMASK(23, 16)
+
+#define MT_MAC_BSSID_DW0		0x1010
+#define MT_MAC_BSSID_DW1		0x1014
+#define MT_MAC_BSSID_DW1_ADDR		GENMASK(15, 0)
+#define MT_MAC_BSSID_DW1_MBSS_MODE	GENMASK(17, 16)
+#define MT_MAC_BSSID_DW1_MBEACON_N	GENMASK(20, 18)
+#define MT_MAC_BSSID_DW1_MBSS_LOCAL_BIT	BIT(21)
+#define MT_MAC_BSSID_DW1_MBSS_MODE_B2	BIT(22)
+#define MT_MAC_BSSID_DW1_MBEACON_N_B3	BIT(23)
+#define MT_MAC_BSSID_DW1_MBSS_IDX_BYTE	GENMASK(26, 24)
+
+#define MT_MAX_LEN_CFG			0x1018
+#define MT_MAX_LEN_CFG_AMPDU		GENMASK(13, 12)
+
+#define MT_LED_CFG			0x102c
+
+#define MT_AMPDU_MAX_LEN_20M1S		0x1030
+#define MT_AMPDU_MAX_LEN_20M2S		0x1034
+#define MT_AMPDU_MAX_LEN_40M1S		0x1038
+#define MT_AMPDU_MAX_LEN_40M2S		0x103c
+#define MT_AMPDU_MAX_LEN		0x1040
+
+#define MT_WCID_DROP_BASE		0x106c
+#define MT_WCID_DROP(_n)		(MT_WCID_DROP_BASE + ((_n) >> 5) * 4)
+#define MT_WCID_DROP_MASK(_n)		BIT((_n) % 32)
+
+#define MT_BCN_BYPASS_MASK		0x108c
+
+#define MT_MAC_APC_BSSID_BASE		0x1090
+#define MT_MAC_APC_BSSID_L(_n)		(MT_MAC_APC_BSSID_BASE + ((_n) * 8))
+#define MT_MAC_APC_BSSID_H(_n)		(MT_MAC_APC_BSSID_BASE + ((_n) * 8 + 4))
+#define MT_MAC_APC_BSSID_H_ADDR		GENMASK(15, 0)
+#define MT_MAC_APC_BSSID0_H_EN		BIT(16)
+
+#define MT_XIFS_TIME_CFG		0x1100
+#define MT_XIFS_TIME_CFG_CCK_SIFS	GENMASK(7, 0)
+#define MT_XIFS_TIME_CFG_OFDM_SIFS	GENMASK(15, 8)
+#define MT_XIFS_TIME_CFG_OFDM_XIFS	GENMASK(19, 16)
+#define MT_XIFS_TIME_CFG_EIFS		GENMASK(28, 20)
+#define MT_XIFS_TIME_CFG_BB_RXEND_EN	BIT(29)
+
+#define MT_BKOFF_SLOT_CFG		0x1104
+#define MT_BKOFF_SLOT_CFG_SLOTTIME	GENMASK(7, 0)
+#define MT_BKOFF_SLOT_CFG_CC_DELAY	GENMASK(11, 8)
+
+#define MT_BEACON_TIME_CFG		0x1114
+#define MT_BEACON_TIME_CFG_INTVAL	GENMASK(15, 0)
+#define MT_BEACON_TIME_CFG_TIMER_EN	BIT(16)
+#define MT_BEACON_TIME_CFG_SYNC_MODE	GENMASK(18, 17)
+#define MT_BEACON_TIME_CFG_TBTT_EN	BIT(19)
+#define MT_BEACON_TIME_CFG_BEACON_TX	BIT(20)
+#define MT_BEACON_TIME_CFG_TSF_COMP	GENMASK(31, 24)
+
+#define MT_TBTT_SYNC_CFG		0x1118
+#define MT_TBTT_TIMER_CFG		0x1124
+
+#define MT_INT_TIMER_CFG		0x1128
+#define MT_INT_TIMER_CFG_PRE_TBTT	GENMASK(15, 0)
+#define MT_INT_TIMER_CFG_GP_TIMER	GENMASK(31, 16)
+
+#define MT_INT_TIMER_EN			0x112c
+#define MT_INT_TIMER_EN_PRE_TBTT_EN	BIT(0)
+#define MT_INT_TIMER_EN_GP_TIMER_EN	BIT(1)
+
+#define MT_MAC_STATUS			0x1200
+#define MT_MAC_STATUS_TX		BIT(0)
+#define MT_MAC_STATUS_RX		BIT(1)
+
+#define MT_PWR_PIN_CFG			0x1204
+#define MT_AUX_CLK_CFG			0x120c
+
+#define MT_BB_PA_MODE_CFG0		0x1214
+#define MT_BB_PA_MODE_CFG1		0x1218
+#define MT_RF_PA_MODE_CFG0		0x121c
+#define MT_RF_PA_MODE_CFG1		0x1220
+
+#define MT_RF_PA_MODE_ADJ0		0x1228
+#define MT_RF_PA_MODE_ADJ1		0x122c
+
+#define MT_DACCLK_EN_DLY_CFG		0x1264
+
+#define MT_EDCA_CFG_BASE		0x1300
+#define MT_EDCA_CFG_AC(_n)		(MT_EDCA_CFG_BASE + ((_n) << 2))
+#define MT_EDCA_CFG_TXOP		GENMASK(7, 0)
+#define MT_EDCA_CFG_AIFSN		GENMASK(11, 8)
+#define MT_EDCA_CFG_CWMIN		GENMASK(15, 12)
+#define MT_EDCA_CFG_CWMAX		GENMASK(19, 16)
+
+#define MT_TX_PWR_CFG_0			0x1314
+#define MT_TX_PWR_CFG_1			0x1318
+#define MT_TX_PWR_CFG_2			0x131c
+#define MT_TX_PWR_CFG_3			0x1320
+#define MT_TX_PWR_CFG_4			0x1324
+
+#define MT_TX_BAND_CFG			0x132c
+#define MT_TX_BAND_CFG_UPPER_40M	BIT(0)
+#define MT_TX_BAND_CFG_5G		BIT(1)
+#define MT_TX_BAND_CFG_2G		BIT(2)
+
+#define MT_HT_FBK_TO_LEGACY		0x1384
+#define MT_TX_MPDU_ADJ_INT		0x1388
+
+#define MT_TX_PWR_CFG_7			0x13d4
+#define MT_TX_PWR_CFG_8			0x13d8
+#define MT_TX_PWR_CFG_9			0x13dc
+
+#define MT_TX_SW_CFG0			0x1330
+#define MT_TX_SW_CFG1			0x1334
+#define MT_TX_SW_CFG2			0x1338
+
+#define MT_TXOP_CTRL_CFG		0x1340
+#define MT_TXOP_TRUN_EN			GENMASK(5, 0)
+#define MT_TXOP_EXT_CCA_DLY		GENMASK(15, 8)
+#define MT_TXOP_CTRL
+
+#define MT_TX_RTS_CFG			0x1344
+#define MT_TX_RTS_CFG_RETRY_LIMIT	GENMASK(7, 0)
+#define MT_TX_RTS_CFG_THRESH		GENMASK(23, 8)
+#define MT_TX_RTS_FALLBACK		BIT(24)
+
+#define MT_TX_TIMEOUT_CFG		0x1348
+#define MT_TX_RETRY_CFG			0x134c
+#define MT_TX_LINK_CFG			0x1350
+#define MT_HT_FBK_CFG0			0x1354
+#define MT_HT_FBK_CFG1			0x1358
+#define MT_LG_FBK_CFG0			0x135c
+#define MT_LG_FBK_CFG1			0x1360
+
+#define MT_CCK_PROT_CFG			0x1364
+#define MT_OFDM_PROT_CFG		0x1368
+#define MT_MM20_PROT_CFG		0x136c
+#define MT_MM40_PROT_CFG		0x1370
+#define MT_GF20_PROT_CFG		0x1374
+#define MT_GF40_PROT_CFG		0x1378
+
+#define MT_PROT_RATE			GENMASK(15, 0)
+#define MT_PROT_CTRL_RTS_CTS		BIT(16)
+#define MT_PROT_CTRL_CTS2SELF		BIT(17)
+#define MT_PROT_NAV_SHORT		BIT(18)
+#define MT_PROT_NAV_LONG		BIT(19)
+#define MT_PROT_TXOP_ALLOW_CCK		BIT(20)
+#define MT_PROT_TXOP_ALLOW_OFDM		BIT(21)
+#define MT_PROT_TXOP_ALLOW_MM20		BIT(22)
+#define MT_PROT_TXOP_ALLOW_MM40		BIT(23)
+#define MT_PROT_TXOP_ALLOW_GF20		BIT(24)
+#define MT_PROT_TXOP_ALLOW_GF40		BIT(25)
+#define MT_PROT_RTS_THR_EN		BIT(26)
+#define MT_PROT_RATE_CCK_11		0x0003
+#define MT_PROT_RATE_OFDM_6		0x4000
+#define MT_PROT_RATE_OFDM_24		0x4004
+#define MT_PROT_RATE_DUP_OFDM_24	0x4084
+#define MT_PROT_TXOP_ALLOW_ALL		GENMASK(25, 20)
+#define MT_PROT_TXOP_ALLOW_BW20		(MT_PROT_TXOP_ALLOW_ALL &	\
+					 ~MT_PROT_TXOP_ALLOW_MM40 &	\
+					 ~MT_PROT_TXOP_ALLOW_GF40)
+
+#define MT_EXP_ACK_TIME			0x1380
+
+#define MT_TX_PWR_CFG_0_EXT		0x1390
+#define MT_TX_PWR_CFG_1_EXT		0x1394
+
+#define MT_TX_FBK_LIMIT			0x1398
+#define MT_TX_FBK_LIMIT_MPDU_FBK	GENMASK(7, 0)
+#define MT_TX_FBK_LIMIT_AMPDU_FBK	GENMASK(15, 8)
+#define MT_TX_FBK_LIMIT_MPDU_UP_CLEAR	BIT(16)
+#define MT_TX_FBK_LIMIT_AMPDU_UP_CLEAR	BIT(17)
+#define MT_TX_FBK_LIMIT_RATE_LUT	BIT(18)
+
+#define MT_TX0_RF_GAIN_CORR		0x13a0
+#define MT_TX1_RF_GAIN_CORR		0x13a4
+#define MT_TX0_RF_GAIN_ATTEN		0x13a8
+
+#define MT_TX_ALC_CFG_0			0x13b0
+#define MT_TX_ALC_CFG_0_CH_INIT_0	GENMASK(5, 0)
+#define MT_TX_ALC_CFG_0_CH_INIT_1	GENMASK(13, 8)
+#define MT_TX_ALC_CFG_0_LIMIT_0		GENMASK(21, 16)
+#define MT_TX_ALC_CFG_0_LIMIT_1		GENMASK(29, 24)
+
+#define MT_TX_ALC_CFG_1			0x13b4
+#define MT_TX_ALC_CFG_1_TEMP_COMP	GENMASK(5, 0)
+
+#define MT_TX_ALC_CFG_2			0x13a8
+#define MT_TX_ALC_CFG_2_TEMP_COMP	GENMASK(5, 0)
+
+#define MT_TX0_BB_GAIN_ATTEN		0x13c0
+
+#define MT_TX_ALC_VGA3			0x13c8
+
+#define MT_TX_PROT_CFG6			0x13e0
+#define MT_TX_PROT_CFG7			0x13e4
+#define MT_TX_PROT_CFG8			0x13e8
+
+#define MT_PIFS_TX_CFG			0x13ec
+
+#define MT_RX_FILTR_CFG			0x1400
+
+#define MT_RX_FILTR_CFG_CRC_ERR		BIT(0)
+#define MT_RX_FILTR_CFG_PHY_ERR		BIT(1)
+#define MT_RX_FILTR_CFG_PROMISC		BIT(2)
+#define MT_RX_FILTR_CFG_OTHER_BSS	BIT(3)
+#define MT_RX_FILTR_CFG_VER_ERR		BIT(4)
+#define MT_RX_FILTR_CFG_MCAST		BIT(5)
+#define MT_RX_FILTR_CFG_BCAST		BIT(6)
+#define MT_RX_FILTR_CFG_DUP		BIT(7)
+#define MT_RX_FILTR_CFG_CFACK		BIT(8)
+#define MT_RX_FILTR_CFG_CFEND		BIT(9)
+#define MT_RX_FILTR_CFG_ACK		BIT(10)
+#define MT_RX_FILTR_CFG_CTS		BIT(11)
+#define MT_RX_FILTR_CFG_RTS		BIT(12)
+#define MT_RX_FILTR_CFG_PSPOLL		BIT(13)
+#define MT_RX_FILTR_CFG_BA		BIT(14)
+#define MT_RX_FILTR_CFG_BAR		BIT(15)
+#define MT_RX_FILTR_CFG_CTRL_RSV	BIT(16)
+
+#define MT_AUTO_RSP_CFG			0x1404
+
+#define MT_AUTO_RSP_PREAMB_SHORT	BIT(4)
+
+#define MT_LEGACY_BASIC_RATE		0x1408
+#define MT_HT_BASIC_RATE		0x140c
+#define MT_HT_CTRL_CFG			0x1410
+#define MT_RX_PARSER_CFG		0x1418
+#define MT_RX_PARSER_RX_SET_NAV_ALL	BIT(0)
+
+#define MT_EXT_CCA_CFG			0x141c
+#define MT_EXT_CCA_CFG_CCA0		GENMASK(1, 0)
+#define MT_EXT_CCA_CFG_CCA1		GENMASK(3, 2)
+#define MT_EXT_CCA_CFG_CCA2		GENMASK(5, 4)
+#define MT_EXT_CCA_CFG_CCA3		GENMASK(7, 6)
+#define MT_EXT_CCA_CFG_CCA_MASK		GENMASK(11, 8)
+#define MT_EXT_CCA_CFG_ED_CCA_MASK	GENMASK(15, 12)
+
+#define MT_TX_SW_CFG3			0x1478
+
+#define MT_PN_PAD_MODE			0x150c
+
+#define MT_TXOP_HLDR_ET			0x1608
+
+#define MT_PROT_AUTO_TX_CFG		0x1648
+
+#define MT_RX_STA_CNT0			0x1700
+#define MT_RX_STA_CNT1			0x1704
+#define MT_RX_STA_CNT2			0x1708
+#define MT_TX_STA_CNT0			0x170c
+#define MT_TX_STA_CNT1			0x1710
+#define MT_TX_STA_CNT2			0x1714
+
+/* Vendor driver defines content of the second word of STAT_FIFO as follows:
+ *	MT_TX_STAT_FIFO_RATE		GENMASK(26, 16)
+ *	MT_TX_STAT_FIFO_ETXBF		BIT(27)
+ *	MT_TX_STAT_FIFO_SND		BIT(28)
+ *	MT_TX_STAT_FIFO_ITXBF		BIT(29)
+ * However, tests show that bits 16-31 have the same layout as the TXWI
+ * rate_ctl field, with the rate set to the rate at which the frame was
+ * ACKed.
+ */
+#define MT_TX_STAT_FIFO			0x1718
+#define MT_TX_STAT_FIFO_VALID		BIT(0)
+#define MT_TX_STAT_FIFO_SUCCESS		BIT(5)
+#define MT_TX_STAT_FIFO_AGGR		BIT(6)
+#define MT_TX_STAT_FIFO_ACKREQ		BIT(7)
+#define MT_TX_STAT_FIFO_WCID		GENMASK(15, 8)
+#define MT_TX_STAT_FIFO_RATE		GENMASK(31, 16)
+
+#define MT_TX_AGG_STAT			0x171c
+
+#define MT_TX_AGG_CNT_BASE0		0x1720
+
+#define MT_MPDU_DENSITY_CNT		0x1740
+
+#define MT_TX_AGG_CNT_BASE1		0x174c
+
+#define MT_TX_AGG_CNT(_id)		((_id) < 8 ?			\
+					 MT_TX_AGG_CNT_BASE0 + ((_id) << 2) : \
+					 MT_TX_AGG_CNT_BASE1 + (((_id) - 8) << 2))
+
+#define MT_TX_STAT_FIFO_EXT		0x1798
+#define MT_TX_STAT_FIFO_EXT_RETRY	GENMASK(7, 0)
+#define MT_TX_STAT_FIFO_EXT_PKTID	GENMASK(15, 8)
+
+#define MT_BBP_CORE_BASE		0x2000
+#define MT_BBP_IBI_BASE			0x2100
+#define MT_BBP_AGC_BASE			0x2300
+#define MT_BBP_TXC_BASE			0x2400
+#define MT_BBP_RXC_BASE			0x2500
+#define MT_BBP_TXO_BASE			0x2600
+#define MT_BBP_TXBE_BASE		0x2700
+#define MT_BBP_RXFE_BASE		0x2800
+#define MT_BBP_RXO_BASE			0x2900
+#define MT_BBP_DFS_BASE			0x2a00
+#define MT_BBP_TR_BASE			0x2b00
+#define MT_BBP_CAL_BASE			0x2c00
+#define MT_BBP_DSC_BASE			0x2e00
+#define MT_BBP_PFMU_BASE		0x2f00
+
+#define MT_BBP(_type, _n)		(MT_BBP_##_type##_BASE + ((_n) << 2))
+
+#define MT_BBP_CORE_R1_BW		GENMASK(4, 3)
+
+#define MT_BBP_AGC_R0_CTRL_CHAN		GENMASK(9, 8)
+#define MT_BBP_AGC_R0_BW		GENMASK(14, 12)
+
+/* AGC, R4/R5 */
+#define MT_BBP_AGC_LNA_GAIN		GENMASK(21, 16)
+
+/* AGC, R8/R9 */
+#define MT_BBP_AGC_GAIN			GENMASK(14, 8)
+
+#define MT_BBP_AGC20_RSSI0		GENMASK(7, 0)
+#define MT_BBP_AGC20_RSSI1		GENMASK(15, 8)
+
+#define MT_BBP_TXBE_R0_CTRL_CHAN	GENMASK(1, 0)
+
+#define MT_WCID_ADDR_BASE		0x1800
+#define MT_WCID_ADDR(_n)		(MT_WCID_ADDR_BASE + (_n) * 8)
+
+#define MT_SRAM_BASE			0x4000
+
+#define MT_WCID_KEY_BASE		0x8000
+#define MT_WCID_KEY(_n)			(MT_WCID_KEY_BASE + (_n) * 32)
+
+#define MT_WCID_IV_BASE			0xa000
+#define MT_WCID_IV(_n)			(MT_WCID_IV_BASE + (_n) * 8)
+
+#define MT_WCID_ATTR_BASE		0xa800
+#define MT_WCID_ATTR(_n)		(MT_WCID_ATTR_BASE + (_n) * 4)
+
+#define MT_WCID_ATTR_PAIRWISE		BIT(0)
+#define MT_WCID_ATTR_PKEY_MODE		GENMASK(3, 1)
+#define MT_WCID_ATTR_BSS_IDX		GENMASK(6, 4)
+#define MT_WCID_ATTR_RXWI_UDF		GENMASK(9, 7)
+#define MT_WCID_ATTR_PKEY_MODE_EXT	BIT(10)
+#define MT_WCID_ATTR_BSS_IDX_EXT	BIT(11)
+#define MT_WCID_ATTR_WAPI_MCBC		BIT(15)
+#define MT_WCID_ATTR_WAPI_KEYID		GENMASK(31, 24)
+
+#define MT_SKEY_BASE_0			0xac00
+#define MT_SKEY_BASE_1			0xb400
+#define MT_SKEY_0(_bss, _idx)		\
+	(MT_SKEY_BASE_0 + (4 * (_bss) + _idx) * 32)
+#define MT_SKEY_1(_bss, _idx)		\
+	(MT_SKEY_BASE_1 + (4 * ((_bss) & 7) + _idx) * 32)
+#define MT_SKEY(_bss, _idx)		\
+	((_bss & 8) ? MT_SKEY_1(_bss, _idx) : MT_SKEY_0(_bss, _idx))
+
+#define MT_SKEY_MODE_BASE_0		0xb000
+#define MT_SKEY_MODE_BASE_1		0xb3f0
+#define MT_SKEY_MODE_0(_bss)		\
+	(MT_SKEY_MODE_BASE_0 + ((_bss / 2) << 2))
+#define MT_SKEY_MODE_1(_bss)		\
+	(MT_SKEY_MODE_BASE_1 + ((((_bss) & 7) / 2) << 2))
+#define MT_SKEY_MODE(_bss)		\
+	((_bss & 8) ? MT_SKEY_MODE_1(_bss) : MT_SKEY_MODE_0(_bss))
+#define MT_SKEY_MODE_MASK		GENMASK(3, 0)
+#define MT_SKEY_MODE_SHIFT(_bss, _idx)	(4 * ((_idx) + 4 * (_bss & 1)))
+
+#define MT_BEACON_BASE			0xc000
+
+#define MT_TEMP_SENSOR			0x1d000
+#define MT_TEMP_SENSOR_VAL		GENMASK(6, 0)
+
+enum mt76_cipher_type {
+	MT_CIPHER_NONE,
+	MT_CIPHER_WEP40,
+	MT_CIPHER_WEP104,
+	MT_CIPHER_TKIP,
+	MT_CIPHER_AES_CCMP,
+	MT_CIPHER_CKIP40,
+	MT_CIPHER_CKIP104,
+	MT_CIPHER_CKIP128,
+	MT_CIPHER_WAPI,
+};
+
+#endif
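
The address-generating macros in this header all compose a fixed base with a scaled index. A few worked values, derived purely from the bases defined above (illustration only, not part of the patch):

/*
 *   MT_TX_AGG_CNT(3) = 0x1720 + (3 << 2)       = 0x172c
 *   MT_TX_AGG_CNT(9) = 0x174c + ((9 - 8) << 2) = 0x1750
 *   MT_WCID_ADDR(5)  = 0x1800 + 5 * 8          = 0x1828
 *   MT_WCID_KEY(2)   = 0x8000 + 2 * 32         = 0x8040
 *   MT_BBP(AGC, 8)   = 0x2300 + (8 << 2)       = 0x2320
 */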
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/trace.c b/drivers/net/wireless/mediatek/mt76/mt76x0/trace.c
new file mode 100644
index 0000000000000000000000000000000000000000..8abdd3cd546d3485fa964dd5ca8739eb9e641130
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/trace.c
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+
+#ifndef __CHECKER__
+#define CREATE_TRACE_POINTS
+#include "trace.h"
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/trace.h b/drivers/net/wireless/mediatek/mt76/mt76x0/trace.h
new file mode 100644
index 0000000000000000000000000000000000000000..8a752a09f2dc164d8ce241dde753858e1b6933d0
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/trace.h
@@ -0,0 +1,313 @@
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#if !defined(__MT76X0U_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define __MT76X0U_TRACE_H
+
+#include <linux/tracepoint.h>
+#include "mt76x0.h"
+#include "mac.h"
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM mt76x0
+
+#define MAXNAME		32
+#define DEV_ENTRY	__array(char, wiphy_name, 32)
+#define DEV_ASSIGN	strlcpy(__entry->wiphy_name,			\
+				wiphy_name(dev->hw->wiphy), MAXNAME)
+#define DEV_PR_FMT	"%s "
+#define DEV_PR_ARG	__entry->wiphy_name
+
+#define REG_ENTRY	__field(u32, reg) __field(u32, val)
+#define REG_ASSIGN	__entry->reg = reg; __entry->val = val
+#define REG_PR_FMT	"%04x=%08x"
+#define REG_PR_ARG	__entry->reg, __entry->val
+
+DECLARE_EVENT_CLASS(dev_reg_evt,
+	TP_PROTO(struct mt76_dev *dev, u32 reg, u32 val),
+	TP_ARGS(dev, reg, val),
+	TP_STRUCT__entry(
+		DEV_ENTRY
+		REG_ENTRY
+	),
+	TP_fast_assign(
+		DEV_ASSIGN;
+		REG_ASSIGN;
+	),
+	TP_printk(
+		DEV_PR_FMT REG_PR_FMT,
+		DEV_PR_ARG, REG_PR_ARG
+	)
+);
+
+DEFINE_EVENT(dev_reg_evt, mt76x0_reg_read,
+	TP_PROTO(struct mt76_dev *dev, u32 reg, u32 val),
+	TP_ARGS(dev, reg, val)
+);
+
+DEFINE_EVENT(dev_reg_evt, mt76x0_reg_write,
+	TP_PROTO(struct mt76_dev *dev, u32 reg, u32 val),
+	TP_ARGS(dev, reg, val)
+);
+
+TRACE_EVENT(mt76x0_submit_urb,
+	TP_PROTO(struct mt76_dev *dev, struct urb *u),
+	TP_ARGS(dev, u),
+	TP_STRUCT__entry(
+		DEV_ENTRY __field(unsigned, pipe) __field(u32, len)
+	),
+	TP_fast_assign(
+		DEV_ASSIGN;
+		__entry->pipe = u->pipe;
+		__entry->len = u->transfer_buffer_length;
+	),
+	TP_printk(DEV_PR_FMT "p:%08x len:%u",
+		  DEV_PR_ARG, __entry->pipe, __entry->len)
+);
+
+#define trace_mt76x0_submit_urb_sync(__dev, __pipe, __len) ({	\
+	struct urb u;					\
+	u.pipe = __pipe;				\
+	u.transfer_buffer_length = __len;		\
+	trace_mt76x0_submit_urb(__dev, &u);			\
+})
+
+TRACE_EVENT(mt76x0_mcu_msg_send,
+	TP_PROTO(struct mt76_dev *dev,
+		 struct sk_buff *skb, u32 csum, bool resp),
+	TP_ARGS(dev, skb, csum, resp),
+	TP_STRUCT__entry(
+		DEV_ENTRY
+		__field(u32, info)
+		__field(u32, csum)
+		__field(bool, resp)
+	),
+	TP_fast_assign(
+		DEV_ASSIGN;
+		__entry->info = *(u32 *)skb->data;
+		__entry->csum = csum;
+		__entry->resp = resp;
+	),
+	TP_printk(DEV_PR_FMT "i:%08x c:%08x r:%d",
+		  DEV_PR_ARG, __entry->info, __entry->csum, __entry->resp)
+);
+
+TRACE_EVENT(mt76x0_vend_req,
+	TP_PROTO(struct mt76_dev *dev, unsigned pipe, u8 req, u8 req_type,
+		 u16 val, u16 offset, void *buf, size_t buflen, int ret),
+	TP_ARGS(dev, pipe, req, req_type, val, offset, buf, buflen, ret),
+	TP_STRUCT__entry(
+		DEV_ENTRY
+		__field(unsigned, pipe) __field(u8, req) __field(u8, req_type)
+		__field(u16, val) __field(u16, offset) __field(void*, buf)
+		__field(int, buflen) __field(int, ret)
+	),
+	TP_fast_assign(
+		DEV_ASSIGN;
+		__entry->pipe = pipe;
+		__entry->req = req;
+		__entry->req_type = req_type;
+		__entry->val = val;
+		__entry->offset = offset;
+		__entry->buf = buf;
+		__entry->buflen = buflen;
+		__entry->ret = ret;
+	),
+	TP_printk(DEV_PR_FMT
+		  "%d p:%08x req:%02hhx %02hhx val:%04hx %04hx buf:%d %d",
+		  DEV_PR_ARG, __entry->ret, __entry->pipe, __entry->req,
+		  __entry->req_type, __entry->val, __entry->offset,
+		  !!__entry->buf, __entry->buflen)
+);
+
+DECLARE_EVENT_CLASS(dev_rf_reg_evt,
+	TP_PROTO(struct mt76_dev *dev, u8 bank, u8 reg, u8 val),
+	TP_ARGS(dev, bank, reg, val),
+	TP_STRUCT__entry(
+		DEV_ENTRY
+		__field(u8, bank)
+		__field(u8, reg)
+		__field(u8, val)
+	),
+	TP_fast_assign(
+		DEV_ASSIGN;
+		REG_ASSIGN;
+		__entry->bank = bank;
+	),
+	TP_printk(
+		DEV_PR_FMT "%02hhx:%02hhx=%02hhx",
+		DEV_PR_ARG, __entry->bank, __entry->reg, __entry->val
+	)
+);
+
+DEFINE_EVENT(dev_rf_reg_evt, mt76x0_rf_read,
+	TP_PROTO(struct mt76_dev *dev, u8 bank, u8 reg, u8 val),
+	TP_ARGS(dev, bank, reg, val)
+);
+
+DEFINE_EVENT(dev_rf_reg_evt, mt76x0_rf_write,
+	TP_PROTO(struct mt76_dev *dev, u8 bank, u8 reg, u8 val),
+	TP_ARGS(dev, bank, reg, val)
+);
+
+DECLARE_EVENT_CLASS(dev_simple_evt,
+	TP_PROTO(struct mt76_dev *dev, u8 val),
+	TP_ARGS(dev, val),
+	TP_STRUCT__entry(
+		DEV_ENTRY
+		__field(u8, val)
+	),
+	TP_fast_assign(
+		DEV_ASSIGN;
+		__entry->val = val;
+	),
+	TP_printk(
+		DEV_PR_FMT "%02hhx", DEV_PR_ARG, __entry->val
+	)
+);
+
+TRACE_EVENT(mt76x0_rx,
+	TP_PROTO(struct mt76_dev *dev, struct mt76x0_rxwi *rxwi, u32 f),
+	TP_ARGS(dev, rxwi, f),
+	TP_STRUCT__entry(
+		DEV_ENTRY
+		__field_struct(struct mt76x0_rxwi, rxwi)
+		__field(u32, fce_info)
+	),
+	TP_fast_assign(
+		DEV_ASSIGN;
+		__entry->rxwi = *rxwi;
+		__entry->fce_info = f;
+	),
+	TP_printk(DEV_PR_FMT "rxi:%08x ctl:%08x", DEV_PR_ARG,
+		  le32_to_cpu(__entry->rxwi.rxinfo),
+		  le32_to_cpu(__entry->rxwi.ctl))
+);
+
+TRACE_EVENT(mt76x0_tx,
+	TP_PROTO(struct mt76_dev *dev, struct sk_buff *skb,
+		 struct mt76_sta *sta, struct mt76_txwi *h),
+	TP_ARGS(dev, skb, sta, h),
+	TP_STRUCT__entry(
+		DEV_ENTRY
+		__field_struct(struct mt76_txwi, h)
+		__field(struct sk_buff *, skb)
+		__field(struct mt76_sta *, sta)
+	),
+	TP_fast_assign(
+		DEV_ASSIGN;
+		__entry->h = *h;
+		__entry->skb = skb;
+		__entry->sta = sta;
+	),
+	TP_printk(DEV_PR_FMT "skb:%p sta:%p  flg:%04hx rate_ctl:%04hx "
+		  "ack:%02hhx wcid:%02hhx len_ctl:%05hx", DEV_PR_ARG,
+		  __entry->skb, __entry->sta,
+		  le16_to_cpu(__entry->h.flags),
+		  le16_to_cpu(__entry->h.rate_ctl),
+		  __entry->h.ack_ctl, __entry->h.wcid,
+		  le16_to_cpu(__entry->h.len_ctl))
+);
+
+TRACE_EVENT(mt76x0_tx_dma_done,
+	TP_PROTO(struct mt76_dev *dev, struct sk_buff *skb),
+	TP_ARGS(dev, skb),
+	TP_STRUCT__entry(
+		DEV_ENTRY
+		__field(struct sk_buff *, skb)
+	),
+	TP_fast_assign(
+		DEV_ASSIGN;
+		__entry->skb = skb;
+	),
+	TP_printk(DEV_PR_FMT "%p", DEV_PR_ARG, __entry->skb)
+);
+
+TRACE_EVENT(mt76x0_tx_status_cleaned,
+	TP_PROTO(struct mt76_dev *dev, int cleaned),
+	TP_ARGS(dev, cleaned),
+	TP_STRUCT__entry(
+		DEV_ENTRY
+		__field(int, cleaned)
+	),
+	TP_fast_assign(
+		DEV_ASSIGN;
+		__entry->cleaned = cleaned;
+	),
+	TP_printk(DEV_PR_FMT "%d", DEV_PR_ARG, __entry->cleaned)
+);
+
+TRACE_EVENT(mt76x0_tx_status,
+	TP_PROTO(struct mt76_dev *dev, u32 stat1, u32 stat2),
+	TP_ARGS(dev, stat1, stat2),
+	TP_STRUCT__entry(
+		DEV_ENTRY
+		__field(u32, stat1)	__field(u32, stat2)
+	),
+	TP_fast_assign(
+		DEV_ASSIGN;
+		__entry->stat1 = stat1;
+		__entry->stat2 = stat2;
+	),
+	TP_printk(DEV_PR_FMT "%08x %08x",
+		  DEV_PR_ARG, __entry->stat1, __entry->stat2)
+);
+
+TRACE_EVENT(mt76x0_rx_dma_aggr,
+	TP_PROTO(struct mt76_dev *dev, int cnt, bool paged),
+	TP_ARGS(dev, cnt, paged),
+	TP_STRUCT__entry(
+		DEV_ENTRY
+		__field(u8, cnt)
+		__field(bool, paged)
+	),
+	TP_fast_assign(
+		DEV_ASSIGN;
+		__entry->cnt = cnt;
+		__entry->paged = paged;
+	),
+	TP_printk(DEV_PR_FMT "cnt:%d paged:%d",
+		  DEV_PR_ARG, __entry->cnt, __entry->paged)
+);
+
+DEFINE_EVENT(dev_simple_evt, mt76x0_set_key,
+	TP_PROTO(struct mt76_dev *dev, u8 val),
+	TP_ARGS(dev, val)
+);
+
+TRACE_EVENT(mt76x0_set_shared_key,
+	TP_PROTO(struct mt76_dev *dev, u8 vid, u8 key),
+	TP_ARGS(dev, vid, key),
+	TP_STRUCT__entry(
+		DEV_ENTRY
+		__field(u8, vid)
+		__field(u8, key)
+	),
+	TP_fast_assign(
+		DEV_ASSIGN;
+		__entry->vid = vid;
+		__entry->key = key;
+	),
+	TP_printk(DEV_PR_FMT "phy:%02hhx off:%02hhx",
+		  DEV_PR_ARG, __entry->vid, __entry->key)
+);
+
+#endif
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace
+
+#include <trace/define_trace.h>
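
For readers unfamiliar with the tracepoint machinery: each DECLARE_EVENT_CLASS/DEFINE_EVENT/TRACE_EVENT above generates a trace_<name>() helper that the driver calls at the instrumented spot. The register accessors in usb.c further down, for example, end with calls of the following shape (shown here only to connect the two files):

/* generated by DEFINE_EVENT(dev_reg_evt, mt76x0_reg_read/_write, ...) */
trace_mt76x0_reg_read(dev, offset, val);
trace_mt76x0_reg_write(dev, offset, val);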
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/tx.c b/drivers/net/wireless/mediatek/mt76/mt76x0/tx.c
new file mode 100644
index 0000000000000000000000000000000000000000..751b49c28ae53f0fbe255ac6d6a3216e1a4eef1a
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/tx.c
@@ -0,0 +1,270 @@
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "mt76x0.h"
+#include "trace.h"
+
+/* Take mac80211 Q id from the skb and translate it to hardware Q id */
+static u8 skb2q(struct sk_buff *skb)
+{
+	int qid = skb_get_queue_mapping(skb);
+
+	if (WARN_ON(qid >= MT_TXQ_PSD)) {
+		qid = MT_TXQ_BE;
+		skb_set_queue_mapping(skb, qid);
+	}
+
+	return q2hwq(qid);
+}
+
+static void mt76x0_tx_skb_remove_dma_overhead(struct sk_buff *skb,
+					       struct ieee80211_tx_info *info)
+{
+	int pkt_len = (unsigned long)info->status.status_driver_data[0];
+
+	skb_pull(skb, sizeof(struct mt76_txwi) + 4);
+	if (ieee80211_get_hdrlen_from_skb(skb) % 4)
+		mt76x0_remove_hdr_pad(skb);
+
+	skb_trim(skb, pkt_len);
+}
+
+void mt76x0_tx_status(struct mt76x0_dev *dev, struct sk_buff *skb)
+{
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+
+	mt76x0_tx_skb_remove_dma_overhead(skb, info);
+
+	ieee80211_tx_info_clear_status(info);
+	info->status.rates[0].idx = -1;
+	info->flags |= IEEE80211_TX_STAT_ACK;
+
+	spin_lock(&dev->mac_lock);
+	ieee80211_tx_status(dev->mt76.hw, skb);
+	spin_unlock(&dev->mac_lock);
+}
+
+static int mt76x0_skb_rooms(struct mt76x0_dev *dev, struct sk_buff *skb)
+{
+	int hdr_len = ieee80211_get_hdrlen_from_skb(skb);
+	u32 need_head;
+
+	need_head = sizeof(struct mt76_txwi) + 4;
+	if (hdr_len % 4)
+		need_head += 2;
+
+	return skb_cow(skb, need_head);
+}
+
+static struct mt76_txwi *
+mt76x0_push_txwi(struct mt76x0_dev *dev, struct sk_buff *skb,
+		  struct ieee80211_sta *sta, struct mt76_wcid *wcid,
+		  int pkt_len)
+{
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+	struct ieee80211_tx_rate *rate = &info->control.rates[0];
+	struct mt76_txwi *txwi;
+	unsigned long flags;
+	u16 txwi_flags = 0;
+	u32 pkt_id;
+	u16 rate_ctl;
+	u8 nss;
+
+	txwi = (struct mt76_txwi *)skb_push(skb, sizeof(struct mt76_txwi));
+	memset(txwi, 0, sizeof(*txwi));
+
+	if (!wcid->tx_rate_set)
+		ieee80211_get_tx_rates(info->control.vif, sta, skb,
+				       info->control.rates, 1);
+
+	spin_lock_irqsave(&dev->mt76.lock, flags);
+	if (rate->idx < 0 || !rate->count) {
+		rate_ctl = wcid->tx_rate;
+		nss = wcid->tx_rate_nss;
+	} else {
+		rate_ctl = mt76x0_mac_tx_rate_val(dev, rate, &nss);
+	}
+	spin_unlock_irqrestore(&dev->mt76.lock, flags);
+
+	txwi->rate_ctl = cpu_to_le16(rate_ctl);
+
+	if (info->flags & IEEE80211_TX_CTL_LDPC)
+		txwi->rate_ctl |= cpu_to_le16(MT_RXWI_RATE_LDPC);
+	if ((info->flags & IEEE80211_TX_CTL_STBC) && nss == 1)
+		txwi->rate_ctl |= cpu_to_le16(MT_RXWI_RATE_STBC);
+	if (nss > 1 && sta && sta->smps_mode == IEEE80211_SMPS_DYNAMIC)
+		txwi_flags |= MT_TXWI_FLAGS_MMPS;
+
+	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
+		txwi->ack_ctl |= MT_TXWI_ACK_CTL_REQ;
+		pkt_id = 1;
+	} else {
+		pkt_id = 0;
+	}
+
+	if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
+		pkt_id |= MT_TXWI_PKTID_PROBE;
+
+	if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
+		txwi->ack_ctl |= MT_TXWI_ACK_CTL_NSEQ;
+
+	if ((info->flags & IEEE80211_TX_CTL_AMPDU) && sta) {
+		u8 ba_size = IEEE80211_MIN_AMPDU_BUF;
+
+		ba_size <<= sta->ht_cap.ampdu_factor;
+		ba_size = min_t(int, 7, ba_size - 1);
+		if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) {
+			ba_size = 0;
+		} else {
+			txwi_flags |= MT_TXWI_FLAGS_AMPDU;
+			txwi_flags |= FIELD_PREP(MT_TXWI_FLAGS_MPDU_DENSITY,
+						 sta->ht_cap.ampdu_density);
+		}
+		txwi->ack_ctl |= FIELD_PREP(MT_TXWI_ACK_CTL_BA_WINDOW, ba_size);
+	}
+
+	txwi->wcid = wcid->idx;
+	txwi->flags |= cpu_to_le16(txwi_flags);
+	txwi->len_ctl = cpu_to_le16(pkt_len);
+	txwi->pktid = pkt_id;
+
+	return txwi;
+}
+
+void mt76x0_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
+		struct sk_buff *skb)
+{
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+	struct mt76x0_dev *dev = hw->priv;
+	struct ieee80211_vif *vif = info->control.vif;
+	struct ieee80211_sta *sta = control->sta;
+	struct mt76_sta *msta = NULL;
+	struct mt76_wcid *wcid = dev->mon_wcid;
+	struct mt76_txwi *txwi;
+	int pkt_len = skb->len;
+	int hw_q = skb2q(skb);
+
+	BUILD_BUG_ON(ARRAY_SIZE(info->status.status_driver_data) < 1);
+	info->status.status_driver_data[0] = (void *)(unsigned long)pkt_len;
+
+	if (mt76x0_skb_rooms(dev, skb) || mt76x0_insert_hdr_pad(skb)) {
+		ieee80211_free_txskb(dev->mt76.hw, skb);
+		return;
+	}
+
+	if (sta) {
+		msta = (struct mt76_sta *) sta->drv_priv;
+		wcid = &msta->wcid;
+	} else if (vif && (!info->control.hw_key && wcid->hw_key_idx != -1)) {
+		struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
+
+		wcid = &mvif->group_wcid;
+	}
+
+	txwi = mt76x0_push_txwi(dev, skb, sta, wcid, pkt_len);
+
+	if (mt76x0_dma_enqueue_tx(dev, skb, wcid, hw_q))
+		return;
+
+	trace_mt76x0_tx(&dev->mt76, skb, msta, txwi);
+}
+
+void mt76x0_tx_stat(struct work_struct *work)
+{
+	struct mt76x0_dev *dev = container_of(work, struct mt76x0_dev,
+					       stat_work.work);
+	struct mt76_tx_status stat;
+	unsigned long flags;
+	int cleaned = 0;
+	u8 update = 1;
+
+	while (!test_bit(MT76_REMOVED, &dev->mt76.state)) {
+		stat = mt76x0_mac_fetch_tx_status(dev);
+		if (!stat.valid)
+			break;
+
+		mt76x0_send_tx_status(dev, &stat, &update);
+
+		cleaned++;
+	}
+	trace_mt76x0_tx_status_cleaned(&dev->mt76, cleaned);
+
+	spin_lock_irqsave(&dev->tx_lock, flags);
+	if (cleaned)
+		queue_delayed_work(dev->stat_wq, &dev->stat_work,
+				   msecs_to_jiffies(10));
+	else if (test_and_clear_bit(MT76_MORE_STATS, &dev->mt76.state))
+		queue_delayed_work(dev->stat_wq, &dev->stat_work,
+				   msecs_to_jiffies(20));
+	else
+		clear_bit(MT76_READING_STATS, &dev->mt76.state);
+	spin_unlock_irqrestore(&dev->tx_lock, flags);
+}
+
+int mt76x0_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+		    u16 queue, const struct ieee80211_tx_queue_params *params)
+{
+	struct mt76x0_dev *dev = hw->priv;
+	u8 cw_min = 5, cw_max = 10, hw_q = q2hwq(queue);
+	u32 val;
+
+	/* TODO: should we do funny things with the parameters?
+	 *	 See what mt76x0_set_default_edca() used to do in init.c.
+	 */
+
+	if (params->cw_min)
+		cw_min = fls(params->cw_min);
+	if (params->cw_max)
+		cw_max = fls(params->cw_max);
+
+	WARN_ON(params->txop > 0xff);
+	WARN_ON(params->aifs > 0xf);
+	WARN_ON(cw_min > 0xf);
+	WARN_ON(cw_max > 0xf);
+
+	val = FIELD_PREP(MT_EDCA_CFG_AIFSN, params->aifs) |
+	      FIELD_PREP(MT_EDCA_CFG_CWMIN, cw_min) |
+	      FIELD_PREP(MT_EDCA_CFG_CWMAX, cw_max);
+	/* TODO: based on the user-controlled EnableTxBurst variable, the
+	 *	 vendor driver sets a very long TXOP on AC0 (see
+	 *	 connect.c:2009), but apparently only while connected; when
+	 *	 not connected it should be 0.
+	 */
+	if (!hw_q)
+		val |= 0x60;
+	else
+		val |= FIELD_PREP(MT_EDCA_CFG_TXOP, params->txop);
+	mt76_wr(dev, MT_EDCA_CFG_AC(hw_q), val);
+
+	val = mt76_rr(dev, MT_WMM_TXOP(hw_q));
+	val &= ~(MT_WMM_TXOP_MASK << MT_WMM_TXOP_SHIFT(hw_q));
+	val |= params->txop << MT_WMM_TXOP_SHIFT(hw_q);
+	mt76_wr(dev, MT_WMM_TXOP(hw_q), val);
+
+	val = mt76_rr(dev, MT_WMM_AIFSN);
+	val &= ~(MT_WMM_AIFSN_MASK << MT_WMM_AIFSN_SHIFT(hw_q));
+	val |= params->aifs << MT_WMM_AIFSN_SHIFT(hw_q);
+	mt76_wr(dev, MT_WMM_AIFSN, val);
+
+	val = mt76_rr(dev, MT_WMM_CWMIN);
+	val &= ~(MT_WMM_CWMIN_MASK << MT_WMM_CWMIN_SHIFT(hw_q));
+	val |= cw_min << MT_WMM_CWMIN_SHIFT(hw_q);
+	mt76_wr(dev, MT_WMM_CWMIN, val);
+
+	val = mt76_rr(dev, MT_WMM_CWMAX);
+	val &= ~(MT_WMM_CWMAX_MASK << MT_WMM_CWMAX_SHIFT(hw_q));
+	val |= cw_max << MT_WMM_CWMAX_SHIFT(hw_q);
+	mt76_wr(dev, MT_WMM_CWMAX, val);
+
+	return 0;
+}
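
mac80211 hands mt76x0_conf_tx() contention windows of the form 2^n - 1, while the EDCA registers take the exponent, which is why the code stores fls() of the parameters. Two representative conversions (arithmetic only, for illustration):

/*
 *   params->cw_min = 15   -> fls(15)   = 4
 *   params->cw_max = 1023 -> fls(1023) = 10
 *
 * Both exponents fit the 4-bit CWMIN/CWMAX register fields guarded by the
 * WARN_ONs above.
 */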
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
new file mode 100644
index 0000000000000000000000000000000000000000..54ae1f113be23dd51b1ab7fafb79bd3d991af893
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
@@ -0,0 +1,381 @@
+/*
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/usb.h>
+
+#include "mt76x0.h"
+#include "usb.h"
+#include "trace.h"
+
+static const struct usb_device_id mt76x0_device_table[] = {
+	{ USB_DEVICE(0x148F, 0x7610) },	/* MT7610U */
+	{ USB_DEVICE(0x13B1, 0x003E) },	/* Linksys AE6000 */
+	{ USB_DEVICE(0x0E8D, 0x7610) },	/* Sabrent NTWLAC */
+	{ USB_DEVICE(0x7392, 0xa711) },	/* Edimax 7711mac */
+	{ USB_DEVICE(0x7392, 0xb711) },	/* Edimax / Elecom  */
+	{ USB_DEVICE(0x148f, 0x761a) },	/* TP-Link TL-WDN5200 */
+	{ USB_DEVICE(0x148f, 0x760a) },	/* TP-Link unknown */
+	{ USB_DEVICE(0x0b05, 0x17d1) },	/* Asus USB-AC51 */
+	{ USB_DEVICE(0x0b05, 0x17db) },	/* Asus USB-AC50 */
+	{ USB_DEVICE(0x0df6, 0x0075) },	/* Sitecom WLA-3100 */
+	{ USB_DEVICE(0x2019, 0xab31) },	/* Planex GW-450D */
+	{ USB_DEVICE(0x2001, 0x3d02) },	/* D-LINK DWA-171 rev B1 */
+	{ USB_DEVICE(0x0586, 0x3425) },	/* Zyxel NWD6505 */
+	{ USB_DEVICE(0x07b8, 0x7610) },	/* AboCom AU7212 */
+	{ USB_DEVICE(0x04bb, 0x0951) },	/* I-O DATA WN-AC433UK */
+	{ USB_DEVICE(0x057c, 0x8502) },	/* AVM FRITZ!WLAN USB Stick AC 430 */
+	{ USB_DEVICE(0x293c, 0x5702) },	/* Comcast Xfinity KXW02AAA  */
+	{ USB_DEVICE(0x20f4, 0x806b) },	/* TRENDnet TEW-806UBH  */
+	{ USB_DEVICE(0x7392, 0xc711) }, /* Devolo Wifi ac Stick */
+	{ USB_DEVICE(0x0df6, 0x0079) }, /* Sitecom Europe B.V. ac  Stick */
+	{ USB_DEVICE(0x2357, 0x0105) }, /* TP-LINK Archer T1U */
+	{ USB_DEVICE_AND_INTERFACE_INFO(0x0E8D, 0x7630, 0xff, 0x2, 0xff)}, /* MT7630U */
+	{ USB_DEVICE_AND_INTERFACE_INFO(0x0E8D, 0x7650, 0xff, 0x2, 0xff)}, /* MT7650U */
+	{ 0, }
+};
+
+bool mt76x0_usb_alloc_buf(struct mt76x0_dev *dev, size_t len,
+			   struct mt76x0_dma_buf *buf)
+{
+	struct usb_device *usb_dev = mt76x0_to_usb_dev(dev);
+
+	buf->len = len;
+	buf->urb = usb_alloc_urb(0, GFP_KERNEL);
+	buf->buf = usb_alloc_coherent(usb_dev, buf->len, GFP_KERNEL, &buf->dma);
+
+	return !buf->urb || !buf->buf;
+}
+
+void mt76x0_usb_free_buf(struct mt76x0_dev *dev, struct mt76x0_dma_buf *buf)
+{
+	struct usb_device *usb_dev = mt76x0_to_usb_dev(dev);
+
+	usb_free_coherent(usb_dev, buf->len, buf->buf, buf->dma);
+	usb_free_urb(buf->urb);
+}
+
+int mt76x0_usb_submit_buf(struct mt76x0_dev *dev, int dir, int ep_idx,
+			   struct mt76x0_dma_buf *buf, gfp_t gfp,
+			   usb_complete_t complete_fn, void *context)
+{
+	struct usb_device *usb_dev = mt76x0_to_usb_dev(dev);
+	unsigned pipe;
+	int ret;
+
+	if (dir == USB_DIR_IN)
+		pipe = usb_rcvbulkpipe(usb_dev, dev->in_ep[ep_idx]);
+	else
+		pipe = usb_sndbulkpipe(usb_dev, dev->out_ep[ep_idx]);
+
+	usb_fill_bulk_urb(buf->urb, usb_dev, pipe, buf->buf, buf->len,
+			  complete_fn, context);
+	buf->urb->transfer_dma = buf->dma;
+	buf->urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+
+	trace_mt76x0_submit_urb(&dev->mt76, buf->urb);
+	ret = usb_submit_urb(buf->urb, gfp);
+	if (ret)
+		dev_err(dev->mt76.dev, "Error: submit URB dir:%d ep:%d failed:%d\n",
+			dir, ep_idx, ret);
+	return ret;
+}
+
+void mt76x0_complete_urb(struct urb *urb)
+{
+	struct completion *cmpl = urb->context;
+
+	complete(cmpl);
+}
+
+int mt76x0_vendor_request(struct mt76x0_dev *dev, const u8 req,
+			   const u8 direction, const u16 val, const u16 offset,
+			   void *buf, const size_t buflen)
+{
+	int i, ret;
+	struct usb_device *usb_dev = mt76x0_to_usb_dev(dev);
+	const u8 req_type = direction | USB_TYPE_VENDOR | USB_RECIP_DEVICE;
+	const unsigned int pipe = (direction == USB_DIR_IN) ?
+		usb_rcvctrlpipe(usb_dev, 0) : usb_sndctrlpipe(usb_dev, 0);
+
+	for (i = 0; i < MT_VEND_REQ_MAX_RETRY; i++) {
+		ret = usb_control_msg(usb_dev, pipe, req, req_type,
+				      val, offset, buf, buflen,
+				      MT_VEND_REQ_TOUT_MS);
+		trace_mt76x0_vend_req(&dev->mt76, pipe, req, req_type, val, offset,
+				  buf, buflen, ret);
+
+		if (ret == -ENODEV)
+			set_bit(MT76_REMOVED, &dev->mt76.state);
+		if (ret >= 0 || ret == -ENODEV)
+			return ret;
+
+		msleep(5);
+	}
+
+	dev_err(dev->mt76.dev, "Vendor request req:%02x off:%04x failed:%d\n",
+		req, offset, ret);
+
+	return ret;
+}
+
+void mt76x0_vendor_reset(struct mt76x0_dev *dev)
+{
+	mt76x0_vendor_request(dev, MT_VEND_DEV_MODE, USB_DIR_OUT,
+			      MT_VEND_DEV_MODE_RESET, 0, NULL, 0);
+}
+
+static u32 mt76x0_rr(struct mt76_dev *dev, u32 offset)
+{
+	struct mt76x0_dev *mdev = (struct mt76x0_dev *) dev;
+	int ret;
+	u32 val = ~0;
+
+	WARN_ONCE(offset > USHRT_MAX, "read high off:%08x", offset);
+
+	mutex_lock(&mdev->usb_ctrl_mtx);
+
+	ret = mt76x0_vendor_request((struct mt76x0_dev *)dev, MT_VEND_MULTI_READ, USB_DIR_IN,
+				    0, offset, mdev->data, MT_VEND_BUF);
+	if (ret == MT_VEND_BUF)
+		val = get_unaligned_le32(mdev->data);
+	else if (ret > 0)
+		dev_err(dev->dev, "Error: wrong size read:%d off:%08x\n",
+			ret, offset);
+
+	mutex_unlock(&mdev->usb_ctrl_mtx);
+
+	trace_mt76x0_reg_read(dev, offset, val);
+	return val;
+}
+
+int mt76x0_vendor_single_wr(struct mt76x0_dev *dev, const u8 req,
+			     const u16 offset, const u32 val)
+{
+	struct mt76x0_dev *mdev = dev;
+	int ret;
+
+	mutex_lock(&mdev->usb_ctrl_mtx);
+
+	ret = mt76x0_vendor_request(dev, req, USB_DIR_OUT,
+				    val & 0xffff, offset, NULL, 0);
+	if (!ret)
+		ret = mt76x0_vendor_request(dev, req, USB_DIR_OUT,
+					    val >> 16, offset + 2, NULL, 0);
+
+	mutex_unlock(&mdev->usb_ctrl_mtx);
+
+	return ret;
+}
+
+static void mt76x0_wr(struct mt76_dev *dev, u32 offset, u32 val)
+{
+	struct mt76x0_dev *mdev = (struct mt76x0_dev *) dev;
+	int ret;
+
+	WARN_ONCE(offset > USHRT_MAX, "write high off:%08x", offset);
+
+	mutex_lock(&mdev->usb_ctrl_mtx);
+
+	put_unaligned_le32(val, mdev->data);
+	ret = mt76x0_vendor_request(mdev, MT_VEND_MULTI_WRITE, USB_DIR_OUT,
+				    0, offset, mdev->data, MT_VEND_BUF);
+	trace_mt76x0_reg_write(dev, offset, val);
+
+	mutex_unlock(&mdev->usb_ctrl_mtx);
+}
+
+static u32 mt76x0_rmw(struct mt76_dev *dev, u32 offset, u32 mask, u32 val)
+{
+	val |= mt76x0_rr(dev, offset) & ~mask;
+	mt76x0_wr(dev, offset, val);
+	return val;
+}
+
+static void mt76x0_wr_copy(struct mt76_dev *dev, u32 offset,
+			   const void *data, int len)
+{
+	WARN_ONCE(offset & 3, "unaligned write copy off:%08x", offset);
+	WARN_ONCE(len & 3, "short write copy off:%08x", offset);
+
+	mt76x0_burst_write_regs((struct mt76x0_dev *) dev, offset, data, len / 4);
+}
+
+void mt76x0_addr_wr(struct mt76x0_dev *dev, const u32 offset, const u8 *addr)
+{
+	mt76_wr(dev, offset, get_unaligned_le32(addr));
+	mt76_wr(dev, offset + 4, addr[4] | addr[5] << 8);
+}
+
+static int mt76x0_assign_pipes(struct usb_interface *usb_intf,
+				struct mt76x0_dev *dev)
+{
+	struct usb_endpoint_descriptor *ep_desc;
+	struct usb_host_interface *intf_desc = usb_intf->cur_altsetting;
+	unsigned i, ep_i = 0, ep_o = 0;
+
+	BUILD_BUG_ON(sizeof(dev->in_ep) < __MT_EP_IN_MAX);
+	BUILD_BUG_ON(sizeof(dev->out_ep) < __MT_EP_OUT_MAX);
+
+	for (i = 0; i < intf_desc->desc.bNumEndpoints; i++) {
+		ep_desc = &intf_desc->endpoint[i].desc;
+
+		if (usb_endpoint_is_bulk_in(ep_desc) &&
+		    ep_i++ < __MT_EP_IN_MAX) {
+			dev->in_ep[ep_i - 1] = usb_endpoint_num(ep_desc);
+			dev->in_max_packet = usb_endpoint_maxp(ep_desc);
+			/* Note: this is ignored by the USB subsystem but the
+			 *	 vendor code does it. We can drop it at some
+			 *	 point.
+			 */
+			dev->in_ep[ep_i - 1] |= USB_DIR_IN;
+		} else if (usb_endpoint_is_bulk_out(ep_desc) &&
+			   ep_o++ < __MT_EP_OUT_MAX) {
+			dev->out_ep[ep_o - 1] = usb_endpoint_num(ep_desc);
+			dev->out_max_packet = usb_endpoint_maxp(ep_desc);
+		}
+	}
+
+	if (ep_i != __MT_EP_IN_MAX || ep_o != __MT_EP_OUT_MAX) {
+		dev_err(dev->mt76.dev, "Error: wrong pipe number in:%d out:%d\n",
+			ep_i, ep_o);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int mt76x0_probe(struct usb_interface *usb_intf,
+			 const struct usb_device_id *id)
+{
+	struct usb_device *usb_dev = interface_to_usbdev(usb_intf);
+	struct mt76x0_dev *dev;
+	u32 asic_rev, mac_rev;
+	int ret;
+	static const struct mt76_bus_ops usb_ops = {
+		.rr = mt76x0_rr,
+		.wr = mt76x0_wr,
+		.rmw = mt76x0_rmw,
+		.copy = mt76x0_wr_copy,
+	};
+
+	dev = mt76x0_alloc_device(&usb_intf->dev);
+	if (!dev)
+		return -ENOMEM;
+
+	usb_dev = usb_get_dev(usb_dev);
+	usb_reset_device(usb_dev);
+
+	usb_set_intfdata(usb_intf, dev);
+
+	dev->mt76.bus = &usb_ops;
+
+	ret = mt76x0_assign_pipes(usb_intf, dev);
+	if (ret)
+		goto err;
+
+	/* Disable the HW, otherwise the MCU fails to initialize on hot reboot */
+	mt76x0_chip_onoff(dev, false, false);
+
+	ret = mt76x0_wait_asic_ready(dev);
+	if (ret)
+		goto err;
+
+	asic_rev = mt76_rr(dev, MT_ASIC_VERSION);
+	mac_rev = mt76_rr(dev, MT_MAC_CSR0);
+	dev_info(dev->mt76.dev, "ASIC revision: %08x MAC revision: %08x\n",
+		 asic_rev, mac_rev);
+
+	/* Note: vendor driver skips this check for MT76X0U */
+	if (!(mt76_rr(dev, MT_EFUSE_CTRL) & MT_EFUSE_CTRL_SEL))
+		dev_warn(dev->mt76.dev, "Warning: eFUSE not present\n");
+
+	ret = mt76x0_init_hardware(dev);
+	if (ret)
+		goto err;
+
+	ret = mt76x0_register_device(dev);
+	if (ret)
+		goto err_hw;
+
+	set_bit(MT76_STATE_INITIALIZED, &dev->mt76.state);
+
+	return 0;
+err_hw:
+	mt76x0_cleanup(dev);
+err:
+	usb_set_intfdata(usb_intf, NULL);
+	usb_put_dev(interface_to_usbdev(usb_intf));
+
+	destroy_workqueue(dev->stat_wq);
+	ieee80211_free_hw(dev->mt76.hw);
+	return ret;
+}
+
+static void mt76x0_disconnect(struct usb_interface *usb_intf)
+{
+	struct mt76x0_dev *dev = usb_get_intfdata(usb_intf);
+	bool initialized = test_bit(MT76_STATE_INITIALIZED, &dev->mt76.state);
+
+	if (!initialized)
+		return;
+
+	ieee80211_unregister_hw(dev->mt76.hw);
+	mt76x0_cleanup(dev);
+
+	usb_set_intfdata(usb_intf, NULL);
+	usb_put_dev(interface_to_usbdev(usb_intf));
+
+	destroy_workqueue(dev->stat_wq);
+	ieee80211_free_hw(dev->mt76.hw);
+}
+
+static int mt76x0_suspend(struct usb_interface *usb_intf, pm_message_t state)
+{
+	struct mt76x0_dev *dev = usb_get_intfdata(usb_intf);
+
+	mt76x0_cleanup(dev);
+
+	return 0;
+}
+
+static int mt76x0_resume(struct usb_interface *usb_intf)
+{
+	struct mt76x0_dev *dev = usb_get_intfdata(usb_intf);
+	int ret;
+
+	ret = mt76x0_init_hardware(dev);
+	if (ret)
+		return ret;
+
+	set_bit(MT76_STATE_INITIALIZED, &dev->mt76.state);
+
+	return 0;
+}
+
+MODULE_DEVICE_TABLE(usb, mt76x0_device_table);
+MODULE_FIRMWARE(MT7610_FIRMWARE);
+MODULE_LICENSE("GPL");
+
+static struct usb_driver mt76x0_driver = {
+	.name		= KBUILD_MODNAME,
+	.id_table	= mt76x0_device_table,
+	.probe		= mt76x0_probe,
+	.disconnect	= mt76x0_disconnect,
+	.suspend	= mt76x0_suspend,
+	.resume		= mt76x0_resume,
+	.reset_resume	= mt76x0_resume,
+	.soft_unbind	= 1,
+	.disable_hub_initiated_lpm = 1,
+};
+module_usb_driver(mt76x0_driver);
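
A note on mt76x0_vendor_single_wr() above: it splits a 32-bit register value into two 16-bit vendor control transfers, low half first, carrying the data in wValue and the register address in wIndex. With a hypothetical offset of 0x1004:

/*
 *   mt76x0_vendor_single_wr(dev, req, 0x1004, 0xdeadbeef)
 *     -> vendor request: val = 0xbeef, offset = 0x1004
 *     -> vendor request: val = 0xdead, offset = 0x1006
 */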
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.h b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.h
new file mode 100644
index 0000000000000000000000000000000000000000..492e431390a8707867a30c87727ba735497232b3
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MT76X0U_USB_H
+#define __MT76X0U_USB_H
+
+#include "mt76x0.h"
+
+#define MT7610_FIRMWARE	"mediatek/mt7610u.bin"
+
+#define MT_VEND_REQ_MAX_RETRY	10
+#define MT_VEND_REQ_TOUT_MS	300
+
+#define MT_VEND_DEV_MODE_RESET	1
+
+#define MT_VEND_BUF		sizeof(__le32)
+
+static inline struct usb_device *mt76x0_to_usb_dev(struct mt76x0_dev *mt76x0)
+{
+	return interface_to_usbdev(to_usb_interface(mt76x0->mt76.dev));
+}
+
+static inline struct usb_device *mt76_to_usb_dev(struct mt76_dev *mt76)
+{
+	return interface_to_usbdev(to_usb_interface(mt76->dev));
+}
+
+static inline bool mt76x0_urb_has_error(struct urb *urb)
+{
+	return urb->status &&
+		urb->status != -ENOENT &&
+		urb->status != -ECONNRESET &&
+		urb->status != -ESHUTDOWN;
+}
+
+bool mt76x0_usb_alloc_buf(struct mt76x0_dev *dev, size_t len,
+			   struct mt76x0_dma_buf *buf);
+void mt76x0_usb_free_buf(struct mt76x0_dev *dev, struct mt76x0_dma_buf *buf);
+int mt76x0_usb_submit_buf(struct mt76x0_dev *dev, int dir, int ep_idx,
+			   struct mt76x0_dma_buf *buf, gfp_t gfp,
+			   usb_complete_t complete_fn, void *context);
+void mt76x0_complete_urb(struct urb *urb);
+
+int mt76x0_vendor_request(struct mt76x0_dev *dev, const u8 req,
+			   const u8 direction, const u16 val, const u16 offset,
+			   void *buf, const size_t buflen);
+void mt76x0_vendor_reset(struct mt76x0_dev *dev);
+int mt76x0_vendor_single_wr(struct mt76x0_dev *dev, const u8 req,
+			     const u16 offset, const u32 val);
+
+#endif
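
mt76x0_urb_has_error() filters out the three statuses that simply mean an URB was unlinked on purpose. A minimal sketch of how a completion handler might use it (the handler name and the use of urb->context are assumptions, not taken from the driver):

static void example_rx_complete(struct urb *urb)
{
	struct mt76x0_dev *dev = urb->context;

	/* -ENOENT/-ECONNRESET/-ESHUTDOWN mean the URB was killed on purpose */
	if (mt76x0_urb_has_error(urb))
		dev_err(dev->mt76.dev, "rx urb failed: %d\n", urb->status);
}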
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/util.c b/drivers/net/wireless/mediatek/mt76/mt76x0/util.c
new file mode 100644
index 0000000000000000000000000000000000000000..7856dd760419e94eebc708401c05f0a25e0ac34f
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/util.c
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "mt76x0.h"
+
+void mt76x0_remove_hdr_pad(struct sk_buff *skb)
+{
+	int len = ieee80211_get_hdrlen_from_skb(skb);
+
+	memmove(skb->data + 2, skb->data, len);
+	skb_pull(skb, 2);
+}
+
+int mt76x0_insert_hdr_pad(struct sk_buff *skb)
+{
+	int len = ieee80211_get_hdrlen_from_skb(skb);
+	int ret;
+
+	if (len % 4 == 0)
+		return 0;
+
+	ret = skb_cow(skb, 2);
+	if (ret)
+		return ret;
+
+	skb_push(skb, 2);
+	memmove(skb->data, skb->data + 2, len);
+
+	skb->data[len] = 0;
+	skb->data[len + 1] = 0;
+	return 0;
+}
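
The two helpers above implement the 4-byte payload alignment the DMA path expects. For example, a QoS data header is 26 bytes, which is not a multiple of 4, so mt76x0_insert_hdr_pad() moves the header two bytes towards the skb head and zeroes the gap; mt76x0_remove_hdr_pad() is the exact inverse applied on completion:

/*
 *   before: | 26-byte 802.11 header | payload ...
 *   after:  | 26-byte 802.11 header | 00 00 | payload ...
 */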
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2.h b/drivers/net/wireless/mediatek/mt76/mt76x2.h
index 71fcfa44fb2e3aebfb39fda33e705ddd6a32eff2..dca3209bf5f1cb2ba2ed5194c6b2c2dfc4e0cee3 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2.h
@@ -33,6 +33,9 @@
 #define MT7662_ROM_PATCH	"mt7662_rom_patch.bin"
 #define MT7662_EEPROM_SIZE	512
 
+#define MT7662U_FIRMWARE	"mediatek/mt7662u.bin"
+#define MT7662U_ROM_PATCH	"mediatek/mt7662u_rom_patch.bin"
+
 #define MT76x2_RX_RING_SIZE	256
 #define MT_RX_HEADROOM		32
 
@@ -55,6 +58,7 @@ struct mt76x2_mcu {
 
 	wait_queue_head_t wait;
 	struct sk_buff_head res_q;
+	struct mt76u_buf res_u;
 
 	u32 msg_seq;
 };
@@ -159,6 +163,23 @@ struct mt76x2_sta {
 	int inactive_count;
 };
 
+static inline bool mt76x2_wait_for_mac(struct mt76x2_dev *dev)
+{
+	int i;
+
+	for (i = 0; i < 500; i++) {
+		switch (mt76_rr(dev, MT_MAC_CSR0)) {
+		case 0:
+		case ~0:
+			break;
+		default:
+			return true;
+		}
+		usleep_range(5000, 10000);
+	}
+	return false;
+}
+
 static inline bool is_mt7612(struct mt76x2_dev *dev)
 {
 	return mt76_chip(&dev->mt76) == 0x7612;
@@ -166,6 +187,14 @@ static inline bool is_mt7612(struct mt76x2_dev *dev)
 
 void mt76x2_set_irq_mask(struct mt76x2_dev *dev, u32 clear, u32 set);
 
+static inline bool mt76x2_channel_silent(struct mt76x2_dev *dev)
+{
+	struct ieee80211_channel *chan = dev->mt76.chandef.chan;
+
+	return ((chan->flags & IEEE80211_CHAN_RADAR) &&
+		chan->dfs_state != NL80211_DFS_AVAILABLE);
+}
+
 static inline void mt76x2_irq_enable(struct mt76x2_dev *dev, u32 mask)
 {
 	mt76x2_set_irq_mask(dev, 0, mask);
@@ -176,11 +205,29 @@ static inline void mt76x2_irq_disable(struct mt76x2_dev *dev, u32 mask)
 	mt76x2_set_irq_mask(dev, mask, 0);
 }
 
+static inline bool mt76x2_wait_for_bbp(struct mt76x2_dev *dev)
+{
+	return mt76_poll_msec(dev, MT_MAC_STATUS,
+			      MT_MAC_STATUS_TX | MT_MAC_STATUS_RX,
+			      0, 100);
+}
+
+static inline bool wait_for_wpdma(struct mt76x2_dev *dev)
+{
+	return mt76_poll(dev, MT_WPDMA_GLO_CFG,
+			 MT_WPDMA_GLO_CFG_TX_DMA_BUSY |
+			 MT_WPDMA_GLO_CFG_RX_DMA_BUSY,
+			 0, 1000);
+}
+
 extern const struct ieee80211_ops mt76x2_ops;
 
+extern struct ieee80211_rate mt76x2_rates[12];
+
 struct mt76x2_dev *mt76x2_alloc_device(struct device *pdev);
 int mt76x2_register_device(struct mt76x2_dev *dev);
 void mt76x2_init_debugfs(struct mt76x2_dev *dev);
+void mt76x2_init_device(struct mt76x2_dev *dev);
 
 irqreturn_t mt76x2_irq_handler(int irq, void *dev_instance);
 void mt76x2_phy_power_on(struct mt76x2_dev *dev);
@@ -194,7 +241,7 @@ void mt76x2_phy_set_antenna(struct mt76x2_dev *dev);
 int mt76x2_phy_start(struct mt76x2_dev *dev);
 int mt76x2_phy_set_channel(struct mt76x2_dev *dev,
 			 struct cfg80211_chan_def *chandef);
-int mt76x2_phy_get_rssi(struct mt76x2_dev *dev, s8 rssi, int chain);
+int mt76x2_mac_get_rssi(struct mt76x2_dev *dev, s8 rssi, int chain);
 void mt76x2_phy_calibrate(struct work_struct *work);
 void mt76x2_phy_set_txpower(struct mt76x2_dev *dev);
 
@@ -222,6 +269,7 @@ int mt76x2_tx_prepare_skb(struct mt76_dev *mdev, void *txwi,
 			  u32 *tx_info);
 void mt76x2_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q,
 			    struct mt76_queue_entry *e, bool flush);
+void mt76x2_mac_set_tx_protection(struct mt76x2_dev *dev, u32 val);
 
 void mt76x2_pre_tbtt_tasklet(unsigned long arg);
 
@@ -238,4 +286,45 @@ s8 mt76x2_tx_get_max_txpwr_adj(struct mt76x2_dev *dev,
 s8 mt76x2_tx_get_txpwr_adj(struct mt76x2_dev *dev, s8 txpwr, s8 max_txpwr_adj);
 void mt76x2_tx_set_txpwr_auto(struct mt76x2_dev *dev, s8 txpwr);
 
+int mt76x2_insert_hdr_pad(struct sk_buff *skb);
+
+bool mt76x2_mac_load_tx_status(struct mt76x2_dev *dev,
+			       struct mt76x2_tx_status *stat);
+void mt76x2_send_tx_status(struct mt76x2_dev *dev,
+			   struct mt76x2_tx_status *stat, u8 *update);
+void mt76x2_reset_wlan(struct mt76x2_dev *dev, bool enable);
+void mt76x2_init_txpower(struct mt76x2_dev *dev,
+			 struct ieee80211_supported_band *sband);
+void mt76_write_mac_initvals(struct mt76x2_dev *dev);
+
+int mt76x2_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+			struct ieee80211_ampdu_params *params);
+int mt76x2_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+		   struct ieee80211_sta *sta);
+int mt76x2_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+		      struct ieee80211_sta *sta);
+void mt76x2_remove_interface(struct ieee80211_hw *hw,
+			     struct ieee80211_vif *vif);
+int mt76x2_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+		   struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+		   struct ieee80211_key_conf *key);
+int mt76x2_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+		   u16 queue, const struct ieee80211_tx_queue_params *params);
+void mt76x2_configure_filter(struct ieee80211_hw *hw,
+			     unsigned int changed_flags,
+			     unsigned int *total_flags, u64 multicast);
+void mt76x2_txq_init(struct mt76x2_dev *dev, struct ieee80211_txq *txq);
+void mt76x2_sta_rate_tbl_update(struct ieee80211_hw *hw,
+				struct ieee80211_vif *vif,
+				struct ieee80211_sta *sta);
+
+void mt76x2_phy_set_txpower_regs(struct mt76x2_dev *dev,
+				 enum nl80211_band band);
+void mt76x2_configure_tx_delay(struct mt76x2_dev *dev,
+			       enum nl80211_band band, u8 bw);
+void mt76x2_phy_set_bw(struct mt76x2_dev *dev, int width, u8 ctrl);
+void mt76x2_phy_set_band(struct mt76x2_dev *dev, int band, bool primary_upper);
+int mt76x2_phy_get_min_avg_rssi(struct mt76x2_dev *dev);
+void mt76x2_apply_gain_adj(struct mt76x2_dev *dev);
+
 #endif
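
The new inline poll helpers in this header follow the usual bring-up pattern: spin on a status register until the hardware reports ready, then bail out with an error. A minimal sketch of the call side (the function and the chosen error codes are hypothetical; only the helpers are from the patch):

static int example_hw_init(struct mt76x2_dev *dev)
{
	if (!mt76x2_wait_for_mac(dev))	/* MAC_CSR0 still reads 0 or ~0 */
		return -ETIMEDOUT;
	if (!wait_for_wpdma(dev))	/* TX/RX DMA engines still busy */
		return -EIO;
	return 0;
}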
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_common.c b/drivers/net/wireless/mediatek/mt76/mt76x2_common.c
new file mode 100644
index 0000000000000000000000000000000000000000..a2338ba139b4d9d713b70e3ba4100cec2e14840d
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_common.c
@@ -0,0 +1,350 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "mt76x2.h"
+
+void mt76x2_txq_init(struct mt76x2_dev *dev, struct ieee80211_txq *txq)
+{
+	struct mt76_txq *mtxq;
+
+	if (!txq)
+		return;
+
+	mtxq = (struct mt76_txq *) txq->drv_priv;
+	if (txq->sta) {
+		struct mt76x2_sta *sta;
+
+		sta = (struct mt76x2_sta *) txq->sta->drv_priv;
+		mtxq->wcid = &sta->wcid;
+	} else {
+		struct mt76x2_vif *mvif;
+
+		mvif = (struct mt76x2_vif *) txq->vif->drv_priv;
+		mtxq->wcid = &mvif->group_wcid;
+	}
+
+	mt76_txq_init(&dev->mt76, txq);
+}
+EXPORT_SYMBOL_GPL(mt76x2_txq_init);
+
+int mt76x2_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+			struct ieee80211_ampdu_params *params)
+{
+	enum ieee80211_ampdu_mlme_action action = params->action;
+	struct ieee80211_sta *sta = params->sta;
+	struct mt76x2_dev *dev = hw->priv;
+	struct mt76x2_sta *msta = (struct mt76x2_sta *) sta->drv_priv;
+	struct ieee80211_txq *txq = sta->txq[params->tid];
+	u16 tid = params->tid;
+	u16 *ssn = &params->ssn;
+	struct mt76_txq *mtxq;
+
+	if (!txq)
+		return -EINVAL;
+
+	mtxq = (struct mt76_txq *)txq->drv_priv;
+
+	switch (action) {
+	case IEEE80211_AMPDU_RX_START:
+		mt76_rx_aggr_start(&dev->mt76, &msta->wcid, tid, *ssn, params->buf_size);
+		mt76_set(dev, MT_WCID_ADDR(msta->wcid.idx) + 4, BIT(16 + tid));
+		break;
+	case IEEE80211_AMPDU_RX_STOP:
+		mt76_rx_aggr_stop(&dev->mt76, &msta->wcid, tid);
+		mt76_clear(dev, MT_WCID_ADDR(msta->wcid.idx) + 4,
+			   BIT(16 + tid));
+		break;
+	case IEEE80211_AMPDU_TX_OPERATIONAL:
+		mtxq->aggr = true;
+		mtxq->send_bar = false;
+		ieee80211_send_bar(vif, sta->addr, tid, mtxq->agg_ssn);
+		break;
+	case IEEE80211_AMPDU_TX_STOP_FLUSH:
+	case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
+		mtxq->aggr = false;
+		ieee80211_send_bar(vif, sta->addr, tid, mtxq->agg_ssn);
+		break;
+	case IEEE80211_AMPDU_TX_START:
+		mtxq->agg_ssn = *ssn << 4;
+		ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+		break;
+	case IEEE80211_AMPDU_TX_STOP_CONT:
+		mtxq->aggr = false;
+		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+		break;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(mt76x2_ampdu_action);
+
+int mt76x2_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+		   struct ieee80211_sta *sta)
+{
+	struct mt76x2_dev *dev = hw->priv;
+	struct mt76x2_sta *msta = (struct mt76x2_sta *) sta->drv_priv;
+	struct mt76x2_vif *mvif = (struct mt76x2_vif *) vif->drv_priv;
+	int ret = 0;
+	int idx = 0;
+	int i;
+
+	mutex_lock(&dev->mutex);
+
+	idx = mt76_wcid_alloc(dev->wcid_mask, ARRAY_SIZE(dev->wcid));
+	if (idx < 0) {
+		ret = -ENOSPC;
+		goto out;
+	}
+
+	msta->vif = mvif;
+	msta->wcid.sta = 1;
+	msta->wcid.idx = idx;
+	msta->wcid.hw_key_idx = -1;
+	mt76x2_mac_wcid_setup(dev, idx, mvif->idx, sta->addr);
+	mt76x2_mac_wcid_set_drop(dev, idx, false);
+	for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
+		mt76x2_txq_init(dev, sta->txq[i]);
+
+	if (vif->type == NL80211_IFTYPE_AP)
+		set_bit(MT_WCID_FLAG_CHECK_PS, &msta->wcid.flags);
+
+	ewma_signal_init(&msta->rssi);
+
+	rcu_assign_pointer(dev->wcid[idx], &msta->wcid);
+
+out:
+	mutex_unlock(&dev->mutex);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(mt76x2_sta_add);
+
+int mt76x2_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+		      struct ieee80211_sta *sta)
+{
+	struct mt76x2_dev *dev = hw->priv;
+	struct mt76x2_sta *msta = (struct mt76x2_sta *) sta->drv_priv;
+	int idx = msta->wcid.idx;
+	int i;
+
+	mutex_lock(&dev->mutex);
+	rcu_assign_pointer(dev->wcid[idx], NULL);
+	for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
+		mt76_txq_remove(&dev->mt76, sta->txq[i]);
+	mt76x2_mac_wcid_set_drop(dev, idx, true);
+	mt76_wcid_free(dev->wcid_mask, idx);
+	mt76x2_mac_wcid_setup(dev, idx, 0, NULL);
+	mutex_unlock(&dev->mutex);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(mt76x2_sta_remove);
+
+void mt76x2_remove_interface(struct ieee80211_hw *hw,
+			     struct ieee80211_vif *vif)
+{
+	struct mt76x2_dev *dev = hw->priv;
+
+	mt76_txq_remove(&dev->mt76, vif->txq);
+}
+EXPORT_SYMBOL_GPL(mt76x2_remove_interface);
+
+int mt76x2_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+		   struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+		   struct ieee80211_key_conf *key)
+{
+	struct mt76x2_dev *dev = hw->priv;
+	struct mt76x2_vif *mvif = (struct mt76x2_vif *) vif->drv_priv;
+	struct mt76x2_sta *msta;
+	struct mt76_wcid *wcid;
+	int idx = key->keyidx;
+	int ret;
+
+	/* fall back to sw encryption for unsupported ciphers */
+	switch (key->cipher) {
+	case WLAN_CIPHER_SUITE_WEP40:
+	case WLAN_CIPHER_SUITE_WEP104:
+	case WLAN_CIPHER_SUITE_TKIP:
+	case WLAN_CIPHER_SUITE_CCMP:
+		break;
+	default:
+		return -EOPNOTSUPP;
+	}
+
+	/*
+	 * The hardware does not support per-STA RX GTK, fall back
+	 * to software mode for these.
+	 */
+	if ((vif->type == NL80211_IFTYPE_ADHOC ||
+	     vif->type == NL80211_IFTYPE_MESH_POINT) &&
+	    (key->cipher == WLAN_CIPHER_SUITE_TKIP ||
+	     key->cipher == WLAN_CIPHER_SUITE_CCMP) &&
+	    !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
+		return -EOPNOTSUPP;
+
+	msta = sta ? (struct mt76x2_sta *) sta->drv_priv : NULL;
+	wcid = msta ? &msta->wcid : &mvif->group_wcid;
+
+	if (cmd == SET_KEY) {
+		key->hw_key_idx = wcid->idx;
+		wcid->hw_key_idx = idx;
+		if (key->flags & IEEE80211_KEY_FLAG_RX_MGMT) {
+			key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX;
+			wcid->sw_iv = true;
+		}
+	} else {
+		if (idx == wcid->hw_key_idx) {
+			wcid->hw_key_idx = -1;
+			wcid->sw_iv = true;
+		}
+
+		key = NULL;
+	}
+	mt76_wcid_key_setup(&dev->mt76, wcid, key);
+
+	if (!msta) {
+		if (key || wcid->hw_key_idx == idx) {
+			ret = mt76x2_mac_wcid_set_key(dev, wcid->idx, key);
+			if (ret)
+				return ret;
+		}
+
+		return mt76x2_mac_shared_key_setup(dev, mvif->idx, idx, key);
+	}
+
+	return mt76x2_mac_wcid_set_key(dev, msta->wcid.idx, key);
+}
+EXPORT_SYMBOL_GPL(mt76x2_set_key);
+
+int mt76x2_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+		   u16 queue, const struct ieee80211_tx_queue_params *params)
+{
+	struct mt76x2_dev *dev = hw->priv;
+	u8 cw_min = 5, cw_max = 10, qid;
+	u32 val;
+
+	qid = dev->mt76.q_tx[queue].hw_idx;
+
+	if (params->cw_min)
+		cw_min = fls(params->cw_min);
+	if (params->cw_max)
+		cw_max = fls(params->cw_max);
+
+	val = FIELD_PREP(MT_EDCA_CFG_TXOP, params->txop) |
+	      FIELD_PREP(MT_EDCA_CFG_AIFSN, params->aifs) |
+	      FIELD_PREP(MT_EDCA_CFG_CWMIN, cw_min) |
+	      FIELD_PREP(MT_EDCA_CFG_CWMAX, cw_max);
+	mt76_wr(dev, MT_EDCA_CFG_AC(qid), val);
+
+	val = mt76_rr(dev, MT_WMM_TXOP(qid));
+	val &= ~(MT_WMM_TXOP_MASK << MT_WMM_TXOP_SHIFT(qid));
+	val |= params->txop << MT_WMM_TXOP_SHIFT(qid);
+	mt76_wr(dev, MT_WMM_TXOP(qid), val);
+
+	val = mt76_rr(dev, MT_WMM_AIFSN);
+	val &= ~(MT_WMM_AIFSN_MASK << MT_WMM_AIFSN_SHIFT(qid));
+	val |= params->aifs << MT_WMM_AIFSN_SHIFT(qid);
+	mt76_wr(dev, MT_WMM_AIFSN, val);
+
+	val = mt76_rr(dev, MT_WMM_CWMIN);
+	val &= ~(MT_WMM_CWMIN_MASK << MT_WMM_CWMIN_SHIFT(qid));
+	val |= cw_min << MT_WMM_CWMIN_SHIFT(qid);
+	mt76_wr(dev, MT_WMM_CWMIN, val);
+
+	val = mt76_rr(dev, MT_WMM_CWMAX);
+	val &= ~(MT_WMM_CWMAX_MASK << MT_WMM_CWMAX_SHIFT(qid));
+	val |= cw_max << MT_WMM_CWMAX_SHIFT(qid);
+	mt76_wr(dev, MT_WMM_CWMAX, val);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(mt76x2_conf_tx);
+
+void mt76x2_configure_filter(struct ieee80211_hw *hw,
+			     unsigned int changed_flags,
+			     unsigned int *total_flags, u64 multicast)
+{
+	struct mt76x2_dev *dev = hw->priv;
+	u32 flags = 0;
+
+#define MT76_FILTER(_flag, _hw) do { \
+		flags |= *total_flags & FIF_##_flag;			\
+		dev->rxfilter &= ~(_hw);				\
+		dev->rxfilter |= !(flags & FIF_##_flag) * (_hw);	\
+	} while (0)
+
+	mutex_lock(&dev->mutex);
+
+	dev->rxfilter &= ~MT_RX_FILTR_CFG_OTHER_BSS;
+
+	MT76_FILTER(FCSFAIL, MT_RX_FILTR_CFG_CRC_ERR);
+	MT76_FILTER(PLCPFAIL, MT_RX_FILTR_CFG_PHY_ERR);
+	MT76_FILTER(CONTROL, MT_RX_FILTR_CFG_ACK |
+			     MT_RX_FILTR_CFG_CTS |
+			     MT_RX_FILTR_CFG_CFEND |
+			     MT_RX_FILTR_CFG_CFACK |
+			     MT_RX_FILTR_CFG_BA |
+			     MT_RX_FILTR_CFG_CTRL_RSV);
+	MT76_FILTER(PSPOLL, MT_RX_FILTR_CFG_PSPOLL);
+
+	*total_flags = flags;
+	mt76_wr(dev, MT_RX_FILTR_CFG, dev->rxfilter);
+
+	mutex_unlock(&dev->mutex);
+}
+EXPORT_SYMBOL_GPL(mt76x2_configure_filter);
+
+void mt76x2_sta_rate_tbl_update(struct ieee80211_hw *hw,
+				struct ieee80211_vif *vif,
+				struct ieee80211_sta *sta)
+{
+	struct mt76x2_dev *dev = hw->priv;
+	struct mt76x2_sta *msta = (struct mt76x2_sta *) sta->drv_priv;
+	struct ieee80211_sta_rates *rates = rcu_dereference(sta->rates);
+	struct ieee80211_tx_rate rate = {};
+
+	if (!rates)
+		return;
+
+	rate.idx = rates->rate[0].idx;
+	rate.flags = rates->rate[0].flags;
+	mt76x2_mac_wcid_set_rate(dev, &msta->wcid, &rate);
+	msta->wcid.max_txpwr_adj = mt76x2_tx_get_max_txpwr_adj(dev, &rate);
+}
+EXPORT_SYMBOL_GPL(mt76x2_sta_rate_tbl_update);
+
+void mt76x2_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
+			 struct sk_buff *skb)
+{
+	struct mt76x2_dev *dev = container_of(mdev, struct mt76x2_dev, mt76);
+	void *rxwi = skb->data;
+
+	if (q == MT_RXQ_MCU) {
+		skb_queue_tail(&dev->mcu.res_q, skb);
+		wake_up(&dev->mcu.wait);
+		return;
+	}
+
+	skb_pull(skb, sizeof(struct mt76x2_rxwi));
+	if (mt76x2_mac_process_rx(dev, skb, rxwi)) {
+		dev_kfree_skb(skb);
+		return;
+	}
+
+	mt76_rx(&dev->mt76, q, skb);
+}
+EXPORT_SYMBOL_GPL(mt76x2_queue_rx_skb);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_debugfs.c b/drivers/net/wireless/mediatek/mt76/mt76x2_debugfs.c
index 74725902e6dc7587f530ca6d75ceb86b170c8dbb..77b5ff1be05f864ceb33adcbdd298c4c9a8b4b5e 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_debugfs.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_debugfs.c
@@ -153,3 +153,4 @@ void mt76x2_init_debugfs(struct mt76x2_dev *dev)
 
 	debugfs_create_devm_seqfile(dev->mt76.dev, "agc", dir, read_agc);
 }
+EXPORT_SYMBOL_GPL(mt76x2_init_debugfs);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_dma.c b/drivers/net/wireless/mediatek/mt76/mt76x2_dma.c
index fd1ec4743e0b9fb7435b7062f60b6ea799cb840e..6720a6a1313f14bfa504f9936d20bcd44d0b441d 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_dma.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_dma.c
@@ -66,27 +66,6 @@ mt76x2_init_tx_queue(struct mt76x2_dev *dev, struct mt76_queue *q,
 	return 0;
 }
 
-void mt76x2_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
-			 struct sk_buff *skb)
-{
-	struct mt76x2_dev *dev = container_of(mdev, struct mt76x2_dev, mt76);
-	void *rxwi = skb->data;
-
-	if (q == MT_RXQ_MCU) {
-		skb_queue_tail(&dev->mcu.res_q, skb);
-		wake_up(&dev->mcu.wait);
-		return;
-	}
-
-	skb_pull(skb, sizeof(struct mt76x2_rxwi));
-	if (mt76x2_mac_process_rx(dev, skb, rxwi)) {
-		dev_kfree_skb(skb);
-		return;
-	}
-
-	mt76_rx(&dev->mt76, q, skb);
-}
-
 static int
 mt76x2_init_rx_queue(struct mt76x2_dev *dev, struct mt76_queue *q,
 		     int idx, int n_desc, int bufsize)
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_dma.h b/drivers/net/wireless/mediatek/mt76/mt76x2_dma.h
index e9d426bbf91a3df15a2b114bc5addde43c0c9e36..da294558c268de8750b8ccd3448f49bdbc8ddb34 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_dma.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_dma.h
@@ -19,34 +19,6 @@
 
 #include "dma.h"
 
-#define MT_TXD_INFO_LEN			GENMASK(15, 0)
-#define MT_TXD_INFO_NEXT_VLD		BIT(16)
-#define MT_TXD_INFO_TX_BURST		BIT(17)
-#define MT_TXD_INFO_80211		BIT(19)
-#define MT_TXD_INFO_TSO			BIT(20)
-#define MT_TXD_INFO_CSO			BIT(21)
-#define MT_TXD_INFO_WIV			BIT(24)
-#define MT_TXD_INFO_QSEL		GENMASK(26, 25)
-#define MT_TXD_INFO_DPORT		GENMASK(29, 27)
-#define MT_TXD_INFO_TYPE		GENMASK(31, 30)
-
-#define MT_RX_FCE_INFO_LEN		GENMASK(13, 0)
-#define MT_RX_FCE_INFO_SELF_GEN		BIT(15)
-#define MT_RX_FCE_INFO_CMD_SEQ		GENMASK(19, 16)
-#define MT_RX_FCE_INFO_EVT_TYPE		GENMASK(23, 20)
-#define MT_RX_FCE_INFO_PCIE_INTR	BIT(24)
-#define MT_RX_FCE_INFO_QSEL		GENMASK(26, 25)
-#define MT_RX_FCE_INFO_D_PORT		GENMASK(29, 27)
-#define MT_RX_FCE_INFO_TYPE		GENMASK(31, 30)
-
-/* MCU request message header  */
-#define MT_MCU_MSG_LEN			GENMASK(15, 0)
-#define MT_MCU_MSG_CMD_SEQ		GENMASK(19, 16)
-#define MT_MCU_MSG_CMD_TYPE		GENMASK(26, 20)
-#define MT_MCU_MSG_PORT			GENMASK(29, 27)
-#define MT_MCU_MSG_TYPE			GENMASK(31, 30)
-#define MT_MCU_MSG_TYPE_CMD		BIT(30)
-
 enum mt76x2_qsel {
 	MT_QSEL_MGMT,
 	MT_QSEL_HCCA,
@@ -54,14 +26,4 @@ enum mt76x2_qsel {
 	MT_QSEL_EDCA_2,
 };
 
-enum dma_msg_port {
-	WLAN_PORT,
-	CPU_RX_PORT,
-	CPU_TX_PORT,
-	HOST_PORT,
-	VIRTUAL_CPU_RX_PORT,
-	VIRTUAL_CPU_TX_PORT,
-	DISCARD,
-};
-
 #endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.c b/drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.c
index 95d5f7d888f0fd486cea9c06c6ea2208a4b11f58..1753bcb3635612ec8f4972fcd8b466a7d5f2ba8c 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.c
@@ -40,8 +40,7 @@ mt76x2_eeprom_get_macaddr(struct mt76x2_dev *dev)
 	return 0;
 }
 
-static void
-mt76x2_eeprom_parse_hw_cap(struct mt76x2_dev *dev)
+void mt76x2_eeprom_parse_hw_cap(struct mt76x2_dev *dev)
 {
 	u16 val = mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_0);
 
@@ -58,6 +57,7 @@ mt76x2_eeprom_parse_hw_cap(struct mt76x2_dev *dev)
 		break;
 	}
 }
+EXPORT_SYMBOL_GPL(mt76x2_eeprom_parse_hw_cap);
 
 static int
 mt76x2_efuse_read(struct mt76x2_dev *dev, u16 addr, u8 *data)
@@ -415,6 +415,7 @@ void mt76x2_read_rx_gain(struct mt76x2_dev *dev)
 
 	dev->cal.rx.lna_gain = mt76x2_sign_extend(lna, 8);
 }
+EXPORT_SYMBOL_GPL(mt76x2_read_rx_gain);
 
 static s8
 mt76x2_rate_power_val(u8 val)
@@ -482,6 +483,7 @@ void mt76x2_get_rate_power(struct mt76x2_dev *dev, struct mt76_rate_power *t,
 		val >>= 8;
 	t->vht[8] = t->vht[9] = mt76x2_rate_power_val(val >> 8);
 }
+EXPORT_SYMBOL_GPL(mt76x2_get_rate_power);
 
 int mt76x2_get_max_rate_power(struct mt76_rate_power *r)
 {
@@ -493,6 +495,7 @@ int mt76x2_get_max_rate_power(struct mt76_rate_power *r)
 
 	return ret;
 }
+EXPORT_SYMBOL_GPL(mt76x2_get_max_rate_power);
 
 static void
 mt76x2_get_power_info_2g(struct mt76x2_dev *dev, struct mt76x2_tx_power_info *t,
@@ -600,6 +603,7 @@ void mt76x2_get_power_info(struct mt76x2_dev *dev,
 	t->delta_bw40 = mt76x2_rate_power_val(bw40);
 	t->delta_bw80 = mt76x2_rate_power_val(bw80);
 }
+EXPORT_SYMBOL_GPL(mt76x2_get_power_info);
 
 int mt76x2_get_temp_comp(struct mt76x2_dev *dev, struct mt76x2_temp_comp *t)
 {
@@ -632,6 +636,7 @@ int mt76x2_get_temp_comp(struct mt76x2_dev *dev, struct mt76x2_temp_comp *t)
 
 	return 0;
 }
+EXPORT_SYMBOL_GPL(mt76x2_get_temp_comp);
 
 bool mt76x2_ext_pa_enabled(struct mt76x2_dev *dev, enum nl80211_band band)
 {
@@ -642,6 +647,7 @@ bool mt76x2_ext_pa_enabled(struct mt76x2_dev *dev, enum nl80211_band band)
 	else
 		return !(conf0 & MT_EE_NIC_CONF_0_PA_INT_2G);
 }
+EXPORT_SYMBOL_GPL(mt76x2_ext_pa_enabled);
 
 int mt76x2_eeprom_init(struct mt76x2_dev *dev)
 {
@@ -658,3 +664,6 @@ int mt76x2_eeprom_init(struct mt76x2_dev *dev)
 
 	return 0;
 }
+EXPORT_SYMBOL_GPL(mt76x2_eeprom_init);
+
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.h b/drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.h
index aa0b0c040375a052593e5cc69016f6b7b91def20..0f3e4d2f4fee4cbadee5cb66bfdb8a0867878f64 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.h
@@ -155,6 +155,7 @@ void mt76x2_get_power_info(struct mt76x2_dev *dev,
 int mt76x2_get_temp_comp(struct mt76x2_dev *dev, struct mt76x2_temp_comp *t);
 bool mt76x2_ext_pa_enabled(struct mt76x2_dev *dev, enum nl80211_band band);
 void mt76x2_read_rx_gain(struct mt76x2_dev *dev);
+void mt76x2_eeprom_parse_hw_cap(struct mt76x2_dev *dev);
 
 static inline bool
 mt76x2_temp_tx_alc_enabled(struct mt76x2_dev *dev)
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_init.c b/drivers/net/wireless/mediatek/mt76/mt76x2_init.c
index 79ab93613e06c7d6dfbf4d26e84c1b912bcf8728..b814391f79ac8b5a63d53ab3d7f4a55633785186 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_init.c
@@ -19,39 +19,6 @@
 #include "mt76x2_eeprom.h"
 #include "mt76x2_mcu.h"
 
-struct mt76x2_reg_pair {
-	u32 reg;
-	u32 value;
-};
-
-static bool
-mt76x2_wait_for_mac(struct mt76x2_dev *dev)
-{
-	int i;
-
-	for (i = 0; i < 500; i++) {
-		switch (mt76_rr(dev, MT_MAC_CSR0)) {
-		case 0:
-		case ~0:
-			break;
-		default:
-			return true;
-		}
-		usleep_range(5000, 10000);
-	}
-
-	return false;
-}
-
-static bool
-wait_for_wpdma(struct mt76x2_dev *dev)
-{
-	return mt76_poll(dev, MT_WPDMA_GLO_CFG,
-			 MT_WPDMA_GLO_CFG_TX_DMA_BUSY |
-			 MT_WPDMA_GLO_CFG_RX_DMA_BUSY,
-			 0, 1000);
-}
-
 static void
 mt76x2_mac_pbf_init(struct mt76x2_dev *dev)
 {
@@ -70,107 +37,6 @@ mt76x2_mac_pbf_init(struct mt76x2_dev *dev)
 	mt76_wr(dev, MT_PBF_RX_MAX_PCNT, 0xfebf);
 }
 
-static void
-mt76x2_write_reg_pairs(struct mt76x2_dev *dev,
-		       const struct mt76x2_reg_pair *data, int len)
-{
-	while (len > 0) {
-		mt76_wr(dev, data->reg, data->value);
-		len--;
-		data++;
-	}
-}
-
-static void
-mt76_write_mac_initvals(struct mt76x2_dev *dev)
-{
-#define DEFAULT_PROT_CFG				\
-	(FIELD_PREP(MT_PROT_CFG_RATE, 0x2004) |		\
-	 FIELD_PREP(MT_PROT_CFG_NAV, 1) |			\
-	 FIELD_PREP(MT_PROT_CFG_TXOP_ALLOW, 0x3f) |	\
-	 MT_PROT_CFG_RTS_THRESH)
-
-#define DEFAULT_PROT_CFG_20				\
-	(FIELD_PREP(MT_PROT_CFG_RATE, 0x2004) |		\
-	 FIELD_PREP(MT_PROT_CFG_CTRL, 1) |		\
-	 FIELD_PREP(MT_PROT_CFG_NAV, 1) |			\
-	 FIELD_PREP(MT_PROT_CFG_TXOP_ALLOW, 0x17))
-
-#define DEFAULT_PROT_CFG_40				\
-	(FIELD_PREP(MT_PROT_CFG_RATE, 0x2084) |		\
-	 FIELD_PREP(MT_PROT_CFG_CTRL, 1) |		\
-	 FIELD_PREP(MT_PROT_CFG_NAV, 1) |			\
-	 FIELD_PREP(MT_PROT_CFG_TXOP_ALLOW, 0x3f))
-
-	static const struct mt76x2_reg_pair vals[] = {
-		/* Copied from MediaTek reference source */
-		{ MT_PBF_SYS_CTRL,		0x00080c00 },
-		{ MT_PBF_CFG,			0x1efebcff },
-		{ MT_FCE_PSE_CTRL,		0x00000001 },
-		{ MT_MAC_SYS_CTRL,		0x0000000c },
-		{ MT_MAX_LEN_CFG,		0x003e3f00 },
-		{ MT_AMPDU_MAX_LEN_20M1S,	0xaaa99887 },
-		{ MT_AMPDU_MAX_LEN_20M2S,	0x000000aa },
-		{ MT_XIFS_TIME_CFG,		0x33a40d0a },
-		{ MT_BKOFF_SLOT_CFG,		0x00000209 },
-		{ MT_TBTT_SYNC_CFG,		0x00422010 },
-		{ MT_PWR_PIN_CFG,		0x00000000 },
-		{ 0x1238,			0x001700c8 },
-		{ MT_TX_SW_CFG0,		0x00101001 },
-		{ MT_TX_SW_CFG1,		0x00010000 },
-		{ MT_TX_SW_CFG2,		0x00000000 },
-		{ MT_TXOP_CTRL_CFG,		0x0400583f },
-		{ MT_TX_RTS_CFG,		0x00100020 },
-		{ MT_TX_TIMEOUT_CFG,		0x000a2290 },
-		{ MT_TX_RETRY_CFG,		0x47f01f0f },
-		{ MT_EXP_ACK_TIME,		0x002c00dc },
-		{ MT_TX_PROT_CFG6,		0xe3f42004 },
-		{ MT_TX_PROT_CFG7,		0xe3f42084 },
-		{ MT_TX_PROT_CFG8,		0xe3f42104 },
-		{ MT_PIFS_TX_CFG,		0x00060fff },
-		{ MT_RX_FILTR_CFG,		0x00015f97 },
-		{ MT_LEGACY_BASIC_RATE,		0x0000017f },
-		{ MT_HT_BASIC_RATE,		0x00004003 },
-		{ MT_PN_PAD_MODE,		0x00000003 },
-		{ MT_TXOP_HLDR_ET,		0x00000002 },
-		{ 0xa44,			0x00000000 },
-		{ MT_HEADER_TRANS_CTRL_REG,	0x00000000 },
-		{ MT_TSO_CTRL,			0x00000000 },
-		{ MT_AUX_CLK_CFG,		0x00000000 },
-		{ MT_DACCLK_EN_DLY_CFG,		0x00000000 },
-		{ MT_TX_ALC_CFG_4,		0x00000000 },
-		{ MT_TX_ALC_VGA3,		0x00000000 },
-		{ MT_TX_PWR_CFG_0,		0x3a3a3a3a },
-		{ MT_TX_PWR_CFG_1,		0x3a3a3a3a },
-		{ MT_TX_PWR_CFG_2,		0x3a3a3a3a },
-		{ MT_TX_PWR_CFG_3,		0x3a3a3a3a },
-		{ MT_TX_PWR_CFG_4,		0x3a3a3a3a },
-		{ MT_TX_PWR_CFG_7,		0x3a3a3a3a },
-		{ MT_TX_PWR_CFG_8,		0x0000003a },
-		{ MT_TX_PWR_CFG_9,		0x0000003a },
-		{ MT_EFUSE_CTRL,		0x0000d000 },
-		{ MT_PAUSE_ENABLE_CONTROL1,	0x0000000a },
-		{ MT_FCE_WLAN_FLOW_CONTROL1,	0x60401c18 },
-		{ MT_WPDMA_DELAY_INT_CFG,	0x94ff0000 },
-		{ MT_TX_SW_CFG3,		0x00000004 },
-		{ MT_HT_FBK_TO_LEGACY,		0x00001818 },
-		{ MT_VHT_HT_FBK_CFG1,		0xedcba980 },
-		{ MT_PROT_AUTO_TX_CFG,		0x00830083 },
-		{ MT_HT_CTRL_CFG,		0x000001ff },
-	};
-	struct mt76x2_reg_pair prot_vals[] = {
-		{ MT_CCK_PROT_CFG,		DEFAULT_PROT_CFG },
-		{ MT_OFDM_PROT_CFG,		DEFAULT_PROT_CFG },
-		{ MT_MM20_PROT_CFG,		DEFAULT_PROT_CFG_20 },
-		{ MT_MM40_PROT_CFG,		DEFAULT_PROT_CFG_40 },
-		{ MT_GF20_PROT_CFG,		DEFAULT_PROT_CFG_20 },
-		{ MT_GF40_PROT_CFG,		DEFAULT_PROT_CFG_40 },
-	};
-
-	mt76x2_write_reg_pairs(dev, vals, ARRAY_SIZE(vals));
-	mt76x2_write_reg_pairs(dev, prot_vals, ARRAY_SIZE(prot_vals));
-}
-
 static void
 mt76x2_fixup_xtal(struct mt76x2_dev *dev)
 {
@@ -360,41 +226,6 @@ int mt76x2_mac_start(struct mt76x2_dev *dev)
 	return 0;
 }
 
-void mt76x2_mac_stop(struct mt76x2_dev *dev, bool force)
-{
-	bool stopped = false;
-	u32 rts_cfg;
-	int i;
-
-	mt76_wr(dev, MT_MAC_SYS_CTRL, 0);
-
-	rts_cfg = mt76_rr(dev, MT_TX_RTS_CFG);
-	mt76_wr(dev, MT_TX_RTS_CFG, rts_cfg & ~MT_TX_RTS_CFG_RETRY_LIMIT);
-
-	/* Wait for MAC to become idle */
-	for (i = 0; i < 300; i++) {
-		if ((mt76_rr(dev, MT_MAC_STATUS) &
-		     (MT_MAC_STATUS_RX | MT_MAC_STATUS_TX)) ||
-		    mt76_rr(dev, MT_BBP(IBI, 12))) {
-			udelay(1);
-			continue;
-		}
-
-		stopped = true;
-		break;
-	}
-
-	if (force && !stopped) {
-		mt76_set(dev, MT_BBP(CORE, 4), BIT(1));
-		mt76_clear(dev, MT_BBP(CORE, 4), BIT(1));
-
-		mt76_set(dev, MT_BBP(CORE, 4), BIT(0));
-		mt76_clear(dev, MT_BBP(CORE, 4), BIT(0));
-	}
-
-	mt76_wr(dev, MT_TX_RTS_CFG, rts_cfg);
-}
-
 void mt76x2_mac_resume(struct mt76x2_dev *dev)
 {
 	mt76_wr(dev, MT_MAC_SYS_CTRL,
@@ -498,45 +329,6 @@ void mt76x2_set_tx_ackto(struct mt76x2_dev *dev)
 		       MT_TX_TIMEOUT_CFG_ACKTO, ackto);
 }
 
-static void
-mt76x2_set_wlan_state(struct mt76x2_dev *dev, bool enable)
-{
-	u32 val = mt76_rr(dev, MT_WLAN_FUN_CTRL);
-
-	if (enable)
-		val |= (MT_WLAN_FUN_CTRL_WLAN_EN |
-			MT_WLAN_FUN_CTRL_WLAN_CLK_EN);
-	else
-		val &= ~(MT_WLAN_FUN_CTRL_WLAN_EN |
-			 MT_WLAN_FUN_CTRL_WLAN_CLK_EN);
-
-	mt76_wr(dev, MT_WLAN_FUN_CTRL, val);
-	udelay(20);
-}
-
-static void
-mt76x2_reset_wlan(struct mt76x2_dev *dev, bool enable)
-{
-	u32 val;
-
-	val = mt76_rr(dev, MT_WLAN_FUN_CTRL);
-
-	val &= ~MT_WLAN_FUN_CTRL_FRC_WL_ANT_SEL;
-
-	if (val & MT_WLAN_FUN_CTRL_WLAN_EN) {
-		val |= MT_WLAN_FUN_CTRL_WLAN_RESET_RF;
-		mt76_wr(dev, MT_WLAN_FUN_CTRL, val);
-		udelay(20);
-
-		val &= ~MT_WLAN_FUN_CTRL_WLAN_RESET_RF;
-	}
-
-	mt76_wr(dev, MT_WLAN_FUN_CTRL, val);
-	udelay(20);
-
-	mt76x2_set_wlan_state(dev, enable);
-}
-
 int mt76x2_init_hardware(struct mt76x2_dev *dev)
 {
 	static const u16 beacon_offsets[16] = {
@@ -567,11 +359,6 @@ int mt76x2_init_hardware(struct mt76x2_dev *dev)
 	tasklet_init(&dev->pre_tbtt_tasklet, mt76x2_pre_tbtt_tasklet,
 		     (unsigned long) dev);
 
-	dev->chainmask = 0x202;
-	dev->global_wcid.idx = 255;
-	dev->global_wcid.hw_key_idx = -1;
-	dev->slottime = 9;
-
 	val = mt76_rr(dev, MT_WPDMA_GLO_CFG);
 	val &= MT_WPDMA_GLO_CFG_DMA_BURST_SIZE |
 	       MT_WPDMA_GLO_CFG_BIG_ENDIAN |
@@ -663,34 +450,6 @@ static void mt76x2_regd_notifier(struct wiphy *wiphy,
 	mt76x2_dfs_set_domain(dev, request->dfs_region);
 }
 
-#define CCK_RATE(_idx, _rate) {					\
-	.bitrate = _rate,					\
-	.flags = IEEE80211_RATE_SHORT_PREAMBLE,			\
-	.hw_value = (MT_PHY_TYPE_CCK << 8) | _idx,		\
-	.hw_value_short = (MT_PHY_TYPE_CCK << 8) | (8 + _idx),	\
-}
-
-#define OFDM_RATE(_idx, _rate) {				\
-	.bitrate = _rate,					\
-	.hw_value = (MT_PHY_TYPE_OFDM << 8) | _idx,		\
-	.hw_value_short = (MT_PHY_TYPE_OFDM << 8) | _idx,	\
-}
-
-static struct ieee80211_rate mt76x2_rates[] = {
-	CCK_RATE(0, 10),
-	CCK_RATE(1, 20),
-	CCK_RATE(2, 55),
-	CCK_RATE(3, 110),
-	OFDM_RATE(0, 60),
-	OFDM_RATE(1, 90),
-	OFDM_RATE(2, 120),
-	OFDM_RATE(3, 180),
-	OFDM_RATE(4, 240),
-	OFDM_RATE(5, 360),
-	OFDM_RATE(6, 480),
-	OFDM_RATE(7, 540),
-};
-
 static const struct ieee80211_iface_limit if_limits[] = {
 	{
 		.max = 1,
@@ -767,37 +526,6 @@ static void mt76x2_led_set_brightness(struct led_classdev *led_cdev,
 		mt76x2_led_set_config(mt76, 0xff, 0);
 }
 
-static void
-mt76x2_init_txpower(struct mt76x2_dev *dev,
-		    struct ieee80211_supported_band *sband)
-{
-	struct ieee80211_channel *chan;
-	struct mt76x2_tx_power_info txp;
-	struct mt76_rate_power t = {};
-	int target_power;
-	int i;
-
-	for (i = 0; i < sband->n_channels; i++) {
-		chan = &sband->channels[i];
-
-		mt76x2_get_power_info(dev, &txp, chan);
-
-		target_power = max_t(int, (txp.chain[0].target_power +
-					   txp.chain[0].delta),
-					  (txp.chain[1].target_power +
-					   txp.chain[1].delta));
-
-		mt76x2_get_rate_power(dev, &t, chan);
-
-		chan->max_power = mt76x2_get_max_rate_power(&t) +
-				  target_power;
-		chan->max_power /= 2;
-
-		/* convert to combined output power on 2x2 devices */
-		chan->max_power += 3;
-	}
-}
-
 int mt76x2_register_device(struct mt76x2_dev *dev)
 {
 	struct ieee80211_hw *hw = mt76_hw(dev);
@@ -812,20 +540,15 @@ int mt76x2_register_device(struct mt76x2_dev *dev)
 		return -ENOMEM;
 
 	kfifo_init(&dev->txstatus_fifo, status_fifo, fifo_size);
+	INIT_DELAYED_WORK(&dev->cal_work, mt76x2_phy_calibrate);
+	INIT_DELAYED_WORK(&dev->mac_work, mt76x2_mac_work);
+
+	mt76x2_init_device(dev);
 
 	ret = mt76x2_init_hardware(dev);
 	if (ret)
 		return ret;
 
-	hw->queues = 4;
-	hw->max_rates = 1;
-	hw->max_report_rates = 7;
-	hw->max_rate_tries = 1;
-	hw->extra_tx_headroom = 2;
-
-	hw->sta_data_size = sizeof(struct mt76x2_sta);
-	hw->vif_data_size = sizeof(struct mt76x2_vif);
-
 	for (i = 0; i < ARRAY_SIZE(dev->macaddr_list); i++) {
 		u8 *addr = dev->macaddr_list[i].addr;
 
@@ -845,16 +568,15 @@ int mt76x2_register_device(struct mt76x2_dev *dev)
 
 	wiphy->reg_notifier = mt76x2_regd_notifier;
 
-	wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_VHT_IBSS);
-
-	ieee80211_hw_set(hw, SUPPORTS_HT_CCK_RATES);
-	ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER);
-
-	INIT_DELAYED_WORK(&dev->cal_work, mt76x2_phy_calibrate);
-	INIT_DELAYED_WORK(&dev->mac_work, mt76x2_mac_work);
+	wiphy->interface_modes =
+		BIT(NL80211_IFTYPE_STATION) |
+		BIT(NL80211_IFTYPE_AP) |
+#ifdef CONFIG_MAC80211_MESH
+		BIT(NL80211_IFTYPE_MESH_POINT) |
+#endif
+		BIT(NL80211_IFTYPE_ADHOC);
 
-	dev->mt76.sband_2g.sband.ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING;
-	dev->mt76.sband_5g.sband.ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING;
+	wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_VHT_IBSS);
 
 	mt76x2_dfs_init_detector(dev);
 
@@ -862,9 +584,6 @@ int mt76x2_register_device(struct mt76x2_dev *dev)
 	dev->mt76.led_cdev.brightness_set = mt76x2_led_set_brightness;
 	dev->mt76.led_cdev.blink_set = mt76x2_led_set_blink;
 
-	/* init antenna configuration */
-	dev->mt76.antenna_mask = 3;
-
 	ret = mt76_register_device(&dev->mt76, true, mt76x2_rates,
 				   ARRAY_SIZE(mt76x2_rates));
 	if (ret)
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_init_common.c b/drivers/net/wireless/mediatek/mt76/mt76x2_init_common.c
new file mode 100644
index 0000000000000000000000000000000000000000..324b2a4b8b67cf3bec9c4006a37063f2bad75a31
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_init_common.c
@@ -0,0 +1,259 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "mt76x2.h"
+#include "mt76x2_eeprom.h"
+
+#define CCK_RATE(_idx, _rate) {					\
+	.bitrate = _rate,					\
+	.flags = IEEE80211_RATE_SHORT_PREAMBLE,			\
+	.hw_value = (MT_PHY_TYPE_CCK << 8) | _idx,		\
+	.hw_value_short = (MT_PHY_TYPE_CCK << 8) | (8 + _idx),	\
+}
+
+#define OFDM_RATE(_idx, _rate) {				\
+	.bitrate = _rate,					\
+	.hw_value = (MT_PHY_TYPE_OFDM << 8) | _idx,		\
+	.hw_value_short = (MT_PHY_TYPE_OFDM << 8) | _idx,	\
+}
+
+struct ieee80211_rate mt76x2_rates[] = {
+	CCK_RATE(0, 10),
+	CCK_RATE(1, 20),
+	CCK_RATE(2, 55),
+	CCK_RATE(3, 110),
+	OFDM_RATE(0, 60),
+	OFDM_RATE(1, 90),
+	OFDM_RATE(2, 120),
+	OFDM_RATE(3, 180),
+	OFDM_RATE(4, 240),
+	OFDM_RATE(5, 360),
+	OFDM_RATE(6, 480),
+	OFDM_RATE(7, 540),
+};
+EXPORT_SYMBOL_GPL(mt76x2_rates);
+
+struct mt76x2_reg_pair {
+	u32 reg;
+	u32 value;
+};
+
+static void
+mt76x2_set_wlan_state(struct mt76x2_dev *dev, bool enable)
+{
+	u32 val = mt76_rr(dev, MT_WLAN_FUN_CTRL);
+
+	if (enable)
+		val |= (MT_WLAN_FUN_CTRL_WLAN_EN |
+			MT_WLAN_FUN_CTRL_WLAN_CLK_EN);
+	else
+		val &= ~(MT_WLAN_FUN_CTRL_WLAN_EN |
+			 MT_WLAN_FUN_CTRL_WLAN_CLK_EN);
+
+	mt76_wr(dev, MT_WLAN_FUN_CTRL, val);
+	udelay(20);
+}
+
+void mt76x2_reset_wlan(struct mt76x2_dev *dev, bool enable)
+{
+	u32 val;
+
+	val = mt76_rr(dev, MT_WLAN_FUN_CTRL);
+
+	val &= ~MT_WLAN_FUN_CTRL_FRC_WL_ANT_SEL;
+
+	if (val & MT_WLAN_FUN_CTRL_WLAN_EN) {
+		val |= MT_WLAN_FUN_CTRL_WLAN_RESET_RF;
+		mt76_wr(dev, MT_WLAN_FUN_CTRL, val);
+		udelay(20);
+
+		val &= ~MT_WLAN_FUN_CTRL_WLAN_RESET_RF;
+	}
+
+	mt76_wr(dev, MT_WLAN_FUN_CTRL, val);
+	udelay(20);
+
+	mt76x2_set_wlan_state(dev, enable);
+}
+EXPORT_SYMBOL_GPL(mt76x2_reset_wlan);
+
+static void
+mt76x2_write_reg_pairs(struct mt76x2_dev *dev,
+		       const struct mt76x2_reg_pair *data, int len)
+{
+	while (len > 0) {
+		mt76_wr(dev, data->reg, data->value);
+		len--;
+		data++;
+	}
+}
+
+void mt76_write_mac_initvals(struct mt76x2_dev *dev)
+{
+#define DEFAULT_PROT_CFG_CCK				\
+	(FIELD_PREP(MT_PROT_CFG_RATE, 0x3) |		\
+	 FIELD_PREP(MT_PROT_CFG_NAV, 1) |		\
+	 FIELD_PREP(MT_PROT_CFG_TXOP_ALLOW, 0x3f) |	\
+	 MT_PROT_CFG_RTS_THRESH)
+
+#define DEFAULT_PROT_CFG_OFDM				\
+	(FIELD_PREP(MT_PROT_CFG_RATE, 0x2004) |		\
+	 FIELD_PREP(MT_PROT_CFG_NAV, 1) |			\
+	 FIELD_PREP(MT_PROT_CFG_TXOP_ALLOW, 0x3f) |	\
+	 MT_PROT_CFG_RTS_THRESH)
+
+#define DEFAULT_PROT_CFG_20				\
+	(FIELD_PREP(MT_PROT_CFG_RATE, 0x2004) |		\
+	 FIELD_PREP(MT_PROT_CFG_CTRL, 1) |		\
+	 FIELD_PREP(MT_PROT_CFG_NAV, 1) |			\
+	 FIELD_PREP(MT_PROT_CFG_TXOP_ALLOW, 0x17))
+
+#define DEFAULT_PROT_CFG_40				\
+	(FIELD_PREP(MT_PROT_CFG_RATE, 0x2084) |		\
+	 FIELD_PREP(MT_PROT_CFG_CTRL, 1) |		\
+	 FIELD_PREP(MT_PROT_CFG_NAV, 1) |			\
+	 FIELD_PREP(MT_PROT_CFG_TXOP_ALLOW, 0x3f))
+
+	static const struct mt76x2_reg_pair vals[] = {
+		/* Copied from MediaTek reference source */
+		{ MT_PBF_SYS_CTRL,		0x00080c00 },
+		{ MT_PBF_CFG,			0x1efebcff },
+		{ MT_FCE_PSE_CTRL,		0x00000001 },
+		{ MT_MAC_SYS_CTRL,		0x0000000c },
+		{ MT_MAX_LEN_CFG,		0x003e3f00 },
+		{ MT_AMPDU_MAX_LEN_20M1S,	0xaaa99887 },
+		{ MT_AMPDU_MAX_LEN_20M2S,	0x000000aa },
+		{ MT_XIFS_TIME_CFG,		0x33a40d0a },
+		{ MT_BKOFF_SLOT_CFG,		0x00000209 },
+		{ MT_TBTT_SYNC_CFG,		0x00422010 },
+		{ MT_PWR_PIN_CFG,		0x00000000 },
+		{ 0x1238,			0x001700c8 },
+		{ MT_TX_SW_CFG0,		0x00101001 },
+		{ MT_TX_SW_CFG1,		0x00010000 },
+		{ MT_TX_SW_CFG2,		0x00000000 },
+		{ MT_TXOP_CTRL_CFG,		0x0400583f },
+		{ MT_TX_RTS_CFG,		0x00100020 },
+		{ MT_TX_TIMEOUT_CFG,		0x000a2290 },
+		{ MT_TX_RETRY_CFG,		0x47f01f0f },
+		{ MT_EXP_ACK_TIME,		0x002c00dc },
+		{ MT_TX_PROT_CFG6,		0xe3f42004 },
+		{ MT_TX_PROT_CFG7,		0xe3f42084 },
+		{ MT_TX_PROT_CFG8,		0xe3f42104 },
+		{ MT_PIFS_TX_CFG,		0x00060fff },
+		{ MT_RX_FILTR_CFG,		0x00015f97 },
+		{ MT_LEGACY_BASIC_RATE,		0x0000017f },
+		{ MT_HT_BASIC_RATE,		0x00004003 },
+		{ MT_PN_PAD_MODE,		0x00000003 },
+		{ MT_TXOP_HLDR_ET,		0x00000002 },
+		{ 0xa44,			0x00000000 },
+		{ MT_HEADER_TRANS_CTRL_REG,	0x00000000 },
+		{ MT_TSO_CTRL,			0x00000000 },
+		{ MT_AUX_CLK_CFG,		0x00000000 },
+		{ MT_DACCLK_EN_DLY_CFG,		0x00000000 },
+		{ MT_TX_ALC_CFG_4,		0x00000000 },
+		{ MT_TX_ALC_VGA3,		0x00000000 },
+		{ MT_TX_PWR_CFG_0,		0x3a3a3a3a },
+		{ MT_TX_PWR_CFG_1,		0x3a3a3a3a },
+		{ MT_TX_PWR_CFG_2,		0x3a3a3a3a },
+		{ MT_TX_PWR_CFG_3,		0x3a3a3a3a },
+		{ MT_TX_PWR_CFG_4,		0x3a3a3a3a },
+		{ MT_TX_PWR_CFG_7,		0x3a3a3a3a },
+		{ MT_TX_PWR_CFG_8,		0x0000003a },
+		{ MT_TX_PWR_CFG_9,		0x0000003a },
+		{ MT_EFUSE_CTRL,		0x0000d000 },
+		{ MT_PAUSE_ENABLE_CONTROL1,	0x0000000a },
+		{ MT_FCE_WLAN_FLOW_CONTROL1,	0x60401c18 },
+		{ MT_WPDMA_DELAY_INT_CFG,	0x94ff0000 },
+		{ MT_TX_SW_CFG3,		0x00000004 },
+		{ MT_HT_FBK_TO_LEGACY,		0x00001818 },
+		{ MT_VHT_HT_FBK_CFG1,		0xedcba980 },
+		{ MT_PROT_AUTO_TX_CFG,		0x00830083 },
+		{ MT_HT_CTRL_CFG,		0x000001ff },
+	};
+	struct mt76x2_reg_pair prot_vals[] = {
+		{ MT_CCK_PROT_CFG,		DEFAULT_PROT_CFG_CCK },
+		{ MT_OFDM_PROT_CFG,		DEFAULT_PROT_CFG_OFDM },
+		{ MT_MM20_PROT_CFG,		DEFAULT_PROT_CFG_20 },
+		{ MT_MM40_PROT_CFG,		DEFAULT_PROT_CFG_40 },
+		{ MT_GF20_PROT_CFG,		DEFAULT_PROT_CFG_20 },
+		{ MT_GF40_PROT_CFG,		DEFAULT_PROT_CFG_40 },
+	};
+
+	mt76x2_write_reg_pairs(dev, vals, ARRAY_SIZE(vals));
+	mt76x2_write_reg_pairs(dev, prot_vals, ARRAY_SIZE(prot_vals));
+}
+EXPORT_SYMBOL_GPL(mt76_write_mac_initvals);
+
+void mt76x2_init_device(struct mt76x2_dev *dev)
+{
+	struct ieee80211_hw *hw = mt76_hw(dev);
+
+	hw->queues = 4;
+	hw->max_rates = 1;
+	hw->max_report_rates = 7;
+	hw->max_rate_tries = 1;
+	hw->extra_tx_headroom = 2;
+
+	hw->sta_data_size = sizeof(struct mt76x2_sta);
+	hw->vif_data_size = sizeof(struct mt76x2_vif);
+
+	ieee80211_hw_set(hw, SUPPORTS_HT_CCK_RATES);
+	ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER);
+
+	dev->mt76.sband_2g.sband.ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING;
+	dev->mt76.sband_5g.sband.ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING;
+
+	dev->chainmask = 0x202;
+	dev->global_wcid.idx = 255;
+	dev->global_wcid.hw_key_idx = -1;
+	dev->slottime = 9;
+
+	/* init antenna configuration */
+	dev->mt76.antenna_mask = 3;
+}
+EXPORT_SYMBOL_GPL(mt76x2_init_device);
+
+void mt76x2_init_txpower(struct mt76x2_dev *dev,
+			 struct ieee80211_supported_band *sband)
+{
+	struct ieee80211_channel *chan;
+	struct mt76x2_tx_power_info txp;
+	struct mt76_rate_power t = {};
+	int target_power;
+	int i;
+
+	for (i = 0; i < sband->n_channels; i++) {
+		chan = &sband->channels[i];
+
+		mt76x2_get_power_info(dev, &txp, chan);
+
+		target_power = max_t(int, (txp.chain[0].target_power +
+					   txp.chain[0].delta),
+					  (txp.chain[1].target_power +
+					   txp.chain[1].delta));
+
+		mt76x2_get_rate_power(dev, &t, chan);
+
+		chan->max_power = mt76x2_get_max_rate_power(&t) +
+				  target_power;
+		chan->max_power /= 2;
+
+		/* convert to combined output power on 2x2 devices */
+		chan->max_power += 3;
+	}
+}
+EXPORT_SYMBOL_GPL(mt76x2_init_txpower);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_mac.c b/drivers/net/wireless/mediatek/mt76/mt76x2_mac.c
index fc9af79b3e6948f41ddeaa4af2ef5e703c7b1d2e..23cf437d14f96cbe3d39cd3fcb3a81ba4ae5b9a6 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_mac.c
@@ -28,515 +28,12 @@ void mt76x2_mac_set_bssid(struct mt76x2_dev *dev, u8 idx, const u8 *addr)
 		       get_unaligned_le16(addr + 4));
 }
 
-static int
-mt76x2_mac_process_rate(struct mt76_rx_status *status, u16 rate)
-{
-	u8 idx = FIELD_GET(MT_RXWI_RATE_INDEX, rate);
-
-	switch (FIELD_GET(MT_RXWI_RATE_PHY, rate)) {
-	case MT_PHY_TYPE_OFDM:
-		if (idx >= 8)
-			idx = 0;
-
-		if (status->band == NL80211_BAND_2GHZ)
-			idx += 4;
-
-		status->rate_idx = idx;
-		return 0;
-	case MT_PHY_TYPE_CCK:
-		if (idx >= 8) {
-			idx -= 8;
-			status->enc_flags |= RX_ENC_FLAG_SHORTPRE;
-		}
-
-		if (idx >= 4)
-			idx = 0;
-
-		status->rate_idx = idx;
-		return 0;
-	case MT_PHY_TYPE_HT_GF:
-		status->enc_flags |= RX_ENC_FLAG_HT_GF;
-		/* fall through */
-	case MT_PHY_TYPE_HT:
-		status->encoding = RX_ENC_HT;
-		status->rate_idx = idx;
-		break;
-	case MT_PHY_TYPE_VHT:
-		status->encoding = RX_ENC_VHT;
-		status->rate_idx = FIELD_GET(MT_RATE_INDEX_VHT_IDX, idx);
-		status->nss = FIELD_GET(MT_RATE_INDEX_VHT_NSS, idx) + 1;
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	if (rate & MT_RXWI_RATE_LDPC)
-		status->enc_flags |= RX_ENC_FLAG_LDPC;
-
-	if (rate & MT_RXWI_RATE_SGI)
-		status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
-
-	if (rate & MT_RXWI_RATE_STBC)
-		status->enc_flags |= 1 << RX_ENC_FLAG_STBC_SHIFT;
-
-	switch (FIELD_GET(MT_RXWI_RATE_BW, rate)) {
-	case MT_PHY_BW_20:
-		break;
-	case MT_PHY_BW_40:
-		status->bw = RATE_INFO_BW_40;
-		break;
-	case MT_PHY_BW_80:
-		status->bw = RATE_INFO_BW_80;
-		break;
-	default:
-		break;
-	}
-
-	return 0;
-}
-
-static __le16
-mt76x2_mac_tx_rate_val(struct mt76x2_dev *dev,
-		       const struct ieee80211_tx_rate *rate, u8 *nss_val)
-{
-	u16 rateval;
-	u8 phy, rate_idx;
-	u8 nss = 1;
-	u8 bw = 0;
-
-	if (rate->flags & IEEE80211_TX_RC_VHT_MCS) {
-		rate_idx = rate->idx;
-		nss = 1 + (rate->idx >> 4);
-		phy = MT_PHY_TYPE_VHT;
-		if (rate->flags & IEEE80211_TX_RC_80_MHZ_WIDTH)
-			bw = 2;
-		else if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
-			bw = 1;
-	} else if (rate->flags & IEEE80211_TX_RC_MCS) {
-		rate_idx = rate->idx;
-		nss = 1 + (rate->idx >> 3);
-		phy = MT_PHY_TYPE_HT;
-		if (rate->flags & IEEE80211_TX_RC_GREEN_FIELD)
-			phy = MT_PHY_TYPE_HT_GF;
-		if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
-			bw = 1;
-	} else {
-		const struct ieee80211_rate *r;
-		int band = dev->mt76.chandef.chan->band;
-		u16 val;
-
-		r = &mt76_hw(dev)->wiphy->bands[band]->bitrates[rate->idx];
-		if (rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
-			val = r->hw_value_short;
-		else
-			val = r->hw_value;
-
-		phy = val >> 8;
-		rate_idx = val & 0xff;
-		bw = 0;
-	}
-
-	rateval = FIELD_PREP(MT_RXWI_RATE_INDEX, rate_idx);
-	rateval |= FIELD_PREP(MT_RXWI_RATE_PHY, phy);
-	rateval |= FIELD_PREP(MT_RXWI_RATE_BW, bw);
-	if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
-		rateval |= MT_RXWI_RATE_SGI;
-
-	*nss_val = nss;
-	return cpu_to_le16(rateval);
-}
-
-void mt76x2_mac_wcid_set_drop(struct mt76x2_dev *dev, u8 idx, bool drop)
-{
-	u32 val = mt76_rr(dev, MT_WCID_DROP(idx));
-	u32 bit = MT_WCID_DROP_MASK(idx);
-
-	/* prevent unnecessary writes */
-	if ((val & bit) != (bit * drop))
-		mt76_wr(dev, MT_WCID_DROP(idx), (val & ~bit) | (bit * drop));
-}
-
-void mt76x2_mac_wcid_set_rate(struct mt76x2_dev *dev, struct mt76_wcid *wcid,
-			      const struct ieee80211_tx_rate *rate)
-{
-	spin_lock_bh(&dev->mt76.lock);
-	wcid->tx_rate = mt76x2_mac_tx_rate_val(dev, rate, &wcid->tx_rate_nss);
-	wcid->tx_rate_set = true;
-	spin_unlock_bh(&dev->mt76.lock);
-}
-
-void mt76x2_mac_write_txwi(struct mt76x2_dev *dev, struct mt76x2_txwi *txwi,
-			   struct sk_buff *skb, struct mt76_wcid *wcid,
-			   struct ieee80211_sta *sta)
-{
-	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-	struct ieee80211_tx_rate *rate = &info->control.rates[0];
-	struct ieee80211_key_conf *key = info->control.hw_key;
-	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
-	u16 rate_ht_mask = FIELD_PREP(MT_RXWI_RATE_PHY, BIT(1) | BIT(2));
-	u16 txwi_flags = 0;
-	u8 nss;
-	s8 txpwr_adj, max_txpwr_adj;
-	u8 ccmp_pn[8];
-
-	memset(txwi, 0, sizeof(*txwi));
-
-	if (wcid)
-		txwi->wcid = wcid->idx;
-	else
-		txwi->wcid = 0xff;
-
-	txwi->pktid = 1;
-
-	if (wcid && wcid->sw_iv && key) {
-		u64 pn = atomic64_inc_return(&key->tx_pn);
-		ccmp_pn[0] = pn;
-		ccmp_pn[1] = pn >> 8;
-		ccmp_pn[2] = 0;
-		ccmp_pn[3] = 0x20 | (key->keyidx << 6);
-		ccmp_pn[4] = pn >> 16;
-		ccmp_pn[5] = pn >> 24;
-		ccmp_pn[6] = pn >> 32;
-		ccmp_pn[7] = pn >> 40;
-		txwi->iv = *((__le32 *)&ccmp_pn[0]);
-		txwi->eiv = *((__le32 *)&ccmp_pn[1]);
-	}
-
-	spin_lock_bh(&dev->mt76.lock);
-	if (wcid && (rate->idx < 0 || !rate->count)) {
-		txwi->rate = wcid->tx_rate;
-		max_txpwr_adj = wcid->max_txpwr_adj;
-		nss = wcid->tx_rate_nss;
-	} else {
-		txwi->rate = mt76x2_mac_tx_rate_val(dev, rate, &nss);
-		max_txpwr_adj = mt76x2_tx_get_max_txpwr_adj(dev, rate);
-	}
-	spin_unlock_bh(&dev->mt76.lock);
-
-	txpwr_adj = mt76x2_tx_get_txpwr_adj(dev, dev->txpower_conf,
-					    max_txpwr_adj);
-	txwi->ctl2 = FIELD_PREP(MT_TX_PWR_ADJ, txpwr_adj);
-
-	if (mt76xx_rev(dev) >= MT76XX_REV_E4)
-		txwi->txstream = 0x13;
-	else if (mt76xx_rev(dev) >= MT76XX_REV_E3 &&
-		 !(txwi->rate & cpu_to_le16(rate_ht_mask)))
-		txwi->txstream = 0x93;
-
-	if (info->flags & IEEE80211_TX_CTL_LDPC)
-		txwi->rate |= cpu_to_le16(MT_RXWI_RATE_LDPC);
-	if ((info->flags & IEEE80211_TX_CTL_STBC) && nss == 1)
-		txwi->rate |= cpu_to_le16(MT_RXWI_RATE_STBC);
-	if (nss > 1 && sta && sta->smps_mode == IEEE80211_SMPS_DYNAMIC)
-		txwi_flags |= MT_TXWI_FLAGS_MMPS;
-	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
-		txwi->ack_ctl |= MT_TXWI_ACK_CTL_REQ;
-	if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
-		txwi->ack_ctl |= MT_TXWI_ACK_CTL_NSEQ;
-	if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
-		txwi->pktid |= MT_TXWI_PKTID_PROBE;
-	if ((info->flags & IEEE80211_TX_CTL_AMPDU) && sta) {
-		u8 ba_size = IEEE80211_MIN_AMPDU_BUF;
-
-		ba_size <<= sta->ht_cap.ampdu_factor;
-		ba_size = min_t(int, 63, ba_size - 1);
-		if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
-			ba_size = 0;
-		txwi->ack_ctl |= FIELD_PREP(MT_TXWI_ACK_CTL_BA_WINDOW, ba_size);
-
-		txwi_flags |= MT_TXWI_FLAGS_AMPDU |
-			 FIELD_PREP(MT_TXWI_FLAGS_MPDU_DENSITY,
-				    sta->ht_cap.ampdu_density);
-	}
-
-	if (ieee80211_is_probe_resp(hdr->frame_control) ||
-	    ieee80211_is_beacon(hdr->frame_control))
-		txwi_flags |= MT_TXWI_FLAGS_TS;
-
-	txwi->flags |= cpu_to_le16(txwi_flags);
-	txwi->len_ctl = cpu_to_le16(skb->len);
-}
-
-static void mt76x2_remove_hdr_pad(struct sk_buff *skb, int len)
-{
-	int hdrlen;
-
-	if (!len)
-		return;
-
-	hdrlen = ieee80211_get_hdrlen_from_skb(skb);
-	memmove(skb->data + len, skb->data, hdrlen);
-	skb_pull(skb, len);
-}
-
-static struct mt76x2_sta *
-mt76x2_rx_get_sta(struct mt76x2_dev *dev, u8 idx)
-{
-	struct mt76_wcid *wcid;
-
-	if (idx >= ARRAY_SIZE(dev->wcid))
-		return NULL;
-
-	wcid = rcu_dereference(dev->wcid[idx]);
-	if (!wcid)
-		return NULL;
-
-	return container_of(wcid, struct mt76x2_sta, wcid);
-}
-
-static struct mt76_wcid *
-mt76x2_rx_get_sta_wcid(struct mt76x2_dev *dev, struct mt76x2_sta *sta, bool unicast)
-{
-	if (!sta)
-		return NULL;
-
-	if (unicast)
-		return &sta->wcid;
-	else
-		return &sta->vif->group_wcid;
-}
-
-int mt76x2_mac_process_rx(struct mt76x2_dev *dev, struct sk_buff *skb,
-			  void *rxi)
-{
-	struct mt76_rx_status *status = (struct mt76_rx_status *) skb->cb;
-	struct mt76x2_rxwi *rxwi = rxi;
-	struct mt76x2_sta *sta;
-	u32 rxinfo = le32_to_cpu(rxwi->rxinfo);
-	u32 ctl = le32_to_cpu(rxwi->ctl);
-	u16 rate = le16_to_cpu(rxwi->rate);
-	u16 tid_sn = le16_to_cpu(rxwi->tid_sn);
-	bool unicast = rxwi->rxinfo & cpu_to_le32(MT_RXINFO_UNICAST);
-	int pad_len = 0;
-	u8 pn_len;
-	u8 wcid;
-	int len;
-
-	if (!test_bit(MT76_STATE_RUNNING, &dev->mt76.state))
-		return -EINVAL;
-
-	if (rxinfo & MT_RXINFO_L2PAD)
-		pad_len += 2;
-
-	if (rxinfo & MT_RXINFO_DECRYPT) {
-		status->flag |= RX_FLAG_DECRYPTED;
-		status->flag |= RX_FLAG_MMIC_STRIPPED;
-		status->flag |= RX_FLAG_MIC_STRIPPED;
-		status->flag |= RX_FLAG_IV_STRIPPED;
-	}
-
-	wcid = FIELD_GET(MT_RXWI_CTL_WCID, ctl);
-	sta = mt76x2_rx_get_sta(dev, wcid);
-	status->wcid = mt76x2_rx_get_sta_wcid(dev, sta, unicast);
-
-	len = FIELD_GET(MT_RXWI_CTL_MPDU_LEN, ctl);
-	pn_len = FIELD_GET(MT_RXINFO_PN_LEN, rxinfo);
-	if (pn_len) {
-		int offset = ieee80211_get_hdrlen_from_skb(skb) + pad_len;
-		u8 *data = skb->data + offset;
-
-		status->iv[0] = data[7];
-		status->iv[1] = data[6];
-		status->iv[2] = data[5];
-		status->iv[3] = data[4];
-		status->iv[4] = data[1];
-		status->iv[5] = data[0];
-
-		/*
-		 * Driver CCMP validation can't deal with fragments.
-		 * Let mac80211 take care of it.
-		 */
-		if (rxinfo & MT_RXINFO_FRAG) {
-			status->flag &= ~RX_FLAG_IV_STRIPPED;
-		} else {
-			pad_len += pn_len << 2;
-			len -= pn_len << 2;
-		}
-	}
-
-	mt76x2_remove_hdr_pad(skb, pad_len);
-
-	if ((rxinfo & MT_RXINFO_BA) && !(rxinfo & MT_RXINFO_NULL))
-		status->aggr = true;
-
-	if (WARN_ON_ONCE(len > skb->len))
-		return -EINVAL;
-
-	pskb_trim(skb, len);
-	status->chains = BIT(0) | BIT(1);
-	status->chain_signal[0] = mt76x2_phy_get_rssi(dev, rxwi->rssi[0], 0);
-	status->chain_signal[1] = mt76x2_phy_get_rssi(dev, rxwi->rssi[1], 1);
-	status->signal = max(status->chain_signal[0], status->chain_signal[1]);
-	status->freq = dev->mt76.chandef.chan->center_freq;
-	status->band = dev->mt76.chandef.chan->band;
-
-	status->tid = FIELD_GET(MT_RXWI_TID, tid_sn);
-	status->seqno = FIELD_GET(MT_RXWI_SN, tid_sn);
-
-	if (sta) {
-		ewma_signal_add(&sta->rssi, status->signal);
-		sta->inactive_count = 0;
-	}
-
-	return mt76x2_mac_process_rate(status, rate);
-}
-
-static int
-mt76x2_mac_process_tx_rate(struct ieee80211_tx_rate *txrate, u16 rate,
-			   enum nl80211_band band)
-{
-	u8 idx = FIELD_GET(MT_RXWI_RATE_INDEX, rate);
-
-	txrate->idx = 0;
-	txrate->flags = 0;
-	txrate->count = 1;
-
-	switch (FIELD_GET(MT_RXWI_RATE_PHY, rate)) {
-	case MT_PHY_TYPE_OFDM:
-		if (band == NL80211_BAND_2GHZ)
-			idx += 4;
-
-		txrate->idx = idx;
-		return 0;
-	case MT_PHY_TYPE_CCK:
-		if (idx >= 8)
-			idx -= 8;
-
-		txrate->idx = idx;
-		return 0;
-	case MT_PHY_TYPE_HT_GF:
-		txrate->flags |= IEEE80211_TX_RC_GREEN_FIELD;
-		/* fall through */
-	case MT_PHY_TYPE_HT:
-		txrate->flags |= IEEE80211_TX_RC_MCS;
-		txrate->idx = idx;
-		break;
-	case MT_PHY_TYPE_VHT:
-		txrate->flags |= IEEE80211_TX_RC_VHT_MCS;
-		txrate->idx = idx;
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	switch (FIELD_GET(MT_RXWI_RATE_BW, rate)) {
-	case MT_PHY_BW_20:
-		break;
-	case MT_PHY_BW_40:
-		txrate->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
-		break;
-	case MT_PHY_BW_80:
-		txrate->flags |= IEEE80211_TX_RC_80_MHZ_WIDTH;
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	if (rate & MT_RXWI_RATE_SGI)
-		txrate->flags |= IEEE80211_TX_RC_SHORT_GI;
-
-	return 0;
-}
-
-static void
-mt76x2_mac_fill_tx_status(struct mt76x2_dev *dev,
-			  struct ieee80211_tx_info *info,
-			  struct mt76x2_tx_status *st, int n_frames)
-{
-	struct ieee80211_tx_rate *rate = info->status.rates;
-	int cur_idx, last_rate;
-	int i;
-
-	if (!n_frames)
-		return;
-
-	last_rate = min_t(int, st->retry, IEEE80211_TX_MAX_RATES - 1);
-	mt76x2_mac_process_tx_rate(&rate[last_rate], st->rate,
-				 dev->mt76.chandef.chan->band);
-	if (last_rate < IEEE80211_TX_MAX_RATES - 1)
-		rate[last_rate + 1].idx = -1;
-
-	cur_idx = rate[last_rate].idx + last_rate;
-	for (i = 0; i <= last_rate; i++) {
-		rate[i].flags = rate[last_rate].flags;
-		rate[i].idx = max_t(int, 0, cur_idx - i);
-		rate[i].count = 1;
-	}
-	rate[last_rate].count = st->retry + 1 - last_rate;
-
-	info->status.ampdu_len = n_frames;
-	info->status.ampdu_ack_len = st->success ? n_frames : 0;
-
-	if (st->pktid & MT_TXWI_PKTID_PROBE)
-		info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE;
-
-	if (st->aggr)
-		info->flags |= IEEE80211_TX_CTL_AMPDU |
-			       IEEE80211_TX_STAT_AMPDU;
-
-	if (!st->ack_req)
-		info->flags |= IEEE80211_TX_CTL_NO_ACK;
-	else if (st->success)
-		info->flags |= IEEE80211_TX_STAT_ACK;
-}
-
-static void
-mt76x2_send_tx_status(struct mt76x2_dev *dev, struct mt76x2_tx_status *stat,
-		      u8 *update)
-{
-	struct ieee80211_tx_info info = {};
-	struct ieee80211_sta *sta = NULL;
-	struct mt76_wcid *wcid = NULL;
-	struct mt76x2_sta *msta = NULL;
-
-	rcu_read_lock();
-	if (stat->wcid < ARRAY_SIZE(dev->wcid))
-		wcid = rcu_dereference(dev->wcid[stat->wcid]);
-
-	if (wcid) {
-		void *priv;
-
-		priv = msta = container_of(wcid, struct mt76x2_sta, wcid);
-		sta = container_of(priv, struct ieee80211_sta,
-				   drv_priv);
-	}
-
-	if (msta && stat->aggr) {
-		u32 stat_val, stat_cache;
-
-		stat_val = stat->rate;
-		stat_val |= ((u32) stat->retry) << 16;
-		stat_cache = msta->status.rate;
-		stat_cache |= ((u32) msta->status.retry) << 16;
-
-		if (*update == 0 && stat_val == stat_cache &&
-		    stat->wcid == msta->status.wcid && msta->n_frames < 32) {
-			msta->n_frames++;
-			goto out;
-		}
-
-		mt76x2_mac_fill_tx_status(dev, &info, &msta->status,
-					  msta->n_frames);
-
-		msta->status = *stat;
-		msta->n_frames = 1;
-		*update = 0;
-	} else {
-		mt76x2_mac_fill_tx_status(dev, &info, stat, 1);
-		*update = 1;
-	}
-
-	ieee80211_tx_status_noskb(mt76_hw(dev), sta, &info);
-
-out:
-	rcu_read_unlock();
-}
-
 void mt76x2_mac_poll_tx_status(struct mt76x2_dev *dev, bool irq)
 {
 	struct mt76x2_tx_status stat = {};
 	unsigned long flags;
 	u8 update = 1;
+	bool ret;
 
 	if (!test_bit(MT76_STATE_RUNNING, &dev->mt76.state))
 		return;
@@ -544,26 +41,13 @@ void mt76x2_mac_poll_tx_status(struct mt76x2_dev *dev, bool irq)
 	trace_mac_txstat_poll(dev);
 
 	while (!irq || !kfifo_is_full(&dev->txstatus_fifo)) {
-		u32 stat1, stat2;
-
 		spin_lock_irqsave(&dev->irq_lock, flags);
-		stat2 = mt76_rr(dev, MT_TX_STAT_FIFO_EXT);
-		stat1 = mt76_rr(dev, MT_TX_STAT_FIFO);
-		if (!(stat1 & MT_TX_STAT_FIFO_VALID)) {
-			spin_unlock_irqrestore(&dev->irq_lock, flags);
-			break;
-		}
-
+		ret = mt76x2_mac_load_tx_status(dev, &stat);
 		spin_unlock_irqrestore(&dev->irq_lock, flags);
 
-		stat.valid = 1;
-		stat.success = !!(stat1 & MT_TX_STAT_FIFO_SUCCESS);
-		stat.aggr = !!(stat1 & MT_TX_STAT_FIFO_AGGR);
-		stat.ack_req = !!(stat1 & MT_TX_STAT_FIFO_ACKREQ);
-		stat.wcid = FIELD_GET(MT_TX_STAT_FIFO_WCID, stat1);
-		stat.rate = FIELD_GET(MT_TX_STAT_FIFO_RATE, stat1);
-		stat.retry = FIELD_GET(MT_TX_STAT_FIFO_EXT_RETRY, stat2);
-		stat.pktid = FIELD_GET(MT_TX_STAT_FIFO_EXT_PKTID, stat2);
+		if (!ret)
+			break;
+
 		trace_mac_txstat_fetch(dev, &stat);
 
 		if (!irq) {
@@ -612,104 +96,6 @@ void mt76x2_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q,
 		dev_kfree_skb_any(e->skb);
 }
 
-static enum mt76x2_cipher_type
-mt76x2_mac_get_key_info(struct ieee80211_key_conf *key, u8 *key_data)
-{
-	memset(key_data, 0, 32);
-	if (!key)
-		return MT_CIPHER_NONE;
-
-	if (key->keylen > 32)
-		return MT_CIPHER_NONE;
-
-	memcpy(key_data, key->key, key->keylen);
-
-	switch (key->cipher) {
-	case WLAN_CIPHER_SUITE_WEP40:
-		return MT_CIPHER_WEP40;
-	case WLAN_CIPHER_SUITE_WEP104:
-		return MT_CIPHER_WEP104;
-	case WLAN_CIPHER_SUITE_TKIP:
-		return MT_CIPHER_TKIP;
-	case WLAN_CIPHER_SUITE_CCMP:
-		return MT_CIPHER_AES_CCMP;
-	default:
-		return MT_CIPHER_NONE;
-	}
-}
-
-void mt76x2_mac_wcid_setup(struct mt76x2_dev *dev, u8 idx, u8 vif_idx, u8 *mac)
-{
-	struct mt76_wcid_addr addr = {};
-	u32 attr;
-
-	attr = FIELD_PREP(MT_WCID_ATTR_BSS_IDX, vif_idx & 7) |
-	       FIELD_PREP(MT_WCID_ATTR_BSS_IDX_EXT, !!(vif_idx & 8));
-
-	mt76_wr(dev, MT_WCID_ATTR(idx), attr);
-
-	mt76_wr(dev, MT_WCID_TX_RATE(idx), 0);
-	mt76_wr(dev, MT_WCID_TX_RATE(idx) + 4, 0);
-
-	if (idx >= 128)
-		return;
-
-	if (mac)
-		memcpy(addr.macaddr, mac, ETH_ALEN);
-
-	mt76_wr_copy(dev, MT_WCID_ADDR(idx), &addr, sizeof(addr));
-}
-
-int mt76x2_mac_wcid_set_key(struct mt76x2_dev *dev, u8 idx,
-			    struct ieee80211_key_conf *key)
-{
-	enum mt76x2_cipher_type cipher;
-	u8 key_data[32];
-	u8 iv_data[8];
-
-	cipher = mt76x2_mac_get_key_info(key, key_data);
-	if (cipher == MT_CIPHER_NONE && key)
-		return -EOPNOTSUPP;
-
-	mt76_rmw_field(dev, MT_WCID_ATTR(idx), MT_WCID_ATTR_PKEY_MODE, cipher);
-	mt76_wr_copy(dev, MT_WCID_KEY(idx), key_data, sizeof(key_data));
-
-	memset(iv_data, 0, sizeof(iv_data));
-	if (key) {
-		mt76_rmw_field(dev, MT_WCID_ATTR(idx), MT_WCID_ATTR_PAIRWISE,
-			       !!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE));
-		iv_data[3] = key->keyidx << 6;
-		if (cipher >= MT_CIPHER_TKIP)
-			iv_data[3] |= 0x20;
-	}
-
-	mt76_wr_copy(dev, MT_WCID_IV(idx), iv_data, sizeof(iv_data));
-
-	return 0;
-}
-
-int mt76x2_mac_shared_key_setup(struct mt76x2_dev *dev, u8 vif_idx, u8 key_idx,
-			      struct ieee80211_key_conf *key)
-{
-	enum mt76x2_cipher_type cipher;
-	u8 key_data[32];
-	u32 val;
-
-	cipher = mt76x2_mac_get_key_info(key, key_data);
-	if (cipher == MT_CIPHER_NONE && key)
-		return -EOPNOTSUPP;
-
-	val = mt76_rr(dev, MT_SKEY_MODE(vif_idx));
-	val &= ~(MT_SKEY_MODE_MASK << MT_SKEY_MODE_SHIFT(vif_idx, key_idx));
-	val |= cipher << MT_SKEY_MODE_SHIFT(vif_idx, key_idx);
-	mt76_wr(dev, MT_SKEY_MODE(vif_idx), val);
-
-	mt76_wr_copy(dev, MT_SKEY(vif_idx, key_idx), key_data,
-		     sizeof(key_data));
-
-	return 0;
-}
-
 static int
 mt76_write_beacon(struct mt76x2_dev *dev, int offset, struct sk_buff *skb)
 {
@@ -719,7 +105,7 @@ mt76_write_beacon(struct mt76x2_dev *dev, int offset, struct sk_buff *skb)
 	if (WARN_ON_ONCE(beacon_len < skb->len + sizeof(struct mt76x2_txwi)))
 		return -ENOSPC;
 
-	mt76x2_mac_write_txwi(dev, &txwi, skb, NULL, NULL);
+	mt76x2_mac_write_txwi(dev, &txwi, skb, NULL, NULL, skb->len);
 
 	mt76_wr_copy(dev, offset, &txwi, sizeof(txwi));
 	offset += sizeof(txwi);
@@ -854,3 +240,33 @@ void mt76x2_mac_work(struct work_struct *work)
 	ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mac_work,
 				     MT_CALIBRATE_INTERVAL);
 }
+
+void mt76x2_mac_set_tx_protection(struct mt76x2_dev *dev, u32 val)
+{
+	u32 data = 0;
+
+	if (val != ~0)
+		data = FIELD_PREP(MT_PROT_CFG_CTRL, 1) |
+		       MT_PROT_CFG_RTS_THRESH;
+
+	mt76_rmw_field(dev, MT_TX_RTS_CFG, MT_TX_RTS_CFG_THRESH, val);
+
+	mt76_rmw(dev, MT_CCK_PROT_CFG,
+		 MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
+	mt76_rmw(dev, MT_OFDM_PROT_CFG,
+		 MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
+	mt76_rmw(dev, MT_MM20_PROT_CFG,
+		 MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
+	mt76_rmw(dev, MT_MM40_PROT_CFG,
+		 MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
+	mt76_rmw(dev, MT_GF20_PROT_CFG,
+		 MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
+	mt76_rmw(dev, MT_GF40_PROT_CFG,
+		 MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
+	mt76_rmw(dev, MT_TX_PROT_CFG6,
+		 MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
+	mt76_rmw(dev, MT_TX_PROT_CFG7,
+		 MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
+	mt76_rmw(dev, MT_TX_PROT_CFG8,
+		 MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_mac.h b/drivers/net/wireless/mediatek/mt76/mt76x2_mac.h
index c048cd06df6bdd4fbc048b880b25c9c54f26ee5f..5af0107ba7487d80cab8c767b86fdee5e7edcb55 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_mac.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_mac.h
@@ -166,7 +166,7 @@ int mt76x2_mac_process_rx(struct mt76x2_dev *dev, struct sk_buff *skb,
 			  void *rxi);
 void mt76x2_mac_write_txwi(struct mt76x2_dev *dev, struct mt76x2_txwi *txwi,
 			   struct sk_buff *skb, struct mt76_wcid *wcid,
-			   struct ieee80211_sta *sta);
+			   struct ieee80211_sta *sta, int len);
 void mt76x2_mac_wcid_setup(struct mt76x2_dev *dev, u8 idx, u8 vif_idx, u8 *mac);
 int mt76x2_mac_wcid_set_key(struct mt76x2_dev *dev, u8 idx,
 			    struct ieee80211_key_conf *key);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_mac_common.c b/drivers/net/wireless/mediatek/mt76/mt76x2_mac_common.c
new file mode 100644
index 0000000000000000000000000000000000000000..6542644bc3259a6e12cff1d05242895eb81adaf3
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_mac_common.c
@@ -0,0 +1,699 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "mt76x2.h"
+
+void mt76x2_mac_stop(struct mt76x2_dev *dev, bool force)
+{
+	bool stopped = false;
+	u32 rts_cfg;
+	int i;
+
+	mt76_wr(dev, MT_MAC_SYS_CTRL, 0);
+
+	rts_cfg = mt76_rr(dev, MT_TX_RTS_CFG);
+	mt76_wr(dev, MT_TX_RTS_CFG, rts_cfg & ~MT_TX_RTS_CFG_RETRY_LIMIT);
+
+	/* Wait for MAC to become idle */
+	for (i = 0; i < 300; i++) {
+		if ((mt76_rr(dev, MT_MAC_STATUS) &
+		     (MT_MAC_STATUS_RX | MT_MAC_STATUS_TX)) ||
+		    mt76_rr(dev, MT_BBP(IBI, 12))) {
+			udelay(1);
+			continue;
+		}
+
+		stopped = true;
+		break;
+	}
+
+	if (force && !stopped) {
+		mt76_set(dev, MT_BBP(CORE, 4), BIT(1));
+		mt76_clear(dev, MT_BBP(CORE, 4), BIT(1));
+
+		mt76_set(dev, MT_BBP(CORE, 4), BIT(0));
+		mt76_clear(dev, MT_BBP(CORE, 4), BIT(0));
+	}
+
+	mt76_wr(dev, MT_TX_RTS_CFG, rts_cfg);
+}
+EXPORT_SYMBOL_GPL(mt76x2_mac_stop);
+
+bool mt76x2_mac_load_tx_status(struct mt76x2_dev *dev,
+			       struct mt76x2_tx_status *stat)
+{
+	u32 stat1, stat2;
+
+	stat2 = mt76_rr(dev, MT_TX_STAT_FIFO_EXT);
+	stat1 = mt76_rr(dev, MT_TX_STAT_FIFO);
+
+	stat->valid = !!(stat1 & MT_TX_STAT_FIFO_VALID);
+	if (!stat->valid)
+		return false;
+
+	stat->success = !!(stat1 & MT_TX_STAT_FIFO_SUCCESS);
+	stat->aggr = !!(stat1 & MT_TX_STAT_FIFO_AGGR);
+	stat->ack_req = !!(stat1 & MT_TX_STAT_FIFO_ACKREQ);
+	stat->wcid = FIELD_GET(MT_TX_STAT_FIFO_WCID, stat1);
+	stat->rate = FIELD_GET(MT_TX_STAT_FIFO_RATE, stat1);
+
+	stat->retry = FIELD_GET(MT_TX_STAT_FIFO_EXT_RETRY, stat2);
+	stat->pktid = FIELD_GET(MT_TX_STAT_FIFO_EXT_PKTID, stat2);
+
+	return true;
+}
+EXPORT_SYMBOL_GPL(mt76x2_mac_load_tx_status);
+
+static int
+mt76x2_mac_process_tx_rate(struct ieee80211_tx_rate *txrate, u16 rate,
+			   enum nl80211_band band)
+{
+	u8 idx = FIELD_GET(MT_RXWI_RATE_INDEX, rate);
+
+	txrate->idx = 0;
+	txrate->flags = 0;
+	txrate->count = 1;
+
+	switch (FIELD_GET(MT_RXWI_RATE_PHY, rate)) {
+	case MT_PHY_TYPE_OFDM:
+		if (band == NL80211_BAND_2GHZ)
+			idx += 4;
+
+		txrate->idx = idx;
+		return 0;
+	case MT_PHY_TYPE_CCK:
+		if (idx >= 8)
+			idx -= 8;
+
+		txrate->idx = idx;
+		return 0;
+	case MT_PHY_TYPE_HT_GF:
+		txrate->flags |= IEEE80211_TX_RC_GREEN_FIELD;
+		/* fall through */
+	case MT_PHY_TYPE_HT:
+		txrate->flags |= IEEE80211_TX_RC_MCS;
+		txrate->idx = idx;
+		break;
+	case MT_PHY_TYPE_VHT:
+		txrate->flags |= IEEE80211_TX_RC_VHT_MCS;
+		txrate->idx = idx;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	switch (FIELD_GET(MT_RXWI_RATE_BW, rate)) {
+	case MT_PHY_BW_20:
+		break;
+	case MT_PHY_BW_40:
+		txrate->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
+		break;
+	case MT_PHY_BW_80:
+		txrate->flags |= IEEE80211_TX_RC_80_MHZ_WIDTH;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if (rate & MT_RXWI_RATE_SGI)
+		txrate->flags |= IEEE80211_TX_RC_SHORT_GI;
+
+	return 0;
+}
+
+static void
+mt76x2_mac_fill_tx_status(struct mt76x2_dev *dev,
+			  struct ieee80211_tx_info *info,
+			  struct mt76x2_tx_status *st, int n_frames)
+{
+	struct ieee80211_tx_rate *rate = info->status.rates;
+	int cur_idx, last_rate;
+	int i;
+
+	if (!n_frames)
+		return;
+
+	last_rate = min_t(int, st->retry, IEEE80211_TX_MAX_RATES - 1);
+	mt76x2_mac_process_tx_rate(&rate[last_rate], st->rate,
+				 dev->mt76.chandef.chan->band);
+	if (last_rate < IEEE80211_TX_MAX_RATES - 1)
+		rate[last_rate + 1].idx = -1;
+
+	cur_idx = rate[last_rate].idx + last_rate;
+	for (i = 0; i <= last_rate; i++) {
+		rate[i].flags = rate[last_rate].flags;
+		rate[i].idx = max_t(int, 0, cur_idx - i);
+		rate[i].count = 1;
+	}
+	rate[last_rate].count = st->retry + 1 - last_rate;
+
+	info->status.ampdu_len = n_frames;
+	info->status.ampdu_ack_len = st->success ? n_frames : 0;
+
+	if (st->pktid & MT_TXWI_PKTID_PROBE)
+		info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE;
+
+	if (st->aggr)
+		info->flags |= IEEE80211_TX_CTL_AMPDU |
+			       IEEE80211_TX_STAT_AMPDU;
+
+	if (!st->ack_req)
+		info->flags |= IEEE80211_TX_CTL_NO_ACK;
+	else if (st->success)
+		info->flags |= IEEE80211_TX_STAT_ACK;
+}
+
+void mt76x2_send_tx_status(struct mt76x2_dev *dev,
+			   struct mt76x2_tx_status *stat, u8 *update)
+{
+	struct ieee80211_tx_info info = {};
+	struct ieee80211_sta *sta = NULL;
+	struct mt76_wcid *wcid = NULL;
+	struct mt76x2_sta *msta = NULL;
+
+	rcu_read_lock();
+	if (stat->wcid < ARRAY_SIZE(dev->wcid))
+		wcid = rcu_dereference(dev->wcid[stat->wcid]);
+
+	if (wcid) {
+		void *priv;
+
+		priv = msta = container_of(wcid, struct mt76x2_sta, wcid);
+		sta = container_of(priv, struct ieee80211_sta,
+				   drv_priv);
+	}
+
+	if (msta && stat->aggr) {
+		u32 stat_val, stat_cache;
+
+		stat_val = stat->rate;
+		stat_val |= ((u32) stat->retry) << 16;
+		stat_cache = msta->status.rate;
+		stat_cache |= ((u32) msta->status.retry) << 16;
+
+		if (*update == 0 && stat_val == stat_cache &&
+		    stat->wcid == msta->status.wcid && msta->n_frames < 32) {
+			msta->n_frames++;
+			goto out;
+		}
+
+		mt76x2_mac_fill_tx_status(dev, &info, &msta->status,
+					  msta->n_frames);
+
+		msta->status = *stat;
+		msta->n_frames = 1;
+		*update = 0;
+	} else {
+		mt76x2_mac_fill_tx_status(dev, &info, stat, 1);
+		*update = 1;
+	}
+
+	ieee80211_tx_status_noskb(mt76_hw(dev), sta, &info);
+
+out:
+	rcu_read_unlock();
+}
+EXPORT_SYMBOL_GPL(mt76x2_send_tx_status);
+
+static enum mt76x2_cipher_type
+mt76x2_mac_get_key_info(struct ieee80211_key_conf *key, u8 *key_data)
+{
+	memset(key_data, 0, 32);
+	if (!key)
+		return MT_CIPHER_NONE;
+
+	if (key->keylen > 32)
+		return MT_CIPHER_NONE;
+
+	memcpy(key_data, key->key, key->keylen);
+
+	switch (key->cipher) {
+	case WLAN_CIPHER_SUITE_WEP40:
+		return MT_CIPHER_WEP40;
+	case WLAN_CIPHER_SUITE_WEP104:
+		return MT_CIPHER_WEP104;
+	case WLAN_CIPHER_SUITE_TKIP:
+		return MT_CIPHER_TKIP;
+	case WLAN_CIPHER_SUITE_CCMP:
+		return MT_CIPHER_AES_CCMP;
+	default:
+		return MT_CIPHER_NONE;
+	}
+}
+
+int mt76x2_mac_shared_key_setup(struct mt76x2_dev *dev, u8 vif_idx, u8 key_idx,
+				struct ieee80211_key_conf *key)
+{
+	enum mt76x2_cipher_type cipher;
+	u8 key_data[32];
+	u32 val;
+
+	cipher = mt76x2_mac_get_key_info(key, key_data);
+	if (cipher == MT_CIPHER_NONE && key)
+		return -EOPNOTSUPP;
+
+	val = mt76_rr(dev, MT_SKEY_MODE(vif_idx));
+	val &= ~(MT_SKEY_MODE_MASK << MT_SKEY_MODE_SHIFT(vif_idx, key_idx));
+	val |= cipher << MT_SKEY_MODE_SHIFT(vif_idx, key_idx);
+	mt76_wr(dev, MT_SKEY_MODE(vif_idx), val);
+
+	mt76_wr_copy(dev, MT_SKEY(vif_idx, key_idx), key_data,
+		     sizeof(key_data));
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(mt76x2_mac_shared_key_setup);
+
+int mt76x2_mac_wcid_set_key(struct mt76x2_dev *dev, u8 idx,
+			    struct ieee80211_key_conf *key)
+{
+	enum mt76x2_cipher_type cipher;
+	u8 key_data[32];
+	u8 iv_data[8];
+
+	cipher = mt76x2_mac_get_key_info(key, key_data);
+	if (cipher == MT_CIPHER_NONE && key)
+		return -EOPNOTSUPP;
+
+	mt76_rmw_field(dev, MT_WCID_ATTR(idx), MT_WCID_ATTR_PKEY_MODE, cipher);
+	mt76_wr_copy(dev, MT_WCID_KEY(idx), key_data, sizeof(key_data));
+
+	memset(iv_data, 0, sizeof(iv_data));
+	if (key) {
+		mt76_rmw_field(dev, MT_WCID_ATTR(idx), MT_WCID_ATTR_PAIRWISE,
+			       !!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE));
+		iv_data[3] = key->keyidx << 6;
+		if (cipher >= MT_CIPHER_TKIP)
+			iv_data[3] |= 0x20;
+	}
+
+	mt76_wr_copy(dev, MT_WCID_IV(idx), iv_data, sizeof(iv_data));
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(mt76x2_mac_wcid_set_key);
+
+static __le16
+mt76x2_mac_tx_rate_val(struct mt76x2_dev *dev,
+		       const struct ieee80211_tx_rate *rate, u8 *nss_val)
+{
+	u16 rateval;
+	u8 phy, rate_idx;
+	u8 nss = 1;
+	u8 bw = 0;
+
+	if (rate->flags & IEEE80211_TX_RC_VHT_MCS) {
+		rate_idx = rate->idx;
+		nss = 1 + (rate->idx >> 4);
+		phy = MT_PHY_TYPE_VHT;
+		if (rate->flags & IEEE80211_TX_RC_80_MHZ_WIDTH)
+			bw = 2;
+		else if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
+			bw = 1;
+	} else if (rate->flags & IEEE80211_TX_RC_MCS) {
+		rate_idx = rate->idx;
+		nss = 1 + (rate->idx >> 3);
+		phy = MT_PHY_TYPE_HT;
+		if (rate->flags & IEEE80211_TX_RC_GREEN_FIELD)
+			phy = MT_PHY_TYPE_HT_GF;
+		if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
+			bw = 1;
+	} else {
+		const struct ieee80211_rate *r;
+		int band = dev->mt76.chandef.chan->band;
+		u16 val;
+
+		r = &mt76_hw(dev)->wiphy->bands[band]->bitrates[rate->idx];
+		if (rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
+			val = r->hw_value_short;
+		else
+			val = r->hw_value;
+
+		phy = val >> 8;
+		rate_idx = val & 0xff;
+		bw = 0;
+	}
+
+	rateval = FIELD_PREP(MT_RXWI_RATE_INDEX, rate_idx);
+	rateval |= FIELD_PREP(MT_RXWI_RATE_PHY, phy);
+	rateval |= FIELD_PREP(MT_RXWI_RATE_BW, bw);
+	if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
+		rateval |= MT_RXWI_RATE_SGI;
+
+	*nss_val = nss;
+	return cpu_to_le16(rateval);
+}
+
+void mt76x2_mac_wcid_set_rate(struct mt76x2_dev *dev, struct mt76_wcid *wcid,
+			      const struct ieee80211_tx_rate *rate)
+{
+	spin_lock_bh(&dev->mt76.lock);
+	wcid->tx_rate = mt76x2_mac_tx_rate_val(dev, rate, &wcid->tx_rate_nss);
+	wcid->tx_rate_set = true;
+	spin_unlock_bh(&dev->mt76.lock);
+}
+EXPORT_SYMBOL_GPL(mt76x2_mac_wcid_set_rate);
+
+void mt76x2_mac_write_txwi(struct mt76x2_dev *dev, struct mt76x2_txwi *txwi,
+			   struct sk_buff *skb, struct mt76_wcid *wcid,
+			   struct ieee80211_sta *sta, int len)
+{
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+	struct ieee80211_tx_rate *rate = &info->control.rates[0];
+	struct ieee80211_key_conf *key = info->control.hw_key;
+	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+	u16 rate_ht_mask = FIELD_PREP(MT_RXWI_RATE_PHY, BIT(1) | BIT(2));
+	u16 txwi_flags = 0;
+	u8 nss;
+	s8 txpwr_adj, max_txpwr_adj;
+	u8 ccmp_pn[8];
+
+	memset(txwi, 0, sizeof(*txwi));
+
+	if (wcid)
+		txwi->wcid = wcid->idx;
+	else
+		txwi->wcid = 0xff;
+
+	txwi->pktid = 1;
+
+	if (wcid && wcid->sw_iv && key) {
+		u64 pn = atomic64_inc_return(&key->tx_pn);
+		ccmp_pn[0] = pn;
+		ccmp_pn[1] = pn >> 8;
+		ccmp_pn[2] = 0;
+		ccmp_pn[3] = 0x20 | (key->keyidx << 6);
+		ccmp_pn[4] = pn >> 16;
+		ccmp_pn[5] = pn >> 24;
+		ccmp_pn[6] = pn >> 32;
+		ccmp_pn[7] = pn >> 40;
+		txwi->iv = *((__le32 *)&ccmp_pn[0]);
+		txwi->eiv = *((__le32 *)&ccmp_pn[1]);
+	}
+
+	spin_lock_bh(&dev->mt76.lock);
+	if (wcid && (rate->idx < 0 || !rate->count)) {
+		txwi->rate = wcid->tx_rate;
+		max_txpwr_adj = wcid->max_txpwr_adj;
+		nss = wcid->tx_rate_nss;
+	} else {
+		txwi->rate = mt76x2_mac_tx_rate_val(dev, rate, &nss);
+		max_txpwr_adj = mt76x2_tx_get_max_txpwr_adj(dev, rate);
+	}
+	spin_unlock_bh(&dev->mt76.lock);
+
+	txpwr_adj = mt76x2_tx_get_txpwr_adj(dev, dev->txpower_conf,
+					    max_txpwr_adj);
+	txwi->ctl2 = FIELD_PREP(MT_TX_PWR_ADJ, txpwr_adj);
+
+	if (mt76xx_rev(dev) >= MT76XX_REV_E4)
+		txwi->txstream = 0x13;
+	else if (mt76xx_rev(dev) >= MT76XX_REV_E3 &&
+		 !(txwi->rate & cpu_to_le16(rate_ht_mask)))
+		txwi->txstream = 0x93;
+
+	if (info->flags & IEEE80211_TX_CTL_LDPC)
+		txwi->rate |= cpu_to_le16(MT_RXWI_RATE_LDPC);
+	if ((info->flags & IEEE80211_TX_CTL_STBC) && nss == 1)
+		txwi->rate |= cpu_to_le16(MT_RXWI_RATE_STBC);
+	if (nss > 1 && sta && sta->smps_mode == IEEE80211_SMPS_DYNAMIC)
+		txwi_flags |= MT_TXWI_FLAGS_MMPS;
+	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
+		txwi->ack_ctl |= MT_TXWI_ACK_CTL_REQ;
+	if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
+		txwi->ack_ctl |= MT_TXWI_ACK_CTL_NSEQ;
+	if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
+		txwi->pktid |= MT_TXWI_PKTID_PROBE;
+	if ((info->flags & IEEE80211_TX_CTL_AMPDU) && sta) {
+		u8 ba_size = IEEE80211_MIN_AMPDU_BUF;
+
+		ba_size <<= sta->ht_cap.ampdu_factor;
+		ba_size = min_t(int, 63, ba_size - 1);
+		if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
+			ba_size = 0;
+		txwi->ack_ctl |= FIELD_PREP(MT_TXWI_ACK_CTL_BA_WINDOW, ba_size);
+
+		txwi_flags |= MT_TXWI_FLAGS_AMPDU |
+			 FIELD_PREP(MT_TXWI_FLAGS_MPDU_DENSITY,
+				    sta->ht_cap.ampdu_density);
+	}
+
+	if (ieee80211_is_probe_resp(hdr->frame_control) ||
+	    ieee80211_is_beacon(hdr->frame_control))
+		txwi_flags |= MT_TXWI_FLAGS_TS;
+
+	txwi->flags |= cpu_to_le16(txwi_flags);
+	txwi->len_ctl = cpu_to_le16(len);
+}
+EXPORT_SYMBOL_GPL(mt76x2_mac_write_txwi);
+
+void mt76x2_mac_wcid_set_drop(struct mt76x2_dev *dev, u8 idx, bool drop)
+{
+	u32 val = mt76_rr(dev, MT_WCID_DROP(idx));
+	u32 bit = MT_WCID_DROP_MASK(idx);
+
+	/* prevent unnecessary writes */
+	if ((val & bit) != (bit * drop))
+		mt76_wr(dev, MT_WCID_DROP(idx), (val & ~bit) | (bit * drop));
+}
+EXPORT_SYMBOL_GPL(mt76x2_mac_wcid_set_drop);
+
+void mt76x2_mac_wcid_setup(struct mt76x2_dev *dev, u8 idx, u8 vif_idx, u8 *mac)
+{
+	struct mt76_wcid_addr addr = {};
+	u32 attr;
+
+	attr = FIELD_PREP(MT_WCID_ATTR_BSS_IDX, vif_idx & 7) |
+	       FIELD_PREP(MT_WCID_ATTR_BSS_IDX_EXT, !!(vif_idx & 8));
+
+	mt76_wr(dev, MT_WCID_ATTR(idx), attr);
+
+	mt76_wr(dev, MT_WCID_TX_RATE(idx), 0);
+	mt76_wr(dev, MT_WCID_TX_RATE(idx) + 4, 0);
+
+	if (idx >= 128)
+		return;
+
+	if (mac)
+		memcpy(addr.macaddr, mac, ETH_ALEN);
+
+	mt76_wr_copy(dev, MT_WCID_ADDR(idx), &addr, sizeof(addr));
+}
+EXPORT_SYMBOL_GPL(mt76x2_mac_wcid_setup);
+
+static int
+mt76x2_mac_process_rate(struct mt76_rx_status *status, u16 rate)
+{
+	u8 idx = FIELD_GET(MT_RXWI_RATE_INDEX, rate);
+
+	switch (FIELD_GET(MT_RXWI_RATE_PHY, rate)) {
+	case MT_PHY_TYPE_OFDM:
+		if (idx >= 8)
+			idx = 0;
+
+		if (status->band == NL80211_BAND_2GHZ)
+			idx += 4;
+
+		status->rate_idx = idx;
+		return 0;
+	case MT_PHY_TYPE_CCK:
+		if (idx >= 8) {
+			idx -= 8;
+			status->enc_flags |= RX_ENC_FLAG_SHORTPRE;
+		}
+
+		if (idx >= 4)
+			idx = 0;
+
+		status->rate_idx = idx;
+		return 0;
+	case MT_PHY_TYPE_HT_GF:
+		status->enc_flags |= RX_ENC_FLAG_HT_GF;
+		/* fall through */
+	case MT_PHY_TYPE_HT:
+		status->encoding = RX_ENC_HT;
+		status->rate_idx = idx;
+		break;
+	case MT_PHY_TYPE_VHT:
+		status->encoding = RX_ENC_VHT;
+		status->rate_idx = FIELD_GET(MT_RATE_INDEX_VHT_IDX, idx);
+		status->nss = FIELD_GET(MT_RATE_INDEX_VHT_NSS, idx) + 1;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if (rate & MT_RXWI_RATE_LDPC)
+		status->enc_flags |= RX_ENC_FLAG_LDPC;
+
+	if (rate & MT_RXWI_RATE_SGI)
+		status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
+
+	if (rate & MT_RXWI_RATE_STBC)
+		status->enc_flags |= 1 << RX_ENC_FLAG_STBC_SHIFT;
+
+	switch (FIELD_GET(MT_RXWI_RATE_BW, rate)) {
+	case MT_PHY_BW_20:
+		break;
+	case MT_PHY_BW_40:
+		status->bw = RATE_INFO_BW_40;
+		break;
+	case MT_PHY_BW_80:
+		status->bw = RATE_INFO_BW_80;
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+static void mt76x2_remove_hdr_pad(struct sk_buff *skb, int len)
+{
+	int hdrlen;
+
+	if (!len)
+		return;
+
+	hdrlen = ieee80211_get_hdrlen_from_skb(skb);
+	memmove(skb->data + len, skb->data, hdrlen);
+	skb_pull(skb, len);
+}
+
+int mt76x2_mac_get_rssi(struct mt76x2_dev *dev, s8 rssi, int chain)
+{
+	struct mt76x2_rx_freq_cal *cal = &dev->cal.rx;
+
+	rssi += cal->rssi_offset[chain];
+	rssi -= cal->lna_gain;
+
+	return rssi;
+}
+
+static struct mt76x2_sta *
+mt76x2_rx_get_sta(struct mt76x2_dev *dev, u8 idx)
+{
+	struct mt76_wcid *wcid;
+
+	if (idx >= ARRAY_SIZE(dev->wcid))
+		return NULL;
+
+	wcid = rcu_dereference(dev->wcid[idx]);
+	if (!wcid)
+		return NULL;
+
+	return container_of(wcid, struct mt76x2_sta, wcid);
+}
+
+static struct mt76_wcid *
+mt76x2_rx_get_sta_wcid(struct mt76x2_dev *dev, struct mt76x2_sta *sta,
+		       bool unicast)
+{
+	if (!sta)
+		return NULL;
+
+	if (unicast)
+		return &sta->wcid;
+	else
+		return &sta->vif->group_wcid;
+}
+
+int mt76x2_mac_process_rx(struct mt76x2_dev *dev, struct sk_buff *skb,
+			  void *rxi)
+{
+	struct mt76_rx_status *status = (struct mt76_rx_status *) skb->cb;
+	struct mt76x2_rxwi *rxwi = rxi;
+	struct mt76x2_sta *sta;
+	u32 rxinfo = le32_to_cpu(rxwi->rxinfo);
+	u32 ctl = le32_to_cpu(rxwi->ctl);
+	u16 rate = le16_to_cpu(rxwi->rate);
+	u16 tid_sn = le16_to_cpu(rxwi->tid_sn);
+	bool unicast = rxwi->rxinfo & cpu_to_le32(MT_RXINFO_UNICAST);
+	int pad_len = 0;
+	u8 pn_len;
+	u8 wcid;
+	int len;
+
+	if (!test_bit(MT76_STATE_RUNNING, &dev->mt76.state))
+		return -EINVAL;
+
+	if (rxinfo & MT_RXINFO_L2PAD)
+		pad_len += 2;
+
+	if (rxinfo & MT_RXINFO_DECRYPT) {
+		status->flag |= RX_FLAG_DECRYPTED;
+		status->flag |= RX_FLAG_MMIC_STRIPPED;
+		status->flag |= RX_FLAG_MIC_STRIPPED;
+		status->flag |= RX_FLAG_IV_STRIPPED;
+	}
+
+	wcid = FIELD_GET(MT_RXWI_CTL_WCID, ctl);
+	sta = mt76x2_rx_get_sta(dev, wcid);
+	status->wcid = mt76x2_rx_get_sta_wcid(dev, sta, unicast);
+
+	len = FIELD_GET(MT_RXWI_CTL_MPDU_LEN, ctl);
+	pn_len = FIELD_GET(MT_RXINFO_PN_LEN, rxinfo);
+	if (pn_len) {
+		int offset = ieee80211_get_hdrlen_from_skb(skb) + pad_len;
+		u8 *data = skb->data + offset;
+
+		status->iv[0] = data[7];
+		status->iv[1] = data[6];
+		status->iv[2] = data[5];
+		status->iv[3] = data[4];
+		status->iv[4] = data[1];
+		status->iv[5] = data[0];
+
+		/*
+		 * Driver CCMP validation can't deal with fragments.
+		 * Let mac80211 take care of it.
+		 */
+		if (rxinfo & MT_RXINFO_FRAG) {
+			status->flag &= ~RX_FLAG_IV_STRIPPED;
+		} else {
+			pad_len += pn_len << 2;
+			len -= pn_len << 2;
+		}
+	}
+
+	mt76x2_remove_hdr_pad(skb, pad_len);
+
+	if ((rxinfo & MT_RXINFO_BA) && !(rxinfo & MT_RXINFO_NULL))
+		status->aggr = true;
+
+	if (WARN_ON_ONCE(len > skb->len))
+		return -EINVAL;
+
+	pskb_trim(skb, len);
+	status->chains = BIT(0) | BIT(1);
+	status->chain_signal[0] = mt76x2_mac_get_rssi(dev, rxwi->rssi[0], 0);
+	status->chain_signal[1] = mt76x2_mac_get_rssi(dev, rxwi->rssi[1], 1);
+	status->signal = max(status->chain_signal[0], status->chain_signal[1]);
+	status->freq = dev->mt76.chandef.chan->center_freq;
+	status->band = dev->mt76.chandef.chan->band;
+
+	status->tid = FIELD_GET(MT_RXWI_TID, tid_sn);
+	status->seqno = FIELD_GET(MT_RXWI_SN, tid_sn);
+
+	if (sta) {
+		ewma_signal_add(&sta->rssi, status->signal);
+		sta->inactive_count = 0;
+	}
+
+	return mt76x2_mac_process_rate(status, rate);
+}
+EXPORT_SYMBOL_GPL(mt76x2_mac_process_rx);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_main.c b/drivers/net/wireless/mediatek/mt76/mt76x2_main.c
index 3c0ebe6d231c8724fd1284118a20b10edd0ee051..680a89f8aa87c7dd7eb00a8a9022638109449e7b 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_main.c
@@ -53,30 +53,6 @@ mt76x2_stop(struct ieee80211_hw *hw)
 	mutex_unlock(&dev->mutex);
 }
 
-static void
-mt76x2_txq_init(struct mt76x2_dev *dev, struct ieee80211_txq *txq)
-{
-	struct mt76_txq *mtxq;
-
-	if (!txq)
-		return;
-
-	mtxq = (struct mt76_txq *) txq->drv_priv;
-	if (txq->sta) {
-		struct mt76x2_sta *sta;
-
-		sta = (struct mt76x2_sta *) txq->sta->drv_priv;
-		mtxq->wcid = &sta->wcid;
-	} else {
-		struct mt76x2_vif *mvif;
-
-		mvif = (struct mt76x2_vif *) txq->vif->drv_priv;
-		mtxq->wcid = &mvif->group_wcid;
-	}
-
-	mt76_txq_init(&dev->mt76, txq);
-}
-
 static int
 mt76x2_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
 {
@@ -111,14 +87,6 @@ mt76x2_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
 	return 0;
 }
 
-static void
-mt76x2_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
-{
-	struct mt76x2_dev *dev = hw->priv;
-
-	mt76_txq_remove(&dev->mt76, vif->txq);
-}
-
 static int
 mt76x2_set_channel(struct mt76x2_dev *dev, struct cfg80211_chan_def *chandef)
 {
@@ -193,39 +161,6 @@ mt76x2_config(struct ieee80211_hw *hw, u32 changed)
 	return ret;
 }
 
-static void
-mt76x2_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
-			unsigned int *total_flags, u64 multicast)
-{
-	struct mt76x2_dev *dev = hw->priv;
-	u32 flags = 0;
-
-#define MT76_FILTER(_flag, _hw) do { \
-		flags |= *total_flags & FIF_##_flag;			\
-		dev->rxfilter &= ~(_hw);				\
-		dev->rxfilter |= !(flags & FIF_##_flag) * (_hw);	\
-	} while (0)
-
-	mutex_lock(&dev->mutex);
-
-	dev->rxfilter &= ~MT_RX_FILTR_CFG_OTHER_BSS;
-
-	MT76_FILTER(FCSFAIL, MT_RX_FILTR_CFG_CRC_ERR);
-	MT76_FILTER(PLCPFAIL, MT_RX_FILTR_CFG_PHY_ERR);
-	MT76_FILTER(CONTROL, MT_RX_FILTR_CFG_ACK |
-			     MT_RX_FILTR_CFG_CTS |
-			     MT_RX_FILTR_CFG_CFEND |
-			     MT_RX_FILTR_CFG_CFACK |
-			     MT_RX_FILTR_CFG_BA |
-			     MT_RX_FILTR_CFG_CTRL_RSV);
-	MT76_FILTER(PSPOLL, MT_RX_FILTR_CFG_PSPOLL);
-
-	*total_flags = flags;
-	mt76_wr(dev, MT_RX_FILTR_CFG, dev->rxfilter);
-
-	mutex_unlock(&dev->mutex);
-}
-
 static void
 mt76x2_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
 			struct ieee80211_bss_conf *info, u32 changed)
@@ -263,68 +198,6 @@ mt76x2_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
 	mutex_unlock(&dev->mutex);
 }
 
-static int
-mt76x2_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
-	       struct ieee80211_sta *sta)
-{
-	struct mt76x2_dev *dev = hw->priv;
-	struct mt76x2_sta *msta = (struct mt76x2_sta *) sta->drv_priv;
-	struct mt76x2_vif *mvif = (struct mt76x2_vif *) vif->drv_priv;
-	int ret = 0;
-	int idx = 0;
-	int i;
-
-	mutex_lock(&dev->mutex);
-
-	idx = mt76_wcid_alloc(dev->wcid_mask, ARRAY_SIZE(dev->wcid));
-	if (idx < 0) {
-		ret = -ENOSPC;
-		goto out;
-	}
-
-	msta->vif = mvif;
-	msta->wcid.sta = 1;
-	msta->wcid.idx = idx;
-	msta->wcid.hw_key_idx = -1;
-	mt76x2_mac_wcid_setup(dev, idx, mvif->idx, sta->addr);
-	mt76x2_mac_wcid_set_drop(dev, idx, false);
-	for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
-		mt76x2_txq_init(dev, sta->txq[i]);
-
-	if (vif->type == NL80211_IFTYPE_AP)
-		set_bit(MT_WCID_FLAG_CHECK_PS, &msta->wcid.flags);
-
-	ewma_signal_init(&msta->rssi);
-
-	rcu_assign_pointer(dev->wcid[idx], &msta->wcid);
-
-out:
-	mutex_unlock(&dev->mutex);
-
-	return ret;
-}
-
-static int
-mt76x2_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
-		  struct ieee80211_sta *sta)
-{
-	struct mt76x2_dev *dev = hw->priv;
-	struct mt76x2_sta *msta = (struct mt76x2_sta *) sta->drv_priv;
-	int idx = msta->wcid.idx;
-	int i;
-
-	mutex_lock(&dev->mutex);
-	rcu_assign_pointer(dev->wcid[idx], NULL);
-	for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
-		mt76_txq_remove(&dev->mt76, sta->txq[i]);
-	mt76x2_mac_wcid_set_drop(dev, idx, true);
-	mt76_wcid_free(dev->wcid_mask, idx);
-	mt76x2_mac_wcid_setup(dev, idx, 0, NULL);
-	mutex_unlock(&dev->mutex);
-
-	return 0;
-}
-
 void
 mt76x2_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps)
 {
@@ -336,117 +209,6 @@ mt76x2_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps)
 	mt76x2_mac_wcid_set_drop(dev, idx, ps);
 }
 
-static int
-mt76x2_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
-	       struct ieee80211_vif *vif, struct ieee80211_sta *sta,
-	       struct ieee80211_key_conf *key)
-{
-	struct mt76x2_dev *dev = hw->priv;
-	struct mt76x2_vif *mvif = (struct mt76x2_vif *) vif->drv_priv;
-	struct mt76x2_sta *msta;
-	struct mt76_wcid *wcid;
-	int idx = key->keyidx;
-	int ret;
-
-	/* fall back to sw encryption for unsupported ciphers */
-	switch (key->cipher) {
-	case WLAN_CIPHER_SUITE_WEP40:
-	case WLAN_CIPHER_SUITE_WEP104:
-	case WLAN_CIPHER_SUITE_TKIP:
-	case WLAN_CIPHER_SUITE_CCMP:
-		break;
-	default:
-		return -EOPNOTSUPP;
-	}
-
-	/*
-	 * The hardware does not support per-STA RX GTK, fall back
-	 * to software mode for these.
-	 */
-	if ((vif->type == NL80211_IFTYPE_ADHOC ||
-	     vif->type == NL80211_IFTYPE_MESH_POINT) &&
-	    (key->cipher == WLAN_CIPHER_SUITE_TKIP ||
-	     key->cipher == WLAN_CIPHER_SUITE_CCMP) &&
-	    !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
-		return -EOPNOTSUPP;
-
-	msta = sta ? (struct mt76x2_sta *) sta->drv_priv : NULL;
-	wcid = msta ? &msta->wcid : &mvif->group_wcid;
-
-	if (cmd == SET_KEY) {
-		key->hw_key_idx = wcid->idx;
-		wcid->hw_key_idx = idx;
-		if (key->flags & IEEE80211_KEY_FLAG_RX_MGMT) {
-			key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX;
-			wcid->sw_iv = true;
-		}
-	} else {
-		if (idx == wcid->hw_key_idx) {
-			wcid->hw_key_idx = -1;
-			wcid->sw_iv = true;
-		}
-
-		key = NULL;
-	}
-	mt76_wcid_key_setup(&dev->mt76, wcid, key);
-
-	if (!msta) {
-		if (key || wcid->hw_key_idx == idx) {
-			ret = mt76x2_mac_wcid_set_key(dev, wcid->idx, key);
-			if (ret)
-				return ret;
-		}
-
-		return mt76x2_mac_shared_key_setup(dev, mvif->idx, idx, key);
-	}
-
-	return mt76x2_mac_wcid_set_key(dev, msta->wcid.idx, key);
-}
-
-static int
-mt76x2_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u16 queue,
-	       const struct ieee80211_tx_queue_params *params)
-{
-	struct mt76x2_dev *dev = hw->priv;
-	u8 cw_min = 5, cw_max = 10, qid;
-	u32 val;
-
-	qid = dev->mt76.q_tx[queue].hw_idx;
-
-	if (params->cw_min)
-		cw_min = fls(params->cw_min);
-	if (params->cw_max)
-		cw_max = fls(params->cw_max);
-
-	val = FIELD_PREP(MT_EDCA_CFG_TXOP, params->txop) |
-	      FIELD_PREP(MT_EDCA_CFG_AIFSN, params->aifs) |
-	      FIELD_PREP(MT_EDCA_CFG_CWMIN, cw_min) |
-	      FIELD_PREP(MT_EDCA_CFG_CWMAX, cw_max);
-	mt76_wr(dev, MT_EDCA_CFG_AC(qid), val);
-
-	val = mt76_rr(dev, MT_WMM_TXOP(qid));
-	val &= ~(MT_WMM_TXOP_MASK << MT_WMM_TXOP_SHIFT(qid));
-	val |= params->txop << MT_WMM_TXOP_SHIFT(qid);
-	mt76_wr(dev, MT_WMM_TXOP(qid), val);
-
-	val = mt76_rr(dev, MT_WMM_AIFSN);
-	val &= ~(MT_WMM_AIFSN_MASK << MT_WMM_AIFSN_SHIFT(qid));
-	val |= params->aifs << MT_WMM_AIFSN_SHIFT(qid);
-	mt76_wr(dev, MT_WMM_AIFSN, val);
-
-	val = mt76_rr(dev, MT_WMM_CWMIN);
-	val &= ~(MT_WMM_CWMIN_MASK << MT_WMM_CWMIN_SHIFT(qid));
-	val |= cw_min << MT_WMM_CWMIN_SHIFT(qid);
-	mt76_wr(dev, MT_WMM_CWMIN, val);
-
-	val = mt76_rr(dev, MT_WMM_CWMAX);
-	val &= ~(MT_WMM_CWMAX_MASK << MT_WMM_CWMAX_SHIFT(qid));
-	val |= cw_max << MT_WMM_CWMAX_SHIFT(qid);
-	mt76_wr(dev, MT_WMM_CWMAX, val);
-
-	return 0;
-}
-
 static void
 mt76x2_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
 	       const u8 *mac)
@@ -485,75 +247,6 @@ mt76x2_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif, int *dbm)
 	return 0;
 }
 
-static int
-mt76x2_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
-		    struct ieee80211_ampdu_params *params)
-{
-	enum ieee80211_ampdu_mlme_action action = params->action;
-	struct ieee80211_sta *sta = params->sta;
-	struct mt76x2_dev *dev = hw->priv;
-	struct mt76x2_sta *msta = (struct mt76x2_sta *) sta->drv_priv;
-	struct ieee80211_txq *txq = sta->txq[params->tid];
-	u16 tid = params->tid;
-	u16 *ssn = &params->ssn;
-	struct mt76_txq *mtxq;
-
-	if (!txq)
-		return -EINVAL;
-
-	mtxq = (struct mt76_txq *)txq->drv_priv;
-
-	switch (action) {
-	case IEEE80211_AMPDU_RX_START:
-		mt76_rx_aggr_start(&dev->mt76, &msta->wcid, tid, *ssn, params->buf_size);
-		mt76_set(dev, MT_WCID_ADDR(msta->wcid.idx) + 4, BIT(16 + tid));
-		break;
-	case IEEE80211_AMPDU_RX_STOP:
-		mt76_rx_aggr_stop(&dev->mt76, &msta->wcid, tid);
-		mt76_clear(dev, MT_WCID_ADDR(msta->wcid.idx) + 4,
-			   BIT(16 + tid));
-		break;
-	case IEEE80211_AMPDU_TX_OPERATIONAL:
-		mtxq->aggr = true;
-		mtxq->send_bar = false;
-		ieee80211_send_bar(vif, sta->addr, tid, mtxq->agg_ssn);
-		break;
-	case IEEE80211_AMPDU_TX_STOP_FLUSH:
-	case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
-		mtxq->aggr = false;
-		ieee80211_send_bar(vif, sta->addr, tid, mtxq->agg_ssn);
-		break;
-	case IEEE80211_AMPDU_TX_START:
-		mtxq->agg_ssn = *ssn << 4;
-		ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
-		break;
-	case IEEE80211_AMPDU_TX_STOP_CONT:
-		mtxq->aggr = false;
-		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
-		break;
-	}
-
-	return 0;
-}
-
-static void
-mt76x2_sta_rate_tbl_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
-			   struct ieee80211_sta *sta)
-{
-	struct mt76x2_dev *dev = hw->priv;
-	struct mt76x2_sta *msta = (struct mt76x2_sta *) sta->drv_priv;
-	struct ieee80211_sta_rates *rates = rcu_dereference(sta->rates);
-	struct ieee80211_tx_rate rate = {};
-
-	if (!rates)
-		return;
-
-	rate.idx = rates->rate[0].idx;
-	rate.flags = rates->rate[0].flags;
-	mt76x2_mac_wcid_set_rate(dev, &msta->wcid, &rate);
-	msta->wcid.max_txpwr_adj = mt76x2_tx_get_max_txpwr_adj(dev, &rate);
-}
-
 static void mt76x2_set_coverage_class(struct ieee80211_hw *hw,
 				      s16 coverage_class)
 {
@@ -605,6 +298,21 @@ static int mt76x2_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant,
 	return 0;
 }
 
+static int
+mt76x2_set_rts_threshold(struct ieee80211_hw *hw, u32 val)
+{
+	struct mt76x2_dev *dev = hw->priv;
+
+	if (val != ~0 && val > 0xffff)
+		return -EINVAL;
+
+	mutex_lock(&dev->mutex);
+	mt76x2_mac_set_tx_protection(dev, val);
+	mutex_unlock(&dev->mutex);
+
+	return 0;
+}
+
 const struct ieee80211_ops mt76x2_ops = {
 	.tx = mt76x2_tx,
 	.start = mt76x2_start,
@@ -631,5 +339,6 @@ const struct ieee80211_ops mt76x2_ops = {
 	.set_tim = mt76x2_set_tim,
 	.set_antenna = mt76x2_set_antenna,
 	.get_antenna = mt76x2_get_antenna,
+	.set_rts_threshold = mt76x2_set_rts_threshold,
 };
 
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x2_mcu.c
index dfd36d736b06f4c48b5e39c902faadc3641b8ebc..743da57760dc67f9a3e9a0370c1d3b8692126e38 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_mcu.c
@@ -23,23 +23,6 @@
 #include "mt76x2_dma.h"
 #include "mt76x2_eeprom.h"
 
-struct mt76x2_fw_header {
-	__le32 ilm_len;
-	__le32 dlm_len;
-	__le16 build_ver;
-	__le16 fw_ver;
-	u8 pad[4];
-	char build_time[16];
-};
-
-struct mt76x2_patch_header {
-	char build_time[16];
-	char platform[4];
-	char hw_version[4];
-	char patch_version[4];
-	u8 pad[2];
-};
-
 static struct sk_buff *mt76x2_mcu_msg_alloc(const void *data, int len)
 {
 	struct sk_buff *skb;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_mcu.h b/drivers/net/wireless/mediatek/mt76/mt76x2_mcu.h
index d7a7e83262ced8cedd8a6a86aadd07e76193c446..e40293f21417762b7dd0f2b48d2fa6fa7f39b3c3 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_mcu.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_mcu.h
@@ -146,6 +146,23 @@ struct mt76x2_tssi_comp {
 	u8 offset1;
 } __packed __aligned(4);
 
+struct mt76x2_fw_header {
+	__le32 ilm_len;
+	__le32 dlm_len;
+	__le16 build_ver;
+	__le16 fw_ver;
+	u8 pad[4];
+	char build_time[16];
+};
+
+struct mt76x2_patch_header {
+	char build_time[16];
+	char platform[4];
+	char hw_version[4];
+	char patch_version[4];
+	u8 pad[2];
+};
+
 int mt76x2_mcu_calibrate(struct mt76x2_dev *dev, enum mcu_calibration type,
 			 u32 param);
 int mt76x2_mcu_tssi_comp(struct mt76x2_dev *dev, struct mt76x2_tssi_comp *tssi_data);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_phy.c b/drivers/net/wireless/mediatek/mt76/mt76x2_phy.c
index 20ffa6a40d3903db5a80e07e1132a22bb4ae08ec..84c96c0415b67052ecd45b25203c5d88b29f4efc 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_phy.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_phy.c
@@ -19,172 +19,6 @@
 #include "mt76x2_mcu.h"
 #include "mt76x2_eeprom.h"
 
-static void
-mt76x2_adjust_high_lna_gain(struct mt76x2_dev *dev, int reg, s8 offset)
-{
-	s8 gain;
-
-	gain = FIELD_GET(MT_BBP_AGC_LNA_HIGH_GAIN, mt76_rr(dev, MT_BBP(AGC, reg)));
-	gain -= offset / 2;
-	mt76_rmw_field(dev, MT_BBP(AGC, reg), MT_BBP_AGC_LNA_HIGH_GAIN, gain);
-}
-
-static void
-mt76x2_adjust_agc_gain(struct mt76x2_dev *dev, int reg, s8 offset)
-{
-	s8 gain;
-
-	gain = FIELD_GET(MT_BBP_AGC_GAIN, mt76_rr(dev, MT_BBP(AGC, reg)));
-	gain += offset;
-	mt76_rmw_field(dev, MT_BBP(AGC, reg), MT_BBP_AGC_GAIN, gain);
-}
-
-static void
-mt76x2_apply_gain_adj(struct mt76x2_dev *dev)
-{
-	s8 *gain_adj = dev->cal.rx.high_gain;
-
-	mt76x2_adjust_high_lna_gain(dev, 4, gain_adj[0]);
-	mt76x2_adjust_high_lna_gain(dev, 5, gain_adj[1]);
-
-	mt76x2_adjust_agc_gain(dev, 8, gain_adj[0]);
-	mt76x2_adjust_agc_gain(dev, 9, gain_adj[1]);
-}
-
-static u32
-mt76x2_tx_power_mask(u8 v1, u8 v2, u8 v3, u8 v4)
-{
-	u32 val = 0;
-
-	val |= (v1 & (BIT(6) - 1)) << 0;
-	val |= (v2 & (BIT(6) - 1)) << 8;
-	val |= (v3 & (BIT(6) - 1)) << 16;
-	val |= (v4 & (BIT(6) - 1)) << 24;
-	return val;
-}
-
-int mt76x2_phy_get_rssi(struct mt76x2_dev *dev, s8 rssi, int chain)
-{
-	struct mt76x2_rx_freq_cal *cal = &dev->cal.rx;
-
-	rssi += cal->rssi_offset[chain];
-	rssi -= cal->lna_gain;
-
-	return rssi;
-}
-
-static void
-mt76x2_add_rate_power_offset(struct mt76_rate_power *r, int offset)
-{
-	int i;
-
-	for (i = 0; i < sizeof(r->all); i++)
-		r->all[i] += offset;
-}
-
-static void
-mt76x2_limit_rate_power(struct mt76_rate_power *r, int limit)
-{
-	int i;
-
-	for (i = 0; i < sizeof(r->all); i++)
-		if (r->all[i] > limit)
-			r->all[i] = limit;
-}
-
-static int
-mt76x2_get_min_rate_power(struct mt76_rate_power *r)
-{
-	int i;
-	s8 ret = 0;
-
-	for (i = 0; i < sizeof(r->all); i++) {
-		if (!r->all[i])
-			continue;
-
-		if (ret)
-			ret = min(ret, r->all[i]);
-		else
-			ret = r->all[i];
-	}
-
-	return ret;
-}
-
-
-void mt76x2_phy_set_txpower(struct mt76x2_dev *dev)
-{
-	enum nl80211_chan_width width = dev->mt76.chandef.width;
-	struct ieee80211_channel *chan = dev->mt76.chandef.chan;
-	struct mt76x2_tx_power_info txp;
-	int txp_0, txp_1, delta = 0;
-	struct mt76_rate_power t = {};
-	int base_power, gain;
-
-	mt76x2_get_power_info(dev, &txp, chan);
-
-	if (width == NL80211_CHAN_WIDTH_40)
-		delta = txp.delta_bw40;
-	else if (width == NL80211_CHAN_WIDTH_80)
-		delta = txp.delta_bw80;
-
-	mt76x2_get_rate_power(dev, &t, chan);
-	mt76x2_add_rate_power_offset(&t, txp.chain[0].target_power);
-	mt76x2_limit_rate_power(&t, dev->txpower_conf);
-	dev->txpower_cur = mt76x2_get_max_rate_power(&t);
-
-	base_power = mt76x2_get_min_rate_power(&t);
-	delta += base_power - txp.chain[0].target_power;
-	txp_0 = txp.chain[0].target_power + txp.chain[0].delta + delta;
-	txp_1 = txp.chain[1].target_power + txp.chain[1].delta + delta;
-
-	gain = min(txp_0, txp_1);
-	if (gain < 0) {
-		base_power -= gain;
-		txp_0 -= gain;
-		txp_1 -= gain;
-	} else if (gain > 0x2f) {
-		base_power -= gain - 0x2f;
-		txp_0 = 0x2f;
-		txp_1 = 0x2f;
-	}
-
-	mt76x2_add_rate_power_offset(&t, -base_power);
-	dev->target_power = txp.chain[0].target_power;
-	dev->target_power_delta[0] = txp_0 - txp.chain[0].target_power;
-	dev->target_power_delta[1] = txp_1 - txp.chain[0].target_power;
-	dev->rate_power = t;
-
-	mt76_rmw_field(dev, MT_TX_ALC_CFG_0, MT_TX_ALC_CFG_0_CH_INIT_0, txp_0);
-	mt76_rmw_field(dev, MT_TX_ALC_CFG_0, MT_TX_ALC_CFG_0_CH_INIT_1, txp_1);
-
-	mt76_wr(dev, MT_TX_PWR_CFG_0,
-		mt76x2_tx_power_mask(t.cck[0], t.cck[2], t.ofdm[0], t.ofdm[2]));
-	mt76_wr(dev, MT_TX_PWR_CFG_1,
-		mt76x2_tx_power_mask(t.ofdm[4], t.ofdm[6], t.ht[0], t.ht[2]));
-	mt76_wr(dev, MT_TX_PWR_CFG_2,
-		mt76x2_tx_power_mask(t.ht[4], t.ht[6], t.ht[8], t.ht[10]));
-	mt76_wr(dev, MT_TX_PWR_CFG_3,
-		mt76x2_tx_power_mask(t.ht[12], t.ht[14], t.ht[0], t.ht[2]));
-	mt76_wr(dev, MT_TX_PWR_CFG_4,
-		mt76x2_tx_power_mask(t.ht[4], t.ht[6], 0, 0));
-	mt76_wr(dev, MT_TX_PWR_CFG_7,
-		mt76x2_tx_power_mask(t.ofdm[6], t.vht[8], t.ht[6], t.vht[8]));
-	mt76_wr(dev, MT_TX_PWR_CFG_8,
-		mt76x2_tx_power_mask(t.ht[14], t.vht[8], t.vht[8], 0));
-	mt76_wr(dev, MT_TX_PWR_CFG_9,
-		mt76x2_tx_power_mask(t.ht[6], t.vht[8], t.vht[8], 0));
-}
-
-static bool
-mt76x2_channel_silent(struct mt76x2_dev *dev)
-{
-	struct ieee80211_channel *chan = dev->mt76.chandef.chan;
-
-	return ((chan->flags & IEEE80211_CHAN_RADAR) &&
-		chan->dfs_state != NL80211_DFS_AVAILABLE);
-}
-
 static bool
 mt76x2_phy_tssi_init_cal(struct mt76x2_dev *dev)
 {
@@ -243,140 +77,6 @@ mt76x2_phy_channel_calibrate(struct mt76x2_dev *dev, bool mac_stopped)
 	dev->cal.channel_cal_done = true;
 }
 
-static void
-mt76x2_phy_set_txpower_regs(struct mt76x2_dev *dev, enum nl80211_band band)
-{
-	u32 pa_mode[2];
-	u32 pa_mode_adj;
-
-	if (band == NL80211_BAND_2GHZ) {
-		pa_mode[0] = 0x010055ff;
-		pa_mode[1] = 0x00550055;
-
-		mt76_wr(dev, MT_TX_ALC_CFG_2, 0x35160a00);
-		mt76_wr(dev, MT_TX_ALC_CFG_3, 0x35160a06);
-
-		if (mt76x2_ext_pa_enabled(dev, band)) {
-			mt76_wr(dev, MT_RF_PA_MODE_ADJ0, 0x0000ec00);
-			mt76_wr(dev, MT_RF_PA_MODE_ADJ1, 0x0000ec00);
-		} else {
-			mt76_wr(dev, MT_RF_PA_MODE_ADJ0, 0xf4000200);
-			mt76_wr(dev, MT_RF_PA_MODE_ADJ1, 0xfa000200);
-		}
-	} else {
-		pa_mode[0] = 0x0000ffff;
-		pa_mode[1] = 0x00ff00ff;
-
-		if (mt76x2_ext_pa_enabled(dev, band)) {
-			mt76_wr(dev, MT_TX_ALC_CFG_2, 0x2f0f0400);
-			mt76_wr(dev, MT_TX_ALC_CFG_3, 0x2f0f0476);
-		} else {
-			mt76_wr(dev, MT_TX_ALC_CFG_2, 0x1b0f0400);
-			mt76_wr(dev, MT_TX_ALC_CFG_3, 0x1b0f0476);
-		}
-
-		if (mt76x2_ext_pa_enabled(dev, band))
-			pa_mode_adj = 0x04000000;
-		else
-			pa_mode_adj = 0;
-
-		mt76_wr(dev, MT_RF_PA_MODE_ADJ0, pa_mode_adj);
-		mt76_wr(dev, MT_RF_PA_MODE_ADJ1, pa_mode_adj);
-	}
-
-	mt76_wr(dev, MT_BB_PA_MODE_CFG0, pa_mode[0]);
-	mt76_wr(dev, MT_BB_PA_MODE_CFG1, pa_mode[1]);
-	mt76_wr(dev, MT_RF_PA_MODE_CFG0, pa_mode[0]);
-	mt76_wr(dev, MT_RF_PA_MODE_CFG1, pa_mode[1]);
-
-	if (mt76x2_ext_pa_enabled(dev, band)) {
-		u32 val;
-
-		if (band == NL80211_BAND_2GHZ)
-			val = 0x3c3c023c;
-		else
-			val = 0x363c023c;
-
-		mt76_wr(dev, MT_TX0_RF_GAIN_CORR, val);
-		mt76_wr(dev, MT_TX1_RF_GAIN_CORR, val);
-		mt76_wr(dev, MT_TX_ALC_CFG_4, 0x00001818);
-	} else {
-		if (band == NL80211_BAND_2GHZ) {
-			u32 val = 0x0f3c3c3c;
-
-			mt76_wr(dev, MT_TX0_RF_GAIN_CORR, val);
-			mt76_wr(dev, MT_TX1_RF_GAIN_CORR, val);
-			mt76_wr(dev, MT_TX_ALC_CFG_4, 0x00000606);
-		} else {
-			mt76_wr(dev, MT_TX0_RF_GAIN_CORR, 0x383c023c);
-			mt76_wr(dev, MT_TX1_RF_GAIN_CORR, 0x24282e28);
-			mt76_wr(dev, MT_TX_ALC_CFG_4, 0);
-		}
-	}
-}
-
-static void
-mt76x2_configure_tx_delay(struct mt76x2_dev *dev, enum nl80211_band band, u8 bw)
-{
-	u32 cfg0, cfg1;
-
-	if (mt76x2_ext_pa_enabled(dev, band)) {
-		cfg0 = bw ? 0x000b0c01 : 0x00101101;
-		cfg1 = 0x00011414;
-	} else {
-		cfg0 = bw ? 0x000b0b01 : 0x00101001;
-		cfg1 = 0x00021414;
-	}
-	mt76_wr(dev, MT_TX_SW_CFG0, cfg0);
-	mt76_wr(dev, MT_TX_SW_CFG1, cfg1);
-
-	mt76_rmw_field(dev, MT_XIFS_TIME_CFG, MT_XIFS_TIME_CFG_OFDM_SIFS, 15);
-}
-
-static void
-mt76x2_phy_set_bw(struct mt76x2_dev *dev, int width, u8 ctrl)
-{
-	int core_val, agc_val;
-
-	switch (width) {
-	case NL80211_CHAN_WIDTH_80:
-		core_val = 3;
-		agc_val = 7;
-		break;
-	case NL80211_CHAN_WIDTH_40:
-		core_val = 2;
-		agc_val = 3;
-		break;
-	default:
-		core_val = 0;
-		agc_val = 1;
-		break;
-	}
-
-	mt76_rmw_field(dev, MT_BBP(CORE, 1), MT_BBP_CORE_R1_BW, core_val);
-	mt76_rmw_field(dev, MT_BBP(AGC, 0), MT_BBP_AGC_R0_BW, agc_val);
-	mt76_rmw_field(dev, MT_BBP(AGC, 0), MT_BBP_AGC_R0_CTRL_CHAN, ctrl);
-	mt76_rmw_field(dev, MT_BBP(TXBE, 0), MT_BBP_TXBE_R0_CTRL_CHAN, ctrl);
-}
-
-static void
-mt76x2_phy_set_band(struct mt76x2_dev *dev, int band, bool primary_upper)
-{
-	switch (band) {
-	case NL80211_BAND_2GHZ:
-		mt76_set(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_2G);
-		mt76_clear(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_5G);
-		break;
-	case NL80211_BAND_5GHZ:
-		mt76_clear(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_2G);
-		mt76_set(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_5G);
-		break;
-	}
-
-	mt76_rmw_field(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_UPPER_40M,
-		       primary_upper);
-}
-
 void mt76x2_phy_set_antenna(struct mt76x2_dev *dev)
 {
 	u32 val;
@@ -500,53 +200,6 @@ mt76x2_phy_adjust_vga_gain(struct mt76x2_dev *dev)
 	mt76x2_phy_set_gain_val(dev);
 }
 
-static int
-mt76x2_phy_get_min_avg_rssi(struct mt76x2_dev *dev)
-{
-	struct mt76x2_sta *sta;
-	struct mt76_wcid *wcid;
-	int i, j, min_rssi = 0;
-	s8 cur_rssi;
-
-	local_bh_disable();
-	rcu_read_lock();
-
-	for (i = 0; i < ARRAY_SIZE(dev->wcid_mask); i++) {
-		unsigned long mask = dev->wcid_mask[i];
-
-		if (!mask)
-			continue;
-
-		for (j = i * BITS_PER_LONG; mask; j++, mask >>= 1) {
-			if (!(mask & 1))
-				continue;
-
-			wcid = rcu_dereference(dev->wcid[j]);
-			if (!wcid)
-				continue;
-
-			sta = container_of(wcid, struct mt76x2_sta, wcid);
-			spin_lock(&dev->mt76.rx_lock);
-			if (sta->inactive_count++ < 5)
-				cur_rssi = ewma_signal_read(&sta->rssi);
-			else
-				cur_rssi = 0;
-			spin_unlock(&dev->mt76.rx_lock);
-
-			if (cur_rssi < min_rssi)
-				min_rssi = cur_rssi;
-		}
-	}
-
-	rcu_read_unlock();
-	local_bh_enable();
-
-	if (!min_rssi)
-		return -75;
-
-	return min_rssi;
-}
-
 static void
 mt76x2_phy_update_channel_gain(struct mt76x2_dev *dev)
 {
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_phy_common.c b/drivers/net/wireless/mediatek/mt76/mt76x2_phy_common.c
new file mode 100644
index 0000000000000000000000000000000000000000..9fd6ab4cbb949e617ee64527a9e2fa6a12f6c3fd
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_phy_common.c
@@ -0,0 +1,349 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "mt76x2.h"
+#include "mt76x2_eeprom.h"
+
+static void
+mt76x2_adjust_high_lna_gain(struct mt76x2_dev *dev, int reg, s8 offset)
+{
+	s8 gain;
+
+	gain = FIELD_GET(MT_BBP_AGC_LNA_HIGH_GAIN, mt76_rr(dev, MT_BBP(AGC, reg)));
+	gain -= offset / 2;
+	mt76_rmw_field(dev, MT_BBP(AGC, reg), MT_BBP_AGC_LNA_HIGH_GAIN, gain);
+}
+
+static void
+mt76x2_adjust_agc_gain(struct mt76x2_dev *dev, int reg, s8 offset)
+{
+	s8 gain;
+
+	gain = FIELD_GET(MT_BBP_AGC_GAIN, mt76_rr(dev, MT_BBP(AGC, reg)));
+	gain += offset;
+	mt76_rmw_field(dev, MT_BBP(AGC, reg), MT_BBP_AGC_GAIN, gain);
+}
+
+void mt76x2_apply_gain_adj(struct mt76x2_dev *dev)
+{
+	s8 *gain_adj = dev->cal.rx.high_gain;
+
+	mt76x2_adjust_high_lna_gain(dev, 4, gain_adj[0]);
+	mt76x2_adjust_high_lna_gain(dev, 5, gain_adj[1]);
+
+	mt76x2_adjust_agc_gain(dev, 8, gain_adj[0]);
+	mt76x2_adjust_agc_gain(dev, 9, gain_adj[1]);
+}
+EXPORT_SYMBOL_GPL(mt76x2_apply_gain_adj);
+
+void mt76x2_phy_set_txpower_regs(struct mt76x2_dev *dev,
+				 enum nl80211_band band)
+{
+	u32 pa_mode[2];
+	u32 pa_mode_adj;
+
+	if (band == NL80211_BAND_2GHZ) {
+		pa_mode[0] = 0x010055ff;
+		pa_mode[1] = 0x00550055;
+
+		mt76_wr(dev, MT_TX_ALC_CFG_2, 0x35160a00);
+		mt76_wr(dev, MT_TX_ALC_CFG_3, 0x35160a06);
+
+		if (mt76x2_ext_pa_enabled(dev, band)) {
+			mt76_wr(dev, MT_RF_PA_MODE_ADJ0, 0x0000ec00);
+			mt76_wr(dev, MT_RF_PA_MODE_ADJ1, 0x0000ec00);
+		} else {
+			mt76_wr(dev, MT_RF_PA_MODE_ADJ0, 0xf4000200);
+			mt76_wr(dev, MT_RF_PA_MODE_ADJ1, 0xfa000200);
+		}
+	} else {
+		pa_mode[0] = 0x0000ffff;
+		pa_mode[1] = 0x00ff00ff;
+
+		if (mt76x2_ext_pa_enabled(dev, band)) {
+			mt76_wr(dev, MT_TX_ALC_CFG_2, 0x2f0f0400);
+			mt76_wr(dev, MT_TX_ALC_CFG_3, 0x2f0f0476);
+		} else {
+			mt76_wr(dev, MT_TX_ALC_CFG_2, 0x1b0f0400);
+			mt76_wr(dev, MT_TX_ALC_CFG_3, 0x1b0f0476);
+		}
+
+		if (mt76x2_ext_pa_enabled(dev, band))
+			pa_mode_adj = 0x04000000;
+		else
+			pa_mode_adj = 0;
+
+		mt76_wr(dev, MT_RF_PA_MODE_ADJ0, pa_mode_adj);
+		mt76_wr(dev, MT_RF_PA_MODE_ADJ1, pa_mode_adj);
+	}
+
+	mt76_wr(dev, MT_BB_PA_MODE_CFG0, pa_mode[0]);
+	mt76_wr(dev, MT_BB_PA_MODE_CFG1, pa_mode[1]);
+	mt76_wr(dev, MT_RF_PA_MODE_CFG0, pa_mode[0]);
+	mt76_wr(dev, MT_RF_PA_MODE_CFG1, pa_mode[1]);
+
+	if (mt76x2_ext_pa_enabled(dev, band)) {
+		u32 val;
+
+		if (band == NL80211_BAND_2GHZ)
+			val = 0x3c3c023c;
+		else
+			val = 0x363c023c;
+
+		mt76_wr(dev, MT_TX0_RF_GAIN_CORR, val);
+		mt76_wr(dev, MT_TX1_RF_GAIN_CORR, val);
+		mt76_wr(dev, MT_TX_ALC_CFG_4, 0x00001818);
+	} else {
+		if (band == NL80211_BAND_2GHZ) {
+			u32 val = 0x0f3c3c3c;
+
+			mt76_wr(dev, MT_TX0_RF_GAIN_CORR, val);
+			mt76_wr(dev, MT_TX1_RF_GAIN_CORR, val);
+			mt76_wr(dev, MT_TX_ALC_CFG_4, 0x00000606);
+		} else {
+			mt76_wr(dev, MT_TX0_RF_GAIN_CORR, 0x383c023c);
+			mt76_wr(dev, MT_TX1_RF_GAIN_CORR, 0x24282e28);
+			mt76_wr(dev, MT_TX_ALC_CFG_4, 0);
+		}
+	}
+}
+EXPORT_SYMBOL_GPL(mt76x2_phy_set_txpower_regs);
+
+static void
+mt76x2_limit_rate_power(struct mt76_rate_power *r, int limit)
+{
+	int i;
+
+	for (i = 0; i < sizeof(r->all); i++)
+		if (r->all[i] > limit)
+			r->all[i] = limit;
+}
+
+static u32
+mt76x2_tx_power_mask(u8 v1, u8 v2, u8 v3, u8 v4)
+{
+	u32 val = 0;
+
+	val |= (v1 & (BIT(6) - 1)) << 0;
+	val |= (v2 & (BIT(6) - 1)) << 8;
+	val |= (v3 & (BIT(6) - 1)) << 16;
+	val |= (v4 & (BIT(6) - 1)) << 24;
+	return val;
+}
+
+static void
+mt76x2_add_rate_power_offset(struct mt76_rate_power *r, int offset)
+{
+	int i;
+
+	for (i = 0; i < sizeof(r->all); i++)
+		r->all[i] += offset;
+}
+
+static int
+mt76x2_get_min_rate_power(struct mt76_rate_power *r)
+{
+	int i;
+	s8 ret = 0;
+
+	for (i = 0; i < sizeof(r->all); i++) {
+		if (!r->all[i])
+			continue;
+
+		if (ret)
+			ret = min(ret, r->all[i]);
+		else
+			ret = r->all[i];
+	}
+
+	return ret;
+}
+
+void mt76x2_phy_set_txpower(struct mt76x2_dev *dev)
+{
+	enum nl80211_chan_width width = dev->mt76.chandef.width;
+	struct ieee80211_channel *chan = dev->mt76.chandef.chan;
+	struct mt76x2_tx_power_info txp;
+	int txp_0, txp_1, delta = 0;
+	struct mt76_rate_power t = {};
+	int base_power, gain;
+
+	mt76x2_get_power_info(dev, &txp, chan);
+
+	if (width == NL80211_CHAN_WIDTH_40)
+		delta = txp.delta_bw40;
+	else if (width == NL80211_CHAN_WIDTH_80)
+		delta = txp.delta_bw80;
+
+	mt76x2_get_rate_power(dev, &t, chan);
+	mt76x2_add_rate_power_offset(&t, txp.chain[0].target_power);
+	mt76x2_limit_rate_power(&t, dev->txpower_conf);
+	dev->txpower_cur = mt76x2_get_max_rate_power(&t);
+
+	base_power = mt76x2_get_min_rate_power(&t);
+	delta += base_power - txp.chain[0].target_power;
+	txp_0 = txp.chain[0].target_power + txp.chain[0].delta + delta;
+	txp_1 = txp.chain[1].target_power + txp.chain[1].delta + delta;
+
+	gain = min(txp_0, txp_1);
+	if (gain < 0) {
+		base_power -= gain;
+		txp_0 -= gain;
+		txp_1 -= gain;
+	} else if (gain > 0x2f) {
+		base_power -= gain - 0x2f;
+		txp_0 = 0x2f;
+		txp_1 = 0x2f;
+	}
+
+	mt76x2_add_rate_power_offset(&t, -base_power);
+	dev->target_power = txp.chain[0].target_power;
+	dev->target_power_delta[0] = txp_0 - txp.chain[0].target_power;
+	dev->target_power_delta[1] = txp_1 - txp.chain[0].target_power;
+	dev->rate_power = t;
+
+	mt76_rmw_field(dev, MT_TX_ALC_CFG_0, MT_TX_ALC_CFG_0_CH_INIT_0, txp_0);
+	mt76_rmw_field(dev, MT_TX_ALC_CFG_0, MT_TX_ALC_CFG_0_CH_INIT_1, txp_1);
+
+	mt76_wr(dev, MT_TX_PWR_CFG_0,
+		mt76x2_tx_power_mask(t.cck[0], t.cck[2], t.ofdm[0], t.ofdm[2]));
+	mt76_wr(dev, MT_TX_PWR_CFG_1,
+		mt76x2_tx_power_mask(t.ofdm[4], t.ofdm[6], t.ht[0], t.ht[2]));
+	mt76_wr(dev, MT_TX_PWR_CFG_2,
+		mt76x2_tx_power_mask(t.ht[4], t.ht[6], t.ht[8], t.ht[10]));
+	mt76_wr(dev, MT_TX_PWR_CFG_3,
+		mt76x2_tx_power_mask(t.ht[12], t.ht[14], t.ht[0], t.ht[2]));
+	mt76_wr(dev, MT_TX_PWR_CFG_4,
+		mt76x2_tx_power_mask(t.ht[4], t.ht[6], 0, 0));
+	mt76_wr(dev, MT_TX_PWR_CFG_7,
+		mt76x2_tx_power_mask(t.ofdm[6], t.vht[8], t.ht[6], t.vht[8]));
+	mt76_wr(dev, MT_TX_PWR_CFG_8,
+		mt76x2_tx_power_mask(t.ht[14], t.vht[8], t.vht[8], 0));
+	mt76_wr(dev, MT_TX_PWR_CFG_9,
+		mt76x2_tx_power_mask(t.ht[6], t.vht[8], t.vht[8], 0));
+}
+EXPORT_SYMBOL_GPL(mt76x2_phy_set_txpower);
+
+void mt76x2_configure_tx_delay(struct mt76x2_dev *dev,
+			       enum nl80211_band band, u8 bw)
+{
+	u32 cfg0, cfg1;
+
+	if (mt76x2_ext_pa_enabled(dev, band)) {
+		cfg0 = bw ? 0x000b0c01 : 0x00101101;
+		cfg1 = 0x00011414;
+	} else {
+		cfg0 = bw ? 0x000b0b01 : 0x00101001;
+		cfg1 = 0x00021414;
+	}
+	mt76_wr(dev, MT_TX_SW_CFG0, cfg0);
+	mt76_wr(dev, MT_TX_SW_CFG1, cfg1);
+
+	mt76_rmw_field(dev, MT_XIFS_TIME_CFG, MT_XIFS_TIME_CFG_OFDM_SIFS, 15);
+}
+EXPORT_SYMBOL_GPL(mt76x2_configure_tx_delay);
+
+void mt76x2_phy_set_bw(struct mt76x2_dev *dev, int width, u8 ctrl)
+{
+	int core_val, agc_val;
+
+	switch (width) {
+	case NL80211_CHAN_WIDTH_80:
+		core_val = 3;
+		agc_val = 7;
+		break;
+	case NL80211_CHAN_WIDTH_40:
+		core_val = 2;
+		agc_val = 3;
+		break;
+	default:
+		core_val = 0;
+		agc_val = 1;
+		break;
+	}
+
+	mt76_rmw_field(dev, MT_BBP(CORE, 1), MT_BBP_CORE_R1_BW, core_val);
+	mt76_rmw_field(dev, MT_BBP(AGC, 0), MT_BBP_AGC_R0_BW, agc_val);
+	mt76_rmw_field(dev, MT_BBP(AGC, 0), MT_BBP_AGC_R0_CTRL_CHAN, ctrl);
+	mt76_rmw_field(dev, MT_BBP(TXBE, 0), MT_BBP_TXBE_R0_CTRL_CHAN, ctrl);
+}
+EXPORT_SYMBOL_GPL(mt76x2_phy_set_bw);
+
+void mt76x2_phy_set_band(struct mt76x2_dev *dev, int band, bool primary_upper)
+{
+	switch (band) {
+	case NL80211_BAND_2GHZ:
+		mt76_set(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_2G);
+		mt76_clear(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_5G);
+		break;
+	case NL80211_BAND_5GHZ:
+		mt76_clear(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_2G);
+		mt76_set(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_5G);
+		break;
+	}
+
+	mt76_rmw_field(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_UPPER_40M,
+		       primary_upper);
+}
+EXPORT_SYMBOL_GPL(mt76x2_phy_set_band);
+
+int mt76x2_phy_get_min_avg_rssi(struct mt76x2_dev *dev)
+{
+	struct mt76x2_sta *sta;
+	struct mt76_wcid *wcid;
+	int i, j, min_rssi = 0;
+	s8 cur_rssi;
+
+	local_bh_disable();
+	rcu_read_lock();
+
+	for (i = 0; i < ARRAY_SIZE(dev->wcid_mask); i++) {
+		unsigned long mask = dev->wcid_mask[i];
+
+		if (!mask)
+			continue;
+
+		for (j = i * BITS_PER_LONG; mask; j++, mask >>= 1) {
+			if (!(mask & 1))
+				continue;
+
+			wcid = rcu_dereference(dev->wcid[j]);
+			if (!wcid)
+				continue;
+
+			sta = container_of(wcid, struct mt76x2_sta, wcid);
+			spin_lock(&dev->mt76.rx_lock);
+			if (sta->inactive_count++ < 5)
+				cur_rssi = ewma_signal_read(&sta->rssi);
+			else
+				cur_rssi = 0;
+			spin_unlock(&dev->mt76.rx_lock);
+
+			if (cur_rssi < min_rssi)
+				min_rssi = cur_rssi;
+		}
+	}
+
+	rcu_read_unlock();
+	local_bh_enable();
+
+	if (!min_rssi)
+		return -75;
+
+	return min_rssi;
+}
+EXPORT_SYMBOL_GPL(mt76x2_phy_get_min_avg_rssi);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_regs.h b/drivers/net/wireless/mediatek/mt76/mt76x2_regs.h
index b9c334d9e5b81b1fe4ae0605664c1d040da3aed7..1551ea453180d7e4972e23422ee7e1e5127ea440 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_regs.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_regs.h
@@ -75,6 +75,21 @@
 
 #define MT_XO_CTRL7			0x011c
 
+#define MT_USB_U3DMA_CFG		0x9018
+#define MT_USB_DMA_CFG_RX_BULK_AGG_TOUT	GENMASK(7, 0)
+#define MT_USB_DMA_CFG_RX_BULK_AGG_LMT	GENMASK(15, 8)
+#define MT_USB_DMA_CFG_UDMA_TX_WL_DROP	BIT(16)
+#define MT_USB_DMA_CFG_WAKE_UP_EN	BIT(17)
+#define MT_USB_DMA_CFG_RX_DROP_OR_PAD	BIT(18)
+#define MT_USB_DMA_CFG_TX_CLR		BIT(19)
+#define MT_USB_DMA_CFG_TXOP_HALT	BIT(20)
+#define MT_USB_DMA_CFG_RX_BULK_AGG_EN	BIT(21)
+#define MT_USB_DMA_CFG_RX_BULK_EN	BIT(22)
+#define MT_USB_DMA_CFG_TX_BULK_EN	BIT(23)
+#define MT_USB_DMA_CFG_EP_OUT_VALID	GENMASK(29, 24)
+#define MT_USB_DMA_CFG_RX_BUSY		BIT(30)
+#define MT_USB_DMA_CFG_TX_BUSY		BIT(31)
+
 #define MT_WLAN_MTC_CTRL		0x10148
 #define MT_WLAN_MTC_CTRL_MTCMOS_PWR_UP	BIT(0)
 #define MT_WLAN_MTC_CTRL_PWR_ACK	BIT(12)
@@ -150,6 +165,9 @@
 #define MT_TX_HW_QUEUE_MCU		8
 #define MT_TX_HW_QUEUE_MGMT		9
 
+#define MT_US_CYC_CFG			0x02a4
+#define MT_US_CYC_CNT			GENMASK(7, 0)
+
 #define MT_PBF_SYS_CTRL			0x0400
 #define MT_PBF_SYS_CTRL_MCU_RESET	BIT(0)
 #define MT_PBF_SYS_CTRL_DMA_RESET	BIT(1)
@@ -202,6 +220,11 @@
 
 #define MT_FCE_WLAN_FLOW_CONTROL1	0x0824
 
+#define MT_TX_CPU_FROM_FCE_BASE_PTR	0x09a0
+#define MT_TX_CPU_FROM_FCE_MAX_COUNT	0x09a4
+#define MT_FCE_PDMA_GLOBAL_CONF		0x09c4
+#define MT_FCE_SKIP_FS			0x0a6c
+
 #define MT_PAUSE_ENABLE_CONTROL1	0x0a38
 
 #define MT_MAC_CSR0			0x1000
@@ -214,6 +237,7 @@
 
 #define MT_MAC_ADDR_DW0			0x1008
 #define MT_MAC_ADDR_DW1			0x100c
+#define MT_MAC_ADDR_DW1_U2ME_MASK	GENMASK(23, 16)
 
 #define MT_MAC_BSSID_DW0		0x1010
 #define MT_MAC_BSSID_DW1		0x1014
@@ -351,6 +375,7 @@
 #define MT_TX_TIMEOUT_CFG_ACKTO		GENMASK(15, 8)
 
 #define MT_TX_RETRY_CFG			0x134c
+#define MT_TX_LINK_CFG			0x1350
 #define MT_VHT_HT_FBK_CFG1		0x1358
 
 #define MT_PROT_CFG_RATE		GENMASK(15, 0)
@@ -425,6 +450,7 @@
 #define MT_RX_FILTR_CFG_BAR		BIT(15)
 #define MT_RX_FILTR_CFG_CTRL_RSV	BIT(16)
 
+#define MT_AUTO_RSP_CFG			0x1404
 #define MT_LEGACY_BASIC_RATE		0x1408
 #define MT_HT_BASIC_RATE		0x140c
 
@@ -460,6 +486,10 @@
 #define MT_RX_STAT_2_DUP_ERRORS		GENMASK(15, 0)
 #define MT_RX_STAT_2_OVERFLOW_ERRORS	GENMASK(31, 16)
 
+#define MT_TX_STA_0			0x170c
+#define MT_TX_STA_1			0x1710
+#define MT_TX_STA_2			0x1714
+
 #define MT_TX_STAT_FIFO			0x1718
 #define MT_TX_STAT_FIFO_VALID		BIT(0)
 #define MT_TX_STAT_FIFO_SUCCESS		BIT(5)
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_tx.c b/drivers/net/wireless/mediatek/mt76/mt76x2_tx.c
index 560376dd113337a1ff4c09d892d995eedb7d85a4..4c907882e8b0b349dac643492e496cb5b571837e 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_tx.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_tx.c
@@ -23,129 +23,6 @@ struct beacon_bc_data {
 	struct sk_buff *tail[8];
 };
 
-void mt76x2_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
-	     struct sk_buff *skb)
-{
-	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-	struct mt76x2_dev *dev = hw->priv;
-	struct ieee80211_vif *vif = info->control.vif;
-	struct mt76_wcid *wcid = &dev->global_wcid;
-
-	if (control->sta) {
-		struct mt76x2_sta *msta;
-
-		msta = (struct mt76x2_sta *) control->sta->drv_priv;
-		wcid = &msta->wcid;
-		/* sw encrypted frames */
-		if (!info->control.hw_key && wcid->hw_key_idx != -1)
-			control->sta = NULL;
-	}
-
-	if (vif && !control->sta) {
-		struct mt76x2_vif *mvif;
-
-		mvif = (struct mt76x2_vif *) vif->drv_priv;
-		wcid = &mvif->group_wcid;
-	}
-
-	mt76_tx(&dev->mt76, control->sta, wcid, skb);
-}
-
-void mt76x2_tx_complete(struct mt76x2_dev *dev, struct sk_buff *skb)
-{
-	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-
-	if (info->flags & IEEE80211_TX_CTL_AMPDU) {
-		ieee80211_free_txskb(mt76_hw(dev), skb);
-	} else {
-		ieee80211_tx_info_clear_status(info);
-		info->status.rates[0].idx = -1;
-		info->flags |= IEEE80211_TX_STAT_ACK;
-		ieee80211_tx_status(mt76_hw(dev), skb);
-	}
-}
-
-s8 mt76x2_tx_get_max_txpwr_adj(struct mt76x2_dev *dev,
-			       const struct ieee80211_tx_rate *rate)
-{
-	s8 max_txpwr;
-
-	if (rate->flags & IEEE80211_TX_RC_VHT_MCS) {
-		u8 mcs = ieee80211_rate_get_vht_mcs(rate);
-
-		if (mcs == 8 || mcs == 9) {
-			max_txpwr = dev->rate_power.vht[8];
-		} else {
-			u8 nss, idx;
-
-			nss = ieee80211_rate_get_vht_nss(rate);
-			idx = ((nss - 1) << 3) + mcs;
-			max_txpwr = dev->rate_power.ht[idx & 0xf];
-		}
-	} else if (rate->flags & IEEE80211_TX_RC_MCS) {
-		max_txpwr = dev->rate_power.ht[rate->idx & 0xf];
-	} else {
-		enum nl80211_band band = dev->mt76.chandef.chan->band;
-
-		if (band == NL80211_BAND_2GHZ) {
-			const struct ieee80211_rate *r;
-			struct wiphy *wiphy = mt76_hw(dev)->wiphy;
-			struct mt76_rate_power *rp = &dev->rate_power;
-
-			r = &wiphy->bands[band]->bitrates[rate->idx];
-			if (r->flags & IEEE80211_RATE_SHORT_PREAMBLE)
-				max_txpwr = rp->cck[r->hw_value & 0x3];
-			else
-				max_txpwr = rp->ofdm[r->hw_value & 0x7];
-		} else {
-			max_txpwr = dev->rate_power.ofdm[rate->idx & 0x7];
-		}
-	}
-
-	return max_txpwr;
-}
-
-s8 mt76x2_tx_get_txpwr_adj(struct mt76x2_dev *dev, s8 txpwr, s8 max_txpwr_adj)
-{
-	txpwr = min_t(s8, txpwr, dev->txpower_conf);
-	txpwr -= (dev->target_power + dev->target_power_delta[0]);
-	txpwr = min_t(s8, txpwr, max_txpwr_adj);
-
-	if (!dev->enable_tpc)
-		return 0;
-	else if (txpwr >= 0)
-		return min_t(s8, txpwr, 7);
-	else
-		return (txpwr < -16) ? 8 : (txpwr + 32) / 2;
-}
-
-void mt76x2_tx_set_txpwr_auto(struct mt76x2_dev *dev, s8 txpwr)
-{
-	s8 txpwr_adj;
-
-	txpwr_adj = mt76x2_tx_get_txpwr_adj(dev, txpwr,
-					    dev->rate_power.ofdm[4]);
-	mt76_rmw_field(dev, MT_PROT_AUTO_TX_CFG,
-		       MT_PROT_AUTO_TX_CFG_PROT_PADJ, txpwr_adj);
-	mt76_rmw_field(dev, MT_PROT_AUTO_TX_CFG,
-		       MT_PROT_AUTO_TX_CFG_AUTO_PADJ, txpwr_adj);
-}
-
-static int mt76x2_insert_hdr_pad(struct sk_buff *skb)
-{
-	int len = ieee80211_get_hdrlen_from_skb(skb);
-
-	if (len % 4 == 0)
-		return 0;
-
-	skb_push(skb, 2);
-	memmove(skb->data, skb->data + 2, len);
-
-	skb->data[len] = 0;
-	skb->data[len + 1] = 0;
-	return 2;
-}
-
 int mt76x2_tx_prepare_skb(struct mt76_dev *mdev, void *txwi,
 			  struct sk_buff *skb, struct mt76_queue *q,
 			  struct mt76_wcid *wcid, struct ieee80211_sta *sta,
@@ -159,7 +36,7 @@ int mt76x2_tx_prepare_skb(struct mt76_dev *mdev, void *txwi,
 	if (q == &dev->mt76.q_tx[MT_TXQ_PSD] && wcid && wcid->idx < 128)
 		mt76x2_mac_wcid_set_drop(dev, wcid->idx, false);
 
-	mt76x2_mac_write_txwi(dev, txwi, skb, wcid, sta);
+	mt76x2_mac_write_txwi(dev, txwi, skb, wcid, sta, skb->len);
 
 	ret = mt76x2_insert_hdr_pad(skb);
 	if (ret < 0)
@@ -289,7 +166,8 @@ void mt76x2_pre_tbtt_tasklet(unsigned long arg)
 		struct ieee80211_vif *vif = info->control.vif;
 		struct mt76x2_vif *mvif = (struct mt76x2_vif *) vif->drv_priv;
 
-		mt76_tx_queue_skb(&dev->mt76, q, skb, &mvif->group_wcid, NULL);
+		mt76_dma_tx_queue_skb(&dev->mt76, q, skb, &mvif->group_wcid,
+				      NULL);
 	}
 	spin_unlock_bh(&q->lock);
 }
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_tx_common.c b/drivers/net/wireless/mediatek/mt76/mt76x2_tx_common.c
new file mode 100644
index 0000000000000000000000000000000000000000..36afb166fa3ffd29f24414e462961bca03545a6e
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_tx_common.c
@@ -0,0 +1,148 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "mt76x2.h"
+#include "mt76x2_dma.h"
+
+void mt76x2_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
+	       struct sk_buff *skb)
+{
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+	struct mt76x2_dev *dev = hw->priv;
+	struct ieee80211_vif *vif = info->control.vif;
+	struct mt76_wcid *wcid = &dev->global_wcid;
+
+	if (control->sta) {
+		struct mt76x2_sta *msta;
+
+		msta = (struct mt76x2_sta *)control->sta->drv_priv;
+		wcid = &msta->wcid;
+		/* sw encrypted frames */
+		if (!info->control.hw_key && wcid->hw_key_idx != -1)
+			control->sta = NULL;
+	}
+
+	if (vif && !control->sta) {
+		struct mt76x2_vif *mvif;
+
+		mvif = (struct mt76x2_vif *)vif->drv_priv;
+		wcid = &mvif->group_wcid;
+	}
+
+	mt76_tx(&dev->mt76, control->sta, wcid, skb);
+}
+EXPORT_SYMBOL_GPL(mt76x2_tx);
+
+int mt76x2_insert_hdr_pad(struct sk_buff *skb)
+{
+	int len = ieee80211_get_hdrlen_from_skb(skb);
+
+	if (len % 4 == 0)
+		return 0;
+
+	skb_push(skb, 2);
+	memmove(skb->data, skb->data + 2, len);
+
+	skb->data[len] = 0;
+	skb->data[len + 1] = 0;
+	return 2;
+}
+EXPORT_SYMBOL_GPL(mt76x2_insert_hdr_pad);
+
+s8 mt76x2_tx_get_max_txpwr_adj(struct mt76x2_dev *dev,
+			       const struct ieee80211_tx_rate *rate)
+{
+	s8 max_txpwr;
+
+	if (rate->flags & IEEE80211_TX_RC_VHT_MCS) {
+		u8 mcs = ieee80211_rate_get_vht_mcs(rate);
+
+		if (mcs == 8 || mcs == 9) {
+			max_txpwr = dev->rate_power.vht[8];
+		} else {
+			u8 nss, idx;
+
+			nss = ieee80211_rate_get_vht_nss(rate);
+			idx = ((nss - 1) << 3) + mcs;
+			max_txpwr = dev->rate_power.ht[idx & 0xf];
+		}
+	} else if (rate->flags & IEEE80211_TX_RC_MCS) {
+		max_txpwr = dev->rate_power.ht[rate->idx & 0xf];
+	} else {
+		enum nl80211_band band = dev->mt76.chandef.chan->band;
+
+		if (band == NL80211_BAND_2GHZ) {
+			const struct ieee80211_rate *r;
+			struct wiphy *wiphy = mt76_hw(dev)->wiphy;
+			struct mt76_rate_power *rp = &dev->rate_power;
+
+			r = &wiphy->bands[band]->bitrates[rate->idx];
+			if (r->flags & IEEE80211_RATE_SHORT_PREAMBLE)
+				max_txpwr = rp->cck[r->hw_value & 0x3];
+			else
+				max_txpwr = rp->ofdm[r->hw_value & 0x7];
+		} else {
+			max_txpwr = dev->rate_power.ofdm[rate->idx & 0x7];
+		}
+	}
+
+	return max_txpwr;
+}
+EXPORT_SYMBOL_GPL(mt76x2_tx_get_max_txpwr_adj);
+
+s8 mt76x2_tx_get_txpwr_adj(struct mt76x2_dev *dev, s8 txpwr, s8 max_txpwr_adj)
+{
+	txpwr = min_t(s8, txpwr, dev->txpower_conf);
+	txpwr -= (dev->target_power + dev->target_power_delta[0]);
+	txpwr = min_t(s8, txpwr, max_txpwr_adj);
+
+	if (!dev->enable_tpc)
+		return 0;
+	else if (txpwr >= 0)
+		return min_t(s8, txpwr, 7);
+	else
+		return (txpwr < -16) ? 8 : (txpwr + 32) / 2;
+}
+EXPORT_SYMBOL_GPL(mt76x2_tx_get_txpwr_adj);
+
+void mt76x2_tx_set_txpwr_auto(struct mt76x2_dev *dev, s8 txpwr)
+{
+	s8 txpwr_adj;
+
+	txpwr_adj = mt76x2_tx_get_txpwr_adj(dev, txpwr,
+					    dev->rate_power.ofdm[4]);
+	mt76_rmw_field(dev, MT_PROT_AUTO_TX_CFG,
+		       MT_PROT_AUTO_TX_CFG_PROT_PADJ, txpwr_adj);
+	mt76_rmw_field(dev, MT_PROT_AUTO_TX_CFG,
+		       MT_PROT_AUTO_TX_CFG_AUTO_PADJ, txpwr_adj);
+}
+EXPORT_SYMBOL_GPL(mt76x2_tx_set_txpwr_auto);
+
+void mt76x2_tx_complete(struct mt76x2_dev *dev, struct sk_buff *skb)
+{
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+
+	if (info->flags & IEEE80211_TX_CTL_AMPDU) {
+		ieee80211_free_txskb(mt76_hw(dev), skb);
+	} else {
+		ieee80211_tx_info_clear_status(info);
+		info->status.rates[0].idx = -1;
+		info->flags |= IEEE80211_TX_STAT_ACK;
+		ieee80211_tx_status(mt76_hw(dev), skb);
+	}
+}
+EXPORT_SYMBOL_GPL(mt76x2_tx_complete);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_usb.c b/drivers/net/wireless/mediatek/mt76/mt76x2_usb.c
new file mode 100644
index 0000000000000000000000000000000000000000..1428cfdee5795495c5df667c85abd7ab330080ce
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_usb.c
@@ -0,0 +1,142 @@
+/*
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#include "mt76x2u.h"
+
+static const struct usb_device_id mt76x2u_device_table[] = {
+	{ USB_DEVICE(0x0b05, 0x1833) },	/* Asus USB-AC54 */
+	{ USB_DEVICE(0x0b05, 0x17eb) },	/* Asus USB-AC55 */
+	{ USB_DEVICE(0x0b05, 0x180b) },	/* Asus USB-N53 B1 */
+	{ USB_DEVICE(0x0e8d, 0x7612) },	/* Aukey USB-AC1200 */
+	{ USB_DEVICE(0x057c, 0x8503) },	/* Avm FRITZ!WLAN AC860 */
+	{ USB_DEVICE(0x7392, 0xb711) },	/* Edimax EW 7722 UAC */
+	{ USB_DEVICE(0x0846, 0x9053) },	/* Netgear A6210 */
+	{ USB_DEVICE(0x045e, 0x02e6) },	/* XBox One Wireless Adapter */
+	{ },
+};
+
+static int mt76x2u_probe(struct usb_interface *intf,
+			 const struct usb_device_id *id)
+{
+	struct usb_device *udev = interface_to_usbdev(intf);
+	struct mt76x2_dev *dev;
+	int err;
+
+	dev = mt76x2u_alloc_device(&intf->dev);
+	if (!dev)
+		return -ENOMEM;
+
+	udev = usb_get_dev(udev);
+	usb_reset_device(udev);
+
+	err = mt76u_init(&dev->mt76, intf);
+	if (err < 0)
+		goto err;
+
+	dev->mt76.rev = mt76_rr(dev, MT_ASIC_VERSION);
+	dev_info(dev->mt76.dev, "ASIC revision: %08x\n", dev->mt76.rev);
+
+	err = mt76x2u_register_device(dev);
+	if (err < 0)
+		goto err;
+
+	return 0;
+
+err:
+	ieee80211_free_hw(mt76_hw(dev));
+	usb_set_intfdata(intf, NULL);
+	usb_put_dev(udev);
+
+	return err;
+}
+
+static void mt76x2u_disconnect(struct usb_interface *intf)
+{
+	struct usb_device *udev = interface_to_usbdev(intf);
+	struct mt76x2_dev *dev = usb_get_intfdata(intf);
+	struct ieee80211_hw *hw = mt76_hw(dev);
+
+	set_bit(MT76_REMOVED, &dev->mt76.state);
+	ieee80211_unregister_hw(hw);
+	mt76x2u_cleanup(dev);
+
+	ieee80211_free_hw(hw);
+	usb_set_intfdata(intf, NULL);
+	usb_put_dev(udev);
+}
+
+static int __maybe_unused mt76x2u_suspend(struct usb_interface *intf,
+					  pm_message_t state)
+{
+	struct mt76x2_dev *dev = usb_get_intfdata(intf);
+	struct mt76_usb *usb = &dev->mt76.usb;
+
+	mt76u_stop_queues(&dev->mt76);
+	mt76x2u_stop_hw(dev);
+	usb_kill_urb(usb->mcu.res.urb);
+
+	return 0;
+}
+
+static int __maybe_unused mt76x2u_resume(struct usb_interface *intf)
+{
+	struct mt76x2_dev *dev = usb_get_intfdata(intf);
+	struct mt76_usb *usb = &dev->mt76.usb;
+	int err;
+
+	reinit_completion(&usb->mcu.cmpl);
+	err = mt76u_submit_buf(&dev->mt76, USB_DIR_IN,
+			       MT_EP_IN_CMD_RESP,
+			       &usb->mcu.res, GFP_KERNEL,
+			       mt76u_mcu_complete_urb,
+			       &usb->mcu.cmpl);
+	if (err < 0)
+		return err;
+
+	err = mt76u_submit_rx_buffers(&dev->mt76);
+	if (err < 0)
+		return err;
+
+	tasklet_enable(&usb->rx_tasklet);
+	tasklet_enable(&usb->tx_tasklet);
+
+	return mt76x2u_init_hardware(dev);
+}
+
+MODULE_DEVICE_TABLE(usb, mt76x2u_device_table);
+MODULE_FIRMWARE(MT7662U_FIRMWARE);
+MODULE_FIRMWARE(MT7662U_ROM_PATCH);
+
+static struct usb_driver mt76x2u_driver = {
+	.name		= KBUILD_MODNAME,
+	.id_table	= mt76x2u_device_table,
+	.probe		= mt76x2u_probe,
+	.disconnect	= mt76x2u_disconnect,
+#ifdef CONFIG_PM
+	.suspend	= mt76x2u_suspend,
+	.resume		= mt76x2u_resume,
+	.reset_resume	= mt76x2u_resume,
+#endif /* CONFIG_PM */
+	.soft_unbind	= 1,
+	.disable_hub_initiated_lpm = 1,
+};
+module_usb_driver(mt76x2u_driver);
+
+MODULE_AUTHOR("Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2u.h b/drivers/net/wireless/mediatek/mt76/mt76x2u.h
new file mode 100644
index 0000000000000000000000000000000000000000..008092f0cd8a56ff816178b019b61eb81d5cc944
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2u.h
@@ -0,0 +1,83 @@
+/*
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __MT76x2U_H
+#define __MT76x2U_H
+
+#include <linux/device.h>
+
+#include "mt76x2.h"
+#include "mt76x2_dma.h"
+#include "mt76x2_mcu.h"
+
+#define MT7612U_EEPROM_SIZE		512
+
+#define MT_USB_AGGR_SIZE_LIMIT		21 /* 1024B unit */
+#define MT_USB_AGGR_TIMEOUT		0x80 /* 33ns unit */
+
+extern const struct ieee80211_ops mt76x2u_ops;
+
+struct mt76x2_dev *mt76x2u_alloc_device(struct device *pdev);
+int mt76x2u_register_device(struct mt76x2_dev *dev);
+int mt76x2u_init_hardware(struct mt76x2_dev *dev);
+void mt76x2u_cleanup(struct mt76x2_dev *dev);
+void mt76x2u_stop_hw(struct mt76x2_dev *dev);
+
+void mt76x2u_mac_setaddr(struct mt76x2_dev *dev, u8 *addr);
+int mt76x2u_mac_reset(struct mt76x2_dev *dev);
+void mt76x2u_mac_resume(struct mt76x2_dev *dev);
+int mt76x2u_mac_start(struct mt76x2_dev *dev);
+int mt76x2u_mac_stop(struct mt76x2_dev *dev);
+
+int mt76x2u_phy_set_channel(struct mt76x2_dev *dev,
+			    struct cfg80211_chan_def *chandef);
+void mt76x2u_phy_calibrate(struct work_struct *work);
+void mt76x2u_phy_channel_calibrate(struct mt76x2_dev *dev);
+void mt76x2u_phy_set_txdac(struct mt76x2_dev *dev);
+void mt76x2u_phy_set_rxpath(struct mt76x2_dev *dev);
+
+void mt76x2u_mcu_complete_urb(struct urb *urb);
+int mt76x2u_mcu_set_channel(struct mt76x2_dev *dev, u8 channel, u8 bw,
+			    u8 bw_index, bool scan);
+int mt76x2u_mcu_calibrate(struct mt76x2_dev *dev, enum mcu_calibration type,
+			  u32 val);
+int mt76x2u_mcu_tssi_comp(struct mt76x2_dev *dev,
+			  struct mt76x2_tssi_comp *tssi_data);
+int mt76x2u_mcu_init_gain(struct mt76x2_dev *dev, u8 channel, u32 gain,
+			  bool force);
+int mt76x2u_mcu_set_dynamic_vga(struct mt76x2_dev *dev, u8 channel, bool ap,
+				bool ext, int rssi, u32 false_cca);
+int mt76x2u_mcu_set_radio_state(struct mt76x2_dev *dev, bool val);
+int mt76x2u_mcu_load_cr(struct mt76x2_dev *dev, u8 type,
+			u8 temp_level, u8 channel);
+int mt76x2u_mcu_init(struct mt76x2_dev *dev);
+int mt76x2u_mcu_fw_init(struct mt76x2_dev *dev);
+void mt76x2u_mcu_deinit(struct mt76x2_dev *dev);
+
+int mt76x2u_alloc_queues(struct mt76x2_dev *dev);
+void mt76x2u_queues_deinit(struct mt76x2_dev *dev);
+void mt76x2u_stop_queues(struct mt76x2_dev *dev);
+bool mt76x2u_tx_status_data(struct mt76_dev *mdev, u8 *update);
+int mt76x2u_tx_prepare_skb(struct mt76_dev *mdev, void *data,
+			   struct sk_buff *skb, struct mt76_queue *q,
+			   struct mt76_wcid *wcid, struct ieee80211_sta *sta,
+			   u32 *tx_info);
+void mt76x2u_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q,
+			     struct mt76_queue_entry *e, bool flush);
+int mt76x2u_skb_dma_info(struct sk_buff *skb, enum dma_msg_port port,
+			 u32 flags);
+
+#endif /* __MT76x2U_H */
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2u_core.c b/drivers/net/wireless/mediatek/mt76/mt76x2u_core.c
new file mode 100644
index 0000000000000000000000000000000000000000..1ca5dd05b265cea55336b3fbeeb00b8ffac56342
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2u_core.c
@@ -0,0 +1,107 @@
+/*
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "mt76x2u.h"
+#include "dma.h"
+
+static void mt76x2u_remove_dma_hdr(struct sk_buff *skb)
+{
+	int hdr_len;
+
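+	/* strip the txwi and the usb dma header, then drop the 2 byte
+	 * pad inserted after a non multiple-of-4 802.11 header
+	 */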
+	skb_pull(skb, sizeof(struct mt76x2_txwi) + MT_DMA_HDR_LEN);
+	hdr_len = ieee80211_get_hdrlen_from_skb(skb);
+	if (hdr_len % 4) {
+		memmove(skb->data + 2, skb->data, hdr_len);
+		skb_pull(skb, 2);
+	}
+}
+
+static int
+mt76x2u_check_skb_rooms(struct sk_buff *skb)
+{
+	int hdr_len = ieee80211_get_hdrlen_from_skb(skb);
+	u32 need_head;
+
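+	/* make sure there is enough headroom for the txwi, the usb dma
+	 * header and the optional 2 byte alignment pad
+	 */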
+	need_head = sizeof(struct mt76x2_txwi) + MT_DMA_HDR_LEN;
+	if (hdr_len % 4)
+		need_head += 2;
+	return skb_cow(skb, need_head);
+}
+
+static int
+mt76x2u_set_txinfo(struct sk_buff *skb,
+		   struct mt76_wcid *wcid, u8 ep)
+{
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+	enum mt76x2_qsel qsel;
+	u32 flags;
+
+	if ((info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) ||
+	    ep == MT_EP_OUT_HCCA)
+		qsel = MT_QSEL_MGMT;
+	else
+		qsel = MT_QSEL_EDCA;
+
+	flags = FIELD_PREP(MT_TXD_INFO_QSEL, qsel) |
+		MT_TXD_INFO_80211;
+	if (!wcid || wcid->hw_key_idx == 0xff || wcid->sw_iv)
+		flags |= MT_TXD_INFO_WIV;
+
+	return mt76u_skb_dma_info(skb, WLAN_PORT, flags);
+}
+
+bool mt76x2u_tx_status_data(struct mt76_dev *mdev, u8 *update)
+{
+	struct mt76x2_dev *dev = container_of(mdev, struct mt76x2_dev, mt76);
+	struct mt76x2_tx_status stat;
+
+	if (!mt76x2_mac_load_tx_status(dev, &stat))
+		return false;
+
+	mt76x2_send_tx_status(dev, &stat, update);
+
+	return true;
+}
+
+int mt76x2u_tx_prepare_skb(struct mt76_dev *mdev, void *data,
+			   struct sk_buff *skb, struct mt76_queue *q,
+			   struct mt76_wcid *wcid, struct ieee80211_sta *sta,
+			   u32 *tx_info)
+{
+	struct mt76x2_dev *dev = container_of(mdev, struct mt76x2_dev, mt76);
+	struct mt76x2_txwi *txwi;
+	int err, len = skb->len;
+
+	err = mt76x2u_check_skb_rooms(skb);
+	if (err < 0)
+		return -ENOMEM;
+
+	mt76x2_insert_hdr_pad(skb);
+
+	txwi = skb_push(skb, sizeof(struct mt76x2_txwi));
+	mt76x2_mac_write_txwi(dev, txwi, skb, wcid, sta, len);
+
+	return mt76x2u_set_txinfo(skb, wcid, q2ep(q->hw_idx));
+}
+
+void mt76x2u_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q,
+			     struct mt76_queue_entry *e, bool flush)
+{
+	struct mt76x2_dev *dev = container_of(mdev, struct mt76x2_dev, mt76);
+
+	mt76x2u_remove_dma_hdr(e->skb);
+	mt76x2_tx_complete(dev, e->skb);
+}
+
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2u_init.c b/drivers/net/wireless/mediatek/mt76/mt76x2u_init.c
new file mode 100644
index 0000000000000000000000000000000000000000..9b81e7641c06924110eadad2215f46183edfa1b0
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2u_init.c
@@ -0,0 +1,318 @@
+/*
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/delay.h>
+
+#include "mt76x2u.h"
+#include "mt76x2_eeprom.h"
+
+static void mt76x2u_init_dma(struct mt76x2_dev *dev)
+{
+	u32 val = mt76_rr(dev, MT_VEND_ADDR(CFG, MT_USB_U3DMA_CFG));
+
+	val |= MT_USB_DMA_CFG_RX_DROP_OR_PAD |
+	       MT_USB_DMA_CFG_RX_BULK_EN |
+	       MT_USB_DMA_CFG_TX_BULK_EN;
+
+	/* disable AGGR_BULK_RX in order to receive one
+	 * frame in each rx urb and avoid copies
+	 */
+	val &= ~MT_USB_DMA_CFG_RX_BULK_AGG_EN;
+	mt76_wr(dev, MT_VEND_ADDR(CFG, MT_USB_U3DMA_CFG), val);
+}
+
+static void mt76x2u_power_on_rf_patch(struct mt76x2_dev *dev)
+{
+	mt76_set(dev, MT_VEND_ADDR(CFG, 0x130), BIT(0) | BIT(16));
+	udelay(1);
+
+	mt76_clear(dev, MT_VEND_ADDR(CFG, 0x1c), 0xff);
+	mt76_set(dev, MT_VEND_ADDR(CFG, 0x1c), 0x30);
+
+	mt76_wr(dev, MT_VEND_ADDR(CFG, 0x14), 0x484f);
+	udelay(1);
+
+	mt76_set(dev, MT_VEND_ADDR(CFG, 0x130), BIT(17));
+	usleep_range(150, 200);
+
+	mt76_clear(dev, MT_VEND_ADDR(CFG, 0x130), BIT(16));
+	usleep_range(50, 100);
+
+	mt76_set(dev, MT_VEND_ADDR(CFG, 0x14c), BIT(19) | BIT(20));
+}
+
+static void mt76x2u_power_on_rf(struct mt76x2_dev *dev, int unit)
+{
+	int shift = unit ? 8 : 0;
+	u32 val = (BIT(1) | BIT(3) | BIT(4) | BIT(5)) << shift;
+
+	/* Enable RF BG */
+	mt76_set(dev, MT_VEND_ADDR(CFG, 0x130), BIT(0) << shift);
+	usleep_range(10, 20);
+
+	/* Enable RFDIG LDO/AFE/ABB/ADDA */
+	mt76_set(dev, MT_VEND_ADDR(CFG, 0x130), val);
+	usleep_range(10, 20);
+
+	/* Switch RFDIG power to internal LDO */
+	mt76_clear(dev, MT_VEND_ADDR(CFG, 0x130), BIT(2) << shift);
+	usleep_range(10, 20);
+
+	mt76x2u_power_on_rf_patch(dev);
+
+	mt76_set(dev, 0x530, 0xf);
+}
+
+static void mt76x2u_power_on(struct mt76x2_dev *dev)
+{
+	u32 val;
+
+	/* Turn on WL MTCMOS */
+	mt76_set(dev, MT_VEND_ADDR(CFG, 0x148),
+		 MT_WLAN_MTC_CTRL_MTCMOS_PWR_UP);
+
+	val = MT_WLAN_MTC_CTRL_STATE_UP |
+	      MT_WLAN_MTC_CTRL_PWR_ACK |
+	      MT_WLAN_MTC_CTRL_PWR_ACK_S;
+
+	mt76_poll(dev, MT_VEND_ADDR(CFG, 0x148), val, val, 1000);
+
+	mt76_clear(dev, MT_VEND_ADDR(CFG, 0x148), 0x7f << 16);
+	usleep_range(10, 20);
+
+	mt76_clear(dev, MT_VEND_ADDR(CFG, 0x148), 0xf << 24);
+	usleep_range(10, 20);
+
+	mt76_set(dev, MT_VEND_ADDR(CFG, 0x148), 0xf << 24);
+	mt76_clear(dev, MT_VEND_ADDR(CFG, 0x148), 0xfff);
+
+	/* Turn on AD/DA power down */
+	mt76_clear(dev, MT_VEND_ADDR(CFG, 0x1204), BIT(3));
+
+	/* WLAN function enable */
+	mt76_set(dev, MT_VEND_ADDR(CFG, 0x80), BIT(0));
+
+	/* Release BBP software reset */
+	mt76_clear(dev, MT_VEND_ADDR(CFG, 0x64), BIT(18));
+
+	mt76x2u_power_on_rf(dev, 0);
+	mt76x2u_power_on_rf(dev, 1);
+}
+
+static int mt76x2u_init_eeprom(struct mt76x2_dev *dev)
+{
+	u32 val, i;
+
+	dev->mt76.eeprom.data = devm_kzalloc(dev->mt76.dev,
+					     MT7612U_EEPROM_SIZE,
+					     GFP_KERNEL);
+	dev->mt76.eeprom.size = MT7612U_EEPROM_SIZE;
+	if (!dev->mt76.eeprom.data)
+		return -ENOMEM;
+
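+	/* read the eeprom content back 32 bits at a time through
+	 * MT_VEND_READ_EEPROM vendor requests
+	 */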
+	for (i = 0; i + 4 <= MT7612U_EEPROM_SIZE; i += 4) {
+		val = mt76_rr(dev, MT_VEND_ADDR(EEPROM, i));
+		put_unaligned_le32(val, dev->mt76.eeprom.data + i);
+	}
+
+	mt76x2_eeprom_parse_hw_cap(dev);
+	return 0;
+}
+
+struct mt76x2_dev *mt76x2u_alloc_device(struct device *pdev)
+{
+	static const struct mt76_driver_ops drv_ops = {
+		.tx_prepare_skb = mt76x2u_tx_prepare_skb,
+		.tx_complete_skb = mt76x2u_tx_complete_skb,
+		.tx_status_data = mt76x2u_tx_status_data,
+		.rx_skb = mt76x2_queue_rx_skb,
+	};
+	struct mt76x2_dev *dev;
+	struct mt76_dev *mdev;
+
+	mdev = mt76_alloc_device(sizeof(*dev), &mt76x2u_ops);
+	if (!mdev)
+		return NULL;
+
+	dev = container_of(mdev, struct mt76x2_dev, mt76);
+	mdev->dev = pdev;
+	mdev->drv = &drv_ops;
+
+	mutex_init(&dev->mutex);
+
+	return dev;
+}
+
+static void mt76x2u_init_beacon_offsets(struct mt76x2_dev *dev)
+{
+	mt76_wr(dev, MT_BCN_OFFSET(0), 0x18100800);
+	mt76_wr(dev, MT_BCN_OFFSET(1), 0x38302820);
+	mt76_wr(dev, MT_BCN_OFFSET(2), 0x58504840);
+	mt76_wr(dev, MT_BCN_OFFSET(3), 0x78706860);
+}
+
+int mt76x2u_init_hardware(struct mt76x2_dev *dev)
+{
+	static const u16 beacon_offsets[] = {
+		/* 512 byte per beacon */
+		0xc000, 0xc200, 0xc400, 0xc600,
+		0xc800, 0xca00, 0xcc00, 0xce00,
+		0xd000, 0xd200, 0xd400, 0xd600,
+		0xd800, 0xda00, 0xdc00, 0xde00
+	};
+	const struct mt76_wcid_addr addr = {
+		.macaddr = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
+		.ba_mask = 0,
+	};
+	int i, err;
+
+	dev->beacon_offsets = beacon_offsets;
+
+	mt76x2_reset_wlan(dev, true);
+	mt76x2u_power_on(dev);
+
+	if (!mt76x2_wait_for_mac(dev))
+		return -ETIMEDOUT;
+
+	err = mt76x2u_mcu_fw_init(dev);
+	if (err < 0)
+		return err;
+
+	if (!mt76_poll_msec(dev, MT_WPDMA_GLO_CFG,
+			    MT_WPDMA_GLO_CFG_TX_DMA_BUSY |
+			    MT_WPDMA_GLO_CFG_RX_DMA_BUSY, 0, 100))
+		return -EIO;
+
+	/* wait for asic ready after fw load. */
+	if (!mt76x2_wait_for_mac(dev))
+		return -ETIMEDOUT;
+
+	mt76_wr(dev, MT_HEADER_TRANS_CTRL_REG, 0);
+	mt76_wr(dev, MT_TSO_CTRL, 0);
+
+	mt76x2u_init_dma(dev);
+
+	err = mt76x2u_mcu_init(dev);
+	if (err < 0)
+		return err;
+
+	err = mt76x2u_mac_reset(dev);
+	if (err < 0)
+		return err;
+
+	mt76x2u_mac_setaddr(dev, dev->mt76.eeprom.data + MT_EE_MAC_ADDR);
+	dev->rxfilter = mt76_rr(dev, MT_RX_FILTR_CFG);
+
+	mt76x2u_init_beacon_offsets(dev);
+
+	if (!mt76x2_wait_for_bbp(dev))
+		return -ETIMEDOUT;
+
+	/* reset wcid table */
+	for (i = 0; i < 254; i++)
+		mt76_wr_copy(dev, MT_WCID_ADDR(i), &addr,
+			     sizeof(struct mt76_wcid_addr));
+
+	/* reset shared key table and pairwise key table */
+	for (i = 0; i < 4; i++)
+		mt76_wr(dev, MT_SKEY_MODE_BASE_0 + 4 * i, 0);
+	for (i = 0; i < 256; i++)
+		mt76_wr(dev, MT_WCID_ATTR(i), 1);
+
+	mt76_clear(dev, MT_BEACON_TIME_CFG,
+		   MT_BEACON_TIME_CFG_TIMER_EN |
+		   MT_BEACON_TIME_CFG_SYNC_MODE |
+		   MT_BEACON_TIME_CFG_TBTT_EN |
+		   MT_BEACON_TIME_CFG_BEACON_TX);
+
+	mt76_rmw(dev, MT_US_CYC_CFG, MT_US_CYC_CNT, 0x1e);
+	mt76_wr(dev, MT_TXOP_CTRL_CFG, 0x583f);
+
+	err = mt76x2u_mcu_load_cr(dev, MT_RF_BBP_CR, 0, 0);
+	if (err < 0)
+		return err;
+
+	mt76x2u_phy_set_rxpath(dev);
+	mt76x2u_phy_set_txdac(dev);
+
+	return mt76x2u_mac_stop(dev);
+}
+
+int mt76x2u_register_device(struct mt76x2_dev *dev)
+{
+	struct ieee80211_hw *hw = mt76_hw(dev);
+	struct wiphy *wiphy = hw->wiphy;
+	int err;
+
+	INIT_DELAYED_WORK(&dev->cal_work, mt76x2u_phy_calibrate);
+	mt76x2_init_device(dev);
+
+	err = mt76x2u_init_eeprom(dev);
+	if (err < 0)
+		return err;
+
+	err = mt76u_mcu_init_rx(&dev->mt76);
+	if (err < 0)
+		return err;
+
+	err = mt76u_alloc_queues(&dev->mt76);
+	if (err < 0)
+		goto fail;
+
+	err = mt76x2u_init_hardware(dev);
+	if (err < 0)
+		goto fail;
+
+	wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
+
+	err = mt76_register_device(&dev->mt76, true, mt76x2_rates,
+				   ARRAY_SIZE(mt76x2_rates));
+	if (err)
+		goto fail;
+
+	/* check hw sg support in order to enable AMSDU */
+	if (mt76u_check_sg(&dev->mt76))
+		hw->max_tx_fragments = MT_SG_MAX_SIZE;
+	else
+		hw->max_tx_fragments = 1;
+
+	set_bit(MT76_STATE_INITIALIZED, &dev->mt76.state);
+
+	mt76x2_init_debugfs(dev);
+	mt76x2_init_txpower(dev, &dev->mt76.sband_2g.sband);
+	mt76x2_init_txpower(dev, &dev->mt76.sband_5g.sband);
+
+	return 0;
+
+fail:
+	mt76x2u_cleanup(dev);
+	return err;
+}
+
+void mt76x2u_stop_hw(struct mt76x2_dev *dev)
+{
+	mt76u_stop_stat_wk(&dev->mt76);
+	cancel_delayed_work_sync(&dev->cal_work);
+	mt76x2u_mac_stop(dev);
+}
+
+void mt76x2u_cleanup(struct mt76x2_dev *dev)
+{
+	mt76x2u_mcu_set_radio_state(dev, false);
+	mt76x2u_stop_hw(dev);
+	mt76u_queues_deinit(&dev->mt76);
+	mt76x2u_mcu_deinit(dev);
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2u_mac.c b/drivers/net/wireless/mediatek/mt76/mt76x2u_mac.c
new file mode 100644
index 0000000000000000000000000000000000000000..eab7ab297aa6ffe5e5b110a12d6a68b5a7c133c0
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2u_mac.c
@@ -0,0 +1,240 @@
+/*
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "mt76x2u.h"
+#include "mt76x2_eeprom.h"
+
+static void mt76x2u_mac_reset_counters(struct mt76x2_dev *dev)
+{
+	mt76_rr(dev, MT_RX_STAT_0);
+	mt76_rr(dev, MT_RX_STAT_1);
+	mt76_rr(dev, MT_RX_STAT_2);
+	mt76_rr(dev, MT_TX_STA_0);
+	mt76_rr(dev, MT_TX_STA_1);
+	mt76_rr(dev, MT_TX_STA_2);
+}
+
+static void mt76x2u_mac_fixup_xtal(struct mt76x2_dev *dev)
+{
+	s8 offset = 0;
+	u16 eep_val;
+
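+	/* low byte of XTAL_TRIM_2: bit 7 is the sign, bits 6:0 the trim
+	 * offset, 0xff means the field is not programmed
+	 */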
+	eep_val = mt76x2_eeprom_get(dev, MT_EE_XTAL_TRIM_2);
+
+	offset = eep_val & 0x7f;
+	if ((eep_val & 0xff) == 0xff)
+		offset = 0;
+	else if (eep_val & 0x80)
+		offset = 0 - offset;
+
+	eep_val >>= 8;
+	if (eep_val == 0x00 || eep_val == 0xff) {
+		eep_val = mt76x2_eeprom_get(dev, MT_EE_XTAL_TRIM_1);
+		eep_val &= 0xff;
+
+		if (eep_val == 0x00 || eep_val == 0xff)
+			eep_val = 0x14;
+	}
+
+	eep_val &= 0x7f;
+	mt76_rmw_field(dev, MT_VEND_ADDR(CFG, MT_XO_CTRL5),
+		       MT_XO_CTRL5_C2_VAL, eep_val + offset);
+	mt76_set(dev, MT_VEND_ADDR(CFG, MT_XO_CTRL6), MT_XO_CTRL6_C2_CTRL);
+
+	mt76_wr(dev, 0x504, 0x06000000);
+	mt76_wr(dev, 0x50c, 0x08800000);
+	mdelay(5);
+	mt76_wr(dev, 0x504, 0x0);
+
+	/* decrease SIFS from 16us to 13us */
+	mt76_rmw_field(dev, MT_XIFS_TIME_CFG,
+		       MT_XIFS_TIME_CFG_OFDM_SIFS, 0xd);
+	mt76_rmw_field(dev, MT_BKOFF_SLOT_CFG, MT_BKOFF_SLOT_CFG_CC_DELAY, 1);
+
+	/* init fce */
+	mt76_clear(dev, MT_FCE_L2_STUFF, MT_FCE_L2_STUFF_WR_MPDU_LEN_EN);
+
+	eep_val = mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_2);
+	switch (FIELD_GET(MT_EE_NIC_CONF_2_XTAL_OPTION, eep_val)) {
+	case 0:
+		mt76_wr(dev, MT_XO_CTRL7, 0x5c1fee80);
+		break;
+	case 1:
+		mt76_wr(dev, MT_XO_CTRL7, 0x5c1feed0);
+		break;
+	default:
+		break;
+	}
+}
+
+int mt76x2u_mac_reset(struct mt76x2_dev *dev)
+{
+	mt76_wr(dev, MT_WPDMA_GLO_CFG, BIT(4) | BIT(5));
+
+	/* init pbf regs */
+	mt76_wr(dev, MT_PBF_TX_MAX_PCNT, 0xefef3f1f);
+	mt76_wr(dev, MT_PBF_RX_MAX_PCNT, 0xfebf);
+
+	mt76_write_mac_initvals(dev);
+
+	mt76_wr(dev, MT_TX_LINK_CFG, 0x1020);
+	mt76_wr(dev, MT_AUTO_RSP_CFG, 0x13);
+	mt76_wr(dev, MT_MAX_LEN_CFG, 0x2f00);
+	mt76_wr(dev, MT_TX_RTS_CFG, 0x92b20);
+
+	mt76_wr(dev, MT_WMM_AIFSN, 0x2273);
+	mt76_wr(dev, MT_WMM_CWMIN, 0x2344);
+	mt76_wr(dev, MT_WMM_CWMAX, 0x34aa);
+
+	mt76_clear(dev, MT_MAC_SYS_CTRL,
+		   MT_MAC_SYS_CTRL_RESET_CSR |
+		   MT_MAC_SYS_CTRL_RESET_BBP);
+
+	if (is_mt7612(dev))
+		mt76_clear(dev, MT_COEXCFG0, MT_COEXCFG0_COEX_EN);
+
+	mt76_set(dev, MT_EXT_CCA_CFG, 0xf000);
+	mt76_clear(dev, MT_TX_ALC_CFG_4, BIT(31));
+
+	mt76x2u_mac_fixup_xtal(dev);
+
+	return 0;
+}
+
+int mt76x2u_mac_start(struct mt76x2_dev *dev)
+{
+	mt76x2u_mac_reset_counters(dev);
+
+	mt76_wr(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_ENABLE_TX);
+	wait_for_wpdma(dev);
+	usleep_range(50, 100);
+
+	mt76_wr(dev, MT_RX_FILTR_CFG, dev->rxfilter);
+
+	mt76_wr(dev, MT_MAC_SYS_CTRL,
+		MT_MAC_SYS_CTRL_ENABLE_TX |
+		MT_MAC_SYS_CTRL_ENABLE_RX);
+
+	return 0;
+}
+
+int mt76x2u_mac_stop(struct mt76x2_dev *dev)
+{
+	int i, count = 0, val;
+	bool stopped = false;
+	u32 rts_cfg;
+
+	if (test_bit(MT76_REMOVED, &dev->mt76.state))
+		return -EIO;
+
+	rts_cfg = mt76_rr(dev, MT_TX_RTS_CFG);
+	mt76_wr(dev, MT_TX_RTS_CFG, rts_cfg & ~MT_TX_RTS_CFG_RETRY_LIMIT);
+
+	mt76_clear(dev, MT_TXOP_CTRL_CFG, BIT(20));
+	mt76_clear(dev, MT_TXOP_HLDR_ET, BIT(1));
+
+	/* wait tx dma to stop */
+	for (i = 0; i < 2000; i++) {
+		val = mt76_rr(dev, MT_VEND_ADDR(CFG, MT_USB_U3DMA_CFG));
+		if (!(val & MT_USB_DMA_CFG_TX_BUSY) && i > 10)
+			break;
+		usleep_range(50, 100);
+	}
+
+	/* page count on TxQ */
+	for (i = 0; i < 200; i++) {
+		if (!(mt76_rr(dev, 0x0438) & 0xffffffff) &&
+		    !(mt76_rr(dev, 0x0a30) & 0x000000ff) &&
+		    !(mt76_rr(dev, 0x0a34) & 0xff00ff00))
+			break;
+		usleep_range(10, 20);
+	}
+
+	/* disable tx-rx */
+	mt76_clear(dev, MT_MAC_SYS_CTRL,
+		   MT_MAC_SYS_CTRL_ENABLE_RX |
+		   MT_MAC_SYS_CTRL_ENABLE_TX);
+
+	/* Wait for MAC to become idle */
+	for (i = 0; i < 1000; i++) {
+		if (!(mt76_rr(dev, MT_MAC_STATUS) & MT_MAC_STATUS_TX) &&
+		    !mt76_rr(dev, MT_BBP(IBI, 12))) {
+			stopped = true;
+			break;
+		}
+		usleep_range(10, 20);
+	}
+
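+	/* MAC did not become idle in time: pulse BBP CORE(4) bits 1 and 0 */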
+	if (!stopped) {
+		mt76_set(dev, MT_BBP(CORE, 4), BIT(1));
+		mt76_clear(dev, MT_BBP(CORE, 4), BIT(1));
+
+		mt76_set(dev, MT_BBP(CORE, 4), BIT(0));
+		mt76_clear(dev, MT_BBP(CORE, 4), BIT(0));
+	}
+
+	/* page count on RxQ */
+	for (i = 0; i < 200; i++) {
+		if (!(mt76_rr(dev, 0x0430) & 0x00ff0000) &&
+		    !(mt76_rr(dev, 0x0a30) & 0xffffffff) &&
+		    !(mt76_rr(dev, 0x0a34) & 0xffffffff) &&
+		    ++count > 10)
+			break;
+		msleep(50);
+	}
+
+	if (!mt76_poll(dev, MT_MAC_STATUS, MT_MAC_STATUS_RX, 0, 2000))
+		dev_warn(dev->mt76.dev, "MAC RX failed to stop\n");
+
+	/* wait rx dma to stop */
+	for (i = 0; i < 2000; i++) {
+		val = mt76_rr(dev, MT_VEND_ADDR(CFG, MT_USB_U3DMA_CFG));
+		if (!(val & MT_USB_DMA_CFG_RX_BUSY) && i > 10)
+			break;
+		usleep_range(50, 100);
+	}
+
+	mt76_wr(dev, MT_TX_RTS_CFG, rts_cfg);
+
+	return 0;
+}
+
+void mt76x2u_mac_resume(struct mt76x2_dev *dev)
+{
+	mt76_wr(dev, MT_MAC_SYS_CTRL,
+		MT_MAC_SYS_CTRL_ENABLE_TX |
+		MT_MAC_SYS_CTRL_ENABLE_RX);
+	mt76_set(dev, MT_TXOP_CTRL_CFG, BIT(20));
+	mt76_set(dev, MT_TXOP_HLDR_ET, BIT(1));
+}
+
+void mt76x2u_mac_setaddr(struct mt76x2_dev *dev, u8 *addr)
+{
+	ether_addr_copy(dev->mt76.macaddr, addr);
+
+	if (!is_valid_ether_addr(dev->mt76.macaddr)) {
+		eth_random_addr(dev->mt76.macaddr);
+		dev_info(dev->mt76.dev,
+			 "Invalid MAC address, using random address %pM\n",
+			 dev->mt76.macaddr);
+	}
+
+	mt76_wr(dev, MT_MAC_ADDR_DW0, get_unaligned_le32(dev->mt76.macaddr));
+	mt76_wr(dev, MT_MAC_ADDR_DW1,
+		get_unaligned_le16(dev->mt76.macaddr + 4) |
+		FIELD_PREP(MT_MAC_ADDR_DW1_U2ME_MASK, 0xff));
+}
+
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2u_main.c b/drivers/net/wireless/mediatek/mt76/mt76x2u_main.c
new file mode 100644
index 0000000000000000000000000000000000000000..7367ba111119f359b73ed83217053f35242e305c
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2u_main.c
@@ -0,0 +1,185 @@
+/*
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "mt76x2u.h"
+
+static int mt76x2u_start(struct ieee80211_hw *hw)
+{
+	struct mt76x2_dev *dev = hw->priv;
+	int ret;
+
+	mutex_lock(&dev->mutex);
+
+	ret = mt76x2u_mac_start(dev);
+	if (ret)
+		goto out;
+
+	set_bit(MT76_STATE_RUNNING, &dev->mt76.state);
+
+out:
+	mutex_unlock(&dev->mutex);
+	return ret;
+}
+
+static void mt76x2u_stop(struct ieee80211_hw *hw)
+{
+	struct mt76x2_dev *dev = hw->priv;
+
+	mutex_lock(&dev->mutex);
+	clear_bit(MT76_STATE_RUNNING, &dev->mt76.state);
+	mt76x2u_stop_hw(dev);
+	mutex_unlock(&dev->mutex);
+}
+
+static int mt76x2u_add_interface(struct ieee80211_hw *hw,
+				 struct ieee80211_vif *vif)
+{
+	struct mt76x2_dev *dev = hw->priv;
+	struct mt76x2_vif *mvif = (struct mt76x2_vif *)vif->drv_priv;
+	unsigned int idx = 0;
+
+	if (!ether_addr_equal(dev->mt76.macaddr, vif->addr))
+		mt76x2u_mac_setaddr(dev, vif->addr);
+
+	mvif->idx = idx;
+	mvif->group_wcid.idx = MT_VIF_WCID(idx);
+	mvif->group_wcid.hw_key_idx = -1;
+	mt76x2_txq_init(dev, vif->txq);
+
+	return 0;
+}
+
+static int
+mt76x2u_set_channel(struct mt76x2_dev *dev,
+		    struct cfg80211_chan_def *chandef)
+{
+	int err;
+
+	cancel_delayed_work_sync(&dev->cal_work);
+	set_bit(MT76_RESET, &dev->mt76.state);
+
+	mt76_set_channel(&dev->mt76);
+
+	mt76_clear(dev, MT_TXOP_CTRL_CFG, BIT(20));
+	mt76_clear(dev, MT_TXOP_HLDR_ET, BIT(1));
+	mt76x2_mac_stop(dev, false);
+
+	err = mt76x2u_phy_set_channel(dev, chandef);
+
+	mt76x2u_mac_resume(dev);
+
+	clear_bit(MT76_RESET, &dev->mt76.state);
+	mt76_txq_schedule_all(&dev->mt76);
+
+	return err;
+}
+
+static void
+mt76x2u_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+			 struct ieee80211_bss_conf *info, u32 changed)
+{
+	struct mt76x2_dev *dev = hw->priv;
+
+	mutex_lock(&dev->mutex);
+
+	if (changed & BSS_CHANGED_ASSOC) {
+		mt76x2u_phy_channel_calibrate(dev);
+		mt76x2_apply_gain_adj(dev);
+	}
+
+	if (changed & BSS_CHANGED_BSSID) {
+		mt76_wr(dev, MT_MAC_BSSID_DW0,
+			get_unaligned_le32(info->bssid));
+		mt76_wr(dev, MT_MAC_BSSID_DW1,
+			get_unaligned_le16(info->bssid + 4));
+	}
+
+	mutex_unlock(&dev->mutex);
+}
+
+static int
+mt76x2u_config(struct ieee80211_hw *hw, u32 changed)
+{
+	struct mt76x2_dev *dev = hw->priv;
+	int err = 0;
+
+	mutex_lock(&dev->mutex);
+
+	if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
+		if (!(hw->conf.flags & IEEE80211_CONF_MONITOR))
+			dev->rxfilter |= MT_RX_FILTR_CFG_PROMISC;
+		else
+			dev->rxfilter &= ~MT_RX_FILTR_CFG_PROMISC;
+		mt76_wr(dev, MT_RX_FILTR_CFG, dev->rxfilter);
+	}
+
+	if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
+		ieee80211_stop_queues(hw);
+		err = mt76x2u_set_channel(dev, &hw->conf.chandef);
+		ieee80211_wake_queues(hw);
+	}
+
+	if (changed & IEEE80211_CONF_CHANGE_POWER) {
+		dev->txpower_conf = hw->conf.power_level * 2;
+
+		/* convert to per-chain power for 2x2 devices */
+		dev->txpower_conf -= 6;
+
+		if (test_bit(MT76_STATE_RUNNING, &dev->mt76.state))
+			mt76x2_phy_set_txpower(dev);
+	}
+
+	mutex_unlock(&dev->mutex);
+
+	return err;
+}
+
+static void
+mt76x2u_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+		const u8 *mac)
+{
+	struct mt76x2_dev *dev = hw->priv;
+
+	set_bit(MT76_SCANNING, &dev->mt76.state);
+}
+
+static void
+mt76x2u_sw_scan_complete(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+	struct mt76x2_dev *dev = hw->priv;
+
+	clear_bit(MT76_SCANNING, &dev->mt76.state);
+}
+
+const struct ieee80211_ops mt76x2u_ops = {
+	.tx = mt76x2_tx,
+	.start = mt76x2u_start,
+	.stop = mt76x2u_stop,
+	.add_interface = mt76x2u_add_interface,
+	.remove_interface = mt76x2_remove_interface,
+	.sta_add = mt76x2_sta_add,
+	.sta_remove = mt76x2_sta_remove,
+	.set_key = mt76x2_set_key,
+	.ampdu_action = mt76x2_ampdu_action,
+	.config = mt76x2u_config,
+	.wake_tx_queue = mt76_wake_tx_queue,
+	.bss_info_changed = mt76x2u_bss_info_changed,
+	.configure_filter = mt76x2_configure_filter,
+	.conf_tx = mt76x2_conf_tx,
+	.sw_scan_start = mt76x2u_sw_scan,
+	.sw_scan_complete = mt76x2u_sw_scan_complete,
+	.sta_rate_tbl_update = mt76x2_sta_rate_tbl_update,
+};
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2u_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x2u_mcu.c
new file mode 100644
index 0000000000000000000000000000000000000000..22c16d638baa4b18441d613ace6d537a16cc3f3a
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2u_mcu.c
@@ -0,0 +1,463 @@
+/*
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/firmware.h>
+
+#include "mt76x2u.h"
+#include "mt76x2_eeprom.h"
+
+#define MT_CMD_HDR_LEN			4
+#define MT_INBAND_PACKET_MAX_LEN	192
+#define MT_MCU_MEMMAP_WLAN		0x410000
+
+#define MCU_FW_URB_MAX_PAYLOAD		0x3900
+#define MCU_ROM_PATCH_MAX_PAYLOAD	2048
+
+#define MT76U_MCU_ILM_OFFSET		0x80000
+#define MT76U_MCU_DLM_OFFSET		0x110000
+#define MT76U_MCU_ROM_PATCH_OFFSET	0x90000
+
+static int
+mt76x2u_mcu_function_select(struct mt76x2_dev *dev, enum mcu_function func,
+			    u32 val)
+{
+	struct {
+		__le32 id;
+		__le32 value;
+	} __packed __aligned(4) msg = {
+		.id = cpu_to_le32(func),
+		.value = cpu_to_le32(val),
+	};
+	struct sk_buff *skb;
+
+	skb = mt76u_mcu_msg_alloc(&msg, sizeof(msg));
+	if (!skb)
+		return -ENOMEM;
+	return mt76u_mcu_send_msg(&dev->mt76, skb, CMD_FUN_SET_OP,
+				  func != Q_SELECT);
+}
+
+int mt76x2u_mcu_set_radio_state(struct mt76x2_dev *dev, bool val)
+{
+	struct {
+		__le32 mode;
+		__le32 level;
+	} __packed __aligned(4) msg = {
+		.mode = cpu_to_le32(val ? RADIO_ON : RADIO_OFF),
+		.level = cpu_to_le32(0),
+	};
+	struct sk_buff *skb;
+
+	skb = mt76u_mcu_msg_alloc(&msg, sizeof(msg));
+	if (!skb)
+		return -ENOMEM;
+	return mt76u_mcu_send_msg(&dev->mt76, skb, CMD_POWER_SAVING_OP,
+				  false);
+}
+
+int mt76x2u_mcu_load_cr(struct mt76x2_dev *dev, u8 type, u8 temp_level,
+			u8 channel)
+{
+	struct {
+		u8 cr_mode;
+		u8 temp;
+		u8 ch;
+		u8 _pad0;
+		__le32 cfg;
+	} __packed __aligned(4) msg = {
+		.cr_mode = type,
+		.temp = temp_level,
+		.ch = channel,
+	};
+	struct sk_buff *skb;
+	u32 val;
+
+	val = BIT(31);
+	val |= (mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_0) >> 8) & 0x00ff;
+	val |= (mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_1) << 8) & 0xff00;
+	msg.cfg = cpu_to_le32(val);
+
+	/* send the load_cr request together with the nic_conf settings */
+	skb = mt76u_mcu_msg_alloc(&msg, sizeof(msg));
+	if (!skb)
+		return -ENOMEM;
+	return mt76u_mcu_send_msg(&dev->mt76, skb, CMD_LOAD_CR, true);
+}
+
+int mt76x2u_mcu_set_channel(struct mt76x2_dev *dev, u8 channel, u8 bw,
+			    u8 bw_index, bool scan)
+{
+	struct {
+		u8 idx;
+		u8 scan;
+		u8 bw;
+		u8 _pad0;
+
+		__le16 chainmask;
+		u8 ext_chan;
+		u8 _pad1;
+
+	} __packed __aligned(4) msg = {
+		.idx = channel,
+		.scan = scan,
+		.bw = bw,
+		.chainmask = cpu_to_le16(dev->chainmask),
+	};
+	struct sk_buff *skb;
+
+	/* first set the channel without the extension channel info */
+	skb = mt76u_mcu_msg_alloc(&msg, sizeof(msg));
+	if (!skb)
+		return -ENOMEM;
+
+	mt76u_mcu_send_msg(&dev->mt76, skb, CMD_SWITCH_CHANNEL_OP, true);
+
+	usleep_range(5000, 10000);
+
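+	/* resend the command with the extension channel info filled in */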
+	msg.ext_chan = 0xe0 + bw_index;
+	skb = mt76u_mcu_msg_alloc(&msg, sizeof(msg));
+	if (!skb)
+		return -ENOMEM;
+
+	return mt76u_mcu_send_msg(&dev->mt76, skb, CMD_SWITCH_CHANNEL_OP, true);
+}
+
+int mt76x2u_mcu_calibrate(struct mt76x2_dev *dev, enum mcu_calibration type,
+			  u32 val)
+{
+	struct {
+		__le32 id;
+		__le32 value;
+	} __packed __aligned(4) msg = {
+		.id = cpu_to_le32(type),
+		.value = cpu_to_le32(val),
+	};
+	struct sk_buff *skb;
+
+	skb = mt76u_mcu_msg_alloc(&msg, sizeof(msg));
+	if (!skb)
+		return -ENOMEM;
+	return mt76u_mcu_send_msg(&dev->mt76, skb, CMD_CALIBRATION_OP, true);
+}
+
+int mt76x2u_mcu_init_gain(struct mt76x2_dev *dev, u8 channel, u32 gain,
+			  bool force)
+{
+	struct {
+		__le32 channel;
+		__le32 gain_val;
+	} __packed __aligned(4) msg = {
+		.channel = cpu_to_le32(channel),
+		.gain_val = cpu_to_le32(gain),
+	};
+	struct sk_buff *skb;
+
+	if (force)
+		msg.channel |= cpu_to_le32(BIT(31));
+
+	skb = mt76u_mcu_msg_alloc(&msg, sizeof(msg));
+	if (!skb)
+		return -ENOMEM;
+	return mt76u_mcu_send_msg(&dev->mt76, skb, CMD_INIT_GAIN_OP, true);
+}
+
+int mt76x2u_mcu_set_dynamic_vga(struct mt76x2_dev *dev, u8 channel, bool ap,
+				bool ext, int rssi, u32 false_cca)
+{
+	struct {
+		__le32 channel;
+		__le32 rssi_val;
+		__le32 false_cca_val;
+	} __packed __aligned(4) msg = {
+		.rssi_val = cpu_to_le32(rssi),
+		.false_cca_val = cpu_to_le32(false_cca),
+	};
+	struct sk_buff *skb;
+	u32 val = channel;
+
+	if (ap)
+		val |= BIT(31);
+	if (ext)
+		val |= BIT(30);
+	msg.channel = cpu_to_le32(val);
+
+	skb = mt76u_mcu_msg_alloc(&msg, sizeof(msg));
+	if (!skb)
+		return -ENOMEM;
+	return mt76u_mcu_send_msg(&dev->mt76, skb, CMD_DYNC_VGA_OP, true);
+}
+
+int mt76x2u_mcu_tssi_comp(struct mt76x2_dev *dev,
+			  struct mt76x2_tssi_comp *tssi_data)
+{
+	struct {
+		__le32 id;
+		struct mt76x2_tssi_comp data;
+	} __packed __aligned(4) msg = {
+		.id = cpu_to_le32(MCU_CAL_TSSI_COMP),
+		.data = *tssi_data,
+	};
+	struct sk_buff *skb;
+
+	skb = mt76u_mcu_msg_alloc(&msg, sizeof(msg));
+	if (!skb)
+		return -ENOMEM;
+	return mt76u_mcu_send_msg(&dev->mt76, skb, CMD_CALIBRATION_OP, true);
+}
+
+static void mt76x2u_mcu_load_ivb(struct mt76x2_dev *dev)
+{
+	mt76u_vendor_request(&dev->mt76, MT_VEND_DEV_MODE,
+			     USB_DIR_OUT | USB_TYPE_VENDOR,
+			     0x12, 0, NULL, 0);
+}
+
+static void mt76x2u_mcu_enable_patch(struct mt76x2_dev *dev)
+{
+	struct mt76_usb *usb = &dev->mt76.usb;
+	const u8 data[] = {
+		0x6f, 0xfc, 0x08, 0x01,
+		0x20, 0x04, 0x00, 0x00,
+		0x00, 0x09, 0x00,
+	};
+
+	memcpy(usb->data, data, sizeof(data));
+	mt76u_vendor_request(&dev->mt76, MT_VEND_DEV_MODE,
+			     USB_DIR_OUT | USB_TYPE_CLASS,
+			     0x12, 0, usb->data, sizeof(data));
+}
+
+static void mt76x2u_mcu_reset_wmt(struct mt76x2_dev *dev)
+{
+	struct mt76_usb *usb = &dev->mt76.usb;
+	const u8 data[] = {
+		0x6f, 0xfc, 0x05, 0x01,
+		0x07, 0x01, 0x00, 0x04
+	};
+
+	memcpy(usb->data, data, sizeof(data));
+	mt76u_vendor_request(&dev->mt76, MT_VEND_DEV_MODE,
+			     USB_DIR_OUT | USB_TYPE_CLASS,
+			     0x12, 0, usb->data, sizeof(data));
+}
+
+static int mt76x2u_mcu_load_rom_patch(struct mt76x2_dev *dev)
+{
+	bool rom_protect = !is_mt7612(dev);
+	struct mt76x2_patch_header *hdr;
+	u32 val, patch_mask, patch_reg;
+	const struct firmware *fw;
+	int err;
+
+	if (rom_protect &&
+	    !mt76_poll_msec(dev, MT_MCU_SEMAPHORE_03, 1, 1, 600)) {
+		dev_err(dev->mt76.dev,
+			"could not get hardware semaphore for ROM PATCH\n");
+		return -ETIMEDOUT;
+	}
+
+	if (mt76xx_rev(dev) >= MT76XX_REV_E3) {
+		patch_mask = BIT(0);
+		patch_reg = MT_MCU_CLOCK_CTL;
+	} else {
+		patch_mask = BIT(1);
+		patch_reg = MT_MCU_COM_REG0;
+	}
+
+	if (rom_protect && (mt76_rr(dev, patch_reg) & patch_mask)) {
+		dev_info(dev->mt76.dev, "ROM patch already applied\n");
+		return 0;
+	}
+
+	err = request_firmware(&fw, MT7662U_ROM_PATCH, dev->mt76.dev);
+	if (err < 0)
+		return err;
+
+	if (!fw || !fw->data || fw->size <= sizeof(*hdr)) {
+		dev_err(dev->mt76.dev, "failed to load firmware\n");
+		err = -EIO;
+		goto out;
+	}
+
+	hdr = (struct mt76x2_patch_header *)fw->data;
+	dev_info(dev->mt76.dev, "ROM patch build: %.15s\n", hdr->build_time);
+
+	/* enable USB_DMA_CFG */
+	val = MT_USB_DMA_CFG_RX_BULK_EN |
+	      MT_USB_DMA_CFG_TX_BULK_EN |
+	      FIELD_PREP(MT_USB_DMA_CFG_RX_BULK_AGG_TOUT, 0x20);
+	mt76_wr(dev, MT_VEND_ADDR(CFG, MT_USB_U3DMA_CFG), val);
+
+	/* vendor reset */
+	mt76u_mcu_fw_reset(&dev->mt76);
+	usleep_range(5000, 10000);
+
+	/* enable FCE to send in-band cmd */
+	mt76_wr(dev, MT_FCE_PSE_CTRL, 0x1);
+	/* FCE tx_fs_base_ptr */
+	mt76_wr(dev, MT_TX_CPU_FROM_FCE_BASE_PTR, 0x400230);
+	/* FCE tx_fs_max_cnt */
+	mt76_wr(dev, MT_TX_CPU_FROM_FCE_MAX_COUNT, 0x1);
+	/* FCE pdma enable */
+	mt76_wr(dev, MT_FCE_PDMA_GLOBAL_CONF, 0x44);
+	/* FCE skip_fs_en */
+	mt76_wr(dev, MT_FCE_SKIP_FS, 0x3);
+
+	err = mt76u_mcu_fw_send_data(&dev->mt76, fw->data + sizeof(*hdr),
+				     fw->size - sizeof(*hdr),
+				     MCU_ROM_PATCH_MAX_PAYLOAD,
+				     MT76U_MCU_ROM_PATCH_OFFSET);
+	if (err < 0) {
+		err = -EIO;
+		goto out;
+	}
+
+	mt76x2u_mcu_enable_patch(dev);
+	mt76x2u_mcu_reset_wmt(dev);
+	mdelay(20);
+
+	if (!mt76_poll_msec(dev, patch_reg, patch_mask, patch_mask, 100)) {
+		dev_err(dev->mt76.dev, "failed to load ROM patch\n");
+		err = -ETIMEDOUT;
+	}
+
+out:
+	if (rom_protect)
+		mt76_wr(dev, MT_MCU_SEMAPHORE_03, 1);
+	release_firmware(fw);
+	return err;
+}
+
+static int mt76x2u_mcu_load_firmware(struct mt76x2_dev *dev)
+{
+	u32 val, dlm_offset = MT76U_MCU_DLM_OFFSET;
+	const struct mt76x2_fw_header *hdr;
+	int err, len, ilm_len, dlm_len;
+	const struct firmware *fw;
+
+	err = request_firmware(&fw, MT7662U_FIRMWARE, dev->mt76.dev);
+	if (err < 0)
+		return err;
+
+	if (!fw || !fw->data || fw->size < sizeof(*hdr)) {
+		err = -EINVAL;
+		goto out;
+	}
+
+	hdr = (const struct mt76x2_fw_header *)fw->data;
+	ilm_len = le32_to_cpu(hdr->ilm_len);
+	dlm_len = le32_to_cpu(hdr->dlm_len);
+	len = sizeof(*hdr) + ilm_len + dlm_len;
+	if (fw->size != len) {
+		err = -EINVAL;
+		goto out;
+	}
+
+	val = le16_to_cpu(hdr->fw_ver);
+	dev_info(dev->mt76.dev, "Firmware Version: %d.%d.%02d\n",
+		 (val >> 12) & 0xf, (val >> 8) & 0xf, val & 0xf);
+
+	val = le16_to_cpu(hdr->build_ver);
+	dev_info(dev->mt76.dev, "Build: %x\n", val);
+	dev_info(dev->mt76.dev, "Build Time: %.16s\n", hdr->build_time);
+
+	/* vendor reset */
+	mt76u_mcu_fw_reset(&dev->mt76);
+	usleep_range(5000, 10000);
+
+	/* enable USB_DMA_CFG */
+	val = MT_USB_DMA_CFG_RX_BULK_EN |
+	      MT_USB_DMA_CFG_TX_BULK_EN |
+	      FIELD_PREP(MT_USB_DMA_CFG_RX_BULK_AGG_TOUT, 0x20);
+	mt76_wr(dev, MT_VEND_ADDR(CFG, MT_USB_U3DMA_CFG), val);
+	/* enable FCE to send in-band cmd */
+	mt76_wr(dev, MT_FCE_PSE_CTRL, 0x1);
+	/* FCE tx_fs_base_ptr */
+	mt76_wr(dev, MT_TX_CPU_FROM_FCE_BASE_PTR, 0x400230);
+	/* FCE tx_fs_max_cnt */
+	mt76_wr(dev, MT_TX_CPU_FROM_FCE_MAX_COUNT, 0x1);
+	/* FCE pdma enable */
+	mt76_wr(dev, MT_FCE_PDMA_GLOBAL_CONF, 0x44);
+	/* FCE skip_fs_en */
+	mt76_wr(dev, MT_FCE_SKIP_FS, 0x3);
+
+	/* load ILM */
+	err = mt76u_mcu_fw_send_data(&dev->mt76, fw->data + sizeof(*hdr),
+				     ilm_len, MCU_FW_URB_MAX_PAYLOAD,
+				     MT76U_MCU_ILM_OFFSET);
+	if (err < 0) {
+		err = -EIO;
+		goto out;
+	}
+
+	/* load DLM */
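+	/* DLM is loaded 0x800 bytes higher on E3 and newer revisions */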
+	if (mt76xx_rev(dev) >= MT76XX_REV_E3)
+		dlm_offset += 0x800;
+	err = mt76u_mcu_fw_send_data(&dev->mt76,
+				     fw->data + sizeof(*hdr) + ilm_len,
+				     dlm_len, MCU_FW_URB_MAX_PAYLOAD,
+				     dlm_offset);
+	if (err < 0) {
+		err = -EIO;
+		goto out;
+	}
+
+	mt76x2u_mcu_load_ivb(dev);
+	if (!mt76_poll_msec(dev, MT_MCU_COM_REG0, 1, 1, 100)) {
+		dev_err(dev->mt76.dev, "firmware failed to start\n");
+		err = -ETIMEDOUT;
+		goto out;
+	}
+
+	mt76_set(dev, MT_MCU_COM_REG0, BIT(1));
+	/* enable FCE to send in-band cmd */
+	mt76_wr(dev, MT_FCE_PSE_CTRL, 0x1);
+	dev_dbg(dev->mt76.dev, "firmware running\n");
+
+out:
+	release_firmware(fw);
+	return err;
+}
+
+int mt76x2u_mcu_fw_init(struct mt76x2_dev *dev)
+{
+	int err;
+
+	err = mt76x2u_mcu_load_rom_patch(dev);
+	if (err < 0)
+		return err;
+
+	return mt76x2u_mcu_load_firmware(dev);
+}
+
+int mt76x2u_mcu_init(struct mt76x2_dev *dev)
+{
+	int err;
+
+	err = mt76x2u_mcu_function_select(dev, Q_SELECT, 1);
+	if (err < 0)
+		return err;
+
+	return mt76x2u_mcu_set_radio_state(dev, true);
+}
+
+void mt76x2u_mcu_deinit(struct mt76x2_dev *dev)
+{
+	struct mt76_usb *usb = &dev->mt76.usb;
+
+	usb_kill_urb(usb->mcu.res.urb);
+	mt76u_buf_free(&usb->mcu.res);
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2u_phy.c b/drivers/net/wireless/mediatek/mt76/mt76x2u_phy.c
new file mode 100644
index 0000000000000000000000000000000000000000..5158063d0c2e09e667deab6f69def1a99a95cab9
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2u_phy.c
@@ -0,0 +1,303 @@
+/*
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "mt76x2u.h"
+#include "mt76x2_eeprom.h"
+
+void mt76x2u_phy_set_rxpath(struct mt76x2_dev *dev)
+{
+	u32 val;
+
+	val = mt76_rr(dev, MT_BBP(AGC, 0));
+	val &= ~BIT(4);
+
+	switch (dev->chainmask & 0xf) {
+	case 2:
+		val |= BIT(3);
+		break;
+	default:
+		val &= ~BIT(3);
+		break;
+	}
+	mt76_wr(dev, MT_BBP(AGC, 0), val);
+}
+
+void mt76x2u_phy_set_txdac(struct mt76x2_dev *dev)
+{
+	int txpath;
+
+	txpath = (dev->chainmask >> 8) & 0xf;
+	switch (txpath) {
+	case 2:
+		mt76_set(dev, MT_BBP(TXBE, 5), 0x3);
+		break;
+	default:
+		mt76_clear(dev, MT_BBP(TXBE, 5), 0x3);
+		break;
+	}
+}
+
+void mt76x2u_phy_channel_calibrate(struct mt76x2_dev *dev)
+{
+	struct ieee80211_channel *chan = dev->mt76.chandef.chan;
+	bool is_5ghz = chan->band == NL80211_BAND_5GHZ;
+
+	if (mt76x2_channel_silent(dev))
+		return;
+
+	mt76x2u_mac_stop(dev);
+
+	if (is_5ghz)
+		mt76x2u_mcu_calibrate(dev, MCU_CAL_LC, 0);
+
+	mt76x2u_mcu_calibrate(dev, MCU_CAL_TX_LOFT, is_5ghz);
+	mt76x2u_mcu_calibrate(dev, MCU_CAL_TXIQ, is_5ghz);
+	mt76x2u_mcu_calibrate(dev, MCU_CAL_RXIQC_FI, is_5ghz);
+	mt76x2u_mcu_calibrate(dev, MCU_CAL_TEMP_SENSOR, 0);
+
+	mt76x2u_mac_resume(dev);
+}
+
+static void
+mt76x2u_phy_tssi_compensate(struct mt76x2_dev *dev)
+{
+	struct ieee80211_channel *chan = dev->mt76.chandef.chan;
+	struct mt76x2_tx_power_info txp;
+	struct mt76x2_tssi_comp t = {};
+
+	if (!dev->cal.tssi_cal_done)
+		return;
+
+	if (!dev->cal.tssi_comp_pending) {
+		/* TSSI trigger */
+		t.cal_mode = BIT(0);
+		mt76x2u_mcu_tssi_comp(dev, &t);
+		dev->cal.tssi_comp_pending = true;
+	} else {
+		if (mt76_rr(dev, MT_BBP(CORE, 34)) & BIT(4))
+			return;
+
+		dev->cal.tssi_comp_pending = false;
+		mt76x2_get_power_info(dev, &txp, chan);
+
+		if (mt76x2_ext_pa_enabled(dev, chan->band))
+			t.pa_mode = 1;
+
+		t.cal_mode = BIT(1);
+		t.slope0 = txp.chain[0].tssi_slope;
+		t.offset0 = txp.chain[0].tssi_offset;
+		t.slope1 = txp.chain[1].tssi_slope;
+		t.offset1 = txp.chain[1].tssi_offset;
+		mt76x2u_mcu_tssi_comp(dev, &t);
+
+		if (t.pa_mode || dev->cal.dpd_cal_done)
+			return;
+
+		usleep_range(10000, 20000);
+		mt76x2u_mcu_calibrate(dev, MCU_CAL_DPD, chan->hw_value);
+		dev->cal.dpd_cal_done = true;
+	}
+}
+
+static void
+mt76x2u_phy_update_channel_gain(struct mt76x2_dev *dev)
+{
+	u8 channel = dev->mt76.chandef.chan->hw_value;
+	int freq, freq1;
+	u32 false_cca;
+
+	freq = dev->mt76.chandef.chan->center_freq;
+	freq1 = dev->mt76.chandef.center_freq1;
+
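+	/* adjust the channel number to the center of the 40/80 MHz group
+	 * before passing it to the dynamic vga mcu command
+	 */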
+	switch (dev->mt76.chandef.width) {
+	case NL80211_CHAN_WIDTH_80: {
+		int ch_group_index;
+
+		ch_group_index = (freq - freq1 + 30) / 20;
+		if (WARN_ON(ch_group_index < 0 || ch_group_index > 3))
+			ch_group_index = 0;
+		channel += 6 - ch_group_index * 4;
+		break;
+	}
+	case NL80211_CHAN_WIDTH_40:
+		if (freq1 > freq)
+			channel += 2;
+		else
+			channel -= 2;
+		break;
+	default:
+		break;
+	}
+
+	dev->cal.avg_rssi_all = mt76x2_phy_get_min_avg_rssi(dev);
+	false_cca = FIELD_GET(MT_RX_STAT_1_CCA_ERRORS,
+			      mt76_rr(dev, MT_RX_STAT_1));
+
+	mt76x2u_mcu_set_dynamic_vga(dev, channel, false, false,
+				    dev->cal.avg_rssi_all, false_cca);
+}
+
+void mt76x2u_phy_calibrate(struct work_struct *work)
+{
+	struct mt76x2_dev *dev;
+
+	dev = container_of(work, struct mt76x2_dev, cal_work.work);
+	mt76x2u_phy_tssi_compensate(dev);
+	mt76x2u_phy_update_channel_gain(dev);
+
+	ieee80211_queue_delayed_work(mt76_hw(dev), &dev->cal_work,
+				     MT_CALIBRATE_INTERVAL);
+}
+
+int mt76x2u_phy_set_channel(struct mt76x2_dev *dev,
+			    struct cfg80211_chan_def *chandef)
+{
+	u32 ext_cca_chan[4] = {
+		[0] = FIELD_PREP(MT_EXT_CCA_CFG_CCA0, 0) |
+		      FIELD_PREP(MT_EXT_CCA_CFG_CCA1, 1) |
+		      FIELD_PREP(MT_EXT_CCA_CFG_CCA2, 2) |
+		      FIELD_PREP(MT_EXT_CCA_CFG_CCA3, 3) |
+		      FIELD_PREP(MT_EXT_CCA_CFG_CCA_MASK, BIT(0)),
+		[1] = FIELD_PREP(MT_EXT_CCA_CFG_CCA0, 1) |
+		      FIELD_PREP(MT_EXT_CCA_CFG_CCA1, 0) |
+		      FIELD_PREP(MT_EXT_CCA_CFG_CCA2, 2) |
+		      FIELD_PREP(MT_EXT_CCA_CFG_CCA3, 3) |
+		      FIELD_PREP(MT_EXT_CCA_CFG_CCA_MASK, BIT(1)),
+		[2] = FIELD_PREP(MT_EXT_CCA_CFG_CCA0, 2) |
+		      FIELD_PREP(MT_EXT_CCA_CFG_CCA1, 3) |
+		      FIELD_PREP(MT_EXT_CCA_CFG_CCA2, 1) |
+		      FIELD_PREP(MT_EXT_CCA_CFG_CCA3, 0) |
+		      FIELD_PREP(MT_EXT_CCA_CFG_CCA_MASK, BIT(2)),
+		[3] = FIELD_PREP(MT_EXT_CCA_CFG_CCA0, 3) |
+		      FIELD_PREP(MT_EXT_CCA_CFG_CCA1, 2) |
+		      FIELD_PREP(MT_EXT_CCA_CFG_CCA2, 1) |
+		      FIELD_PREP(MT_EXT_CCA_CFG_CCA3, 0) |
+		      FIELD_PREP(MT_EXT_CCA_CFG_CCA_MASK, BIT(3)),
+	};
+	bool scan = test_bit(MT76_SCANNING, &dev->mt76.state);
+	struct ieee80211_channel *chan = chandef->chan;
+	u8 channel = chan->hw_value, bw, bw_index;
+	int ch_group_index, freq, freq1, ret;
+
+	dev->cal.channel_cal_done = false;
+	freq = chandef->chan->center_freq;
+	freq1 = chandef->center_freq1;
+
+	switch (chandef->width) {
+	case NL80211_CHAN_WIDTH_40:
+		bw = 1;
+		if (freq1 > freq) {
+			bw_index = 1;
+			ch_group_index = 0;
+		} else {
+			bw_index = 3;
+			ch_group_index = 1;
+		}
+		channel += 2 - ch_group_index * 4;
+		break;
+	case NL80211_CHAN_WIDTH_80:
+		ch_group_index = (freq - freq1 + 30) / 20;
+		if (WARN_ON(ch_group_index < 0 || ch_group_index > 3))
+			ch_group_index = 0;
+		bw = 2;
+		bw_index = ch_group_index;
+		channel += 6 - ch_group_index * 4;
+		break;
+	default:
+		bw = 0;
+		bw_index = 0;
+		ch_group_index = 0;
+		break;
+	}
+
+	mt76x2_read_rx_gain(dev);
+	mt76x2_phy_set_txpower_regs(dev, chan->band);
+	mt76x2_configure_tx_delay(dev, chan->band, bw);
+	mt76x2_phy_set_txpower(dev);
+
+	mt76x2_phy_set_band(dev, chan->band, ch_group_index & 1);
+	mt76x2_phy_set_bw(dev, chandef->width, ch_group_index);
+
+	mt76_rmw(dev, MT_EXT_CCA_CFG,
+		 (MT_EXT_CCA_CFG_CCA0 |
+		  MT_EXT_CCA_CFG_CCA1 |
+		  MT_EXT_CCA_CFG_CCA2 |
+		  MT_EXT_CCA_CFG_CCA3 |
+		  MT_EXT_CCA_CFG_CCA_MASK),
+		 ext_cca_chan[ch_group_index]);
+
+	ret = mt76x2u_mcu_set_channel(dev, channel, bw, bw_index, scan);
+	if (ret)
+		return ret;
+
+	mt76x2u_mcu_init_gain(dev, channel, dev->cal.rx.mcu_gain, true);
+
+	/* Enable LDPC Rx */
+	if (mt76xx_rev(dev) >= MT76XX_REV_E3)
+		mt76_set(dev, MT_BBP(RXO, 13), BIT(10));
+
+	if (!dev->cal.init_cal_done) {
+		u8 val = mt76x2_eeprom_get(dev, MT_EE_BT_RCAL_RESULT);
+
+		if (val != 0xff)
+			mt76x2u_mcu_calibrate(dev, MCU_CAL_R, 0);
+	}
+
+	mt76x2u_mcu_calibrate(dev, MCU_CAL_RXDCOC, channel);
+
+	/* Rx LPF calibration */
+	if (!dev->cal.init_cal_done)
+		mt76x2u_mcu_calibrate(dev, MCU_CAL_RC, 0);
+	dev->cal.init_cal_done = true;
+
+	mt76_wr(dev, MT_BBP(AGC, 61), 0xff64a4e2);
+	mt76_wr(dev, MT_BBP(AGC, 7), 0x08081010);
+	mt76_wr(dev, MT_BBP(AGC, 11), 0x00000404);
+	mt76_wr(dev, MT_BBP(AGC, 2), 0x00007070);
+	mt76_wr(dev, MT_TXOP_CTRL_CFG, 0x04101b3f);
+
+	mt76_set(dev, MT_BBP(TXO, 4), BIT(25));
+	mt76_set(dev, MT_BBP(RXO, 13), BIT(8));
+
+	if (scan)
+		return 0;
+
+	if (mt76x2_tssi_enabled(dev)) {
+		/* init default values for temp compensation */
+		mt76_rmw_field(dev, MT_TX_ALC_CFG_1, MT_TX_ALC_CFG_1_TEMP_COMP,
+			       0x38);
+		mt76_rmw_field(dev, MT_TX_ALC_CFG_2, MT_TX_ALC_CFG_2_TEMP_COMP,
+			       0x38);
+
+		/* init tssi calibration */
+		if (!mt76x2_channel_silent(dev)) {
+			struct ieee80211_channel *chan;
+			u32 flag = 0;
+
+			chan = dev->mt76.chandef.chan;
+			if (chan->band == NL80211_BAND_5GHZ)
+				flag |= BIT(0);
+			if (mt76x2_ext_pa_enabled(dev, chan->band))
+				flag |= BIT(8);
+			mt76x2u_mcu_calibrate(dev, MCU_CAL_TSSI, flag);
+			dev->cal.tssi_cal_done = true;
+		}
+	}
+
+	ieee80211_queue_delayed_work(mt76_hw(dev), &dev->cal_work,
+				     MT_CALIBRATE_INTERVAL);
+	return 0;
+}
diff --git a/drivers/net/wireless/mediatek/mt76/tx.c b/drivers/net/wireless/mediatek/mt76/tx.c
index e96956710fb2aa2b0e1293692e969fca6fd21fc5..af48d43bb7dca0ee7a104fa71b6c194aab1788c7 100644
--- a/drivers/net/wireless/mediatek/mt76/tx.c
+++ b/drivers/net/wireless/mediatek/mt76/tx.c
@@ -51,7 +51,7 @@ __mt76_get_txwi(struct mt76_dev *dev)
 	return t;
 }
 
-static struct mt76_txwi_cache *
+struct mt76_txwi_cache *
 mt76_get_txwi(struct mt76_dev *dev)
 {
 	struct mt76_txwi_cache *t = __mt76_get_txwi(dev);
@@ -91,80 +91,6 @@ mt76_txq_get_qid(struct ieee80211_txq *txq)
 	return txq->ac;
 }
 
-int mt76_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
-		      struct sk_buff *skb, struct mt76_wcid *wcid,
-		      struct ieee80211_sta *sta)
-{
-	struct mt76_queue_entry e;
-	struct mt76_txwi_cache *t;
-	struct mt76_queue_buf buf[32];
-	struct sk_buff *iter;
-	dma_addr_t addr;
-	int len;
-	u32 tx_info = 0;
-	int n, ret;
-
-	t = mt76_get_txwi(dev);
-	if (!t) {
-		ieee80211_free_txskb(dev->hw, skb);
-		return -ENOMEM;
-	}
-
-	dma_sync_single_for_cpu(dev->dev, t->dma_addr, sizeof(t->txwi),
-				DMA_TO_DEVICE);
-	ret = dev->drv->tx_prepare_skb(dev, &t->txwi, skb, q, wcid, sta,
-				       &tx_info);
-	dma_sync_single_for_device(dev->dev, t->dma_addr, sizeof(t->txwi),
-				   DMA_TO_DEVICE);
-	if (ret < 0)
-		goto free;
-
-	len = skb->len - skb->data_len;
-	addr = dma_map_single(dev->dev, skb->data, len, DMA_TO_DEVICE);
-	if (dma_mapping_error(dev->dev, addr)) {
-		ret = -ENOMEM;
-		goto free;
-	}
-
-	n = 0;
-	buf[n].addr = t->dma_addr;
-	buf[n++].len = dev->drv->txwi_size;
-	buf[n].addr = addr;
-	buf[n++].len = len;
-
-	skb_walk_frags(skb, iter) {
-		if (n == ARRAY_SIZE(buf))
-			goto unmap;
-
-		addr = dma_map_single(dev->dev, iter->data, iter->len,
-				      DMA_TO_DEVICE);
-		if (dma_mapping_error(dev->dev, addr))
-			goto unmap;
-
-		buf[n].addr = addr;
-		buf[n++].len = iter->len;
-	}
-
-	if (q->queued + (n + 1) / 2 >= q->ndesc - 1)
-		goto unmap;
-
-	return dev->queue_ops->add_buf(dev, q, buf, n, tx_info, skb, t);
-
-unmap:
-	ret = -ENOMEM;
-	for (n--; n > 0; n--)
-		dma_unmap_single(dev->dev, buf[n].addr, buf[n].len,
-				 DMA_TO_DEVICE);
-
-free:
-	e.skb = skb;
-	e.txwi = t;
-	dev->drv->tx_complete_skb(dev, q, &e, true);
-	mt76_put_txwi(dev, t);
-	return ret;
-}
-EXPORT_SYMBOL_GPL(mt76_tx_queue_skb);
-
 void
 mt76_tx(struct mt76_dev *dev, struct ieee80211_sta *sta,
 	struct mt76_wcid *wcid, struct sk_buff *skb)
@@ -185,7 +111,7 @@ mt76_tx(struct mt76_dev *dev, struct ieee80211_sta *sta,
 	q = &dev->q_tx[qid];
 
 	spin_lock_bh(&q->lock);
-	mt76_tx_queue_skb(dev, q, skb, wcid, sta);
+	dev->queue_ops->tx_queue_skb(dev, q, skb, wcid, sta);
 	dev->queue_ops->kick(dev, q);
 
 	if (q->queued > q->ndesc - 8)
@@ -241,7 +167,7 @@ mt76_queue_ps_skb(struct mt76_dev *dev, struct ieee80211_sta *sta,
 		info->flags |= IEEE80211_TX_STATUS_EOSP;
 
 	mt76_skb_set_moredata(skb, !last);
-	mt76_tx_queue_skb(dev, hwq, skb, wcid, sta);
+	dev->queue_ops->tx_queue_skb(dev, hwq, skb, wcid, sta);
 }
 
 void
@@ -321,7 +247,7 @@ mt76_txq_send_burst(struct mt76_dev *dev, struct mt76_queue *hwq,
 	if (ampdu)
 		mt76_check_agg_ssn(mtxq, skb);
 
-	idx = mt76_tx_queue_skb(dev, hwq, skb, wcid, txq->sta);
+	idx = dev->queue_ops->tx_queue_skb(dev, hwq, skb, wcid, txq->sta);
 
 	if (idx < 0)
 		return idx;
@@ -356,7 +282,8 @@ mt76_txq_send_burst(struct mt76_dev *dev, struct mt76_queue *hwq,
 		if (cur_ampdu)
 			mt76_check_agg_ssn(mtxq, skb);
 
-		idx = mt76_tx_queue_skb(dev, hwq, skb, wcid, txq->sta);
+		idx = dev->queue_ops->tx_queue_skb(dev, hwq, skb, wcid,
+						   txq->sta);
 		if (idx < 0)
 			return idx;
 
diff --git a/drivers/net/wireless/mediatek/mt76/usb.c b/drivers/net/wireless/mediatek/mt76/usb.c
new file mode 100644
index 0000000000000000000000000000000000000000..7780b07543bb8d2bf247268f456e44efed517eab
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/usb.c
@@ -0,0 +1,845 @@
+/*
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "mt76.h"
+#include "usb_trace.h"
+#include "dma.h"
+
+#define MT_VEND_REQ_MAX_RETRY	10
+#define MT_VEND_REQ_TOUT_MS	300
+
+/* should be called with usb_ctrl_mtx locked */
+static int __mt76u_vendor_request(struct mt76_dev *dev, u8 req,
+				  u8 req_type, u16 val, u16 offset,
+				  void *buf, size_t len)
+{
+	struct usb_interface *intf = to_usb_interface(dev->dev);
+	struct usb_device *udev = interface_to_usbdev(intf);
+	unsigned int pipe;
+	int i, ret;
+
+	pipe = (req_type & USB_DIR_IN) ? usb_rcvctrlpipe(udev, 0)
+				       : usb_sndctrlpipe(udev, 0);
+	for (i = 0; i < MT_VEND_REQ_MAX_RETRY; i++) {
+		if (test_bit(MT76_REMOVED, &dev->state))
+			return -EIO;
+
+		ret = usb_control_msg(udev, pipe, req, req_type, val,
+				      offset, buf, len, MT_VEND_REQ_TOUT_MS);
+		if (ret == -ENODEV)
+			set_bit(MT76_REMOVED, &dev->state);
+		if (ret >= 0 || ret == -ENODEV)
+			return ret;
+		usleep_range(5000, 10000);
+	}
+
+	dev_err(dev->dev, "vendor request req:%02x off:%04x failed:%d\n",
+		req, offset, ret);
+	return ret;
+}
+
+int mt76u_vendor_request(struct mt76_dev *dev, u8 req,
+			 u8 req_type, u16 val, u16 offset,
+			 void *buf, size_t len)
+{
+	int ret;
+
+	mutex_lock(&dev->usb.usb_ctrl_mtx);
+	ret = __mt76u_vendor_request(dev, req, req_type,
+				     val, offset, buf, len);
+	trace_usb_reg_wr(dev, offset, val);
+	mutex_unlock(&dev->usb.usb_ctrl_mtx);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(mt76u_vendor_request);
+
+/* should be called with usb_ctrl_mtx locked */
+static u32 __mt76u_rr(struct mt76_dev *dev, u32 addr)
+{
+	struct mt76_usb *usb = &dev->usb;
+	u32 data = ~0;
+	u16 offset;
+	int ret;
+	u8 req;
+
+	switch (addr & MT_VEND_TYPE_MASK) {
+	case MT_VEND_TYPE_EEPROM:
+		req = MT_VEND_READ_EEPROM;
+		break;
+	case MT_VEND_TYPE_CFG:
+		req = MT_VEND_READ_CFG;
+		break;
+	default:
+		req = MT_VEND_MULTI_READ;
+		break;
+	}
+	offset = addr & ~MT_VEND_TYPE_MASK;
+
+	ret = __mt76u_vendor_request(dev, req,
+				     USB_DIR_IN | USB_TYPE_VENDOR,
+				     0, offset, usb->data, sizeof(__le32));
+	if (ret == sizeof(__le32))
+		data = get_unaligned_le32(usb->data);
+	trace_usb_reg_rr(dev, addr, data);
+
+	return data;
+}
+
+u32 mt76u_rr(struct mt76_dev *dev, u32 addr)
+{
+	u32 ret;
+
+	mutex_lock(&dev->usb.usb_ctrl_mtx);
+	ret = __mt76u_rr(dev, addr);
+	mutex_unlock(&dev->usb.usb_ctrl_mtx);
+
+	return ret;
+}
+
+/* should be called with usb_ctrl_mtx locked */
+static void __mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val)
+{
+	struct mt76_usb *usb = &dev->usb;
+	u16 offset;
+	u8 req;
+
+	switch (addr & MT_VEND_TYPE_MASK) {
+	case MT_VEND_TYPE_CFG:
+		req = MT_VEND_WRITE_CFG;
+		break;
+	default:
+		req = MT_VEND_MULTI_WRITE;
+		break;
+	}
+	offset = addr & ~MT_VEND_TYPE_MASK;
+
+	put_unaligned_le32(val, usb->data);
+	__mt76u_vendor_request(dev, req,
+			       USB_DIR_OUT | USB_TYPE_VENDOR, 0,
+			       offset, usb->data, sizeof(__le32));
+	trace_usb_reg_wr(dev, addr, val);
+}
+
+void mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val)
+{
+	mutex_lock(&dev->usb.usb_ctrl_mtx);
+	__mt76u_wr(dev, addr, val);
+	mutex_unlock(&dev->usb.usb_ctrl_mtx);
+}
+
+static u32 mt76u_rmw(struct mt76_dev *dev, u32 addr,
+		     u32 mask, u32 val)
+{
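+	/* perform the read-modify-write under usb_ctrl_mtx so the two
+	 * vendor requests cannot be interleaved with other accesses
+	 */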
+	mutex_lock(&dev->usb.usb_ctrl_mtx);
+	val |= __mt76u_rr(dev, addr) & ~mask;
+	__mt76u_wr(dev, addr, val);
+	mutex_unlock(&dev->usb.usb_ctrl_mtx);
+
+	return val;
+}
+
+static void mt76u_copy(struct mt76_dev *dev, u32 offset,
+		       const void *data, int len)
+{
+	struct mt76_usb *usb = &dev->usb;
+	const u32 *val = data;
+	int i, ret;
+
+	mutex_lock(&usb->usb_ctrl_mtx);
+	for (i = 0; i < (len / 4); i++) {
+		put_unaligned_le32(val[i], usb->data);
+		ret = __mt76u_vendor_request(dev, MT_VEND_MULTI_WRITE,
+					     USB_DIR_OUT | USB_TYPE_VENDOR,
+					     0, offset + i * 4, usb->data,
+					     sizeof(__le32));
+		if (ret < 0)
+			break;
+	}
+	mutex_unlock(&usb->usb_ctrl_mtx);
+}
+
+void mt76u_single_wr(struct mt76_dev *dev, const u8 req,
+		     const u16 offset, const u32 val)
+{
+	mutex_lock(&dev->usb.usb_ctrl_mtx);
+	__mt76u_vendor_request(dev, req,
+			       USB_DIR_OUT | USB_TYPE_VENDOR,
+			       val & 0xffff, offset, NULL, 0);
+	__mt76u_vendor_request(dev, req,
+			       USB_DIR_OUT | USB_TYPE_VENDOR,
+			       val >> 16, offset + 2, NULL, 0);
+	mutex_unlock(&dev->usb.usb_ctrl_mtx);
+}
+EXPORT_SYMBOL_GPL(mt76u_single_wr);
+
+static int
+mt76u_set_endpoints(struct usb_interface *intf,
+		    struct mt76_usb *usb)
+{
+	struct usb_host_interface *intf_desc = intf->cur_altsetting;
+	struct usb_endpoint_descriptor *ep_desc;
+	int i, in_ep = 0, out_ep = 0;
+
+	for (i = 0; i < intf_desc->desc.bNumEndpoints; i++) {
+		ep_desc = &intf_desc->endpoint[i].desc;
+
+		if (usb_endpoint_is_bulk_in(ep_desc) &&
+		    in_ep < __MT_EP_IN_MAX) {
+			usb->in_ep[in_ep] = usb_endpoint_num(ep_desc);
+			usb->in_max_packet = usb_endpoint_maxp(ep_desc);
+			in_ep++;
+		} else if (usb_endpoint_is_bulk_out(ep_desc) &&
+			   out_ep < __MT_EP_OUT_MAX) {
+			usb->out_ep[out_ep] = usb_endpoint_num(ep_desc);
+			usb->out_max_packet = usb_endpoint_maxp(ep_desc);
+			out_ep++;
+		}
+	}
+
+	if (in_ep != __MT_EP_IN_MAX || out_ep != __MT_EP_OUT_MAX)
+		return -EINVAL;
+	return 0;
+}
+
+static int
+mt76u_fill_rx_sg(struct mt76_dev *dev, struct mt76u_buf *buf,
+		 int nsgs, int len, int sglen)
+{
+	struct urb *urb = buf->urb;
+	int i;
+
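+	/* back each scatter-gather entry with a freshly allocated page
+	 * fragment
+	 */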
+	for (i = 0; i < nsgs; i++) {
+		struct page *page;
+		void *data;
+		int offset;
+
+		data = netdev_alloc_frag(len);
+		if (!data)
+			break;
+
+		page = virt_to_head_page(data);
+		offset = data - page_address(page);
+		sg_set_page(&urb->sg[i], page, sglen, offset);
+	}
+
+	if (i < nsgs) {
+		int j;
+
+		for (j = nsgs; j < urb->num_sgs; j++)
+			skb_free_frag(sg_virt(&urb->sg[j]));
+		urb->num_sgs = i;
+	}
+
+	urb->num_sgs = max_t(int, i, urb->num_sgs);
+	buf->len = urb->num_sgs * sglen;
+	sg_init_marker(urb->sg, urb->num_sgs);
+
+	return i ? : -ENOMEM;
+}
+
+int mt76u_buf_alloc(struct mt76_dev *dev, struct mt76u_buf *buf,
+		    int nsgs, int len, int sglen, gfp_t gfp)
+{
+	buf->urb = usb_alloc_urb(0, gfp);
+	if (!buf->urb)
+		return -ENOMEM;
+
+	buf->urb->sg = devm_kzalloc(dev->dev, nsgs * sizeof(*buf->urb->sg),
+				    gfp);
+	if (!buf->urb->sg)
+		return -ENOMEM;
+
+	sg_init_table(buf->urb->sg, nsgs);
+	buf->dev = dev;
+
+	return mt76u_fill_rx_sg(dev, buf, nsgs, len, sglen);
+}
+EXPORT_SYMBOL_GPL(mt76u_buf_alloc);
+
+void mt76u_buf_free(struct mt76u_buf *buf)
+{
+	struct urb *urb = buf->urb;
+	int i;
+
+	for (i = 0; i < urb->num_sgs; i++)
+		skb_free_frag(sg_virt(&urb->sg[i]));
+	usb_free_urb(buf->urb);
+}
+EXPORT_SYMBOL_GPL(mt76u_buf_free);
+
+int mt76u_submit_buf(struct mt76_dev *dev, int dir, int index,
+		     struct mt76u_buf *buf, gfp_t gfp,
+		     usb_complete_t complete_fn, void *context)
+{
+	struct usb_interface *intf = to_usb_interface(dev->dev);
+	struct usb_device *udev = interface_to_usbdev(intf);
+	unsigned int pipe;
+
+	if (dir == USB_DIR_IN)
+		pipe = usb_rcvbulkpipe(udev, dev->usb.in_ep[index]);
+	else
+		pipe = usb_sndbulkpipe(udev, dev->usb.out_ep[index]);
+
+	usb_fill_bulk_urb(buf->urb, udev, pipe, NULL, buf->len,
+			  complete_fn, context);
+
+	return usb_submit_urb(buf->urb, gfp);
+}
+EXPORT_SYMBOL_GPL(mt76u_submit_buf);
+
+static inline struct mt76u_buf
+*mt76u_get_next_rx_entry(struct mt76_queue *q)
+{
+	struct mt76u_buf *buf = NULL;
+	unsigned long flags;
+
+	spin_lock_irqsave(&q->lock, flags);
+	if (q->queued > 0) {
+		buf = &q->entry[q->head].ubuf;
+		q->head = (q->head + 1) % q->ndesc;
+		q->queued--;
+	}
+	spin_unlock_irqrestore(&q->lock, flags);
+
+	return buf;
+}
+
+static int mt76u_get_rx_entry_len(u8 *data, u32 data_len)
+{
+	u16 dma_len, min_len;
+
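+	/* the hw prepends a 16 bit dma length to each rx buffer; validate
+	 * it against the actual usb payload size
+	 */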
+	dma_len = get_unaligned_le16(data);
+	min_len = MT_DMA_HDR_LEN + MT_RX_RXWI_LEN +
+		  MT_FCE_INFO_LEN;
+
+	if (data_len < min_len || WARN_ON(!dma_len) ||
+	    WARN_ON(dma_len + MT_DMA_HDR_LEN > data_len) ||
+	    WARN_ON(dma_len & 0x3))
+		return -EINVAL;
+	return dma_len;
+}
+
+static int
+mt76u_process_rx_entry(struct mt76_dev *dev, struct urb *urb)
+{
+	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
+	u8 *data = sg_virt(&urb->sg[0]);
+	int data_len, len, nsgs = 1;
+	struct sk_buff *skb;
+
+	if (!test_bit(MT76_STATE_INITIALIZED, &dev->state))
+		return 0;
+
+	len = mt76u_get_rx_entry_len(data, urb->actual_length);
+	if (len < 0)
+		return 0;
+
+	skb = build_skb(data, q->buf_size);
+	if (!skb)
+		return 0;
+
+	data_len = min_t(int, len, urb->sg[0].length - MT_DMA_HDR_LEN);
+	skb_reserve(skb, MT_DMA_HDR_LEN);
+	if (skb->tail + data_len > skb->end) {
+		dev_kfree_skb(skb);
+		return 1;
+	}
+
+	__skb_put(skb, data_len);
+	len -= data_len;
+
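+	/* payload spanning more than one scatterlist entry is attached as
+	 * paged skb fragments
+	 */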
+	while (len > 0) {
+		data_len = min_t(int, len, urb->sg[nsgs].length);
+		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+				sg_page(&urb->sg[nsgs]),
+				urb->sg[nsgs].offset,
+				data_len, q->buf_size);
+		len -= data_len;
+		nsgs++;
+	}
+	dev->drv->rx_skb(dev, MT_RXQ_MAIN, skb);
+
+	return nsgs;
+}
+
+static void mt76u_complete_rx(struct urb *urb)
+{
+	struct mt76_dev *dev = urb->context;
+	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
+	unsigned long flags;
+
+	switch (urb->status) {
+	case -ECONNRESET:
+	case -ESHUTDOWN:
+	case -ENOENT:
+		return;
+	default:
+		dev_err(dev->dev, "rx urb failed: %d\n", urb->status);
+		/* fall through */
+	case 0:
+		break;
+	}
+
+	spin_lock_irqsave(&q->lock, flags);
+	if (WARN_ONCE(q->entry[q->tail].ubuf.urb != urb, "rx urb mismatch"))
+		goto out;
+
+	q->tail = (q->tail + 1) % q->ndesc;
+	q->queued++;
+	tasklet_schedule(&dev->usb.rx_tasklet);
+out:
+	spin_unlock_irqrestore(&q->lock, flags);
+}
+
+static void mt76u_rx_tasklet(unsigned long data)
+{
+	struct mt76_dev *dev = (struct mt76_dev *)data;
+	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
+	int err, nsgs, buf_len = q->buf_size;
+	struct mt76u_buf *buf;
+
+	rcu_read_lock();
+
+	while (true) {
+		buf = mt76u_get_next_rx_entry(q);
+		if (!buf)
+			break;
+
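+		/* hand the completed buffer to the rx path, then refill the
+		 * consumed fragments and resubmit the urb
+		 */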
+		nsgs = mt76u_process_rx_entry(dev, buf->urb);
+		if (nsgs > 0) {
+			err = mt76u_fill_rx_sg(dev, buf, nsgs,
+					       buf_len,
+					       SKB_WITH_OVERHEAD(buf_len));
+			if (err < 0)
+				break;
+		}
+		mt76u_submit_buf(dev, USB_DIR_IN, MT_EP_IN_PKT_RX,
+				 buf, GFP_ATOMIC,
+				 mt76u_complete_rx, dev);
+	}
+	mt76_rx_poll_complete(dev, MT_RXQ_MAIN, NULL);
+
+	rcu_read_unlock();
+}
+
+int mt76u_submit_rx_buffers(struct mt76_dev *dev)
+{
+	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
+	unsigned long flags;
+	int i, err = 0;
+
+	spin_lock_irqsave(&q->lock, flags);
+	for (i = 0; i < q->ndesc; i++) {
+		err = mt76u_submit_buf(dev, USB_DIR_IN, MT_EP_IN_PKT_RX,
+				       &q->entry[i].ubuf, GFP_ATOMIC,
+				       mt76u_complete_rx, dev);
+		if (err < 0)
+			break;
+	}
+	q->head = q->tail = 0;
+	q->queued = 0;
+	spin_unlock_irqrestore(&q->lock, flags);
+
+	return err;
+}
+EXPORT_SYMBOL_GPL(mt76u_submit_rx_buffers);
+
+static int mt76u_alloc_rx(struct mt76_dev *dev)
+{
+	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
+	int i, err, nsgs;
+
+	spin_lock_init(&q->lock);
+	q->entry = devm_kzalloc(dev->dev,
+				MT_NUM_RX_ENTRIES * sizeof(*q->entry),
+				GFP_KERNEL);
+	if (!q->entry)
+		return -ENOMEM;
+
+	if (mt76u_check_sg(dev)) {
+		q->buf_size = MT_RX_BUF_SIZE;
+		nsgs = MT_SG_MAX_SIZE;
+	} else {
+		q->buf_size = PAGE_SIZE;
+		nsgs = 1;
+	}
+
+	for (i = 0; i < MT_NUM_RX_ENTRIES; i++) {
+		err = mt76u_buf_alloc(dev, &q->entry[i].ubuf,
+				      nsgs, q->buf_size,
+				      SKB_WITH_OVERHEAD(q->buf_size),
+				      GFP_KERNEL);
+		if (err < 0)
+			return err;
+	}
+	q->ndesc = MT_NUM_RX_ENTRIES;
+
+	return mt76u_submit_rx_buffers(dev);
+}
+
+static void mt76u_free_rx(struct mt76_dev *dev)
+{
+	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
+	int i;
+
+	for (i = 0; i < q->ndesc; i++)
+		mt76u_buf_free(&q->entry[i].ubuf);
+}
+
+static void mt76u_stop_rx(struct mt76_dev *dev)
+{
+	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
+	int i;
+
+	for (i = 0; i < q->ndesc; i++)
+		usb_kill_urb(q->entry[i].ubuf.urb);
+}
+
+int mt76u_skb_dma_info(struct sk_buff *skb, int port, u32 flags)
+{
+	struct sk_buff *iter, *last = skb;
+	u32 info, pad;
+
+	/* Buffer layout:
+	 *	|   4B   | xfer len |      pad       |  4B  |
+	 *	| TXINFO | pkt/cmd  | zero pad to 4B | zero |
+	 *
+	 * length field of TXINFO should be set to 'xfer len'.
+	 */
+	info = FIELD_PREP(MT_TXD_INFO_LEN, round_up(skb->len, 4)) |
+	       FIELD_PREP(MT_TXD_INFO_DPORT, port) | flags;
+	put_unaligned_le32(info, skb_push(skb, sizeof(info)));
+
+	pad = round_up(skb->len, 4) + 4 - skb->len;
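+	/* the alignment pad plus the trailing zero dword is appended to the
+	 * last fragment; for fragmented skbs the head skb length fields are
+	 * updated by hand to account for it
+	 */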
+	skb_walk_frags(skb, iter) {
+		last = iter;
+		if (!iter->next) {
+			skb->data_len += pad;
+			skb->len += pad;
+			break;
+		}
+	}
+
+	if (unlikely(pad)) {
+		if (__skb_pad(last, pad, true))
+			return -ENOMEM;
+		__skb_put(last, pad);
+	}
+	return 0;
+}
+EXPORT_SYMBOL_GPL(mt76u_skb_dma_info);
+
+static void mt76u_tx_tasklet(unsigned long data)
+{
+	struct mt76_dev *dev = (struct mt76_dev *)data;
+	struct mt76u_buf *buf;
+	struct mt76_queue *q;
+	bool wake;
+	int i;
+
+	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+		q = &dev->q_tx[i];
+
+		spin_lock_bh(&q->lock);
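+		/* reap tx entries whose urb has completed, in queue order */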
+		while (true) {
+			buf = &q->entry[q->head].ubuf;
+			if (!buf->done || !q->queued)
+				break;
+
+			dev->drv->tx_complete_skb(dev, q,
+						  &q->entry[q->head],
+						  false);
+
+			if (q->entry[q->head].schedule) {
+				q->entry[q->head].schedule = false;
+				q->swq_queued--;
+			}
+
+			q->head = (q->head + 1) % q->ndesc;
+			q->queued--;
+		}
+		mt76_txq_schedule(dev, q);
+		wake = i < IEEE80211_NUM_ACS && q->queued < q->ndesc - 8;
+		if (!q->queued)
+			wake_up(&dev->tx_wait);
+
+		spin_unlock_bh(&q->lock);
+
+		if (!test_and_set_bit(MT76_READING_STATS, &dev->state))
+			ieee80211_queue_delayed_work(dev->hw,
+						     &dev->usb.stat_work,
+						     msecs_to_jiffies(10));
+
+		if (wake)
+			ieee80211_wake_queue(dev->hw, i);
+	}
+}
+
+static void mt76u_tx_status_data(struct work_struct *work)
+{
+	struct mt76_usb *usb;
+	struct mt76_dev *dev;
+	u8 update = 1;
+	u16 count = 0;
+
+	usb = container_of(work, struct mt76_usb, stat_work.work);
+	dev = container_of(usb, struct mt76_dev, usb);
+
+	while (true) {
+		if (test_bit(MT76_REMOVED, &dev->state))
+			break;
+
+		if (!dev->drv->tx_status_data(dev, &update))
+			break;
+		count++;
+	}
+
+	if (count && test_bit(MT76_STATE_RUNNING, &dev->state))
+		ieee80211_queue_delayed_work(dev->hw, &usb->stat_work,
+					     msecs_to_jiffies(10));
+	else
+		clear_bit(MT76_READING_STATS, &dev->state);
+}
+
+static void mt76u_complete_tx(struct urb *urb)
+{
+	struct mt76u_buf *buf = urb->context;
+	struct mt76_dev *dev = buf->dev;
+
+	if (mt76u_urb_error(urb))
+		dev_err(dev->dev, "tx urb failed: %d\n", urb->status);
+	buf->done = true;
+
+	tasklet_schedule(&dev->usb.tx_tasklet);
+}
+
+static int
+mt76u_tx_build_sg(struct sk_buff *skb, struct urb *urb)
+{
+	int nsgs = 1 + skb_shinfo(skb)->nr_frags;
+	struct sk_buff *iter;
+
+	skb_walk_frags(skb, iter)
+		nsgs += 1 + skb_shinfo(iter)->nr_frags;
+
+	memset(urb->sg, 0, sizeof(*urb->sg) * MT_SG_MAX_SIZE);
+
+	nsgs = min_t(int, MT_SG_MAX_SIZE, nsgs);
+	sg_init_marker(urb->sg, nsgs);
+	urb->num_sgs = nsgs;
+
+	return skb_to_sgvec_nomark(skb, urb->sg, 0, skb->len);
+}
+
+static int
+mt76u_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
+		   struct sk_buff *skb, struct mt76_wcid *wcid,
+		   struct ieee80211_sta *sta)
+{
+	struct usb_interface *intf = to_usb_interface(dev->dev);
+	struct usb_device *udev = interface_to_usbdev(intf);
+	u8 ep = q2ep(q->hw_idx);
+	struct mt76u_buf *buf;
+	u16 idx = q->tail;
+	unsigned int pipe;
+	int err;
+
+	if (q->queued == q->ndesc)
+		return -ENOSPC;
+
+	err = dev->drv->tx_prepare_skb(dev, NULL, skb, q, wcid, sta, NULL);
+	if (err < 0)
+		return err;
+
+	buf = &q->entry[idx].ubuf;
+	buf->done = false;
+
+	err = mt76u_tx_build_sg(skb, buf->urb);
+	if (err < 0)
+		return err;
+
+	pipe = usb_sndbulkpipe(udev, dev->usb.out_ep[ep]);
+	usb_fill_bulk_urb(buf->urb, udev, pipe, NULL, skb->len,
+			  mt76u_complete_tx, buf);
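+	/* the urb is submitted later from mt76u_tx_kick */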
+
+	q->tail = (q->tail + 1) % q->ndesc;
+	q->entry[idx].skb = skb;
+	q->queued++;
+
+	return idx;
+}
+
+static void mt76u_tx_kick(struct mt76_dev *dev, struct mt76_queue *q)
+{
+	struct mt76u_buf *buf;
+	int err;
+
+	while (q->first != q->tail) {
+		buf = &q->entry[q->first].ubuf;
+		err = usb_submit_urb(buf->urb, GFP_ATOMIC);
+		if (err < 0) {
+			if (err == -ENODEV)
+				set_bit(MT76_REMOVED, &dev->state);
+			else
+				dev_err(dev->dev, "tx urb submit failed:%d\n",
+					err);
+			break;
+		}
+		q->first = (q->first + 1) % q->ndesc;
+	}
+}
+
+static int mt76u_alloc_tx(struct mt76_dev *dev)
+{
+	struct mt76u_buf *buf;
+	struct mt76_queue *q;
+	size_t size;
+	int i, j;
+
+	size = MT_SG_MAX_SIZE * sizeof(struct scatterlist);
+	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+		q = &dev->q_tx[i];
+		spin_lock_init(&q->lock);
+		INIT_LIST_HEAD(&q->swq);
+		q->hw_idx = q2hwq(i);
+
+		q->entry = devm_kzalloc(dev->dev,
+					MT_NUM_TX_ENTRIES * sizeof(*q->entry),
+					GFP_KERNEL);
+		if (!q->entry)
+			return -ENOMEM;
+
+		q->ndesc = MT_NUM_TX_ENTRIES;
+		for (j = 0; j < q->ndesc; j++) {
+			buf = &q->entry[j].ubuf;
+			buf->dev = dev;
+
+			buf->urb = usb_alloc_urb(0, GFP_KERNEL);
+			if (!buf->urb)
+				return -ENOMEM;
+
+			buf->urb->sg = devm_kzalloc(dev->dev, size, GFP_KERNEL);
+			if (!buf->urb->sg)
+				return -ENOMEM;
+		}
+	}
+	return 0;
+}
+
+static void mt76u_free_tx(struct mt76_dev *dev)
+{
+	struct mt76_queue *q;
+	int i, j;
+
+	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+		q = &dev->q_tx[i];
+		for (j = 0; j < q->ndesc; j++)
+			usb_free_urb(q->entry[j].ubuf.urb);
+	}
+}
+
+static void mt76u_stop_tx(struct mt76_dev *dev)
+{
+	struct mt76_queue *q;
+	int i, j;
+
+	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+		q = &dev->q_tx[i];
+		for (j = 0; j < q->ndesc; j++)
+			usb_kill_urb(q->entry[j].ubuf.urb);
+	}
+}
+
+void mt76u_stop_queues(struct mt76_dev *dev)
+{
+	tasklet_disable(&dev->usb.rx_tasklet);
+	tasklet_disable(&dev->usb.tx_tasklet);
+
+	mt76u_stop_rx(dev);
+	mt76u_stop_tx(dev);
+}
+EXPORT_SYMBOL_GPL(mt76u_stop_queues);
+
+void mt76u_stop_stat_wk(struct mt76_dev *dev)
+{
+	cancel_delayed_work_sync(&dev->usb.stat_work);
+	clear_bit(MT76_READING_STATS, &dev->state);
+}
+EXPORT_SYMBOL_GPL(mt76u_stop_stat_wk);
+
+void mt76u_queues_deinit(struct mt76_dev *dev)
+{
+	mt76u_stop_queues(dev);
+
+	mt76u_free_rx(dev);
+	mt76u_free_tx(dev);
+}
+EXPORT_SYMBOL_GPL(mt76u_queues_deinit);
+
+int mt76u_alloc_queues(struct mt76_dev *dev)
+{
+	int err;
+
+	err = mt76u_alloc_rx(dev);
+	if (err < 0)
+		goto err;
+
+	err = mt76u_alloc_tx(dev);
+	if (err < 0)
+		goto err;
+
+	return 0;
+err:
+	mt76u_queues_deinit(dev);
+	return err;
+}
+EXPORT_SYMBOL_GPL(mt76u_alloc_queues);
+
+static const struct mt76_queue_ops usb_queue_ops = {
+	.tx_queue_skb = mt76u_tx_queue_skb,
+	.kick = mt76u_tx_kick,
+};
+
+int mt76u_init(struct mt76_dev *dev,
+	       struct usb_interface *intf)
+{
+	static const struct mt76_bus_ops mt76u_ops = {
+		.rr = mt76u_rr,
+		.wr = mt76u_wr,
+		.rmw = mt76u_rmw,
+		.copy = mt76u_copy,
+	};
+	struct mt76_usb *usb = &dev->usb;
+
+	tasklet_init(&usb->rx_tasklet, mt76u_rx_tasklet, (unsigned long)dev);
+	tasklet_init(&usb->tx_tasklet, mt76u_tx_tasklet, (unsigned long)dev);
+	INIT_DELAYED_WORK(&usb->stat_work, mt76u_tx_status_data);
+	skb_queue_head_init(&dev->rx_skb[MT_RXQ_MAIN]);
+
+	init_completion(&usb->mcu.cmpl);
+	mutex_init(&usb->mcu.mutex);
+
+	mutex_init(&usb->usb_ctrl_mtx);
+	dev->bus = &mt76u_ops;
+	dev->queue_ops = &usb_queue_ops;
+
+	return mt76u_set_endpoints(intf, usb);
+}
+EXPORT_SYMBOL_GPL(mt76u_init);
+
+MODULE_AUTHOR("Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/usb_mcu.c b/drivers/net/wireless/mediatek/mt76/usb_mcu.c
new file mode 100644
index 0000000000000000000000000000000000000000..070be803d4637aee4b538467c359cc5d0857f0c6
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/usb_mcu.c
@@ -0,0 +1,242 @@
+/*
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/firmware.h>
+
+#include "mt76.h"
+#include "dma.h"
+
+#define MT_CMD_HDR_LEN			4
+
+#define MT_FCE_DMA_ADDR			0x0230
+#define MT_FCE_DMA_LEN			0x0234
+
+#define MT_TX_CPU_FROM_FCE_CPU_DESC_IDX	0x09a8
+
+struct sk_buff *mt76u_mcu_msg_alloc(const void *data, int len)
+{
+	struct sk_buff *skb;
+
+	skb = alloc_skb(MT_CMD_HDR_LEN + len + 8, GFP_KERNEL);
+	if (!skb)
+		return NULL;
+
+	skb_reserve(skb, MT_CMD_HDR_LEN);
+	skb_put_data(skb, data, len);
+
+	return skb;
+}
+EXPORT_SYMBOL_GPL(mt76u_mcu_msg_alloc);
+
+void mt76u_mcu_complete_urb(struct urb *urb)
+{
+	struct completion *cmpl = urb->context;
+
+	complete(cmpl);
+}
+EXPORT_SYMBOL_GPL(mt76u_mcu_complete_urb);
+
+static int mt76u_mcu_wait_resp(struct mt76_dev *dev, u8 seq)
+{
+	struct mt76_usb *usb = &dev->usb;
+	struct mt76u_buf *buf = &usb->mcu.res;
+	int i, ret;
+	u32 rxfce;
+
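+	/* retry a few times: events carrying a non-matching sequence number
+	 * are discarded and the response buffer is resubmitted for the next
+	 * event
+	 */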
+	for (i = 0; i < 5; i++) {
+		if (!wait_for_completion_timeout(&usb->mcu.cmpl,
+						 msecs_to_jiffies(300)))
+			continue;
+
+		if (buf->urb->status)
+			return -EIO;
+
+		rxfce = get_unaligned_le32(sg_virt(&buf->urb->sg[0]));
+		ret = mt76u_submit_buf(dev, USB_DIR_IN,
+				       MT_EP_IN_CMD_RESP,
+				       buf, GFP_KERNEL,
+				       mt76u_mcu_complete_urb,
+				       &usb->mcu.cmpl);
+		if (ret)
+			return ret;
+
+		if (seq == FIELD_GET(MT_RX_FCE_INFO_CMD_SEQ, rxfce))
+			return 0;
+
+		dev_err(dev->dev, "error: MCU resp evt:%lx seq:%hhx-%lx\n",
+			FIELD_GET(MT_RX_FCE_INFO_EVT_TYPE, rxfce),
+			seq, FIELD_GET(MT_RX_FCE_INFO_CMD_SEQ, rxfce));
+	}
+
+	dev_err(dev->dev, "error: %s timed out\n", __func__);
+	return -ETIMEDOUT;
+}
+
+int mt76u_mcu_send_msg(struct mt76_dev *dev, struct sk_buff *skb,
+		       int cmd, bool wait_resp)
+{
+	struct usb_interface *intf = to_usb_interface(dev->dev);
+	struct usb_device *udev = interface_to_usbdev(intf);
+	struct mt76_usb *usb = &dev->usb;
+	unsigned int pipe;
+	int ret, sent;
+	u8 seq = 0;
+	u32 info;
+
+	if (test_bit(MT76_REMOVED, &dev->state))
+		return 0;
+
+	mutex_lock(&usb->mcu.mutex);
+
+	pipe = usb_sndbulkpipe(udev, usb->out_ep[MT_EP_OUT_INBAND_CMD]);
+	if (wait_resp) {
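+		/* a zero sequence number is reserved for commands that do
+		 * not expect a response, so skip it
+		 */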
+		seq = ++usb->mcu.msg_seq & 0xf;
+		if (!seq)
+			seq = ++usb->mcu.msg_seq & 0xf;
+	}
+
+	info = FIELD_PREP(MT_MCU_MSG_CMD_SEQ, seq) |
+	       FIELD_PREP(MT_MCU_MSG_CMD_TYPE, cmd) |
+	       MT_MCU_MSG_TYPE_CMD;
+	ret = mt76u_skb_dma_info(skb, CPU_TX_PORT, info);
+	if (ret)
+		goto out;
+
+	ret = usb_bulk_msg(udev, pipe, skb->data, skb->len, &sent, 500);
+	if (ret)
+		goto out;
+
+	if (wait_resp)
+		ret = mt76u_mcu_wait_resp(dev, seq);
+
+out:
+	mutex_unlock(&usb->mcu.mutex);
+
+	consume_skb(skb);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(mt76u_mcu_send_msg);
+
+void mt76u_mcu_fw_reset(struct mt76_dev *dev)
+{
+	mt76u_vendor_request(dev, MT_VEND_DEV_MODE,
+			     USB_DIR_OUT | USB_TYPE_VENDOR,
+			     0x1, 0, NULL, 0);
+}
+EXPORT_SYMBOL_GPL(mt76u_mcu_fw_reset);
+
+static int
+__mt76u_mcu_fw_send_data(struct mt76_dev *dev, struct mt76u_buf *buf,
+			 const void *fw_data, int len, u32 dst_addr)
+{
+	u8 *data = sg_virt(&buf->urb->sg[0]);
+	DECLARE_COMPLETION_ONSTACK(cmpl);
+	__le32 info;
+	u32 val;
+	int err;
+
+	info = cpu_to_le32(FIELD_PREP(MT_MCU_MSG_PORT, CPU_TX_PORT) |
+			   FIELD_PREP(MT_MCU_MSG_LEN, len) |
+			   MT_MCU_MSG_TYPE_CMD);
+
+	memcpy(data, &info, sizeof(info));
+	memcpy(data + sizeof(info), fw_data, len);
+	memset(data + sizeof(info) + len, 0, 4);
+
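+	/* program the destination address and length of this fragment in
+	 * the FCE DMA registers before pushing it over the bulk endpoint
+	 */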
+	mt76u_single_wr(dev, MT_VEND_WRITE_FCE,
+			MT_FCE_DMA_ADDR, dst_addr);
+	len = roundup(len, 4);
+	mt76u_single_wr(dev, MT_VEND_WRITE_FCE,
+			MT_FCE_DMA_LEN, len << 16);
+
+	buf->len = MT_CMD_HDR_LEN + len + sizeof(info);
+	err = mt76u_submit_buf(dev, USB_DIR_OUT,
+			       MT_EP_OUT_INBAND_CMD,
+			       buf, GFP_KERNEL,
+			       mt76u_mcu_complete_urb, &cmpl);
+	if (err < 0)
+		return err;
+
+	if (!wait_for_completion_timeout(&cmpl,
+					 msecs_to_jiffies(1000))) {
+		dev_err(dev->dev, "firmware upload timed out\n");
+		usb_kill_urb(buf->urb);
+		return -ETIMEDOUT;
+	}
+
+	if (mt76u_urb_error(buf->urb)) {
+		dev_err(dev->dev, "firmware upload failed: %d\n",
+			buf->urb->status);
+		return buf->urb->status;
+	}
+
+	val = mt76u_rr(dev, MT_TX_CPU_FROM_FCE_CPU_DESC_IDX);
+	val++;
+	mt76u_wr(dev, MT_TX_CPU_FROM_FCE_CPU_DESC_IDX, val);
+
+	return 0;
+}
+
+int mt76u_mcu_fw_send_data(struct mt76_dev *dev, const void *data,
+			   int data_len, u32 max_payload, u32 offset)
+{
+	int err, len, pos = 0, max_len = max_payload - 8;
+	struct mt76u_buf buf;
+
+	err = mt76u_buf_alloc(dev, &buf, 1, max_payload, max_payload,
+			      GFP_KERNEL);
+	if (err < 0)
+		return err;
+
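+	/* upload the image in chunks; 8 bytes of each urb are reserved for
+	 * the TXINFO header and the trailing zero pad
+	 */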
+	while (data_len > 0) {
+		len = min_t(int, data_len, max_len);
+		err = __mt76u_mcu_fw_send_data(dev, &buf, data + pos,
+					       len, offset + pos);
+		if (err < 0)
+			break;
+
+		data_len -= len;
+		pos += len;
+		usleep_range(5000, 10000);
+	}
+	mt76u_buf_free(&buf);
+
+	return err;
+}
+EXPORT_SYMBOL_GPL(mt76u_mcu_fw_send_data);
+
+int mt76u_mcu_init_rx(struct mt76_dev *dev)
+{
+	struct mt76_usb *usb = &dev->usb;
+	int err;
+
+	err = mt76u_buf_alloc(dev, &usb->mcu.res, 1,
+			      MCU_RESP_URB_SIZE, MCU_RESP_URB_SIZE,
+			      GFP_KERNEL);
+	if (err < 0)
+		return err;
+
+	err = mt76u_submit_buf(dev, USB_DIR_IN, MT_EP_IN_CMD_RESP,
+			       &usb->mcu.res, GFP_KERNEL,
+			       mt76u_mcu_complete_urb,
+			       &usb->mcu.cmpl);
+	if (err < 0)
+		mt76u_buf_free(&usb->mcu.res);
+
+	return err;
+}
+EXPORT_SYMBOL_GPL(mt76u_mcu_init_rx);
diff --git a/drivers/net/wireless/mediatek/mt76/usb_trace.c b/drivers/net/wireless/mediatek/mt76/usb_trace.c
new file mode 100644
index 0000000000000000000000000000000000000000..7e1f540f0b7acbc40473b4ab3fe9672f8c0b56a7
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/usb_trace.c
@@ -0,0 +1,23 @@
+/*
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/module.h>
+
+#ifndef __CHECKER__
+#define CREATE_TRACE_POINTS
+#include "usb_trace.h"
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/usb_trace.h b/drivers/net/wireless/mediatek/mt76/usb_trace.h
new file mode 100644
index 0000000000000000000000000000000000000000..52db7012304ab86e73e6937734cb47c02c224330
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/usb_trace.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#if !defined(__MT76_USB_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define __MT76_USB_TRACE_H
+
+#include <linux/tracepoint.h>
+#include "mt76.h"
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM mt76_usb
+
+#define MAXNAME		32
+#define DEV_ENTRY   __array(char, wiphy_name, MAXNAME)
+#define DEV_ASSIGN  strlcpy(__entry->wiphy_name, wiphy_name(dev->hw->wiphy), MAXNAME)
+#define DEV_PR_FMT  "%s"
+#define DEV_PR_ARG  __entry->wiphy_name
+
+#define REG_ENTRY	__field(u32, reg) __field(u32, val)
+#define REG_ASSIGN	__entry->reg = reg; __entry->val = val
+#define REG_PR_FMT	" %04x=%08x"
+#define REG_PR_ARG	__entry->reg, __entry->val
+
+DECLARE_EVENT_CLASS(dev_reg_evt,
+	TP_PROTO(struct mt76_dev *dev, u32 reg, u32 val),
+	TP_ARGS(dev, reg, val),
+	TP_STRUCT__entry(
+		DEV_ENTRY
+		REG_ENTRY
+	),
+	TP_fast_assign(
+		DEV_ASSIGN;
+		REG_ASSIGN;
+	),
+	TP_printk(
+		DEV_PR_FMT REG_PR_FMT,
+		DEV_PR_ARG, REG_PR_ARG
+	)
+);
+
+DEFINE_EVENT(dev_reg_evt, usb_reg_rr,
+	TP_PROTO(struct mt76_dev *dev, u32 reg, u32 val),
+	TP_ARGS(dev, reg, val)
+);
+
+DEFINE_EVENT(dev_reg_evt, usb_reg_wr,
+	TP_PROTO(struct mt76_dev *dev, u32 reg, u32 val),
+	TP_ARGS(dev, reg, val)
+);
+
+#endif
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE usb_trace
+
+#include <trace/define_trace.h>
diff --git a/drivers/net/wireless/mediatek/mt7601u/init.c b/drivers/net/wireless/mediatek/mt7601u/init.c
index d3b611aaf06180d29252ffd72bb11f67467803eb..faea99b7a44518a38ba42b4a1e4cf1e081f8176e 100644
--- a/drivers/net/wireless/mediatek/mt7601u/init.c
+++ b/drivers/net/wireless/mediatek/mt7601u/init.c
@@ -603,6 +603,7 @@ int mt7601u_register_device(struct mt7601u_dev *dev)
 	ieee80211_hw_set(hw, SUPPORTS_HT_CCK_RATES);
 	ieee80211_hw_set(hw, AMPDU_AGGREGATION);
 	ieee80211_hw_set(hw, SUPPORTS_RC_TABLE);
+	ieee80211_hw_set(hw, MFP_CAPABLE);
 	hw->max_rates = 1;
 	hw->max_report_rates = 7;
 	hw->max_rate_tries = 1;
diff --git a/drivers/net/wireless/mediatek/mt7601u/main.c b/drivers/net/wireless/mediatek/mt7601u/main.c
index 7b21016012c333c2ac938318282d338cd39869b3..0f1789020960fbaf03d994747782e45317672317 100644
--- a/drivers/net/wireless/mediatek/mt7601u/main.c
+++ b/drivers/net/wireless/mediatek/mt7601u/main.c
@@ -308,6 +308,17 @@ mt7601u_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
 	int idx = key->keyidx;
 	int ret;
 
+	/* fall back to sw encryption for unsupported ciphers */
+	switch (key->cipher) {
+	case WLAN_CIPHER_SUITE_WEP40:
+	case WLAN_CIPHER_SUITE_WEP104:
+	case WLAN_CIPHER_SUITE_TKIP:
+	case WLAN_CIPHER_SUITE_CCMP:
+		break;
+	default:
+		return -EOPNOTSUPP;
+	}
+
 	if (cmd == SET_KEY) {
 		key->hw_key_idx = wcid->idx;
 		wcid->hw_key_idx = idx;
diff --git a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
index 656ddc65921854b5f78769417c471f57eaa6c961..4aa332f4646b1b79396b55ca6c3c2d2b552debdf 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
@@ -843,6 +843,88 @@ static int qtnf_set_mac_acl(struct wiphy *wiphy,
 	return ret;
 }
 
+static int qtnf_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev,
+			       bool enabled, int timeout)
+{
+	struct qtnf_vif *vif = qtnf_netdev_get_priv(dev);
+	int ret;
+
+	ret = qtnf_cmd_send_pm_set(vif, enabled ? QLINK_PM_AUTO_STANDBY :
+				   QLINK_PM_OFF, timeout);
+	if (ret)
+		pr_err("%s: failed to set PM mode ret=%d\n", dev->name, ret);
+
+	return ret;
+}
+
+#ifdef CONFIG_PM
+static int qtnf_suspend(struct wiphy *wiphy, struct cfg80211_wowlan *wowlan)
+{
+	struct qtnf_wmac *mac = wiphy_priv(wiphy);
+	struct qtnf_vif *vif;
+	int ret = 0;
+
+	vif = qtnf_mac_get_base_vif(mac);
+	if (!vif) {
+		pr_err("MAC%u: primary VIF is not configured\n", mac->macid);
+		ret = -EFAULT;
+		goto exit;
+	}
+
+	if (!wowlan) {
+		pr_debug("WoWLAN triggers are not enabled\n");
+		qtnf_virtual_intf_cleanup(vif->netdev);
+		goto exit;
+	}
+
+	qtnf_scan_done(vif->mac, true);
+
+	ret = qtnf_cmd_send_wowlan_set(vif, wowlan);
+	if (ret) {
+		pr_err("MAC%u: failed to set WoWLAN triggers\n",
+		       mac->macid);
+		goto exit;
+	}
+
+exit:
+	return ret;
+}
+
+static int qtnf_resume(struct wiphy *wiphy)
+{
+	struct qtnf_wmac *mac = wiphy_priv(wiphy);
+	struct qtnf_vif *vif;
+	int ret = 0;
+
+	vif = qtnf_mac_get_base_vif(mac);
+	if (!vif) {
+		pr_err("MAC%u: primary VIF is not configured\n", mac->macid);
+		ret = -EFAULT;
+		goto exit;
+	}
+
+	ret = qtnf_cmd_send_wowlan_set(vif, NULL);
+	if (ret) {
+		pr_err("MAC%u: failed to reset WoWLAN triggers\n",
+		       mac->macid);
+		goto exit;
+	}
+
+exit:
+	return ret;
+}
+
+static void qtnf_set_wakeup(struct wiphy *wiphy, bool enabled)
+{
+	struct qtnf_wmac *mac = wiphy_priv(wiphy);
+	struct qtnf_bus *bus = mac->bus;
+
+	device_set_wakeup_enable(bus->dev, enabled);
+}
+#endif
+
 static struct cfg80211_ops qtn_cfg80211_ops = {
 	.add_virtual_intf	= qtnf_add_virtual_intf,
 	.change_virtual_intf	= qtnf_change_virtual_intf,
@@ -869,6 +951,12 @@ static struct cfg80211_ops qtn_cfg80211_ops = {
 	.channel_switch		= qtnf_channel_switch,
 	.start_radar_detection	= qtnf_start_radar_detection,
 	.set_mac_acl		= qtnf_set_mac_acl,
+	.set_power_mgmt		= qtnf_set_power_mgmt,
+#ifdef CONFIG_PM
+	.suspend		= qtnf_suspend,
+	.resume			= qtnf_resume,
+	.set_wakeup		= qtnf_set_wakeup,
+#endif
 };
 
 static void qtnf_cfg80211_reg_notifier(struct wiphy *wiphy_in,
@@ -921,6 +1009,9 @@ struct wiphy *qtnf_wiphy_allocate(struct qtnf_bus *bus)
 	if (bus->hw_info.hw_capab & QLINK_HW_CAPAB_DFS_OFFLOAD)
 		qtn_cfg80211_ops.start_radar_detection = NULL;
 
+	if (!(bus->hw_info.hw_capab & QLINK_HW_CAPAB_PWR_MGMT))
+		qtn_cfg80211_ops.set_power_mgmt	= NULL;
+
 	wiphy = wiphy_new(&qtn_cfg80211_ops, sizeof(struct qtnf_wmac));
 	if (!wiphy)
 		return NULL;
@@ -975,7 +1066,8 @@ int qtnf_wiphy_register(struct qtnf_hw_info *hw_info, struct qtnf_wmac *mac)
 	wiphy->retry_long = macinfo->lretry_limit;
 	wiphy->coverage_class = macinfo->coverage_class;
 
-	wiphy->max_scan_ssids = QTNF_MAX_SSID_LIST_LENGTH;
+	wiphy->max_scan_ssids =
+		(hw_info->max_scan_ssids) ? hw_info->max_scan_ssids : 1;
 	wiphy->max_scan_ie_len = QTNF_MAX_VSIE_LEN;
 	wiphy->mgmt_stypes = qtnf_mgmt_stypes;
 	wiphy->max_remain_on_channel_duration = 5000;
@@ -994,6 +1086,7 @@ int qtnf_wiphy_register(struct qtnf_hw_info *hw_info, struct qtnf_wmac *mac)
 			WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD |
 			WIPHY_FLAG_AP_UAPSD |
 			WIPHY_FLAG_HAS_CHANNEL_SWITCH;
+	wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
 
 	if (hw_info->hw_capab & QLINK_HW_CAPAB_DFS_OFFLOAD)
 		wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_DFS_OFFLOAD);
@@ -1016,6 +1109,11 @@ int qtnf_wiphy_register(struct qtnf_hw_info *hw_info, struct qtnf_wmac *mac)
 	if (hw_info->hw_capab & QLINK_HW_CAPAB_SCAN_RANDOM_MAC_ADDR)
 		wiphy->features |= NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR;
 
+#ifdef CONFIG_PM
+	if (macinfo->wowlan)
+		wiphy->wowlan = macinfo->wowlan;
+#endif
+
 	if (hw_info->hw_capab & QLINK_HW_CAPAB_REG_UPDATE) {
 		wiphy->regulatory_flags |= REGULATORY_STRICT_REG |
 			REGULATORY_CUSTOM_REG;
diff --git a/drivers/net/wireless/quantenna/qtnfmac/commands.c b/drivers/net/wireless/quantenna/qtnfmac/commands.c
index 42a598f92539a20793e197d4ac1923e072f95d09..ae9e773005339b5ef22f23d5593297ffcbe04a7a 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/commands.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/commands.c
@@ -1092,6 +1092,9 @@ qtnf_cmd_resp_proc_hw_info(struct qtnf_bus *bus,
 		case QTN_TLV_ID_UBOOT_VER:
 			uboot_ver = (const void *)tlv->val;
 			break;
+		case QTN_TLV_ID_MAX_SCAN_SSIDS:
+			hwinfo->max_scan_ssids = *tlv->val;
+			break;
 		default:
 			break;
 		}
@@ -1135,6 +1138,37 @@ qtnf_cmd_resp_proc_hw_info(struct qtnf_bus *bus,
 	return 0;
 }
 
+static void
+qtnf_parse_wowlan_info(struct qtnf_wmac *mac,
+		       const struct qlink_wowlan_capab_data *wowlan)
+{
+	struct qtnf_mac_info *mac_info = &mac->macinfo;
+	const struct qlink_wowlan_support *data1;
+	struct wiphy_wowlan_support *supp;
+
+	supp = kzalloc(sizeof(*supp), GFP_KERNEL);
+	if (!supp)
+		return;
+
+	switch (le16_to_cpu(wowlan->version)) {
+	case 0x1:
+		data1 = (struct qlink_wowlan_support *)wowlan->data;
+
+		supp->flags = WIPHY_WOWLAN_MAGIC_PKT | WIPHY_WOWLAN_DISCONNECT;
+		supp->n_patterns = le32_to_cpu(data1->n_patterns);
+		supp->pattern_max_len = le32_to_cpu(data1->pattern_max_len);
+		supp->pattern_min_len = le32_to_cpu(data1->pattern_min_len);
+
+		mac_info->wowlan = supp;
+		break;
+	default:
+		pr_warn("MAC%u: unsupported WoWLAN version 0x%x\n",
+			mac->macid, le16_to_cpu(wowlan->version));
+		kfree(supp);
+		break;
+	}
+}
+
 static int qtnf_parse_variable_mac_info(struct qtnf_wmac *mac,
 					const u8 *tlv_buf, size_t tlv_buf_size)
 {
@@ -1144,6 +1178,7 @@ static int qtnf_parse_variable_mac_info(struct qtnf_wmac *mac,
 	const struct qlink_iface_comb_num *comb_num;
 	const struct qlink_iface_limit_record *rec;
 	const struct qlink_iface_limit *lim;
+	const struct qlink_wowlan_capab_data *wowlan;
 	u16 rec_len;
 	u16 tlv_type;
 	u16 tlv_value_len;
@@ -1252,7 +1287,31 @@ static int qtnf_parse_variable_mac_info(struct qtnf_wmac *mac,
 			ext_capa_mask = (u8 *)tlv->val;
 			ext_capa_mask_len = tlv_value_len;
 			break;
+		case QTN_TLV_ID_WOWLAN_CAPAB:
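+			/* variable-size TLV: fixed capability header
+			 * followed by a versioned payload
+			 */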
+			if (tlv_value_len < sizeof(*wowlan))
+				return -EINVAL;
+
+			wowlan = (void *)tlv->val;
+			if (!le16_to_cpu(wowlan->len)) {
+				pr_warn("MAC%u: skip empty WoWLAN data\n",
+					mac->macid);
+				break;
+			}
+
+			rec_len = sizeof(*wowlan) + le16_to_cpu(wowlan->len);
+			if (unlikely(tlv_value_len != rec_len)) {
+				pr_warn("MAC%u: WoWLAN data size mismatch\n",
+					mac->macid);
+				return -EINVAL;
+			}
+
+			kfree(mac->macinfo.wowlan);
+			mac->macinfo.wowlan = NULL;
+			qtnf_parse_wowlan_info(mac, wowlan);
+			break;
 		default:
+			pr_warn("MAC%u: unknown TLV type %u\n",
+				mac->macid, tlv_type);
 			break;
 		}
 
@@ -2260,11 +2319,6 @@ int qtnf_cmd_send_scan(struct qtnf_wmac *mac)
 	int count = 0;
 	int ret;
 
-	if (scan_req->n_ssids > QTNF_MAX_SSID_LIST_LENGTH) {
-		pr_err("MAC%u: too many SSIDs in scan request\n", mac->macid);
-		return -EINVAL;
-	}
-
 	cmd_skb = qtnf_cmd_alloc_new_cmdskb(mac->macid, QLINK_VIFID_RSVD,
 					    QLINK_CMD_SCAN,
 					    sizeof(struct qlink_cmd));
@@ -2799,3 +2853,93 @@ int qtnf_cmd_set_mac_acl(const struct qtnf_vif *vif,
 
 	return ret;
 }
+
+int qtnf_cmd_send_pm_set(const struct qtnf_vif *vif, u8 pm_mode, int timeout)
+{
+	struct qtnf_bus *bus = vif->mac->bus;
+	struct sk_buff *cmd_skb;
+	u16 res_code = QLINK_CMD_RESULT_OK;
+	struct qlink_cmd_pm_set *cmd;
+	int ret = 0;
+
+	cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
+					    QLINK_CMD_PM_SET, sizeof(*cmd));
+	if (!cmd_skb)
+		return -ENOMEM;
+
+	cmd = (struct qlink_cmd_pm_set *)cmd_skb->data;
+	cmd->pm_mode = pm_mode;
+	cmd->pm_standby_timer = cpu_to_le32(timeout);
+
+	qtnf_bus_lock(bus);
+
+	ret = qtnf_cmd_send(bus, cmd_skb, &res_code);
+
+	if (unlikely(ret))
+		goto out;
+
+	if (unlikely(res_code != QLINK_CMD_RESULT_OK)) {
+		pr_err("cmd exec failed: 0x%.4X\n", res_code);
+		ret = -EFAULT;
+	}
+
+out:
+	qtnf_bus_unlock(bus);
+	return ret;
+}
+
+int qtnf_cmd_send_wowlan_set(const struct qtnf_vif *vif,
+			     const struct cfg80211_wowlan *wowl)
+{
+	struct qtnf_bus *bus = vif->mac->bus;
+	struct sk_buff *cmd_skb;
+	u16 res_code = QLINK_CMD_RESULT_OK;
+	struct qlink_cmd_wowlan_set *cmd;
+	u32 triggers = 0;
+	int count = 0;
+	int ret = 0;
+
+	cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
+					    QLINK_CMD_WOWLAN_SET, sizeof(*cmd));
+	if (!cmd_skb)
+		return -ENOMEM;
+
+	qtnf_bus_lock(bus);
+
+	cmd = (struct qlink_cmd_wowlan_set *)cmd_skb->data;
+
+	if (wowl) {
+		if (wowl->disconnect)
+			triggers |= QLINK_WOWLAN_TRIG_DISCONNECT;
+
+		if (wowl->magic_pkt)
+			triggers |= QLINK_WOWLAN_TRIG_MAGIC_PKT;
+
+		if (wowl->n_patterns && wowl->patterns) {
+			triggers |= QLINK_WOWLAN_TRIG_PATTERN_PKT;
+			while (count < wowl->n_patterns) {
+				qtnf_cmd_skb_put_tlv_arr(cmd_skb,
+					QTN_TLV_ID_WOWLAN_PATTERN,
+					wowl->patterns[count].pattern,
+					wowl->patterns[count].pattern_len);
+				count++;
+			}
+		}
+	}
+
+	cmd->triggers = cpu_to_le32(triggers);
+
+	ret = qtnf_cmd_send(bus, cmd_skb, &res_code);
+
+	if (unlikely(ret))
+		goto out;
+
+	if (unlikely(res_code != QLINK_CMD_RESULT_OK)) {
+		pr_err("cmd exec failed: 0x%.4X\n", res_code);
+		ret = -EFAULT;
+	}
+
+out:
+	qtnf_bus_unlock(bus);
+	return ret;
+}
diff --git a/drivers/net/wireless/quantenna/qtnfmac/commands.h b/drivers/net/wireless/quantenna/qtnfmac/commands.h
index cf9274add26d854ddeaa324445b48d7f750ea4b7..1ac41156c192c083307fb4848b3a54d594b9d329 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/commands.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/commands.h
@@ -76,5 +76,8 @@ int qtnf_cmd_start_cac(const struct qtnf_vif *vif,
 		       u32 cac_time_ms);
 int qtnf_cmd_set_mac_acl(const struct qtnf_vif *vif,
 			 const struct cfg80211_acl_data *params);
+int qtnf_cmd_send_pm_set(const struct qtnf_vif *vif, u8 pm_mode, int timeout);
+int qtnf_cmd_send_wowlan_set(const struct qtnf_vif *vif,
+			     const struct cfg80211_wowlan *wowl);
 
 #endif /* QLINK_COMMANDS_H_ */
diff --git a/drivers/net/wireless/quantenna/qtnfmac/core.c b/drivers/net/wireless/quantenna/qtnfmac/core.c
index c318340e1bd57a29f3a4b5d171e600e7753ae5f9..19abbc4e23e068498118b562833197c053e999c4 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/core.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/core.c
@@ -495,6 +495,7 @@ static void qtnf_core_mac_detach(struct qtnf_bus *bus, unsigned int macid)
 	qtnf_mac_iface_comb_free(mac);
 	kfree(mac->macinfo.extended_capabilities);
 	kfree(mac->macinfo.extended_capabilities_mask);
+	kfree(mac->macinfo.wowlan);
 	wiphy_free(wiphy);
 	bus->mac[macid] = NULL;
 }
diff --git a/drivers/net/wireless/quantenna/qtnfmac/core.h b/drivers/net/wireless/quantenna/qtnfmac/core.h
index 214435448335cae1a4866651aa625a0c2af4c3f0..a1e338a1f055a7ea906c5fc8ecfd979db84efe03 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/core.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/core.h
@@ -40,7 +40,6 @@
 #undef pr_fmt
 #define pr_fmt(fmt)	KBUILD_MODNAME ": %s: " fmt, __func__
 
-#define QTNF_MAX_SSID_LIST_LENGTH	2
 #define QTNF_MAX_VSIE_LEN		255
 #define QTNF_MAX_INTF			8
 #define QTNF_MAX_EVENT_QUEUE_LEN	255
@@ -111,6 +110,7 @@ struct qtnf_mac_info {
 	u8 *extended_capabilities;
 	u8 *extended_capabilities_mask;
 	u8 extended_capabilities_len;
+	struct wiphy_wowlan_support *wowlan;
 };
 
 struct qtnf_chan_stats {
@@ -145,6 +145,7 @@ struct qtnf_hw_info {
 	u8 total_rx_chain;
 	char fw_version[ETHTOOL_FWVERS_LEN];
 	u32 hw_version;
+	u8 max_scan_ssids;
 };
 
 struct qtnf_vif *qtnf_mac_get_free_vif(struct qtnf_wmac *mac);
diff --git a/drivers/net/wireless/quantenna/qtnfmac/qlink.h b/drivers/net/wireless/quantenna/qtnfmac/qlink.h
index 4a32967d04793862368d3cdbcf9a27e53dd5c6b2..99d37e3efba634e0e649349259eeaf5b5a4a72b9 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/qlink.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/qlink.h
@@ -77,6 +77,7 @@ enum qlink_hw_capab {
 	QLINK_HW_CAPAB_STA_INACT_TIMEOUT	= BIT(1),
 	QLINK_HW_CAPAB_DFS_OFFLOAD		= BIT(2),
 	QLINK_HW_CAPAB_SCAN_RANDOM_MAC_ADDR	= BIT(3),
+	QLINK_HW_CAPAB_PWR_MGMT			= BIT(4),
 };
 
 enum qlink_iface_type {
@@ -256,6 +257,8 @@ enum qlink_cmd_type {
 	QLINK_CMD_CHAN_STATS		= 0x0054,
 	QLINK_CMD_CONNECT		= 0x0060,
 	QLINK_CMD_DISCONNECT		= 0x0061,
+	QLINK_CMD_PM_SET		= 0x0062,
+	QLINK_CMD_WOWLAN_SET		= 0x0063,
 };
 
 /**
@@ -668,6 +671,54 @@ struct qlink_acl_data {
 	struct qlink_mac_address mac_addrs[0];
 } __packed;
 
+/**
+ * enum qlink_pm_mode - Power Management mode
+ *
+ * @QLINK_PM_OFF: normal mode, no power saving enabled
+ * @QLINK_PM_AUTO_STANDBY: enable auto power save mode
+ */
+enum qlink_pm_mode {
+	QLINK_PM_OFF		= 0,
+	QLINK_PM_AUTO_STANDBY	= 1,
+};
+
+/**
+ * struct qlink_cmd_pm_set - data for QLINK_CMD_PM_SET command
+ *
+ * @pm_standby_timer: period of network inactivity in seconds before
+ *	putting the radio into power save mode
+ * @pm_mode: power management mode
+ */
+struct qlink_cmd_pm_set {
+	struct qlink_cmd chdr;
+	__le32 pm_standby_timer;
+	u8 pm_mode;
+} __packed;
+
+/**
+ * enum qlink_wowlan_trigger - WoWLAN wakeup triggers
+ *
+ * @QLINK_WOWLAN_TRIG_DISCONNECT: wakeup on disconnect
+ * @QLINK_WOWLAN_TRIG_MAGIC_PKT: wakeup on magic packet
+ * @QLINK_WOWLAN_TRIG_PATTERN_PKT: wakeup on user-defined packet
+ */
+enum qlink_wowlan_trigger {
+	QLINK_WOWLAN_TRIG_DISCONNECT	= BIT(0),
+	QLINK_WOWLAN_TRIG_MAGIC_PKT	= BIT(1),
+	QLINK_WOWLAN_TRIG_PATTERN_PKT	= BIT(2),
+};
+
+/**
+ * struct qlink_cmd_wowlan_set - data for QLINK_CMD_WOWLAN_SET command
+ *
+ * @triggers: requested bitmask of WoWLAN triggers
+ * @data: variable-size payload, e.g. QTN_TLV_ID_WOWLAN_PATTERN TLVs
+ */
+struct qlink_cmd_wowlan_set {
+	struct qlink_cmd chdr;
+	__le32 triggers;
+	u8 data[0];
+} __packed;
+
 /* QLINK Command Responses messages related definitions
  */
 
@@ -1065,6 +1116,8 @@ struct qlink_event_radar {
  * @QTN_TLV_ID_STA_STATS: per-STA statistics as defined by
  *	&struct qlink_sta_stats. Valid values are marked as such in a bitmap
  *	carried by QTN_TLV_ID_STA_STATS_MAP.
+ * @QTN_TLV_ID_MAX_SCAN_SSIDS: maximum number of SSIDs the device can scan
+ *	for in any given scan.
  */
 enum qlink_tlv_id {
 	QTN_TLV_ID_FRAG_THRESH		= 0x0201,
@@ -1093,6 +1146,9 @@ enum qlink_tlv_id {
 	QTN_TLV_ID_CALIBRATION_VER	= 0x0406,
 	QTN_TLV_ID_UBOOT_VER		= 0x0407,
 	QTN_TLV_ID_RANDOM_MAC_ADDR	= 0x0408,
+	QTN_TLV_ID_MAX_SCAN_SSIDS	= 0x0409,
+	QTN_TLV_ID_WOWLAN_CAPAB		= 0x0410,
+	QTN_TLV_ID_WOWLAN_PATTERN	= 0x0411,
 };
 
 struct qlink_tlv_hdr {
@@ -1380,4 +1436,33 @@ struct qlink_random_mac_addr {
 	u8 mac_addr_mask[ETH_ALEN];
 } __packed;
 
+/**
+ * struct qlink_wowlan_capab_data - data for QTN_TLV_ID_WOWLAN_CAPAB TLV
+ *
+ * WoWLAN capabilities supported by cards.
+ *
+ * @version: version of WoWLAN data structure, to ensure backward
+ *	compatibility for firmware versions with limited WoWLAN support
+ * @len: Total length of WoWLAN data
+ * @data: supported WoWLAN features
+ */
+struct qlink_wowlan_capab_data {
+	__le16 version;
+	__le16 len;
+	u8 data[0];
+} __packed;
+
+/**
+ * struct qlink_wowlan_support - supported WoWLAN capabilities
+ *
+ * @n_patterns: number of supported wakeup patterns
+ * @pattern_max_len: maximum length of each pattern
+ * @pattern_min_len: minimum length of each pattern
+ */
+struct qlink_wowlan_support {
+	__le32 n_patterns;
+	__le32 pattern_max_len;
+	__le32 pattern_min_len;
+} __packed;
+
 #endif /* _QTN_QLINK_H_ */
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c b/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c
index c380c1f56ba6f1a9a8982d4a3543fc2eeb9e8f4e..fa2fd64084ac91c497c0b32f9f465af30d5ad076 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c
@@ -527,24 +527,6 @@ int rt2x00mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
 EXPORT_SYMBOL_GPL(rt2x00mac_set_key);
 #endif /* CONFIG_RT2X00_LIB_CRYPTO */
 
-int rt2x00mac_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
-		      struct ieee80211_sta *sta)
-{
-	struct rt2x00_dev *rt2x00dev = hw->priv;
-
-	return rt2x00dev->ops->lib->sta_add(rt2x00dev, vif, sta);
-}
-EXPORT_SYMBOL_GPL(rt2x00mac_sta_add);
-
-int rt2x00mac_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
-			 struct ieee80211_sta *sta)
-{
-	struct rt2x00_dev *rt2x00dev = hw->priv;
-
-	return rt2x00dev->ops->lib->sta_remove(rt2x00dev, sta);
-}
-EXPORT_SYMBOL_GPL(rt2x00mac_sta_remove);
-
 void rt2x00mac_sw_scan_start(struct ieee80211_hw *hw,
 			     struct ieee80211_vif *vif,
 			     const u8 *mac_addr)
diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c
index a7e0a17aa7e86e867264254ef1f3024d3d664993..08c607c031bc0a804207472a52d3eda75ea0a1f6 100644
--- a/drivers/net/wireless/ray_cs.c
+++ b/drivers/net/wireless/ray_cs.c
@@ -470,7 +470,6 @@ static inline struct rcs __iomem *rcs_base(ray_dev_t *dev)
 static int ray_init(struct net_device *dev)
 {
 	int i;
-	UCHAR *p;
 	struct ccs __iomem *pccs;
 	ray_dev_t *local = netdev_priv(dev);
 	struct pcmcia_device *link = local->finder;
@@ -513,12 +512,9 @@ static int ray_init(struct net_device *dev)
 	init_startup_params(local);
 
 	/* copy mac address to startup parameters */
-	if (parse_addr(phy_addr, local->sparm.b4.a_mac_addr)) {
-		p = local->sparm.b4.a_mac_addr;
-	} else {
+	if (!parse_addr(phy_addr, local->sparm.b4.a_mac_addr)) {
 		memcpy(&local->sparm.b4.a_mac_addr,
 		       &local->startup_res.station_addr, ADDRLEN);
-		p = local->sparm.b4.a_mac_addr;
 	}
 
 	clear_interrupt(local);	/* Clear any interrupt from the card */
diff --git a/drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225se.c b/drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225se.c
index fde89866fa8d648437000dcf0e701793bca58356..51e32df6120b619c33b8a5eedc0b705724fc12df 100644
--- a/drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225se.c
+++ b/drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225se.c
@@ -363,7 +363,7 @@ void rtl8225se_rf_init(struct ieee80211_hw *dev)
 	rtl8187se_rf_writereg(dev, 0x00, 0x0037); mdelay(11);
 	rtl8187se_rf_writereg(dev, 0x04, 0x0160); mdelay(11);
 	rtl8187se_rf_writereg(dev, 0x07, 0x0080); mdelay(11);
-	rtl8187se_rf_writereg(dev, 0x02, 0x088D); mdelay(221);
+	rtl8187se_rf_writereg(dev, 0x02, 0x088D); msleep(221);
 	rtl8187se_rf_writereg(dev, 0x00, 0x0137); mdelay(11);
 	rtl8187se_rf_writereg(dev, 0x07, 0x0000); mdelay(1);
 	rtl8187se_rf_writereg(dev, 0x07, 0x0180); mdelay(1);
@@ -386,7 +386,7 @@ void rtl8225se_rf_init(struct ieee80211_hw *dev)
 	rtl8187se_rf_writereg(dev, 0x00, 0x00BF); mdelay(1);
 	rtl8187se_rf_writereg(dev, 0x0D, 0x08DF); mdelay(1);
 	rtl8187se_rf_writereg(dev, 0x02, 0x004D); mdelay(1);
-	rtl8187se_rf_writereg(dev, 0x04, 0x0975); mdelay(31);
+	rtl8187se_rf_writereg(dev, 0x04, 0x0975); msleep(31);
 	rtl8187se_rf_writereg(dev, 0x00, 0x0197); mdelay(1);
 	rtl8187se_rf_writereg(dev, 0x05, 0x05AB); mdelay(1);
 
diff --git a/drivers/net/wireless/rsi/rsi_91x_hal.c b/drivers/net/wireless/rsi/rsi_91x_hal.c
index 0761e61591bdd3b4d1b2e3ee025678f6524e0bda..27e6baf12e90aebd7ed7a713373bdf58e7a9fdb6 100644
--- a/drivers/net/wireless/rsi/rsi_91x_hal.c
+++ b/drivers/net/wireless/rsi/rsi_91x_hal.c
@@ -26,6 +26,9 @@ static struct ta_metadata metadata_flash_content[] = {
 	{"flash_content", 0x00010000},
 	{"rsi/rs9113_wlan_qspi.rps", 0x00010000},
 	{"rsi/rs9113_wlan_bt_dual_mode.rps", 0x00010000},
+	{"flash_content", 0x00010000},
+	{"rsi/rs9113_ap_bt_dual_mode.rps", 0x00010000},
 };
 
 int rsi_send_pkt_to_bus(struct rsi_common *common, struct sk_buff *skb)
@@ -246,7 +249,7 @@ int rsi_prepare_data_desc(struct rsi_common *common, struct sk_buff *skb)
 		}
 	}
 
-	data_desc->mac_flags = cpu_to_le16(seq_num & 0xfff);
+	data_desc->mac_flags |= cpu_to_le16(seq_num & 0xfff);
 	data_desc->qid_tid = ((skb->priority & 0xf) |
 			      ((tx_params->tid & 0xf) << 4));
 	data_desc->sta_id = tx_params->sta_id;
@@ -842,7 +845,6 @@ static int rsi_load_firmware(struct rsi_hw *adapter)
 	const struct firmware *fw_entry = NULL;
 	u32 regout_val = 0, content_size;
 	u16 tmp_regout_val = 0;
-	u8 *flash_content = NULL;
 	struct ta_metadata *metadata_p;
 	int status;
 
@@ -904,28 +906,22 @@ static int rsi_load_firmware(struct rsi_hw *adapter)
 			__func__, metadata_p->name);
 		return status;
 	}
-	flash_content = kmemdup(fw_entry->data, fw_entry->size, GFP_KERNEL);
-	if (!flash_content) {
-		rsi_dbg(ERR_ZONE, "%s: Failed to copy firmware\n", __func__);
-		status = -EIO;
-		goto fail;
-	}
 	content_size = fw_entry->size;
 	rsi_dbg(INFO_ZONE, "FW Length = %d bytes\n", content_size);
 
 	/* Get the firmware version */
 	common->lmac_ver.ver.info.fw_ver[0] =
-		flash_content[LMAC_VER_OFFSET] & 0xFF;
+		fw_entry->data[LMAC_VER_OFFSET] & 0xFF;
 	common->lmac_ver.ver.info.fw_ver[1] =
-		flash_content[LMAC_VER_OFFSET + 1] & 0xFF;
-	common->lmac_ver.major = flash_content[LMAC_VER_OFFSET + 2] & 0xFF;
+		fw_entry->data[LMAC_VER_OFFSET + 1] & 0xFF;
+	common->lmac_ver.major = fw_entry->data[LMAC_VER_OFFSET + 2] & 0xFF;
 	common->lmac_ver.release_num =
-		flash_content[LMAC_VER_OFFSET + 3] & 0xFF;
-	common->lmac_ver.minor = flash_content[LMAC_VER_OFFSET + 4] & 0xFF;
+		fw_entry->data[LMAC_VER_OFFSET + 3] & 0xFF;
+	common->lmac_ver.minor = fw_entry->data[LMAC_VER_OFFSET + 4] & 0xFF;
 	common->lmac_ver.patch_num = 0;
 	rsi_print_version(common);
 
-	status = bl_write_header(adapter, flash_content, content_size);
+	status = bl_write_header(adapter, (u8 *)fw_entry->data, content_size);
 	if (status) {
 		rsi_dbg(ERR_ZONE,
 			"%s: RPS Image header loading failed\n",
@@ -967,7 +963,7 @@ static int rsi_load_firmware(struct rsi_hw *adapter)
 
 	rsi_dbg(INFO_ZONE, "Burn Command Pass.. Upgrading the firmware\n");
 
-	status = auto_fw_upgrade(adapter, flash_content, content_size);
+	status = auto_fw_upgrade(adapter, (u8 *)fw_entry->data, content_size);
 	if (status == 0) {
 		rsi_dbg(ERR_ZONE, "Firmware upgradation Done\n");
 		goto load_image_cmd;
@@ -981,13 +977,11 @@ static int rsi_load_firmware(struct rsi_hw *adapter)
 
 success:
 	rsi_dbg(ERR_ZONE, "***** Firmware Loading successful *****\n");
-	kfree(flash_content);
 	release_firmware(fw_entry);
 	return 0;
 
 fail:
 	rsi_dbg(ERR_ZONE, "##### Firmware loading failed #####\n");
-	kfree(flash_content);
 	release_firmware(fw_entry);
 	return status;
 }
diff --git a/drivers/net/wireless/rsi/rsi_91x_mac80211.c b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
index 2ca7464b7fa370687d5917578312774ebf0dfebb..4e510cbe0a89f011d6f5af6db22846146702c1e8 100644
--- a/drivers/net/wireless/rsi/rsi_91x_mac80211.c
+++ b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
@@ -416,7 +416,8 @@ static int rsi_mac80211_add_interface(struct ieee80211_hw *hw,
 
 	/* Get free vap index */
 	for (i = 0; i < RSI_MAX_VIFS; i++) {
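+		/* pick a free slot, or reuse the slot already bound to this
+		 * interface address
+		 */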
-		if (!adapter->vifs[i]) {
+		if (!adapter->vifs[i] ||
+		    !memcmp(vif->addr, adapter->vifs[i]->addr, ETH_ALEN)) {
 			vap_idx = i;
 			break;
 		}
diff --git a/drivers/net/wireless/rsi/rsi_91x_main.c b/drivers/net/wireless/rsi/rsi_91x_main.c
index 1485a0c89df2440a4bf49070ba09d1686483292e..01d99ed985eef83b88e5de3661c69a4b9225fde1 100644
--- a/drivers/net/wireless/rsi/rsi_91x_main.c
+++ b/drivers/net/wireless/rsi/rsi_91x_main.c
@@ -122,7 +122,6 @@ static struct sk_buff *rsi_prepare_skb(struct rsi_common *common,
 				       u8 extended_desc)
 {
 	struct ieee80211_tx_info *info;
-	struct skb_info *rx_params;
 	struct sk_buff *skb = NULL;
 	u8 payload_offset;
 	struct ieee80211_vif *vif;
@@ -149,10 +148,6 @@ static struct sk_buff *rsi_prepare_skb(struct rsi_common *common,
 	vif = rsi_get_vif(common->priv, wh->addr1);
 
 	info = IEEE80211_SKB_CB(skb);
-	rx_params = (struct skb_info *)info->driver_data;
-	rx_params->rssi = rsi_get_rssi(buffer);
-	rx_params->channel = rsi_get_connected_channel(vif);
-
 	return skb;
 }
 
@@ -336,7 +331,6 @@ struct rsi_hw *rsi_91x_init(u16 oper_mode)
 	spin_lock_init(&adapter->ps_lock);
 	timer_setup(&common->roc_timer, rsi_roc_timeout, 0);
 	init_completion(&common->wlan_init_completion);
-	common->init_done = true;
 	adapter->device_model = RSI_DEV_9113;
 	common->oper_mode = oper_mode;
 
@@ -374,6 +368,7 @@ struct rsi_hw *rsi_91x_init(u16 oper_mode)
 	}
 #endif
 
+	common->init_done = true;
 	return adapter;
 
 err:
diff --git a/drivers/net/wireless/rsi/rsi_91x_mgmt.c b/drivers/net/wireless/rsi/rsi_91x_mgmt.c
index d0e5937cad6d0b8e23f035ffe47e82ebda7a0d7b..1095df7d957350f20b218bd32c8dadb30d3d9153 100644
--- a/drivers/net/wireless/rsi/rsi_91x_mgmt.c
+++ b/drivers/net/wireless/rsi/rsi_91x_mgmt.c
@@ -334,20 +334,17 @@ static int rsi_load_radio_caps(struct rsi_common *common)
 			struct ieee80211_conf *conf = &hw->conf;
 
 			if (conf_is_ht40_plus(conf)) {
-				radio_caps->radio_cfg_info =
-					RSI_CMDDESC_LOWER_20_ENABLE;
-				radio_caps->radio_info =
-					RSI_CMDDESC_LOWER_20_ENABLE;
+				radio_caps->ppe_ack_rate =
+					cpu_to_le16(LOWER_20_ENABLE |
+						    (LOWER_20_ENABLE >> 12));
 			} else if (conf_is_ht40_minus(conf)) {
-				radio_caps->radio_cfg_info =
-					RSI_CMDDESC_UPPER_20_ENABLE;
-				radio_caps->radio_info =
-					RSI_CMDDESC_UPPER_20_ENABLE;
+				radio_caps->ppe_ack_rate =
+					cpu_to_le16(UPPER_20_ENABLE |
+						    (UPPER_20_ENABLE >> 12));
 			} else {
-				radio_caps->radio_cfg_info =
-					RSI_CMDDESC_40MHZ;
-				radio_caps->radio_info =
-					RSI_CMDDESC_FULL_40_ENABLE;
+				radio_caps->ppe_ack_rate =
+					cpu_to_le16((BW_40MHZ << 12) |
+						    FULL40M_ENABLE);
 			}
 		}
 	}
@@ -749,7 +746,7 @@ int rsi_hal_load_key(struct rsi_common *common,
 			key_descriptor |= RSI_CIPHER_TKIP;
 	}
 	key_descriptor |= RSI_PROTECT_DATA_FRAMES;
-	key_descriptor |= ((key_id << RSI_KEY_ID_OFFSET) & RSI_KEY_ID_MASK);
+	key_descriptor |= (key_id << RSI_KEY_ID_OFFSET);
 
 	rsi_set_len_qno(&set_key->desc_dword0.len_qno,
 			(frame_len - FRAME_DESC_SZ), RSI_WIFI_MGMT_Q);
diff --git a/drivers/net/wireless/rsi/rsi_91x_sdio.c b/drivers/net/wireless/rsi/rsi_91x_sdio.c
index 416981d9922921e87a64b6325c167dab91eab875..5733e440ecafff23599bc5e561ca200e9887ee0a 100644
--- a/drivers/net/wireless/rsi/rsi_91x_sdio.c
+++ b/drivers/net/wireless/rsi/rsi_91x_sdio.c
@@ -1394,10 +1394,7 @@ static const struct dev_pm_ops rsi_pm_ops = {
 #endif
 
 static const struct sdio_device_id rsi_dev_table[] =  {
-	{ SDIO_DEVICE(0x303, 0x100) },
-	{ SDIO_DEVICE(0x041B, 0x0301) },
-	{ SDIO_DEVICE(0x041B, 0x0201) },
-	{ SDIO_DEVICE(0x041B, 0x9330) },
+	{ SDIO_DEVICE(RSI_SDIO_VID_9113, RSI_SDIO_PID_9113) },
 	{ /* Blank */},
 };
 
diff --git a/drivers/net/wireless/rsi/rsi_91x_usb.c b/drivers/net/wireless/rsi/rsi_91x_usb.c
index 6ce6b754df12c120d057d4d3c2374f4397893027..c0a163e404029ce6f3cbb10100d2cf02ba7c0492 100644
--- a/drivers/net/wireless/rsi/rsi_91x_usb.c
+++ b/drivers/net/wireless/rsi/rsi_91x_usb.c
@@ -835,11 +835,7 @@ static int rsi_resume(struct usb_interface *intf)
 #endif
 
 static const struct usb_device_id rsi_dev_table[] = {
-	{ USB_DEVICE(0x0303, 0x0100) },
-	{ USB_DEVICE(0x041B, 0x0301) },
-	{ USB_DEVICE(0x041B, 0x0201) },
-	{ USB_DEVICE(0x041B, 0x9330) },
-	{ USB_DEVICE(0x1618, 0x9113) },
+	{ USB_DEVICE(RSI_USB_VID_9113, RSI_USB_PID_9113) },
 	{ /* Blank */},
 };
 
diff --git a/drivers/net/wireless/rsi/rsi_mgmt.h b/drivers/net/wireless/rsi/rsi_mgmt.h
index 14620935c925829548f11c9e1e20fd208661e0fa..359fbdf85739c346135dca0cfbdeb8dbd0e8fd2f 100644
--- a/drivers/net/wireless/rsi/rsi_mgmt.h
+++ b/drivers/net/wireless/rsi/rsi_mgmt.h
@@ -22,7 +22,7 @@
 #include "rsi_main.h"
 
 #define MAX_MGMT_PKT_SIZE               512
-#define RSI_NEEDED_HEADROOM             80
+#define RSI_NEEDED_HEADROOM             84
 #define RSI_RCV_BUFFER_LEN              2000
 
 #define RSI_11B_MODE                    0
diff --git a/drivers/net/wireless/rsi/rsi_sdio.h b/drivers/net/wireless/rsi/rsi_sdio.h
index 353dbdf31e758208aeeffa918980de8ef0abb439..66dcd2ec90515b96ac04c2d7fb9c5f7e4044333a 100644
--- a/drivers/net/wireless/rsi/rsi_sdio.h
+++ b/drivers/net/wireless/rsi/rsi_sdio.h
@@ -28,6 +28,9 @@
 #include <linux/mmc/sdio_ids.h>
 #include "rsi_main.h"
 
+#define RSI_SDIO_VID_9113    0x041B
+#define RSI_SDIO_PID_9113    0x9330
+
 enum sdio_interrupt_type {
 	BUFFER_FULL         = 0x0,
 	BUFFER_AVAILABLE    = 0x2,
diff --git a/drivers/net/wireless/rsi/rsi_usb.h b/drivers/net/wireless/rsi/rsi_usb.h
index b6fe79f0a513938a2a4757318dc1eaa2dcd28fe0..5b2eddd1a2ee5f7cc372ad1a16a027d3923b8d5b 100644
--- a/drivers/net/wireless/rsi/rsi_usb.h
+++ b/drivers/net/wireless/rsi/rsi_usb.h
@@ -22,6 +22,9 @@
 #include "rsi_main.h"
 #include "rsi_common.h"
 
+#define RSI_USB_VID_9113	0x1618
+#define RSI_USB_PID_9113	0x9113
+
 #define USB_INTERNAL_REG_1           0x25000
 #define RSI_USB_READY_MAGIC_NUM      0xab
 #define FW_STATUS_REG                0x41050012
diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
index 37f785f601c1603e4e28199862f08817715fb640..89b0d0fade9f2bcf9594d2a892b155089d4425ab 100644
--- a/drivers/net/wireless/ti/wlcore/main.c
+++ b/drivers/net/wireless/ti/wlcore/main.c
@@ -6160,16 +6160,16 @@ static int wl1271_register_hw(struct wl1271 *wl)
 	}
 
 	if (oui_addr == 0xdeadbe && nic_addr == 0xef0000) {
-		wl1271_warning("Detected unconfigured mac address in nvs, derive from fuse instead.\n");
+		wl1271_warning("Detected unconfigured mac address in nvs, derive from fuse instead.");
 		if (!strcmp(pdev_data->family->name, "wl18xx")) {
-			wl1271_warning("This default nvs file can be removed from the file system\n");
+			wl1271_warning("This default nvs file can be removed from the file system");
 		} else {
-			wl1271_warning("Your device performance is not optimized.\n");
-			wl1271_warning("Please use the calibrator tool to configure your device.\n");
+			wl1271_warning("Your device performance is not optimized.");
+			wl1271_warning("Please use the calibrator tool to configure your device.");
 		}
 
 		if (wl->fuse_oui_addr == 0 && wl->fuse_nic_addr == 0) {
-			wl1271_warning("Fuse mac address is zero. using random mac\n");
+			wl1271_warning("Fuse mac address is zero. using random mac");
 			/* Use TI oui and a random nic */
 			oui_addr = WLCORE_TI_OUI_ADDRESS;
 			nic_addr = get_random_int();
diff --git a/drivers/net/wireless/ti/wlcore/rx.c b/drivers/net/wireless/ti/wlcore/rx.c
index 0f15696195f884f59c82e431757593362a0489ca..078a4940bc5c3bcd9a496b8afec6536c61fee5c8 100644
--- a/drivers/net/wireless/ti/wlcore/rx.c
+++ b/drivers/net/wireless/ti/wlcore/rx.c
@@ -59,7 +59,7 @@ static u32 wlcore_rx_get_align_buf_size(struct wl1271 *wl, u32 pkt_len)
 static void wl1271_rx_status(struct wl1271 *wl,
 			     struct wl1271_rx_descriptor *desc,
 			     struct ieee80211_rx_status *status,
-			     u8 beacon)
+			     u8 beacon, u8 probe_rsp)
 {
 	memset(status, 0, sizeof(struct ieee80211_rx_status));
 
@@ -106,6 +106,9 @@ static void wl1271_rx_status(struct wl1271 *wl,
 		}
 	}
 
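+	/* tag beacons and probe responses with the rx boottime so that
+	 * reported scan entries carry an accurate timestamp
+	 */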
+	if (beacon || probe_rsp)
+		status->boottime_ns = ktime_get_boot_ns();
+
 	if (beacon)
 		wlcore_set_pending_regdomain_ch(wl, (u16)desc->channel,
 						status->band);
@@ -191,7 +194,8 @@ static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length,
 	if (ieee80211_is_data_present(hdr->frame_control))
 		is_data = 1;
 
-	wl1271_rx_status(wl, desc, IEEE80211_SKB_RXCB(skb), beacon);
+	wl1271_rx_status(wl, desc, IEEE80211_SKB_RXCB(skb), beacon,
+			 ieee80211_is_probe_resp(hdr->frame_control));
 	wlcore_hw_set_rx_csum(wl, desc, skb);
 
 	seq_num = (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4;