path: root/package/mac80211/patches/572-ath9k_fix_tx_flush.patch
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -2149,56 +2149,40 @@ static void ath9k_set_coverage_class(str
 
 static void ath9k_flush(struct ieee80211_hw *hw, bool drop)
 {
-#define ATH_FLUSH_TIMEOUT	60 /* ms */
 	struct ath_softc *sc = hw->priv;
-	struct ath_txq *txq = NULL;
-	struct ath_hw *ah = sc->sc_ah;
-	struct ath_common *common = ath9k_hw_common(ah);
-	int i, j, npend = 0;
+	int timeout = 200; /* ms */
+	int i, j;
 
+	ath9k_ps_wakeup(sc);
 	mutex_lock(&sc->mutex);
 
 	cancel_delayed_work_sync(&sc->tx_complete_work);
 
-	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
-		if (!ATH_TXQ_SETUP(sc, i))
-			continue;
-		txq = &sc->tx.txq[i];
-
-		if (!drop) {
-			for (j = 0; j < ATH_FLUSH_TIMEOUT; j++) {
-				if (!ath9k_has_pending_frames(sc, txq))
-					break;
-				usleep_range(1000, 2000);
-			}
-		}
+	if (drop)
+		timeout = 1;
 
-		if (drop || ath9k_has_pending_frames(sc, txq)) {
-			ath_dbg(common, ATH_DBG_QUEUE, "Drop frames from hw queue:%d\n",
-				txq->axq_qnum);
-			spin_lock_bh(&txq->axq_lock);
-			txq->txq_flush_inprogress = true;
-			spin_unlock_bh(&txq->axq_lock);
-
-			ath9k_ps_wakeup(sc);
-			ath9k_hw_stoptxdma(ah, txq->axq_qnum);
-			npend = ath9k_hw_numtxpending(ah, txq->axq_qnum);
-			ath9k_ps_restore(sc);
-			if (npend)
-				break;
+	for (j = 0; j < timeout; j++) {
+		int npend = 0;
+		for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
+			if (!ATH_TXQ_SETUP(sc, i))
+				continue;
 
-			ath_draintxq(sc, txq, false);
-			txq->txq_flush_inprogress = false;
+			npend += ath9k_has_pending_frames(sc, &sc->tx.txq[i]);
 		}
+
+		if (!npend)
+		    goto out;
+
+		usleep_range(1000, 2000);
 	}
 
-	if (npend) {
+	if (!ath_drain_all_txq(sc, false))
 		ath_reset(sc, false);
-		txq->txq_flush_inprogress = false;
-	}
 
+out:
 	ieee80211_queue_delayed_work(hw, &sc->tx_complete_work, 0);
 	mutex_unlock(&sc->mutex);
+	ath9k_ps_restore(sc);
 }
 
 struct ieee80211_ops ath9k_ops = {
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -189,7 +189,6 @@ struct ath_txq {
 	u32 axq_ampdu_depth;
 	bool stopped;
 	bool axq_tx_inprogress;
-	bool txq_flush_inprogress;
 	struct list_head axq_acq;
 	struct list_head txq_fifo[ATH_TXFIFO_DEPTH];
 	struct list_head txq_fifo_pending;
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -2091,8 +2091,7 @@ static void ath_tx_processq(struct ath_s
 		spin_lock_bh(&txq->axq_lock);
 		if (list_empty(&txq->axq_q)) {
 			txq->axq_link = NULL;
-			if (sc->sc_flags & SC_OP_TXAGGR &&
-			    !txq->txq_flush_inprogress)
+			if (sc->sc_flags & SC_OP_TXAGGR)
 				ath_txq_schedule(sc, txq);
 			spin_unlock_bh(&txq->axq_lock);
 			break;
@@ -2173,7 +2172,7 @@ static void ath_tx_processq(struct ath_s
 
 		spin_lock_bh(&txq->axq_lock);
 
-		if (sc->sc_flags & SC_OP_TXAGGR && !txq->txq_flush_inprogress)
+		if (sc->sc_flags & SC_OP_TXAGGR)
 			ath_txq_schedule(sc, txq);
 		spin_unlock_bh(&txq->axq_lock);
 	}
@@ -2317,18 +2316,17 @@ void ath_tx_edma_tasklet(struct ath_soft
 
 		spin_lock_bh(&txq->axq_lock);
 
-		if (!txq->txq_flush_inprogress) {
-			if (!list_empty(&txq->txq_fifo_pending)) {
-				INIT_LIST_HEAD(&bf_head);
-				bf = list_first_entry(&txq->txq_fifo_pending,
-						      struct ath_buf, list);
-				list_cut_position(&bf_head,
-						  &txq->txq_fifo_pending,
-						  &bf->bf_lastbf->list);
-				ath_tx_txqaddbuf(sc, txq, &bf_head);
-			} else if (sc->sc_flags & SC_OP_TXAGGR)
-				ath_txq_schedule(sc, txq);
-		}
+		if (!list_empty(&txq->txq_fifo_pending)) {
+			INIT_LIST_HEAD(&bf_head);
+			bf = list_first_entry(&txq->txq_fifo_pending,
+					      struct ath_buf, list);
+			list_cut_position(&bf_head,
+					  &txq->txq_fifo_pending,
+					  &bf->bf_lastbf->list);
+			ath_tx_txqaddbuf(sc, txq, &bf_head);
+		} else if (sc->sc_flags & SC_OP_TXAGGR)
+			ath_txq_schedule(sc, txq);
+
 		spin_unlock_bh(&txq->axq_lock);
 	}
 }
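
Note: the reworked ath9k_flush() above replaces the old per-queue 60 ms wait with a single
poll-and-timeout loop over all TX queues, followed by a forced drain (and reset on failure)
only if frames are still pending. The stand-alone C sketch below mimics that control flow in
userspace for illustration; the pending[] array and the has_pending(), hw_make_progress() and
drain_all() helpers are made-up stand-ins for ath9k_has_pending_frames() and
ath_drain_all_txq(), not real driver API.

/*
 * Illustrative sketch of the flush logic: poll every queue until none has
 * pending frames, sleeping ~1 ms per iteration, and fall back to a full
 * drain (reset if that fails) once the timeout expires. The timeout is
 * 200 ms normally, or 1 ms when the caller allows frames to be dropped,
 * mirroring the patched ath9k_flush().
 */
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

#define NUM_TX_QUEUES 10

static int pending[NUM_TX_QUEUES] = { 3, 0, 5, 0, 0, 0, 0, 0, 0, 0 };

/* Stand-in for ath9k_has_pending_frames(): true if queue i still has work. */
static bool has_pending(int i)
{
	return pending[i] > 0;
}

/* Pretend the hardware completes one frame per busy queue per poll. */
static void hw_make_progress(void)
{
	for (int i = 0; i < NUM_TX_QUEUES; i++)
		if (pending[i] > 0)
			pending[i]--;
}

/* Stand-in for ath_drain_all_txq(): returns false if a reset is needed. */
static bool drain_all(void)
{
	for (int i = 0; i < NUM_TX_QUEUES; i++)
		pending[i] = 0;
	return true;
}

static void flush(bool drop)
{
	int timeout = drop ? 1 : 200;	/* ms, as in the patch */

	for (int j = 0; j < timeout; j++) {
		int npend = 0;

		for (int i = 0; i < NUM_TX_QUEUES; i++)
			npend += has_pending(i);

		if (!npend) {
			printf("flushed cleanly after %d ms\n", j);
			return;
		}

		hw_make_progress();
		usleep(1000);		/* usleep_range(1000, 2000) in-kernel */
	}

	/* Timed out (or drop requested): force a drain, reset on failure. */
	if (!drain_all())
		printf("drain failed, would call ath_reset()\n");
	else
		printf("timed out, queues force-drained\n");
}

int main(void)
{
	flush(false);
	return 0;
}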