/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 Intel Deutschland GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 Intel Deutschland GmbH
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include <net/mac80211.h>

#include "mvm.h"
#include "sta.h"
#include "rs.h"

/*
 * New version of ADD_STA_sta command added new fields at the end of the
 * structure, so sending the size of the relevant API's structure is enough to
 * support both API versions.
 */
static inline int iwl_mvm_add_sta_cmd_size(struct iwl_mvm *mvm)
{
	return iwl_mvm_has_new_rx_api(mvm) ?
		sizeof(struct iwl_mvm_add_sta_cmd) :
		sizeof(struct iwl_mvm_add_sta_cmd_v7);
}
static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm,
				    enum nl80211_iftype iftype)
{
	int sta_id;
	u32 reserved_ids = 0;

	BUILD_BUG_ON(IWL_MVM_STATION_COUNT > 32);
	WARN_ON_ONCE(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status));

	lockdep_assert_held(&mvm->mutex);

	/* d0i3/d3 assumes the AP's sta_id (of sta vif) is 0. reserve it. */
	if (iftype != NL80211_IFTYPE_STATION)
		reserved_ids = BIT(0);

	/* Don't take rcu_read_lock() since we are protected by mvm->mutex */
	for (sta_id = 0; sta_id < IWL_MVM_STATION_COUNT; sta_id++) {
		if (BIT(sta_id) & reserved_ids)
			continue;

		if (!rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					       lockdep_is_held(&mvm->mutex)))
			return sta_id;
	}
	return IWL_MVM_STATION_COUNT;
}
/* send station add/update command to firmware */
int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
			   bool update, unsigned int flags)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd add_sta_cmd = {
		.sta_id = mvm_sta->sta_id,
		.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color),
		.add_modify = update ? 1 : 0,
		.station_flags_msk = cpu_to_le32(STA_FLG_FAT_EN_MSK |
						 STA_FLG_MIMO_EN_MSK),
		.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg),
	};
	int ret;
	u32 status;
	u32 agg_size = 0, mpdu_dens = 0;

	if (!update || (flags & STA_MODIFY_QUEUES)) {
		add_sta_cmd.tfd_queue_msk = cpu_to_le32(mvm_sta->tfd_queue_msk);
		memcpy(&add_sta_cmd.addr, sta->addr, ETH_ALEN);

		if (flags & STA_MODIFY_QUEUES)
			add_sta_cmd.modify_mask |= STA_MODIFY_QUEUES;
	}

	switch (sta->bandwidth) {
	case IEEE80211_STA_RX_BW_160:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_160MHZ);
		/* fall through */
	case IEEE80211_STA_RX_BW_80:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_80MHZ);
		/* fall through */
	case IEEE80211_STA_RX_BW_40:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_40MHZ);
		/* fall through */
	case IEEE80211_STA_RX_BW_20:
		if (sta->ht_cap.ht_supported)
			add_sta_cmd.station_flags |=
				cpu_to_le32(STA_FLG_FAT_EN_20MHZ);
		break;
	}

	switch (sta->rx_nss) {
	case 1:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
		break;
	case 2:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO2);
		break;
	case 3 ... 8:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO3);
		break;
	}

	switch (sta->smps_mode) {
	case IEEE80211_SMPS_AUTOMATIC:
	case IEEE80211_SMPS_NUM_MODES:
		WARN_ON(1);
		break;
	case IEEE80211_SMPS_STATIC:
		/* override NSS */
		add_sta_cmd.station_flags &= ~cpu_to_le32(STA_FLG_MIMO_EN_MSK);
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
		break;
	case IEEE80211_SMPS_DYNAMIC:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_RTS_MIMO_PROT);
		break;
	case IEEE80211_SMPS_OFF:
		/* nothing */
		break;
	}

	if (sta->ht_cap.ht_supported) {
		add_sta_cmd.station_flags_msk |=
			cpu_to_le32(STA_FLG_MAX_AGG_SIZE_MSK |
				    STA_FLG_AGG_MPDU_DENS_MSK);

		mpdu_dens = sta->ht_cap.ampdu_density;
	}

	if (sta->vht_cap.vht_supported) {
		agg_size = sta->vht_cap.cap &
			IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
		agg_size >>=
			IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
	} else if (sta->ht_cap.ht_supported) {
		agg_size = sta->ht_cap.ampdu_factor;
	}

	add_sta_cmd.station_flags |=
		cpu_to_le32(agg_size << STA_FLG_MAX_AGG_SIZE_SHIFT);
	add_sta_cmd.station_flags |=
		cpu_to_le32(mpdu_dens << STA_FLG_AGG_MPDU_DENS_SHIFT);

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &add_sta_cmd, &status);
	if (ret)
		return ret;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_ASSOC(mvm, "ADD_STA PASSED\n");
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "ADD_STA failed\n");
		break;
	}

	return ret;
}
static void iwl_mvm_rx_agg_session_expired(unsigned long data)
{
	struct iwl_mvm_baid_data __rcu **rcu_ptr = (void *)data;
	struct iwl_mvm_baid_data *ba_data;
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvm_sta;
	unsigned long timeout;

	rcu_read_lock();

	ba_data = rcu_dereference(*rcu_ptr);

	if (WARN_ON(!ba_data))
		goto unlock;

	if (!ba_data->timeout)
		goto unlock;

	timeout = ba_data->last_rx + TU_TO_JIFFIES(ba_data->timeout * 2);
	if (time_is_after_jiffies(timeout)) {
		mod_timer(&ba_data->session_timer, timeout);
		goto unlock;
	}

	/* Timer expired */
	sta = rcu_dereference(ba_data->mvm->fw_id_to_mac_id[ba_data->sta_id]);
	mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	ieee80211_stop_rx_ba_session_offl(mvm_sta->vif,
					  sta->addr, ba_data->tid);
unlock:
	rcu_read_unlock();
}
static int iwl_mvm_tdls_sta_init(struct iwl_mvm *mvm,
				 struct ieee80211_sta *sta)
{
	unsigned long used_hw_queues;
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, NULL, true, false);
	u32 ac;

	lockdep_assert_held(&mvm->mutex);

	used_hw_queues = iwl_mvm_get_used_hw_queues(mvm, NULL);

	/* Find available queues, and allocate them to the ACs */
	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
		u8 queue = find_first_zero_bit(&used_hw_queues,
					       mvm->first_agg_queue);

		if (queue >= mvm->first_agg_queue) {
			IWL_ERR(mvm, "Failed to allocate STA queue\n");
			return -EBUSY;
		}

		__set_bit(queue, &used_hw_queues);
		mvmsta->hw_queue[ac] = queue;
	}

	/* Found a place for all queues - enable them */
	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
		iwl_mvm_enable_ac_txq(mvm, mvmsta->hw_queue[ac],
				      mvmsta->hw_queue[ac],
				      iwl_mvm_ac_to_tx_fifo[ac], 0,
				      wdg_timeout);
		mvmsta->tfd_queue_msk |= BIT(mvmsta->hw_queue[ac]);
	}

	return 0;
}
static void iwl_mvm_tdls_sta_deinit(struct iwl_mvm *mvm,
				    struct ieee80211_sta *sta)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	unsigned long sta_msk;
	int i;

	lockdep_assert_held(&mvm->mutex);

	/* disable the TDLS STA-specific queues */
	sta_msk = mvmsta->tfd_queue_msk;
	for_each_set_bit(i, &sta_msk, sizeof(sta_msk) * BITS_PER_BYTE)
		iwl_mvm_disable_txq(mvm, i, i, IWL_MAX_TID_COUNT, 0);
}
static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
				   struct ieee80211_sta *sta, u8 ac, int tid,
				   struct ieee80211_hdr *hdr)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_trans_txq_scd_cfg cfg = {
		.fifo = iwl_mvm_ac_to_tx_fifo[ac],
		.sta_id = mvmsta->sta_id,
		.tid = tid,
		.frame_limit = IWL_FRAME_LIMIT,
	};
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
	u8 mac_queue = mvmsta->vif->hw_queue[ac];
	int queue = -1;
	int ssn;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	spin_lock_bh(&mvm->queue_info_lock);

	/*
	 * Non-QoS, QoS NDP and MGMT frames should go to a MGMT queue, if one
	 * exists
	 */
	if (!ieee80211_is_data_qos(hdr->frame_control) ||
	    ieee80211_is_qos_nullfunc(hdr->frame_control)) {
		queue = iwl_mvm_find_free_queue(mvm, IWL_MVM_DQA_MIN_MGMT_QUEUE,
						IWL_MVM_DQA_MAX_MGMT_QUEUE);
		if (queue >= IWL_MVM_DQA_MIN_MGMT_QUEUE)
			IWL_DEBUG_TX_QUEUES(mvm, "Found free MGMT queue #%d\n",
					    queue);

		/* If no such queue is found, we'll use a DATA queue instead */
	}

	if (queue < 0 && mvmsta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) {
		queue = mvmsta->reserved_queue;
		IWL_DEBUG_TX_QUEUES(mvm, "Using reserved queue #%d\n", queue);
	}

	if (queue < 0)
		queue = iwl_mvm_find_free_queue(mvm, IWL_MVM_DQA_MIN_DATA_QUEUE,
						IWL_MVM_DQA_MAX_DATA_QUEUE);

	/*
	 * Mark TXQ as ready, even though it hasn't been fully configured yet,
	 * to make sure no one else takes it.
	 * This will allow avoiding re-acquiring the lock at the end of the
	 * configuration. On error we'll mark it back as free.
	 */
	if (queue >= 0)
		mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;

	spin_unlock_bh(&mvm->queue_info_lock);

	/* TODO: support shared queues for same RA */
	if (queue < 0)
		return -ENOSPC;

	/*
	 * Actual en/disablement of aggregations is through the ADD_STA HCMD,
	 * but for configuring the SCD to send A-MPDUs we need to mark the
	 * queue as aggregatable.
	 * Mark all DATA queues as allowing to be aggregated at some point.
	 */
	cfg.aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
			 queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE);

	IWL_DEBUG_TX_QUEUES(mvm, "Allocating queue #%d to sta %d on tid %d\n",
			    queue, mvmsta->sta_id, tid);

	ssn = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
	iwl_mvm_enable_txq(mvm, queue, mac_queue, ssn, &cfg,
			   wdg_timeout);

	spin_lock_bh(&mvmsta->lock);
	mvmsta->tid_data[tid].txq_id = queue;
	mvmsta->tfd_queue_msk |= BIT(queue);

	if (mvmsta->reserved_queue == queue)
		mvmsta->reserved_queue = IEEE80211_INVAL_HW_QUEUE;
	spin_unlock_bh(&mvmsta->lock);

	ret = iwl_mvm_sta_send_to_fw(mvm, sta, true, STA_MODIFY_QUEUES);
	if (ret)
		goto out_err;

	return 0;

out_err:
	iwl_mvm_disable_txq(mvm, queue, mac_queue, tid, 0);

	return ret;
}
static inline u8 iwl_mvm_tid_to_ac_queue(int tid)
{
	if (tid == IWL_MAX_TID_COUNT)
		return IEEE80211_AC_VO; /* MGMT */

	return tid_to_mac80211_ac[tid];
}
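/*
 * Push out any TX frames that were deferred for this station/TID while no
 * TXQ was allocated yet: allocate a queue if needed, splice the deferred
 * skbs off the tid_data queue, hand them to the normal TX path (or free
 * them if no queue could be set up), and finally wake the mac80211 queue.
 */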
static void iwl_mvm_tx_deferred_stream(struct iwl_mvm *mvm,
				       struct ieee80211_sta *sta, int tid)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
	struct sk_buff *skb;
	struct ieee80211_hdr *hdr;
	struct sk_buff_head deferred_tx;
	u8 mac_queue;
	bool no_queue = false; /* Marks if there is a problem with the queue */
	u8 ac;

	lockdep_assert_held(&mvm->mutex);

	skb = skb_peek(&tid_data->deferred_tx_frames);
	if (!skb)
		return;
	hdr = (void *)skb->data;

	ac = iwl_mvm_tid_to_ac_queue(tid);
	mac_queue = IEEE80211_SKB_CB(skb)->hw_queue;

	if (tid_data->txq_id == IEEE80211_INVAL_HW_QUEUE &&
	    iwl_mvm_sta_alloc_queue(mvm, sta, ac, tid, hdr)) {
		IWL_ERR(mvm,
			"Can't alloc TXQ for sta %d tid %d - dropping frame\n",
			mvmsta->sta_id, tid);

		/*
		 * Mark queue as problematic so later the deferred traffic is
		 * freed, as we can do nothing with it
		 */
		no_queue = true;
	}

	__skb_queue_head_init(&deferred_tx);

	/* Disable bottom-halves when entering TX path */
	local_bh_disable();
	spin_lock(&mvmsta->lock);
	skb_queue_splice_init(&tid_data->deferred_tx_frames, &deferred_tx);
	spin_unlock(&mvmsta->lock);

	while ((skb = __skb_dequeue(&deferred_tx)))
		if (no_queue || iwl_mvm_tx_skb(mvm, skb, sta))
			ieee80211_free_txskb(mvm->hw, skb);
	local_bh_enable();

	/* Wake queue */
	iwl_mvm_start_mac_queues(mvm, BIT(mac_queue));
}
void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk)
{
	struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm,
					   add_stream_wk);
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	unsigned long deferred_tid_traffic;
	int sta_id, tid;

	mutex_lock(&mvm->mutex);

	/* Go over all stations with deferred traffic */
	for_each_set_bit(sta_id, mvm->sta_deferred_frames,
			 IWL_MVM_STATION_COUNT) {
		clear_bit(sta_id, mvm->sta_deferred_frames);
		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
						lockdep_is_held(&mvm->mutex));
		if (IS_ERR_OR_NULL(sta))
			continue;

		mvmsta = iwl_mvm_sta_from_mac80211(sta);
		deferred_tid_traffic = mvmsta->deferred_traffic_tid_map;

		for_each_set_bit(tid, &deferred_tid_traffic,
				 IWL_MAX_TID_COUNT + 1)
			iwl_mvm_tx_deferred_stream(mvm, sta, tid);
	}

	mutex_unlock(&mvm->mutex);
}
static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm,
				      struct ieee80211_sta *sta,
				      enum nl80211_iftype vif_type)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	int queue;

	spin_lock_bh(&mvm->queue_info_lock);

	/* Make sure we have free resources for this STA */
	if (vif_type == NL80211_IFTYPE_STATION && !sta->tdls &&
	    !mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].hw_queue_refcount &&
	    (mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].status ==
	     IWL_MVM_QUEUE_FREE))
		queue = IWL_MVM_DQA_BSS_CLIENT_QUEUE;
	else
		queue = iwl_mvm_find_free_queue(mvm, IWL_MVM_DQA_MIN_DATA_QUEUE,
						IWL_MVM_DQA_MAX_DATA_QUEUE);
	if (queue < 0) {
		spin_unlock_bh(&mvm->queue_info_lock);
		IWL_ERR(mvm, "No available queues for new station\n");
		return -ENOSPC;
	}
	mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED;

	spin_unlock_bh(&mvm->queue_info_lock);

	mvmsta->reserved_queue = queue;

	IWL_DEBUG_TX_QUEUES(mvm, "Reserving data queue #%d for sta_id %d\n",
			    queue, mvmsta->sta_id);

	return 0;
}
int iwl_mvm_add_sta(struct iwl_mvm *mvm,
		    struct ieee80211_vif *vif,
		    struct ieee80211_sta *sta)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_rxq_dup_data *dup_data;
	int i, ret, sta_id;

	lockdep_assert_held(&mvm->mutex);

	if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
		sta_id = iwl_mvm_find_free_sta_id(mvm,
						  ieee80211_vif_type_p2p(vif));
	else
		sta_id = mvm_sta->sta_id;

	if (sta_id == IWL_MVM_STATION_COUNT)
		return -ENOSPC;

	spin_lock_init(&mvm_sta->lock);

	mvm_sta->sta_id = sta_id;
	mvm_sta->mac_id_n_color = FW_CMD_ID_AND_COLOR(mvmvif->id,
						      mvmvif->color);
	mvm_sta->vif = vif;
	mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
	mvm_sta->tx_protection = 0;
	mvm_sta->tt_tx_protection = false;

	/* HW restart, don't assume the memory has been zeroed */
	atomic_set(&mvm->pending_frames[sta_id], 0);
	mvm_sta->tid_disable_agg = 0xffff; /* No aggs at first */
	mvm_sta->tfd_queue_msk = 0;

	/* allocate new queues for a TDLS station */
	if (sta->tdls) {
		ret = iwl_mvm_tdls_sta_init(mvm, sta);
		if (ret)
			return ret;
	} else if (!iwl_mvm_is_dqa_supported(mvm)) {
		for (i = 0; i < IEEE80211_NUM_ACS; i++)
			if (vif->hw_queue[i] != IEEE80211_INVAL_HW_QUEUE)
				mvm_sta->tfd_queue_msk |= BIT(vif->hw_queue[i]);
	}

	/* for HW restart - reset everything but the sequence number */
	for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
		u16 seq = mvm_sta->tid_data[i].seq_number;
		memset(&mvm_sta->tid_data[i], 0, sizeof(mvm_sta->tid_data[i]));
		mvm_sta->tid_data[i].seq_number = seq;

		if (!iwl_mvm_is_dqa_supported(mvm))
			continue;

		/*
		 * Mark all queues for this STA as unallocated and defer TX
		 * frames until the queue is allocated
		 */
		mvm_sta->tid_data[i].txq_id = IEEE80211_INVAL_HW_QUEUE;
		skb_queue_head_init(&mvm_sta->tid_data[i].deferred_tx_frames);
	}
	mvm_sta->deferred_traffic_tid_map = 0;
	mvm_sta->agg_tids = 0;

	if (iwl_mvm_has_new_rx_api(mvm) &&
	    !test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
		dup_data = kcalloc(mvm->trans->num_rx_queues,
				   sizeof(*dup_data), GFP_KERNEL);
		if (!dup_data)
			return -ENOMEM;
		mvm_sta->dup_data = dup_data;
	}

	if (iwl_mvm_is_dqa_supported(mvm)) {
		ret = iwl_mvm_reserve_sta_stream(mvm, sta,
						 ieee80211_vif_type_p2p(vif));
		if (ret)
			goto err;
	}

	ret = iwl_mvm_sta_send_to_fw(mvm, sta, false, 0);
	if (ret)
		goto err;

	if (vif->type == NL80211_IFTYPE_STATION) {
		if (!sta->tdls) {
			WARN_ON(mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT);
			mvmvif->ap_sta_id = sta_id;
		} else {
			WARN_ON(mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT);
		}
	}

	rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id], sta);

	return 0;

err:
	iwl_mvm_tdls_sta_deinit(mvm, sta);
	return ret;
}
int iwl_mvm_update_sta(struct iwl_mvm *mvm,
		       struct ieee80211_vif *vif,
		       struct ieee80211_sta *sta)
{
	return iwl_mvm_sta_send_to_fw(mvm, sta, true, 0);
}
int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
		      bool drain)
{
	struct iwl_mvm_add_sta_cmd cmd = {};
	int ret;
	u32 status;

	lockdep_assert_held(&mvm->mutex);

	cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
	cmd.sta_id = mvmsta->sta_id;
	cmd.add_modify = STA_MODE_MODIFY;
	cmd.station_flags = drain ? cpu_to_le32(STA_FLG_DRAIN_FLOW) : 0;
	cmd.station_flags_msk = cpu_to_le32(STA_FLG_DRAIN_FLOW);

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);
	if (ret)
		return ret;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_INFO(mvm, "Frames for staid %d will be drained in fw\n",
			       mvmsta->sta_id);
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "Couldn't drain frames for staid %d\n",
			mvmsta->sta_id);
		break;
	}

	return ret;
}
/*
 * Remove a station from the FW table. Before sending the command to remove
 * the station validate that the station is indeed known to the driver (sanity
 * only).
 */
static int iwl_mvm_rm_sta_common(struct iwl_mvm *mvm, u8 sta_id)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_rm_sta_cmd rm_sta_cmd = {
		.sta_id = sta_id,
	};
	int ret;

	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					lockdep_is_held(&mvm->mutex));

	/* Note: internal stations are marked as error values */
	if (!sta) {
		IWL_ERR(mvm, "Invalid station id\n");
		return -EINVAL;
	}

	ret = iwl_mvm_send_cmd_pdu(mvm, REMOVE_STA, 0,
				   sizeof(rm_sta_cmd), &rm_sta_cmd);
	if (ret) {
		IWL_ERR(mvm, "Failed to remove station. Id=%d\n", sta_id);
		return ret;
	}

	return 0;
}
void iwl_mvm_sta_drained_wk(struct work_struct *wk)
{
	struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, sta_drained_wk);
	u8 sta_id;

	/*
	 * The mutex is needed because of the SYNC cmd, but not only: if the
	 * work would run concurrently with iwl_mvm_rm_sta, it would run before
	 * iwl_mvm_rm_sta sets the station as busy, and exit. Then
	 * iwl_mvm_rm_sta would set the station as busy, and nobody will clean
	 * it up afterwards.
	 */
	mutex_lock(&mvm->mutex);

	for_each_set_bit(sta_id, mvm->sta_drained, IWL_MVM_STATION_COUNT) {
		int ret;
		struct ieee80211_sta *sta =
			rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
						  lockdep_is_held(&mvm->mutex));

		/*
		 * This station is in use or RCU-removed; the latter happens in
		 * managed mode, where mac80211 removes the station before we
		 * can remove it from firmware (we can only do that after the
		 * MAC is marked unassociated), and possibly while the deauth
		 * frame to disconnect from the AP is still queued. Then, the
		 * station pointer is -ENOENT when the last skb is reclaimed.
		 */
		if (!IS_ERR(sta) || PTR_ERR(sta) == -ENOENT)
			continue;

		if (PTR_ERR(sta) == -EINVAL) {
			IWL_ERR(mvm, "Drained sta %d, but it is internal?\n",
				sta_id);
			continue;
		}

		if (!sta) {
			IWL_ERR(mvm, "Drained sta %d, but it was NULL?\n",
				sta_id);
			continue;
		}

		WARN_ON(PTR_ERR(sta) != -EBUSY);
		/* This station was removed and we waited until it got drained,
		 * we can now proceed and remove it.
		 */
		ret = iwl_mvm_rm_sta_common(mvm, sta_id);
		if (ret) {
			IWL_ERR(mvm,
				"Couldn't remove sta %d after it was drained\n",
				sta_id);
			continue;
		}
		RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL);
		clear_bit(sta_id, mvm->sta_drained);

		if (mvm->tfd_drained[sta_id]) {
			unsigned long i, msk = mvm->tfd_drained[sta_id];

			for_each_set_bit(i, &msk, sizeof(msk) * BITS_PER_BYTE)
				iwl_mvm_disable_txq(mvm, i, i,
						    IWL_MAX_TID_COUNT, 0);

			mvm->tfd_drained[sta_id] = 0;
			IWL_DEBUG_TDLS(mvm, "Drained sta %d, with queues %ld\n",
				       sta_id, msk);
		}
	}

	mutex_unlock(&mvm->mutex);
}
static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm,
				       struct ieee80211_vif *vif,
				       struct iwl_mvm_sta *mvm_sta)
{
	int ac;
	int i;

	lockdep_assert_held(&mvm->mutex);

	for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) {
		if (mvm_sta->tid_data[i].txq_id == IEEE80211_INVAL_HW_QUEUE)
			continue;

		ac = iwl_mvm_tid_to_ac_queue(i);
		iwl_mvm_disable_txq(mvm, mvm_sta->tid_data[i].txq_id,
				    vif->hw_queue[ac], i, 0);
		mvm_sta->tid_data[i].txq_id = IEEE80211_INVAL_HW_QUEUE;
	}
}
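/*
 * Remove a station: drain and flush its TX queues, disable the DQA queues
 * where applicable, and either remove the station from the firmware right
 * away or defer the removal to the drained worker if frames are still
 * pending for it.
 */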
int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
		   struct ieee80211_vif *vif,
		   struct ieee80211_sta *sta)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (iwl_mvm_has_new_rx_api(mvm))
		kfree(mvm_sta->dup_data);

	if ((vif->type == NL80211_IFTYPE_STATION &&
	     mvmvif->ap_sta_id == mvm_sta->sta_id) ||
	    iwl_mvm_is_dqa_supported(mvm)) {
		ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);
		if (ret)
			return ret;
		/* flush its queues here since we are freeing mvm_sta */
		ret = iwl_mvm_flush_tx_path(mvm, mvm_sta->tfd_queue_msk, 0);
		if (ret)
			return ret;
		ret = iwl_trans_wait_tx_queue_empty(mvm->trans,
						    mvm_sta->tfd_queue_msk);
		if (ret)
			return ret;
		ret = iwl_mvm_drain_sta(mvm, mvm_sta, false);

		/* If DQA is supported - the queues can be disabled now */
		if (iwl_mvm_is_dqa_supported(mvm))
			iwl_mvm_disable_sta_queues(mvm, vif, mvm_sta);

		/* if we are associated - we can't remove the AP STA now */
		if (vif->bss_conf.assoc)
			return ret;

		/* unassoc - go ahead - remove the AP STA now */
		mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT;

		/* clear d0i3_ap_sta_id if no longer relevant */
		if (mvm->d0i3_ap_sta_id == mvm_sta->sta_id)
			mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT;
	}

	/*
	 * This shouldn't happen - the TDLS channel switch should be canceled
	 * before the STA is removed.
	 */
	if (WARN_ON_ONCE(mvm->tdls_cs.peer.sta_id == mvm_sta->sta_id)) {
		mvm->tdls_cs.peer.sta_id = IWL_MVM_STATION_COUNT;
		cancel_delayed_work(&mvm->tdls_cs.dwork);
	}

	/*
	 * Make sure that the tx response code sees the station as -EBUSY and
	 * calls the drain worker.
	 */
	spin_lock_bh(&mvm_sta->lock);
	/*
	 * There are frames pending on the AC queues for this station.
	 * We need to wait until all the frames are drained...
	 */
	if (atomic_read(&mvm->pending_frames[mvm_sta->sta_id])) {
		rcu_assign_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id],
				   ERR_PTR(-EBUSY));
		spin_unlock_bh(&mvm_sta->lock);

		/* disable TDLS sta queues on drain complete */
		if (sta->tdls) {
			mvm->tfd_drained[mvm_sta->sta_id] =
				mvm_sta->tfd_queue_msk;
			IWL_DEBUG_TDLS(mvm, "Draining TDLS sta %d\n",
				       mvm_sta->sta_id);
		}

		ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);
	} else {
		spin_unlock_bh(&mvm_sta->lock);

		if (sta->tdls)
			iwl_mvm_tdls_sta_deinit(mvm, sta);

		ret = iwl_mvm_rm_sta_common(mvm, mvm_sta->sta_id);
		RCU_INIT_POINTER(mvm->fw_id_to_mac_id[mvm_sta->sta_id], NULL);
	}

	return ret;
}
int iwl_mvm_rm_sta_id(struct iwl_mvm *mvm,
		      struct ieee80211_vif *vif,
		      u8 sta_id)
{
	int ret = iwl_mvm_rm_sta_common(mvm, sta_id);

	lockdep_assert_held(&mvm->mutex);

	RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL);
	return ret;
}
int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm,
			     struct iwl_mvm_int_sta *sta,
			     u32 qmask, enum nl80211_iftype iftype)
{
	if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
		sta->sta_id = iwl_mvm_find_free_sta_id(mvm, iftype);
		if (WARN_ON_ONCE(sta->sta_id == IWL_MVM_STATION_COUNT))
			return -ENOSPC;
	}

	sta->tfd_queue_msk = qmask;

	/* put a non-NULL value so iterating over the stations won't stop */
	rcu_assign_pointer(mvm->fw_id_to_mac_id[sta->sta_id], ERR_PTR(-EINVAL));
	return 0;
}
static void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm,
				    struct iwl_mvm_int_sta *sta)
{
	RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta->sta_id], NULL);
	memset(sta, 0, sizeof(struct iwl_mvm_int_sta));
	sta->sta_id = IWL_MVM_STATION_COUNT;
}
static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm,
				      struct iwl_mvm_int_sta *sta,
				      const u8 *addr,
				      u16 mac_id, u16 color)
{
	struct iwl_mvm_add_sta_cmd cmd;
	int ret;
	u32 status;

	lockdep_assert_held(&mvm->mutex);

	memset(&cmd, 0, sizeof(cmd));
	cmd.sta_id = sta->sta_id;
	cmd.mac_id_n_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mac_id,
							     color));

	cmd.tfd_queue_msk = cpu_to_le32(sta->tfd_queue_msk);
	cmd.tid_disable_tx = cpu_to_le16(0xffff);

	if (addr)
		memcpy(cmd.addr, addr, ETH_ALEN);

	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);
	if (ret)
		return ret;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_INFO(mvm, "Internal station added.\n");
		return 0;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "Add internal station failed, status=0x%x\n",
			status);
		break;
	}
	return ret;
}
int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm)
{
	unsigned int wdg_timeout = iwlmvm_mod_params.tfd_q_hang_detect ?
					mvm->cfg->base_params->wd_timeout :
					IWL_WATCHDOG_DISABLED;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	/* Map Aux queue to fifo - needs to happen before adding Aux station */
	iwl_mvm_enable_ac_txq(mvm, mvm->aux_queue, mvm->aux_queue,
			      IWL_MVM_TX_FIFO_MCAST, 0, wdg_timeout);

	/* Allocate aux station and assign to it the aux queue */
	ret = iwl_mvm_allocate_int_sta(mvm, &mvm->aux_sta, BIT(mvm->aux_queue),
				       NL80211_IFTYPE_UNSPECIFIED);
	if (ret)
		return ret;

	ret = iwl_mvm_add_int_sta_common(mvm, &mvm->aux_sta, NULL,
					 MAC_INDEX_AUX, 0);
	if (ret)
		iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
	return ret;
}
int iwl_mvm_add_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	lockdep_assert_held(&mvm->mutex);

	return iwl_mvm_add_int_sta_common(mvm, &mvm->snif_sta, vif->addr,
					  mvmvif->id, 0);
}

int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	int ret;

	lockdep_assert_held(&mvm->mutex);

	ret = iwl_mvm_rm_sta_common(mvm, mvm->snif_sta.sta_id);
	if (ret)
		IWL_WARN(mvm, "Failed sending remove station\n");

	return ret;
}

void iwl_mvm_dealloc_snif_sta(struct iwl_mvm *mvm)
{
	iwl_mvm_dealloc_int_sta(mvm, &mvm->snif_sta);
}
void iwl_mvm_del_aux_sta(struct iwl_mvm *mvm)
{
	lockdep_assert_held(&mvm->mutex);

	iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
}
/*
 * Send the add station command for the vif's broadcast station.
 * Assumes that the station was already allocated.
 *
 * @mvm: the mvm component
 * @vif: the interface to which the broadcast station is added
 * @bsta: the broadcast station to add.
 */
int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta;
	static const u8 _baddr[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
	const u8 *baddr = _baddr;

	lockdep_assert_held(&mvm->mutex);

	if (iwl_mvm_is_dqa_supported(mvm)) {
		struct iwl_trans_txq_scd_cfg cfg = {
			.fifo = IWL_MVM_TX_FIFO_VO,
			.sta_id = mvmvif->bcast_sta.sta_id,
			.tid = IWL_MAX_TID_COUNT,
			.aggregate = false,
			.frame_limit = IWL_FRAME_LIMIT,
		};
		unsigned int wdg_timeout =
			iwl_mvm_get_wd_timeout(mvm, vif, false, false);
		int queue;

		if ((vif->type == NL80211_IFTYPE_AP) &&
		    (mvmvif->bcast_sta.tfd_queue_msk &
		     BIT(IWL_MVM_DQA_AP_PROBE_RESP_QUEUE)))
			queue = IWL_MVM_DQA_AP_PROBE_RESP_QUEUE;
		else if ((vif->type == NL80211_IFTYPE_P2P_DEVICE) &&
			 (mvmvif->bcast_sta.tfd_queue_msk &
			  BIT(IWL_MVM_DQA_P2P_DEVICE_QUEUE)))
			queue = IWL_MVM_DQA_P2P_DEVICE_QUEUE;
		else if (WARN(1, "Missed required TXQ for adding bcast STA\n"))
			return -EINVAL;

		iwl_mvm_enable_txq(mvm, queue, vif->hw_queue[0], 0, &cfg,
				   wdg_timeout);
	}

	if (vif->type == NL80211_IFTYPE_ADHOC)
		baddr = vif->bss_conf.bssid;

	if (WARN_ON_ONCE(bsta->sta_id == IWL_MVM_STATION_COUNT))
		return -ENOSPC;

	return iwl_mvm_add_int_sta_common(mvm, bsta, baddr,
					  mvmvif->id, mvmvif->color);
}
/* Send the FW a request to remove the station from its internal data
 * structures, but DO NOT remove the entry from the local data structures. */
int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	int ret;

	lockdep_assert_held(&mvm->mutex);

	ret = iwl_mvm_rm_sta_common(mvm, mvmvif->bcast_sta.sta_id);
	if (ret)
		IWL_WARN(mvm, "Failed sending remove station\n");
	return ret;
}
int iwl_mvm_alloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	u32 qmask = 0;

	lockdep_assert_held(&mvm->mutex);

	if (!iwl_mvm_is_dqa_supported(mvm))
		qmask = iwl_mvm_mac_get_queues_mask(vif);

	if (vif->type == NL80211_IFTYPE_AP) {
		/*
		 * The firmware defines the TFD queue mask to only be relevant
		 * for *unicast* queues, so the multicast (CAB) queue shouldn't
		 * be included.
		 */
		qmask &= ~BIT(vif->cab_queue);

		if (iwl_mvm_is_dqa_supported(mvm))
			qmask |= BIT(IWL_MVM_DQA_AP_PROBE_RESP_QUEUE);
	} else if (iwl_mvm_is_dqa_supported(mvm) &&
		   vif->type == NL80211_IFTYPE_P2P_DEVICE) {
		qmask |= BIT(IWL_MVM_DQA_P2P_DEVICE_QUEUE);
	}

	return iwl_mvm_allocate_int_sta(mvm, &mvmvif->bcast_sta, qmask,
					ieee80211_vif_type_p2p(vif));
}
/* Allocate a new station entry for the broadcast station to the given vif,
 * and send it to the FW.
 * Note that each P2P mac should have its own broadcast station.
 *
 * @mvm: the mvm component
 * @vif: the interface to which the broadcast station is added
 * @bsta: the broadcast station to add. */
int iwl_mvm_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	ret = iwl_mvm_alloc_bcast_sta(mvm, vif);
	if (ret)
		return ret;

	ret = iwl_mvm_send_add_bcast_sta(mvm, vif);
	if (ret)
		iwl_mvm_dealloc_int_sta(mvm, bsta);

	return ret;
}
void iwl_mvm_dealloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	iwl_mvm_dealloc_int_sta(mvm, &mvmvif->bcast_sta);
}

/*
 * Send the FW a request to remove the station from its internal data
 * structures, and in addition remove it from the local data structure.
 */
int iwl_mvm_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	int ret;

	lockdep_assert_held(&mvm->mutex);

	ret = iwl_mvm_send_rm_bcast_sta(mvm, vif);

	iwl_mvm_dealloc_bcast_sta(mvm, vif);

	return ret;
}
#define IWL_MAX_RX_BA_SESSIONS 16

static void iwl_mvm_sync_rxq_del_ba(struct iwl_mvm *mvm, u8 baid)
{
	struct iwl_mvm_delba_notif notif = {
		.metadata.type = IWL_MVM_RXQ_NOTIF_DEL_BA,
		.metadata.sync = 1,
		.delba.baid = baid,
	};
	iwl_mvm_sync_rx_queues_internal(mvm, (void *)&notif, sizeof(notif));
}
static void iwl_mvm_free_reorder(struct iwl_mvm *mvm,
				 struct iwl_mvm_baid_data *data)
{
	int i;

	iwl_mvm_sync_rxq_del_ba(mvm, data->baid);

	for (i = 0; i < mvm->trans->num_rx_queues; i++) {
		int j;
		struct iwl_mvm_reorder_buffer *reorder_buf =
			&data->reorder_buf[i];

		spin_lock_bh(&reorder_buf->lock);
		if (likely(!reorder_buf->num_stored)) {
			spin_unlock_bh(&reorder_buf->lock);
			continue;
		}

		/*
		 * This shouldn't happen in regular DELBA since the internal
		 * delBA notification should trigger a release of all frames in
		 * the reorder buffer.
		 */
		WARN_ON(1);

		for (j = 0; j < reorder_buf->buf_size; j++)
			__skb_queue_purge(&reorder_buf->entries[j]);
		/*
		 * Prevent timer re-arm. This prevents a very far fetched case
		 * where we timed out on the notification. There may be prior
		 * RX frames pending in the RX queue before the notification
		 * that might get processed between now and the actual deletion
		 * and we would re-arm the timer although we are deleting the
		 * reorder buffer.
		 */
		reorder_buf->removed = true;
		spin_unlock_bh(&reorder_buf->lock);
		del_timer_sync(&reorder_buf->reorder_timer);
	}
}
static void iwl_mvm_init_reorder_buffer(struct iwl_mvm *mvm,
					u32 sta_id,
					struct iwl_mvm_baid_data *data,
					u16 ssn, u8 buf_size)
{
	int i;

	for (i = 0; i < mvm->trans->num_rx_queues; i++) {
		struct iwl_mvm_reorder_buffer *reorder_buf =
			&data->reorder_buf[i];
		int j;

		reorder_buf->num_stored = 0;
		reorder_buf->head_sn = ssn;
		reorder_buf->buf_size = buf_size;
		/* rx reorder timer */
		reorder_buf->reorder_timer.function =
			iwl_mvm_reorder_timer_expired;
		reorder_buf->reorder_timer.data = (unsigned long)reorder_buf;
		init_timer(&reorder_buf->reorder_timer);
		spin_lock_init(&reorder_buf->lock);
		reorder_buf->mvm = mvm;
		reorder_buf->queue = i;
		reorder_buf->sta_id = sta_id;
		for (j = 0; j < reorder_buf->buf_size; j++)
			__skb_queue_head_init(&reorder_buf->entries[j]);
	}
}
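/*
 * Start or stop an RX BA (block-ack) session for the given station/TID via
 * the ADD_STA command. On start, the firmware returns a BAID and the
 * per-queue reorder buffers are initialized; on stop, the reorder buffers
 * are freed, the session timer is cancelled and the BAID mapping released.
 */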
int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
		       int tid, u16 ssn, bool start, u8 buf_size, u16 timeout)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd cmd = {};
	struct iwl_mvm_baid_data *baid_data = NULL;
	int ret;
	u32 status;

	lockdep_assert_held(&mvm->mutex);

	if (start && mvm->rx_ba_sessions >= IWL_MAX_RX_BA_SESSIONS) {
		IWL_WARN(mvm, "Not enough RX BA SESSIONS\n");
		return -ENOSPC;
	}

	if (iwl_mvm_has_new_rx_api(mvm) && start) {
		/*
		 * Allocate here so if allocation fails we can bail out early
		 * before starting the BA session in the firmware
		 */
		baid_data = kzalloc(sizeof(*baid_data) +
				    mvm->trans->num_rx_queues *
				    sizeof(baid_data->reorder_buf[0]),
				    GFP_KERNEL);
		if (!baid_data)
			return -ENOMEM;
	}

	cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
	cmd.sta_id = mvm_sta->sta_id;
	cmd.add_modify = STA_MODE_MODIFY;
	if (start) {
		cmd.add_immediate_ba_tid = (u8) tid;
		cmd.add_immediate_ba_ssn = cpu_to_le16(ssn);
		cmd.rx_ba_window = cpu_to_le16((u16)buf_size);
	} else {
		cmd.remove_immediate_ba_tid = (u8) tid;
	}
	cmd.modify_mask = start ? STA_MODIFY_ADD_BA_TID :
				  STA_MODIFY_REMOVE_BA_TID;

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);
	if (ret)
		goto out_free;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_INFO(mvm, "RX BA Session %sed in fw\n",
			       start ? "start" : "stopp");
		break;
	case ADD_STA_IMMEDIATE_BA_FAILURE:
		IWL_WARN(mvm, "RX BA Session refused by fw\n");
		ret = -ENOSPC;
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "RX BA Session failed %sing, status 0x%x\n",
			start ? "start" : "stopp", status);
		break;
	}

	if (ret)
		goto out_free;

	if (start) {
		u8 baid;

		mvm->rx_ba_sessions++;

		if (!iwl_mvm_has_new_rx_api(mvm))
			return 0;

		if (WARN_ON(!(status & IWL_ADD_STA_BAID_VALID_MASK))) {
			ret = -EINVAL;
			goto out_free;
		}
		baid = (u8)((status & IWL_ADD_STA_BAID_MASK) >>
			    IWL_ADD_STA_BAID_SHIFT);
		baid_data->baid = baid;
		baid_data->timeout = timeout;
		baid_data->last_rx = jiffies;
		init_timer(&baid_data->session_timer);
		baid_data->session_timer.function =
			iwl_mvm_rx_agg_session_expired;
		baid_data->session_timer.data =
			(unsigned long)&mvm->baid_map[baid];
		baid_data->mvm = mvm;
		baid_data->tid = tid;
		baid_data->sta_id = mvm_sta->sta_id;

		mvm_sta->tid_to_baid[tid] = baid;
		if (timeout)
			mod_timer(&baid_data->session_timer,
				  TU_TO_EXP_TIME(timeout * 2));

		iwl_mvm_init_reorder_buffer(mvm, mvm_sta->sta_id,
					    baid_data, ssn, buf_size);
		/*
		 * protect the BA data with RCU to cover a case where our
		 * internal RX sync mechanism will timeout (not that it's
		 * supposed to happen) and we will free the session data while
		 * RX is being processed in parallel
		 */
		WARN_ON(rcu_access_pointer(mvm->baid_map[baid]));
		rcu_assign_pointer(mvm->baid_map[baid], baid_data);
	} else if (mvm->rx_ba_sessions > 0) {
		u8 baid = mvm_sta->tid_to_baid[tid];

		/* check that restart flow didn't zero the counter */
		mvm->rx_ba_sessions--;
		if (!iwl_mvm_has_new_rx_api(mvm))
			return 0;

		if (WARN_ON(baid == IWL_RX_REORDER_DATA_INVALID_BAID))
			return -EINVAL;

		baid_data = rcu_access_pointer(mvm->baid_map[baid]);
		if (WARN_ON(!baid_data))
			return -EINVAL;

		/* synchronize all rx queues so we can safely delete */
		iwl_mvm_free_reorder(mvm, baid_data);
		del_timer_sync(&baid_data->session_timer);
		RCU_INIT_POINTER(mvm->baid_map[baid], NULL);
		kfree_rcu(baid_data, rcu_head);
	}
	return 0;

out_free:
	kfree(baid_data);
	return ret;
}
static int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
			      int tid, u8 queue, bool start)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd cmd = {};
	int ret;
	u32 status;

	lockdep_assert_held(&mvm->mutex);

	if (start) {
		mvm_sta->tfd_queue_msk |= BIT(queue);
		mvm_sta->tid_disable_agg &= ~BIT(tid);
	} else {
		/* In DQA-mode the queue isn't removed on agg termination */
		if (!iwl_mvm_is_dqa_supported(mvm))
			mvm_sta->tfd_queue_msk &= ~BIT(queue);
		mvm_sta->tid_disable_agg |= BIT(tid);
	}

	cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
	cmd.sta_id = mvm_sta->sta_id;
	cmd.add_modify = STA_MODE_MODIFY;
	cmd.modify_mask = STA_MODIFY_QUEUES | STA_MODIFY_TID_DISABLE_TX;
	cmd.tfd_queue_msk = cpu_to_le32(mvm_sta->tfd_queue_msk);
	cmd.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg);

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);
	if (ret)
		return ret;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "TX BA Session failed %sing, status 0x%x\n",
			start ? "start" : "stopp", status);
		break;
	}

	return ret;
}
const u8 tid_to_mac80211_ac[] = {
	IEEE80211_AC_BE,
	IEEE80211_AC_BK,
	IEEE80211_AC_BK,
	IEEE80211_AC_BE,
	IEEE80211_AC_VI,
	IEEE80211_AC_VI,
	IEEE80211_AC_VO,
	IEEE80211_AC_VO,
	IEEE80211_AC_VO, /* We treat MGMT as TID 8, which is set as AC_VO */
};

static const u8 tid_to_ucode_ac[] = {
	AC_BE,
	AC_BK,
	AC_BK,
	AC_BE,
	AC_VI,
	AC_VI,
	AC_VO,
	AC_VO,
};
int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			     struct ieee80211_sta *sta, u16 tid, u16 *ssn)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data;
	int txq_id;
	int ret;

	if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
		return -EINVAL;

	if (mvmsta->tid_data[tid].state != IWL_AGG_OFF) {
		IWL_ERR(mvm, "Start AGG when state is not IWL_AGG_OFF %d!\n",
			mvmsta->tid_data[tid].state);
		return -ENXIO;
	}

	lockdep_assert_held(&mvm->mutex);

	spin_lock_bh(&mvmsta->lock);

	/* possible race condition - we entered D0i3 while starting agg */
	if (test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status)) {
		spin_unlock_bh(&mvmsta->lock);
		IWL_ERR(mvm, "Entered D0i3 while starting Tx agg\n");
		return -EIO;
	}

	spin_lock_bh(&mvm->queue_info_lock);

	/*
	 * Note the possible cases:
	 *  1. In DQA mode with an enabled TXQ - TXQ needs to become agg'ed
	 *  2. Non-DQA mode: the TXQ hasn't yet been enabled, so find a free
	 *	one and mark it as reserved
	 *  3. In DQA mode, but no traffic yet on this TID: same treatment as in
	 *	non-DQA mode, since the TXQ hasn't yet been allocated
	 */
	txq_id = mvmsta->tid_data[tid].txq_id;
	if (!iwl_mvm_is_dqa_supported(mvm) ||
	    mvm->queue_info[txq_id].status != IWL_MVM_QUEUE_READY) {
		txq_id = iwl_mvm_find_free_queue(mvm, mvm->first_agg_queue,
						 mvm->last_agg_queue);
		if (txq_id < 0) {
			ret = txq_id;
			spin_unlock_bh(&mvm->queue_info_lock);
			IWL_ERR(mvm, "Failed to allocate agg queue\n");
			goto release_locks;
		}

		/* TXQ hasn't yet been enabled, so mark it only as reserved */
		mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_RESERVED;
	}
	spin_unlock_bh(&mvm->queue_info_lock);

	IWL_DEBUG_TX_QUEUES(mvm,
			    "AGG for tid %d will be on queue #%d\n",
			    tid, txq_id);

	tid_data = &mvmsta->tid_data[tid];
	tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
	tid_data->txq_id = txq_id;
	*ssn = tid_data->ssn;

	IWL_DEBUG_TX_QUEUES(mvm,
			    "Start AGG: sta %d tid %d queue %d - ssn = %d, next_recl = %d\n",
			    mvmsta->sta_id, tid, txq_id, tid_data->ssn,
			    tid_data->next_reclaimed);

	if (tid_data->ssn == tid_data->next_reclaimed) {
		tid_data->state = IWL_AGG_STARTING;
		ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
	} else {
		tid_data->state = IWL_EMPTYING_HW_QUEUE_ADDBA;
	}

	ret = 0;

release_locks:
	spin_unlock_bh(&mvmsta->lock);

	return ret;
}
int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			    struct ieee80211_sta *sta, u16 tid, u8 buf_size,
			    bool amsdu)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, vif, sta->tdls, false);
	int queue, ret;
	bool alloc_queue = true;
	u16 ssn;

	struct iwl_trans_txq_scd_cfg cfg = {
		.sta_id = mvmsta->sta_id,
		.tid = tid,
		.frame_limit = buf_size,
		.aggregate = true,
	};

	BUILD_BUG_ON((sizeof(mvmsta->agg_tids) * BITS_PER_BYTE)
		     != IWL_MAX_TID_COUNT);

	buf_size = min_t(int, buf_size, LINK_QUAL_AGG_FRAME_LIMIT_DEF);

	spin_lock_bh(&mvmsta->lock);
	ssn = tid_data->ssn;
	queue = tid_data->txq_id;
	tid_data->state = IWL_AGG_ON;
	mvmsta->agg_tids |= BIT(tid);
	tid_data->ssn = 0xffff;
	tid_data->amsdu_in_ampdu_allowed = amsdu;
	spin_unlock_bh(&mvmsta->lock);

	cfg.fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];

	/* In DQA mode, the existing queue might need to be reconfigured */
	if (iwl_mvm_is_dqa_supported(mvm)) {
		spin_lock_bh(&mvm->queue_info_lock);
		/* Maybe there is no need to even alloc a queue... */
		if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_READY)
			alloc_queue = false;
		spin_unlock_bh(&mvm->queue_info_lock);

		/*
		 * Only reconfig the SCD for the queue if the window size has
		 * changed from current (become smaller)
		 */
		if (!alloc_queue && buf_size < mvmsta->max_agg_bufsize) {
			/*
			 * If reconfiguring an existing queue, it first must be
			 * drained
			 */
			ret = iwl_trans_wait_tx_queue_empty(mvm->trans,
							    BIT(queue));
			if (ret) {
				IWL_ERR(mvm,
					"Error draining queue before reconfig\n");
				return ret;
			}

			ret = iwl_mvm_reconfig_scd(mvm, queue, cfg.fifo,
						   mvmsta->sta_id, tid,
						   buf_size, ssn);
			if (ret) {
				IWL_ERR(mvm,
					"Error reconfiguring TXQ #%d\n", queue);
				return ret;
			}
		}
	}

	if (alloc_queue)
		iwl_mvm_enable_txq(mvm, queue,
				   vif->hw_queue[tid_to_mac80211_ac[tid]], ssn,
				   &cfg, wdg_timeout);

	ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
	if (ret)
		return -EIO;

	/* No need to mark as reserved */
	spin_lock_bh(&mvm->queue_info_lock);
	mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
	spin_unlock_bh(&mvm->queue_info_lock);

	/*
	 * Even though in theory the peer could have different
	 * aggregation reorder buffer sizes for different sessions,
	 * our ucode doesn't allow for that and has a global limit
	 * for each station. Therefore, use the minimum of all the
	 * aggregation sessions and our default value.
	 */
	mvmsta->max_agg_bufsize =
		min(mvmsta->max_agg_bufsize, buf_size);
	mvmsta->lq_sta.lq.agg_frame_cnt_limit = mvmsta->max_agg_bufsize;

	IWL_DEBUG_HT(mvm, "Tx aggregation enabled on ra = %pM tid = %d\n",
		     sta->addr, tid);

	return iwl_mvm_send_lq_cmd(mvm, &mvmsta->lq_sta.lq, false);
}
int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			    struct ieee80211_sta *sta, u16 tid)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
	u16 txq_id;
	int err;

	/*
	 * If mac80211 is cleaning its state, then say that we finished since
	 * our state has been cleared anyway.
	 */
	if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
		return 0;
	}

	spin_lock_bh(&mvmsta->lock);

	txq_id = tid_data->txq_id;

	IWL_DEBUG_TX_QUEUES(mvm, "Stop AGG: sta %d tid %d q %d state %d\n",
			    mvmsta->sta_id, tid, txq_id, tid_data->state);

	mvmsta->agg_tids &= ~BIT(tid);

	spin_lock_bh(&mvm->queue_info_lock);
	/*
	 * The TXQ is marked as reserved only if no traffic came through yet.
	 * This means no traffic has been sent on this TID (agg'd or not), so
	 * we no longer have use for the queue. Since it hasn't even been
	 * allocated through iwl_mvm_enable_txq, we can just mark it back as
	 * free.
	 */
	if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED)
		mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE;
	spin_unlock_bh(&mvm->queue_info_lock);

	switch (tid_data->state) {
	case IWL_AGG_ON:
		tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);

		IWL_DEBUG_TX_QUEUES(mvm,
				    "ssn = %d, next_recl = %d\n",
				    tid_data->ssn, tid_data->next_reclaimed);

		/* There are still packets for this RA / TID in the HW */
		if (tid_data->ssn != tid_data->next_reclaimed) {
			tid_data->state = IWL_EMPTYING_HW_QUEUE_DELBA;
			err = 0;
			break;
		}

		tid_data->ssn = 0xffff;
		tid_data->state = IWL_AGG_OFF;
		spin_unlock_bh(&mvmsta->lock);

		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);

		iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);

		if (!iwl_mvm_is_dqa_supported(mvm)) {
			int mac_queue = vif->hw_queue[tid_to_mac80211_ac[tid]];

			iwl_mvm_disable_txq(mvm, txq_id, mac_queue, tid, 0);
		}
		return 0;
	case IWL_AGG_STARTING:
	case IWL_EMPTYING_HW_QUEUE_ADDBA:
		/*
		 * The agg session has been stopped before it was set up. This
		 * can happen when the AddBA timer times out for example.
		 */

		/* No barriers since we are under mutex */
		lockdep_assert_held(&mvm->mutex);

		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
		tid_data->state = IWL_AGG_OFF;
		err = 0;
		break;
	default:
		IWL_ERR(mvm,
			"Stopping AGG while state not ON or starting for %d on %d (%d)\n",
			mvmsta->sta_id, tid, tid_data->state);
		IWL_ERR(mvm,
			"\ttid_data->txq_id = %d\n", tid_data->txq_id);
		err = -EINVAL;
	}

	spin_unlock_bh(&mvmsta->lock);

	return err;
}
int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			     struct ieee80211_sta *sta, u16 tid)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
	u16 txq_id;
	enum iwl_mvm_agg_state old_state;

	/*
	 * First set the agg state to OFF to avoid calling
	 * ieee80211_stop_tx_ba_cb in iwl_mvm_check_ratid_empty.
	 */
	spin_lock_bh(&mvmsta->lock);
	txq_id = tid_data->txq_id;
	IWL_DEBUG_TX_QUEUES(mvm, "Flush AGG: sta %d tid %d q %d state %d\n",
			    mvmsta->sta_id, tid, txq_id, tid_data->state);
	old_state = tid_data->state;
	tid_data->state = IWL_AGG_OFF;
	mvmsta->agg_tids &= ~BIT(tid);
	spin_unlock_bh(&mvmsta->lock);

	spin_lock_bh(&mvm->queue_info_lock);
	/*
	 * The TXQ is marked as reserved only if no traffic came through yet.
	 * This means no traffic has been sent on this TID (agg'd or not), so
	 * we no longer have use for the queue. Since it hasn't even been
	 * allocated through iwl_mvm_enable_txq, we can just mark it back as
	 * free.
	 */
	if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED)
		mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE;
	spin_unlock_bh(&mvm->queue_info_lock);

	if (old_state >= IWL_AGG_ON) {
		iwl_mvm_drain_sta(mvm, mvmsta, true);
		if (iwl_mvm_flush_tx_path(mvm, BIT(txq_id), 0))
			IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
		iwl_trans_wait_tx_queue_empty(mvm->trans,
					      mvmsta->tfd_queue_msk);
		iwl_mvm_drain_sta(mvm, mvmsta, false);

		iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);

		if (!iwl_mvm_is_dqa_supported(mvm)) {
			int mac_queue = vif->hw_queue[tid_to_mac80211_ac[tid]];

			iwl_mvm_disable_txq(mvm, tid_data->txq_id, mac_queue,
					    tid, 0);
		}
	}

	return 0;
}
static int iwl_mvm_set_fw_key_idx(struct iwl_mvm *mvm)
{
	int i, max = -1, max_offs = -1;

	lockdep_assert_held(&mvm->mutex);

	/* Pick the unused key offset with the highest 'deleted'
	 * counter. Every time a key is deleted, all the counters
	 * are incremented and the one that was just deleted is
	 * reset to zero. Thus, the highest counter is the one
	 * that was deleted longest ago. Pick that one.
	 */
	for (i = 0; i < STA_KEY_MAX_NUM; i++) {
		if (test_bit(i, mvm->fw_key_table))
			continue;
		if (mvm->fw_key_deleted[i] > max) {
			max = mvm->fw_key_deleted[i];
			max_offs = i;
		}
	}

	if (max_offs < 0)
		return STA_KEY_IDX_INVALID;

	return max_offs;
}
static struct iwl_mvm_sta *iwl_mvm_get_key_sta(struct iwl_mvm *mvm,
					       struct ieee80211_vif *vif,
					       struct ieee80211_sta *sta)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	if (sta)
		return iwl_mvm_sta_from_mac80211(sta);

	/*
	 * The device expects GTKs for station interfaces to be
	 * installed as GTKs for the AP station. If we have no
	 * station ID, then use AP's station ID.
	 */
	if (vif->type == NL80211_IFTYPE_STATION &&
	    mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) {
		u8 sta_id = mvmvif->ap_sta_id;

		/*
		 * It is possible that the 'sta' parameter is NULL,
		 * for example when a GTK is removed - the sta_id will then
		 * be the AP ID, and no station was passed by mac80211.
		 */
		return iwl_mvm_sta_from_staid_protected(mvm, sta_id);
	}

	return NULL;
}
static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
				struct iwl_mvm_sta *mvm_sta,
				struct ieee80211_key_conf *keyconf, bool mcast,
				u32 tkip_iv32, u16 *tkip_p1k, u32 cmd_flags,
				u8 key_offset)
{
	struct iwl_mvm_add_sta_key_cmd cmd = {};
	__le16 key_flags;
	int ret;
	u32 status;
	u16 keyidx;
	int i;
	u8 sta_id = mvm_sta->sta_id;

	keyidx = (keyconf->keyidx << STA_KEY_FLG_KEYID_POS) &
		 STA_KEY_FLG_KEYID_MSK;
	key_flags = cpu_to_le16(keyidx);
	key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_KEY_MAP);

	switch (keyconf->cipher) {
	case WLAN_CIPHER_SUITE_TKIP:
		key_flags |= cpu_to_le16(STA_KEY_FLG_TKIP);
		cmd.tkip_rx_tsc_byte2 = tkip_iv32;
		for (i = 0; i < 5; i++)
			cmd.tkip_rx_ttak[i] = cpu_to_le16(tkip_p1k[i]);
		memcpy(cmd.key, keyconf->key, keyconf->keylen);
		break;
	case WLAN_CIPHER_SUITE_CCMP:
		key_flags |= cpu_to_le16(STA_KEY_FLG_CCM);
		memcpy(cmd.key, keyconf->key, keyconf->keylen);
		break;
	case WLAN_CIPHER_SUITE_WEP104:
		key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_13BYTES);
		/* fall through */
	case WLAN_CIPHER_SUITE_WEP40:
		key_flags |= cpu_to_le16(STA_KEY_FLG_WEP);
		memcpy(cmd.key + 3, keyconf->key, keyconf->keylen);
		break;
	default:
		key_flags |= cpu_to_le16(STA_KEY_FLG_EXT);
		memcpy(cmd.key, keyconf->key, keyconf->keylen);
	}

	if (mcast)
		key_flags |= cpu_to_le16(STA_KEY_MULTICAST);

	cmd.key_offset = key_offset;
	cmd.key_flags = key_flags;
	cmd.sta_id = sta_id;

	status = ADD_STA_SUCCESS;
	if (cmd_flags & CMD_ASYNC)
		ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA_KEY, CMD_ASYNC,
					   sizeof(cmd), &cmd);
	else
		ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, sizeof(cmd),
						  &cmd, &status);

	switch (status) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_WEP(mvm, "MODIFY_STA: set dynamic key passed\n");
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "MODIFY_STA: set dynamic key failed\n");
		break;
	}

	return ret;
}
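/*
 * Install or remove the IGTK (management group key) for a station via the
 * MGMT_MCAST_KEY command. Only AES-CMAC keys with index 4 or 5 are accepted;
 * removal is signalled by setting the STA_KEY_NOT_VALID control flag.
 */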
static int iwl_mvm_send_sta_igtk(struct iwl_mvm *mvm,
				 struct ieee80211_key_conf *keyconf,
				 u8 sta_id, bool remove_key)
{
	struct iwl_mvm_mgmt_mcast_key_cmd igtk_cmd = {};

	/* verify the key details match the required command's expectations */
	if (WARN_ON((keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC) ||
		    (keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) ||
		    (keyconf->keyidx != 4 && keyconf->keyidx != 5)))
		return -EINVAL;

	igtk_cmd.key_id = cpu_to_le32(keyconf->keyidx);
	igtk_cmd.sta_id = cpu_to_le32(sta_id);

	if (remove_key) {
		igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_NOT_VALID);
	} else {
		struct ieee80211_key_seq seq;
		const u8 *pn;

		memcpy(igtk_cmd.IGTK, keyconf->key, keyconf->keylen);
		ieee80211_get_key_rx_seq(keyconf, 0, &seq);
		pn = seq.aes_cmac.pn;
		igtk_cmd.receive_seq_cnt = cpu_to_le64(((u64) pn[5] << 0) |
						       ((u64) pn[4] << 8) |
						       ((u64) pn[3] << 16) |
						       ((u64) pn[2] << 24) |
						       ((u64) pn[1] << 32) |
						       ((u64) pn[0] << 40));
	}

	IWL_DEBUG_INFO(mvm, "%s igtk for sta %u\n",
		       remove_key ? "removing" : "installing",
		       igtk_cmd.sta_id);

	return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0,
				    sizeof(igtk_cmd), &igtk_cmd);
}
static inline u8 *iwl_mvm_get_mac_addr(struct iwl_mvm *mvm,
				       struct ieee80211_vif *vif,
				       struct ieee80211_sta *sta)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	if (sta)
		return sta->addr;

	if (vif->type == NL80211_IFTYPE_STATION &&
	    mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) {
		u8 sta_id = mvmvif->ap_sta_id;
		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
						lockdep_is_held(&mvm->mutex));
		return sta->addr;
	}

	return NULL;
}
static int __iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
				 struct ieee80211_vif *vif,
				 struct ieee80211_sta *sta,
				 struct ieee80211_key_conf *keyconf,
				 u8 key_offset,
				 bool mcast)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	int ret;
	const u8 *addr;
	struct ieee80211_key_seq seq;
	u16 p1k[5];

	switch (keyconf->cipher) {
	case WLAN_CIPHER_SUITE_TKIP:
		addr = iwl_mvm_get_mac_addr(mvm, vif, sta);
		/* get phase 1 key from mac80211 */
		ieee80211_get_key_rx_seq(keyconf, 0, &seq);
		ieee80211_get_tkip_rx_p1k(keyconf, addr, seq.tkip.iv32, p1k);
		ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast,
					   seq.tkip.iv32, p1k, 0, key_offset);
		break;
	case WLAN_CIPHER_SUITE_CCMP:
	case WLAN_CIPHER_SUITE_WEP40:
	case WLAN_CIPHER_SUITE_WEP104:
		ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast,
					   0, NULL, 0, key_offset);
		break;
	default:
		ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast,
					   0, NULL, 0, key_offset);
	}

	return ret;
}
static int __iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, u8 sta_id,
				    struct ieee80211_key_conf *keyconf,
				    bool mcast)
{
	struct iwl_mvm_add_sta_key_cmd cmd = {};
	__le16 key_flags;
	int ret;
	u32 status;

	key_flags = cpu_to_le16((keyconf->keyidx << STA_KEY_FLG_KEYID_POS) &
				 STA_KEY_FLG_KEYID_MSK);
	key_flags |= cpu_to_le16(STA_KEY_FLG_NO_ENC | STA_KEY_FLG_WEP_KEY_MAP);
	key_flags |= cpu_to_le16(STA_KEY_NOT_VALID);

	if (mcast)
		key_flags |= cpu_to_le16(STA_KEY_MULTICAST);

	cmd.key_flags = key_flags;
	cmd.key_offset = keyconf->hw_key_idx;
	cmd.sta_id = sta_id;

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, sizeof(cmd),
					  &cmd, &status);

	switch (status) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_WEP(mvm, "MODIFY_STA: remove sta key passed\n");
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "MODIFY_STA: remove sta key failed\n");
		break;
	}

	return ret;
}
int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
			struct ieee80211_vif *vif,
			struct ieee80211_sta *sta,
			struct ieee80211_key_conf *keyconf,
			u8 key_offset)
{
	bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
	struct iwl_mvm_sta *mvm_sta;
	u8 sta_id;
	int ret;
	static const u8 __maybe_unused zero_addr[ETH_ALEN] = {0};

	lockdep_assert_held(&mvm->mutex);

	/* Get the station id from the mvm local station table */
	mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
	if (!mvm_sta) {
		IWL_ERR(mvm, "Failed to find station\n");
		return -EINVAL;
	}
	sta_id = mvm_sta->sta_id;

	if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC) {
		ret = iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, false);
		goto end;
	}

	/*
	 * It is possible that the 'sta' parameter is NULL, and thus
	 * there is a need to retrieve the sta from the local station table.
	 */
	if (!sta) {
		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
						lockdep_is_held(&mvm->mutex));
		if (IS_ERR_OR_NULL(sta)) {
			IWL_ERR(mvm, "Invalid station id\n");
			return -EINVAL;
		}
	}

	if (WARN_ON_ONCE(iwl_mvm_sta_from_mac80211(sta)->vif != vif))
		return -EINVAL;

	/* If the key_offset is not pre-assigned, we need to find a
	 * new offset to use.  In normal cases, the offset is not
	 * pre-assigned, but during HW_RESTART we want to reuse the
	 * same indices, so we pass them when this function is called.
	 *
	 * In D3 entry, we need to hardcode the indices (because the
	 * firmware hardcodes the PTK offset to 0).  In this case, we
	 * need to make sure we don't overwrite the hw_key_idx in the
	 * keyconf structure, because otherwise we cannot configure
	 * the original ones back when resuming.
	 */
	if (key_offset == STA_KEY_IDX_INVALID) {
		key_offset = iwl_mvm_set_fw_key_idx(mvm);
		if (key_offset == STA_KEY_IDX_INVALID)
			return -ENOSPC;
		keyconf->hw_key_idx = key_offset;
	}

	ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf, key_offset, mcast);
	if (ret)
		goto end;

	/*
	 * For WEP, the same key is used for multicast and unicast. Upload it
	 * again, using the same key offset, and now pointing the other one
	 * to the same key slot (offset).
	 * If this fails, remove the original as well.
	 */
	if (keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
	    keyconf->cipher == WLAN_CIPHER_SUITE_WEP104) {
		ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf,
					    key_offset, !mcast);
		if (ret) {
			__iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
			goto end;
		}
	}

	__set_bit(key_offset, mvm->fw_key_table);

end:
	IWL_DEBUG_WEP(mvm, "key: cipher=%x len=%d idx=%d sta=%pM ret=%d\n",
		      keyconf->cipher, keyconf->keylen, keyconf->keyidx,
		      sta ? sta->addr : zero_addr, ret);
	return ret;
}
int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
			   struct ieee80211_vif *vif,
			   struct ieee80211_sta *sta,
			   struct ieee80211_key_conf *keyconf)
{
	bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
	struct iwl_mvm_sta *mvm_sta;
	u8 sta_id = IWL_MVM_STATION_COUNT;
	int ret, i;

	lockdep_assert_held(&mvm->mutex);

	/* Get the station from the mvm local station table */
	mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);

	IWL_DEBUG_WEP(mvm, "mvm remove dynamic key: idx=%d sta=%d\n",
		      keyconf->keyidx, sta_id);

	if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC)
		return iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, true);

	if (!__test_and_clear_bit(keyconf->hw_key_idx, mvm->fw_key_table)) {
		IWL_ERR(mvm, "offset %d not used in fw key table.\n",
			keyconf->hw_key_idx);
		return -ENOENT;
	}

	/* track which key was deleted last */
	for (i = 0; i < STA_KEY_MAX_NUM; i++) {
		if (mvm->fw_key_deleted[i] < U8_MAX)
			mvm->fw_key_deleted[i]++;
	}
	mvm->fw_key_deleted[keyconf->hw_key_idx] = 0;

	if (!mvm_sta) {
		IWL_DEBUG_WEP(mvm, "station non-existent, early return.\n");
		return 0;
	}

	sta_id = mvm_sta->sta_id;

	ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
	if (ret)
		return ret;

	/* delete WEP key twice to get rid of (now useless) offset */
	if (keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
	    keyconf->cipher == WLAN_CIPHER_SUITE_WEP104)
		ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, !mcast);

	return ret;
}
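
/*
 * iwl_mvm_update_tkip_key - push an updated TKIP phase-1 key to the firmware.
 * The key command is sent with CMD_ASYNC while the station lookup is
 * protected by the RCU read lock.
 */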
void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm,
			     struct ieee80211_vif *vif,
			     struct ieee80211_key_conf *keyconf,
			     struct ieee80211_sta *sta, u32 iv32,
			     u16 *phase1key)
{
	struct iwl_mvm_sta *mvm_sta;
	bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);

	rcu_read_lock();

	mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
	if (WARN_ON_ONCE(!mvm_sta))
		goto unlock;
	iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast,
			     iv32, phase1key, CMD_ASYNC, keyconf->hw_key_idx);

 unlock:
	rcu_read_unlock();
}
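
/*
 * iwl_mvm_sta_modify_ps_wake - clear the PS flag for a station.
 * Sends an asynchronous ADD_STA (modify) command so the firmware can resume
 * transmitting frames to the now-awake station.
 */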
void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm,
				struct ieee80211_sta *sta)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd cmd = {
		.add_modify = STA_MODE_MODIFY,
		.sta_id = mvmsta->sta_id,
		.station_flags_msk = cpu_to_le32(STA_FLG_PS),
		.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
	};
	int ret;

	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}
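
/*
 * iwl_mvm_sta_modify_sleep_tx_count - release frames to a sleeping station.
 *
 * Tells the firmware how many frames may be sent to a station in power save
 * (U-APSD service period or PS-Poll response). When releasing frames from
 * aggregation queues, the count is clamped to what is actually queued and
 * MORE_DATA is set if more frames remain.
 */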
void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
				       struct ieee80211_sta *sta,
				       enum ieee80211_frame_release_type reason,
				       u16 cnt, u16 tids, bool more_data,
				       bool agg)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd cmd = {
		.add_modify = STA_MODE_MODIFY,
		.sta_id = mvmsta->sta_id,
		.modify_mask = STA_MODIFY_SLEEPING_STA_TX_COUNT,
		.sleep_tx_count = cpu_to_le16(cnt),
		.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
	};
	int tid, ret;
	unsigned long _tids = tids;

	/* convert TIDs to ACs - we don't support TSPEC so that's OK
	 * Note that this field is reserved and unused by firmware not
	 * supporting GO uAPSD, so it's safe to always do this.
	 */
	for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT)
		cmd.awake_acs |= BIT(tid_to_ucode_ac[tid]);

	/* If we're releasing frames from aggregation queues then check if
	 * all the queues combined that we're releasing frames from have
	 *  - more frames than the service period, in which case more_data
	 *    needs to be set
	 *  - fewer than 'cnt' frames, in which case we need to adjust the
	 *    firmware command (but do that unconditionally)
	 */
	if (agg) {
		int remaining = cnt;
		int sleep_tx_count;

		spin_lock_bh(&mvmsta->lock);
		for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT) {
			struct iwl_mvm_tid_data *tid_data;
			u16 n_queued;

			tid_data = &mvmsta->tid_data[tid];
			if (WARN(tid_data->state != IWL_AGG_ON &&
				 tid_data->state != IWL_EMPTYING_HW_QUEUE_DELBA,
				 "TID %d state is %d\n",
				 tid, tid_data->state)) {
				spin_unlock_bh(&mvmsta->lock);
				ieee80211_sta_eosp(sta);
				return;
			}

			n_queued = iwl_mvm_tid_queued(tid_data);
			if (n_queued > remaining) {
				more_data = true;
				remaining = 0;
				break;
			}
			remaining -= n_queued;
		}
		sleep_tx_count = cnt - remaining;
		if (reason == IEEE80211_FRAME_RELEASE_UAPSD)
			mvmsta->sleep_tx_count = sleep_tx_count;
		spin_unlock_bh(&mvmsta->lock);

		cmd.sleep_tx_count = cpu_to_le16(sleep_tx_count);
		if (WARN_ON(cnt - remaining == 0)) {
			ieee80211_sta_eosp(sta);
			return;
		}
	}

	/* Note: this is ignored by firmware not supporting GO uAPSD */
	if (more_data)
		cmd.sleep_state_flags |= cpu_to_le16(STA_SLEEP_STATE_MOREDATA);

	if (reason == IEEE80211_FRAME_RELEASE_PSPOLL) {
		mvmsta->next_status_eosp = true;
		cmd.sleep_state_flags |= cpu_to_le16(STA_SLEEP_STATE_PS_POLL);
	} else {
		cmd.sleep_state_flags |= cpu_to_le16(STA_SLEEP_STATE_UAPSD);
	}

	/* block the Tx queues until the FW updated the sleep Tx count */
	iwl_trans_block_txq_ptrs(mvm->trans, true);

	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA,
				   CMD_ASYNC | CMD_WANT_ASYNC_CALLBACK,
				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}
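
/*
 * iwl_mvm_rx_eosp_notif - handle the firmware's end-of-service-period
 * notification and forward it to mac80211 for the corresponding station.
 */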
void iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm,
			   struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_mvm_eosp_notification *notif = (void *)pkt->data;
	struct ieee80211_sta *sta;
	u32 sta_id = le32_to_cpu(notif->sta_id);

	if (WARN_ON_ONCE(sta_id >= IWL_MVM_STATION_COUNT))
		return;

	rcu_read_lock();
	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
	if (!IS_ERR_OR_NULL(sta))
		ieee80211_sta_eosp(sta);
	rcu_read_unlock();
}
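
/*
 * iwl_mvm_sta_modify_disable_tx - set or clear the DISABLE_TX station flag in
 * the firmware via an asynchronous ADD_STA (modify) command.
 */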
void iwl_mvm_sta_modify_disable_tx(struct iwl_mvm *mvm,
				   struct iwl_mvm_sta *mvmsta, bool disable)
{
	struct iwl_mvm_add_sta_cmd cmd = {
		.add_modify = STA_MODE_MODIFY,
		.sta_id = mvmsta->sta_id,
		.station_flags = disable ? cpu_to_le32(STA_FLG_DISABLE_TX) : 0,
		.station_flags_msk = cpu_to_le32(STA_FLG_DISABLE_TX),
		.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
	};
	int ret;

	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}
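
/*
 * iwl_mvm_sta_modify_disable_tx_ap - block/unblock Tx to a single station,
 * both in mac80211 (frame queuing) and in the firmware.
 */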
void iwl_mvm_sta_modify_disable_tx_ap(struct iwl_mvm *mvm,
				      struct ieee80211_sta *sta,
				      bool disable)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);

	spin_lock_bh(&mvm_sta->lock);

	if (mvm_sta->disable_tx == disable) {
		spin_unlock_bh(&mvm_sta->lock);
		return;
	}

	mvm_sta->disable_tx = disable;

	/*
	 * Tell mac80211 to start/stop queuing tx for this station,
	 * but don't stop queuing if there are still pending frames
	 * for this station.
	 */
	if (disable || !atomic_read(&mvm->pending_frames[mvm_sta->sta_id]))
		ieee80211_sta_block_awake(mvm->hw, sta, disable);

	iwl_mvm_sta_modify_disable_tx(mvm, mvm_sta, disable);

	spin_unlock_bh(&mvm_sta->lock);
}
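
/*
 * iwl_mvm_modify_all_sta_disable_tx - block/unblock Tx to all stations that
 * belong to the given vif (matched by mac id and color).
 */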
void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm,
				       struct iwl_mvm_vif *mvmvif,
				       bool disable)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvm_sta;
	int i;

	lockdep_assert_held(&mvm->mutex);

	/* Block/unblock all the stations of the given mvmvif */
	for (i = 0; i < IWL_MVM_STATION_COUNT; i++) {
		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
						lockdep_is_held(&mvm->mutex));
		if (IS_ERR_OR_NULL(sta))
			continue;

		mvm_sta = iwl_mvm_sta_from_mac80211(sta);
		if (mvm_sta->mac_id_n_color !=
		    FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color))
			continue;

		iwl_mvm_sta_modify_disable_tx_ap(mvm, sta, disable);
	}
}
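
/*
 * iwl_mvm_csa_client_absent - during a channel switch, mark the vif's AP
 * station (mvmvif->ap_sta_id) as Tx-disabled in the firmware.
 */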
void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_sta *mvmsta;

	rcu_read_lock();

	mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, mvmvif->ap_sta_id);

	if (!WARN_ON(!mvmsta))
		iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, true);

	rcu_read_unlock();
}