/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *****************************************************************************/
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include "iwl-trans.h"
#include "mvm.h"
#include "fw-api.h"
#include "fw-dbg.h"
static inline int iwl_mvm_check_pn(struct iwl_mvm *mvm, struct sk_buff *skb,
				   int queue, struct ieee80211_sta *sta)
{
	struct iwl_mvm_sta *mvmsta;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_rx_status *stats = IEEE80211_SKB_RXCB(skb);
	struct iwl_mvm_key_pn *ptk_pn;
	u8 tid, keyidx;
	u8 pn[IEEE80211_CCMP_PN_LEN];
	u8 *extiv;

	/* multicast and non-data only arrives on default queue */
	if (!ieee80211_is_data(hdr->frame_control) ||
	    is_multicast_ether_addr(hdr->addr1))
		return 0;

	/* do not check PN for open AP */
	if (!(stats->flag & RX_FLAG_DECRYPTED))
		return 0;

	/*
	 * avoid checking for default queue - we don't want to replicate
	 * all the logic that's necessary for checking the PN on fragmented
	 * frames, leave that to mac80211
	 */
	if (queue == 0)
		return 0;

	/* if we are here - this for sure is either CCMP or GCMP */
	if (IS_ERR_OR_NULL(sta)) {
		IWL_ERR(mvm,
			"expected hw-decrypted unicast frame for station\n");
		return -1;
	}

	mvmsta = iwl_mvm_sta_from_mac80211(sta);

	extiv = (u8 *)hdr + ieee80211_hdrlen(hdr->frame_control);
	keyidx = extiv[3] >> 6;

	ptk_pn = rcu_dereference(mvmsta->ptk_pn[keyidx]);
	if (!ptk_pn)
		return -1;

	if (ieee80211_is_data_qos(hdr->frame_control))
		tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
	else
		tid = 0;

	/* we don't use HCCA/802.11 QoS TSPECs, so drop such frames */
	if (tid >= IWL_MAX_TID_COUNT)
		return -1;

	/* load the PN from the CCMP/GCMP extension IV */
	pn[0] = extiv[7];
	pn[1] = extiv[6];
	pn[2] = extiv[5];
	pn[3] = extiv[4];
	pn[4] = extiv[1];
	pn[5] = extiv[0];

	if (memcmp(pn, ptk_pn->q[queue].pn[tid],
		   IEEE80211_CCMP_PN_LEN) <= 0)
		return -1;

	memcpy(ptk_pn->q[queue].pn[tid], pn, IEEE80211_CCMP_PN_LEN);
	stats->flag |= RX_FLAG_PN_VALIDATED;

	return 0;
}
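/*
 * Worked example (illustration, not part of the original source): if the
 * last PN recorded for this queue/TID is 00:00:00:00:00:05 and the new
 * frame carries PN 00:00:00:00:00:05 or lower, the memcmp() above is <= 0
 * and iwl_mvm_check_pn() returns -1, so the caller drops the frame as a
 * replay instead of handing it to mac80211.
 */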
/* iwl_mvm_create_skb - adds the rxb to a new skb */
static void iwl_mvm_create_skb(struct sk_buff *skb, struct ieee80211_hdr *hdr,
			       u16 len, u8 crypt_len,
			       struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_rx_mpdu_desc *desc = (void *)pkt->data;
	unsigned int headlen, fraglen, pad_len = 0;
	unsigned int hdrlen = ieee80211_hdrlen(hdr->frame_control);

	if (desc->mac_flags2 & IWL_RX_MPDU_MFLG2_PAD)
		pad_len = 2;
	len -= pad_len;

	/* If frame is small enough to fit in skb->head, pull it completely.
	 * If not, only pull ieee80211_hdr (including crypto if present, and
	 * an additional 8 bytes for SNAP/ethertype, see below) so that
	 * splice() or TCP coalesce are more efficient.
	 *
	 * Since, in addition, ieee80211_data_to_8023() always pulls in at
	 * least 8 bytes (possibly more for mesh) we can do the same here
	 * to save the cost of doing it later. That still doesn't pull in
	 * the actual IP header since the typical case has a SNAP header.
	 * If the latter changes (there are efforts in the standards group
	 * to do so) we should revisit this and ieee80211_data_to_8023().
	 */
	headlen = (len <= skb_tailroom(skb)) ? len :
					       hdrlen + crypt_len + 8;

	/* The firmware may align the packet to DWORD.
	 * The padding is inserted after the IV.
	 * After copying the header + IV skip the padding if
	 * present before copying packet data.
	 */
	hdrlen += crypt_len;
	memcpy(skb_put(skb, hdrlen), hdr, hdrlen);
	memcpy(skb_put(skb, headlen - hdrlen), (u8 *)hdr + hdrlen + pad_len,
	       headlen - hdrlen);

	fraglen = len - headlen;

	if (fraglen) {
		int offset = (void *)hdr + headlen + pad_len -
			     rxb_addr(rxb) + rxb_offset(rxb);

		skb_add_rx_frag(skb, 0, rxb_steal_page(rxb), offset,
				fraglen, rxb->truesize);
	}
}
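/*
 * Illustrative layout (not part of the original source): for a 26-byte QoS
 * header with an 8-byte CCMP IV and IWL_RX_MPDU_MFLG2_PAD set, the RX
 * buffer holds [hdr:26][IV:8][pad:2][payload]. The code above copies the
 * first 34 bytes into skb->head and then copies or page-maps the payload
 * starting at byte 36, so the firmware's 2-byte DWORD-alignment pad never
 * reaches mac80211.
 */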
/* iwl_mvm_pass_packet_to_mac80211 - passes the packet to mac80211 */
static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm,
					    struct napi_struct *napi,
					    struct sk_buff *skb, int queue,
					    struct ieee80211_sta *sta)
{
	if (iwl_mvm_check_pn(mvm, skb, queue, sta))
		kfree_skb(skb);
	else
		ieee80211_rx_napi(mvm->hw, sta, skb, napi);
}
static void iwl_mvm_get_signal_strength(struct iwl_mvm *mvm,
					struct iwl_rx_mpdu_desc *desc,
					struct ieee80211_rx_status *rx_status)
{
	int energy_a, energy_b, max_energy;

	energy_a = desc->energy_a;
	energy_a = energy_a ? -energy_a : S8_MIN;
	energy_b = desc->energy_b;
	energy_b = energy_b ? -energy_b : S8_MIN;
	max_energy = max(energy_a, energy_b);

	IWL_DEBUG_STATS(mvm, "energy In A %d B %d, and max %d\n",
			energy_a, energy_b, max_energy);

	rx_status->signal = max_energy;
	rx_status->chains = 0; /* TODO: phy info */
	rx_status->chain_signal[0] = energy_a;
	rx_status->chain_signal[1] = energy_b;
	rx_status->chain_signal[2] = S8_MIN;
}
static int iwl_mvm_rx_crypto(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
			     struct ieee80211_rx_status *stats,
			     struct iwl_rx_mpdu_desc *desc, int queue,
			     u8 *crypt_len)
{
	u16 status = le16_to_cpu(desc->status);

	if (!ieee80211_has_protected(hdr->frame_control) ||
	    (status & IWL_RX_MPDU_STATUS_SEC_MASK) ==
	    IWL_RX_MPDU_STATUS_SEC_NONE)
		return 0;

	/* TODO: handle packets encrypted with unknown alg */

	switch (status & IWL_RX_MPDU_STATUS_SEC_MASK) {
	case IWL_RX_MPDU_STATUS_SEC_CCM:
	case IWL_RX_MPDU_STATUS_SEC_GCM:
		BUILD_BUG_ON(IEEE80211_CCMP_PN_LEN != IEEE80211_GCMP_PN_LEN);
		/* alg is CCM: check MIC only */
		if (!(status & IWL_RX_MPDU_STATUS_MIC_OK))
			return -1;

		stats->flag |= RX_FLAG_DECRYPTED;
		*crypt_len = IEEE80211_CCMP_HDR_LEN;
		return 0;
	case IWL_RX_MPDU_STATUS_SEC_TKIP:
		/* Don't drop the frame and decrypt it in SW */
		if (!(status & IWL_RX_MPDU_RES_STATUS_TTAK_OK))
			return 0;

		*crypt_len = IEEE80211_TKIP_IV_LEN;
		/* fall through if TTAK OK */
	case IWL_RX_MPDU_STATUS_SEC_WEP:
		if (!(status & IWL_RX_MPDU_STATUS_ICV_OK))
			return -1;

		stats->flag |= RX_FLAG_DECRYPTED;
		if ((status & IWL_RX_MPDU_STATUS_SEC_MASK) ==
		    IWL_RX_MPDU_STATUS_SEC_WEP)
			*crypt_len = IEEE80211_WEP_IV_LEN;
		break;
	case IWL_RX_MPDU_STATUS_SEC_EXT_ENC:
		if (!(status & IWL_RX_MPDU_STATUS_MIC_OK))
			return -1;
		stats->flag |= RX_FLAG_DECRYPTED;
		break;
	default:
		IWL_ERR(mvm, "Unhandled alg: 0x%x\n", status);
	}

	return 0;
}
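/*
 * Summary for reference (not part of the original source): on success
 * *crypt_len ends up as IEEE80211_CCMP_HDR_LEN (8) for CCMP/GCMP,
 * IEEE80211_TKIP_IV_LEN (8) for TKIP, IEEE80211_WEP_IV_LEN (4) for WEP and
 * 0 otherwise; iwl_mvm_create_skb() later uses it to size the header copy.
 */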
static void iwl_mvm_rx_csum(struct ieee80211_sta *sta,
			    struct sk_buff *skb,
			    struct iwl_rx_mpdu_desc *desc)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif);
	u16 flags = le16_to_cpu(desc->l3l4_flags);
	u8 l3_prot = (u8)((flags & IWL_RX_L3L4_L3_PROTO_MASK) >>
			  IWL_RX_L3_PROTO_POS);

	if (mvmvif->features & NETIF_F_RXCSUM &&
	    flags & IWL_RX_L3L4_TCP_UDP_CSUM_OK &&
	    (flags & IWL_RX_L3L4_IP_HDR_CSUM_OK ||
	     l3_prot == IWL_RX_L3_TYPE_IPV6 ||
	     l3_prot == IWL_RX_L3_TYPE_IPV6_FRAG))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
}
/*
 * returns true if a packet outside BA session is a duplicate and
 * should be dropped
 */
static bool iwl_mvm_is_nonagg_dup(struct ieee80211_sta *sta, int queue,
				  struct ieee80211_rx_status *rx_status,
				  struct ieee80211_hdr *hdr,
				  struct iwl_rx_mpdu_desc *desc)
{
	struct iwl_mvm_sta *mvm_sta;
	struct iwl_mvm_rxq_dup_data *dup_data;
	u8 baid, tid, sub_frame_idx;

	if (WARN_ON(IS_ERR_OR_NULL(sta)))
		return false;

	baid = (le32_to_cpu(desc->reorder_data) &
		IWL_RX_MPDU_REORDER_BAID_MASK) >>
		IWL_RX_MPDU_REORDER_BAID_SHIFT;

	if (baid != IWL_RX_REORDER_DATA_INVALID_BAID)
		return false;

	mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	dup_data = &mvm_sta->dup_data[queue];

	/*
	 * Drop duplicate 802.11 retransmissions
	 * (IEEE 802.11-2012: 9.3.2.10 "Duplicate detection and recovery")
	 */
	if (ieee80211_is_ctl(hdr->frame_control) ||
	    ieee80211_is_qos_nullfunc(hdr->frame_control) ||
	    is_multicast_ether_addr(hdr->addr1)) {
		rx_status->flag |= RX_FLAG_DUP_VALIDATED;
		return false;
	}

	if (ieee80211_is_data_qos(hdr->frame_control))
		/* frame has qos control */
		tid = *ieee80211_get_qos_ctl(hdr) &
			IEEE80211_QOS_CTL_TID_MASK;
	else
		tid = IWL_MAX_TID_COUNT;

	/* If this wasn't a part of an A-MSDU the sub-frame index will be 0 */
	sub_frame_idx = desc->amsdu_info & IWL_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK;

	if (unlikely(ieee80211_has_retry(hdr->frame_control) &&
		     dup_data->last_seq[tid] == hdr->seq_ctrl &&
		     dup_data->last_sub_frame[tid] >= sub_frame_idx))
		return true;

	dup_data->last_seq[tid] = hdr->seq_ctrl;
	dup_data->last_sub_frame[tid] = sub_frame_idx;

	rx_status->flag |= RX_FLAG_DUP_VALIDATED;

	return false;
}
int iwl_mvm_notify_rx_queue(struct iwl_mvm *mvm, u32 rxq_mask,
			    const u8 *data, u32 count)
{
	struct iwl_rxq_sync_cmd *cmd;
	u32 data_size = sizeof(*cmd) + count;
	int ret;

	/* should be DWORD aligned */
	if (WARN_ON(count & 3 || count > IWL_MULTI_QUEUE_SYNC_MSG_MAX_SIZE))
		return -EINVAL;

	cmd = kzalloc(data_size, GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	cmd->rxq_mask = cpu_to_le32(rxq_mask);
	cmd->count = cpu_to_le32(count);

	memcpy(cmd->payload, data, count);

	ret = iwl_mvm_send_cmd_pdu(mvm,
				   WIDE_ID(DATA_PATH_GROUP,
					   TRIGGER_RX_QUEUES_NOTIF_CMD),
				   0, data_size, cmd);

	kfree(cmd);
	return ret;
}
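/*
 * Hypothetical usage sketch (not from the original source): a caller that
 * wants to broadcast a DWORD-aligned internal notification to every RX
 * queue could do something like
 *
 *	struct iwl_mvm_internal_rxq_notif notif = {
 *		.type = IWL_MVM_RXQ_EMPTY,
 *	};
 *
 *	iwl_mvm_notify_rx_queue(mvm, BIT(mvm->trans->num_rx_queues) - 1,
 *				(u8 *)&notif, sizeof(notif));
 *
 * the payload is echoed back by the firmware and handled per queue in
 * iwl_mvm_rx_queue_notif() below.
 */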
/*
 * Returns true if sn2 - buffer_size < sn1 < sn2.
 * To be used only in order to compare reorder buffer head with NSSN.
 * We fully trust NSSN unless it is behind us due to reorder timeout.
 * Reorder timeout can only bring us up to buffer_size SNs ahead of NSSN.
 */
static bool iwl_mvm_is_sn_less(u16 sn1, u16 sn2, u16 buffer_size)
{
	return ieee80211_sn_less(sn1, sn2) &&
	       !ieee80211_sn_less(sn1, sn2 - buffer_size);
}
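/*
 * Worked example (illustration, not from the original source): with
 * buffer_size = 64 and sn2 = 100, iwl_mvm_is_sn_less(sn1, 100, 64) is true
 * for sn1 = 40 or 99 but false for sn1 = 100, 101 or 30, so an NSSN that
 * fell a full reorder window behind the head is ignored by
 * iwl_mvm_release_frames().
 */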
#define RX_REORDER_BUF_TIMEOUT_MQ (HZ / 10)

static void iwl_mvm_release_frames(struct iwl_mvm *mvm,
				   struct ieee80211_sta *sta,
				   struct napi_struct *napi,
				   struct iwl_mvm_reorder_buffer *reorder_buf,
				   u16 nssn)
{
	u16 ssn = reorder_buf->head_sn;

	lockdep_assert_held(&reorder_buf->lock);

	/* ignore nssn smaller than head sn - this can happen due to timeout */
	if (iwl_mvm_is_sn_less(nssn, ssn, reorder_buf->buf_size))
		return;

	while (iwl_mvm_is_sn_less(ssn, nssn, reorder_buf->buf_size)) {
		int index = ssn % reorder_buf->buf_size;
		struct sk_buff_head *skb_list = &reorder_buf->entries[index];
		struct sk_buff *skb;

		ssn = ieee80211_sn_inc(ssn);

		/* holes are valid since nssn indicates frames were received. */
		if (skb_queue_empty(skb_list) || !skb_peek_tail(skb_list))
			continue;
		/* Empty the list. Will have more than one frame for A-MSDU */
		while ((skb = __skb_dequeue(skb_list))) {
			iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb,
							reorder_buf->queue,
							sta);
			reorder_buf->num_stored--;
		}
	}
	reorder_buf->head_sn = nssn;

	if (reorder_buf->num_stored && !reorder_buf->removed) {
		u16 index = reorder_buf->head_sn % reorder_buf->buf_size;

		while (!skb_peek_tail(&reorder_buf->entries[index]))
			index = (index + 1) % reorder_buf->buf_size;
		/* modify timer to match next frame's expiration time */
		mod_timer(&reorder_buf->reorder_timer,
			  reorder_buf->reorder_time[index] + 1 +
			  RX_REORDER_BUF_TIMEOUT_MQ);
	} else {
		del_timer(&reorder_buf->reorder_timer);
	}
}
void iwl_mvm_reorder_timer_expired(unsigned long data)
{
	struct iwl_mvm_reorder_buffer *buf = (void *)data;
	int i;
	u16 sn = 0, index = 0;
	bool expired = false;

	spin_lock_bh(&buf->lock);

	if (!buf->num_stored || buf->removed) {
		spin_unlock_bh(&buf->lock);
		return;
	}

	for (i = 0; i < buf->buf_size ; i++) {
		index = (buf->head_sn + i) % buf->buf_size;

		if (!skb_peek_tail(&buf->entries[index]))
			continue;
		if (!time_after(jiffies, buf->reorder_time[index] +
				RX_REORDER_BUF_TIMEOUT_MQ))
			break;
		expired = true;
		sn = ieee80211_sn_add(buf->head_sn, i + 1);
	}

	if (expired) {
		struct ieee80211_sta *sta;

		rcu_read_lock();
		sta = rcu_dereference(buf->mvm->fw_id_to_mac_id[buf->sta_id]);
		/* SN is set to the last expired frame + 1 */
		iwl_mvm_release_frames(buf->mvm, sta, NULL, buf, sn);
		rcu_read_unlock();
	} else if (buf->num_stored) {
		/*
		 * If no frame expired and there are stored frames, index is now
		 * pointing to the first unexpired frame - modify timer
		 * accordingly to this frame.
		 */
		mod_timer(&buf->reorder_timer,
			  buf->reorder_time[index] +
			  1 + RX_REORDER_BUF_TIMEOUT_MQ);
	}
	spin_unlock_bh(&buf->lock);
}
static void iwl_mvm_del_ba(struct iwl_mvm *mvm, int queue,
			   struct iwl_mvm_delba_data *data)
{
	struct iwl_mvm_baid_data *ba_data;
	struct ieee80211_sta *sta;
	struct iwl_mvm_reorder_buffer *reorder_buf;
	u8 baid = data->baid;

	if (WARN_ON_ONCE(baid >= IWL_RX_REORDER_DATA_INVALID_BAID))
		return;

	rcu_read_lock();

	ba_data = rcu_dereference(mvm->baid_map[baid]);
	if (WARN_ON_ONCE(!ba_data))
		goto out;

	sta = rcu_dereference(mvm->fw_id_to_mac_id[ba_data->sta_id]);
	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
		goto out;

	reorder_buf = &ba_data->reorder_buf[queue];

	/* release all frames that are in the reorder buffer to the stack */
	spin_lock_bh(&reorder_buf->lock);
	iwl_mvm_release_frames(mvm, sta, NULL, reorder_buf,
			       ieee80211_sn_add(reorder_buf->head_sn,
						reorder_buf->buf_size));
	spin_unlock_bh(&reorder_buf->lock);
	del_timer_sync(&reorder_buf->reorder_timer);

out:
	rcu_read_unlock();
}
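/*
 * Note (illustration, not part of the original source): releasing up to
 * head_sn + buf_size sweeps the entire reorder window, so tearing down the
 * BA session cannot leave skbs parked in reorder_buf->entries[].
 */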
void iwl_mvm_rx_queue_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
			    int queue)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_rxq_sync_notification *notif;
	struct iwl_mvm_internal_rxq_notif *internal_notif;

	notif = (void *)pkt->data;
	internal_notif = (void *)notif->payload;

	if (internal_notif->sync) {
		if (mvm->queue_sync_cookie != internal_notif->cookie) {
			WARN_ONCE(1,
				  "Received expired RX queue sync message\n");
			return;
		}
		atomic_dec(&mvm->queue_sync_counter);
	}

	switch (internal_notif->type) {
	case IWL_MVM_RXQ_EMPTY:
		break;
	case IWL_MVM_RXQ_NOTIF_DEL_BA:
		iwl_mvm_del_ba(mvm, queue, (void *)internal_notif->data);
		break;
	default:
		WARN_ONCE(1, "Invalid identifier %d", internal_notif->type);
	}
}
/*
 * Returns true if the MPDU was buffered/dropped, false if it should be passed
 * to upper layer.
 */
static bool iwl_mvm_reorder(struct iwl_mvm *mvm,
			    struct napi_struct *napi,
			    int queue,
			    struct ieee80211_sta *sta,
			    struct sk_buff *skb,
			    struct iwl_rx_mpdu_desc *desc)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_baid_data *baid_data;
	struct iwl_mvm_reorder_buffer *buffer;
	struct sk_buff *tail;
	u32 reorder = le32_to_cpu(desc->reorder_data);
	bool amsdu = desc->mac_flags2 & IWL_RX_MPDU_MFLG2_AMSDU;
	bool last_subframe =
		desc->amsdu_info & IWL_RX_MPDU_AMSDU_LAST_SUBFRAME;
	u8 tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
	u8 sub_frame_idx = desc->amsdu_info &
			   IWL_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK;
	int index;
	u16 nssn, sn;
	u8 baid;

	baid = (reorder & IWL_RX_MPDU_REORDER_BAID_MASK) >>
		IWL_RX_MPDU_REORDER_BAID_SHIFT;

	if (baid == IWL_RX_REORDER_DATA_INVALID_BAID)
		return false;

	if (WARN_ON(IS_ERR_OR_NULL(sta)))
		return false;

	/* not a data packet */
	if (!ieee80211_is_data_qos(hdr->frame_control) ||
	    is_multicast_ether_addr(hdr->addr1))
		return false;

	if (unlikely(!ieee80211_is_data_present(hdr->frame_control)))
		return false;

	baid_data = rcu_dereference(mvm->baid_map[baid]);
	if (WARN(!baid_data,
		 "Received baid %d, but no data exists for this BAID\n", baid))
		return false;
	if (WARN(tid != baid_data->tid || mvm_sta->sta_id != baid_data->sta_id,
		 "baid 0x%x is mapped to sta:%d tid:%d, but was received for sta:%d tid:%d\n",
		 baid, baid_data->sta_id, baid_data->tid, mvm_sta->sta_id,
		 tid))
		return false;

	nssn = reorder & IWL_RX_MPDU_REORDER_NSSN_MASK;
	sn = (reorder & IWL_RX_MPDU_REORDER_SN_MASK) >>
		IWL_RX_MPDU_REORDER_SN_SHIFT;

	buffer = &baid_data->reorder_buf[queue];

	spin_lock_bh(&buffer->lock);

	/*
	 * If there was a significant jump in the nssn - adjust.
	 * If the SN is smaller than the NSSN it might need to first go into
	 * the reorder buffer, in which case we just release up to it and the
	 * rest of the function will take care of storing it and releasing up
	 * to the nssn
	 */
	if (!iwl_mvm_is_sn_less(nssn, buffer->head_sn + buffer->buf_size,
				buffer->buf_size)) {
		u16 min_sn = ieee80211_sn_less(sn, nssn) ? sn : nssn;

		iwl_mvm_release_frames(mvm, sta, napi, buffer, min_sn);
	}

	/* drop any outdated packets */
	if (ieee80211_sn_less(sn, buffer->head_sn))
		goto drop;

	/* release immediately if allowed by nssn and no stored frames */
	if (!buffer->num_stored && ieee80211_sn_less(sn, nssn)) {
		if (iwl_mvm_is_sn_less(buffer->head_sn, nssn,
				       buffer->buf_size) &&
		    (!amsdu || last_subframe))
			buffer->head_sn = nssn;
		/* No need to update AMSDU last SN - we are moving the head */
		spin_unlock_bh(&buffer->lock);
		return false;
	}

	index = sn % buffer->buf_size;

	/*
	 * Check if we already stored this frame.
	 * As an A-MSDU is either received as a whole or not at all, the logic
	 * is simple: if we have frames in that position in the buffer and the
	 * last frame originating from an A-MSDU had a different SN, then it
	 * is a retransmission. If it is the same SN, then it is the same
	 * A-MSDU only if the subframe index is incrementing - otherwise it is
	 * a retransmission.
	 */
	tail = skb_peek_tail(&buffer->entries[index]);
	if (tail && !amsdu)
		goto drop;
	else if (tail && (sn != buffer->last_amsdu ||
			  buffer->last_sub_index >= sub_frame_idx))
		goto drop;

	/* put in reorder buffer */
	__skb_queue_tail(&buffer->entries[index], skb);
	buffer->num_stored++;
	buffer->reorder_time[index] = jiffies;

	if (amsdu) {
		buffer->last_amsdu = sn;
		buffer->last_sub_index = sub_frame_idx;
	}

	/*
	 * We cannot trust NSSN for AMSDU sub-frames that are not the last.
	 * The reason is that NSSN advances on the first sub-frame, and may
	 * cause the reorder buffer to advance before all the sub-frames arrive.
	 * Example: reorder buffer contains SN 0 & 2, and we receive AMSDU with
	 * SN 1. NSSN for first sub frame will be 3 with the result of driver
	 * releasing SN 0,1, 2. When sub-frame 1 arrives - reorder buffer is
	 * already ahead and it will be dropped.
	 * If the last sub-frame is not on this queue - we will get frame
	 * release notification with up to date NSSN.
	 */
	if (!amsdu || last_subframe)
		iwl_mvm_release_frames(mvm, sta, napi, buffer, nssn);

	spin_unlock_bh(&buffer->lock);
	return true;

drop:
	kfree_skb(skb);
	spin_unlock_bh(&buffer->lock);
	return true;
}
static void iwl_mvm_agg_rx_received(struct iwl_mvm *mvm, u8 baid)
{
	unsigned long now = jiffies;
	unsigned long timeout;
	struct iwl_mvm_baid_data *data;

	rcu_read_lock();

	data = rcu_dereference(mvm->baid_map[baid]);
	if (WARN_ON(!data))
		goto out;

	if (!data->timeout)
		goto out;

	timeout = data->timeout;
	/*
	 * Do not update last rx all the time to avoid cache bouncing
	 * between the rx queues.
	 * Update it every timeout. Worst case is the session will
	 * expire after ~ 2 * timeout, which doesn't matter that much.
	 */
	if (time_before(data->last_rx + TU_TO_JIFFIES(timeout), now))
		/* Update is atomic */
		data->last_rx = now;

out:
	rcu_read_unlock();
}
void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
			struct iwl_rx_cmd_buffer *rxb, int queue)
{
	struct ieee80211_rx_status *rx_status;
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_rx_mpdu_desc *desc = (void *)pkt->data;
	struct ieee80211_hdr *hdr = (void *)(pkt->data + sizeof(*desc));
	u32 len = le16_to_cpu(desc->mpdu_len);
	u32 rate_n_flags = le32_to_cpu(desc->rate_n_flags);
	u16 phy_info = le16_to_cpu(desc->phy_info);
	struct ieee80211_sta *sta = NULL;
	struct sk_buff *skb;
	u8 crypt_len = 0;

	/* Don't use dev_alloc_skb(), we'll have enough headroom once
	 * ieee80211_hdr pulled.
	 */
	skb = alloc_skb(128, GFP_ATOMIC);
	if (!skb) {
		IWL_ERR(mvm, "alloc_skb failed\n");
		return;
	}

	rx_status = IEEE80211_SKB_RXCB(skb);

	if (iwl_mvm_rx_crypto(mvm, hdr, rx_status, desc, queue, &crypt_len)) {
		kfree_skb(skb);
		return;
	}

	/*
	 * Keep packets with CRC errors (and with overrun) for monitor mode
	 * (otherwise the firmware discards them) but mark them as bad.
	 */
	if (!(desc->status & cpu_to_le16(IWL_RX_MPDU_STATUS_CRC_OK)) ||
	    !(desc->status & cpu_to_le16(IWL_RX_MPDU_STATUS_OVERRUN_OK))) {
		IWL_DEBUG_RX(mvm, "Bad CRC or FIFO: 0x%08X.\n",
			     le16_to_cpu(desc->status));
		rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
	}

	/* set the preamble flag if appropriate */
	if (phy_info & IWL_RX_MPDU_PHY_SHORT_PREAMBLE)
		rx_status->flag |= RX_FLAG_SHORTPRE;

	if (likely(!(phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD))) {
		rx_status->mactime = le64_to_cpu(desc->tsf_on_air_rise);
		/* TSF as indicated by the firmware is at INA time */
		rx_status->flag |= RX_FLAG_MACTIME_PLCP_START;
	}
	rx_status->device_timestamp = le32_to_cpu(desc->gp2_on_air_rise);
	rx_status->band = desc->channel > 14 ? NL80211_BAND_5GHZ :
					       NL80211_BAND_2GHZ;
	rx_status->freq = ieee80211_channel_to_frequency(desc->channel,
							 rx_status->band);
	iwl_mvm_get_signal_strength(mvm, desc, rx_status);

	/* update aggregation data for monitor sake on default queue */
	if (!queue && (phy_info & IWL_RX_MPDU_PHY_AMPDU)) {
		bool toggle_bit = phy_info & IWL_RX_MPDU_PHY_AMPDU_TOGGLE;

		rx_status->flag |= RX_FLAG_AMPDU_DETAILS;
		rx_status->ampdu_reference = mvm->ampdu_ref;
		/* toggle is switched whenever new aggregation starts */
		if (toggle_bit != mvm->ampdu_toggle) {
			mvm->ampdu_ref++;
			mvm->ampdu_toggle = toggle_bit;
		}
	}

	rcu_read_lock();

	if (le16_to_cpu(desc->status) & IWL_RX_MPDU_STATUS_SRC_STA_FOUND) {
		u8 id = desc->sta_id_flags & IWL_RX_MPDU_SIF_STA_ID_MASK;

		if (!WARN_ON_ONCE(id >= IWL_MVM_STATION_COUNT)) {
			sta = rcu_dereference(mvm->fw_id_to_mac_id[id]);
			if (IS_ERR(sta))
				sta = NULL;
		}
	} else if (!is_multicast_ether_addr(hdr->addr2)) {
		/*
		 * This is fine since we prevent two stations with the same
		 * address from being added.
		 */
		sta = ieee80211_find_sta_by_ifaddr(mvm->hw, hdr->addr2, NULL);
	}

	if (sta) {
		struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
		u8 baid = (u8)((le32_to_cpu(desc->reorder_data) &
			       IWL_RX_MPDU_REORDER_BAID_MASK) >>
			       IWL_RX_MPDU_REORDER_BAID_SHIFT);

		/*
		 * We have tx blocked stations (with CS bit). If we heard
		 * frames from a blocked station on a new channel we can
		 * TX to it again.
		 */
		if (unlikely(mvm->csa_tx_block_bcn_timeout))
			iwl_mvm_sta_modify_disable_tx_ap(mvm, sta, false);

		rs_update_last_rssi(mvm, &mvmsta->lq_sta, rx_status);

		if (iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_RSSI) &&
		    ieee80211_is_beacon(hdr->frame_control)) {
			struct iwl_fw_dbg_trigger_tlv *trig;
			struct iwl_fw_dbg_trigger_low_rssi *rssi_trig;
			bool trig_check;
			s32 rssi;

			trig = iwl_fw_dbg_get_trigger(mvm->fw,
						      FW_DBG_TRIGGER_RSSI);
			rssi_trig = (void *)trig->data;
			rssi = le32_to_cpu(rssi_trig->rssi);

			trig_check =
				iwl_fw_dbg_trigger_check_stop(mvm, mvmsta->vif,
							      trig);
			if (trig_check && rx_status->signal < rssi)
				iwl_mvm_fw_dbg_collect_trig(mvm, trig, NULL);
		}

		if (ieee80211_is_data(hdr->frame_control))
			iwl_mvm_rx_csum(sta, skb, desc);

		if (iwl_mvm_is_nonagg_dup(sta, queue, rx_status, hdr, desc)) {
			kfree_skb(skb);
			rcu_read_unlock();
			return;
		}

		/*
		 * Our hardware de-aggregates AMSDUs but copies the mac header
		 * as is to the de-aggregated MPDUs. We need to turn off the
		 * AMSDU bit in the QoS control ourselves.
		 */
		if ((desc->mac_flags2 & IWL_RX_MPDU_MFLG2_AMSDU) &&
		    !WARN_ON(!ieee80211_is_data_qos(hdr->frame_control))) {
			u8 *qc = ieee80211_get_qos_ctl(hdr);

			*qc &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
		}
		if (baid != IWL_RX_REORDER_DATA_INVALID_BAID)
			iwl_mvm_agg_rx_received(mvm, baid);
	}

	/* Set up the HT phy flags */
	switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) {
	case RATE_MCS_CHAN_WIDTH_20:
		break;
	case RATE_MCS_CHAN_WIDTH_40:
		rx_status->flag |= RX_FLAG_40MHZ;
		break;
	case RATE_MCS_CHAN_WIDTH_80:
		rx_status->vht_flag |= RX_VHT_FLAG_80MHZ;
		break;
	case RATE_MCS_CHAN_WIDTH_160:
		rx_status->vht_flag |= RX_VHT_FLAG_160MHZ;
		break;
	}
	if (rate_n_flags & RATE_MCS_SGI_MSK)
		rx_status->flag |= RX_FLAG_SHORT_GI;
	if (rate_n_flags & RATE_HT_MCS_GF_MSK)
		rx_status->flag |= RX_FLAG_HT_GF;
	if (rate_n_flags & RATE_MCS_LDPC_MSK)
		rx_status->flag |= RX_FLAG_LDPC;
	if (rate_n_flags & RATE_MCS_HT_MSK) {
		u8 stbc = (rate_n_flags & RATE_MCS_HT_STBC_MSK) >>
				RATE_MCS_STBC_POS;
		rx_status->flag |= RX_FLAG_HT;
		rx_status->rate_idx = rate_n_flags & RATE_HT_MCS_INDEX_MSK;
		rx_status->flag |= stbc << RX_FLAG_STBC_SHIFT;
	} else if (rate_n_flags & RATE_MCS_VHT_MSK) {
		u8 stbc = (rate_n_flags & RATE_MCS_VHT_STBC_MSK) >>
				RATE_MCS_STBC_POS;
		rx_status->vht_nss =
			((rate_n_flags & RATE_VHT_MCS_NSS_MSK) >>
						RATE_VHT_MCS_NSS_POS) + 1;
		rx_status->rate_idx = rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK;
		rx_status->flag |= RX_FLAG_VHT;
		rx_status->flag |= stbc << RX_FLAG_STBC_SHIFT;
		if (rate_n_flags & RATE_MCS_BF_MSK)
			rx_status->vht_flag |= RX_VHT_FLAG_BF;
	} else {
		rx_status->rate_idx =
			iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags,
							    rx_status->band);
	}

	/* management stuff on default queue */
	if (!queue) {
		if (unlikely((ieee80211_is_beacon(hdr->frame_control) ||
			      ieee80211_is_probe_resp(hdr->frame_control)) &&
			     mvm->sched_scan_pass_all ==
			     SCHED_SCAN_PASS_ALL_ENABLED))
			mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_FOUND;

		if (unlikely(ieee80211_is_beacon(hdr->frame_control) ||
			     ieee80211_is_probe_resp(hdr->frame_control)))
			rx_status->boottime_ns = ktime_get_boot_ns();
	}

	iwl_mvm_create_skb(skb, hdr, len, crypt_len, rxb);
	if (!iwl_mvm_reorder(mvm, napi, queue, sta, skb, desc))
		iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb, queue, sta);

	rcu_read_unlock();
}
void iwl_mvm_rx_frame_release(struct iwl_mvm *mvm, struct napi_struct *napi,
			      struct iwl_rx_cmd_buffer *rxb, int queue)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_frame_release *release = (void *)pkt->data;
	struct ieee80211_sta *sta;
	struct iwl_mvm_reorder_buffer *reorder_buf;
	struct iwl_mvm_baid_data *ba_data;

	int baid = release->baid;

	if (WARN_ON_ONCE(baid == IWL_RX_REORDER_DATA_INVALID_BAID))
		return;

	rcu_read_lock();

	ba_data = rcu_dereference(mvm->baid_map[baid]);
	if (WARN_ON_ONCE(!ba_data))
		goto out;

	sta = rcu_dereference(mvm->fw_id_to_mac_id[ba_data->sta_id]);
	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
		goto out;

	reorder_buf = &ba_data->reorder_buf[queue];

	spin_lock_bh(&reorder_buf->lock);
	iwl_mvm_release_frames(mvm, sta, napi, reorder_buf,
			       le16_to_cpu(release->nssn));
	spin_unlock_bh(&reorder_buf->lock);

out:
	rcu_read_unlock();
}