Merge tag 'iwlwifi-next-for-kalle-2016-07-01' of git://git.kernel.org/pub/scm/linux...
[cascardo/linux.git] / drivers / net / wireless / intel / iwlwifi / mvm / rxmq.c
index 9a54f2d..d13397a 100644 (file)
 #include "fw-api.h"
 #include "fw-dbg.h"
 
-void iwl_mvm_rx_phy_cmd_mq(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
-{
-       mvm->ampdu_ref++;
-
-#ifdef CONFIG_IWLWIFI_DEBUGFS
-       if (mvm->last_phy_info.phy_flags & cpu_to_le16(RX_RES_PHY_FLAGS_AGG)) {
-               spin_lock(&mvm->drv_stats_lock);
-               mvm->drv_rx_stats.ampdu_count++;
-               spin_unlock(&mvm->drv_stats_lock);
-       }
-#endif
-}
-
 static inline int iwl_mvm_check_pn(struct iwl_mvm *mvm, struct sk_buff *skb,
                                   int queue, struct ieee80211_sta *sta)
 {
@@ -210,7 +197,7 @@ static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm,
        if (iwl_mvm_check_pn(mvm, skb, queue, sta))
                kfree_skb(skb);
        else
-               ieee80211_rx_napi(mvm->hw, skb, napi);
+               ieee80211_rx_napi(mvm->hw, sta, skb, napi);
 }
 
 static void iwl_mvm_get_signal_strength(struct iwl_mvm *mvm,
@@ -294,10 +281,15 @@ static void iwl_mvm_rx_csum(struct ieee80211_sta *sta,
 {
        struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif);
+       u16 flags = le16_to_cpu(desc->l3l4_flags);
+       u8 l3_prot = (u8)((flags & IWL_RX_L3L4_L3_PROTO_MASK) >>
+                         IWL_RX_L3_PROTO_POS);
 
        if (mvmvif->features & NETIF_F_RXCSUM &&
-           desc->l3l4_flags & cpu_to_le16(IWL_RX_L3L4_IP_HDR_CSUM_OK) &&
-           desc->l3l4_flags & cpu_to_le16(IWL_RX_L3L4_TCP_UDP_CSUM_OK))
+           flags & IWL_RX_L3L4_TCP_UDP_CSUM_OK &&
+           (flags & IWL_RX_L3L4_IP_HDR_CSUM_OK ||
+            l3_prot == IWL_RX_L3_TYPE_IPV6 ||
+            l3_prot == IWL_RX_L3_TYPE_IPV6_FRAG))
                skb->ip_summed = CHECKSUM_UNNECESSARY;
 }
 
@@ -390,6 +382,150 @@ int iwl_mvm_notify_rx_queue(struct iwl_mvm *mvm, u32 rxq_mask,
        return ret;
 }
 
+/*
+ * Returns true if sn2 - buffer_size < sn1 < sn2.
+ * To be used only in order to compare reorder buffer head with NSSN.
+ * We fully trust NSSN unless it is behind us due to reorder timeout.
+ * Reorder timeout can only bring us up to buffer_size SNs ahead of NSSN.
+ *
+ * NOTE(review): the lower bound is in fact inclusive — sn1 == sn2 - buffer_size
+ * also returns true. Assumes ieee80211_sn_less() does modular SN comparison.
+ */
+static bool iwl_mvm_is_sn_less(u16 sn1, u16 sn2, u16 buffer_size)
+{
+       return ieee80211_sn_less(sn1, sn2) &&
+              !ieee80211_sn_less(sn1, sn2 - buffer_size);
+}
+
+#define RX_REORDER_BUF_TIMEOUT_MQ (HZ / 10)
+
+/*
+ * Release all stored frames with SN up to (but not including) @nssn to
+ * mac80211, advance the reorder buffer head to @nssn, and re-arm (or delete)
+ * the per-queue reorder timer depending on whether frames remain stored.
+ *
+ * Caller must hold reorder_buf->lock (asserted below). @napi may be NULL
+ * when called from timer/notification context rather than the RX path.
+ */
+static void iwl_mvm_release_frames(struct iwl_mvm *mvm,
+                                  struct ieee80211_sta *sta,
+                                  struct napi_struct *napi,
+                                  struct iwl_mvm_reorder_buffer *reorder_buf,
+                                  u16 nssn)
+{
+       u16 ssn = reorder_buf->head_sn;
+
+       lockdep_assert_held(&reorder_buf->lock);
+
+       /* ignore nssn smaller than head sn - this can happen due to timeout */
+       if (iwl_mvm_is_sn_less(nssn, ssn, reorder_buf->buf_size))
+               return;
+
+       while (iwl_mvm_is_sn_less(ssn, nssn, reorder_buf->buf_size)) {
+               int index = ssn % reorder_buf->buf_size;
+               struct sk_buff_head *skb_list = &reorder_buf->entries[index];
+               struct sk_buff *skb;
+
+               ssn = ieee80211_sn_inc(ssn);
+
+               /* holes are valid since nssn indicates frames were received. */
+               if (skb_queue_empty(skb_list) || !skb_peek_tail(skb_list))
+                       continue;
+               /* Empty the list. Will have more than one frame for A-MSDU */
+               while ((skb = __skb_dequeue(skb_list))) {
+                       iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb,
+                                                       reorder_buf->queue,
+                                                       sta);
+                       reorder_buf->num_stored--;
+               }
+       }
+       reorder_buf->head_sn = nssn;
+
+       if (reorder_buf->num_stored && !reorder_buf->removed) {
+               u16 index = reorder_buf->head_sn % reorder_buf->buf_size;
+
+               /* find the oldest remaining frame to time the next expiry on */
+               while (!skb_peek_tail(&reorder_buf->entries[index]))
+                       index = (index + 1) % reorder_buf->buf_size;
+               /* modify timer to match next frame's expiration time */
+               mod_timer(&reorder_buf->reorder_timer,
+                         reorder_buf->reorder_time[index] + 1 +
+                         RX_REORDER_BUF_TIMEOUT_MQ);
+       } else {
+               del_timer(&reorder_buf->reorder_timer);
+       }
+}
+
+/*
+ * Reorder-buffer timeout handler. Scans the buffer from head_sn for entries
+ * stored longer than RX_REORDER_BUF_TIMEOUT_MQ; releases everything up to and
+ * including the last expired frame, or re-arms the timer on the first
+ * unexpired stored frame. @data is the iwl_mvm_reorder_buffer pointer
+ * (pre-timer_setup() style timer callback).
+ */
+void iwl_mvm_reorder_timer_expired(unsigned long data)
+{
+       struct iwl_mvm_reorder_buffer *buf = (void *)data;
+       int i;
+       u16 sn = 0, index = 0;
+       bool expired = false;
+
+       spin_lock_bh(&buf->lock);
+
+       /* nothing to do if empty or the BA session is being torn down */
+       if (!buf->num_stored || buf->removed) {
+               spin_unlock_bh(&buf->lock);
+               return;
+       }
+
+       for (i = 0; i < buf->buf_size ; i++) {
+               index = (buf->head_sn + i) % buf->buf_size;
+
+               if (!skb_peek_tail(&buf->entries[index]))
+                       continue;
+               if (!time_after(jiffies, buf->reorder_time[index] +
+                               RX_REORDER_BUF_TIMEOUT_MQ))
+                       break;
+               expired = true;
+               sn = ieee80211_sn_add(buf->head_sn, i + 1);
+       }
+
+       if (expired) {
+               struct ieee80211_sta *sta;
+
+               rcu_read_lock();
+               sta = rcu_dereference(buf->mvm->fw_id_to_mac_id[buf->sta_id]);
+               /* SN is set to the last expired frame + 1 */
+               iwl_mvm_release_frames(buf->mvm, sta, NULL, buf, sn);
+               rcu_read_unlock();
+       } else if (buf->num_stored) {
+               /*
+                * If no frame expired and there are stored frames, index is now
+                * pointing to the first unexpired frame - modify timer
+                * accordingly to this frame.
+                */
+               mod_timer(&buf->reorder_timer,
+                         buf->reorder_time[index] +
+                         1 + RX_REORDER_BUF_TIMEOUT_MQ);
+       }
+       spin_unlock_bh(&buf->lock);
+}
+
+/*
+ * Handle a DEL_BA internal RX-queue sync notification on this queue:
+ * flush the whole per-queue reorder buffer of the torn-down BA session to
+ * the stack (by releasing up to head_sn + buf_size) and stop its timer.
+ */
+static void iwl_mvm_del_ba(struct iwl_mvm *mvm, int queue,
+                          struct iwl_mvm_delba_data *data)
+{
+       struct iwl_mvm_baid_data *ba_data;
+       struct ieee80211_sta *sta;
+       struct iwl_mvm_reorder_buffer *reorder_buf;
+       u8 baid = data->baid;
+
+       if (WARN_ON_ONCE(baid >= IWL_RX_REORDER_DATA_INVALID_BAID))
+               return;
+
+       rcu_read_lock();
+
+       ba_data = rcu_dereference(mvm->baid_map[baid]);
+       if (WARN_ON_ONCE(!ba_data))
+               goto out;
+
+       sta = rcu_dereference(mvm->fw_id_to_mac_id[ba_data->sta_id]);
+       if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
+               goto out;
+
+       reorder_buf = &ba_data->reorder_buf[queue];
+
+       /* release all frames that are in the reorder buffer to the stack */
+       spin_lock_bh(&reorder_buf->lock);
+       iwl_mvm_release_frames(mvm, sta, NULL, reorder_buf,
+                              ieee80211_sn_add(reorder_buf->head_sn,
+                                               reorder_buf->buf_size));
+       spin_unlock_bh(&reorder_buf->lock);
+       del_timer_sync(&reorder_buf->reorder_timer);
+
+out:
+       rcu_read_unlock();
+}
+
 void iwl_mvm_rx_queue_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
                            int queue)
 {
@@ -400,15 +536,198 @@ void iwl_mvm_rx_queue_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
        notif = (void *)pkt->data;
        internal_notif = (void *)notif->payload;
 
+       if (internal_notif->sync) {
+               if (mvm->queue_sync_cookie != internal_notif->cookie) {
+                       WARN_ONCE(1,
+                                 "Received expired RX queue sync message\n");
+                       return;
+               }
+               atomic_dec(&mvm->queue_sync_counter);
+       }
+
        switch (internal_notif->type) {
+       case IWL_MVM_RXQ_EMPTY:
+               break;
        case IWL_MVM_RXQ_NOTIF_DEL_BA:
-               /* TODO */
+               iwl_mvm_del_ba(mvm, queue, (void *)internal_notif->data);
                break;
        default:
                WARN_ONCE(1, "Invalid identifier %d", internal_notif->type);
        }
 }
 
+/*
+ * Run the RX BA reorder logic for one MPDU.
+ * Returns true if the MPDU was buffered/dropped (consumed here), false if it
+ * should be passed to the upper layer by the caller.
+ * Caller (iwl_mvm_rx_mpdu_mq) holds rcu_read_lock() around this call, which
+ * protects the baid_map dereference below.
+ */
+static bool iwl_mvm_reorder(struct iwl_mvm *mvm,
+                           struct napi_struct *napi,
+                           int queue,
+                           struct ieee80211_sta *sta,
+                           struct sk_buff *skb,
+                           struct iwl_rx_mpdu_desc *desc)
+{
+       struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+       struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
+       struct iwl_mvm_baid_data *baid_data;
+       struct iwl_mvm_reorder_buffer *buffer;
+       struct sk_buff *tail;
+       u32 reorder = le32_to_cpu(desc->reorder_data);
+       bool amsdu = desc->mac_flags2 & IWL_RX_MPDU_MFLG2_AMSDU;
+       bool last_subframe =
+               desc->amsdu_info & IWL_RX_MPDU_AMSDU_LAST_SUBFRAME;
+       u8 tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
+       u8 sub_frame_idx = desc->amsdu_info &
+                          IWL_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK;
+       int index;
+       u16 nssn, sn;
+       u8 baid;
+
+       baid = (reorder & IWL_RX_MPDU_REORDER_BAID_MASK) >>
+               IWL_RX_MPDU_REORDER_BAID_SHIFT;
+
+       /* frame not part of a BA session - nothing to reorder */
+       if (baid == IWL_RX_REORDER_DATA_INVALID_BAID)
+               return false;
+
+       /* no sta yet */
+       if (WARN_ON(IS_ERR_OR_NULL(sta)))
+               return false;
+
+       /* not a data packet */
+       if (!ieee80211_is_data_qos(hdr->frame_control) ||
+           is_multicast_ether_addr(hdr->addr1))
+               return false;
+
+       if (unlikely(!ieee80211_is_data_present(hdr->frame_control)))
+               return false;
+
+       baid_data = rcu_dereference(mvm->baid_map[baid]);
+       if (WARN(!baid_data,
+                "Received baid %d, but no data exists for this BAID\n", baid))
+               return false;
+       if (WARN(tid != baid_data->tid || mvm_sta->sta_id != baid_data->sta_id,
+                "baid 0x%x is mapped to sta:%d tid:%d, but was received for sta:%d tid:%d\n",
+                baid, baid_data->sta_id, baid_data->tid, mvm_sta->sta_id,
+                tid))
+               return false;
+
+       nssn = reorder & IWL_RX_MPDU_REORDER_NSSN_MASK;
+       sn = (reorder & IWL_RX_MPDU_REORDER_SN_MASK) >>
+               IWL_RX_MPDU_REORDER_SN_SHIFT;
+
+       buffer = &baid_data->reorder_buf[queue];
+
+       spin_lock_bh(&buffer->lock);
+
+       /*
+        * If there was a significant jump in the nssn - adjust.
+        * If the SN is smaller than the NSSN it might need to first go into
+        * the reorder buffer, in which case we just release up to it and the
+        * rest of the function will take care of storing it and releasing up
+        * to the nssn
+        */
+       if (!iwl_mvm_is_sn_less(nssn, buffer->head_sn + buffer->buf_size,
+                               buffer->buf_size)) {
+               u16 min_sn = ieee80211_sn_less(sn, nssn) ? sn : nssn;
+
+               iwl_mvm_release_frames(mvm, sta, napi, buffer, min_sn);
+       }
+
+       /* drop any outdated packets */
+       if (ieee80211_sn_less(sn, buffer->head_sn))
+               goto drop;
+
+       /* release immediately if allowed by nssn and no stored frames */
+       if (!buffer->num_stored && ieee80211_sn_less(sn, nssn)) {
+               if (iwl_mvm_is_sn_less(buffer->head_sn, nssn,
+                                      buffer->buf_size) &&
+                  (!amsdu || last_subframe))
+                       buffer->head_sn = nssn;
+               /* No need to update AMSDU last SN - we are moving the head */
+               spin_unlock_bh(&buffer->lock);
+               return false;
+       }
+
+       index = sn % buffer->buf_size;
+
+       /*
+        * Check if we already stored this frame
+        * As AMSDU is either received or not as whole, logic is simple:
+        * If we have frames in that position in the buffer and the last frame
+        * originated from AMSDU had a different SN then it is a retransmission.
+        * If it is the same SN then if the subframe index is incrementing it
+        * is the same AMSDU - otherwise it is a retransmission.
+        */
+       tail = skb_peek_tail(&buffer->entries[index]);
+       if (tail && !amsdu)
+               goto drop;
+       else if (tail && (sn != buffer->last_amsdu ||
+                         buffer->last_sub_index >= sub_frame_idx))
+               goto drop;
+
+       /* put in reorder buffer */
+       __skb_queue_tail(&buffer->entries[index], skb);
+       buffer->num_stored++;
+       buffer->reorder_time[index] = jiffies;
+
+       if (amsdu) {
+               buffer->last_amsdu = sn;
+               buffer->last_sub_index = sub_frame_idx;
+       }
+
+       /*
+        * We cannot trust NSSN for AMSDU sub-frames that are not the last.
+        * The reason is that NSSN advances on the first sub-frame, and may
+        * cause the reorder buffer to advance before all the sub-frames arrive.
+        * Example: reorder buffer contains SN 0 & 2, and we receive AMSDU with
+        * SN 1. NSSN for first sub frame will be 3 with the result of driver
+        * releasing SN 0,1, 2. When sub-frame 1 arrives - reorder buffer is
+        * already ahead and it will be dropped.
+        * If the last sub-frame is not on this queue - we will get frame
+        * release notification with up to date NSSN.
+        */
+       if (!amsdu || last_subframe)
+               iwl_mvm_release_frames(mvm, sta, napi, buffer, nssn);
+
+       spin_unlock_bh(&buffer->lock);
+       return true;
+
+drop:
+       kfree_skb(skb);
+       spin_unlock_bh(&buffer->lock);
+       return true;
+}
+
+/*
+ * Keep the BA session for @baid alive by refreshing its last_rx timestamp.
+ * The update is rate-limited to once per session timeout (see comment below)
+ * so RX queues don't fight over the cache line on every frame.
+ */
+static void iwl_mvm_agg_rx_received(struct iwl_mvm *mvm, u8 baid)
+{
+       unsigned long now = jiffies;
+       unsigned long timeout;
+       struct iwl_mvm_baid_data *data;
+
+       rcu_read_lock();
+
+       data = rcu_dereference(mvm->baid_map[baid]);
+       if (WARN_ON(!data))
+               goto out;
+
+       /* timeout of 0 means the session never expires - nothing to refresh */
+       if (!data->timeout)
+               goto out;
+
+       timeout = data->timeout;
+       /*
+        * Do not update last rx all the time to avoid cache bouncing
+        * between the rx queues.
+        * Update it every timeout. Worst case is the session will
+        * expire after ~ 2 * timeout, which doesn't matter that much.
+        */
+       if (time_before(data->last_rx + TU_TO_JIFFIES(timeout), now))
+               /* Update is atomic */
+               data->last_rx = now;
+
+out:
+       rcu_read_unlock();
+}
+
 void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
                        struct iwl_rx_cmd_buffer *rxb, int queue)
 {
@@ -418,6 +737,7 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
        struct ieee80211_hdr *hdr = (void *)(pkt->data + sizeof(*desc));
        u32 len = le16_to_cpu(desc->mpdu_len);
        u32 rate_n_flags = le32_to_cpu(desc->rate_n_flags);
+       u16 phy_info = le16_to_cpu(desc->phy_info);
        struct ieee80211_sta *sta = NULL;
        struct sk_buff *skb;
        u8 crypt_len = 0;
@@ -448,16 +768,34 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
                             le16_to_cpu(desc->status));
                rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
        }
-
-       rx_status->mactime = le64_to_cpu(desc->tsf_on_air_rise);
+       /* set the preamble flag if appropriate */
+       if (phy_info & IWL_RX_MPDU_PHY_SHORT_PREAMBLE)
+               rx_status->flag |= RX_FLAG_SHORTPRE;
+
+       if (likely(!(phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD))) {
+               rx_status->mactime = le64_to_cpu(desc->tsf_on_air_rise);
+               /* TSF as indicated by the firmware is at INA time */
+               rx_status->flag |= RX_FLAG_MACTIME_PLCP_START;
+       }
        rx_status->device_timestamp = le32_to_cpu(desc->gp2_on_air_rise);
-       rx_status->band = desc->channel > 14 ? IEEE80211_BAND_5GHZ :
-                                              IEEE80211_BAND_2GHZ;
+       rx_status->band = desc->channel > 14 ? NL80211_BAND_5GHZ :
+                                              NL80211_BAND_2GHZ;
        rx_status->freq = ieee80211_channel_to_frequency(desc->channel,
                                                         rx_status->band);
        iwl_mvm_get_signal_strength(mvm, desc, rx_status);
-       /* TSF as indicated by the firmware is at INA time */
-       rx_status->flag |= RX_FLAG_MACTIME_PLCP_START;
+
+       /* update aggregation data for monitor sake on default queue */
+       if (!queue && (phy_info & IWL_RX_MPDU_PHY_AMPDU)) {
+               bool toggle_bit = phy_info & IWL_RX_MPDU_PHY_AMPDU_TOGGLE;
+
+               rx_status->flag |= RX_FLAG_AMPDU_DETAILS;
+               rx_status->ampdu_reference = mvm->ampdu_ref;
+               /* toggle is switched whenever new aggregation starts */
+               if (toggle_bit != mvm->ampdu_toggle) {
+                       mvm->ampdu_ref++;
+                       mvm->ampdu_toggle = toggle_bit;
+               }
+       }
 
        rcu_read_lock();
 
@@ -479,14 +817,26 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
 
        if (sta) {
                struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+               struct ieee80211_vif *tx_blocked_vif =
+                       rcu_dereference(mvm->csa_tx_blocked_vif);
+               u8 baid = (u8)((le32_to_cpu(desc->reorder_data) &
+                              IWL_RX_MPDU_REORDER_BAID_MASK) >>
+                              IWL_RX_MPDU_REORDER_BAID_SHIFT);
 
                /*
                 * We have tx blocked stations (with CS bit). If we heard
                 * frames from a blocked station on a new channel we can
                 * TX to it again.
                 */
-               if (unlikely(mvm->csa_tx_block_bcn_timeout))
-                       iwl_mvm_sta_modify_disable_tx_ap(mvm, sta, false);
+               if (unlikely(tx_blocked_vif) &&
+                   tx_blocked_vif == mvmsta->vif) {
+                       struct iwl_mvm_vif *mvmvif =
+                               iwl_mvm_vif_from_mac80211(tx_blocked_vif);
+
+                       if (mvmvif->csa_target_freq == rx_status->freq)
+                               iwl_mvm_sta_modify_disable_tx_ap(mvm, sta,
+                                                                false);
+               }
 
                rs_update_last_rssi(mvm, &mvmsta->lq_sta, rx_status);
 
@@ -509,8 +859,6 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
                                iwl_mvm_fw_dbg_collect_trig(mvm, trig, NULL);
                }
 
-               /* TODO: multi queue TCM */
-
                if (ieee80211_is_data(hdr->frame_control))
                        iwl_mvm_rx_csum(sta, skb, desc);
 
@@ -531,16 +879,10 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
 
                        *qc &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
                }
+               if (baid != IWL_RX_REORDER_DATA_INVALID_BAID)
+                       iwl_mvm_agg_rx_received(mvm, baid);
        }
 
-       /*
-        * TODO: PHY info.
-        * Verify we don't have the information in the MPDU descriptor and
-        * that it is not needed.
-        * Make sure for monitor mode that we are on default queue, update
-        * ampdu_ref and the rest of phy info then
-        */
-
        /* Set up the HT phy flags */
        switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) {
        case RATE_MCS_CHAN_WIDTH_20:
@@ -584,16 +926,56 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
                                                            rx_status->band);
        }
 
-       /* TODO: PHY info - update ampdu queue statistics (for debugfs) */
-       /* TODO: PHY info - gscan */
+       /* management stuff on default queue */
+       if (!queue) {
+               if (unlikely((ieee80211_is_beacon(hdr->frame_control) ||
+                             ieee80211_is_probe_resp(hdr->frame_control)) &&
+                            mvm->sched_scan_pass_all ==
+                            SCHED_SCAN_PASS_ALL_ENABLED))
+                       mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_FOUND;
+
+               if (unlikely(ieee80211_is_beacon(hdr->frame_control) ||
+                            ieee80211_is_probe_resp(hdr->frame_control)))
+                       rx_status->boottime_ns = ktime_get_boot_ns();
+       }
 
        iwl_mvm_create_skb(skb, hdr, len, crypt_len, rxb);
-       iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb, queue, sta);
+       if (!iwl_mvm_reorder(mvm, napi, queue, sta, skb, desc))
+               iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb, queue, sta);
        rcu_read_unlock();
 }
 
-void iwl_mvm_rx_frame_release(struct iwl_mvm *mvm,
+void iwl_mvm_rx_frame_release(struct iwl_mvm *mvm, struct napi_struct *napi,
                              struct iwl_rx_cmd_buffer *rxb, int queue)
 {
-       /* TODO */
+       /*
+        * Handle a firmware FRAME_RELEASE notification: look up the BA
+        * session for the released BAID and flush this queue's reorder
+        * buffer up to the NSSN supplied by the firmware.
+        */
+       struct iwl_rx_packet *pkt = rxb_addr(rxb);
+       struct iwl_frame_release *release = (void *)pkt->data;
+       struct ieee80211_sta *sta;
+       struct iwl_mvm_reorder_buffer *reorder_buf;
+       struct iwl_mvm_baid_data *ba_data;
+
+       int baid = release->baid;
+
+       if (WARN_ON_ONCE(baid == IWL_RX_REORDER_DATA_INVALID_BAID))
+               return;
+
+       rcu_read_lock();
+
+       ba_data = rcu_dereference(mvm->baid_map[baid]);
+       if (WARN_ON_ONCE(!ba_data))
+               goto out;
+
+       sta = rcu_dereference(mvm->fw_id_to_mac_id[ba_data->sta_id]);
+       if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
+               goto out;
+
+       reorder_buf = &ba_data->reorder_buf[queue];
+
+       spin_lock_bh(&reorder_buf->lock);
+       iwl_mvm_release_frames(mvm, sta, napi, reorder_buf,
+                              le16_to_cpu(release->nssn));
+       spin_unlock_bh(&reorder_buf->lock);
+
+out:
+       rcu_read_unlock();
 }