3 * This file is part of wlcore
5 * Copyright (C) 2008-2010 Nokia Corporation
6 * Copyright (C) 2011-2013 Texas Instruments Inc.
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * version 2 as published by the Free Software Foundation.
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
24 #include <linux/module.h>
25 #include <linux/firmware.h>
26 #include <linux/etherdevice.h>
27 #include <linux/vmalloc.h>
28 #include <linux/wl12xx.h>
29 #include <linux/interrupt.h>
33 #include "wl12xx_80211.h"
44 #define WL1271_BOOT_RETRIES 3
/*
 * Module parameters. A value of -1 (or NULL for fwlog_param) means "not set
 * on the command line" — wlcore_adjust_conf() below only overrides the
 * driver's conf defaults for parameters that were explicitly given.
 * NOTE(review): this is an elided listing; intermediate source lines
 * (module_param() registrations, etc.) are missing between statements.
 */
46 static char *fwlog_param;
47 static int fwlog_mem_blocks = -1;
48 static int bug_on_recovery = -1;
49 static int no_recovery = -1;
/* Forward declarations for helpers defined later in this file. */
51 static void __wl1271_op_remove_interface(struct wl1271 *wl,
52 struct ieee80211_vif *vif,
53 bool reset_tx_queues);
54 static void wlcore_op_stop_locked(struct wl1271 *wl);
55 static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif);
/*
 * wl12xx_set_authorized - inform the FW that the associated STA link is
 * now authorized (sends a set-peer-state command for the station's HLID).
 * Only valid for STA-type BSS interfaces that are already associated; the
 * STA_STATE_SENT flag guards against sending the command twice.
 * NOTE(review): elided listing — declarations/returns between the visible
 * statements are missing.
 */
57 static int wl12xx_set_authorized(struct wl1271 *wl, struct wl12xx_vif *wlvif)
61 if (WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS))
64 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
/* test_and_set: returns early if the state was already sent to FW */
67 if (test_and_set_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags))
70 ret = wl12xx_cmd_set_peer_state(wl, wlvif, wlvif->sta.hlid);
74 wl1271_info("Association completed.");
/*
 * wl1271_reg_notify - cfg80211 regulatory-change notifier.
 * Walks the 5 GHz band and forces radar channels to passive scanning /
 * no-IBSS, then pushes the resulting regulatory domain to the FW via
 * wlcore_regdomain_config().
 */
78 static void wl1271_reg_notify(struct wiphy *wiphy,
79 struct regulatory_request *request)
81 struct ieee80211_supported_band *band;
82 struct ieee80211_channel *ch;
84 struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
85 struct wl1271 *wl = hw->priv;
/* only the 5 GHz band is scanned here; DFS/radar rules apply there */
87 band = wiphy->bands[IEEE80211_BAND_5GHZ];
88 for (i = 0; i < band->n_channels; i++) {
89 ch = &band->channels[i];
90 if (ch->flags & IEEE80211_CHAN_DISABLED)
/* radar channels must not be used actively: mark passive/no-IBSS */
93 if (ch->flags & IEEE80211_CHAN_RADAR)
94 ch->flags |= IEEE80211_CHAN_NO_IBSS |
95 IEEE80211_CHAN_PASSIVE_SCAN;
99 wlcore_regdomain_config(wl);
/*
 * wl1271_set_rx_streaming - enable/disable FW RX-streaming for a vif and
 * keep the WLVIF_FLAG_RX_STREAMING_STARTED flag in sync with the result.
 * Caller must hold wl->mutex (see comment below).
 */
102 static int wl1271_set_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif,
107 /* we should hold wl->mutex */
108 ret = wl1271_acx_ps_rx_streaming(wl, wlvif, enable);
/* on success, mirror the requested state into the vif flags */
113 set_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
115 clear_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
121 * this function is being called when the rx_streaming interval
122 * has beed changed or rx_streaming should be disabled
124 int wl1271_recalc_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif)
127 int period = wl->conf.rx_streaming.interval;
129 /* don't reconfigure if rx_streaming is disabled */
130 if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
133 /* reconfigure/disable according to new streaming_period */
/* keep streaming only while associated AND (always-on or soft-gemini) */
135 test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
136 (wl->conf.rx_streaming.always ||
137 test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
138 ret = wl1271_set_rx_streaming(wl, wlvif, true);
140 ret = wl1271_set_rx_streaming(wl, wlvif, false);
141 /* don't cancel_work_sync since we might deadlock */
142 del_timer_sync(&wlvif->rx_streaming_timer);
/*
 * wl1271_rx_streaming_enable_work - deferred work that turns RX streaming
 * on for a vif, then arms rx_streaming_timer to turn it back off after
 * conf.rx_streaming.duration ms of inactivity. Bails out if streaming is
 * already started, the STA is not associated, or the feature is disabled.
 */
148 static void wl1271_rx_streaming_enable_work(struct work_struct *work)
151 struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
152 rx_streaming_enable_work);
153 struct wl1271 *wl = wlvif->wl;
155 mutex_lock(&wl->mutex);
157 if (test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags) ||
158 !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) ||
159 (!wl->conf.rx_streaming.always &&
160 !test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
/* interval == 0 means RX streaming is administratively disabled */
163 if (!wl->conf.rx_streaming.interval)
166 ret = wl1271_ps_elp_wakeup(wl);
170 ret = wl1271_set_rx_streaming(wl, wlvif, true);
174 /* stop it after some time of inactivity */
175 mod_timer(&wlvif->rx_streaming_timer,
176 jiffies + msecs_to_jiffies(wl->conf.rx_streaming.duration));
179 wl1271_ps_elp_sleep(wl);
181 mutex_unlock(&wl->mutex);
/*
 * wl1271_rx_streaming_disable_work - deferred work that turns RX streaming
 * off again (queued by wl1271_rx_streaming_timer below). No-op if
 * streaming was never started.
 */
184 static void wl1271_rx_streaming_disable_work(struct work_struct *work)
187 struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
188 rx_streaming_disable_work);
189 struct wl1271 *wl = wlvif->wl;
191 mutex_lock(&wl->mutex);
193 if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
196 ret = wl1271_ps_elp_wakeup(wl);
200 ret = wl1271_set_rx_streaming(wl, wlvif, false);
205 wl1271_ps_elp_sleep(wl);
207 mutex_unlock(&wl->mutex);
/*
 * wl1271_rx_streaming_timer - timer callback; runs in atomic context, so it
 * only queues the disable work rather than touching the FW directly.
 */
210 static void wl1271_rx_streaming_timer(unsigned long data)
212 struct wl12xx_vif *wlvif = (struct wl12xx_vif *)data;
213 struct wl1271 *wl = wlvif->wl;
214 ieee80211_queue_work(wl->hw, &wlvif->rx_streaming_disable_work);
217 /* wl->mutex must be taken */
218 void wl12xx_rearm_tx_watchdog_locked(struct wl1271 *wl)
220 /* if the watchdog is not armed, don't do anything */
/* tx_allocated_blocks == 0 means no TX is in flight in the FW */
221 if (wl->tx_allocated_blocks == 0)
/* re-arm: cancel any pending run and queue a fresh timeout period */
224 cancel_delayed_work(&wl->tx_watchdog_work);
225 ieee80211_queue_delayed_work(wl->hw, &wl->tx_watchdog_work,
226 msecs_to_jiffies(wl->conf.tx.tx_watchdog_timeout));
/*
 * wl12xx_tx_watchdog_work - delayed work that fires when no TX completion
 * has been seen for tx_watchdog_timeout ms. Benign cases (ROC in progress,
 * scan in progress, AP with connected stations) just re-arm the watchdog;
 * otherwise TX is considered stuck in the FW and recovery is triggered.
 */
229 static void wl12xx_tx_watchdog_work(struct work_struct *work)
231 struct delayed_work *dwork;
234 dwork = container_of(work, struct delayed_work, work);
235 wl = container_of(dwork, struct wl1271, tx_watchdog_work);
237 mutex_lock(&wl->mutex);
239 if (unlikely(wl->state != WLCORE_STATE_ON))
242 /* Tx went out in the meantime - everything is ok */
243 if (unlikely(wl->tx_allocated_blocks == 0))
247 * if a ROC is in progress, we might not have any Tx for a long
248 * time (e.g. pending Tx on the non-ROC channels)
/* any bit set in roc_map means a remain-on-channel is active */
250 if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
251 wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to ROC",
252 wl->conf.tx.tx_watchdog_timeout);
253 wl12xx_rearm_tx_watchdog_locked(wl);
258 * if a scan is in progress, we might not have any Tx for a long
261 if (wl->scan.state != WL1271_SCAN_STATE_IDLE) {
262 wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to scan",
263 wl->conf.tx.tx_watchdog_timeout);
264 wl12xx_rearm_tx_watchdog_locked(wl);
269 * AP might cache a frame for a long time for a sleeping station,
270 * so rearm the timer if there's an AP interface with stations. If
271 * Tx is genuinely stuck we will most hopefully discover it when all
272 * stations are removed due to inactivity.
274 if (wl->active_sta_count) {
275 wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms. AP has "
277 wl->conf.tx.tx_watchdog_timeout,
278 wl->active_sta_count);
279 wl12xx_rearm_tx_watchdog_locked(wl);
/* no benign explanation found: declare TX stuck and recover the chip */
283 wl1271_error("Tx stuck (in FW) for %d ms. Starting recovery",
284 wl->conf.tx.tx_watchdog_timeout);
285 wl12xx_queue_recovery_work(wl);
288 mutex_unlock(&wl->mutex);
/*
 * wlcore_adjust_conf - apply optional module parameters on top of the
 * driver's default configuration. Each parameter is only applied when it
 * was explicitly set on the command line (-1 / NULL sentinel otherwise).
 */
291 static void wlcore_adjust_conf(struct wl1271 *wl)
293 /* Adjust settings according to optional module parameters */
295 /* Firmware Logger params */
296 if (fwlog_mem_blocks != -1) {
/* accept only values within the FW's min/max block range */
297 if (fwlog_mem_blocks >= CONF_FWLOG_MIN_MEM_BLOCKS &&
298 fwlog_mem_blocks <= CONF_FWLOG_MAX_MEM_BLOCKS) {
299 wl->conf.fwlog.mem_blocks = fwlog_mem_blocks;
302 "Illegal fwlog_mem_blocks=%d using default %d",
303 fwlog_mem_blocks, wl->conf.fwlog.mem_blocks);
/* fwlog_param selects the FW logger mode/output */
308 if (!strcmp(fwlog_param, "continuous")) {
309 wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
310 } else if (!strcmp(fwlog_param, "ondemand")) {
311 wl->conf.fwlog.mode = WL12XX_FWLOG_ON_DEMAND;
312 } else if (!strcmp(fwlog_param, "dbgpins")) {
313 wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
314 wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_DBG_PINS;
315 } else if (!strcmp(fwlog_param, "disable")) {
316 wl->conf.fwlog.mem_blocks = 0;
317 wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_NONE;
319 wl1271_error("Unknown fwlog parameter %s", fwlog_param);
323 if (bug_on_recovery != -1)
324 wl->conf.recovery.bug_on_recovery = (u8) bug_on_recovery;
326 if (no_recovery != -1)
327 wl->conf.recovery.no_recovery = (u8) no_recovery;
/*
 * wl12xx_irq_ps_regulate_link - start/stop host-side power-save handling
 * for one AP link (hlid) based on the FW's per-link PS bitmap and the
 * number of packets still queued for that link in the FW.
 */
330 static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl,
331 struct wl12xx_vif *wlvif,
/* fw_ps: FW says this link's station is in PS mode */
336 fw_ps = test_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);
339 * Wake up from high level PS if the STA is asleep with too little
340 * packets in FW or if the STA is awake.
342 if (!fw_ps || tx_pkts < WL1271_PS_STA_MAX_PACKETS)
343 wl12xx_ps_link_end(wl, wlvif, hlid);
346 * Start high-level PS if the STA is asleep with enough blocks in FW.
347 * Make an exception if this is the only connected link. In this
348 * case FW-memory congestion is less of a problem.
349 * Note that a single connected STA means 3 active links, since we must
350 * account for the global and broadcast AP links. The "fw_ps" check
351 * assures us the third link is a STA connected to the AP. Otherwise
352 * the FW would not set the PSM bit.
354 else if (wl->active_link_count > 3 && fw_ps &&
355 tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
356 wl12xx_ps_link_start(wl, wlvif, hlid, true);
/*
 * wl12xx_irq_update_links_status - refresh the cached FW per-link PS
 * bitmap from the FW status block, then re-regulate PS for every station
 * link on this AP vif.
 */
359 static void wl12xx_irq_update_links_status(struct wl1271 *wl,
360 struct wl12xx_vif *wlvif,
361 struct wl_fw_status_2 *status)
366 cur_fw_ps_map = le32_to_cpu(status->link_ps_bitmap);
367 if (wl->ap_fw_ps_map != cur_fw_ps_map) {
368 wl1271_debug(DEBUG_PSM,
369 "link ps prev 0x%x cur 0x%x changed 0x%x",
370 wl->ap_fw_ps_map, cur_fw_ps_map,
371 wl->ap_fw_ps_map ^ cur_fw_ps_map);
373 wl->ap_fw_ps_map = cur_fw_ps_map;
/* walk every allocated station HLID on this AP interface */
376 for_each_set_bit(hlid, wlvif->ap.sta_hlid_map, WL12XX_MAX_LINKS)
377 wl12xx_irq_ps_regulate_link(wl, wlvif, hlid,
378 wl->links[hlid].allocated_pkts);
/*
 * wlcore_fw_status - read the FW status block from the chip and update all
 * host-side TX accounting derived from it: per-queue and per-link freed
 * packet counters, total freed/available TX blocks, the TX watchdog, the
 * host/chip time offset and the fast-link bitmap. All FW counters are
 * small wrapping counters, hence the explicit wrap-around handling below.
 * NOTE(review): elided listing — some declarations and error paths are
 * missing between the visible statements.
 */
381 static int wlcore_fw_status(struct wl1271 *wl,
382 struct wl_fw_status_1 *status_1,
383 struct wl_fw_status_2 *status_2)
385 struct wl12xx_vif *wlvif;
387 u32 old_tx_blk_count = wl->tx_blocks_available;
388 int avail, freed_blocks;
392 struct wl1271_link *lnk;
/* status_1 is variable-length (depends on num_rx_desc); read both parts */
394 status_len = WLCORE_FW_STATUS_1_LEN(wl->num_rx_desc) +
395 sizeof(*status_2) + wl->fw_status_priv_len;
397 ret = wlcore_raw_read_data(wl, REG_RAW_FW_STATUS_ADDR, status_1,
402 wl1271_debug(DEBUG_IRQ, "intr: 0x%x (fw_rx_counter = %d, "
403 "drv_rx_counter = %d, tx_results_counter = %d)",
405 status_1->fw_rx_counter,
406 status_1->drv_rx_counter,
407 status_1->tx_results_counter);
409 for (i = 0; i < NUM_TX_QUEUES; i++) {
410 /* prevent wrap-around in freed-packets counter */
/* FW counter is 8-bit; mask the delta with 0xff */
411 wl->tx_allocated_pkts[i] -=
412 (status_2->counters.tx_released_pkts[i] -
413 wl->tx_pkts_freed[i]) & 0xff;
415 wl->tx_pkts_freed[i] = status_2->counters.tx_released_pkts[i];
/* same wrap-safe delta computation, but per link this time */
419 for_each_set_bit(i, wl->links_map, WL12XX_MAX_LINKS) {
423 /* prevent wrap-around in freed-packets counter */
424 diff = (status_2->counters.tx_lnk_free_pkts[i] -
425 lnk->prev_freed_pkts) & 0xff;
430 lnk->allocated_pkts -= diff;
431 lnk->prev_freed_pkts = status_2->counters.tx_lnk_free_pkts[i];
433 /* accumulate the prev_freed_pkts counter */
434 lnk->total_freed_pkts += diff;
437 /* prevent wrap-around in total blocks counter */
438 if (likely(wl->tx_blocks_freed <=
439 le32_to_cpu(status_2->total_released_blks)))
440 freed_blocks = le32_to_cpu(status_2->total_released_blks) -
/* 32-bit counter wrapped: add 2^32 to get the true delta */
443 freed_blocks = 0x100000000LL - wl->tx_blocks_freed +
444 le32_to_cpu(status_2->total_released_blks);
446 wl->tx_blocks_freed = le32_to_cpu(status_2->total_released_blks);
448 wl->tx_allocated_blocks -= freed_blocks;
451 * If the FW freed some blocks:
452 * If we still have allocated blocks - re-arm the timer, Tx is
453 * not stuck. Otherwise, cancel the timer (no Tx currently).
456 if (wl->tx_allocated_blocks)
457 wl12xx_rearm_tx_watchdog_locked(wl);
459 cancel_delayed_work(&wl->tx_watchdog_work);
462 avail = le32_to_cpu(status_2->tx_total) - wl->tx_allocated_blocks;
465 * The FW might change the total number of TX memblocks before
466 * we get a notification about blocks being released. Thus, the
467 * available blocks calculation might yield a temporary result
468 * which is lower than the actual available blocks. Keeping in
469 * mind that only blocks that were allocated can be moved from
470 * TX to RX, tx_blocks_available should never decrease here.
472 wl->tx_blocks_available = max((int)wl->tx_blocks_available,
475 /* if more blocks are available now, tx work can be scheduled */
476 if (wl->tx_blocks_available > old_tx_blk_count)
477 clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
479 /* for AP update num of allocated TX blocks per link and ps status */
480 wl12xx_for_each_wlvif_ap(wl, wlvif) {
481 wl12xx_irq_update_links_status(wl, wlvif, status_2);
484 /* update the host-chipset time offset */
486 wl->time_offset = (timespec_to_ns(&ts) >> 10) -
487 (s64)le32_to_cpu(status_2->fw_localtime);
489 wl->fw_fast_lnk_map = le32_to_cpu(status_2->link_fast_bitmap);
/*
 * wl1271_flush_deferred_work - drain both deferred skb queues: hand
 * received frames to mac80211 and report TX status for sent ones.
 * The *_ni variants indicate this runs in non-interrupt (process) context.
 */
494 static void wl1271_flush_deferred_work(struct wl1271 *wl)
498 /* Pass all received frames to the network stack */
499 while ((skb = skb_dequeue(&wl->deferred_rx_queue)))
500 ieee80211_rx_ni(wl->hw, skb);
502 /* Return sent skbs to the network stack */
503 while ((skb = skb_dequeue(&wl->deferred_tx_queue)))
504 ieee80211_tx_status_ni(wl->hw, skb);
/*
 * wl1271_netstack_work - work item that repeatedly flushes the deferred
 * queues until the RX queue stays empty (new frames may arrive while
 * flushing, hence the loop).
 */
507 static void wl1271_netstack_work(struct work_struct *work)
510 container_of(work, struct wl1271, netstack_work);
513 wl1271_flush_deferred_work(wl);
514 } while (skb_queue_len(&wl->deferred_rx_queue));
/* upper bound on IRQ servicing iterations per invocation */
517 #define WL1271_IRQ_MAX_LOOPS 256
/*
 * wlcore_irq_locked - main interrupt servicing loop; runs with wl->mutex
 * held. Reads the FW status, then dispatches on the interrupt cause bits:
 * watchdog (HW/SW) -> recovery, DATA -> RX + TX completion + deferred-queue
 * limiting, EVENT_A/B -> event mailbox handling. Loops until no more work
 * or WL1271_IRQ_MAX_LOOPS is hit (only one pass for edge-triggered IRQs).
 */
519 static int wlcore_irq_locked(struct wl1271 *wl)
523 int loopcount = WL1271_IRQ_MAX_LOOPS;
525 unsigned int defer_count;
529 * In case edge triggered interrupt must be used, we cannot iterate
530 * more than once without introducing race conditions with the hardirq.
532 if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ)
535 wl1271_debug(DEBUG_IRQ, "IRQ work");
537 if (unlikely(wl->state != WLCORE_STATE_ON))
540 ret = wl1271_ps_elp_wakeup(wl);
544 while (!done && loopcount--) {
546 * In order to avoid a race with the hardirq, clear the flag
547 * before acknowledging the chip. Since the mutex is held,
548 * wl1271_ps_elp_wakeup cannot be called concurrently.
550 clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
551 smp_mb__after_clear_bit();
553 ret = wlcore_fw_status(wl, wl->fw_status_1, wl->fw_status_2);
557 wlcore_hw_tx_immediate_compl(wl);
559 intr = le32_to_cpu(wl->fw_status_1->intr);
560 intr &= WLCORE_ALL_INTR_MASK;
566 if (unlikely(intr & WL1271_ACX_INTR_WATCHDOG)) {
567 wl1271_error("HW watchdog interrupt received! starting recovery.");
568 wl->watchdog_recovery = true;
571 /* restarting the chip. ignore any other interrupt. */
575 if (unlikely(intr & WL1271_ACX_SW_INTR_WATCHDOG)) {
576 wl1271_error("SW watchdog interrupt received! "
577 "starting recovery.");
578 wl->watchdog_recovery = true;
581 /* restarting the chip. ignore any other interrupt. */
585 if (likely(intr & WL1271_ACX_INTR_DATA)) {
586 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA");
588 ret = wlcore_rx(wl, wl->fw_status_1);
592 /* Check if any tx blocks were freed */
593 spin_lock_irqsave(&wl->wl_lock, flags);
594 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
595 wl1271_tx_total_queue_count(wl) > 0) {
596 spin_unlock_irqrestore(&wl->wl_lock, flags);
598 * In order to avoid starvation of the TX path,
599 * call the work function directly.
601 ret = wlcore_tx_work_locked(wl);
605 spin_unlock_irqrestore(&wl->wl_lock, flags);
608 /* check for tx results */
609 ret = wlcore_hw_tx_delayed_compl(wl);
613 /* Make sure the deferred queues don't get too long */
614 defer_count = skb_queue_len(&wl->deferred_tx_queue) +
615 skb_queue_len(&wl->deferred_rx_queue);
616 if (defer_count > WL1271_DEFERRED_QUEUE_LIMIT)
617 wl1271_flush_deferred_work(wl);
620 if (intr & WL1271_ACX_INTR_EVENT_A) {
621 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_A");
622 ret = wl1271_event_handle(wl, 0);
627 if (intr & WL1271_ACX_INTR_EVENT_B) {
628 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_B");
629 ret = wl1271_event_handle(wl, 1);
634 if (intr & WL1271_ACX_INTR_INIT_COMPLETE)
635 wl1271_debug(DEBUG_IRQ,
636 "WL1271_ACX_INTR_INIT_COMPLETE");
638 if (intr & WL1271_ACX_INTR_HW_AVAILABLE)
639 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_HW_AVAILABLE");
642 wl1271_ps_elp_sleep(wl);
/*
 * wlcore_irq - threaded IRQ handler. Completes any pending ELP wakeup,
 * defers the work if the device is suspended (disables the IRQ and marks
 * it pending for resume), otherwise services the interrupt under
 * wl->mutex via wlcore_irq_locked() and queues TX work if frames remain.
 */
648 static irqreturn_t wlcore_irq(int irq, void *cookie)
652 struct wl1271 *wl = cookie;
654 /* complete the ELP completion */
655 spin_lock_irqsave(&wl->wl_lock, flags);
656 set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
658 complete(wl->elp_compl);
659 wl->elp_compl = NULL;
662 if (test_bit(WL1271_FLAG_SUSPENDED, &wl->flags)) {
663 /* don't enqueue a work right now. mark it as pending */
664 set_bit(WL1271_FLAG_PENDING_WORK, &wl->flags);
665 wl1271_debug(DEBUG_IRQ, "should not enqueue work");
666 disable_irq_nosync(wl->irq);
667 pm_wakeup_event(wl->dev, 0);
668 spin_unlock_irqrestore(&wl->wl_lock, flags);
671 spin_unlock_irqrestore(&wl->wl_lock, flags);
673 /* TX might be handled here, avoid redundant work */
674 set_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
675 cancel_work_sync(&wl->tx_work);
677 mutex_lock(&wl->mutex);
679 ret = wlcore_irq_locked(wl);
/* on error, trigger FW recovery */
681 wl12xx_queue_recovery_work(wl);
683 spin_lock_irqsave(&wl->wl_lock, flags);
684 /* In case TX was not handled here, queue TX work */
685 clear_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
686 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
687 wl1271_tx_total_queue_count(wl) > 0)
688 ieee80211_queue_work(wl->hw, &wl->tx_work);
689 spin_unlock_irqrestore(&wl->wl_lock, flags);
691 mutex_unlock(&wl->mutex);
/*
 * vif_counter_data - accumulator for the active-interface iteration below;
 * tracks whether the interface of interest (cur_vif) is among the active
 * ones. (Elided listing: a counter field is not visible here.)
 */
696 struct vif_counter_data {
699 struct ieee80211_vif *cur_vif;
700 bool cur_vif_running;
/* per-interface callback for ieee80211_iterate_active_interfaces() */
703 static void wl12xx_vif_count_iter(void *data, u8 *mac,
704 struct ieee80211_vif *vif)
706 struct vif_counter_data *counter = data;
709 if (counter->cur_vif == vif)
710 counter->cur_vif_running = true;
713 /* caller must not hold wl->mutex, as it might deadlock */
714 static void wl12xx_get_vif_count(struct ieee80211_hw *hw,
715 struct ieee80211_vif *cur_vif,
716 struct vif_counter_data *data)
/* zero the accumulator, remember which vif we're asking about, iterate */
718 memset(data, 0, sizeof(*data));
719 data->cur_vif = cur_vif;
721 ieee80211_iterate_active_interfaces(hw, IEEE80211_IFACE_ITER_RESUME_ALL,
722 wl12xx_vif_count_iter, data);
/*
 * wl12xx_fetch_firmware - select and load the right FW image: PLT FW when
 * plt is set, multi-role FW when more than one vif was active (cached
 * last_vif_count), single-role FW otherwise. Skips the load if the
 * currently loaded FW is already of the requested type; the image is
 * copied into a vmalloc'd buffer (wl->fw) and the request released.
 */
725 static int wl12xx_fetch_firmware(struct wl1271 *wl, bool plt)
727 const struct firmware *fw;
729 enum wl12xx_fw_type fw_type;
733 fw_type = WL12XX_FW_TYPE_PLT;
734 fw_name = wl->plt_fw_name;
737 * we can't call wl12xx_get_vif_count() here because
738 * wl->mutex is taken, so use the cached last_vif_count value
740 if (wl->last_vif_count > 1 && wl->mr_fw_name) {
741 fw_type = WL12XX_FW_TYPE_MULTI;
742 fw_name = wl->mr_fw_name;
744 fw_type = WL12XX_FW_TYPE_NORMAL;
745 fw_name = wl->sr_fw_name;
/* the requested FW type is already loaded — nothing to do */
749 if (wl->fw_type == fw_type)
752 wl1271_debug(DEBUG_BOOT, "booting firmware %s", fw_name);
754 ret = request_firmware(&fw, fw_name, wl->dev);
757 wl1271_error("could not get firmware %s: %d", fw_name, ret);
/* FW images are written 32 bits at a time; size must be 4-aligned */
762 wl1271_error("firmware size is not multiple of 32 bits: %zu",
/* mark as NONE while replacing the buffer, set the real type at the end */
769 wl->fw_type = WL12XX_FW_TYPE_NONE;
770 wl->fw_len = fw->size;
771 wl->fw = vmalloc(wl->fw_len);
774 wl1271_error("could not allocate memory for the firmware");
779 memcpy(wl->fw, fw->data, wl->fw_len);
781 wl->fw_type = fw_type;
783 release_firmware(fw);
/*
 * wl12xx_queue_recovery_work - schedule FW recovery. Transitions the
 * state machine to RESTARTING (guarding against recursive recovery),
 * disables interrupts and queues the recovery work item.
 */
788 void wl12xx_queue_recovery_work(struct wl1271 *wl)
790 WARN_ON(!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags));
792 /* Avoid a recursive recovery */
793 if (wl->state == WLCORE_STATE_ON) {
794 wl->state = WLCORE_STATE_RESTARTING;
795 set_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
796 wlcore_disable_interrupts_nosync(wl);
797 ieee80211_queue_work(wl->hw, &wl->recovery_work);
/*
 * wl12xx_copy_fwlog - append up to maxlen bytes of FW log data to the
 * host-side log buffer (capped at PAGE_SIZE total), which backs the sysfs
 * fwlog entry. Returns the number of bytes actually copied.
 */
801 size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen)
805 /* Make sure we have enough room */
806 len = min(maxlen, (size_t)(PAGE_SIZE - wl->fwlog_size));
808 /* Fill the FW log file, consumed by the sysfs fwlog entry */
809 memcpy(wl->fwlog + wl->fwlog_size, memblock, len);
810 wl->fwlog_size += len;
/*
 * wl12xx_read_fwlog_panic - after a FW crash, walk the FW logger's linked
 * list of memory blocks on the chip and copy their contents to the host
 * log buffer (wl12xx_copy_fwlog), then wake any reader blocked on the
 * sysfs fwlog entry. Temporarily repartitions the chip's address space
 * for each block and restores the old partition at the end.
 */
815 static void wl12xx_read_fwlog_panic(struct wl1271 *wl)
817 struct wlcore_partition_set part, old_part;
/* nothing to do if the FW logger is unsupported or disabled */
824 if ((wl->quirks & WLCORE_QUIRK_FWLOG_NOT_IMPLEMENTED) ||
825 (wl->conf.fwlog.mem_blocks == 0))
828 wl1271_info("Reading FW panic log");
830 block = kmalloc(wl->fw_mem_block_size, GFP_KERNEL);
835 * Make sure the chip is awake and the logger isn't active.
836 * Do not send a stop fwlog command if the fw is hanged or if
837 * dbgpins are used (due to some fw bug).
839 if (wl1271_ps_elp_wakeup(wl))
841 if (!wl->watchdog_recovery &&
842 wl->conf.fwlog.output != WL12XX_FWLOG_OUTPUT_DBG_PINS)
843 wl12xx_cmd_stop_fwlog(wl);
845 /* Read the first memory block address */
846 ret = wlcore_fw_status(wl, wl->fw_status_1, wl->fw_status_2);
850 addr = le32_to_cpu(wl->fw_status_2->log_start_addr);
/* continuous mode: each block also carries an RX descriptor header */
854 if (wl->conf.fwlog.mode == WL12XX_FWLOG_CONTINUOUS) {
855 offset = sizeof(addr) + sizeof(struct wl1271_rx_descriptor);
856 end_of_log = wl->fwlog_end;
858 offset = sizeof(addr);
862 old_part = wl->curr_part;
863 memset(&part, 0, sizeof(part));
865 /* Traverse the memory blocks linked list */
867 part.mem.start = wlcore_hw_convert_hwaddr(wl, addr);
868 part.mem.size = PAGE_SIZE;
870 ret = wlcore_set_partition(wl, &part);
872 wl1271_error("%s: set_partition start=0x%X size=%d",
873 __func__, part.mem.start, part.mem.size);
877 memset(block, 0, wl->fw_mem_block_size);
878 ret = wlcore_read_hwaddr(wl, addr, block,
879 wl->fw_mem_block_size, false);
885 * Memory blocks are linked to one another. The first 4 bytes
886 * of each memory block hold the hardware address of the next
887 * one. The last memory block points to the first one in
888 * on demand mode and is equal to 0x2000000 in continuous mode.
890 addr = le32_to_cpup((__le32 *)block);
/* stop early if the host log buffer is full (copy returned 0) */
892 if (!wl12xx_copy_fwlog(wl, block + offset,
893 wl->fw_mem_block_size - offset))
895 } while (addr && (addr != end_of_log));
897 wake_up_interruptible(&wl->fwlog_waitq);
901 wlcore_set_partition(wl, &old_part);
/*
 * wlcore_print_recovery - log diagnostic state at recovery time: FW
 * version, the FW program counter at the crash and the raw interrupt
 * status. Switches to the BOOT partition to read the registers and
 * restores the WORK partition afterwards.
 */
904 static void wlcore_print_recovery(struct wl1271 *wl)
910 wl1271_info("Hardware recovery in progress. FW ver: %s",
911 wl->chip.fw_ver_str);
913 /* change partitions momentarily so we can read the FW pc */
914 ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
918 ret = wlcore_read_reg(wl, REG_PC_ON_RECOVERY, &pc);
922 ret = wlcore_read_reg(wl, REG_INTERRUPT_NO_CLEAR, &hint_sts);
926 wl1271_info("pc: 0x%x, hint_sts: 0x%08x count: %d",
927 pc, hint_sts, ++wl->recovery_count);
929 wlcore_set_partition(wl, &wl->ptable[PART_WORK]);
/*
 * wl1271_recovery_work - the FW recovery work item. Dumps the FW panic
 * log and crash diagnostics (unless the recovery was intentional),
 * honours the bug_on_recovery/no_recovery module options, then tears down
 * every vif, stops the chip and asks mac80211 to restart the hardware.
 */
933 static void wl1271_recovery_work(struct work_struct *work)
936 container_of(work, struct wl1271, recovery_work);
937 struct wl12xx_vif *wlvif;
938 struct ieee80211_vif *vif;
940 mutex_lock(&wl->mutex);
942 if (wl->state == WLCORE_STATE_OFF || wl->plt)
/* unintended recovery (real FW crash): capture diagnostics */
945 if (!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags)) {
946 if (wl->conf.fwlog.output == WL12XX_FWLOG_OUTPUT_HOST)
947 wl12xx_read_fwlog_panic(wl);
948 wlcore_print_recovery(wl);
951 BUG_ON(wl->conf.recovery.bug_on_recovery &&
952 !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags));
954 if (wl->conf.recovery.no_recovery) {
955 wl1271_info("No recovery (chosen on module load). Fw will remain stuck.");
959 /* Prevent spurious TX during FW restart */
960 wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);
962 /* reboot the chipset */
963 while (!list_empty(&wl->wlvif_list)) {
964 wlvif = list_first_entry(&wl->wlvif_list,
965 struct wl12xx_vif, list);
966 vif = wl12xx_wlvif_to_vif(wlvif);
967 __wl1271_op_remove_interface(wl, vif, false);
970 wlcore_op_stop_locked(wl);
972 ieee80211_restart_hw(wl->hw);
975 * Its safe to enable TX now - the queues are stopped after a request
978 wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);
981 wl->watchdog_recovery = false;
982 clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
983 mutex_unlock(&wl->mutex);
/* Wake the FW from ELP (extreme low power) via the ELP control register. */
986 static int wlcore_fw_wakeup(struct wl1271 *wl)
988 return wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP);
/*
 * wl1271_setup - allocate the FW status buffers (status_1 and status_2
 * live in one allocation; status_2 is a pointer into it right after the
 * variable-length status_1) and the TX result interface buffer. Frees
 * fw_status_1 on tx_res_if allocation failure.
 */
991 static int wl1271_setup(struct wl1271 *wl)
993 wl->fw_status_1 = kzalloc(WLCORE_FW_STATUS_1_LEN(wl->num_rx_desc) +
994 sizeof(*wl->fw_status_2) +
995 wl->fw_status_priv_len, GFP_KERNEL);
996 if (!wl->fw_status_1)
/* status_2 aliases the tail of the fw_status_1 allocation */
999 wl->fw_status_2 = (struct wl_fw_status_2 *)
1000 (((u8 *) wl->fw_status_1) +
1001 WLCORE_FW_STATUS_1_LEN(wl->num_rx_desc));
1003 wl->tx_res_if = kzalloc(sizeof(*wl->tx_res_if), GFP_KERNEL);
1004 if (!wl->tx_res_if) {
1005 kfree(wl->fw_status_1);
/*
 * wl12xx_set_power_on - power-on sequence: settle delay, power on, boot
 * delay, I/O reset, switch to the BOOT partition and wake the FW from
 * ELP. Powers back off on failure (visible at the error tail).
 */
1012 static int wl12xx_set_power_on(struct wl1271 *wl)
1016 msleep(WL1271_PRE_POWER_ON_SLEEP);
1017 ret = wl1271_power_on(wl);
1020 msleep(WL1271_POWER_ON_SLEEP);
1021 wl1271_io_reset(wl);
1024 ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
1028 /* ELP module wake up */
1029 ret = wlcore_fw_wakeup(wl);
/* error path: undo the power-on */
1037 wl1271_power_off(wl);
/*
 * wl12xx_chip_wakeup - power the chip on, configure the bus block size
 * (clearing the TX blocksize-align quirk when the bus can't do it),
 * allocate the status buffers (wl1271_setup) and fetch the appropriate
 * firmware image.
 */
1041 static int wl12xx_chip_wakeup(struct wl1271 *wl, bool plt)
1045 ret = wl12xx_set_power_on(wl);
1050 * For wl127x based devices we could use the default block
1051 * size (512 bytes), but due to a bug in the sdio driver, we
1052 * need to set it explicitly after the chip is powered on. To
1053 * simplify the code and since the performance impact is
1054 * negligible, we use the same block size for all different
1057 * Check if the bus supports blocksize alignment and, if it
1058 * doesn't, make sure we don't have the quirk.
1060 if (!wl1271_set_block_size(wl))
1061 wl->quirks &= ~WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN;
1063 /* TODO: make sure the lower driver has set things up correctly */
1065 ret = wl1271_setup(wl);
1069 ret = wl12xx_fetch_firmware(wl, plt);
/*
 * wl1271_plt_start - boot the chip into PLT (production line testing)
 * mode. Only legal from the OFF state; retries the boot up to
 * WL1271_BOOT_RETRIES times, runs the chip-specific plt_init unless the
 * mode is PLT_CHIP_AWAKE, and publishes HW/FW version info to wiphy.
 */
1077 int wl1271_plt_start(struct wl1271 *wl, const enum plt_mode plt_mode)
1079 int retries = WL1271_BOOT_RETRIES;
1080 struct wiphy *wiphy = wl->hw->wiphy;
/* human-readable names for the PLT modes, used in the notice below */
1082 static const char* const PLT_MODE[] = {
1091 mutex_lock(&wl->mutex);
1093 wl1271_notice("power up");
1095 if (wl->state != WLCORE_STATE_OFF) {
1096 wl1271_error("cannot go into PLT state because not "
1097 "in off state: %d", wl->state);
1102 /* Indicate to lower levels that we are now in PLT mode */
1104 wl->plt_mode = plt_mode;
1108 ret = wl12xx_chip_wakeup(wl, true);
1112 if (plt_mode != PLT_CHIP_AWAKE) {
1113 ret = wl->ops->plt_init(wl);
1118 wl->state = WLCORE_STATE_ON;
1119 wl1271_notice("firmware booted in PLT mode %s (%s)",
1121 wl->chip.fw_ver_str);
1123 /* update hw/fw version info in wiphy struct */
1124 wiphy->hw_version = wl->chip.id;
1125 strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
1126 sizeof(wiphy->fw_version));
/* boot attempt failed: power off and retry / give up */
1131 wl1271_power_off(wl);
1135 wl->plt_mode = PLT_OFF;
1137 wl1271_error("firmware boot in PLT mode failed despite %d retries",
1138 WL1271_BOOT_RETRIES);
1140 mutex_unlock(&wl->mutex);
/*
 * wl1271_plt_stop - leave PLT mode and power the chip down. Interrupts
 * are disabled before the state change so the IRQ handler cannot race
 * with the shutdown; pending works are flushed/cancelled with the mutex
 * released to avoid deadlocks, then the chip is powered off.
 */
1145 int wl1271_plt_stop(struct wl1271 *wl)
1149 wl1271_notice("power down")
1152 * Interrupts must be disabled before setting the state to OFF.
1153 * Otherwise, the interrupt handler might be called and exit without
1154 * reading the interrupt status.
1156 wlcore_disable_interrupts(wl);
1157 mutex_lock(&wl->mutex);
1159 mutex_unlock(&wl->mutex);
1162 * This will not necessarily enable interrupts as interrupts
1163 * may have been disabled when op_stop was called. It will,
1164 * however, balance the above call to disable_interrupts().
1166 wlcore_enable_interrupts(wl);
1168 wl1271_error("cannot power down because not in PLT "
1169 "state: %d", wl->state);
1174 mutex_unlock(&wl->mutex);
/* flush/cancel deferred work outside the mutex to avoid deadlock */
1176 wl1271_flush_deferred_work(wl);
1177 cancel_work_sync(&wl->netstack_work);
1178 cancel_work_sync(&wl->recovery_work);
1179 cancel_delayed_work_sync(&wl->elp_work);
1180 cancel_delayed_work_sync(&wl->tx_watchdog_work);
1182 mutex_lock(&wl->mutex);
1183 wl1271_power_off(wl);
1185 wl->sleep_auth = WL1271_PSM_ILLEGAL;
1186 wl->state = WLCORE_STATE_OFF;
1188 wl->plt_mode = PLT_OFF;
1190 mutex_unlock(&wl->mutex);
/*
 * wl1271_op_tx - mac80211 .tx callback. Maps the skb to a link (hlid) and
 * AC queue, drops it if the vif is missing, the link is invalid or the
 * queue is hard-stopped, otherwise queues it on the per-link TX queue and
 * applies high-watermark flow control. Queues the TX work unless TX is
 * already being handled (FW_TX_BUSY / TX_PENDING).
 */
1196 static void wl1271_op_tx(struct ieee80211_hw *hw,
1197 struct ieee80211_tx_control *control,
1198 struct sk_buff *skb)
1200 struct wl1271 *wl = hw->priv;
1201 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1202 struct ieee80211_vif *vif = info->control.vif;
1203 struct wl12xx_vif *wlvif = NULL;
1204 unsigned long flags;
1209 wl1271_debug(DEBUG_TX, "DROP skb with no vif");
1210 ieee80211_free_txskb(hw, skb);
1214 wlvif = wl12xx_vif_to_data(vif);
1215 mapping = skb_get_queue_mapping(skb);
1216 q = wl1271_tx_get_queue(mapping);
1218 hlid = wl12xx_tx_get_hlid(wl, wlvif, skb, control->sta);
1220 spin_lock_irqsave(&wl->wl_lock, flags);
1223 * drop the packet if the link is invalid or the queue is stopped
1224 * for any reason but watermark. Watermark is a "soft"-stop so we
1225 * allow these packets through.
1227 if (hlid == WL12XX_INVALID_LINK_ID ||
1228 (!test_bit(hlid, wlvif->links_map)) ||
1229 (wlcore_is_queue_stopped_locked(wl, wlvif, q) &&
1230 !wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q,
1231 WLCORE_QUEUE_STOP_REASON_WATERMARK))) {
1232 wl1271_debug(DEBUG_TX, "DROP skb hlid %d q %d", hlid, q);
1233 ieee80211_free_txskb(hw, skb);
1237 wl1271_debug(DEBUG_TX, "queue skb hlid %d q %d len %d",
1239 skb_queue_tail(&wl->links[hlid].tx_queue[q], skb);
/* both global and per-vif queue depth counters are maintained */
1241 wl->tx_queue_count[q]++;
1242 wlvif->tx_queue_count[q]++;
1245 * The workqueue is slow to process the tx_queue and we need stop
1246 * the queue here, otherwise the queue will get too long.
1248 if (wlvif->tx_queue_count[q] >= WL1271_TX_QUEUE_HIGH_WATERMARK &&
1249 !wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q,
1250 WLCORE_QUEUE_STOP_REASON_WATERMARK)) {
1251 wl1271_debug(DEBUG_TX, "op_tx: stopping queues for q %d", q);
1252 wlcore_stop_queue_locked(wl, wlvif, q,
1253 WLCORE_QUEUE_STOP_REASON_WATERMARK);
1257 * The chip specific setup must run before the first TX packet -
1258 * before that, the tx_work will not be initialized!
1261 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
1262 !test_bit(WL1271_FLAG_TX_PENDING, &wl->flags))
1263 ieee80211_queue_work(wl->hw, &wl->tx_work);
1266 spin_unlock_irqrestore(&wl->wl_lock, flags);
/*
 * wl1271_tx_dummy_packet - queue the pre-allocated dummy packet the FW
 * requests when it is low on RX memory blocks. Deduplicates via the
 * DUMMY_PACKET_PENDING flag; sends immediately via wlcore_tx_work_locked()
 * when the FW TX path is idle, otherwise leaves it for the IRQ thread.
 */
1269 int wl1271_tx_dummy_packet(struct wl1271 *wl)
1271 unsigned long flags;
1274 /* no need to queue a new dummy packet if one is already pending */
1275 if (test_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags))
1278 q = wl1271_tx_get_queue(skb_get_queue_mapping(wl->dummy_packet));
1280 spin_lock_irqsave(&wl->wl_lock, flags);
1281 set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags);
1282 wl->tx_queue_count[q]++;
1283 spin_unlock_irqrestore(&wl->wl_lock, flags);
1285 /* The FW is low on RX memory blocks, so send the dummy packet asap */
1286 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags))
1287 return wlcore_tx_work_locked(wl);
1290 * If the FW TX is busy, TX work will be scheduled by the threaded
1291 * interrupt handler function
1297 * The size of the dummy packet should be at least 1400 bytes. However, in
1298 * order to minimize the number of bus transactions, aligning it to 512 bytes
1299 * boundaries could be beneficial, performance wise
1301 #define TOTAL_TX_DUMMY_PACKET_SIZE (ALIGN(1400, 512))
/*
 * wl12xx_alloc_dummy_packet - build the zero-filled null-function data
 * frame (ToDS) used as the FW's dummy packet: reserves headroom for the
 * TX HW descriptor, fills a 3-address 802.11 header, pads the body with
 * zeros, and tags it with the management TID.
 */
1303 static struct sk_buff *wl12xx_alloc_dummy_packet(struct wl1271 *wl)
1305 struct sk_buff *skb;
1306 struct ieee80211_hdr_3addr *hdr;
1307 unsigned int dummy_packet_size;
/* payload size = total - HW descriptor - 802.11 header */
1309 dummy_packet_size = TOTAL_TX_DUMMY_PACKET_SIZE -
1310 sizeof(struct wl1271_tx_hw_descr) - sizeof(*hdr);
1312 skb = dev_alloc_skb(TOTAL_TX_DUMMY_PACKET_SIZE);
1314 wl1271_warning("Failed to allocate a dummy packet skb");
1318 skb_reserve(skb, sizeof(struct wl1271_tx_hw_descr));
1320 hdr = (struct ieee80211_hdr_3addr *) skb_put(skb, sizeof(*hdr));
1321 memset(hdr, 0, sizeof(*hdr));
1322 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
1323 IEEE80211_STYPE_NULLFUNC |
1324 IEEE80211_FCTL_TODS);
1326 memset(skb_put(skb, dummy_packet_size), 0, dummy_packet_size);
1328 /* Dummy packets require the TID to be management */
1329 skb->priority = WL1271_TID_MGMT;
1331 /* Initialize all fields that might be used */
1332 skb_set_queue_mapping(skb, 0);
1333 memset(IEEE80211_SKB_CB(skb), 0, sizeof(struct ieee80211_tx_info));
/*
 * wl1271_validate_wowlan_pattern - verify a cfg80211 WoWLAN packet
 * pattern fits the FW RX-filter limits: at most
 * WL1271_RX_FILTER_MAX_FIELDS segments ("fields") and a total encoded
 * size within WL1271_RX_FILTER_MAX_FIELDS_SIZE. A pattern crossing the
 * Ethernet/IP header boundary costs an extra field (see comment below).
 */
1341 wl1271_validate_wowlan_pattern(struct cfg80211_pkt_pattern *p)
1343 int num_fields = 0, in_field = 0, fields_size = 0;
1344 int i, pattern_len = 0;
1347 wl1271_warning("No mask in WoWLAN pattern");
1352 * The pattern is broken up into segments of bytes at different offsets
1353 * that need to be checked by the FW filter. Each segment is called
1354 * a field in the FW API. We verify that the total number of fields
1355 * required for this pattern won't exceed FW limits (8)
1356 * as well as the total fields buffer won't exceed the FW limit.
1357 * Note that if there's a pattern which crosses Ethernet/IP header
1358 * boundary a new field is required.
1360 for (i = 0; i < p->pattern_len; i++) {
1361 if (test_bit(i, (unsigned long *)p->mask)) {
1366 if (i == WL1271_RX_FILTER_ETH_HEADER_SIZE) {
1368 fields_size += pattern_len +
1369 RX_FILTER_FIELD_OVERHEAD;
1377 fields_size += pattern_len +
1378 RX_FILTER_FIELD_OVERHEAD;
/* account for a final in-progress field after the loop */
1385 fields_size += pattern_len + RX_FILTER_FIELD_OVERHEAD;
1389 if (num_fields > WL1271_RX_FILTER_MAX_FIELDS) {
1390 wl1271_warning("RX Filter too complex. Too many segments");
1394 if (fields_size > WL1271_RX_FILTER_MAX_FIELDS_SIZE) {
1395 wl1271_warning("RX filter pattern is too big");
1402 struct wl12xx_rx_filter *wl1271_rx_filter_alloc(void)
1404 return kzalloc(sizeof(struct wl12xx_rx_filter), GFP_KERNEL);
/*
 * Release a filter together with every per-field pattern buffer it
 * owns (the patterns were allocated in wl1271_rx_filter_alloc_field()).
 */
void wl1271_rx_filter_free(struct wl12xx_rx_filter *filter)
	for (i = 0; i < filter->num_fields; i++)
		kfree(filter->fields[i].pattern);
/*
 * Append one match field (offset/flags/pattern) to @filter. A copy of
 * @pattern is made and owned by the filter until
 * wl1271_rx_filter_free(). Fails when the per-filter field limit is
 * reached or the pattern copy cannot be allocated.
 */
int wl1271_rx_filter_alloc_field(struct wl12xx_rx_filter *filter,
				 u16 offset, u8 flags,
				 u8 *pattern, u8 len)
	struct wl12xx_rx_filter_field *field;

	if (filter->num_fields == WL1271_RX_FILTER_MAX_FIELDS) {
		wl1271_warning("Max fields per RX filter. can't alloc another");

	field = &filter->fields[filter->num_fields];

	field->pattern = kzalloc(len, GFP_KERNEL);
	if (!field->pattern) {
		wl1271_warning("Failed to allocate RX filter pattern");

	/* count the field only once its pattern buffer exists */
	filter->num_fields++;

	field->offset = cpu_to_le16(offset);	/* FW expects little-endian */
	field->flags = flags;

	memcpy(field->pattern, pattern, len);
/*
 * Size in bytes of the flattened fields buffer for @filter: per field,
 * the fixed header (struct minus the pattern pointer) plus the pattern
 * bytes — must stay in sync with wl1271_rx_filter_flatten_fields().
 */
int wl1271_rx_filter_get_fields_size(struct wl12xx_rx_filter *filter)
	int i, fields_size = 0;

	for (i = 0; i < filter->num_fields; i++)
		fields_size += filter->fields[i].len +
			sizeof(struct wl12xx_rx_filter_field) -
/*
 * Serialize the filter's fields into @buf in the FW wire format: each
 * field header is copied with its pattern bytes inlined in place of
 * the pattern pointer. @buf must hold at least
 * wl1271_rx_filter_get_fields_size() bytes.
 */
void wl1271_rx_filter_flatten_fields(struct wl12xx_rx_filter *filter,
	struct wl12xx_rx_filter_field *field;

	for (i = 0; i < filter->num_fields; i++) {
		field = (struct wl12xx_rx_filter_field *)buf;

		field->offset = filter->fields[i].offset;
		field->flags = filter->fields[i].flags;
		field->len = filter->fields[i].len;

		/* inline the pattern bytes where the pointer member sits */
		memcpy(&field->pattern, filter->fields[i].pattern, field->len);
		buf += sizeof(struct wl12xx_rx_filter_field) -
			sizeof(u8 *) + field->len;
/*
 * Convert a cfg80211 WoWLAN pattern into a FW RX filter.
 *
 * Allocates an RX filter returned through @f which needs to be freed
 * using wl1271_rx_filter_free(). Each contiguous run of masked bytes
 * becomes one filter field; a run crossing the Ethernet/IP header
 * boundary is split, since the FW addresses the two headers with
 * different offset bases.
 */
wl1271_convert_wowlan_pattern_to_rx_filter(struct cfg80211_pkt_pattern *p,
					   struct wl12xx_rx_filter **f)
	struct wl12xx_rx_filter *filter;

	filter = wl1271_rx_filter_alloc();
	wl1271_warning("Failed to alloc rx filter");

	while (i < p->pattern_len) {
		/* skip unmasked ("don't care") bytes */
		if (!test_bit(i, (unsigned long *)p->mask)) {

		/* find the end of the current run of masked bytes */
		for (j = i; j < p->pattern_len; j++) {
			if (!test_bit(j, (unsigned long *)p->mask))

			/* split a run crossing the ETH/IP header boundary */
			if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE &&
			    j >= WL1271_RX_FILTER_ETH_HEADER_SIZE)

		if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE) {
			flags = WL1271_RX_FILTER_FLAG_ETHERNET_HEADER;
			/* IP fields are offset past the Ethernet header */
			offset = i - WL1271_RX_FILTER_ETH_HEADER_SIZE;
			flags = WL1271_RX_FILTER_FLAG_IP_HEADER;

		ret = wl1271_rx_filter_alloc_field(filter,
						   &p->pattern[i], len);

	/* matching frames should wake the host */
	filter->action = FILTER_SIGNAL;

	wl1271_rx_filter_free(filter);
/*
 * Program the FW RX filters from the given WoWLAN configuration.
 * Without patterns (or with wow->any) filtering reverts to the
 * default-signal state and all per-pattern filters are cleared;
 * otherwise every validated pattern is installed as an enabled filter
 * and the default action becomes FILTER_DROP.
 */
static int wl1271_configure_wowlan(struct wl1271 *wl,
				   struct cfg80211_wowlan *wow)
	/* nothing to apply: restore default filtering and clear filters */
	if (!wow || wow->any || !wow->n_patterns) {
		ret = wl1271_acx_default_rx_filter_enable(wl, 0,

		ret = wl1271_rx_filter_clear_all(wl);

	if (WARN_ON(wow->n_patterns > WL1271_MAX_RX_FILTERS))

	/* Validate all incoming patterns before clearing current FW state */
	for (i = 0; i < wow->n_patterns; i++) {
		ret = wl1271_validate_wowlan_pattern(&wow->patterns[i]);
			wl1271_warning("Bad wowlan pattern %d", i);

	ret = wl1271_acx_default_rx_filter_enable(wl, 0, FILTER_SIGNAL);

	ret = wl1271_rx_filter_clear_all(wl);

	/* Translate WoWLAN patterns into filters */
	for (i = 0; i < wow->n_patterns; i++) {
		struct cfg80211_pkt_pattern *p;
		struct wl12xx_rx_filter *filter = NULL;

		p = &wow->patterns[i];

		ret = wl1271_convert_wowlan_pattern_to_rx_filter(p, &filter);
			wl1271_warning("Failed to create an RX filter from "
				       "wowlan pattern %d", i);

		ret = wl1271_rx_filter_enable(wl, i, 1, filter);
		/* the FW keeps its own copy; the local filter can go */
		wl1271_rx_filter_free(filter);

	/* frames matching no installed filter are dropped */
	ret = wl1271_acx_default_rx_filter_enable(wl, 1, FILTER_DROP);
/*
 * Prepare an associated STA interface for host suspend: arm the
 * WoWLAN RX filters and, when the suspend-specific settings differ
 * from the active ones, apply the suspend wake-up event and listen
 * interval.
 */
static int wl1271_configure_suspend_sta(struct wl1271 *wl,
					struct wl12xx_vif *wlvif,
					struct cfg80211_wowlan *wow)
	/* nothing to configure while not associated */
	if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))

	ret = wl1271_ps_elp_wakeup(wl);

	ret = wl1271_configure_wowlan(wl, wow);

	/* skip the ACX when the suspend settings equal the active ones */
	if ((wl->conf.conn.suspend_wake_up_event ==
	     wl->conf.conn.wake_up_event) &&
	    (wl->conf.conn.suspend_listen_interval ==
	     wl->conf.conn.listen_interval))

	ret = wl1271_acx_wake_up_conditions(wl, wlvif,
				    wl->conf.conn.suspend_wake_up_event,
				    wl->conf.conn.suspend_listen_interval);

		wl1271_error("suspend: set wake up conditions failed: %d", ret);

	wl1271_ps_elp_sleep(wl);
/*
 * Prepare a started AP interface for host suspend: turn on beacon
 * filtering so beacons do not keep waking the host.
 */
static int wl1271_configure_suspend_ap(struct wl1271 *wl,
				       struct wl12xx_vif *wlvif)
	/* nothing to do for an AP that was never started */
	if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags))

	ret = wl1271_ps_elp_wakeup(wl);

	ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);

	wl1271_ps_elp_sleep(wl);
1670 static int wl1271_configure_suspend(struct wl1271 *wl,
1671 struct wl12xx_vif *wlvif,
1672 struct cfg80211_wowlan *wow)
1674 if (wlvif->bss_type == BSS_TYPE_STA_BSS)
1675 return wl1271_configure_suspend_sta(wl, wlvif, wow);
1676 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
1677 return wl1271_configure_suspend_ap(wl, wlvif);
/*
 * Undo suspend-time configuration for one interface: drop the WoWLAN
 * filters and restore normal wake-up conditions (STA) or re-disable
 * beacon filtering (AP).
 */
static void wl1271_configure_resume(struct wl1271 *wl, struct wl12xx_vif *wlvif)
	bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
	bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;

	/* only STA and AP roles were touched on suspend */
	if ((!is_ap) && (!is_sta))

	/* an unassociated STA had nothing configured */
	if (is_sta && !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))

	ret = wl1271_ps_elp_wakeup(wl);

	/* a NULL wow reverts the RX filters to their default state */
	wl1271_configure_wowlan(wl, NULL);

	/* skip the ACX when suspend settings equal the normal ones */
	if ((wl->conf.conn.suspend_wake_up_event ==
	     wl->conf.conn.wake_up_event) &&
	    (wl->conf.conn.suspend_listen_interval ==
	     wl->conf.conn.listen_interval))

	ret = wl1271_acx_wake_up_conditions(wl, wlvif,
				    wl->conf.conn.wake_up_event,
				    wl->conf.conn.listen_interval);

		wl1271_error("resume: wake up conditions failed: %d",

	ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);

	wl1271_ps_elp_sleep(wl);
/*
 * mac80211 suspend callback: configure every interface for WoWLAN,
 * then quiesce the driver (flush TX, drain the IRQ path and pending
 * works) so nothing touches the HW while the host sleeps.
 */
static int wl1271_op_suspend(struct ieee80211_hw *hw,
			     struct cfg80211_wowlan *wow)
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif;

	wl1271_debug(DEBUG_MAC80211, "mac80211 suspend wow=%d", !!wow);

	/* we want to perform the recovery before suspending */
	if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
		wl1271_warning("postponing suspend to perform recovery");

	wl1271_tx_flush(wl);

	mutex_lock(&wl->mutex);
	wl->wow_enabled = true;
	wl12xx_for_each_wlvif(wl, wlvif) {
		ret = wl1271_configure_suspend(wl, wlvif, wow);
			mutex_unlock(&wl->mutex);
			wl1271_warning("couldn't prepare device to suspend");
	mutex_unlock(&wl->mutex);
	/* flush any remaining work */
	wl1271_debug(DEBUG_MAC80211, "flushing remaining works");

	/*
	 * disable and re-enable interrupts in order to flush
	 * the threaded_irq
	 */
	wlcore_disable_interrupts(wl);

	/*
	 * set suspended flag to avoid triggering a new threaded_irq
	 * work. no need for spinlock as interrupts are disabled.
	 */
	set_bit(WL1271_FLAG_SUSPENDED, &wl->flags);

	wlcore_enable_interrupts(wl);
	flush_work(&wl->tx_work);
	flush_delayed_work(&wl->elp_work);
/*
 * mac80211 resume callback: re-arm the IRQ path (running any work
 * postponed while suspended), queue a recovery that fired during
 * suspend, and restore every interface's pre-suspend configuration.
 */
static int wl1271_op_resume(struct ieee80211_hw *hw)
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif;
	unsigned long flags;
	bool run_irq_work = false, pending_recovery;

	wl1271_debug(DEBUG_MAC80211, "mac80211 resume wow=%d",
	WARN_ON(!wl->wow_enabled);

	/*
	 * re-enable irq_work enqueuing, and call irq_work directly if
	 * there is a pending work.
	 */
	spin_lock_irqsave(&wl->wl_lock, flags);
	clear_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
	if (test_and_clear_bit(WL1271_FLAG_PENDING_WORK, &wl->flags))
		run_irq_work = true;
	spin_unlock_irqrestore(&wl->wl_lock, flags);

	mutex_lock(&wl->mutex);

	/* test the recovery flag before calling any SDIO functions */
	pending_recovery = test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,

		wl1271_debug(DEBUG_MAC80211,
			     "run postponed irq_work directly");

		/* don't talk to the HW if recovery is pending */
		if (!pending_recovery) {
			ret = wlcore_irq_locked(wl);
				wl12xx_queue_recovery_work(wl);

		wlcore_enable_interrupts(wl);

	if (pending_recovery) {
		wl1271_warning("queuing forgotten recovery on resume");
		ieee80211_queue_work(wl->hw, &wl->recovery_work);

	wl12xx_for_each_wlvif(wl, wlvif) {
		wl1271_configure_resume(wl, wlvif);

	wl->wow_enabled = false;
	mutex_unlock(&wl->mutex);
/* mac80211 start callback — intentionally a no-op; see below. */
static int wl1271_op_start(struct ieee80211_hw *hw)
	wl1271_debug(DEBUG_MAC80211, "mac80211 start");

	/*
	 * We have to delay the booting of the hardware because
	 * we need to know the local MAC address before downloading and
	 * initializing the firmware. The MAC address cannot be changed
	 * after boot, and without the proper MAC address, the firmware
	 * will not function properly.
	 *
	 * The MAC address is first known when the corresponding interface
	 * is added. That is where we will initialize the hardware.
	 */
/*
 * Bring the chip down with wl->mutex held: mark the driver OFF, drain
 * interrupts and work items (briefly dropping the mutex to do so),
 * notify mac80211 of discarded TX frames, power off, and reset all
 * volatile driver state for the next boot.
 */
static void wlcore_op_stop_locked(struct wl1271 *wl)
	/* already off: just balance the interrupt-disable depth left by
	 * a pending recovery request and bail out */
	if (wl->state == WLCORE_STATE_OFF) {
		if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
			wlcore_enable_interrupts(wl);

	/*
	 * this must be before the cancel_work calls below, so that the work
	 * functions don't perform further work.
	 */
	wl->state = WLCORE_STATE_OFF;

	/*
	 * Use the nosync variant to disable interrupts, so the mutex could be
	 * held while doing so without deadlocking.
	 */
	wlcore_disable_interrupts_nosync(wl);

	mutex_unlock(&wl->mutex);

	/* with the mutex dropped, wait out the IRQ handler and works */
	wlcore_synchronize_interrupts(wl);
	if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
		cancel_work_sync(&wl->recovery_work);
	wl1271_flush_deferred_work(wl);
	cancel_delayed_work_sync(&wl->scan_complete_work);
	cancel_work_sync(&wl->netstack_work);
	cancel_work_sync(&wl->tx_work);
	cancel_delayed_work_sync(&wl->elp_work);
	cancel_delayed_work_sync(&wl->tx_watchdog_work);

	/* let's notify MAC80211 about the remaining pending TX frames */
	mutex_lock(&wl->mutex);
	wl12xx_tx_reset(wl);

	wl1271_power_off(wl);
	/*
	 * In case a recovery was scheduled, interrupts were disabled to avoid
	 * an interrupt storm. Now that the power is down, it is safe to
	 * re-enable interrupts to balance the disable depth
	 */
	if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
		wlcore_enable_interrupts(wl);

	/* reset volatile driver state to post-probe defaults */
	wl->band = IEEE80211_BAND_2GHZ;

	wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
	wl->channel_type = NL80211_CHAN_NO_HT;
	wl->tx_blocks_available = 0;
	wl->tx_allocated_blocks = 0;
	wl->tx_results_count = 0;
	wl->tx_packets_count = 0;
	wl->time_offset = 0;
	wl->ap_fw_ps_map = 0;

	wl->sleep_auth = WL1271_PSM_ILLEGAL;
	memset(wl->roles_map, 0, sizeof(wl->roles_map));
	memset(wl->links_map, 0, sizeof(wl->links_map));
	memset(wl->roc_map, 0, sizeof(wl->roc_map));
	memset(wl->session_ids, 0, sizeof(wl->session_ids));
	wl->active_sta_count = 0;
	wl->active_link_count = 0;

	/* The system link is always allocated */
	wl->links[WL12XX_SYSTEM_HLID].allocated_pkts = 0;
	wl->links[WL12XX_SYSTEM_HLID].prev_freed_pkts = 0;
	__set_bit(WL12XX_SYSTEM_HLID, wl->links_map);

	/*
	 * this is performed after the cancel_work calls and the associated
	 * mutex_lock, so that wl1271_op_add_interface does not accidentally
	 * get executed before all these vars have been reset.
	 */
	wl->tx_blocks_freed = 0;

	for (i = 0; i < NUM_TX_QUEUES; i++) {
		wl->tx_pkts_freed[i] = 0;
		wl->tx_allocated_pkts[i] = 0;

	wl1271_debugfs_reset(wl);

	/* fw_status_2 is only NULLed, not freed separately — presumably it
	 * points into the fw_status_1 allocation; confirm before changing */
	kfree(wl->fw_status_1);
	wl->fw_status_1 = NULL;
	wl->fw_status_2 = NULL;
	kfree(wl->tx_res_if);
	wl->tx_res_if = NULL;
	kfree(wl->target_mem_map);
	wl->target_mem_map = NULL;

	/*
	 * FW channels must be re-calibrated after recovery,
	 * clear the last Reg-Domain channel configuration.
	 */
	memset(wl->reg_ch_conf_last, 0, sizeof(wl->reg_ch_conf_last));
/* mac80211 stop callback: take the device down under wl->mutex. */
static void wlcore_op_stop(struct ieee80211_hw *hw)
	struct wl1271 *wl = hw->priv;

	wl1271_debug(DEBUG_MAC80211, "mac80211 stop");

	mutex_lock(&wl->mutex);
	wlcore_op_stop_locked(wl);
	mutex_unlock(&wl->mutex);
/*
 * Delayed-work timeout for a channel switch the FW never completed:
 * report the failure to mac80211 and tell the FW to stop the switch.
 */
static void wlcore_channel_switch_work(struct work_struct *work)
	struct delayed_work *dwork;
	struct ieee80211_vif *vif;
	struct wl12xx_vif *wlvif;

	dwork = container_of(work, struct delayed_work, work);
	wlvif = container_of(dwork, struct wl12xx_vif, channel_switch_work);

	wl1271_info("channel switch failed (role_id: %d).", wlvif->role_id);

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON))

	/* check the channel switch is still ongoing */
	if (!test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags))

	vif = wl12xx_wlvif_to_vif(wlvif);
	/* false = the switch did not succeed */
	ieee80211_chswitch_done(vif, false);

	ret = wl1271_ps_elp_wakeup(wl);

	wl12xx_cmd_stop_channel_switch(wl, wlvif);

	wl1271_ps_elp_sleep(wl);

	mutex_unlock(&wl->mutex);
/*
 * Delayed-work handler reporting a firmware-detected connection loss
 * to mac80211, unless the STA has disassociated in the meantime.
 */
static void wlcore_connection_loss_work(struct work_struct *work)
	struct delayed_work *dwork;
	struct ieee80211_vif *vif;
	struct wl12xx_vif *wlvif;

	dwork = container_of(work, struct delayed_work, work);
	wlvif = container_of(dwork, struct wl12xx_vif, connection_loss_work);

	wl1271_info("Connection loss work (role_id: %d).", wlvif->role_id);

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON))

	/* Call mac80211 connection loss */
	if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))

	vif = wl12xx_wlvif_to_vif(wlvif);
	ieee80211_connection_loss(vif);

	mutex_unlock(&wl->mutex);
/*
 * Delayed-work handler that cancels the pending-auth ROC when no auth
 * reply completed the connection within WLCORE_PEND_AUTH_ROC_TIMEOUT.
 */
static void wlcore_pending_auth_complete_work(struct work_struct *work)
	struct delayed_work *dwork;
	struct wl12xx_vif *wlvif;
	unsigned long time_spare;

	dwork = container_of(work, struct delayed_work, work);
	wlvif = container_of(dwork, struct wl12xx_vif,
			     pending_auth_complete_work);

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON))

	/*
	 * Make sure a second really passed since the last auth reply. Maybe
	 * a second auth reply arrived while we were stuck on the mutex.
	 * Check for a little less than the timeout to protect from scheduler
	 * jitter.
	 */
	time_spare = jiffies +
			msecs_to_jiffies(WLCORE_PEND_AUTH_ROC_TIMEOUT - 50);
	if (!time_after(time_spare, wlvif->pending_auth_reply_time))

	ret = wl1271_ps_elp_wakeup(wl);

	/* cancel the ROC if active */
	wlcore_update_inconn_sta(wl, wlvif, NULL, false);

	wl1271_ps_elp_sleep(wl);

	mutex_unlock(&wl->mutex);
2075 static int wl12xx_allocate_rate_policy(struct wl1271 *wl, u8 *idx)
2077 u8 policy = find_first_zero_bit(wl->rate_policies_map,
2078 WL12XX_MAX_RATE_POLICIES);
2079 if (policy >= WL12XX_MAX_RATE_POLICIES)
2082 __set_bit(policy, wl->rate_policies_map);
2087 static void wl12xx_free_rate_policy(struct wl1271 *wl, u8 *idx)
2089 if (WARN_ON(*idx >= WL12XX_MAX_RATE_POLICIES))
2092 __clear_bit(*idx, wl->rate_policies_map);
2093 *idx = WL12XX_MAX_RATE_POLICIES;
2096 static int wlcore_allocate_klv_template(struct wl1271 *wl, u8 *idx)
2098 u8 policy = find_first_zero_bit(wl->klv_templates_map,
2099 WLCORE_MAX_KLV_TEMPLATES);
2100 if (policy >= WLCORE_MAX_KLV_TEMPLATES)
2103 __set_bit(policy, wl->klv_templates_map);
2108 static void wlcore_free_klv_template(struct wl1271 *wl, u8 *idx)
2110 if (WARN_ON(*idx >= WLCORE_MAX_KLV_TEMPLATES))
2113 __clear_bit(*idx, wl->klv_templates_map);
2114 *idx = WLCORE_MAX_KLV_TEMPLATES;
/*
 * Map the vif's BSS type to the FW role used by the role_enable
 * command, or WL12XX_INVALID_ROLE_TYPE for an unknown BSS type.
 */
static u8 wl12xx_get_role_type(struct wl1271 *wl, struct wl12xx_vif *wlvif)
	switch (wlvif->bss_type) {
	case BSS_TYPE_AP_BSS:
		/* presumably conditional on wlvif->p2p — confirm */
		return WL1271_ROLE_P2P_GO;

		return WL1271_ROLE_AP;

	case BSS_TYPE_STA_BSS:
		/* presumably conditional on wlvif->p2p — confirm */
		return WL1271_ROLE_P2P_CL;

		return WL1271_ROLE_STA;

		/* BSS_TYPE_IBSS case */
		return WL1271_ROLE_IBSS;

	wl1271_error("invalid bss_type: %d", wlvif->bss_type);

	return WL12XX_INVALID_ROLE_TYPE;
/*
 * Initialize per-vif driver state for a freshly added interface:
 * derive the BSS type from the mac80211 vif type, allocate FW rate
 * policies (plus a KLV template for STA/IBSS), seed the rate sets,
 * copy the globally-configured band/channel/power into the vif, and
 * set up the vif's work items, list head and RX-streaming timer.
 */
static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif)
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);

	/* clear everything but the persistent data */
	memset(wlvif, 0, offsetof(struct wl12xx_vif, persistent));

	switch (ieee80211_vif_type_p2p(vif)) {
	case NL80211_IFTYPE_P2P_CLIENT:
		/* fallthrough: a P2P client is handled as a STA BSS */
	case NL80211_IFTYPE_STATION:
		wlvif->bss_type = BSS_TYPE_STA_BSS;
	case NL80211_IFTYPE_ADHOC:
		wlvif->bss_type = BSS_TYPE_IBSS;
	case NL80211_IFTYPE_P2P_GO:
		/* fallthrough: a P2P GO is handled as an AP BSS */
	case NL80211_IFTYPE_AP:
		wlvif->bss_type = BSS_TYPE_AP_BSS;
		/* default: unsupported interface type */
		wlvif->bss_type = MAX_BSS_TYPE;

	/* no FW roles or links are allocated yet */
	wlvif->role_id = WL12XX_INVALID_ROLE_ID;
	wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
	wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;

	if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
	    wlvif->bss_type == BSS_TYPE_IBSS) {
		/* init sta/ibss data */
		wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
		wl12xx_allocate_rate_policy(wl, &wlvif->sta.basic_rate_idx);
		wl12xx_allocate_rate_policy(wl, &wlvif->sta.ap_rate_idx);
		wl12xx_allocate_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
		wlcore_allocate_klv_template(wl, &wlvif->sta.klv_template_id);
		wlvif->basic_rate_set = CONF_TX_RATE_MASK_BASIC;
		wlvif->basic_rate = CONF_TX_RATE_MASK_BASIC;
		wlvif->rate_set = CONF_TX_RATE_MASK_BASIC;
		/* AP path: broadcast/global links and per-AC rate policies */
		wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
		wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
		wl12xx_allocate_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
		wl12xx_allocate_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
		for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
			wl12xx_allocate_rate_policy(wl,
						&wlvif->ap.ucast_rate_idx[i]);
		wlvif->basic_rate_set = CONF_TX_ENABLED_RATES;
		/*
		 * TODO: check if basic_rate shouldn't be
		 * wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
		 * instead (the same thing for STA above).
		 */
		wlvif->basic_rate = CONF_TX_ENABLED_RATES;
		/* TODO: this seems to be used only for STA, check it */
		wlvif->rate_set = CONF_TX_ENABLED_RATES;

	wlvif->bitrate_masks[IEEE80211_BAND_2GHZ] = wl->conf.tx.basic_rate;
	wlvif->bitrate_masks[IEEE80211_BAND_5GHZ] = wl->conf.tx.basic_rate_5;
	wlvif->beacon_int = WL1271_DEFAULT_BEACON_INT;

	/*
	 * mac80211 configures some values globally, while we treat them
	 * per-interface. thus, on init, we have to copy them from wl
	 */
	wlvif->band = wl->band;
	wlvif->channel = wl->channel;
	wlvif->power_level = wl->power_level;
	wlvif->channel_type = wl->channel_type;

	INIT_WORK(&wlvif->rx_streaming_enable_work,
		  wl1271_rx_streaming_enable_work);
	INIT_WORK(&wlvif->rx_streaming_disable_work,
		  wl1271_rx_streaming_disable_work);
	INIT_DELAYED_WORK(&wlvif->channel_switch_work,
			  wlcore_channel_switch_work);
	INIT_DELAYED_WORK(&wlvif->connection_loss_work,
			  wlcore_connection_loss_work);
	INIT_DELAYED_WORK(&wlvif->pending_auth_complete_work,
			  wlcore_pending_auth_complete_work);
	INIT_LIST_HEAD(&wlvif->list);

	setup_timer(&wlvif->rx_streaming_timer, wl1271_rx_streaming_timer,
		    (unsigned long) wlvif);
/*
 * Boot the firmware (up to WL1271_BOOT_RETRIES attempts), run HW
 * init, publish hw/fw version info into the wiphy, and move the
 * driver to WLCORE_STATE_ON. On repeated failure the chip is powered
 * off and an error is returned.
 */
static int wl12xx_init_fw(struct wl1271 *wl)
	int retries = WL1271_BOOT_RETRIES;
	bool booted = false;
	struct wiphy *wiphy = wl->hw->wiphy;

	ret = wl12xx_chip_wakeup(wl, false);

	ret = wl->ops->boot(wl);

	ret = wl1271_hw_init(wl);

	/* failure path for one boot attempt: drain IRQs, power off, retry */
	mutex_unlock(&wl->mutex);
	/* Unlocking the mutex in the middle of handling is
	   inherently unsafe. In this case we deem it safe to do,
	   because we need to let any possibly pending IRQ out of
	   the system (and while we are WLCORE_STATE_OFF the IRQ
	   work function will not do anything.) Also, any other
	   possible concurrent operations will fail due to the
	   current state, hence the wl1271 struct should be safe. */
	wlcore_disable_interrupts(wl);
	wl1271_flush_deferred_work(wl);
	cancel_work_sync(&wl->netstack_work);
	mutex_lock(&wl->mutex);

	wl1271_power_off(wl);

	wl1271_error("firmware boot failed despite %d retries",
		     WL1271_BOOT_RETRIES);

	wl1271_info("firmware booted (%s)", wl->chip.fw_ver_str);

	/* update hw/fw version info in wiphy struct */
	wiphy->hw_version = wl->chip.id;
	/* NOTE(review): strncpy may leave fw_version unterminated when
	 * fw_ver_str fills the destination — confirm and consider a
	 * always-terminating copy here */
	strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
		sizeof(wiphy->fw_version));

	/*
	 * Now we know if 11a is supported (info from the NVS), so disable
	 * 11a channels if not supported
	 */
	if (!wl->enable_11a)
		wiphy->bands[IEEE80211_BAND_5GHZ]->n_channels = 0;

	wl1271_debug(DEBUG_MAC80211, "11a is %ssupported",
		     wl->enable_11a ? "" : "not ");

	wl->state = WLCORE_STATE_ON;
2304 static bool wl12xx_dev_role_started(struct wl12xx_vif *wlvif)
2306 return wlvif->dev_hlid != WL12XX_INVALID_LINK_ID;
/*
 * Check whether a fw switch (i.e. moving from one loaded
 * fw to another) is needed. This function is also responsible
 * for updating wl->last_vif_count, so it must be called before
 * loading a non-plt fw (so the correct fw (single-role/multi-role)
 * is chosen).
 */
static bool wl12xx_need_fw_change(struct wl1271 *wl,
				  struct vif_counter_data vif_counter_data,
	enum wl12xx_fw_type current_fw = wl->fw_type;
	u8 vif_count = vif_counter_data.counter;

	/* never switch fw in the middle of an interface-type change */
	if (test_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags))

	/* increase the vif count if this is a new vif */
	if (add && !vif_counter_data.cur_vif_running)

	wl->last_vif_count = vif_count;

	/* no need for fw change if the device is OFF */
	if (wl->state == WLCORE_STATE_OFF)

	/* no need for fw change if a single fw is used */
	if (!wl->mr_fw_name)

	/* >1 vifs require the multi-role fw; <=1 the single-role fw */
	if (vif_count > 1 && current_fw == WL12XX_FW_TYPE_NORMAL)
	if (vif_count <= 1 && current_fw == WL12XX_FW_TYPE_MULTI)
/*
 * Enter "forced psm". Make sure the sta is in psm against the ap,
 * to make the fw switch a bit more disconnection-persistent.
 */
static void wl12xx_force_active_psm(struct wl1271 *wl)
	struct wl12xx_vif *wlvif;

	/* applies to every STA-type vif */
	wl12xx_for_each_wlvif_sta(wl, wlvif) {
		wl1271_ps_set_mode(wl, wlvif, STATION_POWER_SAVE_MODE);
/*
 * Iteration state for picking a per-vif hw-queue base: which bases
 * are already taken, which vif the allocation is for, and whether
 * that vif is already active in mac80211.
 */
struct wlcore_hw_queue_iter_data {
	/* bitmap of hw-queue base slots claimed by active interfaces */
	unsigned long hw_queue_map[BITS_TO_LONGS(WLCORE_NUM_MAC_ADDRESSES)];
	/* the vif for which a queue base is being allocated */
	struct ieee80211_vif *vif;
	/* is the current vif among those iterated */
/*
 * Iterator callback: record which hw-queue base slots are in use, and
 * flag iter_data->cur_running when the target vif itself appears
 * among the active interfaces.
 */
static void wlcore_hw_queue_iter(void *data, u8 *mac,
				 struct ieee80211_vif *vif)
	struct wlcore_hw_queue_iter_data *iter_data = data;

	if (WARN_ON_ONCE(vif->hw_queue[0] == IEEE80211_INVAL_HW_QUEUE))

	if (iter_data->cur_running || vif == iter_data->vif) {
		iter_data->cur_running = true;

	/* queues are handed out in NUM_TX_QUEUES-sized groups per vif */
	__set_bit(vif->hw_queue[0] / NUM_TX_QUEUES, iter_data->hw_queue_map);
/*
 * Assign a block of NUM_TX_QUEUES mac80211 hw queues to the vif.
 * Reuses the queues already registered on the vif when it is running
 * (resume/recovery); otherwise takes the first free queue-base slot.
 * AP vifs additionally get a dedicated CAB (content-after-beacon)
 * queue from the reserved tail range.
 */
static int wlcore_allocate_hw_queue_base(struct wl1271 *wl,
					 struct wl12xx_vif *wlvif)
	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
	struct wlcore_hw_queue_iter_data iter_data = {};

	iter_data.vif = vif;

	/* mark all bits taken by active interfaces */
	ieee80211_iterate_active_interfaces_atomic(wl->hw,
					IEEE80211_IFACE_ITER_RESUME_ALL,
					wlcore_hw_queue_iter, &iter_data);

	/* the current vif is already running in mac80211 (resume/recovery) */
	if (iter_data.cur_running) {
		wlvif->hw_queue_base = vif->hw_queue[0];
		wl1271_debug(DEBUG_MAC80211,
			     "using pre-allocated hw queue base %d",
			     wlvif->hw_queue_base);

		/* interface type might have changed type */
		goto adjust_cab_queue;

	q_base = find_first_zero_bit(iter_data.hw_queue_map,
				     WLCORE_NUM_MAC_ADDRESSES);
	if (q_base >= WLCORE_NUM_MAC_ADDRESSES)

	wlvif->hw_queue_base = q_base * NUM_TX_QUEUES;
	wl1271_debug(DEBUG_MAC80211, "allocating hw queue base: %d",
		     wlvif->hw_queue_base);

	for (i = 0; i < NUM_TX_QUEUES; i++) {
		wl->queue_stop_reasons[wlvif->hw_queue_base + i] = 0;
		/* register hw queues in mac80211 */
		vif->hw_queue[i] = wlvif->hw_queue_base + i;

	/* the last places are reserved for cab queues per interface */
	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
		vif->cab_queue = NUM_TX_QUEUES * WLCORE_NUM_MAC_ADDRESSES +
				 wlvif->hw_queue_base / NUM_TX_QUEUES;

		vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
/*
 * mac80211 add_interface callback: validate the request, initialize
 * per-vif state, pick the FW role and hw-queue base, boot or switch
 * the firmware when needed, enable the role in FW, and link the vif
 * into wl->wlvif_list.
 */
static int wl1271_op_add_interface(struct ieee80211_hw *hw,
				   struct ieee80211_vif *vif)
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	struct vif_counter_data vif_count;

	/* PLT (production-line test) mode owns the device exclusively */
	wl1271_error("Adding Interface not allowed while in PLT mode");

	vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
			     IEEE80211_VIF_SUPPORTS_CQM_RSSI;

	wl1271_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM",
		     ieee80211_vif_type_p2p(vif), vif->addr);

	wl12xx_get_vif_count(hw, vif, &vif_count);

	mutex_lock(&wl->mutex);
	ret = wl1271_ps_elp_wakeup(wl);

	/*
	 * in some very corner case HW recovery scenarios its possible to
	 * get here before __wl1271_op_remove_interface is complete, so
	 * opt out if that is the case.
	 */
	if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) ||
	    test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)) {

	ret = wl12xx_init_vif_data(wl, vif);

	role_type = wl12xx_get_role_type(wl, wlvif);
	if (role_type == WL12XX_INVALID_ROLE_TYPE) {

	ret = wlcore_allocate_hw_queue_base(wl, wlvif);

	/* single-role <-> multi-role fw switch via intended recovery */
	if (wl12xx_need_fw_change(wl, vif_count, true)) {
		wl12xx_force_active_psm(wl);
		set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
		mutex_unlock(&wl->mutex);
		wl1271_recovery_work(&wl->recovery_work);

	/*
	 * TODO: after the nvs issue will be solved, move this block
	 * to start(), and make sure here the driver is ON.
	 */
	if (wl->state == WLCORE_STATE_OFF) {
		/*
		 * we still need this in order to configure the fw
		 * while uploading the nvs
		 */
		memcpy(wl->addresses[0].addr, vif->addr, ETH_ALEN);

		ret = wl12xx_init_fw(wl);

	ret = wl12xx_cmd_role_enable(wl, vif->addr,
				     role_type, &wlvif->role_id);

	ret = wl1271_init_vif_specific(wl, vif);

	list_add(&wlvif->list, &wl->wlvif_list);
	set_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags);

	/* presumably bumps the AP interface counter here — confirm */
	if (wlvif->bss_type == BSS_TYPE_AP_BSS)

	wl1271_ps_elp_sleep(wl);

	mutex_unlock(&wl->mutex);
/*
 * Tear down one interface with wl->mutex held: abort any scan or
 * sched-scan it owns, disable its FW roles (unless a recovery is in
 * progress), reset its TX state, free its rate policies and keys,
 * unlink it, and cancel its per-vif works (dropping the mutex while
 * doing so).
 */
static void __wl1271_op_remove_interface(struct wl1271 *wl,
					 struct ieee80211_vif *vif,
					 bool reset_tx_queues)
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);

	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);

	wl1271_debug(DEBUG_MAC80211, "mac80211 remove interface");

	if (!test_and_clear_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))

	/* because of hardware recovery, we may get here twice */
	if (wl->state == WLCORE_STATE_OFF)

	wl1271_info("down");

	if (wl->scan.state != WL1271_SCAN_STATE_IDLE &&
	    wl->scan_wlvif == wlvif) {
		/*
		 * Rearm the tx watchdog just before idling scan. This
		 * prevents just-finished scans from triggering the watchdog
		 */
		wl12xx_rearm_tx_watchdog_locked(wl);

		wl->scan.state = WL1271_SCAN_STATE_IDLE;
		memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
		wl->scan_wlvif = NULL;
		wl->scan.req = NULL;
		/* true = the scan was aborted, not completed */
		ieee80211_scan_completed(wl->hw, true);

	if (wl->sched_vif == wlvif) {
		ieee80211_sched_scan_stopped(wl->hw);
		wl->sched_vif = NULL;

	if (wl->roc_vif == vif) {
		ieee80211_remain_on_channel_expired(wl->hw);

	if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
		/* disable active roles */
		ret = wl1271_ps_elp_wakeup(wl);

		if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
		    wlvif->bss_type == BSS_TYPE_IBSS) {
			if (wl12xx_dev_role_started(wlvif))
				wl12xx_stop_dev(wl, wlvif);

		ret = wl12xx_cmd_role_disable(wl, &wlvif->role_id);

		wl1271_ps_elp_sleep(wl);

	wl12xx_tx_reset_wlvif(wl, wlvif);

	/* clear all hlids (except system_hlid) */
	wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;

	if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
	    wlvif->bss_type == BSS_TYPE_IBSS) {
		wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
		wl12xx_free_rate_policy(wl, &wlvif->sta.basic_rate_idx);
		wl12xx_free_rate_policy(wl, &wlvif->sta.ap_rate_idx);
		wl12xx_free_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
		wlcore_free_klv_template(wl, &wlvif->sta.klv_template_id);
		/* AP path: release per-AC policies and installed keys */
		wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
		wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
		wl12xx_free_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
		wl12xx_free_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
		for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
			wl12xx_free_rate_policy(wl,
						&wlvif->ap.ucast_rate_idx[i]);
		wl1271_free_ap_keys(wl, wlvif);

	dev_kfree_skb(wlvif->probereq);
	wlvif->probereq = NULL;
	if (wl->last_wlvif == wlvif)
		wl->last_wlvif = NULL;
	list_del(&wlvif->list);
	memset(wlvif->ap.sta_hlid_map, 0, sizeof(wlvif->ap.sta_hlid_map));
	wlvif->role_id = WL12XX_INVALID_ROLE_ID;
	wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;

	/*
	 * Last AP, have more stations. Configure sleep auth according to STA.
	 * Don't do this on unintended recovery.
	 */
	if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) &&
	    !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags))

	if (wl->ap_count == 0 && is_ap) {
		/* mask ap events */
		wl->event_mask &= ~wl->ap_event_mask;
		wl1271_event_unmask(wl);

	if (wl->ap_count == 0 && is_ap && wl->sta_count) {
		u8 sta_auth = wl->conf.conn.sta_sleep_auth;
		/* Configure for power according to debugfs */
		if (sta_auth != WL1271_PSM_ILLEGAL)
			wl1271_acx_sleep_auth(wl, sta_auth);
		/* Configure for ELP power saving */
			wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP);

	mutex_unlock(&wl->mutex);

	/* cancel per-vif works with the mutex dropped — presumably they
	 * may take wl->mutex themselves; confirm before reordering */
	del_timer_sync(&wlvif->rx_streaming_timer);
	cancel_work_sync(&wlvif->rx_streaming_enable_work);
	cancel_work_sync(&wlvif->rx_streaming_disable_work);
	cancel_delayed_work_sync(&wlvif->connection_loss_work);
	cancel_delayed_work_sync(&wlvif->channel_switch_work);
	cancel_delayed_work_sync(&wlvif->pending_auth_complete_work);

	mutex_lock(&wl->mutex);
/*
 * mac80211 remove_interface callback: verify the vif is still tracked
 * by the driver, run the teardown, and trigger an intended FW switch
 * when the new vif count requires a different (single/multi-role)
 * firmware.
 */
static void wl1271_op_remove_interface(struct ieee80211_hw *hw,
				       struct ieee80211_vif *vif)
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	struct wl12xx_vif *iter;
	struct vif_counter_data vif_count;

	wl12xx_get_vif_count(hw, vif, &vif_count);
	mutex_lock(&wl->mutex);

	if (wl->state == WLCORE_STATE_OFF ||
	    !test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))

	/*
	 * wl->vif can be null here if someone shuts down the interface
	 * just when hardware recovery has been started.
	 */
	wl12xx_for_each_wlvif(wl, iter) {

		__wl1271_op_remove_interface(wl, vif, true);

	WARN_ON(iter != wlvif);
	if (wl12xx_need_fw_change(wl, vif_count, false)) {
		wl12xx_force_active_psm(wl);
		set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
		wl12xx_queue_recovery_work(wl);

	mutex_unlock(&wl->mutex);
/*
 * mac80211 change_interface callback: implemented as remove + re-add
 * with the new type, guarded by VIF_CHANGE_IN_PROGRESS so no fw
 * switch is attempted mid-change.
 */
static int wl12xx_op_change_interface(struct ieee80211_hw *hw,
				      struct ieee80211_vif *vif,
				      enum nl80211_iftype new_type, bool p2p)
	struct wl1271 *wl = hw->priv;

	set_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
	wl1271_op_remove_interface(hw, vif);

	vif->type = new_type;
	/* NOTE(review): @p2p is presumably copied into vif->p2p nearby —
	 * confirm */
	ret = wl1271_op_add_interface(hw, vif);

	clear_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
/*
 * Issue the firmware JOIN for a STA or IBSS vif (role_start).  See the
 * in-code comment: a JOIN while associated clears the WPA/WPA2 keys in
 * the chipset, which is only legitimate during roaming.
 */
2729 static int wlcore_join(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2732 bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
2735 * One of the side effects of the JOIN command is that is clears
2736 * WPA/WPA2 keys from the chipset. Performing a JOIN while associated
2737 * to a WPA/WPA2 access point will therefore kill the data-path.
2738 * Currently the only valid scenario for JOIN during association
2739 * is on roaming, in which case we will also be given new keys.
2740 * Keep the below message for now, unless it starts bothering
2741 * users who really like to roam a lot :)
2743 if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2744 wl1271_info("JOIN while associated.");
2746 /* clear encryption type */
2747 wlvif->encryption_type = KEY_NONE;
2750 ret = wl12xx_cmd_role_start_ibss(wl, wlvif);
2752 if (wl->quirks & WLCORE_QUIRK_START_STA_FAILS) {
2754 * TODO: this is an ugly workaround for wl12xx fw
2755 * bug - we are not able to tx/rx after the first
2756 * start_sta, so make dummy start+stop calls,
2757 * and then call start_sta again.
2758 * this should be fixed in the fw.
2760 wl12xx_cmd_role_start_sta(wl, wlvif);
2761 wl12xx_cmd_role_stop_sta(wl, wlvif);
/* Real (or second, on quirky fw) start_sta — this return value counts. */
2764 ret = wl12xx_cmd_role_start_sta(wl, wlvif);
/*
 * Locate the SSID IE in @skb (IEs start at @offset into the frame) and
 * cache it in wlvif->ssid / wlvif->ssid_len.  Errors visible here:
 * missing SSID IE and SSID longer than IEEE80211_MAX_SSID_LEN.
 */
2770 static int wl1271_ssid_set(struct wl12xx_vif *wlvif, struct sk_buff *skb,
2774 const u8 *ptr = cfg80211_find_ie(WLAN_EID_SSID, skb->data + offset,
2778 wl1271_error("No SSID in IEs!");
2783 if (ssid_len > IEEE80211_MAX_SSID_LEN) {
2784 wl1271_error("SSID is too long!");
2788 wlvif->ssid_len = ssid_len;
/* ptr points at the IE header; payload starts after the 2-byte id/len. */
2789 memcpy(wlvif->ssid, ptr+2, ssid_len);
/*
 * Derive the vif's SSID from the AP probe request template kept by
 * mac80211.  STA-only by design (see inline comment).
 */
2793 static int wlcore_set_ssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2795 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2796 struct sk_buff *skb;
2799 /* we currently only support setting the ssid from the ap probe req */
2800 if (wlvif->bss_type != BSS_TYPE_STA_BSS)
2803 skb = ieee80211_ap_probereq_get(wl->hw, vif);
/* IEs in a probe request start at u.probe_req.variable. */
2807 ieoffset = offsetof(struct ieee80211_mgmt,
2808 u.probe_req.variable);
2809 wl1271_ssid_set(wlvif, skb, ieoffset);
/*
 * Post-association configuration for a STA vif: record AID/beacon
 * parameters, build templates (ps-poll, probe request), enable
 * connection monitoring and keep-alive, then sync the fw PSM state with
 * mac80211's default (ACTIVE).  Command ordering is significant — see
 * the keep-alive comment below.
 */
2815 static int wlcore_set_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2816 struct ieee80211_bss_conf *bss_conf,
2822 wlvif->aid = bss_conf->aid;
2823 wlvif->channel_type = cfg80211_get_chandef_type(&bss_conf->chandef);
2824 wlvif->beacon_int = bss_conf->beacon_int;
2825 wlvif->wmm_enabled = bss_conf->qos;
2827 set_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags);
2830 * with wl1271, we don't need to update the
2831 * beacon_int and dtim_period, because the firmware
2832 * updates it by itself when the first beacon is
2833 * received after a join.
2835 ret = wl1271_cmd_build_ps_poll(wl, wlvif, wlvif->aid);
2840 * Get a template for hardware connection maintenance
2842 dev_kfree_skb(wlvif->probereq);
2843 wlvif->probereq = wl1271_cmd_build_ap_probe_req(wl,
2846 ieoffset = offsetof(struct ieee80211_mgmt,
2847 u.probe_req.variable);
/* Refresh the cached SSID from the newly built probe request. */
2848 wl1271_ssid_set(wlvif, wlvif->probereq, ieoffset);
2850 /* enable the connection monitoring feature */
2851 ret = wl1271_acx_conn_monit_params(wl, wlvif, true);
2856 * The join command disable the keep-alive mode, shut down its process,
2857 * and also clear the template config, so we need to reset it all after
2858 * the join. The acx_aid starts the keep-alive process, and the order
2859 * of the commands below is relevant.
2861 ret = wl1271_acx_keep_alive_mode(wl, wlvif, true);
2865 ret = wl1271_acx_aid(wl, wlvif, wlvif->aid);
2869 ret = wl12xx_cmd_build_klv_null_data(wl, wlvif);
2873 ret = wl1271_acx_keep_alive_config(wl, wlvif,
2874 wlvif->sta.klv_template_id,
2875 ACX_KEEP_ALIVE_TPL_VALID);
2880 * The default fw psm configuration is AUTO, while mac80211 default
2881 * setting is off (ACTIVE), so sync the fw with the correct value.
2883 ret = wl1271_ps_set_mode(wl, wlvif, STATION_ACTIVE_MODE);
2889 wl1271_tx_enabled_rates_get(wl,
2892 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
/*
 * Undo wlcore_set_assoc(): clear association/join state, drop the probe
 * request template, disable connection monitoring and keep-alive, abort
 * any in-flight channel switch and invalidate the keep-alive template.
 */
2900 static int wlcore_unset_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2903 bool sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
2905 /* make sure we are connected (sta) joined */
2907 !test_and_clear_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2910 /* make sure we are joined (ibss) */
2912 test_and_clear_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags))
2916 /* use defaults when not associated */
2919 /* free probe-request template */
2920 dev_kfree_skb(wlvif->probereq);
2921 wlvif->probereq = NULL;
2923 /* disable connection monitor features */
2924 ret = wl1271_acx_conn_monit_params(wl, wlvif, false);
2928 /* Disable the keep-alive feature */
2929 ret = wl1271_acx_keep_alive_mode(wl, wlvif, false);
/* A channel switch may still be pending — stop it and tell mac80211. */
2934 if (test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags)) {
2935 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2937 wl12xx_cmd_stop_channel_switch(wl, wlvif);
2938 ieee80211_chswitch_done(vif, false);
2939 cancel_delayed_work(&wlvif->channel_switch_work);
2942 /* invalidate keep-alive template */
2943 wl1271_acx_keep_alive_config(wl, wlvif,
2944 wlvif->sta.klv_template_id,
2945 ACX_KEEP_ALIVE_TPL_INVALID);
2950 static void wl1271_set_band_rate(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2952 wlvif->basic_rate_set = wlvif->bitrate_masks[wlvif->band];
2953 wlvif->rate_set = wlvif->basic_rate_set;
/*
 * Track mac80211 idle transitions for a STA vif via WLVIF_FLAG_ACTIVE.
 * Entering idle also stops a running sched scan on this vif, since the
 * firmware only supports sched_scan while idle (see inline comment).
 */
2956 static void wl1271_sta_handle_idle(struct wl1271 *wl, struct wl12xx_vif *wlvif,
/* ACTIVE flag is the inverse of "idle". */
2959 bool cur_idle = !test_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
2961 if (idle == cur_idle)
2965 clear_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
2967 /* The current firmware only supports sched_scan in idle */
2968 if (wl->sched_vif == wlvif)
2969 wl->ops->sched_scan_stop(wl, wlvif);
2971 set_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
/*
 * Apply a hw config change to a single vif.  Only the tx power level is
 * handled here; the new level is cached only after the ACX succeeds.
 */
2975 static int wl12xx_config_vif(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2976 struct ieee80211_conf *conf, u32 changed)
2980 if (conf->power_level != wlvif->power_level) {
2981 ret = wl1271_acx_tx_power(wl, wlvif, conf->power_level);
2985 wlvif->power_level = conf->power_level;
/*
 * mac80211 .config op: cache the global power level, then (if the hw is
 * on) wake the chip and propagate the change to every vif.
 */
2991 static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
2993 struct wl1271 *wl = hw->priv;
2994 struct wl12xx_vif *wlvif;
2995 struct ieee80211_conf *conf = &hw->conf;
2998 wl1271_debug(DEBUG_MAC80211, "mac80211 config psm %s power %d %s"
3000 conf->flags & IEEE80211_CONF_PS ? "on" : "off",
3002 conf->flags & IEEE80211_CONF_IDLE ? "idle" : "in use",
3005 mutex_lock(&wl->mutex);
/* Remember the requested power level even if the hw is currently off. */
3007 if (changed & IEEE80211_CONF_CHANGE_POWER)
3008 wl->power_level = conf->power_level;
3010 if (unlikely(wl->state != WLCORE_STATE_ON))
3013 ret = wl1271_ps_elp_wakeup(wl);
3017 /* configure each interface */
3018 wl12xx_for_each_wlvif(wl, wlvif) {
3019 ret = wl12xx_config_vif(wl, wlvif, conf, changed);
3025 wl1271_ps_elp_sleep(wl);
3028 mutex_unlock(&wl->mutex);
/*
 * Multicast filter snapshot built in prepare_multicast and consumed in
 * configure_filter (fields besides mc_list are outside this view).
 */
3033 struct wl1271_filter_params {
3036 u8 mc_list[ACX_MC_ADDRESS_GROUP_MAX][ETH_ALEN];
/*
 * mac80211 .prepare_multicast op: snapshot the multicast address list
 * into a heap-allocated wl1271_filter_params, returned to mac80211 as
 * an opaque u64 cookie (consumed and freed by configure_filter).
 * GFP_ATOMIC because this op may run in atomic context.
 */
3039 static u64 wl1271_op_prepare_multicast(struct ieee80211_hw *hw,
3040 struct netdev_hw_addr_list *mc_list)
3042 struct wl1271_filter_params *fp;
3043 struct netdev_hw_addr *ha;
3045 fp = kzalloc(sizeof(*fp), GFP_ATOMIC);
3047 wl1271_error("Out of memory setting filters.");
3051 /* update multicast filtering parameters */
3052 fp->mc_list_length = 0;
/* Too many addresses for the fw table — fall back to "filter disabled". */
3053 if (netdev_hw_addr_list_count(mc_list) > ACX_MC_ADDRESS_GROUP_MAX) {
3054 fp->enabled = false;
3057 netdev_hw_addr_list_for_each(ha, mc_list) {
3058 memcpy(fp->mc_list[fp->mc_list_length],
3059 ha->addr, ETH_ALEN);
3060 fp->mc_list_length++;
/* Pointer smuggled through mac80211 as a u64 cookie. */
3064 return (u64)(unsigned long)fp;
/* RX filter flags from mac80211 that wlcore supports (FIF_* mask). */
3067 #define WL1271_SUPPORTED_FILTERS (FIF_PROMISC_IN_BSS | \
3070 FIF_BCN_PRBRESP_PROMISC | \
/*
 * mac80211 .configure_filter op: clamp the requested filters to the
 * supported set and program the multicast group-address table for each
 * non-AP vif (FIF_ALLMULTI disables the table).  The fp cookie comes
 * from prepare_multicast.
 */
3074 static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
3075 unsigned int changed,
3076 unsigned int *total, u64 multicast)
3078 struct wl1271_filter_params *fp = (void *)(unsigned long)multicast;
3079 struct wl1271 *wl = hw->priv;
3080 struct wl12xx_vif *wlvif;
3084 wl1271_debug(DEBUG_MAC80211, "mac80211 configure filter changed %x"
3085 " total %x", changed, *total);
3087 mutex_lock(&wl->mutex);
/* Report back to mac80211 only the filters we actually honor. */
3089 *total &= WL1271_SUPPORTED_FILTERS;
3090 changed &= WL1271_SUPPORTED_FILTERS;
3092 if (unlikely(wl->state != WLCORE_STATE_ON))
3095 ret = wl1271_ps_elp_wakeup(wl);
3099 wl12xx_for_each_wlvif(wl, wlvif) {
3100 if (wlvif->bss_type != BSS_TYPE_AP_BSS) {
3101 if (*total & FIF_ALLMULTI)
3102 ret = wl1271_acx_group_address_tbl(wl, wlvif,
3106 ret = wl1271_acx_group_address_tbl(wl, wlvif,
3109 fp->mc_list_length);
3116 * the fw doesn't provide an api to configure the filters. instead,
3117 * the filters configuration is based on the active roles / ROC
3122 wl1271_ps_elp_sleep(wl);
3125 mutex_unlock(&wl->mutex);
/*
 * Record an AP key for later installation (used before the AP role is
 * started; keys are replayed by wl1271_ap_init_hwenc).  Stores a copy
 * in the first free slot of wlvif->ap.recorded_keys; replacing an
 * already-recorded id is rejected with a warning.
 */
3129 static int wl1271_record_ap_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3130 u8 id, u8 key_type, u8 key_size,
3131 const u8 *key, u8 hlid, u32 tx_seq_32,
3134 struct wl1271_ap_key *ap_key;
3137 wl1271_debug(DEBUG_CRYPT, "record ap key id %d", (int)id);
3139 if (key_size > MAX_KEY_SIZE)
3143 * Find next free entry in ap_keys. Also check we are not replacing
3146 for (i = 0; i < MAX_NUM_KEYS; i++) {
3147 if (wlvif->ap.recorded_keys[i] == NULL)
3150 if (wlvif->ap.recorded_keys[i]->id == id) {
3151 wl1271_warning("trying to record key replacement");
/* i == MAX_NUM_KEYS means no free slot was found. */
3156 if (i == MAX_NUM_KEYS)
3159 ap_key = kzalloc(sizeof(*ap_key), GFP_KERNEL);
3164 ap_key->key_type = key_type;
3165 ap_key->key_size = key_size;
3166 memcpy(ap_key->key, key, key_size);
3167 ap_key->hlid = hlid;
3168 ap_key->tx_seq_32 = tx_seq_32;
3169 ap_key->tx_seq_16 = tx_seq_16;
3171 wlvif->ap.recorded_keys[i] = ap_key;
3175 static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3179 for (i = 0; i < MAX_NUM_KEYS; i++) {
3180 kfree(wlvif->ap.recorded_keys[i]);
3181 wlvif->ap.recorded_keys[i] = NULL;
/*
 * Replay all keys recorded before AP start into the firmware.  Keys
 * without a specific link id go to the broadcast hlid.  If any WEP key
 * was installed, also program the default WEP key.  The recorded-key
 * table is freed at the end regardless of outcome.
 */
3185 static int wl1271_ap_init_hwenc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3188 struct wl1271_ap_key *key;
3189 bool wep_key_added = false;
3191 for (i = 0; i < MAX_NUM_KEYS; i++) {
3193 if (wlvif->ap.recorded_keys[i] == NULL)
3196 key = wlvif->ap.recorded_keys[i];
3198 if (hlid == WL12XX_INVALID_LINK_ID)
3199 hlid = wlvif->ap.bcast_hlid;
3201 ret = wl1271_cmd_set_ap_key(wl, wlvif, KEY_ADD_OR_REPLACE,
3202 key->id, key->key_type,
3203 key->key_size, key->key,
3204 hlid, key->tx_seq_32,
3209 if (key->key_type == KEY_WEP)
3210 wep_key_added = true;
3213 if (wep_key_added) {
3214 ret = wl12xx_cmd_set_default_wep_key(wl, wlvif->default_key,
3215 wlvif->ap.bcast_hlid);
/* Always drop the recorded keys — they have been consumed. */
3221 wl1271_free_ap_keys(wl, wlvif);
/*
 * Install/remove a key for a vif.  AP path: record the key if the AP
 * role is not started yet, otherwise program it immediately (removals
 * after AP shutdown are faked as success).  STA path: resolve the
 * target address (peer or broadcast), silently ignore unicast-key
 * removal (cleared by the fw on the next JOIN), and skip removal when
 * the hlid is already gone.
 */
3225 static int wl1271_set_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3226 u16 action, u8 id, u8 key_type,
3227 u8 key_size, const u8 *key, u32 tx_seq_32,
3228 u16 tx_seq_16, struct ieee80211_sta *sta)
3231 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
3234 struct wl1271_station *wl_sta;
3238 wl_sta = (struct wl1271_station *)sta->drv_priv;
3239 hlid = wl_sta->hlid;
3241 hlid = wlvif->ap.bcast_hlid;
3244 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
3246 * We do not support removing keys after AP shutdown.
3247 * Pretend we do to make mac80211 happy.
3249 if (action != KEY_ADD_OR_REPLACE)
3252 ret = wl1271_record_ap_key(wl, wlvif, id,
3254 key, hlid, tx_seq_32,
3257 ret = wl1271_cmd_set_ap_key(wl, wlvif, action,
3258 id, key_type, key_size,
3259 key, hlid, tx_seq_32,
3267 static const u8 bcast_addr[ETH_ALEN] = {
3268 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
/* No sta means a group key — address it to broadcast. */
3271 addr = sta ? sta->addr : bcast_addr;
3273 if (is_zero_ether_addr(addr)) {
3274 /* We dont support TX only encryption */
3278 /* The wl1271 does not allow to remove unicast keys - they
3279 will be cleared automatically on next CMD_JOIN. Ignore the
3280 request silently, as we dont want the mac80211 to emit
3281 an error message. */
3282 if (action == KEY_REMOVE && !is_broadcast_ether_addr(addr))
3285 /* don't remove key if hlid was already deleted */
3286 if (action == KEY_REMOVE &&
3287 wlvif->sta.hlid == WL12XX_INVALID_LINK_ID)
3290 ret = wl1271_cmd_set_sta_key(wl, wlvif, action,
3291 id, key_type, key_size,
3292 key, addr, tx_seq_32,
/*
 * mac80211 .set_key op.  GEM and TKIP change the per-packet spare-block
 * accounting, so for those ciphers the tx queues are stopped and
 * flushed around the hw-specific set_key to keep fw accounting in sync.
 */
3302 static int wlcore_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
3303 struct ieee80211_vif *vif,
3304 struct ieee80211_sta *sta,
3305 struct ieee80211_key_conf *key_conf)
3307 struct wl1271 *wl = hw->priv;
3309 bool might_change_spare =
3310 key_conf->cipher == WL1271_CIPHER_SUITE_GEM ||
3311 key_conf->cipher == WLAN_CIPHER_SUITE_TKIP;
3313 if (might_change_spare) {
3315 * stop the queues and flush to ensure the next packets are
3316 * in sync with FW spare block accounting
3318 wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
3319 wl1271_tx_flush(wl);
3322 mutex_lock(&wl->mutex);
3324 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3326 goto out_wake_queues;
3329 ret = wl1271_ps_elp_wakeup(wl);
3331 goto out_wake_queues;
/* Chip-family-specific key programming (wl12xx/wl18xx). */
3333 ret = wlcore_hw_set_key(wl, cmd, vif, sta, key_conf);
3335 wl1271_ps_elp_sleep(wl);
/* Restart the queues stopped above, on every exit path. */
3338 if (might_change_spare)
3339 wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
3341 mutex_unlock(&wl->mutex);
/*
 * Common key-handling helper called from the per-chip set_key ops:
 * resolve the target hlid, derive the tx sequence counters from the
 * link's freed-packet count, map the cipher suite to a fw key type, and
 * dispatch SET_KEY / DISABLE_KEY.  On STA vifs, a change of the unicast
 * (or WEP) encryption type also rebuilds the ARP response template.
 */
3346 int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
3347 struct ieee80211_vif *vif,
3348 struct ieee80211_sta *sta,
3349 struct ieee80211_key_conf *key_conf)
3351 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3358 wl1271_debug(DEBUG_MAC80211, "mac80211 set key");
3360 wl1271_debug(DEBUG_CRYPT, "CMD: 0x%x sta: %p", cmd, sta);
3361 wl1271_debug(DEBUG_CRYPT, "Key: algo:0x%x, id:%d, len:%d flags 0x%x",
3362 key_conf->cipher, key_conf->keyidx,
3363 key_conf->keylen, key_conf->flags);
3364 wl1271_dump(DEBUG_CRYPT, "KEY: ", key_conf->key, key_conf->keylen);
3366 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
3368 struct wl1271_station *wl_sta = (void *)sta->drv_priv;
3369 hlid = wl_sta->hlid;
3371 hlid = wlvif->ap.bcast_hlid;
3374 hlid = wlvif->sta.hlid;
/* Seed replay counters from the link's cumulative freed-pkts count. */
3376 if (hlid != WL12XX_INVALID_LINK_ID) {
3377 u64 tx_seq = wl->links[hlid].total_freed_pkts;
3378 tx_seq_32 = WL1271_TX_SECURITY_HI32(tx_seq);
3379 tx_seq_16 = WL1271_TX_SECURITY_LO16(tx_seq);
3382 switch (key_conf->cipher) {
3383 case WLAN_CIPHER_SUITE_WEP40:
3384 case WLAN_CIPHER_SUITE_WEP104:
3387 key_conf->hw_key_idx = key_conf->keyidx;
3389 case WLAN_CIPHER_SUITE_TKIP:
3390 key_type = KEY_TKIP;
3391 key_conf->hw_key_idx = key_conf->keyidx;
3393 case WLAN_CIPHER_SUITE_CCMP:
/* CCMP: firmware fills the IV — ask mac80211 to leave space for it. */
3395 key_conf->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
3397 case WL1271_CIPHER_SUITE_GEM:
3401 wl1271_error("Unknown key algo 0x%x", key_conf->cipher);
3408 ret = wl1271_set_key(wl, wlvif, KEY_ADD_OR_REPLACE,
3409 key_conf->keyidx, key_type,
3410 key_conf->keylen, key_conf->key,
3411 tx_seq_32, tx_seq_16, sta);
3413 wl1271_error("Could not add or replace key");
3418 * reconfiguring arp response if the unicast (or common)
3419 * encryption key type was changed
3421 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
3422 (sta || key_type == KEY_WEP) &&
3423 wlvif->encryption_type != key_type) {
3424 wlvif->encryption_type = key_type;
3425 ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
3427 wl1271_warning("build arp rsp failed: %d", ret);
3434 ret = wl1271_set_key(wl, wlvif, KEY_REMOVE,
3435 key_conf->keyidx, key_type,
3436 key_conf->keylen, key_conf->key,
3439 wl1271_error("Could not remove key");
3445 wl1271_error("Unsupported key cmd 0x%x", cmd);
3451 EXPORT_SYMBOL_GPL(wlcore_set_key);
/*
 * mac80211 .set_default_unicast_key op: cache the new default key index
 * and, when WEP is in use, push it to the firmware right away.
 */
3453 static void wl1271_op_set_default_key_idx(struct ieee80211_hw *hw,
3454 struct ieee80211_vif *vif,
3457 struct wl1271 *wl = hw->priv;
3458 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3461 wl1271_debug(DEBUG_MAC80211, "mac80211 set default key idx %d",
3464 mutex_lock(&wl->mutex);
3466 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3471 ret = wl1271_ps_elp_wakeup(wl);
3475 wlvif->default_key = key_idx;
3477 /* the default WEP key needs to be configured at least once */
3478 if (wlvif->encryption_type == KEY_WEP) {
3479 ret = wl12xx_cmd_set_default_wep_key(wl,
3487 wl1271_ps_elp_sleep(wl);
3490 mutex_unlock(&wl->mutex);
/*
 * Push the current regulatory domain configuration to the firmware on
 * chips that need it (REGDOMAIN_CONF quirk).  A failed fw command here
 * is treated as fatal for the fw and queues a recovery.
 */
3493 void wlcore_regdomain_config(struct wl1271 *wl)
3497 if (!(wl->quirks & WLCORE_QUIRK_REGDOMAIN_CONF))
3500 mutex_lock(&wl->mutex);
3502 if (unlikely(wl->state != WLCORE_STATE_ON))
3505 ret = wl1271_ps_elp_wakeup(wl);
3509 ret = wlcore_cmd_regdomain_config_locked(wl);
3511 wl12xx_queue_recovery_work(wl);
3515 wl1271_ps_elp_sleep(wl);
3517 mutex_unlock(&wl->mutex);
/*
 * mac80211 .hw_scan op: start a one-shot hw scan for the (single)
 * requested SSID.  Refused while any role is in ROC.  Note the inline
 * comment about why -EBUSY cannot be returned when the hw is off.
 */
3520 static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
3521 struct ieee80211_vif *vif,
3522 struct cfg80211_scan_request *req)
3524 struct wl1271 *wl = hw->priv;
3529 wl1271_debug(DEBUG_MAC80211, "mac80211 hw scan");
/* Only the first requested SSID is used by the fw scan. */
3532 ssid = req->ssids[0].ssid;
3533 len = req->ssids[0].ssid_len;
3536 mutex_lock(&wl->mutex);
3538 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3540 * We cannot return -EBUSY here because cfg80211 will expect
3541 * a call to ieee80211_scan_completed if we do - in this case
3542 * there won't be any call.
3548 ret = wl1271_ps_elp_wakeup(wl);
3552 /* fail if there is any role in ROC */
3553 if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
3554 /* don't allow scanning right now */
3559 ret = wlcore_scan(hw->priv, vif, ssid, len, req);
3561 wl1271_ps_elp_sleep(wl);
3563 mutex_unlock(&wl->mutex);
/*
 * mac80211 .cancel_hw_scan op: stop an in-progress hw scan, reset the
 * scan state machine, notify mac80211 (aborted=true) and cancel the
 * scan-complete work after the mutex is released.
 */
3568 static void wl1271_op_cancel_hw_scan(struct ieee80211_hw *hw,
3569 struct ieee80211_vif *vif)
3571 struct wl1271 *wl = hw->priv;
3572 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3575 wl1271_debug(DEBUG_MAC80211, "mac80211 cancel hw scan");
3577 mutex_lock(&wl->mutex);
3579 if (unlikely(wl->state != WLCORE_STATE_ON))
3582 if (wl->scan.state == WL1271_SCAN_STATE_IDLE)
3585 ret = wl1271_ps_elp_wakeup(wl);
3589 if (wl->scan.state != WL1271_SCAN_STATE_DONE) {
3590 ret = wl->ops->scan_stop(wl, wlvif);
3596 * Rearm the tx watchdog just before idling scan. This
3597 * prevents just-finished scans from triggering the watchdog
3599 wl12xx_rearm_tx_watchdog_locked(wl);
3601 wl->scan.state = WL1271_SCAN_STATE_IDLE;
3602 memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
3603 wl->scan_wlvif = NULL;
3604 wl->scan.req = NULL;
/* true == scan was aborted. */
3605 ieee80211_scan_completed(wl->hw, true);
3608 wl1271_ps_elp_sleep(wl);
3610 mutex_unlock(&wl->mutex);
/* Must run without wl->mutex held — the work handler takes it. */
3612 cancel_delayed_work_sync(&wl->scan_complete_work);
/*
 * mac80211 .sched_scan_start op: hand the request to the chip-specific
 * implementation and remember which vif owns the scheduled scan.
 */
3615 static int wl1271_op_sched_scan_start(struct ieee80211_hw *hw,
3616 struct ieee80211_vif *vif,
3617 struct cfg80211_sched_scan_request *req,
3618 struct ieee80211_sched_scan_ies *ies)
3620 struct wl1271 *wl = hw->priv;
3621 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3624 wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_start");
3626 mutex_lock(&wl->mutex);
3628 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3633 ret = wl1271_ps_elp_wakeup(wl);
3637 ret = wl->ops->sched_scan_start(wl, wlvif, req, ies);
/* Record the owning vif only after a successful start. */
3641 wl->sched_vif = wlvif;
3644 wl1271_ps_elp_sleep(wl);
3646 mutex_unlock(&wl->mutex);
/*
 * mac80211 .sched_scan_stop op: stop the scheduled scan via the
 * chip-specific hook, under the usual state/wakeup bracket.
 */
3650 static void wl1271_op_sched_scan_stop(struct ieee80211_hw *hw,
3651 struct ieee80211_vif *vif)
3653 struct wl1271 *wl = hw->priv;
3654 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3657 wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_stop");
3659 mutex_lock(&wl->mutex);
3661 if (unlikely(wl->state != WLCORE_STATE_ON))
3664 ret = wl1271_ps_elp_wakeup(wl);
3668 wl->ops->sched_scan_stop(wl, wlvif);
3670 wl1271_ps_elp_sleep(wl);
3672 mutex_unlock(&wl->mutex);
/*
 * mac80211 .set_frag_threshold op: program the global fragmentation
 * threshold; failure is only logged.
 */
3675 static int wl1271_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
3677 struct wl1271 *wl = hw->priv;
3680 mutex_lock(&wl->mutex);
3682 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3687 ret = wl1271_ps_elp_wakeup(wl);
3691 ret = wl1271_acx_frag_threshold(wl, value);
3693 wl1271_warning("wl1271_op_set_frag_threshold failed: %d", ret);
3695 wl1271_ps_elp_sleep(wl);
3698 mutex_unlock(&wl->mutex);
/*
 * mac80211 .set_rts_threshold op: the RTS threshold is per-vif in the
 * firmware, so apply it to every vif; failures are only logged.
 */
3703 static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
3705 struct wl1271 *wl = hw->priv;
3706 struct wl12xx_vif *wlvif;
3709 mutex_lock(&wl->mutex);
3711 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3716 ret = wl1271_ps_elp_wakeup(wl);
3720 wl12xx_for_each_wlvif(wl, wlvif) {
3721 ret = wl1271_acx_rts_threshold(wl, wlvif, value);
3723 wl1271_warning("set rts threshold failed: %d", ret);
3725 wl1271_ps_elp_sleep(wl);
3728 mutex_unlock(&wl->mutex);
/*
 * Strip the first IE with id @eid from @skb (IEs start at @ieoffset):
 * the bytes following the IE are moved down over it and the skb is
 * trimmed by the IE's length.
 */
3733 static void wl12xx_remove_ie(struct sk_buff *skb, u8 eid, int ieoffset)
3736 const u8 *next, *end = skb->data + skb->len;
/* cfg80211_find_ie returns const; cast away since we edit in place. */
3737 u8 *ie = (u8 *)cfg80211_find_ie(eid, skb->data + ieoffset,
3738 skb->len - ieoffset);
3743 memmove(ie, next, end - next);
3744 skb_trim(skb, skb->len - len);
/*
 * Same as wl12xx_remove_ie() but for a vendor-specific IE identified by
 * @oui/@oui_type: remove it from @skb and trim the skb accordingly.
 */
3747 static void wl12xx_remove_vendor_ie(struct sk_buff *skb,
3748 unsigned int oui, u8 oui_type,
3752 const u8 *next, *end = skb->data + skb->len;
3753 u8 *ie = (u8 *)cfg80211_find_vendor_ie(oui, oui_type,
3754 skb->data + ieoffset,
3755 skb->len - ieoffset);
3760 memmove(ie, next, end - next);
3761 skb_trim(skb, skb->len - len);
/*
 * Install the AP probe-response template from mac80211's proberesp skb
 * and mark it as explicitly set (AP_PROBE_RESP_SET), so beacon-derived
 * probe responses are skipped from then on.
 */
3764 static int wl1271_ap_set_probe_resp_tmpl(struct wl1271 *wl, u32 rates,
3765 struct ieee80211_vif *vif)
3767 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3768 struct sk_buff *skb;
3771 skb = ieee80211_proberesp_get(wl->hw, vif);
3775 ret = wl1271_cmd_template_set(wl, wlvif->role_id,
3776 CMD_TEMPL_AP_PROBE_RESPONSE,
3785 wl1271_debug(DEBUG_AP, "probe response updated");
3786 set_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags);
/*
 * Legacy probe-response template setter: when the vif has no SSID
 * cached (hidden-SSID beacon), rebuild the template on the stack by
 * splicing the SSID from bss_conf into the beacon-derived frame.
 * When the SSID is already correct, the data is installed verbatim.
 */
3792 static int wl1271_ap_set_probe_resp_tmpl_legacy(struct wl1271 *wl,
3793 struct ieee80211_vif *vif,
3795 size_t probe_rsp_len,
3798 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3799 struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
3800 u8 probe_rsp_templ[WL1271_CMD_TEMPL_MAX_SIZE];
3801 int ssid_ie_offset, ie_offset, templ_len;
3804 /* no need to change probe response if the SSID is set correctly */
3805 if (wlvif->ssid_len > 0)
3806 return wl1271_cmd_template_set(wl, wlvif->role_id,
3807 CMD_TEMPL_AP_PROBE_RESPONSE,
/* Worst-case spliced size must fit the fixed template buffer. */
3812 if (probe_rsp_len + bss_conf->ssid_len > WL1271_CMD_TEMPL_MAX_SIZE) {
3813 wl1271_error("probe_rsp template too big");
3817 /* start searching from IE offset */
3818 ie_offset = offsetof(struct ieee80211_mgmt, u.probe_resp.variable);
3820 ptr = cfg80211_find_ie(WLAN_EID_SSID, probe_rsp_data + ie_offset,
3821 probe_rsp_len - ie_offset);
3823 wl1271_error("No SSID in beacon!");
3827 ssid_ie_offset = ptr - probe_rsp_data;
/* Skip over the (empty/hidden) SSID IE in the source frame. */
3828 ptr += (ptr[1] + 2);
3830 memcpy(probe_rsp_templ, probe_rsp_data, ssid_ie_offset);
3832 /* insert SSID from bss_conf */
3833 probe_rsp_templ[ssid_ie_offset] = WLAN_EID_SSID;
3834 probe_rsp_templ[ssid_ie_offset + 1] = bss_conf->ssid_len;
3835 memcpy(probe_rsp_templ + ssid_ie_offset + 2,
3836 bss_conf->ssid, bss_conf->ssid_len);
3837 templ_len = ssid_ie_offset + 2 + bss_conf->ssid_len;
/* Append everything that followed the original SSID IE. */
3839 memcpy(probe_rsp_templ + ssid_ie_offset + 2 + bss_conf->ssid_len,
3840 ptr, probe_rsp_len - (ptr - probe_rsp_data));
3841 templ_len += probe_rsp_len - (ptr - probe_rsp_data);
3843 return wl1271_cmd_template_set(wl, wlvif->role_id,
3844 CMD_TEMPL_AP_PROBE_RESPONSE,
/*
 * Apply ERP-related bss changes: slot time, preamble length and CTS
 * protection.  Slot/ctsprotect failures are logged as warnings;
 * preamble return values are ignored here.
 */
3850 static int wl1271_bss_erp_info_changed(struct wl1271 *wl,
3851 struct ieee80211_vif *vif,
3852 struct ieee80211_bss_conf *bss_conf,
3855 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3858 if (changed & BSS_CHANGED_ERP_SLOT) {
3859 if (bss_conf->use_short_slot)
3860 ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_SHORT);
3862 ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_LONG);
3864 wl1271_warning("Set slot time failed %d", ret);
3869 if (changed & BSS_CHANGED_ERP_PREAMBLE) {
3870 if (bss_conf->use_short_preamble)
3871 wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_SHORT);
3873 wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_LONG);
3876 if (changed & BSS_CHANGED_ERP_CTS_PROT) {
3877 if (bss_conf->use_cts_prot)
3878 ret = wl1271_acx_cts_protect(wl, wlvif,
3881 ret = wl1271_acx_cts_protect(wl, wlvif,
3882 CTSPROTECT_DISABLE);
3884 wl1271_warning("Set ctsprotect failed %d", ret);
/*
 * Build and install the beacon template from mac80211, cache the SSID
 * and WMM state from it, and — unless a probe response was set
 * explicitly by userspace — derive the probe-response template from the
 * beacon (dropping the TIM and P2P IEs, rewriting the frame control).
 * The beacon skb is freed on every path.
 */
3893 static int wlcore_set_beacon_template(struct wl1271 *wl,
3894 struct ieee80211_vif *vif,
3897 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3898 struct ieee80211_hdr *hdr;
3901 int ieoffset = offsetof(struct ieee80211_mgmt, u.beacon.variable);
3902 struct sk_buff *beacon = ieee80211_beacon_get(wl->hw, vif);
3910 wl1271_debug(DEBUG_MASTER, "beacon updated");
3912 ret = wl1271_ssid_set(wlvif, beacon, ieoffset);
3914 dev_kfree_skb(beacon);
3917 min_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
3918 tmpl_id = is_ap ? CMD_TEMPL_AP_BEACON :
3920 ret = wl1271_cmd_template_set(wl, wlvif->role_id, tmpl_id,
3925 dev_kfree_skb(beacon);
/* WMM is considered enabled iff the beacon carries the WMM vendor IE. */
3929 wlvif->wmm_enabled =
3930 cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
3931 WLAN_OUI_TYPE_MICROSOFT_WMM,
3932 beacon->data + ieoffset,
3933 beacon->len - ieoffset);
3936 * In case we already have a probe-resp beacon set explicitly
3937 * by usermode, don't use the beacon data.
3939 if (test_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags))
3942 /* remove TIM ie from probe response */
3943 wl12xx_remove_ie(beacon, WLAN_EID_TIM, ieoffset);
3946 * remove p2p ie from probe response.
3947 * the fw reponds to probe requests that don't include
3948 * the p2p ie. probe requests with p2p ie will be passed,
3949 * and will be responded by the supplicant (the spec
3950 * forbids including the p2p ie when responding to probe
3951 * requests that didn't include it).
3953 wl12xx_remove_vendor_ie(beacon, WLAN_OUI_WFA,
3954 WLAN_OUI_TYPE_WFA_P2P, ieoffset);
/* Turn the beacon into a probe response by patching frame control. */
3956 hdr = (struct ieee80211_hdr *) beacon->data;
3957 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
3958 IEEE80211_STYPE_PROBE_RESP);
3960 ret = wl1271_ap_set_probe_resp_tmpl_legacy(wl, vif,
3965 ret = wl1271_cmd_template_set(wl, wlvif->role_id,
3966 CMD_TEMPL_PROBE_RESPONSE,
3971 dev_kfree_skb(beacon);
/*
 * Handle beacon-related bss changes: cache a new beacon interval,
 * refresh the AP probe-response template, and re-install the beacon
 * template.  Failures are reported via the error message at the end.
 */
3979 static int wl1271_bss_beacon_info_changed(struct wl1271 *wl,
3980 struct ieee80211_vif *vif,
3981 struct ieee80211_bss_conf *bss_conf,
3984 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3985 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
3988 if (changed & BSS_CHANGED_BEACON_INT) {
3989 wl1271_debug(DEBUG_MASTER, "beacon interval updated: %d",
3990 bss_conf->beacon_int);
3992 wlvif->beacon_int = bss_conf->beacon_int;
3995 if ((changed & BSS_CHANGED_AP_PROBE_RESP) && is_ap) {
3996 u32 rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
3998 wl1271_ap_set_probe_resp_tmpl(wl, rate, vif);
4001 if (changed & BSS_CHANGED_BEACON) {
4002 ret = wlcore_set_beacon_template(wl, vif, is_ap);
4009 wl1271_error("beacon info change failed: %d", ret);
4013 /* AP mode changes */
/*
 * bss_info_changed handling for AP vifs: basic-rate policy updates
 * (with template re-init), beacon changes, starting/stopping the AP
 * role on BEACON_ENABLED transitions, ERP parameters and HT operation
 * mode.
 */
4014 static void wl1271_bss_info_changed_ap(struct wl1271 *wl,
4015 struct ieee80211_vif *vif,
4016 struct ieee80211_bss_conf *bss_conf,
4019 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4022 if (changed & BSS_CHANGED_BASIC_RATES) {
4023 u32 rates = bss_conf->basic_rates;
4025 wlvif->basic_rate_set = wl1271_tx_enabled_rates_get(wl, rates,
4027 wlvif->basic_rate = wl1271_tx_min_rate_get(wl,
4028 wlvif->basic_rate_set);
4030 ret = wl1271_init_ap_rates(wl, wlvif);
4032 wl1271_error("AP rate policy change failed %d", ret);
/* Rate change invalidates the templates — rebuild them all. */
4036 ret = wl1271_ap_init_templates(wl, vif);
4040 ret = wl1271_ap_set_probe_resp_tmpl(wl, wlvif->basic_rate, vif);
4044 ret = wlcore_set_beacon_template(wl, vif, true);
4049 ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf, changed);
4053 if (changed & BSS_CHANGED_BEACON_ENABLED) {
4054 if (bss_conf->enable_beacon) {
4055 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
4056 ret = wl12xx_cmd_role_start_ap(wl, wlvif);
/* Replay keys recorded before the AP role existed. */
4060 ret = wl1271_ap_init_hwenc(wl, wlvif);
4064 set_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
4065 wl1271_debug(DEBUG_AP, "started AP");
4068 if (test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
4070 * AP might be in ROC in case we have just
4071 * sent auth reply. handle it.
4073 if (test_bit(wlvif->role_id, wl->roc_map))
4074 wl12xx_croc(wl, wlvif->role_id);
4076 ret = wl12xx_cmd_role_stop_ap(wl, wlvif);
4080 clear_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
4081 clear_bit(WLVIF_FLAG_AP_PROBE_RESP_SET,
4083 wl1271_debug(DEBUG_AP, "stopped AP");
4088 ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
4092 /* Handle HT information change */
4093 if ((changed & BSS_CHANGED_HT) &&
4094 (bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT)) {
4095 ret = wl1271_acx_set_ht_information(wl, wlvif,
4096 bss_conf->ht_operation_mode);
4098 wl1271_warning("Set ht information failed %d", ret);
/*
 * Adopt a new BSSID on a STA vif: cache beacon interval and rate sets
 * (basic rates from bss_conf, current rates intersected with the AP's
 * supported set), stop any sched scan on this vif, push the rate
 * policies and rebuild null-data/qos-null templates, then refresh the
 * SSID and mark the vif IN_USE.
 */
4107 static int wlcore_set_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
4108 struct ieee80211_bss_conf *bss_conf,
4114 wl1271_debug(DEBUG_MAC80211,
4115 "changed_bssid: %pM, aid: %d, bcn_int: %d, brates: 0x%x sta_rate_set: 0x%x",
4116 bss_conf->bssid, bss_conf->aid,
4117 bss_conf->beacon_int,
4118 bss_conf->basic_rates, sta_rate_set);
4120 wlvif->beacon_int = bss_conf->beacon_int;
4121 rates = bss_conf->basic_rates;
4122 wlvif->basic_rate_set =
4123 wl1271_tx_enabled_rates_get(wl, rates,
4126 wl1271_tx_min_rate_get(wl,
4127 wlvif->basic_rate_set);
4131 wl1271_tx_enabled_rates_get(wl,
4135 /* we only support sched_scan while not connected */
4136 if (wl->sched_vif == wlvif)
4137 wl->ops->sched_scan_stop(wl, wlvif);
4139 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4143 ret = wl12xx_cmd_build_null_data(wl, wlvif);
4147 ret = wl1271_build_qos_null_data(wl, wl12xx_wlvif_to_vif(wlvif));
4151 wlcore_set_ssid(wl, wlvif);
4153 set_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
/*
 * Drop the current BSSID: fall back to the band's minimum rates,
 * re-apply rate policies, stop the STA role if it was running, and
 * clear the IN_USE flag.
 */
4158 static int wlcore_clear_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
4162 /* revert back to minimum rates for the current band */
4163 wl1271_set_band_rate(wl, wlvif);
4164 wlvif->basic_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4166 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4170 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
4171 test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags)) {
4172 ret = wl12xx_cmd_role_stop_sta(wl, wlvif);
4177 clear_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
4180 /* STA/IBSS mode changes */
/*
 * bss_info_changed handling for STA/IBSS vifs: IBSS join/leave, idle
 * transitions, CQM thresholds, BSSID set/clear (with JOIN when
 * needed), association state, power-save mode, and post-join HT
 * configuration.  NOTE(review): this function continues past the end
 * of this chunk (ARP filter handling onward).
 */
4181 static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
4182 struct ieee80211_vif *vif,
4183 struct ieee80211_bss_conf *bss_conf,
4186 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4187 bool do_join = false;
4188 bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
4189 bool ibss_joined = false;
4190 u32 sta_rate_set = 0;
4192 struct ieee80211_sta *sta;
4193 bool sta_exists = false;
4194 struct ieee80211_sta_ht_cap sta_ht_cap;
4197 ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf,
4203 if (changed & BSS_CHANGED_IBSS) {
4204 if (bss_conf->ibss_joined) {
4205 set_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags);
/* IBSS left: tear down association state and stop the STA role. */
4208 wlcore_unset_assoc(wl, wlvif);
4209 wl12xx_cmd_role_stop_sta(wl, wlvif);
4213 if ((changed & BSS_CHANGED_BEACON_INT) && ibss_joined)
4216 /* Need to update the SSID (for filtering etc) */
4217 if ((changed & BSS_CHANGED_BEACON) && ibss_joined)
4220 if ((changed & BSS_CHANGED_BEACON_ENABLED) && ibss_joined) {
4221 wl1271_debug(DEBUG_ADHOC, "ad-hoc beaconing: %s",
4222 bss_conf->enable_beacon ? "enabled" : "disabled");
4227 if (changed & BSS_CHANGED_IDLE && !is_ibss)
4228 wl1271_sta_handle_idle(wl, wlvif, bss_conf->idle);
4230 if (changed & BSS_CHANGED_CQM) {
4231 bool enable = false;
4232 if (bss_conf->cqm_rssi_thold)
4234 ret = wl1271_acx_rssi_snr_trigger(wl, wlvif, enable,
4235 bss_conf->cqm_rssi_thold,
4236 bss_conf->cqm_rssi_hyst);
4239 wlvif->rssi_thold = bss_conf->cqm_rssi_thold;
/* Snapshot the AP's rate/HT capabilities under RCU before use below. */
4242 if (changed & (BSS_CHANGED_BSSID | BSS_CHANGED_HT |
4243 BSS_CHANGED_ASSOC)) {
4245 sta = ieee80211_find_sta(vif, bss_conf->bssid);
4247 u8 *rx_mask = sta->ht_cap.mcs.rx_mask;
4249 /* save the supp_rates of the ap */
4250 sta_rate_set = sta->supp_rates[wlvif->band];
4251 if (sta->ht_cap.ht_supported)
4253 (rx_mask[0] << HW_HT_RATES_OFFSET) |
4254 (rx_mask[1] << HW_MIMO_RATES_OFFSET);
4255 sta_ht_cap = sta->ht_cap;
4262 if (changed & BSS_CHANGED_BSSID) {
4263 if (!is_zero_ether_addr(bss_conf->bssid)) {
4264 ret = wlcore_set_bssid(wl, wlvif, bss_conf,
4269 /* Need to update the BSSID (for filtering etc) */
4272 ret = wlcore_clear_bssid(wl, wlvif);
4278 if (changed & BSS_CHANGED_IBSS) {
4279 wl1271_debug(DEBUG_ADHOC, "ibss_joined: %d",
4280 bss_conf->ibss_joined);
4282 if (bss_conf->ibss_joined) {
4283 u32 rates = bss_conf->basic_rates;
4284 wlvif->basic_rate_set =
4285 wl1271_tx_enabled_rates_get(wl, rates,
4288 wl1271_tx_min_rate_get(wl,
4289 wlvif->basic_rate_set);
4291 /* by default, use 11b + OFDM rates */
4292 wlvif->rate_set = CONF_TX_IBSS_DEFAULT_RATES;
4293 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4299 ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
4304 ret = wlcore_join(wl, wlvif);
4306 wl1271_warning("cmd join failed %d", ret);
4311 if (changed & BSS_CHANGED_ASSOC) {
4312 if (bss_conf->assoc) {
4313 ret = wlcore_set_assoc(wl, wlvif, bss_conf,
/* Re-authorize immediately if mac80211 had already authorized us. */
4318 if (test_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags))
4319 wl12xx_set_authorized(wl, wlvif);
4321 wlcore_unset_assoc(wl, wlvif);
4325 if (changed & BSS_CHANGED_PS) {
4326 if ((bss_conf->ps) &&
4327 test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
4328 !test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
4332 if (wl->conf.conn.forced_ps) {
4333 ps_mode = STATION_POWER_SAVE_MODE;
4334 ps_mode_str = "forced";
4336 ps_mode = STATION_AUTO_PS_MODE;
4337 ps_mode_str = "auto";
4340 wl1271_debug(DEBUG_PSM, "%s ps enabled", ps_mode_str);
4342 ret = wl1271_ps_set_mode(wl, wlvif, ps_mode);
4344 wl1271_warning("enter %s ps failed %d",
4346 } else if (!bss_conf->ps &&
4347 test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
4348 wl1271_debug(DEBUG_PSM, "auto ps disabled");
4350 ret = wl1271_ps_set_mode(wl, wlvif,
4351 STATION_ACTIVE_MODE);
4353 wl1271_warning("exit auto ps failed %d", ret);
4357 /* Handle new association with HT. Do this after join. */
4360 bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT;
4362 ret = wlcore_hw_set_peer_cap(wl,
4368 wl1271_warning("Set ht cap failed %d", ret);
4374 ret = wl1271_acx_set_ht_information(wl, wlvif,
4375 bss_conf->ht_operation_mode);
4377 wl1271_warning("Set ht information failed %d",
4384 /* Handle arp filtering. Done after join. */
4385 if ((changed & BSS_CHANGED_ARP_FILTER) ||
4386 (!is_ibss && (changed & BSS_CHANGED_QOS))) {
4387 __be32 addr = bss_conf->arp_addr_list[0];
4388 wlvif->sta.qos = bss_conf->qos;
4389 WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS);
4391 if (bss_conf->arp_addr_cnt == 1 && bss_conf->assoc) {
4392 wlvif->ip_addr = addr;
4394 * The template should have been configured only upon
4395 * association. however, it seems that the correct ip
4396 * isn't being set (when sending), so we have to
4397 * reconfigure the template upon every ip change.
4399 ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
4401 wl1271_warning("build arp rsp failed: %d", ret);
4405 ret = wl1271_acx_arp_ip_filter(wl, wlvif,
4406 (ACX_ARP_FILTER_ARP_FILTERING |
4407 ACX_ARP_FILTER_AUTO_ARP),
4411 ret = wl1271_acx_arp_ip_filter(wl, wlvif, 0, addr);
/*
 * mac80211 bss_info_changed callback: dispatch BSS configuration changes
 * to the AP- or STA-specific handler, under wl->mutex and with the chip
 * woken from ELP for the duration of the call.
 *
 * NOTE(review): the extraction dropped some lines (braces, early-return
 * paths, the is_ap branch condition); comments describe visible code only.
 */
4422 static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
4423 struct ieee80211_vif *vif,
4424 struct ieee80211_bss_conf *bss_conf,
4427 struct wl1271 *wl = hw->priv;
4428 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4429 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
4432 wl1271_debug(DEBUG_MAC80211, "mac80211 bss info role %d changed 0x%x",
4433 wlvif->role_id, (int)changed);
4436 * make sure to cancel pending disconnections if our association
/* STA role: a fresh ASSOC change supersedes any queued connection-loss work */
4439 if (!is_ap && (changed & BSS_CHANGED_ASSOC))
4440 cancel_delayed_work_sync(&wlvif->connection_loss_work);
/* AP role: flush pending TX before beaconing is disabled */
4442 if (is_ap && (changed & BSS_CHANGED_BEACON_ENABLED) &&
4443 !bss_conf->enable_beacon)
4444 wl1271_tx_flush(wl);
4446 mutex_lock(&wl->mutex);
/* bail out if the core is not up or the vif was never initialized */
4448 if (unlikely(wl->state != WLCORE_STATE_ON))
4451 if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
4454 ret = wl1271_ps_elp_wakeup(wl);
/* role-specific handling (selector between these two is elided) */
4459 wl1271_bss_info_changed_ap(wl, vif, bss_conf, changed);
4461 wl1271_bss_info_changed_sta(wl, vif, bss_conf, changed);
4463 wl1271_ps_elp_sleep(wl);
4466 mutex_unlock(&wl->mutex);
/*
 * mac80211 add_chanctx callback: wlcore keeps no per-context state here;
 * the visible body only logs the new channel context. (Return value line
 * elided by the extraction — presumably returns 0; TODO confirm.)
 */
4469 static int wlcore_op_add_chanctx(struct ieee80211_hw *hw,
4470 struct ieee80211_chanctx_conf *ctx)
4472 wl1271_debug(DEBUG_MAC80211, "mac80211 add chanctx %d (type %d)",
4473 ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4474 cfg80211_get_chandef_type(&ctx->def));
/*
 * mac80211 remove_chanctx callback: nothing to tear down in the driver;
 * the visible body only logs the context being removed.
 */
4478 static void wlcore_op_remove_chanctx(struct ieee80211_hw *hw,
4479 struct ieee80211_chanctx_conf *ctx)
4481 wl1271_debug(DEBUG_MAC80211, "mac80211 remove chanctx %d (type %d)",
4482 ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4483 cfg80211_get_chandef_type(&ctx->def));
/*
 * mac80211 change_chanctx callback: log-only in the visible body; the
 * driver reacts to the actual channel when a vif is (re)assigned.
 */
4486 static void wlcore_op_change_chanctx(struct ieee80211_hw *hw,
4487 struct ieee80211_chanctx_conf *ctx,
4490 wl1271_debug(DEBUG_MAC80211,
4491 "mac80211 change chanctx %d (type %d) changed 0x%x",
4492 ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4493 cfg80211_get_chandef_type(&ctx->def), changed);
/*
 * mac80211 assign_vif_chanctx callback: cache the context's band, channel
 * number and channel type on the vif, then refresh the vif's default rate
 * tables for the new band. All state updates happen under wl->mutex.
 */
4496 static int wlcore_op_assign_vif_chanctx(struct ieee80211_hw *hw,
4497 struct ieee80211_vif *vif,
4498 struct ieee80211_chanctx_conf *ctx)
4500 struct wl1271 *wl = hw->priv;
4501 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4502 int channel = ieee80211_frequency_to_channel(
4503 ctx->def.chan->center_freq);
4505 wl1271_debug(DEBUG_MAC80211,
4506 "mac80211 assign chanctx (role %d) %d (type %d)",
4507 wlvif->role_id, channel, cfg80211_get_chandef_type(&ctx->def));
4509 mutex_lock(&wl->mutex);
/* remember where this vif now operates */
4511 wlvif->band = ctx->def.chan->band;
4512 wlvif->channel = channel;
4513 wlvif->channel_type = cfg80211_get_chandef_type(&ctx->def);
4515 /* update default rates according to the band */
4516 wl1271_set_band_rate(wl, wlvif);
4518 mutex_unlock(&wl->mutex);
/*
 * mac80211 unassign_vif_chanctx callback: log the unassignment and flush
 * any pending TX so no frames go out on the old channel.
 */
4523 static void wlcore_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
4524 struct ieee80211_vif *vif,
4525 struct ieee80211_chanctx_conf *ctx)
4527 struct wl1271 *wl = hw->priv;
4528 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4530 wl1271_debug(DEBUG_MAC80211,
4531 "mac80211 unassign chanctx (role %d) %d (type %d)",
4533 ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4534 cfg80211_get_chandef_type(&ctx->def));
4536 wl1271_tx_flush(wl);
/*
 * mac80211 conf_tx callback: program one access category's EDCA
 * parameters (cw_min/cw_max/aifs/txop) and TID configuration into the
 * firmware. The PS scheme selector condition between the two assignments
 * is elided by the extraction (UAPSD vs. legacy — TODO confirm).
 */
4539 static int wl1271_op_conf_tx(struct ieee80211_hw *hw,
4540 struct ieee80211_vif *vif, u16 queue,
4541 const struct ieee80211_tx_queue_params *params)
4543 struct wl1271 *wl = hw->priv;
4544 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4548 mutex_lock(&wl->mutex);
4550 wl1271_debug(DEBUG_MAC80211, "mac80211 conf tx %d", queue);
4553 ps_scheme = CONF_PS_SCHEME_UPSD_TRIGGER;
4555 ps_scheme = CONF_PS_SCHEME_LEGACY;
4557 if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
4560 ret = wl1271_ps_elp_wakeup(wl);
4565 * the txop is confed in units of 32us by the mac80211,
/* << 5 converts mac80211's 32us txop units to microseconds for the FW */
4568 ret = wl1271_acx_ac_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
4569 params->cw_min, params->cw_max,
4570 params->aifs, params->txop << 5);
4574 ret = wl1271_acx_tid_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
4575 CONF_CHANNEL_TYPE_EDCF,
4576 wl1271_tx_get_queue(queue),
4577 ps_scheme, CONF_ACK_POLICY_LEGACY,
4581 wl1271_ps_elp_sleep(wl);
4584 mutex_unlock(&wl->mutex);
/*
 * mac80211 get_tsf callback: query the firmware TSF via ACX and return
 * it. mactime defaults to ULLONG_MAX so a failed/early-exit path still
 * returns a well-defined (sentinel) value.
 */
4589 static u64 wl1271_op_get_tsf(struct ieee80211_hw *hw,
4590 struct ieee80211_vif *vif)
4593 struct wl1271 *wl = hw->priv;
4594 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4595 u64 mactime = ULLONG_MAX;
4598 wl1271_debug(DEBUG_MAC80211, "mac80211 get tsf");
4600 mutex_lock(&wl->mutex);
4602 if (unlikely(wl->state != WLCORE_STATE_ON))
4605 ret = wl1271_ps_elp_wakeup(wl);
4609 ret = wl12xx_acx_tsf_info(wl, wlvif, &mactime);
4614 wl1271_ps_elp_sleep(wl);
4617 mutex_unlock(&wl->mutex);
/*
 * mac80211 get_survey callback: report only the current operating
 * channel; no noise/airtime statistics are filled in the visible body.
 * (The idx bounds check is elided — presumably idx != 0 returns -ENOENT;
 * TODO confirm.)
 */
4621 static int wl1271_op_get_survey(struct ieee80211_hw *hw, int idx,
4622 struct survey_info *survey)
4624 struct ieee80211_conf *conf = &hw->conf;
4629 survey->channel = conf->chandef.chan;
/*
 * Allocate a firmware link (HLID) for a newly added station (AP mode):
 * enforce the AP_MAX_STATIONS cap, grab an HLID, restore the station's
 * saved TX security sequence counter, and record it in the per-vif map.
 */
4634 static int wl1271_allocate_sta(struct wl1271 *wl,
4635 struct wl12xx_vif *wlvif,
4636 struct ieee80211_sta *sta)
4638 struct wl1271_station *wl_sta;
4642 if (wl->active_sta_count >= AP_MAX_STATIONS) {
4643 wl1271_warning("could not allocate HLID - too much stations");
4647 wl_sta = (struct wl1271_station *)sta->drv_priv;
4648 ret = wl12xx_allocate_link(wl, wlvif, &wl_sta->hlid);
4650 wl1271_warning("could not allocate HLID - too many links");
4654 /* use the previous security seq, if this is a recovery/resume */
4655 wl->links[wl_sta->hlid].total_freed_pkts = wl_sta->total_freed_pkts;
4657 set_bit(wl_sta->hlid, wlvif->ap.sta_hlid_map);
4658 memcpy(wl->links[wl_sta->hlid].addr, sta->addr, ETH_ALEN);
4659 wl->active_sta_count++;
/*
 * Release a station's firmware link (AP mode): clear its HLID from the
 * per-vif and PS maps, stash the last TX sequence counter back into the
 * mac80211 station's drv_priv for recovery/suspend, free the link, and
 * rearm the TX watchdog once the last station is gone.
 */
4663 void wl1271_free_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid)
4665 struct wl1271_station *wl_sta;
4666 struct ieee80211_sta *sta;
4667 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
/* not allocated to this vif — nothing to do */
4669 if (!test_bit(hlid, wlvif->ap.sta_hlid_map))
4672 clear_bit(hlid, wlvif->ap.sta_hlid_map);
4673 __clear_bit(hlid, &wl->ap_ps_map);
4674 __clear_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);
4677 * save the last used PN in the private part of iee80211_sta,
4678 * in case of recovery/suspend
4681 sta = ieee80211_find_sta(vif, wl->links[hlid].addr);
4683 wl_sta = (void *)sta->drv_priv;
4684 wl_sta->total_freed_pkts = wl->links[hlid].total_freed_pkts;
4687 * increment the initial seq number on recovery to account for
4688 * transmitted packets that we haven't yet got in the FW status
4690 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
4691 wl_sta->total_freed_pkts +=
4692 WL1271_TX_SQN_POST_RECOVERY_PADDING;
4696 wl12xx_free_link(wl, wlvif, &hlid);
4697 wl->active_sta_count--;
4700 * rearm the tx watchdog when the last STA is freed - give the FW a
4701 * chance to return STA-buffered packets before complaining.
4703 if (wl->active_sta_count == 0)
4704 wl12xx_rearm_tx_watchdog_locked(wl);
/*
 * Add a station in AP mode: allocate an HLID for it and register the
 * peer with the firmware. On a failed ADD_PEER command the HLID is
 * released again so no link leaks.
 */
4707 static int wl12xx_sta_add(struct wl1271 *wl,
4708 struct wl12xx_vif *wlvif,
4709 struct ieee80211_sta *sta)
4711 struct wl1271_station *wl_sta;
4715 wl1271_debug(DEBUG_MAC80211, "mac80211 add sta %d", (int)sta->aid);
4717 ret = wl1271_allocate_sta(wl, wlvif, sta);
4721 wl_sta = (struct wl1271_station *)sta->drv_priv;
4722 hlid = wl_sta->hlid;
4724 ret = wl12xx_cmd_add_peer(wl, wlvif, sta, hlid);
/* roll back the link allocation if the FW rejected the peer */
4726 wl1271_free_sta(wl, wlvif, hlid);
/*
 * Remove a station in AP mode: tell the firmware to drop the peer, then
 * free its HLID. NOTE(review): `id` used in the WARN_ON is assigned from
 * wl_sta->hlid on a line elided by the extraction — TODO confirm.
 */
4731 static int wl12xx_sta_remove(struct wl1271 *wl,
4732 struct wl12xx_vif *wlvif,
4733 struct ieee80211_sta *sta)
4735 struct wl1271_station *wl_sta;
4738 wl1271_debug(DEBUG_MAC80211, "mac80211 remove sta %d", (int)sta->aid);
4740 wl_sta = (struct wl1271_station *)sta->drv_priv;
/* sanity: the station must actually own an HLID on this vif */
4742 if (WARN_ON(!test_bit(id, wlvif->ap.sta_hlid_map)))
4745 ret = wl12xx_cmd_remove_peer(wl, wl_sta->hlid);
4749 wl1271_free_sta(wl, wlvif, wl_sta->hlid);
/*
 * Start a remain-on-channel on this vif's role, but only when no other
 * role currently holds a ROC (single outstanding ROC at a time).
 */
4753 static void wlcore_roc_if_possible(struct wl1271 *wl,
4754 struct wl12xx_vif *wlvif)
/* another role is already ROCing — bail out */
4756 if (find_first_bit(wl->roc_map,
4757 WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES)
4760 if (WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID))
4763 wl12xx_roc(wl, wlvif, wlvif->role_id, wlvif->band, wlvif->channel);
/*
 * Track stations that are mid-connection (authenticating/associating)
 * on an AP vif, and keep a ROC active while any of them are in that
 * state, dropping it (CROC) when the last one finishes.
 *
 * NOTE(review): the in_conn/!in_conn branch structure is partially
 * elided by the extraction; comments describe the visible halves only.
 */
4767 * when wl_sta is NULL, we treat this call as if coming from a
4768 * pending auth reply.
4769 * wl->mutex must be taken and the FW must be awake when the call
4772 void wlcore_update_inconn_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif,
4773 struct wl1271_station *wl_sta, bool in_conn)
4776 if (WARN_ON(wl_sta && wl_sta->in_connection))
/* first in-connection entity: start ROC if nothing is ROCing yet */
4779 if (!wlvif->ap_pending_auth_reply &&
4780 !wlvif->inconn_count)
4781 wlcore_roc_if_possible(wl, wlvif);
4784 wl_sta->in_connection = true;
4785 wlvif->inconn_count++;
4787 wlvif->ap_pending_auth_reply = true;
/* --- leaving-connection path --- */
4790 if (wl_sta && !wl_sta->in_connection)
4793 if (WARN_ON(!wl_sta && !wlvif->ap_pending_auth_reply))
4796 if (WARN_ON(wl_sta && !wlvif->inconn_count))
4800 wl_sta->in_connection = false;
4801 wlvif->inconn_count--;
4803 wlvif->ap_pending_auth_reply = false;
/* last in-connection entity gone: cancel the ROC if we own one */
4806 if (!wlvif->inconn_count && !wlvif->ap_pending_auth_reply &&
4807 test_bit(wlvif->role_id, wl->roc_map))
4808 wl12xx_croc(wl, wlvif->role_id);
/*
 * Core of the sta_state machine: react to mac80211 station state
 * transitions for both AP and STA roles — add/remove/authorize peers in
 * AP mode, set the authorized flag in STA mode, and manage ROC/CROC
 * around the connection process.
 *
 * NOTE(review): several branch conditions (the is_ap/is_sta guards and
 * intermediate-state checks) are elided by the extraction; the comments
 * label each visible transition block but the exact guards should be
 * confirmed against the full source.
 */
4812 static int wl12xx_update_sta_state(struct wl1271 *wl,
4813 struct wl12xx_vif *wlvif,
4814 struct ieee80211_sta *sta,
4815 enum ieee80211_sta_state old_state,
4816 enum ieee80211_sta_state new_state)
4818 struct wl1271_station *wl_sta;
4819 bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
4820 bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
4823 wl_sta = (struct wl1271_station *)sta->drv_priv;
4825 /* Add station (AP mode) */
4827 old_state == IEEE80211_STA_NOTEXIST &&
4828 new_state == IEEE80211_STA_NONE) {
4829 ret = wl12xx_sta_add(wl, wlvif, sta);
4833 wlcore_update_inconn_sta(wl, wlvif, wl_sta, true);
4836 /* Remove station (AP mode) */
4838 old_state == IEEE80211_STA_NONE &&
4839 new_state == IEEE80211_STA_NOTEXIST) {
4841 wl12xx_sta_remove(wl, wlvif, sta);
4843 wlcore_update_inconn_sta(wl, wlvif, wl_sta, false);
4846 /* Authorize station (AP mode) */
4848 new_state == IEEE80211_STA_AUTHORIZED) {
4849 ret = wl12xx_cmd_set_peer_state(wl, wlvif, wl_sta->hlid);
4853 ret = wl1271_acx_set_ht_capabilities(wl, &sta->ht_cap, true,
4858 wlcore_update_inconn_sta(wl, wlvif, wl_sta, false);
4861 /* Authorize station */
4863 new_state == IEEE80211_STA_AUTHORIZED) {
4864 set_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
4865 ret = wl12xx_set_authorized(wl, wlvif);
/* De-authorize (STA mode): drop authorized/state-sent flags */
4871 old_state == IEEE80211_STA_AUTHORIZED &&
4872 new_state == IEEE80211_STA_ASSOC) {
4873 clear_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
4874 clear_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags);
4877 /* clear ROCs on failure or authorization */
4879 (new_state == IEEE80211_STA_AUTHORIZED ||
4880 new_state == IEEE80211_STA_NOTEXIST)) {
4881 if (test_bit(wlvif->role_id, wl->roc_map))
4882 wl12xx_croc(wl, wlvif->role_id);
/* new connection attempt: start a ROC if none is active */
4886 old_state == IEEE80211_STA_NOTEXIST &&
4887 new_state == IEEE80211_STA_NONE) {
4888 if (find_first_bit(wl->roc_map,
4889 WL12XX_MAX_ROLES) >= WL12XX_MAX_ROLES) {
4890 WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID);
4891 wl12xx_roc(wl, wlvif, wlvif->role_id,
4892 wlvif->band, wlvif->channel);
/*
 * mac80211 sta_state callback: thin locking/wakeup wrapper around
 * wl12xx_update_sta_state(). Errors on downward transitions are not
 * reported to mac80211 (it cannot handle failures there) — the visible
 * `if (new_state < old_state)` check implements that.
 */
4898 static int wl12xx_op_sta_state(struct ieee80211_hw *hw,
4899 struct ieee80211_vif *vif,
4900 struct ieee80211_sta *sta,
4901 enum ieee80211_sta_state old_state,
4902 enum ieee80211_sta_state new_state)
4904 struct wl1271 *wl = hw->priv;
4905 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4908 wl1271_debug(DEBUG_MAC80211, "mac80211 sta %d state=%d->%d",
4909 sta->aid, old_state, new_state);
4911 mutex_lock(&wl->mutex);
4913 if (unlikely(wl->state != WLCORE_STATE_ON)) {
4918 ret = wl1271_ps_elp_wakeup(wl);
4922 ret = wl12xx_update_sta_state(wl, wlvif, sta, old_state, new_state);
4924 wl1271_ps_elp_sleep(wl);
4926 mutex_unlock(&wl->mutex);
/* never fail a downward (teardown) transition */
4927 if (new_state < old_state)
/*
 * mac80211 ampdu_action callback: manage RX block-ack sessions in the
 * firmware. RX_START/RX_STOP program the FW via ACX and track active
 * sessions in a per-link bitmap plus a global session counter; all TX
 * AMPDU actions are no-ops because the FW manages TX BA on its own.
 *
 * NOTE(review): error-path lines (ret assignments, goto targets) are
 * elided by the extraction; comments describe the visible flow only.
 */
4932 static int wl1271_op_ampdu_action(struct ieee80211_hw *hw,
4933 struct ieee80211_vif *vif,
4934 enum ieee80211_ampdu_mlme_action action,
4935 struct ieee80211_sta *sta, u16 tid, u16 *ssn,
4938 struct wl1271 *wl = hw->priv;
4939 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4941 u8 hlid, *ba_bitmap;
4943 wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu action %d tid %d", action,
4946 /* sanity check - the fields in FW are only 8bits wide */
4947 if (WARN_ON(tid > 0xFF))
4950 mutex_lock(&wl->mutex);
4952 if (unlikely(wl->state != WLCORE_STATE_ON)) {
/* resolve the FW link id for this BA session */
4957 if (wlvif->bss_type == BSS_TYPE_STA_BSS) {
4958 hlid = wlvif->sta.hlid;
4959 } else if (wlvif->bss_type == BSS_TYPE_AP_BSS) {
4960 struct wl1271_station *wl_sta;
4962 wl_sta = (struct wl1271_station *)sta->drv_priv;
4963 hlid = wl_sta->hlid;
4969 ba_bitmap = &wl->links[hlid].ba_bitmap;
4971 ret = wl1271_ps_elp_wakeup(wl);
4975 wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu: Rx tid %d action %d",
4979 case IEEE80211_AMPDU_RX_START:
4980 if (!wlvif->ba_support || !wlvif->ba_allowed) {
/* global cap on simultaneous RX BA sessions */
4985 if (wl->ba_rx_session_count >= wl->ba_rx_session_count_max) {
4987 wl1271_error("exceeded max RX BA sessions");
/* refuse to double-open a session on the same TID */
4991 if (*ba_bitmap & BIT(tid)) {
4993 wl1271_error("cannot enable RX BA session on active "
4998 ret = wl12xx_acx_set_ba_receiver_session(wl, tid, *ssn, true,
5001 *ba_bitmap |= BIT(tid);
5002 wl->ba_rx_session_count++;
5006 case IEEE80211_AMPDU_RX_STOP:
5007 if (!(*ba_bitmap & BIT(tid))) {
5009 * this happens on reconfig - so only output a debug
5010 * message for now, and don't fail the function.
5012 wl1271_debug(DEBUG_MAC80211,
5013 "no active RX BA session on tid: %d",
5019 ret = wl12xx_acx_set_ba_receiver_session(wl, tid, 0, false,
5022 *ba_bitmap &= ~BIT(tid);
5023 wl->ba_rx_session_count--;
5028 * The BA initiator session management in FW independently.
5029 * Falling break here on purpose for all TX APDU commands.
5031 case IEEE80211_AMPDU_TX_START:
5032 case IEEE80211_AMPDU_TX_STOP_CONT:
5033 case IEEE80211_AMPDU_TX_STOP_FLUSH:
5034 case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
5035 case IEEE80211_AMPDU_TX_OPERATIONAL:
5040 wl1271_error("Incorrect ampdu action id=%x\n", action);
5044 wl1271_ps_elp_sleep(wl);
5047 mutex_unlock(&wl->mutex);
/*
 * mac80211 set_bitrate_mask callback: translate the legacy-rate mask for
 * each band into the driver's enabled-rate representation, and — for an
 * idle (non-associated) STA vif while the core is on — immediately push
 * refreshed rate policies to the firmware.
 */
5052 static int wl12xx_set_bitrate_mask(struct ieee80211_hw *hw,
5053 struct ieee80211_vif *vif,
5054 const struct cfg80211_bitrate_mask *mask)
5056 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5057 struct wl1271 *wl = hw->priv;
5060 wl1271_debug(DEBUG_MAC80211, "mac80211 set_bitrate_mask 0x%x 0x%x",
5061 mask->control[NL80211_BAND_2GHZ].legacy,
5062 mask->control[NL80211_BAND_5GHZ].legacy);
5064 mutex_lock(&wl->mutex);
/* store the converted mask per band for later use */
5066 for (i = 0; i < WLCORE_NUM_BANDS; i++)
5067 wlvif->bitrate_masks[i] =
5068 wl1271_tx_enabled_rates_get(wl,
5069 mask->control[i].legacy,
5072 if (unlikely(wl->state != WLCORE_STATE_ON))
/* only reprogram the FW now if we are a not-yet-associated STA */
5075 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
5076 !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
5078 ret = wl1271_ps_elp_wakeup(wl);
5082 wl1271_set_band_rate(wl, wlvif);
5084 wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
5085 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
5087 wl1271_ps_elp_sleep(wl);
5090 mutex_unlock(&wl->mutex);
/*
 * mac80211 channel_switch callback: forward the CSA to the firmware for
 * every STA vif. If the core is off, immediately report switch failure
 * via ieee80211_chswitch_done(). On success, arm a per-vif delayed work
 * that flags failure if the switch has not completed ~5s after the
 * expected switch time.
 */
5095 static void wl12xx_op_channel_switch(struct ieee80211_hw *hw,
5096 struct ieee80211_channel_switch *ch_switch)
5098 struct wl1271 *wl = hw->priv;
5099 struct wl12xx_vif *wlvif;
5102 wl1271_debug(DEBUG_MAC80211, "mac80211 channel switch");
5104 wl1271_tx_flush(wl);
5106 mutex_lock(&wl->mutex);
/* chip is down: fail the switch for every STA vif right away */
5108 if (unlikely(wl->state == WLCORE_STATE_OFF)) {
5109 wl12xx_for_each_wlvif_sta(wl, wlvif) {
5110 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
5111 ieee80211_chswitch_done(vif, false);
5114 } else if (unlikely(wl->state != WLCORE_STATE_ON)) {
5118 ret = wl1271_ps_elp_wakeup(wl);
5122 /* TODO: change mac80211 to pass vif as param */
5123 wl12xx_for_each_wlvif_sta(wl, wlvif) {
5124 unsigned long delay_usec;
5126 ret = wl->ops->channel_switch(wl, wlvif, ch_switch);
5130 set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags);
5132 /* indicate failure 5 seconds after channel switch time */
5133 delay_usec = ieee80211_tu_to_usec(wlvif->beacon_int) *
5135 ieee80211_queue_delayed_work(hw, &wlvif->channel_switch_work,
5136 usecs_to_jiffies(delay_usec) +
5137 msecs_to_jiffies(5000));
5141 wl1271_ps_elp_sleep(wl);
5144 mutex_unlock(&wl->mutex);
/*
 * mac80211 flush callback: wlcore flushes all TX regardless of the
 * requested queues/drop arguments.
 */
5147 static void wlcore_op_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
5149 struct wl1271 *wl = hw->priv;
5151 wl1271_tx_flush(wl);
/*
 * mac80211 remain_on_channel callback: start the device role on the
 * requested channel and schedule roc_complete_work to end the ROC after
 * `duration` ms. Only one ROC may be outstanding: if another vif already
 * holds one (roc_vif set or any bit in roc_map), return -EBUSY.
 */
5154 static int wlcore_op_remain_on_channel(struct ieee80211_hw *hw,
5155 struct ieee80211_vif *vif,
5156 struct ieee80211_channel *chan,
5158 enum ieee80211_roc_type type)
5160 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5161 struct wl1271 *wl = hw->priv;
5162 int channel, ret = 0;
5164 channel = ieee80211_frequency_to_channel(chan->center_freq);
5166 wl1271_debug(DEBUG_MAC80211, "mac80211 roc %d (%d)",
5167 channel, wlvif->role_id);
5169 mutex_lock(&wl->mutex);
5171 if (unlikely(wl->state != WLCORE_STATE_ON))
5174 /* return EBUSY if we can't ROC right now */
5175 if (WARN_ON(wl->roc_vif ||
5176 find_first_bit(wl->roc_map,
5177 WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES)) {
5182 ret = wl1271_ps_elp_wakeup(wl);
5186 ret = wl12xx_start_dev(wl, wlvif, chan->band, channel);
/* end the ROC automatically when the requested duration elapses */
5191 ieee80211_queue_delayed_work(hw, &wl->roc_complete_work,
5192 msecs_to_jiffies(duration));
5194 wl1271_ps_elp_sleep(wl);
5196 mutex_unlock(&wl->mutex);
/*
 * Finish a remain-on-channel: stop the device role of the ROCing vif.
 * Caller must hold wl->mutex with the FW awake. Returns early if no ROC
 * is active (roc_vif is NULL) or the vif was never initialized.
 */
5200 static int __wlcore_roc_completed(struct wl1271 *wl)
5202 struct wl12xx_vif *wlvif;
5205 /* already completed */
5206 if (unlikely(!wl->roc_vif))
5209 wlvif = wl12xx_vif_to_data(wl->roc_vif);
5211 if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
5214 ret = wl12xx_stop_dev(wl, wlvif);
/*
 * Locking/wakeup wrapper around __wlcore_roc_completed(): takes
 * wl->mutex, checks the core is on, wakes the FW from ELP, completes the
 * ROC, then lets the FW sleep again.
 */
5223 static int wlcore_roc_completed(struct wl1271 *wl)
5227 wl1271_debug(DEBUG_MAC80211, "roc complete");
5229 mutex_lock(&wl->mutex);
5231 if (unlikely(wl->state != WLCORE_STATE_ON)) {
5236 ret = wl1271_ps_elp_wakeup(wl);
5240 ret = __wlcore_roc_completed(wl);
5242 wl1271_ps_elp_sleep(wl);
5244 mutex_unlock(&wl->mutex);
/*
 * Delayed-work handler armed by wlcore_op_remain_on_channel(): complete
 * the ROC and notify mac80211 that the remain-on-channel period expired.
 */
5249 static void wlcore_roc_complete_work(struct work_struct *work)
5251 struct delayed_work *dwork;
5255 dwork = container_of(work, struct delayed_work, work);
5256 wl = container_of(dwork, struct wl1271, roc_complete_work);
5258 ret = wlcore_roc_completed(wl);
5260 ieee80211_remain_on_channel_expired(wl->hw);
/*
 * mac80211 cancel_remain_on_channel callback: flush TX, cancel the
 * pending roc_complete_work (cancel_delayed_work_sync rather than
 * flush_work — see comment below about the workqueue deadlock), and
 * complete the ROC synchronously.
 */
5263 static int wlcore_op_cancel_remain_on_channel(struct ieee80211_hw *hw)
5265 struct wl1271 *wl = hw->priv;
5267 wl1271_debug(DEBUG_MAC80211, "mac80211 croc");
5270 wl1271_tx_flush(wl);
5273 * we can't just flush_work here, because it might deadlock
5274 * (as we might get called from the same workqueue)
5276 cancel_delayed_work_sync(&wl->roc_complete_work);
5277 wlcore_roc_completed(wl);
/*
 * mac80211 sta_rc_update callback: delegate rate-control updates for the
 * station straight to the chip-specific hw op.
 */
5282 static void wlcore_op_sta_rc_update(struct ieee80211_hw *hw,
5283 struct ieee80211_vif *vif,
5284 struct ieee80211_sta *sta,
5287 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5288 struct wl1271 *wl = hw->priv;
5290 wlcore_hw_sta_rc_update(wl, wlvif, sta, changed);
/*
 * mac80211 get_rssi callback: read the averaged RSSI for this vif from
 * the firmware via ACX, with the usual mutex/state/ELP-wakeup envelope.
 */
5293 static int wlcore_op_get_rssi(struct ieee80211_hw *hw,
5294 struct ieee80211_vif *vif,
5295 struct ieee80211_sta *sta,
5298 struct wl1271 *wl = hw->priv;
5299 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5302 wl1271_debug(DEBUG_MAC80211, "mac80211 get_rssi");
5304 mutex_lock(&wl->mutex);
5306 if (unlikely(wl->state != WLCORE_STATE_ON))
5309 ret = wl1271_ps_elp_wakeup(wl);
5313 ret = wlcore_acx_average_rssi(wl, wlvif, rssi_dbm);
5318 wl1271_ps_elp_sleep(wl);
5321 mutex_unlock(&wl->mutex);
/*
 * mac80211 tx_frames_pending callback: true when frames are still queued
 * in the driver or in flight in the firmware.
 */
5326 static bool wl1271_tx_frames_pending(struct ieee80211_hw *hw)
5328 struct wl1271 *wl = hw->priv;
5331 mutex_lock(&wl->mutex);
5333 if (unlikely(wl->state != WLCORE_STATE_ON))
5336 /* packets are considered pending if in the TX queue or the FW */
5337 ret = (wl1271_tx_total_queue_count(wl) > 0) || (wl->tx_frames_cnt > 0);
5339 mutex_unlock(&wl->mutex);
/*
 * 2.4 GHz band data registered with mac80211: legacy rates (CCK 1-11
 * Mbps plus OFDM 6-54 Mbps) and channels 1-14. NOTE(review): the
 * extraction dropped the `.bitrate = ...` field of each rate entry.
 */
5344 /* can't be const, mac80211 writes to this */
5345 static struct ieee80211_rate wl1271_rates[] = {
5347 .hw_value = CONF_HW_BIT_RATE_1MBPS,
5348 .hw_value_short = CONF_HW_BIT_RATE_1MBPS, },
5350 .hw_value = CONF_HW_BIT_RATE_2MBPS,
5351 .hw_value_short = CONF_HW_BIT_RATE_2MBPS,
5352 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5354 .hw_value = CONF_HW_BIT_RATE_5_5MBPS,
5355 .hw_value_short = CONF_HW_BIT_RATE_5_5MBPS,
5356 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5358 .hw_value = CONF_HW_BIT_RATE_11MBPS,
5359 .hw_value_short = CONF_HW_BIT_RATE_11MBPS,
5360 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5362 .hw_value = CONF_HW_BIT_RATE_6MBPS,
5363 .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
5365 .hw_value = CONF_HW_BIT_RATE_9MBPS,
5366 .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
5368 .hw_value = CONF_HW_BIT_RATE_12MBPS,
5369 .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
5371 .hw_value = CONF_HW_BIT_RATE_18MBPS,
5372 .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
5374 .hw_value = CONF_HW_BIT_RATE_24MBPS,
5375 .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
5377 .hw_value = CONF_HW_BIT_RATE_36MBPS,
5378 .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
5380 .hw_value = CONF_HW_BIT_RATE_48MBPS,
5381 .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
5383 .hw_value = CONF_HW_BIT_RATE_54MBPS,
5384 .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
5387 /* can't be const, mac80211 writes to this */
5388 static struct ieee80211_channel wl1271_channels[] = {
5389 { .hw_value = 1, .center_freq = 2412, .max_power = WLCORE_MAX_TXPWR },
5390 { .hw_value = 2, .center_freq = 2417, .max_power = WLCORE_MAX_TXPWR },
5391 { .hw_value = 3, .center_freq = 2422, .max_power = WLCORE_MAX_TXPWR },
5392 { .hw_value = 4, .center_freq = 2427, .max_power = WLCORE_MAX_TXPWR },
5393 { .hw_value = 5, .center_freq = 2432, .max_power = WLCORE_MAX_TXPWR },
5394 { .hw_value = 6, .center_freq = 2437, .max_power = WLCORE_MAX_TXPWR },
5395 { .hw_value = 7, .center_freq = 2442, .max_power = WLCORE_MAX_TXPWR },
5396 { .hw_value = 8, .center_freq = 2447, .max_power = WLCORE_MAX_TXPWR },
5397 { .hw_value = 9, .center_freq = 2452, .max_power = WLCORE_MAX_TXPWR },
5398 { .hw_value = 10, .center_freq = 2457, .max_power = WLCORE_MAX_TXPWR },
5399 { .hw_value = 11, .center_freq = 2462, .max_power = WLCORE_MAX_TXPWR },
5400 { .hw_value = 12, .center_freq = 2467, .max_power = WLCORE_MAX_TXPWR },
5401 { .hw_value = 13, .center_freq = 2472, .max_power = WLCORE_MAX_TXPWR },
5402 { .hw_value = 14, .center_freq = 2484, .max_power = WLCORE_MAX_TXPWR },
5405 /* can't be const, mac80211 writes to this */
5406 static struct ieee80211_supported_band wl1271_band_2ghz = {
5407 .channels = wl1271_channels,
5408 .n_channels = ARRAY_SIZE(wl1271_channels),
5409 .bitrates = wl1271_rates,
5410 .n_bitrates = ARRAY_SIZE(wl1271_rates),
/*
 * 5 GHz band data registered with mac80211 (WL1273): OFDM rates 6-54
 * Mbps and the supported 5 GHz channels. NOTE(review): the extraction
 * dropped the `.bitrate = ...` field of each rate entry.
 */
5413 /* 5 GHz data rates for WL1273 */
5414 static struct ieee80211_rate wl1271_rates_5ghz[] = {
5416 .hw_value = CONF_HW_BIT_RATE_6MBPS,
5417 .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
5419 .hw_value = CONF_HW_BIT_RATE_9MBPS,
5420 .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
5422 .hw_value = CONF_HW_BIT_RATE_12MBPS,
5423 .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
5425 .hw_value = CONF_HW_BIT_RATE_18MBPS,
5426 .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
5428 .hw_value = CONF_HW_BIT_RATE_24MBPS,
5429 .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
5431 .hw_value = CONF_HW_BIT_RATE_36MBPS,
5432 .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
5434 .hw_value = CONF_HW_BIT_RATE_48MBPS,
5435 .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
5437 .hw_value = CONF_HW_BIT_RATE_54MBPS,
5438 .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
5441 /* 5 GHz band channels for WL1273 */
5442 static struct ieee80211_channel wl1271_channels_5ghz[] = {
5443 { .hw_value = 8, .center_freq = 5040, .max_power = WLCORE_MAX_TXPWR },
5444 { .hw_value = 12, .center_freq = 5060, .max_power = WLCORE_MAX_TXPWR },
5445 { .hw_value = 16, .center_freq = 5080, .max_power = WLCORE_MAX_TXPWR },
5446 { .hw_value = 34, .center_freq = 5170, .max_power = WLCORE_MAX_TXPWR },
5447 { .hw_value = 36, .center_freq = 5180, .max_power = WLCORE_MAX_TXPWR },
5448 { .hw_value = 38, .center_freq = 5190, .max_power = WLCORE_MAX_TXPWR },
5449 { .hw_value = 40, .center_freq = 5200, .max_power = WLCORE_MAX_TXPWR },
5450 { .hw_value = 42, .center_freq = 5210, .max_power = WLCORE_MAX_TXPWR },
5451 { .hw_value = 44, .center_freq = 5220, .max_power = WLCORE_MAX_TXPWR },
5452 { .hw_value = 46, .center_freq = 5230, .max_power = WLCORE_MAX_TXPWR },
5453 { .hw_value = 48, .center_freq = 5240, .max_power = WLCORE_MAX_TXPWR },
5454 { .hw_value = 52, .center_freq = 5260, .max_power = WLCORE_MAX_TXPWR },
5455 { .hw_value = 56, .center_freq = 5280, .max_power = WLCORE_MAX_TXPWR },
5456 { .hw_value = 60, .center_freq = 5300, .max_power = WLCORE_MAX_TXPWR },
5457 { .hw_value = 64, .center_freq = 5320, .max_power = WLCORE_MAX_TXPWR },
5458 { .hw_value = 100, .center_freq = 5500, .max_power = WLCORE_MAX_TXPWR },
5459 { .hw_value = 104, .center_freq = 5520, .max_power = WLCORE_MAX_TXPWR },
5460 { .hw_value = 108, .center_freq = 5540, .max_power = WLCORE_MAX_TXPWR },
5461 { .hw_value = 112, .center_freq = 5560, .max_power = WLCORE_MAX_TXPWR },
5462 { .hw_value = 116, .center_freq = 5580, .max_power = WLCORE_MAX_TXPWR },
5463 { .hw_value = 120, .center_freq = 5600, .max_power = WLCORE_MAX_TXPWR },
5464 { .hw_value = 124, .center_freq = 5620, .max_power = WLCORE_MAX_TXPWR },
5465 { .hw_value = 128, .center_freq = 5640, .max_power = WLCORE_MAX_TXPWR },
5466 { .hw_value = 132, .center_freq = 5660, .max_power = WLCORE_MAX_TXPWR },
5467 { .hw_value = 136, .center_freq = 5680, .max_power = WLCORE_MAX_TXPWR },
5468 { .hw_value = 140, .center_freq = 5700, .max_power = WLCORE_MAX_TXPWR },
5469 { .hw_value = 149, .center_freq = 5745, .max_power = WLCORE_MAX_TXPWR },
5470 { .hw_value = 153, .center_freq = 5765, .max_power = WLCORE_MAX_TXPWR },
5471 { .hw_value = 157, .center_freq = 5785, .max_power = WLCORE_MAX_TXPWR },
5472 { .hw_value = 161, .center_freq = 5805, .max_power = WLCORE_MAX_TXPWR },
5473 { .hw_value = 165, .center_freq = 5825, .max_power = WLCORE_MAX_TXPWR },
5476 static struct ieee80211_supported_band wl1271_band_5ghz = {
5477 .channels = wl1271_channels_5ghz,
5478 .n_channels = ARRAY_SIZE(wl1271_channels_5ghz),
5479 .bitrates = wl1271_rates_5ghz,
5480 .n_bitrates = ARRAY_SIZE(wl1271_rates_5ghz),
/*
 * mac80211 ops table for all wlcore-based chips: maps every mac80211
 * callback to its driver implementation (lifecycle, scanning, BSS/STA
 * management, AMPDU, ROC, channel contexts, testmode).
 */
5483 static const struct ieee80211_ops wl1271_ops = {
5484 .start = wl1271_op_start,
5485 .stop = wlcore_op_stop,
5486 .add_interface = wl1271_op_add_interface,
5487 .remove_interface = wl1271_op_remove_interface,
5488 .change_interface = wl12xx_op_change_interface,
5490 .suspend = wl1271_op_suspend,
5491 .resume = wl1271_op_resume,
5493 .config = wl1271_op_config,
5494 .prepare_multicast = wl1271_op_prepare_multicast,
5495 .configure_filter = wl1271_op_configure_filter,
5497 .set_key = wlcore_op_set_key,
5498 .hw_scan = wl1271_op_hw_scan,
5499 .cancel_hw_scan = wl1271_op_cancel_hw_scan,
5500 .sched_scan_start = wl1271_op_sched_scan_start,
5501 .sched_scan_stop = wl1271_op_sched_scan_stop,
5502 .bss_info_changed = wl1271_op_bss_info_changed,
5503 .set_frag_threshold = wl1271_op_set_frag_threshold,
5504 .set_rts_threshold = wl1271_op_set_rts_threshold,
5505 .conf_tx = wl1271_op_conf_tx,
5506 .get_tsf = wl1271_op_get_tsf,
5507 .get_survey = wl1271_op_get_survey,
5508 .sta_state = wl12xx_op_sta_state,
5509 .ampdu_action = wl1271_op_ampdu_action,
5510 .tx_frames_pending = wl1271_tx_frames_pending,
5511 .set_bitrate_mask = wl12xx_set_bitrate_mask,
5512 .set_default_unicast_key = wl1271_op_set_default_key_idx,
5513 .channel_switch = wl12xx_op_channel_switch,
5514 .flush = wlcore_op_flush,
5515 .remain_on_channel = wlcore_op_remain_on_channel,
5516 .cancel_remain_on_channel = wlcore_op_cancel_remain_on_channel,
5517 .add_chanctx = wlcore_op_add_chanctx,
5518 .remove_chanctx = wlcore_op_remove_chanctx,
5519 .change_chanctx = wlcore_op_change_chanctx,
5520 .assign_vif_chanctx = wlcore_op_assign_vif_chanctx,
5521 .unassign_vif_chanctx = wlcore_op_unassign_vif_chanctx,
5522 .sta_rc_update = wlcore_op_sta_rc_update,
5523 .get_rssi = wlcore_op_get_rssi,
5524 CFG80211_TESTMODE_CMD(wl1271_tm_cmd)
/*
 * Map a hardware RX rate value to a mac80211 rate index via the per-band
 * lookup table. Out-of-range or unsupported HW rates are logged as
 * errors (the fallback return value is elided by the extraction).
 */
5528 u8 wlcore_rate_to_idx(struct wl1271 *wl, u8 rate, enum ieee80211_band band)
5534 if (unlikely(rate >= wl->hw_tx_rate_tbl_size)) {
5535 wl1271_error("Illegal RX rate from HW: %d", rate);
5539 idx = wl->band_rate_to_idx[band][rate];
5540 if (unlikely(idx == CONF_HW_RXTX_RATE_UNSUPPORTED)) {
5541 wl1271_error("Unsupported RX rate from HW: %d", rate);
/*
 * Build the wiphy's MAC address list from a base OUI + NIC pair:
 * addresses[i] gets the same OUI and (per the elided loop body,
 * presumably) nic + i — TODO confirm the increment. If the chip exposes
 * fewer addresses than WLCORE_NUM_MAC_ADDRESSES, the last slot is
 * synthesized from address 0 with the locally-administered (LAA) bit
 * set so mac80211 still sees a full set.
 */
5548 static void wl12xx_derive_mac_addresses(struct wl1271 *wl, u32 oui, u32 nic)
5552 wl1271_debug(DEBUG_PROBE, "base address: oui %06x nic %06x",
5555 if (nic + WLCORE_NUM_MAC_ADDRESSES - wl->num_mac_addr > 0xffffff)
5556 wl1271_warning("NIC part of the MAC address wraps around!");
/* split oui/nic into the six address bytes, big-endian per field */
5558 for (i = 0; i < wl->num_mac_addr; i++) {
5559 wl->addresses[i].addr[0] = (u8)(oui >> 16);
5560 wl->addresses[i].addr[1] = (u8)(oui >> 8);
5561 wl->addresses[i].addr[2] = (u8) oui;
5562 wl->addresses[i].addr[3] = (u8)(nic >> 16);
5563 wl->addresses[i].addr[4] = (u8)(nic >> 8);
5564 wl->addresses[i].addr[5] = (u8) nic;
5568 /* we may be one address short at the most */
5569 WARN_ON(wl->num_mac_addr + 1 < WLCORE_NUM_MAC_ADDRESSES);
5572 * turn on the LAA bit in the first address and use it as
5575 if (wl->num_mac_addr < WLCORE_NUM_MAC_ADDRESSES) {
5576 int idx = WLCORE_NUM_MAC_ADDRESSES - 1;
5577 memcpy(&wl->addresses[idx], &wl->addresses[0],
5578 sizeof(wl->addresses[0]));
/* BIT(1) in the first octet region marks a locally administered address */
5580 wl->addresses[idx].addr[2] |= BIT(1);
5583 wl->hw->wiphy->n_addresses = WLCORE_NUM_MAC_ADDRESSES;
5584 wl->hw->wiphy->addresses = wl->addresses;
/*
 * wl12xx_get_hw_info - power the chip on briefly to read identification
 * data: chip id, PG version and (when the lower driver provides get_mac)
 * the fused MAC address. Powers the chip back off before returning.
 */
5587 static int wl12xx_get_hw_info(struct wl1271 *wl)
5591 ret = wl12xx_set_power_on(wl);
5595 ret = wlcore_read_reg(wl, REG_CHIP_ID_B, &wl->chip.id);
/* Default to "no fused address"; get_mac may overwrite these. */
5599 wl->fuse_oui_addr = 0;
5600 wl->fuse_nic_addr = 0;
5602 ret = wl->ops->get_pg_ver(wl, &wl->hw_pg_ver);
/* get_mac is optional in the chip-specific ops table. */
5606 if (wl->ops->get_mac)
5607 ret = wl->ops->get_mac(wl);
5610 wl1271_power_off(wl);
/*
 * wl1271_register_hw - derive the MAC address (NVS first, fuse as fallback)
 * and register the hw with mac80211. Idempotent: returns early if already
 * registered.
 */
5614 static int wl1271_register_hw(struct wl1271 *wl)
5617 u32 oui_addr = 0, nic_addr = 0;
5619 if (wl->mac80211_registered)
/* NVS must hold at least the 12 bytes covering the MAC address fields. */
5622 if (wl->nvs_len >= 12) {
5623 /* NOTE: The wl->nvs->nvs element must be first, in
5624 * order to simplify the casting, we assume it is at
5625 * the beginning of the wl->nvs structure.
5627 u8 *nvs_ptr = (u8 *)wl->nvs;
/* MAC address bytes are scattered in the NVS layout, hence the odd indices. */
5630 (nvs_ptr[11] << 16) + (nvs_ptr[10] << 8) + nvs_ptr[6];
5632 (nvs_ptr[5] << 16) + (nvs_ptr[4] << 8) + nvs_ptr[3];
5635 /* if the MAC address is zeroed in the NVS derive from fuse */
5636 if (oui_addr == 0 && nic_addr == 0) {
5637 oui_addr = wl->fuse_oui_addr;
5638 /* fuse has the BD_ADDR, the WLAN addresses are the next two */
5639 nic_addr = wl->fuse_nic_addr + 1;
5642 wl12xx_derive_mac_addresses(wl, oui_addr, nic_addr);
5644 ret = ieee80211_register_hw(wl->hw);
5646 wl1271_error("unable to register mac80211 hw: %d", ret);
5650 wl->mac80211_registered = true;
5652 wl1271_debugfs_init(wl);
5654 wl1271_notice("loaded");
/*
 * wl1271_unregister_hw - stop PLT mode (if active) and unregister from
 * mac80211, clearing the registered flag.
 */
5660 static void wl1271_unregister_hw(struct wl1271 *wl)
5663 wl1271_plt_stop(wl);
5665 ieee80211_unregister_hw(wl->hw);
5666 wl->mac80211_registered = false;
/*
 * Per-type interface limits advertised to cfg80211: one group for pure
 * station mode, one for AP/P2P roles.
 */
5670 static const struct ieee80211_iface_limit wlcore_iface_limits[] = {
5673 .types = BIT(NL80211_IFTYPE_STATION),
5677 .types = BIT(NL80211_IFTYPE_AP) |
5678 BIT(NL80211_IFTYPE_P2P_GO) |
5679 BIT(NL80211_IFTYPE_P2P_CLIENT),
/*
 * Allowed interface combinations. Non-const on purpose:
 * num_different_channels is patched at init time from wl->num_channels
 * (see wl1271_init_ieee80211).
 */
5683 static struct ieee80211_iface_combination
5684 wlcore_iface_combinations[] = {
5686 .max_interfaces = 3,
5687 .limits = wlcore_iface_limits,
5688 .n_limits = ARRAY_SIZE(wlcore_iface_limits),
/*
 * wl1271_init_ieee80211 - populate the ieee80211_hw / wiphy structures with
 * the driver's static capabilities before registration with mac80211:
 * cipher suites, hw flags, supported interface modes, scan limits, band
 * data (local copies), queue layout and interface combinations.
 */
5692 static int wl1271_init_ieee80211(struct wl1271 *wl)
5695 static const u32 cipher_suites[] = {
5696 WLAN_CIPHER_SUITE_WEP40,
5697 WLAN_CIPHER_SUITE_WEP104,
5698 WLAN_CIPHER_SUITE_TKIP,
5699 WLAN_CIPHER_SUITE_CCMP,
/* GEM is a TI vendor-specific cipher suite. */
5700 WL1271_CIPHER_SUITE_GEM,
5703 /* The tx descriptor buffer */
5704 wl->hw->extra_tx_headroom = sizeof(struct wl1271_tx_hw_descr);
/* Some chips need extra headroom for TKIP header expansion. */
5706 if (wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE)
5707 wl->hw->extra_tx_headroom += WL1271_EXTRA_SPACE_TKIP;
5710 /* FIXME: find a proper value */
5711 wl->hw->channel_change_time = 10000;
5712 wl->hw->max_listen_interval = wl->conf.conn.max_listen_interval;
5714 wl->hw->flags = IEEE80211_HW_SIGNAL_DBM |
5715 IEEE80211_HW_SUPPORTS_PS |
5716 IEEE80211_HW_SUPPORTS_DYNAMIC_PS |
5717 IEEE80211_HW_SUPPORTS_UAPSD |
5718 IEEE80211_HW_HAS_RATE_CONTROL |
5719 IEEE80211_HW_CONNECTION_MONITOR |
5720 IEEE80211_HW_REPORTS_TX_ACK_STATUS |
5721 IEEE80211_HW_SPECTRUM_MGMT |
5722 IEEE80211_HW_AP_LINK_PS |
5723 IEEE80211_HW_AMPDU_AGGREGATION |
5724 IEEE80211_HW_TX_AMPDU_SETUP_IN_HW |
5725 IEEE80211_HW_QUEUE_CONTROL;
5727 wl->hw->wiphy->cipher_suites = cipher_suites;
5728 wl->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
5730 wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
5731 BIT(NL80211_IFTYPE_ADHOC) | BIT(NL80211_IFTYPE_AP) |
5732 BIT(NL80211_IFTYPE_P2P_CLIENT) | BIT(NL80211_IFTYPE_P2P_GO);
5733 wl->hw->wiphy->max_scan_ssids = 1;
5734 wl->hw->wiphy->max_sched_scan_ssids = 16;
5735 wl->hw->wiphy->max_match_sets = 16;
5737 * Maximum length of elements in scanning probe request templates
5738 * should be the maximum length possible for a template, without
5739 * the IEEE80211 header of the template
5741 wl->hw->wiphy->max_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
5742 sizeof(struct ieee80211_header);
5744 wl->hw->wiphy->max_sched_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
5745 sizeof(struct ieee80211_header);
/* Cap remain-on-channel requests to 5 seconds. */
5747 wl->hw->wiphy->max_remain_on_channel_duration = 5000;
5749 wl->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD |
5750 WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
5751 WIPHY_FLAG_SUPPORTS_SCHED_SCAN;
5753 /* make sure all our channels fit in the scanned_ch bitmask */
5754 BUILD_BUG_ON(ARRAY_SIZE(wl1271_channels) +
5755 ARRAY_SIZE(wl1271_channels_5ghz) >
5756 WL1271_MAX_CHANNELS);
5758 * clear channel flags from the previous usage
5759 * and restore max_power & max_antenna_gain values.
5761 for (i = 0; i < ARRAY_SIZE(wl1271_channels); i++) {
5762 wl1271_band_2ghz.channels[i].flags = 0;
5763 wl1271_band_2ghz.channels[i].max_power = WLCORE_MAX_TXPWR;
5764 wl1271_band_2ghz.channels[i].max_antenna_gain = 0;
5767 for (i = 0; i < ARRAY_SIZE(wl1271_channels_5ghz); i++) {
5768 wl1271_band_5ghz.channels[i].flags = 0;
5769 wl1271_band_5ghz.channels[i].max_power = WLCORE_MAX_TXPWR;
5770 wl1271_band_5ghz.channels[i].max_antenna_gain = 0;
5774 * We keep local copies of the band structs because we need to
5775 * modify them on a per-device basis.
5777 memcpy(&wl->bands[IEEE80211_BAND_2GHZ], &wl1271_band_2ghz,
5778 sizeof(wl1271_band_2ghz));
/* HT caps are chip-specific, filled in by the lower driver. */
5779 memcpy(&wl->bands[IEEE80211_BAND_2GHZ].ht_cap,
5780 &wl->ht_cap[IEEE80211_BAND_2GHZ],
5781 sizeof(*wl->ht_cap));
5782 memcpy(&wl->bands[IEEE80211_BAND_5GHZ], &wl1271_band_5ghz,
5783 sizeof(wl1271_band_5ghz));
5784 memcpy(&wl->bands[IEEE80211_BAND_5GHZ].ht_cap,
5785 &wl->ht_cap[IEEE80211_BAND_5GHZ],
5786 sizeof(*wl->ht_cap));
5788 wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
5789 &wl->bands[IEEE80211_BAND_2GHZ];
5790 wl->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
5791 &wl->bands[IEEE80211_BAND_5GHZ];
5794 * allow 4 queues per mac address we support +
5795 * 1 cab queue per mac + one global offchannel Tx queue
5797 wl->hw->queues = (NUM_TX_QUEUES + 1) * WLCORE_NUM_MAC_ADDRESSES + 1;
5799 /* the last queue is the offchannel queue */
5800 wl->hw->offchannel_tx_hw_queue = wl->hw->queues - 1;
5801 wl->hw->max_rates = 1;
5803 wl->hw->wiphy->reg_notifier = wl1271_reg_notify;
5805 /* the FW answers probe-requests in AP-mode */
5806 wl->hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
5807 wl->hw->wiphy->probe_resp_offload =
5808 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
5809 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
5810 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
5812 /* allowed interface combinations */
/* Channel count is chip-specific, patched into the static table here. */
5813 wlcore_iface_combinations[0].num_different_channels = wl->num_channels;
5814 wl->hw->wiphy->iface_combinations = wlcore_iface_combinations;
5815 wl->hw->wiphy->n_iface_combinations =
5816 ARRAY_SIZE(wlcore_iface_combinations);
5818 SET_IEEE80211_DEV(wl->hw, wl->dev);
5820 wl->hw->sta_data_size = sizeof(struct wl1271_station);
5821 wl->hw->vif_data_size = sizeof(struct wl12xx_vif);
5823 wl->hw->max_rx_aggregation_subframes = wl->conf.ht.rx_ba_win_size;
/*
 * wlcore_alloc_hw - allocate the ieee80211_hw and the wl1271 state embedded
 * in it, plus all driver-owned buffers (priv area, aggregation buffer,
 * dummy packet, FW log page, mailbox, 32-bit bounce buffer), and initialize
 * queues, work items, locks and default state.
 *
 * Returns the new hw on success, ERR_PTR(ret) on failure; error paths
 * unwind the allocations in reverse order (goto-cleanup, labels elided).
 */
5828 struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size,
5831 struct ieee80211_hw *hw;
5836 BUILD_BUG_ON(AP_MAX_STATIONS > WL12XX_MAX_LINKS);
5838 hw = ieee80211_alloc_hw(sizeof(*wl), &wl1271_ops);
5840 wl1271_error("could not alloc ieee80211_hw");
5846 memset(wl, 0, sizeof(*wl));
/* Chip-specific private area, sized by the lower driver. */
5848 wl->priv = kzalloc(priv_size, GFP_KERNEL);
5850 wl1271_error("could not alloc wl priv");
5852 goto err_priv_alloc;
5855 INIT_LIST_HEAD(&wl->wlvif_list);
/* One TX queue per (AC, link) pair. */
5859 for (i = 0; i < NUM_TX_QUEUES; i++)
5860 for (j = 0; j < WL12XX_MAX_LINKS; j++)
5861 skb_queue_head_init(&wl->links[j].tx_queue[i]);
5863 skb_queue_head_init(&wl->deferred_rx_queue);
5864 skb_queue_head_init(&wl->deferred_tx_queue);
5866 INIT_DELAYED_WORK(&wl->elp_work, wl1271_elp_work);
5867 INIT_WORK(&wl->netstack_work, wl1271_netstack_work);
5868 INIT_WORK(&wl->tx_work, wl1271_tx_work);
5869 INIT_WORK(&wl->recovery_work, wl1271_recovery_work);
5870 INIT_DELAYED_WORK(&wl->scan_complete_work, wl1271_scan_complete_work);
5871 INIT_DELAYED_WORK(&wl->roc_complete_work, wlcore_roc_complete_work);
5872 INIT_DELAYED_WORK(&wl->tx_watchdog_work, wl12xx_tx_watchdog_work);
/* Freezable so in-flight work is quiesced across system suspend. */
5874 wl->freezable_wq = create_freezable_workqueue("wl12xx_wq");
5875 if (!wl->freezable_wq) {
5882 wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
5883 wl->band = IEEE80211_BAND_2GHZ;
5884 wl->channel_type = NL80211_CHAN_NO_HT;
5886 wl->sg_enabled = true;
5887 wl->sleep_auth = WL1271_PSM_ILLEGAL;
5888 wl->recovery_count = 0;
5891 wl->ap_fw_ps_map = 0;
5893 wl->platform_quirks = 0;
5894 wl->system_hlid = WL12XX_SYSTEM_HLID;
5895 wl->active_sta_count = 0;
5896 wl->active_link_count = 0;
5898 init_waitqueue_head(&wl->fwlog_waitq);
5900 /* The system link is always allocated */
5901 __set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
5903 memset(wl->tx_frames_map, 0, sizeof(wl->tx_frames_map));
5904 for (i = 0; i < wl->num_tx_desc; i++)
5905 wl->tx_frames[i] = NULL;
5907 spin_lock_init(&wl->wl_lock);
5909 wl->state = WLCORE_STATE_OFF;
5910 wl->fw_type = WL12XX_FW_TYPE_NONE;
5911 mutex_init(&wl->mutex);
5912 mutex_init(&wl->flush_mutex);
5913 init_completion(&wl->nvs_loading_complete);
/* Aggregation buffer is page-allocated; same order used to free it. */
5915 order = get_order(aggr_buf_size);
5916 wl->aggr_buf = (u8 *)__get_free_pages(GFP_KERNEL, order);
5917 if (!wl->aggr_buf) {
5921 wl->aggr_buf_size = aggr_buf_size;
5923 wl->dummy_packet = wl12xx_alloc_dummy_packet(wl);
5924 if (!wl->dummy_packet) {
5929 /* Allocate one page for the FW log */
5930 wl->fwlog = (u8 *)get_zeroed_page(GFP_KERNEL);
5933 goto err_dummy_packet;
5936 wl->mbox_size = mbox_size;
/* GFP_DMA: mailbox is read via DMA-capable bus transfers. */
5937 wl->mbox = kmalloc(wl->mbox_size, GFP_KERNEL | GFP_DMA);
5943 wl->buffer_32 = kmalloc(sizeof(*wl->buffer_32), GFP_KERNEL);
5944 if (!wl->buffer_32) {
/* Error unwinding, in reverse allocation order: */
5955 free_page((unsigned long)wl->fwlog);
5958 dev_kfree_skb(wl->dummy_packet);
5961 free_pages((unsigned long)wl->aggr_buf, order);
5964 destroy_workqueue(wl->freezable_wq);
5967 wl1271_debugfs_exit(wl);
5971 ieee80211_free_hw(hw);
5975 return ERR_PTR(ret);
/*
 * wlcore_free_hw - release everything wlcore_alloc_hw (and later init)
 * allocated. Wakes any blocked fwlog readers first so they do not sleep on
 * freed state, then frees buffers in reverse allocation order.
 */
5979 int wlcore_free_hw(struct wl1271 *wl)
5981 /* Unblock any fwlog readers */
5982 mutex_lock(&wl->mutex);
/* fwlog_size = -1 signals readers that the log is gone. */
5983 wl->fwlog_size = -1;
5984 wake_up_interruptible_all(&wl->fwlog_waitq);
5985 mutex_unlock(&wl->mutex);
5987 wlcore_sysfs_free(wl);
5989 kfree(wl->buffer_32);
5991 free_page((unsigned long)wl->fwlog);
5992 dev_kfree_skb(wl->dummy_packet);
5993 free_pages((unsigned long)wl->aggr_buf, get_order(wl->aggr_buf_size));
5995 wl1271_debugfs_exit(wl);
5999 wl->fw_type = WL12XX_FW_TYPE_NONE;
6003 kfree(wl->fw_status_1);
6004 kfree(wl->tx_res_if);
6005 destroy_workqueue(wl->freezable_wq);
/* hw embeds wl itself, so this frees the wl struct too. */
6008 ieee80211_free_hw(wl->hw);
/*
 * Wake-on-WLAN capabilities advertised when the platform can keep the chip
 * powered in suspend (see wlcore_nvs_cb / pdata->pwr_in_suspend).
 */
6015 static const struct wiphy_wowlan_support wlcore_wowlan_support = {
6016 .flags = WIPHY_WOWLAN_ANY,
6017 .n_patterns = WL1271_MAX_RX_FILTERS,
6018 .pattern_min_len = 1,
6019 .pattern_max_len = WL1271_RX_FILTER_MAX_PATTERN_SIZE,
/*
 * Hard IRQ handler used only for edge-triggered interrupt platforms:
 * immediately defers all work to the threaded handler (wlcore_irq).
 */
6023 static irqreturn_t wlcore_hardirq(int irq, void *cookie)
6025 return IRQ_WAKE_THREAD;
/*
 * wlcore_nvs_cb - deferred probe continuation, called by the firmware
 * loader with the (optional) NVS calibration file. Copies the NVS, runs
 * chip setup, requests the IRQ, configures wakeup, reads HW info and
 * finally registers with mac80211 and sysfs. Always completes
 * nvs_loading_complete so wlcore_remove cannot block forever.
 */
6028 static void wlcore_nvs_cb(const struct firmware *fw, void *context)
6030 struct wl1271 *wl = context;
6031 struct platform_device *pdev = wl->pdev;
6032 struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
6033 struct wl12xx_platform_data *pdata = pdev_data->pdata;
6034 unsigned long irqflags;
6036 irq_handler_t hardirq_fn = NULL;
/* NVS is optional: a missing file is only a debug message, not an error. */
6039 wl->nvs = kmemdup(fw->data, fw->size, GFP_KERNEL);
6041 wl1271_error("Could not allocate nvs data");
6044 wl->nvs_len = fw->size;
6046 wl1271_debug(DEBUG_BOOT, "Could not get nvs file %s",
6052 ret = wl->ops->setup(wl);
6056 BUG_ON(wl->num_tx_desc > WLCORE_MAX_TX_DESCRIPTORS);
6058 /* adjust some runtime configuration parameters */
6059 wlcore_adjust_conf(wl);
6061 wl->irq = platform_get_irq(pdev, 0);
6062 wl->platform_quirks = pdata->platform_quirks;
6063 wl->if_ops = pdev_data->if_ops;
/* Edge-triggered platforms need a hard handler to kick the thread. */
6065 if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ) {
6066 irqflags = IRQF_TRIGGER_RISING;
6067 hardirq_fn = wlcore_hardirq;
6069 irqflags = IRQF_TRIGGER_HIGH | IRQF_ONESHOT;
6072 ret = request_threaded_irq(wl->irq, hardirq_fn, wlcore_irq,
6073 irqflags, pdev->name, wl);
6075 wl1271_error("request_irq() failed: %d", ret);
/* Wakeup-capable IRQ enables WoWLAN when power stays on in suspend. */
6080 ret = enable_irq_wake(wl->irq);
6082 wl->irq_wake_enabled = true;
6083 device_init_wakeup(wl->dev, 1);
6084 if (pdata->pwr_in_suspend)
6085 wl->hw->wiphy->wowlan = &wlcore_wowlan_support;
/* Keep the IRQ masked until the interface is brought up. */
6088 disable_irq(wl->irq);
6090 ret = wl12xx_get_hw_info(wl);
6092 wl1271_error("couldn't get hw info");
6096 ret = wl->ops->identify_chip(wl);
6100 ret = wl1271_init_ieee80211(wl);
6104 ret = wl1271_register_hw(wl);
6108 ret = wlcore_sysfs_init(wl);
6112 wl->initialized = true;
/* Error unwinding (labels elided): */
6116 wl1271_unregister_hw(wl);
6119 free_irq(wl->irq, wl);
6125 release_firmware(fw);
6126 complete_all(&wl->nvs_loading_complete);
/*
 * wlcore_probe - common probe entry for the wl12xx/wl18xx lower drivers.
 * Validates the ops/ptable the lower driver filled in, then kicks off an
 * asynchronous NVS firmware request; real initialization continues in
 * wlcore_nvs_cb. Completes nvs_loading_complete on request failure so
 * wlcore_remove never blocks.
 */
6129 int wlcore_probe(struct wl1271 *wl, struct platform_device *pdev)
6133 if (!wl->ops || !wl->ptable)
6136 wl->dev = &pdev->dev;
6138 platform_set_drvdata(pdev, wl);
6140 ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
6141 WL12XX_NVS_NAME, &pdev->dev, GFP_KERNEL,
6144 wl1271_error("request_firmware_nowait failed: %d", ret);
6145 complete_all(&wl->nvs_loading_complete);
/*
 * wlcore_remove - common removal path. Waits for the async NVS callback to
 * finish (it may still be initializing), then tears down wakeup, mac80211
 * registration and the IRQ. Bails out early if init never completed.
 */
6152 int wlcore_remove(struct platform_device *pdev)
6154 struct wl1271 *wl = platform_get_drvdata(pdev);
/* Serialize against wlcore_nvs_cb, which runs asynchronously. */
6156 wait_for_completion(&wl->nvs_loading_complete);
6157 if (!wl->initialized)
6160 if (wl->irq_wake_enabled) {
6161 device_init_wakeup(wl->dev, 0);
6162 disable_irq_wake(wl->irq);
6164 wl1271_unregister_hw(wl);
6165 free_irq(wl->irq, wl);
/* Runtime debug bitmask, shared with the other wlcore modules. */
6172 u32 wl12xx_debug_level = DEBUG_NONE;
6173 EXPORT_SYMBOL_GPL(wl12xx_debug_level);
6174 module_param_named(debug_level, wl12xx_debug_level, uint, S_IRUSR | S_IWUSR);
6175 MODULE_PARM_DESC(debug_level, "wl12xx debugging level");
/* FW logger mode; not writable at runtime (perm 0). */
6177 module_param_named(fwlog, fwlog_param, charp, 0);
6178 MODULE_PARM_DESC(fwlog,
6179 "FW logger options: continuous, ondemand, dbgpins or disable");
/* -1 defaults (set at declaration) mean "use the per-chip conf value". */
6181 module_param(fwlog_mem_blocks, int, S_IRUSR | S_IWUSR);
6182 MODULE_PARM_DESC(fwlog_mem_blocks, "fwlog mem_blocks");
6184 module_param(bug_on_recovery, int, S_IRUSR | S_IWUSR);
6185 MODULE_PARM_DESC(bug_on_recovery, "BUG() on fw recovery");
6187 module_param(no_recovery, int, S_IRUSR | S_IWUSR);
6188 MODULE_PARM_DESC(no_recovery, "Prevent HW recovery. FW will remain stuck.")
6190 MODULE_LICENSE("GPL");
6191 MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
6192 MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
6193 MODULE_FIRMWARE(WL12XX_NVS_NAME);