/*
 * Copyright (c) 2004-2011 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/module.h>
#include <linux/mmc/card.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/host.h>
#include <linux/mmc/sdio_func.h>
#include <linux/mmc/sdio_ids.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/sd.h>
#include "hif.h"
#include "hif-ops.h"
#include "target.h"
#include "debug.h"
#include "cfg80211.h"

struct ath6kl_sdio {
        struct sdio_func *func;

        /* protects access to bus_req_freeq */
        spinlock_t lock;

        /* free list */
        struct list_head bus_req_freeq;

        /* available bus requests */
        struct bus_request bus_req[BUS_REQUEST_MAX_NUM];

        struct ath6kl *ar;

        u8 *dma_buffer;

        /* protects access to dma_buffer */
        struct mutex dma_buffer_mutex;

        /* scatter request list head */
        struct list_head scat_req;
        spinlock_t scat_lock;

        bool scatter_enabled;
        bool is_disabled;
        atomic_t irq_handling;
        const struct sdio_device_id *id;
        struct work_struct wr_async_work;
        struct list_head wr_asyncq;
        spinlock_t wr_async_lock;
};

#define CMD53_ARG_READ          0
#define CMD53_ARG_WRITE         1
#define CMD53_ARG_BLOCK_BASIS   1
#define CMD53_ARG_FIXED_ADDRESS 0
#define CMD53_ARG_INCR_ADDRESS  1

static inline struct ath6kl_sdio *ath6kl_sdio_priv(struct ath6kl *ar)
{
        return ar->hif_priv;
}

/*
 * Check whether a buffer is word-aligned and DMA-able. Most host
 * controllers assume the buffer is DMA-able and will bug-check
 * otherwise (e.g. on buffers allocated on the stack). virt_addr_valid()
 * fails on stack memory.
 */
static inline bool buf_needs_bounce(u8 *buf)
{
        return ((unsigned long) buf & 0x3) || !virt_addr_valid(buf);
}
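
/*
 * Usage sketch (illustrative only, not part of the driver): callers
 * route misaligned or non-DMA-able buffers through a bounce buffer:
 *
 *      u8 stack_buf[4];                        // on the stack
 *      u8 *heap_buf = kmalloc(4, GFP_KERNEL);  // DMA-able, min-aligned
 *
 *      buf_needs_bounce(stack_buf);  // true: !virt_addr_valid()
 *      buf_needs_bounce(heap_buf);   // false: aligned, valid lowmem
 *
 * ath6kl_sdio_read_write_sync() below applies exactly this check
 * before deciding whether to copy through the pre-allocated
 * dma_buffer.
 */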

static void ath6kl_sdio_set_mbox_info(struct ath6kl *ar)
{
        struct ath6kl_mbox_info *mbox_info = &ar->mbox_info;

        /* EP1 has an extended range */
        mbox_info->htc_addr = HIF_MBOX_BASE_ADDR;
        mbox_info->htc_ext_addr = HIF_MBOX0_EXT_BASE_ADDR;
        mbox_info->htc_ext_sz = HIF_MBOX0_EXT_WIDTH;
        mbox_info->block_size = HIF_MBOX_BLOCK_SIZE;
        mbox_info->gmbox_addr = HIF_GMBOX_BASE_ADDR;
        mbox_info->gmbox_sz = HIF_GMBOX_WIDTH;
}

static inline void ath6kl_sdio_set_cmd53_arg(u32 *arg, u8 rw, u8 func,
                                             u8 mode, u8 opcode, u32 addr,
                                             u16 blksz_or_bytes)
{
        *arg = (((rw & 1) << 31) |
                ((func & 0x7) << 28) |
                ((mode & 1) << 27) |
                ((opcode & 1) << 26) |
                ((addr & 0x1FFFF) << 9) |
                (blksz_or_bytes & 0x1FF));
}
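
/*
 * Worked example (illustrative only): a fixed-address block write of
 * 4 blocks to mailbox address 0x800 on SDIO function 1 encodes as
 *
 *      (1 << 31) | (1 << 28) | (1 << 27) | (0 << 26) |
 *      (0x800 << 9) | 4  ==  0x98100004
 *
 * i.e. rw = CMD53_ARG_WRITE, mode = CMD53_ARG_BLOCK_BASIS,
 * opcode = CMD53_ARG_FIXED_ADDRESS. The mailbox address is an assumed
 * value for the example, not taken from the HIF headers.
 */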

static inline void ath6kl_sdio_set_cmd52_arg(u32 *arg, u8 write, u8 raw,
                                             unsigned int address,
                                             unsigned char val)
{
        const u8 func = 0;

        *arg = ((write & 1) << 31) |
               ((func & 0x7) << 28) |
               ((raw & 1) << 27) |
               (1 << 26) | /* CMD52 reads are done after writes */
               ((address & 0x1FFFF) << 9) |
               (1 << 8) |
               (val & 0xFF);
}
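
/*
 * Worked example (illustrative only): writing value 0x01 to CCCR
 * register 0x02 (I/O Enable) through function 0 encodes as
 *
 *      (1 << 31) | (0 << 28) | (0 << 27) | (1 << 26) |
 *      (0x02 << 9) | (1 << 8) | 0x01  ==  0x84000501
 *
 * The register choice is only for illustration; the driver itself
 * uses this path below to program the CCCR interrupt-mode register.
 */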

static int ath6kl_sdio_func0_cmd52_wr_byte(struct mmc_card *card,
                                           unsigned int address,
                                           unsigned char byte)
{
        struct mmc_command io_cmd;

        memset(&io_cmd, 0, sizeof(io_cmd));
        ath6kl_sdio_set_cmd52_arg(&io_cmd.arg, 1, 0, address, byte);
        io_cmd.opcode = SD_IO_RW_DIRECT;
        io_cmd.flags = MMC_RSP_R5 | MMC_CMD_AC;

        return mmc_wait_for_cmd(card->host, &io_cmd, 0);
}

static int ath6kl_sdio_io(struct sdio_func *func, u32 request, u32 addr,
                          u8 *buf, u32 len)
{
        int ret = 0;

        sdio_claim_host(func);

        if (request & HIF_WRITE) {
                /* FIXME: looks like ugly workaround for something */
                if (addr >= HIF_MBOX_BASE_ADDR &&
                    addr <= HIF_MBOX_END_ADDR)
                        addr += (HIF_MBOX_WIDTH - len);

                /* FIXME: this also looks like ugly workaround */
                if (addr == HIF_MBOX0_EXT_BASE_ADDR)
                        addr += HIF_MBOX0_EXT_WIDTH - len;

                if (request & HIF_FIXED_ADDRESS)
                        ret = sdio_writesb(func, addr, buf, len);
                else
                        ret = sdio_memcpy_toio(func, addr, buf, len);
        } else {
                if (request & HIF_FIXED_ADDRESS)
                        ret = sdio_readsb(func, buf, addr, len);
                else
                        ret = sdio_memcpy_fromio(func, buf, addr, len);
        }

        sdio_release_host(func);

        ath6kl_dbg(ATH6KL_DBG_SDIO, "%s addr 0x%x%s buf 0x%p len %d\n",
                   request & HIF_WRITE ? "wr" : "rd", addr,
                   request & HIF_FIXED_ADDRESS ? " (fixed)" : "", buf, len);
        ath6kl_dbg_dump(ATH6KL_DBG_SDIO_DUMP, NULL, "sdio ", buf, len);

        return ret;
}
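
/*
 * Usage sketch (illustrative only): a synchronous fixed-address read
 * of one mailbox block lands in sdio_readsb() above:
 *
 *      u8 *buf = kmalloc(HIF_MBOX_BLOCK_SIZE, GFP_KERNEL);
 *      ret = ath6kl_sdio_io(func, HIF_RD_SYNC_BLOCK_FIX,
 *                           ar->mbox_info.htc_addr, buf,
 *                           HIF_MBOX_BLOCK_SIZE);
 *
 * HIF_RD_SYNC_BLOCK_FIX is assumed here to be a convenience mask in
 * hif.h combining HIF_READ, HIF_SYNCHRONOUS, HIF_BLOCK_BASIS and
 * HIF_FIXED_ADDRESS, alongside the HIF_*_SYNC_BYTE_* masks used below.
 */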

static struct bus_request *ath6kl_sdio_alloc_busreq(struct ath6kl_sdio *ar_sdio)
{
        struct bus_request *bus_req;

        spin_lock_bh(&ar_sdio->lock);

        if (list_empty(&ar_sdio->bus_req_freeq)) {
                spin_unlock_bh(&ar_sdio->lock);
                return NULL;
        }

        bus_req = list_first_entry(&ar_sdio->bus_req_freeq,
                                   struct bus_request, list);
        list_del(&bus_req->list);

        spin_unlock_bh(&ar_sdio->lock);
        ath6kl_dbg(ATH6KL_DBG_SCATTER, "%s: bus request 0x%p\n",
                   __func__, bus_req);

        return bus_req;
}

static void ath6kl_sdio_free_bus_req(struct ath6kl_sdio *ar_sdio,
                                     struct bus_request *bus_req)
{
        ath6kl_dbg(ATH6KL_DBG_SCATTER, "%s: bus request 0x%p\n",
                   __func__, bus_req);

        spin_lock_bh(&ar_sdio->lock);
        list_add_tail(&bus_req->list, &ar_sdio->bus_req_freeq);
        spin_unlock_bh(&ar_sdio->lock);
}

static void ath6kl_sdio_setup_scat_data(struct hif_scatter_req *scat_req,
                                        struct mmc_data *data)
{
        struct scatterlist *sg;
        int i;

        data->blksz = HIF_MBOX_BLOCK_SIZE;
        data->blocks = scat_req->len / HIF_MBOX_BLOCK_SIZE;

        ath6kl_dbg(ATH6KL_DBG_SCATTER,
                   "hif-scatter: (%s) addr: 0x%X, (block len: %d, block count: %d), (tot: %d, sg: %d)\n",
                   (scat_req->req & HIF_WRITE) ? "WR" : "RD", scat_req->addr,
                   data->blksz, data->blocks, scat_req->len,
                   scat_req->scat_entries);

        data->flags = (scat_req->req & HIF_WRITE) ? MMC_DATA_WRITE :
                                                    MMC_DATA_READ;

        /* fill SG entries */
        sg = scat_req->sgentries;
        sg_init_table(sg, scat_req->scat_entries);

        /* assemble SG list */
        for (i = 0; i < scat_req->scat_entries; i++, sg++) {
                ath6kl_dbg(ATH6KL_DBG_SCATTER, "%d: addr:0x%p, len:%d\n",
                           i, scat_req->scat_list[i].buf,
                           scat_req->scat_list[i].len);

                sg_set_buf(sg, scat_req->scat_list[i].buf,
                           scat_req->scat_list[i].len);
        }

        /* set scatter-gather table for request */
        data->sg = scat_req->sgentries;
        data->sg_len = scat_req->scat_entries;
}
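
/*
 * Worked example (illustrative only): with HIF_MBOX_BLOCK_SIZE at its
 * usual value of 128, a scatter request carrying two 1536-byte packets
 * (scat_entries = 2, len = 3072) is programmed as data->blksz = 128
 * and data->blocks = 24; the two packet buffers become the two
 * scatterlist entries, and the controller moves all 24 blocks in a
 * single CMD53.
 */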

static int ath6kl_sdio_scat_rw(struct ath6kl_sdio *ar_sdio,
                               struct bus_request *req)
{
        struct mmc_request mmc_req;
        struct mmc_command cmd;
        struct mmc_data data;
        struct hif_scatter_req *scat_req;
        u8 opcode, rw;
        int status, len;

        scat_req = req->scat_req;

        if (scat_req->virt_scat) {
                len = scat_req->len;
                if (scat_req->req & HIF_BLOCK_BASIS)
                        len = round_down(len, HIF_MBOX_BLOCK_SIZE);

                status = ath6kl_sdio_io(ar_sdio->func, scat_req->req,
                                        scat_req->addr, scat_req->virt_dma_buf,
                                        len);
                goto scat_complete;
        }

        memset(&mmc_req, 0, sizeof(struct mmc_request));
        memset(&cmd, 0, sizeof(struct mmc_command));
        memset(&data, 0, sizeof(struct mmc_data));

        ath6kl_sdio_setup_scat_data(scat_req, &data);

        opcode = (scat_req->req & HIF_FIXED_ADDRESS) ?
                  CMD53_ARG_FIXED_ADDRESS : CMD53_ARG_INCR_ADDRESS;

        rw = (scat_req->req & HIF_WRITE) ? CMD53_ARG_WRITE : CMD53_ARG_READ;

        /* Fixup the address so that the last byte will fall on MBOX EOM */
        if (scat_req->req & HIF_WRITE) {
                if (scat_req->addr == HIF_MBOX_BASE_ADDR)
                        scat_req->addr += HIF_MBOX_WIDTH - scat_req->len;
                else
                        /* Uses extended address range */
                        scat_req->addr += HIF_MBOX0_EXT_WIDTH - scat_req->len;
        }

        /* set command argument */
        ath6kl_sdio_set_cmd53_arg(&cmd.arg, rw, ar_sdio->func->num,
                                  CMD53_ARG_BLOCK_BASIS, opcode, scat_req->addr,
                                  data.blocks);

        cmd.opcode = SD_IO_RW_EXTENDED;
        cmd.flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_ADTC;

        mmc_req.cmd = &cmd;
        mmc_req.data = &data;

        sdio_claim_host(ar_sdio->func);

        mmc_set_data_timeout(&data, ar_sdio->func->card);
        /* synchronous call to process request */
        mmc_wait_for_req(ar_sdio->func->card->host, &mmc_req);

        sdio_release_host(ar_sdio->func);

        status = cmd.error ? cmd.error : data.error;

scat_complete:
        scat_req->status = status;

        if (scat_req->status)
                ath6kl_err("Scatter write request failed: %d\n",
                           scat_req->status);

        if (scat_req->req & HIF_ASYNCHRONOUS)
                scat_req->complete(ar_sdio->ar->htc_target, scat_req);

        return status;
}

static int ath6kl_sdio_alloc_prep_scat_req(struct ath6kl_sdio *ar_sdio,
                                           int n_scat_entry, int n_scat_req,
                                           bool virt_scat)
{
        struct hif_scatter_req *s_req;
        struct bus_request *bus_req;
        int i, scat_req_sz, scat_list_sz, sg_sz, buf_sz;
        u8 *virt_buf;

        scat_list_sz = (n_scat_entry - 1) * sizeof(struct hif_scatter_item);
        scat_req_sz = sizeof(*s_req) + scat_list_sz;

        if (!virt_scat)
                sg_sz = sizeof(struct scatterlist) * n_scat_entry;
        else
                buf_sz = 2 * L1_CACHE_BYTES +
                         ATH6KL_MAX_TRANSFER_SIZE_PER_SCATTER;

        for (i = 0; i < n_scat_req; i++) {
                /* allocate the scatter request */
                s_req = kzalloc(scat_req_sz, GFP_KERNEL);
                if (!s_req)
                        return -ENOMEM;

                if (virt_scat) {
                        virt_buf = kzalloc(buf_sz, GFP_KERNEL);
                        if (!virt_buf) {
                                kfree(s_req);
                                return -ENOMEM;
                        }

                        s_req->virt_dma_buf =
                                (u8 *)L1_CACHE_ALIGN((unsigned long)virt_buf);
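
                        /*
                         * Worked example (illustrative only): with
                         * L1_CACHE_BYTES = 64, a kzalloc() result of
                         * ...0x1030 is rounded up by L1_CACHE_ALIGN()
                         * to ...0x1040. Over-allocating by two cache
                         * lines above guarantees that the aligned
                         * pointer plus
                         * ATH6KL_MAX_TRANSFER_SIZE_PER_SCATTER bytes
                         * still fits inside the allocation.
                         */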
                } else {
                        /* allocate sglist */
                        s_req->sgentries = kzalloc(sg_sz, GFP_KERNEL);

                        if (!s_req->sgentries) {
                                kfree(s_req);
                                return -ENOMEM;
                        }
                }

                /* allocate a bus request for this scatter request */
                bus_req = ath6kl_sdio_alloc_busreq(ar_sdio);
                if (!bus_req) {
                        kfree(s_req->sgentries);
                        kfree(s_req->virt_dma_buf);
                        kfree(s_req);
                        return -ENOMEM;
                }

                /* assign the scatter request to this bus request */
                bus_req->scat_req = s_req;
                s_req->busrequest = bus_req;

                s_req->virt_scat = virt_scat;

                /* add it to the scatter pool */
                hif_scatter_req_add(ar_sdio->ar, s_req);
        }

        return 0;
}

static int ath6kl_sdio_read_write_sync(struct ath6kl *ar, u32 addr, u8 *buf,
                                       u32 len, u32 request)
{
        struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
        u8 *tbuf = NULL;
        int ret;
        bool bounced = false;

        if (request & HIF_BLOCK_BASIS)
                len = round_down(len, HIF_MBOX_BLOCK_SIZE);

        if (buf_needs_bounce(buf)) {
                if (!ar_sdio->dma_buffer)
                        return -ENOMEM;
                mutex_lock(&ar_sdio->dma_buffer_mutex);
                tbuf = ar_sdio->dma_buffer;
                memcpy(tbuf, buf, len);
                bounced = true;
        } else
                tbuf = buf;

        ret = ath6kl_sdio_io(ar_sdio->func, request, addr, tbuf, len);
        if ((request & HIF_READ) && bounced)
                memcpy(buf, tbuf, len);

        if (bounced)
                mutex_unlock(&ar_sdio->dma_buffer_mutex);

        return ret;
}

static void __ath6kl_sdio_write_async(struct ath6kl_sdio *ar_sdio,
                                      struct bus_request *req)
{
        if (req->scat_req)
                ath6kl_sdio_scat_rw(ar_sdio, req);
        else {
                void *context;
                int status;

                status = ath6kl_sdio_read_write_sync(ar_sdio->ar, req->address,
                                                     req->buffer, req->length,
                                                     req->request);
                context = req->packet;
                ath6kl_sdio_free_bus_req(ar_sdio, req);
                ath6kl_hif_rw_comp_handler(context, status);
        }
}

static void ath6kl_sdio_write_async_work(struct work_struct *work)
{
        struct ath6kl_sdio *ar_sdio;
        struct bus_request *req, *tmp_req;

        ar_sdio = container_of(work, struct ath6kl_sdio, wr_async_work);

        spin_lock_bh(&ar_sdio->wr_async_lock);
        list_for_each_entry_safe(req, tmp_req, &ar_sdio->wr_asyncq, list) {
                list_del(&req->list);
                spin_unlock_bh(&ar_sdio->wr_async_lock);
                __ath6kl_sdio_write_async(ar_sdio, req);
                spin_lock_bh(&ar_sdio->wr_async_lock);
        }
        spin_unlock_bh(&ar_sdio->wr_async_lock);
}

static void ath6kl_sdio_irq_handler(struct sdio_func *func)
{
        int status;
        struct ath6kl_sdio *ar_sdio;

        ath6kl_dbg(ATH6KL_DBG_SDIO, "irq\n");

        ar_sdio = sdio_get_drvdata(func);
        atomic_set(&ar_sdio->irq_handling, 1);

        /*
         * Release the host during interrupts so we can pick it back up when
         * we process commands.
         */
        sdio_release_host(ar_sdio->func);

        status = ath6kl_hif_intr_bh_handler(ar_sdio->ar);
        sdio_claim_host(ar_sdio->func);
        atomic_set(&ar_sdio->irq_handling, 0);
        WARN_ON(status && status != -ECANCELED);
}

static int ath6kl_sdio_power_on(struct ath6kl *ar)
{
        struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
        struct sdio_func *func = ar_sdio->func;
        int ret = 0;

        if (!ar_sdio->is_disabled)
                return 0;

        ath6kl_dbg(ATH6KL_DBG_BOOT, "sdio power on\n");

        sdio_claim_host(func);

        ret = sdio_enable_func(func);
        if (ret) {
                ath6kl_err("Unable to enable sdio func: %d\n", ret);
                sdio_release_host(func);
                return ret;
        }

        sdio_release_host(func);

        /*
         * Wait for hardware to initialise. It should take a lot less than
         * 10 ms but let's be conservative here.
         */
        msleep(10);

        ar_sdio->is_disabled = false;

        return ret;
}

static int ath6kl_sdio_power_off(struct ath6kl *ar)
{
        struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
        int ret;

        if (ar_sdio->is_disabled)
                return 0;

        ath6kl_dbg(ATH6KL_DBG_BOOT, "sdio power off\n");

        /* Disable the card */
        sdio_claim_host(ar_sdio->func);
        ret = sdio_disable_func(ar_sdio->func);
        sdio_release_host(ar_sdio->func);

        if (ret)
                return ret;

        ar_sdio->is_disabled = true;

        return ret;
}

static int ath6kl_sdio_write_async(struct ath6kl *ar, u32 address, u8 *buffer,
                                   u32 length, u32 request,
                                   struct htc_packet *packet)
{
        struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
        struct bus_request *bus_req;

        bus_req = ath6kl_sdio_alloc_busreq(ar_sdio);
        if (!bus_req)
                return -ENOMEM;

        bus_req->address = address;
        bus_req->buffer = buffer;
        bus_req->length = length;
        bus_req->request = request;
        bus_req->packet = packet;

        spin_lock_bh(&ar_sdio->wr_async_lock);
        list_add_tail(&bus_req->list, &ar_sdio->wr_asyncq);
        spin_unlock_bh(&ar_sdio->wr_async_lock);
        queue_work(ar->ath6kl_wq, &ar_sdio->wr_async_work);

        return 0;
}

static void ath6kl_sdio_irq_enable(struct ath6kl *ar)
{
        struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
        int ret;

        sdio_claim_host(ar_sdio->func);

        /* Register the isr */
        ret = sdio_claim_irq(ar_sdio->func, ath6kl_sdio_irq_handler);
        if (ret)
                ath6kl_err("Failed to claim sdio irq: %d\n", ret);

        sdio_release_host(ar_sdio->func);
}

static void ath6kl_sdio_irq_disable(struct ath6kl *ar)
{
        struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
        int ret;

        sdio_claim_host(ar_sdio->func);

        /* Mask our function IRQ */
        while (atomic_read(&ar_sdio->irq_handling)) {
                sdio_release_host(ar_sdio->func);
                schedule_timeout(HZ / 10);
                sdio_claim_host(ar_sdio->func);
        }

        ret = sdio_release_irq(ar_sdio->func);
        if (ret)
                ath6kl_err("Failed to release sdio irq: %d\n", ret);

        sdio_release_host(ar_sdio->func);
}

static struct hif_scatter_req *ath6kl_sdio_scatter_req_get(struct ath6kl *ar)
{
        struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
        struct hif_scatter_req *node = NULL;

        spin_lock_bh(&ar_sdio->scat_lock);

        if (!list_empty(&ar_sdio->scat_req)) {
                node = list_first_entry(&ar_sdio->scat_req,
                                        struct hif_scatter_req, list);
                list_del(&node->list);
        }

        spin_unlock_bh(&ar_sdio->scat_lock);

        return node;
}

static void ath6kl_sdio_scatter_req_add(struct ath6kl *ar,
                                        struct hif_scatter_req *s_req)
{
        struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);

        spin_lock_bh(&ar_sdio->scat_lock);

        list_add_tail(&s_req->list, &ar_sdio->scat_req);

        spin_unlock_bh(&ar_sdio->scat_lock);
}

/* scatter gather read write request */
static int ath6kl_sdio_async_rw_scatter(struct ath6kl *ar,
                                        struct hif_scatter_req *scat_req)
{
        struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
        u32 request = scat_req->req;
        int status = 0;

        if (!scat_req->len)
                return -EINVAL;

        ath6kl_dbg(ATH6KL_DBG_SCATTER,
                   "hif-scatter: total len: %d scatter entries: %d\n",
                   scat_req->len, scat_req->scat_entries);

        if (request & HIF_SYNCHRONOUS)
                status = ath6kl_sdio_scat_rw(ar_sdio, scat_req->busrequest);
        else {
                spin_lock_bh(&ar_sdio->wr_async_lock);
                list_add_tail(&scat_req->busrequest->list, &ar_sdio->wr_asyncq);
                spin_unlock_bh(&ar_sdio->wr_async_lock);
                queue_work(ar->ath6kl_wq, &ar_sdio->wr_async_work);
        }

        return status;
}

/* clean up scatter support */
static void ath6kl_sdio_cleanup_scatter(struct ath6kl *ar)
{
        struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
        struct hif_scatter_req *s_req, *tmp_req;

        /* empty the free list */
        spin_lock_bh(&ar_sdio->scat_lock);
        list_for_each_entry_safe(s_req, tmp_req, &ar_sdio->scat_req, list) {
                list_del(&s_req->list);
                spin_unlock_bh(&ar_sdio->scat_lock);

                /*
                 * FIXME: should we also call completion handler with
                 * ath6kl_hif_rw_comp_handler() with status -ECANCELED so
                 * that the packet is properly freed?
                 */
                if (s_req->busrequest)
                        ath6kl_sdio_free_bus_req(ar_sdio, s_req->busrequest);
                kfree(s_req->virt_dma_buf);
                kfree(s_req->sgentries);
                kfree(s_req);

                spin_lock_bh(&ar_sdio->scat_lock);
        }
        spin_unlock_bh(&ar_sdio->scat_lock);
}

/* setup of HIF scatter resources */
static int ath6kl_sdio_enable_scatter(struct ath6kl *ar)
{
        struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
        struct htc_target *target = ar->htc_target;
        int ret = 0;
        bool virt_scat = false;

        if (ar_sdio->scatter_enabled)
                return 0;

        ar_sdio->scatter_enabled = true;

        /* check if host supports scatter and it meets our requirements */
        if (ar_sdio->func->card->host->max_segs < MAX_SCATTER_ENTRIES_PER_REQ) {
                ath6kl_err("host only supports scatter of %d entries, need %d\n",
                           ar_sdio->func->card->host->max_segs,
                           MAX_SCATTER_ENTRIES_PER_REQ);
                virt_scat = true;
        }

        if (!virt_scat) {
                ret = ath6kl_sdio_alloc_prep_scat_req(ar_sdio,
                                MAX_SCATTER_ENTRIES_PER_REQ,
                                MAX_SCATTER_REQUESTS, virt_scat);

                if (!ret) {
                        ath6kl_dbg(ATH6KL_DBG_BOOT,
                                   "hif-scatter enabled requests %d entries %d\n",
                                   MAX_SCATTER_REQUESTS,
                                   MAX_SCATTER_ENTRIES_PER_REQ);

                        target->max_scat_entries = MAX_SCATTER_ENTRIES_PER_REQ;
                        target->max_xfer_szper_scatreq =
                                                MAX_SCATTER_REQ_TRANSFER_SIZE;
                } else {
                        ath6kl_sdio_cleanup_scatter(ar);
                        ath6kl_warn("hif scatter resource setup failed, trying virtual scatter method\n");
                }
        }

        if (virt_scat || ret) {
                ret = ath6kl_sdio_alloc_prep_scat_req(ar_sdio,
                                ATH6KL_SCATTER_ENTRIES_PER_REQ,
                                ATH6KL_SCATTER_REQS, virt_scat);

                if (ret) {
                        ath6kl_err("failed to alloc virtual scatter resources!\n");
                        ath6kl_sdio_cleanup_scatter(ar);
                        return ret;
                }

                ath6kl_dbg(ATH6KL_DBG_BOOT,
                           "virtual scatter enabled requests %d entries %d\n",
                           ATH6KL_SCATTER_REQS, ATH6KL_SCATTER_ENTRIES_PER_REQ);

                target->max_scat_entries = ATH6KL_SCATTER_ENTRIES_PER_REQ;
                target->max_xfer_szper_scatreq =
                                        ATH6KL_MAX_TRANSFER_SIZE_PER_SCATTER;
        }

        return 0;
}

static int ath6kl_sdio_config(struct ath6kl *ar)
{
        struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
        struct sdio_func *func = ar_sdio->func;
        int ret;

        sdio_claim_host(func);

        if ((ar_sdio->id->device & MANUFACTURER_ID_ATH6KL_BASE_MASK) >=
            MANUFACTURER_ID_AR6003_BASE) {
                /* enable 4-bit ASYNC interrupt on AR6003 or later */
                ret = ath6kl_sdio_func0_cmd52_wr_byte(func->card,
                                                CCCR_SDIO_IRQ_MODE_REG,
                                                SDIO_IRQ_MODE_ASYNC_4BIT_IRQ);
                if (ret) {
                        ath6kl_err("Failed to enable 4-bit async irq mode %d\n",
                                   ret);
                        sdio_release_host(func);
                        return ret;
                }

                ath6kl_dbg(ATH6KL_DBG_BOOT, "4-bit async irq mode enabled\n");
        }

        /* give us some time to enable, in ms */
        func->enable_timeout = 100;

        ret = sdio_set_block_size(func, HIF_MBOX_BLOCK_SIZE);
        if (ret) {
                ath6kl_err("Set sdio block size %d failed: %d\n",
                           HIF_MBOX_BLOCK_SIZE, ret);
                sdio_release_host(func);
                return ret;
        }

        sdio_release_host(func);

        return 0;
}

static int ath6kl_sdio_suspend(struct ath6kl *ar, struct cfg80211_wowlan *wow)
{
        struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
        struct sdio_func *func = ar_sdio->func;
        mmc_pm_flag_t flags;
        int ret;

        flags = sdio_get_host_pm_caps(func);

        ath6kl_dbg(ATH6KL_DBG_SUSPEND, "sdio suspend pm_caps 0x%x\n", flags);

        if (!(flags & MMC_PM_KEEP_POWER) ||
            (ar->conf_flags & ATH6KL_CONF_SUSPEND_CUTPOWER)) {
                /* as the host doesn't support keep power, we cut power */
                return ath6kl_cfg80211_suspend(ar, ATH6KL_CFG_SUSPEND_CUTPOWER,
                                               NULL);
        }

        ret = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);
        if (ret) {
                printk(KERN_ERR "ath6kl: set sdio pm flags failed: %d\n",
                       ret);
                return ret;
        }

        if (!(flags & MMC_PM_WAKE_SDIO_IRQ))
                goto deepsleep;

        /* sdio irq wakes up host */

        if (ar->state == ATH6KL_STATE_SCHED_SCAN) {
                ret = ath6kl_cfg80211_suspend(ar,
                                              ATH6KL_CFG_SUSPEND_SCHED_SCAN,
                                              NULL);
                if (ret) {
                        ath6kl_warn("Schedule scan suspend failed: %d\n", ret);
                        return ret;
                }

                ret = sdio_set_host_pm_flags(func, MMC_PM_WAKE_SDIO_IRQ);
                if (ret)
                        ath6kl_warn("set sdio wake irq flag failed: %d\n", ret);

                return ret;
        }

        if (wow) {
                /*
                 * The host sdio controller is capable of keep power and
                 * sdio irq wake up at this point. It's fine to continue
                 * wow suspend operation.
                 */
                ret = ath6kl_cfg80211_suspend(ar, ATH6KL_CFG_SUSPEND_WOW, wow);
                if (ret)
                        return ret;

                ret = sdio_set_host_pm_flags(func, MMC_PM_WAKE_SDIO_IRQ);
                if (ret)
                        ath6kl_err("set sdio wake irq flag failed: %d\n", ret);

                return ret;
        }

deepsleep:
        return ath6kl_cfg80211_suspend(ar, ATH6KL_CFG_SUSPEND_DEEPSLEEP, NULL);
}

static int ath6kl_sdio_resume(struct ath6kl *ar)
{
        switch (ar->state) {
        case ATH6KL_STATE_OFF:
        case ATH6KL_STATE_CUTPOWER:
                ath6kl_dbg(ATH6KL_DBG_SUSPEND,
                           "sdio resume configuring sdio\n");

                /* need to set sdio settings after power is cut from sdio */
                ath6kl_sdio_config(ar);
                break;
        case ATH6KL_STATE_ON:
                break;
        case ATH6KL_STATE_DEEPSLEEP:
                break;
        case ATH6KL_STATE_WOW:
                break;
        case ATH6KL_STATE_SCHED_SCAN:
                break;
        }

        ath6kl_cfg80211_resume(ar);

        return 0;
}

/* set the window address register (using 4-byte register access). */
static int ath6kl_set_addrwin_reg(struct ath6kl *ar, u32 reg_addr, u32 addr)
{
        int status;
        u8 addr_val[4];
        s32 i;

        /*
         * Write bytes 1,2,3 of the register to set the upper address bytes;
         * the LSB is written last to initiate the access cycle.
         */
        for (i = 1; i <= 3; i++) {
                /*
                 * Fill the buffer with the address byte value we want to
                 * hit 4 times.
                 */
                memset(addr_val, ((u8 *)&addr)[i], 4);

                /*
                 * Hit each byte of the register address with a 4-byte
                 * write operation to the same address; this is a harmless
                 * operation.
                 */
                status = ath6kl_sdio_read_write_sync(ar, reg_addr + i, addr_val,
                                                     4, HIF_WR_SYNC_BYTE_FIX);
                if (status)
                        break;
        }

        if (status) {
                ath6kl_err("%s: failed to write initial bytes of 0x%x to window reg: 0x%X\n",
                           __func__, addr, reg_addr);
                return status;
        }

        /*
         * Write the address register again, this time write the whole
         * 4-byte value. The effect here is that the LSB write causes the
         * cycle to start; the extra write to bytes 1,2,3 has no
         * effect since we are writing the same values again.
         */
        status = ath6kl_sdio_read_write_sync(ar, reg_addr, (u8 *)(&addr),
                                             4, HIF_WR_SYNC_BYTE_INC);
        if (status) {
                ath6kl_err("%s: failed to write 0x%x to window reg: 0x%X\n",
                           __func__, addr, reg_addr);
                return status;
        }

        return 0;
}
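
/*
 * Worked example (illustrative only): writing target address
 * 0x00123456 to the window register first hits bytes 1..3 with
 * 0x34, 0x12 and 0x00 respectively (each via a fixed-address 4-byte
 * write), then the final incrementing 4-byte write of 0x00123456
 * lands 0x56 on the LSB and kicks off the access cycle.
 */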

static int ath6kl_sdio_diag_read32(struct ath6kl *ar, u32 address, u32 *data)
{
        int status;

        /* set window register to start read cycle */
        status = ath6kl_set_addrwin_reg(ar, WINDOW_READ_ADDR_ADDRESS,
                                        address);
        if (status)
                return status;

        /* read the data */
        status = ath6kl_sdio_read_write_sync(ar, WINDOW_DATA_ADDRESS,
                                             (u8 *)data, sizeof(u32),
                                             HIF_RD_SYNC_BYTE_INC);
        if (status) {
                ath6kl_err("%s: failed to read from window data addr\n",
                           __func__);
                return status;
        }

        return status;
}

static int ath6kl_sdio_diag_write32(struct ath6kl *ar, u32 address,
                                    __le32 data)
{
        int status;
        u32 val = (__force u32) data;

        /* set write data */
        status = ath6kl_sdio_read_write_sync(ar, WINDOW_DATA_ADDRESS,
                                             (u8 *) &val, sizeof(u32),
                                             HIF_WR_SYNC_BYTE_INC);
        if (status) {
                ath6kl_err("%s: failed to write 0x%x to window data addr\n",
                           __func__, data);
                return status;
        }

        /* set window register, which starts the write cycle */
        return ath6kl_set_addrwin_reg(ar, WINDOW_WRITE_ADDR_ADDRESS,
                                      address);
}
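
/*
 * Note on ordering (sketch of the windowed access protocol above):
 * reads program the read-address window first and then pull the
 * result from WINDOW_DATA_ADDRESS, while writes stage the value in
 * WINDOW_DATA_ADDRESS first and only then program the write-address
 * window, since setting the window is what triggers the cycle.
 */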

static int ath6kl_sdio_bmi_credits(struct ath6kl *ar)
{
        u32 addr;
        unsigned long timeout;
        int ret;

        ar->bmi.cmd_credits = 0;

        /* Read the counter register to get the command credits */
        addr = COUNT_DEC_ADDRESS + (HTC_MAILBOX_NUM_MAX + ENDPOINT1) * 4;

        timeout = jiffies + msecs_to_jiffies(BMI_COMMUNICATION_TIMEOUT);
        while (time_before(jiffies, timeout) && !ar->bmi.cmd_credits) {
                /*
                 * Hit the credit counter with a 4-byte access; the first byte
                 * read will hit the counter and cause a decrement, while the
                 * remaining 3 bytes have no effect. The rationale behind this
                 * is to make all HIF accesses 4-byte aligned.
                 */
                ret = ath6kl_sdio_read_write_sync(ar, addr,
                                                  (u8 *)&ar->bmi.cmd_credits, 4,
                                                  HIF_RD_SYNC_BYTE_INC);
                if (ret) {
                        ath6kl_err("Unable to decrement the command credit count register: %d\n",
                                   ret);
                        return ret;
                }

                /*
                 * The counter is only 8 bits; ignore anything in the
                 * upper 3 bytes.
                 */
                ar->bmi.cmd_credits &= 0xFF;
        }

        if (!ar->bmi.cmd_credits) {
                ath6kl_err("bmi communication timeout\n");
                return -ETIMEDOUT;
        }

        return 0;
}

static int ath6kl_bmi_get_rx_lkahd(struct ath6kl *ar)
{
        unsigned long timeout;
        u32 rx_word = 0;
        int ret = 0;

        timeout = jiffies + msecs_to_jiffies(BMI_COMMUNICATION_TIMEOUT);
        while ((time_before(jiffies, timeout)) && !rx_word) {
                ret = ath6kl_sdio_read_write_sync(ar,
                                                  RX_LOOKAHEAD_VALID_ADDRESS,
                                                  (u8 *)&rx_word,
                                                  sizeof(rx_word),
                                                  HIF_RD_SYNC_BYTE_INC);
                if (ret) {
                        ath6kl_err("unable to read RX_LOOKAHEAD_VALID\n");
                        return ret;
                }

                /* all we really want is one bit */
                rx_word &= (1 << ENDPOINT1);
        }

        if (!rx_word) {
                ath6kl_err("bmi_recv_buf FIFO empty\n");
                return -EINVAL;
        }

        return ret;
}

static int ath6kl_sdio_bmi_write(struct ath6kl *ar, u8 *buf, u32 len)
{
        int ret;
        u32 addr;

        ret = ath6kl_sdio_bmi_credits(ar);
        if (ret)
                return ret;

        addr = ar->mbox_info.htc_addr;

        ret = ath6kl_sdio_read_write_sync(ar, addr, buf, len,
                                          HIF_WR_SYNC_BYTE_INC);
        if (ret)
                ath6kl_err("unable to send the bmi data to the device\n");

        return ret;
}

static int ath6kl_sdio_bmi_read(struct ath6kl *ar, u8 *buf, u32 len)
{
        int ret;
        u32 addr;

        /*
         * During normal bootup, small reads may be required.
         * Rather than issue an HIF Read and then wait as the Target
         * adds successive bytes to the FIFO, we wait here until
         * we know that response data is available.
         *
         * This allows us to cleanly timeout on an unexpected
         * Target failure rather than risk problems at the HIF level.
         * In particular, this avoids SDIO timeouts and possibly garbage
         * data on some host controllers. And on an interconnect
         * such as Compact Flash (as well as some SDIO masters) which
         * does not provide any indication on data timeout, it avoids
         * a potential hang or garbage response.
         *
         * Synchronization is more difficult for reads larger than the
         * size of the MBOX FIFO (128B), because the Target is unable
         * to push the 129th byte of data until AFTER the Host posts an
         * HIF Read and removes some FIFO data. So for large reads the
         * Host proceeds to post an HIF Read BEFORE all the data is
         * actually available to read. Fortunately, large BMI reads do
         * not occur in practice -- they're supported for debug/development.
         *
         * So Host/Target BMI synchronization is divided into these cases:
         * CASE 1: length < 4
         *         Should not happen
         *
         * CASE 2: 4 <= length <= 128
         *         Wait for first 4 bytes to be in FIFO
         *         If CONSERVATIVE_BMI_READ is enabled, also wait for
         *         a BMI command credit, which indicates that the ENTIRE
         *         response is available in the FIFO
         *
         * CASE 3: length > 128
         *         Wait for the first 4 bytes to be in FIFO
         *
         * For most uses, a small timeout should be sufficient and we will
         * usually see a response quickly; but there may be some unusual
         * (debug) cases of BMI_EXECUTE where we want a larger timeout.
         * For now, we use an unbounded busy loop while waiting for
         * BMI_EXECUTE.
         *
         * If BMI_EXECUTE ever needs to support longer-latency execution,
         * especially in production, this code needs to be enhanced to sleep
         * and yield. Also note that BMI_COMMUNICATION_TIMEOUT is currently
         * a function of Host processor speed.
         */
        if (len >= 4) { /* NB: Currently, always true */
                ret = ath6kl_bmi_get_rx_lkahd(ar);
                if (ret)
                        return ret;
        }

        addr = ar->mbox_info.htc_addr;
        ret = ath6kl_sdio_read_write_sync(ar, addr, buf, len,
                                          HIF_RD_SYNC_BYTE_INC);
        if (ret) {
                ath6kl_err("Unable to read the bmi data from the device: %d\n",
                           ret);
                return ret;
        }

        return 0;
}

static void ath6kl_sdio_stop(struct ath6kl *ar)
{
        struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
        struct bus_request *req, *tmp_req;
        void *context;

        /* FIXME: make sure that wq is not queued again */

        cancel_work_sync(&ar_sdio->wr_async_work);

        spin_lock_bh(&ar_sdio->wr_async_lock);

        list_for_each_entry_safe(req, tmp_req, &ar_sdio->wr_asyncq, list) {
                list_del(&req->list);

                if (req->scat_req) {
                        /* this is a scatter gather request */
                        req->scat_req->status = -ECANCELED;
                        req->scat_req->complete(ar_sdio->ar->htc_target,
                                                req->scat_req);
                } else {
                        context = req->packet;
                        ath6kl_sdio_free_bus_req(ar_sdio, req);
                        ath6kl_hif_rw_comp_handler(context, -ECANCELED);
                }
        }

        spin_unlock_bh(&ar_sdio->wr_async_lock);

        WARN_ON(get_queue_depth(&ar_sdio->scat_req) != 4);
}

static const struct ath6kl_hif_ops ath6kl_sdio_ops = {
        .read_write_sync = ath6kl_sdio_read_write_sync,
        .write_async = ath6kl_sdio_write_async,
        .irq_enable = ath6kl_sdio_irq_enable,
        .irq_disable = ath6kl_sdio_irq_disable,
        .scatter_req_get = ath6kl_sdio_scatter_req_get,
        .scatter_req_add = ath6kl_sdio_scatter_req_add,
        .enable_scatter = ath6kl_sdio_enable_scatter,
        .scat_req_rw = ath6kl_sdio_async_rw_scatter,
        .cleanup_scatter = ath6kl_sdio_cleanup_scatter,
        .suspend = ath6kl_sdio_suspend,
        .resume = ath6kl_sdio_resume,
        .diag_read32 = ath6kl_sdio_diag_read32,
        .diag_write32 = ath6kl_sdio_diag_write32,
        .bmi_read = ath6kl_sdio_bmi_read,
        .bmi_write = ath6kl_sdio_bmi_write,
        .power_on = ath6kl_sdio_power_on,
        .power_off = ath6kl_sdio_power_off,
        .stop = ath6kl_sdio_stop,
};

#ifdef CONFIG_PM_SLEEP

/*
 * Empty handlers so that the mmc subsystem doesn't remove us entirely during
 * suspend. We instead follow the cfg80211 suspend/resume handlers.
 */
static int ath6kl_sdio_pm_suspend(struct device *device)
{
        ath6kl_dbg(ATH6KL_DBG_SUSPEND, "sdio pm suspend\n");

        return 0;
}

static int ath6kl_sdio_pm_resume(struct device *device)
{
        ath6kl_dbg(ATH6KL_DBG_SUSPEND, "sdio pm resume\n");

        return 0;
}

static SIMPLE_DEV_PM_OPS(ath6kl_sdio_pm_ops, ath6kl_sdio_pm_suspend,
                         ath6kl_sdio_pm_resume);

#define ATH6KL_SDIO_PM_OPS (&ath6kl_sdio_pm_ops)

#else

#define ATH6KL_SDIO_PM_OPS NULL

#endif /* CONFIG_PM_SLEEP */

static int ath6kl_sdio_probe(struct sdio_func *func,
                             const struct sdio_device_id *id)
{
        int ret;
        struct ath6kl_sdio *ar_sdio;
        struct ath6kl *ar;
        int count;

        ath6kl_dbg(ATH6KL_DBG_BOOT,
                   "sdio new func %d vendor 0x%x device 0x%x block 0x%x/0x%x\n",
                   func->num, func->vendor, func->device,
                   func->max_blksize, func->cur_blksize);

        ar_sdio = kzalloc(sizeof(struct ath6kl_sdio), GFP_KERNEL);
        if (!ar_sdio)
                return -ENOMEM;

        ar_sdio->dma_buffer = kzalloc(HIF_DMA_BUFFER_SIZE, GFP_KERNEL);
        if (!ar_sdio->dma_buffer) {
                ret = -ENOMEM;
                goto err_hif;
        }

        ar_sdio->func = func;
        sdio_set_drvdata(func, ar_sdio);

        ar_sdio->id = id;
        ar_sdio->is_disabled = true;

        spin_lock_init(&ar_sdio->lock);
        spin_lock_init(&ar_sdio->scat_lock);
        spin_lock_init(&ar_sdio->wr_async_lock);
        mutex_init(&ar_sdio->dma_buffer_mutex);

        INIT_LIST_HEAD(&ar_sdio->scat_req);
        INIT_LIST_HEAD(&ar_sdio->bus_req_freeq);
        INIT_LIST_HEAD(&ar_sdio->wr_asyncq);

        INIT_WORK(&ar_sdio->wr_async_work, ath6kl_sdio_write_async_work);

        for (count = 0; count < BUS_REQUEST_MAX_NUM; count++)
                ath6kl_sdio_free_bus_req(ar_sdio, &ar_sdio->bus_req[count]);

        ar = ath6kl_core_alloc(&ar_sdio->func->dev);
        if (!ar) {
                ath6kl_err("Failed to alloc ath6kl core\n");
                ret = -ENOMEM;
                goto err_dma;
        }

        ar_sdio->ar = ar;
        ar->hif_type = ATH6KL_HIF_TYPE_SDIO;
        ar->hif_priv = ar_sdio;
        ar->hif_ops = &ath6kl_sdio_ops;
        ar->bmi.max_data_size = 256;

        ath6kl_sdio_set_mbox_info(ar);

        ret = ath6kl_sdio_config(ar);
        if (ret) {
                ath6kl_err("Failed to config sdio: %d\n", ret);
                goto err_core_alloc;
        }

        ret = ath6kl_core_init(ar);
        if (ret) {
                ath6kl_err("Failed to init ath6kl core\n");
                goto err_core_alloc;
        }

        return ret;

err_core_alloc:
        ath6kl_core_free(ar_sdio->ar);
err_dma:
        kfree(ar_sdio->dma_buffer);
err_hif:
        kfree(ar_sdio);

        return ret;
}

static void ath6kl_sdio_remove(struct sdio_func *func)
{
        struct ath6kl_sdio *ar_sdio;

        ath6kl_dbg(ATH6KL_DBG_BOOT,
                   "sdio removed func %d vendor 0x%x device 0x%x\n",
                   func->num, func->vendor, func->device);

        ar_sdio = sdio_get_drvdata(func);

        ath6kl_stop_txrx(ar_sdio->ar);
        cancel_work_sync(&ar_sdio->wr_async_work);

        ath6kl_core_cleanup(ar_sdio->ar);

        kfree(ar_sdio->dma_buffer);
        kfree(ar_sdio);
}

static const struct sdio_device_id ath6kl_sdio_devices[] = {
        {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6003_BASE | 0x0))},
        {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6003_BASE | 0x1))},
        {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6004_BASE | 0x0))},
        {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6004_BASE | 0x1))},
        {},
};

MODULE_DEVICE_TABLE(sdio, ath6kl_sdio_devices);

static struct sdio_driver ath6kl_sdio_driver = {
        .name = "ath6kl_sdio",
        .id_table = ath6kl_sdio_devices,
        .probe = ath6kl_sdio_probe,
        .remove = ath6kl_sdio_remove,
        .drv.pm = ATH6KL_SDIO_PM_OPS,
};

static int __init ath6kl_sdio_init(void)
{
        int ret;

        ret = sdio_register_driver(&ath6kl_sdio_driver);
        if (ret)
                ath6kl_err("sdio driver registration failed: %d\n", ret);

        return ret;
}

static void __exit ath6kl_sdio_exit(void)
{
        sdio_unregister_driver(&ath6kl_sdio_driver);
}

module_init(ath6kl_sdio_init);
module_exit(ath6kl_sdio_exit);

MODULE_AUTHOR("Atheros Communications, Inc.");
MODULE_DESCRIPTION("Driver support for Atheros AR600x SDIO devices");
MODULE_LICENSE("Dual BSD/GPL");

MODULE_FIRMWARE(AR6003_HW_2_0_OTP_FILE);
MODULE_FIRMWARE(AR6003_HW_2_0_FIRMWARE_FILE);
MODULE_FIRMWARE(AR6003_HW_2_0_PATCH_FILE);
MODULE_FIRMWARE(AR6003_HW_2_0_BOARD_DATA_FILE);
MODULE_FIRMWARE(AR6003_HW_2_0_DEFAULT_BOARD_DATA_FILE);
MODULE_FIRMWARE(AR6003_HW_2_1_1_OTP_FILE);
MODULE_FIRMWARE(AR6003_HW_2_1_1_FIRMWARE_FILE);
MODULE_FIRMWARE(AR6003_HW_2_1_1_PATCH_FILE);
MODULE_FIRMWARE(AR6003_HW_2_1_1_BOARD_DATA_FILE);
MODULE_FIRMWARE(AR6003_HW_2_1_1_DEFAULT_BOARD_DATA_FILE);
MODULE_FIRMWARE(AR6004_HW_1_0_FIRMWARE_FILE);
MODULE_FIRMWARE(AR6004_HW_1_0_BOARD_DATA_FILE);
MODULE_FIRMWARE(AR6004_HW_1_0_DEFAULT_BOARD_DATA_FILE);
MODULE_FIRMWARE(AR6004_HW_1_1_FIRMWARE_FILE);
MODULE_FIRMWARE(AR6004_HW_1_1_BOARD_DATA_FILE);
MODULE_FIRMWARE(AR6004_HW_1_1_DEFAULT_BOARD_DATA_FILE);