ACPI / debugger: Add module support for ACPI debugger
[cascardo/linux.git] / drivers / acpi / osl.c
1 /*
2  *  acpi_osl.c - OS-dependent functions ($Revision: 83 $)
3  *
4  *  Copyright (C) 2000       Andrew Henroid
5  *  Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
6  *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
7  *  Copyright (c) 2008 Intel Corporation
8  *   Author: Matthew Wilcox <willy@linux.intel.com>
9  *
10  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
11  *
12  *  This program is free software; you can redistribute it and/or modify
13  *  it under the terms of the GNU General Public License as published by
14  *  the Free Software Foundation; either version 2 of the License, or
15  *  (at your option) any later version.
16  *
17  *  This program is distributed in the hope that it will be useful,
18  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
19  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
20  *  GNU General Public License for more details.
21  *
22  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
23  *
24  */
25
26 #include <linux/module.h>
27 #include <linux/kernel.h>
28 #include <linux/slab.h>
29 #include <linux/mm.h>
30 #include <linux/highmem.h>
31 #include <linux/pci.h>
32 #include <linux/interrupt.h>
33 #include <linux/kmod.h>
34 #include <linux/delay.h>
35 #include <linux/workqueue.h>
36 #include <linux/nmi.h>
37 #include <linux/acpi.h>
38 #include <linux/efi.h>
39 #include <linux/ioport.h>
40 #include <linux/list.h>
41 #include <linux/jiffies.h>
42 #include <linux/semaphore.h>
43
44 #include <asm/io.h>
45 #include <asm/uaccess.h>
46 #include <linux/io-64-nonatomic-lo-hi.h>
47
48 #include "internal.h"
49
50 #define _COMPONENT              ACPI_OS_SERVICES
51 ACPI_MODULE_NAME("osl");
52
/*
 * Deferred procedure call: an ACPICA callback plus its argument, wrapped
 * in a work_struct so it can be queued on one of the kacpi* workqueues.
 */
struct acpi_os_dpc {
	acpi_osd_exec_callback function;
	void *context;
	struct work_struct work;
};
58
59 #ifdef CONFIG_ACPI_CUSTOM_DSDT
60 #include CONFIG_ACPI_CUSTOM_DSDT_FILE
61 #endif
62
63 #ifdef ENABLE_DEBUGGER
64 #include <linux/kdb.h>
65
66 /* stuff for debugger support */
67 int acpi_in_debugger;
68 EXPORT_SYMBOL(acpi_in_debugger);
69 #endif                          /*ENABLE_DEBUGGER */
70
71 static int (*__acpi_os_prepare_sleep)(u8 sleep_state, u32 pm1a_ctrl,
72                                       u32 pm1b_ctrl);
73 static int (*__acpi_os_prepare_extended_sleep)(u8 sleep_state, u32 val_a,
74                                       u32 val_b);
75
76 static acpi_osd_handler acpi_irq_handler;
77 static void *acpi_irq_context;
78 static struct workqueue_struct *kacpid_wq;
79 static struct workqueue_struct *kacpi_notify_wq;
80 static struct workqueue_struct *kacpi_hotplug_wq;
81 static bool acpi_os_initialized;
82 unsigned int acpi_sci_irq = INVALID_ACPI_IRQ;
83
84 /*
85  * This list of permanent mappings is for memory that may be accessed from
86  * interrupt context, where we can't do the ioremap().
87  */
88 struct acpi_ioremap {
89         struct list_head list;
90         void __iomem *virt;
91         acpi_physical_address phys;
92         acpi_size size;
93         unsigned long refcount;
94 };
95
96 static LIST_HEAD(acpi_ioremaps);
97 static DEFINE_MUTEX(acpi_ioremap_lock);
98
99 static void __init acpi_osi_setup_late(void);
100
101 /*
102  * The story of _OSI(Linux)
103  *
104  * From pre-history through Linux-2.6.22,
105  * Linux responded TRUE upon a BIOS OSI(Linux) query.
106  *
107  * Unfortunately, reference BIOS writers got wind of this
108  * and put OSI(Linux) in their example code, quickly exposing
109  * this string as ill-conceived and opening the door to
110  * an un-bounded number of BIOS incompatibilities.
111  *
112  * For example, OSI(Linux) was used on resume to re-POST a
113  * video card on one system, because Linux at that time
114  * could not do a speedy restore in its native driver.
115  * But then upon gaining quick native restore capability,
116  * Linux has no way to tell the BIOS to skip the time-consuming
117  * POST -- putting Linux at a permanent performance disadvantage.
118  * On another system, the BIOS writer used OSI(Linux)
119  * to infer native OS support for IPMI!  On other systems,
120  * OSI(Linux) simply got in the way of Linux claiming to
121  * be compatible with other operating systems, exposing
122  * BIOS issues such as skipped device initialization.
123  *
124  * So "Linux" turned out to be a really poor choice of
125  * OSI string, and from Linux-2.6.23 onward we respond FALSE.
126  *
127  * BIOS writers should NOT query _OSI(Linux) on future systems.
128  * Linux will complain on the console when it sees it, and return FALSE.
129  * To get Linux to return TRUE for your system  will require
130  * a kernel source update to add a DMI entry,
131  * or boot with "acpi_osi=Linux"
132  */
133
134 static struct osi_linux {
135         unsigned int    enable:1;
136         unsigned int    dmi:1;
137         unsigned int    cmdline:1;
138         unsigned int    default_disabling:1;
139 } osi_linux = {0, 0, 0, 0};
140
141 static u32 acpi_osi_handler(acpi_string interface, u32 supported)
142 {
143         if (!strcmp("Linux", interface)) {
144
145                 printk_once(KERN_NOTICE FW_BUG PREFIX
146                         "BIOS _OSI(Linux) query %s%s\n",
147                         osi_linux.enable ? "honored" : "ignored",
148                         osi_linux.cmdline ? " via cmdline" :
149                         osi_linux.dmi ? " via DMI" : "");
150         }
151
152         if (!strcmp("Darwin", interface)) {
153                 /*
154                  * Apple firmware will behave poorly if it receives positive
155                  * answers to "Darwin" and any other OS. Respond positively
156                  * to Darwin and then disable all other vendor strings.
157                  */
158                 acpi_update_interfaces(ACPI_DISABLE_ALL_VENDOR_STRINGS);
159                 supported = ACPI_UINT32_MAX;
160         }
161
162         return supported;
163 }
164
/*
 * Reserve the I/O-port or memory region described by a generic address
 * structure, so it appears in /proc/ioports or /proc/iomem under 'desc'.
 * 'gas' may be misaligned/packed, hence the memcpy of the address field.
 * Return values of request_region()/request_mem_region() are ignored:
 * these reservations are best-effort and never released.
 */
static void __init acpi_request_region (struct acpi_generic_address *gas,
	unsigned int length, char *desc)
{
	u64 addr;

	/* Handle possible alignment issues */
	memcpy(&addr, &gas->address, sizeof(addr));
	if (!addr || !length)
		return;

	/* Resources are never freed */
	if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_IO)
		request_region(addr, length, desc);
	else if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
		request_mem_region(addr, length, desc);
}
181
/*
 * Reserve the fixed ACPI hardware register blocks advertised by the FADT
 * (PM1a/b event and control, PM timer, PM2 control, GPE0/1) so other
 * drivers cannot claim them.  Registered as fs_initcall_sync: after the
 * FADT has been parsed, before most driver probing.  Always returns 0.
 */
static int __init acpi_reserve_resources(void)
{
	acpi_request_region(&acpi_gbl_FADT.xpm1a_event_block, acpi_gbl_FADT.pm1_event_length,
		"ACPI PM1a_EVT_BLK");

	acpi_request_region(&acpi_gbl_FADT.xpm1b_event_block, acpi_gbl_FADT.pm1_event_length,
		"ACPI PM1b_EVT_BLK");

	acpi_request_region(&acpi_gbl_FADT.xpm1a_control_block, acpi_gbl_FADT.pm1_control_length,
		"ACPI PM1a_CNT_BLK");

	acpi_request_region(&acpi_gbl_FADT.xpm1b_control_block, acpi_gbl_FADT.pm1_control_length,
		"ACPI PM1b_CNT_BLK");

	/* The PM timer block is only reserved when the FADT says it is 4 bytes. */
	if (acpi_gbl_FADT.pm_timer_length == 4)
		acpi_request_region(&acpi_gbl_FADT.xpm_timer_block, 4, "ACPI PM_TMR");

	acpi_request_region(&acpi_gbl_FADT.xpm2_control_block, acpi_gbl_FADT.pm2_control_length,
		"ACPI PM2_CNT_BLK");

	/* Length of GPE blocks must be a multiple of 2 (i.e. even) */

	if (!(acpi_gbl_FADT.gpe0_block_length & 0x1))
		acpi_request_region(&acpi_gbl_FADT.xgpe0_block,
			       acpi_gbl_FADT.gpe0_block_length, "ACPI GPE0_BLK");

	if (!(acpi_gbl_FADT.gpe1_block_length & 0x1))
		acpi_request_region(&acpi_gbl_FADT.xgpe1_block,
			       acpi_gbl_FADT.gpe1_block_length, "ACPI GPE1_BLK");

	return 0;
}
fs_initcall_sync(acpi_reserve_resources);
215
216 void acpi_os_printf(const char *fmt, ...)
217 {
218         va_list args;
219         va_start(args, fmt);
220         acpi_os_vprintf(fmt, args);
221         va_end(args);
222 }
223 EXPORT_SYMBOL(acpi_os_printf);
224
225 void acpi_os_vprintf(const char *fmt, va_list args)
226 {
227         static char buffer[512];
228
229         vsprintf(buffer, fmt, args);
230
231 #ifdef ENABLE_DEBUGGER
232         if (acpi_in_debugger) {
233                 kdb_printf("%s", buffer);
234         } else {
235                 printk(KERN_CONT "%s", buffer);
236         }
237 #else
238         if (acpi_debugger_write_log(buffer) < 0)
239                 printk(KERN_CONT "%s", buffer);
240 #endif
241 }
242
#ifdef CONFIG_KEXEC
/*
 * Physical address of the RSDP, passed via the "acpi_rsdp=" early
 * parameter (in hex) so a kexec'ed kernel can skip the RSDP scan.
 */
static unsigned long acpi_rsdp;
static int __init setup_acpi_rsdp(char *arg)
{
	/* Argument is parsed as a hexadecimal physical address. */
	if (kstrtoul(arg, 16, &acpi_rsdp))
		return -EINVAL;
	return 0;
}
early_param("acpi_rsdp", setup_acpi_rsdp);
#endif
253
254 acpi_physical_address __init acpi_os_get_root_pointer(void)
255 {
256 #ifdef CONFIG_KEXEC
257         if (acpi_rsdp)
258                 return acpi_rsdp;
259 #endif
260
261         if (efi_enabled(EFI_CONFIG_TABLES)) {
262                 if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
263                         return efi.acpi20;
264                 else if (efi.acpi != EFI_INVALID_TABLE_ADDR)
265                         return efi.acpi;
266                 else {
267                         printk(KERN_ERR PREFIX
268                                "System description tables not found\n");
269                         return 0;
270                 }
271         } else if (IS_ENABLED(CONFIG_ACPI_LEGACY_TABLES_LOOKUP)) {
272                 acpi_physical_address pa = 0;
273
274                 acpi_find_root_pointer(&pa);
275                 return pa;
276         }
277
278         return 0;
279 }
280
281 /* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */
282 static struct acpi_ioremap *
283 acpi_map_lookup(acpi_physical_address phys, acpi_size size)
284 {
285         struct acpi_ioremap *map;
286
287         list_for_each_entry_rcu(map, &acpi_ioremaps, list)
288                 if (map->phys <= phys &&
289                     phys + size <= map->phys + map->size)
290                         return map;
291
292         return NULL;
293 }
294
295 /* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */
296 static void __iomem *
297 acpi_map_vaddr_lookup(acpi_physical_address phys, unsigned int size)
298 {
299         struct acpi_ioremap *map;
300
301         map = acpi_map_lookup(phys, size);
302         if (map)
303                 return map->virt + (phys - map->phys);
304
305         return NULL;
306 }
307
/*
 * Find an existing permanent mapping that covers [phys, phys + size) and
 * take an extra reference on it.  Returns the virtual address within the
 * mapping, or NULL if none exists — no new mapping is created here.
 */
void __iomem *acpi_os_get_iomem(acpi_physical_address phys, unsigned int size)
{
	struct acpi_ioremap *map;
	void __iomem *virt = NULL;

	mutex_lock(&acpi_ioremap_lock);
	map = acpi_map_lookup(phys, size);
	if (map) {
		virt = map->virt + (phys - map->phys);
		map->refcount++;
	}
	mutex_unlock(&acpi_ioremap_lock);
	return virt;
}
EXPORT_SYMBOL_GPL(acpi_os_get_iomem);
323
324 /* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */
325 static struct acpi_ioremap *
326 acpi_map_lookup_virt(void __iomem *virt, acpi_size size)
327 {
328         struct acpi_ioremap *map;
329
330         list_for_each_entry_rcu(map, &acpi_ioremaps, list)
331                 if (map->virt <= virt &&
332                     virt + size <= map->virt + map->size)
333                         return map;
334
335         return NULL;
336 }
337
#if defined(CONFIG_IA64) || defined(CONFIG_ARM64)
/* ioremap will take care of cache attributes */
#define should_use_kmap(pfn)   0
#else
/* Elsewhere, RAM pages must go through kmap(); ioremap() is for non-RAM. */
#define should_use_kmap(pfn)   page_is_ram(pfn)
#endif
344
345 static void __iomem *acpi_map(acpi_physical_address pg_off, unsigned long pg_sz)
346 {
347         unsigned long pfn;
348
349         pfn = pg_off >> PAGE_SHIFT;
350         if (should_use_kmap(pfn)) {
351                 if (pg_sz > PAGE_SIZE)
352                         return NULL;
353                 return (void __iomem __force *)kmap(pfn_to_page(pfn));
354         } else
355                 return acpi_os_ioremap(pg_off, pg_sz);
356 }
357
358 static void acpi_unmap(acpi_physical_address pg_off, void __iomem *vaddr)
359 {
360         unsigned long pfn;
361
362         pfn = pg_off >> PAGE_SHIFT;
363         if (should_use_kmap(pfn))
364                 kunmap(pfn_to_page(pfn));
365         else
366                 iounmap(vaddr);
367 }
368
/*
 * Map 'size' bytes of physical memory at 'phys' for ACPI use.
 *
 * Before acpi_gbl_permanent_mmap is set (early boot) this falls through
 * to the fixmap-based __acpi_map_table().  Afterwards, mappings are
 * page-aligned, refcounted, and published on 'acpi_ioremaps' under RCU
 * so they can also be found from atomic context (acpi_map_vaddr_lookup).
 *
 * Returns the virtual address corresponding to 'phys', or NULL on error.
 */
void __iomem *__init_refok
acpi_os_map_iomem(acpi_physical_address phys, acpi_size size)
{
	struct acpi_ioremap *map;
	void __iomem *virt;
	acpi_physical_address pg_off;
	acpi_size pg_sz;

	if (phys > ULONG_MAX) {
		printk(KERN_ERR PREFIX "Cannot map memory that high\n");
		return NULL;
	}

	if (!acpi_gbl_permanent_mmap)
		return __acpi_map_table((unsigned long)phys, size);

	mutex_lock(&acpi_ioremap_lock);
	/* Check if there's a suitable mapping already. */
	map = acpi_map_lookup(phys, size);
	if (map) {
		map->refcount++;
		goto out;
	}

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (!map) {
		mutex_unlock(&acpi_ioremap_lock);
		return NULL;
	}

	/* Round to page boundaries; the recorded mapping covers whole pages. */
	pg_off = round_down(phys, PAGE_SIZE);
	pg_sz = round_up(phys + size, PAGE_SIZE) - pg_off;
	virt = acpi_map(pg_off, pg_sz);
	if (!virt) {
		mutex_unlock(&acpi_ioremap_lock);
		kfree(map);
		return NULL;
	}

	INIT_LIST_HEAD(&map->list);
	map->virt = virt;
	map->phys = pg_off;
	map->size = pg_sz;
	map->refcount = 1;

	list_add_tail_rcu(&map->list, &acpi_ioremaps);

out:
	mutex_unlock(&acpi_ioremap_lock);
	return map->virt + (phys - map->phys);
}
EXPORT_SYMBOL_GPL(acpi_os_map_iomem);
421
422 void *__init_refok
423 acpi_os_map_memory(acpi_physical_address phys, acpi_size size)
424 {
425         return (void *)acpi_os_map_iomem(phys, size);
426 }
427 EXPORT_SYMBOL_GPL(acpi_os_map_memory);
428
/*
 * Drop one reference; on the last one, unlink the mapping from the RCU
 * list.  Caller must hold acpi_ioremap_lock and must call
 * acpi_os_map_cleanup() after releasing it to finish the teardown.
 */
static void acpi_os_drop_map_ref(struct acpi_ioremap *map)
{
	if (!--map->refcount)
		list_del_rcu(&map->list);
}
434
/*
 * Free a mapping whose refcount reached zero.  Must run WITHOUT
 * acpi_ioremap_lock held: it waits out an RCU grace period so lockless
 * readers (acpi_map_vaddr_lookup callers) finish before unmapping.
 */
static void acpi_os_map_cleanup(struct acpi_ioremap *map)
{
	if (!map->refcount) {
		synchronize_rcu_expedited();
		acpi_unmap(map->phys, map->virt);
		kfree(map);
	}
}
443
/*
 * Drop a reference taken by acpi_os_map_iomem() and tear the mapping
 * down once the last reference is gone.  Early-boot mappings (before
 * acpi_gbl_permanent_mmap) are returned to __acpi_unmap_table() instead.
 * Unmapping an address that was never mapped triggers a WARN.
 */
void __ref acpi_os_unmap_iomem(void __iomem *virt, acpi_size size)
{
	struct acpi_ioremap *map;

	if (!acpi_gbl_permanent_mmap) {
		__acpi_unmap_table(virt, size);
		return;
	}

	mutex_lock(&acpi_ioremap_lock);
	map = acpi_map_lookup_virt(virt, size);
	if (!map) {
		mutex_unlock(&acpi_ioremap_lock);
		WARN(true, PREFIX "%s: bad address %p\n", __func__, virt);
		return;
	}
	acpi_os_drop_map_ref(map);
	mutex_unlock(&acpi_ioremap_lock);

	/* RCU-deferred teardown must happen outside the lock. */
	acpi_os_map_cleanup(map);
}
EXPORT_SYMBOL_GPL(acpi_os_unmap_iomem);
466
467 void __ref acpi_os_unmap_memory(void *virt, acpi_size size)
468 {
469         return acpi_os_unmap_iomem((void __iomem *)virt, size);
470 }
471 EXPORT_SYMBOL_GPL(acpi_os_unmap_memory);
472
473 void __init early_acpi_os_unmap_memory(void __iomem *virt, acpi_size size)
474 {
475         if (!acpi_gbl_permanent_mmap)
476                 __acpi_unmap_table(virt, size);
477 }
478
/*
 * Permanently map the register described by a generic address structure,
 * if it lives in system memory space.  The mapping is created for the
 * side effect of being findable later (e.g. via acpi_map_vaddr_lookup()
 * from interrupt context); the returned pointer is deliberately dropped.
 *
 * Returns 0 on success or when no mapping is needed (non-memory space),
 * -EINVAL for a null address or zero bit width, -EIO on mapping failure.
 */
int acpi_os_map_generic_address(struct acpi_generic_address *gas)
{
	u64 addr;
	void __iomem *virt;

	if (gas->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
		return 0;

	/* Handle possible alignment issues */
	memcpy(&addr, &gas->address, sizeof(addr));
	if (!addr || !gas->bit_width)
		return -EINVAL;

	virt = acpi_os_map_iomem(addr, gas->bit_width / 8);
	if (!virt)
		return -EIO;

	return 0;
}
EXPORT_SYMBOL(acpi_os_map_generic_address);
499
/*
 * Undo acpi_os_map_generic_address(): look the mapping up by physical
 * address, drop the reference, and clean up if it was the last one.
 * Silently returns for non-memory spaces, null addresses, zero widths,
 * or when no matching mapping exists.
 */
void acpi_os_unmap_generic_address(struct acpi_generic_address *gas)
{
	u64 addr;
	struct acpi_ioremap *map;

	if (gas->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
		return;

	/* Handle possible alignment issues */
	memcpy(&addr, &gas->address, sizeof(addr));
	if (!addr || !gas->bit_width)
		return;

	mutex_lock(&acpi_ioremap_lock);
	map = acpi_map_lookup(addr, gas->bit_width / 8);
	if (!map) {
		mutex_unlock(&acpi_ioremap_lock);
		return;
	}
	acpi_os_drop_map_ref(map);
	mutex_unlock(&acpi_ioremap_lock);

	/* RCU-deferred teardown must happen outside the lock. */
	acpi_os_map_cleanup(map);
}
EXPORT_SYMBOL(acpi_os_unmap_generic_address);
525
#ifdef ACPI_FUTURE_USAGE
/* Translate a kernel linear-map virtual address to its physical address. */
acpi_status
acpi_os_get_physical_address(void *virt, acpi_physical_address * phys)
{
	if (!virt)
		return AE_BAD_PARAMETER;
	if (!phys)
		return AE_BAD_PARAMETER;

	*phys = virt_to_phys(virt);
	return AE_OK;
}
#endif
538
#ifdef CONFIG_ACPI_REV_OVERRIDE_POSSIBLE
/* Set by the "acpi_rev_override" command line option below. */
static bool acpi_rev_override;

int __init acpi_rev_override_setup(char *str)
{
	acpi_rev_override = true;
	return 1;	/* option consumed */
}
__setup("acpi_rev_override", acpi_rev_override_setup);
#else
#define acpi_rev_override	false
#endif
551
#define ACPI_MAX_OVERRIDE_LEN 100

/* Optional _OS_ override string (empty unless set elsewhere, e.g. by a
 * command line handler outside this chunk — confirm against callers). */
static char acpi_os_name[ACPI_MAX_OVERRIDE_LEN];

/*
 * ACPICA callback letting the OS override predefined namespace objects:
 * - _OS_ is replaced with acpi_os_name when one was provided;
 * - _REV is forced to 5 when acpi_rev_override is set.  The new value is
 *   passed through the char * as a plain integer — presumably the form
 *   ACPICA expects for numeric overrides; verify against ACPICA docs.
 */
acpi_status
acpi_os_predefined_override(const struct acpi_predefined_names *init_val,
			    char **new_val)
{
	if (!init_val || !new_val)
		return AE_BAD_PARAMETER;

	*new_val = NULL;
	if (!memcmp(init_val->name, "_OS_", 4) && strlen(acpi_os_name)) {
		printk(KERN_INFO PREFIX "Overriding _OS definition to '%s'\n",
		       acpi_os_name);
		*new_val = acpi_os_name;
	}

	if (!memcmp(init_val->name, "_REV", 4) && acpi_rev_override) {
		printk(KERN_INFO PREFIX "Overriding _REV return value to 5\n");
		*new_val = (char *)5;
	}

	return AE_OK;
}
577
578 #ifdef CONFIG_ACPI_INITRD_TABLE_OVERRIDE
579 #include <linux/earlycpio.h>
580 #include <linux/memblock.h>
581
582 static u64 acpi_tables_addr;
583 static int all_tables_size;
584
585 /* Copied from acpica/tbutils.c:acpi_tb_checksum() */
586 static u8 __init acpi_table_checksum(u8 *buffer, u32 length)
587 {
588         u8 sum = 0;
589         u8 *end = buffer + length;
590
591         while (buffer < end)
592                 sum = (u8) (sum + *(buffer++));
593         return sum;
594 }
595
/* Signatures accepted for initrd override.
 * All but ACPI_SIG_RSDP and ACPI_SIG_FACS: */
static const char * const table_sigs[] = {
	ACPI_SIG_BERT, ACPI_SIG_CPEP, ACPI_SIG_ECDT, ACPI_SIG_EINJ,
	ACPI_SIG_ERST, ACPI_SIG_HEST, ACPI_SIG_MADT, ACPI_SIG_MSCT,
	ACPI_SIG_SBST, ACPI_SIG_SLIT, ACPI_SIG_SRAT, ACPI_SIG_ASF,
	ACPI_SIG_BOOT, ACPI_SIG_DBGP, ACPI_SIG_DMAR, ACPI_SIG_HPET,
	ACPI_SIG_IBFT, ACPI_SIG_IVRS, ACPI_SIG_MCFG, ACPI_SIG_MCHI,
	ACPI_SIG_SLIC, ACPI_SIG_SPCR, ACPI_SIG_SPMI, ACPI_SIG_TCPA,
	ACPI_SIG_UEFI, ACPI_SIG_WAET, ACPI_SIG_WDAT, ACPI_SIG_WDDT,
	ACPI_SIG_WDRT, ACPI_SIG_DSDT, ACPI_SIG_FADT, ACPI_SIG_PSDT,
	ACPI_SIG_RSDT, ACPI_SIG_XSDT, ACPI_SIG_SSDT, NULL };

#define ACPI_HEADER_SIZE sizeof(struct acpi_table_header)

/* Upper bound on override tables accepted from the initrd cpio. */
#define ACPI_OVERRIDE_TABLES 64
static struct cpio_data __initdata acpi_initrd_files[ACPI_OVERRIDE_TABLES];

/* Largest span early_ioremap() can map in one call. */
#define MAP_CHUNK_SIZE   (NR_FIX_BTMAPS << PAGE_SHIFT)
614
/*
 * Scan the initrd image for replacement ACPI tables under
 * kernel/firmware/acpi/, validate each candidate (known signature,
 * length matches file size, checksum), then copy the accepted tables
 * into a reserved memblock region for acpi_os_physical_table_override()
 * to serve from later.  Runs very early: only early_ioremap() and
 * memblock are available here.
 */
void __init acpi_initrd_override(void *data, size_t size)
{
	int sig, no, table_nr = 0, total_offset = 0;
	long offset = 0;
	struct acpi_table_header *table;
	char cpio_path[32] = "kernel/firmware/acpi/";
	struct cpio_data file;

	if (data == NULL || size == 0)
		return;

	/* Pass 1: find and validate candidate tables inside the cpio. */
	for (no = 0; no < ACPI_OVERRIDE_TABLES; no++) {
		file = find_cpio_data(cpio_path, data, size, &offset);
		if (!file.data)
			break;

		/* Resume the next search after the file just found. */
		data += offset;
		size -= offset;

		if (file.size < sizeof(struct acpi_table_header)) {
			pr_err("ACPI OVERRIDE: Table smaller than ACPI header [%s%s]\n",
				cpio_path, file.name);
			continue;
		}

		table = file.data;

		for (sig = 0; table_sigs[sig]; sig++)
			if (!memcmp(table->signature, table_sigs[sig], 4))
				break;

		if (!table_sigs[sig]) {
			pr_err("ACPI OVERRIDE: Unknown signature [%s%s]\n",
				cpio_path, file.name);
			continue;
		}
		if (file.size != table->length) {
			pr_err("ACPI OVERRIDE: File length does not match table length [%s%s]\n",
				cpio_path, file.name);
			continue;
		}
		if (acpi_table_checksum(file.data, table->length)) {
			pr_err("ACPI OVERRIDE: Bad table checksum [%s%s]\n",
				cpio_path, file.name);
			continue;
		}

		pr_info("%4.4s ACPI table found in initrd [%s%s][0x%x]\n",
			table->signature, cpio_path, file.name, table->length);

		all_tables_size += table->length;
		acpi_initrd_files[table_nr].data = file.data;
		acpi_initrd_files[table_nr].size = file.size;
		table_nr++;
	}
	if (table_nr == 0)
		return;

	/* Carve out low memory to hold all accepted tables contiguously. */
	acpi_tables_addr =
		memblock_find_in_range(0, max_low_pfn_mapped << PAGE_SHIFT,
				       all_tables_size, PAGE_SIZE);
	if (!acpi_tables_addr) {
		WARN_ON(1);
		return;
	}
	/*
	 * Only calling e820_add_reserve does not work and the
	 * tables are invalid (memory got used) later.
	 * memblock_reserve works as expected and the tables won't get modified.
	 * But it's not enough on X86 because ioremap will
	 * complain later (used by acpi_os_map_memory) that the pages
	 * that should get mapped are not marked "reserved".
	 * Both memblock_reserve and e820_add_region (via arch_reserve_mem_area)
	 * works fine.
	 */
	memblock_reserve(acpi_tables_addr, all_tables_size);
	arch_reserve_mem_area(acpi_tables_addr, all_tables_size);

	/*
	 * early_ioremap only can remap 256k one time. If we map all
	 * tables one time, we will hit the limit. Need to map chunks
	 * one by one during copying the same as that in relocate_initrd().
	 */
	for (no = 0; no < table_nr; no++) {
		unsigned char *src_p = acpi_initrd_files[no].data;
		phys_addr_t size = acpi_initrd_files[no].size;
		phys_addr_t dest_addr = acpi_tables_addr + total_offset;
		phys_addr_t slop, clen;
		char *dest_p;

		total_offset += size;

		/* Copy in MAP_CHUNK_SIZE pieces, respecting page alignment. */
		while (size) {
			slop = dest_addr & ~PAGE_MASK;
			clen = size;
			if (clen > MAP_CHUNK_SIZE - slop)
				clen = MAP_CHUNK_SIZE - slop;
			dest_p = early_ioremap(dest_addr & PAGE_MASK,
						 clen + slop);
			memcpy(dest_p + slop, src_p, clen);
			early_iounmap(dest_p, clen + slop);
			src_p += clen;
			dest_addr += clen;
			size -= clen;
		}
	}
}
722 #endif /* CONFIG_ACPI_INITRD_TABLE_OVERRIDE */
723
724 static void acpi_table_taint(struct acpi_table_header *table)
725 {
726         pr_warn(PREFIX
727                 "Override [%4.4s-%8.8s], this is unsafe: tainting kernel\n",
728                 table->signature, table->oem_table_id);
729         add_taint(TAINT_OVERRIDDEN_ACPI_TABLE, LOCKDEP_NOW_UNRELIABLE);
730 }
731
732
/*
 * ACPICA callback: offer an in-kernel (logical) replacement for a table.
 * Only the DSDT can be replaced here, and only when a custom DSDT was
 * built in via CONFIG_ACPI_CUSTOM_DSDT.  Taints the kernel whenever an
 * override actually takes place.
 */
acpi_status
acpi_os_table_override(struct acpi_table_header * existing_table,
		       struct acpi_table_header ** new_table)
{
	if (!existing_table || !new_table)
		return AE_BAD_PARAMETER;

	*new_table = NULL;

#ifdef CONFIG_ACPI_CUSTOM_DSDT
	if (strncmp(existing_table->signature, "DSDT", 4) == 0)
		*new_table = (struct acpi_table_header *)AmlCode;
#endif
	if (*new_table != NULL)
		acpi_table_taint(existing_table);
	return AE_OK;
}
750
/*
 * ACPICA callback: offer a physical replacement for 'existing_table'
 * from the initrd staging area built by acpi_initrd_override().  Walks
 * the staged tables, mapping each header just long enough to compare
 * signature and OEM table id; on a match returns the table's physical
 * address and length through the out parameters.  Always returns AE_OK
 * (no match simply leaves *address == 0).  Taints the kernel when an
 * override is handed out.
 */
acpi_status
acpi_os_physical_table_override(struct acpi_table_header *existing_table,
				acpi_physical_address *address,
				u32 *table_length)
{
#ifndef CONFIG_ACPI_INITRD_TABLE_OVERRIDE
	*table_length = 0;
	*address = 0;
	return AE_OK;
#else
	int table_offset = 0;
	struct acpi_table_header *table;

	*table_length = 0;
	*address = 0;

	if (!acpi_tables_addr)
		return AE_OK;

	do {
		/* Staging area must still have room for a full header. */
		if (table_offset + ACPI_HEADER_SIZE > all_tables_size) {
			WARN_ON(1);
			return AE_OK;
		}

		table = acpi_os_map_memory(acpi_tables_addr + table_offset,
					   ACPI_HEADER_SIZE);

		/* A table running past the staging area means corruption. */
		if (table_offset + table->length > all_tables_size) {
			acpi_os_unmap_memory(table, ACPI_HEADER_SIZE);
			WARN_ON(1);
			return AE_OK;
		}

		table_offset += table->length;

		if (memcmp(existing_table->signature, table->signature, 4)) {
			acpi_os_unmap_memory(table,
				     ACPI_HEADER_SIZE);
			continue;
		}

		/* Only override tables with matching oem id */
		if (memcmp(table->oem_table_id, existing_table->oem_table_id,
			   ACPI_OEM_TABLE_ID_SIZE)) {
			acpi_os_unmap_memory(table,
				     ACPI_HEADER_SIZE);
			continue;
		}

		/* Match: report the table's own start, not the next offset. */
		table_offset -= table->length;
		*table_length = table->length;
		acpi_os_unmap_memory(table, ACPI_HEADER_SIZE);
		*address = acpi_tables_addr + table_offset;
		break;
	} while (table_offset + ACPI_HEADER_SIZE < all_tables_size);

	if (*address != 0)
		acpi_table_taint(existing_table);
	return AE_OK;
#endif
}
813
814 static irqreturn_t acpi_irq(int irq, void *dev_id)
815 {
816         u32 handled;
817
818         handled = (*acpi_irq_handler) (acpi_irq_context);
819
820         if (handled) {
821                 acpi_irq_handled++;
822                 return IRQ_HANDLED;
823         } else {
824                 acpi_irq_not_handled++;
825                 return IRQ_NONE;
826         }
827 }
828
/*
 * Install 'handler' as the SCI interrupt handler.  Only the SCI named in
 * our copy of the FADT is supported, and only one handler at a time.
 * NOTE: a failed acpi_gsi_to_irq() still returns AE_OK — boot continues
 * without a working SCI rather than failing ACPICA initialization.
 */
acpi_status
acpi_os_install_interrupt_handler(u32 gsi, acpi_osd_handler handler,
				  void *context)
{
	unsigned int irq;

	acpi_irq_stats_init();

	/*
	 * ACPI interrupts different from the SCI in our copy of the FADT are
	 * not supported.
	 */
	if (gsi != acpi_gbl_FADT.sci_interrupt)
		return AE_BAD_PARAMETER;

	if (acpi_irq_handler)
		return AE_ALREADY_ACQUIRED;

	if (acpi_gsi_to_irq(gsi, &irq) < 0) {
		printk(KERN_ERR PREFIX "SCI (ACPI GSI %d) not registered\n",
		       gsi);
		return AE_OK;
	}

	/* Handler/context must be visible before request_irq() can fire it. */
	acpi_irq_handler = handler;
	acpi_irq_context = context;
	if (request_irq(irq, acpi_irq, IRQF_SHARED, "acpi", acpi_irq)) {
		printk(KERN_ERR PREFIX "SCI (IRQ%d) allocation failed\n", irq);
		acpi_irq_handler = NULL;
		return AE_NOT_ACQUIRED;
	}
	acpi_sci_irq = irq;

	return AE_OK;
}
864
/*
 * Remove the SCI handler installed above and mark the SCI invalid.
 * The 'handler' argument is part of the OSL interface but unchecked here.
 */
acpi_status acpi_os_remove_interrupt_handler(u32 gsi, acpi_osd_handler handler)
{
	if (gsi != acpi_gbl_FADT.sci_interrupt || !acpi_sci_irq_valid())
		return AE_BAD_PARAMETER;

	free_irq(acpi_sci_irq, acpi_irq);
	acpi_irq_handler = NULL;
	acpi_sci_irq = INVALID_ACPI_IRQ;

	return AE_OK;
}
876
877 /*
878  * Running in interpreter thread context, safe to sleep
879  */
880
/* Sleep for 'ms' milliseconds on behalf of the AML interpreter.
 * NOTE(review): msleep() takes a narrower integer than u64, so an
 * extremely large 'ms' would be truncated — confirm callers' ranges. */
void acpi_os_sleep(u64 ms)
{
	msleep(ms);
}
885
886 void acpi_os_stall(u32 us)
887 {
888         while (us) {
889                 u32 delay = 1000;
890
891                 if (delay > us)
892                         delay = us;
893                 udelay(delay);
894                 touch_nmi_watchdog();
895                 us -= delay;
896         }
897 }
898
/*
 * Support ACPI 3.0 AML Timer operand
 * Returns 64-bit free-running, monotonically increasing timer
 * with 100ns granularity
 */
u64 acpi_os_get_timer(void)
{
	u64 time_ns = ktime_to_ns(ktime_get());
	/* do_div() divides in place; its (remainder) return value is unused. */
	do_div(time_ns, 100);
	return time_ns;
}
910
/*
 * Read up to 32 bits from an I/O port.  '*value' is zeroed first, then
 * the low bytes are filled via a narrow store through the cast pointer.
 * NOTE(review): the *(u8 *)/(u16 *) stores assume little-endian layout
 * of 'value' — fine on the port-I/O architectures this targets, but
 * confirm before reusing elsewhere.  A NULL 'value' is tolerated (the
 * read still happens; the result lands in 'dummy').  Widths other than
 * 1/2/4 bytes are a kernel bug (BUG()).
 */
acpi_status acpi_os_read_port(acpi_io_address port, u32 * value, u32 width)
{
	u32 dummy;

	if (!value)
		value = &dummy;

	*value = 0;
	if (width <= 8) {
		*(u8 *) value = inb(port);
	} else if (width <= 16) {
		*(u16 *) value = inw(port);
	} else if (width <= 32) {
		*(u32 *) value = inl(port);
	} else {
		BUG();
	}

	return AE_OK;
}

EXPORT_SYMBOL(acpi_os_read_port);
933
934 acpi_status acpi_os_write_port(acpi_io_address port, u32 value, u32 width)
935 {
936         if (width <= 8) {
937                 outb(value, port);
938         } else if (width <= 16) {
939                 outw(value, port);
940         } else if (width <= 32) {
941                 outl(value, port);
942         } else {
943                 BUG();
944         }
945
946         return AE_OK;
947 }
948
949 EXPORT_SYMBOL(acpi_os_write_port);
950
/*
 * Read @width (8/16/32/64) bits of physical memory for the interpreter.
 *
 * Fast path: find an existing mapping under RCU.  Slow path: create a
 * temporary ioremap and tear it down again.  @value may be NULL, in
 * which case the access is still performed but the result discarded.
 */
acpi_status
acpi_os_read_memory(acpi_physical_address phys_addr, u64 *value, u32 width)
{
	void __iomem *virt_addr;
	unsigned int size = width / 8;
	bool unmap = false;
	u64 dummy;

	rcu_read_lock();
	virt_addr = acpi_map_vaddr_lookup(phys_addr, size);
	if (!virt_addr) {
		/* Not in the cached map list; map it just for this access. */
		rcu_read_unlock();
		virt_addr = acpi_os_ioremap(phys_addr, size);
		if (!virt_addr)
			return AE_BAD_ADDRESS;
		unmap = true;
	}

	if (!value)
		value = &dummy;

	switch (width) {
	case 8:
		*(u8 *) value = readb(virt_addr);
		break;
	case 16:
		*(u16 *) value = readw(virt_addr);
		break;
	case 32:
		*(u32 *) value = readl(virt_addr);
		break;
	case 64:
		*(u64 *) value = readq(virt_addr);
		break;
	default:
		BUG();
	}

	/* Pair with whichever path above succeeded: iounmap or RCU unlock. */
	if (unmap)
		iounmap(virt_addr);
	else
		rcu_read_unlock();

	return AE_OK;
}
996
/*
 * Write @width (8/16/32/64) bits of @value to physical memory.
 * Mapping strategy mirrors acpi_os_read_memory(): reuse a cached
 * mapping under RCU, or ioremap temporarily.
 */
acpi_status
acpi_os_write_memory(acpi_physical_address phys_addr, u64 value, u32 width)
{
	void __iomem *virt_addr;
	unsigned int size = width / 8;
	bool unmap = false;

	rcu_read_lock();
	virt_addr = acpi_map_vaddr_lookup(phys_addr, size);
	if (!virt_addr) {
		/* Not in the cached map list; map it just for this access. */
		rcu_read_unlock();
		virt_addr = acpi_os_ioremap(phys_addr, size);
		if (!virt_addr)
			return AE_BAD_ADDRESS;
		unmap = true;
	}

	switch (width) {
	case 8:
		writeb(value, virt_addr);
		break;
	case 16:
		writew(value, virt_addr);
		break;
	case 32:
		writel(value, virt_addr);
		break;
	case 64:
		writeq(value, virt_addr);
		break;
	default:
		BUG();
	}

	/* Pair with whichever path above succeeded: iounmap or RCU unlock. */
	if (unmap)
		iounmap(virt_addr);
	else
		rcu_read_unlock();

	return AE_OK;
}
1038
1039 acpi_status
1040 acpi_os_read_pci_configuration(struct acpi_pci_id * pci_id, u32 reg,
1041                                u64 *value, u32 width)
1042 {
1043         int result, size;
1044         u32 value32;
1045
1046         if (!value)
1047                 return AE_BAD_PARAMETER;
1048
1049         switch (width) {
1050         case 8:
1051                 size = 1;
1052                 break;
1053         case 16:
1054                 size = 2;
1055                 break;
1056         case 32:
1057                 size = 4;
1058                 break;
1059         default:
1060                 return AE_ERROR;
1061         }
1062
1063         result = raw_pci_read(pci_id->segment, pci_id->bus,
1064                                 PCI_DEVFN(pci_id->device, pci_id->function),
1065                                 reg, size, &value32);
1066         *value = value32;
1067
1068         return (result ? AE_ERROR : AE_OK);
1069 }
1070
1071 acpi_status
1072 acpi_os_write_pci_configuration(struct acpi_pci_id * pci_id, u32 reg,
1073                                 u64 value, u32 width)
1074 {
1075         int result, size;
1076
1077         switch (width) {
1078         case 8:
1079                 size = 1;
1080                 break;
1081         case 16:
1082                 size = 2;
1083                 break;
1084         case 32:
1085                 size = 4;
1086                 break;
1087         default:
1088                 return AE_ERROR;
1089         }
1090
1091         result = raw_pci_write(pci_id->segment, pci_id->bus,
1092                                 PCI_DEVFN(pci_id->device, pci_id->function),
1093                                 reg, size, value);
1094
1095         return (result ? AE_ERROR : AE_OK);
1096 }
1097
1098 static void acpi_os_execute_deferred(struct work_struct *work)
1099 {
1100         struct acpi_os_dpc *dpc = container_of(work, struct acpi_os_dpc, work);
1101
1102         dpc->function(dpc->context);
1103         kfree(dpc);
1104 }
1105
1106 #ifdef CONFIG_ACPI_DEBUGGER
1107 static struct acpi_debugger acpi_debugger;
1108 static bool acpi_debugger_initialized;
1109
1110 int acpi_register_debugger(struct module *owner,
1111                            const struct acpi_debugger_ops *ops)
1112 {
1113         int ret = 0;
1114
1115         mutex_lock(&acpi_debugger.lock);
1116         if (acpi_debugger.ops) {
1117                 ret = -EBUSY;
1118                 goto err_lock;
1119         }
1120
1121         acpi_debugger.owner = owner;
1122         acpi_debugger.ops = ops;
1123
1124 err_lock:
1125         mutex_unlock(&acpi_debugger.lock);
1126         return ret;
1127 }
1128 EXPORT_SYMBOL(acpi_register_debugger);
1129
1130 void acpi_unregister_debugger(const struct acpi_debugger_ops *ops)
1131 {
1132         mutex_lock(&acpi_debugger.lock);
1133         if (ops == acpi_debugger.ops) {
1134                 acpi_debugger.ops = NULL;
1135                 acpi_debugger.owner = NULL;
1136         }
1137         mutex_unlock(&acpi_debugger.lock);
1138 }
1139 EXPORT_SYMBOL(acpi_unregister_debugger);
1140
/*
 * Ask the registered debugger implementation to spawn a thread running
 * @function(@context).  Returns the hook's result, or -ENODEV when no
 * debugger is usable.
 *
 * The ops pointer and owner are sampled under acpi_debugger.lock with
 * the owner module pinned via try_module_get(); the hook itself is
 * called with the lock dropped so it may sleep.
 */
int acpi_debugger_create_thread(acpi_osd_exec_callback function, void *context)
{
	int ret;
	int (*func)(acpi_osd_exec_callback, void *);
	struct module *owner;

	if (!acpi_debugger_initialized)
		return -ENODEV;
	mutex_lock(&acpi_debugger.lock);
	if (!acpi_debugger.ops) {
		/* No debugger registered. */
		ret = -ENODEV;
		goto err_lock;
	}
	if (!try_module_get(acpi_debugger.owner)) {
		/* Owner module is unloading; behave as if unregistered. */
		ret = -ENODEV;
		goto err_lock;
	}
	func = acpi_debugger.ops->create_thread;
	owner = acpi_debugger.owner;
	mutex_unlock(&acpi_debugger.lock);

	ret = func(function, context);

	/* Re-take the lock only to pair module_put() with the get above. */
	mutex_lock(&acpi_debugger.lock);
	module_put(owner);
err_lock:
	mutex_unlock(&acpi_debugger.lock);
	return ret;
}
1170
/*
 * Forward @msg to the registered debugger's ->write_log() hook.
 * Returns the hook's result, or -ENODEV when no debugger is usable.
 * Locking/refcounting mirrors acpi_debugger_create_thread().
 */
ssize_t acpi_debugger_write_log(const char *msg)
{
	ssize_t ret;
	ssize_t (*func)(const char *);
	struct module *owner;

	if (!acpi_debugger_initialized)
		return -ENODEV;
	mutex_lock(&acpi_debugger.lock);
	if (!acpi_debugger.ops) {
		ret = -ENODEV;
		goto err_lock;
	}
	if (!try_module_get(acpi_debugger.owner)) {
		ret = -ENODEV;
		goto err_lock;
	}
	func = acpi_debugger.ops->write_log;
	owner = acpi_debugger.owner;
	mutex_unlock(&acpi_debugger.lock);

	/* Called unlocked; the module reference keeps ops alive. */
	ret = func(msg);

	mutex_lock(&acpi_debugger.lock);
	module_put(owner);
err_lock:
	mutex_unlock(&acpi_debugger.lock);
	return ret;
}
1200
/*
 * Fetch the next debugger command line into @buffer via the registered
 * ->read_cmd() hook.  Returns the hook's result, or -ENODEV when no
 * debugger is usable.  Locking mirrors acpi_debugger_create_thread().
 */
ssize_t acpi_debugger_read_cmd(char *buffer, size_t buffer_length)
{
	ssize_t ret;
	ssize_t (*func)(char *, size_t);
	struct module *owner;

	if (!acpi_debugger_initialized)
		return -ENODEV;
	mutex_lock(&acpi_debugger.lock);
	if (!acpi_debugger.ops) {
		ret = -ENODEV;
		goto err_lock;
	}
	if (!try_module_get(acpi_debugger.owner)) {
		ret = -ENODEV;
		goto err_lock;
	}
	func = acpi_debugger.ops->read_cmd;
	owner = acpi_debugger.owner;
	mutex_unlock(&acpi_debugger.lock);

	/* Called unlocked; may block waiting for user input. */
	ret = func(buffer, buffer_length);

	mutex_lock(&acpi_debugger.lock);
	module_put(owner);
err_lock:
	mutex_unlock(&acpi_debugger.lock);
	return ret;
}
1230
/*
 * Block in the registered ->wait_command_ready() hook until a command
 * is available.  The ACPICA globals handed down tell the hook whether a
 * method is executing and where ACPICA's line buffer lives.  Returns
 * the hook's result, or -ENODEV when no debugger is usable.
 */
int acpi_debugger_wait_command_ready(void)
{
	int ret;
	int (*func)(bool, char *, size_t);
	struct module *owner;

	if (!acpi_debugger_initialized)
		return -ENODEV;
	mutex_lock(&acpi_debugger.lock);
	if (!acpi_debugger.ops) {
		ret = -ENODEV;
		goto err_lock;
	}
	if (!try_module_get(acpi_debugger.owner)) {
		ret = -ENODEV;
		goto err_lock;
	}
	func = acpi_debugger.ops->wait_command_ready;
	owner = acpi_debugger.owner;
	mutex_unlock(&acpi_debugger.lock);

	/* Called unlocked; expected to sleep until input arrives. */
	ret = func(acpi_gbl_method_executing,
		   acpi_gbl_db_line_buf, ACPI_DB_LINE_BUFFER_SIZE);

	mutex_lock(&acpi_debugger.lock);
	module_put(owner);
err_lock:
	mutex_unlock(&acpi_debugger.lock);
	return ret;
}
1261
/*
 * Tell the registered debugger that the current command has finished
 * executing, via its ->notify_command_complete() hook.  Returns the
 * hook's result, or -ENODEV when no debugger is usable.
 */
int acpi_debugger_notify_command_complete(void)
{
	int ret;
	int (*func)(void);
	struct module *owner;

	if (!acpi_debugger_initialized)
		return -ENODEV;
	mutex_lock(&acpi_debugger.lock);
	if (!acpi_debugger.ops) {
		ret = -ENODEV;
		goto err_lock;
	}
	if (!try_module_get(acpi_debugger.owner)) {
		ret = -ENODEV;
		goto err_lock;
	}
	func = acpi_debugger.ops->notify_command_complete;
	owner = acpi_debugger.owner;
	mutex_unlock(&acpi_debugger.lock);

	/* Called unlocked; the module reference keeps ops alive. */
	ret = func();

	mutex_lock(&acpi_debugger.lock);
	module_put(owner);
err_lock:
	mutex_unlock(&acpi_debugger.lock);
	return ret;
}
1291
/* One-time boot initialization of the debugger registration state. */
int __init acpi_debugger_init(void)
{
	mutex_init(&acpi_debugger.lock);
	/* From here on the acpi_debugger_* entry points are usable. */
	acpi_debugger_initialized = true;
	return 0;
}
1298 #endif
1299
1300 /*******************************************************************************
1301  *
1302  * FUNCTION:    acpi_os_execute
1303  *
1304  * PARAMETERS:  Type               - Type of the callback
1305  *              Function           - Function to be executed
1306  *              Context            - Function parameters
1307  *
1308  * RETURN:      Status
1309  *
1310  * DESCRIPTION: Depending on type, either queues function for deferred execution or
1311  *              immediately executes function on a separate thread.
1312  *
1313  ******************************************************************************/
1314
acpi_status acpi_os_execute(acpi_execute_type type,
			    acpi_osd_exec_callback function, void *context)
{
	acpi_status status = AE_OK;
	struct acpi_os_dpc *dpc;
	struct workqueue_struct *queue;
	int ret;
	ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
			  "Scheduling function [%p(%p)] for deferred execution.\n",
			  function, context));

	/* Debugger requests get a dedicated thread, not a workqueue item. */
	if (type == OSL_DEBUGGER_MAIN_THREAD) {
		ret = acpi_debugger_create_thread(function, context);
		if (ret) {
			pr_err("Call to kthread_create() failed.\n");
			status = AE_ERROR;
		}
		goto out_thread;
	}

	/*
	 * Allocate/initialize DPC structure.  Note that this memory will be
	 * freed by the callee.  The kernel handles the work_struct list  in a
	 * way that allows us to also free its memory inside the callee.
	 * Because we may want to schedule several tasks with different
	 * parameters we can't use the approach some kernel code uses of
	 * having a static work_struct.
	 */

	dpc = kzalloc(sizeof(struct acpi_os_dpc), GFP_ATOMIC);
	if (!dpc)
		return AE_NO_MEMORY;

	dpc->function = function;
	dpc->context = context;

	/*
	 * To prevent lockdep from complaining unnecessarily, make sure that
	 * there is a different static lockdep key for each workqueue by using
	 * INIT_WORK() for each of them separately.
	 */
	if (type == OSL_NOTIFY_HANDLER) {
		queue = kacpi_notify_wq;
		INIT_WORK(&dpc->work, acpi_os_execute_deferred);
	} else if (type == OSL_GPE_HANDLER) {
		queue = kacpid_wq;
		INIT_WORK(&dpc->work, acpi_os_execute_deferred);
	} else {
		pr_err("Unsupported os_execute type %d.\n", type);
		status = AE_ERROR;
	}

	if (ACPI_FAILURE(status))
		goto err_workqueue;

	/*
	 * On some machines, a software-initiated SMI causes corruption unless
	 * the SMI runs on CPU 0.  An SMI can be initiated by any AML, but
	 * typically it's done in GPE-related methods that are run via
	 * workqueues, so we can avoid the known corruption cases by always
	 * queueing on CPU 0.
	 */
	ret = queue_work_on(0, queue, &dpc->work);
	if (!ret) {
		printk(KERN_ERR PREFIX
			  "Call to queue_work() failed.\n");
		status = AE_ERROR;
	}
err_workqueue:
	/* On failure the work was never queued, so the DPC is still ours. */
	if (ACPI_FAILURE(status))
		kfree(dpc);
out_thread:
	return status;
}
EXPORT_SYMBOL(acpi_os_execute);
1390
void acpi_os_wait_events_complete(void)
{
	/*
	 * Make sure the GPE handler or the fixed event handler is not used
	 * on another CPU after removal.
	 */
	if (acpi_sci_irq_valid())
		synchronize_hardirq(acpi_sci_irq);
	/* Then drain any work the SCI handler already queued. */
	flush_workqueue(kacpid_wq);
	flush_workqueue(kacpi_notify_wq);
}
1402
/* Deferred hotplug request, queued on kacpi_hotplug_wq. */
struct acpi_hp_work {
	struct work_struct work;
	struct acpi_device *adev;	/* device the event targets */
	u32 src;	/* event code passed to acpi_device_hotplug() */
};
1408
1409 static void acpi_hotplug_work_fn(struct work_struct *work)
1410 {
1411         struct acpi_hp_work *hpw = container_of(work, struct acpi_hp_work, work);
1412
1413         acpi_os_wait_events_complete();
1414         acpi_device_hotplug(hpw->adev, hpw->src);
1415         kfree(hpw);
1416 }
1417
1418 acpi_status acpi_hotplug_schedule(struct acpi_device *adev, u32 src)
1419 {
1420         struct acpi_hp_work *hpw;
1421
1422         ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
1423                   "Scheduling hotplug event (%p, %u) for deferred execution.\n",
1424                   adev, src));
1425
1426         hpw = kmalloc(sizeof(*hpw), GFP_KERNEL);
1427         if (!hpw)
1428                 return AE_NO_MEMORY;
1429
1430         INIT_WORK(&hpw->work, acpi_hotplug_work_fn);
1431         hpw->adev = adev;
1432         hpw->src = src;
1433         /*
1434          * We can't run hotplug code in kacpid_wq/kacpid_notify_wq etc., because
1435          * the hotplug code may call driver .remove() functions, which may
1436          * invoke flush_scheduled_work()/acpi_os_wait_events_complete() to flush
1437          * these workqueues.
1438          */
1439         if (!queue_work(kacpi_hotplug_wq, &hpw->work)) {
1440                 kfree(hpw);
1441                 return AE_ERROR;
1442         }
1443         return AE_OK;
1444 }
1445
/*
 * Queue @work on the dedicated hotplug workqueue; hotplug work must not
 * share kacpid_wq/kacpi_notify_wq (see acpi_hotplug_schedule()).
 */
bool acpi_queue_hotplug_work(struct work_struct *work)
{
	return queue_work(kacpi_hotplug_wq, work);
}
1450
1451 acpi_status
1452 acpi_os_create_semaphore(u32 max_units, u32 initial_units, acpi_handle * handle)
1453 {
1454         struct semaphore *sem = NULL;
1455
1456         sem = acpi_os_allocate_zeroed(sizeof(struct semaphore));
1457         if (!sem)
1458                 return AE_NO_MEMORY;
1459
1460         sema_init(sem, initial_units);
1461
1462         *handle = (acpi_handle *) sem;
1463
1464         ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Creating semaphore[%p|%d].\n",
1465                           *handle, initial_units));
1466
1467         return AE_OK;
1468 }
1469
1470 /*
1471  * TODO: A better way to delete semaphores?  Linux doesn't have a
1472  * 'delete_semaphore()' function -- may result in an invalid
1473  * pointer dereference for non-synchronized consumers.  Should
1474  * we at least check for blocked threads and signal/cancel them?
1475  */
1476
1477 acpi_status acpi_os_delete_semaphore(acpi_handle handle)
1478 {
1479         struct semaphore *sem = (struct semaphore *)handle;
1480
1481         if (!sem)
1482                 return AE_BAD_PARAMETER;
1483
1484         ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Deleting semaphore[%p].\n", handle));
1485
1486         BUG_ON(!list_empty(&sem->wait_list));
1487         kfree(sem);
1488         sem = NULL;
1489
1490         return AE_OK;
1491 }
1492
1493 /*
1494  * TODO: Support for units > 1?
1495  */
1496 acpi_status acpi_os_wait_semaphore(acpi_handle handle, u32 units, u16 timeout)
1497 {
1498         acpi_status status = AE_OK;
1499         struct semaphore *sem = (struct semaphore *)handle;
1500         long jiffies;
1501         int ret = 0;
1502
1503         if (!acpi_os_initialized)
1504                 return AE_OK;
1505
1506         if (!sem || (units < 1))
1507                 return AE_BAD_PARAMETER;
1508
1509         if (units > 1)
1510                 return AE_SUPPORT;
1511
1512         ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Waiting for semaphore[%p|%d|%d]\n",
1513                           handle, units, timeout));
1514
1515         if (timeout == ACPI_WAIT_FOREVER)
1516                 jiffies = MAX_SCHEDULE_TIMEOUT;
1517         else
1518                 jiffies = msecs_to_jiffies(timeout);
1519
1520         ret = down_timeout(sem, jiffies);
1521         if (ret)
1522                 status = AE_TIME;
1523
1524         if (ACPI_FAILURE(status)) {
1525                 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
1526                                   "Failed to acquire semaphore[%p|%d|%d], %s",
1527                                   handle, units, timeout,
1528                                   acpi_format_exception(status)));
1529         } else {
1530                 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
1531                                   "Acquired semaphore[%p|%d|%d]", handle,
1532                                   units, timeout));
1533         }
1534
1535         return status;
1536 }
1537
1538 /*
1539  * TODO: Support for units > 1?
1540  */
1541 acpi_status acpi_os_signal_semaphore(acpi_handle handle, u32 units)
1542 {
1543         struct semaphore *sem = (struct semaphore *)handle;
1544
1545         if (!acpi_os_initialized)
1546                 return AE_OK;
1547
1548         if (!sem || (units < 1))
1549                 return AE_BAD_PARAMETER;
1550
1551         if (units > 1)
1552                 return AE_SUPPORT;
1553
1554         ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Signaling semaphore[%p|%d]\n", handle,
1555                           units));
1556
1557         up(sem);
1558
1559         return AE_OK;
1560 }
1561
/*
 * Read one debugger command line into @buffer for ACPICA.  With the
 * legacy kdb path (ENABLE_DEBUGGER) input comes from kdb_read();
 * otherwise it is delegated to the registered debugger implementation.
 */
acpi_status acpi_os_get_line(char *buffer, u32 buffer_length, u32 *bytes_read)
{
#ifdef ENABLE_DEBUGGER
	if (acpi_in_debugger) {
		u32 chars;

		kdb_read(buffer, buffer_length);

		/* remove the CR kdb includes */
		/*
		 * NOTE(review): this underflows if kdb_read() yields an
		 * empty string, and *bytes_read is never set on this path
		 * -- confirm callers tolerate both.
		 */
		chars = strlen(buffer) - 1;
		buffer[chars] = '\0';
	}
#else
	int ret;

	ret = acpi_debugger_read_cmd(buffer, buffer_length);
	if (ret < 0)
		return AE_ERROR;
	if (bytes_read)
		*bytes_read = ret;
#endif

	return AE_OK;
}
EXPORT_SYMBOL(acpi_os_get_line);
1587
1588 acpi_status acpi_os_wait_command_ready(void)
1589 {
1590         int ret;
1591
1592         ret = acpi_debugger_wait_command_ready();
1593         if (ret < 0)
1594                 return AE_ERROR;
1595         return AE_OK;
1596 }
1597
1598 acpi_status acpi_os_notify_command_complete(void)
1599 {
1600         int ret;
1601
1602         ret = acpi_debugger_notify_command_complete();
1603         if (ret < 0)
1604                 return AE_ERROR;
1605         return AE_OK;
1606 }
1607
1608 acpi_status acpi_os_signal(u32 function, void *info)
1609 {
1610         switch (function) {
1611         case ACPI_SIGNAL_FATAL:
1612                 printk(KERN_ERR PREFIX "Fatal opcode executed\n");
1613                 break;
1614         case ACPI_SIGNAL_BREAKPOINT:
1615                 /*
1616                  * AML Breakpoint
1617                  * ACPI spec. says to treat it as a NOP unless
1618                  * you are debugging.  So if/when we integrate
1619                  * AML debugger into the kernel debugger its
1620                  * hook will go here.  But until then it is
1621                  * not useful to print anything on breakpoints.
1622                  */
1623                 break;
1624         default:
1625                 break;
1626         }
1627
1628         return AE_OK;
1629 }
1630
1631 static int __init acpi_os_name_setup(char *str)
1632 {
1633         char *p = acpi_os_name;
1634         int count = ACPI_MAX_OVERRIDE_LEN - 1;
1635
1636         if (!str || !*str)
1637                 return 0;
1638
1639         for (; count-- && *str; str++) {
1640                 if (isalnum(*str) || *str == ' ' || *str == ':')
1641                         *p++ = *str;
1642                 else if (*str == '\'' || *str == '"')
1643                         continue;
1644                 else
1645                         break;
1646         }
1647         *p = 0;
1648
1649         return 1;
1650
1651 }
1652
1653 __setup("acpi_os_name=", acpi_os_name_setup);
1654
1655 #define OSI_STRING_LENGTH_MAX 64        /* arbitrary */
1656 #define OSI_STRING_ENTRIES_MAX 16       /* arbitrary */
1657
/* One acpi_osi= override: an _OSI interface string and whether it is to
 * be installed (true) or removed (false) at late setup. */
struct osi_setup_entry {
	char string[OSI_STRING_LENGTH_MAX];	/* _OSI interface string */
	bool enable;	/* install vs. remove in acpi_osi_setup_late() */
};
1662
/* Built-in _OSI additions, applied before any acpi_osi= parameters. */
static struct osi_setup_entry
		osi_setup_entries[OSI_STRING_ENTRIES_MAX] __initdata = {
	{"Module Device", true},
	{"Processor Device", true},
	{"3.0 _SCP Extensions", true},
	{"Processor Aggregator Device", true},
};
1670
1671 void __init acpi_osi_setup(char *str)
1672 {
1673         struct osi_setup_entry *osi;
1674         bool enable = true;
1675         int i;
1676
1677         if (!acpi_gbl_create_osi_method)
1678                 return;
1679
1680         if (str == NULL || *str == '\0') {
1681                 printk(KERN_INFO PREFIX "_OSI method disabled\n");
1682                 acpi_gbl_create_osi_method = FALSE;
1683                 return;
1684         }
1685
1686         if (*str == '!') {
1687                 str++;
1688                 if (*str == '\0') {
1689                         osi_linux.default_disabling = 1;
1690                         return;
1691                 } else if (*str == '*') {
1692                         acpi_update_interfaces(ACPI_DISABLE_ALL_STRINGS);
1693                         for (i = 0; i < OSI_STRING_ENTRIES_MAX; i++) {
1694                                 osi = &osi_setup_entries[i];
1695                                 osi->enable = false;
1696                         }
1697                         return;
1698                 }
1699                 enable = false;
1700         }
1701
1702         for (i = 0; i < OSI_STRING_ENTRIES_MAX; i++) {
1703                 osi = &osi_setup_entries[i];
1704                 if (!strcmp(osi->string, str)) {
1705                         osi->enable = enable;
1706                         break;
1707                 } else if (osi->string[0] == '\0') {
1708                         osi->enable = enable;
1709                         strncpy(osi->string, str, OSI_STRING_LENGTH_MAX);
1710                         break;
1711                 }
1712         }
1713 }
1714
1715 static void __init set_osi_linux(unsigned int enable)
1716 {
1717         if (osi_linux.enable != enable)
1718                 osi_linux.enable = enable;
1719
1720         if (osi_linux.enable)
1721                 acpi_osi_setup("Linux");
1722         else
1723                 acpi_osi_setup("!Linux");
1724
1725         return;
1726 }
1727
1728 static void __init acpi_cmdline_osi_linux(unsigned int enable)
1729 {
1730         osi_linux.cmdline = 1;  /* cmdline set the default and override DMI */
1731         osi_linux.dmi = 0;
1732         set_osi_linux(enable);
1733
1734         return;
1735 }
1736
1737 void __init acpi_dmi_osi_linux(int enable, const struct dmi_system_id *d)
1738 {
1739         printk(KERN_NOTICE PREFIX "DMI detected: %s\n", d->ident);
1740
1741         if (enable == -1)
1742                 return;
1743
1744         osi_linux.dmi = 1;      /* DMI knows that this box asks OSI(Linux) */
1745         set_osi_linux(enable);
1746
1747         return;
1748 }
1749
1750 /*
1751  * Modify the list of "OS Interfaces" reported to BIOS via _OSI
1752  *
1753  * empty string disables _OSI
1754  * string starting with '!' disables that string
1755  * otherwise string is added to list, augmenting built-in strings
1756  */
1757 static void __init acpi_osi_setup_late(void)
1758 {
1759         struct osi_setup_entry *osi;
1760         char *str;
1761         int i;
1762         acpi_status status;
1763
1764         if (osi_linux.default_disabling) {
1765                 status = acpi_update_interfaces(ACPI_DISABLE_ALL_VENDOR_STRINGS);
1766
1767                 if (ACPI_SUCCESS(status))
1768                         printk(KERN_INFO PREFIX "Disabled all _OSI OS vendors\n");
1769         }
1770
1771         for (i = 0; i < OSI_STRING_ENTRIES_MAX; i++) {
1772                 osi = &osi_setup_entries[i];
1773                 str = osi->string;
1774
1775                 if (*str == '\0')
1776                         break;
1777                 if (osi->enable) {
1778                         status = acpi_install_interface(str);
1779
1780                         if (ACPI_SUCCESS(status))
1781                                 printk(KERN_INFO PREFIX "Added _OSI(%s)\n", str);
1782                 } else {
1783                         status = acpi_remove_interface(str);
1784
1785                         if (ACPI_SUCCESS(status))
1786                                 printk(KERN_INFO PREFIX "Deleted _OSI(%s)\n", str);
1787                 }
1788         }
1789 }
1790
1791 static int __init osi_setup(char *str)
1792 {
1793         if (str && !strcmp("Linux", str))
1794                 acpi_cmdline_osi_linux(1);
1795         else if (str && !strcmp("!Linux", str))
1796                 acpi_cmdline_osi_linux(0);
1797         else
1798                 acpi_osi_setup(str);
1799
1800         return 1;
1801 }
1802
1803 __setup("acpi_osi=", osi_setup);
1804
1805 /*
1806  * Disable the auto-serialization of named objects creation methods.
1807  *
1808  * This feature is enabled by default.  It marks the AML control methods
1809  * that contain the opcodes to create named objects as "Serialized".
1810  */
1811 static int __init acpi_no_auto_serialize_setup(char *str)
1812 {
1813         acpi_gbl_auto_serialize_methods = FALSE;
1814         pr_info("ACPI: auto-serialization disabled\n");
1815
1816         return 1;
1817 }
1818
1819 __setup("acpi_no_auto_serialize", acpi_no_auto_serialize_setup);
1820
1821 /* Check of resource interference between native drivers and ACPI
1822  * OperationRegions (SystemIO and System Memory only).
1823  * IO ports and memory declared in ACPI might be used by the ACPI subsystem
1824  * in arbitrary AML code and can interfere with legacy drivers.
1825  * acpi_enforce_resources= can be set to:
1826  *
1827  *   - strict (default) (2)
1828  *     -> further driver trying to access the resources will not load
1829  *   - lax              (1)
1830  *     -> further driver trying to access the resources will load, but you
1831  *     get a system message that something might go wrong...
1832  *
1833  *   - no               (0)
1834  *     -> ACPI Operation Region resources will not be registered
1835  *
1836  */
1837 #define ENFORCE_RESOURCES_STRICT 2
1838 #define ENFORCE_RESOURCES_LAX    1
1839 #define ENFORCE_RESOURCES_NO     0
1840
1841 static unsigned int acpi_enforce_resources = ENFORCE_RESOURCES_STRICT;
1842
1843 static int __init acpi_enforce_resources_setup(char *str)
1844 {
1845         if (str == NULL || *str == '\0')
1846                 return 0;
1847
1848         if (!strcmp("strict", str))
1849                 acpi_enforce_resources = ENFORCE_RESOURCES_STRICT;
1850         else if (!strcmp("lax", str))
1851                 acpi_enforce_resources = ENFORCE_RESOURCES_LAX;
1852         else if (!strcmp("no", str))
1853                 acpi_enforce_resources = ENFORCE_RESOURCES_NO;
1854
1855         return 1;
1856 }
1857
1858 __setup("acpi_enforce_resources=", acpi_enforce_resources_setup);
1859
1860 /* Check for resource conflicts between ACPI OperationRegions and native
1861  * drivers */
int acpi_check_resource_conflict(const struct resource *res)
{
	acpi_adr_space_type space_id;
	acpi_size length;
	u8 warn = 0;
	int clash = 0;

	/* Checks disabled entirely, or a resource type we do not track. */
	if (acpi_enforce_resources == ENFORCE_RESOURCES_NO)
		return 0;
	if (!(res->flags & IORESOURCE_IO) && !(res->flags & IORESOURCE_MEM))
		return 0;

	if (res->flags & IORESOURCE_IO)
		space_id = ACPI_ADR_SPACE_SYSTEM_IO;
	else
		space_id = ACPI_ADR_SPACE_SYSTEM_MEMORY;

	length = resource_size(res);
	/* warn is forwarded to acpi_check_address_range() below. */
	if (acpi_enforce_resources != ENFORCE_RESOURCES_NO)
		warn = 1;
	clash = acpi_check_address_range(space_id, res->start, length, warn);

	if (clash) {
		if (acpi_enforce_resources != ENFORCE_RESOURCES_NO) {
			if (acpi_enforce_resources == ENFORCE_RESOURCES_LAX)
				printk(KERN_NOTICE "ACPI: This conflict may"
				       " cause random problems and system"
				       " instability\n");
			printk(KERN_INFO "ACPI: If an ACPI driver is available"
			       " for this device, you should use it instead of"
			       " the native driver\n");
		}
		/* Only "strict" mode actually blocks the native driver. */
		if (acpi_enforce_resources == ENFORCE_RESOURCES_STRICT)
			return -EBUSY;
	}
	return 0;
}
EXPORT_SYMBOL(acpi_check_resource_conflict);
1900
1901 int acpi_check_region(resource_size_t start, resource_size_t n,
1902                       const char *name)
1903 {
1904         struct resource res = {
1905                 .start = start,
1906                 .end   = start + n - 1,
1907                 .name  = name,
1908                 .flags = IORESOURCE_IO,
1909         };
1910
1911         return acpi_check_resource_conflict(&res);
1912 }
1913 EXPORT_SYMBOL(acpi_check_region);
1914
/*
 * Let drivers know whether the resource checks are effective
 */
int acpi_resources_are_enforced(void)
{
	/* Only "strict" enforcement actually rejects conflicting requests. */
	return acpi_enforce_resources == ENFORCE_RESOURCES_STRICT;
}
EXPORT_SYMBOL(acpi_resources_are_enforced);
1923
/* Return true if ACPICA's _OSI data indicates Windows 8 or newer. */
bool acpi_osi_is_win8(void)
{
	return acpi_gbl_osi_data >= ACPI_OSI_WIN_8;
}
EXPORT_SYMBOL(acpi_osi_is_win8);
1929
/*
 * Deallocate the memory for a spinlock.
 */
void acpi_os_delete_lock(acpi_spinlock handle)
{
	/*
	 * NOTE(review): presumably the lock was ACPI_ALLOCATE'd by the
	 * matching create path — confirm against acpi_os_create_lock().
	 */
	ACPI_FREE(handle);
}
1937
1938 /*
1939  * Acquire a spinlock.
1940  *
1941  * handle is a pointer to the spinlock_t.
1942  */
1943
1944 acpi_cpu_flags acpi_os_acquire_lock(acpi_spinlock lockp)
1945 {
1946         acpi_cpu_flags flags;
1947         spin_lock_irqsave(lockp, flags);
1948         return flags;
1949 }
1950
/*
 * Release a spinlock acquired via acpi_os_acquire_lock(), restoring the
 * IRQ flags that call returned.
 */

void acpi_os_release_lock(acpi_spinlock lockp, acpi_cpu_flags flags)
{
	spin_unlock_irqrestore(lockp, flags);
}
1959
1960 #ifndef ACPI_USE_LOCAL_CACHE
1961
1962 /*******************************************************************************
1963  *
1964  * FUNCTION:    acpi_os_create_cache
1965  *
1966  * PARAMETERS:  name      - Ascii name for the cache
1967  *              size      - Size of each cached object
1968  *              depth     - Maximum depth of the cache (in objects) <ignored>
1969  *              cache     - Where the new cache object is returned
1970  *
1971  * RETURN:      status
1972  *
1973  * DESCRIPTION: Create a cache object
1974  *
1975  ******************************************************************************/
1976
1977 acpi_status
1978 acpi_os_create_cache(char *name, u16 size, u16 depth, acpi_cache_t ** cache)
1979 {
1980         *cache = kmem_cache_create(name, size, 0, 0, NULL);
1981         if (*cache == NULL)
1982                 return AE_ERROR;
1983         else
1984                 return AE_OK;
1985 }
1986
1987 /*******************************************************************************
1988  *
1989  * FUNCTION:    acpi_os_purge_cache
1990  *
1991  * PARAMETERS:  Cache           - Handle to cache object
1992  *
1993  * RETURN:      Status
1994  *
1995  * DESCRIPTION: Free all objects within the requested cache.
1996  *
1997  ******************************************************************************/
1998
1999 acpi_status acpi_os_purge_cache(acpi_cache_t * cache)
2000 {
2001         kmem_cache_shrink(cache);
2002         return (AE_OK);
2003 }
2004
2005 /*******************************************************************************
2006  *
2007  * FUNCTION:    acpi_os_delete_cache
2008  *
2009  * PARAMETERS:  Cache           - Handle to cache object
2010  *
2011  * RETURN:      Status
2012  *
2013  * DESCRIPTION: Free all objects within the requested cache and delete the
2014  *              cache object.
2015  *
2016  ******************************************************************************/
2017
2018 acpi_status acpi_os_delete_cache(acpi_cache_t * cache)
2019 {
2020         kmem_cache_destroy(cache);
2021         return (AE_OK);
2022 }
2023
2024 /*******************************************************************************
2025  *
2026  * FUNCTION:    acpi_os_release_object
2027  *
2028  * PARAMETERS:  Cache       - Handle to cache object
2029  *              Object      - The object to be released
2030  *
2031  * RETURN:      None
2032  *
2033  * DESCRIPTION: Release an object to the specified cache.  If cache is full,
2034  *              the object is deleted.
2035  *
2036  ******************************************************************************/
2037
2038 acpi_status acpi_os_release_object(acpi_cache_t * cache, void *object)
2039 {
2040         kmem_cache_free(cache, object);
2041         return (AE_OK);
2042 }
2043 #endif
2044
/* Early parameter "acpi_no_static_ssdt": skip SSDTs from static ACPI tables. */
static int __init acpi_no_static_ssdt_setup(char *s)
{
	/* Tell ACPICA not to install SSDTs found in the static table list. */
	acpi_gbl_disable_ssdt_table_install = TRUE;
	pr_info("ACPI: static SSDT installation disabled\n");

	return 0;
}

early_param("acpi_no_static_ssdt", acpi_no_static_ssdt_setup);
2054
2055 static int __init acpi_disable_return_repair(char *s)
2056 {
2057         printk(KERN_NOTICE PREFIX
2058                "ACPI: Predefined validation mechanism disabled\n");
2059         acpi_gbl_disable_auto_repair = TRUE;
2060
2061         return 1;
2062 }
2063
2064 __setup("acpica_no_return_repair", acpi_disable_return_repair);
2065
acpi_status __init acpi_os_initialize(void)
{
	/*
	 * Pre-map the fixed-hardware register blocks described by the FADT
	 * (PM1a/PM1b event blocks, GPE0/GPE1 blocks) so later accesses do
	 * not need to map them on demand.
	 */
	acpi_os_map_generic_address(&acpi_gbl_FADT.xpm1a_event_block);
	acpi_os_map_generic_address(&acpi_gbl_FADT.xpm1b_event_block);
	acpi_os_map_generic_address(&acpi_gbl_FADT.xgpe0_block);
	acpi_os_map_generic_address(&acpi_gbl_FADT.xgpe1_block);
	if (acpi_gbl_FADT.flags & ACPI_FADT_RESET_REGISTER) {
		/*
		 * Use acpi_os_map_generic_address to pre-map the reset
		 * register if it's in system memory.
		 */
		int rv;

		rv = acpi_os_map_generic_address(&acpi_gbl_FADT.reset_register);
		pr_debug(PREFIX "%s: map reset_reg status %d\n", __func__, rv);
	}
	/* From here on, OSL services that depend on this init may be used. */
	acpi_os_initialized = true;

	return AE_OK;
}
2086
acpi_status __init acpi_os_initialize1(void)
{
	/*
	 * Workqueues for deferred ACPICA execution and notify handling
	 * (max_active == 1), plus a strictly ordered queue for hotplug
	 * work.  ACPI cannot function without them, hence BUG_ON() on
	 * allocation failure.
	 */
	kacpid_wq = alloc_workqueue("kacpid", 0, 1);
	kacpi_notify_wq = alloc_workqueue("kacpi_notify", 0, 1);
	kacpi_hotplug_wq = alloc_ordered_workqueue("kacpi_hotplug", 0);
	BUG_ON(!kacpid_wq);
	BUG_ON(!kacpi_notify_wq);
	BUG_ON(!kacpi_hotplug_wq);
	/* Hook _OSI evaluation and apply any deferred OSI setup requests. */
	acpi_install_interface_handler(acpi_osi_handler);
	acpi_osi_setup_late();
	return AE_OK;
}
2099
acpi_status acpi_os_terminate(void)
{
	/* Detach the SCI handler if one was installed. */
	if (acpi_irq_handler) {
		acpi_os_remove_interrupt_handler(acpi_gbl_FADT.sci_interrupt,
						 acpi_irq_handler);
	}

	/* Unmap in reverse order of the mappings made in acpi_os_initialize(). */
	acpi_os_unmap_generic_address(&acpi_gbl_FADT.xgpe1_block);
	acpi_os_unmap_generic_address(&acpi_gbl_FADT.xgpe0_block);
	acpi_os_unmap_generic_address(&acpi_gbl_FADT.xpm1b_event_block);
	acpi_os_unmap_generic_address(&acpi_gbl_FADT.xpm1a_event_block);
	if (acpi_gbl_FADT.flags & ACPI_FADT_RESET_REGISTER)
		acpi_os_unmap_generic_address(&acpi_gbl_FADT.reset_register);

	/* Tear down the workqueues created in acpi_os_initialize1(). */
	destroy_workqueue(kacpid_wq);
	destroy_workqueue(kacpi_notify_wq);
	destroy_workqueue(kacpi_hotplug_wq);

	return AE_OK;
}
2120
2121 acpi_status acpi_os_prepare_sleep(u8 sleep_state, u32 pm1a_control,
2122                                   u32 pm1b_control)
2123 {
2124         int rc = 0;
2125         if (__acpi_os_prepare_sleep)
2126                 rc = __acpi_os_prepare_sleep(sleep_state,
2127                                              pm1a_control, pm1b_control);
2128         if (rc < 0)
2129                 return AE_ERROR;
2130         else if (rc > 0)
2131                 return AE_CTRL_SKIP;
2132
2133         return AE_OK;
2134 }
2135
/* Register the callback acpi_os_prepare_sleep() will invoke (NULL clears). */
void acpi_os_set_prepare_sleep(int (*func)(u8 sleep_state,
                               u32 pm1a_ctrl, u32 pm1b_ctrl))
{
	__acpi_os_prepare_sleep = func;
}
2141
2142 acpi_status acpi_os_prepare_extended_sleep(u8 sleep_state, u32 val_a,
2143                                   u32 val_b)
2144 {
2145         int rc = 0;
2146         if (__acpi_os_prepare_extended_sleep)
2147                 rc = __acpi_os_prepare_extended_sleep(sleep_state,
2148                                              val_a, val_b);
2149         if (rc < 0)
2150                 return AE_ERROR;
2151         else if (rc > 0)
2152                 return AE_CTRL_SKIP;
2153
2154         return AE_OK;
2155 }
2156
/* Register the callback used by acpi_os_prepare_extended_sleep() (NULL clears). */
void acpi_os_set_prepare_extended_sleep(int (*func)(u8 sleep_state,
                               u32 val_a, u32 val_b))
{
	__acpi_os_prepare_extended_sleep = func;
}