drivers/gpu/drm/ttm/ttm_page_alloc.c
/*
 * Copyright (c) Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie <airlied@redhat.com>
 *          Jerome Glisse <jglisse@redhat.com>
 *          Pauli Nieminen <suokkos@gmail.com>
 */

/* Simple list-based uncached page pool
 * - The pool collects recently freed pages for reuse.
 * - page->lru is used to keep a free list.
 * - Pages currently in use are not tracked.
 */

#define pr_fmt(fmt) "[TTM] " fmt

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/highmem.h>
#include <linux/mm_types.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/seq_file.h> /* for seq_printf */
#include <linux/slab.h>
#include <linux/dma-mapping.h>

#include <linux/atomic.h>

#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_page_alloc.h>

#ifdef TTM_HAS_AGP
#include <asm/agp.h>
#endif

#define NUM_PAGES_TO_ALLOC              (PAGE_SIZE/sizeof(struct page *))
#define SMALL_ALLOCATION                16
#define FREE_ALL_PAGES                  (~0U)
/* times are in msecs */
#define PAGE_FREE_INTERVAL              1000
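
/*
 * Note: with 4 KiB pages and 8-byte page pointers (typical on 64-bit),
 * NUM_PAGES_TO_ALLOC evaluates to 4096 / 8 = 512, i.e. each refill or
 * free batch handles up to 512 pages with a single page-sized array.
 */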

/**
 * struct ttm_page_pool - Pool to reuse recently allocated uc/wc pages.
 *
 * @lock: Protects the shared pool from concurrent access. Must be used with
 * irqsave/irqrestore variants because the pool allocator may be called from
 * delayed work.
 * @fill_lock: Prevent concurrent calls to fill.
 * @list: Pool of free uc/wc pages for fast reuse.
 * @gfp_flags: Flags to pass for alloc_page.
 * @npages: Number of pages in pool.
 * @name: Name of the pool, shown in debugfs.
 * @nfrees: Statistics counter: total pages freed from this pool.
 * @nrefills: Statistics counter: number of times this pool was refilled.
 */
struct ttm_page_pool {
        spinlock_t              lock;
        bool                    fill_lock;
        struct list_head        list;
        gfp_t                   gfp_flags;
        unsigned                npages;
        char                    *name;
        unsigned long           nfrees;
        unsigned long           nrefills;
};

/**
 * Limits for the pool. They are handled without locks because the only place
 * where they may change is the sysfs store. Changes don't take effect
 * immediately anyway, so serializing access to them would be pointless.
 */

struct ttm_pool_opts {
        unsigned        alloc_size;
        unsigned        max_size;
        unsigned        small;
};

#define NUM_POOLS 4

/**
 * struct ttm_pool_manager - Holds memory pools for fast allocation
 *
 * The manager is a read-only object for the pool code, so it doesn't need
 * locking.
 *
 * @kobj: sysfs object exposing the pool limits.
 * @mm_shrink: Shrinker that reduces the pools under memory pressure.
 * @options: Tunable limits for the pools, see struct ttm_pool_opts.
 * @pools: All pool objects in use.
 **/
struct ttm_pool_manager {
        struct kobject          kobj;
        struct shrinker         mm_shrink;
        struct ttm_pool_opts    options;

        union {
                struct ttm_page_pool    pools[NUM_POOLS];
                struct {
                        struct ttm_page_pool    wc_pool;
                        struct ttm_page_pool    uc_pool;
                        struct ttm_page_pool    wc_pool_dma32;
                        struct ttm_page_pool    uc_pool_dma32;
                };
        };
};
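
/*
 * The union lets the four pools be addressed by name or by index.
 * ttm_get_pool() composes the index from two bits, so the layout is:
 *      pools[0] = wc_pool,       pools[1] = uc_pool,
 *      pools[2] = wc_pool_dma32, pools[3] = uc_pool_dma32
 * (bit 0: caching state, bit 1: TTM_PAGE_FLAG_DMA32).
 */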

static struct attribute ttm_page_pool_max = {
        .name = "pool_max_size",
        .mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_page_pool_small = {
        .name = "pool_small_allocation",
        .mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_page_pool_alloc_size = {
        .name = "pool_allocation_size",
        .mode = S_IRUGO | S_IWUSR
};

static struct attribute *ttm_pool_attrs[] = {
        &ttm_page_pool_max,
        &ttm_page_pool_small,
        &ttm_page_pool_alloc_size,
        NULL
};

static void ttm_pool_kobj_release(struct kobject *kobj)
{
        struct ttm_pool_manager *m =
                container_of(kobj, struct ttm_pool_manager, kobj);
        kfree(m);
}

static ssize_t ttm_pool_store(struct kobject *kobj,
                struct attribute *attr, const char *buffer, size_t size)
{
        struct ttm_pool_manager *m =
                container_of(kobj, struct ttm_pool_manager, kobj);
        int chars;
        unsigned val;
        chars = sscanf(buffer, "%u", &val);
        if (chars == 0)
                return size;

        /* Convert KiB to number of pages */
        val = val / (PAGE_SIZE >> 10);

        if (attr == &ttm_page_pool_max)
                m->options.max_size = val;
        else if (attr == &ttm_page_pool_small)
                m->options.small = val;
        else if (attr == &ttm_page_pool_alloc_size) {
                if (val > NUM_PAGES_TO_ALLOC*8) {
                        pr_err("Setting allocation size to %lu is not allowed. Recommended size is %lu\n",
                               NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7),
                               NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
                        return size;
                } else if (val > NUM_PAGES_TO_ALLOC) {
                        pr_warn("Setting allocation size to larger than %lu is not recommended\n",
                                NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
                }
                m->options.alloc_size = val;
        }

        return size;
}

static ssize_t ttm_pool_show(struct kobject *kobj,
                struct attribute *attr, char *buffer)
{
        struct ttm_pool_manager *m =
                container_of(kobj, struct ttm_pool_manager, kobj);
        unsigned val = 0;

        if (attr == &ttm_page_pool_max)
                val = m->options.max_size;
        else if (attr == &ttm_page_pool_small)
                val = m->options.small;
        else if (attr == &ttm_page_pool_alloc_size)
                val = m->options.alloc_size;

        /* Convert pages back to KiB */
        val = val * (PAGE_SIZE >> 10);

        return snprintf(buffer, PAGE_SIZE, "%u\n", val);
}

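/*
 * Example (illustrative; the exact sysfs path depends on where the
 * ttm_mem_global kobject that parents the "pool" directory is registered):
 *
 *      # values are in KiB and are converted to pages internally
 *      echo 65536 > .../pool/pool_max_size
 *      cat .../pool/pool_max_size
 */
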
static const struct sysfs_ops ttm_pool_sysfs_ops = {
        .show = &ttm_pool_show,
        .store = &ttm_pool_store,
};

static struct kobj_type ttm_pool_kobj_type = {
        .release = &ttm_pool_kobj_release,
        .sysfs_ops = &ttm_pool_sysfs_ops,
        .default_attrs = ttm_pool_attrs,
};

static struct ttm_pool_manager *_manager;

#ifndef CONFIG_X86
/*
 * Fallbacks for architectures without the x86 set_pages_array_*() helpers:
 * the best we can do is run the pages through the AGP (un)map hooks, which
 * handle whatever cache maintenance the architecture requires.
 */
static int set_pages_array_wb(struct page **pages, int addrinarray)
{
#ifdef TTM_HAS_AGP
        int i;

        for (i = 0; i < addrinarray; i++)
                unmap_page_from_agp(pages[i]);
#endif
        return 0;
}

static int set_pages_array_wc(struct page **pages, int addrinarray)
{
#ifdef TTM_HAS_AGP
        int i;

        for (i = 0; i < addrinarray; i++)
                map_page_into_agp(pages[i]);
#endif
        return 0;
}

static int set_pages_array_uc(struct page **pages, int addrinarray)
{
#ifdef TTM_HAS_AGP
        int i;

        for (i = 0; i < addrinarray; i++)
                map_page_into_agp(pages[i]);
#endif
        return 0;
}
#endif

/**
 * Select the right pool for the requested caching state and ttm flags.
 * Returns NULL for cached pages, which have no pool and are allocated and
 * freed directly.
 */
static struct ttm_page_pool *ttm_get_pool(int flags,
                enum ttm_caching_state cstate)
{
        int pool_index;

        if (cstate == tt_cached)
                return NULL;

        if (cstate == tt_wc)
                pool_index = 0x0;
        else
                pool_index = 0x1;

        if (flags & TTM_PAGE_FLAG_DMA32)
                pool_index |= 0x2;

        return &_manager->pools[pool_index];
}

/* set memory back to wb and free the pages. */
static void ttm_pages_put(struct page *pages[], unsigned npages)
{
        unsigned i;
        if (set_pages_array_wb(pages, npages))
                pr_err("Failed to set %d pages to wb!\n", npages);
        for (i = 0; i < npages; ++i)
                __free_page(pages[i]);
}

static void ttm_pool_update_free_locked(struct ttm_page_pool *pool,
                unsigned freed_pages)
{
        pool->npages -= freed_pages;
        pool->nfrees += freed_pages;
}

/**
 * Free pages from pool.
 *
 * To prevent hogging the ttm_swap process we only free NUM_PAGES_TO_ALLOC
 * pages in one go.
 *
 * @pool: to free the pages from
 * @nr_free: number of pages to free, or FREE_ALL_PAGES to free all pages.
 *
 * Returns the number of requested pages that could not be freed.
 **/
static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free)
{
        unsigned long irq_flags;
        struct page *p;
        struct page **pages_to_free;
        unsigned freed_pages = 0,
                 npages_to_free = nr_free;

        if (NUM_PAGES_TO_ALLOC < nr_free)
                npages_to_free = NUM_PAGES_TO_ALLOC;

        pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
                        GFP_KERNEL);
        if (!pages_to_free) {
                pr_err("Failed to allocate memory for pool free operation\n");
                return 0;
        }

restart:
        spin_lock_irqsave(&pool->lock, irq_flags);

        list_for_each_entry_reverse(p, &pool->list, lru) {
                if (freed_pages >= npages_to_free)
                        break;

                pages_to_free[freed_pages++] = p;
                /* We can only remove NUM_PAGES_TO_ALLOC at a time. */
                if (freed_pages >= NUM_PAGES_TO_ALLOC) {
                        /* remove range of pages from the pool */
                        __list_del(p->lru.prev, &pool->list);

                        ttm_pool_update_free_locked(pool, freed_pages);
                        /**
                         * Because changing page caching is costly
                         * we unlock the pool to prevent stalling.
                         */
                        spin_unlock_irqrestore(&pool->lock, irq_flags);

                        ttm_pages_put(pages_to_free, freed_pages);
                        if (likely(nr_free != FREE_ALL_PAGES))
                                nr_free -= freed_pages;

                        if (NUM_PAGES_TO_ALLOC >= nr_free)
                                npages_to_free = nr_free;
                        else
                                npages_to_free = NUM_PAGES_TO_ALLOC;

                        freed_pages = 0;

                        /* more pages still to free, restart the processing */
                        if (nr_free)
                                goto restart;

                        /* Not allowed to fall through or break because
                         * following context is inside spinlock while we are
                         * outside here.
                         */
                        goto out;

                }
        }

        /* remove range of pages from the pool */
        if (freed_pages) {
                __list_del(&p->lru, &pool->list);

                ttm_pool_update_free_locked(pool, freed_pages);
                nr_free -= freed_pages;
        }

        spin_unlock_irqrestore(&pool->lock, irq_flags);

        if (freed_pages)
                ttm_pages_put(pages_to_free, freed_pages);
out:
        kfree(pages_to_free);
        return nr_free;
}

/**
 * Callback for mm to request pool to reduce number of pages held.
 *
 * XXX: (dchinner) Deadlock warning!
 *
 * ttm_page_pool_free() does memory allocation using GFP_KERNEL.  That means
 * this can deadlock when called with a sc->gfp_mask that is not equal to
 * GFP_KERNEL.
 *
 * This code is crying out for a shrinker per pool....
 */
static unsigned long
ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
        static DEFINE_MUTEX(lock);
        static unsigned start_pool;
        unsigned i;
        unsigned pool_offset;
        struct ttm_page_pool *pool;
        int shrink_pages = sc->nr_to_scan;
        unsigned long freed = 0;

        if (!mutex_trylock(&lock))
                return SHRINK_STOP;
        pool_offset = ++start_pool % NUM_POOLS;
        /* select start pool in round robin fashion */
        for (i = 0; i < NUM_POOLS; ++i) {
                unsigned nr_free = shrink_pages;
                if (shrink_pages == 0)
                        break;
                pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
                shrink_pages = ttm_page_pool_free(pool, nr_free);
                freed += nr_free - shrink_pages;
        }
        mutex_unlock(&lock);
        return freed;
}


static unsigned long
ttm_pool_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
        unsigned i;
        unsigned long count = 0;

        for (i = 0; i < NUM_POOLS; ++i)
                count += _manager->pools[i].npages;

        return count;
}

static void ttm_pool_mm_shrink_init(struct ttm_pool_manager *manager)
{
        manager->mm_shrink.count_objects = ttm_pool_shrink_count;
        manager->mm_shrink.scan_objects = ttm_pool_shrink_scan;
        manager->mm_shrink.seeks = 1;
        register_shrinker(&manager->mm_shrink);
}

static void ttm_pool_mm_shrink_fini(struct ttm_pool_manager *manager)
{
        unregister_shrinker(&manager->mm_shrink);
}

static int ttm_set_pages_caching(struct page **pages,
                enum ttm_caching_state cstate, unsigned cpages)
{
        int r = 0;
        /* Set page caching */
        switch (cstate) {
        case tt_uncached:
                r = set_pages_array_uc(pages, cpages);
                if (r)
                        pr_err("Failed to set %d pages to uc!\n", cpages);
                break;
        case tt_wc:
                r = set_pages_array_wc(pages, cpages);
                if (r)
                        pr_err("Failed to set %d pages to wc!\n", cpages);
                break;
        default:
                break;
        }
        return r;
}

/**
 * Free the pages that failed to change caching state: they are removed from
 * the @pages list and freed. Pages from earlier, successfully converted
 * batches remain on the list.
 */
static void ttm_handle_caching_state_failure(struct list_head *pages,
                int ttm_flags, enum ttm_caching_state cstate,
                struct page **failed_pages, unsigned cpages)
{
        unsigned i;
        /* Failed pages have to be freed */
        for (i = 0; i < cpages; ++i) {
                list_del(&failed_pages[i]->lru);
                __free_page(failed_pages[i]);
        }
}

/**
 * Allocate new pages with correct caching.
 *
 * This function is reentrant if the caller updates count depending on the
 * number of pages returned in the pages array.
 */
static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags,
                int ttm_flags, enum ttm_caching_state cstate, unsigned count)
{
        struct page **caching_array;
        struct page *p;
        int r = 0;
        unsigned i, cpages;
        unsigned max_cpages = min(count,
                        (unsigned)(PAGE_SIZE/sizeof(struct page *)));

        /* allocate array for page caching change */
        caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL);

        if (!caching_array) {
                pr_err("Unable to allocate table for new pages\n");
                return -ENOMEM;
        }

        for (i = 0, cpages = 0; i < count; ++i) {
                p = alloc_page(gfp_flags);

                if (!p) {
                        pr_err("Unable to get page %u\n", i);

                        /* store already allocated pages in the pool after
                         * setting the caching state */
                        if (cpages) {
                                r = ttm_set_pages_caching(caching_array,
                                                          cstate, cpages);
                                if (r)
                                        ttm_handle_caching_state_failure(pages,
                                                ttm_flags, cstate,
                                                caching_array, cpages);
                        }
                        r = -ENOMEM;
                        goto out;
                }

#ifdef CONFIG_HIGHMEM
                /* GFP flags of a highmem page should never include DMA32,
                 * so we should be fine in that case.
                 */
                if (!PageHighMem(p))
#endif
                {
                        caching_array[cpages++] = p;
                        if (cpages == max_cpages) {
                                r = ttm_set_pages_caching(caching_array,
                                                cstate, cpages);
                                if (r) {
                                        ttm_handle_caching_state_failure(pages,
                                                ttm_flags, cstate,
                                                caching_array, cpages);
                                        goto out;
                                }
                                cpages = 0;
                        }
                }

                list_add(&p->lru, pages);
        }

        if (cpages) {
                r = ttm_set_pages_caching(caching_array, cstate, cpages);
                if (r)
                        ttm_handle_caching_state_failure(pages,
                                        ttm_flags, cstate,
                                        caching_array, cpages);
        }
out:
        kfree(caching_array);

        return r;
}

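/*
 * Note: the caching transitions above are deliberately batched through
 * caching_array (at most one page-sized array of pointers per
 * ttm_set_pages_caching() call), since changing page attributes may
 * require cache/TLB flushes that are far cheaper when done in bulk.
 */
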
/**
 * Fill the given pool if there aren't enough pages and the requested number
 * of pages is small.
 */
static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
                int ttm_flags, enum ttm_caching_state cstate, unsigned count,
                unsigned long *irq_flags)
{
        struct page *p;
        int r;
        unsigned cpages = 0;
        /**
         * Only allow one pool fill operation at a time.
         * If the pool doesn't have enough pages for the allocation, new
         * pages are allocated from outside of the pool.
         */
        if (pool->fill_lock)
                return;

        pool->fill_lock = true;

        /* If the allocation request is small and there are not enough
         * pages in the pool we fill the pool up first. */
        if (count < _manager->options.small
                && count > pool->npages) {
                struct list_head new_pages;
                unsigned alloc_size = _manager->options.alloc_size;

                /**
                 * Can't change page caching if in irqsave context. We have to
                 * drop the pool->lock.
                 */
                spin_unlock_irqrestore(&pool->lock, *irq_flags);

                INIT_LIST_HEAD(&new_pages);
                r = ttm_alloc_new_pages(&new_pages, pool->gfp_flags, ttm_flags,
                                cstate, alloc_size);
                spin_lock_irqsave(&pool->lock, *irq_flags);

                if (!r) {
                        list_splice(&new_pages, &pool->list);
                        ++pool->nrefills;
                        pool->npages += alloc_size;
                } else {
                        pr_err("Failed to fill pool (%p)\n", pool);
                        /* If we have any pages left put them in the pool.
                         * Count the pages on new_pages, not on pool->list,
                         * so npages isn't inflated by pages already there. */
                        list_for_each_entry(p, &new_pages, lru) {
                                ++cpages;
                        }
                        list_splice(&new_pages, &pool->list);
                        pool->npages += cpages;
                }

        }
        pool->fill_lock = false;
}

/**
 * Cut 'count' number of pages from the pool and put them on the return list.
 *
 * @return count of pages still required to fulfill the request.
 */
static unsigned ttm_page_pool_get_pages(struct ttm_page_pool *pool,
                                        struct list_head *pages,
                                        int ttm_flags,
                                        enum ttm_caching_state cstate,
                                        unsigned count)
{
        unsigned long irq_flags;
        struct list_head *p;
        unsigned i;

        spin_lock_irqsave(&pool->lock, irq_flags);
        ttm_page_pool_fill_locked(pool, ttm_flags, cstate, count, &irq_flags);

        if (count >= pool->npages) {
                /* take all pages from the pool */
                list_splice_init(&pool->list, pages);
                count -= pool->npages;
                pool->npages = 0;
                goto out;
        }
        /* Find the last page to include for the requested number of pages.
         * Walk the list from whichever end is closer to the cut point,
         * halving the search space. */
        if (count <= pool->npages/2) {
                i = 0;
                list_for_each(p, &pool->list) {
                        if (++i == count)
                                break;
                }
        } else {
                i = pool->npages + 1;
                list_for_each_prev(p, &pool->list) {
                        if (--i == count)
                                break;
                }
        }
        /* Cut 'count' number of pages from the pool */
        list_cut_position(pages, &pool->list, p);
        pool->npages -= count;
        count = 0;
out:
        spin_unlock_irqrestore(&pool->lock, irq_flags);
        return count;
}

/* Put all pages in the pages array into the correct pool to wait for reuse */
static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
                          enum ttm_caching_state cstate)
{
        unsigned long irq_flags;
        struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
        unsigned i;

        if (pool == NULL) {
                /* No pool for this memory type so free the pages */
                for (i = 0; i < npages; i++) {
                        if (pages[i]) {
                                if (page_count(pages[i]) != 1)
                                        pr_err("Erroneous page count. Leaking pages.\n");
                                __free_page(pages[i]);
                                pages[i] = NULL;
                        }
                }
                return;
        }

        spin_lock_irqsave(&pool->lock, irq_flags);
        for (i = 0; i < npages; i++) {
                if (pages[i]) {
                        if (page_count(pages[i]) != 1)
                                pr_err("Erroneous page count. Leaking pages.\n");
                        list_add_tail(&pages[i]->lru, &pool->list);
                        pages[i] = NULL;
                        pool->npages++;
                }
        }
        /* Check that we don't go over the pool limit */
        npages = 0;
        if (pool->npages > _manager->options.max_size) {
                npages = pool->npages - _manager->options.max_size;
                /* free at least NUM_PAGES_TO_ALLOC pages
                 * to reduce calls to set_memory_wb */
                if (npages < NUM_PAGES_TO_ALLOC)
                        npages = NUM_PAGES_TO_ALLOC;
        }
        spin_unlock_irqrestore(&pool->lock, irq_flags);
        if (npages)
                ttm_page_pool_free(pool, npages);
}

/*
 * On success the pages array will hold npages correctly cached pages.
 */
static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
                         enum ttm_caching_state cstate)
{
        struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
        struct list_head plist;
        struct page *p = NULL;
        gfp_t gfp_flags = GFP_USER;
        unsigned count;
        int r;

        /* set zero flag for page allocation if required */
        if (flags & TTM_PAGE_FLAG_ZERO_ALLOC)
                gfp_flags |= __GFP_ZERO;

        /* No pool for cached pages */
        if (pool == NULL) {
                if (flags & TTM_PAGE_FLAG_DMA32)
                        gfp_flags |= GFP_DMA32;
                else
                        gfp_flags |= GFP_HIGHUSER;

                for (r = 0; r < npages; ++r) {
                        p = alloc_page(gfp_flags);
                        if (!p) {
                                pr_err("Unable to allocate page\n");
                                return -ENOMEM;
                        }

                        pages[r] = p;
                }
                return 0;
        }

        /* combine the zero flag with the pool flags */
        gfp_flags |= pool->gfp_flags;

        /* First we take pages from the pool */
        INIT_LIST_HEAD(&plist);
        npages = ttm_page_pool_get_pages(pool, &plist, flags, cstate, npages);
        count = 0;
        list_for_each_entry(p, &plist, lru) {
                pages[count++] = p;
        }

        /* clear the pages coming from the pool if requested */
        if (flags & TTM_PAGE_FLAG_ZERO_ALLOC) {
                list_for_each_entry(p, &plist, lru) {
                        if (PageHighMem(p))
                                clear_highpage(p);
                        else
                                clear_page(page_address(p));
                }
        }

        /* If the pool didn't have enough pages, allocate new ones. */
        if (npages > 0) {
                /* ttm_alloc_new_pages doesn't reference pool so we can run
                 * multiple requests in parallel.
                 **/
                INIT_LIST_HEAD(&plist);
                r = ttm_alloc_new_pages(&plist, gfp_flags, flags, cstate, npages);
                list_for_each_entry(p, &plist, lru) {
                        pages[count++] = p;
                }
                if (r) {
                        /* If there are any pages in the list, put them back
                         * in the pool. */
                        pr_err("Failed to allocate extra pages for large request\n");
                        ttm_put_pages(pages, count, flags, cstate);
                        return r;
                }
        }

        return 0;
}

static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, gfp_t flags,
                char *name)
{
        spin_lock_init(&pool->lock);
        pool->fill_lock = false;
        INIT_LIST_HEAD(&pool->list);
        pool->npages = pool->nfrees = 0;
        pool->gfp_flags = flags;
        pool->name = name;
}

int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
{
        int ret;

        WARN_ON(_manager);

        pr_info("Initializing pool allocator\n");

        _manager = kzalloc(sizeof(*_manager), GFP_KERNEL);
        if (!_manager)
                return -ENOMEM;

        ttm_page_pool_init_locked(&_manager->wc_pool, GFP_HIGHUSER, "wc");

        ttm_page_pool_init_locked(&_manager->uc_pool, GFP_HIGHUSER, "uc");

        ttm_page_pool_init_locked(&_manager->wc_pool_dma32,
                                  GFP_USER | GFP_DMA32, "wc dma");

        ttm_page_pool_init_locked(&_manager->uc_pool_dma32,
                                  GFP_USER | GFP_DMA32, "uc dma");

        _manager->options.max_size = max_pages;
        _manager->options.small = SMALL_ALLOCATION;
        _manager->options.alloc_size = NUM_PAGES_TO_ALLOC;

        ret = kobject_init_and_add(&_manager->kobj, &ttm_pool_kobj_type,
                                   &glob->kobj, "pool");
        if (unlikely(ret != 0)) {
                kobject_put(&_manager->kobj);
                _manager = NULL;
                return ret;
        }

        ttm_pool_mm_shrink_init(_manager);

        return 0;
}

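/*
 * Minimal usage sketch (illustrative; in-tree the caller is the TTM memory
 * accounting code, and glob/max_pages stand in for values the caller
 * already has; max_pages is the per-pool page limit):
 *
 *      int ret = ttm_page_alloc_init(glob, max_pages);
 *      if (ret)
 *              return ret;
 *      ...
 *      ttm_page_alloc_fini();
 */
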
void ttm_page_alloc_fini(void)
{
        int i;

        pr_info("Finalizing pool allocator\n");
        ttm_pool_mm_shrink_fini(_manager);

        for (i = 0; i < NUM_POOLS; ++i)
                ttm_page_pool_free(&_manager->pools[i], FREE_ALL_PAGES);

        kobject_put(&_manager->kobj);
        _manager = NULL;
}

int ttm_pool_populate(struct ttm_tt *ttm)
{
        struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
        unsigned i;
        int ret;

        if (ttm->state != tt_unpopulated)
                return 0;

        for (i = 0; i < ttm->num_pages; ++i) {
                ret = ttm_get_pages(&ttm->pages[i], 1,
                                    ttm->page_flags,
                                    ttm->caching_state);
                if (ret != 0) {
                        ttm_pool_unpopulate(ttm);
                        return -ENOMEM;
                }

                ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
                                                false, false);
                if (unlikely(ret != 0)) {
                        ttm_pool_unpopulate(ttm);
                        return -ENOMEM;
                }
        }

        if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
                ret = ttm_tt_swapin(ttm);
                if (unlikely(ret != 0)) {
                        ttm_pool_unpopulate(ttm);
                        return ret;
                }
        }

        ttm->state = tt_unbound;
        return 0;
}
EXPORT_SYMBOL(ttm_pool_populate);

void ttm_pool_unpopulate(struct ttm_tt *ttm)
{
        unsigned i;

        for (i = 0; i < ttm->num_pages; ++i) {
                if (ttm->pages[i]) {
                        ttm_mem_global_free_page(ttm->glob->mem_glob,
                                                 ttm->pages[i]);
                        ttm_put_pages(&ttm->pages[i], 1,
                                      ttm->page_flags,
                                      ttm->caching_state);
                }
        }
        ttm->state = tt_unpopulated;
}
EXPORT_SYMBOL(ttm_pool_unpopulate);

int ttm_page_alloc_debugfs(struct seq_file *m, void *data)
{
        struct ttm_page_pool *p;
        unsigned i;
        char *h[] = {"pool", "refills", "pages freed", "size"};
        if (!_manager) {
                seq_printf(m, "No pool allocator running.\n");
                return 0;
        }
        seq_printf(m, "%6s %12s %13s %8s\n",
                        h[0], h[1], h[2], h[3]);
        for (i = 0; i < NUM_POOLS; ++i) {
                p = &_manager->pools[i];

                seq_printf(m, "%6s %12ld %13ld %8d\n",
                                p->name, p->nrefills,
                                p->nfrees, p->npages);
        }
        return 0;
}
EXPORT_SYMBOL(ttm_page_alloc_debugfs);
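
/*
 * Sample output (illustrative values only; columns follow the seq_printf
 * format strings above):
 *
 *        pool      refills   pages freed     size
 *          wc           64          8192      512
 *          uc            2             0       64
 *      wc dma            0             0        0
 *      uc dma            0             0        0
 *
 * Drivers typically hook this up as a drm_info_list entry, e.g.
 * {"ttm_page_alloc", ttm_page_alloc_debugfs, 0, NULL} passed to
 * drm_debugfs_create_files(); radeon, for one, wires it up this way.
 */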