drivers/infiniband/hw/hfi1/user_exp_rcv.c
/*
 * Copyright(c) 2015, 2016 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */
#include <asm/page.h>

#include "user_exp_rcv.h"
#include "trace.h"
#include "mmu_rb.h"

struct tid_group {
        struct list_head list;
        unsigned base;
        u8 size;
        u8 used;
        u8 map;
};

struct tid_rb_node {
        struct mmu_rb_node mmu;
        unsigned long phys;
        struct tid_group *grp;
        u32 rcventry;
        dma_addr_t dma_addr;
        bool freed;
        unsigned npages;
        struct page *pages[0];
};

struct tid_pageset {
        u16 idx;
        u16 count;
};

#define EXP_TID_SET_EMPTY(set) (set.count == 0 && list_empty(&set.list))

#define num_user_pages(vaddr, len)                                     \
        (1 + (((((unsigned long)(vaddr) +                              \
                 (unsigned long)(len) - 1) & PAGE_MASK) -              \
               ((unsigned long)vaddr & PAGE_MASK)) >> PAGE_SHIFT))
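/*
 * A quick worked example of num_user_pages() (assuming 4 KiB pages): a
 * buffer at vaddr = 0x1ff0 with len = 0x20 ends at 0x200f, so it touches
 * the pages at 0x1000 and 0x2000 and the macro yields
 * 1 + ((0x2000 - 0x1000) >> PAGE_SHIFT) = 2, even though len is much
 * smaller than a page.
 */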

static void unlock_exp_tids(struct hfi1_ctxtdata *, struct exp_tid_set *,
                            struct rb_root *);
static u32 find_phys_blocks(struct page **, unsigned, struct tid_pageset *);
static int set_rcvarray_entry(struct file *, unsigned long, u32,
                              struct tid_group *, struct page **, unsigned);
static int mmu_rb_insert(struct rb_root *, struct mmu_rb_node *);
static void mmu_rb_remove(struct rb_root *, struct mmu_rb_node *,
                          struct mm_struct *);
static int mmu_rb_invalidate(struct rb_root *, struct mmu_rb_node *);
static int program_rcvarray(struct file *, unsigned long, struct tid_group *,
                            struct tid_pageset *, unsigned, u16, struct page **,
                            u32 *, unsigned *, unsigned *);
static int unprogram_rcvarray(struct file *, u32, struct tid_group **);
static void clear_tid_node(struct hfi1_filedata *, u16, struct tid_rb_node *);

static struct mmu_rb_ops tid_rb_ops = {
        .insert = mmu_rb_insert,
        .remove = mmu_rb_remove,
        .invalidate = mmu_rb_invalidate
};

static inline u32 rcventry2tidinfo(u32 rcventry)
{
        u32 pair = rcventry & ~0x1;

        return EXP_TID_SET(IDX, pair >> 1) |
                EXP_TID_SET(CTRL, 1 << (rcventry - pair));
}
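
/*
 * Sketch of the encoding above: RcvArray entries are managed in pairs, so
 * e.g. rcventry 5 pairs down to 4, giving IDX = 4 >> 1 = 2 and
 * CTRL = 1 << (5 - 4) = 2; rcventry 4 would give the same IDX with
 * CTRL = 1. EXP_TID_SET() is assumed here to simply shift/mask the value
 * into its field of the TID info word.
 */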

static inline void exp_tid_group_init(struct exp_tid_set *set)
{
        INIT_LIST_HEAD(&set->list);
        set->count = 0;
}

static inline void tid_group_remove(struct tid_group *grp,
                                    struct exp_tid_set *set)
{
        list_del_init(&grp->list);
        set->count--;
}

static inline void tid_group_add_tail(struct tid_group *grp,
                                      struct exp_tid_set *set)
{
        list_add_tail(&grp->list, &set->list);
        set->count++;
}

static inline struct tid_group *tid_group_pop(struct exp_tid_set *set)
{
        struct tid_group *grp =
                list_first_entry(&set->list, struct tid_group, list);
        list_del_init(&grp->list);
        set->count--;
        return grp;
}

static inline void tid_group_move(struct tid_group *group,
                                  struct exp_tid_set *s1,
                                  struct exp_tid_set *s2)
{
        tid_group_remove(group, s1);
        tid_group_add_tail(group, s2);
}

/*
 * Initialize context and file private data needed for Expected
 * receive caching. This needs to be done after the context has
 * been configured with the eager/expected RcvEntry counts.
 */
int hfi1_user_exp_rcv_init(struct file *fp)
{
        struct hfi1_filedata *fd = fp->private_data;
        struct hfi1_ctxtdata *uctxt = fd->uctxt;
        struct hfi1_devdata *dd = uctxt->dd;
        unsigned tidbase;
        int i, ret = 0;

        spin_lock_init(&fd->tid_lock);
        spin_lock_init(&fd->invalid_lock);
        fd->tid_rb_root = RB_ROOT;

        if (!uctxt->subctxt_cnt || !fd->subctxt) {
                exp_tid_group_init(&uctxt->tid_group_list);
                exp_tid_group_init(&uctxt->tid_used_list);
                exp_tid_group_init(&uctxt->tid_full_list);

                tidbase = uctxt->expected_base;
                for (i = 0; i < uctxt->expected_count /
                             dd->rcv_entries.group_size; i++) {
                        struct tid_group *grp;

                        grp = kzalloc(sizeof(*grp), GFP_KERNEL);
                        if (!grp) {
                                /*
                                 * If we fail here, the groups already
                                 * allocated will be freed by the close
                                 * call.
                                 */
                                ret = -ENOMEM;
                                goto done;
                        }
                        grp->size = dd->rcv_entries.group_size;
                        grp->base = tidbase;
                        tid_group_add_tail(grp, &uctxt->tid_group_list);
                        tidbase += dd->rcv_entries.group_size;
                }
        }

        fd->entry_to_rb = kcalloc(uctxt->expected_count,
                                  sizeof(struct rb_node *),
                                  GFP_KERNEL);
        if (!fd->entry_to_rb)
                return -ENOMEM;

        if (!HFI1_CAP_IS_USET(TID_UNMAP)) {
                fd->invalid_tid_idx = 0;
                fd->invalid_tids = kzalloc(uctxt->expected_count *
                                           sizeof(u32), GFP_KERNEL);
                if (!fd->invalid_tids) {
                        ret = -ENOMEM;
                        goto done;
                }

                /*
                 * Register MMU notifier callbacks. If the registration
                 * fails, continue but turn off the TID caching for
                 * all user contexts.
                 */
                ret = hfi1_mmu_rb_register(&fd->tid_rb_root, &tid_rb_ops);
                if (ret) {
                        dd_dev_info(dd,
                                    "Failed MMU notifier registration %d\n",
                                    ret);
                        HFI1_CAP_USET(TID_UNMAP);
                        ret = 0;
                }
        }

        /*
         * PSM does not have a good way to separate, count, and
         * effectively enforce a limit on RcvArray entries used by
         * subctxts (when context sharing is used) when TID caching
         * is enabled. To help with that, we calculate a per-process
         * RcvArray entry share and enforce that.
         * If TID caching is not in use, PSM deals with usage on its
         * own. In that case, we allow any subctxt to take all of the
         * entries.
         *
         * Make sure that we set the tid counts only after successful
         * init.
         */
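        /*
         * Worked example (hypothetical numbers): with expected_count = 2048
         * and subctxt_cnt = 3, the division below gives each process a base
         * share of 682 entries, and the remainder of 2 bumps subctxts 0 and
         * 1 up to 683, so every RcvArray entry is accounted to exactly one
         * process.
         */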
        spin_lock(&fd->tid_lock);
        if (uctxt->subctxt_cnt && !HFI1_CAP_IS_USET(TID_UNMAP)) {
                u16 remainder;

                fd->tid_limit = uctxt->expected_count / uctxt->subctxt_cnt;
                remainder = uctxt->expected_count % uctxt->subctxt_cnt;
                if (remainder && fd->subctxt < remainder)
                        fd->tid_limit++;
        } else {
                fd->tid_limit = uctxt->expected_count;
        }
        spin_unlock(&fd->tid_lock);
done:
        return ret;
}

int hfi1_user_exp_rcv_free(struct hfi1_filedata *fd)
{
        struct hfi1_ctxtdata *uctxt = fd->uctxt;
        struct tid_group *grp, *gptr;

        if (!test_bit(HFI1_CTXT_SETUP_DONE, &uctxt->event_flags))
                return 0;
        /*
         * The notifier would have been removed when the process's mm
         * was freed.
         */
        if (!HFI1_CAP_IS_USET(TID_UNMAP))
                hfi1_mmu_rb_unregister(&fd->tid_rb_root);

        kfree(fd->invalid_tids);

        if (!uctxt->cnt) {
                if (!EXP_TID_SET_EMPTY(uctxt->tid_full_list))
                        unlock_exp_tids(uctxt, &uctxt->tid_full_list,
                                        &fd->tid_rb_root);
                if (!EXP_TID_SET_EMPTY(uctxt->tid_used_list))
                        unlock_exp_tids(uctxt, &uctxt->tid_used_list,
                                        &fd->tid_rb_root);
                list_for_each_entry_safe(grp, gptr, &uctxt->tid_group_list.list,
                                         list) {
                        list_del_init(&grp->list);
                        kfree(grp);
                }
                hfi1_clear_tids(uctxt);
        }

        kfree(fd->entry_to_rb);
        return 0;
}

/*
 * Write an "empty" RcvArray entry.
 * This function exists so the TID registration code can use it
 * to write to unused/unneeded entries and still take advantage
 * of the WC performance improvements. The HFI will ignore this
 * write to the RcvArray entry.
 */
static inline void rcv_array_wc_fill(struct hfi1_devdata *dd, u32 index)
{
        /*
         * Doing the WC fill writes only makes sense if the device is
         * present and the RcvArray has been mapped as WC memory.
         */
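        /*
         * RcvArray entries are written 64 bits at a time (hence the
         * writeq() and the index * 8 byte offset into the WC mapping).
         */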
        if ((dd->flags & HFI1_PRESENT) && dd->rcvarray_wc)
                writeq(0, dd->rcvarray_wc + (index * 8));
}

/*
 * RcvArray entry allocation for Expected Receives is done by the
 * following algorithm:
 *
 * The context keeps 3 lists of groups of RcvArray entries:
 *   1. List of empty groups - tid_group_list
 *      This list is created during user context creation and
 *      contains elements which describe sets (of 8) of empty
 *      RcvArray entries.
 *   2. List of partially used groups - tid_used_list
 *      This list contains sets of RcvArray entries which are
 *      not completely used up. Another mapping request could
 *      use some or all of the remaining entries.
 *   3. List of full groups - tid_full_list
 *      This is the list where sets that are completely used
 *      up go.
 *
 * An attempt to optimize the usage of RcvArray entries is
 * made by finding all sets of physically contiguous pages in a
 * user's buffer.
 * These physically contiguous sets are further split into
 * sizes supported by the receive engine of the HFI. The
 * resulting sets of pages are stored in struct tid_pageset,
 * which describes the sets as:
 *    * .count - number of pages in this set
 *    * .idx - starting index into struct page ** array
 *             of this set
 *
 * From this point on, the algorithm deals with the page sets
 * described above. The number of pagesets is divided by the
 * RcvArray group size to produce the number of full groups
 * needed.
 *
 * Groups from the 3 lists are manipulated using the following
 * rules:
 *   1. For each set of 8 pagesets, a complete group from
 *      tid_group_list is taken, programmed, and moved to
 *      the tid_full_list list.
 *   2. For all remaining pagesets:
 *      2.1 If the tid_used_list is empty and the tid_group_list
 *          is empty, stop processing pagesets and return only
 *          what has been programmed up to this point.
 *      2.2 If the tid_used_list is empty and the tid_group_list
 *          is not empty, move a group from tid_group_list to
 *          tid_used_list.
 *      2.3 For each group in tid_used_list, program as much as
 *          can fit into the group. If the group becomes fully
 *          used, move it to tid_full_list.
 */
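/*
 * Worked example of the rules above (hypothetical numbers, group size 8):
 * a request that produces 21 pagesets programs 2 complete groups from
 * tid_group_list (16 pagesets, both groups moving to tid_full_list) and
 * then places the remaining 5 pagesets in a partially used group, pulling
 * one from tid_group_list into tid_used_list if the latter is empty.
 */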
int hfi1_user_exp_rcv_setup(struct file *fp, struct hfi1_tid_info *tinfo)
{
        int ret = 0, need_group = 0, pinned;
        struct hfi1_filedata *fd = fp->private_data;
        struct hfi1_ctxtdata *uctxt = fd->uctxt;
        struct hfi1_devdata *dd = uctxt->dd;
        unsigned npages, ngroups, pageidx = 0, pageset_count, npagesets,
                tididx = 0, mapped, mapped_pages = 0;
        unsigned long vaddr = tinfo->vaddr;
        struct page **pages = NULL;
        u32 *tidlist = NULL;
        struct tid_pageset *pagesets = NULL;

        /* Get the number of pages the user buffer spans */
        npages = num_user_pages(vaddr, tinfo->length);
        if (!npages)
                return -EINVAL;

        if (npages > uctxt->expected_count) {
                dd_dev_err(dd, "Expected buffer too big\n");
                return -EINVAL;
        }

        /* Verify that access is OK for the user buffer */
        if (!access_ok(VERIFY_WRITE, (void __user *)vaddr,
                       npages * PAGE_SIZE)) {
                dd_dev_err(dd, "Fail vaddr %p, %u pages, !access_ok\n",
                           (void *)vaddr, npages);
                return -EFAULT;
        }

        pagesets = kcalloc(uctxt->expected_count, sizeof(*pagesets),
                           GFP_KERNEL);
        if (!pagesets)
                return -ENOMEM;

        /* Allocate the array of struct page pointers needed for pinning */
        pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL);
        if (!pages) {
                ret = -ENOMEM;
                goto bail;
        }

        /*
         * Pin all the pages of the user buffer. If we can't pin all the
         * pages, accept the amount pinned so far and program only that.
         * User space knows how to deal with partially programmed buffers.
         */
        if (!hfi1_can_pin_pages(dd, fd->tid_n_pinned, npages)) {
                ret = -ENOMEM;
                goto bail;
        }

        pinned = hfi1_acquire_user_pages(vaddr, npages, true, pages);
        if (pinned <= 0) {
                ret = pinned;
                goto bail;
        }
        /* Only count the pages that were actually pinned */
        fd->tid_n_pinned += pinned;

        /* Find sets of physically contiguous pages */
        npagesets = find_phys_blocks(pages, pinned, pagesets);

        /*
         * We don't need to access this under a lock since tid_used is per
         * process and the same process cannot be in hfi1_user_exp_rcv_clear()
         * and hfi1_user_exp_rcv_setup() at the same time.
         */
        spin_lock(&fd->tid_lock);
        if (fd->tid_used + npagesets > fd->tid_limit)
                pageset_count = fd->tid_limit - fd->tid_used;
        else
                pageset_count = npagesets;
        spin_unlock(&fd->tid_lock);

        if (!pageset_count)
                goto bail;

        ngroups = pageset_count / dd->rcv_entries.group_size;
        tidlist = kcalloc(pageset_count, sizeof(*tidlist), GFP_KERNEL);
        if (!tidlist) {
                ret = -ENOMEM;
                goto nomem;
        }

        tididx = 0;

        /*
         * From this point on, we are going to be using shared (between master
         * and subcontexts) context resources. We need to take the lock.
         */
        mutex_lock(&uctxt->exp_lock);
        /*
         * The first step is to program the RcvArray entries which are complete
         * groups.
         */
        while (ngroups && uctxt->tid_group_list.count) {
                struct tid_group *grp =
                        tid_group_pop(&uctxt->tid_group_list);

                ret = program_rcvarray(fp, vaddr, grp, pagesets,
                                       pageidx, dd->rcv_entries.group_size,
                                       pages, tidlist, &tididx, &mapped);
                /*
                 * If there was a failure to program the RcvArray
                 * entries for the entire group, reset the grp fields
                 * and add the grp back to the free group list.
                 */
                if (ret <= 0) {
                        tid_group_add_tail(grp, &uctxt->tid_group_list);
                        hfi1_cdbg(TID,
                                  "Failed to program RcvArray group %d", ret);
                        goto unlock;
                }

                tid_group_add_tail(grp, &uctxt->tid_full_list);
                ngroups--;
                pageidx += ret;
                mapped_pages += mapped;
        }

        while (pageidx < pageset_count) {
                struct tid_group *grp, *ptr;
                /*
                 * If we don't have any partially used tid groups, check
                 * if we have empty groups. If so, take one from there and
                 * put in the partially used list.
                 */
                if (!uctxt->tid_used_list.count || need_group) {
                        if (!uctxt->tid_group_list.count)
                                goto unlock;

                        grp = tid_group_pop(&uctxt->tid_group_list);
                        tid_group_add_tail(grp, &uctxt->tid_used_list);
                        need_group = 0;
                }
                /*
                 * There is an optimization opportunity here - instead of
                 * fitting as many page sets as we can, check for a group
                 * later on in the list that could fit all of them.
                 */
                list_for_each_entry_safe(grp, ptr, &uctxt->tid_used_list.list,
                                         list) {
                        unsigned use = min_t(unsigned, pageset_count - pageidx,
                                             grp->size - grp->used);

                        ret = program_rcvarray(fp, vaddr, grp, pagesets,
                                               pageidx, use, pages, tidlist,
                                               &tididx, &mapped);
                        if (ret < 0) {
                                hfi1_cdbg(TID,
                                          "Failed to program RcvArray entries %d",
                                          ret);
                                ret = -EFAULT;
                                goto unlock;
                        } else if (ret > 0) {
                                if (grp->used == grp->size)
                                        tid_group_move(grp,
                                                       &uctxt->tid_used_list,
                                                       &uctxt->tid_full_list);
                                pageidx += ret;
                                mapped_pages += mapped;
                                need_group = 0;
                                /* Check if we are done so we break out early */
                                if (pageidx >= pageset_count)
                                        break;
                        } else if (WARN_ON(ret == 0)) {
                                /*
                                 * If ret is 0, we did not program any entries
                                 * into this group, which can only happen if
                                 * we've screwed up the accounting somewhere.
                                 * Warn and try to continue.
                                 */
                                need_group = 1;
                        }
                }
        }
unlock:
        mutex_unlock(&uctxt->exp_lock);
nomem:
        hfi1_cdbg(TID, "total mapped: tidpairs:%u pages:%u (%d)", tididx,
                  mapped_pages, ret);
        if (tididx) {
                spin_lock(&fd->tid_lock);
                fd->tid_used += tididx;
                spin_unlock(&fd->tid_lock);
                tinfo->tidcnt = tididx;
                tinfo->length = mapped_pages * PAGE_SIZE;

                if (copy_to_user((void __user *)(unsigned long)tinfo->tidlist,
                                 tidlist, sizeof(tidlist[0]) * tididx)) {
                        unsigned i;

                        /*
                         * On failure to copy to the user level, undo
                         * everything done so far so we don't leak resources.
                         * tinfo->tidlist is a user address, so the cleanup
                         * cannot go through hfi1_user_exp_rcv_clear() with a
                         * kernel pointer (copy_from_user() on a kernel
                         * address would fail); unprogram the entries
                         * directly from the kernel copy instead.
                         */
                        mutex_lock(&uctxt->exp_lock);
                        for (i = 0; i < tididx; i++)
                                unprogram_rcvarray(fp, tidlist[i], NULL);
                        mutex_unlock(&uctxt->exp_lock);
                        spin_lock(&fd->tid_lock);
                        fd->tid_used -= tididx;
                        spin_unlock(&fd->tid_lock);
                        tinfo->tidcnt = 0;
                        ret = -EFAULT;
                }
        }

        /*
         * If not everything was mapped (due to insufficient RcvArray entries,
         * for example), unpin all unmapped pages so we can pin them next time.
         */
        if (mapped_pages != pinned) {
                hfi1_release_user_pages(current->mm, &pages[mapped_pages],
                                        pinned - mapped_pages,
                                        false);
                fd->tid_n_pinned -= pinned - mapped_pages;
        }
bail:
        kfree(pagesets);
        kfree(pages);
        kfree(tidlist);
        return ret > 0 ? 0 : ret;
}

int hfi1_user_exp_rcv_clear(struct file *fp, struct hfi1_tid_info *tinfo)
{
        int ret = 0;
        struct hfi1_filedata *fd = fp->private_data;
        struct hfi1_ctxtdata *uctxt = fd->uctxt;
        u32 *tidinfo;
        unsigned tididx;

        tidinfo = kcalloc(tinfo->tidcnt, sizeof(*tidinfo), GFP_KERNEL);
        if (!tidinfo)
                return -ENOMEM;

        if (copy_from_user(tidinfo, (void __user *)(unsigned long)
                           tinfo->tidlist, sizeof(tidinfo[0]) *
                           tinfo->tidcnt)) {
                ret = -EFAULT;
                goto done;
        }

        mutex_lock(&uctxt->exp_lock);
        for (tididx = 0; tididx < tinfo->tidcnt; tididx++) {
                ret = unprogram_rcvarray(fp, tidinfo[tididx], NULL);
                if (ret) {
                        hfi1_cdbg(TID, "Failed to unprogram rcv array %d",
                                  ret);
                        break;
                }
        }
        spin_lock(&fd->tid_lock);
        fd->tid_used -= tididx;
        spin_unlock(&fd->tid_lock);
        tinfo->tidcnt = tididx;
        mutex_unlock(&uctxt->exp_lock);
done:
        kfree(tidinfo);
        return ret;
}

int hfi1_user_exp_rcv_invalid(struct file *fp, struct hfi1_tid_info *tinfo)
{
        struct hfi1_filedata *fd = fp->private_data;
        struct hfi1_ctxtdata *uctxt = fd->uctxt;
        unsigned long *ev = uctxt->dd->events +
                (((uctxt->ctxt - uctxt->dd->first_user_ctxt) *
                  HFI1_MAX_SHARED_CTXTS) + fd->subctxt);
        u32 *array;
        int ret = 0;

        if (!fd->invalid_tids)
                return -EINVAL;

        /*
         * copy_to_user() can sleep, which will leave the invalid_lock
         * locked and cause the MMU notifier to be blocked on the lock
         * for a long time.
         * Copy the data to a local buffer so we can release the lock.
         */
        array = kcalloc(uctxt->expected_count, sizeof(*array), GFP_KERNEL);
        if (!array)
                return -ENOMEM;

        spin_lock(&fd->invalid_lock);
        if (fd->invalid_tid_idx) {
                memcpy(array, fd->invalid_tids, sizeof(*array) *
                       fd->invalid_tid_idx);
                memset(fd->invalid_tids, 0, sizeof(*fd->invalid_tids) *
                       fd->invalid_tid_idx);
                tinfo->tidcnt = fd->invalid_tid_idx;
                fd->invalid_tid_idx = 0;
                /*
                 * Reset the user flag while still holding the lock.
                 * Otherwise, PSM can miss events.
                 */
                clear_bit(_HFI1_EVENT_TID_MMU_NOTIFY_BIT, ev);
        } else {
                tinfo->tidcnt = 0;
        }
        spin_unlock(&fd->invalid_lock);

        if (tinfo->tidcnt) {
                if (copy_to_user((void __user *)tinfo->tidlist,
                                 array, sizeof(*array) * tinfo->tidcnt))
                        ret = -EFAULT;
        }
        kfree(array);

        return ret;
}

static u32 find_phys_blocks(struct page **pages, unsigned npages,
                            struct tid_pageset *list)
{
        unsigned pagecount, pageidx, setcount = 0, i;
        unsigned long pfn, this_pfn;

        if (!npages)
                return 0;

        /*
         * Look for sets of physically contiguous pages in the user buffer.
         * This will allow us to optimize Expected RcvArray entry usage by
         * using the bigger supported sizes.
         */
        pfn = page_to_pfn(pages[0]);
        for (pageidx = 0, pagecount = 1, i = 1; i <= npages; i++) {
                this_pfn = i < npages ? page_to_pfn(pages[i]) : 0;

                /*
                 * If the pfn's are not sequential, pages are not physically
                 * contiguous.
                 */
                if (this_pfn != ++pfn) {
                        /*
                         * At this point we have to loop over the set of
                         * physically contiguous pages and break them down
                         * into sizes supported by the HW.
                         * There are two main constraints:
                         *     1. The max buffer size is MAX_EXPECTED_BUFFER.
                         *        If the total set size is bigger than that,
                         *        program only a MAX_EXPECTED_BUFFER chunk.
                         *     2. The buffer size has to be a power of two. If
                         *        it is not, round down to the closest power
                         *        of two and program that size.
                         */
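                        /*
                         * Worked example (assuming 4 KiB pages and a
                         * MAX_EXPECTED_BUFFER of at least 28 KiB): a
                         * contiguous run of 7 pages is not a power of two,
                         * so the loop below emits power-of-two chunks of
                         * 4, 2 and 1 pages, each becoming its own
                         * tid_pageset entry.
                         */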
                        while (pagecount) {
                                int maxpages = pagecount;
                                u32 bufsize = pagecount * PAGE_SIZE;

                                if (bufsize > MAX_EXPECTED_BUFFER)
                                        maxpages =
                                                MAX_EXPECTED_BUFFER >>
                                                PAGE_SHIFT;
                                else if (!is_power_of_2(bufsize))
                                        maxpages =
                                                rounddown_pow_of_two(bufsize) >>
                                                PAGE_SHIFT;

                                list[setcount].idx = pageidx;
                                list[setcount].count = maxpages;
                                pagecount -= maxpages;
                                pageidx += maxpages;
                                setcount++;
                        }
                        pageidx = i;
                        pagecount = 1;
                        pfn = this_pfn;
                } else {
                        pagecount++;
                }
        }
        return setcount;
}

/**
 * program_rcvarray() - program an RcvArray group with receive buffers
 * @fp: file pointer
 * @vaddr: starting user virtual address
 * @grp: RcvArray group
 * @sets: array of struct tid_pageset holding information on physically
 *        contiguous chunks from the user buffer
 * @start: starting index into sets array
 * @count: number of struct tid_pageset's to program
 * @pages: an array of struct page * for the user buffer
 * @tidlist: the array of u32 elements where the information about the
 *           programmed RcvArray entries is to be encoded.
 * @tididx: starting offset into tidlist
 * @pmapped: (output parameter) number of pages programmed into the RcvArray
 *           entries.
 *
 * This function will program up to 'count' number of RcvArray entries from the
 * group 'grp'. To make best use of write-combining writes, the function will
 * perform writes to the unused RcvArray entries which will be ignored by the
 * HW. Each RcvArray entry will be programmed with a physically contiguous
 * buffer chunk from the user's virtual buffer.
 *
 * Return:
 * -EINVAL if the requested count is larger than the size of the group,
 * -ENOMEM or -EFAULT on error from set_rcvarray_entry(), or
 * number of RcvArray entries programmed.
 */
static int program_rcvarray(struct file *fp, unsigned long vaddr,
                            struct tid_group *grp,
                            struct tid_pageset *sets,
                            unsigned start, u16 count, struct page **pages,
                            u32 *tidlist, unsigned *tididx, unsigned *pmapped)
{
        struct hfi1_filedata *fd = fp->private_data;
        struct hfi1_ctxtdata *uctxt = fd->uctxt;
        struct hfi1_devdata *dd = uctxt->dd;
        u16 idx;
        u32 tidinfo = 0, rcventry, useidx = 0;
        int mapped = 0;

        /* Count should never be larger than the group size */
        if (count > grp->size)
                return -EINVAL;

        /* Find the first unused entry in the group */
        for (idx = 0; idx < grp->size; idx++) {
                if (!(grp->map & (1 << idx))) {
                        useidx = idx;
                        break;
                }
                rcv_array_wc_fill(dd, grp->base + idx);
        }

        idx = 0;
        while (idx < count) {
                u16 npages, pageidx, setidx = start + idx;
                int ret = 0;

                /*
                 * If this entry in the group is used, move to the next one.
                 * If we go past the end of the group, exit the loop.
                 */
                if (useidx >= grp->size) {
                        break;
                } else if (grp->map & (1 << useidx)) {
                        rcv_array_wc_fill(dd, grp->base + useidx);
                        useidx++;
                        continue;
                }

                rcventry = grp->base + useidx;
                npages = sets[setidx].count;
                pageidx = sets[setidx].idx;

                ret = set_rcvarray_entry(fp, vaddr + (pageidx * PAGE_SIZE),
                                         rcventry, grp, pages + pageidx,
                                         npages);
                if (ret)
                        return ret;
                mapped += npages;

                tidinfo = rcventry2tidinfo(rcventry - uctxt->expected_base) |
                        EXP_TID_SET(LEN, npages);
                tidlist[(*tididx)++] = tidinfo;
                grp->used++;
                grp->map |= 1 << useidx++;
                idx++;
        }

        /* Fill the rest of the group with "blank" writes */
        for (; useidx < grp->size; useidx++)
                rcv_array_wc_fill(dd, grp->base + useidx);
        *pmapped = mapped;
        return idx;
}

static int set_rcvarray_entry(struct file *fp, unsigned long vaddr,
                              u32 rcventry, struct tid_group *grp,
                              struct page **pages, unsigned npages)
{
        int ret;
        struct hfi1_filedata *fd = fp->private_data;
        struct hfi1_ctxtdata *uctxt = fd->uctxt;
        struct tid_rb_node *node;
        struct hfi1_devdata *dd = uctxt->dd;
        struct rb_root *root = &fd->tid_rb_root;
        dma_addr_t phys;

        /*
         * Allocate the node first so we can handle a potential
         * failure before we've programmed anything.
         */
        node = kzalloc(sizeof(*node) + (sizeof(struct page *) * npages),
                       GFP_KERNEL);
        if (!node)
                return -ENOMEM;

        phys = pci_map_single(dd->pcidev,
                              __va(page_to_phys(pages[0])),
                              npages * PAGE_SIZE, PCI_DMA_FROMDEVICE);
        if (dma_mapping_error(&dd->pcidev->dev, phys)) {
                dd_dev_err(dd, "Failed to DMA map Exp Rcv pages 0x%llx\n",
                           phys);
                kfree(node);
                return -EFAULT;
        }

        node->mmu.addr = vaddr;
        node->mmu.len = npages * PAGE_SIZE;
        node->phys = page_to_phys(pages[0]);
        node->npages = npages;
        node->rcventry = rcventry;
        node->dma_addr = phys;
        node->grp = grp;
        node->freed = false;
        memcpy(node->pages, pages, sizeof(struct page *) * npages);

        if (HFI1_CAP_IS_USET(TID_UNMAP))
                ret = mmu_rb_insert(root, &node->mmu);
        else
                ret = hfi1_mmu_rb_insert(root, &node->mmu);

        if (ret) {
                hfi1_cdbg(TID, "Failed to insert RB node %u 0x%lx, 0x%lx %d",
                          node->rcventry, node->mmu.addr, node->phys, ret);
                pci_unmap_single(dd->pcidev, phys, npages * PAGE_SIZE,
                                 PCI_DMA_FROMDEVICE);
                kfree(node);
                return -EFAULT;
        }
        hfi1_put_tid(dd, rcventry, PT_EXPECTED, phys, ilog2(npages) + 1);
        trace_hfi1_exp_tid_reg(uctxt->ctxt, fd->subctxt, rcventry, npages,
                               node->mmu.addr, node->phys, phys);
        return 0;
}

static int unprogram_rcvarray(struct file *fp, u32 tidinfo,
                              struct tid_group **grp)
{
        struct hfi1_filedata *fd = fp->private_data;
        struct hfi1_ctxtdata *uctxt = fd->uctxt;
        struct hfi1_devdata *dd = uctxt->dd;
        struct tid_rb_node *node;
        u8 tidctrl = EXP_TID_GET(tidinfo, CTRL);
        u32 tididx = EXP_TID_GET(tidinfo, IDX) << 1, rcventry;

        if (tididx >= uctxt->expected_count) {
                dd_dev_err(dd, "Invalid RcvArray entry (%u) index for ctxt %u\n",
                           tididx, uctxt->ctxt);
                return -EINVAL;
        }

        if (tidctrl == 0x3)
                return -EINVAL;

        rcventry = tididx + (tidctrl - 1);
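        /*
         * This inverts rcventry2tidinfo(): CTRL is 1 or 2 for the even/odd
         * entry of a pair (0x3 would name both entries, which a single TID
         * cannot do, hence the rejection above), so e.g. IDX = 2, CTRL = 2
         * decodes back to relative rcventry (2 << 1) + (2 - 1) = 5.
         */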

        node = fd->entry_to_rb[rcventry];
        if (!node || node->rcventry != (uctxt->expected_base + rcventry))
                return -EBADF;
        if (HFI1_CAP_IS_USET(TID_UNMAP))
                mmu_rb_remove(&fd->tid_rb_root, &node->mmu, NULL);
        else
                hfi1_mmu_rb_remove(&fd->tid_rb_root, &node->mmu);

        if (grp)
                *grp = node->grp;
        clear_tid_node(fd, fd->subctxt, node);
        return 0;
}

static void clear_tid_node(struct hfi1_filedata *fd, u16 subctxt,
                           struct tid_rb_node *node)
{
        struct hfi1_ctxtdata *uctxt = fd->uctxt;
        struct hfi1_devdata *dd = uctxt->dd;

        trace_hfi1_exp_tid_unreg(uctxt->ctxt, fd->subctxt, node->rcventry,
                                 node->npages, node->mmu.addr, node->phys,
                                 node->dma_addr);

        hfi1_put_tid(dd, node->rcventry, PT_INVALID, 0, 0);
        /*
         * Make sure device has seen the write before we unpin the
         * pages.
         */
        flush_wc();

        pci_unmap_single(dd->pcidev, node->dma_addr, node->mmu.len,
                         PCI_DMA_FROMDEVICE);
        hfi1_release_user_pages(current->mm, node->pages, node->npages, true);
        fd->tid_n_pinned -= node->npages;

        node->grp->used--;
        node->grp->map &= ~(1 << (node->rcventry - node->grp->base));

        if (node->grp->used == node->grp->size - 1)
                tid_group_move(node->grp, &uctxt->tid_full_list,
                               &uctxt->tid_used_list);
        else if (!node->grp->used)
                tid_group_move(node->grp, &uctxt->tid_used_list,
                               &uctxt->tid_group_list);
        kfree(node);
}

static void unlock_exp_tids(struct hfi1_ctxtdata *uctxt,
                            struct exp_tid_set *set, struct rb_root *root)
{
        struct tid_group *grp, *ptr;
        struct hfi1_filedata *fd = container_of(root, struct hfi1_filedata,
                                                tid_rb_root);
        int i;

        list_for_each_entry_safe(grp, ptr, &set->list, list) {
                list_del_init(&grp->list);

                for (i = 0; i < grp->size; i++) {
                        if (grp->map & (1 << i)) {
                                u16 rcventry = grp->base + i;
                                struct tid_rb_node *node;

                                node = fd->entry_to_rb[rcventry -
                                                          uctxt->expected_base];
                                if (!node || node->rcventry != rcventry)
                                        continue;
                                if (HFI1_CAP_IS_USET(TID_UNMAP))
                                        mmu_rb_remove(&fd->tid_rb_root,
                                                      &node->mmu, NULL);
                                else
                                        hfi1_mmu_rb_remove(&fd->tid_rb_root,
                                                           &node->mmu);
                                clear_tid_node(fd, -1, node);
                        }
                }
        }
}

static int mmu_rb_invalidate(struct rb_root *root, struct mmu_rb_node *mnode)
{
        struct hfi1_filedata *fdata =
                container_of(root, struct hfi1_filedata, tid_rb_root);
        struct hfi1_ctxtdata *uctxt = fdata->uctxt;
        struct tid_rb_node *node =
                container_of(mnode, struct tid_rb_node, mmu);

        if (node->freed)
                return 0;

        trace_hfi1_exp_tid_inval(uctxt->ctxt, fdata->subctxt, node->mmu.addr,
                                 node->rcventry, node->npages, node->dma_addr);
        node->freed = true;

        spin_lock(&fdata->invalid_lock);
        if (fdata->invalid_tid_idx < uctxt->expected_count) {
                fdata->invalid_tids[fdata->invalid_tid_idx] =
                        rcventry2tidinfo(node->rcventry - uctxt->expected_base);
                fdata->invalid_tids[fdata->invalid_tid_idx] |=
                        EXP_TID_SET(LEN, node->npages);
                if (!fdata->invalid_tid_idx) {
                        unsigned long *ev;

                        /*
                         * hfi1_set_uevent_bits() sets a user event flag
                         * for all processes. Because calling into the
                         * driver to process TID cache invalidations is
                         * expensive and TID cache invalidations are
                         * handled on a per-process basis, we can
                         * optimize this to set the flag only for the
                         * process in question.
                         */
                        ev = uctxt->dd->events +
                                (((uctxt->ctxt - uctxt->dd->first_user_ctxt) *
                                  HFI1_MAX_SHARED_CTXTS) + fdata->subctxt);
                        set_bit(_HFI1_EVENT_TID_MMU_NOTIFY_BIT, ev);
                }
                fdata->invalid_tid_idx++;
        }
        spin_unlock(&fdata->invalid_lock);
        return 0;
}

static int mmu_rb_insert(struct rb_root *root, struct mmu_rb_node *node)
{
        struct hfi1_filedata *fdata =
                container_of(root, struct hfi1_filedata, tid_rb_root);
        struct tid_rb_node *tnode =
                container_of(node, struct tid_rb_node, mmu);
        u32 base = fdata->uctxt->expected_base;

        fdata->entry_to_rb[tnode->rcventry - base] = tnode;
        return 0;
}

static void mmu_rb_remove(struct rb_root *root, struct mmu_rb_node *node,
                          struct mm_struct *mm)
{
        struct hfi1_filedata *fdata =
                container_of(root, struct hfi1_filedata, tid_rb_root);
        struct tid_rb_node *tnode =
                container_of(node, struct tid_rb_node, mmu);
        u32 base = fdata->uctxt->expected_base;

        fdata->entry_to_rb[tnode->rcventry - base] = NULL;
}