/*
 * jump label support
 *
 * Copyright (C) 2009 Jason Baron <jbaron@redhat.com>
 * Copyright (C) 2011 Peter Zijlstra
 *
 */
#include <linux/memory.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/err.h>
#include <linux/static_key.h>
#include <linux/jump_label_ratelimit.h>

#ifdef HAVE_JUMP_LABEL

/* mutex to protect coming/going of the jump_label table */
static DEFINE_MUTEX(jump_label_mutex);

void jump_label_lock(void)
{
        mutex_lock(&jump_label_mutex);
}

void jump_label_unlock(void)
{
        mutex_unlock(&jump_label_mutex);
}

static int jump_label_cmp(const void *a, const void *b)
{
        const struct jump_entry *jea = a;
        const struct jump_entry *jeb = b;

        if (jea->key < jeb->key)
                return -1;

        if (jea->key > jeb->key)
                return 1;

        return 0;
}

/*
 * Sort the table by key, so that all entries for a given key are
 * contiguous; __jump_label_update() relies on this to visit a key's
 * entries with a single linear scan.
 */
static void
jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
{
        unsigned long size;

        size = (((unsigned long)stop - (unsigned long)start)
                                        / sizeof(struct jump_entry));
        sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
}

static void jump_label_update(struct static_key *key);

void static_key_slow_inc(struct static_key *key)
{
        int v, v1;

        STATIC_KEY_CHECK_USE();

        /*
         * Careful if we get concurrent static_key_slow_inc() calls;
         * later calls must wait for the first one to _finish_ the
         * jump_label_update() process.  At the same time, however,
         * the jump_label_update() call below wants to see
         * static_key_enabled(&key) for jumps to be updated properly.
         *
         * So give a special meaning to negative key->enabled: it sends
         * static_key_slow_inc() down the slow path, and it is non-zero
         * so it counts as "enabled" in jump_label_update().  Note that
         * atomic_inc_unless_negative() checks >= 0, so roll our own.
         */
        for (v = atomic_read(&key->enabled); v > 0; v = v1) {
                v1 = atomic_cmpxchg(&key->enabled, v, v + 1);
                if (likely(v1 == v))
                        return;
        }

        jump_label_lock();
        if (atomic_read(&key->enabled) == 0) {
                atomic_set(&key->enabled, -1);
                jump_label_update(key);
                atomic_set(&key->enabled, 1);
        } else {
                atomic_inc(&key->enabled);
        }
        jump_label_unlock();
}
EXPORT_SYMBOL_GPL(static_key_slow_inc);

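/*
 * An illustrative sketch of typical usage, not part of this file; the
 * key name "my_feature" and the helpers around it are hypothetical:
 *
 *      DEFINE_STATIC_KEY_FALSE(my_feature);
 *
 *      void hot_path(void)
 *      {
 *              if (static_branch_unlikely(&my_feature))
 *                      do_extra_work();        // patched in only when enabled
 *      }
 *
 *      // Reference-counted enable/disable from a slow path:
 *      static_branch_inc(&my_feature);         // first inc patches sites live
 *      static_branch_dec(&my_feature);         // last dec patches them back
 *
 * static_branch_inc()/dec() resolve to static_key_slow_inc()/dec() on
 * the embedded key.
 */
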
static void __static_key_slow_dec(struct static_key *key,
                unsigned long rate_limit, struct delayed_work *work)
{
        /*
         * The negative count check is valid even when a negative
         * key->enabled is in use by static_key_slow_inc(); a
         * __static_key_slow_dec() before the first static_key_slow_inc()
         * returns is unbalanced, because all other static_key_slow_inc()
         * instances block while the update is in progress.
         */
        if (!atomic_dec_and_mutex_lock(&key->enabled, &jump_label_mutex)) {
                WARN(atomic_read(&key->enabled) < 0,
                     "jump label: negative count!\n");
                return;
        }

        if (rate_limit) {
                atomic_inc(&key->enabled);
                schedule_delayed_work(work, rate_limit);
        } else {
                jump_label_update(key);
        }
        jump_label_unlock();
}

static void jump_label_update_timeout(struct work_struct *work)
{
        struct static_key_deferred *key =
                container_of(work, struct static_key_deferred, work.work);
        __static_key_slow_dec(&key->key, 0, NULL);
}

void static_key_slow_dec(struct static_key *key)
{
        STATIC_KEY_CHECK_USE();
        __static_key_slow_dec(key, 0, NULL);
}
EXPORT_SYMBOL_GPL(static_key_slow_dec);

void static_key_slow_dec_deferred(struct static_key_deferred *key)
{
        STATIC_KEY_CHECK_USE();
        __static_key_slow_dec(&key->key, key->timeout, &key->work);
}
EXPORT_SYMBOL_GPL(static_key_slow_dec_deferred);

void jump_label_rate_limit(struct static_key_deferred *key,
                unsigned long rl)
{
        STATIC_KEY_CHECK_USE();
        key->timeout = rl;
        INIT_DELAYED_WORK(&key->work, jump_label_update_timeout);
}
EXPORT_SYMBOL_GPL(jump_label_rate_limit);

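/*
 * An illustrative sketch of a rate-limited key; the name "my_def_key" is
 * hypothetical. Deferring the disable is useful when a key may toggle at
 * high frequency, since every transition patches code and can be costly:
 *
 *      static struct static_key_deferred my_def_key;
 *
 *      jump_label_rate_limit(&my_def_key, HZ);    // batch disables, ~1s
 *      static_key_slow_inc(&my_def_key.key);      // enable immediately
 *      static_key_slow_dec_deferred(&my_def_key); // disable after timeout
 */
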
/* Does the patch site of @entry overlap the bytes in [start, end]? */
static int addr_conflict(struct jump_entry *entry, void *start, void *end)
{
        if (entry->code <= (unsigned long)end &&
                entry->code + JUMP_LABEL_NOP_SIZE > (unsigned long)start)
                return 1;

        return 0;
}

static int __jump_label_text_reserved(struct jump_entry *iter_start,
                struct jump_entry *iter_stop, void *start, void *end)
{
        struct jump_entry *iter;

        iter = iter_start;
        while (iter < iter_stop) {
                if (addr_conflict(iter, start, end))
                        return 1;
                iter++;
        }

        return 0;
}

/*
 * Update code which is definitely not currently executing.
 * Architectures which need heavyweight synchronization to modify
 * running code can override this to make the non-live update case
 * cheaper.
 */
void __weak __init_or_module arch_jump_label_transform_static(struct jump_entry *entry,
                                            enum jump_label_type type)
{
        arch_jump_label_transform(entry, type);
}

/*
 * The low bits of key->entries and entry->key double as flag bits:
 * key->entries carries the key type (true/false) in JUMP_TYPE_MASK,
 * and entry->key carries the branch's compile-time default in bit 0.
 * Mask them off to recover the actual pointers.
 */
static inline struct jump_entry *static_key_entries(struct static_key *key)
{
        return (struct jump_entry *)((unsigned long)key->entries & ~JUMP_TYPE_MASK);
}

static inline bool static_key_type(struct static_key *key)
{
        return (unsigned long)key->entries & JUMP_TYPE_MASK;
}

static inline struct static_key *jump_entry_key(struct jump_entry *entry)
{
        return (struct static_key *)((unsigned long)entry->key & ~1UL);
}

static bool jump_entry_branch(struct jump_entry *entry)
{
        return (unsigned long)entry->key & 1UL;
}

static enum jump_label_type jump_label_type(struct jump_entry *entry)
{
        struct static_key *key = jump_entry_key(entry);
        bool enabled = static_key_enabled(key);
        bool branch = jump_entry_branch(entry);

        /*
         * The site needs a JMP exactly when the key's run-time state
         * differs from the state the branch was compiled for; see the
         * comment in linux/jump_label.h.
         */
        return enabled ^ branch;
}

static void __jump_label_update(struct static_key *key,
                                struct jump_entry *entry,
                                struct jump_entry *stop)
{
        for (; (entry < stop) && (jump_entry_key(entry) == key); entry++) {
                /*
                 * An entry->code of 0 marks an invalidated module init
                 * text section, see jump_label_invalidate_module_init();
                 * kernel_text_address() verifies we are not in core
                 * kernel init code.
                 */
                if (entry->code && kernel_text_address(entry->code))
                        arch_jump_label_transform(entry, jump_label_type(entry));
        }
}

void __init jump_label_init(void)
{
        struct jump_entry *iter_start = __start___jump_table;
        struct jump_entry *iter_stop = __stop___jump_table;
        struct static_key *key = NULL;
        struct jump_entry *iter;

        if (static_key_initialized)
                return;

        jump_label_lock();
        jump_label_sort_entries(iter_start, iter_stop);

        for (iter = iter_start; iter < iter_stop; iter++) {
                struct static_key *iterk;

                /* rewrite NOPs */
                if (jump_label_type(iter) == JUMP_LABEL_NOP)
                        arch_jump_label_transform_static(iter, JUMP_LABEL_NOP);

                iterk = jump_entry_key(iter);
                if (iterk == key)
                        continue;

                key = iterk;
                /*
                 * Set key->entries to iter, but preserve the type bits
                 * in JUMP_TYPE_MASK.
                 */
                *((unsigned long *)&key->entries) += (unsigned long)iter;
#ifdef CONFIG_MODULES
                key->next = NULL;
#endif
        }
        static_key_initialized = true;
        jump_label_unlock();
}

#ifdef CONFIG_MODULES

static enum jump_label_type jump_label_init_type(struct jump_entry *entry)
{
        struct static_key *key = jump_entry_key(entry);
        bool type = static_key_type(key);
        bool branch = jump_entry_branch(entry);

        /* See the comment in linux/jump_label.h */
        return type ^ branch;
}

/*
 * Per-key list of the jump entries contributed by modules that use a
 * key they do not own; chained off key->next.
 */
struct static_key_mod {
        struct static_key_mod *next;
        struct jump_entry *entries;
        struct module *mod;
};

static int __jump_label_mod_text_reserved(void *start, void *end)
{
        struct module *mod;

        mod = __module_text_address((unsigned long)start);
        if (!mod)
                return 0;

        WARN_ON_ONCE(__module_text_address((unsigned long)end) != mod);

        return __jump_label_text_reserved(mod->jump_entries,
                                mod->jump_entries + mod->num_jump_entries,
                                start, end);
}

static void __jump_label_mod_update(struct static_key *key)
{
        struct static_key_mod *mod;

        for (mod = key->next; mod; mod = mod->next) {
                struct module *m = mod->mod;

                __jump_label_update(key, mod->entries,
                                    m->jump_entries + m->num_jump_entries);
        }
}

/**
 * jump_label_apply_nops - patch module jump labels with arch-specific NOPs
 * @mod: module to patch
 *
 * Allow for run-time selection of the optimal nops. Before the module
 * loads, patch entries whose initial state is NOP via
 * arch_jump_label_transform_static(), provided by the arch-specific
 * jump label code.
 */
void jump_label_apply_nops(struct module *mod)
{
        struct jump_entry *iter_start = mod->jump_entries;
        struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
        struct jump_entry *iter;

        /* if the module doesn't have jump label entries, just return */
        if (iter_start == iter_stop)
                return;

        for (iter = iter_start; iter < iter_stop; iter++) {
                /* Only write NOPs for entries whose initial state is NOP. */
                if (jump_label_init_type(iter) == JUMP_LABEL_NOP)
                        arch_jump_label_transform_static(iter, JUMP_LABEL_NOP);
        }
}

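/*
 * A sketch of the intended call context (modeled on x86, where the
 * module loader's module_finalize() invokes this while the module is
 * being loaded; details vary per architecture):
 *
 *      int module_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
 *                          struct module *me)
 *      {
 *              ...
 *              jump_label_apply_nops(me);
 *              ...
 *      }
 */
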
static int jump_label_add_module(struct module *mod)
{
        struct jump_entry *iter_start = mod->jump_entries;
        struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
        struct jump_entry *iter;
        struct static_key *key = NULL;
        struct static_key_mod *jlm;

        /* if the module doesn't have jump label entries, just return */
        if (iter_start == iter_stop)
                return 0;

        jump_label_sort_entries(iter_start, iter_stop);

        for (iter = iter_start; iter < iter_stop; iter++) {
                struct static_key *iterk;

                iterk = jump_entry_key(iter);
                if (iterk == key)
                        continue;

                key = iterk;
                if (within_module(iter->key, mod)) {
                        /*
                         * Set key->entries to iter, but preserve the type
                         * bits in JUMP_TYPE_MASK.
                         */
                        *((unsigned long *)&key->entries) += (unsigned long)iter;
                        key->next = NULL;
                        continue;
                }
                jlm = kzalloc(sizeof(struct static_key_mod), GFP_KERNEL);
                if (!jlm)
                        return -ENOMEM;
                jlm->mod = mod;
                jlm->entries = iter;
                jlm->next = key->next;
                key->next = jlm;

                /* Only update if we've changed from our initial state */
                if (jump_label_type(iter) != jump_label_init_type(iter))
                        __jump_label_update(key, iter, iter_stop);
        }

        return 0;
}

/*
 * Unlink and free the static_key_mod entries that @mod contributed to
 * keys owned by other modules or by the core kernel.
 */
static void jump_label_del_module(struct module *mod)
{
        struct jump_entry *iter_start = mod->jump_entries;
        struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
        struct jump_entry *iter;
        struct static_key *key = NULL;
        struct static_key_mod *jlm, **prev;

        for (iter = iter_start; iter < iter_stop; iter++) {
                if (jump_entry_key(iter) == key)
                        continue;

                key = jump_entry_key(iter);

                if (within_module(iter->key, mod))
                        continue;

                prev = &key->next;
                jlm = key->next;

                while (jlm && jlm->mod != mod) {
                        prev = &jlm->next;
                        jlm = jlm->next;
                }

                if (jlm) {
                        *prev = jlm->next;
                        kfree(jlm);
                }
        }
}

/*
 * Zero the code address of entries that live in @mod's init text, so
 * that __jump_label_update() skips them once the init memory is freed.
 */
static void jump_label_invalidate_module_init(struct module *mod)
{
        struct jump_entry *iter_start = mod->jump_entries;
        struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
        struct jump_entry *iter;

        for (iter = iter_start; iter < iter_stop; iter++) {
                if (within_module_init(iter->code, mod))
                        iter->code = 0;
        }
}

static int
jump_label_module_notify(struct notifier_block *self, unsigned long val,
                         void *data)
{
        struct module *mod = data;
        int ret = 0;

        switch (val) {
        case MODULE_STATE_COMING:
                jump_label_lock();
                ret = jump_label_add_module(mod);
                if (ret)
                        jump_label_del_module(mod);
                jump_label_unlock();
                break;
        case MODULE_STATE_GOING:
                jump_label_lock();
                jump_label_del_module(mod);
                jump_label_unlock();
                break;
        case MODULE_STATE_LIVE:
                jump_label_lock();
                jump_label_invalidate_module_init(mod);
                jump_label_unlock();
                break;
        }

        return notifier_from_errno(ret);
}

static struct notifier_block jump_label_module_nb = {
        .notifier_call = jump_label_module_notify,
        .priority = 1, /* higher than tracepoints */
};

static __init int jump_label_init_module(void)
{
        return register_module_notifier(&jump_label_module_nb);
}
early_initcall(jump_label_init_module);

#endif /* CONFIG_MODULES */

/**
 * jump_label_text_reserved - check if addr range is reserved
 * @start: start text addr
 * @end: end text addr
 *
 * Checks if the text addresses between @start and @end overlap with
 * any of the jump label patch addresses. Code that wants to modify
 * kernel text should first verify that it does not overlap with any
 * of the jump label addresses. Caller must hold jump_label_mutex.
 *
 * Returns 1 if there is an overlap, 0 otherwise.
 */
int jump_label_text_reserved(void *start, void *end)
{
        int ret = __jump_label_text_reserved(__start___jump_table,
                        __stop___jump_table, start, end);

        if (ret)
                return ret;

#ifdef CONFIG_MODULES
        ret = __jump_label_mod_text_reserved(start, end);
#endif
        return ret;
}

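/*
 * A sketch of how a text patcher consumes this interface (modeled on
 * kprobes, which refuses to arm a probe on a jump label patch site;
 * "addr" and "ret" are stand-ins for the caller's own state):
 *
 *      jump_label_lock();
 *      if (jump_label_text_reserved(addr, addr))
 *              ret = -EBUSY;   // site owned by a jump label, don't patch
 *      jump_label_unlock();
 */
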
static void jump_label_update(struct static_key *key)
{
        struct jump_entry *stop = __stop___jump_table;
        struct jump_entry *entry = static_key_entries(key);
#ifdef CONFIG_MODULES
        struct module *mod;

        __jump_label_mod_update(key);

        preempt_disable();
        mod = __module_address((unsigned long)key);
        if (mod)
                stop = mod->jump_entries + mod->num_jump_entries;
        preempt_enable();
#endif
        /* if there are no users, entry can be NULL */
        if (entry)
                __jump_label_update(key, entry, stop);
}

#ifdef CONFIG_STATIC_KEYS_SELFTEST
static DEFINE_STATIC_KEY_TRUE(sk_true);
static DEFINE_STATIC_KEY_FALSE(sk_false);

static __init int jump_label_test(void)
{
        int i;

        for (i = 0; i < 2; i++) {
                WARN_ON(static_key_enabled(&sk_true.key) != true);
                WARN_ON(static_key_enabled(&sk_false.key) != false);

                WARN_ON(!static_branch_likely(&sk_true));
                WARN_ON(!static_branch_unlikely(&sk_true));
                WARN_ON(static_branch_likely(&sk_false));
                WARN_ON(static_branch_unlikely(&sk_false));

                static_branch_disable(&sk_true);
                static_branch_enable(&sk_false);

                WARN_ON(static_key_enabled(&sk_true.key) == true);
                WARN_ON(static_key_enabled(&sk_false.key) == false);

                WARN_ON(static_branch_likely(&sk_true));
                WARN_ON(static_branch_unlikely(&sk_true));
                WARN_ON(!static_branch_likely(&sk_false));
                WARN_ON(!static_branch_unlikely(&sk_false));

                static_branch_enable(&sk_true);
                static_branch_disable(&sk_false);
        }

        return 0;
}
late_initcall(jump_label_test);
#endif /* CONFIG_STATIC_KEYS_SELFTEST */

#endif /* HAVE_JUMP_LABEL */