/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 * Copyright (C) 2014 Fujitsu.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/freezer.h>
#include <linux/workqueue.h>
#include "async-thread.h"

#define WORK_DONE_BIT 0
#define WORK_ORDER_DONE_BIT 1
#define WORK_HIGH_PRIO_BIT 2

#define NO_THRESHOLD (-1)
#define DFT_THRESHOLD (32)

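/*
 * Work item state bits, stored in btrfs_work->flags: WORK_DONE_BIT is
 * set once work->func() has run, WORK_ORDER_DONE_BIT once the ordered
 * function has been called, and WORK_HIGH_PRIO_BIT routes an item to
 * the high-priority queue when one exists.
 */
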
struct __btrfs_workqueue {
	struct workqueue_struct *normal_wq;
	/* List head pointing to ordered work list */
	struct list_head ordered_list;

	/* Spinlock for ordered_list */
	spinlock_t list_lock;

	/* Thresholding related variables */
	atomic_t pending;
	int max_active;
	int current_max;
	int thresh;
	unsigned int count;
	spinlock_t thres_lock;
};

struct btrfs_workqueue {
	struct __btrfs_workqueue *normal;
	/* Only set when WQ_HIGHPRI was requested at allocation time */
	struct __btrfs_workqueue *high;
};

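/*
 * Allocate one underlying workqueue.
 *
 * A @thresh of 0 selects DFT_THRESHOLD; any value below DFT_THRESHOLD
 * disables thresholding (NO_THRESHOLD) and the queue simply runs with
 * @max_active. Otherwise concurrency starts at 1 and is adjusted at
 * execution time by thresh_exec_hook().
 */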
static inline struct __btrfs_workqueue
*__btrfs_alloc_workqueue(char *name, int flags, int max_active, int thresh)
{
	struct __btrfs_workqueue *ret = kzalloc(sizeof(*ret), GFP_NOFS);

	if (unlikely(!ret))
		return NULL;

	ret->max_active = max_active;
	atomic_set(&ret->pending, 0);
	if (thresh == 0)
		thresh = DFT_THRESHOLD;
	/* For low threshold, disabling threshold is a better choice */
	if (thresh < DFT_THRESHOLD) {
		ret->current_max = max_active;
		ret->thresh = NO_THRESHOLD;
	} else {
		ret->current_max = 1;
		ret->thresh = thresh;
	}

	if (flags & WQ_HIGHPRI)
		ret->normal_wq = alloc_workqueue("%s-%s-high", flags,
						 ret->max_active,
						 "btrfs", name);
	else
		ret->normal_wq = alloc_workqueue("%s-%s", flags,
						 ret->max_active, "btrfs",
						 name);
	if (unlikely(!ret->normal_wq)) {
		kfree(ret);
		return NULL;
	}

	INIT_LIST_HEAD(&ret->ordered_list);
	spin_lock_init(&ret->list_lock);
	spin_lock_init(&ret->thres_lock);
	return ret;
}

static inline void
__btrfs_destroy_workqueue(struct __btrfs_workqueue *wq);

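/*
 * Allocate a btrfs_workqueue pair. The normal queue is always created;
 * when WQ_HIGHPRI is requested a second, high-priority queue is
 * created as well, and failure of either allocation unwinds the other.
 */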
struct btrfs_workqueue *btrfs_alloc_workqueue(char *name,
					      int flags,
					      int max_active,
					      int thresh)
{
	struct btrfs_workqueue *ret = kzalloc(sizeof(*ret), GFP_NOFS);

	if (unlikely(!ret))
		return NULL;

	ret->normal = __btrfs_alloc_workqueue(name, flags & ~WQ_HIGHPRI,
					      max_active, thresh);
	if (unlikely(!ret->normal)) {
		kfree(ret);
		return NULL;
	}

	if (flags & WQ_HIGHPRI) {
		ret->high = __btrfs_alloc_workqueue(name, flags, max_active,
						    thresh);
		if (unlikely(!ret->high)) {
			__btrfs_destroy_workqueue(ret->normal);
			kfree(ret);
			return NULL;
		}
	}
	return ret;
}

/*
 * Hook for threshold which will be called in btrfs_queue_work.
 * This hook WILL be called in IRQ handler context,
 * so workqueue_set_max_active MUST NOT be called in this hook
 */
static inline void thresh_queue_hook(struct __btrfs_workqueue *wq)
{
	if (wq->thresh == NO_THRESHOLD)
		return;
	atomic_inc(&wq->pending);
}

/*
 * Hook for threshold which will be called before executing the work.
 * This hook is called in kthread context,
 * so workqueue_set_max_active can be called here.
 */
static inline void thresh_exec_hook(struct __btrfs_workqueue *wq)
{
	int new_max_active;
	long pending;
	int need_change = 0;

	if (wq->thresh == NO_THRESHOLD)
		return;

	atomic_dec(&wq->pending);
	spin_lock(&wq->thres_lock);
	/*
	 * Use wq->count to limit the calling frequency of
	 * workqueue_set_max_active.
	 */
	wq->count++;
	wq->count %= (wq->thresh / 4);
	if (!wq->count)
		goto out;
	new_max_active = wq->current_max;

	/*
	 * pending may be changed later, but it's OK since we really
	 * don't need it to be that accurate to calculate new_max_active.
	 */
	pending = atomic_read(&wq->pending);
	if (pending > wq->thresh)
		new_max_active++;
	if (pending < wq->thresh / 2)
		new_max_active--;
	new_max_active = clamp_val(new_max_active, 1, wq->max_active);
	if (new_max_active != wq->current_max) {
		need_change = 1;
		wq->current_max = new_max_active;
	}
out:
	spin_unlock(&wq->thres_lock);

	if (need_change)
		workqueue_set_max_active(wq->normal_wq, wq->current_max);
}

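/*
 * With the default thresh of 32, for example: current_max grows by one
 * while more than 32 items are pending, shrinks by one when fewer than
 * 16 are pending, and is always clamped to [1, max_active].
 */
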
static void run_ordered_work(struct __btrfs_workqueue *wq)
{
	struct list_head *list = &wq->ordered_list;
	struct btrfs_work *work;
	spinlock_t *lock = &wq->list_lock;
	unsigned long flags;

	while (1) {
		spin_lock_irqsave(lock, flags);
		if (list_empty(list))
			break;
		work = list_entry(list->next, struct btrfs_work,
				  ordered_list);
		if (!test_bit(WORK_DONE_BIT, &work->flags))
			break;

		/*
		 * we are going to call the ordered done function, but
		 * we leave the work item on the list as a barrier so
		 * that later work items that are done don't have their
		 * functions called before this one returns
		 */
		if (test_and_set_bit(WORK_ORDER_DONE_BIT, &work->flags))
			break;
		spin_unlock_irqrestore(lock, flags);
		work->ordered_func(work);

		/* now take the lock again and drop our item from the list */
		spin_lock_irqsave(lock, flags);
		list_del(&work->ordered_list);
		spin_unlock_irqrestore(lock, flags);

		/*
		 * we don't want to call the ordered free functions
		 * with the lock held though
		 */
		work->ordered_free(work);
	}
	spin_unlock_irqrestore(lock, flags);
}

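/*
 * Executed by the kernel workqueue for every queued item: run
 * work->func() concurrently, then, for ordered items, mark the item
 * done and let run_ordered_work() call the ordered hooks strictly in
 * queueing order.
 */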
static void normal_work_helper(struct work_struct *arg)
{
	struct btrfs_work *work;
	struct __btrfs_workqueue *wq;
	int need_order = 0;

	work = container_of(arg, struct btrfs_work, normal_work);
	/*
	 * We should not touch things inside work in the following cases:
	 * 1) after work->func() if it has no ordered_free
	 *    Since the struct is freed in work->func().
	 * 2) after setting WORK_DONE_BIT
	 *    The work may be freed in other threads almost instantly.
	 * So we save the needed things here.
	 */
	if (work->ordered_func)
		need_order = 1;
	wq = work->wq;

	thresh_exec_hook(wq);
	work->func(work);
	if (need_order) {
		set_bit(WORK_DONE_BIT, &work->flags);
		run_ordered_work(wq);
	}
}

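/*
 * Initialize a work item before it is queued. @ordered_func and
 * @ordered_free may both be NULL for unordered work; when ordering is
 * used, run_ordered_work() calls both, so they must be set as a pair.
 */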
void btrfs_init_work(struct btrfs_work *work,
		     void (*func)(struct btrfs_work *),
		     void (*ordered_func)(struct btrfs_work *),
		     void (*ordered_free)(struct btrfs_work *))
{
	work->func = func;
	work->ordered_func = ordered_func;
	work->ordered_free = ordered_free;
	INIT_WORK(&work->normal_work, normal_work_helper);
	INIT_LIST_HEAD(&work->ordered_list);
	work->flags = 0;
}

static inline void __btrfs_queue_work(struct __btrfs_workqueue *wq,
				      struct btrfs_work *work)
{
	unsigned long flags;

	work->wq = wq;
	thresh_queue_hook(wq);
	if (work->ordered_func) {
		spin_lock_irqsave(&wq->list_lock, flags);
		list_add_tail(&work->ordered_list, &wq->ordered_list);
		spin_unlock_irqrestore(&wq->list_lock, flags);
	}
	queue_work(wq->normal_wq, &work->normal_work);
}

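/*
 * Queue @work on @wq. Items flagged with WORK_HIGH_PRIO_BIT go to the
 * high-priority queue when one exists; everything else goes to the
 * normal queue.
 */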
void btrfs_queue_work(struct btrfs_workqueue *wq,
		      struct btrfs_work *work)
{
	struct __btrfs_workqueue *dest_wq;

	if (test_bit(WORK_HIGH_PRIO_BIT, &work->flags) && wq->high)
		dest_wq = wq->high;
	else
		dest_wq = wq->normal;
	__btrfs_queue_work(dest_wq, work);
}

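/*
 * Typical usage, as a minimal sketch (my_item, my_func and friends are
 * illustrative names, not part of this file):
 *
 *	struct btrfs_workqueue *wq;
 *
 *	wq = btrfs_alloc_workqueue("worker", WQ_HIGHPRI, 16, 0);
 *	btrfs_init_work(&my_item->work, my_func, my_ordered_func,
 *			my_ordered_free);
 *	btrfs_queue_work(wq, &my_item->work);
 *	...
 *	btrfs_destroy_workqueue(wq);
 */
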
static inline void
__btrfs_destroy_workqueue(struct __btrfs_workqueue *wq)
{
	destroy_workqueue(wq->normal_wq);
	kfree(wq);
}

void btrfs_destroy_workqueue(struct btrfs_workqueue *wq)
{
	if (!wq)
		return;
	if (wq->high)
		__btrfs_destroy_workqueue(wq->high);
	__btrfs_destroy_workqueue(wq->normal);
	kfree(wq);
}

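/*
 * Raise or lower the max_active limit of both queues. For thresholded
 * queues this only updates the stored limit; thresh_exec_hook() clamps
 * current_max to it the next time work executes.
 */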
void btrfs_workqueue_set_max(struct btrfs_workqueue *wq, int max)
{
	wq->normal->max_active = max;
	if (wq->high)
		wq->high->max_active = max;
}

void btrfs_set_work_high_priority(struct btrfs_work *work)
{
	set_bit(WORK_HIGH_PRIO_BIT, &work->flags);
}