/*
 * Copyright (C) 2012  Pierre Habouzit <pierre.habouzit@intersec.com>
 * Copyright (C) 2012  Intersec SAS
 *
 * This file implements the Linux Pthread Workqueue Regulator, and is part
 * of the Linux kernel.
 *
 * The Linux Kernel is free software: you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * The Linux Kernel is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
 * License for more details.
 *
 * You should have received a copy of the GNU General Public License version 2
 * along with The Linux Kernel.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/hash.h>
#include <linux/init.h>
#include <linux/kref.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/timer.h>
#include <linux/uaccess.h>
#include <linux/wait.h>
#include <linux/version.h>
/*
 * The pthread workqueue regulator code is for now written as a proof of
 * concept module, meant to work with 2.6.23+ kernels or RedHat 5 ones.
 *
 * For now it uses a device /dev/pwq, which spawns magic file-descriptors
 * supporting a few ioctl operations (see Documentation/pwqr.adoc shipped in
 * the same git repository).
 *
 * This code is meant to be merged into mainline, but only after the following
 * changes, kept here as a "todolist":
 *
 * - get rid of the device stuff (which is 100% of the init code for 2.6.23+
 *   kernels);
 *
 * - resubmit the patch that makes it possible to call
 *   preempt_notifier_unregister from sched_in/sched_out (just a matter of a
 *   hlist_for_each_safe instead of hlist_for_each), and fix
 *   pwqr_task_release to not require RCU anymore.  It makes
 *   pwqr_preempt_noop_ops go away.
 *
 * - think about the possibility of adding a pwq_notifier pointer directly
 *   into the task_struct.  Though it's not *that* necessary, as it grows the
 *   structure for a speed gain we don't really need (making pwqr_ctl
 *   faster).  I think it's okay to crawl the preempt_notifier list instead.
 *   We may want to add nice "macros" for that though.
 *
 * - replace the ioctl with a pwqr_ctl syscall.
 *
 * - create a pwqr_create() syscall to create a pwqr file-descriptor.
 *
 * Summary: most of the code should be untouched or almost unchanged,
 * pwqr_ioctl adapted to become a syscall, and the module boilerplate replaced
 * with pwqr_create() and file-descriptor creation boilerplate instead.  But
 * looking at fs/eventfd.c this looks rather simple.
 */
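/*
 * Illustrative only: a rough sketch of the intended userland usage, assuming
 * the PWQR_CTL_* numbers come from pwqr.h and the node shows up as /dev/pwqr
 * (see Documentation/pwqr.adoc for the authoritative contract):
 *
 *	int fd = open("/dev/pwqr", O_RDWR);
 *
 *	ioctl(fd, PWQR_CTL_SET_CONC, 0);   // <= 0 resets to num_online_cpus()
 *	ioctl(fd, PWQR_CTL_REGISTER);      // called from each worker thread
 *	...
 *	ioctl(fd, PWQR_CTL_PARK);          // idle worker parks itself
 *	ioctl(fd, PWQR_CTL_WAKE, 1);       // ask for one regulated wake-up
 */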
#ifndef CONFIG_PREEMPT_NOTIFIERS
# error PWQ module requires CONFIG_PREEMPT_NOTIFIERS
#endif

/* expected to provide the PWQR_CTL_* ioctl numbers, struct pwqr_ioc_wait
 * and PWQR_DEVICE_NAME */
#include "pwqr.h"
#define PWQR_UC_DELAY		(HZ / 10)
#define PWQR_OC_DELAY		(HZ / 20)

#define PWQR_STATE_NONE		0
#define PWQR_STATE_UC		1
#define PWQR_STATE_OC		2
#define PWQR_STATE_DEAD		(-1)
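/*
 * sb->state is a small state machine: NONE when the number of running
 * threads matches the target concurrency, UC (undercommit) when fewer
 * threads run than `concurrency` while some are parked, OC (overcommit)
 * when more threads run than `concurrency`, and DEAD once the pwqr file is
 * released.  The UC/OC delays above debounce the transitions so that short
 * blocking syscalls don't trigger spurious wake-ups.
 */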
/*
 * Linux 2.6.23 is the first kernel version that ships CONFIG_PREEMPT_NOTIFIERS.
 *
 * I still want this to work on older RedHat 5 kernels, which have an
 * emulation of the feature, implemented differently: instead of linking the
 * preempt_notifiers from the task_struct directly, they keep them in a
 * private hash table I have no access to, so I need my own hash table too.
 *
 * For vanilla kernels we crawl the task_struct::preempt_notifiers hlist
 * until we find our entry.  This list is usually very short, and crawling it
 * is no slower than the global hash table, which also ends up crawling a
 * list anyway.
 */
#define IS_PRE_2_6_23	(LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 23))
struct pwqr_sb {
	struct kref		kref;
	struct rcu_head		rcu;
	struct timer_list	timer;
	wait_queue_head_t	wqh;
	wait_queue_head_t	wqh_poll;

	unsigned		concurrency;
	unsigned		running;
	unsigned		waiting;
	unsigned		parked;

	int			state;
	unsigned		overcommit_wakes;
};

struct pwqr_task {
	struct preempt_notifier	notifier;
	struct pwqr_sb		*sb;
	struct rcu_head		rcu;
#if IS_PRE_2_6_23
	struct hlist_node	link;
	struct task_struct	*task;
#endif
};

#if IS_PRE_2_6_23
#define PWQR_HASH_BITS		5
#define PWQR_HASH_SIZE		(1 << PWQR_HASH_BITS)

struct pwqr_task_bucket {
	spinlock_t		lock;
	struct hlist_head	tasks;
};

static struct pwqr_task_bucket	pwqr_tasks_hash[PWQR_HASH_SIZE];
#endif
static struct class		*pwqr_class;
static int			 pwqr_major;
static struct preempt_ops	 pwqr_preempt_running_ops;
static struct preempt_ops	 pwqr_preempt_blocked_ops;
static struct preempt_ops	 pwqr_preempt_noop_ops;
/*****************************************************************************
 * pwqr_sb management
 */

#define pwqr_sb_lock_irqsave(sb, flags) \
	spin_lock_irqsave(&(sb)->wqh.lock, flags)
#define pwqr_sb_unlock_irqrestore(sb, flags) \
	spin_unlock_irqrestore(&(sb)->wqh.lock, flags)
static inline void pwqr_arm_timer(struct pwqr_sb *sb, int how, int delay)
{
	if (timer_pending(&sb->timer) && sb->state == how)
		return;
	mod_timer(&sb->timer, jiffies + delay);
	sb->state = how;
}
static inline void __pwqr_sb_update_state(struct pwqr_sb *sb, int running_delta)
{
	sb->running += running_delta;

	if (sb->running < sb->concurrency && sb->waiting == 0 && sb->parked) {
		pwqr_arm_timer(sb, PWQR_STATE_UC, PWQR_UC_DELAY);
	} else if (sb->running > sb->concurrency) {
		pwqr_arm_timer(sb, PWQR_STATE_OC, PWQR_OC_DELAY);
	} else {
		sb->state = PWQR_STATE_NONE;
		if (timer_pending(&sb->timer))
			del_timer(&sb->timer);
	}
}
static void pwqr_sb_timer_cb(unsigned long arg)
{
	struct pwqr_sb *sb = (struct pwqr_sb *)arg;
	unsigned long flags;

	pwqr_sb_lock_irqsave(sb, flags);
	if (sb->running < sb->concurrency && sb->waiting == 0 && sb->parked) {
		if (sb->overcommit_wakes == 0)
			wake_up_locked(&sb->wqh);
	}
	if (sb->running > sb->concurrency) {
		printk(KERN_DEBUG "wake up poll\n");
		wake_up_poll(&sb->wqh_poll, POLLIN);
	}
	pwqr_sb_unlock_irqrestore(sb, flags);
}
static struct pwqr_sb *pwqr_sb_create(void)
{
	struct pwqr_sb *sb;

	sb = kzalloc(sizeof(struct pwqr_sb), GFP_KERNEL);
	if (sb == NULL)
		return ERR_PTR(-ENOMEM);

	kref_init(&sb->kref);
	init_waitqueue_head(&sb->wqh);
	init_waitqueue_head(&sb->wqh_poll);
	sb->concurrency    = num_online_cpus();
	init_timer(&sb->timer);
	sb->timer.function = pwqr_sb_timer_cb;
	sb->timer.data     = (unsigned long)sb;

	__module_get(THIS_MODULE);
	return sb;
}
static inline void pwqr_sb_get(struct pwqr_sb *sb)
{
	kref_get(&sb->kref);
}
static void pwqr_sb_finalize(struct rcu_head *rcu)
{
	struct pwqr_sb *sb = container_of(rcu, struct pwqr_sb, rcu);

	module_put(THIS_MODULE);
	kfree(sb);
}
static void pwqr_sb_release(struct kref *kref)
{
	struct pwqr_sb *sb = container_of(kref, struct pwqr_sb, kref);

	del_timer_sync(&sb->timer);
	wake_up_poll(&sb->wqh_poll, POLLHUP);
	call_rcu(&sb->rcu, pwqr_sb_finalize);
}
static inline void pwqr_sb_put(struct pwqr_sb *sb)
{
	kref_put(&sb->kref, pwqr_sb_release);
}
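/*
 * Lifetime note: the sched_{in,out} notifiers can still hold a pointer to
 * the sb when the last reference is dropped, which is why pwqr_sb_release()
 * defers the actual kfree() to an RCU callback (pwqr_sb_finalize) instead of
 * freeing synchronously.
 */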
/*****************************************************************************
 * pwqr_task management
 */

#if IS_PRE_2_6_23
static inline struct pwqr_task_bucket *task_hbucket(struct task_struct *task)
{
	return &pwqr_tasks_hash[hash_ptr(task, PWQR_HASH_BITS)];
}

static struct pwqr_task *pwqr_task_find(struct task_struct *task)
{
	struct pwqr_task_bucket *b = task_hbucket(task);
	struct hlist_node *node;
	struct pwqr_task *pwqt = NULL;

	spin_lock(&b->lock);
	hlist_for_each_entry(pwqt, node, &b->tasks, link) {
		if (pwqt->task == task)
			break;
	}
	spin_unlock(&b->lock);
	/* node is only NULL when the loop ran off the end of the list */
	return node ? pwqt : NULL;
}
#else
static struct pwqr_task *pwqr_task_find(struct task_struct *task)
{
	struct hlist_node *node;
	struct preempt_notifier *it;
	struct pwqr_task *pwqt = NULL;

	hlist_for_each_entry(it, node, &task->preempt_notifiers, link) {
		if (it->ops == &pwqr_preempt_running_ops ||
		    it->ops == &pwqr_preempt_blocked_ops ||
		    it->ops == &pwqr_preempt_noop_ops)
		{
			pwqt = container_of(it, struct pwqr_task, notifier);
			break;
		}
	}
	return pwqt;
}
#endif
static struct pwqr_task *pwqr_task_create(struct task_struct *task)
{
	struct pwqr_task *pwqt;

	pwqt = kmalloc(sizeof(*pwqt), GFP_KERNEL);
	if (pwqt == NULL)
		return ERR_PTR(-ENOMEM);

	preempt_notifier_init(&pwqt->notifier, &pwqr_preempt_running_ops);
	preempt_notifier_register(&pwqt->notifier);

#if IS_PRE_2_6_23
	{
		struct pwqr_task_bucket *b = task_hbucket(task);

		pwqt->task = task;
		spin_lock(&b->lock);
		hlist_add_head(&pwqt->link, &b->tasks);
		spin_unlock(&b->lock);
	}
#endif
	return pwqt;
}
static void pwqr_task_detach(struct pwqr_task *pwqt, struct pwqr_sb *sb)
{
	unsigned long flags;

	pwqr_sb_lock_irqsave(sb, flags);
	pwqt->sb = NULL;
	if (pwqt->notifier.ops == &pwqr_preempt_running_ops) {
		__pwqr_sb_update_state(sb, -1);
	} else {
		__pwqr_sb_update_state(sb, 0);
	}
	pwqr_sb_unlock_irqrestore(sb, flags);
	pwqr_sb_put(sb);
}
static void pwqr_task_attach(struct pwqr_task *pwqt, struct pwqr_sb *sb)
{
	unsigned long flags;

	pwqr_sb_lock_irqsave(sb, flags);
	pwqr_sb_get(pwqt->sb = sb);
	__pwqr_sb_update_state(sb, 1);
	pwqr_sb_unlock_irqrestore(sb, flags);
}
static void pwqr_task_release(struct pwqr_task *pwqt, bool from_notifier)
{
#if IS_PRE_2_6_23
	struct pwqr_task_bucket *b = task_hbucket(pwqt->task);

	spin_lock(&b->lock);
	hlist_del(&pwqt->link);
	spin_unlock(&b->lock);
#endif
	pwqt->notifier.ops = &pwqr_preempt_noop_ops;

	if (from_notifier) {
		/* When called from sched_{out,in}, it's not allowed to
		 * call preempt_notifier_unregister (or worse, kfree()).
		 *
		 * Freeing a still-registered callback when the task isn't
		 * dying is never a good idea anyway: it would panic on the
		 * next sched_{in,out} call.
		 */
		BUG_ON(!(current->state & TASK_DEAD));
		kfree_rcu(pwqt, rcu);
	} else {
		preempt_notifier_unregister(&pwqt->notifier);
		kfree(pwqt);
	}
}
static void pwqr_task_noop_sched_in(struct preempt_notifier *notifier, int cpu)
{
}

static void pwqr_task_noop_sched_out(struct preempt_notifier *notifier,
				     struct task_struct *next)
{
}

static void pwqr_task_blocked_sched_in(struct preempt_notifier *notifier, int cpu)
{
	struct pwqr_task *pwqt = container_of(notifier, struct pwqr_task, notifier);
	struct pwqr_sb *sb = pwqt->sb;
	unsigned long flags;

	if (unlikely(sb->state < 0)) {
		pwqr_task_detach(pwqt, sb);
		pwqr_task_release(pwqt, true);
		return;
	}

	pwqt->notifier.ops = &pwqr_preempt_running_ops;
	pwqr_sb_lock_irqsave(sb, flags);
	__pwqr_sb_update_state(sb, 1);
	pwqr_sb_unlock_irqrestore(sb, flags);
}
static void pwqr_task_sched_out(struct preempt_notifier *notifier,
				struct task_struct *next)
{
	struct pwqr_task *pwqt = container_of(notifier, struct pwqr_task, notifier);
	struct pwqr_sb *sb = pwqt->sb;
	struct task_struct *p = current;

	if (unlikely(p->state & TASK_DEAD) || unlikely(sb->state < 0)) {
		pwqr_task_detach(pwqt, sb);
		pwqr_task_release(pwqt, true);
		return;
	}
	if (p->state == 0 || (p->state & (__TASK_STOPPED | __TASK_TRACED)))
		return;

	pwqt->notifier.ops = &pwqr_preempt_blocked_ops;
	/* see preempt.h: irqs are disabled for sched_out */
	spin_lock(&sb->wqh.lock);
	__pwqr_sb_update_state(sb, -1);
	spin_unlock(&sb->wqh.lock);
}
static struct preempt_ops __read_mostly pwqr_preempt_noop_ops = {
	.sched_in	= pwqr_task_noop_sched_in,
	.sched_out	= pwqr_task_noop_sched_out,
};

static struct preempt_ops __read_mostly pwqr_preempt_running_ops = {
	.sched_in	= pwqr_task_noop_sched_in,
	.sched_out	= pwqr_task_sched_out,
};

static struct preempt_ops __read_mostly pwqr_preempt_blocked_ops = {
	.sched_in	= pwqr_task_blocked_sched_in,
	.sched_out	= pwqr_task_sched_out,
};
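/*
 * A registered task starts out with pwqr_preempt_running_ops.
 * pwqr_task_sched_out() switches it to pwqr_preempt_blocked_ops when the
 * task blocks, pwqr_task_blocked_sched_in() switches it back when it
 * resumes, and pwqr_task_release() installs pwqr_preempt_noop_ops so that a
 * notifier that cannot be unregistered yet (see above) stops touching the
 * sb.
 */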
/*****************************************************************************
 * file operations
 */

static int pwqr_open(struct inode *inode, struct file *filp)
{
	struct pwqr_sb *sb;

	sb = pwqr_sb_create();
	if (IS_ERR(sb))
		return PTR_ERR(sb);
	filp->private_data = sb;
	return 0;
}
static int pwqr_release(struct inode *inode, struct file *filp)
{
	struct pwqr_sb *sb = filp->private_data;
	unsigned long flags;

	pwqr_sb_lock_irqsave(sb, flags);
	sb->state = PWQR_STATE_DEAD;
	pwqr_sb_unlock_irqrestore(sb, flags);
	wake_up_all(&sb->wqh);
	pwqr_sb_put(sb);
	return 0;
}
static unsigned int pwqr_poll(struct file *filp, poll_table *wait)
{
	struct pwqr_sb *sb = filp->private_data;
	unsigned int events = 0;
	unsigned long flags;

	poll_wait(filp, &sb->wqh_poll, wait);

	pwqr_sb_lock_irqsave(sb, flags);
	if (sb->running > sb->concurrency)
		events |= POLLIN;
	if (sb->state < 0)
		events |= POLLHUP;
	pwqr_sb_unlock_irqrestore(sb, flags);

	return events;
}
static inline ssize_t pwqr_sb_read(struct pwqr_sb *sb, int no_wait, u32 *cnt)
{
	DECLARE_WAITQUEUE(wait, current);
	ssize_t rc = -EAGAIN;

	spin_lock_irq(&sb->wqh.lock);
	if (sb->running > sb->concurrency) {
		rc = 0;
	} else if (!no_wait) {
		add_wait_queue(&sb->wqh_poll, &wait);
		for (;;) {
			set_current_state(TASK_INTERRUPTIBLE);
			if (sb->running > sb->concurrency) {
				rc = 0;
				break;
			}
			if (signal_pending(current)) {
				rc = -ERESTARTSYS;
				break;
			}
			spin_unlock_irq(&sb->wqh.lock);
			schedule();
			spin_lock_irq(&sb->wqh.lock);
		}
		remove_wait_queue(&sb->wqh_poll, &wait);
		__set_current_state(TASK_RUNNING);
	}
	if (likely(rc == 0)) {
		*cnt = sb->running - sb->concurrency;
	}
	spin_unlock_irq(&sb->wqh.lock);
	return rc;
}
static ssize_t
pwqr_read(struct file *filp, char __user *buf, size_t count, loff_t *ppos)
{
	struct pwqr_sb *sb = filp->private_data;
	u32 cnt = 0;
	ssize_t rc;

	if (count < sizeof(cnt))
		return -EINVAL;
	rc = pwqr_sb_read(sb, filp->f_flags & O_NONBLOCK, &cnt);
	if (rc < 0)
		return rc;
	return put_user(cnt, (u32 __user *)buf) ? -EFAULT : sizeof(cnt);
}
static long
do_pwqr_wait(struct pwqr_sb *sb, struct pwqr_task *pwqt,
	     int is_wait, struct pwqr_ioc_wait __user *arg)
{
	unsigned long flags;
	struct pwqr_ioc_wait wait;
	long rc = 0;
	u32 uval;

	preempt_notifier_unregister(&pwqt->notifier);

	if (is_wait) {
		if (copy_from_user(&wait, arg, sizeof(wait))) {
			rc = -EFAULT;
			goto out;
		}
		if (unlikely((long)wait.pwqr_uaddr % sizeof(int) != 0)) {
			rc = -EINVAL;
			goto out;
		}
	}

	pwqr_sb_lock_irqsave(sb, flags);
	if (sb->running + sb->waiting <= sb->concurrency) {
		if (is_wait) {
			while (probe_kernel_address(wait.pwqr_uaddr, uval)) {
				pwqr_sb_unlock_irqrestore(sb, flags);
				rc = get_user(uval, (u32 __user *)wait.pwqr_uaddr);
				if (rc)
					goto out;
				pwqr_sb_lock_irqsave(sb, flags);
			}

			if (uval != (u32)wait.pwqr_ticket) {
				rc = -EWOULDBLOCK;
				goto out_unlock;
			}
		} else {
			goto out_unlock;
		}
	}

	/* @see <wait_event_interruptible_exclusive_locked_irq> */
	if (likely(sb->state >= 0)) {
		DEFINE_WAIT(__wait);

		__wait.flags |= WQ_FLAG_EXCLUSIVE;
		if (is_wait) {
			sb->waiting++;
			__add_wait_queue(&sb->wqh, &__wait);
		} else {
			sb->parked++;
			__add_wait_queue_tail(&sb->wqh, &__wait);
		}
		__pwqr_sb_update_state(sb, -1);

		do {
			set_current_state(TASK_INTERRUPTIBLE);
			if (sb->overcommit_wakes)
				break;
			if (signal_pending(current)) {
				rc = -ERESTARTSYS;
				break;
			}
			spin_unlock_irq(&sb->wqh.lock);
			schedule();
			spin_lock_irq(&sb->wqh.lock);
			if (is_wait)
				break;
			if (sb->running + sb->waiting < sb->concurrency)
				break;
		} while (likely(sb->state >= 0));

		__remove_wait_queue(&sb->wqh, &__wait);
		__set_current_state(TASK_RUNNING);
		if (is_wait)
			sb->waiting--;
		else
			sb->parked--;
		__pwqr_sb_update_state(sb, 1);

		if (sb->overcommit_wakes)
			sb->overcommit_wakes--;
		if (sb->waiting + sb->running > sb->concurrency)
			wake_up_locked(&sb->wqh);
	}

out_unlock:
	if (unlikely(sb->state < 0))
		rc = -EBADFD;
	pwqr_sb_unlock_irqrestore(sb, flags);
out:
	preempt_notifier_register(&pwqt->notifier);
	return rc;
}
static long do_pwqr_unregister(struct pwqr_sb *sb, struct pwqr_task *pwqt)
{
	if (!pwqt)
		return -EINVAL;
	if (pwqt->sb != sb)
		return -ENOENT;
	pwqr_task_detach(pwqt, sb);
	pwqr_task_release(pwqt, false);
	return 0;
}
static long do_pwqr_set_conc(struct pwqr_sb *sb, int conc)
{
	long old_conc = sb->concurrency;
	unsigned long flags;

	pwqr_sb_lock_irqsave(sb, flags);
	if (conc <= 0)
		conc = num_online_cpus();
	if (conc != old_conc) {
		sb->concurrency = conc;
		__pwqr_sb_update_state(sb, 0);
	}
	pwqr_sb_unlock_irqrestore(sb, flags);

	return old_conc;
}
static long do_pwqr_wake(struct pwqr_sb *sb, int oc, int count)
{
	unsigned long flags;
	int nwake;

	if (count < 0)
		return -EINVAL;

	pwqr_sb_lock_irqsave(sb, flags);

	if (oc) {
		nwake = sb->waiting + sb->parked - sb->overcommit_wakes;
		if (nwake > count)
			nwake = count;
		else
			count = nwake;
		sb->overcommit_wakes += count;
	} else if (sb->running + sb->overcommit_wakes < sb->concurrency) {
		nwake = sb->concurrency - sb->overcommit_wakes - sb->running;
		if (nwake > sb->waiting + sb->parked - sb->overcommit_wakes) {
			nwake = sb->waiting + sb->parked -
				sb->overcommit_wakes;
		}
		if (nwake > count)
			nwake = count;
		count = nwake;
	} else {
		/*
		 * This codepath deserves an explanation: waking the thread
		 * "for real" would overcommit, though userspace KNOWS there
		 * is at least one waiting thread.  Such threads are
		 * "quarantined".
		 *
		 * Quarantined threads are woken up one by one, to allow a
		 * slow ramp down, trying to minimize "waiting" <-> "parked"
		 * flip-flops, no matter how many wakes have been asked for.
		 *
		 * Since releasing one quarantined thread will wake up a
		 * thread that will (almost) immediately go back to parked
		 * mode, lie to userland about the fact that we unblocked
		 * that thread, and return 0.
		 *
		 * Though if we're already waking all waiting threads for
		 * overcommitting jobs, well, we don't need that.
		 */
		nwake = sb->waiting > sb->overcommit_wakes;
		count = 0;
	}

	while (nwake-- > 0)
		wake_up_locked(&sb->wqh);
	pwqr_sb_unlock_irqrestore(sb, flags);

	return count;
}
static long pwqr_ioctl(struct file *filp, unsigned command, unsigned long arg)
{
	struct pwqr_sb     *sb   = filp->private_data;
	struct task_struct *task = current;
	struct pwqr_task   *pwqt;
	long rc = 0;

	switch (command) {
	case PWQR_CTL_GET_CONC:
		return sb->concurrency;
	case PWQR_CTL_SET_CONC:
		return do_pwqr_set_conc(sb, (int)arg);

	case PWQR_CTL_WAKE:
	case PWQR_CTL_WAKE_OC:
		return do_pwqr_wake(sb, command == PWQR_CTL_WAKE_OC, (int)arg);

	case PWQR_CTL_WAIT:
	case PWQR_CTL_PARK:
	case PWQR_CTL_REGISTER:
	case PWQR_CTL_UNREGISTER:
		break;
	default:
		return -EINVAL;
	}

	pwqt = pwqr_task_find(task);
	if (command == PWQR_CTL_UNREGISTER)
		return do_pwqr_unregister(sb, pwqt);

	if (pwqt == NULL) {
		pwqt = pwqr_task_create(task);
		if (IS_ERR(pwqt))
			return PTR_ERR(pwqt);
		pwqr_task_attach(pwqt, sb);
	} else if (unlikely(pwqt->sb != sb)) {
		pwqr_task_detach(pwqt, pwqt->sb);
		pwqr_task_attach(pwqt, sb);
	}

	switch (command) {
	case PWQR_CTL_WAIT:
		rc = do_pwqr_wait(sb, pwqt, true, (struct pwqr_ioc_wait __user *)arg);
		break;
	case PWQR_CTL_PARK:
		rc = do_pwqr_wait(sb, pwqt, false, NULL);
		break;
	}
	return rc;
}
static const struct file_operations pwqr_dev_fops = {
	.owner		= THIS_MODULE,
	.open		= pwqr_open,
	.release	= pwqr_release,
	.poll		= pwqr_poll,
	.read		= pwqr_read,
	.llseek		= noop_llseek,
	.unlocked_ioctl	= pwqr_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= pwqr_ioctl,
#endif
};
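/*
 * Illustrative only: per the todolist at the top of this file, the device
 * boilerplate is meant to be replaced by a pwqr_create() syscall handing out
 * the same file, much like fs/eventfd.c does.  A hedged sketch, assuming
 * anon_inode_getfd() is acceptable there:
 *
 *	SYSCALL_DEFINE1(pwqr_create, int, flags)
 *	{
 *		struct pwqr_sb *sb;
 *
 *		if (flags)
 *			return -EINVAL;
 *		sb = pwqr_sb_create();
 *		if (IS_ERR(sb))
 *			return PTR_ERR(sb);
 *		return anon_inode_getfd("[pwqr]", &pwqr_dev_fops, sb, O_RDWR);
 *	}
 */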
/*****************************************************************************
 * module init / exit
 */

static int __init pwqr_start(void)
{
#if IS_PRE_2_6_23
	int i;

	for (i = 0; i < PWQR_HASH_SIZE; i++) {
		spin_lock_init(&pwqr_tasks_hash[i].lock);
		INIT_HLIST_HEAD(&pwqr_tasks_hash[i].tasks);
	}
#endif

	/* Register as a character device */
	pwqr_major = register_chrdev(0, "pwqr", &pwqr_dev_fops);
	if (pwqr_major < 0) {
		printk(KERN_ERR "pwqr: register_chrdev() failed\n");
		return pwqr_major;
	}

	/* Create a device node */
	pwqr_class = class_create(THIS_MODULE, PWQR_DEVICE_NAME);
	if (IS_ERR(pwqr_class)) {
		printk(KERN_ERR "pwqr: Error creating raw class\n");
		unregister_chrdev(pwqr_major, PWQR_DEVICE_NAME);
		return PTR_ERR(pwqr_class);
	}
	device_create(pwqr_class, NULL, MKDEV(pwqr_major, 0), NULL,
		      PWQR_DEVICE_NAME);
	printk(KERN_INFO "pwqr: PThreads Work Queues Regulator v1 loaded\n");
	return 0;
}
static void __exit pwqr_end(void)
{
	rcu_barrier();	/* make sure pending pwqr_sb_finalize() calls ran */
	device_destroy(pwqr_class, MKDEV(pwqr_major, 0));
	class_destroy(pwqr_class);
	unregister_chrdev(pwqr_major, PWQR_DEVICE_NAME);
}
module_init(pwqr_start);
module_exit(pwqr_end);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Pierre Habouzit <pierre.habouzit@intersec.com>");
MODULE_DESCRIPTION("PThreads Work Queues Regulator");

// vim:noet:sw=8:cinoptions+=\:0,L-1,=1s: