/*
 * Copyright (C) 2012   Pierre Habouzit <pierre.habouzit@intersec.com>
 * Copyright (C) 2012   Intersec SAS
 *
 * This file implements the Linux Pthread Workqueue Regulator, and is part
 * of the Linux kernel.
 *
 * The Linux Kernel is free software: you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * The Linux Kernel is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License version 2
 * along with The Linux Kernel.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/hash.h>
#include <linux/init.h>
#include <linux/kref.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/timer.h>
#include <linux/uaccess.h>
#include <linux/wait.h>

#ifndef CONFIG_PREEMPT_NOTIFIERS
#  error PWQ module requires CONFIG_PREEMPT_NOTIFIERS
#endif

#include "pwqr.h"

#define PWQR_HASH_BITS          5
#define PWQR_HASH_SIZE          (1 << PWQR_HASH_BITS)

#define PWQR_UC_DELAY           (HZ / 10)
#define PWQR_OC_DELAY           (HZ / 20)

#define PWQR_STATE_NONE         0
#define PWQR_STATE_UC           1
#define PWQR_STATE_OC           2
#define PWQR_STATE_DEAD         (-1)
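
/*
 * sb->state holds PWQR_STATE_UC or PWQR_STATE_OC while the matching
 * regulation timer is armed, PWQR_STATE_NONE when the pool is balanced,
 * and PWQR_STATE_DEAD once the pwqr file descriptor has been closed.
 */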

struct pwqr_task_bucket {
        spinlock_t              lock;
        struct hlist_head       tasks;
};

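/*
 * Scoreboard, one per open pwqr file descriptor: "running" counts
 * registered threads currently on a runqueue, "waiting" those blocked in
 * PWQR_WAIT, "parked" those blocked in PWQR_PARK, and "overcommit_wakes"
 * the wakeups granted beyond "concurrency" by PWQR_WAKE_OC.
 */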
struct pwqr_sb {
        struct kref             kref;
        struct rcu_head         rcu;
        struct timer_list       timer;
        wait_queue_head_t       wqh;
        wait_queue_head_t       wqh_poll;

        unsigned                concurrency;
        unsigned                registered;

        unsigned                running;
        unsigned                waiting;
        unsigned                parked;
        unsigned                overcommit_wakes;

        int                     state;
};

struct pwqr_task {
        struct preempt_notifier notifier;
        struct hlist_node       link;
        struct rcu_head         rcu;
        struct task_struct     *task;
        struct pwqr_sb         *sb;
};

/*
 * Global variables
 */
static struct class            *pwqr_class;
static int                      pwqr_major;
static struct pwqr_task_bucket  pwqr_tasks_hash[PWQR_HASH_SIZE];
static struct preempt_ops       pwqr_preempt_running_ops;
static struct preempt_ops       pwqr_preempt_blocked_ops;
static struct preempt_ops       pwqr_preempt_noop_ops;

/*****************************************************************************
 * Scoreboards
 */

#define pwqr_sb_lock_irqsave(sb, flags) \
        spin_lock_irqsave(&(sb)->wqh.lock, flags)
#define pwqr_sb_unlock_irqrestore(sb, flags) \
        spin_unlock_irqrestore(&(sb)->wqh.lock, flags)

static inline void pwqr_arm_timer(struct pwqr_sb *sb, int how, int delay)
{
        if (timer_pending(&sb->timer) && sb->state == how)
                return;
        mod_timer(&sb->timer, jiffies + delay);
        sb->state = how;
}

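/*
 * Called with sb->wqh.lock held.  Rather than reacting to every increment
 * or decrement, under- and overcommit are handled lazily: when the pool
 * looks undercommitted (fewer running threads than "concurrency", nobody
 * waiting, but some threads parked) or overcommitted (more running than
 * "concurrency"), arm the regulation timer; the callback below only acts
 * if the imbalance still holds when it fires.
 */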
static inline void __pwqr_sb_update_state(struct pwqr_sb *sb, int running_delta)
{
        sb->running += running_delta;

        if (sb->running < sb->concurrency && sb->waiting == 0 && sb->parked) {
                pwqr_arm_timer(sb, PWQR_STATE_UC, PWQR_UC_DELAY);
        } else if (sb->running > sb->concurrency) {
                pwqr_arm_timer(sb, PWQR_STATE_OC, PWQR_OC_DELAY);
        } else {
                sb->state = PWQR_STATE_NONE;
                /* pool is balanced again: cancel any pending UC/OC timer */
                if (timer_pending(&sb->timer))
                        del_timer(&sb->timer);
        }
}

static void pwqr_sb_timer_cb(unsigned long arg)
{
        struct pwqr_sb *sb = (struct pwqr_sb *)arg;
        unsigned long flags;

        pwqr_sb_lock_irqsave(sb, flags);
        if (sb->running < sb->concurrency && sb->waiting == 0 && sb->parked) {
                if (sb->overcommit_wakes == 0)
                        wake_up_locked(&sb->wqh);
        }
        if (sb->running > sb->concurrency) {
                wake_up_poll(&sb->wqh_poll, POLLIN);
        }
        pwqr_sb_unlock_irqrestore(sb, flags);
}

static struct pwqr_sb *pwqr_sb_create(void)
{
        struct pwqr_sb *sb;

        sb = kzalloc(sizeof(struct pwqr_sb), GFP_KERNEL);
        if (sb == NULL)
                return ERR_PTR(-ENOMEM);

        kref_init(&sb->kref);
        init_waitqueue_head(&sb->wqh);
        init_waitqueue_head(&sb->wqh_poll);
        sb->concurrency    = num_online_cpus();
        init_timer(&sb->timer);
        sb->timer.function = pwqr_sb_timer_cb;
        sb->timer.data     = (unsigned long)sb;

        __module_get(THIS_MODULE);
        return sb;
}
static inline void pwqr_sb_get(struct pwqr_sb *sb)
{
        kref_get(&sb->kref);
}

static void pwqr_sb_finalize(struct rcu_head *rcu)
{
        struct pwqr_sb *sb = container_of(rcu, struct pwqr_sb, rcu);

        module_put(THIS_MODULE);
        kfree(sb);
}

static void pwqr_sb_release(struct kref *kref)
{
        struct pwqr_sb *sb = container_of(kref, struct pwqr_sb, kref);

        del_timer_sync(&sb->timer);
        wake_up_poll(&sb->wqh_poll, POLLHUP);
        call_rcu(&sb->rcu, pwqr_sb_finalize);
}
static inline void pwqr_sb_put(struct pwqr_sb *sb)
{
        kref_put(&sb->kref, pwqr_sb_release);
}

/*****************************************************************************
 * tasks
 */
static inline struct pwqr_task_bucket *task_hbucket(struct task_struct *task)
{
        return &pwqr_tasks_hash[hash_ptr(task, PWQR_HASH_BITS)];
}

static struct pwqr_task *pwqr_task_find(struct task_struct *task)
{
        struct pwqr_task_bucket *b = task_hbucket(task);
        struct hlist_node *node;
        struct pwqr_task *pwqt = NULL;

        spin_lock(&b->lock);
        hlist_for_each_entry(pwqt, node, &b->tasks, link) {
                if (pwqt->task == task)
                        break;
        }
        spin_unlock(&b->lock);
        /* on loop exhaustion pwqt points at the last entry, not a match */
        return node ? pwqt : NULL;
}

static struct pwqr_task *pwqr_task_create(struct task_struct *task)
{
        struct pwqr_task_bucket *b = task_hbucket(task);
        struct pwqr_task *pwqt;

        pwqt = kmalloc(sizeof(*pwqt), GFP_KERNEL);
        if (pwqt == NULL)
                return ERR_PTR(-ENOMEM);

        preempt_notifier_init(&pwqt->notifier, &pwqr_preempt_running_ops);
        preempt_notifier_register(&pwqt->notifier);
        pwqt->task = task;

        spin_lock(&b->lock);
        hlist_add_head(&pwqt->link, &b->tasks);
        spin_unlock(&b->lock);

        return pwqt;
}

__cold
static void pwqr_task_detach(struct pwqr_task *pwqt, struct pwqr_sb *sb)
{
        unsigned long flags;

        pwqr_sb_lock_irqsave(sb, flags);
        sb->registered--;
        if (pwqt->notifier.ops == &pwqr_preempt_running_ops) {
                __pwqr_sb_update_state(sb, -1);
        } else {
                __pwqr_sb_update_state(sb, 0);
        }
        pwqr_sb_unlock_irqrestore(sb, flags);
        pwqr_sb_put(sb);
        pwqt->sb = NULL;
}

__cold
static void pwqr_task_attach(struct pwqr_task *pwqt, struct pwqr_sb *sb)
{
        unsigned long flags;

        pwqr_sb_lock_irqsave(sb, flags);
        pwqr_sb_get(pwqt->sb = sb);
        sb->registered++;
        __pwqr_sb_update_state(sb, 1);
        pwqr_sb_unlock_irqrestore(sb, flags);
}

__cold
static void pwqr_task_release(struct pwqr_task *pwqt, bool from_notifier)
{
        struct pwqr_task_bucket *b = task_hbucket(pwqt->task);

        spin_lock(&b->lock);
        hlist_del(&pwqt->link);
        spin_unlock(&b->lock);
        pwqt->notifier.ops = &pwqr_preempt_noop_ops;

        if (from_notifier) {
                /* When called from sched_{out,in}, we may not call
                 * preempt_notifier_unregister() (nor, worse, kfree()).
                 *
                 * kfree()ing a still-registered notifier is only tolerable
                 * because the task is dying: were it not, the next
                 * sched_{in,out} call would hit freed memory and panic.
                 */
                BUG_ON(!(pwqt->task->state & TASK_DEAD));
                kfree_rcu(pwqt, rcu);
        } else {
                preempt_notifier_unregister(&pwqt->notifier);
                kfree(pwqt);
        }
}

static void pwqr_task_noop_sched_in(struct preempt_notifier *notifier, int cpu)
{
}

static void pwqr_task_noop_sched_out(struct preempt_notifier *notifier,
                                    struct task_struct *next)
{
}

static void pwqr_task_blocked_sched_in(struct preempt_notifier *notifier, int cpu)
{
        struct pwqr_task *pwqt = container_of(notifier, struct pwqr_task, notifier);
        struct pwqr_sb   *sb   = pwqt->sb;
        unsigned long flags;

        if (unlikely(sb->state < 0)) {
                pwqr_task_detach(pwqt, sb);
                pwqr_task_release(pwqt, true);
                return;
        }

        pwqt->notifier.ops = &pwqr_preempt_running_ops;
        pwqr_sb_lock_irqsave(sb, flags);
        __pwqr_sb_update_state(sb, 1);
        pwqr_sb_unlock_irqrestore(sb, flags);
}

static void pwqr_task_sched_out(struct preempt_notifier *notifier,
                               struct task_struct *next)
{
        struct pwqr_task   *pwqt = container_of(notifier, struct pwqr_task, notifier);
        struct pwqr_sb     *sb   = pwqt->sb;
        struct task_struct *p    = pwqt->task;

        if (unlikely(p->state & TASK_DEAD) || unlikely(sb->state < 0)) {
                pwqr_task_detach(pwqt, sb);
                pwqr_task_release(pwqt, true);
                return;
        }
        if (p->state == 0 || (p->state & (__TASK_STOPPED | __TASK_TRACED)))
                return;

        pwqt->notifier.ops = &pwqr_preempt_blocked_ops;
        /* see preempt.h: irqs are disabled for sched_out */
        spin_lock(&sb->wqh.lock);
        __pwqr_sb_update_state(sb, -1);
        spin_unlock(&sb->wqh.lock);
}

static struct preempt_ops __read_mostly pwqr_preempt_noop_ops = {
        .sched_in       = pwqr_task_noop_sched_in,
        .sched_out      = pwqr_task_noop_sched_out,
};

static struct preempt_ops __read_mostly pwqr_preempt_running_ops = {
        .sched_in       = pwqr_task_noop_sched_in,
        .sched_out      = pwqr_task_sched_out,
};

static struct preempt_ops __read_mostly pwqr_preempt_blocked_ops = {
        .sched_in       = pwqr_task_blocked_sched_in,
        .sched_out      = pwqr_task_sched_out,
};
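
/*
 * A registered thread flips between pwqr_preempt_running_ops and
 * pwqr_preempt_blocked_ops: sched_out while really blocking (not merely
 * preempted, stopped or traced) switches to the blocked ops and decrements
 * sb->running; the next sched_in switches back and re-increments it.
 * pwqr_preempt_noop_ops is installed while a pwqr_task is torn down.
 */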

/*****************************************************************************
 * file descriptor
 */
static int pwqr_open(struct inode *inode, struct file *filp)
{
        struct pwqr_sb *sb;

        sb = pwqr_sb_create();
        if (IS_ERR(sb))
                return PTR_ERR(sb);
        filp->private_data = sb;
        return 0;
}

static int pwqr_release(struct inode *inode, struct file *filp)
{
        struct pwqr_sb *sb = filp->private_data;
        unsigned long flags;

        pwqr_sb_lock_irqsave(sb, flags);
        sb->state = PWQR_STATE_DEAD;
        pwqr_sb_unlock_irqrestore(sb, flags);
        wake_up_all(&sb->wqh);
        pwqr_sb_put(sb);
        return 0;
}

static unsigned int pwqr_poll(struct file *filp, poll_table *wait)
{
        struct pwqr_sb *sb = filp->private_data;
        unsigned int events = 0;
        unsigned long flags;

        poll_wait(filp, &sb->wqh_poll, wait);

        pwqr_sb_lock_irqsave(sb, flags);
        if (sb->running > sb->concurrency)
                events |= POLLIN;
        if (sb->state < 0)
                events |= POLLHUP;
        pwqr_sb_unlock_irqrestore(sb, flags);

        return events;
}

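/*
 * Backend for read(2): block (unless O_NONBLOCK) until the pool is
 * overcommitted, then report how many threads are running beyond the
 * concurrency target, so userland can decide how many workers to park.
 */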
static inline ssize_t pwqr_sb_read(struct pwqr_sb *sb, int no_wait, u32 *cnt)
{
        DECLARE_WAITQUEUE(wait, current);
        ssize_t rc = -EAGAIN;

        spin_lock_irq(&sb->wqh.lock);
        if (sb->running > sb->concurrency) {
                rc = 0;
        } else if (!no_wait) {
                add_wait_queue(&sb->wqh_poll, &wait);
                for (;;) {
                        set_current_state(TASK_INTERRUPTIBLE);
                        if (sb->running > sb->concurrency) {
                                rc = 0;
                                break;
                        }
                        if (signal_pending(current)) {
                                rc = -ERESTARTSYS;
                                break;
                        }
                        spin_unlock_irq(&sb->wqh.lock);
                        schedule();
                        spin_lock_irq(&sb->wqh.lock);
                }
                remove_wait_queue(&sb->wqh_poll, &wait);
                __set_current_state(TASK_RUNNING);
        }
        if (likely(rc == 0))
                *cnt = sb->running - sb->concurrency;
        spin_unlock_irq(&sb->wqh.lock);

        return rc;
}

static ssize_t
pwqr_read(struct file *filp, char __user *buf, size_t count, loff_t *ppos)
{
        struct pwqr_sb *sb = filp->private_data;
        u32 cnt = 0;
        ssize_t rc;

        if (count < sizeof(cnt))
                return -EINVAL;
        rc = pwqr_sb_read(sb, filp->f_flags & O_NONBLOCK, &cnt);
        if (rc < 0)
                return rc;
        return put_user(cnt, (u32 __user *)buf) ? -EFAULT : sizeof(cnt);
}

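/*
 * Heart of PWQR_WAIT (is_wait) and PWQR_PARK (!is_wait).  The preempt
 * notifier is unregistered around the sleep so that the voluntary block
 * is accounted through "waiting"/"parked" instead of the sched_out hook.
 * While the pool is not overcommitted, PWQR_PARK returns at once and
 * PWQR_WAIT only sleeps if the userland ticket at pwqr_uaddr still equals
 * pwqr_ticket (-EWOULDBLOCK otherwise); waking up into a pool that is
 * still overcommitted is reported with -EDQUOT.
 */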
static long
do_pwqr_wait(struct pwqr_sb *sb, struct pwqr_task *pwqt,
             int is_wait, struct pwqr_ioc_wait __user *arg)
{
        unsigned long flags;
        struct pwqr_ioc_wait wait;
        long rc = 0;
        u32 uval;

        preempt_notifier_unregister(&pwqt->notifier);

        if (is_wait) {
                if (copy_from_user(&wait, arg, sizeof(wait))) {
                        rc = -EFAULT;
                        goto out;
                }
                if (unlikely((long)wait.pwqr_uaddr % sizeof(int) != 0)) {
                        rc = -EINVAL;
                        goto out;
                }
        }

        pwqr_sb_lock_irqsave(sb, flags);
        if (sb->running + sb->waiting <= sb->concurrency) {
                if (is_wait) {
                        while (probe_kernel_address(wait.pwqr_uaddr, uval)) {
                                pwqr_sb_unlock_irqrestore(sb, flags);
                                rc = get_user(uval, (u32 __user *)wait.pwqr_uaddr);
                                if (rc)
                                        goto out;
                                pwqr_sb_lock_irqsave(sb, flags);
                        }

                        if (uval != (u32)wait.pwqr_ticket) {
                                rc = -EWOULDBLOCK;
                                goto out_unlock;
                        }
                } else {
                        goto out_unlock;
                }
        }

        /* see <wait_event_interruptible_exclusive_locked_irq> */
        if (likely(sb->state >= 0)) {
                DEFINE_WAIT(__wait);
                __wait.flags |= WQ_FLAG_EXCLUSIVE;

                if (is_wait) {
                        sb->waiting++;
                        __add_wait_queue(&sb->wqh, &__wait);
                } else {
                        sb->parked++;
                        __add_wait_queue_tail(&sb->wqh, &__wait);
                }
                __pwqr_sb_update_state(sb, -1);

                do {
                        set_current_state(TASK_INTERRUPTIBLE);
                        if (sb->overcommit_wakes)
                                break;
                        if (signal_pending(current)) {
                                rc = -ERESTARTSYS;
                                break;
                        }
                        spin_unlock_irq(&sb->wqh.lock);
                        schedule();
                        spin_lock_irq(&sb->wqh.lock);
                        if (is_wait)
                                break;
                        if (sb->running + sb->waiting < sb->concurrency)
                                break;
                } while (likely(sb->state >= 0));

                __remove_wait_queue(&sb->wqh, &__wait);
                __set_current_state(TASK_RUNNING);
                if (is_wait) {
                        sb->waiting--;
                } else {
                        sb->parked--;
                }
                __pwqr_sb_update_state(sb, 1);

                if (sb->overcommit_wakes)
                        sb->overcommit_wakes--;
                if (sb->waiting + sb->running > sb->concurrency)
                        rc = -EDQUOT;
        }

out_unlock:
        if (unlikely(sb->state < 0))
                rc = -EBADFD;
        pwqr_sb_unlock_irqrestore(sb, flags);
out:
        preempt_notifier_register(&pwqt->notifier);
        return rc;
}

static long do_pwqr_unregister(struct pwqr_sb *sb, struct pwqr_task *pwqt)
{
        if (!pwqt)
                return -EINVAL;
        if (pwqt->sb != sb)
                return -ENOENT;
        pwqr_task_detach(pwqt, sb);
        pwqr_task_release(pwqt, false);
        return 0;
}

static long do_pwqr_set_conc(struct pwqr_sb *sb, int conc)
{
        long old_conc = sb->concurrency;
        unsigned long flags;

        pwqr_sb_lock_irqsave(sb, flags);
        if (conc <= 0)
                conc = num_online_cpus();
        if (conc != old_conc) {
                sb->concurrency = conc;
                __pwqr_sb_update_state(sb, 0);
        }
        pwqr_sb_unlock_irqrestore(sb, flags);

        return old_conc;
}

static long do_pwqr_wake(struct pwqr_sb *sb, int oc, int count)
{
        unsigned long flags;
        int nwake;

        if (count < 0)
                return -EINVAL;

        pwqr_sb_lock_irqsave(sb, flags);

        if (oc) {
                nwake = sb->waiting + sb->parked - sb->overcommit_wakes;
                if (count > nwake) {
                        count = nwake;
                } else {
                        nwake = count;
                }
                sb->overcommit_wakes += count;
        } else if (sb->running + sb->overcommit_wakes < sb->concurrency) {
                nwake = sb->concurrency - sb->overcommit_wakes - sb->running;
                if (nwake > sb->waiting + sb->parked - sb->overcommit_wakes) {
                        nwake = sb->waiting + sb->parked -
                                sb->overcommit_wakes;
                }
                if (count > nwake) {
                        count = nwake;
                } else {
                        nwake = count;
                }
        } else {
                /*
                 * This codepath deserves an explanation: waking a thread
                 * "for real" would overcommit, yet userspace KNOWS there
                 * is at least one waiting thread.  Such threads are
                 * "quarantined".
                 *
                 * Quarantined threads are woken up one by one, to allow a
                 * slow ramp down and to minimize "waiting" <-> "parked"
                 * flip-flops, no matter how many wakes were asked for.
                 *
                 * Since releasing one quarantined thread wakes up a thread
                 * that will (almost) go straight to parked mode, lie to
                 * userland about having unblocked it, and return 0.
                 *
                 * If we're already waking all waiting threads for
                 * overcommitting jobs, though, none of this is needed.
                 */
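                /*
                 * Example: concurrency = 4, running = 4, waiting = 2,
                 * overcommit_wakes = 0.  A plain PWQR_WAKE cannot run one
                 * more thread, so a single waiting thread is released
                 * (nwake = 1); it will typically requarantine itself via
                 * -EDQUOT, and userland is told that 0 threads were woken.
                 */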
                count = 0;
                nwake = sb->waiting > sb->overcommit_wakes;
        }
        while (nwake-- > 0)
                wake_up_locked(&sb->wqh);
        pwqr_sb_unlock_irqrestore(sb, flags);

        return count;
}

static long pwqr_ioctl(struct file *filp, unsigned command, unsigned long arg)
{
        struct pwqr_sb     *sb   = filp->private_data;
        struct task_struct *task = current;
        struct pwqr_task   *pwqt;
        int rc = 0;

        switch (command) {
        case PWQR_GET_CONC:
                return sb->concurrency;
        case PWQR_SET_CONC:
                return do_pwqr_set_conc(sb, (int)arg);

        case PWQR_WAKE:
        case PWQR_WAKE_OC:
                return do_pwqr_wake(sb, command == PWQR_WAKE_OC, (int)arg);

        case PWQR_WAIT:
        case PWQR_PARK:
        case PWQR_REGISTER:
        case PWQR_UNREGISTER:
                break;
        default:
                return -EINVAL;
        }

        pwqt = pwqr_task_find(task);
        if (command == PWQR_UNREGISTER)
                return do_pwqr_unregister(sb, pwqt);

        if (pwqt == NULL) {
                pwqt = pwqr_task_create(task);
                if (IS_ERR(pwqt))
                        return PTR_ERR(pwqt);
                pwqr_task_attach(pwqt, sb);
        } else if (unlikely(pwqt->sb != sb)) {
                pwqr_task_detach(pwqt, pwqt->sb);
                pwqr_task_attach(pwqt, sb);
        }

        switch (command) {
        case PWQR_WAIT:
                rc = do_pwqr_wait(sb, pwqt, true, (struct pwqr_ioc_wait __user *)arg);
                break;
        case PWQR_PARK:
                rc = do_pwqr_wait(sb, pwqt, false, NULL);
                break;
        }

        if (unlikely(sb->state < 0)) {
                pwqr_task_detach(pwqt, pwqt->sb);
                return -EBADFD;
        }
        return rc;
}
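
/*
 * Minimal userland sketch of a worker thread's loop (assumptions: the
 * device node appears as /dev/pwqr, the PWQR_* requests and struct
 * pwqr_ioc_wait come from the userland copy of pwqr.h, "job_ticket" is a
 * hypothetical u32 bumped by the enqueuer, and error handling is elided):
 *
 *      int fd = open("/dev/pwqr", O_RDWR);
 *
 *      ioctl(fd, PWQR_REGISTER);               // attach to this regulator
 *      for (;;) {
 *              u32 ticket = job_ticket;
 *              // ... drain pending work items ...
 *              struct pwqr_ioc_wait w = {
 *                      .pwqr_ticket = ticket,
 *                      .pwqr_uaddr  = &job_ticket,
 *              };
 *              if (ioctl(fd, PWQR_WAIT, &w) < 0 && errno == EDQUOT)
 *                      ioctl(fd, PWQR_PARK);   // overcommitted: quarantine
 *      }
 */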

static const struct file_operations pwqr_dev_fops = {
        .owner          = THIS_MODULE,
        .open           = pwqr_open,
        .release        = pwqr_release,
        .poll           = pwqr_poll,
        .read           = pwqr_read,
        .llseek         = noop_llseek,
        .unlocked_ioctl = pwqr_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl   = pwqr_ioctl,
#endif
};

/*****************************************************************************
 * module
 */
static int __init pwqr_start(void)
{
        int i;

        for (i = 0; i < PWQR_HASH_SIZE; i++) {
                spin_lock_init(&pwqr_tasks_hash[i].lock);
                INIT_HLIST_HEAD(&pwqr_tasks_hash[i].tasks);
        }

        /* Register as a character device */
        pwqr_major = register_chrdev(0, "pwqr", &pwqr_dev_fops);
        if (pwqr_major < 0) {
                printk(KERN_ERR "pwqr: register_chrdev() failed\n");
                return pwqr_major;
        }

        /* Create a device node */
        pwqr_class = class_create(THIS_MODULE, PWQR_DEVICE_NAME);
        if (IS_ERR(pwqr_class)) {
                printk(KERN_ERR "pwqr: Error creating raw class\n");
                unregister_chrdev(pwqr_major, PWQR_DEVICE_NAME);
                return PTR_ERR(pwqr_class);
        }
        device_create(pwqr_class, NULL, MKDEV(pwqr_major, 0), NULL, PWQR_DEVICE_NAME);
        printk(KERN_INFO "pwqr: PThreads Work Queues Regulator v1 loaded\n");
        return 0;
}

static void __exit pwqr_end(void)
{
        rcu_barrier();
        device_destroy(pwqr_class, MKDEV(pwqr_major, 0));
        class_destroy(pwqr_class);
        unregister_chrdev(pwqr_major, PWQR_DEVICE_NAME);
}

module_init(pwqr_start);
module_exit(pwqr_end);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Pierre Habouzit <pierre.habouzit@intersec.com>");
MODULE_DESCRIPTION("PThreads Work Queues Regulator");

// vim:noet:sw=8:cinoptions+=\:0,L-1,=1s: