2400_kcopy-patch-for-infiniband-driver.patch

From 1f52075d672a9bdd0069b3ea68be266ef5c229bd Mon Sep 17 00:00:00 2001
From: Alexey Shvetsov <alexxy@gentoo.org>
Date: Tue, 17 Jan 2012 21:08:49 +0400
Subject: [PATCH] [kcopy] Add kcopy driver

Add kcopy driver from qlogic to implement zero copy for infiniband psm
userspace driver

Signed-off-by: Alexey Shvetsov <alexxy@gentoo.org>
---
 drivers/char/Kconfig        |   2 +
 drivers/char/Makefile       |   2 +
 drivers/char/kcopy/Kconfig  |  17 ++
 drivers/char/kcopy/Makefile |   4 +
 drivers/char/kcopy/kcopy.c  | 646 +++++++++++++++++++++++++++++++++++++++++++
 5 files changed, 671 insertions(+)
 create mode 100644 drivers/char/kcopy/Kconfig
 create mode 100644 drivers/char/kcopy/Makefile
 create mode 100644 drivers/char/kcopy/kcopy.c

diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index ee94686..5b81449 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -6,6 +6,8 @@ menu "Character devices"
 
 source "drivers/tty/Kconfig"
 
+source "drivers/char/kcopy/Kconfig"
+
 config DEVKMEM
 	bool "/dev/kmem virtual device support"
 	default y
diff --git a/drivers/char/Makefile b/drivers/char/Makefile
index 0dc5d7c..be519d6 100644
--- a/drivers/char/Makefile
+++ b/drivers/char/Makefile
@@ -62,3 +62,5 @@
 js-rtc-y = rtc.o
 
 obj-$(CONFIG_TILE_SROM)	+= tile-srom.o
+
+obj-$(CONFIG_KCOPY)	+= kcopy/
diff --git a/drivers/char/kcopy/Kconfig b/drivers/char/kcopy/Kconfig
new file mode 100644
index 0000000..453ae52
--- /dev/null
+++ b/drivers/char/kcopy/Kconfig
@@ -0,0 +1,17 @@
+#
+# KCopy character device configuration
+#
+
+menu "KCopy"
+
+config KCOPY
+	tristate "Memory-to-memory copies using kernel assist"
+	default m
+	---help---
+	  High-performance inter-process memory copies. Can often save a
+	  memory copy to shared memory in the application. Useful at least
+	  for MPI applications where the point-to-point nature of vmsplice
+	  and pipes can be a limiting factor in performance.
+
+endmenu
+
diff --git a/drivers/char/kcopy/Makefile b/drivers/char/kcopy/Makefile
new file mode 100644
index 0000000..9cb269b
--- /dev/null
+++ b/drivers/char/kcopy/Makefile
@@ -0,0 +1,4 @@
+#
+# Makefile for the kernel character device drivers.
+#
+obj-$(CONFIG_KCOPY) += kcopy.o
diff --git a/drivers/char/kcopy/kcopy.c b/drivers/char/kcopy/kcopy.c
new file mode 100644
index 0000000..a9f915c
--- /dev/null
+++ b/drivers/char/kcopy/kcopy.c
@@ -0,0 +1,646 @@
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/cdev.h>
+#include <linux/device.h>
+#include <linux/mutex.h>
+#include <linux/mman.h>
+#include <linux/highmem.h>
+#include <linux/spinlock.h>
+#include <linux/sched.h>
+#include <linux/rbtree.h>
+#include <linux/rcupdate.h>
+#include <linux/uaccess.h>
+#include <linux/slab.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Arthur Jones <arthur.jones@qlogic.com>");
+MODULE_DESCRIPTION("QLogic kcopy driver");
+
+#define KCOPY_ABI 1
+#define KCOPY_MAX_MINORS 64
+
+struct kcopy_device {
+	struct cdev cdev;
+	struct class *class;
+	struct device *devp[KCOPY_MAX_MINORS];
+	dev_t dev;
+
+	struct kcopy_file *kf[KCOPY_MAX_MINORS];
+	struct mutex open_lock;
+};
+
+static struct kcopy_device kcopy_dev;
+
+/* per file data / one of these is shared per minor */
+struct kcopy_file {
+	int count;
+
+	/* pid indexed */
+	struct rb_root live_map_tree;
+
+	struct mutex map_lock;
+};
+
+struct kcopy_map_entry {
+	int count;
+	struct task_struct *task;
+	pid_t pid;
+	struct kcopy_file *file; /* file backpointer */
+
+	struct list_head list; /* free map list */
+	struct rb_node node; /* live map tree */
+};
+
+#define KCOPY_GET_SYSCALL 1
+#define KCOPY_PUT_SYSCALL 2
+#define KCOPY_ABI_SYSCALL 3
+
+struct kcopy_syscall {
+	__u32 tag;
+	pid_t pid;
+	__u64 n;
+	__u64 src;
+	__u64 dst;
+};
+
+static const void __user *kcopy_syscall_src(const struct kcopy_syscall *ks)
+{
+	return (const void __user *) (unsigned long) ks->src;
+}
+
+static void __user *kcopy_syscall_dst(const struct kcopy_syscall *ks)
+{
+	return (void __user *) (unsigned long) ks->dst;
+}
+
+static unsigned long kcopy_syscall_n(const struct kcopy_syscall *ks)
+{
+	return (unsigned long) ks->n;
+}
+
+static struct kcopy_map_entry *kcopy_create_entry(struct kcopy_file *file)
+{
+	struct kcopy_map_entry *kme =
+		kmalloc(sizeof(struct kcopy_map_entry), GFP_KERNEL);
+
+	if (!kme)
+		return NULL;
+
+	kme->count = 1;
+	kme->file = file;
+	kme->task = current;
+	kme->pid = current->tgid;
+	INIT_LIST_HEAD(&kme->list);
+
+	return kme;
+}
+
+static struct kcopy_map_entry *
+kcopy_lookup_pid(struct rb_root *root, pid_t pid)
+{
+	struct rb_node *node = root->rb_node;
+
+	while (node) {
+		struct kcopy_map_entry *kme =
+			container_of(node, struct kcopy_map_entry, node);
+
+		if (pid < kme->pid)
+			node = node->rb_left;
+		else if (pid > kme->pid)
+			node = node->rb_right;
+		else
+			return kme;
+	}
+
+	return NULL;
+}
+
+static int kcopy_insert(struct rb_root *root, struct kcopy_map_entry *kme)
+{
+	struct rb_node **new = &(root->rb_node);
+	struct rb_node *parent = NULL;
+
+	while (*new) {
+		struct kcopy_map_entry *tkme =
+			container_of(*new, struct kcopy_map_entry, node);
+
+		parent = *new;
+		if (kme->pid < tkme->pid)
+			new = &((*new)->rb_left);
+		else if (kme->pid > tkme->pid)
+			new = &((*new)->rb_right);
+		else {
+			printk(KERN_INFO "!!! debugging: bad rb tree !!!\n");
+			return -EINVAL;
+		}
+	}
+
+	rb_link_node(&kme->node, parent, new);
+	rb_insert_color(&kme->node, root);
+
+	return 0;
+}
+
+static int kcopy_open(struct inode *inode, struct file *filp)
+{
+	int ret;
+	const int minor = iminor(inode);
+	struct kcopy_file *kf = NULL;
+	struct kcopy_map_entry *kme;
+	struct kcopy_map_entry *okme;
+
+	if (minor < 0 || minor >= KCOPY_MAX_MINORS)
+		return -ENODEV;
+
+	mutex_lock(&kcopy_dev.open_lock);
+
+	if (!kcopy_dev.kf[minor]) {
+		kf = kmalloc(sizeof(struct kcopy_file), GFP_KERNEL);
+
+		if (!kf) {
+			ret = -ENOMEM;
+			goto bail;
+		}
+
+		kf->count = 1;
+		kf->live_map_tree = RB_ROOT;
+		mutex_init(&kf->map_lock);
+		kcopy_dev.kf[minor] = kf;
+	} else {
+		if (filp->f_flags & O_EXCL) {
+			ret = -EBUSY;
+			goto bail;
+		}
+		kcopy_dev.kf[minor]->count++;
+	}
+
+	kme = kcopy_create_entry(kcopy_dev.kf[minor]);
+	if (!kme) {
+		ret = -ENOMEM;
+		goto err_free_kf;
+	}
+
+	kf = kcopy_dev.kf[minor];
+
+	mutex_lock(&kf->map_lock);
+
+	okme = kcopy_lookup_pid(&kf->live_map_tree, kme->pid);
+	if (okme) {
+		/* pid already exists... */
+		okme->count++;
+		kfree(kme);
+		kme = okme;
+	} else
+		ret = kcopy_insert(&kf->live_map_tree, kme);
+
+	mutex_unlock(&kf->map_lock);
+
+	filp->private_data = kme;
+
+	ret = 0;
+	goto bail;
+
+err_free_kf:
+	if (kf) {
+		kcopy_dev.kf[minor] = NULL;
+		kfree(kf);
+	}
+bail:
+	mutex_unlock(&kcopy_dev.open_lock);
+	return ret;
+}
+
+static int kcopy_flush(struct file *filp, fl_owner_t id)
+{
+	struct kcopy_map_entry *kme = filp->private_data;
+	struct kcopy_file *kf = kme->file;
+
+	if (file_count(filp) == 1) {
+		mutex_lock(&kf->map_lock);
+		kme->count--;
+
+		if (!kme->count) {
+			rb_erase(&kme->node, &kf->live_map_tree);
+			kfree(kme);
+		}
+		mutex_unlock(&kf->map_lock);
+	}
+
+	return 0;
+}
+
+static int kcopy_release(struct inode *inode, struct file *filp)
+{
+	const int minor = iminor(inode);
+
+	mutex_lock(&kcopy_dev.open_lock);
+	kcopy_dev.kf[minor]->count--;
+	if (!kcopy_dev.kf[minor]->count) {
+		kfree(kcopy_dev.kf[minor]);
+		kcopy_dev.kf[minor] = NULL;
+	}
+	mutex_unlock(&kcopy_dev.open_lock);
+
+	return 0;
+}
+
+static void kcopy_put_pages(struct page **pages, int npages)
+{
+	int j;
+
+	for (j = 0; j < npages; j++)
+		put_page(pages[j]);
+}
+
+static int kcopy_validate_task(struct task_struct *p)
+{
+	return p && (uid_eq(current_euid(), task_euid(p)) || uid_eq(current_euid(), task_uid(p)));
+}
+
+static int kcopy_get_pages(struct kcopy_file *kf, pid_t pid,
+			   struct page **pages, void __user *addr,
+			   int write, size_t npages)
+{
+	int err;
+	struct mm_struct *mm;
+	struct kcopy_map_entry *rkme;
+
+	mutex_lock(&kf->map_lock);
+
+	rkme = kcopy_lookup_pid(&kf->live_map_tree, pid);
+	if (!rkme || !kcopy_validate_task(rkme->task)) {
+		err = -EINVAL;
+		goto bail_unlock;
+	}
+
+	mm = get_task_mm(rkme->task);
+	if (unlikely(!mm)) {
+		err = -ENOMEM;
+		goto bail_unlock;
+	}
+
+	down_read(&mm->mmap_sem);
+	err = get_user_pages(rkme->task, mm,
+			     (unsigned long) addr, npages, write, 0,
+			     pages, NULL);
+
+	if (err < npages && err > 0) {
+		kcopy_put_pages(pages, err);
+		err = -ENOMEM;
+	} else if (err == npages)
+		err = 0;
+
+	up_read(&mm->mmap_sem);
+
+	mmput(mm);
+
+bail_unlock:
+	mutex_unlock(&kf->map_lock);
+
+	return err;
+}
+
+static unsigned long kcopy_copy_pages_from_user(void __user *src,
+						struct page **dpages,
+						unsigned doff,
+						unsigned long n)
+{
+	struct page *dpage = *dpages;
+	char *daddr = kmap(dpage);
+	int ret = 0;
+
+	while (1) {
+		const unsigned long nleft = PAGE_SIZE - doff;
+		const unsigned long nc = (n < nleft) ? n : nleft;
+
+		/* if (copy_from_user(daddr + doff, src, nc)) { */
+		if (__copy_from_user_nocache(daddr + doff, src, nc)) {
+			ret = -EFAULT;
+			goto bail;
+		}
+
+		n -= nc;
+		if (n == 0)
+			break;
+
+		doff += nc;
+		doff &= ~PAGE_MASK;
+		if (doff == 0) {
+			kunmap(dpage);
+			dpages++;
+			dpage = *dpages;
+			daddr = kmap(dpage);
+		}
+
+		src += nc;
+	}
+
+bail:
+	kunmap(dpage);
+
+	return ret;
+}
+
+static unsigned long kcopy_copy_pages_to_user(void __user *dst,
+					      struct page **spages,
+					      unsigned soff,
+					      unsigned long n)
+{
+	struct page *spage = *spages;
+	const char *saddr = kmap(spage);
+	int ret = 0;
+
+	while (1) {
+		const unsigned long nleft = PAGE_SIZE - soff;
+		const unsigned long nc = (n < nleft) ? n : nleft;
+
+		if (copy_to_user(dst, saddr + soff, nc)) {
+			ret = -EFAULT;
+			goto bail;
+		}
+
+		n -= nc;
+		if (n == 0)
+			break;
+
+		soff += nc;
+		soff &= ~PAGE_MASK;
+		if (soff == 0) {
+			kunmap(spage);
+			spages++;
+			spage = *spages;
+			saddr = kmap(spage);
+		}
+
+		dst += nc;
+	}
+
+bail:
+	kunmap(spage);
+
+	return ret;
+}
+
+static unsigned long kcopy_copy_to_user(void __user *dst,
+					struct kcopy_file *kf, pid_t pid,
+					void __user *src,
+					unsigned long n)
+{
+	struct page **pages;
+	const int pages_len = PAGE_SIZE / sizeof(struct page *);
+	int ret = 0;
+
+	pages = (struct page **) __get_free_page(GFP_KERNEL);
+	if (!pages) {
+		ret = -ENOMEM;
+		goto bail;
+	}
+
+	while (n) {
+		const unsigned long soff = (unsigned long) src & ~PAGE_MASK;
+		const unsigned long spages_left =
+			(soff + n + PAGE_SIZE - 1) >> PAGE_SHIFT;
+		const unsigned long spages_cp =
+			min_t(unsigned long, spages_left, pages_len);
+		const unsigned long sbytes =
+			PAGE_SIZE - soff + (spages_cp - 1) * PAGE_SIZE;
+		const unsigned long nbytes = min_t(unsigned long, sbytes, n);
+
+		ret = kcopy_get_pages(kf, pid, pages, src, 0, spages_cp);
+		if (unlikely(ret))
+			goto bail_free;
+
+		ret = kcopy_copy_pages_to_user(dst, pages, soff, nbytes);
+		kcopy_put_pages(pages, spages_cp);
+		if (ret)
+			goto bail_free;
+		dst = (char *) dst + nbytes;
+		src = (char *) src + nbytes;
+
+		n -= nbytes;
+	}
+
+bail_free:
+	free_page((unsigned long) pages);
+bail:
+	return ret;
+}
+
+static unsigned long kcopy_copy_from_user(const void __user *src,
+					  struct kcopy_file *kf, pid_t pid,
+					  void __user *dst,
+					  unsigned long n)
+{
+	struct page **pages;
+	const int pages_len = PAGE_SIZE / sizeof(struct page *);
+	int ret = 0;
+
+	pages = (struct page **) __get_free_page(GFP_KERNEL);
+	if (!pages) {
+		ret = -ENOMEM;
+		goto bail;
+	}
+
+	while (n) {
+		const unsigned long doff = (unsigned long) dst & ~PAGE_MASK;
+		const unsigned long dpages_left =
+			(doff + n + PAGE_SIZE - 1) >> PAGE_SHIFT;
+		const unsigned long dpages_cp =
+			min_t(unsigned long, dpages_left, pages_len);
+		const unsigned long dbytes =
+			PAGE_SIZE - doff + (dpages_cp - 1) * PAGE_SIZE;
+		const unsigned long nbytes = min_t(unsigned long, dbytes, n);
+
+		ret = kcopy_get_pages(kf, pid, pages, dst, 1, dpages_cp);
+		if (unlikely(ret))
+			goto bail_free;
+
+		ret = kcopy_copy_pages_from_user((void __user *) src,
+						 pages, doff, nbytes);
+		kcopy_put_pages(pages, dpages_cp);
+		if (ret)
+			goto bail_free;
+
+		dst = (char *) dst + nbytes;
+		src = (char *) src + nbytes;
+
+		n -= nbytes;
+	}
+
+bail_free:
+	free_page((unsigned long) pages);
+bail:
+	return ret;
+}
+
+static int kcopy_do_get(struct kcopy_map_entry *kme, pid_t pid,
+			const void __user *src, void __user *dst,
+			unsigned long n)
+{
+	struct kcopy_file *kf = kme->file;
+	int ret = 0;
+
+	if (n == 0) {
+		ret = -EINVAL;
+		goto bail;
+	}
+
+	ret = kcopy_copy_to_user(dst, kf, pid, (void __user *) src, n);
+
+bail:
+	return ret;
+}
+
+static int kcopy_do_put(struct kcopy_map_entry *kme, const void __user *src,
+			pid_t pid, void __user *dst,
+			unsigned long n)
+{
+	struct kcopy_file *kf = kme->file;
+	int ret = 0;
+
+	if (n == 0) {
+		ret = -EINVAL;
+		goto bail;
+	}
+
+	ret = kcopy_copy_from_user(src, kf, pid, (void __user *) dst, n);
+
+bail:
+	return ret;
+}
+
+static int kcopy_do_abi(u32 __user *dst)
+{
+	u32 val = KCOPY_ABI;
+	int err;
+
+	err = put_user(val, dst);
+	if (err)
+		return -EFAULT;
+
+	return 0;
+}
+
+ssize_t kcopy_write(struct file *filp, const char __user *data, size_t cnt,
+		    loff_t *o)
+{
+	struct kcopy_map_entry *kme = filp->private_data;
+	struct kcopy_syscall ks;
+	int err = 0;
+	const void __user *src;
+	void __user *dst;
+	unsigned long n;
+
+	if (cnt != sizeof(struct kcopy_syscall)) {
+		err = -EINVAL;
+		goto bail;
+	}
+
+	err = copy_from_user(&ks, data, cnt);
+	if (unlikely(err))
+		goto bail;
+
+	src = kcopy_syscall_src(&ks);
+	dst = kcopy_syscall_dst(&ks);
+	n = kcopy_syscall_n(&ks);
+	if (ks.tag == KCOPY_GET_SYSCALL)
+		err = kcopy_do_get(kme, ks.pid, src, dst, n);
+	else if (ks.tag == KCOPY_PUT_SYSCALL)
+		err = kcopy_do_put(kme, src, ks.pid, dst, n);
+	else if (ks.tag == KCOPY_ABI_SYSCALL)
+		err = kcopy_do_abi(dst);
+	else
+		err = -EINVAL;
+
+bail:
+	return err ? err : cnt;
+}
+
+static const struct file_operations kcopy_fops = {
+	.owner = THIS_MODULE,
+	.open = kcopy_open,
+	.release = kcopy_release,
+	.flush = kcopy_flush,
+	.write = kcopy_write,
+};
+
+static int __init kcopy_init(void)
+{
+	int ret;
+	const char *name = "kcopy";
+	int i;
+	int ninit = 0;
+
+	mutex_init(&kcopy_dev.open_lock);
+
+	ret = alloc_chrdev_region(&kcopy_dev.dev, 0, KCOPY_MAX_MINORS, name);
+	if (ret)
+		goto bail;
+
+	kcopy_dev.class = class_create(THIS_MODULE, (char *) name);
+
+	if (IS_ERR(kcopy_dev.class)) {
+		ret = PTR_ERR(kcopy_dev.class);
+		printk(KERN_ERR "kcopy: Could not create "
+		       "device class (err %d)\n", -ret);
+		goto bail_chrdev;
+	}
+
+	cdev_init(&kcopy_dev.cdev, &kcopy_fops);
+	ret = cdev_add(&kcopy_dev.cdev, kcopy_dev.dev, KCOPY_MAX_MINORS);
+	if (ret < 0) {
+		printk(KERN_ERR "kcopy: Could not add cdev (err %d)\n",
+		       -ret);
+		goto bail_class;
+	}
+
+	for (i = 0; i < KCOPY_MAX_MINORS; i++) {
+		char devname[8];
+		const int minor = MINOR(kcopy_dev.dev) + i;
+		const dev_t dev = MKDEV(MAJOR(kcopy_dev.dev), minor);
+
+		snprintf(devname, sizeof(devname), "kcopy%02d", i);
+		kcopy_dev.devp[i] =
+			device_create(kcopy_dev.class, NULL,
+				      dev, NULL, devname);
+
+		if (IS_ERR(kcopy_dev.devp[i])) {
+			ret = PTR_ERR(kcopy_dev.devp[i]);
+			printk(KERN_ERR "kcopy: Could not create "
+			       "devp %d (err %d)\n", i, -ret);
+			goto bail_cdev_add;
+		}
+
+		ninit++;
+	}
+
+	ret = 0;
+	goto bail;
+
+bail_cdev_add:
+	for (i = 0; i < ninit; i++)
+		device_unregister(kcopy_dev.devp[i]);
+
+	cdev_del(&kcopy_dev.cdev);
+bail_class:
+	class_destroy(kcopy_dev.class);
+bail_chrdev:
+	unregister_chrdev_region(kcopy_dev.dev, KCOPY_MAX_MINORS);
+bail:
+	return ret;
+}
+
+static void __exit kcopy_fini(void)
+{
+	int i;
+
+	for (i = 0; i < KCOPY_MAX_MINORS; i++)
+		device_unregister(kcopy_dev.devp[i]);
+
+	cdev_del(&kcopy_dev.cdev);
+	class_destroy(kcopy_dev.class);
+	unregister_chrdev_region(kcopy_dev.dev, KCOPY_MAX_MINORS);
+}
+
+module_init(kcopy_init);
+module_exit(kcopy_fini);
--
1.7.10
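
The whole interface the patch adds is driven through write() on one of the /dev/kcopy00../kcopy63 nodes that kcopy_init() creates: userspace submits one struct kcopy_syscall per write(), and the tag field selects an ABI query, a "get" (copy out of a peer process's address space) or a "put" (copy into it). The sketch below is not part of the patch and not the actual PSM library code; it is a minimal, hypothetical illustration that assumes a userspace struct layout matching struct kcopy_syscall above, a device node named /dev/kcopy01, and a cooperating peer with the same effective UID that has opened the same minor (kcopy_validate_task() enforces the UID check, kcopy_lookup_pid() the shared-minor requirement). The placeholders peer_pid and peer_src stand in for values the processes would exchange out of band, e.g. over shared memory.

/*
 * Hypothetical userspace sketch for the kcopy write() interface.
 * Assumptions (not from the patch): device path, peer_pid, peer_src.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/types.h>
#include <unistd.h>

#define KCOPY_GET_SYSCALL 1	/* copy from the peer's memory into ours */
#define KCOPY_PUT_SYSCALL 2	/* copy from our memory into the peer's */
#define KCOPY_ABI_SYSCALL 3	/* query the driver's ABI version */

/* Layout must match struct kcopy_syscall in drivers/char/kcopy/kcopy.c. */
struct kcopy_syscall {
	uint32_t tag;
	pid_t    pid;
	uint64_t n;
	uint64_t src;
	uint64_t dst;
};

/* The driver accepts exactly one struct per write(). */
static int kcopy_cmd(int fd, const struct kcopy_syscall *ks)
{
	return write(fd, ks, sizeof(*ks)) == (ssize_t) sizeof(*ks) ? 0 : -1;
}

int main(void)
{
	int fd = open("/dev/kcopy01", O_WRONLY);
	uint32_t abi = 0;
	char local[4096];
	struct kcopy_syscall ks;
	pid_t peer_pid = 0;	/* placeholder: the peer's pid ...        */
	uint64_t peer_src = 0;	/* ... and an address it exported to us   */

	if (fd < 0) {
		perror("open /dev/kcopy01");
		return 1;
	}

	/* Ask the driver which ABI it implements (KCOPY_ABI, currently 1). */
	memset(&ks, 0, sizeof(ks));
	ks.tag = KCOPY_ABI_SYSCALL;
	ks.dst = (uintptr_t) &abi;
	if (kcopy_cmd(fd, &ks) == 0)
		printf("kcopy ABI %u\n", abi);

	/* Pull sizeof(local) bytes from peer_src in the peer's address
	 * space straight into our buffer, with no bounce through a
	 * shared-memory segment. */
	memset(&ks, 0, sizeof(ks));
	ks.tag = KCOPY_GET_SYSCALL;
	ks.pid = peer_pid;
	ks.src = peer_src;
	ks.dst = (uintptr_t) local;
	ks.n = sizeof(local);
	if (kcopy_cmd(fd, &ks) != 0)
		perror("kcopy get");

	close(fd);
	return 0;
}

The return convention mirrors kcopy_write() above: a successful command returns the full sizeof(struct kcopy_syscall), while any failure surfaces as -1 with errno set (for example EINVAL when the requested pid has not opened the same minor, or when n is zero).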