/*
 * Device Mapper Uevent Support (dm-uevent)
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright IBM Corporation, 2007
 * Author: Mike Anderson <andmike@linux.vnet.ibm.com>
 */
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/kobject.h>
#include <linux/dm-ioctl.h>

#include "dm.h"
#include "dm-uevent.h"

#define DM_MSG_PREFIX "uevent"
  28. static const struct {
  29. enum dm_uevent_type type;
  30. enum kobject_action action;
  31. char *name;
  32. } _dm_uevent_type_names[] = {
  33. {DM_UEVENT_PATH_FAILED, KOBJ_CHANGE, "PATH_FAILED"},
  34. {DM_UEVENT_PATH_REINSTATED, KOBJ_CHANGE, "PATH_REINSTATED"},
  35. };
  36. static struct kmem_cache *_dm_event_cache;
/*
 * One in-flight uevent.  Queued on a per-device list via @elist and
 * delivered (then freed) by dm_send_uevents().
 */
struct dm_uevent {
	struct mapped_device *md;	/* device the event refers to */
	enum kobject_action action;	/* kobject action to emit (KOBJ_CHANGE) */
	struct kobj_uevent_env ku_env;	/* accumulated environment variables */
	struct list_head elist;		/* link on the event list */
	char name[DM_NAME_LEN];		/* device name, filled in at send time */
	char uuid[DM_UUID_LEN];		/* device uuid, filled in at send time */
};
/* Return an event to the slab cache. */
static void dm_uevent_free(struct dm_uevent *event)
{
	kmem_cache_free(_dm_event_cache, event);
}
  49. static struct dm_uevent *dm_uevent_alloc(struct mapped_device *md)
  50. {
  51. struct dm_uevent *event;
  52. event = kmem_cache_zalloc(_dm_event_cache, GFP_ATOMIC);
  53. if (!event)
  54. return NULL;
  55. INIT_LIST_HEAD(&event->elist);
  56. event->md = md;
  57. return event;
  58. }
/*
 * Allocate a dm_uevent and populate its environment with the variables
 * describing a path event (DM_TARGET, DM_ACTION, DM_SEQNUM, DM_PATH,
 * DM_NR_VALID_PATHS).  DM_NAME and DM_UUID are deliberately NOT added
 * here; they are filled in at send time by dm_send_uevents().
 *
 * Returns the populated event, or ERR_PTR(-ENOMEM) on any failure
 * (allocation failure and add_uevent_var() failure are both reported
 * as -ENOMEM).
 */
static struct dm_uevent *dm_build_path_uevent(struct mapped_device *md,
					      struct dm_target *ti,
					      enum kobject_action action,
					      const char *dm_action,
					      const char *path,
					      unsigned nr_valid_paths)
{
	struct dm_uevent *event;

	event = dm_uevent_alloc(md);
	if (!event) {
		DMERR("%s: dm_uevent_alloc() failed", __func__);
		goto err_nomem;
	}

	event->action = action;

	if (add_uevent_var(&event->ku_env, "DM_TARGET=%s", ti->type->name)) {
		DMERR("%s: add_uevent_var() for DM_TARGET failed",
		      __func__);
		goto err_add;
	}

	if (add_uevent_var(&event->ku_env, "DM_ACTION=%s", dm_action)) {
		DMERR("%s: add_uevent_var() for DM_ACTION failed",
		      __func__);
		goto err_add;
	}

	/* Consumes a sequence number even if a later step fails. */
	if (add_uevent_var(&event->ku_env, "DM_SEQNUM=%u",
			   dm_next_uevent_seq(md))) {
		DMERR("%s: add_uevent_var() for DM_SEQNUM failed",
		      __func__);
		goto err_add;
	}

	if (add_uevent_var(&event->ku_env, "DM_PATH=%s", path)) {
		DMERR("%s: add_uevent_var() for DM_PATH failed", __func__);
		goto err_add;
	}

	if (add_uevent_var(&event->ku_env, "DM_NR_VALID_PATHS=%d",
			   nr_valid_paths)) {
		DMERR("%s: add_uevent_var() for DM_NR_VALID_PATHS failed",
		      __func__);
		goto err_add;
	}

	return event;

err_add:
	dm_uevent_free(event);
err_nomem:
	return ERR_PTR(-ENOMEM);
}
/**
 * dm_send_uevents - send uevents for given list
 *
 * @events: list of events to send
 * @kobj: kobject generating event
 *
 * Every event is removed from @events and freed, whether or not it was
 * delivered successfully.  DM_NAME and DM_UUID are added here, at send
 * time, so they reflect the device's current identity.
 */
void dm_send_uevents(struct list_head *events, struct kobject *kobj)
{
	int r;
	struct dm_uevent *event, *next;

	list_for_each_entry_safe(event, next, events, elist) {
		list_del_init(&event->elist);

		/*
		 * When a device is being removed this copy fails and we
		 * discard these unsent events.
		 */
		if (dm_copy_name_and_uuid(event->md, event->name,
					  event->uuid)) {
			DMINFO("%s: skipping sending uevent for lost device",
			       __func__);
			goto uevent_free;
		}

		if (add_uevent_var(&event->ku_env, "DM_NAME=%s", event->name)) {
			DMERR("%s: add_uevent_var() for DM_NAME failed",
			      __func__);
			goto uevent_free;
		}

		if (add_uevent_var(&event->ku_env, "DM_UUID=%s", event->uuid)) {
			DMERR("%s: add_uevent_var() for DM_UUID failed",
			      __func__);
			goto uevent_free;
		}

		r = kobject_uevent_env(kobj, event->action, event->ku_env.envp);
		if (r)
			DMERR("%s: kobject_uevent_env failed", __func__);
uevent_free:
		dm_uevent_free(event);
	}
}
EXPORT_SYMBOL_GPL(dm_send_uevents);
  146. /**
  147. * dm_path_uevent - called to create a new path event and queue it
  148. *
  149. * @event_type: path event type enum
  150. * @ti: pointer to a dm_target
  151. * @path: string containing pathname
  152. * @nr_valid_paths: number of valid paths remaining
  153. *
  154. */
  155. void dm_path_uevent(enum dm_uevent_type event_type, struct dm_target *ti,
  156. const char *path, unsigned nr_valid_paths)
  157. {
  158. struct mapped_device *md = dm_table_get_md(ti->table);
  159. struct dm_uevent *event;
  160. if (event_type >= ARRAY_SIZE(_dm_uevent_type_names)) {
  161. DMERR("%s: Invalid event_type %d", __func__, event_type);
  162. return;
  163. }
  164. event = dm_build_path_uevent(md, ti,
  165. _dm_uevent_type_names[event_type].action,
  166. _dm_uevent_type_names[event_type].name,
  167. path, nr_valid_paths);
  168. if (IS_ERR(event))
  169. return;
  170. dm_uevent_add(md, &event->elist);
  171. }
  172. EXPORT_SYMBOL_GPL(dm_path_uevent);
  173. int dm_uevent_init(void)
  174. {
  175. _dm_event_cache = KMEM_CACHE(dm_uevent, 0);
  176. if (!_dm_event_cache)
  177. return -ENOMEM;
  178. DMINFO("version 1.0.3");
  179. return 0;
  180. }
/* Tear down the dm_uevent slab cache created by dm_uevent_init(). */
void dm_uevent_exit(void)
{
	kmem_cache_destroy(_dm_event_cache);
}