/* nvmet.h */
/*
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 */
#ifndef _NVMET_H
#define _NVMET_H

#include <linux/dma-mapping.h>
#include <linux/types.h>
#include <linux/device.h>
#include <linux/kref.h>
#include <linux/percpu-refcount.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/nvme.h>
#include <linux/configfs.h>
#include <linux/rcupdate.h>
#include <linux/blkdev.h>

#define NVMET_ASYNC_EVENTS		4
#define NVMET_ERROR_LOG_SLOTS		128

/*
 * Helper Macros when NVMe error is NVME_SC_CONNECT_INVALID_PARAM
 * The 16 bit shift is to set IATTR bit to 1, which means offending
 * offset starts in the data section of connect()
 */
#define IPO_IATTR_CONNECT_DATA(x)	\
	(cpu_to_le32((1 << 16) | (offsetof(struct nvmf_connect_data, x))))
#define IPO_IATTR_CONNECT_SQE(x)	\
	(cpu_to_le32(offsetof(struct nvmf_connect_command, x)))
/*
 * Per-namespace state for the target.  Created via configfs
 * (nvmet_ns_alloc) and backed by a block device while enabled.
 */
struct nvmet_ns {
	struct list_head	dev_link;	/* entry on the owning subsystem's list */
	struct percpu_ref	ref;		/* per-cpu I/O reference count */
	struct block_device	*bdev;		/* backing device; see device_path */
	u32			nsid;		/* namespace ID presented to hosts */
	u32			blksize_shift;	/* log2 of the logical block size */
	loff_t			size;		/* capacity in bytes */
	u8			nguid[16];	/* namespace globally unique identifier */

	bool			enabled;	/* set by nvmet_ns_enable() */
	struct nvmet_subsys	*subsys;	/* owning subsystem */
	const char		*device_path;	/* path of the backing device */

	struct config_group	device_group;
	struct config_group	group;		/* configfs anchor; see to_nvmet_ns() */

	/* completed once the namespace is quiesced during disable */
	struct completion	disable_done;
};
  51. static inline struct nvmet_ns *to_nvmet_ns(struct config_item *item)
  52. {
  53. return container_of(to_config_group(item), struct nvmet_ns, group);
  54. }
/* Target-side completion queue bookkeeping. */
struct nvmet_cq {
	u16			qid;	/* queue identifier */
	u16			size;	/* number of entries */
};
/* Target-side submission queue bookkeeping. */
struct nvmet_sq {
	struct nvmet_ctrl	*ctrl;		/* owning controller */
	struct percpu_ref	ref;		/* in-flight request reference */
	u16			qid;		/* queue identifier */
	u16			size;		/* number of entries */
	/* completions used during teardown; see nvmet_sq_destroy() */
	struct completion	free_done;
	struct completion	confirm_done;
};
/**
 * struct nvmet_port -	Common structure to keep port
 *				information for the target.
 * @entry:		List head for holding a list of these elements.
 * @disc_addr:		Address information is stored in a format defined
 *				for a discovery log page entry.
 * @group:		ConfigFS group for this element's folder.
 * @subsys_group:	ConfigFS group holding the subsystem links below.
 * @subsystems:		List of subsystems exported through this port.
 * @referrals_group:	ConfigFS group holding the referral entries below.
 * @referrals:		List of referral ports for discovery.
 * @priv:		Private data for the transport.
 * @enabled:		True while the port is enabled (see nvmet_enable_port()).
 */
struct nvmet_port {
	struct list_head		entry;
	struct nvmf_disc_rsp_page_entry	disc_addr;
	struct config_group		group;
	struct config_group		subsys_group;
	struct list_head		subsystems;
	struct config_group		referrals_group;
	struct list_head		referrals;
	void				*priv;
	bool				enabled;
};
  87. static inline struct nvmet_port *to_nvmet_port(struct config_item *item)
  88. {
  89. return container_of(to_config_group(item), struct nvmet_port,
  90. group);
  91. }
/*
 * Per-controller state, one instance per host connection to a
 * subsystem (allocated by nvmet_alloc_ctrl()).
 */
struct nvmet_ctrl {
	struct nvmet_subsys	*subsys;	/* subsystem this controller belongs to */
	struct nvmet_cq		**cqs;		/* completion queues, indexed by qid */
	struct nvmet_sq		**sqs;		/* submission queues, indexed by qid */

	struct mutex		lock;
	u64			cap;		/* shadow of the CAP property */
	u32			cc;		/* shadow of the CC property */
	u32			csts;		/* shadow of the CSTS property */

	u16			cntlid;		/* controller ID (from cntlid_ida) */
	u32			kato;		/* keep-alive timeout; drives ka_work */

	/* pending AER commands waiting for an event to report */
	struct nvmet_req	*async_event_cmds[NVMET_ASYNC_EVENTS];
	unsigned int		nr_async_event_cmds;
	struct list_head	async_events;	/* queued nvmet_async_event entries */
	struct work_struct	async_event_work;

	struct list_head	subsys_entry;	/* entry on subsys->ctrls */
	struct kref		ref;		/* released via nvmet_ctrl_put() */
	struct delayed_work	ka_work;	/* keep-alive timer */
	struct work_struct	fatal_err_work;	/* see nvmet_ctrl_fatal_error() */

	struct nvmet_fabrics_ops *ops;		/* transport callbacks */

	char			subsysnqn[NVMF_NQN_FIELD_LEN];
	char			hostnqn[NVMF_NQN_FIELD_LEN];
};
/*
 * An exported NVMe subsystem, holding its namespaces, connected
 * controllers and access-control state.
 */
struct nvmet_subsys {
	enum nvme_subsys_type	type;		/* e.g. NVMe or discovery subsystem */

	struct mutex		lock;
	struct kref		ref;		/* released via nvmet_subsys_put() */

	struct list_head	namespaces;	/* nvmet_ns::dev_link entries */
	unsigned int		max_nsid;	/* highest namespace ID in use */

	struct list_head	ctrls;		/* nvmet_ctrl::subsys_entry entries */
	struct ida		cntlid_ida;	/* allocator for controller IDs */

	struct list_head	hosts;		/* allowed nvmet_host_link entries */
	bool			allow_any_host;	/* bypass the hosts allow-list */

	u16			max_qid;	/* highest supported queue ID */

	u64			ver;		/* NVMe specification version */
	u64			serial;		/* serial number */
	char			*subsysnqn;	/* subsystem NQN string */

	struct config_group	group;		/* configfs anchor; see to_subsys() */
	struct config_group	namespaces_group;
	struct config_group	allowed_hosts_group;
};
  132. static inline struct nvmet_subsys *to_subsys(struct config_item *item)
  133. {
  134. return container_of(to_config_group(item), struct nvmet_subsys, group);
  135. }
  136. static inline struct nvmet_subsys *namespaces_to_subsys(
  137. struct config_item *item)
  138. {
  139. return container_of(to_config_group(item), struct nvmet_subsys,
  140. namespaces_group);
  141. }
/*
 * A host entry in the allowed-hosts tree; the host NQN is the
 * configfs item name (see nvmet_host_name()).
 */
struct nvmet_host {
	struct config_group	group;
};
  145. static inline struct nvmet_host *to_host(struct config_item *item)
  146. {
  147. return container_of(to_config_group(item), struct nvmet_host, group);
  148. }
  149. static inline char *nvmet_host_name(struct nvmet_host *host)
  150. {
  151. return config_item_name(&host->group.cg_item);
  152. }
/* Link object putting an nvmet_host on a subsystem's allowed-hosts list. */
struct nvmet_host_link {
	struct list_head	entry;	/* entry on nvmet_subsys::hosts */
	struct nvmet_host	*host;
};
/* Link object putting an nvmet_subsys on a port's subsystems list. */
struct nvmet_subsys_link {
	struct list_head	entry;	/* entry on nvmet_port::subsystems */
	struct nvmet_subsys	*subsys;
};
struct nvmet_req;

/*
 * Per-transport callbacks and capabilities, registered via
 * nvmet_register_transport().
 */
struct nvmet_fabrics_ops {
	struct module		*owner;
	unsigned int		type;			/* transport type */
	unsigned int		sqe_inline_size;	/* bytes of inline data per SQE */
	unsigned int		msdbd;			/* max SGL descriptors per command */
	bool			has_keyed_sgls : 1;	/* transport uses keyed SGLs */

	void (*queue_response)(struct nvmet_req *req);	/* deliver a completion */
	int (*add_port)(struct nvmet_port *port);
	void (*remove_port)(struct nvmet_port *port);
	void (*delete_ctrl)(struct nvmet_ctrl *ctrl);
};
#define NVMET_MAX_INLINE_BIOVEC	8

/*
 * State for a single in-flight command, from transport receive
 * through execution to completion (nvmet_req_complete()).
 */
struct nvmet_req {
	struct nvme_command	*cmd;	/* submitted command */
	struct nvme_completion	*rsp;	/* completion being built */
	struct nvmet_sq		*sq;	/* submission queue it arrived on */
	struct nvmet_cq		*cq;	/* completion queue to respond on */
	struct nvmet_ns		*ns;	/* resolved namespace, if any */
	struct scatterlist	*sg;	/* data buffer */
	/* inline bio/bvec to avoid allocation for small I/O */
	struct bio		inline_bio;
	struct bio_vec		inline_bvec[NVMET_MAX_INLINE_BIOVEC];
	int			sg_cnt;		/* entries in sg */
	size_t			data_len;	/* expected transfer length */

	struct nvmet_port	*port;		/* port the command arrived on */

	void (*execute)(struct nvmet_req *req);	/* set by the parse_*_cmd helpers */
	struct nvmet_fabrics_ops *ops;		/* owning transport */
};
  189. static inline void nvmet_set_status(struct nvmet_req *req, u16 status)
  190. {
  191. req->rsp->status = cpu_to_le16(status << 1);
  192. }
  193. static inline void nvmet_set_result(struct nvmet_req *req, u32 result)
  194. {
  195. req->rsp->result = cpu_to_le32(result);
  196. }
  197. /*
  198. * NVMe command writes actually are DMA reads for us on the target side.
  199. */
  200. static inline enum dma_data_direction
  201. nvmet_data_dir(struct nvmet_req *req)
  202. {
  203. return nvme_is_write(req->cmd) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
  204. }
/* A queued asynchronous event, pending until an AER command claims it. */
struct nvmet_async_event {
	struct list_head	entry;		/* entry on nvmet_ctrl::async_events */
	u8			event_type;
	u8			event_info;
	u8			log_page;	/* associated log page identifier */
};
/* Command parsers: validate cmd and set req->execute; nonzero on error. */
int nvmet_parse_connect_cmd(struct nvmet_req *req);
int nvmet_parse_io_cmd(struct nvmet_req *req);
int nvmet_parse_admin_cmd(struct nvmet_req *req);
int nvmet_parse_discovery_cmd(struct nvmet_req *req);
int nvmet_parse_fabrics_cmd(struct nvmet_req *req);

/* Request lifecycle. */
bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
		struct nvmet_sq *sq, struct nvmet_fabrics_ops *ops);
void nvmet_req_complete(struct nvmet_req *req, u16 status);

/* Queue setup and teardown. */
void nvmet_cq_setup(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq, u16 qid,
		u16 size);
void nvmet_sq_setup(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq, u16 qid,
		u16 size);
void nvmet_sq_destroy(struct nvmet_sq *sq);
int nvmet_sq_init(struct nvmet_sq *sq);

/* Controller management. */
void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl);
void nvmet_update_cc(struct nvmet_ctrl *ctrl, u32 new);
u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
		struct nvmet_req *req, u32 kato, struct nvmet_ctrl **ctrlp);
u16 nvmet_ctrl_find_get(const char *subsysnqn, const char *hostnqn, u16 cntlid,
		struct nvmet_req *req, struct nvmet_ctrl **ret);
void nvmet_ctrl_put(struct nvmet_ctrl *ctrl);

/* Subsystem and namespace management. */
struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
		enum nvme_subsys_type type);
void nvmet_subsys_put(struct nvmet_subsys *subsys);

struct nvmet_ns *nvmet_find_namespace(struct nvmet_ctrl *ctrl, __le32 nsid);
void nvmet_put_namespace(struct nvmet_ns *ns);
int nvmet_ns_enable(struct nvmet_ns *ns);
void nvmet_ns_disable(struct nvmet_ns *ns);
struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid);
void nvmet_ns_free(struct nvmet_ns *ns);

/* Transport and port management. */
int nvmet_register_transport(struct nvmet_fabrics_ops *ops);
void nvmet_unregister_transport(struct nvmet_fabrics_ops *ops);

int nvmet_enable_port(struct nvmet_port *port);
void nvmet_disable_port(struct nvmet_port *port);

void nvmet_referral_enable(struct nvmet_port *parent, struct nvmet_port *port);
void nvmet_referral_disable(struct nvmet_port *port);

/* Data movement between request SGL and kernel buffers. */
u16 nvmet_copy_to_sgl(struct nvmet_req *req, off_t off, const void *buf,
		size_t len);
u16 nvmet_copy_from_sgl(struct nvmet_req *req, off_t off, void *buf,
		size_t len);

u32 nvmet_get_log_page_len(struct nvme_command *cmd);

#define NVMET_QUEUE_SIZE	1024
#define NVMET_NR_QUEUES		64
#define NVMET_MAX_CMD		NVMET_QUEUE_SIZE
#define NVMET_KAS		10	/* keep-alive support granularity */
#define NVMET_DISC_KATO		120	/* discovery keep-alive timeout */

int __init nvmet_init_configfs(void);
void __exit nvmet_exit_configfs(void);
int __init nvmet_init_discovery(void);
void nvmet_exit_discovery(void);

extern struct nvmet_subsys *nvmet_disc_subsys;
extern u64 nvmet_genctr;
extern struct rw_semaphore nvmet_config_sem;

bool nvmet_host_allowed(struct nvmet_req *req, struct nvmet_subsys *subsys,
		const char *hostnqn);

#endif /* _NVMET_H */