osd_initiator.c

/*
 * osd_initiator - Main body of the osd initiator library.
 *
 * Note: The file does not contain the advanced security functionality which
 * is only needed by the security_manager's initiators.
 *
 * Copyright (C) 2008 Panasas Inc.  All rights reserved.
 *
 * Authors:
 *   Boaz Harrosh <bharrosh@panasas.com>
 *   Benny Halevy <bhalevy@panasas.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *  3. Neither the name of the Panasas company nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/slab.h>
#include <linux/module.h>

#include <scsi/osd_initiator.h>
#include <scsi/osd_sec.h>
#include <scsi/osd_attributes.h>
#include <scsi/osd_sense.h>

#include <scsi/scsi_device.h>

#include "osd_debug.h"

#ifndef __unused
#	define __unused	__attribute__((unused))
#endif

enum { OSD_REQ_RETRIES = 1 };

MODULE_AUTHOR("Boaz Harrosh <bharrosh@panasas.com>");
MODULE_DESCRIPTION("open-osd initiator library libosd.ko");
MODULE_LICENSE("GPL");

static inline void build_test(void)
{
	/* structures were not packed */
	BUILD_BUG_ON(sizeof(struct osd_capability) != OSD_CAP_LEN);
	BUILD_BUG_ON(sizeof(struct osdv2_cdb) != OSD_TOTAL_CDB_LEN);
	BUILD_BUG_ON(sizeof(struct osdv1_cdb) != OSDv1_TOTAL_CDB_LEN);
}

static const char *_osd_ver_desc(struct osd_request *or)
{
	return osd_req_is_ver1(or) ? "OSD1" : "OSD2";
}

#define ATTR_DEF_RI(id, len) ATTR_DEF(OSD_APAGE_ROOT_INFORMATION, id, len)

static int _osd_get_print_system_info(struct osd_dev *od,
	void *caps, struct osd_dev_info *odi)
{
	struct osd_request *or;
	struct osd_attr get_attrs[] = {
		ATTR_DEF_RI(OSD_ATTR_RI_VENDOR_IDENTIFICATION, 8),
		ATTR_DEF_RI(OSD_ATTR_RI_PRODUCT_IDENTIFICATION, 16),
		ATTR_DEF_RI(OSD_ATTR_RI_PRODUCT_MODEL, 32),
		ATTR_DEF_RI(OSD_ATTR_RI_PRODUCT_REVISION_LEVEL, 4),
		ATTR_DEF_RI(OSD_ATTR_RI_PRODUCT_SERIAL_NUMBER, 64 /*variable*/),
		ATTR_DEF_RI(OSD_ATTR_RI_OSD_NAME, 64 /*variable*/),
		ATTR_DEF_RI(OSD_ATTR_RI_TOTAL_CAPACITY, 8),
		ATTR_DEF_RI(OSD_ATTR_RI_USED_CAPACITY, 8),
		ATTR_DEF_RI(OSD_ATTR_RI_NUMBER_OF_PARTITIONS, 8),
		ATTR_DEF_RI(OSD_ATTR_RI_CLOCK, 6),
		/* IBM-OSD-SIM Has a bug with this one put it last */
		ATTR_DEF_RI(OSD_ATTR_RI_OSD_SYSTEM_ID, 20),
	};
	void *iter = NULL, *pFirst;
	int nelem = ARRAY_SIZE(get_attrs), a = 0;
	int ret;

	or = osd_start_request(od, GFP_KERNEL);
	if (!or)
		return -ENOMEM;

	/* get attrs */
	osd_req_get_attributes(or, &osd_root_object);
	osd_req_add_get_attr_list(or, get_attrs, ARRAY_SIZE(get_attrs));

	ret = osd_finalize_request(or, 0, caps, NULL);
	if (ret)
		goto out;

	ret = osd_execute_request(or);
	if (ret) {
		OSD_ERR("Failed to detect %s => %d\n", _osd_ver_desc(or), ret);
		goto out;
	}

	osd_req_decode_get_attr_list(or, get_attrs, &nelem, &iter);

	OSD_INFO("Detected %s device\n",
		_osd_ver_desc(or));

	pFirst = get_attrs[a++].val_ptr;
	OSD_INFO("VENDOR_IDENTIFICATION [%s]\n",
		(char *)pFirst);

	pFirst = get_attrs[a++].val_ptr;
	OSD_INFO("PRODUCT_IDENTIFICATION [%s]\n",
		(char *)pFirst);

	pFirst = get_attrs[a++].val_ptr;
	OSD_INFO("PRODUCT_MODEL [%s]\n",
		(char *)pFirst);

	pFirst = get_attrs[a++].val_ptr;
	OSD_INFO("PRODUCT_REVISION_LEVEL [%u]\n",
		pFirst ? get_unaligned_be32(pFirst) : ~0U);

	pFirst = get_attrs[a++].val_ptr;
	OSD_INFO("PRODUCT_SERIAL_NUMBER [%s]\n",
		(char *)pFirst);

	odi->osdname_len = get_attrs[a].len;
	/* Avoid NULL for memcmp optimization 0-length is good enough */
	odi->osdname = kzalloc(odi->osdname_len + 1, GFP_KERNEL);
	if (odi->osdname_len)
		memcpy(odi->osdname, get_attrs[a].val_ptr, odi->osdname_len);
	OSD_INFO("OSD_NAME [%s]\n", odi->osdname);
	a++;

	pFirst = get_attrs[a++].val_ptr;
	OSD_INFO("TOTAL_CAPACITY [0x%llx]\n",
		pFirst ? _LLU(get_unaligned_be64(pFirst)) : ~0ULL);

	pFirst = get_attrs[a++].val_ptr;
	OSD_INFO("USED_CAPACITY [0x%llx]\n",
		pFirst ? _LLU(get_unaligned_be64(pFirst)) : ~0ULL);

	pFirst = get_attrs[a++].val_ptr;
	OSD_INFO("NUMBER_OF_PARTITIONS [%llu]\n",
		pFirst ? _LLU(get_unaligned_be64(pFirst)) : ~0ULL);

	if (a >= nelem)
		goto out;

	/* FIXME: Where are the time utilities */
	pFirst = get_attrs[a++].val_ptr;
	OSD_INFO("CLOCK [0x%02x%02x%02x%02x%02x%02x]\n",
		((char *)pFirst)[0], ((char *)pFirst)[1],
		((char *)pFirst)[2], ((char *)pFirst)[3],
		((char *)pFirst)[4], ((char *)pFirst)[5]);

	if (a < nelem) { /* IBM-OSD-SIM bug, Might not have it */
		unsigned len = get_attrs[a].len;
		char sid_dump[32*4 + 2]; /* 2nibbles+space+ASCII */

		hex_dump_to_buffer(get_attrs[a].val_ptr, len, 32, 1,
				   sid_dump, sizeof(sid_dump), true);
		OSD_INFO("OSD_SYSTEM_ID(%d)\n"
			 " [%s]\n", len, sid_dump);

		if (unlikely(len > sizeof(odi->systemid))) {
			OSD_ERR("OSD Target error: OSD_SYSTEM_ID too long(%d). "
				"device identification might not work\n", len);
			len = sizeof(odi->systemid);
		}
		odi->systemid_len = len;
		memcpy(odi->systemid, get_attrs[a].val_ptr, len);
		a++;
	}
out:
	osd_end_request(or);
	return ret;
}

int osd_auto_detect_ver(struct osd_dev *od,
	void *caps, struct osd_dev_info *odi)
{
	int ret;

	/* Auto-detect the osd version */
	ret = _osd_get_print_system_info(od, caps, odi);
	if (ret) {
		osd_dev_set_ver(od, OSD_VER1);
		OSD_DEBUG("converting to OSD1\n");
		ret = _osd_get_print_system_info(od, caps, odi);
	}

	return ret;
}
EXPORT_SYMBOL(osd_auto_detect_ver);
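
/*
 * Usage sketch: how an upper-layer driver might call osd_auto_detect_ver()
 * on a freshly initialized osd_dev. The example_* name and the zeroed
 * capability buffer are hypothetical stand-ins; real callers build proper
 * capabilities for their security level.
 */
#if 0
static int example_detect(struct osd_dev *od, struct osd_dev_info *odi)
{
	u8 caps[OSD_CAP_LEN] = {0};	/* caller-provided capability bytes */
	int ret;

	ret = osd_auto_detect_ver(od, caps, odi);
	if (ret)
		return ret;

	/* odi->osdname (kzalloc'ed above) and odi->systemid are now valid;
	 * the caller keeps them for later device identification.
	 */
	return 0;
}
#endif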

static unsigned _osd_req_cdb_len(struct osd_request *or)
{
	return osd_req_is_ver1(or) ? OSDv1_TOTAL_CDB_LEN : OSD_TOTAL_CDB_LEN;
}

static unsigned _osd_req_alist_elem_size(struct osd_request *or, unsigned len)
{
	return osd_req_is_ver1(or) ?
		osdv1_attr_list_elem_size(len) :
		osdv2_attr_list_elem_size(len);
}

static void _osd_req_alist_elem_encode(struct osd_request *or,
	void *attr_last, const struct osd_attr *oa)
{
	if (osd_req_is_ver1(or)) {
		struct osdv1_attributes_list_element *attr = attr_last;

		attr->attr_page = cpu_to_be32(oa->attr_page);
		attr->attr_id = cpu_to_be32(oa->attr_id);
		attr->attr_bytes = cpu_to_be16(oa->len);
		memcpy(attr->attr_val, oa->val_ptr, oa->len);
	} else {
		struct osdv2_attributes_list_element *attr = attr_last;

		attr->attr_page = cpu_to_be32(oa->attr_page);
		attr->attr_id = cpu_to_be32(oa->attr_id);
		attr->attr_bytes = cpu_to_be16(oa->len);
		memcpy(attr->attr_val, oa->val_ptr, oa->len);
	}
}

static int _osd_req_alist_elem_decode(struct osd_request *or,
	void *cur_p, struct osd_attr *oa, unsigned max_bytes)
{
	unsigned inc;

	if (osd_req_is_ver1(or)) {
		struct osdv1_attributes_list_element *attr = cur_p;

		if (max_bytes < sizeof(*attr))
			return -1;

		oa->len = be16_to_cpu(attr->attr_bytes);
		inc = _osd_req_alist_elem_size(or, oa->len);
		if (inc > max_bytes)
			return -1;

		oa->attr_page = be32_to_cpu(attr->attr_page);
		oa->attr_id = be32_to_cpu(attr->attr_id);

		/* OSD1: On empty attributes we return a pointer to 2 bytes
		 * of zeros. This keeps similar behaviour with OSD2.
		 * (See below)
		 */
		oa->val_ptr = likely(oa->len) ? attr->attr_val :
						(u8 *)&attr->attr_bytes;
	} else {
		struct osdv2_attributes_list_element *attr = cur_p;

		if (max_bytes < sizeof(*attr))
			return -1;

		oa->len = be16_to_cpu(attr->attr_bytes);
		inc = _osd_req_alist_elem_size(or, oa->len);
		if (inc > max_bytes)
			return -1;

		oa->attr_page = be32_to_cpu(attr->attr_page);
		oa->attr_id = be32_to_cpu(attr->attr_id);

		/* OSD2: For convenience, on empty attributes, we return 8 bytes
		 * of zeros here. This keeps the same behaviour with OSD2r04,
		 * and is nice with null terminating ASCII fields.
		 * oa->val_ptr == NULL marks the end-of-list, or error.
		 */
		oa->val_ptr = likely(oa->len) ? attr->attr_val : attr->reserved;
	}
	return inc;
}

static unsigned _osd_req_alist_size(struct osd_request *or, void *list_head)
{
	return osd_req_is_ver1(or) ?
		osdv1_list_size(list_head) :
		osdv2_list_size(list_head);
}

static unsigned _osd_req_sizeof_alist_header(struct osd_request *or)
{
	return osd_req_is_ver1(or) ?
		sizeof(struct osdv1_attributes_list_header) :
		sizeof(struct osdv2_attributes_list_header);
}

static void _osd_req_set_alist_type(struct osd_request *or,
	void *list, int list_type)
{
	if (osd_req_is_ver1(or)) {
		struct osdv1_attributes_list_header *attr_list = list;

		memset(attr_list, 0, sizeof(*attr_list));
		attr_list->type = list_type;
	} else {
		struct osdv2_attributes_list_header *attr_list = list;

		memset(attr_list, 0, sizeof(*attr_list));
		attr_list->type = list_type;
	}
}

static bool _osd_req_is_alist_type(struct osd_request *or,
	void *list, int list_type)
{
	if (!list)
		return false;

	if (osd_req_is_ver1(or)) {
		struct osdv1_attributes_list_header *attr_list = list;

		return attr_list->type == list_type;
	} else {
		struct osdv2_attributes_list_header *attr_list = list;

		return attr_list->type == list_type;
	}
}

/* This is for List-objects not Attributes-Lists */
static void _osd_req_encode_olist(struct osd_request *or,
	struct osd_obj_id_list *list)
{
	struct osd_cdb_head *cdbh = osd_cdb_head(&or->cdb);

	if (osd_req_is_ver1(or)) {
		cdbh->v1.list_identifier = list->list_identifier;
		cdbh->v1.start_address = list->continuation_id;
	} else {
		cdbh->v2.list_identifier = list->list_identifier;
		cdbh->v2.start_address = list->continuation_id;
	}
}

static osd_cdb_offset osd_req_encode_offset(struct osd_request *or,
	u64 offset, unsigned *padding)
{
	return __osd_encode_offset(offset, padding,
			osd_req_is_ver1(or) ?
				OSDv1_OFFSET_MIN_SHIFT : OSD_OFFSET_MIN_SHIFT,
			OSD_OFFSET_MAX_SHIFT);
}

static struct osd_security_parameters *
_osd_req_sec_params(struct osd_request *or)
{
	struct osd_cdb *ocdb = &or->cdb;

	if (osd_req_is_ver1(or))
		return (struct osd_security_parameters *)&ocdb->v1.sec_params;
	else
		return (struct osd_security_parameters *)&ocdb->v2.sec_params;
}

void osd_dev_init(struct osd_dev *osdd, struct scsi_device *scsi_device)
{
	memset(osdd, 0, sizeof(*osdd));
	osdd->scsi_device = scsi_device;
	osdd->def_timeout = BLK_DEFAULT_SG_TIMEOUT;
#ifdef OSD_VER1_SUPPORT
	osdd->version = OSD_VER2;
#endif
	/* TODO: Allocate pools for osd_request attributes ... */
}
EXPORT_SYMBOL(osd_dev_init);

void osd_dev_fini(struct osd_dev *osdd)
{
	/* TODO: De-allocate pools */

	osdd->scsi_device = NULL;
}
EXPORT_SYMBOL(osd_dev_fini);
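
/*
 * Usage sketch: osd_dev_init()/osd_dev_fini() bracket the lifetime of an
 * osd_dev wrapping a scsi_device. The example_* names are hypothetical; a
 * real upper-layer driver embeds the osd_dev in its own per-device state.
 */
#if 0
static void example_bind(struct osd_dev *od, struct scsi_device *sdev)
{
	osd_dev_init(od, sdev);		/* od is now usable for requests */
}

static void example_unbind(struct osd_dev *od)
{
	osd_dev_fini(od);		/* forgets the scsi_device pointer */
}
#endif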

static struct osd_request *_osd_request_alloc(gfp_t gfp)
{
	struct osd_request *or;

	/* TODO: Use mempool with one saved request */
	or = kzalloc(sizeof(*or), gfp);
	return or;
}

static void _osd_request_free(struct osd_request *or)
{
	kfree(or);
}

struct osd_request *osd_start_request(struct osd_dev *dev, gfp_t gfp)
{
	struct osd_request *or;

	or = _osd_request_alloc(gfp);
	if (!or)
		return NULL;

	or->osd_dev = dev;
	or->alloc_flags = gfp;
	or->timeout = dev->def_timeout;
	or->retries = OSD_REQ_RETRIES;

	return or;
}
EXPORT_SYMBOL(osd_start_request);

static void _osd_free_seg(struct osd_request *or __unused,
	struct _osd_req_data_segment *seg)
{
	if (!seg->buff || !seg->alloc_size)
		return;

	kfree(seg->buff);
	seg->buff = NULL;
	seg->alloc_size = 0;
}

static void _put_request(struct request *rq)
{
	/*
	 * If osd_finalize_request() was called but the request was not
	 * executed through the block layer, then we must release BIOs.
	 * TODO: Keep error code in or->async_error. Need to audit all
	 * code paths.
	 */
	if (unlikely(rq->bio))
		blk_end_request(rq, -ENOMEM, blk_rq_bytes(rq));
	else
		blk_put_request(rq);
}

void osd_end_request(struct osd_request *or)
{
	struct request *rq = or->request;

	if (rq) {
		if (rq->next_rq) {
			_put_request(rq->next_rq);
			rq->next_rq = NULL;
		}

		_put_request(rq);
	}

	_osd_free_seg(or, &or->get_attr);
	_osd_free_seg(or, &or->enc_get_attr);
	_osd_free_seg(or, &or->set_attr);
	_osd_free_seg(or, &or->cdb_cont);

	_osd_request_free(or);
}
EXPORT_SYMBOL(osd_end_request);

static void _set_error_resid(struct osd_request *or, struct request *req,
			     int error)
{
	or->async_error = error;
	or->req_errors = req->errors ? : error;
	or->sense_len = req->sense_len;
	if (or->out.req)
		or->out.residual = or->out.req->resid_len;
	if (or->in.req)
		or->in.residual = or->in.req->resid_len;
}

int osd_execute_request(struct osd_request *or)
{
	int error = blk_execute_rq(or->request->q, NULL, or->request, 0);

	_set_error_resid(or, or->request, error);

	return error;
}
EXPORT_SYMBOL(osd_execute_request);
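
/*
 * Usage sketch: the typical synchronous round trip, mirroring
 * _osd_get_print_system_info() above. The example_* name and the zeroed
 * capability buffer are hypothetical stand-ins for the caller's own data.
 */
#if 0
static int example_sync_request(struct osd_dev *od, struct osd_obj_id *obj)
{
	u8 caps[OSD_CAP_LEN] = {0};
	struct osd_request *or;
	int ret;

	or = osd_start_request(od, GFP_KERNEL);
	if (!or)
		return -ENOMEM;

	osd_req_remove_object(or, obj);			/* encode one command */
	ret = osd_finalize_request(or, 0, caps, NULL);	/* build CDB + BIOs */
	if (!ret)
		ret = osd_execute_request(or);		/* blocking execution */

	osd_end_request(or);				/* always release */
	return ret;
}
#endif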

static void osd_request_async_done(struct request *req, int error)
{
	struct osd_request *or = req->end_io_data;

	_set_error_resid(or, req, error);
	if (req->next_rq) {
		__blk_put_request(req->q, req->next_rq);
		req->next_rq = NULL;
	}

	__blk_put_request(req->q, req);
	or->request = NULL;
	or->in.req = NULL;
	or->out.req = NULL;

	if (or->async_done)
		or->async_done(or, or->async_private);
	else
		osd_end_request(or);
}

int osd_execute_request_async(struct osd_request *or,
	osd_req_done_fn *done, void *private)
{
	or->request->end_io_data = or;
	or->async_private = private;
	or->async_done = done;

	blk_execute_rq_nowait(or->request->q, NULL, or->request, 0,
			      osd_request_async_done);
	return 0;
}
EXPORT_SYMBOL(osd_execute_request_async);
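
/*
 * Usage sketch: asynchronous execution. The completion callback receives the
 * osd_request and the caller's private cookie, exactly as
 * osd_request_async_done() invokes or->async_done above. The example_* name
 * is hypothetical. After osd_finalize_request() succeeds, a submitter would
 * call:
 *	osd_execute_request_async(or, example_async_done, cookie);
 */
#if 0
static void example_async_done(struct osd_request *or, void *private)
{
	if (or->async_error)
		OSD_ERR("async request failed => %d\n", or->async_error);

	/* private is whatever cookie the submitter passed in */
	osd_end_request(or);	/* the callback now owns the request */
}
#endif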

u8 sg_out_pad_buffer[1 << OSDv1_OFFSET_MIN_SHIFT];
u8 sg_in_pad_buffer[1 << OSDv1_OFFSET_MIN_SHIFT];

static int _osd_realloc_seg(struct osd_request *or,
	struct _osd_req_data_segment *seg, unsigned max_bytes)
{
	void *buff;

	if (seg->alloc_size >= max_bytes)
		return 0;

	buff = krealloc(seg->buff, max_bytes, or->alloc_flags);
	if (!buff) {
		OSD_ERR("Failed to Realloc %d-bytes was-%d\n", max_bytes,
			seg->alloc_size);
		return -ENOMEM;
	}

	memset(buff + seg->alloc_size, 0, max_bytes - seg->alloc_size);
	seg->buff = buff;
	seg->alloc_size = max_bytes;
	return 0;
}

static int _alloc_cdb_cont(struct osd_request *or, unsigned total_bytes)
{
	OSD_DEBUG("total_bytes=%d\n", total_bytes);
	return _osd_realloc_seg(or, &or->cdb_cont, total_bytes);
}

static int _alloc_set_attr_list(struct osd_request *or,
	const struct osd_attr *oa, unsigned nelem, unsigned add_bytes)
{
	unsigned total_bytes = add_bytes;

	for (; nelem; --nelem, ++oa)
		total_bytes += _osd_req_alist_elem_size(or, oa->len);

	OSD_DEBUG("total_bytes=%d\n", total_bytes);
	return _osd_realloc_seg(or, &or->set_attr, total_bytes);
}

static int _alloc_get_attr_desc(struct osd_request *or, unsigned max_bytes)
{
	OSD_DEBUG("total_bytes=%d\n", max_bytes);
	return _osd_realloc_seg(or, &or->enc_get_attr, max_bytes);
}

static int _alloc_get_attr_list(struct osd_request *or)
{
	OSD_DEBUG("total_bytes=%d\n", or->get_attr.total_bytes);
	return _osd_realloc_seg(or, &or->get_attr, or->get_attr.total_bytes);
}

/*
 * Common to all OSD commands
 */

static void _osdv1_req_encode_common(struct osd_request *or,
	__be16 act, const struct osd_obj_id *obj, u64 offset, u64 len)
{
	struct osdv1_cdb *ocdb = &or->cdb.v1;

	/*
	 * For speed, the commands
	 *	OSD_ACT_PERFORM_SCSI_COMMAND	, V1 0x8F7E, V2 0x8F7C
	 *	OSD_ACT_SCSI_TASK_MANAGEMENT	, V1 0x8F7F, V2 0x8F7D
	 * are not supported here. Should pass zero and set after the call
	 */
	act &= cpu_to_be16(~0x0080); /* V1 action code */

	OSD_DEBUG("OSDv1 execute opcode 0x%x\n", be16_to_cpu(act));

	ocdb->h.varlen_cdb.opcode = VARIABLE_LENGTH_CMD;
	ocdb->h.varlen_cdb.additional_cdb_length = OSD_ADDITIONAL_CDB_LENGTH;
	ocdb->h.varlen_cdb.service_action = act;

	ocdb->h.partition = cpu_to_be64(obj->partition);
	ocdb->h.object = cpu_to_be64(obj->id);
	ocdb->h.v1.length = cpu_to_be64(len);
	ocdb->h.v1.start_address = cpu_to_be64(offset);
}

static void _osdv2_req_encode_common(struct osd_request *or,
	__be16 act, const struct osd_obj_id *obj, u64 offset, u64 len)
{
	struct osdv2_cdb *ocdb = &or->cdb.v2;

	OSD_DEBUG("OSDv2 execute opcode 0x%x\n", be16_to_cpu(act));

	ocdb->h.varlen_cdb.opcode = VARIABLE_LENGTH_CMD;
	ocdb->h.varlen_cdb.additional_cdb_length = OSD_ADDITIONAL_CDB_LENGTH;
	ocdb->h.varlen_cdb.service_action = act;

	ocdb->h.partition = cpu_to_be64(obj->partition);
	ocdb->h.object = cpu_to_be64(obj->id);
	ocdb->h.v2.length = cpu_to_be64(len);
	ocdb->h.v2.start_address = cpu_to_be64(offset);
}

static void _osd_req_encode_common(struct osd_request *or,
	__be16 act, const struct osd_obj_id *obj, u64 offset, u64 len)
{
	if (osd_req_is_ver1(or))
		_osdv1_req_encode_common(or, act, obj, offset, len);
	else
		_osdv2_req_encode_common(or, act, obj, offset, len);
}

/*
 * Device commands
 */
/*TODO: void osd_req_set_master_seed_xchg(struct osd_request *, ...); */
/*TODO: void osd_req_set_master_key(struct osd_request *, ...); */

void osd_req_format(struct osd_request *or, u64 tot_capacity)
{
	_osd_req_encode_common(or, OSD_ACT_FORMAT_OSD, &osd_root_object, 0,
			       tot_capacity);
}
EXPORT_SYMBOL(osd_req_format);

int osd_req_list_dev_partitions(struct osd_request *or,
	osd_id initial_id, struct osd_obj_id_list *list, unsigned nelem)
{
	return osd_req_list_partition_objects(or, 0, initial_id, list, nelem);
}
EXPORT_SYMBOL(osd_req_list_dev_partitions);

static void _osd_req_encode_flush(struct osd_request *or,
	enum osd_options_flush_scope_values op)
{
	struct osd_cdb_head *ocdb = osd_cdb_head(&or->cdb);

	ocdb->command_specific_options = op;
}

void osd_req_flush_obsd(struct osd_request *or,
	enum osd_options_flush_scope_values op)
{
	_osd_req_encode_common(or, OSD_ACT_FLUSH_OSD, &osd_root_object, 0, 0);
	_osd_req_encode_flush(or, op);
}
EXPORT_SYMBOL(osd_req_flush_obsd);

/*TODO: void osd_req_perform_scsi_command(struct osd_request *,
	const u8 *cdb, ...); */
/*TODO: void osd_req_task_management(struct osd_request *, ...); */

/*
 * Partition commands
 */
static void _osd_req_encode_partition(struct osd_request *or,
	__be16 act, osd_id partition)
{
	struct osd_obj_id par = {
		.partition = partition,
		.id = 0,
	};

	_osd_req_encode_common(or, act, &par, 0, 0);
}

void osd_req_create_partition(struct osd_request *or, osd_id partition)
{
	_osd_req_encode_partition(or, OSD_ACT_CREATE_PARTITION, partition);
}
EXPORT_SYMBOL(osd_req_create_partition);

void osd_req_remove_partition(struct osd_request *or, osd_id partition)
{
	_osd_req_encode_partition(or, OSD_ACT_REMOVE_PARTITION, partition);
}
EXPORT_SYMBOL(osd_req_remove_partition);

/*TODO: void osd_req_set_partition_key(struct osd_request *,
	osd_id partition, u8 new_key_id[OSD_CRYPTO_KEYID_SIZE],
	u8 seed[OSD_CRYPTO_SEED_SIZE]); */

static int _osd_req_list_objects(struct osd_request *or,
	__be16 action, const struct osd_obj_id *obj, osd_id initial_id,
	struct osd_obj_id_list *list, unsigned nelem)
{
	struct request_queue *q = osd_request_queue(or->osd_dev);
	u64 len = nelem * sizeof(osd_id) + sizeof(*list);
	struct bio *bio;

	_osd_req_encode_common(or, action, obj, (u64)initial_id, len);

	if (list->list_identifier)
		_osd_req_encode_olist(or, list);

	WARN_ON(or->in.bio);
	bio = bio_map_kern(q, list, len, or->alloc_flags);
	if (IS_ERR(bio)) {
		OSD_ERR("!!! Failed to allocate list_objects BIO\n");
		return PTR_ERR(bio);
	}

	bio->bi_rw &= ~REQ_WRITE;
	or->in.bio = bio;
	or->in.total_bytes = bio->bi_size;
	return 0;
}

int osd_req_list_partition_collections(struct osd_request *or,
	osd_id partition, osd_id initial_id, struct osd_obj_id_list *list,
	unsigned nelem)
{
	struct osd_obj_id par = {
		.partition = partition,
		.id = 0,
	};

	return osd_req_list_collection_objects(or, &par, initial_id, list,
					       nelem);
}
EXPORT_SYMBOL(osd_req_list_partition_collections);

int osd_req_list_partition_objects(struct osd_request *or,
	osd_id partition, osd_id initial_id, struct osd_obj_id_list *list,
	unsigned nelem)
{
	struct osd_obj_id par = {
		.partition = partition,
		.id = 0,
	};

	return _osd_req_list_objects(or, OSD_ACT_LIST, &par, initial_id, list,
				     nelem);
}
EXPORT_SYMBOL(osd_req_list_partition_objects);

void osd_req_flush_partition(struct osd_request *or,
	osd_id partition, enum osd_options_flush_scope_values op)
{
	_osd_req_encode_partition(or, OSD_ACT_FLUSH_PARTITION, partition);
	_osd_req_encode_flush(or, op);
}
EXPORT_SYMBOL(osd_req_flush_partition);

/*
 * Collection commands
 */
/*TODO: void osd_req_create_collection(struct osd_request *,
	const struct osd_obj_id *); */
/*TODO: void osd_req_remove_collection(struct osd_request *,
	const struct osd_obj_id *); */

int osd_req_list_collection_objects(struct osd_request *or,
	const struct osd_obj_id *obj, osd_id initial_id,
	struct osd_obj_id_list *list, unsigned nelem)
{
	return _osd_req_list_objects(or, OSD_ACT_LIST_COLLECTION, obj,
				     initial_id, list, nelem);
}
EXPORT_SYMBOL(osd_req_list_collection_objects);

/*TODO: void query(struct osd_request *, ...); V2 */

void osd_req_flush_collection(struct osd_request *or,
	const struct osd_obj_id *obj, enum osd_options_flush_scope_values op)
{
	_osd_req_encode_common(or, OSD_ACT_FLUSH_PARTITION, obj, 0, 0);
	_osd_req_encode_flush(or, op);
}
EXPORT_SYMBOL(osd_req_flush_collection);

/*TODO: void get_member_attrs(struct osd_request *, ...); V2 */
/*TODO: void set_member_attrs(struct osd_request *, ...); V2 */

/*
 * Object commands
 */
void osd_req_create_object(struct osd_request *or, struct osd_obj_id *obj)
{
	_osd_req_encode_common(or, OSD_ACT_CREATE, obj, 0, 0);
}
EXPORT_SYMBOL(osd_req_create_object);

void osd_req_remove_object(struct osd_request *or, struct osd_obj_id *obj)
{
	_osd_req_encode_common(or, OSD_ACT_REMOVE, obj, 0, 0);
}
EXPORT_SYMBOL(osd_req_remove_object);

/*TODO: void osd_req_create_multi(struct osd_request *or,
	struct osd_obj_id *first, struct osd_obj_id_list *list, unsigned nelem);
*/

void osd_req_write(struct osd_request *or,
	const struct osd_obj_id *obj, u64 offset,
	struct bio *bio, u64 len)
{
	_osd_req_encode_common(or, OSD_ACT_WRITE, obj, offset, len);
	WARN_ON(or->out.bio || or->out.total_bytes);
	WARN_ON(0 == (bio->bi_rw & REQ_WRITE));
	or->out.bio = bio;
	or->out.total_bytes = len;
}
EXPORT_SYMBOL(osd_req_write);

int osd_req_write_kern(struct osd_request *or,
	const struct osd_obj_id *obj, u64 offset, void *buff, u64 len)
{
	struct request_queue *req_q = osd_request_queue(or->osd_dev);
	struct bio *bio = bio_map_kern(req_q, buff, len, GFP_KERNEL);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	bio->bi_rw |= REQ_WRITE; /* FIXME: bio_set_dir() */
	osd_req_write(or, obj, offset, bio, len);
	return 0;
}
EXPORT_SYMBOL(osd_req_write_kern);

/*TODO: void osd_req_append(struct osd_request *,
	const struct osd_obj_id *, struct bio *data_out); */
/*TODO: void osd_req_create_write(struct osd_request *,
	const struct osd_obj_id *, struct bio *data_out, u64 offset); */
/*TODO: void osd_req_clear(struct osd_request *,
	const struct osd_obj_id *, u64 offset, u64 len); */
/*TODO: void osd_req_punch(struct osd_request *,
	const struct osd_obj_id *, u64 offset, u64 len); V2 */

void osd_req_flush_object(struct osd_request *or,
	const struct osd_obj_id *obj, enum osd_options_flush_scope_values op,
	/*V2*/ u64 offset, /*V2*/ u64 len)
{
	if (unlikely(osd_req_is_ver1(or) && (offset || len))) {
		OSD_DEBUG("OSD Ver1 flush on specific range ignored\n");
		offset = 0;
		len = 0;
	}

	_osd_req_encode_common(or, OSD_ACT_FLUSH, obj, offset, len);
	_osd_req_encode_flush(or, op);
}
EXPORT_SYMBOL(osd_req_flush_object);

void osd_req_read(struct osd_request *or,
	const struct osd_obj_id *obj, u64 offset,
	struct bio *bio, u64 len)
{
	_osd_req_encode_common(or, OSD_ACT_READ, obj, offset, len);
	WARN_ON(or->in.bio || or->in.total_bytes);
	WARN_ON(bio->bi_rw & REQ_WRITE);
	or->in.bio = bio;
	or->in.total_bytes = len;
}
EXPORT_SYMBOL(osd_req_read);

int osd_req_read_kern(struct osd_request *or,
	const struct osd_obj_id *obj, u64 offset, void *buff, u64 len)
{
	struct request_queue *req_q = osd_request_queue(or->osd_dev);
	struct bio *bio = bio_map_kern(req_q, buff, len, GFP_KERNEL);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	osd_req_read(or, obj, offset, bio, len);
	return 0;
}
EXPORT_SYMBOL(osd_req_read_kern);
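
/*
 * Usage sketch: writing a kernel buffer to an object with osd_req_write_kern()
 * and then reading it back with osd_req_read_kern(). Each transfer needs its
 * own osd_request; the example_* name and zeroed capability buffer are
 * hypothetical.
 */
#if 0
static int example_write_buffer(struct osd_dev *od,
		const struct osd_obj_id *obj, void *buff, u64 len)
{
	u8 caps[OSD_CAP_LEN] = {0};
	struct osd_request *or;
	int ret;

	or = osd_start_request(od, GFP_KERNEL);
	if (!or)
		return -ENOMEM;

	ret = osd_req_write_kern(or, obj, 0, buff, len);
	if (!ret)
		ret = osd_finalize_request(or, 0, caps, NULL);
	if (!ret)
		ret = osd_execute_request(or);

	osd_end_request(or);

	/* Reading back follows the same pattern, with osd_req_read_kern()
	 * replacing osd_req_write_kern() on a fresh osd_request.
	 */
	return ret;
}
#endif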

static int _add_sg_continuation_descriptor(struct osd_request *or,
	const struct osd_sg_entry *sglist, unsigned numentries, u64 *len)
{
	struct osd_sg_continuation_descriptor *oscd;
	u32 oscd_size;
	unsigned i;
	int ret;

	oscd_size = sizeof(*oscd) + numentries * sizeof(oscd->entries[0]);

	if (!or->cdb_cont.total_bytes) {
		/* First time, jump over the header, we will write to:
		 *	cdb_cont.buff + cdb_cont.total_bytes
		 */
		or->cdb_cont.total_bytes =
				sizeof(struct osd_continuation_segment_header);
	}

	ret = _alloc_cdb_cont(or, or->cdb_cont.total_bytes + oscd_size);
	if (unlikely(ret))
		return ret;

	oscd = or->cdb_cont.buff + or->cdb_cont.total_bytes;
	oscd->hdr.type = cpu_to_be16(SCATTER_GATHER_LIST);
	oscd->hdr.pad_length = 0;
	oscd->hdr.length = cpu_to_be32(oscd_size - sizeof(*oscd));

	*len = 0;
	/* copy the sg entries and convert to network byte order */
	for (i = 0; i < numentries; i++) {
		oscd->entries[i].offset = cpu_to_be64(sglist[i].offset);
		oscd->entries[i].len = cpu_to_be64(sglist[i].len);
		*len += sglist[i].len;
	}

	or->cdb_cont.total_bytes += oscd_size;
	OSD_DEBUG("total_bytes=%d oscd_size=%d numentries=%d\n",
		  or->cdb_cont.total_bytes, oscd_size, numentries);
	return 0;
}

static int _osd_req_finalize_cdb_cont(struct osd_request *or, const u8 *cap_key)
{
	struct request_queue *req_q = osd_request_queue(or->osd_dev);
	struct bio *bio;
	struct osd_cdb_head *cdbh = osd_cdb_head(&or->cdb);
	struct osd_continuation_segment_header *cont_seg_hdr;

	if (!or->cdb_cont.total_bytes)
		return 0;

	cont_seg_hdr = or->cdb_cont.buff;
	cont_seg_hdr->format = CDB_CONTINUATION_FORMAT_V2;
	cont_seg_hdr->service_action = cdbh->varlen_cdb.service_action;

	/* create a bio for continuation segment */
	bio = bio_map_kern(req_q, or->cdb_cont.buff, or->cdb_cont.total_bytes,
			   GFP_KERNEL);
	if (IS_ERR(bio))
		return PTR_ERR(bio);

	bio->bi_rw |= REQ_WRITE;

	/* integrity check the continuation before the bio is linked
	 * with the other data segments since the continuation
	 * integrity is separate from the other data segments.
	 */
	osd_sec_sign_data(cont_seg_hdr->integrity_check, bio, cap_key);

	cdbh->v2.cdb_continuation_length = cpu_to_be32(or->cdb_cont.total_bytes);

	/* we can't use _req_append_segment, because we need to link in the
	 * continuation bio to the head of the bio list - the
	 * continuation segment (if it exists) is always the first segment in
	 * the out data buffer.
	 */
	bio->bi_next = or->out.bio;
	or->out.bio = bio;
	or->out.total_bytes += or->cdb_cont.total_bytes;

	return 0;
}

/* osd_req_write_sg: Takes a @bio that points to the data out buffer and an
 * @sglist that has the scatter gather entries. Scatter-gather enables a write
 * of multiple non-contiguous areas of an object, in a single call. The extents
 * may overlap and/or be in any order. The only constraint is that:
 *	total_bytes(sglist) >= total_bytes(bio)
 */
int osd_req_write_sg(struct osd_request *or,
	const struct osd_obj_id *obj, struct bio *bio,
	const struct osd_sg_entry *sglist, unsigned numentries)
{
	u64 len;
	int ret = _add_sg_continuation_descriptor(or, sglist, numentries, &len);

	if (ret)
		return ret;
	osd_req_write(or, obj, 0, bio, len);

	return 0;
}
EXPORT_SYMBOL(osd_req_write_sg);
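
/*
 * Usage sketch: scattering one contiguous kernel buffer to two extents of an
 * object via osd_req_write_sg(), built on the same bio_map_kern()+REQ_WRITE
 * pattern osd_req_write_kern() uses above. The example_* name and offsets
 * are hypothetical placeholders.
 */
#if 0
static int example_write_two_extents(struct osd_request *or,
		const struct osd_obj_id *obj, void *buff, unsigned half)
{
	struct request_queue *q = osd_request_queue(or->osd_dev);
	struct osd_sg_entry sglist[2] = {
		{ .offset = 0,        .len = half },
		{ .offset = 4 * half, .len = half },	/* a hole in between */
	};
	struct bio *bio = bio_map_kern(q, buff, 2 * half, GFP_KERNEL);

	if (IS_ERR(bio))
		return PTR_ERR(bio);
	bio->bi_rw |= REQ_WRITE;

	return osd_req_write_sg(or, obj, bio, sglist, 2);
}
#endif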

/* osd_req_read_sg: Read multiple extents of an object into @bio
 * See osd_req_write_sg
 */
int osd_req_read_sg(struct osd_request *or,
	const struct osd_obj_id *obj, struct bio *bio,
	const struct osd_sg_entry *sglist, unsigned numentries)
{
	u64 len;
	u64 off;
	int ret;

	if (numentries > 1) {
		off = 0;
		ret = _add_sg_continuation_descriptor(or, sglist, numentries,
						      &len);
		if (ret)
			return ret;
	} else {
		/* Optimize the case of single segment, read_sg is a
		 * bidi operation.
		 */
		len = sglist->len;
		off = sglist->offset;
	}
	osd_req_read(or, obj, off, bio, len);

	return 0;
}
EXPORT_SYMBOL(osd_req_read_sg);

/* SG-list write/read Kern API
 *
 * osd_req_{write,read}_sg_kern takes an array of @buff pointers and an array
 * of sg_entries. @numentries indicates how many pointers and sg_entries there
 * are. Requiring an array of buff pointers allows a caller to do a single
 * write/read and scatter into multiple buffers.
 * NOTE: Each buffer + len should not cross a page boundary.
 */
static struct bio *_create_sg_bios(struct osd_request *or,
	void **buff, const struct osd_sg_entry *sglist, unsigned numentries)
{
	struct request_queue *q = osd_request_queue(or->osd_dev);
	struct bio *bio;
	unsigned i;

	bio = bio_kmalloc(GFP_KERNEL, numentries);
	if (unlikely(!bio)) {
		OSD_DEBUG("Failed to allocate BIO size=%u\n", numentries);
		return ERR_PTR(-ENOMEM);
	}

	for (i = 0; i < numentries; i++) {
		unsigned offset = offset_in_page(buff[i]);
		struct page *page = virt_to_page(buff[i]);
		unsigned len = sglist[i].len;
		unsigned added_len;

		BUG_ON(offset + len > PAGE_SIZE);
		added_len = bio_add_pc_page(q, bio, page, len, offset);
		if (unlikely(len != added_len)) {
			OSD_DEBUG("bio_add_pc_page len(%d) != added_len(%d)\n",
				  len, added_len);
			bio_put(bio);
			return ERR_PTR(-ENOMEM);
		}
	}

	return bio;
}

int osd_req_write_sg_kern(struct osd_request *or,
	const struct osd_obj_id *obj, void **buff,
	const struct osd_sg_entry *sglist, unsigned numentries)
{
	struct bio *bio = _create_sg_bios(or, buff, sglist, numentries);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	bio->bi_rw |= REQ_WRITE;
	osd_req_write_sg(or, obj, bio, sglist, numentries);

	return 0;
}
EXPORT_SYMBOL(osd_req_write_sg_kern);

int osd_req_read_sg_kern(struct osd_request *or,
	const struct osd_obj_id *obj, void **buff,
	const struct osd_sg_entry *sglist, unsigned numentries)
{
	struct bio *bio = _create_sg_bios(or, buff, sglist, numentries);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	osd_req_read_sg(or, obj, bio, sglist, numentries);

	return 0;
}
EXPORT_SYMBOL(osd_req_read_sg_kern);
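
/*
 * Usage sketch: gathering two object extents into two separate kernel
 * buffers with osd_req_read_sg_kern(). Per the note above, each buffer plus
 * its length must stay within one page. The example_* name and the extent
 * offsets/lengths are hypothetical.
 */
#if 0
static int example_read_two_buffers(struct osd_request *or,
		const struct osd_obj_id *obj, void *buf0, void *buf1)
{
	void *buff[2] = { buf0, buf1 };
	struct osd_sg_entry sglist[2] = {
		{ .offset = 0,           .len = 512 },
		{ .offset = 1024 * 1024, .len = 512 },
	};

	return osd_req_read_sg_kern(or, obj, buff, sglist, 2);
}
#endif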

void osd_req_get_attributes(struct osd_request *or,
	const struct osd_obj_id *obj)
{
	_osd_req_encode_common(or, OSD_ACT_GET_ATTRIBUTES, obj, 0, 0);
}
EXPORT_SYMBOL(osd_req_get_attributes);

void osd_req_set_attributes(struct osd_request *or,
	const struct osd_obj_id *obj)
{
	_osd_req_encode_common(or, OSD_ACT_SET_ATTRIBUTES, obj, 0, 0);
}
EXPORT_SYMBOL(osd_req_set_attributes);

/*
 * Attributes List-mode
 */

int osd_req_add_set_attr_list(struct osd_request *or,
	const struct osd_attr *oa, unsigned nelem)
{
	unsigned total_bytes = or->set_attr.total_bytes;
	void *attr_last;
	int ret;

	if (or->attributes_mode &&
	    or->attributes_mode != OSD_CDB_GET_SET_ATTR_LISTS) {
		WARN_ON(1);
		return -EINVAL;
	}
	or->attributes_mode = OSD_CDB_GET_SET_ATTR_LISTS;

	if (!total_bytes) { /* first-time: allocate and put list header */
		total_bytes = _osd_req_sizeof_alist_header(or);
		ret = _alloc_set_attr_list(or, oa, nelem, total_bytes);
		if (ret)
			return ret;
		_osd_req_set_alist_type(or, or->set_attr.buff,
					OSD_ATTR_LIST_SET_RETRIEVE);
	}
	attr_last = or->set_attr.buff + total_bytes;

	for (; nelem; --nelem) {
		unsigned elem_size = _osd_req_alist_elem_size(or, oa->len);

		total_bytes += elem_size;
		if (unlikely(or->set_attr.alloc_size < total_bytes)) {
			or->set_attr.total_bytes = total_bytes - elem_size;
			ret = _alloc_set_attr_list(or, oa, nelem, total_bytes);
			if (ret)
				return ret;
			attr_last =
				or->set_attr.buff + or->set_attr.total_bytes;
		}

		_osd_req_alist_elem_encode(or, attr_last, oa);

		attr_last += elem_size;
		++oa;
	}

	or->set_attr.total_bytes = total_bytes;
	return 0;
}
EXPORT_SYMBOL(osd_req_add_set_attr_list);
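
/*
 * Usage sketch: setting one attribute on an object in list mode. The page and
 * id values below are placeholders; real callers use the constants from
 * <scsi/osd_attributes.h> that match their on-disk layout.
 */
#if 0
static int example_set_one_attr(struct osd_request *or,
		const struct osd_obj_id *obj, void *val, unsigned len)
{
	struct osd_attr attr = {
		.attr_page = 0x12345678,	/* placeholder page number */
		.attr_id = 0x1,			/* placeholder attribute id */
		.len = len,
		.val_ptr = val,			/* raw bytes, copied as-is */
	};

	osd_req_set_attributes(or, obj);
	return osd_req_add_set_attr_list(or, &attr, 1);
}
#endif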

static int _req_append_segment(struct osd_request *or,
	unsigned padding, struct _osd_req_data_segment *seg,
	struct _osd_req_data_segment *last_seg, struct _osd_io_info *io)
{
	void *pad_buff;
	int ret;

	if (padding) {
		/* check if we can just add it to last buffer */
		if (last_seg &&
		    (padding <= last_seg->alloc_size - last_seg->total_bytes))
			pad_buff = last_seg->buff + last_seg->total_bytes;
		else
			pad_buff = io->pad_buff;

		ret = blk_rq_map_kern(io->req->q, io->req, pad_buff, padding,
				      or->alloc_flags);
		if (ret)
			return ret;
		io->total_bytes += padding;
	}

	ret = blk_rq_map_kern(io->req->q, io->req, seg->buff, seg->total_bytes,
			      or->alloc_flags);
	if (ret)
		return ret;

	io->total_bytes += seg->total_bytes;
	OSD_DEBUG("padding=%d buff=%p total_bytes=%d\n", padding, seg->buff,
		  seg->total_bytes);

	return 0;
}

static int _osd_req_finalize_set_attr_list(struct osd_request *or)
{
	struct osd_cdb_head *cdbh = osd_cdb_head(&or->cdb);
	unsigned padding;
	int ret;

	if (!or->set_attr.total_bytes) {
		cdbh->attrs_list.set_attr_offset = OSD_OFFSET_UNUSED;
		return 0;
	}

	cdbh->attrs_list.set_attr_bytes = cpu_to_be32(or->set_attr.total_bytes);
	cdbh->attrs_list.set_attr_offset =
		osd_req_encode_offset(or, or->out.total_bytes, &padding);

	ret = _req_append_segment(or, padding, &or->set_attr,
				  or->out.last_seg, &or->out);
	if (ret)
		return ret;

	or->out.last_seg = &or->set_attr;
	return 0;
}

int osd_req_add_get_attr_list(struct osd_request *or,
	const struct osd_attr *oa, unsigned nelem)
{
	unsigned total_bytes = or->enc_get_attr.total_bytes;
	void *attr_last;
	int ret;

	if (or->attributes_mode &&
	    or->attributes_mode != OSD_CDB_GET_SET_ATTR_LISTS) {
		WARN_ON(1);
		return -EINVAL;
	}
	or->attributes_mode = OSD_CDB_GET_SET_ATTR_LISTS;

	/* first time calc data-in list header size */
	if (!or->get_attr.total_bytes)
		or->get_attr.total_bytes = _osd_req_sizeof_alist_header(or);

	/* calc data-out info */
	if (!total_bytes) { /* first-time: allocate and put list header */
		unsigned max_bytes;

		total_bytes = _osd_req_sizeof_alist_header(or);
		max_bytes = total_bytes +
			nelem * sizeof(struct osd_attributes_list_attrid);
		ret = _alloc_get_attr_desc(or, max_bytes);
		if (ret)
			return ret;

		_osd_req_set_alist_type(or, or->enc_get_attr.buff,
					OSD_ATTR_LIST_GET);
	}
	attr_last = or->enc_get_attr.buff + total_bytes;

	for (; nelem; --nelem) {
		struct osd_attributes_list_attrid *attrid;
		const unsigned cur_size = sizeof(*attrid);

		total_bytes += cur_size;
		if (unlikely(or->enc_get_attr.alloc_size < total_bytes)) {
			or->enc_get_attr.total_bytes = total_bytes - cur_size;
			ret = _alloc_get_attr_desc(or,
					total_bytes + nelem * sizeof(*attrid));
			if (ret)
				return ret;
			attr_last = or->enc_get_attr.buff +
				or->enc_get_attr.total_bytes;
		}

		attrid = attr_last;
		attrid->attr_page = cpu_to_be32(oa->attr_page);
		attrid->attr_id = cpu_to_be32(oa->attr_id);

		attr_last += cur_size;

		/* calc data-in size */
		or->get_attr.total_bytes +=
			_osd_req_alist_elem_size(or, oa->len);
		++oa;
	}

	or->enc_get_attr.total_bytes = total_bytes;

	OSD_DEBUG(
	       "get_attr.total_bytes=%u(%u) enc_get_attr.total_bytes=%u(%Zu)\n",
	       or->get_attr.total_bytes,
	       or->get_attr.total_bytes - _osd_req_sizeof_alist_header(or),
	       or->enc_get_attr.total_bytes,
	       (or->enc_get_attr.total_bytes - _osd_req_sizeof_alist_header(or))
			/ sizeof(struct osd_attributes_list_attrid));

	return 0;
}
EXPORT_SYMBOL(osd_req_add_get_attr_list);

static int _osd_req_finalize_get_attr_list(struct osd_request *or)
{
	struct osd_cdb_head *cdbh = osd_cdb_head(&or->cdb);
	unsigned out_padding;
	unsigned in_padding;
	int ret;

	if (!or->enc_get_attr.total_bytes) {
		cdbh->attrs_list.get_attr_desc_offset = OSD_OFFSET_UNUSED;
		cdbh->attrs_list.get_attr_offset = OSD_OFFSET_UNUSED;
		return 0;
	}

	ret = _alloc_get_attr_list(or);
	if (ret)
		return ret;

	/* The out-going buffer info update */
	OSD_DEBUG("out-going\n");
	cdbh->attrs_list.get_attr_desc_bytes =
		cpu_to_be32(or->enc_get_attr.total_bytes);

	cdbh->attrs_list.get_attr_desc_offset =
		osd_req_encode_offset(or, or->out.total_bytes, &out_padding);

	ret = _req_append_segment(or, out_padding, &or->enc_get_attr,
				  or->out.last_seg, &or->out);
	if (ret)
		return ret;
	or->out.last_seg = &or->enc_get_attr;

	/* The incoming buffer info update */
	OSD_DEBUG("in-coming\n");
	cdbh->attrs_list.get_attr_alloc_length =
		cpu_to_be32(or->get_attr.total_bytes);

	cdbh->attrs_list.get_attr_offset =
		osd_req_encode_offset(or, or->in.total_bytes, &in_padding);

	ret = _req_append_segment(or, in_padding, &or->get_attr, NULL,
				  &or->in);
	if (ret)
		return ret;
	or->in.last_seg = &or->get_attr;

	return 0;
}

int osd_req_decode_get_attr_list(struct osd_request *or,
	struct osd_attr *oa, int *nelem, void **iterator)
{
	unsigned cur_bytes, returned_bytes;
	int n;
	const unsigned sizeof_attr_list = _osd_req_sizeof_alist_header(or);
	void *cur_p;

	if (!_osd_req_is_alist_type(or, or->get_attr.buff,
				    OSD_ATTR_LIST_SET_RETRIEVE)) {
		oa->attr_page = 0;
		oa->attr_id = 0;
		oa->val_ptr = NULL;
		oa->len = 0;
		*iterator = NULL;
		return 0;
	}

	if (*iterator) {
		BUG_ON((*iterator < or->get_attr.buff) ||
		     (or->get_attr.buff + or->get_attr.alloc_size < *iterator));
		cur_p = *iterator;
		cur_bytes = (*iterator - or->get_attr.buff) - sizeof_attr_list;
		returned_bytes = or->get_attr.total_bytes;
	} else { /* first time decode the list header */
		cur_bytes = sizeof_attr_list;
		returned_bytes = _osd_req_alist_size(or, or->get_attr.buff) +
					sizeof_attr_list;

		cur_p = or->get_attr.buff + sizeof_attr_list;

		if (returned_bytes > or->get_attr.alloc_size) {
			OSD_DEBUG("target report: space was not big enough! "
				  "Allocate=%u Needed=%u\n",
				  or->get_attr.alloc_size,
				  returned_bytes + sizeof_attr_list);

			returned_bytes =
				or->get_attr.alloc_size - sizeof_attr_list;
		}
		or->get_attr.total_bytes = returned_bytes;
	}

	for (n = 0; (n < *nelem) && (cur_bytes < returned_bytes); ++n) {
		int inc = _osd_req_alist_elem_decode(or, cur_p, oa,
						returned_bytes - cur_bytes);

		if (inc < 0) {
			OSD_ERR("BAD FOOD from target. list not valid!"
				"c=%d r=%d n=%d\n",
				cur_bytes, returned_bytes, n);
			oa->val_ptr = NULL;
			cur_bytes = returned_bytes; /* break the caller loop */
			break;
		}

		cur_bytes += inc;
		cur_p += inc;
		++oa;
	}

	*iterator = (returned_bytes - cur_bytes) ? cur_p : NULL;
	*nelem = n;
	return returned_bytes - cur_bytes;
}
EXPORT_SYMBOL(osd_req_decode_get_attr_list);
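
/*
 * Usage sketch: requesting and then decoding one attribute, following the
 * same add/decode pairing used by _osd_get_print_system_info() above. The
 * example_* name is hypothetical; the execute step happens in between the
 * two halves shown here.
 */
#if 0
static u64 example_get_used_capacity(struct osd_request *or)
{
	struct osd_attr attr = ATTR_DEF_RI(OSD_ATTR_RI_USED_CAPACITY, 8);
	void *iter = NULL;
	int nelem = 1;

	/* Before osd_finalize_request()/osd_execute_request(): */
	osd_req_get_attributes(or, &osd_root_object);
	osd_req_add_get_attr_list(or, &attr, 1);

	/* ... execute the request, then pull out the returned value: */
	osd_req_decode_get_attr_list(or, &attr, &nelem, &iter);
	return attr.val_ptr ? get_unaligned_be64(attr.val_ptr) : 0;
}
#endif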

/*
 * Attributes Page-mode
 */

int osd_req_add_get_attr_page(struct osd_request *or,
	u32 page_id, void *attar_page, unsigned max_page_len,
	const struct osd_attr *set_one_attr)
{
	struct osd_cdb_head *cdbh = osd_cdb_head(&or->cdb);

	if (or->attributes_mode &&
	    or->attributes_mode != OSD_CDB_GET_ATTR_PAGE_SET_ONE) {
		WARN_ON(1);
		return -EINVAL;
	}
	or->attributes_mode = OSD_CDB_GET_ATTR_PAGE_SET_ONE;

	or->get_attr.buff = attar_page;
	or->get_attr.total_bytes = max_page_len;

	cdbh->attrs_page.get_attr_page = cpu_to_be32(page_id);
	cdbh->attrs_page.get_attr_alloc_length = cpu_to_be32(max_page_len);

	if (!set_one_attr || !set_one_attr->attr_page)
		return 0; /* The set is optional */

	or->set_attr.buff = set_one_attr->val_ptr;
	or->set_attr.total_bytes = set_one_attr->len;

	cdbh->attrs_page.set_attr_page = cpu_to_be32(set_one_attr->attr_page);
	cdbh->attrs_page.set_attr_id = cpu_to_be32(set_one_attr->attr_id);
	cdbh->attrs_page.set_attr_length = cpu_to_be32(set_one_attr->len);

	return 0;
}
EXPORT_SYMBOL(osd_req_add_get_attr_page);
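
/*
 * Usage sketch: page-mode retrieval reads a whole attribute page into a
 * caller-supplied buffer instead of picking individual attributes. The
 * example_* name and the choice of the root-information page are
 * placeholders.
 */
#if 0
static int example_get_root_page(struct osd_request *or,
		void *page_buf, unsigned buf_len)
{
	osd_req_get_attributes(or, &osd_root_object);
	return osd_req_add_get_attr_page(or, OSD_APAGE_ROOT_INFORMATION,
					 page_buf, buf_len, NULL);
}
#endif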
  1199. static int _osd_req_finalize_attr_page(struct osd_request *or)
  1200. {
  1201. struct osd_cdb_head *cdbh = osd_cdb_head(&or->cdb);
  1202. unsigned in_padding, out_padding;
  1203. int ret;
  1204. /* returned page */
  1205. cdbh->attrs_page.get_attr_offset =
  1206. osd_req_encode_offset(or, or->in.total_bytes, &in_padding);
  1207. ret = _req_append_segment(or, in_padding, &or->get_attr, NULL,
  1208. &or->in);
  1209. if (ret)
  1210. return ret;
  1211. if (or->set_attr.total_bytes == 0)
  1212. return 0;
  1213. /* set one value */
  1214. cdbh->attrs_page.set_attr_offset =
  1215. osd_req_encode_offset(or, or->out.total_bytes, &out_padding);
  1216. ret = _req_append_segment(or, out_padding, &or->set_attr, NULL,
  1217. &or->out);
  1218. return ret;
  1219. }
  1220. static inline void osd_sec_parms_set_out_offset(bool is_v1,
  1221. struct osd_security_parameters *sec_parms, osd_cdb_offset offset)
  1222. {
  1223. if (is_v1)
  1224. sec_parms->v1.data_out_integrity_check_offset = offset;
  1225. else
  1226. sec_parms->v2.data_out_integrity_check_offset = offset;
  1227. }
  1228. static inline void osd_sec_parms_set_in_offset(bool is_v1,
  1229. struct osd_security_parameters *sec_parms, osd_cdb_offset offset)
  1230. {
  1231. if (is_v1)
  1232. sec_parms->v1.data_in_integrity_check_offset = offset;
  1233. else
  1234. sec_parms->v2.data_in_integrity_check_offset = offset;
  1235. }
static int _osd_req_finalize_data_integrity(struct osd_request *or,
	bool has_in, bool has_out, struct bio *out_data_bio, u64 out_data_bytes,
	const u8 *cap_key)
{
	struct osd_security_parameters *sec_parms = _osd_req_sec_params(or);
	int ret;

	if (!osd_is_sec_alldata(sec_parms))
		return 0;

	if (has_out) {
		struct _osd_req_data_segment seg = {
			.buff = &or->out_data_integ,
			.total_bytes = sizeof(or->out_data_integ),
		};
		unsigned pad;

		or->out_data_integ.data_bytes = cpu_to_be64(out_data_bytes);
		or->out_data_integ.set_attributes_bytes = cpu_to_be64(
			or->set_attr.total_bytes);
		or->out_data_integ.get_attributes_bytes = cpu_to_be64(
			or->enc_get_attr.total_bytes);

		osd_sec_parms_set_out_offset(osd_req_is_ver1(or), sec_parms,
			osd_req_encode_offset(or, or->out.total_bytes, &pad));

		ret = _req_append_segment(or, pad, &seg, or->out.last_seg,
					  &or->out);
		if (ret)
			return ret;
		or->out.last_seg = NULL;

		/* they are all now chained to the request; sign them together */
		osd_sec_sign_data(&or->out_data_integ, out_data_bio,
				  cap_key);
	}

	if (has_in) {
		struct _osd_req_data_segment seg = {
			.buff = &or->in_data_integ,
			.total_bytes = sizeof(or->in_data_integ),
		};
		unsigned pad;

		osd_sec_parms_set_in_offset(osd_req_is_ver1(or), sec_parms,
			osd_req_encode_offset(or, or->in.total_bytes, &pad));

		ret = _req_append_segment(or, pad, &seg, or->in.last_seg,
					  &or->in);
		if (ret)
			return ret;
		or->in.last_seg = NULL;
	}

	return 0;
}

/*
 * osd_finalize_request and helpers
 */
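
/*
 * Build the block-layer request: map an already prepared bio chain with
 * blk_make_request(), or allocate an empty block-PC request when there is
 * no data payload.
 */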
static struct request *_make_request(struct request_queue *q, bool has_write,
				     struct _osd_io_info *oii, gfp_t flags)
{
	if (oii->bio)
		return blk_make_request(q, oii->bio, flags);
	else {
		struct request *req;

		req = blk_get_request(q, has_write ? WRITE : READ, flags);
		if (unlikely(!req))
			return ERR_PTR(-ENOMEM);

		blk_rq_set_block_pc(req);
		return req;
	}
}
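
/*
 * Allocate the main request (data-out or data-in) and, for bidirectional
 * commands, a second request chained via next_rq for the data-in side;
 * then set the common timeout/retries/sense fields.
 */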
static int _init_blk_request(struct osd_request *or,
	bool has_in, bool has_out)
{
	gfp_t flags = or->alloc_flags;
	struct scsi_device *scsi_device = or->osd_dev->scsi_device;
	struct request_queue *q = scsi_device->request_queue;
	struct request *req;
	int ret;

	req = _make_request(q, has_out, has_out ? &or->out : &or->in, flags);
	if (IS_ERR(req)) {
		ret = PTR_ERR(req);
		goto out;
	}

	or->request = req;
	req->cmd_flags |= REQ_QUIET;

	req->timeout = or->timeout;
	req->retries = or->retries;
	req->sense = or->sense;
	req->sense_len = 0;

	if (has_out) {
		or->out.req = req;
		if (has_in) {
			/* allocate bidi request */
			req = _make_request(q, false, &or->in, flags);
			if (IS_ERR(req)) {
				OSD_DEBUG("blk_get_request for bidi failed\n");
				ret = PTR_ERR(req);
				goto out;
			}
			blk_rq_set_block_pc(req);
			or->in.req = or->request->next_rq = req;
		}
	} else if (has_in)
		or->in.req = req;

	ret = 0;
out:
	OSD_DEBUG("or=%p has_in=%d has_out=%d => %d, %p\n",
			or, has_in, has_out, ret, or->request);
	return ret;
}
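
/*
 * osd_finalize_request - encode the accumulated state into a runnable request
 *
 * Applies the CDB option flags and capability, allocates the block-layer
 * request(s), finalizes the CDB-continuation, attribute and data-integrity
 * segments, signs the CDB, and points or->request->cmd at the finished CDB.
 * After this the caller typically hands or->request to the block layer,
 * e.g. via osd_execute_request().
 */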
int osd_finalize_request(struct osd_request *or,
	u8 options, const void *cap, const u8 *cap_key)
{
	struct osd_cdb_head *cdbh = osd_cdb_head(&or->cdb);
	bool has_in, has_out;
	/* Save for data_integrity without the cdb_continuation */
	struct bio *out_data_bio = or->out.bio;
	u64 out_data_bytes = or->out.total_bytes;
	int ret;

	if (options & OSD_REQ_FUA)
		cdbh->options |= OSD_CDB_FUA;

	if (options & OSD_REQ_DPO)
		cdbh->options |= OSD_CDB_DPO;

	if (options & OSD_REQ_BYPASS_TIMESTAMPS)
		cdbh->timestamp_control = OSD_CDB_BYPASS_TIMESTAMPS;

	osd_set_caps(&or->cdb, cap);

	has_in = or->in.bio || or->get_attr.total_bytes;
	has_out = or->out.bio || or->cdb_cont.total_bytes ||
		or->set_attr.total_bytes || or->enc_get_attr.total_bytes;

	ret = _osd_req_finalize_cdb_cont(or, cap_key);
	if (ret) {
		OSD_DEBUG("_osd_req_finalize_cdb_cont failed\n");
		return ret;
	}
	ret = _init_blk_request(or, has_in, has_out);
	if (ret) {
		OSD_DEBUG("_init_blk_request failed\n");
		return ret;
	}

	or->out.pad_buff = sg_out_pad_buffer;
	or->in.pad_buff = sg_in_pad_buffer;

	if (!or->attributes_mode)
		or->attributes_mode = OSD_CDB_GET_SET_ATTR_LISTS;
	cdbh->command_specific_options |= or->attributes_mode;
	if (or->attributes_mode == OSD_CDB_GET_ATTR_PAGE_SET_ONE) {
		ret = _osd_req_finalize_attr_page(or);
		if (ret) {
			OSD_DEBUG("_osd_req_finalize_attr_page failed\n");
			return ret;
		}
	} else {
		/* TODO: I think that for the GET_ATTR command these two should
		 * be reversed, to keep them in execution order (for embedded
		 * targets with a low memory footprint).
		 */
		ret = _osd_req_finalize_set_attr_list(or);
		if (ret) {
			OSD_DEBUG("_osd_req_finalize_set_attr_list failed\n");
			return ret;
		}

		ret = _osd_req_finalize_get_attr_list(or);
		if (ret) {
			OSD_DEBUG("_osd_req_finalize_get_attr_list failed\n");
			return ret;
		}
	}

	ret = _osd_req_finalize_data_integrity(or, has_in, has_out,
					       out_data_bio, out_data_bytes,
					       cap_key);
	if (ret)
		return ret;

	osd_sec_sign_cdb(&or->cdb, cap_key);

	or->request->cmd = or->cdb.buff;
	or->request->cmd_len = _osd_req_cdb_len(or);

	return 0;
}
EXPORT_SYMBOL(osd_finalize_request);
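
/* Additional-sense codes that indicate a credential/security failure */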
static bool _is_osd_security_code(int code)
{
	return	(code == osd_security_audit_value_frozen) ||
		(code == osd_security_working_key_frozen) ||
		(code == osd_nonce_not_unique) ||
		(code == osd_nonce_timestamp_out_of_range) ||
		(code == osd_invalid_dataout_buffer_integrity_check_value);
}
#define OSD_SENSE_PRINT1(fmt, a...) \
	do { \
		if (__cur_sense_need_output) \
			OSD_ERR(fmt, ##a); \
	} while (0)

#define OSD_SENSE_PRINT2(fmt, a...) OSD_SENSE_PRINT1(" " fmt, ##a)
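
/*
 * Decode the descriptor-format SCSI sense data returned for this request
 * into @osi, optionally collecting offending attributes into @bad_attr_list,
 * and translate the result into an osd_err_priority plus a negative errno
 * (or 0 on success / recovered errors).
 */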
int osd_req_decode_sense_full(struct osd_request *or,
	struct osd_sense_info *osi, bool silent,
	struct osd_obj_id *bad_obj_list __unused, int max_obj __unused,
	struct osd_attr *bad_attr_list, int max_attr)
{
	int sense_len, original_sense_len;
	struct osd_sense_info local_osi;
	struct scsi_sense_descriptor_based *ssdb;
	void *cur_descriptor;
#if (CONFIG_SCSI_OSD_DPRINT_SENSE == 0)
	const bool __cur_sense_need_output = false;
#else
	bool __cur_sense_need_output = !silent;
#endif
	int ret;

	if (likely(!or->req_errors))
		return 0;

	osi = osi ? : &local_osi;
	memset(osi, 0, sizeof(*osi));

	ssdb = (typeof(ssdb))or->sense;
	sense_len = or->sense_len;
	if (sense_len < (int)sizeof(*ssdb) || !ssdb->sense_key) {
		OSD_ERR("Block-layer returned error(0x%x) but "
			"sense_len(%u) or key(%d) is empty\n",
			or->req_errors, sense_len, ssdb->sense_key);
		goto analyze;
	}

	if ((ssdb->response_code != 0x72) && (ssdb->response_code != 0x73)) {
		OSD_ERR("Unrecognized scsi sense: rcode=%x length=%d\n",
			ssdb->response_code, sense_len);
		goto analyze;
	}

	osi->key = ssdb->sense_key;
	osi->additional_code = be16_to_cpu(ssdb->additional_sense_code);
	original_sense_len = ssdb->additional_sense_length + 8;

#if (CONFIG_SCSI_OSD_DPRINT_SENSE == 1)
	if (__cur_sense_need_output)
		__cur_sense_need_output = (osi->key > scsi_sk_recovered_error);
#endif
	OSD_SENSE_PRINT1("Main Sense information key=0x%x length(%d, %d) "
			"additional_code=0x%x async_error=%d errors=0x%x\n",
			osi->key, original_sense_len, sense_len,
			osi->additional_code, or->async_error,
			or->req_errors);

	if (original_sense_len < sense_len)
		sense_len = original_sense_len;

	cur_descriptor = ssdb->ssd;
	sense_len -= sizeof(*ssdb);
	while (sense_len > 0) {
		struct scsi_sense_descriptor *ssd = cur_descriptor;
		int cur_len = ssd->additional_length + 2;

		sense_len -= cur_len;

		if (sense_len < 0)
			break; /* sense was truncated */

		switch (ssd->descriptor_type) {
		case scsi_sense_information:
		case scsi_sense_command_specific_information:
		{
			struct scsi_sense_command_specific_data_descriptor
				*sscd = cur_descriptor;

			osi->command_info =
				get_unaligned_be64(&sscd->information);
			OSD_SENSE_PRINT2(
				"command_specific_information 0x%llx\n",
				_LLU(osi->command_info));
			break;
		}
		case scsi_sense_key_specific:
		{
			struct scsi_sense_key_specific_data_descriptor
				*ssks = cur_descriptor;

			osi->sense_info = get_unaligned_be16(&ssks->value);
			OSD_SENSE_PRINT2(
				"sense_key_specific_information %u "
				"sksv_cd_bpv_bp (0x%x)\n",
				osi->sense_info, ssks->sksv_cd_bpv_bp);
			break;
		}
		case osd_sense_object_identification:
		{ /* FIXME: Keep first not last, store in array */
			struct osd_sense_identification_data_descriptor
				*osidd = cur_descriptor;

			osi->not_initiated_command_functions =
				le32_to_cpu(osidd->not_initiated_functions);
			osi->completed_command_functions =
				le32_to_cpu(osidd->completed_functions);
			osi->obj.partition = be64_to_cpu(osidd->partition_id);
			osi->obj.id = be64_to_cpu(osidd->object_id);
			OSD_SENSE_PRINT2(
				"object_identification pid=0x%llx oid=0x%llx\n",
				_LLU(osi->obj.partition), _LLU(osi->obj.id));
			OSD_SENSE_PRINT2(
				"not_initiated_bits(%x) "
				"completed_command_bits(%x)\n",
				osi->not_initiated_command_functions,
				osi->completed_command_functions);
			break;
		}
		case osd_sense_response_integrity_check:
		{
			struct osd_sense_response_integrity_check_descriptor
				*osricd = cur_descriptor;
			const unsigned len =
					sizeof(osricd->integrity_check_value);
			char key_dump[len*4 + 2]; /* 2 nibbles + space + ASCII */

			hex_dump_to_buffer(osricd->integrity_check_value, len,
					32, 1, key_dump, sizeof(key_dump), true);
			OSD_SENSE_PRINT2("response_integrity [%s]\n", key_dump);
			break;
		}
		case osd_sense_attribute_identification:
		{
			struct osd_sense_attributes_data_descriptor
				*osadd = cur_descriptor;
			unsigned len = min(cur_len, sense_len);
			struct osd_sense_attr *pattr = osadd->sense_attrs;

			while (len >= sizeof(*pattr)) {
				u32 attr_page = be32_to_cpu(pattr->attr_page);
				u32 attr_id = be32_to_cpu(pattr->attr_id);

				if (!osi->attr.attr_page) {
					osi->attr.attr_page = attr_page;
					osi->attr.attr_id = attr_id;
				}

				if (bad_attr_list && max_attr) {
					bad_attr_list->attr_page = attr_page;
					bad_attr_list->attr_id = attr_id;
					bad_attr_list++;
					max_attr--;
				}

				/* advance to the next reported attribute */
				pattr++;
				len -= sizeof(*pattr);
				OSD_SENSE_PRINT2(
					"osd_sense_attribute_identification "
					"attr_page=0x%x attr_id=0x%x\n",
					attr_page, attr_id);
			}
			break;
		}
		/* These are not legal for OSD */
		case scsi_sense_field_replaceable_unit:
			OSD_SENSE_PRINT2("scsi_sense_field_replaceable_unit\n");
			break;
		case scsi_sense_stream_commands:
			OSD_SENSE_PRINT2("scsi_sense_stream_commands\n");
			break;
		case scsi_sense_block_commands:
			OSD_SENSE_PRINT2("scsi_sense_block_commands\n");
			break;
		case scsi_sense_ata_return:
			OSD_SENSE_PRINT2("scsi_sense_ata_return\n");
			break;
		default:
			if (ssd->descriptor_type <= scsi_sense_Reserved_last)
				OSD_SENSE_PRINT2(
					"scsi_sense Reserved descriptor (0x%x)\n",
					ssd->descriptor_type);
			else
				OSD_SENSE_PRINT2(
					"scsi_sense Vendor descriptor (0x%x)\n",
					ssd->descriptor_type);
		}

		cur_descriptor += cur_len;
	}
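
	/*
	 * Convert what was decoded (or the lack of sense data) into an
	 * osd_err_priority and a matching negative errno for the caller.
	 */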
analyze:
	if (!osi->key) {
		/* scsi sense is empty, the request was never issued to the
		 * target; the Linux return code might tell us what happened.
		 */
		if (or->async_error == -ENOMEM)
			osi->osd_err_pri = OSD_ERR_PRI_RESOURCE;
		else
			osi->osd_err_pri = OSD_ERR_PRI_UNREACHABLE;
		ret = or->async_error;
	} else if (osi->key <= scsi_sk_recovered_error) {
		osi->osd_err_pri = 0;
		ret = 0;
	} else if (osi->additional_code == scsi_invalid_field_in_cdb) {
		if (osi->cdb_field_offset == OSD_CFO_STARTING_BYTE) {
			osi->osd_err_pri = OSD_ERR_PRI_CLEAR_PAGES;
			ret = -EFAULT; /* caller should recover from this */
		} else if (osi->cdb_field_offset == OSD_CFO_OBJECT_ID) {
			osi->osd_err_pri = OSD_ERR_PRI_NOT_FOUND;
			ret = -ENOENT;
		} else if (osi->cdb_field_offset == OSD_CFO_PERMISSIONS) {
			osi->osd_err_pri = OSD_ERR_PRI_NO_ACCESS;
			ret = -EACCES;
		} else {
			osi->osd_err_pri = OSD_ERR_PRI_BAD_CRED;
			ret = -EINVAL;
		}
	} else if (osi->additional_code == osd_quota_error) {
		osi->osd_err_pri = OSD_ERR_PRI_NO_SPACE;
		ret = -ENOSPC;
	} else if (_is_osd_security_code(osi->additional_code)) {
		osi->osd_err_pri = OSD_ERR_PRI_BAD_CRED;
		ret = -EINVAL;
	} else {
		osi->osd_err_pri = OSD_ERR_PRI_EIO;
		ret = -EIO;
	}

	if (!or->out.residual)
		or->out.residual = or->out.total_bytes;
	if (!or->in.residual)
		or->in.residual = or->in.total_bytes;

	return ret;
}
EXPORT_SYMBOL(osd_req_decode_sense_full);

/*
 * Implementation of osd_sec.h API
 * TODO: Move to a separate osd_sec.c file at a later stage.
 */
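
/* Convenience masks combining the OSDv1 and OSDv2 capability bits */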
enum { OSD_SEC_CAP_V1_ALL_CAPS =
	OSD_SEC_CAP_APPEND | OSD_SEC_CAP_OBJ_MGMT | OSD_SEC_CAP_REMOVE |
	OSD_SEC_CAP_CREATE | OSD_SEC_CAP_SET_ATTR | OSD_SEC_CAP_GET_ATTR |
	OSD_SEC_CAP_WRITE | OSD_SEC_CAP_READ | OSD_SEC_CAP_POL_SEC |
	OSD_SEC_CAP_GLOBAL | OSD_SEC_CAP_DEV_MGMT
};

enum { OSD_SEC_CAP_V2_ALL_CAPS =
	OSD_SEC_CAP_V1_ALL_CAPS | OSD_SEC_CAP_QUERY | OSD_SEC_CAP_M_OBJECT
};
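
/*
 * Fill @caps with an OSDv1-format capability that uses the NOSEC security
 * method and grants every capability bit for the given object (root,
 * partition, collection or user object).
 */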
void osd_sec_init_nosec_doall_caps(void *caps,
	const struct osd_obj_id *obj, bool is_collection, const bool is_v1)
{
	struct osd_capability *cap = caps;
	u8 type;
	u8 descriptor_type;

	if (likely(obj->id)) {
		if (unlikely(is_collection)) {
			type = OSD_SEC_OBJ_COLLECTION;
			descriptor_type = is_v1 ? OSD_SEC_OBJ_DESC_OBJ :
						  OSD_SEC_OBJ_DESC_COL;
		} else {
			type = OSD_SEC_OBJ_USER;
			descriptor_type = OSD_SEC_OBJ_DESC_OBJ;
		}
		WARN_ON(!obj->partition);
	} else {
		type = obj->partition ? OSD_SEC_OBJ_PARTITION :
					OSD_SEC_OBJ_ROOT;
		descriptor_type = OSD_SEC_OBJ_DESC_PAR;
	}

	memset(cap, 0, sizeof(*cap));

	cap->h.format = OSD_SEC_CAP_FORMAT_VER1;
	cap->h.integrity_algorithm__key_version = 0; /* MAKE_BYTE(0, 0); */
	cap->h.security_method = OSD_SEC_NOSEC;
/*	cap->expiration_time;
	cap->AUDIT[30-10];
	cap->discriminator[42-30];
	cap->object_created_time; */
	cap->h.object_type = type;
	osd_sec_set_caps(&cap->h, OSD_SEC_CAP_V1_ALL_CAPS);
	cap->h.object_descriptor_type = descriptor_type;
	cap->od.obj_desc.policy_access_tag = 0;
	cap->od.obj_desc.allowed_partition_id = cpu_to_be64(obj->partition);
	cap->od.obj_desc.allowed_object_id = cpu_to_be64(obj->id);
}
EXPORT_SYMBOL(osd_sec_init_nosec_doall_caps);

/* FIXME: Extract version from caps pointer.
 *	  Also Pete's target only supports caps from OSDv1 for now
 */
void osd_set_caps(struct osd_cdb *cdb, const void *caps)
{
	bool is_ver1 = true;
	/* NOTE: They start at same address */
	memcpy(&cdb->v1.caps, caps, is_ver1 ? OSDv1_CAP_LEN : OSD_CAP_LEN);
}
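
/*
 * Security stubs: no CDB or data signing is performed, and
 * osd_is_sec_alldata() always reports false, so the data-integrity segments
 * prepared in _osd_req_finalize_data_integrity() are never generated.
 */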
bool osd_is_sec_alldata(struct osd_security_parameters *sec_parms __unused)
{
	return false;
}

void osd_sec_sign_cdb(struct osd_cdb *ocdb __unused, const u8 *cap_key __unused)
{
}

void osd_sec_sign_data(void *data_integ __unused,
		       struct bio *bio __unused, const u8 *cap_key __unused)
{
}

/*
 * Declared in osd_protocol.h
 * 4.12.5 Data-In and Data-Out buffer offsets
 *	byte offset = mantissa * (2^(exponent+8))
 * Returns the smallest allowed encoded offset that contains the given @offset.
 * The byte offset actually represented is @offset + *@padding.
 */
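/*
 * Worked example (illustrative): with min_shift == 8, @offset == 5000 gives
 * shift == 8 and mantissa == 5000 >> 8 == 19 with a remainder of 136, so the
 * mantissa is rounded up to 20 and *@padding == 120; the encoded value then
 * represents 20 * 256 == 5120 == 5000 + 120 bytes.
 */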
osd_cdb_offset __osd_encode_offset(
	u64 offset, unsigned *padding, int min_shift, int max_shift)
{
	u64 try_offset = -1, mod, align;
	osd_cdb_offset be32_offset;
	int shift;

	*padding = 0;
	if (!offset)
		return 0;

	for (shift = min_shift; shift < max_shift; ++shift) {
		try_offset = offset >> shift;
		if (try_offset < (1 << OSD_OFFSET_MAX_BITS))
			break;
	}

	BUG_ON(shift == max_shift);

	align = 1 << shift;
	mod = offset & (align - 1);
	if (mod) {
		*padding = align - mod;
		try_offset += 1;
	}

	try_offset |= ((shift - 8) & 0xf) << 28;
	be32_offset = cpu_to_be32((u32)try_offset);

	OSD_DEBUG("offset=%llu mantissa=%llu exp=%d encoded=%x pad=%d\n",
		  _LLU(offset), _LLU(try_offset & 0x0FFFFFFF), shift,
		  be32_offset, *padding);

	return be32_offset;
}