#include <linux/ceph/ceph_debug.h>

#include "super.h"
#include "mds_client.h"

#include <linux/ceph/decode.h>
#include <linux/xattr.h>
#include <linux/slab.h>

#define XATTR_CEPH_PREFIX "ceph."
#define XATTR_CEPH_PREFIX_LEN (sizeof (XATTR_CEPH_PREFIX) - 1)

static bool ceph_is_valid_xattr(const char *name)
{
	return !strncmp(name, XATTR_CEPH_PREFIX, XATTR_CEPH_PREFIX_LEN) ||
	       !strncmp(name, XATTR_SECURITY_PREFIX,
			XATTR_SECURITY_PREFIX_LEN) ||
	       !strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) ||
	       !strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN);
}

/*
 * These define virtual xattrs exposing the recursive directory
 * statistics and layout metadata.
 */
struct ceph_vxattr {
	char *name;
	size_t name_size;	/* strlen(name) + 1 (for '\0') */
	size_t (*getxattr_cb)(struct ceph_inode_info *ci, char *val,
			      size_t size);
	bool readonly;
};
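
/*
 * From userspace these are read like any other extended attribute, for
 * example (hypothetical mount point, using the standard getfattr(1)
 * tool):
 *
 *	getfattr -n ceph.dir.rbytes /mnt/ceph/some/dir
 */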

/* directories */

static size_t ceph_vxattrcb_dir_entries(struct ceph_inode_info *ci, char *val,
					size_t size)
{
	return snprintf(val, size, "%lld", ci->i_files + ci->i_subdirs);
}

static size_t ceph_vxattrcb_dir_files(struct ceph_inode_info *ci, char *val,
				      size_t size)
{
	return snprintf(val, size, "%lld", ci->i_files);
}

static size_t ceph_vxattrcb_dir_subdirs(struct ceph_inode_info *ci, char *val,
					size_t size)
{
	return snprintf(val, size, "%lld", ci->i_subdirs);
}

static size_t ceph_vxattrcb_dir_rentries(struct ceph_inode_info *ci, char *val,
					 size_t size)
{
	return snprintf(val, size, "%lld", ci->i_rfiles + ci->i_rsubdirs);
}

static size_t ceph_vxattrcb_dir_rfiles(struct ceph_inode_info *ci, char *val,
				       size_t size)
{
	return snprintf(val, size, "%lld", ci->i_rfiles);
}

static size_t ceph_vxattrcb_dir_rsubdirs(struct ceph_inode_info *ci, char *val,
					 size_t size)
{
	return snprintf(val, size, "%lld", ci->i_rsubdirs);
}

static size_t ceph_vxattrcb_dir_rbytes(struct ceph_inode_info *ci, char *val,
				       size_t size)
{
	return snprintf(val, size, "%lld", ci->i_rbytes);
}

static size_t ceph_vxattrcb_dir_rctime(struct ceph_inode_info *ci, char *val,
				       size_t size)
{
	return snprintf(val, size, "%ld.%09ld", (long)ci->i_rctime.tv_sec,
			(long)ci->i_rctime.tv_nsec);
}

#define CEPH_XATTR_NAME(_type, _name)	XATTR_CEPH_PREFIX #_type "." #_name

#define XATTR_NAME_CEPH(_type, _name) \
	{ \
		.name = CEPH_XATTR_NAME(_type, _name), \
		.name_size = sizeof (CEPH_XATTR_NAME(_type, _name)), \
		.getxattr_cb = ceph_vxattrcb_ ## _type ## _ ## _name, \
		.readonly = true, \
	}

static struct ceph_vxattr ceph_dir_vxattrs[] = {
	XATTR_NAME_CEPH(dir, entries),
	XATTR_NAME_CEPH(dir, files),
	XATTR_NAME_CEPH(dir, subdirs),
	XATTR_NAME_CEPH(dir, rentries),
	XATTR_NAME_CEPH(dir, rfiles),
	XATTR_NAME_CEPH(dir, rsubdirs),
	XATTR_NAME_CEPH(dir, rbytes),
	XATTR_NAME_CEPH(dir, rctime),
	{ 0 }	/* Required table terminator */
};
static size_t ceph_dir_vxattrs_name_size;	/* total size of all names */

/* files */

static size_t ceph_vxattrcb_file_layout(struct ceph_inode_info *ci, char *val,
					size_t size)
{
	int ret;

	ret = snprintf(val, size,
		"chunk_bytes=%lld\nstripe_count=%lld\nobject_size=%lld\n",
		(unsigned long long)ceph_file_layout_su(ci->i_layout),
		(unsigned long long)ceph_file_layout_stripe_count(ci->i_layout),
		(unsigned long long)ceph_file_layout_object_size(ci->i_layout));

	if (ceph_file_layout_pg_preferred(ci->i_layout) >= 0) {
		val += ret;
		size -= ret;
		ret += snprintf(val, size, "preferred_osd=%lld\n",
			    (unsigned long long)ceph_file_layout_pg_preferred(
				ci->i_layout));
	}
	return ret;
}

static struct ceph_vxattr ceph_file_vxattrs[] = {
	XATTR_NAME_CEPH(file, layout),
	/* The following extended attribute name is deprecated */
	{
		.name = XATTR_CEPH_PREFIX "layout",
		.name_size = sizeof (XATTR_CEPH_PREFIX "layout"),
		.getxattr_cb = ceph_vxattrcb_file_layout,
		.readonly = true,
	},
	{ 0 }	/* Required table terminator */
};
static size_t ceph_file_vxattrs_name_size;	/* total size of all names */

static struct ceph_vxattr *ceph_inode_vxattrs(struct inode *inode)
{
	if (S_ISDIR(inode->i_mode))
		return ceph_dir_vxattrs;
	else if (S_ISREG(inode->i_mode))
		return ceph_file_vxattrs;
	return NULL;
}

static size_t ceph_vxattrs_name_size(struct ceph_vxattr *vxattrs)
{
	if (vxattrs == ceph_dir_vxattrs)
		return ceph_dir_vxattrs_name_size;
	if (vxattrs == ceph_file_vxattrs)
		return ceph_file_vxattrs_name_size;
	BUG();

	return 0;
}

/*
 * Compute the aggregate size (including terminating '\0') of all
 * virtual extended attribute names in the given vxattr table.
 */
static size_t __init vxattrs_name_size(struct ceph_vxattr *vxattrs)
{
	struct ceph_vxattr *vxattr;
	size_t size = 0;

	for (vxattr = vxattrs; vxattr->name; vxattr++)
		size += vxattr->name_size;

	return size;
}

/* Routines called at initialization and exit time */
void __init ceph_xattr_init(void)
{
	ceph_dir_vxattrs_name_size = vxattrs_name_size(ceph_dir_vxattrs);
	ceph_file_vxattrs_name_size = vxattrs_name_size(ceph_file_vxattrs);
}

void ceph_xattr_exit(void)
{
	ceph_dir_vxattrs_name_size = 0;
	ceph_file_vxattrs_name_size = 0;
}

static struct ceph_vxattr *ceph_match_vxattr(struct inode *inode,
					     const char *name)
{
	struct ceph_vxattr *vxattr = ceph_inode_vxattrs(inode);

	if (vxattr) {
		while (vxattr->name) {
			if (!strcmp(vxattr->name, name))
				return vxattr;
			vxattr++;
		}
	}
	return NULL;
}
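
/*
 * Insert a new xattr into the per-inode rb-tree index, or update the
 * value of an existing entry.  *newxattr is a preallocated node that is
 * consumed on insert, or freed (and set to NULL) when an existing entry
 * is reused instead.  Caller must hold i_ceph_lock.
 */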
static int __set_xattr(struct ceph_inode_info *ci,
		       const char *name, int name_len,
		       const char *val, int val_len,
		       int dirty,
		       int should_free_name, int should_free_val,
		       struct ceph_inode_xattr **newxattr)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct ceph_inode_xattr *xattr = NULL;
	int c;
	int new = 0;

	p = &ci->i_xattrs.index.rb_node;
	while (*p) {
		parent = *p;
		xattr = rb_entry(parent, struct ceph_inode_xattr, node);
		c = strncmp(name, xattr->name, min(name_len, xattr->name_len));
		if (c < 0)
			p = &(*p)->rb_left;
		else if (c > 0)
			p = &(*p)->rb_right;
		else {
			if (name_len == xattr->name_len)
				break;
			else if (name_len < xattr->name_len)
				p = &(*p)->rb_left;
			else
				p = &(*p)->rb_right;
		}
		xattr = NULL;
	}

	if (!xattr) {
		new = 1;
		xattr = *newxattr;
		xattr->name = name;
		xattr->name_len = name_len;
		xattr->should_free_name = should_free_name;

		ci->i_xattrs.count++;
		dout("__set_xattr count=%d\n", ci->i_xattrs.count);
	} else {
		kfree(*newxattr);
		*newxattr = NULL;

		if (xattr->should_free_val)
			kfree((void *)xattr->val);

		if (should_free_name) {
			kfree((void *)name);
			name = xattr->name;
		}
		ci->i_xattrs.names_size -= xattr->name_len;
		ci->i_xattrs.vals_size -= xattr->val_len;
	}
	ci->i_xattrs.names_size += name_len;
	ci->i_xattrs.vals_size += val_len;
	if (val)
		xattr->val = val;
	else
		xattr->val = "";

	xattr->val_len = val_len;
	xattr->dirty = dirty;
	xattr->should_free_val = (val && should_free_val);

	if (new) {
		rb_link_node(&xattr->node, parent, p);
		rb_insert_color(&xattr->node, &ci->i_xattrs.index);
		dout("__set_xattr_val p=%p\n", p);
	}

	dout("__set_xattr_val added %llx.%llx xattr %p %s=%.*s\n",
	     ceph_vinop(&ci->vfs_inode), xattr, name, val_len, val);

	return 0;
}
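
/*
 * Look up an xattr by name in the per-inode rb-tree index.  Returns the
 * matching entry or NULL.  Caller must hold i_ceph_lock.
 */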
static struct ceph_inode_xattr *__get_xattr(struct ceph_inode_info *ci,
					    const char *name)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct ceph_inode_xattr *xattr = NULL;
	int name_len = strlen(name);
	int c;

	p = &ci->i_xattrs.index.rb_node;
	while (*p) {
		parent = *p;
		xattr = rb_entry(parent, struct ceph_inode_xattr, node);
		c = strncmp(name, xattr->name, xattr->name_len);
		if (c == 0 && name_len > xattr->name_len)
			c = 1;
		if (c < 0)
			p = &(*p)->rb_left;
		else if (c > 0)
			p = &(*p)->rb_right;
		else {
			dout("__get_xattr %s: found %.*s\n", name,
			     xattr->val_len, xattr->val);
			return xattr;
		}
	}

	dout("__get_xattr %s: not found\n", name);

	return NULL;
}

static void __free_xattr(struct ceph_inode_xattr *xattr)
{
	BUG_ON(!xattr);

	if (xattr->should_free_name)
		kfree((void *)xattr->name);
	if (xattr->should_free_val)
		kfree((void *)xattr->val);

	kfree(xattr);
}

static int __remove_xattr(struct ceph_inode_info *ci,
			  struct ceph_inode_xattr *xattr)
{
	if (!xattr)
		return -EOPNOTSUPP;

	rb_erase(&xattr->node, &ci->i_xattrs.index);

	if (xattr->should_free_name)
		kfree((void *)xattr->name);
	if (xattr->should_free_val)
		kfree((void *)xattr->val);

	ci->i_xattrs.names_size -= xattr->name_len;
	ci->i_xattrs.vals_size -= xattr->val_len;
	ci->i_xattrs.count--;
	kfree(xattr);

	return 0;
}

static int __remove_xattr_by_name(struct ceph_inode_info *ci,
				  const char *name)
{
	struct rb_node **p;
	struct ceph_inode_xattr *xattr;
	int err;

	p = &ci->i_xattrs.index.rb_node;
	xattr = __get_xattr(ci, name);
	err = __remove_xattr(ci, xattr);
	return err;
}

static char *__copy_xattr_names(struct ceph_inode_info *ci,
				char *dest)
{
	struct rb_node *p;
	struct ceph_inode_xattr *xattr = NULL;

	p = rb_first(&ci->i_xattrs.index);
	dout("__copy_xattr_names count=%d\n", ci->i_xattrs.count);

	while (p) {
		xattr = rb_entry(p, struct ceph_inode_xattr, node);
		memcpy(dest, xattr->name, xattr->name_len);
		dest[xattr->name_len] = '\0';

		dout("dest=%s %p (%s) (%d/%d)\n", dest, xattr, xattr->name,
		     xattr->name_len, ci->i_xattrs.names_size);

		dest += xattr->name_len + 1;
		p = rb_next(p);
	}

	return dest;
}

void __ceph_destroy_xattrs(struct ceph_inode_info *ci)
{
	struct rb_node *p, *tmp;
	struct ceph_inode_xattr *xattr = NULL;

	p = rb_first(&ci->i_xattrs.index);

	dout("__ceph_destroy_xattrs p=%p\n", p);

	while (p) {
		xattr = rb_entry(p, struct ceph_inode_xattr, node);
		tmp = p;
		p = rb_next(tmp);

		dout("__ceph_destroy_xattrs next p=%p (%.*s)\n", p,
		     xattr->name_len, xattr->name);
		rb_erase(tmp, &ci->i_xattrs.index);

		__free_xattr(xattr);
	}

	ci->i_xattrs.names_size = 0;
	ci->i_xattrs.vals_size = 0;
	ci->i_xattrs.index_version = 0;
	ci->i_xattrs.count = 0;
	ci->i_xattrs.index = RB_ROOT;
}
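
/*
 * Decode the xattr blob most recently received from the MDS into the
 * per-inode rb-tree index.  The caller holds i_ceph_lock; the lock is
 * dropped while the node array is allocated and re-taken afterwards,
 * restarting from scratch if the blob changed in the meantime.
 */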
static int __build_xattrs(struct inode *inode)
	__releases(ci->i_ceph_lock)
	__acquires(ci->i_ceph_lock)
{
	u32 namelen;
	u32 numattr = 0;
	void *p, *end;
	u32 len;
	const char *name, *val;
	struct ceph_inode_info *ci = ceph_inode(inode);
	int xattr_version;
	struct ceph_inode_xattr **xattrs = NULL;
	int err = 0;
	int i;

	dout("__build_xattrs() len=%d\n",
	     ci->i_xattrs.blob ? (int)ci->i_xattrs.blob->vec.iov_len : 0);

	if (ci->i_xattrs.index_version >= ci->i_xattrs.version)
		return 0; /* already built */

	__ceph_destroy_xattrs(ci);

start:
	/* update the internal xattr rb tree */
	if (ci->i_xattrs.blob && ci->i_xattrs.blob->vec.iov_len > 4) {
		p = ci->i_xattrs.blob->vec.iov_base;
		end = p + ci->i_xattrs.blob->vec.iov_len;
		ceph_decode_32_safe(&p, end, numattr, bad);
		xattr_version = ci->i_xattrs.version;
		spin_unlock(&ci->i_ceph_lock);

		xattrs = kcalloc(numattr, sizeof(struct ceph_xattr *),
				 GFP_NOFS);
		err = -ENOMEM;
		if (!xattrs)
			goto bad_lock;
		memset(xattrs, 0, numattr*sizeof(struct ceph_xattr *));
		for (i = 0; i < numattr; i++) {
			xattrs[i] = kmalloc(sizeof(struct ceph_inode_xattr),
					    GFP_NOFS);
			if (!xattrs[i])
				goto bad_lock;
		}

		spin_lock(&ci->i_ceph_lock);
		if (ci->i_xattrs.version != xattr_version) {
			/* lost a race, retry */
			for (i = 0; i < numattr; i++)
				kfree(xattrs[i]);
			kfree(xattrs);
			goto start;
		}
		err = -EIO;
		while (numattr--) {
			ceph_decode_32_safe(&p, end, len, bad);
			namelen = len;
			name = p;
			p += len;
			ceph_decode_32_safe(&p, end, len, bad);
			val = p;
			p += len;

			err = __set_xattr(ci, name, namelen, val, len,
					  0, 0, 0, &xattrs[numattr]);

			if (err < 0)
				goto bad;
		}
		kfree(xattrs);
	}
	ci->i_xattrs.index_version = ci->i_xattrs.version;
	ci->i_xattrs.dirty = false;

	return err;
bad_lock:
	spin_lock(&ci->i_ceph_lock);
bad:
	if (xattrs) {
		for (i = 0; i < numattr; i++)
			kfree(xattrs[i]);
		kfree(xattrs);
	}
	ci->i_xattrs.names_size = 0;
	return err;
}

static int __get_required_blob_size(struct ceph_inode_info *ci, int name_size,
				    int val_size)
{
	/*
	 * 4 bytes for the xattr count, plus a 4-byte name length and a
	 * 4-byte value length per xattr, plus the names and values
	 * themselves.
	 */
	int size = 4 + ci->i_xattrs.count*(4 + 4) +
		     ci->i_xattrs.names_size +
		     ci->i_xattrs.vals_size;
	dout("__get_required_blob_size c=%d names.size=%d vals.size=%d\n",
	     ci->i_xattrs.count, ci->i_xattrs.names_size,
	     ci->i_xattrs.vals_size);

	if (name_size)
		size += 4 + 4 + name_size + val_size;

	return size;
}

/*
 * If there are dirty xattrs, reencode xattrs into the prealloc_blob
 * and swap into place.
 */
void __ceph_build_xattrs_blob(struct ceph_inode_info *ci)
{
	struct rb_node *p;
	struct ceph_inode_xattr *xattr = NULL;
	void *dest;

	dout("__build_xattrs_blob %p\n", &ci->vfs_inode);
	if (ci->i_xattrs.dirty) {
		int need = __get_required_blob_size(ci, 0, 0);

		BUG_ON(need > ci->i_xattrs.prealloc_blob->alloc_len);

		p = rb_first(&ci->i_xattrs.index);
		dest = ci->i_xattrs.prealloc_blob->vec.iov_base;

		ceph_encode_32(&dest, ci->i_xattrs.count);
		while (p) {
			xattr = rb_entry(p, struct ceph_inode_xattr, node);

			ceph_encode_32(&dest, xattr->name_len);
			memcpy(dest, xattr->name, xattr->name_len);
			dest += xattr->name_len;
			ceph_encode_32(&dest, xattr->val_len);
			memcpy(dest, xattr->val, xattr->val_len);
			dest += xattr->val_len;

			p = rb_next(p);
		}

		/* adjust buffer len; it may be larger than we need */
		ci->i_xattrs.prealloc_blob->vec.iov_len =
			dest - ci->i_xattrs.prealloc_blob->vec.iov_base;

		if (ci->i_xattrs.blob)
			ceph_buffer_put(ci->i_xattrs.blob);

		ci->i_xattrs.blob = ci->i_xattrs.prealloc_blob;
		ci->i_xattrs.prealloc_blob = NULL;
		ci->i_xattrs.dirty = false;
		ci->i_xattrs.version++;
	}
}
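
/*
 * Answer getxattr from the local rb-tree when we hold the XATTR_SHARED
 * cap and the index is up to date; otherwise fetch the xattr blob from
 * the MDS first.  Requests for virtual xattrs are satisfied by the
 * corresponding callback.
 */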
ssize_t ceph_getxattr(struct dentry *dentry, const char *name, void *value,
		      size_t size)
{
	struct inode *inode = dentry->d_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	int err;
	struct ceph_inode_xattr *xattr;
	struct ceph_vxattr *vxattr = NULL;

	if (!ceph_is_valid_xattr(name))
		return -ENODATA;

	/* let's see if a virtual xattr was requested */
	vxattr = ceph_match_vxattr(inode, name);

	spin_lock(&ci->i_ceph_lock);
	dout("getxattr %p ver=%lld index_ver=%lld\n", inode,
	     ci->i_xattrs.version, ci->i_xattrs.index_version);

	if (__ceph_caps_issued_mask(ci, CEPH_CAP_XATTR_SHARED, 1) &&
	    (ci->i_xattrs.index_version >= ci->i_xattrs.version)) {
		goto get_xattr;
	} else {
		spin_unlock(&ci->i_ceph_lock);
		/* get xattrs from mds (if we don't already have them) */
		err = ceph_do_getattr(inode, CEPH_STAT_CAP_XATTR);
		if (err)
			return err;
	}

	spin_lock(&ci->i_ceph_lock);

	if (vxattr && vxattr->readonly) {
		err = vxattr->getxattr_cb(ci, value, size);
		goto out;
	}

	err = __build_xattrs(inode);
	if (err < 0)
		goto out;

get_xattr:
	err = -ENODATA;  /* == ENOATTR */
	xattr = __get_xattr(ci, name);
	if (!xattr) {
		if (vxattr)
			err = vxattr->getxattr_cb(ci, value, size);
		goto out;
	}

	err = -ERANGE;
	if (size && size < xattr->val_len)
		goto out;

	err = xattr->val_len;
	if (size == 0)
		goto out;

	memcpy(value, xattr->val, xattr->val_len);

out:
	spin_unlock(&ci->i_ceph_lock);
	return err;
}
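
/*
 * listxattr follows the same caps/index check as getxattr, then returns
 * the regular xattr names followed by the applicable virtual xattr
 * names, each NUL-terminated.
 */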
ssize_t ceph_listxattr(struct dentry *dentry, char *names, size_t size)
{
	struct inode *inode = dentry->d_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_vxattr *vxattrs = ceph_inode_vxattrs(inode);
	u32 vir_namelen = 0;
	u32 namelen;
	int err;
	u32 len;
	int i;

	spin_lock(&ci->i_ceph_lock);
	dout("listxattr %p ver=%lld index_ver=%lld\n", inode,
	     ci->i_xattrs.version, ci->i_xattrs.index_version);

	if (__ceph_caps_issued_mask(ci, CEPH_CAP_XATTR_SHARED, 1) &&
	    (ci->i_xattrs.index_version >= ci->i_xattrs.version)) {
		goto list_xattr;
	} else {
		spin_unlock(&ci->i_ceph_lock);
		err = ceph_do_getattr(inode, CEPH_STAT_CAP_XATTR);
		if (err)
			return err;
	}

	spin_lock(&ci->i_ceph_lock);

	err = __build_xattrs(inode);
	if (err < 0)
		goto out;

list_xattr:
	/*
	 * Start with virtual dir xattr names (if any), including the
	 * terminating '\0' character for each.
	 */
	if (vxattrs)
		vir_namelen = ceph_vxattrs_name_size(vxattrs);

	/* add 1 byte for each name's '\0' terminator */
	namelen = vir_namelen + ci->i_xattrs.names_size + ci->i_xattrs.count;
	err = -ERANGE;
	if (size && namelen > size)
		goto out;

	err = namelen;
	if (size == 0)
		goto out;

	names = __copy_xattr_names(ci, names);

	/* virtual xattr names, too */
	if (vxattrs)
		for (i = 0; vxattrs[i].name; i++) {
			len = sprintf(names, "%s", vxattrs[i].name);
			names += len + 1;
		}

out:
	spin_unlock(&ci->i_ceph_lock);
	return err;
}
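
/*
 * Synchronous setxattr: copy the value into freshly allocated pages and
 * send a SETXATTR request to the auth MDS.  Used when we do not hold
 * the XATTR_EXCL cap and so cannot buffer the change locally.
 */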
static int ceph_sync_setxattr(struct dentry *dentry, const char *name,
			      const char *value, size_t size, int flags)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dentry->d_sb);
	struct inode *inode = dentry->d_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct inode *parent_inode;
	struct ceph_mds_request *req;
	struct ceph_mds_client *mdsc = fsc->mdsc;
	int err;
	int i, nr_pages;
	struct page **pages = NULL;
	void *kaddr;

	/* copy value into some pages */
	nr_pages = calc_pages_for(0, size);
	if (nr_pages) {
		pages = kmalloc(sizeof(pages[0])*nr_pages, GFP_NOFS);
		if (!pages)
			return -ENOMEM;
		err = -ENOMEM;
		for (i = 0; i < nr_pages; i++) {
			pages[i] = __page_cache_alloc(GFP_NOFS);
			if (!pages[i]) {
				nr_pages = i;
				goto out;
			}
			kaddr = kmap(pages[i]);
			memcpy(kaddr, value + i*PAGE_CACHE_SIZE,
			       min(PAGE_CACHE_SIZE, size-i*PAGE_CACHE_SIZE));
		}
	}

	dout("setxattr value=%.*s\n", (int)size, value);

	/* do request */
	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SETXATTR,
				       USE_AUTH_MDS);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}
	req->r_inode = inode;
	ihold(inode);
	req->r_inode_drop = CEPH_CAP_XATTR_SHARED;
	req->r_num_caps = 1;
	req->r_args.setxattr.flags = cpu_to_le32(flags);
	req->r_path2 = kstrdup(name, GFP_NOFS);

	req->r_pages = pages;
	req->r_num_pages = nr_pages;
	req->r_data_len = size;

	dout("xattr.ver (before): %lld\n", ci->i_xattrs.version);
	parent_inode = ceph_get_dentry_parent_inode(dentry);
	err = ceph_mdsc_do_request(mdsc, parent_inode, req);
	iput(parent_inode);
	ceph_mdsc_put_request(req);
	dout("xattr.ver (after): %lld\n", ci->i_xattrs.version);

out:
	if (pages) {
		for (i = 0; i < nr_pages; i++)
			__free_page(pages[i]);
		kfree(pages);
	}
	return err;
}
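
/*
 * setxattr entry point.  When we hold the XATTR_EXCL cap the change is
 * applied to the local rb-tree and marked dirty, to be written back
 * with the cap flush; otherwise it is sent synchronously to the MDS.
 */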
int ceph_setxattr(struct dentry *dentry, const char *name,
		  const void *value, size_t size, int flags)
{
	struct inode *inode = dentry->d_inode;
	struct ceph_vxattr *vxattr;
	struct ceph_inode_info *ci = ceph_inode(inode);
	int issued;
	int err;
	int dirty;
	int name_len = strlen(name);
	int val_len = size;
	char *newname = NULL;
	char *newval = NULL;
	struct ceph_inode_xattr *xattr = NULL;
	int required_blob_size;

	if (ceph_snap(inode) != CEPH_NOSNAP)
		return -EROFS;

	if (!ceph_is_valid_xattr(name))
		return -EOPNOTSUPP;

	vxattr = ceph_match_vxattr(inode, name);
	if (vxattr && vxattr->readonly)
		return -EOPNOTSUPP;

	/* preallocate memory for xattr name, value, index node */
	err = -ENOMEM;
	newname = kmemdup(name, name_len + 1, GFP_NOFS);
	if (!newname)
		goto out;

	if (val_len) {
		newval = kmemdup(value, val_len, GFP_NOFS);
		if (!newval)
			goto out;
	}

	xattr = kmalloc(sizeof(struct ceph_inode_xattr), GFP_NOFS);
	if (!xattr)
		goto out;

	spin_lock(&ci->i_ceph_lock);
retry:
	issued = __ceph_caps_issued(ci, NULL);
	dout("setxattr %p issued %s\n", inode, ceph_cap_string(issued));
	if (!(issued & CEPH_CAP_XATTR_EXCL))
		goto do_sync;
	__build_xattrs(inode);

	required_blob_size = __get_required_blob_size(ci, name_len, val_len);

	if (!ci->i_xattrs.prealloc_blob ||
	    required_blob_size > ci->i_xattrs.prealloc_blob->alloc_len) {
		struct ceph_buffer *blob;

		spin_unlock(&ci->i_ceph_lock);
		dout(" preallocating new blob size=%d\n", required_blob_size);
		blob = ceph_buffer_new(required_blob_size, GFP_NOFS);
		if (!blob)
			goto out;
		spin_lock(&ci->i_ceph_lock);
		if (ci->i_xattrs.prealloc_blob)
			ceph_buffer_put(ci->i_xattrs.prealloc_blob);
		ci->i_xattrs.prealloc_blob = blob;
		goto retry;
	}

	err = __set_xattr(ci, newname, name_len, newval,
			  val_len, 1, 1, 1, &xattr);

	dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_XATTR_EXCL);
	ci->i_xattrs.dirty = true;
	inode->i_ctime = CURRENT_TIME;

	spin_unlock(&ci->i_ceph_lock);
	if (dirty)
		__mark_inode_dirty(inode, dirty);
	return err;

do_sync:
	spin_unlock(&ci->i_ceph_lock);
	err = ceph_sync_setxattr(dentry, name, value, size, flags);
out:
	kfree(newname);
	kfree(newval);
	kfree(xattr);
	return err;
}
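
/*
 * Synchronous removexattr: issue an RMXATTR request to the auth MDS.
 */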
static int ceph_send_removexattr(struct dentry *dentry, const char *name)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dentry->d_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct inode *inode = dentry->d_inode;
	struct inode *parent_inode;
	struct ceph_mds_request *req;
	int err;

	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_RMXATTR,
				       USE_AUTH_MDS);
	if (IS_ERR(req))
		return PTR_ERR(req);
	req->r_inode = inode;
	ihold(inode);
	req->r_inode_drop = CEPH_CAP_XATTR_SHARED;
	req->r_num_caps = 1;
	req->r_path2 = kstrdup(name, GFP_NOFS);

	parent_inode = ceph_get_dentry_parent_inode(dentry);
	err = ceph_mdsc_do_request(mdsc, parent_inode, req);
	iput(parent_inode);
	ceph_mdsc_put_request(req);
	return err;
}
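
/*
 * removexattr entry point: like setxattr, remove from the local rb-tree
 * and mark the caps dirty when we hold XATTR_EXCL, otherwise fall back
 * to a synchronous MDS request.
 */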
int ceph_removexattr(struct dentry *dentry, const char *name)
{
	struct inode *inode = dentry->d_inode;
	struct ceph_vxattr *vxattr;
	struct ceph_inode_info *ci = ceph_inode(inode);
	int issued;
	int err;
	int required_blob_size;
	int dirty;

	if (ceph_snap(inode) != CEPH_NOSNAP)
		return -EROFS;

	if (!ceph_is_valid_xattr(name))
		return -EOPNOTSUPP;

	vxattr = ceph_match_vxattr(inode, name);
	if (vxattr && vxattr->readonly)
		return -EOPNOTSUPP;

	err = -ENOMEM;
	spin_lock(&ci->i_ceph_lock);
retry:
	issued = __ceph_caps_issued(ci, NULL);
	dout("removexattr %p issued %s\n", inode, ceph_cap_string(issued));

	if (!(issued & CEPH_CAP_XATTR_EXCL))
		goto do_sync;
	__build_xattrs(inode);

	required_blob_size = __get_required_blob_size(ci, 0, 0);

	if (!ci->i_xattrs.prealloc_blob ||
	    required_blob_size > ci->i_xattrs.prealloc_blob->alloc_len) {
		struct ceph_buffer *blob;

		spin_unlock(&ci->i_ceph_lock);
		dout(" preallocating new blob size=%d\n", required_blob_size);
		blob = ceph_buffer_new(required_blob_size, GFP_NOFS);
		if (!blob)
			goto out;
		spin_lock(&ci->i_ceph_lock);
		if (ci->i_xattrs.prealloc_blob)
			ceph_buffer_put(ci->i_xattrs.prealloc_blob);
		ci->i_xattrs.prealloc_blob = blob;
		goto retry;
	}

	err = __remove_xattr_by_name(ceph_inode(inode), name);

	dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_XATTR_EXCL);
	ci->i_xattrs.dirty = true;
	inode->i_ctime = CURRENT_TIME;

	spin_unlock(&ci->i_ceph_lock);
	if (dirty)
		__mark_inode_dirty(inode, dirty);
	return err;
do_sync:
	spin_unlock(&ci->i_ceph_lock);
	err = ceph_send_removexattr(dentry, name);
out:
	return err;
}