xattr.c 26 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045
  1. /*
  2. * Copyright (C) International Business Machines Corp., 2000-2004
  3. * Copyright (C) Christoph Hellwig, 2002
  4. *
  5. * This program is free software; you can redistribute it and/or modify
  6. * it under the terms of the GNU General Public License as published by
  7. * the Free Software Foundation; either version 2 of the License, or
  8. * (at your option) any later version.
  9. *
  10. * This program is distributed in the hope that it will be useful,
  11. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  12. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
  13. * the GNU General Public License for more details.
  14. *
  15. * You should have received a copy of the GNU General Public License
  16. * along with this program; if not, write to the Free Software
  17. * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  18. */
  19. #include <linux/capability.h>
  20. #include <linux/fs.h>
  21. #include <linux/xattr.h>
  22. #include <linux/posix_acl_xattr.h>
  23. #include <linux/slab.h>
  24. #include <linux/quotaops.h>
  25. #include <linux/security.h>
  26. #include "jfs_incore.h"
  27. #include "jfs_superblock.h"
  28. #include "jfs_dmap.h"
  29. #include "jfs_debug.h"
  30. #include "jfs_dinode.h"
  31. #include "jfs_extent.h"
  32. #include "jfs_metapage.h"
  33. #include "jfs_xattr.h"
  34. #include "jfs_acl.h"
  35. /*
  36. * jfs_xattr.c: extended attribute service
  37. *
  38. * Overall design --
  39. *
  40. * Format:
  41. *
  42. * Extended attribute lists (jfs_ea_list) consist of an overall size (32 bit
  43. * value) and a variable (0 or more) number of extended attribute
  44. * entries. Each extended attribute entry (jfs_ea) is a <name,value> double
  45. * where <name> is constructed from a null-terminated ascii string
  46. * (1 ... 255 bytes in the name) and <value> is arbitrary 8 bit data
  47. * (1 ... 65535 bytes). The in-memory format is
  48. *
  49. * 0 1 2 4 4 + namelen + 1
  50. * +-------+--------+--------+----------------+-------------------+
  51. * | Flags | Name | Value | Name String \0 | Data . . . . |
  52. * | | Length | Length | | |
  53. * +-------+--------+--------+----------------+-------------------+
  54. *
  55. * A jfs_ea_list then is structured as
  56. *
  57. * 0 4 4 + EA_SIZE(ea1)
  58. * +------------+-------------------+--------------------+-----
  59. * | Overall EA | First FEA Element | Second FEA Element | .....
  60. * | List Size | | |
  61. * +------------+-------------------+--------------------+-----
  62. *
  63. * On-disk:
  64. *
  65. * FEALISTs are stored on disk using blocks allocated by dbAlloc() and
  66. * written directly. An EA list may be in-lined in the inode if there is
  67. * sufficient room available.
  68. */
/*
 * Working descriptor for an EA list while it is being read or rewritten.
 * Exactly one of the flag bits below tells ea_release()/ea_put() how the
 * backing storage for ->xattr must be disposed of.
 */
struct ea_buffer {
	int flag;		/* Indicates what storage xattr points to */
	int max_size;		/* largest xattr that fits in current buffer */
	dxd_t new_ea;		/* dxd to replace ea when modifying xattr */
	struct metapage *mp;	/* metapage containing ea list */
	struct jfs_ea_list *xattr;	/* buffer containing ea list */
};

/*
 * ea_buffer.flag values
 */
#define EA_INLINE	0x0001	/* ->xattr points at the inode's inline area */
#define EA_EXTENT	0x0002	/* ->xattr points into a metapage (->mp set) */
#define EA_NEW		0x0004	/* backing extent was freshly allocated */
#define EA_MALLOC	0x0008	/* ->xattr is a kmalloc'd contiguous buffer */
  83. /*
  84. * Mapping of on-disk attribute names: for on-disk attribute names with an
  85. * unknown prefix (not "system.", "user.", "security.", or "trusted."), the
  86. * prefix "os2." is prepended. On the way back to disk, "os2." prefixes are
  87. * stripped and we make sure that the remaining name does not start with one
  88. * of the known prefixes.
  89. */
  90. static int is_known_namespace(const char *name)
  91. {
  92. if (strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN) &&
  93. strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN) &&
  94. strncmp(name, XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN) &&
  95. strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN))
  96. return false;
  97. return true;
  98. }
  99. static inline int name_size(struct jfs_ea *ea)
  100. {
  101. if (is_known_namespace(ea->name))
  102. return ea->namelen;
  103. else
  104. return ea->namelen + XATTR_OS2_PREFIX_LEN;
  105. }
  106. static inline int copy_name(char *buffer, struct jfs_ea *ea)
  107. {
  108. int len = ea->namelen;
  109. if (!is_known_namespace(ea->name)) {
  110. memcpy(buffer, XATTR_OS2_PREFIX, XATTR_OS2_PREFIX_LEN);
  111. buffer += XATTR_OS2_PREFIX_LEN;
  112. len += XATTR_OS2_PREFIX_LEN;
  113. }
  114. memcpy(buffer, ea->name, ea->namelen);
  115. buffer[ea->namelen] = 0;
  116. return len;
  117. }
  118. /* Forward references */
  119. static void ea_release(struct inode *inode, struct ea_buffer *ea_buf);
  120. /*
  121. * NAME: ea_write_inline
  122. *
  123. * FUNCTION: Attempt to write an EA inline if area is available
  124. *
  125. * PRE CONDITIONS:
  126. * Already verified that the specified EA is small enough to fit inline
  127. *
  128. * PARAMETERS:
  129. * ip - Inode pointer
  130. * ealist - EA list pointer
  131. * size - size of ealist in bytes
  132. * ea - dxd_t structure to be filled in with necessary EA information
  133. * if we successfully copy the EA inline
  134. *
  135. * NOTES:
  136. * Checks if the inode's inline area is available. If so, copies EA inline
  137. * and sets <ea> fields appropriately. Otherwise, returns failure, EA will
  138. * have to be put into an extent.
  139. *
  140. * RETURNS: 0 for successful copy to inline area; -1 if area not available
  141. */
static int ea_write_inline(struct inode *ip, struct jfs_ea_list *ealist,
			   int size, dxd_t * ea)
{
	struct jfs_inode_info *ji = JFS_IP(ip);

	/*
	 * Make sure we have an EA -- the NULL EA list is valid, but you
	 * can't copy it!
	 */
	if (ealist && size > sizeof (struct jfs_ea_list)) {
		/* caller guaranteed the list fits the inline area */
		assert(size <= sizeof (ji->i_inline_ea));

		/*
		 * See if the space is available or if it is already being
		 * used for an inline EA.
		 */
		if (!(ji->mode2 & INLINEEA) && !(ji->ea.flag & DXD_INLINE))
			return -EPERM;

		/* inline EAs have size but no disk extent */
		DXDsize(ea, size);
		DXDlength(ea, 0);
		DXDaddress(ea, 0);
		memcpy(ji->i_inline_ea, ealist, size);
		ea->flag = DXD_INLINE;
		/* inline area is now occupied by the EA */
		ji->mode2 &= ~INLINEEA;
	} else {
		/* empty/NULL list: record "no EA" in the dxd */
		ea->flag = 0;
		DXDsize(ea, 0);
		DXDlength(ea, 0);
		DXDaddress(ea, 0);

		/* Free up INLINE area */
		if (ji->ea.flag & DXD_INLINE)
			ji->mode2 |= INLINEEA;
	}

	return 0;
}
  175. /*
  176. * NAME: ea_write
  177. *
  178. * FUNCTION: Write an EA for an inode
  179. *
  180. * PRE CONDITIONS: EA has been verified
  181. *
  182. * PARAMETERS:
  183. * ip - Inode pointer
  184. * ealist - EA list pointer
  185. * size - size of ealist in bytes
  186. * ea - dxd_t structure to be filled in appropriately with where the
  187. * EA was copied
  188. *
  189. * NOTES: Will write EA inline if able to, otherwise allocates blocks for an
  190. * extent and synchronously writes it to those blocks.
  191. *
  192. * RETURNS: 0 for success; Anything else indicates failure
  193. */
static int ea_write(struct inode *ip, struct jfs_ea_list *ealist, int size,
		    dxd_t * ea)
{
	struct super_block *sb = ip->i_sb;
	struct jfs_inode_info *ji = JFS_IP(ip);
	struct jfs_sb_info *sbi = JFS_SBI(sb);
	int nblocks;
	s64 blkno;
	int rc = 0, i;
	char *cp;
	s32 nbytes, nb;
	s32 bytes_to_write;
	struct metapage *mp;

	/*
	 * Quick check to see if this is an in-linable EA.  Short EAs
	 * and empty EAs are all in-linable, provided the space exists.
	 */
	if (!ealist || size <= sizeof (ji->i_inline_ea)) {
		/* fall through to extent path if the inline area is taken */
		if (!ea_write_inline(ip, ealist, size, ea))
			return 0;
	}

	/* figure out how many blocks we need */
	nblocks = (size + (sb->s_blocksize - 1)) >> sb->s_blocksize_bits;

	/* Allocate new blocks to quota. */
	rc = dquot_alloc_block(ip, nblocks);
	if (rc)
		return rc;

	rc = dbAlloc(ip, INOHINT(ip), nblocks, &blkno);
	if (rc) {
		/*Rollback quota allocation. */
		dquot_free_block(ip, nblocks);
		return rc;
	}

	/*
	 * Now have nblocks worth of storage to stuff into the FEALIST.
	 * loop over the FEALIST copying data into the buffer one page at
	 * a time.
	 */
	cp = (char *) ealist;
	nbytes = size;
	for (i = 0; i < nblocks; i += sbi->nbperpage) {
		/*
		 * Determine how many bytes for this request, and round up to
		 * the nearest aggregate block size
		 */
		nb = min(PSIZE, nbytes);
		bytes_to_write =
		    ((((nb + sb->s_blocksize - 1)) >> sb->s_blocksize_bits))
		    << sb->s_blocksize_bits;

		if (!(mp = get_metapage(ip, blkno + i, bytes_to_write, 1))) {
			rc = -EIO;
			goto failed;
		}

		memcpy(mp->data, cp, nb);

		/*
		 * We really need a way to propagate errors for
		 * forced writes like this one.  --hch
		 *
		 * (__write_metapage => release_metapage => flush_metapage)
		 */
#ifdef _JFS_FIXME
		if ((rc = flush_metapage(mp))) {
			/*
			 * the write failed -- this means that the buffer
			 * is still assigned and the blocks are not being
			 * used.  this seems like the best error recovery
			 * we can get ...
			 */
			goto failed;
		}
#else
		flush_metapage(mp);
#endif

		cp += PSIZE;
		nbytes -= nb;
	}

	/* record where the list now lives on disk */
	ea->flag = DXD_EXTENT;
	DXDsize(ea, le32_to_cpu(ealist->size));
	DXDlength(ea, nblocks);
	DXDaddress(ea, blkno);

	/* Free up INLINE area */
	if (ji->ea.flag & DXD_INLINE)
		ji->mode2 |= INLINEEA;

	return 0;

      failed:
	/* Rollback quota allocation. */
	dquot_free_block(ip, nblocks);

	dbFree(ip, blkno, nblocks);
	return rc;
}
  284. /*
  285. * NAME: ea_read_inline
  286. *
  287. * FUNCTION: Read an inlined EA into user's buffer
  288. *
  289. * PARAMETERS:
  290. * ip - Inode pointer
  291. * ealist - Pointer to buffer to fill in with EA
  292. *
  293. * RETURNS: 0
  294. */
static int ea_read_inline(struct inode *ip, struct jfs_ea_list *ealist)
{
	struct jfs_inode_info *ji = JFS_IP(ip);
	int ea_size = sizeDXD(&ji->ea);

	/* no EA recorded: hand back an empty list */
	if (ea_size == 0) {
		ealist->size = 0;
		return 0;
	}

	/* Sanity Check */
	if ((sizeDXD(&ji->ea) > sizeof (ji->i_inline_ea)))
		return -EIO;
	/* the size stored inside the list must agree with the dxd */
	if (le32_to_cpu(((struct jfs_ea_list *) &ji->i_inline_ea)->size)
	    != ea_size)
		return -EIO;

	memcpy(ealist, ji->i_inline_ea, ea_size);
	return 0;
}
  312. /*
  313. * NAME: ea_read
  314. *
  315. * FUNCTION: copy EA data into user's buffer
  316. *
  317. * PARAMETERS:
  318. * ip - Inode pointer
  319. * ealist - Pointer to buffer to fill in with EA
  320. *
  321. * NOTES: If EA is inline calls ea_read_inline() to copy EA.
  322. *
  323. * RETURNS: 0 for success; other indicates failure
  324. */
static int ea_read(struct inode *ip, struct jfs_ea_list *ealist)
{
	struct super_block *sb = ip->i_sb;
	struct jfs_inode_info *ji = JFS_IP(ip);
	struct jfs_sb_info *sbi = JFS_SBI(sb);
	int nblocks;
	s64 blkno;
	char *cp = (char *) ealist;
	int i;
	int nbytes, nb;
	s32 bytes_to_read;
	struct metapage *mp;

	/* quick check for in-line EA */
	if (ji->ea.flag & DXD_INLINE)
		return ea_read_inline(ip, ealist);

	nbytes = sizeDXD(&ji->ea);
	if (!nbytes) {
		jfs_error(sb, "nbytes is 0\n");
		return -EIO;
	}

	/*
	 * Figure out how many blocks were allocated when this EA list was
	 * originally written to disk.
	 */
	nblocks = lengthDXD(&ji->ea) << sbi->l2nbperpage;
	blkno = addressDXD(&ji->ea) << sbi->l2nbperpage;

	/*
	 * I have found the disk blocks which were originally used to store
	 * the FEALIST.  now i loop over each contiguous block copying the
	 * data into the buffer.
	 */
	for (i = 0; i < nblocks; i += sbi->nbperpage) {
		/*
		 * Determine how many bytes for this request, and round up to
		 * the nearest aggregate block size
		 */
		nb = min(PSIZE, nbytes);
		bytes_to_read =
		    ((((nb + sb->s_blocksize - 1)) >> sb->s_blocksize_bits))
		    << sb->s_blocksize_bits;

		if (!(mp = read_metapage(ip, blkno + i, bytes_to_read, 1)))
			return -EIO;

		memcpy(cp, mp->data, nb);
		release_metapage(mp);

		cp += PSIZE;
		nbytes -= nb;
	}

	return 0;
}
  374. /*
  375. * NAME: ea_get
  376. *
  377. * FUNCTION: Returns buffer containing existing extended attributes.
  378. * The size of the buffer will be the larger of the existing
  379. * attributes size, or min_size.
  380. *
  381. * The buffer, which may be inlined in the inode or in the
  382. * page cache must be release by calling ea_release or ea_put
  383. *
  384. * PARAMETERS:
  385. * inode - Inode pointer
  386. * ea_buf - Structure to be populated with ealist and its metadata
  387. * min_size- minimum size of buffer to be returned
  388. *
  389. * RETURNS: 0 for success; Other indicates failure
  390. */
  391. static int ea_get(struct inode *inode, struct ea_buffer *ea_buf, int min_size)
  392. {
  393. struct jfs_inode_info *ji = JFS_IP(inode);
  394. struct super_block *sb = inode->i_sb;
  395. int size;
  396. int ea_size = sizeDXD(&ji->ea);
  397. int blocks_needed, current_blocks;
  398. s64 blkno;
  399. int rc;
  400. int quota_allocation = 0;
  401. /* When fsck.jfs clears a bad ea, it doesn't clear the size */
  402. if (ji->ea.flag == 0)
  403. ea_size = 0;
  404. if (ea_size == 0) {
  405. if (min_size == 0) {
  406. ea_buf->flag = 0;
  407. ea_buf->max_size = 0;
  408. ea_buf->xattr = NULL;
  409. return 0;
  410. }
  411. if ((min_size <= sizeof (ji->i_inline_ea)) &&
  412. (ji->mode2 & INLINEEA)) {
  413. ea_buf->flag = EA_INLINE | EA_NEW;
  414. ea_buf->max_size = sizeof (ji->i_inline_ea);
  415. ea_buf->xattr = (struct jfs_ea_list *) ji->i_inline_ea;
  416. DXDlength(&ea_buf->new_ea, 0);
  417. DXDaddress(&ea_buf->new_ea, 0);
  418. ea_buf->new_ea.flag = DXD_INLINE;
  419. DXDsize(&ea_buf->new_ea, min_size);
  420. return 0;
  421. }
  422. current_blocks = 0;
  423. } else if (ji->ea.flag & DXD_INLINE) {
  424. if (min_size <= sizeof (ji->i_inline_ea)) {
  425. ea_buf->flag = EA_INLINE;
  426. ea_buf->max_size = sizeof (ji->i_inline_ea);
  427. ea_buf->xattr = (struct jfs_ea_list *) ji->i_inline_ea;
  428. goto size_check;
  429. }
  430. current_blocks = 0;
  431. } else {
  432. if (!(ji->ea.flag & DXD_EXTENT)) {
  433. jfs_error(sb, "invalid ea.flag\n");
  434. return -EIO;
  435. }
  436. current_blocks = (ea_size + sb->s_blocksize - 1) >>
  437. sb->s_blocksize_bits;
  438. }
  439. size = max(min_size, ea_size);
  440. if (size > PSIZE) {
  441. /*
  442. * To keep the rest of the code simple. Allocate a
  443. * contiguous buffer to work with
  444. */
  445. ea_buf->xattr = kmalloc(size, GFP_KERNEL);
  446. if (ea_buf->xattr == NULL)
  447. return -ENOMEM;
  448. ea_buf->flag = EA_MALLOC;
  449. ea_buf->max_size = (size + sb->s_blocksize - 1) &
  450. ~(sb->s_blocksize - 1);
  451. if (ea_size == 0)
  452. return 0;
  453. if ((rc = ea_read(inode, ea_buf->xattr))) {
  454. kfree(ea_buf->xattr);
  455. ea_buf->xattr = NULL;
  456. return rc;
  457. }
  458. goto size_check;
  459. }
  460. blocks_needed = (min_size + sb->s_blocksize - 1) >>
  461. sb->s_blocksize_bits;
  462. if (blocks_needed > current_blocks) {
  463. /* Allocate new blocks to quota. */
  464. rc = dquot_alloc_block(inode, blocks_needed);
  465. if (rc)
  466. return -EDQUOT;
  467. quota_allocation = blocks_needed;
  468. rc = dbAlloc(inode, INOHINT(inode), (s64) blocks_needed,
  469. &blkno);
  470. if (rc)
  471. goto clean_up;
  472. DXDlength(&ea_buf->new_ea, blocks_needed);
  473. DXDaddress(&ea_buf->new_ea, blkno);
  474. ea_buf->new_ea.flag = DXD_EXTENT;
  475. DXDsize(&ea_buf->new_ea, min_size);
  476. ea_buf->flag = EA_EXTENT | EA_NEW;
  477. ea_buf->mp = get_metapage(inode, blkno,
  478. blocks_needed << sb->s_blocksize_bits,
  479. 1);
  480. if (ea_buf->mp == NULL) {
  481. dbFree(inode, blkno, (s64) blocks_needed);
  482. rc = -EIO;
  483. goto clean_up;
  484. }
  485. ea_buf->xattr = ea_buf->mp->data;
  486. ea_buf->max_size = (min_size + sb->s_blocksize - 1) &
  487. ~(sb->s_blocksize - 1);
  488. if (ea_size == 0)
  489. return 0;
  490. if ((rc = ea_read(inode, ea_buf->xattr))) {
  491. discard_metapage(ea_buf->mp);
  492. dbFree(inode, blkno, (s64) blocks_needed);
  493. goto clean_up;
  494. }
  495. goto size_check;
  496. }
  497. ea_buf->flag = EA_EXTENT;
  498. ea_buf->mp = read_metapage(inode, addressDXD(&ji->ea),
  499. lengthDXD(&ji->ea) << sb->s_blocksize_bits,
  500. 1);
  501. if (ea_buf->mp == NULL) {
  502. rc = -EIO;
  503. goto clean_up;
  504. }
  505. ea_buf->xattr = ea_buf->mp->data;
  506. ea_buf->max_size = (ea_size + sb->s_blocksize - 1) &
  507. ~(sb->s_blocksize - 1);
  508. size_check:
  509. if (EALIST_SIZE(ea_buf->xattr) != ea_size) {
  510. printk(KERN_ERR "ea_get: invalid extended attribute\n");
  511. print_hex_dump(KERN_ERR, "", DUMP_PREFIX_ADDRESS, 16, 1,
  512. ea_buf->xattr, ea_size, 1);
  513. ea_release(inode, ea_buf);
  514. rc = -EIO;
  515. goto clean_up;
  516. }
  517. return ea_size;
  518. clean_up:
  519. /* Rollback quota allocation */
  520. if (quota_allocation)
  521. dquot_free_block(inode, quota_allocation);
  522. return (rc);
  523. }
  524. static void ea_release(struct inode *inode, struct ea_buffer *ea_buf)
  525. {
  526. if (ea_buf->flag & EA_MALLOC)
  527. kfree(ea_buf->xattr);
  528. else if (ea_buf->flag & EA_EXTENT) {
  529. assert(ea_buf->mp);
  530. release_metapage(ea_buf->mp);
  531. if (ea_buf->flag & EA_NEW)
  532. dbFree(inode, addressDXD(&ea_buf->new_ea),
  533. lengthDXD(&ea_buf->new_ea));
  534. }
  535. }
/*
 * Commit the (possibly modified) EA list held in @ea_buf back to the inode
 * under transaction @tid, replacing the inode's old EA descriptor and
 * releasing/accounting the old storage.  new_size == 0 removes the EA.
 */
static int ea_put(tid_t tid, struct inode *inode, struct ea_buffer *ea_buf,
		  int new_size)
{
	struct jfs_inode_info *ji = JFS_IP(inode);
	unsigned long old_blocks, new_blocks;
	int rc = 0;

	if (new_size == 0) {
		/* empty list: drop the buffer; NULL ea_buf means "no EA" below */
		ea_release(inode, ea_buf);
		ea_buf = NULL;
	} else if (ea_buf->flag & EA_INLINE) {
		assert(new_size <= sizeof (ji->i_inline_ea));
		ji->mode2 &= ~INLINEEA;
		ea_buf->new_ea.flag = DXD_INLINE;
		DXDsize(&ea_buf->new_ea, new_size);
		DXDaddress(&ea_buf->new_ea, 0);
		DXDlength(&ea_buf->new_ea, 0);
	} else if (ea_buf->flag & EA_MALLOC) {
		rc = ea_write(inode, ea_buf->xattr, new_size, &ea_buf->new_ea);
		kfree(ea_buf->xattr);
	} else if (ea_buf->flag & EA_NEW) {
		/* We have already allocated a new dxd */
		flush_metapage(ea_buf->mp);
	} else {
		/* ->xattr must point to original ea's metapage */
		rc = ea_write(inode, ea_buf->xattr, new_size, &ea_buf->new_ea);
		discard_metapage(ea_buf->mp);
	}
	if (rc)
		return rc;

	old_blocks = new_blocks = 0;

	if (ji->ea.flag & DXD_EXTENT) {
		invalidate_dxd_metapages(inode, ji->ea);
		old_blocks = lengthDXD(&ji->ea);
	}

	if (ea_buf) {
		/* log the old->new dxd swap and install the new descriptor */
		txEA(tid, inode, &ji->ea, &ea_buf->new_ea);
		if (ea_buf->new_ea.flag & DXD_EXTENT) {
			new_blocks = lengthDXD(&ea_buf->new_ea);
			if (ji->ea.flag & DXD_INLINE)
				ji->mode2 |= INLINEEA;
		}
		ji->ea = ea_buf->new_ea;
	} else {
		/* EA removed entirely */
		txEA(tid, inode, &ji->ea, NULL);
		if (ji->ea.flag & DXD_INLINE)
			ji->mode2 |= INLINEEA;
		ji->ea.flag = 0;
		ji->ea.size = 0;
	}

	/* If old blocks exist, they must be removed from quota allocation. */
	if (old_blocks)
		dquot_free_block(inode, old_blocks);

	inode->i_ctime = current_time(inode);

	return 0;
}
/*
 * Set (create/replace/remove) the EA @name on @inode under transaction @tid.
 * value == NULL removes the attribute.  Honors XATTR_CREATE/XATTR_REPLACE.
 * Works by computing the merged list size, growing the buffer if needed
 * (one retry via the "again" label), splicing out any old entry in place,
 * and appending the new entry at the end.
 */
int __jfs_setxattr(tid_t tid, struct inode *inode, const char *name,
		   const void *value, size_t value_len, int flags)
{
	struct jfs_ea_list *ealist;
	struct jfs_ea *ea, *old_ea = NULL, *next_ea = NULL;
	struct ea_buffer ea_buf;
	int old_ea_size = 0;
	int xattr_size;
	int new_size;
	int namelen = strlen(name);
	int found = 0;
	int rc;
	int length;

	down_write(&JFS_IP(inode)->xattr_sem);

	xattr_size = ea_get(inode, &ea_buf, 0);
	if (xattr_size < 0) {
		rc = xattr_size;
		goto out;
	}

      again:
	ealist = (struct jfs_ea_list *) ea_buf.xattr;
	new_size = sizeof (struct jfs_ea_list);

	if (xattr_size) {
		/* scan for an existing entry; tally size of all the others */
		for (ea = FIRST_EA(ealist); ea < END_EALIST(ealist);
		     ea = NEXT_EA(ea)) {
			if ((namelen == ea->namelen) &&
			    (memcmp(name, ea->name, namelen) == 0)) {
				found = 1;
				if (flags & XATTR_CREATE) {
					rc = -EEXIST;
					goto release;
				}
				old_ea = ea;
				old_ea_size = EA_SIZE(ea);
				next_ea = NEXT_EA(ea);
			} else
				new_size += EA_SIZE(ea);
		}
	}

	if (!found) {
		if (flags & XATTR_REPLACE) {
			rc = -ENODATA;
			goto release;
		}
		if (value == NULL) {
			/* removing a nonexistent EA is a no-op */
			rc = 0;
			goto release;
		}
	}
	if (value)
		new_size += sizeof (struct jfs_ea) + namelen + 1 + value_len;

	if (new_size > ea_buf.max_size) {
		/*
		 * We need to allocate more space for merged ea list.
		 * We should only have loop to again: once.
		 */
		ea_release(inode, &ea_buf);
		xattr_size = ea_get(inode, &ea_buf, new_size);
		if (xattr_size < 0) {
			rc = xattr_size;
			goto out;
		}
		goto again;
	}

	/* Remove old ea of the same name */
	if (found) {
		/* number of bytes following target EA */
		length = (char *) END_EALIST(ealist) - (char *) next_ea;
		if (length > 0)
			memmove(old_ea, next_ea, length);
		xattr_size -= old_ea_size;
	}

	/* Add new entry to the end */
	if (value) {
		if (xattr_size == 0)
			/* Completely new ea list */
			xattr_size = sizeof (struct jfs_ea_list);

		/*
		 * The size of EA value is limitted by on-disk format up to
		 *  __le16, there would be an overflow if the size is equal
		 * to XATTR_SIZE_MAX (65536).  In order to avoid this issue,
		 * we can pre-checkup the value size against USHRT_MAX, and
		 * return -E2BIG in this case, which is consistent with the
		 * VFS setxattr interface.
		 */
		if (value_len >= USHRT_MAX) {
			rc = -E2BIG;
			goto release;
		}

		ea = (struct jfs_ea *) ((char *) ealist + xattr_size);
		ea->flag = 0;
		ea->namelen = namelen;
		ea->valuelen = (cpu_to_le16(value_len));
		memcpy(ea->name, name, namelen);
		ea->name[namelen] = 0;
		if (value_len)
			memcpy(&ea->name[namelen + 1], value, value_len);
		xattr_size += EA_SIZE(ea);
	}

	/* DEBUG - If we did this right, these number match */
	if (xattr_size != new_size) {
		printk(KERN_ERR
		       "__jfs_setxattr: xattr_size = %d, new_size = %d\n",
		       xattr_size, new_size);
		rc = -EINVAL;
		goto release;
	}

	/*
	 * If we're left with an empty list, there's no ea
	 */
	if (new_size == sizeof (struct jfs_ea_list))
		new_size = 0;

	ealist->size = cpu_to_le32(new_size);

	rc = ea_put(tid, inode, &ea_buf, new_size);

	goto out;
      release:
	ea_release(inode, &ea_buf);
      out:
	up_write(&JFS_IP(inode)->xattr_sem);

	return rc;
}
/*
 * Look up EA @name on @inode and copy its value into @data.
 * Returns the value length, -ENODATA if absent, or -ERANGE if @buf_size
 * is too small.  data == NULL just reports the required size.
 */
ssize_t __jfs_getxattr(struct inode *inode, const char *name, void *data,
		       size_t buf_size)
{
	struct jfs_ea_list *ealist;
	struct jfs_ea *ea;
	struct ea_buffer ea_buf;
	int xattr_size;
	ssize_t size;
	int namelen = strlen(name);
	char *value;

	down_read(&JFS_IP(inode)->xattr_sem);

	xattr_size = ea_get(inode, &ea_buf, 0);
	if (xattr_size < 0) {
		size = xattr_size;
		goto out;
	}

	if (xattr_size == 0)
		goto not_found;

	ealist = (struct jfs_ea_list *) ea_buf.xattr;

	/* Find the named attribute */
	for (ea = FIRST_EA(ealist); ea < END_EALIST(ealist); ea = NEXT_EA(ea))
		if ((namelen == ea->namelen) &&
		    memcmp(name, ea->name, namelen) == 0) {
			/* Found it */
			size = le16_to_cpu(ea->valuelen);
			if (!data)
				goto release;
			else if (size > buf_size) {
				size = -ERANGE;
				goto release;
			}
			/* value bytes start right after the NUL of the name */
			value = ((char *) &ea->name) + ea->namelen + 1;
			memcpy(data, value, size);
			goto release;
		}
      not_found:
	size = -ENODATA;
      release:
	ea_release(inode, &ea_buf);
      out:
	up_read(&JFS_IP(inode)->xattr_sem);

	return size;
}
  755. /*
  756. * No special permissions are needed to list attributes except for trusted.*
  757. */
  758. static inline int can_list(struct jfs_ea *ea)
  759. {
  760. return (strncmp(ea->name, XATTR_TRUSTED_PREFIX,
  761. XATTR_TRUSTED_PREFIX_LEN) ||
  762. capable(CAP_SYS_ADMIN));
  763. }
/*
 * List the names of all EAs on @dentry's inode as consecutive
 * NUL-terminated strings in @data.  Returns the total byte count,
 * -ERANGE if @buf_size is too small, or a negative errno.
 * data == NULL just reports the required size.
 */
ssize_t jfs_listxattr(struct dentry * dentry, char *data, size_t buf_size)
{
	struct inode *inode = d_inode(dentry);
	char *buffer;
	ssize_t size = 0;
	int xattr_size;
	struct jfs_ea_list *ealist;
	struct jfs_ea *ea;
	struct ea_buffer ea_buf;

	down_read(&JFS_IP(inode)->xattr_sem);

	xattr_size = ea_get(inode, &ea_buf, 0);
	if (xattr_size < 0) {
		size = xattr_size;
		goto out;
	}
	if (xattr_size == 0)
		goto release;

	ealist = (struct jfs_ea_list *) ea_buf.xattr;

	/* compute required size of list */
	for (ea = FIRST_EA(ealist); ea < END_EALIST(ealist); ea = NEXT_EA(ea)) {
		if (can_list(ea))
			size += name_size(ea) + 1;
	}

	if (!data)
		goto release;

	if (size > buf_size) {
		size = -ERANGE;
		goto release;
	}

	/* Copy attribute names to buffer */
	buffer = data;
	for (ea = FIRST_EA(ealist); ea < END_EALIST(ealist); ea = NEXT_EA(ea)) {
		if (can_list(ea)) {
			int namelen = copy_name(buffer, ea);
			buffer += namelen + 1;
		}
	}

      release:
	ea_release(inode, &ea_buf);
      out:
	up_read(&JFS_IP(inode)->xattr_sem);
	return size;
}
/*
 * Transactional wrapper around __jfs_setxattr(): begins a JFS transaction,
 * performs the set under the inode's commit mutex, and commits on success.
 */
static int __jfs_xattr_set(struct inode *inode, const char *name,
			   const void *value, size_t size, int flags)
{
	struct jfs_inode_info *ji = JFS_IP(inode);
	tid_t tid;
	int rc;

	tid = txBegin(inode->i_sb, 0);
	mutex_lock(&ji->commit_mutex);
	rc = __jfs_setxattr(tid, inode, name, value, size, flags);
	if (!rc)
		rc = txCommit(tid, 1, &inode, 0);
	txEnd(tid);
	mutex_unlock(&ji->commit_mutex);

	return rc;
}
  822. static int jfs_xattr_get(const struct xattr_handler *handler,
  823. struct dentry *unused, struct inode *inode,
  824. const char *name, void *value, size_t size)
  825. {
  826. name = xattr_full_name(handler, name);
  827. return __jfs_getxattr(inode, name, value, size);
  828. }
  829. static int jfs_xattr_set(const struct xattr_handler *handler,
  830. struct dentry *unused, struct inode *inode,
  831. const char *name, const void *value,
  832. size_t size, int flags)
  833. {
  834. name = xattr_full_name(handler, name);
  835. return __jfs_xattr_set(inode, name, value, size, flags);
  836. }
  837. static int jfs_xattr_get_os2(const struct xattr_handler *handler,
  838. struct dentry *unused, struct inode *inode,
  839. const char *name, void *value, size_t size)
  840. {
  841. if (is_known_namespace(name))
  842. return -EOPNOTSUPP;
  843. return __jfs_getxattr(inode, name, value, size);
  844. }
  845. static int jfs_xattr_set_os2(const struct xattr_handler *handler,
  846. struct dentry *unused, struct inode *inode,
  847. const char *name, const void *value,
  848. size_t size, int flags)
  849. {
  850. if (is_known_namespace(name))
  851. return -EOPNOTSUPP;
  852. return __jfs_xattr_set(inode, name, value, size, flags);
  853. }
/*
 * Per-namespace xattr handlers.  The get/set callbacks for the
 * user/security/trusted namespaces re-add the prefix via xattr_full_name()
 * before the internal lookup; the os2 handlers operate on the bare name.
 */
static const struct xattr_handler jfs_user_xattr_handler = {
	.prefix = XATTR_USER_PREFIX,
	.get = jfs_xattr_get,
	.set = jfs_xattr_set,
};

static const struct xattr_handler jfs_os2_xattr_handler = {
	.prefix = XATTR_OS2_PREFIX,
	.get = jfs_xattr_get_os2,
	.set = jfs_xattr_set_os2,
};

static const struct xattr_handler jfs_security_xattr_handler = {
	.prefix = XATTR_SECURITY_PREFIX,
	.get = jfs_xattr_get,
	.set = jfs_xattr_set,
};

static const struct xattr_handler jfs_trusted_xattr_handler = {
	.prefix = XATTR_TRUSTED_PREFIX,
	.get = jfs_xattr_get,
	.set = jfs_xattr_set,
};

/* NULL-terminated handler table registered with the VFS via the superblock */
const struct xattr_handler *jfs_xattr_handlers[] = {
#ifdef CONFIG_JFS_POSIX_ACL
	&posix_acl_access_xattr_handler,
	&posix_acl_default_xattr_handler,
#endif
	&jfs_os2_xattr_handler,
	&jfs_user_xattr_handler,
	&jfs_security_xattr_handler,
	&jfs_trusted_xattr_handler,
	NULL,
};
  885. #ifdef CONFIG_JFS_SECURITY
/*
 * LSM callback: write each security attribute handed back by the security
 * module onto a new inode, prefixing each name with "security.".
 * fs_info carries the tid of the transaction opened by the caller.
 */
static int jfs_initxattrs(struct inode *inode, const struct xattr *xattr_array,
			  void *fs_info)
{
	const struct xattr *xattr;
	tid_t *tid = fs_info;
	char *name;
	int err = 0;

	for (xattr = xattr_array; xattr->name != NULL; xattr++) {
		/* build "security.<name>"; +1 for the terminating NUL */
		name = kmalloc(XATTR_SECURITY_PREFIX_LEN +
			       strlen(xattr->name) + 1, GFP_NOFS);
		if (!name) {
			err = -ENOMEM;
			break;
		}
		strcpy(name, XATTR_SECURITY_PREFIX);
		strcpy(name + XATTR_SECURITY_PREFIX_LEN, xattr->name);

		err = __jfs_setxattr(*tid, inode, name,
				     xattr->value, xattr->value_len, 0);
		kfree(name);
		if (err < 0)
			break;
	}
	return err;
}
/*
 * Ask the security module for the initial security xattrs of a new inode
 * and store them via jfs_initxattrs() under the caller's transaction @tid.
 */
int jfs_init_security(tid_t tid, struct inode *inode, struct inode *dir,
		      const struct qstr *qstr)
{
	return security_inode_init_security(inode, dir, qstr,
					    &jfs_initxattrs, &tid);
}
  916. #endif