/*
 * Copyright (C) 2005, 2006
 * Avishay Traeger (avishay@gmail.com)
 * Copyright (C) 2008, 2009
 * Boaz Harrosh <bharrosh@panasas.com>
 *
 * Copyrights for code taken from ext2:
 *     Copyright (C) 1992, 1993, 1994, 1995
 *     Remy Card (card@masi.ibp.fr)
 *     Laboratoire MASI - Institut Blaise Pascal
 *     Universite Pierre et Marie Curie (Paris VI)
 *     from
 *     linux/fs/minix/inode.c
 *     Copyright (C) 1991, 1992 Linus Torvalds
 *
 * This file is part of exofs.
 *
 * exofs is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation. Since it is based on ext2, and the only
 * valid version of GPL for the Linux kernel is version 2, the only valid
 * version of GPL for exofs is version 2.
 *
 * exofs is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with exofs; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <linux/slab.h>

#include "exofs.h"

#define EXOFS_DBGMSG2(M...) do {} while (0)

enum {MAX_PAGES_KMALLOC = PAGE_SIZE / sizeof(struct page *), };

unsigned exofs_max_io_pages(struct ore_layout *layout,
			    unsigned expected_pages)
{
	unsigned pages = min_t(unsigned, expected_pages, MAX_PAGES_KMALLOC);

	/* TODO: easily support bio chaining */
	pages = min_t(unsigned, pages, layout->max_io_length / PAGE_SIZE);
	return pages;
}
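
/* page_collect - a run of contiguous pages gathered for a single ORE I/O.
 * Pages are accumulated by readpage_strip()/writepage_strip() and submitted
 * together by read_exec()/write_exec().
 */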
struct page_collect {
	struct exofs_sb_info *sbi;
	struct inode *inode;
	unsigned expected_pages;
	struct ore_io_state *ios;

	struct page **pages;
	unsigned alloc_pages;
	unsigned nr_pages;
	unsigned long length;
	loff_t pg_first; /* keep 64bit also in 32-arches */
	bool read_4_write; /* This means two things: that the read is sync
			    * and the pages should not be unlocked.
			    */
	struct page *that_locked_page;
};

static void _pcol_init(struct page_collect *pcol, unsigned expected_pages,
		       struct inode *inode)
{
	struct exofs_sb_info *sbi = inode->i_sb->s_fs_info;

	pcol->sbi = sbi;
	pcol->inode = inode;
	pcol->expected_pages = expected_pages;

	pcol->ios = NULL;
	pcol->pages = NULL;
	pcol->alloc_pages = 0;
	pcol->nr_pages = 0;
	pcol->length = 0;
	pcol->pg_first = -1;
	pcol->read_4_write = false;
	pcol->that_locked_page = NULL;
}

static void _pcol_reset(struct page_collect *pcol)
{
	pcol->expected_pages -= min(pcol->nr_pages, pcol->expected_pages);

	pcol->pages = NULL;
	pcol->alloc_pages = 0;
	pcol->nr_pages = 0;
	pcol->length = 0;
	pcol->pg_first = -1;
	pcol->ios = NULL;
	pcol->that_locked_page = NULL;

	/* this is probably the end of the loop, but in writes
	 * it might not end here. Don't be left with nothing.
	 */
	if (!pcol->expected_pages)
		pcol->expected_pages = MAX_PAGES_KMALLOC;
}
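
/* Allocate the pages array for one I/O. If the full request does not fit,
 * back off by halving until an allocation succeeds; the request is then
 * simply split across several smaller submissions.
 */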
static int pcol_try_alloc(struct page_collect *pcol)
{
	unsigned pages;

	/* TODO: easily support bio chaining */
	pages = exofs_max_io_pages(&pcol->sbi->layout, pcol->expected_pages);

	for (; pages; pages >>= 1) {
		pcol->pages = kmalloc(pages * sizeof(struct page *),
				      GFP_KERNEL);
		if (likely(pcol->pages)) {
			pcol->alloc_pages = pages;
			return 0;
		}
	}

	EXOFS_ERR("Failed to kmalloc expected_pages=%u\n",
		  pcol->expected_pages);
	return -ENOMEM;
}

static void pcol_free(struct page_collect *pcol)
{
	kfree(pcol->pages);
	pcol->pages = NULL;

	if (pcol->ios) {
		ore_put_io_state(pcol->ios);
		pcol->ios = NULL;
	}
}

static int pcol_add_page(struct page_collect *pcol, struct page *page,
			 unsigned len)
{
	if (unlikely(pcol->nr_pages >= pcol->alloc_pages))
		return -ENOMEM;

	pcol->pages[pcol->nr_pages++] = page;
	pcol->length += len;
	return 0;
}
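
/* PAGE_WAS_NOT_IN_IO - sentinel status for pages that were trimmed out of
 * an I/O and carried over to the next one; the completion paths must not
 * touch the uptodate/writeback state of such pages.
 */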
enum {PAGE_WAS_NOT_IN_IO = 17};
static int update_read_page(struct page *page, int ret)
{
	switch (ret) {
	case 0:
		/* Everything is OK */
		SetPageUptodate(page);
		if (PageError(page))
			ClearPageError(page);
		break;
	case -EFAULT:
		/* In this case we were trying to read something that wasn't on
		 * disk yet - return a page full of zeroes. This should be OK,
		 * because the object should be empty (if there was a write
		 * before this read, the read would be waiting with the page
		 * locked */
		clear_highpage(page);

		SetPageUptodate(page);
		if (PageError(page))
			ClearPageError(page);
		EXOFS_DBGMSG("recovered read error\n");
		/* fall through */
	case PAGE_WAS_NOT_IN_IO:
		ret = 0; /* recovered error */
		break;
	default:
		SetPageError(page);
	}
	return ret;
}

static void update_write_page(struct page *page, int ret)
{
	if (unlikely(ret == PAGE_WAS_NOT_IN_IO))
		return; /* don't pass start don't collect $200 */

	if (ret) {
		mapping_set_error(page->mapping, ret);
		SetPageError(page);
	}
	end_page_writeback(page);
}

/* Called at the end of reads, to optionally unlock pages and update their
 * status.
 */
static int __readpages_done(struct page_collect *pcol)
{
	int i;
	u64 good_bytes;
	u64 length = 0;
	int ret = ore_check_io(pcol->ios, NULL);

	if (likely(!ret)) {
		good_bytes = pcol->length;
		ret = PAGE_WAS_NOT_IN_IO;
	} else {
		good_bytes = 0;
	}

	EXOFS_DBGMSG2("readpages_done(0x%lx) good_bytes=0x%llx"
		     " length=0x%lx nr_pages=%u\n",
		     pcol->inode->i_ino, _LLU(good_bytes), pcol->length,
		     pcol->nr_pages);

	for (i = 0; i < pcol->nr_pages; i++) {
		struct page *page = pcol->pages[i];
		struct inode *inode = page->mapping->host;
		int page_stat;

		if (inode != pcol->inode)
			continue; /* osd might add more pages at end */

		if (likely(length < good_bytes))
			page_stat = 0;
		else
			page_stat = ret;

		EXOFS_DBGMSG2("    readpages_done(0x%lx, 0x%lx) %s\n",
			     inode->i_ino, page->index,
			     page_stat ? "bad_bytes" : "good_bytes");

		ret = update_read_page(page, page_stat);
		if (!pcol->read_4_write)
			unlock_page(page);
		length += PAGE_SIZE;
	}

	pcol_free(pcol);
	EXOFS_DBGMSG2("readpages_done END\n");
	return ret;
}

/* callback of async reads */
static void readpages_done(struct ore_io_state *ios, void *p)
{
	struct page_collect *pcol = p;

	__readpages_done(pcol);
	atomic_dec(&pcol->sbi->s_curr_pending);
	kfree(pcol);
}

static void _unlock_pcol_pages(struct page_collect *pcol, int ret, int rw)
{
	int i;

	for (i = 0; i < pcol->nr_pages; i++) {
		struct page *page = pcol->pages[i];

		if (rw == READ)
			update_read_page(page, ret);
		else
			update_write_page(page, ret);

		unlock_page(page);
	}
}
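
/* The ORE may trim an I/O so that it ends on a stripe boundary. Detect that
 * here and hand the pages that did not make it into this I/O over to the
 * next page_collect, which will submit them as the following request.
 */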
static int _maybe_not_all_in_one_io(struct ore_io_state *ios,
	struct page_collect *pcol_src, struct page_collect *pcol)
{
	/* length was wrong or offset was not page aligned */
	BUG_ON(pcol_src->nr_pages < ios->nr_pages);

	if (pcol_src->nr_pages > ios->nr_pages) {
		struct page **src_page;
		unsigned pages_less = pcol_src->nr_pages - ios->nr_pages;
		unsigned long len_less = pcol_src->length - ios->length;
		unsigned i;
		int ret;

		/* This IO was trimmed */
		pcol_src->nr_pages = ios->nr_pages;
		pcol_src->length = ios->length;

		/* Left over pages are passed to the next io */
		pcol->expected_pages += pages_less;
		pcol->nr_pages = pages_less;
		pcol->length = len_less;
		src_page = pcol_src->pages + pcol_src->nr_pages;
		pcol->pg_first = (*src_page)->index;

		ret = pcol_try_alloc(pcol);
		if (unlikely(ret))
			return ret;

		for (i = 0; i < pages_less; ++i)
			pcol->pages[i] = *src_page++;

		EXOFS_DBGMSG("Length was adjusted nr_pages=0x%x "
			"pages_less=0x%x expected_pages=0x%x "
			"next_offset=0x%llx next_len=0x%lx\n",
			pcol_src->nr_pages, pages_less, pcol->expected_pages,
			pcol->pg_first * PAGE_SIZE, pcol->length);
	}
	return 0;
}
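
/* Submit the pages collected so far as one ORE read. For read_4_write the
 * read is performed synchronously; otherwise completion is reported through
 * the readpages_done() callback.
 */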
static int read_exec(struct page_collect *pcol)
{
	struct exofs_i_info *oi = exofs_i(pcol->inode);
	struct ore_io_state *ios;
	struct page_collect *pcol_copy = NULL;
	int ret;

	if (!pcol->pages)
		return 0;

	if (!pcol->ios) {
		int ret = ore_get_rw_state(&pcol->sbi->layout, &oi->oc, true,
					   pcol->pg_first << PAGE_CACHE_SHIFT,
					   pcol->length, &pcol->ios);

		if (ret)
			return ret;
	}

	ios = pcol->ios;
	ios->pages = pcol->pages;

	if (pcol->read_4_write) {
		ore_read(pcol->ios);
		return __readpages_done(pcol);
	}

	pcol_copy = kmalloc(sizeof(*pcol_copy), GFP_KERNEL);
	if (!pcol_copy) {
		ret = -ENOMEM;
		goto err;
	}

	*pcol_copy = *pcol;
	ios->done = readpages_done;
	ios->private = pcol_copy;

	/* pages ownership was passed to pcol_copy */
	_pcol_reset(pcol);

	ret = _maybe_not_all_in_one_io(ios, pcol_copy, pcol);
	if (unlikely(ret))
		goto err;

	EXOFS_DBGMSG2("read_exec(0x%lx) offset=0x%llx length=0x%llx\n",
		pcol->inode->i_ino, _LLU(ios->offset), _LLU(ios->length));

	ret = ore_read(ios);
	if (unlikely(ret))
		goto err;

	atomic_inc(&pcol->sbi->s_curr_pending);

	return 0;

err:
	if (!pcol->read_4_write)
		_unlock_pcol_pages(pcol, ret, READ);

	pcol_free(pcol);

	kfree(pcol_copy);
	return ret;
}
/* readpage_strip is called either directly from readpage() or by the VFS from
 * within read_cache_pages(), to add one more page to be read. It will try to
 * collect as many contiguous pages as possible. If a discontinuity is
 * encountered, or it runs out of resources, it will submit the previous
 * segment and will start a new collection. Eventually the caller must submit
 * the last segment if present.
 */
static int readpage_strip(void *data, struct page *page)
{
	struct page_collect *pcol = data;
	struct inode *inode = pcol->inode;
	struct exofs_i_info *oi = exofs_i(inode);
	loff_t i_size = i_size_read(inode);
	pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
	size_t len;
	int ret;

	/* FIXME: Just for debugging, will be removed */
	if (PageUptodate(page))
		EXOFS_ERR("PageUptodate(0x%lx, 0x%lx)\n", pcol->inode->i_ino,
			  page->index);

	pcol->that_locked_page = page;

	if (page->index < end_index)
		len = PAGE_CACHE_SIZE;
	else if (page->index == end_index)
		len = i_size & ~PAGE_CACHE_MASK;
	else
		len = 0;

	if (!len || !obj_created(oi)) {
		/* this will be out of bounds, or doesn't exist yet.
		 * Current page is cleared and the request is split
		 */
		clear_highpage(page);
		SetPageUptodate(page);
		if (PageError(page))
			ClearPageError(page);

		if (!pcol->read_4_write)
			unlock_page(page);
		EXOFS_DBGMSG("readpage_strip(0x%lx) empty page len=%zx "
			     "read_4_write=%d index=0x%lx end_index=0x%lx "
			     "splitting\n", inode->i_ino, len,
			     pcol->read_4_write, page->index, end_index);

		return read_exec(pcol);
	}

try_again:

	if (unlikely(pcol->pg_first == -1)) {
		pcol->pg_first = page->index;
	} else if (unlikely((pcol->pg_first + pcol->nr_pages) !=
		   page->index)) {
		/* Discontinuity detected, split the request */
		ret = read_exec(pcol);
		if (unlikely(ret))
			goto fail;
		goto try_again;
	}

	if (!pcol->pages) {
		ret = pcol_try_alloc(pcol);
		if (unlikely(ret))
			goto fail;
	}

	if (len != PAGE_CACHE_SIZE)
		zero_user(page, len, PAGE_CACHE_SIZE - len);

	EXOFS_DBGMSG2("    readpage_strip(0x%lx, 0x%lx) len=0x%zx\n",
		     inode->i_ino, page->index, len);

	ret = pcol_add_page(pcol, page, len);
	if (ret) {
		EXOFS_DBGMSG2("Failed pcol_add_page pages[i]=%p "
			     "this_len=0x%zx nr_pages=%u length=0x%lx\n",
			     page, len, pcol->nr_pages, pcol->length);

		/* split the request, and start again with current page */
		ret = read_exec(pcol);
		if (unlikely(ret))
			goto fail;

		goto try_again;
	}

	return 0;

fail:
	/* SetPageError(page); ??? */
	unlock_page(page);
	return ret;
}
static int exofs_readpages(struct file *file, struct address_space *mapping,
			   struct list_head *pages, unsigned nr_pages)
{
	struct page_collect pcol;
	int ret;

	_pcol_init(&pcol, nr_pages, mapping->host);

	ret = read_cache_pages(mapping, pages, readpage_strip, &pcol);
	if (ret) {
		EXOFS_ERR("read_cache_pages => %d\n", ret);
		return ret;
	}

	ret = read_exec(&pcol);
	if (unlikely(ret))
		return ret;

	/* the second read_exec flushes any pages that
	 * _maybe_not_all_in_one_io carried over from a trimmed submission
	 */
	return read_exec(&pcol);
}
static int _readpage(struct page *page, bool read_4_write)
{
	struct page_collect pcol;
	int ret;

	_pcol_init(&pcol, 1, page->mapping->host);

	pcol.read_4_write = read_4_write;
	ret = readpage_strip(&pcol, page);
	if (ret) {
		EXOFS_ERR("_readpage => %d\n", ret);
		return ret;
	}

	return read_exec(&pcol);
}

/*
 * We don't need the file
 */
static int exofs_readpage(struct file *file, struct page *page)
{
	return _readpage(page, false);
}
/* Callback for osd_write. All writes are asynchronous */
static void writepages_done(struct ore_io_state *ios, void *p)
{
	struct page_collect *pcol = p;
	int i;
	u64 good_bytes;
	u64 length = 0;
	int ret = ore_check_io(ios, NULL);

	atomic_dec(&pcol->sbi->s_curr_pending);

	if (likely(!ret)) {
		good_bytes = pcol->length;
		ret = PAGE_WAS_NOT_IN_IO;
	} else {
		good_bytes = 0;
	}

	EXOFS_DBGMSG2("writepages_done(0x%lx) good_bytes=0x%llx"
		     " length=0x%lx nr_pages=%u\n",
		     pcol->inode->i_ino, _LLU(good_bytes), pcol->length,
		     pcol->nr_pages);

	for (i = 0; i < pcol->nr_pages; i++) {
		struct page *page = pcol->pages[i];
		struct inode *inode = page->mapping->host;
		int page_stat;

		if (inode != pcol->inode)
			continue; /* osd might add more pages to a bio */

		if (likely(length < good_bytes))
			page_stat = 0;
		else
			page_stat = ret;

		update_write_page(page, page_stat);
		unlock_page(page);
		EXOFS_DBGMSG2("    writepages_done(0x%lx, 0x%lx) status=%d\n",
			     inode->i_ino, page->index, page_stat);

		length += PAGE_SIZE;
	}

	pcol_free(pcol);
	kfree(pcol);
	EXOFS_DBGMSG2("writepages_done END\n");
}
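
/* r4w (read-4-write) hooks, used by the ORE when a write needs surrounding
 * pages of the stripe (e.g. for parity computation). They hand the ORE pages
 * from the page cache, short-circuiting on that_locked_page which
 * writepage_strip() already holds locked.
 */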
static struct page *__r4w_get_page(void *priv, u64 offset, bool *uptodate)
{
	struct page_collect *pcol = priv;
	pgoff_t index = offset / PAGE_SIZE;

	if (!pcol->that_locked_page ||
	    (pcol->that_locked_page->index != index)) {
		struct page *page = find_get_page(pcol->inode->i_mapping, index);

		if (!page) {
			page = find_or_create_page(pcol->inode->i_mapping,
						   index, GFP_NOFS);
			if (unlikely(!page)) {
				EXOFS_DBGMSG("grab_cache_page Failed "
					"index=0x%llx\n", _LLU(index));
				return NULL;
			}
			unlock_page(page);
		}
		if (PageDirty(page) || PageWriteback(page))
			*uptodate = true;
		else
			*uptodate = PageUptodate(page);
		EXOFS_DBGMSG("index=0x%lx uptodate=%d\n", index, *uptodate);
		return page;
	} else {
		EXOFS_DBGMSG("YES that_locked_page index=0x%lx\n",
			     pcol->that_locked_page->index);
		*uptodate = true;
		return pcol->that_locked_page;
	}
}

static void __r4w_put_page(void *priv, struct page *page)
{
	struct page_collect *pcol = priv;

	if (pcol->that_locked_page != page) {
		EXOFS_DBGMSG("index=0x%lx\n", page->index);
		page_cache_release(page);
		return;
	}
	EXOFS_DBGMSG("that_locked_page index=0x%lx\n", page->index);
}

static const struct _ore_r4w_op _r4w_op = {
	.get_page = &__r4w_get_page,
	.put_page = &__r4w_put_page,
};
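
/* Submit the pages collected so far as one asynchronous ORE write;
 * completion is reported through the writepages_done() callback.
 */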
static int write_exec(struct page_collect *pcol)
{
	struct exofs_i_info *oi = exofs_i(pcol->inode);
	struct ore_io_state *ios;
	struct page_collect *pcol_copy = NULL;
	int ret;

	if (!pcol->pages)
		return 0;

	BUG_ON(pcol->ios);
	ret = ore_get_rw_state(&pcol->sbi->layout, &oi->oc, false,
			       pcol->pg_first << PAGE_CACHE_SHIFT,
			       pcol->length, &pcol->ios);
	if (unlikely(ret))
		goto err;

	pcol_copy = kmalloc(sizeof(*pcol_copy), GFP_KERNEL);
	if (!pcol_copy) {
		EXOFS_ERR("write_exec: Failed to kmalloc(pcol)\n");
		ret = -ENOMEM;
		goto err;
	}

	*pcol_copy = *pcol;

	ios = pcol->ios;
	ios->pages = pcol_copy->pages;
	ios->done = writepages_done;
	ios->r4w = &_r4w_op;
	ios->private = pcol_copy;

	/* pages ownership was passed to pcol_copy */
	_pcol_reset(pcol);

	ret = _maybe_not_all_in_one_io(ios, pcol_copy, pcol);
	if (unlikely(ret))
		goto err;

	EXOFS_DBGMSG2("write_exec(0x%lx) offset=0x%llx length=0x%llx\n",
		pcol->inode->i_ino, _LLU(ios->offset), _LLU(ios->length));

	ret = ore_write(ios);
	if (unlikely(ret)) {
		EXOFS_ERR("write_exec: ore_write() Failed\n");
		goto err;
	}

	atomic_inc(&pcol->sbi->s_curr_pending);
	return 0;

err:
	_unlock_pcol_pages(pcol, ret, WRITE);
	pcol_free(pcol);
	kfree(pcol_copy);

	return ret;
}
/* writepage_strip is called either directly from writepage() or by the VFS
 * from within write_cache_pages(), to add one more page to be written to
 * storage. It will try to collect as many contiguous pages as possible. If a
 * discontinuity is encountered or it runs out of resources it will submit the
 * previous segment and will start a new collection.
 * Eventually the caller must submit the last segment if present.
 */
static int writepage_strip(struct page *page,
			   struct writeback_control *wbc_unused, void *data)
{
	struct page_collect *pcol = data;
	struct inode *inode = pcol->inode;
	struct exofs_i_info *oi = exofs_i(inode);
	loff_t i_size = i_size_read(inode);
	pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
	size_t len;
	int ret;

	BUG_ON(!PageLocked(page));

	ret = wait_obj_created(oi);
	if (unlikely(ret))
		goto fail;

	if (page->index < end_index)
		/* in this case, the page is within the limits of the file */
		len = PAGE_CACHE_SIZE;
	else {
		len = i_size & ~PAGE_CACHE_MASK;

		if (page->index > end_index || !len) {
			/* in this case, the page is outside the limits
			 * (truncate in progress)
			 */
			ret = write_exec(pcol);
			if (unlikely(ret))
				goto fail;
			if (PageError(page))
				ClearPageError(page);
			unlock_page(page);
			EXOFS_DBGMSG("writepage_strip(0x%lx, 0x%lx) "
				     "outside the limits\n",
				     inode->i_ino, page->index);
			return 0;
		}
	}

try_again:

	if (unlikely(pcol->pg_first == -1)) {
		pcol->pg_first = page->index;
	} else if (unlikely((pcol->pg_first + pcol->nr_pages) !=
		   page->index)) {
		/* Discontinuity detected, split the request */
		ret = write_exec(pcol);
		if (unlikely(ret))
			goto fail;

		EXOFS_DBGMSG("writepage_strip(0x%lx, 0x%lx) Discontinuity\n",
			     inode->i_ino, page->index);
		goto try_again;
	}

	if (!pcol->pages) {
		ret = pcol_try_alloc(pcol);
		if (unlikely(ret))
			goto fail;
	}

	EXOFS_DBGMSG2("    writepage_strip(0x%lx, 0x%lx) len=0x%zx\n",
		     inode->i_ino, page->index, len);

	ret = pcol_add_page(pcol, page, len);
	if (unlikely(ret)) {
		EXOFS_DBGMSG2("Failed pcol_add_page "
			     "nr_pages=%u total_length=0x%lx\n",
			     pcol->nr_pages, pcol->length);

		/* split the request, next loop will start again */
		ret = write_exec(pcol);
		if (unlikely(ret)) {
			EXOFS_DBGMSG("write_exec failed => %d\n", ret);
			goto fail;
		}

		goto try_again;
	}

	BUG_ON(PageWriteback(page));
	set_page_writeback(page);

	return 0;

fail:
	EXOFS_DBGMSG("Error: writepage_strip(0x%lx, 0x%lx)=>%d\n",
		     inode->i_ino, page->index, ret);
	set_bit(AS_EIO, &page->mapping->flags);
	unlock_page(page);
	return ret;
}
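
/* Walk the dirty pages with write_cache_pages() and submit them in as few
 * large I/Os as possible. For WB_SYNC_ALL the last partial segment is pumped
 * out as well; otherwise the remainder is re-dirtied and left for the next
 * writeout.
 */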
static int exofs_writepages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct page_collect pcol;
	long start, end, expected_pages;
	int ret;

	start = wbc->range_start >> PAGE_CACHE_SHIFT;
	end = (wbc->range_end == LLONG_MAX) ?
			start + mapping->nrpages :
			wbc->range_end >> PAGE_CACHE_SHIFT;

	if (start || end)
		expected_pages = end - start + 1;
	else
		expected_pages = mapping->nrpages;

	if (expected_pages < 32L)
		expected_pages = 32L;

	EXOFS_DBGMSG2("inode(0x%lx) wbc->start=0x%llx wbc->end=0x%llx "
		     "nrpages=%lu start=0x%lx end=0x%lx expected_pages=%ld\n",
		     mapping->host->i_ino, wbc->range_start, wbc->range_end,
		     mapping->nrpages, start, end, expected_pages);

	_pcol_init(&pcol, expected_pages, mapping->host);

	ret = write_cache_pages(mapping, wbc, writepage_strip, &pcol);
	if (unlikely(ret)) {
		EXOFS_ERR("write_cache_pages => %d\n", ret);
		return ret;
	}

	ret = write_exec(&pcol);
	if (unlikely(ret))
		return ret;

	if (wbc->sync_mode == WB_SYNC_ALL) {
		return write_exec(&pcol); /* pump the last remainder */
	} else if (pcol.nr_pages) {
		/* not SYNC, let the remainder join the next writeout */
		unsigned i;

		for (i = 0; i < pcol.nr_pages; i++) {
			struct page *page = pcol.pages[i];

			end_page_writeback(page);
			set_page_dirty(page);
			unlock_page(page);
		}
	}
	return 0;
}
/*
static int exofs_writepage(struct page *page, struct writeback_control *wbc)
{
	struct page_collect pcol;
	int ret;

	_pcol_init(&pcol, 1, page->mapping->host);

	ret = writepage_strip(page, NULL, &pcol);
	if (ret) {
		EXOFS_ERR("exofs_writepage => %d\n", ret);
		return ret;
	}

	return write_exec(&pcol);
}
*/
/* i_mutex held using inode->i_size directly */
static void _write_failed(struct inode *inode, loff_t to)
{
	if (to > inode->i_size)
		truncate_pagecache(inode, to, inode->i_size);
}

int exofs_write_begin(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, void **fsdata)
{
	int ret = 0;
	struct page *page;

	page = *pagep;
	if (page == NULL) {
		ret = simple_write_begin(file, mapping, pos, len, flags, pagep,
					 fsdata);
		if (ret) {
			EXOFS_DBGMSG("simple_write_begin failed\n");
			goto out;
		}

		page = *pagep;
	}

	/* read modify write */
	if (!PageUptodate(page) && (len != PAGE_CACHE_SIZE)) {
		loff_t i_size = i_size_read(mapping->host);
		pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
		size_t rlen;

		if (page->index < end_index)
			rlen = PAGE_CACHE_SIZE;
		else if (page->index == end_index)
			rlen = i_size & ~PAGE_CACHE_MASK;
		else
			rlen = 0;

		if (!rlen) {
			clear_highpage(page);
			SetPageUptodate(page);
			goto out;
		}

		ret = _readpage(page, true);
		if (ret) {
			/*SetPageError was done by _readpage. Is it ok?*/
			unlock_page(page);
			EXOFS_DBGMSG("_readpage failed\n");
		}
	}
out:
	if (unlikely(ret))
		_write_failed(mapping->host, pos + len);

	return ret;
}
static int exofs_write_begin_export(struct file *file,
		struct address_space *mapping,
		loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, void **fsdata)
{
	*pagep = NULL;

	return exofs_write_begin(file, mapping, pos, len, flags, pagep,
				 fsdata);
}

static int exofs_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct inode *inode = mapping->host;
	/* According to comment in simple_write_end i_mutex is held */
	loff_t i_size = inode->i_size;
	int ret;

	ret = simple_write_end(file, mapping, pos, len, copied, page, fsdata);
	if (unlikely(ret))
		_write_failed(inode, pos + len);

	/* TODO: once simple_write_end marks inode dirty remove */
	if (i_size != inode->i_size)
		mark_inode_dirty(inode);
	return ret;
}
static int exofs_releasepage(struct page *page, gfp_t gfp)
{
	EXOFS_DBGMSG("page 0x%lx\n", page->index);
	WARN_ON(1);
	return 0;
}

static void exofs_invalidatepage(struct page *page, unsigned long offset)
{
	EXOFS_DBGMSG("page 0x%lx offset 0x%lx\n", page->index, offset);
	WARN_ON(1);
}

const struct address_space_operations exofs_aops = {
	.readpage	= exofs_readpage,
	.readpages	= exofs_readpages,
	.writepage	= NULL,
	.writepages	= exofs_writepages,
	.write_begin	= exofs_write_begin_export,
	.write_end	= exofs_write_end,
	.releasepage	= exofs_releasepage,
	.set_page_dirty	= __set_page_dirty_nobuffers,
	.invalidatepage = exofs_invalidatepage,

	/* Not implemented Yet */
	.bmap		= NULL, /* TODO: use osd's OSD_ACT_READ_MAP */
	.direct_IO	= NULL, /* TODO: Should be trivial to do */

	/* With these NULL has special meaning or default is not exported */
	.get_xip_mem	= NULL,
	.migratepage	= NULL,
	.launder_page	= NULL,
	.is_partially_uptodate = NULL,
	.error_remove_page = NULL,
};
/******************************************************************************
 * INODE OPERATIONS
 *****************************************************************************/

/*
 * Test whether an inode is a fast symlink.
 */
static inline int exofs_inode_is_fast_symlink(struct inode *inode)
{
	struct exofs_i_info *oi = exofs_i(inode);

	return S_ISLNK(inode->i_mode) && (oi->i_data[0] != 0);
}
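
/* Truncate the object on the OSD to newsize and, on success, shrink the
 * in-memory inode size and page cache to match.
 */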
static int _do_truncate(struct inode *inode, loff_t newsize)
{
	struct exofs_i_info *oi = exofs_i(inode);
	struct exofs_sb_info *sbi = inode->i_sb->s_fs_info;
	int ret;

	inode->i_mtime = inode->i_ctime = CURRENT_TIME;

	ret = ore_truncate(&sbi->layout, &oi->oc, (u64)newsize);
	if (likely(!ret))
		truncate_setsize(inode, newsize);

	EXOFS_DBGMSG("(0x%lx) size=0x%llx ret=>%d\n",
		     inode->i_ino, newsize, ret);
	return ret;
}
/*
 * Set inode attributes - update size attribute on OSD if needed,
 * otherwise just call generic functions.
 */
int exofs_setattr(struct dentry *dentry, struct iattr *iattr)
{
	struct inode *inode = dentry->d_inode;
	int error;

	/* if we are about to modify an object, and it hasn't been
	 * created yet, wait
	 */
	error = wait_obj_created(exofs_i(inode));
	if (unlikely(error))
		return error;

	error = inode_change_ok(inode, iattr);
	if (unlikely(error))
		return error;

	if ((iattr->ia_valid & ATTR_SIZE) &&
	    iattr->ia_size != i_size_read(inode)) {
		error = _do_truncate(inode, iattr->ia_size);
		if (unlikely(error))
			return error;
	}

	setattr_copy(inode, iattr);
	mark_inode_dirty(inode);
	return 0;
}
static const struct osd_attr g_attr_inode_file_layout = ATTR_DEF(
	EXOFS_APAGE_FS_DATA,
	EXOFS_ATTR_INODE_FILE_LAYOUT,
	0);
static const struct osd_attr g_attr_inode_dir_layout = ATTR_DEF(
	EXOFS_APAGE_FS_DATA,
	EXOFS_ATTR_INODE_DIR_LAYOUT,
	0);
/*
 * Read the Linux inode info from the OSD, and return it as is. In exofs the
 * inode info is in an application specific page/attribute of the osd-object.
 */
static int exofs_get_inode(struct super_block *sb, struct exofs_i_info *oi,
			   struct exofs_fcb *inode)
{
	struct exofs_sb_info *sbi = sb->s_fs_info;
	struct osd_attr attrs[] = {
		[0] = g_attr_inode_data,
		[1] = g_attr_inode_file_layout,
		[2] = g_attr_inode_dir_layout,
	};
	struct ore_io_state *ios;
	struct exofs_on_disk_inode_layout *layout;
	int ret;

	ret = ore_get_io_state(&sbi->layout, &oi->oc, &ios);
	if (unlikely(ret)) {
		EXOFS_ERR("%s: ore_get_io_state failed.\n", __func__);
		return ret;
	}

	attrs[1].len = exofs_on_disk_inode_layout_size(sbi->oc.numdevs);
	attrs[2].len = exofs_on_disk_inode_layout_size(sbi->oc.numdevs);

	ios->in_attr = attrs;
	ios->in_attr_len = ARRAY_SIZE(attrs);

	ret = ore_read(ios);
	if (unlikely(ret)) {
		EXOFS_ERR("object(0x%llx) corrupted, return empty file=>%d\n",
			  _LLU(oi->one_comp.obj.id), ret);
		memset(inode, 0, sizeof(*inode));
		inode->i_mode = 0040000 | (0777 & ~022);
		/* If object is lost on target we might as well enable its
		 * deletion.
		 */
		if ((ret == -ENOENT) || (ret == -EINVAL))
			ret = 0;
		goto out;
	}

	ret = extract_attr_from_ios(ios, &attrs[0]);
	if (ret) {
		EXOFS_ERR("%s: extract_attr of inode_data failed\n", __func__);
		goto out;
	}
	WARN_ON(attrs[0].len != EXOFS_INO_ATTR_SIZE);
	memcpy(inode, attrs[0].val_ptr, EXOFS_INO_ATTR_SIZE);

	ret = extract_attr_from_ios(ios, &attrs[1]);
	if (ret) {
		EXOFS_ERR("%s: extract_attr of inode_file_layout failed\n",
			  __func__);
		goto out;
	}
	if (attrs[1].len) {
		layout = attrs[1].val_ptr;
		if (layout->gen_func != cpu_to_le16(LAYOUT_MOVING_WINDOW)) {
			EXOFS_ERR("%s: unsupported files layout %d\n",
				  __func__, layout->gen_func);
			ret = -ENOTSUPP;
			goto out;
		}
	}

	ret = extract_attr_from_ios(ios, &attrs[2]);
	if (ret) {
		EXOFS_ERR("%s: extract_attr of inode_dir_layout failed\n",
			  __func__);
		goto out;
	}
	if (attrs[2].len) {
		layout = attrs[2].val_ptr;
		if (layout->gen_func != cpu_to_le16(LAYOUT_MOVING_WINDOW)) {
			EXOFS_ERR("%s: unsupported meta-data layout %d\n",
				  __func__, layout->gen_func);
			ret = -ENOTSUPP;
			goto out;
		}
	}

out:
	ore_put_io_state(ios);
	return ret;
}
static void __oi_init(struct exofs_i_info *oi)
{
	init_waitqueue_head(&oi->i_wq);
	oi->i_flags = 0;
}
/*
 * Fill in an inode read from the OSD and set it up for use
 */
struct inode *exofs_iget(struct super_block *sb, unsigned long ino)
{
	struct exofs_i_info *oi;
	struct exofs_fcb fcb;
	struct inode *inode;
	int ret;

	inode = iget_locked(sb, ino);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;
	oi = exofs_i(inode);
	__oi_init(oi);
	exofs_init_comps(&oi->oc, &oi->one_comp, sb->s_fs_info,
			 exofs_oi_objno(oi));

	/* read the inode from the osd */
	ret = exofs_get_inode(sb, oi, &fcb);
	if (ret)
		goto bad_inode;

	set_obj_created(oi);

	/* copy stuff from on-disk struct to in-memory struct */
	inode->i_mode = le16_to_cpu(fcb.i_mode);
	inode->i_uid = le32_to_cpu(fcb.i_uid);
	inode->i_gid = le32_to_cpu(fcb.i_gid);
	set_nlink(inode, le16_to_cpu(fcb.i_links_count));
	inode->i_ctime.tv_sec = (signed)le32_to_cpu(fcb.i_ctime);
	inode->i_atime.tv_sec = (signed)le32_to_cpu(fcb.i_atime);
	inode->i_mtime.tv_sec = (signed)le32_to_cpu(fcb.i_mtime);
	inode->i_ctime.tv_nsec =
		inode->i_atime.tv_nsec = inode->i_mtime.tv_nsec = 0;
	oi->i_commit_size = le64_to_cpu(fcb.i_size);
	i_size_write(inode, oi->i_commit_size);
	inode->i_blkbits = EXOFS_BLKSHIFT;
	inode->i_generation = le32_to_cpu(fcb.i_generation);

	oi->i_dir_start_lookup = 0;

	if ((inode->i_nlink == 0) && (inode->i_mode == 0)) {
		ret = -ESTALE;
		goto bad_inode;
	}

	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
		if (fcb.i_data[0])
			inode->i_rdev =
				old_decode_dev(le32_to_cpu(fcb.i_data[0]));
		else
			inode->i_rdev =
				new_decode_dev(le32_to_cpu(fcb.i_data[1]));
	} else {
		memcpy(oi->i_data, fcb.i_data, sizeof(fcb.i_data));
	}

	inode->i_mapping->backing_dev_info = sb->s_bdi;
	if (S_ISREG(inode->i_mode)) {
		inode->i_op = &exofs_file_inode_operations;
		inode->i_fop = &exofs_file_operations;
		inode->i_mapping->a_ops = &exofs_aops;
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &exofs_dir_inode_operations;
		inode->i_fop = &exofs_dir_operations;
		inode->i_mapping->a_ops = &exofs_aops;
	} else if (S_ISLNK(inode->i_mode)) {
		if (exofs_inode_is_fast_symlink(inode))
			inode->i_op = &exofs_fast_symlink_inode_operations;
		else {
			inode->i_op = &exofs_symlink_inode_operations;
			inode->i_mapping->a_ops = &exofs_aops;
		}
	} else {
		inode->i_op = &exofs_special_inode_operations;
		if (fcb.i_data[0])
			init_special_inode(inode, inode->i_mode,
				old_decode_dev(le32_to_cpu(fcb.i_data[0])));
		else
			init_special_inode(inode, inode->i_mode,
				new_decode_dev(le32_to_cpu(fcb.i_data[1])));
	}

	unlock_new_inode(inode);
	return inode;

bad_inode:
	iget_failed(inode);
	return ERR_PTR(ret);
}
int __exofs_wait_obj_created(struct exofs_i_info *oi)
{
	if (!obj_created(oi)) {
		EXOFS_DBGMSG("!obj_created\n");
		BUG_ON(!obj_2bcreated(oi));
		wait_event(oi->i_wq, obj_created(oi));
		EXOFS_DBGMSG("wait_event done\n");
	}
	return unlikely(is_bad_inode(&oi->vfs_inode)) ? -EIO : 0;
}
/*
 * Callback function from exofs_new_inode(). The important thing is that we
 * set the obj_created flag so that other methods know that the object exists
 * on the OSD.
 */
static void create_done(struct ore_io_state *ios, void *p)
{
	struct inode *inode = p;
	struct exofs_i_info *oi = exofs_i(inode);
	struct exofs_sb_info *sbi = inode->i_sb->s_fs_info;
	int ret;

	ret = ore_check_io(ios, NULL);
	ore_put_io_state(ios);

	atomic_dec(&sbi->s_curr_pending);

	if (unlikely(ret)) {
		EXOFS_ERR("object=0x%llx creation failed in pid=0x%llx\n",
			  _LLU(exofs_oi_objno(oi)),
			  _LLU(oi->one_comp.obj.partition));
		/*TODO: When FS is corrupted creation can fail, object already
		 * exist. Get rid of this asynchronous creation, if exist
		 * increment the obj counter and try the next object. Until we
		 * succeed. All these dangling objects will be made into lost
		 * files by chkfs.exofs
		 */
	}

	set_obj_created(oi);

	wake_up(&oi->i_wq);
}
/*
 * Set up a new inode and create an object for it on the OSD
 */
struct inode *exofs_new_inode(struct inode *dir, umode_t mode)
{
	struct super_block *sb = dir->i_sb;
	struct exofs_sb_info *sbi = sb->s_fs_info;
	struct inode *inode;
	struct exofs_i_info *oi;
	struct ore_io_state *ios;
	int ret;

	inode = new_inode(sb);
	if (!inode)
		return ERR_PTR(-ENOMEM);

	oi = exofs_i(inode);
	__oi_init(oi);

	set_obj_2bcreated(oi);

	inode->i_mapping->backing_dev_info = sb->s_bdi;
	inode_init_owner(inode, dir, mode);
	inode->i_ino = sbi->s_nextid++;
	inode->i_blkbits = EXOFS_BLKSHIFT;
	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
	oi->i_commit_size = inode->i_size = 0;
	spin_lock(&sbi->s_next_gen_lock);
	inode->i_generation = sbi->s_next_generation++;
	spin_unlock(&sbi->s_next_gen_lock);
	insert_inode_hash(inode);

	exofs_init_comps(&oi->oc, &oi->one_comp, sb->s_fs_info,
			 exofs_oi_objno(oi));
	exofs_sbi_write_stats(sbi); /* Make sure new sbi->s_nextid is on disk */

	mark_inode_dirty(inode);

	ret = ore_get_io_state(&sbi->layout, &oi->oc, &ios);
	if (unlikely(ret)) {
		EXOFS_ERR("exofs_new_inode: ore_get_io_state failed\n");
		return ERR_PTR(ret);
	}

	ios->done = create_done;
	ios->private = inode;

	ret = ore_create(ios);
	if (ret) {
		ore_put_io_state(ios);
		return ERR_PTR(ret);
	}
	atomic_inc(&sbi->s_curr_pending);

	return inode;
}
/*
 * struct to pass two arguments to update_inode's callback
 */
struct updatei_args {
	struct exofs_sb_info	*sbi;
	struct exofs_fcb	fcb;
};

/*
 * Callback function from exofs_update_inode().
 */
static void updatei_done(struct ore_io_state *ios, void *p)
{
	struct updatei_args *args = p;

	ore_put_io_state(ios);

	atomic_dec(&args->sbi->s_curr_pending);

	kfree(args);
}
/*
 * Write the inode to the OSD. Just fill up the struct, and set the attribute
 * synchronously or asynchronously depending on the do_sync flag.
 */
static int exofs_update_inode(struct inode *inode, int do_sync)
{
	struct exofs_i_info *oi = exofs_i(inode);
	struct super_block *sb = inode->i_sb;
	struct exofs_sb_info *sbi = sb->s_fs_info;
	struct ore_io_state *ios;
	struct osd_attr attr;
	struct exofs_fcb *fcb;
	struct updatei_args *args;
	int ret;

	args = kzalloc(sizeof(*args), GFP_KERNEL);
	if (!args) {
		EXOFS_DBGMSG("Failed kzalloc of args\n");
		return -ENOMEM;
	}

	fcb = &args->fcb;

	fcb->i_mode = cpu_to_le16(inode->i_mode);
	fcb->i_uid = cpu_to_le32(inode->i_uid);
	fcb->i_gid = cpu_to_le32(inode->i_gid);
	fcb->i_links_count = cpu_to_le16(inode->i_nlink);
	fcb->i_ctime = cpu_to_le32(inode->i_ctime.tv_sec);
	fcb->i_atime = cpu_to_le32(inode->i_atime.tv_sec);
	fcb->i_mtime = cpu_to_le32(inode->i_mtime.tv_sec);
	oi->i_commit_size = i_size_read(inode);
	fcb->i_size = cpu_to_le64(oi->i_commit_size);
	fcb->i_generation = cpu_to_le32(inode->i_generation);

	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
		if (old_valid_dev(inode->i_rdev)) {
			fcb->i_data[0] =
				cpu_to_le32(old_encode_dev(inode->i_rdev));
			fcb->i_data[1] = 0;
		} else {
			fcb->i_data[0] = 0;
			fcb->i_data[1] =
				cpu_to_le32(new_encode_dev(inode->i_rdev));
			fcb->i_data[2] = 0;
		}
	} else
		memcpy(fcb->i_data, oi->i_data, sizeof(fcb->i_data));

	ret = ore_get_io_state(&sbi->layout, &oi->oc, &ios);
	if (unlikely(ret)) {
		EXOFS_ERR("%s: ore_get_io_state failed.\n", __func__);
		goto free_args;
	}

	attr = g_attr_inode_data;
	attr.val_ptr = fcb;
	ios->out_attr_len = 1;
	ios->out_attr = &attr;

	wait_obj_created(oi);

	if (!do_sync) {
		args->sbi = sbi;
		ios->done = updatei_done;
		ios->private = args;
	}

	ret = ore_write(ios);
	if (!do_sync && !ret) {
		atomic_inc(&sbi->s_curr_pending);
		goto out; /* deallocation in updatei_done */
	}

	ore_put_io_state(ios);

free_args:
	kfree(args);
out:
	EXOFS_DBGMSG("(0x%lx) do_sync=%d ret=>%d\n",
		     inode->i_ino, do_sync, ret);
	return ret;
}
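
/* VFS ->write_inode() entry point */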
int exofs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	/* FIXME: fix fsync and use wbc->sync_mode == WB_SYNC_ALL */
	return exofs_update_inode(inode, 1);
}
/*
 * Callback function from exofs_delete_inode() - don't have much cleaning up
 * to do.
 */
static void delete_done(struct ore_io_state *ios, void *p)
{
	struct exofs_sb_info *sbi = p;

	ore_put_io_state(ios);

	atomic_dec(&sbi->s_curr_pending);
}

/*
 * Called when the refcount of an inode reaches zero. We remove the object
 * from the OSD here. We make sure the object was created before we try and
 * delete it.
 */
void exofs_evict_inode(struct inode *inode)
{
	struct exofs_i_info *oi = exofs_i(inode);
	struct super_block *sb = inode->i_sb;
	struct exofs_sb_info *sbi = sb->s_fs_info;
	struct ore_io_state *ios;
	int ret;

	truncate_inode_pages(&inode->i_data, 0);

	/* TODO: should do better here */
	if (inode->i_nlink || is_bad_inode(inode))
		goto no_delete;

	inode->i_size = 0;
	end_writeback(inode);

	/* if we are deleting an obj that hasn't been created yet, wait.
	 * This also makes sure that create_done cannot be called with an
	 * already evicted inode.
	 */
	wait_obj_created(oi);
	/* ignore the error, attempt a remove anyway */

	/* Now Remove the OSD objects */
	ret = ore_get_io_state(&sbi->layout, &oi->oc, &ios);
	if (unlikely(ret)) {
		EXOFS_ERR("%s: ore_get_io_state failed\n", __func__);
		return;
	}

	ios->done = delete_done;
	ios->private = sbi;

	ret = ore_remove(ios);
	if (ret) {
		EXOFS_ERR("%s: ore_remove failed\n", __func__);
		ore_put_io_state(ios);
		return;
	}
	atomic_inc(&sbi->s_curr_pending);

	return;

no_delete:
	end_writeback(inode);
}