audit_tree.c
// SPDX-License-Identifier: GPL-2.0
#include "audit.h"
#include <linux/fsnotify_backend.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/kthread.h>
#include <linux/refcount.h>
#include <linux/slab.h>

struct audit_tree;
struct audit_chunk;

struct audit_tree {
	refcount_t count;
	int goner;
	struct audit_chunk *root;
	struct list_head chunks;
	struct list_head rules;
	struct list_head list;
	struct list_head same_root;
	struct rcu_head head;
	char pathname[];
};

struct audit_chunk {
	struct list_head hash;
	struct fsnotify_mark mark;
	struct list_head trees;		/* with root here */
	int dead;
	int count;
	atomic_long_t refs;
	struct rcu_head head;
	struct node {
		struct list_head list;
		struct audit_tree *owner;
		unsigned index;		/* index; upper bit indicates 'will prune' */
	} owners[];
};

static LIST_HEAD(tree_list);
static LIST_HEAD(prune_list);
static struct task_struct *prune_thread;

/*
 * One struct chunk is attached to each inode of interest.
 * We replace struct chunk on tagging/untagging.
 * Rules have pointer to struct audit_tree.
 * Rules have struct list_head rlist forming a list of rules over
 * the same tree.
 * References to struct chunk are collected at audit_inode{,_child}()
 * time and used in AUDIT_TREE rule matching.
 * These references are dropped at the same time we are calling
 * audit_free_names(), etc.
 *
 * Cyclic lists galore:
 * tree.chunks anchors chunk.owners[].list		hash_lock
 * tree.rules anchors rule.rlist			audit_filter_mutex
 * chunk.trees anchors tree.same_root			hash_lock
 * chunk.hash is a hash with middle bits of watch.inode as
 * a hash function.					RCU, hash_lock
 *
 * tree is refcounted; one reference for "some rules on rules_list refer to
 * it", one for each chunk with pointer to it.
 *
 * chunk is refcounted by embedded fsnotify_mark + .refs (non-zero refcount
 * of watch contributes 1 to .refs).
 *
 * node.index allows to get from node.list to containing chunk.
 * MSB of that sucker is stolen to mark taggings that we might have to
 * revert - several operations have very unpleasant cleanup logics and
 * that makes a difference. Some.
 */
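
/*
 * Worked example of the node.index trick (see find_chunk() below): for a
 * chunk with three owners, owners[0..2].index hold 0, 1 and 2 in their low
 * bits.  Given a struct node *p known to be owners[i] of some chunk,
 * masking off the "will prune" bit recovers i, "p - i" points at owners[0],
 * and container_of() on that gets us back to the chunk:
 *
 *	int i = p->index & ~(1U << 31);
 *	struct audit_chunk *chunk =
 *		container_of(p - i, struct audit_chunk, owners[0]);
 */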

static struct fsnotify_group *audit_tree_group;

static struct audit_tree *alloc_tree(const char *s)
{
	struct audit_tree *tree;
	tree = kmalloc(sizeof(struct audit_tree) + strlen(s) + 1, GFP_KERNEL);
	if (tree) {
		refcount_set(&tree->count, 1);
		tree->goner = 0;
		INIT_LIST_HEAD(&tree->chunks);
		INIT_LIST_HEAD(&tree->rules);
		INIT_LIST_HEAD(&tree->list);
		INIT_LIST_HEAD(&tree->same_root);
		tree->root = NULL;
		strcpy(tree->pathname, s);
	}
	return tree;
}

static inline void get_tree(struct audit_tree *tree)
{
	refcount_inc(&tree->count);
}

static inline void put_tree(struct audit_tree *tree)
{
	if (refcount_dec_and_test(&tree->count))
		kfree_rcu(tree, head);
}

/* to avoid bringing the entire thing in audit.h */
const char *audit_tree_path(struct audit_tree *tree)
{
	return tree->pathname;
}

static void free_chunk(struct audit_chunk *chunk)
{
	int i;
	for (i = 0; i < chunk->count; i++) {
		if (chunk->owners[i].owner)
			put_tree(chunk->owners[i].owner);
	}
	kfree(chunk);
}

void audit_put_chunk(struct audit_chunk *chunk)
{
	if (atomic_long_dec_and_test(&chunk->refs))
		free_chunk(chunk);
}

static void __put_chunk(struct rcu_head *rcu)
{
	struct audit_chunk *chunk = container_of(rcu, struct audit_chunk, head);
	audit_put_chunk(chunk);
}

static void audit_tree_destroy_watch(struct fsnotify_mark *entry)
{
	struct audit_chunk *chunk = container_of(entry, struct audit_chunk, mark);
	call_rcu(&chunk->head, __put_chunk);
}

static struct audit_chunk *alloc_chunk(int count)
{
	struct audit_chunk *chunk;
	size_t size;
	int i;
	size = offsetof(struct audit_chunk, owners) + count * sizeof(struct node);
	chunk = kzalloc(size, GFP_KERNEL);
	if (!chunk)
		return NULL;
	INIT_LIST_HEAD(&chunk->hash);
	INIT_LIST_HEAD(&chunk->trees);
	chunk->count = count;
	atomic_long_set(&chunk->refs, 1);
	for (i = 0; i < count; i++) {
		INIT_LIST_HEAD(&chunk->owners[i].list);
		chunk->owners[i].index = i;
	}
	fsnotify_init_mark(&chunk->mark, audit_tree_group);
	chunk->mark.mask = FS_IN_IGNORED;
	return chunk;
}

enum {HASH_SIZE = 128};
static struct list_head chunk_hash_heads[HASH_SIZE];
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(hash_lock);

/* Function to return search key in our hash from inode. */
static unsigned long inode_to_key(const struct inode *inode)
{
	return (unsigned long)inode;
}

/*
 * Function to return search key in our hash from chunk. Key 0 is special and
 * should never be present in the hash.
 */
static unsigned long chunk_to_key(struct audit_chunk *chunk)
{
	/*
	 * We have a reference to the mark so it should be attached to a
	 * connector.
	 */
	if (WARN_ON_ONCE(!chunk->mark.connector))
		return 0;
	return (unsigned long)chunk->mark.connector->inode;
}
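
/*
 * Bucket selection: the key is an inode address, whose low bits carry
 * little entropy (presumably due to slab object alignment), so we divide
 * by L1_CACHE_BYTES first and take the result modulo HASH_SIZE -
 * effectively hashing on the "middle bits" mentioned in the comment at
 * the top of this file.
 */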

static inline struct list_head *chunk_hash(unsigned long key)
{
	unsigned long n = key / L1_CACHE_BYTES;
	return chunk_hash_heads + n % HASH_SIZE;
}

/* hash_lock & entry->lock is held by caller */
static void insert_hash(struct audit_chunk *chunk)
{
	unsigned long key = chunk_to_key(chunk);
	struct list_head *list;
	if (!(chunk->mark.flags & FSNOTIFY_MARK_FLAG_ATTACHED))
		return;
	list = chunk_hash(key);
	list_add_rcu(&chunk->hash, list);
}

/* called under rcu_read_lock */
struct audit_chunk *audit_tree_lookup(const struct inode *inode)
{
	unsigned long key = inode_to_key(inode);
	struct list_head *list = chunk_hash(key);
	struct audit_chunk *p;
	list_for_each_entry_rcu(p, list, hash) {
		if (chunk_to_key(p) == key) {
			atomic_long_inc(&p->refs);
			return p;
		}
	}
	return NULL;
}

bool audit_tree_match(struct audit_chunk *chunk, struct audit_tree *tree)
{
	int n;
	for (n = 0; n < chunk->count; n++)
		if (chunk->owners[n].owner == tree)
			return true;
	return false;
}

/* tagging and untagging inodes with trees */

static struct audit_chunk *find_chunk(struct node *p)
{
	int index = p->index & ~(1U<<31);
	p -= index;
	return container_of(p, struct audit_chunk, owners[0]);
}
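
/*
 * untag_chunk() implements the "replace on untagging" rule from the top
 * comment: drop hash_lock, allocate a chunk one slot smaller, and, under
 * mark_mutex + entry->lock + hash_lock, copy the surviving owners over,
 * swap the new chunk into the hash and destroy the old mark.  If the
 * smaller chunk cannot be allocated, fall back to just clearing p->owner
 * in place.  Called and returns with hash_lock held.
 */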

static void untag_chunk(struct node *p)
{
	struct audit_chunk *chunk = find_chunk(p);
	struct fsnotify_mark *entry = &chunk->mark;
	struct audit_chunk *new = NULL;
	struct audit_tree *owner;
	int size = chunk->count - 1;
	int i, j;
	fsnotify_get_mark(entry);
	spin_unlock(&hash_lock);
	if (size)
		new = alloc_chunk(size);
	mutex_lock(&entry->group->mark_mutex);
	spin_lock(&entry->lock);
	/*
	 * mark_mutex protects mark from getting detached and thus also from
	 * mark->connector->inode getting NULL.
	 */
	if (chunk->dead || !(entry->flags & FSNOTIFY_MARK_FLAG_ATTACHED)) {
		spin_unlock(&entry->lock);
		mutex_unlock(&entry->group->mark_mutex);
		if (new)
			fsnotify_put_mark(&new->mark);
		goto out;
	}
	owner = p->owner;
	if (!size) {
		chunk->dead = 1;
		spin_lock(&hash_lock);
		list_del_init(&chunk->trees);
		if (owner->root == chunk)
			owner->root = NULL;
		list_del_init(&p->list);
		list_del_rcu(&chunk->hash);
		spin_unlock(&hash_lock);
		spin_unlock(&entry->lock);
		mutex_unlock(&entry->group->mark_mutex);
		fsnotify_destroy_mark(entry, audit_tree_group);
		goto out;
	}
	if (!new)
		goto Fallback;
	if (fsnotify_add_mark_locked(&new->mark, entry->connector->inode,
				     NULL, 1)) {
		fsnotify_put_mark(&new->mark);
		goto Fallback;
	}
	chunk->dead = 1;
	spin_lock(&hash_lock);
	list_replace_init(&chunk->trees, &new->trees);
	if (owner->root == chunk) {
		list_del_init(&owner->same_root);
		owner->root = NULL;
	}
	for (i = j = 0; j <= size; i++, j++) {
		struct audit_tree *s;
		if (&chunk->owners[j] == p) {
			list_del_init(&p->list);
			i--;
			continue;
		}
		s = chunk->owners[j].owner;
		new->owners[i].owner = s;
		new->owners[i].index = chunk->owners[j].index - j + i;
		if (!s) /* result of earlier fallback */
			continue;
		get_tree(s);
		list_replace_init(&chunk->owners[j].list, &new->owners[i].list);
	}
	list_replace_rcu(&chunk->hash, &new->hash);
	list_for_each_entry(owner, &new->trees, same_root)
		owner->root = new;
	spin_unlock(&hash_lock);
	spin_unlock(&entry->lock);
	mutex_unlock(&entry->group->mark_mutex);
	fsnotify_destroy_mark(entry, audit_tree_group);
	fsnotify_put_mark(&new->mark);	/* drop initial reference */
	goto out;
Fallback:
	// do the best we can
	spin_lock(&hash_lock);
	if (owner->root == chunk) {
		list_del_init(&owner->same_root);
		owner->root = NULL;
	}
	list_del_init(&p->list);
	p->owner = NULL;
	put_tree(owner);
	spin_unlock(&hash_lock);
	spin_unlock(&entry->lock);
	mutex_unlock(&entry->group->mark_mutex);
out:
	fsnotify_put_mark(entry);
	spin_lock(&hash_lock);
}
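
/*
 * Attach a fresh single-owner chunk to @inode on behalf of @tree: add the
 * fsnotify mark, then, under hash_lock, link the chunk into tree->chunks
 * and into the hash, recording it as tree->root if the tree has none yet.
 * Bails out (returning 0) if the tree went goner while we were blocked in
 * fsnotify_add_mark().
 */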

static int create_chunk(struct inode *inode, struct audit_tree *tree)
{
	struct fsnotify_mark *entry;
	struct audit_chunk *chunk = alloc_chunk(1);
	if (!chunk)
		return -ENOMEM;
	entry = &chunk->mark;
	if (fsnotify_add_mark(entry, inode, NULL, 0)) {
		fsnotify_put_mark(entry);
		return -ENOSPC;
	}
	spin_lock(&entry->lock);
	spin_lock(&hash_lock);
	if (tree->goner) {
		spin_unlock(&hash_lock);
		chunk->dead = 1;
		spin_unlock(&entry->lock);
		fsnotify_destroy_mark(entry, audit_tree_group);
		fsnotify_put_mark(entry);
		return 0;
	}
	chunk->owners[0].index = (1U << 31);
	chunk->owners[0].owner = tree;
	get_tree(tree);
	list_add(&chunk->owners[0].list, &tree->chunks);
	if (!tree->root) {
		tree->root = chunk;
		list_add(&tree->same_root, &chunk->trees);
	}
	insert_hash(chunk);
	spin_unlock(&hash_lock);
	spin_unlock(&entry->lock);
	fsnotify_put_mark(entry);	/* drop initial reference */
	return 0;
}
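
/*
 * tag_chunk() is the "replace on tagging" half: if the inode already
 * carries a chunk that does not list @tree among its owners, a new chunk
 * with room for one more owner is allocated, the existing owners are
 * copied over, @tree is added in the last slot with the "will prune" bit
 * set, and the new chunk replaces the old one in the hash while the old
 * mark is destroyed.  If @tree already owns the chunk, nothing is done.
 */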

/* the first tagged inode becomes root of tree */
static int tag_chunk(struct inode *inode, struct audit_tree *tree)
{
	struct fsnotify_mark *old_entry, *chunk_entry;
	struct audit_tree *owner;
	struct audit_chunk *chunk, *old;
	struct node *p;
	int n;
	old_entry = fsnotify_find_mark(&inode->i_fsnotify_marks,
				       audit_tree_group);
	if (!old_entry)
		return create_chunk(inode, tree);
	old = container_of(old_entry, struct audit_chunk, mark);
	/* are we already there? */
	spin_lock(&hash_lock);
	for (n = 0; n < old->count; n++) {
		if (old->owners[n].owner == tree) {
			spin_unlock(&hash_lock);
			fsnotify_put_mark(old_entry);
			return 0;
		}
	}
	spin_unlock(&hash_lock);
	chunk = alloc_chunk(old->count + 1);
	if (!chunk) {
		fsnotify_put_mark(old_entry);
		return -ENOMEM;
	}
	chunk_entry = &chunk->mark;
	mutex_lock(&old_entry->group->mark_mutex);
	spin_lock(&old_entry->lock);
	/*
	 * mark_mutex protects mark from getting detached and thus also from
	 * mark->connector->inode getting NULL.
	 */
	if (!(old_entry->flags & FSNOTIFY_MARK_FLAG_ATTACHED)) {
		/* old_entry is being shot, let's just lie */
		spin_unlock(&old_entry->lock);
		mutex_unlock(&old_entry->group->mark_mutex);
		fsnotify_put_mark(old_entry);
		fsnotify_put_mark(&chunk->mark);
		return -ENOENT;
	}
	if (fsnotify_add_mark_locked(chunk_entry,
				     old_entry->connector->inode, NULL, 1)) {
		spin_unlock(&old_entry->lock);
		mutex_unlock(&old_entry->group->mark_mutex);
		fsnotify_put_mark(chunk_entry);
		fsnotify_put_mark(old_entry);
		return -ENOSPC;
	}
	/*
	 * Even though we hold old_entry->lock, this is safe since
	 * chunk_entry->lock could NEVER have been grabbed before.
	 */
	spin_lock(&chunk_entry->lock);
	spin_lock(&hash_lock);
	/* we now hold old_entry->lock, chunk_entry->lock, and hash_lock */
	if (tree->goner) {
		spin_unlock(&hash_lock);
		chunk->dead = 1;
		spin_unlock(&chunk_entry->lock);
		spin_unlock(&old_entry->lock);
		mutex_unlock(&old_entry->group->mark_mutex);
		fsnotify_destroy_mark(chunk_entry, audit_tree_group);
		fsnotify_put_mark(chunk_entry);
		fsnotify_put_mark(old_entry);
		return 0;
	}
	list_replace_init(&old->trees, &chunk->trees);
	for (n = 0, p = chunk->owners; n < old->count; n++, p++) {
		struct audit_tree *s = old->owners[n].owner;
		p->owner = s;
		p->index = old->owners[n].index;
		if (!s) /* result of fallback in untag */
			continue;
		get_tree(s);
		list_replace_init(&old->owners[n].list, &p->list);
	}
	p->index = (chunk->count - 1) | (1U<<31);
	p->owner = tree;
	get_tree(tree);
	list_add(&p->list, &tree->chunks);
	list_replace_rcu(&old->hash, &chunk->hash);
	list_for_each_entry(owner, &chunk->trees, same_root)
		owner->root = chunk;
	old->dead = 1;
	if (!tree->root) {
		tree->root = chunk;
		list_add(&tree->same_root, &chunk->trees);
	}
	spin_unlock(&hash_lock);
	spin_unlock(&chunk_entry->lock);
	spin_unlock(&old_entry->lock);
	mutex_unlock(&old_entry->group->mark_mutex);
	fsnotify_destroy_mark(old_entry, audit_tree_group);
	fsnotify_put_mark(chunk_entry);	/* drop initial reference */
	fsnotify_put_mark(old_entry);	/* pair to fsnotify_find mark_entry */
	return 0;
}

static void audit_tree_log_remove_rule(struct audit_krule *rule)
{
	struct audit_buffer *ab;
	ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE);
	if (unlikely(!ab))
		return;
	audit_log_format(ab, "op=remove_rule");
	audit_log_format(ab, " dir=");
	audit_log_untrustedstring(ab, rule->tree->pathname);
	audit_log_key(ab, rule->filterkey);
	audit_log_format(ab, " list=%d res=1", rule->listnr);
	audit_log_end(ab);
}
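
/*
 * Detach every rule hanging off @tree: log a config-change record for each
 * fully instantiated rule, drop its exe mark if any, unhook it from the
 * filter lists and free it via RCU.  Runs under audit_filter_mutex.
 */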

static void kill_rules(struct audit_tree *tree)
{
	struct audit_krule *rule, *next;
	struct audit_entry *entry;
	list_for_each_entry_safe(rule, next, &tree->rules, rlist) {
		entry = container_of(rule, struct audit_entry, rule);
		list_del_init(&rule->rlist);
		if (rule->tree) {
			/* not a half-baked one */
			audit_tree_log_remove_rule(rule);
			if (entry->rule.exe)
				audit_remove_mark(entry->rule.exe);
			rule->tree = NULL;
			list_del_rcu(&entry->list);
			list_del(&entry->rule.list);
			call_rcu(&entry->rcu, audit_free_rule_rcu);
		}
	}
}

/*
 * finish killing struct audit_tree
 */
static void prune_one(struct audit_tree *victim)
{
	spin_lock(&hash_lock);
	while (!list_empty(&victim->chunks)) {
		struct node *p;
		p = list_entry(victim->chunks.next, struct node, list);
		untag_chunk(p);
	}
	spin_unlock(&hash_lock);
	put_tree(victim);
}
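
/*
 * trim_marked() relies on the "will prune" bit that tag_chunk(),
 * audit_trim_trees() and audit_tag_tree() set in node->index: the first
 * loop below moves all marked nodes to the front of tree->chunks, the
 * second untags from the head until it hits the first unmarked node.
 * If that leaves the tree without a root, the tree itself is killed.
 */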

/* trim the uncommitted chunks from tree */
static void trim_marked(struct audit_tree *tree)
{
	struct list_head *p, *q;
	spin_lock(&hash_lock);
	if (tree->goner) {
		spin_unlock(&hash_lock);
		return;
	}
	/* reorder */
	for (p = tree->chunks.next; p != &tree->chunks; p = q) {
		struct node *node = list_entry(p, struct node, list);
		q = p->next;
		if (node->index & (1U<<31)) {
			list_del_init(p);
			list_add(p, &tree->chunks);
		}
	}
	while (!list_empty(&tree->chunks)) {
		struct node *node;
		node = list_entry(tree->chunks.next, struct node, list);
		/* have we run out of marked? */
		if (!(node->index & (1U<<31)))
			break;
		untag_chunk(node);
	}
	if (!tree->root && !tree->goner) {
		tree->goner = 1;
		spin_unlock(&hash_lock);
		mutex_lock(&audit_filter_mutex);
		kill_rules(tree);
		list_del_init(&tree->list);
		mutex_unlock(&audit_filter_mutex);
		prune_one(tree);
	} else {
		spin_unlock(&hash_lock);
	}
}

static void audit_schedule_prune(void);

/* called with audit_filter_mutex */
int audit_remove_tree_rule(struct audit_krule *rule)
{
	struct audit_tree *tree;
	tree = rule->tree;
	if (tree) {
		spin_lock(&hash_lock);
		list_del_init(&rule->rlist);
		if (list_empty(&tree->rules) && !tree->goner) {
			tree->root = NULL;
			list_del_init(&tree->same_root);
			tree->goner = 1;
			list_move(&tree->list, &prune_list);
			rule->tree = NULL;
			spin_unlock(&hash_lock);
			audit_schedule_prune();
			return 1;
		}
		rule->tree = NULL;
		spin_unlock(&hash_lock);
		return 1;
	}
	return 0;
}

static int compare_root(struct vfsmount *mnt, void *arg)
{
	return inode_to_key(d_backing_inode(mnt->mnt_root)) ==
	       (unsigned long)arg;
}
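
/*
 * audit_trim_trees() walks tree_list with a dummy "cursor" entry so that
 * audit_filter_mutex can be dropped while each tree's path is re-resolved
 * and its mounts collected; the cursor keeps our place across the unlock.
 * Chunks that no longer correspond to anything under the tree's current
 * mounts keep the "will prune" bit and get trimmed via trim_marked().
 */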

void audit_trim_trees(void)
{
	struct list_head cursor;
	mutex_lock(&audit_filter_mutex);
	list_add(&cursor, &tree_list);
	while (cursor.next != &tree_list) {
		struct audit_tree *tree;
		struct path path;
		struct vfsmount *root_mnt;
		struct node *node;
		int err;
		tree = container_of(cursor.next, struct audit_tree, list);
		get_tree(tree);
		list_del(&cursor);
		list_add(&cursor, &tree->list);
		mutex_unlock(&audit_filter_mutex);
		err = kern_path(tree->pathname, 0, &path);
		if (err)
			goto skip_it;
		root_mnt = collect_mounts(&path);
		path_put(&path);
		if (IS_ERR(root_mnt))
			goto skip_it;
		spin_lock(&hash_lock);
		list_for_each_entry(node, &tree->chunks, list) {
			struct audit_chunk *chunk = find_chunk(node);
			/* this could be NULL if the watch is dying elsewhere... */
			node->index |= 1U<<31;
			if (iterate_mounts(compare_root,
					   (void *)chunk_to_key(chunk),
					   root_mnt))
				node->index &= ~(1U<<31);
		}
		spin_unlock(&hash_lock);
		trim_marked(tree);
		drop_collected_mounts(root_mnt);
skip_it:
		put_tree(tree);
		mutex_lock(&audit_filter_mutex);
	}
	list_del(&cursor);
	mutex_unlock(&audit_filter_mutex);
}
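
/*
 * Validate and allocate the audit_tree for a new tree rule: the path must
 * be absolute, the rule must live on the exit filter list, use Audit_equal
 * as the comparison, and must not already carry an inode filter, a watch
 * or a tree.
 */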

int audit_make_tree(struct audit_krule *rule, char *pathname, u32 op)
{
	if (pathname[0] != '/' ||
	    rule->listnr != AUDIT_FILTER_EXIT ||
	    op != Audit_equal ||
	    rule->inode_f || rule->watch || rule->tree)
		return -EINVAL;
	rule->tree = alloc_tree(pathname);
	if (!rule->tree)
		return -ENOMEM;
	return 0;
}

void audit_put_tree(struct audit_tree *tree)
{
	put_tree(tree);
}

static int tag_mount(struct vfsmount *mnt, void *arg)
{
	return tag_chunk(d_backing_inode(mnt->mnt_root), arg);
}

/*
 * That gets run when evict_chunk() ends up needing to kill audit_tree.
 * Runs from a separate thread.
 */
static int prune_tree_thread(void *unused)
{
	for (;;) {
		if (list_empty(&prune_list)) {
			set_current_state(TASK_INTERRUPTIBLE);
			schedule();
		}
		mutex_lock(&audit_cmd_mutex);
		mutex_lock(&audit_filter_mutex);
		while (!list_empty(&prune_list)) {
			struct audit_tree *victim;
			victim = list_entry(prune_list.next,
					    struct audit_tree, list);
			list_del_init(&victim->list);
			mutex_unlock(&audit_filter_mutex);
			prune_one(victim);
			mutex_lock(&audit_filter_mutex);
		}
		mutex_unlock(&audit_filter_mutex);
		mutex_unlock(&audit_cmd_mutex);
	}
	return 0;
}

static int audit_launch_prune(void)
{
	if (prune_thread)
		return 0;
	prune_thread = kthread_run(prune_tree_thread, NULL,
				   "audit_prune_tree");
	if (IS_ERR(prune_thread)) {
		pr_err("cannot start thread audit_prune_tree");
		prune_thread = NULL;
		return -ENOMEM;
	}
	return 0;
}
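
/*
 * audit_add_tree_rule() first tries to share an existing audit_tree with
 * the same pathname; otherwise it inserts the freshly allocated one into
 * tree_list, drops audit_filter_mutex to resolve the path and tag every
 * mount under it, and only then commits rule->tree.  If the rule was
 * removed while the mutex was dropped (its rlist became empty), the tree
 * reference is dropped and -ENOENT returned.
 */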

/* called with audit_filter_mutex */
int audit_add_tree_rule(struct audit_krule *rule)
{
	struct audit_tree *seed = rule->tree, *tree;
	struct path path;
	struct vfsmount *mnt;
	int err;
	rule->tree = NULL;
	list_for_each_entry(tree, &tree_list, list) {
		if (!strcmp(seed->pathname, tree->pathname)) {
			put_tree(seed);
			rule->tree = tree;
			list_add(&rule->rlist, &tree->rules);
			return 0;
		}
	}
	tree = seed;
	list_add(&tree->list, &tree_list);
	list_add(&rule->rlist, &tree->rules);
	/* do not set rule->tree yet */
	mutex_unlock(&audit_filter_mutex);
	if (unlikely(!prune_thread)) {
		err = audit_launch_prune();
		if (err)
			goto Err;
	}
	err = kern_path(tree->pathname, 0, &path);
	if (err)
		goto Err;
	mnt = collect_mounts(&path);
	path_put(&path);
	if (IS_ERR(mnt)) {
		err = PTR_ERR(mnt);
		goto Err;
	}
	get_tree(tree);
	err = iterate_mounts(tag_mount, tree, mnt);
	drop_collected_mounts(mnt);
	if (!err) {
		struct node *node;
		spin_lock(&hash_lock);
		list_for_each_entry(node, &tree->chunks, list)
			node->index &= ~(1U<<31);
		spin_unlock(&hash_lock);
	} else {
		trim_marked(tree);
		goto Err;
	}
	mutex_lock(&audit_filter_mutex);
	if (list_empty(&rule->rlist)) {
		put_tree(tree);
		return -ENOENT;
	}
	rule->tree = tree;
	put_tree(tree);
	return 0;
Err:
	mutex_lock(&audit_filter_mutex);
	list_del_init(&tree->list);
	list_del_init(&tree->rules);
	put_tree(tree);
	return err;
}
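
/*
 * Tag the mounts collected under @new into every tree whose root covers
 * @old.  Walks tree_list with a cursor, like audit_trim_trees(); a second
 * dummy entry, the barrier, separates the trees already handled (they get
 * re-inserted at the head of the list) so that the second loop can commit
 * their new taggings - or trim them if any tagging failed.
 */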

int audit_tag_tree(char *old, char *new)
{
	struct list_head cursor, barrier;
	int failed = 0;
	struct path path1, path2;
	struct vfsmount *tagged;
	int err;
	err = kern_path(new, 0, &path2);
	if (err)
		return err;
	tagged = collect_mounts(&path2);
	path_put(&path2);
	if (IS_ERR(tagged))
		return PTR_ERR(tagged);
	err = kern_path(old, 0, &path1);
	if (err) {
		drop_collected_mounts(tagged);
		return err;
	}
	mutex_lock(&audit_filter_mutex);
	list_add(&barrier, &tree_list);
	list_add(&cursor, &barrier);
	while (cursor.next != &tree_list) {
		struct audit_tree *tree;
		int good_one = 0;
		tree = container_of(cursor.next, struct audit_tree, list);
		get_tree(tree);
		list_del(&cursor);
		list_add(&cursor, &tree->list);
		mutex_unlock(&audit_filter_mutex);
		err = kern_path(tree->pathname, 0, &path2);
		if (!err) {
			good_one = path_is_under(&path1, &path2);
			path_put(&path2);
		}
		if (!good_one) {
			put_tree(tree);
			mutex_lock(&audit_filter_mutex);
			continue;
		}
		failed = iterate_mounts(tag_mount, tree, tagged);
		if (failed) {
			put_tree(tree);
			mutex_lock(&audit_filter_mutex);
			break;
		}
		mutex_lock(&audit_filter_mutex);
		spin_lock(&hash_lock);
		if (!tree->goner) {
			list_del(&tree->list);
			list_add(&tree->list, &tree_list);
		}
		spin_unlock(&hash_lock);
		put_tree(tree);
	}
	while (barrier.prev != &tree_list) {
		struct audit_tree *tree;
		tree = container_of(barrier.prev, struct audit_tree, list);
		get_tree(tree);
		list_del(&tree->list);
		list_add(&tree->list, &barrier);
		mutex_unlock(&audit_filter_mutex);
		if (!failed) {
			struct node *node;
			spin_lock(&hash_lock);
			list_for_each_entry(node, &tree->chunks, list)
				node->index &= ~(1U<<31);
			spin_unlock(&hash_lock);
		} else {
			trim_marked(tree);
		}
		put_tree(tree);
		mutex_lock(&audit_filter_mutex);
	}
	list_del(&barrier);
	list_del(&cursor);
	mutex_unlock(&audit_filter_mutex);
	path_put(&path1);
	drop_collected_mounts(tagged);
	return failed;
}

static void audit_schedule_prune(void)
{
	wake_up_process(prune_thread);
}

/*
 * ... and that one is done if evict_chunk() decides to delay until the end
 * of syscall. Runs synchronously.
 */
void audit_kill_trees(struct list_head *list)
{
	mutex_lock(&audit_cmd_mutex);
	mutex_lock(&audit_filter_mutex);
	while (!list_empty(list)) {
		struct audit_tree *victim;
		victim = list_entry(list->next, struct audit_tree, list);
		kill_rules(victim);
		list_del_init(&victim->list);
		mutex_unlock(&audit_filter_mutex);
		prune_one(victim);
		mutex_lock(&audit_filter_mutex);
	}
	mutex_unlock(&audit_filter_mutex);
	mutex_unlock(&audit_cmd_mutex);
}

/*
 * Here comes the stuff asynchronous to auditctl operations
 */
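
/*
 * Called (via audit_tree_freeing_mark) when the watched inode is going
 * away: detach the chunk from all of its owning trees and from the hash,
 * and queue those trees for killing - on the prune thread's prune_list,
 * or on the current syscall's postponed list if there is one, in which
 * case audit_kill_trees() finishes the job at syscall exit.
 */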

static void evict_chunk(struct audit_chunk *chunk)
{
	struct audit_tree *owner;
	struct list_head *postponed = audit_killed_trees();
	int need_prune = 0;
	int n;
	if (chunk->dead)
		return;
	chunk->dead = 1;
	mutex_lock(&audit_filter_mutex);
	spin_lock(&hash_lock);
	while (!list_empty(&chunk->trees)) {
		owner = list_entry(chunk->trees.next,
				   struct audit_tree, same_root);
		owner->goner = 1;
		owner->root = NULL;
		list_del_init(&owner->same_root);
		spin_unlock(&hash_lock);
		if (!postponed) {
			kill_rules(owner);
			list_move(&owner->list, &prune_list);
			need_prune = 1;
		} else {
			list_move(&owner->list, postponed);
		}
		spin_lock(&hash_lock);
	}
	list_del_rcu(&chunk->hash);
	for (n = 0; n < chunk->count; n++)
		list_del_init(&chunk->owners[n].list);
	spin_unlock(&hash_lock);
	mutex_unlock(&audit_filter_mutex);
	if (need_prune)
		audit_schedule_prune();
}
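
/*
 * We never act on the events themselves; the marks appear to exist purely
 * so that ->freeing_mark tells us the inode is being evicted.  Hence the
 * empty handler below.
 */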

static int audit_tree_handle_event(struct fsnotify_group *group,
				   struct inode *to_tell,
				   struct fsnotify_mark *inode_mark,
				   struct fsnotify_mark *vfsmount_mark,
				   u32 mask, const void *data, int data_type,
				   const unsigned char *file_name, u32 cookie,
				   struct fsnotify_iter_info *iter_info)
{
	return 0;
}

static void audit_tree_freeing_mark(struct fsnotify_mark *entry, struct fsnotify_group *group)
{
	struct audit_chunk *chunk = container_of(entry, struct audit_chunk, mark);
	evict_chunk(chunk);
	/*
	 * We are guaranteed to have at least one reference to the mark from
	 * either the inode or the caller of fsnotify_destroy_mark().
	 */
	BUG_ON(atomic_read(&entry->refcnt) < 1);
}

static const struct fsnotify_ops audit_tree_ops = {
	.handle_event = audit_tree_handle_event,
	.freeing_mark = audit_tree_freeing_mark,
	.free_mark = audit_tree_destroy_watch,
};

static int __init audit_tree_init(void)
{
	int i;
	audit_tree_group = fsnotify_alloc_group(&audit_tree_ops);
	if (IS_ERR(audit_tree_group))
		audit_panic("cannot initialize fsnotify group for rectree watches");
	for (i = 0; i < HASH_SIZE; i++)
		INIT_LIST_HEAD(&chunk_hash_heads[i]);
	return 0;
}
__initcall(audit_tree_init);