x_tables.c

/*
 * x_tables core - Backend for {ip,ip6,arp}_tables
 *
 * Copyright (C) 2006-2006 Harald Welte <laforge@netfilter.org>
 *
 * Based on existing ip_tables code which is
 *   Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
 *   Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/socket.h>
#include <linux/net.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <linux/mutex.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/audit.h>
#include <net/net_namespace.h>

#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_arp.h>
#include <linux/netfilter_ipv4/ip_tables.h>
#include <linux/netfilter_ipv6/ip6_tables.h>
#include <linux/netfilter_arp/arp_tables.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
MODULE_DESCRIPTION("{ip,ip6,arp,eb}_tables backend module");
#define SMP_ALIGN(x) (((x) + SMP_CACHE_BYTES-1) & ~(SMP_CACHE_BYTES-1))

struct compat_delta {
        unsigned int offset; /* offset in kernel */
        int delta; /* delta in 32bit user land */
};

struct xt_af {
        struct mutex mutex;
        struct list_head match;
        struct list_head target;
#ifdef CONFIG_COMPAT
        struct mutex compat_mutex;
        struct compat_delta *compat_tab;
        unsigned int number; /* number of slots in compat_tab[] */
        unsigned int cur; /* number of used slots in compat_tab[] */
#endif
};

static struct xt_af *xt;

static const char *const xt_prefix[NFPROTO_NUMPROTO] = {
        [NFPROTO_UNSPEC] = "x",
        [NFPROTO_IPV4]   = "ip",
        [NFPROTO_ARP]    = "arp",
        [NFPROTO_BRIDGE] = "eb",
        [NFPROTO_IPV6]   = "ip6",
};

/* Allow this many total (re)entries. */
static const unsigned int xt_jumpstack_multiplier = 2;
/* Registration hooks for targets. */
int
xt_register_target(struct xt_target *target)
{
        u_int8_t af = target->family;
        int ret;

        ret = mutex_lock_interruptible(&xt[af].mutex);
        if (ret != 0)
                return ret;
        list_add(&target->list, &xt[af].target);
        mutex_unlock(&xt[af].mutex);
        return ret;
}
EXPORT_SYMBOL(xt_register_target);

void
xt_unregister_target(struct xt_target *target)
{
        u_int8_t af = target->family;

        mutex_lock(&xt[af].mutex);
        list_del(&target->list);
        mutex_unlock(&xt[af].mutex);
}
EXPORT_SYMBOL(xt_unregister_target);

int
xt_register_targets(struct xt_target *target, unsigned int n)
{
        unsigned int i;
        int err = 0;

        for (i = 0; i < n; i++) {
                err = xt_register_target(&target[i]);
                if (err)
                        goto err;
        }
        return err;

err:
        if (i > 0)
                xt_unregister_targets(target, i);
        return err;
}
EXPORT_SYMBOL(xt_register_targets);

void
xt_unregister_targets(struct xt_target *target, unsigned int n)
{
        while (n-- > 0)
                xt_unregister_target(&target[n]);
}
EXPORT_SYMBOL(xt_unregister_targets);
int
xt_register_match(struct xt_match *match)
{
        u_int8_t af = match->family;
        int ret;

        ret = mutex_lock_interruptible(&xt[af].mutex);
        if (ret != 0)
                return ret;
        list_add(&match->list, &xt[af].match);
        mutex_unlock(&xt[af].mutex);
        return ret;
}
EXPORT_SYMBOL(xt_register_match);

void
xt_unregister_match(struct xt_match *match)
{
        u_int8_t af = match->family;

        mutex_lock(&xt[af].mutex);
        list_del(&match->list);
        mutex_unlock(&xt[af].mutex);
}
EXPORT_SYMBOL(xt_unregister_match);

int
xt_register_matches(struct xt_match *match, unsigned int n)
{
        unsigned int i;
        int err = 0;

        for (i = 0; i < n; i++) {
                err = xt_register_match(&match[i]);
                if (err)
                        goto err;
        }
        return err;

err:
        if (i > 0)
                xt_unregister_matches(match, i);
        return err;
}
EXPORT_SYMBOL(xt_register_matches);

void
xt_unregister_matches(struct xt_match *match, unsigned int n)
{
        while (n-- > 0)
                xt_unregister_match(&match[n]);
}
EXPORT_SYMBOL(xt_unregister_matches);
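
/*
 * Usage sketch: an extension module typically registers its match(es)
 * from module_init() and unregisters them on exit.  The names foo_mt,
 * xt_foo_info, foo_mt_reg, foo_mt_init and foo_mt_exit below are
 * hypothetical, shown only to illustrate the registration API.
 *
 *      static struct xt_match foo_mt_reg __read_mostly = {
 *              .name      = "foo",
 *              .revision  = 0,
 *              .family    = NFPROTO_UNSPEC,
 *              .match     = foo_mt,
 *              .matchsize = sizeof(struct xt_foo_info),
 *              .me        = THIS_MODULE,
 *      };
 *
 *      static int __init foo_mt_init(void)
 *      {
 *              return xt_register_match(&foo_mt_reg);
 *      }
 *
 *      static void __exit foo_mt_exit(void)
 *      {
 *              xt_unregister_match(&foo_mt_reg);
 *      }
 */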
/*
 * These are weird, but module loading must not be done with mutex
 * held (since they will register), and we have to have a single
 * function to use.
 */

/* Find match, grabs ref.  Returns ERR_PTR() on error. */
struct xt_match *xt_find_match(u8 af, const char *name, u8 revision)
{
        struct xt_match *m;
        int err = -ENOENT;

        if (mutex_lock_interruptible(&xt[af].mutex) != 0)
                return ERR_PTR(-EINTR);

        list_for_each_entry(m, &xt[af].match, list) {
                if (strcmp(m->name, name) == 0) {
                        if (m->revision == revision) {
                                if (try_module_get(m->me)) {
                                        mutex_unlock(&xt[af].mutex);
                                        return m;
                                }
                        } else
                                err = -EPROTOTYPE; /* Found something. */
                }
        }
        mutex_unlock(&xt[af].mutex);

        if (af != NFPROTO_UNSPEC)
                /* Try searching again in the family-independent list */
                return xt_find_match(NFPROTO_UNSPEC, name, revision);

        return ERR_PTR(err);
}
EXPORT_SYMBOL(xt_find_match);

struct xt_match *
xt_request_find_match(uint8_t nfproto, const char *name, uint8_t revision)
{
        struct xt_match *match;

        match = xt_find_match(nfproto, name, revision);
        if (IS_ERR(match)) {
                request_module("%st_%s", xt_prefix[nfproto], name);
                match = xt_find_match(nfproto, name, revision);
        }

        return match;
}
EXPORT_SYMBOL_GPL(xt_request_find_match);

/* Find target, grabs ref.  Returns ERR_PTR() on error. */
struct xt_target *xt_find_target(u8 af, const char *name, u8 revision)
{
        struct xt_target *t;
        int err = -ENOENT;

        if (mutex_lock_interruptible(&xt[af].mutex) != 0)
                return ERR_PTR(-EINTR);

        list_for_each_entry(t, &xt[af].target, list) {
                if (strcmp(t->name, name) == 0) {
                        if (t->revision == revision) {
                                if (try_module_get(t->me)) {
                                        mutex_unlock(&xt[af].mutex);
                                        return t;
                                }
                        } else
                                err = -EPROTOTYPE; /* Found something. */
                }
        }
        mutex_unlock(&xt[af].mutex);

        if (af != NFPROTO_UNSPEC)
                /* Try searching again in the family-independent list */
                return xt_find_target(NFPROTO_UNSPEC, name, revision);

        return ERR_PTR(err);
}
EXPORT_SYMBOL(xt_find_target);

struct xt_target *xt_request_find_target(u8 af, const char *name, u8 revision)
{
        struct xt_target *target;

        target = xt_find_target(af, name, revision);
        if (IS_ERR(target)) {
                request_module("%st_%s", xt_prefix[af], name);
                target = xt_find_target(af, name, revision);
        }

        return target;
}
EXPORT_SYMBOL_GPL(xt_request_find_target);
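
/*
 * Usage sketch: table code resolves a target by name, letting
 * request_module() autoload the extension if needed, and drops the
 * module reference once done.  The entry pointer "e" is hypothetical.
 *
 *      struct xt_target *t;
 *
 *      t = xt_request_find_target(NFPROTO_IPV4, e->target.u.user.name,
 *                                 e->target.u.user.revision);
 *      if (IS_ERR(t))
 *              return PTR_ERR(t);
 *      ... use the target ...
 *      module_put(t->me);
 */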
static int match_revfn(u8 af, const char *name, u8 revision, int *bestp)
{
        const struct xt_match *m;
        int have_rev = 0;

        list_for_each_entry(m, &xt[af].match, list) {
                if (strcmp(m->name, name) == 0) {
                        if (m->revision > *bestp)
                                *bestp = m->revision;
                        if (m->revision == revision)
                                have_rev = 1;
                }
        }

        if (af != NFPROTO_UNSPEC && !have_rev)
                return match_revfn(NFPROTO_UNSPEC, name, revision, bestp);

        return have_rev;
}

static int target_revfn(u8 af, const char *name, u8 revision, int *bestp)
{
        const struct xt_target *t;
        int have_rev = 0;

        list_for_each_entry(t, &xt[af].target, list) {
                if (strcmp(t->name, name) == 0) {
                        if (t->revision > *bestp)
                                *bestp = t->revision;
                        if (t->revision == revision)
                                have_rev = 1;
                }
        }

        if (af != NFPROTO_UNSPEC && !have_rev)
                return target_revfn(NFPROTO_UNSPEC, name, revision, bestp);

        return have_rev;
}

/* Returns true or false (if no such extension at all) */
int xt_find_revision(u8 af, const char *name, u8 revision, int target,
                     int *err)
{
        int have_rev, best = -1;

        if (mutex_lock_interruptible(&xt[af].mutex) != 0) {
                *err = -EINTR;
                return 1;
        }
        if (target == 1)
                have_rev = target_revfn(af, name, revision, &best);
        else
                have_rev = match_revfn(af, name, revision, &best);
        mutex_unlock(&xt[af].mutex);

        /* Nothing at all?  Return 0 to try loading module. */
        if (best == -1) {
                *err = -ENOENT;
                return 0;
        }

        *err = best;
        if (!have_rev)
                *err = -EPROTONOSUPPORT;
        return 1;
}
EXPORT_SYMBOL_GPL(xt_find_revision);
static char *textify_hooks(char *buf, size_t size, unsigned int mask)
{
        static const char *const names[] = {
                "PREROUTING", "INPUT", "FORWARD",
                "OUTPUT", "POSTROUTING", "BROUTING",
        };
        unsigned int i;
        char *p = buf;
        bool np = false;
        int res;

        *p = '\0';
        for (i = 0; i < ARRAY_SIZE(names); ++i) {
                if (!(mask & (1 << i)))
                        continue;
                res = snprintf(p, size, "%s%s", np ? "/" : "", names[i]);
                if (res > 0) {
                        size -= res;
                        p += res;
                }
                np = true;
        }

        return buf;
}

int xt_check_match(struct xt_mtchk_param *par,
                   unsigned int size, u_int8_t proto, bool inv_proto)
{
        int ret;

        if (XT_ALIGN(par->match->matchsize) != size &&
            par->match->matchsize != -1) {
                /*
                 * ebt_among is exempt from centralized matchsize checking
                 * because it uses a dynamic-size data set.
                 */
                pr_err("%s_tables: %s.%u match: invalid size "
                       "%u (kernel) != (user) %u\n",
                       xt_prefix[par->family], par->match->name,
                       par->match->revision,
                       XT_ALIGN(par->match->matchsize), size);
                return -EINVAL;
        }
        if (par->match->table != NULL &&
            strcmp(par->match->table, par->table) != 0) {
                pr_err("%s_tables: %s match: only valid in %s table, not %s\n",
                       xt_prefix[par->family], par->match->name,
                       par->match->table, par->table);
                return -EINVAL;
        }
        if (par->match->hooks && (par->hook_mask & ~par->match->hooks) != 0) {
                char used[64], allow[64];

                pr_err("%s_tables: %s match: used from hooks %s, but only "
                       "valid from %s\n",
                       xt_prefix[par->family], par->match->name,
                       textify_hooks(used, sizeof(used), par->hook_mask),
                       textify_hooks(allow, sizeof(allow), par->match->hooks));
                return -EINVAL;
        }
        if (par->match->proto && (par->match->proto != proto || inv_proto)) {
                pr_err("%s_tables: %s match: only valid for protocol %u\n",
                       xt_prefix[par->family], par->match->name,
                       par->match->proto);
                return -EINVAL;
        }
        if (par->match->checkentry != NULL) {
                ret = par->match->checkentry(par);
                if (ret < 0)
                        return ret;
                else if (ret > 0)
                        /* Flag up potential errors. */
                        return -EIO;
        }
        return 0;
}
EXPORT_SYMBOL_GPL(xt_check_match);
#ifdef CONFIG_COMPAT
int xt_compat_add_offset(u_int8_t af, unsigned int offset, int delta)
{
        struct xt_af *xp = &xt[af];

        if (!xp->compat_tab) {
                if (!xp->number)
                        return -EINVAL;
                xp->compat_tab = vmalloc(sizeof(struct compat_delta) * xp->number);
                if (!xp->compat_tab)
                        return -ENOMEM;
                xp->cur = 0;
        }

        if (xp->cur >= xp->number)
                return -EINVAL;

        if (xp->cur)
                delta += xp->compat_tab[xp->cur - 1].delta;
        xp->compat_tab[xp->cur].offset = offset;
        xp->compat_tab[xp->cur].delta = delta;
        xp->cur++;
        return 0;
}
EXPORT_SYMBOL_GPL(xt_compat_add_offset);

void xt_compat_flush_offsets(u_int8_t af)
{
        if (xt[af].compat_tab) {
                vfree(xt[af].compat_tab);
                xt[af].compat_tab = NULL;
                xt[af].number = 0;
                xt[af].cur = 0;
        }
}
EXPORT_SYMBOL_GPL(xt_compat_flush_offsets);

int xt_compat_calc_jump(u_int8_t af, unsigned int offset)
{
        struct compat_delta *tmp = xt[af].compat_tab;
        int mid, left = 0, right = xt[af].cur - 1;

        while (left <= right) {
                mid = (left + right) >> 1;
                if (offset > tmp[mid].offset)
                        left = mid + 1;
                else if (offset < tmp[mid].offset)
                        right = mid - 1;
                else
                        return mid ? tmp[mid - 1].delta : 0;
        }
        return left ? tmp[left - 1].delta : 0;
}
EXPORT_SYMBOL_GPL(xt_compat_calc_jump);

void xt_compat_init_offsets(u_int8_t af, unsigned int number)
{
        xt[af].number = number;
        xt[af].cur = 0;
}
EXPORT_SYMBOL(xt_compat_init_offsets);
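
/*
 * Worked example of the offset bookkeeping above (numbers are
 * illustrative): if every entry's match shrinks by 4 bytes in the
 * 32-bit layout, then for kernel entries at offsets 0, 112 and 224
 * xt_compat_add_offset() records cumulative deltas 4, 8 and 12.
 * xt_compat_calc_jump(af, 224) finds offset 224 and returns the
 * previous slot's delta, 8 - the total shrinkage of all entries in
 * front of it - which callers subtract from kernel jump targets to
 * obtain the matching 32-bit user offset.
 */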
int xt_compat_match_offset(const struct xt_match *match)
{
        u_int16_t csize = match->compatsize ? : match->matchsize;
        return XT_ALIGN(match->matchsize) - COMPAT_XT_ALIGN(csize);
}
EXPORT_SYMBOL_GPL(xt_compat_match_offset);

int xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr,
                              unsigned int *size)
{
        const struct xt_match *match = m->u.kernel.match;
        struct compat_xt_entry_match *cm = (struct compat_xt_entry_match *)m;
        int pad, off = xt_compat_match_offset(match);
        u_int16_t msize = cm->u.user.match_size;

        m = *dstptr;
        memcpy(m, cm, sizeof(*cm));
        if (match->compat_from_user)
                match->compat_from_user(m->data, cm->data);
        else
                memcpy(m->data, cm->data, msize - sizeof(*cm));
        pad = XT_ALIGN(match->matchsize) - match->matchsize;
        if (pad > 0)
                memset(m->data + match->matchsize, 0, pad);

        msize += off;
        m->u.user.match_size = msize;

        *size += off;
        *dstptr += msize;
        return 0;
}
EXPORT_SYMBOL_GPL(xt_compat_match_from_user);

int xt_compat_match_to_user(const struct xt_entry_match *m,
                            void __user **dstptr, unsigned int *size)
{
        const struct xt_match *match = m->u.kernel.match;
        struct compat_xt_entry_match __user *cm = *dstptr;
        int off = xt_compat_match_offset(match);
        u_int16_t msize = m->u.user.match_size - off;

        if (copy_to_user(cm, m, sizeof(*cm)) ||
            put_user(msize, &cm->u.user.match_size) ||
            copy_to_user(cm->u.user.name, m->u.kernel.match->name,
                         strlen(m->u.kernel.match->name) + 1))
                return -EFAULT;

        if (match->compat_to_user) {
                if (match->compat_to_user((void __user *)cm->data, m->data))
                        return -EFAULT;
        } else {
                if (copy_to_user(cm->data, m->data, msize - sizeof(*cm)))
                        return -EFAULT;
        }

        *size -= off;
        *dstptr += msize;
        return 0;
}
EXPORT_SYMBOL_GPL(xt_compat_match_to_user);
#endif /* CONFIG_COMPAT */
int xt_check_target(struct xt_tgchk_param *par,
                    unsigned int size, u_int8_t proto, bool inv_proto)
{
        int ret;

        if (XT_ALIGN(par->target->targetsize) != size) {
                pr_err("%s_tables: %s.%u target: invalid size "
                       "%u (kernel) != (user) %u\n",
                       xt_prefix[par->family], par->target->name,
                       par->target->revision,
                       XT_ALIGN(par->target->targetsize), size);
                return -EINVAL;
        }
        if (par->target->table != NULL &&
            strcmp(par->target->table, par->table) != 0) {
                pr_err("%s_tables: %s target: only valid in %s table, not %s\n",
                       xt_prefix[par->family], par->target->name,
                       par->target->table, par->table);
                return -EINVAL;
        }
        if (par->target->hooks && (par->hook_mask & ~par->target->hooks) != 0) {
                char used[64], allow[64];

                pr_err("%s_tables: %s target: used from hooks %s, but only "
                       "usable from %s\n",
                       xt_prefix[par->family], par->target->name,
                       textify_hooks(used, sizeof(used), par->hook_mask),
                       textify_hooks(allow, sizeof(allow), par->target->hooks));
                return -EINVAL;
        }
        if (par->target->proto && (par->target->proto != proto || inv_proto)) {
                pr_err("%s_tables: %s target: only valid for protocol %u\n",
                       xt_prefix[par->family], par->target->name,
                       par->target->proto);
                return -EINVAL;
        }
        if (par->target->checkentry != NULL) {
                ret = par->target->checkentry(par);
                if (ret < 0)
                        return ret;
                else if (ret > 0)
                        /* Flag up potential errors. */
                        return -EIO;
        }
        return 0;
}
EXPORT_SYMBOL_GPL(xt_check_target);
#ifdef CONFIG_COMPAT
int xt_compat_target_offset(const struct xt_target *target)
{
        u_int16_t csize = target->compatsize ? : target->targetsize;
        return XT_ALIGN(target->targetsize) - COMPAT_XT_ALIGN(csize);
}
EXPORT_SYMBOL_GPL(xt_compat_target_offset);

void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr,
                                unsigned int *size)
{
        const struct xt_target *target = t->u.kernel.target;
        struct compat_xt_entry_target *ct = (struct compat_xt_entry_target *)t;
        int pad, off = xt_compat_target_offset(target);
        u_int16_t tsize = ct->u.user.target_size;

        t = *dstptr;
        memcpy(t, ct, sizeof(*ct));
        if (target->compat_from_user)
                target->compat_from_user(t->data, ct->data);
        else
                memcpy(t->data, ct->data, tsize - sizeof(*ct));
        pad = XT_ALIGN(target->targetsize) - target->targetsize;
        if (pad > 0)
                memset(t->data + target->targetsize, 0, pad);

        tsize += off;
        t->u.user.target_size = tsize;

        *size += off;
        *dstptr += tsize;
}
EXPORT_SYMBOL_GPL(xt_compat_target_from_user);

int xt_compat_target_to_user(const struct xt_entry_target *t,
                             void __user **dstptr, unsigned int *size)
{
        const struct xt_target *target = t->u.kernel.target;
        struct compat_xt_entry_target __user *ct = *dstptr;
        int off = xt_compat_target_offset(target);
        u_int16_t tsize = t->u.user.target_size - off;

        if (copy_to_user(ct, t, sizeof(*ct)) ||
            put_user(tsize, &ct->u.user.target_size) ||
            copy_to_user(ct->u.user.name, t->u.kernel.target->name,
                         strlen(t->u.kernel.target->name) + 1))
                return -EFAULT;

        if (target->compat_to_user) {
                if (target->compat_to_user((void __user *)ct->data, t->data))
                        return -EFAULT;
        } else {
                if (copy_to_user(ct->data, t->data, tsize - sizeof(*ct)))
                        return -EFAULT;
        }

        *size -= off;
        *dstptr += tsize;
        return 0;
}
EXPORT_SYMBOL_GPL(xt_compat_target_to_user);
#endif
struct xt_table_info *xt_alloc_table_info(unsigned int size)
{
        struct xt_table_info *newinfo;
        int cpu;

        /* Pedantry: prevent them from hitting BUG() in vmalloc.c --RR */
        if ((SMP_ALIGN(size) >> PAGE_SHIFT) + 2 > totalram_pages)
                return NULL;

        newinfo = kzalloc(XT_TABLE_INFO_SZ, GFP_KERNEL);
        if (!newinfo)
                return NULL;

        newinfo->size = size;

        for_each_possible_cpu(cpu) {
                if (size <= PAGE_SIZE)
                        newinfo->entries[cpu] = kmalloc_node(size,
                                                             GFP_KERNEL,
                                                             cpu_to_node(cpu));
                else
                        newinfo->entries[cpu] = vmalloc_node(size,
                                                             cpu_to_node(cpu));

                if (newinfo->entries[cpu] == NULL) {
                        xt_free_table_info(newinfo);
                        return NULL;
                }
        }

        return newinfo;
}
EXPORT_SYMBOL(xt_alloc_table_info);

void xt_free_table_info(struct xt_table_info *info)
{
        int cpu;

        for_each_possible_cpu(cpu) {
                if (info->size <= PAGE_SIZE)
                        kfree(info->entries[cpu]);
                else
                        vfree(info->entries[cpu]);
        }

        if (info->jumpstack != NULL) {
                if (sizeof(void *) * info->stacksize > PAGE_SIZE) {
                        for_each_possible_cpu(cpu)
                                vfree(info->jumpstack[cpu]);
                } else {
                        for_each_possible_cpu(cpu)
                                kfree(info->jumpstack[cpu]);
                }
        }

        if (sizeof(void **) * nr_cpu_ids > PAGE_SIZE)
                vfree(info->jumpstack);
        else
                kfree(info->jumpstack);

        free_percpu(info->stackptr);

        kfree(info);
}
EXPORT_SYMBOL(xt_free_table_info);
/* Find table by name, grabs mutex & ref.  Returns ERR_PTR() on error. */
struct xt_table *xt_find_table_lock(struct net *net, u_int8_t af,
                                    const char *name)
{
        struct xt_table *t;

        if (mutex_lock_interruptible(&xt[af].mutex) != 0)
                return ERR_PTR(-EINTR);

        list_for_each_entry(t, &net->xt.tables[af], list)
                if (strcmp(t->name, name) == 0 && try_module_get(t->me))
                        return t;
        mutex_unlock(&xt[af].mutex);
        return NULL;
}
EXPORT_SYMBOL_GPL(xt_find_table_lock);

void xt_table_unlock(struct xt_table *table)
{
        mutex_unlock(&xt[table->af].mutex);
}
EXPORT_SYMBOL_GPL(xt_table_unlock);

#ifdef CONFIG_COMPAT
void xt_compat_lock(u_int8_t af)
{
        mutex_lock(&xt[af].compat_mutex);
}
EXPORT_SYMBOL_GPL(xt_compat_lock);

void xt_compat_unlock(u_int8_t af)
{
        mutex_unlock(&xt[af].compat_mutex);
}
EXPORT_SYMBOL_GPL(xt_compat_unlock);
#endif
DEFINE_PER_CPU(seqcount_t, xt_recseq);
EXPORT_PER_CPU_SYMBOL_GPL(xt_recseq);

static int xt_jumpstack_alloc(struct xt_table_info *i)
{
        unsigned int size;
        int cpu;

        i->stackptr = alloc_percpu(unsigned int);
        if (i->stackptr == NULL)
                return -ENOMEM;

        size = sizeof(void **) * nr_cpu_ids;
        if (size > PAGE_SIZE)
                i->jumpstack = vmalloc(size);
        else
                i->jumpstack = kmalloc(size, GFP_KERNEL);
        if (i->jumpstack == NULL)
                return -ENOMEM;
        memset(i->jumpstack, 0, size);

        i->stacksize *= xt_jumpstack_multiplier;
        size = sizeof(void *) * i->stacksize;
        for_each_possible_cpu(cpu) {
                if (size > PAGE_SIZE)
                        i->jumpstack[cpu] = vmalloc_node(size,
                                                         cpu_to_node(cpu));
                else
                        i->jumpstack[cpu] = kmalloc_node(size,
                                                         GFP_KERNEL, cpu_to_node(cpu));
                if (i->jumpstack[cpu] == NULL)
                        /*
                         * Freeing will be done later on by the callers. The
                         * chain is: xt_replace_table -> __do_replace ->
                         * do_replace -> xt_free_table_info.
                         */
                        return -ENOMEM;
        }

        return 0;
}
struct xt_table_info *
xt_replace_table(struct xt_table *table,
                 unsigned int num_counters,
                 struct xt_table_info *newinfo,
                 int *error)
{
        struct xt_table_info *private;
        int ret;

        ret = xt_jumpstack_alloc(newinfo);
        if (ret < 0) {
                *error = ret;
                return NULL;
        }

        /* Do the substitution. */
        local_bh_disable();
        private = table->private;

        /* Check inside lock: is the old number correct? */
        if (num_counters != private->number) {
                pr_debug("num_counters != table->private->number (%u/%u)\n",
                         num_counters, private->number);
                local_bh_enable();
                *error = -EAGAIN;
                return NULL;
        }

        table->private = newinfo;
        newinfo->initial_entries = private->initial_entries;

        /*
         * Even though table entries have now been swapped, other CPUs
         * may still be using the old entries. This is okay, because
         * resynchronization happens because of the locking done
         * during the get_counters() routine.
         */
        local_bh_enable();

#ifdef CONFIG_AUDIT
        if (audit_enabled) {
                struct audit_buffer *ab;

                ab = audit_log_start(current->audit_context, GFP_KERNEL,
                                     AUDIT_NETFILTER_CFG);
                if (ab) {
                        audit_log_format(ab, "table=%s family=%u entries=%u",
                                         table->name, table->af,
                                         private->number);
                        audit_log_end(ab);
                }
        }
#endif

        return private;
}
EXPORT_SYMBOL_GPL(xt_replace_table);
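
/*
 * Usage sketch: the typical replace sequence as driven by a table's
 * setsockopt handler.  The names net, name, num_counters and newinfo
 * stand in for the caller's own state.
 *
 *      struct xt_table_info *oldinfo;
 *      struct xt_table *t;
 *      int ret = 0;
 *
 *      t = xt_find_table_lock(net, NFPROTO_IPV4, name);
 *      if (t == NULL || IS_ERR(t))
 *              return t ? PTR_ERR(t) : -ENOENT;
 *      oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
 *      if (oldinfo != NULL) {
 *              ... copy the old counters out of oldinfo ...
 *              xt_free_table_info(oldinfo);
 *      }
 *      module_put(t->me);
 *      xt_table_unlock(t);
 */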
struct xt_table *xt_register_table(struct net *net,
                                   const struct xt_table *input_table,
                                   struct xt_table_info *bootstrap,
                                   struct xt_table_info *newinfo)
{
        int ret;
        struct xt_table_info *private;
        struct xt_table *t, *table;

        /* Don't add one object to multiple lists. */
        table = kmemdup(input_table, sizeof(struct xt_table), GFP_KERNEL);
        if (!table) {
                ret = -ENOMEM;
                goto out;
        }

        ret = mutex_lock_interruptible(&xt[table->af].mutex);
        if (ret != 0)
                goto out_free;

        /* Don't autoload: we'd eat our tail... */
        list_for_each_entry(t, &net->xt.tables[table->af], list) {
                if (strcmp(t->name, table->name) == 0) {
                        ret = -EEXIST;
                        goto unlock;
                }
        }

        /* Simplifies replace_table code. */
        table->private = bootstrap;

        if (!xt_replace_table(table, 0, newinfo, &ret))
                goto unlock;

        private = table->private;
        pr_debug("table->private->number = %u\n", private->number);

        /* save number of initial entries */
        private->initial_entries = private->number;

        list_add(&table->list, &net->xt.tables[table->af]);
        mutex_unlock(&xt[table->af].mutex);
        return table;

unlock:
        mutex_unlock(&xt[table->af].mutex);
out_free:
        kfree(table);
out:
        return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(xt_register_table);

void *xt_unregister_table(struct xt_table *table)
{
        struct xt_table_info *private;

        mutex_lock(&xt[table->af].mutex);
        private = table->private;
        list_del(&table->list);
        mutex_unlock(&xt[table->af].mutex);
        kfree(table);

        return private;
}
EXPORT_SYMBOL_GPL(xt_unregister_table);
#ifdef CONFIG_PROC_FS
struct xt_names_priv {
        struct seq_net_private p;
        u_int8_t af;
};

static void *xt_table_seq_start(struct seq_file *seq, loff_t *pos)
{
        struct xt_names_priv *priv = seq->private;
        struct net *net = seq_file_net(seq);
        u_int8_t af = priv->af;

        mutex_lock(&xt[af].mutex);
        return seq_list_start(&net->xt.tables[af], *pos);
}

static void *xt_table_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
        struct xt_names_priv *priv = seq->private;
        struct net *net = seq_file_net(seq);
        u_int8_t af = priv->af;

        return seq_list_next(v, &net->xt.tables[af], pos);
}

static void xt_table_seq_stop(struct seq_file *seq, void *v)
{
        struct xt_names_priv *priv = seq->private;
        u_int8_t af = priv->af;

        mutex_unlock(&xt[af].mutex);
}

static int xt_table_seq_show(struct seq_file *seq, void *v)
{
        struct xt_table *table = list_entry(v, struct xt_table, list);

        if (strlen(table->name))
                return seq_printf(seq, "%s\n", table->name);
        else
                return 0;
}

static const struct seq_operations xt_table_seq_ops = {
        .start  = xt_table_seq_start,
        .next   = xt_table_seq_next,
        .stop   = xt_table_seq_stop,
        .show   = xt_table_seq_show,
};

static int xt_table_open(struct inode *inode, struct file *file)
{
        int ret;
        struct xt_names_priv *priv;

        ret = seq_open_net(inode, file, &xt_table_seq_ops,
                           sizeof(struct xt_names_priv));
        if (!ret) {
                priv = ((struct seq_file *)file->private_data)->private;
                priv->af = (unsigned long)PDE(inode)->data;
        }
        return ret;
}

static const struct file_operations xt_table_ops = {
        .owner   = THIS_MODULE,
        .open    = xt_table_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = seq_release_net,
};
/*
 * Traverse state for ip{,6}_{tables,matches} for helping crossing
 * the multi-AF mutexes.
 */
struct nf_mttg_trav {
        struct list_head *head, *curr;
        uint8_t class, nfproto;
};

enum {
        MTTG_TRAV_INIT,
        MTTG_TRAV_NFP_UNSPEC,
        MTTG_TRAV_NFP_SPEC,
        MTTG_TRAV_DONE,
};

static void *xt_mttg_seq_next(struct seq_file *seq, void *v, loff_t *ppos,
    bool is_target)
{
        static const uint8_t next_class[] = {
                [MTTG_TRAV_NFP_UNSPEC] = MTTG_TRAV_NFP_SPEC,
                [MTTG_TRAV_NFP_SPEC]   = MTTG_TRAV_DONE,
        };
        struct nf_mttg_trav *trav = seq->private;

        switch (trav->class) {
        case MTTG_TRAV_INIT:
                trav->class = MTTG_TRAV_NFP_UNSPEC;
                mutex_lock(&xt[NFPROTO_UNSPEC].mutex);
                trav->head = trav->curr = is_target ?
                        &xt[NFPROTO_UNSPEC].target : &xt[NFPROTO_UNSPEC].match;
                break;
        case MTTG_TRAV_NFP_UNSPEC:
                trav->curr = trav->curr->next;
                if (trav->curr != trav->head)
                        break;
                mutex_unlock(&xt[NFPROTO_UNSPEC].mutex);
                mutex_lock(&xt[trav->nfproto].mutex);
                trav->head = trav->curr = is_target ?
                        &xt[trav->nfproto].target : &xt[trav->nfproto].match;
                trav->class = next_class[trav->class];
                break;
        case MTTG_TRAV_NFP_SPEC:
                trav->curr = trav->curr->next;
                if (trav->curr != trav->head)
                        break;
                /* fallthru, _stop will unlock */
        default:
                return NULL;
        }

        if (ppos != NULL)
                ++*ppos;
        return trav;
}

static void *xt_mttg_seq_start(struct seq_file *seq, loff_t *pos,
    bool is_target)
{
        struct nf_mttg_trav *trav = seq->private;
        unsigned int j;

        trav->class = MTTG_TRAV_INIT;
        for (j = 0; j < *pos; ++j)
                if (xt_mttg_seq_next(seq, NULL, NULL, is_target) == NULL)
                        return NULL;
        return trav;
}

static void xt_mttg_seq_stop(struct seq_file *seq, void *v)
{
        struct nf_mttg_trav *trav = seq->private;

        switch (trav->class) {
        case MTTG_TRAV_NFP_UNSPEC:
                mutex_unlock(&xt[NFPROTO_UNSPEC].mutex);
                break;
        case MTTG_TRAV_NFP_SPEC:
                mutex_unlock(&xt[trav->nfproto].mutex);
                break;
        }
}
static void *xt_match_seq_start(struct seq_file *seq, loff_t *pos)
{
        return xt_mttg_seq_start(seq, pos, false);
}

static void *xt_match_seq_next(struct seq_file *seq, void *v, loff_t *ppos)
{
        return xt_mttg_seq_next(seq, v, ppos, false);
}

static int xt_match_seq_show(struct seq_file *seq, void *v)
{
        const struct nf_mttg_trav *trav = seq->private;
        const struct xt_match *match;

        switch (trav->class) {
        case MTTG_TRAV_NFP_UNSPEC:
        case MTTG_TRAV_NFP_SPEC:
                if (trav->curr == trav->head)
                        return 0;
                match = list_entry(trav->curr, struct xt_match, list);
                return (*match->name == '\0') ? 0 :
                       seq_printf(seq, "%s\n", match->name);
        }
        return 0;
}

static const struct seq_operations xt_match_seq_ops = {
        .start  = xt_match_seq_start,
        .next   = xt_match_seq_next,
        .stop   = xt_mttg_seq_stop,
        .show   = xt_match_seq_show,
};

static int xt_match_open(struct inode *inode, struct file *file)
{
        struct seq_file *seq;
        struct nf_mttg_trav *trav;
        int ret;

        trav = kmalloc(sizeof(*trav), GFP_KERNEL);
        if (trav == NULL)
                return -ENOMEM;

        ret = seq_open(file, &xt_match_seq_ops);
        if (ret < 0) {
                kfree(trav);
                return ret;
        }

        seq = file->private_data;
        seq->private = trav;
        trav->nfproto = (unsigned long)PDE(inode)->data;
        return 0;
}

static const struct file_operations xt_match_ops = {
        .owner   = THIS_MODULE,
        .open    = xt_match_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = seq_release_private,
};
static void *xt_target_seq_start(struct seq_file *seq, loff_t *pos)
{
        return xt_mttg_seq_start(seq, pos, true);
}

static void *xt_target_seq_next(struct seq_file *seq, void *v, loff_t *ppos)
{
        return xt_mttg_seq_next(seq, v, ppos, true);
}

static int xt_target_seq_show(struct seq_file *seq, void *v)
{
        const struct nf_mttg_trav *trav = seq->private;
        const struct xt_target *target;

        switch (trav->class) {
        case MTTG_TRAV_NFP_UNSPEC:
        case MTTG_TRAV_NFP_SPEC:
                if (trav->curr == trav->head)
                        return 0;
                target = list_entry(trav->curr, struct xt_target, list);
                return (*target->name == '\0') ? 0 :
                       seq_printf(seq, "%s\n", target->name);
        }
        return 0;
}

static const struct seq_operations xt_target_seq_ops = {
        .start  = xt_target_seq_start,
        .next   = xt_target_seq_next,
        .stop   = xt_mttg_seq_stop,
        .show   = xt_target_seq_show,
};

static int xt_target_open(struct inode *inode, struct file *file)
{
        struct seq_file *seq;
        struct nf_mttg_trav *trav;
        int ret;

        trav = kmalloc(sizeof(*trav), GFP_KERNEL);
        if (trav == NULL)
                return -ENOMEM;

        ret = seq_open(file, &xt_target_seq_ops);
        if (ret < 0) {
                kfree(trav);
                return ret;
        }

        seq = file->private_data;
        seq->private = trav;
        trav->nfproto = (unsigned long)PDE(inode)->data;
        return 0;
}

static const struct file_operations xt_target_ops = {
        .owner   = THIS_MODULE,
        .open    = xt_target_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = seq_release_private,
};
#define FORMAT_TABLES   "_tables_names"
#define FORMAT_MATCHES  "_tables_matches"
#define FORMAT_TARGETS  "_tables_targets"

#endif /* CONFIG_PROC_FS */
/**
 * xt_hook_link - set up hooks for a new table
 * @table: table with metadata needed to set up hooks
 * @fn:    Hook function
 *
 * This function will take care of creating and registering the necessary
 * Netfilter hooks for XT tables.
 */
struct nf_hook_ops *xt_hook_link(const struct xt_table *table, nf_hookfn *fn)
{
        unsigned int hook_mask = table->valid_hooks;
        uint8_t i, num_hooks = hweight32(hook_mask);
        uint8_t hooknum;
        struct nf_hook_ops *ops;
        int ret;

        ops = kmalloc(sizeof(*ops) * num_hooks, GFP_KERNEL);
        if (ops == NULL)
                return ERR_PTR(-ENOMEM);

        for (i = 0, hooknum = 0; i < num_hooks && hook_mask != 0;
             hook_mask >>= 1, ++hooknum) {
                if (!(hook_mask & 1))
                        continue;
                ops[i].hook     = fn;
                ops[i].owner    = table->me;
                ops[i].pf       = table->af;
                ops[i].hooknum  = hooknum;
                ops[i].priority = table->priority;
                ++i;
        }

        ret = nf_register_hooks(ops, num_hooks);
        if (ret < 0) {
                kfree(ops);
                return ERR_PTR(ret);
        }

        return ops;
}
EXPORT_SYMBOL_GPL(xt_hook_link);

/**
 * xt_hook_unlink - remove hooks for a table
 * @table: the table whose hooks were set up by xt_hook_link
 * @ops:   nf_hook_ops array as returned by xt_hook_link
 */
void xt_hook_unlink(const struct xt_table *table, struct nf_hook_ops *ops)
{
        nf_unregister_hooks(ops, hweight32(table->valid_hooks));
        kfree(ops);
}
EXPORT_SYMBOL_GPL(xt_hook_unlink);
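
/*
 * Usage sketch: a table module pairs xt_hook_link()/xt_hook_unlink()
 * with table registration.  The names foo_table, foo_hook_fn and
 * foo_ops are hypothetical.
 *
 *      static struct nf_hook_ops *foo_ops;
 *
 *      // after registering the table itself:
 *      foo_ops = xt_hook_link(&foo_table, foo_hook_fn);
 *      if (IS_ERR(foo_ops))
 *              return PTR_ERR(foo_ops);
 *      ...
 *      // on module exit:
 *      xt_hook_unlink(&foo_table, foo_ops);
 */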
int xt_proto_init(struct net *net, u_int8_t af)
{
#ifdef CONFIG_PROC_FS
        char buf[XT_FUNCTION_MAXNAMELEN];
        struct proc_dir_entry *proc;
#endif

        if (af >= ARRAY_SIZE(xt_prefix))
                return -EINVAL;

#ifdef CONFIG_PROC_FS
        strlcpy(buf, xt_prefix[af], sizeof(buf));
        strlcat(buf, FORMAT_TABLES, sizeof(buf));
        proc = proc_create_data(buf, 0440, net->proc_net, &xt_table_ops,
                                (void *)(unsigned long)af);
        if (!proc)
                goto out;

        strlcpy(buf, xt_prefix[af], sizeof(buf));
        strlcat(buf, FORMAT_MATCHES, sizeof(buf));
        proc = proc_create_data(buf, 0440, net->proc_net, &xt_match_ops,
                                (void *)(unsigned long)af);
        if (!proc)
                goto out_remove_tables;

        strlcpy(buf, xt_prefix[af], sizeof(buf));
        strlcat(buf, FORMAT_TARGETS, sizeof(buf));
        proc = proc_create_data(buf, 0440, net->proc_net, &xt_target_ops,
                                (void *)(unsigned long)af);
        if (!proc)
                goto out_remove_matches;
#endif

        return 0;

#ifdef CONFIG_PROC_FS
out_remove_matches:
        strlcpy(buf, xt_prefix[af], sizeof(buf));
        strlcat(buf, FORMAT_MATCHES, sizeof(buf));
        proc_net_remove(net, buf);

out_remove_tables:
        strlcpy(buf, xt_prefix[af], sizeof(buf));
        strlcat(buf, FORMAT_TABLES, sizeof(buf));
        proc_net_remove(net, buf);
out:
        return -1;
#endif
}
EXPORT_SYMBOL_GPL(xt_proto_init);

void xt_proto_fini(struct net *net, u_int8_t af)
{
#ifdef CONFIG_PROC_FS
        char buf[XT_FUNCTION_MAXNAMELEN];

        strlcpy(buf, xt_prefix[af], sizeof(buf));
        strlcat(buf, FORMAT_TABLES, sizeof(buf));
        proc_net_remove(net, buf);

        strlcpy(buf, xt_prefix[af], sizeof(buf));
        strlcat(buf, FORMAT_TARGETS, sizeof(buf));
        proc_net_remove(net, buf);

        strlcpy(buf, xt_prefix[af], sizeof(buf));
        strlcat(buf, FORMAT_MATCHES, sizeof(buf));
        proc_net_remove(net, buf);
#endif /*CONFIG_PROC_FS*/
}
EXPORT_SYMBOL_GPL(xt_proto_fini);
static int __net_init xt_net_init(struct net *net)
{
        int i;

        for (i = 0; i < NFPROTO_NUMPROTO; i++)
                INIT_LIST_HEAD(&net->xt.tables[i]);
        return 0;
}

static struct pernet_operations xt_net_ops = {
        .init = xt_net_init,
};

static int __init xt_init(void)
{
        unsigned int i;
        int rv;

        for_each_possible_cpu(i) {
                seqcount_init(&per_cpu(xt_recseq, i));
        }

        xt = kmalloc(sizeof(struct xt_af) * NFPROTO_NUMPROTO, GFP_KERNEL);
        if (!xt)
                return -ENOMEM;

        for (i = 0; i < NFPROTO_NUMPROTO; i++) {
                mutex_init(&xt[i].mutex);
#ifdef CONFIG_COMPAT
                mutex_init(&xt[i].compat_mutex);
                xt[i].compat_tab = NULL;
#endif
                INIT_LIST_HEAD(&xt[i].target);
                INIT_LIST_HEAD(&xt[i].match);
        }
        rv = register_pernet_subsys(&xt_net_ops);
        if (rv < 0)
                kfree(xt);
        return rv;
}

static void __exit xt_fini(void)
{
        unregister_pernet_subsys(&xt_net_ops);
        kfree(xt);
}

module_init(xt_init);
module_exit(xt_fini);