#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/cache.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/tcp.h>
#include <linux/hash.h>
#include <linux/tcp_metrics.h>
#include <linux/vmalloc.h>

#include <net/inet_connection_sock.h>
#include <net/net_namespace.h>
#include <net/request_sock.h>
#include <net/inetpeer.h>
#include <net/sock.h>
#include <net/ipv6.h>
#include <net/dst.h>
#include <net/tcp.h>
#include <net/genetlink.h>

int sysctl_tcp_nometrics_save __read_mostly;

static struct tcp_metrics_block *__tcp_get_metrics(const struct inetpeer_addr *saddr,
						   const struct inetpeer_addr *daddr,
						   struct net *net, unsigned int hash);

struct tcp_fastopen_metrics {
	u16	mss;
	u16	syn_loss:10,		/* Recurring Fast Open SYN losses */
		try_exp:2;		/* Request w/ exp. option (once) */
	unsigned long	last_syn_loss;	/* Last Fast Open SYN loss */
	struct	tcp_fastopen_cookie	cookie;
};

/* TCP_METRIC_MAX includes 2 extra fields for userspace compatibility
 * Kernel only stores RTT and RTTVAR in usec resolution
 */
#define TCP_METRIC_MAX_KERNEL (TCP_METRIC_MAX - 2)

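/* One cache entry per (source, destination) address pair and network
 * namespace.  Entries are chained off a hash bucket in an RCU-protected
 * singly linked list; rcu_head lets a removed entry be freed only after
 * a grace period.
 */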
struct tcp_metrics_block {
	struct tcp_metrics_block __rcu	*tcpm_next;
	possible_net_t			tcpm_net;
	struct inetpeer_addr		tcpm_saddr;
	struct inetpeer_addr		tcpm_daddr;
	unsigned long			tcpm_stamp;
	u32				tcpm_ts;
	u32				tcpm_ts_stamp;
	u32				tcpm_lock;
	u32				tcpm_vals[TCP_METRIC_MAX_KERNEL + 1];
	struct tcp_fastopen_metrics	tcpm_fastopen;
	struct rcu_head			rcu_head;
};

static inline struct net *tm_net(struct tcp_metrics_block *tm)
{
	return read_pnet(&tm->tcpm_net);
}

static bool tcp_metric_locked(struct tcp_metrics_block *tm,
			      enum tcp_metric_index idx)
{
	return tm->tcpm_lock & (1 << idx);
}

static u32 tcp_metric_get(struct tcp_metrics_block *tm,
			  enum tcp_metric_index idx)
{
	return tm->tcpm_vals[idx];
}

static void tcp_metric_set(struct tcp_metrics_block *tm,
			   enum tcp_metric_index idx,
			   u32 val)
{
	tm->tcpm_vals[idx] = val;
}

static bool addr_same(const struct inetpeer_addr *a,
		      const struct inetpeer_addr *b)
{
	return inetpeer_addr_cmp(a, b) == 0;
}

struct tcpm_hash_bucket {
	struct tcp_metrics_block __rcu	*chain;
};

static struct tcpm_hash_bucket	*tcp_metrics_hash __read_mostly;
static unsigned int		tcp_metrics_hash_log __read_mostly;

static DEFINE_SPINLOCK(tcp_metrics_lock);

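/* Re-seed a cache entry from the routing metrics attached to @dst:
 * record which metrics are administratively locked, copy the raw
 * values (converting RTT/RTTVAR from msec to usec resolution), and
 * optionally clear any cached Fast Open state.
 */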
static void tcpm_suck_dst(struct tcp_metrics_block *tm,
			  const struct dst_entry *dst,
			  bool fastopen_clear)
{
	u32 msval;
	u32 val;

	tm->tcpm_stamp = jiffies;

	val = 0;
	if (dst_metric_locked(dst, RTAX_RTT))
		val |= 1 << TCP_METRIC_RTT;
	if (dst_metric_locked(dst, RTAX_RTTVAR))
		val |= 1 << TCP_METRIC_RTTVAR;
	if (dst_metric_locked(dst, RTAX_SSTHRESH))
		val |= 1 << TCP_METRIC_SSTHRESH;
	if (dst_metric_locked(dst, RTAX_CWND))
		val |= 1 << TCP_METRIC_CWND;
	if (dst_metric_locked(dst, RTAX_REORDERING))
		val |= 1 << TCP_METRIC_REORDERING;
	tm->tcpm_lock = val;

	msval = dst_metric_raw(dst, RTAX_RTT);
	tm->tcpm_vals[TCP_METRIC_RTT] = msval * USEC_PER_MSEC;

	msval = dst_metric_raw(dst, RTAX_RTTVAR);
	tm->tcpm_vals[TCP_METRIC_RTTVAR] = msval * USEC_PER_MSEC;
	tm->tcpm_vals[TCP_METRIC_SSTHRESH] = dst_metric_raw(dst, RTAX_SSTHRESH);
	tm->tcpm_vals[TCP_METRIC_CWND] = dst_metric_raw(dst, RTAX_CWND);
	tm->tcpm_vals[TCP_METRIC_REORDERING] = dst_metric_raw(dst, RTAX_REORDERING);
	tm->tcpm_ts = 0;
	tm->tcpm_ts_stamp = 0;
	if (fastopen_clear) {
		tm->tcpm_fastopen.mss = 0;
		tm->tcpm_fastopen.syn_loss = 0;
		tm->tcpm_fastopen.try_exp = 0;
		tm->tcpm_fastopen.cookie.exp = false;
		tm->tcpm_fastopen.cookie.len = 0;
	}
}

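/* Entries older than one hour are considered stale and are re-seeded
 * from the current dst metrics before being used.
 */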
#define TCP_METRICS_TIMEOUT		(60 * 60 * HZ)

static void tcpm_check_stamp(struct tcp_metrics_block *tm, struct dst_entry *dst)
{
	if (tm && unlikely(time_after(jiffies, tm->tcpm_stamp + TCP_METRICS_TIMEOUT)))
		tcpm_suck_dst(tm, dst, false);
}

#define TCP_METRICS_RECLAIM_DEPTH	5
#define TCP_METRICS_RECLAIM_PTR		(struct tcp_metrics_block *) 0x1UL

#define deref_locked(p)	\
	rcu_dereference_protected(p, lockdep_is_held(&tcp_metrics_lock))

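/* Create a new cache entry for @saddr/@daddr, or recycle the oldest
 * entry in the bucket when the earlier lookup walked past
 * TCP_METRICS_RECLAIM_DEPTH entries without finding a match.
 */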
static struct tcp_metrics_block *tcpm_new(struct dst_entry *dst,
					  struct inetpeer_addr *saddr,
					  struct inetpeer_addr *daddr,
					  unsigned int hash)
{
	struct tcp_metrics_block *tm;
	struct net *net;
	bool reclaim = false;

	spin_lock_bh(&tcp_metrics_lock);
	net = dev_net(dst->dev);

	/* While waiting for the spin-lock the cache might have been populated
	 * with this entry and so we have to check again.
	 */
	tm = __tcp_get_metrics(saddr, daddr, net, hash);
	if (tm == TCP_METRICS_RECLAIM_PTR) {
		reclaim = true;
		tm = NULL;
	}
	if (tm) {
		tcpm_check_stamp(tm, dst);
		goto out_unlock;
	}

	if (unlikely(reclaim)) {
		struct tcp_metrics_block *oldest;

		oldest = deref_locked(tcp_metrics_hash[hash].chain);
		for (tm = deref_locked(oldest->tcpm_next); tm;
		     tm = deref_locked(tm->tcpm_next)) {
			if (time_before(tm->tcpm_stamp, oldest->tcpm_stamp))
				oldest = tm;
		}
		tm = oldest;
	} else {
		tm = kmalloc(sizeof(*tm), GFP_ATOMIC);
		if (!tm)
			goto out_unlock;
	}
	write_pnet(&tm->tcpm_net, net);
	tm->tcpm_saddr = *saddr;
	tm->tcpm_daddr = *daddr;

	tcpm_suck_dst(tm, dst, true);

	if (likely(!reclaim)) {
		tm->tcpm_next = tcp_metrics_hash[hash].chain;
		rcu_assign_pointer(tcp_metrics_hash[hash].chain, tm);
	}

out_unlock:
	spin_unlock_bh(&tcp_metrics_lock);
	return tm;
}

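/* Encode a failed lookup: NULL means "not found, chain still short",
 * while TCP_METRICS_RECLAIM_PTR tells the caller that the chain has
 * grown past TCP_METRICS_RECLAIM_DEPTH and an existing entry should
 * be recycled instead of allocating a new one.
 */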
static struct tcp_metrics_block *tcp_get_encode(struct tcp_metrics_block *tm, int depth)
{
	if (tm)
		return tm;
	if (depth > TCP_METRICS_RECLAIM_DEPTH)
		return TCP_METRICS_RECLAIM_PTR;
	return NULL;
}

static struct tcp_metrics_block *__tcp_get_metrics(const struct inetpeer_addr *saddr,
						   const struct inetpeer_addr *daddr,
						   struct net *net, unsigned int hash)
{
	struct tcp_metrics_block *tm;
	int depth = 0;

	for (tm = rcu_dereference(tcp_metrics_hash[hash].chain); tm;
	     tm = rcu_dereference(tm->tcpm_next)) {
		if (addr_same(&tm->tcpm_saddr, saddr) &&
		    addr_same(&tm->tcpm_daddr, daddr) &&
		    net_eq(tm_net(tm), net))
			break;
		depth++;
	}
	return tcp_get_encode(tm, depth);
}

static struct tcp_metrics_block *__tcp_get_metrics_req(struct request_sock *req,
						       struct dst_entry *dst)
{
	struct tcp_metrics_block *tm;
	struct inetpeer_addr saddr, daddr;
	unsigned int hash;
	struct net *net;

	saddr.family = req->rsk_ops->family;
	daddr.family = req->rsk_ops->family;
	switch (daddr.family) {
	case AF_INET:
		inetpeer_set_addr_v4(&saddr, inet_rsk(req)->ir_loc_addr);
		inetpeer_set_addr_v4(&daddr, inet_rsk(req)->ir_rmt_addr);
		hash = ipv4_addr_hash(inet_rsk(req)->ir_rmt_addr);
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case AF_INET6:
		inetpeer_set_addr_v6(&saddr, &inet_rsk(req)->ir_v6_loc_addr);
		inetpeer_set_addr_v6(&daddr, &inet_rsk(req)->ir_v6_rmt_addr);
		hash = ipv6_addr_hash(&inet_rsk(req)->ir_v6_rmt_addr);
		break;
#endif
	default:
		return NULL;
	}

	net = dev_net(dst->dev);
	hash ^= net_hash_mix(net);
	hash = hash_32(hash, tcp_metrics_hash_log);

	for (tm = rcu_dereference(tcp_metrics_hash[hash].chain); tm;
	     tm = rcu_dereference(tm->tcpm_next)) {
		if (addr_same(&tm->tcpm_saddr, &saddr) &&
		    addr_same(&tm->tcpm_daddr, &daddr) &&
		    net_eq(tm_net(tm), net))
			break;
	}
	tcpm_check_stamp(tm, dst);
	return tm;
}

static struct tcp_metrics_block *__tcp_get_metrics_tw(struct inet_timewait_sock *tw)
{
	struct tcp_metrics_block *tm;
	struct inetpeer_addr saddr, daddr;
	unsigned int hash;
	struct net *net;

	if (tw->tw_family == AF_INET) {
		inetpeer_set_addr_v4(&saddr, tw->tw_rcv_saddr);
		inetpeer_set_addr_v4(&daddr, tw->tw_daddr);
		hash = ipv4_addr_hash(tw->tw_daddr);
	}
#if IS_ENABLED(CONFIG_IPV6)
	else if (tw->tw_family == AF_INET6) {
		if (ipv6_addr_v4mapped(&tw->tw_v6_daddr)) {
			inetpeer_set_addr_v4(&saddr, tw->tw_rcv_saddr);
			inetpeer_set_addr_v4(&daddr, tw->tw_daddr);
			hash = ipv4_addr_hash(tw->tw_daddr);
		} else {
			inetpeer_set_addr_v6(&saddr, &tw->tw_v6_rcv_saddr);
			inetpeer_set_addr_v6(&daddr, &tw->tw_v6_daddr);
			hash = ipv6_addr_hash(&tw->tw_v6_daddr);
		}
	}
#endif
	else
		return NULL;

	net = twsk_net(tw);
	hash ^= net_hash_mix(net);
	hash = hash_32(hash, tcp_metrics_hash_log);

	for (tm = rcu_dereference(tcp_metrics_hash[hash].chain); tm;
	     tm = rcu_dereference(tm->tcpm_next)) {
		if (addr_same(&tm->tcpm_saddr, &saddr) &&
		    addr_same(&tm->tcpm_daddr, &daddr) &&
		    net_eq(tm_net(tm), net))
			break;
	}
	return tm;
}

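/* Main lookup entry point for full sockets.  v4-mapped IPv6 addresses
 * are reduced to their IPv4 form so both socket families share one
 * cache entry; when @create is true a missing entry is allocated.
 */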
static struct tcp_metrics_block *tcp_get_metrics(struct sock *sk,
						 struct dst_entry *dst,
						 bool create)
{
	struct tcp_metrics_block *tm;
	struct inetpeer_addr saddr, daddr;
	unsigned int hash;
	struct net *net;

	if (sk->sk_family == AF_INET) {
		inetpeer_set_addr_v4(&saddr, inet_sk(sk)->inet_saddr);
		inetpeer_set_addr_v4(&daddr, inet_sk(sk)->inet_daddr);
		hash = ipv4_addr_hash(inet_sk(sk)->inet_daddr);
	}
#if IS_ENABLED(CONFIG_IPV6)
	else if (sk->sk_family == AF_INET6) {
		if (ipv6_addr_v4mapped(&sk->sk_v6_daddr)) {
			inetpeer_set_addr_v4(&saddr, inet_sk(sk)->inet_saddr);
			inetpeer_set_addr_v4(&daddr, inet_sk(sk)->inet_daddr);
			hash = ipv4_addr_hash(inet_sk(sk)->inet_daddr);
		} else {
			inetpeer_set_addr_v6(&saddr, &sk->sk_v6_rcv_saddr);
			inetpeer_set_addr_v6(&daddr, &sk->sk_v6_daddr);
			hash = ipv6_addr_hash(&sk->sk_v6_daddr);
		}
	}
#endif
	else
		return NULL;

	net = dev_net(dst->dev);
	hash ^= net_hash_mix(net);
	hash = hash_32(hash, tcp_metrics_hash_log);

	tm = __tcp_get_metrics(&saddr, &daddr, net, hash);
	if (tm == TCP_METRICS_RECLAIM_PTR)
		tm = NULL;
	if (!tm && create)
		tm = tcpm_new(dst, &saddr, &daddr, hash);
	else
		tcpm_check_stamp(tm, dst);

	return tm;
}

/* Save metrics learned by this TCP session.  This function is called
 * only when TCP finishes successfully, i.e. when it enters TIME-WAIT
 * or goes from LAST-ACK to CLOSE.
 */
void tcp_update_metrics(struct sock *sk)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct dst_entry *dst = __sk_dst_get(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct net *net = sock_net(sk);
	struct tcp_metrics_block *tm;
	unsigned long rtt;
	u32 val;
	int m;

	if (sysctl_tcp_nometrics_save || !dst)
		return;

	if (dst->flags & DST_HOST)
		dst_confirm(dst);

	rcu_read_lock();
	if (icsk->icsk_backoff || !tp->srtt_us) {
		/* This session failed to estimate rtt. Why?
		 * Probably, no packets returned in time.  Reset our
		 * results.
		 */
		tm = tcp_get_metrics(sk, dst, false);
		if (tm && !tcp_metric_locked(tm, TCP_METRIC_RTT))
			tcp_metric_set(tm, TCP_METRIC_RTT, 0);
		goto out_unlock;
	} else
		tm = tcp_get_metrics(sk, dst, true);

	if (!tm)
		goto out_unlock;

	rtt = tcp_metric_get(tm, TCP_METRIC_RTT);
	m = rtt - tp->srtt_us;

	/* If the newly calculated rtt is larger than the stored one,
	 * store the new one.  Otherwise, use EWMA.  Remember, rtt
	 * overestimation is always better than underestimation.
	 */
	if (!tcp_metric_locked(tm, TCP_METRIC_RTT)) {
		if (m <= 0)
			rtt = tp->srtt_us;
		else
			rtt -= (m >> 3);
		tcp_metric_set(tm, TCP_METRIC_RTT, rtt);
	}

	if (!tcp_metric_locked(tm, TCP_METRIC_RTTVAR)) {
		unsigned long var;

		if (m < 0)
			m = -m;

		/* Scale deviation to rttvar fixed point */
		m >>= 1;
		if (m < tp->mdev_us)
			m = tp->mdev_us;

		var = tcp_metric_get(tm, TCP_METRIC_RTTVAR);
		if (m >= var)
			var = m;
		else
			var -= (var - m) >> 2;

		tcp_metric_set(tm, TCP_METRIC_RTTVAR, var);
	}

	if (tcp_in_initial_slowstart(tp)) {
		/* Slow start still did not finish. */
		if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH)) {
			val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
			if (val && (tp->snd_cwnd >> 1) > val)
				tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
					       tp->snd_cwnd >> 1);
		}
		if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
			val = tcp_metric_get(tm, TCP_METRIC_CWND);
			if (tp->snd_cwnd > val)
				tcp_metric_set(tm, TCP_METRIC_CWND,
					       tp->snd_cwnd);
		}
	} else if (!tcp_in_slow_start(tp) &&
		   icsk->icsk_ca_state == TCP_CA_Open) {
		/* Cong. avoidance phase, cwnd is reliable. */
		if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH))
			tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
				       max(tp->snd_cwnd >> 1, tp->snd_ssthresh));
		if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
			val = tcp_metric_get(tm, TCP_METRIC_CWND);
			tcp_metric_set(tm, TCP_METRIC_CWND, (val + tp->snd_cwnd) >> 1);
		}
	} else {
		/* Else slow start did not finish, cwnd is non-sense,
		 * ssthresh may also be invalid.
		 */
		if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
			val = tcp_metric_get(tm, TCP_METRIC_CWND);
			tcp_metric_set(tm, TCP_METRIC_CWND,
				       (val + tp->snd_ssthresh) >> 1);
		}
		if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH)) {
			val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
			if (val && tp->snd_ssthresh > val)
				tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
					       tp->snd_ssthresh);
		}
		if (!tcp_metric_locked(tm, TCP_METRIC_REORDERING)) {
			val = tcp_metric_get(tm, TCP_METRIC_REORDERING);
			if (val < tp->reordering &&
			    tp->reordering != net->ipv4.sysctl_tcp_reordering)
				tcp_metric_set(tm, TCP_METRIC_REORDERING,
					       tp->reordering);
		}
	}
	tm->tcpm_stamp = jiffies;
out_unlock:
	rcu_read_unlock();
}

/* Initialize metrics on socket. */
void tcp_init_metrics(struct sock *sk)
{
	struct dst_entry *dst = __sk_dst_get(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_metrics_block *tm;
	u32 val, crtt = 0; /* cached RTT scaled by 8 */

	if (!dst)
		goto reset;

	dst_confirm(dst);

	rcu_read_lock();
	tm = tcp_get_metrics(sk, dst, true);
	if (!tm) {
		rcu_read_unlock();
		goto reset;
	}

	if (tcp_metric_locked(tm, TCP_METRIC_CWND))
		tp->snd_cwnd_clamp = tcp_metric_get(tm, TCP_METRIC_CWND);

	val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
	if (val) {
		tp->snd_ssthresh = val;
		if (tp->snd_ssthresh > tp->snd_cwnd_clamp)
			tp->snd_ssthresh = tp->snd_cwnd_clamp;
	} else {
		/* ssthresh may have been reduced unnecessarily during
		 * 3WHS. Restore it back to its initial default.
		 */
		tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
	}
	val = tcp_metric_get(tm, TCP_METRIC_REORDERING);
	if (val && tp->reordering != val) {
		tcp_disable_fack(tp);
		tcp_disable_early_retrans(tp);
		tp->reordering = val;
	}

	crtt = tcp_metric_get(tm, TCP_METRIC_RTT);
	rcu_read_unlock();
reset:
	/* The initial RTT measurement from the SYN/SYN-ACK is not ideal
	 * to seed the RTO for later data packets because SYN packets are
	 * small.  Use the per-dst cached values to seed the RTO but keep
	 * the RTT estimator variables intact (e.g., srtt, mdev, rttvar).
	 * Later the RTO will be updated immediately upon obtaining the first
	 * data RTT sample (tcp_rtt_estimator()).  Hence the cached RTT only
	 * influences the first RTO but not later RTT estimation.
	 *
	 * But if RTT is not available from the SYN (due to retransmits or
	 * syn cookies) or the cache, force a conservative 3secs timeout.
	 *
	 * A bit of theory.  RTT is the time that passes after a "normal"
	 * sized packet is sent until it is ACKed.  In normal circumstances
	 * sending small packets forces the peer to delay ACKs and the
	 * calculation is still correct.  The algorithm is adaptive and,
	 * provided we follow specs, it NEVER underestimates RTT.  BUT! If
	 * the peer tries clever tricks, such as sending "quick acks" for
	 * long enough to drive the measured RTT down, and then abruptly
	 * stops doing so and starts delaying ACKs, expect trouble.
	 */
	if (crtt > tp->srtt_us) {
		/* Set RTO like tcp_rtt_estimator(), but from cached RTT. */
		crtt /= 8 * USEC_PER_SEC / HZ;
		inet_csk(sk)->icsk_rto = crtt + max(2 * crtt, tcp_rto_min(sk));
	} else if (tp->srtt_us == 0) {
		/* RFC6298: 5.7 We've failed to get a valid RTT sample from
		 * 3WHS.  This is most likely due to retransmission,
		 * including spurious one.  Reset the RTO back to 3secs
		 * from the more aggressive 1sec to avoid more spurious
		 * retransmission.
		 */
		tp->rttvar_us = jiffies_to_usecs(TCP_TIMEOUT_FALLBACK);
		tp->mdev_us = tp->mdev_max_us = tp->rttvar_us;
		inet_csk(sk)->icsk_rto = TCP_TIMEOUT_FALLBACK;
	}
	/* Cut cwnd down to 1 per RFC5681 if SYN or SYN-ACK has been
	 * retransmitted.  In light of RFC6298's more aggressive 1sec
	 * initRTO, we only reset cwnd when more than 1 SYN/SYN-ACK
	 * retransmission has occurred.
	 */
	if (tp->total_retrans > 1)
		tp->snd_cwnd = 1;
	else
		tp->snd_cwnd = tcp_init_cwnd(tp, dst);
	tp->snd_cwnd_stamp = tcp_time_stamp;
}

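/* With @paws_check, perform a PAWS-style validation of the cached
 * timestamp against the incoming request; without it, the peer counts
 * as "proven" only if we already hold both an RTT sample and a
 * timestamp for it.
 */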
bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst,
			bool paws_check, bool timestamps)
{
	struct tcp_metrics_block *tm;
	bool ret;

	if (!dst)
		return false;

	rcu_read_lock();
	tm = __tcp_get_metrics_req(req, dst);
	if (paws_check) {
		if (tm &&
		    (u32)get_seconds() - tm->tcpm_ts_stamp < TCP_PAWS_MSL &&
		    ((s32)(tm->tcpm_ts - req->ts_recent) > TCP_PAWS_WINDOW ||
		     !timestamps))
			ret = false;
		else
			ret = true;
	} else {
		if (tm && tcp_metric_get(tm, TCP_METRIC_RTT) && tm->tcpm_ts_stamp)
			ret = true;
		else
			ret = false;
	}
	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(tcp_peer_is_proven);

void tcp_fetch_timewait_stamp(struct sock *sk, struct dst_entry *dst)
{
	struct tcp_metrics_block *tm;

	rcu_read_lock();
	tm = tcp_get_metrics(sk, dst, true);
	if (tm) {
		struct tcp_sock *tp = tcp_sk(sk);

		if ((u32)get_seconds() - tm->tcpm_ts_stamp <= TCP_PAWS_MSL) {
			tp->rx_opt.ts_recent_stamp = tm->tcpm_ts_stamp;
			tp->rx_opt.ts_recent = tm->tcpm_ts;
		}
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(tcp_fetch_timewait_stamp);

/* VJ's idea. Save last timestamp seen from this destination and hold
 * it at least for normal timewait interval to use for duplicate
 * segment detection in subsequent connections, before they enter
 * synchronized state.
 */
bool tcp_remember_stamp(struct sock *sk)
{
	struct dst_entry *dst = __sk_dst_get(sk);
	bool ret = false;

	if (dst) {
		struct tcp_metrics_block *tm;

		rcu_read_lock();
		tm = tcp_get_metrics(sk, dst, true);
		if (tm) {
			struct tcp_sock *tp = tcp_sk(sk);

			if ((s32)(tm->tcpm_ts - tp->rx_opt.ts_recent) <= 0 ||
			    ((u32)get_seconds() - tm->tcpm_ts_stamp > TCP_PAWS_MSL &&
			     tm->tcpm_ts_stamp <= (u32)tp->rx_opt.ts_recent_stamp)) {
				tm->tcpm_ts_stamp = (u32)tp->rx_opt.ts_recent_stamp;
				tm->tcpm_ts = tp->rx_opt.ts_recent;
			}
			ret = true;
		}
		rcu_read_unlock();
	}

	return ret;
}

bool tcp_tw_remember_stamp(struct inet_timewait_sock *tw)
{
	struct tcp_metrics_block *tm;
	bool ret = false;

	rcu_read_lock();
	tm = __tcp_get_metrics_tw(tw);
	if (tm) {
		const struct tcp_timewait_sock *tcptw;
		struct sock *sk = (struct sock *) tw;

		tcptw = tcp_twsk(sk);
		if ((s32)(tm->tcpm_ts - tcptw->tw_ts_recent) <= 0 ||
		    ((u32)get_seconds() - tm->tcpm_ts_stamp > TCP_PAWS_MSL &&
		     tm->tcpm_ts_stamp <= (u32)tcptw->tw_ts_recent_stamp)) {
			tm->tcpm_ts_stamp = (u32)tcptw->tw_ts_recent_stamp;
			tm->tcpm_ts = tcptw->tw_ts_recent;
		}
		ret = true;
	}
	rcu_read_unlock();

	return ret;
}

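/* Fast Open state is read and updated outside tcp_metrics_lock, so it
 * is protected by a seqlock: readers retry on a concurrent writer
 * instead of blocking.
 */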
static DEFINE_SEQLOCK(fastopen_seqlock);

void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
			    struct tcp_fastopen_cookie *cookie,
			    int *syn_loss, unsigned long *last_syn_loss)
{
	struct tcp_metrics_block *tm;

	rcu_read_lock();
	tm = tcp_get_metrics(sk, __sk_dst_get(sk), false);
	if (tm) {
		struct tcp_fastopen_metrics *tfom = &tm->tcpm_fastopen;
		unsigned int seq;

		do {
			seq = read_seqbegin(&fastopen_seqlock);
			if (tfom->mss)
				*mss = tfom->mss;
			*cookie = tfom->cookie;
			if (cookie->len <= 0 && tfom->try_exp == 1)
				cookie->exp = true;
			*syn_loss = tfom->syn_loss;
			*last_syn_loss = *syn_loss ? tfom->last_syn_loss : 0;
		} while (read_seqretry(&fastopen_seqlock, seq));
	}
	rcu_read_unlock();
}

void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
			    struct tcp_fastopen_cookie *cookie, bool syn_lost,
			    u16 try_exp)
{
	struct dst_entry *dst = __sk_dst_get(sk);
	struct tcp_metrics_block *tm;

	if (!dst)
		return;
	rcu_read_lock();
	tm = tcp_get_metrics(sk, dst, true);
	if (tm) {
		struct tcp_fastopen_metrics *tfom = &tm->tcpm_fastopen;

		write_seqlock_bh(&fastopen_seqlock);
		if (mss)
			tfom->mss = mss;
		if (cookie && cookie->len > 0)
			tfom->cookie = *cookie;
		else if (try_exp > tfom->try_exp &&
			 tfom->cookie.len <= 0 && !tfom->cookie.exp)
			tfom->try_exp = try_exp;
		if (syn_lost) {
			++tfom->syn_loss;
			tfom->last_syn_loss = jiffies;
		} else
			tfom->syn_loss = 0;
		write_sequnlock_bh(&fastopen_seqlock);
	}
	rcu_read_unlock();
}

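/* Generic netlink interface ("tcp_metrics" family): lets userspace
 * dump, query and delete cache entries, e.g. via iproute2's
 * `ip tcp_metrics` command.
 */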
static struct genl_family tcp_metrics_nl_family = {
	.id		= GENL_ID_GENERATE,
	.hdrsize	= 0,
	.name		= TCP_METRICS_GENL_NAME,
	.version	= TCP_METRICS_GENL_VERSION,
	.maxattr	= TCP_METRICS_ATTR_MAX,
	.netnsok	= true,
};

static const struct nla_policy tcp_metrics_nl_policy[TCP_METRICS_ATTR_MAX + 1] = {
	[TCP_METRICS_ATTR_ADDR_IPV4]	= { .type = NLA_U32, },
	[TCP_METRICS_ATTR_ADDR_IPV6]	= { .type = NLA_BINARY,
					    .len = sizeof(struct in6_addr), },
	/* Following attributes are not received for GET/DEL,
	 * we keep them for reference
	 */
#if 0
	[TCP_METRICS_ATTR_AGE]		= { .type = NLA_MSECS, },
	[TCP_METRICS_ATTR_TW_TSVAL]	= { .type = NLA_U32, },
	[TCP_METRICS_ATTR_TW_TS_STAMP]	= { .type = NLA_S32, },
	[TCP_METRICS_ATTR_VALS]		= { .type = NLA_NESTED, },
	[TCP_METRICS_ATTR_FOPEN_MSS]	= { .type = NLA_U16, },
	[TCP_METRICS_ATTR_FOPEN_SYN_DROPS]	= { .type = NLA_U16, },
	[TCP_METRICS_ATTR_FOPEN_SYN_DROP_TS]	= { .type = NLA_MSECS, },
	[TCP_METRICS_ATTR_FOPEN_COOKIE]	= { .type = NLA_BINARY,
					    .len = TCP_FASTOPEN_COOKIE_MAX, },
#endif
};

/* Add attributes, caller cancels its header on failure */
static int tcp_metrics_fill_info(struct sk_buff *msg,
				 struct tcp_metrics_block *tm)
{
	struct nlattr *nest;
	int i;

	switch (tm->tcpm_daddr.family) {
	case AF_INET:
		if (nla_put_in_addr(msg, TCP_METRICS_ATTR_ADDR_IPV4,
				    inetpeer_get_addr_v4(&tm->tcpm_daddr)) < 0)
			goto nla_put_failure;
		if (nla_put_in_addr(msg, TCP_METRICS_ATTR_SADDR_IPV4,
				    inetpeer_get_addr_v4(&tm->tcpm_saddr)) < 0)
			goto nla_put_failure;
		break;
	case AF_INET6:
		if (nla_put_in6_addr(msg, TCP_METRICS_ATTR_ADDR_IPV6,
				     inetpeer_get_addr_v6(&tm->tcpm_daddr)) < 0)
			goto nla_put_failure;
		if (nla_put_in6_addr(msg, TCP_METRICS_ATTR_SADDR_IPV6,
				     inetpeer_get_addr_v6(&tm->tcpm_saddr)) < 0)
			goto nla_put_failure;
		break;
	default:
		return -EAFNOSUPPORT;
	}

	if (nla_put_msecs(msg, TCP_METRICS_ATTR_AGE,
			  jiffies - tm->tcpm_stamp,
			  TCP_METRICS_ATTR_PAD) < 0)
		goto nla_put_failure;
	if (tm->tcpm_ts_stamp) {
		if (nla_put_s32(msg, TCP_METRICS_ATTR_TW_TS_STAMP,
				(s32) (get_seconds() - tm->tcpm_ts_stamp)) < 0)
			goto nla_put_failure;
		if (nla_put_u32(msg, TCP_METRICS_ATTR_TW_TSVAL,
				tm->tcpm_ts) < 0)
			goto nla_put_failure;
	}

	{
		int n = 0;

		nest = nla_nest_start(msg, TCP_METRICS_ATTR_VALS);
		if (!nest)
			goto nla_put_failure;
		for (i = 0; i < TCP_METRIC_MAX_KERNEL + 1; i++) {
			u32 val = tm->tcpm_vals[i];

			if (!val)
				continue;
			if (i == TCP_METRIC_RTT) {
				if (nla_put_u32(msg, TCP_METRIC_RTT_US + 1,
						val) < 0)
					goto nla_put_failure;
				n++;
				val = max(val / 1000, 1U);
			}
			if (i == TCP_METRIC_RTTVAR) {
				if (nla_put_u32(msg, TCP_METRIC_RTTVAR_US + 1,
						val) < 0)
					goto nla_put_failure;
				n++;
				val = max(val / 1000, 1U);
			}
			if (nla_put_u32(msg, i + 1, val) < 0)
				goto nla_put_failure;
			n++;
		}
		if (n)
			nla_nest_end(msg, nest);
		else
			nla_nest_cancel(msg, nest);
	}

	{
		struct tcp_fastopen_metrics tfom_copy[1], *tfom;
		unsigned int seq;

		do {
			seq = read_seqbegin(&fastopen_seqlock);
			tfom_copy[0] = tm->tcpm_fastopen;
		} while (read_seqretry(&fastopen_seqlock, seq));

		tfom = tfom_copy;
		if (tfom->mss &&
		    nla_put_u16(msg, TCP_METRICS_ATTR_FOPEN_MSS,
				tfom->mss) < 0)
			goto nla_put_failure;
		if (tfom->syn_loss &&
		    (nla_put_u16(msg, TCP_METRICS_ATTR_FOPEN_SYN_DROPS,
				 tfom->syn_loss) < 0 ||
		     nla_put_msecs(msg, TCP_METRICS_ATTR_FOPEN_SYN_DROP_TS,
				   jiffies - tfom->last_syn_loss,
				   TCP_METRICS_ATTR_PAD) < 0))
			goto nla_put_failure;
		if (tfom->cookie.len > 0 &&
		    nla_put(msg, TCP_METRICS_ATTR_FOPEN_COOKIE,
			    tfom->cookie.len, tfom->cookie.val) < 0)
			goto nla_put_failure;
	}
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static int tcp_metrics_dump_info(struct sk_buff *skb,
				 struct netlink_callback *cb,
				 struct tcp_metrics_block *tm)
{
	void *hdr;

	hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			  &tcp_metrics_nl_family, NLM_F_MULTI,
			  TCP_METRICS_CMD_GET);
	if (!hdr)
		return -EMSGSIZE;

	if (tcp_metrics_fill_info(skb, tm) < 0)
		goto nla_put_failure;

	genlmsg_end(skb, hdr);
	return 0;

nla_put_failure:
	genlmsg_cancel(skb, hdr);
	return -EMSGSIZE;
}

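/* Dump the whole table.  cb->args[0] and cb->args[1] record the hash
 * bucket row and the position within its chain, so an interrupted
 * dump can resume where it left off.
 */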
static int tcp_metrics_nl_dump(struct sk_buff *skb,
			       struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	unsigned int max_rows = 1U << tcp_metrics_hash_log;
	unsigned int row, s_row = cb->args[0];
	int s_col = cb->args[1], col = s_col;

	for (row = s_row; row < max_rows; row++, s_col = 0) {
		struct tcp_metrics_block *tm;
		struct tcpm_hash_bucket *hb = tcp_metrics_hash + row;

		rcu_read_lock();
		for (col = 0, tm = rcu_dereference(hb->chain); tm;
		     tm = rcu_dereference(tm->tcpm_next), col++) {
			if (!net_eq(tm_net(tm), net))
				continue;
			if (col < s_col)
				continue;
			if (tcp_metrics_dump_info(skb, cb, tm) < 0) {
				rcu_read_unlock();
				goto done;
			}
		}
		rcu_read_unlock();
	}

done:
	cb->args[0] = row;
	cb->args[1] = col;
	return skb->len;
}

static int __parse_nl_addr(struct genl_info *info, struct inetpeer_addr *addr,
			   unsigned int *hash, int optional, int v4, int v6)
{
	struct nlattr *a;

	a = info->attrs[v4];
	if (a) {
		inetpeer_set_addr_v4(addr, nla_get_in_addr(a));
		if (hash)
			*hash = ipv4_addr_hash(inetpeer_get_addr_v4(addr));
		return 0;
	}
	a = info->attrs[v6];
	if (a) {
		struct in6_addr in6;

		if (nla_len(a) != sizeof(struct in6_addr))
			return -EINVAL;
		in6 = nla_get_in6_addr(a);
		inetpeer_set_addr_v6(addr, &in6);
		if (hash)
			*hash = ipv6_addr_hash(inetpeer_get_addr_v6(addr));
		return 0;
	}
	return optional ? 1 : -EAFNOSUPPORT;
}

static int parse_nl_addr(struct genl_info *info, struct inetpeer_addr *addr,
			 unsigned int *hash, int optional)
{
	return __parse_nl_addr(info, addr, hash, optional,
			       TCP_METRICS_ATTR_ADDR_IPV4,
			       TCP_METRICS_ATTR_ADDR_IPV6);
}

static int parse_nl_saddr(struct genl_info *info, struct inetpeer_addr *addr)
{
	return __parse_nl_addr(info, addr, NULL, 0,
			       TCP_METRICS_ATTR_SADDR_IPV4,
			       TCP_METRICS_ATTR_SADDR_IPV6);
}

static int tcp_metrics_nl_cmd_get(struct sk_buff *skb, struct genl_info *info)
{
	struct tcp_metrics_block *tm;
	struct inetpeer_addr saddr, daddr;
	unsigned int hash;
	struct sk_buff *msg;
	struct net *net = genl_info_net(info);
	void *reply;
	int ret;
	bool src = true;

	ret = parse_nl_addr(info, &daddr, &hash, 0);
	if (ret < 0)
		return ret;

	ret = parse_nl_saddr(info, &saddr);
	if (ret < 0)
		src = false;

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	reply = genlmsg_put_reply(msg, info, &tcp_metrics_nl_family, 0,
				  info->genlhdr->cmd);
	if (!reply)
		goto nla_put_failure;

	hash ^= net_hash_mix(net);
	hash = hash_32(hash, tcp_metrics_hash_log);
	ret = -ESRCH;
	rcu_read_lock();
	for (tm = rcu_dereference(tcp_metrics_hash[hash].chain); tm;
	     tm = rcu_dereference(tm->tcpm_next)) {
		if (addr_same(&tm->tcpm_daddr, &daddr) &&
		    (!src || addr_same(&tm->tcpm_saddr, &saddr)) &&
		    net_eq(tm_net(tm), net)) {
			ret = tcp_metrics_fill_info(msg, tm);
			break;
		}
	}
	rcu_read_unlock();
	if (ret < 0)
		goto out_free;

	genlmsg_end(msg, reply);
	return genlmsg_reply(msg, info);

nla_put_failure:
	ret = -EMSGSIZE;

out_free:
	nlmsg_free(msg);
	return ret;
}

static void tcp_metrics_flush_all(struct net *net)
{
	unsigned int max_rows = 1U << tcp_metrics_hash_log;
	struct tcpm_hash_bucket *hb = tcp_metrics_hash;
	struct tcp_metrics_block *tm;
	unsigned int row;

	for (row = 0; row < max_rows; row++, hb++) {
		struct tcp_metrics_block __rcu **pp;

		spin_lock_bh(&tcp_metrics_lock);
		pp = &hb->chain;
		for (tm = deref_locked(*pp); tm; tm = deref_locked(*pp)) {
			if (net_eq(tm_net(tm), net)) {
				*pp = tm->tcpm_next;
				kfree_rcu(tm, rcu_head);
			} else {
				pp = &tm->tcpm_next;
			}
		}
		spin_unlock_bh(&tcp_metrics_lock);
	}
}

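/* Delete matching entries.  Without an address attribute the whole
 * table is flushed for the caller's namespace; with one, only entries
 * matching the destination (and the source, when given) are removed.
 */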
static int tcp_metrics_nl_cmd_del(struct sk_buff *skb, struct genl_info *info)
{
	struct tcpm_hash_bucket *hb;
	struct tcp_metrics_block *tm;
	struct tcp_metrics_block __rcu **pp;
	struct inetpeer_addr saddr, daddr;
	unsigned int hash;
	struct net *net = genl_info_net(info);
	int ret;
	bool src = true, found = false;

	ret = parse_nl_addr(info, &daddr, &hash, 1);
	if (ret < 0)
		return ret;
	if (ret > 0) {
		tcp_metrics_flush_all(net);
		return 0;
	}
	ret = parse_nl_saddr(info, &saddr);
	if (ret < 0)
		src = false;

	hash ^= net_hash_mix(net);
	hash = hash_32(hash, tcp_metrics_hash_log);
	hb = tcp_metrics_hash + hash;
	pp = &hb->chain;
	spin_lock_bh(&tcp_metrics_lock);
	for (tm = deref_locked(*pp); tm; tm = deref_locked(*pp)) {
		if (addr_same(&tm->tcpm_daddr, &daddr) &&
		    (!src || addr_same(&tm->tcpm_saddr, &saddr)) &&
		    net_eq(tm_net(tm), net)) {
			*pp = tm->tcpm_next;
			kfree_rcu(tm, rcu_head);
			found = true;
		} else {
			pp = &tm->tcpm_next;
		}
	}
	spin_unlock_bh(&tcp_metrics_lock);
	if (!found)
		return -ESRCH;
	return 0;
}

static const struct genl_ops tcp_metrics_nl_ops[] = {
	{
		.cmd = TCP_METRICS_CMD_GET,
		.doit = tcp_metrics_nl_cmd_get,
		.dumpit = tcp_metrics_nl_dump,
		.policy = tcp_metrics_nl_policy,
	},
	{
		.cmd = TCP_METRICS_CMD_DEL,
		.doit = tcp_metrics_nl_cmd_del,
		.policy = tcp_metrics_nl_policy,
		.flags = GENL_ADMIN_PERM,
	},
};

static unsigned int tcpmhash_entries;
static int __init set_tcpmhash_entries(char *str)
{
	ssize_t ret;

	if (!str)
		return 0;

	ret = kstrtouint(str, 0, &tcpmhash_entries);
	if (ret)
		return 0;

	return 1;
}
__setup("tcpmhash_entries=", set_tcpmhash_entries);

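/* The hash table is sized once, for the initial namespace: use the
 * "tcpmhash_entries=" boot parameter when given, otherwise scale with
 * memory (16K slots on machines with at least 128K pages of RAM,
 * i.e. 512 MB with 4 KiB pages, else 8K slots).
 */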
static int __net_init tcp_net_metrics_init(struct net *net)
{
	size_t size;
	unsigned int slots;

	if (!net_eq(net, &init_net))
		return 0;

	slots = tcpmhash_entries;
	if (!slots) {
		if (totalram_pages >= 128 * 1024)
			slots = 16 * 1024;
		else
			slots = 8 * 1024;
	}

	tcp_metrics_hash_log = order_base_2(slots);
	size = sizeof(struct tcpm_hash_bucket) << tcp_metrics_hash_log;

	tcp_metrics_hash = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
	if (!tcp_metrics_hash)
		tcp_metrics_hash = vzalloc(size);

	if (!tcp_metrics_hash)
		return -ENOMEM;

	return 0;
}

static void __net_exit tcp_net_metrics_exit(struct net *net)
{
	tcp_metrics_flush_all(net);
}

static __net_initdata struct pernet_operations tcp_net_metrics_ops = {
	.init	=	tcp_net_metrics_init,
	.exit	=	tcp_net_metrics_exit,
};

void __init tcp_metrics_init(void)
{
	int ret;

	ret = register_pernet_subsys(&tcp_net_metrics_ops);
	if (ret < 0)
		panic("Could not allocate the tcp_metrics hash table\n");

	ret = genl_register_family_with_ops(&tcp_metrics_nl_family,
					    tcp_metrics_nl_ops);
	if (ret < 0)
		panic("Could not register tcp_metrics generic netlink\n");
}