mirror of https://github.com/OpenIPC/firmware.git
--- linux-4.9.37/net/ipv4/tcp_input.c	2017-07-12 16:42:41.000000000 +0300
+++ linux-4.9.y/net/ipv4/tcp_input.c	2021-06-07 13:01:34.000000000 +0300
@@ -1312,7 +1312,7 @@
 	TCP_SKB_CB(skb)->seq += shifted;
 
 	tcp_skb_pcount_add(prev, pcount);
-	BUG_ON(tcp_skb_pcount(skb) < pcount);
+	WARN_ON_ONCE(tcp_skb_pcount(skb) < pcount);
 	tcp_skb_pcount_add(skb, -pcount);
 
 	/* When we're adding to gso_segs == 1, gso_size will be zero,
@@ -1379,6 +1379,21 @@
 	return !skb_headlen(skb) && skb_is_nonlinear(skb);
 }
 
+int tcp_skb_shift(struct sk_buff *to, struct sk_buff *from,
+		  int pcount, int shiftlen)
+{
+	/* TCP min gso_size is 8 bytes (TCP_MIN_GSO_SIZE)
+	 * Since TCP_SKB_CB(skb)->tcp_gso_segs is 16 bits, we need
+	 * to make sure not storing more than 65535 * 8 bytes per skb,
+	 * even if current MSS is bigger.
+	 */
+	if (unlikely(to->len + shiftlen >= 65535 * TCP_MIN_GSO_SIZE))
+		return 0;
+	if (unlikely(tcp_skb_pcount(to) + pcount > 65535))
+		return 0;
+	return skb_shift(to, from, shiftlen);
+}
+
 /* Try collapsing SACK blocks spanning across multiple skbs to a single
  * skb.
  */
@@ -1390,6 +1405,7 @@
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct sk_buff *prev;
 	int mss;
+	int next_pcount;
 	int pcount = 0;
 	int len;
 	int in_sack;
@@ -1487,7 +1503,7 @@
 	if (!after(TCP_SKB_CB(skb)->seq + len, tp->snd_una))
 		goto fallback;
 
-	if (!skb_shift(prev, skb, len))
+	if (!tcp_skb_shift(prev, skb, pcount, len))
 		goto fallback;
 	if (!tcp_shifted_skb(sk, skb, state, pcount, len, mss, dup_sack))
 		goto out;
@@ -1506,11 +1522,11 @@
 		goto out;
 
 	len = skb->len;
-	if (skb_shift(prev, skb, len)) {
-		pcount += tcp_skb_pcount(skb);
-		tcp_shifted_skb(sk, skb, state, tcp_skb_pcount(skb), len, mss, 0);
+	next_pcount = tcp_skb_pcount(skb);
+	if (tcp_skb_shift(prev, skb, next_pcount, len)) {
+		pcount += next_pcount;
+		tcp_shifted_skb(sk, skb, state, next_pcount, len, mss, 0);
 	}
-
 out:
 	state->fack_count += pcount;
 	return prev;
@@ -4949,6 +4965,7 @@
  * 2) not add too big latencies if thousands of packets sit there.
  *    (But if application shrinks SO_RCVBUF, we could still end up
  *     freeing whole queue here)
+ * 3) Drop at least 12.5 % of sk_rcvbuf to avoid malicious attacks.
  *
  * Return true if queue has shrunk.
  */
@@ -4956,20 +4973,26 @@
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct rb_node *node, *prev;
+	int goal;
 
 	if (RB_EMPTY_ROOT(&tp->out_of_order_queue))
 		return false;
 
 	NET_INC_STATS(sock_net(sk), LINUX_MIB_OFOPRUNED);
+	goal = sk->sk_rcvbuf >> 3;
 	node = &tp->ooo_last_skb->rbnode;
 	do {
 		prev = rb_prev(node);
 		rb_erase(node, &tp->out_of_order_queue);
+		goal -= rb_entry_safe(node, struct sk_buff, rbnode)->truesize;
 		tcp_drop(sk, rb_entry(node, struct sk_buff, rbnode));
-		sk_mem_reclaim(sk);
-		if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
-		    !tcp_under_memory_pressure(sk))
-			break;
+		if (!prev || goal <= 0) {
+			sk_mem_reclaim(sk);
+			if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
+			    !tcp_under_memory_pressure(sk))
+				break;
+			goal = sk->sk_rcvbuf >> 3;
+		}
 		node = prev;
 	} while (node);
 	tp->ooo_last_skb = rb_entry(prev, struct sk_buff, rbnode);
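
For illustration only, a standalone C sketch (hypothetical constants and helper names, not the kernel implementation) of the per-skb limit that the tcp_skb_shift() hunk enforces before delegating to skb_shift(): the merged skb may never exceed 65535 segments or 65535 * TCP_MIN_GSO_SIZE = 524280 bytes, so the 16-bit tcp_gso_segs field cannot overflow.

/* sketch of the tcp_skb_shift() overflow guard; plain ints stand in
 * for skb lengths and packet counts */
#include <stdio.h>
#include <stdbool.h>

#define TCP_MIN_GSO_SIZE 8	/* smallest gso_size TCP uses, per the patch comment */
#define TCP_MAX_GSO_SEGS 65535	/* tcp_gso_segs is a 16-bit field */

/* Return true if `shiftlen` bytes / `pcount` segments may be merged into
 * an skb that already holds `to_len` bytes and `to_pcount` segments. */
static bool tcp_shift_allowed(int to_len, int to_pcount,
			      int pcount, int shiftlen)
{
	/* cap total bytes so len / gso_size stays below 65536 even at
	 * the minimum gso_size of 8 bytes */
	if (to_len + shiftlen >= TCP_MAX_GSO_SEGS * TCP_MIN_GSO_SIZE)
		return false;
	/* cap the segment count itself at the 16-bit maximum */
	if (to_pcount + pcount > TCP_MAX_GSO_SEGS)
		return false;
	return true;
}

int main(void)
{
	/* 65535 * 8 = 524280 bytes is the hard per-skb byte ceiling */
	printf("merge small skb: %d\n", tcp_shift_allowed(1000, 2, 1, 500));
	printf("byte overflow:   %d\n", tcp_shift_allowed(524000, 100, 1, 1000));
	printf("pcount overflow: %d\n", tcp_shift_allowed(1000, 65535, 1, 8));
	return 0;
}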