diff --git a/.gitignore b/.gitignore
index 1bdc6b279..93baf27cd 100644
--- a/.gitignore
+++ b/.gitignore
@@ -11,4 +11,6 @@
 hub/charts
 **/__pycache__
 **/*.pyc
+# random stuff
 **/.DS_Store
+**/*.bak
diff --git a/vendor/google/gke/node-pool/config/core-pool-sysctl.yaml b/vendor/google/gke/node-pool/config/core-pool-sysctl.yaml
index 793ae82e3..c81b79680 100644
--- a/vendor/google/gke/node-pool/config/core-pool-sysctl.yaml
+++ b/vendor/google/gke/node-pool/config/core-pool-sysctl.yaml
@@ -4,48 +4,48 @@ kubeletConfig:
 #   allowedUnsafeSysctls: 'net.core.*,net.ipv4.*'
 linuxConfig:
   sysctl:
-      # tune the ipv4 settings to not cause nginx to use all of the tcp memory
-      # addresses: https://jira-secure.berkeley.edu/browse/DH-3
-      #
-      # following this process:
-      # https://cloud.google.com/kubernetes-engine/docs/how-to/node-system-config
-      #
-      # man page:
-      # https://man7.org/linux/man-pages/man7/tcp.7.html
-      #
-      # figures below are measured in units of system page size (4096B),
-      # and gleaned from the following articles:
-      # https://cromwell-intl.com/open-source/performance-tuning/tcp.html
-      # https://www.ibm.com/docs/en/linux-on-systems?topic=tuning-tcpip-ipv4-settings
-      # https://www.ibm.com/docs/en/linux-on-systems?topic=tuning-network-stack-settings
-      #
-      # net.ipv4.tcp_mem seems to be automagically generated from the supplied tcp_rmem
-      # and tcp_wmem settings. i believe?
-      #
-      # here be dragons.
-      #
-      # original values (as of 2023-04-19):
-      # net.core.netdev_max_backlog=1000
-      # net.core.rmem_max=212992
-      # net.core.wmem_max=212992
-      # net.ipv4.tcp_rmem=4096 87380 6291456
-      # net.ipv4.tcp_wmem=4096 16384 4194304
-      #
-      # changes and additional tweaks (2024-04-11):
-      # net.ipv4.tcp_max_syn_backlog=4096
-      # net.core.rmem_max=3276800
-      # net.core.wmem_max=3276800
-      # net.ipv4.tcp_rmem=4096 87380 16777216
-      # net.ipv4.tcp_wmem=4096 87380 16777216
-      # net.core.somaxconn=1024
-      #
-      # https://fasterdata.es.net/host-tuning/linux/#toc-anchor-2
-      net.core.netdev_max_backlog: '30000'
-      net.core.somaxconn: '4096'
-      # net.ipv4.tcp_max_syn_backlog: '8192'
+    # tune the ipv4 settings to not cause nginx to use all of the tcp memory
+    # addresses: https://jira-secure.berkeley.edu/browse/DH-3
+    #
+    # following this process:
+    # https://cloud.google.com/kubernetes-engine/docs/how-to/node-system-config
+    #
+    # man page:
+    # https://man7.org/linux/man-pages/man7/tcp.7.html
+    #
+    # figures below are measured in units of system page size (4096B),
+    # and gleaned from the following articles:
+    # https://cromwell-intl.com/open-source/performance-tuning/tcp.html
+    # https://www.ibm.com/docs/en/linux-on-systems?topic=tuning-tcpip-ipv4-settings
+    # https://www.ibm.com/docs/en/linux-on-systems?topic=tuning-network-stack-settings
+    #
+    # net.ipv4.tcp_mem seems to be automagically generated from the supplied tcp_rmem
+    # and tcp_wmem settings. i believe?
+    #
+    # here be dragons.
+    #
+    # original values (as of 2023-04-19):
+    # net.core.netdev_max_backlog=1000
+    # net.core.rmem_max=212992
+    # net.core.wmem_max=212992
+    # net.ipv4.tcp_rmem=4096 87380 6291456
+    # net.ipv4.tcp_wmem=4096 16384 4194304
+    #
+    # changes and additional tweaks (2024-04-11):
+    # net.ipv4.tcp_max_syn_backlog=4096
+    # net.core.rmem_max=3276800
+    # net.core.wmem_max=3276800
+    # net.ipv4.tcp_rmem=4096 87380 16777216
+    # net.ipv4.tcp_wmem=4096 87380 16777216
+    # net.core.somaxconn=1024
+    #
+    # https://fasterdata.es.net/host-tuning/linux/#toc-anchor-2
+    net.core.netdev_max_backlog: '30000'
+    net.core.somaxconn: '4096'
+    # net.ipv4.tcp_max_syn_backlog: '8192'

-      # these values are in bytes
-      net.core.rmem_max: '67108864'
-      net.core.wmem_max: '67108864'
-      net.ipv4.tcp_rmem: '4096 87380 33554432'
-      net.ipv4.tcp_wmem: '4096 87380 33554432'
+    # these values are in bytes
+    net.core.rmem_max: '67108864'
+    net.core.wmem_max: '67108864'
+    net.ipv4.tcp_rmem: '4096 87380 33554432'
+    net.ipv4.tcp_wmem: '4096 87380 33554432'
diff --git a/vendor/google/gke/node-pool/config/core-pool-sysctl.yaml.bak b/vendor/google/gke/node-pool/config/core-pool-sysctl.yaml.bak
deleted file mode 100644
index 1e22a09b9..000000000
--- a/vendor/google/gke/node-pool/config/core-pool-sysctl.yaml.bak
+++ /dev/null
@@ -1,43 +0,0 @@
-kubeletConfig:
-  cpuManagerPolicy: static
-# kubeletExtraConfig:
-#   allowedUnsafeSysctls: 'net.core.*,net.ipv4.*'
-linuxConfig:
-  sysctl:
-    # tune the ipv4 settings to not cause nginx to use all of the tcp memory
-    # addresses: https://jira-secure.berkeley.edu/browse/DH-3
-    #
-    # following this process:
-    # https://cloud.google.com/kubernetes-engine/docs/how-to/node-system-config
-    #
-    # man page:
-    # https://man7.org/linux/man-pages/man7/tcp.7.html
-    #
-    # figures below are measured in units of system page size (4096B),
-    # and gleaned from the following articles:
-    # https://cromwell-intl.com/open-source/performance-tuning/tcp.html
-    # https://www.ibm.com/docs/en/linux-on-systems?topic=tuning-tcpip-ipv4-settings
-    # https://www.ibm.com/docs/en/linux-on-systems?topic=tuning-network-stack-settings
-    #
-    # net.ipv4.tcp_mem seems to be automagically generated from the supplied tcp_rmem
-    # and tcp_wmem settings. i believe?
-    #
-    # here be dragons.
-    #
-    # original values (as of 2023-19-04):
-    # net.core.netdev_max_backlog=1000
-    # net.core.rmem_max=212992
-    # net.core.wmem_max=212992
-    # net.ipv4.tcp_rmem=4096 87380 6291456
-    # net.ipv4.tcp_wmem=4096 16384 4194304
-    #
-    # https://fasterdata.es.net/host-tuning/linux/#toc-anchor-2
-    net.core.netdev_max_backlog: '30000'
-    net.ipv4.tcp_max_syn_backlog: '8192'
-    net.core.rmem_default:
-    net.core.rmem_max: '67108864'
-    net.core.wmem_max: '67108864'
-    net.ipv4.tcp_rmem: '4096 87380 33554432'
-    net.ipv4.tcp_wmem: '4096 87380 33554432'
-    # http://simonhf.wordpress.com/2010/10/01/node-js-versus-sxe-hello-world-complexity-speed-and-memory-usage/
-    net.core.somaxconn: '65535'
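For reference, a node system config file like core-pool-sysctl.yaml takes effect when it is passed to the node pool, per the node-system-config how-to linked in the comments above. A minimal sketch of that step, assuming gcloud defaults; the cluster name and location below are illustrative placeholders, and the pool name is only inferred from the filename:

    # apply the node system config (sysctls + kubeletConfig) when creating the pool
    gcloud container node-pools create core-pool \
        --cluster=CLUSTER_NAME \
        --location=us-central1 \
        --system-config-from-file=vendor/google/gke/node-pool/config/core-pool-sysctl.yaml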