02k8s-install-centos.sh

  1. #!/usr/bin/env bash
  2. [[ -n $DEBUG ]] && set -x
  3. set -o errtrace # Make sure any error trap is inherited
  4. set -o nounset # Disallow expansion of unset variables
  5. set -o pipefail # Use last non-zero exit code in a pipeline
6. # Versions
  7. KUBE_VERSION="${KUBE_VERSION:-latest}"
  8. FLANNEL_VERSION="${FLANNEL_VERSION:-0.17.0}"
  9. CALICO_VERSION="${CALICO_VERSION:-3.22.1}"
  10. CILIUM_VERSION="${CILIUM_VERSION:-1.9.13}"
  11. HELM_VERSION="${HELM_VERSION:-3.10.1}"
  12. INGRESS_NGINX="${INGRESS_NGINX:-4.2.5}"
  13. RANCHER_VERSION="${RANCHER_VERSION:-2.6.9}"
  14. #METRICS_SERVER_VERSION="${METRICS_SERVER_VERSION:-0.6.1}"
  15. #KUBE_PROMETHEUS_VERSION="${KUBE_PROMETHEUS_VERSION:-0.10.0}"
  16. #ELASTICSEARCH_VERSION="${ELASTICSEARCH_VERSION:-8.1.0}"
  17. #ROOK_VERSION="${ROOK_VERSION:-1.8.7}"
  18. #LONGHORN_VERSION="${LONGHORN_VERSION:-1.2.4}"
19. # Cluster configuration
  20. KUBE_DNSDOMAIN="${KUBE_DNSDOMAIN:-cluster.local}"
  21. KUBE_APISERVER="${KUBE_APISERVER:-apiserver.$KUBE_DNSDOMAIN}"
  22. KUBE_POD_SUBNET="${KUBE_POD_SUBNET:-10.244.0.0/16}"
  23. KUBE_SERVICE_SUBNET="${KUBE_SERVICE_SUBNET:-10.96.0.0/16}"
  24. KUBE_IMAGE_REPO="${KUBE_IMAGE_REPO:-registry.cn-hangzhou.aliyuncs.com/kainstall}"
  25. KUBE_NETWORK="${KUBE_NETWORK:-flannel}"
  26. KUBE_INGRESS="${KUBE_INGRESS:-nginx}"
  27. KUBE_MONITOR="${KUBE_MONITOR:-prometheus}"
  28. KUBE_STORAGE="${KUBE_STORAGE:-rook}"
  29. KUBE_LOG="${KUBE_LOG:-elasticsearch}"
  30. KUBE_FLANNEL_TYPE="${KUBE_FLANNEL_TYPE:-vxlan}"
  31. KUBE_CRI="${KUBE_CRI:-docker}"
  32. KUBE_CRI_VERSION="${KUBE_CRI_VERSION:-latest}"
  33. KUBE_CRI_ENDPOINT="${KUBE_CRI_ENDPOINT:-/var/run/dockershim.sock}"
34. # Master and worker node addresses, comma-separated
  35. MASTER_NODES="${MASTER_NODES:-}"
  36. WORKER_NODES="${WORKER_NODES:-}"
37. # Node from which setup operations are run (management node)
  38. MGMT_NODE="${MGMT_NODE:-127.0.0.1}"
39. # Node connection settings
  40. SSH_USER="${SSH_USER:-root}"
  41. SSH_PASSWORD="${SSH_PASSWORD:-}"
  42. SSH_PRIVATE_KEY="${SSH_PRIVATE_KEY:-}"
  43. SSH_PORT="${SSH_PORT:-22}"
  44. SUDO_USER="${SUDO_USER:-root}"
45. # Node settings
  46. HOSTNAME_PREFIX="${HOSTNAME_PREFIX:-k8s}"
47. # nginx port configuration
  48. NGINX_HTTP_PORT="${NGINX_HTTP_PORT:-80}"
49. # Script settings
  50. TMP_DIR="$(rm -rf /tmp/kainstall* && mktemp -d -t kainstall.XXXXXXXXXX)"
  51. LOG_FILE="${TMP_DIR}/kainstall.log"
  52. SSH_OPTIONS="-o ConnectTimeout=600 -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null"
  53. ERROR_INFO="\n\033[31mERROR Summary: \033[0m\n "
  54. ACCESS_INFO="\n\033[32mACCESS Summary: \033[0m\n "
  55. COMMAND_OUTPUT=""
  56. SCRIPT_PARAMETER="$*"
  57. OFFLINE_DIR="/tmp/kainstall-offline-file/"
  58. OFFLINE_FILE=""
  59. OS_SUPPORT="centos7 centos8"
  60. GITHUB_PROXY="${GITHUB_PROXY:-https://ghproxy.com/}"
  61. GCR_PROXY="${GCR_PROXY:-k8sgcr.lework.workers.dev}"
  62. SKIP_UPGRADE_PLAN=${SKIP_UPGRADE_PLAN:-false}
  63. SKIP_SET_OS_REPO=${SKIP_SET_OS_REPO:-false}
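# The defaults above can be overridden from the environment when launching the
# script; a minimal sketch with hypothetical addresses (not taken from this file):
#   MASTER_NODES="192.168.1.11,192.168.1.12" WORKER_NODES="192.168.1.21" \
#   SSH_PASSWORD="secret" KUBE_NETWORK="calico" bash 02k8s-install-centos.sh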
  64. trap trap::info 1 2 3 15 EXIT
  65. ######################################################################################################
66. # Common functions
  67. ######################################################################################################
68. # Signal handling
  69. function trap::info() {
  70. [[ ${#ERROR_INFO} -gt 37 ]] && echo -e "$ERROR_INFO"
  71. [[ ${#ACCESS_INFO} -gt 38 ]] && echo -e "$ACCESS_INFO"
  72. [ -f "$LOG_FILE" ] && echo -e "\n\n See detailed log >>> $LOG_FILE \n\n"
  73. trap '' EXIT
  74. exit
  75. }
76. # Error log
  77. function log::error() {
  78. local item; item="[$(date +'%Y-%m-%dT%H:%M:%S.%N%z')]: \033[31mERROR: \033[0m$*"
  79. ERROR_INFO="${ERROR_INFO}${item}\n "
  80. echo -e "${item}" | tee -a "$LOG_FILE"
  81. }
82. # Info log
  83. function log::info() {
  84. printf "[%s]: \033[32mINFO: \033[0m%s\n" "$(date +'%Y-%m-%dT%H:%M:%S.%N%z')" "$*" | tee -a "$LOG_FILE"
  85. }
86. # Warning log
  87. function log::warning() {
  88. printf "[%s]: \033[33mWARNING: \033[0m%s\n" "$(date +'%Y-%m-%dT%H:%M:%S.%N%z')" "$*" | tee -a "$LOG_FILE"
  89. }
90. # Access info log
  91. function log::access() {
  92. ACCESS_INFO="${ACCESS_INFO}$*\n "
  93. printf "[%s]: \033[32mINFO: \033[0m%s\n" "$(date +'%Y-%m-%dT%H:%M:%S.%N%z')" "$*" | tee -a "$LOG_FILE"
  94. }
95. # Command execution log
  96. function log::exec() {
  97. printf "[%s]: \033[34mEXEC: \033[0m%s\n" "$(date +'%Y-%m-%dT%H:%M:%S.%N%z')" "$*" >> "$LOG_FILE"
  98. }
99. # Convert a version string to a comparable number
  100. function utils::version_to_number() {
  101. echo "$@" | awk -F. '{ printf("%d%03d%03d%03d\n", $1,$2,$3,$4); }';
  102. }
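# Illustration: utils::version_to_number "1.23.4" prints 1023004000, so semantic
# versions can be compared with plain integer tests.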
103. # Retry a command
  104. function utils::retry() {
  105. local retries=$1
  106. shift
  107. local count=0
  108. until eval "$*"; do
  109. exit=$?
  110. wait=$((2 ** count))
  111. count=$((count + 1))
  112. if [ "$count" -lt "$retries" ]; then
  113. echo "Retry $count/$retries exited $exit, retrying in $wait seconds..."
  114. sleep $wait
  115. else
  116. echo "Retry $count/$retries exited $exit, no more retries left."
  117. return $exit
  118. fi
  119. done
  120. return 0
  121. }
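# Usage sketch (hypothetical command, not from this script): retry a flaky
# request up to 4 times with exponential backoff (1s, 2s, 4s between attempts):
#   utils::retry 4 "curl -fsSL https://example.com/healthz"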
122. # Quote arguments (escape embedded quotes)
  123. function utils::quote() {
  124. # shellcheck disable=SC2046
  125. if [ $(echo "$*" | tr -d "\n" | wc -c) -eq 0 ]; then
  126. echo "''"
  127. elif [ $(echo "$*" | tr -d "[a-z][A-Z][0-9]:,.=~_/\n-" | wc -c) -gt 0 ]; then
  128. printf "%s" "$*" | sed -e "1h;2,\$H;\$!d;g" -e "s/'/\'\"\'\"\'/g" | sed -e "1h;2,\$H;\$!d;g" -e "s/^/'/g" -e "s/$/'/g"
  129. else
  130. echo "$*"
  131. fi
  132. }
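# Illustration: utils::quote "echo 'hi there'" wraps the argument in single
# quotes and escapes the embedded quotes, so the string survives the extra
# eval/ssh layer used by command::exec below.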
133. # Download a file
  134. function utils::download_file() {
  135. local url="$1"
  136. local dest="$2"
  137. local unzip_tag="${3:-1}"
  138. local dest_dirname; dest_dirname=$(dirname "$dest")
  139. local filename; filename=$(basename "$dest")
  140. log::info "[download]" "${filename}"
  141. command::exec "${MGMT_NODE}" "
  142. set -e
  143. if [ ! -f \"${dest}\" ]; then
  144. [ ! -d \"${dest_dirname}\" ] && mkdir -pv \"${dest_dirname}\"
  145. wget --timeout=10 --waitretry=3 --tries=5 --retry-connrefused --no-check-certificate \"${url}\" -O \"${dest}\"
  146. if [[ \"${unzip_tag}\" == \"unzip\" ]]; then
  147. command -v unzip 2>/dev/null || yum install -y unzip
  148. unzip -o \"${dest}\" -d \"${dest_dirname}\"
  149. fi
  150. else
  151. echo \"${dest} is exists!\"
  152. fi
  153. "
  154. local status="$?"
  155. check::exit_code "$status" "download" "${filename}" "exit"
  156. return "$status"
  157. }
158. # Check whether an element exists in an array
  159. function utils::is_element_in_array() {
  160. local -r element="${1}"
  161. local -r array=("${@:2}")
  162. local walker=''
  163. for walker in "${array[@]}"
  164. do
  165. [[ "${walker}" = "${element}" ]] && return 0
  166. done
  167. return 1
  168. }
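# Usage sketch (illustrative values):
#   networks=(flannel calico cilium)
#   utils::is_element_in_array "calico" "${networks[@]}" && echo "supported"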
169. # Execute a command (locally or over SSH)
  170. function command::exec() {
  171. local host=${1:-}
  172. shift
  173. local command="$*"
  174. if [[ "${SUDO_TAG:-}" == "1" ]]; then
  175. sudo_options="sudo -H -n -u ${SUDO_USER}"
  176. if [[ "${SUDO_PASSWORD:-}" != "" ]]; then
  177. sudo_options="${sudo_options// -n/} -p \"\" -S <<< \"${SUDO_PASSWORD}\""
  178. fi
  179. command="$sudo_options bash -c $(utils::quote "$command")"
  180. fi
  181. command="$(utils::quote "$command")"
  182. if [[ "${host}" == "127.0.0.1" ]]; then
183. # local execution
  184. log::exec "[command]" "bash -c $(printf "%s" "${command//${SUDO_PASSWORD:-}/zzzzzz}")"
  185. # shellcheck disable=SC2094
  186. COMMAND_OUTPUT=$(eval bash -c "${command}" 2>> "$LOG_FILE" | tee -a "$LOG_FILE")
  187. local status=$?
  188. else
189. # remote execution
  190. local ssh_cmd="ssh"
  191. if [[ "${SSH_PASSWORD}" != "" ]]; then
  192. ssh_cmd="sshpass -p \"${SSH_PASSWORD}\" ${ssh_cmd}"
  193. elif [[ "$SSH_PRIVATE_KEY" != "" ]]; then
  194. [ -f "${SSH_PRIVATE_KEY}" ] || { log::error "[exec]" "ssh private_key:${SSH_PRIVATE_KEY} not found."; exit 1; }
  195. ssh_cmd="${ssh_cmd} -i $SSH_PRIVATE_KEY"
  196. fi
  197. log::exec "[command]" "${ssh_cmd//${SSH_PASSWORD:-}/zzzzzz} ${SSH_OPTIONS} ${SSH_USER}@${host} -p ${SSH_PORT} bash -c $(printf "%s" "${command//${SUDO_PASSWORD:-}/zzzzzz}")"
  198. # shellcheck disable=SC2094
  199. COMMAND_OUTPUT=$(eval "${ssh_cmd} ${SSH_OPTIONS} ${SSH_USER}@${host} -p ${SSH_PORT}" bash -c '"${command}"' 2>> "$LOG_FILE" | tee -a "$LOG_FILE")
  200. local status=$?
  201. fi
  202. return $status
  203. }
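# Usage sketch (hypothetical host): run a command on a node, check the status,
# then read its stdout from the COMMAND_OUTPUT global:
#   command::exec "192.168.1.11" "hostname -f"
#   check::exit_code "$?" "demo" "get hostname"
#   echo "${COMMAND_OUTPUT}"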
204. # Copy files (locally or over SSH)
  205. function command::scp() {
  206. local host=${1:-}
  207. local src=${2:-}
  208. local dest=${3:-/tmp/}
  209. if [[ "${host}" == "127.0.0.1" ]]; then
  210. local command="cp -rf ${src} ${dest}"
  211. log::exec "[command]" "bash -c \"${command}\""
  212. # shellcheck disable=SC2094
  213. COMMAND_OUTPUT=$(bash -c "${command}" 2>> "$LOG_FILE" | tee -a "$LOG_FILE")
  214. local status=$?
  215. else
  216. local scp_cmd="scp"
  217. if [[ "${SSH_PASSWORD}" != "" ]]; then
  218. scp_cmd="sshpass -p \"${SSH_PASSWORD}\" ${scp_cmd}"
  219. elif [[ "$SSH_PRIVATE_KEY" != "" ]]; then
  220. [ -f "${SSH_PRIVATE_KEY}" ] || { log::error "[exec]" "ssh private_key:${SSH_PRIVATE_KEY} not found."; exit 1; }
  221. scp_cmd="${scp_cmd} -i $SSH_PRIVATE_KEY"
  222. fi
  223. log::exec "[command]" "${scp_cmd} ${SSH_OPTIONS} -P ${SSH_PORT} -r ${src} ${SSH_USER}@${host}:${dest}" >> "$LOG_FILE"
  224. # shellcheck disable=SC2094
  225. COMMAND_OUTPUT=$(eval "${scp_cmd} ${SSH_OPTIONS} -P ${SSH_PORT} -r ${src} ${SSH_USER}@${host}:${dest}" 2>> "$LOG_FILE" | tee -a "$LOG_FILE")
  226. local status=$?
  227. fi
  228. return $status
  229. }
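# Usage sketch (hypothetical paths): copy a local file or directory to a node;
# with host 127.0.0.1 it falls back to a plain cp:
#   command::scp "192.168.1.21" "/tmp/kainstall-offline-file" "/tmp/"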
230. # Check whether a command exists
  231. function check::command_exists() {
  232. local cmd=${1}
  233. local package=${2}
  234. if command -V "$cmd" > /dev/null 2>&1; then
  235. log::info "[check]" "$cmd command exists."
  236. else
  237. log::warning "[check]" "I require $cmd but it's not installed."
  238. log::warning "[check]" "install $package package."
  239. command::exec "127.0.0.1" "yum install -y ${package}"
  240. check::exit_code "$?" "check" "$package install" "exit"
  241. fi
  242. }
  243. ######################################################################################################
244. # Installation functions
  245. ######################################################################################################
246. # Node initialization script
  247. function script::init_node() {
  248. # clean
  249. sed -i -e "/$KUBE_APISERVER/d" -e '/-worker-/d' -e '/-master-/d' /etc/hosts
  250. sed -i '/## Kainstall managed start/,/## Kainstall managed end/d' /etc/security/limits.conf /etc/systemd/system.conf /etc/bashrc /etc/rc.local /etc/audit/rules.d/audit.rules
  251. # Disable selinux
  252. sed -i '/SELINUX/s/enforcing/disabled/' /etc/selinux/config
  253. setenforce 0
  254. # Disable swap
  255. swapoff -a && sysctl -w vm.swappiness=0
  256. sed -ri '/^[^#]*swap/s@^@#@' /etc/fstab
  257. # Disable firewalld
  258. for target in firewalld python-firewall firewalld-filesystem iptables; do
  259. systemctl stop $target &>/dev/null || true
  260. systemctl disable $target &>/dev/null || true
  261. done
  262. # repo
  263. [[ -f /etc/yum.repos.d/CentOS-Base.repo && "${SKIP_SET_OS_REPO,,}" == "false" ]] && sed -e 's!^#baseurl=!baseurl=!g' \
  264. -e 's!^mirrorlist=!#mirrorlist=!g' \
  265. -e 's!mirror.centos.org!mirrors.aliyun.com!g' \
  266. -i /etc/yum.repos.d/CentOS-Base.repo
  267. [[ "${OFFLINE_TAG:-}" != "1" && "${SKIP_SET_OS_REPO,,}" == "false" ]] && yum install -y epel-release
  268. [[ -f /etc/yum.repos.d/epel.repo && "${SKIP_SET_OS_REPO,,}" == "false" ]] && sed -e 's!^mirrorlist=!#mirrorlist=!g' \
  269. -e 's!^metalink=!#metalink=!g' \
  270. -e 's!^#baseurl=!baseurl=!g' \
  271. -e 's!//download.*/pub!//mirrors.aliyun.com!g' \
  272. -e 's!http://mirrors\.aliyun!https://mirrors.aliyun!g' \
  273. -i /etc/yum.repos.d/epel.repo
  274. # Change limits
  275. [ ! -f /etc/security/limits.conf_bak ] && cp /etc/security/limits.conf{,_bak}
  276. cat << EOF >> /etc/security/limits.conf
  277. ## Kainstall managed start
  278. root soft nofile 655360
  279. root hard nofile 655360
  280. root soft nproc 655360
  281. root hard nproc 655360
  282. root soft core unlimited
  283. root hard core unlimited
  284. * soft nofile 655360
  285. * hard nofile 655360
  286. * soft nproc 655360
  287. * hard nproc 655360
  288. * soft core unlimited
  289. * hard core unlimited
  290. ## Kainstall managed end
  291. EOF
  292. # /etc/systemd/system.conf
  293. [ -f /etc/security/limits.d/20-nproc.conf ] && sed -i 's#4096#655360#g' /etc/security/limits.d/20-nproc.conf
  294. cat << EOF >> /etc/systemd/system.conf
  295. ## Kainstall managed start
  296. DefaultLimitCORE=infinity
  297. DefaultLimitNOFILE=655360
  298. DefaultLimitNPROC=655360
  299. DefaultTasksMax=75%
  300. ## Kainstall managed end
  301. EOF
  302. # Change sysctl
  303. cat << EOF > /etc/sysctl.d/99-kube.conf
  304. # https://www.kernel.org/doc/Documentation/sysctl/
  305. #############################################################################################
306. # Virtual memory tuning
  307. #############################################################################################
  308. # Default: 30
309. # 0 - never use swap under any circumstances.
310. # 1 - do not use swap unless memory is exhausted (OOM).
311. vm.swappiness = 0
312. # Memory overcommit policy
313. #0 - the kernel checks whether enough free memory is available; if so the allocation succeeds, otherwise it fails and the error is returned to the application.
314. #1 - the kernel allows allocating all physical memory, regardless of the current memory state.
315. #2 - the kernel allows allocating more memory than the total of all physical memory and swap space.
316. vm.overcommit_memory=1
317. # Behaviour on OOM
318. # 1 disables it; when set to 0, the kernel triggers the OOM killer to kill the most memory-hungry process when memory is exhausted.
319. vm.panic_on_oom=0
320. # vm.dirty_background_ratio tunes how the kernel handles dirty pages that must be flushed to disk.
321. # Default value is 10.
322. # The value is a percentage of total system memory; in many cases setting it to 5 is appropriate.
323. # This setting should not be set to zero.
324. vm.dirty_background_ratio = 5
325. # Total number of dirty pages allowed before the kernel forces a synchronous flush to disk.
326. # It can also be raised by increasing vm.dirty_ratio above its default of 30 (also a percentage of system memory).
327. # A vm.dirty_ratio between 60 and 80 is recommended.
328. vm.dirty_ratio = 60
329. # vm.max_map_count limits the number of memory map areas (memory-mapped files) a process may use.
330. # The minimum for the mmap limit (vm.max_map_count) is the open-file ulimit (cat /proc/sys/fs/file-max).
331. # map_count should be roughly 1 per 128KB of system memory, so max_map_count is 262144 on a 32GB system.
  332. # Default: 65530
  333. vm.max_map_count = 2097152
  334. #############################################################################################
335. # File-handle and inode tuning
  336. #############################################################################################
  337. fs.may_detach_mounts = 1
338. # Increase file handle and inode cache sizes, and restrict core dumps.
339. fs.file-max = 2097152
340. fs.nr_open = 2097152
341. fs.suid_dumpable = 0
342. # inotify (file watch) limits
  343. fs.inotify.max_user_instances=8192
  344. fs.inotify.max_user_watches=524288
  345. fs.inotify.max_queued_events=16384
  346. #############################################################################################
347. # Network tuning
  348. #############################################################################################
349. # Default amount of memory allocated for each socket's send and receive buffers.
350. net.core.wmem_default = 25165824
351. net.core.rmem_default = 25165824
352. # Maximum amount of memory allocated for each socket's send and receive buffers.
353. net.core.wmem_max = 25165824
354. net.core.rmem_max = 25165824
355. # In addition to the socket settings, the send and receive buffer sizes for TCP sockets
356. # must be set separately using the net.ipv4.tcp_wmem and net.ipv4.tcp_rmem parameters.
357. # Each takes three space-separated integers specifying the minimum, default and maximum size.
358. # The maximum size cannot be larger than the values specified for all sockets via net.core.wmem_max and net.core.rmem_max.
359. # Reasonable settings are a 4KiB minimum, a 64KiB default and a 2MiB maximum buffer.
360. net.ipv4.tcp_wmem = 20480 12582912 25165824
361. net.ipv4.tcp_rmem = 20480 12582912 25165824
362. # Increase the maximum total buffer space that can be allocated,
363. # measured in pages (4096 bytes).
  364. net.ipv4.tcp_mem = 65536 25165824 262144
  365. net.ipv4.udp_mem = 65536 25165824 262144
366. # Minimum amount of memory allocated for each socket's send and receive buffers.
367. net.ipv4.udp_wmem_min = 16384
368. net.ipv4.udp_rmem_min = 16384
369. # Enable TCP window scaling so clients can transfer data more efficiently and the data can be buffered on the proxy side.
370. net.ipv4.tcp_window_scaling = 1
371. # Raise the number of connections that can be accepted simultaneously.
372. net.ipv4.tcp_max_syn_backlog = 10240
373. # Raising net.core.netdev_max_backlog above its default of 1000
374. # helps absorb bursts of network traffic, especially at multi-gigabit link speeds,
375. # by allowing more packets to queue for the kernel to process.
376. net.core.netdev_max_backlog = 65536
377. # Increase the maximum amount of option memory buffers.
378. net.core.optmem_max = 25165824
379. # Number of SYNACK retries for passive TCP connections.
380. net.ipv4.tcp_synack_retries = 2
381. # Allowed local port range.
382. net.ipv4.ip_local_port_range = 2048 65535
383. # Protect against TCP TIME-WAIT assassination.
384. # Default: net.ipv4.tcp_rfc1337 = 0
385. net.ipv4.tcp_rfc1337 = 1
386. # Lower the default tcp_fin_timeout for closing connections.
387. net.ipv4.tcp_fin_timeout = 15
388. # Maximum number of backlogged sockets.
389. # Default is 128.
390. net.core.somaxconn = 32768
391. # Enable syncookies for SYN flood protection.
392. net.ipv4.tcp_syncookies = 1
393. # Mitigate Smurf attacks: spoofed ICMP packets are sent to a network's broadcast address with the
394. # victim host as the source, so every host that receives them replies to the victim,
395. # flooding it with thousands of packets in a short time.
  396. net.ipv4.icmp_echo_ignore_broadcasts = 1
397. # Enable protection against bogus ICMP error messages.
398. net.ipv4.icmp_ignore_bogus_error_responses = 1
399. # Enable automatic window scaling.
400. # This lets TCP buffers grow beyond their usual 64K maximum when the latency justifies it.
401. net.ipv4.tcp_window_scaling = 1
402. # Log spoofed, source-routed and redirected packets (martians).
403. net.ipv4.conf.all.log_martians = 1
404. net.ipv4.conf.default.log_martians = 1
405. # How many orphaned TCP sockets (not attached to any user file handle) the kernel maintains.
406. # Beyond this number, orphaned connections are reset immediately and a warning is printed.
407. # Default: net.ipv4.tcp_max_orphans = 65536
408. net.ipv4.tcp_max_orphans = 65536
409. # Do not cache metrics when closing connections.
410. net.ipv4.tcp_no_metrics_save = 1
411. # Enable timestamps as defined in RFC1323:
412. # Default: net.ipv4.tcp_timestamps = 1
413. net.ipv4.tcp_timestamps = 1
414. # Enable selective acknowledgements (SACK).
415. # Default: net.ipv4.tcp_sack = 1
416. net.ipv4.tcp_sack = 1
417. # Increase the tcp-time-wait bucket pool size to resist simple DoS attacks.
418. # net.ipv4.tcp_tw_recycle was removed in Linux 4.12; use net.ipv4.tcp_tw_reuse instead.
419. net.ipv4.tcp_max_tw_buckets = 14400
420. net.ipv4.tcp_tw_reuse = 1
421. # accept_source_route makes an interface accept packets with the strict source route (SSR) or loose source route (LSR) option set.
422. # The settings below drop packets that carry SSR or LSR options.
  423. net.ipv4.conf.all.accept_source_route = 0
  424. net.ipv4.conf.default.accept_source_route = 0
425. # Enable reverse-path filtering.
426. net.ipv4.conf.all.rp_filter = 1
427. net.ipv4.conf.default.rp_filter = 1
428. # Disable acceptance of ICMP redirects.
429. net.ipv4.conf.all.accept_redirects = 0
430. net.ipv4.conf.default.accept_redirects = 0
431. net.ipv4.conf.all.secure_redirects = 0
432. net.ipv4.conf.default.secure_redirects = 0
433. # Do not send any IPv4 ICMP redirect packets.
434. net.ipv4.conf.all.send_redirects = 0
435. net.ipv4.conf.default.send_redirects = 0
436. # Enable IP forwarding.
437. net.ipv4.ip_forward = 1
438. # Disable IPv6.
439. net.ipv6.conf.lo.disable_ipv6=1
440. net.ipv6.conf.all.disable_ipv6 = 1
441. net.ipv6.conf.default.disable_ipv6 = 1
442. # Have bridged traffic traverse the iptables/ip6tables/arptables chains (required for Kubernetes networking).
443. net.bridge.bridge-nf-call-ip6tables = 1
444. net.bridge.bridge-nf-call-iptables = 1
445. net.bridge.bridge-nf-call-arptables = 1
446. # ARP cache
447. # Minimum number of entries kept in the ARP cache; the garbage collector does not run if there are fewer. Default is 128.
448. net.ipv4.neigh.default.gc_thresh1=2048
449. # Soft limit on ARP cache entries; the garbage collector lets the count exceed this for 5 seconds before collecting. Default is 512.
450. net.ipv4.neigh.default.gc_thresh2=4096
451. # Hard limit on ARP cache entries; once exceeded, the garbage collector runs immediately. Default is 1024.
452. net.ipv4.neigh.default.gc_thresh3=8192
453. # TCP keepalive settings.
  454. net.ipv4.tcp_keepalive_time = 600
  455. net.ipv4.tcp_keepalive_intvl = 30
  456. net.ipv4.tcp_keepalive_probes = 10
457. # conntrack table
  458. net.nf_conntrack_max=1048576
  459. net.netfilter.nf_conntrack_max=1048576
  460. net.netfilter.nf_conntrack_buckets=262144
  461. net.netfilter.nf_conntrack_tcp_timeout_fin_wait=30
  462. net.netfilter.nf_conntrack_tcp_timeout_time_wait=30
  463. net.netfilter.nf_conntrack_tcp_timeout_close_wait=15
  464. net.netfilter.nf_conntrack_tcp_timeout_established=300
  465. #############################################################################################
466. # Kernel tuning
  467. #############################################################################################
468. # Address space layout randomization (ASLR) is a memory-protection mechanism that helps prevent buffer-overflow attacks.
469. # It keeps the memory addresses associated with running processes unpredictable,
470. # so flaws or vulnerabilities related to those processes are much harder to exploit.
471. # Accepted values: 0 = off, 1 = conservative randomization, 2 = full randomization
472. kernel.randomize_va_space = 2
473. # Raise the maximum number of PIDs.
474. kernel.pid_max = 65536
475. kernel.threads-max=30938
476. # coredump
477. kernel.core_pattern=core
478. # Whether to panic automatically when a soft lockup is detected; default is 0.
  479. kernel.softlockup_all_cpu_backtrace=1
  480. kernel.softlockup_panic=1
  481. EOF
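# Optional sanity check (assumes br_netfilter is loaded, which the ipvs module
# list below takes care of): after 'sysctl --system' runs further down, key
# values can be verified with, e.g.
#   sysctl net.ipv4.ip_forward net.bridge.bridge-nf-call-iptables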
  482. # history
  483. cat << EOF >> /etc/bashrc
  484. ## Kainstall managed start
  485. # history actions record,include action time, user, login ip
  486. HISTFILESIZE=5000
  487. HISTSIZE=5000
  488. USER_IP=\$(who -u am i 2>/dev/null | awk '{print \$NF}' | sed -e 's/[()]//g')
  489. if [ -z \$USER_IP ]
  490. then
  491. USER_IP=\$(hostname -i)
  492. fi
  493. HISTTIMEFORMAT="%Y-%m-%d %H:%M:%S \$USER_IP:\$(whoami) "
  494. export HISTFILESIZE HISTSIZE HISTTIMEFORMAT
  495. # PS1
  496. PS1='\[\033[0m\]\[\033[1;36m\][\u\[\033[0m\]@\[\033[1;32m\]\h\[\033[0m\] \[\033[1;31m\]\w\[\033[0m\]\[\033[1;36m\]]\[\033[33;1m\]\\$ \[\033[0m\]'
  497. ## Kainstall managed end
  498. EOF
  499. # journal
  500. mkdir -p /var/log/journal /etc/systemd/journald.conf.d
  501. cat << EOF > /etc/systemd/journald.conf.d/99-prophet.conf
  502. [Journal]
503. # Persist logs to disk
504. Storage=persistent
505. # Compress archived logs
506. Compress=yes
507. SyncIntervalSec=5m
508. RateLimitInterval=30s
509. RateLimitBurst=1000
510. # Cap total disk usage at 2G
511. SystemMaxUse=2G
512. # Cap individual journal files at 100M
513. SystemMaxFileSize=100M
514. # Keep logs for 3 weeks
515. MaxRetentionSec=3week
516. # Do not forward logs to syslog
  517. ForwardToSyslog=no
  518. EOF
  519. # motd
  520. cat << EOF > /etc/profile.d/zz-ssh-login-info.sh
  521. #!/bin/sh
  522. #
  523. # @Time : 2020-02-04
  524. # @Author : lework
  525. # @Desc : ssh login banner
  526. export PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:\$PATH
  527. #shopt -q login_shell && : || return 0
  528. # os
  529. upSeconds="\$(cut -d. -f1 /proc/uptime)"
  530. secs=\$((\${upSeconds}%60))
  531. mins=\$((\${upSeconds}/60%60))
  532. hours=\$((\${upSeconds}/3600%24))
  533. days=\$((\${upSeconds}/86400))
  534. UPTIME_INFO=\$(printf "%d days, %02dh %02dm %02ds" "\$days" "\$hours" "\$mins" "\$secs")
  535. if [ -f /etc/redhat-release ] ; then
  536. PRETTY_NAME=\$(< /etc/redhat-release)
  537. elif [ -f /etc/debian_version ]; then
  538. DIST_VER=\$(</etc/debian_version)
  539. PRETTY_NAME="\$(grep PRETTY_NAME /etc/os-release | sed -e 's/PRETTY_NAME=//g' -e 's/"//g') (\$DIST_VER)"
  540. else
  541. PRETTY_NAME=\$(cat /etc/*-release | grep "PRETTY_NAME" | sed -e 's/PRETTY_NAME=//g' -e 's/"//g')
  542. fi
  543. if [[ -d "/system/app/" && -d "/system/priv-app" ]]; then
  544. model="\$(getprop ro.product.brand) \$(getprop ro.product.model)"
  545. elif [[ -f /sys/devices/virtual/dmi/id/product_name ||
  546. -f /sys/devices/virtual/dmi/id/product_version ]]; then
  547. model="\$(< /sys/devices/virtual/dmi/id/product_name)"
  548. model+=" \$(< /sys/devices/virtual/dmi/id/product_version)"
  549. elif [[ -f /sys/firmware/devicetree/base/model ]]; then
  550. model="\$(< /sys/firmware/devicetree/base/model)"
  551. elif [[ -f /tmp/sysinfo/model ]]; then
  552. model="\$(< /tmp/sysinfo/model)"
  553. fi
  554. MODEL_INFO=\${model}
  555. KERNEL=\$(uname -srmo)
  556. USER_NUM=\$(who -u | wc -l)
  557. RUNNING=\$(ps ax | wc -l | tr -d " ")
  558. # disk
  559. totaldisk=\$(df -h -x devtmpfs -x tmpfs -x debugfs -x aufs -x overlay --total 2>/dev/null | tail -1)
  560. disktotal=\$(awk '{print \$2}' <<< "\${totaldisk}")
  561. diskused=\$(awk '{print \$3}' <<< "\${totaldisk}")
  562. diskusedper=\$(awk '{print \$5}' <<< "\${totaldisk}")
  563. DISK_INFO="\033[0;33m\${diskused}\033[0m of \033[1;34m\${disktotal}\033[0m disk space used (\033[0;33m\${diskusedper}\033[0m)"
  564. # cpu
  565. cpu=\$(awk -F':' '/^model name/ {print \$2}' /proc/cpuinfo | uniq | sed -e 's/^[ \t]*//')
  566. cpun=\$(grep -c '^processor' /proc/cpuinfo)
  567. cpuc=\$(grep '^cpu cores' /proc/cpuinfo | tail -1 | awk '{print \$4}')
  568. cpup=\$(grep '^physical id' /proc/cpuinfo | wc -l)
  569. CPU_INFO="\${cpu} \${cpup}P \${cpuc}C \${cpun}L"
  570. # get the load averages
  571. read one five fifteen rest < /proc/loadavg
572. LOADAVG_INFO="\033[0;33m\${one}\033[0m / \${five} / \${fifteen} with \033[1;34m\$(( cpun*cpuc ))\033[0m core(s) at \033[1;34m\$(grep '^cpu MHz' /proc/cpuinfo | tail -1 | awk '{print \$4}')\033[0m MHz"
  573. # mem
  574. MEM_INFO="\$(cat /proc/meminfo | awk '/MemTotal:/{total=\$2/1024/1024;next} /MemAvailable:/{use=total-\$2/1024/1024; printf("\033[0;33m%.2fGiB\033[0m of \033[1;34m%.2fGiB\033[0m RAM used (\033[0;33m%.2f%%\033[0m)",use,total,(use/total)*100);}')"
  575. # network
  576. # extranet_ip=" and \$(curl -s ip.cip.cc)"
577. IP_INFO="\$(ip a|grep -E '^[0-9]+: em*|^[0-9]+: eno*|^[0-9]+: enp*|^[0-9]+: ens*|^[0-9]+: eth*|^[0-9]+: wlp*' -A2|grep inet|awk -F ' ' '{print \$2}'|cut -f1 -d/|xargs echo)"
  578. # Container info
579. CONTAINER_INFO="\$(sudo /usr/bin/crictl ps -a -o yaml 2> /dev/null | awk '/^ state: /{gsub("CONTAINER_", "", \$NF); ++S[\$NF]}END{for(m in S) printf "%s%s:%s ",substr(m,1,1),tolower(substr(m,2)),S[m]}')Images:\$(sudo /usr/bin/crictl images -q 2> /dev/null | wc -l)"
  580. # info
  581. echo -e "
  582. Information as of: \033[1;34m\$(date +"%Y-%m-%d %T")\033[0m
  583. \033[0;1;31mProduct\033[0m............: \${MODEL_INFO}
  584. \033[0;1;31mOS\033[0m.................: \${PRETTY_NAME}
  585. \033[0;1;31mKernel\033[0m.............: \${KERNEL}
  586. \033[0;1;31mCPU\033[0m................: \${CPU_INFO}
  587. \033[0;1;31mHostname\033[0m...........: \033[1;34m\$(hostname)\033[0m
  588. \033[0;1;31mIP Addresses\033[0m.......: \033[1;34m\${IP_INFO}\033[0m
  589. \033[0;1;31mUptime\033[0m.............: \033[0;33m\${UPTIME_INFO}\033[0m
  590. \033[0;1;31mMemory\033[0m.............: \${MEM_INFO}
  591. \033[0;1;31mLoad Averages\033[0m......: \${LOADAVG_INFO}
  592. \033[0;1;31mDisk Usage\033[0m.........: \${DISK_INFO}
  593. \033[0;1;31mUsers online\033[0m.......: \033[1;34m\${USER_NUM}\033[0m
  594. \033[0;1;31mRunning Processes\033[0m..: \033[1;34m\${RUNNING}\033[0m
  595. \033[0;1;31mContainer Info\033[0m.....: \${CONTAINER_INFO}
  596. "
  597. EOF
  598. chmod +x /etc/profile.d/zz-ssh-login-info.sh
  599. echo 'ALL ALL=(ALL) NOPASSWD:/usr/bin/crictl' > /etc/sudoers.d/crictl
  600. # time sync
  601. ntpd --help >/dev/null 2>&1 && yum remove -y ntp
  602. [[ "${OFFLINE_TAG:-}" != "1" ]] && yum install -y chrony
603. [ ! -f /etc/chrony.conf_bak ] && cp /etc/chrony.conf{,_bak} # back up the default config
  604. cat << EOF > /etc/chrony.conf
  605. server ntp.aliyun.com iburst
  606. server cn.ntp.org.cn iburst
  607. server ntp.shu.edu.cn iburst
  608. server 0.cn.pool.ntp.org iburst
  609. server 1.cn.pool.ntp.org iburst
  610. server 2.cn.pool.ntp.org iburst
  611. server 3.cn.pool.ntp.org iburst
  612. driftfile /var/lib/chrony/drift
  613. makestep 1.0 3
  614. logdir /var/log/chrony
  615. EOF
  616. timedatectl set-timezone Asia/Shanghai
  617. chronyd -q -t 1 'server cn.pool.ntp.org iburst maxsamples 1'
  618. systemctl enable chronyd
  619. systemctl start chronyd
  620. chronyc sources -v
  621. chronyc sourcestats
  622. hwclock --systohc
  623. # package
  624. [[ "${OFFLINE_TAG:-}" != "1" ]] && yum install -y curl wget
  625. # ipvs
  626. [[ "${OFFLINE_TAG:-}" != "1" ]] && yum install -y ipvsadm ipset sysstat conntrack libseccomp
  627. module=(
  628. ip_vs
  629. ip_vs_rr
  630. ip_vs_wrr
  631. ip_vs_sh
  632. overlay
  633. nf_conntrack
  634. br_netfilter
  635. )
  636. [ -f /etc/modules-load.d/ipvs.conf ] && cp -f /etc/modules-load.d/ipvs.conf{,_bak}
  637. for kernel_module in "${module[@]}";do
  638. /sbin/modinfo -F filename "$kernel_module" |& grep -qv ERROR && echo "$kernel_module" >> /etc/modules-load.d/ipvs.conf
  639. done
  640. systemctl restart systemd-modules-load
  641. systemctl enable systemd-modules-load
  642. sysctl --system
  643. # audit
  644. [[ "${OFFLINE_TAG:-}" != "1" ]] && yum install -y audit audit-libs
  645. # /etc/audit/rules.d/audit.rules
  646. cat << EOF >> /etc/audit/rules.d/audit.rules
  647. ## Kainstall managed start
  648. # Ignore errors
  649. -i
  650. # SYSCALL
  651. -a always,exit -F arch=b64 -S kill,tkill,tgkill -F a1=9 -F key=trace_kill_9
  652. -a always,exit -F arch=b64 -S kill,tkill,tgkill -F a1=15 -F key=trace_kill_15
  653. # docker
  654. -w /usr/bin/dockerd -k docker
  655. -w /var/lib/docker -k docker
  656. -w /etc/docker -k docker
  657. -w /usr/lib/systemd/system/docker.service -k docker
  658. -w /etc/systemd/system/docker.service -k docker
  659. -w /usr/lib/systemd/system/docker.socket -k docker
  660. -w /etc/default/docker -k docker
  661. -w /etc/sysconfig/docker -k docker
  662. -w /etc/docker/daemon.json -k docker
  663. # containerd
  664. -w /usr/bin/containerd -k containerd
  665. -w /var/lib/containerd -k containerd
  666. -w /usr/lib/systemd/system/containerd.service -k containerd
  667. -w /etc/containerd/config.toml -k containerd
  668. # cri-o
  669. -w /usr/bin/crio -k cri-o
  670. -w /etc/crio -k cri-o
  671. # runc
  672. -w /usr/bin/runc -k runc
  673. # kube
  674. -w /usr/bin/kubeadm -k kubeadm
  675. -w /usr/bin/kubelet -k kubelet
  676. -w /usr/bin/kubectl -k kubectl
  677. -w /var/lib/kubelet -k kubelet
  678. -w /etc/kubernetes -k kubernetes
  679. ## Kainstall managed end
  680. EOF
  681. chmod 600 /etc/audit/rules.d/audit.rules
  682. sed -i 's#max_log_file =.*#max_log_file = 80#g' /etc/audit/auditd.conf
  683. if [ -f /usr/libexec/initscripts/legacy-actions/auditd/restart ]; then
  684. /usr/libexec/initscripts/legacy-actions/auditd/restart
  685. else
  686. systemctl stop auditd && systemctl start auditd
  687. fi
  688. systemctl enable auditd
  689. grep single-request-reopen /etc/resolv.conf || sed -i '1ioptions timeout:2 attempts:3 rotate single-request-reopen' /etc/resolv.conf
  690. ipvsadm --clear
  691. iptables -F && iptables -t nat -F && iptables -t mangle -F && iptables -X
  692. }
693. # Upgrade the kernel
  694. function script::upgrade_kernel() {
  695. local ver; ver=$(rpm --eval "%{centos_ver}")
  696. [[ "${OFFLINE_TAG:-}" != "1" ]] && yum install -y "https://www.elrepo.org/elrepo-release-${ver}.el${ver}.elrepo.noarch.rpm"
  697. sed -e "s/^mirrorlist=/#mirrorlist=/g" \
  698. -e "s/elrepo.org\/linux/mirrors.tuna.tsinghua.edu.cn\/elrepo/g" \
  699. -i /etc/yum.repos.d/elrepo.repo
  700. [[ "${OFFLINE_TAG:-}" != "1" ]] && yum install -y --disablerepo="*" --enablerepo=elrepo-kernel kernel-lt{,-devel}
  701. grub2-set-default 0 && grub2-mkconfig -o /etc/grub2.cfg
  702. grubby --default-kernel
  703. grubby --args="user_namespace.enable=1" --update-kernel="$(grubby --default-kernel)"
  704. }
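# Note: the kernel-lt package installed above only becomes the running kernel
# after a reboot, so the node must be rebooted before the new kernel (e.g. the
# 4.9.17+ requirement that check::kernel enforces for cilium) takes effect.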
705. # Upgrade kube components on a node
  706. function script::upgrage_kube() {
  707. local role=${1:-init}
  708. local version="-${2:-latest}"
  709. version="${version#-latest}"
  710. set -e
  711. echo '[install] kubeadm'
  712. kubeadm version
  713. yum install -y "kubeadm${version}" --disableexcludes=kubernetes
  714. kubeadm version
  715. echo '[upgrade]'
  716. if [[ "$role" == "init" ]]; then
  717. local plan_info; plan_info=$(kubeadm upgrade plan)
  718. local v; v=$(printf "%s" "$plan_info" | grep 'kubeadm upgrade apply ' | awk '{print $4}'| tail -1 )
  719. printf "%s\n" "${plan_info}"
  720. kubeadm upgrade apply "${v}" -y
  721. else
  722. kubeadm upgrade node
  723. fi
  724. echo '[install] kubelet kubectl'
  725. kubectl version --client=true
  726. yum install -y "kubelet${version}" "kubectl${version}" --disableexcludes=kubernetes
  727. kubectl version --client=true
  728. [ -f /usr/lib/systemd/system/kubelet.service.d/10-kubeadm.conf ] && \
  729. sed -i 's#^\[Service\]#[Service]\nCPUAccounting=true\nMemoryAccounting=true#g' /usr/lib/systemd/system/kubelet.service.d/10-kubeadm.conf
  730. systemctl daemon-reload
  731. systemctl restart kubelet
  732. }
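# Usage sketch (hypothetical version): upgrade the first control-plane node with
# role "init", then every remaining node with any other role value:
#   script::upgrage_kube "init" "1.23.17"
#   script::upgrage_kube "node" "1.23.17"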
733. # Install docker
  734. function script::install_docker() {
  735. local version="-${1:-latest}"
  736. version="${version#-latest}"
  737. cat << EOF > /etc/yum.repos.d/docker-ce.repo
  738. [docker-ce-stable]
  739. name=Docker CE Stable - \$basearch
  740. baseurl=https://mirrors.aliyun.com/docker-ce/linux/centos/$(rpm --eval '%{centos_ver}')/\$basearch/stable
  741. enabled=1
  742. gpgcheck=1
  743. gpgkey=https://mirrors.aliyun.com/docker-ce/linux/centos/gpg
  744. EOF
  745. if [[ "${OFFLINE_TAG:-}" != "1" ]];then
  746. [ -f "$(which docker)" ] && yum remove -y docker-ce docker-ce-cli containerd.io
  747. yum install -y "docker-ce${version}" "docker-ce-cli${version}" containerd.io bash-completion
  748. fi
  749. [ -f /usr/share/bash-completion/completions/docker ] && \
  750. cp -f /usr/share/bash-completion/completions/docker /etc/bash_completion.d/
  751. [ ! -d /etc/docker ] && mkdir /etc/docker
  752. # /etc/docker/daemon.json
  753. cat << EOF > /etc/docker/daemon.json
  754. {
  755. "data-root": "/var/lib/docker",
  756. "log-driver": "json-file",
  757. "log-opts": {
  758. "max-size": "100m",
  759. "max-file": "3"
  760. },
  761. "default-ulimits": {
  762. "nofile": {
  763. "Name": "nofile",
  764. "Hard": 655360,
  765. "Soft": 655360
  766. },
  767. "nproc": {
  768. "Name": "nproc",
  769. "Hard": 655360,
  770. "Soft": 655360
  771. }
  772. },
  773. "live-restore": true,
  774. "oom-score-adjust": -1000,
  775. "max-concurrent-downloads": 10,
  776. "max-concurrent-uploads": 10,
  777. "storage-driver": "overlay2",
  778. "storage-opts": ["overlay2.override_kernel_check=true"],
  779. "exec-opts": ["native.cgroupdriver=systemd"],
  780. "registry-mirrors": [
  781. "https://573d5l8e.mirror.aliyuncs.com"
  782. ]
  783. }
  784. EOF
  785. sed -i 's|#oom_score = 0|oom_score = -999|' /etc/containerd/config.toml
  786. # /etc/crictl.yaml
  787. cat << EOF > /etc/crictl.yaml
  788. runtime-endpoint: unix:///var/run/dockershim.sock
  789. image-endpoint: unix:///var/run/dockershim.sock
  790. timeout: 2
  791. debug: false
  792. pull-image-on-create: true
  793. disable-pull-on-run: false
  794. EOF
  795. systemctl enable containerd
  796. systemctl restart containerd
  797. systemctl enable docker
  798. systemctl restart docker
  799. }
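# Verification sketch: the daemon.json above selects the systemd cgroup driver,
# so once docker has restarted
#   docker info 2>/dev/null | grep -i 'cgroup driver'
# should report systemd.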
800. # Install containerd
  801. function script::install_containerd() {
  802. local version="-${1:-latest}"
  803. version="${version#-latest}"
  804. # /etc/yum.repos.d/docker-ce.repo
  805. cat << EOF > /etc/yum.repos.d/docker-ce.repo
  806. [docker-ce-stable]
  807. name=Docker CE Stable - \$basearch
  808. baseurl=https://mirrors.aliyun.com/docker-ce/linux/centos/$(rpm --eval '%{centos_ver}')/\$basearch/stable
  809. enabled=1
  810. gpgcheck=1
  811. gpgkey=https://mirrors.aliyun.com/docker-ce/linux/centos/gpg
  812. EOF
  813. if [[ "${OFFLINE_TAG:-}" != "1" ]];then
  814. [ -f "$(which runc)" ] && yum remove -y runc
  815. [ -f "$(which containerd)" ] && yum remove -y containerd.io
  816. yum install -y containerd.io"${version}" containernetworking bash-completion
  817. fi
  818. [ -d /etc/bash_completion.d ] && crictl completion bash > /etc/bash_completion.d/crictl
  819. containerd config default > /etc/containerd/config.toml
  820. sed -i -e "s#k8s.gcr.io#registry.cn-hangzhou.aliyuncs.com/kainstall#g" \
  821. -e "s#https://registry-1.docker.io#https://573d5l8e.mirror.aliyuncs.com#g" \
  822. -e "s#SystemdCgroup = false#SystemdCgroup = true#g" \
  823. -e "s#oom_score = 0#oom_score = -999#" \
  824. -e "s#max_concurrent_downloads = 3#max_concurrent_downloads = 10#g" /etc/containerd/config.toml
  825. grep docker.io /etc/containerd/config.toml || sed -i -e "/registry.mirrors]/a\ \ \ \ \ \ \ \ [plugins.\"io.containerd.grpc.v1.cri\".registry.mirrors.\"docker.io\"]\n endpoint = [\"https://573d5l8e.mirror.aliyuncs.com\"]" \
  826. /etc/containerd/config.toml
  827. # /etc/crictl.yaml
  828. cat << EOF > /etc/crictl.yaml
  829. runtime-endpoint: unix:///run/containerd/containerd.sock
  830. image-endpoint: unix:///run/containerd/containerd.sock
  831. timeout: 2
  832. debug: false
  833. pull-image-on-create: true
  834. disable-pull-on-run: false
  835. EOF
  836. systemctl restart containerd
  837. systemctl enable containerd
  838. }
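# Verification sketch: /etc/crictl.yaml now points at containerd's socket, so
#   crictl version
#   crictl info > /dev/null && echo "CRI endpoint OK"
# should both succeed once containerd has restarted.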
839. # Install cri-o
  840. function script::install_cri-o() {
  841. local version="${1:-latest}"
  842. version="${version#-latest}"
  843. os="CentOS_$(rpm --eval '%{centos_ver}')" && echo "${os}"
  844. # /etc/yum.repos.d/devel_kubic_libcontainers_stable.repo
  845. cat << EOF > /etc/yum.repos.d/devel_kubic_libcontainers_stable.repo
  846. [devel_kubic_libcontainers_stable]
  847. name=Stable Releases of Upstream github.com/containers packages
  848. type=rpm-md
  849. baseurl=https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/${os}/
  850. gpgcheck=1
  851. gpgkey=https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/${os}/repodata/repomd.xml.key
  852. enabled=1
  853. [devel_kubic_libcontainers_stable_cri-o]
  854. name=devel:kubic:libcontainers:stable:cri-o
  855. type=rpm-md
  856. baseurl=https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable:/cri-o:/${version}/${os}/
  857. gpgcheck=1
  858. gpgkey=https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable:/cri-o:/${version}/${os}/repodata/repomd.xml.key
  859. enabled=1
  860. EOF
  861. if [[ "${OFFLINE_TAG:-}" != "1" ]];then
  862. [ -f "$(which runc)" ] && yum remove -y runc
  863. [ -f "$(which crio)" ] && yum remove -y cri-o
  864. [ -f "$(which docker)" ] && yum remove -y docker-ce docker-ce-cli containerd.io
  865. yum install -y runc cri-o bash-completion --disablerepo=docker-ce-stable || yum install -y runc cri-o bash-completion
  866. fi
  867. [ -d /etc/bash_completion.d ] && \
  868. { crictl completion bash > /etc/bash_completion.d/crictl; \
  869. crio completion bash > /etc/bash_completion.d/crio; \
  870. crio-status completion bash > /etc/bash_completion.d/crio-status; }
  871. [ ! -f /etc/crio/crio.conf ] && crio config --default > /etc/crio/crio.conf
  872. sed -i -e "s#k8s.gcr.io#registry.cn-hangzhou.aliyuncs.com/kainstall#g" \
  873. -e 's|#registries = \[|registries = ["docker.io", "quay.io"]|g' /etc/crio/crio.conf
  874. # /etc/crio/crio.conf
  875. cat << EOF >> /etc/crio/crio.conf
  876. [crio.image]
  877. pause_image = "registry.cn-hangzhou.aliyuncs.com/kainstall/pause:3.6"
  878. EOF
  879. # /etc/containers/registries.conf.d/000-dockerio.conf
  880. [ -d /etc/containers/registries.conf.d ] && cat << EOF > /etc/containers/registries.conf.d/000-dockerio.conf
  881. [[registry]]
  882. prefix = "docker.io"
  883. insecure = false
  884. blocked = false
  885. location = "docker.io"
  886. [[registry.mirror]]
  887. location = "573d5l8e.mirror.aliyuncs.com"
  888. insecure = true
  889. EOF
  890. # /etc/crictl.yaml
  891. cat << EOF > /etc/crictl.yaml
  892. runtime-endpoint: unix:///var/run/crio/crio.sock
  893. image-endpoint: unix:///var/run/crio/crio.sock
  894. timeout: 2
  895. debug: false
  896. pull-image-on-create: true
  897. disable-pull-on-run: false
  898. EOF
  899. # /etc/cni/net.d/100-crio-bridge.conf
  900. sed -i "s#10.85.0.0/16#${KUBE_POD_SUBNET:-10.85.0.0/16}#g" /etc/cni/net.d/100-crio-bridge.conf
  901. # /etc/cni/net.d/10-crio.conf
  902. cat << EOF > /etc/cni/net.d/10-crio.conf
  903. {
  904. $(grep cniVersion /etc/cni/net.d/100-crio-bridge.conf)
  905. "name": "crio",
  906. "type": "flannel"
  907. }
  908. EOF
  909. mv /etc/cni/net.d/100-crio-bridge.conf /etc/cni/net.d/10-crio.conf /etc/cni/net.d/200-loopback.conf /tmp/
  910. systemctl restart crio
  911. systemctl enable crio
  912. }
913. # Install kube components
  914. function script::install_kube() {
  915. local version="-${1:-latest}"
  916. version="${version#-latest}"
  917. # /etc/yum.repos.d/kubernetes.repo
  918. cat <<EOF > /etc/yum.repos.d/kubernetes.repo
  919. [kubernetes]
  920. name=Kubernetes
  921. baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
  922. enabled=1
  923. gpgcheck=0
  924. repo_gpgcheck=0
  925. gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
  926. EOF
  927. if [[ "${OFFLINE_TAG:-}" != "1" ]];then
  928. [ -f /usr/bin/kubeadm ] && yum remove -y kubeadm
  929. [ -f /usr/bin/kubelet ] && yum remove -y kubelet
  930. [ -f /usr/bin/kubectl ] && yum remove -y kubectl
  931. yum install -y "kubeadm${version}" "kubelet${version}" "kubectl${version}" --disableexcludes=kubernetes
  932. fi
  933. [ -d /etc/bash_completion.d ] && \
  934. { kubectl completion bash > /etc/bash_completion.d/kubectl; \
935. kubeadm completion bash > /etc/bash_completion.d/kubeadm; }
  936. [ ! -d /usr/lib/systemd/system/kubelet.service.d ] && mkdir -p /usr/lib/systemd/system/kubelet.service.d
  937. cat << EOF > /usr/lib/systemd/system/kubelet.service.d/11-cgroup.conf
  938. [Service]
  939. CPUAccounting=true
  940. MemoryAccounting=true
  941. BlockIOAccounting=true
  942. ExecStartPre=/bin/bash -c '/bin/mkdir -p /sys/fs/cgroup/{cpuset,memory,hugetlb,systemd,pids,"cpu,cpuacct"}/{system,kube,kubepods}.slice||:'
  943. Slice=kube.slice
  944. EOF
  945. systemctl daemon-reload
  946. systemctl enable kubelet
  947. systemctl restart kubelet
  948. }
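# Note: kubelet is expected to restart in a crash loop at this point; it only
# stabilizes after 'kubeadm init' or 'kubeadm join' writes its configuration.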
949. # Install haproxy
  950. function script::install_haproxy() {
  951. local api_servers="$*"
  952. if [[ "${OFFLINE_TAG:-}" != "1" ]];then
  953. [ -f /usr/bin/haproxy ] && yum remove -y haproxy
  954. yum install -y haproxy
  955. fi
  956. # /etc/haproxy/haproxy.cfg
  957. [ ! -f /etc/haproxy/haproxy.cfg_bak ] && cp /etc/haproxy/haproxy.cfg{,_bak}
  958. cat << EOF > /etc/haproxy/haproxy.cfg
  959. global
  960. log /dev/log local0
  961. log /dev/log local1 notice
  962. tune.ssl.default-dh-param 2048
  963. defaults
  964. log global
  965. mode http
  966. option dontlognull
  967. timeout connect 5000ms
  968. timeout client 600000ms
  969. timeout server 600000ms
  970. listen stats
  971. bind :19090
  972. mode http
  973. balance
  974. stats uri /haproxy_stats
  975. stats auth admin:admin123
  976. stats admin if TRUE
  977. frontend kube-apiserver-https
  978. mode tcp
  979. option tcplog
  980. bind :6443
  981. default_backend kube-apiserver-backend
  982. backend kube-apiserver-backend
  983. mode tcp
  984. balance roundrobin
  985. stick-table type ip size 200k expire 30m
  986. stick on src
  987. $(index=1;for h in $api_servers;do echo " server apiserver${index} $h:6443 check";index=$((index+1));done)
  988. EOF
  989. systemctl enable haproxy
  990. systemctl restart haproxy
  991. }
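# Illustration: with api_servers="10.0.0.11 10.0.0.12" (hypothetical addresses)
# the loop above renders the backend as:
#   server apiserver1 10.0.0.11:6443 check
#   server apiserver2 10.0.0.12:6443 check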
992. # Install helm
  993. function script::install_helm() {
  994. local version="${1:-3.10.1}"
  995. version="${version#-3.10.1}"
  996. local path="/tmp"
  997. cd $path
998. # Download (from a China-local mirror)
999. wget https://mirrors.huaweicloud.com/helm/v$version/helm-v$version-linux-amd64.tar.gz
1000. # Extract
1001. tar -zxvf helm-v$version-linux-amd64.tar.gz
1002. # Install
1003. sudo mv linux-amd64/helm /usr/local/bin/
1004. # Clean up
1005. rm -rf helm-v$version-linux-amd64.tar.gz linux-amd64
1006. # Verify
  1007. helm version
  1008. cd ~
  1009. }
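# Usage sketch: script::install_helm "3.10.1" fetches
# helm-v3.10.1-linux-amd64.tar.gz from the Huawei Cloud mirror and installs the
# binary to /usr/local/bin/helm.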
1010. # Check required commands
  1011. function check::command() {
  1012. check::command_exists ssh openssh-clients
  1013. check::command_exists sshpass sshpass
  1014. check::command_exists wget wget
  1015. [[ "${OFFLINE_TAG:-}" == "1" ]] && check::command_exists tar tar
  1016. }
1017. # Check SSH connectivity
  1018. function check::ssh_conn() {
  1019. for host in $MASTER_NODES $WORKER_NODES
  1020. do
  1021. [ "$host" == "127.0.0.1" ] && continue
  1022. command::exec "${host}" "echo 0"
  1023. check::exit_code "$?" "check" "ssh $host connection" "exit"
  1024. done
  1025. }
1026. # Check OS support
  1027. function check::os() {
  1028. log::info "[check]" "os support: ${OS_SUPPORT}"
  1029. for host in $MASTER_NODES $WORKER_NODES
  1030. do
  1031. command::exec "${host}" "
  1032. [ -f /etc/os-release ] && source /etc/os-release
  1033. echo client_os:\${ID:-}\${VERSION_ID:-}
  1034. if [[ \"${OS_SUPPORT}\" == *\"\${ID:-}\${VERSION_ID:-}\"* ]]; then
  1035. exit 0
  1036. fi
  1037. exit 1
  1038. "
  1039. check::exit_code "$?" "check" "$host os support" "exit"
  1040. done
  1041. }
1042. # Check OS kernel version
  1043. function check::kernel() {
  1044. local version=${1:-}
  1045. log::info "[check]" "kernel version not less than ${version}"
  1046. version=$(echo "${version}" | awk -F. '{ printf("%d%03d%03d\n", $1,$2,$3); }')
  1047. for host in $MASTER_NODES $WORKER_NODES
  1048. do
  1049. command::exec "${host}" "
  1050. kernel_version=\$(uname -r)
  1051. kernel_version=\$(echo \${kernel_version/-*} | awk -F. '{ printf(\"%d%03d%03d\n\", \$1,\$2,\$3); }')
  1052. echo kernel_version \${kernel_version}
  1053. [[ \${kernel_version} -ge ${version} ]] && exit 0 || exit 1
  1054. "
  1055. check::exit_code "$?" "check" "$host kernel version" "exit"
  1056. done
  1057. }
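# Illustration: check::kernel 4.9.17 converts both the required and the running
# kernel version to zero-padded integers (4.9.17 -> 4009017) before comparing.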
1058. # Check api-server connectivity
  1059. function check::apiserver_conn() {
  1060. command::exec "${MGMT_NODE}" "kubectl get node"
  1061. check::exit_code "$?" "check" "conn apiserver" "exit"
  1062. }
1063. # Check exit code
  1064. function check::exit_code() {
  1065. local code=${1:-}
  1066. local app=${2:-}
  1067. local desc=${3:-}
  1068. local exit_script=${4:-}
  1069. if [[ "${code}" == "0" ]]; then
  1070. log::info "[${app}]" "${desc} succeeded."
  1071. else
  1072. log::error "[${app}]" "${desc} failed."
  1073. [[ "$exit_script" == "exit" ]] && exit "$code"
  1074. fi
  1075. }
1076. # Preflight checks
  1077. function check::preflight() {
  1078. # check command
  1079. check::command
  1080. # check ssh conn
  1081. check::ssh_conn
  1082. # check os
  1083. check::os
  1084. # check os kernel
  1085. [[ "${KUBE_NETWORK:-}" == "cilium" ]] && check::kernel 4.9.17
  1086. # check api-server conn
  1087. if [[ $(( ${ADD_TAG:-0} + ${DEL_TAG:-0} + ${UPGRADE_TAG:-0} + ${RENEW_CERT_TAG:-0} )) -gt 0 ]]; then
  1088. check::apiserver_conn
  1089. fi
  1090. }
1091. # Install packages
  1092. function install::package() {
1093. # Determine the latest stable k8s version
  1094. if [[ "${KUBE_CRI}" == "cri-o" && "${KUBE_CRI_VERSION}" == "latest" ]]; then
  1095. KUBE_CRI_VERSION="${KUBE_VERSION}"
  1096. if [[ "${KUBE_CRI_VERSION}" == "latest" ]]; then
  1097. if command::exec "127.0.0.1" "wget https://storage.googleapis.com/kubernetes-release/release/stable.txt -q -O -"; then
  1098. KUBE_CRI_VERSION="${COMMAND_OUTPUT#v}"
  1099. else
  1100. log::error "[install]" "get kubernetes stable version error. Please specify the version!"
  1101. exit 1
  1102. fi
  1103. fi
  1104. KUBE_CRI_VERSION="${KUBE_CRI_VERSION%.*}"
  1105. fi
1106. # Install CRI and kube components
  1107. for host in $MASTER_NODES $WORKER_NODES
  1108. do
  1109. # install cri
  1110. log::info "[install]" "install ${KUBE_CRI} on $host."
  1111. command::exec "${host}" "
  1112. export OFFLINE_TAG=${OFFLINE_TAG:-0}
  1113. $(declare -f script::install_"${KUBE_CRI}")
  1114. script::install_${KUBE_CRI} $KUBE_CRI_VERSION
  1115. "
  1116. check::exit_code "$?" "install" "install ${KUBE_CRI} on $host"
  1117. # install kube
  1118. log::info "[install]" "install kube on $host"
  1119. command::exec "${host}" "
  1120. export OFFLINE_TAG=${OFFLINE_TAG:-0}
  1121. $(declare -f script::install_kube)
  1122. script::install_kube $KUBE_VERSION
  1123. "
  1124. check::exit_code "$?" "install" "install kube on $host"
  1125. done
  1126. # 配置 kube
  1127. local apiservers=$MASTER_NODES
  1128. if [[ "$apiservers" == "127.0.0.1" ]]; then
  1129. command::exec "${MGMT_NODE}" "ip -o route get to 8.8.8.8 | sed -n 's/.*src \([0-9.]\+\).*/\1/p'"
  1130. get::command_output "apiservers" "$?"
  1131. fi
  1132. # 输出 api-servers 信息
  1133. if [[ "${ADD_TAG:-}" == "1" ]]; then
  1134. command::exec "${MGMT_NODE}" "
  1135. kubectl get node --selector='node-role.kubernetes.io/master' -o jsonpath='{$.items[*].status.addresses[?(@.type==\"InternalIP\")].address}'
  1136. "
  1137. get::command_output "apiservers" "$?"
  1138. fi
  1139. # 安装 haproxy
  1140. for host in $WORKER_NODES
  1141. do
  1142. # install haproxy
  1143. log::info "[install]" "install haproxy on $host"
  1144. command::exec "${host}" "
  1145. export OFFLINE_TAG=${OFFLINE_TAG:-0}
  1146. $(declare -f script::install_haproxy)
  1147. script::install_haproxy \"$apiservers\"
  1148. "
  1149. check::exit_code "$?" "install" "install haproxy on $host"
  1150. done
  1151. # 10年证书
  1152. if [[ "${CERT_YEAR_TAG:-}" == "1" ]]; then
  1153. local version="${KUBE_VERSION}"
  1154. if [[ "${version}" == "latest" ]]; then
  1155. if command::exec "127.0.0.1" "wget https://storage.googleapis.com/kubernetes-release/release/stable.txt -q -O -"; then
  1156. version="${COMMAND_OUTPUT#v}"
  1157. else
  1158. log::error "[install]" "get kubernetes stable version error. Please specify the version!"
  1159. exit 1
  1160. fi
  1161. fi
  1162. log::info "[install]" "download kubeadm 10 years certs client"
  1163. local certs_file="${OFFLINE_DIR}/bins/kubeadm-linux-amd64"
  1164. MGMT_NODE="127.0.0.1" utils::download_file "${GITHUB_PROXY}https://github.com/lework/kubeadm-certs/releases/download/v${version}/kubeadm-linux-amd64" "${certs_file}"
  1165. for host in $MASTER_NODES $WORKER_NODES
  1166. do
  1167. log::info "[install]" "scp kubeadm client to $host"
  1168. command::scp "${host}" "${certs_file}" "/tmp/kubeadm-linux-amd64"
  1169. check::exit_code "$?" "install" "scp kubeadm client to $host" "exit"
  1170. command::exec "${host}" "
  1171. set -e
  1172. if [[ -f /tmp/kubeadm-linux-amd64 ]]; then
  1173. [[ -f /usr/bin/kubeadm && ! -f /usr/bin/kubeadm_src ]] && mv -fv /usr/bin/kubeadm{,_src}
  1174. mv -fv /tmp/kubeadm-linux-amd64 /usr/bin/kubeadm
  1175. chmod +x /usr/bin/kubeadm
  1176. else
  1177. echo \"not found /tmp/kubeadm-linux-amd64\"
  1178. exit 1
  1179. fi
  1180. "
  1181. check::exit_code "$?" "install" "$host: use kubeadm 10 years certs client"
  1182. done
  1183. fi
  1184. # 安装helm
  1185. for host in $MASTER_NODES $WORKER_NODES
  1186. do
  1187. log::info "[install]" "install helm on $host"
  1188. command::exec "${host}" "
  1189. export OFFLINE_TAG=${OFFLINE_TAG:-0}
  1190. $(declare -f script::install_helm)
  1191. script::install_helm $HELM_VERSION
  1192. "
  1193. check::exit_code "$?" "install" "install helm on $host"
  1194. done
  1195. }
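
# Sketch of how "latest" is resolved above (assumes outbound access to storage.googleapis.com):
#   wget -q -O - https://storage.googleapis.com/kubernetes-release/release/stable.txt   # e.g. v1.23.4
# The leading "v" is stripped via ${COMMAND_OUTPUT#v}, and for cri-o the patch level is then
# dropped with ${KUBE_CRI_VERSION%.*}, since cri-o packages are published per Kubernetes minor version.
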
# Upgrade node kernel
function init::upgrade_kernel() {
  [[ "${UPGRADE_KERNEL_TAG:-}" != "1" ]] && return
  for host in $MASTER_NODES $WORKER_NODES
  do
    log::info "[init]" "upgrade kernel: $host"
    command::exec "${host}" "
      export OFFLINE_TAG=${OFFLINE_TAG:-0}
      $(declare -f script::upgrade_kernel)
      script::upgrade_kernel
    "
    check::exit_code "$?" "init" "upgrade kernel $host" "exit"
  done
  for host in $MASTER_NODES $WORKER_NODES
  do
    command::exec "${host}" "bash -c 'sleep 15 && reboot' &>/dev/null &"
    check::exit_code "$?" "init" "$host: Wait for 15s to restart"
  done
  log::info "[notice]" "Please execute the command again!"
  log::access "[command]" "bash $0 ${SCRIPT_PARAMETER// --upgrade-kernel/}"
  exit 0
}
# Renew node certificates
function cert::renew_node() {
  local role="${1:-master}"
  local hosts=""
  local kubelet_config=""
  command::exec "${MGMT_NODE}" "
    kubectl get node --selector='node-role.kubernetes.io/${role}' -o jsonpath='{range.items[*]}{.metadata.name } {end}'
  "
  get::command_output "hosts" "$?"
  for host in ${hosts}
  do
    log::info "[cert]" "drain $host"
    command::exec "${MGMT_NODE}" "kubectl drain $host --force --ignore-daemonsets --delete-local-data"
    check::exit_code "$?" "cert" "$host: drain"
    sleep 5
    if [[ "${role}" == "master" ]]; then
      command::exec "${host}" "cp -rf /etc/kubernetes /etc/kubernetes_\$(date +%Y-%m-%d)"
      check::exit_code "$?" "cert" "$host: backup kubernetes config"
      command::exec "${host}" "kubeadm certs renew all 2>/dev/null|| kubeadm alpha certs renew all"
      check::exit_code "$?" "cert" "$host: renew certs"
      command::exec "${host}" "
        $(declare -f utils::retry)
        kill -s SIGHUP \$(pidof etcd) && \
        utils::retry 10 \"echo -n | openssl s_client -connect localhost:2379 2>&1 | sed -ne '/-BEGIN CERTIFICATE-/,/-END CERTIFICATE-/p' | openssl x509 -text -noout | grep Not\"
      "
      check::exit_code "$?" "cert" "$host: restart etcd"
      command::exec "${host}" "
        $(declare -f utils::retry)
        kill -s SIGHUP \$(pidof kube-apiserver) && \
        utils::retry 10 \"echo -n | openssl s_client -connect localhost:6443 2>&1 | sed -ne '/-BEGIN CERTIFICATE-/,/-END CERTIFICATE-/p' | openssl x509 -text -noout | grep Not\"
      "
      check::exit_code "$?" "cert" "$host: restart kube-apiserver"
      command::exec "${host}" "
        $(declare -f utils::retry)
        kill -s SIGHUP \$(pidof kube-controller-manager) && \
        utils::retry 10 \"echo -n | openssl s_client -connect localhost:10257 2>&1 | sed -ne '/-BEGIN CERTIFICATE-/,/-END CERTIFICATE-/p' | openssl x509 -text -noout | grep Not\"
      "
      check::exit_code "$?" "cert" "$host: restart kube-controller-manager"
      command::exec "${host}" "
        $(declare -f utils::retry)
        kill -s SIGHUP \$(pidof kube-scheduler) && \
        utils::retry 10 \"echo -n | openssl s_client -connect localhost:10259 2>&1 | sed -ne '/-BEGIN CERTIFICATE-/,/-END CERTIFICATE-/p' | openssl x509 -text -noout | grep Not\"
      "
      check::exit_code "$?" "cert" "$host: restart kube-scheduler"
    fi
    log::info "[cert]" "get kubelet config"
    command::exec "${MGMT_NODE}" "
      kubeadm kubeconfig user --org system:nodes --client-name system:node:${host} --config /etc/kubernetes/kubeadmcfg.yaml || kubeadm alpha kubeconfig user --org system:nodes --client-name system:node:${host} --config /etc/kubernetes/kubeadmcfg.yaml
    "
    get::command_output "kubelet_config" "$?" "exit"
    if [[ "$kubelet_config" != "" ]]; then
      log::info "[cert]" "copy kubelet config"
      command::exec "${host}" "
        cp /etc/kubernetes/kubelet.conf /etc/kubernetes/kubelet.conf_bak
        echo '$(printf "%s" "${kubelet_config}" | sed 's#https://.*:#https://127.0.0.1:#g')' > /etc/kubernetes/kubelet.conf
      "
      check::exit_code "$?" "cert" "$host: copy kubelet config"
      command::exec "${host}" "rm -rfv /var/lib/kubelet/pki/*"
      check::exit_code "$?" "cert" "$host: delete kubelet pki files"
      command::exec "${host}" "
        $(declare -f utils::retry)
        systemctl restart kubelet && \
        utils::retry 10 \"echo -n | openssl s_client -connect localhost:10250 2>&1 | sed -ne '/-BEGIN CERTIFICATE-/,/-END CERTIFICATE-/p' | openssl x509 -text -noout | grep Not\"
      "
      local status="$?"
      check::exit_code "${status}" "cert" "$host: restart kubelet"
      if [[ "${status}" == "0" ]]; then
        sleep 5
        command::exec "${MGMT_NODE}" "kubectl uncordon ${host}"
        check::exit_code "$?" "cert" "uncordon ${host} node"
      fi
    fi
  done
}
# Renew cluster certificates
function cert::renew() {
  log::info "[cert]" "renew cluster cert"
  cert::renew_node "master"
  cert::renew_node "worker"
  log::info "[cert]" "cluster status"
  command::exec "${MGMT_NODE}" "
    echo
    kubectl get node
    echo
    kubeadm certs check-expiration 2>/dev/null || kubeadm alpha certs check-expiration
  " && printf "%s" "${COMMAND_OUTPUT}"
}
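
# Hedged sketch: the same expiry probe wrapped in utils::retry above can be run by hand on a
# master node to confirm a renewed serving certificate (port and host are examples):
#   echo -n | openssl s_client -connect localhost:6443 2>/dev/null | openssl x509 -noout -dates
#   kubeadm certs check-expiration   # or: kubeadm alpha certs check-expiration (older releases)
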
# Initialize node configuration
function init::node_config() {
  local master_index=${master_index:-1}
  local worker_index=${worker_index:-1}
  log::info "[init]" "Get $MGMT_NODE InternalIP."
  command::exec "${MGMT_NODE}" "
    ip -4 route get 8.8.8.8 2>/dev/null | head -1 | awk '{print \$7}'
  "
  get::command_output "MGMT_NODE_IP" "$?" "exit"
  # master
  for host in $MASTER_NODES
  do
    log::info "[init]" "master: $host"
    command::exec "${host}" "
      export OFFLINE_TAG=${OFFLINE_TAG:-0} KUBE_APISERVER=${KUBE_APISERVER} SKIP_SET_OS_REPO=${SKIP_SET_OS_REPO:-false}
      $(declare -f script::init_node)
      script::init_node
    "
    check::exit_code "$?" "init" "init master $host" "exit"
    # set hostname and hostname resolution
    command::exec "${host}" "
      printf \"\\n${MGMT_NODE_IP} $KUBE_APISERVER\\n$node_hosts\" >> /etc/hosts
      hostnamectl set-hostname ${HOSTNAME_PREFIX}-master-node${master_index}
    "
    check::exit_code "$?" "init" "$host set hostname and hostname resolution"
    # set audit-policy
    log::info "[init]" "$host: set audit-policy file."
    command::exec "${host}" "
      [ ! -d /etc/kubernetes ] && mkdir -p /etc/kubernetes
      cat << EOF > /etc/kubernetes/audit-policy.yaml
# Log all requests at the Metadata level.
apiVersion: audit.k8s.io/v1
kind: Policy
rules:
- level: Metadata
EOF
    "
    check::exit_code "$?" "init" "$host: set audit-policy file" "exit"
    master_index=$((master_index + 1))
  done
  # worker
  for host in $WORKER_NODES
  do
    log::info "[init]" "worker: $host"
    command::exec "${host}" "
      export OFFLINE_TAG=${OFFLINE_TAG:-0} KUBE_APISERVER=${KUBE_APISERVER} SKIP_SET_OS_REPO=${SKIP_SET_OS_REPO:-false}
      $(declare -f script::init_node)
      script::init_node
    "
    check::exit_code "$?" "init" "init worker $host" "exit"
    # set hostname and hostname resolution
    command::exec "${host}" "
      printf \"\\n127.0.0.1 $KUBE_APISERVER\\n$node_hosts\" >> /etc/hosts
      hostnamectl set-hostname ${HOSTNAME_PREFIX}-worker-node${worker_index}
    "
    worker_index=$((worker_index + 1))
  done
}
# Initialize nodes
function init::node() {
  init::upgrade_kernel
  local node_hosts=""
  local i=1
  for h in $MASTER_NODES
  do
    node_hosts="${node_hosts}\n$h ${HOSTNAME_PREFIX}-master-node${i}"
    i=$((i + 1))
  done
  local i=1
  for h in $WORKER_NODES
  do
    node_hosts="${node_hosts}\n$h ${HOSTNAME_PREFIX}-worker-node${i}"
    i=$((i + 1))
  done
  init::node_config
}
# Initialize nodes being added to the cluster
function init::add_node() {
  init::upgrade_kernel
  local master_index=0
  local worker_index=0
  local node_hosts=""
  local add_node_hosts=""
  command::exec "${MGMT_NODE}" "
    kubectl get node --selector='node-role.kubernetes.io/master' -o jsonpath='{range.items[*]}{.status.addresses[?(@.type==\"InternalIP\")].address } {end}' | awk '{print \$1}'
  "
  get::command_output "MGMT_NODE" "$?" "exit"
  # get the hostnames of the existing cluster nodes
  command::exec "${MGMT_NODE}" "
    kubectl get node -o jsonpath='{range.items[*]}{.status.addresses[?(@.type==\"InternalIP\")].address} {.metadata.name }\\n{end}'
  "
  get::command_output "node_hosts" "$?" "exit"
  for host in $MASTER_NODES $WORKER_NODES
  do
    if [[ $node_hosts == *"$host"* ]]; then
      log::error "[init]" "The host $host is already in the cluster!"
      exit 1
    fi
  done
  if [[ "$MASTER_NODES" != "" ]]; then
    command::exec "${MGMT_NODE}" "
      kubectl get node --selector='node-role.kubernetes.io/master' -o jsonpath='{\$.items[*].metadata.name}' |grep -Eo 'node[0-9]*'|grep -Eo '[0-9]*'|awk -F ' ' 'BEGIN {max = 0} {if (\$0+0 > max+0) max=\$0} END {print max}'
    "
    get::command_output "master_index" "$?" "exit"
    master_index=$(( master_index + 1 ))
    local i=$master_index
    for host in $MASTER_NODES
    do
      add_node_hosts="${add_node_hosts}\n${host:-} ${HOSTNAME_PREFIX}-master-node${i}"
      i=$((i + 1))
    done
  fi
  if [[ "$WORKER_NODES" != "" ]]; then
    command::exec "${MGMT_NODE}" "
      kubectl get node --selector='node-role.kubernetes.io/worker' -o jsonpath='{\$.items[*].metadata.name}'| grep -Eo 'node[0-9]*'|grep -Eo '[0-9]*'|awk 'BEGIN {max = 0} {if (\$0+0 > max+0) max=\$0} END {print max}' || echo 0
    "
    get::command_output "worker_index" "$?" "exit"
    worker_index=$(( worker_index + 1 ))
    local i=$worker_index
    for host in $WORKER_NODES
    do
      add_node_hosts="${add_node_hosts}\n${host:-} ${HOSTNAME_PREFIX}-worker-node${i}"
      i=$((i + 1))
    done
  fi
  # add hostname resolution for the new nodes to the existing cluster nodes
  for host in $(echo -ne "$node_hosts" | awk '{print $1}')
  do
    command::exec "${host}" "
      printf \"$add_node_hosts\" >> /etc/hosts
    "
    check::exit_code "$?" "init" "$host add new node hostname resolution"
  done
  node_hosts="${node_hosts}\n${add_node_hosts}"
  init::node_config
}
# Cluster initialization
function kubeadm::init() {
  log::info "[kubeadm init]" "kubeadm init on ${MGMT_NODE}"
  log::info "[kubeadm init]" "${MGMT_NODE}: set kubeadmcfg.yaml"
  command::exec "${MGMT_NODE}" "
    PAUSE_VERSION=$(kubeadm config images list 2>/dev/null | awk -F: '/pause/ {print $2}')
    cat << EOF > /etc/kubernetes/kubeadmcfg.yaml
---
apiVersion: kubeadm.k8s.io/v1beta2
kind: InitConfiguration
${kubelet_nodeRegistration}
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: ipvs
ipvs:
  minSyncPeriod: 5s
  syncPeriod: 5s
  # ipvs load-balancing scheduler
  scheduler: 'wrr'
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
maxPods: 200
cgroupDriver: systemd
runtimeRequestTimeout: 5m
# allow kubelet to start even when swap is enabled
failSwapOn: false
nodeStatusUpdateFrequency: 5s
rotateCertificates: true
imageGCLowThresholdPercent: 70
imageGCHighThresholdPercent: 80
# soft eviction thresholds
evictionSoft:
  imagefs.available: 15%
  memory.available: 512Mi
  nodefs.available: 15%
  nodefs.inodesFree: 10%
# how long a soft threshold must be exceeded before eviction starts
evictionSoftGracePeriod:
  imagefs.available: 3m
  memory.available: 1m
  nodefs.available: 3m
  nodefs.inodesFree: 1m
# hard eviction thresholds
evictionHard:
  imagefs.available: 10%
  memory.available: 256Mi
  nodefs.available: 10%
  nodefs.inodesFree: 5%
evictionMaxPodGracePeriod: 30
# node resource reservations
kubeReserved:
  cpu: 200m\$(if [[ \$(cat /proc/meminfo | awk '/MemTotal/ {print \$2}') -gt 3670016 ]]; then echo -e '\n  memory: 256Mi';fi)
  ephemeral-storage: 1Gi
systemReserved:
  cpu: 300m\$(if [[ \$(cat /proc/meminfo | awk '/MemTotal/ {print \$2}') -gt 3670016 ]]; then echo -e '\n  memory: 512Mi';fi)
  ephemeral-storage: 1Gi
kubeReservedCgroup: /kube.slice
systemReservedCgroup: /system.slice
enforceNodeAllocatable:
- pods
---
apiVersion: kubeadm.k8s.io/v1beta2
kind: ClusterConfiguration
kubernetesVersion: $KUBE_VERSION
controlPlaneEndpoint: $KUBE_APISERVER:6443
networking:
  dnsDomain: $KUBE_DNSDOMAIN
  podSubnet: $KUBE_POD_SUBNET
  serviceSubnet: $KUBE_SERVICE_SUBNET
imageRepository: $KUBE_IMAGE_REPO
apiServer:
  certSANs:
  - 127.0.0.1
  - $KUBE_APISERVER
$(for h in $MASTER_NODES;do echo "  - $h";done)
  extraArgs:
    event-ttl: '720h'
    service-node-port-range: '30000-50000'
    # audit log settings
    audit-log-maxage: '20'
    audit-log-maxbackup: '10'
    audit-log-maxsize: '100'
    audit-log-path: /var/log/kube-audit/audit.log
    audit-policy-file: /etc/kubernetes/audit-policy.yaml
  extraVolumes:
  - name: audit-config
    hostPath: /etc/kubernetes/audit-policy.yaml
    mountPath: /etc/kubernetes/audit-policy.yaml
    readOnly: true
    pathType: File
  - name: audit-log
    hostPath: /var/log/kube-audit
    mountPath: /var/log/kube-audit
    pathType: DirectoryOrCreate
  - name: localtime
    hostPath: /etc/localtime
    mountPath: /etc/localtime
    readOnly: true
    pathType: File
controllerManager:
  extraArgs:
    bind-address: 0.0.0.0
    node-cidr-mask-size: '24'
    deployment-controller-sync-period: '10s'
    node-monitor-grace-period: '20s'
    pod-eviction-timeout: '2m'
    terminated-pod-gc-threshold: '30'
    experimental-cluster-signing-duration: 87600h
    feature-gates: RotateKubeletServerCertificate=true
  extraVolumes:
  - hostPath: /etc/localtime
    mountPath: /etc/localtime
    name: localtime
    readOnly: true
    pathType: File
scheduler:
  extraArgs:
    bind-address: 0.0.0.0
  extraVolumes:
  - hostPath: /etc/localtime
    mountPath: /etc/localtime
    name: localtime
    readOnly: true
    pathType: File
$(if [[ "${KUBE_VERSION}" == "1.21.1" ]]; then
echo "dns:
  type: CoreDNS
  imageRepository: docker.io
  imageTag: 1.8.0"
fi)
EOF
  "
  check::exit_code "$?" "kubeadm init" "${MGMT_NODE}: set kubeadmcfg.yaml" "exit"
  log::info "[kubeadm init]" "${MGMT_NODE}: kubeadm init start."
  command::exec "${MGMT_NODE}" "kubeadm init --config=/etc/kubernetes/kubeadmcfg.yaml --upload-certs"
  check::exit_code "$?" "kubeadm init" "${MGMT_NODE}: kubeadm init" "exit"
  sleep 3
  log::info "[kubeadm init]" "${MGMT_NODE}: set kube config."
  command::exec "${MGMT_NODE}" "
    mkdir -p \$HOME/.kube
    sudo cp -f /etc/kubernetes/admin.conf \$HOME/.kube/config
  "
  check::exit_code "$?" "kubeadm init" "${MGMT_NODE}: set kube config" "exit"
  if [[ "$(echo "$MASTER_NODES" | wc -w)" == "1" ]]; then
    log::info "[kubeadm init]" "${MGMT_NODE}: delete master taint"
    command::exec "${MGMT_NODE}" "kubectl taint nodes --all node-role.kubernetes.io/master-"
    check::exit_code "$?" "kubeadm init" "${MGMT_NODE}: delete master taint"
  fi
  command::exec "${MGMT_NODE}" "
    kubectl create clusterrolebinding node-client-auto-approve-csr --clusterrole=system:certificates.k8s.io:certificatesigningrequests:nodeclient --user=kubelet-bootstrap
    kubectl create clusterrolebinding node-client-auto-renew-crt --clusterrole=system:certificates.k8s.io:certificatesigningrequests:selfnodeclient --group=system:nodes
    kubectl create clusterrolebinding node-server-auto-renew-crt --clusterrole=system:certificates.k8s.io:certificatesigningrequests:selfnodeserver --group=system:nodes
  "
  check::exit_code "$?" "kubeadm init" "Auto-Approve kubelet cert csr" "exit"
}
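
# Hedged post-init sketch: once kubeadm init succeeds, the control plane can be inspected
# from the management node with the kubeconfig copied above, e.g.
#   kubectl get pods -n kube-system -o wide
#   kubectl get --raw='/readyz?verbose'
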
# Join the cluster
function kubeadm::join() {
  log::info "[kubeadm join]" "master: get join token and cert info"
  command::exec "${MGMT_NODE}" "
    openssl x509 -pubkey -in /etc/kubernetes/pki/ca.crt | openssl rsa -pubin -outform der 2>/dev/null | openssl dgst -sha256 -hex | sed 's/^.* //'
  "
  get::command_output "CACRT_HASH" "$?" "exit"
  command::exec "${MGMT_NODE}" "
    kubeadm init phase upload-certs --upload-certs --config /etc/kubernetes/kubeadmcfg.yaml 2>> /dev/null | tail -1
  "
  get::command_output "INTI_CERTKEY" "$?" "exit"
  command::exec "${MGMT_NODE}" "
    kubeadm token create
  "
  get::command_output "INIT_TOKEN" "$?" "exit"
  command::exec "${MGMT_NODE}" "
    kubeadm config images list 2>/dev/null | awk -F: '/pause/ {print \$2}'
  "
  get::command_output "PAUSE_VERSION" "$?"
  for host in $MASTER_NODES
  do
    [[ "${MGMT_NODE}" == "$host" ]] && continue
    log::info "[kubeadm join]" "master $host join cluster."
    command::exec "${host}" "
      cat << EOF > /etc/kubernetes/kubeadmcfg.yaml
---
apiVersion: kubeadm.k8s.io/v1beta2
kind: JoinConfiguration
discovery:
  bootstrapToken:
    apiServerEndpoint: $KUBE_APISERVER:6443
    caCertHashes:
    - sha256:${CACRT_HASH:-}
    token: ${INIT_TOKEN}
  timeout: 5m0s
controlPlane:
  certificateKey: ${INTI_CERTKEY:-}
${kubelet_nodeRegistration}
EOF
      kubeadm join --config /etc/kubernetes/kubeadmcfg.yaml
    "
    check::exit_code "$?" "kubeadm join" "master $host join cluster"
    log::info "[kubeadm join]" "$host: set kube config."
    command::exec "${host}" "
      mkdir -p \$HOME/.kube
      sudo cp -f /etc/kubernetes/admin.conf \$HOME/.kube/config
    "
    check::exit_code "$?" "kubeadm join" "$host: set kube config" "exit"
    command::exec "${host}" "
      sed -i 's#.*$KUBE_APISERVER#127.0.0.1 $KUBE_APISERVER#g' /etc/hosts
    "
  done
  for host in $WORKER_NODES
  do
    log::info "[kubeadm join]" "worker $host join cluster."
    command::exec "${host}" "
      mkdir -p /etc/kubernetes/manifests
      cat << EOF > /etc/kubernetes/kubeadmcfg.yaml
---
apiVersion: kubeadm.k8s.io/v1beta2
kind: JoinConfiguration
discovery:
  bootstrapToken:
    apiServerEndpoint: $KUBE_APISERVER:6443
    caCertHashes:
    - sha256:${CACRT_HASH:-}
    token: ${INIT_TOKEN}
  timeout: 5m0s
${kubelet_nodeRegistration}
EOF
      kubeadm join --config /etc/kubernetes/kubeadmcfg.yaml
    "
    check::exit_code "$?" "kubeadm join" "worker $host join cluster"
    log::info "[kubeadm join]" "set $host worker node role."
    command::exec "${MGMT_NODE}" "
      kubectl get node --selector='!node-role.kubernetes.io/master' | grep '<none>' | awk '{print \"kubectl label node \" \$1 \" node-role.kubernetes.io/worker= --overwrite\" }' | bash
    "
    check::exit_code "$?" "kubeadm join" "set $host worker node role"
  done
}
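
# Hedged sketch: the discovery values assembled above match what kubeadm itself prints;
# on an existing master the equivalent one-liner is
#   kubeadm token create --print-join-command
# which emits a token together with the same sha256 hash of /etc/kubernetes/pki/ca.crt.
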
# Wait for a resource to become ready
function kube::wait() {
  local app=$1
  local namespace=$2
  local resource=$3
  local selector=${4:-}
  sleep 3
  log::info "[waiting]" "waiting $app"
  command::exec "${MGMT_NODE}" "
    $(declare -f utils::retry)
    utils::retry 6 kubectl wait --namespace ${namespace} \
      --for=condition=ready ${resource} \
      --selector=$selector \
      --timeout=60s
  "
  local status="$?"
  check::exit_code "$status" "waiting" "$app ${resource} ready"
  return "$status"
}
# Apply a manifest
function kube::apply() {
  local file=$1
  log::info "[apply]" "$file"
  command::exec "${MGMT_NODE}" "
    $(declare -f utils::retry)
    if [ -f \"$file\" ]; then
      utils::retry 6 kubectl apply --wait=true --timeout=10s -f \"$file\"
    else
      utils::retry 6 \"cat <<EOF | kubectl apply --wait=true --timeout=10s -f -
\$(printf \"%s\" \"${2:-}\")
EOF
\"
    fi
  "
  local status="$?"
  check::exit_code "$status" "apply" "add $file" "exit"
  return "$status"
}
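
# Usage sketch for the two helpers above (arguments are examples):
#   kube::apply "${OFFLINE_DIR}/manifests/calico.yaml"            # apply a manifest file on ${MGMT_NODE}
#   kube::wait  "coredns" "kube-system" "pods" "k8s-app=kube-dns" # block until matching pods are Ready
# kube::apply also accepts inline manifest text as a second argument, as add::network does
# for the hubble-ui Ingress further below.
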
# Cluster status
function kube::status() {
  sleep 5
  log::info "[cluster]" "cluster status"
  command::exec "${MGMT_NODE}" "
    echo
    kubectl get node -o wide
    echo
    kubectl get pods -A
  " && printf "%s" "${COMMAND_OUTPUT}"
}
# Add or remove haproxy backend servers
function config::haproxy_backend() {
  local action=${1:-add}
  local action_cmd=""
  local master_nodes
  if [[ "$MASTER_NODES" == "" || "$MASTER_NODES" == "127.0.0.1" ]]; then
    return
  fi
  command::exec "${MGMT_NODE}" "
    kubectl get node --selector='node-role.kubernetes.io/master' -o jsonpath='{\$.items[*].status.addresses[?(@.type==\"InternalIP\")].address}'
  "
  get::command_output "master_nodes" "$?" "exit"
  for m in $MASTER_NODES
  do
    if [[ "${action}" == "add" ]]; then
      num=$(echo "${m}"| awk -F'.' '{print $4}')
      action_cmd="${action_cmd}\necho \" server apiserver${num} ${m}:6443 check\" >> /etc/haproxy/haproxy.cfg"
    else
      [[ "${master_nodes}" == *"${m}"* ]] || return
      action_cmd="${action_cmd}\n sed -i -e \"/${m}/d\" /etc/haproxy/haproxy.cfg"
    fi
  done
  command::exec "${MGMT_NODE}" "
    kubectl get node --selector='!node-role.kubernetes.io/master' -o jsonpath='{\$.items[*].status.addresses[?(@.type==\"InternalIP\")].address}'
  "
  get::command_output "worker_nodes" "$?"
  for host in ${worker_nodes:-}
  do
    log::info "[config]" "worker ${host}: ${action} apiserver from haproxy"
    command::exec "${host}" "
      $(echo -ne "${action_cmd}")
      haproxy -c -f /etc/haproxy/haproxy.cfg && systemctl reload haproxy
    "
    check::exit_code "$?" "config" "worker ${host}: ${action} apiserver(${m}) from haproxy"
  done
}
# Update the etcd snapshot job's replica count
function config::etcd_snapshot() {
  command::exec "${MGMT_NODE}" "
    count=\$(kubectl get node --selector='node-role.kubernetes.io/master' --no-headers | wc -l)
    kubectl -n kube-system patch cronjobs etcd-snapshot --patch \"
spec:
  jobTemplate:
    spec:
      completions: \${count:-1}
      parallelism: \${count:-1}
\"
  "
  check::exit_code "$?" "config" "etcd-snapshot completions options"
}
# Capture a command's output into a variable
function get::command_output() {
  local app="$1"
  local status="$2"
  local is_exit="${3:-}"
  if [[ "$status" == "0" && "${COMMAND_OUTPUT}" != "" ]]; then
    log::info "[command]" "get $app value succeeded."
    eval "$app=\"${COMMAND_OUTPUT}\""
  else
    log::error "[command]" "get $app value failed."
    [[ "$is_exit" == "exit" ]] && exit "$status"
  fi
  return "$status"
}
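
# Usage sketch of the command::exec / get::command_output pair (variable name is an example):
#   command::exec "${MGMT_NODE}" "kubectl get node -o name | wc -l"
#   get::command_output "node_count" "$?"     # on success, $node_count now holds the output
# A third argument of "exit" aborts the script when the command fails or returns nothing.
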
# Get the ingress connection address
function get::ingress_conn() {
  local port="${1:-80}"
  local ingress_name="${2:-ingress-${KUBE_INGRESS}-controller}"
  command::exec "${MGMT_NODE}" "
    kubectl get node -o jsonpath='{range .items[*]}{ .status.addresses[?(@.type==\"InternalIP\")].address} {.status.conditions[?(@.status == \"True\")].status}{\"\\n\"}{end}' | awk '{if(\$2==\"True\")a=\$1}END{print a}'
  "
  get::command_output "node_ip" "$?"
  command::exec "${MGMT_NODE}" "
    kubectl get svc --all-namespaces -o go-template=\"{{range .items}}{{if eq .metadata.name \\\"${ingress_name}\\\"}}{{range.spec.ports}}{{if eq .port ${port}}}{{.nodePort}}{{end}}{{end}}{{end}}{{end}}\"
  "
  get::command_output "node_port" "$?"
  INGRESS_CONN="${node_ip:-nodeIP}:${node_port:-nodePort}"
}
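
# Usage sketch (port is an example): resolve a reachable "<nodeIP>:<nodePort>" pair for the
# ingress controller service, then reuse it in access hints as add::network does for hubble-ui:
#   get::ingress_conn "80"
#   log::access "[ingress]" "http://${INGRESS_CONN}"
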
######################################################################################################
# Main invocation logic
######################################################################################################
# Add the network component
function add::network() {
  if [[ "$KUBE_NETWORK" == "flannel" ]]; then
    log::info "[network]" "add flannel"
    local flannel_file="${OFFLINE_DIR}/manifests/kube-flannel.yml"
    utils::download_file "https://cdn.jsdelivr.net/gh/coreos/flannel@v${FLANNEL_VERSION}/Documentation/kube-flannel.yml" "${flannel_file}"
    command::exec "${MGMT_NODE}" "
      sed -i -e 's#10.244.0.0/16#${KUBE_POD_SUBNET}#g' \
             -e 's#quay.io/coreos#${KUBE_IMAGE_REPO}#g' \
             -e 's#\"Type\": \"vxlan\"#\"Type\": \"${KUBE_FLANNEL_TYPE}\"#g' \"${flannel_file}\"
      if [[ \"${KUBE_FLANNEL_TYPE}\" == \"vxlan\" ]]; then
        sed -i 's#\"Type\": \"vxlan\"#\"Type\": \"vxlan\", \"DirectRouting\": true#g' \"${flannel_file}\"
      fi
    "
    check::exit_code "$?" "flannel" "change flannel pod subnet"
    kube::apply "${flannel_file}"
    kube::wait "flannel" "kube-system" "pods" "app=flannel"
  elif [[ "$KUBE_NETWORK" == "calico" ]]; then
    log::info "[network]" "add calico"
    utils::download_file "https://projectcalico.docs.tigera.io/archive/v${CALICO_VERSION%.*}/manifests/calico.yaml" "${OFFLINE_DIR}/manifests/calico.yaml"
    utils::download_file "https://projectcalico.docs.tigera.io/archive/v${CALICO_VERSION%.*}/manifests/calicoctl.yaml" "${OFFLINE_DIR}/manifests/calicoctl.yaml"
    command::exec "${MGMT_NODE}" "
      sed -i \"s#:v.*#:v${CALICO_VERSION}#g\" \"${OFFLINE_DIR}/manifests/calico.yaml\"
      sed -i 's#value: \"Always\"#value: \"CrossSubnet\"#g' \"${OFFLINE_DIR}/manifests/calico.yaml\"
      sed -i \"s#:v.*#:v${CALICO_VERSION}#g\" \"${OFFLINE_DIR}/manifests/calicoctl.yaml\"
    "
    check::exit_code "$?" "network" "change calico version to ${CALICO_VERSION}"
    kube::apply "${OFFLINE_DIR}/manifests/calico.yaml"
    kube::apply "${OFFLINE_DIR}/manifests/calicoctl.yaml"
    kube::wait "calico-kube-controllers" "kube-system" "pods" "k8s-app=calico-kube-controllers"
    kube::wait "calico-node" "kube-system" "pods" "k8s-app=calico-node"
  elif [[ "$KUBE_NETWORK" == "cilium" ]]; then
    log::info "[network]" "add cilium"
    local cilium_file="${OFFLINE_DIR}/manifests/cilium.yml"
    local cilium_hubble_file="${OFFLINE_DIR}/manifests/cilium_hubble.yml"
    utils::download_file "https://cdn.jsdelivr.net/gh/cilium/cilium@${CILIUM_VERSION}/install/kubernetes/quick-install.yaml" "${cilium_file}"
    utils::download_file "https://cdn.jsdelivr.net/gh/cilium/cilium@${CILIUM_VERSION}/install/kubernetes/quick-hubble-install.yaml" "${cilium_hubble_file}"
    local all_node=""
    if [[ "${MASTER_NODES}" == "" && "${WORKER_NODES}" == "" ]]; then
      command::exec "${MGMT_NODE}" "
        kubectl get node -o jsonpath='{range.items[*]}{.status.addresses[?(@.type==\"InternalIP\")].address} {end}'
      "
      get::command_output "all_node" "$?"
    else
      all_node="${MASTER_NODES} ${WORKER_NODES}"
    fi
    for host in $all_node
    do
      command::exec "${host}" "mount bpffs -t bpf /sys/fs/bpf"
      check::exit_code "$?" "network" "${host}: mount bpf filesystem"
    done
    command::exec "${MGMT_NODE}" "
      sed -i \"s#10.0.0.0/8#${KUBE_POD_SUBNET}#g\" \"${cilium_file}\"
    "
    kube::apply "${cilium_file}"
    kube::wait "cilium-node" "kube-system" "pods" "k8s-app=cilium"
    kube::wait "cilium-operator" "kube-system" "pods" "name=cilium-operator"
    kube::apply "${cilium_hubble_file}"
    kube::wait "hubble-relay" "kube-system" "pods" "k8s-app=hubble-relay"
    log::info "[monitor]" "add hubble-ui ingress"
    kube::apply "hubble-ui ingress" "
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: hubble-ui
  namespace: kube-system
  annotations:
    kubernetes.io/ingress.class: ${KUBE_INGRESS}
spec:
  rules:
  - host: hubble-ui.cluster.local
    http:
      paths:
      - path: /
        pathType: Prefix
        backend:
          service:
            name: hubble-ui
            port:
              number: 80
"
    # shellcheck disable=SC2181
    if [[ "$?" == "0" ]]; then
      get::ingress_conn
      log::access "[ingress]" "curl -H 'Host:hubble-ui.cluster.local' http://${INGRESS_CONN}"
    fi
  else
    log::warning "[network]" "No $KUBE_NETWORK config."
  fi
}
# Add the ingress component
function add::ingress() {
  # install ingress-nginx
  log::info "[ingress]" "add ingress-nginx"
  command::exec "${MGMT_NODE}" "
    $(declare -f utils::retry)
    helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx
    utils::retry 6 helm install ingress-nginx ingress-nginx/ingress-nginx \
      --namespace ingress-nginx --create-namespace \
      --version ${INGRESS_NGINX} \
      --set controller.admissionWebhooks.patch.image.registry=registry.hub.docker.com \
      --set controller.admissionWebhooks.patch.image.image=k8sgcrioingressnginx/kube-webhook-certgen \
      --set controller.admissionWebhooks.patch.image.digest= \
      --set controller.admissionWebhooks.enabled=true \
      --set controller.admissionWebhooks.extraEnvs\[0\].name=\"TZ\" \
      --set controller.admissionWebhooks.extraEnvs\[0\].value=\"Asia/Shanghai\" \
      --set controller.kind=DaemonSet \
      --set controller.replicaCount=1 \
      --set controller.minAvailable=1 \
      --set controller.image.registry=registry.hub.docker.com \
      --set controller.image.image=k8sgcrioingressnginx/controller \
      --set controller.image.digest= \
      --set controller.ingressClassResource.name=nginx \
      --set controller.ingressClassResource.enable=true \
      --set controller.ingressClassResource.default=false \
      --set controller.service.enabled=true \
      --set controller.service.type=NodePort \
      --set controller.service.enableHttp=true \
      --set controller.service.enableHttps=true \
      --set controller.service.nodePorts.http=30080 \
      --set controller.service.nodePorts.https=30443 \
      --set controller.extraEnvs\[0\].name=\"TZ\" \
      --set controller.extraEnvs\[0\].value=\"Asia/Shanghai\" \
      --set defaultBackend.image.registry=registry.hub.docker.com \
      --set defaultBackend.image.image=gcmirrors/defaultbackend-amd64 \
      --set defaultBackend.enabled=true \
      --set defaultBackend.name=defaultbackend \
      --set defaultBackend.replicaCount=1 \
      --set defaultBackend.minAvailable=1 \
      --set defaultBackend.extraEnvs\[0\].name=\"TZ\" \
      --set defaultBackend.extraEnvs\[0\].value=\"Asia/Shanghai\" \
      --set rbac.create=true \
      --set serviceAccount.create=true \
      --set podSecurityPolicy.enabled=true
    kubectl get pod -n ingress-nginx -o wide
    kubectl get svc -n ingress-nginx -o wide
  "
  # install nginx
  log::info "[nginx]" "add nginx"
  command::exec "${MGMT_NODE}" "
    sudo yum -y install nginx
    nginx -v
    sudo systemctl enable nginx
    sudo service nginx start
    cat << EOF > /etc/nginx/conf.d/k8s.ingress.conf
upstream k8s-ingress {
$(for h in $MASTER_NODES $WORKER_NODES;do echo "  server $h:30080 max_fails=1 fail_timeout=15s;";done)
  keepalive 128;
}
server {
  listen ${NGINX_HTTP_PORT};
  location / {
    proxy_http_version 1.1;
    proxy_set_header Connection \"\";
    proxy_next_upstream error;
    proxy_set_header X-Real-IP \\\$remote_addr;
    proxy_set_header X-Forwarded-For \\\$proxy_add_x_forwarded_for;
    proxy_set_header Host \\\$http_host;
    proxy_set_header X-Nginx-Proxy true;
    proxy_pass http://k8s-ingress/;
  }
}
EOF
    sudo nginx -s reload
  "
}
# Add addon components
function add::addon() {
  # TODO add addon
  log::warning "[TODO]" "add addon"
}
# Add the monitoring component
function add::monitor() {
  # TODO add monitor
  log::warning "[TODO]" "add monitor"
}
# Add the logging component
function add::log() {
  # TODO add log
  log::warning "[TODO]" "add log"
}
# Add storage
function add::storage() {
  # TODO add storage
  log::warning "[TODO]" "add storage"
}
# Add the web UI
function add::ui() {
  local path="/tmp"
  # install rancher
  log::info "[rancher]" "add rancher"
  command::exec "${MGMT_NODE}" "
    $(declare -f utils::retry)
    cd ${path}
    helm repo add rancher-stable http://rancher-mirror.oss-cn-beijing.aliyuncs.com/server-charts/stable
    utils::retry 6 helm pull rancher-stable/rancher --version ${RANCHER_VERSION} --untar
    cat << EOF > rancher/templates/service.yaml
apiVersion: v1
kind: Service
metadata:
  name: {{ template \"rancher.fullname\" . }}
  labels:
{{ include \"rancher.labels\" . | indent 4 }}
spec:
  type: NodePort
  ports:
  - port: 80
    targetPort: 80
    protocol: TCP
    name: http
    # use a fixed nodePort
    nodePort: 31080
  - port: 443
    targetPort: 444
    protocol: TCP
    name: https-internal
    # use a fixed nodePort
    nodePort: 31443
  selector:
    app: {{ template \"rancher.fullname\" . }}
EOF
    helm install rancher ./rancher \
      --namespace cattle-system --create-namespace \
      --set replicas=1 \
      --set extraEnv\[0\].name=\"TZ\" \
      --set extraEnv\[0\].value=\"Asia/Shanghai\" \
      --set ingress.tls.source=secret \
      --set ingress.enabled=false
  "
  log::info "[rancher]" "Get the initial password: kubectl get secret --namespace cattle-system bootstrap-secret -o go-template='{{.data.bootstrapPassword|base64decode}}{{ \"\n\" }}'"
  log::info "[rancher]" "Reset the initial password: kubectl -n cattle-system exec \$(kubectl -n cattle-system get pods -l app=rancher | grep '1/1' | head -1 | awk '{ print \$1 }') -- reset-password"
}
# Operations tasks
function add::ops() {
  local master_num
  master_num=$(awk '{print NF}' <<< "${MASTER_NODES}")
  log::info "[ops]" "add anti-affinity strategy to coredns"
  command::exec "${MGMT_NODE}" """
    kubectl -n kube-system patch deployment coredns --patch '{\"spec\": {\"template\": {\"spec\": {\"affinity\":{\"podAntiAffinity\":{\"preferredDuringSchedulingIgnoredDuringExecution\":[{\"weight\":100,\"podAffinityTerm\":{\"labelSelector\":{\"matchExpressions\":[{\"key\":\"k8s-app\",\"operator\":\"In\",\"values\":[\"kube-dns\"]}]},\"topologyKey\":\"kubernetes.io/hostname\"}}]}}}}}}' --record
  """
  check::exit_code "$?" "ops" "add anti-affinity strategy to coredns"
  log::info "[ops]" "add etcd snapshot cronjob"
  command::exec "${MGMT_NODE}" "
    kubeadm config images list --config=/etc/kubernetes/kubeadmcfg.yaml 2>/dev/null | grep etcd:
  "
  get::command_output "etcd_image" "$?"
  command::exec "${MGMT_NODE}" "
    kubectl get node --selector='node-role.kubernetes.io/master' --no-headers | wc -l
  "
  get::command_output "master_num" "$?"
  [[ "${master_num:-0}" == "0" ]] && master_num=1
  kube::apply "etcd-snapshot" """
---
apiVersion: batch/v1beta1
kind: CronJob
metadata:
  name: etcd-snapshot
  namespace: kube-system
spec:
  schedule: '0 */6 * * *'
  successfulJobsHistoryLimit: 3
  suspend: false
  concurrencyPolicy: Allow
  failedJobsHistoryLimit: 3
  jobTemplate:
    spec:
      backoffLimit: 6
      parallelism: ${master_num}
      completions: ${master_num}
      template:
        metadata:
          labels:
            app: etcd-snapshot
        spec:
          affinity:
            podAntiAffinity:
              requiredDuringSchedulingIgnoredDuringExecution:
              - labelSelector:
                  matchExpressions:
                  - key: app
                    operator: In
                    values:
                    - etcd-snapshot
                topologyKey: 'kubernetes.io/hostname'
          containers:
          - name: etcd-snapshot
            image: ${etcd_image:-${KUBE_IMAGE_REPO}/etcd:3.4.13-0}
            imagePullPolicy: IfNotPresent
            args:
            - -c
            - etcdctl --endpoints=https://127.0.0.1:2379 --cacert=/etc/kubernetes/pki/etcd/ca.crt
              --cert=/etc/kubernetes/pki/etcd/healthcheck-client.crt --key=/etc/kubernetes/pki/etcd/healthcheck-client.key
              snapshot save /backup/etcd-snapshot-\\\\\\\$(date +%Y-%m-%d_%H:%M:%S_%Z).db
              && echo 'delete old backups' && { find /backup -type f -mtime +30 -exec rm -fv {} \\; || echo error; }
            command:
            - /usr/bin/bash
            env:
            - name: ETCDCTL_API
              value: '3'
            resources: {}
            terminationMessagePath: /dev/termination-log
            terminationMessagePolicy: File
            volumeMounts:
            - name: etcd-certs
              mountPath: /etc/kubernetes/pki/etcd
              readOnly: true
            - name: backup
              mountPath: /backup
            - name: etc
              mountPath: /etc
            - name: bin
              mountPath: /usr/bin
            - name: lib64
              mountPath: /lib64
          dnsPolicy: ClusterFirst
          hostNetwork: true
          nodeSelector:
            node-role.kubernetes.io/master: ''
          tolerations:
          - effect: NoSchedule
            operator: Exists
          restartPolicy: OnFailure
          schedulerName: default-scheduler
          securityContext: {}
          terminationGracePeriodSeconds: 30
          volumes:
          - name: etcd-certs
            hostPath:
              path: /etc/kubernetes/pki/etcd
              type: DirectoryOrCreate
          - name: backup
            hostPath:
              path: /var/lib/etcd/backups
              type: DirectoryOrCreate
          - name: etc
            hostPath:
              path: /etc
          - name: bin
            hostPath:
              path: /usr/bin
          - name: lib64
            hostPath:
              path: /lib64
"""
  # shellcheck disable=SC2181
  [[ "$?" == "0" ]] && log::access "[ops]" "etcd backup directory: /var/lib/etcd/backups"
  command::exec "${MGMT_NODE}" "
    jobname=\"etcd-snapshot-$(date +%s)\"
    kubectl create job --from=cronjob/etcd-snapshot \${jobname} -n kube-system && \
    kubectl wait --for=condition=complete job/\${jobname} -n kube-system
  "
  check::exit_code "$?" "ops" "trigger etcd backup"
}
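
# Hedged restore sketch: snapshots land under /var/lib/etcd/backups on each master. A manual
# restore (file name is an example, and the exact procedure depends on the cluster) looks like:
#   ETCDCTL_API=3 etcdctl snapshot restore /var/lib/etcd/backups/etcd-snapshot-2021-01-01_00:00:00_UTC.db \
#     --data-dir /var/lib/etcd-restore
# followed by pointing the etcd static pod at the restored data directory.
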
# Reset a node
function reset::node() {
  local host=$1
  log::info "[reset]" "node $host"
  command::exec "${host}" "
    set +ex
    cri_socket=\"\"
    [ -S /var/run/crio/crio.sock ] && cri_socket=\"--cri-socket /var/run/crio/crio.sock\"
    [ -S /run/containerd/containerd.sock ] && cri_socket=\"--cri-socket /run/containerd/containerd.sock\"
    kubeadm reset -f \$cri_socket
    [ -f \"\$(which kubelet)\" ] && { systemctl stop kubelet; find /var/lib/kubelet | xargs -n 1 findmnt -n -o TARGET -T | sort | uniq | xargs -r umount -v; yum remove -y kubeadm kubelet kubectl; }
    [ -d /etc/kubernetes ] && rm -rf /etc/kubernetes/* /var/lib/kubelet/* /var/lib/etcd/* \$HOME/.kube /etc/cni/net.d/* /var/lib/dockershim/* /var/lib/cni/* /var/run/kubernetes/*
    [ -f \"\$(which docker)\" ] && { docker rm -f -v \$(docker ps | grep kube | awk '{print \$1}'); systemctl stop docker; rm -rf \$HOME/.docker /etc/docker/* /var/lib/docker/*; yum remove -y docker; }
    [ -f \"\$(which containerd)\" ] && { crictl rm \$(crictl ps -a -q); systemctl stop containerd; rm -rf /etc/containerd/* /var/lib/containerd/*; yum remove -y containerd.io; }
    [ -f \"\$(which crio)\" ] && { crictl rm \$(crictl ps -a -q); systemctl stop crio; rm -rf /etc/crictl.yaml /etc/crio/* /var/run/crio/*; yum remove -y cri-o; }
    [ -f \"\$(which runc)\" ] && { find /run/containers/ /var/lib/containers/ | xargs -n 1 findmnt -n -o TARGET -T | sort | uniq | xargs -r umount -v; rm -rf /var/lib/containers/* /var/run/containers/*; yum remove -y runc; }
    [ -f \"\$(which haproxy)\" ] && { systemctl stop haproxy; rm -rf /etc/haproxy/*; yum remove -y haproxy; }
    sed -i -e \"/$KUBE_APISERVER/d\" -e '/-worker-/d' -e '/-master-/d' /etc/hosts
    sed -i '/## Kainstall managed start/,/## Kainstall managed end/d' /etc/security/limits.conf /etc/systemd/system.conf /etc/bashrc /etc/rc.local /etc/audit/rules.d/audit.rules
    [ -d /var/lib/elasticsearch ] && rm -rf /var/lib/elasticsearch/*
    [ -d /var/lib/longhorn ] && rm -rf /var/lib/longhorn/*
    [ -d \"${OFFLINE_DIR:-/tmp/abc}\" ] && rm -rf \"${OFFLINE_DIR:-/tmp/abc}\"
    for repo in kubernetes.repo docker-ce.repo devel_kubic_libcontainers_stable.repo elrepo.repo
    do
      [ -f /etc/yum.repos.d/\${repo} ] && rm -f /etc/yum.repos.d/\${repo}
    done
    ipvsadm --clear
    iptables -F && iptables -t nat -F && iptables -t mangle -F && iptables -X
    for int in kube-ipvs0 cni0 docker0 dummy0 flannel.1 cilium_host cilium_net cilium_vxlan lxc_health nodelocaldns
    do
      [ -d /sys/class/net/\${int} ] && ip link delete \${int}
    done
    modprobe -r ipip
    echo done.
  "
  check::exit_code "$?" "reset" "$host: reset"
}
# Reset all nodes
function reset::cluster() {
  local all_node=""
  command::exec "${MGMT_NODE}" "
    kubectl get node -o jsonpath='{range.items[*]}{.status.addresses[?(@.type==\"InternalIP\")].address} {end}'
  "
  get::command_output "all_node" "$?"
  all_node=$(echo "${WORKER_NODES} ${MASTER_NODES} ${all_node}" | awk '{for (i=1;i<=NF;i++) if (!a[$i]++) printf("%s%s",$i,FS)}')
  for host in $all_node
  do
    reset::node "$host"
  done
}
# Load the offline package onto nodes
function offline::load() {
  local role="${1:-}"
  local hosts=""
  if [[ "${role}" == "master" ]]; then
    hosts="${MASTER_NODES}"
  elif [[ "${role}" == "worker" ]]; then
    hosts="${WORKER_NODES}"
  fi
  for host in ${hosts}
  do
    log::info "[offline]" "${role} ${host}: load offline file"
    command::exec "${host}" "[[ ! -d \"${OFFLINE_DIR}\" ]] && { mkdir -pv \"${OFFLINE_DIR}\"; chmod 777 \"${OFFLINE_DIR}\"; } ||:"
    check::exit_code "$?" "offline" "$host: mkdir offline dir" "exit"
    if [[ "${UPGRADE_KERNEL_TAG:-}" == "1" ]]; then
      command::scp "${host}" "${TMP_DIR}/packages/kernel/*" "${OFFLINE_DIR}"
      check::exit_code "$?" "offline" "scp kernel file to $host" "exit"
    else
      log::info "[offline]" "${role} ${host}: copy offline file"
      command::scp "${host}" "${TMP_DIR}/packages/kubeadm/*" "${OFFLINE_DIR}"
      check::exit_code "$?" "offline" "scp kube file to $host" "exit"
      command::scp "${host}" "${TMP_DIR}/packages/all/*" "${OFFLINE_DIR}"
      check::exit_code "$?" "offline" "scp all file to $host" "exit"
      if [[ "${role}" == "worker" ]]; then
        command::scp "${host}" "${TMP_DIR}/packages/worker/*" "${OFFLINE_DIR}"
        check::exit_code "$?" "offline" "scp worker file to $host" "exit"
      fi
      command::scp "${host}" "${TMP_DIR}/images/${role}.tgz" "${OFFLINE_DIR}"
      check::exit_code "$?" "offline" "scp ${role} images to $host" "exit"
      command::scp "${host}" "${TMP_DIR}/images/all.tgz" "${OFFLINE_DIR}"
      check::exit_code "$?" "offline" "scp all images to $host" "exit"
    fi
    log::info "[offline]" "${role} ${host}: install package"
    command::exec "${host}" "yum localinstall -y --skip-broken ${OFFLINE_DIR}/*.rpm"
    check::exit_code "$?" "offline" "${role} ${host}: install package" "exit"
    if [[ "${UPGRADE_KERNEL_TAG:-}" != "1" ]]; then
      command::exec "${host}" "
        set -e
        for target in firewalld python-firewall firewalld-filesystem iptables; do
          systemctl stop \$target &>/dev/null || true
          systemctl disable \$target &>/dev/null || true
        done
        systemctl start docker && \
        cd ${OFFLINE_DIR} && \
        gzip -d -c ${1}.tgz | docker load && gzip -d -c all.tgz | docker load
      "
      check::exit_code "$?" "offline" "$host: load images" "exit"
    fi
    command::exec "${host}" "rm -rf ${OFFLINE_DIR:-/tmp/abc}"
    check::exit_code "$?" "offline" "$host: clean offline file"
  done
  command::scp "${MGMT_NODE}" "${TMP_DIR}/manifests" "${OFFLINE_DIR}"
  check::exit_code "$?" "offline" "scp manifests file to ${MGMT_NODE}" "exit"
  command::scp "${MGMT_NODE}" "${TMP_DIR}/bins" "${OFFLINE_DIR}"
  check::exit_code "$?" "offline" "scp bins file to ${MGMT_NODE}" "exit"
}
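
# Expected offline package layout, as consumed above (a sketch inferred from the scp calls;
# the archive itself is produced separately):
#   packages/kubeadm/*.rpm  packages/all/*.rpm  packages/worker/*.rpm  packages/kernel/*.rpm
#   images/master.tgz  images/worker.tgz  images/all.tgz
#   manifests/  bins/
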
# Load the offline package onto all cluster nodes
function offline::cluster() {
  [ ! -f "${OFFLINE_FILE}" ] && { log::error "[offline]" "not found ${OFFLINE_FILE}" ; exit 1; }
  log::info "[offline]" "Unzip offline package on local."
  tar zxf "${OFFLINE_FILE}" -C "${TMP_DIR}/"
  check::exit_code "$?" "offline" "Unzip offline package"
  offline::load "master"
  offline::load "worker"
}
# Initialize the cluster
function init::cluster() {
  MGMT_NODE=$(echo "${MASTER_NODES}" | awk '{print $1}')
  # load the offline package
  [[ "${OFFLINE_TAG:-}" == "1" ]] && offline::cluster
  # 1. initialize nodes
  init::node
  # 2. install packages
  install::package
  # 3. kubeadm init
  kubeadm::init
  # 4. join the cluster
  kubeadm::join
  # 5. add the network component
  add::network
  # 6. add addons
  add::addon
  # 7. add ingress
  add::ingress
  # 8. add storage
  [[ "${STORAGE_TAG:-}" == "1" ]] && add::storage
  # 9. add the web ui
  add::ui
  # 10. add monitoring
  [[ "${MONITOR_TAG:-}" == "1" ]] && add::monitor
  # 11. add logging
  [[ "${LOG_TAG:-}" == "1" ]] && add::log
  # 12. operations tasks
  add::ops
  # 13. show cluster status
  kube::status
}
# Add nodes
function add::node() {
  # load the offline package
  [[ "${OFFLINE_TAG:-}" == "1" ]] && offline::cluster
  # when KUBE_VERSION is not specified, use the cluster's version
  if [[ "${KUBE_VERSION}" == "" || "${KUBE_VERSION}" == "latest" ]]; then
    command::exec "${MGMT_NODE}" "
      kubectl get node --selector='node-role.kubernetes.io/master' -o jsonpath='{range.items[*]}{.status.nodeInfo.kubeletVersion } {end}' | awk -F'v| ' '{print \$2}'
    "
    get::command_output "KUBE_VERSION" "$?" "exit"
  fi
  # 1. initialize the new nodes
  init::add_node
  # 2. install packages
  install::package
  # 3. join the cluster
  kubeadm::join
  # 4. add the apiservers to haproxy
  config::haproxy_backend "add"
  # 5. update etcd snapshot replicas
  config::etcd_snapshot
  # 6. show cluster status
  kube::status
}
# Delete nodes
function del::node() {
  config::haproxy_backend "remove"
  local cluster_nodes=""
  local del_hosts_cmd=""
  command::exec "${MGMT_NODE}" "
    kubectl get node -o jsonpath='{range.items[*]}{.status.addresses[?(@.type==\"InternalIP\")].address} {.metadata.name }\\n{end}'
  "
  get::command_output "cluster_nodes" "$?" exit
  for host in $MASTER_NODES
  do
    command::exec "${MGMT_NODE}" "
      etcd_pod=\$(kubectl -n kube-system get pods -l component=etcd --field-selector=status.phase=Running -o jsonpath='{\$.items[0].metadata.name}')
      etcd_node=\$(kubectl -n kube-system exec \$etcd_pod -- sh -c \"export ETCDCTL_API=3 ETCDCTL_CACERT=/etc/kubernetes/pki/etcd/ca.crt ETCDCTL_CERT=/etc/kubernetes/pki/etcd/server.crt ETCDCTL_KEY=/etc/kubernetes/pki/etcd/server.key ETCDCTL_ENDPOINTS=https://127.0.0.1:2379; etcdctl member list\"| grep $host | awk -F, '{print \$1}')
      echo \"\$etcd_pod \$etcd_node\"
      kubectl -n kube-system exec \$etcd_pod -- sh -c \"export ETCDCTL_API=3 ETCDCTL_CACERT=/etc/kubernetes/pki/etcd/ca.crt ETCDCTL_CERT=/etc/kubernetes/pki/etcd/server.crt ETCDCTL_KEY=/etc/kubernetes/pki/etcd/server.key ETCDCTL_ENDPOINTS=https://127.0.0.1:2379; etcdctl member remove \$etcd_node; etcdctl member list\"
    "
    check::exit_code "$?" "del" "remove $host etcd member"
  done
  for host in $MASTER_NODES $WORKER_NODES
  do
    log::info "[del]" "node $host"
    local node_name; node_name=$(echo -ne "${cluster_nodes}" | grep "${host}" | awk '{print $2}')
    if [[ "${node_name}" == "" ]]; then
      log::warning "[del]" "node $host not found."
      read -r -t 10 -n 1 -p "Do you need to reset the node (y/n)? " answer
      [[ -z "$answer" || "$answer" != "y" ]] && exit || echo
    else
      log::info "[del]" "drain $host"
      command::exec "${MGMT_NODE}" "kubectl drain $node_name --force --ignore-daemonsets --delete-local-data"
      check::exit_code "$?" "del" "$host: drain"
      log::info "[del]" "delete node $host"
      command::exec "${MGMT_NODE}" "kubectl delete node $node_name"
      check::exit_code "$?" "del" "$host: delete"
      sleep 3
    fi
    reset::node "$host"
    del_hosts_cmd="${del_hosts_cmd}\nsed -i "/$host/d" /etc/hosts"
  done
  for host in $(echo -ne "${cluster_nodes}" | awk '{print $1}')
  do
    log::info "[del]" "$host: remove del node hostname resolution"
    command::exec "${host}" "
      $(echo -ne "${del_hosts_cmd}")
    "
    check::exit_code "$?" "del" "remove del node hostname resolution"
  done
  [ "$MASTER_NODES" != "" ] && config::etcd_snapshot
  kube::status
}
  2383. # 升级集群
  2384. function upgrade::cluster() {
  2385. log::info "[upgrade]" "upgrade to $KUBE_VERSION"
  2386. log::info "[upgrade]" "backup cluster"
  2387. add::ops
  2388. local stable_version="2"
  2389. command::exec "127.0.0.1" "wget https://storage.googleapis.com/kubernetes-release/release/stable.txt -q -O -"
  2390. get::command_output "stable_version" "$?" && stable_version="${stable_version#v}"
  2391. local node_hosts="$MASTER_NODES $WORKER_NODES"
  2392. if [[ "$node_hosts" == " " ]]; then
  2393. command::exec "${MGMT_NODE}" "
  2394. kubectl get node -o jsonpath='{range.items[*]}{.metadata.name } {end}'
  2395. "
  2396. get::command_output "node_hosts" "$?" exit
  2397. fi
  local skip_plan=${SKIP_UPGRADE_PLAN,,}
  for host in ${node_hosts}
  do
    log::info "[upgrade]" "node: $host"
    local local_version=""
    command::exec "${host}" "kubectl version --client --short | awk '{print \$3}'"
    get::command_output "local_version" "$?" && local_version="${local_version#v}"
    if [[ "${KUBE_VERSION}" != "latest" ]]; then
      if [[ "${KUBE_VERSION}" == "${local_version}" ]]; then
        log::warning "[check]" "The specified version(${KUBE_VERSION}) is the same as the local version(${local_version})!"
        continue
      fi
      if [[ $(utils::version_to_number "$KUBE_VERSION") -lt $(utils::version_to_number "${local_version}") ]]; then
        log::warning "[check]" "The specified version($KUBE_VERSION) is lower than the local version(${local_version})!"
        continue
      fi
      if [[ $(utils::version_to_number "$KUBE_VERSION") -gt $(utils::version_to_number "${stable_version}") ]]; then
        log::warning "[check]" "The specified version($KUBE_VERSION) is newer than the stable version(${stable_version})!"
        continue
      fi
    else
      if [[ $(utils::version_to_number "${local_version}") -ge $(utils::version_to_number "${stable_version}") ]]; then
        log::warning "[check]" "The local version($local_version) is greater than or equal to the stable version(${stable_version})!"
        continue
      fi
    fi
    command::exec "${MGMT_NODE}" "kubectl drain ${host} --ignore-daemonsets --delete-local-data"
    check::exit_code "$?" "upgrade" "drain ${host} node" "exit"
    sleep 5
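    # The first host runs the full upgrade plan ('init'); the remaining hosts only run the node upgrade ('node').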
    if [[ "${skip_plan}" == "false" ]]; then
      command::exec "${host}" "$(declare -f script::upgrage_kube); script::upgrage_kube 'init' '$KUBE_VERSION'"
      check::exit_code "$?" "upgrade" "plan and upgrade cluster on ${host}" "exit"
      command::exec "${host}" "$(declare -f utils::retry); utils::retry 10 kubectl get node"
      check::exit_code "$?" "upgrade" "${host}: upgrade" "exit"
      skip_plan=true
    else
      command::exec "${host}" "$(declare -f script::upgrage_kube); script::upgrage_kube 'node' '$KUBE_VERSION'"
      check::exit_code "$?" "upgrade" "upgrade ${host} node" "exit"
    fi
    command::exec "${MGMT_NODE}" "kubectl wait --for=condition=Ready node/${host} --timeout=120s"
    check::exit_code "$?" "upgrade" "${host} ready"
    sleep 5
    command::exec "${MGMT_NODE}" "$(declare -f utils::retry); utils::retry 6 kubectl uncordon ${host}"
    check::exit_code "$?" "upgrade" "uncordon ${host} node"
    sleep 5
  done
  kube::status
}
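# Illustrative only: per node, the loop above roughly mirrors the manual kubeadm upgrade procedure:
#   kubectl drain <node> --ignore-daemonsets --delete-local-data
#   kubeadm upgrade apply v<version>   # first (control-plane) node
#   kubeadm upgrade node               # every other node
#   # then upgrade the kubelet/kubectl packages, restart kubelet, and
#   kubectl uncordon <node>
# The version checks only move a node forward, and never past the published stable release.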
# Update this script file
function update::self() {
  log::info "[update]" "download kainstall script to $0"
  command::exec "127.0.0.1" "
    wget --timeout=10 --waitretry=3 --tries=5 --retry-connrefused https://cdn.jsdelivr.net/gh/lework/kainstall@master/kainstall-centos.sh -O /tmp/kainstall-centos.sh || exit 1
    /bin/mv -fv /tmp/kainstall-centos.sh \"$0\"
    chmod +x \"$0\"
  "
  check::exit_code "$?" "update" "kainstall script"
}
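# Example: `bash kainstall-centos.sh update` fetches the latest kainstall-centos.sh from the
# jsDelivr mirror and replaces this file in place (the target path is taken from $0).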
# Data transformation and constraints
function transform::data() {
  MASTER_NODES=$(echo "${MASTER_NODES}" | tr ',' ' ')
  WORKER_NODES=$(echo "${WORKER_NODES}" | tr ',' ' ')
  if ! utils::is_element_in_array "$KUBE_CRI" docker containerd cri-o ; then
    log::error "[limit]" "$KUBE_CRI is not supported, only [docker,containerd,cri-o]"
    exit 1
  fi
  [[ "$KUBE_CRI" != "docker" && "${OFFLINE_TAG:-}" == "1" ]] && { log::error "[limit]" "$KUBE_CRI is not supported offline, only docker"; exit 1; }
  [[ "$KUBE_CRI" == "containerd" && "${KUBE_CRI_ENDPOINT}" == "/var/run/dockershim.sock" ]] && KUBE_CRI_ENDPOINT="unix:///run/containerd/containerd.sock"
  [[ "$KUBE_CRI" == "cri-o" && "${KUBE_CRI_ENDPOINT}" == "/var/run/dockershim.sock" ]] && KUBE_CRI_ENDPOINT="unix:///var/run/crio/crio.sock"
  kubelet_nodeRegistration="nodeRegistration:
  criSocket: ${KUBE_CRI_ENDPOINT:-/var/run/dockershim.sock}
  kubeletExtraArgs:
    runtime-cgroups: /system.slice/${KUBE_CRI//-/}.service
    pod-infra-container-image: ${KUBE_IMAGE_REPO}/pause:${PAUSE_VERSION:-3.6}
"
}
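# Example (illustrative, assuming --cri containerd and the default image repo): the
# kubelet_nodeRegistration snippet above renders roughly as:
#   nodeRegistration:
#     criSocket: unix:///run/containerd/containerd.sock
#     kubeletExtraArgs:
#       runtime-cgroups: /system.slice/containerd.service
#       pod-infra-container-image: <KUBE_IMAGE_REPO>/pause:3.6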
# Usage help
function help::usage() {
  cat << EOF
Install a Kubernetes cluster using kubeadm.

Usage:
  $(basename "$0") [command]

Available Commands:
  init            Initialize a Kubernetes cluster
  reset           Reset the Kubernetes cluster
  add             Add nodes to the cluster
  del             Remove nodes from the cluster
  renew-cert      Renew all available certificates
  upgrade         Upgrade the kubeadm cluster
  update          Update this script file

Flags:
  -m,--master           master node(s), comma separated, default: ''
  -w,--worker           worker node(s), comma separated, default: ''
  -u,--user             ssh user, default: ${SSH_USER}
  -p,--password         ssh password
  --private-key         ssh private key
  -P,--port             ssh port, default: ${SSH_PORT}
  -v,--version          kube version, default: ${KUBE_VERSION}
  -n,--network          cluster network, choose: [flannel,calico,cilium], default: ${KUBE_NETWORK}
  -i,--ingress          ingress controller, choose: [nginx], default: ${KUBE_INGRESS}
  -ui,--ui              cluster web ui, use: [rancher]
  -a,--addon            cluster addon, use: []
  -M,--monitor          cluster monitoring, use: [prometheus]
  -l,--log              cluster logging, choose: [elasticsearch]
  -s,--storage          cluster storage, use: [rook]
  --cri                 cri tools, choose: [docker,containerd,cri-o], default: ${KUBE_CRI}
  --cri-version         cri version, default: ${KUBE_CRI_VERSION}
  --cri-endpoint        cri endpoint, default: ${KUBE_CRI_ENDPOINT}
  -U,--upgrade-kernel   upgrade the kernel
  -of,--offline-file    offline file to load
  --10years             certificates valid for 10 years
  --sudo                sudo mode
  --sudo-user           sudo user
  --sudo-password       sudo user password

Example:
  [init cluster]
  $0 init \\
    --master 192.168.77.130,192.168.77.131,192.168.77.132 \\
    --worker 192.168.77.133,192.168.77.134,192.168.77.135 \\
    --user root \\
    --password 123456 \\
    --version 1.20.4

  [reset cluster]
  $0 reset \\
    --user root \\
    --password 123456

  [add node]
  $0 add \\
    --master 192.168.77.140,192.168.77.141 \\
    --worker 192.168.77.143,192.168.77.144 \\
    --user root \\
    --password 123456 \\
    --version 1.20.4

  [del node]
  $0 del \\
    --master 192.168.77.140,192.168.77.141 \\
    --worker 192.168.77.143,192.168.77.144 \\
    --user root \\
    --password 123456

  [other]
  $0 renew-cert --user root --password 123456
  $0 upgrade --version 1.20.4 --user root --password 123456
  $0 update
  $0 add --ingress traefik
  $0 add --monitor prometheus
  $0 add --log elasticsearch
  $0 add --storage rook
  $0 add --ui dashboard
  $0 add --addon nodelocaldns
EOF
  exit 1
}
######################################################################################################
# main
######################################################################################################

[ "$#" == "0" ] && help::usage

while [ "${1:-}" != "" ]; do
  case $1 in
    init ) INIT_TAG=1
      ;;
    reset ) RESET_TAG=1
      ;;
    add ) ADD_TAG=1
      ;;
    del ) DEL_TAG=1
      ;;
    renew-cert ) RENEW_CERT_TAG=1
      ;;
    upgrade ) UPGRADE_TAG=1
      ;;
    update ) UPDATE_TAG=1
      ;;
    -m | --master ) shift
      MASTER_NODES=${1:-$MASTER_NODES}
      ;;
    -w | --worker ) shift
      WORKER_NODES=${1:-$WORKER_NODES}
      ;;
    -u | --user ) shift
      SSH_USER=${1:-$SSH_USER}
      ;;
    -p | --password ) shift
      SSH_PASSWORD=${1:-$SSH_PASSWORD}
      ;;
    --private-key ) shift
      SSH_PRIVATE_KEY=${1:-$SSH_PRIVATE_KEY}
      ;;
    -P | --port ) shift
      SSH_PORT=${1:-$SSH_PORT}
      ;;
    -v | --version ) shift
      KUBE_VERSION=${1:-$KUBE_VERSION}
      ;;
    -n | --network ) shift
      NETWORK_TAG=1
      KUBE_NETWORK=${1:-$KUBE_NETWORK}
      ;;
    -i | --ingress ) shift
      INGRESS_TAG=1
      KUBE_INGRESS=${1:-$KUBE_INGRESS}
      ;;
    -M | --monitor ) shift
      MONITOR_TAG=1
      KUBE_MONITOR=${1:-$KUBE_MONITOR}
      ;;
    -l | --log ) shift
      LOG_TAG=1
      KUBE_LOG=${1:-$KUBE_LOG}
      ;;
    -s | --storage ) shift
      STORAGE_TAG=1
      KUBE_STORAGE=${1:-$KUBE_STORAGE}
      ;;
    -ui | --ui ) shift
      UI_TAG=1
      ;;
    -a | --addon ) shift
      ADDON_TAG=1
      ;;
    --cri ) shift
      KUBE_CRI=${1:-$KUBE_CRI}
      ;;
    --cri-version ) shift
      KUBE_CRI_VERSION=${1:-$KUBE_CRI_VERSION}
      ;;
    --cri-endpoint ) shift
      KUBE_CRI_ENDPOINT=${1:-$KUBE_CRI_ENDPOINT}
      ;;
    -U | --upgrade-kernel ) UPGRADE_KERNEL_TAG=1
      ;;
    -of | --offline-file ) shift
      OFFLINE_TAG=1
      OFFLINE_FILE=${1:-$OFFLINE_FILE}
      ;;
    --10years ) CERT_YEAR_TAG=1
      ;;
    --sudo ) SUDO_TAG=1
      ;;
    --sudo-user ) shift
      SUDO_USER=${1:-$SUDO_USER}
      ;;
    --sudo-password ) shift
      SUDO_PASSWORD=${1:-}
      ;;
    * ) help::usage
      exit 1
  esac
  shift
done
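# Example (illustrative): `bash kainstall-centos.sh add -m 192.168.77.140 -p 123456` sets
# ADD_TAG=1, MASTER_NODES=192.168.77.140 and SSH_PASSWORD=123456; any unrecognized
# argument falls through to help::usage.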
# Start (mask the password in the logged command line)
log::info "[start]" "bash $0 ${SCRIPT_PARAMETER//${SSH_PASSWORD:-${SUDO_PASSWORD:-}}/zzzzzz}"
# Transform data
transform::data
# Preflight checks
check::preflight
# Actions
if [[ "${INIT_TAG:-}" == "1" ]]; then
  [[ "$MASTER_NODES" == "" ]] && MASTER_NODES="127.0.0.1"
  init::cluster
elif [[ "${ADD_TAG:-}" == "1" ]]; then
  [[ "${NETWORK_TAG:-}" == "1" ]] && { add::network; add=1; }
  [[ "${INGRESS_TAG:-}" == "1" ]] && { add::ingress; add=1; }
  [[ "${STORAGE_TAG:-}" == "1" ]] && { add::storage; add=1; }
  [[ "${MONITOR_TAG:-}" == "1" ]] && { add::monitor; add=1; }
  [[ "${LOG_TAG:-}" == "1" ]] && { add::log; add=1; }
  [[ "${UI_TAG:-}" == "1" ]] && { add::ui; add=1; }
  [[ "${ADDON_TAG:-}" == "1" ]] && { add::addon; add=1; }
  [[ "$MASTER_NODES" != "" || "$WORKER_NODES" != "" ]] && { add::node; add=1; }
  [[ "${add:-}" != "1" ]] && help::usage
elif [[ "${DEL_TAG:-}" == "1" ]]; then
  if [[ "$MASTER_NODES" != "" || "$WORKER_NODES" != "" ]]; then del::node; else help::usage; fi
elif [[ "${RESET_TAG:-}" == "1" ]]; then
  reset::cluster
elif [[ "${RENEW_CERT_TAG:-}" == "1" ]]; then
  cert::renew
elif [[ "${UPGRADE_TAG:-}" == "1" ]]; then
  upgrade::cluster
elif [[ "${UPDATE_TAG:-}" == "1" ]]; then
  update::self
else
  help::usage
fi