02k8s-install-centos.sh

  1. #!/usr/bin/env bash
  2. # Reference: https://raw.githubusercontent.com/lework/kainstall/v1.4.9/kainstall-centos.sh
  3. [[ -n $DEBUG ]] && set -x
  4. set -o errtrace # Make sure any error trap is inherited
  5. set -o nounset # Disallow expansion of unset variables
  6. set -o pipefail # Use last non-zero exit code in a pipeline
  7. # Versions
  8. KUBE_VERSION="${KUBE_VERSION:-latest}"
  9. FLANNEL_VERSION="${FLANNEL_VERSION:-0.17.0}"
  10. CALICO_VERSION="${CALICO_VERSION:-3.22.1}"
  11. CILIUM_VERSION="${CILIUM_VERSION:-1.9.13}"
  12. HELM_VERSION="${HELM_VERSION:-3.10.1}"
  13. INGRESS_NGINX="${INGRESS_NGINX:-4.2.5}"
  14. RANCHER_VERSION="${RANCHER_VERSION:-2.6.9}"
  15. #METRICS_SERVER_VERSION="${METRICS_SERVER_VERSION:-0.6.1}"
  16. #KUBE_PROMETHEUS_VERSION="${KUBE_PROMETHEUS_VERSION:-0.10.0}"
  17. #ELASTICSEARCH_VERSION="${ELASTICSEARCH_VERSION:-8.1.0}"
  18. #ROOK_VERSION="${ROOK_VERSION:-1.8.7}"
  19. #LONGHORN_VERSION="${LONGHORN_VERSION:-1.2.4}"
  20. # Cluster configuration
  21. KUBE_DNSDOMAIN="${KUBE_DNSDOMAIN:-cluster.local}"
  22. KUBE_APISERVER="${KUBE_APISERVER:-apiserver.$KUBE_DNSDOMAIN}"
  23. KUBE_POD_SUBNET="${KUBE_POD_SUBNET:-10.244.0.0/16}"
  24. KUBE_SERVICE_SUBNET="${KUBE_SERVICE_SUBNET:-10.96.0.0/16}"
  25. KUBE_IMAGE_REPO="${KUBE_IMAGE_REPO:-registry.cn-hangzhou.aliyuncs.com/kainstall}"
  26. KUBE_NETWORK="${KUBE_NETWORK:-flannel}"
  27. KUBE_INGRESS="${KUBE_INGRESS:-nginx}"
  28. KUBE_MONITOR="${KUBE_MONITOR:-prometheus}"
  29. KUBE_STORAGE="${KUBE_STORAGE:-rook}"
  30. KUBE_LOG="${KUBE_LOG:-elasticsearch}"
  31. KUBE_FLANNEL_TYPE="${KUBE_FLANNEL_TYPE:-vxlan}"
  32. KUBE_CRI="${KUBE_CRI:-docker}"
  33. KUBE_CRI_VERSION="${KUBE_CRI_VERSION:-latest}"
  34. KUBE_CRI_ENDPOINT="${KUBE_CRI_ENDPOINT:-/var/run/dockershim.sock}"
  35. # Master and worker node addresses, comma-separated
  36. MASTER_NODES="${MASTER_NODES:-}"
  37. WORKER_NODES="${WORKER_NODES:-}"
  38. # Node on which the setup is performed
  39. MGMT_NODE="${MGMT_NODE:-127.0.0.1}"
  40. # Node connection settings
  41. SSH_USER="${SSH_USER:-root}"
  42. SSH_PASSWORD="${SSH_PASSWORD:-}"
  43. SSH_PRIVATE_KEY="${SSH_PRIVATE_KEY:-}"
  44. SSH_PORT="${SSH_PORT:-22}"
  45. SUDO_USER="${SUDO_USER:-root}"
  46. # Node settings
  47. HOSTNAME_PREFIX="${HOSTNAME_PREFIX:-k8s}"
  48. # nginx port configuration
  49. NGINX_HTTP_PORT="${NGINX_HTTP_PORT:-80}"
  50. # Script settings
  51. TMP_DIR="$(rm -rf /tmp/kainstall* && mktemp -d -t kainstall.XXXXXXXXXX)"
  52. LOG_FILE="${TMP_DIR}/kainstall.log"
  53. SSH_OPTIONS="-o ConnectTimeout=600 -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null"
  54. ERROR_INFO="\n\033[31mERROR Summary: \033[0m\n "
  55. ACCESS_INFO="\n\033[32mACCESS Summary: \033[0m\n "
  56. COMMAND_OUTPUT=""
  57. SCRIPT_PARAMETER="$*"
  58. OFFLINE_DIR="/tmp/kainstall-offline-file/"
  59. OFFLINE_FILE=""
  60. OS_SUPPORT="centos7 centos8 rocky8.7"
  61. GITHUB_PROXY="${GITHUB_PROXY:-https://ghproxy.com/}"
  62. GCR_PROXY="${GCR_PROXY:-k8sgcr.lework.workers.dev}"
  63. SKIP_UPGRADE_PLAN=${SKIP_UPGRADE_PLAN:-false}
  64. SKIP_SET_OS_REPO=${SKIP_SET_OS_REPO:-false}
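# Illustrative usage sketch: all of the settings above are plain environment variables, so an
# install can be parameterized by exporting them before running the script. The node IPs below are
# placeholders, and the trailing sub-command follows the upstream kainstall CLI referenced at the
# top of this file (e.g. init); adjust it if this script's own usage differs.
#
#   MASTER_NODES="192.168.77.130,192.168.77.131,192.168.77.132" \
#   WORKER_NODES="192.168.77.140,192.168.77.141" \
#   SSH_USER=root SSH_PASSWORD='your-password' SSH_PORT=22 \
#   KUBE_VERSION=1.23.17 KUBE_CRI=containerd KUBE_NETWORK=calico \
#   bash 02k8s-install-centos.sh init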
  65. trap trap::info 1 2 3 15 EXIT
  66. ######################################################################################################
  67. # Common functions
  68. ######################################################################################################
  69. # Signal handling
  70. function trap::info() {
  71. [[ ${#ERROR_INFO} -gt 37 ]] && echo -e "$ERROR_INFO"
  72. [[ ${#ACCESS_INFO} -gt 38 ]] && echo -e "$ACCESS_INFO"
  73. [ -f "$LOG_FILE" ] && echo -e "\n\n See detailed log >>> $LOG_FILE \n\n"
  74. trap '' EXIT
  75. exit
  76. }
  77. # Error log
  78. function log::error() {
  79. local item; item="[$(date +'%Y-%m-%dT%H:%M:%S.%N%z')]: \033[31mERROR: \033[0m$*"
  80. ERROR_INFO="${ERROR_INFO}${item}\n "
  81. echo -e "${item}" | tee -a "$LOG_FILE"
  82. }
  83. # Info log
  84. function log::info() {
  85. printf "[%s]: \033[32mINFO: \033[0m%s\n" "$(date +'%Y-%m-%dT%H:%M:%S.%N%z')" "$*" | tee -a "$LOG_FILE"
  86. }
  87. # Warning log
  88. function log::warning() {
  89. printf "[%s]: \033[33mWARNING: \033[0m%s\n" "$(date +'%Y-%m-%dT%H:%M:%S.%N%z')" "$*" | tee -a "$LOG_FILE"
  90. }
  91. # Access info
  92. function log::access() {
  93. ACCESS_INFO="${ACCESS_INFO}$*\n "
  94. printf "[%s]: \033[32mINFO: \033[0m%s\n" "$(date +'%Y-%m-%dT%H:%M:%S.%N%z')" "$*" | tee -a "$LOG_FILE"
  95. }
  96. # Exec log
  97. function log::exec() {
  98. printf "[%s]: \033[34mEXEC: \033[0m%s\n" "$(date +'%Y-%m-%dT%H:%M:%S.%N%z')" "$*" >> "$LOG_FILE"
  99. }
  100. # Convert a version string to a number
  101. function utils::version_to_number() {
  102. echo "$@" | awk -F. '{ printf("%d%03d%03d%03d\n", $1,$2,$3,$4); }';
  103. }
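# Example (illustrative): each dotted field is zero-padded to three digits so versions can be
# compared as plain integers, e.g.
#   utils::version_to_number 1.19.16  ->  1019016000
#   utils::version_to_number 1.23.4   ->  1023004000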
  104. # Retry a command
  105. function utils::retry() {
  106. local retries=$1
  107. shift
  108. local count=0
  109. until eval "$*"; do
  110. exit=$?
  111. wait=$((2 ** count))
  112. count=$((count + 1))
  113. if [ "$count" -lt "$retries" ]; then
  114. echo "Retry $count/$retries exited $exit, retrying in $wait seconds..."
  115. sleep $wait
  116. else
  117. echo "Retry $count/$retries exited $exit, no more retries left."
  118. return $exit
  119. fi
  120. done
  121. return 0
  122. }
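# Example (illustrative): retry a flaky command up to 5 times with exponential backoff
# (1s, 2s, 4s, ... per the wait=2**count above):
#   utils::retry 5 "curl -sSf https://example.com/healthz"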
  123. # Quote/escape arguments
  124. function utils::quote() {
  125. # shellcheck disable=SC2046
  126. if [ $(echo "$*" | tr -d "\n" | wc -c) -eq 0 ]; then
  127. echo "''"
  128. elif [ $(echo "$*" | tr -d "[a-z][A-Z][0-9]:,.=~_/\n-" | wc -c) -gt 0 ]; then
  129. printf "%s" "$*" | sed -e "1h;2,\$H;\$!d;g" -e "s/'/\'\"\'\"\'/g" | sed -e "1h;2,\$H;\$!d;g" -e "s/^/'/g" -e "s/$/'/g"
  130. else
  131. echo "$*"
  132. fi
  133. }
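# Example (illustrative): arguments containing characters outside [a-zA-Z0-9:,.=~_/-] (spaces,
# quotes, $, etc.) are wrapped in single quotes, with embedded single quotes escaped, so they
# survive the eval / ssh word-splitting in command::exec, e.g.
#   utils::quote "echo hello world"   ->  'echo hello world'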
  134. # Download a file
  135. function utils::download_file() {
  136. local url="$1"
  137. local dest="$2"
  138. local unzip_tag="${3:-1}"
  139. local dest_dirname; dest_dirname=$(dirname "$dest")
  140. local filename; filename=$(basename "$dest")
  141. log::info "[download]" "${filename}"
  142. command::exec "${MGMT_NODE}" "
  143. set -e
  144. if [ ! -f \"${dest}\" ]; then
  145. [ ! -d \"${dest_dirname}\" ] && mkdir -pv \"${dest_dirname}\"
  146. wget --timeout=10 --waitretry=3 --tries=5 --retry-connrefused --no-check-certificate \"${url}\" -O \"${dest}\"
  147. if [[ \"${unzip_tag}\" == \"unzip\" ]]; then
  148. command -v unzip 2>/dev/null || yum install -y unzip
  149. unzip -o \"${dest}\" -d \"${dest_dirname}\"
  150. fi
  151. else
  152. echo \"${dest} is exists!\"
  153. fi
  154. "
  155. local status="$?"
  156. check::exit_code "$status" "download" "${filename}" "exit"
  157. return "$status"
  158. }
  159. # Check whether an element exists in an array
  160. function utils::is_element_in_array() {
  161. local -r element="${1}"
  162. local -r array=("${@:2}")
  163. local walker=''
  164. for walker in "${array[@]}"
  165. do
  166. [[ "${walker}" = "${element}" ]] && return 0
  167. done
  168. return 1
  169. }
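# Example (illustrative):
#   supported=(flannel calico cilium)
#   utils::is_element_in_array "calico" "${supported[@]}" && echo "supported network plugin"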
  170. # Execute a command (locally or over SSH)
  171. function command::exec() {
  172. local host=${1:-}
  173. shift
  174. local command="$*"
  175. if [[ "${SUDO_TAG:-}" == "1" ]]; then
  176. sudo_options="sudo -H -n -u ${SUDO_USER}"
  177. if [[ "${SUDO_PASSWORD:-}" != "" ]]; then
  178. sudo_options="${sudo_options// -n/} -p \"\" -S <<< \"${SUDO_PASSWORD}\""
  179. fi
  180. command="$sudo_options bash -c $(utils::quote "$command")"
  181. fi
  182. command="$(utils::quote "$command")"
  183. if [[ "${host}" == "127.0.0.1" ]]; then
  184. # Local execution
  185. log::exec "[command]" "bash -c $(printf "%s" "${command//${SUDO_PASSWORD:-}/zzzzzz}")"
  186. # shellcheck disable=SC2094
  187. COMMAND_OUTPUT=$(eval bash -c "${command}" 2>> "$LOG_FILE" | tee -a "$LOG_FILE")
  188. local status=$?
  189. else
  190. # Remote execution
  191. local ssh_cmd="ssh"
  192. if [[ "${SSH_PASSWORD}" != "" ]]; then
  193. ssh_cmd="sshpass -p \"${SSH_PASSWORD}\" ${ssh_cmd}"
  194. elif [[ "$SSH_PRIVATE_KEY" != "" ]]; then
  195. [ -f "${SSH_PRIVATE_KEY}" ] || { log::error "[exec]" "ssh private_key:${SSH_PRIVATE_KEY} not found."; exit 1; }
  196. ssh_cmd="${ssh_cmd} -i $SSH_PRIVATE_KEY"
  197. fi
  198. log::exec "[command]" "${ssh_cmd//${SSH_PASSWORD:-}/zzzzzz} ${SSH_OPTIONS} ${SSH_USER}@${host} -p ${SSH_PORT} bash -c $(printf "%s" "${command//${SUDO_PASSWORD:-}/zzzzzz}")"
  199. # shellcheck disable=SC2094
  200. COMMAND_OUTPUT=$(eval "${ssh_cmd} ${SSH_OPTIONS} ${SSH_USER}@${host} -p ${SSH_PORT}" bash -c '"${command}"' 2>> "$LOG_FILE" | tee -a "$LOG_FILE")
  201. local status=$?
  202. fi
  203. return $status
  204. }
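# Example (illustrative): command::exec runs the command locally when the host is 127.0.0.1 and via
# ssh (or sshpass) otherwise; stdout is captured in the global COMMAND_OUTPUT, e.g.
#   command::exec "${MGMT_NODE}" "kubectl get node -o wide"
#   check::exit_code "$?" "example" "list nodes"
#   echo "${COMMAND_OUTPUT}"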
  205. # Copy files
  206. function command::scp() {
  207. local host=${1:-}
  208. local src=${2:-}
  209. local dest=${3:-/tmp/}
  210. if [[ "${host}" == "127.0.0.1" ]]; then
  211. local command="cp -rf ${src} ${dest}"
  212. log::exec "[command]" "bash -c \"${command}\""
  213. # shellcheck disable=SC2094
  214. COMMAND_OUTPUT=$(bash -c "${command}" 2>> "$LOG_FILE" | tee -a "$LOG_FILE")
  215. local status=$?
  216. else
  217. local scp_cmd="scp"
  218. if [[ "${SSH_PASSWORD}" != "" ]]; then
  219. scp_cmd="sshpass -p \"${SSH_PASSWORD}\" ${scp_cmd}"
  220. elif [[ "$SSH_PRIVATE_KEY" != "" ]]; then
  221. [ -f "${SSH_PRIVATE_KEY}" ] || { log::error "[exec]" "ssh private_key:${SSH_PRIVATE_KEY} not found."; exit 1; }
  222. scp_cmd="${scp_cmd} -i $SSH_PRIVATE_KEY"
  223. fi
  224. log::exec "[command]" "${scp_cmd} ${SSH_OPTIONS} -P ${SSH_PORT} -r ${src} ${SSH_USER}@${host}:${dest}" >> "$LOG_FILE"
  225. # shellcheck disable=SC2094
  226. COMMAND_OUTPUT=$(eval "${scp_cmd} ${SSH_OPTIONS} -P ${SSH_PORT} -r ${src} ${SSH_USER}@${host}:${dest}" 2>> "$LOG_FILE" | tee -a "$LOG_FILE")
  227. local status=$?
  228. fi
  229. return $status
  230. }
  231. # Check whether a command exists
  232. function check::command_exists() {
  233. local cmd=${1}
  234. local package=${2}
  235. if command -V "$cmd" > /dev/null 2>&1; then
  236. log::info "[check]" "$cmd command exists."
  237. else
  238. log::warning "[check]" "I require $cmd but it's not installed."
  239. log::warning "[check]" "install $package package."
  240. command::exec "127.0.0.1" "yum install -y ${package}"
  241. check::exit_code "$?" "check" "$package install" "exit"
  242. fi
  243. }
  244. ######################################################################################################
  245. # Install functions
  246. ######################################################################################################
  247. # Node initialization script
  248. function script::init_node() {
  249. # clean
  250. sed -i -e "/$KUBE_APISERVER/d" -e '/-worker-/d' -e '/-master-/d' /etc/hosts
  251. sed -i '/## Kainstall managed start/,/## Kainstall managed end/d' /etc/security/limits.conf /etc/systemd/system.conf /etc/bashrc /etc/rc.local /etc/audit/rules.d/audit.rules
  252. # Disable selinux
  253. sed -i '/SELINUX/s/enforcing/disabled/' /etc/selinux/config
  254. setenforce 0
  255. # Disable swap
  256. swapoff -a && sysctl -w vm.swappiness=0
  257. sed -ri '/^[^#]*swap/s@^@#@' /etc/fstab
  258. # Disable firewalld
  259. for target in firewalld python-firewall firewalld-filesystem iptables; do
  260. systemctl stop $target &>/dev/null || true
  261. systemctl disable $target &>/dev/null || true
  262. done
  263. # repo
  264. [[ -f /etc/yum.repos.d/CentOS-Base.repo && "${SKIP_SET_OS_REPO,,}" == "false" ]] && sed -e 's!^#baseurl=!baseurl=!g' \
  265. -e 's!^mirrorlist=!#mirrorlist=!g' \
  266. -e 's!mirror.centos.org!mirrors.aliyun.com!g' \
  267. -i /etc/yum.repos.d/CentOS-Base.repo
  268. [[ "${OFFLINE_TAG:-}" != "1" && "${SKIP_SET_OS_REPO,,}" == "false" ]] && yum install -y epel-release
  269. [[ -f /etc/yum.repos.d/epel.repo && "${SKIP_SET_OS_REPO,,}" == "false" ]] && sed -e 's!^mirrorlist=!#mirrorlist=!g' \
  270. -e 's!^metalink=!#metalink=!g' \
  271. -e 's!^#baseurl=!baseurl=!g' \
  272. -e 's!//download.*/pub!//mirrors.aliyun.com!g' \
  273. -e 's!http://mirrors\.aliyun!https://mirrors.aliyun!g' \
  274. -i /etc/yum.repos.d/epel.repo
  275. # Change limits
  276. [ ! -f /etc/security/limits.conf_bak ] && cp /etc/security/limits.conf{,_bak}
  277. cat << EOF >> /etc/security/limits.conf
  278. ## Kainstall managed start
  279. root soft nofile 655360
  280. root hard nofile 655360
  281. root soft nproc 655360
  282. root hard nproc 655360
  283. root soft core unlimited
  284. root hard core unlimited
  285. * soft nofile 655360
  286. * hard nofile 655360
  287. * soft nproc 655360
  288. * hard nproc 655360
  289. * soft core unlimited
  290. * hard core unlimited
  291. ## Kainstall managed end
  292. EOF
  293. # /etc/systemd/system.conf
  294. [ -f /etc/security/limits.d/20-nproc.conf ] && sed -i 's#4096#655360#g' /etc/security/limits.d/20-nproc.conf
  295. cat << EOF >> /etc/systemd/system.conf
  296. ## Kainstall managed start
  297. DefaultLimitCORE=infinity
  298. DefaultLimitNOFILE=655360
  299. DefaultLimitNPROC=655360
  300. DefaultTasksMax=75%
  301. ## Kainstall managed end
  302. EOF
  303. # Change sysctl
  304. cat << EOF > /etc/sysctl.d/99-kube.conf
  305. # https://www.kernel.org/doc/Documentation/sysctl/
  306. #############################################################################################
  307. # Tune virtual memory
  308. #############################################################################################
  309. # Default: 30
  310. # 0 - never swap under any circumstances.
  311. # 1 - do not swap unless memory is exhausted (OOM).
  312. vm.swappiness = 0
  313. # Memory overcommit policy
  314. # 0 - the kernel checks whether enough free memory is available; if so the allocation succeeds, otherwise it fails and the error is returned to the process.
  315. # 1 - the kernel allows allocating all physical memory regardless of the current memory state.
  316. # 2 - the kernel allows allocating more memory than the total of physical memory and swap.
  317. vm.overcommit_memory=1
  318. # Behaviour on OOM
  319. # 1 makes the kernel panic on OOM; 0 lets the OOM killer kill the most memory-hungry process when memory is exhausted.
  320. vm.panic_on_oom=0
  321. # vm.dirty_background_ratio tunes how the kernel handles dirty pages that must be flushed to disk.
  322. # Default value is 10.
  323. # The value is a percentage of total system memory; 5 is appropriate in many cases.
  324. # It should not be set to zero.
  325. vm.dirty_background_ratio = 5
  326. # Total number of dirty pages allowed before the kernel forces a synchronous flush to disk.
  327. # It can also be raised by increasing vm.dirty_ratio above its default of 30 (also a percentage of system memory).
  328. # A vm.dirty_ratio between 60 and 80 is recommended.
  329. vm.dirty_ratio = 60
  330. # vm.max_map_count limits the number of memory-mapped areas a process may have.
  331. # The minimum value of the mmap limit (vm.max_map_count) is the open-file ulimit (cat /proc/sys/fs/file-max).
  332. # map_count should be roughly 1 per 128KB of system memory, i.e. 262144 on a 32GB system.
  333. # Default: 65530
  334. vm.max_map_count = 2097152
  335. #############################################################################################
  336. # Tune filesystem settings
  337. #############################################################################################
  338. fs.may_detach_mounts = 1
  339. # Increase the size of the file handle and inode caches, and restrict core dumps.
  340. fs.file-max = 2097152
  341. fs.nr_open = 2097152
  342. fs.suid_dumpable = 0
  343. # File watch (inotify) limits
  344. fs.inotify.max_user_instances=8192
  345. fs.inotify.max_user_watches=524288
  346. fs.inotify.max_queued_events=16384
  347. #############################################################################################
  348. # Tune network settings
  349. #############################################################################################
  350. # Default amount of memory allocated to the send and receive buffers of each socket.
  351. net.core.wmem_default = 25165824
  352. net.core.rmem_default = 25165824
  353. # Maximum amount of memory allocated to the send and receive buffers of each socket.
  354. net.core.wmem_max = 25165824
  355. net.core.rmem_max = 25165824
  356. # In addition to the socket settings, the send and receive buffer sizes for
  357. # TCP sockets must be set separately via the net.ipv4.tcp_wmem and net.ipv4.tcp_rmem parameters.
  358. # They take three space-separated integers specifying the minimum, default and maximum sizes.
  359. # The maximum cannot exceed the values set for all sockets via net.core.wmem_max and net.core.rmem_max.
  360. # A reasonable setting is a 4KiB minimum, 64KiB default and 2MiB maximum buffer.
  361. net.ipv4.tcp_wmem = 20480 12582912 25165824
  362. net.ipv4.tcp_rmem = 20480 12582912 25165824
  363. # Increase the maximum total buffer space that can be allocated,
  364. # measured in pages (4096 bytes).
  365. net.ipv4.tcp_mem = 65536 25165824 262144
  366. net.ipv4.udp_mem = 65536 25165824 262144
  367. # Minimum amount of memory allocated to the send and receive buffers of each socket.
  368. net.ipv4.udp_wmem_min = 16384
  369. net.ipv4.udp_rmem_min = 16384
  370. # Enable TCP window scaling so clients can transfer data more efficiently and proxies can buffer it.
  371. net.ipv4.tcp_window_scaling = 1
  372. # Raise the number of connections that can be accepted simultaneously.
  373. net.ipv4.tcp_max_syn_backlog = 10240
  374. # Raising net.core.netdev_max_backlog above its default of 1000
  375. # helps absorb bursts of network traffic, especially on multi-gigabit links,
  376. # by allowing more packets to queue for the kernel to process.
  377. net.core.netdev_max_backlog = 65536
  378. # Increase the maximum amount of option memory buffers.
  379. net.core.optmem_max = 25165824
  380. # Number of SYNACK retries for passive TCP connections.
  381. net.ipv4.tcp_synack_retries = 2
  382. # Allowed local port range.
  383. net.ipv4.ip_local_port_range = 2048 65535
  384. # Protect against TCP time-wait assassination (RFC 1337).
  385. # Default: net.ipv4.tcp_rfc1337 = 0
  386. net.ipv4.tcp_rfc1337 = 1
  387. # Reduce the default tcp_fin_timeout.
  388. net.ipv4.tcp_fin_timeout = 15
  389. # Maximum number of backlogged sockets.
  390. # Default is 128.
  391. net.core.somaxconn = 32768
  392. # Enable syncookies for SYN-flood protection.
  393. net.ipv4.tcp_syncookies = 1
  394. # Mitigate Smurf attacks:
  395. # forged ICMP packets are sent to a network broadcast address with the victim set as the source,
  396. # so every host that receives them replies to the victim, flooding it with thousands of packets.
  397. net.ipv4.icmp_echo_ignore_broadcasts = 1
  398. # Enable protection against bogus ICMP error messages.
  399. net.ipv4.icmp_ignore_bogus_error_responses = 1
  400. # Enable automatic window scaling;
  401. # this lets TCP buffers grow beyond their usual 64K maximum when latency justifies it.
  402. net.ipv4.tcp_window_scaling = 1
  403. # Log spoofed, source-routed and redirected packets (martians).
  404. net.ipv4.conf.all.log_martians = 1
  405. net.ipv4.conf.default.log_martians = 1
  406. # How many orphaned TCP sockets (not attached to any user file handle) the kernel maintains;
  407. # beyond this number orphaned connections are reset immediately and a warning is printed.
  408. # Default: net.ipv4.tcp_max_orphans = 65536
  409. net.ipv4.tcp_max_orphans = 65536
  410. # Do not cache metrics when closing connections.
  411. net.ipv4.tcp_no_metrics_save = 1
  412. # Enable timestamps as defined in RFC 1323:
  413. # Default: net.ipv4.tcp_timestamps = 1
  414. net.ipv4.tcp_timestamps = 1
  415. # Enable selective acknowledgements (SACK).
  416. # Default: net.ipv4.tcp_sack = 1
  417. net.ipv4.tcp_sack = 1
  418. # Increase the tcp-time-wait bucket pool size to prevent simple DoS attacks.
  419. # net.ipv4.tcp_tw_recycle was removed in Linux 4.12; use net.ipv4.tcp_tw_reuse instead.
  420. net.ipv4.tcp_max_tw_buckets = 14400
  421. net.ipv4.tcp_tw_reuse = 1
  422. # accept_source_route makes an interface accept packets with the strict (SSR) or loose (LSR) source-routing option set.
  423. # The settings below drop packets that have SSR or LSR options set.
  424. net.ipv4.conf.all.accept_source_route = 0
  425. net.ipv4.conf.default.accept_source_route = 0
  426. # Enable reverse-path filtering.
  427. net.ipv4.conf.all.rp_filter = 1
  428. net.ipv4.conf.default.rp_filter = 1
  429. # Do not accept ICMP redirects.
  430. net.ipv4.conf.all.accept_redirects = 0
  431. net.ipv4.conf.default.accept_redirects = 0
  432. net.ipv4.conf.all.secure_redirects = 0
  433. net.ipv4.conf.default.secure_redirects = 0
  434. # Do not send any IPv4 ICMP redirect packets.
  435. net.ipv4.conf.all.send_redirects = 0
  436. net.ipv4.conf.default.send_redirects = 0
  437. # Enable IP forwarding.
  438. net.ipv4.ip_forward = 1
  439. # Disable IPv6.
  440. net.ipv6.conf.lo.disable_ipv6=1
  441. net.ipv6.conf.all.disable_ipv6 = 1
  442. net.ipv6.conf.default.disable_ipv6 = 1
  443. # Pass bridged traffic to the iptables/ip6tables/arptables chains (required for kube-proxy/CNI).
  444. net.bridge.bridge-nf-call-ip6tables = 1
  445. net.bridge.bridge-nf-call-iptables = 1
  446. net.bridge.bridge-nf-call-arptables = 1
  447. # ARP cache
  448. # Minimum number of entries kept in the ARP cache; the garbage collector does not run if there are fewer. Default is 128.
  449. net.ipv4.neigh.default.gc_thresh1=2048
  450. # Soft limit on the number of ARP cache entries; the garbage collector allows it to be exceeded for 5 seconds before collecting. Default is 512.
  451. net.ipv4.neigh.default.gc_thresh2=4096
  452. # Hard limit on the number of ARP cache entries; the garbage collector runs immediately once it is exceeded. Default is 1024.
  453. net.ipv4.neigh.default.gc_thresh3=8192
  454. # TCP keepalive
  455. net.ipv4.tcp_keepalive_time = 600
  456. net.ipv4.tcp_keepalive_intvl = 30
  457. net.ipv4.tcp_keepalive_probes = 10
  458. # conntrack table
  459. net.nf_conntrack_max=1048576
  460. net.netfilter.nf_conntrack_max=1048576
  461. net.netfilter.nf_conntrack_buckets=262144
  462. net.netfilter.nf_conntrack_tcp_timeout_fin_wait=30
  463. net.netfilter.nf_conntrack_tcp_timeout_time_wait=30
  464. net.netfilter.nf_conntrack_tcp_timeout_close_wait=15
  465. net.netfilter.nf_conntrack_tcp_timeout_established=300
  466. #############################################################################################
  467. # Tune kernel parameters
  468. #############################################################################################
  469. # Address space layout randomization (ASLR) is a memory-protection mechanism that guards against buffer-overflow attacks.
  470. # It keeps the memory addresses associated with running processes unpredictable,
  471. # so flaws or vulnerabilities in those processes are harder to exploit.
  472. # Accepted values: 0 = off, 1 = conservative randomization, 2 = full randomization
  473. kernel.randomize_va_space = 2
  474. # Raise the PID limit.
  475. kernel.pid_max = 65536
  476. kernel.threads-max=30938
  477. # coredump
  478. kernel.core_pattern=core
  479. # Whether to panic automatically when a soft lockup is detected; default is 0.
  480. kernel.softlockup_all_cpu_backtrace=1
  481. kernel.softlockup_panic=1
  482. EOF
  483. # history
  484. cat << EOF >> /etc/bashrc
  485. ## Kainstall managed start
  486. # history actions record,include action time, user, login ip
  487. HISTFILESIZE=5000
  488. HISTSIZE=5000
  489. USER_IP=\$(who -u am i 2>/dev/null | awk '{print \$NF}' | sed -e 's/[()]//g')
  490. if [ -z \$USER_IP ]
  491. then
  492. USER_IP=\$(hostname -i)
  493. fi
  494. HISTTIMEFORMAT="%Y-%m-%d %H:%M:%S \$USER_IP:\$(whoami) "
  495. export HISTFILESIZE HISTSIZE HISTTIMEFORMAT
  496. # PS1
  497. PS1='\[\033[0m\]\[\033[1;36m\][\u\[\033[0m\]@\[\033[1;32m\]\h\[\033[0m\] \[\033[1;31m\]\w\[\033[0m\]\[\033[1;36m\]]\[\033[33;1m\]\\$ \[\033[0m\]'
  498. ## Kainstall managed end
  499. EOF
  500. # journal
  501. mkdir -p /var/log/journal /etc/systemd/journald.conf.d
  502. cat << EOF > /etc/systemd/journald.conf.d/99-prophet.conf
  503. [Journal]
  504. # Persist logs to disk
  505. Storage=persistent
  506. # Compress historical logs
  507. Compress=yes
  508. SyncIntervalSec=5m
  509. RateLimitInterval=30s
  510. RateLimitBurst=1000
  511. # Cap total disk usage at 2G
  512. SystemMaxUse=2G
  513. # Cap a single log file at 100M
  514. SystemMaxFileSize=100M
  515. # Keep logs for 3 weeks
  516. MaxRetentionSec=3week
  517. # Do not forward logs to syslog
  518. ForwardToSyslog=no
  519. EOF
  520. # motd
  521. cat << EOF > /etc/profile.d/zz-ssh-login-info.sh
  522. #!/bin/sh
  523. #
  524. # @Time : 2020-02-04
  525. # @Author : lework
  526. # @Desc : ssh login banner
  527. export PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:\$PATH
  528. #shopt -q login_shell && : || return 0
  529. # os
  530. upSeconds="\$(cut -d. -f1 /proc/uptime)"
  531. secs=\$((\${upSeconds}%60))
  532. mins=\$((\${upSeconds}/60%60))
  533. hours=\$((\${upSeconds}/3600%24))
  534. days=\$((\${upSeconds}/86400))
  535. UPTIME_INFO=\$(printf "%d days, %02dh %02dm %02ds" "\$days" "\$hours" "\$mins" "\$secs")
  536. if [ -f /etc/redhat-release ] ; then
  537. PRETTY_NAME=\$(< /etc/redhat-release)
  538. elif [ -f /etc/debian_version ]; then
  539. DIST_VER=\$(</etc/debian_version)
  540. PRETTY_NAME="\$(grep PRETTY_NAME /etc/os-release | sed -e 's/PRETTY_NAME=//g' -e 's/"//g') (\$DIST_VER)"
  541. else
  542. PRETTY_NAME=\$(cat /etc/*-release | grep "PRETTY_NAME" | sed -e 's/PRETTY_NAME=//g' -e 's/"//g')
  543. fi
  544. if [[ -d "/system/app/" && -d "/system/priv-app" ]]; then
  545. model="\$(getprop ro.product.brand) \$(getprop ro.product.model)"
  546. elif [[ -f /sys/devices/virtual/dmi/id/product_name ||
  547. -f /sys/devices/virtual/dmi/id/product_version ]]; then
  548. model="\$(< /sys/devices/virtual/dmi/id/product_name)"
  549. model+=" \$(< /sys/devices/virtual/dmi/id/product_version)"
  550. elif [[ -f /sys/firmware/devicetree/base/model ]]; then
  551. model="\$(< /sys/firmware/devicetree/base/model)"
  552. elif [[ -f /tmp/sysinfo/model ]]; then
  553. model="\$(< /tmp/sysinfo/model)"
  554. fi
  555. MODEL_INFO=\${model}
  556. KERNEL=\$(uname -srmo)
  557. USER_NUM=\$(who -u | wc -l)
  558. RUNNING=\$(ps ax | wc -l | tr -d " ")
  559. # disk
  560. totaldisk=\$(df -h -x devtmpfs -x tmpfs -x debugfs -x aufs -x overlay --total 2>/dev/null | tail -1)
  561. disktotal=\$(awk '{print \$2}' <<< "\${totaldisk}")
  562. diskused=\$(awk '{print \$3}' <<< "\${totaldisk}")
  563. diskusedper=\$(awk '{print \$5}' <<< "\${totaldisk}")
  564. DISK_INFO="\033[0;33m\${diskused}\033[0m of \033[1;34m\${disktotal}\033[0m disk space used (\033[0;33m\${diskusedper}\033[0m)"
  565. # cpu
  566. cpu=\$(awk -F':' '/^model name/ {print \$2}' /proc/cpuinfo | uniq | sed -e 's/^[ \t]*//')
  567. cpun=\$(grep -c '^processor' /proc/cpuinfo)
  568. cpuc=\$(grep '^cpu cores' /proc/cpuinfo | tail -1 | awk '{print \$4}')
  569. cpup=\$(grep '^physical id' /proc/cpuinfo | wc -l)
  570. CPU_INFO="\${cpu} \${cpup}P \${cpuc}C \${cpun}L"
  571. # get the load averages
  572. read one five fifteen rest < /proc/loadavg
  573. LOADAVG_INFO="\033[0;33m\${one}\033[0m / \${five} / \${fifteen} with \033[1;34m\$(( cpun*cpuc ))\033[0m core(s) at \033[1;34m\$(grep '^cpu MHz' /proc/cpuinfo | tail -1 | awk '{print \$4}')\033[0m MHz"
  574. # mem
  575. MEM_INFO="\$(cat /proc/meminfo | awk '/MemTotal:/{total=\$2/1024/1024;next} /MemAvailable:/{use=total-\$2/1024/1024; printf("\033[0;33m%.2fGiB\033[0m of \033[1;34m%.2fGiB\033[0m RAM used (\033[0;33m%.2f%%\033[0m)",use,total,(use/total)*100);}')"
  576. # network
  577. # extranet_ip=" and \$(curl -s ip.cip.cc)"
  578. IP_INFO="\$(ip a|grep -E '^[0-9]+: em*|^[0-9]+: eno*|^[0-9]+: enp*|^[0-9]+: ens*|^[0-9]+: eth*|^[0-9]+: wlp*' -A2|grep inet|awk -F ' ' '{print \$2}'|cut -f1 -d/|xargs echo)"
  579. # Container info
  580. CONTAINER_INFO="\$(sudo /usr/bin/crictl ps -a -o yaml 2> /dev/null | awk '/^ state: /{gsub("CONTAINER_", "", \$NF); ++S[\$NF]}END{for(m in S) printf "%s%s:%s ",substr(m,1,1),tolower(substr(m,2)),S[m]}')Images:\$(sudo /usr/bin/crictl images -q 2> /dev/null | wc -l)"
  581. # info
  582. echo -e "
  583. Information as of: \033[1;34m\$(date +"%Y-%m-%d %T")\033[0m
  584. \033[0;1;31mProduct\033[0m............: \${MODEL_INFO}
  585. \033[0;1;31mOS\033[0m.................: \${PRETTY_NAME}
  586. \033[0;1;31mKernel\033[0m.............: \${KERNEL}
  587. \033[0;1;31mCPU\033[0m................: \${CPU_INFO}
  588. \033[0;1;31mHostname\033[0m...........: \033[1;34m\$(hostname)\033[0m
  589. \033[0;1;31mIP Addresses\033[0m.......: \033[1;34m\${IP_INFO}\033[0m
  590. \033[0;1;31mUptime\033[0m.............: \033[0;33m\${UPTIME_INFO}\033[0m
  591. \033[0;1;31mMemory\033[0m.............: \${MEM_INFO}
  592. \033[0;1;31mLoad Averages\033[0m......: \${LOADAVG_INFO}
  593. \033[0;1;31mDisk Usage\033[0m.........: \${DISK_INFO}
  594. \033[0;1;31mUsers online\033[0m.......: \033[1;34m\${USER_NUM}\033[0m
  595. \033[0;1;31mRunning Processes\033[0m..: \033[1;34m\${RUNNING}\033[0m
  596. \033[0;1;31mContainer Info\033[0m.....: \${CONTAINER_INFO}
  597. "
  598. EOF
  599. chmod +x /etc/profile.d/zz-ssh-login-info.sh
  600. echo 'ALL ALL=(ALL) NOPASSWD:/usr/bin/crictl' > /etc/sudoers.d/crictl
  601. # time sync
  602. ntpd --help >/dev/null 2>&1 && yum remove -y ntp
  603. [[ "${OFFLINE_TAG:-}" != "1" ]] && yum install -y chrony
  604. [ ! -f /etc/chrony.conf_bak ] && cp /etc/chrony.conf{,_bak} # back up the default config
  605. cat << EOF > /etc/chrony.conf
  606. server ntp.aliyun.com iburst
  607. server cn.ntp.org.cn iburst
  608. server ntp.shu.edu.cn iburst
  609. server 0.cn.pool.ntp.org iburst
  610. server 1.cn.pool.ntp.org iburst
  611. server 2.cn.pool.ntp.org iburst
  612. server 3.cn.pool.ntp.org iburst
  613. driftfile /var/lib/chrony/drift
  614. makestep 1.0 3
  615. logdir /var/log/chrony
  616. EOF
  617. timedatectl set-timezone Asia/Shanghai
  618. chronyd -q -t 1 'server cn.pool.ntp.org iburst maxsamples 1'
  619. systemctl enable chronyd
  620. systemctl start chronyd
  621. chronyc sources -v
  622. chronyc sourcestats
  623. hwclock --systohc
  624. # package
  625. [[ "${OFFLINE_TAG:-}" != "1" ]] && yum install -y curl wget
  626. # ipvs
  627. [[ "${OFFLINE_TAG:-}" != "1" ]] && yum install -y ipvsadm ipset sysstat conntrack libseccomp
  628. module=(
  629. ip_vs
  630. ip_vs_rr
  631. ip_vs_wrr
  632. ip_vs_sh
  633. overlay
  634. nf_conntrack
  635. br_netfilter
  636. )
  637. [ -f /etc/modules-load.d/ipvs.conf ] && cp -f /etc/modules-load.d/ipvs.conf{,_bak}
  638. for kernel_module in "${module[@]}";do
  639. /sbin/modinfo -F filename "$kernel_module" |& grep -qv ERROR && echo "$kernel_module" >> /etc/modules-load.d/ipvs.conf
  640. done
  641. systemctl restart systemd-modules-load
  642. systemctl enable systemd-modules-load
  643. sysctl --system
  644. # audit
  645. [[ "${OFFLINE_TAG:-}" != "1" ]] && yum install -y audit audit-libs
  646. # /etc/audit/rules.d/audit.rules
  647. cat << EOF >> /etc/audit/rules.d/audit.rules
  648. ## Kainstall managed start
  649. # Ignore errors
  650. -i
  651. # SYSCALL
  652. -a always,exit -F arch=b64 -S kill,tkill,tgkill -F a1=9 -F key=trace_kill_9
  653. -a always,exit -F arch=b64 -S kill,tkill,tgkill -F a1=15 -F key=trace_kill_15
  654. # docker
  655. -w /usr/bin/dockerd -k docker
  656. -w /var/lib/docker -k docker
  657. -w /etc/docker -k docker
  658. -w /usr/lib/systemd/system/docker.service -k docker
  659. -w /etc/systemd/system/docker.service -k docker
  660. -w /usr/lib/systemd/system/docker.socket -k docker
  661. -w /etc/default/docker -k docker
  662. -w /etc/sysconfig/docker -k docker
  663. -w /etc/docker/daemon.json -k docker
  664. # containerd
  665. -w /usr/bin/containerd -k containerd
  666. -w /var/lib/containerd -k containerd
  667. -w /usr/lib/systemd/system/containerd.service -k containerd
  668. -w /etc/containerd/config.toml -k containerd
  669. # cri-o
  670. -w /usr/bin/crio -k cri-o
  671. -w /etc/crio -k cri-o
  672. # runc
  673. -w /usr/bin/runc -k runc
  674. # kube
  675. -w /usr/bin/kubeadm -k kubeadm
  676. -w /usr/bin/kubelet -k kubelet
  677. -w /usr/bin/kubectl -k kubectl
  678. -w /var/lib/kubelet -k kubelet
  679. -w /etc/kubernetes -k kubernetes
  680. ## Kainstall managed end
  681. EOF
  682. chmod 600 /etc/audit/rules.d/audit.rules
  683. sed -i 's#max_log_file =.*#max_log_file = 80#g' /etc/audit/auditd.conf
  684. if [ -f /usr/libexec/initscripts/legacy-actions/auditd/restart ]; then
  685. /usr/libexec/initscripts/legacy-actions/auditd/restart
  686. else
  687. systemctl stop auditd && systemctl start auditd
  688. fi
  689. systemctl enable auditd
  690. grep single-request-reopen /etc/resolv.conf || sed -i '1ioptions timeout:2 attempts:3 rotate single-request-reopen' /etc/resolv.conf
  691. ipvsadm --clear
  692. iptables -F && iptables -t nat -F && iptables -t mangle -F && iptables -X
  693. }
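# Illustrative spot checks after script::init_node has run on a node, covering what it configures:
#   getenforce                       # SELinux should be Permissive/Disabled
#   swapon --show                    # no output expected (swap disabled)
#   sysctl net.ipv4.ip_forward net.bridge.bridge-nf-call-iptables
#   lsmod | grep -E 'ip_vs|br_netfilter'
#   chronyc tracking                 # time sync status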
  694. # Upgrade the kernel
  695. function script::upgrade_kernel() {
  696. local ver; ver=$(rpm --eval "%{centos_ver}")
  697. [[ "${OFFLINE_TAG:-}" != "1" ]] && yum install -y "https://www.elrepo.org/elrepo-release-${ver}.el${ver}.elrepo.noarch.rpm"
  698. sed -e "s/^mirrorlist=/#mirrorlist=/g" \
  699. -e "s/elrepo.org\/linux/mirrors.tuna.tsinghua.edu.cn\/elrepo/g" \
  700. -i /etc/yum.repos.d/elrepo.repo
  701. [[ "${OFFLINE_TAG:-}" != "1" ]] && yum install -y --disablerepo="*" --enablerepo=elrepo-kernel kernel-lt{,-devel}
  702. grub2-set-default 0 && grub2-mkconfig -o /etc/grub2.cfg
  703. grubby --default-kernel
  704. grubby --args="user_namespace.enable=1" --update-kernel="$(grubby --default-kernel)"
  705. }
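# Note: the kernel-lt package installed above only takes effect after a reboot; grub2-set-default 0
# selects the first (newly installed) GRUB entry and user_namespace.enable=1 is appended to its boot
# arguments via grubby.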
  706. # Upgrade node Kubernetes packages
  707. function script::upgrage_kube() {
  708. local role=${1:-init}
  709. local version="-${2:-latest}"
  710. version="${version#-latest}"
  711. set -e
  712. echo '[install] kubeadm'
  713. kubeadm version
  714. yum install -y "kubeadm${version}" --disableexcludes=kubernetes
  715. kubeadm version
  716. echo '[upgrade]'
  717. if [[ "$role" == "init" ]]; then
  718. local plan_info; plan_info=$(kubeadm upgrade plan)
  719. local v; v=$(printf "%s" "$plan_info" | grep 'kubeadm upgrade apply ' | awk '{print $4}'| tail -1 )
  720. printf "%s\n" "${plan_info}"
  721. kubeadm upgrade apply "${v}" -y
  722. else
  723. kubeadm upgrade node
  724. fi
  725. echo '[install] kubelet kubectl'
  726. kubectl version --client=true
  727. yum install -y "kubelet${version}" "kubectl${version}" --disableexcludes=kubernetes
  728. kubectl version --client=true
  729. [ -f /usr/lib/systemd/system/kubelet.service.d/10-kubeadm.conf ] && \
  730. sed -i 's#^\[Service\]#[Service]\nCPUAccounting=true\nMemoryAccounting=true#g' /usr/lib/systemd/system/kubelet.service.d/10-kubeadm.conf
  731. systemctl daemon-reload
  732. systemctl restart kubelet
  733. }
  734. # Install docker
  735. function script::install_docker() {
  736. local version="-${1:-latest}"
  737. version="${version#-latest}"
  738. cat << EOF > /etc/yum.repos.d/docker-ce.repo
  739. [docker-ce-stable]
  740. name=Docker CE Stable - \$basearch
  741. baseurl=https://mirrors.aliyun.com/docker-ce/linux/centos/$(rpm --eval '%{centos_ver}')/\$basearch/stable
  742. enabled=1
  743. gpgcheck=1
  744. gpgkey=https://mirrors.aliyun.com/docker-ce/linux/centos/gpg
  745. EOF
  746. if [[ "${OFFLINE_TAG:-}" != "1" ]];then
  747. [ -f "$(which docker)" ] && yum remove -y docker-ce docker-ce-cli containerd.io
  748. yum install -y "docker-ce${version}" "docker-ce-cli${version}" containerd.io bash-completion
  749. fi
  750. [ -f /usr/share/bash-completion/completions/docker ] && \
  751. cp -f /usr/share/bash-completion/completions/docker /etc/bash_completion.d/
  752. [ ! -d /etc/docker ] && mkdir /etc/docker
  753. # /etc/docker/daemon.json
  754. cat << EOF > /etc/docker/daemon.json
  755. {
  756. "data-root": "/var/lib/docker",
  757. "log-driver": "json-file",
  758. "log-opts": {
  759. "max-size": "100m",
  760. "max-file": "3"
  761. },
  762. "default-ulimits": {
  763. "nofile": {
  764. "Name": "nofile",
  765. "Hard": 655360,
  766. "Soft": 655360
  767. },
  768. "nproc": {
  769. "Name": "nproc",
  770. "Hard": 655360,
  771. "Soft": 655360
  772. }
  773. },
  774. "live-restore": true,
  775. "oom-score-adjust": -1000,
  776. "max-concurrent-downloads": 10,
  777. "max-concurrent-uploads": 10,
  778. "storage-driver": "overlay2",
  779. "storage-opts": ["overlay2.override_kernel_check=true"],
  780. "exec-opts": ["native.cgroupdriver=systemd"],
  781. "registry-mirrors": [
  782. ]
  783. }
  784. EOF
  785. sed -i 's|#oom_score = 0|oom_score = -999|' /etc/containerd/config.toml
  786. # /etc/crictl.yaml
  787. cat << EOF > /etc/crictl.yaml
  788. runtime-endpoint: unix:///var/run/dockershim.sock
  789. image-endpoint: unix:///var/run/dockershim.sock
  790. timeout: 2
  791. debug: false
  792. pull-image-on-create: true
  793. disable-pull-on-run: false
  794. EOF
  795. systemctl enable containerd
  796. systemctl restart containerd
  797. systemctl enable docker
  798. systemctl restart docker
  799. }
  800. # Install containerd
  801. function script::install_containerd() {
  802. local version="-${1:-latest}"
  803. version="${version#-latest}"
  804. # /etc/yum.repos.d/docker-ce.repo
  805. cat << EOF > /etc/yum.repos.d/docker-ce.repo
  806. [docker-ce-stable]
  807. name=Docker CE Stable - \$basearch
  808. baseurl=https://mirrors.aliyun.com/docker-ce/linux/centos/$(rpm --eval '%{centos_ver}')/\$basearch/stable
  809. enabled=1
  810. gpgcheck=1
  811. gpgkey=https://mirrors.aliyun.com/docker-ce/linux/centos/gpg
  812. EOF
  813. if [[ "${OFFLINE_TAG:-}" != "1" ]];then
  814. [ -f "$(which runc)" ] && yum remove -y runc
  815. [ -f "$(which containerd)" ] && yum remove -y containerd.io
  816. yum install -y containerd.io"${version}" containernetworking bash-completion
  817. fi
  818. [ -d /etc/bash_completion.d ] && crictl completion bash > /etc/bash_completion.d/crictl
  819. containerd config default > /etc/containerd/config.toml
  820. sed -i -e "s#k8s.gcr.io#registry.cn-hangzhou.aliyuncs.com/kainstall#g" \
  821. -e "s#SystemdCgroup = false#SystemdCgroup = true#g" \
  822. -e "s#oom_score = 0#oom_score = -999#" \
  823. -e "s#max_concurrent_downloads = 3#max_concurrent_downloads = 10#g" /etc/containerd/config.toml
  824. # /etc/crictl.yaml
  825. cat << EOF > /etc/crictl.yaml
  826. runtime-endpoint: unix:///run/containerd/containerd.sock
  827. image-endpoint: unix:///run/containerd/containerd.sock
  828. timeout: 2
  829. debug: false
  830. pull-image-on-create: true
  831. disable-pull-on-run: false
  832. EOF
  833. systemctl restart containerd
  834. systemctl enable containerd
  835. }
  836. # Install cri-o
  837. function script::install_cri-o() {
  838. local version="${1:-latest}"
  839. version="${version#-latest}"
  840. os="CentOS_$(rpm --eval '%{centos_ver}')" && echo "${os}"
  841. # /etc/yum.repos.d/devel_kubic_libcontainers_stable.repo
  842. cat << EOF > /etc/yum.repos.d/devel_kubic_libcontainers_stable.repo
  843. [devel_kubic_libcontainers_stable]
  844. name=Stable Releases of Upstream github.com/containers packages
  845. type=rpm-md
  846. baseurl=https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/${os}/
  847. gpgcheck=1
  848. gpgkey=https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/${os}/repodata/repomd.xml.key
  849. enabled=1
  850. [devel_kubic_libcontainers_stable_cri-o]
  851. name=devel:kubic:libcontainers:stable:cri-o
  852. type=rpm-md
  853. baseurl=https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable:/cri-o:/${version}/${os}/
  854. gpgcheck=1
  855. gpgkey=https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable:/cri-o:/${version}/${os}/repodata/repomd.xml.key
  856. enabled=1
  857. EOF
  858. if [[ "${OFFLINE_TAG:-}" != "1" ]];then
  859. [ -f "$(which runc)" ] && yum remove -y runc
  860. [ -f "$(which crio)" ] && yum remove -y cri-o
  861. [ -f "$(which docker)" ] && yum remove -y docker-ce docker-ce-cli containerd.io
  862. yum install -y runc cri-o bash-completion --disablerepo=docker-ce-stable || yum install -y runc cri-o bash-completion
  863. fi
  864. [ -d /etc/bash_completion.d ] && \
  865. { crictl completion bash > /etc/bash_completion.d/crictl; \
  866. crio completion bash > /etc/bash_completion.d/crio; \
  867. crio-status completion bash > /etc/bash_completion.d/crio-status; }
  868. [ ! -f /etc/crio/crio.conf ] && crio config --default > /etc/crio/crio.conf
  869. sed -i -e "s#k8s.gcr.io#registry.cn-hangzhou.aliyuncs.com/kainstall#g" \
  870. -e 's|#registries = \[|registries = ["docker.io", "quay.io"]|g' /etc/crio/crio.conf
  871. # /etc/crio/crio.conf
  872. cat << EOF >> /etc/crio/crio.conf
  873. [crio.image]
  874. pause_image = "registry.cn-hangzhou.aliyuncs.com/kainstall/pause:3.6"
  875. EOF
  876. # /etc/containers/registries.conf.d/000-dockerio.conf
  877. [ -d /etc/containers/registries.conf.d ] && cat << EOF > /etc/containers/registries.conf.d/000-dockerio.conf
  878. [[registry]]
  879. prefix = "docker.io"
  880. insecure = false
  881. blocked = false
  882. location = "docker.io"
  883. EOF
  884. # /etc/crictl.yaml
  885. cat << EOF > /etc/crictl.yaml
  886. runtime-endpoint: unix:///var/run/crio/crio.sock
  887. image-endpoint: unix:///var/run/crio/crio.sock
  888. timeout: 2
  889. debug: false
  890. pull-image-on-create: true
  891. disable-pull-on-run: false
  892. EOF
  893. # /etc/cni/net.d/100-crio-bridge.conf
  894. sed -i "s#10.85.0.0/16#${KUBE_POD_SUBNET:-10.85.0.0/16}#g" /etc/cni/net.d/100-crio-bridge.conf
  895. # /etc/cni/net.d/10-crio.conf
  896. cat << EOF > /etc/cni/net.d/10-crio.conf
  897. {
  898. $(grep cniVersion /etc/cni/net.d/100-crio-bridge.conf)
  899. "name": "crio",
  900. "type": "flannel"
  901. }
  902. EOF
  903. mv /etc/cni/net.d/100-crio-bridge.conf /etc/cni/net.d/10-crio.conf /etc/cni/net.d/200-loopback.conf /tmp/
  904. systemctl restart crio
  905. systemctl enable crio
  906. }
  907. # Install kube components
  908. function script::install_kube() {
  909. local version="-${1:-latest}"
  910. version="${version#-latest}"
  911. # /etc/yum.repos.d/kubernetes.repo
  912. cat <<EOF > /etc/yum.repos.d/kubernetes.repo
  913. [kubernetes]
  914. name=Kubernetes
  915. baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
  916. enabled=1
  917. gpgcheck=0
  918. repo_gpgcheck=0
  919. gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
  920. EOF
  921. if [[ "${OFFLINE_TAG:-}" != "1" ]];then
  922. [ -f /usr/bin/kubeadm ] && yum remove -y kubeadm
  923. [ -f /usr/bin/kubelet ] && yum remove -y kubelet
  924. [ -f /usr/bin/kubectl ] && yum remove -y kubectl
  925. yum install -y "kubeadm${version}" "kubelet${version}" "kubectl${version}" --disableexcludes=kubernetes
  926. fi
  927. [ -d /etc/bash_completion.d ] && \
  928. { kubectl completion bash > /etc/bash_completion.d/kubectl; \
  929. kubeadm completion bash > /etc/bash_completion.d/kubeadm; }
  930. [ ! -d /usr/lib/systemd/system/kubelet.service.d ] && mkdir -p /usr/lib/systemd/system/kubelet.service.d
  931. cat << EOF > /usr/lib/systemd/system/kubelet.service.d/11-cgroup.conf
  932. [Service]
  933. CPUAccounting=true
  934. MemoryAccounting=true
  935. BlockIOAccounting=true
  936. ExecStartPre=/bin/bash -c '/bin/mkdir -p /sys/fs/cgroup/{cpuset,memory,hugetlb,systemd,pids,"cpu,cpuacct"}/{system,kube,kubepods}.slice||:'
  937. Slice=kube.slice
  938. EOF
  939. systemctl daemon-reload
  940. systemctl enable kubelet
  941. systemctl restart kubelet
  942. }
  943. # Install haproxy
  944. function script::install_haproxy() {
  945. local api_servers="$*"
  946. if [[ "${OFFLINE_TAG:-}" != "1" ]];then
  947. [ -f /usr/bin/haproxy ] && yum remove -y haproxy
  948. yum install -y haproxy
  949. fi
  950. # /etc/haproxy/haproxy.cfg
  951. [ ! -f /etc/haproxy/haproxy.cfg_bak ] && cp /etc/haproxy/haproxy.cfg{,_bak}
  952. cat << EOF > /etc/haproxy/haproxy.cfg
  953. global
  954. log /dev/log local0
  955. log /dev/log local1 notice
  956. tune.ssl.default-dh-param 2048
  957. defaults
  958. log global
  959. mode http
  960. option dontlognull
  961. timeout connect 5000ms
  962. timeout client 600000ms
  963. timeout server 600000ms
  964. listen stats
  965. bind :19090
  966. mode http
  967. balance
  968. stats uri /haproxy_stats
  969. stats auth admin:admin123
  970. stats admin if TRUE
  971. frontend kube-apiserver-https
  972. mode tcp
  973. option tcplog
  974. bind :6443
  975. default_backend kube-apiserver-backend
  976. backend kube-apiserver-backend
  977. mode tcp
  978. balance roundrobin
  979. stick-table type ip size 200k expire 30m
  980. stick on src
  981. $(index=1;for h in $api_servers;do echo " server apiserver${index} $h:6443 check";index=$((index+1));done)
  982. EOF
  983. systemctl enable haproxy
  984. systemctl restart haproxy
  985. }
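# Note: with the configuration written above, haproxy on each worker listens on :6443 and
# load-balances to the master apiservers; the stats page is served on port 19090 at /haproxy_stats
# (admin:admin123 as configured above).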
  986. # Install helm
  987. function script::install_helm() {
  988. local version="${1:-3.10.1}"
  989. version="${version#-}" # tolerate a leading "-<version>" form as used by the other installers
  990. local path="/tmp"
  991. cd $path
  992. # Download the release tarball (CN mirror)
  993. wget https://mirrors.huaweicloud.com/helm/v$version/helm-v$version-linux-amd64.tar.gz
  994. # Unpack
  995. tar -zxvf helm-v$version-linux-amd64.tar.gz
  996. # Install
  997. sudo mv linux-amd64/helm /usr/local/bin/
  998. # Clean up
  999. rm -rf helm-v$version-linux-amd64.tar.gz linux-amd64
  1000. # Verify
  1001. helm version
  1002. cd ~
  1003. }
  1004. # Check required commands
  1005. function check::command() {
  1006. check::command_exists ssh openssh-clients
  1007. check::command_exists sshpass sshpass
  1008. check::command_exists wget wget
  1009. [[ "${OFFLINE_TAG:-}" == "1" ]] && check::command_exists tar tar
  1010. }
  1011. # Check SSH connectivity
  1012. function check::ssh_conn() {
  1013. for host in $MASTER_NODES $WORKER_NODES
  1014. do
  1015. [ "$host" == "127.0.0.1" ] && continue
  1016. command::exec "${host}" "echo 0"
  1017. check::exit_code "$?" "check" "ssh $host connection" "exit"
  1018. done
  1019. }
  1020. # Check OS support
  1021. function check::os() {
  1022. log::info "[check]" "os support: ${OS_SUPPORT}"
  1023. for host in $MASTER_NODES $WORKER_NODES
  1024. do
  1025. command::exec "${host}" "
  1026. [ -f /etc/os-release ] && source /etc/os-release
  1027. echo client_os:\${ID:-}\${VERSION_ID:-}
  1028. if [[ \"${OS_SUPPORT}\" == *\"\${ID:-}\${VERSION_ID:-}\"* ]]; then
  1029. exit 0
  1030. fi
  1031. exit 1
  1032. "
  1033. check::exit_code "$?" "check" "$host os support" "exit"
  1034. done
  1035. }
  1036. # Check OS kernel version
  1037. function check::kernel() {
  1038. local version=${1:-}
  1039. log::info "[check]" "kernel version not less than ${version}"
  1040. version=$(echo "${version}" | awk -F. '{ printf("%d%03d%03d\n", $1,$2,$3); }')
  1041. for host in $MASTER_NODES $WORKER_NODES
  1042. do
  1043. command::exec "${host}" "
  1044. kernel_version=\$(uname -r)
  1045. kernel_version=\$(echo \${kernel_version/-*} | awk -F. '{ printf(\"%d%03d%03d\n\", \$1,\$2,\$3); }')
  1046. echo kernel_version \${kernel_version}
  1047. [[ \${kernel_version} -ge ${version} ]] && exit 0 || exit 1
  1048. "
  1049. check::exit_code "$?" "check" "$host kernel version" "exit"
  1050. done
  1051. }
  1052. # Check api-server connectivity
  1053. function check::apiserver_conn() {
  1054. command::exec "${MGMT_NODE}" "kubectl get node"
  1055. check::exit_code "$?" "check" "conn apiserver" "exit"
  1056. }
  1057. # Check exit code
  1058. function check::exit_code() {
  1059. local code=${1:-}
  1060. local app=${2:-}
  1061. local desc=${3:-}
  1062. local exit_script=${4:-}
  1063. if [[ "${code}" == "0" ]]; then
  1064. log::info "[${app}]" "${desc} succeeded."
  1065. else
  1066. log::error "[${app}]" "${desc} failed."
  1067. [[ "$exit_script" == "exit" ]] && exit "$code"
  1068. fi
  1069. }
  1070. # Preflight checks
  1071. function check::preflight() {
  1072. # check command
  1073. check::command
  1074. # check ssh conn
  1075. check::ssh_conn
  1076. # check os
  1077. check::os
  1078. # check os kernel
  1079. [[ "${KUBE_NETWORK:-}" == "cilium" ]] && check::kernel 4.9.17
  1080. # check api-server conn
  1081. if [[ $(( ${ADD_TAG:-0} + ${DEL_TAG:-0} + ${UPGRADE_TAG:-0} + ${RENEW_CERT_TAG:-0} )) -gt 0 ]]; then
  1082. check::apiserver_conn
  1083. fi
  1084. }
  1085. # Install packages
  1086. function install::package() {
  1087. # Check the latest stable k8s version
  1088. if [[ "${KUBE_CRI}" == "cri-o" && "${KUBE_CRI_VERSION}" == "latest" ]]; then
  1089. KUBE_CRI_VERSION="${KUBE_VERSION}"
  1090. if [[ "${KUBE_CRI_VERSION}" == "latest" ]]; then
  1091. if command::exec "127.0.0.1" "wget https://storage.googleapis.com/kubernetes-release/release/stable.txt -q -O -"; then
  1092. KUBE_CRI_VERSION="${COMMAND_OUTPUT#v}"
  1093. else
  1094. log::error "[install]" "get kubernetes stable version error. Please specify the version!"
  1095. exit 1
  1096. fi
  1097. fi
  1098. KUBE_CRI_VERSION="${KUBE_CRI_VERSION%.*}"
  1099. fi
  1100. # Install CRI and kube components
  1101. for host in $MASTER_NODES $WORKER_NODES
  1102. do
  1103. # install cri
  1104. log::info "[install]" "install ${KUBE_CRI} on $host."
  1105. command::exec "${host}" "
  1106. export OFFLINE_TAG=${OFFLINE_TAG:-0}
  1107. $(declare -f script::install_"${KUBE_CRI}")
  1108. script::install_${KUBE_CRI} $KUBE_CRI_VERSION
  1109. "
  1110. check::exit_code "$?" "install" "install ${KUBE_CRI} on $host"
  1111. # install kube
  1112. log::info "[install]" "install kube on $host"
  1113. command::exec "${host}" "
  1114. export OFFLINE_TAG=${OFFLINE_TAG:-0}
  1115. $(declare -f script::install_kube)
  1116. script::install_kube $KUBE_VERSION
  1117. "
  1118. check::exit_code "$?" "install" "install kube on $host"
  1119. done
1120. # Configure kube
  1121. local apiservers=$MASTER_NODES
  1122. if [[ "$apiservers" == "127.0.0.1" ]]; then
  1123. command::exec "${MGMT_NODE}" "ip -o route get to 8.8.8.8 | sed -n 's/.*src \([0-9.]\+\).*/\1/p'"
  1124. get::command_output "apiservers" "$?"
  1125. fi
1126. # Get api-server addresses from the existing cluster (when adding nodes)
  1127. if [[ "${ADD_TAG:-}" == "1" ]]; then
  1128. command::exec "${MGMT_NODE}" "
  1129. kubectl get node --selector='node-role.kubernetes.io/master' -o jsonpath='{$.items[*].status.addresses[?(@.type==\"InternalIP\")].address}'
  1130. "
  1131. get::command_output "apiservers" "$?"
  1132. fi
1133. # Install haproxy
  1134. for host in $WORKER_NODES
  1135. do
  1136. # install haproxy
  1137. log::info "[install]" "install haproxy on $host"
  1138. command::exec "${host}" "
  1139. export OFFLINE_TAG=${OFFLINE_TAG:-0}
  1140. $(declare -f script::install_haproxy)
  1141. script::install_haproxy \"$apiservers\"
  1142. "
  1143. check::exit_code "$?" "install" "install haproxy on $host"
  1144. done
1145. # 10-year certificates
  1146. if [[ "${CERT_YEAR_TAG:-}" == "1" ]]; then
  1147. local version="${KUBE_VERSION}"
  1148. if [[ "${version}" == "latest" ]]; then
  1149. if command::exec "127.0.0.1" "wget https://storage.googleapis.com/kubernetes-release/release/stable.txt -q -O -"; then
  1150. version="${COMMAND_OUTPUT#v}"
  1151. else
  1152. log::error "[install]" "get kubernetes stable version error. Please specify the version!"
  1153. exit 1
  1154. fi
  1155. fi
  1156. log::info "[install]" "download kubeadm 10 years certs client"
  1157. local certs_file="${OFFLINE_DIR}/bins/kubeadm-linux-amd64"
  1158. MGMT_NODE="127.0.0.1" utils::download_file "${GITHUB_PROXY}https://github.com/lework/kubeadm-certs/releases/download/v${version}/kubeadm-linux-amd64" "${certs_file}"
  1159. for host in $MASTER_NODES $WORKER_NODES
  1160. do
  1161. log::info "[install]" "scp kubeadm client to $host"
  1162. command::scp "${host}" "${certs_file}" "/tmp/kubeadm-linux-amd64"
  1163. check::exit_code "$?" "install" "scp kubeadm client to $host" "exit"
  1164. command::exec "${host}" "
  1165. set -e
  1166. if [[ -f /tmp/kubeadm-linux-amd64 ]]; then
  1167. [[ -f /usr/bin/kubeadm && ! -f /usr/bin/kubeadm_src ]] && mv -fv /usr/bin/kubeadm{,_src}
  1168. mv -fv /tmp/kubeadm-linux-amd64 /usr/bin/kubeadm
  1169. chmod +x /usr/bin/kubeadm
  1170. else
  1171. echo \"not found /tmp/kubeadm-linux-amd64\"
  1172. exit 1
  1173. fi
  1174. "
  1175. check::exit_code "$?" "install" "$host: use kubeadm 10 years certs client"
  1176. done
  1177. fi
1178. # Install helm
  1179. for host in $MASTER_NODES $WORKER_NODES
  1180. do
  1181. log::info "[install]" "install helm on $host"
  1182. command::exec "${host}" "
  1183. export OFFLINE_TAG=${OFFLINE_TAG:-0}
  1184. $(declare -f script::install_helm)
  1185. script::install_helm $HELM_VERSION
  1186. "
  1187. check::exit_code "$?" "install" "install helm on $host"
  1188. done
  1189. }
1190. # Upgrade the node kernel
  1191. function init::upgrade_kernel() {
  1192. [[ "${UPGRADE_KERNEL_TAG:-}" != "1" ]] && return
  1193. for host in $MASTER_NODES $WORKER_NODES
  1194. do
  1195. log::info "[init]" "upgrade kernel: $host"
  1196. command::exec "${host}" "
  1197. export OFFLINE_TAG=${OFFLINE_TAG:-0}
  1198. $(declare -f script::upgrade_kernel)
  1199. script::upgrade_kernel
  1200. "
  1201. check::exit_code "$?" "init" "upgrade kernel $host" "exit"
  1202. done
  1203. for host in $MASTER_NODES $WORKER_NODES
  1204. do
  1205. command::exec "${host}" "bash -c 'sleep 15 && reboot' &>/dev/null &"
  1206. check::exit_code "$?" "init" "$host: Wait for 15s to restart"
  1207. done
  1208. log::info "[notice]" "Please execute the command again!"
  1209. log::access "[command]" "bash $0 ${SCRIPT_PARAMETER// --upgrade-kernel/}"
  1210. exit 0
  1211. }
1212. # Renew node certificates
  1213. function cert::renew_node() {
  1214. local role="${1:-master}"
  1215. local hosts=""
  1216. local kubelet_config=""
  1217. command::exec "${MGMT_NODE}" "
  1218. kubectl get node --selector='node-role.kubernetes.io/${role}' -o jsonpath='{range.items[*]}{.metadata.name } {end}'
  1219. "
  1220. get::command_output "hosts" "$?"
  1221. for host in ${hosts}
  1222. do
  1223. log::info "[cert]" "drain $host"
  1224. command::exec "${MGMT_NODE}" "kubectl drain $host --force --ignore-daemonsets --delete-local-data"
  1225. check::exit_code "$?" "cert" "$host: drain"
  1226. sleep 5
  1227. if [[ "${role}" == "master" ]]; then
  1228. command::exec "${host}" "cp -rf /etc/kubernetes /etc/kubernetes_\$(date +%Y-%m-%d)"
  1229. check::exit_code "$?" "cert" "$host: backup kubernetes config"
  1230. command::exec "${host}" "kubeadm certs renew all 2>/dev/null|| kubeadm alpha certs renew all"
  1231. check::exit_code "$?" "cert" "$host: renew certs"
  1232. command::exec "${host}" "
  1233. $(declare -f utils::retry)
  1234. kill -s SIGHUP \$(pidof etcd) && \
  1235. utils::retry 10 \"echo -n | openssl s_client -connect localhost:2379 2>&1 | sed -ne '/-BEGIN CERTIFICATE-/,/-END CERTIFICATE-/p' | openssl x509 -text -noout | grep Not\"
  1236. "
  1237. check::exit_code "$?" "cert" "$host: restart etcd"
  1238. command::exec "${host}" "
  1239. $(declare -f utils::retry)
  1240. kill -s SIGHUP \$(pidof kube-apiserver) && \
  1241. utils::retry 10 \"echo -n | openssl s_client -connect localhost:6443 2>&1 | sed -ne '/-BEGIN CERTIFICATE-/,/-END CERTIFICATE-/p' | openssl x509 -text -noout | grep Not\"
  1242. "
  1243. check::exit_code "$?" "cert" "$host: restart kube-apiserver"
  1244. command::exec "${host}" "
  1245. $(declare -f utils::retry)
  1246. kill -s SIGHUP \$(pidof kube-controller-manager) && \
  1247. utils::retry 10 \"echo -n | openssl s_client -connect localhost:10257 2>&1 | sed -ne '/-BEGIN CERTIFICATE-/,/-END CERTIFICATE-/p' | openssl x509 -text -noout | grep Not\"
  1248. "
  1249. check::exit_code "$?" "cert" "$host: restart kube-controller-manager"
  1250. command::exec "${host}" "
  1251. $(declare -f utils::retry)
  1252. kill -s SIGHUP \$(pidof kube-scheduler) && \
  1253. utils::retry 10 \"echo -n | openssl s_client -connect localhost:10259 2>&1 | sed -ne '/-BEGIN CERTIFICATE-/,/-END CERTIFICATE-/p' | openssl x509 -text -noout | grep Not\"
  1254. "
  1255. check::exit_code "$?" "cert" "$host: restart kube-scheduler"
  1256. fi
  1257. log::info "[cert]" "get kubelet config"
  1258. command::exec "${MGMT_NODE}" "
  1259. kubeadm kubeconfig user --org system:nodes --client-name system:node:${host} --config /etc/kubernetes/kubeadmcfg.yaml || kubeadm alpha kubeconfig user --org system:nodes --client-name system:node:${host} --config /etc/kubernetes/kubeadmcfg.yaml
  1260. "
  1261. get::command_output "kubelet_config" "$?" "exit"
  1262. if [[ "$kubelet_config" != "" ]]; then
  1263. log::info "[cert]" "copy kubelet config"
  1264. command::exec "${host}" "
  1265. cp /etc/kubernetes/kubelet.conf /etc/kubernetes/kubelet.conf_bak
  1266. echo '$(printf "%s" "${kubelet_config}" | sed 's#https://.*:#https://127.0.0.1:#g')' > /etc/kubernetes/kubelet.conf
  1267. "
  1268. check::exit_code "$?" "cert" "$host: copy kubelet config"
  1269. command::exec "${host}" "rm -rfv /var/lib/kubelet/pki/*"
  1270. check::exit_code "$?" "cert" "$host: delete kubelet pki files"
  1271. command::exec "${host}" "
  1272. $(declare -f utils::retry)
  1273. systemctl restart kubelet && \
  1274. utils::retry 10 \"echo -n | openssl s_client -connect localhost:10250 2>&1 | sed -ne '/-BEGIN CERTIFICATE-/,/-END CERTIFICATE-/p' | openssl x509 -text -noout | grep Not\"
  1275. "
  1276. local status="$?"
  1277. check::exit_code "${status}" "cert" "$host: restart kubelet"
  1278. if [[ "${status}" == "0" ]]; then
  1279. sleep 5
  1280. command::exec "${MGMT_NODE}" "kubectl uncordon ${host}"
  1281. check::exit_code "$?" "cert" "uncordon ${host} node"
  1282. fi
  1283. fi
  1284. done
  1285. }
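# Note (illustrative recap of the checks above, no extra logic): each SIGHUP/restart
# is verified by dialing the component's TLS port and printing the served
# certificate's validity, e.g. for the apiserver:
#   echo -n | openssl s_client -connect localhost:6443 2>&1 \
#     | sed -ne '/-BEGIN CERTIFICATE-/,/-END CERTIFICATE-/p' \
#     | openssl x509 -text -noout | grep Not
# which shows the new "Not Before" / "Not After" dates once the renewed cert is served.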
1286. # Renew cluster certificates
  1287. function cert::renew() {
  1288. log::info "[cert]" "renew cluster cert"
  1289. cert::renew_node "master"
  1290. cert::renew_node "worker"
  1291. log::info "[cert]" "cluster status"
  1292. command::exec "${MGMT_NODE}" "
  1293. echo
  1294. kubectl get node
  1295. echo
  1296. kubeadm certs check-expiration 2>/dev/null || kubeadm alpha certs check-expiration
  1297. " && printf "%s" "${COMMAND_OUTPUT}"
  1298. }
1299. # Initialize node configuration
1300. function init::node_config() {
1301. local master_index=${master_index:-1}
1302. local worker_index=${worker_index:-1}
1303. # Get the internal IP of the MGMT_NODE host
  1304. if [[ "$MGMT_NODE" == "127.0.0.1" || "$MGMT_NODE_IP" == "" ]]; then
  1305. log::info "[init]" "Get $MGMT_NODE InternalIP."
  1306. command::exec "${MGMT_NODE}" "
  1307. ip -4 route get 8.8.8.8 2>/dev/null | head -1 | awk '{print \$7}'
  1308. "
  1309. get::command_output "MGMT_NODE_IP" "$?" "exit"
  1310. if [[ "$MGMT_NODE" != "$MGMT_NODE_IP" ]]; then
1311. log::warning "[init]" "IP mismatch: $MGMT_NODE(MGMT_NODE) != $MGMT_NODE_IP(MGMT_NODE_IP)"
  1312. fi
  1313. else
  1314. MGMT_NODE_IP=$MGMT_NODE
  1315. fi
  1316. # master
  1317. for host in $MASTER_NODES
  1318. do
  1319. log::info "[init]" "master: $host"
  1320. command::exec "${host}" "
  1321. export OFFLINE_TAG=${OFFLINE_TAG:-0} KUBE_APISERVER=${KUBE_APISERVER} SKIP_SET_OS_REPO=${SKIP_SET_OS_REPO:-false}
  1322. $(declare -f script::init_node)
  1323. script::init_node
  1324. "
  1325. check::exit_code "$?" "init" "init master $host" "exit"
1326. # Set hostname and hosts entries
  1327. command::exec "${host}" "
  1328. printf \"\\n${MGMT_NODE_IP} $KUBE_APISERVER\\n$node_hosts\" >> /etc/hosts
  1329. hostnamectl set-hostname ${HOSTNAME_PREFIX}-master-node${master_index}
  1330. "
  1331. check::exit_code "$?" "init" "$host set hostname and hostname resolution"
  1332. # set audit-policy
  1333. log::info "[init]" "$host: set audit-policy file."
  1334. command::exec "${host}" "
1335. [ ! -d /etc/kubernetes ] && mkdir -p /etc/kubernetes
  1336. cat << EOF > /etc/kubernetes/audit-policy.yaml
  1337. # Log all requests at the Metadata level.
  1338. apiVersion: audit.k8s.io/v1
  1339. kind: Policy
  1340. rules:
  1341. - level: Metadata
  1342. EOF
  1343. "
  1344. check::exit_code "$?" "init" "$host: set audit-policy file" "exit"
  1345. master_index=$((master_index + 1))
  1346. done
  1347. # worker
  1348. for host in $WORKER_NODES
  1349. do
  1350. log::info "[init]" "worker: $host"
  1351. command::exec "${host}" "
  1352. export OFFLINE_TAG=${OFFLINE_TAG:-0} KUBE_APISERVER=${KUBE_APISERVER} SKIP_SET_OS_REPO=${SKIP_SET_OS_REPO:-false}
  1353. $(declare -f script::init_node)
  1354. script::init_node
  1355. "
  1356. check::exit_code "$?" "init" "init worker $host" "exit"
1357. # Set hostname and hosts entries
  1358. command::exec "${host}" "
  1359. printf \"\\n127.0.0.1 $KUBE_APISERVER\\n$node_hosts\" >> /etc/hosts
  1360. hostnamectl set-hostname ${HOSTNAME_PREFIX}-worker-node${worker_index}
  1361. "
  1362. worker_index=$((worker_index + 1))
  1363. done
  1364. }
1365. # Initialize nodes
  1366. function init::node() {
  1367. init::upgrade_kernel
  1368. local node_hosts=""
  1369. local i=1
  1370. for h in $MASTER_NODES
  1371. do
  1372. node_hosts="${node_hosts}\n$h ${HOSTNAME_PREFIX}-master-node${i}"
  1373. i=$((i + 1))
  1374. done
  1375. local i=1
  1376. for h in $WORKER_NODES
  1377. do
  1378. node_hosts="${node_hosts}\n$h ${HOSTNAME_PREFIX}-worker-node${i}"
  1379. i=$((i + 1))
  1380. done
  1381. init::node_config
  1382. }
1383. # Initialize the nodes being added
  1384. function init::add_node() {
  1385. init::upgrade_kernel
  1386. local master_index=0
  1387. local worker_index=0
  1388. local node_hosts=""
  1389. local add_node_hosts=""
  1390. command::exec "${MGMT_NODE}" "
  1391. kubectl get node --selector='node-role.kubernetes.io/master' -o jsonpath='{range.items[*]}{.status.addresses[?(@.type==\"InternalIP\")].address } {end}' | awk '{print \$1}'
  1392. "
  1393. get::command_output "MGMT_NODE" "$?" "exit"
1394. # Get hostnames of existing cluster nodes
  1395. command::exec "${MGMT_NODE}" "
  1396. kubectl get node -o jsonpath='{range.items[*]}{.status.addresses[?(@.type==\"InternalIP\")].address} {.metadata.name }\\n{end}'
  1397. "
  1398. get::command_output "node_hosts" "$?" "exit"
  1399. for host in $MASTER_NODES $WORKER_NODES
  1400. do
  1401. if [[ $node_hosts == *"$host"* ]]; then
  1402. log::error "[init]" "The host $host is already in the cluster!"
  1403. exit 1
  1404. fi
  1405. done
  1406. if [[ "$MASTER_NODES" != "" ]]; then
  1407. command::exec "${MGMT_NODE}" "
  1408. kubectl get node --selector='node-role.kubernetes.io/master' -o jsonpath='{\$.items[*].metadata.name}' |grep -Eo 'node[0-9]*'|grep -Eo '[0-9]*'|awk -F ' ' 'BEGIN {max = 0} {if (\$0+0 > max+0) max=\$0} END {print max}'
  1409. "
  1410. get::command_output "master_index" "$?" "exit"
  1411. master_index=$(( master_index + 1 ))
  1412. local i=$master_index
  1413. for host in $MASTER_NODES
  1414. do
  1415. add_node_hosts="${add_node_hosts}\n${host:-} ${HOSTNAME_PREFIX}-master-node${i}"
  1416. i=$((i + 1))
  1417. done
  1418. fi
  1419. if [[ "$WORKER_NODES" != "" ]]; then
  1420. command::exec "${MGMT_NODE}" "
  1421. kubectl get node --selector='node-role.kubernetes.io/worker' -o jsonpath='{\$.items[*].metadata.name}'| grep -Eo 'node[0-9]*'|grep -Eo '[0-9]*'|awk 'BEGIN {max = 0} {if (\$0+0 > max+0) max=\$0} END {print max}' || echo 0
  1422. "
  1423. get::command_output "worker_index" "$?" "exit"
  1424. worker_index=$(( worker_index + 1 ))
  1425. local i=$worker_index
  1426. for host in $WORKER_NODES
  1427. do
  1428. add_node_hosts="${add_node_hosts}\n${host:-} ${HOSTNAME_PREFIX}-worker-node${i}"
  1429. i=$((i + 1))
  1430. done
  1431. fi
1432. # Add hostname resolution for the new nodes on all existing cluster nodes
  1433. for host in $(echo -ne "$node_hosts" | awk '{print $1}')
  1434. do
  1435. command::exec "${host}" "
  1436. printf \"$add_node_hosts\" >> /etc/hosts
  1437. "
  1438. check::exit_code "$?" "init" "$host add new node hostname resolution"
  1439. done
  1440. node_hosts="${node_hosts}\n${add_node_hosts}"
  1441. init::node_config
  1442. }
1443. # Cluster initialization
  1444. function kubeadm::init() {
  1445. log::info "[kubeadm init]" "kubeadm init on ${MGMT_NODE}"
  1446. log::info "[kubeadm init]" "${MGMT_NODE}: set kubeadmcfg.yaml"
  1447. command::exec "${MGMT_NODE}" "
  1448. PAUSE_VERSION=$(kubeadm config images list 2>/dev/null | awk -F: '/pause/ {print $2}')
  1449. cat << EOF > /etc/kubernetes/kubeadmcfg.yaml
  1450. ---
  1451. apiVersion: kubeadm.k8s.io/v1beta2
  1452. kind: InitConfiguration
  1453. ${kubelet_nodeRegistration}
  1454. ---
  1455. apiVersion: kubeproxy.config.k8s.io/v1alpha1
  1456. kind: KubeProxyConfiguration
  1457. mode: ipvs
  1458. ipvs:
  1459. minSyncPeriod: 5s
  1460. syncPeriod: 5s
1461. # ipvs load-balancing scheduler
  1462. scheduler: 'wrr'
  1463. ---
  1464. apiVersion: kubelet.config.k8s.io/v1beta1
  1465. kind: KubeletConfiguration
  1466. maxPods: 200
  1467. cgroupDriver: systemd
  1468. runtimeRequestTimeout: 5m
1469. # Allow kubelet to start even when swap is enabled
  1470. failSwapOn: false
  1471. nodeStatusUpdateFrequency: 5s
  1472. rotateCertificates: true
  1473. imageGCLowThresholdPercent: 70
  1474. imageGCHighThresholdPercent: 80
1475. # Soft eviction thresholds
  1476. evictionSoft:
  1477. imagefs.available: 15%
  1478. memory.available: 512Mi
  1479. nodefs.available: 15%
  1480. nodefs.inodesFree: 10%
1481. # How long a soft threshold must be exceeded before eviction starts
  1482. evictionSoftGracePeriod:
  1483. imagefs.available: 3m
  1484. memory.available: 1m
  1485. nodefs.available: 3m
  1486. nodefs.inodesFree: 1m
1487. # Hard eviction thresholds
  1488. evictionHard:
  1489. imagefs.available: 10%
  1490. memory.available: 256Mi
  1491. nodefs.available: 10%
  1492. nodefs.inodesFree: 5%
  1493. evictionMaxPodGracePeriod: 30
1494. # Node resource reservations
  1495. kubeReserved:
  1496. cpu: 200m\$(if [[ \$(cat /proc/meminfo | awk '/MemTotal/ {print \$2}') -gt 3670016 ]]; then echo -e '\n memory: 256Mi';fi)
  1497. ephemeral-storage: 1Gi
  1498. systemReserved:
  1499. cpu: 300m\$(if [[ \$(cat /proc/meminfo | awk '/MemTotal/ {print \$2}') -gt 3670016 ]]; then echo -e '\n memory: 512Mi';fi)
  1500. ephemeral-storage: 1Gi
  1501. kubeReservedCgroup: /kube.slice
  1502. systemReservedCgroup: /system.slice
  1503. enforceNodeAllocatable:
  1504. - pods
  1505. ---
  1506. apiVersion: kubeadm.k8s.io/v1beta2
  1507. kind: ClusterConfiguration
  1508. kubernetesVersion: $KUBE_VERSION
  1509. controlPlaneEndpoint: $KUBE_APISERVER:6443
  1510. networking:
  1511. dnsDomain: $KUBE_DNSDOMAIN
  1512. podSubnet: $KUBE_POD_SUBNET
  1513. serviceSubnet: $KUBE_SERVICE_SUBNET
  1514. imageRepository: $KUBE_IMAGE_REPO
  1515. apiServer:
  1516. certSANs:
  1517. - 127.0.0.1
  1518. - $KUBE_APISERVER
  1519. $(for h in $MASTER_NODES;do echo " - $h";done)
  1520. extraArgs:
  1521. event-ttl: '720h'
  1522. service-node-port-range: '30000-50000'
1523. # Audit log settings
  1524. audit-log-maxage: '20'
  1525. audit-log-maxbackup: '10'
  1526. audit-log-maxsize: '100'
  1527. audit-log-path: /var/log/kube-audit/audit.log
  1528. audit-policy-file: /etc/kubernetes/audit-policy.yaml
  1529. extraVolumes:
  1530. - name: audit-config
  1531. hostPath: /etc/kubernetes/audit-policy.yaml
  1532. mountPath: /etc/kubernetes/audit-policy.yaml
  1533. readOnly: true
  1534. pathType: File
  1535. - name: audit-log
  1536. hostPath: /var/log/kube-audit
  1537. mountPath: /var/log/kube-audit
  1538. pathType: DirectoryOrCreate
  1539. - name: localtime
  1540. hostPath: /etc/localtime
  1541. mountPath: /etc/localtime
  1542. readOnly: true
  1543. pathType: File
  1544. controllerManager:
  1545. extraArgs:
  1546. bind-address: 0.0.0.0
  1547. node-cidr-mask-size: '24'
  1548. deployment-controller-sync-period: '10s'
  1549. node-monitor-grace-period: '20s'
  1550. pod-eviction-timeout: '2m'
  1551. terminated-pod-gc-threshold: '30'
  1552. experimental-cluster-signing-duration: 87600h
  1553. feature-gates: RotateKubeletServerCertificate=true
  1554. extraVolumes:
  1555. - hostPath: /etc/localtime
  1556. mountPath: /etc/localtime
  1557. name: localtime
  1558. readOnly: true
  1559. pathType: File
  1560. scheduler:
  1561. extraArgs:
  1562. bind-address: 0.0.0.0
  1563. extraVolumes:
  1564. - hostPath: /etc/localtime
  1565. mountPath: /etc/localtime
  1566. name: localtime
  1567. readOnly: true
  1568. pathType: File
  1569. $(if [[ "${KUBE_VERSION}" == "1.21.1" ]]; then
  1570. echo "dns:
  1571. type: CoreDNS
  1572. imageRepository: docker.io
  1573. imageTag: 1.8.0"
  1574. fi)
  1575. EOF
  1576. "
  1577. check::exit_code "$?" "kubeadm init" "${MGMT_NODE}: set kubeadmcfg.yaml" "exit"
  1578. log::info "[kubeadm init]" "${MGMT_NODE}: kubeadm init start."
  1579. command::exec "${MGMT_NODE}" "kubeadm init --config=/etc/kubernetes/kubeadmcfg.yaml --upload-certs"
  1580. check::exit_code "$?" "kubeadm init" "${MGMT_NODE}: kubeadm init" "exit"
  1581. sleep 3
  1582. log::info "[kubeadm init]" "${MGMT_NODE}: set kube config."
  1583. command::exec "${MGMT_NODE}" "
  1584. mkdir -p \$HOME/.kube
  1585. sudo cp -f /etc/kubernetes/admin.conf \$HOME/.kube/config
  1586. "
  1587. check::exit_code "$?" "kubeadm init" "${MGMT_NODE}: set kube config" "exit"
  1588. if [[ "$(echo "$MASTER_NODES" | wc -w)" == "1" ]]; then
  1589. log::info "[kubeadm init]" "${MGMT_NODE}: delete master taint"
  1590. command::exec "${MGMT_NODE}" "kubectl taint nodes --all node-role.kubernetes.io/master-"
  1591. check::exit_code "$?" "kubeadm init" "${MGMT_NODE}: delete master taint"
  1592. fi
  1593. command::exec "${MGMT_NODE}" "
  1594. kubectl create clusterrolebinding node-client-auto-approve-csr --clusterrole=system:certificates.k8s.io:certificatesigningrequests:nodeclient --user=kubelet-bootstrap
  1595. kubectl create clusterrolebinding node-client-auto-renew-crt --clusterrole=system:certificates.k8s.io:certificatesigningrequests:selfnodeclient --group=system:nodes
  1596. kubectl create clusterrolebinding node-server-auto-renew-crt --clusterrole=system:certificates.k8s.io:certificatesigningrequests:selfnodeserver --group=system:nodes
  1597. "
  1598. check::exit_code "$?" "kubeadm init" "Auto-Approve kubelet cert csr" "exit"
  1599. }
1600. # Join the cluster
  1601. function kubeadm::join() {
  1602. log::info "[kubeadm join]" "master: get join token and cert info"
  1603. command::exec "${MGMT_NODE}" "
  1604. openssl x509 -pubkey -in /etc/kubernetes/pki/ca.crt | openssl rsa -pubin -outform der 2>/dev/null | openssl dgst -sha256 -hex | sed 's/^.* //'
  1605. "
  1606. get::command_output "CACRT_HASH" "$?" "exit"
  1607. command::exec "${MGMT_NODE}" "
  1608. kubeadm init phase upload-certs --upload-certs --config /etc/kubernetes/kubeadmcfg.yaml 2>> /dev/null | tail -1
  1609. "
  1610. get::command_output "INTI_CERTKEY" "$?" "exit"
  1611. command::exec "${MGMT_NODE}" "
  1612. kubeadm token create
  1613. "
  1614. get::command_output "INIT_TOKEN" "$?" "exit"
  1615. command::exec "${MGMT_NODE}" "
  1616. kubeadm config images list 2>/dev/null | awk -F: '/pause/ {print \$2}'
  1617. "
  1618. get::command_output "PAUSE_VERSION" "$?"
  1619. for host in $MASTER_NODES
  1620. do
  1621. [[ "${MGMT_NODE}" == "$host" ]] && continue
  1622. log::info "[kubeadm join]" "master $host join cluster."
  1623. command::exec "${host}" "
  1624. cat << EOF > /etc/kubernetes/kubeadmcfg.yaml
  1625. ---
  1626. apiVersion: kubeadm.k8s.io/v1beta2
  1627. kind: JoinConfiguration
  1628. discovery:
  1629. bootstrapToken:
  1630. apiServerEndpoint: $KUBE_APISERVER:6443
  1631. caCertHashes:
  1632. - sha256:${CACRT_HASH:-}
  1633. token: ${INIT_TOKEN}
  1634. timeout: 5m0s
  1635. controlPlane:
  1636. certificateKey: ${INTI_CERTKEY:-}
  1637. ${kubelet_nodeRegistration}
  1638. EOF
  1639. kubeadm join --config /etc/kubernetes/kubeadmcfg.yaml
  1640. "
  1641. check::exit_code "$?" "kubeadm join" "master $host join cluster"
  1642. log::info "[kubeadm join]" "$host: set kube config."
  1643. command::exec "${host}" "
  1644. mkdir -p \$HOME/.kube
  1645. sudo cp -f /etc/kubernetes/admin.conf \$HOME/.kube/config
  1646. "
  1647. check::exit_code "$?" "kubeadm join" "$host: set kube config" "exit"
  1648. command::exec "${host}" "
  1649. sed -i 's#.*$KUBE_APISERVER#127.0.0.1 $KUBE_APISERVER#g' /etc/hosts
  1650. "
  1651. done
  1652. for host in $WORKER_NODES
  1653. do
  1654. log::info "[kubeadm join]" "worker $host join cluster."
  1655. command::exec "${host}" "
  1656. mkdir -p /etc/kubernetes/manifests
  1657. cat << EOF > /etc/kubernetes/kubeadmcfg.yaml
  1658. ---
  1659. apiVersion: kubeadm.k8s.io/v1beta2
  1660. kind: JoinConfiguration
  1661. discovery:
  1662. bootstrapToken:
  1663. apiServerEndpoint: $KUBE_APISERVER:6443
  1664. caCertHashes:
  1665. - sha256:${CACRT_HASH:-}
  1666. token: ${INIT_TOKEN}
  1667. timeout: 5m0s
  1668. ${kubelet_nodeRegistration}
  1669. EOF
  1670. kubeadm join --config /etc/kubernetes/kubeadmcfg.yaml
  1671. "
  1672. check::exit_code "$?" "kubeadm join" "worker $host join cluster"
  1673. log::info "[kubeadm join]" "set $host worker node role."
  1674. command::exec "${MGMT_NODE}" "
  1675. kubectl get node --selector='!node-role.kubernetes.io/master' | grep '<none>' | awk '{print \"kubectl label node \" \$1 \" node-role.kubernetes.io/worker= --overwrite\" }' | bash
  1676. "
  1677. check::exit_code "$?" "kubeadm join" "set $host worker node role"
  1678. done
  1679. }
1680. # Wait for a resource to become ready
  1681. function kube::wait() {
  1682. local app=$1
  1683. local namespace=$2
  1684. local resource=$3
  1685. local selector=${4:-}
  1686. sleep 3
  1687. log::info "[waiting]" "waiting $app"
  1688. command::exec "${MGMT_NODE}" "
  1689. $(declare -f utils::retry)
  1690. utils::retry 6 kubectl wait --namespace ${namespace} \
  1691. --for=condition=ready ${resource} \
  1692. --selector=$selector \
  1693. --timeout=60s
  1694. "
  1695. local status="$?"
  1696. check::exit_code "$status" "waiting" "$app ${resource} ready"
  1697. return "$status"
  1698. }
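# Example (illustrative) usage, matching the calls in add::network below:
#   kube::wait "calico-node" "kube-system" "pods" "k8s-app=calico-node"
# i.e. retry "kubectl wait --for=condition=ready" up to 6 times with a 60s timeout
# for pods in kube-system matching the label selector.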
1699. # Apply a manifest
  1700. function kube::apply() {
  1701. local file=$1
  1702. log::info "[apply]" "$file"
  1703. command::exec "${MGMT_NODE}" "
  1704. $(declare -f utils::retry)
  1705. if [ -f \"$file\" ]; then
  1706. utils::retry 6 kubectl apply --wait=true --timeout=10s -f \"$file\"
  1707. else
  1708. utils::retry 6 \"cat <<EOF | kubectl apply --wait=true --timeout=10s -f -
  1709. \$(printf \"%s\" \"${2:-}\")
  1710. EOF
  1711. \"
  1712. fi
  1713. "
  1714. local status="$?"
  1715. check::exit_code "$status" "apply" "add $file" "exit"
  1716. return "$status"
  1717. }
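# Example (illustrative; the "demo" names are made up): kube::apply accepts either
# a manifest file path, or a label plus an inline manifest string (the inline form
# is used below for the hubble-ui ingress and the etcd-snapshot CronJob):
#   kube::apply "${OFFLINE_DIR}/manifests/calico.yaml"
#   kube::apply "demo-namespace" "
#   apiVersion: v1
#   kind: Namespace
#   metadata:
#     name: demo
#   "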
1718. # Cluster status
  1719. function kube::status() {
  1720. sleep 5
  1721. log::info "[cluster]" "cluster status"
  1722. command::exec "${MGMT_NODE}" "
  1723. echo
  1724. kubectl get node -o wide
  1725. echo
  1726. kubectl get pods -A
  1727. " && printf "%s" "${COMMAND_OUTPUT}"
  1728. }
1729. # Add or remove haproxy backend servers
  1730. function config::haproxy_backend() {
  1731. local action=${1:-add}
  1732. local action_cmd=""
  1733. local master_nodes
  1734. if [[ "$MASTER_NODES" == "" || "$MASTER_NODES" == "127.0.0.1" ]]; then
  1735. return
  1736. fi
  1737. command::exec "${MGMT_NODE}" "
  1738. kubectl get node --selector='node-role.kubernetes.io/master' -o jsonpath='{\$.items[*].status.addresses[?(@.type==\"InternalIP\")].address}'
  1739. "
  1740. get::command_output "master_nodes" "$?" "exit"
  1741. for m in $MASTER_NODES
  1742. do
  1743. if [[ "${action}" == "add" ]]; then
  1744. num=$(echo "${m}"| awk -F'.' '{print $4}')
  1745. action_cmd="${action_cmd}\necho \" server apiserver${num} ${m}:6443 check\" >> /etc/haproxy/haproxy.cfg"
  1746. else
  1747. [[ "${master_nodes}" == *"${m}"* ]] || return
  1748. action_cmd="${action_cmd}\n sed -i -e \"/${m}/d\" /etc/haproxy/haproxy.cfg"
  1749. fi
  1750. done
  1751. command::exec "${MGMT_NODE}" "
  1752. kubectl get node --selector='!node-role.kubernetes.io/master' -o jsonpath='{\$.items[*].status.addresses[?(@.type==\"InternalIP\")].address}'
  1753. "
  1754. get::command_output "worker_nodes" "$?"
  1755. for host in ${worker_nodes:-}
  1756. do
  1757. log::info "[config]" "worker ${host}: ${action} apiserver from haproxy"
  1758. command::exec "${host}" "
  1759. $(echo -ne "${action_cmd}")
  1760. haproxy -c -f /etc/haproxy/haproxy.cfg && systemctl reload haproxy
  1761. "
  1762. check::exit_code "$?" "config" "worker ${host}: ${action} apiserver(${m}) from haproxy"
  1763. done
  1764. }
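# Example (illustrative, address made up): adding master 192.168.77.131 appends
#   server apiserver131 192.168.77.131:6443 check
# to /etc/haproxy/haproxy.cfg on every worker, validates the file with
# "haproxy -c" and reloads the service; any other action value deletes the
# matching "server" line instead (del::node calls this with "remove").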
1765. # Update etcd snapshot job replicas
  1766. function config::etcd_snapshot() {
  1767. command::exec "${MGMT_NODE}" "
  1768. count=\$(kubectl get node --selector='node-role.kubernetes.io/master' --no-headers | wc -l)
  1769. kubectl -n kube-system patch cronjobs etcd-snapshot --patch \"
  1770. spec:
  1771. jobTemplate:
  1772. spec:
  1773. completions: \${count:-1}
  1774. parallelism: \${count:-1}
  1775. \"
  1776. "
  1777. check::exit_code "$?" "config" "etcd-snapshot completions options"
  1778. }
1779. # Capture a command's output into a named variable
  1780. function get::command_output() {
  1781. local app="$1"
  1782. local status="$2"
  1783. local is_exit="${3:-}"
  1784. if [[ "$status" == "0" && "${COMMAND_OUTPUT}" != "" ]]; then
  1785. log::info "[command]" "get $app value succeeded."
  1786. eval "$app=\"${COMMAND_OUTPUT}\""
  1787. else
  1788. log::error "[command]" "get $app value failed."
  1789. [[ "$is_exit" == "exit" ]] && exit "$status"
  1790. fi
  1791. return "$status"
  1792. }
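# Example (illustrative) of the exec/capture pattern used throughout this script,
# as in add::ops below:
#   command::exec "${MGMT_NODE}" "kubectl get node --selector='node-role.kubernetes.io/master' --no-headers | wc -l"
#   get::command_output "master_num" "$?"   # copies COMMAND_OUTPUT into $master_num
# Passing "exit" as the third argument aborts the run when the command failed or
# produced no output.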
1793. # Get the ingress connection address
  1794. function get::ingress_conn() {
  1795. local port="${1:-80}"
  1796. local ingress_name="${2:-ingress-${KUBE_INGRESS}-controller}"
  1797. command::exec "${MGMT_NODE}" "
  1798. kubectl get node -o jsonpath='{range .items[*]}{ .status.addresses[?(@.type==\"InternalIP\")].address} {.status.conditions[?(@.status == \"True\")].status}{\"\\n\"}{end}' | awk '{if(\$2==\"True\")a=\$1}END{print a}'
  1799. "
  1800. get::command_output "node_ip" "$?"
  1801. command::exec "${MGMT_NODE}" "
  1802. kubectl get svc --all-namespaces -o go-template=\"{{range .items}}{{if eq .metadata.name \\\"${ingress_name}\\\"}}{{range.spec.ports}}{{if eq .port ${port}}}{{.nodePort}}{{end}}{{end}}{{end}}{{end}}\"
  1803. "
  1804. get::command_output "node_port" "$?"
  1805. INGRESS_CONN="${node_ip:-nodeIP}:${node_port:-nodePort}"
  1806. }
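# Example (illustrative): after ingress-nginx is installed (HTTP nodePort 30080,
# see add::ingress below), a reachable address can be printed like this:
#   get::ingress_conn
#   log::access "[ingress]" "curl http://${INGRESS_CONN}"   # e.g. <nodeIP>:30080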
  1807. ######################################################################################################
1808. # Main invocation logic
  1809. ######################################################################################################
1810. # Add the network component
  1811. function add::network() {
  1812. if [[ "$KUBE_NETWORK" == "flannel" ]]; then
  1813. log::info "[network]" "add flannel"
  1814. local flannel_file="${OFFLINE_DIR}/manifests/kube-flannel.yml"
  1815. utils::download_file "https://cdn.jsdelivr.net/gh/coreos/flannel@v${FLANNEL_VERSION}/Documentation/kube-flannel.yml" "${flannel_file}"
  1816. command::exec "${MGMT_NODE}" "
  1817. sed -i -e 's#10.244.0.0/16#${KUBE_POD_SUBNET}#g' \
  1818. -e 's#quay.io/coreos#${KUBE_IMAGE_REPO}#g' \
  1819. -e 's#\"Type\": \"vxlan\"#\"Type\": \"${KUBE_FLANNEL_TYPE}\"#g' \"${flannel_file}\"
  1820. if [[ \"${KUBE_FLANNEL_TYPE}\" == \"vxlan\" ]]; then
  1821. sed -i 's#\"Type\": \"vxlan\"#\"Type\": \"vxlan\", \"DirectRouting\": true#g' \"${flannel_file}\"
  1822. fi
  1823. "
  1824. check::exit_code "$?" "flannel" "change flannel pod subnet"
  1825. kube::apply "${flannel_file}"
  1826. kube::wait "flannel" "kube-system" "pods" "app=flannel"
  1827. elif [[ "$KUBE_NETWORK" == "calico" ]]; then
  1828. log::info "[network]" "add calico"
  1829. utils::download_file "https://projectcalico.docs.tigera.io/archive/v${CALICO_VERSION%.*}/manifests/calico.yaml" "${OFFLINE_DIR}/manifests/calico.yaml"
  1830. utils::download_file "https://projectcalico.docs.tigera.io/archive/v${CALICO_VERSION%.*}/manifests/calicoctl.yaml" "${OFFLINE_DIR}/manifests/calicoctl.yaml"
  1831. command::exec "${MGMT_NODE}" "
  1832. sed -i \"s#:v.*#:v${CALICO_VERSION}#g\" \"${OFFLINE_DIR}/manifests/calico.yaml\"
  1833. sed -i 's#value: \"Always\"#value: \"CrossSubnet\"#g' \"${OFFLINE_DIR}/manifests/calico.yaml\"
  1834. sed -i \"s#:v.*#:v${CALICO_VERSION}#g\" \"${OFFLINE_DIR}/manifests/calicoctl.yaml\"
  1835. "
  1836. check::exit_code "$?" "network" "change calico version to ${CALICO_VERSION}"
  1837. kube::apply "${OFFLINE_DIR}/manifests/calico.yaml"
  1838. kube::apply "${OFFLINE_DIR}/manifests/calicoctl.yaml"
  1839. kube::wait "calico-kube-controllers" "kube-system" "pods" "k8s-app=calico-kube-controllers"
  1840. kube::wait "calico-node" "kube-system" "pods" "k8s-app=calico-node"
  1841. elif [[ "$KUBE_NETWORK" == "cilium" ]]; then
  1842. log::info "[network]" "add cilium"
  1843. local cilium_file="${OFFLINE_DIR}/manifests/cilium.yml"
  1844. local cilium_hubble_file="${OFFLINE_DIR}/manifests/cilium_hubble.yml"
  1845. utils::download_file "https://cdn.jsdelivr.net/gh/cilium/cilium@${CILIUM_VERSION}/install/kubernetes/quick-install.yaml" "${cilium_file}"
  1846. utils::download_file "https://cdn.jsdelivr.net/gh/cilium/cilium@${CILIUM_VERSION}/install/kubernetes/quick-hubble-install.yaml" "${cilium_hubble_file}"
  1847. local all_node=""
  1848. if [[ "${MASTER_NODES}" == "" && "${WORKER_NODES}" == "" ]]; then
  1849. command::exec "${MGMT_NODE}" "
  1850. kubectl get node -o jsonpath='{range.items[*]}{.status.addresses[?(@.type==\"InternalIP\")].address} {end}'
  1851. "
  1852. get::command_output "all_node" "$?"
  1853. else
  1854. all_node="${MASTER_NODES} ${WORKER_NODES}"
  1855. fi
  1856. for host in $all_node
  1857. do
  1858. command::exec "${host}" "mount bpffs -t bpf /sys/fs/bpf"
  1859. check::exit_code "$?" "network" "${host}: mount bpf filesystem"
  1860. done
  1861. command::exec "${MGMT_NODE}" "
  1862. sed -i \"s#10.0.0.0/8#${KUBE_POD_SUBNET}#g\" \"${cilium_file}\"
  1863. "
  1864. kube::apply "${cilium_file}"
  1865. kube::wait "cilium-node" "kube-system" "pods" "k8s-app=cilium"
  1866. kube::wait "cilium-operator" "kube-system" "pods" "name=cilium-operator"
  1867. kube::apply "${cilium_hubble_file}"
  1868. kube::wait "hubble-relay" "kube-system" "pods" "k8s-app=hubble-relay"
  1869. log::info "[monitor]" "add hubble-ui ingress"
  1870. kube::apply "hubble-ui ingress" "
  1871. ---
  1872. apiVersion: networking.k8s.io/v1
  1873. kind: Ingress
  1874. metadata:
  1875. name: hubble-ui
  1876. namespace: kube-system
  1877. annotations:
  1878. kubernetes.io/ingress.class: ${KUBE_INGRESS}
  1879. spec:
  1880. rules:
  1881. - host: hubble-ui.cluster.local
  1882. http:
  1883. paths:
  1884. - path: /
  1885. pathType: Prefix
  1886. backend:
  1887. service:
  1888. name: hubble-ui
  1889. port:
  1890. number: 80
  1891. "
  1892. # shellcheck disable=SC2181
  1893. if [[ "$?" == "0" ]]; then
  1894. get::ingress_conn
  1895. log::access "[ingress]" "curl -H 'Host:hubble-ui.cluster.local' http://${INGRESS_CONN}"
  1896. fi
  1897. else
  1898. log::warning "[network]" "No $KUBE_NETWORK config."
  1899. fi
  1900. }
1901. # Add the ingress component
1902. function add::ingress() {
1903. # Install ingress-nginx
  1904. log::info "[ingress]" "add ingress-nginx"
  1905. command::exec "${MGMT_NODE}" "
  1906. $(declare -f utils::retry)
  1907. helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx
  1908. utils::retry 6 helm install ingress-nginx ingress-nginx/ingress-nginx \
  1909. --namespace ingress-nginx --create-namespace \
  1910. --version ${INGRESS_NGINX} \
  1911. --set controller.admissionWebhooks.patch.image.registry=registry.hub.docker.com \
  1912. --set controller.admissionWebhooks.patch.image.image=k8sgcrioingressnginx/kube-webhook-certgen \
  1913. --set controller.admissionWebhooks.patch.image.digest= \
  1914. --set controller.admissionWebhooks.enabled=true \
  1915. --set controller.admissionWebhooks.extraEnvs\[0\].name=\"TZ\" \
  1916. --set controller.admissionWebhooks.extraEnvs\[0\].value=\"Asia/Shanghai\" \
  1917. --set controller.kind=DaemonSet \
  1918. --set controller.replicaCount=1 \
  1919. --set controller.minAvailable=1 \
  1920. --set controller.image.registry=registry.hub.docker.com \
  1921. --set controller.image.image=k8sgcrioingressnginx/controller \
  1922. --set controller.image.digest= \
  1923. --set controller.ingressClassResource.name=nginx \
  1924. --set controller.ingressClassResource.enable=true \
  1925. --set controller.ingressClassResource.default=false \
  1926. --set controller.service.enabled=true \
  1927. --set controller.service.type=NodePort \
  1928. --set controller.service.enableHttp=true \
  1929. --set controller.service.enableHttps=true \
  1930. --set controller.service.nodePorts.http=30080 \
  1931. --set controller.service.nodePorts.https=30443 \
  1932. --set controller.extraEnvs\[0\].name=\"TZ\" \
  1933. --set controller.extraEnvs\[0\].value=\"Asia/Shanghai\" \
  1934. --set defaultBackend.image.registry=registry.hub.docker.com \
  1935. --set defaultBackend.image.image=gcmirrors/defaultbackend-amd64 \
  1936. --set defaultBackend.enabled=true \
  1937. --set defaultBackend.name=defaultbackend \
  1938. --set defaultBackend.replicaCount=1 \
  1939. --set defaultBackend.minAvailable=1 \
  1940. --set defaultBackend.extraEnvs\[0\].name=\"TZ\" \
  1941. --set defaultBackend.extraEnvs\[0\].value=\"Asia/Shanghai\" \
  1942. --set rbac.create=true \
  1943. --set serviceAccount.create=true \
  1944. --set podSecurityPolicy.enabled=true
  1945. kubectl get pod -n ingress-nginx -o wide
  1946. kubectl get svc -n ingress-nginx -o wide
  1947. "
1948. # Install nginx
  1949. log::info "[nginx]" "add nginx"
  1950. command::exec "${MGMT_NODE}" "
  1951. sudo yum -y install nginx
  1952. nginx -v
  1953. sudo systemctl enable nginx
  1954. sudo service nginx start
  1955. cat << EOF > /etc/nginx/conf.d/k8s.ingress.conf
  1956. upstream k8s-ingress {
  1957. $(for h in $MASTER_NODES $WORKER_NODES;do echo " server $h:30080 max_fails=1 fail_timeout=15s;";done)
  1958. keepalive 128;
  1959. }
  1960. server {
  1961. listen ${NGINX_HTTP_PORT};
  1962. location / {
  1963. proxy_http_version 1.1;
  1964. proxy_set_header Connection \"\";
  1965. proxy_next_upstream error;
  1966. proxy_set_header X-Real-IP \\\$remote_addr;
  1967. proxy_set_header X-Forwarded-For \\\$proxy_add_x_forwarded_for;
  1968. proxy_set_header Host \\\$http_host;
  1969. proxy_set_header X-Nginx-Proxy true;
  1970. proxy_pass http://k8s-ingress/;
  1971. }
  1972. }
  1973. EOF
  1974. sudo nginx -s reload
  1975. "
  1976. }
1977. # Add addon components
  1978. function add::addon() {
  1979. # TODO add addon
  1980. log::warning "[TODO]" "add addon"
  1981. }
1982. # Add the monitoring component
  1983. function add::monitor() {
  1984. # TODO add monitor
  1985. log::warning "[TODO]" "add monitor"
  1986. }
1987. # Add the logging component
  1988. function add::log() {
  1989. # TODO add log
  1990. log::warning "[TODO]" "add log"
  1991. }
1992. # Add storage
  1993. function add::storage() {
  1994. # TODO add storage
  1995. log::warning "[TODO]" "add storage"
  1996. }
1997. # Add the web UI
1998. function add::ui() {
1999. local path="/tmp"
2000. # Install rancher
  2001. log::info "[rancher]" "add rancher"
  2002. command::exec "${MGMT_NODE}" "
  2003. $(declare -f utils::retry)
  2004. cd ${path}
  2005. helm repo add rancher-stable http://rancher-mirror.oss-cn-beijing.aliyuncs.com/server-charts/stable
  2006. utils::retry 6 helm pull rancher-stable/rancher --version ${RANCHER_VERSION} --untar
  2007. cat << EOF > rancher/templates/service.yaml
  2008. apiVersion: v1
  2009. kind: Service
  2010. metadata:
  2011. name: {{ template \"rancher.fullname\" . }}
  2012. labels:
  2013. {{ include \"rancher.labels\" . | indent 4 }}
  2014. spec:
  2015. type: NodePort
  2016. ports:
  2017. - port: 80
  2018. targetPort: 80
  2019. protocol: TCP
  2020. name: http
2021. # Use a nodePort
  2022. nodePort: 31080
  2023. - port: 443
  2024. targetPort: 444
  2025. protocol: TCP
  2026. name: https-internal
2027. # Use a nodePort
  2028. nodePort: 31443
  2029. selector:
  2030. app: {{ template \"rancher.fullname\" . }}
  2031. EOF
  2032. helm install rancher ./rancher \
  2033. --namespace cattle-system --create-namespace \
  2034. --set replicas=1 \
  2035. --set extraEnv\[0\].name=\"TZ\" \
  2036. --set extraEnv\[0\].value=\"Asia/Shanghai\" \
  2037. --set ingress.tls.source=secret \
  2038. --set ingress.enabled=false
  2039. "
2040. log::info "[rancher]" "Get the initial password: kubectl get secret --namespace cattle-system bootstrap-secret -o go-template='{{.data.bootstrapPassword|base64decode}}{{ \"\n\" }}'"
2041. log::info "[rancher]" "Reset the initial password: kubectl -n cattle-system exec \$(kubectl -n cattle-system get pods -l app=rancher | grep '1/1' | head -1 | awk '{ print \$1 }') -- reset-password"
  2042. }
2043. # Operations tasks
  2044. function add::ops() {
  2045. local master_num
  2046. master_num=$(awk '{print NF}' <<< "${MASTER_NODES}")
  2047. log::info "[ops]" "add anti-affinity strategy to coredns"
  2048. command::exec "${MGMT_NODE}" """
  2049. kubectl -n kube-system patch deployment coredns --patch '{\"spec\": {\"template\": {\"spec\": {\"affinity\":{\"podAntiAffinity\":{\"preferredDuringSchedulingIgnoredDuringExecution\":[{\"weight\":100,\"podAffinityTerm\":{\"labelSelector\":{\"matchExpressions\":[{\"key\":\"k8s-app\",\"operator\":\"In\",\"values\":[\"kube-dns\"]}]},\"topologyKey\":\"kubernetes.io/hostname\"}}]}}}}}}' --record
  2050. """
  2051. check::exit_code "$?" "ops" "add anti-affinity strategy to coredns"
  2052. log::info "[ops]" "add etcd snapshot cronjob"
  2053. command::exec "${MGMT_NODE}" "
  2054. kubeadm config images list --config=/etc/kubernetes/kubeadmcfg.yaml 2>/dev/null | grep etcd:
  2055. "
  2056. get::command_output "etcd_image" "$?"
  2057. command::exec "${MGMT_NODE}" "
  2058. kubectl get node --selector='node-role.kubernetes.io/master' --no-headers | wc -l
  2059. "
  2060. get::command_output "master_num" "$?"
  2061. [[ "${master_num:-0}" == "0" ]] && master_num=1
  2062. kube::apply "etcd-snapshot" """
  2063. ---
  2064. apiVersion: batch/v1beta1
  2065. kind: CronJob
  2066. metadata:
  2067. name: etcd-snapshot
  2068. namespace: kube-system
  2069. spec:
  2070. schedule: '0 */6 * * *'
  2071. successfulJobsHistoryLimit: 3
  2072. suspend: false
  2073. concurrencyPolicy: Allow
  2074. failedJobsHistoryLimit: 3
  2075. jobTemplate:
  2076. spec:
  2077. backoffLimit: 6
  2078. parallelism: ${master_num}
  2079. completions: ${master_num}
  2080. template:
  2081. metadata:
  2082. labels:
  2083. app: etcd-snapshot
  2084. spec:
  2085. affinity:
  2086. podAntiAffinity:
  2087. requiredDuringSchedulingIgnoredDuringExecution:
  2088. - labelSelector:
  2089. matchExpressions:
  2090. - key: app
  2091. operator: In
  2092. values:
  2093. - etcd-snapshot
  2094. topologyKey: 'kubernetes.io/hostname'
  2095. containers:
  2096. - name: etcd-snapshot
  2097. image: ${etcd_image:-${KUBE_IMAGE_REPO}/etcd:3.4.13-0}
  2098. imagePullPolicy: IfNotPresent
  2099. args:
  2100. - -c
  2101. - etcdctl --endpoints=https://127.0.0.1:2379 --cacert=/etc/kubernetes/pki/etcd/ca.crt
  2102. --cert=/etc/kubernetes/pki/etcd/healthcheck-client.crt --key=/etc/kubernetes/pki/etcd/healthcheck-client.key
  2103. snapshot save /backup/etcd-snapshot-\\\\\\\$(date +%Y-%m-%d_%H:%M:%S_%Z).db
  2104. && echo 'delete old backups' && { find /backup -type f -mtime +30 -exec rm -fv {} \\; || echo error; }
  2105. command:
  2106. - /usr/bin/bash
  2107. env:
  2108. - name: ETCDCTL_API
  2109. value: '3'
  2110. resources: {}
  2111. terminationMessagePath: /dev/termination-log
  2112. terminationMessagePolicy: File
  2113. volumeMounts:
  2114. - name: etcd-certs
  2115. mountPath: /etc/kubernetes/pki/etcd
  2116. readOnly: true
  2117. - name: backup
  2118. mountPath: /backup
  2119. - name: etc
  2120. mountPath: /etc
  2121. - name: bin
  2122. mountPath: /usr/bin
  2123. - name: lib64
  2124. mountPath: /lib64
  2125. dnsPolicy: ClusterFirst
  2126. hostNetwork: true
  2127. nodeSelector:
  2128. node-role.kubernetes.io/master: ''
  2129. tolerations:
  2130. - effect: NoSchedule
  2131. operator: Exists
  2132. restartPolicy: OnFailure
  2133. schedulerName: default-scheduler
  2134. securityContext: {}
  2135. terminationGracePeriodSeconds: 30
  2136. volumes:
  2137. - name: etcd-certs
  2138. hostPath:
  2139. path: /etc/kubernetes/pki/etcd
  2140. type: DirectoryOrCreate
  2141. - name: backup
  2142. hostPath:
  2143. path: /var/lib/etcd/backups
  2144. type: DirectoryOrCreate
  2145. - name: etc
  2146. hostPath:
  2147. path: /etc
  2148. - name: bin
  2149. hostPath:
  2150. path: /usr/bin
  2151. - name: lib64
  2152. hostPath:
  2153. path: /lib64
  2154. """
  2155. # shellcheck disable=SC2181
  2156. [[ "$?" == "0" ]] && log::access "[ops]" "etcd backup directory: /var/lib/etcd/backups"
  2157. command::exec "${MGMT_NODE}" "
  2158. jobname=\"etcd-snapshot-$(date +%s)\"
  2159. kubectl create job --from=cronjob/etcd-snapshot \${jobname} -n kube-system && \
  2160. kubectl wait --for=condition=complete job/\${jobname} -n kube-system
  2161. "
  2162. check::exit_code "$?" "ops" "trigger etcd backup"
  2163. }
2164. # Reset a node
  2165. function reset::node() {
  2166. local host=$1
  2167. log::info "[reset]" "node $host"
  2168. command::exec "${host}" "
  2169. set +ex
  2170. cri_socket=\"\"
  2171. [ -S /var/run/crio/crio.sock ] && cri_socket=\"--cri-socket /var/run/crio/crio.sock\"
  2172. [ -S /run/containerd/containerd.sock ] && cri_socket=\"--cri-socket /run/containerd/containerd.sock\"
  2173. kubeadm reset -f \$cri_socket
  2174. [ -f \"\$(which kubelet)\" ] && { systemctl stop kubelet; find /var/lib/kubelet | xargs -n 1 findmnt -n -o TARGET -T | sort | uniq | xargs -r umount -v; yum remove -y kubeadm kubelet kubectl; }
  2175. [ -d /etc/kubernetes ] && rm -rf /etc/kubernetes/* /var/lib/kubelet/* /var/lib/etcd/* \$HOME/.kube /etc/cni/net.d/* /var/lib/dockershim/* /var/lib/cni/* /var/run/kubernetes/*
  2176. [ -f \"\$(which docker)\" ] && { docker rm -f -v \$(docker ps | grep kube | awk '{print \$1}'); systemctl stop docker; rm -rf \$HOME/.docker /etc/docker/* /var/lib/docker/*; yum remove -y docker; }
  2177. [ -f \"\$(which containerd)\" ] && { crictl rm \$(crictl ps -a -q); systemctl stop containerd; rm -rf /etc/containerd/* /var/lib/containerd/*; yum remove -y containerd.io; }
  2178. [ -f \"\$(which crio)\" ] && { crictl rm \$(crictl ps -a -q); systemctl stop crio; rm -rf /etc/crictl.yaml /etc/crio/* /var/run/crio/*; yum remove -y cri-o; }
  2179. [ -f \"\$(which runc)\" ] && { find /run/containers/ /var/lib/containers/ | xargs -n 1 findmnt -n -o TARGET -T | sort | uniq | xargs -r umount -v; rm -rf /var/lib/containers/* /var/run/containers/*; yum remove -y runc; }
  2180. [ -f \"\$(which haproxy)\" ] && { systemctl stop haproxy; rm -rf /etc/haproxy/*; yum remove -y haproxy; }
  2181. sed -i -e \"/$KUBE_APISERVER/d\" -e '/-worker-/d' -e '/-master-/d' /etc/hosts
  2182. sed -i '/## Kainstall managed start/,/## Kainstall managed end/d' /etc/security/limits.conf /etc/systemd/system.conf /etc/bashrc /etc/rc.local /etc/audit/rules.d/audit.rules
  2183. [ -d /var/lib/elasticsearch ] && rm -rf /var/lib/elasticsearch/*
  2184. [ -d /var/lib/longhorn ] && rm -rf /var/lib/longhorn/*
  2185. [ -d \"${OFFLINE_DIR:-/tmp/abc}\" ] && rm -rf \"${OFFLINE_DIR:-/tmp/abc}\"
  2186. for repo in kubernetes.repo docker-ce.repo devel_kubic_libcontainers_stable.repo elrepo.repo
  2187. do
  2188. [ -f /etc/yum.repos.d/\${repo} ] && rm -f /etc/yum.repos.d/\${repo}
  2189. done
  2190. ipvsadm --clear
  2191. iptables -F && iptables -t nat -F && iptables -t mangle -F && iptables -X
  2192. for int in kube-ipvs0 cni0 docker0 dummy0 flannel.1 cilium_host cilium_net cilium_vxlan lxc_health nodelocaldns
  2193. do
  2194. [ -d /sys/class/net/\${int} ] && ip link delete \${int}
  2195. done
  2196. modprobe -r ipip
  2197. echo done.
  2198. "
  2199. check::exit_code "$?" "reset" "$host: reset"
  2200. }
2201. # Reset all nodes
  2202. function reset::cluster() {
  2203. local all_node=""
  2204. command::exec "${MGMT_NODE}" "
  2205. kubectl get node -o jsonpath='{range.items[*]}{.status.addresses[?(@.type==\"InternalIP\")].address} {end}'
  2206. "
  2207. get::command_output "all_node" "$?"
  2208. all_node=$(echo "${WORKER_NODES} ${MASTER_NODES} ${all_node}" | awk '{for (i=1;i<=NF;i++) if (!a[$i]++) printf("%s%s",$i,FS)}')
  2209. for host in $all_node
  2210. do
  2211. reset::node "$host"
  2212. done
  2213. }
2214. # Load the offline package on nodes
  2215. function offline::load() {
  2216. local role="${1:-}"
  2217. local hosts=""
  2218. if [[ "${role}" == "master" ]]; then
  2219. hosts="${MASTER_NODES}"
  2220. elif [[ "${role}" == "worker" ]]; then
  2221. hosts="${WORKER_NODES}"
  2222. fi
  2223. for host in ${hosts}
  2224. do
  2225. log::info "[offline]" "${role} ${host}: load offline file"
  2226. command::exec "${host}" "[[ ! -d \"${OFFLINE_DIR}\" ]] && { mkdir -pv \"${OFFLINE_DIR}\"; chmod 777 \"${OFFLINE_DIR}\"; } ||:"
  2227. check::exit_code "$?" "offline" "$host: mkdir offline dir" "exit"
  2228. if [[ "${UPGRADE_KERNEL_TAG:-}" == "1" ]]; then
  2229. command::scp "${host}" "${TMP_DIR}/packages/kernel/*" "${OFFLINE_DIR}"
  2230. check::exit_code "$?" "offline" "scp kernel file to $host" "exit"
  2231. else
  2232. log::info "[offline]" "${role} ${host}: copy offline file"
  2233. command::scp "${host}" "${TMP_DIR}/packages/kubeadm/*" "${OFFLINE_DIR}"
  2234. check::exit_code "$?" "offline" "scp kube file to $host" "exit"
  2235. command::scp "${host}" "${TMP_DIR}/packages/all/*" "${OFFLINE_DIR}"
  2236. check::exit_code "$?" "offline" "scp all file to $host" "exit"
  2237. if [[ "${role}" == "worker" ]]; then
  2238. command::scp "${host}" "${TMP_DIR}/packages/worker/*" "${OFFLINE_DIR}"
  2239. check::exit_code "$?" "offline" "scp worker file to $host" "exit"
  2240. fi
  2241. command::scp "${host}" "${TMP_DIR}/images/${role}.tgz" "${OFFLINE_DIR}"
  2242. check::exit_code "$?" "offline" "scp ${role} images to $host" "exit"
  2243. command::scp "${host}" "${TMP_DIR}/images/all.tgz" "${OFFLINE_DIR}"
  2244. check::exit_code "$?" "offline" "scp all images to $host" "exit"
  2245. fi
  2246. log::info "[offline]" "${role} ${host}: install package"
  2247. command::exec "${host}" "yum localinstall -y --skip-broken ${OFFLINE_DIR}/*.rpm"
  2248. check::exit_code "$?" "offline" "${role} ${host}: install package" "exit"
  2249. if [[ "${UPGRADE_KERNEL_TAG:-}" != "1" ]]; then
  2250. command::exec "${host}" "
  2251. set -e
  2252. for target in firewalld python-firewall firewalld-filesystem iptables; do
  2253. systemctl stop \$target &>/dev/null || true
  2254. systemctl disable \$target &>/dev/null || true
  2255. done
  2256. systemctl start docker && \
  2257. cd ${OFFLINE_DIR} && \
  2258. gzip -d -c ${1}.tgz | docker load && gzip -d -c all.tgz | docker load
  2259. "
  2260. check::exit_code "$?" "offline" "$host: load images" "exit"
  2261. fi
  2262. command::exec "${host}" "rm -rf ${OFFLINE_DIR:-/tmp/abc}"
  2263. check::exit_code "$?" "offline" "$host: clean offline file"
  2264. done
  2265. command::scp "${MGMT_NODE}" "${TMP_DIR}/manifests" "${OFFLINE_DIR}"
  2266. check::exit_code "$?" "offline" "scp manifests file to ${MGMT_NODE}" "exit"
  2267. command::scp "${MGMT_NODE}" "${TMP_DIR}/bins" "${OFFLINE_DIR}"
  2268. check::exit_code "$?" "offline" "scp bins file to ${MGMT_NODE}" "exit"
  2269. }
2270. # Load the offline package on all cluster nodes
  2271. function offline::cluster() {
  2272. [ ! -f "${OFFLINE_FILE}" ] && { log::error "[offline]" "not found ${OFFLINE_FILE}" ; exit 1; }
  2273. log::info "[offline]" "Unzip offline package on local."
  2274. tar zxf "${OFFLINE_FILE}" -C "${TMP_DIR}/"
  2275. check::exit_code "$?" "offline" "Unzip offline package"
  2276. offline::load "master"
  2277. offline::load "worker"
  2278. }
2279. # Initialize the cluster
2280. function init::cluster() {
2281. MGMT_NODE=$(echo "${MASTER_NODES}" | awk '{print $1}')
2282. # Load the offline package
2283. [[ "${OFFLINE_TAG:-}" == "1" ]] && offline::cluster
2284. # 1. Initialize nodes
2285. init::node
2286. # 2. Install packages
2287. install::package
2288. # 3. Run kubeadm init
2289. kubeadm::init
2290. # 4. Join the cluster
2291. kubeadm::join
2292. # 5. Add the network component
2293. add::network
2294. # 6. Install addons
2295. add::addon
2296. # 7. Add ingress
2297. add::ingress
2298. # 8. Add storage
2299. [[ "${STORAGE_TAG:-}" == "1" ]] && add::storage
2300. # 9. Add the web UI
2301. add::ui
2302. # 10. Add monitoring
2303. [[ "${MONITOR_TAG:-}" == "1" ]] && add::monitor
2304. # 11. Add logging
2305. [[ "${LOG_TAG:-}" == "1" ]] && add::log
2306. # 12. Operations tasks
2307. add::ops
2308. # 13. Show cluster status
  2309. kube::status
  2310. }
2311. # Add nodes
2312. function add::node() {
2313. # Load the offline package
2314. [[ "${OFFLINE_TAG:-}" == "1" ]] && offline::cluster
2315. # When KUBE_VERSION is not specified, use the current cluster version
  2316. if [[ "${KUBE_VERSION}" == "" || "${KUBE_VERSION}" == "latest" ]]; then
  2317. command::exec "${MGMT_NODE}" "
  2318. kubectl get node --selector='node-role.kubernetes.io/master' -o jsonpath='{range.items[*]}{.status.nodeInfo.kubeletVersion } {end}' | awk -F'v| ' '{print \$2}'
  2319. "
  2320. get::command_output "KUBE_VERSION" "$?" "exit"
  2321. fi
2322. # 1. Initialize nodes
2323. init::add_node
2324. # 2. Install packages
2325. install::package
2326. # 3. Join the cluster
2327. kubeadm::join
2328. # 4. Add apiserver entries to haproxy
2329. config::haproxy_backend "add"
2330. # 5. Update etcd snapshot job replicas
2331. config::etcd_snapshot
2332. # 6. Show cluster status
  2333. kube::status
  2334. }
2335. # Delete nodes
  2336. function del::node() {
  2337. config::haproxy_backend "remove"
  2338. local cluster_nodes=""
  2339. local del_hosts_cmd=""
  2340. command::exec "${MGMT_NODE}" "
  2341. kubectl get node -o jsonpath='{range.items[*]}{.status.addresses[?(@.type==\"InternalIP\")].address} {.metadata.name }\\n{end}'
  2342. "
  2343. get::command_output "cluster_nodes" "$?" exit
  2344. for host in $MASTER_NODES
  2345. do
  2346. command::exec "${MGMT_NODE}" "
  2347. etcd_pod=\$(kubectl -n kube-system get pods -l component=etcd --field-selector=status.phase=Running -o jsonpath='{\$.items[0].metadata.name}')
  2348. etcd_node=\$(kubectl -n kube-system exec \$etcd_pod -- sh -c \"export ETCDCTL_API=3 ETCDCTL_CACERT=/etc/kubernetes/pki/etcd/ca.crt ETCDCTL_CERT=/etc/kubernetes/pki/etcd/server.crt ETCDCTL_KEY=/etc/kubernetes/pki/etcd/server.key ETCDCTL_ENDPOINTS=https://127.0.0.1:2379; etcdctl member list\"| grep $host | awk -F, '{print \$1}')
  2349. echo \"\$etcd_pod \$etcd_node\"
  2350. kubectl -n kube-system exec \$etcd_pod -- sh -c \"export ETCDCTL_API=3 ETCDCTL_CACERT=/etc/kubernetes/pki/etcd/ca.crt ETCDCTL_CERT=/etc/kubernetes/pki/etcd/server.crt ETCDCTL_KEY=/etc/kubernetes/pki/etcd/server.key ETCDCTL_ENDPOINTS=https://127.0.0.1:2379; etcdctl member remove \$etcd_node; etcdctl member list\"
  2351. "
  2352. check::exit_code "$?" "del" "remove $host etcd member"
  2353. done
  2354. for host in $MASTER_NODES $WORKER_NODES
  2355. do
  2356. log::info "[del]" "node $host"
  2357. local node_name; node_name=$(echo -ne "${cluster_nodes}" | grep "${host}" | awk '{print $2}')
  2358. if [[ "${node_name}" == "" ]]; then
  2359. log::warning "[del]" "node $host not found."
  2360. read -r -t 10 -n 1 -p "Do you need to reset the node (y/n)? " answer
  2361. [[ -z "$answer" || "$answer" != "y" ]] && exit || echo
  2362. else
  2363. log::info "[del]" "drain $host"
  2364. command::exec "${MGMT_NODE}" "kubectl drain $node_name --force --ignore-daemonsets --delete-local-data"
  2365. check::exit_code "$?" "del" "$host: drain"
  2366. log::info "[del]" "delete node $host"
  2367. command::exec "${MGMT_NODE}" "kubectl delete node $node_name"
  2368. check::exit_code "$?" "del" "$host: delete"
  2369. sleep 3
  2370. fi
  2371. reset::node "$host"
  2372. del_hosts_cmd="${del_hosts_cmd}\nsed -i "/$host/d" /etc/hosts"
  2373. done
  2374. for host in $(echo -ne "${cluster_nodes}" | awk '{print $1}')
  2375. do
  2376. log::info "[del]" "$host: remove del node hostname resolution"
  2377. command::exec "${host}" "
  2378. $(echo -ne "${del_hosts_cmd}")
  2379. "
  2380. check::exit_code "$?" "del" "remove del node hostname resolution"
  2381. done
  2382. [ "$MASTER_NODES" != "" ] && config::etcd_snapshot
  2383. kube::status
  2384. }
2385. # Upgrade the cluster
  2386. function upgrade::cluster() {
  2387. log::info "[upgrade]" "upgrade to $KUBE_VERSION"
  2388. log::info "[upgrade]" "backup cluster"
  2389. add::ops
  2390. local stable_version="2"
  2391. command::exec "127.0.0.1" "wget https://storage.googleapis.com/kubernetes-release/release/stable.txt -q -O -"
  2392. get::command_output "stable_version" "$?" && stable_version="${stable_version#v}"
  2393. local node_hosts="$MASTER_NODES $WORKER_NODES"
  2394. if [[ "$node_hosts" == " " ]]; then
  2395. command::exec "${MGMT_NODE}" "
  2396. kubectl get node -o jsonpath='{range.items[*]}{.metadata.name } {end}'
  2397. "
  2398. get::command_output "node_hosts" "$?" exit
  2399. fi
  2400. local skip_plan=${SKIP_UPGRADE_PLAN,,}
  2401. for host in ${node_hosts}
  2402. do
  2403. log::info "[upgrade]" "node: $host"
  2404. local local_version=""
  2405. command::exec "${host}" "kubectl version --client --short | awk '{print \$3}'"
  2406. get::command_output "local_version" "$?" && local_version="${local_version#v}"
  2407. if [[ "${KUBE_VERSION}" != "latest" ]]; then
  2408. if [[ "${KUBE_VERSION}" == "${local_version}" ]];then
  2409. log::warning "[check]" "The specified version(${KUBE_VERSION}) is consistent with the local version(${local_version})!"
  2410. continue
  2411. fi
  2412. if [[ $(utils::version_to_number "$KUBE_VERSION") -lt $(utils::version_to_number "${local_version}") ]];then
  2413. log::warning "[check]" "The specified version($KUBE_VERSION) is less than the local version(${local_version})!"
  2414. continue
  2415. fi
  2416. if [[ $(utils::version_to_number "$KUBE_VERSION") -gt $(utils::version_to_number "${stable_version}") ]];then
2417. log::warning "[check]" "The specified version($KUBE_VERSION) is newer than the stable version(${stable_version})!"
  2418. continue
  2419. fi
  2420. else
  2421. if [[ $(utils::version_to_number "${local_version}") -ge $(utils::version_to_number "${stable_version}") ]];then
2422. log::warning "[check]" "The local version($local_version) is greater than or equal to the stable version(${stable_version})!"
  2423. continue
  2424. fi
  2425. fi
  2426. command::exec "${MGMT_NODE}" "kubectl drain ${host} --ignore-daemonsets --delete-local-data"
  2427. check::exit_code "$?" "upgrade" "drain ${host} node" "exit"
  2428. sleep 5
  2429. if [[ "${skip_plan}" == "false" ]]; then
  2430. command::exec "${host}" "$(declare -f script::upgrage_kube); script::upgrage_kube 'init' '$KUBE_VERSION'"
  2431. check::exit_code "$?" "upgrade" "plan and upgrade cluster on ${host}" "exit"
  2432. command::exec "${host}" "$(declare -f utils::retry); utils::retry 10 kubectl get node"
  2433. check::exit_code "$?" "upgrade" "${host}: upgrade" "exit"
  2434. skip_plan=true
  2435. else
  2436. command::exec "${host}" "$(declare -f script::upgrage_kube); script::upgrage_kube 'node' '$KUBE_VERSION'"
  2437. check::exit_code "$?" "upgrade" "upgrade ${host} node" "exit"
  2438. fi
  2439. command::exec "${MGMT_NODE}" "kubectl wait --for=condition=Ready node/${host} --timeout=120s"
  2440. check::exit_code "$?" "upgrade" "${host} ready"
  2441. sleep 5
  2442. command::exec "${MGMT_NODE}" "$(declare -f utils::retry); utils::retry 6 kubectl uncordon ${host}"
  2443. check::exit_code "$?" "upgrade" "uncordon ${host} node"
  2444. sleep 5
  2445. done
  2446. kube::status
  2447. }
# Update this script file in place
function update::self() {
  log::info "[update]" "download kainstall script to $0"
  command::exec "127.0.0.1" "
    wget --timeout=10 --waitretry=3 --tries=5 --retry-connrefused https://cdn.jsdelivr.net/gh/lework/kainstall@master/kainstall-centos.sh -O /tmp/kainstall-centos.sh || exit 1
    /bin/mv -fv /tmp/kainstall-centos.sh \"$0\"
    chmod +x \"$0\"
  "
  check::exit_code "$?" "update" "kainstall script"
}
# Data normalization and constraints
function transform::data() {
  # normalize comma-separated node lists into space-separated lists
  MASTER_NODES=$(echo "${MASTER_NODES}" | tr ',' ' ')
  WORKER_NODES=$(echo "${WORKER_NODES}" | tr ',' ' ')

  if ! utils::is_element_in_array "$KUBE_CRI" docker containerd cri-o ; then
    log::error "[limit]" "$KUBE_CRI is not supported, only [docker,containerd,cri-o]"
    exit 1
  fi

  [[ "$KUBE_CRI" != "docker" && "${OFFLINE_TAG:-}" == "1" ]] && { log::error "[limit]" "$KUBE_CRI is not supported offline, only docker"; exit 1; }
  # map the default dockershim socket to the chosen runtime's socket
  [[ "$KUBE_CRI" == "containerd" && "${KUBE_CRI_ENDPOINT}" == "/var/run/dockershim.sock" ]] && KUBE_CRI_ENDPOINT="unix:///run/containerd/containerd.sock"
  [[ "$KUBE_CRI" == "cri-o" && "${KUBE_CRI_ENDPOINT}" == "/var/run/dockershim.sock" ]] && KUBE_CRI_ENDPOINT="unix:///var/run/crio/crio.sock"

  kubelet_nodeRegistration="nodeRegistration:
  criSocket: ${KUBE_CRI_ENDPOINT:-/var/run/dockershim.sock}
  kubeletExtraArgs:
    runtime-cgroups: /system.slice/${KUBE_CRI//-/}.service
    pod-infra-container-image: ${KUBE_IMAGE_REPO}/pause:${PAUSE_VERSION:-3.6}
"
}
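# Illustrative only: with --cri containerd and the default image repo, transform::data
# above yields a nodeRegistration snippet roughly like the following (actual values
# depend on KUBE_CRI_ENDPOINT, KUBE_IMAGE_REPO and PAUSE_VERSION):
#   nodeRegistration:
#     criSocket: unix:///run/containerd/containerd.sock
#     kubeletExtraArgs:
#       runtime-cgroups: /system.slice/containerd.service
#       pod-infra-container-image: <KUBE_IMAGE_REPO>/pause:3.6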
# Usage help
function help::usage() {
  cat << EOF
Install a Kubernetes cluster using kubeadm.

Usage:
  $(basename "$0") [command]

Available Commands:
  init          Initialize a Kubernetes cluster
  reset         Reset the Kubernetes cluster
  add           Add nodes to the cluster
  del           Delete nodes from the cluster
  renew-cert    Renew all available certificates
  upgrade       Upgrade the kubeadm cluster
  update        Update this script file

Flags:
  -m,--master          master node(s), comma separated, default: ''
  -w,--worker          worker node(s), comma separated, default: ''
  -u,--user            ssh user, default: ${SSH_USER}
  -p,--password        ssh password
  --private-key        ssh private key
  -P,--port            ssh port, default: ${SSH_PORT}
  -v,--version         kube version, default: ${KUBE_VERSION}
  -n,--network         cluster network, choose: [flannel,calico,cilium], default: ${KUBE_NETWORK}
  -i,--ingress         ingress controller, choose: [nginx,traefik], default: ${KUBE_INGRESS}
  -ui,--ui             cluster web ui, use: [rancher]
  -a,--addon           cluster addons, use: []
  -M,--monitor         cluster monitoring, use: [prometheus]
  -l,--log             cluster logging, choose: [elasticsearch]
  -s,--storage         cluster storage, choose: [rook], default: ${KUBE_STORAGE}
  --cri                cri tools, choose: [docker,containerd,cri-o], default: ${KUBE_CRI}
  --cri-version        cri version, default: ${KUBE_CRI_VERSION}
  --cri-endpoint       cri endpoint, default: ${KUBE_CRI_ENDPOINT}
  -U,--upgrade-kernel  upgrade the kernel
  -of,--offline-file   offline file to load
  --10years            issue certificates valid for 10 years
  --sudo               sudo mode
  --sudo-user          sudo user
  --sudo-password      sudo user password

Example:
  [init cluster]
  $0 init \\
    --master 192.168.77.130,192.168.77.131,192.168.77.132 \\
    --worker 192.168.77.133,192.168.77.134,192.168.77.135 \\
    --user root \\
    --password 123456 \\
    --version 1.20.4

  [reset cluster]
  $0 reset \\
    --user root \\
    --password 123456

  [add node]
  $0 add \\
    --master 192.168.77.140,192.168.77.141 \\
    --worker 192.168.77.143,192.168.77.144 \\
    --user root \\
    --password 123456 \\
    --version 1.20.4

  [del node]
  $0 del \\
    --master 192.168.77.140,192.168.77.141 \\
    --worker 192.168.77.143,192.168.77.144 \\
    --user root \\
    --password 123456

  [other]
  $0 renew-cert --user root --password 123456
  $0 upgrade --version 1.20.4 --user root --password 123456
  $0 update
  $0 add --ingress traefik
  $0 add --monitor prometheus
  $0 add --log elasticsearch
  $0 add --storage rook
  $0 add --ui dashboard
  $0 add --addon nodelocaldns
EOF
  exit 1
}
######################################################################################################
# main
######################################################################################################
[ "$#" == "0" ] && help::usage
while [ "${1:-}" != "" ]; do
  case $1 in
    init ) INIT_TAG=1
      ;;
    reset ) RESET_TAG=1
      ;;
    add ) ADD_TAG=1
      ;;
    del ) DEL_TAG=1
      ;;
    renew-cert ) RENEW_CERT_TAG=1
      ;;
    upgrade ) UPGRADE_TAG=1
      ;;
    update ) UPDATE_TAG=1
      ;;
    -m | --master ) shift
      MASTER_NODES=${1:-$MASTER_NODES}
      ;;
    -w | --worker ) shift
      WORKER_NODES=${1:-$WORKER_NODES}
      ;;
    -u | --user ) shift
      SSH_USER=${1:-$SSH_USER}
      ;;
    -p | --password ) shift
      SSH_PASSWORD=${1:-$SSH_PASSWORD}
      ;;
    --private-key ) shift
      SSH_PRIVATE_KEY=${1:-$SSH_PRIVATE_KEY}
      ;;
    -P | --port ) shift
      SSH_PORT=${1:-$SSH_PORT}
      ;;
    -v | --version ) shift
      KUBE_VERSION=${1:-$KUBE_VERSION}
      ;;
    -n | --network ) shift
      NETWORK_TAG=1
      KUBE_NETWORK=${1:-$KUBE_NETWORK}
      ;;
    -i | --ingress ) shift
      INGRESS_TAG=1
      KUBE_INGRESS=${1:-$KUBE_INGRESS}
      ;;
    -M | --monitor ) shift
      MONITOR_TAG=1
      KUBE_MONITOR=${1:-$KUBE_MONITOR}
      ;;
    -l | --log ) shift
      LOG_TAG=1
      KUBE_LOG=${1:-$KUBE_LOG}
      ;;
    -s | --storage ) shift
      STORAGE_TAG=1
      KUBE_STORAGE=${1:-$KUBE_STORAGE}
      ;;
    -ui | --ui ) shift
      # the value after the flag is consumed but not stored; UI_TAG alone enables add::ui
      UI_TAG=1
      ;;
    -a | --addon ) shift
      # the value after the flag is consumed but not stored; ADDON_TAG alone enables add::addon
      ADDON_TAG=1
      ;;
    --cri ) shift
      KUBE_CRI=${1:-$KUBE_CRI}
      ;;
    --cri-version ) shift
      KUBE_CRI_VERSION=${1:-$KUBE_CRI_VERSION}
      ;;
    --cri-endpoint ) shift
      KUBE_CRI_ENDPOINT=${1:-$KUBE_CRI_ENDPOINT}
      ;;
    -U | --upgrade-kernel ) UPGRADE_KERNEL_TAG=1
      ;;
    --mgmt-node-ip ) MGMT_NODE_IP="first_master_ip"
      ;;
    -of | --offline-file ) shift
      OFFLINE_TAG=1
      OFFLINE_FILE=${1:-$OFFLINE_FILE}
      ;;
    --10years ) CERT_YEAR_TAG=1
      ;;
    --sudo ) SUDO_TAG=1
      ;;
    --sudo-user ) shift
      SUDO_USER=${1:-$SUDO_USER}
      ;;
    --sudo-password ) shift
      SUDO_PASSWORD=${1:-}
      ;;
    * ) help::usage
      exit 1
  esac
  shift
done
# Start (log the invocation with the password masked)
log::info "[start]" "bash $0 ${SCRIPT_PARAMETER//${SSH_PASSWORD:-${SUDO_PASSWORD:-}}/zzzzzz}"
# Data processing
transform::data
# Preflight checks
check::preflight
# Actions
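# Exactly one action runs per invocation, checked in the order below; `add` may combine
# several component flags (network, ingress, storage, monitor, log, ui, addon, nodes)
# in a single run, and falls back to the usage help if nothing was selected.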
if [[ "${INIT_TAG:-}" == "1" ]]; then
  [[ "$MASTER_NODES" == "" ]] && MASTER_NODES="127.0.0.1"
  init::cluster
elif [[ "${ADD_TAG:-}" == "1" ]]; then
  [[ "${NETWORK_TAG:-}" == "1" ]] && { add::network; add=1; }
  [[ "${INGRESS_TAG:-}" == "1" ]] && { add::ingress; add=1; }
  [[ "${STORAGE_TAG:-}" == "1" ]] && { add::storage; add=1; }
  [[ "${MONITOR_TAG:-}" == "1" ]] && { add::monitor; add=1; }
  [[ "${LOG_TAG:-}" == "1" ]] && { add::log; add=1; }
  [[ "${UI_TAG:-}" == "1" ]] && { add::ui; add=1; }
  [[ "${ADDON_TAG:-}" == "1" ]] && { add::addon; add=1; }
  [[ "$MASTER_NODES" != "" || "$WORKER_NODES" != "" ]] && { add::node; add=1; }
  [[ "${add:-}" != "1" ]] && help::usage
elif [[ "${DEL_TAG:-}" == "1" ]]; then
  if [[ "$MASTER_NODES" != "" || "$WORKER_NODES" != "" ]]; then del::node; else help::usage; fi
elif [[ "${RESET_TAG:-}" == "1" ]]; then
  reset::cluster
elif [[ "${RENEW_CERT_TAG:-}" == "1" ]]; then
  cert::renew
elif [[ "${UPGRADE_TAG:-}" == "1" ]]; then
  upgrade::cluster
elif [[ "${UPDATE_TAG:-}" == "1" ]]; then
  update::self
else
  help::usage
fi

# bash <(curl -s http://git.yvanui.com/lizhiwei/jztd-deploy/raw/master/sd_dsl/02k8s-install-centos.sh) [cmd]
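# For example (illustrative only; substitute your own node IPs and credentials):
# bash <(curl -s http://git.yvanui.com/lizhiwei/jztd-deploy/raw/master/sd_dsl/02k8s-install-centos.sh) \
#   init --master 192.168.77.130 --worker 192.168.77.131 --user root --password 123456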