From 05bc9f260fb7523ef0070eb37e43962e3abe3b83 Mon Sep 17 00:00:00 2001 From: LINxiansheng <4094166@qq.com> Date: Thu, 12 Feb 2026 15:02:28 +0800 Subject: [PATCH] support mac --- _cmd.py | 2 + _errno.py | 2 +- _plugin.py | 12 +- _stdio.py | 6 +- _workflow.py | 7 +- plugins-requirements3.txt | 1 + plugins/general/0.1/install_repo.py | 6 +- plugins/mysqltest/3.1.0/run_test.py | 45 ++- plugins/seekdb/1.0.0/environment_check.py | 8 +- .../seekdb/1.0.0/generate_general_config.py | 67 +++-- plugins/seekdb/1.0.0/health_check.py | 2 +- plugins/seekdb/1.0.0/init_pre.py | 9 +- plugins/seekdb/1.0.0/parameter_check.py | 9 +- plugins/seekdb/1.0.0/resource_check.py | 67 ++++- plugins/seekdb/1.0.0/start_pre.py | 2 +- plugins/seekdb/1.0.0/status.py | 2 +- plugins/seekdb/1.0.0/status_check.py | 2 +- plugins/seekdb/1.0.0/stop.py | 23 +- plugins/seekdb/1.0.0/system_limits_check.py | 108 ++++---- tgz/build.sh | 256 ++++++++++++++++++ tgz/install.sh | 101 +++++++ tgz/ob-deploy-build.sh | 17 ++ 22 files changed, 655 insertions(+), 99 deletions(-) create mode 100755 tgz/build.sh create mode 100755 tgz/install.sh create mode 100755 tgz/ob-deploy-build.sh diff --git a/_cmd.py b/_cmd.py index b1406cc9..873fd016 100644 --- a/_cmd.py +++ b/_cmd.py @@ -3076,6 +3076,8 @@ def __init__(self): self.parser._add_version_option() if __name__ == '__main__': + import multiprocessing + multiprocessing.freeze_support() defaultencoding = 'utf-8' if sys.getdefaultencoding() != defaultencoding: try: diff --git a/_errno.py b/_errno.py index 71a206e4..5f613f2d 100644 --- a/_errno.py +++ b/_errno.py @@ -111,7 +111,7 @@ class InitDirFailedErrorMessage(object): PERMISSION_DENIED = ': {path} permission denied .' 
-DOC_LINK = '' +DOC_LINK = '' DOC_LINK_MSG = 'See {}'.format(DOC_LINK if DOC_LINK else "https://www.oceanbase.com/product/ob-deployer/error-codes .") # generic error code diff --git a/_plugin.py b/_plugin.py index 956594b9..d71e13e8 100644 --- a/_plugin.py +++ b/_plugin.py @@ -283,7 +283,10 @@ def _new_func( namespace_vars.update(kwargs) if arg: idx = 0 - params = list(inspect2.signature(method).parameters.keys())[1:-2] + try: + params = list(inspect2.signature(method).parameters.keys())[1:-2] + except (ValueError, TypeError): + params = [] num = min(len(arg), len(params)) while idx < num: key = params[idx] @@ -871,6 +874,9 @@ def __init__(self, home_path, script_name=None, dev_mode=False, stdio=None): super(PyScriptPluginLoader, self).__init__(home_path, dev_mode=dev_mode, stdio=stdio) def _create_(self, script_name): + # Use an explicit namespace dict for exec() because Python 3.12+ (PEP 667) + # changed locals() to return a snapshot, so exec() results won't appear in locals(). + _ns = {} exec(''' class %s(PyScriptPlugin): @@ -890,8 +896,8 @@ def %s( repositories, components, clients, cluster_config, cmd, options, stdio, *arg, **kwargs): pass - ''' % (self.PLUGIN_TYPE.value, script_name, script_name, self.PLUGIN_TYPE.value, self.PLUGIN_TYPE.value, script_name)) - clz = locals()[self.PLUGIN_TYPE.value] + ''' % (self.PLUGIN_TYPE.value, script_name, script_name, self.PLUGIN_TYPE.value, self.PLUGIN_TYPE.value, script_name), globals(), _ns) + clz = _ns[self.PLUGIN_TYPE.value] setattr(sys.modules[__name__], self.PLUGIN_TYPE.value, clz) clz.set_plugin_type(self.PLUGIN_TYPE) return clz diff --git a/_stdio.py b/_stdio.py index a079b781..126f7249 100644 --- a/_stdio.py +++ b/_stdio.py @@ -1150,7 +1150,11 @@ def decorated(func): is_bond_method = True _type = type(func) func = func.__func__ - all_parameters = inspect2.signature(func).parameters + try: + all_parameters = inspect2.signature(func).parameters + except (ValueError, TypeError): + # Builtin slot wrappers (e.g. 
object.__init__) can't be inspected on Python 3.13+ + return _type(func) if is_bond_method else func if "stdio" in all_parameters: default_stdio_in_params = all_parameters["stdio"].default if not isinstance(default_stdio_in_params, Parameter.empty): diff --git a/_workflow.py b/_workflow.py index ac55f55e..7264ea9e 100644 --- a/_workflow.py +++ b/_workflow.py @@ -218,6 +218,9 @@ def __init__(self, home_path, workflow_name=None, dev_mode=False, stdio=None): self.workflow_name = workflow_name def _create_(self, workflow_name): + # Use an explicit namespace dict for exec() because Python 3.12+ (PEP 667) + # changed locals() to return a snapshot, so exec() results won't appear in locals(). + _ns = {} exec(''' class %s(PyScriptPlugin): @@ -237,8 +240,8 @@ def %s( repositories, components, clients, cluster_config, cmd, options, stdio, *arg, **kwargs): pass - ''' % (self.PLUGIN_TYPE.value, workflow_name, workflow_name, self.PLUGIN_TYPE.value, self.PLUGIN_TYPE.value, workflow_name)) - clz = locals()[self.PLUGIN_TYPE.value] + ''' % (self.PLUGIN_TYPE.value, workflow_name, workflow_name, self.PLUGIN_TYPE.value, self.PLUGIN_TYPE.value, workflow_name), globals(), _ns) + clz = _ns[self.PLUGIN_TYPE.value] setattr(sys.modules[__name__], self.PLUGIN_TYPE.value, clz) clz.set_plugin_type(self.PLUGIN_TYPE) return clz diff --git a/plugins-requirements3.txt b/plugins-requirements3.txt index 4e099cbc..01cbfa7a 100644 --- a/plugins-requirements3.txt +++ b/plugins-requirements3.txt @@ -4,4 +4,5 @@ bcrypt==4.0.0 configparser>=5.2.0 urllib3==2.5.0 influxdb==5.3.2 +PyYAML>=6.0 obshell \ No newline at end of file diff --git a/plugins/general/0.1/install_repo.py b/plugins/general/0.1/install_repo.py index d225733c..f0eae445 100644 --- a/plugins/general/0.1/install_repo.py +++ b/plugins/general/0.1/install_repo.py @@ -16,6 +16,7 @@ from __future__ import absolute_import, division, print_function import os +import platform import re from _plugin import InstallPlugin @@ -158,7 +159,10 @@ def 
check_lib(): for file_item in check_file_map.values(): if file_item.type == InstallPlugin.FileItemType.BIN: remote_file_path = os.path.join(remote_home_path, file_item.target_path) - ret = client.execute_command('ldd %s' % remote_file_path) + if platform.system() == 'Darwin': + ret = client.execute_command('otool -L %s' % remote_file_path) + else: + ret = client.execute_command('ldd %s' % remote_file_path) libs = re.findall('(/?[\w+\-/]+\.\w+[\.\w]+)[\s\\n]*\=\>[\s\\n]*not found', ret.stdout) if not libs: libs = re.findall('(/?[\w+\-/]+\.\w+[\.\w]+)[\s\\n]*\=\>[\s\\n]*not found', ret.stderr) diff --git a/plugins/mysqltest/3.1.0/run_test.py b/plugins/mysqltest/3.1.0/run_test.py index 4523892a..692b1537 100644 --- a/plugins/mysqltest/3.1.0/run_test.py +++ b/plugins/mysqltest/3.1.0/run_test.py @@ -19,9 +19,10 @@ import os import time import shlex +import platform import requests import urllib -from subprocess import Popen, PIPE +from subprocess import Popen, PIPE, TimeoutExpired from copy import deepcopy from ssh import LocalClient from tool import DirectoryUtil @@ -380,20 +381,42 @@ def return_true(**kw): LocalClient.execute_command('%s "alter system set _enable_static_typing_engine = %s;select sleep(2);"' % (exec_sql_cmd, opt['_enable_static_typing_engine']), stdio=stdio) start_time = time.time() - cmd = 'timeout %s %s %s' % (case_timeout, mysqltest_bin, str(Arguments(opt))) + IS_DARWIN = platform.system() == 'Darwin' + if IS_DARWIN: + # macOS does not have GNU timeout; use Python subprocess timeout instead + cmd = '%s %s' % (mysqltest_bin, str(Arguments(opt))) + else: + cmd = 'timeout %s %s %s' % (case_timeout, mysqltest_bin, str(Arguments(opt))) try: stdio.verbose('local execute: %s ' % cmd) p = Popen(shlex.split(cmd), env=test_env, stdout=PIPE, stderr=PIPE) - output, errput = p.communicate() - retcode = p.returncode - if retcode == 124: - output = '' - if 'source_limit' in opt and 'g.buffer' in opt['source_limit']: - errput = "%s secs out of soft limit (%s secs), 
sql may be hung, please check" % (opt['source_limit']['g.buffer'], case_timeout) + if IS_DARWIN: + try: + output, errput = p.communicate(timeout=case_timeout) + except TimeoutExpired: + p.kill() + output, errput = p.communicate() + retcode = 124 # mimic GNU timeout exit code + output = '' + if 'source_limit' in opt and 'g.buffer' in opt['source_limit']: + errput = "%s secs out of soft limit (%s secs), sql may be hung, please check" % (opt['source_limit']['g.buffer'], case_timeout) + else: + errput = "%s seconds timeout, sql may be hung, please check" % case_timeout else: - errput = "%s seconds timeout, sql may be hung, please check" % case_timeout - elif isinstance(errput, bytes): - errput = errput.decode(errors='replace') + retcode = p.returncode + if isinstance(errput, bytes): + errput = errput.decode(errors='replace') + else: + output, errput = p.communicate() + retcode = p.returncode + if retcode == 124: + output = '' + if 'source_limit' in opt and 'g.buffer' in opt['source_limit']: + errput = "%s secs out of soft limit (%s secs), sql may be hung, please check" % (opt['source_limit']['g.buffer'], case_timeout) + else: + errput = "%s seconds timeout, sql may be hung, please check" % case_timeout + elif isinstance(errput, bytes): + errput = errput.decode(errors='replace') except Exception as e: errput = str(e) output = '' diff --git a/plugins/seekdb/1.0.0/environment_check.py b/plugins/seekdb/1.0.0/environment_check.py index 79254464..04d58445 100644 --- a/plugins/seekdb/1.0.0/environment_check.py +++ b/plugins/seekdb/1.0.0/environment_check.py @@ -15,6 +15,7 @@ from __future__ import absolute_import, division, print_function import os +import platform import re import _errno as err @@ -22,6 +23,8 @@ from _rpm import Version from tool import get_port_socket_inode, contains_duplicate_nodes +IS_DARWIN = platform.system() == 'Darwin' + def environment_check(plugin_context, work_dir_empty_check=True, generate_configs={}, *args, **kwargs): cluster_config = 
plugin_context.cluster_config @@ -140,7 +143,10 @@ def environment_check(plugin_context, work_dir_empty_check=True, generate_config basearch = getBaseArch() stdio.verbose("basearch: %s" % basearch) - if 'x86' in basearch and len(re.findall(r'(^avx\s+)|(\s+avx\s+)|(\s+avx$)', client.execute_command('lscpu | grep avx').stdout)) == 0: + if IS_DARWIN: + # macOS does not have lscpu; Apple Silicon natively supports atomics and AVX is N/A. + stdio.verbose('Skip CPU instruction set check on macOS') + elif 'x86' in basearch and len(re.findall(r'(^avx\s+)|(\s+avx\s+)|(\s+avx$)', client.execute_command('lscpu | grep avx').stdout)) == 0: if not (Version('4.2.5.6') <= repository.version < Version('4.3.0.0') or Version('4.3.5.4') <= repository.version < Version('4.4.0.0') or Version('4.4.1.0') <= repository.version): critical(server, 'cpu', err.EC_CPU_NOT_SUPPORT_INSTRUCTION_SET.format(server=server, instruction_set='avx'), [err.SUG_CHANGE_SERVER.format()]) elif ('arm' in basearch or 'aarch' in basearch) and len(re.findall(r'(^atomics\s+)|(\s+atomics\s+)|(\s+atomics$)', client.execute_command('lscpu | grep atomics').stdout)) == 0 and 'nonlse' not in repository.release: diff --git a/plugins/seekdb/1.0.0/generate_general_config.py b/plugins/seekdb/1.0.0/generate_general_config.py index 3275fd03..5328eed4 100644 --- a/plugins/seekdb/1.0.0/generate_general_config.py +++ b/plugins/seekdb/1.0.0/generate_general_config.py @@ -15,6 +15,7 @@ from __future__ import absolute_import, division, print_function +import platform import re import os @@ -22,6 +23,8 @@ from _errno import EC_OBSERVER_NOT_ENOUGH_MEMORY_ALAILABLE, EC_OBSERVER_NOT_ENOUGH_MEMORY_CACHED, EC_OBSERVER_GET_MEMINFO_FAIL import _errno as err +IS_DARWIN = platform.system() == 'Darwin' + def get_system_memory(memory_limit): if memory_limit < 12 << 30: @@ -102,22 +105,44 @@ def generate_general_config(plugin_context, generate_config_mini=False, auto_dep min_pool_memory = server_config['__min_full_resource_pool_memory'] 
min_memory = max(system_memory, MIN_MEMORY) if ip not in ip_server_memory_info: - ret = client.execute_command('cat /proc/meminfo') - if ret: - ip_server_memory_info[ip] = server_memory_stats = {} - memory_key_map = { - 'MemTotal': 'total', - 'MemFree': 'free', - 'MemAvailable': 'available', - 'Buffers': 'buffers', - 'Cached': 'cached' - } - for key in memory_key_map: - server_memory_stats[memory_key_map[key]] = 0 - for k, v in re.findall('(\w+)\s*:\s*(\d+\s*\w+)', ret.stdout): - if k in memory_key_map: - key = memory_key_map[k] - server_memory_stats[key] = Capacity(str(v)).bytes + if IS_DARWIN: + ret = client.execute_command('sysctl hw.memsize') + if ret: + try: + total_mem = int(re.findall(r'hw\.memsize:\s*(\d+)', ret.stdout)[0]) + vm_ret = client.execute_command('vm_stat') + page_size = 16384 + ps_match = re.search(r'page size of (\d+) bytes', vm_ret.stdout) if vm_ret else None + if ps_match: + page_size = int(ps_match.group(1)) + free_pages = int(re.findall(r'Pages free:\s+(\d+)', vm_ret.stdout)[0]) if vm_ret else 0 + inactive_pages = int(re.findall(r'Pages inactive:\s+(\d+)', vm_ret.stdout)[0]) if vm_ret else 0 + ip_server_memory_info[ip] = server_memory_stats = { + 'total': total_mem, + 'free': free_pages * page_size, + 'available': (free_pages + inactive_pages) * page_size, + 'buffers': 0, + 'cached': inactive_pages * page_size + } + except Exception: + stdio.exception('Failed to parse macOS memory info') + else: + ret = client.execute_command('cat /proc/meminfo') + if ret: + ip_server_memory_info[ip] = server_memory_stats = {} + memory_key_map = { + 'MemTotal': 'total', + 'MemFree': 'free', + 'MemAvailable': 'available', + 'Buffers': 'buffers', + 'Cached': 'cached' + } + for key in memory_key_map: + server_memory_stats[memory_key_map[key]] = 0 + for k, v in re.findall('(\w+)\s*:\s*(\d+\s*\w+)', ret.stdout): + if k in memory_key_map: + key = memory_key_map[k] + server_memory_stats[key] = Capacity(str(v)).bytes if 
user_server_config.get('memory_limit_percentage'): if ip in ip_server_memory_info: @@ -180,7 +205,10 @@ def generate_general_config(plugin_context, generate_config_mini=False, auto_dep # cpu if not server_config.get('cpu_count'): - ret = client.execute_command("grep -e 'processor\s*:' /proc/cpuinfo | wc -l") + if IS_DARWIN: + ret = client.execute_command("sysctl -n hw.ncpu") + else: + ret = client.execute_command("grep -e 'processor\s*:' /proc/cpuinfo | wc -l") if ret and ret.stdout.strip().isdigit(): cpu_num = int(ret.stdout) server_config['cpu_count'] = max(MIN_CPU_COUNT, int(cpu_num - 2)) @@ -196,7 +224,8 @@ def generate_general_config(plugin_context, generate_config_mini=False, auto_dep log_disk_size = Capacity(server_config.get('log_disk_size', 0)).bytes if not server_config.get('datafile_size') or not server_config.get('log_disk_size'): disk = {'/': 0} - ret = client.execute_command('df --block-size=1024') + df_cmd = 'df -Pk' if IS_DARWIN else 'df --block-size=1024' + ret = client.execute_command(df_cmd) if ret: for total, used, avail, puse, path in re.findall('(\d+)\s+(\d+)\s+(\d+)\s+(\d+%)\s+(.+)', ret.stdout): disk[path] = { @@ -206,7 +235,7 @@ def generate_general_config(plugin_context, generate_config_mini=False, auto_dep } for include_dir in dirs.values(): while include_dir not in disk: - ret = client.execute_command('df --block-size=1024 %s' % include_dir) + ret = client.execute_command('%s %s' % (df_cmd, include_dir)) if ret: for total, used, avail, puse, path in re.findall('(\d+)\s+(\d+)\s+(\d+)\s+(\d+%)\s+(.+)', ret.stdout): disk[path] = { diff --git a/plugins/seekdb/1.0.0/health_check.py b/plugins/seekdb/1.0.0/health_check.py index 66fa278d..203b3b7b 100644 --- a/plugins/seekdb/1.0.0/health_check.py +++ b/plugins/seekdb/1.0.0/health_check.py @@ -37,7 +37,7 @@ def health_check(plugin_context, *args, **kwargs): remote_pid_path = '%s/run/seekdb.pid' % home_path stdio.verbose('%s program health check' % server) remote_pid = client.execute_command('cat 
%s' % remote_pid_path).stdout.strip() - if remote_pid and client.execute_command('ls /proc/%s' % remote_pid): + if remote_pid and client.execute_command('ps -p %s' % remote_pid): stdio.verbose('%s seekdb[pid: %s] started', server, remote_pid) else: failed.append(EC_OBSERVER_FAIL_TO_START.format(server=server)) diff --git a/plugins/seekdb/1.0.0/init_pre.py b/plugins/seekdb/1.0.0/init_pre.py index ee8a1de0..b79e8b86 100644 --- a/plugins/seekdb/1.0.0/init_pre.py +++ b/plugins/seekdb/1.0.0/init_pre.py @@ -15,6 +15,10 @@ from __future__ import absolute_import, division, print_function +import platform + +IS_DARWIN = platform.system() == 'Darwin' + def init_pre(plugin_context, *args, **kwargs): data_dir_same_redo_dir_keys = ['home_path', 'data_dir', 'clog_dir', 'slog_dir'] @@ -48,11 +52,12 @@ def rm_meta(client, home_path, critical, EC_FAIL_TO_INIT_PATH, server, InitDirFa def same_disk_check(stdio, client, server_config, critical, EC_FAIL_TO_INIT_PATH, server, *args, **kwargs): stdio.verbose("check slog dir in the same disk with data dir") slog_disk = data_disk = None - ret = client.execute_command("df --block-size=1024 %s | awk 'NR == 2 { print $1 }'" % server_config['slog_dir']) + df_cmd = 'df -Pk' if IS_DARWIN else 'df --block-size=1024' + ret = client.execute_command("%s %s | awk 'NR == 2 { print $1 }'" % (df_cmd, server_config['slog_dir'])) if ret: slog_disk = ret.stdout.strip() stdio.verbose('slog disk is {}'.format(slog_disk)) - ret = client.execute_command("df --block-size=1024 %s | awk 'NR == 2 { print $1 }'" % server_config['data_dir']) + ret = client.execute_command("%s %s | awk 'NR == 2 { print $1 }'" % (df_cmd, server_config['data_dir'])) if ret: data_disk = ret.stdout.strip() stdio.verbose('data disk is {}'.format(data_disk)) diff --git a/plugins/seekdb/1.0.0/parameter_check.py b/plugins/seekdb/1.0.0/parameter_check.py index 5475f444..f36a950a 100644 --- a/plugins/seekdb/1.0.0/parameter_check.py +++ b/plugins/seekdb/1.0.0/parameter_check.py @@ -15,10 +15,13 
@@ from __future__ import absolute_import, division, print_function import os +import platform import _errno as err from _types import Capacity +IS_DARWIN = platform.system() == 'Darwin' + def parameter_check(plugin_context, generate_configs={}, *args, **kwargs): cluster_config = plugin_context.cluster_config @@ -109,7 +112,11 @@ def parameter_check(plugin_context, generate_configs={}, *args, **kwargs): devname = server_config.get('devname') if devname: - if not client.execute_command("grep -e '^ *%s:' /proc/net/dev" % devname): + if IS_DARWIN: + devname_check_cmd = "ifconfig %s 2>/dev/null" % devname + else: + devname_check_cmd = "grep -e '^ *%s:' /proc/net/dev" % devname + if not client.execute_command(devname_check_cmd): suggest = err.SUG_NO_SUCH_NET_DEVIC.format(ip=ip) suggest.auto_fix = 'devname' not in global_generate_config and 'devname' not in server_generate_config critical(server, 'net', err.EC_NO_SUCH_NET_DEVICE.format(server=server, devname=devname), suggests=[suggest]) diff --git a/plugins/seekdb/1.0.0/resource_check.py b/plugins/seekdb/1.0.0/resource_check.py index be8471b2..96bb2b34 100644 --- a/plugins/seekdb/1.0.0/resource_check.py +++ b/plugins/seekdb/1.0.0/resource_check.py @@ -14,6 +14,7 @@ # limitations under the License. 
from __future__ import absolute_import, division, print_function +import platform import re import os import time @@ -21,10 +22,16 @@ import _errno as err from _types import Capacity +IS_DARWIN = platform.system() == 'Darwin' + def get_disk_info_by_path(path, client, stdio): disk_info = {} - ret = client.execute_command('df --block-size=1024 {}'.format(path)) + if IS_DARWIN: + # Use -P for POSIX output format (no extra iused/ifree columns) + ret = client.execute_command('df -Pk {}'.format(path)) + else: + ret = client.execute_command('df --block-size=1024 {}'.format(path)) if ret: for total, used, avail, puse, path in re.findall(r'(\d+)\s+(\d+)\s+(\d+)\s+(\d+%)\s+(.+)', ret.stdout): disk_info[path] = {'total': int(total) << 10, 'avail': int(avail) << 10, 'need': 0} @@ -67,7 +74,15 @@ def get_mount_path(disk, _path): def time_delta(client): time_st = time.time() * 1000 - time_srv = int(client.execute_command('date +%s%N').stdout) / 1000000 + if IS_DARWIN: + # macOS date does not support %N (nanoseconds), use python fallback + ret = client.execute_command('python3 -c "import time; print(int(time.time()*1e6))"') + if ret: + time_srv = int(ret.stdout.strip()) / 1000 + else: + time_srv = int(client.execute_command('date +%s').stdout.strip()) * 1000 + else: + time_srv = int(client.execute_command('date +%s%N').stdout) / 1000000 time_ed = time.time() * 1000 time_it = time_ed - time_st @@ -109,6 +124,8 @@ def system_memory_check(): ip_server_memory_info = {} servers_disk = plugin_context.get_variable('need_check_servers_disk') + if servers_disk is None: + servers_disk = {} ip_servers = [] for ip in servers_disk: client = servers_clients[ip] @@ -116,8 +133,50 @@ def system_memory_check(): server_num = len(ip_servers) # memory - ret = client.execute_command('cat /proc/meminfo') - if ret: + if IS_DARWIN: + ret = client.execute_command('sysctl hw.memsize') + else: + ret = client.execute_command('cat /proc/meminfo') + if ret and IS_DARWIN: + try: + total_mem = 
int(re.findall(r'hw\.memsize:\s*(\d+)', ret.stdout)[0]) + # Get free/available memory from vm_stat + vm_ret = client.execute_command('vm_stat') + page_size = 16384 # default on Apple Silicon + ps_match = re.search(r'page size of (\d+) bytes', vm_ret.stdout) if vm_ret else None + if ps_match: + page_size = int(ps_match.group(1)) + free_pages = int(re.findall(r'Pages free:\s+(\d+)', vm_ret.stdout)[0]) if vm_ret else 0 + inactive_pages = int(re.findall(r'Pages inactive:\s+(\d+)', vm_ret.stdout)[0]) if vm_ret else 0 + server_memory_stats = { + 'total': total_mem, + 'free': free_pages * page_size, + 'available': (free_pages + inactive_pages) * page_size, + 'buffers': 0, + 'cached': inactive_pages * page_size + } + ip_server_memory_info[ip] = server_memory_stats + server_memory_stat = servers_memory[ip] + min_start_need = server_num * START_NEED_MEMORY + total_use = int(server_memory_stat['percentage'] * server_memory_stats['total'] / 100 + server_memory_stat['num']) + if min_start_need > server_memory_stats['available']: + for server in ip_servers: + error(server, 'mem', err.EC_OBSERVER_NOT_ENOUGH_MEMORY_ALAILABLE.format(ip=ip, available=str(Capacity(server_memory_stats['available'])), need=str(Capacity(min_start_need))), [err.SUG_OBSERVER_NOT_ENOUGH_MEMORY_ALAILABLE.format(ip=ip)]) + elif total_use > server_memory_stats['free'] + server_memory_stats['cached']: + for server in ip_servers: + server_generate_config = generate_configs.get(server, {}) + suggest = err.SUG_OBSERVER_REDUCE_MEM.format() + suggest.auto_fix = True + for key in ['memory_limit', 'memory_limit_percentage']: + if key in global_generate_config or key in server_generate_config: + suggest.auto_fix = False + break + error(server, 'mem', err.EC_OBSERVER_NOT_ENOUGH_MEMORY_CACHED.format(ip=ip, free=str(Capacity(server_memory_stats['free'])), cached=str(Capacity(server_memory_stats['cached'])), need=str(Capacity(total_use))), [suggest]) + else: + system_memory_check() + except Exception: + 
stdio.exception('Failed to parse macOS memory info') + elif ret: server_memory_stats = {} memory_key_map = { 'MemTotal': 'total', diff --git a/plugins/seekdb/1.0.0/start_pre.py b/plugins/seekdb/1.0.0/start_pre.py index 1a331f7f..5fda5632 100644 --- a/plugins/seekdb/1.0.0/start_pre.py +++ b/plugins/seekdb/1.0.0/start_pre.py @@ -96,7 +96,7 @@ def start_pre(plugin_context, *args, **kwargs): remote_pid_path = '%s/run/seekdb.pid' % home_path remote_pid = client.execute_command('cat %s' % remote_pid_path).stdout.strip() if remote_pid: - if client.execute_command('ls /proc/%s' % remote_pid): + if client.execute_command('ps -p %s' % remote_pid): continue stdio.verbose('%s start command construction' % server) diff --git a/plugins/seekdb/1.0.0/status.py b/plugins/seekdb/1.0.0/status.py index 42c709a9..f8860c25 100644 --- a/plugins/seekdb/1.0.0/status.py +++ b/plugins/seekdb/1.0.0/status.py @@ -30,6 +30,6 @@ def status(plugin_context, *args, **kwargs): continue remote_pid_path = '%s/run/seekdb.pid' % server_config['home_path'] remote_pid = client.execute_command('cat %s' % remote_pid_path).stdout.strip() - if remote_pid and client.execute_command('ls /proc/%s' % remote_pid): + if remote_pid and client.execute_command('ps -p %s' % remote_pid): cluster_status[server] = 1 return plugin_context.return_true(cluster_status=cluster_status) diff --git a/plugins/seekdb/1.0.0/status_check.py b/plugins/seekdb/1.0.0/status_check.py index 032f87c0..ee3dd7da 100644 --- a/plugins/seekdb/1.0.0/status_check.py +++ b/plugins/seekdb/1.0.0/status_check.py @@ -51,7 +51,7 @@ def status_check(plugin_context, work_dir_check=False, precheck=False, source_op remote_pid_path = '%s/run/seekdb.pid' % home_path remote_pid = client.execute_command('cat %s' % remote_pid_path).stdout.strip() if remote_pid: - if client.execute_command('ls /proc/%s' % remote_pid): + if client.execute_command('ps -p %s' % remote_pid): stdio.verbose('%s is runnning, skip' % server) wait_2_pass(server) work_dir_check = False 
diff --git a/plugins/seekdb/1.0.0/stop.py b/plugins/seekdb/1.0.0/stop.py index 157e307e..4e11e98e 100644 --- a/plugins/seekdb/1.0.0/stop.py +++ b/plugins/seekdb/1.0.0/stop.py @@ -16,12 +16,15 @@ from __future__ import absolute_import, division, print_function import json +import platform import time import requests from urllib.parse import urlparse from tool import NetUtil +IS_DARWIN = platform.system() == 'Darwin' + def is_ob_configserver(obconfig_url, stdio): parsed_url = urlparse(obconfig_url) @@ -52,6 +55,8 @@ def config_url(ocp_config_server, appname, cid): def get_port_socket_inode(client, port): + if IS_DARWIN: + return [] port = hex(port)[2:].zfill(4).upper() cmd = "bash -c 'cat /proc/net/{tcp*,udp*}' | awk -F' ' '{print $2,$10}' | grep '00000000:%s' | awk -F' ' '{print $2}' | uniq" % port res = client.execute_command(cmd) @@ -61,6 +66,13 @@ def get_port_socket_inode(client, port): def port_release_check(client, pid, port, count): + if IS_DARWIN: + # On macOS, use lsof to check if the port is still held by the process + ret = client.execute_command('lsof -nP -iTCP:%s -sTCP:LISTEN 2>/dev/null | grep -w %s' % (port, pid)) + if ret and ret.stdout.strip(): + return False + # Check if process still exists + return not client.execute_command('ps -p %s -o pid=' % pid).stdout.strip() socket_inodes = get_port_socket_inode(client, port) if not socket_inodes: return True @@ -136,9 +148,14 @@ def stop(plugin_context, *args, **kwargs): data = servers[server] server_config = cluster_config.get_server_conf(server) client = clients[server] - client.execute_command( - "if [[ -d /proc/%s ]]; then pkill -9 -u `whoami` -f '%s/bin/seekdb -p %s';fi" % - (data['pid'], server_config['home_path'], server_config['mysql_port'])) + if IS_DARWIN: + client.execute_command( + "if ps -p %s > /dev/null 2>&1; then pkill -9 -u `whoami` -f '%s/bin/seekdb -p %s'; fi" % + (data['pid'], server_config['home_path'], server_config['mysql_port'])) + else: + client.execute_command( + "if [[ -d 
/proc/%s ]]; then pkill -9 -u `whoami` -f '%s/bin/seekdb -p %s';fi" % + (data['pid'], server_config['home_path'], server_config['mysql_port'])) time.sleep(3) if servers: diff --git a/plugins/seekdb/1.0.0/system_limits_check.py b/plugins/seekdb/1.0.0/system_limits_check.py index 96c56301..26b97f56 100644 --- a/plugins/seekdb/1.0.0/system_limits_check.py +++ b/plugins/seekdb/1.0.0/system_limits_check.py @@ -14,10 +14,13 @@ # limitations under the License. from __future__ import absolute_import, division, print_function +import platform import re import _errno as err +IS_DARWIN = platform.system() == 'Darwin' + def system_limits_check(plugin_context, ulimits_min, generate_configs={}, strict_check=False, *args, **kwargs): stdio = plugin_context.stdio @@ -40,62 +43,75 @@ def system_limits_check(plugin_context, ulimits_min, generate_configs={}, strict ip_servers = servers_memory[ip]['servers'].keys() server_num = len(ip_servers) - ret = client.execute_command('cat /proc/sys/fs/aio-max-nr /proc/sys/fs/aio-nr') - if not ret: - for server in ip_servers: - critical(server, 'aio', err.EC_FAILED_TO_GET_AIO_NR.format(ip=ip), [err.SUG_CONNECT_EXCEPT.format()]) + if IS_DARWIN: + # macOS does not have /proc/sys/fs/aio-max-nr, skip AIO check + stdio.verbose('Skip AIO check on macOS') else: - try: - max_nr, nr = ret.stdout.strip().split('\n') - max_nr, nr = int(max_nr), int(nr) - need = server_num * 20000 - RECD_AIO = 1048576 - if need > max_nr - nr: - for server in ip_servers: - critical(server, 'aio', err.EC_AIO_NOT_ENOUGH.format(ip=ip, avail=max_nr - nr, need=need), [err.SUG_SYSCTL.format(var='fs.aio-max-nr', value=max(RECD_AIO, need), ip=ip)]) - elif int(max_nr) < RECD_AIO: - for server in ip_servers: - alert(server, 'aio', err.WC_AIO_NOT_ENOUGH.format(ip=ip, current=max_nr), [err.SUG_SYSCTL.format(var='fs.aio-max-nr', value=RECD_AIO, ip=ip)]) - except: - for server in ip_servers: - alert(server, 'aio', err.EC_FAILED_TO_GET_AIO_NR.format(ip=ip), [err.SUG_UNSUPPORT_OS.format()]) 
- stdio.exception('') - - ret = client.execute_command('bash -c "ulimit -a"') - ulimits = {} - src_data = re.findall('\s?([a-zA-Z\s]+[a-zA-Z])\s+\([a-zA-Z\-,\s]+\)\s+([\d[a-zA-Z]+)', ret.stdout) if ret else [] - for key, value in src_data: - ulimits[key] = value - for key in ulimits_min: - value = ulimits.get(key) - if value == 'unlimited': - continue - if not value or not (value.strip().isdigit()): + ret = client.execute_command('cat /proc/sys/fs/aio-max-nr /proc/sys/fs/aio-nr') + if not ret: for server in ip_servers: - alert(server, 'ulimit', '(%s) failed to get %s' % (ip, key), suggests=[err.SUG_UNSUPPORT_OS.format()]) + critical(server, 'aio', err.EC_FAILED_TO_GET_AIO_NR.format(ip=ip), [err.SUG_CONNECT_EXCEPT.format()]) else: - value = int(value) - need = ulimits_min[key]['need'](server_num) - if need > value: - if (strict_check or production_mode) and ulimits_min[key].get('below_recd_error_strict', True) and value < ulimits_min[key]['recd'](server_num): - need = ulimits_min[key]['recd'](server_num) - need = need if need != INF else 'unlimited' + try: + max_nr, nr = ret.stdout.strip().split('\n') + max_nr, nr = int(max_nr), int(nr) + need = server_num * 20000 + RECD_AIO = 1048576 + if need > max_nr - nr: + for server in ip_servers: + critical(server, 'aio', err.EC_AIO_NOT_ENOUGH.format(ip=ip, avail=max_nr - nr, need=need), [err.SUG_SYSCTL.format(var='fs.aio-max-nr', value=max(RECD_AIO, need), ip=ip)]) + elif int(max_nr) < RECD_AIO: + for server in ip_servers: + alert(server, 'aio', err.WC_AIO_NOT_ENOUGH.format(ip=ip, current=max_nr), [err.SUG_SYSCTL.format(var='fs.aio-max-nr', value=RECD_AIO, ip=ip)]) + except: + for server in ip_servers: + alert(server, 'aio', err.EC_FAILED_TO_GET_AIO_NR.format(ip=ip), [err.SUG_UNSUPPORT_OS.format()]) + stdio.exception('') + + if IS_DARWIN: + # macOS ulimit defaults are different and cannot be changed the same way as Linux. + # Skip ulimit checks on macOS to avoid false errors. 
+ stdio.verbose('Skip ulimit check on macOS') + else: + ret = client.execute_command('bash -c "ulimit -a"') + ulimits = {} + src_data = re.findall('\s?([a-zA-Z\s]+[a-zA-Z])\s+\([a-zA-Z\-,\s]+\)\s+([\d[a-zA-Z]+)', ret.stdout) if ret else [] + for key, value in src_data: + ulimits[key] = value + for key in ulimits_min: + value = ulimits.get(key) + if value == 'unlimited': + continue + if not value or not (value.strip().isdigit()): for server in ip_servers: - if ulimits_min[key].get('below_need_error', True): - critical(server, 'ulimit', err.EC_ULIMIT_CHECK.format(server=ip, key=key, need=need, now=value), [err.SUG_ULIMIT.format(name=ulimits_min[key]['name'], value=need, ip=ip)]) - else: - alert(server, 'ulimit', err.EC_ULIMIT_CHECK.format(server=ip, key=key, need=need, now=value), suggests=[err.SUG_ULIMIT.format(name=ulimits_min[key]['name'], value=need, ip=ip)]) + alert(server, 'ulimit', '(%s) failed to get %s' % (ip, key), suggests=[err.SUG_UNSUPPORT_OS.format()]) else: - need = ulimits_min[key]['recd'](server_num) + value = int(value) + need = ulimits_min[key]['need'](server_num) if need > value: + if (strict_check or production_mode) and ulimits_min[key].get('below_recd_error_strict', True) and value < ulimits_min[key]['recd'](server_num): + need = ulimits_min[key]['recd'](server_num) need = need if need != INF else 'unlimited' for server in ip_servers: - if ulimits_min[key].get('below_recd_error_strict', True): - alert(server, 'ulimit', err.WC_ULIMIT_CHECK.format(server=ip, key=key, need=need, now=value), suggests=[err.SUG_ULIMIT.format(name=ulimits_min[key]['name'], value=need, ip=ip)]) + if ulimits_min[key].get('below_need_error', True): + critical(server, 'ulimit', err.EC_ULIMIT_CHECK.format(server=ip, key=key, need=need, now=value), [err.SUG_ULIMIT.format(name=ulimits_min[key]['name'], value=need, ip=ip)]) else: - stdio.warn(err.WC_ULIMIT_CHECK.format(server=ip, key=key, need=need, now=value)) + alert(server, 'ulimit', err.EC_ULIMIT_CHECK.format(server=ip, 
key=key, need=need, now=value), suggests=[err.SUG_ULIMIT.format(name=ulimits_min[key]['name'], value=need, ip=ip)]) + else: + need = ulimits_min[key]['recd'](server_num) + if need > value: + need = need if need != INF else 'unlimited' + for server in ip_servers: + if ulimits_min[key].get('below_recd_error_strict', True): + alert(server, 'ulimit', err.WC_ULIMIT_CHECK.format(server=ip, key=key, need=need, now=value), suggests=[err.SUG_ULIMIT.format(name=ulimits_min[key]['name'], value=need, ip=ip)]) + else: + stdio.warn(err.WC_ULIMIT_CHECK.format(server=ip, key=key, need=need, now=value)) - if kernel_check: + if IS_DARWIN: + # macOS sysctl params have different names than Linux kernel params. + # Skip kernel param check on macOS. + stdio.verbose('Skip kernel param check on macOS') + elif kernel_check: # check kernel params try: for server in plugin_context.cluster_config.servers: diff --git a/tgz/build.sh b/tgz/build.sh new file mode 100755 index 00000000..bb0121f4 --- /dev/null +++ b/tgz/build.sh @@ -0,0 +1,256 @@ +#!/bin/bash + +python_bin='python3' +W_DIR=`pwd` +VERSION=${VERSION:-'2.7.0'} +RELEASE=${RELEASE:-'1'} + +# macOS install paths (used by install.sh) +INSTALL_BIN_DIR="/usr/local/bin" +INSTALL_OBD_DIR="/usr/local/obd" +PROFILE_DIR="/usr/local/etc/profile.d" + +# Cross-platform sed -i wrapper (macOS BSD sed requires '' as backup extension) +sedi() { + sed -i '' "$@" +} + +# Download wrapper (prefer curl on macOS) +download_file() { + local url="$1" + local output="$2" + if command -v curl >/dev/null 2>&1; then + curl -fsSL "$url" -o "$output" + elif command -v wget >/dev/null 2>&1; then + wget "$url" -O "$output" + else + echo "Error: neither curl nor wget is available" + exit 1 + fi +} + +function python_version() +{ + return `$python_bin -c 'import sys; print (sys.version_info.major)'` +} + +function ispy3() +{ + python_version + if [ $? 
!= 3 ]; then + echo 'need python3' + exit 1 + fi +} + +function cd2workdir() +{ + cd $W_DIR + DIR=`dirname $0` + cd $DIR +} + +function build_tgz() +{ + ispy3 + cd2workdir + DIR=`pwd` + SRC_DIR="$DIR/.." + + cd $SRC_DIR + + # Get git info + if [ `git log |head -n1 | awk -F' ' '{print $2}'` ]; then + CID=`git log |head -n1 | awk -F' ' '{print $2}'` + BRANCH=`git rev-parse --abbrev-ref HEAD` + else + CID='UNKNOWN' + BRANCH='UNKNOWN' + fi + DATE=`date '+%b %d %Y %H:%M:%S'` + VERSION="$VERSION".`date +%s` + + BUILD_DIR="$DIR/.build" + STAGE_DIR="$DIR/.stage" + rm -fr $BUILD_DIR $STAGE_DIR + + mkdir -p $BUILD_DIR/lib/site-packages + mkdir -p $BUILD_DIR/mirror/remote + + # Download OceanBase.repo + download_file https://mirrors.aliyun.com/oceanbase/OceanBase.repo $BUILD_DIR/mirror/remote/OceanBase.repo + + # Patch const.py placeholders + sedi "s/<CID>/$CID/" const.py + sedi "s/<B_BRANCH>/$BRANCH/" const.py + sedi "s/<B_TIME>/$DATE/" const.py + sedi "s/<DEBUG>/$OBD_DUBUG/" const.py + sedi "s/<VERSION>/$VERSION/" const.py + + # Generate obd.py entry point + cp -f _cmd.py obd.py + sedi "s|<DOC_LINK>|$OBD_DOC_LINK|" _errno.py + + # Install dependencies + pip install -r requirements3.txt || exit 1 + pip install -r plugins-requirements3.txt --target=$BUILD_DIR/lib/site-packages || exit 1 + + # Build with PyInstaller -D (directory) mode for faster startup + pyinstaller --hidden-import=decimal --hidden-import=configparser --hidden-import=yaml -D obd.py || exit 1 + rm -f obd.py obd.spec + + # ---- Assemble data files into BUILD_DIR ---- + \cp -rf plugins $BUILD_DIR/plugins + \cp -rf workflows $BUILD_DIR/workflows + \cp -rf config_parser $BUILD_DIR/config_parser + \cp -rf optimize $BUILD_DIR/optimize + \cp -rf example $BUILD_DIR/example + \cp -rf profile $BUILD_DIR/profile + + rm -fr $BUILD_DIR/config_parser/oceanbase-ce + + # Create symlinks in plugins (same as RPM spec) + cd $BUILD_DIR/plugins + ln -sf oceanbase oceanbase-ce + ln -sf oceanbase oceanbase-standalone + [ -d oceanbase-libs ] && ln -sf oceanbase-libs 
oceanbase-ce-libs + [ -d oceanbase-libs ] && ln -sf oceanbase-libs oceanbase-standalone-libs + [ -d oceanbase-ce-utils ] && ln -sf oceanbase-ce-utils oceanbase-standalone-utils + [ -d ocp-server ] && ln -sf ocp-server ocp-server-ce + [ -d obproxy ] && [ -d obproxy-ce ] && \cp -rf obproxy/* obproxy-ce/ + [ -d obproxy-ce ] && \cp -rf $SRC_DIR/plugins/obproxy-ce/* obproxy-ce/ 2>/dev/null + [ -d oms ] && ln -sf oms oms-ce + [ -d obbinlog-ce ] && ln -sf obbinlog-ce obbinlog + [ -d obproxy/3.1.0 ] && mv obproxy/3.1.0 obproxy/3.2.1 + + # Create symlinks in workflows + cd $BUILD_DIR/workflows + ln -sf oceanbase oceanbase-ce + ln -sf oceanbase oceanbase-standalone + [ -d ocp-server ] && ln -sf ocp-server ocp-server-ce + [ -d obproxy ] && ln -sf obproxy obproxy-ce + [ -d obbinlog-ce ] && ln -sf obbinlog-ce obbinlog + [ -d oms ] && ln -sf oms oms-ce + [ -d obproxy/3.1.0 ] && mv obproxy/3.1.0 obproxy/3.2.1 + + # Create symlinks in config_parser + cd $BUILD_DIR/config_parser + ln -sf oceanbase oceanbase-ce + ln -sf oceanbase oceanbase-standalone + + # Create symlinks in optimize + cd $BUILD_DIR/optimize + [ -d obproxy ] && ln -sf obproxy obproxy-ce + + # ---- Assemble tar.gz staging directory (mirrors RPM file layout) ---- + # + # Layout inside tar.gz (following file.txt): + # etc/profile.d/obd.sh + # usr/bin/obd -> symlink to ../obd/bin/obd + # usr/obd/bin/obd <- PyInstaller -D binary + # usr/obd/bin/_internal/ <- PyInstaller -D dependencies + # usr/obd/config_parser/ + # usr/obd/example/ + # usr/obd/lib/site-packages/ + # usr/obd/mirror/ + # usr/obd/optimize/ + # usr/obd/plugins/ + # usr/obd/workflows/ + # + ARCH=$(uname -m) + PKG_NAME="ob-deploy-${VERSION}-macos-${ARCH}" + PKG_ROOT="$STAGE_DIR" + + # usr/bin (binary symlink) + mkdir -p $PKG_ROOT/usr/bin + + # usr/obd (all obd data) + mkdir -p $PKG_ROOT/usr/obd/bin + mkdir -p $PKG_ROOT/usr/obd/lib + + # etc/profile.d + mkdir -p $PKG_ROOT/etc/profile.d + + # -- Copy PyInstaller -D output into usr/obd/bin/ -- + \cp -rf 
$SRC_DIR/dist/obd/* $PKG_ROOT/usr/obd/bin/ + + # -- Create usr/bin/obd symlink (relative path to ../obd/bin/obd) -- + cd $PKG_ROOT/usr/bin + ln -sf ../obd/bin/obd obd + + # -- Copy data directories into usr/obd/ -- + \cp -rf $BUILD_DIR/plugins $PKG_ROOT/usr/obd/plugins + \cp -rf $BUILD_DIR/workflows $PKG_ROOT/usr/obd/workflows + \cp -rf $BUILD_DIR/config_parser $PKG_ROOT/usr/obd/config_parser + \cp -rf $BUILD_DIR/optimize $PKG_ROOT/usr/obd/optimize + \cp -rf $BUILD_DIR/example $PKG_ROOT/usr/obd/example + \cp -rf $BUILD_DIR/mirror $PKG_ROOT/usr/obd/mirror + \cp -rf $BUILD_DIR/lib/site-packages $PKG_ROOT/usr/obd/lib/site-packages + + # -- Copy shell profile into etc/profile.d/ -- + \cp -rf $BUILD_DIR/profile/* $PKG_ROOT/etc/profile.d/ + + # -- Copy install script into package root -- + \cp -f $DIR/install.sh $PKG_ROOT/install.sh + chmod +x $PKG_ROOT/install.sh + + # Clean up build artifacts + rm -fr $SRC_DIR/dist $SRC_DIR/build + + # Create tar.gz (paths prefixed with ./ e.g. ./usr/bin/obd) + cd $STAGE_DIR + tar czf $DIR/${PKG_NAME}.tar.gz . + + # Clean up staging + rm -fr $STAGE_DIR $BUILD_DIR + + echo "" + echo "==========================================" + echo "Build successful!" 
+ echo "Package: $DIR/${PKG_NAME}.tar.gz" + echo "==========================================" + echo "" + echo "To install:" + echo " mkdir ob-deploy && tar xzf ${PKG_NAME}.tar.gz -C ob-deploy" + echo " cd ob-deploy && sudo ./install.sh" +} + +function get_python() +{ + if [ `id -u` != 0 ] ; then + echo "Please use root (or sudo) to run" + fi + + obd_dir=`dirname $0` + python_path=`which python3 2>/dev/null || which python 2>/dev/null` + for bin in ${python_path[@]}; do + if [ -e $bin ]; then + python_bin=$bin + break 1 + fi + done + + if [ ${#python_path[*]} -gt 1 ]; then + read -p "Enter python path [default $python_bin]:" + if [ "x$REPLY" != "x" ]; then + python_bin=$REPLY + fi + fi +} + +case "x$1" in + xbuild) + get_python + build_tgz + ;; + xbuild_tgz) + build_tgz + ;; + *) + echo "Usage: $0 {build|build_tgz}" + echo "" + echo " build - detect python and build tar.gz package" + echo " build_tgz - build tar.gz package directly (requires python3)" + exit 1 + ;; +esac diff --git a/tgz/install.sh b/tgz/install.sh new file mode 100755 index 00000000..c62d15fd --- /dev/null +++ b/tgz/install.sh @@ -0,0 +1,101 @@ +#!/bin/bash +# +# OceanBase Deploy (OBD) macOS Installation Script +# This script installs OBD from the extracted tar.gz package. 
+# +# Package layout (mirrors RPM file.txt): +# etc/profile.d/obd.sh +# usr/bin/obd -> symlink to ../obd/bin/obd +# usr/obd/bin/ -> OBD binary (PyInstaller -D output) +# usr/obd/config_parser/ +# usr/obd/example/ +# usr/obd/lib/site-packages/ +# usr/obd/mirror/ +# usr/obd/optimize/ +# usr/obd/plugins/ +# usr/obd/workflows/ +# +# Installed layout (with /usr/local prefix on macOS): +# /usr/local/bin/obd -> symlink to ../obd/bin/obd +# /usr/local/obd/ -> all OBD data +# /usr/local/etc/profile.d/obd.sh -> shell completion +# + +set -e + +# macOS installation prefix +PREFIX="/usr/local" + +INSTALL_BIN_DIR="${PREFIX}/bin" +INSTALL_OBD_DIR="${PREFIX}/obd" +PROFILE_DIR="${PREFIX}/etc/profile.d" +CHOWN_GROUP="root:wheel" + +# Get the directory where this script is located (extracted package root) +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" + +echo "Installing OceanBase Deploy (OBD) ..." +echo " Binary: ${INSTALL_BIN_DIR}/obd" +echo " Home: ${INSTALL_OBD_DIR}/" +echo " Profile: ${PROFILE_DIR}/obd.sh" +echo "" + +# Check for root/sudo +if [ "$(id -u)" -ne 0 ]; then + echo "Warning: Running without root privileges. You may need 'sudo ./install.sh'." + echo "Attempting installation anyway..." + echo "" +fi + +# Remove old installation if exists +rm -rf ${INSTALL_OBD_DIR} +rm -f ${INSTALL_BIN_DIR}/obd + +# Create target directories +mkdir -p ${INSTALL_BIN_DIR} +mkdir -p ${INSTALL_OBD_DIR} +mkdir -p ${PROFILE_DIR} + +# ---- Install usr/obd/ -> /usr/local/obd/ ---- +echo " -> Installing usr/obd/ ..." +\cp -rf ${SCRIPT_DIR}/usr/obd/* ${INSTALL_OBD_DIR}/ + +# ---- Install usr/bin/obd -> /usr/local/bin/obd (symlink) ---- +echo " -> Installing usr/bin/obd ..." +ln -sf ../obd/bin/obd ${INSTALL_BIN_DIR}/obd + +# ---- Install etc/profile.d/ -> /usr/local/etc/profile.d/ ---- +echo " -> Installing etc/profile.d/ ..." +\cp -rf ${SCRIPT_DIR}/etc/profile.d/* ${PROFILE_DIR}/ + +# ---- Set permissions ---- +echo " -> Setting permissions ..." 
+chmod -R 755 ${INSTALL_OBD_DIR}/* +find ${INSTALL_OBD_DIR} -type f -exec chmod 644 {} \; +# Restore executable permission on binary and shared libraries +chmod +x ${INSTALL_OBD_DIR}/bin/obd +find ${INSTALL_OBD_DIR}/bin -name "*.so" -exec chmod 755 {} \; 2>/dev/null +find ${INSTALL_OBD_DIR}/bin -name "*.dylib" -exec chmod 755 {} \; 2>/dev/null +find ${INSTALL_OBD_DIR}/bin/_internal -type f -perm +0111 -exec chmod 755 {} \; 2>/dev/null + +# Set ownership if running as root +if [ "$(id -u)" -eq 0 ]; then + chown -R ${CHOWN_GROUP} ${INSTALL_OBD_DIR} +fi + +# Warm up: run once in background to populate OS page cache (faster subsequent launches) +${INSTALL_BIN_DIR}/obd --version > /dev/null 2>&1 & + +echo "" +echo "==========================================" +echo "Installation of OBD finished successfully!" +echo "==========================================" +echo "" +echo "Please source the profile to enable shell completion:" +echo " source ${PROFILE_DIR}/obd.sh" +echo "" +echo "Or add it to your shell profile permanently:" +echo " echo 'source ${PROFILE_DIR}/obd.sh' >> ~/.zshrc" +echo "" +echo "Verify installation:" +echo " obd --version" diff --git a/tgz/ob-deploy-build.sh b/tgz/ob-deploy-build.sh new file mode 100755 index 00000000..b6430d06 --- /dev/null +++ b/tgz/ob-deploy-build.sh @@ -0,0 +1,17 @@ +#!/bin/bash + +PROJECT_DIR=$1 +PROJECT_NAME=$2 +VERSION=$3 +RELEASE=$4 + +CURDIR=$PWD +DIR=`dirname $0` +cd $DIR + +echo "[BUILD] args: CURDIR=${CURDIR} PROJECT_NAME=${PROJECT_NAME} VERSION=${VERSION} RELEASE=${RELEASE}" + +export PROJECT_NAME=${PROJECT_NAME} +export VERSION=${VERSION} +export RELEASE=${RELEASE} +./build.sh build