forked from HomeAssistant/fn_nas

Compare commits
2 Commits

Author | SHA1 | Date
---|---|---
 | fae53cf5b9 |
 | f185b7e3ee |
@@ -1,62 +1,89 @@
 import logging
+import asyncio
+import asyncssh
 from homeassistant.config_entries import ConfigEntry
 from homeassistant.core import HomeAssistant
-from .const import DOMAIN, DATA_UPDATE_COORDINATOR, PLATFORMS, CONF_ENABLE_DOCKER  # 导入新增常量
+from homeassistant.helpers import config_validation as cv
+
+from .const import (
+    DOMAIN, DATA_UPDATE_COORDINATOR, PLATFORMS, CONF_ENABLE_DOCKER,
+    CONF_HOST, DEFAULT_PORT
+)
 from .coordinator import FlynasCoordinator, UPSDataUpdateCoordinator

 _LOGGER = logging.getLogger(__name__)


 async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry):
     config = {**entry.data, **entry.options}
-    coordinator = FlynasCoordinator(hass, config)
-    await coordinator.async_config_entry_first_refresh()
-
-    _LOGGER.debug("协调器类型: %s", type(coordinator).__name__)
-    _LOGGER.debug("协调器是否有control_vm方法: %s", hasattr(coordinator, 'control_vm'))
-    _LOGGER.debug("协调器是否有vm_manager属性: %s", hasattr(coordinator, 'vm_manager'))
-
-    # 检查是否启用Docker,并初始化Docker管理器(如果有)
-    enable_docker = config.get(CONF_ENABLE_DOCKER, False)
-    if enable_docker:
-        # 导入Docker管理器并初始化
-        from .docker_manager import DockerManager
-        coordinator.docker_manager = DockerManager(coordinator)
-        _LOGGER.debug("已启用Docker容器监控")
-    else:
-        coordinator.docker_manager = None
-        _LOGGER.debug("未启用Docker容器监控")
-
-    ups_coordinator = UPSDataUpdateCoordinator(hass, config, coordinator)
-    await ups_coordinator.async_config_entry_first_refresh()
+    coordinator = FlynasCoordinator(hass, config, entry)
+    # 直接初始化,不阻塞等待NAS上线

     hass.data.setdefault(DOMAIN, {})
     hass.data[DOMAIN][entry.entry_id] = {
         DATA_UPDATE_COORDINATOR: coordinator,
-        "ups_coordinator": ups_coordinator,
-        CONF_ENABLE_DOCKER: enable_docker  # 存储启用状态
+        "ups_coordinator": None,
+        CONF_ENABLE_DOCKER: coordinator.config.get(CONF_ENABLE_DOCKER, False)
     }

-    await hass.config_entries.async_forward_entry_setups(entry, PLATFORMS)
-    entry.async_on_unload(entry.add_update_listener(async_update_entry))
+    # 异步后台初始化
+    hass.async_create_task(async_delayed_setup(hass, entry, coordinator))
     return True


+async def async_delayed_setup(hass: HomeAssistant, entry: ConfigEntry, coordinator: FlynasCoordinator):
+    try:
+        # 不阻塞等待NAS上线,直接尝试刷新数据
+        await coordinator.async_config_entry_first_refresh()
+        enable_docker = coordinator.config.get(CONF_ENABLE_DOCKER, False)
+        if enable_docker:
+            from .docker_manager import DockerManager
+            coordinator.docker_manager = DockerManager(coordinator)
+            _LOGGER.debug("已启用Docker容器监控")
+        else:
+            coordinator.docker_manager = None
+            _LOGGER.debug("未启用Docker容器监控")
+        ups_coordinator = UPSDataUpdateCoordinator(hass, coordinator.config, coordinator)
+        await ups_coordinator.async_config_entry_first_refresh()
+        hass.data[DOMAIN][entry.entry_id]["ups_coordinator"] = ups_coordinator
+        await hass.config_entries.async_forward_entry_setups(entry, PLATFORMS)
+        entry.async_on_unload(entry.add_update_listener(async_update_entry))
+        _LOGGER.info("飞牛NAS集成初始化完成")
+    except Exception as e:
+        _LOGGER.error("飞牛NAS集成初始化失败: %s", str(e))
+        await coordinator.async_disconnect()
+        if hasattr(coordinator, '_ping_task') and coordinator._ping_task:
+            coordinator._ping_task.cancel()
+

 async def async_update_entry(hass: HomeAssistant, entry: ConfigEntry):
-    await hass.config_entries.async_reload(entry.entry_id)
+    """更新配置项"""
+    # 卸载现有集成
+    await async_unload_entry(hass, entry)
+    # 重新加载集成
+    await async_setup_entry(hass, entry)


 async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry):
-    unload_ok = await hass.config_entries.async_unload_platforms(entry, PLATFORMS)
+    """卸载集成"""
+    # 获取集成数据
+    domain_data = hass.data.get(DOMAIN, {}).get(entry.entry_id, {})
+    unload_ok = True

-    if unload_ok:
-        domain_data = hass.data[DOMAIN][entry.entry_id]
+    if DATA_UPDATE_COORDINATOR in domain_data:
         coordinator = domain_data[DATA_UPDATE_COORDINATOR]
-        ups_coordinator = domain_data["ups_coordinator"]
+        ups_coordinator = domain_data.get("ups_coordinator")

-        # 关闭主协调器的SSH连接
-        await coordinator.async_disconnect()
-        # 关闭UPS协调器
-        await ups_coordinator.async_shutdown()
+        # 卸载平台
+        unload_ok = await hass.config_entries.async_unload_platforms(entry, PLATFORMS)

-        # 从DOMAIN中移除该entry的数据
-        hass.data[DOMAIN].pop(entry.entry_id)
+        if unload_ok:
+            # 关闭主协调器的SSH连接
+            await coordinator.async_disconnect()
+
+            # 关闭UPS协调器(如果存在)
+            if ups_coordinator:
+                await ups_coordinator.async_shutdown()
+
+            # 取消监控任务(如果存在)
+            if hasattr(coordinator, '_ping_task') and coordinator._ping_task and not coordinator._ping_task.done():
+                coordinator._ping_task.cancel()
+
+            # 从DOMAIN中移除该entry的数据
+            hass.data[DOMAIN].pop(entry.entry_id, None)

     return unload_ok
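The new setup path above returns from async_setup_entry immediately and pushes the first refresh, the Docker manager, the UPS coordinator and the platform forwarding into a background task via hass.async_create_task. Below is a minimal, standalone sketch of that deferred-initialization pattern using plain asyncio rather than Home Assistant's objects; the names setup_entry, delayed_setup and state are illustrative only.

import asyncio
import logging

_LOGGER = logging.getLogger(__name__)

async def setup_entry(state: dict) -> bool:
    # Register a placeholder right away so callers see a usable entry,
    # then hand the slow work to a background task (mirrors hass.async_create_task).
    state["ups_coordinator"] = None
    state["setup_task"] = asyncio.create_task(delayed_setup(state))
    return True  # setup itself never blocks on the NAS being reachable

async def delayed_setup(state: dict) -> None:
    try:
        await asyncio.sleep(2)  # stands in for the first data refresh over SSH
        state["ups_coordinator"] = "ready"
        _LOGGER.info("background initialization finished")
    except Exception as err:
        _LOGGER.error("background initialization failed: %s", err)

async def main() -> None:
    state: dict = {}
    print(await setup_entry(state))  # True, returned immediately
    await asyncio.sleep(3)           # give the background task time to finish
    print(state["ups_coordinator"])  # "ready"

if __name__ == "__main__":
    asyncio.run(main())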
@@ -18,11 +18,7 @@ from .const import (
     CONF_UPS_SCAN_INTERVAL,
     DEFAULT_UPS_SCAN_INTERVAL,
     CONF_ROOT_PASSWORD,
-    CONF_ENABLE_DOCKER,
-    CONF_MAX_CONNECTIONS,
-    DEFAULT_MAX_CONNECTIONS,
-    CONF_CACHE_TIMEOUT,
-    DEFAULT_CACHE_TIMEOUT
+    CONF_ENABLE_DOCKER
 )

 _LOGGER = logging.getLogger(__name__)
@@ -75,17 +71,7 @@ class ConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
                 default=DEFAULT_SCAN_INTERVAL
             ): int,
             # 添加启用Docker的选项
-            vol.Optional(CONF_ENABLE_DOCKER, default=False): bool,
-            # 新增:最大连接数
-            vol.Optional(
-                CONF_MAX_CONNECTIONS,
-                default=DEFAULT_MAX_CONNECTIONS
-            ): int,
-            # 新增:缓存超时时间(分钟)
-            vol.Optional(
-                CONF_CACHE_TIMEOUT,
-                default=DEFAULT_CACHE_TIMEOUT
-            ): int
+            vol.Optional(CONF_ENABLE_DOCKER, default=False): bool
         })

         return self.async_show_form(
@@ -118,9 +104,6 @@ class ConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
             self.ssh_config[CONF_MAC] = selected_mac
             # 确保将CONF_ENABLE_DOCKER也存入配置项
             self.ssh_config[CONF_ENABLE_DOCKER] = enable_docker
-            # 添加连接池和缓存配置
-            self.ssh_config[CONF_MAX_CONNECTIONS] = self.ssh_config.get(CONF_MAX_CONNECTIONS, DEFAULT_MAX_CONNECTIONS)
-            self.ssh_config[CONF_CACHE_TIMEOUT] = self.ssh_config.get(CONF_CACHE_TIMEOUT, DEFAULT_CACHE_TIMEOUT)
             return self.async_create_entry(
                 title=self.ssh_config[CONF_HOST],
                 data=self.ssh_config
@@ -237,17 +220,7 @@ class OptionsFlowHandler(config_entries.OptionsFlow):
             vol.Optional(
                 CONF_ENABLE_DOCKER,
                 default=data.get(CONF_ENABLE_DOCKER, False)
-            ): bool,
-            # 新增:最大连接数
-            vol.Optional(
-                CONF_MAX_CONNECTIONS,
-                default=data.get(CONF_MAX_CONNECTIONS, DEFAULT_MAX_CONNECTIONS)
-            ): int,
-            # 新增:缓存超时时间(分钟)
-            vol.Optional(
-                CONF_CACHE_TIMEOUT,
-                default=data.get(CONF_CACHE_TIMEOUT, DEFAULT_CACHE_TIMEOUT)
-            ): int
+            ): bool
         })

         return self.async_show_form(
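After this change the options form only keeps the Docker toggle; the max-connections and cache-timeout fields are removed from both the initial form and the options flow. A minimal sketch of the remaining schema shape, assuming voluptuous is installed and using a plain string key in place of the CONF_ENABLE_DOCKER constant:

import voluptuous as vol

def build_options_schema(data: dict) -> vol.Schema:
    # Only the Docker toggle remains configurable; its default is seeded
    # from the values already stored on the config entry.
    return vol.Schema({
        vol.Optional("enable_docker", default=data.get("enable_docker", False)): bool,
    })

schema = build_options_schema({"enable_docker": True})
print(schema({}))  # {'enable_docker': True} - the stored value becomes the default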
@@ -53,9 +53,3 @@ ICON_RESTART = "mdi:restart"
 DEVICE_ID_NAS = "flynas_nas_system"
 DEVICE_ID_UPS = "flynas_ups"
 CONF_NETWORK_MACS = "network_macs"
-
-# 新增配置常量
-CONF_MAX_CONNECTIONS = "max_connections"
-CONF_CACHE_TIMEOUT = "cache_timeout"
-DEFAULT_MAX_CONNECTIONS = 3
-DEFAULT_CACHE_TIMEOUT = 30  # 单位:分钟
@@ -1,8 +1,6 @@
 import logging
-import re
-import asyncssh
 import asyncio
-import time
+import asyncssh
 from datetime import timedelta
 from homeassistant.core import HomeAssistant
 from homeassistant.helpers.update_coordinator import DataUpdateCoordinator, UpdateFailed
@@ -11,8 +9,7 @@ from .const import (
     DOMAIN, CONF_HOST, CONF_PORT, CONF_USERNAME, CONF_PASSWORD,
     CONF_IGNORE_DISKS, CONF_SCAN_INTERVAL, DEFAULT_SCAN_INTERVAL,
     DEFAULT_PORT, CONF_MAC, CONF_UPS_SCAN_INTERVAL, DEFAULT_UPS_SCAN_INTERVAL,
-    CONF_ROOT_PASSWORD, CONF_ENABLE_DOCKER, CONF_MAX_CONNECTIONS, DEFAULT_MAX_CONNECTIONS,
-    CONF_CACHE_TIMEOUT, DEFAULT_CACHE_TIMEOUT
+    CONF_ROOT_PASSWORD, CONF_ENABLE_DOCKER
 )
 from .disk_manager import DiskManager
 from .system_manager import SystemManager
@@ -23,8 +20,10 @@ from .docker_manager import DockerManager
 _LOGGER = logging.getLogger(__name__)

 class FlynasCoordinator(DataUpdateCoordinator):
-    def __init__(self, hass: HomeAssistant, config) -> None:
+    def __init__(self, hass: HomeAssistant, config, config_entry) -> None:
         self.config = config
+        self.config_entry = config_entry
+        self.hass = hass
         self.host = config[CONF_HOST]
         self.port = config.get(CONF_PORT, DEFAULT_PORT)
         self.username = config[CONF_USERNAME]
@@ -32,13 +31,12 @@ class FlynasCoordinator(DataUpdateCoordinator):
         self.root_password = config.get(CONF_ROOT_PASSWORD)
         self.mac = config.get(CONF_MAC, "")
         self.enable_docker = config.get(CONF_ENABLE_DOCKER, False)
-        self.max_connections = config.get(CONF_MAX_CONNECTIONS, DEFAULT_MAX_CONNECTIONS)
-        self.cache_timeout = config.get(CONF_CACHE_TIMEOUT, DEFAULT_CACHE_TIMEOUT) * 60
         self.docker_manager = DockerManager(self) if self.enable_docker else None
-        self.ssh_pool = []  # SSH连接池
-        self.active_commands = 0  # 当前活动命令数
-        self.ssh_closed = True  # 初始状态为关闭
-        self.use_sudo = False  # 初始化use_sudo属性
+        self.ssh = None
+        self.ssh_closed = True
+        self.ups_manager = UPSManager(self)
+        self.vm_manager = VMManager(self)
+        self.use_sudo = False

         self.data = {
             "disks": [],
@@ -64,225 +62,259 @@ class FlynasCoordinator(DataUpdateCoordinator):

         self.disk_manager = DiskManager(self)
         self.system_manager = SystemManager(self)
-        self.ups_manager = UPSManager(self)
-        self.vm_manager = VMManager(self)
+        self._system_online = False
+        self._ping_task = None
+        self._retry_interval = 30  # 系统离线时的检测间隔(秒)

-    async def get_ssh_connection(self):
-        """从连接池获取或创建SSH连接"""
-        # 避免递归调用,改为循环等待
-        start_time = time.time()
-        while True:
-            # 如果连接池中有可用连接且没有超过最大活动命令数
-            while len(self.ssh_pool) > 0 and self.active_commands < self.max_connections:
-                conn = self.ssh_pool.pop()
-                if await self.is_connection_alive(conn):
-                    self.active_commands += 1
-                    return conn
-                else:
-                    await self.close_connection(conn)
-
-            # 如果没有可用连接,创建新连接
-            if self.active_commands < self.max_connections:
-                try:
-                    conn = await asyncssh.connect(
-                        self.host,
-                        port=self.port,
-                        username=self.username,
-                        password=self.password,
-                        known_hosts=None,
-                        connect_timeout=10
-                    )
-                    self.active_commands += 1
-                    self.ssh_closed = False
-
-                    # 确定是否需要sudo权限
-                    await self.determine_sudo_setting(conn)
-
-                    return conn
-                except Exception as e:
-                    _LOGGER.error("创建SSH连接失败: %s", str(e), exc_info=True)
-                    raise UpdateFailed(f"SSH连接失败: {str(e)}")
-
-            # 等待0.1秒后重试,避免递归
-            await asyncio.sleep(0.1)
-
-            # 设置超时(30秒)
-            if time.time() - start_time > 30:
-                raise UpdateFailed("获取SSH连接超时")
-
-    async def determine_sudo_setting(self, conn):
-        """确定是否需要使用sudo权限"""
-        try:
-            # 检查当前用户是否是root
-            result = await conn.run("id -u", timeout=5)
-            if result.stdout.strip() == "0":
-                _LOGGER.debug("当前用户是root,不需要sudo")
-                self.use_sudo = False
-                return
-        except Exception as e:
-            _LOGGER.warning("检查用户ID失败: %s", str(e))
-
-        # 检查是否可以使用密码sudo
-        try:
-            result = await conn.run(
-                f"echo '{self.password}' | sudo -S whoami",
-                input=self.password + "\n",
-                timeout=10
-            )
-            if "root" in result.stdout:
-                _LOGGER.info("可以使用用户密码sudo")
-                self.use_sudo = True
-                return
-        except Exception as e:
-            _LOGGER.debug("无法使用用户密码sudo: %s", str(e))
-
-        # 如果有root密码,尝试使用root密码sudo
-        if self.root_password:
-            try:
-                result = await conn.run(
-                    f"echo '{self.root_password}' | sudo -S whoami",
-                    input=self.root_password + "\n",
-                    timeout=10
-                )
-                if "root" in result.stdout:
-                    _LOGGER.info("可以使用root密码sudo")
-                    self.use_sudo = True
-                    return
-            except Exception as e:
-                _LOGGER.debug("无法使用root密码sudo: %s", str(e))
-
-        _LOGGER.warning("无法获取root权限,将使用普通用户执行命令")
-        self.use_sudo = False
-
-    async def release_ssh_connection(self, conn):
-        """释放连接回连接池"""
-        self.active_commands -= 1
-        if conn and not conn.is_closed():
-            if len(self.ssh_pool) < self.max_connections:
-                self.ssh_pool.append(conn)
-            else:
-                await self.close_connection(conn)
-        else:
-            # 如果连接已经关闭,直接丢弃
-            pass
-
-    async def close_connection(self, conn):
-        """关闭SSH连接"""
-        try:
-            if conn and not conn.is_closed():
-                conn.close()
-        except Exception as e:
-            _LOGGER.debug("关闭SSH连接时出错: %s", str(e))
-
-    async def is_connection_alive(self, conn) -> bool:
-        """检查连接是否存活"""
-        try:
-            # 发送一个简单的命令测试连接
-            result = await conn.run("echo 'connection_test'", timeout=2)
-            return result.exit_status == 0 and "connection_test" in result.stdout
-        except (asyncssh.Error, TimeoutError, ConnectionResetError):
-            return False
-
-    async def run_command(self, command: str, retries=2) -> str:
-        """使用连接池执行命令"""
-        conn = None
-        current_retries = retries
-
-        while current_retries >= 0:
-            try:
-                conn = await self.get_ssh_connection()
-
-                # 根据sudo设置执行命令
-                if self.use_sudo:
-                    password = self.root_password if self.root_password else self.password
-                    if password:
-                        full_command = f"sudo -S {command}"
-                        result = await conn.run(full_command, input=password + "\n", check=True)
-                    else:
-                        full_command = f"sudo {command}"
-                        result = await conn.run(full_command, check=True)
-                else:
-                    result = await conn.run(command, check=True)
-
-                return result.stdout.strip()
-
-            except asyncssh.process.ProcessError as e:
-                if e.exit_status in [4, 32]:
-                    return ""
-                _LOGGER.error("Command failed: %s (exit %d)", command, e.exit_status)
-                # 连接可能已损坏,关闭它
-                await self.close_connection(conn)
-                conn = None
-                if current_retries > 0:
-                    current_retries -= 1
-                    continue
-                else:
-                    raise UpdateFailed(f"Command failed: {command}") from e
-            except asyncssh.Error as e:
-                _LOGGER.error("SSH连接错误: %s", str(e))
-                await self.close_connection(conn)
-                conn = None
-                if current_retries > 0:
-                    current_retries -= 1
-                    continue
-                else:
-                    raise UpdateFailed(f"SSH错误: {str(e)}") from e
-            except Exception as e:
-                _LOGGER.error("意外错误: %s", str(e), exc_info=True)
-                await self.close_connection(conn)
-                conn = None
-                if current_retries > 0:
-                    current_retries -= 1
-                    continue
-                else:
-                    raise UpdateFailed(f"意外错误: {str(e)}") from e
-            finally:
-                if conn:
-                    await self.release_ssh_connection(conn)
-
-    async def async_connect(self):
-        """建立SSH连接(使用连接池)"""
-        # 连接池已处理连接,此方法现在主要用于初始化
-        return True
-
-    async def is_ssh_connected(self) -> bool:
-        """检查是否有活动的SSH连接"""
-        return len(self.ssh_pool) > 0 or self.active_commands > 0
-
-    async def async_disconnect(self):
-        """关闭所有SSH连接"""
-        # 关闭连接池中的所有连接
-        for conn in self.ssh_pool:
-            await self.close_connection(conn)
-        self.ssh_pool = []
-        self.active_commands = 0
-        self.ssh_closed = True
-        self.use_sudo = False  # 重置sudo设置
+    async def async_connect(self):
+        if self.ssh is None or self.ssh_closed:
+            try:
+                self.ssh = await asyncssh.connect(
+                    self.host,
+                    port=self.port,
+                    username=self.username,
+                    password=self.password,
+                    known_hosts=None,
+                    connect_timeout=5  # 缩短连接超时时间
+                )
+                if await self.is_root_user():
+                    _LOGGER.debug("当前用户是 root")
+                    self.use_sudo = False
+                    self.ssh_closed = False
+                    return True
+
+                result = await self.ssh.run(
+                    f"echo '{self.password}' | sudo -S -i",
+                    input=self.password + "\n",
+                    timeout=5
+                )
+
+                whoami_result = await self.ssh.run("whoami")
+                if "root" in whoami_result.stdout:
+                    _LOGGER.info("成功切换到 root 会话(使用登录密码)")
+                    self.use_sudo = False
+                    self.ssh_closed = False
+                    return True
+                else:
+                    if self.root_password:
+                        result = await self.ssh.run(
+                            f"echo '{self.root_password}' | sudo -S -i",
+                            input=self.root_password + "\n",
+                            timeout=5
+                        )
+
+                        whoami_result = await self.ssh.run("whoami")
+                        if "root" in whoami_result.stdout:
+                            _LOGGER.info("成功切换到 root 会话(使用 root 密码)")
+                            self.use_sudo = False
+                            self.ssh_closed = False
+                            return True
+                        else:
+                            # 切换到 root 会话失败,将使用 sudo
+                            self.use_sudo = True
+                    else:
+                        # 非 root 用户且未提供 root 密码,将使用 sudo
+                        self.use_sudo = True
+
+                self.ssh_closed = False
+                _LOGGER.info("SSH 连接已建立到 %s", self.host)
+                return True
+            except Exception as e:
+                self.ssh = None
+                self.ssh_closed = True
+                _LOGGER.debug("连接失败: %s", str(e))
+                return False
+        return True
+
+    async def is_root_user(self):
+        try:
+            result = await self.ssh.run("id -u", timeout=3)
+            return result.stdout.strip() == "0"
+        except Exception:
+            return False
+
+    async def async_disconnect(self):
+        if self.ssh is not None and not self.ssh_closed:
+            try:
+                self.ssh.close()
+                self.ssh_closed = True
+                _LOGGER.debug("SSH connection closed")
+            except Exception as e:
+                _LOGGER.debug("Error closing SSH connection: %s", str(e))
+            finally:
+                self.ssh = None
+
+    async def is_ssh_connected(self) -> bool:
+        if self.ssh is None or self.ssh_closed:
+            return False
+        try:
+            test_command = "echo 'connection_test'"
+            result = await self.ssh.run(test_command, timeout=2)
+            return result.exit_status == 0 and "connection_test" in result.stdout
+        except (asyncssh.Error, TimeoutError):
+            return False
+
+    async def ping_system(self) -> bool:
+        """轻量级系统状态检测"""
+        # 对于本地主机直接返回True
+        if self.host in ['localhost', '127.0.0.1']:
+            return True
+
+        try:
+            # 使用异步ping检测
+            proc = await asyncio.create_subprocess_exec(
+                'ping', '-c', '1', '-W', '1', self.host,
+                stdout=asyncio.subprocess.DEVNULL,
+                stderr=asyncio.subprocess.DEVNULL
+            )
+            await proc.wait()
+            return proc.returncode == 0
+        except Exception:
+            return False
+
+    async def run_command(self, command: str, retries=2) -> str:
+        # 系统离线时直接返回空字符串,避免抛出异常
+        if not self._system_online:
+            return ""
+
+        for attempt in range(retries):
+            try:
+                if not await self.is_ssh_connected():
+                    if not await self.async_connect():
+                        if self.data and "system" in self.data:
+                            self.data["system"]["status"] = "off"
+                        return ""
+
+                if self.use_sudo:
+                    if self.root_password or self.password:
+                        password = self.root_password if self.root_password else self.password
+                        full_command = f"sudo -S {command}"
+                        result = await self.ssh.run(full_command, input=password + "\n", check=True)
+                    else:
+                        full_command = f"sudo {command}"
+                        result = await self.ssh.run(full_command, check=True)
+                else:
+                    result = await self.ssh.run(command, check=True)
+
+                return result.stdout.strip()
+
+            except asyncssh.process.ProcessError as e:
+                if e.exit_status in [4, 32]:
+                    return ""
+                _LOGGER.debug("Command failed: %s (exit %d)", command, e.exit_status)
+                self.ssh = None
+                self.ssh_closed = True
+                if attempt == retries - 1:
+                    return ""
+            except asyncssh.Error as e:
+                _LOGGER.debug("SSH connection error: %s", str(e))
+                self.ssh = None
+                self.ssh_closed = True
+                if attempt == retries - 1:
+                    return ""
+            except Exception as e:
+                self.ssh = None
+                self.ssh_closed = True
+                _LOGGER.debug("Unexpected error: %s", str(e))
+                if attempt == retries - 1:
+                    return ""
+        return ""
+
+    async def get_network_macs(self):
+        try:
+            output = await self.run_command("ip link show")
+            macs = {}
+
+            pattern = re.compile(r'^\d+: (\w+):.*\n\s+link/\w+\s+([0-9a-fA-F:]{17})', re.MULTILINE)
+            matches = pattern.findall(output)
+
+            for interface, mac in matches:
+                if interface == "lo" or mac == "00:00:00:00:00:00":
+                    continue
+                macs[mac] = interface
+
+            return macs
+        except Exception as e:
+            self.logger.debug("获取MAC地址失败: %s", str(e))
+            return {}
+
+    async def _monitor_system_status(self):
+        """系统离线时轮询检测状态"""
+        self.logger.debug("启动系统状态监控,每%d秒检测一次", self._retry_interval)
+        while True:
+            await asyncio.sleep(self._retry_interval)
+
+            if await self.ping_system():
+                self.logger.info("检测到系统已开机,触发重新加载")
+                # 触发集成重新加载
+                self.hass.async_create_task(
+                    self.hass.config_entries.async_reload(self.config_entry.entry_id)
+                )
+                break

     async def _async_update_data(self):
         _LOGGER.debug("Starting data update...")
+        is_online = await self.ping_system()
+        self._system_online = is_online
+        if not is_online:
+            _LOGGER.debug("系统离线,跳过数据更新")
+            # 修复:确保 self.data 结构有效
+            if self.data is None or not isinstance(self.data, dict):
+                self.data = {}
+            if "system" not in self.data or not isinstance(self.data.get("system"), dict):
+                self.data["system"] = {}
+            self.data["system"]["status"] = "off"
+            # 启动后台监控任务(非阻塞)
+            if not self._ping_task or self._ping_task.done():
+                self._ping_task = asyncio.create_task(self._monitor_system_status())
+            await self.async_disconnect()
+            # 直接返回空数据,不阻塞
+            return {
+                "disks": [],
+                "system": {
+                    "uptime": "未知",
+                    "cpu_temperature": "未知",
+                    "motherboard_temperature": "未知",
+                    "status": "off"
+                },
+                "ups": {},
+                "vms": [],
+                "docker_containers": []
+            }
+
+        # 系统在线处理
         try:
-            if await self.is_ssh_connected():
-                status = "on"
-            else:
-                if not await self.async_connect():
-                    status = "off"
-                else:
-                    status = "on"
+            # 确保SSH连接
+            if not await self.async_connect():
+                self.data["system"]["status"] = "off"
+                return {
+                    "disks": [],
+                    "system": {
+                        "uptime": "未知",
+                        "cpu_temperature": "未知",
+                        "motherboard_temperature": "未知",
+                        "status": "off"
+                    },
+                    "ups": {},
+                    "vms": []
+                }
+
+            status = "on"

-            # 使用已初始化的管理器获取数据
             disks = await self.disk_manager.get_disks_info()
             system = await self.system_manager.get_system_info()
             ups_info = await self.ups_manager.get_ups_info()
             vms = await self.vm_manager.get_vm_list()

-            # 获取虚拟机标题
             for vm in vms:
                 vm["title"] = await self.vm_manager.get_vm_title(vm["name"])

-            # 获取Docker容器信息(如果启用)
             docker_containers = []
-            if self.enable_docker and hasattr(self, 'docker_manager') and self.docker_manager:
+            if self.enable_docker:
                 docker_containers = await self.docker_manager.get_containers()

             data = {
@@ -299,7 +331,12 @@ class FlynasCoordinator(DataUpdateCoordinator):
             return data

         except Exception as e:
-            _LOGGER.error("Failed to update data: %s", str(e), exc_info=True)
+            _LOGGER.debug("数据更新失败: %s", str(e))
+            # 检查错误类型,如果是连接问题,标记为离线
+            self._system_online = False
+            if not self._ping_task or self._ping_task.done():
+                self._ping_task = asyncio.create_task(self._monitor_system_status())
+
             return {
                 "disks": [],
                 "system": {
@@ -339,10 +376,14 @@ class UPSDataUpdateCoordinator(DataUpdateCoordinator):
         self.ups_manager = UPSManager(main_coordinator)

     async def _async_update_data(self):
+        # 如果主协调器检测到系统离线,跳过UPS更新
+        if not self.main_coordinator._system_online:
+            return {}
+
         try:
             return await self.ups_manager.get_ups_info()
         except Exception as e:
-            _LOGGER.error("Failed to update UPS data: %s", str(e), exc_info=True)
+            _LOGGER.debug("UPS数据更新失败: %s", str(e))
             return {}

     async def control_vm(self, vm_name, action):
@@ -353,5 +394,5 @@ class UPSDataUpdateCoordinator(DataUpdateCoordinator):
             result = await self.vm_manager.control_vm(vm_name, action)
             return result
         except Exception as e:
-            _LOGGER.error("虚拟机控制失败: %s", str(e), exc_info=True)
+            _LOGGER.debug("虚拟机控制失败: %s", str(e))
             return False
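The rewritten coordinator above decides whether to touch SSH at all by first probing the host with a single ICMP ping, and it keeps one asyncssh connection instead of a pool. That probe can be exercised on its own; the sketch below mirrors the ping_system logic from the hunk, with the host value and the Linux-style '-W' timeout flag as illustrative assumptions for other platforms.

import asyncio

async def ping_host(host: str) -> bool:
    # Local addresses are treated as always reachable, as the coordinator does.
    if host in ("localhost", "127.0.0.1"):
        return True
    try:
        # One echo request, one-second wait, output discarded.
        proc = await asyncio.create_subprocess_exec(
            "ping", "-c", "1", "-W", "1", host,
            stdout=asyncio.subprocess.DEVNULL,
            stderr=asyncio.subprocess.DEVNULL,
        )
        await proc.wait()
        return proc.returncode == 0
    except Exception:
        return False

if __name__ == "__main__":
    print(asyncio.run(ping_host("127.0.0.1")))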
@@ -1,8 +1,7 @@
 import re
 import logging
 import asyncio
-import time
-from .const import CONF_IGNORE_DISKS, CONF_CACHE_TIMEOUT, DEFAULT_CACHE_TIMEOUT
+from .const import CONF_IGNORE_DISKS

 _LOGGER = logging.getLogger(__name__)

@@ -15,9 +14,6 @@ class DiskManager:
         self.disk_full_info_cache = {}  # 缓存磁盘完整信息
         self.first_run = True  # 首次运行标志
         self.initial_detection_done = False  # 首次完整检测完成标志
-        self.cache_expiry = {}  # 缓存过期时间(时间戳)
-        # 获取缓存超时配置(分钟),转换为秒
-        self.cache_timeout = self.coordinator.config.get(CONF_CACHE_TIMEOUT, DEFAULT_CACHE_TIMEOUT) * 60

     def extract_value(self, text: str, patterns, default="未知", format_func=None):
         if not text:
@@ -42,6 +38,7 @@ class DiskManager:
     async def check_disk_active(self, device: str, window: int = 30) -> bool:
         """检查硬盘在指定时间窗口内是否有活动"""
         try:
+            # 正确的路径是 /sys/block/{device}/stat
             stat_path = f"/sys/block/{device}/stat"

             # 读取统计文件
@@ -94,6 +91,8 @@ class DiskManager:
             stats = stat_output.split()

             if len(stats) >= 11:
+                # 第9个字段是最近完成的读操作数
+                # 第10个字段是最近完成的写操作数
                 recent_reads = int(stats[8])
                 recent_writes = int(stats[9])

@@ -109,14 +108,6 @@ class DiskManager:
     async def get_disks_info(self) -> list[dict]:
         disks = []
         try:
-            # 清理过期缓存
-            now = time.time()
-            for device in list(self.disk_full_info_cache.keys()):
-                if now - self.cache_expiry.get(device, 0) > self.cache_timeout:
-                    self.logger.debug(f"磁盘 {device} 的缓存已过期,清除")
-                    del self.disk_full_info_cache[device]
-                    del self.cache_expiry[device]
-
             self.logger.debug("Fetching disk list...")
             lsblk_output = await self.coordinator.run_command("lsblk -dno NAME,TYPE")
             self.logger.debug("lsblk output: %s", lsblk_output)
@@ -157,15 +148,14 @@ class DiskManager:
                 # 检查是否有缓存的完整信息
                 cached_info = self.disk_full_info_cache.get(device, {})

-                # 首次运行时强制获取完整信息
+                # 优化点:首次运行时强制获取完整信息
                 if self.first_run:
                     self.logger.debug(f"首次运行,强制获取硬盘 {device} 的完整信息")
                     try:
                         # 执行完整的信息获取
                         await self._get_full_disk_info(disk_info, device_path)
-                        # 更新缓存并设置过期时间
+                        # 更新缓存
                         self.disk_full_info_cache[device] = disk_info.copy()
-                        self.cache_expiry[device] = now
                     except Exception as e:
                         self.logger.warning(f"首次运行获取硬盘信息失败: {str(e)}", exc_info=True)
                         # 使用缓存信息(如果有)
@@ -216,9 +206,8 @@ class DiskManager:
                     try:
                         # 执行完整的信息获取
                         await self._get_full_disk_info(disk_info, device_path)
-                        # 更新缓存并设置过期时间
+                        # 更新缓存
                         self.disk_full_info_cache[device] = disk_info.copy()
-                        self.cache_expiry[device] = now
                     except Exception as e:
                         self.logger.warning(f"获取硬盘信息失败: {str(e)}", exc_info=True)
                         # 使用缓存信息(如果有)
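check_disk_active above reads /sys/block/<device>/stat and compares two of its counters to decide whether the disk saw recent I/O. A minimal standalone sketch of the same idea, which simply snapshots the counters twice and reports whether anything changed; the device name is illustrative and the script assumes a Linux host:

import asyncio
from pathlib import Path

async def disk_active(device: str, window: float = 1.0) -> bool:
    # /sys/block/<device>/stat is one line of whitespace-separated counters;
    # any counter moving inside the window means the disk serviced I/O.
    stat_path = Path(f"/sys/block/{device}/stat")
    before = stat_path.read_text().split()
    await asyncio.sleep(window)
    after = stat_path.read_text().split()
    return before != after

if __name__ == "__main__":
    print(asyncio.run(disk_active("sda")))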
@@ -1,7 +1,7 @@
 {
   "domain": "fn_nas",
   "name": "飞牛NAS",
-  "version": "1.3.3",
+  "version": "1.3.4",
   "documentation": "https://github.com/anxms/fn_nas",
   "dependencies": [],
   "codeowners": ["@anxms"],
@@ -10,9 +10,7 @@
         "username": "用户名",
         "password": "密码",
         "scan_interval": "数据更新间隔(秒)",
-        "enable_docker": "启用docker控制",
-        "max_connections": "最大连接数",
-        "cache_timeout": "缓存过期时间"
+        "enable_docker": "启用docker控制"
       }
     },
     "select_mac": {