2 Commits

Author SHA1 Message Date
xiaochao
fae53cf5b9 修复飞牛nas关机后重启home assistant后,飞牛nas开机后部分实体显示不可用的问题
去除ssh连接数限制和缓存清理时间
2025-07-12 01:18:26 +08:00
xiaochao
f185b7e3ee Fix: Home Assistant entity states were not updated when the 飞牛NAS went from powered off back to powered on 2025-07-11 23:45:05 +08:00
7 changed files with 322 additions and 300 deletions
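
Taken together, the two commits stop treating a powered-off NAS as a fatal error: setup no longer blocks on the first refresh, and once the host stops answering pings the coordinator skips updates and starts a background task that polls until the host responds again, then reloads the config entry. Below is a minimal, framework-free sketch of that poll-then-reload pattern using only the standard library; ping_system and the 30-second default mirror the diff, while monitor_until_online, reload_cb, and the example address are illustrative names, not part of the integration's API.

import asyncio
from typing import Awaitable, Callable


async def ping_system(host: str) -> bool:
    """Return True if the host answers one ICMP echo within a second."""
    if host in ("localhost", "127.0.0.1"):
        return True
    proc = await asyncio.create_subprocess_exec(
        "ping", "-c", "1", "-W", "1", host,
        stdout=asyncio.subprocess.DEVNULL,
        stderr=asyncio.subprocess.DEVNULL,
    )
    await proc.wait()
    return proc.returncode == 0


async def monitor_until_online(
    host: str,
    reload_cb: Callable[[], Awaitable[None]],
    retry_interval: int = 30,
) -> None:
    """Poll the host and fire the reload callback once it is reachable again."""
    while True:
        await asyncio.sleep(retry_interval)
        if await ping_system(host):
            # In the integration this is where the config entry gets reloaded.
            await reload_cb()
            break


async def main() -> None:
    async def fake_reload() -> None:
        print("host is back online; reloading the integration")

    # Hypothetical address; replace with a real NAS IP to try the sketch.
    await monitor_until_online("192.168.1.10", fake_reload, retry_interval=5)


if __name__ == "__main__":
    asyncio.run(main())
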

View File

@@ -1,62 +1,89 @@
import logging
import asyncio
import asyncssh
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from .const import DOMAIN, DATA_UPDATE_COORDINATOR, PLATFORMS, CONF_ENABLE_DOCKER # import the newly added constant
from homeassistant.helpers import config_validation as cv
from .const import (
DOMAIN, DATA_UPDATE_COORDINATOR, PLATFORMS, CONF_ENABLE_DOCKER,
CONF_HOST, DEFAULT_PORT
)
from .coordinator import FlynasCoordinator, UPSDataUpdateCoordinator
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry):
config = {**entry.data, **entry.options}
coordinator = FlynasCoordinator(hass, config)
await coordinator.async_config_entry_first_refresh()
_LOGGER.debug("协调器类型: %s", type(coordinator).__name__)
_LOGGER.debug("协调器是否有control_vm方法: %s", hasattr(coordinator, 'control_vm'))
_LOGGER.debug("协调器是否有vm_manager属性: %s", hasattr(coordinator, 'vm_manager'))
# 检查是否启用Docker并初始化Docker管理器如果有
enable_docker = config.get(CONF_ENABLE_DOCKER, False)
if enable_docker:
# Import and initialize the Docker manager
from .docker_manager import DockerManager
coordinator.docker_manager = DockerManager(coordinator)
_LOGGER.debug("已启用Docker容器监控")
else:
coordinator.docker_manager = None
_LOGGER.debug("未启用Docker容器监控")
ups_coordinator = UPSDataUpdateCoordinator(hass, config, coordinator)
await ups_coordinator.async_config_entry_first_refresh()
coordinator = FlynasCoordinator(hass, config, entry)
# Initialize immediately; do not block waiting for the NAS to come online
hass.data.setdefault(DOMAIN, {})
hass.data[DOMAIN][entry.entry_id] = {
DATA_UPDATE_COORDINATOR: coordinator,
"ups_coordinator": ups_coordinator,
CONF_ENABLE_DOCKER: enable_docker # store the enabled flag
"ups_coordinator": None,
CONF_ENABLE_DOCKER: coordinator.config.get(CONF_ENABLE_DOCKER, False)
}
await hass.config_entries.async_forward_entry_setups(entry, PLATFORMS)
entry.async_on_unload(entry.add_update_listener(async_update_entry))
# Asynchronous background initialization
hass.async_create_task(async_delayed_setup(hass, entry, coordinator))
return True
async def async_delayed_setup(hass: HomeAssistant, entry: ConfigEntry, coordinator: FlynasCoordinator):
try:
# Do not block waiting for the NAS to come online; try to refresh the data right away
await coordinator.async_config_entry_first_refresh()
enable_docker = coordinator.config.get(CONF_ENABLE_DOCKER, False)
if enable_docker:
from .docker_manager import DockerManager
coordinator.docker_manager = DockerManager(coordinator)
_LOGGER.debug("已启用Docker容器监控")
else:
coordinator.docker_manager = None
_LOGGER.debug("未启用Docker容器监控")
ups_coordinator = UPSDataUpdateCoordinator(hass, coordinator.config, coordinator)
await ups_coordinator.async_config_entry_first_refresh()
hass.data[DOMAIN][entry.entry_id]["ups_coordinator"] = ups_coordinator
await hass.config_entries.async_forward_entry_setups(entry, PLATFORMS)
entry.async_on_unload(entry.add_update_listener(async_update_entry))
_LOGGER.info("飞牛NAS集成初始化完成")
except Exception as e:
_LOGGER.error("飞牛NAS集成初始化失败: %s", str(e))
await coordinator.async_disconnect()
if hasattr(coordinator, '_ping_task') and coordinator._ping_task:
coordinator._ping_task.cancel()
async def async_update_entry(hass: HomeAssistant, entry: ConfigEntry):
await hass.config_entries.async_reload(entry.entry_id)
"""更新配置项"""
# 卸载现有集成
await async_unload_entry(hass, entry)
# 重新加载集成
await async_setup_entry(hass, entry)
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry):
unload_ok = await hass.config_entries.async_unload_platforms(entry, PLATFORMS)
"""卸载集成"""
# 获取集成数据
domain_data = hass.data.get(DOMAIN, {}).get(entry.entry_id, {})
unload_ok = True
if unload_ok:
domain_data = hass.data[DOMAIN][entry.entry_id]
if DATA_UPDATE_COORDINATOR in domain_data:
coordinator = domain_data[DATA_UPDATE_COORDINATOR]
ups_coordinator = domain_data["ups_coordinator"]
ups_coordinator = domain_data.get("ups_coordinator")
# Close the main coordinator's SSH connection
await coordinator.async_disconnect()
# Shut down the UPS coordinator
await ups_coordinator.async_shutdown()
# Unload the platforms
unload_ok = await hass.config_entries.async_unload_platforms(entry, PLATFORMS)
# Remove this entry's data from DOMAIN
hass.data[DOMAIN].pop(entry.entry_id)
if unload_ok:
# Close the main coordinator's SSH connection
await coordinator.async_disconnect()
# Shut down the UPS coordinator, if present
if ups_coordinator:
await ups_coordinator.async_shutdown()
# Cancel the monitoring task (if any)
if hasattr(coordinator, '_ping_task') and coordinator._ping_task and not coordinator._ping_task.done():
coordinator._ping_task.cancel()
# Remove this entry's data from DOMAIN
hass.data[DOMAIN].pop(entry.entry_id, None)
return unload_ok
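
For contrast with the setup path above, here is a minimal sketch of the non-blocking startup the new __init__.py follows: async_setup_entry stores the coordinator and returns immediately, and the first refresh plus platform setup run in a background task so an unreachable NAS cannot stall Home Assistant startup. FakeCoordinator, delayed_setup, and setup_entry are illustrative stand-ins, not the integration's real code.

import asyncio


class FakeCoordinator:
    """Stand-in for FlynasCoordinator; the real one refreshes over SSH."""

    async def async_config_entry_first_refresh(self) -> None:
        await asyncio.sleep(2)  # pretend the NAS is slow (or offline) to answer
        print("first refresh finished")


async def delayed_setup(coordinator: FakeCoordinator) -> None:
    # Runs in the background: failures are logged, never raised into setup.
    try:
        await coordinator.async_config_entry_first_refresh()
        print("platforms would be forwarded here")
    except Exception as err:
        print(f"background setup failed: {err}")


async def setup_entry() -> bool:
    coordinator = FakeCoordinator()
    # Schedule the slow work and return at once, mirroring hass.async_create_task(...).
    # (A real integration keeps a reference to the task; Home Assistant tracks it.)
    asyncio.get_running_loop().create_task(delayed_setup(coordinator))
    return True


async def main() -> None:
    print("setup returned:", await setup_entry())
    await asyncio.sleep(3)  # keep the loop alive so the background task can finish


if __name__ == "__main__":
    asyncio.run(main())
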

View File

@@ -18,11 +18,7 @@ from .const import (
CONF_UPS_SCAN_INTERVAL,
DEFAULT_UPS_SCAN_INTERVAL,
CONF_ROOT_PASSWORD,
CONF_ENABLE_DOCKER,
CONF_MAX_CONNECTIONS,
DEFAULT_MAX_CONNECTIONS,
CONF_CACHE_TIMEOUT,
DEFAULT_CACHE_TIMEOUT
CONF_ENABLE_DOCKER
)
_LOGGER = logging.getLogger(__name__)
@@ -75,17 +71,7 @@ class ConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
default=DEFAULT_SCAN_INTERVAL
): int,
# Add the option to enable Docker
vol.Optional(CONF_ENABLE_DOCKER, default=False): bool,
# New: maximum number of connections
vol.Optional(
CONF_MAX_CONNECTIONS,
default=DEFAULT_MAX_CONNECTIONS
): int,
# New: cache timeout (minutes)
vol.Optional(
CONF_CACHE_TIMEOUT,
default=DEFAULT_CACHE_TIMEOUT
): int
vol.Optional(CONF_ENABLE_DOCKER, default=False): bool
})
return self.async_show_form(
@@ -118,9 +104,6 @@ class ConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
self.ssh_config[CONF_MAC] = selected_mac
# Make sure CONF_ENABLE_DOCKER is also stored in the config entry
self.ssh_config[CONF_ENABLE_DOCKER] = enable_docker
# Add the connection-pool and cache settings
self.ssh_config[CONF_MAX_CONNECTIONS] = self.ssh_config.get(CONF_MAX_CONNECTIONS, DEFAULT_MAX_CONNECTIONS)
self.ssh_config[CONF_CACHE_TIMEOUT] = self.ssh_config.get(CONF_CACHE_TIMEOUT, DEFAULT_CACHE_TIMEOUT)
return self.async_create_entry(
title=self.ssh_config[CONF_HOST],
data=self.ssh_config
@@ -237,17 +220,7 @@ class OptionsFlowHandler(config_entries.OptionsFlow):
vol.Optional(
CONF_ENABLE_DOCKER,
default=data.get(CONF_ENABLE_DOCKER, False)
): bool,
# New: maximum number of connections
vol.Optional(
CONF_MAX_CONNECTIONS,
default=data.get(CONF_MAX_CONNECTIONS, DEFAULT_MAX_CONNECTIONS)
): int,
# New: cache timeout (minutes)
vol.Optional(
CONF_CACHE_TIMEOUT,
default=data.get(CONF_CACHE_TIMEOUT, DEFAULT_CACHE_TIMEOUT)
): int
): bool
})
return self.async_show_form(

View File

@@ -52,10 +52,4 @@ ICON_RESTART = "mdi:restart"
# Device identifier constants
DEVICE_ID_NAS = "flynas_nas_system"
DEVICE_ID_UPS = "flynas_ups"
CONF_NETWORK_MACS = "network_macs"
# New configuration constants
CONF_MAX_CONNECTIONS = "max_connections"
CONF_CACHE_TIMEOUT = "cache_timeout"
DEFAULT_MAX_CONNECTIONS = 3
DEFAULT_CACHE_TIMEOUT = 30 # unit: minutes
CONF_NETWORK_MACS = "network_macs"

View File

@@ -1,8 +1,6 @@
import logging
import re
import asyncssh
import asyncio
import time
import asyncssh
from datetime import timedelta
from homeassistant.core import HomeAssistant
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator, UpdateFailed
@@ -11,8 +9,7 @@ from .const import (
DOMAIN, CONF_HOST, CONF_PORT, CONF_USERNAME, CONF_PASSWORD,
CONF_IGNORE_DISKS, CONF_SCAN_INTERVAL, DEFAULT_SCAN_INTERVAL,
DEFAULT_PORT, CONF_MAC, CONF_UPS_SCAN_INTERVAL, DEFAULT_UPS_SCAN_INTERVAL,
CONF_ROOT_PASSWORD, CONF_ENABLE_DOCKER, CONF_MAX_CONNECTIONS, DEFAULT_MAX_CONNECTIONS,
CONF_CACHE_TIMEOUT, DEFAULT_CACHE_TIMEOUT
CONF_ROOT_PASSWORD, CONF_ENABLE_DOCKER
)
from .disk_manager import DiskManager
from .system_manager import SystemManager
@@ -23,8 +20,10 @@ from .docker_manager import DockerManager
_LOGGER = logging.getLogger(__name__)
class FlynasCoordinator(DataUpdateCoordinator):
def __init__(self, hass: HomeAssistant, config) -> None:
def __init__(self, hass: HomeAssistant, config, config_entry) -> None:
self.config = config
self.config_entry = config_entry
self.hass = hass
self.host = config[CONF_HOST]
self.port = config.get(CONF_PORT, DEFAULT_PORT)
self.username = config[CONF_USERNAME]
@@ -32,13 +31,12 @@ class FlynasCoordinator(DataUpdateCoordinator):
self.root_password = config.get(CONF_ROOT_PASSWORD)
self.mac = config.get(CONF_MAC, "")
self.enable_docker = config.get(CONF_ENABLE_DOCKER, False)
self.max_connections = config.get(CONF_MAX_CONNECTIONS, DEFAULT_MAX_CONNECTIONS)
self.cache_timeout = config.get(CONF_CACHE_TIMEOUT, DEFAULT_CACHE_TIMEOUT) * 60
self.docker_manager = DockerManager(self) if self.enable_docker else None
self.ssh_pool = [] # SSH connection pool
self.active_commands = 0 # number of currently active commands
self.ssh_closed = True # starts out closed
self.use_sudo = False # initialize the use_sudo attribute
self.ssh = None
self.ssh_closed = True
self.ups_manager = UPSManager(self)
self.vm_manager = VMManager(self)
self.use_sudo = False
self.data = {
"disks": [],
@@ -64,225 +62,259 @@ class FlynasCoordinator(DataUpdateCoordinator):
self.disk_manager = DiskManager(self)
self.system_manager = SystemManager(self)
self.ups_manager = UPSManager(self)
self.vm_manager = VMManager(self)
self._system_online = False
self._ping_task = None
self._retry_interval = 30 # polling interval (seconds) while the system is offline
async def get_ssh_connection(self):
"""从连接池获取或创建SSH连接"""
# 避免递归调用,改为循环等待
start_time = time.time()
while True:
# If the pool has a free connection and we are below the maximum number of active commands
while len(self.ssh_pool) > 0 and self.active_commands < self.max_connections:
conn = self.ssh_pool.pop()
if await self.is_connection_alive(conn):
self.active_commands += 1
return conn
else:
await self.close_connection(conn)
# If no connection is available, create a new one
if self.active_commands < self.max_connections:
try:
conn = await asyncssh.connect(
self.host,
port=self.port,
username=self.username,
password=self.password,
known_hosts=None,
connect_timeout=10
)
self.active_commands += 1
self.ssh_closed = False
# Determine whether sudo privileges are needed
await self.determine_sudo_setting(conn)
return conn
except Exception as e:
_LOGGER.error("创建SSH连接失败: %s", str(e), exc_info=True)
raise UpdateFailed(f"SSH连接失败: {str(e)}")
# 等待0.1秒后重试,避免递归
await asyncio.sleep(0.1)
# 30-second timeout
if time.time() - start_time > 30:
raise UpdateFailed("Timed out while acquiring an SSH connection")
async def determine_sudo_setting(self, conn):
"""确定是否需要使用sudo权限"""
try:
# Check whether the current user is root
result = await conn.run("id -u", timeout=5)
if result.stdout.strip() == "0":
_LOGGER.debug("当前用户是root不需要sudo")
self.use_sudo = False
return
except Exception as e:
_LOGGER.warning("检查用户ID失败: %s", str(e))
# 检查是否可以使用密码sudo
try:
result = await conn.run(
f"echo '{self.password}' | sudo -S whoami",
input=self.password + "\n",
timeout=10
)
if "root" in result.stdout:
_LOGGER.info("可以使用用户密码sudo")
self.use_sudo = True
return
except Exception as e:
_LOGGER.debug("无法使用用户密码sudo: %s", str(e))
# 如果有root密码尝试使用root密码sudo
if self.root_password:
async def async_connect(self):
if self.ssh is None or self.ssh_closed:
try:
result = await conn.run(
f"echo '{self.root_password}' | sudo -S whoami",
input=self.root_password + "\n",
timeout=10
self.ssh = await asyncssh.connect(
self.host,
port=self.port,
username=self.username,
password=self.password,
known_hosts=None,
connect_timeout=5 # shorter connection timeout
)
if "root" in result.stdout:
_LOGGER.info("可以使用root密码sudo")
self.use_sudo = True
return
except Exception as e:
_LOGGER.debug("无法使用root密码sudo: %s", str(e))
_LOGGER.warning("无法获取root权限将使用普通用户执行命令")
self.use_sudo = False
async def release_ssh_connection(self, conn):
"""释放连接回连接池"""
self.active_commands -= 1
if conn and not conn.is_closed():
if len(self.ssh_pool) < self.max_connections:
self.ssh_pool.append(conn)
else:
await self.close_connection(conn)
else:
# If the connection is already closed, simply drop it
pass
async def close_connection(self, conn):
"""关闭SSH连接"""
try:
if conn and not conn.is_closed():
conn.close()
except Exception as e:
_LOGGER.debug("关闭SSH连接时出错: %s", str(e))
async def is_connection_alive(self, conn) -> bool:
"""检查连接是否存活"""
try:
# Run a trivial command to test the connection
result = await conn.run("echo 'connection_test'", timeout=2)
return result.exit_status == 0 and "connection_test" in result.stdout
except (asyncssh.Error, TimeoutError, ConnectionResetError):
return False
async def run_command(self, command: str, retries=2) -> str:
"""使用连接池执行命令"""
conn = None
current_retries = retries
while current_retries >= 0:
try:
conn = await self.get_ssh_connection()
# Run the command according to the sudo setting
if await self.is_root_user():
_LOGGER.debug("当前用户是 root")
self.use_sudo = False
self.ssh_closed = False
return True
result = await self.ssh.run(
f"echo '{self.password}' | sudo -S -i",
input=self.password + "\n",
timeout=5
)
whoami_result = await self.ssh.run("whoami")
if "root" in whoami_result.stdout:
_LOGGER.info("成功切换到 root 会话(使用登录密码)")
self.use_sudo = False
self.ssh_closed = False
return True
else:
if self.root_password:
result = await self.ssh.run(
f"echo '{self.root_password}' | sudo -S -i",
input=self.root_password + "\n",
timeout=5
)
whoami_result = await self.ssh.run("whoami")
if "root" in whoami_result.stdout:
_LOGGER.info("成功切换到 root 会话(使用 root 密码)")
self.use_sudo = False
self.ssh_closed = False
return True
else:
# Failed to switch to a root session; sudo will be used
self.use_sudo = True
else:
# Not root and no root password provided; sudo will be used
self.use_sudo = True
self.ssh_closed = False
_LOGGER.info("SSH 连接已建立到 %s", self.host)
return True
except Exception as e:
self.ssh = None
self.ssh_closed = True
_LOGGER.debug("连接失败: %s", str(e))
return False
return True
async def is_root_user(self):
try:
result = await self.ssh.run("id -u", timeout=3)
return result.stdout.strip() == "0"
except Exception:
return False
async def async_disconnect(self):
if self.ssh is not None and not self.ssh_closed:
try:
self.ssh.close()
self.ssh_closed = True
_LOGGER.debug("SSH connection closed")
except Exception as e:
_LOGGER.debug("Error closing SSH connection: %s", str(e))
finally:
self.ssh = None
async def is_ssh_connected(self) -> bool:
if self.ssh is None or self.ssh_closed:
return False
try:
test_command = "echo 'connection_test'"
result = await self.ssh.run(test_command, timeout=2)
return result.exit_status == 0 and "connection_test" in result.stdout
except (asyncssh.Error, TimeoutError):
return False
async def ping_system(self) -> bool:
"""轻量级系统状态检测"""
# 对于本地主机直接返回True
if self.host in ['localhost', '127.0.0.1']:
return True
try:
# Use an asynchronous ping check
proc = await asyncio.create_subprocess_exec(
'ping', '-c', '1', '-W', '1', self.host,
stdout=asyncio.subprocess.DEVNULL,
stderr=asyncio.subprocess.DEVNULL
)
await proc.wait()
return proc.returncode == 0
except Exception:
return False
async def run_command(self, command: str, retries=2) -> str:
# While the system is offline, return an empty string rather than raising
if not self._system_online:
return ""
for attempt in range(retries):
try:
if not await self.is_ssh_connected():
if not await self.async_connect():
if self.data and "system" in self.data:
self.data["system"]["status"] = "off"
return ""
if self.use_sudo:
password = self.root_password if self.root_password else self.password
if password:
if self.root_password or self.password:
password = self.root_password if self.root_password else self.password
full_command = f"sudo -S {command}"
result = await conn.run(full_command, input=password + "\n", check=True)
result = await self.ssh.run(full_command, input=password + "\n", check=True)
else:
full_command = f"sudo {command}"
result = await conn.run(full_command, check=True)
result = await self.ssh.run(full_command, check=True)
else:
result = await conn.run(command, check=True)
result = await self.ssh.run(command, check=True)
return result.stdout.strip()
except asyncssh.process.ProcessError as e:
if e.exit_status in [4, 32]:
return ""
_LOGGER.error("Command failed: %s (exit %d)", command, e.exit_status)
# The connection may be broken; close it
await self.close_connection(conn)
conn = None
if current_retries > 0:
current_retries -= 1
continue
else:
raise UpdateFailed(f"Command failed: {command}") from e
_LOGGER.debug("Command failed: %s (exit %d)", command, e.exit_status)
self.ssh = None
self.ssh_closed = True
if attempt == retries - 1:
return ""
except asyncssh.Error as e:
_LOGGER.error("SSH连接错误: %s", str(e))
await self.close_connection(conn)
conn = None
if current_retries > 0:
current_retries -= 1
continue
else:
raise UpdateFailed(f"SSH错误: {str(e)}") from e
_LOGGER.debug("SSH connection error: %s", str(e))
self.ssh = None
self.ssh_closed = True
if attempt == retries - 1:
return ""
except Exception as e:
_LOGGER.error("意外错误: %s", str(e), exc_info=True)
await self.close_connection(conn)
conn = None
if current_retries > 0:
current_retries -= 1
self.ssh = None
self.ssh_closed = True
_LOGGER.debug("Unexpected error: %s", str(e))
if attempt == retries - 1:
return ""
return ""
async def get_network_macs(self):
try:
output = await self.run_command("ip link show")
macs = {}
pattern = re.compile(r'^\d+: (\w+):.*\n\s+link/\w+\s+([0-9a-fA-F:]{17})', re.MULTILINE)
matches = pattern.findall(output)
for interface, mac in matches:
if interface == "lo" or mac == "00:00:00:00:00:00":
continue
else:
raise UpdateFailed(f"意外错误: {str(e)}") from e
finally:
if conn:
await self.release_ssh_connection(conn)
macs[mac] = interface
return macs
except Exception as e:
self.logger.debug("获取MAC地址失败: %s", str(e))
return {}
async def async_connect(self):
"""建立SSH连接使用连接池"""
# 连接池已处理连接,此方法现在主要用于初始化
return True
async def is_ssh_connected(self) -> bool:
"""检查是否有活动的SSH连接"""
return len(self.ssh_pool) > 0 or self.active_commands > 0
async def async_disconnect(self):
"""关闭所有SSH连接"""
# 关闭连接池中的所有连接
for conn in self.ssh_pool:
await self.close_connection(conn)
self.ssh_pool = []
self.active_commands = 0
self.ssh_closed = True
self.use_sudo = False # reset the sudo setting
async def _monitor_system_status(self):
"""系统离线时轮询检测状态"""
self.logger.debug("启动系统状态监控,每%d秒检测一次", self._retry_interval)
while True:
await asyncio.sleep(self._retry_interval)
if await self.ping_system():
self.logger.info("检测到系统已开机,触发重新加载")
# 触发集成重新加载
self.hass.async_create_task(
self.hass.config_entries.async_reload(self.config_entry.entry_id)
)
break
async def _async_update_data(self):
_LOGGER.debug("Starting data update...")
is_online = await self.ping_system()
self._system_online = is_online
if not is_online:
_LOGGER.debug("系统离线,跳过数据更新")
# 修复:确保 self.data 结构有效
if self.data is None or not isinstance(self.data, dict):
self.data = {}
if "system" not in self.data or not isinstance(self.data.get("system"), dict):
self.data["system"] = {}
self.data["system"]["status"] = "off"
# Start the background monitoring task (non-blocking)
if not self._ping_task or self._ping_task.done():
self._ping_task = asyncio.create_task(self._monitor_system_status())
await self.async_disconnect()
# Return empty data immediately without blocking
return {
"disks": [],
"system": {
"uptime": "未知",
"cpu_temperature": "未知",
"motherboard_temperature": "未知",
"status": "off"
},
"ups": {},
"vms": [],
"docker_containers": []
}
# System is online
try:
if await self.is_ssh_connected():
status = "on"
else:
if not await self.async_connect():
status = "off"
else:
status = "on"
# Make sure an SSH connection exists
if not await self.async_connect():
self.data["system"]["status"] = "off"
return {
"disks": [],
"system": {
"uptime": "未知",
"cpu_temperature": "未知",
"motherboard_temperature": "未知",
"status": "off"
},
"ups": {},
"vms": []
}
status = "on"
# Fetch data with the already-initialized managers
disks = await self.disk_manager.get_disks_info()
system = await self.system_manager.get_system_info()
ups_info = await self.ups_manager.get_ups_info()
vms = await self.vm_manager.get_vm_list()
# Fetch the VM titles
for vm in vms:
vm["title"] = await self.vm_manager.get_vm_title(vm["name"])
# Fetch Docker container info if enabled
docker_containers = []
if self.enable_docker and hasattr(self, 'docker_manager') and self.docker_manager:
if self.enable_docker:
docker_containers = await self.docker_manager.get_containers()
data = {
@@ -299,7 +331,12 @@ class FlynasCoordinator(DataUpdateCoordinator):
return data
except Exception as e:
_LOGGER.error("Failed to update data: %s", str(e), exc_info=True)
_LOGGER.debug("数据更新失败: %s", str(e))
# 检查错误类型,如果是连接问题,标记为离线
self._system_online = False
if not self._ping_task or self._ping_task.done():
self._ping_task = asyncio.create_task(self._monitor_system_status())
return {
"disks": [],
"system": {
@@ -339,10 +376,14 @@ class UPSDataUpdateCoordinator(DataUpdateCoordinator):
self.ups_manager = UPSManager(main_coordinator)
async def _async_update_data(self):
# Skip the UPS update when the main coordinator reports the system offline
if not self.main_coordinator._system_online:
return {}
try:
return await self.ups_manager.get_ups_info()
except Exception as e:
_LOGGER.error("Failed to update UPS data: %s", str(e), exc_info=True)
_LOGGER.debug("UPS数据更新失败: %s", str(e))
return {}
async def control_vm(self, vm_name, action):
@@ -353,5 +394,5 @@ class UPSDataUpdateCoordinator(DataUpdateCoordinator):
result = await self.vm_manager.control_vm(vm_name, action)
return result
except Exception as e:
_LOGGER.error("虚拟机控制失败: %s", str(e), exc_info=True)
_LOGGER.debug("虚拟机控制失败: %s", str(e))
return False

View File

@@ -1,8 +1,7 @@
import re
import logging
import asyncio
import time
from .const import CONF_IGNORE_DISKS, CONF_CACHE_TIMEOUT, DEFAULT_CACHE_TIMEOUT
from .const import CONF_IGNORE_DISKS
_LOGGER = logging.getLogger(__name__)
@@ -15,10 +14,7 @@ class DiskManager:
self.disk_full_info_cache = {} # cache of full disk info
self.first_run = True # first-run flag
self.initial_detection_done = False # flag: initial full detection completed
self.cache_expiry = {} # cache expiry times (timestamps)
# Read the cache timeout setting (minutes) and convert it to seconds
self.cache_timeout = self.coordinator.config.get(CONF_CACHE_TIMEOUT, DEFAULT_CACHE_TIMEOUT) * 60
def extract_value(self, text: str, patterns, default="未知", format_func=None):
if not text:
return default
@@ -42,6 +38,7 @@ class DiskManager:
async def check_disk_active(self, device: str, window: int = 30) -> bool:
"""检查硬盘在指定时间窗口内是否有活动"""
try:
# The correct path is /sys/block/{device}/stat
stat_path = f"/sys/block/{device}/stat"
# Read the stats file
@@ -94,6 +91,8 @@ class DiskManager:
stats = stat_output.split()
if len(stats) >= 11:
# Field 9 is the number of recently completed reads
# Field 10 is the number of recently completed writes
recent_reads = int(stats[8])
recent_writes = int(stats[9])
@@ -109,14 +108,6 @@ class DiskManager:
async def get_disks_info(self) -> list[dict]:
disks = []
try:
# Purge expired cache entries
now = time.time()
for device in list(self.disk_full_info_cache.keys()):
if now - self.cache_expiry.get(device, 0) > self.cache_timeout:
self.logger.debug(f"磁盘 {device} 的缓存已过期,清除")
del self.disk_full_info_cache[device]
del self.cache_expiry[device]
self.logger.debug("Fetching disk list...")
lsblk_output = await self.coordinator.run_command("lsblk -dno NAME,TYPE")
self.logger.debug("lsblk output: %s", lsblk_output)
@@ -157,15 +148,14 @@ class DiskManager:
# Check whether full info is already cached
cached_info = self.disk_full_info_cache.get(device, {})
# On the first run, force a full info fetch
# Optimization: on the first run, force a full info fetch
if self.first_run:
self.logger.debug(f"首次运行,强制获取硬盘 {device} 的完整信息")
try:
# Perform the full info fetch
await self._get_full_disk_info(disk_info, device_path)
# Update the cache and set its expiry
# Update the cache
self.disk_full_info_cache[device] = disk_info.copy()
self.cache_expiry[device] = now
except Exception as e:
self.logger.warning(f"首次运行获取硬盘信息失败: {str(e)}", exc_info=True)
# 使用缓存信息(如果有)
@@ -216,9 +206,8 @@ class DiskManager:
try:
# Perform the full info fetch
await self._get_full_disk_info(disk_info, device_path)
# Update the cache and set its expiry
# Update the cache
self.disk_full_info_cache[device] = disk_info.copy()
self.cache_expiry[device] = now
except Exception as e:
self.logger.warning(f"获取硬盘信息失败: {str(e)}", exc_info=True)
# 使用缓存信息(如果有)

View File

@@ -1,7 +1,7 @@
{
"domain": "fn_nas",
"name": "飞牛NAS",
"version": "1.3.3",
"version": "1.3.4",
"documentation": "https://github.com/anxms/fn_nas",
"dependencies": [],
"codeowners": ["@anxms"],

View File

@@ -10,9 +10,7 @@
"username": "用户名",
"password": "密码",
"scan_interval": "数据更新间隔(秒)",
"enable_docker": "启用docker控制",
"max_connections": "最大连接数",
"cache_timeout": "缓存过期时间"
"enable_docker": "启用docker控制"
}
},
"select_mac": {