Working prototype
All the minimal features work now:
- describing a device
- describing a backup
- linking a backup to a device
- scheduling backups at day intervals
- mounting the device
- putting stuff on it
- unmounting the device

Downsides:
- executed as root, fully!
- all scripts are executed as root, dangerous!
- bad and undocumented code.. :)
- no tests and only minimal checking (lotta TODOs)
parent 3c4f9a59ac
commit d1dfb809e3
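The feature list above maps onto a YAML configuration read by the Config class. Below is a minimal sketch of what such a backive.yml could look like, inferred only from the keys this commit reads (uuid, mountname, from, to, script, target_device, preferences.mount_root); the overall layout and the name of the interval key ("frequency") are assumptions, not taken from the project's schema.

import io
from ruamel.yaml import YAML  # already a project dependency (see config.py below)

EXAMPLE = """
preferences:
  mount_root: /mnt/backive          # where devices get mounted

devices:
  usb_disk:                         # name handed to Device.instance()
    uuid: 1234-ABCD                 # matched against the udev ID_FS_UUID
    mountname: usb_disk             # subdirectory created below mount_root

backups:
  home_docs:
    from: /home/user/Documents      # exported as BACKIVE_FROM
    to: documents                   # exported as BACKIVE_TO, created on the device
    target_device: usb_disk         # links the backup to a device
    frequency: 7                    # assumed key name: day interval for the Scheduler
    script: |
      rsync -a "$BACKIVE_FROM/" "$BACKIVE_MOUNT/$BACKIVE_TO/"
"""

config = YAML().load(io.StringIO(EXAMPLE))
print(config["backups"]["home_docs"]["target_device"])  # -> usb_disk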
backive service module (class Backive):

@@ -10,6 +10,7 @@ import logging
 import json
 from backive.core.events import EventInterface
 from backive.core.scheduler import Scheduler
+from backive.core.backup import Backup
 from backive.config.config import Config
 
 
@@ -26,22 +27,44 @@ logging.info("Backive starting.")
 
 class Backive:
     def __init__(self):
+        self._scheduler = Scheduler()
         self._config = Config()
         self._events = None
+        self.initialize_scheduler()
 
+    def initialize_scheduler(self):
+        backups = self._config.get_backups()
+        for backup in backups:
+            self._scheduler.register_backup(backup.name, backup.get_frequency())
 
     async def callback(self, data=None):
         data_dict = json.loads(data)
         uuid = data_dict.get("ID_FS_UUID", None)
         logging.info("UUID: %s", uuid)
-        logging.debug(json.dumps(data_dict, indent=4))
 
         if uuid and data_dict.get("ACTION") == "add":
+            logging.debug(json.dumps(data_dict, indent=4))
             backups = await self._config.get_backups_by_device(uuid)
+            device = await self._config.get_uuid_device(uuid)
+            prefs = await self._config.get_preferences()
+            if backups:
+                logging.info("Mounting device '%s'", uuid)
+                mount_available = await device.mount(prefs.get("mount_root"))
+                if mount_available:
                     for backup in backups:
+                        if await self._scheduler.should_run(backup.name):
                             logging.info("Running backup '%s'", backup.name)
                             result = await backup.run()
                             logging.debug("Result: %s", str(result))
+                            await self._scheduler.register_run(backup.name)
+                        else:
+                            logging.info(
+                                "Backup '%s' next run interval has not been reached.",
+                                backup.name
+                            )
+                    logging.info("Unmounting device '%s'", uuid)
+                    await device.unmount()
+                else:
+                    logging.error("Device %s could not be mounted...", uuid)
 
     def serve(self):
         loop = asyncio.get_event_loop()
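The callback above is fed udev device events (via EventInterface) as JSON strings and only reacts to an "add" action that carries a filesystem UUID. A hedged sketch of exercising it directly with a hand-built payload, assuming Backive is importable from this module and the configuration loads (i.e. the service runs as root):

import asyncio
import json

# Hypothetical payload in the shape callback() parses: a JSON string with the
# udev properties it reads (ACTION and ID_FS_UUID).
fake_event = json.dumps({
    "ACTION": "add",            # only "add" triggers mount + backups
    "ID_FS_UUID": "1234-ABCD",  # resolved via Config.get_uuid_device()
})

async def simulate():
    backive = Backive()         # registers configured backups with the Scheduler
    await backive.callback(fake_event)

asyncio.run(simulate())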
backive/config/config.py:

@@ -1,4 +1,3 @@
-import logging
 import os
 import pwd
 import json
@@ -6,9 +5,8 @@ from ruamel.yaml import YAML
 import logging
 import jsonschema
 
-from backive.core.backup import Backup
-from backive.core.device import Device
+class RegularUserException(Exception):
+    pass
 
 
 class Config:
     __shared_state = dict()
@@ -20,7 +18,7 @@ class Config:
         logging.info("Loading configuration...")
         self._schema = dict()
         self._backups = list()
-        self._devices = list()
+        self._devices = dict()
         file_path = os.path.realpath(__file__)
         schema_path = os.path.join(
             os.path.dirname(
@@ -42,6 +40,13 @@ class Config:
         if uid == 0:
             config_file = "/etc/backive.yml"
         else:
+            raise RegularUserException(
+                """
+                It is planned to add functionality to use this service
+                as a regular user, but for the time being it is advised to
+                execute this service as root, because this feature is still
+                planned and needs more development time.
+                """)
             config_file = os.path.join(
                 os.path.expanduser("~"),
                 ".config",
@@ -51,24 +56,39 @@ class Config:
 
             with open(config_file, "r") as cfg:
                 self._config = YAML().load(cfg)
-                logging.debug("Found config: %s", json.dumps(self._config, indent=4))
+                logging.debug(
+                    "Found config: %s\n%s",
+                    config_file,
+                    json.dumps(self._config, indent=4)
+                )
                 jsonschema.validate(self._config, self._schema)
+        except RegularUserException as e:
+            raise e
         except Exception as e:
             logging.error(e)
 
     def get_devices(self):
+        from backive.core.device import Device
         if self._config.get("devices", None) and not self._devices:
             data = self._config.get("devices")
-            for device in data:
-                self._devices.append(
+            for device, values in data.items():
+                self._devices.update({
+                    device:
                     Device.instance(
                         device,
-                        data.get(device)
+                        values
                     )
-                )
+                })
         return self._devices
 
-    def get_backups(self):
+    async def get_device(self, name):
+        for device, value in self.get_devices().items():
+            if device == name:
+                return value
+        return None
+
+    def get_backups(self) -> list:
+        from backive.core.backup import Backup
         if self._config.get("backups", None) and not self._backups:
             data = self._config.get("backups")
             for name in data:
@@ -80,6 +100,16 @@ class Config:
                 )
         return self._backups
 
+    async def get_uuid_device(self, uuid):
+        logging.debug("get device %s", uuid)
+        for device, value in self.get_devices().items():
+            logging.debug(
+                "device %s, config %s", device, json.dumps(value.config)
+            )
+            if value.config.get("uuid") == uuid:
+                return value
+        return None
+
     async def get_backups_by_device(self, uuid):
         name = None
         if not self._config.get("devices"):
@@ -98,7 +128,7 @@ class Config:
                 backups.append(backup)
         return backups
 
-    def get_preferences(self):
+    async def get_preferences(self):
         if self._config.get("preferences", None):
             return self._config.get("preferences")
         return {}
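Config keeps a class-level `__shared_state` dict, which is why `Config()` can be instantiated again inside Backup.run() and still see the already-loaded configuration. That reads like the Borg (monostate) pattern; a generic sketch of the idea follows (the project's actual `__init__` is outside this diff, so this is illustration, not its code):

class Borg:
    # One dictionary shared by every instance.
    __shared_state = {}

    def __init__(self):
        # Rebinding __dict__ makes all instances read and write the same attributes.
        self.__dict__ = self.__shared_state


class Settings(Borg):
    pass


a = Settings()
a.mount_root = "/mnt/backive"
b = Settings()
print(b.mount_root)  # -> /mnt/backive: state written through `a` is visible on `b`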
backive/core/backup.py:

@@ -2,6 +2,7 @@ import os
 import logging
 from subprocess import Popen
 import asyncio
+from backive.config.config import Config
 
 
 class Backup:
@@ -19,13 +20,37 @@ class Backup:
 
     async def run(self):
         logging.debug("Running backup %s", self.name)
-        if self.config.get("scripts", None) is not None:
+        if self.config.get("script", None) is not None:
             logging.debug("Executing script..")
+            backup_env = os.environ.copy()
+            backup_env["BACKIVE_FROM"] = self.config.get("from")
+            backup_env["BACKIVE_TO"] = self.config.get("to")
+            backup_env["BACKIVE_MOUNT"] = os.path.join(
+                (await Config().get_preferences()).get("mount_root"),
+                (await Config().get_device(
+                    self.config.get("target_device")
+                )).config.get("mountname")
+            )
             proc = await asyncio.create_subprocess_shell(
-                self.config.get("script"),
+                """mkdir -p {}""".format(
+                    os.path.join(
+                        backup_env["BACKIVE_MOUNT"],
+                        backup_env["BACKIVE_TO"]
+                    )
+                ),
                 stdout=asyncio.subprocess.PIPE,
-                stderr=asyncio.subprocess.PIPE
+                stderr=asyncio.subprocess.PIPE,
             )
             stdout, stderr = await proc.communicate()
             logging.debug("stdout: %s", stdout)
+            logging.debug("stderr: %s", stderr.decode())
+            proc = await asyncio.create_subprocess_shell(
+                self.config.get("script"),
+                stdout=asyncio.subprocess.PIPE,
+                stderr=asyncio.subprocess.PIPE,
+                env=backup_env
+            )
+            stdout, stderr = await proc.communicate()
+            logging.debug("stdout: %s", stdout.decode())
+            logging.debug("stderr: %s", stderr.decode())
             return stdout
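Backup.run() now prepares the destination directory on the mounted device and then runs the configured `script` with BACKIVE_FROM, BACKIVE_TO and BACKIVE_MOUNT in its environment. A hedged sketch of a script that could sit behind the `script` key and rely on those variables (a plain-Python copy, used purely for illustration):

#!/usr/bin/env python3
"""Example backup script: copy BACKIVE_FROM into BACKIVE_MOUNT/BACKIVE_TO.

Assumes it is launched by backive, which exports the BACKIVE_* variables and
has already created the destination directory on the mounted device.
"""
import os
import shutil

src = os.environ["BACKIVE_FROM"]
dst = os.path.join(os.environ["BACKIVE_MOUNT"], os.environ["BACKIVE_TO"])

# Mirror the source tree; dirs_exist_ok keeps repeated runs from failing.
shutil.copytree(src, dst, dirs_exist_ok=True)
print(f"copied {src} -> {dst}")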
backive/core/device.py:

@@ -1,5 +1,6 @@
 import os
 import logging
+import asyncio
 import backive.config.config as cfg
 
 
@@ -9,6 +10,7 @@ class Device:
     def __init__(self, name, config=None):
         self.name = name
         self.config = config
+        self._mount_dir = None
 
     @classmethod
     def instance(cls, name, config=None):
@@ -22,9 +24,36 @@ class Device:
             return uuids
         return []
 
-    def mount(self, path):
-        pass
+    async def mount(self, path):
+        self._mount_dir = os.path.join(path, self.config.get("mountname"))
+        dev_path = os.path.join(self.disks_by_uuid, self.config.get("uuid"))
+        logging.debug("dev: %s ;; mount: %s", dev_path, self._mount_dir)
+        # TODO: use mkdir as indicator for correct access rights (when backive
+        # is run as user!)
+        proc = await asyncio.create_subprocess_shell(
+            """mkdir -p {mountpoint}
+            sudo mount -v -o users {dev_path} {mountpoint}""".format(
+                mountpoint=self._mount_dir,
+                dev_path=dev_path
+            ),
+            stdout=asyncio.subprocess.PIPE,
+            stderr=asyncio.subprocess.PIPE,
+        )
+        stdout, stderr = await proc.communicate()
+        logging.debug("stdout: %s", stdout)
+        # TODO: Also add a touch operation in the target mount if the correct
+        # access rights are given! (when backive is run as user)
+        return True  # on success, False on failure
 
-    def unmount(self):
-        pass
+    async def unmount(self):
+        if not self._mount_dir:
+            return
+        proc = await asyncio.create_subprocess_shell(
+            """sync
+            sudo umount -v %s
+            """ % self._mount_dir,
+            stdout=asyncio.subprocess.PIPE,
+            stderr=asyncio.subprocess.PIPE,
+        )
+        stdout, stderr = await proc.communicate()
+        logging.debug("stdout: %s", stdout)
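mount() currently returns True unconditionally; the trailing comment marks the intent ("True on success, False on failure"). A sketch of how the shell's exit status could back that return value, using only the asyncio subprocess API already employed above (a suggestion, not the committed behaviour):

import asyncio
import logging

async def run_shell(cmd: str) -> bool:
    """Run a shell snippet and report success through its exit status."""
    proc = await asyncio.create_subprocess_shell(
        cmd,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
    )
    stdout, stderr = await proc.communicate()
    logging.debug("stdout: %s", stdout.decode())
    logging.debug("stderr: %s", stderr.decode())
    # returncode is populated once communicate() has waited for the process.
    return proc.returncode == 0

# e.g. at the end of Device.mount():
#     return await run_shell(
#         "sudo mount -v -o users {} {}".format(dev_path, self._mount_dir))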
backive/core/scheduler.py:

@@ -26,6 +26,8 @@ class Scheduler():
         if not os.path.exists(os.path.dirname(self._data_file)):
             os.makedirs(os.path.dirname(self._data_file))
             self.save()
+        else:
+            self.load()
 
     def save(self):
         with open(self._data_file, "w") as stream:
@@ -36,6 +38,7 @@ class Scheduler():
             self.__data = json.load(stream)
 
     def register_backup(self, name, frequency):
+        logging.debug("Registering %s, freq %s in Scheduler", name, frequency)
         backups = self.__data.get("backups", dict())
         if not backups:
             self.__data["backups"] = backups
@@ -46,7 +49,8 @@ class Scheduler():
         backups[name] = frequency
         self.save()
 
-    def register_run(self, name):
+    async def register_run(self, name):
+        logging.info("Registered run of backup '%s'", name)
         runs = self.__data.get("runs", dict())
         if not runs:
             self.__data["runs"] = runs
@@ -56,22 +60,42 @@ class Scheduler():
         runs[name].append(datetime.now().isoformat())
         self.save()
 
-    def should_run(self, name):
+    async def should_run(self, name):
+        logging.debug("Checking if %s may run...", name)
         runs = self.__data.get("runs", dict())
         if name not in runs:
+            logging.debug("Not registered, so YES")
             return True
+        frequency = 0
         if name in runs:
+            logging.debug("Registered, checking...")
             backups = self.__data.get("backups", dict())
             if name in backups:
+                logging.debug("retrieving frequency")
                 frequency = backups[name]
             last_ts = runs[name][-1]
             now = datetime.now()
             last = datetime.fromisoformat(last_ts)
             diff = now - last
             days = diff.days
-            if days > frequency and days >= 1:
+            if days >= frequency and days >= 1 or frequency == 0:
+                logging.debug("YES, should run.")
                 return True
+        logging.debug("No should not run.")
         return False
 
     def get_overtimed(self):
-        return list()
+        overtime = list()
+        now = datetime.now()
+        runs = self.__data.get("runs", dict())
+        for bkp_name, freq in self.__data.get("backups").items():
+            if bkp_name not in runs.keys():
+                overtime.append(bkp_name)
+            else:
+                last_ts = runs[bkp_name][-1]
+                last = datetime.fromisoformat(last_ts)
+                diff = now - last
+                days = diff.days
+                if days > freq and freq != 0:
+                    overtime.append(bkp_name)
+        return overtime
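should_run() decides purely on whole days between now and the last recorded run, and `frequency == 0` short-circuits the check. A standalone sketch of that rule, useful for reasoning about the edge cases (`and` binds tighter than `or`, so a zero frequency always allows a run):

from datetime import datetime, timedelta

def due(last_run_iso, frequency_days, now=None):
    """Illustrative re-implementation of the scheduler's day-interval check."""
    now = now or datetime.now()
    days = (now - datetime.fromisoformat(last_run_iso)).days
    # Mirrors: if days >= frequency and days >= 1 or frequency == 0
    return (days >= frequency_days and days >= 1) or frequency_days == 0

last = (datetime.now() - timedelta(days=3)).isoformat()
print(due(last, 7))  # False: only 3 full days have passed
print(due(last, 3))  # True: the 3-day interval has been reached
print(due(last, 0))  # True: frequency 0 runs on every device plug-in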