#!/usr/bin/env python3
from alterator_bindings import backend3
from alterator_bindings.backend3 import *

from alterator_bindings.backend3 import translate as _

from blivetstorage.logging import setup_logging
from blivetstorage.manager import DeviceTreeManager
from blivetstorage import btrfs
from blivetstorage import utils
from blivetstorage.errors import BadMountpointError, BTRFSError
from blivetstorage.partitioning import PartSpec

from bytesize import GiB, ROUND_UP
from blivet.size import Size
from blivet.devices.storage import StorageDevice

from enum import StrEnum
from dataclasses import dataclass
import itertools
import logging
import os
from typing import Dict, Iterable, List

# Enable verbose backend logging and set the gettext translation domain
backend3.ALTERATOR_DEBUG = True
backend3.TEXTDOMAIN = "alterator-blivet"

setup_logging(backend3.ALTERATOR_DEBUG)
log = logging.getLogger("alterator-blivet")

# Generic fallback error message shown to the user when no better one exists
STRANGE_ERROR_MESSAGE = _("Something went wrong, please retry!")
# Single shared blivet device-tree manager used by every handler below
mng = DeviceTreeManager()
# Minimal size considered sufficient for the root filesystem
recommended_root_size = utils.get_sufficient_size_for_root()
# Short distro identifier (e.g. "pve"); used for VG naming and PVE tweaks
short_product_name = utils.get_product_short_name()


#################### PVE-specific stuff ####################

# Used by PVE distro to setup storage.cfg for cluster;
# the generated file is staged here for the installer to pick up
PVE_CONFIG_DIR = "/tmp/pve"


def get_lvm_thin_config(thinpool_name: str, vg_name: str):
    """storage.cfg for PVE distro with thinpool"""
    return f"""\
dir: local
    path /var/lib/vz
    content iso,vztmpl,backup

lvmthin: local-lvm
    thinpool {thinpool_name}
    vgname {vg_name}
    content rootdir,images
"""


def get_local_config():
    """storage.cfg for PVE distro without thinpool"""
    # Same bytes as the original triple-quoted literal (tab-indented body)
    return (
        "dir: local\n"
        "\tpath /var/lib/vz\n"
        "\tcontent iso,vztmpl,backup,rootdir,images\n"
    )


def get_btrfs_config():
    """storage.cfg for PVE distro with BTRFS"""
    # Same bytes as the original triple-quoted literal (tab-indented body)
    return (
        "dir: local\n"
        "\tpath /var/lib/vz\n"
        "\tcontent iso,vztmpl,backup\n"
        "\tdisable\n"
        "\n"
        "btrfs: local-btrfs\n"
        "\tpath /var/lib/pve/local-btrfs\n"
        "\tcontent iso,vztmpl,backup,images,rootdir\n"
    )


def save_pve_storage_cfg(storage_cfg: str):
    """Write *storage_cfg* to PVE_CONFIG_DIR/storage.cfg (best effort).

    Failures are logged but never propagated: the file is only needed by
    the PVE distro, and installation may proceed without it.
    """
    try:
        os.makedirs(PVE_CONFIG_DIR, exist_ok=True)
        with open(os.path.join(PVE_CONFIG_DIR, "storage.cfg"), "w") as f:
            f.write(storage_cfg)
    except Exception as e:
        # Lazy %-args: the message is only rendered if the record is emitted
        log.warning("Failed to save storage.cfg: %s", e)


############################################################


class Scheme(StrEnum):
    EXT4 = "ext4"
    # XFS = "xfs"
    BTRFS_RAID0 = "btrfs_raid0"
    BTRFS_RAID1 = "btrfs_raid1"
    BTRFS_RAID10 = "btrfs_raid10"
    BTRFS_SINGLE = "btrfs_single"

    def is_btrfs(self):
        """If FS type is BTRFS Raid, returns corresponding instance of btrfs.RaidLevel.

        Returns None otherwise."""
        match self:
            case Scheme.BTRFS_RAID0:
                return btrfs.RaidLevel.RAID0
            case Scheme.BTRFS_RAID1:
                return btrfs.RaidLevel.RAID1
            case Scheme.BTRFS_RAID10:
                return btrfs.RaidLevel.RAID10
            case Scheme.BTRFS_SINGLE:
                return btrfs.RaidLevel.SINGLE
            case default:
                return None

    def pretty_str(self):
        btrfs_mode = self.is_btrfs()
        if btrfs_mode:
            return "btrfs (%s)" % btrfs_mode.upper()
        else:
            return "%s (LVM)" % self


class LayoutApproach(StrEnum):
    PARTITION = "partition"
    LOGICAL_VOLUME = "lv"
    BTRFS_VOLUME = "vol"
    BTRFS_SUBVOLUME = "subvol"

    def pretty_str(self):
        match self:
            case LayoutApproach.PARTITION:
                return _("Separate partition")
            case LayoutApproach.LOGICAL_VOLUME:
                return _("LVM Logical Volume")
            case LayoutApproach.BTRFS_VOLUME:
                return _("Separate btrfs volume")
            case LayoutApproach.BTRFS_SUBVOLUME:
                return _("Additional btrfs subvolume")


class MountPathValidator:
    """Class to represent mount path in a 'classic' form
    (no trailing backslashes, etc.)
    Also checks it for correctness.

    Prohibits "/" (so user can't add "/" by himself,
    only via dedicated volume)

    Raises:
        BadMountpointError: for forbidden/relative paths and for
            '/boot/efi', which is created automatically when needed.
    """

    # Paths that must never become user mountpoints (normalized once)
    FORBIDDEN_MOUNTPOINTS = list(
        map(
            os.path.normpath,
            [
                "/",
                "/proc",
                "/sys",
                "/dev",
                "/lost+found",
                "/dev/pts",
                "/var/lib/pve/local-btrfs",
            ],
        )
    )

    def __init__(self, mount_path: str):
        mount_path = os.path.normpath(mount_path)
        if (mount_path in MountPathValidator.FORBIDDEN_MOUNTPOINTS) or (
            not mount_path.startswith("/")
        ):
            raise BadMountpointError(
                "%s: '%s'" % (_("Forbidden mount path"), mount_path)
            )
        elif mount_path == os.path.normpath("/boot/efi"):
            raise BadMountpointError(
                "'%s' %s" % (mount_path, _("will be created automatically, if needed!"))
            )

        # Normalized, validated path
        self.path = mount_path

    def __str__(self) -> str:
        # BUG FIX: the constructor stores `self.path`; the original returned
        # `self.mount_path`, which never exists, so str() always raised
        # AttributeError (e.g. from VolumeKey.from_id's mountpoint branch).
        return self.path


@dataclass
class VolumeKey:
    """Portrays volume as key with some pretty description.

    Used for Template-like descriptions (ComboBox in interface),
    and as key for set of planned volumes.

    ``id`` is either a well-known identifier ("root", "swap", "data",
    "root_btrfs_vol", "custom_mountpoint") or a normalized mount path.
    Equality and hashing consider only ``id``.
    """

    # Well-known identifier or a normalized mount path (see class docstring)
    id: str
    # Human-readable (possibly translated) label shown in the interface
    description: str

    @staticmethod
    def from_mountpoint(mountpoint: str):
        """Build a key from a plain mount path (validated and normalized)."""
        mountpoint = MountPathValidator(mountpoint)
        description = mountpoint.path

        id = mountpoint.path
        return VolumeKey(id, description)

    @staticmethod
    def from_id(id: str):
        """We received only id, and want to have description"""
        if id == "root":
            description = _("'root' LV ('/')")
        elif id == "root_btrfs_vol":
            description = _("Root btrfs volume ('/')")
        elif id == "swap":
            description = _("'swap' LV")
        elif id == "data":
            description = _("'data' LV (Thin Pool) for VMs")
        elif id == "custom_mountpoint":
            description = _("Set custom mountpoint")
        else:
            # Then it's a Template-mountpoint
            # NOTE(review): str(mountpoint) calls MountPathValidator.__str__,
            # which reads `self.mount_path` — an attribute the constructor
            # never sets (it stores `self.path`). Confirm this branch is
            # exercised / fix __str__ before relying on it.
            path = id
            mountpoint = MountPathValidator(path)
            description = str(mountpoint)

        return VolumeKey(id, description)

    def get_mountpoint(self) -> str | None:
        """mountpoint of self, if any (None for swap/data/custom template)"""
        id = self.id

        if id == "root" or id == "root_btrfs_vol":
            return "/"
        elif id == "swap" or id == "data" or id == "custom_mountpoint":
            return None
        # Then it's mountpoint-type itself
        return id

    def should_be_listed(self, scheme: Scheme):
        """Should this Volume Template be listed in Add list options"""
        # Root volumes are always present and never user-addable
        if self == VolumeKey.from_id("root") or self == VolumeKey.from_id(
            "root_btrfs_vol"
        ):
            return False
        # swap/data LVs make no sense under a btrfs scheme
        if scheme.is_btrfs() and (
            self == VolumeKey.from_id("swap") or self == VolumeKey.from_id("data")
        ):
            return False
        return True

    def needs_mount_path(self):
        """True when the user must type a mount path (custom mountpoint)."""
        return self == VolumeKey.from_id("custom_mountpoint")

    def _try_find_in(self, planned_volumes: list["VolumeEntry"]):
        """Return the planned VolumeEntry with this key, or None."""
        found_volume = next((vol for vol in planned_volumes if vol.key == self), None)
        return found_volume

    def layout_approach_options(self, scheme: Scheme) -> list[LayoutApproach]:
        """Allowed layout approaches for this key; first entry is the default."""
        if self == VolumeKey.from_id("root"):
            return [LayoutApproach.LOGICAL_VOLUME]
        if self == VolumeKey.from_id("swap"):
            return [LayoutApproach.LOGICAL_VOLUME]
        if self == VolumeKey.from_id("data"):
            return [LayoutApproach.LOGICAL_VOLUME]
        if self == VolumeKey.from_id("root_btrfs_vol"):
            return [LayoutApproach.BTRFS_VOLUME]

        if scheme.is_btrfs():
            return [LayoutApproach.BTRFS_SUBVOLUME, LayoutApproach.BTRFS_VOLUME]

        elif self == VolumeKey.from_mountpoint("/boot"):
            # Separate partition for "/boot" is preferred option
            return [LayoutApproach.PARTITION, LayoutApproach.LOGICAL_VOLUME]
        return [LayoutApproach.LOGICAL_VOLUME, LayoutApproach.PARTITION]

    def default_approach(self, scheme: Scheme, planned_volumes: list["VolumeEntry"]):
        """Approach of the already-planned entry, else the first allowed option."""
        existing_volume = self._try_find_in(planned_volumes)
        if existing_volume:
            return existing_volume.approach
        return self.layout_approach_options(scheme)[0]

    def needs_size(self, approach: LayoutApproach):
        """True unless the approach is a btrfs subvolume (no own size)."""
        if approach == LayoutApproach.BTRFS_SUBVOLUME:
            return False
        return True

    ##################
    # default size constraints
    # used only if 'needs_size'
    # or when reinitializing volumes list

    def min_size(self) -> Size:
        """Lower size bound: the recommended root size for root keys, else 1GiB."""
        if self == VolumeKey.from_id("root") or self == VolumeKey.from_id(
            "root_btrfs_vol"
        ):
            return recommended_root_size
        return Size("1GiB")

    def max_size(self, planned_volumes: list["VolumeEntry"], space_limit: Size) -> Size:
        """Space left under *space_limit* after all other planned volumes."""
        sum_size = sum(
            [
                (vol.desired_size if vol.desired_size else Size(0))
                for vol in planned_volumes
            ]
        )
        existing_volume = self._try_find_in(planned_volumes)
        if existing_volume:
            # Don't consider already existing volume (in case of Edit)
            sum_size -= (
                existing_volume.desired_size
                if existing_volume.desired_size
                else Size(0)
            )
        return Size(space_limit - sum_size)

    def default_size(
        self, planned_volumes: list["VolumeEntry"], space_limit: Size
    ) -> Size:
        """Suggested size for this key, clamped to the remaining free space."""
        existing_volume = self._try_find_in(planned_volumes)
        if existing_volume and existing_volume.desired_size:
            return existing_volume.desired_size

        max_size = self.max_size(planned_volumes, space_limit)

        if self == VolumeKey.from_id("root"):
            good_size = 2 * recommended_root_size
        elif self == VolumeKey.from_id("swap"):
            good_size = Size("2GiB")
        elif self == VolumeKey.from_id("data"):
            # Hack to achieve behaviour where we have
            # root + swap + data and 1-2 GiB free for snapshot
            good_size = max(Size(0), max_size - Size("2GiB"))
        elif self == VolumeKey.from_id("root_btrfs_vol"):
            good_size = max_size
        elif self == VolumeKey.from_id("custom_mountpoint"):
            good_size = Size("1GiB")
        elif self == VolumeKey.from_mountpoint("/boot"):
            good_size = Size("1GiB")
        else:
            good_size = Size("10GiB")

        return Size(min(good_size, max_size))

    ##################

    def needs_mntopts(self):
        """True for mountable keys; swap and the thin pool take no mount options."""
        if self == VolumeKey.from_id("swap") or self == VolumeKey.from_id("data"):
            return False
        return True

    # default mntopts
    def default_mntopts(self, planned_volumes: list["VolumeEntry"]):
        """Mount options of the planned entry if present, else per-key defaults."""
        existing_volume = self._try_find_in(planned_volumes)
        if existing_volume:
            return existing_volume.mntopts or ""

        if self == VolumeKey.from_id("root") or self == VolumeKey.from_id(
            "root_btrfs_vol"
        ):
            return "relatime"
        elif self == VolumeKey.from_id("swap") or self == VolumeKey.from_id("data"):
            # Isn't reachable anyway (shouldn't be called)
            return ""
        elif self == VolumeKey.from_id("custom_mountpoint"):
            return "nosuid,nodev,noexec"
        elif self == VolumeKey.from_mountpoint("/boot"):
            return "nodev,nosuid,noexec,relatime"
        elif self == VolumeKey.from_mountpoint(
            "/var"
        ) or self == VolumeKey.from_mountpoint("/var/log"):
            return "nosuid,relatime"
        elif self == VolumeKey.from_mountpoint("/opt"):
            return "nosuid,nodev,relatime"
        else:
            return "nosuid,nodev,noexec"

    # Identity is the id alone; description is ignored on purpose
    def __hash__(self):
        return hash(self.id)

    def __eq__(self, other):
        if isinstance(other, VolumeKey):
            return self.id == other.id
        return False


# Possible variants offered in the "Add volume" dialog.
# Order matters: the front-end refers to a template by its index in this
# list (see possible_volumes() / _extract_volume_key()).
VOLUME_TEMPLATES = [
    VolumeKey.from_mountpoint("/boot"),
    VolumeKey.from_mountpoint("/var"),
    VolumeKey.from_mountpoint("/var/log"),
    VolumeKey.from_mountpoint("/opt"),
    VolumeKey.from_id("root"),
    VolumeKey.from_id("swap"),
    VolumeKey.from_id("data"),
    VolumeKey.from_id("root_btrfs_vol"),
    VolumeKey.from_id("custom_mountpoint"),
]


class VolumeEntry:
    """Describes abstract object (volume/mountpoint) with it's options/size/etc.,
    that's will be scheduled.
    """

    def __init__(
        self,
        key: VolumeKey,
        approach: LayoutApproach,
        mount_path: str = None,
        desired_size: Size = None,
        mntopts: str = None,
    ):
        self.key = key
        self.approach = approach
        if mount_path:
            self.mount_path = mount_path
        self.desired_size = desired_size
        self.mntopts = mntopts

    def __hash__(self):
        return hash(self.key)

    def __eq__(self, other):
        if isinstance(other, VolumeEntry):
            return self.key == other.key
        return False


class StorageInterfaceState:
    """Describes current user's choices (scheme, disks, planned volumes)."""

    def __init__(
        self,
        planned_volumes: list[VolumeEntry] = None,
        current_scheme: Scheme = None,
        current_disks: list[StorageDevice] = None,
        size_limit: Size = None,
    ):
        # BUG FIX: the old `planned_volumes: list[...] = []` was a mutable
        # default argument, shared by every instance constructed without an
        # explicit list. Use a None sentinel instead.
        self.planned_volumes = planned_volumes if planned_volumes is not None else []
        self.current_scheme = current_scheme
        self.current_disks = current_disks

        # All available space (i.e. (pvsize + additional partitions)/smallest disk)
        self.size_limit = size_limit

    def possible_volumes(self):
        """List volume templates appropriate for the chosen scheme.

        The enum id written for each entry is its index in VOLUME_TEMPLATES.
        """
        for i, volume in enumerate(VOLUME_TEMPLATES):
            if not volume.should_be_listed(self.current_scheme):
                continue

            write_enum_item(
                str(i),
                _(volume.description),
            )

    def list_current_volumes(self):
        """Emit the planned volumes as table rows for the front-end."""
        for vol in self.planned_volumes:
            write_table_item(
                {
                    "volume_name": _(vol.key.description),
                    "volume_approach": vol.approach.pretty_str(),
                    "volume_size": (
                        vol.desired_size.human_readable("GiB")
                        if vol.approach != LayoutApproach.BTRFS_SUBVOLUME
                        else ""
                    ),
                    "volume_mntopts": vol.mntopts if vol.mntopts is not None else "",
                }
            )

    def remove_volume(self, data: dict):
        """Remove the planned volume at data["index"]; root is protected."""
        index = read_index(data, "index", len(self.planned_volumes))
        if index is None:
            return

        key = self.planned_volumes[index].key
        if key == VolumeKey.from_id("root") or key == VolumeKey.from_id(
            "root_btrfs_vol"
        ):
            write_error(_("Can't remove 'root' volume!"))
            return

        self.planned_volumes.pop(index)

    def reinit_volumes_list(self):
        """Reset planned volumes to the scheme's default set with default sizes."""
        self.planned_volumes = []
        if self.current_scheme.is_btrfs():
            initial_vols_order = [VolumeKey.from_id("root_btrfs_vol")]
        else:
            initial_vols_order = [
                VolumeKey.from_id("root"),
                VolumeKey.from_id("swap"),
                VolumeKey.from_id("data"),
            ]

        # Each default size accounts for the volumes planned before it
        for vol_key in initial_vols_order:
            volume = VolumeEntry(
                vol_key,
                approach=vol_key.default_approach(
                    self.current_scheme, self.planned_volumes
                ),
                mount_path=vol_key.get_mountpoint(),
                desired_size=vol_key.default_size(
                    self.planned_volumes, self.size_limit
                ),
                mntopts=vol_key.default_mntopts(self.planned_volumes),
            )
            self.planned_volumes.append(volume)

    def reset_lvm_size_limit(self):
        """Publish min/max/current values (in GiB) for the size-limit slider."""
        write_string_param(
            "min_lvm_size_limit", str(recommended_root_size.convert_to(GiB))
        )
        write_string_param("max_lvm_size_limit", str(self.size_limit.convert_to(GiB)))
        write_string_param("cur_lvm_size_limit", str(self.size_limit.convert_to(GiB)))

    def store_disks(self, data: dict):
        """Remember the selected disks and derive the size limit.

        The limit is the smallest selected disk, rounded up to a whole GiB.
        """
        self.current_disks = read_disks(data)
        space = Size(0)
        if self.current_disks:
            space = Size(min([disk.size for disk in self.current_disks]))
        self.size_limit = Size(space.round_to_nearest(GiB, ROUND_UP))

    def _extract_volume_key(
        self, data: dict
    ) -> tuple[VolumeKey, int, bool] | None:
        """Resolve which volume key a front-end message refers to.

        Returns (key, index, editting); `editting` tells whether the index
        points into planned_volumes (edit) or VOLUME_TEMPLATES (add).
        Returns None (after reporting an error) when nothing is selected.
        """
        edit_index = read_index(data, "edit_mode", len(self.planned_volumes))
        template_index = read_index(data, "template", len(VOLUME_TEMPLATES))
        editting = False
        if edit_index is not None:
            editting = True
            vol_key = self.planned_volumes[edit_index].key
            return (vol_key, edit_index, editting)
        elif data.get("edit_mode"):
            write_error(_("No volume selected to edit!"))
        elif template_index is not None:
            vol_key = VOLUME_TEMPLATES[template_index]
            return (vol_key, template_index, editting)
        else:
            write_error(_(STRANGE_ERROR_MESSAGE))

        return None

    def list_approach_options(self, data: dict):
        """Emit the allowed layout approaches for the referenced volume."""
        res = self._extract_volume_key(data)
        if res is None:
            return

        vol_key, index, editting = res
        for opt in vol_key.layout_approach_options(self.current_scheme):
            write_enum_item(opt, opt.pretty_str())

    def get_default_approach(self, data: dict):
        """Report the default layout approach for the referenced volume."""
        res = self._extract_volume_key(data)
        if res is None:
            return

        vol_key, index, editting = res
        write_string_param(
            "volume_approach",
            vol_key.default_approach(self.current_scheme, self.planned_volumes),
        )

    def get_widgets_config(self, data: dict):
        """Tell the front-end which widgets to show and their default values."""
        res = self._extract_volume_key(data)
        if res is None:
            return

        vol_key, index, editting = res
        # NOTE(review): this endpoint reads "approach", while write_volume
        # reads "volume_approach" — confirm the front-end sends both keys.
        approach = LayoutApproach(data.get("approach"))
        write_bool_param("path_edit_needed", vol_key.needs_mount_path())
        write_bool_param("size_slider_needed", vol_key.needs_size(approach))

        if vol_key.needs_size(approach):
            write_string_param("min_size", str(vol_key.min_size().convert_to(GiB)))
            write_string_param(
                "max_size",
                str(
                    vol_key.max_size(self.planned_volumes, self.size_limit).convert_to(
                        GiB
                    )
                ),
            )
            write_string_param(
                "default_size",
                str(
                    vol_key.default_size(
                        self.planned_volumes, self.size_limit
                    ).convert_to(GiB)
                ),
            )

        write_bool_param("mntopts_edit_needed", vol_key.needs_mntopts())
        if vol_key.needs_mntopts():
            write_string_param(
                "default_mntopts", vol_key.default_mntopts(self.planned_volumes)
            )

        if editting:
            write_string_param("editting_label", _(vol_key.description))

    def write_volume(self, data: dict):
        """Add or update a planned volume from a front-end message.

        Validates approach, size bounds and duplicates before committing.
        """
        res = self._extract_volume_key(data)
        if res is None:
            return

        vol_key, index, editting = res

        proper_key = vol_key
        if not editting and vol_key == VolumeKey.from_id("custom_mountpoint"):
            # We are adding a custom mountpoint
            # So the proper volume key is the one with
            # requested mount path
            proper_key = VolumeKey.from_mountpoint(data.get("volume_mount_path"))

        if not editting:
            # Check if this Volume not planned already
            # Not to add duplicate
            if proper_key._try_find_in(self.planned_volumes):
                write_error(_("Volume already exists!"))
                return

        approach = LayoutApproach(data.get("volume_approach"))
        volume_size = None
        mntopts = None

        if approach not in vol_key.layout_approach_options(self.current_scheme):
            # Theoretically may happen if clicked too fast on different
            # buttons (ex.: Edit + Remove)
            write_error(_(STRANGE_ERROR_MESSAGE))
            return

        if proper_key.needs_size(approach):
            volume_size = int_to_GiB(validate_type(data, "volume_size", int))
            if volume_size == Size(0):
                write_error(
                    _(
                        "Please free up space for the volume by "
                        "reducing other volumes, choosing alternative disks, "
                        "or expanding the size limit."
                    )
                )
                return

            if volume_size < proper_key.min_size():
                write_error(
                    _("The volume size should be at least %s")
                    % str(proper_key.min_size())
                )
                return

            if volume_size > proper_key.max_size(self.planned_volumes, self.size_limit):
                # Almost never happens
                # Just to be sure
                write_error(_(STRANGE_ERROR_MESSAGE))
                return

        if proper_key.needs_mntopts():
            mntopts = data.get("volume_mntopts")

        new_volume = VolumeEntry(
            key=proper_key,
            approach=approach,
            mount_path=proper_key.get_mountpoint(),
            desired_size=volume_size,
            mntopts=mntopts,
        )

        if editting:
            self.planned_volumes[index] = new_volume
        else:
            self.planned_volumes.append(new_volume)

    def _check_size_constraints(self, volumes: Iterable[VolumeEntry]) -> bool:
        """Verify per-volume minimums and the overall size limit.

        Reports an error and returns False on the first violation.
        """
        all_size = Size(0)
        for volume in volumes:
            all_size += volume.desired_size
            if volume.desired_size < volume.key.min_size():
                write_error(
                    _("The size of '%s' volume is less then minimal (%s)!")
                    % (volume.key.id, str(volume.key.min_size()))
                )
                return False

        if Size(all_size) > self.size_limit:
            write_error(
                _(
                    "No enough space for installation with selected disks, volumes or parameters!"
                )
            )
            return False

        return True

    def _schedule_btrfs_layout(self, raid_level: btrfs.RaidLevel):
        """Schedule a btrfs layout on the selected disks.

        Returns the PVE storage.cfg text on success, None on error.
        """
        if not self.current_disks:
            write_error(_("At least one disk should be selected!"))
            return

        try:
            raid_level.validate_setup(len(self.current_disks))
        except BTRFSError as e:
            write_error(str(e))
            return

        # btrfs volume -> list of btrfs subvolumes
        volumes_map: Dict[VolumeEntry, List[VolumeEntry]] = {}

        root_volume = VolumeKey.from_id("root_btrfs_vol")._try_find_in(
            self.planned_volumes
        )
        if root_volume is None:
            write_error(_("No 'root' volume scheduled!"))
            return

        if not root_volume.desired_size:
            write_error(_("The size of 'root' volume is zero!"))
            return

        # Find all btrfs Volumes. Skip zero-sized (there actually shouldn't be any)
        for volume in self.planned_volumes:
            if volume.approach == LayoutApproach.BTRFS_VOLUME and volume.desired_size:
                volumes_map[volume] = [volume]

        # Sort all subvolumes into 'root' volume
        for volume in self.planned_volumes:
            if volume.approach == LayoutApproach.BTRFS_SUBVOLUME:
                volumes_map[root_volume].append(volume)

        # Check sizes correctness (just in case)
        if not self._check_size_constraints(volumes_map.keys()):
            return

        mng.select_disks([disk.device_id for disk in self.current_disks])
        for disk in self.current_disks:
            mng.wipe_disk(disk)

        boots = mng.schedule_boot_partition(self.current_disks)
        scheduled_root_subvol = None

        for volume in volumes_map.keys():
            scheduled_volume = mng.schedule_btrfs_raid_volume(
                disks=self.current_disks,
                raid_level=raid_level,
                max_size=volume.desired_size,
                grow=True,
            )
            for subvolume in volumes_map[volume]:
                scheduled_subvol = mng.schedule_btrfs_subvolume(
                    scheduled_volume,
                    mountpoint=subvolume.mount_path,
                    mountopts=subvolume.mntopts,
                )
                if subvolume == root_volume:
                    # Remember which one contains "/"
                    scheduled_root_subvol = scheduled_subvol

                    # Add local-btrfs storage for PVE distro
                    if short_product_name == "pve":
                        mng.schedule_btrfs_subvolume(
                            scheduled_volume,
                            mountpoint="/var/lib/pve/local-btrfs",
                        )

        # Now let's ensure that boot partition will be mounted
        # exactly on disk, that '/' is mounted on

        # XXX This may be subject to change in Blivet
        # TODO: Better redisign it to something more general
        # (or re-do schedule_boot_partition() without mounting)
        #
        # subvolume -> volume -> partition -> first disk in RAID
        root_disk = scheduled_root_subvol.parents[0].parents[0].req_disks[0]

        # Let's prune mountpoints for all boots, except needed
        # They are all have same mountpoint due to schedule_boot_partition()
        # It's actually not something bad, but if we leave it as is
        # only one of them will be mounted (some 'random' disk, which may be
        # counterintuitive for user)
        for boot in boots:
            # In our algo there only one parent disk anyway
            boot_disk = boot.req_disks[0]

            if not hasattr(boot.format, "mountpoint"):
                log.debug(
                    "%s: unmountable boot partition format (biosboot), no need to erase mountpoint"
                    % boot_disk.name
                )
                continue

            if boot_disk.device_id != root_disk.device_id:
                boot.format.mountpoint = None

        return get_btrfs_config()

    def _schedule_lvm_layout(self):
        """Schedule an LVM layout on the first selected disk.

        Returns the PVE storage.cfg text on success, None on error.
        """
        if not self.current_disks:
            # Actually never happens (just to be sure)
            write_error(_("At least one disk should be selected!"))
            return

        target_disk = self.current_disks[0]
        size_limit = self.size_limit

        if target_disk.size < recommended_root_size:
            write_error(
                _("Selected disk size is less then recommended for installation!")
            )
            return

        # only integers allowed, so it's okay to set size_limit=34GiB, when hdsize=33.15GiB
        if size_limit > target_disk.size.round_to_nearest(GiB, ROUND_UP):
            write_error(_("Chosen used space limit is bigger then disk itself!"))
            return

        root_volume = VolumeKey.from_id("root")._try_find_in(self.planned_volumes)
        if root_volume is None:
            write_error(_("No 'root' volume scheduled!"))
            return

        if not root_volume.desired_size:
            write_error(_("The size of 'root' volume is zero!"))
            return

        lvs: List[VolumeEntry] = []
        partitions: List[VolumeEntry] = []

        # Find all LVs and Separate Partitions Volumes. Skip zero-sized
        for volume in self.planned_volumes:
            if volume.desired_size:
                if volume.approach == LayoutApproach.LOGICAL_VOLUME:
                    lvs.append(volume)
                elif volume.approach == LayoutApproach.PARTITION:
                    partitions.append(volume)

        # Check sizes correctness (just in case)
        if not self._check_size_constraints(itertools.chain(lvs, partitions)):
            return

        mng.select_disks([target_disk.device_id])

        mng.wipe_disk(target_disk)

        boot = mng.schedule_boot_partition([target_disk])
        fs_type = self.current_scheme
        # Schedule Separate Partitions
        for partition in partitions:
            mng._schedule_partition(
                [target_disk],
                PartSpec(
                    mountpoint=partition.mount_path,
                    mountopts=partition.mntopts,
                    fstype=fs_type,
                    max_size=partition.desired_size,
                    grow=True,
                ),
            )

        pvsize = self.size_limit - sum(
            [partition.desired_size for partition in partitions]
        )
        # Create Physical Volume on the rest of disk's allowed space
        # Allocating through max_size and growing, so we wouldn't need to
        # deal with alignment and etc.
        # (we can't actually take all disk space)
        pv = mng.schedule_lvm_pv(target_disk, max_size=pvsize)

        # Create Volume Group with specific name ('pve' for pve distro)
        vg = mng.schedule_lvm_vg([pv], short_product_name)
        storage_cfg = get_local_config()
        for lv in lvs:
            # We are setting initial size to 1GiB, so LVs will be able to grow
            # (blivet grows LVs based on their size, so we need to set something)
            if lv.key == VolumeKey.from_id("data"):
                data = mng.schedule_lvm_thin_pool(
                    vg,
                    pool_name="data",
                    size=Size("1GiB"),
                    grow=True,
                    max_size=lv.desired_size,
                )
                storage_cfg = get_lvm_thin_config(data.lvname, vg.name)
            elif lv.key == VolumeKey.from_id("swap"):
                mng.schedule_lvm_lv(
                    vg, lv_name="swap", fstype="swap", size=lv.desired_size, grow=False
                )
            else:
                mng.schedule_lvm_lv(
                    vg,
                    lv_name=lv.key.id,
                    fstype=fs_type,
                    mountpoint=lv.mount_path,
                    mountopts=lv.mntopts,
                    size=Size("1GiB"),
                    grow=True,
                    max_size=lv.desired_size,
                )

        return storage_cfg

    def schedule_storage_layout(self):
        """Schedule the whole layout per the chosen scheme, rolling back on error."""
        try:
            btrfs_lvl = self.current_scheme.is_btrfs()
            if btrfs_lvl:
                storage_cfg = self._schedule_btrfs_layout(btrfs_lvl)
            else:
                storage_cfg = self._schedule_lvm_layout()

            # Write storage.cfg to /tmp/pve (It's okay if not needed)
            save_pve_storage_cfg(storage_cfg)
        except Exception as e:
            mng.reset_configuration()
            write_error("%s (%s)" % (_(STRANGE_ERROR_MESSAGE), str(e)))
            log.exception("Schedule failed")


############################################################


#################### Helpers functions #####################


def available_schemes():
    """Emit every supported storage scheme as an enum item for the front-end."""
    for scheme in Scheme:
        write_enum_item(scheme, scheme.pretty_str())


def available_disks():
    """List usable disks for the front-end, or report an error when none exist."""
    disks = mng.get_usable_disks()
    if not disks:
        write_error(_("No disks detected! Please connect some devices and retry!"))
        return

    for disk in disks:
        label = "%s (~%s, %s)" % (
            disk.path,
            disk.size.round_to_nearest(GiB, ROUND_UP),
            disk.model,
        )
        write_enum_item(disk.device_id, label)


def read_disks(data: dict):
    """Resolve the ';'-separated disk ids in data["disks"] to device objects.

    Returns an empty list when no disks were submitted.
    """
    disklist = data.get("disks")
    if not disklist:
        return []

    # Comprehension instead of map(lambda ...): same result, clearer intent
    return [mng.find_device(device_id) for device_id in disklist.split(";")]


def read_index(message: dict, name: str, upper_limit: int) -> int:
    index = message.get(name)
    if not index:
        return None

    index = int(index)
    if index == -1 or index >= upper_limit:
        return None

    return index


############################################################


class ValidationError(Exception):
    """Failed to cast data from front-end. Used to unwind stack back to on_message().

    Raised by validate_type() after the error has already been reported
    via write_error(), so handlers can simply let it propagate.

    TODO: This semantic probably should be implemented in write_error of alterator-python-functions
    """

    pass


def validate_type(message: dict, name: str, cls):
    """Cast the field *name* of a front-end message to *cls*.

    Writes an error and raises ValidationError on failure, unwinding the
    stack back to on_message().
    """
    try:
        return cls(message.get(name))
    except Exception:
        # Narrowed from a bare `except:`, which also swallowed
        # KeyboardInterrupt / SystemExit
        write_error("'%s' should have a type of %s" % (name, cls.__name__))
        raise ValidationError


def int_to_GiB(size: int) -> Size:
    """Convert a whole number of GiB into a blivet Size."""
    return Size(f"{size}GiB")


def apply_storage_layout():
    """Execute the previously scheduled blivet actions on the real devices.

    On failure the staged configuration is rolled back and the error is
    reported to the front-end and the log.
    """
    try:
        mng.apply_configuration()
    except Exception as e:
        mng.reset_configuration()
        write_error("%s (%s)" % (_(STRANGE_ERROR_MESSAGE), str(e)))
        log.exception("Apply failed")


############################################################

# Single module-wide interface state shared by all message handlers
state = StorageInterfaceState()


def message_handler(message: dict):
    action = message.get("action")
    object = message.get("_objects")

    if action == "list":
        match object:
            case "available_schemes":
                available_schemes()
            case "available_disks":
                available_disks()

            case "possible_volumes":
                state.possible_volumes()

            case "list_current_volumes":
                state.list_current_volumes()

            case "list_approach_options":
                state.list_approach_options(message)

    elif action == "write":
        match object:
            case "schedule_storage_layout":
                state.schedule_storage_layout()
            case "apply_storage_layout":
                apply_storage_layout()
            case "reset_configuration":
                mng.reset_configuration()

            case "store_scheme":
                state.current_scheme = Scheme(message.get("scheme"))
            case "store_disks":
                state.store_disks(message)
            case "store_lvm_size_limit":
                state.size_limit = int_to_GiB(
                    validate_type(message, "lvm_size_limit", int)
                )
            case "reinit_volumes_list":
                state.reinit_volumes_list()

            case "write_volume":
                state.write_volume(message)

    elif action == "read":
        match object:
            case "reset_lvm_size_limit":
                state.reset_lvm_size_limit()

            case "get_default_approach":
                state.get_default_approach(message)
            case "get_widgets_config":
                state.get_widgets_config(message)

    elif action == "delete":
        match object:
            case "remove_volume":
                state.remove_volume(message)


def on_message(message: dict):
    """Top-level entry point: dispatch one message, reporting any failure."""
    try:
        message_handler(message)
    except Exception as exc:
        write_error(str(exc))
        log.exception("Backend failed")


# Hand control over to the alterator backend message loop
message_loop(on_message)
