updated the installer so that it should actually work
Some checks failed
Build / build (push) Failing after 5m23s

This commit is contained in:
tumillanino
2025-11-11 18:57:02 +11:00
parent a7bd4d9457
commit 33dd952ad4
583 changed files with 161651 additions and 67 deletions

2
.gitignore vendored
View File

@@ -4,5 +4,3 @@ miasma-installer
*.swo
*~
.DS_Store
CRUSH.md
.crush

View File

@@ -1,16 +1,28 @@
# Miasma OS Installer
Opinionated Arch Linux installer built with Go and Bubble Tea TUI framework.
This is still in very early development and not yet ready to use, if you happen to stumble across this page.
This installer creates a security-hardened, immutable Miasma OS with Cosmic Desktop.
## Features
- **Filesystem**: btrfs with optimized subvolume layout
- **Desktop**: Cosmic Desktop
- **Kernel**: linux-hardened
- **Filesystem**: btrfs with optimized subvolume layout and snapshots
- **Desktop**: Cosmic Desktop (Rust-based, memory-safe)
- **Kernel**: linux-hardened for security
- **Encryption**: LUKS2 by default (optional)
- **Boot**: UEFI only (systemd-boot)
- **Future**: Secure Boot support planned
- **Boot**: UEFI only (systemd-boot) with Secure Boot support
- **Configuration**: Generates archinstall-compatible JSON configuration files
- **Security**:
- OpenDoas instead of sudo
- Firejail and AppArmor for application containerization
- nftables firewall with restrictive defaults
- hardened-malloc from GrapheneOS
- XWayland-Satellite for better X11 app isolation
- **Additional Features**:
- Chromium with Wayland support
- Neovim with LazyVim configuration
- Kanagawa Dragon theme everywhere
- Alacritty terminal emulator
- Zsh shell with Oh My Zsh
## Quick Install
@@ -29,6 +41,14 @@ chmod +x miasma-installer
sudo ./miasma-installer
```
## Configuration Files
The installer generates archinstall-compatible configuration files:
- `user_configuration.json` - System configuration (disk layout, packages, services, etc.)
- `user_credentials.json` - User credentials (passwords, encryption keys)
These files are saved to `/var/log/archinstall/` on the installed system for reference and reproducibility.
## Development
### Prerequisites
@@ -71,6 +91,7 @@ make vet
.
├── config/ # Installation configuration and archinstall JSON generation
├── scripts/ # Post-install shell scripts
├── overlay/ # System overlays (Arkdep-like approach)
├── tui/
│ ├── model.go # Root state machine
│ ├── steps/ # Installation step models
@@ -85,6 +106,7 @@ Custom shell scripts in `scripts/` run after archinstall completes:
- `01-cosmic-setup.sh` - Cosmic Desktop configuration
- `02-hardening.sh` - System hardening tweaks
- `03-miasma-features.sh` - Additional Miasma OS features
Scripts execute in alphabetical order.

View File

@@ -1,5 +1,11 @@
package config
import (
"encoding/json"
"fmt"
"os"
)
type InstallConfig struct {
Disk string
EnableLUKS bool
@@ -12,21 +18,68 @@ type InstallConfig struct {
}
type ArchInstallConfig struct {
Version string `json:"version"`
Script string `json:"script"`
ArchinstallLanguage string `json:"archinstall-language"`
Bootloader string `json:"bootloader"`
Kernels []string `json:"kernels"`
Filesystem string `json:"filesystem"`
Disk string `json:"disk"`
Encryption *Encryption `json:"encryption,omitempty"`
Hostname string `json:"hostname"`
Timezone string `json:"timezone"`
Locale string `json:"locale"`
Desktop string `json:"desktop"`
Users []User `json:"users"`
LocaleConfig LocaleConfig `json:"locale_config"`
DiskConfig DiskConfig `json:"disk_config"`
ProfileConfig ProfileConfig `json:"profile_config"`
AuthConfig AuthConfig `json:"auth_config"`
Packages []string `json:"packages"`
Services []string `json:"services"`
}
type Encryption struct {
type LocaleConfig struct {
KbLayout string `json:"kb_layout"`
SysEnc string `json:"sys_enc"`
SysLang string `json:"sys_lang"`
}
type DiskConfig struct {
ConfigType string `json:"config_type"`
DeviceModifications []DeviceModification `json:"device_modifications"`
}
type DeviceModification struct {
Device string `json:"device"`
Partitions []Partition `json:"partitions"`
Wipe bool `json:"wipe"`
}
type Partition struct {
Btrfs []interface{} `json:"btrfs"`
Flags []string `json:"flags,omitempty"`
FsType string `json:"fs_type"`
Size Size `json:"size"`
MountOptions []string `json:"mount_options"`
Mountpoint string `json:"mountpoint,omitempty"`
ObjId string `json:"obj_id"`
Start Size `json:"start"`
Status string `json:"status"`
Type string `json:"type"`
Password string `json:"password"`
}
type Size struct {
SectorSize *int `json:"sector_size"`
Unit string `json:"unit"`
Value float64 `json:"value"`
}
type ProfileConfig struct {
Profile Profile `json:"profile"`
}
type Profile struct {
Main string `json:"main"`
Details []string `json:"details,omitempty"`
}
type AuthConfig struct {
Users []User `json:"users"`
}
type User struct {
@@ -36,15 +89,110 @@ type User struct {
}
func (c *InstallConfig) ToArchInstall() *ArchInstallConfig {
// Create partitions for EFI and root
partitions := []Partition{
{
Btrfs: []interface{}{},
Flags: []string{"boot"},
FsType: "fat32",
Size: Size{SectorSize: nil, Unit: "MiB", Value: 512},
MountOptions: []string{},
Mountpoint: "/boot",
ObjId: "efi-partition-id",
Start: Size{SectorSize: nil, Unit: "MiB", Value: 1},
Status: "create",
Type: "primary",
},
{
Btrfs: []interface{}{},
Flags: []string{},
FsType: "btrfs",
Size: Size{SectorSize: nil, Unit: "Percent", Value: 100},
MountOptions: []string{},
Mountpoint: "/",
ObjId: "root-partition-id",
Start: Size{SectorSize: nil, Unit: "MiB", Value: 513},
Status: "create",
Type: "primary",
},
}
// Miasma OS specific packages based on ProductDescription.md
miasmaPackages := []string{
// Base system
"base",
"base-devel",
"linux-hardened",
"linux-firmware",
"btrfs-progs",
// Text editors
"neovim",
// Shell
"zsh",
// Terminal
"alacritty",
// System tools
"tmux",
"git",
"networkmanager",
// Security tools
"opendoas",
"firejail",
"apparmor",
"nftables", // Using nftables instead of ufw as requested
"hardened-malloc",
// Browsers
"chromium", // Using regular chromium instead of ungoogled-chromium as requested
// Wayland support
"xwayland-satellite",
// Cosmic Desktop
"cosmic-session",
"cosmic-greeter",
"cosmic-files",
"cosmic-edit",
"cosmic-term",
"cosmic-store",
"cosmic-settings",
}
ac := &ArchInstallConfig{
Bootloader: "systemd-boot",
Version: "2.8.6", // Match archinstall version
Script: "guided",
ArchinstallLanguage: "English",
Bootloader: "Systemd-boot",
Kernels: []string{"linux-hardened"},
Filesystem: "btrfs",
Disk: c.Disk,
Hostname: c.Hostname,
Timezone: c.Timezone,
Locale: c.Locale,
Desktop: "cosmic",
LocaleConfig: LocaleConfig{
KbLayout: "us",
SysEnc: "UTF-8",
SysLang: "en_US",
},
DiskConfig: DiskConfig{
ConfigType: "default_layout",
DeviceModifications: []DeviceModification{
{
Device: c.Disk,
Partitions: partitions,
Wipe: true,
},
},
},
ProfileConfig: ProfileConfig{
Profile: Profile{
Main: "Desktop",
Details: []string{"Cosmic"},
},
},
AuthConfig: AuthConfig{
Users: []User{
{
Username: c.Username,
@@ -52,14 +200,52 @@ func (c *InstallConfig) ToArchInstall() *ArchInstallConfig {
Sudo: true,
},
},
}
if c.EnableLUKS {
ac.Encryption = &Encryption{
Type: "luks2",
Password: c.RootPassword,
}
},
Packages: miasmaPackages,
Services: []string{
"NetworkManager",
"nftables",
"apparmor",
"cosmic-greeter",
},
}
return ac
}
// SaveArchInstallConfig renders the installer state as an
// archinstall-compatible configuration and writes it to filepath
// as indented JSON.
func (c *InstallConfig) SaveArchInstallConfig(filepath string) error {
	payload, err := json.MarshalIndent(c.ToArchInstall(), "", " ")
	if err != nil {
		return fmt.Errorf("failed to marshal config: %w", err)
	}
	// 0644: this file holds system configuration only; credentials are
	// written separately by SaveUserCredentials with tighter permissions.
	return os.WriteFile(filepath, payload, 0644)
}
// SaveUserCredentials writes only the sensitive installation values —
// the user account password and, when set, the root/encryption
// password — to filepath as indented JSON with 0600 permissions.
func (c *InstallConfig) SaveUserCredentials(filepath string) error {
	creds := make(map[string]interface{})
	creds["users"] = []map[string]string{
		{
			"username": c.Username,
			"password": c.UserPassword,
		},
	}
	if c.RootPassword != "" {
		if c.EnableLUKS {
			// The root password doubles as the LUKS passphrase.
			creds["encryption_password"] = c.RootPassword
		}
		creds["root_enc_password"] = c.RootPassword
	}
	payload, err := json.MarshalIndent(creds, "", " ")
	if err != nil {
		return fmt.Errorf("failed to marshal credentials: %w", err)
	}
	// 0600: readable by the owner only — this file contains secrets.
	return os.WriteFile(filepath, payload, 0600)
}

145
config/config_test.go Normal file
View File

@@ -0,0 +1,145 @@
package config
import (
	"encoding/json"
	"os"
	"path/filepath"
	"testing"
)
// TestArchInstallConfigGeneration verifies that ToArchInstall maps the
// installer state onto the archinstall-compatible structure: bootloader,
// kernel list, disk layout, profile, user, and the Miasma package set.
func TestArchInstallConfigGeneration(t *testing.T) {
	config := &InstallConfig{
		Disk:         "/dev/sda",
		EnableLUKS:   true,
		Hostname:     "miasma-test",
		Username:     "testuser",
		RootPassword: "rootpass",
		UserPassword: "userpass",
		Timezone:     "UTC",
		Locale:       "en_US.UTF-8",
	}
	archConfig := config.ToArchInstall()

	// Top-level fields.
	if archConfig.Version == "" {
		t.Error("Version should not be empty")
	}
	if archConfig.Bootloader != "Systemd-boot" {
		t.Errorf("Expected bootloader to be 'Systemd-boot', got '%s'", archConfig.Bootloader)
	}
	if len(archConfig.Kernels) != 1 || archConfig.Kernels[0] != "linux-hardened" {
		t.Errorf("Expected kernels to contain 'linux-hardened', got %v", archConfig.Kernels)
	}
	if archConfig.Hostname != "miasma-test" {
		t.Errorf("Expected hostname 'miasma-test', got '%s'", archConfig.Hostname)
	}

	// Disk configuration: one wiped device with EFI + root partitions.
	if len(archConfig.DiskConfig.DeviceModifications) != 1 {
		t.Errorf("Expected 1 device modification, got %d", len(archConfig.DiskConfig.DeviceModifications))
	}
	deviceMod := archConfig.DiskConfig.DeviceModifications[0]
	if deviceMod.Device != "/dev/sda" {
		t.Errorf("Expected device '/dev/sda', got '%s'", deviceMod.Device)
	}
	if len(deviceMod.Partitions) != 2 {
		t.Errorf("Expected 2 partitions, got %d", len(deviceMod.Partitions))
	}

	// Profile configuration.
	if archConfig.ProfileConfig.Profile.Main != "Desktop" {
		t.Errorf("Expected profile main 'Desktop', got '%s'", archConfig.ProfileConfig.Profile.Main)
	}

	// User configuration.
	if len(archConfig.AuthConfig.Users) != 1 {
		t.Errorf("Expected 1 user, got %d", len(archConfig.AuthConfig.Users))
	}
	user := archConfig.AuthConfig.Users[0]
	if user.Username != "testuser" {
		t.Errorf("Expected username 'testuser', got '%s'", user.Username)
	}

	// Package list: build a lookup set once so each expected package is a
	// single O(1) membership check instead of a nested linear scan.
	installed := make(map[string]struct{}, len(archConfig.Packages))
	for _, pkg := range archConfig.Packages {
		installed[pkg] = struct{}{}
	}
	expectedPackages := []string{
		"base", "base-devel", "linux-hardened", "linux-firmware", "btrfs-progs",
		"neovim", "zsh", "alacritty", "tmux", "git", "opendoas",
		"firejail", "apparmor", "nftables", "hardened-malloc",
		"chromium", "xwayland-satellite",
		"cosmic-session", "cosmic-greeter", "cosmic-files",
		"cosmic-edit", "cosmic-term", "cosmic-store", "cosmic-settings",
	}
	for _, pkg := range expectedPackages {
		if _, ok := installed[pkg]; !ok {
			t.Errorf("Expected package '%s' in package list", pkg)
		}
	}
}
// TestSaveConfigurationFiles verifies that the configuration and
// credentials writers create files with parseable, round-trippable JSON.
//
// It uses t.TempDir() instead of fixed /tmp paths: the previous manual
// os.Remove cleanup was skipped whenever an earlier t.Fatalf fired, and
// fixed paths can collide between concurrent test runs. t.TempDir() is
// per-test, unique, and removed automatically by the testing framework.
func TestSaveConfigurationFiles(t *testing.T) {
	config := &InstallConfig{
		Disk:         "/dev/sda",
		EnableLUKS:   false,
		Hostname:     "miasma-test",
		Username:     "testuser",
		RootPassword: "rootpass",
		UserPassword: "userpass",
		Timezone:     "UTC",
		Locale:       "en_US.UTF-8",
	}
	dir := t.TempDir()
	configPath := filepath.Join(dir, "test_user_config.json")
	credsPath := filepath.Join(dir, "test_user_creds.json")

	// Test saving configuration.
	if err := config.SaveArchInstallConfig(configPath); err != nil {
		t.Fatalf("Failed to save user configuration: %v", err)
	}
	// Test saving credentials.
	if err := config.SaveUserCredentials(credsPath); err != nil {
		t.Fatalf("Failed to save user credentials: %v", err)
	}

	// Verify the files were created.
	if _, err := os.Stat(configPath); os.IsNotExist(err) {
		t.Error("User configuration file was not created")
	}
	if _, err := os.Stat(credsPath); os.IsNotExist(err) {
		t.Error("User credentials file was not created")
	}

	// Verify the configuration file content round-trips through JSON.
	data, err := os.ReadFile(configPath)
	if err != nil {
		t.Fatalf("Failed to read configuration file: %v", err)
	}
	var parsedConfig ArchInstallConfig
	if err := json.Unmarshal(data, &parsedConfig); err != nil {
		t.Fatalf("Failed to parse configuration file: %v", err)
	}
	if parsedConfig.Hostname != "miasma-test" {
		t.Errorf("Expected hostname 'miasma-test', got '%s'", parsedConfig.Hostname)
	}
	// No manual cleanup needed: t.TempDir() handles removal.
}

View File

@@ -0,0 +1,13 @@
# http://editorconfig.org
# See coding conventions in CONTRIBUTING.md
root = true
[*]
charset = utf-8
end_of_line = lf
insert_final_newline = true
[*.py]
indent_style = tab
indent_size = 4
trim_trailing_whitespace = true

View File

@@ -0,0 +1,8 @@
[flake8]
count = True
ignore = W191,W503,E704,E203
max-complexity = 40
max-line-length = 160
show-source = True
statistics = True
exclude = .git,__pycache__,build,docs,actions-runner

View File

@@ -0,0 +1,7 @@
# As per https://docs.github.com/en/github/creating-cloning-and-archiving-repositories/about-code-owners#example-of-a-codeowners-file
* @Torxed
# Any PKGBUILD changes should tag grazzolini
/PKGBUILDs/ @grazzolini
/PKGBUILD @grazzolini

View File

@@ -0,0 +1,85 @@
name: bug report
description: archinstall crashed or could not install properly?
body:
- type: markdown
attributes:
value: >
Please read the ~5 known issues first:
https://archinstall.archlinux.page/help/known_issues.html
- type: markdown
attributes:
value: >
**NOTE: Always try the latest official ISO**
- type: input
id: iso
attributes:
label: Which ISO version are you using?
description: 'Always use the latest ISO version'
placeholder: '"2024-12-01" or "Dec 1:st"'
validations:
required: true
- type: textarea
id: bug-report
attributes:
label: The installation log
description: 'note: located at `/var/log/archinstall/install.log`'
placeholder: |
Hardware model detected: Dell Inc. Precision 7670; UEFI mode: True
Processor model detected: 12th Gen Intel(R) Core(TM) i7-12850HX
Memory statistics: 31111048 available out of 32545396 total installed
Disk states before installing: {'blockdevices': ... }
Testing connectivity to the Arch Linux mirrors ...
...
render: json
validations:
required: true
- type: markdown
attributes:
value: >
**Note**: Assuming you have network connectivity,
you can easily post the installation log using the following command:
`curl -F'file=@/var/log/archinstall/install.log' https://0x0.st`
- type: textarea
id: freeform
attributes:
label: describe the problem
description: >
Please describe your issue as best as you can.
And please consider personal preferences vs what the recommended
steps/values are in https://wiki.archlinux.org/title/Installation_guide
as we try to abide by them as best we can.
value: |
#### Description of the issue
I was installing on X hardware ...
Then X Y Z happened and archinstall crashed ...
#### Virtual machine config:
```xml
<domain type="kvm">
<name>my-arch-machine</name>
...
</devices>
</domain>
```
```console
/usr/bin/qemu-system-x86_64 -name guest=my-arch-machine,debug-threads=on -object ...
```
validations:
required: true
- type: markdown
attributes:
value: >
**Note**: Feel free to modify the textarea above as you wish.
But it will greatly help us in testing if we can generate the specific qemu command line,
for instance via:
`sudo virsh domxml-to-native qemu-argv --domain my-arch-machine`

View File

@@ -0,0 +1,17 @@
name: feature request
description: a new feature!
body:
- type: markdown
attributes:
value: >
Please read our short mission statement before requesting more features:
https://github.com/archlinux/archinstall?tab=readme-ov-file#mission-statement
- type: textarea
id: freeform
attributes:
label: describe the request
description: >
Feel free to write any feature you think others might benefit from:
validations:
required: true

View File

@@ -0,0 +1,12 @@
on: [ push, pull_request ]
name: Bandit security checkup
jobs:
bandit:
runs-on: ubuntu-latest
container:
image: archlinux/archlinux:latest
steps:
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5
- run: pacman --noconfirm -Syu bandit
- name: Security checkup with Bandit
run: bandit -r archinstall || exit 0

View File

@@ -0,0 +1,23 @@
on: [ push, pull_request ]
name: flake8 linting
jobs:
flake8:
runs-on: ubuntu-latest
container:
image: archlinux/archlinux:latest
steps:
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5
- name: Prepare arch
run: |
pacman-key --init
pacman --noconfirm -Sy archlinux-keyring
pacman --noconfirm -Syyu
pacman --noconfirm -Sy python-pip python-pyparted pkgconfig gcc
- run: pip install --break-system-packages --upgrade pip
# this will install the exact version of flake8 that is in the pyproject.toml file
- name: Install archinstall dependencies
run: pip install --break-system-packages .[dev]
- run: python --version
- run: flake8 --version
- name: Lint with flake8
run: flake8

View File

@@ -0,0 +1,41 @@
name: documentation
on:
push:
paths:
- "docs/**"
pull_request:
paths:
- "docs/**"
workflow_dispatch:
permissions:
contents: write
jobs:
docs:
runs-on: ubuntu-latest
container:
image: archlinux/archlinux:latest
options: --privileged
steps:
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5
- uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6
- name: Install pre-dependencies
run: |
pacman -Sy --noconfirm tree git python-pyparted python-setuptools python-sphinx python-sphinx_rtd_theme python-build python-installer python-wheel
- name: Sphinx build
run: |
sphinx-build docs _build
- name: Deploy to GitHub Pages
uses: peaceiris/actions-gh-pages@4f9cc6602d3f66b9c108549d475ec49e8ef4d45e # v4
if: ${{ github.event_name != 'pull_request' }}
with:
publish_branch: gh-pages
github_token: ${{ secrets.GITHUB_TOKEN }}
publish_dir: _build/
force_orphan: true
enable_jekyll: false # This is required to preserve _static (and thus the theme)
cname: archinstall.archlinux.page

View File

@@ -0,0 +1,39 @@
# This workflow will build an Arch Linux ISO file with the commit on it
name: Build Arch ISO with ArchInstall Commit
on:
push:
branches:
- master
- main # In case we adopt this convention in the future
pull_request:
paths-ignore:
- 'docs/**'
- '**.editorconfig'
- '**.gitignore'
- '**.md'
- 'LICENSE'
- 'PKGBUILD'
release:
types:
- created
jobs:
build:
runs-on: ubuntu-latest
container:
image: archlinux/archlinux:latest
options: --privileged
steps:
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5
- run: pwd
- run: find .
- run: cat /etc/os-release
- run: pacman-key --init
- run: pacman --noconfirm -Sy archlinux-keyring
- run: ./build_iso.sh
- uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5
with:
name: Arch Live ISO
path: /tmp/archlive/out/*.iso

View File

@@ -0,0 +1,23 @@
on: [ push, pull_request ]
name: mypy type checking
jobs:
mypy:
runs-on: ubuntu-latest
container:
image: archlinux/archlinux:latest
steps:
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5
- name: Prepare arch
run: |
pacman-key --init
pacman --noconfirm -Sy archlinux-keyring
pacman --noconfirm -Syyu
pacman --noconfirm -Sy python-pip python-pyparted pkgconfig gcc
- run: pip install --break-system-packages --upgrade pip
# this will install the exact version of mypy that is in the pyproject.toml file
- name: Install archinstall dependencies
run: pip install --break-system-packages .[dev]
- run: python --version
- run: mypy --version
- name: run mypy
run: mypy --config-file pyproject.toml

View File

@@ -0,0 +1,22 @@
on: [ push, pull_request ]
name: Pylint linting
jobs:
pylint:
runs-on: ubuntu-latest
container:
image: archlinux/archlinux:latest
steps:
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5
- name: Prepare arch
run: |
pacman-key --init
pacman --noconfirm -Sy archlinux-keyring
pacman --noconfirm -Syyu
pacman --noconfirm -Sy python-pip python-pyparted pkgconfig gcc
- run: pip install --break-system-packages --upgrade pip
- name: Install Pylint
run: pip install --break-system-packages .[dev]
- run: python --version
- run: pylint --version
- name: Lint with Pylint
run: pylint .

View File

@@ -0,0 +1,21 @@
on: [ push, pull_request ]
name: pytest test validation
jobs:
pytest:
runs-on: ubuntu-latest
container:
image: archlinux/archlinux:latest
options: --privileged
steps:
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5
- name: Prepare arch
run: |
pacman-key --init
pacman --noconfirm -Sy archlinux-keyring
pacman --noconfirm -Syyu
pacman --noconfirm -Sy python-pip python-pyparted pkgconfig gcc
- run: pip install --break-system-packages --upgrade pip
- name: Install archinstall dependencies
run: pip install --break-system-packages .[dev]
- name: Test with pytest
run: pytest

View File

@@ -0,0 +1,39 @@
# This workflow will build Python packages on every commit.
name: Build archinstall
on: [ push, pull_request ]
jobs:
deploy:
runs-on: ubuntu-latest
container:
image: archlinux/archlinux:latest
options: --privileged
steps:
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5
- name: Prepare arch
run: |
pacman-key --init
pacman --noconfirm -Sy archlinux-keyring
pacman --noconfirm -Syyu
pacman --noconfirm -Sy python-uv python-setuptools python-pip
pacman --noconfirm -Sy python-pyparted python-pydantic
- name: Remove existing archinstall (if any)
run:
uv pip uninstall archinstall --break-system-packages --system
- name: Build archinstall
run: uv build --no-build-isolation --wheel
- name: Install archinstall
run: |
uv pip install dist/*.whl --break-system-packages --system --no-build --no-deps
- name: Run archinstall
run: |
python -V
archinstall --script guided -v
archinstall --script only_hd -v
archinstall --script minimal -v
- uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5
with:
name: archinstall
path: dist/*

View File

@@ -0,0 +1,33 @@
# This workflow will upload a Python Package when a release is created
# For more information see: https://help.github.com/en/actions/language-and-framework-guides/using-python-with-github-actions#publishing-to-package-registries
name: Upload archinstall to PyPi
on:
release:
types: [ published ]
jobs:
deploy:
runs-on: ubuntu-latest
permissions:
# IMPORTANT: this permission is mandatory for Trusted Publishing
id-token: write
container:
image: archlinux/archlinux:latest
options: --privileged
steps:
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5
- name: Prepare arch
run: |
pacman-key --init
pacman --noconfirm -Sy archlinux-keyring
pacman --noconfirm -Syyu
pacman --noconfirm -Sy python python-uv python-setuptools python-pip python-pyparted python-pydantic
- name: Build archinstall
run: |
uv build --no-build-isolation --wheel
- name: Publish archinstall to PyPi
run: |
uv publish --trusted-publishing always

View File

@@ -0,0 +1,9 @@
on: [ push, pull_request ]
name: ruff check formatting
jobs:
ruff_format_check:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5
- uses: astral-sh/ruff-action@57714a7c8a2e59f32539362ba31877a1957dded1 # v3.5.1
- run: ruff format --diff

View File

@@ -0,0 +1,8 @@
on: [ push, pull_request ]
name: ruff check linting
jobs:
ruff:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5
- uses: astral-sh/ruff-action@57714a7c8a2e59f32539362ba31877a1957dded1 # v3.5.1

View File

@@ -0,0 +1,28 @@
#on:
# push:
# paths:
# - 'archinstall/locales/**'
# pull_request:
# paths:
# - 'archinstall/locales/**'
#name: Verify local_generate script was run on translation changes
#jobs:
# translation-check:
# runs-on: ubuntu-latest
# container:
# image: archlinux/archlinux:latest
# steps:
# - uses: actions/checkout@v4
# - run: pacman --noconfirm -Syu python git diffutils
# - name: Verify all translation scripts are up to date
# run: |
# cd ..
# cp -r archinstall archinstall_orig
# cd archinstall/archinstall/locales
# bash locales_generator.sh 1> /dev/null
# cd ../../..
# git diff \
# --quiet --no-index --name-only \
# archinstall_orig/archinstall/locales \
# archinstall/archinstall/locales \
# || (echo "Translation files have not been updated after translation, please run ./locales_generator.sh once more and commit" && exit 1)

42
examples/archinstall/.gitignore vendored Normal file
View File

@@ -0,0 +1,42 @@
**/**__pycache__
SAFETY_LOCK
**/**old.*
**/**.img
**/**pwfile
**/**build
**/**dist
**/**.egg*
**/**.sh
!archinstall/locales/locales_generator.sh
**/**.egg-info/
**/**build/
**/**src/
**/**pkg/
**/**dist/
**/**archinstall.build/
**/**archinstall-v*/
**/**.pkg.*.xz
**/**archinstall-*.tar.gz
**/**.zst
**/**.network
**/**.target
**/**.qcow2
**/**.log
**/**.fd
/test*.py
**/archiso
/guided.py
venv
.venv
.idea/**
**/install.log
.DS_Store
**/cmd_history.txt
**/*.*~
/*.sig
/*.json
requirements.txt
/.gitconfig
/actions-runner
/cmd_output.txt
uv.lock

View File

@@ -0,0 +1,92 @@
# This file contains GitLab CI/CD configuration for the ArchInstall project.
# It defines several jobs that get run when a new commit is made, and is comparable to the GitHub workflows.
# There is an expectation that a runner exists that has the --privileged flag enabled for the build ISO job to run correctly.
# These jobs should leverage the same tag as that runner. If necessary, change the tag from 'docker' to the one it uses.
# All jobs will be run in the official archlinux container image, so we will declare that here.
image: archlinux/archlinux:latest
# This can be used to handle common actions. In this case, we do a pacman -Sy to make sure repos are ready to use.
before_script:
- pacman -Sy
stages:
- lint
- test
- build
- publish
mypy:
stage: lint
tags:
- docker
script:
- pacman --noconfirm -Syu python mypy
- mypy . --ignore-missing-imports || exit 0
flake8:
stage: lint
tags:
- docker
script:
- pacman --noconfirm -Syu python python-pip
- python -m pip install --upgrade pip
- pip install flake8
- flake8 . --count --select=E9,F63,F7 --show-source --statistics
- flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics
# We currently do not have unit tests implemented but this stage is written in anticipation of their future usage.
# When a stage name is preceded with a '.' it's treated as "disabled" by GitLab and is not executed, so it's fine for it to be declared.
.pytest:
stage: test
tags:
- docker
script:
- pacman --noconfirm -Syu python python-pip
- python -m pip install --upgrade pip
- pip install pytest
- pytest
# This stage might fail with exit code 137 on a shared runner. This is probably due to the CPU/memory consumption needed to run the build.
build_iso:
stage: build
tags:
- docker
script:
- pwd
- find .
- cat /etc/os-release
- mkdir -p /tmp/archlive/airootfs/root/archinstall-git; cp -r . /tmp/archlive/airootfs/root/archinstall-git
- echo "pip uninstall archinstall -y; cd archinstall-git; python setup.py install" > /tmp/archlive/airootfs/root/.zprofile
- echo "echo \"This is an unofficial ISO for development and testing of archinstall. No support will be provided.\"" >> /tmp/archlive/airootfs/root/.zprofile
- echo "echo \"This ISO was built from Git SHA $CI_COMMIT_SHA\"" >> /tmp/archlive/airootfs/root/.zprofile
- echo "echo \"Type archinstall to launch the installer.\"" >> /tmp/archlive/airootfs/root/.zprofile
- cat /tmp/archlive/airootfs/root/.zprofile
- pacman --noconfirm -S git archiso
- cp -r /usr/share/archiso/configs/releng/* /tmp/archlive
- echo -e "git\npython\npython-pip\npython-setuptools" >> /tmp/archlive/packages.x86_64
- find /tmp/archlive
- cd /tmp/archlive; mkarchiso -v -w work/ -o out/ ./
artifacts:
name: "Arch Live ISO"
paths:
- /tmp/archlive/out/*.iso
expire_in: 1 week
## This job only runs when a tag is created on the master branch. This is because we do not want to try to publish to PyPi every time we commit.
## The following CI/CD variables need to be set to the PyPi username and password in the GitLab project's settings for this stage to work.
# * FLIT_USERNAME
# * FLIT_PASSWORD
publish_pypi:
stage: publish
tags:
- docker
script:
- pacman --noconfirm -S python python-pip
- python -m pip install --upgrade pip
- pip install setuptools wheel flit
- flit
only:
- tags
except:
- branches

View File

@@ -0,0 +1,55 @@
default_stages: ['pre-commit']
repos:
- repo: https://github.com/astral-sh/ruff-pre-commit
rev: v0.14.4
hooks:
# fix unused imports and sort them
- id: ruff
args: ["--extend-select", "I", "--fix"]
# format the code
- id: ruff-format
# run the linter
- id: ruff
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v6.0.0
hooks:
# general hooks:
- id: check-added-large-files # Prevent giant files from being committed
args: ['--maxkb=5000']
- id: check-merge-conflict # Check for files that contain merge conflict strings
- id: check-symlinks # Checks for symlinks which do not point to anything
- id: check-yaml # Attempts to load all yaml files to verify syntax
- id: destroyed-symlinks # Detects symlinks which are changed to regular files
- id: detect-private-key # Checks for the existence of private keys
# Python specific hooks:
- id: check-ast # Simply check whether files parse as valid python
- id: check-docstring-first # Checks for a common error of placing code before the docstring
- repo: https://github.com/pycqa/flake8
rev: 7.3.0
hooks:
- id: flake8
args: [--config=.flake8]
fail_fast: true
- repo: https://github.com/pre-commit/mirrors-mypy
rev: v1.18.2
hooks:
- id: mypy
args: [
'--config-file=pyproject.toml'
]
fail_fast: true
additional_dependencies:
- pydantic
- pytest
- pytest-mock
- cryptography
- textual
- repo: local
hooks:
- id: pylint
name: pylint
entry: pylint
language: system
types: [python]
fail_fast: true
require_serial: true

View File

@@ -0,0 +1,6 @@
[distutils]
index-servers =
pypi
[pypi]
repository = https://upload.pypi.org/legacy/

View File

@@ -0,0 +1,15 @@
# .readthedocs.yml
# Read the Docs configuration file
# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details
version: 2
sphinx:
builder: html
configuration: docs/conf.py
fail_on_warning: true
build:
os: "ubuntu-22.04"
tools:
python: "3.12"

View File

@@ -0,0 +1,71 @@
# Contributing to archinstall
Any contributions through pull requests are welcome as this project aims to be a community based project to ease some Arch Linux installation steps.
Bear in mind that in the future this repo might be transferred to the official [GitLab repo under Arch Linux](https://gitlab.archlinux.org/archlinux/) *(if GitLab becomes open to the general public)*.
Therefore, guidelines and style changes to the code might come into effect as well as guidelines surrounding bug reporting and discussions.
## Branches
`master` is currently the default branch, and that's where all future feature work is being done. This means that `master` is a living entity and will most likely never be in a fully stable state.
For stable releases, please see the tagged commits.
Patch releases will be done against their own branches, branched from stable tagged releases and will be named according to the version it will become on release.
*(Patches to `v2.1.4` will be done on branch `v2.1.5` for instance)*.
## Discussions
Currently, questions, bugs and suggestions should be reported through the [GitHub issue tracker](https://github.com/archlinux/archinstall/issues).<br>
For less formal discussions there is also an [archinstall Discord server](https://discord.gg/aDeMffrxNg).
## Coding convention
ArchInstall's goal is to follow [PEP8](https://www.python.org/dev/peps/pep-0008/) as best as it can with some minor exceptions.<br>
The exceptions to PEP8 are:
* Archinstall uses [tabs instead of spaces](https://www.python.org/dev/peps/pep-0008/#tabs-or-spaces) simply to make it
easier for non-IDE developers to navigate the code *(Tab display-width should be equal to 4 spaces)*. Exception to the
rule are comments that need fine-tuned indentation for documentation purposes.
* [Line length](https://www.python.org/dev/peps/pep-0008/#maximum-line-length): a maximum line length of 160 characters is enforced via flake8.
* Archinstall should always be saved with **Unix-formatted line endings** and no other platform-specific formats.
* [String quotes](https://www.python.org/dev/peps/pep-0008/#string-quotes) follow PEP8, the exception being when
creating formatted strings, double-quoted strings are *preferred* but not required on the outer edges *(
Example: `f"Welcome {name}"` rather than `f'Welcome {name}'`)*.
Most of these style guidelines have been put into place after the fact *(in an attempt to clean up the code)*.<br>
There might therefore be older code which does not follow the coding convention and the code is subject to change.
## Git hooks
`archinstall` ships pre-commit hooks that make it easier to run checks such as `mypy`, `ruff check`, and `flake8` locally.
The checks are listed in `.pre-commit-config.yaml` and can be installed via
```
pre-commit install
```
This will install the pre-commit hook and run it every time a `git commit` is executed.
## Documentation
If you'd like to contribute to the documentation, refer to [this guide](docs/README.md) on how to build the documentation locally.
## Submitting Changes
Archinstall uses GitHub's pull-request workflow and all contributions in terms of code should be done through pull requests.<br>
Anyone interested in archinstall may review your code. One of the core developers will merge your pull request when they
think it is ready. For every pull request, we aim to promptly either merge it or say why it is not yet ready; if you go
a few days without a reply, please feel free to ping the thread by adding a new comment.
To get your pull request merged sooner, you should explain why you are making the change. For example, you can point to
a code sample that is outdated in terms of Arch Linux command lines. It is also helpful to add links to online
documentation or to the implementation of the code you are changing.
Also, do not squash your commits after you have submitted a pull request, as this erases context during review. We will
squash commits when the pull request is merged.
Maintainer:
* Anton Hvornum ([@Torxed](https://github.com/Torxed))
[Contributors](https://github.com/archlinux/archinstall/graphs/contributors)

View File

@@ -0,0 +1,674 @@
GNU GENERAL PUBLIC LICENSE
Version 3, 29 June 2007
Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
Preamble
The GNU General Public License is a free, copyleft license for
software and other kinds of works.
The licenses for most software and other practical works are designed
to take away your freedom to share and change the works. By contrast,
the GNU General Public License is intended to guarantee your freedom to
share and change all versions of a program--to make sure it remains free
software for all its users. We, the Free Software Foundation, use the
GNU General Public License for most of our software; it applies also to
any other work released this way by its authors. You can apply it to
your programs, too.
When we speak of free software, we are referring to freedom, not
price. Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
them if you wish), that you receive source code or can get it if you
want it, that you can change the software or use pieces of it in new
free programs, and that you know you can do these things.
To protect your rights, we need to prevent others from denying you
these rights or asking you to surrender the rights. Therefore, you have
certain responsibilities if you distribute copies of the software, or if
you modify it: responsibilities to respect the freedom of others.
For example, if you distribute copies of such a program, whether
gratis or for a fee, you must pass on to the recipients the same
freedoms that you received. You must make sure that they, too, receive
or can get the source code. And you must show them these terms so they
know their rights.
Developers that use the GNU GPL protect your rights with two steps:
(1) assert copyright on the software, and (2) offer you this License
giving you legal permission to copy, distribute and/or modify it.
For the developers' and authors' protection, the GPL clearly explains
that there is no warranty for this free software. For both users' and
authors' sake, the GPL requires that modified versions be marked as
changed, so that their problems will not be attributed erroneously to
authors of previous versions.
Some devices are designed to deny users access to install or run
modified versions of the software inside them, although the manufacturer
can do so. This is fundamentally incompatible with the aim of
protecting users' freedom to change the software. The systematic
pattern of such abuse occurs in the area of products for individuals to
use, which is precisely where it is most unacceptable. Therefore, we
have designed this version of the GPL to prohibit the practice for those
products. If such problems arise substantially in other domains, we
stand ready to extend this provision to those domains in future versions
of the GPL, as needed to protect the freedom of users.
Finally, every program is threatened constantly by software patents.
States should not allow patents to restrict development and use of
software on general-purpose computers, but in those that do, we wish to
avoid the special danger that patents applied to a free program could
make it effectively proprietary. To prevent this, the GPL assures that
patents cannot be used to render the program non-free.
The precise terms and conditions for copying, distribution and
modification follow.
TERMS AND CONDITIONS
0. Definitions.
"This License" refers to version 3 of the GNU General Public License.
"Copyright" also means copyright-like laws that apply to other kinds of
works, such as semiconductor masks.
"The Program" refers to any copyrightable work licensed under this
License. Each licensee is addressed as "you". "Licensees" and
"recipients" may be individuals or organizations.
To "modify" a work means to copy from or adapt all or part of the work
in a fashion requiring copyright permission, other than the making of an
exact copy. The resulting work is called a "modified version" of the
earlier work or a work "based on" the earlier work.
A "covered work" means either the unmodified Program or a work based
on the Program.
To "propagate" a work means to do anything with it that, without
permission, would make you directly or secondarily liable for
infringement under applicable copyright law, except executing it on a
computer or modifying a private copy. Propagation includes copying,
distribution (with or without modification), making available to the
public, and in some countries other activities as well.
To "convey" a work means any kind of propagation that enables other
parties to make or receive copies. Mere interaction with a user through
a computer network, with no transfer of a copy, is not conveying.
An interactive user interface displays "Appropriate Legal Notices"
to the extent that it includes a convenient and prominently visible
feature that (1) displays an appropriate copyright notice, and (2)
tells the user that there is no warranty for the work (except to the
extent that warranties are provided), that licensees may convey the
work under this License, and how to view a copy of this License. If
the interface presents a list of user commands or options, such as a
menu, a prominent item in the list meets this criterion.
1. Source Code.
The "source code" for a work means the preferred form of the work
for making modifications to it. "Object code" means any non-source
form of a work.
A "Standard Interface" means an interface that either is an official
standard defined by a recognized standards body, or, in the case of
interfaces specified for a particular programming language, one that
is widely used among developers working in that language.
The "System Libraries" of an executable work include anything, other
than the work as a whole, that (a) is included in the normal form of
packaging a Major Component, but which is not part of that Major
Component, and (b) serves only to enable use of the work with that
Major Component, or to implement a Standard Interface for which an
implementation is available to the public in source code form. A
"Major Component", in this context, means a major essential component
(kernel, window system, and so on) of the specific operating system
(if any) on which the executable work runs, or a compiler used to
produce the work, or an object code interpreter used to run it.
The "Corresponding Source" for a work in object code form means all
the source code needed to generate, install, and (for an executable
work) run the object code and to modify the work, including scripts to
control those activities. However, it does not include the work's
System Libraries, or general-purpose tools or generally available free
programs which are used unmodified in performing those activities but
which are not part of the work. For example, Corresponding Source
includes interface definition files associated with source files for
the work, and the source code for shared libraries and dynamically
linked subprograms that the work is specifically designed to require,
such as by intimate data communication or control flow between those
subprograms and other parts of the work.
The Corresponding Source need not include anything that users
can regenerate automatically from other parts of the Corresponding
Source.
The Corresponding Source for a work in source code form is that
same work.
2. Basic Permissions.
All rights granted under this License are granted for the term of
copyright on the Program, and are irrevocable provided the stated
conditions are met. This License explicitly affirms your unlimited
permission to run the unmodified Program. The output from running a
covered work is covered by this License only if the output, given its
content, constitutes a covered work. This License acknowledges your
rights of fair use or other equivalent, as provided by copyright law.
You may make, run and propagate covered works that you do not
convey, without conditions so long as your license otherwise remains
in force. You may convey covered works to others for the sole purpose
of having them make modifications exclusively for you, or provide you
with facilities for running those works, provided that you comply with
the terms of this License in conveying all material for which you do
not control copyright. Those thus making or running the covered works
for you must do so exclusively on your behalf, under your direction
and control, on terms that prohibit them from making any copies of
your copyrighted material outside their relationship with you.
Conveying under any other circumstances is permitted solely under
the conditions stated below. Sublicensing is not allowed; section 10
makes it unnecessary.
3. Protecting Users' Legal Rights From Anti-Circumvention Law.
No covered work shall be deemed part of an effective technological
measure under any applicable law fulfilling obligations under article
11 of the WIPO copyright treaty adopted on 20 December 1996, or
similar laws prohibiting or restricting circumvention of such
measures.
When you convey a covered work, you waive any legal power to forbid
circumvention of technological measures to the extent such circumvention
is effected by exercising rights under this License with respect to
the covered work, and you disclaim any intention to limit operation or
modification of the work as a means of enforcing, against the work's
users, your or third parties' legal rights to forbid circumvention of
technological measures.
4. Conveying Verbatim Copies.
You may convey verbatim copies of the Program's source code as you
receive it, in any medium, provided that you conspicuously and
appropriately publish on each copy an appropriate copyright notice;
keep intact all notices stating that this License and any
non-permissive terms added in accord with section 7 apply to the code;
keep intact all notices of the absence of any warranty; and give all
recipients a copy of this License along with the Program.
You may charge any price or no price for each copy that you convey,
and you may offer support or warranty protection for a fee.
5. Conveying Modified Source Versions.
You may convey a work based on the Program, or the modifications to
produce it from the Program, in the form of source code under the
terms of section 4, provided that you also meet all of these conditions:
a) The work must carry prominent notices stating that you modified
it, and giving a relevant date.
b) The work must carry prominent notices stating that it is
released under this License and any conditions added under section
7. This requirement modifies the requirement in section 4 to
"keep intact all notices".
c) You must license the entire work, as a whole, under this
License to anyone who comes into possession of a copy. This
License will therefore apply, along with any applicable section 7
additional terms, to the whole of the work, and all its parts,
regardless of how they are packaged. This License gives no
permission to license the work in any other way, but it does not
invalidate such permission if you have separately received it.
d) If the work has interactive user interfaces, each must display
Appropriate Legal Notices; however, if the Program has interactive
interfaces that do not display Appropriate Legal Notices, your
work need not make them do so.
A compilation of a covered work with other separate and independent
works, which are not by their nature extensions of the covered work,
and which are not combined with it such as to form a larger program,
in or on a volume of a storage or distribution medium, is called an
"aggregate" if the compilation and its resulting copyright are not
used to limit the access or legal rights of the compilation's users
beyond what the individual works permit. Inclusion of a covered work
in an aggregate does not cause this License to apply to the other
parts of the aggregate.
6. Conveying Non-Source Forms.
You may convey a covered work in object code form under the terms
of sections 4 and 5, provided that you also convey the
machine-readable Corresponding Source under the terms of this License,
in one of these ways:
a) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by the
Corresponding Source fixed on a durable physical medium
customarily used for software interchange.
b) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by a
written offer, valid for at least three years and valid for as
long as you offer spare parts or customer support for that product
model, to give anyone who possesses the object code either (1) a
copy of the Corresponding Source for all the software in the
product that is covered by this License, on a durable physical
medium customarily used for software interchange, for a price no
more than your reasonable cost of physically performing this
conveying of source, or (2) access to copy the
Corresponding Source from a network server at no charge.
c) Convey individual copies of the object code with a copy of the
written offer to provide the Corresponding Source. This
alternative is allowed only occasionally and noncommercially, and
only if you received the object code with such an offer, in accord
with subsection 6b.
d) Convey the object code by offering access from a designated
place (gratis or for a charge), and offer equivalent access to the
Corresponding Source in the same way through the same place at no
further charge. You need not require recipients to copy the
Corresponding Source along with the object code. If the place to
copy the object code is a network server, the Corresponding Source
may be on a different server (operated by you or a third party)
that supports equivalent copying facilities, provided you maintain
clear directions next to the object code saying where to find the
Corresponding Source. Regardless of what server hosts the
Corresponding Source, you remain obligated to ensure that it is
available for as long as needed to satisfy these requirements.
e) Convey the object code using peer-to-peer transmission, provided
you inform other peers where the object code and Corresponding
Source of the work are being offered to the general public at no
charge under subsection 6d.
A separable portion of the object code, whose source code is excluded
from the Corresponding Source as a System Library, need not be
included in conveying the object code work.
A "User Product" is either (1) a "consumer product", which means any
tangible personal property which is normally used for personal, family,
or household purposes, or (2) anything designed or sold for incorporation
into a dwelling. In determining whether a product is a consumer product,
doubtful cases shall be resolved in favor of coverage. For a particular
product received by a particular user, "normally used" refers to a
typical or common use of that class of product, regardless of the status
of the particular user or of the way in which the particular user
actually uses, or expects or is expected to use, the product. A product
is a consumer product regardless of whether the product has substantial
commercial, industrial or non-consumer uses, unless such uses represent
the only significant mode of use of the product.
"Installation Information" for a User Product means any methods,
procedures, authorization keys, or other information required to install
and execute modified versions of a covered work in that User Product from
a modified version of its Corresponding Source. The information must
suffice to ensure that the continued functioning of the modified object
code is in no case prevented or interfered with solely because
modification has been made.
If you convey an object code work under this section in, or with, or
specifically for use in, a User Product, and the conveying occurs as
part of a transaction in which the right of possession and use of the
User Product is transferred to the recipient in perpetuity or for a
fixed term (regardless of how the transaction is characterized), the
Corresponding Source conveyed under this section must be accompanied
by the Installation Information. But this requirement does not apply
if neither you nor any third party retains the ability to install
modified object code on the User Product (for example, the work has
been installed in ROM).
The requirement to provide Installation Information does not include a
requirement to continue to provide support service, warranty, or updates
for a work that has been modified or installed by the recipient, or for
the User Product in which it has been modified or installed. Access to a
network may be denied when the modification itself materially and
adversely affects the operation of the network or violates the rules and
protocols for communication across the network.
Corresponding Source conveyed, and Installation Information provided,
in accord with this section must be in a format that is publicly
documented (and with an implementation available to the public in
source code form), and must require no special password or key for
unpacking, reading or copying.
7. Additional Terms.
"Additional permissions" are terms that supplement the terms of this
License by making exceptions from one or more of its conditions.
Additional permissions that are applicable to the entire Program shall
be treated as though they were included in this License, to the extent
that they are valid under applicable law. If additional permissions
apply only to part of the Program, that part may be used separately
under those permissions, but the entire Program remains governed by
this License without regard to the additional permissions.
When you convey a copy of a covered work, you may at your option
remove any additional permissions from that copy, or from any part of
it. (Additional permissions may be written to require their own
removal in certain cases when you modify the work.) You may place
additional permissions on material, added by you to a covered work,
for which you have or can give appropriate copyright permission.
Notwithstanding any other provision of this License, for material you
add to a covered work, you may (if authorized by the copyright holders of
that material) supplement the terms of this License with terms:
a) Disclaiming warranty or limiting liability differently from the
terms of sections 15 and 16 of this License; or
b) Requiring preservation of specified reasonable legal notices or
author attributions in that material or in the Appropriate Legal
Notices displayed by works containing it; or
c) Prohibiting misrepresentation of the origin of that material, or
requiring that modified versions of such material be marked in
reasonable ways as different from the original version; or
d) Limiting the use for publicity purposes of names of licensors or
authors of the material; or
e) Declining to grant rights under trademark law for use of some
trade names, trademarks, or service marks; or
f) Requiring indemnification of licensors and authors of that
material by anyone who conveys the material (or modified versions of
it) with contractual assumptions of liability to the recipient, for
any liability that these contractual assumptions directly impose on
those licensors and authors.
All other non-permissive additional terms are considered "further
restrictions" within the meaning of section 10. If the Program as you
received it, or any part of it, contains a notice stating that it is
governed by this License along with a term that is a further
restriction, you may remove that term. If a license document contains
a further restriction but permits relicensing or conveying under this
License, you may add to a covered work material governed by the terms
of that license document, provided that the further restriction does
not survive such relicensing or conveying.
If you add terms to a covered work in accord with this section, you
must place, in the relevant source files, a statement of the
additional terms that apply to those files, or a notice indicating
where to find the applicable terms.
Additional terms, permissive or non-permissive, may be stated in the
form of a separately written license, or stated as exceptions;
the above requirements apply either way.
8. Termination.
You may not propagate or modify a covered work except as expressly
provided under this License. Any attempt otherwise to propagate or
modify it is void, and will automatically terminate your rights under
this License (including any patent licenses granted under the third
paragraph of section 11).
However, if you cease all violation of this License, then your
license from a particular copyright holder is reinstated (a)
provisionally, unless and until the copyright holder explicitly and
finally terminates your license, and (b) permanently, if the copyright
holder fails to notify you of the violation by some reasonable means
prior to 60 days after the cessation.
Moreover, your license from a particular copyright holder is
reinstated permanently if the copyright holder notifies you of the
violation by some reasonable means, this is the first time you have
received notice of violation of this License (for any work) from that
copyright holder, and you cure the violation prior to 30 days after
your receipt of the notice.
Termination of your rights under this section does not terminate the
licenses of parties who have received copies or rights from you under
this License. If your rights have been terminated and not permanently
reinstated, you do not qualify to receive new licenses for the same
material under section 10.
9. Acceptance Not Required for Having Copies.
You are not required to accept this License in order to receive or
run a copy of the Program. Ancillary propagation of a covered work
occurring solely as a consequence of using peer-to-peer transmission
to receive a copy likewise does not require acceptance. However,
nothing other than this License grants you permission to propagate or
modify any covered work. These actions infringe copyright if you do
not accept this License. Therefore, by modifying or propagating a
covered work, you indicate your acceptance of this License to do so.
10. Automatic Licensing of Downstream Recipients.
Each time you convey a covered work, the recipient automatically
receives a license from the original licensors, to run, modify and
propagate that work, subject to this License. You are not responsible
for enforcing compliance by third parties with this License.
An "entity transaction" is a transaction transferring control of an
organization, or substantially all assets of one, or subdividing an
organization, or merging organizations. If propagation of a covered
work results from an entity transaction, each party to that
transaction who receives a copy of the work also receives whatever
licenses to the work the party's predecessor in interest had or could
give under the previous paragraph, plus a right to possession of the
Corresponding Source of the work from the predecessor in interest, if
the predecessor has it or can get it with reasonable efforts.
You may not impose any further restrictions on the exercise of the
rights granted or affirmed under this License. For example, you may
not impose a license fee, royalty, or other charge for exercise of
rights granted under this License, and you may not initiate litigation
(including a cross-claim or counterclaim in a lawsuit) alleging that
any patent claim is infringed by making, using, selling, offering for
sale, or importing the Program or any portion of it.
11. Patents.
A "contributor" is a copyright holder who authorizes use under this
License of the Program or a work on which the Program is based. The
work thus licensed is called the contributor's "contributor version".
A contributor's "essential patent claims" are all patent claims
owned or controlled by the contributor, whether already acquired or
hereafter acquired, that would be infringed by some manner, permitted
by this License, of making, using, or selling its contributor version,
but do not include claims that would be infringed only as a
consequence of further modification of the contributor version. For
purposes of this definition, "control" includes the right to grant
patent sublicenses in a manner consistent with the requirements of
this License.
Each contributor grants you a non-exclusive, worldwide, royalty-free
patent license under the contributor's essential patent claims, to
make, use, sell, offer for sale, import and otherwise run, modify and
propagate the contents of its contributor version.
In the following three paragraphs, a "patent license" is any express
agreement or commitment, however denominated, not to enforce a patent
(such as an express permission to practice a patent or covenant not to
sue for patent infringement). To "grant" such a patent license to a
party means to make such an agreement or commitment not to enforce a
patent against the party.
If you convey a covered work, knowingly relying on a patent license,
and the Corresponding Source of the work is not available for anyone
to copy, free of charge and under the terms of this License, through a
publicly available network server or other readily accessible means,
then you must either (1) cause the Corresponding Source to be so
available, or (2) arrange to deprive yourself of the benefit of the
patent license for this particular work, or (3) arrange, in a manner
consistent with the requirements of this License, to extend the patent
license to downstream recipients. "Knowingly relying" means you have
actual knowledge that, but for the patent license, your conveying the
covered work in a country, or your recipient's use of the covered work
in a country, would infringe one or more identifiable patents in that
country that you have reason to believe are valid.
If, pursuant to or in connection with a single transaction or
arrangement, you convey, or propagate by procuring conveyance of, a
covered work, and grant a patent license to some of the parties
receiving the covered work authorizing them to use, propagate, modify
or convey a specific copy of the covered work, then the patent license
you grant is automatically extended to all recipients of the covered
work and works based on it.
A patent license is "discriminatory" if it does not include within
the scope of its coverage, prohibits the exercise of, or is
conditioned on the non-exercise of one or more of the rights that are
specifically granted under this License. You may not convey a covered
work if you are a party to an arrangement with a third party that is
in the business of distributing software, under which you make payment
to the third party based on the extent of your activity of conveying
the work, and under which the third party grants, to any of the
parties who would receive the covered work from you, a discriminatory
patent license (a) in connection with copies of the covered work
conveyed by you (or copies made from those copies), or (b) primarily
for and in connection with specific products or compilations that
contain the covered work, unless you entered into that arrangement,
or that patent license was granted, prior to 28 March 2007.
Nothing in this License shall be construed as excluding or limiting
any implied license or other defenses to infringement that may
otherwise be available to you under applicable patent law.
12. No Surrender of Others' Freedom.
If conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot convey a
covered work so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you may
not convey it at all. For example, if you agree to terms that obligate you
to collect a royalty for further conveying from those to whom you convey
the Program, the only way you could satisfy both those terms and this
License would be to refrain entirely from conveying the Program.
13. Use with the GNU Affero General Public License.
Notwithstanding any other provision of this License, you have
permission to link or combine any covered work with a work licensed
under version 3 of the GNU Affero General Public License into a single
combined work, and to convey the resulting work. The terms of this
License will continue to apply to the part which is the covered work,
but the special requirements of the GNU Affero General Public License,
section 13, concerning interaction through a network will apply to the
combination as such.
14. Revised Versions of this License.
The Free Software Foundation may publish revised and/or new versions of
the GNU General Public License from time to time. Such new versions will
be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.
Each version is given a distinguishing version number. If the
Program specifies that a certain numbered version of the GNU General
Public License "or any later version" applies to it, you have the
option of following the terms and conditions either of that numbered
version or of any later version published by the Free Software
Foundation. If the Program does not specify a version number of the
GNU General Public License, you may choose any version ever published
by the Free Software Foundation.
If the Program specifies that a proxy can decide which future
versions of the GNU General Public License can be used, that proxy's
public statement of acceptance of a version permanently authorizes you
to choose that version for the Program.
Later license versions may give you additional or different
permissions. However, no additional obligations are imposed on any
author or copyright holder as a result of your choosing to follow a
later version.
15. Disclaimer of Warranty.
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
16. Limitation of Liability.
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
SUCH DAMAGES.
17. Interpretation of Sections 15 and 16.
If the disclaimer of warranty and limitation of liability provided
above cannot be given local legal effect according to their terms,
reviewing courts shall apply local law that most closely approximates
an absolute waiver of all civil liability in connection with the
Program, unless a warranty or assumption of liability accompanies a
copy of the Program in return for a fee.
END OF TERMS AND CONDITIONS
How to Apply These Terms to Your New Programs
If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
free software which everyone can redistribute and change under these terms.
To do so, attach the following notices to the program. It is safest
to attach them to the start of each source file to most effectively
state the exclusion of warranty; and each file should have at least
the "copyright" line and a pointer to where the full notice is found.
<one line to give the program's name and a brief idea of what it does.>
Copyright (C) <year> <name of author>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
Also add information on how to contact you by electronic and paper mail.
If the program does terminal interaction, make it output a short
notice like this when it starts in an interactive mode:
<program> Copyright (C) <year> <name of author>
This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
This is free software, and you are welcome to redistribute it
under certain conditions; type `show c' for details.
The hypothetical commands `show w' and `show c' should show the appropriate
parts of the General Public License. Of course, your program's commands
might be different; for a GUI interface, you would use an "about box".
You should also get your employer (if you work as a programmer) or school,
if any, to sign a "copyright disclaimer" for the program, if necessary.
For more information on this, and how to apply and follow the GNU GPL, see
<https://www.gnu.org/licenses/>.
The GNU General Public License does not permit incorporating your program
into proprietary programs. If your program is a subroutine library, you
may consider it more useful to permit linking proprietary applications with
the library. If this is what you want to do, use the GNU Lesser General
Public License instead of this License. But first, please read
<https://www.gnu.org/licenses/why-not-lgpl.html>.

View File

@@ -0,0 +1,86 @@
# Maintainer: David Runge <dvzrv@archlinux.org>
# Maintainer: Giancarlo Razzolini <grazzolini@archlinux.org>
# Maintainer: Anton Hvornum <torxed@archlinux.org>
# Contributor: Anton Hvornum <anton@hvornum.se>
# Contributor: demostanis worlds <demostanis@protonmail.com>
# Package identity and upstream metadata.
pkgname=archinstall
pkgver=3.0.13
pkgrel=1
pkgdesc="Just another guided/automated Arch Linux installer with a twist"
arch=(any)
url="https://github.com/archlinux/archinstall"
license=(GPL-3.0-only)
# Runtime dependencies: filesystem/partitioning tooling plus the Python stack.
depends=(
'arch-install-scripts'
'btrfs-progs'
'coreutils'
'cryptsetup'
'dosfstools'
'e2fsprogs'
'glibc'
'kbd'
'libcrypt.so'
'libxcrypt'
'pciutils'
'procps-ng'
'python'
'python-cryptography'
'python-pydantic'
'python-pyparted'
'python-textual'
'systemd'
'util-linux'
'xfsprogs'
'lvm2'
'f2fs-tools'
'ntfs-3g'
)
# Build-time only: wheel build tooling, docs (sphinx) and linters.
makedepends=(
'python-build'
'python-installer'
'python-setuptools'
'python-sphinx'
'python-wheel'
'python-sphinx_rtd_theme'
'python-pylint'
'ruff'
)
optdepends=(
'python-systemd: Adds journald logging'
)
provides=(python-archinstall archinstall)
conflicts=(python-archinstall archinstall-git)
replaces=(python-archinstall archinstall-git)
# Sources: release tarball plus its detached PGP signature.
source=(
$pkgname-$pkgver.tar.gz::$url/archive/refs/tags/$pkgver.tar.gz
$pkgname-$pkgver.tar.gz.sig::$url/releases/download/$pkgver/$pkgname-$pkgver.tar.gz.sig
)
# NOTE(review): both checksum arrays are empty while two sources are listed —
# makepkg expects one entry per source (populate via updpkgsums or use 'SKIP');
# confirm before building.
sha512sums=()
b2sums=()
validpgpkeys=('8AA2213C8464C82D879C8127D4B58E897A929F2E') # torxed@archlinux.org
check() {
  # Lint the source tree; a non-zero exit aborts the build.
  # Quote the directory for consistency with package() and shellcheck SC2086.
  cd "$pkgname-$pkgver"
  ruff check
}
pkgver() {
  # Derive the package version from the source's __version__ string.
  # Quote the directory for consistency with package() and shellcheck SC2086.
  cd "$pkgname-$pkgver"
  awk '$1 ~ /^__version__/ {gsub("\"", ""); print $3}' archinstall/__init__.py
}
build() {
  # Build the Python wheel and the man page (docs via sphinx Makefile).
  # Quote the directory for consistency with package() and shellcheck SC2086.
  cd "$pkgname-$pkgver"
  python -m build --wheel --no-isolation
  PYTHONDONTWRITEBYTECODE=1 make man -C docs
}
package() {
  # Install the built wheel into the package root and ship the man page.
  cd "$pkgname-$pkgver"
  python -m installer --destdir="$pkgdir" dist/*.whl
  install -vDm 644 docs/_build/man/archinstall.1 -t "$pkgdir/usr/share/man/man1/"
}

View File

@@ -0,0 +1,218 @@
<!-- <div align="center"> -->
<img src="https://github.com/archlinux/archinstall/raw/master/docs/logo.png" alt="drawing" width="200"/>
<!-- </div> -->
# Arch Installer
[![Lint Python and Find Syntax Errors](https://github.com/archlinux/archinstall/actions/workflows/flake8.yaml/badge.svg)](https://github.com/archlinux/archinstall/actions/workflows/flake8.yaml)
Just another guided/automated [Arch Linux](https://wiki.archlinux.org/index.php/Arch_Linux) installer with a twist.
The installer also doubles as a python library to install Arch Linux and manage services, packages, and other things inside the installed system *(Usually from a live medium)*.
* archinstall [discord](https://discord.gg/aDeMffrxNg) server
* archinstall [#archinstall:matrix.org](https://matrix.to/#/#archinstall:matrix.org) Matrix channel
* archinstall [#archinstall@irc.libera.chat:6697](https://web.libera.chat/?channel=#archinstall)
* archinstall [documentation](https://archinstall.archlinux.page/)
# Installation & Usage
```shell
sudo pacman -S archinstall
```
Alternative ways to install are `git clone` the repository or `pip install --upgrade archinstall`.
## Running the [guided](https://github.com/archlinux/archinstall/blob/master/archinstall/scripts/guided.py) installer
Assuming you are on an Arch Linux live-ISO or installed via `pip`:
```shell
archinstall
```
## Running the [guided](https://github.com/archlinux/archinstall/blob/master/archinstall/scripts/guided.py) installer using `git`
```shell
# cd archinstall-git
# python -m archinstall
```
#### Advanced
Some additional options that most users do not need are hidden behind the `--advanced` flag.
## Running from a declarative configuration file or URL
`archinstall` can be run with a JSON configuration file. There are 2 different configuration files to consider,
the `user_configuration.json` contains all general installation configuration, whereas the `user_credentials.json`
contains the sensitive user configuration such as user password, root password, and encryption password.
An example of the user configuration file can be found here
[configuration file](https://github.com/archlinux/archinstall/blob/master/examples/config-sample.json)
and an example of the credentials configuration here
[credentials file](https://github.com/archlinux/archinstall/blob/master/examples/creds-sample.json).
**HINT:** The configuration files can be auto-generated by starting `archinstall`, configuring all desired menu
points and then going to `Save configuration`.
To load the configuration file into `archinstall` run the following command
```shell
archinstall --config <path to user config file or URL> --creds <path to user credentials config file or URL>
```
### Credentials configuration file encryption
By default all user account credentials are hashed with `yescrypt` and only the hash is stored in the saved `user_credentials.json` file.
This is not possible for disk encryption password which needs to be stored in plaintext to be able to apply it.
However, when selecting to save configuration files, `archinstall` will prompt for the option to encrypt the `user_credentials.json` file content.
A prompt will ask you to enter an encryption password to encrypt the file. When providing an encrypted `user_credentials.json` as an argument with `--creds <user_credentials.json>`
there are multiple ways to provide the decryption key:
* Provide the decryption key via the command line argument `--creds-decryption-key <password>`
* Store the encryption key in the environment variable `ARCHINSTALL_CREDS_DECRYPTION_KEY` which will be read automatically
* If none of the above is provided a prompt will be shown to enter the decryption key manually
# Help or Issues
If you come across any issues, kindly submit your issue here on Github or post your query in the
[discord](https://discord.gg/aDeMffrxNg) help channel.
When submitting an issue, please:
* Provide the stacktrace of the output if applicable
* Attach the `/var/log/archinstall/install.log` to the issue ticket. This helps us help you!
* To extract the log from the ISO image, one way is to use<br>
```shell
curl -F'file=@/var/log/archinstall/install.log' https://0x0.st
```
# Available Languages
Archinstall is available in different languages which have been contributed and are maintained by the community.
The language can be switched inside the installer (first menu entry). Bear in mind that not all languages provide
full translations as we rely on contributors to do the translations. Each language has an indicator that shows
how much has been translated.
Any contributions to the translations are more than welcome,
to get started please follow [the guide](https://github.com/archlinux/archinstall/blob/master/archinstall/locales/README.md)
## Fonts
The ISO does not ship with all fonts needed for different languages.
Fonts that use a character set other than Latin will not be displayed correctly. If you want to select
one of those languages, a proper font has to be set manually in the console.
All available console fonts can be found in `/usr/share/kbd/consolefonts` and set with `setfont LatGrkCyr-8x16`.
# Scripting your own installation
## Scripting interactive installation
For an example of a fully scripted, interactive installation please refer to the example
[interactive_installation.py](https://github.com/archlinux/archinstall/blob/master/archinstall/scripts/guided.py)
> **To create your own ISO with this script in it:** Follow [ArchISO](https://wiki.archlinux.org/index.php/archiso)'s guide on creating your own ISO.
## Script non-interactive automated installation
For an example of a fully scripted, automated installation please refer to the example
[full_automated_installation.py](https://github.com/archlinux/archinstall/blob/master/examples/full_automated_installation.py)
# Profiles
`archinstall` comes with a set of pre-configured profiles available for selection during the installation process.
- [Desktop](https://github.com/archlinux/archinstall/tree/master/archinstall/default_profiles/desktops)
- [Server](https://github.com/archlinux/archinstall/tree/master/archinstall/default_profiles/servers)
The profiles' definitions and the packages they will install can be directly viewed in the menu, or
[default profiles](https://github.com/archlinux/archinstall/tree/master/archinstall/default_profiles)
# Testing
## Using a Live ISO Image
If you want to test a commit, branch, or bleeding edge release from the repository using the standard Arch Linux Live ISO image,
replace the archinstall version with a newer one and execute the subsequent steps defined below.
*Note: When booting from a live USB, the space on the ramdisk is limited and may not be sufficient to allow
running a re-installation or upgrade of the installer. In case one runs into this issue, any of the following can be used
- Resize the root partition https://wiki.archlinux.org/title/Archiso#Adjusting_the_size_of_the_root_file_system
- The boot parameter `copytoram=y` (https://gitlab.archlinux.org/archlinux/mkinitcpio/mkinitcpio-archiso/-/blob/master/docs/README.bootparams#L26)
can be specified which will copy the root filesystem to tmpfs.*
1. You need a working network connection
2. Install the build requirements with `pacman -Sy; pacman -S git python-pip gcc pkgconf`
*(note that this may or may not work depending on your RAM and current state of the squashfs maximum filesystem free space)*
3. Uninstall the previous version of archinstall with `pip uninstall --break-system-packages archinstall`
4. Now clone the latest repository with `git clone https://github.com/archlinux/archinstall`
5. Enter the repository with `cd archinstall`
*At this stage, you can choose to check out a feature branch for instance with `git checkout v2.3.1-rc1`*
6. To run the source code, there are 2 different options:
- Run a specific branch version from source directly using `python -m archinstall`, in most cases this will work just fine, the
rare case it will not work is if the source has introduced any new dependencies that are not installed yet
- Installing the branch version with `pip install --break-system-packages .` and `archinstall`
## Without a Live ISO Image
To test this without a live ISO, the simplest approach is to use a local image and create a loop device.<br>
This can be done by installing `pacman -S arch-install-scripts util-linux` locally and doing the following:
# truncate -s 20G testimage.img
# losetup --partscan --show --find ./testimage.img
# pip install --upgrade archinstall
# python -m archinstall --script guided
# qemu-system-x86_64 -enable-kvm -machine q35,accel=kvm -device intel-iommu -cpu host -m 4096 -boot order=d -drive file=./testimage.img,format=raw -drive if=pflash,format=raw,readonly,file=/usr/share/ovmf/x64/OVMF.4m.fd -drive if=pflash,format=raw,readonly,file=/usr/share/ovmf/x64/OVMF.4m.fd
This will create a *20 GB* `testimage.img` and create a loop device which we can use to format and install to.<br>
`archinstall` is installed and executed in [guided mode](#docs-todo). Once the installation is complete, ~~you can use qemu/kvm to boot the test media.~~<br>
*(You'd actually need to do some EFI magic in order to point the EFI vars to the partition 0 in the test medium, so this won't work entirely out of the box, but that gives you a general idea of what we're going for here)*
There's also a [Building and Testing](https://github.com/archlinux/archinstall/wiki/Building-and-Testing) guide.<br>
It will go through everything from packaging, building and running *(with qemu)* the installer against a dev branch.
# FAQ
## Keyring out-of-date
For a description of the problem see https://archinstall.archlinux.page/help/known_issues.html#keyring-is-out-of-date-2213 and discussion in issue https://github.com/archlinux/archinstall/issues/2213.
For a quick fix the below command will install the latest keyrings
```pacman -Sy archlinux-keyring```
## How to dual boot with Windows
To install Arch Linux alongside an existing Windows installation using `archinstall`, follow these steps:
1. Ensure some unallocated space is available for the Linux installation after the Windows installation.
2. Boot into the ISO and run `archinstall`.
3. Choose `Disk configuration` -> `Manual partitioning`.
4. Select the disk on which Windows resides.
5. Select `Create a new partition`.
6. Choose a filesystem type.
7. Determine the start and end sectors for the new partition location (values can be suffixed with various units).
8. Assign the mountpoint `/` to the new partition.
9. Assign the `Boot/ESP` partition the mountpoint `/boot` from the partitioning menu.
10. Confirm your settings and exit to the main menu by choosing `Confirm and exit`.
11. Modify any additional settings for your installation as necessary.
12. Start the installation upon completion of setup.
# Mission Statement
Archinstall promises to ship a [guided installer](https://github.com/archlinux/archinstall/blob/master/archinstall/scripts/guided.py) that follows
the [Arch Linux Principles](https://wiki.archlinux.org/index.php/Arch_Linux#Principles) as well as a library to manage services, packages, and other Arch Linux aspects.
The guided installer ensures a user-friendly experience, offering optional selections throughout the process. Emphasizing its flexible nature, these options are never obligatory.
In addition, the decision to use the guided installer remains entirely with the user, reflecting the Linux philosophy of providing full freedom and flexibility.
---
Archinstall primarily functions as a flexible library for managing services, packages, and other elements within an Arch Linux system.
This core library is the backbone for the guided installer that Archinstall provides. It is also designed to be used by those who wish to script their own custom installations.
Therefore, Archinstall will try its best to not introduce any breaking changes except for major releases which may break backward compatibility after notifying about such changes.
# Contributing
Please see [CONTRIBUTING.md](https://github.com/archlinux/archinstall/blob/master/CONTRIBUTING.md)

View File

@@ -0,0 +1,166 @@
"""Arch Linux installer - guided, templates etc."""
import importlib
import os
import sys
import time
import traceback
from archinstall.lib.args import arch_config_handler
from archinstall.lib.disk.utils import disk_layouts
from archinstall.lib.network.wifi_handler import wifi_handler
from archinstall.lib.networking import ping
from archinstall.lib.packages.packages import check_package_upgrade
from archinstall.tui.ui.components import tui as ttui
from .lib.hardware import SysInfo
from .lib.output import FormattedOutput, debug, error, info, log, warn
from .lib.pacman import Pacman
from .lib.plugins import load_plugin, plugins
from .lib.translationhandler import Language, tr, translation_handler
from .tui.curses_menu import Tui
# @archinstall.plugin decorator hook to programmatically add
# plugins in runtime. Useful in profiles_bck and other things.
def plugin(f, *args, **kwargs) -> None:  # type: ignore[no-untyped-def]
    """Register *f* as a plugin under its function name.

    Extra *args/**kwargs are accepted for decorator compatibility but
    are not used.
    """
    plugins[f.__name__] = f
def _log_sys_info() -> None:
    """Log hardware and disk information to aid troubleshooting."""
    # Log various information about hardware before starting the installation. This might assist in troubleshooting
    debug(f'Hardware model detected: {SysInfo.sys_vendor()} {SysInfo.product_name()}; UEFI mode: {SysInfo.has_uefi()}')
    debug(f'Processor model detected: {SysInfo.cpu_model()}')
    debug(f'Memory statistics: {SysInfo.mem_available()} available out of {SysInfo.mem_total()} total installed')
    debug(f'Virtualization detected: {SysInfo.virtualization()}; is VM: {SysInfo.is_vm()}')
    debug(f'Graphics devices detected: {SysInfo._graphics_devices().keys()}')

    # For support reasons, we'll log the disk layout pre installation to match against post-installation layout
    debug(f'Disk states before installing:\n{disk_layouts()}')
def _check_online() -> None:
    """Verify network connectivity, offering Wi-Fi setup when offline.

    Pings a well-known public IP (Cloudflare DNS). On an unreachable
    network, the Wi-Fi setup flow is started unless --skip-wifi-check
    was given; the process exits if setup does not succeed.
    """
    try:
        ping('1.1.1.1')
    except OSError as ex:
        if 'Network is unreachable' in str(ex):
            if not arch_config_handler.args.skip_wifi_check:
                # NOTE(review): the return value of wifi_handler.setup() is
                # inverted here — presumably it returns a falsy value on
                # success; confirm against the wifi_handler implementation.
                success = not wifi_handler.setup()
                if not success:
                    exit(0)
def _fetch_arch_db() -> None:
    """Sync the pacman package database (-Sy); exits the process on failure."""
    info('Fetching Arch Linux package database...')
    try:
        Pacman.run('-Sy')
    except Exception as e:
        error('Failed to sync Arch Linux package database.')
        # A DNS resolution failure is by far the most common cause.
        if 'could not resolve host' in str(e).lower():
            error('Most likely due to a missing network connection or DNS issue.')
        error('Run archinstall --debug and check /var/log/archinstall/install.log for details.')
        debug(f'Failed to sync Arch Linux package database: {e}')
        exit(1)
def check_version_upgrade() -> str | None:
    """Check whether a newer archinstall package is available.

    Returns:
        The upgrade notification text when an upgrade exists,
        otherwise ``None``.
    """
    info('Checking version...')

    # The previous `upgrade = None` pre-assignment was dead code: the
    # variable was unconditionally reassigned on the next line.
    upgrade = check_package_upgrade('archinstall')
    if upgrade is None:
        debug('No archinstall upgrades found')
        return None

    text = tr('New version available') + f': {upgrade}'
    info(text)
    return text
def main() -> int:
    """
    This can either be run as the compiled and installed application: python setup.py install
    OR straight as a module: python -m archinstall
    In any case we will be attempting to load the provided script to be run from the scripts/ folder

    Returns an exit code: 0 on success, 1 when not run as root.
    """
    if '--help' in sys.argv or '-h' in sys.argv:
        arch_config_handler.print_help()
        return 0

    # Partitioning, mounting and chroot operations all require root.
    if os.getuid() != 0:
        print(tr('Archinstall requires root privileges to run. See --help for more.'))
        return 1

    _log_sys_info()

    ttui.global_header = 'Archinstall'

    # Network-dependent steps are skipped entirely in offline mode.
    if not arch_config_handler.args.offline:
        _check_online()
        _fetch_arch_db()

        if not arch_config_handler.args.skip_version_check:
            new_version = check_version_upgrade()
            if new_version:
                # Surface the upgrade hint in the TUI header and pause
                # briefly so the user can notice it before the menu opens.
                ttui.global_header = f'{ttui.global_header} {new_version}'
                info(new_version)
                time.sleep(3)

    script = arch_config_handler.get_script()
    mod_name = f'archinstall.scripts.{script}'
    # by loading the module we'll automatically run the script
    importlib.import_module(mod_name)
    return 0
def run_as_a_module() -> None:
    """Run main() with top-level error handling, then exit the process.

    Always restores the terminal state via Tui.shutdown(); any uncaught
    exception is logged together with a bug-report hint and turns the
    exit code into 1.
    """
    rc = 0
    exc = None
    try:
        rc = main()
    except Exception as e:
        exc = e
    finally:
        # restore the terminal to the original state
        Tui.shutdown()

    if exc:
        err = ''.join(traceback.format_exception(exc))
        error(err)
        text = (
            'Archinstall experienced the above error. If you think this is a bug, please report it to\n'
            'https://github.com/archlinux/archinstall and include the log file "/var/log/archinstall/install.log".\n\n'
            "Hint: To extract the log from a live ISO \ncurl -F'file=@/var/log/archinstall/install.log' https://0x0.st\n"
        )
        warn(text)
        rc = 1

    exit(rc)
__all__ = [
'FormattedOutput',
'Language',
'Pacman',
'SysInfo',
'Tui',
'arch_config_handler',
'debug',
'disk_layouts',
'error',
'info',
'load_plugin',
'log',
'plugin',
'translation_handler',
'warn',
]

View File

@@ -0,0 +1,4 @@
import archinstall

if __name__ == '__main__':
    # Entry point for `python -m archinstall`: runs the installer with
    # top-level exception handling and terminal restoration.
    archinstall.run_as_a_module()

View File

@@ -0,0 +1,80 @@
from typing import TYPE_CHECKING
from archinstall.lib.hardware import SysInfo
from archinstall.lib.models.application import Audio, AudioConfiguration
from archinstall.lib.models.users import User
from archinstall.lib.output import debug
if TYPE_CHECKING:
from archinstall.lib.installer import Installer
class AudioApp:
    """Installer helper for the audio stack (PipeWire or PulseAudio)."""

    @property
    def pulseaudio_packages(self) -> list[str]:
        """Packages needed for a PulseAudio-based setup."""
        return ['pulseaudio']

    @property
    def pipewire_packages(self) -> list[str]:
        """Packages needed for a PipeWire-based setup."""
        return [
            'pipewire',
            'pipewire-alsa',
            'pipewire-jack',
            'pipewire-pulse',
            'gst-plugin-pipewire',
            'libpulse',
            'wireplumber',
        ]

    def _enable_pipewire(
        self,
        install_session: 'Installer',
        users: list['User'] | None = None,
    ) -> None:
        """Enable the pipewire-pulse user units for every given user."""
        if users is None:
            return

        for user in users:
            name = user.username

            # Directory that holds the user's enabled systemd user units.
            unit_dir = install_session.target / 'home' / name / '.config' / 'systemd' / 'user' / 'default.target.wants'
            unit_dir.mkdir(parents=True, exist_ok=True)

            # Hand the whole home directory back to the user before linking.
            install_session.arch_chroot(f'chown -R {name}:{name} /home/{name}')

            # Symlink the pipewire-pulse service and socket into the wants dir.
            install_session.arch_chroot(
                f'ln -sf /usr/lib/systemd/user/pipewire-pulse.service /home/{name}/.config/systemd/user/default.target.wants/pipewire-pulse.service',
                run_as=name,
            )
            install_session.arch_chroot(
                f'ln -sf /usr/lib/systemd/user/pipewire-pulse.socket /home/{name}/.config/systemd/user/default.target.wants/pipewire-pulse.socket',
                run_as=name,
            )

    def install(
        self,
        install_session: 'Installer',
        audio_config: AudioConfiguration,
        users: list[User] | None = None,
    ) -> None:
        """Install the configured audio server plus any required firmware."""
        debug(f'Installing audio server: {audio_config.audio.value}')

        if audio_config.audio == Audio.NO_AUDIO:
            debug('No audio server selected, skipping installation.')
            return

        # Some hardware needs extra firmware blobs before sound works.
        if SysInfo.requires_sof_fw():
            install_session.add_additional_packages('sof-firmware')
        if SysInfo.requires_alsa_fw():
            install_session.add_additional_packages('alsa-firmware')

        if audio_config.audio == Audio.PIPEWIRE:
            install_session.add_additional_packages(self.pipewire_packages)
            self._enable_pipewire(install_session, users)
        elif audio_config.audio == Audio.PULSEAUDIO:
            install_session.add_additional_packages(self.pulseaudio_packages)

View File

@@ -0,0 +1,26 @@
from typing import TYPE_CHECKING
from archinstall.lib.output import debug
if TYPE_CHECKING:
from archinstall.lib.installer import Installer
class BluetoothApp:
    """Installer helper for Bluetooth support via the BlueZ stack."""

    @property
    def packages(self) -> list[str]:
        """Packages providing the BlueZ Bluetooth stack."""
        return ['bluez', 'bluez-utils']

    @property
    def services(self) -> list[str]:
        """Systemd services to enable for Bluetooth."""
        return ['bluetooth.service']

    def install(self, install_session: 'Installer') -> None:
        """Install the Bluetooth packages and enable the daemon service."""
        debug('Installing Bluetooth')
        install_session.add_additional_packages(self.packages)
        install_session.enable_service(self.services)

View File

@@ -0,0 +1,217 @@
# from typing import List, Dict, Optional, TYPE_CHECKING, Any
#
# from ..lib import menu
# from archinstall.lib.output import log, FormattedOutput
# from archinstall.lib.profile.profiles_handler import profile_handler
# from archinstall.default_profiles.profile import Profile, ProfileType, SelectResult, ProfileInfo, TProfile
#
# if TYPE_CHECKING:
# from archinstall.lib.installer import Installer
# _: Any
#
#
# class CustomProfileList(menu.ListManager):
# def __init__(self, prompt: str, profiles: List[TProfile]):
# self._actions = [
# str(_('Add profile')),
# str(_('Edit profile')),
# str(_('Delete profile'))
# ]
# super().__init__(prompt, profiles, [self._actions[0]], self._actions[1:])
#
# def reformat(self, data: List[TProfile]) -> Dict[str, Optional[TProfile]]:
# table = FormattedOutput.as_table(data)
# rows = table.split('\n')
#
# # these are the header rows of the table and do not map to any profile obviously
# # we're adding 2 spaces as prefix because the menu selector '> ' will be put before
# # the selectable rows so the header has to be aligned
# display_data: Dict[str, Optional[TProfile]] = {f' {rows[0]}': None, f' {rows[1]}': None}
#
# for row, profile in zip(rows[2:], data):
# row = row.replace('|', '\\|')
# display_data[row] = profile
#
# return display_data
#
# def selected_action_display(self, profile: TProfile) -> str:
# return profile.name
#
# def handle_action(
# self,
# action: str,
# entry: Optional['CustomTypeProfile'],
# data: List['CustomTypeProfile']
# ) -> List['CustomTypeProfile']:
# if action == self._actions[0]: # add
# new_profile = self._add_profile()
# if new_profile is not None:
# # in case a profile with the same name as an existing profile
# # was created we'll replace the existing one
# data = [d for d in data if d.name != new_profile.name]
# data += [new_profile]
# elif entry is not None:
# if action == self._actions[1]: # edit
# new_profile = self._add_profile(entry)
# if new_profile is not None:
# # we'll remove the original profile and add the modified version
# data = [d for d in data if d.name != entry.name and d.name != new_profile.name]
# data += [new_profile]
# elif action == self._actions[2]: # delete
# data = [d for d in data if d != entry]
#
# return data
#
# def _is_new_profile_name(self, name: str) -> bool:
# existing_profile = profile_handler.get_profile_by_name(name)
# if existing_profile is not None and existing_profile.profile_type != ProfileType.CustomType:
# return False
# return True
#
# def _add_profile(self, editing: Optional['CustomTypeProfile'] = None) -> Optional['CustomTypeProfile']:
# name_prompt = '\n\n' + str(_('Profile name: '))
#
# while True:
# profile_name = menu.TextInput(name_prompt, editing.name if editing else '').run().strip()
#
# if not profile_name:
# return None
#
# if not self._is_new_profile_name(profile_name):
# error_prompt = str(_("The profile name you entered is already in use. Try again"))
# print(error_prompt)
# else:
# break
#
# packages_prompt = str(_('Packages to be install with this profile (space separated, leave blank to skip): '))
# edit_packages = ' '.join(editing.packages) if editing else ''
# packages = menu.TextInput(packages_prompt, edit_packages).run().strip()
#
# services_prompt = str(_('Services to be enabled with this profile (space separated, leave blank to skip): '))
# edit_services = ' '.join(editing.services) if editing else ''
# services = menu.TextInput(services_prompt, edit_services).run().strip()
#
# choice = menu.Menu(
# str(_('Should this profile be enabled for installation?')),
# menu.Menu.yes_no(),
# skip=False,
# default_option=menu.Menu.no(),
# clear_screen=False,
# show_search_hint=False
# ).run()
#
# enable_profile = True if choice.value == menu.Menu.yes() else False
#
# profile = CustomTypeProfile(
# profile_name,
# enabled=enable_profile,
# packages=packages.split(' '),
# services=services.split(' ')
# )
#
# return profile
#
#
# # TODO
# # Still needs some ironing out
# class CustomProfile():
# def __init__(self):
# super().__init__(
# 'Custom',
# ProfileType.Custom,
# )
#
# def json(self) -> Dict[str, Any]:
# data: Dict[str, Any] = {'main': self.name, 'gfx_driver': self.gfx_driver, 'custom': []}
#
# for profile in self._current_selection:
# data['custom'].append({
# 'name': profile.name,
# 'packages': profile.packages,
# 'services': profile.services,
# 'enabled': profile.custom_enabled
# })
#
# return data
#
# def do_on_select(self) -> SelectResult:
# custom_profile_list = CustomProfileList('', profile_handler.get_custom_profiles())
# custom_profiles = custom_profile_list.run()
#
# # we'll first remove existing custom default_profiles with
# # the same name and then add the new ones this
# # will avoid errors of default_profiles with duplicate naming
# profile_handler.remove_custom_profiles(custom_profiles)
# profile_handler.add_custom_profiles(custom_profiles)
#
# self.set_current_selection(custom_profiles)
#
# if custom_profile_list.is_last_choice_cancel():
# return SelectResult.SameSelection
#
# enabled_profiles = [p for p in self._current_selection if p.custom_enabled]
# # in case we only created inactive default_profiles we wanna store them but
# # we want to reset the original setting
# if not enabled_profiles:
# return SelectResult.ResetCurrent
#
# return SelectResult.NewSelection
#
# def post_install(self, install_session: 'Installer'):
# for profile in self._current_selection:
# profile.post_install(install_session)
#
# def install(self, install_session: 'Installer'):
# driver_packages = self.gfx_driver_packages()
# install_session.add_additional_packages(driver_packages)
#
# for profile in self._current_selection:
# if profile.custom_enabled:
# log(f'Installing custom profile {profile.name}...')
#
# install_session.add_additional_packages(profile.packages)
# install_session.enable_service(profile.services)
#
# profile.install(install_session)
#
# def info(self) -> Optional[ProfileInfo]:
# enabled_profiles = [p for p in self._current_selection if p.custom_enabled]
# if enabled_profiles:
# details = ', '.join([p.name for p in enabled_profiles])
# gfx_driver = self.gfx_driver
# return ProfileInfo(self.name, details, gfx_driver)
#
# return None
#
# def reset(self):
# for profile in self._current_selection:
# profile.set_enabled(False)
#
# self.gfx_driver = None
#
#
# class CustomTypeProfile(Profile):
# def __init__(
# self,
# name: str,
# enabled: bool = False,
# packages: List[str] = [],
# services: List[str] = []
# ):
# super().__init__(
# name,
# ProfileType.CustomType,
# packages=packages,
# services=services,
# support_gfx_driver=True
# )
#
# self.custom_enabled = enabled
#
# def json(self) -> Dict[str, Any]:
# return {
# 'name': self.name,
# 'packages': self.packages,
# 'services': self.services,
# 'enabled': self.custom_enabled
# }

View File

@@ -0,0 +1,108 @@
from typing import TYPE_CHECKING, override
from archinstall.default_profiles.profile import GreeterType, Profile, ProfileType, SelectResult
from archinstall.lib.output import info
from archinstall.lib.profile.profiles_handler import profile_handler
from archinstall.tui.curses_menu import SelectMenu
from archinstall.tui.menu_item import MenuItem, MenuItemGroup
from archinstall.tui.result import ResultType
from archinstall.tui.types import FrameProperties, PreviewStyle
if TYPE_CHECKING:
from archinstall.lib.installer import Installer
class DesktopProfile(Profile):
def __init__(self, current_selection: list[Profile] = []) -> None:
super().__init__(
'Desktop',
ProfileType.Desktop,
current_selection=current_selection,
support_greeter=True,
)
@property
@override
def packages(self) -> list[str]:
return [
'nano',
'vim',
'openssh',
'htop',
'wget',
'iwd',
'wireless_tools',
'wpa_supplicant',
'smartmontools',
'xdg-utils',
]
@property
@override
def default_greeter_type(self) -> GreeterType | None:
combined_greeters: dict[GreeterType, int] = {}
for profile in self.current_selection:
if profile.default_greeter_type:
combined_greeters.setdefault(profile.default_greeter_type, 0)
combined_greeters[profile.default_greeter_type] += 1
if len(combined_greeters) >= 1:
return list(combined_greeters)[0]
return None
def _do_on_select_profiles(self) -> None:
for profile in self.current_selection:
profile.do_on_select()
@override
def do_on_select(self) -> SelectResult:
items = [
MenuItem(
p.name,
value=p,
preview_action=lambda x: x.value.preview_text(),
)
for p in profile_handler.get_desktop_profiles()
]
group = MenuItemGroup(items, sort_items=True, sort_case_sensitive=False)
group.set_selected_by_value(self.current_selection)
result = SelectMenu[Profile](
group,
multi=True,
allow_reset=True,
allow_skip=True,
preview_style=PreviewStyle.RIGHT,
preview_size='auto',
preview_frame=FrameProperties.max('Info'),
).run()
match result.type_:
case ResultType.Selection:
self.current_selection = result.get_values()
self._do_on_select_profiles()
return SelectResult.NewSelection
case ResultType.Skip:
return SelectResult.SameSelection
case ResultType.Reset:
return SelectResult.ResetCurrent
@override
def post_install(self, install_session: 'Installer') -> None:
for profile in self.current_selection:
profile.post_install(install_session)
@override
def install(self, install_session: 'Installer') -> None:
# Install common packages for all desktop environments
install_session.add_additional_packages(self.packages)
for profile in self.current_selection:
info(f'Installing profile {profile.name}...')
install_session.add_additional_packages(profile.packages)
install_session.enable_service(profile.services)
profile.install(install_session)

View File

@@ -0,0 +1,6 @@
from enum import Enum
class SeatAccess(Enum):
    """Seat-access backends offered to Wayland compositors.

    The enum value doubles as the package/service name that gets installed
    and enabled (see the compositor profiles' ``packages``/``services``).
    """

    # Standalone seat-management daemon.
    seatd = 'seatd'
    # Polkit-based authorization route.
    polkit = 'polkit'

View File

@@ -0,0 +1,64 @@
from typing import TYPE_CHECKING, override
from archinstall.default_profiles.profile import ProfileType
from archinstall.default_profiles.xorg import XorgProfile
if TYPE_CHECKING:
from archinstall.lib.installer import Installer
class AwesomeProfile(XorgProfile):
    """Profile for the awesome window manager (Xorg-based)."""

    def __init__(self) -> None:
        super().__init__('Awesome', ProfileType.WindowMgr)

    @property
    @override
    def packages(self) -> list[str]:
        """Awesome plus helper utilities, on top of the base Xorg packages."""
        return super().packages + [
            'awesome',
            'alacritty',
            'xorg-xinit',
            'xorg-xrandr',
            'xterm',
            'feh',
            'slock',
            'terminus-font',
            'gnu-free-fonts',
            'ttf-liberation',
            'xsel',
        ]

    @override
    def install(self, install_session: 'Installer') -> None:
        """Run the base Xorg install, then patch config files inside the chroot.

        Edits two files under ``install_session.target``:
        ``/etc/xdg/awesome/rc.lua`` (terminal swap) and
        ``/etc/X11/xinit/xinitrc`` (launch awesome instead of the stock demo).
        """
        super().install(install_session)

        # TODO: Copy a full configuration to ~/.config/awesome/rc.lua instead.
        with open(f'{install_session.target}/etc/xdg/awesome/rc.lua') as fh:
            awesome_lua = fh.read()

        # Replace xterm with alacritty for a smoother experience.
        awesome_lua = awesome_lua.replace('"xterm"', '"alacritty"')

        with open(f'{install_session.target}/etc/xdg/awesome/rc.lua', 'w') as fh:
            fh.write(awesome_lua)

        # TODO: Configure the right-click-menu to contain the above packages that were installed. (as a user config)

        # TODO: check if we selected a greeter,
        # but for now, awesome is intended to run without one.
        with open(f'{install_session.target}/etc/X11/xinit/xinitrc') as xinitrc:
            xinitrc_data = xinitrc.read()

        # Comment out the stock xinitrc demo clients (twm, xclock, xterm)
        # so they do not start alongside awesome.
        for line in xinitrc_data.split('\n'):
            if 'twm &' in line:
                xinitrc_data = xinitrc_data.replace(line, f'# {line}')
            if 'xclock' in line:
                xinitrc_data = xinitrc_data.replace(line, f'# {line}')
            if 'xterm' in line:
                xinitrc_data = xinitrc_data.replace(line, f'# {line}')

        # Launch awesome as the X session.
        xinitrc_data += '\n'
        xinitrc_data += 'exec awesome\n'

        with open(f'{install_session.target}/etc/X11/xinit/xinitrc', 'w') as xinitrc:
            xinitrc.write(xinitrc_data)

View File

@@ -0,0 +1,26 @@
from typing import override
from archinstall.default_profiles.profile import GreeterType, ProfileType
from archinstall.default_profiles.xorg import XorgProfile
class BspwmProfile(XorgProfile):
    """Profile for the bspwm tiling window manager."""

    def __init__(self) -> None:
        super().__init__('Bspwm', ProfileType.WindowMgr)

    @property
    @override
    def packages(self) -> list[str]:
        """bspwm core tools.

        NOTE: deliberately does not include ``super().packages`` — the
        original kept that call commented out.
        """
        pkgs = [
            'bspwm',
            'sxhkd',
            'dmenu',
            'xdo',
            'rxvt-unicode',
        ]
        return pkgs

    @property
    @override
    def default_greeter_type(self) -> GreeterType:
        """bspwm pairs with the LightDM greeter by default."""
        return GreeterType.Lightdm

View File

@@ -0,0 +1,25 @@
from typing import override
from archinstall.default_profiles.profile import GreeterType, ProfileType
from archinstall.default_profiles.xorg import XorgProfile
class BudgieProfile(XorgProfile):
    """Profile for the Budgie desktop environment."""

    def __init__(self) -> None:
        super().__init__('Budgie', ProfileType.DesktopEnv)

    @property
    @override
    def packages(self) -> list[str]:
        """Budgie plus its theme, terminal, file manager and icons."""
        pkgs = [
            'materia-gtk-theme',
            'budgie',
            'mate-terminal',
            'nemo',
            'papirus-icon-theme',
        ]
        return pkgs

    @property
    @override
    def default_greeter_type(self) -> GreeterType:
        """Budgie defaults to the slick LightDM greeter."""
        return GreeterType.LightdmSlick

View File

@@ -0,0 +1,29 @@
from typing import override
from archinstall.default_profiles.profile import GreeterType, ProfileType
from archinstall.default_profiles.xorg import XorgProfile
class CinnamonProfile(XorgProfile):
    """Profile for the Cinnamon desktop environment."""

    def __init__(self) -> None:
        super().__init__('Cinnamon', ProfileType.DesktopEnv)

    @property
    @override
    def packages(self) -> list[str]:
        """Cinnamon plus common desktop utilities."""
        pkgs = [
            'cinnamon',
            'system-config-printer',
            'gnome-keyring',
            'gnome-terminal',
            'engrampa',
            'gnome-screenshot',
            'gvfs-smb',
            'xed',
            'xdg-user-dirs-gtk',
        ]
        return pkgs

    @property
    @override
    def default_greeter_type(self) -> GreeterType:
        """Cinnamon defaults to the LightDM greeter."""
        return GreeterType.Lightdm

View File

@@ -0,0 +1,22 @@
from typing import override
from archinstall.default_profiles.profile import GreeterType, ProfileType
from archinstall.default_profiles.xorg import XorgProfile
class CosmicProfile(XorgProfile):
    """Profile for the COSMIC desktop (hidden behind --advanced)."""

    def __init__(self) -> None:
        super().__init__('cosmic-epoch', ProfileType.DesktopEnv, advanced=True)

    @property
    @override
    def packages(self) -> list[str]:
        pkgs = [
            'cosmic',
            'xdg-user-dirs',
        ]
        return pkgs

    @property
    @override
    def default_greeter_type(self) -> GreeterType:
        # NOTE(review): GreeterType.CosmicSession only exists when
        # '--advanced' is present on the command line (see the GreeterType
        # definition) — presumably this profile is only reachable in that
        # mode since it is constructed with advanced=True; confirm.
        return GreeterType.CosmicSession

View File

@@ -0,0 +1,22 @@
from typing import override
from archinstall.default_profiles.profile import GreeterType, ProfileType
from archinstall.default_profiles.xorg import XorgProfile
class CutefishProfile(XorgProfile):
    """Profile for the Cutefish desktop environment."""

    def __init__(self) -> None:
        super().__init__('Cutefish', ProfileType.DesktopEnv)

    @property
    @override
    def packages(self) -> list[str]:
        pkgs = [
            'cutefish',
            'noto-fonts',
        ]
        return pkgs

    @property
    @override
    def default_greeter_type(self) -> GreeterType:
        """Cutefish defaults to the SDDM greeter."""
        return GreeterType.Sddm

View File

@@ -0,0 +1,23 @@
from typing import override
from archinstall.default_profiles.profile import GreeterType, ProfileType
from archinstall.default_profiles.xorg import XorgProfile
class DeepinProfile(XorgProfile):
    """Profile for the Deepin desktop environment."""

    def __init__(self) -> None:
        super().__init__('Deepin', ProfileType.DesktopEnv)

    @property
    @override
    def packages(self) -> list[str]:
        """Deepin plus its terminal and editor."""
        pkgs = [
            'deepin',
            'deepin-terminal',
            'deepin-editor',
        ]
        return pkgs

    @property
    @override
    def default_greeter_type(self) -> GreeterType:
        """Deepin defaults to the LightDM greeter."""
        return GreeterType.Lightdm

View File

@@ -0,0 +1,22 @@
from typing import override
from archinstall.default_profiles.profile import GreeterType, ProfileType
from archinstall.default_profiles.xorg import XorgProfile
class EnlighenmentProfile(XorgProfile):
    """Profile for the Enlightenment window manager.

    NOTE(review): the class name is missing a 't' ('Enlighenment'); it is
    kept unchanged because renaming would break any importers of this class.
    """

    def __init__(self) -> None:
        super().__init__('Enlightenment', ProfileType.WindowMgr)

    @property
    @override
    def packages(self) -> list[str]:
        pkgs = [
            'enlightenment',
            'terminology',
        ]
        return pkgs

    @property
    @override
    def default_greeter_type(self) -> GreeterType:
        """Enlightenment defaults to the LightDM greeter."""
        return GreeterType.Lightdm

View File

@@ -0,0 +1,22 @@
from typing import override
from archinstall.default_profiles.profile import GreeterType, ProfileType
from archinstall.default_profiles.xorg import XorgProfile
class GnomeProfile(XorgProfile):
    """Profile for the GNOME desktop environment."""

    def __init__(self) -> None:
        super().__init__('GNOME', ProfileType.DesktopEnv)

    @property
    @override
    def packages(self) -> list[str]:
        pkgs = [
            'gnome',
            'gnome-tweaks',
        ]
        return pkgs

    @property
    @override
    def default_greeter_type(self) -> GreeterType:
        """GNOME defaults to GDM."""
        return GreeterType.Gdm

View File

@@ -0,0 +1,74 @@
from typing import override
from archinstall.default_profiles.desktops import SeatAccess
from archinstall.default_profiles.profile import GreeterType, ProfileType
from archinstall.default_profiles.xorg import XorgProfile
from archinstall.lib.translationhandler import tr
from archinstall.tui.curses_menu import SelectMenu
from archinstall.tui.menu_item import MenuItem, MenuItemGroup
from archinstall.tui.result import ResultType
from archinstall.tui.types import Alignment, FrameProperties
class HyprlandProfile(XorgProfile):
    """Profile for the Hyprland Wayland compositor."""

    def __init__(self) -> None:
        super().__init__('Hyprland', ProfileType.DesktopEnv)
        # Filled in by _ask_seat_access(); stores the seat backend name.
        self.custom_settings = {'seat_access': None}

    @property
    @override
    def packages(self) -> list[str]:
        """Hyprland and its companion tooling."""
        return [
            'hyprland',
            'dunst',
            'kitty',
            'uwsm',
            'dolphin',
            'wofi',
            'xdg-desktop-portal-hyprland',
            'qt5-wayland',
            'qt6-wayland',
            'polkit-kde-agent',
            'grim',
            'slurp',
        ]

    @property
    @override
    def default_greeter_type(self) -> GreeterType:
        """Hyprland defaults to SDDM."""
        return GreeterType.Sddm

    @property
    @override
    def services(self) -> list[str]:
        """Enable the chosen seat backend as a service, if any."""
        pref = self.custom_settings.get('seat_access', None)
        return [pref] if pref else []

    def _ask_seat_access(self) -> None:
        """Prompt for the seat-access backend (seatd or polkit)."""
        # need to activate seat service and add to seat group
        header = tr('Hyprland needs access to your seat (collection of hardware devices i.e. keyboard, mouse, etc)')
        header += '\n' + tr('Choose an option to give Hyprland access to your hardware') + '\n'

        group = MenuItemGroup(
            [MenuItem(s.value, value=s) for s in SeatAccess],
            sort_items=True,
        )
        group.set_default_by_value(self.custom_settings.get('seat_access', None))

        result = SelectMenu[SeatAccess](
            group,
            header=header,
            allow_skip=False,
            frame=FrameProperties.min(tr('Seat access')),
            alignment=Alignment.CENTER,
        ).run()

        if result.type_ == ResultType.Selection:
            self.custom_settings['seat_access'] = result.get_value().value

    @override
    def do_on_select(self) -> None:
        """Ask the seat-access question when this profile is selected."""
        self._ask_seat_access()
        return None

View File

@@ -0,0 +1,29 @@
from typing import override
from archinstall.default_profiles.profile import GreeterType, ProfileType
from archinstall.default_profiles.xorg import XorgProfile
class I3wmProfile(XorgProfile):
    """Profile for the i3 tiling window manager."""

    def __init__(self) -> None:
        super().__init__('i3-wm', ProfileType.WindowMgr)

    @property
    @override
    def packages(self) -> list[str]:
        """i3 core tools plus a LightDM greeter stack."""
        pkgs = [
            'i3-wm',
            'i3lock',
            'i3status',
            'i3blocks',
            'xss-lock',
            'xterm',
            'lightdm-gtk-greeter',
            'lightdm',
            'dmenu',
        ]
        return pkgs

    @property
    @override
    def default_greeter_type(self) -> GreeterType:
        """i3 defaults to the LightDM greeter (installed above)."""
        return GreeterType.Lightdm

View File

@@ -0,0 +1,71 @@
from typing import override
from archinstall.default_profiles.desktops import SeatAccess
from archinstall.default_profiles.profile import GreeterType, ProfileType
from archinstall.default_profiles.xorg import XorgProfile
from archinstall.lib.translationhandler import tr
from archinstall.tui.curses_menu import SelectMenu
from archinstall.tui.menu_item import MenuItem, MenuItemGroup
from archinstall.tui.result import ResultType
from archinstall.tui.types import Alignment, FrameProperties
class LabwcProfile(XorgProfile):
    """Profile for the labwc Wayland compositor."""

    def __init__(self) -> None:
        super().__init__(
            'Labwc',
            ProfileType.WindowMgr,
        )
        # Filled in by _ask_seat_access(); stores the seat backend name.
        self.custom_settings = {'seat_access': None}

    @property
    @override
    def packages(self) -> list[str]:
        """labwc plus a terminal; the chosen seat backend is appended."""
        seat = self.custom_settings.get('seat_access', None)
        extra = [seat] if seat else []
        return [
            'alacritty',
            'labwc',
        ] + extra

    @property
    @override
    def default_greeter_type(self) -> GreeterType:
        """labwc defaults to the LightDM greeter."""
        return GreeterType.Lightdm

    @property
    @override
    def services(self) -> list[str]:
        """Enable the chosen seat backend as a service, if any."""
        pref = self.custom_settings.get('seat_access', None)
        return [pref] if pref else []

    def _ask_seat_access(self) -> None:
        """Prompt for the seat-access backend (seatd or polkit)."""
        # need to activate seat service and add to seat group
        header = tr('labwc needs access to your seat (collection of hardware devices i.e. keyboard, mouse, etc)')
        header += '\n' + tr('Choose an option to give labwc access to your hardware') + '\n'

        group = MenuItemGroup(
            [MenuItem(s.value, value=s) for s in SeatAccess],
            sort_items=True,
        )
        group.set_default_by_value(self.custom_settings.get('seat_access', None))

        result = SelectMenu[SeatAccess](
            group,
            header=header,
            allow_skip=False,
            frame=FrameProperties.min(tr('Seat access')),
            alignment=Alignment.CENTER,
        ).run()

        if result.type_ == ResultType.Selection:
            self.custom_settings['seat_access'] = result.get_value().value

    @override
    def do_on_select(self) -> None:
        """Ask the seat-access question when this profile is selected."""
        self._ask_seat_access()
        return None

View File

@@ -0,0 +1,30 @@
from typing import override
from archinstall.default_profiles.profile import GreeterType, ProfileType
from archinstall.default_profiles.xorg import XorgProfile
class LxqtProfile(XorgProfile):
    """Profile for the LXQt desktop environment."""

    def __init__(self) -> None:
        super().__init__('Lxqt', ProfileType.DesktopEnv)

    # NOTE: SDDM is the only officially supported greeter for LXQt, so unlike other DEs, lightdm is not used here.
    # LXQt works with lightdm, but since this is not supported, we will not default to this.
    # https://github.com/lxqt/lxqt/issues/795

    @property
    @override
    def packages(self) -> list[str]:
        pkgs = [
            'lxqt',
            'breeze-icons',
            'oxygen-icons',
            'xdg-utils',
            'ttf-freefont',
            'l3afpad',
            'slock',
        ]
        return pkgs

    @property
    @override
    def default_greeter_type(self) -> GreeterType:
        """See the SDDM note above."""
        return GreeterType.Sddm

View File

@@ -0,0 +1,22 @@
from typing import override
from archinstall.default_profiles.profile import GreeterType, ProfileType
from archinstall.default_profiles.xorg import XorgProfile
class MateProfile(XorgProfile):
    """Profile for the MATE desktop environment."""

    def __init__(self) -> None:
        super().__init__('Mate', ProfileType.DesktopEnv)

    @property
    @override
    def packages(self) -> list[str]:
        pkgs = [
            'mate',
            'mate-extra',
        ]
        return pkgs

    @property
    @override
    def default_greeter_type(self) -> GreeterType:
        """MATE defaults to the LightDM greeter."""
        return GreeterType.Lightdm

View File

@@ -0,0 +1,79 @@
from typing import override
from archinstall.default_profiles.desktops import SeatAccess
from archinstall.default_profiles.profile import GreeterType, ProfileType
from archinstall.default_profiles.xorg import XorgProfile
from archinstall.lib.translationhandler import tr
from archinstall.tui.curses_menu import SelectMenu
from archinstall.tui.menu_item import MenuItem, MenuItemGroup
from archinstall.tui.result import ResultType
from archinstall.tui.types import Alignment, FrameProperties
class NiriProfile(XorgProfile):
    """Profile for the niri scrollable-tiling Wayland compositor."""

    def __init__(self) -> None:
        super().__init__(
            'Niri',
            ProfileType.WindowMgr,
        )
        # Filled in by _ask_seat_access(); stores the seat backend name.
        self.custom_settings = {'seat_access': None}

    @property
    @override
    def packages(self) -> list[str]:
        """niri and its companion tools; the chosen seat backend is appended."""
        seat = self.custom_settings.get('seat_access', None)
        extra = [seat] if seat else []
        return [
            'niri',
            'alacritty',
            'fuzzel',
            'mako',
            'xorg-xwayland',
            'waybar',
            'swaybg',
            'swayidle',
            'swaylock',
            'xdg-desktop-portal-gnome',
        ] + extra

    @property
    @override
    def default_greeter_type(self) -> GreeterType:
        """niri defaults to the LightDM greeter."""
        return GreeterType.Lightdm

    @property
    @override
    def services(self) -> list[str]:
        """Enable the chosen seat backend as a service, if any."""
        pref = self.custom_settings.get('seat_access', None)
        return [pref] if pref else []

    def _ask_seat_access(self) -> None:
        """Prompt for the seat-access backend (seatd or polkit)."""
        # need to activate seat service and add to seat group
        header = tr('niri needs access to your seat (collection of hardware devices i.e. keyboard, mouse, etc)')
        header += '\n' + tr('Choose an option to give niri access to your hardware') + '\n'

        group = MenuItemGroup(
            [MenuItem(s.value, value=s) for s in SeatAccess],
            sort_items=True,
        )
        group.set_default_by_value(self.custom_settings.get('seat_access', None))

        result = SelectMenu[SeatAccess](
            group,
            header=header,
            allow_skip=False,
            frame=FrameProperties.min(tr('Seat access')),
            alignment=Alignment.CENTER,
        ).run()

        if result.type_ == ResultType.Selection:
            self.custom_settings['seat_access'] = result.get_value().value

    @override
    def do_on_select(self) -> None:
        """Ask the seat-access question when this profile is selected."""
        self._ask_seat_access()
        return None

View File

@@ -0,0 +1,26 @@
from typing import override
from archinstall.default_profiles.profile import GreeterType, ProfileType
from archinstall.default_profiles.xorg import XorgProfile
class PlasmaProfile(XorgProfile):
    """Profile for the KDE Plasma desktop environment."""

    def __init__(self) -> None:
        super().__init__('KDE Plasma', ProfileType.DesktopEnv)

    @property
    @override
    def packages(self) -> list[str]:
        """Plasma meta package plus core KDE applications."""
        pkgs = [
            'plasma-meta',
            'konsole',
            'kate',
            'dolphin',
            'ark',
            'plasma-workspace',
        ]
        return pkgs

    @property
    @override
    def default_greeter_type(self) -> GreeterType:
        """Plasma defaults to SDDM."""
        return GreeterType.Sddm

View File

@@ -0,0 +1,22 @@
from typing import override
from archinstall.default_profiles.profile import GreeterType, ProfileType
from archinstall.default_profiles.xorg import XorgProfile
class QtileProfile(XorgProfile):
    """Profile for the Qtile tiling window manager."""

    def __init__(self) -> None:
        super().__init__('Qtile', ProfileType.WindowMgr)

    @property
    @override
    def packages(self) -> list[str]:
        pkgs = [
            'qtile',
            'alacritty',
        ]
        return pkgs

    @property
    @override
    def default_greeter_type(self) -> GreeterType:
        """Qtile defaults to the LightDM greeter."""
        return GreeterType.Lightdm

View File

@@ -0,0 +1,23 @@
from typing import override
from archinstall.default_profiles.profile import GreeterType, ProfileType
from archinstall.default_profiles.xorg import XorgProfile
class RiverProfile(XorgProfile):
    """Profile for the River Wayland compositor."""

    def __init__(self) -> None:
        super().__init__('River', ProfileType.WindowMgr)

    @property
    @override
    def packages(self) -> list[str]:
        pkgs = [
            'foot',
            'xdg-desktop-portal-wlr',
            'river',
        ]
        return pkgs

    @property
    @override
    def default_greeter_type(self) -> GreeterType:
        """River defaults to the LightDM greeter."""
        return GreeterType.Lightdm

View File

@@ -0,0 +1,81 @@
from typing import override
from archinstall.default_profiles.desktops import SeatAccess
from archinstall.default_profiles.profile import GreeterType, ProfileType
from archinstall.default_profiles.xorg import XorgProfile
from archinstall.lib.translationhandler import tr
from archinstall.tui.curses_menu import SelectMenu
from archinstall.tui.menu_item import MenuItem, MenuItemGroup
from archinstall.tui.result import ResultType
from archinstall.tui.types import Alignment, FrameProperties
class SwayProfile(XorgProfile):
    """Profile for the Sway Wayland compositor."""

    def __init__(self) -> None:
        super().__init__(
            'Sway',
            ProfileType.WindowMgr,
        )
        # Filled in by _ask_seat_access(); stores the seat backend name.
        self.custom_settings = {'seat_access': None}

    @property
    @override
    def packages(self) -> list[str]:
        """Sway plus companion tools; the chosen seat backend is appended."""
        seat = self.custom_settings.get('seat_access', None)
        extra = [seat] if seat else []
        return [
            'sway',
            'swaybg',
            'swaylock',
            'swayidle',
            'waybar',
            'wmenu',
            'brightnessctl',
            'grim',
            'slurp',
            'pavucontrol',
            'foot',
            'xorg-xwayland',
        ] + extra

    @property
    @override
    def default_greeter_type(self) -> GreeterType:
        """Sway defaults to the LightDM greeter."""
        return GreeterType.Lightdm

    @property
    @override
    def services(self) -> list[str]:
        """Enable the chosen seat backend as a service, if any."""
        pref = self.custom_settings.get('seat_access', None)
        return [pref] if pref else []

    def _ask_seat_access(self) -> None:
        """Prompt for the seat-access backend (seatd or polkit)."""
        # need to activate seat service and add to seat group
        header = tr('Sway needs access to your seat (collection of hardware devices i.e. keyboard, mouse, etc)')
        header += '\n' + tr('Choose an option to give Sway access to your hardware') + '\n'

        group = MenuItemGroup(
            [MenuItem(s.value, value=s) for s in SeatAccess],
            sort_items=True,
        )
        group.set_default_by_value(self.custom_settings.get('seat_access', None))

        result = SelectMenu[SeatAccess](
            group,
            header=header,
            allow_skip=False,
            frame=FrameProperties.min(tr('Seat access')),
            alignment=Alignment.CENTER,
        ).run()

        if result.type_ == ResultType.Selection:
            self.custom_settings['seat_access'] = result.get_value().value

    @override
    def do_on_select(self) -> None:
        """Ask the seat-access question when this profile is selected."""
        self._ask_seat_access()
        return None

View File

@@ -0,0 +1,25 @@
from typing import override
from archinstall.default_profiles.profile import GreeterType, ProfileType
from archinstall.default_profiles.xorg import XorgProfile
class Xfce4Profile(XorgProfile):
    """Profile for the Xfce4 desktop environment."""

    def __init__(self) -> None:
        super().__init__('Xfce4', ProfileType.DesktopEnv)

    @property
    @override
    def packages(self) -> list[str]:
        pkgs = [
            'xfce4',
            'xfce4-goodies',
            'pavucontrol',
            'gvfs',
            'xarchiver',
        ]
        return pkgs

    @property
    @override
    def default_greeter_type(self) -> GreeterType:
        """Xfce4 defaults to the LightDM greeter."""
        return GreeterType.Lightdm

View File

@@ -0,0 +1,25 @@
from typing import override
from archinstall.default_profiles.profile import GreeterType, ProfileType
from archinstall.default_profiles.xorg import XorgProfile
class XmonadProfile(XorgProfile):
    """Profile for the xmonad tiling window manager."""

    def __init__(self) -> None:
        super().__init__('Xmonad', ProfileType.WindowMgr)

    @property
    @override
    def packages(self) -> list[str]:
        pkgs = [
            'xmonad',
            'xmonad-contrib',
            'xmonad-extras',
            'xterm',
            'dmenu',
        ]
        return pkgs

    @property
    @override
    def default_greeter_type(self) -> GreeterType:
        """xmonad defaults to the LightDM greeter."""
        return GreeterType.Lightdm

View File

@@ -0,0 +1,9 @@
from archinstall.default_profiles.profile import Profile, ProfileType
class MinimalProfile(Profile):
    """Bare-bones profile: no extra packages, services or hooks."""

    def __init__(self) -> None:
        super().__init__('Minimal', ProfileType.Minimal)

View File

@@ -0,0 +1,204 @@
from __future__ import annotations
import sys
from enum import Enum, auto
from typing import TYPE_CHECKING
from archinstall.lib.translationhandler import tr
if TYPE_CHECKING:
from ..lib.installer import Installer
class ProfileType(Enum):
    """Categories of installation profiles; values are display labels."""

    # top level default_profiles
    Server = 'Server'
    Desktop = 'Desktop'
    Xorg = 'Xorg'
    Minimal = 'Minimal'
    Custom = 'Custom'
    # detailed selection default_profiles
    ServerType = 'ServerType'
    WindowMgr = 'Window Manager'
    DesktopEnv = 'Desktop Environment'
    CustomType = 'CustomType'
    # special things
    Tailored = 'Tailored'
    Application = 'Application'
class GreeterType(Enum):
    """Login greeters a desktop profile may default to.

    Values are the package/service names to install and enable.
    """

    Lightdm = 'lightdm-gtk-greeter'
    LightdmSlick = 'lightdm-slick-greeter'
    Sddm = 'sddm'
    Gdm = 'gdm'
    Ly = 'ly'

    # .. todo:: Remove when we un-hide cosmic behind --advanced
    # NOTE(review): this member only exists when '--advanced' appears in
    # sys.argv, so `GreeterType.CosmicSession` raises AttributeError
    # otherwise — any caller must only reference it in advanced mode.
    if '--advanced' in sys.argv:
        CosmicSession = 'cosmic-greeter'
class SelectResult(Enum):
    """Outcome of a profile-selection menu interaction."""

    NewSelection = auto()   # the user made a (new) choice
    SameSelection = auto()  # the user skipped; keep the previous selection
    ResetCurrent = auto()   # the user reset; discard the current selection
class Profile:
    """Base class for all installation profiles.

    A profile bundles a package list, services to enable, and optional
    install/post-install hooks. Top-level profiles (Desktop, Server, ...)
    additionally carry a list of selected sub-profiles in
    ``current_selection``.
    """

    def __init__(
        self,
        name: str,
        profile_type: ProfileType,
        current_selection: list[Profile] | None = None,
        packages: list[str] | None = None,
        services: list[str] | None = None,
        support_gfx_driver: bool = False,
        support_greeter: bool = False,
        advanced: bool = False,
    ) -> None:
        self.name = name
        self.profile_type = profile_type
        self.custom_settings: dict[str, str | None] = {}
        self.advanced = advanced
        self._support_gfx_driver = support_gfx_driver
        self._support_greeter = support_greeter
        # self.gfx_driver: str | None = None

        # BUGFIX: the previous defaults were mutable lists (`= []`), which
        # are evaluated once at definition time and shared by every instance
        # constructed without explicit arguments. None sentinels keep the
        # signature backward-compatible while giving each instance its own
        # fresh list.
        self.current_selection = current_selection if current_selection is not None else []
        self._packages = packages if packages is not None else []
        self._services = services if services is not None else []

        # Only used for custom default_profiles
        self.custom_enabled = False

    @property
    def packages(self) -> list[str]:
        """
        Returns a list of packages that should be installed when
        this profile is among the chosen ones
        """
        return self._packages

    @property
    def services(self) -> list[str]:
        """
        Returns a list of services that should be enabled when
        this profile is among the chosen ones
        """
        return self._services

    @property
    def default_greeter_type(self) -> GreeterType | None:
        """
        Setting a default greeter type for a desktop profile
        """
        return None

    def _advanced_check(self) -> bool:
        """
        Used to control if the Profile() should be visible or not in different contexts.
        Returns True if --advanced is given on a Profile(advanced=True) instance.
        """
        # Imported lazily to avoid a circular import at module load time.
        from archinstall.lib.args import arch_config_handler

        return self.advanced is False or arch_config_handler.args.advanced is True

    def install(self, install_session: 'Installer') -> None:
        """
        Performs installation steps when this profile was selected
        """

    def post_install(self, install_session: 'Installer') -> None:
        """
        Hook that will be called when the installation process is
        finished and custom installation steps for specific default_profiles
        are needed
        """

    def json(self) -> dict[str, str]:
        """
        Returns a json representation of the profile
        """
        return {}

    def do_on_select(self) -> SelectResult | None:
        """
        Hook that will be called when a profile is selected
        """
        return SelectResult.NewSelection

    def set_custom_settings(self, settings: dict[str, str | None]) -> None:
        """
        Set the custom settings for the profile.
        This is also called when the settings are parsed from the config
        and can be overridden to perform further actions based on the profile
        """
        self.custom_settings = settings

    def current_selection_names(self) -> list[str]:
        """Names of the currently selected sub-profiles (empty when none)."""
        if self.current_selection:
            return [s.name for s in self.current_selection]
        return []

    def reset(self) -> None:
        """Discard any selected sub-profiles."""
        self.current_selection = []

    def is_top_level_profile(self) -> bool:
        """True for the profiles shown in the first selection level."""
        top_levels = [ProfileType.Desktop, ProfileType.Server, ProfileType.Xorg, ProfileType.Minimal, ProfileType.Custom]
        return self.profile_type in top_levels

    def is_desktop_profile(self) -> bool:
        # Advanced-only profiles are hidden unless --advanced was given.
        return self.profile_type == ProfileType.Desktop if self._advanced_check() else False

    def is_server_type_profile(self) -> bool:
        return self.profile_type == ProfileType.ServerType

    def is_desktop_type_profile(self) -> bool:
        # Covers both desktop environments and window managers.
        return (self.profile_type == ProfileType.DesktopEnv or self.profile_type == ProfileType.WindowMgr) if self._advanced_check() else False

    def is_xorg_type_profile(self) -> bool:
        return self.profile_type == ProfileType.Xorg if self._advanced_check() else False

    def is_tailored(self) -> bool:
        return self.profile_type == ProfileType.Tailored

    def is_custom_type_profile(self) -> bool:
        return self.profile_type == ProfileType.CustomType

    def is_graphic_driver_supported(self) -> bool:
        """True when this profile, or any selected sub-profile, can use a gfx driver."""
        if not self.current_selection:
            return self._support_gfx_driver
        else:
            if any([p._support_gfx_driver for p in self.current_selection]):
                return True
            return False

    def is_greeter_supported(self) -> bool:
        return self._support_greeter

    def preview_text(self) -> str:
        """
        Override this method to provide a preview text for the profile
        """
        return self.packages_text()

    def packages_text(self, include_sub_packages: bool = False) -> str:
        """Human-readable, sorted package listing for menu previews.

        When ``include_sub_packages`` is True, packages from selected
        sub-profiles are merged in as well.
        """
        packages = set()

        if self.packages:
            packages = set(self.packages)

        if include_sub_packages:
            for sub_profile in self.current_selection:
                if sub_profile.packages:
                    packages.update(sub_profile.packages)

        text = tr('Installed packages') + ':\n'

        for pkg in sorted(packages):
            text += f'\t- {pkg}\n'

        return text

View File

@@ -0,0 +1,74 @@
from typing import TYPE_CHECKING, override
from archinstall.default_profiles.profile import Profile, ProfileType, SelectResult
from archinstall.lib.output import info
from archinstall.lib.profile.profiles_handler import profile_handler
from archinstall.tui.curses_menu import SelectMenu
from archinstall.tui.menu_item import MenuItem, MenuItemGroup
from archinstall.tui.result import ResultType
from archinstall.tui.types import FrameProperties, PreviewStyle
if TYPE_CHECKING:
from archinstall.lib.installer import Installer
class ServerProfile(Profile):
def __init__(self, current_value: list[Profile] = []):
super().__init__(
'Server',
ProfileType.Server,
current_selection=current_value,
)
@override
def do_on_select(self) -> SelectResult:
items = [
MenuItem(
p.name,
value=p,
preview_action=lambda x: x.value.preview_text(),
)
for p in profile_handler.get_server_profiles()
]
group = MenuItemGroup(items, sort_items=True)
group.set_selected_by_value(self.current_selection)
result = SelectMenu[Profile](
group,
allow_reset=True,
allow_skip=True,
preview_style=PreviewStyle.RIGHT,
preview_size='auto',
preview_frame=FrameProperties.max('Info'),
multi=True,
).run()
match result.type_:
case ResultType.Selection:
selections = result.get_values()
self.current_selection = selections
return SelectResult.NewSelection
case ResultType.Skip:
return SelectResult.SameSelection
case ResultType.Reset:
return SelectResult.ResetCurrent
@override
def post_install(self, install_session: 'Installer') -> None:
for profile in self.current_selection:
profile.post_install(install_session)
@override
def install(self, install_session: 'Installer') -> None:
server_info = self.current_selection_names()
details = ', '.join(server_info)
info(f'Now installing the selected servers: {details}')
for server in self.current_selection:
info(f'Installing {server.name}...')
install_session.add_additional_packages(server.packages)
install_session.enable_service(server.services)
server.install(install_session)
info('If your selections included multiple servers with the same port, you may have to reconfigure them.')

View File

@@ -0,0 +1,21 @@
from typing import override
from archinstall.default_profiles.profile import Profile, ProfileType
class CockpitProfile(Profile):
    """Server profile for the Cockpit web-based admin console."""

    def __init__(self) -> None:
        super().__init__('Cockpit', ProfileType.ServerType)

    @property
    @override
    def packages(self) -> list[str]:
        return ['cockpit', 'udisks2', 'packagekit']

    @property
    @override
    def services(self) -> list[str]:
        # Socket-activated rather than a plain service unit.
        return ['cockpit.socket']

View File

@@ -0,0 +1,32 @@
from typing import TYPE_CHECKING, override
from archinstall.default_profiles.profile import Profile, ProfileType
if TYPE_CHECKING:
from archinstall.lib.installer import Installer
class DockerProfile(Profile):
    """Server profile installing the Docker container runtime."""

    def __init__(self) -> None:
        super().__init__('Docker', ProfileType.ServerType)

    @property
    @override
    def packages(self) -> list[str]:
        return ['docker']

    @property
    @override
    def services(self) -> list[str]:
        return ['docker']

    @override
    def post_install(self, install_session: 'Installer') -> None:
        """Add every configured user to the 'docker' group inside the chroot."""
        # Imported lazily, mirroring the rest of the codebase, to avoid
        # circular imports at module load time.
        from archinstall.lib.args import arch_config_handler

        auth_config = arch_config_handler.config.auth_config
        if auth_config:
            for user in auth_config.users:
                install_session.arch_chroot(f'usermod -a -G docker {user.username}')

View File

@@ -0,0 +1,21 @@
from typing import override
from archinstall.default_profiles.profile import Profile, ProfileType
class HttpdProfile(Profile):
    """Server profile for the Apache HTTP server (service name 'httpd')."""

    def __init__(self) -> None:
        super().__init__('httpd', ProfileType.ServerType)

    @property
    @override
    def packages(self) -> list[str]:
        return ['apache']

    @property
    @override
    def services(self) -> list[str]:
        return ['httpd']

View File

@@ -0,0 +1,21 @@
from typing import override
from archinstall.default_profiles.profile import Profile, ProfileType
class LighttpdProfile(Profile):
    """Server profile for the lighttpd web server."""

    def __init__(self) -> None:
        super().__init__('Lighttpd', ProfileType.ServerType)

    @property
    @override
    def packages(self) -> list[str]:
        return ['lighttpd']

    @property
    @override
    def services(self) -> list[str]:
        return ['lighttpd']

View File

@@ -0,0 +1,28 @@
from typing import TYPE_CHECKING, override
from archinstall.default_profiles.profile import Profile, ProfileType
if TYPE_CHECKING:
from archinstall.lib.installer import Installer
class MariadbProfile(Profile):
    """Server profile for the MariaDB database server."""

    def __init__(self) -> None:
        super().__init__('Mariadb', ProfileType.ServerType)

    @property
    @override
    def packages(self) -> list[str]:
        return ['mariadb']

    @property
    @override
    def services(self) -> list[str]:
        return ['mariadb']

    @override
    def post_install(self, install_session: 'Installer') -> None:
        """Initialize the MariaDB data directory inside the chroot."""
        install_session.arch_chroot('mariadb-install-db --user=mysql --basedir=/usr --datadir=/var/lib/mysql')

View File

@@ -0,0 +1,21 @@
from typing import override
from archinstall.default_profiles.profile import Profile, ProfileType
class NginxProfile(Profile):
    """Server profile for the nginx web server."""

    def __init__(self) -> None:
        super().__init__('Nginx', ProfileType.ServerType)

    @property
    @override
    def packages(self) -> list[str]:
        return ['nginx']

    @property
    @override
    def services(self) -> list[str]:
        return ['nginx']

View File

@@ -0,0 +1,28 @@
from typing import TYPE_CHECKING, override
from archinstall.default_profiles.profile import Profile, ProfileType
if TYPE_CHECKING:
from archinstall.lib.installer import Installer
class PostgresqlProfile(Profile):
    """Server profile for the PostgreSQL database server."""

    def __init__(self) -> None:
        super().__init__('Postgresql', ProfileType.ServerType)

    @property
    @override
    def packages(self) -> list[str]:
        return ['postgresql']

    @property
    @override
    def services(self) -> list[str]:
        return ['postgresql']

    @override
    def post_install(self, install_session: 'Installer') -> None:
        """Initialize the data directory as the 'postgres' user in the chroot."""
        install_session.arch_chroot('initdb -D /var/lib/postgres/data', run_as='postgres')

View File

@@ -0,0 +1,21 @@
from typing import override
from archinstall.default_profiles.profile import Profile, ProfileType
class SshdProfile(Profile):
    """Server profile that installs OpenSSH and enables the sshd service."""

    def __init__(self) -> None:
        super().__init__(
            'sshd',
            ProfileType.ServerType,
        )

    @property
    @override
    def packages(self) -> list[str]:
        """Pacman packages required by this profile."""
        return ['openssh']

    @property
    @override
    def services(self) -> list[str]:
        """Systemd services to enable after installation."""
        return ['sshd']

View File

@@ -0,0 +1,21 @@
from typing import override
from archinstall.default_profiles.profile import Profile, ProfileType
class TomcatProfile(Profile):
    """Server profile that installs Apache Tomcat 10."""

    def __init__(self) -> None:
        super().__init__(
            'Tomcat',
            ProfileType.ServerType,
        )

    @property
    @override
    def packages(self) -> list[str]:
        """Pacman packages required by this profile."""
        return ['tomcat10']

    @property
    @override
    def services(self) -> list[str]:
        """Systemd services to enable after installation."""
        return ['tomcat10']

View File

@@ -0,0 +1,22 @@
from typing import TYPE_CHECKING, override
from archinstall.default_profiles.profile import ProfileType
from archinstall.default_profiles.xorg import XorgProfile
if TYPE_CHECKING:
from archinstall.lib.installer import Installer
class TailoredProfile(XorgProfile):
    """Example machine-specific profile, keyed by a machine's MAC address."""

    def __init__(self) -> None:
        # The profile name is the MAC address of the machine this profile targets.
        super().__init__('52-54-00-12-34-56', ProfileType.Tailored)

    @property
    @override
    def packages(self) -> list[str]:
        """Extra packages installed on top of the Xorg base set."""
        return ['nano', 'wget', 'git']

    @override
    def install(self, install_session: 'Installer') -> None:
        super().install(install_session)
        # do whatever you like here :)

View File

@@ -0,0 +1,34 @@
from typing import override
from archinstall.default_profiles.profile import Profile, ProfileType
from archinstall.lib.translationhandler import tr
class XorgProfile(Profile):
    """Base profile for Xorg-backed environments.

    Installs the X server and enables graphics-driver selection for any
    profile that derives from it.
    """

    def __init__(
        self,
        name: str = 'Xorg',
        profile_type: ProfileType = ProfileType.Xorg,
        advanced: bool = False,
    ):
        super().__init__(
            name,
            profile_type,
            support_gfx_driver=True,
            advanced=advanced,
        )

    @override
    def preview_text(self) -> str:
        """Build the menu preview: environment type plus the package list, if any."""
        lines = [tr('Environment type: {}').format(self.profile_type.value)]
        package_listing = self.packages_text()
        if package_listing:
            lines.append(package_listing)
        return '\n'.join(lines)

    @property
    @override
    def packages(self) -> list[str]:
        """Base package set: only the X server itself."""
        return [
            'xorg-server',
        ]

View File

@@ -0,0 +1 @@
../examples/

View File

@@ -0,0 +1,29 @@
from typing import TYPE_CHECKING
from archinstall.applications.audio import AudioApp
from archinstall.applications.bluetooth import BluetoothApp
from archinstall.lib.models import Audio
from archinstall.lib.models.application import ApplicationConfiguration
from archinstall.lib.models.users import User
if TYPE_CHECKING:
from archinstall.lib.installer import Installer
class ApplicationHandler:
    """Installs optional applications (bluetooth, audio) into a target system."""

    def __init__(self) -> None:
        pass

    def install_applications(self, install_session: 'Installer', app_config: ApplicationConfiguration, users: list['User'] | None = None) -> None:
        """Install every application enabled in the given configuration."""
        bluetooth = app_config.bluetooth_config
        if bluetooth and bluetooth.enabled:
            BluetoothApp().install(install_session)

        audio = app_config.audio_config
        if audio and audio.audio != Audio.NO_AUDIO:
            AudioApp().install(
                install_session,
                audio,
                users,
            )


# Module-level singleton used by the installer flow.
application_handler = ApplicationHandler()

View File

@@ -0,0 +1,117 @@
from typing import override
from archinstall.lib.menu.abstract_menu import AbstractSubMenu
from archinstall.lib.models.application import ApplicationConfiguration, Audio, AudioConfiguration, BluetoothConfiguration
from archinstall.lib.translationhandler import tr
from archinstall.tui.curses_menu import SelectMenu
from archinstall.tui.menu_item import MenuItem, MenuItemGroup
from archinstall.tui.result import ResultType
from archinstall.tui.types import Alignment, FrameProperties, Orientation
class ApplicationMenu(AbstractSubMenu[ApplicationConfiguration]):
    """Sub-menu for optional application configuration (bluetooth, audio)."""

    def __init__(
        self,
        preset: ApplicationConfiguration | None = None,
    ):
        # Fall back to an empty configuration when no preset is supplied.
        self._app_config = preset or ApplicationConfiguration()
        menu_options = self._define_menu_options()
        self._item_group = MenuItemGroup(menu_options, checkmarks=True)
        super().__init__(
            self._item_group,
            config=self._app_config,
            allow_reset=True,
        )

    @override
    def run(self, additional_title: str | None = None) -> ApplicationConfiguration:
        super().run(additional_title=additional_title)
        return self._app_config

    def _define_menu_options(self) -> list[MenuItem]:
        # One menu entry per configurable application; `key` maps the entry
        # onto the corresponding ApplicationConfiguration attribute.
        bluetooth_item = MenuItem(
            text=tr('Bluetooth'),
            action=select_bluetooth,
            value=self._app_config.bluetooth_config,
            preview_action=self._prev_bluetooth,
            key='bluetooth_config',
        )
        audio_item = MenuItem(
            text=tr('Audio'),
            action=select_audio,
            preview_action=self._prev_audio,
            key='audio_config',
        )
        return [bluetooth_item, audio_item]

    def _prev_bluetooth(self, item: MenuItem) -> str | None:
        """Preview text for the bluetooth entry, or None when unset."""
        if item.value is None:
            return None
        bluetooth_config: BluetoothConfiguration = item.value
        state = tr('Enabled') if bluetooth_config.enabled else tr('Disabled')
        return 'Bluetooth: ' + state

    def _prev_audio(self, item: MenuItem) -> str | None:
        """Preview text for the audio entry, or None when unset."""
        if item.value is None:
            return None
        config: AudioConfiguration = item.value
        return f'{tr("Audio")}: {config.audio.value}'
def select_bluetooth(preset: BluetoothConfiguration | None) -> BluetoothConfiguration | None:
    """Yes/no prompt for enabling bluetooth.

    :param preset: previous configuration, used to pre-select an answer
    :return: the chosen configuration, or the preset when the prompt is skipped
    """
    group = MenuItemGroup.yes_no()
    group.focus_item = MenuItem.no()
    if preset is not None:
        group.set_selected_by_value(preset.enabled)

    result = SelectMenu[bool](
        group,
        header=tr('Would you like to configure Bluetooth?') + '\n',
        alignment=Alignment.CENTER,
        columns=2,
        orientation=Orientation.HORIZONTAL,
        allow_skip=True,
    ).run()

    if result.type_ == ResultType.Skip:
        return preset
    if result.type_ == ResultType.Selection:
        return BluetoothConfiguration(result.item() == MenuItem.yes())
    raise ValueError('Unhandled result type')
def select_audio(preset: AudioConfiguration | None = None) -> AudioConfiguration | None:
items = [MenuItem(a.value, value=a) for a in Audio]
group = MenuItemGroup(items)
if preset:
group.set_focus_by_value(preset.audio)
result = SelectMenu[Audio](
group,
allow_skip=True,
alignment=Alignment.CENTER,
frame=FrameProperties.min(tr('Audio')),
).run()
match result.type_:
case ResultType.Skip:
return preset
case ResultType.Selection:
return AudioConfiguration(audio=result.get_value())
case ResultType.Reset:
raise ValueError('Unhandled result type')

View File

@@ -0,0 +1,559 @@
import argparse
import json
import os
import urllib.error
import urllib.parse
from argparse import ArgumentParser, Namespace
from dataclasses import dataclass, field
from importlib.metadata import version
from pathlib import Path
from typing import Any
from urllib.request import Request, urlopen
from pydantic.dataclasses import dataclass as p_dataclass
from archinstall.lib.crypt import decrypt
from archinstall.lib.models.application import ApplicationConfiguration
from archinstall.lib.models.authentication import AuthenticationConfiguration
from archinstall.lib.models.bootloader import Bootloader
from archinstall.lib.models.device import DiskEncryption, DiskLayoutConfiguration
from archinstall.lib.models.locale import LocaleConfiguration
from archinstall.lib.models.mirrors import MirrorConfiguration
from archinstall.lib.models.network import NetworkConfiguration
from archinstall.lib.models.packages import Repository
from archinstall.lib.models.profile import ProfileConfiguration
from archinstall.lib.models.users import Password, User, UserSerialization
from archinstall.lib.output import debug, error, logger, warn
from archinstall.lib.plugins import load_plugin
from archinstall.lib.translationhandler import Language, tr, translation_handler
from archinstall.lib.utils.util import get_password
from archinstall.tui.curses_menu import Tui
@p_dataclass
class Arguments:
    """Command-line arguments for archinstall, validated by pydantic."""

    config: Path | None = None  # local JSON configuration file
    config_url: str | None = None  # URL to a JSON configuration file
    creds: Path | None = None  # local JSON credentials file
    creds_url: str | None = None  # URL to a JSON credentials file
    creds_decryption_key: str | None = None  # key for an encrypted credentials file
    silent: bool = False  # disable all prompts; only honored when a config is given
    dry_run: bool = False  # generate configuration files and exit
    script: str | None = None  # installation script to run (e.g. 'guided')
    mountpoint: Path = Path('/mnt')  # installation mount point
    skip_ntp: bool = False  # skip NTP checks during installation
    skip_wkd: bool = False  # skip archlinux keyring WKD sync check
    skip_boot: bool = False  # skip boot loader installation
    debug: bool = False  # write debug info (and some credentials!) to the log
    offline: bool = False  # disable online services (package search, keyring update)
    no_pkg_lookups: bool = False  # disable package validation before install
    plugin: str | None = None  # file path to a plugin to load
    skip_version_check: bool = False
    skip_wifi_check: bool = False
    advanced: bool = False  # enable advanced menu options
    verbose: bool = False
@dataclass
class ArchConfig:
    """Top-level archinstall configuration, assembled from config files and menus."""

    version: str | None = None
    script: str | None = None
    locale_config: LocaleConfiguration | None = None
    archinstall_language: Language = field(default_factory=lambda: translation_handler.get_language_by_abbr('en'))
    disk_config: DiskLayoutConfiguration | None = None
    profile_config: ProfileConfiguration | None = None
    mirror_config: MirrorConfiguration | None = None
    network_config: NetworkConfiguration | None = None
    bootloader: Bootloader | None = None
    uki: bool = False  # unified kernel image; requires bootloader UKI support
    app_config: ApplicationConfiguration | None = None
    auth_config: AuthenticationConfiguration | None = None
    hostname: str = 'archlinux'
    kernels: list[str] = field(default_factory=lambda: ['linux'])
    ntp: bool = True
    packages: list[str] = field(default_factory=list)
    parallel_downloads: int = 0
    swap: bool = True
    timezone: str = 'UTC'
    services: list[str] = field(default_factory=list)
    custom_commands: list[str] = field(default_factory=list)

    def unsafe_json(self) -> dict[str, Any]:
        """Return the sensitive part of the configuration (credentials).

        Includes user entries, the encrypted root password and the plaintext
        disk-encryption password; must only ever be written to the separate
        credentials file, never the regular configuration file.
        """
        config: dict[str, list[UserSerialization] | str | None] = {}

        if self.auth_config:
            if self.auth_config.users:
                config['users'] = [user.json() for user in self.auth_config.users]
            if self.auth_config.root_enc_password:
                config['root_enc_password'] = self.auth_config.root_enc_password.enc_password

        if self.disk_config:
            disk_encryption = self.disk_config.disk_encryption
            if disk_encryption and disk_encryption.encryption_password:
                config['encryption_password'] = disk_encryption.encryption_password.plaintext

        return config

    def safe_json(self) -> dict[str, Any]:
        """Return the non-sensitive configuration, suitable for saving to disk."""
        config: Any = {
            'version': self.version,
            'script': self.script,
            'archinstall-language': self.archinstall_language.json(),
            'hostname': self.hostname,
            'kernels': self.kernels,
            'uki': self.uki,
            'ntp': self.ntp,
            'packages': self.packages,
            'parallel_downloads': self.parallel_downloads,
            'swap': self.swap,
            'timezone': self.timezone,
            'services': self.services,
            'custom_commands': self.custom_commands,
            'bootloader': self.bootloader.json() if self.bootloader else None,
            'app_config': self.app_config.json() if self.app_config else None,
            'auth_config': self.auth_config.json() if self.auth_config else None,
        }

        # Optional sub-configurations are only emitted when present.
        if self.locale_config:
            config['locale_config'] = self.locale_config.json()

        if self.disk_config:
            config['disk_config'] = self.disk_config.json()

        if self.profile_config:
            config['profile_config'] = self.profile_config.json()

        if self.mirror_config:
            config['mirror_config'] = self.mirror_config.json()

        if self.network_config:
            config['network_config'] = self.network_config.json()

        return config

    @classmethod
    def from_config(cls, args_config: dict[str, Any], args: Arguments) -> 'ArchConfig':
        """Build an ArchConfig from a parsed JSON configuration dict.

        Handles several deprecated/legacy keys for backwards compatibility
        (top-level disk_encryption, audio_config, !root-password, !users).

        :param args_config: merged configuration + credentials JSON
        :param args: parsed command-line arguments (for e.g. --skip-boot)
        """
        arch_config = ArchConfig()
        arch_config.locale_config = LocaleConfiguration.parse_arg(args_config)

        if script := args_config.get('script', None):
            arch_config.script = script

        if archinstall_lang := args_config.get('archinstall-language', None):
            arch_config.archinstall_language = translation_handler.get_language_by_name(archinstall_lang)

        if disk_config := args_config.get('disk_config', {}):
            enc_password = args_config.get('encryption_password', '')
            password = Password(plaintext=enc_password) if enc_password else None
            arch_config.disk_config = DiskLayoutConfiguration.parse_arg(disk_config, password)

        # DEPRECATED
        # backwards compatibility for main level disk_encryption entry
        disk_encryption: DiskEncryption | None = None
        if args_config.get('disk_encryption', None) is not None and arch_config.disk_config is not None:
            disk_encryption = DiskEncryption.parse_arg(
                arch_config.disk_config,
                args_config['disk_encryption'],
                Password(plaintext=args_config.get('encryption_password', '')),
            )

        if disk_encryption:
            arch_config.disk_config.disk_encryption = disk_encryption

        if profile_config := args_config.get('profile_config', None):
            arch_config.profile_config = ProfileConfiguration.parse_arg(profile_config)

        if mirror_config := args_config.get('mirror_config', None):
            # Legacy 'additional-repositories' entries are folded into the mirror config.
            backwards_compatible_repo = []
            if additional_repositories := args_config.get('additional-repositories', []):
                backwards_compatible_repo = [Repository(r) for r in additional_repositories]

            arch_config.mirror_config = MirrorConfiguration.parse_args(
                mirror_config,
                backwards_compatible_repo,
            )

        if net_config := args_config.get('network_config', None):
            arch_config.network_config = NetworkConfiguration.parse_arg(net_config)

        if bootloader_config := args_config.get('bootloader', None):
            arch_config.bootloader = Bootloader.from_arg(bootloader_config, args.skip_boot)

        # UKI is forced off when the chosen bootloader cannot handle it.
        arch_config.uki = args_config.get('uki', False)
        if args_config.get('uki') and (arch_config.bootloader is None or not arch_config.bootloader.has_uki_support()):
            arch_config.uki = False

        # deprecated: backwards compatibility
        audio_config_args = args_config.get('audio_config', None)
        app_config_args = args_config.get('app_config', None)

        if audio_config_args is not None or app_config_args is not None:
            arch_config.app_config = ApplicationConfiguration.parse_arg(app_config_args, audio_config_args)

        if auth_config_args := args_config.get('auth_config', None):
            arch_config.auth_config = AuthenticationConfiguration.parse_arg(auth_config_args)

        if hostname := args_config.get('hostname', ''):
            arch_config.hostname = hostname

        if kernels := args_config.get('kernels', []):
            arch_config.kernels = kernels

        arch_config.ntp = args_config.get('ntp', True)

        if packages := args_config.get('packages', []):
            arch_config.packages = packages

        if parallel_downloads := args_config.get('parallel_downloads', 0):
            arch_config.parallel_downloads = parallel_downloads

        arch_config.swap = args_config.get('swap', True)

        if timezone := args_config.get('timezone', 'UTC'):
            arch_config.timezone = timezone

        if services := args_config.get('services', []):
            arch_config.services = services

        # DEPRECATED: backwards compatibility
        # '!root-password' (plaintext) is superseded by 'root_enc_password' (hash).
        root_password = None
        if root_password := args_config.get('!root-password', None):
            root_password = Password(plaintext=root_password)

        if enc_password := args_config.get('root_enc_password', None):
            root_password = Password(enc_password=enc_password)

        if root_password is not None:
            if arch_config.auth_config is None:
                arch_config.auth_config = AuthenticationConfiguration()
            arch_config.auth_config.root_enc_password = root_password

        # DEPRECATED: backwards copatibility
        # '!users' is the legacy key; 'users' takes precedence when both exist.
        users: list[User] = []
        if args_users := args_config.get('!users', None):
            users = User.parse_arguments(args_users)

        if args_users := args_config.get('users', None):
            users = User.parse_arguments(args_users)

        if users:
            if arch_config.auth_config is None:
                arch_config.auth_config = AuthenticationConfiguration()
            arch_config.auth_config.users = users

        if custom_commands := args_config.get('custom_commands', []):
            arch_config.custom_commands = custom_commands

        return arch_config
class ArchConfigHandler:
    """Parses command-line arguments and config/credentials files into an ArchConfig."""

    def __init__(self) -> None:
        self._parser: ArgumentParser = self._define_arguments()
        args: Arguments = self._parse_args()
        self._args = args

        config = self._parse_config()
        try:
            self._config = ArchConfig.from_config(config, args)
            self._config.version = self._get_version()
        except ValueError as err:
            # Invalid configuration content is fatal.
            warn(str(err))
            exit(1)

    @property
    def config(self) -> ArchConfig:
        """The fully parsed installation configuration."""
        return self._config

    @property
    def args(self) -> Arguments:
        """The parsed command-line arguments."""
        return self._args

    def get_script(self) -> str:
        """Resolve the installation script: CLI flag > config entry > 'guided'."""
        if script := self.args.script:
            return script
        if script := self.config.script:
            return script
        return 'guided'

    def print_help(self) -> None:
        self._parser.print_help()

    def _get_version(self) -> str:
        # Version comes from package metadata; absent when running from a checkout.
        try:
            return version('archinstall')
        except Exception:
            return 'Archinstall version not found'

    def _define_arguments(self) -> ArgumentParser:
        """Declare all supported command-line options."""
        parser = ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
        parser.add_argument(
            '-v',
            '--version',
            action='version',
            default=False,
            version='%(prog)s ' + self._get_version(),
        )
        parser.add_argument(
            '--config',
            type=Path,
            nargs='?',
            default=None,
            help='JSON configuration file',
        )
        parser.add_argument(
            '--config-url',
            type=str,
            nargs='?',
            default=None,
            help='Url to a JSON configuration file',
        )
        parser.add_argument(
            '--creds',
            type=Path,
            nargs='?',
            default=None,
            help='JSON credentials configuration file',
        )
        parser.add_argument(
            '--creds-url',
            type=str,
            nargs='?',
            default=None,
            help='Url to a JSON credentials configuration file',
        )
        parser.add_argument(
            '--creds-decryption-key',
            type=str,
            nargs='?',
            default=None,
            help='Decryption key for credentials file',
        )
        parser.add_argument(
            '--silent',
            action='store_true',
            default=False,
            help='WARNING: Disables all prompts for input and confirmation. If no configuration is provided, this is ignored',
        )
        parser.add_argument(
            '--dry-run',
            '--dry_run',
            action='store_true',
            default=False,
            help='Generates a configuration file and then exits instead of performing an installation',
        )
        parser.add_argument(
            '--script',
            nargs='?',
            help='Script to run for installation',
            type=str,
        )
        parser.add_argument(
            '--mountpoint',
            type=Path,
            nargs='?',
            default=Path('/mnt'),
            help='Define an alternate mount point for installation',
        )
        parser.add_argument(
            '--skip-ntp',
            action='store_true',
            help='Disables NTP checks during installation',
            default=False,
        )
        parser.add_argument(
            '--skip-wkd',
            action='store_true',
            help='Disables checking if archlinux keyring wkd sync is complete.',
            default=False,
        )
        parser.add_argument(
            '--skip-boot',
            action='store_true',
            help='Disables installation of a boot loader (note: only use this when problems arise with the boot loader step).',
            default=False,
        )
        parser.add_argument(
            '--debug',
            action='store_true',
            default=False,
            help='Adds debug info into the log',
        )
        parser.add_argument(
            '--offline',
            action='store_true',
            default=False,
            help='Disabled online upstream services such as package search and key-ring auto update.',
        )
        parser.add_argument(
            '--no-pkg-lookups',
            action='store_true',
            default=False,
            help='Disabled package validation specifically prior to starting installation.',
        )
        parser.add_argument(
            '--plugin',
            nargs='?',
            type=str,
            default=None,
            help='File path to a plugin to load',
        )
        parser.add_argument(
            '--skip-version-check',
            action='store_true',
            default=False,
            help='Skip the version check when running archinstall',
        )
        parser.add_argument(
            '--skip-wifi-check',
            action='store_true',
            default=False,
            help='Skip wifi check when running archinstall',
        )
        parser.add_argument(
            '--advanced',
            action='store_true',
            default=False,
            help='Enabled advanced options',
        )
        parser.add_argument(
            '--verbose',
            action='store_true',
            default=False,
            help='Enabled verbose options',
        )
        return parser

    def _parse_args(self) -> Arguments:
        """Parse argv into an Arguments object and enforce cross-flag consistency."""
        argparse_args = vars(self._parser.parse_args())
        args: Arguments = Arguments(**argparse_args)

        # amend the parameters (check internal consistency)
        # Installation can't be silent if config is not passed
        if args.config is None and args.config_url is None:
            args.silent = False

        if args.debug:
            warn(f'Warning: --debug mode will write certain credentials to {logger.path}!')

        if args.plugin:
            plugin_path = Path(args.plugin)
            load_plugin(plugin_path)

        # Fall back to the environment variable when no key was passed on the CLI.
        if args.creds_decryption_key is None:
            if os.environ.get('ARCHINSTALL_CREDS_DECRYPTION_KEY'):
                args.creds_decryption_key = os.environ.get('ARCHINSTALL_CREDS_DECRYPTION_KEY')

        return args

    def _parse_config(self) -> dict[str, Any]:
        """Load config and credentials (file or URL) and merge them into one dict."""
        config: dict[str, Any] = {}
        config_data: str | None = None
        creds_data: str | None = None

        if self._args.config is not None:
            config_data = self._read_file(self._args.config)
        elif self._args.config_url is not None:
            config_data = self._fetch_from_url(self._args.config_url)

        if config_data is not None:
            config.update(json.loads(config_data))

        if self._args.creds is not None:
            creds_data = self._read_file(self._args.creds)
        elif self._args.creds_url is not None:
            creds_data = self._fetch_from_url(self._args.creds_url)

        if creds_data is not None:
            json_data = self._process_creds_data(creds_data)
            if json_data is not None:
                config.update(json_data)

        config = self._cleanup_config(config)

        return config

    def _process_creds_data(self, creds_data: str) -> dict[str, Any] | None:
        """Decrypt (if necessary) and JSON-decode the credentials payload.

        An encrypted payload is detected by a leading '$'. The decryption key
        is taken from the CLI/env when available, otherwise the user is
        prompted interactively via the TUI until the correct key is entered
        or the prompt is aborted (returning None).
        """
        if creds_data.startswith('$'):  # encrypted data
            if self._args.creds_decryption_key is not None:
                try:
                    creds_data = decrypt(creds_data, self._args.creds_decryption_key)
                    return json.loads(creds_data)
                except ValueError as err:
                    if 'Invalid password' in str(err):
                        error(tr('Incorrect credentials file decryption password'))
                        exit(1)
                    else:
                        debug(f'Error decrypting credentials file: {err}')
                        raise err from err
            else:
                incorrect_password = False
                with Tui():
                    while True:
                        header = tr('Incorrect password') if incorrect_password else None
                        decryption_pwd = get_password(
                            text=tr('Credentials file decryption password'),
                            header=header,
                            allow_skip=False,
                            skip_confirmation=True,
                        )

                        if not decryption_pwd:
                            return None

                        try:
                            creds_data = decrypt(creds_data, decryption_pwd.plaintext)
                            break
                        except ValueError as err:
                            if 'Invalid password' in str(err):
                                debug('Incorrect credentials file decryption password')
                                incorrect_password = True
                            else:
                                debug(f'Error decrypting credentials file: {err}')
                                raise err from err

        return json.loads(creds_data)

    def _fetch_from_url(self, url: str) -> str:
        """Download a JSON document from a URL; exits the process on failure."""
        if urllib.parse.urlparse(url).scheme:
            try:
                req = Request(url, headers={'User-Agent': 'ArchInstall'})
                with urlopen(req) as resp:
                    return resp.read().decode('utf-8')
            except urllib.error.HTTPError as err:
                error(f'Could not fetch JSON from {url}: {err}')
        else:
            error('Not a valid url')

        exit(1)

    def _read_file(self, path: Path) -> str:
        """Read a local file; exits the process when it does not exist."""
        if not path.exists():
            error(f'Could not find file {path}')
            exit(1)

        return path.read_text()

    def _cleanup_config(self, config: Namespace | dict[str, Any]) -> dict[str, Any]:
        """Recursively drop None entries from the merged configuration."""
        clean_args = {}
        for key, val in config.items():
            if isinstance(val, dict):
                val = self._cleanup_config(val)

            if val is not None:
                clean_args[key] = val

        return clean_args


# Module-level singleton; parses argv at import time.
arch_config_handler: ArchConfigHandler = ArchConfigHandler()

View File

@@ -0,0 +1,130 @@
import getpass
from pathlib import Path
from typing import TYPE_CHECKING
from archinstall.lib.general import SysCommandWorker
from archinstall.lib.models.authentication import AuthenticationConfiguration, U2FLoginConfiguration, U2FLoginMethod
from archinstall.lib.models.users import User
from archinstall.lib.output import debug
from archinstall.lib.translationhandler import tr
from archinstall.tui.curses_menu import Tui
if TYPE_CHECKING:
from archinstall.lib.installer import Installer
class AuthenticationHandler:
    """Applies authentication configuration (U2F login) to an installed system."""

    def setup_auth(
        self,
        install_session: 'Installer',
        auth_config: AuthenticationConfiguration,
        hostname: str,
    ) -> None:
        """Entry point: set up U2F login when configured and users exist."""
        if auth_config.u2f_config and auth_config.users is not None:
            self._setup_u2f_login(install_session, auth_config.u2f_config, auth_config.users, hostname)

    def _setup_u2f_login(self, install_session: 'Installer', u2f_config: U2FLoginConfiguration, users: list[User], hostname: str) -> None:
        # Enroll devices first, then wire the mapping file into PAM.
        self._configure_u2f_mapping(install_session, u2f_config, users, hostname)
        self._update_pam_config(install_session, u2f_config)

    def _update_pam_config(
        self,
        install_session: 'Installer',
        u2f_config: U2FLoginConfiguration,
    ) -> None:
        """Insert the pam_u2f entry into the target's PAM configuration.

        'sufficient' makes the key alone enough to log in (passwordless);
        'required' makes it an additional second factor.
        """
        match u2f_config.u2f_login_method:
            case U2FLoginMethod.Passwordless:
                config_entry = 'auth sufficient pam_u2f.so authfile=/etc/u2f_mappings cue'
            case U2FLoginMethod.SecondFactor:
                config_entry = 'auth required pam_u2f.so authfile=/etc/u2f_mappings cue'
            case _:
                raise ValueError(f'Unknown U2F login method: {u2f_config.u2f_login_method}')

        debug(f'U2F PAM configuration: {config_entry}')
        debug(f'Passwordless sudo enabled: {u2f_config.passwordless_sudo}')

        sudo_config = install_session.target / 'etc/pam.d/sudo'
        sys_login = install_session.target / 'etc/pam.d/system-login'

        if u2f_config.passwordless_sudo:
            self._add_u2f_entry(sudo_config, config_entry)

        self._add_u2f_entry(sys_login, config_entry)

    def _add_u2f_entry(self, file: Path, entry: str) -> None:
        """Idempotently place `entry` as the first non-comment line of a PAM file."""
        if not file.exists():
            debug(f'File does not exist: {file}')
            return None

        content = file.read_text().splitlines()

        # remove any existing u2f auth entry
        content = [line for line in content if 'pam_u2f.so' not in line]

        # add the u2f auth entry as the first one after comments
        for i, line in enumerate(content):
            if not line.startswith('#'):
                content.insert(i, entry)
                break
        else:
            # file consisted only of comments (or was empty)
            content.append(entry)

        file.write_text('\n'.join(content) + '\n')

    def _configure_u2f_mapping(
        self,
        install_session: 'Installer',
        u2f_config: U2FLoginConfiguration,
        users: list[User],
        hostname: str,
    ) -> None:
        """Interactively enroll each user's U2F device into /etc/u2f_mappings.

        Runs pamu2fcfg inside the chroot per user; the user may be prompted
        for the device PIN and a touch.
        """
        debug(f'Setting up U2F login: {u2f_config.u2f_login_method.value}')
        install_session.pacman.strap('pam-u2f')
        Tui.print(tr(f'Setting up U2F login: {u2f_config.u2f_login_method.value}'))

        # https://developers.yubico.com/pam-u2f/
        u2f_auth_file = install_session.target / 'etc/u2f_mappings'
        u2f_auth_file.touch()
        existing_keys = u2f_auth_file.read_text()

        registered_keys: list[str] = []

        for user in users:
            Tui.print('')
            Tui.print(tr('Setting up U2F device for user: {}').format(user.username))
            Tui.print(tr('You may need to enter the PIN and then touch your U2F device to register it'))

            cmd = ' '.join(
                ['arch-chroot', '-S', str(install_session.target), 'pamu2fcfg', '-u', user.username, '-o', f'pam://{hostname}', '-i', f'pam://{hostname}']
            )

            debug(f'Enrolling U2F device: {cmd}')
            worker = SysCommandWorker(cmd, peek_output=True)

            pin_inputted = False
            while worker.is_alive():
                # NOTE(review): reads the worker's private _trace_log to detect the
                # PIN prompt; relies on SysCommandWorker internals — confirm stable.
                if pin_inputted is False:
                    if bytes('enter pin for', 'UTF-8') in worker._trace_log.lower():
                        worker.write(bytes(getpass.getpass(''), 'UTF-8'))
                        pin_inputted = True

            # pamu2fcfg prints the mapping line last.
            # NOTE(review): assumes output is non-empty; output[-1] would raise
            # IndexError if enrolment produced nothing — confirm.
            output = worker.decode().strip().splitlines()
            debug(f'Output from pamu2fcfg: {output}')

            key = output[-1].strip()
            registered_keys.append(key)

        all_keys = '\n'.join(registered_keys)

        if existing_keys:
            existing_keys += f'\n{all_keys}'
        else:
            existing_keys = all_keys

        u2f_auth_file.write_text(existing_keys)


# Module-level singleton used by the installer flow.
auth_handler = AuthenticationHandler()

View File

@@ -0,0 +1,159 @@
from typing import override
from archinstall.lib.disk.fido import Fido2
from archinstall.lib.interactions.manage_users_conf import ask_for_additional_users
from archinstall.lib.menu.abstract_menu import AbstractSubMenu
from archinstall.lib.models.authentication import AuthenticationConfiguration, U2FLoginConfiguration, U2FLoginMethod
from archinstall.lib.models.users import Password, User
from archinstall.lib.output import FormattedOutput
from archinstall.lib.translationhandler import tr
from archinstall.lib.utils.util import get_password
from archinstall.tui.curses_menu import SelectMenu
from archinstall.tui.menu_item import MenuItem, MenuItemGroup
from archinstall.tui.result import ResultType
from archinstall.tui.types import Alignment, FrameProperties, Orientation
class AuthenticationMenu(AbstractSubMenu[AuthenticationConfiguration]):
    """Sub-menu for authentication settings (root password, users, U2F login)."""

    def __init__(self, preset: AuthenticationConfiguration | None = None):
        # Fall back to an empty configuration when no preset is supplied.
        self._auth_config = preset or AuthenticationConfiguration()
        menu_options = self._define_menu_options()
        self._item_group = MenuItemGroup(menu_options, checkmarks=True)
        super().__init__(
            self._item_group,
            config=self._auth_config,
            allow_reset=True,
        )

    @override
    def run(self, additional_title: str | None = None) -> AuthenticationConfiguration:
        super().run(additional_title=additional_title)
        return self._auth_config

    def _define_menu_options(self) -> list[MenuItem]:
        # `key` maps each entry onto the corresponding attribute of the config.
        root_item = MenuItem(
            text=tr('Root password'),
            action=select_root_password,
            preview_action=self._prev_root_pwd,
            key='root_enc_password',
        )
        users_item = MenuItem(
            text=tr('User account'),
            action=self._create_user_account,
            preview_action=self._prev_users,
            key='users',
        )
        u2f_item = MenuItem(
            text=tr('U2F login setup'),
            action=select_u2f_login,
            value=self._auth_config.u2f_config,
            preview_action=self._prev_u2f_login,
            key='u2f_config',
        )
        return [root_item, users_item, u2f_item]

    def _create_user_account(self, preset: list[User] | None = None) -> list[User]:
        """Prompt for additional user accounts, starting from the given list."""
        defined = preset if preset is not None else []
        return ask_for_additional_users(defined_users=defined)

    def _prev_users(self, item: MenuItem) -> str | None:
        """Preview: tabular listing of configured users."""
        users: list[User] | None = item.value
        if not users:
            return None
        return FormattedOutput.as_table(users)

    def _prev_root_pwd(self, item: MenuItem) -> str | None:
        """Preview: masked root password."""
        if item.value is None:
            return None
        password: Password = item.value
        return f'{tr("Root password")}: {password.hidden()}'

    def _depends_on_u2f(self) -> bool:
        """True when at least one FIDO2 device is attached."""
        devices = Fido2.get_fido2_devices()
        return bool(devices)

    def _prev_u2f_login(self, item: MenuItem) -> str | None:
        """Preview: U2F method and sudo setting, or device availability."""
        config: U2FLoginConfiguration | None = item.value
        if config is not None:
            sudo_state = tr('Enabled') if config.passwordless_sudo else tr('Disabled')
            return (
                tr('U2F login method: ')
                + config.u2f_login_method.display_value()
                + '\n'
                + tr('Passwordless sudo: ')
                + sudo_state
            )

        if not Fido2.get_fido2_devices():
            return tr('No U2F devices found')
        return None
def select_root_password(preset: str | None = None) -> Password | None:
    """Prompt for the root password; returns None when the prompt is skipped."""
    return get_password(text=tr('Root password'), allow_skip=True)
def select_u2f_login(preset: U2FLoginConfiguration | None) -> U2FLoginConfiguration | None:
    """Prompt for the U2F login method and passwordless-sudo preference.

    Returns None immediately when no FIDO2 device is attached.

    :param preset: previously selected configuration, used to pre-select entries
    :return: the chosen configuration, the preset on skip, or None on reset
    """
    devices = Fido2.get_fido2_devices()
    if not devices:
        return None

    items = []
    for method in U2FLoginMethod:
        items.append(MenuItem(method.display_value(), value=method))

    group = MenuItemGroup(items)

    if preset is not None:
        group.set_selected_by_value(preset.u2f_login_method)

    result = SelectMenu[U2FLoginMethod](
        group,
        alignment=Alignment.CENTER,
        frame=FrameProperties.min(tr('U2F Login Method')),
        allow_skip=True,
        allow_reset=True,
    ).run()

    match result.type_:
        case ResultType.Selection:
            u2f_method = result.get_value()

            # Follow-up prompt: should sudo skip the password when a key is present?
            group = MenuItemGroup.yes_no()
            group.focus_item = MenuItem.no()
            header = tr('Enable passwordless sudo?')

            result_sudo = SelectMenu[bool](
                group,
                header=header,
                alignment=Alignment.CENTER,
                columns=2,
                orientation=Orientation.HORIZONTAL,
                allow_skip=True,
            ).run()

            # NOTE(review): on skip of the sudo prompt this still reads the focused
            # item (defaults to 'no') rather than a dedicated skip path — confirm intended.
            passwordless_sudo = result_sudo.item() == MenuItem.yes()

            return U2FLoginConfiguration(
                u2f_login_method=u2f_method,
                passwordless_sudo=passwordless_sudo,
            )
        case ResultType.Skip:
            return preset
        case ResultType.Reset:
            return None
        case _:
            raise ValueError('Unhandled result type')

View File

@@ -0,0 +1,116 @@
import time
from collections.abc import Iterator
from types import TracebackType
from .exceptions import SysCallError
from .general import SysCommand, SysCommandWorker, locate_binary
from .installer import Installer
from .output import error
from .storage import storage
class Boot:
def __init__(self, installation: Installer):
self.instance = installation
self.container_name = 'archinstall'
self.session: SysCommandWorker | None = None
self.ready = False
def __enter__(self) -> 'Boot':
if (existing_session := storage.get('active_boot', None)) and existing_session.instance != self.instance:
raise KeyError('Archinstall only supports booting up one instance and another session is already active.')
if existing_session:
self.session = existing_session.session
self.ready = existing_session.ready
else:
# '-P' or --console=pipe could help us not having to do a bunch
# of os.write() calls, but instead use pipes (stdin, stdout and stderr) as usual.
self.session = SysCommandWorker(
[
'systemd-nspawn',
'-D',
str(self.instance.target),
'--timezone=off',
'-b',
'--no-pager',
'--machine',
self.container_name,
]
)
if not self.ready and self.session:
while self.session.is_alive():
if b' login:' in self.session:
self.ready = True
break
storage['active_boot'] = self
return self
def __exit__(self, exc_type: type[BaseException] | None, exc_value: BaseException | None, traceback: TracebackType | None) -> None:
# b''.join(sys_command('sync')) # No need to, since the underlying fs() object will call sync.
# TODO: https://stackoverflow.com/questions/28157929/how-to-safely-handle-an-exception-inside-a-context-manager
if exc_type is not None:
error(
str(exc_value),
f'The error above occurred in a temporary boot-up of the installation {self.instance}',
)
shutdown = None
shutdown_exit_code: int | None = -1
try:
shutdown = SysCommand(f'systemd-run --machine={self.container_name} --pty shutdown now')
except SysCallError as err:
shutdown_exit_code = err.exit_code
if self.session:
while self.session.is_alive():
time.sleep(0.25)
if shutdown and shutdown.exit_code:
shutdown_exit_code = shutdown.exit_code
if self.session and (self.session.exit_code == 0 or shutdown_exit_code == 0):
storage['active_boot'] = None
else:
session_exit_code = self.session.exit_code if self.session else -1
raise SysCallError(
f'Could not shut down temporary boot of {self.instance}: {session_exit_code}/{shutdown_exit_code}',
exit_code=next(filter(bool, [session_exit_code, shutdown_exit_code])),
)
def __iter__(self) -> Iterator[bytes]:
    """Stream the container session's output chunks as they are produced."""
    if self.session:
        for chunk in self.session:
            yield chunk
def __contains__(self, key: bytes) -> bool:
    """Return True when ``key`` has appeared in the session's output so far."""
    return False if self.session is None else key in self.session
def is_alive(self) -> bool:
    """Whether the booted container session exists and is still running."""
    return self.session is not None and self.session.is_alive()
def SysCommand(self, cmd: list[str], *args, **kwargs) -> SysCommand:  # type: ignore[no-untyped-def]
    """Run ``cmd`` inside the booted container via ``systemd-run``.

    Note: mutates ``cmd[0]`` in place, resolving a bare binary name to an
    absolute path before wrapping the command.
    """
    if cmd[0][0] != '/' and cmd[0][:2] != './':
        # This check is also done in SysCommand & SysCommandWorker.
        # However, that check is done for `machinectl` and not for our chroot command.
        # So this wrapper for SysCommand will do this additionally.
        cmd[0] = locate_binary(cmd[0])

    return SysCommand(['systemd-run', f'--machine={self.container_name}', '--pty', *cmd], *args, **kwargs)
def SysCommandWorker(self, cmd: list[str], *args, **kwargs) -> SysCommandWorker:  # type: ignore[no-untyped-def]
    """Like :meth:`SysCommand`, but returns a worker for interactive use.

    Also resolves a bare binary name in ``cmd[0]`` to an absolute path.
    """
    if cmd[0][0] != '/' and cmd[0][:2] != './':
        cmd[0] = locate_binary(cmd[0])

    return SysCommandWorker(['systemd-run', f'--machine={self.container_name}', '--pty', *cmd], *args, **kwargs)

View File

@@ -0,0 +1,244 @@
import json
import readline
import stat
from pathlib import Path
from archinstall.lib.translationhandler import tr
from archinstall.tui.curses_menu import SelectMenu, Tui
from archinstall.tui.menu_item import MenuItem, MenuItemGroup
from archinstall.tui.result import ResultType
from archinstall.tui.types import Alignment, FrameProperties, Orientation, PreviewStyle
from .args import ArchConfig
from .crypt import encrypt
from .general import JSON, UNSAFE_JSON
from .output import debug, logger, warn
from .utils.util import get_password, prompt_dir
class ConfigurationOutput:
    """Serializes an :class:`ArchConfig` for console preview and for writing
    the archinstall-compatible ``user_configuration.json`` and
    ``user_credentials.json`` files."""

    def __init__(self, config: ArchConfig):
        """
        Configuration output handler to parse the existing
        configuration data structure and prepare for output on the
        console and for saving it to configuration files

        :param config: Archinstall configuration object
        :type config: ArchConfig
        """
        self._config = config
        # Used by save() when no explicit destination directory is given.
        self._default_save_path = logger.directory
        self._user_config_file = Path('user_configuration.json')
        self._user_creds_file = Path('user_credentials.json')

    @property
    def user_configuration_file(self) -> Path:
        return self._user_config_file

    @property
    def user_credentials_file(self) -> Path:
        return self._user_creds_file

    def user_config_to_json(self) -> str:
        """Secret-free configuration as pretty-printed JSON."""
        out = self._config.safe_json()
        return json.dumps(out, indent=4, sort_keys=True, cls=JSON)

    def user_credentials_to_json(self) -> str:
        """Credentials (passwords etc.) as pretty-printed JSON."""
        out = self._config.unsafe_json()
        return json.dumps(out, indent=4, sort_keys=True, cls=UNSAFE_JSON)

    def write_debug(self) -> None:
        """Dump the chosen (secret-free) configuration to the debug log."""
        debug(' -- Chosen configuration --')
        debug(self.user_config_to_json())

    def confirm_config(self) -> bool:
        """Show a yes/no dialog with a JSON preview of the configuration.

        :return: True when the user chose to continue.
        """
        header = f'{tr("The specified configuration will be applied")}. '
        header += tr('Would you like to continue?') + '\n'

        with Tui():
            group = MenuItemGroup.yes_no()
            group.focus_item = MenuItem.yes()
            group.set_preview_for_all(lambda x: self.user_config_to_json())

            result = SelectMenu[bool](
                group,
                header=header,
                alignment=Alignment.CENTER,
                columns=2,
                orientation=Orientation.HORIZONTAL,
                allow_skip=False,
                preview_size='auto',
                preview_style=PreviewStyle.BOTTOM,
                preview_frame=FrameProperties.max(tr('Configuration')),
            ).run()

            if result.item() != MenuItem.yes():
                return False

        return True

    def _is_valid_path(self, dest_path: Path) -> bool:
        """Check that ``dest_path`` is an existing directory; warn otherwise."""
        dest_path_ok = dest_path.exists() and dest_path.is_dir()
        if not dest_path_ok:
            # NOTE(review): the '\n.' at the end of this message looks
            # transposed (probably meant '.\n') — confirm before changing.
            warn(
                f'Destination directory {dest_path.resolve()} does not exist or is not a directory\n.',
                'Configuration files can not be saved',
            )
        return dest_path_ok

    def save_user_config(self, dest_path: Path) -> None:
        """Write user_configuration.json (no secrets) into ``dest_path``."""
        if self._is_valid_path(dest_path):
            target = dest_path / self._user_config_file
            target.write_text(self.user_config_to_json())
            # 0640: read/write for owner, read-only for group.
            target.chmod(stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP)

    def save_user_creds(
        self,
        dest_path: Path,
        password: str | None = None,
    ) -> None:
        """Write user_credentials.json into ``dest_path``.

        :param password: when given, the JSON payload is encrypted with it
            before being written to disk.
        """
        data = self.user_credentials_to_json()

        if password:
            data = encrypt(password, data)

        if self._is_valid_path(dest_path):
            target = dest_path / self._user_creds_file
            target.write_text(data)
            target.chmod(stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP)

    def save(
        self,
        dest_path: Path | None = None,
        creds: bool = False,
        password: str | None = None,
    ) -> None:
        """Save the configuration — and optionally the credentials — to
        ``dest_path`` (defaults to the logger's directory)."""
        save_path = dest_path or self._default_save_path

        if self._is_valid_path(save_path):
            self.save_user_config(save_path)
            if creds:
                self.save_user_creds(save_path, password=password)
def save_config(config: ArchConfig) -> None:
    """Interactive "save configuration" flow.

    Lets the user choose what to save (configuration, credentials, or both),
    where to save it (with tab completion), and whether to encrypt the
    credentials file. Returns without saving when the user skips or declines.
    """

    def preview(item: MenuItem) -> str | None:
        # Preview-pane content for each menu entry. `config_output` is a
        # closure over the variable defined below, before the menu runs.
        match item.value:
            case 'user_config':
                serialized = config_output.user_config_to_json()
                return f'{config_output.user_configuration_file}\n{serialized}'
            case 'user_creds':
                if maybe_serial := config_output.user_credentials_to_json():
                    return f'{config_output.user_credentials_file}\n{maybe_serial}'
                return tr('No configuration')
            case 'all':
                output = [str(config_output.user_configuration_file)]
                config_output.user_credentials_to_json()
                output.append(str(config_output.user_credentials_file))
                return '\n'.join(output)
        return None

    config_output = ConfigurationOutput(config)

    items = [
        MenuItem(
            tr('Save user configuration (including disk layout)'),
            value='user_config',
            preview_action=preview,
        ),
        MenuItem(
            tr('Save user credentials'),
            value='user_creds',
            preview_action=preview,
        ),
        MenuItem(
            tr('Save all'),
            value='all',
            preview_action=preview,
        ),
    ]

    group = MenuItemGroup(items)

    result = SelectMenu[str](
        group,
        allow_skip=True,
        preview_frame=FrameProperties.max(tr('Configuration')),
        preview_size='auto',
        preview_style=PreviewStyle.RIGHT,
    ).run()

    match result.type_:
        case ResultType.Skip:
            return
        case ResultType.Selection:
            save_option = result.get_value()
        case _:
            raise ValueError('Unhandled return type')

    # Enable tab completion for the directory prompt below.
    readline.set_completer_delims('\t\n=')
    readline.parse_and_bind('tab: complete')

    dest_path = prompt_dir(
        tr('Directory'),
        tr('Enter a directory for the configuration(s) to be saved (tab completion enabled)') + '\n',
        allow_skip=True,
    )

    if not dest_path:
        return

    header = tr('Do you want to save the configuration file(s) to {}?').format(dest_path)

    group = MenuItemGroup.yes_no()
    group.focus_item = MenuItem.yes()

    result = SelectMenu(
        group,
        header=header,
        allow_skip=False,
        alignment=Alignment.CENTER,
        columns=2,
        orientation=Orientation.HORIZONTAL,
    ).run()

    match result.type_:
        case ResultType.Selection:
            if result.item() == MenuItem.no():
                return

    debug(f'Saving configuration files to {dest_path.absolute()}')

    # Optionally encrypt the credentials file with a user-supplied password.
    header = tr('Do you want to encrypt the user_credentials.json file?')

    group = MenuItemGroup.yes_no()
    group.focus_item = MenuItem.no()

    result = SelectMenu(
        group,
        header=header,
        allow_skip=False,
        alignment=Alignment.CENTER,
        columns=2,
        orientation=Orientation.HORIZONTAL,
    ).run()

    enc_password: str | None = None

    match result.type_:
        case ResultType.Selection:
            if result.item() == MenuItem.yes():
                password = get_password(
                    text=tr('Credentials file encryption password'),
                    allow_skip=True,
                )

                if password:
                    enc_password = password.plaintext

    match save_option:
        case 'user_config':
            config_output.save_user_config(dest_path)
        case 'user_creds':
            config_output.save_user_creds(dest_path, password=enc_password)
        case 'all':
            config_output.save(dest_path, creds=True, password=enc_password)

View File

@@ -0,0 +1,125 @@
import base64
import ctypes
import os
from pathlib import Path
from cryptography.fernet import Fernet, InvalidToken
from cryptography.hazmat.primitives.kdf.argon2 import Argon2id
from .output import debug
# Bind the libcrypt (libxcrypt on Arch) symbols needed for yescrypt hashing.
libcrypt = ctypes.CDLL('libcrypt.so')

# char *crypt(const char *phrase, const char *setting);
libcrypt.crypt.argtypes = [ctypes.c_char_p, ctypes.c_char_p]
libcrypt.crypt.restype = ctypes.c_char_p

# char *crypt_gensalt(const char *prefix, unsigned long count, const char *rbytes, int nrbytes);
libcrypt.crypt_gensalt.argtypes = [ctypes.c_char_p, ctypes.c_ulong, ctypes.c_char_p, ctypes.c_int]
libcrypt.crypt_gensalt.restype = ctypes.c_char_p

# shadow(5) configuration file consulted for the yescrypt cost factor.
LOGIN_DEFS = Path('/etc/login.defs')
def _search_login_defs(key: str) -> str | None:
    """Look up ``key`` in /etc/login.defs and return its value.

    login.defs entries are separated by arbitrary whitespace (spaces *or*
    tabs), e.g.::

        YESCRYPT_COST_FACTOR	5

    :param key: exact configuration key to look for.
    :return: the value token following the key, or None when the key is
        absent, commented out, or has no value.
    """
    defs = LOGIN_DEFS.read_text()

    for line in defs.split('\n'):
        line = line.strip()

        if line.startswith('#'):
            continue

        # Tokenize on any whitespace and require an exact key match:
        # the previous `startswith(key)` + `split(' ')[1]` broke on
        # tab-separated entries (IndexError) and matched longer keys
        # that merely share the prefix.
        tokens = line.split()
        if len(tokens) >= 2 and tokens[0] == key:
            return tokens[1]

    return None
def crypt_gen_salt(prefix: str | bytes, rounds: int) -> bytes:
    """Generate a crypt(3) setting string via libcrypt's crypt_gensalt().

    :param prefix: hash-method prefix, e.g. ``'$y$'`` for yescrypt.
    :param rounds: cost factor forwarded to crypt_gensalt().
    :raises ValueError: if libcrypt returns NULL (e.g. unsupported prefix
        or rounds value).
    """
    if isinstance(prefix, str):
        prefix = prefix.encode('utf-8')

    setting = libcrypt.crypt_gensalt(prefix, rounds, None, 0)
    if setting is None:
        raise ValueError(f'crypt_gensalt() returned NULL for prefix {prefix!r} and rounds {rounds}')

    return setting
def crypt_yescrypt(plaintext: str) -> str:
    """
    Hash ``plaintext`` with yescrypt, mirroring what chpasswd would produce.

    By default chpasswd in Arch uses PAM to hash the password with crypt_yescrypt;
    the PAM code https://github.com/linux-pam/linux-pam/blob/master/modules/pam_unix/support.c
    shows that the hashing rounds are determined from YESCRYPT_COST_FACTOR in /etc/login.defs.
    If no value was specified (or commented out) a default of 5 is chosen.

    :raises ValueError: if libcrypt's crypt() returns NULL.
    """
    value = _search_login_defs('YESCRYPT_COST_FACTOR')

    if value is not None:
        rounds = int(value)
        # Clamp to yescrypt's supported cost range.
        if rounds < 3:
            rounds = 3
        elif rounds > 11:
            rounds = 11
    else:
        rounds = 5

    debug(f'Creating yescrypt hash with rounds {rounds}')

    enc_plaintext = plaintext.encode('utf-8')
    salt = crypt_gen_salt('$y$', rounds)

    crypt_hash = libcrypt.crypt(enc_plaintext, salt)
    if crypt_hash is None:
        raise ValueError('crypt() returned NULL')

    return crypt_hash.decode('utf-8')
def _get_fernet(salt: bytes, password: str) -> Fernet:
    """Derive a Fernet cipher from ``password`` and ``salt`` using Argon2id.

    The same (salt, password) pair always yields the same key, which is what
    lets decrypt() reconstruct the cipher from the stored salt.
    """
    # https://cryptography.io/en/latest/hazmat/primitives/key-derivation-functions/#argon2id
    kdf = Argon2id(
        salt=salt,
        length=32,
        iterations=1,
        lanes=4,
        memory_cost=64 * 1024,
        ad=None,
        secret=None,
    )

    # Fernet requires a urlsafe-base64 encoded 32-byte key.
    key = base64.urlsafe_b64encode(
        kdf.derive(
            password.encode('utf-8'),
        ),
    )

    return Fernet(key)
def encrypt(password: str, data: str) -> str:
    """Encrypt ``data`` with ``password`` into the serialized
    ``$argon2id$<b64 salt>$<b64 token>`` format understood by decrypt()."""
    salt = os.urandom(16)
    fernet = _get_fernet(salt, password)

    token_b64 = base64.urlsafe_b64encode(fernet.encrypt(data.encode('utf-8'))).decode('utf-8')
    salt_b64 = base64.urlsafe_b64encode(salt).decode('utf-8')

    return '$'.join(('', 'argon2id', salt_b64, token_b64))
def decrypt(data: str, password: str) -> str:
    """Decrypt a payload produced by :func:`encrypt`.

    Expected format: ``$argon2id$<b64 salt>$<b64 token>``.

    :param data: serialized ciphertext string.
    :param password: password the payload was encrypted with.
    :raises ValueError: if the payload is malformed, the algorithm tag is
        unsupported, or the password is wrong.
    """
    # Guard the unpack: a malformed payload previously surfaced as a bare
    # "not enough values to unpack" ValueError.
    try:
        _, algo, encoded_salt, encoded_token = data.split('$')
    except ValueError:
        raise ValueError('Unexpected encrypted data format') from None

    # Validate the algorithm tag before doing any base64/KDF work.
    if algo != 'argon2id':
        raise ValueError(f'Unsupported algorithm {algo!r}')

    salt = base64.urlsafe_b64decode(encoded_salt)
    token = base64.urlsafe_b64decode(encoded_token)

    f = _get_fernet(salt, password)

    try:
        decrypted = f.decrypt(token)
    except InvalidToken:
        # Suppress the cryptography-internal traceback; the caller only
        # needs to know the password did not match.
        raise ValueError('Invalid password') from None

    return decrypted.decode('utf-8')

View File

@@ -0,0 +1,851 @@
from __future__ import annotations
import json
import logging
import os
import time
from collections.abc import Iterable
from pathlib import Path
from typing import Literal, overload
from parted import Device, Disk, DiskException, FileSystem, Geometry, IOException, Partition, PartitionException, freshDisk, getAllDevices, getDevice, newDisk
from ..exceptions import DiskError, SysCallError, UnknownFilesystemFormat
from ..general import SysCommand, SysCommandWorker
from ..luks import Luks2
from ..models.device import (
DEFAULT_ITER_TIME,
BDevice,
BtrfsMountOption,
DeviceModification,
DiskEncryption,
FilesystemType,
LsblkInfo,
LvmGroupInfo,
LvmPVInfo,
LvmVolume,
LvmVolumeGroup,
LvmVolumeInfo,
ModificationStatus,
PartitionFlag,
PartitionGUID,
PartitionModification,
PartitionTable,
SectorSize,
Size,
SubvolumeModification,
Unit,
_BtrfsSubvolumeInfo,
_DeviceInfo,
_PartitionInfo,
)
from ..models.users import Password
from ..output import debug, error, info, log
from ..utils.util import is_subpath
from .utils import (
find_lsblk_info,
get_all_lsblk_info,
get_lsblk_info,
umount,
)
class DeviceHandler:
    """Central registry of block devices: scans the system with lsblk and
    pyparted and exposes partitioning, formatting, encryption, LVM and btrfs
    helpers used by the installer."""

    # Scratch mountpoint used when a btrfs device must be mounted temporarily.
    _TMP_BTRFS_MOUNT = Path('/mnt/arch_btrfs')

    def __init__(self) -> None:
        # Populated by load_devices(): device path -> BDevice.
        self._devices: dict[Path, BDevice] = {}
        self._partition_table = PartitionTable.default()
        self.load_devices()

    @property
    def devices(self) -> list[BDevice]:
        """All block devices discovered by the last scan."""
        return list(self._devices.values())

    @property
    def partition_table(self) -> PartitionTable:
        """Default partition table type used for disks without one."""
        return self._partition_table
def load_devices(self) -> None:
    """Re-scan the system and rebuild the path -> BDevice mapping.

    Skips ROM drives and the archiso airootfs loop device. Disks without a
    partition table get a fresh in-memory table of the default type.
    """
    block_devices = {}

    # Let udev finish processing events before reading lsblk.
    self.udev_sync()
    all_lsblk_info = get_all_lsblk_info()
    devices = getAllDevices()
    devices.extend(self.get_loop_devices())

    archiso_mountpoint = Path('/run/archiso/airootfs')

    for device in devices:
        dev_lsblk_info = find_lsblk_info(device.path, all_lsblk_info)

        if not dev_lsblk_info:
            debug(f'Device lsblk info not found: {device.path}')
            continue

        if dev_lsblk_info.type == 'rom':
            continue

        # exclude archiso loop device
        if dev_lsblk_info.mountpoint == archiso_mountpoint:
            continue

        try:
            if dev_lsblk_info.pttype:
                disk = newDisk(device)
            else:
                # No partition table yet: create one in memory only.
                disk = freshDisk(device, self.partition_table.value)
        except DiskException as err:
            debug(f'Unable to get disk from {device.path}: {err}')
            continue

        device_info = _DeviceInfo.from_disk(disk)
        partition_infos = []

        for partition in disk.partitions:
            lsblk_info = find_lsblk_info(partition.path, dev_lsblk_info.children)

            if not lsblk_info:
                debug(f'Partition lsblk info not found: {partition.path}')
                continue

            fs_type = self._determine_fs_type(partition, lsblk_info)
            subvol_infos = []

            if fs_type == FilesystemType.Btrfs:
                subvol_infos = self.get_btrfs_info(partition.path, lsblk_info)

            partition_infos.append(
                _PartitionInfo.from_partition(
                    partition,
                    lsblk_info,
                    fs_type,
                    subvol_infos,
                ),
            )

        block_device = BDevice(disk, device_info, partition_infos)
        block_devices[block_device.device_info.path] = block_device

    self._devices = block_devices
@staticmethod
def get_loop_devices() -> list[Device]:
    """Return parted Device objects for all active loop devices.

    Best effort: failures of `losetup` or of individual device lookups are
    logged and skipped.
    """
    devices = []

    try:
        loop_devices = SysCommand(['losetup', '-a'])
    except SysCallError as err:
        debug(f'Failed to get loop devices: {err}')
    else:
        for ld_info in str(loop_devices).splitlines():
            # `losetup -a` lines look like "/dev/loop0: ...: (/path/image)".
            try:
                loop_device_path, _ = ld_info.split(':', maxsplit=1)
            except ValueError:
                continue

            try:
                loop_device = getDevice(loop_device_path)
            except IOException as err:
                debug(f'Failed to get loop device: {err}')
            else:
                devices.append(loop_device)

    return devices
def _determine_fs_type(
    self,
    partition: Partition,
    lsblk_info: LsblkInfo | None = None,
) -> FilesystemType | None:
    """Map a parted partition (with lsblk as fallback) to a FilesystemType.

    :return: None when no filesystem is present or it is not recognized.
    """
    try:
        if partition.fileSystem:
            # Swap needs special-casing: parted reports its own type string.
            if partition.fileSystem.type == FilesystemType.LinuxSwap.parted_value:
                return FilesystemType.LinuxSwap
            return FilesystemType(partition.fileSystem.type)
        elif lsblk_info is not None:
            return FilesystemType(lsblk_info.fstype) if lsblk_info.fstype else None
        return None
    except ValueError:
        debug(f'Could not determine the filesystem: {partition.fileSystem}')

    return None
def get_device(self, path: Path) -> BDevice | None:
    """Look up a scanned block device by path, or None when unknown."""
    return self._devices.get(path)
def get_device_by_partition_path(self, partition_path: Path) -> BDevice | None:
    """Return the block device that owns the partition at ``partition_path``."""
    partition = self.find_partition(partition_path)

    if partition:
        device: Device = partition.disk.device
        return self.get_device(Path(device.path))

    return None
def find_partition(self, path: Path) -> _PartitionInfo | None:
    """Return the partition info whose device path equals ``path``, or None."""
    wanted = str(path)

    for dev in self._devices.values():
        for part_info in dev.partition_infos:
            if str(part_info.path) == wanted:
                return part_info

    return None
def get_parent_device_path(self, dev_path: Path) -> Path:
    """Return the parent block device of a partition (via lsblk's pkname)."""
    lsblk = get_lsblk_info(dev_path)
    return Path(f'/dev/{lsblk.pkname}')

def get_unique_path_for_device(self, dev_path: Path) -> Path | None:
    """Return a stable /dev/disk/by-id alias for ``dev_path``.

    Prefers WWN / NVMe-EUI based ids over other by-id symlinks.
    """
    paths = Path('/dev/disk/by-id').glob('*')
    linked_targets = {p.resolve(): p for p in paths}
    linked_wwn_targets = {p: linked_targets[p] for p in linked_targets if p.name.startswith('wwn-') or p.name.startswith('nvme-eui.')}

    if dev_path in linked_wwn_targets:
        return linked_wwn_targets[dev_path]

    if dev_path in linked_targets:
        return linked_targets[dev_path]

    return None

def get_uuid_for_path(self, path: Path) -> str | None:
    """Return the PARTUUID of the partition at ``path`` (despite the name)."""
    partition = self.find_partition(path)
    return partition.partuuid if partition else None
def get_btrfs_info(
    self,
    dev_path: Path,
    lsblk_info: LsblkInfo | None = None,
) -> list[_BtrfsSubvolumeInfo]:
    """List btrfs subvolumes on ``dev_path`` together with their mountpoints.

    Mounts the device temporarily at _TMP_BTRFS_MOUNT when it is not
    already mounted, and unmounts it again at the end.
    """
    if not lsblk_info:
        lsblk_info = get_lsblk_info(dev_path)

    subvol_infos: list[_BtrfsSubvolumeInfo] = []

    if not lsblk_info.mountpoint:
        self.mount(dev_path, self._TMP_BTRFS_MOUNT, create_target_mountpoint=True)
        mountpoint = self._TMP_BTRFS_MOUNT
    else:
        # when multiple subvolumes are mounted then the lsblk output may look like
        # "mountpoint": "/mnt/archinstall/var/log"
        # "mountpoints": ["/mnt/archinstall/var/log", "/mnt/archinstall/home", ..]
        # so we'll determine the minimum common path and assume that's the root
        try:
            common_path = os.path.commonpath(lsblk_info.mountpoints)
        except ValueError:
            return subvol_infos

        mountpoint = Path(common_path)

    try:
        result = SysCommand(f'btrfs subvolume list {mountpoint}').decode()
    except SysCallError as err:
        # NOTE(review): when the temporary mount above succeeded, this
        # early return leaves the device mounted — confirm intended.
        debug(f'Failed to read btrfs subvolume information: {err}')
        return subvol_infos

    # It is assumed that lsblk will contain the fields as
    # "mountpoints": ["/mnt/archinstall/log", "/mnt/archinstall/home", "/mnt/archinstall", ...]
    # "fsroots": ["/@log", "/@home", "/@"...]
    # we'll thereby map the fsroot, which are the mounted filesystem roots
    # to the corresponding mountpoints
    btrfs_subvol_info = dict(zip(lsblk_info.fsroots, lsblk_info.mountpoints))

    # ID 256 gen 16 top level 5 path @
    for line in result.splitlines():
        # expected output format:
        # ID 257 gen 8 top level 5 path @home
        name = Path(line.split(' ')[-1])
        sub_vol_mountpoint = btrfs_subvol_info.get('/' / name, None)
        subvol_infos.append(_BtrfsSubvolumeInfo(name, sub_vol_mountpoint))

    if not lsblk_info.mountpoint:
        umount(dev_path)

    return subvol_infos
def format(
    self,
    fs_type: FilesystemType,
    path: Path,
    # NOTE(review): despite the name these are extra mkfs/mkswap options,
    # not parted options; the mutable default is never mutated here.
    additional_parted_options: list[str] = [],
) -> None:
    """Create a filesystem of ``fs_type`` on the device at ``path``.

    :raises UnknownFilesystemFormat: for unsupported filesystem types.
    :raises DiskError: when the mkfs/mkswap invocation fails.
    """
    mkfs_type = fs_type.value
    command = None
    options = []

    match fs_type:
        case FilesystemType.Btrfs | FilesystemType.Xfs:
            # Force overwrite
            options.append('-f')
        case FilesystemType.F2fs:
            options.append('-f')
            options.extend(('-O', 'extra_attr'))
        case FilesystemType.Ext2 | FilesystemType.Ext3 | FilesystemType.Ext4:
            # Force create
            options.append('-F')
        case FilesystemType.Fat12 | FilesystemType.Fat16 | FilesystemType.Fat32:
            mkfs_type = 'fat'
            # Set FAT size
            options.extend(('-F', fs_type.value.removeprefix(mkfs_type)))
        case FilesystemType.Ntfs:
            # Skip zeroing and bad sector check
            options.append('--fast')
        case FilesystemType.LinuxSwap:
            command = 'mkswap'
        case _:
            raise UnknownFilesystemFormat(f'Filetype "{fs_type.value}" is not supported')

    if not command:
        command = f'mkfs.{mkfs_type}'

    cmd = [command, *options, *additional_parted_options, str(path)]
    debug('Formatting filesystem:', ' '.join(cmd))

    try:
        SysCommand(cmd)
    except SysCallError as err:
        msg = f'Could not format {path} with {fs_type.value}: {err.message}'
        error(msg)
        raise DiskError(msg) from err
def encrypt(
    self,
    dev_path: Path,
    mapper_name: str | None,
    enc_password: Password | None,
    lock_after_create: bool = True,
    iter_time: int = DEFAULT_ITER_TIME,
) -> Luks2:
    """LUKS2-encrypt ``dev_path`` and verify it can be unlocked.

    :param lock_after_create: when True, the container is locked again
        before returning.
    :raises DiskError: if the freshly created container fails to unlock.
    """
    luks_handler = Luks2(
        dev_path,
        mapper_name=mapper_name,
        password=enc_password,
    )

    key_file = luks_handler.encrypt(iter_time=iter_time)

    self.udev_sync()
    luks_handler.unlock(key_file=key_file)

    if not luks_handler.mapper_dev:
        raise DiskError('Failed to unlock luks device')

    if lock_after_create:
        debug(f'luks2 locking device: {dev_path}')
        luks_handler.lock()

    return luks_handler

def format_encrypted(
    self,
    dev_path: Path,
    mapper_name: str | None,
    fs_type: FilesystemType,
    enc_conf: DiskEncryption,
) -> None:
    """LUKS2-encrypt ``dev_path``, format the mapper device with
    ``fs_type``, then lock the container again.

    :raises ValueError: when ``enc_conf`` carries no encryption password.
    :raises DiskError: if the container fails to unlock.
    """
    if not enc_conf.encryption_password:
        raise ValueError('No encryption password provided')

    luks_handler = Luks2(
        dev_path,
        mapper_name=mapper_name,
        password=enc_conf.encryption_password,
    )

    key_file = luks_handler.encrypt(iter_time=enc_conf.iter_time)

    self.udev_sync()
    luks_handler.unlock(key_file=key_file)

    if not luks_handler.mapper_dev:
        raise DiskError('Failed to unlock luks device')

    info(f'luks2 formatting mapper dev: {luks_handler.mapper_dev}')
    self.format(fs_type, luks_handler.mapper_dev)

    info(f'luks2 locking device: {dev_path}')
    luks_handler.lock()
def _lvm_info(
    self,
    cmd: str,
    info_type: Literal['lv', 'vg', 'pvseg'],
) -> LvmVolumeInfo | LvmGroupInfo | LvmPVInfo | None:
    """Run an LVM reporting command (JSON output) and parse one entry.

    :raises ValueError: when the report does not contain exactly one entry
        of ``info_type`` (used by the retry wrapper below).
    """
    raw_info = SysCommand(cmd).decode().split('\n')

    # for whatever reason the output sometimes contains
    # "File descriptor X leaked on vgs invocation" lines; strip them
    # so the remainder is valid JSON
    data = '\n'.join([raw for raw in raw_info if 'File descriptor' not in raw])

    debug(f'LVM info: {data}')

    reports = json.loads(data)

    for report in reports['report']:
        if len(report[info_type]) != 1:
            raise ValueError('Report does not contain any entry')

        entry = report[info_type][0]

        match info_type:
            case 'pvseg':
                return LvmPVInfo(
                    pv_name=Path(entry['pv_name']),
                    lv_name=entry['lv_name'],
                    vg_name=entry['vg_name'],
                )
            case 'lv':
                return LvmVolumeInfo(
                    lv_name=entry['lv_name'],
                    vg_name=entry['vg_name'],
                    # sizes are reported with a trailing unit suffix
                    # (--unit B), hence the [:-1] strip
                    lv_size=Size(int(entry['lv_size'][:-1]), Unit.B, SectorSize.default()),
                )
            case 'vg':
                return LvmGroupInfo(
                    vg_uuid=entry['vg_uuid'],
                    vg_size=Size(int(entry['vg_size'][:-1]), Unit.B, SectorSize.default()),
                )

    return None

@overload
def _lvm_info_with_retry(self, cmd: str, info_type: Literal['lv']) -> LvmVolumeInfo | None: ...

@overload
def _lvm_info_with_retry(self, cmd: str, info_type: Literal['vg']) -> LvmGroupInfo | None: ...

@overload
def _lvm_info_with_retry(self, cmd: str, info_type: Literal['pvseg']) -> LvmPVInfo | None: ...

def _lvm_info_with_retry(
    self,
    cmd: str,
    info_type: Literal['lv', 'vg', 'pvseg'],
) -> LvmVolumeInfo | LvmGroupInfo | LvmPVInfo | None:
    """Retry :meth:`_lvm_info` until the entry appears.

    NOTE(review): retries forever with a 3s sleep; if the entry never
    materializes this never returns — confirm a retry cap is not wanted.
    """
    while True:
        try:
            return self._lvm_info(cmd, info_type)
        except ValueError:
            time.sleep(3)

def lvm_vol_info(self, lv_name: str) -> LvmVolumeInfo | None:
    """Query info for the logical volume named ``lv_name``."""
    cmd = f'lvs --reportformat json --unit B -S lv_name={lv_name}'
    return self._lvm_info_with_retry(cmd, 'lv')

def lvm_group_info(self, vg_name: str) -> LvmGroupInfo | None:
    """Query info for the volume group named ``vg_name``."""
    cmd = f'vgs --reportformat json --unit B -o vg_name,vg_uuid,vg_size -S vg_name={vg_name}'
    return self._lvm_info_with_retry(cmd, 'vg')

def lvm_pvseg_info(self, vg_name: str, lv_name: str) -> LvmPVInfo | None:
    """Query the physical volume backing ``lv_name`` in ``vg_name``."""
    cmd = f'pvs --segments -o+lv_name,vg_name -S vg_name={vg_name},lv_name={lv_name} --reportformat json '
    return self._lvm_info_with_retry(cmd, 'pvseg')
def lvm_vol_change(self, vol: LvmVolume, activate: bool) -> None:
    """Activate or deactivate a logical volume via lvchange."""
    active_flag = 'y' if activate else 'n'
    cmd = f'lvchange -a {active_flag} {vol.safe_dev_path}'

    debug(f'lvchange volume: {cmd}')
    SysCommand(cmd)

def lvm_export_vg(self, vg: LvmVolumeGroup) -> None:
    """Export a volume group (vgexport)."""
    cmd = f'vgexport {vg.name}'
    debug(f'vgexport: {cmd}')
    SysCommand(cmd)

def lvm_import_vg(self, vg: LvmVolumeGroup) -> None:
    """Import a volume group (vgimport)."""
    cmd = f'vgimport {vg.name}'
    debug(f'vgimport: {cmd}')
    SysCommand(cmd)

def lvm_vol_reduce(self, vol_path: Path, amount: Size) -> None:
    """Shrink the logical volume at ``vol_path`` by ``amount``."""
    val = amount.format_size(Unit.B, include_unit=False)
    cmd = f'lvreduce -L -{val}B {vol_path}'

    debug(f'Reducing LVM volume size: {cmd}')
    SysCommand(cmd)

def lvm_pv_create(self, pvs: Iterable[Path]) -> None:
    """Initialize the given devices as LVM physical volumes."""
    cmd = 'pvcreate ' + ' '.join([str(pv) for pv in pvs])
    debug(f'Creating LVM PVS: {cmd}')

    worker = SysCommandWorker(cmd)
    worker.poll()
    # Auto-confirm pvcreate's interactive prompt.
    worker.write(b'y\n', line_ending=False)

def lvm_vg_create(self, pvs: Iterable[Path], vg_name: str) -> None:
    """Create a volume group ``vg_name`` spanning the given PVs."""
    pvs_str = ' '.join([str(pv) for pv in pvs])
    cmd = f'vgcreate --yes {vg_name} {pvs_str}'
    debug(f'Creating LVM group: {cmd}')

    worker = SysCommandWorker(cmd)
    worker.poll()
    worker.write(b'y\n', line_ending=False)

def lvm_vol_create(self, vg_name: str, volume: LvmVolume, offset: Size | None = None) -> None:
    """Create a logical volume inside ``vg_name``.

    Mutates ``volume``: fills in its vg_name and resulting dev_path.

    :param offset: when given, the volume is created ``offset`` smaller
        than its nominal length.
    """
    if offset is not None:
        length = volume.length - offset
    else:
        length = volume.length

    length_str = length.format_size(Unit.B, include_unit=False)
    cmd = f'lvcreate --yes -L {length_str}B {vg_name} -n {volume.name}'

    debug(f'Creating volume: {cmd}')

    worker = SysCommandWorker(cmd)
    worker.poll()
    worker.write(b'y\n', line_ending=False)

    volume.vg_name = vg_name
    volume.dev_path = Path(f'/dev/{vg_name}/{volume.name}')
def _setup_partition(
    self,
    part_mod: PartitionModification,
    block_device: BDevice,
    disk: Disk,
    requires_delete: bool,
) -> None:
    """Apply one partition modification to ``disk`` (delete and/or create).

    On successful creation, ``part_mod.dev_path`` is updated with the new
    partition's device path.

    :raises DiskError: when a partition to delete cannot be found or the
        new partition cannot be added (e.g. overlapping sectors).
    """
    # when we require a delete and the partition to be (re)created
    # already exists then we have to delete it first
    if requires_delete and part_mod.status in [ModificationStatus.Modify, ModificationStatus.Delete]:
        info(f'Delete existing partition: {part_mod.safe_dev_path}')
        part_info = self.find_partition(part_mod.safe_dev_path)

        if not part_info:
            raise DiskError(f'No partition for dev path found: {part_mod.safe_dev_path}')

        disk.deletePartition(part_info.partition)

    if part_mod.status == ModificationStatus.Delete:
        return

    start_sector = part_mod.start.convert(
        Unit.sectors,
        block_device.device_info.sector_size,
    )
    length_sector = part_mod.length.convert(
        Unit.sectors,
        block_device.device_info.sector_size,
    )

    geometry = Geometry(
        device=block_device.disk.device,
        start=start_sector.value,
        length=length_sector.value,
    )

    fs_value = part_mod.safe_fs_type.parted_value
    filesystem = FileSystem(type=fs_value, geometry=geometry)

    partition = Partition(
        disk=disk,
        type=part_mod.type.get_partition_code(),
        fs=filesystem,
        geometry=geometry,
    )

    for flag in part_mod.flags:
        partition.setFlag(flag.flag_id)

    debug(f'\tType: {part_mod.type.value}')
    debug(f'\tFilesystem: {fs_value}')
    debug(f'\tGeometry: {start_sector.value} start sector, {length_sector.value} length')

    try:
        disk.addPartition(partition=partition, constraint=disk.device.optimalAlignedConstraint)
    except PartitionException as ex:
        raise DiskError(f'Unable to add partition, most likely due to overlapping sectors: {ex}') from ex

    if disk.type == PartitionTable.GPT.value:
        # Tag root/home with discoverable-partitions GUIDs on GPT disks.
        if part_mod.is_root():
            partition.type_uuid = PartitionGUID.LINUX_ROOT_X86_64.bytes
        elif PartitionFlag.LINUX_HOME not in part_mod.flags and part_mod.is_home():
            partition.setFlag(PartitionFlag.LINUX_HOME.flag_id)

    # the partition has a path now that it has been added
    part_mod.dev_path = Path(partition.path)
def fetch_part_info(self, path: Path) -> LsblkInfo:
    """Fetch lsblk info for a newly created partition.

    :raises DiskError: when the partition number, PARTUUID or UUID cannot
        be determined yet (e.g. before udev has caught up).
    """
    lsblk_info = get_lsblk_info(path)

    if not lsblk_info.partn:
        debug(f'Unable to determine new partition number: {path}\n{lsblk_info}')
        raise DiskError(f'Unable to determine new partition number: {path}')

    if not lsblk_info.partuuid:
        debug(f'Unable to determine new partition uuid: {path}\n{lsblk_info}')
        raise DiskError(f'Unable to determine new partition uuid: {path}')

    if not lsblk_info.uuid:
        debug(f'Unable to determine new uuid: {path}\n{lsblk_info}')
        raise DiskError(f'Unable to determine new uuid: {path}')

    debug(f'partition information found: {lsblk_info.model_dump_json()}')

    return lsblk_info
def create_lvm_btrfs_subvolumes(
    self,
    path: Path,
    btrfs_subvols: list[SubvolumeModification],
    mount_options: list[str],
) -> None:
    """Create btrfs subvolumes on an (LVM-backed) btrfs volume.

    Temporarily mounts ``path``, creates each subvolume, applies chattr
    attributes implied by the mount options, then unmounts again.

    :raises DiskError: when a chattr attribute cannot be set.
    """
    info(f'Creating subvolumes: {path}')

    self.mount(path, self._TMP_BTRFS_MOUNT, create_target_mountpoint=True)

    # Sort by name so parent subvolumes are created before their children.
    for sub_vol in sorted(btrfs_subvols, key=lambda x: x.name):
        debug(f'Creating subvolume: {sub_vol.name}')

        subvol_path = self._TMP_BTRFS_MOUNT / sub_vol.name

        SysCommand(f'btrfs subvolume create -p {subvol_path}')

        if BtrfsMountOption.nodatacow.value in mount_options:
            try:
                SysCommand(f'chattr +C {subvol_path}')
            except SysCallError as err:
                raise DiskError(f'Could not set nodatacow attribute at {subvol_path}: {err}')

        if BtrfsMountOption.compress.value in mount_options:
            try:
                SysCommand(f'chattr +c {subvol_path}')
            except SysCallError as err:
                raise DiskError(f'Could not set compress attribute at {subvol_path}: {err}')

    umount(path)
def create_btrfs_volumes(
    self,
    part_mod: PartitionModification,
    enc_conf: DiskEncryption | None = None,
) -> None:
    """Create the btrfs subvolumes configured on ``part_mod``.

    Unlocks the partition first when it is part of the encryption config,
    and locks it again afterwards.

    :raises ValueError: when an encrypted partition has no mapper name.
    :raises DiskError: when the LUKS container fails to unlock.
    """
    info(f'Creating subvolumes: {part_mod.safe_dev_path}')

    # unlock the partition first if it's encrypted
    if enc_conf is not None and part_mod in enc_conf.partitions:
        if not part_mod.mapper_name:
            raise ValueError('No device path specified for modification')

        luks_handler = self.unlock_luks2_dev(
            part_mod.safe_dev_path,
            part_mod.mapper_name,
            enc_conf.encryption_password,
        )

        if not luks_handler.mapper_dev:
            raise DiskError('Failed to unlock luks device')

        dev_path = luks_handler.mapper_dev
    else:
        luks_handler = None
        dev_path = part_mod.safe_dev_path

    self.mount(
        dev_path,
        self._TMP_BTRFS_MOUNT,
        create_target_mountpoint=True,
        options=part_mod.mount_options,
    )

    # Sort by name so parent subvolumes are created before their children.
    for sub_vol in sorted(part_mod.btrfs_subvols, key=lambda x: x.name):
        debug(f'Creating subvolume: {sub_vol.name}')

        subvol_path = self._TMP_BTRFS_MOUNT / sub_vol.name

        SysCommand(f'btrfs subvolume create -p {subvol_path}')

    umount(dev_path)

    if luks_handler is not None and luks_handler.mapper_dev is not None:
        luks_handler.lock()
def unlock_luks2_dev(
    self,
    dev_path: Path,
    mapper_name: str,
    enc_password: Password | None,
) -> Luks2:
    """Return a Luks2 handler for ``dev_path``, unlocking it if needed."""
    luks_handler = Luks2(dev_path, mapper_name=mapper_name, password=enc_password)

    if not luks_handler.is_unlocked():
        luks_handler.unlock()

    return luks_handler

def umount_all_existing(self, device_path: Path) -> None:
    """Unmount (or lock, for LUKS) every partition of ``device_path``."""
    debug(f'Unmounting all existing partitions: {device_path}')

    existing_partitions = self._devices[device_path].partition_infos

    for partition in existing_partitions:
        debug(f'Unmounting: {partition.path}')

        # un-mount for existing encrypted partitions
        if partition.fs_type == FilesystemType.Crypto_luks:
            Luks2(partition.path).lock()
        else:
            umount(partition.path, recursive=True)
def partition(
    self,
    modification: DeviceModification,
    partition_table: PartitionTable | None = None,
) -> None:
    """
    Create a partition table on the block device and create all partitions.

    :param partition_table: table type to use when wiping; defaults to the
        handler's default table type.
    :raises DiskError: for too many MBR partitions or partitioning failures.
    """
    partition_table = partition_table or self.partition_table

    # WARNING: the entire device will be wiped and all data lost
    if modification.wipe:
        # NOTE(review): MBR technically allows 4 primary partitions; this
        # caps at 3 — confirm whether one slot is deliberately reserved.
        if partition_table.is_mbr() and len(modification.partitions) > 3:
            raise DiskError('Too many partitions on disk, MBR disks can only have 3 primary partitions')

        self.wipe_dev(modification.device)
        disk = freshDisk(modification.device.disk.device, partition_table.value)
    else:
        info(f'Use existing device: {modification.device_path}')
        disk = modification.device.disk

    info(f'Creating partitions: {modification.device_path}')

    # don't touch existing partitions
    filtered_part = [p for p in modification.partitions if not p.exists()]

    for part_mod in filtered_part:
        # if the entire disk got nuked then we don't have to delete
        # any existing partitions anymore because they're all gone already
        requires_delete = modification.wipe is False
        self._setup_partition(part_mod, modification.device, disk, requires_delete=requires_delete)

    disk.commit()
@staticmethod
def swapon(path: Path) -> None:
    """Enable the swap device at ``path``.

    :raises DiskError: when swapon fails.
    """
    try:
        SysCommand(['swapon', str(path)])
    except SysCallError as err:
        raise DiskError(f'Could not enable swap {path}:\n{err.message}')

def mount(
    self,
    dev_path: Path,
    target_mountpoint: Path,
    mount_fs: str | None = None,
    create_target_mountpoint: bool = True,
    # NOTE(review): mutable default; it is only read here, never mutated.
    options: list[str] = [],
) -> None:
    """Mount ``dev_path`` at ``target_mountpoint``.

    No-op when the device is already mounted there.

    :raises ValueError: when the mountpoint does not exist and creation
        was disabled (or failed).
    :raises DiskError: when the mount command fails.
    """
    if create_target_mountpoint and not target_mountpoint.exists():
        target_mountpoint.mkdir(parents=True, exist_ok=True)

    if not target_mountpoint.exists():
        raise ValueError('Target mountpoint does not exist')

    lsblk_info = get_lsblk_info(dev_path)
    if target_mountpoint in lsblk_info.mountpoints:
        info(f'Device already mounted at {target_mountpoint}')
        return

    cmd = ['mount']

    if len(options):
        cmd.extend(('-o', ','.join(options)))
    if mount_fs:
        cmd.extend(('-t', mount_fs))

    cmd.extend((str(dev_path), str(target_mountpoint)))

    command = ' '.join(cmd)

    debug(f'Mounting {dev_path}: {command}')

    try:
        SysCommand(command)
    except SysCallError as err:
        raise DiskError(f'Could not mount {dev_path}: {command}\n{err.message}')
def detect_pre_mounted_mods(self, base_mountpoint: Path) -> list[DeviceModification]:
    """Build modifications for partitions already mounted below
    ``base_mountpoint``, rebasing mountpoints to be rooted at '/'.

    Used for "pre-mounted" installs where the user has set the target
    filesystem up manually.
    """
    part_mods: dict[Path, list[PartitionModification]] = {}

    for device in self.devices:
        for part_info in device.partition_infos:
            for mountpoint in part_info.mountpoints:
                if is_subpath(mountpoint, base_mountpoint):
                    path = Path(part_info.disk.device.path)
                    part_mods.setdefault(path, [])

                    part_mod = PartitionModification.from_existing_partition(part_info)
                    if part_mod.mountpoint:
                        # Strip the base prefix: /mnt/foo/home -> /home.
                        part_mod.mountpoint = mountpoint.root / mountpoint.relative_to(base_mountpoint)
                    else:
                        for subvol in part_mod.btrfs_subvols:
                            if sm := subvol.mountpoint:
                                subvol.mountpoint = sm.root / sm.relative_to(base_mountpoint)

                    part_mods[path].append(part_mod)
                    break

    device_mods: list[DeviceModification] = []
    for device_path, mods in part_mods.items():
        device_mod = DeviceModification(self._devices[device_path], False, mods)
        device_mods.append(device_mod)

    return device_mods
def partprobe(self, path: Path | None = None) -> None:
    """Ask the kernel to re-read partition tables (all disks, or just ``path``).

    Failures are logged and swallowed — callers proceed regardless.
    """
    if path is not None:
        command = f'partprobe {path}'
    else:
        command = 'partprobe'

    try:
        debug(f'Calling partprobe: {command}')
        SysCommand(command)
    except SysCallError as err:
        # This specific warning is benign (a subsequent udev settle or
        # reboot picks the change up), so log it quietly.
        if 'have been written, but we have been unable to inform the kernel of the change' in str(err):
            log(f'Partprobe was not able to inform the kernel of the new disk state (ignoring error): {err}', fg='gray', level=logging.INFO)
        else:
            error(f'"{command}" failed to run (continuing anyway): {err}')
    def _wipe(self, dev_path: Path) -> None:
        """
        Wipe a device (partition or otherwise) of meta-data, be it file system, LVM, etc.
        Only the first 1 KiB is zeroed — enough to defeat most auto-discovery
        tools, but this is not a secure erase.
        @param dev_path: Device path of the partition to be wiped.
        @type dev_path: Path
        """
        with open(dev_path, 'wb') as p:
            p.write(bytearray(1024))
def wipe_dev(self, block_device: BDevice) -> None:
"""
Wipe the block device of meta-data, be it file system, LVM, etc.
This is not intended to be secure, but rather to ensure that
auto-discovery tools don't recognize anything here.
"""
info(f'Wiping partitions and metadata: {block_device.device_info.path}')
for partition in block_device.partition_infos:
luks = Luks2(partition.path)
if luks.isLuks():
luks.erase()
self._wipe(partition.path)
self._wipe(block_device.device_info.path)
@staticmethod
def udev_sync() -> None:
try:
SysCommand('udevadm settle')
except SysCallError as err:
debug(f'Failed to synchronize with udev: {err}')
# Module-level singleton used throughout the disk subsystem.
device_handler = DeviceHandler()

View File

@@ -0,0 +1,278 @@
from dataclasses import dataclass
from typing import override
from archinstall.lib.disk.encryption_menu import DiskEncryptionMenu
from archinstall.lib.models.device import (
DEFAULT_ITER_TIME,
BtrfsOptions,
DiskEncryption,
DiskLayoutConfiguration,
DiskLayoutType,
EncryptionType,
LvmConfiguration,
SnapshotConfig,
SnapshotType,
)
from archinstall.lib.translationhandler import tr
from archinstall.tui.curses_menu import SelectMenu
from archinstall.tui.menu_item import MenuItem, MenuItemGroup
from archinstall.tui.result import ResultType
from archinstall.tui.types import Alignment, FrameProperties
from ..interactions.disk_conf import select_disk_config, select_lvm_config
from ..menu.abstract_menu import AbstractSubMenu
from ..output import FormattedOutput
@dataclass
class DiskMenuConfig:
    # Aggregated state edited by DiskLayoutConfigurationMenu; each field maps
    # to one menu entry and may be None until the user configures it.
    disk_config: DiskLayoutConfiguration | None
    lvm_config: LvmConfiguration | None
    btrfs_snapshot_config: SnapshotConfig | None
    disk_encryption: DiskEncryption | None
class DiskLayoutConfigurationMenu(AbstractSubMenu[DiskLayoutConfiguration]):
    """
    Sub-menu for assembling a complete disk layout: partitioning, optional
    LVM, optional disk encryption and btrfs snapshot settings. run() folds
    the individual selections back into a single DiskLayoutConfiguration.

    NOTE(review): methods below access both self._item_group and
    self._menu_item_group — presumably the latter is supplied by
    AbstractSubMenu and refers to the same group; confirm.
    """
    def __init__(self, disk_layout_config: DiskLayoutConfiguration | None):
        # Split an existing configuration into the individual menu fields,
        # or start empty when none was supplied.
        if not disk_layout_config:
            self._disk_menu_config = DiskMenuConfig(
                disk_config=None,
                lvm_config=None,
                btrfs_snapshot_config=None,
                disk_encryption=None,
            )
        else:
            snapshot_config = disk_layout_config.btrfs_options.snapshot_config if disk_layout_config.btrfs_options else None
            self._disk_menu_config = DiskMenuConfig(
                disk_config=disk_layout_config,
                lvm_config=disk_layout_config.lvm_config,
                disk_encryption=disk_layout_config.disk_encryption,
                btrfs_snapshot_config=snapshot_config,
            )
        # [sic] 'menu_optioons' spelling kept as-is (code unchanged in this review)
        menu_optioons = self._define_menu_options()
        self._item_group = MenuItemGroup(menu_optioons, sort_items=False, checkmarks=True)
        super().__init__(
            self._item_group,
            self._disk_menu_config,
            allow_reset=True,
        )
    def _define_menu_options(self) -> list[MenuItem]:
        """Build the four menu entries; LVM and snapshots are dependency-gated."""
        return [
            MenuItem(
                text=tr('Partitioning'),
                action=self._select_disk_layout_config,
                value=self._disk_menu_config.disk_config,
                preview_action=self._prev_disk_layouts,
                key='disk_config',
            ),
            MenuItem(
                text='LVM',
                action=self._select_lvm_config,
                value=self._disk_menu_config.lvm_config,
                preview_action=self._prev_lvm_config,
                # only offered for the default (guided) layout type
                dependencies=[self._check_dep_lvm],
                key='lvm_config',
            ),
            MenuItem(
                text=tr('Disk encryption'),
                action=self._select_disk_encryption,
                preview_action=self._prev_disk_encryption,
                dependencies=['disk_config'],
                key='disk_encryption',
            ),
            MenuItem(
                text='Btrfs snapshots',
                action=self._select_btrfs_snapshots,
                value=self._disk_menu_config.btrfs_snapshot_config,
                preview_action=self._prev_btrfs_snapshots,
                # only offered when the layout has default btrfs subvolumes
                dependencies=[self._check_dep_btrfs],
                key='btrfs_snapshot_config',
            ),
        ]
    @override
    def run(self, additional_title: str | None = None) -> DiskLayoutConfiguration | None:
        """Run the menu and merge the sub-selections into the disk config."""
        super().run(additional_title=additional_title)
        if self._disk_menu_config.disk_config:
            self._disk_menu_config.disk_config.lvm_config = self._disk_menu_config.lvm_config
            self._disk_menu_config.disk_config.btrfs_options = BtrfsOptions(snapshot_config=self._disk_menu_config.btrfs_snapshot_config)
            self._disk_menu_config.disk_config.disk_encryption = self._disk_menu_config.disk_encryption
            return self._disk_menu_config.disk_config
        return None
    def _check_dep_lvm(self) -> bool:
        # LVM is only configurable on the default (guided) layout type
        disk_layout_conf: DiskLayoutConfiguration | None = self._menu_item_group.find_by_key('disk_config').value
        if disk_layout_conf and disk_layout_conf.config_type == DiskLayoutType.Default:
            return True
        return False
    def _check_dep_btrfs(self) -> bool:
        # snapshots require the layout to contain the default btrfs subvolumes
        disk_layout_conf: DiskLayoutConfiguration | None = self._menu_item_group.find_by_key('disk_config').value
        if disk_layout_conf:
            return disk_layout_conf.has_default_btrfs_vols()
        return False
    def _select_disk_encryption(self, preset: DiskEncryption | None) -> DiskEncryption | None:
        """Open the encryption sub-menu; refuses layouts that fail validation."""
        disk_config: DiskLayoutConfiguration | None = self._item_group.find_by_key('disk_config').value
        lvm_config: LvmConfiguration | None = self._item_group.find_by_key('lvm_config').value
        if not disk_config:
            return preset
        modifications = disk_config.device_modifications
        if not DiskEncryption.validate_enc(modifications, lvm_config):
            return None
        disk_encryption = DiskEncryptionMenu(modifications, lvm_config=lvm_config, preset=preset).run()
        return disk_encryption
    def _select_disk_layout_config(self, preset: DiskLayoutConfiguration | None) -> DiskLayoutConfiguration | None:
        """Pick a partitioning scheme; a changed scheme invalidates LVM/encryption."""
        disk_config = select_disk_config(preset)
        if disk_config != preset:
            self._menu_item_group.find_by_key('lvm_config').value = None
            self._menu_item_group.find_by_key('disk_encryption').value = None
        return disk_config
    def _select_lvm_config(self, preset: LvmConfiguration | None) -> LvmConfiguration | None:
        """Pick an LVM configuration; a changed config invalidates encryption."""
        disk_config: DiskLayoutConfiguration | None = self._item_group.find_by_key('disk_config').value
        if not disk_config:
            return preset
        lvm_config = select_lvm_config(disk_config, preset=preset)
        if lvm_config != preset:
            self._menu_item_group.find_by_key('disk_encryption').value = None
        return lvm_config
    def _select_btrfs_snapshots(self, preset: SnapshotConfig | None) -> SnapshotConfig | None:
        """Pick a btrfs snapshot tool (SnapshotType); skip keeps preset, reset clears."""
        preset_type = preset.snapshot_type if preset else None
        group = MenuItemGroup.from_enum(
            SnapshotType,
            sort_items=True,
            preset=preset_type,
        )
        result = SelectMenu[SnapshotType](
            group,
            allow_reset=True,
            allow_skip=True,
            frame=FrameProperties.min(tr('Snapshot type')),
            alignment=Alignment.CENTER,
        ).run()
        match result.type_:
            case ResultType.Skip:
                return preset
            case ResultType.Reset:
                return None
            case ResultType.Selection:
                return SnapshotConfig(snapshot_type=result.get_value())
    def _prev_disk_layouts(self, item: MenuItem) -> str | None:
        """Preview: per-device partition tables plus btrfs subvolume tables."""
        if not item.value:
            return None
        disk_layout_conf = item.get_value()
        if disk_layout_conf.config_type == DiskLayoutType.Pre_mount:
            msg = tr('Configuration type: {}').format(disk_layout_conf.config_type.display_msg()) + '\n'
            msg += tr('Mountpoint') + ': ' + str(disk_layout_conf.mountpoint)
            return msg
        device_mods = [d for d in disk_layout_conf.device_modifications if d.partitions]
        if device_mods:
            output_partition = '{}: {}\n'.format(tr('Configuration'), disk_layout_conf.config_type.display_msg())
            output_btrfs = ''
            for mod in device_mods:
                # create partition table
                partition_table = FormattedOutput.as_table(mod.partitions)
                output_partition += f'{mod.device_path}: {mod.device.device_info.model}\n'
                output_partition += '{}: {}\n'.format(tr('Wipe'), mod.wipe)
                output_partition += partition_table + '\n'
                # create btrfs table
                btrfs_partitions = [p for p in mod.partitions if p.btrfs_subvols]
                for partition in btrfs_partitions:
                    output_btrfs += FormattedOutput.as_table(partition.btrfs_subvols) + '\n'
            output = output_partition + output_btrfs
            return output.rstrip()
        return None
    def _prev_lvm_config(self, item: MenuItem) -> str | None:
        """Preview: physical volumes and volumes of the LVM configuration."""
        if not item.value:
            return None
        lvm_config: LvmConfiguration = item.value
        output = '{}: {}\n'.format(tr('Configuration'), lvm_config.config_type.display_msg())
        for vol_gp in lvm_config.vol_groups:
            pv_table = FormattedOutput.as_table(vol_gp.pvs)
            output += '{}:\n{}'.format(tr('Physical volumes'), pv_table)
            output += f'\nVolume Group: {vol_gp.name}'
            lvm_volumes = FormattedOutput.as_table(vol_gp.volumes)
            output += '\n\n{}:\n{}'.format(tr('Volumes'), lvm_volumes)
            # NOTE(review): returns inside the loop, i.e. only the first volume
            # group is previewed — confirm this is intentional
            return output
        return None
    def _prev_btrfs_snapshots(self, item: MenuItem) -> str | None:
        """Preview: the chosen snapshot tool name."""
        if not item.value:
            return None
        snapshot_config: SnapshotConfig = item.value
        return tr('Snapshot type: {}').format(snapshot_config.snapshot_type.value)
    def _prev_disk_encryption(self, item: MenuItem) -> str | None:
        """Preview: encryption type, password mask, iter time, targets, HSM."""
        disk_config: DiskLayoutConfiguration | None = self._item_group.find_by_key('disk_config').value
        enc_config: DiskEncryption | None = item.value
        if disk_config and not DiskEncryption.validate_enc(disk_config.device_modifications, disk_config.lvm_config):
            return tr('LVM disk encryption with more than 2 partitions is currently not supported')
        if enc_config:
            enc_type = enc_config.encryption_type
            output = tr('Encryption type') + f': {EncryptionType.type_to_text(enc_type)}\n'
            if enc_config.encryption_password:
                output += tr('Password') + f': {enc_config.encryption_password.hidden()}\n'
            if enc_type != EncryptionType.NoEncryption:
                output += tr('Iteration time') + f': {enc_config.iter_time or DEFAULT_ITER_TIME}ms\n'
            if enc_config.partitions:
                output += f'Partitions: {len(enc_config.partitions)} selected\n'
            elif enc_config.lvm_volumes:
                output += f'LVM volumes: {len(enc_config.lvm_volumes)} selected\n'
            if enc_config.hsm_device:
                output += f'HSM: {enc_config.hsm_device.manufacturer}'
            return output
        return None

View File

@@ -0,0 +1,417 @@
from pathlib import Path
from typing import override
from archinstall.lib.menu.menu_helper import MenuHelper
from archinstall.lib.models.device import (
DeviceModification,
DiskEncryption,
EncryptionType,
LvmConfiguration,
LvmVolume,
PartitionModification,
)
from archinstall.lib.translationhandler import tr
from archinstall.tui.curses_menu import EditMenu, SelectMenu
from archinstall.tui.menu_item import MenuItem, MenuItemGroup
from archinstall.tui.result import ResultType
from archinstall.tui.types import Alignment, FrameProperties
from ..menu.abstract_menu import AbstractSubMenu
from ..models.device import DEFAULT_ITER_TIME, Fido2Device
from ..models.users import Password
from ..output import FormattedOutput
from ..utils.util import get_password
from .fido import Fido2
class DiskEncryptionMenu(AbstractSubMenu[DiskEncryption]):
    """
    Sub-menu for configuring LUKS disk encryption: type, password, PBKDF
    iteration time, target partitions or LVM volumes, and an optional FIDO2
    HSM device. run() validates the combination and returns a DiskEncryption.
    """
    def __init__(
        self,
        device_modifications: list[DeviceModification],
        lvm_config: LvmConfiguration | None = None,
        preset: DiskEncryption | None = None,
    ):
        # start from the preset when editing an existing config
        if preset:
            self._enc_config = preset
        else:
            self._enc_config = DiskEncryption()
        self._device_modifications = device_modifications
        self._lvm_config = lvm_config
        # [sic] 'menu_optioons' spelling kept as-is (code unchanged in this review)
        menu_optioons = self._define_menu_options()
        self._item_group = MenuItemGroup(menu_optioons, sort_items=False, checkmarks=True)
        super().__init__(
            self._item_group,
            self._enc_config,
            allow_reset=True,
        )
    def _define_menu_options(self) -> list[MenuItem]:
        """Build the six menu entries; most are gated on the encryption type."""
        return [
            MenuItem(
                text=tr('Encryption type'),
                action=lambda x: select_encryption_type(self._device_modifications, self._lvm_config, x),
                value=self._enc_config.encryption_type,
                preview_action=self._preview,
                key='encryption_type',
            ),
            MenuItem(
                text=tr('Encryption password'),
                action=lambda x: select_encrypted_password(),
                value=self._enc_config.encryption_password,
                dependencies=[self._check_dep_enc_type],
                preview_action=self._preview,
                key='encryption_password',
            ),
            MenuItem(
                text=tr('Iteration time'),
                action=select_iteration_time,
                value=self._enc_config.iter_time,
                dependencies=[self._check_dep_enc_type],
                preview_action=self._preview,
                key='iter_time',
            ),
            MenuItem(
                text=tr('Partitions'),
                action=lambda x: select_partitions_to_encrypt(self._device_modifications, x),
                value=self._enc_config.partitions,
                # only for Luks / LvmOnLuks
                dependencies=[self._check_dep_partitions],
                preview_action=self._preview,
                key='partitions',
            ),
            MenuItem(
                text=tr('LVM volumes'),
                action=self._select_lvm_vols,
                value=self._enc_config.lvm_volumes,
                # only for LuksOnLvm
                dependencies=[self._check_dep_lvm_vols],
                preview_action=self._preview,
                key='lvm_volumes',
            ),
            MenuItem(
                text=tr('HSM'),
                action=select_hsm,
                value=self._enc_config.hsm_device,
                dependencies=[self._check_dep_enc_type],
                preview_action=self._preview,
                key='hsm_device',
            ),
        ]
    def _select_lvm_vols(self, preset: list[LvmVolume]) -> list[LvmVolume]:
        """Delegate to the LVM volume picker; no-op without an LVM config."""
        if self._lvm_config:
            return select_lvm_vols_to_encrypt(self._lvm_config, preset=preset)
        return []
    def _check_dep_enc_type(self) -> bool:
        # enabled whenever some real encryption type is chosen
        enc_type: EncryptionType | None = self._item_group.find_by_key('encryption_type').value
        if enc_type and enc_type != EncryptionType.NoEncryption:
            return True
        return False
    def _check_dep_partitions(self) -> bool:
        # partition selection only applies when LUKS sits on raw partitions
        enc_type: EncryptionType | None = self._item_group.find_by_key('encryption_type').value
        if enc_type and enc_type in [EncryptionType.Luks, EncryptionType.LvmOnLuks]:
            return True
        return False
    def _check_dep_lvm_vols(self) -> bool:
        # volume selection only applies when LUKS sits on top of LVM
        enc_type: EncryptionType | None = self._item_group.find_by_key('encryption_type').value
        if enc_type and enc_type == EncryptionType.LuksOnLvm:
            return True
        return False
    @override
    def run(self, additional_title: str | None = None) -> DiskEncryption | None:
        """
        Run the menu; returns a fully-populated DiskEncryption only when an
        encryption type, a password and at least one target (partitions or
        LVM volumes, whichever matches the type) were chosen, else None.
        """
        super().run(additional_title=additional_title)
        enc_type: EncryptionType | None = self._item_group.find_by_key('encryption_type').value
        enc_password: Password | None = self._item_group.find_by_key('encryption_password').value
        iter_time: int | None = self._item_group.find_by_key('iter_time').value
        enc_partitions = self._item_group.find_by_key('partitions').value
        enc_lvm_vols = self._item_group.find_by_key('lvm_volumes').value
        assert enc_type is not None
        assert enc_partitions is not None
        assert enc_lvm_vols is not None
        # targets are mutually exclusive: drop whichever set does not match the type
        if enc_type in [EncryptionType.Luks, EncryptionType.LvmOnLuks] and enc_partitions:
            enc_lvm_vols = []
        if enc_type == EncryptionType.LuksOnLvm:
            enc_partitions = []
        if enc_type != EncryptionType.NoEncryption and enc_password and (enc_partitions or enc_lvm_vols):
            return DiskEncryption(
                encryption_password=enc_password,
                encryption_type=enc_type,
                partitions=enc_partitions,
                lvm_volumes=enc_lvm_vols,
                hsm_device=self._enc_config.hsm_device,
                iter_time=iter_time or DEFAULT_ITER_TIME,
            )
        return None
    def _preview(self, item: MenuItem) -> str | None:
        """Combined preview of all currently configured encryption settings."""
        output = ''
        if (enc_type := self._prev_type()) is not None:
            output += enc_type
        if (enc_pwd := self._prev_password()) is not None:
            output += f'\n{enc_pwd}'
        if (iter_time := self._prev_iter_time()) is not None:
            output += f'\n{iter_time}'
        if (fido_device := self._prev_hsm()) is not None:
            output += f'\n{fido_device}'
        if (partitions := self._prev_partitions()) is not None:
            output += f'\n\n{partitions}'
        if (lvm := self._prev_lvm_vols()) is not None:
            output += f'\n\n{lvm}'
        if not output:
            return None
        return output
    def _prev_type(self) -> str | None:
        """Preview line for the encryption type."""
        enc_type = self._item_group.find_by_key('encryption_type').value
        if enc_type:
            enc_text = EncryptionType.type_to_text(enc_type)
            return f'{tr("Encryption type")}: {enc_text}'
        return None
    def _prev_password(self) -> str | None:
        """Preview line for the (masked) encryption password."""
        enc_pwd = self._item_group.find_by_key('encryption_password').value
        if enc_pwd:
            return f'{tr("Encryption password")}: {enc_pwd.hidden()}'
        return None
    def _prev_partitions(self) -> str | None:
        """Preview table of the partitions selected for encryption."""
        partitions: list[PartitionModification] | None = self._item_group.find_by_key('partitions').value
        if partitions:
            output = tr('Partitions to be encrypted') + '\n'
            output += FormattedOutput.as_table(partitions)
            return output.rstrip()
        return None
    def _prev_lvm_vols(self) -> str | None:
        """Preview table of the LVM volumes selected for encryption."""
        volumes: list[PartitionModification] | None = self._item_group.find_by_key('lvm_volumes').value
        if volumes:
            output = tr('LVM volumes to be encrypted') + '\n'
            output += FormattedOutput.as_table(volumes)
            return output.rstrip()
        return None
    def _prev_hsm(self) -> str | None:
        """Preview line for the chosen FIDO2 HSM device."""
        fido_device: Fido2Device | None = self._item_group.find_by_key('hsm_device').value
        if not fido_device:
            return None
        output = str(fido_device.path)
        output += f' ({fido_device.manufacturer}, {fido_device.product})'
        return f'{tr("HSM device")}: {output}'
    def _prev_iter_time(self) -> str | None:
        """Preview line for the PBKDF iteration time (hidden for NoEncryption)."""
        iter_time = self._item_group.find_by_key('iter_time').value
        enc_type = self._item_group.find_by_key('encryption_type').value
        if iter_time and enc_type != EncryptionType.NoEncryption:
            return f'{tr("Iteration time")}: {iter_time}ms'
        return None
def select_encryption_type(
    device_modifications: list[DeviceModification],
    lvm_config: LvmConfiguration | None = None,
    preset: EncryptionType | None = None,
) -> EncryptionType | None:
    """
    Ask which encryption scheme to use. With an LVM config the choices are
    LvmOnLuks/LuksOnLvm, otherwise plain Luks. Returns the selection, the
    preset on skip, or None on reset.
    """
    options: list[EncryptionType] = []
    if lvm_config:
        options = [EncryptionType.LvmOnLuks, EncryptionType.LuksOnLvm]
    else:
        options = [EncryptionType.Luks]
    if not preset:
        preset = options[0]
    preset_value = EncryptionType.type_to_text(preset)
    items = [MenuItem(EncryptionType.type_to_text(o), value=o) for o in options]
    group = MenuItemGroup(items)
    # NOTE(review): the items carry the EncryptionType enum as their value,
    # yet focus is set using the display text — verify set_focus_by_value
    # matches on the intended field, otherwise the preset is never focused.
    group.set_focus_by_value(preset_value)
    result = SelectMenu[EncryptionType](
        group,
        allow_skip=True,
        allow_reset=True,
        alignment=Alignment.CENTER,
        frame=FrameProperties.min(tr('Encryption type')),
    ).run()
    match result.type_:
        case ResultType.Reset:
            return None
        case ResultType.Skip:
            return preset
        case ResultType.Selection:
            return result.get_value()
def select_encrypted_password() -> Password | None:
    """Prompt for a disk encryption password; returns None when left blank/skipped."""
    prompt_header = tr('Enter disk encryption password (leave blank for no encryption)') + '\n'
    return get_password(
        text=tr('Disk encryption password'),
        header=prompt_header,
        allow_skip=True,
    )
def select_hsm(preset: Fido2Device | None = None) -> Fido2Device | None:
    """
    Let the user pick a FIDO2 device for HSM enrollment.

    Returns the chosen device, the preset when the menu is skipped, or None
    when detection fails, no devices exist, or the selection is reset.
    """
    header = tr('Select a FIDO2 device to use for HSM') + '\n'
    try:
        fido_devices = Fido2.get_cryptenroll_devices()
    except ValueError:
        # detection failed (e.g. libfido2 not installed)
        return None
    if not fido_devices:
        return None
    group = MenuHelper(data=fido_devices).create_menu_group()
    result = SelectMenu[Fido2Device](
        group,
        header=header,
        alignment=Alignment.CENTER,
        allow_skip=True,
    ).run()
    if result.type_ == ResultType.Reset:
        return None
    if result.type_ == ResultType.Skip:
        return preset
    return result.get_value()
def select_partitions_to_encrypt(
    modification: list[DeviceModification],
    preset: list[PartitionModification],
) -> list[PartitionModification]:
    """
    Multi-select which new partitions should be LUKS encrypted.

    /boot and swap partitions are never offered, nor are partitions that
    already exist on disk (only to-be-created ones may be encrypted).
    """
    # gather candidates, excluding the boot partition and swap
    candidates: list[PartitionModification] = [
        p
        for mod in modification
        for p in mod.partitions
        if p.mountpoint != Path('/boot') and not p.is_swap()
    ]
    # existing partitions not marked for (re)creation are off-limits
    avail_partitions = [p for p in candidates if not p.exists()]
    if not avail_partitions:
        return []
    group = MenuHelper(data=avail_partitions).create_menu_group()
    group.set_selected_by_value(preset)
    result = SelectMenu[PartitionModification](
        group,
        alignment=Alignment.CENTER,
        multi=True,
        allow_skip=True,
    ).run()
    if result.type_ == ResultType.Reset:
        return []
    if result.type_ == ResultType.Skip:
        return preset
    return result.get_values()
def select_lvm_vols_to_encrypt(
    lvm_config: LvmConfiguration,
    preset: list[LvmVolume],
) -> list[LvmVolume]:
    """
    Multi-select which LVM volumes should be LUKS encrypted; returns the
    selection, the preset on skip, or an empty list on reset / no volumes.
    """
    all_volumes: list[LvmVolume] = lvm_config.get_all_volumes()
    if not all_volumes:
        return []
    group = MenuHelper(data=all_volumes).create_menu_group()
    result = SelectMenu[LvmVolume](
        group,
        alignment=Alignment.CENTER,
        multi=True,
    ).run()
    if result.type_ == ResultType.Reset:
        return []
    if result.type_ == ResultType.Skip:
        return preset
    return result.get_values()
def select_iteration_time(preset: int | None = None) -> int | None:
header = tr('Enter iteration time for LUKS encryption (in milliseconds)') + '\n'
header += tr('Higher values increase security but slow down boot time') + '\n'
header += tr(f'Default: {DEFAULT_ITER_TIME}ms, Recommended range: 1000-60000') + '\n'
def validate_iter_time(value: str | None) -> str | None:
if not value:
return None
try:
iter_time = int(value)
if iter_time < 100:
return tr('Iteration time must be at least 100ms')
if iter_time > 120000:
return tr('Iteration time must be at most 120000ms')
return None
except ValueError:
return tr('Please enter a valid number')
result = EditMenu(
tr('Iteration time'),
header=header,
alignment=Alignment.CENTER,
allow_skip=True,
default_text=str(preset) if preset else str(DEFAULT_ITER_TIME),
validator=validate_iter_time,
).input()
match result.type_:
case ResultType.Skip:
return preset
case ResultType.Selection:
if not result.text():
return preset
return int(result.text())
case ResultType.Reset:
return None

View File

@@ -0,0 +1,125 @@
from __future__ import annotations
import getpass
from pathlib import Path
from typing import ClassVar
from archinstall.lib.models.device import Fido2Device
from ..exceptions import SysCallError
from ..general import SysCommand, SysCommandWorker, clear_vt100_escape_codes_from_str
from ..models.users import Password
from ..output import error, info
class Fido2:
    """
    Helpers for discovering FIDO2 tokens and enrolling them into LUKS
    devices via systemd-cryptenroll. Discovery results are cached at class
    level to avoid re-running the external tools on every menu redraw.
    """
    # cache flags and caches for the two discovery mechanisms
    _loaded_cryptsetup: bool = False
    _loaded_u2f: bool = False
    _cryptenroll_devices: ClassVar[list[Fido2Device]] = []
    _u2f_devices: ClassVar[list[Fido2Device]] = []
    @classmethod
    def get_fido2_devices(cls) -> list[Fido2Device]:
        """
        List FIDO2 devices via `fido2-token -L` (cached after the first call).
        fido2-tool output example:
        /dev/hidraw4: vendor=0x1050, product=0x0407 (Yubico YubiKey OTP+FIDO+CCID)
        """
        if not cls._loaded_u2f:
            cls._loaded_u2f = True
            try:
                ret = SysCommand('fido2-token -L').decode()
            except Exception as e:
                error(f'failed to read fido2 devices: {e}')
                return []
            fido_devices = clear_vt100_escape_codes_from_str(ret)
            if not fido_devices:
                return []
            for line in fido_devices.splitlines():
                # split '/dev/hidrawN: vendor=... product=... (Name)' on the first ':'
                path, details = line.replace(',', '').split(':', maxsplit=1)
                # details -> ['vendor=...', 'product=0x....', '(Name ...)']
                _, product, manufacturer = details.strip().split(' ', maxsplit=2)
                # 'product' still carries the 'product=' prefix; keep only the id
                cls._u2f_devices.append(Fido2Device(Path(path.strip()), manufacturer.strip(), product.strip().split('=')[1]))
        return cls._u2f_devices
    @classmethod
    def get_cryptenroll_devices(cls, reload: bool = False) -> list[Fido2Device]:
        """
        Uses systemd-cryptenroll to list the FIDO2 devices
        connected that supports FIDO2.
        Some devices might show up in udevadm as FIDO2 compliant
        when they are in fact not.
        The drawback of systemd-cryptenroll is that it uses human readable format.
        That means we get this weird table like structure that is of no use.
        So we'll look for `MANUFACTURER` and `PRODUCT`, we take their index
        and we split each line based on those positions.
        Output example:
        PATH         MANUFACTURER PRODUCT
        /dev/hidraw1 Yubico       YubiKey OTP+FIDO+CCID
        Raises ValueError when the tool fails (e.g. libfido2 not installed).
        """
        # to prevent continuous reloading which will slow
        # down moving the cursor in the menu
        if not cls._loaded_cryptsetup or reload:
            try:
                ret = SysCommand('systemd-cryptenroll --fido2-device=list').decode()
            except SysCallError:
                error('fido2 support is most likely not installed')
                raise ValueError('HSM devices can not be detected, is libfido2 installed?')
            fido_devices = clear_vt100_escape_codes_from_str(ret)
            manufacturer_pos = 0
            product_pos = 0
            devices = []
            for line in fido_devices.split('\r\n'):
                if '/dev' not in line:
                    # header line: remember the column offsets for slicing
                    manufacturer_pos = line.find('MANUFACTURER')
                    product_pos = line.find('PRODUCT')
                    continue
                # slice each data row at the header's column positions
                path = line[:manufacturer_pos].rstrip()
                manufacturer = line[manufacturer_pos:product_pos].rstrip()
                product = line[product_pos:]
                devices.append(
                    Fido2Device(Path(path), manufacturer, product),
                )
            cls._loaded_cryptsetup = True
            cls._cryptenroll_devices = devices
        return cls._cryptenroll_devices
    @classmethod
    def fido2_enroll(
        cls,
        hsm_device: Fido2Device,
        dev_path: Path,
        password: Password,
    ) -> None:
        """
        Enroll a FIDO2 token into the LUKS device at dev_path, feeding the
        existing passphrase and, when prompted, the token PIN (read
        interactively) to systemd-cryptenroll.
        NOTE(review): polls the worker's private _trace_log and loops until
        the process exits — if no prompt ever appears this busy-waits;
        confirm SysCommandWorker terminates on error.
        """
        worker = SysCommandWorker(f'systemd-cryptenroll --fido2-device={hsm_device.path} {dev_path}', peek_output=True)
        pw_inputted = False
        pin_inputted = False
        while worker.is_alive():
            if pw_inputted is False:
                if bytes(f'please enter current passphrase for disk {dev_path}', 'UTF-8') in worker._trace_log.lower():
                    worker.write(bytes(password.plaintext, 'UTF-8'))
                    pw_inputted = True
            elif pin_inputted is False:
                if bytes('please enter security token pin', 'UTF-8') in worker._trace_log.lower():
                    worker.write(bytes(getpass.getpass(' '), 'UTF-8'))
                    pin_inputted = True
                    info('You might need to touch the FIDO2 device to unlock it if no prompt comes up after 3 seconds')

View File

@@ -0,0 +1,353 @@
from __future__ import annotations
import math
import time
from pathlib import Path
from archinstall.lib.translationhandler import tr
from archinstall.tui.curses_menu import Tui
from ..interactions.general_conf import ask_abort
from ..luks import Luks2
from ..models.device import (
DiskEncryption,
DiskLayoutConfiguration,
DiskLayoutType,
EncryptionType,
FilesystemType,
LvmConfiguration,
LvmVolume,
LvmVolumeGroup,
PartitionModification,
SectorSize,
Size,
Unit,
)
from ..output import debug, info
from .device_handler import device_handler
class FilesystemHandler:
def __init__(self, disk_config: DiskLayoutConfiguration):
self._disk_config = disk_config
self._enc_config = disk_config.disk_encryption
def perform_filesystem_operations(self, show_countdown: bool = True) -> None:
if self._disk_config.config_type == DiskLayoutType.Pre_mount:
debug('Disk layout configuration is set to pre-mount, not performing any operations')
return
device_mods = [d for d in self._disk_config.device_modifications if d.partitions]
if not device_mods:
debug('No modifications required')
return
if show_countdown:
self._final_warning()
# Setup the blockdevice, filesystem (and optionally encryption).
# Once that's done, we'll hand over to perform_installation()
# make sure all devices are unmounted
for mod in device_mods:
device_handler.umount_all_existing(mod.device_path)
for mod in device_mods:
device_handler.partition(mod)
device_handler.udev_sync()
if self._disk_config.lvm_config:
for mod in device_mods:
if boot_part := mod.get_boot_partition():
debug(f'Formatting boot partition: {boot_part.dev_path}')
self._format_partitions([boot_part])
self.perform_lvm_operations()
else:
for mod in device_mods:
self._format_partitions(mod.partitions)
for part_mod in mod.partitions:
if part_mod.fs_type == FilesystemType.Btrfs and part_mod.is_create_or_modify():
device_handler.create_btrfs_volumes(part_mod, enc_conf=self._enc_config)
def _format_partitions(
self,
partitions: list[PartitionModification],
) -> None:
"""
Format can be given an overriding path, for instance /dev/null to test
the formatting functionality and in essence the support for the given filesystem.
"""
# don't touch existing partitions
create_or_modify_parts = [p for p in partitions if p.is_create_or_modify()]
self._validate_partitions(create_or_modify_parts)
for part_mod in create_or_modify_parts:
# partition will be encrypted
if self._enc_config is not None and part_mod in self._enc_config.partitions:
device_handler.format_encrypted(
part_mod.safe_dev_path,
part_mod.mapper_name,
part_mod.safe_fs_type,
self._enc_config,
)
else:
device_handler.format(part_mod.safe_fs_type, part_mod.safe_dev_path)
# synchronize with udev before using lsblk
device_handler.udev_sync()
lsblk_info = device_handler.fetch_part_info(part_mod.safe_dev_path)
part_mod.partn = lsblk_info.partn
part_mod.partuuid = lsblk_info.partuuid
part_mod.uuid = lsblk_info.uuid
def _validate_partitions(self, partitions: list[PartitionModification]) -> None:
checks = {
# verify that all partitions have a path set (which implies that they have been created)
lambda x: x.dev_path is None: ValueError('When formatting, all partitions must have a path set'),
# crypto luks is not a valid file system type
lambda x: x.fs_type is FilesystemType.Crypto_luks: ValueError('Crypto luks cannot be set as a filesystem type'),
# file system type must be set
lambda x: x.fs_type is None: ValueError('File system type must be set for modification'),
}
for check, exc in checks.items():
found = next(filter(check, partitions), None)
if found is not None:
raise exc
def perform_lvm_operations(self) -> None:
info('Setting up LVM config...')
if not self._disk_config.lvm_config:
return
if self._enc_config:
self._setup_lvm_encrypted(
self._disk_config.lvm_config,
self._enc_config,
)
else:
self._setup_lvm(self._disk_config.lvm_config)
self._format_lvm_vols(self._disk_config.lvm_config)
def _setup_lvm_encrypted(self, lvm_config: LvmConfiguration, enc_config: DiskEncryption) -> None:
if enc_config.encryption_type == EncryptionType.LvmOnLuks:
enc_mods = self._encrypt_partitions(enc_config, lock_after_create=False)
self._setup_lvm(lvm_config, enc_mods)
self._format_lvm_vols(lvm_config)
# export the lvm group safely otherwise the Luks cannot be closed
self._safely_close_lvm(lvm_config)
for luks in enc_mods.values():
luks.lock()
elif enc_config.encryption_type == EncryptionType.LuksOnLvm:
self._setup_lvm(lvm_config)
enc_vols = self._encrypt_lvm_vols(lvm_config, enc_config, False)
self._format_lvm_vols(lvm_config, enc_vols)
for luks in enc_vols.values():
luks.lock()
self._safely_close_lvm(lvm_config)
def _safely_close_lvm(self, lvm_config: LvmConfiguration) -> None:
for vg in lvm_config.vol_groups:
for vol in vg.volumes:
device_handler.lvm_vol_change(vol, False)
device_handler.lvm_export_vg(vg)
def _setup_lvm(
self,
lvm_config: LvmConfiguration,
enc_mods: dict[PartitionModification, Luks2] = {},
) -> None:
self._lvm_create_pvs(lvm_config, enc_mods)
for vg in lvm_config.vol_groups:
pv_dev_paths = self._get_all_pv_dev_paths(vg.pvs, enc_mods)
device_handler.lvm_vg_create(pv_dev_paths, vg.name)
# figure out what the actual available size in the group is
vg_info = device_handler.lvm_group_info(vg.name)
if not vg_info:
raise ValueError('Unable to fetch VG info')
# the actual available LVM Group size will be smaller than the
# total PVs size due to reserved metadata storage etc.
# so we'll have a look at the total avail. size, check the delta
# to the desired sizes and subtract some equally from the actually
# created volume
avail_size = vg_info.vg_size
desired_size = sum([vol.length for vol in vg.volumes], Size(0, Unit.B, SectorSize.default()))
delta = desired_size - avail_size
delta_bytes = delta.convert(Unit.B)
# Round the offset up to the next physical extent (PE, 4 MiB by default)
# to ensure lvcreate`s internal rounding doesn`t consume space reserved
# for subsequent logical volumes.
pe_bytes = Size(4, Unit.MiB, SectorSize.default()).convert(Unit.B)
pe_count = math.ceil(delta_bytes.value / pe_bytes.value)
rounded_offset = pe_count * pe_bytes.value
max_vol_offset = Size(rounded_offset, Unit.B, SectorSize.default())
max_vol = max(vg.volumes, key=lambda x: x.length)
for lv in vg.volumes:
offset = max_vol_offset if lv == max_vol else None
debug(f'vg: {vg.name}, vol: {lv.name}, offset: {offset}')
device_handler.lvm_vol_create(vg.name, lv, offset)
while True:
debug('Fetching LVM volume info')
lv_info = device_handler.lvm_vol_info(lv.name)
if lv_info is not None:
break
time.sleep(1)
self._lvm_vol_handle_e2scrub(vg)
def _format_lvm_vols(
self,
lvm_config: LvmConfiguration,
enc_vols: dict[LvmVolume, Luks2] = {},
) -> None:
for vol in lvm_config.get_all_volumes():
if enc_vol := enc_vols.get(vol, None):
if not enc_vol.mapper_dev:
raise ValueError('No mapper device defined')
path = enc_vol.mapper_dev
else:
path = vol.safe_dev_path
# wait a bit otherwise the mkfs will fail as it can't
# find the mapper device yet
device_handler.format(vol.fs_type, path)
if vol.fs_type == FilesystemType.Btrfs:
device_handler.create_lvm_btrfs_subvolumes(path, vol.btrfs_subvols, vol.mount_options)
def _lvm_create_pvs(
self,
lvm_config: LvmConfiguration,
enc_mods: dict[PartitionModification, Luks2] = {},
) -> None:
pv_paths: set[Path] = set()
for vg in lvm_config.vol_groups:
pv_paths |= self._get_all_pv_dev_paths(vg.pvs, enc_mods)
device_handler.lvm_pv_create(pv_paths)
def _get_all_pv_dev_paths(
self,
pvs: list[PartitionModification],
enc_mods: dict[PartitionModification, Luks2] = {},
) -> set[Path]:
pv_paths: set[Path] = set()
for pv in pvs:
if enc_pv := enc_mods.get(pv, None):
if mapper := enc_pv.mapper_dev:
pv_paths.add(mapper)
else:
pv_paths.add(pv.safe_dev_path)
return pv_paths
def _encrypt_lvm_vols(
self,
lvm_config: LvmConfiguration,
enc_config: DiskEncryption,
lock_after_create: bool = True,
) -> dict[LvmVolume, Luks2]:
enc_vols: dict[LvmVolume, Luks2] = {}
for vol in lvm_config.get_all_volumes():
if vol in enc_config.lvm_volumes:
luks_handler = device_handler.encrypt(
vol.safe_dev_path,
vol.mapper_name,
enc_config.encryption_password,
lock_after_create,
iter_time=enc_config.iter_time,
)
enc_vols[vol] = luks_handler
return enc_vols
def _encrypt_partitions(
self,
enc_config: DiskEncryption,
lock_after_create: bool = True,
) -> dict[PartitionModification, Luks2]:
enc_mods: dict[PartitionModification, Luks2] = {}
for mod in self._disk_config.device_modifications:
partitions = mod.partitions
# don't touch existing partitions
filtered_part = [p for p in partitions if not p.exists()]
self._validate_partitions(filtered_part)
enc_mods = {}
for part_mod in filtered_part:
if part_mod in enc_config.partitions:
luks_handler = device_handler.encrypt(
part_mod.safe_dev_path,
part_mod.mapper_name,
enc_config.encryption_password,
lock_after_create=lock_after_create,
iter_time=enc_config.iter_time,
)
enc_mods[part_mod] = luks_handler
return enc_mods
def _lvm_vol_handle_e2scrub(self, vol_gp: LvmVolumeGroup) -> None:
# from arch wiki:
# If a logical volume will be formatted with ext4, leave at least 256 MiB
# free space in the volume group to allow using e2scrub
if any([vol.fs_type == FilesystemType.Ext4 for vol in vol_gp.volumes]):
largest_vol = max(vol_gp.volumes, key=lambda x: x.length)
device_handler.lvm_vol_reduce(
largest_vol.safe_dev_path,
Size(256, Unit.MiB, SectorSize.default()),
)
	def _final_warning(self) -> bool:
		"""
		Issue a final warning before we continue with something un-revertable.

		Prints a short countdown; Ctrl+C during the countdown offers the
		user a chance to abort. Always returns True once the countdown has
		completed (or the abort prompt was declined).
		"""
		# We mention the drive one last time, and count from 5 to 0.
		out = tr('Starting device modifications in ')
		Tui.print(out, row=0, endl='', clear_screen=True)

		try:
			# printed character by character (0.25s each) to animate the countdown
			countdown = '\n5...4...3...2...1\n'
			for c in countdown:
				Tui.print(c, row=0, endl='')
				time.sleep(0.25)
		except KeyboardInterrupt:
			with Tui():
				ask_abort()

		return True

View File

@@ -0,0 +1,589 @@
from __future__ import annotations
import re
from pathlib import Path
from typing import override
from archinstall.lib.models.device import (
BtrfsMountOption,
DeviceModification,
FilesystemType,
ModificationStatus,
PartitionFlag,
PartitionModification,
PartitionTable,
PartitionType,
SectorSize,
Size,
Unit,
)
from archinstall.lib.translationhandler import tr
from archinstall.tui.curses_menu import EditMenu, SelectMenu
from archinstall.tui.menu_item import MenuItem, MenuItemGroup
from archinstall.tui.result import ResultType
from archinstall.tui.types import Alignment, FrameProperties, Orientation
from ..menu.list_manager import ListManager
from ..output import FormattedOutput
from ..utils.util import prompt_dir
from .subvolume_menu import SubvolumeMenu
class FreeSpace:
	"""A contiguous unallocated region on a device, bounded by start/end offsets."""

	def __init__(self, start: Size, end: Size) -> None:
		self.start = start
		self.end = end

	@property
	def length(self) -> Size:
		"""Size of the gap between start and end."""
		return self.end - self.start

	def table_data(self) -> dict[str, str]:
		"""
		Called for displaying data in table format
		"""
		sector_size = self.start.sector_size
		start_sectors = self.start.format_size(Unit.sectors, sector_size, include_unit=False)
		end_sectors = self.end.format_size(Unit.sectors, sector_size, include_unit=False)

		return {
			'Start': start_sectors,
			'End': end_sectors,
			'Size': self.length.format_highest(),
		}
class DiskSegment:
	"""A single row of the partitioning table: a partition or a free-space gap."""

	def __init__(self, segment: PartitionModification | FreeSpace) -> None:
		self.segment = segment

	def table_data(self) -> dict[str, str]:
		"""
		Called for displaying data in table format
		"""
		seg = self.segment

		if isinstance(seg, FreeSpace):
			# render free space through a placeholder partition so the
			# columns line up with real partition rows
			placeholder = PartitionModification(
				status=ModificationStatus.Create,
				type=PartitionType._Unknown,
				start=seg.start,
				length=seg.length,
			)
			data = placeholder.table_data()
			data['Status'] = 'free'
			data['Type'] = ''
			data['FS type'] = ''
			return data

		return seg.table_data()
class PartitioningList(ListManager[DiskSegment]):
	def __init__(
		self,
		device_mod: DeviceModification,
		partition_table: PartitionTable,
	) -> None:
		"""
		Interactive partition manager for a single device.

		:param device_mod: Existing modification plan for the device.
		:param partition_table: Target partition table type (GPT/MBR).
		"""
		device = device_mod.device

		self._device = device
		self._wipe = device_mod.wipe
		# 1 MiB alignment buffer; gaps smaller than this are not shown as free space
		self._buffer = Size(1, Unit.MiB, device.device_info.sector_size)
		self._using_gpt = device_mod.using_gpt(partition_table)

		self._actions = {
			'suggest_partition_layout': tr('Suggest partition layout'),
			'remove_added_partitions': tr('Remove all newly added partitions'),
			'assign_mountpoint': tr('Assign mountpoint'),
			'mark_formatting': tr('Mark/Unmark to be formatted (wipes data)'),
			'mark_bootable': tr('Mark/Unmark as bootable'),
		}

		# ESP/XBOOTLDR flags only exist on GPT tables
		if self._using_gpt:
			self._actions.update(
				{
					'mark_esp': tr('Mark/Unmark as ESP'),
					'mark_xbootldr': tr('Mark/Unmark as XBOOTLDR'),
				}
			)

		self._actions.update(
			{
				'set_filesystem': tr('Change filesystem'),
				'btrfs_mark_compressed': tr('Mark/Unmark as compressed'),  # btrfs only
				'btrfs_mark_nodatacow': tr('Mark/Unmark as nodatacow'),  # btrfs only
				'btrfs_set_subvolumes': tr('Set subvolumes'),  # btrfs only
				'delete_partition': tr('Delete partition'),
			}
		)

		device_partitions = []

		if not device_mod.partitions:
			# we'll display the existing partitions of the device
			for partition in device.partition_infos:
				device_partitions.append(
					PartitionModification.from_existing_partition(partition),
				)
		else:
			device_partitions = device_mod.partitions

		prompt = tr('Partition management: {}').format(device.device_info.path) + '\n'
		prompt += tr('Total length: {}').format(device.device_info.total_size.format_size(Unit.MiB))
		self._info = prompt + '\n'

		display_actions = list(self._actions.values())

		# BUGFIX: the first two actions ('suggest_partition_layout' and
		# 'remove_added_partitions') are global actions; previously only the
		# first was passed ([:1]) while per-entry actions started at [2:],
		# which made 'remove_added_partitions' unreachable even though
		# handle_action() implements it.
		super().__init__(
			self.as_segments(device_partitions),
			display_actions[:2],
			display_actions[2:],
			self._info + self.wipe_str(),
		)

	def wipe_str(self) -> str:
		"""Human-readable wipe-flag suffix for the menu header."""
		return '{}: {}'.format(tr('Wipe'), self._wipe)
	def as_segments(self, device_partitions: list[PartitionModification]) -> list[DiskSegment]:
		"""
		Convert partition modifications into display segments, inserting a
		FreeSpace row for every gap larger than the 1 MiB alignment buffer.
		"""
		end = self._device.device_info.total_size

		if self._using_gpt:
			# GPT reserves space at the end of the disk for the backup header
			end = end.gpt_end()

		end = end.align()

		# Reorder device_partitions to move all deleted partitions to the top
		device_partitions.sort(key=lambda p: p.is_delete(), reverse=True)

		# live (non-deleted) partitions are the ones used for gap calculations
		partitions = [DiskSegment(p) for p in device_partitions if not p.is_delete()]
		segments = [DiskSegment(p) for p in device_partitions]

		if not partitions:
			# no live partitions: everything after the alignment buffer is free
			free_space = FreeSpace(self._buffer, end)
			if free_space.length > self._buffer:
				return segments + [DiskSegment(free_space)]
			return segments

		first_part_index, first_partition = next(
			(i, disk_segment)
			for i, disk_segment in enumerate(segments)
			if isinstance(disk_segment.segment, PartitionModification) and not disk_segment.segment.is_delete()
		)

		prev_partition = first_partition
		index = 0

		# insert free-space rows between consecutive live partitions
		for partition in segments[1:]:
			index += 1

			if isinstance(partition.segment, PartitionModification) and partition.segment.is_delete():
				continue

			if prev_partition.segment.end < partition.segment.start:
				free_space = FreeSpace(prev_partition.segment.end, partition.segment.start)
				if free_space.length > self._buffer:
					segments.insert(index, DiskSegment(free_space))
					index += 1

			prev_partition = partition

		# gap before the first live partition
		if first_partition.segment.start > self._buffer:
			free_space = FreeSpace(self._buffer, first_partition.segment.start)
			if free_space.length > self._buffer:
				segments.insert(first_part_index, DiskSegment(free_space))

		# trailing gap after the last live partition
		if partitions[-1].segment.end < end:
			free_space = FreeSpace(partitions[-1].segment.end, end)
			if free_space.length > self._buffer:
				segments.append(DiskSegment(free_space))

		return segments
@staticmethod
def get_part_mods(disk_segments: list[DiskSegment]) -> list[PartitionModification]:
return [s.segment for s in disk_segments if isinstance(s.segment, PartitionModification)]
def get_device_mod(self) -> DeviceModification:
disk_segments = super().run()
partitions = self.get_part_mods(disk_segments)
return DeviceModification(self._device, self._wipe, partitions)
@override
def _run_actions_on_entry(self, entry: DiskSegment) -> None:
# Do not create a menu when the segment is free space
if isinstance(entry.segment, FreeSpace):
self._data = self.handle_action('', entry, self._data)
else:
super()._run_actions_on_entry(entry)
@override
def selected_action_display(self, selection: DiskSegment) -> str:
if isinstance(selection.segment, PartitionModification):
if selection.segment.status == ModificationStatus.Create:
return tr('Partition - New')
elif selection.segment.is_delete() and selection.segment.dev_path:
title = tr('Partition') + '\n\n'
title += 'status: delete\n'
title += f'device: {selection.segment.dev_path}\n'
for part in self._device.partition_infos:
if part.path == selection.segment.dev_path:
if part.partuuid:
title += f'partuuid: {part.partuuid}'
return title
return str(selection.segment.dev_path)
return ''
	@override
	def filter_options(self, selection: DiskSegment, options: list[str]) -> list[str]:
		"""
		Hide menu actions that don't apply to the selected segment, e.g.
		btrfs-only options on non-btrfs partitions or filesystem changes on
		existing partitions not marked for formatting.
		"""
		not_filter = []

		if isinstance(selection.segment, PartitionModification):
			if selection.segment.is_delete():
				# a partition marked for deletion offers no further actions
				not_filter = list(self._actions.values())
			# only display formatting if the partition exists already
			elif not selection.segment.exists():
				not_filter += [self._actions['mark_formatting']]
			else:
				# only allow options if the existing partition
				# was marked as formatting, otherwise we run into issues where
				# 1. select a new fs -> potentially mark as wipe now
				# 2. Switch back to old filesystem -> should unmark wipe now, but
				#    how do we know it was the original one?
				not_filter += [
					self._actions['set_filesystem'],
					self._actions['mark_bootable'],
				]

				if self._using_gpt:
					not_filter += [
						self._actions['mark_esp'],
						self._actions['mark_xbootldr'],
					]

				not_filter += [
					self._actions['btrfs_mark_compressed'],
					self._actions['btrfs_mark_nodatacow'],
					self._actions['btrfs_set_subvolumes'],
				]

			# non btrfs partitions shouldn't get btrfs options
			if selection.segment.fs_type != FilesystemType.Btrfs:
				not_filter += [
					self._actions['btrfs_mark_compressed'],
					self._actions['btrfs_mark_nodatacow'],
					self._actions['btrfs_set_subvolumes'],
				]
			else:
				# btrfs mountpoints are defined per-subvolume instead
				not_filter += [self._actions['assign_mountpoint']]

		return [o for o in options if o not in not_filter]
	@override
	def handle_action(
		self,
		action: str,
		entry: DiskSegment | None,
		data: list[DiskSegment],
	) -> list[DiskSegment]:
		"""
		Dispatch a menu action.

		Three cases: no entry selected (global actions), a partition entry
		(per-partition actions), or a free-space entry (create a partition).
		Returns the updated segment list.
		"""
		if not entry:
			# global actions (no specific segment selected)
			action_key = [k for k, v in self._actions.items() if v == action][0]

			match action_key:
				case 'suggest_partition_layout':
					part_mods = self.get_part_mods(data)
					device_mod = self._suggest_partition_layout(part_mods)
					if device_mod and device_mod.partitions:
						data = self.as_segments(device_mod.partitions)
						self._wipe = device_mod.wipe
						self._prompt = self._info + self.wipe_str()
				case 'remove_added_partitions':
					if self._reset_confirmation():
						# keep only partitions that already exist on disk
						data = [s for s in data if isinstance(s.segment, PartitionModification) and s.segment.is_exists_or_modify()]
		elif isinstance(entry.segment, PartitionModification):
			partition = entry.segment
			action_key = [k for k, v in self._actions.items() if v == action][0]

			match action_key:
				case 'assign_mountpoint':
					new_mountpoint = self._prompt_mountpoint()

					if not partition.is_swap():
						# moving away from /home drops the home flag first
						if partition.is_home():
							partition.invert_flag(PartitionFlag.LINUX_HOME)

						partition.mountpoint = new_mountpoint

						# re-derive flags from the new mountpoint
						if partition.is_root():
							partition.flags = []

						if partition.is_boot():
							partition.flags = []
							partition.set_flag(PartitionFlag.BOOT)

							if self._using_gpt:
								partition.set_flag(PartitionFlag.ESP)

						if partition.is_home():
							partition.flags = []
							partition.set_flag(PartitionFlag.LINUX_HOME)
				case 'mark_formatting':
					self._prompt_formatting(partition)
				case 'mark_bootable':
					if not partition.is_swap():
						partition.invert_flag(PartitionFlag.BOOT)
				case 'mark_esp':
					# ESP and XBOOTLDR are mutually exclusive flags
					if not partition.is_root() and not partition.is_home() and not partition.is_swap():
						if PartitionFlag.XBOOTLDR in partition.flags:
							partition.invert_flag(PartitionFlag.XBOOTLDR)

						partition.invert_flag(PartitionFlag.ESP)
				case 'mark_xbootldr':
					if not partition.is_root() and not partition.is_home() and not partition.is_swap():
						if PartitionFlag.ESP in partition.flags:
							partition.invert_flag(PartitionFlag.ESP)

						partition.invert_flag(PartitionFlag.XBOOTLDR)
				case 'set_filesystem':
					fs_type = self._prompt_partition_fs_type()

					# clear a stale swap flag before applying the new fs
					if partition.is_swap():
						partition.invert_flag(PartitionFlag.SWAP)

					partition.fs_type = fs_type

					if partition.is_swap():
						partition.mountpoint = None
						partition.flags = []
						partition.set_flag(PartitionFlag.SWAP)

					# btrfs subvolumes will define mountpoints
					if fs_type == FilesystemType.Btrfs:
						partition.mountpoint = None
				case 'btrfs_mark_compressed':
					self._toggle_mount_option(partition, BtrfsMountOption.compress)
				case 'btrfs_mark_nodatacow':
					self._toggle_mount_option(partition, BtrfsMountOption.nodatacow)
				case 'btrfs_set_subvolumes':
					self._set_btrfs_subvolumes(partition)
				case 'delete_partition':
					data = self._delete_partition(partition, data)
		else:
			# free-space row: create a new partition in the selected gap
			part_mods = self.get_part_mods(data)
			index = data.index(entry)
			part_mods.insert(index, self._create_new_partition(entry.segment))
			data = self.as_segments(part_mods)

		return data
def _delete_partition(
self,
entry: PartitionModification,
data: list[DiskSegment],
) -> list[DiskSegment]:
if entry.is_exists_or_modify():
entry.status = ModificationStatus.Delete
part_mods = self.get_part_mods(data)
else:
part_mods = [d.segment for d in data if isinstance(d.segment, PartitionModification) and d.segment != entry]
return self.as_segments(part_mods)
	def _toggle_mount_option(
		self,
		partition: PartitionModification,
		option: BtrfsMountOption,
	) -> None:
		"""
		Toggle a btrfs mount option on the partition.

		'compress' and 'nodatacow' are mutually exclusive: enabling either
		one strips the other (any 'compress*' entry is removed before the
		new option value is appended).
		"""
		if option.value not in partition.mount_options:
			if option == BtrfsMountOption.compress:
				# enabling compression drops nodatacow
				partition.mount_options = [o for o in partition.mount_options if o != BtrfsMountOption.nodatacow.value]

			# remove any existing compress=... entry before appending
			partition.mount_options = [o for o in partition.mount_options if not o.startswith(BtrfsMountOption.compress.name)]
			partition.mount_options.append(option.value)
		else:
			# option already present: toggle it off
			partition.mount_options = [o for o in partition.mount_options if o != option.value]
def _set_btrfs_subvolumes(self, partition: PartitionModification) -> None:
partition.btrfs_subvols = SubvolumeMenu(
partition.btrfs_subvols,
None,
).run()
	def _prompt_formatting(self, partition: PartitionModification) -> None:
		"""
		Toggle an existing partition between the Exist and Modify states
		(Modify means it will be re-formatted). For LUKS partitions a new
		inner filesystem is prompted for as well.
		"""
		# an existing partition can toggle between Exist or Modify
		if partition.is_modify():
			partition.status = ModificationStatus.Exist
			return
		elif partition.exists():
			partition.status = ModificationStatus.Modify

		# If we mark a partition for formatting, but the format is CRYPTO LUKS, there's no point in formatting it really
		# without asking the user which inner-filesystem they want to use. Since the flag 'encrypted' = True is already set,
		# it's safe to change the filesystem for this partition.
		if partition.fs_type == FilesystemType.Crypto_luks:
			prompt = tr('This partition is currently encrypted, to format it a filesystem has to be specified') + '\n'
			fs_type = self._prompt_partition_fs_type(prompt)
			partition.fs_type = fs_type

			# btrfs mountpoints come from subvolumes
			if fs_type == FilesystemType.Btrfs:
				partition.mountpoint = None
def _prompt_mountpoint(self) -> Path:
header = tr('Partition mount-points are relative to inside the installation, the boot would be /boot as an example.') + '\n'
prompt = tr('Mountpoint')
mountpoint = prompt_dir(prompt, header, validate=False, allow_skip=False)
assert mountpoint
return mountpoint
def _prompt_partition_fs_type(self, prompt: str | None = None) -> FilesystemType:
fs_types = filter(lambda fs: fs != FilesystemType.Crypto_luks, FilesystemType)
items = [MenuItem(fs.value, value=fs) for fs in fs_types]
group = MenuItemGroup(items, sort_items=False)
result = SelectMenu[FilesystemType](
group,
header=prompt,
alignment=Alignment.CENTER,
frame=FrameProperties.min(tr('Filesystem')),
allow_skip=False,
).run()
match result.type_:
case ResultType.Selection:
return result.get_value()
case _:
raise ValueError('Unhandled result type')
def _validate_value(
self,
sector_size: SectorSize,
max_size: Size,
text: str,
) -> Size | None:
match = re.match(r'([0-9]+)([a-zA-Z|%]*)', text, re.I)
if not match:
return None
str_value, unit = match.groups()
if unit == '%':
value = int(max_size.value * (int(str_value) / 100))
unit = max_size.unit.name
else:
value = int(str_value)
if unit and unit not in Unit.get_all_units():
return None
unit = Unit[unit] if unit else Unit.sectors
size = Size(value, unit, sector_size)
if size.format_highest() == max_size.format_highest():
return max_size
elif size > max_size or size < self._buffer:
return None
return size
	def _prompt_size(self, free_space: FreeSpace) -> Size:
		"""
		Ask for the size of a new partition inside the given free-space
		segment; skipping (or entering nothing) uses the whole segment.
		"""

		def validate(value: str | None) -> str | None:
			# EditMenu validator: returns an error message or None when valid
			if not value:
				return None

			size = self._validate_value(sector_size, max_size, value)
			if not size:
				return tr('Invalid size')

			return None

		device_info = self._device.device_info
		sector_size = device_info.sector_size

		text = tr('Selected free space segment on device {}:').format(device_info.path) + '\n\n'
		free_space_table = FormattedOutput.as_table([free_space])
		prompt = text + free_space_table + '\n'

		max_sectors = free_space.length.format_size(Unit.sectors, sector_size)
		max_bytes = free_space.length.format_size(Unit.B)

		prompt += tr('Size: {} / {}').format(max_sectors, max_bytes) + '\n\n'
		prompt += tr('All entered values can be suffixed with a unit: %, B, KB, KiB, MB, MiB...') + '\n'
		prompt += tr('If no unit is provided, the value is interpreted as sectors') + '\n'

		max_size = free_space.length

		title = tr('Size (default: {}): ').format(max_size.format_highest())

		# NOTE(review): the trailing '\b' (backspace) in the header looks
		# like a layout workaround — confirm it is intentional
		result = EditMenu(
			title,
			header=f'{prompt}\b',
			allow_skip=True,
			validator=validate,
		).input()

		size: Size | None = None

		match result.type_:
			case ResultType.Skip:
				# skipping defaults to the full free-space segment
				size = max_size
			case ResultType.Selection:
				value = result.text()

				if value:
					size = self._validate_value(sector_size, max_size, value)
				else:
					size = max_size

		assert size
		return size
	def _create_new_partition(self, free_space: FreeSpace) -> PartitionModification:
		"""
		Interactively create a partition inside a free-space segment:
		prompts for size, filesystem and (where applicable) mountpoint,
		then derives the appropriate partition flags.
		"""
		length = self._prompt_size(free_space)
		fs_type = self._prompt_partition_fs_type()

		mountpoint = None
		# btrfs mountpoints come from subvolumes; swap has no mountpoint
		if fs_type not in (FilesystemType.Btrfs, FilesystemType.LinuxSwap):
			mountpoint = self._prompt_mountpoint()

		partition = PartitionModification(
			status=ModificationStatus.Create,
			type=PartitionType.Primary,
			start=free_space.start,
			length=length,
			fs_type=fs_type,
			mountpoint=mountpoint,
		)

		if partition.mountpoint == Path('/boot'):
			partition.set_flag(PartitionFlag.BOOT)
			# on GPT the boot partition doubles as the ESP
			if self._using_gpt:
				partition.set_flag(PartitionFlag.ESP)
		elif partition.is_swap():
			partition.mountpoint = None
			partition.flags = []
			partition.set_flag(PartitionFlag.SWAP)

		return partition
def _reset_confirmation(self) -> bool:
prompt = tr('This will remove all newly added partitions, continue?') + '\n'
result = SelectMenu[bool](
MenuItemGroup.yes_no(),
header=prompt,
alignment=Alignment.CENTER,
orientation=Orientation.HORIZONTAL,
columns=2,
reset_warning_msg=prompt,
allow_skip=False,
).run()
return result.item() == MenuItem.yes()
def _suggest_partition_layout(
self,
data: list[PartitionModification],
) -> DeviceModification | None:
# if modifications have been done already, inform the user
# that this operation will erase those modifications
if any([not entry.exists() for entry in data]):
if not self._reset_confirmation():
return None
from ..interactions.disk_conf import suggest_single_disk_layout
return suggest_single_disk_layout(self._device)
def manual_partitioning(
	device_mod: DeviceModification,
	partition_table: PartitionTable,
) -> DeviceModification | None:
	"""
	Run the interactive partition editor for a device.

	Returns the edited plan, the unchanged input when the user cancels,
	or None when no partitions remain.
	"""
	menu_list = PartitioningList(device_mod, partition_table)
	result = menu_list.get_device_mod()

	if menu_list.is_last_choice_cancel():
		return device_mod

	return result if result.partitions else None

View File

@@ -0,0 +1,102 @@
from pathlib import Path
from typing import assert_never, override
from archinstall.lib.models.device import SubvolumeModification
from archinstall.lib.translationhandler import tr
from archinstall.tui.curses_menu import EditMenu
from archinstall.tui.result import ResultType
from archinstall.tui.types import Alignment
from ..menu.list_manager import ListManager
from ..utils.util import prompt_dir
class SubvolumeMenu(ListManager[SubvolumeModification]):
	"""List-manager menu for adding, editing and deleting btrfs subvolumes."""

	def __init__(
		self,
		btrfs_subvols: list[SubvolumeModification],
		prompt: str | None = None,
	):
		self._actions = [
			tr('Add subvolume'),
			tr('Edit subvolume'),
			tr('Delete subvolume'),
		]

		# first action is global (add); the rest operate on a selected entry
		super().__init__(
			btrfs_subvols,
			[self._actions[0]],
			self._actions[1:],
			prompt,
		)

	@override
	def selected_action_display(self, selection: SubvolumeModification) -> str:
		"""Header shown above the action menu: the subvolume's name."""
		return str(selection.name)

	def _add_subvolume(self, preset: SubvolumeModification | None = None) -> SubvolumeModification | None:
		"""
		Prompt for a subvolume name and mountpoint.

		:param preset: When editing, the existing subvolume used as default;
			it is returned unchanged when the user skips either prompt.
		"""

		def validate(value: str | None) -> str | None:
			# EditMenu validator: None means the value is acceptable
			if value:
				return None
			return tr('Value cannot be empty')

		result = EditMenu(
			tr('Subvolume name'),
			alignment=Alignment.CENTER,
			allow_skip=True,
			default_text=str(preset.name) if preset else None,
			validator=validate,
		).input()

		match result.type_:
			case ResultType.Skip:
				return preset
			case ResultType.Selection:
				name = result.text()
			case ResultType.Reset:
				raise ValueError('Unhandled result type')
			case _:
				assert_never(result.type_)

		header = f'{tr("Subvolume name")}: {name}\n'

		path = prompt_dir(
			tr('Subvolume mountpoint'),
			header=header,
			allow_skip=True,
			validate=True,
			must_exist=False,
		)

		if not path:
			return preset

		return SubvolumeModification(Path(name), path)

	@override
	def handle_action(
		self,
		action: str,
		entry: SubvolumeModification | None,
		data: list[SubvolumeModification],
	) -> list[SubvolumeModification]:
		"""Dispatch add/edit/delete actions on the subvolume list."""
		if action == self._actions[0]:  # add
			new_subvolume = self._add_subvolume()

			if new_subvolume is not None:
				# in case a subvolume with the same name as an existing one
				# was created we'll replace the existing one
				data = [d for d in data if d.name != new_subvolume.name]
				data += [new_subvolume]
		elif entry is not None:  # edit
			if action == self._actions[1]:  # edit subvolume
				new_subvolume = self._add_subvolume(entry)

				if new_subvolume is not None:
					# we'll remove the original subvolume and add the modified version
					data = [d for d in data if d.name != entry.name and d.name != new_subvolume.name]
					data += [new_subvolume]
			elif action == self._actions[2]:  # delete
				data = [d for d in data if d != entry]

		return data

View File

@@ -0,0 +1,128 @@
from pathlib import Path
from pydantic import BaseModel
from archinstall.lib.exceptions import DiskError, SysCallError
from archinstall.lib.general import SysCommand
from archinstall.lib.models.device import LsblkInfo
from archinstall.lib.output import debug, warn
class LsblkOutput(BaseModel):
	# top-level JSON shape produced by `lsblk --json`
	blockdevices: list[LsblkInfo]
def _fetch_lsblk_info(
	dev_path: Path | str | None = None,
	reverse: bool = False,
	full_dev_path: bool = False,
) -> LsblkOutput:
	"""
	Run lsblk and parse its JSON output.

	:param dev_path: Optional single device to query; all devices otherwise.
	:param reverse: Print dependencies in inverse order (--inverse).
	:param full_dev_path: Report full device paths (--paths).
	:raises DiskError: When lsblk fails for a specific device.
	"""
	cmd = ['lsblk', '--json', '--bytes', '--output', ','.join(LsblkInfo.fields())]

	if reverse:
		cmd.append('--inverse')

	if full_dev_path:
		cmd.append('--paths')

	if dev_path:
		cmd.append(str(dev_path))

	try:
		worker = SysCommand(cmd)
	except SysCallError as err:
		# Get the output minus the message/info from lsblk if it returns a non-zero exit code.
		if err.worker_log:
			debug(f'Error calling lsblk: {err.worker_log.decode()}')

		if dev_path:
			# chain the original error so the root cause stays visible
			raise DiskError(f'Failed to read disk "{dev_path}" with lsblk') from err

		# bare raise preserves the original traceback (instead of `raise err`)
		raise

	output = worker.output(remove_cr=False)
	return LsblkOutput.model_validate_json(output)
def get_lsblk_info(
	dev_path: Path | str,
	reverse: bool = False,
	full_dev_path: bool = False,
) -> LsblkInfo:
	"""Return lsblk info for a single device, raising DiskError when absent."""
	output = _fetch_lsblk_info(dev_path, reverse=reverse, full_dev_path=full_dev_path)

	if not output.blockdevices:
		raise DiskError(f'lsblk failed to retrieve information for "{dev_path}"')

	return output.blockdevices[0]


def get_all_lsblk_info() -> list[LsblkInfo]:
	"""Return lsblk info for every block device."""
	return _fetch_lsblk_info().blockdevices


def get_lsblk_output() -> LsblkOutput:
	"""Return the full parsed lsblk output."""
	return _fetch_lsblk_info()
def find_lsblk_info(
	dev_path: Path | str,
	info: list[LsblkInfo],
) -> LsblkInfo | None:
	"""Return the entry whose path matches dev_path, or None when absent."""
	target = Path(dev_path) if isinstance(dev_path, str) else dev_path
	return next((entry for entry in info if entry.path == target), None)
def get_lsblk_by_mountpoint(mountpoint: Path, as_prefix: bool = False) -> list[LsblkInfo]:
	"""
	Find all block devices mounted at (or, with as_prefix=True, anywhere
	under) the given mountpoint, searching the device tree recursively.
	"""

	def _check(infos: list[LsblkInfo]) -> list[LsblkInfo]:
		# recursive walk over devices and their children
		devices = []
		for entry in infos:
			if as_prefix:
				# prefix match: any mountpoint below `mountpoint` counts
				matches = [m for m in entry.mountpoints if str(m).startswith(str(mountpoint))]
				if matches:
					devices += [entry]
			elif mountpoint in entry.mountpoints:
				devices += [entry]

			if len(entry.children) > 0:
				if len(match := _check(entry.children)) > 0:
					devices += match

		return devices

	all_info = get_all_lsblk_info()
	return _check(all_info)
def disk_layouts() -> str:
	"""Return the current block-device tree as pretty-printed JSON ('' on failure)."""
	try:
		output = get_lsblk_output()
	except SysCallError as err:
		warn(f'Could not return disk layouts: {err}')
		return ''

	return output.model_dump_json(indent=4)
def umount(mountpoint: Path, recursive: bool = False) -> None:
	"""Unmount every mountpoint of the given device; no-op when not mounted."""
	lsblk_info = get_lsblk_info(mountpoint)

	if not lsblk_info.mountpoints:
		return

	debug(f'Partition {mountpoint} is currently mounted at: {[str(m) for m in lsblk_info.mountpoints]}')

	base_cmd = ['umount', '-R'] if recursive else ['umount']

	for path in lsblk_info.mountpoints:
		debug(f'Unmounting mountpoint: {path}')
		SysCommand(base_cmd + [str(path)])

View File

@@ -0,0 +1,40 @@
class RequirementError(Exception):
	"""A required binary or precondition is missing."""
	pass


class DiskError(Exception):
	"""Generic disk/partitioning failure."""
	pass


class UnknownFilesystemFormat(Exception):
	"""An unsupported or unrecognized filesystem type was requested."""
	pass


class SysCallError(Exception):
	"""A spawned command exited abnormally."""

	def __init__(self, message: str, exit_code: int | None = None, worker_log: bytes = b'') -> None:
		super().__init__(message)
		self.message = message
		# exit code of the failed command, when known
		self.exit_code = exit_code
		# raw captured output of the command for diagnostics
		self.worker_log = worker_log


class HardwareIncompatibilityError(Exception):
	"""The host hardware does not meet installation requirements."""
	pass


class ServiceException(Exception):
	"""A system service operation failed."""
	pass


class PackageError(Exception):
	"""A package lookup or installation failed."""
	pass


class Deprecated(Exception):
	"""A deprecated code path was invoked."""
	pass


class DownloadTimeout(Exception):
	"""
	Download timeout exception raised by DownloadTimer.
	"""

View File

@@ -0,0 +1,461 @@
from __future__ import annotations
import json
import os
import re
import secrets
import shlex
import stat
import string
import subprocess
import sys
import time
from collections.abc import Iterator
from datetime import date, datetime
from enum import Enum
from pathlib import Path
from select import EPOLLHUP, EPOLLIN, epoll
from shutil import which
from types import TracebackType
from typing import Any, override
from .exceptions import RequirementError, SysCallError
from .output import debug, error, logger
# https://stackoverflow.com/a/43627833/929999
# matches VT100/ANSI CSI escape sequences (colors, cursor movement, etc.)
_VT100_ESCAPE_REGEX = r'\x1B\[[?0-9;]*[a-zA-Z]'
_VT100_ESCAPE_REGEX_BYTES = _VT100_ESCAPE_REGEX.encode()
def generate_password(length: int = 64) -> str:
	"""Return a cryptographically random password of `length` characters."""
	pool = string.printable  # digits, ascii_letters, punctuation (!"#$[] etc) and whitespace
	chars = [secrets.choice(pool) for _ in range(length)]
	return ''.join(chars)
def locate_binary(name: str) -> str:
	"""Resolve a binary name via PATH; raises RequirementError when missing."""
	path = which(name)
	if path is None:
		raise RequirementError(f'Binary {name} does not exist.')
	return path
def clear_vt100_escape_codes(data: bytes) -> bytes:
	"""Strip VT100/ANSI escape sequences from raw command output (bytes)."""
	return re.sub(_VT100_ESCAPE_REGEX_BYTES, b'', data)


def clear_vt100_escape_codes_from_str(data: str) -> str:
	"""Strip VT100/ANSI escape sequences from decoded text."""
	return re.sub(_VT100_ESCAPE_REGEX, '', data)
def jsonify(obj: object, safe: bool = True) -> object:
"""
Converts objects into json.dumps() compatible nested dictionaries.
Setting safe to True skips dictionary keys starting with a bang (!)
"""
compatible_types = str, int, float, bool
if isinstance(obj, dict):
return {
key: jsonify(value, safe)
for key, value in obj.items()
if isinstance(key, compatible_types) and not (isinstance(key, str) and key.startswith('!') and safe)
}
if isinstance(obj, Enum):
return obj.value
if hasattr(obj, 'json'):
# json() is a friendly name for json-helper, it should return
# a dictionary representation of the object so that it can be
# processed by the json library.
return jsonify(obj.json(), safe)
if isinstance(obj, datetime | date):
return obj.isoformat()
if isinstance(obj, list | set | tuple):
return [jsonify(item, safe) for item in obj]
if isinstance(obj, Path):
return str(obj)
if hasattr(obj, '__dict__'):
return vars(obj)
return obj
class JSON(json.JSONEncoder, json.JSONDecoder):
	"""
	A safe JSON encoder that will omit private information in dicts (starting with !)
	"""

	@override
	def encode(self, o: object) -> str:
		# route through jsonify() with safe=True so '!'-prefixed keys are dropped
		return super().encode(jsonify(o))


class UNSAFE_JSON(json.JSONEncoder, json.JSONDecoder):
	"""
	UNSAFE_JSON will call/encode and keep private information in dicts (starting with !)
	"""

	@override
	def encode(self, o: object) -> str:
		# safe=False keeps '!'-prefixed keys in the output
		return super().encode(jsonify(o, safe=False))
class SysCommandWorker:
	"""
	Runs a command in a pseudo-terminal and incrementally collects its
	output via epoll. Supports iteration over output lines, peeking, and
	context-manager usage that raises SysCallError on a non-zero exit.
	"""

	def __init__(
		self,
		cmd: str | list[str],
		peek_output: bool | None = False,
		environment_vars: dict[str, str] | None = None,
		working_directory: str = './',
		remove_vt100_escape_codes_from_lines: bool = True,
	):
		if isinstance(cmd, str):
			cmd = shlex.split(cmd)

		if cmd and not cmd[0].startswith(('/', './')):  # Path() does not work well
			# resolve bare command names against PATH
			cmd[0] = locate_binary(cmd[0])

		self.cmd = cmd
		self.peek_output = peek_output
		# define the standard locale for command outputs. For now the C ascii one. Can be overridden
		self.environment_vars = {'LC_ALL': 'C'}

		if environment_vars:
			self.environment_vars.update(environment_vars)

		self.working_directory = working_directory

		self.exit_code: int | None = None
		self._trace_log = b''  # accumulated raw child output
		self._trace_log_pos = 0  # read cursor into _trace_log
		self.poll_object = epoll()
		self.child_fd: int | None = None
		self.started: float | None = None  # timestamp when execute() forked
		self.ended: float | None = None  # timestamp when the child finished
		self.remove_vt100_escape_codes_from_lines: bool = remove_vt100_escape_codes_from_lines
def __contains__(self, key: bytes) -> bool:
"""
Contains will also move the current buffert position forward.
This is to avoid re-checking the same data when looking for output.
"""
assert isinstance(key, bytes)
index = self._trace_log.find(key, self._trace_log_pos)
if index >= 0:
self._trace_log_pos += index + len(key)
return True
return False
	def __iter__(self, *args: str, **kwargs: dict[str, Any]) -> Iterator[bytes]:
		"""
		Yield complete output lines that appeared since the last read,
		advancing the read cursor to the last newline seen.
		"""
		# NOTE(review): when no newline exists yet, rfind() returns -1 and
		# the cursor is set to -1 below — confirm this is intended behavior
		last_line = self._trace_log.rfind(b'\n')
		lines = filter(None, self._trace_log[self._trace_log_pos : last_line].splitlines())
		for line in lines:
			if self.remove_vt100_escape_codes_from_lines:
				line = clear_vt100_escape_codes(line)

			yield line + b'\n'

		self._trace_log_pos = last_line
	@override
	def __repr__(self) -> str:
		# ensure the command has started so the trace log has content
		self.make_sure_we_are_executing()
		return str(self._trace_log)

	@override
	def __str__(self) -> str:
		"""Decoded trace log; falls back to the bytes repr on invalid UTF-8."""
		try:
			return self._trace_log.decode('utf-8')
		except UnicodeDecodeError:
			return str(self._trace_log)
	def __enter__(self) -> 'SysCommandWorker':
		return self

	def __exit__(self, exc_type: type[BaseException] | None, exc_value: BaseException | None, traceback: TracebackType | None) -> None:
		"""
		Close the pty fd, flush any peeked output and raise SysCallError
		when the command exited with a non-zero status.
		"""
		# b''.join(sys_command('sync')) # No need to, since the underlying fs() object will call sync.
		# TODO: https://stackoverflow.com/questions/28157929/how-to-safely-handle-an-exception-inside-a-context-manager

		if self.child_fd:
			try:
				os.close(self.child_fd)
			except Exception:
				pass

		if self.peek_output:
			# To make sure any peaked output didn't leave us hanging
			# on the same line we were on.
			sys.stdout.write('\n')
			sys.stdout.flush()

		if exc_type is not None:
			debug(str(exc_value))

		if self.exit_code != 0:
			raise SysCallError(
				f'{self.cmd} exited with abnormal exit code [{self.exit_code}]: {str(self)[-500:]}',
				self.exit_code,
				worker_log=self._trace_log,
			)
def is_alive(self) -> bool:
self.poll()
if self.started and self.ended is None:
return True
return False
def write(self, data: bytes, line_ending: bool = True) -> int:
assert isinstance(data, bytes) # TODO: Maybe we can support str as well and encode it
self.make_sure_we_are_executing()
if self.child_fd:
return os.write(self.child_fd, data + (b'\n' if line_ending else b''))
return 0
def make_sure_we_are_executing(self) -> bool:
if not self.started:
return self.execute()
return True
def tell(self) -> int:
self.make_sure_we_are_executing()
return self._trace_log_pos
def seek(self, pos: int) -> None:
self.make_sure_we_are_executing()
# Safety check to ensure 0 < pos < len(tracelog)
self._trace_log_pos = min(max(0, pos), len(self._trace_log))
	def peak(self, output: str | bytes) -> bool:
		"""
		Echo child output to stdout when peek_output is enabled; returns
		False only when the bytes cannot be decoded as UTF-8.

		(The misspelled name 'peak' is kept — it is part of the public
		interface and callers depend on it.)
		"""
		if self.peek_output:
			if isinstance(output, bytes):
				try:
					output = output.decode('UTF-8')
				except UnicodeDecodeError:
					return False

			_cmd_output(output)

			sys.stdout.write(output)
			sys.stdout.flush()

		return True
	def poll(self) -> None:
		"""
		Drain any pending child output into the trace log and detect
		process exit, populating exit_code once the child has terminated.
		"""
		self.make_sure_we_are_executing()

		if self.child_fd:
			got_output = False
			for _fileno, _event in self.poll_object.poll(0.1):
				try:
					output = os.read(self.child_fd, 8192)
					got_output = True
					self.peak(output)
					self._trace_log += output
				except OSError:
					# EIO from the pty master means the child closed its end
					self.ended = time.time()
					break

			if self.ended or (not got_output and not _pid_exists(self.pid)):
				self.ended = time.time()
				try:
					wait_status = os.waitpid(self.pid, 0)[1]
					self.exit_code = os.waitstatus_to_exitcode(wait_status)
				except ChildProcessError:
					try:
						# NOTE(review): waitpid() is called with child_fd,
						# which is a file descriptor rather than a pid —
						# confirm whether this fallback can ever succeed
						wait_status = os.waitpid(self.child_fd, 0)[1]
						self.exit_code = os.waitstatus_to_exitcode(wait_status)
					except ChildProcessError:
						self.exit_code = 1
	def execute(self) -> bool:
		"""
		Fork the command through a pty and register the master fd with
		epoll. Returns True in the parent; the child process replaces
		itself with the target command via execve.
		"""
		import pty

		if (old_dir := os.getcwd()) != self.working_directory:
			os.chdir(str(self.working_directory))

		# Note: If for any reason, we get a Python exception between here
		# and until os.close(), the traceback will get locked inside
		# stdout of the child_fd object. `os.read(self.child_fd, 8192)` is the
		# only way to get the traceback without losing it.
		self.pid, self.child_fd = pty.fork()

		# https://stackoverflow.com/questions/4022600/python-pty-fork-how-does-it-work
		if not self.pid:
			# child process branch: replace ourselves with the command
			_cmd_history(self.cmd)

			try:
				os.execve(self.cmd[0], list(self.cmd), {**os.environ, **self.environment_vars})
			except FileNotFoundError:
				# NOTE(review): this runs in the forked child, so setting
				# attributes/returning here never reaches the parent —
				# confirm whether os._exit() was intended instead
				error(f'{self.cmd[0]} does not exist.')
				self.exit_code = 1
				return False
		else:
			# Only parent process moves back to the original working directory
			os.chdir(old_dir)

		self.started = time.time()
		self.poll_object.register(self.child_fd, EPOLLIN | EPOLLHUP)

		return True
def decode(self, encoding: str = 'UTF-8') -> str:
    """Decode the accumulated trace log using the given encoding."""
    return str(self._trace_log, encoding)
class SysCommand:
    """Run a command to completion and expose its captured output.

    Thin synchronous wrapper around :class:`SysCommandWorker`: the worker is
    created and polled until the process ends during ``__init__`` (via
    ``create_session``), after which the trace log can be read through
    ``decode()``, ``output()``, slicing, or iteration.
    """

    def __init__(
        self,
        cmd: str | list[str],
        peek_output: bool | None = False,
        environment_vars: dict[str, str] | None = None,
        working_directory: str = './',
        remove_vt100_escape_codes_from_lines: bool = True,
    ):
        self.cmd = cmd
        self.peek_output = peek_output
        self.environment_vars = environment_vars
        self.working_directory = working_directory
        self.remove_vt100_escape_codes_from_lines = remove_vt100_escape_codes_from_lines
        # Populated by create_session(); None only if session creation failed.
        self.session: SysCommandWorker | None = None
        # Runs the command synchronously — the process has finished by the
        # time __init__ returns.
        self.create_session()

    def __enter__(self) -> SysCommandWorker | None:
        return self.session

    def __exit__(self, exc_type: type[BaseException] | None, exc_value: BaseException | None, traceback: TracebackType | None) -> None:
        # b''.join(sys_command('sync')) # No need to, since the underlying fs() object will call sync.
        # TODO: https://stackoverflow.com/questions/28157929/how-to-safely-handle-an-exception-inside-a-context-manager
        if exc_type is not None:
            error(str(exc_value))

    def __iter__(self, *args: list[Any], **kwargs: dict[str, Any]) -> Iterator[bytes]:
        # Yields output lines from the completed session, if one exists.
        if self.session:
            yield from self.session

    def __getitem__(self, key: slice) -> bytes:
        # Only slice access is supported, e.g. SysCommand('ls')[:10].
        if not self.session:
            raise KeyError('SysCommand() does not have an active session.')
        elif type(key) is slice:
            start = key.start or 0
            end = key.stop or len(self.session._trace_log)
            return self.session._trace_log[start:end]
        else:
            raise ValueError("SysCommand() doesn't have key & value pairs, only slices, SysCommand('ls')[:10] as an example.")

    @override
    def __repr__(self, *args: list[Any], **kwargs: dict[str, Any]) -> str:
        return self.decode('UTF-8', errors='backslashreplace') or ''

    def create_session(self) -> bool:
        """
        Initiates a :ref:`SysCommandWorker` session in this class ``.session``.
        It then proceeds to poll the process until it ends, after which it also
        clears any printed output if ``.peek_output=True``.
        """
        if self.session:
            return True

        with SysCommandWorker(
            self.cmd,
            peek_output=self.peek_output,
            environment_vars=self.environment_vars,
            remove_vt100_escape_codes_from_lines=self.remove_vt100_escape_codes_from_lines,
            working_directory=self.working_directory,
        ) as session:
            self.session = session

            # Block until the child process has exited.
            while not self.session.ended:
                self.session.poll()

        if self.peek_output:
            # Terminate the peeked output with a newline so the shell prompt
            # does not end up on the same line as the command output.
            sys.stdout.write('\n')
            sys.stdout.flush()

        return True

    def decode(self, encoding: str = 'utf-8', errors: str = 'backslashreplace', strip: bool = True) -> str:
        """Decode the captured output; raises ValueError if no session exists."""
        if not self.session:
            raise ValueError('No session available to decode')

        val = self.session._trace_log.decode(encoding, errors=errors)

        if strip:
            return val.strip()

        return val

    def output(self, remove_cr: bool = True) -> bytes:
        """Return the raw captured output, normalizing CRLF to LF by default."""
        if not self.session:
            raise ValueError('No session available')

        if remove_cr:
            return self.session._trace_log.replace(b'\r\n', b'\n')

        return self.session._trace_log

    @property
    def exit_code(self) -> int | None:
        # None when no session was created (or the process never reaped).
        if self.session:
            return self.session.exit_code
        else:
            return None

    @property
    def trace_log(self) -> bytes | None:
        # Raw byte log of everything the child wrote, or None without a session.
        if self.session:
            return self.session._trace_log
        return None
def _append_log(file: str, content: str) -> None:
    """Append content to a log file inside the logger directory.

    On first creation the file is restricted to u=rw,g=r. Permission and
    missing-path errors are deliberately swallowed: logging is best-effort.
    """
    path = logger.directory / file
    first_write = not path.exists()
    try:
        with path.open('a') as f:
            f.write(content)
        if first_write:
            path.chmod(stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP)
    except (PermissionError, FileNotFoundError):
        # If the file does not exist, ignore the error
        pass
def _cmd_history(cmd: list[str]) -> None:
    """Record an executed command, with a timestamp, in cmd_history.txt."""
    _append_log('cmd_history.txt', f'{time.time()} {cmd}\n')
def _cmd_output(output: str) -> None:
    # Mirror command output into the persistent cmd_output.txt log.
    _append_log('cmd_output.txt', output)
def run(
    cmd: list[str],
    input_data: bytes | None = None,
) -> subprocess.CompletedProcess[bytes]:
    # Run a command synchronously, recording it in the command-history log.
    # stderr is folded into stdout; raises CalledProcessError on non-zero exit
    # (check=True).
    _cmd_history(cmd)

    return subprocess.run(
        cmd,
        input=input_data,
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
        check=True,
    )
def _pid_exists(pid: int) -> bool:
try:
return any(subprocess.check_output(['ps', '--no-headers', '-o', 'pid', '-p', str(pid)]).strip())
except subprocess.CalledProcessError:
return False

View File

@@ -0,0 +1,581 @@
from __future__ import annotations
from typing import override
from archinstall.lib.disk.disk_menu import DiskLayoutConfigurationMenu
from archinstall.lib.models.application import ApplicationConfiguration
from archinstall.lib.models.authentication import AuthenticationConfiguration
from archinstall.lib.models.device import DiskLayoutConfiguration, DiskLayoutType, EncryptionType, FilesystemType, PartitionModification
from archinstall.lib.packages import list_available_packages
from archinstall.tui.menu_item import MenuItem, MenuItemGroup
from .applications.application_menu import ApplicationMenu
from .args import ArchConfig
from .authentication.authentication_menu import AuthenticationMenu
from .configuration import save_config
from .hardware import SysInfo
from .interactions.general_conf import (
add_number_of_parallel_downloads,
ask_additional_packages_to_install,
ask_for_a_timezone,
ask_hostname,
ask_ntp,
)
from .interactions.network_menu import ask_to_configure_network
from .interactions.system_conf import ask_for_bootloader, ask_for_swap, ask_for_uki, select_kernel
from .locale.locale_menu import LocaleMenu
from .menu.abstract_menu import CONFIG_KEY, AbstractMenu
from .mirrors import MirrorMenu
from .models.bootloader import Bootloader
from .models.locale import LocaleConfiguration
from .models.mirrors import MirrorConfiguration
from .models.network import NetworkConfiguration, NicType
from .models.packages import Repository
from .models.profile import ProfileConfiguration
from .output import FormattedOutput
from .pacman.config import PacmanConfig
from .translationhandler import Language, tr, translation_handler
class GlobalMenu(AbstractMenu[None]):
    """Top-level archinstall configuration menu.

    Aggregates every configuration sub-menu (locale, mirrors, disks,
    authentication, profiles, applications, ...) into one item group and
    validates that the combined selection is installable before the
    'Install' entry is allowed to proceed.
    """

    def __init__(self, arch_config: ArchConfig) -> None:
        self._arch_config = arch_config
        # Fixed menu order: sort_items=False keeps the declaration order below.
        menu_options = self._get_menu_options()

        self._item_group = MenuItemGroup(
            menu_options,
            sort_items=False,
            checkmarks=True,
        )

        super().__init__(self._item_group, config=arch_config)

    def _get_menu_options(self) -> list[MenuItem]:
        """Build the full list of menu entries; each entry's ``key`` maps onto
        a field of the ArchConfig being edited."""
        menu_options = [
            MenuItem(
                text=tr('Archinstall language'),
                action=self._select_archinstall_language,
                display_action=lambda x: x.display_name if x else '',
                key='archinstall_language',
            ),
            MenuItem(
                text=tr('Locales'),
                action=self._locale_selection,
                preview_action=self._prev_locale,
                key='locale_config',
            ),
            MenuItem(
                text=tr('Mirrors and repositories'),
                action=self._mirror_configuration,
                preview_action=self._prev_mirror_config,
                key='mirror_config',
            ),
            MenuItem(
                text=tr('Disk configuration'),
                action=self._select_disk_config,
                preview_action=self._prev_disk_config,
                mandatory=True,
                key='disk_config',
            ),
            MenuItem(
                text=tr('Swap'),
                value=True,
                action=ask_for_swap,
                preview_action=self._prev_swap,
                key='swap',
            ),
            MenuItem(
                text=tr('Bootloader'),
                value=Bootloader.get_default(),
                action=self._select_bootloader,
                preview_action=self._prev_bootloader,
                mandatory=True,
                key='bootloader',
            ),
            MenuItem(
                text=tr('Unified kernel images'),
                value=False,
                enabled=SysInfo.has_uefi(),
                action=ask_for_uki,
                preview_action=self._prev_uki,
                key='uki',
            ),
            MenuItem(
                text=tr('Hostname'),
                value='archlinux',
                action=ask_hostname,
                preview_action=self._prev_hostname,
                key='hostname',
            ),
            MenuItem(
                text=tr('Authentication'),
                action=self._select_authentication,
                preview_action=self._prev_authentication,
                key='auth_config',
            ),
            MenuItem(
                text=tr('Profile'),
                action=self._select_profile,
                preview_action=self._prev_profile,
                key='profile_config',
            ),
            MenuItem(
                text=tr('Applications'),
                action=self._select_applications,
                value=[],
                preview_action=self._prev_applications,
                key='app_config',
            ),
            MenuItem(
                text=tr('Kernels'),
                value=['linux'],
                action=select_kernel,
                preview_action=self._prev_kernel,
                mandatory=True,
                key='kernels',
            ),
            MenuItem(
                text=tr('Network configuration'),
                action=ask_to_configure_network,
                value={},
                preview_action=self._prev_network_config,
                key='network_config',
            ),
            MenuItem(
                text=tr('Parallel Downloads'),
                action=add_number_of_parallel_downloads,
                value=0,
                preview_action=self._prev_parallel_dw,
                key='parallel_downloads',
            ),
            MenuItem(
                text=tr('Additional packages'),
                action=self._select_additional_packages,
                value=[],
                preview_action=self._prev_additional_pkgs,
                key='packages',
            ),
            MenuItem(
                text=tr('Timezone'),
                action=ask_for_a_timezone,
                value='UTC',
                preview_action=self._prev_tz,
                key='timezone',
            ),
            MenuItem(
                text=tr('Automatic time sync (NTP)'),
                action=ask_ntp,
                value=True,
                preview_action=self._prev_ntp,
                key='ntp',
            ),
            MenuItem(
                text='',
            ),
            MenuItem(
                text=tr('Save configuration'),
                action=lambda x: self._safe_config(),
                key=f'{CONFIG_KEY}_save',
            ),
            MenuItem(
                text=tr('Install'),
                preview_action=self._prev_install_invalid_config,
                key=f'{CONFIG_KEY}_install',
            ),
            MenuItem(
                text=tr('Abort'),
                action=lambda x: exit(1),
                key=f'{CONFIG_KEY}_abort',
            ),
        ]

        return menu_options

    def _safe_config(self) -> None:
        # Push all menu item values into the ArchConfig, then persist it.
        self.sync_all_to_config()
        save_config(self._arch_config)

    def _missing_configs(self) -> list[str]:
        """Return human-readable descriptions of every mandatory setting that
        is still unset (deduplicated)."""
        item: MenuItem = self._item_group.find_by_key('auth_config')
        auth_config: AuthenticationConfiguration | None = item.value

        def check(s: str) -> bool:
            item = self._item_group.find_by_key(s)
            return item.has_value()

        def has_superuser() -> bool:
            if auth_config and auth_config.users:
                return any(u.sudo for u in auth_config.users)
            return False

        missing = set()

        # Installation needs at least one way to gain root afterwards.
        if (auth_config is None or auth_config.root_enc_password is None) and not has_superuser():
            missing.add(
                tr('Either root-password or at least 1 user with sudo privileges must be specified'),
            )

        for item in self._item_group.items:
            if item.mandatory:
                assert item.key is not None
                if not check(item.key):
                    missing.add(item.text)

        return list(missing)

    @override
    def _is_config_valid(self) -> bool:
        """
        Checks the validity of the current configuration.
        """
        if len(self._missing_configs()) != 0:
            return False
        return self._validate_bootloader() is None

    def _select_archinstall_language(self, preset: Language) -> Language:
        from .interactions.general_conf import select_archinstall_language

        language = select_archinstall_language(translation_handler.translated_languages, preset)
        translation_handler.activate(language)

        self._update_lang_text()

        return language

    def _select_applications(self, preset: ApplicationConfiguration | None) -> ApplicationConfiguration | None:
        app_config = ApplicationMenu(preset).run()
        return app_config

    def _select_authentication(self, preset: AuthenticationConfiguration | None) -> AuthenticationConfiguration | None:
        auth_config = AuthenticationMenu(preset).run()
        return auth_config

    def _update_lang_text(self) -> None:
        """
        The options for the global menu are generated with a static text;
        each entry of the menu needs to be updated with the new translation
        """
        new_options = self._get_menu_options()

        for o in new_options:
            if o.key is not None:
                self._item_group.find_by_key(o.key).text = o.text

    def _locale_selection(self, preset: LocaleConfiguration) -> LocaleConfiguration:
        locale_config = LocaleMenu(preset).run()
        return locale_config

    def _prev_locale(self, item: MenuItem) -> str | None:
        if not item.value:
            return None

        config: LocaleConfiguration = item.value
        return config.preview()

    def _prev_network_config(self, item: MenuItem) -> str | None:
        if item.value:
            network_config: NetworkConfiguration = item.value
            if network_config.type == NicType.MANUAL:
                output = FormattedOutput.as_table(network_config.nics)
            else:
                output = f'{tr("Network configuration")}:\n{network_config.type.display_msg()}'
            return output
        return None

    def _prev_additional_pkgs(self, item: MenuItem) -> str | None:
        if item.value:
            output = '\n'.join(sorted(item.value))
            return output
        return None

    def _prev_authentication(self, item: MenuItem) -> str | None:
        if item.value:
            auth_config: AuthenticationConfiguration = item.value
            output = ''

            if auth_config.root_enc_password:
                output += f'{tr("Root password")}: {auth_config.root_enc_password.hidden()}\n'

            if auth_config.users:
                output += FormattedOutput.as_table(auth_config.users) + '\n'

            if auth_config.u2f_config:
                u2f_config = auth_config.u2f_config
                login_method = u2f_config.u2f_login_method.display_value()
                output = tr('U2F login method: ') + login_method
                output += '\n'
                output += tr('Passwordless sudo: ') + (tr('Enabled') if u2f_config.passwordless_sudo else tr('Disabled'))

            return output

        return None

    def _prev_applications(self, item: MenuItem) -> str | None:
        if item.value:
            app_config: ApplicationConfiguration = item.value
            output = ''

            if app_config.bluetooth_config:
                output += f'{tr("Bluetooth")}: '
                output += tr('Enabled') if app_config.bluetooth_config.enabled else tr('Disabled')
                output += '\n'

            if app_config.audio_config:
                audio_config = app_config.audio_config
                output += f'{tr("Audio")}: {audio_config.audio.value}'
                output += '\n'

            return output

        return None

    def _prev_tz(self, item: MenuItem) -> str | None:
        if item.value:
            return f'{tr("Timezone")}: {item.value}'
        return None

    def _prev_ntp(self, item: MenuItem) -> str | None:
        if item.value is not None:
            output = f'{tr("NTP")}: '
            output += tr('Enabled') if item.value else tr('Disabled')
            return output
        return None

    def _prev_disk_config(self, item: MenuItem) -> str | None:
        disk_layout_conf: DiskLayoutConfiguration | None = item.value

        if disk_layout_conf:
            output = tr('Configuration type: {}').format(disk_layout_conf.config_type.display_msg()) + '\n'

            if disk_layout_conf.config_type == DiskLayoutType.Pre_mount:
                output += tr('Mountpoint') + ': ' + str(disk_layout_conf.mountpoint)

            if disk_layout_conf.lvm_config:
                output += '{}: {}'.format(tr('LVM configuration type'), disk_layout_conf.lvm_config.config_type.display_msg()) + '\n'

            if disk_layout_conf.disk_encryption:
                output += tr('Disk encryption') + ': ' + EncryptionType.type_to_text(disk_layout_conf.disk_encryption.encryption_type) + '\n'

            if disk_layout_conf.btrfs_options:
                btrfs_options = disk_layout_conf.btrfs_options
                if btrfs_options.snapshot_config:
                    output += tr('Btrfs snapshot type: {}').format(btrfs_options.snapshot_config.snapshot_type.value) + '\n'

            return output

        return None

    def _prev_swap(self, item: MenuItem) -> str | None:
        if item.value is not None:
            output = f'{tr("Swap on zram")}: '
            output += tr('Enabled') if item.value else tr('Disabled')
            return output
        return None

    def _prev_uki(self, item: MenuItem) -> str | None:
        if item.value is not None:
            output = f'{tr("Unified kernel images")}: '
            output += tr('Enabled') if item.value else tr('Disabled')
            return output
        return None

    def _prev_hostname(self, item: MenuItem) -> str | None:
        if item.value is not None:
            return f'{tr("Hostname")}: {item.value}'
        return None

    def _prev_parallel_dw(self, item: MenuItem) -> str | None:
        if item.value is not None:
            return f'{tr("Parallel Downloads")}: {item.value}'
        return None

    def _prev_kernel(self, item: MenuItem) -> str | None:
        if item.value:
            kernel = ', '.join(item.value)
            return f'{tr("Kernel")}: {kernel}'
        return None

    def _prev_bootloader(self, item: MenuItem) -> str | None:
        if item.value is not None:
            return f'{tr("Bootloader")}: {item.value.value}'
        return None

    def _validate_bootloader(self) -> str | None:
        """
        Checks the selected bootloader is valid for the selected filesystem
        type of the boot partition.

        Returns [`None`] if the bootloader is valid, otherwise returns a
        string with the error message.

        XXX: The caller is responsible for wrapping the string with the translation
            shim if necessary.
        """
        bootloader: Bootloader | None = None
        root_partition: PartitionModification | None = None
        boot_partition: PartitionModification | None = None
        efi_partition: PartitionModification | None = None

        bootloader = self._item_group.find_by_key('bootloader').value

        if bootloader == Bootloader.NO_BOOTLOADER:
            return None

        if disk_config := self._item_group.find_by_key('disk_config').value:
            for layout in disk_config.device_modifications:
                if root_partition := layout.get_root_partition():
                    break
            for layout in disk_config.device_modifications:
                if boot_partition := layout.get_boot_partition():
                    break
            if SysInfo.has_uefi():
                for layout in disk_config.device_modifications:
                    if efi_partition := layout.get_efi_partition():
                        break
        else:
            return 'No disk layout selected'

        if root_partition is None:
            return 'Root partition not found'

        if boot_partition is None:
            return 'Boot partition not found'

        if SysInfo.has_uefi():
            if efi_partition is None:
                return 'EFI system partition (ESP) not found'

            if efi_partition.fs_type not in [FilesystemType.Fat12, FilesystemType.Fat16, FilesystemType.Fat32]:
                return 'ESP must be formatted as a FAT filesystem'

        if bootloader == Bootloader.Limine:
            if boot_partition.fs_type not in [FilesystemType.Fat12, FilesystemType.Fat16, FilesystemType.Fat32]:
                return 'Limine does not support booting with a non-FAT boot partition'

        return None

    def _prev_install_invalid_config(self, item: MenuItem) -> str | None:
        if missing := self._missing_configs():
            text = tr('Missing configurations:\n')
            for m in missing:
                text += f'- {m}\n'
            return text[:-1]  # remove last new line

        if error := self._validate_bootloader():
            # Translate a stable template and insert the error afterwards;
            # translating a pre-formatted f-string can never match a catalog key.
            return tr('Invalid configuration: {}').format(error)

        return None

    def _prev_profile(self, item: MenuItem) -> str | None:
        profile_config: ProfileConfiguration | None = item.value

        if profile_config and profile_config.profile:
            output = tr('Profiles') + ': '
            if profile_names := profile_config.profile.current_selection_names():
                output += ', '.join(profile_names) + '\n'
            else:
                output += profile_config.profile.name + '\n'

            if profile_config.gfx_driver:
                output += tr('Graphics driver') + ': ' + profile_config.gfx_driver.value + '\n'

            if profile_config.greeter:
                output += tr('Greeter') + ': ' + profile_config.greeter.value + '\n'

            return output

        return None

    def _select_disk_config(
        self,
        preset: DiskLayoutConfiguration | None = None,
    ) -> DiskLayoutConfiguration | None:
        disk_config = DiskLayoutConfigurationMenu(preset).run()
        return disk_config

    def _select_bootloader(self, preset: Bootloader | None) -> Bootloader | None:
        bootloader = ask_for_bootloader(preset)

        if bootloader:
            # UKI only makes sense on UEFI with a bootloader that supports it.
            uki = self._item_group.find_by_key('uki')
            if not SysInfo.has_uefi() or not bootloader.has_uki_support():
                uki.value = False
                uki.enabled = False
            else:
                uki.enabled = True

        return bootloader

    def _select_profile(self, current_profile: ProfileConfiguration | None) -> ProfileConfiguration | None:
        from .profile.profile_menu import ProfileMenu

        profile_config = ProfileMenu(preset=current_profile).run()
        return profile_config

    def _select_additional_packages(self, preset: list[str]) -> list[str]:
        # Offer packages from whichever optional repositories are enabled.
        config: MirrorConfiguration | None = self._item_group.find_by_key('mirror_config').value
        repositories: set[Repository] = set()

        if config:
            repositories = set(config.optional_repositories)

        packages = ask_additional_packages_to_install(
            preset,
            repositories=repositories,
        )

        return packages

    def _mirror_configuration(self, preset: MirrorConfiguration | None = None) -> MirrorConfiguration:
        mirror_configuration = MirrorMenu(preset=preset).run()

        if mirror_configuration.optional_repositories:
            # reset the package list cache in case the repository selection has changed
            list_available_packages.cache_clear()

            # enable the repositories in the config
            pacman_config = PacmanConfig(None)
            pacman_config.enable(mirror_configuration.optional_repositories)
            pacman_config.apply()

        return mirror_configuration

    def _prev_mirror_config(self, item: MenuItem) -> str | None:
        if not item.value:
            return None

        mirror_config: MirrorConfiguration = item.value

        output = ''
        if mirror_config.mirror_regions:
            title = tr('Selected mirror regions')
            divider = '-' * len(title)
            regions = mirror_config.region_names
            output += f'{title}\n{divider}\n{regions}\n\n'

        if mirror_config.custom_servers:
            title = tr('Custom servers')
            divider = '-' * len(title)
            servers = mirror_config.custom_server_urls
            output += f'{title}\n{divider}\n{servers}\n\n'

        if mirror_config.optional_repositories:
            title = tr('Optional repositories')
            divider = '-' * len(title)
            repos = ', '.join([r.value for r in mirror_config.optional_repositories])
            output += f'{title}\n{divider}\n{repos}\n\n'

        if mirror_config.custom_repositories:
            title = tr('Custom repositories')
            table = FormattedOutput.as_table(mirror_config.custom_repositories)
            output += f'{title}:\n\n{table}'

        return output.strip()

View File

@@ -0,0 +1,323 @@
import os
from enum import Enum
from functools import cached_property
from pathlib import Path
from .exceptions import SysCallError
from .general import SysCommand
from .networking import enrich_iface_types, list_interfaces
from .output import debug
from .translationhandler import tr
class CpuVendor(Enum):
    """CPU vendor identifiers keyed by /proc/cpuinfo's ``vendor_id`` strings."""

    AuthenticAMD = 'amd'
    GenuineIntel = 'intel'
    _Unknown = 'unknown'

    @classmethod
    def get_vendor(cls, name: str) -> 'CpuVendor':
        # Map the raw vendor_id string onto a member; unknown vendors are
        # logged and collapsed to _Unknown instead of raising.
        if vendor := getattr(cls, name, None):
            return vendor
        else:
            debug(f"Unknown CPU vendor '{name}' detected.")
            return cls._Unknown

    def _has_microcode(self) -> bool:
        # Only AMD and Intel have microcode update images handled here.
        match self:
            case CpuVendor.AuthenticAMD | CpuVendor.GenuineIntel:
                return True
            case _:
                return False

    def get_ucode(self) -> Path | None:
        """Return the microcode initramfs image filename for this vendor, if any."""
        if self._has_microcode():
            return Path(self.value + '-ucode.img')
        return None
class GfxPackage(Enum):
    # Pacman package names for graphics drivers and related tooling.
    Dkms = 'dkms'
    IntelMediaDriver = 'intel-media-driver'
    LibvaIntelDriver = 'libva-intel-driver'
    LibvaMesaDriver = 'libva-mesa-driver'
    LibvaNvidiaDriver = 'libva-nvidia-driver'
    Mesa = 'mesa'
    NvidiaDkms = 'nvidia-dkms'
    NvidiaOpenDkms = 'nvidia-open-dkms'
    VulkanIntel = 'vulkan-intel'
    VulkanRadeon = 'vulkan-radeon'
    VulkanNouveau = 'vulkan-nouveau'
    Xf86VideoAmdgpu = 'xf86-video-amdgpu'
    Xf86VideoAti = 'xf86-video-ati'
    Xf86VideoNouveau = 'xf86-video-nouveau'
    XorgServer = 'xorg-server'
    XorgXinit = 'xorg-xinit'
class GfxDriver(Enum):
    """User-facing graphics driver choices, each mapping to a package set."""

    AllOpenSource = 'All open-source'
    AmdOpenSource = 'AMD / ATI (open-source)'
    IntelOpenSource = 'Intel (open-source)'
    NvidiaOpenKernel = 'Nvidia (open kernel module for newer GPUs, Turing+)'
    NvidiaOpenSource = 'Nvidia (open-source nouveau driver)'
    NvidiaProprietary = 'Nvidia (proprietary)'
    VMOpenSource = 'VirtualBox (open-source)'

    def is_nvidia(self) -> bool:
        # True for any of the three Nvidia driver flavours.
        match self:
            case GfxDriver.NvidiaProprietary | GfxDriver.NvidiaOpenSource | GfxDriver.NvidiaOpenKernel:
                return True
            case _:
                return False

    def packages_text(self) -> str:
        """Return a human-readable, sorted listing of the packages this driver installs."""
        pkg_names = [p.value for p in self.gfx_packages()]

        text = tr('Installed packages') + ':\n'

        for p in sorted(pkg_names):
            text += f'\t- {p}\n'

        return text

    def gfx_packages(self) -> list[GfxPackage]:
        """Return the package set for this driver choice (Xorg is always included)."""
        packages = [GfxPackage.XorgServer, GfxPackage.XorgXinit]

        match self:
            case GfxDriver.AllOpenSource:
                packages += [
                    GfxPackage.Mesa,
                    GfxPackage.Xf86VideoAmdgpu,
                    GfxPackage.Xf86VideoAti,
                    GfxPackage.Xf86VideoNouveau,
                    GfxPackage.LibvaMesaDriver,
                    GfxPackage.LibvaIntelDriver,
                    GfxPackage.IntelMediaDriver,
                    GfxPackage.VulkanRadeon,
                    GfxPackage.VulkanIntel,
                    GfxPackage.VulkanNouveau,
                ]
            case GfxDriver.AmdOpenSource:
                packages += [
                    GfxPackage.Mesa,
                    GfxPackage.Xf86VideoAmdgpu,
                    GfxPackage.Xf86VideoAti,
                    GfxPackage.LibvaMesaDriver,
                    GfxPackage.VulkanRadeon,
                ]
            case GfxDriver.IntelOpenSource:
                packages += [
                    GfxPackage.Mesa,
                    GfxPackage.LibvaIntelDriver,
                    GfxPackage.IntelMediaDriver,
                    GfxPackage.VulkanIntel,
                ]
            case GfxDriver.NvidiaOpenKernel:
                packages += [
                    GfxPackage.NvidiaOpenDkms,
                    GfxPackage.Dkms,
                    GfxPackage.LibvaNvidiaDriver,
                ]
            case GfxDriver.NvidiaOpenSource:
                packages += [
                    GfxPackage.Mesa,
                    GfxPackage.Xf86VideoNouveau,
                    GfxPackage.LibvaMesaDriver,
                    GfxPackage.VulkanNouveau,
                ]
            case GfxDriver.NvidiaProprietary:
                packages += [
                    GfxPackage.NvidiaDkms,
                    GfxPackage.Dkms,
                    GfxPackage.LibvaNvidiaDriver,
                ]
            case GfxDriver.VMOpenSource:
                packages += [
                    GfxPackage.Mesa,
                ]

        return packages
class _SysInfo:
    """Lazy, cached readers for /proc-based system information.

    Each property is computed once per instance (cached_property), so results
    reflect system state at first access — fine for an installer's lifetime.
    """

    def __init__(self) -> None:
        pass

    @cached_property
    def cpu_info(self) -> dict[str, str]:
        """
        Returns system cpu information
        """
        cpu_info_path = Path('/proc/cpuinfo')
        cpu: dict[str, str] = {}

        with cpu_info_path.open() as file:
            for line in file:
                if line := line.strip():
                    # NOTE(review): keys repeat once per logical CPU in
                    # /proc/cpuinfo; later entries overwrite earlier ones,
                    # which is fine for vendor/model lookups.
                    key, value = line.split(':', maxsplit=1)
                    cpu[key.strip()] = value.strip()

        return cpu

    @cached_property
    def mem_info(self) -> dict[str, int]:
        """
        Returns system memory information
        """
        mem_info_path = Path('/proc/meminfo')
        mem_info: dict[str, int] = {}

        with mem_info_path.open() as file:
            for line in file:
                key, value = line.strip().split(':')
                # Values are reported as '<number> kB'; keep just the number.
                num = value.split()[0]
                mem_info[key] = int(num)

        return mem_info

    def mem_info_by_key(self, key: str) -> int:
        # Raises KeyError for unknown /proc/meminfo fields.
        return self.mem_info[key]

    @cached_property
    def loaded_modules(self) -> list[str]:
        """
        Returns loaded kernel modules
        """
        modules_path = Path('/proc/modules')
        modules: list[str] = []

        with modules_path.open() as file:
            for line in file:
                # Module name is the first whitespace-separated field.
                module = line.split(maxsplit=1)[0]
                modules.append(module)

        return modules
# Module-level singleton so the cached /proc reads happen at most once per run.
_sys_info = _SysInfo()
class SysInfo:
    """Static hardware/firmware queries used to tailor the installation."""

    @staticmethod
    def has_wifi() -> bool:
        # True if any network interface is classified as wireless.
        ifaces = list(list_interfaces().values())
        return 'WIRELESS' in enrich_iface_types(ifaces).values()

    @staticmethod
    def has_uefi() -> bool:
        # The kernel exposes this directory only when booted via UEFI.
        return os.path.isdir('/sys/firmware/efi')

    @staticmethod
    def _graphics_devices() -> dict[str, str]:
        """Return lspci VGA/3D controller entries keyed by device description."""
        cards: dict[str, str] = {}

        for line in SysCommand('lspci'):
            if b' VGA ' in line or b' 3D ' in line:
                _, identifier = line.split(b': ', 1)
                cards[identifier.strip().decode('UTF-8')] = str(line)

        return cards

    @staticmethod
    def has_nvidia_graphics() -> bool:
        return any('nvidia' in x.lower() for x in SysInfo._graphics_devices())

    @staticmethod
    def has_amd_graphics() -> bool:
        return any('amd' in x.lower() for x in SysInfo._graphics_devices())

    @staticmethod
    def has_intel_graphics() -> bool:
        return any('intel' in x.lower() for x in SysInfo._graphics_devices())

    @staticmethod
    def cpu_vendor() -> CpuVendor | None:
        # None when /proc/cpuinfo carries no vendor_id field (e.g. some ARM).
        if vendor := _sys_info.cpu_info.get('vendor_id'):
            return CpuVendor.get_vendor(vendor)
        return None

    @staticmethod
    def cpu_model() -> str | None:
        return _sys_info.cpu_info.get('model name', None)

    @staticmethod
    def sys_vendor() -> str | None:
        # DMI data is absent on some platforms; treat that as "unknown".
        try:
            with open('/sys/devices/virtual/dmi/id/sys_vendor') as vendor:
                return vendor.read().strip()
        except FileNotFoundError:
            return None

    @staticmethod
    def product_name() -> str | None:
        try:
            with open('/sys/devices/virtual/dmi/id/product_name') as product:
                return product.read().strip()
        except FileNotFoundError:
            return None

    @staticmethod
    def mem_available() -> int:
        # Value in kB, as reported by /proc/meminfo.
        return _sys_info.mem_info_by_key('MemAvailable')

    @staticmethod
    def mem_free() -> int:
        # Value in kB, as reported by /proc/meminfo.
        return _sys_info.mem_info_by_key('MemFree')

    @staticmethod
    def mem_total() -> int:
        # Value in kB, as reported by /proc/meminfo.
        return _sys_info.mem_info_by_key('MemTotal')

    @staticmethod
    def virtualization() -> str | None:
        """Return the virtualization technology name, or None when detection fails."""
        try:
            return str(SysCommand('systemd-detect-virt')).strip('\r\n')
        except SysCallError as err:
            debug(f'Could not detect virtual system: {err}')

        return None

    @staticmethod
    def is_vm() -> bool:
        # systemd-detect-virt prints 'none' on bare metal (and exits non-zero).
        try:
            result = SysCommand('systemd-detect-virt')
            return b'none' not in b''.join(result).lower()
        except SysCallError as err:
            debug(f'System is not running in a VM: {err}')

        return False

    @staticmethod
    def requires_sof_fw() -> bool:
        # Sound Open Firmware is needed when the snd_sof driver is loaded.
        return 'snd_sof' in _sys_info.loaded_modules

    @staticmethod
    def requires_alsa_fw() -> bool:
        # Sound cards whose drivers need the alsa-firmware package.
        modules = (
            'snd_asihpi',
            'snd_cs46xx',
            'snd_darla20',
            'snd_darla24',
            'snd_echo3g',
            'snd_emu10k1',
            'snd_gina20',
            'snd_gina24',
            'snd_hda_codec_ca0132',
            'snd_hdsp',
            'snd_indigo',
            'snd_indigodj',
            'snd_indigodjx',
            'snd_indigoio',
            'snd_indigoiox',
            'snd_layla20',
            'snd_layla24',
            'snd_mia',
            'snd_mixart',
            'snd_mona',
            'snd_pcxhr',
            'snd_vx_lib',
        )

        for loaded_module in _sys_info.loaded_modules:
            if loaded_module in modules:
                return True

        return False

File diff suppressed because it is too large Load Diff

Some files were not shown because too many files have changed in this diff Show More