create images and ISOs using poi-container

Change-Id: Ib994e97e85234390061cc60702a541e342fb9cfc
Reviewed-on: http://photon-jenkins.eng.vmware.com:8082/c/photon/+/21627
Tested-by: gerrit-photon <photon-checkins@vmware.com>
Reviewed-by: Ankit Jain <ankitja@vmware.com>
This commit is contained in:
Oliver Kurth 2023-08-18 14:17:14 -07:00 committed by Oliver Kurth
parent 146c53ffd5
commit 83f323856f
43 changed files with 815 additions and 1332 deletions

View File

@ -17,14 +17,10 @@ from urllib.parse import urlparse
sys.path.append(
f"{os.path.dirname(os.path.realpath(__file__))}/support/package-builder"
)
sys.path.append(
f"{os.path.dirname(os.path.realpath(__file__))}/support/image-builder",
)
sys.path.append(
f"{os.path.dirname(os.path.realpath(__file__))}/support/spec-checker",
)
import imagebuilder
import GenerateOSSFiles
import PullSources as downloader
@ -37,7 +33,7 @@ from StringUtils import StringUtils
from SpecDeps import SpecDependencyGenerator
from SpecData import SPECS
from check_spec import check_specs
from utils import Utils
targetDict = {
"image": [
@ -975,8 +971,6 @@ class CheckTools:
CheckTools.check_sanity()
CheckTools.check_docker()
CheckTools.create_ph_builder_img()
CheckTools.check_photon_installer()
CheckTools.check_open_vmdk()
CheckTools.check_contain()
CheckTools.check_git_hooks()
check_prerequesite["check-pre-reqs"] = True
@ -1083,50 +1077,6 @@ class CheckTools:
check_prerequesite["check-spec-files"] = True
def check_photon_installer():
    """
    Ensure the photon_installer python module is importable and up to date.

    If the module cannot be imported it is installed straight from the
    upstream git repository. Otherwise, unless SKIP_INSTALLER_UPDATE is set
    to a truthy value, the hash embedded in the installed version is
    compared against the remote HEAD and the module is reinstalled when
    they differ.
    """
    url = "https://github.com/vmware/photon-os-installer.git"
    install_cmd = f"pip3 install git+{url}"

    def install_from_url(cmd):
        # Single place to run the pip install so every branch behaves alike.
        runBashCmd(cmd)

    try:
        import photon_installer
    except Exception as e:
        # Not importable at all -> install and stop; nothing to compare.
        print("Warning: %s" % e)
        install_from_url(install_cmd)
        return
    key = "SKIP_INSTALLER_UPDATE"
    if key in os.environ and cmdUtils.strtobool(os.environ[key]):
        print("%s is enabled, not checking for updates" % key)
        return
    if hasattr(photon_installer, "__version__"):
        # assumes __version__ looks like "<version>+<git-hash>" — TODO
        # confirm; split("+")[1] raises IndexError for any other format.
        local_hash = photon_installer.__version__.split("+")[1]
        cmd = f"git ls-remote {url} HEAD | cut -f1"
        remote_hash, _, _ = runBashCmd(cmd, capture=True)
        if not remote_hash.startswith(local_hash):
            print(
                "Upstream photon-installer is updated, updating local copy .."
            )
            install_from_url(install_cmd)
    else:
        # No version attribute to compare against -> reinstall to be safe.
        install_from_url(install_cmd)
def check_open_vmdk():
    """
    Make sure the ova-compose tool (from open-vmdk) is installed.

    If ova-compose is not found on PATH, clone the open-vmdk master branch,
    build it and install it system-wide (requires sudo).
    """
    url = "https://github.com/vmware/open-vmdk.git"
    branch = "master"
    if shutil.which("ova-compose") is None:
        runBashCmd(
            f"rm -rf open-vmdk && "
            f"git clone --depth 1 --single-branch --branch {branch} {url} "
            f"&& cd open-vmdk && make && sudo make install"
        )
"""
class BuildImage does the job of building all the images like iso, rpi, ami, gce, azure, ova and ls1012afrwy
@ -1145,7 +1095,11 @@ class BuildImage:
self.stage_path = Build_Config.stagePath
self.log_path = constants.logPath
self.log_level = constants.logLevel
self.config_file = configdict["additional-path"]["conf-file"]
self.config_file = None
if configdict["additional-path"]["conf-file"] is not None:
self.config_file = os.path.abspath(configdict["additional-path"]["conf-file"])
self.img_name = imgName
self.rpm_path = constants.rpmPath
self.srpm_path = constants.sourceRpmPath
@ -1158,6 +1112,8 @@ class BuildImage:
self.ph_builder_tag = configdict["photon-build-param"][
"ph-builder-tag"
]
self.poi_image = configdict["photon-build-param"].get("poi-image", None)
self.ova_cloud_images = ["ami", "gce", "azure", "ova"]
self.photon_release_version = constants.releaseVersion
@ -1233,6 +1189,17 @@ class BuildImage:
return retval
def run_poi(self):
    """
    Build the requested image/ISO through the photon-os-installer (POI)
    container wrapper script support/poi/poi.py.

    --config and --docker-image are forwarded only when the corresponding
    settings were supplied in the build configuration.
    """
    args = []
    if self.config_file is not None:
        args.append(f"--config={self.config_file}")
    if self.poi_image is not None:
        args.append(f"--docker-image={self.poi_image}")
    args = " ".join(args)
    cmd = f"cd {photonDir}/support/poi && ./poi.py {args} {self.img_name}"
    print(f"running {cmd}")
    runBashCmd(cmd)
def build_iso(self):
if self.img_present(self.img_name):
return
@ -1258,7 +1225,10 @@ class BuildImage:
self.generated_data_path = Build_Config.generatedDataPath
print("Building Full ISO...")
imagebuilder.createIso(self)
self.run_poi()
# poi puts the image into stage/iso/, build expects it in stage/
for iso in glob.glob(os.path.join(Build_Config.stagePath, "iso", "*.iso")):
shutil.move(iso, Build_Config.stagePath)
def build_image(self):
if self.img_present(self.img_name):
@ -1274,7 +1244,7 @@ class BuildImage:
RpmBuildTarget.ostree_repo()
print(f"Building {self.img_name} image")
imagebuilder.createImage(self)
self.run_poi()
@staticmethod
def photon_docker_image():
@ -1608,6 +1578,7 @@ def process_env_build_params(ph_build_param):
"SCHEDULER_SERVER": "start-scheduler-server",
"BUILD_EXTRA_PKGS": "build-extra-pkgs",
"RESUME_BUILD": "resume-build",
"POI_IMAGE": "poi-image",
}
os.environ["PHOTON_RELEASE_VER"] = ph_build_param["photon-release-version"]
@ -1729,7 +1700,8 @@ def main():
configdict["additional-path"]["conf-file"] = os.path.abspath(
os.environ["CONFIG"]
)
jsonData = Utils.jsonread(os.environ["CONFIG"])
with open(os.environ["CONFIG"], "rt") as f:
jsonData = json.load(f)
targetName = jsonData["image_type"]
if "IMG_NAME" in os.environ:
@ -1778,7 +1750,6 @@ def main():
buildImage.build_iso()
elif targetName in buildImage.ova_cloud_images + [
"rpi",
"ls1012afrwy",
]:
buildImage.build_image()
else:

View File

@ -4,7 +4,7 @@ Before you build the ISO, verify that you have performed the following tasks
* Installed a build operating system running the 64-bit version of Ubuntu 14.04 or later version.
* Downloaded and installed the following packages: `bison`, `gawk`, `g++`, `createrepo`, `python-aptdaemon`, `genisoimage`, `texinfo`, `python-requests`, `libfuse-dev, libssl-dev, uuid-dev, libreadline-dev, kpartx, git, bc`
* Downloaded and installed the following packages: `bison`, `gawk`, `g++`, `createrepo`, `python-aptdaemon`, `texinfo`, `python-requests`, `libfuse-dev, libssl-dev, uuid-dev, libreadline-dev, git, bc`
* Installed Docker

View File

@ -1,20 +0,0 @@
{
"installer": {
"hostname": "photon-machine",
"postinstallscripts": [
"ami-patch.sh",
"../relocate-rpmdb.sh"
],
"additional_files": [
{
"cloud-photon.cfg": "/etc/cloud/cloud.cfg"
}
],
"packagelist_file": "packages_ami.json",
"linux_flavor": "linux"
},
"image_type": "ami",
"size": 8192,
"artifacttype": "tgz",
"keeprawdisk": false
}

View File

@ -1,20 +0,0 @@
{
"installer": {
"hostname": "photon-machine",
"postinstallscripts": [
"azure-patch.sh",
"../relocate-rpmdb.sh"
],
"additional_files": [
{
"cloud-photon.cfg": "/etc/cloud/cloud.cfg"
}
],
"packagelist_file": "packages_azure.json",
"linux_flavor": "linux"
},
"image_type": "azure",
"size": 16384,
"artifacttype": "vhd.gz",
"keeprawdisk": false
}

View File

@ -1,23 +0,0 @@
{
"installer": {
"hostname": "photon-machine",
"postinstallscripts": [
"gce-patch.sh",
"../relocate-rpmdb.sh"
],
"additional_files": [
{
"cloud-photon.cfg": "/etc/cloud/cloud.cfg"
},
{
"ntpd.service": "/usr/lib/systemd/system/ntpd.service"
}
],
"packagelist_file": "packages_gce.json",
"linux_flavor": "linux"
},
"image_type": "gce",
"size": 16384,
"artifacttype": "tgz",
"keeprawdisk": false
}

View File

@ -1,457 +0,0 @@
#!/usr/bin/env python3
import os
import shutil
import imagegenerator
from utils import Utils
from argparse import ArgumentParser
from CommandUtils import CommandUtils
cmdUtils = CommandUtils()
def runInstaller(options, install_config, working_directory):
    """
    Run photon_installer with the given install configuration.

    Raises ImportError (with install instructions) when the
    photon_installer module is not available.
    """
    try:
        from photon_installer.installer import Installer
    except ModuleNotFoundError:
        repo = "https://github.com/vmware/photon-os-installer.git"
        raise ImportError(
            f"Module photon_installer not found!\n"
            f"Run 'pip3 install git+{repo}'"
        )
    # Configure, then execute; Installer raises on failure.
    inst = Installer(
        working_directory=working_directory,
        log_path=options.log_path,
        photon_release_version=options.photon_release_version,
    )
    inst.configure(install_config)
    inst.execute()
def get_file_name_with_last_folder(filename):
    """Return the path's basename prefixed with its immediate parent folder.

    e.g. "/stage/RPMS/x86_64/foo.rpm" -> "x86_64/foo.rpm".
    """
    parent = os.path.basename(os.path.dirname(filename))
    return os.path.join(parent, os.path.basename(filename))
def create_pkg_list_to_copy_to_iso(build_install_option, output_data_path):
    """
    Collect the package names for every install option except "iso".

    For each option, the expanded package list file
    (<packagelist_file stem>_expanded.json under output_data_path) is read
    and its "packages" entries are accumulated into one flat list.
    """
    packages = []
    for name, option in Utils.jsonread(build_install_option).items():
        if name == "iso":
            continue
        stem = os.path.splitext(option["packagelist_file"])[0]
        expanded = os.path.join(output_data_path, stem + "_expanded.json")
        packages.extend(Utils.jsonread(expanded)["packages"])
    return packages
def create_additional_file_list_to_copy_in_iso(
    base_path, build_install_option
):
    """
    Gather the extra files requested by the install options.

    Every option's optional "additional-files" entries are resolved
    relative to base_path and returned as one flat list.
    """
    collected = []
    for option in Utils.jsonread(build_install_option).values():
        if "additional-files" in option:
            collected += [
                os.path.join(base_path, name)
                for name in option.get("additional-files")
            ]
    return collected
"""
copy_flags 1: add the rpm file for the package
2: add debuginfo rpm file for the package.
4: add src rpm file for the package
"""
def create_rpm_list_to_be_copied_to_iso(
    pkg_to_rpm_map_file, build_install_option, copy_flags, output_data_path
):
    """
    Build the list of rpm files to copy into the ISO.

    copy_flags is a bitmask:
        1: include the binary rpm of each package
        2: include the debuginfo rpm
        4: include the source rpm
    When build_install_option is None every package in the map is taken,
    otherwise only the packages selected by the install options.
    """
    packages = []
    if build_install_option:
        packages = create_pkg_list_to_copy_to_iso(
            build_install_option, output_data_path
        )
    rpm_list = []
    pkg_to_rpm_map = Utils.jsonread(pkg_to_rpm_map_file)
    for pkg, rpms in pkg_to_rpm_map.items():
        if build_install_option is not None and pkg not in packages:
            continue
        if rpms["rpm"] is not None and copy_flags & 1:
            # Binary rpms are referenced relative to their arch sub-folder.
            rpm_list.append(get_file_name_with_last_folder(rpms["rpm"]))
        if rpms["debugrpm"] is not None and copy_flags & 2:
            # NOTE(review): debug rpms keep their full path here, unlike the
            # binary rpms above — confirm the asymmetry is intentional.
            # (Removed a dead `filename = ...` assignment that was never used.)
            rpm_list.append(rpms["debugrpm"])
        if rpms["sourcerpm"] is not None and copy_flags & 4:
            rpm_list.append(rpms["sourcerpm"])
    return rpm_list
def make_debug_iso(working_directory, debug_iso_path, rpm_list):
    """
    Stage the debuginfo rpms under DEBUGRPMS/<arch>/ inside a scratch
    directory and master an ISO from it, then remove the scratch directory.
    Missing rpm files are silently skipped.
    """
    if os.path.exists(working_directory) and os.path.isdir(working_directory):
        shutil.rmtree(working_directory)
    cmdUtils.runBashCmd(f"mkdir -p {working_directory}/DEBUGRPMS")
    for rpm in rpm_list:
        if not os.path.isfile(rpm):
            continue
        # Keep the arch sub-folder (last path component of the rpm's dir).
        arch_dir = os.path.basename(os.path.dirname(rpm))
        dest = os.path.join(working_directory, "DEBUGRPMS", arch_dir)
        cmdUtils.runBashCmd(f"mkdir -p {dest}")
        shutil.copy2(rpm, dest)
    cmdUtils.runBashCmd(f"mkisofs -r -o {debug_iso_path} {working_directory}")
    shutil.rmtree(working_directory)
def make_src_iso(working_directory, src_iso_path, rpm_list):
    """
    Stage the source rpms under SRPMS/ inside a scratch directory and
    master an ISO from it, then remove the scratch directory.
    Missing rpm files are silently skipped.
    """
    if os.path.exists(working_directory) and os.path.isdir(working_directory):
        shutil.rmtree(working_directory)
    cmdUtils.runBashCmd(f"mkdir -p {working_directory}/SRPMS")
    dest = os.path.join(working_directory, "SRPMS")
    for rpm in rpm_list:
        if os.path.isfile(rpm):
            shutil.copy2(rpm, dest)
    cmdUtils.runBashCmd(f"mkisofs -r -o {src_iso_path} {working_directory}")
    shutil.rmtree(working_directory)
def createIso(options):
    """
    Create the installer ISO and, optionally, debuginfo and source ISOs.

    Each ISO is produced only when its corresponding option is set:
    options.iso_path, options.debug_iso_path, options.src_iso_path.
    A shared scratch directory <stage_path>/photon_iso is used and
    removed at the end.
    """
    working_directory = os.path.abspath(
        os.path.join(options.stage_path, "photon_iso")
    )
    script_directory = os.path.dirname(os.path.realpath(__file__))
    # Making the iso if needed
    if options.iso_path:
        # Additional RPMs to copy to ISO"s /RPMS/ folder
        rpm_list = " ".join(
            create_rpm_list_to_be_copied_to_iso(
                options.pkg_to_rpm_map_file,
                options.pkg_to_be_copied_conf_file,
                1,  # copy_flags=1: binary rpms only
                options.generated_data_path,
            )
        )
        # Additional files to copy to ISO"s / folder
        files_to_copy = " ".join(
            create_additional_file_list_to_copy_in_iso(
                os.path.abspath(options.stage_path), options.package_list_file
            )
        )
        initrd_pkg_list_file = (
            f"{options.generated_data_path}/"
            f"packages_installer_initrd_expanded.json"
        )
        # Packages that make up the installer initrd environment.
        initrd_pkgs = " ".join(
            Utils.jsonread(initrd_pkg_list_file)["packages"]
        )
        # The shell helper takes positional arguments — order matters and
        # must match mk-install-iso.sh's $1..$11 parameter list.
        cmdUtils.runBashCmd(
            f"{script_directory}/iso/mk-install-iso.sh"
            f' "{working_directory}" "{options.iso_path}"'
            f' "{options.rpm_path}" "{options.package_list_file}"'
            f' "{rpm_list}" "{options.stage_path}"'
            f' "{files_to_copy}" "{options.generated_data_path}"'
            f' "{initrd_pkgs}" "{options.ph_docker_image}"'
            f' "{options.ph_builder_tag}" "{options.photon_release_version}"'
        )
    if options.debug_iso_path:
        # copy_flags=2 selects the debuginfo rpms.
        debug_rpm_list = create_rpm_list_to_be_copied_to_iso(
            options.pkg_to_rpm_map_file,
            options.pkg_to_be_copied_conf_file,
            2,
            options.generated_data_path,
        )
        make_debug_iso(
            working_directory, options.debug_iso_path, debug_rpm_list
        )
    if options.src_iso_path:
        # copy_flags=4 selects the source rpms.
        rpm_list = create_rpm_list_to_be_copied_to_iso(
            options.pkg_to_rpm_map_file,
            options.pkg_to_be_copied_conf_file,
            4,
            options.generated_data_path,
        )
        make_src_iso(working_directory, options.src_iso_path, rpm_list)
    # Final cleanup of the scratch directory regardless of which ISOs ran.
    if os.path.exists(working_directory) and os.path.isdir(working_directory):
        shutil.rmtree(working_directory)
def replaceScript(script_dir, img, script_name, parent_script_dir=None):
    """
    Return the image-specific variant of a helper script when one exists
    (<script_dir>/<img>/<script_name>), otherwise the shared one under
    parent_script_dir (which defaults to script_dir).
    """
    if not parent_script_dir:
        parent_script_dir = script_dir
    candidate = f"{script_dir}/{img}/{script_name}"
    if os.path.isfile(candidate):
        return candidate
    return f"{parent_script_dir}/{script_name}"
def verifyImageTypeAndConfig(config_file, img_name):
    """
    Validate the image-name / config-file combination.

    All of the below combinations are supported
    1. make image IMG_NAME=<name>
    2. make image IMG_NAME=<name> CONFIG=<config_file_path>
    3. make image CONFIG=<config_file_path>

    Returns (is_valid, config) where config is the parsed json or None.
    """
    config = None
    if img_name:
        # The image must have a directory of its own next to this script.
        subdirs = next(os.walk(os.path.dirname(__file__)))[1]
        if img_name not in subdirs:
            return (False, config)
        if config_file and os.path.isfile(config_file):
            config = Utils.jsonread(config_file)
            # An explicit config may not contradict the requested image.
            if config.get("image_type", img_name) != img_name:
                return (False, config)
        else:
            # Fall back to the image's bundled default config.
            default_cfg = (
                os.path.dirname(__file__)
                + f"/{img_name}/config_{img_name}.json"
            )
            if not os.path.isfile(default_cfg):
                return (False, config)
            config = Utils.jsonread(default_cfg)
            if "image_type" not in config:
                config["image_type"] = img_name
        return (True, config)
    # No image name: a config file alone is fine if it names an image_type.
    if not config_file:
        return (False, config)
    config = Utils.jsonread(config_file)
    return ("image_type" in config, config)
# Detach loop device and remove raw image
def cleanup(loop_devices, raw_image):
    """Detach every loop device and delete its backing raw image file."""
    for idx, dev in enumerate(loop_devices):
        cmdUtils.runBashCmd(f"losetup -d {dev}")
        os.remove(raw_image[idx])
def createImage(options):
    """
    Build a disk image (ami, gce, azure, ova, rpi, ...) for the requested
    image type.

    Workflow: validate image/config, prepare a scratch dir under
    stage_path, create one raw file + loop device per configured disk,
    run photon_installer against the loop device(s), detach them and hand
    the raw files to imagegenerator for artifact packaging.

    Raises Exception for invalid configs, missing ova-compose, or bad
    disk/partition definitions. Note: changes the process CWD.
    """
    (validImage, config) = verifyImageTypeAndConfig(
        options.config_file, options.img_name
    )
    if not validImage:
        raise Exception("Image type/config not supported")
    # ova artifacts need the open-vmdk tooling; fail early if absent.
    if "ova" in config["artifacttype"] and shutil.which("ova-compose") is None:
        raise Exception(
            "ova-compose is not available - download and install open-vmdk"
        )
    install_config = config["installer"]
    image_type = config["image_type"]
    image_name = config.get("image_name", f"photon-{image_type}")
    # Fresh scratch directory per image type.
    workingDir = os.path.abspath(f"{options.stage_path}/{image_type}")
    if os.path.exists(workingDir) and os.path.isdir(workingDir):
        shutil.rmtree(workingDir)
    os.mkdir(workingDir)
    script_dir = os.path.dirname(os.path.realpath(__file__))
    # Prefer an image-specific grub setup script when one exists.
    grub_script = replaceScript(script_dir, image_type, "mk-setup-grub.sh")
    if os.path.isfile(grub_script):
        install_config["setup_grub_script"] = grub_script
    # Set absolute path for "packagelist_file"
    if "packagelist_file" in install_config:
        plf = install_config["packagelist_file"]
        if not plf.startswith("/"):
            plf = os.path.join(options.generated_data_path, plf)
        install_config["packagelist_file"] = plf
    os.chdir(workingDir)
    if "log_level" not in install_config:
        install_config["log_level"] = options.log_level
    # Where the installer looks for scripts/files: image dir first.
    install_config["search_path"] = [
        os.path.abspath(os.path.join(script_dir, image_type)),
        os.path.abspath(script_dir),
    ]
    """
    if "photon_docker_image" is defined in config_<img>.json then ignore
    commandline param "PHOTON_DOCKER_IMAGE" and "config.json" value
    """
    if "photon_docker_image" not in install_config:
        install_config["photon_docker_image"] = options.ph_docker_image
    """
    Take default "repo" baseurl as options.rpm_path
    if not specified in config_<img>.json
    """
    if "repos" not in install_config:
        install_config["repos"] = {
            "photon-local": {
                "name": "VMware Photon OS Installer",
                "baseurl": f"file://{os.path.abspath(options.rpm_path)}",
                "gpgcheck": 0,
                "enabled": 1,
            }
        }
    if "size" in config and "disks" in config:
        raise Exception(
            "Both 'size' and 'disks' key should not be defined together."
            "\nPlease use 'disks' for defining multidisks only."
        )
    elif "size" in config:
        """
        "BOOTDISK" key name doesn't matter.
        It is just a name given for better understanding
        """
        config["disks"] = {"BOOTDISK": config["size"]}
    elif "disks" not in config:
        raise Exception("Disk size not defined!!")
    image_file = []
    loop_device = {}
    # Create disk image
    for ndisk, k in enumerate(config["disks"]):
        image_file.append(f"{workingDir}/{image_name}-{ndisk}.raw")
        # Sparse file: seek past the end, write nothing (disk sizes in MB).
        cmdUtils.runBashCmd(
            "dd if=/dev/zero of={} bs=1024 seek={} count=0".format(
                image_file[ndisk], config["disks"].get(k) * 1024
            )
        )
        cmdUtils.runBashCmd("chmod 755 {}".format(image_file[ndisk]))
        """
        Associating loopdevice to raw disk and save the name as
        target's "disk"
        """
        out, _, _ = cmdUtils.runBashCmd(
            "losetup --show -f {}".format(image_file[ndisk]), capture=True
        )
        loop_device[k] = out.rstrip("\n")
    # Assigning first loop device as BOOTDISK
    install_config["disk"] = loop_device[next(iter(loop_device))]
    # Mapping the given disks to the partition table disk
    # Assigning the appropriate loop device to the partition "disk"
    if "partitions" in install_config:
        for partition in install_config["partitions"]:
            if len(loop_device) == 1:
                partition["disk"] = install_config["disk"]
            elif "disk" in partition:
                if partition["disk"] in loop_device.keys():
                    partition["disk"] = loop_device[partition["disk"]]
                else:
                    # Unknown disk name: detach everything before failing.
                    cleanup(loop_device.values(), image_file)
                    raise Exception(
                        f"disk name:{partition['disk']} "
                        "defined in partition table not found in "
                        "list of 'disks'!!"
                    )
            else:
                cleanup(loop_device.values(), image_file)
                raise Exception(
                    "disk name must be defined in partition table "
                    "for multidisks!!"
                )
    # No return value, it throws exception on error.
    runInstaller(options, install_config, workingDir)
    # Detaching loop device from vmdk
    for loop_dev in loop_device.values():
        cmdUtils.runBashCmd(f"losetup -d {loop_dev}")
    os.chdir(script_dir)
    imagegenerator.createOutputArtifact(
        image_file, config, options.src_root, f"{options.src_root}/tools/bin/"
    )
if __name__ == "__main__":
    # Command-line front end: any of the ISO path options triggers an ISO
    # build; otherwise a config file / image name selects a disk image.
    parser = ArgumentParser()
    # Common args
    parser.add_argument("-e", "--src-root", dest="src_root", default="../..")
    parser.add_argument(
        "-g",
        "--generated-data-path",
        dest="generated_data_path",
        default="../../stage/common/data",
    )
    parser.add_argument(
        "-s", "--stage-path", dest="stage_path", default="../../stage"
    )
    parser.add_argument(
        "-l", "--log-path", dest="log_path", default="../../stage/LOGS"
    )
    parser.add_argument("-y", "--log-level", dest="log_level", default="debug")
    # Image builder args for ami, gce, azure, ova, rpi3 etc.
    parser.add_argument("-c", "--config-file", dest="config_file")
    parser.add_argument("-i", "--img-name", dest="img_name")
    # ISO builder args
    parser.add_argument("-j", "--iso-path", dest="iso_path")
    parser.add_argument("-k", "--debug-iso-path", dest="debug_iso_path")
    parser.add_argument("-m", "--src-iso-path", dest="src_iso_path")
    parser.add_argument(
        "-r", "--rpm-path", dest="rpm_path", default="../../stage/RPMS"
    )
    parser.add_argument(
        "-x", "--srpm-path", dest="srpm_path", default="../../stage/SRPMS"
    )
    parser.add_argument(
        "-p",
        "--package-list-file",
        dest="package_list_file",
        default="../../common/data/build_install_options_all.json",
    )
    parser.add_argument(
        "-d",
        "--pkg-to-rpm-map-file",
        dest="pkg_to_rpm_map_file",
        default="../../stage/pkg_info.json",
    )
    parser.add_argument(
        "-z", "--pkg-to-be-copied-conf-file", dest="pkg_to_be_copied_conf_file"
    )
    parser.add_argument(
        "-q",
        "--photon-docker-image",
        dest="ph_docker_image",
        default="photon:latest",
    )
    parser.add_argument(
        "-v", "--photon-release-version", dest="photon_release_version"
    )
    options = parser.parse_args()
    # Normalize the config path before the chdir below invalidates
    # relative paths.
    if options.config_file and options.config_file != "":
        options.config_file = os.path.abspath(options.config_file)
    # Create ISO
    os.chdir(os.path.dirname(os.path.realpath(__file__)))
    if options.iso_path or options.debug_iso_path or options.src_iso_path:
        createIso(options)
    elif options.config_file or options.img_name:
        createImage(options)
    else:
        raise Exception("No supported image type defined")

View File

@ -1,186 +0,0 @@
#!/usr/bin/env python3
import os
import shutil
import tarfile
import lzma as xz
import json
import ovagenerator
from utils import Utils
from argparse import ArgumentParser
from CommandUtils import CommandUtils
imgUtils = Utils()
def create_container_cmd(src_root, photon_docker_image, cmd):
    """
    Wrap *cmd* so it runs inside a throwaway container of
    photon_docker_image with src_root bind-mounted read-write at /mnt.
    """
    parts = [
        "docker run --ulimit nofile=1024:1024 --rm",
        f"-v {src_root}:/mnt:rw {photon_docker_image}",
        f'/bin/bash -c "{cmd}"',
    ]
    return " ".join(parts)
def createOutputArtifact(raw_image_path, config, src_root, tools_bin_path):
    """
    Package the raw disk image(s) into the final artifact for the image
    type: tar.gz, xz, vhd(.gz), ova, or plain raw.

    Reads PHOTON_RELEASE_VER and PHOTON_BUILD_NUM from the environment to
    derive the default artifact name. qemu-img steps run either directly
    (inside docker) or wrapped in a photon container (on a host).
    Raises ValueError for an unknown artifacttype.
    """
    cmdUtils = CommandUtils()
    photon_release_ver = os.environ["PHOTON_RELEASE_VER"]
    photon_build_num = os.environ["PHOTON_BUILD_NUM"]
    image_name = config.get(
        "image_name",
        f"photon-{config['image_type']}-{photon_release_ver}-"
        f"{photon_build_num}.{imgUtils.buildArch}",
    )
    photon_docker_image = config["installer"].get(
        "photon_docker_image", "photon:latest"
    )
    new_name = []
    # Accept a single path or a list of paths (multi-disk).
    if type(raw_image_path) is not list:
        raw_image_path = [raw_image_path]
    img_path = os.path.dirname(os.path.realpath(raw_image_path[0]))
    # Rename gce image to disk.raw
    if config["image_type"] == "gce":
        new_name.append(f"{img_path}/disk.raw")
    else:
        # NOTE(review): every disk gets the same "<image_name>.raw" target,
        # so multi-disk renames would collide — confirm multi-disk is only
        # used with artifacttype "ova" where this list is consumed per-disk.
        for img_num in range(len(raw_image_path)):
            new_name.append(f"{img_path}/{image_name}.raw")
    for img_num, raw_img in enumerate(raw_image_path):
        shutil.move(raw_img, new_name[img_num])
    raw_image = new_name
    compressed = True
    # Only for artifactype="ova", multidisk support is applicable
    # For other artifacttype, only one disk support (i.e. raw_image[0])
    if config["artifacttype"] == "tgz":
        print("Generating the tar.gz artifact ...")
        outputfile = f"{img_path}/{image_name}.tar.gz"
        compressed = generateCompressedFile(raw_image[0], outputfile, "w:gz")
    elif config["artifacttype"] == "xz":
        print("Generating the xz artifact ...")
        outputfile = f"{img_path}/{image_name}.xz"
        compressed = generateCompressedFile(raw_image[0], outputfile, "w:xz")
    elif "vhd" in config["artifacttype"]:
        relrawpath = os.path.relpath(raw_image[0], src_root)
        vhdname = f"{image_name}.vhd"
        dockerenv = False
        print("Check if inside docker env")
        # Heuristic: cgroup mentions docker when already inside a container.
        out, _, _ = cmdUtils.runBashCmd(
            "grep -c docker /proc/self/cgroup || :", capture=True
        )
        if out.rstrip() != "0":
            dockerenv = True
        print("Converting raw disk to vhd ...")
        cmd = (
            "tdnf install -qy qemu-img; qemu-img info -f raw --output json {}"
        )
        if not dockerenv:
            # On a host: run inside a photon container with src_root at /mnt.
            cmd = cmd.format(f"/mnt/{relrawpath}")
            cmd = create_container_cmd(src_root, photon_docker_image, cmd)
        else:
            cmd = cmd.format(raw_image[0])
        info_out, _, _ = cmdUtils.runBashCmd(cmd, capture=True)
        # VHD requires the virtual size rounded up to a whole MB.
        mbsize = 1024 * 1024
        mbroundedsize = (
            int(json.loads(info_out)["virtual-size"]) / mbsize + 1
        ) * mbsize
        cmd = "tdnf install -qy qemu-img; qemu-img resize -f raw {} {}"
        if not dockerenv:
            cmd = cmd.format(f"/mnt/{relrawpath}", f"{mbroundedsize}")
            cmd = create_container_cmd(src_root, photon_docker_image, cmd)
        else:
            cmd = cmd.format(raw_image[0], mbroundedsize)
        cmdUtils.runBashCmd(cmd)
        cmd = "tdnf install -qy qemu-img; "
        cmd += "qemu-img convert {} -O vpc -o subformat=fixed,force_size {}"
        if not dockerenv:
            cmd = cmd.format(
                f"/mnt/{relrawpath}",
                f"/mnt/{os.path.dirname(relrawpath)}/{vhdname}",
            )
            cmd = create_container_cmd(src_root, photon_docker_image, cmd)
        else:
            cmd = cmd.format(
                raw_image[0], f"{os.path.dirname(raw_image[0])}/{vhdname}"
            )
        cmdUtils.runBashCmd(cmd)
        if config["artifacttype"] == "vhd.gz":
            outputfile = f"{img_path}/{image_name}.vhd.tar.gz"
            compressed = generateCompressedFile(
                f"{img_path}/{vhdname}", outputfile, "w:gz"
            )
        # remove raw image and call the vhd as raw image
        os.remove(raw_image[0])
        raw_image = f"{img_path}/{vhdname}"
    elif config["artifacttype"] == "ova":
        ovagenerator.create_ova(
            raw_image,
            config,
            image_name=image_name,
            eulafile=os.path.join(src_root, "EULA.txt"),
        )
    elif config["artifacttype"] == "raw":
        pass
    else:
        raise ValueError("Unknown output format")
    if not compressed:
        print("ERROR: Image compression failed!")
        # Leave the raw disk around if compression failed
        return
    if not config["keeprawdisk"]:
        if type(raw_image) is list:
            for raw_img in raw_image:
                os.remove(raw_img)
        else:
            os.remove(raw_image)
def generateCompressedFile(inputfile, outputfile, formatstring):
    """
    Compress inputfile into outputfile.

    formatstring selects the container: "w:xz" writes a bare lzma/xz
    stream; anything else is passed to tarfile.open() (e.g. "w:gz") and
    the input is stored under its basename.

    Returns True on success, False on any failure. The error is printed
    rather than raised so the caller can keep the raw disk around.
    """
    try:
        if formatstring == "w:xz":
            # Bare xz stream, no tar wrapper. Context managers ensure the
            # handles are closed even when compression fails (the original
            # leaked both file objects on exception).
            with open(inputfile, "rb") as in_file, \
                    open(outputfile, "wb") as out_file:
                out_file.write(xz.compress(in_file.read()))
        else:
            with tarfile.open(
                outputfile, formatstring, format=tarfile.GNU_FORMAT
            ) as tarout:
                tarout.add(inputfile, arcname=os.path.basename(inputfile))
    except Exception as e:
        # Deliberately broad: any failure means "compression failed".
        print(e)
        return False
    return True
if __name__ == "__main__":
    # Stand-alone entry point: package an already-built raw image into the
    # artifact described by the given config json.
    parser = ArgumentParser()
    parser.add_argument("-r", "--raw-image-path", dest="raw_image_path")
    parser.add_argument("-c", "--config-path", dest="config_path")
    parser.add_argument("-t", "--tools-bin-path", dest="tools_bin_path")
    parser.add_argument("-s", "--src-root", dest="src_root")
    options = parser.parse_args()
    # The config file is the only mandatory argument.
    if not options.config_path:
        raise Exception("No config file defined")
    config = imgUtils.jsonread(options.config_path)
    createOutputArtifact(
        options.raw_image_path,
        config,
        options.src_root,
        options.tools_bin_path,
    )

View File

@ -1,13 +0,0 @@
set default=0
set timeout=3
loadfont ascii
set gfxmode="1024x768"
gfxpayload=keep
set theme=/boot/grub2/themes/photon/theme.txt
terminal_output gfxterm
probe -s photondisk -u ($root)
menuentry "Install" {
linux /isolinux/vmlinuz root=/dev/ram0 loglevel=3 photon.media=UUID=$photondisk
initrd /isolinux/initrd.img
}

View File

@ -1,13 +0,0 @@
# Begin /etc/fstab for a bootable CD
# file system mount-point type options dump fsck
# order
#/dev/EDITME / EDITME defaults 1 1
#/dev/EDITME swap swap pri=1 0 0
proc /proc proc defaults 0 0
sysfs /sys sysfs defaults 0 0
devpts /dev/pts devpts gid=4,mode=620 0 0
tmpfs /dev/shm tmpfs defaults 0 0
tmpfs /run tmpfs defaults 0 0
devtmpfs /dev devtmpfs mode=0755,nosuid 0 0
# End /etc/fstab

View File

@ -1,5 +0,0 @@
# D-I config version 2.0
include menu.cfg
default vesamenu.c32
prompt 0
timeout 0

View File

@ -1,17 +0,0 @@
menu hshift 7
menu width 61
menu autoboot
timeout 1
noescape 1
totaltimeout 1
menu title Photon boot menu
include stdmenu.cfg
default PhotonOS
label PhotonOS
menu label ^PhotonOS
menu default
kernel vmlinuz
append initrd=initrd.img root=/dev/ram0 loglevel=3

View File

@ -1,13 +0,0 @@
menu hshift 7
menu width 61
menu title Photon installer boot menu
include stdmenu.cfg
default install
label install
menu label ^Install
menu default
kernel vmlinuz
append initrd=initrd.img root=/dev/ram0 loglevel=3 photon.media=cdrom

Binary file not shown.

Before

Width:  |  Height:  |  Size: 78 KiB

View File

@ -1,20 +0,0 @@
menu background splash.png
menu color title 1;40;47 #FF000000 #00000000 none
menu color border * #00000000 #00000000 none
menu color sel * #FF000000 #D00B77BB none
menu color hotsel 1;7;37;40 #FF000000 #D00B77BB none
menu color unsel 37;44 #FF000000 #00000000 none
menu color hotkey 1;37;44 #FF000000 #00000000 none
menu color tabmsg * #ffffffff #00000000 *
menu color help 37;40 #ffdddd00 #00000000 none
# XXX When adjusting vshift, take care that rows is set to a small
# enough value so any possible menu will fit on the screen,
# rather than falling off the bottom.
menu vshift 12
menu rows 10
menu helpmsgrow 15
# The command line must be at least one line from the bottom.
menu cmdlinerow 16
menu timeoutrow 16
menu tabmsgrow 18
menu tabmsg Press ENTER to boot or TAB to edit a menu entry

View File

@ -1,308 +0,0 @@
#!/bin/bash
set -ex
SCRIPT_PATH=$(dirname $(realpath -s $0))
PRGNAME=${0##*/} # script name minus the path
# Should be changed when there is a python version change
PY_VER="3.11"
WORKINGDIR=$1
shift 1
ISO_OUTPUT_NAME=$1
RPMS_PATH=$2
PACKAGE_LIST_FILE=$3
RPM_LIST=$4
STAGE_PATH=$5
ADDITIONAL_FILES_TO_COPY_FROM_STAGE=$6
OUTPUT_DATA_PATH=$7
PHOTON_COMMON_DIR=$(dirname "${PACKAGE_LIST_FILE}")
PACKAGE_LIST_FILE_BASE_NAME=$(basename "${PACKAGE_LIST_FILE}")
INITRD=${WORKINGDIR}/photon-chroot
PACKAGES=$8
PHOTON_DOCKER_IMAGE=$9
PH_BUILDER_TAG=${10}
PH_VERSION=${11}
ARCH="$(uname -m)"
LICENSE_TEXT="VMWARE $PH_VERSION"
if ! eval "$(grep -m 1 -w 'BETA LICENSE AGREEMENT' $STAGE_PATH/EULA.txt)"; then
LICENSE_TEXT+=" BETA"
fi
LICENSE_TEXT+=" LICENSE AGREEMENT"
rm -rf $WORKINGDIR/*
mkdir -m 755 -p $INITRD
tar -xf $SCRIPT_PATH/open_source_license.tar.gz -C $WORKINGDIR/
cp $STAGE_PATH/NOTICE-Apachev2 \
$STAGE_PATH/NOTICE-GPL2.0 \
$WORKINGDIR/
# 1. install rpms into initrd path
cat > ${WORKINGDIR}/photon-local.repo <<EOF
[photon-local]
name=VMware Photon Linux
baseurl=file://${RPMS_PATH}
gpgcheck=0
enabled=1
skip_if_unavailable=True
EOF
# we need to remove repodir & use --setopt=reposdir option once we use
# tdnf-3.2.x in Photon-3.0 docker images
cat > ${WORKINGDIR}/tdnf.conf <<EOF
[main]
gpgcheck=0
installonly_limit=3
clean_requirements_on_remove=true
repodir=${WORKINGDIR}
EOF
TDNF_CMD="tdnf install -qy \
--releasever $PHOTON_RELEASE_VER \
--installroot $INITRD \
--rpmverbosity 10 \
-c ${WORKINGDIR}/tdnf.conf \
${PACKAGES}"
# Run host's tdnf, if fails - try one from photon:latest docker image
$TDNF_CMD || docker run --ulimit nofile=1024:1024 --rm -v $RPMS_PATH:$RPMS_PATH -v $WORKINGDIR:$WORKINGDIR $PHOTON_DOCKER_IMAGE /bin/bash -c "$TDNF_CMD"
rm -f ${WORKINGDIR}/photon-local.repo ${WORKINGDIR}/tdnf.conf
# 3. finalize initrd system (mk-finalize-system.sh)
chroot ${INITRD} /usr/sbin/pwconv
chroot ${INITRD} /usr/sbin/grpconv
# Workaround Failed to generate randomized machine ID: Function not implemented
chroot ${INITRD} /bin/systemd-machine-id-setup || chroot ${INITRD} date -Ins | md5sum | cut -f1 -d' ' > /etc/machine-id
echo "LANG=en_US.UTF-8" > $INITRD/etc/locale.conf
echo "photon-installer" > $INITRD/etc/hostname
# locales/en_GB should be moved to glibc main package to make it working
#chroot ${INITRD} /usr/bin/localedef -c -i en_US -f UTF-8 en_US.UTF-8
# Importing the pubkey (photon-repos required)
#chroot ${INITRD} rpm --import /etc/pki/rpm-gpg/*
rm -rf ${INITRD}/var/cache/tdnf
# Move entire /boot from initrd to ISO
mv ${INITRD}/boot ${WORKINGDIR}/
cp -pr $SCRIPT_PATH/BUILD_DVD/isolinux \
$SCRIPT_PATH/BUILD_DVD/boot \
${WORKINGDIR}/
#Generate efiboot image
# efiboot is a fat16 image that has at least EFI/BOOT/bootx64.efi
EFI_IMAGE=boot/grub2/efiboot.img
EFI_FOLDER=$(readlink -f ${STAGE_PATH}/efiboot)
dd if=/dev/zero of=${WORKINGDIR}/${EFI_IMAGE} bs=3K count=1024
mkdosfs ${WORKINGDIR}/${EFI_IMAGE}
mkdir -p $EFI_FOLDER
mount -o loop ${WORKINGDIR}/${EFI_IMAGE} $EFI_FOLDER
mv ${WORKINGDIR}/boot/efi/EFI $EFI_FOLDER/
ls -lR $EFI_FOLDER
umount $EFI_FOLDER
rm -rf $EFI_FOLDER
#mcopy -s -i ${WORKINGDIR}/${EFI_IMAGE} ./EFI '::/'
mkdir -p $INITRD/installer
cp $SCRIPT_PATH/sample_ks.cfg ${WORKINGDIR}/isolinux
cp $SCRIPT_PATH/sample_ui.cfg ${INITRD}/installer
cp $STAGE_PATH/EULA.txt ${INITRD}/installer
mv ${WORKINGDIR}/boot/vmlinuz* ${WORKINGDIR}/isolinux/vmlinuz
# Copy package list json files, dereference symlinks
cp -rf -L $OUTPUT_DATA_PATH/*.json ${INITRD}/installer/
#ID in the initrd.gz now is PHOTON_VMWARE_CD . This is how we recognize that the cd is actually ours. touch this file there.
touch ${WORKINGDIR}/PHOTON_VMWARE_CD
# Step 4.5 Create necessary devices
mkfifo ${INITRD}/dev/initctl
mknod ${INITRD}/dev/ram0 b 1 0
mknod ${INITRD}/dev/ram1 b 1 1
mknod ${INITRD}/dev/ram2 b 1 2
mknod ${INITRD}/dev/ram3 b 1 3
mknod ${INITRD}/dev/sda b 8 0
#- Step 5 - Creating the boot script
mkdir -p ${INITRD}/etc/systemd/scripts
# Step 6 create fstab
cp $SCRIPT_PATH/BUILD_DVD/fstab ${INITRD}/etc/fstab
mkdir -p ${INITRD}/etc/yum.repos.d
cat > ${INITRD}/etc/yum.repos.d/photon-iso.repo << EOF
[photon-iso]
name=VMWare Photon Linux ${PH_VERSION}(${ARCH})
baseurl=file:///mnt/media/RPMS
gpgkey=file:///etc/pki/rpm-gpg/VMWARE-RPM-GPG-KEY
gpgcheck=1
enabled=1
skip_if_unavailable=True
EOF
#- Step 7 - Create installer script
cat >> ${INITRD}/bin/bootphotoninstaller << EOF
#!/bin/bash
cd /installer
ACTIVE_CONSOLE="\$(< /sys/devices/virtual/tty/console/active)"
install() {
LANG=en_US.UTF-8 photon-installer -i iso -o $PACKAGE_LIST_FILE_BASE_NAME -e EULA.txt -t "$LICENSE_TEXT" -v $PHOTON_RELEASE_VER && shutdown -r now
}
try_run_installer() {
if [ "\$ACTIVE_CONSOLE" == "tty0" ]; then
[ "\$(tty)" == '/dev/tty1' ] && install
else
[ "\$(tty)" == "/dev/\$ACTIVE_CONSOLE" ] && install
fi
}
try_run_installer || exec /bin/bash
EOF
chmod 755 ${INITRD}/bin/bootphotoninstaller
cat >> ${INITRD}/init << EOF
mount -t proc proc /proc
/lib/systemd/systemd
EOF
chmod 755 ${INITRD}/init
# Adding autologin to the root user and set TERM=linux for installer
sed -i "s/ExecStart.*/ExecStart=-\/sbin\/agetty --autologin root --noclear %I linux/g" ${INITRD}/lib/systemd/system/getty@.service
sed -i "s/ExecStart.*/ExecStart=-\/sbin\/agetty --autologin root --keep-baud 115200,38400,9600 %I screen/g" ${INITRD}/lib/systemd/system/serial-getty@.service
rm -rf ${INITRD}/etc/systemd/system/getty.target.wants/console-getty.service
# Step 7 - Create installer script
sed -i "s/root:.*/root:x:0:0:root:\/root:\/bin\/bootphotoninstaller/g" ${INITRD}/etc/passwd
mkdir -p ${INITRD}/mnt/photon-root/photon-chroot
rm -rf ${INITRD}/RPMS
echo ${RPMS_PATH}
#cp -r ${RPMS_PATH} ${WORKINGDIR}/
(
cd ${RPMS_PATH}
mkdir -p ${WORKINGDIR}/RPMS
cp --verbose --parents ${RPM_LIST} ${WORKINGDIR}/RPMS/
chmod 644 ${WORKINGDIR}/RPMS/${ARCH}/*.rpm ${WORKINGDIR}/RPMS/noarch/*.rpm
)
# Work in sub-shell using ( ... ) to come back to original folder.
(
file_list=""
cd $STAGE_PATH
for file_name in $ADDITIONAL_FILES_TO_COPY_FROM_STAGE; do
[ -n "$file_name" ] && file_list+="${file_name} "
done
if [ -n "${file_list}" ]; then
cp ${file_list} ${WORKINGDIR}
fi
)
# Creating rpm repo in cd..
createrepo --update --database ${WORKINGDIR}/RPMS
repodatadir=${WORKINGDIR}/RPMS/repodata
if [ -d $repodatadir ]; then
pushd $repodatadir
metaDataFile=$(find -type f -name "*primary.xml.gz")
ln -sfv $metaDataFile primary.xml.gz
popd
fi
rm -rf ${INITRD}/LOGS
# Cleaning up
find ${INITRD}/usr/lib/ -maxdepth 1 -mindepth 1 -type f -print0 | \
xargs -0 -r -P$(nproc) -n32 sh -c "file \"\$@\" | \
sed -n -e 's/^\(.*\):[ ]*ELF.*, not stripped.*/\1/p' | \
xargs -I\{\} strip \{\}" ARG0
rm -rf ${INITRD}/home/* \
${INITRD}/var/lib/rpm* \
${INITRD}/var/lib/.rpm* \
${INITRD}/usr/lib/sysimage/rpm* \
${INITRD}/usr/lib/sysimage/.rpm* \
${INITRD}/cache \
${INITRD}/boot \
${INITRD}/usr/include \
${INITRD}/usr/sbin/sln \
${INITRD}/usr/bin/iconv \
${INITRD}/usr/bin/oldfind \
${INITRD}/usr/bin/localedef \
${INITRD}/usr/bin/sqlite3 \
${INITRD}/usr/bin/grub2-* \
${INITRD}/usr/bin/bsdcpio \
${INITRD}/usr/bin/bsdtar \
${INITRD}/usr/bin/networkctl \
${INITRD}/usr/bin/machinectl \
${INITRD}/usr/bin/pkg-config \
${INITRD}/usr/bin/openssl \
${INITRD}/usr/bin/timedatectl \
${INITRD}/usr/bin/localectl \
${INITRD}/usr/bin/systemd-cgls \
${INITRD}/usr/bin/systemd-analyze \
${INITRD}/usr/bin/systemd-nspawn \
${INITRD}/usr/bin/systemd-inhibit \
${INITRD}/usr/bin/systemd-studio-bridge \
${INITRD}/usr/lib/python${PY_VER}/lib2to3 \
${INITRD}/usr/lib/python${PY_VER}/lib-tk \
${INITRD}/usr/lib/python${PY_VER}/ensurepip \
${INITRD}/usr/lib/python${PY_VER}/distutils \
${INITRD}/usr/lib/python${PY_VER}/pydoc_data \
${INITRD}/usr/lib/python${PY_VER}/idlelib \
${INITRD}/usr/lib/python${PY_VER}/unittest \
${INITRD}/usr/lib/librpmbuild.so* \
${INITRD}/usr/lib/libdb_cxx* \
${INITRD}/usr/lib/libnss_compat* \
${INITRD}/usr/lib/grub/i386-pc/*.module \
${INITRD}/usr/lib/grub/x86_64-efi/*.module \
${INITRD}/usr/lib/grub/arm64-efi/*.module \
${INITRD}/lib/libmvec* \
${INITRD}/usr/lib/gconv
find "${INITRD}/usr/sbin" -mindepth 1 -maxdepth 1 -name "grub2*" \
! -name grub2-install -print0 | \
xargs -0 -r -P$(nproc) -n32 rm -rvf
find "${INITRD}/usr/share" -mindepth 1 -maxdepth 1 \
! -name terminfo \
! -name cracklib \
! -name grub \
! -name factory \
! -name dbus-1 -print0 |
xargs -0 -r -P$(nproc) -n32 rm -rvf
# Set password max days to 99999 (disable aging)
chroot ${INITRD} /bin/bash -c "chage -M 99999 root"
# Generate the initrd
pushd $INITRD
(find . | cpio -o -H newc --quiet | gzip -9) > ${WORKINGDIR}/isolinux/initrd.img
popd
rm -rf $INITRD
# Step 9 Generate the ISO!!!!
pushd $WORKINGDIR
mkisofs -R -l -L -D -b isolinux/isolinux.bin \
-c isolinux/boot.cat \
-no-emul-boot \
-boot-load-size 4 \
-boot-info-table \
-eltorito-alt-boot -e ${EFI_IMAGE} \
-no-emul-boot \
-V "PHOTON_$(date +%Y%m%d)" \
$WORKINGDIR > $ISO_OUTPUT_NAME
popd

View File

@ -2,9 +2,6 @@
"installer": {
"hostname": "photon-machine",
"packagelist_file": "packages_ova.json",
"postinstallscripts": [
"../relocate-rpmdb.sh"
],
"password": {
"age": 0,
"crypted": false,

View File

@ -1,69 +0,0 @@
#!/usr/bin/env python3
import os
from utils import Utils
from argparse import ArgumentParser
from CommandUtils import CommandUtils
def create_ova(
    raw_image_names, config, skip_convert=False, image_name=None, eulafile=None
):
    """Convert raw disk image(s) to VMDK and assemble them into an OVA.

    raw_image_names -- a single path or a list of paths to raw disk images.
    config          -- image config dict; must contain 'image_type' and
                       'ova_config', may contain 'image_name'.
    skip_convert    -- if True, assume the .vmdk files already exist and
                       skip the vmdk-convert step.
    image_name      -- basename for generated artifacts; defaults to
                       config['image_name'] or 'photon-<image_type>'.
    eulafile        -- optional EULA file passed through to ova-compose.
    """
    cmdUtils = CommandUtils()
    # Per-image-type config (the ova-compose yaml) lives next to this script.
    config_path = os.path.join(
        os.path.dirname(os.path.realpath(__file__)), config["image_type"]
    )
    if image_name is None:
        image_name = config.get("image_name", f"photon-{config['image_type']}")
    # Accept a bare string for convenience, but normalize to a list.
    if isinstance(raw_image_names, str):
        raw_image_names = [raw_image_names]
    if not isinstance(raw_image_names, list):
        raise TypeError("raw_image_names must be a str or a list of str")
    output_path = os.path.dirname(os.path.realpath(raw_image_names[0]))
    vmdk_paths = []
    for i, raw_img in enumerate(raw_image_names):
        vmdk_paths.append(os.path.join(output_path, f"{image_name}{i}.vmdk"))
        if not skip_convert:
            # -t 2147483647 caps the stream size at 2^31-1 bytes
            cmdUtils.runBashCmd(
                f"vmdk-convert -t 2147483647 {raw_img} {vmdk_paths[i]}"
            )
    ova_config_file = os.path.join(config_path, config["ova_config"])
    ova_file = os.path.join(output_path, f"{image_name}.ova")
    compose_cmd = [f"ova-compose -i {ova_config_file} -o {ova_file}"]
    if eulafile:
        compose_cmd.append(f"--param eulafile={eulafile}")
    # The ova-compose config references the disks as params disk0, disk1, ...
    for i, d in enumerate(vmdk_paths):
        compose_cmd.append(f"--param disk{i}={d}")
    cmdUtils.runBashCmd(" ".join(compose_cmd))
    # The vmdks are packed inside the OVA; remove the intermediates.
    cmdUtils.runBashCmd(f"rm -f {output_path}/*.vmdk")
if __name__ == "__main__":
    # CLI wrapper around create_ova():
    #   -r  path to the raw disk image
    #   -c  json config file (required; read via Utils.jsonread)
    #   -v  skip the raw -> vmdk conversion step
    parser = ArgumentParser()
    parser.add_argument("-r", "--raw-image-path", dest="raw_image_path")
    parser.add_argument("-c", "--config-path", dest="config_path")
    parser.add_argument(
        "-v", "--skip-convert", dest="skip_convert", action="store_true"
    )
    options = parser.parse_args()
    if options.config_path:
        config = Utils.jsonread(options.config_path)
    else:
        raise Exception("No config file defined")
    # image_name/eulafile are left at their defaults when run from the CLI.
    create_ova(
        options.raw_image_path, config, skip_convert=options.skip_convert
    )

View File

@ -1,107 +0,0 @@
#!/usr/bin/env python3
import os
import ctypes
import ctypes.util
import json
import collections
import fileinput
import platform
import re
import copy
import shutil
class Utils(object):
    """Mount/umount and small file-manipulation helpers for the image builder."""

    def __init__(self):
        self.buildArch = platform.machine()
        # Filesystem types the running kernel knows about: second column
        # of /proc/filesystems (first column is empty or "nodev").
        self.filesystems = []
        with open("/proc/filesystems") as fs:
            for line in fs:
                self.filesystems.append(line.rstrip("\n").split("\t")[1])
        self.libcloader = ctypes.CDLL(
            ctypes.util.find_library("c"), use_errno=True
        )

    @staticmethod
    def _to_bytes(s):
        # ctypes.c_char_p requires bytes on Python 3; passing str raises
        # TypeError. Encode str arguments, pass anything else through.
        return s.encode() if isinstance(s, str) else s

    def mount(self, source, destination, filesystem, flags):
        """Mount `source` on `destination` via the libc mount() wrapper.

        Creates `destination` if missing. Raises Exception for bad paths,
        ValueError for a filesystem type unknown to the kernel, and
        RuntimeError when the underlying mount call fails.
        """
        if not os.access(source, os.R_OK):
            raise Exception(f"Could not find path {source}")
        if not os.access(destination, os.F_OK):
            os.mkdir(destination)
        if not os.access(destination, os.W_OK):
            raise Exception(f"Could not write to path {destination}")
        if filesystem not in self.filesystems:
            raise ValueError("Filesystem unknown")
        # NOTE(review): mount(2) is mount(src, tgt, fstype, mountflags, data)
        # with mountflags an unsigned long; a char pointer is being passed in
        # that slot - confirm what callers actually supply for `flags`.
        ret = self.libcloader.mount(
            ctypes.c_char_p(self._to_bytes(source)),
            ctypes.c_char_p(self._to_bytes(destination)),
            ctypes.c_char_p(self._to_bytes(filesystem)),
            ctypes.c_char_p(self._to_bytes(flags)),
            0,
        )
        if ret != 0:
            raise RuntimeError(
                "Cannot mount {} : {}".format(
                    source, os.strerror(ctypes.get_errno())
                )
            )

    def umount(self, destination):
        """Unmount `destination`; raises RuntimeError with errno text on failure."""
        ret = self.libcloader.umount(
            ctypes.c_char_p(self._to_bytes(destination))
        )
        if ret != 0:
            raise RuntimeError(
                "Cannot umount {} : {}".format(
                    destination, os.strerror(ctypes.get_errno())
                )
            )

    @staticmethod
    def jsonread(filename):
        """Load a JSON file, preserving key order (OrderedDict)."""
        with open(filename) as json_data:
            return json.load(
                json_data, object_pairs_hook=collections.OrderedDict
            )

    @staticmethod
    def replaceinfile(filename, pattern, sub):
        """Replace regex `pattern` with `sub` on every line, in place."""
        for line in fileinput.input(filename, inplace=True):
            line = re.sub(pattern, sub, line)
            # bug fix: lines already carry their trailing newline; print's
            # default end="\n" was doubling every newline in the file.
            print(line, end="")

    @staticmethod
    def replaceandsaveasnewfile(old_file, new_file, pattern, sub):
        """Copy old_file to new_file, applying re.sub(pattern, sub) per line."""
        with open(old_file, "r") as old, open(new_file, "w") as new:
            for line in old:
                line = re.sub(pattern, sub, line)
                new.write(line)

    @staticmethod
    def generatePhotonVmx(old_file, new_file, pattern, disk):
        """Generate a vmx file with `disk` copies of each scsi0:/sata0: line.

        Lines mentioning a disk device are duplicated for indices 0..disk-1,
        rewriting both the device index and `pattern` occurrences; all other
        lines are copied through unchanged.
        """
        with open(old_file, "r") as old, open(new_file, "w") as new:
            for line in old:
                line_as_is = True
                for device in ["scsi0:", "sata0:"]:
                    if device in line:
                        line_as_is = False
                        line = re.sub(pattern, f"{pattern}0", line)
                        new.write(line)
                        for i in range(1, disk):
                            nline = copy.copy(line)
                            nline = re.sub(
                                f"{device}0", f"{device}{i}", nline
                            )
                            nline = re.sub(
                                f"{pattern}0", f"{pattern}{i}", nline
                            )
                            new.write(nline)
                if line_as_is:
                    new.write(line)

    @staticmethod
    def copyallfiles(src, target):
        """Copy every regular file (non-recursive) from src into target."""
        files = os.listdir(src)
        for file in files:
            filename = os.path.join(src, file)
            if os.path.isfile(filename):
                shutil.copy(filename, target)

View File

@ -0,0 +1,18 @@
live: false
hostname: photon-machine
disks:
default:
filename: !param imgfile=photon-ami.img
size: 8192
postinstallscripts:
- ami-patch.sh
additional_files:
- cloud-photon.cfg: /etc/cloud/cloud.cfg
packagelist_file: packages_ami.json
linux_flavor: linux

View File

@ -0,0 +1,21 @@
live: false
hostname: photon-machine
password:
crypted: false
text: changeme
disks:
default:
filename: !param imgfile=photon-azure.img
size: 16384
packagelist_file: packages_azure.json
linux_flavor: linux
postinstallscripts:
- azure-patch.sh
additional_files:
- cloud-photon.cfg: /etc/cloud/cloud.cfg

View File

@ -0,0 +1,19 @@
live: false
hostname: photon-machine
disks:
default:
filename: !param imgfile=photon-gce.img
size: 16384
postinstallscripts:
- gce-patch.sh
additional_files:
- cloud-photon.cfg: /etc/cloud/cloud.cfg
- ntpd.service: /usr/lib/systemd/system/ntpd.service
packagelist_file: packages_gce.json
linux_flavor: linux

View File

@ -0,0 +1,22 @@
install_options_file: /photon/common/data/build_install_options_all.json
iso_files:
sample_ks.cfg: isolinux/
open_source_license.tar.gz: ""
/photon/EULA.txt: ""
/photon/NOTICE-Apachev2: ""
/photon/NOTICE-GPL2.0: ""
/photon/stage/ostree-repo.tar.gz: ""
initrd_files:
/photon/common/data/build_install_options_all.json: installer/
# make sure all files referenced in the above are included
# use `jq -r '.[].packagelist_file' build_install_options_all.json`
/photon/common/data/packages_minimal.json: installer/
/photon/common/data/packages_developer.json: installer/
/photon/common/data/packages_ostree_host.json: installer/
/photon/common/data/packages_rt.json: installer/
/photon/common/data/packages_appliance.json: installer/
sample_ui.cfg: installer/
/photon/EULA.txt: installer/

View File

@ -0,0 +1,19 @@
live: false
hostname: photon-machine
disks:
default:
filename: !param imgfile=photon.img
size: 16384
packagelist_file: packages_ova.json
password:
age: 0
crypted: false
text: changeme
public_key: ""
linux_flavor: linux-esx

View File

@ -25,7 +25,7 @@ hardware:
rootdisk:
type: hard_disk
parent: scsi1
disk_image: !param disk0
disk_image: !param disk
usb1:
type: usb_controller
ethernet1:

View File

@ -0,0 +1,50 @@
#! /bin/bash
# Write /boot/grub2/grub.cfg into a freshly installed image tree.
#
# Arguments:
#   $1 BUILDROOT           - root of the installed image tree
#   $2 ROOT_PARTITION_PATH - block device of the root partition (UUID source)
#   $3 BOOT_PARTITION_PATH - block device of the boot partition (currently unused)
#   $4 BOOT_DIRECTORY      - path prefix of the boot dir as grub sees it
set -o errexit # exit if error...insurance ;)
set -o nounset # exit if variable not initialized
set +h # deactivate hashall
SCRIPT_PATH=$(dirname $(realpath -s $0))
BUILDROOT=$1
ROOT_PARTITION_PATH=$2
BOOT_PARTITION_PATH=$3
BOOT_DIRECTORY=$4
# Reference the root fs by UUID so the cfg survives device renumbering.
FSUUID=$(blkid -s UUID -o value $ROOT_PARTITION_PATH)
EXTRA_PARAMS="rootwait rw console=ttyS0,115200n8 console=tty0"
# Unquoted heredoc: $FSUUID/$BOOT_DIRECTORY/$EXTRA_PARAMS expand now, while
# escaped \$photon_* variables are left for grub to resolve at boot time.
cat > $BUILDROOT/boot/grub2/grub.cfg << EOF
# Begin /boot/grub2/grub.cfg
set default=0
set timeout=2
loadfont ascii
insmod all_video
insmod gfxterm
insmod png
insmod ext2
set gfxmode="800x600"
gfxpayload=keep
terminal_output gfxterm
set theme=${BOOT_DIRECTORY}grub2/themes/photon/theme.txt
load_env -f ${BOOT_DIRECTORY}photon.cfg
if [ -f ${BOOT_DIRECTORY}systemd.cfg ]; then
load_env -f ${BOOT_DIRECTORY}systemd.cfg
else
set systemd_cmdline=net.ifnames=0
fi
set rootpartition=UUID=$FSUUID
menuentry "Photon" {
linux ${BOOT_DIRECTORY}\$photon_linux root=\$rootpartition \$photon_cmdline \$systemd_cmdline $EXTRA_PARAMS
if [ -f ${BOOT_DIRECTORY}\$photon_initrd ]; then
initrd ${BOOT_DIRECTORY}\$photon_initrd
fi
}
# End /boot/grub2/grub.cfg
EOF

View File

@ -0,0 +1,15 @@
# resizefs.service - one-shot early-boot unit that runs
# /usr/local/bin/resizefs.sh to grow the root partition and create a swap
# partition. Skipped once a partition labeled PHOTON_SWAP already exists
# (ConditionPathExists on its /dev/disk/by-label symlink).
[Unit]
Description=Resizes rootfs and creates swap partition
DefaultDependencies=no
Conflicts=shutdown.target
After=systemd-remount-fs.service
Before=systemd-sysusers.service sysinit.target shutdown.target
ConditionPathExists=!/dev/disk/by-label/PHOTON_SWAP
[Service]
Type=oneshot
# RemainAfterExit keeps the unit "active" so it is not re-run in this boot.
RemainAfterExit=yes
ExecStart=/usr/local/bin/resizefs.sh
[Install]
WantedBy=multi-user.target

View File

@ -0,0 +1,80 @@
#!/bin/bash
# Grow the root partition to ~90% of its parent disk, then create a swap
# partition (label PHOTON_SWAP) in the remaining space. Idempotent: both
# steps are skipped when already done.
set -e
SWAP_LABEL="PHOTON_SWAP"
# Resolve the block device backing / and its sysfs node.
DEV="$(findmnt -n -o SOURCE -M /)"
DEV="$(basename "$DEV")"
SYSFSP="/sys/class/block/$DEV"
# All sizes below are in 512-byte sectors (sysfs units).
SIZE="$(<"$SYSFSP/size")"
START="$(<"$SYSFSP/start")"
PARENT_DEV="$(basename "$(readlink -f "$SYSFSP/..")")"
PARENT_SIZE="$(<"$SYSFSP/../size")"
PARTNUM="$(<"$SYSFSP/partition")"
# Align to 1M (512 * 2048)
WANT_SIZE="$(( PARENT_SIZE / 2048 * 9 / 10 * 2048))"
# Machine-readable partition map of the parent disk, in sectors.
get_partmap() {
    parted -m -s "/dev/$PARENT_DEV" unit s p
}
# Don't resize if we are already large enough
if [ "$SIZE" -ge "$WANT_SIZE" ]; then
    echo "root partition already resized, skip resizing" >&2
else
    # Workaround partition-in-use issue:
    # https://bugs.launchpad.net/ubuntu/+source/parted/+bug/1270203
    echo -e "yes\n$((WANT_SIZE + START - 1))" | \
        parted ---pretend-input-tty "/dev/$PARENT_DEV" unit s resizepart "$PARTNUM"
    partprobe "/dev/$PARENT_DEV"
    resize2fs "/dev/$DEV"
fi
if blkid -L "$SWAP_LABEL" >/dev/null ; then
    echo "swap partition already created, nothing to do">&2
    exit 0
fi
# Snapshot the partition map into an unlinked temp file: fd 3 writes it,
# fd 4 reads it back later for the before/after diff.
OLD_MAPF="$(mktemp)"
exec 3>"$OLD_MAPF"
exec 4<"$OLD_MAPF"
rm "$OLD_MAPF"
get_partmap >&3
parted -s "/dev/$PARENT_DEV" unit s mkpart primary linux-swap "$((WANT_SIZE + START))" 100%
partprobe "/dev/$PARENT_DEV"
# Find out the partition number we just created
# It should be in the format of
# "number":"begin":"end":"size":"filesystem-type":"partition-name":"flags-set";
# join -v 2 emits lines only present in the new map, i.e. the new partition.
IFS= mapfile -d: -t DIFF_MAP < <(join -v 2 - <(get_partmap) <&4)
# Sanity check: new partition should begin after root partition
# (the first test doubles as "is this an integer" after stripping the 's').
if ! [ "${DIFF_MAP[1]%"s"}" -eq "${DIFF_MAP[1]%"s"}" ] ||
   [ "${DIFF_MAP[1]%"s"}" -lt "$((WANT_SIZE + START))" ]; then
    echo "Failed parsing partition map" >&2
    exit 1
fi
enable_swap() {
    mkswap "$1"
    swaplabel -L "$SWAP_LABEL" "$1"
    swapon "$1"
}
# Find out the block device name of the new partition,
# and enable swap on it.
shopt -s nullglob
for P in "$SYSFSP/../$PARENT_DEV"* ; do
    NEW_PART="$(<"$P/partition")" || continue
    [ "$NEW_PART" -eq "${DIFF_MAP[0]}" ] || continue
    enable_swap "/dev/$(basename "$P")"
    exit 0
done
echo "Failed to find new partition" >&2
exit 1

View File

@ -0,0 +1,18 @@
#!/bin/bash
# Post-install patch for the Raspberry Pi image, run inside the image chroot.
# Mount the swap partition that resizefs.sh creates on first boot.
echo "LABEL=PHOTON_SWAP none swap defaults 0 0" >> /etc/fstab
chmod +x /usr/local/bin/resizefs.sh
# Enable units by symlinking directly (systemctl is not usable in the chroot).
ln -s /lib/systemd/system/resizefs.service /etc/systemd/system/multi-user.target.wants/resizefs.service
ln -s /lib/systemd/system/sshd-keygen.service /etc/systemd/system/multi-user.target.wants/sshd-keygen.service
ln -s /lib/systemd/system/sshd.service /etc/systemd/system/multi-user.target.wants/sshd.service
# Quoted "EOF": heredoc content is written literally, with no expansion.
cat > /etc/modules-load.d/genet.conf << "EOF"
# To fix https://github.com/raspberrypi/linux/issues/3108
# Issue: bcmgenet_mii_probe() is getting called before unimac_mdio_probe() finishes
# which causes the ethernet "failed to connect PHY" in rpi4
# Sol: Load the modules in below order using systemd-modules
mdio_bcm_unimac
genet
EOF

View File

@ -0,0 +1,39 @@
live: false
arch: aarch64
bootmode: efi
hostname: photon-machine
disks:
default:
filename: !param imgfile=photon-rpi.img
size: 768
packagelist_file: packages_rpi.json
partition_type: msdos
partitions:
- mountpoint: /boot/efi
size: 30
filesystem: vfat
type: 0c01
- mountpoint: /
size: 0
filesystem: ext4
password:
age: 0
crypted: false
text: changeme
additional_files:
- resizefs.sh: /usr/local/bin/resizefs.sh
- resizefs.service: /lib/systemd/system/resizefs.service
postinstallscripts:
- rpi-custom-patch.sh
linux_flavor: linux

485
support/poi/poi.py Executable file
View File

@ -0,0 +1,485 @@
#!/usr/bin/env python3
import getopt
import glob
import json
import os
import platform
import shutil
import subprocess
import sys
# Build-host architecture; also the default target architecture.
THIS_ARCH = platform.machine()
RELEASE_VER = "5.0"
THIS_DIR = os.path.dirname(os.path.abspath(__file__))
# This script lives in support/poi, so the photon tree root is two levels up.
PHOTON_DIR = os.path.abspath(os.path.join(THIS_DIR, "../.."))
EULA_PATH = os.path.join(PHOTON_DIR, "EULA.txt")
STAGE_DIR = os.path.join(PHOTON_DIR, "stage")
REPO_DIR = os.path.join(STAGE_DIR, "RPMS")
# Maps `uname -m` arch names to docker platform names.
ARCH_MAP = {'x86_64': "amd64", 'aarch64': "arm64"}
# Default photon-os-installer container image, pinned per build-host arch.
if THIS_ARCH == "x86_64":
    POI_IMAGE = "projects.registry.vmware.com/photon/installer:ob-22704994"
elif THIS_ARCH == "aarch64":
    POI_IMAGE = "projects.registry.vmware.com/photon/installer-arm64:ob-22705318"
else:
    raise Exception(f"unknown arch {THIS_ARCH}")
class Poi(object):
    """Driver for the photon-os-installer (POI) container.

    All image/ISO creation steps run inside the POI docker image, with the
    RPM repo (/repo), the photon tree (/photon) and a per-target stage
    directory (/workdir) bind-mounted into the container.
    """

    def __init__(self,
                 arch=THIS_ARCH,
                 release_ver=RELEASE_VER,
                 repo_dir=None,
                 poi_image=POI_IMAGE,
                 photon_dir=PHOTON_DIR,
                 stage_dir=STAGE_DIR,
                 eula_path=EULA_PATH):
        self.arch = arch
        self.release_ver = release_ver
        self.repo_dir = repo_dir
        self.poi_image = poi_image
        self.photon_dir = photon_dir
        self.stage_dir = stage_dir
        # bug fix: honor the eula_path argument; previously the module-level
        # EULA_PATH was assigned unconditionally, ignoring the parameter.
        self.eula_path = eula_path
        if self.repo_dir is None:
            self.repo_dir = os.path.join(self.stage_dir, "RPMS")
        self.docker_arch = ARCH_MAP[self.arch]

    def run_poi(self, command, workdir=None):
        """Run `command` (a list of args) inside the POI container.

        workdir is bind-mounted as /workdir and used as the working
        directory; defaults to the current directory. Raises
        subprocess.CalledProcessError on failure (check=True).
        """
        if workdir is None:
            workdir = os.getcwd()
        poi_cmd = ["docker", "run", "--rm", "--privileged",
                   "-v", "/dev:/dev",
                   "-v", f"{self.repo_dir}:/repo",
                   "-v", f"{workdir}:/workdir",
                   "-v", f"{self.photon_dir}:/photon",
                   "-w", "/workdir"]
        if self.arch != THIS_ARCH:
            # cross-arch build: let docker emulate the target platform
            poi_cmd.append(f"--platform=linux/{self.docker_arch}")
        poi_cmd.append(self.poi_image)
        poi_cmd.extend(command)
        print(f"running {poi_cmd}")
        subprocess.run(poi_cmd, check=True)

    #
    # copy config files from configs/{type} to stage dir
    # packages json from common/data overrides the one from configs, if present
    #
    def create_config(self, type, subtype=None, subdir=None):
        if subdir is None:
            subdir = type
        if subtype is None:
            subtype = type
        stage_cfg_dir = os.path.join(self.stage_dir, subdir)
        os.makedirs(stage_cfg_dir, exist_ok=True)
        cfg_dir = os.path.join(THIS_DIR, "configs", subdir)
        if os.path.isdir(cfg_dir):
            shutil.copytree(cfg_dir, stage_cfg_dir, dirs_exist_ok=True)
        else:
            print(f"{cfg_dir} not found, ignoring")
        pkg_list_file = os.path.join(self.photon_dir,
                                     "common", "data",
                                     f"packages_{subtype}.json")
        if os.path.isfile(pkg_list_file):
            print(f"using pkg_list_file {pkg_list_file}")
            shutil.copy(pkg_list_file, stage_cfg_dir)
        else:
            print(f"{pkg_list_file} not found, ignoring")

    def create_config_from_custom(self,
                                  type, custom_file,
                                  subtype=None, subdir=None):
        """Generate a kickstart yaml in the stage dir from a legacy custom
        json config (keys 'installer' and 'size')."""
        if subdir is None:
            subdir = type
        if subtype is None:
            subtype = type
        image_file = self.image_filename(type)
        stage_cfg_dir = os.path.join(self.stage_dir, subdir)
        os.makedirs(stage_cfg_dir, exist_ok=True)
        ks_file = os.path.join(stage_cfg_dir, f"{type}_ks.yaml")
        print(f"generating {ks_file} from {custom_file}")
        with open(custom_file, "rt") as f:
            custom_config = json.load(f)
        ks_config = custom_config['installer']
        ks_config['disks'] = {
            'default': {
                'filename': image_file,
                'size': custom_config['size']
            }
        }
        ks_config['live'] = False
        # remove this code when "../relocate-rpmdb.sh" is removed from
        # build/photon-aarch64.json in photon-cfg
        if 'postinstallscripts' in ks_config:
            ks_config['postinstallscripts'] = list(filter(
                lambda item: "relocate-rpmdb.sh" not in item,
                ks_config['postinstallscripts']
            ))
        # saving with .yaml extension although it's just json because that's
        # what we are going to use in create_image() - but json is a subset
        # of yaml anyway
        with open(ks_file, "wt") as f:
            json.dump(ks_config, f, indent=4)
        pkg_list_file = os.path.join(self.photon_dir,
                                     "common", "data",
                                     ks_config['packagelist_file'])
        if os.path.isfile(pkg_list_file):
            shutil.copy(pkg_list_file, stage_cfg_dir)
        else:
            print(f"{pkg_list_file} not found, ignoring")

    def create_raw_image(self, type, image_file, subdir=None):
        """Run POI create-image with the staged kickstart; returns the
        path of the created raw image in the stage dir."""
        if subdir is None:
            subdir = type
        stage_cfg_dir = os.path.join(self.stage_dir, subdir)
        ks_file = f"{type}_ks.yaml"
        self.run_poi(["create-image",
                      "-c", ks_file,
                      "-v", self.release_ver,
                      "--param", f"imgfile={image_file}"
                      ],
                     workdir=stage_cfg_dir)
        return os.path.join(stage_cfg_dir, image_file)

    def create_ova(self, image_file, subdir="ova", cleanup=True):
        """Package a raw image into an OVA via POI create-ova."""
        stage_cfg_dir = os.path.join(self.stage_dir, subdir)
        shutil.copy(self.eula_path, stage_cfg_dir)
        # strip the extension:
        ova_name = image_file.rsplit(".", 1)[0]
        self.run_poi(["create-ova",
                      "--raw-images", image_file,
                      "--ova-config", "photon.yaml",
                      "--ova-name", ova_name,
                      "--param", "eulafile=EULA.txt"
                      ],
                     workdir=stage_cfg_dir)
        if cleanup:
            os.remove(os.path.join(stage_cfg_dir, image_file))

    def create_azure(self, image_file, subdir="azure"):
        """Convert a raw image to an Azure VHD via POI create-azure."""
        stage_cfg_dir = os.path.join(self.stage_dir, subdir)
        self.run_poi(["create-azure",
                      "--raw-image", image_file,
                      ],
                     workdir=stage_cfg_dir)
        # no cleanup, done by create-azure

    def _create_tar_gz(self, image_file, tarfile, subdir=None, cleanup=True):
        """Pack image_file (relative to stage/<subdir>) into tarfile."""
        if subdir is None:
            raise Exception("subdir must be set")
        stage_cfg_dir = os.path.join(self.stage_dir, subdir)
        print(f"tarring {image_file} to {tarfile}")
        # check=True added: a failed tar must not be silently ignored
        subprocess.run(["tar", "zcf", tarfile, image_file],
                       cwd=stage_cfg_dir, check=True)
        if cleanup:
            os.remove(os.path.join(stage_cfg_dir, image_file))

    def create_ami(self, image_file, subdir=None, cleanup=True):
        """Rename the raw image to .raw (expected by AMI tooling) and tar it."""
        if subdir is None:
            raise Exception("subdir must be set")
        stage_cfg_dir = os.path.join(self.stage_dir, subdir)
        # strip the extension:
        basename = image_file.rsplit(".", 1)[0]
        # our scripts expect the extension ".raw":
        image_file_raw = f"{basename}.raw"
        os.rename(os.path.join(stage_cfg_dir, image_file),
                  os.path.join(stage_cfg_dir, image_file_raw))
        self._create_tar_gz(image_file_raw, f"{basename}.tar.gz",
                            subdir=subdir, cleanup=cleanup)

    def create_gce(self, image_file, subdir=None, cleanup=True):
        """Rename the raw image to disk.raw (required by GCE) and tar it."""
        if subdir is None:
            raise Exception("subdir must be set")
        stage_cfg_dir = os.path.join(self.stage_dir, subdir)
        # strip the extension:
        basename = image_file.rsplit(".", 1)[0]
        # gce expects the name "disk.raw":
        image_file_raw = "disk.raw"
        os.rename(os.path.join(stage_cfg_dir, image_file),
                  os.path.join(stage_cfg_dir, image_file_raw))
        self._create_tar_gz(image_file_raw, f"{basename}.tar.gz",
                            subdir=subdir, cleanup=cleanup)

    def create_rpi(self, image_file, subdir=None, cleanup=True):
        """xz-compress the Raspberry Pi raw image."""
        if subdir is None:
            raise Exception("subdir must be set")
        stage_cfg_dir = os.path.join(self.stage_dir, subdir)
        # strip the extension:
        basename = image_file.rsplit(".", 1)[0]
        image_path_xz = os.path.join(stage_cfg_dir, f"{basename}.xz")
        print(f"compressing {image_file} to {basename}.xz")
        with open(image_path_xz, "w") as fout:
            # check=True added: don't leave a truncated .xz on failure
            subprocess.run(["xz", "-c", image_file],
                           stdout=fout,
                           cwd=stage_cfg_dir, check=True)
        if cleanup:
            os.remove(os.path.join(stage_cfg_dir, image_file))

    def create_rpm_list(self, iso_file, type=None, subdir="iso"):
        """Write <basename>[.<type>].rpm-list with container-visible RPM paths.

        Prefers stage/pkg_info.json when present; otherwise globs the repo
        directory. type selects regular (None), "debug" or "source" RPMs.
        """
        basename = iso_file.rsplit(".", 1)[0]
        stage_cfg_dir = os.path.join(self.stage_dir, subdir)
        rpm_list = []
        pkg_info_json = os.path.join(self.stage_dir, "pkg_info.json")
        if type != "source":
            repo_dir = self.repo_dir
        else:
            # bug fix: os.path.abspath() takes a single path; the components
            # must be joined first (was a TypeError for source ISOs)
            repo_dir = os.path.abspath(
                os.path.join(self.repo_dir, "..", "SRPMS"))
        # NOTE(review): run_poi() always mounts self.repo_dir at /repo, so
        # for type=="source" the /repo substitution below may not match the
        # container mount - confirm SRPMS handling.
        if os.path.isfile(pkg_info_json):
            print("using pkg_info.json for RPM list")
            key = 'rpm'
            if type in ["source", "debug"]:
                key = f'{type}rpm'
            with open(pkg_info_json, "rt") as f:
                pkg_info = json.load(f)
            for pkg_name, pkg in pkg_info.items():
                pkg_file = pkg.get(key, None)
                if pkg_file is not None:
                    rpm_list.append(pkg_file.replace(repo_dir, "/repo"))
        else:
            print("pkg_info.json not found, shipping all RPMs")
            for arch in ["noarch", self.arch]:
                for p in glob.glob(os.path.join(repo_dir, arch, "*.rpm")):
                    if ("-debuginfo-" in p) == (type == "debug"):
                        # replace leading directory path with path as seen
                        # in container
                        rpm_list.append(p.replace(repo_dir.rstrip("/"), "/repo"))
        if type is None:
            rpm_list_file = os.path.join(stage_cfg_dir, f"{basename}.rpm-list")
        else:
            rpm_list_file = os.path.join(stage_cfg_dir,
                                         f"{basename}.{type}.rpm-list")
        with open(rpm_list_file, "wt") as f:
            for line in rpm_list:
                f.write(f"{line}\n")

    def create_full_iso(self, iso_file, subdir=None):
        """Build the regular full installer ISO via photon-iso-builder."""
        if subdir is None:
            subdir = "iso"
        stage_cfg_dir = os.path.join(self.stage_dir, subdir)
        for cfg_file in ["packages_installer_initrd.json",
                         "packages_minimal.json"]:
            cfg_path = os.path.join(self.photon_dir,
                                    "common", "data",
                                    cfg_file)
            shutil.copy(cfg_path, stage_cfg_dir)
        basename = iso_file.rsplit(".", 1)[0]
        self.run_poi(["photon-iso-builder",
                      "-f", "build-iso",
                      "-v", self.release_ver,
                      "-p", "packages_minimal.json",
                      "--initrd-pkgs-list-file", "packages_installer_initrd.json",
                      "--repo-paths=/repo",
                      "--rpms-list-file", f"{basename}.rpm-list",
                      "--config", "iso.yaml",
                      "--name", iso_file
                      ],
                     workdir=stage_cfg_dir)

    def create_full_special_iso(self, iso_file, type=None, subdir=None):
        """Build a non-bootable debug or source RPM ISO from a .rpm-list."""
        if subdir is None:
            subdir = "iso"
        if type == "debug":
            repo_subdir = "DEBUGRPMS"
        elif type == "source":
            repo_subdir = "SRPMS"
        else:
            raise Exception(f"unknown iso type '{type}'")
        stage_cfg_dir = os.path.join(self.stage_dir, subdir)
        basename = iso_file.rsplit(".", 1)[0]
        # fixed: duplicated "rpm_list_file = rpm_list_file =" assignment
        rpm_list_file = f"{basename}.{type}.rpm-list"
        script = f"""
        cd /workdir && \
        mkdir -p iso/{repo_subdir} && \
        while read f ; do \
            cp ${{f}} /workdir/iso/{repo_subdir}/ ; \
        done < {rpm_list_file} && \
        createrepo /workdir/iso/{repo_subdir}/ && \
        mkisofs -quiet -r -o {iso_file} /workdir/iso/ &&
        rm -rf iso/
        """
        self.run_poi(["bash", "-c", script], workdir=stage_cfg_dir)

    def create_custom_iso(self, iso_file, type=None, subdir=None):
        """Build a basic/minimal/rt installer ISO for package list
        packages_<type>.json."""
        if subdir is None:
            subdir = f"{type}-iso"
        stage_cfg_dir = os.path.join(self.stage_dir, subdir)
        initrd_pkgs_list_path = os.path.join(self.photon_dir,
                                             "common", "data",
                                             "packages_installer_initrd.json")
        shutil.copy(initrd_pkgs_list_path, stage_cfg_dir)
        self.run_poi(["photon-iso-builder",
                      "-f", "build-iso",
                      "-v", self.release_ver,
                      "-p", f"packages_{type}.json",
                      "--initrd-pkgs-list-file", "packages_installer_initrd.json",
                      "--repo-paths=/repo",
                      "--name", iso_file
                      ],
                     workdir=stage_cfg_dir)

    def get_git_sha(self):
        """Return the short git sha of the photon tree HEAD."""
        out = subprocess.run(["git", "rev-parse", "--short", "HEAD"],
                             capture_output=True,
                             check=True,
                             cwd=self.photon_dir)
        return out.stdout.decode().strip()

    # ova, azure etc. have the type in the name
    def image_filename(self, type, ext="img"):
        sha = self.get_git_sha()
        return f"photon-{type}-{self.release_ver}-{sha}.{self.arch}.{ext}"

    # full ISOs have no special name, but an extension
    def full_iso_name(self, type=None):
        sha = self.get_git_sha()
        if type is None:
            return f"photon-{self.release_ver}-{sha}.{self.arch}.iso"
        else:
            return f"photon-{self.release_ver}-{sha}.{self.arch}.{type}.iso"

    # custom ISOs have the type in the name, and just an 'iso' extension
    # (debug/source not supported here)
    def iso_name(self, type=None):
        # bug fix: the default was the *builtin* `type` (def iso_name(self,
        # type=type)), which would have produced a nonsense filename; all
        # callers pass type= explicitly, so None is a safe default.
        sha = self.get_git_sha()
        return f"photon-{type}-{self.release_ver}-{sha}.{self.arch}.iso"
def main():
    """CLI entry point: build the image/ISO target named on the command line.

    Usage: poi.py [-c CONFIG] [--arch A] [--docker-image I]
                  [--stage-dir D] [--repo-dir R] <target>
    Targets: ova, azure, ami, gce, rpi, iso, debug-iso, source-iso,
             basic-iso, minimal-iso, rt-iso.
    """
    config = None
    poi_image = POI_IMAGE
    stage_dir = STAGE_DIR
    repo_dir = None
    arch = THIS_ARCH
    try:
        opts, args = getopt.getopt(
            sys.argv[1:],
            "c:",
            longopts=["config=", "docker-image=", "arch=", "stage-dir=", "repo-dir="])
    except getopt.GetoptError as e:
        print(e)
        sys.exit(2)
    for o, a in opts:
        if o == "--arch":
            arch = a
        elif o in ["-c", "--config"]:
            config = a
        elif o == "--docker-image":
            poi_image = a
        elif o == "--stage-dir":
            # bug fix: this compared against "--stage_dir" (underscore),
            # which never matches the declared "stage-dir=" long option,
            # so --stage-dir always hit the assert below
            stage_dir = a
        elif o == "--repo-dir":
            repo_dir = a
        else:
            assert False, f"unhandled option {o}"
    # bug fix: message was missing the f-prefix and printed "{arch}" literally
    assert arch in ARCH_MAP, f"unsupported arch {arch}"
    if not args:
        print("no target given", file=sys.stderr)
        sys.exit(2)
    target = args[0]
    poi = Poi(arch=arch, poi_image=poi_image, stage_dir=stage_dir, repo_dir=repo_dir)
    if target in ["ova", "azure", "ami", "gce", "rpi"]:
        assert target != "rpi" or arch == "aarch64", "arch must be aarch64 to build RPi image"
        poi.create_config(target)
        if config is not None:
            # a legacy custom json config overrides the staged kickstart
            poi.create_config_from_custom(target, config)
        raw_image_file = poi.image_filename(target, "img")
        raw_image_path = poi.create_raw_image(target, raw_image_file)
        assert os.path.isfile(raw_image_path)
        if target == "ova":
            poi.create_ova(raw_image_file)
        elif target == "azure":
            poi.create_azure(raw_image_file)
        elif target == "ami":
            poi.create_ami(raw_image_file, subdir="ami")
        elif target == "gce":
            poi.create_gce(raw_image_file, subdir="gce")
        elif target == "rpi":
            poi.create_rpi(raw_image_file, subdir="rpi")
    elif target in ["iso", "debug-iso", "source-iso"]:
        poi.create_config("iso")
        # type is None indicates the 'normal' full ISO
        # otherwise it's 'special' (source or debug)
        type = None
        if target.startswith("debug-"):
            type = "debug"
        elif target.startswith("source-"):
            type = "source"
        iso_file = poi.full_iso_name(type=type)
        poi.create_rpm_list(iso_file, type=type)
        if type is None:
            poi.create_full_iso(iso_file)
        else:
            poi.create_full_special_iso(iso_file, type=type)
    elif target in ["basic-iso", "minimal-iso", "rt-iso"]:
        # strip "-iso" from the end:
        type = target[:-4]
        poi.create_config(target, subtype=type)
        iso_file = poi.iso_name(type=type)
        poi.create_custom_iso(iso_file, type=type)
    else:
        assert False, f"unknown target {target}"
if __name__ == '__main__':
main()