Note that the cmd is, and will always be, licensed under the GPL, NOT the LGPL like the rest of the program.

package lib

import (
	"archive/tar"
	"bytes"
	"crypto/ed25519"
	"encoding/binary"
	"encoding/json"
	"errors"
	"io"
	"os"
	"os/exec"
	"path/filepath"
	"strconv"
	"strings"

	"github.com/Masterminds/semver"
	"github.com/cespare/xxhash/v2"
	"github.com/klauspost/compress/zstd"
)

// SpecialFiles is a struct that contains the special files that are not to be deleted or replaced
type SpecialFiles struct {
	NoDelete  []string
	NoReplace []string
}

// Metadata is a struct that contains the metadata of the package
type Metadata struct {
	Name            string
	Description     string
	LongDescription string
	Version         semver.Version
	Author          string
	License         string
	Architecture    string
	Dependencies    []string
	SpecialFiles    SpecialFiles
}

// Build is a struct that contains the build configuration of the package
type Build struct {
	Type         string
	Dependencies []string
	Steps        []string
	TargetRoot   string
	HooksFolder  PotentiallyNullString
	FilesFolder  PotentiallyNullString
}

// PotentiallyNullString is a struct that contains a string that may be null
type PotentiallyNullString struct {
	Value string
	Null  bool
}

// Config is a struct that contains the configuration of the package
type Config struct {
	Metadata Metadata
	Build    Build
}

// Log is a struct that contains the log information
type Log struct {
	Level   string
	Content string
	Prompt  bool
}

// Logger is a struct that contains the functions and properties of the logger
type Logger struct {
	LogFunc         func(Log) string
	PromptSupported bool
}

var ErrEternityJsonOpenError = errors.New("error opening eternity.json")
var ErrEternityJsonReadError = errors.New("error reading eternity.json")
var ErrEternityJsonParseError = errors.New("error parsing eternity.json")
var ErrEternityJsonMapError = errors.New("error mapping eternity.json")

// interfaceToStringSlice converts an interface slice to a string slice
func interfaceToStringSlice(interfaceSlice []interface{}, interfaceName string) ([]string, error) {
	// Yes, it's meant to be empty and not nil: JSON arrays are empty, not nil
	//goland:noinspection GoPreferNilSlice
	stringSlice := []string{}
	for _, interfaceValue := range interfaceSlice {
		stringValue, ok := interfaceValue.(string)
		if !ok {
			return nil, errors.New(interfaceName + " are not strings")
		}
		stringSlice = append(stringSlice, stringValue)
	}

	return stringSlice, nil
}

// ParseConfig parses the eternity.json file
func ParseConfig(path string, logger *Logger) (Config, error, error) {
	// Open eternity.json
	logger.LogFunc(Log{
		Level:   "INFO",
		Content: "Parsing eternity.json",
		Prompt:  false,
	})
	file, err := os.Open(path)
	if err != nil {
		return Config{}, err, ErrEternityJsonOpenError
	}
	// Close the file once we're done with it
	defer func() { _ = file.Close() }()

	// Convert the file to a byte buffer
	var fileBytes bytes.Buffer
	_, err = io.Copy(&fileBytes, file)
	if err != nil {
		return Config{}, err, ErrEternityJsonReadError
	}

	// Parse the file as JSON
	var config map[string]interface{}
	err = json.Unmarshal(fileBytes.Bytes(), &config)
	if err != nil {
		return Config{}, err, ErrEternityJsonParseError
	}

	// Map SpecialFiles
	var parsedSpecialFiles SpecialFiles
	specialFiles, ok := config["specialFiles"].(map[string]interface{})
	if !ok {
		return Config{}, errors.New("specialFiles is not an object"), ErrEternityJsonMapError
	}
	noDelete, ok := specialFiles["noDelete"].([]interface{})
	if !ok {
		return Config{}, errors.New("noDelete is not an array"), ErrEternityJsonMapError
	}
	parsedSpecialFiles.NoDelete, err = interfaceToStringSlice(noDelete, "noDelete")
	if err != nil {
		return Config{}, err, ErrEternityJsonMapError
	}
	noReplace, ok := specialFiles["noReplace"].([]interface{})
	if !ok {
		return Config{}, errors.New("noReplace is not an array"), ErrEternityJsonMapError
	}
	parsedSpecialFiles.NoReplace, err = interfaceToStringSlice(noReplace, "noReplace")
	if err != nil {
		return Config{}, err, ErrEternityJsonMapError
	}

	// Declare the parsedMetadata object
	var parsedMetadata Metadata

	// Append parsedSpecialFiles to parsedMetadata
	parsedMetadata.SpecialFiles = parsedSpecialFiles

	// Map the metadata
	parsedMetadata.Name, ok = config["name"].(string)
	if !ok {
		return Config{}, errors.New("name is not a string"), ErrEternityJsonMapError
	}
	parsedMetadata.Description, ok = config["desc"].(string)
	if !ok {
		return Config{}, errors.New("desc is not a string"), ErrEternityJsonMapError
	}
	parsedMetadata.LongDescription, ok = config["longDesc"].(string)
	if !ok {
		return Config{}, errors.New("longDesc is not a string"), ErrEternityJsonMapError
	}
	versionString, ok := config["version"].(string)
	if !ok {
		return Config{}, errors.New("version is not a string"), ErrEternityJsonMapError
	}
	versionPointer, err := semver.NewVersion(versionString)
	if err != nil {
		return Config{}, err, ErrEternityJsonMapError
	}
	parsedMetadata.Version = *versionPointer
	parsedMetadata.Author, ok = config["author"].(string)
	if !ok {
		return Config{}, errors.New("author is not a string"), ErrEternityJsonMapError
	}
	parsedMetadata.License, ok = config["license"].(string)
	if !ok {
		return Config{}, errors.New("license is not a string"), ErrEternityJsonMapError
	}
	parsedMetadata.Architecture, ok = config["arch"].(string)
	if !ok {
		return Config{}, errors.New("arch is not a string"), ErrEternityJsonMapError
	}
	dependencies, ok := config["deps"].([]interface{})
	if !ok {
		return Config{}, errors.New("deps is not an array"), ErrEternityJsonMapError
	}
	parsedMetadata.Dependencies, err = interfaceToStringSlice(dependencies, "dependencies")
	if err != nil {
		return Config{}, err, ErrEternityJsonMapError
	}

	// Map build
	var parsedBuild Build
	build, ok := config["build"].(map[string]interface{})
	if !ok {
		return Config{}, errors.New("build is not an object"), ErrEternityJsonMapError
	}
	parsedBuild.Type, ok = build["type"].(string)
	if !ok {
		return Config{}, errors.New("type is not a string"), ErrEternityJsonMapError
	}
	buildDependencies, ok := build["deps"].([]interface{})
	if !ok {
		return Config{}, errors.New("deps is not an array"), ErrEternityJsonMapError
	}
	parsedBuild.Dependencies, err = interfaceToStringSlice(buildDependencies, "deps")
	if err != nil {
		return Config{}, err, ErrEternityJsonMapError
	}
	steps, ok := build["steps"].([]interface{})
	if !ok {
		return Config{}, errors.New("steps is not an array"), ErrEternityJsonMapError
	}
	parsedBuild.Steps, err = interfaceToStringSlice(steps, "steps")
	if err != nil {
		return Config{}, err, ErrEternityJsonMapError
	}
	parsedBuild.TargetRoot, ok = build["root"].(string)
	if !ok {
		return Config{}, errors.New("root is not a string"), ErrEternityJsonMapError
	}
	hooksFolder, ok := build["hooks"].(string)
	if !ok {
		parsedBuild.HooksFolder = PotentiallyNullString{Null: true}
	} else {
		parsedBuild.HooksFolder = PotentiallyNullString{Null: false, Value: hooksFolder}
	}
	filesFolder, ok := build["files"].(string)
	if !ok {
		parsedBuild.FilesFolder = PotentiallyNullString{Null: true}
	} else {
		parsedBuild.FilesFolder = PotentiallyNullString{Null: false, Value: filesFolder}
	}

	// Create the final Config object
	parsedConfig := Config{
		Metadata: parsedMetadata,
		Build:    parsedBuild,
	}

	// Return the final Config object
	return parsedConfig, nil, nil
}
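
// exampleEternityJSON is an illustrative, hypothetical eternity.json that ParseConfig above
// would accept. It is included purely as an example: every key mirrors a lookup performed in
// ParseConfig, while the values (package name, build steps, folder names) are placeholders
// rather than anything taken from a real package.
var exampleEternityJSON = []byte(`{
	"name": "hello",
	"desc": "Example package",
	"longDesc": "A longer description of the example package.",
	"version": "1.0.0",
	"author": "Jane Doe",
	"license": "GPL-3.0-only",
	"arch": "amd64",
	"deps": [],
	"specialFiles": {
		"noDelete": ["etc/hello.conf"],
		"noReplace": ["etc/hello.conf"]
	},
	"build": {
		"type": "host",
		"deps": ["make"],
		"steps": ["make", "make DESTDIR=/target install"],
		"root": "target",
		"hooks": "hooks",
		"files": "files"
	}
}`)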

var ErrBuildEPKTemporaryDirectoryError = errors.New("error creating temporary directory")
var ErrBuildEPKCreateHooksError = errors.New("error creating hooks directory")
var ErrBuildEPKCopyHooksError = errors.New("error copying hooks")
var ErrBuildEPKChrootError = errors.New("chroot builds are not supported yet")
var ErrBuildEPKUnrestrictedError = errors.New("unrestricted builds are not supported yet")
var ErrBuildEPKBuildShError = errors.New("error creating build.sh")
var ErrBuildEPKWritingBuildShError = errors.New("error writing to build.sh")
var ErrBuildEPKTargetRootError = errors.New("error creating target root")
var ErrBuildEPKExecutingBuildShError = errors.New("error executing build.sh")
var ErrBuildEPKCountingFilesError = errors.New("error counting files")

// BuildEPK builds the EPK package into a build directory
func BuildEPK(projectDir string, inMemory bool, buildConfig Build, logger *Logger) (string, error, error) {
	var tempDir string

	switch buildConfig.Type {
	case "chroot":
		return "", nil, ErrBuildEPKChrootError
	case "unrestricted":
		return "", nil, ErrBuildEPKUnrestrictedError
	case "host":
		// Set up the temp dir
		var err error
		if inMemory {
			// Builds in /tmp. This means that the program must fit in RAM. Luckily, most programs do.
			// If you're building a large program, you might want to consider using a disk build.
			tempDir, err = os.MkdirTemp("/tmp", "eternity-build-")
		} else {
			// Builds on disk. This is slower, but if your program can't fit in RAM it's the only option.
			// If your program can fit in RAM, you might want to consider using an in-memory build.
			tempDir, err = os.MkdirTemp(projectDir, "eternity-build-")
		}
		if err != nil {
			return tempDir, err, ErrBuildEPKTemporaryDirectoryError
		}

		// Copy the hooks folder
		if !buildConfig.HooksFolder.Null {
			hooksDir := filepath.Join(projectDir, buildConfig.HooksFolder.Value)
			targetHooksDir := filepath.Join(tempDir, buildConfig.HooksFolder.Value)
			logger.LogFunc(Log{
				Level: "INFO", Content: "Copying hooks from " + hooksDir + " to " + targetHooksDir, Prompt: false,
			})

			err = os.MkdirAll(targetHooksDir, 0755)
			if err != nil {
				return tempDir, err, ErrBuildEPKCreateHooksError
			}

			err = os.CopyFS(targetHooksDir, os.DirFS(hooksDir))
			if err != nil {
				return tempDir, err, ErrBuildEPKCopyHooksError
			}
		}

		// Generate the shell script
		logger.LogFunc(Log{
			Level: "INFO", Content: "Generating shell script", Prompt: false,
		})

		// Create the shell script
		shellScript := "#!/bin/sh\n"
		for _, step := range buildConfig.Steps {
			shellScript += step + "\n"
		}

		file, err := os.OpenFile(tempDir+"/build.sh", os.O_CREATE|os.O_RDWR, 0755)
		if err != nil {
			return tempDir, err, ErrBuildEPKBuildShError
		}

		_, err = file.WriteString(shellScript)
		if err != nil {
			return tempDir, err, ErrBuildEPKWritingBuildShError
		}

		// Close build.sh so the container sees a fully written script
		err = file.Close()
		if err != nil {
			return tempDir, err, ErrBuildEPKWritingBuildShError
		}

		// Set up the target root
		targetRoot := filepath.Join(tempDir, buildConfig.TargetRoot)
		err = os.MkdirAll(targetRoot, 0755)
		if err != nil {
			return tempDir, err, ErrBuildEPKTargetRootError
		}

		// Execute the shell script in BWrap
		logger.LogFunc(Log{
			Level: "INFO", Content: "Starting up container environment (replicating host files)", Prompt: false,
		})

		// Allow me to explain why it's in BWrap. It's very difficult to cut off internet access without root, so I just
		// copy-pasted most of the host files into the container, then disabled networking. This also allows us to use
		// fakeroot and minimises the blast radius of a malicious package (hopefully) by not allowing the home directory
		// or any files owned by root to be viewed or modified (too bad if you've got sensitive data in /var or /etc :P)
		arguments := []string{
			"--unshare-net",
			"--bind", "/bin", "/bin",
			"--bind", "/lib", "/lib",
			"--bind", "/lib64", "/lib64",
			"--bind", "/usr", "/usr",
			"--bind", "/etc", "/etc",
			"--bind", "/var", "/var",
			"--bind", "/sys", "/sys",
			"--bind", "/opt", "/opt",
			"--bind", targetRoot, filepath.Join("/", buildConfig.TargetRoot),
			"--bind", tempDir, "/eternity",
			"--dev", "/dev",
			"--tmpfs", "/run",
			"--tmpfs", "/tmp",
			"--proc", "/proc",
			"/usr/bin/fakeroot-tcp", "--",
			"/bin/sh", "/eternity/build.sh",
		}

		if !buildConfig.FilesFolder.Null {
			// The command (the last four elements) must stay at the end, so strip it off,
			// add the extra bind mount for the files folder, then re-append the command.
			arguments = arguments[:len(arguments)-4]
			arguments = append(
				arguments, "--bind", filepath.Join(projectDir, buildConfig.FilesFolder.Value), filepath.Join("/", buildConfig.FilesFolder.Value),
				"/usr/bin/fakeroot-tcp", "--",
				"/bin/sh", "/eternity/build.sh",
			)
		}

		err = exec.Command("bwrap", arguments...).Run()
		if err != nil {
			return tempDir, err, ErrBuildEPKExecutingBuildShError
		}

		// Hopefully, the build was successful. Let's give the user a file count.
		var fileCount int
		// We start at -1 because the root directory is not counted
		dirCount := -1
		err = filepath.Walk(targetRoot, func(path string, info os.FileInfo, err error) error {
			if err != nil {
				return err
			}
			if info.IsDir() {
				dirCount++
			} else {
				fileCount++
			}
			return nil
		})
		if err != nil {
			return tempDir, err, ErrBuildEPKCountingFilesError
		}

		logger.LogFunc(Log{
			Level: "INFO", Content: "Build successful. " + strconv.Itoa(fileCount) + " files and " + strconv.Itoa(dirCount) + " directories created.", Prompt: false,
		})
	}

	return tempDir, nil, nil
}

// CreateTar creates a tar archive from a directory
func CreateTar(targetDir string, output io.Writer) error {
	tarWriter := tar.NewWriter(output)
	err := filepath.Walk(targetDir, func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}

		if !info.Mode().IsRegular() {
			return nil
		}

		header, err := tar.FileInfoHeader(info, path)
		if err != nil {
			return err
		}

		header.Name = strings.TrimPrefix(strings.Replace(path, targetDir, "", -1), string(filepath.Separator))

		err = tarWriter.WriteHeader(header)
		if err != nil {
			return err
		}

		file, err := os.Open(path)
		if err != nil {
			return err
		}

		_, err = io.Copy(tarWriter, file)
		if err != nil {
			return err
		}

		err = file.Close()
		if err != nil {
			return err
		}

		return nil
	})

	if err != nil {
		return err
	}

	return tarWriter.Close()
}

// These errors are in the wrong order due to this function being rewritten.
// Oh well, not like it matters.

// ConstPackageEPKMagicNumber is the magic number for an EPK file: "epk" in ASCII / UTF-8
var ConstPackageEPKMagicNumber = []byte{0x65, 0x70, 0x6B}

// ConstPackageEPKBigEndian is the letter "b" in ASCII / UTF-8
var ConstPackageEPKBigEndian = []byte{0x62}

// ConstPackageEPKLittleEndian is the letter "l" in ASCII / UTF-8
var ConstPackageEPKLittleEndian = []byte{0x6C}

// ConstPackageEPKInitialByteOffset is the initial byte offset for an EPK file until we arrive at the signature.
// 12 = 3 + 1 + 8: 3 for the magic number, 1 for the endianness marker, and 8 for the tar offset.
var ConstPackageEPKInitialByteOffset = 12

// SignatureLength is the length of the ed25519 signature in bytes
var SignatureLength = 64

// PublicKeyLength is the length of the ed25519 public key in bytes
var PublicKeyLength = 32

// ConstPackageEPKMetadataOffset is the offset of the metadata in the EPK file:
// the initial byte offset (12) plus the signature (64) and the public key (32).
var ConstPackageEPKMetadataOffset = 108
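
// readEPKHeader is an illustrative sketch and not part of the original API: it shows how the
// fixed-size header that PackageEPK below lays out could be read back using the constants above.
// The layout it assumes, derived from PackageEPK, is:
//
//	[0:3]    magic number "epk"
//	[3:4]    endianness marker, 'l' or 'b'
//	[4:12]   tar offset as a uint64 in the declared endianness
//	[12:76]  ed25519 signature over xxhash64(metadata JSON || compressed tar)
//	[76:108] ed25519 public key
//	[108:tarOffset] metadata JSON, followed by the zstd-compressed tar from tarOffset to EOF
func readEPKHeader(file *os.File) (tarOffset uint64, signature []byte, publicKey ed25519.PublicKey, err error) {
	// Read the whole fixed-size header in one go
	header := make([]byte, ConstPackageEPKMetadataOffset)
	if _, err = io.ReadFull(file, header); err != nil {
		return 0, nil, nil, err
	}

	// Check the magic number
	if !bytes.Equal(header[:3], ConstPackageEPKMagicNumber) {
		return 0, nil, nil, errors.New("not an EPK file")
	}

	// Decode the tar offset in whichever byte order the endianness marker declares
	if header[3] == ConstPackageEPKBigEndian[0] {
		tarOffset = binary.BigEndian.Uint64(header[4:12])
	} else {
		tarOffset = binary.LittleEndian.Uint64(header[4:12])
	}

	// Slice out the signature and public key using the package constants
	signature = header[ConstPackageEPKInitialByteOffset : ConstPackageEPKInitialByteOffset+SignatureLength]
	publicKey = ed25519.PublicKey(header[ConstPackageEPKInitialByteOffset+SignatureLength:ConstPackageEPKMetadataOffset])
	return tarOffset, signature, publicKey, nil
}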

// All these errors are out of order once I rewrote this to stream instead of using a buffer.
// Oh well, not like it matters.

var ErrPackageEPKFailedToWriteHash = errors.New("error writing hash to EPK file")
var ErrPackageEPKFailedToSeek = errors.New("error seeking in EPK file")
var ErrPackageEPKCreateDistDirError = errors.New("error creating dist directory")
var ErrPackageEPKMoveToDistError = errors.New("error moving to dist directory")
var ErrPackageEPKTarError = errors.New("error creating tar")
var ErrPackageEPKJSONMarshal = errors.New("error marshalling JSON")
var ErrPackageEPKCreateCompressionWriterError = errors.New("error creating ZStandard writer")
var ErrPackageEPKCompressCloseError = errors.New("error closing EPK ZStandard writer")
var ErrPackageEPKCannotOpenFile = errors.New("error opening EPK file for writing")
var ErrPackageEPKCannotWriteFile = errors.New("error writing to EPK file")
var ErrPackageEPKCannotCloseFile = errors.New("error closing EPK file")

// PackageEPK packages the EPK work directory into an EPK file
func PackageEPK(metaData Metadata, build Build, tempDir string, output string, privateKey ed25519.PrivateKey, logger *Logger) (error, error) {
	// Create the EPK
	logger.LogFunc(Log{
		Level: "INFO", Content: "Packaging EPK", Prompt: false,
	})

	// Ok. Let's construct targetDir, then hooksDir
	targetDir := filepath.Join(tempDir, build.TargetRoot)
	distDir := tempDir + "/dist"

	err := os.MkdirAll(distDir, 0755)
	if err != nil {
		return err, ErrPackageEPKCreateDistDirError
	}

	err = os.Rename(targetDir, distDir+"/root")
	if err != nil {
		return err, ErrPackageEPKMoveToDistError
	}

	if !build.HooksFolder.Null {
		hooksDir := filepath.Join(tempDir, build.HooksFolder.Value)
		err = os.Rename(hooksDir, distDir+"/hooks")
		if err != nil {
			return err, ErrPackageEPKMoveToDistError
		}
	}

	// Map the metadata to a JSON string
	logger.LogFunc(Log{
		Level: "INFO", Content: "Calculating package metadata", Prompt: false,
	})

	dataTemplate := map[string]interface{}{
		"name":     metaData.Name,
		"author":   metaData.Author,
		"version":  metaData.Version.String(),
		"desc":     metaData.Description,
		"longDesc": metaData.LongDescription,
		"license":  metaData.License,
		"arch":     metaData.Architecture,
		"deps":     metaData.Dependencies,
		"specialFiles": map[string][]string{
			"noDelete":  metaData.SpecialFiles.NoDelete,
			"noReplace": metaData.SpecialFiles.NoReplace,
		},
	}

	// Make the data template into a JSON string
	dataTemplateBytes, err := json.Marshal(dataTemplate)
	if err != nil {
		return err, ErrPackageEPKJSONMarshal
	}

	// Calculate the offsets
	logger.LogFunc(Log{
		Level: "INFO", Content: "Calculating binary offsets", Prompt: false,
	})

	// Calculate the length of the data template
	dataTemplateLength := int64(len(dataTemplateBytes))

	// Calculate the tar offset
	tarOffset := int64(ConstPackageEPKMetadataOffset) + dataTemplateLength

	logger.LogFunc(Log{
		Level: "INFO", Content: "Calculating binary properties", Prompt: false,
	})

	// We need to determine the endianness of the architecture so that it's optimal for the target system.
	// We assume little-endian by default because most architectures are little-endian (why would you use big-endian?)
	littleEndian := true
	switch metaData.Architecture {
	case "ppc64", "ppc", "mips64", "mips", "s390", "s390x", "sparc64", "sparc":
		littleEndian = false
	default:
		littleEndian = true
	}

	// Create the byte array for the tar offset
	tarOffsetBytes := make([]byte, 8)

	// Write as much as we can to the file
	logger.LogFunc(Log{
		Level: "INFO", Content: "Writing to file", Prompt: false,
	})

	// Open the output file
	file, err := os.OpenFile(output, os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0644)
	if err != nil {
		return err, ErrPackageEPKCannotOpenFile
	}

	// Write the magic number
	_, err = file.Write(ConstPackageEPKMagicNumber)
	if err != nil {
		return err, ErrPackageEPKCannotWriteFile
	}

	// Write the endianness marker and encode the tar offset in the matching byte order.
	// I know it's wasteful to convert an int64 to a uint64, but binary doesn't support int64
	// with a custom endianness, and file.Seek doesn't support uint64.
	if littleEndian {
		binary.LittleEndian.PutUint64(tarOffsetBytes, uint64(tarOffset))
		_, err = file.Write(ConstPackageEPKLittleEndian)
	} else {
		binary.BigEndian.PutUint64(tarOffsetBytes, uint64(tarOffset))
		_, err = file.Write(ConstPackageEPKBigEndian)
	}
	if err != nil {
		return err, ErrPackageEPKCannotWriteFile
	}

	_, err = file.Write(tarOffsetBytes)
	if err != nil {
		return err, ErrPackageEPKCannotWriteFile
	}
	_, err = file.WriteAt(dataTemplateBytes, int64(ConstPackageEPKMetadataOffset))
	if err != nil {
		return err, ErrPackageEPKCannotWriteFile
	}

	// Create the tar archive
	logger.LogFunc(Log{
		Level: "INFO", Content: "Creating tar", Prompt: false,
	})

	// Move the file pointer to the tar offset so that we can write the tar archive
	seek, err := file.Seek(tarOffset, io.SeekStart)
	if err != nil {
		return err, ErrPackageEPKFailedToSeek
	}
	if seek != tarOffset {
		return errors.New("seek did not reach the tar offset"), ErrPackageEPKFailedToSeek
	}

	// Create the hash writer (xxhash64, which is what gets signed below)
	contentHash := xxhash.New()
	_, err = contentHash.Write(dataTemplateBytes)
	if err != nil {
		return err, ErrPackageEPKFailedToWriteHash
	}

	// Create a multi-writer so we can write to the file and the hash at the same time
	multiWriter := io.MultiWriter(file, contentHash)

	// Create the ZStandard writer
	writer, err := zstd.NewWriter(multiWriter, zstd.WithEncoderLevel(zstd.SpeedDefault))
	if err != nil {
		return err, ErrPackageEPKCreateCompressionWriterError
	}

	// We start writing the tar archive
	err = CreateTar(distDir, writer)
	if err != nil {
		return err, ErrPackageEPKTarError
	}

	// Close the ZStandard writer
	err = writer.Close()
	if err != nil {
		return err, ErrPackageEPKCompressCloseError
	}

	// Great, let's sign the EPK
	logger.LogFunc(Log{
		Level: "INFO", Content: "Signing EPK", Prompt: false,
	})

	// Sign the hash
	signature := ed25519.Sign(privateKey, contentHash.Sum(nil))
	publicKey := privateKey.Public().(ed25519.PublicKey)

	// Write the signature and public key to the file
	// Reverse the pointer back to the start of the file so our offsets are correct
	_, err = file.Seek(0, io.SeekStart)
	if err != nil {
		return err, ErrPackageEPKFailedToSeek
	}

	// Write the signature
	_, err = file.WriteAt(signature, int64(ConstPackageEPKInitialByteOffset))
	if err != nil {
		return err, ErrPackageEPKCannotWriteFile
	}

	// Write the public key
	_, err = file.WriteAt(publicKey, int64(ConstPackageEPKInitialByteOffset)+int64(SignatureLength))
	if err != nil {
		return err, ErrPackageEPKCannotWriteFile
	}

	// Close the file
	err = file.Close()
	if err != nil {
		return err, ErrPackageEPKCannotCloseFile
	}

	return nil, nil
}
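
// exampleBuildPipeline is an illustrative sketch and not part of the original API: it shows how
// ParseConfig, BuildEPK and PackageEPK are expected to chain together, based only on their
// signatures above. The logger, paths and key handling here are hypothetical placeholders,
// and the temporary build directory is simply removed at the end.
func exampleBuildPipeline(projectDir string, privateKey ed25519.PrivateKey) error {
	// A minimal logger that writes every message to standard error and never prompts
	logger := &Logger{
		LogFunc: func(l Log) string {
			_, _ = os.Stderr.WriteString("[" + l.Level + "] " + l.Content + "\n")
			return ""
		},
		PromptSupported: false,
	}

	// Parse eternity.json from the project directory
	config, detail, wrapped := ParseConfig(filepath.Join(projectDir, "eternity.json"), logger)
	if detail != nil || wrapped != nil {
		return errors.Join(wrapped, detail)
	}

	// Build into a temporary directory (in-memory, i.e. under /tmp)
	tempDir, detail, wrapped := BuildEPK(projectDir, true, config.Build, logger)
	if detail != nil || wrapped != nil {
		return errors.Join(wrapped, detail)
	}
	defer func() { _ = os.RemoveAll(tempDir) }()

	// Package the build directory into <name>.epk next to the project
	outputPath := filepath.Join(projectDir, config.Metadata.Name+".epk")
	detail, wrapped = PackageEPK(config.Metadata, config.Build, tempDir, outputPath, privateKey, logger)
	if detail != nil || wrapped != nil {
		return errors.Join(wrapped, detail)
	}
	return nil
}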