package lib

import (
	"archive/tar"
	"bytes"
	"crypto/ed25519"
	"encoding/binary"
	"encoding/json"
	"errors"
	"io"
	"math/big"
	"os"
	"os/exec"
	"path/filepath"
	"strconv"
	"strings"

	"github.com/Masterminds/semver"
	"github.com/cespare/xxhash/v2"
	"github.com/klauspost/compress/zstd"
)

// SpecialFiles is a struct that contains the special files that are not to be deleted or replaced
type SpecialFiles struct {
	NoDelete  []string
	NoReplace []string
}

// Metadata is a struct that contains the metadata of the package
type Metadata struct {
	Name            string
	Description     string
	LongDescription string
	Version         semver.Version
	Author          string
	License         string
	Architecture    string
	// The decompressed size may be larger than the int64 allocated for a compressed file
	DecompressedSize big.Int
	Dependencies     []string
	SpecialFiles     SpecialFiles
}

// Build is a struct that contains the build configuration of the package
type Build struct {
	Type         string
	Dependencies []string
	Steps        []string
	TargetRoot   string
	HooksFolder  PotentiallyNullString
	FilesFolder  PotentiallyNullString
}

// PotentiallyNullString is a struct that contains a string that may be null
type PotentiallyNullString struct {
	Value string
	Null  bool
}

// Config is a struct that contains the configuration of the package
type Config struct {
	Metadata Metadata
	Build    Build
}

// Log is a struct that contains the log information
type Log struct {
	Level   string
	Content string
	Prompt  bool
}

// Logger is a struct that contains the functions and properties of the logger
type Logger struct {
	LogFunc         func(Log) string
	PromptSupported bool
}
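
// A minimal Logger might be wired up roughly as in the sketch below (illustrative only; any function
// with this signature works, and the returned string is only consumed when a prompt is issued):
//
//	logger := &Logger{
//		PromptSupported: false,
//		LogFunc: func(l Log) string {
//			println("[" + l.Level + "] " + l.Content)
//			return ""
//		},
//	}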

var ErrEternityJsonOpenError = errors.New("error opening eternity.json")
var ErrEternityJsonReadError = errors.New("error reading eternity.json")
var ErrEternityJsonParseError = errors.New("error parsing eternity.json")
var ErrEternityJsonMapError = errors.New("error mapping eternity.json")

// interfaceToStringSlice converts an interface slice to a string slice
func interfaceToStringSlice(interfaceSlice []interface{}, interfaceName string) ([]string, error) {
	// Yes, it's meant to be empty and not nil: JSON arrays are empty, not nil
	//goland:noinspection GoPreferNilSlice
	stringSlice := []string{}
	for _, interfaceValue := range interfaceSlice {
		stringValue, ok := interfaceValue.(string)
		if !ok {
			return nil, errors.New(interfaceName + " are not strings")
		}
		stringSlice = append(stringSlice, stringValue)
	}

	return stringSlice, nil
}
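
// For reference, an eternity.json accepted by ParseConfig below has roughly this shape (field values
// are illustrative; "hooks" and "files" may be omitted, in which case the corresponding
// PotentiallyNullString is marked Null):
//
//	{
//	  "name": "hello",
//	  "desc": "A short description",
//	  "longDesc": "A longer description",
//	  "version": "1.0.0",
//	  "author": "someone",
//	  "license": "MIT",
//	  "arch": "amd64",
//	  "deps": [],
//	  "specialFiles": {"noDelete": [], "noReplace": []},
//	  "build": {
//	    "type": "host",
//	    "deps": [],
//	    "steps": ["make", "make install DESTDIR=/target"],
//	    "root": "target",
//	    "hooks": "hooks",
//	    "files": "files"
//	  }
//	}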

// ParseConfig parses the eternity.json file
func ParseConfig(path string, logger *Logger) (Config, error, error) {
	// Open eternity.json
	logger.LogFunc(Log{
		Level:   "INFO",
		Content: "Parsing eternity.json",
		Prompt:  false,
	})
	file, err := os.Open(path)
	if err != nil {
		return Config{}, err, ErrEternityJsonOpenError
	}
	// The file is only needed inside this function, so release it once parsing is done
	defer file.Close()

	// Convert the file to a byte buffer
	var fileBytes bytes.Buffer
	_, err = io.Copy(&fileBytes, file)
	if err != nil {
		return Config{}, err, ErrEternityJsonReadError
	}

	// Parse the file as JSON
	var config map[string]interface{}
	err = json.Unmarshal(fileBytes.Bytes(), &config)
	if err != nil {
		return Config{}, err, ErrEternityJsonParseError
	}

	// Map SpecialFiles
	var parsedSpecialFiles SpecialFiles
	specialFiles, ok := config["specialFiles"].(map[string]interface{})
	if !ok {
		return Config{}, errors.New("specialFiles is not an object"), ErrEternityJsonMapError
	}
	noDelete, ok := specialFiles["noDelete"].([]interface{})
	if !ok {
		return Config{}, errors.New("noDelete is not an array"), ErrEternityJsonMapError
	}
	parsedSpecialFiles.NoDelete, err = interfaceToStringSlice(noDelete, "noDelete")
	if err != nil {
		return Config{}, err, ErrEternityJsonMapError
	}
	noReplace, ok := specialFiles["noReplace"].([]interface{})
	if !ok {
		return Config{}, errors.New("noReplace is not an array"), ErrEternityJsonMapError
	}
	parsedSpecialFiles.NoReplace, err = interfaceToStringSlice(noReplace, "noReplace")
	if err != nil {
		return Config{}, err, ErrEternityJsonMapError
	}

	// Declare the parsedMetadata object
	var parsedMetadata Metadata

	// Append parsedSpecialFiles to parsedMetadata
	parsedMetadata.SpecialFiles = parsedSpecialFiles

	// Map the metadata
	parsedMetadata.Name, ok = config["name"].(string)
	if !ok {
		return Config{}, errors.New("name is not a string"), ErrEternityJsonMapError
	}
	parsedMetadata.Description, ok = config["desc"].(string)
	if !ok {
		return Config{}, errors.New("description is not a string"), ErrEternityJsonMapError
	}
	parsedMetadata.LongDescription, ok = config["longDesc"].(string)
	if !ok {
		return Config{}, errors.New("longDesc is not a string"), ErrEternityJsonMapError
	}
	versionString, ok := config["version"].(string)
	if !ok {
		return Config{}, errors.New("version is not a string"), ErrEternityJsonMapError
	}
	versionPointer, err := semver.NewVersion(versionString)
	if err != nil {
		return Config{}, err, ErrEternityJsonMapError
	}
	parsedMetadata.Version = *versionPointer
	parsedMetadata.Author, ok = config["author"].(string)
	if !ok {
		return Config{}, errors.New("author is not a string"), ErrEternityJsonMapError
	}
	parsedMetadata.License, ok = config["license"].(string)
	if !ok {
		return Config{}, errors.New("license is not a string"), ErrEternityJsonMapError
	}
	parsedMetadata.Architecture, ok = config["arch"].(string)
	if !ok {
		return Config{}, errors.New("arch is not a string"), ErrEternityJsonMapError
	}
	dependencies, ok := config["deps"].([]interface{})
	if !ok {
		return Config{}, errors.New("deps is not an array"), ErrEternityJsonMapError
	}
	parsedMetadata.Dependencies, err = interfaceToStringSlice(dependencies, "dependencies")
	if err != nil {
		return Config{}, err, ErrEternityJsonMapError
	}

	// Map build
	var parsedBuild Build
	build, ok := config["build"].(map[string]interface{})
	if !ok {
		return Config{}, errors.New("build is not an object"), ErrEternityJsonMapError
	}
	parsedBuild.Type, ok = build["type"].(string)
	if !ok {
		return Config{}, errors.New("type is not a string"), ErrEternityJsonMapError
	}
	buildDependencies, ok := build["deps"].([]interface{})
	if !ok {
		return Config{}, errors.New("deps is not an array"), ErrEternityJsonMapError
	}
	parsedBuild.Dependencies, err = interfaceToStringSlice(buildDependencies, "deps")
	if err != nil {
		return Config{}, err, ErrEternityJsonMapError
	}
	steps, ok := build["steps"].([]interface{})
	if !ok {
		return Config{}, errors.New("steps is not an array"), ErrEternityJsonMapError
	}
	parsedBuild.Steps, err = interfaceToStringSlice(steps, "steps")
	if err != nil {
		return Config{}, err, ErrEternityJsonMapError
	}
	parsedBuild.TargetRoot, ok = build["root"].(string)
	if !ok {
		return Config{}, errors.New("root is not a string"), ErrEternityJsonMapError
	}
	hooksFolder, ok := build["hooks"].(string)
	if !ok {
		parsedBuild.HooksFolder = PotentiallyNullString{Null: true}
	} else {
		parsedBuild.HooksFolder = PotentiallyNullString{Null: false, Value: hooksFolder}
	}
	filesFolder, ok := build["files"].(string)
	if !ok {
		parsedBuild.FilesFolder = PotentiallyNullString{Null: true}
	} else {
		parsedBuild.FilesFolder = PotentiallyNullString{Null: false, Value: filesFolder}
	}

	// Create the final Config object
	parsedConfig := Config{
		Metadata: parsedMetadata,
		Build:    parsedBuild,
	}

	// Return the final Config object
	return parsedConfig, nil, nil
}

var ErrBuildEPKTemporaryDirectoryError = errors.New("error creating temporary directory")
var ErrBuildEPKCreateHooksError = errors.New("error creating hooks directory")
var ErrBuildEPKCopyHooksError = errors.New("error copying hooks")
var ErrBuildEPKChrootError = errors.New("chroot builds are not supported yet")
var ErrBuildEPKUnrestrictedError = errors.New("unrestricted builds are not supported yet")
var ErrBuildEPKBuildShError = errors.New("error creating build.sh")
var ErrBuildEPKWritingBuildShError = errors.New("error writing to build.sh")
var ErrBuildEPKTargetRootError = errors.New("error creating target root")
var ErrBuildEPKExecutingBuildShError = errors.New("error executing build.sh")
var ErrBuildEPKCountingFilesError = errors.New("error counting files")
var ErrBuildEPKBadBuildType = errors.New("bad build type")

// BuildEPK builds the EPK package into a build directory
func BuildEPK(projectDir string, inMemory bool, buildConfig Build, logger *Logger) (big.Int, string, error, error) {
	var tempDir string

	switch buildConfig.Type {
	case "chroot":
		return *big.NewInt(0), "", nil, ErrBuildEPKChrootError
	case "unrestricted":
		return *big.NewInt(0), "", nil, ErrBuildEPKUnrestrictedError
	case "host":
		// Set up the temp dir
		var err error
		if inMemory {
			// Builds in /tmp. This means that the program must fit in RAM. Luckily, most programs do.
			// If you're building a large program, you might want to consider using a disk build.
			tempDir, err = os.MkdirTemp("/tmp", "eternity-build-")
		} else {
			// Builds on disk. This is slower, but it's the only option if your program can't fit in RAM.
			// If your program can fit in RAM, you might want to consider using an in-memory build.
			tempDir, err = os.MkdirTemp(projectDir, "eternity-build-")
		}
		if err != nil {
			return *big.NewInt(0), tempDir, err, ErrBuildEPKTemporaryDirectoryError
		}

		// Copy the hooks folder
		if !buildConfig.HooksFolder.Null {
			hooksDir := filepath.Join(projectDir, buildConfig.HooksFolder.Value)
			targetHooksDir := filepath.Join(tempDir, buildConfig.HooksFolder.Value)
			logger.LogFunc(Log{
				Level: "INFO", Content: "Copying hooks from " + hooksDir + " to " + targetHooksDir, Prompt: false,
			})

			err = os.MkdirAll(targetHooksDir, 0755)
			if err != nil {
				return *big.NewInt(0), tempDir, err, ErrBuildEPKCreateHooksError
			}

			err = os.CopyFS(targetHooksDir, os.DirFS(hooksDir))
			if err != nil {
				return *big.NewInt(0), tempDir, err, ErrBuildEPKCopyHooksError
			}
		}

		// Generate the shell script
		logger.LogFunc(Log{
			Level: "INFO", Content: "Generating shell script", Prompt: false,
		})

		// Create the shell script
		shellScript := "#!/bin/sh\n"
		for _, step := range buildConfig.Steps {
			shellScript += step + "\n"
		}

		file, err := os.OpenFile(tempDir+"/build.sh", os.O_CREATE|os.O_RDWR, 0755)
		if err != nil {
			return *big.NewInt(0), tempDir, err, ErrBuildEPKBuildShError
		}

		_, err = file.WriteString(shellScript)
		if err != nil {
			return *big.NewInt(0), tempDir, err, ErrBuildEPKWritingBuildShError
		}

		// Release the handle on build.sh so the descriptor isn't left open for the rest of the build;
		// os.File writes are unbuffered, so nothing is lost by closing here
		_ = file.Close()

		// Set up the target root
		targetRoot := filepath.Join(tempDir, buildConfig.TargetRoot)
		err = os.MkdirAll(targetRoot, 0755)
		if err != nil {
			return *big.NewInt(0), tempDir, err, ErrBuildEPKTargetRootError
		}

		// Execute the shell script in BWrap
		logger.LogFunc(Log{
			Level: "INFO", Content: "Starting up container environment (replicating host files)", Prompt: false,
		})

		// Allow me to explain why it's in BWrap. It's very difficult to cut off internet access without root, so I just
		// copy-pasted most of the host files into the container, then disabled networking. This also allows us to use
		// fakeroot and minimises the blast radius of a malicious package (hopefully) by not allowing the home directory
		// or any files owned by root to be viewed or modified (too bad if you've got sensitive data in /var or /etc :P)
		arguments := []string{
			"--unshare-net",
			"--bind", "/bin", "/bin",
			"--bind", "/lib", "/lib",
			"--bind", "/lib64", "/lib64",
			"--bind", "/usr", "/usr",
			"--bind", "/etc", "/etc",
			"--bind", "/var", "/var",
			"--bind", "/sys", "/sys",
			"--bind", "/opt", "/opt",
			"--bind", targetRoot, filepath.Join("/", buildConfig.TargetRoot),
			"--bind", tempDir, "/eternity",
			"--dev", "/dev",
			"--tmpfs", "/run",
			"--tmpfs", "/tmp",
			"--proc", "/proc",
			"/usr/bin/fakeroot-tcp", "--",
			"/bin/sh", "/eternity/build.sh",
		}

		if !buildConfig.FilesFolder.Null {
			// Drop the trailing "fakeroot-tcp -- sh build.sh" invocation, add the bind mount for the
			// files folder, then re-append the invocation so that it stays at the end of the argument list
			arguments = arguments[:len(arguments)-4]
			arguments = append(
				arguments, "--bind", filepath.Join(projectDir, buildConfig.FilesFolder.Value), filepath.Join("/", buildConfig.FilesFolder.Value),
				"/usr/bin/fakeroot-tcp", "--",
				"/bin/sh", "/eternity/build.sh",
			)
		}

		err = exec.Command("bwrap", arguments...).Run()
		if err != nil {
			return *big.NewInt(0), tempDir, err, ErrBuildEPKExecutingBuildShError
		}

		// Hopefully, the build was successful. Let's give the user a file and size count.
		var fileCount int
		var sizeCount big.Int
		// We start at -1 because the root directory is not counted
		dirCount := -1
		err = filepath.Walk(targetRoot, func(path string, info os.FileInfo, err error) error {
			// Propagate any error from Walk before touching info, which may be nil in that case
			if err != nil {
				return err
			}
			if info.IsDir() {
				dirCount++
			} else {
				fileCount++
			}
			// Both directories and files need to have their sizes counted
			sizeCount.Add(&sizeCount, big.NewInt(info.Size()))
			return nil
		})
		if err != nil {
			return *big.NewInt(0), tempDir, err, ErrBuildEPKCountingFilesError
		}

		logger.LogFunc(Log{
			Level: "INFO",
			Content: "Build successful. " + strconv.Itoa(fileCount) + " files and " + strconv.Itoa(dirCount) +
				" directories created," + " totalling " + sizeCount.String() + " bytes.",
			Prompt: false,
		})

		return sizeCount, tempDir, nil, nil
	default:
		return *big.NewInt(0), "", errors.New(buildConfig.Type), ErrBuildEPKBadBuildType
	}
}
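
// On success, the temporary directory returned by BuildEPK contains build.sh, the populated target
// root and, if one was configured, a copy of the hooks folder; PackageEPK below expects this layout
// when it assembles the dist directory.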

// CreateTar creates a tar archive from a directory
func CreateTar(targetDir string, output io.Writer) error {
	tarWriter := tar.NewWriter(output)
	err := filepath.Walk(targetDir, func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}

		if !info.Mode().IsRegular() {
			return nil
		}

		header, err := tar.FileInfoHeader(info, path)
		if err != nil {
			return err
		}

		header.Name = strings.TrimPrefix(strings.Replace(path, targetDir, "", -1), string(filepath.Separator))

		err = tarWriter.WriteHeader(header)
		if err != nil {
			return err
		}

		file, err := os.Open(path)
		if err != nil {
			return err
		}

		_, err = io.Copy(tarWriter, file)
		if err != nil {
			return err
		}

		err = file.Close()
		if err != nil {
			return err
		}

		return nil
	})

	if err != nil {
		return err
	}

	return tarWriter.Close()
}
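
// For reference, the on-disk layout of an EPK file as produced by PackageEPK below is (offsets in bytes):
//
//	[0, 3)             magic number "epk"
//	[3, 4)             endianness marker: "l" (little-endian) or "b" (big-endian)
//	[4, 12)            tar offset as a uint64 in the endianness named above
//	[12, 76)           ed25519 signature over xxHash(metadata JSON + compressed tar)
//	[76, 108)          ed25519 public key
//	[108, tar offset)  package metadata as JSON
//	[tar offset, EOF)  zstd-compressed tar archive of the dist directory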

// ConstPackageEPKMagicNumber is the magic number for an EPK file: "epk" in ASCII / UTF-8
var ConstPackageEPKMagicNumber = []byte{0x65, 0x70, 0x6B}

// ConstPackageEPKBigEndian is the letter "b" in ASCII / UTF-8
var ConstPackageEPKBigEndian = []byte{0x62}

// ConstPackageEPKLittleEndian is the letter "l" in ASCII / UTF-8
var ConstPackageEPKLittleEndian = []byte{0x6C}

// ConstPackageEPKInitialByteOffset is the initial byte offset for an EPK file until we arrive at the signature. 12 = 3 + 1 + 8: 3 for the magic number, 1 for the endian, and 8 for the tar offset
var ConstPackageEPKInitialByteOffset int64 = 12

// ConstPackageEPKSignatureLength is the length of the signature
var ConstPackageEPKSignatureLength int64 = 64

// ConstPackageEPKPublicKeyLength is the length of the public key
var ConstPackageEPKPublicKeyLength int64 = 32

// ConstPackageEPKMetadataOffset is the offset of the metadata in the EPK file
var ConstPackageEPKMetadataOffset = 108

// All these errors are out of order once I rewrote this to stream instead of using a buffer.
// Oh well, not like it matters.

var ErrPackageEPKFailedToWriteHash = errors.New("error writing hash to EPK file")
var ErrPackageEPKFailedToSeek = errors.New("error seeking in EPK file")
var ErrPackageEPKCreateDistDirError = errors.New("error creating dist directory")
var ErrPackageEPKMoveToDistError = errors.New("error moving to dist directory")
var ErrPackageEPKTarError = errors.New("error creating tar")
var ErrPackageEPKJSONMarshal = errors.New("error marshalling JSON")
var ErrPackageEPKCreateCompressionWriterError = errors.New("error creating ZStandard writer")
var ErrPackageEPKCompressCloseError = errors.New("error closing EPK ZStandard writer")
var ErrPackageEPKCannotOpenFile = errors.New("error opening EPK file for writing")
var ErrPackageEPKCannotWriteFile = errors.New("error writing to EPK file")
var ErrPackageEPKCannotCloseFile = errors.New("error closing EPK file")

// PackageEPK packages the EPK work directory into an EPK file
func PackageEPK(metaData Metadata, build Build, tempDir string, output string, privateKey ed25519.PrivateKey, logger *Logger) (error, error) {
	// Create the EPK
	logger.LogFunc(Log{
		Level: "INFO", Content: "Packaging EPK", Prompt: false,
	})

	// Ok. Let's construct targetDir, then hooksDir
	targetDir := filepath.Join(tempDir, build.TargetRoot)
	distDir := tempDir + "/dist"

	err := os.MkdirAll(distDir, 0755)
	if err != nil {
		return err, ErrPackageEPKCreateDistDirError
	}

	err = os.Rename(targetDir, distDir+"/root")
	if err != nil {
		return err, ErrPackageEPKMoveToDistError
	}

	if !build.HooksFolder.Null {
		hooksDir := filepath.Join(tempDir, build.HooksFolder.Value)
		err = os.Rename(hooksDir, distDir+"/hooks")
		if err != nil {
			return err, ErrPackageEPKMoveToDistError
		}
	}

	// Map the metadata to a JSON string
	logger.LogFunc(Log{
		Level: "INFO", Content: "Calculating package metadata", Prompt: false,
	})

	dataTemplate := map[string]interface{}{
		"name":     metaData.Name,
		"author":   metaData.Author,
		"version":  metaData.Version.String(),
		"desc":     metaData.Description,
		"longDesc": metaData.LongDescription,
		"license":  metaData.License,
		"arch":     metaData.Architecture,
		"deps":     metaData.Dependencies,
		"specialFiles": map[string][]string{
			"noDelete":  metaData.SpecialFiles.NoDelete,
			"noReplace": metaData.SpecialFiles.NoReplace,
		},
		"size": metaData.DecompressedSize.String(),
	}

	// Make the data template into a JSON string
	dataTemplateBytes, err := json.Marshal(dataTemplate)
	if err != nil {
		return err, ErrPackageEPKJSONMarshal
	}

	// Calculate the offsets
	logger.LogFunc(Log{
		Level: "INFO", Content: "Calculating binary offsets", Prompt: false,
	})

	// Calculate the length of the data template
	dataTemplateLength := int64(len(dataTemplateBytes))

	// Calculate the tar offset
	tarOffset := int64(ConstPackageEPKMetadataOffset) + dataTemplateLength

	logger.LogFunc(Log{
		Level: "INFO", Content: "Calculating binary properties", Prompt: false,
	})

	// We need to determine the endianness of the architecture so that it's optimal for the target system
	// We assume little-endian by default because most architectures are little-endian (why would you use big-endian?)
	littleEndian := true
	switch metaData.Architecture {
	case "ppc64", "ppc", "mips64", "mips", "s390", "s390x", "sparc64", "sparc":
		littleEndian = false
	}

	// Create the byte arrays for the tar offset
	tarOffsetBytes := make([]byte, 8)

	// Write as much as we can to the file
	logger.LogFunc(Log{
		Level: "INFO", Content: "Writing to file", Prompt: false,
	})

	// Open the output file
	file, err := os.OpenFile(output, os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0644)
	if err != nil {
		return err, ErrPackageEPKCannotOpenFile
	}

	// Write the magic number
	_, err = file.Write(ConstPackageEPKMagicNumber)
	if err != nil {
		return err, ErrPackageEPKCannotWriteFile
	}

	// Write the endianness and correct the offsets based on the endianness
	// I know it's wasteful to convert an int64 to an uint64, but binary doesn't support int64 with custom
	// Endian-ness and file.Seek doesn't support uint64.
	if littleEndian {
		binary.LittleEndian.PutUint64(tarOffsetBytes, uint64(tarOffset))
		_, err = file.Write(ConstPackageEPKLittleEndian)
	} else {
		binary.BigEndian.PutUint64(tarOffsetBytes, uint64(tarOffset))
		_, err = file.Write(ConstPackageEPKBigEndian)
	}
	if err != nil {
		return err, ErrPackageEPKCannotWriteFile
	}

	_, err = file.Write(tarOffsetBytes)
	if err != nil {
		return err, ErrPackageEPKCannotWriteFile
	}
	_, err = file.WriteAt(dataTemplateBytes, int64(ConstPackageEPKMetadataOffset))
	if err != nil {
		return err, ErrPackageEPKCannotWriteFile
	}

	// Create the tar archive
	logger.LogFunc(Log{
		Level: "INFO", Content: "Creating tar", Prompt: false,
	})

	// Move the file pointer to the tar offset so that we can write the tar archive
	seek, err := file.Seek(tarOffset, io.SeekStart)
	if err != nil {
		return err, ErrPackageEPKFailedToSeek
	}
	if seek != tarOffset {
		return errors.New("seek did not reach the tar offset"), ErrPackageEPKFailedToSeek
	}

	// Create the hash writer
	xxHash := xxhash.New()
	_, err = xxHash.Write(dataTemplateBytes)
	if err != nil {
		return err, ErrPackageEPKFailedToWriteHash
	}

	// Create a multi-writer so we can write to the file and the hash at the same time
	multiWriter := io.MultiWriter(file, xxHash)

	// Create the ZStandard writer
	writer, err := zstd.NewWriter(multiWriter, zstd.WithEncoderLevel(zstd.SpeedDefault))
	if err != nil {
		return err, ErrPackageEPKCreateCompressionWriterError
	}

	// We start writing the tar archive
	err = CreateTar(distDir, writer)
	if err != nil {
		return err, ErrPackageEPKTarError
	}

	// Close the ZStandard writer
	err = writer.Close()
	if err != nil {
		return err, ErrPackageEPKCompressCloseError
	}

	// Great, let's sign the EPK
	logger.LogFunc(Log{
		Level: "INFO", Content: "Signing EPK", Prompt: false,
	})

	// Sign the hash
	signature := ed25519.Sign(privateKey, xxHash.Sum(nil))
	publicKey := privateKey.Public().(ed25519.PublicKey)

	// Write the signature and public key to the file
	// Reverse the pointer back to the start of the file so our offsets are correct
	_, err = file.Seek(0, io.SeekStart)
	if err != nil {
		return err, ErrPackageEPKFailedToSeek
	}

	// Write the signature
	_, err = file.WriteAt(signature, ConstPackageEPKInitialByteOffset)
	if err != nil {
		return err, ErrPackageEPKCannotWriteFile
	}

	// Write the public key
	_, err = file.WriteAt(publicKey, ConstPackageEPKInitialByteOffset+ConstPackageEPKSignatureLength)
	if err != nil {
		return err, ErrPackageEPKCannotWriteFile
	}

	// Close the file
	err = file.Close()
	if err != nil {
		return err, ErrPackageEPKCannotCloseFile
	}

	return nil, nil
}
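
// Taken together, a typical packaging run wires these functions up roughly as follows (illustrative
// sketch only; real callers check both returned errors at every step and supply their own paths, key
// and logger):
//
//	config, _, _ := ParseConfig("eternity.json", logger)
//	size, tempDir, _, _ := BuildEPK(projectDir, true, config.Build, logger)
//	config.Metadata.DecompressedSize = size
//	_, _ = PackageEPK(config.Metadata, config.Build, tempDir, "out.epk", privateKey, logger)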

// ConstGenerateRepositoryRepoDataOffset is the offset of the repository data in the repository.erf file: it is 3 (magic) + 64 (the signature) + 32 (the public key) = 99
var ConstGenerateRepositoryRepoDataOffset int64 = 99

// ConstGenerateRepositoryEPKMagicNumber is the magic number for an EPK repository: "eon" in ASCII / UTF-8, for obvious reasons
var ConstGenerateRepositoryEPKMagicNumber = []byte{0x65, 0x6F, 0x6E}

var ErrGenerateRepositoryStatError = errors.New("error stating file or directory")
var ErrGenerateRepositoryNotDirectory = errors.New("not a directory")
var ErrGenerateRepositoryRepositoryNameContainsSlash = errors.New("repository name contains a slash")
var ErrGenerateRepositoryFailedToWalk = errors.New("error walking directory")
var ErrGenerateRepositoryCannotUnmarshalJSON = errors.New("error unmarshalling JSON")
var ErrGenerateRepositoryCannotMarshalJSON = errors.New("error marshalling JSON")
var ErrGenerateRepositoryCannotOpenFile = errors.New("error opening file for writing")
var ErrGenerateRepositoryCannotWriteFile = errors.New("error writing to file")
var ErrGenerateRepositoryCannotCloseFile = errors.New("error closing file")
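
// GenerateRepository scans a directory (recursively) for EPK files and writes a signed repository.erf
// index into that directory. The resulting file is laid out as follows (offsets in bytes):
//
//	[0, 3)    magic number "eon"
//	[3, 67)   ed25519 signature over xxHash(repository JSON)
//	[67, 99)  ed25519 public key
//	[99, EOF) repository name, description, author and package list as JSON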
func GenerateRepository(directory string, privateKey ed25519.PrivateKey, logger *Logger) (error, error) {
	// First, we need to see if the directory exists
	logger.LogFunc(Log{
		Level: "INFO", Content: "Generating repository", Prompt: false,
	})

	info, err := os.Stat(directory)
	if err != nil {
		return err, ErrGenerateRepositoryStatError
	}

	if !info.IsDir() {
		return nil, ErrGenerateRepositoryNotDirectory
	}

	// Create the EPK map
	epkMap := make(map[string]interface{})

	// See if the repository.erf file exists
	_, err = os.Stat(directory + "/repository.erf")
	if err != nil {
		if !errors.Is(err, os.ErrNotExist) {
			return err, ErrGenerateRepositoryStatError
		} else {
			if logger.PromptSupported {
				// Ask the user for the name of the repository
				repoName := logger.LogFunc(Log{
					Level: "PROMPT", Content: "Enter the name of the repository", Prompt: true,
				})

				// Check the repository name does not contain any slashes
				if strings.Contains(repoName, "/") {
					return nil, ErrGenerateRepositoryRepositoryNameContainsSlash
				}

				// Ask the user for the description of the repository
				repoDesc := logger.LogFunc(Log{
					Level: "PROMPT", Content: "Enter a short description of the repository", Prompt: true,
				})

				// Ask the user for the author of the repository
				repoAuthor := logger.LogFunc(Log{
					Level: "PROMPT",
					Content: "Enter your preferred author name. This must be the same as the author name used in " +
						"eternity.json and associated with your keypair, otherwise it will cause issues with EPK" +
						" verification and your repository will be rejected by Eon and cannot be trusted.",
					Prompt: true,
				})

				// Now append the metadata to the EPK map
				epkMap["name"] = repoName
				epkMap["desc"] = repoDesc
				epkMap["author"] = repoAuthor
			} else {
				logger.LogFunc(Log{
					Level: "FATAL",
					Content: "Please fill in the author, name, and description of the repository in repository.erf. " +
						"Your author name must be the same as the author name used in eternity.json and associated with " +
						"your keypair, otherwise it will cause issues with EPK verification and your repository will be " +
						"rejected by Eon and cannot be trusted.",
					Prompt: false,
				})
			}
		}
	} else {
		// Since it does exist, we can extract the name and description from it
		file, err := os.ReadFile(directory + "/repository.erf")
		if err != nil {
			return err, ErrGenerateRepositoryCannotOpenFile
		}

		// Unmarshal the JSON
		var oldRepositoryMap map[string]interface{}

		err = json.Unmarshal(file[ConstGenerateRepositoryRepoDataOffset:], &oldRepositoryMap)
		if err != nil {
			return err, ErrGenerateRepositoryCannotUnmarshalJSON
		}

		// Copy the author, name, and description to the EPK map
		epkMap["name"] = oldRepositoryMap["name"]
		epkMap["desc"] = oldRepositoryMap["desc"]
		epkMap["author"] = oldRepositoryMap["author"]
	}

	// Add a list of packages to the EPK map
	epkMap["packages"] = make([]map[string]interface{}, 0)

	// Now, walk the directory
	err = filepath.Walk(directory, func(path string, info os.FileInfo, err error) error {
		// If error is not nil, return it
		if err != nil {
			return err
		}

		// Ignore directories
		if info.IsDir() {
			return nil
		}

		// Ok. We need to check if the file actually is an EPK file
		file, err := os.Open(path)
		if err != nil {
			return err
		}
		// Close the handle once this entry has been examined so descriptors aren't leaked across the walk
		defer file.Close()

		// Read the first 3 bytes
		magicNumber := make([]byte, 3)
		_, err = file.Read(magicNumber)
		if err != nil {
			return err
		}

		// Check if the magic number is correct
		if !bytes.Equal(magicNumber, ConstPackageEPKMagicNumber) {
			// It isn't an EPK file, so we can ignore it
			return nil
		}

		// We need to create a hash of the file
		// (the three magic-number bytes were already consumed above, so the hash covers the rest of the file)
		xxHash := xxhash.New()
		_, err = io.Copy(xxHash, file)
		if err != nil {
			return err
		}

		// Extract the metadata. First, we get the endian-ness
		var littleEndian bool
		endian := make([]byte, 1)
		_, err = file.ReadAt(endian, 3)
		if err != nil {
			return err
		}
		if bytes.Equal(endian, ConstPackageEPKLittleEndian) {
			littleEndian = true
		} else if bytes.Equal(endian, ConstPackageEPKBigEndian) {
			littleEndian = false
		} else {
			return errors.New("invalid endianness")
		}

		// Now we get the tar offset
		var tarOffset int64
		tarOffsetBytes := make([]byte, 8)
		_, err = file.ReadAt(tarOffsetBytes, 4)
		if err != nil {
			return err
		}

		// Now we convert the tar offset to an int64
		if littleEndian {
			tarOffset = int64(binary.LittleEndian.Uint64(tarOffsetBytes))
		} else {
			tarOffset = int64(binary.BigEndian.Uint64(tarOffsetBytes))
		}

		// Now we can read in the metadata
		metadataBytes := make([]byte, tarOffset-int64(ConstPackageEPKMetadataOffset))
		_, err = file.ReadAt(metadataBytes, int64(ConstPackageEPKMetadataOffset))
		if err != nil {
			return err
		}

		// Now we can unmarshal the metadata
		var metadata map[string]interface{}
		err = json.Unmarshal(metadataBytes, &metadata)
		if err != nil {
			return err
		}

		// Now we have the hash, we need to add it to our data template
		dataTemplate := make(map[string]interface{})
		dataTemplate["hash"] = xxHash.Sum64()

		// Now we add some basic metadata
		dataTemplate["name"] = metadata["name"]
		dataTemplate["author"] = metadata["author"]
		dataTemplate["version"] = metadata["version"]
		dataTemplate["size"] = metadata["size"]
		dataTemplate["arch"] = metadata["arch"]
		dataTemplate["desc"] = metadata["desc"]
		dataTemplate["deps"] = metadata["deps"]

		// We add the path to the EPK file, relative to the directory
		relativePath, err := filepath.Rel(directory, path)
		if err != nil {
			return err
		}

		dataTemplate["path"] = relativePath

		// Append it to a list in the EPK map
		epkMap["packages"] = append(epkMap["packages"].([]map[string]interface{}), dataTemplate)

		return nil
	})

	// This error message is a bit vague, but meh.
	if err != nil {
		return err, ErrGenerateRepositoryFailedToWalk
	}

	// Great, now we need to marshal the EPK map and write it to a file
	epkMapBytes, err := json.Marshal(epkMap)
	if err != nil {
		return err, ErrGenerateRepositoryCannotMarshalJSON
	}

	// Write the EPK map to a file
	file, err := os.OpenFile(directory+"/repository.erf", os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0644)
	if err != nil {
		return err, ErrGenerateRepositoryCannotOpenFile
	}

	// Sign the epk map
	xxHash := xxhash.New()
	_, err = xxHash.Write(epkMapBytes)
	if err != nil {
		return err, nil
	}

	signature := ed25519.Sign(privateKey, xxHash.Sum(nil))
	publicKey := privateKey.Public().(ed25519.PublicKey)

	// Write magic number
	_, err = file.Write(ConstGenerateRepositoryEPKMagicNumber)
	if err != nil {
		return err, ErrGenerateRepositoryCannotWriteFile
	}

	// Write signature
	_, err = file.WriteAt(signature, 3)
	if err != nil {
		return err, ErrGenerateRepositoryCannotWriteFile
	}

	// Write public key
	_, err = file.WriteAt(publicKey, 67)
	if err != nil {
		return err, ErrGenerateRepositoryCannotWriteFile
	}

	// Write the EPK map to the file
	_, err = file.WriteAt(epkMapBytes, ConstGenerateRepositoryRepoDataOffset)
	if err != nil {
		return err, ErrGenerateRepositoryCannotWriteFile
	}

	// Close the file
	err = file.Close()
	if err != nil {
		return err, ErrGenerateRepositoryCannotCloseFile
	}

	return nil, nil
}