package lib

import (
	"bytes"
	"errors"
	"io"
	"os"
	"strconv"
	"strings"

	"archive/tar"
	"crypto/ed25519"
	"encoding/binary"
	"encoding/json"
	"os/exec"
	"path/filepath"

	"github.com/Masterminds/semver"
	"github.com/cespare/xxhash/v2"
	"github.com/klauspost/compress/zstd"
)

// SpecialFiles is a struct that contains the special files that are not to be deleted or replaced
type SpecialFiles struct {
	NoDelete  []string `json:"noDelete"`
	NoReplace []string `json:"noReplace"`
}

// Metadata is a struct that contains the metadata of the package
type Metadata struct {
	Name            string `json:"name"`
	Description     string `json:"desc"`
	LongDescription string `json:"longDesc"`
	Version         *semver.Version
	VersionString   string `json:"version"`
	Author          string `json:"author"`
	License         string `json:"license"`
	Architecture    string `json:"arch"`
	// The decompressed size may be larger than the int64 allocated for a compressed file
	DecompressedSize int64
	Dependencies     []string     `json:"deps"`
	SpecialFiles     SpecialFiles `json:"specialFiles"`
}

// Build is a struct that contains the build configuration of the package
type Build struct {
	Type         string   `json:"type"`
	Dependencies []string `json:"deps"`
	Steps        []string `json:"steps"`
	TargetRoot   string   `json:"root"`
	HooksFolder  string   `json:"hooks"`
	FilesFolder  string   `json:"files"`
}

// Config is a struct that contains the configuration of the package
type Config struct {
	Metadata Metadata `json:"metadata"`
	Build    Build    `json:"build"`
}

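// For reference, an eternity.json that maps onto Config looks roughly like the
// following. The field names come from the struct tags above; the values are
// illustrative only.
//
//	{
//	  "metadata": {
//	    "name": "hello",
//	    "desc": "An example package",
//	    "longDesc": "A longer description of the example package",
//	    "version": "1.0.0",
//	    "author": "Jane Doe",
//	    "license": "MIT",
//	    "arch": "amd64",
//	    "deps": [],
//	    "specialFiles": {"noDelete": [], "noReplace": []}
//	  },
//	  "build": {
//	    "type": "host",
//	    "deps": [],
//	    "steps": ["make", "make install DESTDIR=/target"],
//	    "root": "target",
//	    "hooks": "hooks",
//	    "files": "files"
//	  }
//	}
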
// Log is a struct that contains the log information
type Log struct {
	Level   string
	Content string
	Prompt  bool
}

// Logger is a struct that contains the functions and properties of the logger
type Logger struct {
	LogFunc         func(Log) string
	PromptSupported bool
	StdoutSupported bool
	Stdout          io.Writer
}

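// newStdoutLogger is a minimal sketch of a Logger implementation (it is not part of
// the original API; the name is illustrative). It writes every log line to os.Stdout
// and does not support prompts, so callers that need PROMPT-level interaction must
// supply their own LogFunc.
func newStdoutLogger() *Logger {
	return &Logger{
		LogFunc: func(l Log) string {
			// Print the level and message; prompts are not supported, so return nothing
			_, _ = os.Stdout.WriteString("[" + l.Level + "] " + l.Content + "\n")
			return ""
		},
		PromptSupported: false,
		StdoutSupported: true,
		Stdout:          os.Stdout,
	}
}
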
var ErrEternityJsonOpenError = errors.New("error opening eternity.json")
var ErrEternityJsonReadError = errors.New("error reading eternity.json")
var ErrEternityJsonParseError = errors.New("error parsing eternity.json")
var ErrEternityJsonMapError = errors.New("error mapping eternity.json")

// ParseConfig parses the eternity.json file
func ParseConfig(path string, logger *Logger) (Config, error, error) {
	// Open eternity.json
	logger.LogFunc(Log{
		Level:   "INFO",
		Content: "Parsing eternity.json",
		Prompt:  false,
	})
	file, err := os.Open(path)
	if err != nil {
		return Config{}, err, ErrEternityJsonOpenError
	}
	// Make sure the file handle is released once parsing is done
	defer file.Close()

	// Parse the file as JSON
	var config Config
	decoder := json.NewDecoder(file)
	err = decoder.Decode(&config)
	if err != nil {
		return Config{}, err, ErrEternityJsonParseError
	}

	// Map the JSON version to a semver version
	config.Metadata.Version, err = semver.NewVersion(config.Metadata.VersionString)
	if err != nil {
		return Config{}, err, ErrEternityJsonMapError
	}

	// Return the final Config object
	return config, nil, nil
}

var ErrBuildEPKTemporaryDirectoryError = errors.New("error creating temporary directory")
var ErrBuildEPKCreateHooksError = errors.New("error creating hooks directory")
var ErrBuildEPKCopyHooksError = errors.New("error copying hooks")
var ErrBuildEPKChrootError = errors.New("chroot builds are not supported yet")
var ErrBuildEPKUnrestrictedError = errors.New("unrestricted builds are not supported yet")
var ErrBuildEPKBuildShError = errors.New("error creating build.sh")
var ErrBuildEPKWritingBuildShError = errors.New("error writing to build.sh")
var ErrBuildEPKTargetRootError = errors.New("error creating target root")
var ErrBuildEPKExecutingBuildShError = errors.New("error executing build.sh")
var ErrBuildEPKCountingFilesError = errors.New("error counting files")
var ErrBuildEPKBadBuildType = errors.New("bad build type")

// BuildEPK builds the EPK package into a build directory
func BuildEPK(projectDir string, inMemory bool, buildConfig Build, logger *Logger) (int64, string, error, error) {
	var tempDir string

	switch buildConfig.Type {
	case "chroot":
		return 0, "", nil, ErrBuildEPKChrootError
	case "unrestricted":
		return 0, "", nil, ErrBuildEPKUnrestrictedError
	case "host":
		// Set up the temp dir
		var err error
		if inMemory {
			// Builds in /tmp. This means that the program must fit in RAM. Luckily, most programs do.
			// If you're building a large program, you might want to consider using a disk build.
			tempDir, err = os.MkdirTemp("/tmp", "eternity-build-")
		} else {
			// Builds on disk. This is slower, but it is the only option if the program can't fit in RAM.
			// If your program can fit in RAM, you might want to consider using an in-memory build.
			tempDir, err = os.MkdirTemp(projectDir, "eternity-build-")
		}
		if err != nil {
			return 0, tempDir, err, ErrBuildEPKTemporaryDirectoryError
		}

		// Copy the hooks folder
		if buildConfig.HooksFolder != "" {
			hooksDir := filepath.Join(projectDir, buildConfig.HooksFolder)
			targetHooksDir := filepath.Join(tempDir, buildConfig.HooksFolder)
			logger.LogFunc(Log{
				Level: "INFO", Content: "Copying hooks from " + hooksDir + " to " + targetHooksDir, Prompt: false,
			})

			err = os.MkdirAll(targetHooksDir, 0755)
			if err != nil {
				return 0, tempDir, err, ErrBuildEPKCreateHooksError
			}

			err = os.CopyFS(targetHooksDir, os.DirFS(hooksDir))
			if err != nil {
				return 0, tempDir, err, ErrBuildEPKCopyHooksError
			}
		}

		// Generate the shell script
		logger.LogFunc(Log{
			Level: "INFO", Content: "Generating shell script", Prompt: false,
		})

		// Create the shell script
		shellScript := "#!/bin/sh\n"
		for _, step := range buildConfig.Steps {
			shellScript += step + "\n"
		}

		file, err := os.OpenFile(tempDir+"/build.sh", os.O_CREATE|os.O_RDWR, 0755)
		if err != nil {
			return 0, tempDir, err, ErrBuildEPKBuildShError
		}

		_, err = file.WriteString(shellScript)
		if err != nil {
			return 0, tempDir, err, ErrBuildEPKWritingBuildShError
		}

		// Close build.sh so its contents are flushed before the script is executed
		err = file.Close()
		if err != nil {
			return 0, tempDir, err, ErrBuildEPKWritingBuildShError
		}

		// Set up the target root
		targetRoot := filepath.Join(tempDir, buildConfig.TargetRoot)
		err = os.MkdirAll(targetRoot, 0755)
		if err != nil {
			return 0, tempDir, err, ErrBuildEPKTargetRootError
		}

		// Execute the shell script in BWrap
		logger.LogFunc(Log{
			Level: "INFO", Content: "Starting up container environment (replicating host files)", Prompt: false,
		})

		// Allow me to explain why it's in BWrap. It's very difficult to cut off internet access without root, so I just
		// copy-pasted most of the host files into the container, then disabled networking. This also allows us to use
		// fakeroot and minimises the blast radius of a malicious package (hopefully) by not allowing the home directory
		// or any files owned by root to be viewed or modified (too bad if you've got sensitive data in /var or /etc :P)
		arguments := []string{
			"--unshare-net",
			"--bind", "/bin", "/bin",
			"--bind", "/lib", "/lib",
			"--bind", "/lib64", "/lib64",
			"--bind", "/usr", "/usr",
			"--bind", "/etc", "/etc",
			"--bind", "/var", "/var",
			"--bind", "/sys", "/sys",
			"--bind", "/opt", "/opt",
			"--bind", targetRoot, filepath.Join("/", buildConfig.TargetRoot),
			"--bind", tempDir, "/eternity",
			"--dev", "/dev",
			"--tmpfs", "/run",
			"--tmpfs", "/tmp",
			"--proc", "/proc",
			"/usr/bin/fakeroot-tcp", "--",
			"/bin/sh", "/eternity/build.sh",
		}

		if buildConfig.FilesFolder != "" {
			arguments = arguments[:len(arguments)-4]
			arguments = append(
				arguments, "--bind", filepath.Join(projectDir, buildConfig.FilesFolder), filepath.Join("/", buildConfig.FilesFolder),
				"/usr/bin/fakeroot-tcp", "--",
				"/bin/sh", "/eternity/build.sh",
			)
		}

		cmd := exec.Command("bwrap", arguments...)
		if logger.StdoutSupported {
			cmd.Stdout = logger.Stdout
		}
		err = cmd.Run()
		if err != nil {
			return 0, tempDir, err, ErrBuildEPKExecutingBuildShError
		}

		// Hopefully, the build was successful. Let's give the user a file and size count.
		var fileCount int
		var sizeCount int64
		// We start at -1 because the root directory is not counted
		dirCount := -1
		err = filepath.Walk(targetRoot, func(path string, info os.FileInfo, err error) error {
			// Propagate any error from Walk before touching info, which may be nil
			if err != nil {
				return err
			}
			if info.IsDir() {
				dirCount++
			} else {
				fileCount++
			}
			// Both directories and files need to have their sizes counted
			sizeCount += info.Size()
			return nil
		})
		if err != nil {
			return 0, tempDir, err, ErrBuildEPKCountingFilesError
		}

		logger.LogFunc(Log{
			Level: "INFO",
			Content: "Build successful. " + strconv.Itoa(fileCount) + " files and " + strconv.Itoa(dirCount) +
				" directories created, totalling " + strconv.FormatInt(sizeCount, 10) + " bytes.",
			Prompt: false,
		})

		return sizeCount, tempDir, nil, nil
	default:
		return 0, "", errors.New(buildConfig.Type), ErrBuildEPKBadBuildType
	}
}

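// buildAndPackage is a minimal end-to-end sketch of how ParseConfig, BuildEPK and
// PackageEPK appear intended to be chained by a caller. It is not part of the original
// API: the function name and the output file name are illustrative, and feeding
// BuildEPK's size into Metadata.DecompressedSize is an assumption based on the field
// and return types rather than documented behaviour.
func buildAndPackage(projectDir string, privateKey ed25519.PrivateKey, logger *Logger) error {
	config, err, category := ParseConfig(filepath.Join(projectDir, "eternity.json"), logger)
	if err != nil {
		return errors.Join(category, err)
	}

	// Build in memory; pass false to build on disk instead
	size, tempDir, err, category := BuildEPK(projectDir, true, config.Build, logger)
	if err != nil {
		return errors.Join(category, err)
	}

	// Assumption: the decompressed size reported by BuildEPK is what PackageEPK
	// should record in the package metadata
	config.Metadata.DecompressedSize = size

	err, category = PackageEPK(config.Metadata, config.Build, tempDir, config.Metadata.Name+".epk", privateKey, logger)
	if err != nil {
		return errors.Join(category, err)
	}
	return nil
}
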
// CreateTar creates a tar archive from a directory
func CreateTar(targetDir string, output io.Writer) error {
	tarWriter := tar.NewWriter(output)
	err := filepath.Walk(targetDir, func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}

		if !info.Mode().IsRegular() && info.Mode()&os.ModeSymlink == 0 {
			return nil
		}

		header, err := tar.FileInfoHeader(info, path)
		if err != nil {
			return err
		}

		header.Name = strings.TrimPrefix(strings.Replace(path, targetDir, "", -1), string(filepath.Separator))

		if info.Mode()&os.ModeSymlink != 0 {
			linkTarget, err := os.Readlink(path)
			if err != nil {
				return err
			}
			header.Linkname = linkTarget
		}

		err = tarWriter.WriteHeader(header)
		if err != nil {
			return err
		}

		if info.Mode().IsRegular() {
			file, err := os.Open(path)
			if err != nil {
				return err
			}

			_, err = io.Copy(tarWriter, file)
			if err != nil {
				return err
			}

			err = file.Close()
			if err != nil {
				return err
			}
		}

		return nil
	})

	if err != nil {
		return err
	}

	return tarWriter.Close()
}

// These errors are in the wrong order due to this function being rewritten.
// Oh well, not like it matters.

// ConstPackageEPKMagicNumber is the magic number for an EPK file: "epk" in ASCII / UTF-8
var ConstPackageEPKMagicNumber = []byte{0x65, 0x70, 0x6B}

// ConstPackageEPKBigEndian is the letter "b" in ASCII / UTF-8
var ConstPackageEPKBigEndian = []byte{0x62}

// ConstPackageEPKLittleEndian is the letter "l" in ASCII / UTF-8
var ConstPackageEPKLittleEndian = []byte{0x6C}

// ConstPackageEPKInitialByteOffset is the initial byte offset for an EPK file until we arrive at the signature. 12 = 3 + 1 + 8: 3 for the magic number, 1 for the endian, and 8 for the tar offset
var ConstPackageEPKInitialByteOffset int64 = 12

// ConstPackageEPKSignatureLength is the length of the signature
var ConstPackageEPKSignatureLength int64 = 64

// ConstPackageEPKPublicKeyLength is the length of the public key
var ConstPackageEPKPublicKeyLength int64 = 32

// ConstPackageEPKMetadataOffset is the offset of the metadata in the EPK file
var ConstPackageEPKMetadataOffset = 108

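// Taken together, the constants above describe the EPK on-disk layout that
// PackageEPK writes and GenerateRepository reads back:
//
//	bytes 0-2    magic number "epk"
//	byte  3      endianness marker: "l" (little) or "b" (big)
//	bytes 4-11   tar offset, an unsigned 64-bit integer in the marked endianness
//	bytes 12-75  Ed25519 signature of the xxHash of everything from byte 108 onwards
//	bytes 76-107 Ed25519 public key
//	bytes 108-(tar offset - 1)  package metadata as JSON
//	bytes (tar offset)-EOF      Zstandard-compressed tar archive of the dist directory
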
// All these errors are out of order once I rewrote this to stream instead of using a buffer.
// Oh well, not like it matters.

var ErrPackageEPKFailedToWriteHash = errors.New("error writing hash to EPK file")
var ErrPackageEPKFailedToSeek = errors.New("error seeking in EPK file")
var ErrPackageEPKCreateDistDirError = errors.New("error creating dist directory")
var ErrPackageEPKMoveToDistError = errors.New("error moving to dist directory")
var ErrPackageEPKTarError = errors.New("error creating tar")
var ErrPackageEPKJSONMarshal = errors.New("error marshalling JSON")
var ErrPackageEPKCreateCompressionWriterError = errors.New("error creating ZStandard writer")
var ErrPackageEPKCompressCloseError = errors.New("error closing EPK ZStandard writer")
var ErrPackageEPKCannotOpenFile = errors.New("error opening EPK file for writing")
var ErrPackageEPKCannotWriteFile = errors.New("error writing to EPK file")
var ErrPackageEPKCannotCloseFile = errors.New("error closing EPK file")

// PackageEPK packages the EPK work directory into an EPK file
func PackageEPK(metaData Metadata, build Build, tempDir string, output string, privateKey ed25519.PrivateKey, logger *Logger) (error, error) {
	// Create the EPK
	logger.LogFunc(Log{
		Level: "INFO", Content: "Packaging EPK", Prompt: false,
	})

	// Ok. Let's construct targetDir and the dist directory; the hooks folder is moved below if present
	targetDir := filepath.Join(tempDir, build.TargetRoot)
	distDir := tempDir + "/dist"

	err := os.MkdirAll(distDir, 0755)
	if err != nil {
		return err, ErrPackageEPKCreateDistDirError
	}

	err = os.Rename(targetDir, distDir+"/root")
	if err != nil {
		return err, ErrPackageEPKMoveToDistError
	}

	if build.HooksFolder != "" {
		hooksDir := filepath.Join(tempDir, build.HooksFolder)
		err = os.Rename(hooksDir, distDir+"/hooks")
		if err != nil {
			return err, ErrPackageEPKMoveToDistError
		}
	}

	// Map the metadata to a JSON string
	logger.LogFunc(Log{
		Level: "INFO", Content: "Calculating package metadata", Prompt: false,
	})

	dataTemplate := map[string]interface{}{
		"name":     metaData.Name,
		"author":   metaData.Author,
		"version":  metaData.Version.String(),
		"desc":     metaData.Description,
		"longDesc": metaData.LongDescription,
		"license":  metaData.License,
		"arch":     metaData.Architecture,
		"deps":     metaData.Dependencies,
		"specialFiles": map[string][]string{
			"noDelete":  metaData.SpecialFiles.NoDelete,
			"noReplace": metaData.SpecialFiles.NoReplace,
		},
		"size": metaData.DecompressedSize,
	}

	// Make the data template into a JSON string
	dataTemplateBytes, err := json.Marshal(dataTemplate)
	if err != nil {
		return err, ErrPackageEPKJSONMarshal
	}

	// Calculate the offsets
	logger.LogFunc(Log{
		Level: "INFO", Content: "Calculating binary offsets", Prompt: false,
	})

	// Calculate the length of the data template
	dataTemplateLength := int64(len(dataTemplateBytes))

	// Calculate the tar offset
	tarOffset := int64(ConstPackageEPKMetadataOffset) + dataTemplateLength

	logger.LogFunc(Log{
		Level: "INFO", Content: "Calculating binary properties", Prompt: false,
	})

	// We need to determine the endianness of the architecture so that it's optimal for the target system
	// We assume little-endian by default because most architectures are little-endian (why would you use big-endian?)
	littleEndian := true
	switch metaData.Architecture {
	case "ppc64", "ppc", "mips64", "mips", "s390", "s390x", "sparc64", "sparc":
		littleEndian = false
	}

	// Create the byte arrays for the tar offset
	tarOffsetBytes := make([]byte, 8)

	// Write as much as we can to the file
	logger.LogFunc(Log{
		Level: "INFO", Content: "Writing to file", Prompt: false,
	})

	// Open the file buffer
	file, err := os.OpenFile(output, os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0644)
	if err != nil {
		return err, ErrPackageEPKCannotOpenFile
	}

	// Write the magic number
	_, err = file.Write(ConstPackageEPKMagicNumber)
	if err != nil {
		return err, ErrPackageEPKCannotWriteFile
	}

	// Write the endianness and encode the tar offset based on the endianness.
	// I know it's wasteful to convert an int64 to an uint64, but binary doesn't support int64 with a custom
	// endianness and file.Seek doesn't support uint64.
	if littleEndian {
		binary.LittleEndian.PutUint64(tarOffsetBytes, uint64(tarOffset))
		_, err = file.Write(ConstPackageEPKLittleEndian)
	} else {
		binary.BigEndian.PutUint64(tarOffsetBytes, uint64(tarOffset))
		_, err = file.Write(ConstPackageEPKBigEndian)
	}
	if err != nil {
		return err, ErrPackageEPKCannotWriteFile
	}

	_, err = file.Write(tarOffsetBytes)
	if err != nil {
		return err, ErrPackageEPKCannotWriteFile
	}
	_, err = file.WriteAt(dataTemplateBytes, int64(ConstPackageEPKMetadataOffset))
	if err != nil {
		return err, ErrPackageEPKCannotWriteFile
	}

	// Create the tar archive
	logger.LogFunc(Log{
		Level: "INFO", Content: "Creating tar", Prompt: false,
	})

	// Move the file pointer to the tar offset so that we can write the tar archive
	seek, err := file.Seek(tarOffset, io.SeekStart)
	if err != nil {
		return err, ErrPackageEPKFailedToSeek
	}
	if seek != tarOffset {
		return err, ErrPackageEPKFailedToSeek
	}

	// Create the hash writer
	xxHash := xxhash.New()
	_, err = xxHash.Write(dataTemplateBytes)
	if err != nil {
		return err, ErrPackageEPKFailedToWriteHash
	}

	// Create a multi-writer so we can write to the file and the hash at the same time
	multiWriter := io.MultiWriter(file, xxHash)

	// Create the ZStandard writer
	writer, err := zstd.NewWriter(multiWriter, zstd.WithEncoderLevel(zstd.SpeedDefault))
	if err != nil {
		return err, ErrPackageEPKCreateCompressionWriterError
	}

	// We start writing the tar archive
	err = CreateTar(distDir, writer)
	if err != nil {
		return err, ErrPackageEPKTarError
	}

	// Close the ZStandard writer
	err = writer.Close()
	if err != nil {
		return err, ErrPackageEPKCompressCloseError
	}

	// Great, let's sign the EPK
	logger.LogFunc(Log{
		Level: "INFO", Content: "Signing EPK", Prompt: false,
	})

	// Sign the hash
	signature := ed25519.Sign(privateKey, xxHash.Sum(nil))
	publicKey := privateKey.Public().(ed25519.PublicKey)

	// Write the signature and public key to the file
	// Reverse the pointer back to the start of the file so our offsets are correct
	_, err = file.Seek(0, io.SeekStart)
	if err != nil {
		return err, ErrPackageEPKFailedToSeek
	}

	// Write the signature
	_, err = file.WriteAt(signature, ConstPackageEPKInitialByteOffset)
	if err != nil {
		return err, ErrPackageEPKCannotWriteFile
	}

	// Write the public key
	_, err = file.WriteAt(publicKey, ConstPackageEPKInitialByteOffset+ConstPackageEPKSignatureLength)
	if err != nil {
		return err, ErrPackageEPKCannotWriteFile
	}

	// Close the file
	err = file.Close()
	if err != nil {
		return err, ErrPackageEPKCannotCloseFile
	}

	return nil, nil
}

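// verifyEPKSignature is a minimal sketch (not part of the original API; the name is
// illustrative) of how a consumer could check the signature that PackageEPK embeds.
// It mirrors the write path above: the signed digest is the xxHash of everything from
// the metadata offset to the end of the file, i.e. the metadata JSON followed by the
// compressed tar stream.
func verifyEPKSignature(path string) (bool, error) {
	file, err := os.Open(path)
	if err != nil {
		return false, err
	}
	defer file.Close()

	// Read the signature and public key from their fixed offsets
	signature := make([]byte, ConstPackageEPKSignatureLength)
	if _, err = file.ReadAt(signature, ConstPackageEPKInitialByteOffset); err != nil {
		return false, err
	}
	publicKey := make([]byte, ConstPackageEPKPublicKeyLength)
	if _, err = file.ReadAt(publicKey, ConstPackageEPKInitialByteOffset+ConstPackageEPKSignatureLength); err != nil {
		return false, err
	}

	// Hash the metadata JSON and the compressed tar stream, exactly as PackageEPK does
	xxHash := xxhash.New()
	if _, err = file.Seek(int64(ConstPackageEPKMetadataOffset), io.SeekStart); err != nil {
		return false, err
	}
	if _, err = io.Copy(xxHash, file); err != nil {
		return false, err
	}

	return ed25519.Verify(publicKey, xxHash.Sum(nil), signature), nil
}
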
// ConstGenerateRepositoryRepoDataOffset is the offset of the repository data in the repository.erf file: it is 3 (magic) + 64 (the signature) + 32 (the public key) = 99
var ConstGenerateRepositoryRepoDataOffset int64 = 99

// ConstGenerateRepositoryEPKMagicNumber is the magic number for an EPK repository: "eon" in ASCII / UTF-8, for obvious reasons
var ConstGenerateRepositoryEPKMagicNumber = []byte{0x65, 0x6F, 0x6E}

var ErrGenerateRepositoryStatError = errors.New("error stating file or directory")
var ErrGenerateRepositoryNotDirectory = errors.New("not a directory")
var ErrGenerateRepositoryRepositoryNameContainsSlash = errors.New("repository name contains a slash")
var ErrGenerateRepositoryFailedToWalk = errors.New("error walking directory")
var ErrGenerateRepositoryCannotUnmarshalJSON = errors.New("error unmarshalling JSON")
var ErrGenerateRepositoryCannotMarshalJSON = errors.New("error marshalling JSON")
var ErrGenerateRepositoryCannotOpenFile = errors.New("error opening file for writing")
var ErrGenerateRepositoryCannotWriteFile = errors.New("error writing to file")
var ErrGenerateRepositoryCannotCloseFile = errors.New("error closing file")

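// GenerateRepository scans a directory for EPK files and writes a signed repository.erf
// index describing them, reusing the repository's name, description and author from an
// existing repository.erf or prompting for them if the file does not exist yet.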
func GenerateRepository(directory string, privateKey ed25519.PrivateKey, logger *Logger) (error, error) {
	// First, we need to see if the directory exists
	logger.LogFunc(Log{
		Level: "INFO", Content: "Generating repository", Prompt: false,
	})

	info, err := os.Stat(directory)
	if err != nil {
		return err, ErrGenerateRepositoryStatError
	}

	if !info.IsDir() {
		return nil, ErrGenerateRepositoryNotDirectory
	}

	// Create the EPK map
	epkMap := make(map[string]interface{})

	// See if the repository.erf file exists
	_, err = os.Stat(directory + "/repository.erf")
	if err != nil {
		if !errors.Is(err, os.ErrNotExist) {
			return err, ErrGenerateRepositoryStatError
		} else {
			if logger.PromptSupported {
				// Ask the user for the name of the repository
				repoName := logger.LogFunc(Log{
					Level: "PROMPT", Content: "Enter the name of the repository", Prompt: true,
				})

				// Check the repository name does not contain any slashes
				if strings.Contains(repoName, "/") {
					return nil, ErrGenerateRepositoryRepositoryNameContainsSlash
				}

				// Ask the user for the description of the repository
				repoDesc := logger.LogFunc(Log{
					Level: "PROMPT", Content: "Enter a short description of the repository", Prompt: true,
				})

				// Ask the user for the author of the repository
				repoAuthor := logger.LogFunc(Log{
					Level: "PROMPT",
					Content: "Enter your preferred author name. This must be the same as the author name used in " +
						"eternity.json and associated with your keypair, otherwise it will cause issues with EPK" +
						" verification and your repository will be rejected by Eon and cannot be trusted.",
					Prompt: true,
				})

				// Now append the metadata to the EPK map
				epkMap["name"] = repoName
				epkMap["desc"] = repoDesc
				epkMap["author"] = repoAuthor
			} else {
				logger.LogFunc(Log{
					Level: "FATAL",
					Content: "Please fill in the author, name, and description of the repository in repository.json. " +
						"Your author name must be the same as the author name used in eternity.json and associated with " +
						"your keypair, otherwise it will cause issues with EPK verification and your repository will be " +
						"rejected by Eon and cannot be trusted.",
					Prompt: false,
				})
			}
		}
	} else {
		// Since it does exist, we can extract the name and description from it
		file, err := os.ReadFile(directory + "/repository.erf")
		if err != nil {
			return err, ErrGenerateRepositoryCannotOpenFile
		}

		// Unmarshal the JSON
		var oldRepositoryMap map[string]interface{}

		err = json.Unmarshal(file[ConstGenerateRepositoryRepoDataOffset:], &oldRepositoryMap)
		if err != nil {
			return err, ErrGenerateRepositoryCannotUnmarshalJSON
		}

		// Copy the author, name, and description to the EPK map
		epkMap["name"] = oldRepositoryMap["name"]
		epkMap["desc"] = oldRepositoryMap["desc"]
		epkMap["author"] = oldRepositoryMap["author"]
	}

	// Add a list of packages to the EPK map
	epkMap["packages"] = make([]map[string]interface{}, 0)

	// Now, walk the directory
	err = filepath.Walk(directory, func(path string, info os.FileInfo, err error) error {
		// If error is not nil, return it
		if err != nil {
			return err
		}

		// Ignore directories
		if info.IsDir() {
			return nil
		}

		// Ok. We need to check if the file actually is an EPK file
		file, err := os.Open(path)
		if err != nil {
			return err
		}
		// Make sure the file is closed when we are done with this entry
		defer file.Close()

		// Read the first 3 bytes
		magicNumber := make([]byte, 3)
		_, err = file.Read(magicNumber)
		if err != nil {
			return err
		}

		// Check if the magic number is correct
		if !bytes.Equal(magicNumber, ConstPackageEPKMagicNumber) {
			// It isn't an EPK file, so we can ignore it
			return nil
		}

		// We need to create a hash of the file
		xxHash := xxhash.New()
		_, err = io.Copy(xxHash, file)
		if err != nil {
			return err
		}

		// Extract the metadata. First, we get the endian-ness
		var littleEndian bool
		endian := make([]byte, 1)
		_, err = file.ReadAt(endian, 3)
		if err != nil {
			return err
		}
		if bytes.Equal(endian, ConstPackageEPKLittleEndian) {
			littleEndian = true
		} else if bytes.Equal(endian, ConstPackageEPKBigEndian) {
			littleEndian = false
		} else {
			return errors.New("invalid endianness")
		}

		// Now we get the tar offset
		var tarOffset int64
		tarOffsetBytes := make([]byte, 8)
		_, err = file.ReadAt(tarOffsetBytes, 4)
		if err != nil {
			return err
		}

		// Now we convert the tar offset to an int64
		if littleEndian {
			tarOffset = int64(binary.LittleEndian.Uint64(tarOffsetBytes))
		} else {
			tarOffset = int64(binary.BigEndian.Uint64(tarOffsetBytes))
		}

		// Now we can read in the metadata
		metadataBytes := make([]byte, tarOffset-int64(ConstPackageEPKMetadataOffset))
		_, err = file.ReadAt(metadataBytes, int64(ConstPackageEPKMetadataOffset))
		if err != nil {
			return err
		}

		// Now we can unmarshal the metadata
		var metadata map[string]interface{}
		err = json.Unmarshal(metadataBytes, &metadata)
		if err != nil {
			return err
		}

		// Now we have the hash, we need to add it to our data template
		dataTemplate := make(map[string]interface{})
		dataTemplate["hash"] = xxHash.Sum64()

		// Now we add some basic metadata
		dataTemplate["name"] = metadata["name"]
		dataTemplate["author"] = metadata["author"]
		dataTemplate["version"] = metadata["version"]
		dataTemplate["size"] = metadata["size"]
		dataTemplate["arch"] = metadata["arch"]
		dataTemplate["desc"] = metadata["desc"]
		dataTemplate["deps"] = metadata["deps"]

		// We add the path to the EPK file, relative to the directory
		relativePath, err := filepath.Rel(directory, path)
		if err != nil {
			return err
		}

		dataTemplate["path"] = relativePath

		// Append it to a list in the EPK map
		epkMap["packages"] = append(epkMap["packages"].([]map[string]interface{}), dataTemplate)

		return nil
	})

	// This error message is a bit vague, but meh.
	if err != nil {
		return err, ErrGenerateRepositoryFailedToWalk
	}

	// Great, now we need to marshal the EPK map and write it to a file
	epkMapBytes, err := json.Marshal(epkMap)
	if err != nil {
		return err, ErrGenerateRepositoryCannotMarshalJSON
	}

	// Write the EPK map to a file
	file, err := os.OpenFile(directory+"/repository.erf", os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0644)
	if err != nil {
		return err, ErrGenerateRepositoryCannotOpenFile
	}

	// Sign the epk map
	xxHash := xxhash.New()
	_, err = xxHash.Write(epkMapBytes)
	if err != nil {
		return err, nil
	}

	signature := ed25519.Sign(privateKey, xxHash.Sum(nil))
	publicKey := privateKey.Public().(ed25519.PublicKey)

	// Write magic number
	_, err = file.Write(ConstGenerateRepositoryEPKMagicNumber)
	if err != nil {
		return err, ErrGenerateRepositoryCannotWriteFile
	}

	// Write signature
	_, err = file.WriteAt(signature, 3)
	if err != nil {
		return err, ErrGenerateRepositoryCannotWriteFile
	}

	// Write public key
	_, err = file.WriteAt(publicKey, 67)
	if err != nil {
		return err, ErrGenerateRepositoryCannotWriteFile
	}

	// Write the EPK map to the file
	_, err = file.WriteAt(epkMapBytes, ConstGenerateRepositoryRepoDataOffset)
	if err != nil {
		return err, ErrGenerateRepositoryCannotWriteFile
	}

	// Close the file
	err = file.Close()
	if err != nil {
		return err, ErrGenerateRepositoryCannotCloseFile
	}

	return nil, nil
}