eon/lib/main.go

package lib
import (
"bufio"
"bytes"
"errors"
"io"
"os"
"strconv"
"strings"
"time"
"archive/tar"
"crypto/ed25519"
"encoding/binary"
"encoding/hex"
"encoding/json"
"net/http"
"net/url"
"os/exec"
"path/filepath"
"github.com/Masterminds/semver"
"github.com/cespare/xxhash/v2"
"github.com/dustin/go-humanize"
"github.com/klauspost/compress/zstd"
)
// RemoteEPK is a struct that contains the metadata of an EPK from a remote repository
type RemoteEPK struct {
Repository Repository
Name string
Author string
Description string
Version semver.Version
Architecture string
CompressedSize uint64
Dependencies []string
Path string
Arch string
EPKHash uint64
}
// Repository is a struct that contains the repository information
type Repository struct {
Name string
URL string
Owner string
Description string
}
// SpecialFiles is a struct that contains the special files that are not to be deleted or replaced
type SpecialFiles struct {
NoDelete []string
NoReplace []string
}
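// A minimal sketch (illustrative values only) of how SpecialFiles is typically filled in: paths in NoDelete
// are kept out of the recorded installed-file list so they survive removal, and paths in NoReplace are left
// untouched when an already-installed package is upgraded.
//
//	special := SpecialFiles{
//		NoDelete:  []string{"/etc/example/example.conf"},
//		NoReplace: []string{"/etc/example/example.conf"},
//	}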
// Metadata is a struct that contains the metadata of the package
type Metadata struct {
Name string
Description string
LongDescription string
Version semver.Version
Author string
License string
Architecture string
Dependencies []string
SpecialFiles SpecialFiles
Size uint64
DecompressedSize uint64
}
// EPKPreMap is a struct that contains the metadata of the EPK
type EPKPreMap struct {
DisplayData DisplayData
MetadataMap map[string]interface{}
IsLittleEndian bool
IsUpgrade bool
TarOffset uint64
}
// DisplayData is a struct that contains the display data of the EPK
type DisplayData struct {
Name string
Author string
Architecture string
Description string
Version semver.Version
Size uint64
DecompressedSize uint64
Dependencies []string
IsDependency bool
}
// PotentiallyNullEPKPreMap is a EPKPreMap that can be nil
type PotentiallyNullEPKPreMap struct {
EPKPreMap *EPKPreMap
Null bool
}
// Log is a struct that contains the log information
type Log struct {
Level string
Content string
Prompt bool
PlaySound bool
Progress uint64
Total uint64
Overwrite bool
}
// StreamOrBytes is a struct that contains either a stream or bytes, allowing optimising for memory or speed
type StreamOrBytes struct {
FileStream *os.File
RepositoryName string
URL string
Bytes []byte
IsURL bool
IsRemote bool
IsFileStream bool
}
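// A minimal sketch (not part of the package API) of how a StreamOrBytes can be populated for each of the
// three sources this file handles; the field combinations are inferred from how the struct is used below:
//
//	inMemory := StreamOrBytes{Bytes: epkData}
//	fromFile := StreamOrBytes{FileStream: file, IsFileStream: true}
//	fromURL := StreamOrBytes{URL: "https://repo.example/pkg.epk", RepositoryName: "example", IsURL: true, IsRemote: true}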
// Logger is a struct that contains the functions and properties of the logger
type Logger struct {
LogFunc func(Log) string
PromptSupported bool
ProgressSupported bool
}
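// A minimal sketch of a Logger for a plain-text frontend, for illustration only; a real frontend would
// implement prompt handling and progress rendering itself:
//
//	logger := &Logger{
//		LogFunc: func(entry Log) string {
//			fmt.Println("[" + entry.Level + "] " + entry.Content)
//			return "" // no prompt support, so there is nothing to return
//		},
//		PromptSupported:   false,
//		ProgressSupported: false,
//	}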
// Epk is a struct that contains the metadata and the tar archive of the EPK
type Epk struct {
Metadata Metadata
TarArchive []byte
}
// interfaceToStringSlice converts an interface slice to a string slice
func interfaceToStringSlice(interfaceSlice []interface{}, interfaceName string) ([]string, error) {
// Yes, it's meant to be empty and not nil: JSON arrays are empty, not nil
//goland:noinspection GoPreferNilSlice
stringSlice := []string{}
for _, interfaceValue := range interfaceSlice {
stringValue, ok := interfaceValue.(string)
if !ok {
return nil, errors.New(interfaceName + " are not strings")
}
stringSlice = append(stringSlice, stringValue)
}
return stringSlice, nil
}
// ByteToFingerprint converts a byte slice to an Eon fingerprint, which is similar to a legacy-style OpenSSH fingerprint
func ByteToFingerprint(input []byte) string {
	xxHashWriter := xxhash.New()
	_, _ = xxHashWriter.Write(input)
	// Hash the input and render the 64-bit digest as hex (16 characters)
	inputString := hex.EncodeToString(xxHashWriter.Sum(nil))
	// Group the hex digest into colon-separated pairs
	var result []string
	for index := 0; index < len(inputString); index += 2 {
		result = append(result, inputString[index:index+2])
	}
	return strings.Join(result, ":")
}
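// Usage sketch (illustrative): the fingerprint of an Ed25519 public key is its 64-bit xxHash digest rendered
// as eight colon-separated hex pairs, so the output has the shape "ab:cd:ef:01:23:45:67:89":
//
//	fingerprint := ByteToFingerprint(publicKey)
//	logger.LogFunc(Log{Level: "INFO", Content: author + " " + fingerprint})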
// MkdirAllWithPaths mimics os.MkdirAll but returns the created directories
func MkdirAllWithPaths(path string, perm os.FileMode) ([]string, error) {
// Make sure to return absolute paths
absPath, err := filepath.Abs(path)
if err != nil {
return nil, err
}
// Split the path into individual directories
var dirs []string
currentPath := absPath
for currentPath != "/" {
dirs = append([]string{currentPath}, dirs...)
currentPath = filepath.Dir(currentPath)
}
// Slice to hold the created directory paths
var createdDirs []string
// Iterate through each directory and create if not exists
for _, dir := range dirs {
_, err := os.Stat(dir)
if errors.Is(err, os.ErrNotExist) {
// Directory doesn't exist, create it
err := os.Mkdir(dir, perm)
if err != nil {
return createdDirs, err
}
// Append created directory's absolute path
createdDirs = append(createdDirs, dir)
}
}
return createdDirs, nil
}
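// Usage sketch (illustrative path): create nested directories and record exactly which ones were newly
// created, e.g. so an installer can add them to its list of files to remove on uninstall:
//
//	created, err := MkdirAllWithPaths("/usr/share/example/icons", 0755)
//	if err != nil {
//		// created still lists any directories that were made before the failure
//	}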
func preMapEpkFromBytes(metaDataBytes []byte, littleEndian bool, size uint64, offset uint64) (EPKPreMap, error) {
// Unmarshal the JSON
var displayDataMap map[string]interface{}
decoder := json.NewDecoder(bytes.NewReader(metaDataBytes))
decoder.UseNumber()
err := decoder.Decode(&displayDataMap)
if err != nil {
return EPKPreMap{}, errors.New("metadata is not valid JSON")
}
// Declare the parsedDisplayData object
var parsedDisplayData EPKPreMap
// Add some of our data so that the full EPK can be mapped with less effort
parsedDisplayData.MetadataMap = displayDataMap
parsedDisplayData.IsLittleEndian = littleEndian
parsedDisplayData.TarOffset = offset
parsedDisplayData.DisplayData.Size = size
// Map the display data
var ok bool
// Set the size
sizeJSON, ok := displayDataMap["size"].(json.Number)
if !ok {
return EPKPreMap{}, errors.New("size is not a number")
}
parsedDisplayData.DisplayData.DecompressedSize, err = strconv.ParseUint(sizeJSON.String(), 10, 64)
if err != nil {
return EPKPreMap{}, err
}
// Set the name, author, version, arch, and dependencies
parsedDisplayData.DisplayData.Name, ok = displayDataMap["name"].(string)
if !ok {
return EPKPreMap{}, errors.New("name is not a string")
}
parsedDisplayData.DisplayData.Author, ok = displayDataMap["author"].(string)
if !ok {
return EPKPreMap{}, errors.New("author is not a string")
}
versionString, ok := displayDataMap["version"].(string)
if !ok {
return EPKPreMap{}, errors.New("version is not a string")
}
versionPointer, err := semver.NewVersion(versionString)
if err != nil {
return EPKPreMap{}, err
}
parsedDisplayData.DisplayData.Version = *versionPointer
parsedDisplayData.DisplayData.Architecture, ok = displayDataMap["arch"].(string)
if !ok {
return EPKPreMap{}, errors.New("arch is not a string")
}
dependencies, ok := displayDataMap["deps"].([]interface{})
if !ok {
return EPKPreMap{}, errors.New("dependencies is not an array")
}
parsedDisplayData.DisplayData.Dependencies, err = interfaceToStringSlice(dependencies, "dependencies")
if err != nil {
return EPKPreMap{}, err
}
return parsedDisplayData, nil
}
// ConstMapEPKMetadataOffset is the offset of the metadata in the EPK: 3 magic bytes, 1 endian byte, 8 offset bytes, 64 signature bytes, and 32 public key bytes
var ConstMapEPKMetadataOffset uint64 = 108
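// For reference, the fixed EPK header layout implied by the offsets used throughout this file:
//
//	bytes   0-2    magic "epk"
//	byte    3      endianness marker (0x6C 'l' = little endian, 0x62 'b' = big endian)
//	bytes   4-11   offset of the compressed tar archive, as a uint64 in the declared endianness
//	bytes  12-75   Ed25519 signature
//	bytes  76-107  Ed25519 public key
//	byte  108-     JSON metadata, followed by the zstd-compressed tar archive at the stored offset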
var ErrPreMapEPKCouldNotRead = errors.New("could not read EPK")
var ErrPreMapEPKHasNetworkStream = errors.New("network streams are not supported")
var ErrPreMapEPKHasNotGotEPK = errors.New("has not got an EPK")
var ErrPreMapEPKHasInvalidEndian = errors.New("has invalid endian")
var ErrPreMapEPKCouldNotMapJSON = errors.New("could not map metadata")
// PreMapEPK maps enough data to create the display summary of an EPK
func PreMapEPK(epkBytes StreamOrBytes, epkSize uint64) (EPKPreMap, error, error) {
// Say that we don't support network streams
if epkBytes.IsURL {
return EPKPreMap{}, nil, ErrPreMapEPKHasNetworkStream
}
	// First, we need to check if it even is an EPK by checking the first 3 magic bytes
if epkBytes.IsFileStream {
var magicBytes = make([]byte, 3)
_, err := epkBytes.FileStream.ReadAt(magicBytes, 0)
if err != nil {
return EPKPreMap{}, err, ErrPreMapEPKCouldNotRead
}
if string(magicBytes) != "epk" {
return EPKPreMap{}, nil, ErrPreMapEPKHasNotGotEPK
}
} else {
if string(epkBytes.Bytes[0:3]) != "epk" {
return EPKPreMap{}, nil, ErrPreMapEPKHasNotGotEPK
}
}
// Let's determine the endian-ness of the EPK via the 3rd byte
var littleEndian bool
if epkBytes.IsFileStream {
var littleEndianByte = make([]byte, 1)
_, err := epkBytes.FileStream.ReadAt(littleEndianByte, 3)
if err != nil {
return EPKPreMap{}, err, ErrPreMapEPKCouldNotRead
}
if littleEndianByte[0] == 0x6C {
littleEndian = true
} else if littleEndianByte[0] == 0x62 {
littleEndian = false
} else {
return EPKPreMap{}, nil, ErrPreMapEPKHasInvalidEndian
}
} else {
if epkBytes.Bytes[3] == 0x6C {
littleEndian = true
} else if epkBytes.Bytes[3] == 0x62 {
littleEndian = false
} else {
return EPKPreMap{}, nil, ErrPreMapEPKHasInvalidEndian
}
}
// Now we can get the offsets of the tar archive
var tarArchiveOffset uint64
if epkBytes.IsFileStream {
var tarArchiveOffsetBytes = make([]byte, 8)
_, err := epkBytes.FileStream.ReadAt(tarArchiveOffsetBytes, 4)
if err != nil {
return EPKPreMap{}, err, ErrPreMapEPKCouldNotRead
}
if littleEndian {
tarArchiveOffset = binary.LittleEndian.Uint64(tarArchiveOffsetBytes)
} else {
tarArchiveOffset = binary.BigEndian.Uint64(tarArchiveOffsetBytes)
}
} else {
if littleEndian {
tarArchiveOffset = binary.LittleEndian.Uint64(epkBytes.Bytes[4:12])
} else {
tarArchiveOffset = binary.BigEndian.Uint64(epkBytes.Bytes[4:12])
}
}
// We don't need to validate the signature yet. We will do that when we map the full EPK, because it means
// we have to read the entire thing, which is a waste of resources, since we only need the metadata.
var preMapEpk EPKPreMap
if epkBytes.IsFileStream {
var metadataBuffer = make([]byte, tarArchiveOffset-ConstMapEPKMetadataOffset)
_, err := epkBytes.FileStream.ReadAt(metadataBuffer, int64(ConstMapEPKMetadataOffset))
if err != nil {
return EPKPreMap{}, err, ErrPreMapEPKCouldNotRead
}
preMapEpk, err = preMapEpkFromBytes(metadataBuffer, littleEndian, epkSize, tarArchiveOffset)
if err != nil {
return EPKPreMap{}, err, ErrPreMapEPKCouldNotMapJSON
}
} else {
var err error
preMapEpk, err = preMapEpkFromBytes(epkBytes.Bytes[ConstMapEPKMetadataOffset:tarArchiveOffset], littleEndian, epkSize, tarArchiveOffset)
if err != nil {
return EPKPreMap{}, err, ErrPreMapEPKCouldNotMapJSON
}
}
return preMapEpk, nil, nil
}
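// Usage sketch (illustrative, hypothetical file name): pre-map a local EPK without reading the whole file,
// which yields the display data and the metadata map that FullyMapMetadata reuses later:
//
//	file, _ := os.Open("example.epk")
//	info, _ := file.Stat()
//	preMap, cause, errType := PreMapEPK(StreamOrBytes{FileStream: file, IsFileStream: true}, uint64(info.Size()))
//	if errType != nil {
//		// errType classifies the failure; cause, when non-nil, carries the underlying error
//	}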
var ErrPreMapRemoteEPKCouldNotCreateURL = errors.New("could not create URL")
var ErrPreMapRemoteEPKCouldNotCreateRequest = errors.New("could not create request")
var ErrPreMapRemoteEPKCouldNotSendRequest = errors.New("could not send request")
var ErrPreMapRemoteEPKCouldNotRead = errors.New("could not read EPK")
var ErrPreMapRemoteEPKCouldNotCloseConnection = errors.New("could not close connection")
var ErrPreMapRemoteEPKUnexpectedStatusCode = errors.New("unexpected status code")
var ErrPreMapEPKHasNotGotEPKMagic = errors.New("not an EPK")
var ErrPreMapRemoteEPKInvalidEndian = errors.New("invalid endian")
var ErrPreMapRemoteEPKCouldNotMapJSON = errors.New("error mapping metadata")
func PreMapRemoteEPK(remoteEPK RemoteEPK, logger *Logger) (EPKPreMap, error, error) {
// Fetch the first 12 bytes of the EPK - this contains the magic, endian, and offset
// We use the range header to only fetch the first 12 bytes
packageUrl, err := url.JoinPath(remoteEPK.Repository.URL, remoteEPK.Path)
if err != nil {
return EPKPreMap{}, err, ErrPreMapRemoteEPKCouldNotCreateURL
}
req, err := http.NewRequest("GET", packageUrl, nil)
if err != nil {
return EPKPreMap{}, err, ErrPreMapRemoteEPKCouldNotCreateRequest
}
	req.Header.Set("Range", "bytes=0-11")
resp, err := http.DefaultClient.Do(req)
if err != nil {
return EPKPreMap{}, err, ErrPreMapRemoteEPKCouldNotSendRequest
}
// Check if the status code is 206 (partial content)
var epkHeaderBytes = make([]byte, 12)
var rangeSupported bool
if resp.StatusCode == 200 {
// We have the entire file. Not great, not terrible.
// We'll have to cut off the connection early later. To optimise things slightly, we'll reuse this connection
// to read the metadata later.
// I'm deadly serious about the radiation. It could cause a bit flip causing the Range header to be malformed.
// It's amazing how many times I error handled for this, and I hope I can save someone from cancer one day.
logger.LogFunc(Log{
Level: "INFO",
Content: "The server does not support range requests. The installation process will be significantly slower." +
"Is the repository owner using python3's SimpleHTTPServer or similar? If so, please use a proper web " +
"server like Nginx, Ailur HTTP Server, or Apache. If not, please report this to the repository owner or " +
"check for sources of radiation around your computer.",
})
		_, err := io.ReadFull(resp.Body, epkHeaderBytes)
if err != nil {
return EPKPreMap{}, err, ErrPreMapRemoteEPKCouldNotRead
}
rangeSupported = false
} else if resp.StatusCode == 206 {
// Great, everything is working as expected.
_, err := io.ReadFull(resp.Body, epkHeaderBytes)
if err != nil {
return EPKPreMap{}, err, ErrPreMapRemoteEPKCouldNotRead
}
rangeSupported = true
// Close the connection
err = resp.Body.Close()
if err != nil {
return EPKPreMap{}, err, ErrPreMapRemoteEPKCouldNotCloseConnection
}
} else if resp.StatusCode == 404 {
		// The package file was not found on the repository server
		return EPKPreMap{}, errors.New("package not found in repository: " + strconv.Itoa(resp.StatusCode)), ErrPreMapRemoteEPKUnexpectedStatusCode
} else {
// Something went wrong
return EPKPreMap{}, errors.New("unexpected status code: " + strconv.Itoa(resp.StatusCode)), ErrPreMapRemoteEPKUnexpectedStatusCode
}
// Now we verify the magic bytes
if string(epkHeaderBytes[0:3]) != "epk" {
return EPKPreMap{}, nil, ErrPreMapEPKHasNotGotEPKMagic
}
// Let's determine the endian-ness of the EPK via the 3rd byte
var littleEndian bool
if epkHeaderBytes[3] == 0x6C {
littleEndian = true
} else if epkHeaderBytes[3] == 0x62 {
littleEndian = false
} else {
return EPKPreMap{}, nil, ErrPreMapRemoteEPKInvalidEndian
}
// Now we can get the offsets of the tar archive
var tarArchiveOffset uint64
if littleEndian {
tarArchiveOffset = binary.LittleEndian.Uint64(epkHeaderBytes[4:12])
} else {
tarArchiveOffset = binary.BigEndian.Uint64(epkHeaderBytes[4:12])
}
// No signature verification for you
// Let's fetch the display data bytes
displayDataBytes := make([]byte, tarArchiveOffset-ConstMapEPKMetadataOffset)
if rangeSupported {
// Send another request to fetch the display data
req.Header.Set("Range", "bytes=108-"+strconv.FormatUint(tarArchiveOffset-1, 10))
resp, err = http.DefaultClient.Do(req)
if err != nil {
return EPKPreMap{}, err, ErrPreMapRemoteEPKCouldNotSendRequest
}
// Read the display data
_, err = io.ReadFull(resp.Body, displayDataBytes)
if err != nil {
return EPKPreMap{}, err, ErrPreMapRemoteEPKCouldNotRead
}
// Close the connection
err = resp.Body.Close()
if err != nil {
return EPKPreMap{}, err, ErrPreMapRemoteEPKCouldNotCloseConnection
}
	} else {
		// Re-use the connection to read the display data. Only the 12-byte header has been consumed from
		// this connection so far, so skip past the signature and public key to where the metadata starts
		_, err = io.CopyN(io.Discard, resp.Body, int64(ConstMapEPKMetadataOffset-12))
		if err != nil {
			return EPKPreMap{}, err, ErrPreMapRemoteEPKCouldNotRead
		}
		_, err = io.ReadFull(resp.Body, displayDataBytes)
		if err != nil {
			return EPKPreMap{}, err, ErrPreMapRemoteEPKCouldNotRead
		}
// You didn't have to cut me off, make out like it never happened and that we were nothing
// All I wanted was a header part, but you just had to go and give me the whole thing
// Now you're just some obscure web server that I used to know
err = resp.Body.Close()
if err != nil {
return EPKPreMap{}, err, ErrPreMapRemoteEPKCouldNotCloseConnection
}
}
// Now we can map the display data
var preMapEpk EPKPreMap
preMapEpk, err = preMapEpkFromBytes(displayDataBytes, littleEndian, remoteEPK.CompressedSize, tarArchiveOffset)
if err != nil {
return EPKPreMap{}, err, ErrPreMapRemoteEPKCouldNotMapJSON
}
return preMapEpk, nil, nil
}
func handlePublicKeyCheck(exists bool, matchingAuthor bool, matchingFingerprint bool, publicKey []byte, author string, addFingerprintToDB func([]byte, string, bool) error, logger *Logger) error {
if !exists {
if logger.PromptSupported {
response := logger.LogFunc(Log{
Level: "WARN",
Content: "Public key not found in database.\nthe public key fingerprint is: " + author + " " + ByteToFingerprint(publicKey) +
"\nWould you like to trust this key (y/n)?",
Prompt: true,
})
if strings.ToLower(response) == "y" {
err := addFingerprintToDB(publicKey, author, false)
if err != nil {
return err
} else {
logger.LogFunc(Log{
Level: "INFO",
Content: "Public key added to database.",
})
}
} else {
logger.LogFunc(Log{
Level: "INFO",
Content: "Installation cancelled.",
})
}
} else {
logger.LogFunc(Log{
Level: "FATAL",
Content: "Public key not found in database.\nthe public key fingerprint is:" + author + " " + ByteToFingerprint(publicKey) +
"\nSince non-interactive mode is enabled, the installation will not proceed.",
})
}
} else if !matchingAuthor {
if logger.PromptSupported {
response := logger.LogFunc(Log{
Level: "WARN",
Content: "Public key does not match the author.\nthe public key fingerprint is: " + author + " " + ByteToFingerprint(publicKey) +
"\nWould you like to replace the key (y/n)?",
Prompt: true,
})
if strings.ToLower(response) == "y" {
err := addFingerprintToDB(publicKey, author, true)
if err != nil {
return err
} else {
logger.LogFunc(Log{
Level: "INFO",
Content: "Public key replaced in database.",
})
}
} else {
logger.LogFunc(Log{
Level: "FATAL",
Content: "Installation cancelled.",
})
}
} else {
logger.LogFunc(Log{
Level: "FATAL",
Content: "Public key does not match the author.\nThe public key is :" + author + " " + ByteToFingerprint(publicKey) +
"\nSince non-interactive mode is enabled, the installation will not proceed.",
})
}
} else if !matchingFingerprint {
if logger.PromptSupported {
response := logger.LogFunc(Log{
Level: "WARN",
Content: "Public key fingerprint does not match the author.\nThe public key is :" + author + " " + ByteToFingerprint(publicKey) +
"\nThis may be a security risk. To replace the key, type \"Yes, do as I say!\". Otherwise, type anything else.",
Prompt: true,
})
if response == "Yes, do as I say!" {
err := addFingerprintToDB(publicKey, author, true)
if err != nil {
return err
} else {
logger.LogFunc(Log{
Level: "INFO",
Content: "Public key replaced in database.",
})
}
} else {
logger.LogFunc(Log{
Level: "FATAL",
Content: "Installation cancelled.",
})
}
} else {
logger.LogFunc(Log{
Level: "FATAL",
Content: "Public key fingerprint does not match the author.\nthe public key fingerprint is: " + author + " " + ByteToFingerprint(publicKey) +
"\nSince non-interactive mode is enabled, the installation will not proceed.",
})
}
}
return nil
}
var ErrFullyMapMetadataCouldNotRead = errors.New("could not read EPK")
var ErrFullyMapMetadataCouldNotJump = errors.New("could not jump to offset")
var ErrFullyMapMetadataCouldNotAddFingerprint = errors.New("could not add fingerprint")
var ErrFullyMapMetadataCouldNotGetFingerprint = errors.New("could not get fingerprint")
var ErrFullyMapMetadataHasInvalidSignature = errors.New("invalid signature")
var ErrFullyMapMetadataCouldNotMapJSON = errors.New("error mapping metadata")
// FullyMapMetadata maps an EPK file, but is significantly slower than PreMapEPK. Use PreMapEPK if you only need the display data.
// It pulls data from PreMapEPK to reduce the amount of work needed to map the EPK.
func FullyMapMetadata(epkBytes StreamOrBytes, preMap *EPKPreMap, checkFingerprintInDB func([]byte, string) (bool, bool, bool, error), addFingerprintToDB func([]byte, string, bool) error, warnUserAboutNoRange func(*Logger), logger *Logger) (*Metadata, error, error) {
// We define the signature and public key bytes here so that we can read them later
signature := make([]byte, 64)
publicKey := make([]byte, 32)
var connection io.ReadCloser
if epkBytes.IsFileStream {
2024-09-01 12:27:25 -07:00
// Before we continue, check if the signature is valid
// To get the signature, we read from the 12th byte to the 76th byte
_, err := epkBytes.FileStream.ReadAt(signature, 12)
if err != nil {
return &Metadata{}, err, ErrFullyMapMetadataCouldNotRead
}
// To get the public key, we read from the 76th byte to the 108th byte
_, err = epkBytes.FileStream.ReadAt(publicKey, 76)
if err != nil {
return &Metadata{}, err, ErrFullyMapMetadataCouldNotRead
}
} else if epkBytes.IsURL {
// Before we continue, check if the signature is valid
// Fetch range 12 - EOF and read them in
req, err := http.NewRequest("GET", epkBytes.URL, nil)
if err != nil {
return &Metadata{}, err, ErrFullyMapMetadataCouldNotRead
}
req.Header.Set("Range", "bytes=12-")
resp, err := http.DefaultClient.Do(req)
if err != nil {
return &Metadata{}, err, ErrFullyMapMetadataCouldNotRead
}
// Set the connection
connection = resp.Body
// Check the status code
if resp.StatusCode == 200 {
// Not great, not terrible.
// We'll have to cut off the connection early later.
// Warn the user
warnUserAboutNoRange(logger)
// Discard the first 12 bytes
_, err := io.CopyN(io.Discard, connection, 12)
if err != nil {
return &Metadata{}, err, ErrFullyMapMetadataCouldNotRead
}
} else if resp.StatusCode != 206 {
return &Metadata{}, errors.New("unexpected status code: " + strconv.Itoa(resp.StatusCode)), ErrFullyMapMetadataCouldNotRead
}
// Read the signature
_, err = io.ReadFull(connection, signature)
if err != nil {
return &Metadata{}, err, ErrFullyMapMetadataCouldNotRead
}
// Read the public key
_, err = io.ReadFull(connection, publicKey)
if err != nil {
return &Metadata{}, err, ErrFullyMapMetadataCouldNotRead
}
} else {
// Make signature and public key the optimised bytes
signature = epkBytes.Bytes[12:76]
publicKey = epkBytes.Bytes[76:108]
}
// Let's check for the public key in the database
exists, matchingAuthor, matchingFingerprint, err := checkFingerprintInDB(publicKey, preMap.DisplayData.Author)
if err != nil {
return &Metadata{}, err, ErrFullyMapMetadataCouldNotGetFingerprint
} else {
err := handlePublicKeyCheck(exists, matchingAuthor, matchingFingerprint, publicKey, preMap.DisplayData.Author, addFingerprintToDB, logger)
if err != nil {
return &Metadata{}, err, ErrFullyMapMetadataCouldNotAddFingerprint
}
}
// We need to create a new xxHash instance
xxHash := xxhash.New()
if epkBytes.IsFileStream {
// Now we can verify the signature. First, we need to take the checksum of the metadata
// Seeking is better than using ReadAt because it allows us to not have to load the entire file into memory
_, err = epkBytes.FileStream.Seek(int64(ConstMapEPKMetadataOffset), io.SeekStart)
if err != nil {
return &Metadata{}, err, ErrFullyMapMetadataCouldNotJump
}
// Streaming bytes to the hash is more memory efficient
_, err = epkBytes.FileStream.WriteTo(xxHash)
if err != nil {
return &Metadata{}, err, ErrFullyMapMetadataCouldNotRead
}
// Verify the signature (we verify the hash because it's cheaper than verifying the entire EPK)
if !ed25519.Verify(publicKey, xxHash.Sum(nil), signature) {
return &Metadata{}, nil, ErrFullyMapMetadataHasInvalidSignature
}
} else if epkBytes.IsURL {
// Now we can verify the signature. We can just stream the rest of the EPK to the hash
_, err = io.Copy(xxHash, connection)
if err != nil {
return &Metadata{}, err, ErrFullyMapMetadataCouldNotRead
}
// You didn't have to cut me off...
// Don't worry, we are reading to EOF anyway, no matter if we do have a non-range supported server, so we
// (probably) won't upset the server owner.
err = connection.Close()
if err != nil {
return &Metadata{}, err, ErrFullyMapMetadataCouldNotRead
}
// Verify the signature (we verify the hash because it's cheaper than verifying the entire EPK)
if !ed25519.Verify(publicKey, xxHash.Sum(nil), signature) {
return &Metadata{}, nil, ErrFullyMapMetadataHasInvalidSignature
}
} else {
// We now verify the signature in one go without streaming
_, err := xxHash.Write(epkBytes.Bytes[ConstMapEPKMetadataOffset:])
if err != nil {
return &Metadata{}, err, ErrFullyMapMetadataCouldNotRead
}
if !ed25519.Verify(publicKey, xxHash.Sum(nil), signature) {
return &Metadata{}, nil, ErrFullyMapMetadataHasInvalidSignature
}
}
// Great, the EPK is valid. Let's map the metadata.
// We use the metadata map provided by PreMapEPK to reduce the amount of work needed to map the EPK
// First, map SpecialFiles
var parsedSpecialFiles SpecialFiles
specialFilesMap, ok := preMap.MetadataMap["specialFiles"].(map[string]interface{})
if !ok {
return &Metadata{}, errors.New("specialFiles is not an object"), ErrFullyMapMetadataCouldNotMapJSON
}
noDelete, ok := specialFilesMap["noDelete"].([]interface{})
if !ok {
return &Metadata{}, errors.New("noDelete is not an array"), ErrFullyMapMetadataCouldNotMapJSON
}
parsedSpecialFiles.NoDelete, err = interfaceToStringSlice(noDelete, "noDelete")
if err != nil {
return &Metadata{}, err, ErrFullyMapMetadataCouldNotMapJSON
}
noReplace, ok := specialFilesMap["noReplace"].([]interface{})
if !ok {
return &Metadata{}, errors.New("noReplace is not an array"), ErrFullyMapMetadataCouldNotMapJSON
}
parsedSpecialFiles.NoReplace, err = interfaceToStringSlice(noReplace, "noReplace")
if err != nil {
return &Metadata{}, err, ErrFullyMapMetadataCouldNotMapJSON
}
// Declare the parsedMetadata object
var parsedMetadata Metadata
// Append parsedSpecialFiles to parsedMetadata
parsedMetadata.SpecialFiles = parsedSpecialFiles
// Steal some data from the PreMapEPK object
parsedMetadata.Name = preMap.DisplayData.Name
parsedMetadata.Version = preMap.DisplayData.Version
parsedMetadata.Architecture = preMap.DisplayData.Architecture
parsedMetadata.Size = preMap.DisplayData.Size
parsedMetadata.Dependencies = preMap.DisplayData.Dependencies
// Map the metadata
parsedMetadata.Description, ok = preMap.MetadataMap["desc"].(string)
if !ok {
return &Metadata{}, errors.New("description is not a string"), ErrFullyMapMetadataCouldNotMapJSON
}
parsedMetadata.LongDescription, ok = preMap.MetadataMap["longDesc"].(string)
if !ok {
return &Metadata{}, errors.New("longDesc is not a string"), ErrFullyMapMetadataCouldNotMapJSON
}
parsedMetadata.Author, ok = preMap.MetadataMap["author"].(string)
if !ok {
return &Metadata{}, errors.New("author is not a string"), ErrFullyMapMetadataCouldNotMapJSON
}
parsedMetadata.License, ok = preMap.MetadataMap["license"].(string)
if !ok {
return &Metadata{}, errors.New("license is not a string"), ErrFullyMapMetadataCouldNotMapJSON
}
decompressedSizeJSON, ok := preMap.MetadataMap["size"].(json.Number)
if !ok {
return &Metadata{}, errors.New("size is not a number"), ErrFullyMapMetadataCouldNotMapJSON
}
parsedMetadata.DecompressedSize, err = strconv.ParseUint(decompressedSizeJSON.String(), 10, 64)
if err != nil {
return &Metadata{}, err, ErrFullyMapMetadataCouldNotMapJSON
}
return &parsedMetadata, nil, nil
}
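// Callback sketch (illustrative stubs): checkFingerprintInDB reports (exists, matchingAuthor,
// matchingFingerprint, err) for a public key, and addFingerprintToDB stores or replaces a trusted key;
// how they are persisted is entirely up to the caller:
//
//	check := func(key []byte, author string) (bool, bool, bool, error) { return false, false, false, nil }
//	add := func(key []byte, author string, replace bool) error { return nil }
//	metadata, cause, errType := FullyMapMetadata(epk, &preMap, check, add, func(*Logger) {}, logger)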
var ErrInstallEPKCouldNotCreateTempDir = errors.New("could not create temporary directory")
var ErrInstallEPKCouldNotCreateZStandardReader = errors.New("could not create ZStandard reader")
var ErrInstallEPKCouldNotDecompressTarArchive = errors.New("could not decompress tar archive")
var ErrInstallEPKCouldNotCreateDir = errors.New("could not create directory")
var ErrInstallEPKCouldNotStatDir = errors.New("could not stat directory")
var ErrInstallEPKCouldNotStatFile = errors.New("could not stat file")
var ErrInstallEPKCouldNotCreateFile = errors.New("could not create file")
var ErrInstallEPKCouldNotCloseTarReader = errors.New("could not close tar reader")
var ErrInstallEPKCouldNotStatHook = errors.New("could not stat hook")
var ErrInstallEPKCouldNotRunHook = errors.New("could not run hook")
var ErrInstallEPKCouldNotAddEPKToDB = errors.New("could not add EPK to database")
var ErrInstallEPKCouldNotRemoveTempDir = errors.New("could not remove temporary directory")
// ProgressWriter implements a writer that intercepts writes in order to log progress
type ProgressWriter struct {
	Logger   *Logger
	Progress uint64
	Total    uint64
	Writer   io.Writer
}

// Write writes to the underlying writer and logs the cumulative progress
func (writer *ProgressWriter) Write(p []byte) (n int, err error) {
	written, err := writer.Writer.Write(p)
	writer.Progress += uint64(written)
	if writer.Logger.ProgressSupported {
		writer.Logger.LogFunc(Log{
			Level:     "PROGRESS",
			Progress:  writer.Progress,
			Total:     writer.Total,
			Overwrite: true,
		})
	} else {
		writer.Logger.LogFunc(Log{
			Level:   "INFO",
			Content: "Written " + humanize.Bytes(writer.Progress) + " out of " + humanize.Bytes(writer.Total),
			Prompt:  false,
		})
	}
	return written, err
}
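// Usage sketch (illustrative): wrap any destination writer so a plain io.Copy reports its progress through
// the supplied logger:
//
//	progress := &ProgressWriter{Logger: logger, Total: expectedBytes, Writer: destination}
//	_, err := io.Copy(progress, source)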
// InstallEPK installs an EPK file
func InstallEPK(epkBytes StreamOrBytes, metadata *Metadata, preMap *EPKPreMap, addEPKToDB func(*Metadata, []string, []byte, bool, bool, uint64, uint64, string) error, logger *Logger) (string, error, error) {
// Create the temporary directory
tempDir, err := os.MkdirTemp("/tmp", "eon-install-")
if err != nil {
return tempDir, err, ErrInstallEPKCouldNotCreateTempDir
}
var zStandardReader *zstd.Decoder
var connection io.ReadCloser
if epkBytes.IsFileStream {
// Seek to the correct position in the EPK
_, err = epkBytes.FileStream.Seek(int64(preMap.TarOffset), io.SeekStart)
if err != nil {
return "", err, nil
}
// Create a ZStandard reader reading from the EPK
zStandardReader, err = zstd.NewReader(epkBytes.FileStream)
if err != nil {
return tempDir, err, ErrInstallEPKCouldNotCreateZStandardReader
}
} else if epkBytes.IsURL {
// Range header to the tar offset
req, err := http.NewRequest("GET", epkBytes.URL, nil)
if err != nil {
return tempDir, err, ErrInstallEPKCouldNotCreateZStandardReader
}
// Set the range header
req.Header.Set("Range", "bytes="+strconv.FormatUint(preMap.TarOffset, 10)+"-")
// Send the request
resp, err := http.DefaultClient.Do(req)
if err != nil {
return tempDir, err, ErrInstallEPKCouldNotCreateZStandardReader
}
// Set connection to the response body
connection = resp.Body
// Check the status code
if resp.StatusCode == 200 {
// Not great, is terrible in this case, we have to keep reading bytes and discarding them until we reach the offset
// The user will have already been warned about 300 times, so we don't need to warn them again
// God this is painful. Let's give the user a progress bar to make it less painful
_, err := io.CopyN(&ProgressWriter{
Logger: logger,
Total: preMap.TarOffset,
Writer: io.Discard,
}, connection, int64(preMap.TarOffset))
if err != nil {
return tempDir, err, ErrInstallEPKCouldNotDecompressTarArchive
}
} else if resp.StatusCode != 206 {
// Something went wrong
return tempDir, errors.New("unexpected status code: " + strconv.Itoa(resp.StatusCode)), ErrInstallEPKCouldNotCreateZStandardReader
}
// Create a ZStandard reader reading from the EPK
zStandardReader, err = zstd.NewReader(connection)
if err != nil {
return tempDir, err, ErrInstallEPKCouldNotCreateZStandardReader
}
} else {
// Create a ZStandard reader reading from the EPKs in-memory bytes
zStandardReader, err = zstd.NewReader(bytes.NewReader(epkBytes.Bytes[preMap.TarOffset:]))
if err != nil {
return tempDir, err, ErrInstallEPKCouldNotCreateZStandardReader
}
}
// Create a tar reader reading from the ZStandard reader
tarReader := tar.NewReader(zStandardReader)
// Create a goroutine to see how much of the decompressed size we have decompressed
var written uint64
stop := make(chan bool)
	go func() {
		for {
			select {
			case <-stop:
				return
			default:
				if logger.ProgressSupported {
					logger.LogFunc(Log{
						Level:     "PROGRESS",
						Progress:  written,
						Total:     metadata.DecompressedSize,
						Overwrite: true,
					})
				} else {
					logger.LogFunc(Log{
						Level:   "INFO",
						Content: "Decompressed " + humanize.Bytes(written) + " of " + humanize.Bytes(metadata.DecompressedSize),
						Prompt:  false,
					})
				}
				// Sleep regardless of which logging path was taken, so this goroutine does not busy-loop
				time.Sleep(1 * time.Second)
			}
		}
	}()
// Create a slice of the installed files
var installedFiles []string
// Iterate through the tar archive
for {
// Read the next header
		header, err := tarReader.Next()
		// If we are done, stop reading the archive
		if errors.Is(err, io.EOF) {
			break
		}
		// If there was an error, return the error
		if err != nil {
			return tempDir, err, ErrInstallEPKCouldNotDecompressTarArchive
		}
		// This should never happen, but if it does, we should just continue
		if header == nil {
			continue
		}
// Get the target path
var target string
var isHook bool
if strings.HasPrefix(header.Name, "root") {
target = strings.TrimPrefix(header.Name, "root")
} else if strings.HasPrefix(header.Name, "hooks") {
target = filepath.Join(tempDir, header.Name)
isHook = true
} else {
return tempDir, errors.New("invalid path in EPK: " + header.Name), ErrInstallEPKCouldNotDecompressTarArchive
}
switch header.Typeflag {
case tar.TypeDir:
// Check if the directory exists
_, err := os.Stat(target)
if err != nil {
// If the directory does not exist, create it
if errors.Is(err, os.ErrNotExist) {
// All directories are 0755
paths, err := MkdirAllWithPaths(target, 0755)
if err != nil {
return tempDir, err, ErrInstallEPKCouldNotCreateDir
} else {
// Check if the files are in noDelete
for _, file := range metadata.SpecialFiles.NoDelete {
if strings.TrimSuffix(target, "/") == strings.TrimSuffix(file, "/") {
// This file is a special file and should not be deleted
continue
}
}
if !isHook {
// Add the directory to the installed files
installedFiles = append(installedFiles, target)
// Add the paths to the installed files
if paths != nil {
installedFiles = append(installedFiles, paths...)
}
}
}
} else {
return tempDir, err, ErrInstallEPKCouldNotStatDir
}
} else {
// If it does exist, don't touch it
continue
}
case tar.TypeReg:
// Check if the file has anywhere to go
_, err := os.Stat(filepath.Dir(target))
if err != nil {
// No, it doesn't. Create the directory
if errors.Is(err, os.ErrNotExist) {
// We assume 0755 for directories
paths, err := MkdirAllWithPaths(filepath.Dir(target), 0755)
if err != nil {
return tempDir, err, ErrInstallEPKCouldNotCreateDir
} else {
// Check if the files are in noDelete
for _, file := range metadata.SpecialFiles.NoDelete {
if strings.TrimSuffix(target, "/") == strings.TrimSuffix(file, "/") {
// This file is a special file and should not be deleted
continue
}
}
if !isHook {
// Add the directory to the installed files
installedFiles = append(installedFiles, filepath.Dir(target))
// Add the paths to the installed files
if paths != nil {
installedFiles = append(installedFiles, paths...)
}
}
}
} else {
return tempDir, err, ErrInstallEPKCouldNotStatDir
}
}
// Check if the file already exists
_, err = os.Stat(target)
if err != nil {
if errors.Is(err, os.ErrNotExist) {
// Great, the file does not exist. Let's create it.
file, err := os.OpenFile(target, os.O_CREATE|os.O_RDWR|os.O_TRUNC, os.FileMode(header.Mode))
if err != nil {
return tempDir, err, ErrInstallEPKCouldNotCreateFile
}
writtenFile, err := io.Copy(file, tarReader)
if err != nil {
return tempDir, err, ErrInstallEPKCouldNotDecompressTarArchive
}
written += uint64(writtenFile)
err = file.Close()
if err != nil {
return tempDir, err, ErrInstallEPKCouldNotCloseTarReader
} else {
if !isHook {
// Add the file to the installed files
installedFiles = append(installedFiles, target)
}
}
} else {
return tempDir, err, ErrInstallEPKCouldNotStatFile
}
} else {
				// See if it's an upgrade or not
				if preMap.IsUpgrade {
					// Check if it's a special file that should not be replaced on upgrade
					noReplace := false
					for _, file := range metadata.SpecialFiles.NoReplace {
						if strings.TrimSuffix(target, "/") == strings.TrimSuffix(file, "/") {
							noReplace = true
							break
						}
					}
					if noReplace {
						// This file is a special file and should not be replaced
						continue
					}
				}
				// It's not special, so we can replace it
file, err := os.OpenFile(target, os.O_CREATE|os.O_RDWR|os.O_TRUNC, os.FileMode(header.Mode))
if err != nil {
return tempDir, err, ErrInstallEPKCouldNotCreateFile
}
writtenFile, err := io.Copy(file, tarReader)
if err != nil {
return tempDir, err, ErrInstallEPKCouldNotDecompressTarArchive
}
written += uint64(writtenFile)
err = file.Close()
if err != nil {
return tempDir, err, ErrInstallEPKCouldNotCloseTarReader
} else {
if !isHook {
// Add the file to the installed files
installedFiles = append(installedFiles, target)
}
}
}
case tar.TypeSymlink:
// Check if the symlink has anywhere to go
_, err := os.Stat(filepath.Dir(target))
if err != nil {
// No, it doesn't. Create the directory
if errors.Is(err, os.ErrNotExist) {
// We assume 0755 for directories
paths, err := MkdirAllWithPaths(filepath.Dir(target), 0755)
if err != nil {
return tempDir, err, ErrInstallEPKCouldNotCreateDir
} else {
// Check if the files are in noDelete
for _, file := range metadata.SpecialFiles.NoDelete {
if strings.TrimSuffix(target, "/") == strings.TrimSuffix(file, "/") {
// This file is a special file and should not be deleted
continue
}
}
if !isHook {
// Add the directory to the installed files
installedFiles = append(installedFiles, filepath.Dir(target))
// Add the paths to the installed files
if paths != nil {
installedFiles = append(installedFiles, paths...)
}
}
}
} else {
return tempDir, err, ErrInstallEPKCouldNotStatDir
}
}
// Check if the symlink already exists
_, err = os.Lstat(target)
if err != nil {
if errors.Is(err, os.ErrNotExist) {
// Great, the symlink does not exist. Let's create it.
err = os.Symlink(header.Linkname, target)
if err != nil {
return tempDir, err, ErrInstallEPKCouldNotCreateFile
} else {
if !isHook {
// Add the symlink to the installed files
installedFiles = append(installedFiles, target)
}
}
} else {
return tempDir, err, ErrInstallEPKCouldNotStatFile
}
} else {
				// See if it's an upgrade or not
				if preMap.IsUpgrade {
					// Check if it's a special symlink that should not be replaced on upgrade
					noReplace := false
					for _, file := range metadata.SpecialFiles.NoReplace {
						if strings.TrimSuffix(target, "/") == strings.TrimSuffix(file, "/") {
							noReplace = true
							break
						}
					}
					if noReplace {
						// This symlink is a special file and should not be replaced
						continue
					}
				}
				// It's not special, so we can replace it
err = os.Remove(target)
if err != nil {
return tempDir, err, ErrInstallEPKCouldNotCreateFile
}
err = os.Symlink(header.Linkname, target)
if err != nil {
return tempDir, err, ErrInstallEPKCouldNotCreateFile
} else {
if !isHook {
// Add the symlink to the installed files
installedFiles = append(installedFiles, target)
}
}
}
}
}
zStandardReader.Close()
// Close the connection if it's a URL
if epkBytes.IsURL {
err = connection.Close()
if err != nil {
return tempDir, err, ErrInstallEPKCouldNotCloseTarReader
}
}
// Now let's run the hooks
if preMap.IsUpgrade {
_, err := os.Stat(filepath.Join(tempDir, "hooks", "upgrade.sh"))
if err != nil {
if !errors.Is(err, os.ErrNotExist) {
return tempDir, err, ErrInstallEPKCouldNotStatHook
}
} else {
cmd := exec.Command("/bin/sh", filepath.Join(tempDir, "hooks", "upgrade.sh"), metadata.Version.String())
stderr, err := cmd.StderrPipe()
if err != nil {
return tempDir, err, ErrInstallEPKCouldNotRunHook
}
err = cmd.Start()
if err != nil {
return tempDir, err, ErrInstallEPKCouldNotRunHook
}
scanner := bufio.NewScanner(stderr)
scanner.Split(bufio.ScanWords)
for scanner.Scan() {
message := scanner.Text()
logger.LogFunc(Log{
Level: "INFO",
Content: message,
})
}
err = cmd.Wait()
if err != nil {
return tempDir, err, ErrInstallEPKCouldNotRunHook
}
}
} else {
_, err := os.Stat(filepath.Join(tempDir, "hooks", "install.sh"))
if err != nil {
if !errors.Is(err, os.ErrNotExist) {
return tempDir, err, ErrInstallEPKCouldNotStatHook
}
} else {
cmd := exec.Command("/bin/sh", filepath.Join(tempDir, "hooks", "install.sh"), metadata.Version.String())
stderr, err := cmd.StderrPipe()
if err != nil {
return tempDir, err, ErrInstallEPKCouldNotRunHook
}
err = cmd.Start()
if err != nil {
return tempDir, err, ErrInstallEPKCouldNotRunHook
}
scanner := bufio.NewScanner(stderr)
scanner.Split(bufio.ScanWords)
for scanner.Scan() {
message := scanner.Text()
logger.LogFunc(Log{
Level: "INFO",
Content: message,
})
}
err = cmd.Wait()
if err != nil {
return tempDir, err, ErrInstallEPKCouldNotRunHook
}
}
}
	// Do one more double-check to make sure nothing in installedFiles is in noDelete
	// (filter into a fresh slice rather than deleting while iterating, which would skip entries)
	filteredFiles := installedFiles[:0]
	for _, installedFile := range installedFiles {
		keep := true
		for _, file := range metadata.SpecialFiles.NoDelete {
			if strings.TrimSuffix(installedFile, "/") == strings.TrimSuffix(file, "/") {
				// This file is a special file and should not be recorded for deletion
				keep = false
				break
			}
		}
		if keep {
			filteredFiles = append(filteredFiles, installedFile)
		}
	}
	installedFiles = filteredFiles
// Finally, add the EPK and remove script to the database
file, err := os.ReadFile(filepath.Join(tempDir, "hooks", "remove.sh"))
if err != nil {
if !errors.Is(err, os.ErrNotExist) {
return tempDir, err, ErrInstallEPKCouldNotAddEPKToDB
} else {
var err error
if !epkBytes.IsRemote {
err = addEPKToDB(metadata, installedFiles, []byte{}, preMap.DisplayData.IsDependency, false, metadata.Size, metadata.DecompressedSize, "Local file")
} else {
err = addEPKToDB(metadata, installedFiles, []byte{}, preMap.DisplayData.IsDependency, false, metadata.Size, metadata.DecompressedSize, epkBytes.RepositoryName)
}
if err != nil {
return tempDir, err, ErrInstallEPKCouldNotAddEPKToDB
}
}
} else {
var err error
if !epkBytes.IsRemote {
err = addEPKToDB(metadata, installedFiles, file, preMap.DisplayData.IsDependency, true, metadata.Size, metadata.DecompressedSize, "Local file")
} else {
err = addEPKToDB(metadata, installedFiles, file, preMap.DisplayData.IsDependency, true, metadata.Size, metadata.DecompressedSize, epkBytes.RepositoryName)
}
if err != nil {
return tempDir, err, ErrInstallEPKCouldNotAddEPKToDB
}
}
// Remove the temporary directory
err = os.RemoveAll(tempDir)
if err != nil {
return tempDir, err, ErrInstallEPKCouldNotRemoveTempDir
}
stop <- true
logger.LogFunc(Log{
Level: "PROGRESS",
Progress: 1,
Total: 1,
Overwrite: true,
})
return "", nil, nil
}
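// Callback sketch (illustrative): judging by the call sites above, addEPKToDB receives the parsed metadata,
// the list of installed paths, the contents of hooks/remove.sh (empty when absent), whether the package was
// installed as a dependency, whether a remove script exists, the compressed and decompressed sizes, and the
// source ("Local file" for local installs, otherwise the repository name):
//
//	addEPKToDB := func(m *Metadata, files []string, removeScript []byte, isDependency bool, hasRemoveScript bool,
//		compressedSize uint64, decompressedSize uint64, source string) error {
//		return nil // persist to the package database here
//	}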
var ErrAddRepositoryCouldNotCreateRequest = errors.New("could not create request")
var ErrAddRepositoryCouldNotSendRequest = errors.New("could not send request")
var ErrAddRepositoryHasUnexpectedStatusCode = errors.New("unexpected status code")
var ErrAddRepositoryCouldNotReadResponse = errors.New("could not read response")
var ErrAddRepositoryHasInvalidMagic = errors.New("invalid magic")
var ErrAddRepositoryCouldNotHash = errors.New("could not write to hash")
var ErrAddRepositoryCouldNotUnmarshalMetadata = errors.New("could not unmarshal metadata")
var ErrAddRepositoryCouldNotGetFingerprint = errors.New("could not get fingerprint")
var ErrAddRepositoryCouldNotAddFingerprint = errors.New("could not add fingerprint")
var ErrAddRepositoryHasInvalidMetadata = errors.New("invalid metadata")
var ErrAddRepositoryCouldNotAddPackage = errors.New("could not add package to database")
var ErrAddRepositoryHasRepositoryExists = errors.New("repository already exists")
var ErrAddRepositoryCouldNotAddRepository = errors.New("could not add repository to database")
// AddRepository adds a repository to the database
func AddRepository(url string, addRepositoryToDB func(Repository, bool) error, getFingerprintFromDB func([]byte, string) (bool, bool, bool, error), addFingerprintToDB func([]byte, string, bool) error, addRemotePackageToDB func(RemoteEPK) error, checkRepositoryInDB func(string) (bool, error), forceReplace bool, logger *Logger) (string, error, error) {
	// First, fetch the first 3 bytes (range 0-2) of /repository.erf
	// Then, check if those bytes are "eon"
	// Create the request
	magicRequest, err := http.NewRequest("GET", url+"/repository.erf", nil)
	if err != nil {
		return "", err, ErrAddRepositoryCouldNotCreateRequest
	}
	// Add the range header
	magicRequest.Header.Add("Range", "bytes=0-2")
// Send the request
magicResponse, err := http.DefaultClient.Do(magicRequest)
if err != nil {
return "", err, ErrAddRepositoryCouldNotSendRequest
}
// Check if the status code is 206
var hasEntireFile bool
if magicResponse.StatusCode != 206 {
if magicResponse.StatusCode == 200 {
// This web server does not support range requests, meaning we now have the entire file.
// Mark it as such.
hasEntireFile = true
} else {
return "", errors.New("status code " + strconv.Itoa(magicResponse.StatusCode)), ErrAddRepositoryHasUnexpectedStatusCode
}
}
	// Check the magic bytes (ReadFull guards against short reads)
	var magicBytes = make([]byte, 3)
	_, err = io.ReadFull(magicResponse.Body, magicBytes)
	if err != nil {
		return "", err, ErrAddRepositoryCouldNotReadResponse
	}
// Check if the magic bytes are "eon"
if string(magicBytes) != "eon" {
return "", nil, ErrAddRepositoryHasInvalidMagic
}
// Great. We either confirmed the repository is an Eon repository or we have the entire file.
var fullFetch *http.Response
if !hasEntireFile {
// Download the rest of the file
var err error
fullFetch, err = http.Get(url + "/repository.erf")
if err != nil {
return "", err, ErrAddRepositoryCouldNotSendRequest
}
} else {
fullFetch = magicResponse
}
	// Now we get the contents of the file
	contents, err := io.ReadAll(fullFetch.Body)
	if err != nil {
		return "", err, ErrAddRepositoryCouldNotReadResponse
	}
	// If the server sent the whole file in the first response, the 3 magic bytes have already been consumed
	// from that body, so put them back so the signature, key, and metadata offsets below stay correct
	if hasEntireFile {
		contents = append(magicBytes, contents...)
	}
// Verify the file's signature
// Unmarshal the repository metadata, which is NOT the same as the EPK metadata
var repositoryMetadata map[string]interface{}
// We use a decoder instead of unmarshal here because we need to use JSON numbers: float64 is not enough
var jsonDecoder = json.NewDecoder(bytes.NewReader(contents[99:]))
jsonDecoder.UseNumber()
err = jsonDecoder.Decode(&repositoryMetadata)
if err != nil {
return "", err, ErrAddRepositoryCouldNotUnmarshalMetadata
}
// Get the public key and signature
signature := contents[3:67]
publicKey := contents[67:99]
// Look for the public key in the database
exists, matchingAuthor, matchingFingerprint, err := getFingerprintFromDB(publicKey, repositoryMetadata["author"].(string))
if err != nil {
return "", err, ErrAddRepositoryCouldNotGetFingerprint
} else {
err := handlePublicKeyCheck(exists, matchingAuthor, matchingFingerprint, publicKey, repositoryMetadata["author"].(string), addFingerprintToDB, logger)
if err != nil {
return "", err, ErrAddRepositoryCouldNotAddFingerprint
}
}
// We need to create a new xxHash instance
xxHash := xxhash.New()
_, err = xxHash.Write(contents[99:])
if err != nil {
return "", err, ErrAddRepositoryCouldNotHash
}
// Verify the signature
if !ed25519.Verify(publicKey, xxHash.Sum(nil), signature) {
return "", errors.New("invalid signature"), ErrAddRepositoryHasInvalidMetadata
}
// Now we can create the repository object
var repository Repository
var ok bool
repository.URL = url
repository.Name, ok = repositoryMetadata["name"].(string)
if !ok {
return "", errors.New("name is not a string"), ErrAddRepositoryHasInvalidMetadata
}
// In force replace mode, we don't check if the repository already exists and just replace it
if !forceReplace {
// Side quest: check if the repository already exists
repoExists, err := checkRepositoryInDB(repository.Name)
if err != nil {
return "", err, ErrAddRepositoryCouldNotAddRepository
} else if repoExists {
return "", nil, ErrAddRepositoryHasRepositoryExists
}
}
repository.Description, ok = repositoryMetadata["desc"].(string)
if !ok {
return "", errors.New("desc is not a string"), ErrAddRepositoryHasInvalidMetadata
}
repository.Owner, ok = repositoryMetadata["author"].(string)
if !ok {
return "", errors.New("author is not a string"), ErrAddRepositoryHasInvalidMetadata
}
// Write the contents of the repository to the database
packageList, ok := repositoryMetadata["packages"].([]interface{})
if !ok {
return "", errors.New("packages is not an array"), ErrAddRepositoryHasInvalidMetadata
}
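// Each entry of the "packages" array must carry the fields read below; an illustrative
// (entirely made-up) entry looks roughly like this:
//
//	{
//	  "name": "example", "author": "someone", "desc": "An example package",
//	  "arch": "x86_64", "version": "1.2.3", "size": 123456,
//	  "deps": ["other-package"], "hash": 1234567890123456789,
//	  "path": "/packages/example-1.2.3-x86_64.epk"
//	}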
var remoteEPKs []RemoteEPK
for _, epk := range packageList {
epk, ok := epk.(map[string]interface{})
if !ok {
return "", errors.New("package is not an object"), ErrAddRepositoryHasInvalidMetadata
}
name, ok := epk["name"].(string)
if !ok {
return "", errors.New("package name is not a string"), ErrAddRepositoryHasInvalidMetadata
}
author, ok := epk["author"].(string)
if !ok {
return "", errors.New("package author is not a string"), ErrAddRepositoryHasInvalidMetadata
}
arch, ok := epk["arch"].(string)
if !ok {
return "", errors.New("package arch is not a string"), ErrAddRepositoryHasInvalidMetadata
}
versionString, ok := epk["version"].(string)
if !ok {
return "", errors.New("package version is not a string"), ErrAddRepositoryHasInvalidMetadata
}
versionPointer, err := semver.NewVersion(versionString)
if err != nil {
return "", errors.New("package version is not a valid semver version"), ErrAddRepositoryHasInvalidMetadata
}
sizeNumber, ok := epk["size"].(json.Number)
if !ok {
return "", errors.New("package size is not a number"), ErrAddRepositoryHasInvalidMetadata
}
size, err := strconv.ParseUint(sizeNumber.String(), 10, 64)
if err != nil {
return "", errors.New("package size is not a valid number"), ErrAddRepositoryHasInvalidMetadata
}
dependenciesInterface, ok := epk["deps"].([]interface{})
if !ok {
return "", errors.New("package dependencies is not an array"), ErrAddRepositoryHasInvalidMetadata
}
dependencies, err := interfaceToStringSlice(dependenciesInterface, "dependencies")
if err != nil {
return "", err, ErrAddRepositoryHasInvalidMetadata
}
hashJsonNumber, ok := epk["hash"].(json.Number)
if !ok {
return "", errors.New("package hash is not a number"), ErrAddRepositoryHasInvalidMetadata
}
var hash uint64
hash, err = strconv.ParseUint(hashJsonNumber.String(), 10, 64)
if err != nil {
return "", errors.New("package hash is not a valid number"), ErrAddRepositoryHasInvalidMetadata
}
path, ok := epk["path"].(string)
if !ok {
return "", errors.New("package path is not a string"), ErrAddRepositoryHasInvalidMetadata
}
description, ok := epk["desc"].(string)
if !ok {
return "", errors.New("package description is not a string"), ErrAddRepositoryHasInvalidMetadata
}
remoteEPKs = append(remoteEPKs, RemoteEPK{
Name: name,
Author: author,
Description: description,
Version: *versionPointer,
Architecture: arch,
CompressedSize: size,
Dependencies: dependencies,
Path: path,
Arch: arch,
EPKHash: hash,
Repository: repository,
})
}
// We only add the packages once the whole list has parsed successfully, so an error above doesn't leave half-added packages to clean up
for _, epk := range remoteEPKs {
err := addRemotePackageToDB(epk)
if err != nil {
return "", err, ErrAddRepositoryCouldNotAddPackage
}
}
// Add the repository to the database
err = addRepositoryToDB(repository, forceReplace)
if err != nil {
return "", err, ErrAddRepositoryCouldNotAddRepository
}
return repository.Name, nil, nil
}
var ErrRemoveRepositoryDoesNotExist = errors.New("repository does not exist")
var ErrRemoveRepositoryCouldNotFindRepository = errors.New("could not check for repository")
var ErrRemoveRepositoryCouldNotRemoveRepository = errors.New("could not remove repository")
var ErrRemoveRepositoryCouldNotRemoveRepositoryFromDB = errors.New("could not remove repository from database")
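
// RemoveRepository removes the named repository: it purges the repository's download
// cache under /var/cache/eon/repositories/ and then deletes the repository from the
// database via the provided callbacks. It returns the underlying error and a
// categorized ErrRemoveRepository* error.
//
// A minimal wiring sketch (db.RemoveRepository, db.RepositoryExists and cliLogger are
// hypothetical caller-side helpers, not part of this package):
//
//	underlying, categorized := RemoveRepository("example-repo", db.RemoveRepository, db.RepositoryExists, cliLogger)
//	if categorized != nil {
//		// report categorized to the user; underlying carries the details
//	}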
func RemoveRepository(repository string, removeRepositoryFromDB func(string) error, checkRepositoryInDB func(string) (bool, error), logger *Logger) (error, error) {
// First check if the repository exists
exists, err := checkRepositoryInDB(repository)
2024-09-01 12:27:25 -07:00
if err != nil {
return err, ErrRemoveRepositoryCouldNotFindRepository
}
if !exists {
return nil, ErrRemoveRepositoryDoesNotExist
}
// Purge the download cache
err = os.RemoveAll(filepath.Join("/var/cache/eon/repositories/", repository))
if err != nil {
return err, ErrRemoveRepositoryCouldNotRemoveRepository
}
// Remove the repository from the database
err = removeRepositoryFromDB(repository)
if err != nil {
return err, ErrRemoveRepositoryCouldNotRemoveRepositoryFromDB
}
// Alright, we're done here.
logger.LogFunc(Log{
Level: "INFO",
Content: "Removed repository " + repository + " from the database.",
})
return nil, nil
}
var ErrRemoveEPKCouldNotFindEPK = errors.New("could not get EPK from database")
var ErrRemoveEPKCouldNotCreateTempFile = errors.New("could not create temporary file")
var ErrRemoveEPKCouldNotWriteTempFile = errors.New("could not write to temporary file")
var ErrRemoveEPKCouldNotRunRemoveHook = errors.New("could not run remove hook")
var ErrRemoveEPKCouldNotRemoveEPKFromDB = errors.New("could not remove EPK from database")
var ErrRemoveEPKCouldNotRemoveFiles = errors.New("could not remove files")
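
// RemoveEPK uninstalls the named package: it fetches the package's remove hook and list
// of installed paths via getEPKRemoveInfoFromDB, runs the hook with /bin/sh, removes the
// package from the database, and then deletes the installed paths, skipping any path that
// still contains files not recorded as belonging to the package. It returns the underlying
// error and a categorized ErrRemoveEPK* error.
//
// A minimal wiring sketch (db.RemoveEPK, db.GetEPKRemoveInfo and cliLogger are
// hypothetical caller-side helpers, not part of this package):
//
//	underlying, categorized := RemoveEPK("example", db.RemoveEPK, db.GetEPKRemoveInfo, cliLogger)
//	if categorized != nil {
//		// report categorized to the user; underlying carries the details
//	}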
func RemoveEPK(name string, removeEPKFromDB func(string) error, getEPKRemoveInfoFromDB func(name string) (string, []string, error), logger *Logger) (error, error) {
// Try to fetch the EPK from the database
removeScript, installedPaths, err := getEPKRemoveInfoFromDB(name)
if err != nil {
return err, ErrRemoveEPKCouldNotFindEPK
}
// Save the remove script to a temporary file
removeScriptFile, err := os.CreateTemp("", "eon-remove-*.sh")
if err != nil {
return err, ErrRemoveEPKCouldNotCreateTempFile
}
// Write the remove script to the file
_, err = removeScriptFile.Write([]byte(removeScript))
if err != nil {
return err, ErrRemoveEPKCouldNotWriteTempFile
}
// Run the remove script
cmd := exec.Command("/bin/sh", removeScriptFile.Name())
stderr, err := cmd.StderrPipe()
if err != nil {
return err, ErrRemoveEPKCouldNotRunRemoveHook
}
// Start the command
err = cmd.Start()
if err != nil {
return err, ErrRemoveEPKCouldNotRunRemoveHook
}
// Read the output
scanner := bufio.NewScanner(stderr)
scanner.Split(bufio.ScanWords)
for scanner.Scan() {
message := scanner.Text()
logger.LogFunc(Log{
Level: "INFO",
Content: message,
})
}
// Wait for the remove hook to finish; Start must always be paired with Wait so the process is reaped and its exit status is checked
err = cmd.Wait()
if err != nil {
return err, ErrRemoveEPKCouldNotRunRemoveHook
}
// Close the temporary script file
err = removeScriptFile.Close()
if err != nil {
return err, ErrRemoveEPKCouldNotRunRemoveHook
}
// Remove the EPK from the database
err = removeEPKFromDB(name)
if err != nil {
return err, ErrRemoveEPKCouldNotRemoveEPKFromDB
}
// Remove the installed files
// errPathNotInstalled signals that a walked path contains something that is not part of this EPK
var errPathNotInstalled = errors.New("path is not in installedPaths")
for _, path := range installedPaths {
// Check if there is anything under the path that is not in installedPaths
// If there is, we should not remove the directory
err := filepath.Walk(path, func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
// Check if the path is in installedPaths
for _, installedPath := range installedPaths {
if path == installedPath {
return nil
}
}
// If it's not, stop the walk with the sentinel error
return errPathNotInstalled
})
if err != nil {
if errors.Is(err, errPathNotInstalled) {
// The path contains files that are not in installedPaths, so we should not remove it
continue
} else if !errors.Is(err, os.ErrNotExist) {
// Something else went wrong
return err, ErrRemoveEPKCouldNotRemoveFiles
}
} else {
// Everything under the path belongs to this EPK, so we can remove it
err := os.RemoveAll(path)
if err != nil {
if !errors.Is(err, os.ErrNotExist) {
return err, ErrRemoveEPKCouldNotRemoveFiles
} else {
// The file does not exist - we must have deleted its parent directory or the user has done our job for us
continue
}
}
}
}
return nil, nil
}