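// Package lib implements the core of the Eon package manager: pre-mapping and fully
// mapping EPK packages, verifying their ed25519 signatures, installing and removing
// them, and adding or removing package repositories.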
package lib

import (
	"bufio"
	"bytes"
	"errors"
	"io"
	"os"
	"strconv"
	"strings"
	"time"

	"archive/tar"
	"crypto/ed25519"
	"encoding/binary"
	"encoding/hex"
	"encoding/json"
	"math/big"
	"net/http"
	"net/url"
	"os/exec"
	"path/filepath"

	"github.com/Masterminds/semver"
	"github.com/cespare/xxhash/v2"
	"github.com/dustin/go-humanize"
	"github.com/klauspost/compress/zstd"
)

// RemoteEPK is a struct that contains the metadata of an EPK from a remote repository
type RemoteEPK struct {
	Repository     Repository
	Name           string
	Author         string
	Description    string
	Version        semver.Version
	Architecture   string
	CompressedSize int64
	Dependencies   []string
	Path           string
	Arch           string
	EPKHash        uint64
}

// Repository is a struct that contains the repository information
type Repository struct {
	Name        string
	URL         string
	Owner       string
	Description string
}

// SpecialFiles is a struct that contains the special files that are not to be deleted or replaced
type SpecialFiles struct {
	NoDelete  []string
	NoReplace []string
}

// Metadata is a struct that contains the metadata of the package
type Metadata struct {
	Name             string
	Description      string
	LongDescription  string
	Version          semver.Version
	Author           string
	License          string
	Architecture     string
	Dependencies     []string
	SpecialFiles     SpecialFiles
	Size             int64
	DecompressedSize *big.Int
}

// EPKPreMap is a struct that contains the metadata of the EPK
type EPKPreMap struct {
	DisplayData    DisplayData
	MetadataMap    map[string]interface{}
	IsLittleEndian bool
	IsUpgrade      bool
	TarOffset      int64
}

// DisplayData is a struct that contains the display data of the EPK
type DisplayData struct {
	Name             string
	Author           string
	Architecture     string
	Description      string
	Version          semver.Version
	Size             int64
	DecompressedSize *big.Int
	Dependencies     []string
}

// PotentiallyNullEPKPreMap is an EPKPreMap that can be nil
type PotentiallyNullEPKPreMap struct {
	EPKPreMap *EPKPreMap
	Null      bool
}

// Log is a struct that contains the log information
type Log struct {
	Level     string
	Content   string
	Prompt    bool
	PlaySound bool
	Progress  *big.Int
	Total     *big.Int
	Overwrite bool
}

// StreamOrBytes is a struct that contains either a stream or bytes, allowing optimising for memory or speed
type StreamOrBytes struct {
	FileStream     *os.File
	RepositoryName string
	URL            string
	Bytes          []byte
	IsURL          bool
	IsRemote       bool
	IsFileStream   bool
}

// Logger is a struct that contains the functions and properties of the logger
type Logger struct {
	LogFunc           func(Log) string
	PromptSupported   bool
	ProgressSupported bool
}

// Epk is a struct that contains the metadata and the tar archive of the EPK
type Epk struct {
	Metadata   Metadata
	TarArchive []byte
}

// interfaceToStringSlice converts an interface slice to a string slice
func interfaceToStringSlice(interfaceSlice []interface{}, interfaceName string) ([]string, error) {
	// Yes, it's meant to be empty and not nil: JSON arrays are empty, not nil
	//goland:noinspection GoPreferNilSlice
	stringSlice := []string{}
	for _, interfaceValue := range interfaceSlice {
		stringValue, ok := interfaceValue.(string)
		if !ok {
			return nil, errors.New(interfaceName + " are not strings")
		}
		stringSlice = append(stringSlice, stringValue)
	}

	return stringSlice, nil
}

// ByteToFingerprint converts a byte slice to an Eon fingerprint, which is similar to a legacy-style OpenSSH fingerprint
func ByteToFingerprint(input []byte) string {
	xxHashWriter := xxhash.New()
	_, _ = xxHashWriter.Write(input)
	// Sum the hasher we actually wrote to, not a fresh (empty) one
	inputString := hex.EncodeToString(xxHashWriter.Sum(nil))
	var result []string
	var previousChar rune
	for index, char := range inputString {
		if index%2 == 1 {
			// Pair every two hex characters, OpenSSH-style: aa:bb:cc:...
			result = append(result, string(previousChar)+string(char))
		} else {
			previousChar = char
		}
	}
	return strings.Join(result, ":")
}

// MkdirAllWithPaths mimics os.MkdirAll but returns the created directories
func MkdirAllWithPaths(path string, perm os.FileMode) ([]string, error) {
	// Make sure to return absolute paths
	absPath, err := filepath.Abs(path)
	if err != nil {
		return nil, err
	}

	// Split the path into individual directories
	var dirs []string
	currentPath := absPath
	for currentPath != "/" {
		dirs = append([]string{currentPath}, dirs...)
		currentPath = filepath.Dir(currentPath)
	}

	// Slice to hold the created directory paths
	var createdDirs []string

	// Iterate through each directory and create if not exists
	for _, dir := range dirs {
		_, err := os.Stat(dir)
		if errors.Is(err, os.ErrNotExist) {
			// Directory doesn't exist, create it
			err := os.Mkdir(dir, perm)
			if err != nil {
				return createdDirs, err
			}
			// Append created directory's absolute path
			createdDirs = append(createdDirs, dir)
		}
	}

	return createdDirs, nil
}

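// preMapEpkFromBytes parses the JSON display metadata of an EPK and fills in an EPKPreMap
// with the name, author, version, architecture, sizes, and dependencies.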
func preMapEpkFromBytes(metaDataBytes []byte, littleEndian bool, size int64, offset int64) (EPKPreMap, error) {
	// Unmarshal the JSON
	var displayDataMap map[string]interface{}
	err := json.Unmarshal(metaDataBytes, &displayDataMap)
	if err != nil {
		return EPKPreMap{}, errors.New("metadata is not valid JSON")
	}

	// Declare the parsedDisplayData object
	var parsedDisplayData EPKPreMap

	// Add some of our data so that the full EPK can be mapped with less effort
	parsedDisplayData.MetadataMap = displayDataMap
	parsedDisplayData.IsLittleEndian = littleEndian
	parsedDisplayData.TarOffset = offset
	parsedDisplayData.DisplayData.Size = size

	// Map the display data
	var ok bool

	// Set the size
	sizeBigInt, ok := displayDataMap["size"].(string)
	if !ok {
		return EPKPreMap{}, errors.New("size is not a string")
	}
	parsedDisplayData.DisplayData.DecompressedSize = new(big.Int)
	parsedDisplayData.DisplayData.DecompressedSize.SetString(sizeBigInt, 10)

	// Set the name, author, version, arch, and dependencies
	parsedDisplayData.DisplayData.Name, ok = displayDataMap["name"].(string)
	if !ok {
		return EPKPreMap{}, errors.New("name is not a string")
	}
	parsedDisplayData.DisplayData.Author, ok = displayDataMap["author"].(string)
	if !ok {
		return EPKPreMap{}, errors.New("author is not a string")
	}
	versionString, ok := displayDataMap["version"].(string)
	if !ok {
		return EPKPreMap{}, errors.New("version is not a string")
	}
	versionPointer, err := semver.NewVersion(versionString)
	if err != nil {
		return EPKPreMap{}, err
	}
	parsedDisplayData.DisplayData.Version = *versionPointer
	parsedDisplayData.DisplayData.Architecture, ok = displayDataMap["arch"].(string)
	if !ok {
		return EPKPreMap{}, errors.New("arch is not a string")
	}
	dependencies, ok := displayDataMap["deps"].([]interface{})
	if !ok {
		return EPKPreMap{}, errors.New("dependencies is not an array")
	}
	parsedDisplayData.DisplayData.Dependencies, err = interfaceToStringSlice(dependencies, "dependencies")
	if err != nil {
		return EPKPreMap{}, err
	}

	return parsedDisplayData, nil
}

// ConstMapEPKMetadataOffset is the offset of the metadata in the EPK: 3 magic bytes, 1 endian byte, 8 offset bytes, 64 signature bytes, and 32 public key bytes
var ConstMapEPKMetadataOffset int64 = 108

var ErrPreMapEPKCouldNotRead = errors.New("could not read EPK")
var ErrPreMapEPKHasNetworkStream = errors.New("network streams are not supported")
var ErrPreMapEPKHasNotGotEPK = errors.New("has not got an EPK")
var ErrPreMapEPKHasInvalidEndian = errors.New("has invalid endian")
var ErrPreMapEPKCouldNotMapJSON = errors.New("could not map metadata")

// PreMapEPK maps enough data to create the display summary of an EPK
func PreMapEPK(epkBytes StreamOrBytes, epkSize int64) (EPKPreMap, error, error) {
	// Say that we don't support network streams
	if epkBytes.IsURL {
		return EPKPreMap{}, nil, ErrPreMapEPKHasNetworkStream
	}

	// First, we need to check if it even is an EPK by checking the first 3 magic bytes
	if epkBytes.IsFileStream {
		var magicBytes = make([]byte, 3)
		_, err := epkBytes.FileStream.ReadAt(magicBytes, 0)
		if err != nil {
			return EPKPreMap{}, err, ErrPreMapEPKCouldNotRead
		}
		if string(magicBytes) != "epk" {
			return EPKPreMap{}, nil, ErrPreMapEPKHasNotGotEPK
		}
	} else {
		if string(epkBytes.Bytes[0:3]) != "epk" {
			return EPKPreMap{}, nil, ErrPreMapEPKHasNotGotEPK
		}
	}

	// Let's determine the endian-ness of the EPK via the endian byte (offset 3)
	var littleEndian bool
	if epkBytes.IsFileStream {
		var littleEndianByte = make([]byte, 1)
		_, err := epkBytes.FileStream.ReadAt(littleEndianByte, 3)
		if err != nil {
			return EPKPreMap{}, err, ErrPreMapEPKCouldNotRead
		}
		if littleEndianByte[0] == 0x6C {
			littleEndian = true
		} else if littleEndianByte[0] == 0x62 {
			littleEndian = false
		} else {
			return EPKPreMap{}, nil, ErrPreMapEPKHasInvalidEndian
		}
	} else {
		if epkBytes.Bytes[3] == 0x6C {
			littleEndian = true
		} else if epkBytes.Bytes[3] == 0x62 {
			littleEndian = false
		} else {
			return EPKPreMap{}, nil, ErrPreMapEPKHasInvalidEndian
		}
	}

	// Now we can get the offset of the tar archive
	var tarArchiveOffset int64
	if epkBytes.IsFileStream {
		var tarArchiveOffsetBytes = make([]byte, 8)
		_, err := epkBytes.FileStream.ReadAt(tarArchiveOffsetBytes, 4)
		if err != nil {
			return EPKPreMap{}, err, ErrPreMapEPKCouldNotRead
		}
		if littleEndian {
			tarArchiveOffset = int64(binary.LittleEndian.Uint64(tarArchiveOffsetBytes))
		} else {
			tarArchiveOffset = int64(binary.BigEndian.Uint64(tarArchiveOffsetBytes))
		}
	} else {
		if littleEndian {
			tarArchiveOffset = int64(binary.LittleEndian.Uint64(epkBytes.Bytes[4:12]))
		} else {
			tarArchiveOffset = int64(binary.BigEndian.Uint64(epkBytes.Bytes[4:12]))
		}
	}

	// We don't need to validate the signature yet. We will do that when we map the full EPK, because it means
	// we have to read the entire thing, which is a waste of resources, since we only need the metadata.
	var preMapEpk EPKPreMap
	if epkBytes.IsFileStream {
		var metadataBuffer = make([]byte, tarArchiveOffset-ConstMapEPKMetadataOffset)
		_, err := epkBytes.FileStream.ReadAt(metadataBuffer, ConstMapEPKMetadataOffset)
		if err != nil {
			return EPKPreMap{}, err, ErrPreMapEPKCouldNotRead
		}
		preMapEpk, err = preMapEpkFromBytes(metadataBuffer, littleEndian, epkSize, tarArchiveOffset)
		if err != nil {
			return EPKPreMap{}, err, ErrPreMapEPKCouldNotMapJSON
		}
	} else {
		var err error
		preMapEpk, err = preMapEpkFromBytes(epkBytes.Bytes[ConstMapEPKMetadataOffset:tarArchiveOffset], littleEndian, epkSize, tarArchiveOffset)
		if err != nil {
			return EPKPreMap{}, err, ErrPreMapEPKCouldNotMapJSON
		}
	}

	return preMapEpk, nil, nil
}

var ErrPreMapRemoteEPKCouldNotCreateURL = errors.New("could not create URL")
var ErrPreMapRemoteEPKCouldNotCreateRequest = errors.New("could not create request")
var ErrPreMapRemoteEPKCouldNotSendRequest = errors.New("could not send request")
var ErrPreMapRemoteEPKCouldNotRead = errors.New("could not read EPK")
var ErrPreMapRemoteEPKCouldNotCloseConnection = errors.New("could not close connection")
var ErrPreMapRemoteEPKUnexpectedStatusCode = errors.New("unexpected status code")
var ErrPreMapEPKHasNotGotEPKMagic = errors.New("not an EPK")
var ErrPreMapRemoteEPKInvalidEndian = errors.New("invalid endian")
var ErrPreMapRemoteEPKCouldNotMapJSON = errors.New("error mapping metadata")

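// PreMapRemoteEPK fetches just enough of a remote EPK (the header and the display metadata)
// to build an EPKPreMap, using HTTP range requests when the server supports them.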
func PreMapRemoteEPK(remoteEPK RemoteEPK, logger *Logger) (EPKPreMap, error, error) {
	// Fetch the first 12 bytes of the EPK - this contains the magic, endian, and offset
	// We use the Range header to only fetch the first 12 bytes
	packageUrl, err := url.JoinPath(remoteEPK.Repository.URL, remoteEPK.Path)
	if err != nil {
		return EPKPreMap{}, err, ErrPreMapRemoteEPKCouldNotCreateURL
	}
	req, err := http.NewRequest("GET", packageUrl, nil)
	if err != nil {
		return EPKPreMap{}, err, ErrPreMapRemoteEPKCouldNotCreateRequest
	}
	req.Header.Set("Range", "bytes=0-11")
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return EPKPreMap{}, err, ErrPreMapRemoteEPKCouldNotSendRequest
	}

	// Check if the status code is 206 (partial content)
	var epkHeaderBytes = make([]byte, 12)
	var rangeSupported bool
	if resp.StatusCode == 200 {
		// We have the entire file. Not great, not terrible.
		// We'll have to cut off the connection early later. To optimise things slightly, we'll reuse this connection
		// to read the metadata later.

		// I'm deadly serious about the radiation. It could cause a bit flip causing the Range header to be malformed.
		// It's amazing how many times I error handled for this, and I hope I can save someone from cancer one day.
		logger.LogFunc(Log{
			Level: "INFO",
			Content: "The server does not support range requests. The installation process will be significantly slower. " +
				"Is the repository owner using python3's SimpleHTTPServer or similar? If so, please use a proper web " +
				"server like Nginx, Ailur HTTP Server, or Apache. If not, please report this to the repository owner or " +
				"check for sources of radiation around your computer.",
		})

		_, err := io.ReadFull(resp.Body, epkHeaderBytes)
		if err != nil {
			return EPKPreMap{}, err, ErrPreMapRemoteEPKCouldNotRead
		}
		rangeSupported = false
	} else if resp.StatusCode == 206 {
		// Great, everything is working as expected.
		_, err := io.ReadFull(resp.Body, epkHeaderBytes)
		if err != nil {
			return EPKPreMap{}, err, ErrPreMapRemoteEPKCouldNotRead
		}
		rangeSupported = true
		// Close the connection
		err = resp.Body.Close()
		if err != nil {
			return EPKPreMap{}, err, ErrPreMapRemoteEPKCouldNotCloseConnection
		}
	} else {
		// Something went wrong
		return EPKPreMap{}, errors.New("unexpected status code: " + strconv.Itoa(resp.StatusCode)), ErrPreMapRemoteEPKUnexpectedStatusCode
	}

	// Now we verify the magic bytes
	if string(epkHeaderBytes[0:3]) != "epk" {
		return EPKPreMap{}, nil, ErrPreMapEPKHasNotGotEPKMagic
	}

	// Let's determine the endian-ness of the EPK via the endian byte (offset 3)
	var littleEndian bool
	if epkHeaderBytes[3] == 0x6C {
		littleEndian = true
	} else if epkHeaderBytes[3] == 0x62 {
		littleEndian = false
	} else {
		return EPKPreMap{}, nil, ErrPreMapRemoteEPKInvalidEndian
	}

	// Now we can get the offset of the tar archive
	var tarArchiveOffset int64
	if littleEndian {
		tarArchiveOffset = int64(binary.LittleEndian.Uint64(epkHeaderBytes[4:12]))
	} else {
		tarArchiveOffset = int64(binary.BigEndian.Uint64(epkHeaderBytes[4:12]))
	}

	// No signature verification for you

	// Let's fetch the display data bytes
	displayDataBytes := make([]byte, tarArchiveOffset-ConstMapEPKMetadataOffset)
	if rangeSupported {
		// Send another request to fetch the display data
		req.Header.Set("Range", "bytes=108-"+strconv.FormatInt(tarArchiveOffset-1, 10))
		resp, err = http.DefaultClient.Do(req)
		if err != nil {
			return EPKPreMap{}, err, ErrPreMapRemoteEPKCouldNotSendRequest
		}
		// Read the display data
		_, err = io.ReadFull(resp.Body, displayDataBytes)
		if err != nil {
			return EPKPreMap{}, err, ErrPreMapRemoteEPKCouldNotRead
		}
		// Close the connection
		err = resp.Body.Close()
		if err != nil {
			return EPKPreMap{}, err, ErrPreMapRemoteEPKCouldNotCloseConnection
		}
	} else {
		// Re-use the connection to read the display data
		// The offset will move automatically because we are reading from the same connection, therefore
		// meaning that the web server will have already iterated past the header
		_, err = io.ReadFull(resp.Body, displayDataBytes)
		if err != nil {
			return EPKPreMap{}, err, ErrPreMapRemoteEPKCouldNotRead
		}

		// You didn't have to cut me off, make out like it never happened and that we were nothing
		// All I wanted was a header part, but you just had to go and give me the whole thing
		// Now you're just some obscure web server that I used to know
		err = resp.Body.Close()
		if err != nil {
			return EPKPreMap{}, err, ErrPreMapRemoteEPKCouldNotCloseConnection
		}
	}

	// Now we can map the display data
	var preMapEpk EPKPreMap
	preMapEpk, err = preMapEpkFromBytes(displayDataBytes, littleEndian, remoteEPK.CompressedSize, tarArchiveOffset)
	if err != nil {
		return EPKPreMap{}, err, ErrPreMapRemoteEPKCouldNotMapJSON
	}

	return preMapEpk, nil, nil
}

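// handlePublicKeyCheck prompts the user (or fails fatally in non-interactive mode) when a signing
// key is unknown, belongs to a different author, or does not match the stored fingerprint, and adds
// or replaces the fingerprint in the database when the user accepts.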
func handlePublicKeyCheck(exists bool, matchingAuthor bool, matchingFingerprint bool, publicKey []byte, author string, addFingerprintToDB func([]byte, string, bool) error, logger *Logger) error {
	if !exists {
		if logger.PromptSupported {
			response := logger.LogFunc(Log{
				Level: "WARN",
				Content: "Public key not found in database.\nThe public key fingerprint is: " + author + " " + ByteToFingerprint(publicKey) +
					"\nWould you like to trust this key (y/n)?",
				Prompt: true,
			})
			if strings.ToLower(response) == "y" {
				err := addFingerprintToDB(publicKey, author, false)
				if err != nil {
					return err
				} else {
					logger.LogFunc(Log{
						Level:   "INFO",
						Content: "Public key added to database.",
					})
				}
			} else {
				logger.LogFunc(Log{
					Level:   "INFO",
					Content: "Installation cancelled.",
				})
			}
		} else {
			logger.LogFunc(Log{
				Level: "FATAL",
				Content: "Public key not found in database.\nThe public key fingerprint is: " + author + " " + ByteToFingerprint(publicKey) +
					"\nSince non-interactive mode is enabled, the installation will not proceed.",
			})
		}
	} else if !matchingAuthor {
		if logger.PromptSupported {
			response := logger.LogFunc(Log{
				Level: "WARN",
				Content: "Public key does not match the author.\nThe public key fingerprint is: " + author + " " + ByteToFingerprint(publicKey) +
					"\nWould you like to replace the key (y/n)?",
				Prompt: true,
			})
			if strings.ToLower(response) == "y" {
				err := addFingerprintToDB(publicKey, author, true)
				if err != nil {
					return err
				} else {
					logger.LogFunc(Log{
						Level:   "INFO",
						Content: "Public key replaced in database.",
					})
				}
			} else {
				logger.LogFunc(Log{
					Level:   "FATAL",
					Content: "Installation cancelled.",
				})
			}
		} else {
			logger.LogFunc(Log{
				Level: "FATAL",
				Content: "Public key does not match the author.\nThe public key fingerprint is: " + author + " " + ByteToFingerprint(publicKey) +
					"\nSince non-interactive mode is enabled, the installation will not proceed.",
			})
		}
	} else if !matchingFingerprint {
		if logger.PromptSupported {
			response := logger.LogFunc(Log{
				Level: "WARN",
				Content: "Public key fingerprint does not match the author.\nThe public key fingerprint is: " + author + " " + ByteToFingerprint(publicKey) +
					"\nThis may be a security risk. To replace the key, type \"Yes, do as I say!\". Otherwise, type anything else.",
				Prompt: true,
			})
			if response == "Yes, do as I say!" {
				err := addFingerprintToDB(publicKey, author, true)
				if err != nil {
					return err
				} else {
					logger.LogFunc(Log{
						Level:   "INFO",
						Content: "Public key replaced in database.",
					})
				}
			} else {
				logger.LogFunc(Log{
					Level:   "FATAL",
					Content: "Installation cancelled.",
				})
			}
		} else {
			logger.LogFunc(Log{
				Level: "FATAL",
				Content: "Public key fingerprint does not match the author.\nThe public key fingerprint is: " + author + " " + ByteToFingerprint(publicKey) +
					"\nSince non-interactive mode is enabled, the installation will not proceed.",
			})
		}
	}

	return nil
}

var ErrFullyMapMetadataCouldNotRead = errors.New("could not read EPK")
var ErrFullyMapMetadataCouldNotJump = errors.New("could not jump to offset")
var ErrFullyMapMetadataCouldNotAddFingerprint = errors.New("could not add fingerprint")
var ErrFullyMapMetadataCouldNotGetFingerprint = errors.New("could not get fingerprint")
var ErrFullyMapMetadataHasInvalidSignature = errors.New("invalid signature")
var ErrFullyMapMetadataCouldNotMapJSON = errors.New("error mapping metadata")

// FullyMapMetadata maps an EPK file, but is significantly slower than PreMapEPK. Use PreMapEPK if you only need the display data.
// It pulls data from PreMapEPK to reduce the amount of work needed to map the EPK.
func FullyMapMetadata(epkBytes StreamOrBytes, preMap *EPKPreMap, checkFingerprintInDB func([]byte, string) (bool, bool, bool, error), addFingerprintToDB func([]byte, string, bool) error, warnUserAboutNoRange func(*Logger), logger *Logger) (*Metadata, error, error) {
	// We define the signature and public key bytes here so that we can read them later
	signature := make([]byte, 64)
	publicKey := make([]byte, 32)

	var connection io.ReadCloser
	if epkBytes.IsFileStream {
		// Before we continue, check if the signature is valid
		// To get the signature, we read from the 12th byte to the 76th byte
		_, err := epkBytes.FileStream.ReadAt(signature, 12)
		if err != nil {
			return &Metadata{}, err, ErrFullyMapMetadataCouldNotRead
		}

		// To get the public key, we read from the 76th byte to the 108th byte
		_, err = epkBytes.FileStream.ReadAt(publicKey, 76)
		if err != nil {
			return &Metadata{}, err, ErrFullyMapMetadataCouldNotRead
		}
	} else if epkBytes.IsURL {
		// Before we continue, check if the signature is valid
		// Fetch range 12 - EOF and read them in
		req, err := http.NewRequest("GET", epkBytes.URL, nil)
		if err != nil {
			return &Metadata{}, err, ErrFullyMapMetadataCouldNotRead
		}
		req.Header.Set("Range", "bytes=12-")
		resp, err := http.DefaultClient.Do(req)
		if err != nil {
			return &Metadata{}, err, ErrFullyMapMetadataCouldNotRead
		}
		// Set the connection
		connection = resp.Body
		// Check the status code
		if resp.StatusCode == 200 {
			// Not great, not terrible.
			// We'll have to cut off the connection early later.
			// Warn the user
			warnUserAboutNoRange(logger)
			// Discard the first 12 bytes
			_, err := io.CopyN(io.Discard, connection, 12)
			if err != nil {
				return &Metadata{}, err, ErrFullyMapMetadataCouldNotRead
			}
		} else if resp.StatusCode != 206 {
			return &Metadata{}, errors.New("unexpected status code: " + strconv.Itoa(resp.StatusCode)), ErrFullyMapMetadataCouldNotRead
		}

		// Read the signature
		_, err = io.ReadFull(connection, signature)
		if err != nil {
			return &Metadata{}, err, ErrFullyMapMetadataCouldNotRead
		}

		// Read the public key
		_, err = io.ReadFull(connection, publicKey)
		if err != nil {
			return &Metadata{}, err, ErrFullyMapMetadataCouldNotRead
		}
	} else {
		// Make signature and public key the optimised bytes
		signature = epkBytes.Bytes[12:76]
		publicKey = epkBytes.Bytes[76:108]
	}

	// Let's check for the public key in the database
	exists, matchingAuthor, matchingFingerprint, err := checkFingerprintInDB(publicKey, preMap.DisplayData.Author)
	if err != nil {
		return &Metadata{}, err, ErrFullyMapMetadataCouldNotGetFingerprint
	} else {
		err := handlePublicKeyCheck(exists, matchingAuthor, matchingFingerprint, publicKey, preMap.DisplayData.Author, addFingerprintToDB, logger)
		if err != nil {
			return &Metadata{}, err, ErrFullyMapMetadataCouldNotAddFingerprint
		}
	}

	// We need to create a new xxHash instance
	xxHash := xxhash.New()
	if epkBytes.IsFileStream {
		// Now we can verify the signature. First, we need to take the checksum of the metadata
		// Seeking is better than using ReadAt because it allows us to not have to load the entire file into memory
		_, err = epkBytes.FileStream.Seek(ConstMapEPKMetadataOffset, io.SeekStart)
		if err != nil {
			return &Metadata{}, err, ErrFullyMapMetadataCouldNotJump
		}

		// Streaming bytes to the hash is more memory efficient
		_, err = epkBytes.FileStream.WriteTo(xxHash)
		if err != nil {
			return &Metadata{}, err, ErrFullyMapMetadataCouldNotRead
		}
		// Verify the signature (we verify the hash because it's cheaper than verifying the entire EPK)
		if !ed25519.Verify(publicKey, xxHash.Sum(nil), signature) {
			return &Metadata{}, nil, ErrFullyMapMetadataHasInvalidSignature
		}
	} else if epkBytes.IsURL {
		// Now we can verify the signature. We can just stream the rest of the EPK to the hash
		_, err = io.Copy(xxHash, connection)
		if err != nil {
			return &Metadata{}, err, ErrFullyMapMetadataCouldNotRead
		}
		// You didn't have to cut me off...
		// Don't worry, we are reading to EOF anyway, no matter if we do have a non-range supported server, so we
		// (probably) won't upset the server owner.
		err = connection.Close()
		if err != nil {
			return &Metadata{}, err, ErrFullyMapMetadataCouldNotRead
		}

		// Verify the signature (we verify the hash because it's cheaper than verifying the entire EPK)
		if !ed25519.Verify(publicKey, xxHash.Sum(nil), signature) {
			return &Metadata{}, nil, ErrFullyMapMetadataHasInvalidSignature
		}
	} else {
		// We now verify the signature in one go without streaming
		_, err := xxHash.Write(epkBytes.Bytes[ConstMapEPKMetadataOffset:])
		if err != nil {
			return &Metadata{}, err, ErrFullyMapMetadataCouldNotRead
		}
		if !ed25519.Verify(publicKey, xxHash.Sum(nil), signature) {
			return &Metadata{}, nil, ErrFullyMapMetadataHasInvalidSignature
		}
	}

	// Great, the EPK is valid. Let's map the metadata.
	// We use the metadata map provided by PreMapEPK to reduce the amount of work needed to map the EPK

	// First, map SpecialFiles
	var parsedSpecialFiles SpecialFiles
	specialFilesMap, ok := preMap.MetadataMap["specialFiles"].(map[string]interface{})
	if !ok {
		return &Metadata{}, errors.New("specialFiles is not an object"), ErrFullyMapMetadataCouldNotMapJSON
	}
	noDelete, ok := specialFilesMap["noDelete"].([]interface{})
	if !ok {
		return &Metadata{}, errors.New("noDelete is not an array"), ErrFullyMapMetadataCouldNotMapJSON
	}
	parsedSpecialFiles.NoDelete, err = interfaceToStringSlice(noDelete, "noDelete")
	if err != nil {
		return &Metadata{}, err, ErrFullyMapMetadataCouldNotMapJSON
	}
	noReplace, ok := specialFilesMap["noReplace"].([]interface{})
	if !ok {
		return &Metadata{}, errors.New("noReplace is not an array"), ErrFullyMapMetadataCouldNotMapJSON
	}
	parsedSpecialFiles.NoReplace, err = interfaceToStringSlice(noReplace, "noReplace")
	if err != nil {
		return &Metadata{}, err, ErrFullyMapMetadataCouldNotMapJSON
	}

	// Declare the parsedMetadata object
	var parsedMetadata Metadata

	// Append parsedSpecialFiles to parsedMetadata
	parsedMetadata.SpecialFiles = parsedSpecialFiles

	// Steal some data from the PreMapEPK object
	parsedMetadata.Name = preMap.DisplayData.Name
	parsedMetadata.Version = preMap.DisplayData.Version
	parsedMetadata.Architecture = preMap.DisplayData.Architecture
	parsedMetadata.Size = preMap.DisplayData.Size
	parsedMetadata.Dependencies = preMap.DisplayData.Dependencies

	// Map the metadata
	parsedMetadata.Description, ok = preMap.MetadataMap["desc"].(string)
	if !ok {
		return &Metadata{}, errors.New("description is not a string"), ErrFullyMapMetadataCouldNotMapJSON
	}
	parsedMetadata.LongDescription, ok = preMap.MetadataMap["longDesc"].(string)
	if !ok {
		return &Metadata{}, errors.New("longDesc is not a string"), ErrFullyMapMetadataCouldNotMapJSON
	}
	parsedMetadata.Author, ok = preMap.MetadataMap["author"].(string)
	if !ok {
		return &Metadata{}, errors.New("author is not a string"), ErrFullyMapMetadataCouldNotMapJSON
	}
	parsedMetadata.License, ok = preMap.MetadataMap["license"].(string)
	if !ok {
		return &Metadata{}, errors.New("license is not a string"), ErrFullyMapMetadataCouldNotMapJSON
	}
	decompressedSizeString, ok := preMap.MetadataMap["size"].(string)
	if !ok {
		return &Metadata{}, errors.New("size is not a string"), ErrFullyMapMetadataCouldNotMapJSON
	}
	parsedMetadata.DecompressedSize = new(big.Int)
	parsedMetadata.DecompressedSize.SetString(decompressedSizeString, 10)

	return &parsedMetadata, nil, nil
}

var ErrInstallEPKCouldNotCreateTempDir = errors.New("could not create temporary directory")
var ErrInstallEPKCouldNotCreateZStandardReader = errors.New("could not create ZStandard reader")
var ErrInstallEPKCouldNotDecompressTarArchive = errors.New("could not decompress tar archive")
var ErrInstallEPKCouldNotCreateDir = errors.New("could not create directory")
var ErrInstallEPKCouldNotStatDir = errors.New("could not stat directory")
var ErrInstallEPKCouldNotStatFile = errors.New("could not stat file")
var ErrInstallEPKCouldNotCreateFile = errors.New("could not create file")
var ErrInstallEPKCouldNotCloseTarReader = errors.New("could not close tar reader")
var ErrInstallEPKCouldNotStatHook = errors.New("could not stat hook")
var ErrInstallEPKCouldNotRunHook = errors.New("could not run hook")
var ErrInstallEPKCouldNotAddEPKToDB = errors.New("could not add EPK to database")
var ErrInstallEPKCouldNotRemoveTempDir = errors.New("could not remove temporary directory")

// ProgressWriter implements a writer that intercepts writes in order to log progress
type ProgressWriter struct {
	Logger *Logger
	Total  *big.Int
	Writer io.Writer
}

// Write writes to the ProgressWriter
func (writer *ProgressWriter) Write(p []byte) (n int, err error) {
	// Count the bytes in this write
	byteCount := big.NewInt(int64(len(p)))
	if writer.Logger.ProgressSupported {
		writer.Logger.LogFunc(Log{
			Level:     "PROGRESS",
			Progress:  byteCount,
			Total:     writer.Total,
			Overwrite: true,
		})
	} else {
		writer.Logger.LogFunc(Log{
			Level:   "INFO",
			Content: "Written " + humanize.BigIBytes(byteCount) + " out of " + humanize.BigIBytes(writer.Total),
			Prompt:  false,
		})
	}

	written, err := writer.Writer.Write(p)
	if err != nil {
		return written, err
	}

	return written, nil
}

// InstallEPK installs an EPK file
func InstallEPK(epkBytes StreamOrBytes, metadata *Metadata, preMap *EPKPreMap, addEPKToDB func(*Metadata, []string, []byte, bool, bool, int64, ...string) error, logger *Logger) (string, error, error) {
	// Create the temporary directory
	tempDir, err := os.MkdirTemp("/tmp", "eon-install-")
	if err != nil {
		return tempDir, err, ErrInstallEPKCouldNotCreateTempDir
	}

	var zStandardReader *zstd.Decoder
	var connection io.ReadCloser
	if epkBytes.IsFileStream {
		// Seek to the correct position in the EPK
		_, err = epkBytes.FileStream.Seek(preMap.TarOffset, io.SeekStart)
		if err != nil {
			return "", err, nil
		}

		// Create a ZStandard reader reading from the EPK
		zStandardReader, err = zstd.NewReader(epkBytes.FileStream)
		if err != nil {
			return tempDir, err, ErrInstallEPKCouldNotCreateZStandardReader
		}
	} else if epkBytes.IsURL {
		// Range header to the tar offset
		req, err := http.NewRequest("GET", epkBytes.URL, nil)
		if err != nil {
			return tempDir, err, ErrInstallEPKCouldNotCreateZStandardReader
		}
		// Set the range header
		req.Header.Set("Range", "bytes="+strconv.FormatInt(preMap.TarOffset, 10)+"-")
		// Send the request
		resp, err := http.DefaultClient.Do(req)
		if err != nil {
			return tempDir, err, ErrInstallEPKCouldNotCreateZStandardReader
		}
		// Set connection to the response body
		connection = resp.Body
		// Check the status code
		if resp.StatusCode == 200 {
			// Not great, and actually terrible in this case: we have to keep reading bytes and discarding them until we reach the offset
			// The user will have already been warned about 300 times, so we don't need to warn them again
			// God this is painful. Let's give the user a progress bar to make it less painful
			_, err := io.CopyN(&ProgressWriter{
				Logger: logger,
				Total:  big.NewInt(preMap.TarOffset),
				Writer: io.Discard,
			}, connection, preMap.TarOffset)
			if err != nil {
				return tempDir, err, ErrInstallEPKCouldNotDecompressTarArchive
			}
		} else if resp.StatusCode != 206 {
			// Something went wrong
			return tempDir, errors.New("unexpected status code: " + strconv.Itoa(resp.StatusCode)), ErrInstallEPKCouldNotCreateZStandardReader
		}
		// Create a ZStandard reader reading from the EPK
		zStandardReader, err = zstd.NewReader(connection)
		if err != nil {
			return tempDir, err, ErrInstallEPKCouldNotCreateZStandardReader
		}
	} else {
		// Create a ZStandard reader reading from the EPK's in-memory bytes
		zStandardReader, err = zstd.NewReader(bytes.NewReader(epkBytes.Bytes[preMap.TarOffset:]))
		if err != nil {
			return tempDir, err, ErrInstallEPKCouldNotCreateZStandardReader
		}
	}

	// Create a tar reader reading from the ZStandard reader
	tarReader := tar.NewReader(zStandardReader)

	// Create a goroutine to see how much of the decompressed size we have decompressed
	written := new(big.Int)
	stop := make(chan bool)
	go func() {
		for {
			select {
			case <-stop:
				return
			default:
				if logger.ProgressSupported {
					logger.LogFunc(Log{
						Level:     "PROGRESS",
						Progress:  written,
						Total:     metadata.DecompressedSize,
						Overwrite: true,
					})
				} else {
					logger.LogFunc(Log{
						Level:   "INFO",
						Content: "Decompressed " + humanize.Bytes(uint64(written.Int64())) + " of " + humanize.Bytes(uint64(metadata.DecompressedSize.Int64())),
						Prompt:  false,
					})
				}
				// Don't spam the logger
				time.Sleep(1 * time.Second)
			}
		}
	}()

	// Create a slice of the installed files
	var installedFiles []string

	// Iterate through the tar archive
	for {
		// Read the next header
		header, err := tarReader.Next()

		// If we are done, stop iterating
		if err == io.EOF {
			break
		}

		// If there was an error, return the error
		if err != nil {
			return tempDir, err, ErrInstallEPKCouldNotDecompressTarArchive
		}

		// This should never happen, but if it does, we should just continue
		if header == nil {
			continue
		}

		// Get the target path
		var target string
		var isHook bool
		if strings.HasPrefix(header.Name, "root") {
			target = strings.TrimPrefix(header.Name, "root")
		} else if strings.HasPrefix(header.Name, "hooks") {
			target = filepath.Join(tempDir, header.Name)
			isHook = true
		} else {
			return tempDir, errors.New("invalid path in EPK: " + header.Name), ErrInstallEPKCouldNotDecompressTarArchive
		}

		switch header.Typeflag {
		case tar.TypeDir:
			// Check if the directory exists
			_, err := os.Stat(target)
			if err != nil {
				// If the directory does not exist, create it
				if errors.Is(err, os.ErrNotExist) {
					// All directories are 0755
					paths, err := MkdirAllWithPaths(target, 0755)
					if err != nil {
						return tempDir, err, ErrInstallEPKCouldNotCreateDir
					} else {
						// Check if the files are in noDelete
						for _, file := range metadata.SpecialFiles.NoDelete {
							if strings.TrimSuffix(target, "/") == strings.TrimSuffix(file, "/") {
								// This file is a special file and should not be deleted
								continue
							}
						}
						if !isHook {
							// Add the directory to the installed files
							installedFiles = append(installedFiles, target)

							// Add the paths to the installed files
							if paths != nil {
								installedFiles = append(installedFiles, paths...)
							}
						}
					}
				} else {
					return tempDir, err, ErrInstallEPKCouldNotStatDir
				}
			} else {
				// If it does exist, don't touch it
				continue
			}
		case tar.TypeReg:
			// Check if the file has anywhere to go
			_, err := os.Stat(filepath.Dir(target))
			if err != nil {
				// No, it doesn't. Create the directory
				if errors.Is(err, os.ErrNotExist) {
					// We assume 0755 for directories
					paths, err := MkdirAllWithPaths(filepath.Dir(target), 0755)
					if err != nil {
						return tempDir, err, ErrInstallEPKCouldNotCreateDir
					} else {
						// Check if the files are in noDelete
						for _, file := range metadata.SpecialFiles.NoDelete {
							if strings.TrimSuffix(target, "/") == strings.TrimSuffix(file, "/") {
								// This file is a special file and should not be deleted
								continue
							}
						}
						if !isHook {
							// Add the directory to the installed files
							installedFiles = append(installedFiles, filepath.Dir(target))

							// Add the paths to the installed files
							if paths != nil {
								installedFiles = append(installedFiles, paths...)
							}
						}
					}
				} else {
					return tempDir, err, ErrInstallEPKCouldNotStatDir
				}
			}

			// Check if the file already exists
			_, err = os.Stat(target)
			if err != nil {
				if errors.Is(err, os.ErrNotExist) {
					// Great, the file does not exist. Let's create it.
					file, err := os.OpenFile(target, os.O_CREATE|os.O_RDWR|os.O_TRUNC, os.FileMode(header.Mode))
					if err != nil {
						return tempDir, err, ErrInstallEPKCouldNotCreateFile
					}

					writtenFile, err := io.Copy(file, tarReader)
					if err != nil {
						return tempDir, err, ErrInstallEPKCouldNotDecompressTarArchive
					}

					written.Add(written, big.NewInt(writtenFile))

					err = file.Close()
					if err != nil {
						return tempDir, err, ErrInstallEPKCouldNotCloseTarReader
					} else {
						// Check if the files are in noDelete
						for _, file := range metadata.SpecialFiles.NoDelete {
							if strings.TrimSuffix(target, "/") == strings.TrimSuffix(file, "/") {
								// This file is a special file and should not be deleted
								continue
							}
						}
						if !isHook {
							// Add the file to the installed files
							installedFiles = append(installedFiles, target)
						}
					}
				} else {
					return tempDir, err, ErrInstallEPKCouldNotStatFile
				}
			} else {
				// See if it's an upgrade or not
				if preMap.IsUpgrade {
					// Check if it's a special file
					for _, file := range metadata.SpecialFiles.NoReplace {
						if strings.TrimSuffix(target, "/") == strings.TrimSuffix(file, "/") {
							// This file is a special file and should not be replaced
							continue
						}
					}
				}
			}
		}
	}

	zStandardReader.Close()

	// Close the connection if it's a URL
	if epkBytes.IsURL {
		err = connection.Close()
		if err != nil {
			return tempDir, err, ErrInstallEPKCouldNotCloseTarReader
		}
	}

	// Now let's run the hooks
	if preMap.IsUpgrade {
		_, err := os.Stat(filepath.Join(tempDir, "hooks", "upgrade.sh"))
		if err != nil {
			if !errors.Is(err, os.ErrNotExist) {
				return tempDir, err, ErrInstallEPKCouldNotStatHook
			}
		} else {
			cmd := exec.Command("/bin/sh", filepath.Join(tempDir, "hooks", "upgrade.sh"), metadata.Version.String())
			stderr, err := cmd.StderrPipe()
			if err != nil {
				return tempDir, err, ErrInstallEPKCouldNotRunHook
			}
			err = cmd.Start()
			if err != nil {
				return tempDir, err, ErrInstallEPKCouldNotRunHook
			}
			scanner := bufio.NewScanner(stderr)
			scanner.Split(bufio.ScanWords)
			for scanner.Scan() {
				message := scanner.Text()
				logger.LogFunc(Log{
					Level:   "INFO",
					Content: message,
				})
			}
			err = cmd.Wait()
			if err != nil {
				return tempDir, err, ErrInstallEPKCouldNotRunHook
			}
		}
	} else {
		_, err := os.Stat(filepath.Join(tempDir, "hooks", "install.sh"))
		if err != nil {
			if !errors.Is(err, os.ErrNotExist) {
				return tempDir, err, ErrInstallEPKCouldNotStatHook
			}
		} else {
			cmd := exec.Command("/bin/sh", filepath.Join(tempDir, "hooks", "install.sh"), metadata.Version.String())
			stderr, err := cmd.StderrPipe()
			if err != nil {
				return tempDir, err, ErrInstallEPKCouldNotRunHook
			}
			err = cmd.Start()
			if err != nil {
				return tempDir, err, ErrInstallEPKCouldNotRunHook
			}
			scanner := bufio.NewScanner(stderr)
			scanner.Split(bufio.ScanWords)
			for scanner.Scan() {
				message := scanner.Text()
				logger.LogFunc(Log{
					Level:   "INFO",
					Content: message,
				})
			}
			err = cmd.Wait()
			if err != nil {
				return tempDir, err, ErrInstallEPKCouldNotRunHook
			}
		}
	}

	// Do one more double-check to make sure nothing in installedFiles is in noDelete
	// (filter into a fresh slice rather than deleting while iterating)
	filteredInstalledFiles := installedFiles[:0]
	for _, installedFile := range installedFiles {
		keep := true
		for _, file := range metadata.SpecialFiles.NoDelete {
			if strings.TrimSuffix(installedFile, "/") == strings.TrimSuffix(file, "/") {
				keep = false
				break
			}
		}
		if keep {
			filteredInstalledFiles = append(filteredInstalledFiles, installedFile)
		}
	}
	installedFiles = filteredInstalledFiles

	// Finally, add the EPK and remove script to the database
	file, err := os.ReadFile(filepath.Join(tempDir, "hooks", "remove.sh"))
	if err != nil {
		if errors.Is(err, os.ErrNotExist) {
			// There is no remove hook, so store the EPK without a remove script
			var err error
			if !epkBytes.IsRemote {
				err = addEPKToDB(metadata, installedFiles, []byte{}, false, false, metadata.Size)
			} else {
				err = addEPKToDB(metadata, installedFiles, []byte{}, false, false, metadata.Size, epkBytes.RepositoryName)
			}
			if err != nil {
				return tempDir, err, ErrInstallEPKCouldNotAddEPKToDB
			}
		} else {
			return tempDir, err, ErrInstallEPKCouldNotAddEPKToDB
		}
	} else {
		var err error
		if !epkBytes.IsRemote {
			err = addEPKToDB(metadata, installedFiles, file, false, true, metadata.Size)
		} else {
			err = addEPKToDB(metadata, installedFiles, file, false, true, metadata.Size, epkBytes.RepositoryName)
		}
		if err != nil {
			return tempDir, err, ErrInstallEPKCouldNotAddEPKToDB
		}
	}

	// Remove the temporary directory
	err = os.RemoveAll(tempDir)
	if err != nil {
		return tempDir, err, ErrInstallEPKCouldNotRemoveTempDir
	}

	stop <- true
	logger.LogFunc(Log{
		Level:     "PROGRESS",
		Progress:  big.NewInt(1),
		Total:     big.NewInt(1),
		Overwrite: true,
	})

	return "", nil, nil
}

var ErrAddRepositoryCouldNotCreateRequest = errors.New("could not create request")
var ErrAddRepositoryCouldNotSendRequest = errors.New("could not send request")
var ErrAddRepositoryHasUnexpectedStatusCode = errors.New("unexpected status code")
var ErrAddRepositoryCouldNotReadResponse = errors.New("could not read response")
var ErrAddRepositoryHasInvalidMagic = errors.New("invalid magic")
var ErrAddRepositoryCouldNotHash = errors.New("could not write to hash")
var ErrAddRepositoryCouldNotUnmarshalMetadata = errors.New("could not unmarshal metadata")
var ErrAddRepositoryCouldNotGetFingerprint = errors.New("could not get fingerprint")
var ErrAddRepositoryCouldNotAddFingerprint = errors.New("could not add fingerprint")
var ErrAddRepositoryHasInvalidMetadata = errors.New("invalid metadata")
var ErrAddRepositoryCouldNotAddPackage = errors.New("could not add package to database")
var ErrAddRepositoryHasRepositoryExists = errors.New("repository already exists")
var ErrAddRepositoryCouldNotAddRepository = errors.New("could not add repository to database")

// AddRepository adds a repository to the database
func AddRepository(url string, addRepositoryToDB func(Repository, bool) error, getFingerprintFromDB func([]byte, string) (bool, bool, bool, error), addFingerprintToDB func([]byte, string, bool) error, addRemotePackageToDB func(RemoteEPK) error, checkRepositoryInDB func(string) (bool, error), forceReplace bool, logger *Logger) (string, error, error) {
	// First, fetch range 0-3 of /repository.erf
	// Then, check if the first 3 bytes are "eon"

	// Create the request
	magicRequest, err := http.NewRequest("GET", url+"/repository.erf", nil)
	if err != nil {
		return "", err, ErrAddRepositoryCouldNotCreateRequest
	}

	// Add the range header
	magicRequest.Header.Add("Range", "bytes=0-3")

	// Send the request
	magicResponse, err := http.DefaultClient.Do(magicRequest)
	if err != nil {
		return "", err, ErrAddRepositoryCouldNotSendRequest
	}

	// Check if the status code is 206
	var hasEntireFile bool
	if magicResponse.StatusCode != 206 {
		if magicResponse.StatusCode == 200 {
			// This web server does not support range requests, meaning we now have the entire file.
			// Mark it as such.
			hasEntireFile = true
		} else {
			return "", errors.New("status code " + strconv.Itoa(magicResponse.StatusCode)), ErrAddRepositoryHasUnexpectedStatusCode
		}
	}

	// Check the magic bytes
	var magicBytes = make([]byte, 3)
	_, err = io.ReadFull(magicResponse.Body, magicBytes)
	if err != nil {
		return "", err, ErrAddRepositoryCouldNotReadResponse
	}

	// Check if the magic bytes are "eon"
	if string(magicBytes) != "eon" {
		return "", nil, ErrAddRepositoryHasInvalidMagic
	}

	// Great. We either confirmed the repository is an Eon repository or we have the entire file.
	var fullFetch *http.Response
	if !hasEntireFile {
		// Download the rest of the file
		var err error
		fullFetch, err = http.Get(url + "/repository.erf")
		if err != nil {
			return "", err, ErrAddRepositoryCouldNotSendRequest
		}
	} else {
		fullFetch = magicResponse
	}

	// Now we get the contents of the file
	contents, err := io.ReadAll(fullFetch.Body)
	if err != nil {
		return "", err, ErrAddRepositoryCouldNotReadResponse
	}
	if hasEntireFile {
		// We already consumed the 3 magic bytes from this connection, so put them back
		contents = append(magicBytes, contents...)
	}

	// Verify the file's signature

	// Unmarshal the repository metadata, which is NOT the same as the EPK metadata
	var repositoryMetadata map[string]interface{}
	// We use a decoder instead of unmarshal here because we need to use JSON numbers: float64 is not enough
	var jsonDecoder = json.NewDecoder(bytes.NewReader(contents[99:]))
	jsonDecoder.UseNumber()
	err = jsonDecoder.Decode(&repositoryMetadata)
	if err != nil {
		return "", err, ErrAddRepositoryCouldNotUnmarshalMetadata
	}

	// Get the public key and signature
	signature := contents[3:67]
	publicKey := contents[67:99]

	// Make sure the author is a string before we use it
	repositoryAuthor, ok := repositoryMetadata["author"].(string)
	if !ok {
		return "", errors.New("author is not a string"), ErrAddRepositoryHasInvalidMetadata
	}

	// Look for the public key in the database
	exists, matchingAuthor, matchingFingerprint, err := getFingerprintFromDB(publicKey, repositoryAuthor)
	if err != nil {
		return "", err, ErrAddRepositoryCouldNotGetFingerprint
	} else {
		err := handlePublicKeyCheck(exists, matchingAuthor, matchingFingerprint, publicKey, repositoryAuthor, addFingerprintToDB, logger)
		if err != nil {
			return "", err, ErrAddRepositoryCouldNotAddFingerprint
		}
	}

	// We need to create a new xxHash instance
	xxHash := xxhash.New()
	_, err = xxHash.Write(contents[99:])
	if err != nil {
		return "", err, ErrAddRepositoryCouldNotHash
	}

	// Verify the signature
	if !ed25519.Verify(publicKey, xxHash.Sum(nil), signature) {
		return "", errors.New("invalid signature"), ErrAddRepositoryHasInvalidMetadata
	}

	// Now we can create the repository object
	var repository Repository
	repository.URL = url
	repository.Name, ok = repositoryMetadata["name"].(string)
	if !ok {
		return "", errors.New("name is not a string"), ErrAddRepositoryHasInvalidMetadata
	}

	// In force replace mode, we don't check if the repository already exists and just replace it
	if !forceReplace {
		// Side quest: check if the repository already exists
		repoExists, err := checkRepositoryInDB(repository.Name)
		if err != nil {
			return "", err, ErrAddRepositoryCouldNotAddRepository
		} else if repoExists {
			return "", nil, ErrAddRepositoryHasRepositoryExists
		}
	}

	repository.Description, ok = repositoryMetadata["desc"].(string)
	if !ok {
		return "", errors.New("desc is not a string"), ErrAddRepositoryHasInvalidMetadata
	}
	repository.Owner = repositoryAuthor

	// Write the contents of the repository to the database
	packageList, ok := repositoryMetadata["packages"].([]interface{})
	if !ok {
		return "", errors.New("packages is not an array"), ErrAddRepositoryHasInvalidMetadata
	}

	var remoteEPKs []RemoteEPK
	for _, epk := range packageList {
		epk, ok := epk.(map[string]interface{})
		if !ok {
			return "", errors.New("package is not an object"), ErrAddRepositoryHasInvalidMetadata
		}

		name, ok := epk["name"].(string)
		if !ok {
			return "", errors.New("package name is not a string"), ErrAddRepositoryHasInvalidMetadata
		}

		author, ok := epk["author"].(string)
		if !ok {
			return "", errors.New("package author is not a string"), ErrAddRepositoryHasInvalidMetadata
		}

		arch, ok := epk["arch"].(string)
		if !ok {
			return "", errors.New("package arch is not a string"), ErrAddRepositoryHasInvalidMetadata
		}

		versionString, ok := epk["version"].(string)
		if !ok {
			return "", errors.New("package version is not a string"), ErrAddRepositoryHasInvalidMetadata
		}

		versionPointer, err := semver.NewVersion(versionString)
		if err != nil {
			return "", errors.New("package version is not a valid semver version"), ErrAddRepositoryHasInvalidMetadata
		}

		sizeString, ok := epk["size"].(string)
		if !ok {
			return "", errors.New("package size is not a string"), ErrAddRepositoryHasInvalidMetadata
		}

		size, err := strconv.ParseInt(sizeString, 10, 64)
		if err != nil {
			return "", errors.New("package size is not a number"), ErrAddRepositoryHasInvalidMetadata
		}

		dependenciesInterface, ok := epk["deps"].([]interface{})
		if !ok {
			return "", errors.New("package dependencies is not an array"), ErrAddRepositoryHasInvalidMetadata
		}

		dependencies, err := interfaceToStringSlice(dependenciesInterface, "dependencies")
		if err != nil {
			return "", err, ErrAddRepositoryHasInvalidMetadata
		}

		hashJsonNumber, ok := epk["hash"].(json.Number)
		if !ok {
			return "", errors.New("package hash is not a number"), ErrAddRepositoryHasInvalidMetadata
		}

		var hash uint64
		hash, err = strconv.ParseUint(hashJsonNumber.String(), 10, 64)
		if err != nil {
			return "", errors.New("package hash is not a valid number"), ErrAddRepositoryHasInvalidMetadata
		}

		path, ok := epk["path"].(string)
		if !ok {
			return "", errors.New("package path is not a string"), ErrAddRepositoryHasInvalidMetadata
		}

		description, ok := epk["desc"].(string)
		if !ok {
			return "", errors.New("package description is not a string"), ErrAddRepositoryHasInvalidMetadata
		}

		remoteEPKs = append(remoteEPKs, RemoteEPK{
			Name:           name,
			Author:         author,
			Description:    description,
			Version:        *versionPointer,
			Architecture:   arch,
			CompressedSize: size,
			Dependencies:   dependencies,
			Path:           path,
			Arch:           arch,
			EPKHash:        hash,
			Repository:     repository,
		})
	}

	// We add packages afterward so that if there is an error, we don't have to remove the packages
	for _, epk := range remoteEPKs {
		err := addRemotePackageToDB(epk)
		if err != nil {
			return "", err, ErrAddRepositoryCouldNotAddPackage
		}
	}

	// Add the repository to the database
	err = addRepositoryToDB(repository, forceReplace)
	if err != nil {
		return "", err, ErrAddRepositoryCouldNotAddRepository
	}

	return repository.Name, nil, nil
}

var ErrRemoveRepositoryDoesNotExist = errors.New("repository does not exist")
var ErrRemoveRepositoryCouldNotFindRepository = errors.New("could not check for repository")
var ErrRemoveRepositoryCouldNotRemoveRepository = errors.New("could not remove repository")
var ErrRemoveRepositoryCouldNotRemoveRepositoryFromDB = errors.New("could not remove repository from database")

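// RemoveRepository purges the download cache for a repository and removes it from the database.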
func RemoveRepository(repository string, removeRepositoryFromDB func(string) error, checkRepositoryInDB func(string) (bool, error), logger *Logger) (error, error) {
	// First check if the repository exists
	exists, err := checkRepositoryInDB(repository)
	if err != nil {
		return err, ErrRemoveRepositoryCouldNotFindRepository
	}

	if !exists {
		return nil, ErrRemoveRepositoryDoesNotExist
	}

	// Purge the download cache
	err = os.RemoveAll(filepath.Join("/var/cache/eon/repositories/", repository))
	if err != nil {
		return err, ErrRemoveRepositoryCouldNotRemoveRepository
	}

	// Remove the repository from the database
	err = removeRepositoryFromDB(repository)
	if err != nil {
		return err, ErrRemoveRepositoryCouldNotRemoveRepositoryFromDB
	}

	// Alright, we're done here.
	logger.LogFunc(Log{
		Level:   "INFO",
		Content: "Removed repository " + repository + " from the database.",
	})

	return nil, nil
}

var ErrRemoveEPKCouldNotFindEPK = errors.New("could not get EPK from database")
var ErrRemoveEPKCouldNotCreateTempFile = errors.New("could not create temporary file")
var ErrRemoveEPKCouldNotWriteTempFile = errors.New("could not write to temporary file")
var ErrRemoveEPKCouldNotRunRemoveHook = errors.New("could not run remove hook")
var ErrRemoveEPKCouldNotRemoveEPKFromDB = errors.New("could not remove EPK from database")
var ErrRemoveEPKCouldNotRemoveFiles = errors.New("could not remove files")

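// RemoveEPK runs a package's remove hook, removes it from the database, and deletes the files it installed.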
func RemoveEPK(name string, removeEPKFromDB func(string) error, getEPKRemoveInfoFromDB func(name string) (string, []string, error), logger *Logger) (error, error) {
	// Try to fetch the EPK from the database
	removeScript, installedPaths, err := getEPKRemoveInfoFromDB(name)
	if err != nil {
		return err, ErrRemoveEPKCouldNotFindEPK
	}

	// Save the remove script to a temporary file
	removeScriptFile, err := os.CreateTemp("", "eon-remove-*.sh")
	if err != nil {
		return err, ErrRemoveEPKCouldNotCreateTempFile
	}

	// Write the remove script to the file
	_, err = removeScriptFile.Write([]byte(removeScript))
	if err != nil {
		return err, ErrRemoveEPKCouldNotWriteTempFile
	}

	// Run the remove script
	cmd := exec.Command("/bin/sh", removeScriptFile.Name())
	stderr, err := cmd.StderrPipe()
	if err != nil {
		return err, ErrRemoveEPKCouldNotRunRemoveHook
	}

	// Start the command
	err = cmd.Start()
	if err != nil {
		return err, ErrRemoveEPKCouldNotRunRemoveHook
	}

	// Read the output
	scanner := bufio.NewScanner(stderr)
	scanner.Split(bufio.ScanWords)
	for scanner.Scan() {
		message := scanner.Text()
		logger.LogFunc(Log{
			Level:   "INFO",
			Content: message,
		})
	}

	// Wait for the remove script to finish
	err = cmd.Wait()
	if err != nil {
		return err, ErrRemoveEPKCouldNotRunRemoveHook
	}

	// Close the file
	err = removeScriptFile.Close()
	if err != nil {
		return err, ErrRemoveEPKCouldNotRunRemoveHook
	}

	// Remove the EPK from the database
	err = removeEPKFromDB(name)
	if err != nil {
		return err, ErrRemoveEPKCouldNotRemoveEPKFromDB
	}

	// Sentinel error used by the walk below so we can detect it with errors.Is
	errPathNotInInstalledPaths := errors.New("path is not in installedPaths")

	// Remove the installed files
	for _, path := range installedPaths {
		// Check if there is anything in the paths not in installedPaths
		// If there is, we should not remove the directory
		err := filepath.Walk(path, func(path string, info os.FileInfo, err error) error {
			if err != nil {
				return err
			}

			// Check if the path is in installedPaths
			for _, installedPath := range installedPaths {
				if path == installedPath {
					return nil
				}
			}

			// If it's not, return the sentinel error
			return errPathNotInInstalledPaths
		})
		if err != nil {
			if errors.Is(err, errPathNotInInstalledPaths) {
				// The path contains something that is not in installedPaths, so we should not remove it
				continue
			} else if !errors.Is(err, os.ErrNotExist) {
				// Something else went wrong
				return err, ErrRemoveEPKCouldNotRemoveFiles
			}
		} else {
			// The path is in installedPaths, so we should remove it
			err := os.RemoveAll(path)
			if err != nil {
				if !errors.Is(err, os.ErrNotExist) {
					return err, ErrRemoveEPKCouldNotRemoveFiles
				} else {
					// The file does not exist - we must have deleted its parent directory or the user has done our job for us
					continue
				}
			}
		}
	}

	return nil, nil
}