diff --git a/Dockerfile b/Dockerfile index a242fe8..e973d56 100644 --- a/Dockerfile +++ b/Dockerfile @@ -6,7 +6,7 @@ WORKDIR /app RUN go mod download -RUN env CGO_ENABLED=0 go build -o main ./src +RUN env CGO_ENABLED=0 go build -o main ./cmd/blindbit-oracle FROM busybox COPY --from=buildstage /app/main . diff --git a/Makefile b/Makefile new file mode 100644 index 0000000..d24a8ea --- /dev/null +++ b/Makefile @@ -0,0 +1,32 @@ +# Benchmark targets +.PHONY: benchmark +benchmark: build-benchmark + @echo "Running benchmark..." + ./bin/benchmark $(ARGS) + +.PHONY: benchmark-v1 +benchmark-v1: build-benchmark + @echo "Running v1 HTTP benchmark only..." + ./bin/benchmark -v1 -v2=false $(ARGS) + +.PHONY: benchmark-v2 +benchmark-v2: build-benchmark + @echo "Running v2 gRPC benchmark only..." + ./bin/benchmark -v1=false -v2 $(ARGS) + +.PHONY: compare +compare: build-benchmark + @echo "Comparing v1 and v2 data..." + ./bin/benchmark -compare $(ARGS) + +.PHONY: build-benchmark +build-benchmark: + @echo "Building benchmark tool..." + @mkdir -p bin + go build -o bin/benchmark ./cmd/benchmark + +# Example usage: +# make benchmark ARGS="-startheight=100 -endheight=200" +# make benchmark-v1 ARGS="-startheight=100 -endheight=200" +# make benchmark-v2 ARGS="-startheight=100 -endheight=200" +# make compare ARGS="-startheight=100 -endheight=200" diff --git a/NOTES.md b/NOTES.md deleted file mode 100644 index ba76f88..0000000 --- a/NOTES.md +++ /dev/null @@ -1,84 +0,0 @@ -# Notes - -This file is to keep track of changes made over time and to have reference points for the implementation. Very old -information has been removed. Specification details about the communication protocol between indexing server and light -clients can be found [here](https://github.com/setavenger/BIP0352-light-client-specification.git). - -## Tweak Computation Performance - -Results from Benchmarking. Running v2 is a clear win in terms of speed for all types of blocks (many txs and few txs). 
-Spinning up a go routine for every tweak seems very efficient. But can it be improved? Can push the performance even a -bit more? -Next I want to try to assign a number of tweaks to a goroutine before it spins up, -so that we don't have the overhead of a goroutine spinning up all the time. - -We variations between different benchmarking calls, -as seen by v1 where the only one thread is used, and we still see some discrepancies. -Overall the pattern becomes clear. v4 reduces the overhead of goroutine spawning significantly but does not outperform -v2 in any real way. -v2 seems to be quite optimised at this point. I'm not quite sure what one could try to boost performance except of -course just utilizing more cores. -Using more cores clearly improves the performance (almost linearly in some cases). -Parallel processing could be used for extracting [spent UTXOs](./src/core/extractutxos.go) (L:31) as well. -This is not a priority at the moment as the processing time seems to be low. - -It should be noted that these are benchmarking results when solely running the tweak computation function. -During initial syncing/indexing there are also a lot of parallel processes for the rpc calls. - -The functions can be found [here](./src/core/tweak.go). 
- -### 12 Goroutines - -```text -goos: darwin -goarch: amd64 -pkg: SilentPaymentAppBackend/src/core -cpu: Intel(R) Core(TM) i9-9880H CPU @ 2.30GHz -BenchmarkTweakV4Block833000-16 66 18221455 ns/op -BenchmarkTweakV3Block833000-16 51 23240305 ns/op -BenchmarkTweakV2Block833000-16 66 18484002 ns/op -BenchmarkTweakV1Block833000-16 8 158669041 ns/op -BenchmarkTweakV4Block833010-16 42 28893857 ns/op -BenchmarkTweakV3Block833010-16 30 37702025 ns/op -BenchmarkTweakV2Block833010-16 42 28723057 ns/op -BenchmarkTweakV1Block833010-16 5 212243446 ns/op -BenchmarkTweakV4Block833013-16 44 28600250 ns/op -BenchmarkTweakV3Block833013-16 36 34166821 ns/op -BenchmarkTweakV2Block833013-16 42 28579243 ns/op -BenchmarkTweakV1Block833013-16 6 190190890 ns/op -BenchmarkTweakV4Block834469-16 86 13207238 ns/op -BenchmarkTweakV3Block834469-16 91 12260387 ns/op -BenchmarkTweakV2Block834469-16 82 13145223 ns/op -BenchmarkTweakV1Block834469-16 15 75665007 ns/op -PASS -ok SilentPaymentAppBackend/src/core 25.640s -``` - -### 6 Goroutines - -```text -Allowed number of parallel processes (`common.MaxParallelTweakComputations`) was 6. 
- -goos: darwin -goarch: amd64 -pkg: SilentPaymentAppBackend/src/core -cpu: Intel(R) Core(TM) i9-9880H CPU @ 2.30GHz -BenchmarkTweakV4Block833000-16 34 37608796 ns/op -BenchmarkTweakV3Block833000-16 31 37644976 ns/op -BenchmarkTweakV2Block833000-16 43 35005047 ns/op -BenchmarkTweakV1Block833000-16 8 132897864 ns/op -BenchmarkTweakV4Block833010-16 24 58622605 ns/op -BenchmarkTweakV3Block833010-16 19 62589645 ns/op -BenchmarkTweakV2Block833010-16 21 52516952 ns/op -BenchmarkTweakV1Block833010-16 5 204381619 ns/op -BenchmarkTweakV4Block833013-16 21 54992341 ns/op -BenchmarkTweakV3Block833013-16 18 57974175 ns/op -BenchmarkTweakV2Block833013-16 28 49971872 ns/op -BenchmarkTweakV1Block833013-16 6 184793615 ns/op -BenchmarkTweakV4Block834469-16 66 21655617 ns/op -BenchmarkTweakV3Block834469-16 67 16031086 ns/op -BenchmarkTweakV2Block834469-16 50 20486003 ns/op -BenchmarkTweakV1Block834469-16 15 68968977 ns/op -PASS -ok SilentPaymentAppBackend/src/core 27.134s -``` diff --git a/blindbit.example.toml b/blindbit.example.toml index 6b7f3ef..e90aefd 100644 --- a/blindbit.example.toml +++ b/blindbit.example.toml @@ -1,3 +1,6 @@ +# possible values: trace, debug, info, warn, error +log_level = "debug" + # 0.0.0.0:8000 to expose outside of localhost # default: "127.0.0.1:8000" host = "127.0.0.1:8000" diff --git a/cmd/benchmark/README.md b/cmd/benchmark/README.md new file mode 100644 index 0000000..848945b --- /dev/null +++ b/cmd/benchmark/README.md @@ -0,0 +1,71 @@ +# Oracle Benchmark Tool + +This tool benchmarks the performance difference between v1 (HTTP) and v2 (gRPC streaming) APIs for fetching block data. 
+ +## Building + +```bash +make build-benchmark +``` + +## Usage + +### Run both benchmarks +```bash +./bin/benchmark -startheight=100 -endheight=200 +``` + +### Run only v1 HTTP benchmark +```bash +./bin/benchmark -v1 -v2=false -startheight=100 -endheight=200 +``` + +### Run only v2 gRPC benchmark +```bash +./bin/benchmark -v1=false -v2 -startheight=100 -endheight=200 +``` + +### Compare v1 and v2 data (validation) +```bash +./bin/benchmark -compare -startheight=100 -endheight=200 +``` + +### Using Makefile +```bash +# Run both +make benchmark ARGS="-startheight=100 -endheight=200" + +# Run only v1 +make benchmark-v1 ARGS="-startheight=100 -endheight=200" + +# Run only v2 +make benchmark-v2 ARGS="-startheight=100 -endheight=200" + +# Compare data +make benchmark ARGS="-compare -startheight=264100 -endheight=264200" +``` + +## Command Line Flags + +- `-startheight`: Start block height (default: 1) +- `-endheight`: End block height (default: 10) +- `-http`: HTTP API base URL (default: "http://127.0.0.1:8000") +- `-grpc`: gRPC server host:port (default: "127.0.0.1:50051") +- `-v1`: Run v1 HTTP benchmark (default: true) +- `-v2`: Run v2 gRPC benchmark (default: true) +- `-compare`: Compare v1 and v2 data instead of benchmarking (default: false) + +## What it measures + +The benchmark fetches block data (tweaks, filters) for each block height in the range and measures: + +- Total time to fetch all blocks +- Blocks processed per second +- Individual block fetch times + +## Expected Results + +- **v1 (HTTP)**: Makes individual HTTP requests for each block, good for small ranges +- **v2 (gRPC)**: Uses streaming to fetch all blocks in one connection, better for large ranges + +The gRPC streaming approach should show better performance for larger block ranges due to reduced connection overhead and better batching. 
diff --git a/cmd/benchmark/main.go b/cmd/benchmark/main.go new file mode 100644 index 0000000..0a54029 --- /dev/null +++ b/cmd/benchmark/main.go @@ -0,0 +1,55 @@ +package main + +import ( + "flag" + + "github.com/rs/zerolog" + "github.com/setavenger/blindbit-lib/logging" + "github.com/setavenger/blindbit-oracle/internal/benchmark" +) + +func main() { + var ( + startHeight = flag.Uint64("startheight", 1, "Start block height") + endHeight = flag.Uint64("endheight", 10, "End block height") + httpURL = flag.String("http", "http://127.0.0.1:8000", "HTTP API base URL") + grpcHost = flag.String("grpc", "127.0.0.1:50051", "gRPC server host:port") + runV1 = flag.Bool("v1", true, "Run v1 HTTP benchmark") + runV2 = flag.Bool("v2", true, "Run v2 gRPC benchmark") + compare = flag.Bool("compare", false, "Compare v1 and v2 data instead of benchmarking") + ) + flag.Parse() + + // Setup logging + logging.SetLogLevel(zerolog.InfoLevel) + + if *compare { + logging.L.Info(). + Uint64("start_height", *startHeight). + Uint64("end_height", *endHeight). + Msg("Starting data comparison") + + benchmark.CompareV1V2Results(*startHeight, *endHeight, *httpURL, *grpcHost) + return + } + + logging.L.Info(). + Uint64("start_height", *startHeight). + Uint64("end_height", *endHeight). 
+ Msg("Starting benchmark") + + // heat up cache or whatever to keep it somewhat fair + benchmark.BenchmarkV2(*startHeight, *endHeight, *grpcHost) + + if *runV1 { + logging.L.Info().Msg("=== Running V1 HTTP Benchmark ===") + benchmark.BenchmarkV1(*startHeight, *endHeight, *httpURL) + } + + if *runV2 { + logging.L.Info().Msg("=== Running V2 gRPC Streaming Benchmark ===") + benchmark.BenchmarkV2(*startHeight, *endHeight, *grpcHost) + } + + logging.L.Info().Msg("Benchmark completed") +} diff --git a/cmd/blindbit-oracle/main.go b/cmd/blindbit-oracle/main.go new file mode 100644 index 0000000..af13d23 --- /dev/null +++ b/cmd/blindbit-oracle/main.go @@ -0,0 +1,185 @@ +package main + +import ( + "errors" + "flag" + "fmt" + "path" + + "os" + "os/signal" + "strings" + "time" + + "github.com/setavenger/blindbit-lib/logging" + "github.com/setavenger/blindbit-oracle/internal/config" + "github.com/setavenger/blindbit-oracle/internal/core" + "github.com/setavenger/blindbit-oracle/internal/dataexport" + "github.com/setavenger/blindbit-oracle/internal/dblevel" + "github.com/setavenger/blindbit-oracle/internal/server" + v2 "github.com/setavenger/blindbit-oracle/internal/server/v2" +) + +var ( + displayVersion bool + pruneOnStart bool + exportData bool + Version = "0.0.0" +) + +func init() { + flag.StringVar( + &config.BaseDirectory, + "datadir", + config.DefaultBaseDirectory, + "Set the base directory for blindbit oracle. 
Default directory is ~/.blindbit-oracle", + ) + flag.BoolVar( + &displayVersion, + "version", + false, + "show version of blindbit-oracle", + ) + flag.BoolVar( + &pruneOnStart, + "reprune", + false, + "set this flag if you want to prune on startup", + ) + flag.BoolVar( + &exportData, + "export-data", + false, + "export the databases", + ) + flag.Parse() + + if displayVersion { + // we only need the version for this + return + } + + config.SetDirectories() // todo a proper set settings function which does it all would be good to avoid several small function calls + err := os.Mkdir(config.BaseDirectory, 0750) + if err != nil && !errors.Is(err, os.ErrExist) { + logging.L.Fatal().Err(err).Msg("error creating base directory") + } + + logging.L.Info().Msgf("base directory %s", config.BaseDirectory) + + // load after loggers are instantiated + config.LoadConfigs(path.Join(config.BaseDirectory, config.ConfigFileName)) + + // create DB path + err = os.Mkdir(config.DBPath, 0750) + if err != nil && !strings.Contains(err.Error(), "file exists") { + logging.L.Fatal().Err(err).Msg("error creating db path") + } + + // open levelDB connections + openLevelDBConnections() + + if config.CookiePath != "" { + data, err := os.ReadFile(config.CookiePath) + if err != nil { + logging.L.Fatal().Err(err).Msg("error reading cookie file") + } + + credentials := strings.Split(string(data), ":") + if len(credentials) != 2 { + logging.L.Fatal().Msg("cookie file is invalid") + } + config.RpcUser = credentials[0] + config.RpcPass = credentials[1] + } + + if config.RpcUser == "" { + logging.L.Fatal().Msg("rpc user not set") // todo use cookie file to circumvent this requirement + } + + if config.RpcPass == "" { + logging.L.Fatal().Msg("rpc pass not set") // todo use cookie file to circumvent this requirement + } +} + +func main() { + if displayVersion { + fmt.Println("blindbit-oracle version:", Version) // using fmt because loggers are not initialised + os.Exit(0) + } + defer 
logging.L.Info().Msg("Program shut down") + defer dblevel.CloseDBs() + + interrupt := make(chan os.Signal, 1) + signal.Notify(interrupt, os.Interrupt) + + logging.L.Info().Msg("Program Started") + + // make sure everything is ready before we receive data + + //todo create proper handling for exporting data + + if exportData { + logging.L.Info().Msg("Exporting data") + dataexport.ExportAll() + // dataexport.ExportUTXOs(fmt.Sprintf("%s/export/utxos.csv", config.BaseDirectory)) + return + } + + //moved into go routine such that the interrupt signal will apply properly + go func() { + if pruneOnStart { + startPrune := time.Now() + core.PruneAllUTXOs() + logging.L.Info().Msgf("Pruning took: %s", time.Since(startPrune).String()) + } + startSync := time.Now() + err := core.PreSyncHeaders() + if err != nil { + logging.L.Fatal().Err(err).Msg("error pre-syncing headers") + return + } + + // so we can start fetching data while not fully synced. Requires headers to be synced to avoid grave errors. + go server.RunServer(&server.ApiHandler{}) + + // keep it optional for now + if config.GRPCHost != "" { + go v2.RunGRPCServer() + } + + // todo buggy for sync catchup from 0, needs to be 1 or higher + err = core.SyncChain() + if err != nil { + logging.L.Fatal().Err(err).Msg("error syncing chain") + return + } + logging.L.Info().Msgf("Sync took: %s", time.Since(startSync).String()) + go core.CheckForNewBlockRoutine() + + // only call this if you need to reindex. 
It doesn't delete anything but takes a couple of minutes to finish + //err := core.ReindexDustLimitsOnly() + //if err != nil { + // logging.L.Err(err).Msg("error reindexing dust limits") + // return + //} + }() + + for { + <-interrupt + logging.L.Info().Msg("Program interrupted") + return + } +} + +func openLevelDBConnections() { + dblevel.HeadersDB = dblevel.OpenDBConnection(config.DBPathHeaders) + dblevel.HeadersInvDB = dblevel.OpenDBConnection(config.DBPathHeadersInv) + dblevel.NewUTXOsFiltersDB = dblevel.OpenDBConnection(config.DBPathFilters) + dblevel.TweaksDB = dblevel.OpenDBConnection(config.DBPathTweaks) + dblevel.TweakIndexDB = dblevel.OpenDBConnection(config.DBPathTweakIndex) + dblevel.TweakIndexDustDB = dblevel.OpenDBConnection(config.DBPathTweakIndexDust) + dblevel.UTXOsDB = dblevel.OpenDBConnection(config.DBPathUTXOs) + dblevel.SpentOutpointsIndexDB = dblevel.OpenDBConnection(config.DBPathSpentOutpointsIndex) + dblevel.SpentOutpointsFilterDB = dblevel.OpenDBConnection(config.DBPathSpentOutpointsFilter) +} diff --git a/go.mod b/go.mod index 5a882b8..34ecca3 100644 --- a/go.mod +++ b/go.mod @@ -1,32 +1,35 @@ -module SilentPaymentAppBackend +module github.com/setavenger/blindbit-oracle -go 1.22.4 +go 1.24.1 require ( - github.com/btcsuite/btcd/btcec/v2 v2.3.3 - github.com/btcsuite/btcd/btcutil v1.1.5 + github.com/btcsuite/btcd/btcutil v1.1.6 github.com/btcsuite/btcd/chaincfg/chainhash v1.1.0 github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d github.com/gin-contrib/cors v1.7.2 github.com/gin-contrib/gzip v1.2.2 github.com/gin-gonic/gin v1.10.0 - github.com/rs/zerolog v1.33.0 - github.com/setavenger/go-bip352 v0.1.6 - github.com/shopspring/decimal v1.3.1 - github.com/spf13/viper v1.18.2 + github.com/rs/zerolog v1.34.0 + github.com/setavenger/blindbit-lib v0.0.1 + github.com/setavenger/go-bip352 v0.1.8-0.20250807125845-136879952399 + github.com/setavenger/go-libsecp256k1 v0.0.0-20250601142217-61f26e074fd5 + github.com/spf13/viper v1.19.0 
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 - golang.org/x/crypto v0.32.0 + google.golang.org/grpc v1.62.1 + google.golang.org/protobuf v1.36.2 ) require ( github.com/aead/siphash v1.0.1 // indirect - github.com/btcsuite/btcd v0.23.5-0.20231215221805-96c9fd8078fd // indirect + github.com/btcsuite/btcd v0.24.2 // indirect + github.com/btcsuite/btcd/btcec/v2 v2.3.5 // indirect github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f // indirect github.com/bytedance/sonic v1.12.7 // indirect github.com/bytedance/sonic/loader v0.2.2 // indirect github.com/cloudwego/base64x v0.1.4 // indirect - github.com/decred/dcrd/crypto/blake256 v1.0.0 // indirect - github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 // indirect + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect + github.com/decred/dcrd/crypto/blake256 v1.1.0 // indirect + github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 // indirect github.com/fsnotify/fsnotify v1.7.0 // indirect github.com/gabriel-vasile/mimetype v1.4.8 // indirect github.com/gin-contrib/sse v1.0.0 // indirect @@ -34,6 +37,7 @@ require ( github.com/go-playground/universal-translator v0.18.1 // indirect github.com/go-playground/validator/v10 v10.24.0 // indirect github.com/goccy/go-json v0.10.4 // indirect + github.com/golang/protobuf v1.5.3 // indirect github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect github.com/hashicorp/hcl v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect @@ -47,23 +51,28 @@ require ( github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/pelletier/go-toml/v2 v2.2.3 // indirect + github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/sagikazarmark/locafero v0.4.0 // indirect github.com/sagikazarmark/slog-shim v0.1.0 // indirect + github.com/shopspring/decimal v1.4.0 // indirect github.com/sourcegraph/conc v0.3.0 // 
indirect github.com/spf13/afero v1.11.0 // indirect github.com/spf13/cast v1.6.0 // indirect github.com/spf13/pflag v1.0.5 // indirect + github.com/stretchr/testify v1.10.0 // indirect github.com/subosito/gotenv v1.6.0 // indirect github.com/twitchyliquid64/golang-asm v0.15.1 // indirect + github.com/tyler-smith/go-bip39 v1.1.0 // indirect github.com/ugorji/go/codec v1.2.12 // indirect go.uber.org/atomic v1.9.0 // indirect go.uber.org/multierr v1.9.0 // indirect golang.org/x/arch v0.13.0 // indirect - golang.org/x/exp v0.0.0-20230905200255-921286631fa9 // indirect + golang.org/x/crypto v0.36.0 // indirect + golang.org/x/exp v0.0.0-20250305212735-054e65f0b394 // indirect golang.org/x/net v0.34.0 // indirect - golang.org/x/sys v0.29.0 // indirect - golang.org/x/text v0.21.0 // indirect - google.golang.org/protobuf v1.36.2 // indirect + golang.org/x/sys v0.31.0 // indirect + golang.org/x/text v0.23.0 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240314234333-6e1732d8331c // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/go.sum b/go.sum index 504e163..066f83b 100644 --- a/go.sum +++ b/go.sum @@ -2,16 +2,18 @@ github.com/aead/siphash v1.0.1 h1:FwHfE/T45KPKYuuSAKyyvE+oPWcaQ+CUmFW0bPlM+kg= github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII= github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ= github.com/btcsuite/btcd v0.22.0-beta.0.20220111032746-97732e52810c/go.mod h1:tjmYdS6MLJ5/s0Fj4DbLgSbDHbEqLJrtnHecBFkdz5M= -github.com/btcsuite/btcd v0.23.5-0.20231215221805-96c9fd8078fd h1:js1gPwhcFflTZ7Nzl7WHaOTlTr5hIrR4n1NM4v9n4Kw= github.com/btcsuite/btcd v0.23.5-0.20231215221805-96c9fd8078fd/go.mod h1:nm3Bko6zh6bWP60UxwoT5LzdGJsQJaPo6HjduXq9p6A= +github.com/btcsuite/btcd v0.24.2 h1:aLmxPguqxza+4ag8R1I2nnJjSu2iFn/kqtHTIImswcY= +github.com/btcsuite/btcd v0.24.2/go.mod 
h1:5C8ChTkl5ejr3WHj8tkQSCmydiMEPB0ZhQhehpq7Dgg= github.com/btcsuite/btcd/btcec/v2 v2.1.0/go.mod h1:2VzYrv4Gm4apmbVVsSq5bqf1Ec8v56E48Vt0Y/umPgA= github.com/btcsuite/btcd/btcec/v2 v2.1.3/go.mod h1:ctjw4H1kknNJmRN4iP1R7bTQ+v3GJkZBd6mui8ZsAZE= -github.com/btcsuite/btcd/btcec/v2 v2.3.3 h1:6+iXlDKE8RMtKsvK0gshlXIuPbyWM/h84Ensb7o3sC0= -github.com/btcsuite/btcd/btcec/v2 v2.3.3/go.mod h1:zYzJ8etWJQIv1Ogk7OzpWjowwOdXY1W/17j2MW85J04= +github.com/btcsuite/btcd/btcec/v2 v2.3.5 h1:dpAlnAwmT1yIBm3exhT1/8iUSD98RDJM5vqJVQDQLiU= +github.com/btcsuite/btcd/btcec/v2 v2.3.5/go.mod h1:m22FrOAiuxl/tht9wIqAoGHcbnCCaPWyauO8y2LGGtQ= github.com/btcsuite/btcd/btcutil v1.0.0/go.mod h1:Uoxwv0pqYWhD//tfTiipkxNfdhG9UrLwaeswfjfdF0A= github.com/btcsuite/btcd/btcutil v1.1.0/go.mod h1:5OapHB7A2hBBWLm48mmw4MOHNJCcUBTwmWH/0Jn8VHE= -github.com/btcsuite/btcd/btcutil v1.1.5 h1:+wER79R5670vs/ZusMTF1yTcRYE5GUsFbdjdisflzM8= github.com/btcsuite/btcd/btcutil v1.1.5/go.mod h1:PSZZ4UitpLBWzxGd5VGOrLnmOjtPP/a6HaFo12zMs00= +github.com/btcsuite/btcd/btcutil v1.1.6 h1:zFL2+c3Lb9gEgqKNzowKUPQNb8jV7v5Oaodi/AYFd6c= +github.com/btcsuite/btcd/btcutil v1.1.6/go.mod h1:9dFymx8HpuLqBnsPELrImQeTQfKBQqzqGbbV3jK55aE= github.com/btcsuite/btcd/chaincfg/chainhash v1.0.0/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc= github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc= github.com/btcsuite/btcd/chaincfg/chainhash v1.1.0 h1:59Kx4K6lzOW5w6nFlA0v5+lk/6sjybR934QNHSJZPTQ= @@ -41,10 +43,12 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/decred/dcrd/crypto/blake256 v1.0.0 h1:/8DMNYp9SGi5f0w7uCm6d6M4OU2rGFK09Y2A4Xv7EE0= 
github.com/decred/dcrd/crypto/blake256 v1.0.0/go.mod h1:sQl2p6Y26YV+ZOcSTP6thNdn47hh8kt6rqSlvmrXFAc= -github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 h1:YLtO71vCjJRCBcrPMtQ9nqBsqpA1m5sE92cU+pd5Mcc= +github.com/decred/dcrd/crypto/blake256 v1.1.0 h1:zPMNGQCm0g4QTY27fOCorQW7EryeQ/U0x++OzVrdms8= +github.com/decred/dcrd/crypto/blake256 v1.1.0/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPcCXqlm2ArzUIkw9czNJo= github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1/go.mod h1:hyedUtir6IdtD/7lIxGeCxkaw7y45JueMRL4DIyJDKs= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 h1:NMZiJj8QnKe1LgsbDayM4UoHwbvwDRwnI3hwNaAHRnc= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0/go.mod h1:ZXNYxsqcloTdSy/rNShjYzMhyjf0LaoftYK0p+A3h40= github.com/decred/dcrd/lru v1.0.0/go.mod h1:mxKOwFd7lFjN2GZYsiz/ecgqR6kkYAl+0pz0tEMk218= github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= @@ -80,14 +84,18 @@ github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrU github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= +github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb h1:PBC98N2aIaM3XXiurYmW7fx4GZkL8feAMVq7nEjURHk= github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/go-cmp v0.3.0/go.mod 
h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= -github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= @@ -145,17 +153,21 @@ github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRI github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8= github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= -github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= -github.com/rs/zerolog v1.33.0 h1:1cU2KZkvPxNyfgEmhHAz/1A9Bz+llsdYzklWFzgp0r8= -github.com/rs/zerolog v1.33.0/go.mod h1:/7mN4D5sKwJLZQ2b/znpjC3/GQWY/xaDXUM0kKWRHss= +github.com/rs/xid v1.6.0/go.mod h1:7XoLgs4eV+QndskICGsho+ADou8ySMSjJKDIan90Nz0= +github.com/rs/zerolog v1.34.0 h1:k43nTLIwcTVQAncfCw4KZ2VY6ukYoZaBPNOE8txlOeY= +github.com/rs/zerolog v1.34.0/go.mod h1:bJsvje4Z08ROH4Nhs5iH600c3IkWhwp44iRc54W6wYQ= github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6keLGt6kNQ= github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4= 
github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE= github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ= -github.com/setavenger/go-bip352 v0.1.6 h1:7D1/RMLa+1XaP1ccdseGwgUUfUY20jVf5unAY679x3Y= -github.com/setavenger/go-bip352 v0.1.6/go.mod h1:ajjkB64QrjbF0+MEUjeeBlBxDaJk7VmYUN8XbOK+EKo= -github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5gKV8= -github.com/shopspring/decimal v1.3.1/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= +github.com/setavenger/blindbit-lib v0.0.1 h1:t38+RzKFryEULLg4uuapzEGF1HKziH2PzKAZNIFwIKg= +github.com/setavenger/blindbit-lib v0.0.1/go.mod h1:wE/PNqVqW/oqoocOY3berjC/PsmCrrcrs72onzTyQOE= +github.com/setavenger/go-bip352 v0.1.8-0.20250807125845-136879952399 h1:SZtpspHUg1VOm8YWDK2mSfyOukxE3xHgtlNT/dOs+4k= +github.com/setavenger/go-bip352 v0.1.8-0.20250807125845-136879952399/go.mod h1:j+5v4nI/7n9IBEnvr6LagRpe6whTAHjIBRtqJKyJKG4= +github.com/setavenger/go-libsecp256k1 v0.0.0-20250601142217-61f26e074fd5 h1:KKpLemBDPEQxNmTz2+5M8bWhlArls9ZkhkCHPPVh3RY= +github.com/setavenger/go-libsecp256k1 v0.0.0-20250601142217-61f26e074fd5/go.mod h1:TOguhOrbK0FoBk466b3CjYHbaYrZcB6CJteeq/K5w/4= +github.com/shopspring/decimal v1.4.0 h1:bxl37RwXBklmTi0C79JfXCEBD1cqqHt0bbgBAGFp81k= +github.com/shopspring/decimal v1.4.0/go.mod h1:gawqmDU56v4yIKSwfBSFip1HdCCXN8/+DMd9qYNcwME= github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo= github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0= github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= @@ -164,8 +176,8 @@ github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0= github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod 
h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/viper v1.18.2 h1:LUXCnvUvSM6FXAsj6nnfc8Q2tp1dIgUfY9Kc8GsSOiQ= -github.com/spf13/viper v1.18.2/go.mod h1:EKmWIqdnk5lOcmR72yw6hS+8OPYcwD0jteitLMVB+yk= +github.com/spf13/viper v1.19.0 h1:RWq5SEjt8o25SROyN3z2OrDB9l7RPd3lwTWU8EcEdcI= +github.com/spf13/viper v1.19.0/go.mod h1:GQUN9bilAbhU/jgc1bKs99f/suXKeUMct8Adx5+Ntkg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= @@ -184,6 +196,8 @@ github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70 github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc= github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI= github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08= +github.com/tyler-smith/go-bip39 v1.1.0 h1:5eUemwrMargf3BSLRRCalXT93Ns6pQJIjYQN2nyfOP8= +github.com/tyler-smith/go-bip39 v1.1.0/go.mod h1:gUYDtqQw1JS3ZJ8UWVcGTGqqr6YIN3CWg+kkNaLt55U= github.com/ugorji/go/codec v1.2.12 h1:9LC83zGrHhuUA9l16C9AHXAqEV/2wBQ4nkvumAE65EE= github.com/ugorji/go/codec v1.2.12/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg= go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= @@ -195,10 +209,10 @@ golang.org/x/arch v0.13.0/go.mod h1:FEVrYAQjsQXMVJ1nsMoVVXPZg6p2JE2mx8psSWTDQys= golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.32.0 h1:euUpcYgM8WcP71gNpTqQCn6rC2t6ULUPiOzfWaXVVfc= 
-golang.org/x/crypto v0.32.0/go.mod h1:ZnnJkOaASj8g0AjIduWNlq2NRxL0PlBrbKVyZ6V/Ugc= -golang.org/x/exp v0.0.0-20230905200255-921286631fa9 h1:GoHiUyI/Tp2nVkLI2mCxVkOjsbSXD66ic0XW0js0R9g= -golang.org/x/exp v0.0.0-20230905200255-921286631fa9/go.mod h1:S2oDrQGGwySpoQPVqRShND87VCbxmc6bL1Yd2oYrm6k= +golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34= +golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc= +golang.org/x/exp v0.0.0-20250305212735-054e65f0b394 h1:nDVHiLt8aIbd/VzvPWN6kSOPE7+F/fNFDSXLVYkE/Iw= +golang.org/x/exp v0.0.0-20250305212735-054e65f0b394/go.mod h1:sIifuuw/Yco/y6yb6+bDNfyeQ/MdPUy/hKEMYQV17cM= golang.org/x/net v0.0.0-20180719180050-a680a1efc54d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= @@ -219,24 +233,30 @@ golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU= -golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik= +golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= -golang.org/x/text 
v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= +golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY= +golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 h1:H2TDz8ibqkAF6YGhCdN3jS9O0/s90v0rJh3X/OLHEUk= golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240314234333-6e1732d8331c h1:lfpJ/2rWPa/kJgxyyXM8PrNnfCzcmxJ265mADgwmvLI= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240314234333-6e1732d8331c/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY= +google.golang.org/grpc v1.62.1 h1:B4n+nfKzOICUXMgyrNd19h/I9oH0L1pizfk1d4zSgTk= +google.golang.org/grpc v1.62.1/go.mod h1:IWTG0VlJLCh1SkC58F7np9ka9mx/WNkjl4PGJaiq+QE= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod 
h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.36.2 h1:R8FeyR1/eLmkutZOM5CWghmo5itiG9z0ktFlTVLuTmU= google.golang.org/protobuf v1.36.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/internal/benchmark/benchmark_v1.go b/internal/benchmark/benchmark_v1.go new file mode 100644 index 0000000..ad7a0c7 --- /dev/null +++ b/internal/benchmark/benchmark_v1.go @@ -0,0 +1,110 @@ +package benchmark + +import ( + "sync" + "time" + + "github.com/setavenger/blindbit-lib/logging" + "github.com/setavenger/blindbit-lib/networking" +) + +// BenchmarkV1 runs the v1 HTTP API benchmark using the existing ClientBlindBit +func BenchmarkV1(startHeight, endHeight uint64, baseURL string) { + logging.L.Info().Msgf("Starting v1 HTTP benchmark from height %d to %d", startHeight, endHeight) + + // Create client using existing networking code + client := &networking.ClientBlindBit{BaseURL: baseURL} + + startTime := time.Now() + + // Fetch data for each height + for height := startHeight; height <= endHeight; height++ { + blockData, err := fetchBlockDataV1(height, client) + if err != nil { + logging.L.Err(err).Uint64("height", height).Msg("failed to fetch block data") + continue + } + + logging.L.Debug().Uint64("height", height).Msg("fetched block data") + _ = blockData // Use blockData to avoid compiler warning + } + + duration := time.Since(startTime) + blocksProcessed := endHeight - startHeight + 1 + + logging.L.Info(). + Uint64("start_height", startHeight). + Uint64("end_height", endHeight). + Uint64("blocks_processed", blocksProcessed). + Dur("total_duration", duration). + Float64("blocks_per_second", float64(blocksProcessed)/duration.Seconds()). 
+ Msg("v1 HTTP benchmark completed") +} + +// fetchBlockDataV1 fetches block data for a single height using existing ClientBlindBit +func fetchBlockDataV1(height uint64, client *networking.ClientBlindBit) (*BlockDataV1, error) { + var wg sync.WaitGroup + wg.Add(3) + + errChan := make(chan error, 3) + + var filterNew, filterSpent *networking.Filter + var tweaks [][33]byte + + // Fetch new UTXOs filter + go func() { + defer wg.Done() + var err error + filterNew, err = client.GetFilter(height, networking.NewUTXOFilterType) + if err != nil { + logging.L.Err(err).Msg("failed to get new utxos filter") + errChan <- err + } + }() + + // Fetch spent outpoints filter + go func() { + defer wg.Done() + var err error + filterSpent, err = client.GetFilter(height, networking.SpentOutpointsFilterType) + if err != nil { + logging.L.Err(err).Msg("failed to get spent outpoints filter") + errChan <- err + } + }() + + // Fetch tweaks + go func() { + defer wg.Done() + var err error + tweaks, err = client.GetTweaks(height, 0) // 0 = no dust limit + if err != nil { + logging.L.Err(err).Msg("failed to pull tweaks") + errChan <- err + } + }() + + wg.Wait() + + select { + case err := <-errChan: + return nil, err + default: + // No errors + } + + return &BlockDataV1{ + Height: height, + FilterNew: filterNew, + FilterSpent: filterSpent, + Tweaks: tweaks, + }, nil +} + +// BlockDataV1 represents the block data structure for v1 +type BlockDataV1 struct { + Height uint64 + FilterNew *networking.Filter + FilterSpent *networking.Filter + Tweaks [][33]byte +} diff --git a/internal/benchmark/benchmark_v2.go b/internal/benchmark/benchmark_v2.go new file mode 100644 index 0000000..43e5c1a --- /dev/null +++ b/internal/benchmark/benchmark_v2.go @@ -0,0 +1,72 @@ +package benchmark + +import ( + "context" + "time" + + "github.com/setavenger/blindbit-lib/logging" + "github.com/setavenger/blindbit-lib/proto/pb" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" +) + +// BenchmarkV2 
runs the v2 gRPC streaming benchmark +func BenchmarkV2(startHeight, endHeight uint64, grpcHost string) { + logging.L.Info().Msgf("Starting v2 gRPC streaming benchmark from height %d to %d", startHeight, endHeight) + + // Connect to gRPC server + conn, err := grpc.Dial(grpcHost, grpc.WithTransportCredentials(insecure.NewCredentials())) + if err != nil { + logging.L.Err(err).Msg("failed to connect to gRPC server") + return + } + defer conn.Close() + + client := pb.NewOracleServiceClient(conn) + + startTime := time.Now() + + // Use streaming API to fetch all blocks at once + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute) + defer cancel() + + req := &pb.RangedBlockHeightRequest{ + Start: startHeight, + End: endHeight, + } + + stream, err := client.StreamBlockBatchSlim(ctx, req) + if err != nil { + logging.L.Err(err).Msg("failed to start streaming") + return + } + + blocksProcessed := uint64(0) + + for { + batch, err := stream.Recv() + if err != nil { + if err.Error() == "EOF" { + break + } + logging.L.Err(err).Msg("failed to receive batch") + break + } + + blocksProcessed++ + logging.L.Debug().Uint64("height", uint64(batch.BlockIdentifier.BlockHeight)).Msg("received block batch") + + // Use batch to avoid compiler warning + _ = batch + } + + duration := time.Since(startTime) + + logging.L.Info(). + Uint64("start_height", startHeight). + Uint64("end_height", endHeight). + Uint64("blocks_processed", blocksProcessed). + Dur("total_duration", duration). + Float64("blocks_per_second", float64(blocksProcessed)/duration.Seconds()). 
+ Msg("v2 gRPC streaming benchmark completed") +} diff --git a/internal/benchmark/compare.go b/internal/benchmark/compare.go new file mode 100644 index 0000000..4b61a9a --- /dev/null +++ b/internal/benchmark/compare.go @@ -0,0 +1,145 @@ +package benchmark + +import ( + "context" + "fmt" + "time" + + "github.com/setavenger/blindbit-lib/logging" + "github.com/setavenger/blindbit-lib/networking" + "github.com/setavenger/blindbit-lib/proto/pb" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" +) + +// CompareV1V2Results compares v1 and v2 results for validation +func CompareV1V2Results(startHeight, endHeight uint64, httpURL, grpcHost string) { + logging.L.Info().Msgf("Comparing v1 and v2 results from height %d to %d", startHeight, endHeight) + + for height := startHeight; height <= endHeight; height++ { + logging.L.Info().Uint64("height", height).Msg("comparing block") + + // Fetch v1 data + v1Data, err := fetchBlockDataV1(height, &networking.ClientBlindBit{BaseURL: httpURL}) + if err != nil { + logging.L.Err(err).Uint64("height", height).Msg("failed to fetch v1 data") + continue + } + + // Fetch v2 data + v2Data, err := fetchBlockDataV2(height, grpcHost) + if err != nil { + logging.L.Err(err).Uint64("height", height).Msg("failed to fetch v2 data") + continue + } + + // Quick validation + if len(v1Data.Tweaks) != len(v2Data.Tweaks) { + logging.L.Warn().Uint64("height", height). + Int("v1_tweaks", len(v1Data.Tweaks)). + Int("v2_tweaks", len(v2Data.Tweaks)). + Msg("tweak count mismatch") + } + + if v1Data.FilterNew != nil && v2Data.FilterNew != nil { + if len(v1Data.FilterNew.Data) != len(v2Data.FilterNew.Data) { + logging.L.Warn().Uint64("height", height). + Int("v1_filter_new", len(v1Data.FilterNew.Data)). + Int("v2_filter_new", len(v2Data.FilterNew.Data)). 
+ Msg("new UTXOs filter data length mismatch") + } + } + + if v1Data.FilterSpent != nil && v2Data.FilterSpent != nil { + if len(v1Data.FilterSpent.Data) != len(v2Data.FilterSpent.Data) { + logging.L.Warn().Uint64("height", height). + Int("v1_filter_spent", len(v1Data.FilterSpent.Data)). + Int("v2_filter_spent", len(v2Data.FilterSpent.Data)). + Msg("spent outpoints filter data length mismatch") + } + } + + logging.L.Info().Uint64("height", height).Msg("block comparison completed") + } + + logging.L.Info().Msg("All block comparisons completed") +} + +// fetchBlockDataV2 fetches block data using v2 gRPC API +func fetchBlockDataV2(height uint64, grpcHost string) (*BlockDataV2, error) { + // Connect to gRPC server + conn, err := grpc.Dial(grpcHost, grpc.WithTransportCredentials(insecure.NewCredentials())) + if err != nil { + return nil, fmt.Errorf("failed to connect to gRPC server: %v", err) + } + defer conn.Close() + + client := pb.NewOracleServiceClient(conn) + + // Use streaming API to fetch single block + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + req := &pb.RangedBlockHeightRequest{ + Start: height, + End: height, + } + + stream, err := client.StreamBlockBatchSlim(ctx, req) + if err != nil { + return nil, fmt.Errorf("failed to start streaming: %v", err) + } + + // Receive the single batch + batch, err := stream.Recv() + if err != nil { + return nil, fmt.Errorf("failed to receive batch: %v", err) + } + + // Convert protobuf data to our format + return &BlockDataV2{ + Height: height, + FilterNew: convertFilterData(batch.NewUtxosFilter, batch.BlockIdentifier), + FilterSpent: convertFilterData(batch.SpentUtxosFilter, batch.BlockIdentifier), + Tweaks: convertTweaksToArray(batch.Tweaks), + }, nil +} + +// convertTweaksToArray converts [][]byte to [][33]byte +func convertTweaksToArray(tweaks [][]byte) [][33]byte { + result := make([][33]byte, len(tweaks)) + for i, tweak := range tweaks { + if len(tweak) == 33 { + 
copy(result[i][:], tweak) + } + } + return result +} + +// convertFilterData converts protobuf FilterData to networking.Filter +func convertFilterData(pbFilter *pb.FilterData, blockIdentifier *pb.BlockIdentifier) *networking.Filter { + if pbFilter == nil { + return nil + } + + // Convert block hash from bytes to [32]byte + var blockHash [32]byte + if len(blockIdentifier.BlockHash) == 32 { + copy(blockHash[:], blockIdentifier.BlockHash) + } + + return &networking.Filter{ + FilterType: uint8(pbFilter.FilterType), + BlockHeight: blockIdentifier.BlockHeight, + BlockHash: blockHash, + Data: pbFilter.Data, + } +} + +// BlockDataV2 represents the block data structure for v2 +type BlockDataV2 struct { + Height uint64 + FilterNew *networking.Filter + FilterSpent *networking.Filter + Tweaks [][33]byte +} diff --git a/internal/benchmark/compare_test.go b/internal/benchmark/compare_test.go new file mode 100644 index 0000000..7bfadc4 --- /dev/null +++ b/internal/benchmark/compare_test.go @@ -0,0 +1,124 @@ +package benchmark + +import ( + "testing" + + "github.com/rs/zerolog" + "github.com/setavenger/blindbit-lib/logging" + "github.com/setavenger/blindbit-lib/networking" +) + +// TestCompareV1V2Results compares results from v1 HTTP and v2 gRPC endpoints +func TestCompareV1V2Results(t *testing.T) { + // Configuration + httpURL := "http://127.0.0.1:8000" + grpcHost := "127.0.0.1:50051" + testHeight := uint64(100) // Adjust to a height you know has data + + // Setup logging + logging.SetLogLevel(zerolog.InfoLevel) + + t.Logf("Comparing v1 and v2 results for block height %d", testHeight) + + // Fetch data from v1 (HTTP) + v1Data, err := fetchBlockDataV1(testHeight, &networking.ClientBlindBit{BaseURL: httpURL}) + if err != nil { + t.Fatalf("Failed to fetch v1 data: %v", err) + } + + // Fetch data from v2 (gRPC) + v2Data, err := fetchBlockDataV2(testHeight, grpcHost) + if err != nil { + t.Fatalf("Failed to fetch v2 data: %v", err) + } + + // Compare results + t.Run("CompareTweaks", 
func(t *testing.T) { + compareTweaks(t, v1Data.Tweaks, v2Data.Tweaks) + }) + + t.Run("CompareNewUTXOsFilter", func(t *testing.T) { + compareFilter(t, "NewUTXOs", v1Data.FilterNew, v2Data.FilterNew) + }) + + t.Run("CompareSpentOutpointsFilter", func(t *testing.T) { + compareFilter(t, "SpentOutpoints", v1Data.FilterSpent, v2Data.FilterSpent) + }) + + t.Log("All comparisons passed - v1 and v2 endpoints return identical data") +} + +// compareTweaks compares tweak arrays from v1 and v2 +func compareTweaks(t *testing.T, v1Tweaks, v2Tweaks [][33]byte) { + if len(v1Tweaks) != len(v2Tweaks) { + t.Errorf("Tweak count mismatch: v1=%d, v2=%d", len(v1Tweaks), len(v2Tweaks)) + return + } + + t.Logf("Comparing %d tweaks", len(v1Tweaks)) + + for i, v1Tweak := range v1Tweaks { + if i >= len(v2Tweaks) { + t.Errorf("v2 tweaks array too short at index %d", i) + continue + } + + v2Tweak := v2Tweaks[i] + if v1Tweak != v2Tweak { + t.Errorf("Tweak mismatch at index %d: v1=%x, v2=%x", + i, v1Tweak, v2Tweak) + } + } +} + +// compareFilter compares filter data from v1 and v2 +func compareFilter(t *testing.T, filterName string, v1Filter, v2Filter *networking.Filter) { + if v1Filter == nil && v2Filter == nil { + t.Logf("%s filter: both nil (no data)", filterName) + return + } + + if v1Filter == nil { + t.Errorf("%s filter: v1 is nil but v2 is not", filterName) + return + } + + if v2Filter == nil { + t.Errorf("%s filter: v2 is nil but v1 is not", filterName) + return + } + + t.Logf("%s filter: comparing data", filterName) + + // Compare filter data + if len(v1Filter.Data) != len(v2Filter.Data) { + t.Errorf("%s filter data length mismatch: v1=%d, v2=%d", + filterName, len(v1Filter.Data), len(v2Filter.Data)) + return + } + + for i, v1Byte := range v1Filter.Data { + if i >= len(v2Filter.Data) { + t.Errorf("%s filter: v2 data too short at index %d", filterName, i) + continue + } + + v2Byte := v2Filter.Data[i] + if v1Byte != v2Byte { + t.Errorf("%s filter data mismatch at index %d: v1=%x, v2=%x", + 
filterName, i, v1Byte, v2Byte) + } + } + + // Compare block hash + if v1Filter.BlockHash != v2Filter.BlockHash { + t.Errorf("%s filter block hash mismatch: v1=%x, v2=%x", + filterName, v1Filter.BlockHash, v2Filter.BlockHash) + } + + // Compare block height + if v1Filter.BlockHeight != v2Filter.BlockHeight { + t.Errorf("%s filter block height mismatch: v1=%d, v2=%d", + filterName, v1Filter.BlockHeight, v2Filter.BlockHeight) + } +} diff --git a/src/common/config.go b/internal/config/config.go similarity index 64% rename from src/common/config.go rename to internal/config/config.go index 23d773e..a29300a 100644 --- a/src/common/config.go +++ b/internal/config/config.go @@ -1,9 +1,10 @@ -package common +package config import ( "errors" - "os" + "github.com/rs/zerolog" + "github.com/setavenger/blindbit-lib/logging" "github.com/spf13/viper" ) @@ -13,13 +14,14 @@ func LoadConfigs(pathToConfig string) { // Handle errors reading the config file if err := viper.ReadInConfig(); err != nil { - WarningLogger.Println("No config file detected", err.Error()) + logging.L.Warn().Err(err).Msg("No config file detected") } /* set defaults */ // network viper.SetDefault("max_parallel_requests", MaxParallelRequests) - viper.SetDefault("host", Host) + viper.SetDefault("http_host", HTTPHost) + viper.SetDefault("grpc_host", GRPCHost) viper.SetDefault("chain", "signet") // RPC endpoint only. 
Fails if others are not set @@ -30,10 +32,11 @@ func LoadConfigs(pathToConfig string) { viper.SetDefault("tweaks_full_basic", true) viper.SetDefault("tweaks_full_with_dust_filter", false) viper.SetDefault("tweaks_cut_through_with_dust_filter", false) - + viper.SetDefault("log_level", "info") // Bind viper keys to environment variables (optional, for backup) viper.AutomaticEnv() - viper.BindEnv("host", "HOST") + viper.BindEnv("http_host", "HTTP_HOST") + viper.BindEnv("grpc_host", "GRPC_HOST") viper.BindEnv("chain", "CHAIN") viper.BindEnv("rpc_endpoint", "RPC_ENDPOINT") viper.BindEnv("cookie_path", "COOKIE_PATH") @@ -46,12 +49,14 @@ func LoadConfigs(pathToConfig string) { viper.BindEnv("tweaks_full_basic", "TWEAKS_FULL_BASIC") viper.BindEnv("tweaks_full_with_dust_filter", "TWEAKS_FULL_WITH_DUST_FILTER") viper.BindEnv("tweaks_cut_through_with_dust_filter", "TWEAKS_CUT_THROUGH_WITH_DUST_FILTER") + viper.BindEnv("log_level", "LOG_LEVEL") /* read and set config variables */ // General SyncStartHeight = viper.GetUint32("sync_start_height") - Host = viper.GetString("host") - + HTTPHost = viper.GetString("http_host") + GRPCHost = viper.GetString("grpc_host") + LogLevel = viper.GetString("log_level") // Performance MaxParallelRequests = viper.GetUint16("max_parallel_requests") MaxParallelTweakComputations = viper.GetInt("max_parallel_tweak_computations") @@ -80,23 +85,37 @@ func LoadConfigs(pathToConfig string) { case "testnet": Chain = Testnet3 default: - panic("chain undefined") + logging.L.Fatal().Msg("chain undefined") + return + } + + switch LogLevel { + case "trace": + logging.SetLogLevel(zerolog.TraceLevel) + case "info": + logging.SetLogLevel(zerolog.InfoLevel) + case "debug": + logging.SetLogLevel(zerolog.DebugLevel) + case "warn": + logging.SetLogLevel(zerolog.WarnLevel) + case "error": + logging.SetLogLevel(zerolog.ErrorLevel) } // todo print settings - InfoLogger.Printf("tweaks_only: %t\n", TweaksOnly) - InfoLogger.Printf("tweaks_full_basic: %t\n", 
TweakIndexFullNoDust) - InfoLogger.Printf("tweaks_full_with_dust_filter: %t\n", TweakIndexFullIncludingDust) - InfoLogger.Printf("tweaks_cut_through_with_dust_filter: %t\n", TweaksCutThroughWithDust) + logging.L.Info().Msgf("tweaks_only: %t", TweaksOnly) + logging.L.Info().Msgf("tweaks_full_basic: %t", TweakIndexFullNoDust) + logging.L.Info().Msgf("tweaks_full_with_dust_filter: %t", TweakIndexFullIncludingDust) + logging.L.Info().Msgf("tweaks_cut_through_with_dust_filter: %t", TweaksCutThroughWithDust) if !TweakIndexFullNoDust && !TweakIndexFullIncludingDust && !TweaksCutThroughWithDust { - WarningLogger.Println("no tweaks are being collected, all tweak settings were set to 0") - WarningLogger.Println("make sure your configuration loaded correctly, check example blindbit.toml for configuration") + logging.L.Warn().Msg("no tweaks are being collected, all tweak settings were set to 0") + logging.L.Warn().Msg("make sure your configuration loaded correctly, check example blindbit.toml for configuration") } if TweaksCutThroughWithDust && TweaksOnly { err := errors.New("cut through requires tweaks_only to be set to 0") - ErrorLogger.Println(err) - os.Exit(1) + logging.L.Fatal().Err(err).Msg("cut through requires tweaks_only to be set to 0") + return } } diff --git a/internal/config/endpoints.go b/internal/config/endpoints.go new file mode 100644 index 0000000..f326441 --- /dev/null +++ b/internal/config/endpoints.go @@ -0,0 +1,6 @@ +package config + +const MempoolEndpointMainnet = "http://localhost:80/api/tx/" +const MempoolEndpointSignet = "https://mempool.space/signet/api/tx" +const MempoolEndpointTestnet3 = "https://mempool.space/testnet/api/tx" +const MempoolEndpointTestnet4 = "https://mempool.space/testnet4/api/tx" diff --git a/src/common/vars.go b/internal/config/vars.go similarity index 77% rename from src/common/vars.go rename to internal/config/vars.go index ce51381..add09ab 100644 --- a/src/common/vars.go +++ b/internal/config/vars.go @@ -1,17 +1,31 @@ -package 
common +package config + +import ( + "github.com/setavenger/blindbit-lib/logging" + "github.com/setavenger/blindbit-lib/utils" +) // TaprootActivation // todo might be inapplicable due to transactions that have taproot prevouts from before the activation // // is relevant for the height-to-hash lookup in the db -const TaprootActivation uint32 = 709632 -const ConfigFileName string = "blindbit.toml" -const DefaultBaseDirectory = "~/.blindbit-oracle" -var TweaksOnly bool -var TweakIndexFullNoDust bool -var TweakIndexFullIncludingDust bool -var TweaksCutThroughWithDust bool +var ( + LogLevel = "info" +) + +const ( + TaprootActivation uint32 = 709632 + ConfigFileName string = "blindbit.toml" + DefaultBaseDirectory string = "~/.blindbit-oracle" +) + +var ( + TweaksOnly bool + TweakIndexFullNoDust bool + TweakIndexFullIncludingDust bool + TweaksCutThroughWithDust bool +) var ( RpcEndpoint = "http://127.0.0.1:8332" // default local node @@ -23,7 +37,8 @@ var ( DBPath = "" LogsPath = "" - Host = "127.0.0.1:8000" + HTTPHost = "127.0.0.1:8000" + GRPCHost = "127.0.0.1:50051" ) type chain int @@ -56,7 +71,7 @@ var ( PruneFrequency = 72 ) -// one has to call SetDirectories otherwise common.DBPath will be empty +// one has to call SetDirectories otherwise config.DBPath will be empty var ( DBPathHeaders string DBPathHeadersInv string // for height to blockHash mapping @@ -73,7 +88,7 @@ var ( var NumsH = []byte{80, 146, 155, 116, 193, 160, 73, 84, 183, 139, 75, 96, 53, 233, 122, 94, 7, 138, 90, 15, 40, 236, 150, 213, 71, 191, 238, 154, 206, 128, 58, 192} func SetDirectories() { - BaseDirectory = ResolvePath(BaseDirectory) + BaseDirectory = utils.ResolvePath(BaseDirectory) DBPath = BaseDirectory + "/data" LogsPath = BaseDirectory + "/logs" @@ -92,6 +107,7 @@ func SetDirectories() { func HeaderMustSyncHeight() uint32 { switch Chain { case Mainnet: + // height based on heuristic checks to see where no old taproot style coins were locked return 500_000 case Signet: return 1 @@ -100,7 
+116,8 @@ func HeaderMustSyncHeight() uint32 { case Testnet3: return 1 case Unknown: - panic("chain not defined") + logging.L.Panic().Msg("chain not defined") + return 0 default: return 1 } @@ -117,7 +134,8 @@ func ChainToString(c chain) string { case Testnet3: return "testnet" default: - panic("chain not defined") + logging.L.Panic().Msg("chain not defined") + return "" } } diff --git a/internal/core/block_test.go b/internal/core/block_test.go new file mode 100644 index 0000000..b695448 --- /dev/null +++ b/internal/core/block_test.go @@ -0,0 +1,36 @@ +package core + +import ( + "testing" + + "github.com/setavenger/blindbit-lib/logging" + "github.com/setavenger/blindbit-oracle/internal/testhelpers" + "github.com/setavenger/blindbit-oracle/internal/types" +) + +func TestBlockAnalysis(t *testing.T) { + var block types.Block + err := testhelpers.LoadBlockFromFile("/Users/setorblagogee/dev/sp-test-dir/block-716120.json", &block) + if err != nil { + logging.L.Fatal().Err(err).Msg("error loading block from file") + t.FailNow() + } + + tweaks, err := ComputeTweaksForBlock(&block) + if err != nil { + logging.L.Fatal().Err(err).Msg("error computing tweaks for block") + t.FailNow() + } + + for _, tweak := range tweaks { + logging.L.Info().Hex("tweak", tweak.TweakData[:]).Hex("txid", tweak.Txid[:]).Msg("tweak") + } + + for _, tx := range block.Txs { + for _, tweak := range tweaks { + if tx.Txid == tweak.Txid { + logging.L.Info().Hex("tweak", tweak.TweakData[:]).Msg("tweak") + } + } + } +} diff --git a/src/core/cfilter.go b/internal/core/cfilter.go similarity index 60% rename from src/core/cfilter.go rename to internal/core/cfilter.go index e4a4027..9360c86 100644 --- a/src/core/cfilter.go +++ b/internal/core/cfilter.go @@ -1,15 +1,15 @@ package core import ( - "SilentPaymentAppBackend/src/common" - "SilentPaymentAppBackend/src/common/types" - "bytes" "encoding/binary" "encoding/hex" "github.com/btcsuite/btcd/btcutil/gcs/builder" "github.com/btcsuite/btcd/chaincfg/chainhash" 
"github.com/btcsuite/btcutil/gcs" + "github.com/setavenger/blindbit-lib/logging" + "github.com/setavenger/blindbit-lib/utils" + "github.com/setavenger/blindbit-oracle/internal/types" ) // BuildTaprootOnlyFilter creates the taproot only filter @@ -21,8 +21,11 @@ func BuildNewUTXOsFilter(block *types.Block) (types.Filter, error) { if vout.ScriptPubKey.Type == "witness_v1_taproot" { scriptAsBytes, err := hex.DecodeString(vout.ScriptPubKey.Hex) if err != nil { - common.DebugLogger.Printf("Failed to build taproot filter for block: %s (%d)\n", block.Hash, block.Height) - common.ErrorLogger.Fatalln(err) + logging.L.Fatal(). + Err(err). + Str("blockhash", block.Hash). + Uint32("height", block.Height). + Msg("Failed to build taproot filter for block") return types.Filter{}, err } // only append the x-only pubKey. reduces complexity @@ -33,30 +36,33 @@ func BuildNewUTXOsFilter(block *types.Block) (types.Filter, error) { blockHashBytes, err := hex.DecodeString(block.Hash) if err != nil { - common.DebugLogger.Println("blockHash", block.Hash) - common.ErrorLogger.Fatalln(err) + logging.L.Fatal().Err(err).Str("blockhash", block.Hash).Msg("Failed to decode block hash") return types.Filter{}, err } c := chainhash.Hash{} - err = c.SetBytes(common.ReverseBytes(blockHashBytes)) + err = c.SetBytes(utils.ReverseBytes(blockHashBytes)) if err != nil { - common.DebugLogger.Println("blockHash", block.Hash) - common.ErrorLogger.Fatalln(err) + logging.L.Fatal().Err(err).Str("blockhash", block.Hash).Msg("Failed to set block hash") return types.Filter{}, err - } key := builder.DeriveKey(&c) filter, err := gcs.BuildGCSFilter(builder.DefaultP, builder.DefaultM, key, taprootOutput) if err != nil { - common.ErrorLogger.Fatalln(err) + logging.L.Fatal().Err(err).Str("blockhash", block.Hash).Msg("Failed to build GCS filter") return types.Filter{}, err } nBytes, err := filter.NBytes() if err != nil { - common.ErrorLogger.Fatalln(err) + logging.L.Fatal().Err(err).Str("blockhash", 
block.Hash).Msg("Failed to get NBytes") + return types.Filter{}, err + } + + blockhashBytes, err := hex.DecodeString(block.Hash) + if err != nil { + logging.L.Fatal().Err(err).Str("blockhash", block.Hash).Msg("Failed to decode block hash") return types.Filter{}, err } @@ -64,25 +70,17 @@ func BuildNewUTXOsFilter(block *types.Block) (types.Filter, error) { FilterType: 4, BlockHeight: block.Height, Data: nBytes, - BlockHash: block.Hash, + BlockHash: [32]byte(blockhashBytes), }, nil } // BuildSpentUTXOsFilter creates a filter based on the spent func BuildSpentUTXOsFilter(spentOutpointsIndex types.SpentOutpointsIndex) (types.Filter, error) { - - blockHashBytes, err := hex.DecodeString(spentOutpointsIndex.BlockHash) - if err != nil { - common.DebugLogger.Println("blockHash", spentOutpointsIndex.BlockHash) - common.ErrorLogger.Fatalln(err) - return types.Filter{}, err - } c := chainhash.Hash{} - err = c.SetBytes(common.ReverseBytes(blockHashBytes)) + err := c.SetBytes(utils.ReverseBytes(spentOutpointsIndex.BlockHash[:])) if err != nil { - common.DebugLogger.Println("blockHash", spentOutpointsIndex.BlockHash) - common.ErrorLogger.Fatalln(err) + logging.L.Fatal().Err(err).Hex("blockhash", spentOutpointsIndex.BlockHash[:]).Msg("Failed to set block hash") return types.Filter{}, err } @@ -98,13 +96,13 @@ func BuildSpentUTXOsFilter(spentOutpointsIndex types.SpentOutpointsIndex) (types filter, err := gcs.BuildGCSFilter(builder.DefaultP, builder.DefaultM, key, data) if err != nil { - common.ErrorLogger.Fatalln(err) + logging.L.Fatal().Err(err).Hex("blockhash", spentOutpointsIndex.BlockHash[:]).Msg("Failed to build GCS filter") return types.Filter{}, err } nBytes, err := filter.NBytes() if err != nil { - common.ErrorLogger.Fatalln(err) + logging.L.Fatal().Err(err).Hex("blockhash", spentOutpointsIndex.BlockHash[:]).Msg("Failed to get NBytes") return types.Filter{}, err } @@ -117,18 +115,10 @@ func BuildSpentUTXOsFilter(spentOutpointsIndex types.SpentOutpointsIndex) (types } func 
SerialiseToOutpoint(utxo types.UTXO) ([]byte, error) { - var buf bytes.Buffer - - txidBytes, err := hex.DecodeString(utxo.Txid) - if err != nil { - common.DebugLogger.Println(utxo.Txid) - common.ErrorLogger.Println(err) - return nil, err - } + out := make([]byte, 32+4) - // err is always nil - buf.Write(common.ReverseBytes(txidBytes)) + copy(out[:32], utils.ReverseBytesCopy(utxo.Txid[:])) + binary.LittleEndian.PutUint32(out[32:], utxo.Vout) - binary.Write(&buf, binary.LittleEndian, utxo.Vout) - return buf.Bytes(), err + return out, nil } diff --git a/src/core/cleanup.go b/internal/core/cleanup.go similarity index 74% rename from src/core/cleanup.go rename to internal/core/cleanup.go index b3be3ed..7099412 100644 --- a/src/core/cleanup.go +++ b/internal/core/cleanup.go @@ -1,31 +1,34 @@ package core import ( - "SilentPaymentAppBackend/src/common" - "SilentPaymentAppBackend/src/common/types" - "SilentPaymentAppBackend/src/db/dblevel" "errors" + + "github.com/setavenger/blindbit-lib/logging" + "github.com/setavenger/blindbit-oracle/internal/config" + "github.com/setavenger/blindbit-oracle/internal/dblevel" + "github.com/setavenger/blindbit-oracle/internal/types" ) func overwriteUTXOsWithLookUp(utxos []types.UTXO) error { - common.DebugLogger.Println("overwriting utxos with lookup") - var utxosToOverwrite []types.UTXO + logging.L.Trace().Msg("overwriting utxos with lookup") + var utxosToOverwrite []*types.UTXO for _, utxo := range utxos { _, err := dblevel.FetchByBlockHashAndTxidUTXOs(utxo.BlockHash, utxo.Txid) if err != nil && !errors.Is(err, dblevel.NoEntryErr{}) { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error fetching utxos") return err } else if err != nil && errors.Is(err, dblevel.NoEntryErr{}) { // we skip if no entry was found. We don't want to insert those continue } - // we actually don't have to check the fetched UTXOs. If any utxos were found for this transaction it means that it was eligible. 
- // hence all taproot utxos have to be present - utxosToOverwrite = append(utxosToOverwrite, utxo) + // We actually don't have to check the fetched UTXOs. + // If any utxos were found for this transaction it means that it was eligible. + // Hence all taproot utxos have to be present + utxosToOverwrite = append(utxosToOverwrite, &utxo) } err := dblevel.InsertUTXOs(utxosToOverwrite) - alreadyCheckedTxids := make(map[string]struct{}) + alreadyCheckedTxids := make(map[[32]byte]struct{}) for _, utxo := range utxosToOverwrite { if _, ok := alreadyCheckedTxids[utxo.Txid]; ok { continue @@ -33,19 +36,19 @@ func overwriteUTXOsWithLookUp(utxos []types.UTXO) error { var key []byte key, err = utxo.SerialiseKey() if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error serialising utxo key") return err } err = dblevel.PruneUTXOs(key[:64]) if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error pruning utxos") return err } alreadyCheckedTxids[utxo.Txid] = struct{}{} } if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error pruning utxos") return err } return err @@ -53,11 +56,11 @@ func overwriteUTXOsWithLookUp(utxos []types.UTXO) error { // todo construct the subsequent deletion of all utxos per transaction once all per transaction are spent func markSpentUTXOsAndTweaks(utxos []types.UTXO) error { - common.DebugLogger.Println("marking utxos") + logging.L.Trace().Msg("marking utxos") if len(utxos) == 0 { - if common.Chain == common.Mainnet { + if config.Chain == config.Mainnet { // no warnings on other chains as it is very likely to not have any taproot outputs for several blocks on end - common.DebugLogger.Println("no utxos to mark as spent") + logging.L.Trace().Msg("no utxos to mark as spent") } return nil } @@ -74,12 +77,12 @@ func markSpentUTXOsAndTweaks(utxos []types.UTXO) error { // First overwrite the spend UTXOs which now have the spent flag set err := overwriteUTXOsWithLookUp(utxos) if err != 
nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error overwriting utxos with lookup") return err } // we can only delete old tweaks if we actually have the index available - if !common.TweaksCutThroughWithDust { + if !config.TweaksCutThroughWithDust { return err } @@ -87,7 +90,7 @@ func markSpentUTXOsAndTweaks(utxos []types.UTXO) error { // we only need to check for one utxo per txid, so we reduce the number of utxos -> fewer lookups in DB var cleanUTXOs []types.UTXO - includedTxids := make(map[string]bool) + includedTxids := make(map[[32]byte]bool) for _, utxo := range utxos { if _, exists := includedTxids[utxo.Txid]; !exists { @@ -104,7 +107,7 @@ func markSpentUTXOsAndTweaks(utxos []types.UTXO) error { remainingUTXOs, err = dblevel.FetchByBlockHashAndTxidUTXOs(utxo.BlockHash, utxo.Txid) if err != nil && !errors.Is(err, dblevel.NoEntryErr{}) { // this is an actual error - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error fetching utxos") return err } else if err != nil && errors.Is(err, dblevel.NoEntryErr{}) { // utxos can be already deleted at this point. 
@@ -135,7 +138,7 @@ func markSpentUTXOsAndTweaks(utxos []types.UTXO) error { var newBiggest *uint64 newBiggest, err = types.FindBiggestRemainingUTXO(utxo, remainingUTXOs) if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error finding biggest remaining utxo") return err } if newBiggest != nil { @@ -152,13 +155,13 @@ func markSpentUTXOsAndTweaks(utxos []types.UTXO) error { err = dblevel.DeleteBatchTweaks(tweaksToDelete) if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error deleting tweaks") return err } err = dblevel.OverWriteTweaks(tweaksToOverwrite) if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error overwriting tweaks") return err } @@ -167,19 +170,19 @@ func markSpentUTXOsAndTweaks(utxos []types.UTXO) error { // ReindexDustLimitsOnly this routine adds the dust limit data to tweaks after a sync func ReindexDustLimitsOnly() error { - common.InfoLogger.Println("Reindexing dust limit from synced data") + logging.L.Info().Msg("Reindexing dust limit from synced data") err := dblevel.DustOverwriteRoutine() if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error reindexing dust limit") return err } - common.InfoLogger.Println("Reindexing dust limit done") + logging.L.Info().Msg("Reindexing dust limit done") return nil } // PruneUTXOs // This function searches the UTXO set for transactions where all UTXOs are marked as spent, and removes those UTXOs. 
func PruneAllUTXOs() error { - common.InfoLogger.Println("Pruning All UTXOs") + logging.L.Info().Msg("Pruning All UTXOs") return dblevel.PruneUTXOs(nil) } diff --git a/src/core/extractutxos.go b/internal/core/extractutxos.go similarity index 53% rename from src/core/extractutxos.go rename to internal/core/extractutxos.go index ba21aa0..b37bad5 100644 --- a/src/core/extractutxos.go +++ b/internal/core/extractutxos.go @@ -1,16 +1,19 @@ package core import ( - "SilentPaymentAppBackend/src/common" - "SilentPaymentAppBackend/src/common/types" - "SilentPaymentAppBackend/src/db/dblevel" + "encoding/hex" + + "github.com/setavenger/blindbit-lib/logging" + "github.com/setavenger/blindbit-lib/utils" + + "github.com/setavenger/blindbit-oracle/internal/dblevel" + "github.com/setavenger/blindbit-oracle/internal/types" ) -func ExtractNewUTXOs(block *types.Block, eligible map[string]struct{}) []types.UTXO { - common.DebugLogger.Println("Getting new UTXOs") - var utxos []types.UTXO +func ExtractNewUTXOs(block *types.Block, eligible map[string]struct{}) []*types.UTXO { + logging.L.Trace().Msg("Getting new UTXOs") + var utxos []*types.UTXO for _, tx := range block.Txs { - // only transactions with tweaks (pre-filtered by tweak computation) are going to be added _, ok := eligible[tx.Txid] if !ok { @@ -18,14 +21,20 @@ func ExtractNewUTXOs(block *types.Block, eligible map[string]struct{}) []types.U } for _, vout := range tx.Vout { if vout.ScriptPubKey.Type == "witness_v1_taproot" { - utxos = append(utxos, types.UTXO{ - Txid: tx.Txid, + // we use the fix sized conversion below with a panic + txidBytes, _ := hex.DecodeString(tx.Txid) + blockHashBytes, _ := hex.DecodeString(block.Hash) + + value := utils.ConvertFloatBTCtoSats(vout.Value) + utxos = append(utxos, &types.UTXO{ + Txid: utils.ConvertToFixedLength32(txidBytes), Vout: vout.N, - Value: common.ConvertFloatBTCtoSats(vout.Value), + Value: value, ScriptPubKey: vout.ScriptPubKey.Hex, BlockHeight: block.Height, - BlockHash: block.Hash, 
+ BlockHash: utils.ConvertToFixedLength32(blockHashBytes), Timestamp: block.Timestamp, + Spent: value == 0, // Mark as spent if value is 0 }) } } @@ -51,9 +60,7 @@ func extractSpentTaprootPubKeysFromTx(tx *types.Transaction, block *types.Block) continue } // todo change switch to simple if statement - switch vin.Prevout.ScriptPubKey.Type { - - case "witness_v1_taproot": + if vin.Prevout.ScriptPubKey.Type == "witness_v1_taproot" { // requires a pre-sync of height from taproot activation 709632 for blockHash mapping, // todo fails if CPFP prevout.height will be current block check for that var blockHash string @@ -65,25 +72,28 @@ func extractSpentTaprootPubKeysFromTx(tx *types.Transaction, block *types.Block) // after making sure we don't have prevout and vin in the same block we can do a standard lookup headerInv, err := dblevel.FetchByBlockHeightBlockHeaderInv(vin.Prevout.Height) if err != nil { - common.ErrorLogger.Println(err) - // panic becuase if this fails it means we have incomplete data which requires a sync - common.ErrorLogger.Printf("tx: %+v\n", tx) - common.ErrorLogger.Printf("prevout: %+v\n", vin.Prevout) - common.ErrorLogger.Println("Headers not synced from first taproot like occurrence. Either build complete index or fully sync headers only.") - panic(err) + logging.L.Err(err).Msg("Failed to fetch by block height block header inv") + logging.L.Debug().Any("tx", tx).Any("prevout", vin.Prevout).Msg("Failed to fetch by block height block header inv") + // panic because if this fails it means we have incomplete data which requires a sync + logging.L.Panic().Err(err).Msg("Headers not synced from first taproot like occurrence. 
Either build complete index or fully sync headers only.") + return nil } - blockHash = headerInv.Hash + blockHash = hex.EncodeToString(headerInv.Hash[:]) } + // we use the fix sized conversion below with a panic + txidBytes, _ := hex.DecodeString(vin.Txid) + blockHashBytes, _ := hex.DecodeString(blockHash) + spentUTXOs = append(spentUTXOs, types.UTXO{ - Txid: vin.Txid, + Txid: utils.ConvertToFixedLength32(txidBytes), Vout: vin.Vout, - Value: common.ConvertFloatBTCtoSats(vin.Prevout.Value), + Value: utils.ConvertFloatBTCtoSats(vin.Prevout.Value), ScriptPubKey: vin.Prevout.ScriptPubKey.Hex, - BlockHash: blockHash, + BlockHash: utils.ConvertToFixedLength32(blockHashBytes), Spent: true, }) - default: + } else { continue } } diff --git a/src/core/routine.go b/internal/core/routine.go similarity index 58% rename from src/core/routine.go rename to internal/core/routine.go index 8ead7b0..c891f23 100644 --- a/src/core/routine.go +++ b/internal/core/routine.go @@ -1,27 +1,30 @@ package core import ( - "SilentPaymentAppBackend/src/common" - "SilentPaymentAppBackend/src/common/types" - "SilentPaymentAppBackend/src/db/dblevel" - "errors" + "encoding/hex" "fmt" "time" + + "github.com/setavenger/blindbit-lib/logging" + "github.com/setavenger/blindbit-lib/utils" + "github.com/setavenger/blindbit-oracle/internal/config" + "github.com/setavenger/blindbit-oracle/internal/dblevel" + "github.com/setavenger/blindbit-oracle/internal/types" ) func CheckForNewBlockRoutine() { - common.InfoLogger.Println("starting check_for_new_block_routine") + logging.L.Info().Msg("starting check_for_new_block_routine") for { <-time.NewTicker(3 * time.Second).C blockHash, err := GetBestBlockHash() if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error getting best block hash") // todo fail or restart after too many fails? 
continue } err = FullProcessBlockHash(blockHash) if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error processing block") return } } @@ -30,21 +33,22 @@ func CheckForNewBlockRoutine() { func FullProcessBlockHash(blockHash string) error { block, err := PullBlock(blockHash) if err != nil && err.Error() != "block already processed" { // todo built in error - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error pulling block from node") return err } if block == nil { return nil } + // check whether previous block has already been processed // we do the check before so that we can subsequently delete spent UTXOs // this should not be a problem and only apply in very few cases // the index should be caught up on startup and hence a previous block // will most likely only be squeezed in if there were several blocks in between tip queries - if block.Height > common.SyncStartHeight { + if block.Height > config.SyncStartHeight { err = FullProcessBlockHash(block.PreviousBlockHash) if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error processing previous block") return err } } @@ -55,48 +59,52 @@ func FullProcessBlockHash(blockHash string) error { func PullBlock(blockHash string) (*types.Block, error) { if len(blockHash) != 64 { - common.ErrorLogger.Println("block_hash invalid:", blockHash) + logging.L.Err(fmt.Errorf("block_hash invalid: %s", blockHash)).Msg("block_hash invalid") return nil, fmt.Errorf("block_hash invalid: %s", blockHash) } // this method is preferred over lastHeader because then this function can be called for PreviousBlockHash - header, err := dblevel.FetchByBlockHashBlockHeader(blockHash) - if err != nil && !errors.Is(err, dblevel.NoEntryErr{}) { - // we ignore no entry error - common.ErrorLogger.Println(err) - return nil, err - } + // hashByteSlice, err := hex.DecodeString(blockHash) + // if err != nil { + // logging.L.Err(err).Msg("failed to hex decode blockhash") + // return nil, err + // 
} + // header, err := dblevel.FetchByBlockHashBlockHeader(utils.ConvertToFixedLength32(hashByteSlice)) + // if err != nil && !errors.Is(err, dblevel.NoEntryErr{}) { + // // we ignore no entry error + // logging.L.Err(err).Msg("error fetching block header") + // return nil, err + // } - if header != nil { - // todo might not want to constantly log this - // common.DebugLogger.Printf("Block: %s has already been processed\n", blockHash) - // if we already processed the header into our DB don't do anything - return nil, errors.New("block already processed") - } + // if header != nil { + // // if we already processed the header into our DB don't do anything + // return nil, errors.New("block already processed") + // } block, err := GetFullBlockPerBlockHash(blockHash) if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error getting full block per block hash") return nil, err } - //common.InfoLogger.Println("Received block:", blockHash) return block, nil } // CheckBlock checks whether the block hash has already been processed and will process the block if needed +// todo: needs to throw an error func CheckBlock(block *types.Block) { // todo add return type error // todo this should fail at the highest instance were its wrapped in, // fatal made sense here while it only had one use, // but might not want to exit the program if used in other locations - //common.InfoLogger.Println("Processing block:", block.Height) - common.DebugLogger.Println("block:", block.Height) + + // logging.L.Info().Msgf("Processing block: %d", block.Height) + logging.L.Trace().Msgf("block: %d", block.Height) err := HandleBlock(block) if err != nil { // todo handle better more gracefully, maybe retries - common.DebugLogger.Println("failed for block:", block.Hash) + logging.L.Err(err).Msgf("failed for block: %s", block.Hash) // program should exit here because it means we are missing a block and this needs immediate attention - common.ErrorLogger.Fatalln(err) + 
logging.L.Fatal().Err(err).Msgf("failed for block: %s", block.Hash) return } @@ -108,21 +116,35 @@ func CheckBlock(block *types.Block) { Height: block.Height, }) if err != nil { - common.DebugLogger.Println("could not insert header for:", block.Hash) + logging.L.Err(err). + Str("blockhash", block.Hash). + Msgf("could not insert header for: %s", block.Hash) + return } + hashByteSlice, err := hex.DecodeString(block.Hash) + if err != nil { + logging.L.Err(err).Msg("could not decode blockhash hex") + return + } err = dblevel.InsertBlockHeaderInv(types.BlockHeaderInv{ - Hash: block.Hash, + Hash: utils.ConvertToFixedLength32(hashByteSlice), Height: block.Height, Flag: true, }) if err != nil { - common.DebugLogger.Println("could not insert inverted header for:", block.Height, block.Hash) + logging.L.Err(err). + Uint32("height", block.Height). + Str("blockhash", block.Hash). + Msg("could not insert inverted header for") return } - common.InfoLogger.Println("successfully processed block:", block.Height) + logging.L.Info(). + Uint32("height", block.Height). + Str("blockhash", block.Hash). 
+ Msg("successfully processed block") } @@ -130,66 +152,68 @@ func HandleBlock(block *types.Block) error { // todo the next sections can potentially be optimized by combining them into one loop where // all things are extracted from the blocks transaction data - common.DebugLogger.Println("Computing tweaks...") + logging.L.Debug().Msg("Computing tweaks...") tweaksForBlock, err := ComputeTweaksForBlock(block) if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error computing tweaks") return err } - common.DebugLogger.Println("Tweaks computed...") + logging.L.Debug().Msg("Tweaks computed...") - if common.TweakIndexFullNoDust || common.TweakIndexFullIncludingDust { + if config.TweakIndexFullNoDust || config.TweakIndexFullIncludingDust { // build map for sorting tweaksForBlockMap := map[string]types.Tweak{} for _, tweak := range tweaksForBlock { - tweaksForBlockMap[tweak.Txid] = tweak + tweaksForBlockMap[hex.EncodeToString(tweak.Txid[:])] = tweak } // we only create one of the two filters no dust can be derived from dust but not vice versa // So we build the dust index if dust is needed and no-dust if off but not both - if common.TweakIndexFullIncludingDust { + if config.TweakIndexFullIncludingDust { // full index with dust filter possibility // todo should we sort, overhead created tweakIndexDust := types.TweakIndexDustFromTweakArray(tweaksForBlockMap, block) - tweakIndexDust.BlockHash = block.Hash + blockHashBytes, _ := hex.DecodeString(block.Hash) + tweakIndexDust.BlockHash = utils.ConvertToFixedLength32(blockHashBytes) tweakIndexDust.BlockHeight = block.Height err = dblevel.InsertTweakIndexDust(tweakIndexDust) if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error inserting tweak index dust") return err } } else { // normal full index no dust // todo should we sort, overhead created tweakIndex := types.TweakIndexFromTweakArray(tweaksForBlockMap, block) - tweakIndex.BlockHash = block.Hash + blockHashBytes, _ := 
hex.DecodeString(block.Hash) + tweakIndex.BlockHash = utils.ConvertToFixedLength32(blockHashBytes) tweakIndex.BlockHeight = block.Height err = dblevel.InsertTweakIndex(tweakIndex) if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error inserting tweak index") return err } } } - if common.TweaksCutThroughWithDust { + if config.TweaksCutThroughWithDust { err = dblevel.InsertBatchTweaks(tweaksForBlock) if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error inserting batch tweaks") return err } } // if we only want to generate the tweaks we exit here - if common.TweaksOnly { + if config.TweaksOnly { return nil } // mark all transaction which have eligible outputs eligibleTransaction := map[string]struct{}{} for _, tweak := range tweaksForBlock { - eligibleTransaction[tweak.Txid] = struct{}{} + eligibleTransaction[hex.EncodeToString(tweak.Txid[:])] = struct{}{} } // first we need to get the new outputs because some of them might/will be spent in the same block @@ -197,7 +221,7 @@ func HandleBlock(block *types.Block) error { newUTXOs := ExtractNewUTXOs(block, eligibleTransaction) err = dblevel.InsertUTXOs(newUTXOs) if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error inserting utxos") return err } @@ -208,45 +232,45 @@ func HandleBlock(block *types.Block) error { // this will overwrite new UTXOs which were spent in the same block err = markSpentUTXOsAndTweaks(taprootSpent) if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error marking spent utxos and tweaks") return err } // create special block filter cFilterNewUTXOs, err := BuildNewUTXOsFilter(block) if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error building new utxos filter") return err } // err = dblevel.InsertNewUTXOsFilter(cFilterNewUTXOs) if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error inserting new utxos filter") return err } 
spentOutpointsIndex, err := BuildSpentUTXOIndex(taprootSpent, block) if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error building spent utxo index") return err } err = dblevel.InsertSpentOutpointsIndex(&spentOutpointsIndex) if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error inserting spent utxo index") return err } cFilterSpentUTXOs, err := BuildSpentUTXOsFilter(spentOutpointsIndex) if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error building spent utxos filter") return err } err = dblevel.InsertSpentOutpointsFilter(cFilterSpentUTXOs) if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error inserting spent utxos filter") return err } diff --git a/src/core/rpc.go b/internal/core/rpc.go similarity index 72% rename from src/core/rpc.go rename to internal/core/rpc.go index 2cbd62a..2df3378 100644 --- a/src/core/rpc.go +++ b/internal/core/rpc.go @@ -1,8 +1,6 @@ package core import ( - "SilentPaymentAppBackend/src/common" - "SilentPaymentAppBackend/src/common/types" "bytes" "encoding/base64" "encoding/json" @@ -10,34 +8,37 @@ import ( "fmt" "io" "net/http" -) -// todo might need to unify common.types and the types here for consistency + "github.com/setavenger/blindbit-lib/logging" + "github.com/setavenger/blindbit-oracle/internal/config" + "github.com/setavenger/blindbit-oracle/internal/types" +) func makeRPCRequest(rpcData interface{}, result interface{}) error { payload, err := json.Marshal(rpcData) if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error marshaling RPC data") return fmt.Errorf("error marshaling RPC data: %v", err) } // Prepare the request... 
- req, err := http.NewRequest("POST", common.RpcEndpoint, bytes.NewBuffer(payload)) + req, err := http.NewRequest("POST", config.RpcEndpoint, bytes.NewBuffer(payload)) if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error creating request") return fmt.Errorf("error creating request: %v", err) } // Set headers and auth... req.Header.Set("Content-Type", "application/json") - auth := base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf("%s:%s", common.RpcUser, common.RpcPass))) + authText := fmt.Sprintf("%s:%s", config.RpcUser, config.RpcPass) + auth := base64.StdEncoding.EncodeToString([]byte(authText)) req.Header.Add("Authorization", "Basic "+auth) // Make the HTTP request... client := &http.Client{} resp, err := client.Do(req) if err != nil { - common.DebugLogger.Printf("response %+v\n", resp) + logging.L.Err(err).Msg("error performing request") return fmt.Errorf("error performing request: %v", err) } defer resp.Body.Close() @@ -45,23 +46,36 @@ func makeRPCRequest(rpcData interface{}, result interface{}) error { // Read and unmarshal the response... body, err := io.ReadAll(resp.Body) if err != nil { - common.DebugLogger.Println("status code:", resp.Status) - common.DebugLogger.Println("status body:", resp.Body) - return fmt.Errorf("error reading response body: %v", err) + logging.L.Err(err). + Int("status_code", resp.StatusCode). + Str("body", string(body)). + Msg("error reading response body") + return err + } + + if resp.StatusCode >= 400 { + err = fmt.Errorf("request failed") + logging.L.Err(err). + Int("status_code", resp.StatusCode). + Str("body", string(body)). + Msg("error unmarshaling response") + return err } err = json.Unmarshal(body, result) if err != nil { - common.DebugLogger.Println("status code:", resp.Status) - common.DebugLogger.Println("data:", string(body)) - return fmt.Errorf("error unmarshaling response: %v", err) + logging.L.Err(err). + Int("status_code", resp.StatusCode). + Str("body", string(body)). 
+ Msg("error unmarshaling response") + + return err } return nil } func GetFullBlockPerBlockHash(blockHash string) (*types.Block, error) { - //common.InfoLogger.Println("Fetching block:", blockHash) rpcData := types.RPCRequest{ JSONRPC: "1.0", ID: "blindbit-silent-payment-backend-v0", @@ -72,13 +86,14 @@ func GetFullBlockPerBlockHash(blockHash string) (*types.Block, error) { var rpcResponse types.RPCResponseBlock err := makeRPCRequest(rpcData, &rpcResponse) if err != nil { - common.ErrorLogger.Printf("%v\n", err) + logging.L.Err(err).Msg("error getting full block per block hash") return nil, err } if rpcResponse.Error != "" { - common.ErrorLogger.Printf("RPC Error: %v\n", rpcResponse.Error) - return nil, errors.New(string(rpcResponse.Error)) + err = errors.New(string(rpcResponse.Error)) + logging.L.Err(err).Msg("RPC error") + return nil, err } return &rpcResponse.Block, nil @@ -92,20 +107,16 @@ func GetBestBlockHash() (string, error) { Params: []interface{}{}, } - var rpcResponse struct { // Anonymous struct for this specific response - ID string `json:"id"` - Result string `json:"result,omitempty"` - Error interface{} `json:"error,omitempty"` - } - + var rpcResponse types.RPCResponseHighestHash err := makeRPCRequest(rpcData, &rpcResponse) if err != nil { - common.ErrorLogger.Printf("%v\n", err) + logging.L.Err(err).Msg("error getting best block hash") return "", err } - if rpcResponse.Error != nil { - common.ErrorLogger.Printf("RPC Error: %v\n", rpcResponse.Error) + if rpcResponse.Error != "" { + err = errors.New(string(rpcResponse.Error)) + logging.L.Err(err).Msg("RPC error") return "", err } @@ -189,13 +200,14 @@ func GetBlockchainInfo() (*types.BlockchainInfo, error) { err := makeRPCRequest(rpcData, &rpcResponse) if err != nil { - common.ErrorLogger.Printf("%v\n", err) + logging.L.Err(err).Msg("error getting blockchain info") return nil, err } if rpcResponse.Error != nil { - common.ErrorLogger.Printf("RPC Error: %v\n", rpcResponse.Error) - return nil, 
fmt.Errorf("RPC Error: %v", rpcResponse.Error) + err = fmt.Errorf("RPC Error: %v", rpcResponse.Error) + logging.L.Err(err).Msg("RPC error") + return nil, err } return &rpcResponse.Result, nil diff --git a/src/core/spentutxos.go b/internal/core/spentutxos.go similarity index 68% rename from src/core/spentutxos.go rename to internal/core/spentutxos.go index 445bd2c..1dd8c66 100644 --- a/src/core/spentutxos.go +++ b/internal/core/spentutxos.go @@ -1,25 +1,27 @@ package core import ( - "SilentPaymentAppBackend/src/common" - "SilentPaymentAppBackend/src/common/types" "crypto/sha256" "encoding/hex" + + "github.com/setavenger/blindbit-lib/logging" + "github.com/setavenger/blindbit-lib/utils" + "github.com/setavenger/blindbit-oracle/internal/types" ) func BuildSpentUTXOIndex(utxos []types.UTXO, block *types.Block) (types.SpentOutpointsIndex, error) { blockHashBytes, err := hex.DecodeString(block.Hash) if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error decoding block hash") return types.SpentOutpointsIndex{}, err } // reverse byte order to make little endian - blockHashBytes = common.ReverseBytes(blockHashBytes) + blockHashBytes = utils.ReverseBytes(blockHashBytes) spentOutpointsIndex := types.SpentOutpointsIndex{ - BlockHash: block.Hash, + BlockHash: utils.ConvertToFixedLength32(blockHashBytes), BlockHeight: block.Height, } @@ -27,7 +29,7 @@ func BuildSpentUTXOIndex(utxos []types.UTXO, block *types.Block) (types.SpentOut var outpoint []byte outpoint, err = SerialiseToOutpoint(utxo) if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error serialising to outpoint") return types.SpentOutpointsIndex{}, err } diff --git a/src/core/sync.go b/internal/core/sync.go similarity index 65% rename from src/core/sync.go rename to internal/core/sync.go index 288a337..e5e0859 100644 --- a/src/core/sync.go +++ b/internal/core/sync.go @@ -1,31 +1,36 @@ package core import ( - "SilentPaymentAppBackend/src/common" - 
"SilentPaymentAppBackend/src/common/types" - "SilentPaymentAppBackend/src/db/dblevel" + "encoding/hex" "errors" "sort" "sync" + + "github.com/setavenger/blindbit-lib/logging" + "github.com/setavenger/blindbit-lib/utils" + "github.com/setavenger/blindbit-oracle/internal/config" + "github.com/setavenger/blindbit-oracle/internal/dblevel" + "github.com/setavenger/blindbit-oracle/internal/types" + "github.com/syndtr/goleveldb/leveldb" ) func SyncChain() error { - common.InfoLogger.Println("starting sync") + logging.L.Info().Msg("starting sync") blockchainInfo, err := GetBlockchainInfo() if err != nil { - common.ErrorLogger.Fatalln(err) + logging.L.Err(err).Msg("error getting blockchain info") return err } - common.InfoLogger.Printf("blockchain info: %+v\n", blockchainInfo) + logging.L.Info().Msgf("blockchain info: %+v\n", blockchainInfo) // Current logic is that we always just sync from where the user wants us to sync. // We won't sync below the height - syncFromHeight := common.SyncStartHeight + syncFromHeight := config.SyncStartHeight // todo might need to change flow control to use break // number of headers that will maximally be fetched at once - step := common.SyncHeadersMaxPerCall + step := config.SyncHeadersMaxPerCall for i := syncFromHeight; i < blockchainInfo.Blocks; { // Adjust for the last run when there are fewer headers left than the step; avoids index out of range if i+step > blockchainInfo.Blocks { @@ -33,7 +38,7 @@ func SyncChain() error { } var headers []types.BlockHeader - common.InfoLogger.Println("Getting next batch of headers from:", i) + logging.L.Info().Msgf("Getting next batch of headers from: %d", i) // todo find a way to skip ahead to the next unprocessed block from the catch up point. // Maybe iterate over db before querying. Can either do before every query or // once to get to a decent height to continue from. 
Anyways it should not be the case @@ -46,14 +51,14 @@ func SyncChain() error { var heightsClean []uint32 heightsClean, err = dblevel.GetMissingHeadersInvFlag(heights, false) // only find unprocessed blocks heights if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error getting missing headers inv flag") return err } if len(heightsClean) == 0 { err = updateBlockchainInfo(blockchainInfo) if err != nil { - common.WarningLogger.Println(err) + logging.L.Warn().Err(err).Msg("error updating blockchain info") return err } i += step @@ -62,7 +67,7 @@ func SyncChain() error { headers, err = GetBlockHeadersBatch(heightsClean) if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error getting block headers batch") return err } sort.Slice(headers, func(i, j int) bool { @@ -71,7 +76,7 @@ func SyncChain() error { err = processHeaders(headers) if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error processing headers") return err } @@ -81,7 +86,7 @@ func SyncChain() error { // if syncing takes longer we avoid querying too many previous blocks in `HandleBlock` err = updateBlockchainInfo(blockchainInfo) if err != nil { - common.WarningLogger.Println(err) + logging.L.Warn().Err(err).Msg("error updating blockchain info") return err } } @@ -93,23 +98,23 @@ func updateBlockchainInfo(blockchainInfo *types.BlockchainInfo) error { previousHeight := blockchainInfo.Blocks blockchainInfo, err = GetBlockchainInfo() if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error getting blockchain info") return err } if previousHeight < blockchainInfo.Blocks { - common.InfoLogger.Println("increasing block height to:", blockchainInfo.Blocks) + logging.L.Info().Msgf("increasing block height to: %d", blockchainInfo.Blocks) } return nil } func processHeaders(headers []types.BlockHeader) error { - common.InfoLogger.Printf("Processing %d headers\n", len(headers)) + logging.L.Info().Msgf("Processing %d 
headers\n", len(headers)) if len(headers) == 0 { - common.WarningLogger.Println("No headers were passed") + logging.L.Warn().Msg("No headers were passed") return nil } - fetchedBlocks := make(chan *types.Block, common.MaxParallelRequests) - semaphore := make(chan struct{}, common.MaxParallelRequests) + fetchedBlocks := make(chan *types.Block, config.MaxParallelRequests) + semaphore := make(chan struct{}, config.MaxParallelRequests) var errG error var mu sync.Mutex // Mutex to protect shared resources @@ -118,13 +123,12 @@ func processHeaders(headers []types.BlockHeader) error { go func() { for _, header := range headers { if errG != nil { - common.ErrorLogger.Println(errG) + logging.L.Err(errG).Msg("error processing headers") break // If an error occurred, break the loop } semaphore <- struct{}{} // Acquire a slot go func(_header types.BlockHeader) { - //start := time.Now() defer func() { <-semaphore // Release the slot }() @@ -135,10 +139,10 @@ func processHeaders(headers []types.BlockHeader) error { // Log and skip this block since it's already been processed // send empty block to signal it was processed, will be skipped in processing loop fetchedBlocks <- &types.Block{Height: _header.Height} - common.InfoLogger.Printf("Block %d already processed\n", _header.Height) + logging.L.Info().Msgf("Block %d already processed\n", _header.Height) } else { // For other errors, log and store the first occurrence, then exit - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error processing headers") mu.Lock() if errG == nil { errG = err // Store the first error that occurs @@ -149,7 +153,6 @@ func processHeaders(headers []types.BlockHeader) error { } else { fetchedBlocks <- block // Send the fetched block to the channel } - //common.InfoLogger.Printf("It took %d ms to pull block %d\n", time.Now().Sub(start).Milliseconds(), _header.Height) }(header) } }() @@ -179,59 +182,59 @@ func processHeaders(headers []types.BlockHeader) error { if nextExpectedBlock == 0 { break 
} - select { - case block := <-fetchedBlocks: - //common.InfoLogger.Println("Got block:", block.Height) - // check whether the block is a filler block with only the height - if block.Height != nextExpectedBlock { - // Temporarily store out-of-order block header - outOfOrderBlocks[block.Height] = block + + block := <-fetchedBlocks + // check whether the block is a filler block with only the height + if block.Height != nextExpectedBlock { + // Temporarily store out-of-order block header + outOfOrderBlocks[block.Height] = block + } else { + if block.Hash == "" { + nextExpectedBlock = nextExpectedBlockMap[nextExpectedBlock] } else { - if block.Hash == "" { - nextExpectedBlock = nextExpectedBlockMap[nextExpectedBlock] - } else { - // Process block using its hash - CheckBlock(block) - nextExpectedBlock = nextExpectedBlockMap[nextExpectedBlock] - } + // Process block using its hash + CheckBlock(block) + nextExpectedBlock = nextExpectedBlockMap[nextExpectedBlock] } + } - var ok = true - for ok { - if block, ok = outOfOrderBlocks[nextExpectedBlock]; ok { - if block.Hash == "" { - delete(outOfOrderBlocks, nextExpectedBlock) - nextExpectedBlock = nextExpectedBlockMap[nextExpectedBlock] - continue - } - CheckBlock(block) + var ok = true + for ok { + if block, ok = outOfOrderBlocks[nextExpectedBlock]; ok { + if block.Hash == "" { delete(outOfOrderBlocks, nextExpectedBlock) - // Update next expected block nextExpectedBlock = nextExpectedBlockMap[nextExpectedBlock] + continue } + CheckBlock(block) + delete(outOfOrderBlocks, nextExpectedBlock) + // Update next expected block + nextExpectedBlock = nextExpectedBlockMap[nextExpectedBlock] } } } + return nil } func PreSyncHeaders() error { - common.InfoLogger.Println("Syncing headers") + logging.L.Info().Msg("Syncing headers") headerInv, err := dblevel.FetchHighestBlockHeaderInv() - if err != nil && !errors.Is(err, dblevel.NoEntryErr{}) { - common.ErrorLogger.Println(err) + if err != nil && !errors.Is(err, leveldb.ErrNotFound) { + 
logging.L.Err(err).Msg("error fetching highest block header inv") return err } var syncFromHeight uint32 - if err != nil && errors.Is(err, dblevel.NoEntryErr{}) { + // nothing was found so we go to default + if headerInv == nil { // we have to start before taproot activation height // some taproot style pubKeys exist since height ~614000 (the last height I checked) - syncFromHeight = common.HeaderMustSyncHeight() + syncFromHeight = config.HeaderMustSyncHeight() } else { // Sync from where the last header was set - syncFromHeight = common.HeaderMustSyncHeight() + syncFromHeight = config.HeaderMustSyncHeight() if syncFromHeight <= headerInv.Height { syncFromHeight = headerInv.Height + 1 } @@ -239,14 +242,14 @@ func PreSyncHeaders() error { blockchainInfo, err := GetBlockchainInfo() if err != nil { - common.ErrorLogger.Fatalln(err) + logging.L.Fatal().Err(err).Msg("error getting blockchain info") return err } - common.InfoLogger.Printf("blockchain info: %+v\n", blockchainInfo) + logging.L.Info().Any("blockchain_info", blockchainInfo).Msg("blockchain info") // todo might need to change flow control to use break // number of headers that will maximally be fetched at once - step := common.SyncHeadersMaxPerCall + step := config.SyncHeadersMaxPerCall for i := syncFromHeight; i < blockchainInfo.Blocks; { // Adjust for the last run when there are fewer headers left than the step; avoids index out of range if i+step > blockchainInfo.Blocks { @@ -254,7 +257,7 @@ func PreSyncHeaders() error { } var headers []types.BlockHeader - common.InfoLogger.Println("Getting next batch of headers from:", i) + logging.L.Info().Msgf("Getting next batch of headers from: %d", i) var heights []uint32 for height := i; height < i+step; height++ { @@ -263,14 +266,14 @@ func PreSyncHeaders() error { var heightsClean []uint32 heightsClean, err = dblevel.GetMissingHeadersInv(heights) if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error getting missing headers inv") return 
err } if len(heightsClean) == 0 { err = updateBlockchainInfo(blockchainInfo) if err != nil { - common.WarningLogger.Println(err) + logging.L.Warn().Err(err).Msg("error updating blockchain info") return err } i += step @@ -279,7 +282,7 @@ headers, err = GetBlockHeadersBatch(heightsClean) if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error getting block headers batch") return err } sort.Slice(headers, func(i, j int) bool { @@ -289,15 +292,23 @@ // convert BlockHeaders to BlockerHeadersInv var headersInv []types.BlockHeaderInv for _, header := range headers { + blockHashSlice, err := hex.DecodeString(header.Hash) + if err != nil { + // todo: remove all hex.Decode or hex.Encode code + // there are only very few places where this is actually needed + logging.L.Err(err).Msg("blockhash could not be hex decoded") + return err + } + headersInv = append(headersInv, types.BlockHeaderInv{ - Hash: header.Hash, + Hash: utils.ConvertToFixedLength32(blockHashSlice), Height: header.Height, Flag: false, }) } err = dblevel.InsertBatchBlockHeaderInv(headersInv) if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error inserting batch block header inv") return err } @@ -310,7 +321,7 @@ // if syncing takes longer we avoid querying too many previous blocks in `HandleBlock` err = updateBlockchainInfo(blockchainInfo) if err != nil { - common.WarningLogger.Println(err) + logging.L.Warn().Err(err).Msg("error updating blockchain info") return err } } diff --git a/src/core/tweak.go b/internal/core/tweak.go similarity index 69% rename from src/core/tweak.go rename to internal/core/tweak.go index 1b055b8..93e8c98 100644 --- a/src/core/tweak.go +++ b/internal/core/tweak.go @@ -1,20 +1,20 @@ package core import ( - "SilentPaymentAppBackend/src/common" - "SilentPaymentAppBackend/src/common/types" "bytes" "encoding/binary" "encoding/hex" "errors" - "fmt" - 
"math/big" "sort" "strings" "sync" - "github.com/btcsuite/btcd/btcec/v2" + "github.com/setavenger/blindbit-lib/logging" + "github.com/setavenger/blindbit-lib/utils" + "github.com/setavenger/blindbit-oracle/internal/config" + "github.com/setavenger/blindbit-oracle/internal/types" "github.com/setavenger/go-bip352" + golibsecp256k1 "github.com/setavenger/go-libsecp256k1" ) func ComputeTweaksForBlock(block *types.Block) ([]types.Tweak, error) { @@ -37,10 +37,10 @@ func ComputeTweaksForBlockV4(block *types.Block) ([]types.Tweak, error) { txChannel := make(chan types.Transaction) resultsChannel := make(chan types.Tweak) - semaphore := make(chan struct{}, common.MaxParallelTweakComputations) + semaphore := make(chan struct{}, config.MaxParallelTweakComputations) // Start worker goroutines - for i := 0; i < common.MaxParallelTweakComputations; i++ { + for i := 0; i < config.MaxParallelTweakComputations; i++ { wg.Add(1) go func() { defer wg.Done() @@ -50,12 +50,13 @@ func ComputeTweaksForBlockV4(block *types.Block) ([]types.Tweak, error) { if vout.ScriptPubKey.Type == "witness_v1_taproot" { tweakPerTx, err := ComputeTweakPerTx(tx) if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error computing tweak per tx") // todo errG break } if tweakPerTx != nil { - tweakPerTx.BlockHash = block.Hash + blockHashBytes, _ := hex.DecodeString(block.Hash) + tweakPerTx.BlockHash = utils.ConvertToFixedLength32(blockHashBytes) tweakPerTx.BlockHeight = block.Height resultsChannel <- *tweakPerTx } @@ -90,9 +91,9 @@ func ComputeTweaksForBlockV4(block *types.Block) ([]types.Tweak, error) { // ComputeTweaksForBlockV3 performs worse for high tx count but faster for low tx count <800-1000 txs func ComputeTweaksForBlockV3(block *types.Block) ([]types.Tweak, error) { - if block.Txs == nil || len(block.Txs) == 0 { - common.DebugLogger.Printf("%+v", block) - common.WarningLogger.Println("Block had zero transactions") + if len(block.Txs) == 0 { + logging.L.Debug().Any("block", 
block).Msg("Block had zero transactions") + logging.L.Warn().Msg("Block had zero transactions") return []types.Tweak{}, nil } var tweaks []types.Tweak @@ -101,7 +102,7 @@ func ComputeTweaksForBlockV3(block *types.Block) ([]types.Tweak, error) { var wg sync.WaitGroup totalTxs := len(block.Txs) - numGoroutines := common.MaxParallelTweakComputations // Number of goroutines you want to spin up + numGoroutines := config.MaxParallelTweakComputations // Number of goroutines you want to spin up baseBatchSize := totalTxs / numGoroutines // Base number of transactions per goroutine remainder := totalTxs % numGoroutines // Transactions that need to be distributed var errG error @@ -131,7 +132,7 @@ func ComputeTweaksForBlockV3(block *types.Block) ([]types.Tweak, error) { if vout.ScriptPubKey.Type == "witness_v1_taproot" { tweakPerTx, err := ComputeTweakPerTx(_tx) if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error computing tweak per tx") muErr.Lock() if errG == nil { errG = err // Store the first error that occurs @@ -140,7 +141,8 @@ func ComputeTweaksForBlockV3(block *types.Block) ([]types.Tweak, error) { break } if tweakPerTx != nil { - tweakPerTx.BlockHash = block.Hash + blockHashBytes, _ := hex.DecodeString(block.Hash) + tweakPerTx.BlockHash = utils.ConvertToFixedLength32(blockHashBytes) tweakPerTx.BlockHeight = block.Height localTweaks = append(localTweaks, *tweakPerTx) @@ -158,7 +160,8 @@ func ComputeTweaksForBlockV3(block *types.Block) ([]types.Tweak, error) { } if errG != nil { - panic(errG) + logging.L.Panic().Err(errG).Msg("error computing tweaks") + return nil, errG } wg.Wait() @@ -167,10 +170,9 @@ func ComputeTweaksForBlockV3(block *types.Block) ([]types.Tweak, error) { func ComputeTweaksForBlockV2(block *types.Block) ([]types.Tweak, error) { // moved outside of function avoid issues with benchmarking - //common.InfoLogger.Println("Computing tweaks...") var tweaks []types.Tweak - semaphore := make(chan struct{}, 
common.MaxParallelTweakComputations) + semaphore := make(chan struct{}, config.MaxParallelTweakComputations) var errG error var muTweaks sync.Mutex // Mutex to protect tweaks @@ -180,14 +182,13 @@ func ComputeTweaksForBlockV2(block *types.Block) ([]types.Tweak, error) { // block fetcher routine for _, tx := range block.Txs { if errG != nil { - common.ErrorLogger.Println(errG) + logging.L.Err(errG).Msg("error computing tweaks") break // If an error occurred, break the loop } semaphore <- struct{}{} // Acquire a slot wg.Add(1) // make the function wait for this slot go func(_tx types.Transaction) { - //start := time.Now() defer func() { <-semaphore // Release the slot wg.Done() @@ -198,7 +199,7 @@ func ComputeTweaksForBlockV2(block *types.Block) ([]types.Tweak, error) { if vout.ScriptPubKey.Type == "witness_v1_taproot" { tweakPerTx, err := ComputeTweakPerTx(_tx) if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error computing tweak per tx") muErr.Lock() if errG == nil { errG = err // Store the first error that occurs @@ -210,7 +211,8 @@ func ComputeTweaksForBlockV2(block *types.Block) ([]types.Tweak, error) { // they are not supposed to throw an error // but also don't have a tweak that can be computed if tweakPerTx != nil { - tweakPerTx.BlockHash = block.Hash + blockHashBytes, _ := hex.DecodeString(block.Hash) + tweakPerTx.BlockHash = utils.ConvertToFixedLength32(blockHashBytes) tweakPerTx.BlockHeight = block.Height muTweaks.Lock() @@ -224,16 +226,15 @@ func ComputeTweaksForBlockV2(block *types.Block) ([]types.Tweak, error) { } if errG != nil { - panic(errG) + logging.L.Panic().Err(errG).Msg("error computing tweaks") + return nil, errG } wg.Wait() - //common.InfoLogger.Println("Tweaks computed...") return tweaks, nil } // Deprecated: slowest of them all, do not use anywhere func ComputeTweaksForBlockV1(block *types.Block) ([]types.Tweak, error) { - //common.InfoLogger.Println("Computing tweaks...") var tweaks []types.Tweak for _, tx := 
range block.Txs { @@ -242,14 +243,15 @@ func ComputeTweaksForBlockV1(block *types.Block) ([]types.Tweak, error) { if vout.ScriptPubKey.Type == "witness_v1_taproot" { tweakPerTx, err := ComputeTweakPerTx(tx) if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error computing tweak per tx") return []types.Tweak{}, err } // we do this check for not eligible transactions like coinbase transactions // they are not supposed to throw an error // but also don't have a tweak that can be computed if tweakPerTx != nil { - tweakPerTx.BlockHash = block.Hash + blockHashBytes, _ := hex.DecodeString(block.Hash) + tweakPerTx.BlockHash = utils.ConvertToFixedLength32(blockHashBytes) tweakPerTx.BlockHeight = block.Height tweaks = append(tweaks, *tweakPerTx) } @@ -257,56 +259,50 @@ func ComputeTweaksForBlockV1(block *types.Block) ([]types.Tweak, error) { } } } - //common.InfoLogger.Println("Tweaks computed...") return tweaks, nil } func ComputeTweakPerTx(tx types.Transaction) (*types.Tweak, error) { - //common.DebugLogger.Println("computing tweak for:", tx.Txid) pubKeys := extractPubKeys(tx) if pubKeys == nil { // for example if coinbase transaction does not return any pubKeys (as it should) return nil, nil } - summedKey, err := sumPublicKeys(pubKeys) + + fixSizePubKeys := utils.ConvertPubkeySliceToFixedLength33(pubKeys) + + summedKey, err := bip352.SumPublicKeys(fixSizePubKeys) if err != nil { if strings.Contains(err.Error(), "not on secp256k1 curve") { - common.WarningLogger.Println(err, "-", tx.Txid) + logging.L.Warn().Str("txid", tx.Txid).Err(err).Msg("error computing tweak per tx") return nil, nil } - common.DebugLogger.Println("tx:", tx.Txid) - common.ErrorLogger.Println(err) + logging.L.Debug().Str("txid", tx.Txid).Msg("error computing tweak per tx") + logging.L.Err(err).Msg("error computing tweak per tx") return nil, err } hash, err := ComputeInputHash(tx, summedKey) if err != nil { - common.DebugLogger.Println("tx:", tx.Txid) - 
common.ErrorLogger.Println(err) + logging.L.Debug().Str("txid", tx.Txid).Msg("error computing tweak per tx") + logging.L.Err(err).Msg("error computing tweak per tx") return nil, err } - curve := btcec.S256() - x, y := curve.ScalarMult(summedKey.X(), summedKey.Y(), hash[:]) + golibsecp256k1.PubKeyTweakMul(summedKey, &hash) - tweakBytes := [33]byte{} - mod := y.Mod(y, big.NewInt(2)) - if mod.Cmp(big.NewInt(0)) == 0 { - tweakBytes[0] = 0x02 - } else { - tweakBytes[0] = 0x03 - } - - x.FillBytes(tweakBytes[1:]) + tweakBytes := summedKey highestValue, err := FindBiggestOutputFromTx(tx) if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error computing tweak per tx") return nil, err } + txidBytes, _ := hex.DecodeString(tx.Txid) + tweak := types.Tweak{ - Txid: tx.Txid, - TweakData: tweakBytes, + Txid: utils.ConvertToFixedLength32(txidBytes), + TweakData: *tweakBytes, HighestValue: highestValue, } @@ -320,23 +316,22 @@ func FindBiggestOutputFromTx(tx types.Transaction) (uint64, error) { if output.ScriptPubKey.Type != "witness_v1_taproot" { continue } - valueOutput := common.ConvertFloatBTCtoSats(output.Value) + valueOutput := utils.ConvertFloatBTCtoSats(output.Value) if valueOutput > biggest { biggest = valueOutput } } if biggest == 0 { - common.DebugLogger.Printf("%+v\n", tx) - common.ErrorLogger.Printf("Highest value was 0 txid: %s", tx.Txid) - // panic("highest value was 0") // This should not happen, but we don't kill the program + logging.L.Debug().Any("tx", tx).Msg("highest value was 0") + logging.L.Err(errors.New("highest value was 0")).Msg("highest value was 0") } return biggest, nil } -func extractPubKeys(tx types.Transaction) []string { - var pubKeys []string +func extractPubKeys(tx types.Transaction) [][]byte { + var pubKeys [][]byte for _, vin := range tx.Vin { if vin.Coinbase != "" { @@ -347,41 +342,64 @@ func extractPubKeys(tx types.Transaction) []string { // todo needs some extra parsing see reference implementation and bitcoin core 
wallet pubKey, err := extractPubKeyFromP2TR(vin) if err != nil { - common.DebugLogger.Println("txid:", tx.Txid) - common.DebugLogger.Println("Could not extract public key") - common.ErrorLogger.Println(err) - panic(err) //todo make this return an error + logging.L.Debug().Str("txid", tx.Txid).Msg("Could not extract public key") + logging.L.Panic().Err(err).Msg("Could not extract public key") + return nil } // todo what to do if none is matched if pubKey != "" { - pubKeys = append(pubKeys, pubKey) + pubKeyBytes, _ := hex.DecodeString(pubKey) + + // pubKeyBytes, err := hex.DecodeString(pubKey) + // // todo: can we ignore this error? RPC of Bitcoin Core should not return invalid hex formatted pubkeys + // if err != nil { + // logging.L.Err(err).Msg("error decoding public key") + // return nil + // } + pubKeys = append(pubKeys, pubKeyBytes) } + case "witness_v0_keyhash": // last element in the witness data is public key; skip uncompressed if len(vin.Txinwitness[len(vin.Txinwitness)-1]) == 66 { - pubKeys = append(pubKeys, vin.Txinwitness[len(vin.Txinwitness)-1]) + pubKeyBytes, _ := hex.DecodeString(vin.Txinwitness[len(vin.Txinwitness)-1]) + // pubKeyBytes, err := hex.DecodeString(vin.Txinwitness[len(vin.Txinwitness)-1]) + // // todo: can we ignore this error? RPC of Bitcoin Core should not return invalid hex formatted pubkeys + // if err != nil { + // logging.L.Err(err).Msg("error decoding public key") + // return nil + // } + pubKeys = append(pubKeys, pubKeyBytes) } case "scripthash": if len(vin.ScriptSig.Hex) == 46 { if vin.ScriptSig.Hex[:6] == "160014" { if len(vin.Txinwitness[len(vin.Txinwitness)-1]) == 66 { - pubKeys = append(pubKeys, vin.Txinwitness[len(vin.Txinwitness)-1]) + pubKeyBytes, _ := hex.DecodeString(vin.Txinwitness[len(vin.Txinwitness)-1]) + + // pubKeyBytes, err := hex.DecodeString(vin.Txinwitness[len(vin.Txinwitness)-1]) + // // todo: can we ignore this error? 
RPC of Bitcoin Core should not return invalid hex formatted pubkeys + // if err != nil { + // logging.L.Err(err).Msg("error decoding public key") + // return nil + // } + pubKeys = append(pubKeys, pubKeyBytes) } } } + case "pubkeyhash": pubKey, err := extractFromP2PKH(vin) if err != nil { - common.DebugLogger.Println("txid:", tx.Txid) - common.DebugLogger.Println("Could not extract public key") - common.ErrorLogger.Println(err) + logging.L.Debug().Str("txid", tx.Txid).Msg("Could not extract public key") + logging.L.Err(err).Msg("Could not extract public key") continue } // todo what to do if none is matched if pubKey != nil { - pubKeys = append(pubKeys, hex.EncodeToString(pubKey)) + pubKeys = append(pubKeys, pubKey) } default: @@ -397,23 +415,23 @@ func extractFromP2PKH(vin types.Vin) ([]byte, error) { spkHashHex := vin.Prevout.ScriptPubKey.Hex[6:46] // Skip op_codes and grab the hash spkHash, err := hex.DecodeString(spkHashHex) if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error decoding spk hash") return nil, err } scriptSigBytes, err := hex.DecodeString(vin.ScriptSig.Hex) if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error decoding script sig") return nil, err } - // todo inefficient implementation copied from reference implementation + // todo: inefficient implementation copied from reference implementation // should be improved upon for i := len(scriptSigBytes); i >= 33; i-- { pubKeyBytes := scriptSigBytes[i-33 : i] - pubKeyHash := common.Hash160(pubKeyBytes) + pubKeyHash := bip352.Hash160(pubKeyBytes) if bytes.Equal(pubKeyHash, spkHash) { - return pubKeyBytes, err + return pubKeyBytes, nil } } @@ -433,14 +451,14 @@ func extractPubKeyFromP2TR(vin types.Vin) (string, error) { // Script-path spend controlBlock, err := hex.DecodeString(witnessStack[len(witnessStack)-1]) if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error decoding control block") return "", err } // Control 
block format: <32-byte internal key> [<32-byte hash>...] if len(controlBlock) >= 33 { internalKey := controlBlock[1:33] - if bytes.Equal(internalKey, common.NumsH) { + if bytes.Equal(internalKey, bip352.NumsH) { // Skip if internal key is NUMS_H return "", nil } @@ -455,59 +473,21 @@ func extractPubKeyFromP2TR(vin types.Vin) (string, error) { return "", nil } -func sumPublicKeys(pubKeys []string) (*btcec.PublicKey, error) { - var lastPubKey *btcec.PublicKey - curve := btcec.KoblitzCurve{} - - for idx, pubKey := range pubKeys { - bytesPubKey, err := hex.DecodeString(pubKey) - if err != nil { - common.ErrorLogger.Println(err) - // todo remove panics - return nil, err - } - - // for extracted keys which are only 32 bytes (taproot) we assume even parity - // as we don't need the y-coordinate for any computation we can simply prepend 0x02 - if len(bytesPubKey) == 32 { - bytesPubKey = bytes.Join([][]byte{{0x02}, bytesPubKey}, []byte{}) - } - publicKey, err := btcec.ParsePubKey(bytesPubKey) - if err != nil { - common.ErrorLogger.Println(err) - return nil, err - } - - if idx == 0 { - lastPubKey = publicKey - } else { - x, y := curve.Add(lastPubKey.X(), lastPubKey.Y(), publicKey.X(), publicKey.Y()) - - lastPubKey, err = bip352.ConvertPointsToPublicKey(x, y) - if err != nil { - common.ErrorLogger.Println(err) - return nil, err - } - } - } - return lastPubKey, nil -} - // ComputeInputHash computes the input_hash for a transaction as per the specification. 
-func ComputeInputHash(tx types.Transaction, sumPublicKeys *btcec.PublicKey) ([32]byte, error) { +func ComputeInputHash(tx types.Transaction, sumPublicKeys *[33]byte) ([32]byte, error) { smallestOutpoint, err := findSmallestOutpoint(tx) if err != nil { - common.ErrorLogger.Println(err) // todo why do we send a custom error - return [32]byte{}, fmt.Errorf("error finding smallest outpoint: %w", err) + logging.L.Err(err).Msg("error finding smallest outpoint") + return [32]byte{}, err } // Concatenate outpointL and A var buffer bytes.Buffer buffer.Write(smallestOutpoint) // Serialize the x-coordinate of the sumPublicKeys - buffer.Write(sumPublicKeys.SerializeCompressed()) + buffer.Write(sumPublicKeys[:]) - inputHash := common.HashTagged("BIP0352/Inputs", buffer.Bytes()) + inputHash := bip352.HashTagged("BIP0352/Inputs", buffer.Bytes()) return inputHash, nil } @@ -529,16 +509,16 @@ func findSmallestOutpoint(tx types.Transaction) ([]byte, error) { // Decode the Txid (hex to bytes) and reverse it to match little-endian format txidBytes, err := hex.DecodeString(vin.Txid) if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error decoding txid") return nil, err } - reversedTxid := common.ReverseBytes(txidBytes) + reversedTxid := utils.ReverseBytes(txidBytes) // Serialize the Vout as little-endian bytes voutBytes := new(bytes.Buffer) err = binary.Write(voutBytes, binary.LittleEndian, vin.Vout) if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error serializing vout") return nil, err } // Concatenate reversed Txid and Vout bytes diff --git a/src/core/tweak_bench_test.go b/internal/core/tweak_bench_test.go similarity index 54% rename from src/core/tweak_bench_test.go rename to internal/core/tweak_bench_test.go index 1d42499..3e5be6d 100644 --- a/src/core/tweak_bench_test.go +++ b/internal/core/tweak_bench_test.go @@ -1,39 +1,41 @@ package core import ( - "SilentPaymentAppBackend/src/common/types" - 
"SilentPaymentAppBackend/src/testhelpers" - "log" "testing" + + "github.com/setavenger/blindbit-lib/logging" + "github.com/setavenger/blindbit-oracle/internal/testhelpers" + "github.com/setavenger/blindbit-oracle/internal/types" ) /* todo blocks to test - 000000000000000000030fcdb1ee03e49a5c50c0d457441a7bf4215920048824 ~4.8k txs - 000000000000000000027bd4698820dc77142b578a0bb824af9bdc799e731b85 ~5.2k txs - 000000000000000000028988a6b092b1bd1aa64211495e280ed274985fbfada5 ~6.1k txs - 00000000000000000000d1b78dabafed74c4483fdde4d899952274fafb70998c ~0.9k txs but 19k taproot UTXOs + + 000000000000000000030fcdb1ee03e49a5c50c0d457441a7bf4215920048824 ~4.8k txs + 000000000000000000027bd4698820dc77142b578a0bb824af9bdc799e731b85 ~5.2k txs + 000000000000000000028988a6b092b1bd1aa64211495e280ed274985fbfada5 ~6.1k txs + 00000000000000000000d1b78dabafed74c4483fdde4d899952274fafb70998c ~0.9k txs but 19k taproot UTXOs */ var ( block833000, block833010, block833013, block834469 types.Block ) func init() { - err := testhelpers.LoadAndUnmarshalBlockFromFile("../test_data/block_833000.json", &block833000) + err := testhelpers.LoadAndUnmarshalBlockFromFile("../../test_data/block_833000.json", &block833000) if err != nil { - log.Fatalln(err) + logging.L.Fatal().Err(err).Msg("error loading block 833000") } - err = testhelpers.LoadAndUnmarshalBlockFromFile("../test_data/block_833010.json", &block833010) + err = testhelpers.LoadAndUnmarshalBlockFromFile("../../test_data/block_833010.json", &block833010) if err != nil { - log.Fatalln(err) + logging.L.Fatal().Err(err).Msg("error loading block 833010") } - err = testhelpers.LoadAndUnmarshalBlockFromFile("../test_data/block_833013.json", &block833013) + err = testhelpers.LoadAndUnmarshalBlockFromFile("../../test_data/block_833013.json", &block833013) if err != nil { - log.Fatalln(err) + logging.L.Fatal().Err(err).Msg("error loading block 833013") } - err = testhelpers.LoadAndUnmarshalBlockFromFile("../test_data/block_834469.json", 
&block834469) + err = testhelpers.LoadAndUnmarshalBlockFromFile("../../test_data/block_834469.json", &block834469) if err != nil { - log.Fatalln(err) + logging.L.Fatal().Err(err).Msg("error loading block 834469") } } @@ -41,7 +43,7 @@ func BenchmarkTweakV4Block833000(b *testing.B) { for i := 0; i < b.N; i++ { _, err := ComputeTweaksForBlockV4(&block833000) if err != nil { - log.Fatalln(err) + logging.L.Fatal().Err(err).Msg("error computing tweaks for block 833000") } } } @@ -50,7 +52,7 @@ func BenchmarkTweakV3Block833000(b *testing.B) { for i := 0; i < b.N; i++ { _, err := ComputeTweaksForBlockV3(&block833000) if err != nil { - log.Fatalln(err) + logging.L.Fatal().Err(err).Msg("error computing tweaks for block 833000") } } } @@ -59,7 +61,7 @@ func BenchmarkTweakV2Block833000(b *testing.B) { for i := 0; i < b.N; i++ { _, err := ComputeTweaksForBlockV2(&block833000) if err != nil { - log.Fatalln(err) + logging.L.Fatal().Err(err).Msg("error computing tweaks for block 833000") } } } @@ -68,7 +70,7 @@ func BenchmarkTweakV1Block833000(b *testing.B) { for i := 0; i < b.N; i++ { _, err := ComputeTweaksForBlockV1(&block833000) if err != nil { - log.Fatalln(err) + logging.L.Fatal().Err(err).Msg("error computing tweaks for block 833000") } } } @@ -77,7 +79,7 @@ func BenchmarkTweakV4Block833010(b *testing.B) { for i := 0; i < b.N; i++ { _, err := ComputeTweaksForBlockV4(&block833010) if err != nil { - log.Fatalln(err) + logging.L.Fatal().Err(err).Msg("error computing tweaks for block 833010") } } } @@ -86,7 +88,7 @@ func BenchmarkTweakV3Block833010(b *testing.B) { for i := 0; i < b.N; i++ { _, err := ComputeTweaksForBlockV3(&block833010) if err != nil { - log.Fatalln(err) + logging.L.Fatal().Err(err).Msg("error computing tweaks for block 833010") } } } @@ -95,7 +97,7 @@ func BenchmarkTweakV2Block833010(b *testing.B) { for i := 0; i < b.N; i++ { _, err := ComputeTweaksForBlockV2(&block833010) if err != nil { - log.Fatalln(err) + logging.L.Fatal().Err(err).Msg("error computing 
tweaks for block 833010") } } } @@ -104,7 +106,7 @@ func BenchmarkTweakV1Block833010(b *testing.B) { for i := 0; i < b.N; i++ { _, err := ComputeTweaksForBlockV1(&block833010) if err != nil { - log.Fatalln(err) + logging.L.Fatal().Err(err).Msg("error computing tweaks for block 833010") } } } @@ -113,7 +115,7 @@ func BenchmarkTweakV4Block833013(b *testing.B) { for i := 0; i < b.N; i++ { _, err := ComputeTweaksForBlockV4(&block833013) if err != nil { - log.Fatalln(err) + logging.L.Fatal().Err(err).Msg("error computing tweaks for block 833013") } } } @@ -122,7 +124,7 @@ func BenchmarkTweakV3Block833013(b *testing.B) { for i := 0; i < b.N; i++ { _, err := ComputeTweaksForBlockV3(&block833013) if err != nil { - log.Fatalln(err) + logging.L.Fatal().Err(err).Msg("error computing tweaks for block 833013") } } } @@ -131,7 +133,7 @@ func BenchmarkTweakV2Block833013(b *testing.B) { for i := 0; i < b.N; i++ { _, err := ComputeTweaksForBlockV2(&block833013) if err != nil { - log.Fatalln(err) + logging.L.Fatal().Err(err).Msg("error computing tweaks for block 833013") } } } @@ -140,7 +142,7 @@ func BenchmarkTweakV1Block833013(b *testing.B) { for i := 0; i < b.N; i++ { _, err := ComputeTweaksForBlockV1(&block833013) if err != nil { - log.Fatalln(err) + logging.L.Fatal().Err(err).Msg("error computing tweaks for block 833013") } } } @@ -149,7 +151,7 @@ func BenchmarkTweakV4Block834469(b *testing.B) { for i := 0; i < b.N; i++ { _, err := ComputeTweaksForBlockV4(&block834469) if err != nil { - log.Fatalln(err) + logging.L.Fatal().Err(err).Msg("error computing tweaks for block 834469") } } } @@ -158,7 +160,7 @@ func BenchmarkTweakV3Block834469(b *testing.B) { for i := 0; i < b.N; i++ { _, err := ComputeTweaksForBlockV3(&block834469) if err != nil { - log.Fatalln(err) + logging.L.Fatal().Err(err).Msg("error computing tweaks for block 834469") } } } @@ -167,7 +169,7 @@ func BenchmarkTweakV2Block834469(b *testing.B) { for i := 0; i < b.N; i++ { _, err := 
ComputeTweaksForBlockV2(&block834469) if err != nil { - log.Fatalln(err) + logging.L.Fatal().Err(err).Msg("error computing tweaks for block 834469") } } } @@ -176,7 +178,7 @@ func BenchmarkTweakV1Block834469(b *testing.B) { for i := 0; i < b.N; i++ { _, err := ComputeTweaksForBlockV1(&block834469) if err != nil { - log.Fatalln(err) + logging.L.Fatal().Err(err).Msg("error computing tweaks for block 834469") } } } diff --git a/src/core/tweak_test.go b/internal/core/tweak_test.go similarity index 75% rename from src/core/tweak_test.go rename to internal/core/tweak_test.go index 07af18d..e19919f 100644 --- a/src/core/tweak_test.go +++ b/internal/core/tweak_test.go @@ -1,26 +1,22 @@ package core import ( - "SilentPaymentAppBackend/src/common" - "SilentPaymentAppBackend/src/common/types" - "SilentPaymentAppBackend/src/testhelpers" "encoding/hex" - "log" - "os" "testing" + + "github.com/setavenger/blindbit-lib/logging" + "github.com/setavenger/blindbit-lib/utils" + "github.com/setavenger/blindbit-oracle/internal/testhelpers" + "github.com/setavenger/blindbit-oracle/internal/types" + "github.com/setavenger/go-bip352" ) var b833000 types.Block func init() { - common.DebugLogger = log.New(os.Stdout, "[DEBUG] ", log.Ldate|log.Lmicroseconds|log.Lshortfile|log.Lmsgprefix) - common.InfoLogger = log.New(os.Stdout, "[INFO] ", log.Ldate|log.Lmicroseconds|log.Lshortfile|log.Lmsgprefix) - common.WarningLogger = log.New(os.Stdout, "[WARNING] ", log.Ldate|log.Lmicroseconds|log.Lshortfile|log.Lmsgprefix) - common.ErrorLogger = log.New(os.Stdout, "[ERROR] ", log.Ldate|log.Lmicroseconds|log.Lshortfile|log.Lmsgprefix) - - err := testhelpers.LoadAndUnmarshalBlockFromFile("../test_data/block_833000.json", &b833000) + err := testhelpers.LoadAndUnmarshalBlockFromFile("../../test_data/block_833000.json", &b833000) if err != nil { - log.Fatalln(err) + logging.L.Fatal().Err(err).Msg("error loading block 833000") } } @@ -45,24 +41,26 @@ func TestSimpleInputHash(t *testing.T) { return } - 
summedKey, err := sumPublicKeys(pubKeys) + fixSizePubKeys := utils.ConvertPubkeySliceToFixedLength33(pubKeys) + + summedKey, err := bip352.SumPublicKeys(fixSizePubKeys) if err != nil { t.Error(err) return } - common.DebugLogger.Println(hex.EncodeToString(summedKey.SerializeCompressed())) + // logging.L.Debug().Hex("summed_key", summedKey[:]).Msg("summed key") inputHash, err := ComputeInputHash(tx, summedKey) if err != nil { t.Error(err) return } - common.DebugLogger.Println(hex.EncodeToString(inputHash[:])) + // logging.L.Debug().Hex("input_hash", inputHash[:]).Msg("input hash") inputHashHex := hex.EncodeToString(inputHash[:]) if inputHashHex != controlInputHash { t.Errorf("computed input hash does not match: %s - %s\n", inputHashHex, controlInputHash) return } - common.InfoLogger.Println("Done") + logging.L.Info().Msg("Done") } func TestComputeAllReceivingTweaks(t *testing.T) { @@ -73,7 +71,7 @@ func TestComputeAllReceivingTweaks(t *testing.T) { } for _, testCase := range testCases { - common.InfoLogger.Println(testCase.Comment) + logging.L.Info().Msg(testCase.Comment) for _, caseDetail := range testCase.Receiving { tx, err := testhelpers.TransformTestCaseDetailToTransaction(caseDetail) @@ -102,27 +100,27 @@ func TestComputeAllReceivingTweaks(t *testing.T) { } func TestBlockProcessingTime(t *testing.T) { - common.InfoLogger.Println("Starting v3 computation") + logging.L.Info().Msg("Starting v3 computation") _, err := ComputeTweaksForBlockV3(&b833000) if err != nil { t.Error(err) return } - common.InfoLogger.Println("Finished v3 computation") - common.InfoLogger.Println("Starting v2 computation") + logging.L.Info().Msg("Finished v3 computation") + logging.L.Info().Msg("Starting v2 computation") _, err = ComputeTweaksForBlockV2(&b833000) if err != nil { t.Error(err) return } - common.InfoLogger.Println("Finished v2 computation") - common.InfoLogger.Println("Starting v1 computation") + logging.L.Info().Msg("Finished v2 computation") + logging.L.Info().Msg("Starting 
v1 computation") _, err = ComputeTweaksForBlockV1(&b833000) if err != nil { t.Error(err) return } - common.InfoLogger.Println("Finished v1 computation") + logging.L.Info().Msg("Finished v1 computation") } func TestV3NoTxs(t *testing.T) { diff --git a/src/dataexport/exportcsv.go b/internal/dataexport/exportcsv.go similarity index 68% rename from src/dataexport/exportcsv.go rename to internal/dataexport/exportcsv.go index dc05606..6108624 100644 --- a/src/dataexport/exportcsv.go +++ b/internal/dataexport/exportcsv.go @@ -1,55 +1,45 @@ package dataexport import ( - "SilentPaymentAppBackend/src/common" - "SilentPaymentAppBackend/src/common/types" - "SilentPaymentAppBackend/src/db/dblevel" "encoding/csv" "encoding/hex" - "fmt" - "log" "os" "strconv" + "strings" + + "github.com/setavenger/blindbit-lib/logging" + "github.com/setavenger/blindbit-oracle/internal/config" + "github.com/setavenger/blindbit-oracle/internal/dblevel" + "github.com/setavenger/blindbit-oracle/internal/types" ) func writeToCSV(path string, records [][]string) error { // Create a new file - os.MkdirAll(fmt.Sprintf("%s/export", common.BaseDirectory), 0750) + os.MkdirAll(path[:strings.LastIndex(path, "/")], 0750) + logging.L.Info().Msgf("Writing to %s", path) file, err := os.Create(path) if err != nil { - log.Fatalf("failed creating file: %s", err) + logging.L.Fatal().Err(err).Msg("failed creating file") } - defer func(file *os.File) { - err = file.Close() - if err != nil { - common.ErrorLogger.Println(err) - } - }(file) // Ensure the file is closed at the end + defer file.Close() - // Create a new CSV writer writer := csv.NewWriter(file) - defer writer.Flush() // Flush writes any buffered data to the underlying io.Writer + defer writer.Flush() - // Write all CSV records - err = writer.WriteAll(records) // calls Flush internally - if err != nil { - common.ErrorLogger.Println("error writing record to csv:", err) - return err - } - return err + return writer.WriteAll(records) } -// UTXOS +/* UTXOS */ 
func ExportUTXOs(path string) error { allEntries, err := dblevel.FetchAllUTXOs() if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error fetching all utxos") return err } records, err := convertUTXOsToRecords(allEntries) if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error converting utxos to records") return err } return writeToCSV(path, records) @@ -67,8 +57,8 @@ func convertUTXOsToRecords(utxos []types.UTXO) ([][]string, error) { }) for _, pair := range utxos { records = append(records, []string{ - pair.BlockHash, - pair.Txid, + hex.EncodeToString(pair.BlockHash[:]), + hex.EncodeToString(pair.Txid[:]), strconv.FormatUint(uint64(pair.Vout), 10), pair.ScriptPubKey, strconv.FormatUint(pair.Value, 10), @@ -77,17 +67,17 @@ func convertUTXOsToRecords(utxos []types.UTXO) ([][]string, error) { return records, nil } -// Filters +/* Filters */ func ExportFilters(path string) error { allEntries, err := dblevel.FetchAllNewUTXOsFilters() if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error fetching all new utxos filters") return err } records, err := convertFiltersToRecords(allEntries) if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error converting filters to records") return err } return writeToCSV(path, records) @@ -103,7 +93,7 @@ func convertFiltersToRecords(data []types.Filter) ([][]string, error) { }) for _, pair := range data { records = append(records, []string{ - pair.BlockHash, + hex.EncodeToString(pair.BlockHash[:]), strconv.FormatUint(uint64(pair.FilterType), 10), hex.EncodeToString(pair.Data), }) @@ -111,17 +101,17 @@ func convertFiltersToRecords(data []types.Filter) ([][]string, error) { return records, nil } -// Tweaks +/* Tweaks */ func ExportTweaks(path string) error { allEntries, err := dblevel.FetchAllTweaks() if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error fetching all tweaks") return err } records, err := 
convertTweaksToRecords(allEntries) if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error converting tweaks to records") return err } return writeToCSV(path, records) @@ -134,28 +124,34 @@ func convertTweaksToRecords(data []types.Tweak) ([][]string, error) { "blockHash", "txid", "data", + "highestValue", }) for _, pair := range data { records = append(records, []string{ - pair.BlockHash, - pair.Txid, + hex.EncodeToString(pair.BlockHash[:]), + hex.EncodeToString(pair.Txid[:]), hex.EncodeToString(pair.TweakData[:]), + strconv.FormatUint(pair.HighestValue, 10), }) } return records, nil } -// TweakIndex +/* TweakIndex */ func ExportTweakIndices(path string) error { + // we skip this because there will be no data + if config.TweakIndexFullIncludingDust { + return nil + } allEntries, err := dblevel.FetchAllTweakIndices() if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error fetching all tweak indices") return err } records, err := convertTweakIndicesToRecords(allEntries) if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error converting tweak indices to records") return err } return writeToCSV(path, records) @@ -178,24 +174,24 @@ func convertTweakIndicesToRecords(data []types.TweakIndex) ([][]string, error) { } records = append(records, []string{ - pair.BlockHash, + hex.EncodeToString(pair.BlockHash[:]), hex.EncodeToString(flattened), }) } return records, nil } -// HeadersInv +/* HeadersInv */ func ExportHeadersInv(path string) error { allEntries, err := dblevel.FetchAllHeadersInv() if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error fetching all headers inv") return err } records, err := convertHeadersInvToRecords(allEntries) if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error converting headers inv to records") return err } return writeToCSV(path, records) @@ -211,7 +207,7 @@ func convertHeadersInvToRecords(data 
[]types.BlockHeaderInv) ([][]string, error) for _, pair := range data { records = append(records, []string{ strconv.FormatUint(uint64(pair.Height), 10), - pair.Hash, + hex.EncodeToString(pair.Hash[:]), }) } return records, nil diff --git a/internal/dataexport/utils.go b/internal/dataexport/utils.go new file mode 100644 index 0000000..a8dbed3 --- /dev/null +++ b/internal/dataexport/utils.go @@ -0,0 +1,54 @@ +package dataexport + +import ( + "fmt" + "os" + "time" + + "github.com/setavenger/blindbit-lib/logging" + "github.com/setavenger/blindbit-oracle/internal/config" +) + +func ExportAll() { + // todo manage memory better, bloats completely during export + logging.L.Info().Msg("Exporting data") + timestamp := time.Now() + + logging.L.Info().Msg("Exporting UTXOs") + err := ExportUTXOs(fmt.Sprintf("%s/data-export/utxos-%d.csv", config.BaseDirectory, timestamp.Unix())) + if err != nil { + logging.L.Fatal().Err(err).Msg("error exporting utxos") + } + logging.L.Info().Msg("Finished UTXOs") + + logging.L.Info().Msg("Exporting Filters") + err = ExportFilters(fmt.Sprintf("%s/data-export/filters-%d.csv", config.BaseDirectory, timestamp.Unix())) + if err != nil { + logging.L.Fatal().Err(err).Msg("error exporting filters") + } + logging.L.Info().Msg("Finished Filters") + + logging.L.Info().Msg("Exporting Tweaks") + err = ExportTweaks(fmt.Sprintf("%s/data-export/tweaks-%d.csv", config.BaseDirectory, timestamp.Unix())) + if err != nil { + logging.L.Fatal().Err(err).Msg("error exporting tweaks") + } + logging.L.Info().Msg("Finished Tweaks") + + logging.L.Info().Msg("Exporting Tweak Indices") + err = ExportTweakIndices(fmt.Sprintf("%s/data-export/tweak-indices-%d.csv", config.BaseDirectory, timestamp.Unix())) + if err != nil { + logging.L.Fatal().Err(err).Msg("error exporting tweak indices") + } + logging.L.Info().Msg("Finished Tweak Indices") + + logging.L.Info().Msg("Exporting HeadersInv") + err = ExportHeadersInv(fmt.Sprintf("%s/data-export/headers-inv-%d.csv", 
config.BaseDirectory, timestamp.Unix())) + if err != nil { + logging.L.Fatal().Err(err).Msg("error exporting headers inv") + } + logging.L.Info().Msg("Finished HeadersInv") + + logging.L.Info().Msg("Export Done") + os.Exit(0) +} diff --git a/internal/dblevel/blockheader.go b/internal/dblevel/blockheader.go new file mode 100644 index 0000000..959916f --- /dev/null +++ b/internal/dblevel/blockheader.go @@ -0,0 +1,32 @@ +package dblevel + +import ( + "errors" + + "github.com/setavenger/blindbit-lib/logging" + "github.com/setavenger/blindbit-oracle/internal/types" +) + +func InsertBlockHeader(pair types.BlockHeader) error { + err := insertSimple(HeadersDB, &pair) + if err != nil { + logging.L.Err(err).Msg("error inserting block header") + return err + } + logging.L.Trace().Msg("block_header inserted") + return nil +} + +func FetchByBlockHashBlockHeader(blockHash [32]byte) (*types.BlockHeader, error) { + var pair types.BlockHeader + err := retrieveByBlockHash(HeadersDB, blockHash, &pair) + if err != nil && !errors.Is(err, NoEntryErr{}) { + logging.L.Err(err).Msg("error fetching block header") + return nil, err + } else if errors.Is(err, NoEntryErr{}) { // todo why do we return the error anyways? 
+ // todo find good solution, muted because it will show up for every pull we make + // logging.L.Err(err).Msg("error fetching block header") + return nil, err + } + return &pair, nil +} diff --git a/src/db/dblevel/blockheaderinv.go b/internal/dblevel/blockheaderinv.go similarity index 80% rename from src/db/dblevel/blockheaderinv.go rename to internal/dblevel/blockheaderinv.go index 7b2317b..8ec5278 100644 --- a/src/db/dblevel/blockheaderinv.go +++ b/internal/dblevel/blockheaderinv.go @@ -1,26 +1,28 @@ package dblevel import ( - "SilentPaymentAppBackend/src/common" - "SilentPaymentAppBackend/src/common/types" "bytes" "encoding/binary" + "errors" + "github.com/setavenger/blindbit-lib/logging" + "github.com/setavenger/blindbit-oracle/internal/types" + "github.com/syndtr/goleveldb/leveldb" "github.com/syndtr/goleveldb/leveldb/util" ) func InsertBlockHeaderInv(pair types.BlockHeaderInv) error { err := insertSimple(HeadersInvDB, &pair) if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error inserting block header inv") return err } - common.DebugLogger.Println("header-inv inserted") + logging.L.Trace().Msg("header-inv inserted") return nil } func InsertBatchBlockHeaderInv(headersInv []types.BlockHeaderInv) error { - common.DebugLogger.Println("Inserting headers-inv...") + logging.L.Info().Msg("Inserting headers-inv...") // Create a slice of types.Pair with the same length as pairs pairs := make([]types.Pair, len(headersInv)) @@ -33,21 +35,19 @@ func InsertBatchBlockHeaderInv(headersInv []types.BlockHeaderInv) error { err := insertBatch(HeadersInvDB, pairs) if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error inserting block header inv") return err } - //for _, headerInv := range headersInv { - // common.InfoLogger.Printf("Inserted height: %d\n", headerInv.Height) - //} - common.DebugLogger.Printf("Inserted %d headers-inv\n", len(headersInv)) + logging.L.Trace().Msgf("Inserted %d headers-inv", len(headersInv)) 
return nil } +// FetchByBlockHeightBlockHeaderInv change height 32 to (u)int64 func FetchByBlockHeightBlockHeaderInv(height uint32) (types.BlockHeaderInv, error) { var pair types.BlockHeaderInv err := retrieveByBlockHeight(HeadersInvDB, height, &pair) if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error fetching block header inv") return types.BlockHeaderInv{}, err } return pair, nil @@ -62,26 +62,26 @@ func FetchHighestBlockHeaderInv() (*types.BlockHeaderInv, error) { if iter.Last() { err := result.DeSerialiseData(iter.Value()) if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error deserialising data") return nil, err } err = result.DeSerialiseKey(iter.Key()) if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error deserialising key") return nil, err } } err := iter.Error() - if err != nil { - common.ErrorLogger.Println(err) + if err != nil && !errors.Is(err, leveldb.ErrNotFound) { + logging.L.Err(err).Msg("error iterating over headers inv") return nil, err } - if result.Hash == "" { - common.WarningLogger.Println("no entry found") - return nil, NoEntryErr{} + if result.Hash == [32]byte{} { + logging.L.Warn().Msg("no entry found") + return nil, nil } - return &result, err + return &result, nil } // FetchHighestBlockHeaderInvByFlag gets the block with the highest height which has the corresponding flag set @@ -93,7 +93,7 @@ func FetchHighestBlockHeaderInvByFlag(flag bool) (*types.BlockHeaderInv, error) ok := iter.Last() if !ok { - return nil, NoEntryErr{} + return nil, nil } // Process the last element first, then continue with previous elements. 
@@ -101,13 +101,13 @@ func FetchHighestBlockHeaderInvByFlag(flag bool) (*types.BlockHeaderInv, error) // Deserialize data first err := result.DeSerialiseData(iter.Value()) if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error deserialising data") return nil, err } if result.Flag == flag { err = result.DeSerialiseKey(iter.Key()) if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error deserialising key") return nil, err } break @@ -119,26 +119,9 @@ func FetchHighestBlockHeaderInvByFlag(flag bool) (*types.BlockHeaderInv, error) } } - //for iter.Prev() { - // // Deserialize data first - // err := result.DeSerialiseData(iter.Value()) - // if err != nil { - // common.ErrorLogger.Println(err) - // return nil, err - // } - // if result.Flag == flag { - // err = result.DeSerialiseKey(iter.Key()) - // if err != nil { - // common.ErrorLogger.Println(err) - // return nil, err - // } - // break - // } - //} - err := iter.Error() if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error iterating over headers inv") return nil, err } return &result, err @@ -152,7 +135,7 @@ func GetMissingHeadersInv(heights []uint32) ([]uint32, error) { // keep an eye on performance around this function if len(heights) == 0 { - common.ErrorLogger.Println("passed an empty slice to check") + logging.L.Error().Msg("passed an empty slice to check") return []uint32{}, nil } @@ -173,13 +156,13 @@ func GetMissingHeadersInv(heights []uint32) ([]uint32, error) { var minHeightBuf bytes.Buffer err := binary.Write(&minHeightBuf, binary.BigEndian, minHeight) if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error writing min height") return nil, err } var maxHeightBuf bytes.Buffer err = binary.Write(&maxHeightBuf, binary.BigEndian, maxHeight) if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error writing max height") return nil, err } @@ -198,7 +181,7 @@ func 
GetMissingHeadersInv(heights []uint32) ([]uint32, error) { // we only need the key for the height err = pair.DeSerialiseKey(iter.Key()) if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error deserialising key") return nil, err } pairs = append(pairs, pair) @@ -211,7 +194,7 @@ func GetMissingHeadersInv(heights []uint32) ([]uint32, error) { err = iter.Error() if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error iterating over headers inv") return nil, err } @@ -247,7 +230,7 @@ func GetMissingHeadersInvFlag(heights []uint32, flag bool) ([]uint32, error) { // keep an eye on performance around this function if len(heights) == 0 { - common.ErrorLogger.Println("passed an empty slice to check") + logging.L.Error().Msg("passed an empty slice to check") return []uint32{}, nil } @@ -268,13 +251,13 @@ func GetMissingHeadersInvFlag(heights []uint32, flag bool) ([]uint32, error) { var minHeightBuf bytes.Buffer err := binary.Write(&minHeightBuf, binary.BigEndian, minHeight) if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error writing min height") return nil, err } var maxHeightBuf bytes.Buffer err = binary.Write(&maxHeightBuf, binary.BigEndian, maxHeight) if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error writing max height") return nil, err } @@ -293,7 +276,7 @@ func GetMissingHeadersInvFlag(heights []uint32, flag bool) ([]uint32, error) { // Deserialize data first err = pair.DeSerialiseData(iter.Value()) if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error deserialising data") return nil, err } @@ -302,7 +285,7 @@ func GetMissingHeadersInvFlag(heights []uint32, flag bool) ([]uint32, error) { if pair.Flag == !flag { err = pair.DeSerialiseKey(iter.Key()) if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error deserialising key") return nil, err } pairs = append(pairs, pair) @@ -315,7 +298,7 @@ func 
GetMissingHeadersInvFlag(heights []uint32, flag bool) ([]uint32, error) { err = iter.Error() if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error iterating over headers inv") return nil, err } @@ -346,12 +329,12 @@ func GetMissingHeadersInvFlag(heights []uint32, flag bool) ([]uint32, error) { func FetchAllHeadersInv() ([]types.BlockHeaderInv, error) { pairs, err := retrieveAll(HeadersInvDB, types.PairFactoryBlockHeaderInv) if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error iterating over headers inv") return nil, err } if len(pairs) == 0 { - common.WarningLogger.Println("Nothing returned") - return nil, NoEntryErr{} + logging.L.Warn().Msg("Nothing returned") + return nil, nil } result := make([]types.BlockHeaderInv, len(pairs)) @@ -360,8 +343,7 @@ func FetchAllHeadersInv() ([]types.BlockHeaderInv, error) { if pairPtr, ok := pair.(*types.BlockHeaderInv); ok { result[i] = *pairPtr } else { - common.ErrorLogger.Printf("%+v\n", pair) - panic("wrong pair struct returned") + logging.L.Panic().Err(err).Msg("wrong pair struct returned") } } return result, err diff --git a/src/db/dblevel/client.go b/internal/dblevel/client.go similarity index 66% rename from src/db/dblevel/client.go rename to internal/dblevel/client.go index cef4d1f..1f0fd12 100644 --- a/src/db/dblevel/client.go +++ b/internal/dblevel/client.go @@ -1,18 +1,21 @@ package dblevel import ( - "SilentPaymentAppBackend/src/common" - "SilentPaymentAppBackend/src/common/types" "bytes" "encoding/binary" - "encoding/hex" "errors" + "github.com/setavenger/blindbit-lib/logging" + "github.com/setavenger/blindbit-oracle/internal/types" "github.com/syndtr/goleveldb/leveldb" "github.com/syndtr/goleveldb/leveldb/util" ) // todo change to `var NoEntryErr = errors.new("[no entry found]")` + +// NoEntryErr was used to handle empty responses from level db +// +// Deprecated: Will probably not survive next rewrite. 
Bad design type NoEntryErr struct{} func (e NoEntryErr) Error() string { @@ -36,7 +39,7 @@ var ( func OpenDBConnection(path string) *leveldb.DB { db, err := leveldb.OpenFile(path, nil) if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error opening db connection") panic(err) } return db @@ -46,12 +49,12 @@ func OpenDBConnection(path string) *leveldb.DB { func extractKeyValue(pair types.Pair) ([]byte, []byte) { key, err := pair.SerialiseKey() if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error serialising key") panic(err) } value, err := pair.SerialiseData() if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error serialising data") panic(err) } return key, value @@ -61,7 +64,7 @@ func extractKeyValue(pair types.Pair) ([]byte, []byte) { func extractKey(pair types.Pair) []byte { key, err := pair.SerialiseKey() if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error serialising key") panic(err) } return key @@ -72,7 +75,7 @@ func insertSimple(db *leveldb.DB, pair types.Pair) error { // Use the key and value as separate arguments for db.Put err := db.Put(key, value, nil) if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error inserting simple") return err } @@ -88,41 +91,36 @@ func insertBatch(db *leveldb.DB, pairs []types.Pair) error { err := db.Write(batch, nil) if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error inserting batch") return err } return err } -func retrieveByBlockHash(db *leveldb.DB, blockHash string, pair types.Pair) error { - blockHashBytes, err := hex.DecodeString(blockHash) - if err != nil { - common.ErrorLogger.Println(err) - return err - } - - data, err := db.Get(blockHashBytes, nil) +func retrieveByBlockHash(db *leveldb.DB, blockHash [32]byte, pair types.Pair) error { + data, err := db.Get(blockHash[:], nil) if err != nil && !errors.Is(err, leveldb.ErrNotFound) { // todo this error 
probably exists as var/type somewhere - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error getting block hash") return err - } else if err != nil && errors.Is(err, leveldb.ErrNotFound) { // todo this error probably exists as var/type somewhere - // todo we don't need separate patterns if just return the errors anyways? or maybe just to avoid unnecessary logging - return NoEntryErr{} + } else if err != nil && errors.Is(err, leveldb.ErrNotFound) { + return nil } if len(data) == 0 { - // todo this should be a different type of error case - return NoEntryErr{} + // we do not return any errors anymore for empty results + //empty results are to be expected now and then + // only make error handling even more complicated + return nil } - err = pair.DeSerialiseKey(blockHashBytes) + err = pair.DeSerialiseKey(blockHash[:]) if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error deserialising key") return err } err = pair.DeSerialiseData(data) if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error deserialising data") return err } @@ -133,31 +131,30 @@ func retrieveByBlockHeight(db *leveldb.DB, blockHeight uint32, pair types.Pair) var buf bytes.Buffer err := binary.Write(&buf, binary.BigEndian, blockHeight) if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error writing block height") return err } data, err := db.Get(buf.Bytes(), nil) if err != nil && err.Error() != "leveldb: not found" { // todo this error probably exists as var/type somewhere - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error getting block height") return err - } else if err != nil && err.Error() == "leveldb: not found" { // todo this error probably exists as var/type somewhere - return NoEntryErr{} + } else if err != nil && errors.Is(err, leveldb.ErrNotFound) { // todo this error probably exists as var/type somewhere + return nil } if len(data) == 0 { - // todo this should be a different type of error 
case - return NoEntryErr{} + return nil } err = pair.DeSerialiseKey(buf.Bytes()) if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error deserialising key") return err } err = pair.DeSerialiseData(data) if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error deserialising data") return err } @@ -174,12 +171,12 @@ func retrieveAll(db *leveldb.DB, factory types.PairFactory) ([]types.Pair, error pair := factory() err = pair.DeSerialiseKey(iter.Key()) if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error deserialising key") return nil, err } err = pair.DeSerialiseData(iter.Value()) if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error deserialising data") return nil, err } results = append(results, pair) @@ -187,32 +184,30 @@ func retrieveAll(db *leveldb.DB, factory types.PairFactory) ([]types.Pair, error err = iter.Error() if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error iterating over db") return nil, err } return results, err } -func retrieveManyByBlockHash(db *leveldb.DB, blockHash string, factory types.PairFactory) ([]types.Pair, error) { - blockHashBytes, err := hex.DecodeString(blockHash) - if err != nil { - common.ErrorLogger.Println(err) - return nil, err - } +func retrieveManyByBlockHash(db *leveldb.DB, blockHash [32]byte, factory types.PairFactory) ([]types.Pair, error) { + blockHashBytes := blockHash[:] iter := db.NewIterator(util.BytesPrefix(blockHashBytes), nil) defer iter.Release() + + var err error var results []types.Pair for iter.Next() { pair := factory() err = pair.DeSerialiseKey(iter.Key()) if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error deserialising key") return nil, err } err = pair.DeSerialiseData(iter.Value()) if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error deserialising data") return nil, err } results = append(results, pair) @@ -220,40 
+215,33 @@ func retrieveManyByBlockHash(db *leveldb.DB, blockHash string, factory types.Pai err = iter.Error() if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error iterating over db") return nil, err } return results, err } -func retrieveManyByBlockHashAndTxid(db *leveldb.DB, blockHash, txid string, factory types.PairFactory) ([]types.Pair, error) { - blockHashBytes, err := hex.DecodeString(blockHash) - if err != nil { - common.ErrorLogger.Println(err) - return nil, err - } - txidBytes, err := hex.DecodeString(txid) - if err != nil { - common.ErrorLogger.Println(err) - return nil, err - } - - prefix := append(blockHashBytes, txidBytes...) +func retrieveManyByBlockHashAndTxid(db *leveldb.DB, blockHash, txid [32]byte, factory types.PairFactory) ([]types.Pair, error) { + var prefix [64]byte + copy(prefix[:32], blockHash[:]) + copy(prefix[32:], txid[:]) - iter := db.NewIterator(util.BytesPrefix(prefix), nil) + iter := db.NewIterator(util.BytesPrefix(prefix[:]), nil) defer iter.Release() + + var err error var results []types.Pair for iter.Next() { pair := factory() err = pair.DeSerialiseKey(iter.Key()) if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error deserialising key") return nil, err } err = pair.DeSerialiseData(iter.Value()) if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error deserialising data") return nil, err } results = append(results, pair) @@ -261,11 +249,11 @@ func retrieveManyByBlockHashAndTxid(db *leveldb.DB, blockHash, txid string, fact err = iter.Error() if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error iterating over db") return nil, err } if results == nil { - return nil, NoEntryErr{} + return nil, nil } return results, err } @@ -279,7 +267,7 @@ func deleteBatch(db *leveldb.DB, pairs []types.Pair) error { err := db.Write(batch, nil) if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error deleting batch") 
return err } return err diff --git a/src/db/dblevel/newutxosfilter.go b/internal/dblevel/newutxosfilter.go similarity index 62% rename from src/db/dblevel/newutxosfilter.go rename to internal/dblevel/newutxosfilter.go index 6b82f77..0ab5044 100644 --- a/src/db/dblevel/newutxosfilter.go +++ b/internal/dblevel/newutxosfilter.go @@ -1,25 +1,25 @@ package dblevel import ( - "SilentPaymentAppBackend/src/common" - "SilentPaymentAppBackend/src/common/types" + "github.com/setavenger/blindbit-lib/logging" + "github.com/setavenger/blindbit-oracle/internal/types" ) func InsertNewUTXOsFilter(pair types.Filter) error { err := insertSimple(NewUTXOsFiltersDB, &pair) if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error inserting new utxos filter") return err } - common.DebugLogger.Println("Taproot Filter inserted") + logging.L.Trace().Msg("Taproot Filter inserted") return nil } -func FetchByBlockHashNewUTXOsFilter(blockHash string) (types.Filter, error) { +func FetchByBlockHashNewUTXOsFilter(blockHash [32]byte) (types.Filter, error) { var pair types.Filter err := retrieveByBlockHash(NewUTXOsFiltersDB, blockHash, &pair) if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error fetching new utxos filter by block hash") return types.Filter{}, err } return pair, nil @@ -29,11 +29,11 @@ func FetchByBlockHashNewUTXOsFilter(blockHash string) (types.Filter, error) { func FetchAllNewUTXOsFilters() ([]types.Filter, error) { pairs, err := retrieveAll(NewUTXOsFiltersDB, types.PairFactoryFilter) if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error fetching all new utxos filters") return nil, err } if len(pairs) == 0 { - common.WarningLogger.Println("Nothing returned") + logging.L.Warn().Msg("nothing returned") return nil, NoEntryErr{} } @@ -43,7 +43,7 @@ func FetchAllNewUTXOsFilters() ([]types.Filter, error) { if pairPtr, ok := pair.(*types.Filter); ok { result[i] = *pairPtr } else { - 
common.ErrorLogger.Printf("%+v\n", pair) + logging.L.Err(err).Any("pair", pair).Msg("wrong pair struct returned") panic("wrong pair struct returned") } } diff --git a/src/db/dblevel/spentoutpointsfilter.go b/internal/dblevel/spentoutpointsfilter.go similarity index 55% rename from src/db/dblevel/spentoutpointsfilter.go rename to internal/dblevel/spentoutpointsfilter.go index 99c79da..4284fc1 100644 --- a/src/db/dblevel/spentoutpointsfilter.go +++ b/internal/dblevel/spentoutpointsfilter.go @@ -1,40 +1,40 @@ package dblevel import ( - "SilentPaymentAppBackend/src/common" - "SilentPaymentAppBackend/src/common/types" + "github.com/setavenger/blindbit-lib/logging" + "github.com/setavenger/blindbit-oracle/internal/types" ) func InsertSpentOutpointsFilter(pair types.Filter) error { err := insertSimple(SpentOutpointsFilterDB, &pair) if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error inserting spent outpoints filter") return err } - common.DebugLogger.Println("Taproot Filter inserted") + logging.L.Trace().Msg("Taproot Filter inserted") return nil } -func FetchByBlockHashSpentOutpointsFilter(blockHash string) (types.Filter, error) { +func FetchByBlockHashSpentOutpointsFilter(blockHash [32]byte) (types.Filter, error) { var pair types.Filter err := retrieveByBlockHash(SpentOutpointsFilterDB, blockHash, &pair) if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error fetching spent outpoints filter") return types.Filter{}, err } return pair, nil } -// FetchAllFilters returns all types.Filter in the DB +// FetchAllSpentOutpointsFilters returns all types.Filter in the DB func FetchAllSpentOutpointsFilters() ([]types.Filter, error) { pairs, err := retrieveAll(SpentOutpointsFilterDB, types.PairFactoryFilter) if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error fetching all spent outpoints filters") return nil, err } if len(pairs) == 0 { - common.WarningLogger.Println("Nothing returned") - return 
nil, NoEntryErr{} + logging.L.Warn().Msg("Nothing returned") + return nil, nil } result := make([]types.Filter, len(pairs)) @@ -43,8 +43,7 @@ func FetchAllSpentOutpointsFilters() ([]types.Filter, error) { if pairPtr, ok := pair.(*types.Filter); ok { result[i] = *pairPtr } else { - common.ErrorLogger.Printf("%+v\n", pair) - panic("wrong pair struct returned") + logging.L.Panic().Err(err).Msg("wrong pair struct returned") } } return result, err diff --git a/src/db/dblevel/spentoutpointsindex.go b/internal/dblevel/spentoutpointsindex.go similarity index 58% rename from src/db/dblevel/spentoutpointsindex.go rename to internal/dblevel/spentoutpointsindex.go index 20bcd06..10b5e51 100644 --- a/src/db/dblevel/spentoutpointsindex.go +++ b/internal/dblevel/spentoutpointsindex.go @@ -1,44 +1,45 @@ package dblevel import ( - "SilentPaymentAppBackend/src/common" - "SilentPaymentAppBackend/src/common/types" "errors" + + "github.com/setavenger/blindbit-lib/logging" + "github.com/setavenger/blindbit-oracle/internal/types" ) func InsertSpentOutpointsIndex(pair *types.SpentOutpointsIndex) error { err := insertSimple(SpentOutpointsIndexDB, pair) if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error inserting spent outpoints index") return err } - common.DebugLogger.Println("tweak index inserted") + logging.L.Trace().Msg("spent outpoints index inserted") return nil } -func FetchByBlockHashSpentOutpointIndex(blockHash string) (*types.SpentOutpointsIndex, error) { +func FetchByBlockHashSpentOutpointIndex(blockHash [32]byte) (*types.SpentOutpointsIndex, error) { var pair types.SpentOutpointsIndex err := retrieveByBlockHash(SpentOutpointsIndexDB, blockHash, &pair) if err != nil && !errors.Is(err, NoEntryErr{}) { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error fetching spent outpoints index") return nil, err } else if errors.Is(err, NoEntryErr{}) { // todo why do we return the error anyways? 
- common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error fetching spent outpoints index") return nil, err } return &pair, nil } -// FetchAllTweakIndices returns all types.TweakIndex in the DB +// FetchAllSpenOutpointsIndices returns all types.TweakIndex in the DB func FetchAllSpenOutpointsIndices() ([]types.SpentOutpointsIndex, error) { pairs, err := retrieveAll(SpentOutpointsIndexDB, types.PairFactorySpentOutpointsIndex) if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error fetching all spent outpoints indices") return nil, err } if len(pairs) == 0 { - common.WarningLogger.Println("Nothing returned") - return nil, NoEntryErr{} + logging.L.Warn().Msg("Nothing returned") + return nil, nil } result := make([]types.SpentOutpointsIndex, len(pairs)) @@ -47,8 +48,7 @@ func FetchAllSpenOutpointsIndices() ([]types.SpentOutpointsIndex, error) { if pairPtr, ok := pair.(*types.SpentOutpointsIndex); ok { result[i] = *pairPtr } else { - common.ErrorLogger.Printf("%+v\n", pair) - panic("wrong pair struct returned") + logging.L.Panic().Err(err).Msg("wrong pair struct returned") } } return result, err diff --git a/src/db/dblevel/tweak.go b/internal/dblevel/tweak.go similarity index 69% rename from src/db/dblevel/tweak.go rename to internal/dblevel/tweak.go index 7fbc35f..2d5eb78 100644 --- a/src/db/dblevel/tweak.go +++ b/internal/dblevel/tweak.go @@ -1,18 +1,16 @@ package dblevel import ( - "SilentPaymentAppBackend/src/common" - "SilentPaymentAppBackend/src/common/types" - "encoding/hex" "errors" "math" + "github.com/setavenger/blindbit-lib/logging" + "github.com/setavenger/blindbit-oracle/internal/types" "github.com/syndtr/goleveldb/leveldb/util" ) // InsertBatchTweaks index implements cut through and dust func InsertBatchTweaks(tweaks []types.Tweak) error { - // common.DebugLogger.Println("Inserting tweaks...") // Create a slice of types.Pair with the same length as pairs pairs := make([]types.Pair, len(tweaks)) @@ -24,10 +22,10 @@ func 
InsertBatchTweaks(tweaks []types.Tweak) error { err := insertBatch(TweaksDB, pairs) if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error inserting tweaks") return err } - common.DebugLogger.Printf("Inserted %d tweaks", len(tweaks)) + logging.L.Trace().Msgf("Inserted %d tweaks", len(tweaks)) return nil } @@ -36,12 +34,12 @@ func OverWriteTweaks(tweaks []types.Tweak) error { for _, tweak := range tweaks { pairs, err := retrieveManyByBlockHashAndTxid(TweaksDB, tweak.BlockHash, tweak.Txid, types.PairFactoryTweak) if err != nil && !errors.Is(err, NoEntryErr{}) { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error retrieving tweaks") return err } else if err != nil && errors.Is(err, NoEntryErr{}) { // This should not happen because the overwrites are computed from remaining UTXOs. // Getting this error would mean that we have UTXOs without corresponding tweaks in the DB - common.ErrorLogger.Println("no entries for a tweak were found. this should not happen") + logging.L.Err(err).Msg("no entries for a tweak were found. this should not happen") return err // keep this as an error. if this happens we have to know } @@ -49,9 +47,9 @@ func OverWriteTweaks(tweaks []types.Tweak) error { if len(pairs) != 1 { // this scenario should never happen. The database should not have >1 entries for one transaction. 
<1 (0) should give no entry error // prev - common.ErrorLogger.Printf("%+v", pairs) - common.ErrorLogger.Println("number of tweaks was not exactly 1") - return errors.New("number of tweaks was not exactly 1") + err = errors.New("number of tweaks was not exactly 1") + logging.L.Err(err).Any("pairs", pairs).Msg("number of tweaks was not exactly 1") + return err } var result types.Tweak @@ -59,7 +57,7 @@ func OverWriteTweaks(tweaks []types.Tweak) error { if pairPtr, ok := pairs[0].(*types.Tweak); ok { result = *pairPtr } else { - common.ErrorLogger.Printf("%+v\n", pairs[0]) + logging.L.Err(err).Any("pair", pairs[0]).Msg("wrong pair struct returned") panic("wrong pair struct returned") } tweak.TweakData = result.TweakData @@ -70,21 +68,21 @@ func OverWriteTweaks(tweaks []types.Tweak) error { err := InsertBatchTweaks(tweaksToOverwrite) if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error overwriting tweaks") return err } return err } -func FetchByBlockHashTweaks(blockHash string) ([]types.Tweak, error) { - common.DebugLogger.Println("Fetching tweaks") +func FetchByBlockHashTweaks(blockHash [32]byte) ([]types.Tweak, error) { + logging.L.Trace().Msg("Fetching tweaks") pairs, err := retrieveManyByBlockHash(TweaksDB, blockHash, types.PairFactoryTweak) if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error fetching tweaks") return nil, err } if len(pairs) == 0 { - return nil, NoEntryErr{} + return nil, nil } result := make([]types.Tweak, len(pairs)) @@ -93,19 +91,19 @@ func FetchByBlockHashTweaks(blockHash string) ([]types.Tweak, error) { if pairPtr, ok := pair.(*types.Tweak); ok { result[i] = *pairPtr } else { - common.ErrorLogger.Printf("%+v\n", pair) + logging.L.Err(err).Any("pair", pair).Msg("wrong pair struct returned") panic("wrong pair struct returned") } } - common.DebugLogger.Printf("Fetched %d tweaks\n", len(result)) + logging.L.Trace().Msgf("Fetched %d tweaks", len(result)) return result, nil } func 
DeleteBatchTweaks(tweaks []types.Tweak) error { - common.DebugLogger.Println("Deleting Tweaks...") + logging.L.Trace().Msg("Deleting Tweaks...") if len(tweaks) == 0 { - common.DebugLogger.Println("no tweaks to delete") + logging.L.Debug().Msg("no tweaks to delete") return nil } // Create a slice of types.Pair with the same length as pairs @@ -118,10 +116,10 @@ func DeleteBatchTweaks(tweaks []types.Tweak) error { } err := deleteBatch(TweaksDB, pairs) if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error deleting tweaks") return err } - common.DebugLogger.Printf("Deleted %d Tweaks\n", len(tweaks)) + logging.L.Trace().Msgf("Deleted %d Tweaks", len(tweaks)) return err } @@ -129,12 +127,12 @@ func DeleteBatchTweaks(tweaks []types.Tweak) error { func FetchAllTweaks() ([]types.Tweak, error) { pairs, err := retrieveAll(TweaksDB, types.PairFactoryTweak) if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error fetching all tweaks") return nil, err } if len(pairs) == 0 { - common.WarningLogger.Println("Nothing returned") - return nil, NoEntryErr{} + logging.L.Warn().Msg("nothing returned") + return nil, nil } result := make([]types.Tweak, len(pairs)) @@ -143,8 +141,8 @@ func FetchAllTweaks() ([]types.Tweak, error) { if pairPtr, ok := pair.(*types.Tweak); ok { result[i] = *pairPtr } else { - common.ErrorLogger.Printf("%+v\n", pair) - panic("wrong pair struct returned") + logging.L.Panic().Err(err).Any("pair", pair).Msg("wrong pair struct returned") + return nil, err } } return result, err @@ -163,36 +161,36 @@ func DustOverwriteRoutine() error { tweak := types.Tweak{} err := tweak.DeSerialiseData(iter.Value()) if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error deserialising data") return err } err = tweak.DeSerialiseKey(iter.Key()) if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error deserialising key") return err } utxos, err := 
FetchByBlockHashAndTxidUTXOs(tweak.BlockHash, tweak.Txid) if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error fetching utxos") return err } // we insert a fake spentUTXO such that the highest of the remaining will be taken. highestValue, err := types.FindBiggestRemainingUTXO(types.UTXO{Value: math.MaxUint64}, utxos) if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error finding biggest remaining utxo") return err } // todo highestValue might be nil here tweak.HighestValue = *highestValue tweaksForBatchInsert = append(tweaksForBatchInsert, tweak) if counter%2_500 == 0 { - common.InfoLogger.Println("Inserting for", counter) + logging.L.Info().Msgf("Inserting for %d", counter) // we use insert instead of overwrite because we already have all the information ready err = InsertBatchTweaks(tweaksForBatchInsert) if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error inserting tweaks") return err } tweaksForBatchInsert = []types.Tweak{} @@ -202,40 +200,36 @@ func DustOverwriteRoutine() error { // insert the remaining tweaks err := InsertBatchTweaks(tweaksForBatchInsert) if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error inserting tweaks") return err } err = iter.Error() if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error iterating over tweaks") return err } return err } -func FetchByBlockHashDustLimitTweaks(blockHash string, dustLimit uint64) ([]types.Tweak, error) { - blockHashBytes, err := hex.DecodeString(blockHash) - if err != nil { - common.ErrorLogger.Println(err) - return nil, err - } - iter := TweaksDB.NewIterator(util.BytesPrefix(blockHashBytes), nil) +func FetchByBlockHashDustLimitTweaks(blockHash [32]byte, dustLimit uint64) ([]types.Tweak, error) { + iter := TweaksDB.NewIterator(util.BytesPrefix(blockHash[:]), nil) defer iter.Release() var results []types.Tweak + var err error for iter.Next() { tweak := types.Tweak{} // 
Deserialize data first err = tweak.DeSerialiseData(iter.Value()) if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error deserialising data") return nil, err } if tweak.HighestValue >= dustLimit { err = tweak.DeSerialiseKey(iter.Key()) if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error deserialising key") return nil, err } results = append(results, tweak) @@ -244,7 +238,7 @@ func FetchByBlockHashDustLimitTweaks(blockHash string, dustLimit uint64) ([]type err = iter.Error() if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error iterating over tweaks") return nil, err } diff --git a/src/db/dblevel/tweakindex.go b/internal/dblevel/tweakindex.go similarity index 64% rename from src/db/dblevel/tweakindex.go rename to internal/dblevel/tweakindex.go index 53c40f9..adfdf9c 100644 --- a/src/db/dblevel/tweakindex.go +++ b/internal/dblevel/tweakindex.go @@ -1,29 +1,30 @@ package dblevel import ( - "SilentPaymentAppBackend/src/common" - "SilentPaymentAppBackend/src/common/types" "errors" + + "github.com/setavenger/blindbit-lib/logging" + "github.com/setavenger/blindbit-oracle/internal/types" ) func InsertTweakIndex(pair *types.TweakIndex) error { err := insertSimple(TweakIndexDB, pair) if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error inserting tweak index") return err } - common.DebugLogger.Println("tweak index inserted") + logging.L.Trace().Msg("tweak index inserted") return nil } -func FetchByBlockHashTweakIndex(blockHash string) (*types.TweakIndex, error) { +func FetchByBlockHashTweakIndex(blockHash [32]byte) (*types.TweakIndex, error) { var pair types.TweakIndex err := retrieveByBlockHash(TweakIndexDB, blockHash, &pair) if err != nil && !errors.Is(err, NoEntryErr{}) { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error fetching tweak index") return nil, err } else if err != nil && errors.Is(err, NoEntryErr{}) { // todo why do we return the 
error anyways? - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error fetching tweak index") return nil, err } // todo this probably does not need to be a pointer @@ -34,12 +35,12 @@ func FetchByBlockHashTweakIndex(blockHash string) (*types.TweakIndex, error) { func FetchAllTweakIndices() ([]types.TweakIndex, error) { pairs, err := retrieveAll(TweakIndexDB, types.PairFactoryTweakIndex) if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error fetching all tweak indices") return nil, err } if len(pairs) == 0 { - common.WarningLogger.Println("Nothing returned") - return nil, NoEntryErr{} + logging.L.Warn().Msg("Nothing returned") + return nil, nil } result := make([]types.TweakIndex, len(pairs)) @@ -48,8 +49,7 @@ func FetchAllTweakIndices() ([]types.TweakIndex, error) { if pairPtr, ok := pair.(*types.TweakIndex); ok { result[i] = *pairPtr } else { - common.ErrorLogger.Printf("%+v\n", pair) - panic("wrong pair struct returned") + logging.L.Panic().Err(err).Msg("wrong pair struct returned") } } return result, err diff --git a/src/db/dblevel/tweakindex_dusted.go b/internal/dblevel/tweakindex_dusted.go similarity index 61% rename from src/db/dblevel/tweakindex_dusted.go rename to internal/dblevel/tweakindex_dusted.go index 39878b1..792e783 100644 --- a/src/db/dblevel/tweakindex_dusted.go +++ b/internal/dblevel/tweakindex_dusted.go @@ -1,29 +1,30 @@ package dblevel import ( - "SilentPaymentAppBackend/src/common" - "SilentPaymentAppBackend/src/common/types" "errors" + + "github.com/setavenger/blindbit-lib/logging" + "github.com/setavenger/blindbit-oracle/internal/types" ) func InsertTweakIndexDust(pair *types.TweakIndexDust) error { err := insertSimple(TweakIndexDustDB, pair) if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error inserting tweak index with dust filter") return err } - common.DebugLogger.Println("tweak index with dust filter inserted") + logging.L.Trace().Msg("tweak index with dust filter 
inserted") return nil } -func FetchByBlockHashTweakIndexDust(blockHash string) (*types.TweakIndexDust, error) { +func FetchByBlockHashTweakIndexDust(blockHash [32]byte) (*types.TweakIndexDust, error) { var pair types.TweakIndexDust err := retrieveByBlockHash(TweakIndexDustDB, blockHash, &pair) if err != nil && !errors.Is(err, NoEntryErr{}) { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error fetching tweak index with dust filter") return nil, err } else if err != nil && errors.Is(err, NoEntryErr{}) { // todo why do we return the error anyways? - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error fetching tweak index with dust filter") return nil, err } // todo this probably does not need to be a pointer @@ -34,12 +35,12 @@ func FetchByBlockHashTweakIndexDust(blockHash string) (*types.TweakIndexDust, er func FetchAllTweakIndicesDust() ([]types.TweakIndexDust, error) { pairs, err := retrieveAll(TweakIndexDustDB, types.PairFactoryTweakIndexDust) if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error fetching all tweak indices with dust filter") return nil, err } if len(pairs) == 0 { - common.WarningLogger.Println("Nothing returned") - return nil, NoEntryErr{} + logging.L.Warn().Msg("Nothing returned") + return nil, nil } result := make([]types.TweakIndexDust, len(pairs)) @@ -48,8 +49,7 @@ func FetchAllTweakIndicesDust() ([]types.TweakIndexDust, error) { if pairPtr, ok := pair.(*types.TweakIndexDust); ok { result[i] = *pairPtr } else { - common.ErrorLogger.Printf("%+v\n", pair) - panic("wrong pair struct returned") + logging.L.Panic().Err(err).Msg("wrong pair struct returned") } } return result, err diff --git a/internal/dblevel/utils.go b/internal/dblevel/utils.go new file mode 100644 index 0000000..72cafdb --- /dev/null +++ b/internal/dblevel/utils.go @@ -0,0 +1,44 @@ +package dblevel + +import "github.com/setavenger/blindbit-lib/logging" + +func CloseDBs() { + err := HeadersDB.Close() + if err != nil { + 
logging.L.Err(err).Msg("error closing headers db") + } + err = HeadersInvDB.Close() + if err != nil { + logging.L.Err(err).Msg("error closing headers inv db") + } + err = NewUTXOsFiltersDB.Close() + if err != nil { + logging.L.Err(err).Msg("error closing new utxos filters db") + } + err = TweaksDB.Close() + if err != nil { + logging.L.Err(err).Msg("error closing tweaks db") + } + err = TweakIndexDB.Close() + if err != nil { + logging.L.Err(err).Msg("error closing tweak index db") + } + err = TweakIndexDustDB.Close() + if err != nil { + logging.L.Err(err).Msg("error closing tweak index dust db") + } + err = UTXOsDB.Close() + if err != nil { + logging.L.Err(err).Msg("error closing utxos db") + } + err = SpentOutpointsIndexDB.Close() + if err != nil { + logging.L.Err(err).Msg("error closing spent outpoints index db") + } + err = SpentOutpointsFilterDB.Close() + if err != nil { + logging.L.Err(err).Msg("error closing spent outpoints filter db") + } + + logging.L.Info().Msg("DBs closed") +} diff --git a/src/db/dblevel/utxo.go b/internal/dblevel/utxo.go similarity index 69% rename from src/db/dblevel/utxo.go rename to internal/dblevel/utxo.go index 868f177..0824198 100644 --- a/src/db/dblevel/utxo.go +++ b/internal/dblevel/utxo.go @@ -1,42 +1,40 @@ package dblevel import ( - "SilentPaymentAppBackend/src/common" - "SilentPaymentAppBackend/src/common/types" "errors" + "github.com/setavenger/blindbit-lib/logging" + "github.com/setavenger/blindbit-oracle/internal/types" "github.com/syndtr/goleveldb/leveldb/util" ) -func InsertUTXOs(utxos []types.UTXO) error { - // common.DebugLogger.Println("Inserting UTXOs...") +func InsertUTXOs(utxos []*types.UTXO) error { // Create a slice of types.Pair with the same length as pairs pairs := make([]types.Pair, len(utxos)) // Convert each UTXO to a Pair and assign it to the new slice for i, pair := range utxos { pairCopy := pair // Create a new variable that is a copy of pair - pairs[i] = &pairCopy + pairs[i] = pairCopy } err := 
insertBatch(UTXOsDB, pairs) if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error inserting utxos") return err } - common.DebugLogger.Printf("Inserted %d UTXOs", len(utxos)) + logging.L.Trace().Msgf("Inserted %d UTXOs", len(utxos)) return nil } -func FetchByBlockHashUTXOs(blockHash string) ([]types.UTXO, error) { - //common.InfoLogger.Println("Fetching UTXOs") +func FetchByBlockHashUTXOs(blockHash [32]byte) ([]types.UTXO, error) { pairs, err := retrieveManyByBlockHash(UTXOsDB, blockHash, types.PairFactoryUTXO) if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error fetching utxos by block hash") return nil, err } if len(pairs) == 0 { - return nil, NoEntryErr{} + return nil, nil } result := make([]types.UTXO, len(pairs)) @@ -45,28 +43,26 @@ func FetchByBlockHashUTXOs(blockHash string) ([]types.UTXO, error) { if pairPtr, ok := pair.(*types.UTXO); ok { result[i] = *pairPtr } else { - common.ErrorLogger.Printf("%+v\n", pair) + logging.L.Err(err).Msg("wrong pair struct returned") panic("wrong pair struct returned") } } - //common.InfoLogger.Printf("Fetched %d UTXOs\n", len(result)) return result, nil } -func FetchByBlockHashAndTxidUTXOs(blockHash, txid string) ([]types.UTXO, error) { - //common.InfoLogger.Println("Fetching UTXOs") +func FetchByBlockHashAndTxidUTXOs(blockHash, txid [32]byte) ([]types.UTXO, error) { pairs, err := retrieveManyByBlockHashAndTxid(UTXOsDB, blockHash, txid, types.PairFactoryUTXO) if err != nil { if errors.Is(err, NoEntryErr{}) { // don't print if it's a no entry error return nil, err } - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error fetching utxos by block hash and txid") return nil, err } if len(pairs) == 0 { - return nil, NoEntryErr{} + return nil, nil } result := make([]types.UTXO, len(pairs)) @@ -75,17 +71,15 @@ func FetchByBlockHashAndTxidUTXOs(blockHash, txid string) ([]types.UTXO, error) if pairPtr, ok := pair.(*types.UTXO); ok { result[i] = *pairPtr } else { - 
common.ErrorLogger.Printf("%+v\n", pair) + logging.L.Err(err).Msg("wrong pair struct returned") panic("wrong pair struct returned") } } - //common.InfoLogger.Printf("Fetched %d UTXOs\n", len(result)) return result, nil } func DeleteBatchUTXOs(utxos []types.UTXO) error { - // common.DebugLogger.Println("Deleting UTXOs...") // Create a slice of types.Pair with the same length as pairs pairs := make([]types.Pair, len(utxos)) @@ -96,10 +90,9 @@ func DeleteBatchUTXOs(utxos []types.UTXO) error { } err := deleteBatch(UTXOsDB, pairs) if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error deleting utxos") return err } - // common.DebugLogger.Printf("Deleted %d UTXOs\n", len(utxos)) return nil } @@ -107,12 +100,12 @@ func DeleteBatchUTXOs(utxos []types.UTXO) error { func FetchAllUTXOs() ([]types.UTXO, error) { pairs, err := retrieveAll(UTXOsDB, types.PairFactoryUTXO) if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error fetching all utxos") return nil, err } if len(pairs) == 0 { - common.WarningLogger.Println("Nothing returned") - return nil, NoEntryErr{} + logging.L.Warn().Msg("nothing returned") + return nil, nil } result := make([]types.UTXO, len(pairs)) @@ -121,8 +114,7 @@ func FetchAllUTXOs() ([]types.UTXO, error) { if pairPtr, ok := pair.(*types.UTXO); ok { result[i] = *pairPtr } else { - common.ErrorLogger.Printf("%+v\n", pair) - panic("wrong pair struct returned") + logging.L.Panic().Err(err).Any("pair", pair).Msg("wrong pair struct returned") } } return result, err @@ -137,7 +129,7 @@ func PruneUTXOs(prefix []byte) error { // totalSet is for the final batch deletion var totalSetToDelete []types.UTXO - var lastTxid string + var lastTxid [32]byte var canBeRemoved = true var currentSet []types.UTXO @@ -149,11 +141,11 @@ func PruneUTXOs(prefix []byte) error { err = value.DeSerialiseKey(iter.Key()) if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error deserialising key") return err } - if 
lastTxid == "" { + if lastTxid == [32]byte{} { lastTxid = value.Txid } @@ -163,7 +155,7 @@ func PruneUTXOs(prefix []byte) error { err = value.DeSerialiseData(iter.Value()) if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error deserialising data") return err } @@ -177,9 +169,8 @@ func PruneUTXOs(prefix []byte) error { // delete the current set of UTXOs if eligible // do deletion - if lastTxid != "" && canBeRemoved { + if lastTxid != [32]byte{} && canBeRemoved { totalSetToDelete = append(totalSetToDelete, currentSet...) - // common.DebugLogger.Printf("Added %d UTXOs for deletion - %s\n", len(currentSet), lastTxid) } // reset state @@ -192,17 +183,16 @@ func PruneUTXOs(prefix []byte) error { // Handle the last batch of UTXOs after the loop if canBeRemoved && len(currentSet) > 0 { totalSetToDelete = append(totalSetToDelete, currentSet...) - // common.DebugLogger.Printf("Added %d UTXOs for deletion - %s\n", len(currentSet), lastTxid) } err = iter.Error() if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error iterating over utxos") return err } err = DeleteBatchUTXOs(totalSetToDelete) if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error deleting utxos") return err } return nil diff --git a/src/server/api.go b/internal/server/api.go similarity index 75% rename from src/server/api.go rename to internal/server/api.go index 6b87537..8bde970 100644 --- a/src/server/api.go +++ b/internal/server/api.go @@ -1,9 +1,6 @@ package server import ( - "SilentPaymentAppBackend/src/common" - "SilentPaymentAppBackend/src/common/types" - "SilentPaymentAppBackend/src/db/dblevel" "bytes" "encoding/hex" "errors" @@ -12,6 +9,11 @@ import ( "strconv" "github.com/gin-gonic/gin" + "github.com/setavenger/blindbit-lib/api" + "github.com/setavenger/blindbit-lib/logging" + "github.com/setavenger/blindbit-oracle/internal/config" + "github.com/setavenger/blindbit-oracle/internal/dblevel" + 
"github.com/setavenger/blindbit-oracle/internal/types" ) // ApiHandler todo might not need ApiHandler struct if no data is stored within. @@ -19,31 +21,22 @@ import ( // Will keep for now just in case, so I don't have to refactor twice type ApiHandler struct{} -type Info struct { - Network string `json:"network"` - Height uint32 `json:"height"` - TweaksOnly bool `json:"tweaks_only"` - TweaksFullBasic bool `json:"tweaks_full_basic"` - TweaksFullWithDustFilter bool `json:"tweaks_full_with_dust_filter"` - TweaksCutThroughWithDustFilter bool `json:"tweaks_cut_through_with_dust_filter"` -} - func (h *ApiHandler) GetInfo(c *gin.Context) { lastHeader, err := dblevel.FetchHighestBlockHeaderInvByFlag(true) if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error fetching highest block header inv") c.JSON(http.StatusInternalServerError, gin.H{ "error": "could could not retrieve data from database", }) return } - c.JSON(http.StatusOK, Info{ - Network: common.ChainToString(common.Chain), + c.JSON(http.StatusOK, api.InfoResponseOracle{ + Network: config.ChainToString(config.Chain), Height: lastHeader.Height, - TweaksOnly: common.TweaksOnly, - TweaksFullBasic: common.TweakIndexFullNoDust, - TweaksFullWithDustFilter: common.TweakIndexFullIncludingDust, - TweaksCutThroughWithDustFilter: common.TweaksCutThroughWithDust, + TweaksOnly: config.TweaksOnly, + TweaksFullBasic: config.TweakIndexFullNoDust, + TweaksFullWithDustFilter: config.TweakIndexFullIncludingDust, + TweaksCutThroughWithDustFilter: config.TweaksCutThroughWithDust, }) } @@ -51,14 +44,14 @@ func (h *ApiHandler) GetBestBlockHeight(c *gin.Context) { // todo returns one height too low lastHeader, err := dblevel.FetchHighestBlockHeaderInvByFlag(true) if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error fetching highest block header inv") c.JSON(http.StatusInternalServerError, gin.H{ "error": "could could not retrieve data from database", }) return } - 
c.JSON(http.StatusOK, gin.H{ - "block_height": lastHeader.Height, + c.JSON(http.StatusOK, api.BlockHeightResponseOracle{ + BlockHeight: lastHeader.Height, }) } @@ -74,7 +67,9 @@ func (h *ApiHandler) GetBlockHashByHeight(c *gin.Context) { return } - c.JSON(http.StatusOK, gin.H{"block_hash": hInv.Hash}) + c.JSON(http.StatusOK, api.BlockHashResponseOracle{ + BlockHash: hex.EncodeToString(hInv.Hash[:]), + }) } func (h *ApiHandler) GetCFilterByHeight(c *gin.Context) { @@ -97,7 +92,7 @@ func (h *ApiHandler) GetCFilterByHeight(c *gin.Context) { case "spent": cFilter, err = dblevel.FetchByBlockHashSpentOutpointsFilter(hInv.Hash) if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error fetching spent outpoints filter") c.JSON(http.StatusInternalServerError, gin.H{ "error": "could not get filter from db", }) @@ -106,7 +101,7 @@ func (h *ApiHandler) GetCFilterByHeight(c *gin.Context) { case "new-utxos": cFilter, err = dblevel.FetchByBlockHashNewUTXOsFilter(hInv.Hash) if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error fetching new utxos filter") c.JSON(http.StatusInternalServerError, gin.H{ "error": "could not get filter from db", }) @@ -119,11 +114,11 @@ func (h *ApiHandler) GetCFilterByHeight(c *gin.Context) { return } - data := gin.H{ - "filter_type": cFilter.FilterType, - "block_height": hInv.Height, - "block_hash": cFilter.BlockHash, - "data": hex.EncodeToString(cFilter.Data), + data := api.FilterResponseOracle{ + FilterType: cFilter.FilterType, + BlockHeight: hInv.Height, + BlockHash: hex.EncodeToString(cFilter.BlockHash[:]), + Data: hex.EncodeToString(cFilter.Data), } c.JSON(200, data) @@ -142,7 +137,7 @@ func (h *ApiHandler) GetUtxosByHeight(c *gin.Context) { } utxos, err := dblevel.FetchByBlockHashUTXOs(hInv.Hash) if err != nil && !errors.Is(err, dblevel.NoEntryErr{}) { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error fetching utxos") c.JSON(http.StatusInternalServerError, gin.H{ "error": "could 
could not retrieve data from database", }) @@ -151,7 +146,7 @@ func (h *ApiHandler) GetUtxosByHeight(c *gin.Context) { if utxos != nil { c.JSON(200, utxos) } else { - c.JSON(200, []interface{}{}) + c.JSON(200, []any{}) } } @@ -170,28 +165,30 @@ func (h *ApiHandler) GetTweakDataByHeight(c *gin.Context) { return } var tweaks []types.Tweak - + var err error // Extracting the dustLimit query parameter and converting it to uint64 dustLimitStr := c.DefaultQuery("dustLimit", "0") // Default to "0" if not provided - dustLimit, err := strconv.ParseUint(dustLimitStr, 10, 64) - if err != nil { - c.JSON(http.StatusBadRequest, gin.H{"error": "Invalid dustLimit parameter"}) - return - } - if dustLimit == 0 { + + if dustLimitStr == "0" { // this query should have a better performance due to no required checks tweaks, err = dblevel.FetchByBlockHashTweaks(hInv.Hash) if err != nil && !errors.Is(err, dblevel.NoEntryErr{}) { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error fetching tweaks") c.JSON(http.StatusInternalServerError, gin.H{ "error": "could could not retrieve data from database", }) return } } else { + // moved conversion inside to optimise performance of above query + dustLimit, err := strconv.ParseUint(dustLimitStr, 10, 64) + if err != nil { + c.JSON(http.StatusBadRequest, gin.H{"error": "Invalid dustLimit parameter"}) + return + } tweaks, err = dblevel.FetchByBlockHashDustLimitTweaks(hInv.Hash, dustLimit) if err != nil && !errors.Is(err, dblevel.NoEntryErr{}) { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error fetching dust limit tweaks") c.JSON(http.StatusInternalServerError, gin.H{ "error": "could could not retrieve data from database", }) @@ -215,11 +212,13 @@ func (h *ApiHandler) GetTweakDataByHeight(c *gin.Context) { func (h *ApiHandler) GetTweakIndexDataByHeight(c *gin.Context) { headerInv, exists := c.Get("headerInv") if !exists { + logging.L.Error().Msg("headerInv not found") c.JSON(http.StatusInternalServerError, gin.H{"error": 
"headerInv not found"}) return } hInv, ok := headerInv.(types.BlockHeaderInv) // Assuming HeaderInventory is the expected type if !ok { + logging.L.Error().Msg("invalid headerInv type") c.JSON(http.StatusInternalServerError, gin.H{"error": "invalid headerInv type"}) return } @@ -228,23 +227,23 @@ func (h *ApiHandler) GetTweakIndexDataByHeight(c *gin.Context) { dustLimitStr := c.DefaultQuery("dustLimit", "0") // Default to "0" if not provided dustLimit, err := strconv.ParseUint(dustLimitStr, 10, 64) if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error parsing dust limit") c.JSON(http.StatusBadRequest, gin.H{"error": "Invalid dustLimit parameter"}) return } - if dustLimit != 0 && !common.TweakIndexFullIncludingDust { - common.DebugLogger.Println("tried accessing dust limits") + if dustLimit != 0 && !config.TweakIndexFullIncludingDust { + logging.L.Debug().Msg("tried accessing dust limits") c.JSON(http.StatusBadRequest, gin.H{"error": "Server does not allow dustLimits"}) return } // todo basically duplicate code could be simplified and generalised with interface/(generics?) 
- if common.TweakIndexFullIncludingDust { + if config.TweakIndexFullIncludingDust { var tweakIndex *types.TweakIndexDust tweakIndex, err = dblevel.FetchByBlockHashTweakIndexDust(hInv.Hash) if err != nil && !errors.Is(err, dblevel.NoEntryErr{}) { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error fetching tweak index dust") c.JSON(http.StatusInternalServerError, gin.H{ "error": "could could not retrieve data from database", }) @@ -271,7 +270,7 @@ func (h *ApiHandler) GetTweakIndexDataByHeight(c *gin.Context) { // this query should have a better performance due to no required checks tweakIndex, err := dblevel.FetchByBlockHashTweakIndex(hInv.Hash) if err != nil && !errors.Is(err, dblevel.NoEntryErr{}) { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error fetching tweak index") c.JSON(http.StatusInternalServerError, gin.H{ "error": "could could not retrieve data from database", }) @@ -306,7 +305,7 @@ func (h *ApiHandler) GetSpentOutpointsIndex(c *gin.Context) { } spentOutpointsIndex, err := dblevel.FetchByBlockHashSpentOutpointIndex(hInv.Hash) if err != nil && !errors.Is(err, dblevel.NoEntryErr{}) { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error fetching spent outpoints index") c.JSON(http.StatusInternalServerError, gin.H{ "error": "could could not retrieve data from database", }) @@ -323,10 +322,10 @@ func (h *ApiHandler) GetSpentOutpointsIndex(c *gin.Context) { Data []string `json:"data"` } - result.BlockHash = spentOutpointsIndex.BlockHash + result.BlockHash = hex.EncodeToString(spentOutpointsIndex.BlockHash[:]) if len(spentOutpointsIndex.Data) == 0 { - common.WarningLogger.Println("spentOutpointsIndex was empty") + logging.L.Debug().Msg("spentOutpointsIndex was empty") result.Data = []string{} c.JSON(http.StatusOK, result) return @@ -349,13 +348,13 @@ type TxRequest struct { func (h *ApiHandler) ForwardRawTX(c *gin.Context) { var txRequest TxRequest if err := c.ShouldBind(&txRequest); err != nil { - 
common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error binding tx request") c.Status(http.StatusBadRequest) return } err := forwardTxToMemPool(txRequest.Data) if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error forwarding tx to mempool") c.Status(http.StatusInternalServerError) return } @@ -363,21 +362,32 @@ func (h *ApiHandler) ForwardRawTX(c *gin.Context) { } func forwardTxToMemPool(txHex string) error { - //url := "http://localhost/api/tx" + var url string + + switch config.Chain { + case config.Mainnet: + url = config.MempoolEndpointMainnet + case config.Testnet3: + url = config.MempoolEndpointTestnet3 + case config.Signet: + url = config.MempoolEndpointSignet + default: + return errors.New("invalid chain") + } - resp, err := http.Post(common.MempoolEndpoint, "application/x-www-form-urlencoded", bytes.NewBufferString(txHex)) + resp, err := http.Post(url, "application/x-www-form-urlencoded", bytes.NewBufferString(txHex)) if err != nil { - common.ErrorLogger.Printf("Failed to make request: %s\n", err) + logging.L.Err(err).Msg("error forwarding tx to mempool") return err } defer resp.Body.Close() body, err := io.ReadAll(resp.Body) if err != nil { - common.ErrorLogger.Printf("Failed to read response: %s\n", err) + logging.L.Err(err).Msg("error reading response") return err } - common.DebugLogger.Println("Response:", string(body)) + logging.L.Debug().Msgf("Response: %s", string(body)) return nil } diff --git a/src/server/middleware.go b/internal/server/middleware.go similarity index 77% rename from src/server/middleware.go rename to internal/server/middleware.go index 6332b18..8e1d9e2 100644 --- a/src/server/middleware.go +++ b/internal/server/middleware.go @@ -1,12 +1,12 @@ package server import ( - "SilentPaymentAppBackend/src/common" - "SilentPaymentAppBackend/src/db/dblevel" "net/http" "strconv" "github.com/gin-gonic/gin" + "github.com/setavenger/blindbit-lib/logging" + 
"github.com/setavenger/blindbit-oracle/internal/dblevel" ) func FetchHeaderInvMiddleware(c *gin.Context) { @@ -19,7 +19,7 @@ func FetchHeaderInvMiddleware(c *gin.Context) { height, err := strconv.ParseUint(heightStr, 10, 32) if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("could not parse block height") c.JSON(http.StatusBadRequest, gin.H{"error": "could not parse block height"}) c.Abort() return @@ -27,7 +27,7 @@ func FetchHeaderInvMiddleware(c *gin.Context) { headerInv, err := dblevel.FetchByBlockHeightBlockHeaderInv(uint32(height)) if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("could not fetch header inv") c.JSON(http.StatusInternalServerError, gin.H{"error": "could not fetch header inv"}) c.Abort() return diff --git a/src/server/run.go b/internal/server/run.go similarity index 78% rename from src/server/run.go rename to internal/server/run.go index 1471514..a55905d 100644 --- a/src/server/run.go +++ b/internal/server/run.go @@ -1,15 +1,18 @@ package server import ( - "SilentPaymentAppBackend/src/common" + "time" "github.com/gin-contrib/cors" "github.com/gin-contrib/gzip" "github.com/gin-gonic/gin" + + "github.com/setavenger/blindbit-lib/logging" + "github.com/setavenger/blindbit-oracle/internal/config" ) func RunServer(api *ApiHandler) { - // todo merge gin logging into common logging + // todo merge gin logging into blindbit lib logging router := gin.Default() router.Use(gzip.Gzip(gzip.DefaultCompression)) @@ -17,6 +20,7 @@ func RunServer(api *ApiHandler) { AllowOrigins: []string{"*"}, AllowMethods: []string{"GET", "PUT"}, AllowHeaders: []string{"Content-Type", "Authorization"}, + MaxAge: 12 * time.Hour, AllowCredentials: true, })) @@ -31,7 +35,7 @@ func RunServer(api *ApiHandler) { router.POST("/forward-tx", api.ForwardRawTX) - if err := router.Run(common.Host); err != nil { - common.ErrorLogger.Fatal(err) + if err := router.Run(config.HTTPHost); err != nil { + logging.L.Err(err).Msg("could not run 
server") } } diff --git a/internal/server/v2/server.go b/internal/server/v2/server.go new file mode 100644 index 0000000..3536306 --- /dev/null +++ b/internal/server/v2/server.go @@ -0,0 +1,38 @@ +package v2 + +import ( + "net" + + "github.com/setavenger/blindbit-lib/logging" + "github.com/setavenger/blindbit-lib/proto/pb" + "github.com/setavenger/blindbit-oracle/internal/config" + "google.golang.org/grpc" + "google.golang.org/grpc/reflection" +) + +func RunGRPCServer() { + // Create gRPC server + grpcServer := grpc.NewServer() + + // Register the OracleService + oracleService := NewOracleService() + pb.RegisterOracleServiceServer(grpcServer, oracleService) + + // Enable reflection for debugging (optional) + reflection.Register(grpcServer) + + // Create listener for gRPC + lis, err := net.Listen("tcp", config.GRPCHost) + if err != nil { + logging.L.Err(err).Msg("failed to listen for gRPC") + panic(err) + } + + logging.L.Info().Msgf("Starting gRPC server on host %s", config.GRPCHost) + + // Start gRPC server + if err := grpcServer.Serve(lis); err != nil { + logging.L.Err(err).Msg("failed to serve gRPC") + panic(err) + } +} diff --git a/internal/server/v2/service.go b/internal/server/v2/service.go new file mode 100644 index 0000000..54bd05b --- /dev/null +++ b/internal/server/v2/service.go @@ -0,0 +1,423 @@ +package v2 + +import ( + "context" + "encoding/hex" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/types/known/emptypb" + + "github.com/setavenger/blindbit-lib/logging" + "github.com/setavenger/blindbit-lib/proto/pb" + "github.com/setavenger/blindbit-oracle/internal/config" + "github.com/setavenger/blindbit-oracle/internal/dblevel" + "github.com/setavenger/blindbit-oracle/internal/types" +) + +// OracleService implements the gRPC OracleService interface +type OracleService struct { + pb.UnimplementedOracleServiceServer +} + +// NewOracleService creates a new OracleService instance +func NewOracleService() 
*OracleService { + return &OracleService{} +} + +// GetInfo returns oracle information +func (s *OracleService) GetInfo(ctx context.Context, _ *emptypb.Empty) (*pb.InfoResponse, error) { + lastHeader, err := dblevel.FetchHighestBlockHeaderInvByFlag(true) + if err != nil { + logging.L.Err(err).Msg("error fetching highest block header inv") + return nil, status.Errorf(codes.Internal, "could not retrieve data from database") + } + + return &pb.InfoResponse{ + Network: config.ChainToString(config.Chain), + Height: uint64(lastHeader.Height), + TweaksOnly: config.TweaksOnly, + TweaksFullBasic: config.TweakIndexFullNoDust, + TweaksFullWithDustFilter: config.TweakIndexFullIncludingDust, + TweaksCutThroughWithDustFilter: config.TweaksCutThroughWithDust, + }, nil +} + +// GetBestBlockHeight returns the current best block height +func (s *OracleService) GetBestBlockHeight( + ctx context.Context, _ *emptypb.Empty, +) (*pb.BlockHeightResponse, error) { + lastHeader, err := dblevel.FetchHighestBlockHeaderInvByFlag(true) + if err != nil { + logging.L.Err(err).Msg("error fetching highest block header inv") + return nil, status.Errorf(codes.Internal, "could not retrieve data from database") + } + + return &pb.BlockHeightResponse{ + BlockHeight: uint64(lastHeader.Height), + }, nil +} + +// GetBlockHashByHeight returns the block hash for a given height +func (s *OracleService) GetBlockHashByHeight( + ctx context.Context, req *pb.BlockHeightRequest, +) (*pb.BlockHashResponse, error) { + headerInv, err := dblevel.FetchByBlockHeightBlockHeaderInv(uint32(req.BlockHeight)) + if err != nil { + logging.L.Err(err).Msg("error fetching block header inv") + return nil, status.Errorf(codes.Internal, "could not retrieve block data") + } + + return &pb.BlockHashResponse{ + BlockHash: headerInv.Hash[:], + }, nil +} + +// GetTweakArray returns tweaks for a specific block height +func (s *OracleService) GetTweakArray(ctx context.Context, req *pb.BlockHeightRequest) (*pb.TweakArray, error) { + 
headerInv, err := dblevel.FetchByBlockHeightBlockHeaderInv(uint32(req.BlockHeight)) + if err != nil { + logging.L.Err(err).Msg("error fetching block header inv") + return nil, status.Errorf(codes.Internal, "could not retrieve block data") + } + + tweaks, err := dblevel.FetchByBlockHashTweaks(headerInv.Hash) + if err != nil { + logging.L.Err(err).Msg("error fetching tweaks") + return nil, status.Errorf(codes.Internal, "could not retrieve tweak data") + } + + // Convert tweaks to bytes + tweakBytes := make([][]byte, len(tweaks)) + for i, tweak := range tweaks { + tweakBytes[i] = tweak.TweakData[:] + } + + return &pb.TweakArray{ + BlockIdentifier: &pb.BlockIdentifier{ + BlockHash: headerInv.Hash[:], + BlockHeight: uint64(headerInv.Height), + }, + Tweaks: tweakBytes, + }, nil +} + +// GetTweakIndexArray returns tweak index data for a specific block height +func (s *OracleService) GetTweakIndexArray( + ctx context.Context, req *pb.GetTweakIndexRequest, +) (*pb.TweakArray, error) { + headerInv, err := dblevel.FetchByBlockHeightBlockHeaderInv(uint32(req.BlockHeight)) + if err != nil { + logging.L.Err(err).Msg("error fetching block header inv") + return nil, status.Errorf(codes.Internal, "could not retrieve block data") + } + + var tweaks [][33]byte + + if req.DustLimit > 0 { + // todo: think about adding not supported error + var rawTweaks []types.Tweak + rawTweaks, err = dblevel.FetchByBlockHashDustLimitTweaks(headerInv.Hash, req.DustLimit) + if err != nil { + return nil, status.Errorf(codes.Internal, "could not retrieve dusted tweak data") + } + tweaks = make([][33]byte, len(rawTweaks)) + for i := range rawTweaks { + tweaks[i] = rawTweaks[i].TweakData + } + } else { + var tweakIndex *types.TweakIndex + tweakIndex, err = dblevel.FetchByBlockHashTweakIndex(headerInv.Hash) + if err == nil { + tweaks = tweakIndex.Data + } + } + + if err != nil { + logging.L.Err(err).Msg("error fetching tweak index") + return nil, status.Errorf(codes.Internal, "could not retrieve tweak index 
data") + } + + // Convert tweaks to bytes + tweakBytes := make([][]byte, len(tweaks)) + for i, tweak := range tweaks { + tweakBytes[i] = tweak[:] + } + + return &pb.TweakArray{ + BlockIdentifier: &pb.BlockIdentifier{ + BlockHash: headerInv.Hash[:], + BlockHeight: uint64(headerInv.Height), + }, + Tweaks: tweakBytes, + }, nil +} + +// GetUTXOArray returns UTXOs for a specific block height +func (s *OracleService) GetUTXOArray(ctx context.Context, req *pb.BlockHeightRequest) (*pb.UTXOArrayResponse, error) { + headerInv, err := dblevel.FetchByBlockHeightBlockHeaderInv(uint32(req.BlockHeight)) + if err != nil { + logging.L.Err(err).Msg("error fetching block header inv") + return nil, status.Errorf(codes.Internal, "could not retrieve block data") + } + + utxos, err := dblevel.FetchByBlockHashUTXOs(headerInv.Hash) + if err != nil { + logging.L.Err(err).Msg("error fetching UTXOs") + return nil, status.Errorf(codes.Internal, "could not retrieve UTXO data") + } + + // Convert internal UTXO types to protobuf types + pbUtxos := make([]*pb.UTXO, len(utxos)) + for i, utxo := range utxos { + // todo: g in and change scriptpubKey to at least Byte slice if not even array + scriptPubKeyBytes, _ := hex.DecodeString(utxo.ScriptPubKey) + pbUtxos[i] = &pb.UTXO{ + Txid: utxo.Txid[:], + Vout: utxo.Vout, + Value: utxo.Value, + ScriptPubKey: scriptPubKeyBytes, + BlockHeight: uint64(utxo.BlockHeight), + BlockHash: utxo.BlockHash[:], + Timestamp: utxo.Timestamp, + Spent: utxo.Spent, + } + } + + return &pb.UTXOArrayResponse{ + BlockIdentifier: &pb.BlockIdentifier{ + BlockHash: headerInv.Hash[:], + BlockHeight: uint64(headerInv.Height), + }, + Utxos: pbUtxos, + }, nil +} + +// GetFilter returns filter data for a specific block height and type +func (s *OracleService) GetFilter(ctx context.Context, req *pb.GetFilterRequest) (*pb.FilterRepsonse, error) { + headerInv, err := dblevel.FetchByBlockHeightBlockHeaderInv(uint32(req.BlockHeight)) + if err != nil { + logging.L.Err(err).Msg("error fetching 
block header inv") + return nil, status.Errorf(codes.Internal, "could not retrieve block data") + } + + var filter types.Filter + var err2 error + + switch req.FilterType { + case pb.FilterType_FILTER_TYPE_SPENT: + filter, err2 = dblevel.FetchByBlockHashSpentOutpointsFilter(headerInv.Hash) + case pb.FilterType_FILTER_TYPE_NEW_UTXOS: + filter, err2 = dblevel.FetchByBlockHashNewUTXOsFilter(headerInv.Hash) + default: + return nil, status.Errorf(codes.InvalidArgument, "invalid filter type") + } + + if err2 != nil { + logging.L.Err(err2).Msg("error fetching filter") + return nil, status.Errorf(codes.Internal, "could not retrieve filter data") + } + + return &pb.FilterRepsonse{ + BlockIdentifier: &pb.BlockIdentifier{ + BlockHash: headerInv.Hash[:], + BlockHeight: uint64(headerInv.Height), + }, + FilterData: &pb.FilterData{ + FilterType: req.FilterType, + Data: filter.Data, + }, + }, nil +} + +// GetSpentOutpointsIndex returns spent outpoints index for a specific block height +func (s *OracleService) GetSpentOutpointsIndex(ctx context.Context, req *pb.BlockHeightRequest) (*pb.SpentOutpointsIndexResponse, error) { + headerInv, err := dblevel.FetchByBlockHeightBlockHeaderInv(uint32(req.BlockHeight)) + if err != nil { + logging.L.Err(err).Msg("error fetching block header inv") + return nil, status.Errorf(codes.Internal, "could not retrieve block data") + } + + spentOutpoints, err := dblevel.FetchByBlockHashSpentOutpointIndex(headerInv.Hash) + if err != nil { + logging.L.Err(err).Msg("error fetching spent outpoints index") + return nil, status.Errorf(codes.Internal, "could not retrieve spent outpoints data") + } + + spentOutpointsSliced := make([][]byte, len(spentOutpoints.Data)) + for i := range spentOutpointsSliced { + spentOutpointsSliced[i] = spentOutpoints.Data[i][:] + } + + return &pb.SpentOutpointsIndexResponse{ + BlockIdentifier: &pb.BlockIdentifier{ + BlockHash: headerInv.Hash[:], + BlockHeight: uint64(headerInv.Height), + }, + Data: spentOutpointsSliced, + }, nil +} 
+ +// StreamBlockBatchSlim streams lightweight block batches for efficient processing +func (s *OracleService) StreamBlockBatchSlim( + req *pb.RangedBlockHeightRequest, + stream pb.OracleService_StreamBlockBatchSlimServer, +) error { + logging.L.Info().Msgf("streaming slim batches from %d to %d", req.Start, req.End) + for height := req.Start; height <= req.End; height++ { + headerInv, err := dblevel.FetchByBlockHeightBlockHeaderInv(uint32(height)) + if err != nil { + logging.L.Err(err).Msg("error fetching block header inv") + return status.Errorf(codes.Internal, "could not retrieve block data for height %d", height) + } + + // Fetch filters + spentFilter, err := dblevel.FetchByBlockHashSpentOutpointsFilter(headerInv.Hash) + if err != nil { + logging.L.Err(err).Msg("error fetching spent filter") + return status.Errorf(codes.Internal, "could not retrieve spent filter for height %d", height) + } + + newUtxosFilter, err := dblevel.FetchByBlockHashNewUTXOsFilter(headerInv.Hash) + if err != nil { + logging.L.Err(err).Msg("error fetching new UTXOs filter") + return status.Errorf(codes.Internal, "could not retrieve new UTXOs filter for height %d", height) + } + + // Fetch tweaks + // todo: make dependant on which index is supported + // for now it's always full basic + var tweakIndex *types.TweakIndex + tweakIndex, err = dblevel.FetchByBlockHashTweakIndex(headerInv.Hash) + if err != nil { + logging.L.Err(err).Uint64("block_height", height).Msg("failed to pull tweaks") + return status.Errorf(codes.Internal, "failed to pull tweak index for height %d", height) + } + + // Convert tweaks to bytes + tweakBytes := make([][]byte, len(tweakIndex.Data)) + for i, tweak := range tweakIndex.Data { + tweakBytes[i] = tweak[:] + } + + batch := &pb.BlockBatchSlim{ + BlockIdentifier: &pb.BlockIdentifier{ + BlockHash: headerInv.Hash[:], + BlockHeight: uint64(headerInv.Height), + }, + Tweaks: tweakBytes, + NewUtxosFilter: &pb.FilterData{ + FilterType: pb.FilterType_FILTER_TYPE_NEW_UTXOS, 
Data: newUtxosFilter.Data, + }, + SpentUtxosFilter: &pb.FilterData{ + FilterType: pb.FilterType_FILTER_TYPE_SPENT, Data: spentFilter.Data, + }, + } + + if err := stream.Send(batch); err != nil { + logging.L.Err(err).Msg("error sending block batch") + return status.Errorf(codes.Internal, "failed to send block batch for height %d", height) + } + } + + return nil +} + +// StreamBlockBatchFull streams complete block batches with all data +func (s *OracleService) StreamBlockBatchFull( + req *pb.RangedBlockHeightRequest, stream pb.OracleService_StreamBlockBatchFullServer, +) error { + for height := req.Start; height <= req.End; height++ { + select { + case <-stream.Context().Done(): + logging.L.Debug().Msg("stream context cancelled") + return nil + default: + } + headerInv, err := dblevel.FetchByBlockHeightBlockHeaderInv(uint32(height)) + if err != nil { + logging.L.Err(err).Msg("error fetching block header inv") + return status.Errorf(codes.Internal, "could not retrieve block data for height %d", height) + } + + // Fetch all data for this block + // todo: make dependant on which index is supported + // for now it's always full basic + var tweakIndex *types.TweakIndex + tweakIndex, err = dblevel.FetchByBlockHashTweakIndex(headerInv.Hash) + if err != nil { + logging.L.Err(err).Uint64("block_height", height).Msg("failed to pull tweaks") + return status.Errorf(codes.Internal, "failed to pull tweak index for height %d", height) + } + + utxos, err := dblevel.FetchByBlockHashUTXOs(headerInv.Hash) + if err != nil { + logging.L.Err(err).Msg("error fetching UTXOs") + return status.Errorf(codes.Internal, "could not retrieve UTXOs for height %d", height) + } + + spentFilter, err := dblevel.FetchByBlockHashSpentOutpointsFilter(headerInv.Hash) + if err != nil { + logging.L.Err(err).Msg("error fetching spent filter") + return status.Errorf(codes.Internal, "could not retrieve spent filter for height %d", height) + } + + newUtxosFilter, err := 
dblevel.FetchByBlockHashNewUTXOsFilter(headerInv.Hash) + if err != nil { + logging.L.Err(err).Msg("error fetching new UTXOs filter") + return status.Errorf(codes.Internal, "could not retrieve new UTXOs filter for height %d", height) + } + + spentOutpoints, err := dblevel.FetchByBlockHashSpentOutpointIndex(headerInv.Hash) + if err != nil { + logging.L.Err(err).Msg("error fetching spent outpoints") + return status.Errorf(codes.Internal, "could not retrieve spent outpoints for height %d", height) + } + + // Convert tweaks to bytes + tweakBytes := make([][]byte, len(tweakIndex.Data)) + for i, tweak := range tweakIndex.Data { + tweakBytes[i] = tweak[:] + } + + // Convert UTXOs to protobuf format + pbUtxos := make([]*pb.UTXO, len(utxos)) + for i, utxo := range utxos { + scripPubKeyBytes, _ := hex.DecodeString(utxo.ScriptPubKey) + pbUtxos[i] = &pb.UTXO{ + Txid: utxo.Txid[:], + Vout: uint32(utxo.Vout), + Value: utxo.Value, + ScriptPubKey: scripPubKeyBytes, + BlockHeight: uint64(utxo.BlockHeight), + BlockHash: utxo.BlockHash[:], + Timestamp: utxo.Timestamp, + Spent: utxo.Spent, + } + } + + spentOutpointsSliced := make([][]byte, len(spentOutpoints.Data)) + for i := range spentOutpointsSliced { + spentOutpointsSliced[i] = spentOutpoints.Data[i][:] + } + + batch := &pb.BlockBatchFull{ + BlockIdentifier: &pb.BlockIdentifier{ + BlockHash: headerInv.Hash[:], + BlockHeight: uint64(headerInv.Height), + }, + Tweaks: tweakBytes, + Utxos: pbUtxos, + NewUtxosFilter: &pb.FilterData{FilterType: pb.FilterType_FILTER_TYPE_NEW_UTXOS, Data: newUtxosFilter.Data}, + SpentUtxosFilter: &pb.FilterData{FilterType: pb.FilterType_FILTER_TYPE_SPENT, Data: spentFilter.Data}, + SpentUtxos: spentOutpointsSliced, + } + + if err := stream.Send(batch); err != nil { + logging.L.Err(err).Msg("error sending block batch") + return status.Errorf(codes.Internal, "failed to send block batch for height %d", height) + } + } + + return nil +} diff --git a/src/testhelpers/testhelpers.go 
b/internal/testhelpers/testhelpers.go similarity index 94% rename from src/testhelpers/testhelpers.go rename to internal/testhelpers/testhelpers.go index d7093c9..217b278 100644 --- a/src/testhelpers/testhelpers.go +++ b/internal/testhelpers/testhelpers.go @@ -1,14 +1,15 @@ package testhelpers import ( - "SilentPaymentAppBackend/src/common" - "SilentPaymentAppBackend/src/common/types" "encoding/hex" "encoding/json" "fmt" "log" "os" "testing" + + "github.com/setavenger/blindbit-lib/logging" + "github.com/setavenger/blindbit-oracle/internal/types" ) type TestCase struct { @@ -61,7 +62,7 @@ func TransformTestCaseDetailToTransaction(detail TestCaseDetail) (types.Transact for _, vinDetail := range detail.Given.Vin { witnessScript, err := parseWitnessScript(vinDetail.Txinwitness) if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("could not parse witness script") return types.Transaction{}, err } vin := types.Vin{ @@ -88,7 +89,7 @@ func TransformTestCaseDetailToTransaction(detail TestCaseDetail) (types.Transact } func LoadCaseData(t *testing.T) ([]TestCase, error) { - filePath := "../test_data/send_and_receive_test_vectors_with_type.json" + filePath := "../../test_data/send_and_receive_test_vectors_with_type.json" // Read the JSON file data, err := os.ReadFile(filePath) @@ -145,7 +146,7 @@ func parseWitnessScript(script string) ([]string, error) { // Decode the hex-encoded script data, err := hex.DecodeString(script) if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("could not decode hex-encoded script") return nil, err } diff --git a/src/common/types/blockheader.go b/internal/types/blockheader.go similarity index 78% rename from src/common/types/blockheader.go rename to internal/types/blockheader.go index 95ee310..bf12e79 100644 --- a/src/common/types/blockheader.go +++ b/internal/types/blockheader.go @@ -1,11 +1,12 @@ package types import ( - "SilentPaymentAppBackend/src/common" "bytes" "encoding/binary" "encoding/hex" 
"errors" + + "github.com/setavenger/blindbit-lib/logging" ) // BlockHeader struct to hold relevant BlockHeader data @@ -32,17 +33,17 @@ func (v *BlockHeader) SerialiseData() ([]byte, error) { err := binary.Write(&buf, binary.BigEndian, v.Timestamp) if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error serialising block header") return nil, err } err = binary.Write(&buf, binary.BigEndian, v.Height) if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error serialising block header") return nil, err } blockHashBytes, err := hex.DecodeString(v.PrevBlockHash) if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error serialising block header") return nil, err } buf.Write(blockHashBytes) @@ -52,8 +53,9 @@ func (v *BlockHeader) SerialiseData() ([]byte, error) { func (v *BlockHeader) DeSerialiseKey(key []byte) error { if len(key) != 32 { - common.ErrorLogger.Printf("wrong key length: %+v", key) - return errors.New("key is wrong length. should not happen") + err := errors.New("key is wrong length. 
should not happen") + logging.L.Err(err).Hex("key", key).Msg("wrong key length") + return err } v.Hash = hex.EncodeToString(key) @@ -64,12 +66,12 @@ func (v *BlockHeader) DeSerialiseKey(key []byte) error { func (v *BlockHeader) DeSerialiseData(data []byte) error { err := binary.Read(bytes.NewReader(data[:8]), binary.BigEndian, &v.Timestamp) if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error deserialising block header") return err } err = binary.Read(bytes.NewReader(data[8:12]), binary.BigEndian, &v.Height) if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error deserialising block header") return err } v.Hash = hex.EncodeToString(data[12:]) diff --git a/src/common/types/blockheaderinv.go b/internal/types/blockheaderinv.go similarity index 63% rename from src/common/types/blockheaderinv.go rename to internal/types/blockheaderinv.go index 1de1642..8ab6cae 100644 --- a/src/common/types/blockheaderinv.go +++ b/internal/types/blockheaderinv.go @@ -2,18 +2,18 @@ package types import ( - "SilentPaymentAppBackend/src/common" "bytes" "encoding/binary" - "encoding/hex" "errors" + + "github.com/setavenger/blindbit-lib/logging" ) // BlockHeaderInv struct to hold the inverse BlockHeader data. // Required because we need different Serialisation for Pair interface // todo change naming to be consistent? 
type BlockHeaderInv struct { - Hash string + Hash [32]byte Height uint32 Flag bool // indicates whether this Block has been processed } @@ -29,31 +29,28 @@ func (v *BlockHeaderInv) SerialiseKey() ([]byte, error) { } func (v *BlockHeaderInv) SerialiseData() ([]byte, error) { + // todo: this should be optimisable by using a fixed size byte arrays var buf bytes.Buffer err := binary.Write(&buf, binary.BigEndian, v.Flag) if err != nil { - common.ErrorLogger.Println(err) - return nil, err - } - blockHashBytes, err := hex.DecodeString(v.Hash) - if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error serialising block header inv") return nil, err } - buf.Write(blockHashBytes) + buf.Write(v.Hash[:]) return buf.Bytes(), nil } func (v *BlockHeaderInv) DeSerialiseKey(key []byte) error { if len(key) != 4 { - common.ErrorLogger.Printf("wrong key length: %+v\n", key) - return errors.New("key is wrong length. should not happen") + err := errors.New("key is wrong length. should not happen") + logging.L.Err(err).Hex("key", key).Msg("wrong key length") + return err } err := binary.Read(bytes.NewReader(key[:4]), binary.BigEndian, &v.Height) if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error deserialising block header inv") return err } @@ -62,25 +59,21 @@ func (v *BlockHeaderInv) DeSerialiseKey(key []byte) error { func (v *BlockHeaderInv) DeSerialiseData(data []byte) error { if len(data) != 1+32 { - common.ErrorLogger.Printf("wrong data length: %+v", data) - return errors.New("data is wrong length. should not happen") + err := errors.New("data is wrong length. 
should not happen") + logging.L.Err(err).Hex("data", data).Msg("wrong data length") + return err } err := binary.Read(bytes.NewReader(data[:1]), binary.BigEndian, &v.Flag) if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error deserialising block header inv") return err } - v.Hash = hex.EncodeToString(data[1:]) + copy(v.Hash[:], data[1:]) return nil } func GetKeyBlockHeaderInv(height uint32) ([]byte, error) { - var buf bytes.Buffer - - err := binary.Write(&buf, binary.BigEndian, height) - if err != nil { - common.ErrorLogger.Println(err) - return nil, err - } - return buf.Bytes(), nil + var key [4]byte + binary.BigEndian.PutUint32(key[:], height) + return key[:], nil } diff --git a/src/common/types/filter.go b/internal/types/filter.go similarity index 54% rename from src/common/types/filter.go rename to internal/types/filter.go index fede7fc..6a8e5b8 100644 --- a/src/common/types/filter.go +++ b/internal/types/filter.go @@ -1,18 +1,18 @@ package types import ( - "SilentPaymentAppBackend/src/common" "bytes" "encoding/binary" - "encoding/hex" "errors" + + "github.com/setavenger/blindbit-lib/logging" ) type Filter struct { - FilterType uint8 `json:"filter_type"` - BlockHeight uint32 `json:"block_height"` - Data []byte `json:"data"` - BlockHash string `json:"block_hash"` + FilterType uint8 `json:"filter_type"` + BlockHeight uint32 `json:"block_height"` + Data []byte `json:"data"` + BlockHash [32]byte `json:"block_hash"` } func PairFactoryFilter() Pair { @@ -30,7 +30,7 @@ func (v *Filter) SerialiseData() ([]byte, error) { // start with filter type as that's fixed length err := binary.Write(&buf, binary.BigEndian, v.FilterType) if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error serialising filter") return nil, err } @@ -40,11 +40,12 @@ func (v *Filter) SerialiseData() ([]byte, error) { func (v *Filter) DeSerialiseKey(key []byte) error { if len(key) != 32 { - common.ErrorLogger.Printf("wrong key length: %+v", key) 
- return errors.New("key is wrong length. should not happen") + err := errors.New("key is wrong length. should not happen") + logging.L.Err(err).Hex("key", key).Msg("wrong key length") + return err } // The block hash is fixed length, decode the block hash part - v.BlockHash = hex.EncodeToString(key) + copy(v.BlockHash[:], key) return nil } @@ -55,13 +56,6 @@ func (v *Filter) DeSerialiseData(data []byte) error { return nil } -func GetDBKeyFilter(blockHash string) ([]byte, error) { - var buf bytes.Buffer - blockHashBytes, err := hex.DecodeString(blockHash) - if err != nil { - common.ErrorLogger.Println(err) - return nil, err - } - buf.Write(blockHashBytes) - return buf.Bytes(), nil +func GetDBKeyFilter(blockHash [32]byte) ([]byte, error) { + return blockHash[:], nil } diff --git a/src/common/types/pair.go b/internal/types/pair.go similarity index 100% rename from src/common/types/pair.go rename to internal/types/pair.go diff --git a/src/common/types/spentoutpoints.go b/internal/types/spentoutpoints.go similarity index 63% rename from src/common/types/spentoutpoints.go rename to internal/types/spentoutpoints.go index 57fa64a..a63754e 100644 --- a/src/common/types/spentoutpoints.go +++ b/internal/types/spentoutpoints.go @@ -1,17 +1,15 @@ package types import ( - "bytes" - "encoding/hex" "errors" - "SilentPaymentAppBackend/src/common" + "github.com/setavenger/blindbit-lib/logging" ) const LenOutpointHashShort = 8 type SpentOutpointsIndex struct { - BlockHash string `json:"block_hash"` + BlockHash [32]byte `json:"block_hash"` BlockHeight uint32 `json:"block_height"` Data [][LenOutpointHashShort]byte `json:"data"` } @@ -22,7 +20,7 @@ func PairFactorySpentOutpointsIndex() Pair { } func (v *SpentOutpointsIndex) SerialiseKey() ([]byte, error) { - return GetDBKeyTweakIndex(v.BlockHash) + return GetDBKeySpentOutpointsIndex(v.BlockHash) } func (v *SpentOutpointsIndex) SerialiseData() ([]byte, error) { @@ -40,19 +38,21 @@ func (v *SpentOutpointsIndex) SerialiseData() ([]byte, 
error) { func (v *SpentOutpointsIndex) DeSerialiseKey(key []byte) error { if len(key) != 32 { - common.ErrorLogger.Printf("wrong key length: %+v", key) - return errors.New("key is wrong length. should not happen") + err := errors.New("key is wrong length. should not happen") + logging.L.Err(err).Hex("key", key).Msg("wrong key length") + return err } - v.BlockHash = hex.EncodeToString(key) + copy(v.BlockHash[:], key) return nil } func (v *SpentOutpointsIndex) DeSerialiseData(data []byte) error { if len(data)%LenOutpointHashShort != 0 { - common.ErrorLogger.Printf("wrong data length: %+v", data) - return errors.New("data is wrong length. should not happen") + err := errors.New("data is wrong length. should not happen") + logging.L.Err(err).Hex("data", data).Msg("wrong data length") + return err } numArrays := len(data) / LenOutpointHashShort @@ -64,14 +64,6 @@ func (v *SpentOutpointsIndex) DeSerialiseData(data []byte) error { return nil } -func GetDBKeySpentSpentOutpointsIndex(blockHash string) ([]byte, error) { - var buf bytes.Buffer - blockHashBytes, err := hex.DecodeString(blockHash) - if err != nil { - common.ErrorLogger.Println(err) - return nil, err - } - buf.Write(blockHashBytes) - - return buf.Bytes(), nil +func GetDBKeySpentOutpointsIndex(blockHash [32]byte) ([]byte, error) { + return blockHash[:], nil } diff --git a/src/common/types/tweak.go b/internal/types/tweak.go similarity index 61% rename from src/common/types/tweak.go rename to internal/types/tweak.go index 862a86f..de21062 100644 --- a/src/common/types/tweak.go +++ b/internal/types/tweak.go @@ -1,20 +1,20 @@ package types import ( - "SilentPaymentAppBackend/src/common" "bytes" "encoding/binary" - "encoding/hex" "errors" + + "github.com/setavenger/blindbit-lib/logging" ) const TweakDataLength = 33 type Tweak struct { - BlockHash string `json:"block_hash"` + BlockHash [32]byte `json:"block_hash"` // BlockHeight todo not really used at the moment, could be added on a per request basis in the API 
handler BlockHeight uint32 `json:"block_height"` - Txid string `json:"txid"` + Txid [32]byte `json:"txid"` TweakData [TweakDataLength]byte `json:"tweak_data"` // HighestValue indicates the value of the UTXO with the most value for a specific tweak HighestValue uint64 @@ -36,7 +36,7 @@ func (v *Tweak) SerialiseData() ([]byte, error) { err := binary.Write(&buf, binary.BigEndian, v.HighestValue) if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error serialising tweak") return nil, err } return buf.Bytes(), nil @@ -44,12 +44,13 @@ func (v *Tweak) SerialiseData() ([]byte, error) { func (v *Tweak) DeSerialiseKey(key []byte) error { if len(key) != 64 { - common.ErrorLogger.Printf("wrong key length: %+v", key) - return errors.New("key is wrong length. should not happen") + err := errors.New("key is wrong length. should not happen") + logging.L.Err(err).Hex("key", key).Msg("wrong key length") + return err } - v.BlockHash = hex.EncodeToString(key[:32]) - v.Txid = hex.EncodeToString(key[32:]) + copy(v.BlockHash[:], key[:32]) + copy(v.Txid[:], key[32:]) return nil } @@ -57,8 +58,9 @@ func (v *Tweak) DeSerialiseKey(key []byte) error { func (v *Tweak) DeSerialiseData(data []byte) error { // todo why did we check both dusted and non dusted if len(data) != TweakDataLength+8 { - common.ErrorLogger.Printf("wrong data length: %+v", data) - return errors.New("data is wrong length. should not happen") + err := errors.New("data is wrong length. should not happen") + logging.L.Err(err).Hex("data", data).Msg("wrong data length") + return err } copy(v.TweakData[:], data[:TweakDataLength]) @@ -66,27 +68,16 @@ func (v *Tweak) DeSerialiseData(data []byte) error { // revoke: if the data is not there it seems like an implementation error. 
prior, where dust was an option it made sense err := binary.Read(bytes.NewReader(data[TweakDataLength:]), binary.BigEndian, &v.HighestValue) if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error deserialising tweak") return err } return nil } -func GetDBKeyTweak(blockHash, txid string) ([]byte, error) { - var buf bytes.Buffer - blockHashBytes, err := hex.DecodeString(blockHash) - if err != nil { - common.ErrorLogger.Println(err) - return nil, err - } - txidBytes, err := hex.DecodeString(txid) - if err != nil { - common.ErrorLogger.Println(err) - return nil, err - } - buf.Write(blockHashBytes) - buf.Write(txidBytes) - - return buf.Bytes(), nil +func GetDBKeyTweak(blockHash [32]byte, txid [32]byte) ([]byte, error) { + key := make([]byte, 64) + copy(key[:32], blockHash[:]) + copy(key[32:], txid[:]) + return key, nil } diff --git a/src/common/types/tweak_interface.go b/internal/types/tweak_interface.go similarity index 100% rename from src/common/types/tweak_interface.go rename to internal/types/tweak_interface.go diff --git a/src/common/types/tweakindex.go b/internal/types/tweakindex.go similarity index 77% rename from src/common/types/tweakindex.go rename to internal/types/tweakindex.go index d3ad15d..eab63ee 100644 --- a/src/common/types/tweakindex.go +++ b/internal/types/tweakindex.go @@ -1,17 +1,16 @@ package types import ( - "SilentPaymentAppBackend/src/common" - "bytes" - "encoding/hex" "errors" + + "github.com/setavenger/blindbit-lib/logging" ) // TweakIndex stores a full index per blockHash and not separate entries like Tweak // there is no transaction cut-through, so it will keep a full history. // The tweaks will most likely not be sorted in any meaningful way and have no metadata attached. 
type TweakIndex struct { - BlockHash string `json:"block_hash"` + BlockHash [32]byte `json:"block_hash"` BlockHeight uint32 `json:"block_height"` Data [][TweakDataLength]byte `json:"data"` } @@ -27,7 +26,6 @@ func (v *TweakIndex) SerialiseKey() ([]byte, error) { } func (v *TweakIndex) SerialiseData() ([]byte, error) { - // todo can this be made more efficiently? totalLength := len(v.Data) * TweakDataLength flattened := make([]byte, 0, totalLength) @@ -41,19 +39,21 @@ func (v *TweakIndex) SerialiseData() ([]byte, error) { func (v *TweakIndex) DeSerialiseKey(key []byte) error { if len(key) != 32 { - common.ErrorLogger.Printf("wrong key length: %+v", key) - return errors.New("key is wrong length. should not happen") + err := errors.New("key is wrong length. should not happen") + logging.L.Err(err).Hex("key", key).Msg("wrong key length") + return err } - v.BlockHash = hex.EncodeToString(key) + copy(v.BlockHash[:], key) return nil } func (v *TweakIndex) DeSerialiseData(data []byte) error { if len(data)%TweakDataLength != 0 { - common.ErrorLogger.Printf("wrong data length: %+v", data) - return errors.New("data is wrong length. should not happen") + err := errors.New("data is wrong length. 
should not happen") + logging.L.Err(err).Hex("data", data).Msg("wrong data length") + return err } numArrays := len(data) / TweakDataLength @@ -87,21 +87,13 @@ func (v *TweakIndex) ToTweakArray() (tweaks []Tweak) { tweaks = append(tweaks, Tweak{ BlockHash: v.BlockHash, BlockHeight: v.BlockHeight, - Txid: "", + Txid: [32]byte{}, TweakData: data, }) } return } -func GetDBKeyTweakIndex(blockHash string) ([]byte, error) { - var buf bytes.Buffer - blockHashBytes, err := hex.DecodeString(blockHash) - if err != nil { - common.ErrorLogger.Println(err) - return nil, err - } - buf.Write(blockHashBytes) - - return buf.Bytes(), nil +func GetDBKeyTweakIndex(blockHash [32]byte) ([]byte, error) { + return blockHash[:], nil } diff --git a/src/common/types/tweakindex_dust.go b/internal/types/tweakindex_dust.go similarity index 81% rename from src/common/types/tweakindex_dust.go rename to internal/types/tweakindex_dust.go index fa6abb1..d28d96f 100644 --- a/src/common/types/tweakindex_dust.go +++ b/internal/types/tweakindex_dust.go @@ -1,11 +1,11 @@ package types import ( - "SilentPaymentAppBackend/src/common" "bytes" "encoding/binary" - "encoding/hex" "errors" + + "github.com/setavenger/blindbit-lib/logging" ) // TweakIndexDust stores a full index per blockHash and not separate entries like Tweak @@ -13,7 +13,7 @@ import ( // The tweaks will most likely not be sorted in any meaningful way and have no metadata attached. 
// TweakIndexDust differs from TweakIndex as it has the highest value per tx stored as well type TweakIndexDust struct { - BlockHash string `json:"block_hash"` + BlockHash [32]byte `json:"block_hash"` BlockHeight uint32 `json:"block_height"` Data []TweakDusted `json:"data"` } @@ -60,7 +60,7 @@ func (v *TweakIndexDust) SerialiseData() ([]byte, error) { buffer.Write(data[:]) err := binary.Write(&buffer, binary.BigEndian, tweakDusted.HighestValue()) if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error serialising tweak index dust") return nil, err } flattened = append(flattened, buffer.Bytes()...) @@ -71,19 +71,21 @@ func (v *TweakIndexDust) SerialiseData() ([]byte, error) { func (v *TweakIndexDust) DeSerialiseKey(key []byte) error { if len(key) != 32 { - common.ErrorLogger.Printf("wrong key length: %+v", key) - return errors.New("key is wrong length. should not happen") + err := errors.New("key is wrong length. should not happen") + logging.L.Err(err).Hex("key", key).Msg("wrong key length") + return err } - v.BlockHash = hex.EncodeToString(key) + copy(v.BlockHash[:], key) return nil } func (v *TweakIndexDust) DeSerialiseData(data []byte) error { if len(data)%lengthDataTweakIndexDust != 0 { - common.ErrorLogger.Printf("wrong data length: %+v", data) - return errors.New("data is wrong length. should not happen") + err := errors.New("data is wrong length. 
should not happen") + logging.L.Err(err).Hex("data", data).Msg("wrong data length") + return err } numArrays := len(data) / lengthDataTweakIndexDust @@ -98,7 +100,7 @@ func (v *TweakIndexDust) DeSerialiseData(data []byte) error { copy(tweakDusted.Data[:], data[idx:idx+TweakDataLength]) err := binary.Read(bytes.NewReader(data[idx+TweakDataLength:idx+TweakDataLength+8]), binary.BigEndian, &tweakDusted.Value) if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error deserialising tweak index dust") return err } v.Data[i] = tweakDusted @@ -131,7 +133,7 @@ func (v *TweakIndexDust) ToTweakArray() []Tweak { tweaks = append(tweaks, Tweak{ BlockHash: v.BlockHash, BlockHeight: v.BlockHeight, - Txid: "", // cannot be determined as it's not stored in the index + Txid: [32]byte{}, // cannot be determined as it's not stored in the index TweakData: data.Tweak(), HighestValue: data.HighestValue(), }) @@ -139,14 +141,6 @@ func (v *TweakIndexDust) ToTweakArray() []Tweak { return tweaks } -func GetDBKeyTweakIndexDust(blockHash string) ([]byte, error) { - var buf bytes.Buffer - blockHashBytes, err := hex.DecodeString(blockHash) - if err != nil { - common.ErrorLogger.Println(err) - return nil, err - } - buf.Write(blockHashBytes) - - return buf.Bytes(), nil +func GetDBKeyTweakIndexDust(blockHash [32]byte) ([]byte, error) { + return blockHash[:], nil } diff --git a/src/common/types/types.go b/internal/types/types.go similarity index 90% rename from src/common/types/types.go rename to internal/types/types.go index f0dc03b..fcfa953 100644 --- a/src/common/types/types.go +++ b/internal/types/types.go @@ -61,10 +61,10 @@ type ScriptSig struct { // RPC Types type RPCRequest struct { - JSONRPC string `json:"jsonrpc"` - ID string `json:"id"` - Method string `json:"method"` - Params []interface{} `json:"params"` + JSONRPC string `json:"jsonrpc"` + ID string `json:"id"` + Method string `json:"method"` + Params []any `json:"params"` } type ErrorRPC string @@ -96,10 +96,16 
@@ type BlockHeaderRPC struct { NextBlockHash string `json:"nextblockhash"` } +type RPCResponseHighestHash struct { + ID string `json:"id"` + Result string `json:"result,omitempty"` + Error ErrorRPC `json:"error,omitempty"` +} + type RPCResponseBlockchainInfo struct { ID string `json:"id"` Result BlockchainInfo `json:"result,omitempty"` - Error interface{} `json:"error,omitempty"` + Error any `json:"error,omitempty"` } // BlockchainInfo represents the structure of the blockchain information diff --git a/src/common/types/utxo.go b/internal/types/utxo.go similarity index 61% rename from src/common/types/utxo.go rename to internal/types/utxo.go index 8c0036f..7f6a300 100644 --- a/src/common/types/utxo.go +++ b/internal/types/utxo.go @@ -1,11 +1,12 @@ package types import ( - "SilentPaymentAppBackend/src/common" "bytes" "encoding/binary" "encoding/hex" "errors" + + "github.com/setavenger/blindbit-lib/logging" ) // UTXO @@ -13,14 +14,14 @@ import ( // // unused fields could just be omitted from serialisation and de-serialisation type UTXO struct { - Txid string `json:"txid"` - Vout uint32 `json:"vout"` - Value uint64 `json:"value"` - ScriptPubKey string `json:"scriptpubkey"` - BlockHeight uint32 `json:"block_height"` // not used - BlockHash string `json:"block_hash"` - Timestamp uint64 `json:"timestamp"` // not used - Spent bool `json:"spent"` + Txid [32]byte `json:"txid"` + Vout uint32 `json:"vout"` + Value uint64 `json:"value"` + ScriptPubKey string `json:"scriptpubkey"` + BlockHeight uint32 `json:"block_height"` // not used + BlockHash [32]byte `json:"block_hash"` + Timestamp uint64 `json:"timestamp"` // not used + Spent bool `json:"spent"` } const SerialisedKeyLengthUtxo = 32 + 32 + 4 @@ -39,7 +40,7 @@ func (v *UTXO) SerialiseData() ([]byte, error) { var buf bytes.Buffer scriptPubKeyBytes, err := hex.DecodeString(v.ScriptPubKey) if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Str("scriptPubKey", v.ScriptPubKey).Msg("error decoding script 
pubkey") return nil, err } @@ -47,22 +48,23 @@ func (v *UTXO) SerialiseData() ([]byte, error) { err = binary.Write(&buf, binary.BigEndian, v.Value) if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error serialising utxo") return nil, err } err = binary.Write(&buf, binary.BigEndian, v.Timestamp) if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error serialising utxo") return nil, err } err = binary.Write(&buf, binary.BigEndian, v.Spent) if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error serialising utxo") return nil, err } data := buf.Bytes() if len(data) != SerialisedDataLengthUtxo { - common.ErrorLogger.Printf("wrong data length: %d %+v", len(data), data) + err := errors.New("data is wrong length. should not happen") + logging.L.Err(err).Int("length", len(data)).Msg("wrong data length") return nil, err } @@ -71,15 +73,16 @@ func (v *UTXO) SerialiseData() ([]byte, error) { func (v *UTXO) DeSerialiseKey(key []byte) error { if len(key) != SerialisedKeyLengthUtxo { - common.ErrorLogger.Printf("wrong key length: %d %+v", len(key), key) - return errors.New("key is wrong length. should not happen") + err := errors.New("key is wrong length. should not happen") + logging.L.Err(err).Int("length", len(key)).Msg("wrong key length") + return err } - v.BlockHash = hex.EncodeToString(key[:32]) - v.Txid = hex.EncodeToString(key[32:64]) + copy(v.BlockHash[:], key[:32]) + copy(v.Txid[:], key[32:64]) err := binary.Read(bytes.NewReader(key[64:]), binary.BigEndian, &v.Vout) if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error deserialising utxo") return err } return nil @@ -87,50 +90,42 @@ func (v *UTXO) DeSerialiseKey(key []byte) error { func (v *UTXO) DeSerialiseData(data []byte) error { if len(data) != SerialisedDataLengthUtxo { - common.ErrorLogger.Printf("wrong data length: %d %+v", len(data), data) - return errors.New("data is wrong length. 
should not happen") + err := errors.New("data is wrong length. should not happen") + logging.L.Err(err).Int("length", len(data)).Msg("wrong data length") + return err } v.ScriptPubKey = hex.EncodeToString(data[:34]) err := binary.Read(bytes.NewReader(data[34:34+8]), binary.BigEndian, &v.Value) if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error deserialising utxo") return err } err = binary.Read(bytes.NewReader(data[34+8:34+8+8]), binary.BigEndian, &v.Timestamp) if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error deserialising utxo") return err } err = binary.Read(bytes.NewReader(data[34+8+8:]), binary.BigEndian, &v.Spent) if err != nil { - common.ErrorLogger.Println(err) + logging.L.Err(err).Msg("error deserialising utxo") return err } return nil } -func GetDBKeyUTXO(blockHash, txid string, vout uint32) ([]byte, error) { - var buf bytes.Buffer - blockHashBytes, err := hex.DecodeString(blockHash) - if err != nil { - common.ErrorLogger.Println(err) - return nil, err - } - txidBytes, err := hex.DecodeString(txid) - if err != nil { - common.ErrorLogger.Println(err) - return nil, err - } - buf.Write(blockHashBytes) - buf.Write(txidBytes) +func GetDBKeyUTXO(blockHash, txid [32]byte, vout uint32) ([]byte, error) { + key := make([]byte, 32+32+4) - err = binary.Write(&buf, binary.BigEndian, vout) - if err != nil { - common.ErrorLogger.Println(err) - return nil, err - } + // Copy blockHash (32 bytes) + copy(key[:32], blockHash[:]) + + // Copy txid (32 bytes) + copy(key[32:64], txid[:]) + + // Write vout (4 bytes) in big-endian format + binary.BigEndian.PutUint32(key[64:], vout) - return buf.Bytes(), nil + return key, nil } // FindBiggestRemainingUTXO returns nil if the spent utxo was not the largest and @@ -158,11 +153,6 @@ func FindBiggestRemainingUTXO(utxoSpent UTXO, utxos []UTXO) (*uint64, error) { } if spentIsMax { - // if valueMax == 0 { - // common.ErrorLogger.Printf("%+v", utxoSpent) - // 
common.ErrorLogger.Printf("%+v", utxos) - // return nil, errors.New("valueMax was 0. this should not happen") - // } // If the spent UTXO was the largest, return the max value among the remaining UTXOs. return &valueMax, nil } else { diff --git a/src/common/types/utxo_test.go b/internal/types/utxo_test.go similarity index 100% rename from src/common/types/utxo_test.go rename to internal/types/utxo_test.go diff --git a/src/common/endpoints.go b/src/common/endpoints.go deleted file mode 100644 index 443a4d0..0000000 --- a/src/common/endpoints.go +++ /dev/null @@ -1,4 +0,0 @@ -package common - -//const MempoolEndpoint = "http://localhost:80/api/tx/" -const MempoolEndpoint = "https://mempool.space/signet/api/tx" diff --git a/src/common/util.go b/src/common/util.go deleted file mode 100644 index f177406..0000000 --- a/src/common/util.go +++ /dev/null @@ -1,56 +0,0 @@ -package common - -import ( - "crypto/sha256" - "os/user" - "strings" - - "github.com/shopspring/decimal" - "golang.org/x/crypto/ripemd160" -) - -// ReverseBytes reverses the bytes inside the byte slice and returns the same slice. It does not return a copy. 
-func ReverseBytes(bytes []byte) []byte { - for i, j := 0, len(bytes)-1; i < j; i, j = i+1, j-1 { - bytes[i], bytes[j] = bytes[j], bytes[i] - } - return bytes -} - -func ConvertFloatBTCtoSats(value float64) uint64 { - valueBTC := decimal.NewFromFloat(value) - satsConstant := decimal.NewFromInt(100_000_000) - // Multiply the BTC value by the number of Satoshis per Bitcoin - resultInDecimal := valueBTC.Mul(satsConstant) - // Get the integer part of the result - resultInInt := resultInDecimal.IntPart() - // Convert the integer result to uint64 and return - if resultInInt < 0 { - DebugLogger.Println("value:", value, "result:", resultInInt) - ErrorLogger.Fatalln("value was converted to negative value") - } - - return uint64(resultInInt) -} - -func HashTagged(tag string, msg []byte) [32]byte { - tagHash := sha256.Sum256([]byte(tag)) - data := append(tagHash[:], tagHash[:]...) - data = append(data, msg...) - return sha256.Sum256(data) -} - -// Hash160 performs a RIPEMD160(SHA256(data)) hash on the given data -func Hash160(data []byte) []byte { - sha256Hash := sha256.Sum256(data) - ripemd160Hasher := ripemd160.New() - ripemd160Hasher.Write(sha256Hash[:]) // Hash the SHA256 hash - return ripemd160Hasher.Sum(nil) -} - -func ResolvePath(path string) string { - usr, _ := user.Current() - dir := usr.HomeDir - - return strings.Replace(path, "~", dir, 1) -} diff --git a/src/core/block_test.go b/src/core/block_test.go deleted file mode 100644 index 96e7404..0000000 --- a/src/core/block_test.go +++ /dev/null @@ -1,35 +0,0 @@ -package core - -import ( - "SilentPaymentAppBackend/src/common/types" - "SilentPaymentAppBackend/src/testhelpers" - "fmt" - "log" - "testing" -) - -func TestBlockAnalysis(t *testing.T) { - var block types.Block - err := testhelpers.LoadBlockFromFile("/Users/setorblagogee/dev/sp-test-dir/block-716120.json", &block) - if err != nil { - log.Fatalln(err) - } - - tweaks, err := ComputeTweaksForBlock(&block) - if err != nil { - log.Fatalln(err) - } - - for _, tweak 
:= range tweaks { - fmt.Printf("%x - %s\n", tweak.TweakData, tweak.Txid) - } - - for _, tx := range block.Txs { - for _, tweak := range tweaks { - if tx.Txid == tweak.Txid { - fmt.Printf("%x\n", tweak.TweakData) - } - } - } - -} diff --git a/src/db/dblevel/blockheader.go b/src/db/dblevel/blockheader.go deleted file mode 100644 index f6043b7..0000000 --- a/src/db/dblevel/blockheader.go +++ /dev/null @@ -1,30 +0,0 @@ -package dblevel - -import ( - "SilentPaymentAppBackend/src/common" - "SilentPaymentAppBackend/src/common/types" - "errors" -) - -func InsertBlockHeader(pair types.BlockHeader) error { - err := insertSimple(HeadersDB, &pair) - if err != nil { - common.ErrorLogger.Println(err) - return err - } - common.DebugLogger.Println("block_header inserted") - return nil -} - -func FetchByBlockHashBlockHeader(blockHash string) (*types.BlockHeader, error) { - var pair types.BlockHeader - err := retrieveByBlockHash(HeadersDB, blockHash, &pair) - if err != nil && !errors.Is(err, NoEntryErr{}) { - common.ErrorLogger.Println(err) - return nil, err - } else if errors.Is(err, NoEntryErr{}) { // todo why do we return the error anyways? - //common.ErrorLogger.Println(err) don't print case is ignored above anyways - return nil, err - } - return &pair, nil -} diff --git a/src/main.go b/src/main.go deleted file mode 100644 index 88a3005..0000000 --- a/src/main.go +++ /dev/null @@ -1,262 +0,0 @@ -package main - -import ( - "SilentPaymentAppBackend/src/common" - "SilentPaymentAppBackend/src/core" - "SilentPaymentAppBackend/src/dataexport" - "SilentPaymentAppBackend/src/db/dblevel" - "SilentPaymentAppBackend/src/server" - "flag" - "fmt" - "log" - "path" - - "os" - "os/signal" - "strings" - "time" -) - -var ( - displayVersion bool - pruneOnStart bool - exportData bool - Version = "0.0.0" -) - -func init() { - flag.StringVar(&common.BaseDirectory, "datadir", common.DefaultBaseDirectory, "Set the base directory for blindbit oracle. 
Default directory is ~/.blindbit-oracle") - flag.BoolVar(&displayVersion, "version", false, "show version of blindbit-oracle") - flag.BoolVar(&pruneOnStart, "reprune", false, "set this flag if you want to prune on startup") - flag.BoolVar(&exportData, "export-data", false, "export the databases") - flag.Parse() - - if displayVersion { - // we only need the version for this - return - } - common.SetDirectories() // todo a proper set settings function which does it all would be good to avoid several small function calls - err := os.Mkdir(common.BaseDirectory, 0750) - if err != nil && !strings.Contains(err.Error(), "file exists") { - fmt.Println(err.Error()) - log.Fatal(err) - } - - err = os.Mkdir(common.LogsPath, 0750) - if err != nil && !strings.Contains(err.Error(), "file exists") { - fmt.Println(err.Error()) - log.Fatal(err) - } - - // file, err := os.OpenFile(fmt.Sprintf("%s/logs.log", common.LogsPath), os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0666) - // if err != nil { - // log.Fatal(err) - // } - // fileDebug, err := os.OpenFile(fmt.Sprintf("%s/logs-debug.log", common.LogsPath), os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0666) - // if err != nil { - // log.Fatal(err) - // } - - // multi := io.MultiWriter(file, os.Stdout) - //multiDebug := io.MultiWriter(fileDebug, os.Stdout) - - // common.DebugLogger = log.New(fileDebug, "[DEBUG] ", log.Ldate|log.Lmicroseconds|log.Lshortfile|log.Lmsgprefix) - // common.InfoLogger = log.New(multi, "[INFO] ", log.Ldate|log.Lmicroseconds|log.Lshortfile|log.Lmsgprefix) - // common.WarningLogger = log.New(multi, "[WARNING] ", log.Ldate|log.Lmicroseconds|log.Lshortfile|log.Lmsgprefix) - // common.ErrorLogger = log.New(multi, "[ERROR] ", log.Ldate|log.Lmicroseconds|log.Llongfile|log.Lmsgprefix) - - common.InfoLogger.Println("base directory", common.BaseDirectory) - - // load after loggers are instantiated - common.LoadConfigs(path.Join(common.BaseDirectory, common.ConfigFileName)) - - // create DB path - err = os.Mkdir(common.DBPath, 0750) - if 
err != nil && !strings.Contains(err.Error(), "file exists") { - common.ErrorLogger.Println(err) - panic(err) - } - - // open levelDB connections - openLevelDBConnections() - - if common.CookiePath != "" { - data, err := os.ReadFile(common.CookiePath) - if err != nil { - panic(err) - } - - credentials := strings.Split(string(data), ":") - if len(credentials) != 2 { - panic("cookie file is invalid") - } - common.RpcUser = credentials[0] - common.RpcPass = credentials[1] - } - - if common.RpcUser == "" { - panic("rpc user not set") // todo use cookie file to circumvent this requirement - } - - if common.RpcPass == "" { - panic("rpc pass not set") // todo use cookie file to circumvent this requirement - } -} - -func main() { - if displayVersion { - fmt.Println("blindbit-oracle version:", Version) // using fmt because loggers are not initialised - os.Exit(0) - } - defer common.InfoLogger.Println("Program shut down") - defer closeDBs() - - //log.SetFlags(log.LstdFlags | log.Lshortfile | log.Lmicroseconds) - interrupt := make(chan os.Signal, 1) - signal.Notify(interrupt, os.Interrupt) - - common.InfoLogger.Println("Program Started") - - // make sure everything is ready before we receive data - - //todo create proper handling for exporting data - - if exportData { - common.InfoLogger.Println("Exporting data") - dataexport.ExportUTXOs(fmt.Sprintf("%s/export/utxos.csv", common.BaseDirectory)) - return - } - - //moved into go routine such that the interrupt signal will apply properly - go func() { - if pruneOnStart { - startPrune := time.Now() - core.PruneAllUTXOs() - common.InfoLogger.Printf("Pruning took: %s", time.Since(startPrune).String()) - } - startSync := time.Now() - err := core.PreSyncHeaders() - if err != nil { - common.ErrorLogger.Fatalln(err) - return - } - - // so we can start fetching data while not fully synced. Requires headers to be synced to avoid grave errors. 
- go server.RunServer(&server.ApiHandler{}) - - // todo buggy for sync catchup from 0, needs to be 1 or higher - err = core.SyncChain() - if err != nil { - common.ErrorLogger.Fatalln(err) - return - } - common.InfoLogger.Printf("Sync took: %s", time.Since(startSync).String()) - go core.CheckForNewBlockRoutine() - - // only call this if you need to reindex. It doesn't delete anything but takes a couple of minutes to finish - //err := core.ReindexDustLimitsOnly() - //if err != nil { - // common.ErrorLogger.Fatalln(err) - // return - //} - - }() - - for { - <-interrupt - common.InfoLogger.Println("Program interrupted") - return - } -} - -func openLevelDBConnections() { - dblevel.HeadersDB = dblevel.OpenDBConnection(common.DBPathHeaders) - dblevel.HeadersInvDB = dblevel.OpenDBConnection(common.DBPathHeadersInv) - dblevel.NewUTXOsFiltersDB = dblevel.OpenDBConnection(common.DBPathFilters) - dblevel.TweaksDB = dblevel.OpenDBConnection(common.DBPathTweaks) - dblevel.TweakIndexDB = dblevel.OpenDBConnection(common.DBPathTweakIndex) - dblevel.TweakIndexDustDB = dblevel.OpenDBConnection(common.DBPathTweakIndexDust) - dblevel.UTXOsDB = dblevel.OpenDBConnection(common.DBPathUTXOs) - dblevel.SpentOutpointsIndexDB = dblevel.OpenDBConnection(common.DBPathSpentOutpointsIndex) - dblevel.SpentOutpointsFilterDB = dblevel.OpenDBConnection(common.DBPathSpentOutpointsFilter) -} - -func closeDBs() { - err := dblevel.HeadersDB.Close() - if err != nil { - common.ErrorLogger.Println(err) - } - err = dblevel.HeadersInvDB.Close() - if err != nil { - common.ErrorLogger.Println(err) - } - err = dblevel.NewUTXOsFiltersDB.Close() - if err != nil { - common.ErrorLogger.Println(err) - } - err = dblevel.TweaksDB.Close() - if err != nil { - common.ErrorLogger.Println(err) - } - err = dblevel.TweakIndexDB.Close() - if err != nil { - common.ErrorLogger.Println(err) - } - err = dblevel.TweakIndexDustDB.Close() - if err != nil { - common.ErrorLogger.Println(err) - } - err = dblevel.UTXOsDB.Close() - if err 
!= nil { - common.ErrorLogger.Println(err) - } - err = dblevel.SpentOutpointsIndexDB.Close() - if err != nil { - common.ErrorLogger.Println(err) - } - err = dblevel.SpentOutpointsFilterDB.Close() - if err != nil { - common.ErrorLogger.Println(err) - } - - common.InfoLogger.Println("DBs closed") -} - -func exportAll() { - // todo manage memory better, bloats completely during export - common.InfoLogger.Println("Exporting data") - timestamp := time.Now() - - err := dataexport.ExportUTXOs(fmt.Sprintf("./data-export/utxos-%d.csv", timestamp.Unix())) - if err != nil { - panic(err) - } - common.InfoLogger.Println("Finished UTXOs") - - err = dataexport.ExportFilters(fmt.Sprintf("./data-export/filters-%d.csv", timestamp.Unix())) - if err != nil { - panic(err) - } - common.InfoLogger.Println("Finished Filters") - - err = dataexport.ExportTweaks(fmt.Sprintf("./data-export/tweaks-%d.csv", timestamp.Unix())) - if err != nil { - panic(err) - } - common.InfoLogger.Println("Finished Tweaks") - - err = dataexport.ExportTweakIndices(fmt.Sprintf("./data-export/tweak-indices-%d.csv", timestamp.Unix())) - if err != nil { - panic(err) - } - common.InfoLogger.Println("Finished Tweak Indices") - - err = dataexport.ExportHeadersInv(fmt.Sprintf("./data-export/headers-inv-%d.csv", timestamp.Unix())) - if err != nil { - panic(err) - } - common.InfoLogger.Println("Finished HeadersInv") - - common.InfoLogger.Println("All exported") - os.Exit(0) -} diff --git a/src/test_data/block_833000.json b/test_data/block_833000.json similarity index 100% rename from src/test_data/block_833000.json rename to test_data/block_833000.json diff --git a/src/test_data/block_833010.json b/test_data/block_833010.json similarity index 100% rename from src/test_data/block_833010.json rename to test_data/block_833010.json diff --git a/src/test_data/block_833013.json b/test_data/block_833013.json similarity index 100% rename from src/test_data/block_833013.json rename to test_data/block_833013.json diff --git 
a/src/test_data/block_834469.json b/test_data/block_834469.json similarity index 100% rename from src/test_data/block_834469.json rename to test_data/block_834469.json diff --git a/src/test_data/send_and_receive_test_vectors.json b/test_data/send_and_receive_test_vectors.json similarity index 100% rename from src/test_data/send_and_receive_test_vectors.json rename to test_data/send_and_receive_test_vectors.json diff --git a/src/test_data/send_and_receive_test_vectors_with_type.json b/test_data/send_and_receive_test_vectors_with_type.json similarity index 100% rename from src/test_data/send_and_receive_test_vectors_with_type.json rename to test_data/send_and_receive_test_vectors_with_type.json