fix patch bgs lexicon null

@@ -72,11 +72,8 @@ typeset -a PATCH_FILES
 PATCH_FILES=(
     "170-pds-oauth-same-site-fix.patch"
     "8980-social-app-disable-proxy.diff"
-    "disable-statsig-sdk.diff"
     "140-social-app-yarn-network-timeout.patch"
     "130-atproto-ozone-enable-daemon-v2.patch"
-    "152-indigo-newpds-dayper-limit-pr707.diff"
-    "190-bgs-disable-ratelimit.patch"
     "200-feed-generator-custom.patch"
     "210-bgs-since-empty-fix.patch"
 )

152-indigo-newpds-dayper-limit-pr707.diff (deleted; former contents shown below)
@@ -1,106 +0,0 @@
diff --git a/bgs/bgs.go b/bgs/bgs.go
index 35dfab9d..7e225f6c 100644
--- a/bgs/bgs.go
+++ b/bgs/bgs.go
@@ -112,6 +112,7 @@ type BGSConfig struct {
     DefaultRepoLimit int64
     ConcurrencyPerPDS int64
     MaxQueuePerPDS int64
+    InitialNewPDSPerDayLimit int64
     NumCompactionWorkers int
 }

@@ -122,6 +123,7 @@ func DefaultBGSConfig() *BGSConfig {
         DefaultRepoLimit: 100,
         ConcurrencyPerPDS: 100,
         MaxQueuePerPDS: 1_000,
+        InitialNewPDSPerDayLimit: 10,
         NumCompactionWorkers: 2,
     }
 }
@@ -159,6 +161,7 @@ func NewBGS(db *gorm.DB, ix *indexer.Indexer, repoman *repomgr.RepoManager, evtm
     slOpts.DefaultRepoLimit = config.DefaultRepoLimit
     slOpts.ConcurrencyPerPDS = config.ConcurrencyPerPDS
     slOpts.MaxQueuePerPDS = config.MaxQueuePerPDS
+    slOpts.DefaultNewPDSPerDayLimit = config.InitialNewPDSPerDayLimit
     s, err := NewSlurper(db, bgs.handleFedEvent, slOpts)
     if err != nil {
         return nil, err
diff --git a/bgs/fedmgr.go b/bgs/fedmgr.go
index 42ce7407..9072ba05 100644
--- a/bgs/fedmgr.go
+++ b/bgs/fedmgr.go
@@ -45,6 +45,7 @@ type Slurper struct {
     MaxQueuePerPDS int64

     NewPDSPerDayLimiter *slidingwindow.Limiter
+    initialNewPDSPerDayLimit int64

     newSubsDisabled bool
     trustedDomains []string
@@ -70,6 +71,7 @@ type SlurperOptions struct {
     DefaultRepoLimit int64
     ConcurrencyPerPDS int64
     MaxQueuePerPDS int64
+    DefaultNewPDSPerDayLimit int64
 }

 func DefaultSlurperOptions() *SlurperOptions {
@@ -82,6 +84,7 @@ func DefaultSlurperOptions() *SlurperOptions {
         DefaultRepoLimit: 100,
         ConcurrencyPerPDS: 100,
         MaxQueuePerPDS: 1_000,
+        DefaultNewPDSPerDayLimit: 10,
     }
 }

@@ -112,6 +115,7 @@ func NewSlurper(db *gorm.DB, cb IndexCallback, opts *SlurperOptions) (*Slurper,
         ssl: opts.SSL,
         shutdownChan: make(chan bool),
         shutdownResult: make(chan []error),
+        initialNewPDSPerDayLimit: opts.DefaultNewPDSPerDayLimit,
     }
     if err := s.loadConfig(); err != nil {
         return nil, err
@@ -224,13 +228,15 @@ func (s *Slurper) loadConfig() error {
     }

     if sc.ID == 0 {
-        if err := s.db.Create(&SlurpConfig{}).Error; err != nil {
+        sc.NewPDSPerDayLimit = s.initialNewPDSPerDayLimit
+        if err := s.db.Create(&SlurpConfig{ NewPDSPerDayLimit: s.initialNewPDSPerDayLimit, }).Error; err != nil {
             return err
         }
     }

     s.newSubsDisabled = sc.NewSubsDisabled
     s.trustedDomains = sc.TrustedDomains
+    s.initialNewPDSPerDayLimit = sc.NewPDSPerDayLimit

     s.NewPDSPerDayLimiter, _ = slidingwindow.NewLimiter(time.Hour*24, sc.NewPDSPerDayLimit, windowFunc)

diff --git a/cmd/bigsky/main.go b/cmd/bigsky/main.go
index 540796f5..cb946aed 100644
--- a/cmd/bigsky/main.go
+++ b/cmd/bigsky/main.go
@@ -195,6 +195,12 @@ func run(args []string) error {
         EnvVars: []string{"RELAY_EVENT_PLAYBACK_TTL"},
         Value: 72 * time.Hour,
     },
+    &cli.Int64Flag{
+        Name: "newpds-perday-limit",
+        EnvVars: []string{"RELAY_NEWPDS_PERDAY_LIMIT"},
+        Value: 10,
+        Usage: "initial value for NewPDSPerDayLimit",
+    },
     &cli.IntFlag{
         Name: "num-compaction-workers",
         EnvVars: []string{"RELAY_NUM_COMPACTION_WORKERS"},
@@ -418,6 +424,7 @@ func runBigsky(cctx *cli.Context) error {
     bgsConfig.ConcurrencyPerPDS = cctx.Int64("concurrency-per-pds")
     bgsConfig.MaxQueuePerPDS = cctx.Int64("max-queue-per-pds")
     bgsConfig.DefaultRepoLimit = cctx.Int64("default-repo-limit")
+    bgsConfig.InitialNewPDSPerDayLimit = cctx.Int64("newpds-perday-limit")
     bgsConfig.NumCompactionWorkers = cctx.Int("num-compaction-workers")
     bgs, err := libbgs.NewBGS(db, ix, repoman, evtman, cachedidr, rf, hr, bgsConfig)
     if err != nil {

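For reference, the deleted patch above plumbed a configurable "new PDS per day" limit from the newpds-perday-limit flag into the slurper's sliding-window limiter. The sketch below is only an illustration of what such a daily limit enforces; it uses the Go standard library rather than the slidingwindow package referenced in the patch, and dailyLimiter is a made-up name, not indigo code.

// Illustration only: a minimal fixed-window counter capturing the idea behind
// NewPDSPerDayLimit (admit at most N new PDS subscriptions per 24 hours).
// This is NOT the slidingwindow-based implementation used by the relay.
package main

import (
	"fmt"
	"sync"
	"time"
)

// dailyLimiter allows up to limit events per 24-hour window.
type dailyLimiter struct {
	mu          sync.Mutex
	limit       int64
	count       int64
	windowStart time.Time
}

func newDailyLimiter(limit int64) *dailyLimiter {
	return &dailyLimiter{limit: limit, windowStart: time.Now()}
}

// Allow reports whether another new PDS may be admitted today,
// resetting the counter once the 24-hour window has elapsed.
func (l *dailyLimiter) Allow() bool {
	l.mu.Lock()
	defer l.mu.Unlock()
	if time.Since(l.windowStart) >= 24*time.Hour {
		l.windowStart = time.Now()
		l.count = 0
	}
	if l.count >= l.limit {
		return false
	}
	l.count++
	return true
}

func main() {
	// The removed patch defaulted the limit to 10 (flag "newpds-perday-limit").
	lim := newDailyLimiter(10)
	for i := 0; i < 12; i++ {
		fmt.Printf("new PDS #%d admitted: %v\n", i+1, lim.Allow())
	}
}
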
190-bgs-disable-ratelimit.patch (deleted; former contents shown below)
@@ -1,12 +0,0 @@
diff --git a/bgs/fedmgr.go b/bgs/fedmgr.go
index 2235c6e..e69de29 100644
--- a/bgs/fedmgr.go
+++ b/bgs/fedmgr.go
@@ -342,6 +342,7 @@ var ErrNewSubsDisabled = fmt.Errorf("new subscriptions temporarily disabled")
 // Checks whether a host is allowed to be subscribed to
 // must be called with the slurper lock held
 func (s *Slurper) canSlurpHost(host string) bool {
+    return true
     // Check if we're over the limit for new PDSs today
     if !s.NewPDSPerDayLimiter.Allow() {
         return false

210-bgs-since-empty-fix.patch (modified; now patches events/dbpersist/dbpersist.go instead of indexer/indexer.go)
@@ -1,26 +1,61 @@

Old contents:
diff --git a/indexer/indexer.go b/indexer/indexer.go
index e3c28ec1..66663de0 100644
--- a/indexer/indexer.go
+++ b/indexer/indexer.go
@@ -104,13 +104,20 @@ func (ix *Indexer) HandleRepoEvent(ctx context.Context, evt *repomgr.RepoEvent)
         toobig = true
     }

+    // Normalize empty string to nil for since field
+    // Empty string fails TID validation on consumers
+    var since *string
+    if evt.Since != nil && *evt.Since != "" {
+        since = evt.Since
+    }
+
     ix.log.Debug("Sending event", "did", did)
     if err := ix.events.AddEvent(ctx, &events.XRPCStreamEvent{
         RepoCommit: &comatproto.SyncSubscribeRepos_Commit{
             Repo: did,
             Blocks: slice,
             Rev: evt.Rev,
-            Since: evt.Since,
+            Since: since,
             Commit: lexutil.LexLink(evt.NewRoot),
             Time: time.Now().Format(util.ISO8601),
             Ops: outops,

New contents:
diff --git a/events/dbpersist/dbpersist.go b/events/dbpersist/dbpersist.go
index 04e9fb87..5e47218e 100644
--- a/events/dbpersist/dbpersist.go
+++ b/events/dbpersist/dbpersist.go
@@ -306,6 +306,12 @@ func (p *DbPersistence) RecordFromRepoCommit(ctx context.Context, evt *comatprot
         return nil, err
     }

+    // Normalize empty string to nil for since field
+    var since *string
+    if evt.Since != nil && *evt.Since != "" {
+        since = evt.Since
+    }
+
     rer := RepoEventRecord{
         Commit: &models.DbCID{CID: cid.Cid(evt.Commit)},
         //Prev
@@ -315,7 +321,7 @@ func (p *DbPersistence) RecordFromRepoCommit(ctx context.Context, evt *comatprot
         Time: t,
         Rebase: evt.Rebase,
         Rev: evt.Rev,
-        Since: evt.Since,
+        Since: since,
     }

     opsb, err := json.Marshal(evt.Ops)
@@ -339,6 +345,12 @@ func (p *DbPersistence) RecordFromRepoSync(ctx context.Context, evt *comatproto.
         return nil, err
     }

+    // Normalize empty string to nil for since field
+    var since *string
+    if evt.Since != nil && *evt.Since != "" {
+        since = evt.Since
+    }
+
     rer := RepoEventRecord{
         Repo: uid,
         Type: "repo_sync",
@@ -555,6 +567,12 @@ func (p *DbPersistence) hydrateCommit(ctx context.Context, rer *RepoEventRecord)
         return nil, err
     }

+    // Normalize empty string to nil for since field
+    var since *string
+    if rer.Since != nil && *rer.Since != "" {
+        since = rer.Since
+    }
+
     out := &comatproto.SyncSubscribeRepos_Commit{
         Seq: int64(rer.Seq),
         Repo: did,
@@ -564,7 +582,7 @@ func (p *DbPersistence) hydrateCommit(ctx context.Context, rer *RepoEventRecord)
         Rebase: rer.Rebase,
         Ops: ops,
         Rev: rer.Rev,
-        Since: rer.Since,
+        Since: since,
     }

     cs, err := p.readCarSlice(ctx, rer)

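The rewritten patch inserts the same empty-string check into each patched function of dbpersist.go. A minimal standalone sketch of that normalization follows; the helper name normalizeSince and the sample revision string are hypothetical and do not appear in the patch itself.

// Illustration only: the "since" normalization pulled out as a helper.
// The actual patch inlines this logic rather than defining a function.
package main

import "fmt"

// normalizeSince maps an empty "since" revision to nil so consumers never
// see "", which would fail TID validation, while leaving valid revisions
// and genuine nils untouched.
func normalizeSince(since *string) *string {
	if since != nil && *since != "" {
		return since
	}
	return nil
}

func main() {
	empty := ""
	rev := "3kao2cf7qzt2a" // made-up TID-style revision string
	fmt.Println(normalizeSince(nil))         // <nil>
	fmt.Println(normalizeSince(&empty))      // <nil>
	fmt.Println(normalizeSince(&rev) != nil) // true
}
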
disable-statsig-sdk.diff (deleted; former contents shown below)
@@ -1,22 +0,0 @@
diff --git a/src/lib/statsig/statsig.tsx b/src/lib/statsig/statsig.tsx
index 1234567..89abcdef 100644
--- a/src/lib/statsig/statsig.tsx
+++ b/src/lib/statsig/statsig.tsx
@@ -266,6 +266,7 @@ export async function tryFetchGates(
 }

 export function initialize() {
+  if (!SDK_KEY) return Promise.resolve()
   return Statsig.initialize(SDK_KEY, null, createStatsigOptions([]))
 }

@@ -310,6 +311,9 @@ export function Provider({children}: {children: React.ReactNode}) {
     return () => clearInterval(id)
   }, [handleIntervalTick])

+  if (!SDK_KEY) {
+    return <GateCache.Provider value={gateCache}>{children}</GateCache.Provider>
+  }
   return (
     <GateCache.Provider value={gateCache}>
       <StatsigProvider