-
Notifications
You must be signed in to change notification settings - Fork 988
/
Copy pathstellar-core_example.cfg
920 lines (794 loc) · 39.4 KB
/
stellar-core_example.cfg
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
#
# This file gives details of the various configuration parameters you can set
# when running stellar-core. You will need to edit to fit your own set up.
#
# This is a TOML file. See https://github.com/toml-lang/toml for syntax.
###########################
## General admin settings
# LOG_FILE_PATH (string) default "stellar-core-{datetime:%Y-%m-%d_%H-%M-%S}.log"
# Path to the file you want stellar-core to write its log to.
# You can set to "" for no log file.
LOG_FILE_PATH="stellar-core-{datetime:%Y-%m-%d_%H-%M-%S}.log"
# LOG_COLOR (boolean) default false
# Whether to highlight stdout log messages with ANSI terminal colors.
LOG_COLOR=false
# HISTOGRAM_WINDOW_SIZE (integer) default 30
# The size of a histogram window for metrics in seconds.
# Core reports percentiles based on the previous
# HISTOGRAM_WINDOW_SIZE-second window.
HISTOGRAM_WINDOW_SIZE=30
# BUCKET_DIR_PATH (string) default "buckets"
# Specifies the directory where stellar-core should store the bucket list.
# This will get written to a lot and will grow as the size of the ledger grows.
BUCKET_DIR_PATH="buckets"
# DATABASE (string) default "sqlite3://:memory:"
# Sets the DB connection string for SOCI.
# Defaults to an in memory database.
# If using sqlite, a string like:
#
# "sqlite3://path/to/dbname.db"
#
# alternatively, if using postgresql, a string like:
#
# "postgresql://dbname=stellar user=xxxx password=yyyy host=10.0.x.y"
#
# taking any combination of parameters from:
#
# http://www.postgresql.org/docs/devel/static/libpq-connect.html#LIBPQ-PARAMKEYWORDS
#
DATABASE="sqlite3://stellar.db"
# Data layer cache configuration
# - ENTRY_CACHE_SIZE controls the maximum number of LedgerEntry objects
# that will be stored in the cache (default 4096)
# - PREFETCH_BATCH_SIZE determines batch size for bulk loads used for
# prefetching
ENTRY_CACHE_SIZE=100000
PREFETCH_BATCH_SIZE=1000
# HTTP_PORT (integer) default 11626
# What port stellar-core listens for commands on.
# If set to 0, disable HTTP commands interface entirely
# (note that this does not disable HTTP query interface).
# Must not be the same as HTTP_QUERY_PORT if not 0.
HTTP_PORT=11626
# PUBLIC_HTTP_PORT (true or false) default false
# If false you only accept stellar commands from localhost.
# Do not set to true and expose the port to the open internet. This will allow
# random people to run stellar commands on your server. (such as `stop`)
PUBLIC_HTTP_PORT=false
# Maximum number of simultaneous HTTP clients
HTTP_MAX_CLIENT=128
# COMMANDS (list of strings) default is empty
# List of commands to run on startup.
# Right now only setting log levels really makes sense.
COMMANDS=[
"ll?level=info&partition=Herder"
]
# HTTP_QUERY_PORT (integer) default 0
# What port stellar-core listens for query commands on,
# such as getledgerentryraw.
# If set to 0, disable HTTP query interface entirely
# (note that this does not disable HTTP commands interface).
# Must not be the same as HTTP_PORT if not 0.
HTTP_QUERY_PORT=0
# QUERY_THREAD_POOL_SIZE (integer) default 4
# Number of threads available for processing query commands.
# If HTTP_QUERY_PORT == 0, this option is ignored.
QUERY_THREAD_POOL_SIZE=4
# QUERY_SNAPSHOT_LEDGERS (integer) default 0
# Number of historical ledger snapshots to maintain for
# query commands. Note: Setting this to large values may
# significantly impact performance. Additionally, these
# snapshots are a "best effort" only and not persisted on
# restart. On restart, only the current ledger will be
# available, with snapshots available as ledgers close.
QUERY_SNAPSHOT_LEDGERS = 0
# convenience mapping of common names to node IDs. The common names can be used
# in the .cfg. `$common_name`. If set, they will also appear in your logs
# instead of the less friendly nodeID.
NODE_NAMES=[
"GA22N4YGO7IJDRF2SISA5KHULGYYKDXBQGYIWUVNMSNHF5G2DNBKP3M5 eliza",
"GCDENOCHA6TQL6DFC4FS54HIH7RP7XR7VZCQZFANMGLT2WXJ7D7KGV2P hal9000"
]
###########################
## Configure which network this instance should talk to
NETWORK_PASSPHRASE="Public Global Stellar Network ; September 2015"
###########################
## Overlay configuration
# PEER_PORT (Integer) defaults to 11625
# The port other instances of stellar-core can connect to you on.
PEER_PORT=11625
# TARGET_PEER_CONNECTIONS (Integer) default 8
# This controls how aggressively the server will connect to other peers.
# It will send outbound connection attempts until it is at this
# number of outbound peer connections.
# This value may be additionally capped by OS limits of open connections.
TARGET_PEER_CONNECTIONS=8
# MAX_ADDITIONAL_PEER_CONNECTIONS (Integer) default -1
# Numbers of peers allowed to make inbound connection to this instance
# Setting this too low will result in peers stranded out of the network
# This value may be additionally capped by OS limits of open connections.
# -1: use TARGET_PEER_CONNECTIONS*8 as value for this field
MAX_ADDITIONAL_PEER_CONNECTIONS=-1
# MAX_PENDING_CONNECTIONS (Integer) default 500
# Maximum number of pending (non authenticated) connections to this server.
# This value is split between inbound and outbound connections in the same
# proportion as MAX_ADDITIONAL_PEER_CONNECTIONS is to TARGET_PEER_CONNECTIONS.
# This value may be additionally capped by OS limits of open connections.
# Additionally, 2 more inbound connections are allowed if coming from
# preferred peers.
MAX_PENDING_CONNECTIONS=500
# PEER_AUTHENTICATION_TIMEOUT (Integer) default 2
# This server will drop peer that does not authenticate itself during that
# time.
PEER_AUTHENTICATION_TIMEOUT=2
# PEER_TIMEOUT (Integer) default 30
# This server will drop peer that does not send or receive anything during that
# time when authenticated.
PEER_TIMEOUT=30
# PEER_STRAGGLER_TIMEOUT (Integer) default 120
# This server will drop peer that does not drain its outgoing queue during that
# time when authenticated.
PEER_STRAGGLER_TIMEOUT=120
# MAX_BATCH_WRITE_COUNT (Integer) default 1024
# How many messages can this server send at once to a peer
MAX_BATCH_WRITE_COUNT=1024
# MAX_BATCH_WRITE_BYTES (Integer) default 1048576 (1 Megabyte)
# How many bytes can this server send at once to a peer
MAX_BATCH_WRITE_BYTES=1048576
# FLOOD_OP_RATE_PER_LEDGER (Floating point) default 1.0
# Used to derive how many operations get flooded per ledger
# FLOOD_OP_RATE_PER_LEDGER*<maximum number of operations per ledger>
FLOOD_OP_RATE_PER_LEDGER = 1.0
# FLOOD_TX_PERIOD_MS (Integer) default 200
# Time in milliseconds between transaction flood events
# Transaction flooding is delayed and governed by
# FLOOD_OP_RATE_PER_LEDGER so that the target rate is met on
# a per ledger basis
FLOOD_TX_PERIOD_MS=200
# FLOOD_SOROBAN_RATE_PER_LEDGER (Floating point) default 1.0
# Used to derive how many Soroban transactions get flooded per ledger
FLOOD_SOROBAN_RATE_PER_LEDGER = 1.0
# FLOOD_SOROBAN_TX_PERIOD_MS (Integer) default 200
# Time in milliseconds between Soroban transaction flood events
# Soroban transaction flooding is delayed and governed by
# FLOOD_SOROBAN_RATE_PER_LEDGER so that the target rate is met on
# a per ledger basis
FLOOD_SOROBAN_TX_PERIOD_MS = 200
# FLOOD_ARB_TX_BASE_ALLOWANCE (Integer) default 5
# Number of cyclical path-payments (arbitrage attempts) to flood per
# asset pair, per flood period, before applying damping function.
# Set to -1 to disable traffic damping on arbitrage traffic.
FLOOD_ARB_TX_BASE_ALLOWANCE = 5
# FLOOD_ARB_TX_DAMPING_FACTOR (floating point) default 0.8
# Parameter > 0.0 and <= 1.0 that controls intensity of geometric
# damping of cyclical path-payments (arbitrage attempts). Higher
# numbers make for more forceful damping.
FLOOD_ARB_TX_DAMPING_FACTOR = 0.8
# FLOOD_DEMAND_PERIOD_MS (Integer) default 200
# Time in milliseconds between pull-mode demands
FLOOD_DEMAND_PERIOD_MS = 200
# FLOOD_ADVERT_PERIOD_MS (Integer) default 100
# Time in milliseconds between pull-mode adverts
FLOOD_ADVERT_PERIOD_MS = 100
# FLOOD_DEMAND_BACKOFF_DELAY_MS (Integer) default 500
# Time in milliseconds used for the linear-backoff strategy
# in pull mode. The n-th demand will be made n * FLOOD_DEMAND_BACKOFF_DELAY_MS
# ms after the (n-1)th demand.
FLOOD_DEMAND_BACKOFF_DELAY_MS = 500
# Maximum allowed number of DEX-related operations in the transaction set.
#
# Transaction is considered to have DEX-related operations if it has path
# payments or manage offer operations.
#
# Setting this to non-zero value results in the following:
# - The node will limit the number of accepted DEX-related transactions
# proportional to `MAX_DEX_TX_OPERATIONS_IN_TX_SET / maxTxSetSize`
# (ledger header parameter).
# - The node will broadcast fewer DEX-related transactions according to the
# proportion above.
# - Starting from protocol 20 the node will nominate TX sets that respect
# this limit and potentially have DEX-related transactions surge-priced
# against each other.
MAX_DEX_TX_OPERATIONS_IN_TX_SET = 0
# BUCKETLIST_DB_INDEX_PAGE_SIZE_EXPONENT (Integer) default 14
# Determines page size used by BucketListDB for range indexes, where
# pageSize == 2^BUCKETLIST_DB_INDEX_PAGE_SIZE_EXPONENT. If set to
# 0, individual index is always used. Default page size 16 kb.
BUCKETLIST_DB_INDEX_PAGE_SIZE_EXPONENT = 14
# BUCKETLIST_DB_CACHED_PERCENT (Integer) default 10
# Percentage of entries cached by BucketListDB when Bucket size is larger
# than BUCKETLIST_DB_INDEX_CUTOFF. Note that this value does not impact
# Buckets smaller than BUCKETLIST_DB_INDEX_CUTOFF, as they are always
# completely held in memory. Roughly speaking, RAM usage for BucketList
# cache == BucketListSize * (BUCKETLIST_DB_CACHED_PERCENT / 100).
BUCKETLIST_DB_CACHED_PERCENT = 10
# BUCKETLIST_DB_INDEX_CUTOFF (Integer) default 100
# Size, in MB, determining whether a bucket should have an individual
# key index or a key range index. If bucket size is below this value,
# individual key index will be used. If set to 0, all buckets are range indexed. If
# BUCKETLIST_DB_INDEX_PAGE_SIZE_EXPONENT == 0, value ignored and all
# buckets have individual key index.
BUCKETLIST_DB_INDEX_CUTOFF = 100
# BUCKETLIST_DB_PERSIST_INDEX (bool) default true
# Determines whether BucketListDB indexes are saved to disk for faster
# startup. Should only be set to false for testing.
# Validators do not currently support persisted indexes. If NODE_IS_VALIDATOR=true,
# this value is ignored and indexes are never persisted.
BUCKETLIST_DB_PERSIST_INDEX = true
# EXPERIMENTAL_BACKGROUND_OVERLAY_PROCESSING (bool) default false
# Determines whether some of overlay processing occurs in the background
# thread.
EXPERIMENTAL_BACKGROUND_OVERLAY_PROCESSING = false
# PREFERRED_PEERS (list of strings) default is empty
# These are IP:port strings that this server will add to its DB of peers.
# This server will try to always stay connected to the other peers on this list.
PREFERRED_PEERS=["127.0.0.1:7000","127.0.0.1:8000"]
# PREFERRED_PEER_KEYS (list of strings) default is empty
# These are public key identities that this server will treat as preferred
# when connecting, similar to the PREFERRED_PEERS list.
# can use a name already defined in the .cfg
PREFERRED_PEER_KEYS=[
"GBKXI3TVIFHD6QDSNMUOTJFDWHDYDVRRPWIHN4IM2YFXIUEWDTY7DSSI",
"GBDOAYUPGQCPLJCP2HYJQ4W3ADODJFZISHRBQTQB7SFVR4BRUX46RYIP optional_common_name",
"$eliza"]
# PREFERRED_PEERS_ONLY (boolean) default is false
# When set to true, this peer will only connect to PREFERRED_PEERS and will
# only accept connections from PREFERRED_PEERS or PREFERRED_PEER_KEYS
PREFERRED_PEERS_ONLY=false
# SURVEYOR_KEYS (list of strings) default is empty
# These are public key identities. If empty, this node will relay/respond to survey messages
# originating from a node in this node's transitive quorum. If this list is NOT empty,
# this node will only relay/respond to messages that originate from nodes in this list
# can use a name already defined in the .cfg
SURVEYOR_KEYS=[
"GBKXI3TVIFHD6QDSNMUOTJFDWHDYDVRRPWIHN4IM2YFXIUEWDTY7DSSI",
"$eliza"]
# KNOWN_PEERS (list of strings) default is empty
# These are IP:port strings that this server will add to its DB of peers.
# It will try to connect to these when it is below TARGET_PEER_CONNECTIONS.
KNOWN_PEERS=[
"core-testnet1.stellar.org",
"core-testnet2.stellar.org",
"core-testnet3.stellar.org"]
#######################
## SCP settings
# NODE_SEED (string) default random, regenerated each run.
# The seed used for generating the public key this node will
# be identified with in SCP.
# Your seed should be unique. Protect your seed. Treat it like a password.
# If you don't set a NODE_SEED one will be generated for you randomly
# on each startup.
#
# To generate a new, stable seed (and associated public key), run:
#
# stellar-core gen-seed
#
# You only need to keep the seed from this; you can always recover the
# public key from the seed by running:
#
# stellar-core convert-id <seed>
#
# This example also adds a common name to NODE_NAMES list named `self` with the
# public key associated to this seed
NODE_SEED="SBI3CZU7XZEWVXU7OZLW5MMUQAP334JFOPXSLTPOH43IRTEQ2QYXU5RG self"
# NODE_IS_VALIDATOR (boolean) default false.
# Only nodes that want to participate in SCP should set NODE_IS_VALIDATOR=true.
# Most instances should operate in observer mode with NODE_IS_VALIDATOR=false.
# See QUORUM_SET below.
NODE_IS_VALIDATOR=false
# NODE_HOME_DOMAIN (string) default empty.
# HOME_DOMAIN for this validator
# Required when NODE_IS_VALIDATOR=true
# When set, this validator will be grouped with other validators with the
# same HOME_DOMAIN (as defined in VALIDATORS/HOME_DOMAINS)
NODE_HOME_DOMAIN=""
###########################
# Consensus settings
# FAILURE_SAFETY (integer) default -1
# Most people should leave this to -1
# This is the maximum number of validator failures from your QUORUM_SET that
# you want to be able to tolerate.
# Typically, you will need at least 3f+1 nodes in your quorum set.
# If you don't have enough nodes in your quorum set to tolerate the level you
# set here stellar-core won't run as a precaution.
# A value of -1 indicates to use (n-1)/3 (n being the number of nodes
# and groups from the top level of your QUORUM_SET)
# A value of 0 is only allowed if UNSAFE_QUORUM is set
# Note: The value of 1 below is the maximum number derived from the value of
# QUORUM_SET in this configuration file
FAILURE_SAFETY=-1
# UNSAFE_QUORUM (true or false) default false
# Most people should leave this to false.
# If set to true allows to specify a potentially unsafe quorum set.
# Otherwise it won't start if
# a threshold % is set too low (threshold below 66% for the top level,
# 51% for other levels)
# FAILURE_SAFETY at 0 or above the number of failures that can occur
# You might want to set this if you are running your own network and
# aren't concerned with byzantine failures or if you fully understand how the
# quorum sets of other nodes relate to yours when it comes to
# quorum intersection.
UNSAFE_QUORUM=false
#########################
## History
# CATCHUP_COMPLETE (true or false) defaults to false
# if true will catchup to the network "completely" (replaying all history)
# if false will look for CATCHUP_RECENT for catchup settings
CATCHUP_COMPLETE=false
# CATCHUP_RECENT (integer) default to 0
# if CATCHUP_COMPLETE is true this option is ignored
# if set to 0 will catchup "minimally", using deltas to the most recent
# snapshot.
# if set to any other number, will catchup "minimally" to some past snapshot,
# then will replay history from that point to current snapshot, ensuring that
# at least CATCHUP_RECENT number of ledger entries will be present in database
# if "some past snapshot" is already present in database, it just replays all
# new history
CATCHUP_RECENT=0
# WORKER_THREADS (integer) default 11
# Number of threads available for doing long durations jobs, like bucket
# merging and verification.
WORKER_THREADS=11
# QUORUM_INTERSECTION_CHECKER (boolean) default true
# Enable/disable computation of quorum intersection monitoring
QUORUM_INTERSECTION_CHECKER=true
# MAX_CONCURRENT_SUBPROCESSES (integer) default 16
# History catchup can potentially spawn a bunch of sub-processes.
# This limits the number that will be active at a time.
MAX_CONCURRENT_SUBPROCESSES=16
# AUTOMATIC_MAINTENANCE_PERIOD (integer, seconds) default 359
# Interval between automatic maintenance executions
# Set to 0 to disable automatic maintenance
AUTOMATIC_MAINTENANCE_PERIOD=359
# AUTOMATIC_MAINTENANCE_COUNT (integer) default 400
# Number of unneeded ledgers in each table that will be removed during one
# maintenance run.
# NB: make sure that enough ledgers are deleted as to offset the growth of
# data accumulated by closing ledgers (catchup and normal operation)
# Set to 0 to disable automatic maintenance
AUTOMATIC_MAINTENANCE_COUNT=400
# AUTOMATIC_SELF_CHECK_PERIOD (integer, seconds) default 10800
# Interval between automatic self-checks, including connectivity
# and consistency checking against configured history archives.
# Set to zero to disable automatic self-checks.
AUTOMATIC_SELF_CHECK_PERIOD=10800
###############################
## The following options should probably never be set. They are used primarily
## for testing.
# RUN_STANDALONE (true or false) defaults to false
# This is a mode for testing. It prevents you from trying to connect
# to other peers
RUN_STANDALONE=false
# INVARIANT_CHECKS (list of strings) default is empty
# Setting this will cause specified invariants to be checked on ledger close and
# on bucket apply.
# Strings specified are matched (as regex) against the list of invariants.
# For example, to enable all invariants use ".*"
# INVARIANT_CHECKS = [.*]
#
# List of invariants:
# - "AccountSubEntriesCountIsValid"
# Setting this will cause additional work on each operation apply - it
# checks if the change in the number of subentries of account (signers +
# offers + data + trustlines) equals the change in the value numsubentries
# store in account. This check is only performed for accounts modified in
# any way in given ledger.
# The overhead may cause slower systems to not perform as fast as the rest
# of the network, caution is advised when using this.
# - "BucketListIsConsistentWithDatabase"
# Setting this will cause additional work on each bucket apply - it checks a
# variety of properties that should be satisfied by an applied bucket, for
# detailed information about what is checked see the comment in the header
# invariant/BucketListIsConsistentWithDatabase.h.
# The overhead may cause a system to catch-up more than once before being
# in sync with the network. This may also increase startup time as it checks
# the entire state of the database.
# - "CacheIsConsistentWithDatabase"
# Setting this will cause additional work on each operation apply - it
# checks if internal cache of ledger entries is consistent with content of
# database. It is equivalent to PARANOID_MODE from older versions of
# stellar-core.
# The overhead may cause slower systems to not perform as fast as the rest
# of the network, caution is advised when using this.
# - "ConservationOfLumens"
# Setting this will cause additional work on each operation apply - it
# checks that the total number of lumens only changes during inflation.
# The overhead may cause slower systems to not perform as fast as the rest
# of the network, caution is advised when using this.
# - "LedgerEntryIsValid"
# Setting this will cause additional work on each operation apply - it
# checks a variety of properties that must be true for a LedgerEntry to be
# valid.
# The overhead may cause slower systems to not perform as fast as the rest
# of the network, caution is advised when using this.
# - "LiabilitiesMatchOffers"
# Setting this will cause additional work on each operation apply - it
# checks that accounts, trust lines, and offers satisfy all constraints
# associated with liabilities. For additional information, see the comment
# in the header invariant/LiabilitiesMatchOffers.h.
# The overhead may cause slower systems to not perform as fast as the rest
# of the network, caution is advised when using this.
INVARIANT_CHECKS = [ "AccountSubEntriesCountIsValid",
"ConservationOfLumens",
"ConstantProductInvariant",
"LedgerEntryIsValid",
"LiabilitiesMatchOffers",
"SponsorshipCountIsValid" ]
# MANUAL_CLOSE (true or false) defaults to false
# Mode for testing. Ledger will only close when stellar-core gets
# the `manualclose` command
MANUAL_CLOSE=false
# ARTIFICIALLY_GENERATE_LOAD_FOR_TESTING (true or false) defaults to false
# Enables synthetic load generation on demand.
# The load is triggered by the `generateload` runtime command or the `apply-load` command line command.
# This option only exists for stress-testing and should not be enabled in
# production networks.
ARTIFICIALLY_GENERATE_LOAD_FOR_TESTING=false
# ARTIFICIALLY_ACCELERATE_TIME_FOR_TESTING (true or false) defaults to false
# Reduces ledger close time to 1s and checkpoint frequency to every 8 ledgers.
# Do not ever set this in production, as it will make your history archives
# incompatible with those of anyone else.
ARTIFICIALLY_ACCELERATE_TIME_FOR_TESTING=false
# ARTIFICIALLY_SET_CLOSE_TIME_FOR_TESTING (in seconds), defaults to no override
# Overrides the close time to the specified value but does not change checkpoint
# frequency - this may cause network instability.
# Do not use in production.
ARTIFICIALLY_SET_CLOSE_TIME_FOR_TESTING=0
# ALLOW_LOCALHOST_FOR_TESTING defaults to false
# Allows to connect to localhost, should not be enabled on production systems
# as this is a security threat.
ALLOW_LOCALHOST_FOR_TESTING=false
# CATCHUP_WAIT_MERGES_TX_APPLY_FOR_TESTING defaults to false
# When set, during catchup, waits for bucket merges to complete
# before applying transactions.
CATCHUP_WAIT_MERGES_TX_APPLY_FOR_TESTING=false
# ARTIFICIALLY_SET_SURVEY_PHASE_DURATION_FOR_TESTING (in minutes), defaults to
# no override. Overrides the maximum survey phase duration for both the
# collecting and reporting phase to the specified value. Performs no override if
# set to 0. Do not use in production. This option is ignored in builds without
# tests enabled.
ARTIFICIALLY_SET_SURVEY_PHASE_DURATION_FOR_TESTING=0
# PEER_READING_CAPACITY defaults to 200
# Controls how many messages from a particular peer
# core can process simultaneously, and throttles reading from a peer when at
# capacity.
PEER_READING_CAPACITY=200
# PEER_FLOOD_READING_CAPACITY defaults to 200
# Controls how many flood messages (tx or SCP) from
# a particular peer core can process simultaneously.
PEER_FLOOD_READING_CAPACITY=200
# FLOW_CONTROL_SEND_MORE_BATCH_SIZE defaults to 40
# Controls how often peers ask for more data when flow control is enabled.
FLOW_CONTROL_SEND_MORE_BATCH_SIZE=40
# A config parameter that controls how many bytes worth of flood messages
# (tx or SCP) from a particular peer core can process simultaneously
PEER_FLOOD_READING_CAPACITY_BYTES=300000
# When flow control is enabled, peer asks for more data every time it
# processes `FLOW_CONTROL_SEND_MORE_BATCH_SIZE_BYTES` bytes
FLOW_CONTROL_SEND_MORE_BATCH_SIZE_BYTES=100000
# Byte limit for outbound transaction queue.
OUTBOUND_TX_QUEUE_BYTE_LIMIT=3145728
# MAXIMUM_LEDGER_CLOSETIME_DRIFT (in seconds) defaults to
# (MAX_SLOTS_TO_REMEMBER + 2) * EXP_LEDGER_TIMESPAN_SECONDS or 90 (whichever
# is smaller)
# Maximum drift between the local clock and the network time.
# When joining the network for the first time, ignore SCP messages that are
# unlikely to be for the latest ledger.
MAXIMUM_LEDGER_CLOSETIME_DRIFT=70
# DISABLE_XDR_FSYNC (true or false) defaults to false.
# If set to true, writing an XDR file (a bucket or a checkpoint) will not
# be followed by an fsync on the file. This in turn means that XDR files
# (which hold the canonical state of the ledger) may be corrupted if the
# operating system suddenly crashes or loses power, causing the node to
# diverge and get stuck on restart, or potentially even publish bad
# history. This option only exists as an escape hatch if the local
# filesystem is so unusably slow that you prefer operating without
# durability guarantees. Do not set it to true unless you're very certain
# you want to make that trade.
DISABLE_XDR_FSYNC=false
# MAX_SLOTS_TO_REMEMBER (in ledgers) defaults to 12
# Most people should leave this to 12
# Number of most recent ledgers kept in memory. Storing more ledgers allows other
# nodes to join the network without catching up. This is useful for simulation
# testing purposes. Note that the SCP checkpoint message is always kept and does
# not count towards this limit.
MAX_SLOTS_TO_REMEMBER=12
# METADATA_OUTPUT_STREAM defaults to "", disabling it.
# A string specifying a stream to write fine-grained metadata to for each ledger
# close while running. This will be opened at startup and synchronously
# streamed-to during both catchup and live ledger-closing.
#
# Streams may be specified either as a pathname (typically a named FIFO on POSIX
# or a named pipe on Windows, though plain files also work) or a string of the
# form "fd:N" for some integer N which, on POSIX, specifies the existing open
# file descriptor N inherited by the process (for example to write to an
# anonymous pipe).
#
# As a further safety check, this option is mutually exclusive with
# NODE_IS_VALIDATOR, as its typical use writing to a pipe with a reader process
# on the other end introduces a potentially-unbounded synchronous delay in
# closing a ledger, and should not be used on a node participating in consensus,
# only a passive "watcher" node.
METADATA_OUTPUT_STREAM=""
# Number of ledgers worth of transaction metadata to preserve on disk for
# debugging purposes. These records are automatically maintained and rotated
# during processing, and are helpful for recovery in case of a serious error;
# they should only be reduced or disabled if disk space is at a premium.
METADATA_DEBUG_LEDGERS=0
# When true, Core will emit the
# breakdown of the charged resource fees for every Soroban transaction (refundable,
# non-refundable and rent components) in the extension of `SorobanData`.
# This requires updating the XDR parsing code to at least the following commit of
# Stellar XDR:
# https://github.com/stellar/stellar-xdr/commit/cdc339f5e74a75e8e558fd1a853397da71f1659a
EMIT_SOROBAN_TRANSACTION_META_EXT_V1=false
# When true, Core will emit the dynamic Soroban write fee for every ledger in the
# `LedgerCloseMeta` extension.
# This requires updating the XDR parsing code to at least the following commit of
# Stellar XDR:
# https://github.com/stellar/stellar-xdr/commit/cdc339f5e74a75e8e558fd1a853397da71f1659a
EMIT_LEDGER_CLOSE_META_EXT_V1=false
# When set to true, Core will revert to using the old, application-agnostic
# nomination weight function for SCP leader election.
FORCE_OLD_STYLE_LEADER_ELECTION=false
# EXCLUDE_TRANSACTIONS_CONTAINING_OPERATION_TYPE (list of strings) default is empty
# Setting this will cause the node to reject transactions that it receives if
# they contain any operation in this list. It will not, however, stop the node
# from voting for or applying such transactions.
#
# Strings specified are matched against the names of values for OperationType,
# such as "CREATE_ACCOUNT" or "PATH_PAYMENT_STRICT_SEND".
EXCLUDE_TRANSACTIONS_CONTAINING_OPERATION_TYPE=[]
# Config parameters that force transaction application during ledger
# close to sleep for a certain amount of time for testing only.
# The probability that it sleeps for
# OP_APPLY_SLEEP_TIME_DURATION_FOR_TESTING[i] microseconds is
# OP_APPLY_SLEEP_TIME_WEIGHT_FOR_TESTING[i] divided by
# (OP_APPLY_SLEEP_TIME_WEIGHT_FOR_TESTING[0] +
# OP_APPLY_SLEEP_TIME_WEIGHT_FOR_TESTING[1] + ...) for each i. These
# options are only for consensus and overlay simulation testing. These two
# must be used together.
OP_APPLY_SLEEP_TIME_DURATION_FOR_TESTING=[]
OP_APPLY_SLEEP_TIME_WEIGHT_FOR_TESTING=[]
# Config parameters that LoadGen uses to decide the number of operations
# to include in each transaction and its distribution for testing only.
# The probability that transactions will contain COUNT[i] operations
# is DISTRIBUTION[i] / (DISTRIBUTION[0] + DISTRIBUTION[1] + ...) for each
# i.
LOADGEN_OP_COUNT_FOR_TESTING=[]
LOADGEN_OP_COUNT_DISTRIBUTION_FOR_TESTING=[]
# Size of wasm blobs for SOROBAN_UPLOAD and MIX_CLASSIC_SOROBAN loadgen modes
# The probability that wasm blobs will contain WASM_BYTES[i] bytes is
# DISTRIBUTION[i] / (DISTRIBUTION[0] + DISTRIBUTION[1] + ...) for each i.
LOADGEN_WASM_BYTES_FOR_TESTING=[]
LOADGEN_WASM_BYTES_DISTRIBUTION_FOR_TESTING=[]
# Number of data entries for SOROBAN_INVOKE and MIX_CLASSIC_SOROBAN loadgen
# modes. The probability that invocations will read/write NUM_DATA_ENTRIES[i]
# data entries is DISTRIBUTION[i] / (DISTRIBUTION[0] + DISTRIBUTION[1] + ...)
# for each i.
LOADGEN_NUM_DATA_ENTRIES_FOR_TESTING=[]
LOADGEN_NUM_DATA_ENTRIES_DISTRIBUTION_FOR_TESTING=[]
# Total kilobytes of reads and writes per transaction for SOROBAN_INVOKE and
# MIX_CLASSIC_SOROBAN loadgen modes. The probability that transactions will
# perform IO_KILOBYTES[i] of IO is DISTRIBUTION[i] / (DISTRIBUTION[0] +
# DISTRIBUTION[1] + ...) for each i.
LOADGEN_IO_KILOBYTES_FOR_TESTING=[]
LOADGEN_IO_KILOBYTES_DISTRIBUTION_FOR_TESTING=[]
# Transaction size in bytes for SOROBAN_INVOKE and MIX_CLASSIC_SOROBAN loadgen
# modes. The probability that transactions will contain TX_SIZE_BYTES[i] bytes
# is DISTRIBUTION[i] / (DISTRIBUTION[0] + DISTRIBUTION[1] + ...) for each i.
LOADGEN_TX_SIZE_BYTES_FOR_TESTING=[]
LOADGEN_TX_SIZE_BYTES_DISTRIBUTION_FOR_TESTING=[]
# Instructions per transaction for SOROBAN_INVOKE and MIX_CLASSIC_SOROBAN
# loadgen modes. The probability that invocations will execute INSTRUCTIONS[i]
# instructions is DISTRIBUTION[i] / (DISTRIBUTION[0] + DISTRIBUTION[1] + ...)
# for each i.
LOADGEN_INSTRUCTIONS_FOR_TESTING=[]
LOADGEN_INSTRUCTIONS_DISTRIBUTION_FOR_TESTING=[]
# HALT_ON_INTERNAL_TRANSACTION_ERROR defaults to false.
# If set to true, the application will halt when an internal error is
# encountered during applying a transaction. Otherwise, the txINTERNAL_ERROR
# transaction is created but not applied.
# Enabling this is useful for debugging transaction errors caused by
# the core's internal errors, by catching them early.
HALT_ON_INTERNAL_TRANSACTION_ERROR=false
# ENABLE_SOROBAN_DIAGNOSTIC_EVENTS defaults to false.
# If set to true, additional diagnostic Soroban events that are not part
# of the protocol will be generated while applying Soroban transactions.
# These events will be put into a list in the non-hashed portion of the
# meta, and this list will contain all events so ordering can be
# maintained between all events. The default value is false, and this
# cannot be enabled on validators.
ENABLE_SOROBAN_DIAGNOSTIC_EVENTS=false
# ENABLE_DIAGNOSTICS_FOR_TX_SUBMISSION defaults to false.
# If set to true, attach a small diagnostics message in the format of
# Soroban diagnostic event to some transaction submission errors (mainly,
# `txSOROBAN_INVALID` errors). The diagnostics message is guaranteed to be
# small and independent of the user input, so this can be safely enabled
# on validators that accept transactions.
ENABLE_DIAGNOSTICS_FOR_TX_SUBMISSION=false
# TESTING_MINIMUM_PERSISTENT_ENTRY_LIFETIME defaults to 0, which disables the override
# The value must be greater than 0 if set through the config file
# Override the initial hardcoded MINIMUM_PERSISTENT_ENTRY_LIFETIME (4096)
# for testing.
# TESTING_MINIMUM_PERSISTENT_ENTRY_LIFETIME=
# TESTING_SOROBAN_HIGH_LIMIT_OVERRIDE defaults to false.
# Increase all initial max limits to higher values for testing
TESTING_SOROBAN_HIGH_LIMIT_OVERRIDE=false
# OVERRIDE_EVICTION_PARAMS_FOR_TESTING defaults to false.
# Override eviction parameters for testing. If
# OVERRIDE_EVICTION_PARAMS_FOR_TESTING is true, the eviction TESTING_*
# parameters below will be used instead of the default values.
OVERRIDE_EVICTION_PARAMS_FOR_TESTING=false
# TESTING_EVICTION_SCAN_SIZE defaults to 100000
# This is only used if OVERRIDE_EVICTION_PARAMS_FOR_TESTING=true
TESTING_EVICTION_SCAN_SIZE=100000
# TESTING_MAX_ENTRIES_TO_ARCHIVE defaults to 100
# This is only used if OVERRIDE_EVICTION_PARAMS_FOR_TESTING=true
TESTING_MAX_ENTRIES_TO_ARCHIVE=100
# TESTING_STARTING_EVICTION_SCAN_LEVEL defaults to 6
# This is only used if OVERRIDE_EVICTION_PARAMS_FOR_TESTING=true
TESTING_STARTING_EVICTION_SCAN_LEVEL=6
#####################
## Tables must come at the end. (TOML you are almost perfect!)
# HOME_DOMAINS
# list of properties for home domains
# HOME_DOMAIN: string (required) home domain identifier
# QUALITY: string (required) quality of all validators in HOME_DOMAIN
# CRITICAL, HIGH, MEDIUM, LOW
# HIGH quality validators must have archives and redundancy
# CRITICAL quality validators must have archives and redundancy like HIGH,
# but also enforces that this HOME_DOMAIN must be included to achieve consensus
#
[[HOME_DOMAINS]]
HOME_DOMAIN="testnet.stellar.org"
QUALITY="HIGH"
# [[HOME_DOMAINS]]
# HOME_DOMAIN="some-other-domain"
# QUALITY="LOW"
# VALIDATORS
# List of validators used to automatically generate quorum sets
#
# NB: you need to either depend on exactly one entity OR
# have at least 4 entities to have a "safe" configuration
# see below rules for detail.
#
# The quorum set is generated using the following rules:
# validators with the same home domain (representing an entity) are automatically
# grouped together; the threshold used assumes a simple majority (2f+1)
# entities are grouped by QUALITY
# groups are nested such that the group for the quality that precedes a given group
# is added as a backup for the higher quality group.
# i.e.: the top-level group contains the HIGH quality entities and the group
# that contains the MEDIUM quality entities
# heterogeneous groups use a threshold assuming byzantine failure (3f+1)
#
#
# Individual validators can be added in standard form
# NAME: string (required) unique identifier to use to identify a validator
# NAME is added as an alias for PUBLIC_KEY
# QUALITY: string (required*) quality of validator
# all validators must have a quality, either directly (as set by this property)
# or indirectly via HOME_DOMAINS (see HOME_DOMAINS for more detail on QUALITY)
# HOME_DOMAIN: string (required) home domain for validator
# PUBLIC_KEY: string (required) public key associated with a validator
# ADDRESS: string (optional) peer:port associated with a validator
# ADDRESS will be added to the KNOWN_PEERS list
# HISTORY: string (optional) history archive GET command associated
# with a validator
# HISTORY will be added to the list of known archives that can be downloaded from
# Stellar Testnet validators
[[VALIDATORS]]
NAME="sdftest1"
HOME_DOMAIN="testnet.stellar.org"
PUBLIC_KEY="GDKXE2OZMJIPOSLNA6N6F2BVCI3O777I2OOC4BV7VOYUEHYX7RTRYA7Y"
ADDRESS="core-testnet1.stellar.org"
HISTORY="curl -sf http://history.stellar.org/prd/core-testnet/core_testnet_001/{0} -o {1}"
[[VALIDATORS]]
NAME="sdftest2"
HOME_DOMAIN="testnet.stellar.org"
PUBLIC_KEY="GCUCJTIYXSOXKBSNFGNFWW5MUQ54HKRPGJUTQFJ5RQXZXNOLNXYDHRAP"
ADDRESS="core-testnet2.stellar.org"
HISTORY="curl -sf http://history.stellar.org/prd/core-testnet/core_testnet_002/{0} -o {1}"
[[VALIDATORS]]
NAME="sdftest3"
HOME_DOMAIN="testnet.stellar.org"
PUBLIC_KEY="GC2V2EFSXN6SQTWVYA5EPJPBWWIMSD2XQNKUOHGEKB535AQE2I6IXV2Z"
ADDRESS="core-testnet3.stellar.org"
HISTORY="curl -sf http://history.stellar.org/prd/core-testnet/core_testnet_003/{0} -o {1}"
# HISTORY
# Used to specify where to fetch and store the history archives.
# Fetching and storing history is kept as general as possible.
# Any place you can save and load static files from should be usable by the
# stellar-core history system. s3, the file system, http, etc
# stellar-core will call any external process you specify and will pass it the
# name of the file to save or load.
# Simply use template parameters `{0}` and `{1}` in place of the files being transmitted or retrieved.
# You can specify multiple places to store and fetch from. stellar-core will
# use multiple fetching locations as backup in case there is a failure fetching from one.
#
# Note: for any archive you *put* to, you must run `$ stellar-core new-hist <historyarchive>`
# once before you start.
# For example, with this config you would run: $ stellar-core new-hist local
# This creates a `local` archive on the local drive
# NB: this is an example, in general you should probably not do this as
# archives grow indefinitely
[HISTORY.local]
get="cp /var/lib/stellar-core/history/vs/{0} {1}"
put="cp {0} /var/lib/stellar-core/history/vs/{1}"
mkdir="mkdir -p /var/lib/stellar-core/history/vs/{0}"
# other examples:
# [HISTORY.stellar]
# get="curl http://history.stellar.org/{0} -o {1}"
# put="aws s3 cp {0} s3://history.stellar.org/{1}"
# [HISTORY.backup]
# get="curl http://backupstore.blob.core.windows.net/backupstore/{0} -o {1}"
# put="azure storage blob upload {0} backupstore {1}"
#The history store of the Stellar testnet
#[HISTORY.h1]
#get="curl -sf http://history.stellar.org/prd/core-testnet/core_testnet_001/{0} -o {1}"
#[HISTORY.h2]
#get="curl -sf http://history.stellar.org/prd/core-testnet/core_testnet_002/{0} -o {1}"
#[HISTORY.h3]
#get="curl -sf http://history.stellar.org/prd/core-testnet/core_testnet_003/{0} -o {1}"
# QUORUM_SET (optional)
# This is how you specify this server's quorum set manually
#
# *** this section is for advanced users and exists mostly for historical reasons ***
# the preferred way to configure your quorum set is to use instead [[VALIDATORS]]
#
# It can be nested up to 2 levels: {A,B,C,{D,E,F},{G,H,{I,J,K,L}}}
# THRESHOLD_PERCENT is how many have to agree (1-100%) within a given set.
# Each set is treated as one vote.
# So for example in the above there are 5 things that can vote:
# individual validators: A,B,C, and the sets {D,E,F} and {G,H with subset {I,J,K,L}}
# the sets each have their own threshold.
# For example, {100% G,H with subset {50% I,J,K,L}}
# means that quorum will be met with G, H and any 2 (50%) of {I, J, K, L}
#
# a [QUORUM_SET.path] section is constructed as
# THRESHOLD_PERCENT: how many have to agree, defaults to 67 (rounds up).
# VALIDATORS: array of node IDs
# additional subsets [QUORUM_SET.path.item_number]
# a QUORUM_SET
# must not contain duplicate entries {{A,B},{A,C}} is invalid for example
# The key for "self" is implicitly added at the top level, so the effective
# quorum set is [t:2, self, QUORUM_SET]. Note that "self" is always agreeing
# with the instance (if QUORUM_SET includes it)
#
# The following setup is equivalent to the example given above.
#
# Note on naming: you can add common names to the NAMED_NODES list here as
# shown in the first 3 validators or use common names that have been
# previously defined.
# [QUORUM_SET]
# THRESHOLD_PERCENT=66
# VALIDATORS=[
# "GDQWITFJLZ5HT6JCOXYEVV5VFD6FTLAKJAUDKHAV3HKYGVJWA2DPYSQV A_from_above",
# "GANLKVE4WOTE75MJS6FQ73CL65TSPYYMFZKC4VDEZ45LGQRCATGAIGIA B_from_above",
# "GDV46EIEF57TDL4W27UFDAUVPDDCKJNVBYB3WIV2WYUYUG753FCFU6EJ C_from_above"
# ]
#
# [QUORUM_SET.1]
# THRESHOLD_PERCENT=67
# VALIDATORS=[
# "$self", # 'D' from above is this node
# "GDXJAZZJ3H5MJGR6PDQX3JHRREAVYNCVM7FJYGLZJKEHQV2ZXEUO5SX2 E_from_above",
# "GB6GK3WWTZYY2JXWM6C5LRKLQ2X7INQ7IYTSECCG3SMZFYOZNEZR4SO5 F_from_above"
# ]
#
# [QUORUM_SET.2]
# THRESHOLD_PERCENT=100
# VALIDATORS=[
# "GCTAIXWDDBM3HBDHGSAOLY223QZHPS2EDROF7YUBB3GNYXLOCPV5PXUK G_from_above",
# "GCJ6UBAOXNQFN3HGLCVQBWGEZO6IABSMNE2OCQC4FJAZXJA5AIE7WSPW H_from_above"
# ]
#
# [QUORUM_SET.2.1]
# THRESHOLD_PERCENT=50
# VALIDATORS=[
# "GC4X65TQJVI3OWAS4DTA2EN2VNZ5ZRJD646H5WKEJHO5ZHURDRAX2OTH I_from_above",
# "GAXSWUO4RBELRQT5WMDLIKTRIKC722GGXX2GIGEYQZDQDLOTINQ4DX6F J_from_above",
# "GAWOEMG7DQDWHCFDTPJEBYWRKUUZTX2M2HLMNABM42G7C7IAPU54GL6X K_from_above",
# "GDZAJNUUDJFKTZX3YWZSOAS4S4NGCJ5RQAY7JPYBG5CUFL3JZ5C3ECOH L_from_above"
# ]