world>!")
+// fmt.Println(text)
+func Render(a ...any) string {
+ if len(a) == 0 {
+ return ""
+ }
+ return ReplaceTag(fmt.Sprint(a...))
+}
+
+// Sprint parses color tags and returns the rendered string
+func Sprint(a ...any) string {
+ if len(a) == 0 {
+ return ""
+ }
+ return ReplaceTag(fmt.Sprint(a...))
+}
+
+// Sprintf formats the arguments and returns the rendered string
+func Sprintf(format string, a ...any) string {
+ return ReplaceTag(fmt.Sprintf(format, a...))
+}
+
+// String is an alias of ReplaceTag
+func String(s string) string { return ReplaceTag(s) }
+
+// Text is an alias of ReplaceTag
+func Text(s string) string { return ReplaceTag(s) }
+
+// Uint8sToInts convert []uint8 to []int
+// func Uint8sToInts(u8s []uint8 ) []int {
+// ints := make([]int, len(u8s))
+// for i, u8 := range u8s {
+// ints[i] = int(u8)
+// }
+// return ints
+// }
+
+/*************************************************************
+ * helper methods for print
+ *************************************************************/
+
+// doPrintV2 is the new implementation; it supports rendering full color codes on pwsh.exe and cmd.exe
+func doPrintV2(code, str string) {
+ _, err := fmt.Fprint(output, RenderString(code, str))
+ saveInternalError(err)
+}
+
+// doPrintlnV2 is the new implementation; it supports rendering full color codes on pwsh.exe and cmd.exe
+func doPrintlnV2(code string, args []any) {
+ str := formatArgsForPrintln(args)
+ _, err := fmt.Fprintln(output, RenderString(code, str))
+ saveInternalError(err)
+}
+
+// formatArgsForPrintln formats args in the style of Println, which adds a space between each arg
+func formatArgsForPrintln(args []any) (message string) {
+ if ln := len(args); ln == 0 {
+ message = ""
+ } else if ln == 1 {
+ message = fmt.Sprint(args[0])
+ } else {
+ message = fmt.Sprintln(args...)
+ // clear last "\n"
+ message = message[:len(message)-1]
+ }
+ return
+}
+
+/*************************************************************
+ * helper methods
+ *************************************************************/
+
+// is on debug mode
+// func isDebugMode() bool {
+// return debugMode == "on"
+// }
+
+func debugf(f string, v ...any) {
+ if debugMode {
+ fmt.Print("COLOR_DEBUG: ")
+ fmt.Printf(f, v...)
+ fmt.Println()
+ }
+}
+
+// isValidUint8 reports whether val is within the uint8 range (0-255)
+func isValidUint8(val int) bool {
+ return val >= 0 && val < 256
+}
+
+// equals: return ok ? val1 : val2
+func compareVal(ok bool, val1, val2 uint8) uint8 {
+ if ok {
+ return val1
+ }
+ return val2
+}
+
+// equals: return ok ? val1 : val2
+func compareF64Val(ok bool, val1, val2 float64) float64 {
+ if ok {
+ return val1
+ }
+ return val2
+}
+
+func saveInternalError(err error) {
+ if err != nil {
+ debugf("inner error: %s", err.Error())
+ innerErrs = append(innerErrs, err)
+ }
+}
+
+func stringToArr(str, sep string) (arr []string) {
+ str = strings.TrimSpace(str)
+ if str == "" {
+ return
+ }
+
+ ss := strings.Split(str, sep)
+ for _, val := range ss {
+ if val = strings.TrimSpace(val); val != "" {
+ arr = append(arr, val)
+ }
+ }
+ return
+}
diff --git a/vendor/github.com/jacobbrewer1/patcher/.clog.toml b/vendor/github.com/jacobbrewer1/patcher/.clog.toml
new file mode 100644
index 0000000..c3585b6
--- /dev/null
+++ b/vendor/github.com/jacobbrewer1/patcher/.clog.toml
@@ -0,0 +1,33 @@
+[clog]
+# A repository link with the trailing '.git' which will be used to generate
+# all commit and issue links
+repository = "https://github.com/jacobbrewer1/patcher"
+
+# A constant release title
+subtitle = "What's Changed"
+
+# specify the style of commit links to generate, defaults to "github" if omitted
+link-style = "github"
+
+# This sets an output file only! If it exists already, new changelog data will be
+# prepended, if not it will be created.
+#
+# This is useful in conjunction with the infile field if you have a separate file
+# that you would like to append after newly created clog data
+#
+# Defaults to stdout when omitted
+outfile = "changelog.md"
+
+# This sets the output format. There are two options "json" or "markdown" and
+# defaults to "markdown" when omitted
+output-format = "markdown"
+
+# If you use tags, you can set the following if you wish to only pick
+# up changes since your latest tag
+from-latest-tag = true
+
+[sections]
+Refactors = ["refactor"]
+Chores = ["chore"]
+Documentation = ["doc", "docs"]
+Features = ["feat", "feature"]
diff --git a/vendor/github.com/jacobbrewer1/patcher/.gitignore b/vendor/github.com/jacobbrewer1/patcher/.gitignore
new file mode 100644
index 0000000..cb38dbf
--- /dev/null
+++ b/vendor/github.com/jacobbrewer1/patcher/.gitignore
@@ -0,0 +1,37 @@
+# If you prefer the allow list template instead of the deny list, see community template:
+# https://github.com/github/gitignore/blob/main/community/Golang/Go.AllowList.gitignore
+#
+# Binaries for programs and plugins
+*.exe
+*.exe~
+*.dll
+*.so
+*.dylib
+
+# Test binary, built with `go test -c`
+*.test
+
+# Output of the go coverage tool, specifically when used with LiteIDE
+*.out
+
+# Dependency directories (remove the comment below to include it)
+# vendor/
+
+# Go workspace file
+go.work
+go.work.sum
+
+# env file
+.env
+
+# Intellij Idea
+.idea/
+
+# Ignore all config files
+***/config.json
+
+# Ignore all binary files
+**/bin/
+
+# Ignore all tar files
+**/*.tar
diff --git a/vendor/github.com/jacobbrewer1/patcher/.mockery.yaml b/vendor/github.com/jacobbrewer1/patcher/.mockery.yaml
new file mode 100644
index 0000000..7ebfedf
--- /dev/null
+++ b/vendor/github.com/jacobbrewer1/patcher/.mockery.yaml
@@ -0,0 +1,5 @@
+all: true
+inpackage: true
+recursive: true
+exclude: vendor
+disable-version-string: true
diff --git a/vendor/github.com/jacobbrewer1/patcher/CODE_OF_CONDUCT.md b/vendor/github.com/jacobbrewer1/patcher/CODE_OF_CONDUCT.md
new file mode 100644
index 0000000..3f69306
--- /dev/null
+++ b/vendor/github.com/jacobbrewer1/patcher/CODE_OF_CONDUCT.md
@@ -0,0 +1,128 @@
+# Contributor Covenant Code of Conduct
+
+## Our Pledge
+
+We as members, contributors, and leaders pledge to make participation in our
+community a harassment-free experience for everyone, regardless of age, body
+size, visible or invisible disability, ethnicity, sex characteristics, gender
+identity and expression, level of experience, education, socio-economic status,
+nationality, personal appearance, race, religion, or sexual identity
+and orientation.
+
+We pledge to act and interact in ways that contribute to an open, welcoming,
+diverse, inclusive, and healthy community.
+
+## Our Standards
+
+Examples of behavior that contributes to a positive environment for our
+community include:
+
+* Demonstrating empathy and kindness toward other people
+* Being respectful of differing opinions, viewpoints, and experiences
+* Giving and gracefully accepting constructive feedback
+* Accepting responsibility and apologizing to those affected by our mistakes,
+ and learning from the experience
+* Focusing on what is best not just for us as individuals, but for the
+ overall community
+
+Examples of unacceptable behavior include:
+
+* The use of sexualized language or imagery, and sexual attention or
+ advances of any kind
+* Trolling, insulting or derogatory comments, and personal or political attacks
+* Public or private harassment
+* Publishing others' private information, such as a physical or email
+ address, without their explicit permission
+* Other conduct which could reasonably be considered inappropriate in a
+ professional setting
+
+## Enforcement Responsibilities
+
+Community leaders are responsible for clarifying and enforcing our standards of
+acceptable behavior and will take appropriate and fair corrective action in
+response to any behavior that they deem inappropriate, threatening, offensive,
+or harmful.
+
+Community leaders have the right and responsibility to remove, edit, or reject
+comments, commits, code, wiki edits, issues, and other contributions that are
+not aligned to this Code of Conduct, and will communicate reasons for moderation
+decisions when appropriate.
+
+## Scope
+
+This Code of Conduct applies within all community spaces, and also applies when
+an individual is officially representing the community in public spaces.
+Examples of representing our community include using an official e-mail address,
+posting via an official social media account, or acting as an appointed
+representative at an online or offline event.
+
+## Enforcement
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be
+reported to the community leaders responsible for enforcement at
+Jacob.brewer@bthree.uk.
+All complaints will be reviewed and investigated promptly and fairly.
+
+All community leaders are obligated to respect the privacy and security of the
+reporter of any incident.
+
+## Enforcement Guidelines
+
+Community leaders will follow these Community Impact Guidelines in determining
+the consequences for any action they deem in violation of this Code of Conduct:
+
+### 1. Correction
+
+**Community Impact**: Use of inappropriate language or other behavior deemed
+unprofessional or unwelcome in the community.
+
+**Consequence**: A private, written warning from community leaders, providing
+clarity around the nature of the violation and an explanation of why the
+behavior was inappropriate. A public apology may be requested.
+
+### 2. Warning
+
+**Community Impact**: A violation through a single incident or series
+of actions.
+
+**Consequence**: A warning with consequences for continued behavior. No
+interaction with the people involved, including unsolicited interaction with
+those enforcing the Code of Conduct, for a specified period of time. This
+includes avoiding interactions in community spaces as well as external channels
+like social media. Violating these terms may lead to a temporary or
+permanent ban.
+
+### 3. Temporary Ban
+
+**Community Impact**: A serious violation of community standards, including
+sustained inappropriate behavior.
+
+**Consequence**: A temporary ban from any sort of interaction or public
+communication with the community for a specified period of time. No public or
+private interaction with the people involved, including unsolicited interaction
+with those enforcing the Code of Conduct, is allowed during this period.
+Violating these terms may lead to a permanent ban.
+
+### 4. Permanent Ban
+
+**Community Impact**: Demonstrating a pattern of violation of community
+standards, including sustained inappropriate behavior, harassment of an
+individual, or aggression toward or disparagement of classes of individuals.
+
+**Consequence**: A permanent ban from any sort of public interaction within
+the community.
+
+## Attribution
+
+This Code of Conduct is adapted from the [Contributor Covenant][homepage],
+version 2.0, available at
+https://www.contributor-covenant.org/version/2/0/code_of_conduct.html.
+
+Community Impact Guidelines were inspired by [Mozilla's code of conduct
+enforcement ladder](https://github.com/mozilla/diversity).
+
+[homepage]: https://www.contributor-covenant.org
+
+For answers to common questions about this code of conduct, see the FAQ at
+https://www.contributor-covenant.org/faq. Translations are available at
+https://www.contributor-covenant.org/translations.
diff --git a/vendor/github.com/jacobbrewer1/patcher/LICENSE b/vendor/github.com/jacobbrewer1/patcher/LICENSE
new file mode 100644
index 0000000..261eeb9
--- /dev/null
+++ b/vendor/github.com/jacobbrewer1/patcher/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/jacobbrewer1/patcher/Makefile b/vendor/github.com/jacobbrewer1/patcher/Makefile
new file mode 100644
index 0000000..eeaa30f
--- /dev/null
+++ b/vendor/github.com/jacobbrewer1/patcher/Makefile
@@ -0,0 +1,11 @@
+# Define variables
+hash = $(shell git rev-parse --short HEAD)
+DATE = $(shell date -u +'%Y-%m-%dT%H:%M:%SZ')
+
+pr-approval:
+ @echo "Running PR CI"
+ go build ./...
+ go vet ./...
+ go test ./...
+codegen:
+ go generate ./...
diff --git a/vendor/github.com/jacobbrewer1/patcher/README.md b/vendor/github.com/jacobbrewer1/patcher/README.md
new file mode 100644
index 0000000..a12d3f2
--- /dev/null
+++ b/vendor/github.com/jacobbrewer1/patcher/README.md
@@ -0,0 +1,317 @@
+# Patcher
+
+Patcher is a Go library that provides a simple way to generate SQL patches from structs. The library was built out
+of the need to generate patches for a database: whenever a new field was added to a struct, a new set of `if` checks
+had to be written in the codebase. This library solves that problem by generating the SQL patches for you.
+
+## What is Patcher?
+
+* **Automatic SQL Generation**: It automatically generates SQL UPDATE queries from structs, reducing the need for
+  manually writing and maintaining SQL statements.
+* **Code Simplification**: It reduces the amount of boilerplate code and if-else conditions required to handle
+  different struct fields, making the codebase cleaner and easier to maintain.
+* **Struct Diffs**: It allows injecting changes from one struct to another and generating update scripts based on
+  differences, streamlining the process of synchronizing data changes.
+* **Join Support**: It supports generating SQL joins by creating structs that implement the Joiner interface,
+  simplifying the process of managing related data across multiple tables.
+
+## Usage
+
+### Configuration
+
+#### LoadDiff Options
+
+* `includeZeroValues`: Set to true to include zero values in the diff.
+* `includeNilValues`: Set to true to include nil values in the diff.
+
+#### GenerateSQL Options
+
+* `WithTable(tableName string)`: Specify the table name for the SQL query.
+* `WithWhere(whereClause WhereTyper)`: Provide a where clause for the SQL query.
+* `WithJoin(joinClause Joiner)`: Add join clauses to the SQL query.
+* `includeZeroValues`: Set to true to include zero values in the diff. (Only for NewDiffSQLPatch)
+* `includeNilValues`: Set to true to include nil values in the diff. (Only for NewDiffSQLPatch)
+
+### Basic Examples
+
+#### Basic
+
+To use the library, you need to create a struct that represents the table you want to generate patches for. The struct
+should have the following tags:
+
+- `db:"column_name"`: This tag is used to specify the column name in the database.
+
+Example:
+
+```go
+package main
+
+import (
+ "encoding/json"
+ "fmt"
+
+ "github.com/jacobbrewer1/patcher"
+)
+
+type Person struct {
+ ID *int `db:"-"`
+ Name *string `db:"name"`
+}
+
+type PersonWhere struct {
+ ID *int `db:"id"`
+}
+
+func NewPersonWhere(id int) *PersonWhere {
+ return &PersonWhere{
+ ID: &id,
+ }
+}
+
+func (p *PersonWhere) Where() (string, []any) {
+ return "id = ?", []any{*p.ID}
+}
+
+func main() {
+ const jsonStr = `{"id": 1, "name": "john"}`
+
+ person := new(Person)
+ if err := json.Unmarshal([]byte(jsonStr), person); err != nil {
+ panic(err)
+ }
+
+ condition := NewPersonWhere(*person.ID)
+
+ sqlStr, args, err := patcher.GenerateSQL(
+ person,
+ patcher.WithTable("people"),
+ patcher.WithWhere(condition),
+ )
+ if err != nil {
+ panic(err)
+ }
+
+ fmt.Println(sqlStr)
+
+ fmt.Println(args)
+}
+
+```
+
+This will output:
+
+```sql
+UPDATE people
+SET name = ?
+WHERE (1 = 1)
+ AND (
+ id = ?
+ )
+```
+
+with the args:
+
+```
+["john", 1]
+```
+
+#### Struct diffs
+
+The Patcher library can inject the changes from one struct into another. This is configurable to include zero values
+and nil values if requested; please see the example [here](./examples/loader_with_opts) for the details. Below is an
+example of how you can use this method with the default behaviour (please see the comment attached to the
+`LoadDiff` [method](./loader.go) for the default behaviour).
+
+Example:
+
+```go
+package main
+
+import (
+ "fmt"
+
+ "github.com/jacobbrewer1/patcher"
+)
+
+type Something struct {
+ Number int
+ Text string
+ PrePopulated string
+ NewText string
+}
+
+func main() {
+ s := Something{
+ Number: 5,
+ Text: "Hello",
+ PrePopulated: "PrePopulated",
+ }
+
+ n := Something{
+ Number: 6,
+ Text: "Old Text",
+ NewText: "New Text",
+ }
+
+ // The patcher.LoadDiff function will apply the changes from n to s.
+ if err := patcher.LoadDiff(&s, &n); err != nil {
+ panic(err)
+ }
+
+ fmt.Println(s.Number)
+ fmt.Println(s.Text)
+ fmt.Println(s.PrePopulated)
+ fmt.Println(s.NewText)
+}
+
+```
+
+This will output:
+
+```
+6
+Hello
+PrePopulated
+New Text
+```
+
+If you would like to generate an update script from two structs, you can use the `NewDiffSQLPatch` function. This
+function will generate an update script from the two structs.
+
+Example:
+
+```go
+package main
+
+import (
+ "fmt"
+
+ "github.com/jacobbrewer1/patcher"
+)
+
+type Something struct {
+ Number int
+ Text string
+ PrePopulated string
+ NewText string
+}
+
+type SomeWhere struct {
+ id int
+}
+
+func NewSomeWhere(id int) *SomeWhere {
+ return &SomeWhere{id: id}
+}
+
+func (s *SomeWhere) Where() (string, []any) {
+ return "id = ?", []any{s.id}
+}
+
+func main() {
+ s := Something{
+ Number: 5,
+ Text: "Old Text",
+ PrePopulated: "PrePopulated",
+ NewText: "New Text",
+ }
+
+ n := Something{
+ Number: 5,
+ Text: "Old Text",
+ PrePopulated: "PrePopulatedDifferent",
+ NewText: "New Text",
+ }
+
+ wherer := NewSomeWhere(5)
+
+ // The patcher.LoadDiff function will apply the changes from n to s.
+ patch, err := patcher.NewDiffSQLPatch(
+ &s,
+ &n,
+ patcher.WithTable("table_name"),
+ patcher.WithWhere(wherer),
+ )
+ if err != nil {
+ panic(err)
+ }
+
+ sqlStr, sqlArgs, err := patch.GenerateSQL()
+ if err != nil {
+ panic(err)
+ }
+
+ fmt.Println(sqlStr)
+ fmt.Println(sqlArgs)
+}
+
+```
+
+This will output:
+
+```sql
+UPDATE table_name
+SET pre_populated = ?
+WHERE (1 = 1)
+ AND (
+ id = ?
+ )
+```
+
+with the args:
+
+```
+["PrePopulatedDifferent", 5]
+```
+
+You can also take a look at the Loader [examples](./examples) for more ways to use the library with this
+approach.
+
+#### Using `OR` in the where clause
+
+If you would like to use `OR` in the where clause, you can implement the `patcher.WhereTyper` interface on your where
+struct, as sketched below. Please take a look at the [example here](./examples/where_type).
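+
+As a minimal sketch (this assumes an `OR` counterpart to the default `WhereTypeAnd`, called `patcher.WhereTypeOr` here; check the linked example for the exact constant), a where struct that opts into `OR` could look like this:
+
+```go
+type UserWhere struct {
+	email string
+}
+
+func (u *UserWhere) Where() (string, []any) {
+	return "email = ?", []any{u.email}
+}
+
+// WhereType tells the patcher to combine this clause with OR rather than the default AND.
+func (u *UserWhere) WhereType() patcher.WhereType {
+	return patcher.WhereTypeOr // assumed constant name
+}
+```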
+
+### Joins
+
+To generate a join, you need to create a struct that represents the join. This struct should implement
+the [Joiner](./joiner.go) interface.
+
+Once you have the join struct, you can pass it to the `GenerateSQL` function using the `WithJoin` option, as sketched
+below. You can add as many joins as you would like.
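+
+As an illustrative sketch only (the `contacts` table, its columns, and the aliases below are made-up names for this example, not part of the library), a join struct might look like this:
+
+```go
+type ContactJoin struct {
+	userID int
+}
+
+// Join returns the join clause and its arguments, satisfying the Joiner interface.
+func (c *ContactJoin) Join() (string, []any) {
+	return "JOIN contacts c ON c.person_id = p.id AND c.person_id = ?", []any{c.userID}
+}
+```
+
+The join can then be passed to `GenerateSQL` alongside the other options, for example `patcher.WithJoin(&ContactJoin{userID: 1})`.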
+
+## Installation
+
+To install the Patcher library, use the following command:
+
+```sh
+go get github.com/jacobbrewer1/patcher
+```
+
+## Examples
+
+You can find examples of how to use this library in the [examples](./examples) directory.
+
+## Contributing
+
+We welcome contributions! Please follow these steps to contribute:
+
+1. Fork the repository.
+2. Create a new branch for your feature or bugfix.
+3. Write tests for your changes.
+4. Run the tests to ensure everything works.
+5. Submit a pull request.
+
+To run tests, use the following command:
+
+```sh
+go test ./...
+```
+
+## License
+
+This project is licensed under the Apache 2.0 License - see the [LICENSE](./LICENSE) file for details.
diff --git a/vendor/github.com/jacobbrewer1/patcher/gen.go b/vendor/github.com/jacobbrewer1/patcher/gen.go
new file mode 100644
index 0000000..6242be4
--- /dev/null
+++ b/vendor/github.com/jacobbrewer1/patcher/gen.go
@@ -0,0 +1,3 @@
+package patcher
+
+//go:generate go run -mod=mod github.com/vektra/mockery/v2
diff --git a/vendor/github.com/jacobbrewer1/patcher/inserter/README.md b/vendor/github.com/jacobbrewer1/patcher/inserter/README.md
new file mode 100644
index 0000000..60e567c
--- /dev/null
+++ b/vendor/github.com/jacobbrewer1/patcher/inserter/README.md
@@ -0,0 +1,85 @@
+# Inserter Package
+
+The `inserter` package provides functionality to insert data into a database using Go. It is designed to be flexible and easy to use.
+
+## Installation
+
+To install the `inserter` package, use the following command:
+
+```sh
+go get github.com/jacobbrewer1/patcher/inserter
+```
+
+## Usage
+
+Here is an example of how to use the inserter package:
+
+```go
+package main
+
+import (
+ "fmt"
+
+ "github.com/jacobbrewer1/patcher/inserter"
+)
+
+type User struct {
+ ID int `db:"id,pk,autoinc"` // pk = primary key (This field will be ignored by default by the inserter package), autoinc = auto increment
+ Name string `db:"name"`
+ Email string `db:"email"`
+}
+
+func main() {
+ user := User{
+ Name: "John Doe",
+ Email: "john.doe@example.com",
+ }
+
+ sql, args, err := inserter.NewBatch([]any{user}, inserter.WithTable("users")).GenerateSQL()
+ if err != nil {
+ panic(err)
+ }
+
+ fmt.Println(sql)
+ fmt.Println(args)
+}
+
+```
+
+This will output the following:
+
+```SQL
+INSERT INTO users (name, email) VALUES (?, ?)
+```
+
+with the following arguments:
+
+```
+["John Doe", "john.doe@example.com"]
+```
+
+## Configuration Options
+
+### GenerateInsertSQL Options
+
+* `WithTable(tableName string)`: Specify the table name for the SQL query.
+
+## Contributing
+
+We welcome contributions! Please follow these steps to contribute:
+
+1. Fork the repository.
+2. Create a new branch for your feature or bugfix.
+3. Write tests for your changes.
+4. Run the tests to ensure everything works.
+5. Submit a pull request.
+
+To run tests, use the following command:
+
+```sh
+go test ./...
+```
+
+## License
+
+This project is licensed under the Apache 2.0 License. See the [LICENSE](../LICENSE) file for details.
diff --git a/vendor/github.com/jacobbrewer1/patcher/inserter/batch.go b/vendor/github.com/jacobbrewer1/patcher/inserter/batch.go
new file mode 100644
index 0000000..51128c3
--- /dev/null
+++ b/vendor/github.com/jacobbrewer1/patcher/inserter/batch.go
@@ -0,0 +1,113 @@
+package inserter
+
+import (
+ "database/sql"
+ "errors"
+
+ "github.com/jacobbrewer1/patcher"
+)
+
+var (
+ // ErrNoDatabaseConnection is returned when no database connection is set
+ ErrNoDatabaseConnection = errors.New("no database connection set")
+
+ // ErrNoTable is returned when no table is set
+ ErrNoTable = errors.New("no table set")
+
+ // ErrNoFields is returned when no fields are set
+ ErrNoFields = errors.New("no fields set")
+
+ // ErrNoArgs is returned when no arguments are set
+ ErrNoArgs = errors.New("no arguments set")
+)
+
+type SQLBatch struct {
+ // fields is the fields to update in the SQL statement
+ fields []string
+
+ // args is the arguments to use in the SQL statement
+ args []any
+
+ // db is the database connection to use
+ db *sql.DB
+
+ // tagName is the tag name to look for in the struct. This is an override from the default tag "db"
+ tagName string
+
+ // table is the table name to use in the SQL statement
+ table string
+
+ // ignoreFields is a list of fields to ignore when patching
+ ignoreFields []string
+
+ // ignoreFieldsFunc is a function that determines whether a field should be ignored
+ //
+ // This func should return true if the field is to be ignored
+ ignoreFieldsFunc patcher.IgnoreFieldsFunc
+
+ // includePrimaryKey determines whether the primary key should be included in the insert
+ includePrimaryKey bool
+}
+
+// newBatchDefaults returns a new SQLBatch with default values
+func newBatchDefaults(opts ...BatchOpt) *SQLBatch {
+ b := &SQLBatch{
+ fields: make([]string, 0),
+ args: make([]any, 0),
+ db: nil,
+ tagName: patcher.DefaultDbTagName,
+ table: "",
+ includePrimaryKey: false,
+ }
+
+ for _, opt := range opts {
+ opt(b)
+ }
+
+ return b
+}
+
+func (b *SQLBatch) Fields() []string {
+ if len(b.fields) == 0 {
+ // Default behaviour to return nil if no fields are set
+ return nil
+ }
+ return b.fields
+}
+
+func (b *SQLBatch) Args() []any {
+ if len(b.args) == 0 {
+ // Default behaviour to return nil if no args are set
+ return nil
+ }
+ return b.args
+}
+
+func (b *SQLBatch) validateSQLGen() error {
+ if b.table == "" {
+ return ErrNoTable
+ }
+ if len(b.fields) == 0 {
+ return ErrNoFields
+ }
+ if len(b.args) == 0 {
+ return ErrNoArgs
+ }
+ return nil
+}
+
+func (b *SQLBatch) validateSQLInsert() error {
+ if b.db == nil {
+ return ErrNoDatabaseConnection
+ }
+ if b.table == "" {
+ return ErrNoTable
+ }
+ if len(b.fields) == 0 {
+ return ErrNoFields
+ }
+ if len(b.args) == 0 {
+ return ErrNoArgs
+ }
+ return nil
+}
diff --git a/vendor/github.com/jacobbrewer1/patcher/inserter/batch_opts.go b/vendor/github.com/jacobbrewer1/patcher/inserter/batch_opts.go
new file mode 100644
index 0000000..53984bd
--- /dev/null
+++ b/vendor/github.com/jacobbrewer1/patcher/inserter/batch_opts.go
@@ -0,0 +1,51 @@
+package inserter
+
+import (
+ "database/sql"
+
+ "github.com/jacobbrewer1/patcher"
+)
+
+type BatchOpt func(*SQLBatch)
+
+// WithTagName sets the tag name to look for in the struct. This is an override from the default tag "db"
+func WithTagName(tagName string) BatchOpt {
+ return func(b *SQLBatch) {
+ b.tagName = tagName
+ }
+}
+
+// WithTable sets the table name to use in the SQL statement
+func WithTable(table string) BatchOpt {
+ return func(b *SQLBatch) {
+ b.table = table
+ }
+}
+
+// WithDB sets the database connection to use
+func WithDB(db *sql.DB) BatchOpt {
+ return func(b *SQLBatch) {
+ b.db = db
+ }
+}
+
+// WithIgnoreFields sets the fields to ignore when patching
+func WithIgnoreFields(fields ...string) BatchOpt {
+ return func(b *SQLBatch) {
+ b.ignoreFields = fields
+ }
+}
+
+// WithIgnoreFieldsFunc sets the function that determines whether a field should be ignored
+func WithIgnoreFieldsFunc(f patcher.IgnoreFieldsFunc) BatchOpt {
+ return func(b *SQLBatch) {
+ b.ignoreFieldsFunc = f
+ }
+}
+
+// WithIncludePrimaryKey determines whether the primary key should be included in the insert
+func WithIncludePrimaryKey() BatchOpt {
+ return func(b *SQLBatch) {
+ b.includePrimaryKey = true
+ }
+}
diff --git a/vendor/github.com/jacobbrewer1/patcher/inserter/gen.go b/vendor/github.com/jacobbrewer1/patcher/inserter/gen.go
new file mode 100644
index 0000000..7705cc8
--- /dev/null
+++ b/vendor/github.com/jacobbrewer1/patcher/inserter/gen.go
@@ -0,0 +1,3 @@
+package inserter
+
+//go:generate go run -mod=mod github.com/vektra/mockery/v2
diff --git a/vendor/github.com/jacobbrewer1/patcher/inserter/mock_BatchOpt.go b/vendor/github.com/jacobbrewer1/patcher/inserter/mock_BatchOpt.go
new file mode 100644
index 0000000..20a9c45
--- /dev/null
+++ b/vendor/github.com/jacobbrewer1/patcher/inserter/mock_BatchOpt.go
@@ -0,0 +1,29 @@
+// Code generated by mockery. DO NOT EDIT.
+
+package inserter
+
+import mock "github.com/stretchr/testify/mock"
+
+// MockBatchOpt is an autogenerated mock type for the BatchOpt type
+type MockBatchOpt struct {
+ mock.Mock
+}
+
+// Execute provides a mock function with given fields: _a0
+func (_m *MockBatchOpt) Execute(_a0 *SQLBatch) {
+ _m.Called(_a0)
+}
+
+// NewMockBatchOpt creates a new instance of MockBatchOpt. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
+// The first argument is typically a *testing.T value.
+func NewMockBatchOpt(t interface {
+ mock.TestingT
+ Cleanup(func())
+}) *MockBatchOpt {
+ mock := &MockBatchOpt{}
+ mock.Mock.Test(t)
+
+ t.Cleanup(func() { mock.AssertExpectations(t) })
+
+ return mock
+}
diff --git a/vendor/github.com/jacobbrewer1/patcher/inserter/sql.go b/vendor/github.com/jacobbrewer1/patcher/inserter/sql.go
new file mode 100644
index 0000000..44b6960
--- /dev/null
+++ b/vendor/github.com/jacobbrewer1/patcher/inserter/sql.go
@@ -0,0 +1,197 @@
+package inserter
+
+import (
+ "database/sql"
+ "fmt"
+ "reflect"
+ "slices"
+ "strings"
+
+ "github.com/jacobbrewer1/patcher"
+)
+
+func NewBatch(resources []any, opts ...BatchOpt) *SQLBatch {
+ b := newBatchDefaults(opts...)
+
+ for _, opt := range opts {
+ opt(b)
+ }
+
+ b.genBatch(resources)
+
+ return b
+}
+
+func (b *SQLBatch) genBatch(resources []any) {
+ uniqueFields := make(map[string]struct{})
+
+ for _, r := range resources {
+ // get the type of the resource
+ t := reflect.TypeOf(r)
+ if t.Kind() == reflect.Ptr {
+ t = t.Elem()
+ }
+
+ // Is the type a struct?
+ if t.Kind() != reflect.Struct {
+ continue
+ }
+
+ // get the value of the resource
+ v := reflect.ValueOf(r)
+ if v.Kind() == reflect.Ptr {
+ v = v.Elem()
+ }
+
+ // get the fields
+ for i := 0; i < t.NumField(); i++ {
+ f := t.Field(i)
+ tag := f.Tag.Get(b.tagName)
+ if tag == patcher.TagOptSkip {
+ continue
+ }
+
+ tags := strings.Split(tag, patcher.TagOptSeparator)
+ if len(tags) > 1 {
+ tag = tags[0]
+ }
+
+ // Skip unexported fields
+ if !f.IsExported() {
+ continue
+ }
+
+ // Skip fields that are to be ignored
+ if b.checkSkipField(f) {
+ continue
+ }
+
+ patcherOptsTag := f.Tag.Get(patcher.TagOptsName)
+ if patcherOptsTag != "" {
+ patcherOpts := strings.Split(patcherOptsTag, patcher.TagOptSeparator)
+ if slices.Contains(patcherOpts, patcher.TagOptSkip) {
+ continue
+ }
+ }
+
+ // if no tag is set, use the field name
+ if tag == "" {
+ tag = f.Name
+ }
+
+ b.args = append(b.args, b.getFieldValue(v.Field(i), f))
+
+ // if the field is not unique, skip it
+ if _, ok := uniqueFields[tag]; ok {
+ continue
+ }
+
+ // add the field to the list
+ b.fields = append(b.fields, tag)
+ uniqueFields[tag] = struct{}{}
+ }
+ }
+}
+
+func (b *SQLBatch) getFieldValue(v reflect.Value, f reflect.StructField) any {
+ if f.Type.Kind() == reflect.Ptr {
+ if v.IsNil() {
+ return nil
+ }
+ return v.Elem().Interface()
+ }
+
+ return v.Interface()
+}
+
+func (b *SQLBatch) GenerateSQL() (string, []any, error) {
+ if err := b.validateSQLGen(); err != nil {
+ return "", nil, err
+ }
+
+ sqlBuilder := new(strings.Builder)
+
+ sqlBuilder.WriteString("INSERT INTO ")
+ sqlBuilder.WriteString(b.table)
+ sqlBuilder.WriteString(" (")
+ sqlBuilder.WriteString(strings.Join(b.fields, ", "))
+ sqlBuilder.WriteString(") VALUES ")
+
+ // We need to have the same number of "?" as fields and then repeat that for the number of resources
+ placeholder := strings.Repeat("?, ", len(b.fields))
+ placeholder = placeholder[:len(placeholder)-2] // Remove the trailing ", "
+ placeholder = "(" + placeholder + "), "
+
+ // Calculate the number of placeholders needed. Args divided by fields
+ n := len(b.args) / len(b.fields)
+
+ // Repeat the placeholder for the number of resources
+ placeholders := strings.Repeat(placeholder, n)
+ sqlBuilder.WriteString(placeholders[:len(placeholders)-2]) // Remove the trailing ", "
+
+ return sqlBuilder.String(), b.args, nil
+}
+
+func (b *SQLBatch) Perform() (sql.Result, error) {
+ if err := b.validateSQLInsert(); err != nil {
+ return nil, fmt.Errorf("validate SQL generation: %w", err)
+ }
+
+ sqlStr, args, err := b.GenerateSQL()
+ if err != nil {
+ return nil, fmt.Errorf("generate SQL: %w", err)
+ }
+
+ return b.db.Exec(sqlStr, args...)
+}
+
+func (b *SQLBatch) checkSkipField(field reflect.StructField) bool {
+ // The ignore fields tag takes precedence over the ignore fields list
+ if b.checkSkipTag(field) {
+ return true
+ }
+
+ // Check if the field is a primary key, we don't want to include the primary key in the insert unless specified
+ if b.checkPrimaryKey(field) {
+ return true
+ }
+
+ return b.ignoredFieldsCheck(field)
+}
+
+func (b *SQLBatch) checkSkipTag(field reflect.StructField) bool {
+ val, ok := field.Tag.Lookup(patcher.TagOptsName)
+ if !ok {
+ return false
+ }
+
+ tags := strings.Split(val, patcher.TagOptSeparator)
+ return slices.Contains(tags, patcher.TagOptSkip)
+}
+
+func (b *SQLBatch) checkPrimaryKey(field reflect.StructField) bool {
+ // If we are including the primary key, we can immediately return false
+ if b.includePrimaryKey {
+ return false
+ }
+
+ val, ok := field.Tag.Lookup(patcher.DefaultDbTagName)
+ if !ok {
+ return false
+ }
+
+ tags := strings.Split(val, patcher.TagOptSeparator)
+ return slices.Contains(tags, patcher.DBTagPrimaryKey)
+}
+
+func (b *SQLBatch) ignoredFieldsCheck(field reflect.StructField) bool {
+ return b.checkIgnoredFields(strings.ToLower(field.Name)) || b.checkIgnoreFunc(field)
+}
+
+func (b *SQLBatch) checkIgnoreFunc(field reflect.StructField) bool {
+ return b.ignoreFieldsFunc != nil && b.ignoreFieldsFunc(field)
+}
+
+func (b *SQLBatch) checkIgnoredFields(field string) bool {
+ return len(b.ignoreFields) > 0 && slices.Contains(b.ignoreFields, strings.ToLower(field))
+}
diff --git a/vendor/github.com/jacobbrewer1/patcher/joiner.go b/vendor/github.com/jacobbrewer1/patcher/joiner.go
new file mode 100644
index 0000000..c4fa5d5
--- /dev/null
+++ b/vendor/github.com/jacobbrewer1/patcher/joiner.go
@@ -0,0 +1,5 @@
+package patcher
+
+type Joiner interface {
+ Join() (string, []any)
+}
diff --git a/vendor/github.com/jacobbrewer1/patcher/loader.go b/vendor/github.com/jacobbrewer1/patcher/loader.go
new file mode 100644
index 0000000..55260c8
--- /dev/null
+++ b/vendor/github.com/jacobbrewer1/patcher/loader.go
@@ -0,0 +1,123 @@
+package patcher
+
+import (
+ "errors"
+ "reflect"
+ "slices"
+ "strings"
+)
+
+var (
+ // ErrInvalidType is returned when the provided type is not a pointer to a struct
+ ErrInvalidType = errors.New("invalid type: must pointer to struct")
+)
+
+// LoadDiff takes the fields provided in the new struct pointer and injects their values into the old struct
+// pointer.
+//
+// Note that it only pushes non-zero value updates, meaning you cannot set any field to zero, the empty string, etc.
+// This is configurable by setting the includeZeroValues option to true or for nil values by setting includeNilValues.
+// Please see the LoaderOption's for more configuration options.
+//
+// This can be useful if you are inserting a patch into an existing object but require a new object to be returned with
+// all fields
+func LoadDiff[T any](old *T, newT *T, opts ...PatchOpt) error {
+ return newPatchDefaults(opts...).loadDiff(old, newT)
+}
+
+func (s *SQLPatch) loadDiff(old, newT any) error {
+ if !isPointerToStruct(old) || !isPointerToStruct(newT) {
+ return ErrInvalidType
+ }
+
+ oElem := reflect.ValueOf(old).Elem()
+ nElem := reflect.ValueOf(newT).Elem()
+
+ for i := 0; i < oElem.NumField(); i++ {
+ oField := oElem.Field(i)
+ nField := nElem.Field(i)
+
+ // Include only exported fields
+ if !oField.CanSet() || !nField.CanSet() {
+ continue
+ }
+
+ // Handle embedded structs (Anonymous fields)
+ if oElem.Type().Field(i).Anonymous {
+ // If the embedded field is a pointer, dereference it
+ if oField.Kind() == reflect.Ptr {
+ if !oField.IsNil() && !nField.IsNil() { // If both are not nil, we need to recursively call LoadDiff
+ if err := s.loadDiff(oField.Interface(), nField.Interface()); err != nil {
+ return err
+ }
+ } else if nElem.Field(i).IsValid() && !nField.IsNil() {
+ oField.Set(nField)
+ }
+
+ continue
+ }
+
+ if err := s.loadDiff(oField.Addr().Interface(), nField.Addr().Interface()); err != nil {
+ return err
+ }
+ continue
+ }
+
+ // If the field is a struct, we need to recursively call LoadDiff
+ if oField.Kind() == reflect.Struct {
+ if err := s.loadDiff(oField.Addr().Interface(), nField.Addr().Interface()); err != nil {
+ return err
+ }
+ continue
+ }
+
+ // See if the field should be ignored.
+ if s.checkSkipField(oElem.Type().Field(i)) {
+ continue
+ }
+
+ patcherOptsTag := oElem.Type().Field(i).Tag.Get(TagOptsName)
+
+ // Compare the old and new fields.
+ //
+ // New fields take priority over old fields if they are provided based on the configuration.
+ if nElem.Field(i).Kind() != reflect.Ptr && (!nField.IsZero() || s.shouldIncludeZero(patcherOptsTag)) {
+ oElem.Field(i).Set(nElem.Field(i))
+ } else if nElem.Field(i).Kind() == reflect.Ptr && (!nField.IsNil() || s.shouldIncludeNil(patcherOptsTag)) {
+ oField.Set(nElem.Field(i))
+ }
+ }
+
+ return nil
+}
+
+func (s *SQLPatch) checkSkipField(field reflect.StructField) bool {
+ // The ignore fields tag takes precedence over the ignore fields list
+ if s.checkSkipTag(field) {
+ return true
+ }
+
+ return s.ignoredFieldsCheck(field)
+}
+
+func (s *SQLPatch) checkSkipTag(field reflect.StructField) bool {
+ val, ok := field.Tag.Lookup(TagOptsName)
+ if !ok {
+ return false
+ }
+
+ tags := strings.Split(val, TagOptSeparator)
+ return slices.Contains(tags, TagOptSkip)
+}
+
+func (s *SQLPatch) ignoredFieldsCheck(field reflect.StructField) bool {
+ return s.checkIgnoredFields(strings.ToLower(field.Name)) || s.checkIgnoreFunc(field)
+}
+
+func (s *SQLPatch) checkIgnoreFunc(field reflect.StructField) bool {
+ return s.ignoreFieldsFunc != nil && s.ignoreFieldsFunc(field)
+}
+
+func (s *SQLPatch) checkIgnoredFields(field string) bool {
+ return len(s.ignoreFields) > 0 && slices.Contains(s.ignoreFields, strings.ToLower(field))
+}
diff --git a/vendor/github.com/jacobbrewer1/patcher/mock_IgnoreFieldsFunc.go b/vendor/github.com/jacobbrewer1/patcher/mock_IgnoreFieldsFunc.go
new file mode 100644
index 0000000..7781a40
--- /dev/null
+++ b/vendor/github.com/jacobbrewer1/patcher/mock_IgnoreFieldsFunc.go
@@ -0,0 +1,46 @@
+// Code generated by mockery. DO NOT EDIT.
+
+package patcher
+
+import (
+ reflect "reflect"
+
+ mock "github.com/stretchr/testify/mock"
+)
+
+// MockIgnoreFieldsFunc is an autogenerated mock type for the IgnoreFieldsFunc type
+type MockIgnoreFieldsFunc struct {
+ mock.Mock
+}
+
+// Execute provides a mock function with given fields: field
+func (_m *MockIgnoreFieldsFunc) Execute(field reflect.StructField) bool {
+ ret := _m.Called(field)
+
+ if len(ret) == 0 {
+ panic("no return value specified for Execute")
+ }
+
+ var r0 bool
+ if rf, ok := ret.Get(0).(func(reflect.StructField) bool); ok {
+ r0 = rf(field)
+ } else {
+ r0 = ret.Get(0).(bool)
+ }
+
+ return r0
+}
+
+// NewMockIgnoreFieldsFunc creates a new instance of MockIgnoreFieldsFunc. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
+// The first argument is typically a *testing.T value.
+func NewMockIgnoreFieldsFunc(t interface {
+ mock.TestingT
+ Cleanup(func())
+}) *MockIgnoreFieldsFunc {
+ mock := &MockIgnoreFieldsFunc{}
+ mock.Mock.Test(t)
+
+ t.Cleanup(func() { mock.AssertExpectations(t) })
+
+ return mock
+}
diff --git a/vendor/github.com/jacobbrewer1/patcher/mock_Joiner.go b/vendor/github.com/jacobbrewer1/patcher/mock_Joiner.go
new file mode 100644
index 0000000..28c6b96
--- /dev/null
+++ b/vendor/github.com/jacobbrewer1/patcher/mock_Joiner.go
@@ -0,0 +1,54 @@
+// Code generated by mockery. DO NOT EDIT.
+
+package patcher
+
+import mock "github.com/stretchr/testify/mock"
+
+// MockJoiner is an autogenerated mock type for the Joiner type
+type MockJoiner struct {
+ mock.Mock
+}
+
+// Join provides a mock function with given fields:
+func (_m *MockJoiner) Join() (string, []interface{}) {
+ ret := _m.Called()
+
+ if len(ret) == 0 {
+ panic("no return value specified for Join")
+ }
+
+ var r0 string
+ var r1 []interface{}
+ if rf, ok := ret.Get(0).(func() (string, []interface{})); ok {
+ return rf()
+ }
+ if rf, ok := ret.Get(0).(func() string); ok {
+ r0 = rf()
+ } else {
+ r0 = ret.Get(0).(string)
+ }
+
+ if rf, ok := ret.Get(1).(func() []interface{}); ok {
+ r1 = rf()
+ } else {
+ if ret.Get(1) != nil {
+ r1 = ret.Get(1).([]interface{})
+ }
+ }
+
+ return r0, r1
+}
+
+// NewMockJoiner creates a new instance of MockJoiner. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
+// The first argument is typically a *testing.T value.
+func NewMockJoiner(t interface {
+ mock.TestingT
+ Cleanup(func())
+}) *MockJoiner {
+ mock := &MockJoiner{}
+ mock.Mock.Test(t)
+
+ t.Cleanup(func() { mock.AssertExpectations(t) })
+
+ return mock
+}
diff --git a/vendor/github.com/jacobbrewer1/patcher/mock_PatchOpt.go b/vendor/github.com/jacobbrewer1/patcher/mock_PatchOpt.go
new file mode 100644
index 0000000..d16a049
--- /dev/null
+++ b/vendor/github.com/jacobbrewer1/patcher/mock_PatchOpt.go
@@ -0,0 +1,29 @@
+// Code generated by mockery. DO NOT EDIT.
+
+package patcher
+
+import mock "github.com/stretchr/testify/mock"
+
+// MockPatchOpt is an autogenerated mock type for the PatchOpt type
+type MockPatchOpt struct {
+ mock.Mock
+}
+
+// Execute provides a mock function with given fields: _a0
+func (_m *MockPatchOpt) Execute(_a0 *SQLPatch) {
+ _m.Called(_a0)
+}
+
+// NewMockPatchOpt creates a new instance of MockPatchOpt. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
+// The first argument is typically a *testing.T value.
+func NewMockPatchOpt(t interface {
+ mock.TestingT
+ Cleanup(func())
+}) *MockPatchOpt {
+ mock := &MockPatchOpt{}
+ mock.Mock.Test(t)
+
+ t.Cleanup(func() { mock.AssertExpectations(t) })
+
+ return mock
+}
diff --git a/vendor/github.com/jacobbrewer1/patcher/mock_WhereTyper.go b/vendor/github.com/jacobbrewer1/patcher/mock_WhereTyper.go
new file mode 100644
index 0000000..9fb7052
--- /dev/null
+++ b/vendor/github.com/jacobbrewer1/patcher/mock_WhereTyper.go
@@ -0,0 +1,72 @@
+// Code generated by mockery. DO NOT EDIT.
+
+package patcher
+
+import mock "github.com/stretchr/testify/mock"
+
+// MockWhereTyper is an autogenerated mock type for the WhereTyper type
+type MockWhereTyper struct {
+ mock.Mock
+}
+
+// Where provides a mock function with given fields:
+func (_m *MockWhereTyper) Where() (string, []interface{}) {
+ ret := _m.Called()
+
+ if len(ret) == 0 {
+ panic("no return value specified for Where")
+ }
+
+ var r0 string
+ var r1 []interface{}
+ if rf, ok := ret.Get(0).(func() (string, []interface{})); ok {
+ return rf()
+ }
+ if rf, ok := ret.Get(0).(func() string); ok {
+ r0 = rf()
+ } else {
+ r0 = ret.Get(0).(string)
+ }
+
+ if rf, ok := ret.Get(1).(func() []interface{}); ok {
+ r1 = rf()
+ } else {
+ if ret.Get(1) != nil {
+ r1 = ret.Get(1).([]interface{})
+ }
+ }
+
+ return r0, r1
+}
+
+// WhereType provides a mock function with given fields:
+func (_m *MockWhereTyper) WhereType() WhereType {
+ ret := _m.Called()
+
+ if len(ret) == 0 {
+ panic("no return value specified for WhereType")
+ }
+
+ var r0 WhereType
+ if rf, ok := ret.Get(0).(func() WhereType); ok {
+ r0 = rf()
+ } else {
+ r0 = ret.Get(0).(WhereType)
+ }
+
+ return r0
+}
+
+// NewMockWhereTyper creates a new instance of MockWhereTyper. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
+// The first argument is typically a *testing.T value.
+func NewMockWhereTyper(t interface {
+ mock.TestingT
+ Cleanup(func())
+}) *MockWhereTyper {
+ mock := &MockWhereTyper{}
+ mock.Mock.Test(t)
+
+ t.Cleanup(func() { mock.AssertExpectations(t) })
+
+ return mock
+}
diff --git a/vendor/github.com/jacobbrewer1/patcher/mock_Wherer.go b/vendor/github.com/jacobbrewer1/patcher/mock_Wherer.go
new file mode 100644
index 0000000..597f481
--- /dev/null
+++ b/vendor/github.com/jacobbrewer1/patcher/mock_Wherer.go
@@ -0,0 +1,54 @@
+// Code generated by mockery. DO NOT EDIT.
+
+package patcher
+
+import mock "github.com/stretchr/testify/mock"
+
+// MockWherer is an autogenerated mock type for the Wherer type
+type MockWherer struct {
+ mock.Mock
+}
+
+// Where provides a mock function with given fields:
+func (_m *MockWherer) Where() (string, []interface{}) {
+ ret := _m.Called()
+
+ if len(ret) == 0 {
+ panic("no return value specified for Where")
+ }
+
+ var r0 string
+ var r1 []interface{}
+ if rf, ok := ret.Get(0).(func() (string, []interface{})); ok {
+ return rf()
+ }
+ if rf, ok := ret.Get(0).(func() string); ok {
+ r0 = rf()
+ } else {
+ r0 = ret.Get(0).(string)
+ }
+
+ if rf, ok := ret.Get(1).(func() []interface{}); ok {
+ r1 = rf()
+ } else {
+ if ret.Get(1) != nil {
+ r1 = ret.Get(1).([]interface{})
+ }
+ }
+
+ return r0, r1
+}
+
+// NewMockWherer creates a new instance of MockWherer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
+// The first argument is typically a *testing.T value.
+func NewMockWherer(t interface {
+ mock.TestingT
+ Cleanup(func())
+}) *MockWherer {
+ mock := &MockWherer{}
+ mock.Mock.Test(t)
+
+ t.Cleanup(func() { mock.AssertExpectations(t) })
+
+ return mock
+}
diff --git a/vendor/github.com/jacobbrewer1/patcher/patch.go b/vendor/github.com/jacobbrewer1/patcher/patch.go
new file mode 100644
index 0000000..5d6a87a
--- /dev/null
+++ b/vendor/github.com/jacobbrewer1/patcher/patch.go
@@ -0,0 +1,170 @@
+package patcher
+
+import (
+ "database/sql"
+ "errors"
+ "reflect"
+ "slices"
+ "strings"
+)
+
+var (
+ // ErrNoDatabaseConnection is returned when no database connection is set
+ ErrNoDatabaseConnection = errors.New("no database connection set")
+
+ // ErrNoTable is returned when no table is set
+ ErrNoTable = errors.New("no table set")
+
+ // ErrNoFields is returned when no fields are set
+ ErrNoFields = errors.New("no fields set")
+
+ // ErrNoArgs is returned when no arguments are set
+ ErrNoArgs = errors.New("no arguments set")
+
+ // ErrNoWhere is returned when no where clause is set
+ ErrNoWhere = errors.New("no where clause set")
+)
+
+type IgnoreFieldsFunc func(field reflect.StructField) bool
+
+type SQLPatch struct {
+ // fields is the fields to update in the SQL statement
+ fields []string
+
+ // args is the arguments to use in the SQL statement
+ args []any
+
+ // db is the database connection to use
+ db *sql.DB
+
+ // tagName is the tag name to look for in the struct. This is an override from the default tag "db"
+ tagName string
+
+ // table is the table name to use in the SQL statement
+ table string
+
+ // whereSql is the where clause to use in the SQL statement
+ whereSql *strings.Builder
+
+ // whereArgs is the arguments to use in the where clause
+ whereArgs []any
+
+ // joinSql is the join clause to use in the SQL statement
+ joinSql *strings.Builder
+
+ // joinArgs is the arguments to use in the join clause
+ joinArgs []any
+
+ // includeZeroValues determines whether zero values should be included in the patch
+ includeZeroValues bool
+
+ // includeNilValues determines whether nil values should be included in the patch
+ includeNilValues bool
+
+ // ignoreFields is a list of fields to ignore when patching
+ ignoreFields []string
+
+ // ignoreFieldsFunc is a function that determines whether a field should be ignored
+ //
+ // This func should return true if the field is to be ignored
+ ignoreFieldsFunc IgnoreFieldsFunc
+}
+
+// newPatchDefaults creates a new SQLPatch with default options.
+func newPatchDefaults(opts ...PatchOpt) *SQLPatch {
+ // Default options
+ p := &SQLPatch{
+ fields: make([]string, 0),
+ args: make([]any, 0),
+ db: nil,
+ tagName: DefaultDbTagName,
+ table: "",
+ whereSql: new(strings.Builder),
+ whereArgs: nil,
+ joinSql: new(strings.Builder),
+ joinArgs: nil,
+ includeZeroValues: false,
+ includeNilValues: false,
+ ignoreFields: nil,
+ ignoreFieldsFunc: nil,
+ }
+
+ for _, opt := range opts {
+ opt(p)
+ }
+
+ return p
+}
+
+func (s *SQLPatch) Fields() []string {
+ if len(s.fields) == 0 {
+ // Default behaviour is to return nil if there are no fields
+ return nil
+ }
+ return s.fields
+}
+
+func (s *SQLPatch) Args() []any {
+ if len(s.args) == 0 {
+ // Default behaviour is to return nil if there are no args
+ return nil
+ }
+ return s.args
+}
+
+func (s *SQLPatch) validatePerformPatch() error {
+ if s.db == nil {
+ return ErrNoDatabaseConnection
+ } else if s.table == "" {
+ return ErrNoTable
+ } else if len(s.fields) == 0 {
+ return ErrNoFields
+ } else if len(s.args) == 0 {
+ return ErrNoArgs
+ } else if s.whereSql.String() == "" {
+ return ErrNoWhere
+ }
+
+ return nil
+}
+
+func (s *SQLPatch) validateSQLGen() error {
+ if s.table == "" {
+ return ErrNoTable
+ } else if len(s.fields) == 0 {
+ return ErrNoFields
+ } else if len(s.args) == 0 {
+ return ErrNoArgs
+ } else if s.whereSql.String() == "" {
+ return ErrNoWhere
+ }
+
+ return nil
+}
+
+func (s *SQLPatch) shouldIncludeNil(tag string) bool {
+ if s.includeNilValues {
+ return true
+ }
+
+ return s.shouldOmitEmpty(tag)
+}
+
+func (s *SQLPatch) shouldIncludeZero(tag string) bool {
+ if s.includeZeroValues {
+ return true
+ }
+
+ return s.shouldOmitEmpty(tag)
+}
+
+func (s *SQLPatch) shouldOmitEmpty(tag string) bool {
+ if tag != "" {
+ tags := strings.Split(tag, TagOptSeparator)
+ if slices.Contains(tags, TagOptOmitempty) {
+ return true
+ }
+ }
+
+ return false
+}
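
A short, hypothetical struct showing how the tag helpers above are interpreted: `shouldOmitEmpty` treats a `patcher:"omitempty"` option as "include this field even when it is nil or zero", while (per the option constants in patch_opts.go) `patcher:"-"` skips a field entirely. The struct and column names are invented for illustration.

```go
package patcher_test

// person is a hypothetical resource illustrating the tag handling above.
// Column names come from the `db` tag (or the tag set via WithTagName).
type person struct {
	ID    *int    `db:"id" patcher:"-"`            // skipped: never part of the SET clause
	Name  *string `db:"name"`                      // included only when non-nil
	Email *string `db:"email" patcher:"omitempty"` // included even when nil (written as NULL)
}
```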
diff --git a/vendor/github.com/jacobbrewer1/patcher/patch_opts.go b/vendor/github.com/jacobbrewer1/patcher/patch_opts.go
new file mode 100644
index 0000000..67b9e9c
--- /dev/null
+++ b/vendor/github.com/jacobbrewer1/patcher/patch_opts.go
@@ -0,0 +1,115 @@
+package patcher
+
+import (
+ "database/sql"
+ "strings"
+)
+
+const (
+ TagOptsName = "patcher"
+ TagOptSeparator = ","
+ TagOptSkip = "-"
+ TagOptOmitempty = "omitempty"
+)
+
+type PatchOpt func(*SQLPatch)
+
+// WithTagName sets the tag name to look for in the struct. This is an override from the default tag "db"
+func WithTagName(tagName string) PatchOpt {
+ return func(s *SQLPatch) {
+ s.tagName = tagName
+ }
+}
+
+// WithTable sets the table name to use in the SQL statement
+func WithTable(table string) PatchOpt {
+ return func(s *SQLPatch) {
+ s.table = table
+ }
+}
+
+// WithWhere sets the where clause to use in the SQL statement
+func WithWhere(where Wherer) PatchOpt {
+ return func(s *SQLPatch) {
+ if s.whereSql == nil {
+ s.whereSql = new(strings.Builder)
+ }
+ fwSQL, fwArgs := where.Where()
+ if fwArgs == nil {
+ fwArgs = make([]any, 0)
+ }
+ wtStr := WhereTypeAnd // default to AND
+ wt, ok := where.(WhereTyper)
+ if ok && wt.WhereType().IsValid() {
+ wtStr = wt.WhereType()
+ }
+ s.whereSql.WriteString(string(wtStr) + " ")
+ s.whereSql.WriteString(strings.TrimSpace(fwSQL))
+ s.whereSql.WriteString("\n")
+ s.whereArgs = append(s.whereArgs, fwArgs...)
+ }
+}
+
+// WithJoin sets the join clause to use in the SQL statement
+func WithJoin(join Joiner) PatchOpt {
+ return func(s *SQLPatch) {
+ if s.joinSql == nil {
+ s.joinSql = new(strings.Builder)
+ }
+ fjSQL, fjArgs := join.Join()
+ if fjArgs == nil {
+ fjArgs = make([]any, 0)
+ }
+ s.joinSql.WriteString(strings.TrimSpace(fjSQL))
+ s.joinSql.WriteString("\n")
+ s.joinArgs = append(s.joinArgs, fjArgs...)
+ }
+}
+
+// WithDB sets the database connection to use
+func WithDB(db *sql.DB) PatchOpt {
+ return func(s *SQLPatch) {
+ s.db = db
+ }
+}
+
+// WithIncludeZeroValues sets whether zero values should be included in the patch.
+//
+// This is useful when you want to set a field to zero.
+func WithIncludeZeroValues() PatchOpt {
+ return func(s *SQLPatch) {
+ s.includeZeroValues = true
+ }
+}
+
+// WithIncludeNilValues sets whether nil values should be included in the patch.
+//
+// This is useful when you want to set a field to nil.
+func WithIncludeNilValues() PatchOpt {
+ return func(s *SQLPatch) {
+ s.includeNilValues = true
+ }
+}
+
+// WithIgnoredFields sets the fields to ignore when patching.
+//
+// This should be the actual field name, not the JSON tag name or the db tag name.
+//
+// Note. When we parse the slice of strings, we convert them to lowercase to ensure that the comparison is
+// case-insensitive.
+func WithIgnoredFields(fields ...string) PatchOpt {
+ return func(s *SQLPatch) {
+ for i := range fields {
+ fields[i] = strings.ToLower(fields[i])
+ }
+
+ s.ignoreFields = fields
+ }
+}
+
+// WithIgnoredFieldsFunc sets a function that determines whether a field should be ignored when patching.
+func WithIgnoredFieldsFunc(f IgnoreFieldsFunc) PatchOpt {
+ return func(s *SQLPatch) {
+ s.ignoreFieldsFunc = f
+ }
+}
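
A sketch of how these options are typically composed when building a patch; the table name, ignored field, and `Wherer` value are placeholders rather than anything defined by this package.

```go
package patcher_test

import (
	"database/sql"

	"github.com/jacobbrewer1/patcher"
)

// buildPatch composes the options defined above into a single SQLPatch.
// All identifiers here are illustrative.
func buildPatch(db *sql.DB, resource any, where patcher.Wherer) *patcher.SQLPatch {
	return patcher.NewSQLPatch(
		resource,
		patcher.WithDB(db),
		patcher.WithTable("people"),
		patcher.WithWhere(where),
		patcher.WithIncludeZeroValues(),
		patcher.WithIgnoredFields("CreatedAt"),
	)
}
```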
diff --git a/vendor/github.com/jacobbrewer1/patcher/sql.go b/vendor/github.com/jacobbrewer1/patcher/sql.go
new file mode 100644
index 0000000..ca306be
--- /dev/null
+++ b/vendor/github.com/jacobbrewer1/patcher/sql.go
@@ -0,0 +1,246 @@
+package patcher
+
+import (
+ "database/sql"
+ "errors"
+ "fmt"
+ "reflect"
+ "slices"
+ "strings"
+)
+
+const (
+ DefaultDbTagName = "db"
+ DBTagPrimaryKey = "pk"
+)
+
+var (
+ // ErrNoChanges is returned when no changes are detected between the old and new objects
+ ErrNoChanges = errors.New("no changes detected between the old and new objects")
+)
+
+func NewSQLPatch(resource any, opts ...PatchOpt) *SQLPatch {
+ sqlPatch := newPatchDefaults(opts...)
+ sqlPatch.patchGen(resource)
+ return sqlPatch
+}
+
+func (s *SQLPatch) patchGen(resource any) {
+ // If the resource is a pointer, we need to dereference it to get the value
+ if reflect.TypeOf(resource).Kind() == reflect.Ptr {
+ resource = reflect.ValueOf(resource).Elem().Interface()
+ }
+
+ // Ensure that the resource is a struct
+ if reflect.TypeOf(resource).Kind() != reflect.Struct {
+ // This is intentionally a panic as this is a programming error and should be fixed by the developer
+ panic("resource is not a struct")
+ }
+
+ rType := reflect.TypeOf(resource)
+ rVal := reflect.ValueOf(resource)
+ n := rType.NumField()
+
+ s.fields = make([]string, 0, n)
+ s.args = make([]any, 0, n)
+
+ for i := 0; i < n; i++ {
+ fType := rType.Field(i)
+ fVal := rVal.Field(i)
+ tag := fType.Tag.Get(s.tagName)
+
+ // Skip unexported fields
+ if !fType.IsExported() {
+ continue
+ }
+
+ tags := strings.Split(tag, TagOptSeparator)
+ if len(tags) > 1 {
+ tag = tags[0]
+ }
+
+ patcherOptsTag := fType.Tag.Get(TagOptsName)
+
+ // Skip fields that are to be ignored
+ if s.checkSkipField(fType) {
+ continue
+ } else if fVal.Kind() == reflect.Ptr && (fVal.IsNil() && !s.shouldIncludeNil(patcherOptsTag)) {
+ continue
+ } else if fVal.Kind() != reflect.Ptr && (fVal.IsZero() && !s.shouldIncludeZero(patcherOptsTag)) {
+ continue
+ }
+
+ if patcherOptsTag != "" {
+ patcherOpts := strings.Split(patcherOptsTag, TagOptSeparator)
+ if slices.Contains(patcherOpts, TagOptSkip) {
+ continue
+ }
+ }
+
+ // If no tag is set, use the field name
+ if tag == "" {
+ tag = fType.Name
+ }
+
+ addField := func() {
+ s.fields = append(s.fields, tag+" = ?")
+ }
+
+ if fVal.Kind() == reflect.Ptr && fVal.IsNil() && s.shouldIncludeNil(patcherOptsTag) {
+ s.args = append(s.args, nil)
+ addField()
+ continue
+ } else if fVal.Kind() == reflect.Ptr && fVal.IsNil() {
+ continue
+ }
+
+ addField()
+
+ var val reflect.Value
+ if fVal.Kind() == reflect.Ptr {
+ val = fVal.Elem()
+ } else {
+ val = fVal
+ }
+
+ switch val.Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ s.args = append(s.args, val.Int())
+ case reflect.String:
+ s.args = append(s.args, val.String())
+ case reflect.Bool:
+ boolArg := 0
+ if val.Bool() {
+ boolArg = 1
+ }
+ s.args = append(s.args, boolArg)
+ case reflect.Float32, reflect.Float64:
+ s.args = append(s.args, val.Float())
+ default:
+ // This is intentionally a panic as this is a programming error and should be fixed by the developer
+ panic(fmt.Sprintf("unsupported type: %s", val.Kind()))
+ }
+ }
+}
+
+func GenerateSQL(resource any, opts ...PatchOpt) (string, []any, error) {
+ sqlPatch := NewSQLPatch(resource, opts...)
+ return sqlPatch.GenerateSQL()
+}
+
+func (s *SQLPatch) GenerateSQL() (string, []any, error) {
+ if err := s.validateSQLGen(); err != nil {
+ return "", nil, fmt.Errorf("validate perform patch: %w", err)
+ }
+
+ sqlBuilder := new(strings.Builder)
+ sqlBuilder.WriteString("UPDATE ")
+ sqlBuilder.WriteString(s.table)
+ sqlBuilder.WriteString("\n")
+
+ if s.joinSql.String() != "" {
+ sqlBuilder.WriteString(s.joinSql.String())
+ }
+
+ sqlBuilder.WriteString("SET ")
+ sqlBuilder.WriteString(strings.Join(s.fields, ", "))
+ sqlBuilder.WriteString("\n")
+
+ sqlBuilder.WriteString("WHERE (1=1)\n")
+ sqlBuilder.WriteString("AND (\n")
+
+ // If the where clause starts with "AND" or "OR", we need to remove it
+ where := s.whereSql.String()
+ if strings.HasPrefix(where, string(WhereTypeAnd)) || strings.HasPrefix(where, string(WhereTypeOr)) {
+ where = strings.TrimPrefix(where, string(WhereTypeAnd))
+ where = strings.TrimPrefix(where, string(WhereTypeOr))
+ where = strings.TrimSpace(where)
+ }
+
+ sqlBuilder.WriteString(strings.TrimSpace(where) + "\n")
+ sqlBuilder.WriteString(")")
+
+ args := append(s.joinArgs, s.args...)
+ args = append(args, s.whereArgs...)
+
+ return sqlBuilder.String(), args, nil
+}
+
+func PerformPatch(resource any, opts ...PatchOpt) (sql.Result, error) {
+ sqlPatch := NewSQLPatch(resource, opts...)
+ return sqlPatch.PerformPatch()
+}
+
+func PerformDiffPatch[T any](old, newT *T, opts ...PatchOpt) (sql.Result, error) {
+ sqlPatch, err := NewDiffSQLPatch(old, newT, opts...)
+ if err != nil {
+ return nil, fmt.Errorf("new diff sql patch: %w", err)
+ }
+
+ return sqlPatch.PerformPatch()
+}
+
+func (s *SQLPatch) PerformPatch() (sql.Result, error) {
+ if err := s.validatePerformPatch(); err != nil {
+ return nil, fmt.Errorf("validate perform patch: %w", err)
+ }
+
+ sqlStr, args, err := s.GenerateSQL()
+ if err != nil {
+ return nil, fmt.Errorf("generate SQL: %w", err)
+ }
+
+ return s.db.Exec(sqlStr, args...)
+}
+
+func NewDiffSQLPatch[T any](old, newT *T, opts ...PatchOpt) (*SQLPatch, error) {
+ if !isPointerToStruct(old) || !isPointerToStruct(newT) {
+ return nil, ErrInvalidType
+ }
+
+ // Take a copy of the old object
+ oldCopy := reflect.New(reflect.TypeOf(old).Elem()).Interface()
+
+ // copy the old object into the copy
+ reflect.ValueOf(oldCopy).Elem().Set(reflect.ValueOf(old).Elem())
+
+ patch := newPatchDefaults(opts...)
+ if err := patch.loadDiff(old, newT); err != nil {
+ return nil, fmt.Errorf("load diff: %w", err)
+ }
+
+ // Are the old and new objects the same?
+ if reflect.DeepEqual(old, oldCopy) {
+ return nil, ErrNoChanges
+ }
+
+ oldElem := reflect.ValueOf(old).Elem()
+ oldCopyElem := reflect.ValueOf(oldCopy).Elem()
+
+ // For each field in the old object, compare it against the copy and if the fields are the same, set them to zero or nil.
+ for i := 0; i < reflect.ValueOf(old).Elem().NumField(); i++ {
+ oldField := oldElem.Field(i)
+ copyField := oldCopyElem.Field(i)
+
+ patcherOptsTag := oldElem.Type().Field(i).Tag.Get(TagOptsName)
+
+ if oldField.Kind() == reflect.Ptr && (oldField.IsNil() && copyField.IsNil() && !patch.shouldIncludeNil(patcherOptsTag)) {
+ continue
+ } else if oldField.Kind() != reflect.Ptr && (oldField.IsZero() && copyField.IsZero() && !patch.shouldIncludeZero(patcherOptsTag)) {
+ continue
+ }
+
+ if reflect.DeepEqual(oldField.Interface(), copyField.Interface()) {
+ // Field is the same, set it to zero or nil. Add it to be ignored in the patch
+ if patch.ignoreFields == nil {
+ patch.ignoreFields = make([]string, 0)
+ }
+ patch.ignoreFields = append(patch.ignoreFields, strings.ToLower(oldElem.Type().Field(i).Name))
+ continue
+ }
+ }
+
+ patch.patchGen(old)
+
+ return patch, nil
+}
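
To make the generated statement shape concrete, a hedged example of `GenerateSQL` with a single pointer field and a simple `Wherer`; the struct, table, and values are invented. Tracing the builder above, the result is an `UPDATE people` statement with a `SET name = ?` clause and the where fragment wrapped in `WHERE (1=1) AND (...)`, with arguments ordered join, set, then where.

```go
package patcher_test

import (
	"fmt"

	"github.com/jacobbrewer1/patcher"
)

// personWhere is a hypothetical Wherer keying the update on an ID.
type personWhere struct{ id int }

func (w personWhere) Where() (string, []any) { return "id = ?", []any{w.id} }

func ExampleGenerateSQL() {
	name := "Alice"
	update := struct {
		Name *string `db:"name"`
	}{Name: &name}

	sqlStr, args, err := patcher.GenerateSQL(
		&update,
		patcher.WithTable("people"),
		patcher.WithWhere(personWhere{id: 7}),
	)
	if err != nil {
		panic(err)
	}

	fmt.Println(sqlStr) // roughly: UPDATE people / SET name = ? / WHERE (1=1) AND ( id = ? )
	fmt.Println(args)   // [Alice 7]
}
```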
diff --git a/vendor/github.com/jacobbrewer1/patcher/utils.go b/vendor/github.com/jacobbrewer1/patcher/utils.go
new file mode 100644
index 0000000..df9f6be
--- /dev/null
+++ b/vendor/github.com/jacobbrewer1/patcher/utils.go
@@ -0,0 +1,39 @@
+package patcher
+
+import (
+ "errors"
+ "reflect"
+)
+
+// ptr returns a pointer to the value passed in.
+func ptr[T any](v T) *T {
+ return &v
+}
+
+func isPointerToStruct[T any](t T) bool {
+ rv := reflect.ValueOf(t)
+ if rv.Kind() != reflect.Ptr || rv.IsNil() {
+ return false
+ }
+
+ return rv.Elem().Kind() == reflect.Struct
+}
+
+// IgnoreNoChangesErr ignores the ErrNoChanges error. This is useful when you want to ignore the error when no changes
+// were made. Please ensure that you are still handling the errors as needed. We will return a "nil" patch when there
+// are no changes as the ErrNoChanges error is returned.
+//
+// Example:
+//
+// err := report.Patch(db, newReport)
+// if patcher.IgnoreNoChangesErr(err) != nil {
+// return err
+// }
+func IgnoreNoChangesErr(err error) error {
+ switch {
+ case errors.Is(err, ErrNoChanges):
+ return nil
+ default:
+ return err
+ }
+}
diff --git a/vendor/github.com/jacobbrewer1/patcher/wherer.go b/vendor/github.com/jacobbrewer1/patcher/wherer.go
new file mode 100644
index 0000000..051c164
--- /dev/null
+++ b/vendor/github.com/jacobbrewer1/patcher/wherer.go
@@ -0,0 +1,30 @@
+package patcher
+
+// Wherer is an interface that can be used to specify the WHERE clause to use. By using this interface,
+// the package will default to using an "AND" WHERE clause. If you want to use an "OR" WHERE clause, you can
+// use the WhereTyper interface instead.
+type Wherer interface {
+ Where() (string, []any)
+}
+
+// WhereTyper is an interface that can be used to specify the type of WHERE clause to use. By using this
+// interface, you can specify whether to use an "AND" or "OR" WHERE clause.
+type WhereTyper interface {
+ Wherer
+ WhereType() WhereType
+}
+
+type WhereType string
+
+const (
+ WhereTypeAnd WhereType = "AND"
+ WhereTypeOr WhereType = "OR"
+)
+
+func (w WhereType) IsValid() bool {
+ switch w {
+ case WhereTypeAnd, WhereTypeOr:
+ return true
+ }
+ return false
+}
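
A minimal sketch of opting into an OR clause by implementing WhereTyper rather than plain Wherer; the filter type and column are hypothetical.

```go
package patcher_test

import "github.com/jacobbrewer1/patcher"

// orFilter matches either of two email addresses and asks the patcher to
// join its clause with OR instead of the default AND.
type orFilter struct {
	a, b string
}

func (f orFilter) Where() (string, []any) {
	return "(email = ? OR email = ?)", []any{f.a, f.b}
}

func (f orFilter) WhereType() patcher.WhereType {
	return patcher.WhereTypeOr
}
```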
diff --git a/vendor/github.com/lithammer/fuzzysearch/LICENSE b/vendor/github.com/lithammer/fuzzysearch/LICENSE
new file mode 100644
index 0000000..dee3d1d
--- /dev/null
+++ b/vendor/github.com/lithammer/fuzzysearch/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2018 Peter Lithammer
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/lithammer/fuzzysearch/fuzzy/fuzzy.go b/vendor/github.com/lithammer/fuzzysearch/fuzzy/fuzzy.go
new file mode 100644
index 0000000..8890877
--- /dev/null
+++ b/vendor/github.com/lithammer/fuzzysearch/fuzzy/fuzzy.go
@@ -0,0 +1,292 @@
+// Fuzzy searching allows for flexibly matching a string with partial input,
+// useful for filtering data very quickly based on lightweight user input.
+package fuzzy
+
+import (
+ "unicode"
+ "unicode/utf8"
+
+ "golang.org/x/text/runes"
+ "golang.org/x/text/transform"
+ "golang.org/x/text/unicode/norm"
+)
+
+func noopTransformer() transform.Transformer {
+ return nopTransformer{}
+}
+
+func foldTransformer() transform.Transformer {
+ return unicodeFoldTransformer{}
+}
+
+func normalizeTransformer() transform.Transformer {
+ return transform.Chain(norm.NFD, runes.Remove(runes.In(unicode.Mn)), norm.NFC)
+}
+
+func normalizedFoldTransformer() transform.Transformer {
+ return transform.Chain(normalizeTransformer(), foldTransformer())
+}
+
+// Match returns true if source matches target using a fuzzy-searching
+// algorithm. Note that it doesn't implement Levenshtein distance (see
+// RankMatch instead), but rather a simplified version where there's no
+// approximation. The method will return true only if each character in the
+// source can be found in the target and occurs after the preceding matches.
+func Match(source, target string) bool {
+ return match(source, target, noopTransformer())
+}
+
+// MatchFold is a case-insensitive version of Match.
+func MatchFold(source, target string) bool {
+ return match(source, target, foldTransformer())
+}
+
+// MatchNormalized is a unicode-normalized version of Match.
+func MatchNormalized(source, target string) bool {
+ return match(source, target, normalizeTransformer())
+}
+
+// MatchNormalizedFold is a unicode-normalized and case-insensitive version of Match.
+func MatchNormalizedFold(source, target string) bool {
+ return match(source, target, normalizedFoldTransformer())
+}
+
+func match(source, target string, transformer transform.Transformer) bool {
+ sourceT := stringTransform(source, transformer)
+ targetT := stringTransform(target, transformer)
+ return matchTransformed(sourceT, targetT)
+}
+
+func matchTransformed(source, target string) bool {
+ lenDiff := len(target) - len(source)
+
+ if lenDiff < 0 {
+ return false
+ }
+
+ if lenDiff == 0 && source == target {
+ return true
+ }
+
+Outer:
+ for _, r1 := range source {
+ for i, r2 := range target {
+ if r1 == r2 {
+ target = target[i+utf8.RuneLen(r2):]
+ continue Outer
+ }
+ }
+ return false
+ }
+
+ return true
+}
+
+// Find will return a list of strings in targets that fuzzy matches source.
+func Find(source string, targets []string) []string {
+ return find(source, targets, noopTransformer())
+}
+
+// FindFold is a case-insensitive version of Find.
+func FindFold(source string, targets []string) []string {
+ return find(source, targets, foldTransformer())
+}
+
+// FindNormalized is a unicode-normalized version of Find.
+func FindNormalized(source string, targets []string) []string {
+ return find(source, targets, normalizeTransformer())
+}
+
+// FindNormalizedFold is a unicode-normalized and case-insensitive version of Find.
+func FindNormalizedFold(source string, targets []string) []string {
+ return find(source, targets, normalizedFoldTransformer())
+}
+
+func find(source string, targets []string, transformer transform.Transformer) []string {
+ sourceT := stringTransform(source, transformer)
+
+ var matches []string
+
+ for _, target := range targets {
+ targetT := stringTransform(target, transformer)
+ if matchTransformed(sourceT, targetT) {
+ matches = append(matches, target)
+ }
+ }
+
+ return matches
+}
+
+// RankMatch is similar to Match except it will measure the Levenshtein
+// distance between the source and the target and return its result. If there
+// was no match, it will return -1.
+// Given the requirements of match, RankMatch only needs to perform a subset of
+// the Levenshtein calculation, only deletions need be considered, required
+// additions and substitutions would fail the match test.
+func RankMatch(source, target string) int {
+ return rank(source, target, noopTransformer())
+}
+
+// RankMatchFold is a case-insensitive version of RankMatch.
+func RankMatchFold(source, target string) int {
+ return rank(source, target, foldTransformer())
+}
+
+// RankMatchNormalized is a unicode-normalized version of RankMatch.
+func RankMatchNormalized(source, target string) int {
+ return rank(source, target, normalizeTransformer())
+}
+
+// RankMatchNormalizedFold is a unicode-normalized and case-insensitive version of RankMatch.
+func RankMatchNormalizedFold(source, target string) int {
+ return rank(source, target, normalizedFoldTransformer())
+}
+
+func rank(source, target string, transformer transform.Transformer) int {
+ lenDiff := len(target) - len(source)
+
+ if lenDiff < 0 {
+ return -1
+ }
+
+ source = stringTransform(source, transformer)
+ target = stringTransform(target, transformer)
+
+ if lenDiff == 0 && source == target {
+ return 0
+ }
+
+ runeDiff := 0
+
+Outer:
+ for _, r1 := range source {
+ for i, r2 := range target {
+ if r1 == r2 {
+ target = target[i+utf8.RuneLen(r2):]
+ continue Outer
+ } else {
+ runeDiff++
+ }
+ }
+ return -1
+ }
+
+ // Count up remaining char
+ runeDiff += utf8.RuneCountInString(target)
+
+ return runeDiff
+}
+
+// RankFind is similar to Find, except it will also rank all matches using
+// Levenshtein distance.
+func RankFind(source string, targets []string) Ranks {
+ return rankFind(source, targets, noopTransformer())
+}
+
+// RankFindFold is a case-insensitive version of RankFind.
+func RankFindFold(source string, targets []string) Ranks {
+ return rankFind(source, targets, foldTransformer())
+}
+
+// RankFindNormalized is a unicode-normalized version of RankFind.
+func RankFindNormalized(source string, targets []string) Ranks {
+ return rankFind(source, targets, normalizeTransformer())
+}
+
+// RankFindNormalizedFold is a unicode-normalized and case-insensitive version of RankFind.
+func RankFindNormalizedFold(source string, targets []string) Ranks {
+ return rankFind(source, targets, normalizedFoldTransformer())
+}
+
+func rankFind(source string, targets []string, transformer transform.Transformer) Ranks {
+ sourceT := stringTransform(source, transformer)
+
+ var r Ranks
+
+ for index, target := range targets {
+ targetT := stringTransform(target, transformer)
+ if matchTransformed(sourceT, targetT) {
+ distance := LevenshteinDistance(source, target)
+ r = append(r, Rank{source, target, distance, index})
+ }
+ }
+ return r
+}
+
+type Rank struct {
+ // Source is used as the source for matching.
+ Source string
+
+ // Target is the word matched against.
+ Target string
+
+ // Distance is the Levenshtein distance between Source and Target.
+ Distance int
+
+ // Location of Target in original list
+ OriginalIndex int
+}
+
+type Ranks []Rank
+
+func (r Ranks) Len() int {
+ return len(r)
+}
+
+func (r Ranks) Swap(i, j int) {
+ r[i], r[j] = r[j], r[i]
+}
+
+func (r Ranks) Less(i, j int) bool {
+ return r[i].Distance < r[j].Distance
+}
+
+func stringTransform(s string, t transform.Transformer) (transformed string) {
+ // Fast path for the nop transformer to prevent unnecessary allocations.
+ if _, ok := t.(nopTransformer); ok {
+ return s
+ }
+
+ var err error
+ transformed, _, err = transform.String(t, s)
+ if err != nil {
+ transformed = s
+ }
+
+ return
+}
+
+type unicodeFoldTransformer struct{ transform.NopResetter }
+
+func (unicodeFoldTransformer) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
+ // Converting src to a string allocates.
+ // In theory, it need not; see https://go.dev/issue/27148.
+ // It is possible to write this loop using utf8.DecodeRune
+ // and thereby avoid allocations, but it is noticeably slower.
+ // So just let's wait for the compiler to get smarter.
+ for _, r := range string(src) {
+ if r == utf8.RuneError {
+ // Go spec for ranging over a string says:
+ // If the iteration encounters an invalid UTF-8 sequence,
+ // the second value will be 0xFFFD, the Unicode replacement character,
+ // and the next iteration will advance a single byte in the string.
+ nSrc++
+ } else {
+ nSrc += utf8.RuneLen(r)
+ }
+ r = unicode.ToLower(r)
+ x := utf8.RuneLen(r)
+ if x > len(dst[nDst:]) {
+ err = transform.ErrShortDst
+ break
+ }
+ nDst += utf8.EncodeRune(dst[nDst:], r)
+ }
+ return nDst, nSrc, err
+}
+
+type nopTransformer struct{ transform.NopResetter }
+
+func (nopTransformer) Transform(dst []byte, src []byte, atEOF bool) (int, int, error) {
+ return 0, len(src), nil
+}
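
A short usage sketch of the matching API above; the values in the comments are traced from this implementation (RankMatch counts skipped target runes, i.e. a deletions-only Levenshtein distance).

```go
package main

import (
	"fmt"

	"github.com/lithammer/fuzzysearch/fuzzy"
)

func main() {
	// Every source rune must appear in the target, in order.
	fmt.Println(fuzzy.Match("twl", "cartwheel")) // true
	fmt.Println(fuzzy.Match("dog", "cartwheel")) // false

	// RankMatch also reports how many target runes were skipped, or -1 on no match.
	fmt.Println(fuzzy.RankMatch("twl", "cartwheel"))       // 6
	fmt.Println(fuzzy.RankMatch("cartwheel", "cartwheel")) // 0

	// FindFold filters a slice down to the case-insensitive fuzzy matches.
	fmt.Println(fuzzy.FindFold("WH", []string{"cartwheel", "wheel", "bottle"}))
	// [cartwheel wheel]
}
```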
diff --git a/vendor/github.com/lithammer/fuzzysearch/fuzzy/levenshtein.go b/vendor/github.com/lithammer/fuzzysearch/fuzzy/levenshtein.go
new file mode 100644
index 0000000..c0fc191
--- /dev/null
+++ b/vendor/github.com/lithammer/fuzzysearch/fuzzy/levenshtein.go
@@ -0,0 +1,45 @@
+package fuzzy
+
+// LevenshteinDistance measures the difference between two strings.
+// The Levenshtein distance between two words is the minimum number of
+// single-character edits (i.e. insertions, deletions or substitutions)
+// required to change one word into the other.
+//
+// This implementation is optimized to use O(min(m,n)) space and is based on the
+// optimized C version found here:
+// http://en.wikibooks.org/wiki/Algorithm_implementation/Strings/Levenshtein_distance#C
+func LevenshteinDistance(s, t string) int {
+ r1, r2 := []rune(s), []rune(t)
+ column := make([]int, 1, 64)
+
+ for y := 1; y <= len(r1); y++ {
+ column = append(column, y)
+ }
+
+ for x := 1; x <= len(r2); x++ {
+ column[0] = x
+
+ for y, lastDiag := 1, x-1; y <= len(r1); y++ {
+ oldDiag := column[y]
+ cost := 0
+ if r1[y-1] != r2[x-1] {
+ cost = 1
+ }
+ column[y] = min(column[y]+1, column[y-1]+1, lastDiag+cost)
+ lastDiag = oldDiag
+ }
+ }
+
+ return column[len(r1)]
+}
+
+func min2(a, b int) int {
+ if a < b {
+ return a
+ }
+ return b
+}
+
+func min(a, b, c int) int {
+ return min2(min2(a, b), c)
+}
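
A worked example of the distance function; the values in the comments follow from the definition (kitten to sitting needs two substitutions and one insertion).

```go
package main

import (
	"fmt"

	"github.com/lithammer/fuzzysearch/fuzzy"
)

func main() {
	// k->s, e->i, insert g: three single-character edits.
	fmt.Println(fuzzy.LevenshteinDistance("kitten", "sitting")) // 3

	// Identical strings cost nothing; building a string from scratch costs its length.
	fmt.Println(fuzzy.LevenshteinDistance("go", "go"))   // 0
	fmt.Println(fuzzy.LevenshteinDistance("", "gopher")) // 6
}
```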
diff --git a/vendor/github.com/mattn/go-runewidth/LICENSE b/vendor/github.com/mattn/go-runewidth/LICENSE
new file mode 100644
index 0000000..91b5cef
--- /dev/null
+++ b/vendor/github.com/mattn/go-runewidth/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2016 Yasuhiro Matsumoto
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/mattn/go-runewidth/README.md b/vendor/github.com/mattn/go-runewidth/README.md
new file mode 100644
index 0000000..5e2cfd9
--- /dev/null
+++ b/vendor/github.com/mattn/go-runewidth/README.md
@@ -0,0 +1,27 @@
+go-runewidth
+============
+
+[test workflow](https://github.com/mattn/go-runewidth/actions?query=workflow%3Atest)
+[codecov](https://codecov.io/gh/mattn/go-runewidth)
+[GoDoc](http://godoc.org/github.com/mattn/go-runewidth)
+[Go Report Card](https://goreportcard.com/report/github.com/mattn/go-runewidth)
+
+Provides functions to get the fixed width of a character or string.
+
+Usage
+-----
+
+```go
+runewidth.StringWidth("つのだ☆HIRO") == 12
+```
+
+
+Author
+------
+
+Yasuhiro Matsumoto
+
+License
+-------
+
+under the MIT License: http://mattn.mit-license.org/2013
diff --git a/vendor/github.com/mattn/go-runewidth/runewidth.go b/vendor/github.com/mattn/go-runewidth/runewidth.go
new file mode 100644
index 0000000..7dfbb3b
--- /dev/null
+++ b/vendor/github.com/mattn/go-runewidth/runewidth.go
@@ -0,0 +1,358 @@
+package runewidth
+
+import (
+ "os"
+ "strings"
+
+ "github.com/rivo/uniseg"
+)
+
+//go:generate go run script/generate.go
+
+var (
+ // EastAsianWidth will be set true if the current locale is CJK
+ EastAsianWidth bool
+
+ // StrictEmojiNeutral should be set to false when handling broken fonts
+ StrictEmojiNeutral bool = true
+
+ // DefaultCondition is a condition in current locale
+ DefaultCondition = &Condition{
+ EastAsianWidth: false,
+ StrictEmojiNeutral: true,
+ }
+)
+
+func init() {
+ handleEnv()
+}
+
+func handleEnv() {
+ env := os.Getenv("RUNEWIDTH_EASTASIAN")
+ if env == "" {
+ EastAsianWidth = IsEastAsian()
+ } else {
+ EastAsianWidth = env == "1"
+ }
+ // update DefaultCondition
+ if DefaultCondition.EastAsianWidth != EastAsianWidth {
+ DefaultCondition.EastAsianWidth = EastAsianWidth
+ if len(DefaultCondition.combinedLut) > 0 {
+ DefaultCondition.combinedLut = DefaultCondition.combinedLut[:0]
+ CreateLUT()
+ }
+ }
+}
+
+type interval struct {
+ first rune
+ last rune
+}
+
+type table []interval
+
+func inTables(r rune, ts ...table) bool {
+ for _, t := range ts {
+ if inTable(r, t) {
+ return true
+ }
+ }
+ return false
+}
+
+func inTable(r rune, t table) bool {
+ if r < t[0].first {
+ return false
+ }
+
+ bot := 0
+ top := len(t) - 1
+ for top >= bot {
+ mid := (bot + top) >> 1
+
+ switch {
+ case t[mid].last < r:
+ bot = mid + 1
+ case t[mid].first > r:
+ top = mid - 1
+ default:
+ return true
+ }
+ }
+
+ return false
+}
+
+var private = table{
+ {0x00E000, 0x00F8FF}, {0x0F0000, 0x0FFFFD}, {0x100000, 0x10FFFD},
+}
+
+var nonprint = table{
+ {0x0000, 0x001F}, {0x007F, 0x009F}, {0x00AD, 0x00AD},
+ {0x070F, 0x070F}, {0x180B, 0x180E}, {0x200B, 0x200F},
+ {0x2028, 0x202E}, {0x206A, 0x206F}, {0xD800, 0xDFFF},
+ {0xFEFF, 0xFEFF}, {0xFFF9, 0xFFFB}, {0xFFFE, 0xFFFF},
+}
+
+// Condition have flag EastAsianWidth whether the current locale is CJK or not.
+type Condition struct {
+ combinedLut []byte
+ EastAsianWidth bool
+ StrictEmojiNeutral bool
+}
+
+// NewCondition return new instance of Condition which is current locale.
+func NewCondition() *Condition {
+ return &Condition{
+ EastAsianWidth: EastAsianWidth,
+ StrictEmojiNeutral: StrictEmojiNeutral,
+ }
+}
+
+// RuneWidth returns the number of cells in r.
+// See http://www.unicode.org/reports/tr11/
+func (c *Condition) RuneWidth(r rune) int {
+ if r < 0 || r > 0x10FFFF {
+ return 0
+ }
+ if len(c.combinedLut) > 0 {
+ return int(c.combinedLut[r>>1]>>(uint(r&1)*4)) & 3
+ }
+ // optimized version, verified by TestRuneWidthChecksums()
+ if !c.EastAsianWidth {
+ switch {
+ case r < 0x20:
+ return 0
+ case (r >= 0x7F && r <= 0x9F) || r == 0xAD: // nonprint
+ return 0
+ case r < 0x300:
+ return 1
+ case inTable(r, narrow):
+ return 1
+ case inTables(r, nonprint, combining):
+ return 0
+ case inTable(r, doublewidth):
+ return 2
+ default:
+ return 1
+ }
+ } else {
+ switch {
+ case inTables(r, nonprint, combining):
+ return 0
+ case inTable(r, narrow):
+ return 1
+ case inTables(r, ambiguous, doublewidth):
+ return 2
+ case !c.StrictEmojiNeutral && inTables(r, ambiguous, emoji, narrow):
+ return 2
+ default:
+ return 1
+ }
+ }
+}
+
+// CreateLUT will create an in-memory lookup table of 557056 bytes for faster operation.
+// This should not be called concurrently with other operations on c.
+// If options in c is changed, CreateLUT should be called again.
+func (c *Condition) CreateLUT() {
+ const max = 0x110000
+ lut := c.combinedLut
+ if len(c.combinedLut) != 0 {
+ // Remove so we don't use it.
+ c.combinedLut = nil
+ } else {
+ lut = make([]byte, max/2)
+ }
+ for i := range lut {
+ i32 := int32(i * 2)
+ x0 := c.RuneWidth(i32)
+ x1 := c.RuneWidth(i32 + 1)
+ lut[i] = uint8(x0) | uint8(x1)<<4
+ }
+ c.combinedLut = lut
+}
+
+// StringWidth return width as you can see
+func (c *Condition) StringWidth(s string) (width int) {
+ g := uniseg.NewGraphemes(s)
+ for g.Next() {
+ var chWidth int
+ for _, r := range g.Runes() {
+ chWidth = c.RuneWidth(r)
+ if chWidth > 0 {
+ break // Our best guess at this point is to use the width of the first non-zero-width rune.
+ }
+ }
+ width += chWidth
+ }
+ return
+}
+
+// Truncate return string truncated with w cells
+func (c *Condition) Truncate(s string, w int, tail string) string {
+ if c.StringWidth(s) <= w {
+ return s
+ }
+ w -= c.StringWidth(tail)
+ var width int
+ pos := len(s)
+ g := uniseg.NewGraphemes(s)
+ for g.Next() {
+ var chWidth int
+ for _, r := range g.Runes() {
+ chWidth = c.RuneWidth(r)
+ if chWidth > 0 {
+ break // See StringWidth() for details.
+ }
+ }
+ if width+chWidth > w {
+ pos, _ = g.Positions()
+ break
+ }
+ width += chWidth
+ }
+ return s[:pos] + tail
+}
+
+// TruncateLeft cuts w cells from the beginning of the `s`.
+func (c *Condition) TruncateLeft(s string, w int, prefix string) string {
+ if c.StringWidth(s) <= w {
+ return prefix
+ }
+
+ var width int
+ pos := len(s)
+
+ g := uniseg.NewGraphemes(s)
+ for g.Next() {
+ var chWidth int
+ for _, r := range g.Runes() {
+ chWidth = c.RuneWidth(r)
+ if chWidth > 0 {
+ break // See StringWidth() for details.
+ }
+ }
+
+ if width+chWidth > w {
+ if width < w {
+ _, pos = g.Positions()
+ prefix += strings.Repeat(" ", width+chWidth-w)
+ } else {
+ pos, _ = g.Positions()
+ }
+
+ break
+ }
+
+ width += chWidth
+ }
+
+ return prefix + s[pos:]
+}
+
+// Wrap return string wrapped with w cells
+func (c *Condition) Wrap(s string, w int) string {
+ width := 0
+ out := ""
+ for _, r := range s {
+ cw := c.RuneWidth(r)
+ if r == '\n' {
+ out += string(r)
+ width = 0
+ continue
+ } else if width+cw > w {
+ out += "\n"
+ width = 0
+ out += string(r)
+ width += cw
+ continue
+ }
+ out += string(r)
+ width += cw
+ }
+ return out
+}
+
+// FillLeft return string filled in left by spaces in w cells
+func (c *Condition) FillLeft(s string, w int) string {
+ width := c.StringWidth(s)
+ count := w - width
+ if count > 0 {
+ b := make([]byte, count)
+ for i := range b {
+ b[i] = ' '
+ }
+ return string(b) + s
+ }
+ return s
+}
+
+// FillRight return string filled in right by spaces in w cells
+func (c *Condition) FillRight(s string, w int) string {
+ width := c.StringWidth(s)
+ count := w - width
+ if count > 0 {
+ b := make([]byte, count)
+ for i := range b {
+ b[i] = ' '
+ }
+ return s + string(b)
+ }
+ return s
+}
+
+// RuneWidth returns the number of cells in r.
+// See http://www.unicode.org/reports/tr11/
+func RuneWidth(r rune) int {
+ return DefaultCondition.RuneWidth(r)
+}
+
+// IsAmbiguousWidth returns whether is ambiguous width or not.
+func IsAmbiguousWidth(r rune) bool {
+ return inTables(r, private, ambiguous)
+}
+
+// IsNeutralWidth returns whether is neutral width or not.
+func IsNeutralWidth(r rune) bool {
+ return inTable(r, neutral)
+}
+
+// StringWidth return width as you can see
+func StringWidth(s string) (width int) {
+ return DefaultCondition.StringWidth(s)
+}
+
+// Truncate return string truncated with w cells
+func Truncate(s string, w int, tail string) string {
+ return DefaultCondition.Truncate(s, w, tail)
+}
+
+// TruncateLeft cuts w cells from the beginning of the `s`.
+func TruncateLeft(s string, w int, prefix string) string {
+ return DefaultCondition.TruncateLeft(s, w, prefix)
+}
+
+// Wrap return string wrapped with w cells
+func Wrap(s string, w int) string {
+ return DefaultCondition.Wrap(s, w)
+}
+
+// FillLeft return string filled in left by spaces in w cells
+func FillLeft(s string, w int) string {
+ return DefaultCondition.FillLeft(s, w)
+}
+
+// FillRight return string filled in right by spaces in w cells
+func FillRight(s string, w int) string {
+ return DefaultCondition.FillRight(s, w)
+}
+
+// CreateLUT will create an in-memory lookup table of 557056 bytes for faster operation.
+// This should not be called concurrently with other operations.
+func CreateLUT() {
+ if len(DefaultCondition.combinedLut) > 0 {
+ return
+ }
+ DefaultCondition.CreateLUT()
+}
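
A brief sketch of the package-level helpers above under the default (non-East-Asian) condition; the strings are chosen so the cell counts follow directly from the double-width table (kana and CJK ideographs are two cells, ASCII is one).

```go
package main

import (
	"fmt"

	runewidth "github.com/mattn/go-runewidth"
)

func main() {
	// "Go" is 2 cells, each katakana rune is 2 cells: 2 + 3*2 = 8.
	fmt.Println(runewidth.StringWidth("Goコード")) // 8

	// Truncate appends the tail once the string no longer fits in w cells.
	fmt.Println(runewidth.Truncate("Goコード", 6, "...")) // Go...

	// FillRight pads with spaces up to w cells, keeping mixed-width columns aligned.
	fmt.Printf("[%s]\n", runewidth.FillRight("コ", 4)) // [コ  ]
}
```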
diff --git a/vendor/github.com/mattn/go-runewidth/runewidth_appengine.go b/vendor/github.com/mattn/go-runewidth/runewidth_appengine.go
new file mode 100644
index 0000000..84b6528
--- /dev/null
+++ b/vendor/github.com/mattn/go-runewidth/runewidth_appengine.go
@@ -0,0 +1,9 @@
+//go:build appengine
+// +build appengine
+
+package runewidth
+
+// IsEastAsian return true if the current locale is CJK
+func IsEastAsian() bool {
+ return false
+}
diff --git a/vendor/github.com/mattn/go-runewidth/runewidth_js.go b/vendor/github.com/mattn/go-runewidth/runewidth_js.go
new file mode 100644
index 0000000..c2abbc2
--- /dev/null
+++ b/vendor/github.com/mattn/go-runewidth/runewidth_js.go
@@ -0,0 +1,9 @@
+//go:build js && !appengine
+// +build js,!appengine
+
+package runewidth
+
+func IsEastAsian() bool {
+ // TODO: Implement this for the web. Detect east asian in a compatible way, and return true.
+ return false
+}
diff --git a/vendor/github.com/mattn/go-runewidth/runewidth_posix.go b/vendor/github.com/mattn/go-runewidth/runewidth_posix.go
new file mode 100644
index 0000000..5a31d73
--- /dev/null
+++ b/vendor/github.com/mattn/go-runewidth/runewidth_posix.go
@@ -0,0 +1,81 @@
+//go:build !windows && !js && !appengine
+// +build !windows,!js,!appengine
+
+package runewidth
+
+import (
+ "os"
+ "regexp"
+ "strings"
+)
+
+var reLoc = regexp.MustCompile(`^[a-z][a-z][a-z]?(?:_[A-Z][A-Z])?\.(.+)`)
+
+var mblenTable = map[string]int{
+ "utf-8": 6,
+ "utf8": 6,
+ "jis": 8,
+ "eucjp": 3,
+ "euckr": 2,
+ "euccn": 2,
+ "sjis": 2,
+ "cp932": 2,
+ "cp51932": 2,
+ "cp936": 2,
+ "cp949": 2,
+ "cp950": 2,
+ "big5": 2,
+ "gbk": 2,
+ "gb2312": 2,
+}
+
+func isEastAsian(locale string) bool {
+ charset := strings.ToLower(locale)
+ r := reLoc.FindStringSubmatch(locale)
+ if len(r) == 2 {
+ charset = strings.ToLower(r[1])
+ }
+
+ if strings.HasSuffix(charset, "@cjk_narrow") {
+ return false
+ }
+
+ for pos, b := range []byte(charset) {
+ if b == '@' {
+ charset = charset[:pos]
+ break
+ }
+ }
+ max := 1
+ if m, ok := mblenTable[charset]; ok {
+ max = m
+ }
+ if max > 1 && (charset[0] != 'u' ||
+ strings.HasPrefix(locale, "ja") ||
+ strings.HasPrefix(locale, "ko") ||
+ strings.HasPrefix(locale, "zh")) {
+ return true
+ }
+ return false
+}
+
+// IsEastAsian return true if the current locale is CJK
+func IsEastAsian() bool {
+ locale := os.Getenv("LC_ALL")
+ if locale == "" {
+ locale = os.Getenv("LC_CTYPE")
+ }
+ if locale == "" {
+ locale = os.Getenv("LANG")
+ }
+
+ // ignore C locale
+ if locale == "POSIX" || locale == "C" {
+ return false
+ }
+ if len(locale) > 1 && locale[0] == 'C' && (locale[1] == '.' || locale[1] == '-') {
+ return false
+ }
+
+ return isEastAsian(locale)
+}
diff --git a/vendor/github.com/mattn/go-runewidth/runewidth_table.go b/vendor/github.com/mattn/go-runewidth/runewidth_table.go
new file mode 100644
index 0000000..ad025ad
--- /dev/null
+++ b/vendor/github.com/mattn/go-runewidth/runewidth_table.go
@@ -0,0 +1,450 @@
+// Code generated by script/generate.go. DO NOT EDIT.
+
+package runewidth
+
+var combining = table{
+ {0x0300, 0x036F}, {0x0483, 0x0489}, {0x07EB, 0x07F3},
+ {0x0C00, 0x0C00}, {0x0C04, 0x0C04}, {0x0CF3, 0x0CF3},
+ {0x0D00, 0x0D01}, {0x135D, 0x135F}, {0x1A7F, 0x1A7F},
+ {0x1AB0, 0x1ACE}, {0x1B6B, 0x1B73}, {0x1DC0, 0x1DFF},
+ {0x20D0, 0x20F0}, {0x2CEF, 0x2CF1}, {0x2DE0, 0x2DFF},
+ {0x3099, 0x309A}, {0xA66F, 0xA672}, {0xA674, 0xA67D},
+ {0xA69E, 0xA69F}, {0xA6F0, 0xA6F1}, {0xA8E0, 0xA8F1},
+ {0xFE20, 0xFE2F}, {0x101FD, 0x101FD}, {0x10376, 0x1037A},
+ {0x10EAB, 0x10EAC}, {0x10F46, 0x10F50}, {0x10F82, 0x10F85},
+ {0x11300, 0x11301}, {0x1133B, 0x1133C}, {0x11366, 0x1136C},
+ {0x11370, 0x11374}, {0x16AF0, 0x16AF4}, {0x1CF00, 0x1CF2D},
+ {0x1CF30, 0x1CF46}, {0x1D165, 0x1D169}, {0x1D16D, 0x1D172},
+ {0x1D17B, 0x1D182}, {0x1D185, 0x1D18B}, {0x1D1AA, 0x1D1AD},
+ {0x1D242, 0x1D244}, {0x1E000, 0x1E006}, {0x1E008, 0x1E018},
+ {0x1E01B, 0x1E021}, {0x1E023, 0x1E024}, {0x1E026, 0x1E02A},
+ {0x1E08F, 0x1E08F}, {0x1E8D0, 0x1E8D6},
+}
+
+var doublewidth = table{
+ {0x1100, 0x115F}, {0x231A, 0x231B}, {0x2329, 0x232A},
+ {0x23E9, 0x23EC}, {0x23F0, 0x23F0}, {0x23F3, 0x23F3},
+ {0x25FD, 0x25FE}, {0x2614, 0x2615}, {0x2648, 0x2653},
+ {0x267F, 0x267F}, {0x2693, 0x2693}, {0x26A1, 0x26A1},
+ {0x26AA, 0x26AB}, {0x26BD, 0x26BE}, {0x26C4, 0x26C5},
+ {0x26CE, 0x26CE}, {0x26D4, 0x26D4}, {0x26EA, 0x26EA},
+ {0x26F2, 0x26F3}, {0x26F5, 0x26F5}, {0x26FA, 0x26FA},
+ {0x26FD, 0x26FD}, {0x2705, 0x2705}, {0x270A, 0x270B},
+ {0x2728, 0x2728}, {0x274C, 0x274C}, {0x274E, 0x274E},
+ {0x2753, 0x2755}, {0x2757, 0x2757}, {0x2795, 0x2797},
+ {0x27B0, 0x27B0}, {0x27BF, 0x27BF}, {0x2B1B, 0x2B1C},
+ {0x2B50, 0x2B50}, {0x2B55, 0x2B55}, {0x2E80, 0x2E99},
+ {0x2E9B, 0x2EF3}, {0x2F00, 0x2FD5}, {0x2FF0, 0x303E},
+ {0x3041, 0x3096}, {0x3099, 0x30FF}, {0x3105, 0x312F},
+ {0x3131, 0x318E}, {0x3190, 0x31E3}, {0x31EF, 0x321E},
+ {0x3220, 0x3247}, {0x3250, 0x4DBF}, {0x4E00, 0xA48C},
+ {0xA490, 0xA4C6}, {0xA960, 0xA97C}, {0xAC00, 0xD7A3},
+ {0xF900, 0xFAFF}, {0xFE10, 0xFE19}, {0xFE30, 0xFE52},
+ {0xFE54, 0xFE66}, {0xFE68, 0xFE6B}, {0xFF01, 0xFF60},
+ {0xFFE0, 0xFFE6}, {0x16FE0, 0x16FE4}, {0x16FF0, 0x16FF1},
+ {0x17000, 0x187F7}, {0x18800, 0x18CD5}, {0x18D00, 0x18D08},
+ {0x1AFF0, 0x1AFF3}, {0x1AFF5, 0x1AFFB}, {0x1AFFD, 0x1AFFE},
+ {0x1B000, 0x1B122}, {0x1B132, 0x1B132}, {0x1B150, 0x1B152},
+ {0x1B155, 0x1B155}, {0x1B164, 0x1B167}, {0x1B170, 0x1B2FB},
+ {0x1F004, 0x1F004}, {0x1F0CF, 0x1F0CF}, {0x1F18E, 0x1F18E},
+ {0x1F191, 0x1F19A}, {0x1F200, 0x1F202}, {0x1F210, 0x1F23B},
+ {0x1F240, 0x1F248}, {0x1F250, 0x1F251}, {0x1F260, 0x1F265},
+ {0x1F300, 0x1F320}, {0x1F32D, 0x1F335}, {0x1F337, 0x1F37C},
+ {0x1F37E, 0x1F393}, {0x1F3A0, 0x1F3CA}, {0x1F3CF, 0x1F3D3},
+ {0x1F3E0, 0x1F3F0}, {0x1F3F4, 0x1F3F4}, {0x1F3F8, 0x1F43E},
+ {0x1F440, 0x1F440}, {0x1F442, 0x1F4FC}, {0x1F4FF, 0x1F53D},
+ {0x1F54B, 0x1F54E}, {0x1F550, 0x1F567}, {0x1F57A, 0x1F57A},
+ {0x1F595, 0x1F596}, {0x1F5A4, 0x1F5A4}, {0x1F5FB, 0x1F64F},
+ {0x1F680, 0x1F6C5}, {0x1F6CC, 0x1F6CC}, {0x1F6D0, 0x1F6D2},
+ {0x1F6D5, 0x1F6D7}, {0x1F6DC, 0x1F6DF}, {0x1F6EB, 0x1F6EC},
+ {0x1F6F4, 0x1F6FC}, {0x1F7E0, 0x1F7EB}, {0x1F7F0, 0x1F7F0},
+ {0x1F90C, 0x1F93A}, {0x1F93C, 0x1F945}, {0x1F947, 0x1F9FF},
+ {0x1FA70, 0x1FA7C}, {0x1FA80, 0x1FA88}, {0x1FA90, 0x1FABD},
+ {0x1FABF, 0x1FAC5}, {0x1FACE, 0x1FADB}, {0x1FAE0, 0x1FAE8},
+ {0x1FAF0, 0x1FAF8}, {0x20000, 0x2FFFD}, {0x30000, 0x3FFFD},
+}
+
+var ambiguous = table{
+ {0x00A1, 0x00A1}, {0x00A4, 0x00A4}, {0x00A7, 0x00A8},
+ {0x00AA, 0x00AA}, {0x00AD, 0x00AE}, {0x00B0, 0x00B4},
+ {0x00B6, 0x00BA}, {0x00BC, 0x00BF}, {0x00C6, 0x00C6},
+ {0x00D0, 0x00D0}, {0x00D7, 0x00D8}, {0x00DE, 0x00E1},
+ {0x00E6, 0x00E6}, {0x00E8, 0x00EA}, {0x00EC, 0x00ED},
+ {0x00F0, 0x00F0}, {0x00F2, 0x00F3}, {0x00F7, 0x00FA},
+ {0x00FC, 0x00FC}, {0x00FE, 0x00FE}, {0x0101, 0x0101},
+ {0x0111, 0x0111}, {0x0113, 0x0113}, {0x011B, 0x011B},
+ {0x0126, 0x0127}, {0x012B, 0x012B}, {0x0131, 0x0133},
+ {0x0138, 0x0138}, {0x013F, 0x0142}, {0x0144, 0x0144},
+ {0x0148, 0x014B}, {0x014D, 0x014D}, {0x0152, 0x0153},
+ {0x0166, 0x0167}, {0x016B, 0x016B}, {0x01CE, 0x01CE},
+ {0x01D0, 0x01D0}, {0x01D2, 0x01D2}, {0x01D4, 0x01D4},
+ {0x01D6, 0x01D6}, {0x01D8, 0x01D8}, {0x01DA, 0x01DA},
+ {0x01DC, 0x01DC}, {0x0251, 0x0251}, {0x0261, 0x0261},
+ {0x02C4, 0x02C4}, {0x02C7, 0x02C7}, {0x02C9, 0x02CB},
+ {0x02CD, 0x02CD}, {0x02D0, 0x02D0}, {0x02D8, 0x02DB},
+ {0x02DD, 0x02DD}, {0x02DF, 0x02DF}, {0x0300, 0x036F},
+ {0x0391, 0x03A1}, {0x03A3, 0x03A9}, {0x03B1, 0x03C1},
+ {0x03C3, 0x03C9}, {0x0401, 0x0401}, {0x0410, 0x044F},
+ {0x0451, 0x0451}, {0x2010, 0x2010}, {0x2013, 0x2016},
+ {0x2018, 0x2019}, {0x201C, 0x201D}, {0x2020, 0x2022},
+ {0x2024, 0x2027}, {0x2030, 0x2030}, {0x2032, 0x2033},
+ {0x2035, 0x2035}, {0x203B, 0x203B}, {0x203E, 0x203E},
+ {0x2074, 0x2074}, {0x207F, 0x207F}, {0x2081, 0x2084},
+ {0x20AC, 0x20AC}, {0x2103, 0x2103}, {0x2105, 0x2105},
+ {0x2109, 0x2109}, {0x2113, 0x2113}, {0x2116, 0x2116},
+ {0x2121, 0x2122}, {0x2126, 0x2126}, {0x212B, 0x212B},
+ {0x2153, 0x2154}, {0x215B, 0x215E}, {0x2160, 0x216B},
+ {0x2170, 0x2179}, {0x2189, 0x2189}, {0x2190, 0x2199},
+ {0x21B8, 0x21B9}, {0x21D2, 0x21D2}, {0x21D4, 0x21D4},
+ {0x21E7, 0x21E7}, {0x2200, 0x2200}, {0x2202, 0x2203},
+ {0x2207, 0x2208}, {0x220B, 0x220B}, {0x220F, 0x220F},
+ {0x2211, 0x2211}, {0x2215, 0x2215}, {0x221A, 0x221A},
+ {0x221D, 0x2220}, {0x2223, 0x2223}, {0x2225, 0x2225},
+ {0x2227, 0x222C}, {0x222E, 0x222E}, {0x2234, 0x2237},
+ {0x223C, 0x223D}, {0x2248, 0x2248}, {0x224C, 0x224C},
+ {0x2252, 0x2252}, {0x2260, 0x2261}, {0x2264, 0x2267},
+ {0x226A, 0x226B}, {0x226E, 0x226F}, {0x2282, 0x2283},
+ {0x2286, 0x2287}, {0x2295, 0x2295}, {0x2299, 0x2299},
+ {0x22A5, 0x22A5}, {0x22BF, 0x22BF}, {0x2312, 0x2312},
+ {0x2460, 0x24E9}, {0x24EB, 0x254B}, {0x2550, 0x2573},
+ {0x2580, 0x258F}, {0x2592, 0x2595}, {0x25A0, 0x25A1},
+ {0x25A3, 0x25A9}, {0x25B2, 0x25B3}, {0x25B6, 0x25B7},
+ {0x25BC, 0x25BD}, {0x25C0, 0x25C1}, {0x25C6, 0x25C8},
+ {0x25CB, 0x25CB}, {0x25CE, 0x25D1}, {0x25E2, 0x25E5},
+ {0x25EF, 0x25EF}, {0x2605, 0x2606}, {0x2609, 0x2609},
+ {0x260E, 0x260F}, {0x261C, 0x261C}, {0x261E, 0x261E},
+ {0x2640, 0x2640}, {0x2642, 0x2642}, {0x2660, 0x2661},
+ {0x2663, 0x2665}, {0x2667, 0x266A}, {0x266C, 0x266D},
+ {0x266F, 0x266F}, {0x269E, 0x269F}, {0x26BF, 0x26BF},
+ {0x26C6, 0x26CD}, {0x26CF, 0x26D3}, {0x26D5, 0x26E1},
+ {0x26E3, 0x26E3}, {0x26E8, 0x26E9}, {0x26EB, 0x26F1},
+ {0x26F4, 0x26F4}, {0x26F6, 0x26F9}, {0x26FB, 0x26FC},
+ {0x26FE, 0x26FF}, {0x273D, 0x273D}, {0x2776, 0x277F},
+ {0x2B56, 0x2B59}, {0x3248, 0x324F}, {0xE000, 0xF8FF},
+ {0xFE00, 0xFE0F}, {0xFFFD, 0xFFFD}, {0x1F100, 0x1F10A},
+ {0x1F110, 0x1F12D}, {0x1F130, 0x1F169}, {0x1F170, 0x1F18D},
+ {0x1F18F, 0x1F190}, {0x1F19B, 0x1F1AC}, {0xE0100, 0xE01EF},
+ {0xF0000, 0xFFFFD}, {0x100000, 0x10FFFD},
+}
+var narrow = table{
+ {0x0020, 0x007E}, {0x00A2, 0x00A3}, {0x00A5, 0x00A6},
+ {0x00AC, 0x00AC}, {0x00AF, 0x00AF}, {0x27E6, 0x27ED},
+ {0x2985, 0x2986},
+}
+
+var neutral = table{
+ {0x0000, 0x001F}, {0x007F, 0x00A0}, {0x00A9, 0x00A9},
+ {0x00AB, 0x00AB}, {0x00B5, 0x00B5}, {0x00BB, 0x00BB},
+ {0x00C0, 0x00C5}, {0x00C7, 0x00CF}, {0x00D1, 0x00D6},
+ {0x00D9, 0x00DD}, {0x00E2, 0x00E5}, {0x00E7, 0x00E7},
+ {0x00EB, 0x00EB}, {0x00EE, 0x00EF}, {0x00F1, 0x00F1},
+ {0x00F4, 0x00F6}, {0x00FB, 0x00FB}, {0x00FD, 0x00FD},
+ {0x00FF, 0x0100}, {0x0102, 0x0110}, {0x0112, 0x0112},
+ {0x0114, 0x011A}, {0x011C, 0x0125}, {0x0128, 0x012A},
+ {0x012C, 0x0130}, {0x0134, 0x0137}, {0x0139, 0x013E},
+ {0x0143, 0x0143}, {0x0145, 0x0147}, {0x014C, 0x014C},
+ {0x014E, 0x0151}, {0x0154, 0x0165}, {0x0168, 0x016A},
+ {0x016C, 0x01CD}, {0x01CF, 0x01CF}, {0x01D1, 0x01D1},
+ {0x01D3, 0x01D3}, {0x01D5, 0x01D5}, {0x01D7, 0x01D7},
+ {0x01D9, 0x01D9}, {0x01DB, 0x01DB}, {0x01DD, 0x0250},
+ {0x0252, 0x0260}, {0x0262, 0x02C3}, {0x02C5, 0x02C6},
+ {0x02C8, 0x02C8}, {0x02CC, 0x02CC}, {0x02CE, 0x02CF},
+ {0x02D1, 0x02D7}, {0x02DC, 0x02DC}, {0x02DE, 0x02DE},
+ {0x02E0, 0x02FF}, {0x0370, 0x0377}, {0x037A, 0x037F},
+ {0x0384, 0x038A}, {0x038C, 0x038C}, {0x038E, 0x0390},
+ {0x03AA, 0x03B0}, {0x03C2, 0x03C2}, {0x03CA, 0x0400},
+ {0x0402, 0x040F}, {0x0450, 0x0450}, {0x0452, 0x052F},
+ {0x0531, 0x0556}, {0x0559, 0x058A}, {0x058D, 0x058F},
+ {0x0591, 0x05C7}, {0x05D0, 0x05EA}, {0x05EF, 0x05F4},
+ {0x0600, 0x070D}, {0x070F, 0x074A}, {0x074D, 0x07B1},
+ {0x07C0, 0x07FA}, {0x07FD, 0x082D}, {0x0830, 0x083E},
+ {0x0840, 0x085B}, {0x085E, 0x085E}, {0x0860, 0x086A},
+ {0x0870, 0x088E}, {0x0890, 0x0891}, {0x0898, 0x0983},
+ {0x0985, 0x098C}, {0x098F, 0x0990}, {0x0993, 0x09A8},
+ {0x09AA, 0x09B0}, {0x09B2, 0x09B2}, {0x09B6, 0x09B9},
+ {0x09BC, 0x09C4}, {0x09C7, 0x09C8}, {0x09CB, 0x09CE},
+ {0x09D7, 0x09D7}, {0x09DC, 0x09DD}, {0x09DF, 0x09E3},
+ {0x09E6, 0x09FE}, {0x0A01, 0x0A03}, {0x0A05, 0x0A0A},
+ {0x0A0F, 0x0A10}, {0x0A13, 0x0A28}, {0x0A2A, 0x0A30},
+ {0x0A32, 0x0A33}, {0x0A35, 0x0A36}, {0x0A38, 0x0A39},
+ {0x0A3C, 0x0A3C}, {0x0A3E, 0x0A42}, {0x0A47, 0x0A48},
+ {0x0A4B, 0x0A4D}, {0x0A51, 0x0A51}, {0x0A59, 0x0A5C},
+ {0x0A5E, 0x0A5E}, {0x0A66, 0x0A76}, {0x0A81, 0x0A83},
+ {0x0A85, 0x0A8D}, {0x0A8F, 0x0A91}, {0x0A93, 0x0AA8},
+ {0x0AAA, 0x0AB0}, {0x0AB2, 0x0AB3}, {0x0AB5, 0x0AB9},
+ {0x0ABC, 0x0AC5}, {0x0AC7, 0x0AC9}, {0x0ACB, 0x0ACD},
+ {0x0AD0, 0x0AD0}, {0x0AE0, 0x0AE3}, {0x0AE6, 0x0AF1},
+ {0x0AF9, 0x0AFF}, {0x0B01, 0x0B03}, {0x0B05, 0x0B0C},
+ {0x0B0F, 0x0B10}, {0x0B13, 0x0B28}, {0x0B2A, 0x0B30},
+ {0x0B32, 0x0B33}, {0x0B35, 0x0B39}, {0x0B3C, 0x0B44},
+ {0x0B47, 0x0B48}, {0x0B4B, 0x0B4D}, {0x0B55, 0x0B57},
+ {0x0B5C, 0x0B5D}, {0x0B5F, 0x0B63}, {0x0B66, 0x0B77},
+ {0x0B82, 0x0B83}, {0x0B85, 0x0B8A}, {0x0B8E, 0x0B90},
+ {0x0B92, 0x0B95}, {0x0B99, 0x0B9A}, {0x0B9C, 0x0B9C},
+ {0x0B9E, 0x0B9F}, {0x0BA3, 0x0BA4}, {0x0BA8, 0x0BAA},
+ {0x0BAE, 0x0BB9}, {0x0BBE, 0x0BC2}, {0x0BC6, 0x0BC8},
+ {0x0BCA, 0x0BCD}, {0x0BD0, 0x0BD0}, {0x0BD7, 0x0BD7},
+ {0x0BE6, 0x0BFA}, {0x0C00, 0x0C0C}, {0x0C0E, 0x0C10},
+ {0x0C12, 0x0C28}, {0x0C2A, 0x0C39}, {0x0C3C, 0x0C44},
+ {0x0C46, 0x0C48}, {0x0C4A, 0x0C4D}, {0x0C55, 0x0C56},
+ {0x0C58, 0x0C5A}, {0x0C5D, 0x0C5D}, {0x0C60, 0x0C63},
+ {0x0C66, 0x0C6F}, {0x0C77, 0x0C8C}, {0x0C8E, 0x0C90},
+ {0x0C92, 0x0CA8}, {0x0CAA, 0x0CB3}, {0x0CB5, 0x0CB9},
+ {0x0CBC, 0x0CC4}, {0x0CC6, 0x0CC8}, {0x0CCA, 0x0CCD},
+ {0x0CD5, 0x0CD6}, {0x0CDD, 0x0CDE}, {0x0CE0, 0x0CE3},
+ {0x0CE6, 0x0CEF}, {0x0CF1, 0x0CF3}, {0x0D00, 0x0D0C},
+ {0x0D0E, 0x0D10}, {0x0D12, 0x0D44}, {0x0D46, 0x0D48},
+ {0x0D4A, 0x0D4F}, {0x0D54, 0x0D63}, {0x0D66, 0x0D7F},
+ {0x0D81, 0x0D83}, {0x0D85, 0x0D96}, {0x0D9A, 0x0DB1},
+ {0x0DB3, 0x0DBB}, {0x0DBD, 0x0DBD}, {0x0DC0, 0x0DC6},
+ {0x0DCA, 0x0DCA}, {0x0DCF, 0x0DD4}, {0x0DD6, 0x0DD6},
+ {0x0DD8, 0x0DDF}, {0x0DE6, 0x0DEF}, {0x0DF2, 0x0DF4},
+ {0x0E01, 0x0E3A}, {0x0E3F, 0x0E5B}, {0x0E81, 0x0E82},
+ {0x0E84, 0x0E84}, {0x0E86, 0x0E8A}, {0x0E8C, 0x0EA3},
+ {0x0EA5, 0x0EA5}, {0x0EA7, 0x0EBD}, {0x0EC0, 0x0EC4},
+ {0x0EC6, 0x0EC6}, {0x0EC8, 0x0ECE}, {0x0ED0, 0x0ED9},
+ {0x0EDC, 0x0EDF}, {0x0F00, 0x0F47}, {0x0F49, 0x0F6C},
+ {0x0F71, 0x0F97}, {0x0F99, 0x0FBC}, {0x0FBE, 0x0FCC},
+ {0x0FCE, 0x0FDA}, {0x1000, 0x10C5}, {0x10C7, 0x10C7},
+ {0x10CD, 0x10CD}, {0x10D0, 0x10FF}, {0x1160, 0x1248},
+ {0x124A, 0x124D}, {0x1250, 0x1256}, {0x1258, 0x1258},
+ {0x125A, 0x125D}, {0x1260, 0x1288}, {0x128A, 0x128D},
+ {0x1290, 0x12B0}, {0x12B2, 0x12B5}, {0x12B8, 0x12BE},
+ {0x12C0, 0x12C0}, {0x12C2, 0x12C5}, {0x12C8, 0x12D6},
+ {0x12D8, 0x1310}, {0x1312, 0x1315}, {0x1318, 0x135A},
+ {0x135D, 0x137C}, {0x1380, 0x1399}, {0x13A0, 0x13F5},
+ {0x13F8, 0x13FD}, {0x1400, 0x169C}, {0x16A0, 0x16F8},
+ {0x1700, 0x1715}, {0x171F, 0x1736}, {0x1740, 0x1753},
+ {0x1760, 0x176C}, {0x176E, 0x1770}, {0x1772, 0x1773},
+ {0x1780, 0x17DD}, {0x17E0, 0x17E9}, {0x17F0, 0x17F9},
+ {0x1800, 0x1819}, {0x1820, 0x1878}, {0x1880, 0x18AA},
+ {0x18B0, 0x18F5}, {0x1900, 0x191E}, {0x1920, 0x192B},
+ {0x1930, 0x193B}, {0x1940, 0x1940}, {0x1944, 0x196D},
+ {0x1970, 0x1974}, {0x1980, 0x19AB}, {0x19B0, 0x19C9},
+ {0x19D0, 0x19DA}, {0x19DE, 0x1A1B}, {0x1A1E, 0x1A5E},
+ {0x1A60, 0x1A7C}, {0x1A7F, 0x1A89}, {0x1A90, 0x1A99},
+ {0x1AA0, 0x1AAD}, {0x1AB0, 0x1ACE}, {0x1B00, 0x1B4C},
+ {0x1B50, 0x1B7E}, {0x1B80, 0x1BF3}, {0x1BFC, 0x1C37},
+ {0x1C3B, 0x1C49}, {0x1C4D, 0x1C88}, {0x1C90, 0x1CBA},
+ {0x1CBD, 0x1CC7}, {0x1CD0, 0x1CFA}, {0x1D00, 0x1F15},
+ {0x1F18, 0x1F1D}, {0x1F20, 0x1F45}, {0x1F48, 0x1F4D},
+ {0x1F50, 0x1F57}, {0x1F59, 0x1F59}, {0x1F5B, 0x1F5B},
+ {0x1F5D, 0x1F5D}, {0x1F5F, 0x1F7D}, {0x1F80, 0x1FB4},
+ {0x1FB6, 0x1FC4}, {0x1FC6, 0x1FD3}, {0x1FD6, 0x1FDB},
+ {0x1FDD, 0x1FEF}, {0x1FF2, 0x1FF4}, {0x1FF6, 0x1FFE},
+ {0x2000, 0x200F}, {0x2011, 0x2012}, {0x2017, 0x2017},
+ {0x201A, 0x201B}, {0x201E, 0x201F}, {0x2023, 0x2023},
+ {0x2028, 0x202F}, {0x2031, 0x2031}, {0x2034, 0x2034},
+ {0x2036, 0x203A}, {0x203C, 0x203D}, {0x203F, 0x2064},
+ {0x2066, 0x2071}, {0x2075, 0x207E}, {0x2080, 0x2080},
+ {0x2085, 0x208E}, {0x2090, 0x209C}, {0x20A0, 0x20A8},
+ {0x20AA, 0x20AB}, {0x20AD, 0x20C0}, {0x20D0, 0x20F0},
+ {0x2100, 0x2102}, {0x2104, 0x2104}, {0x2106, 0x2108},
+ {0x210A, 0x2112}, {0x2114, 0x2115}, {0x2117, 0x2120},
+ {0x2123, 0x2125}, {0x2127, 0x212A}, {0x212C, 0x2152},
+ {0x2155, 0x215A}, {0x215F, 0x215F}, {0x216C, 0x216F},
+ {0x217A, 0x2188}, {0x218A, 0x218B}, {0x219A, 0x21B7},
+ {0x21BA, 0x21D1}, {0x21D3, 0x21D3}, {0x21D5, 0x21E6},
+ {0x21E8, 0x21FF}, {0x2201, 0x2201}, {0x2204, 0x2206},
+ {0x2209, 0x220A}, {0x220C, 0x220E}, {0x2210, 0x2210},
+ {0x2212, 0x2214}, {0x2216, 0x2219}, {0x221B, 0x221C},
+ {0x2221, 0x2222}, {0x2224, 0x2224}, {0x2226, 0x2226},
+ {0x222D, 0x222D}, {0x222F, 0x2233}, {0x2238, 0x223B},
+ {0x223E, 0x2247}, {0x2249, 0x224B}, {0x224D, 0x2251},
+ {0x2253, 0x225F}, {0x2262, 0x2263}, {0x2268, 0x2269},
+ {0x226C, 0x226D}, {0x2270, 0x2281}, {0x2284, 0x2285},
+ {0x2288, 0x2294}, {0x2296, 0x2298}, {0x229A, 0x22A4},
+ {0x22A6, 0x22BE}, {0x22C0, 0x2311}, {0x2313, 0x2319},
+ {0x231C, 0x2328}, {0x232B, 0x23E8}, {0x23ED, 0x23EF},
+ {0x23F1, 0x23F2}, {0x23F4, 0x2426}, {0x2440, 0x244A},
+ {0x24EA, 0x24EA}, {0x254C, 0x254F}, {0x2574, 0x257F},
+ {0x2590, 0x2591}, {0x2596, 0x259F}, {0x25A2, 0x25A2},
+ {0x25AA, 0x25B1}, {0x25B4, 0x25B5}, {0x25B8, 0x25BB},
+ {0x25BE, 0x25BF}, {0x25C2, 0x25C5}, {0x25C9, 0x25CA},
+ {0x25CC, 0x25CD}, {0x25D2, 0x25E1}, {0x25E6, 0x25EE},
+ {0x25F0, 0x25FC}, {0x25FF, 0x2604}, {0x2607, 0x2608},
+ {0x260A, 0x260D}, {0x2610, 0x2613}, {0x2616, 0x261B},
+ {0x261D, 0x261D}, {0x261F, 0x263F}, {0x2641, 0x2641},
+ {0x2643, 0x2647}, {0x2654, 0x265F}, {0x2662, 0x2662},
+ {0x2666, 0x2666}, {0x266B, 0x266B}, {0x266E, 0x266E},
+ {0x2670, 0x267E}, {0x2680, 0x2692}, {0x2694, 0x269D},
+ {0x26A0, 0x26A0}, {0x26A2, 0x26A9}, {0x26AC, 0x26BC},
+ {0x26C0, 0x26C3}, {0x26E2, 0x26E2}, {0x26E4, 0x26E7},
+ {0x2700, 0x2704}, {0x2706, 0x2709}, {0x270C, 0x2727},
+ {0x2729, 0x273C}, {0x273E, 0x274B}, {0x274D, 0x274D},
+ {0x274F, 0x2752}, {0x2756, 0x2756}, {0x2758, 0x2775},
+ {0x2780, 0x2794}, {0x2798, 0x27AF}, {0x27B1, 0x27BE},
+ {0x27C0, 0x27E5}, {0x27EE, 0x2984}, {0x2987, 0x2B1A},
+ {0x2B1D, 0x2B4F}, {0x2B51, 0x2B54}, {0x2B5A, 0x2B73},
+ {0x2B76, 0x2B95}, {0x2B97, 0x2CF3}, {0x2CF9, 0x2D25},
+ {0x2D27, 0x2D27}, {0x2D2D, 0x2D2D}, {0x2D30, 0x2D67},
+ {0x2D6F, 0x2D70}, {0x2D7F, 0x2D96}, {0x2DA0, 0x2DA6},
+ {0x2DA8, 0x2DAE}, {0x2DB0, 0x2DB6}, {0x2DB8, 0x2DBE},
+ {0x2DC0, 0x2DC6}, {0x2DC8, 0x2DCE}, {0x2DD0, 0x2DD6},
+ {0x2DD8, 0x2DDE}, {0x2DE0, 0x2E5D}, {0x303F, 0x303F},
+ {0x4DC0, 0x4DFF}, {0xA4D0, 0xA62B}, {0xA640, 0xA6F7},
+ {0xA700, 0xA7CA}, {0xA7D0, 0xA7D1}, {0xA7D3, 0xA7D3},
+ {0xA7D5, 0xA7D9}, {0xA7F2, 0xA82C}, {0xA830, 0xA839},
+ {0xA840, 0xA877}, {0xA880, 0xA8C5}, {0xA8CE, 0xA8D9},
+ {0xA8E0, 0xA953}, {0xA95F, 0xA95F}, {0xA980, 0xA9CD},
+ {0xA9CF, 0xA9D9}, {0xA9DE, 0xA9FE}, {0xAA00, 0xAA36},
+ {0xAA40, 0xAA4D}, {0xAA50, 0xAA59}, {0xAA5C, 0xAAC2},
+ {0xAADB, 0xAAF6}, {0xAB01, 0xAB06}, {0xAB09, 0xAB0E},
+ {0xAB11, 0xAB16}, {0xAB20, 0xAB26}, {0xAB28, 0xAB2E},
+ {0xAB30, 0xAB6B}, {0xAB70, 0xABED}, {0xABF0, 0xABF9},
+ {0xD7B0, 0xD7C6}, {0xD7CB, 0xD7FB}, {0xD800, 0xDFFF},
+ {0xFB00, 0xFB06}, {0xFB13, 0xFB17}, {0xFB1D, 0xFB36},
+ {0xFB38, 0xFB3C}, {0xFB3E, 0xFB3E}, {0xFB40, 0xFB41},
+ {0xFB43, 0xFB44}, {0xFB46, 0xFBC2}, {0xFBD3, 0xFD8F},
+ {0xFD92, 0xFDC7}, {0xFDCF, 0xFDCF}, {0xFDF0, 0xFDFF},
+ {0xFE20, 0xFE2F}, {0xFE70, 0xFE74}, {0xFE76, 0xFEFC},
+ {0xFEFF, 0xFEFF}, {0xFFF9, 0xFFFC}, {0x10000, 0x1000B},
+ {0x1000D, 0x10026}, {0x10028, 0x1003A}, {0x1003C, 0x1003D},
+ {0x1003F, 0x1004D}, {0x10050, 0x1005D}, {0x10080, 0x100FA},
+ {0x10100, 0x10102}, {0x10107, 0x10133}, {0x10137, 0x1018E},
+ {0x10190, 0x1019C}, {0x101A0, 0x101A0}, {0x101D0, 0x101FD},
+ {0x10280, 0x1029C}, {0x102A0, 0x102D0}, {0x102E0, 0x102FB},
+ {0x10300, 0x10323}, {0x1032D, 0x1034A}, {0x10350, 0x1037A},
+ {0x10380, 0x1039D}, {0x1039F, 0x103C3}, {0x103C8, 0x103D5},
+ {0x10400, 0x1049D}, {0x104A0, 0x104A9}, {0x104B0, 0x104D3},
+ {0x104D8, 0x104FB}, {0x10500, 0x10527}, {0x10530, 0x10563},
+ {0x1056F, 0x1057A}, {0x1057C, 0x1058A}, {0x1058C, 0x10592},
+ {0x10594, 0x10595}, {0x10597, 0x105A1}, {0x105A3, 0x105B1},
+ {0x105B3, 0x105B9}, {0x105BB, 0x105BC}, {0x10600, 0x10736},
+ {0x10740, 0x10755}, {0x10760, 0x10767}, {0x10780, 0x10785},
+ {0x10787, 0x107B0}, {0x107B2, 0x107BA}, {0x10800, 0x10805},
+ {0x10808, 0x10808}, {0x1080A, 0x10835}, {0x10837, 0x10838},
+ {0x1083C, 0x1083C}, {0x1083F, 0x10855}, {0x10857, 0x1089E},
+ {0x108A7, 0x108AF}, {0x108E0, 0x108F2}, {0x108F4, 0x108F5},
+ {0x108FB, 0x1091B}, {0x1091F, 0x10939}, {0x1093F, 0x1093F},
+ {0x10980, 0x109B7}, {0x109BC, 0x109CF}, {0x109D2, 0x10A03},
+ {0x10A05, 0x10A06}, {0x10A0C, 0x10A13}, {0x10A15, 0x10A17},
+ {0x10A19, 0x10A35}, {0x10A38, 0x10A3A}, {0x10A3F, 0x10A48},
+ {0x10A50, 0x10A58}, {0x10A60, 0x10A9F}, {0x10AC0, 0x10AE6},
+ {0x10AEB, 0x10AF6}, {0x10B00, 0x10B35}, {0x10B39, 0x10B55},
+ {0x10B58, 0x10B72}, {0x10B78, 0x10B91}, {0x10B99, 0x10B9C},
+ {0x10BA9, 0x10BAF}, {0x10C00, 0x10C48}, {0x10C80, 0x10CB2},
+ {0x10CC0, 0x10CF2}, {0x10CFA, 0x10D27}, {0x10D30, 0x10D39},
+ {0x10E60, 0x10E7E}, {0x10E80, 0x10EA9}, {0x10EAB, 0x10EAD},
+ {0x10EB0, 0x10EB1}, {0x10EFD, 0x10F27}, {0x10F30, 0x10F59},
+ {0x10F70, 0x10F89}, {0x10FB0, 0x10FCB}, {0x10FE0, 0x10FF6},
+ {0x11000, 0x1104D}, {0x11052, 0x11075}, {0x1107F, 0x110C2},
+ {0x110CD, 0x110CD}, {0x110D0, 0x110E8}, {0x110F0, 0x110F9},
+ {0x11100, 0x11134}, {0x11136, 0x11147}, {0x11150, 0x11176},
+ {0x11180, 0x111DF}, {0x111E1, 0x111F4}, {0x11200, 0x11211},
+ {0x11213, 0x11241}, {0x11280, 0x11286}, {0x11288, 0x11288},
+ {0x1128A, 0x1128D}, {0x1128F, 0x1129D}, {0x1129F, 0x112A9},
+ {0x112B0, 0x112EA}, {0x112F0, 0x112F9}, {0x11300, 0x11303},
+ {0x11305, 0x1130C}, {0x1130F, 0x11310}, {0x11313, 0x11328},
+ {0x1132A, 0x11330}, {0x11332, 0x11333}, {0x11335, 0x11339},
+ {0x1133B, 0x11344}, {0x11347, 0x11348}, {0x1134B, 0x1134D},
+ {0x11350, 0x11350}, {0x11357, 0x11357}, {0x1135D, 0x11363},
+ {0x11366, 0x1136C}, {0x11370, 0x11374}, {0x11400, 0x1145B},
+ {0x1145D, 0x11461}, {0x11480, 0x114C7}, {0x114D0, 0x114D9},
+ {0x11580, 0x115B5}, {0x115B8, 0x115DD}, {0x11600, 0x11644},
+ {0x11650, 0x11659}, {0x11660, 0x1166C}, {0x11680, 0x116B9},
+ {0x116C0, 0x116C9}, {0x11700, 0x1171A}, {0x1171D, 0x1172B},
+ {0x11730, 0x11746}, {0x11800, 0x1183B}, {0x118A0, 0x118F2},
+ {0x118FF, 0x11906}, {0x11909, 0x11909}, {0x1190C, 0x11913},
+ {0x11915, 0x11916}, {0x11918, 0x11935}, {0x11937, 0x11938},
+ {0x1193B, 0x11946}, {0x11950, 0x11959}, {0x119A0, 0x119A7},
+ {0x119AA, 0x119D7}, {0x119DA, 0x119E4}, {0x11A00, 0x11A47},
+ {0x11A50, 0x11AA2}, {0x11AB0, 0x11AF8}, {0x11B00, 0x11B09},
+ {0x11C00, 0x11C08}, {0x11C0A, 0x11C36}, {0x11C38, 0x11C45},
+ {0x11C50, 0x11C6C}, {0x11C70, 0x11C8F}, {0x11C92, 0x11CA7},
+ {0x11CA9, 0x11CB6}, {0x11D00, 0x11D06}, {0x11D08, 0x11D09},
+ {0x11D0B, 0x11D36}, {0x11D3A, 0x11D3A}, {0x11D3C, 0x11D3D},
+ {0x11D3F, 0x11D47}, {0x11D50, 0x11D59}, {0x11D60, 0x11D65},
+ {0x11D67, 0x11D68}, {0x11D6A, 0x11D8E}, {0x11D90, 0x11D91},
+ {0x11D93, 0x11D98}, {0x11DA0, 0x11DA9}, {0x11EE0, 0x11EF8},
+ {0x11F00, 0x11F10}, {0x11F12, 0x11F3A}, {0x11F3E, 0x11F59},
+ {0x11FB0, 0x11FB0}, {0x11FC0, 0x11FF1}, {0x11FFF, 0x12399},
+ {0x12400, 0x1246E}, {0x12470, 0x12474}, {0x12480, 0x12543},
+ {0x12F90, 0x12FF2}, {0x13000, 0x13455}, {0x14400, 0x14646},
+ {0x16800, 0x16A38}, {0x16A40, 0x16A5E}, {0x16A60, 0x16A69},
+ {0x16A6E, 0x16ABE}, {0x16AC0, 0x16AC9}, {0x16AD0, 0x16AED},
+ {0x16AF0, 0x16AF5}, {0x16B00, 0x16B45}, {0x16B50, 0x16B59},
+ {0x16B5B, 0x16B61}, {0x16B63, 0x16B77}, {0x16B7D, 0x16B8F},
+ {0x16E40, 0x16E9A}, {0x16F00, 0x16F4A}, {0x16F4F, 0x16F87},
+ {0x16F8F, 0x16F9F}, {0x1BC00, 0x1BC6A}, {0x1BC70, 0x1BC7C},
+ {0x1BC80, 0x1BC88}, {0x1BC90, 0x1BC99}, {0x1BC9C, 0x1BCA3},
+ {0x1CF00, 0x1CF2D}, {0x1CF30, 0x1CF46}, {0x1CF50, 0x1CFC3},
+ {0x1D000, 0x1D0F5}, {0x1D100, 0x1D126}, {0x1D129, 0x1D1EA},
+ {0x1D200, 0x1D245}, {0x1D2C0, 0x1D2D3}, {0x1D2E0, 0x1D2F3},
+ {0x1D300, 0x1D356}, {0x1D360, 0x1D378}, {0x1D400, 0x1D454},
+ {0x1D456, 0x1D49C}, {0x1D49E, 0x1D49F}, {0x1D4A2, 0x1D4A2},
+ {0x1D4A5, 0x1D4A6}, {0x1D4A9, 0x1D4AC}, {0x1D4AE, 0x1D4B9},
+ {0x1D4BB, 0x1D4BB}, {0x1D4BD, 0x1D4C3}, {0x1D4C5, 0x1D505},
+ {0x1D507, 0x1D50A}, {0x1D50D, 0x1D514}, {0x1D516, 0x1D51C},
+ {0x1D51E, 0x1D539}, {0x1D53B, 0x1D53E}, {0x1D540, 0x1D544},
+ {0x1D546, 0x1D546}, {0x1D54A, 0x1D550}, {0x1D552, 0x1D6A5},
+ {0x1D6A8, 0x1D7CB}, {0x1D7CE, 0x1DA8B}, {0x1DA9B, 0x1DA9F},
+ {0x1DAA1, 0x1DAAF}, {0x1DF00, 0x1DF1E}, {0x1DF25, 0x1DF2A},
+ {0x1E000, 0x1E006}, {0x1E008, 0x1E018}, {0x1E01B, 0x1E021},
+ {0x1E023, 0x1E024}, {0x1E026, 0x1E02A}, {0x1E030, 0x1E06D},
+ {0x1E08F, 0x1E08F}, {0x1E100, 0x1E12C}, {0x1E130, 0x1E13D},
+ {0x1E140, 0x1E149}, {0x1E14E, 0x1E14F}, {0x1E290, 0x1E2AE},
+ {0x1E2C0, 0x1E2F9}, {0x1E2FF, 0x1E2FF}, {0x1E4D0, 0x1E4F9},
+ {0x1E7E0, 0x1E7E6}, {0x1E7E8, 0x1E7EB}, {0x1E7ED, 0x1E7EE},
+ {0x1E7F0, 0x1E7FE}, {0x1E800, 0x1E8C4}, {0x1E8C7, 0x1E8D6},
+ {0x1E900, 0x1E94B}, {0x1E950, 0x1E959}, {0x1E95E, 0x1E95F},
+ {0x1EC71, 0x1ECB4}, {0x1ED01, 0x1ED3D}, {0x1EE00, 0x1EE03},
+ {0x1EE05, 0x1EE1F}, {0x1EE21, 0x1EE22}, {0x1EE24, 0x1EE24},
+ {0x1EE27, 0x1EE27}, {0x1EE29, 0x1EE32}, {0x1EE34, 0x1EE37},
+ {0x1EE39, 0x1EE39}, {0x1EE3B, 0x1EE3B}, {0x1EE42, 0x1EE42},
+ {0x1EE47, 0x1EE47}, {0x1EE49, 0x1EE49}, {0x1EE4B, 0x1EE4B},
+ {0x1EE4D, 0x1EE4F}, {0x1EE51, 0x1EE52}, {0x1EE54, 0x1EE54},
+ {0x1EE57, 0x1EE57}, {0x1EE59, 0x1EE59}, {0x1EE5B, 0x1EE5B},
+ {0x1EE5D, 0x1EE5D}, {0x1EE5F, 0x1EE5F}, {0x1EE61, 0x1EE62},
+ {0x1EE64, 0x1EE64}, {0x1EE67, 0x1EE6A}, {0x1EE6C, 0x1EE72},
+ {0x1EE74, 0x1EE77}, {0x1EE79, 0x1EE7C}, {0x1EE7E, 0x1EE7E},
+ {0x1EE80, 0x1EE89}, {0x1EE8B, 0x1EE9B}, {0x1EEA1, 0x1EEA3},
+ {0x1EEA5, 0x1EEA9}, {0x1EEAB, 0x1EEBB}, {0x1EEF0, 0x1EEF1},
+ {0x1F000, 0x1F003}, {0x1F005, 0x1F02B}, {0x1F030, 0x1F093},
+ {0x1F0A0, 0x1F0AE}, {0x1F0B1, 0x1F0BF}, {0x1F0C1, 0x1F0CE},
+ {0x1F0D1, 0x1F0F5}, {0x1F10B, 0x1F10F}, {0x1F12E, 0x1F12F},
+ {0x1F16A, 0x1F16F}, {0x1F1AD, 0x1F1AD}, {0x1F1E6, 0x1F1FF},
+ {0x1F321, 0x1F32C}, {0x1F336, 0x1F336}, {0x1F37D, 0x1F37D},
+ {0x1F394, 0x1F39F}, {0x1F3CB, 0x1F3CE}, {0x1F3D4, 0x1F3DF},
+ {0x1F3F1, 0x1F3F3}, {0x1F3F5, 0x1F3F7}, {0x1F43F, 0x1F43F},
+ {0x1F441, 0x1F441}, {0x1F4FD, 0x1F4FE}, {0x1F53E, 0x1F54A},
+ {0x1F54F, 0x1F54F}, {0x1F568, 0x1F579}, {0x1F57B, 0x1F594},
+ {0x1F597, 0x1F5A3}, {0x1F5A5, 0x1F5FA}, {0x1F650, 0x1F67F},
+ {0x1F6C6, 0x1F6CB}, {0x1F6CD, 0x1F6CF}, {0x1F6D3, 0x1F6D4},
+ {0x1F6E0, 0x1F6EA}, {0x1F6F0, 0x1F6F3}, {0x1F700, 0x1F776},
+ {0x1F77B, 0x1F7D9}, {0x1F800, 0x1F80B}, {0x1F810, 0x1F847},
+ {0x1F850, 0x1F859}, {0x1F860, 0x1F887}, {0x1F890, 0x1F8AD},
+ {0x1F8B0, 0x1F8B1}, {0x1F900, 0x1F90B}, {0x1F93B, 0x1F93B},
+ {0x1F946, 0x1F946}, {0x1FA00, 0x1FA53}, {0x1FA60, 0x1FA6D},
+ {0x1FB00, 0x1FB92}, {0x1FB94, 0x1FBCA}, {0x1FBF0, 0x1FBF9},
+ {0xE0001, 0xE0001}, {0xE0020, 0xE007F},
+}
+
+var emoji = table{
+ {0x203C, 0x203C}, {0x2049, 0x2049}, {0x2122, 0x2122},
+ {0x2139, 0x2139}, {0x2194, 0x2199}, {0x21A9, 0x21AA},
+ {0x231A, 0x231B}, {0x2328, 0x2328}, {0x2388, 0x2388},
+ {0x23CF, 0x23CF}, {0x23E9, 0x23F3}, {0x23F8, 0x23FA},
+ {0x24C2, 0x24C2}, {0x25AA, 0x25AB}, {0x25B6, 0x25B6},
+ {0x25C0, 0x25C0}, {0x25FB, 0x25FE}, {0x2600, 0x2605},
+ {0x2607, 0x2612}, {0x2614, 0x2685}, {0x2690, 0x2705},
+ {0x2708, 0x2712}, {0x2714, 0x2714}, {0x2716, 0x2716},
+ {0x271D, 0x271D}, {0x2721, 0x2721}, {0x2728, 0x2728},
+ {0x2733, 0x2734}, {0x2744, 0x2744}, {0x2747, 0x2747},
+ {0x274C, 0x274C}, {0x274E, 0x274E}, {0x2753, 0x2755},
+ {0x2757, 0x2757}, {0x2763, 0x2767}, {0x2795, 0x2797},
+ {0x27A1, 0x27A1}, {0x27B0, 0x27B0}, {0x27BF, 0x27BF},
+ {0x2934, 0x2935}, {0x2B05, 0x2B07}, {0x2B1B, 0x2B1C},
+ {0x2B50, 0x2B50}, {0x2B55, 0x2B55}, {0x3030, 0x3030},
+ {0x303D, 0x303D}, {0x3297, 0x3297}, {0x3299, 0x3299},
+ {0x1F000, 0x1F0FF}, {0x1F10D, 0x1F10F}, {0x1F12F, 0x1F12F},
+ {0x1F16C, 0x1F171}, {0x1F17E, 0x1F17F}, {0x1F18E, 0x1F18E},
+ {0x1F191, 0x1F19A}, {0x1F1AD, 0x1F1E5}, {0x1F201, 0x1F20F},
+ {0x1F21A, 0x1F21A}, {0x1F22F, 0x1F22F}, {0x1F232, 0x1F23A},
+ {0x1F23C, 0x1F23F}, {0x1F249, 0x1F3FA}, {0x1F400, 0x1F53D},
+ {0x1F546, 0x1F64F}, {0x1F680, 0x1F6FF}, {0x1F774, 0x1F77F},
+ {0x1F7D5, 0x1F7FF}, {0x1F80C, 0x1F80F}, {0x1F848, 0x1F84F},
+ {0x1F85A, 0x1F85F}, {0x1F888, 0x1F88F}, {0x1F8AE, 0x1F8FF},
+ {0x1F90C, 0x1F93A}, {0x1F93C, 0x1F945}, {0x1F947, 0x1FAFF},
+ {0x1FC00, 0x1FFFD},
+}
diff --git a/vendor/github.com/mattn/go-runewidth/runewidth_windows.go b/vendor/github.com/mattn/go-runewidth/runewidth_windows.go
new file mode 100644
index 0000000..5f987a3
--- /dev/null
+++ b/vendor/github.com/mattn/go-runewidth/runewidth_windows.go
@@ -0,0 +1,28 @@
+//go:build windows && !appengine
+// +build windows,!appengine
+
+package runewidth
+
+import (
+ "syscall"
+)
+
+var (
+ kernel32 = syscall.NewLazyDLL("kernel32")
+ procGetConsoleOutputCP = kernel32.NewProc("GetConsoleOutputCP")
+)
+
+// IsEastAsian returns true if the current console code page corresponds to a CJK locale
+func IsEastAsian() bool {
+ r1, _, _ := procGetConsoleOutputCP.Call()
+ if r1 == 0 {
+ return false
+ }
+
+ switch int(r1) {
+ case 932, 51932, 936, 949, 950:
+ return true
+ }
+
+ return false
+}
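The code-page check above only answers whether the console locale is CJK; a minimal sketch (not part of this diff) of how a caller might combine it with the rest of the go-runewidth API, assuming the package's NewCondition and Condition.StringWidth as documented upstream:

    package main

    import (
        "fmt"

        "github.com/mattn/go-runewidth"
    )

    func main() {
        // Treat ambiguous-width runes as wide only when the console code page
        // indicates a CJK locale (see IsEastAsian above).
        cond := runewidth.NewCondition()
        cond.EastAsianWidth = runewidth.IsEastAsian()
        fmt.Println(cond.StringWidth("Hello, 世界"))
    }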
diff --git a/vendor/github.com/munnerz/goautoneg/LICENSE b/vendor/github.com/munnerz/goautoneg/LICENSE
new file mode 100644
index 0000000..bbc7b89
--- /dev/null
+++ b/vendor/github.com/munnerz/goautoneg/LICENSE
@@ -0,0 +1,31 @@
+Copyright (c) 2011, Open Knowledge Foundation Ltd.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+ Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+
+ Neither the name of the Open Knowledge Foundation Ltd. nor the
+ names of its contributors may be used to endorse or promote
+ products derived from this software without specific prior written
+ permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/munnerz/goautoneg/Makefile b/vendor/github.com/munnerz/goautoneg/Makefile
new file mode 100644
index 0000000..e33ee17
--- /dev/null
+++ b/vendor/github.com/munnerz/goautoneg/Makefile
@@ -0,0 +1,13 @@
+include $(GOROOT)/src/Make.inc
+
+TARG=bitbucket.org/ww/goautoneg
+GOFILES=autoneg.go
+
+include $(GOROOT)/src/Make.pkg
+
+format:
+ gofmt -w *.go
+
+docs:
+ gomake clean
+ godoc ${TARG} > README.txt
diff --git a/vendor/github.com/munnerz/goautoneg/README.txt b/vendor/github.com/munnerz/goautoneg/README.txt
new file mode 100644
index 0000000..7723656
--- /dev/null
+++ b/vendor/github.com/munnerz/goautoneg/README.txt
@@ -0,0 +1,67 @@
+PACKAGE
+
+package goautoneg
+import "bitbucket.org/ww/goautoneg"
+
+HTTP Content-Type Autonegotiation.
+
+The functions in this package implement the behaviour specified in
+http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html
+
+Copyright (c) 2011, Open Knowledge Foundation Ltd.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+ Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+
+ Neither the name of the Open Knowledge Foundation Ltd. nor the
+ names of its contributors may be used to endorse or promote
+ products derived from this software without specific prior written
+ permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+FUNCTIONS
+
+func Negotiate(header string, alternatives []string) (content_type string)
+Negotiate the most appropriate content_type given the accept header
+and a list of alternatives.
+
+func ParseAccept(header string) (accept []Accept)
+Parse an Accept Header string returning a sorted list
+of clauses
+
+
+TYPES
+
+type Accept struct {
+ Type, SubType string
+ Q float32
+ Params map[string]string
+}
+Structure to represent a clause in an HTTP Accept Header
+
+
+SUBDIRECTORIES
+
+ .hg
diff --git a/vendor/github.com/munnerz/goautoneg/autoneg.go b/vendor/github.com/munnerz/goautoneg/autoneg.go
new file mode 100644
index 0000000..1dd1cad
--- /dev/null
+++ b/vendor/github.com/munnerz/goautoneg/autoneg.go
@@ -0,0 +1,189 @@
+/*
+HTTP Content-Type Autonegotiation.
+
+The functions in this package implement the behaviour specified in
+http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html
+
+Copyright (c) 2011, Open Knowledge Foundation Ltd.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+ Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+
+ Neither the name of the Open Knowledge Foundation Ltd. nor the
+ names of its contributors may be used to endorse or promote
+ products derived from this software without specific prior written
+ permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+package goautoneg
+
+import (
+ "sort"
+ "strconv"
+ "strings"
+)
+
+// Structure to represent a clause in an HTTP Accept Header
+type Accept struct {
+ Type, SubType string
+ Q float64
+ Params map[string]string
+}
+
+// acceptSlice is defined to implement sort interface.
+type acceptSlice []Accept
+
+func (slice acceptSlice) Len() int {
+ return len(slice)
+}
+
+func (slice acceptSlice) Less(i, j int) bool {
+ ai, aj := slice[i], slice[j]
+ if ai.Q > aj.Q {
+ return true
+ }
+ if ai.Type != "*" && aj.Type == "*" {
+ return true
+ }
+ if ai.SubType != "*" && aj.SubType == "*" {
+ return true
+ }
+ return false
+}
+
+func (slice acceptSlice) Swap(i, j int) {
+ slice[i], slice[j] = slice[j], slice[i]
+}
+
+func stringTrimSpaceCutset(r rune) bool {
+ return r == ' '
+}
+
+func nextSplitElement(s, sep string) (item string, remaining string) {
+ if index := strings.Index(s, sep); index != -1 {
+ return s[:index], s[index+1:]
+ }
+ return s, ""
+}
+
+// Parse an Accept Header string returning a sorted list
+// of clauses
+func ParseAccept(header string) acceptSlice {
+ partsCount := 0
+ remaining := header
+ for len(remaining) > 0 {
+ partsCount++
+ _, remaining = nextSplitElement(remaining, ",")
+ }
+ accept := make(acceptSlice, 0, partsCount)
+
+ remaining = header
+ var part string
+ for len(remaining) > 0 {
+ part, remaining = nextSplitElement(remaining, ",")
+ part = strings.TrimFunc(part, stringTrimSpaceCutset)
+
+ a := Accept{
+ Q: 1.0,
+ }
+
+ sp, remainingPart := nextSplitElement(part, ";")
+
+ sp0, spRemaining := nextSplitElement(sp, "/")
+ a.Type = strings.TrimFunc(sp0, stringTrimSpaceCutset)
+
+ switch {
+ case len(spRemaining) == 0:
+ if a.Type == "*" {
+ a.SubType = "*"
+ } else {
+ continue
+ }
+ default:
+ var sp1 string
+ sp1, spRemaining = nextSplitElement(spRemaining, "/")
+ if len(spRemaining) > 0 {
+ continue
+ }
+ a.SubType = strings.TrimFunc(sp1, stringTrimSpaceCutset)
+ }
+
+ if len(remainingPart) == 0 {
+ accept = append(accept, a)
+ continue
+ }
+
+ a.Params = make(map[string]string)
+ for len(remainingPart) > 0 {
+ sp, remainingPart = nextSplitElement(remainingPart, ";")
+ sp0, spRemaining = nextSplitElement(sp, "=")
+ if len(spRemaining) == 0 {
+ continue
+ }
+ var sp1 string
+ sp1, spRemaining = nextSplitElement(spRemaining, "=")
+ if len(spRemaining) != 0 {
+ continue
+ }
+ token := strings.TrimFunc(sp0, stringTrimSpaceCutset)
+ if token == "q" {
+ a.Q, _ = strconv.ParseFloat(sp1, 32)
+ } else {
+ a.Params[token] = strings.TrimFunc(sp1, stringTrimSpaceCutset)
+ }
+ }
+
+ accept = append(accept, a)
+ }
+
+ sort.Sort(accept)
+ return accept
+}
+
+// Negotiate the most appropriate content_type given the accept header
+// and a list of alternatives.
+func Negotiate(header string, alternatives []string) (content_type string) {
+ asp := make([][]string, 0, len(alternatives))
+ for _, ctype := range alternatives {
+ asp = append(asp, strings.SplitN(ctype, "/", 2))
+ }
+ for _, clause := range ParseAccept(header) {
+ for i, ctsp := range asp {
+ if clause.Type == ctsp[0] && clause.SubType == ctsp[1] {
+ content_type = alternatives[i]
+ return
+ }
+ if clause.Type == ctsp[0] && clause.SubType == "*" {
+ content_type = alternatives[i]
+ return
+ }
+ if clause.Type == "*" && clause.SubType == "*" {
+ content_type = alternatives[i]
+ return
+ }
+ }
+ }
+ return
+}
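Taken together, ParseAccept and Negotiate implement server-side content-type selection: the Accept header is parsed into clauses, sorted by quality and specificity, and matched against the server's alternatives in that order. A short usage sketch; the header value and alternatives are illustrative, not taken from this repository:

    package main

    import (
        "fmt"

        "github.com/munnerz/goautoneg"
    )

    func main() {
        // The JSON clause carries the implicit q=1.0, so it wins over text/html;q=0.8.
        header := "text/html;q=0.8, application/json"
        alternatives := []string{"application/json", "text/html"}
        fmt.Println(goautoneg.Negotiate(header, alternatives)) // prints "application/json"
    }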
diff --git a/vendor/github.com/prometheus/client_golang/LICENSE b/vendor/github.com/prometheus/client_golang/LICENSE
new file mode 100644
index 0000000..261eeb9
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/prometheus/client_golang/NOTICE b/vendor/github.com/prometheus/client_golang/NOTICE
new file mode 100644
index 0000000..b9cc55a
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/NOTICE
@@ -0,0 +1,18 @@
+Prometheus instrumentation library for Go applications
+Copyright 2012-2015 The Prometheus Authors
+
+This product includes software developed at
+SoundCloud Ltd. (http://soundcloud.com/).
+
+
+The following components are included in this product:
+
+perks - a fork of https://github.com/bmizerany/perks
+https://github.com/beorn7/perks
+Copyright 2013-2015 Blake Mizerany, Björn Rabenstein
+See https://github.com/beorn7/perks/blob/master/README.md for license details.
+
+Go support for Protocol Buffers - Google's data interchange format
+http://github.com/golang/protobuf/
+Copyright 2010 The Go Authors
+See source code for license details.
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/.gitignore b/vendor/github.com/prometheus/client_golang/prometheus/.gitignore
new file mode 100644
index 0000000..3460f03
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/.gitignore
@@ -0,0 +1 @@
+command-line-arguments.test
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/README.md b/vendor/github.com/prometheus/client_golang/prometheus/README.md
new file mode 100644
index 0000000..c67ff1b
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/README.md
@@ -0,0 +1 @@
+See [Go Reference](https://pkg.go.dev/github.com/prometheus/client_golang/prometheus).
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/build_info_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/build_info_collector.go
new file mode 100644
index 0000000..450189f
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/build_info_collector.go
@@ -0,0 +1,38 @@
+// Copyright 2021 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import "runtime/debug"
+
+// NewBuildInfoCollector is the obsolete version of collectors.NewBuildInfoCollector.
+// See there for documentation.
+//
+// Deprecated: Use collectors.NewBuildInfoCollector instead.
+func NewBuildInfoCollector() Collector {
+ path, version, sum := "unknown", "unknown", "unknown"
+ if bi, ok := debug.ReadBuildInfo(); ok {
+ path = bi.Main.Path
+ version = bi.Main.Version
+ sum = bi.Main.Sum
+ }
+ c := &selfCollector{MustNewConstMetric(
+ NewDesc(
+ "go_build_info",
+ "Build information about the main Go module.",
+ nil, Labels{"path": path, "version": version, "checksum": sum},
+ ),
+ GaugeValue, 1)}
+ c.init(c.self)
+ return c
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/collector.go b/vendor/github.com/prometheus/client_golang/prometheus/collector.go
new file mode 100644
index 0000000..cf05079
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/collector.go
@@ -0,0 +1,128 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+// Collector is the interface implemented by anything that can be used by
+// Prometheus to collect metrics. A Collector has to be registered for
+// collection. See Registerer.Register.
+//
+// The stock metrics provided by this package (Gauge, Counter, Summary,
+// Histogram, Untyped) are also Collectors (which only ever collect one metric,
+// namely itself). An implementer of Collector may, however, collect multiple
+// metrics in a coordinated fashion and/or create metrics on the fly. Examples
+// for collectors already implemented in this library are the metric vectors
+// (i.e. collection of multiple instances of the same Metric but with different
+// label values) like GaugeVec or SummaryVec, and the ExpvarCollector.
+type Collector interface {
+ // Describe sends the super-set of all possible descriptors of metrics
+ // collected by this Collector to the provided channel and returns once
+ // the last descriptor has been sent. The sent descriptors fulfill the
+ // consistency and uniqueness requirements described in the Desc
+ // documentation.
+ //
+ // It is valid if one and the same Collector sends duplicate
+ // descriptors. Those duplicates are simply ignored. However, two
+ // different Collectors must not send duplicate descriptors.
+ //
+ // Sending no descriptor at all marks the Collector as “unchecked”,
+ // i.e. no checks will be performed at registration time, and the
+ // Collector may yield any Metric it sees fit in its Collect method.
+ //
+ // This method idempotently sends the same descriptors throughout the
+ // lifetime of the Collector. It may be called concurrently and
+ // therefore must be implemented in a concurrency safe way.
+ //
+ // If a Collector encounters an error while executing this method, it
+ // must send an invalid descriptor (created with NewInvalidDesc) to
+ // signal the error to the registry.
+ Describe(chan<- *Desc)
+ // Collect is called by the Prometheus registry when collecting
+ // metrics. The implementation sends each collected metric via the
+ // provided channel and returns once the last metric has been sent. The
+ // descriptor of each sent metric is one of those returned by Describe
+ // (unless the Collector is unchecked, see above). Returned metrics that
+ // share the same descriptor must differ in their variable label
+ // values.
+ //
+ // This method may be called concurrently and must therefore be
+ // implemented in a concurrency safe way. Blocking occurs at the expense
+ // of total performance of rendering all registered metrics. Ideally,
+ // Collector implementations support concurrent readers.
+ Collect(chan<- Metric)
+}
+
+// DescribeByCollect is a helper to implement the Describe method of a custom
+// Collector. It collects the metrics from the provided Collector and sends
+// their descriptors to the provided channel.
+//
+// If a Collector collects the same metrics throughout its lifetime, its
+// Describe method can simply be implemented as:
+//
+// func (c customCollector) Describe(ch chan<- *Desc) {
+// DescribeByCollect(c, ch)
+// }
+//
+// However, this will not work if the metrics collected change dynamically over
+// the lifetime of the Collector in a way that their combined set of descriptors
+// changes as well. The shortcut implementation will then violate the contract
+// of the Describe method. If a Collector sometimes collects no metrics at all
+// (for example vectors like CounterVec, GaugeVec, etc., which only collect
+// metrics after a metric with a fully specified label set has been accessed),
+// it might even get registered as an unchecked Collector (cf. the Register
+// method of the Registerer interface). Hence, only use this shortcut
+// implementation of Describe if you are certain to fulfill the contract.
+//
+// The Collector example demonstrates a use of DescribeByCollect.
+func DescribeByCollect(c Collector, descs chan<- *Desc) {
+ metrics := make(chan Metric)
+ go func() {
+ c.Collect(metrics)
+ close(metrics)
+ }()
+ for m := range metrics {
+ descs <- m.Desc()
+ }
+}
+
+// selfCollector implements Collector for a single Metric so that the Metric
+// collects itself. Add it as an anonymous field to a struct that implements
+// Metric, and call init with the Metric itself as an argument.
+type selfCollector struct {
+ self Metric
+}
+
+// init provides the selfCollector with a reference to the metric it is supposed
+// to collect. It is usually called within the factory function to create a
+// metric. See example.
+func (c *selfCollector) init(self Metric) {
+ c.self = self
+}
+
+// Describe implements Collector.
+func (c *selfCollector) Describe(ch chan<- *Desc) {
+ ch <- c.self.Desc()
+}
+
+// Collect implements Collector.
+func (c *selfCollector) Collect(ch chan<- Metric) {
+ ch <- c.self
+}
+
+// collectorMetric is a metric that is also a collector.
+// Because of selfCollector, most (if not all) Metrics in
+// this package are also collectors.
+type collectorMetric interface {
+ Metric
+ Collector
+}
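DescribeByCollect is most useful for simple custom Collectors whose metric set stays fixed over their lifetime. A hedged sketch of such a Collector; the queue_length metric and the queueCollector type are made up for illustration and are not part of this library:

    package main

    import "github.com/prometheus/client_golang/prometheus"

    // queueCollector is a hypothetical Collector that reports the current length
    // of an in-memory queue each time it is scraped.
    type queueCollector struct {
        desc  *prometheus.Desc
        queue *[]string
    }

    func newQueueCollector(q *[]string) queueCollector {
        return queueCollector{
            desc:  prometheus.NewDesc("queue_length", "Current number of items in the queue.", nil, nil),
            queue: q,
        }
    }

    // Describe reuses Collect via the DescribeByCollect helper documented above,
    // which is safe here because the descriptor set never changes.
    func (c queueCollector) Describe(ch chan<- *prometheus.Desc) {
        prometheus.DescribeByCollect(c, ch)
    }

    // Collect emits one gauge sample with the current queue length.
    func (c queueCollector) Collect(ch chan<- prometheus.Metric) {
        ch <- prometheus.MustNewConstMetric(c.desc, prometheus.GaugeValue, float64(len(*c.queue)))
    }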
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/counter.go b/vendor/github.com/prometheus/client_golang/prometheus/counter.go
new file mode 100644
index 0000000..4ce84e7
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/counter.go
@@ -0,0 +1,358 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "errors"
+ "math"
+ "sync/atomic"
+ "time"
+
+ dto "github.com/prometheus/client_model/go"
+ "google.golang.org/protobuf/types/known/timestamppb"
+)
+
+// Counter is a Metric that represents a single numerical value that only ever
+// goes up. That implies that it cannot be used to count items whose number can
+// also go down, e.g. the number of currently running goroutines. Those
+// "counters" are represented by Gauges.
+//
+// A Counter is typically used to count requests served, tasks completed, errors
+// occurred, etc.
+//
+// To create Counter instances, use NewCounter.
+type Counter interface {
+ Metric
+ Collector
+
+ // Inc increments the counter by 1. Use Add to increment it by arbitrary
+ // non-negative values.
+ Inc()
+ // Add adds the given value to the counter. It panics if the value is <
+ // 0.
+ Add(float64)
+}
+
+// ExemplarAdder is implemented by Counters that offer the option of adding a
+// value to the Counter together with an exemplar. Its AddWithExemplar method
+// works like the Add method of the Counter interface but also replaces the
+// currently saved exemplar (if any) with a new one, created from the provided
+// value, the current time as timestamp, and the provided labels. Empty Labels
+// will lead to a valid (label-less) exemplar. But if Labels is nil, the current
+// exemplar is left in place. AddWithExemplar panics if the value is < 0, if any
+// of the provided labels are invalid, or if the provided labels contain more
+// than 128 runes in total.
+type ExemplarAdder interface {
+ AddWithExemplar(value float64, exemplar Labels)
+}
+
+// CounterOpts is an alias for Opts. See there for doc comments.
+type CounterOpts Opts
+
+// CounterVecOpts bundles the options to create a CounterVec metric.
+// It is mandatory to set CounterOpts, see there for mandatory fields. VariableLabels
+// is optional and can safely be left to its default value.
+type CounterVecOpts struct {
+ CounterOpts
+
+ // VariableLabels are used to partition the metric vector by the given set
+ // of labels. Each label value will be constrained with the optional Constraint
+ // function, if provided.
+ VariableLabels ConstrainableLabels
+}
+
+// NewCounter creates a new Counter based on the provided CounterOpts.
+//
+// The returned implementation also implements ExemplarAdder. It is safe to
+// perform the corresponding type assertion.
+//
+// The returned implementation tracks the counter value in two separate
+// variables, a float64 and a uint64. The latter is used to track calls of the
+// Inc method and calls of the Add method with a value that can be represented
+// as a uint64. This allows atomic increments of the counter with optimal
+// performance. (It is common to have an Inc call in very hot execution paths.)
+// Both internal tracking values are added up in the Write method. This has to
+// be taken into account when it comes to precision and overflow behavior.
+func NewCounter(opts CounterOpts) Counter {
+ desc := NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ nil,
+ opts.ConstLabels,
+ )
+ if opts.now == nil {
+ opts.now = time.Now
+ }
+ result := &counter{desc: desc, labelPairs: desc.constLabelPairs, now: opts.now}
+ result.init(result) // Init self-collection.
+ result.createdTs = timestamppb.New(opts.now())
+ return result
+}
+
+type counter struct {
+ // valBits contains the bits of the represented float64 value, while
+ // valInt stores values that are exact integers. Both have to go first
+ // in the struct to guarantee alignment for atomic operations.
+ // http://golang.org/pkg/sync/atomic/#pkg-note-BUG
+ valBits uint64
+ valInt uint64
+
+ selfCollector
+ desc *Desc
+
+ createdTs *timestamppb.Timestamp
+ labelPairs []*dto.LabelPair
+ exemplar atomic.Value // Containing nil or a *dto.Exemplar.
+
+ // now is for testing purposes, by default it's time.Now.
+ now func() time.Time
+}
+
+func (c *counter) Desc() *Desc {
+ return c.desc
+}
+
+func (c *counter) Add(v float64) {
+ if v < 0 {
+ panic(errors.New("counter cannot decrease in value"))
+ }
+
+ ival := uint64(v)
+ if float64(ival) == v {
+ atomic.AddUint64(&c.valInt, ival)
+ return
+ }
+
+ for {
+ oldBits := atomic.LoadUint64(&c.valBits)
+ newBits := math.Float64bits(math.Float64frombits(oldBits) + v)
+ if atomic.CompareAndSwapUint64(&c.valBits, oldBits, newBits) {
+ return
+ }
+ }
+}
+
+func (c *counter) AddWithExemplar(v float64, e Labels) {
+ c.Add(v)
+ c.updateExemplar(v, e)
+}
+
+func (c *counter) Inc() {
+ atomic.AddUint64(&c.valInt, 1)
+}
+
+func (c *counter) get() float64 {
+ fval := math.Float64frombits(atomic.LoadUint64(&c.valBits))
+ ival := atomic.LoadUint64(&c.valInt)
+ return fval + float64(ival)
+}
+
+func (c *counter) Write(out *dto.Metric) error {
+ // Read the Exemplar first and the value second. This is to avoid a race condition
+ // where users see an exemplar for a not-yet-existing observation.
+ var exemplar *dto.Exemplar
+ if e := c.exemplar.Load(); e != nil {
+ exemplar = e.(*dto.Exemplar)
+ }
+ val := c.get()
+ return populateMetric(CounterValue, val, c.labelPairs, exemplar, out, c.createdTs)
+}
+
+func (c *counter) updateExemplar(v float64, l Labels) {
+ if l == nil {
+ return
+ }
+ e, err := newExemplar(v, c.now(), l)
+ if err != nil {
+ panic(err)
+ }
+ c.exemplar.Store(e)
+}
+
+// CounterVec is a Collector that bundles a set of Counters that all share the
+// same Desc, but have different values for their variable labels. This is used
+// if you want to count the same thing partitioned by various dimensions
+// (e.g. number of HTTP requests, partitioned by response code and
+// method). Create instances with NewCounterVec.
+type CounterVec struct {
+ *MetricVec
+}
+
+// NewCounterVec creates a new CounterVec based on the provided CounterOpts and
+// partitioned by the given label names.
+func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec {
+ return V2.NewCounterVec(CounterVecOpts{
+ CounterOpts: opts,
+ VariableLabels: UnconstrainedLabels(labelNames),
+ })
+}
+
+// NewCounterVec creates a new CounterVec based on the provided CounterVecOpts.
+func (v2) NewCounterVec(opts CounterVecOpts) *CounterVec {
+ desc := V2.NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ opts.VariableLabels,
+ opts.ConstLabels,
+ )
+ if opts.now == nil {
+ opts.now = time.Now
+ }
+ return &CounterVec{
+ MetricVec: NewMetricVec(desc, func(lvs ...string) Metric {
+ if len(lvs) != len(desc.variableLabels.names) {
+ panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.names, lvs))
+ }
+ result := &counter{desc: desc, labelPairs: MakeLabelPairs(desc, lvs), now: opts.now}
+ result.init(result) // Init self-collection.
+ result.createdTs = timestamppb.New(opts.now())
+ return result
+ }),
+ }
+}
+
+// GetMetricWithLabelValues returns the Counter for the given slice of label
+// values (same order as the variable labels in Desc). If that combination of
+// label values is accessed for the first time, a new Counter is created.
+//
+// It is possible to call this method without using the returned Counter to only
+// create the new Counter but leave it at its starting value 0. See also the
+// SummaryVec example.
+//
+// Keeping the Counter for later use is possible (and should be considered if
+// performance is critical), but keep in mind that Reset, DeleteLabelValues and
+// Delete can be used to delete the Counter from the CounterVec. In that case,
+// the Counter will still exist, but it will not be exported anymore, even if a
+// Counter with the same label values is created later.
+//
+// An error is returned if the number of label values is not the same as the
+// number of variable labels in Desc (minus any curried labels).
+//
+// Note that for more than one label value, this method is prone to mistakes
+// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as
+// an alternative to avoid that type of mistake. For higher label numbers, the
+// latter has a much more readable (albeit more verbose) syntax, but it comes
+// with a performance overhead (for creating and processing the Labels map).
+// See also the GaugeVec example.
+func (v *CounterVec) GetMetricWithLabelValues(lvs ...string) (Counter, error) {
+ metric, err := v.MetricVec.GetMetricWithLabelValues(lvs...)
+ if metric != nil {
+ return metric.(Counter), err
+ }
+ return nil, err
+}
+
+// GetMetricWith returns the Counter for the given Labels map (the label names
+// must match those of the variable labels in Desc). If that label map is
+// accessed for the first time, a new Counter is created. Implications of
+// creating a Counter without using it and keeping the Counter for later use are
+// the same as for GetMetricWithLabelValues.
+//
+// An error is returned if the number and names of the Labels are inconsistent
+// with those of the variable labels in Desc (minus any curried labels).
+//
+// This method is used for the same purpose as
+// GetMetricWithLabelValues(...string). See there for pros and cons of the two
+// methods.
+func (v *CounterVec) GetMetricWith(labels Labels) (Counter, error) {
+ metric, err := v.MetricVec.GetMetricWith(labels)
+ if metric != nil {
+ return metric.(Counter), err
+ }
+ return nil, err
+}
+
+// WithLabelValues works as GetMetricWithLabelValues, but panics where
+// GetMetricWithLabelValues would have returned an error. Not returning an
+// error allows shortcuts like
+//
+// myVec.WithLabelValues("404", "GET").Add(42)
+func (v *CounterVec) WithLabelValues(lvs ...string) Counter {
+ c, err := v.GetMetricWithLabelValues(lvs...)
+ if err != nil {
+ panic(err)
+ }
+ return c
+}
+
+// With works as GetMetricWith, but panics where GetMetricWith would have
+// returned an error. Not returning an error allows shortcuts like
+//
+// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42)
+func (v *CounterVec) With(labels Labels) Counter {
+ c, err := v.GetMetricWith(labels)
+ if err != nil {
+ panic(err)
+ }
+ return c
+}
+
+// CurryWith returns a vector curried with the provided labels, i.e. the
+// returned vector has those labels pre-set for all labeled operations performed
+// on it. The cardinality of the curried vector is reduced accordingly. The
+// order of the remaining labels stays the same (just with the curried labels
+// taken out of the sequence – which is relevant for the
+// (GetMetric)WithLabelValues methods). It is possible to curry a curried
+// vector, but only with labels not yet used for currying before.
+//
+// The metrics contained in the CounterVec are shared between the curried and
+// uncurried vectors. They are just accessed differently. Curried and uncurried
+// vectors behave identically in terms of collection. Only one must be
+// registered with a given registry (usually the uncurried version). The Reset
+// method deletes all metrics, even if called on a curried vector.
+func (v *CounterVec) CurryWith(labels Labels) (*CounterVec, error) {
+ vec, err := v.MetricVec.CurryWith(labels)
+ if vec != nil {
+ return &CounterVec{vec}, err
+ }
+ return nil, err
+}
+
+// MustCurryWith works as CurryWith but panics where CurryWith would have
+// returned an error.
+func (v *CounterVec) MustCurryWith(labels Labels) *CounterVec {
+ vec, err := v.CurryWith(labels)
+ if err != nil {
+ panic(err)
+ }
+ return vec
+}
+
+// CounterFunc is a Counter whose value is determined at collect time by calling a
+// provided function.
+//
+// To create CounterFunc instances, use NewCounterFunc.
+type CounterFunc interface {
+ Metric
+ Collector
+}
+
+// NewCounterFunc creates a new CounterFunc based on the provided
+// CounterOpts. The value reported is determined by calling the given function
+// from within the Write method. Take into account that metric collection may
+// happen concurrently. If that results in concurrent calls to Write, like in
+// the case where a CounterFunc is directly registered with Prometheus, the
+// provided function must be concurrency-safe. The function should also honor
+// the contract for a Counter (values only go up, not down), but compliance will
+// not be checked.
+//
+// Check out the ExampleGaugeFunc examples for the similar GaugeFunc.
+func NewCounterFunc(opts CounterOpts, function func() float64) CounterFunc {
+ return newValueFunc(NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ nil,
+ opts.ConstLabels,
+ ), CounterValue, function)
+}
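A short usage sketch tying the pieces above together (NewCounterVec, WithLabelValues, With); the metric and label names are illustrative and not part of this library:

    package main

    import "github.com/prometheus/client_golang/prometheus"

    func main() {
        httpRequests := prometheus.NewCounterVec(
            prometheus.CounterOpts{
                Name: "http_requests_total",
                Help: "Total HTTP requests, partitioned by status code and method.",
            },
            []string{"code", "method"},
        )
        prometheus.MustRegister(httpRequests)

        // Fast path: positional label values, in the order the labels were declared.
        httpRequests.WithLabelValues("404", "GET").Inc()

        // Readable path: a Labels map, at the cost of building and checking the map per call.
        httpRequests.With(prometheus.Labels{"code": "200", "method": "POST"}).Add(3)
    }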
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/desc.go b/vendor/github.com/prometheus/client_golang/prometheus/desc.go
new file mode 100644
index 0000000..68ffe3c
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/desc.go
@@ -0,0 +1,207 @@
+// Copyright 2016 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "fmt"
+ "sort"
+ "strings"
+
+ "github.com/cespare/xxhash/v2"
+ dto "github.com/prometheus/client_model/go"
+ "github.com/prometheus/common/model"
+ "google.golang.org/protobuf/proto"
+
+ "github.com/prometheus/client_golang/prometheus/internal"
+)
+
+// Desc is the descriptor used by every Prometheus Metric. It is essentially
+// the immutable meta-data of a Metric. The normal Metric implementations
+// included in this package manage their Desc under the hood. Users only have to
+// deal with Desc if they use advanced features like the ExpvarCollector or
+// custom Collectors and Metrics.
+//
+// Descriptors registered with the same registry have to fulfill certain
+// consistency and uniqueness criteria if they share the same fully-qualified
+// name: They must have the same help string and the same label names (aka label
+// dimensions) in each, constLabels and variableLabels, but they must differ in
+// the values of the constLabels.
+//
+// Descriptors that share the same fully-qualified names and the same label
+// values of their constLabels are considered equal.
+//
+// Use NewDesc to create new Desc instances.
+type Desc struct {
+ // fqName has been built from Namespace, Subsystem, and Name.
+ fqName string
+ // help provides some helpful information about this metric.
+ help string
+ // constLabelPairs contains precalculated DTO label pairs based on
+ // the constant labels.
+ constLabelPairs []*dto.LabelPair
+ // variableLabels contains names of labels and normalization function for
+ // which the metric maintains variable values.
+ variableLabels *compiledLabels
+ // id is a hash of the values of the ConstLabels and fqName. This
+ // must be unique among all registered descriptors and can therefore be
+ // used as an identifier of the descriptor.
+ id uint64
+ // dimHash is a hash of the label names (preset and variable) and the
+ // Help string. Each Desc with the same fqName must have the same
+ // dimHash.
+ dimHash uint64
+ // err is an error that occurred during construction. It is reported on
+ // registration time.
+ err error
+}
+
+// NewDesc allocates and initializes a new Desc. Errors are recorded in the Desc
+// and will be reported on registration time. variableLabels and constLabels can
+// be nil if no such labels should be set. fqName must not be empty.
+//
+// variableLabels only contain the label names. Their label values are variable
+// and therefore not part of the Desc. (They are managed within the Metric.)
+//
+// For constLabels, the label values are constant. Therefore, they are fully
+// specified in the Desc. See the Collector example for a usage pattern.
+func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *Desc {
+ return V2.NewDesc(fqName, help, UnconstrainedLabels(variableLabels), constLabels)
+}
+
+// NewDesc allocates and initializes a new Desc. Errors are recorded in the Desc
+// and will be reported on registration time. variableLabels and constLabels can
+// be nil if no such labels should be set. fqName must not be empty.
+//
+// variableLabels only contain the label names and normalization functions. Their
+// label values are variable and therefore not part of the Desc. (They are managed
+// within the Metric.)
+//
+// For constLabels, the label values are constant. Therefore, they are fully
+// specified in the Desc. See the Collector example for a usage pattern.
+func (v2) NewDesc(fqName, help string, variableLabels ConstrainableLabels, constLabels Labels) *Desc {
+ d := &Desc{
+ fqName: fqName,
+ help: help,
+ variableLabels: variableLabels.compile(),
+ }
+ if !model.IsValidMetricName(model.LabelValue(fqName)) {
+ d.err = fmt.Errorf("%q is not a valid metric name", fqName)
+ return d
+ }
+ // labelValues contains the label values of const labels (in order of
+ // their sorted label names) plus the fqName (at position 0).
+ labelValues := make([]string, 1, len(constLabels)+1)
+ labelValues[0] = fqName
+ labelNames := make([]string, 0, len(constLabels)+len(d.variableLabels.names))
+ labelNameSet := map[string]struct{}{}
+ // First add only the const label names and sort them...
+ for labelName := range constLabels {
+ if !checkLabelName(labelName) {
+ d.err = fmt.Errorf("%q is not a valid label name for metric %q", labelName, fqName)
+ return d
+ }
+ labelNames = append(labelNames, labelName)
+ labelNameSet[labelName] = struct{}{}
+ }
+ sort.Strings(labelNames)
+ // ... so that we can now add const label values in the order of their names.
+ for _, labelName := range labelNames {
+ labelValues = append(labelValues, constLabels[labelName])
+ }
+ // Validate the const label values. They can't have a wrong cardinality, so
+ // use len(labelValues) as expectedNumberOfValues.
+ if err := validateLabelValues(labelValues, len(labelValues)); err != nil {
+ d.err = err
+ return d
+ }
+ // Now add the variable label names, but prefix them with something that
+ // cannot be in a regular label name. That prevents matching the label
+ // dimension with a different mix between preset and variable labels.
+ for _, label := range d.variableLabels.names {
+ if !checkLabelName(label) {
+ d.err = fmt.Errorf("%q is not a valid label name for metric %q", label, fqName)
+ return d
+ }
+ labelNames = append(labelNames, "$"+label)
+ labelNameSet[label] = struct{}{}
+ }
+ if len(labelNames) != len(labelNameSet) {
+ d.err = fmt.Errorf("duplicate label names in constant and variable labels for metric %q", fqName)
+ return d
+ }
+
+ xxh := xxhash.New()
+ for _, val := range labelValues {
+ xxh.WriteString(val)
+ xxh.Write(separatorByteSlice)
+ }
+ d.id = xxh.Sum64()
+ // Sort labelNames so that order doesn't matter for the hash.
+ sort.Strings(labelNames)
+ // Now hash together (in this order) the help string and the sorted
+ // label names.
+ xxh.Reset()
+ xxh.WriteString(help)
+ xxh.Write(separatorByteSlice)
+ for _, labelName := range labelNames {
+ xxh.WriteString(labelName)
+ xxh.Write(separatorByteSlice)
+ }
+ d.dimHash = xxh.Sum64()
+
+ d.constLabelPairs = make([]*dto.LabelPair, 0, len(constLabels))
+ for n, v := range constLabels {
+ d.constLabelPairs = append(d.constLabelPairs, &dto.LabelPair{
+ Name: proto.String(n),
+ Value: proto.String(v),
+ })
+ }
+ sort.Sort(internal.LabelPairSorter(d.constLabelPairs))
+ return d
+}
+
+// NewInvalidDesc returns an invalid descriptor, i.e. a descriptor with the
+// provided error set. If a collector returning such a descriptor is registered,
+// registration will fail with the provided error. NewInvalidDesc can be used by
+// a Collector to signal inability to describe itself.
+func NewInvalidDesc(err error) *Desc {
+ return &Desc{
+ err: err,
+ }
+}
+
+func (d *Desc) String() string {
+ lpStrings := make([]string, 0, len(d.constLabelPairs))
+ for _, lp := range d.constLabelPairs {
+ lpStrings = append(
+ lpStrings,
+ fmt.Sprintf("%s=%q", lp.GetName(), lp.GetValue()),
+ )
+ }
+ vlStrings := make([]string, 0, len(d.variableLabels.names))
+ for _, vl := range d.variableLabels.names {
+ if fn, ok := d.variableLabels.labelConstraints[vl]; ok && fn != nil {
+ vlStrings = append(vlStrings, fmt.Sprintf("c(%s)", vl))
+ } else {
+ vlStrings = append(vlStrings, vl)
+ }
+ }
+ return fmt.Sprintf(
+ "Desc{fqName: %q, help: %q, constLabels: {%s}, variableLabels: {%s}}",
+ d.fqName,
+ d.help,
+ strings.Join(lpStrings, ","),
+ strings.Join(vlStrings, ","),
+ )
+}
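For orientation, here is a minimal sketch of how a Desc created with NewDesc is typically paired with values in a custom Collector. The variable queueLengthDesc, the metric name, and the label names are illustrative, not part of the vendored code:

	var queueLengthDesc = prometheus.NewDesc(
		"worker_queue_length",            // fqName, usually built via BuildFQName
		"Current number of queued jobs.", // help
		[]string{"queue"},                // variable labels, values supplied at collect time
		prometheus.Labels{"shard": "a"},  // const labels, values fixed in the Desc
	)

	// Inside a Collector's Collect method, the Desc is combined with a value and
	// the variable label values to build a throw-away Metric:
	//
	//	ch <- prometheus.MustNewConstMetric(queueLengthDesc, prometheus.GaugeValue, 42, "high")

Any error recorded in the Desc (invalid metric or label name, duplicate label names) only surfaces once a Collector describing it is registered, as the code above documents.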
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/doc.go b/vendor/github.com/prometheus/client_golang/prometheus/doc.go
new file mode 100644
index 0000000..962608f
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/doc.go
@@ -0,0 +1,210 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package prometheus is the core instrumentation package. It provides metrics
+// primitives to instrument code for monitoring. It also offers a registry for
+// metrics. Sub-packages allow exposing the registered metrics via HTTP
+// (package promhttp) or push them to a Pushgateway (package push). There is
+// also a sub-package promauto, which provides metrics constructors with
+// automatic registration.
+//
+// All exported functions and methods are safe to be used concurrently unless
+// specified otherwise.
+//
+// # A Basic Example
+//
+// As a starting point, a very basic usage example:
+//
+// package main
+//
+// import (
+// "log"
+// "net/http"
+//
+// "github.com/prometheus/client_golang/prometheus"
+// "github.com/prometheus/client_golang/prometheus/promhttp"
+// )
+//
+// type metrics struct {
+// cpuTemp prometheus.Gauge
+// hdFailures *prometheus.CounterVec
+// }
+//
+// func NewMetrics(reg prometheus.Registerer) *metrics {
+// m := &metrics{
+// cpuTemp: prometheus.NewGauge(prometheus.GaugeOpts{
+// Name: "cpu_temperature_celsius",
+// Help: "Current temperature of the CPU.",
+// }),
+// hdFailures: prometheus.NewCounterVec(
+// prometheus.CounterOpts{
+// Name: "hd_errors_total",
+// Help: "Number of hard-disk errors.",
+// },
+// []string{"device"},
+// ),
+// }
+// reg.MustRegister(m.cpuTemp)
+// reg.MustRegister(m.hdFailures)
+// return m
+// }
+//
+// func main() {
+// // Create a non-global registry.
+// reg := prometheus.NewRegistry()
+//
+// // Create new metrics and register them using the custom registry.
+// m := NewMetrics(reg)
+// // Set values for the newly created metrics.
+// m.cpuTemp.Set(65.3)
+// m.hdFailures.With(prometheus.Labels{"device":"/dev/sda"}).Inc()
+//
+// // Expose metrics and custom registry via an HTTP server
+// // using the HandlerFor function. "/metrics" is the usual endpoint for that.
+// http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{Registry: reg}))
+// log.Fatal(http.ListenAndServe(":8080", nil))
+// }
+//
+// This is a complete program that exports two metrics, a Gauge and a Counter,
+// the latter with a label attached to turn it into a (one-dimensional) vector.
+// It registers the metrics using a custom registry and exposes them via an HTTP server
+// on the /metrics endpoint.
+//
+// # Metrics
+//
+// The number of exported identifiers in this package might appear a bit
+// overwhelming. However, in addition to the basic plumbing shown in the example
+// above, you only need to understand the different metric types and their
+// vector versions for basic usage. Furthermore, if you are not concerned with
+// fine-grained control of when and how to register metrics with the registry,
+// have a look at the promauto package, which will effectively allow you to
+// ignore registration altogether in simple cases.
+//
+// Above, you have already touched the Counter and the Gauge. There are two more
+// advanced metric types: the Summary and Histogram. A more thorough description
+// of those four metric types can be found in the Prometheus docs:
+// https://prometheus.io/docs/concepts/metric_types/
+//
+// In addition to the fundamental metric types Gauge, Counter, Summary, and
+// Histogram, a very important part of the Prometheus data model is the
+// partitioning of samples along dimensions called labels, which results in
+// metric vectors. The fundamental types are GaugeVec, CounterVec, SummaryVec,
+// and HistogramVec.
+//
+// While only the fundamental metric types implement the Metric interface, both
+// the metrics and their vector versions implement the Collector interface. A
+// Collector manages the collection of a number of Metrics, but for convenience,
+// a Metric can also “collect itself”. Note that Gauge, Counter, Summary, and
+// Histogram are interfaces themselves while GaugeVec, CounterVec, SummaryVec,
+// and HistogramVec are not.
+//
+// To create instances of Metrics and their vector versions, you need a suitable
+// …Opts struct, i.e. GaugeOpts, CounterOpts, SummaryOpts, or HistogramOpts.
+//
+// # Custom Collectors and constant Metrics
+//
+// While you could create your own implementations of Metric, most likely you
+// will only ever implement the Collector interface on your own. At a first
+// glance, a custom Collector seems handy to bundle Metrics for common
+// registration (with the prime example of the different metric vectors above,
+// which bundle all the metrics of the same name but with different labels).
+//
+// There is a more involved use case, too: If you already have metrics
+// available, created outside of the Prometheus context, you don't need the
+// interface of the various Metric types. You essentially want to mirror the
+// existing numbers into Prometheus Metrics during collection. Implementing the
+// Collector interface yourself is perfect for that. You can create
+// Metric instances “on the fly” using NewConstMetric, NewConstHistogram, and
+// NewConstSummary (and their respective Must… versions). NewConstMetric is used
+// for all metric types with just a float64 as their value: Counter, Gauge, and
+// a special “type” called Untyped. Use the latter if you are not sure if the
+// mirrored metric is a Counter or a Gauge. Creation of the Metric instance
+// happens in the Collect method. The Describe method has to return separate
+// Desc instances, representative of the “throw-away” metrics to be created
+// later. NewDesc comes in handy to create those Desc instances. Alternatively,
+// you could return no Desc at all, which will mark the Collector “unchecked”.
+// No checks are performed at registration time, but metric consistency will
+// still be ensured at scrape time, i.e. any inconsistencies will lead to scrape
+// errors. Thus, with unchecked Collectors, the responsibility to not collect
+// metrics that lead to inconsistencies in the total scrape result lies with the
+// implementer of the Collector. While this is not a desirable state, it is
+// sometimes necessary. The typical use case is a situation where the exact
+// metrics to be returned by a Collector cannot be predicted at registration
+// time, but the implementer has sufficient knowledge of the whole system to
+// guarantee metric consistency.
+//
+// The Collector example illustrates the use case. You can also look at the
+// source code of the processCollector (mirroring process metrics), the
+// goCollector (mirroring Go metrics), or the expvarCollector (mirroring expvar
+// metrics) as examples that are used in this package itself.
+//
+// If you just need to call a function to get a single float value to collect as
+// a metric, GaugeFunc, CounterFunc, or UntypedFunc might be interesting
+// shortcuts.
+//
+// # Advanced Uses of the Registry
+//
+// While MustRegister is the by far most common way of registering a Collector,
+// sometimes you might want to handle the errors the registration might cause.
+// As suggested by the name, MustRegister panics if an error occurs. With the
+// Register function, the error is returned and can be handled.
+//
+// An error is returned if the registered Collector is incompatible or
+// inconsistent with already registered metrics. The registry aims for
+// consistency of the collected metrics according to the Prometheus data model.
+// Inconsistencies are ideally detected at registration time, not at collect
+// time. The former will usually be detected at start-up time of a program,
+// while the latter will only happen at scrape time, possibly not even on the
+// first scrape if the inconsistency only becomes relevant later. That is the
+// main reason why a Collector and a Metric have to describe themselves to the
+// registry.
+//
+// So far, everything we did operated on the so-called default registry, as it
+// can be found in the global DefaultRegisterer variable. With NewRegistry, you
+// can create a custom registry, or you can even implement the Registerer or
+// Gatherer interfaces yourself. The methods Register and Unregister work in the
+// same way on a custom registry as the global functions Register and Unregister
+// on the default registry.
+//
+// There are a number of uses for custom registries: You can use registries with
+// special properties, see NewPedanticRegistry. You can avoid global state, as
+// it is imposed by the DefaultRegisterer. You can use multiple registries at
+// the same time to expose different metrics in different ways. You can use
+// separate registries for testing purposes.
+//
+// Also note that the DefaultRegisterer comes registered with a Collector for Go
+// runtime metrics (via NewGoCollector) and a Collector for process metrics (via
+// NewProcessCollector). With a custom registry, you are in control and decide
+// yourself about the Collectors to register.
+//
+// # HTTP Exposition
+//
+// The Registry implements the Gatherer interface. The caller of the Gather
+// method can then expose the gathered metrics in some way. Usually, the metrics
+// are served via HTTP on the /metrics endpoint. That's happening in the example
+// above. The tools to expose metrics via HTTP are in the promhttp sub-package.
+//
+// # Pushing to the Pushgateway
+//
+// Functions for pushing to the Pushgateway can be found in the push sub-package.
+//
+// # Graphite Bridge
+//
+// Functions and examples to push metrics from a Gatherer to Graphite can be
+// found in the graphite sub-package.
+//
+// # Other Means of Exposition
+//
+// More ways of exposing metrics can easily be added by following the approaches
+// of the existing implementations.
+package prometheus
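The "Custom Collectors and constant Metrics" section above can be condensed into a short sketch. The type queueCollector and the function currentQueueLength are hypothetical names used only for illustration:

	type queueCollector struct {
		lengthDesc *prometheus.Desc
	}

	func (c *queueCollector) Describe(ch chan<- *prometheus.Desc) {
		// Return the Desc instances representative of the metrics created later.
		ch <- c.lengthDesc
	}

	func (c *queueCollector) Collect(ch chan<- prometheus.Metric) {
		// Mirror an externally maintained number into a throw-away Metric on the fly.
		ch <- prometheus.MustNewConstMetric(c.lengthDesc, prometheus.GaugeValue, currentQueueLength())
	}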
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go
new file mode 100644
index 0000000..de5a856
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go
@@ -0,0 +1,86 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "encoding/json"
+ "expvar"
+)
+
+type expvarCollector struct {
+ exports map[string]*Desc
+}
+
+// NewExpvarCollector is the obsolete version of collectors.NewExpvarCollector.
+// See there for documentation.
+//
+// Deprecated: Use collectors.NewExpvarCollector instead.
+func NewExpvarCollector(exports map[string]*Desc) Collector {
+ return &expvarCollector{
+ exports: exports,
+ }
+}
+
+// Describe implements Collector.
+func (e *expvarCollector) Describe(ch chan<- *Desc) {
+ for _, desc := range e.exports {
+ ch <- desc
+ }
+}
+
+// Collect implements Collector.
+func (e *expvarCollector) Collect(ch chan<- Metric) {
+ for name, desc := range e.exports {
+ var m Metric
+ expVar := expvar.Get(name)
+ if expVar == nil {
+ continue
+ }
+ var v interface{}
+ labels := make([]string, len(desc.variableLabels.names))
+ if err := json.Unmarshal([]byte(expVar.String()), &v); err != nil {
+ ch <- NewInvalidMetric(desc, err)
+ continue
+ }
+ var processValue func(v interface{}, i int)
+ processValue = func(v interface{}, i int) {
+ if i >= len(labels) {
+ copiedLabels := append(make([]string, 0, len(labels)), labels...)
+ switch v := v.(type) {
+ case float64:
+ m = MustNewConstMetric(desc, UntypedValue, v, copiedLabels...)
+ case bool:
+ if v {
+ m = MustNewConstMetric(desc, UntypedValue, 1, copiedLabels...)
+ } else {
+ m = MustNewConstMetric(desc, UntypedValue, 0, copiedLabels...)
+ }
+ default:
+ return
+ }
+ ch <- m
+ return
+ }
+ vm, ok := v.(map[string]interface{})
+ if !ok {
+ return
+ }
+ for lv, val := range vm {
+ labels[i] = lv
+ processValue(val, i+1)
+ }
+ }
+ processValue(v, 0)
+ }
+}
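A hedged usage sketch for this collector follows; the expvar variable name and the Prometheus metric name are illustrative, and per the Deprecated note above, new code would normally reach this through collectors.NewExpvarCollector. Assumes the expvar and prometheus packages are imported:

	// Publish an expvar and mirror it under a Prometheus name of our choosing.
	queueLength := expvar.NewFloat("queue_length")
	queueLength.Set(7)

	reg := prometheus.NewRegistry()
	reg.MustRegister(prometheus.NewExpvarCollector(map[string]*prometheus.Desc{
		"queue_length": prometheus.NewDesc(
			"expvar_queue_length",
			"Mirrored from the queue_length expvar.",
			nil, nil, // scalar expvar, so no variable labels are needed
		),
	}))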
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/fnv.go b/vendor/github.com/prometheus/client_golang/prometheus/fnv.go
new file mode 100644
index 0000000..3d383a7
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/fnv.go
@@ -0,0 +1,42 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+// Inline and byte-free variant of hash/fnv's fnv64a.
+
+const (
+ offset64 = 14695981039346656037
+ prime64 = 1099511628211
+)
+
+// hashNew initializes a new fnv64a hash value.
+func hashNew() uint64 {
+ return offset64
+}
+
+// hashAdd adds a string to a fnv64a hash value, returning the updated hash.
+func hashAdd(h uint64, s string) uint64 {
+ for i := 0; i < len(s); i++ {
+ h ^= uint64(s[i])
+ h *= prime64
+ }
+ return h
+}
+
+// hashAddByte adds a byte to a fnv64a hash value, returning the updated hash.
+func hashAddByte(h uint64, b byte) uint64 {
+ h ^= uint64(b)
+ h *= prime64
+ return h
+}
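These helpers are unexported and only used inside the package. A small sketch of the intended call pattern, written as if inside package prometheus (the separator byte value 255 is illustrative):

	// Hash a label name/value pair the way the package's vectors do.
	h := hashNew()
	h = hashAdd(h, "method")
	h = hashAddByte(h, 255) // a separator byte keeps "ab"+"c" distinct from "a"+"bc"
	h = hashAdd(h, "GET")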
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/gauge.go b/vendor/github.com/prometheus/client_golang/prometheus/gauge.go
new file mode 100644
index 0000000..dd2eac9
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/gauge.go
@@ -0,0 +1,311 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "math"
+ "sync/atomic"
+ "time"
+
+ dto "github.com/prometheus/client_model/go"
+)
+
+// Gauge is a Metric that represents a single numerical value that can
+// arbitrarily go up and down.
+//
+// A Gauge is typically used for measured values like temperatures or current
+// memory usage, but also "counts" that can go up and down, like the number of
+// running goroutines.
+//
+// To create Gauge instances, use NewGauge.
+type Gauge interface {
+ Metric
+ Collector
+
+ // Set sets the Gauge to an arbitrary value.
+ Set(float64)
+ // Inc increments the Gauge by 1. Use Add to increment it by arbitrary
+ // values.
+ Inc()
+ // Dec decrements the Gauge by 1. Use Sub to decrement it by arbitrary
+ // values.
+ Dec()
+ // Add adds the given value to the Gauge. (The value can be negative,
+ // resulting in a decrease of the Gauge.)
+ Add(float64)
+ // Sub subtracts the given value from the Gauge. (The value can be
+ // negative, resulting in an increase of the Gauge.)
+ Sub(float64)
+
+ // SetToCurrentTime sets the Gauge to the current Unix time in seconds.
+ SetToCurrentTime()
+}
+
+// GaugeOpts is an alias for Opts. See there for doc comments.
+type GaugeOpts Opts
+
+// GaugeVecOpts bundles the options to create a GaugeVec metric.
+// It is mandatory to set GaugeOpts; see there for mandatory fields. VariableLabels
+// is optional and can safely be left to its default value.
+type GaugeVecOpts struct {
+ GaugeOpts
+
+ // VariableLabels are used to partition the metric vector by the given set
+ // of labels. Each label value will be constrained with the optional Constraint
+ // function, if provided.
+ VariableLabels ConstrainableLabels
+}
+
+// NewGauge creates a new Gauge based on the provided GaugeOpts.
+//
+// The returned implementation is optimized for a fast Set method. If you have a
+// choice for managing the value of a Gauge via Set vs. Inc/Dec/Add/Sub, pick
+// the former. For example, the Inc method of the returned Gauge is slower than
+// the Inc method of a Counter returned by NewCounter. This matches the typical
+// scenarios for Gauges and Counters, where the former tends to be Set-heavy and
+// the latter Inc-heavy.
+func NewGauge(opts GaugeOpts) Gauge {
+ desc := NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ nil,
+ opts.ConstLabels,
+ )
+ result := &gauge{desc: desc, labelPairs: desc.constLabelPairs}
+ result.init(result) // Init self-collection.
+ return result
+}
+
+type gauge struct {
+ // valBits contains the bits of the represented float64 value. It has
+ // to go first in the struct to guarantee alignment for atomic
+ // operations. http://golang.org/pkg/sync/atomic/#pkg-note-BUG
+ valBits uint64
+
+ selfCollector
+
+ desc *Desc
+ labelPairs []*dto.LabelPair
+}
+
+func (g *gauge) Desc() *Desc {
+ return g.desc
+}
+
+func (g *gauge) Set(val float64) {
+ atomic.StoreUint64(&g.valBits, math.Float64bits(val))
+}
+
+func (g *gauge) SetToCurrentTime() {
+ g.Set(float64(time.Now().UnixNano()) / 1e9)
+}
+
+func (g *gauge) Inc() {
+ g.Add(1)
+}
+
+func (g *gauge) Dec() {
+ g.Add(-1)
+}
+
+func (g *gauge) Add(val float64) {
+ for {
+ oldBits := atomic.LoadUint64(&g.valBits)
+ newBits := math.Float64bits(math.Float64frombits(oldBits) + val)
+ if atomic.CompareAndSwapUint64(&g.valBits, oldBits, newBits) {
+ return
+ }
+ }
+}
+
+func (g *gauge) Sub(val float64) {
+ g.Add(val * -1)
+}
+
+func (g *gauge) Write(out *dto.Metric) error {
+ val := math.Float64frombits(atomic.LoadUint64(&g.valBits))
+ return populateMetric(GaugeValue, val, g.labelPairs, nil, out, nil)
+}
+
+// GaugeVec is a Collector that bundles a set of Gauges that all share the same
+// Desc, but have different values for their variable labels. This is used if
+// you want to count the same thing partitioned by various dimensions
+// (e.g. number of operations queued, partitioned by user and operation
+// type). Create instances with NewGaugeVec.
+type GaugeVec struct {
+ *MetricVec
+}
+
+// NewGaugeVec creates a new GaugeVec based on the provided GaugeOpts and
+// partitioned by the given label names.
+func NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec {
+ return V2.NewGaugeVec(GaugeVecOpts{
+ GaugeOpts: opts,
+ VariableLabels: UnconstrainedLabels(labelNames),
+ })
+}
+
+// NewGaugeVec creates a new GaugeVec based on the provided GaugeVecOpts.
+func (v2) NewGaugeVec(opts GaugeVecOpts) *GaugeVec {
+ desc := V2.NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ opts.VariableLabels,
+ opts.ConstLabels,
+ )
+ return &GaugeVec{
+ MetricVec: NewMetricVec(desc, func(lvs ...string) Metric {
+ if len(lvs) != len(desc.variableLabels.names) {
+ panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.names, lvs))
+ }
+ result := &gauge{desc: desc, labelPairs: MakeLabelPairs(desc, lvs)}
+ result.init(result) // Init self-collection.
+ return result
+ }),
+ }
+}
+
+// GetMetricWithLabelValues returns the Gauge for the given slice of label
+// values (same order as the variable labels in Desc). If that combination of
+// label values is accessed for the first time, a new Gauge is created.
+//
+// It is possible to call this method without using the returned Gauge to only
+// create the new Gauge but leave it at its starting value 0. See also the
+// SummaryVec example.
+//
+// Keeping the Gauge for later use is possible (and should be considered if
+// performance is critical), but keep in mind that Reset, DeleteLabelValues and
+// Delete can be used to delete the Gauge from the GaugeVec. In that case, the
+// Gauge will still exist, but it will not be exported anymore, even if a
+// Gauge with the same label values is created later. See also the CounterVec
+// example.
+//
+// An error is returned if the number of label values is not the same as the
+// number of variable labels in Desc (minus any curried labels).
+//
+// Note that for more than one label value, this method is prone to mistakes
+// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as
+// an alternative to avoid that type of mistake. For higher label numbers, the
+// latter has a much more readable (albeit more verbose) syntax, but it comes
+// with a performance overhead (for creating and processing the Labels map).
+func (v *GaugeVec) GetMetricWithLabelValues(lvs ...string) (Gauge, error) {
+ metric, err := v.MetricVec.GetMetricWithLabelValues(lvs...)
+ if metric != nil {
+ return metric.(Gauge), err
+ }
+ return nil, err
+}
+
+// GetMetricWith returns the Gauge for the given Labels map (the label names
+// must match those of the variable labels in Desc). If that label map is
+// accessed for the first time, a new Gauge is created. Implications of
+// creating a Gauge without using it and keeping the Gauge for later use are
+// the same as for GetMetricWithLabelValues.
+//
+// An error is returned if the number and names of the Labels are inconsistent
+// with those of the variable labels in Desc (minus any curried labels).
+//
+// This method is used for the same purpose as
+// GetMetricWithLabelValues(...string). See there for pros and cons of the two
+// methods.
+func (v *GaugeVec) GetMetricWith(labels Labels) (Gauge, error) {
+ metric, err := v.MetricVec.GetMetricWith(labels)
+ if metric != nil {
+ return metric.(Gauge), err
+ }
+ return nil, err
+}
+
+// WithLabelValues works as GetMetricWithLabelValues, but panics where
+// GetMetricWithLabelValues would have returned an error. Not returning an
+// error allows shortcuts like
+//
+// myVec.WithLabelValues("404", "GET").Add(42)
+func (v *GaugeVec) WithLabelValues(lvs ...string) Gauge {
+ g, err := v.GetMetricWithLabelValues(lvs...)
+ if err != nil {
+ panic(err)
+ }
+ return g
+}
+
+// With works as GetMetricWith, but panics where GetMetricWith would have
+// returned an error. Not returning an error allows shortcuts like
+//
+// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42)
+func (v *GaugeVec) With(labels Labels) Gauge {
+ g, err := v.GetMetricWith(labels)
+ if err != nil {
+ panic(err)
+ }
+ return g
+}
+
+// CurryWith returns a vector curried with the provided labels, i.e. the
+// returned vector has those labels pre-set for all labeled operations performed
+// on it. The cardinality of the curried vector is reduced accordingly. The
+// order of the remaining labels stays the same (just with the curried labels
+// taken out of the sequence – which is relevant for the
+// (GetMetric)WithLabelValues methods). It is possible to curry a curried
+// vector, but only with labels not yet used for currying before.
+//
+// The metrics contained in the GaugeVec are shared between the curried and
+// uncurried vectors. They are just accessed differently. Curried and uncurried
+// vectors behave identically in terms of collection. Only one must be
+// registered with a given registry (usually the uncurried version). The Reset
+// method deletes all metrics, even if called on a curried vector.
+func (v *GaugeVec) CurryWith(labels Labels) (*GaugeVec, error) {
+ vec, err := v.MetricVec.CurryWith(labels)
+ if vec != nil {
+ return &GaugeVec{vec}, err
+ }
+ return nil, err
+}
+
+// MustCurryWith works as CurryWith but panics where CurryWith would have
+// returned an error.
+func (v *GaugeVec) MustCurryWith(labels Labels) *GaugeVec {
+ vec, err := v.CurryWith(labels)
+ if err != nil {
+ panic(err)
+ }
+ return vec
+}
+
+// GaugeFunc is a Gauge whose value is determined at collect time by calling a
+// provided function.
+//
+// To create GaugeFunc instances, use NewGaugeFunc.
+type GaugeFunc interface {
+ Metric
+ Collector
+}
+
+// NewGaugeFunc creates a new GaugeFunc based on the provided GaugeOpts. The
+// value reported is determined by calling the given function from within the
+// Write method. Take into account that metric collection may happen
+// concurrently. Therefore, it must be safe to call the provided function
+// concurrently.
+//
+// NewGaugeFunc is a good way to create an “info” style metric with a constant
+// value of 1. Example:
+// https://github.com/prometheus/common/blob/8558a5b7db3c84fa38b4766966059a7bd5bfa2ee/version/info.go#L36-L56
+func NewGaugeFunc(opts GaugeOpts, function func() float64) GaugeFunc {
+ return newValueFunc(NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ nil,
+ opts.ConstLabels,
+ ), GaugeValue, function)
+}
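A short, hedged usage sketch tying the GaugeVec pieces above together; the metric and label names are illustrative:

	queueLengths := prometheus.NewGaugeVec(prometheus.GaugeOpts{
		Name: "queue_length",
		Help: "Current number of queued jobs, partitioned by queue and shard.",
	}, []string{"queue", "shard"})
	prometheus.MustRegister(queueLengths)

	queueLengths.WithLabelValues("high", "a").Set(42)                        // order matches the label names
	queueLengths.With(prometheus.Labels{"queue": "low", "shard": "a"}).Inc() // order-independent alternative

	// Currying pre-sets a label for all subsequent operations on the returned vector.
	shardA := queueLengths.MustCurryWith(prometheus.Labels{"shard": "a"})
	shardA.WithLabelValues("high").Dec()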
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/get_pid.go b/vendor/github.com/prometheus/client_golang/prometheus/get_pid.go
new file mode 100644
index 0000000..614fd61
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/get_pid.go
@@ -0,0 +1,26 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !js || wasm
+// +build !js wasm
+
+package prometheus
+
+import "os"
+
+func getPIDFn() func() (int, error) {
+ pid := os.Getpid()
+ return func() (int, error) {
+ return pid, nil
+ }
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/get_pid_gopherjs.go b/vendor/github.com/prometheus/client_golang/prometheus/get_pid_gopherjs.go
new file mode 100644
index 0000000..eaf8059
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/get_pid_gopherjs.go
@@ -0,0 +1,23 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build js && !wasm
+// +build js,!wasm
+
+package prometheus
+
+func getPIDFn() func() (int, error) {
+ return func() (int, error) {
+ return 1, nil
+ }
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go
new file mode 100644
index 0000000..520cbd7
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go
@@ -0,0 +1,274 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "runtime"
+ "runtime/debug"
+ "time"
+)
+
+// goRuntimeMemStats provides the metrics initially provided by runtime.ReadMemStats.
+// From Go 1.17 on, similar (and better) statistics are provided by runtime/metrics, so
+// while each eval closure still works on runtime.MemStats, the struct from Go 1.17+ is
+// populated using runtime/metrics. Those are the defaults we can't alter.
+func goRuntimeMemStats() memStatsMetrics {
+ return memStatsMetrics{
+ {
+ desc: NewDesc(
+ memstatNamespace("alloc_bytes"),
+ "Number of bytes allocated in heap and currently in use. Equals to /memory/classes/heap/objects:bytes.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.Alloc) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("alloc_bytes_total"),
+ "Total number of bytes allocated in heap until now, even if released already. Equals to /gc/heap/allocs:bytes.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.TotalAlloc) },
+ valType: CounterValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("sys_bytes"),
+ "Number of bytes obtained from system. Equals to /memory/classes/total:bytes.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.Sys) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("mallocs_total"),
+ // TODO(bwplotka): We could add go_memstats_heap_objects, probably useful for discovery. Let's gather more feedback, kind of a waste of bytes for everybody for compatibility reasons to keep both, and we can't really rename/remove useful metric.
+ "Total number of heap objects allocated, both live and gc-ed. Semantically a counter version for go_memstats_heap_objects gauge. Equals to /gc/heap/allocs:objects + /gc/heap/tiny/allocs:objects.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.Mallocs) },
+ valType: CounterValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("frees_total"),
+ "Total number of heap objects freed. Equals to /gc/heap/frees:objects + /gc/heap/tiny/allocs:objects.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.Frees) },
+ valType: CounterValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("heap_alloc_bytes"),
+ "Number of heap bytes allocated and currently in use, same as go_memstats_alloc_bytes. Equals to /memory/classes/heap/objects:bytes.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapAlloc) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("heap_sys_bytes"),
+ "Number of heap bytes obtained from system. Equals to /memory/classes/heap/objects:bytes + /memory/classes/heap/unused:bytes + /memory/classes/heap/released:bytes + /memory/classes/heap/free:bytes.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapSys) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("heap_idle_bytes"),
+ "Number of heap bytes waiting to be used. Equals to /memory/classes/heap/released:bytes + /memory/classes/heap/free:bytes.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapIdle) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("heap_inuse_bytes"),
+ "Number of heap bytes that are in use. Equals to /memory/classes/heap/objects:bytes + /memory/classes/heap/unused:bytes",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapInuse) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("heap_released_bytes"),
+ "Number of heap bytes released to OS. Equals to /memory/classes/heap/released:bytes.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapReleased) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("heap_objects"),
+ "Number of currently allocated objects. Equals to /gc/heap/objects:objects.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapObjects) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("stack_inuse_bytes"),
+ "Number of bytes obtained from system for stack allocator in non-CGO environments. Equals to /memory/classes/heap/stacks:bytes.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.StackInuse) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("stack_sys_bytes"),
+ "Number of bytes obtained from system for stack allocator. Equals to /memory/classes/heap/stacks:bytes + /memory/classes/os-stacks:bytes.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.StackSys) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("mspan_inuse_bytes"),
+ "Number of bytes in use by mspan structures. Equals to /memory/classes/metadata/mspan/inuse:bytes.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanInuse) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("mspan_sys_bytes"),
+ "Number of bytes used for mspan structures obtained from system. Equals to /memory/classes/metadata/mspan/inuse:bytes + /memory/classes/metadata/mspan/free:bytes.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanSys) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("mcache_inuse_bytes"),
+ "Number of bytes in use by mcache structures. Equals to /memory/classes/metadata/mcache/inuse:bytes.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheInuse) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("mcache_sys_bytes"),
+ "Number of bytes used for mcache structures obtained from system. Equals to /memory/classes/metadata/mcache/inuse:bytes + /memory/classes/metadata/mcache/free:bytes.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheSys) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("buck_hash_sys_bytes"),
+ "Number of bytes used by the profiling bucket hash table. Equals to /memory/classes/profiling/buckets:bytes.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.BuckHashSys) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("gc_sys_bytes"),
+ "Number of bytes used for garbage collection system metadata. Equals to /memory/classes/metadata/other:bytes.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.GCSys) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("other_sys_bytes"),
+ "Number of bytes used for other system allocations. Equals to /memory/classes/other:bytes.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.OtherSys) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("next_gc_bytes"),
+ "Number of heap bytes when next garbage collection will take place. Equals to /gc/heap/goal:bytes.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.NextGC) },
+ valType: GaugeValue,
+ },
+ }
+}
+
+type baseGoCollector struct {
+ goroutinesDesc *Desc
+ threadsDesc *Desc
+ gcDesc *Desc
+ gcLastTimeDesc *Desc
+ goInfoDesc *Desc
+}
+
+func newBaseGoCollector() baseGoCollector {
+ return baseGoCollector{
+ goroutinesDesc: NewDesc(
+ "go_goroutines",
+ "Number of goroutines that currently exist.",
+ nil, nil),
+ threadsDesc: NewDesc(
+ "go_threads",
+ "Number of OS threads created.",
+ nil, nil),
+ gcDesc: NewDesc(
+ "go_gc_duration_seconds",
+ "A summary of the wall-time pause (stop-the-world) duration in garbage collection cycles.",
+ nil, nil),
+ gcLastTimeDesc: NewDesc(
+ "go_memstats_last_gc_time_seconds",
+ "Number of seconds since 1970 of last garbage collection.",
+ nil, nil),
+ goInfoDesc: NewDesc(
+ "go_info",
+ "Information about the Go environment.",
+ nil, Labels{"version": runtime.Version()}),
+ }
+}
+
+// Describe returns all descriptions of the collector.
+func (c *baseGoCollector) Describe(ch chan<- *Desc) {
+ ch <- c.goroutinesDesc
+ ch <- c.threadsDesc
+ ch <- c.gcDesc
+ ch <- c.gcLastTimeDesc
+ ch <- c.goInfoDesc
+}
+
+// Collect returns the current state of all metrics of the collector.
+func (c *baseGoCollector) Collect(ch chan<- Metric) {
+ ch <- MustNewConstMetric(c.goroutinesDesc, GaugeValue, float64(runtime.NumGoroutine()))
+
+ n := getRuntimeNumThreads()
+ ch <- MustNewConstMetric(c.threadsDesc, GaugeValue, n)
+
+ var stats debug.GCStats
+ stats.PauseQuantiles = make([]time.Duration, 5)
+ debug.ReadGCStats(&stats)
+
+ quantiles := make(map[float64]float64)
+ for idx, pq := range stats.PauseQuantiles[1:] {
+ quantiles[float64(idx+1)/float64(len(stats.PauseQuantiles)-1)] = pq.Seconds()
+ }
+ quantiles[0.0] = stats.PauseQuantiles[0].Seconds()
+ ch <- MustNewConstSummary(c.gcDesc, uint64(stats.NumGC), stats.PauseTotal.Seconds(), quantiles)
+ ch <- MustNewConstMetric(c.gcLastTimeDesc, GaugeValue, float64(stats.LastGC.UnixNano())/1e9)
+ ch <- MustNewConstMetric(c.goInfoDesc, GaugeValue, 1)
+}
+
+func memstatNamespace(s string) string {
+ return "go_memstats_" + s
+}
+
+// memStatsMetrics provide description, evaluator, runtime/metrics name, and
+// value type for memstat metrics.
+type memStatsMetrics []struct {
+ desc *Desc
+ eval func(*runtime.MemStats) float64
+ valType ValueType
+}
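To make the eval/valType plumbing above concrete, here is a hedged sketch of how each memStatsMetrics entry is consumed, written as if inside package prometheus; it mirrors the msCollect helpers in the version-specific collectors that follow, and ch stands for a chan<- Metric supplied by the registry:

	var ms runtime.MemStats
	runtime.ReadMemStats(&ms)
	for _, m := range goRuntimeMemStats() {
		// Each entry evaluates one MemStats field and emits it as a const metric.
		ch <- MustNewConstMetric(m.desc, m.valType, m.eval(&ms))
	}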
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector_go116.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector_go116.go
new file mode 100644
index 0000000..897a6e9
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/go_collector_go116.go
@@ -0,0 +1,122 @@
+// Copyright 2021 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !go1.17
+// +build !go1.17
+
+package prometheus
+
+import (
+ "runtime"
+ "sync"
+ "time"
+)
+
+type goCollector struct {
+ base baseGoCollector
+
+ // ms... are memstats related.
+ msLast *runtime.MemStats // Previously collected memstats.
+ msLastTimestamp time.Time
+ msMtx sync.Mutex // Protects msLast and msLastTimestamp.
+ msMetrics memStatsMetrics
+ msRead func(*runtime.MemStats) // For mocking in tests.
+ msMaxWait time.Duration // Wait time for fresh memstats.
+ msMaxAge time.Duration // Maximum allowed age of old memstats.
+}
+
+// NewGoCollector is the obsolete version of collectors.NewGoCollector.
+// See there for documentation.
+//
+// Deprecated: Use collectors.NewGoCollector instead.
+func NewGoCollector() Collector {
+ msMetrics := goRuntimeMemStats()
+ msMetrics = append(msMetrics, struct {
+ desc *Desc
+ eval func(*runtime.MemStats) float64
+ valType ValueType
+ }{
+ // This metric is omitted in Go1.17+, see https://github.com/prometheus/client_golang/issues/842#issuecomment-861812034
+ desc: NewDesc(
+ memstatNamespace("gc_cpu_fraction"),
+ "The fraction of this program's available CPU time used by the GC since the program started.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return ms.GCCPUFraction },
+ valType: GaugeValue,
+ })
+ return &goCollector{
+ base: newBaseGoCollector(),
+ msLast: &runtime.MemStats{},
+ msRead: runtime.ReadMemStats,
+ msMaxWait: time.Second,
+ msMaxAge: 5 * time.Minute,
+ msMetrics: msMetrics,
+ }
+}
+
+// Describe returns all descriptions of the collector.
+func (c *goCollector) Describe(ch chan<- *Desc) {
+ c.base.Describe(ch)
+ for _, i := range c.msMetrics {
+ ch <- i.desc
+ }
+}
+
+// Collect returns the current state of all metrics of the collector.
+func (c *goCollector) Collect(ch chan<- Metric) {
+ var (
+ ms = &runtime.MemStats{}
+ done = make(chan struct{})
+ )
+ // Start reading memstats first as it might take a while.
+ go func() {
+ c.msRead(ms)
+ c.msMtx.Lock()
+ c.msLast = ms
+ c.msLastTimestamp = time.Now()
+ c.msMtx.Unlock()
+ close(done)
+ }()
+
+ // Collect base non-memory metrics.
+ c.base.Collect(ch)
+
+ timer := time.NewTimer(c.msMaxWait)
+ select {
+ case <-done: // Our own ReadMemStats succeeded in time. Use it.
+ timer.Stop() // Important for high collection frequencies to not pile up timers.
+ c.msCollect(ch, ms)
+ return
+ case <-timer.C: // Time out, use last memstats if possible. Continue below.
+ }
+ c.msMtx.Lock()
+ if time.Since(c.msLastTimestamp) < c.msMaxAge {
+ // Last memstats are recent enough. Collect from them under the lock.
+ c.msCollect(ch, c.msLast)
+ c.msMtx.Unlock()
+ return
+ }
+ // If we are here, the last memstats are too old or don't exist. We have
+ // to wait until our own ReadMemStats finally completes. For that to
+ // happen, we have to release the lock.
+ c.msMtx.Unlock()
+ <-done
+ c.msCollect(ch, ms)
+}
+
+func (c *goCollector) msCollect(ch chan<- Metric, ms *runtime.MemStats) {
+ for _, i := range c.msMetrics {
+ ch <- MustNewConstMetric(i.desc, i.valType, i.eval(ms))
+ }
+}
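As the Deprecated note above indicates, new code should obtain the Go collector from the collectors sub-package rather than calling NewGoCollector here directly. A minimal registration sketch (the registry variable name is illustrative, and the collectors import path is "github.com/prometheus/client_golang/prometheus/collectors"):

	reg := prometheus.NewRegistry()
	reg.MustRegister(collectors.NewGoCollector())
	// reg now exposes go_goroutines, go_gc_duration_seconds, go_memstats_*, and friends on scrape.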
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go
new file mode 100644
index 0000000..5117464
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go
@@ -0,0 +1,574 @@
+// Copyright 2021 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build go1.17
+// +build go1.17
+
+package prometheus
+
+import (
+ "fmt"
+ "math"
+ "runtime"
+ "runtime/metrics"
+ "strings"
+ "sync"
+
+ "github.com/prometheus/client_golang/prometheus/internal"
+
+ dto "github.com/prometheus/client_model/go"
+ "google.golang.org/protobuf/proto"
+)
+
+const (
+ // constants for strings referenced more than once.
+ goGCHeapTinyAllocsObjects = "/gc/heap/tiny/allocs:objects"
+ goGCHeapAllocsObjects = "/gc/heap/allocs:objects"
+ goGCHeapFreesObjects = "/gc/heap/frees:objects"
+ goGCHeapFreesBytes = "/gc/heap/frees:bytes"
+ goGCHeapAllocsBytes = "/gc/heap/allocs:bytes"
+ goGCHeapObjects = "/gc/heap/objects:objects"
+ goGCHeapGoalBytes = "/gc/heap/goal:bytes"
+ goMemoryClassesTotalBytes = "/memory/classes/total:bytes"
+ goMemoryClassesHeapObjectsBytes = "/memory/classes/heap/objects:bytes"
+ goMemoryClassesHeapUnusedBytes = "/memory/classes/heap/unused:bytes"
+ goMemoryClassesHeapReleasedBytes = "/memory/classes/heap/released:bytes"
+ goMemoryClassesHeapFreeBytes = "/memory/classes/heap/free:bytes"
+ goMemoryClassesHeapStacksBytes = "/memory/classes/heap/stacks:bytes"
+ goMemoryClassesOSStacksBytes = "/memory/classes/os-stacks:bytes"
+ goMemoryClassesMetadataMSpanInuseBytes = "/memory/classes/metadata/mspan/inuse:bytes"
+ goMemoryClassesMetadataMSPanFreeBytes = "/memory/classes/metadata/mspan/free:bytes"
+ goMemoryClassesMetadataMCacheInuseBytes = "/memory/classes/metadata/mcache/inuse:bytes"
+ goMemoryClassesMetadataMCacheFreeBytes = "/memory/classes/metadata/mcache/free:bytes"
+ goMemoryClassesProfilingBucketsBytes = "/memory/classes/profiling/buckets:bytes"
+ goMemoryClassesMetadataOtherBytes = "/memory/classes/metadata/other:bytes"
+ goMemoryClassesOtherBytes = "/memory/classes/other:bytes"
+)
+
+// rmNamesForMemStatsMetrics lists the runtime/metrics names required to populate the goRuntimeMemStats-like metrics.
+var rmNamesForMemStatsMetrics = []string{
+ goGCHeapTinyAllocsObjects,
+ goGCHeapAllocsObjects,
+ goGCHeapFreesObjects,
+ goGCHeapAllocsBytes,
+ goGCHeapObjects,
+ goGCHeapGoalBytes,
+ goMemoryClassesTotalBytes,
+ goMemoryClassesHeapObjectsBytes,
+ goMemoryClassesHeapUnusedBytes,
+ goMemoryClassesHeapReleasedBytes,
+ goMemoryClassesHeapFreeBytes,
+ goMemoryClassesHeapStacksBytes,
+ goMemoryClassesOSStacksBytes,
+ goMemoryClassesMetadataMSpanInuseBytes,
+ goMemoryClassesMetadataMSPanFreeBytes,
+ goMemoryClassesMetadataMCacheInuseBytes,
+ goMemoryClassesMetadataMCacheFreeBytes,
+ goMemoryClassesProfilingBucketsBytes,
+ goMemoryClassesMetadataOtherBytes,
+ goMemoryClassesOtherBytes,
+}
+
+func bestEffortLookupRM(lookup []string) []metrics.Description {
+ ret := make([]metrics.Description, 0, len(lookup))
+ for _, rm := range metrics.All() {
+ for _, m := range lookup {
+ if m == rm.Name {
+ ret = append(ret, rm)
+ }
+ }
+ }
+ return ret
+}
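bestEffortLookupRM only resolves metric descriptions; the values themselves are read later through the sampleBuf/sampleMap machinery. A minimal, hedged sketch of that runtime/metrics read path, using two of the name constants defined above:

	samples := []metrics.Sample{
		{Name: goMemoryClassesTotalBytes},
		{Name: goGCHeapGoalBytes},
	}
	metrics.Read(samples) // fills samples[i].Value in place
	for _, s := range samples {
		if s.Value.Kind() == metrics.KindUint64 {
			fmt.Println(s.Name, s.Value.Uint64())
		}
	}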
+
+type goCollector struct {
+ base baseGoCollector
+
+ // mu protects updates to all fields ensuring a consistent
+ // snapshot is always produced by Collect.
+ mu sync.Mutex
+
+ // Contains all samples that have to be retrieved from runtime/metrics (not all of them will be exposed).
+ sampleBuf []metrics.Sample
+ // sampleMap allows lookup for MemStats metrics and runtime/metrics histograms for exact sums.
+ sampleMap map[string]*metrics.Sample
+
+ // rmExposedMetrics represents all runtime/metrics package metrics
+ // that were configured to be exposed.
+ rmExposedMetrics []collectorMetric
+ rmExactSumMapForHist map[string]string
+
+ // With Go 1.17, the runtime/metrics package was introduced.
+ // From that point on, metric names produced by the runtime/metrics
+ // package could be generated from runtime/metrics names. However,
+ // these differ from the old names for the same values.
+ //
+ // This field exists to export the same values under the old names
+ // as well.
+ msMetrics memStatsMetrics
+ msMetricsEnabled bool
+}
+
+type rmMetricDesc struct {
+ metrics.Description
+}
+
+func matchRuntimeMetricsRules(rules []internal.GoCollectorRule) []rmMetricDesc {
+ var descs []rmMetricDesc
+ for _, d := range metrics.All() {
+ var (
+ deny = true
+ desc rmMetricDesc
+ )
+
+ for _, r := range rules {
+ if !r.Matcher.MatchString(d.Name) {
+ continue
+ }
+ deny = r.Deny
+ }
+ if deny {
+ continue
+ }
+
+ desc.Description = d
+ descs = append(descs, desc)
+ }
+ return descs
+}
+
+func defaultGoCollectorOptions() internal.GoCollectorOptions {
+ return internal.GoCollectorOptions{
+ RuntimeMetricSumForHist: map[string]string{
+ "/gc/heap/allocs-by-size:bytes": goGCHeapAllocsBytes,
+ "/gc/heap/frees-by-size:bytes": goGCHeapFreesBytes,
+ },
+ RuntimeMetricRules: []internal.GoCollectorRule{
+ // Recommended metrics we want by default from runtime/metrics.
+ {Matcher: internal.GoCollectorDefaultRuntimeMetrics},
+ },
+ }
+}
+
+// NewGoCollector is the obsolete version of collectors.NewGoCollector.
+// See there for documentation.
+//
+// Deprecated: Use collectors.NewGoCollector instead.
+func NewGoCollector(opts ...func(o *internal.GoCollectorOptions)) Collector {
+ opt := defaultGoCollectorOptions()
+ for _, o := range opts {
+ o(&opt)
+ }
+
+ exposedDescriptions := matchRuntimeMetricsRules(opt.RuntimeMetricRules)
+
+ // Collect all histogram samples so that we can get their buckets.
+ // The API guarantees that the buckets are always fixed for the lifetime
+ // of the process.
+ var histograms []metrics.Sample
+ for _, d := range exposedDescriptions {
+ if d.Kind == metrics.KindFloat64Histogram {
+ histograms = append(histograms, metrics.Sample{Name: d.Name})
+ }
+ }
+
+ if len(histograms) > 0 {
+ metrics.Read(histograms)
+ }
+
+ bucketsMap := make(map[string][]float64)
+ for i := range histograms {
+ bucketsMap[histograms[i].Name] = histograms[i].Value.Float64Histogram().Buckets
+ }
+
+ // Generate a collector for each exposed runtime/metrics metric.
+ metricSet := make([]collectorMetric, 0, len(exposedDescriptions))
+ // sampleBuf is used for reading from runtime/metrics. It is sized for the
+ // largest possible case up front so that it is never reallocated, which keeps
+ // the pointers stored in sampleMap stable.
+ sampleBuf := make([]metrics.Sample, 0, len(exposedDescriptions)+len(opt.RuntimeMetricSumForHist)+len(rmNamesForMemStatsMetrics))
+ sampleMap := make(map[string]*metrics.Sample, len(exposedDescriptions))
+ for _, d := range exposedDescriptions {
+ namespace, subsystem, name, ok := internal.RuntimeMetricsToProm(&d.Description)
+ if !ok {
+ // Just ignore this metric; we can't do anything with it here.
+ // If a user decides to use the latest version of Go, we don't want
+ // to fail here. This condition is tested in TestExpectedRuntimeMetrics.
+ continue
+ }
+ help := attachOriginalName(d.Description.Description, d.Name)
+
+ sampleBuf = append(sampleBuf, metrics.Sample{Name: d.Name})
+ sampleMap[d.Name] = &sampleBuf[len(sampleBuf)-1]
+
+ var m collectorMetric
+ if d.Kind == metrics.KindFloat64Histogram {
+ _, hasSum := opt.RuntimeMetricSumForHist[d.Name]
+ unit := d.Name[strings.IndexRune(d.Name, ':')+1:]
+ m = newBatchHistogram(
+ NewDesc(
+ BuildFQName(namespace, subsystem, name),
+ help,
+ nil,
+ nil,
+ ),
+ internal.RuntimeMetricsBucketsForUnit(bucketsMap[d.Name], unit),
+ hasSum,
+ )
+ } else if d.Cumulative {
+ m = NewCounter(CounterOpts{
+ Namespace: namespace,
+ Subsystem: subsystem,
+ Name: name,
+ Help: help,
+ },
+ )
+ } else {
+ m = NewGauge(GaugeOpts{
+ Namespace: namespace,
+ Subsystem: subsystem,
+ Name: name,
+ Help: help,
+ })
+ }
+ metricSet = append(metricSet, m)
+ }
+
+ // Add exact sum metrics to sampleBuf if not added before.
+ for _, h := range histograms {
+ sumMetric, ok := opt.RuntimeMetricSumForHist[h.Name]
+ if !ok {
+ continue
+ }
+
+ if _, ok := sampleMap[sumMetric]; ok {
+ continue
+ }
+ sampleBuf = append(sampleBuf, metrics.Sample{Name: sumMetric})
+ sampleMap[sumMetric] = &sampleBuf[len(sampleBuf)-1]
+ }
+
+ var (
+ msMetrics memStatsMetrics
+ msDescriptions []metrics.Description
+ )
+
+ if !opt.DisableMemStatsLikeMetrics {
+ msMetrics = goRuntimeMemStats()
+ msDescriptions = bestEffortLookupRM(rmNamesForMemStatsMetrics)
+
+ // Check if metric was not exposed before and if not, add to sampleBuf.
+ for _, mdDesc := range msDescriptions {
+ if _, ok := sampleMap[mdDesc.Name]; ok {
+ continue
+ }
+ sampleBuf = append(sampleBuf, metrics.Sample{Name: mdDesc.Name})
+ sampleMap[mdDesc.Name] = &sampleBuf[len(sampleBuf)-1]
+ }
+ }
+
+ return &goCollector{
+ base: newBaseGoCollector(),
+ sampleBuf: sampleBuf,
+ sampleMap: sampleMap,
+ rmExposedMetrics: metricSet,
+ rmExactSumMapForHist: opt.RuntimeMetricSumForHist,
+ msMetrics: msMetrics,
+ msMetricsEnabled: !opt.DisableMemStatsLikeMetrics,
+ }
+}
+
+func attachOriginalName(desc, origName string) string {
+ return fmt.Sprintf("%s Sourced from %s", desc, origName)
+}
+
+// Describe returns all descriptions of the collector.
+func (c *goCollector) Describe(ch chan<- *Desc) {
+ c.base.Describe(ch)
+ for _, i := range c.msMetrics {
+ ch <- i.desc
+ }
+ for _, m := range c.rmExposedMetrics {
+ ch <- m.Desc()
+ }
+}
+
+// Collect returns the current state of all metrics of the collector.
+func (c *goCollector) Collect(ch chan<- Metric) {
+ // Collect base non-memory metrics.
+ c.base.Collect(ch)
+
+ if len(c.sampleBuf) == 0 {
+ return
+ }
+
+ // Collect must be thread-safe, so prevent concurrent use of
+ // sampleBuf elements. Just read into sampleBuf but write all the data
+ // we get into our Metrics or MemStats.
+ //
+ // This lock also ensures that the Metrics we send out are all from
+ // the same updates, ensuring their mutual consistency insofar as
+ // is guaranteed by the runtime/metrics package.
+ //
+ // N.B. This locking is heavy-handed, but Collect is expected to be called
+ // relatively infrequently. Also the core operation here, metrics.Read,
+ // is fast (O(tens of microseconds)) so contention should certainly be
+ // low, though channel operations and any allocations may add to that.
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ // Populate runtime/metrics sample buffer.
+ metrics.Read(c.sampleBuf)
+
+ // Collect all our runtime/metrics user chose to expose from sampleBuf (if any).
+ for i, metric := range c.rmExposedMetrics {
+ // We created samples for exposed metrics first in order, so indexes match.
+ sample := c.sampleBuf[i]
+
+ // N.B. switch on concrete type because it's significantly more efficient
+ // than checking for the Counter and Gauge interface implementations. In
+ // this case, we control all the types here.
+ switch m := metric.(type) {
+ case *counter:
+ // Guard against decreases. This should never happen, but a failure
+ // to do so will result in a panic, which is a harsh consequence for
+ // a metrics collection bug.
+ v0, v1 := m.get(), unwrapScalarRMValue(sample.Value)
+ if v1 > v0 {
+ m.Add(unwrapScalarRMValue(sample.Value) - m.get())
+ }
+ m.Collect(ch)
+ case *gauge:
+ m.Set(unwrapScalarRMValue(sample.Value))
+ m.Collect(ch)
+ case *batchHistogram:
+ m.update(sample.Value.Float64Histogram(), c.exactSumFor(sample.Name))
+ m.Collect(ch)
+ default:
+ panic("unexpected metric type")
+ }
+ }
+
+ if c.msMetricsEnabled {
+ // ms is a dummy MemStats that we populate ourselves so that we can
+ // populate the old memstats-style metrics from it.
+ var ms runtime.MemStats
+ memStatsFromRM(&ms, c.sampleMap)
+ for _, i := range c.msMetrics {
+ ch <- MustNewConstMetric(i.desc, i.valType, i.eval(&ms))
+ }
+ }
+}
+
+// unwrapScalarRMValue unwraps a runtime/metrics value that is assumed
+// to be scalar and returns the equivalent float64 value. Panics if the
+// value is not scalar.
+func unwrapScalarRMValue(v metrics.Value) float64 {
+ switch v.Kind() {
+ case metrics.KindUint64:
+ return float64(v.Uint64())
+ case metrics.KindFloat64:
+ return v.Float64()
+ case metrics.KindBad:
+ // Unsupported metric.
+ //
+ // This should never happen because we always populate our metric
+ // set from the runtime/metrics package.
+ panic("unexpected bad kind metric")
+ default:
+ // Unsupported metric kind.
+ //
+ // This should never happen because we check for this during initialization
+ // and flag and filter metrics whose kinds we don't understand.
+ panic(fmt.Sprintf("unexpected unsupported metric: %v", v.Kind()))
+ }
+}
+
+// exactSumFor takes a runtime/metrics metric name (that is assumed to
+// be of kind KindFloat64Histogram) and returns its exact sum, or 0 if no
+// exact sum is available for it.
+//
+// The runtime/metrics API for histograms doesn't currently expose exact
+// sums, but some of the other metrics are in fact exact sums of histograms.
+func (c *goCollector) exactSumFor(rmName string) float64 {
+ sumName, ok := c.rmExactSumMapForHist[rmName]
+ if !ok {
+ return 0
+ }
+ s, ok := c.sampleMap[sumName]
+ if !ok {
+ return 0
+ }
+ return unwrapScalarRMValue(s.Value)
+}
+
+func memStatsFromRM(ms *runtime.MemStats, rm map[string]*metrics.Sample) {
+ lookupOrZero := func(name string) uint64 {
+ if s, ok := rm[name]; ok {
+ return s.Value.Uint64()
+ }
+ return 0
+ }
+
+ // Currently, MemStats adds tiny alloc count to both Mallocs AND Frees.
+ // The reason for this is that MemStats couldn't be extended at the time
+ // but there was a desire to have Mallocs at least be a little more representative,
+ // while having Mallocs - Frees still represent a live object count.
+ // Unfortunately, MemStats doesn't actually export a large allocation count,
+ // so it's impossible to pull this number out directly.
+ tinyAllocs := lookupOrZero(goGCHeapTinyAllocsObjects)
+ ms.Mallocs = lookupOrZero(goGCHeapAllocsObjects) + tinyAllocs
+ ms.Frees = lookupOrZero(goGCHeapFreesObjects) + tinyAllocs
+
+ ms.TotalAlloc = lookupOrZero(goGCHeapAllocsBytes)
+ ms.Sys = lookupOrZero(goMemoryClassesTotalBytes)
+ ms.Lookups = 0 // Already always zero.
+ ms.HeapAlloc = lookupOrZero(goMemoryClassesHeapObjectsBytes)
+ ms.Alloc = ms.HeapAlloc
+ ms.HeapInuse = ms.HeapAlloc + lookupOrZero(goMemoryClassesHeapUnusedBytes)
+ ms.HeapReleased = lookupOrZero(goMemoryClassesHeapReleasedBytes)
+ ms.HeapIdle = ms.HeapReleased + lookupOrZero(goMemoryClassesHeapFreeBytes)
+ ms.HeapSys = ms.HeapInuse + ms.HeapIdle
+ ms.HeapObjects = lookupOrZero(goGCHeapObjects)
+ ms.StackInuse = lookupOrZero(goMemoryClassesHeapStacksBytes)
+ ms.StackSys = ms.StackInuse + lookupOrZero(goMemoryClassesOSStacksBytes)
+ ms.MSpanInuse = lookupOrZero(goMemoryClassesMetadataMSpanInuseBytes)
+ ms.MSpanSys = ms.MSpanInuse + lookupOrZero(goMemoryClassesMetadataMSPanFreeBytes)
+ ms.MCacheInuse = lookupOrZero(goMemoryClassesMetadataMCacheInuseBytes)
+ ms.MCacheSys = ms.MCacheInuse + lookupOrZero(goMemoryClassesMetadataMCacheFreeBytes)
+ ms.BuckHashSys = lookupOrZero(goMemoryClassesProfilingBucketsBytes)
+ ms.GCSys = lookupOrZero(goMemoryClassesMetadataOtherBytes)
+ ms.OtherSys = lookupOrZero(goMemoryClassesOtherBytes)
+ ms.NextGC = lookupOrZero(goGCHeapGoalBytes)
+
+ // N.B. GCCPUFraction is intentionally omitted. This metric is not useful,
+ // and often misleading due to the fact that it's an average over the lifetime
+ // of the process.
+ // See https://github.com/prometheus/client_golang/issues/842#issuecomment-861812034
+ // for more details.
+ ms.GCCPUFraction = 0
+}
+
+// batchHistogram is a mutable histogram that is updated
+// in batches.
+type batchHistogram struct {
+ selfCollector
+
+ // Static fields updated only once.
+ desc *Desc
+ hasSum bool
+
+ // Because this histogram operates in batches, it just uses a
+ // single mutex for everything. updates are always serialized
+ // but Write calls may operate concurrently with updates.
+ // Contention between these two sources should be rare.
+ mu sync.Mutex
+ buckets []float64 // Inclusive lower bounds, like runtime/metrics.
+ counts []uint64
+ sum float64 // Used if hasSum is true.
+}
+
+// newBatchHistogram creates a new batch histogram value with the given
+// Desc, buckets, and whether or not it has an exact sum available.
+//
+// buckets must always be from the runtime/metrics package, following
+// the same conventions.
+func newBatchHistogram(desc *Desc, buckets []float64, hasSum bool) *batchHistogram {
+ // We need to remove -Inf values. runtime/metrics keeps them around.
+ // But a -Inf bucket is not allowed for Prometheus histograms.
+ if buckets[0] == math.Inf(-1) {
+ buckets = buckets[1:]
+ }
+ h := &batchHistogram{
+ desc: desc,
+ buckets: buckets,
+ // Because buckets follows runtime/metrics conventions, there's
+ // 1 more value in the buckets list than there are buckets represented,
+ // because in runtime/metrics, the bucket values represent *boundaries*,
+ // and non-Inf boundaries are inclusive lower bounds for that bucket.
+ counts: make([]uint64, len(buckets)-1),
+ hasSum: hasSum,
+ }
+ h.init(h)
+ return h
+}
+
+// update updates the batchHistogram from a runtime/metrics histogram.
+//
+// sum must be provided if the batchHistogram was created to have an exact sum.
+// h.buckets must be a strict subset of his.Buckets.
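+//
+// For example, if his.Buckets is [0, 1, 2, 4] with counts [3, 5, 7] and
+// h.buckets is [0, 2, 4] (a subset sharing the boundaries 0, 2, and 4), the
+// first two source buckets are folded into the first target bucket, so
+// h.counts becomes [8, 7].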
+func (h *batchHistogram) update(his *metrics.Float64Histogram, sum float64) {
+ counts, buckets := his.Counts, his.Buckets
+
+ h.mu.Lock()
+ defer h.mu.Unlock()
+
+ // Clear buckets.
+ for i := range h.counts {
+ h.counts[i] = 0
+ }
+ // Copy and reduce buckets.
+ var j int
+ for i, count := range counts {
+ h.counts[j] += count
+ if buckets[i+1] == h.buckets[j+1] {
+ j++
+ }
+ }
+ if h.hasSum {
+ h.sum = sum
+ }
+}
+
+func (h *batchHistogram) Desc() *Desc {
+ return h.desc
+}
+
+func (h *batchHistogram) Write(out *dto.Metric) error {
+ h.mu.Lock()
+ defer h.mu.Unlock()
+
+ sum := float64(0)
+ if h.hasSum {
+ sum = h.sum
+ }
+ dtoBuckets := make([]*dto.Bucket, 0, len(h.counts))
+ totalCount := uint64(0)
+ for i, count := range h.counts {
+ totalCount += count
+ if !h.hasSum {
+ if count != 0 {
+ // N.B. This computed sum is an underestimate.
+ sum += h.buckets[i] * float64(count)
+ }
+ }
+
+ // Skip the +Inf bucket, but only for the bucket list.
+ // It must still count for sum and totalCount.
+ if math.IsInf(h.buckets[i+1], 1) {
+ break
+ }
+ // Float64Histogram's upper bound is exclusive, so make it inclusive
+ // by obtaining the next float64 value down, in order.
+ upperBound := math.Nextafter(h.buckets[i+1], h.buckets[i])
+ dtoBuckets = append(dtoBuckets, &dto.Bucket{
+ CumulativeCount: proto.Uint64(totalCount),
+ UpperBound: proto.Float64(upperBound),
+ })
+ }
+ out.Histogram = &dto.Histogram{
+ Bucket: dtoBuckets,
+ SampleCount: proto.Uint64(totalCount),
+ SampleSum: proto.Float64(sum),
+ }
+ return nil
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go
new file mode 100644
index 0000000..519db34
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go
@@ -0,0 +1,1837 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "fmt"
+ "math"
+ "runtime"
+ "sort"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ dto "github.com/prometheus/client_model/go"
+
+ "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/types/known/timestamppb"
+)
+
+// nativeHistogramBounds for the frac of observed values. Only relevant for
+// schema > 0. The position in the slice is the schema. (0 is never used, just
+// here for convenience of using the schema directly as the index.)
+//
+// TODO(beorn7): Currently, we do a binary search into these slices. There are
+// ways to turn it into a small number of simple array lookups. It probably only
+// matters for schema 5 and beyond, but should be investigated. See this comment
+// as a starting point:
+// https://github.com/open-telemetry/opentelemetry-specification/issues/1776#issuecomment-870164310
+var nativeHistogramBounds = [][]float64{
+ // Schema "0":
+ {0.5},
+ // Schema 1:
+ {0.5, 0.7071067811865475},
+ // Schema 2:
+ {0.5, 0.5946035575013605, 0.7071067811865475, 0.8408964152537144},
+ // Schema 3:
+ {
+ 0.5, 0.5452538663326288, 0.5946035575013605, 0.6484197773255048,
+ 0.7071067811865475, 0.7711054127039704, 0.8408964152537144, 0.9170040432046711,
+ },
+ // Schema 4:
+ {
+ 0.5, 0.5221368912137069, 0.5452538663326288, 0.5693943173783458,
+ 0.5946035575013605, 0.620928906036742, 0.6484197773255048, 0.6771277734684463,
+ 0.7071067811865475, 0.7384130729697496, 0.7711054127039704, 0.805245165974627,
+ 0.8408964152537144, 0.8781260801866495, 0.9170040432046711, 0.9576032806985735,
+ },
+ // Schema 5:
+ {
+ 0.5, 0.5109485743270583, 0.5221368912137069, 0.5335702003384117,
+ 0.5452538663326288, 0.5571933712979462, 0.5693943173783458, 0.5818624293887887,
+ 0.5946035575013605, 0.6076236799902344, 0.620928906036742, 0.6345254785958666,
+ 0.6484197773255048, 0.6626183215798706, 0.6771277734684463, 0.6919549409819159,
+ 0.7071067811865475, 0.7225904034885232, 0.7384130729697496, 0.7545822137967112,
+ 0.7711054127039704, 0.7879904225539431, 0.805245165974627, 0.8228777390769823,
+ 0.8408964152537144, 0.8593096490612387, 0.8781260801866495, 0.8973545375015533,
+ 0.9170040432046711, 0.9370838170551498, 0.9576032806985735, 0.9785720620876999,
+ },
+ // Schema 6:
+ {
+ 0.5, 0.5054446430258502, 0.5109485743270583, 0.5165124395106142,
+ 0.5221368912137069, 0.5278225891802786, 0.5335702003384117, 0.5393803988785598,
+ 0.5452538663326288, 0.5511912916539204, 0.5571933712979462, 0.5632608093041209,
+ 0.5693943173783458, 0.5755946149764913, 0.5818624293887887, 0.5881984958251406,
+ 0.5946035575013605, 0.6010783657263515, 0.6076236799902344, 0.6142402680534349,
+ 0.620928906036742, 0.6276903785123455, 0.6345254785958666, 0.6414350080393891,
+ 0.6484197773255048, 0.6554806057623822, 0.6626183215798706, 0.6698337620266515,
+ 0.6771277734684463, 0.6845012114872953, 0.6919549409819159, 0.6994898362691555,
+ 0.7071067811865475, 0.7148066691959849, 0.7225904034885232, 0.7304588970903234,
+ 0.7384130729697496, 0.7464538641456323, 0.7545822137967112, 0.762799075372269,
+ 0.7711054127039704, 0.7795022001189185, 0.7879904225539431, 0.7965710756711334,
+ 0.805245165974627, 0.8140137109286738, 0.8228777390769823, 0.8318382901633681,
+ 0.8408964152537144, 0.8500531768592616, 0.8593096490612387, 0.8686669176368529,
+ 0.8781260801866495, 0.8876882462632604, 0.8973545375015533, 0.9071260877501991,
+ 0.9170040432046711, 0.9269895625416926, 0.9370838170551498, 0.9472879907934827,
+ 0.9576032806985735, 0.9680308967461471, 0.9785720620876999, 0.9892280131939752,
+ },
+ // Schema 7:
+ {
+ 0.5, 0.5027149505564014, 0.5054446430258502, 0.5081891574554764,
+ 0.5109485743270583, 0.5137229745593818, 0.5165124395106142, 0.5193170509806894,
+ 0.5221368912137069, 0.5249720429003435, 0.5278225891802786, 0.5306886136446309,
+ 0.5335702003384117, 0.5364674337629877, 0.5393803988785598, 0.5423091811066545,
+ 0.5452538663326288, 0.5482145409081883, 0.5511912916539204, 0.5541842058618393,
+ 0.5571933712979462, 0.5602188762048033, 0.5632608093041209, 0.5663192597993595,
+ 0.5693943173783458, 0.572486072215902, 0.5755946149764913, 0.5787200368168754,
+ 0.5818624293887887, 0.585021884841625, 0.5881984958251406, 0.5913923554921704,
+ 0.5946035575013605, 0.5978321960199137, 0.6010783657263515, 0.6043421618132907,
+ 0.6076236799902344, 0.6109230164863786, 0.6142402680534349, 0.6175755319684665,
+ 0.620928906036742, 0.6243004885946023, 0.6276903785123455, 0.6310986751971253,
+ 0.6345254785958666, 0.637970889198196, 0.6414350080393891, 0.6449179367033329,
+ 0.6484197773255048, 0.6519406325959679, 0.6554806057623822, 0.659039800633032,
+ 0.6626183215798706, 0.6662162735415805, 0.6698337620266515, 0.6734708931164728,
+ 0.6771277734684463, 0.6808045103191123, 0.6845012114872953, 0.688217985377265,
+ 0.6919549409819159, 0.6957121878859629, 0.6994898362691555, 0.7032879969095076,
+ 0.7071067811865475, 0.7109463010845827, 0.7148066691959849, 0.718687998724491,
+ 0.7225904034885232, 0.7265139979245261, 0.7304588970903234, 0.7344252166684908,
+ 0.7384130729697496, 0.7424225829363761, 0.7464538641456323, 0.7505070348132126,
+ 0.7545822137967112, 0.7586795205991071, 0.762799075372269, 0.7669409989204777,
+ 0.7711054127039704, 0.7752924388424999, 0.7795022001189185, 0.7837348199827764,
+ 0.7879904225539431, 0.7922691326262467, 0.7965710756711334, 0.8008963778413465,
+ 0.805245165974627, 0.8096175675974316, 0.8140137109286738, 0.8184337248834821,
+ 0.8228777390769823, 0.8273458838280969, 0.8318382901633681, 0.8363550898207981,
+ 0.8408964152537144, 0.8454623996346523, 0.8500531768592616, 0.8546688815502312,
+ 0.8593096490612387, 0.8639756154809185, 0.8686669176368529, 0.8733836930995842,
+ 0.8781260801866495, 0.8828942179666361, 0.8876882462632604, 0.8925083056594671,
+ 0.8973545375015533, 0.9022270839033115, 0.9071260877501991, 0.9120516927035263,
+ 0.9170040432046711, 0.9219832844793128, 0.9269895625416926, 0.9320230241988943,
+ 0.9370838170551498, 0.9421720895161669, 0.9472879907934827, 0.9524316709088368,
+ 0.9576032806985735, 0.9628029718180622, 0.9680308967461471, 0.9732872087896164,
+ 0.9785720620876999, 0.9838856116165875, 0.9892280131939752, 0.9945994234836328,
+ },
+ // Schema 8:
+ {
+ 0.5, 0.5013556375251013, 0.5027149505564014, 0.5040779490592088,
+ 0.5054446430258502, 0.5068150424757447, 0.5081891574554764, 0.509566998038869,
+ 0.5109485743270583, 0.5123338964485679, 0.5137229745593818, 0.5151158188430205,
+ 0.5165124395106142, 0.5179128468009786, 0.5193170509806894, 0.520725062344158,
+ 0.5221368912137069, 0.5235525479396449, 0.5249720429003435, 0.526395386502313,
+ 0.5278225891802786, 0.5292536613972564, 0.5306886136446309, 0.5321274564422321,
+ 0.5335702003384117, 0.5350168559101208, 0.5364674337629877, 0.5379219445313954,
+ 0.5393803988785598, 0.5408428074966075, 0.5423091811066545, 0.5437795304588847,
+ 0.5452538663326288, 0.5467321995364429, 0.5482145409081883, 0.549700901315111,
+ 0.5511912916539204, 0.5526857228508706, 0.5541842058618393, 0.5556867516724088,
+ 0.5571933712979462, 0.5587040757836845, 0.5602188762048033, 0.5617377836665098,
+ 0.5632608093041209, 0.564787964283144, 0.5663192597993595, 0.5678547070789026,
+ 0.5693943173783458, 0.5709381019847808, 0.572486072215902, 0.5740382394200894,
+ 0.5755946149764913, 0.5771552102951081, 0.5787200368168754, 0.5802891060137493,
+ 0.5818624293887887, 0.5834400184762408, 0.585021884841625, 0.5866080400818185,
+ 0.5881984958251406, 0.5897932637314379, 0.5913923554921704, 0.5929957828304968,
+ 0.5946035575013605, 0.5962156912915756, 0.5978321960199137, 0.5994530835371903,
+ 0.6010783657263515, 0.6027080545025619, 0.6043421618132907, 0.6059806996384005,
+ 0.6076236799902344, 0.6092711149137041, 0.6109230164863786, 0.6125793968185725,
+ 0.6142402680534349, 0.6159056423670379, 0.6175755319684665, 0.6192499490999082,
+ 0.620928906036742, 0.622612415087629, 0.6243004885946023, 0.6259931389331581,
+ 0.6276903785123455, 0.6293922197748583, 0.6310986751971253, 0.6328097572894031,
+ 0.6345254785958666, 0.6362458516947014, 0.637970889198196, 0.6397006037528346,
+ 0.6414350080393891, 0.6431741147730128, 0.6449179367033329, 0.6466664866145447,
+ 0.6484197773255048, 0.6501778216898253, 0.6519406325959679, 0.6537082229673385,
+ 0.6554806057623822, 0.6572577939746774, 0.659039800633032, 0.6608266388015788,
+ 0.6626183215798706, 0.6644148621029772, 0.6662162735415805, 0.6680225691020727,
+ 0.6698337620266515, 0.6716498655934177, 0.6734708931164728, 0.6752968579460171,
+ 0.6771277734684463, 0.6789636531064505, 0.6808045103191123, 0.6826503586020058,
+ 0.6845012114872953, 0.6863570825438342, 0.688217985377265, 0.690083933630119,
+ 0.6919549409819159, 0.6938310211492645, 0.6957121878859629, 0.6975984549830999,
+ 0.6994898362691555, 0.7013863456101023, 0.7032879969095076, 0.7051948041086352,
+ 0.7071067811865475, 0.7090239421602076, 0.7109463010845827, 0.7128738720527471,
+ 0.7148066691959849, 0.7167447066838943, 0.718687998724491, 0.7206365595643126,
+ 0.7225904034885232, 0.7245495448210174, 0.7265139979245261, 0.7284837772007218,
+ 0.7304588970903234, 0.7324393720732029, 0.7344252166684908, 0.7364164454346837,
+ 0.7384130729697496, 0.7404151139112358, 0.7424225829363761, 0.7444354947621984,
+ 0.7464538641456323, 0.7484777058836176, 0.7505070348132126, 0.7525418658117031,
+ 0.7545822137967112, 0.7566280937263048, 0.7586795205991071, 0.7607365094544071,
+ 0.762799075372269, 0.7648672334736434, 0.7669409989204777, 0.7690203869158282,
+ 0.7711054127039704, 0.7731960915705107, 0.7752924388424999, 0.7773944698885442,
+ 0.7795022001189185, 0.7816156449856788, 0.7837348199827764, 0.7858597406461707,
+ 0.7879904225539431, 0.7901268813264122, 0.7922691326262467, 0.7944171921585818,
+ 0.7965710756711334, 0.7987307989543135, 0.8008963778413465, 0.8030678282083853,
+ 0.805245165974627, 0.8074284071024302, 0.8096175675974316, 0.8118126635086642,
+ 0.8140137109286738, 0.8162207259936375, 0.8184337248834821, 0.820652723822003,
+ 0.8228777390769823, 0.8251087869603088, 0.8273458838280969, 0.8295890460808079,
+ 0.8318382901633681, 0.8340936325652911, 0.8363550898207981, 0.8386226785089391,
+ 0.8408964152537144, 0.8431763167241966, 0.8454623996346523, 0.8477546807446661,
+ 0.8500531768592616, 0.8523579048290255, 0.8546688815502312, 0.8569861239649629,
+ 0.8593096490612387, 0.8616394738731368, 0.8639756154809185, 0.8663180910111553,
+ 0.8686669176368529, 0.871022112577578, 0.8733836930995842, 0.8757516765159389,
+ 0.8781260801866495, 0.8805069215187917, 0.8828942179666361, 0.8852879870317771,
+ 0.8876882462632604, 0.890095013257712, 0.8925083056594671, 0.8949281411607002,
+ 0.8973545375015533, 0.8997875124702672, 0.9022270839033115, 0.9046732696855155,
+ 0.9071260877501991, 0.909585556079304, 0.9120516927035263, 0.9145245157024483,
+ 0.9170040432046711, 0.9194902933879467, 0.9219832844793128, 0.9244830347552253,
+ 0.9269895625416926, 0.92950288621441, 0.9320230241988943, 0.9345499949706191,
+ 0.9370838170551498, 0.93962450902828, 0.9421720895161669, 0.9447265771954693,
+ 0.9472879907934827, 0.9498563490882775, 0.9524316709088368, 0.9550139751351947,
+ 0.9576032806985735, 0.9601996065815236, 0.9628029718180622, 0.9654133954938133,
+ 0.9680308967461471, 0.9706554947643201, 0.9732872087896164, 0.9759260581154889,
+ 0.9785720620876999, 0.9812252401044634, 0.9838856116165875, 0.9865531961276168,
+ 0.9892280131939752, 0.9919100824251095, 0.9945994234836328, 0.9972960560854698,
+ },
+}
+
+// The nativeHistogramBounds above can be generated with the code below.
+//
+// TODO(beorn7): It's tempting to actually use `go generate` to generate the
+// code above. However, this could lead to slightly different numbers on
+// different architectures. We still need to come to terms if we are fine with
+// that, or if we might prefer to specify precise numbers in the standard.
+//
+// var nativeHistogramBounds [][]float64 = make([][]float64, 9)
+//
+// func init() {
+// // Populate nativeHistogramBounds.
+// numBuckets := 1
+// for i := range nativeHistogramBounds {
+// bounds := []float64{0.5}
+// factor := math.Exp2(math.Exp2(float64(-i)))
+// for j := 0; j < numBuckets-1; j++ {
+// var bound float64
+// if (j+1)%2 == 0 {
+// // Use previously calculated value for increased precision.
+// bound = nativeHistogramBounds[i-1][j/2+1]
+// } else {
+// bound = bounds[j] * factor
+// }
+// bounds = append(bounds, bound)
+// }
+// numBuckets *= 2
+// nativeHistogramBounds[i] = bounds
+// }
+// }
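+//
+// As a quick sanity check of the table above: the factor for schema n is
+// 2^(2^-n), so schema 1 uses sqrt(2) ≈ 1.41421 and schema 2 uses 2^(1/4) ≈
+// 1.18921. Multiplying repeatedly from 0.5 reproduces the schema 2 row:
+// 0.5, 0.5946..., 0.7071..., 0.8409....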
+
+// A Histogram counts individual observations from an event or sample stream in
+// configurable static buckets (or in dynamic sparse buckets as part of the
+// experimental Native Histograms, see below for more details). Similar to a
+// Summary, it also provides a sum of observations and an observation count.
+//
+// On the Prometheus server, quantiles can be calculated from a Histogram using
+// the histogram_quantile PromQL function.
+//
+// Note that Histograms, in contrast to Summaries, can be aggregated in PromQL
+// (see the documentation for detailed procedures). However, Histograms require
+// the user to pre-define suitable buckets, and they are in general less
+// accurate. (Both problems are addressed by the experimental Native
+// Histograms. To use them, configure a NativeHistogramBucketFactor in the
+// HistogramOpts. They also require a Prometheus server v2.40+ with the
+// corresponding feature flag enabled.)
+//
+// The Observe method of a Histogram has a very low performance overhead in
+// comparison with the Observe method of a Summary.
+//
+// To create Histogram instances, use NewHistogram.
+type Histogram interface {
+ Metric
+ Collector
+
+ // Observe adds a single observation to the histogram. Observations are
+ // usually positive or zero. Negative observations are accepted but
+ // prevent current versions of Prometheus from properly detecting
+ // counter resets in the sum of observations. (The experimental Native
+ // Histograms handle negative observations properly.) See
+ // https://prometheus.io/docs/practices/histograms/#count-and-sum-of-observations
+ // for details.
+ Observe(float64)
+}
+
+// bucketLabel is used for the label that defines the upper bound of a
+// bucket of a histogram ("le" -> "less or equal").
+const bucketLabel = "le"
+
+// DefBuckets are the default Histogram buckets. The default buckets are
+// tailored to broadly measure the response time (in seconds) of a network
+// service. Most likely, however, you will be required to define buckets
+// customized to your use case.
+var DefBuckets = []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10}
+
+// DefNativeHistogramZeroThreshold is the default value for
+// NativeHistogramZeroThreshold in the HistogramOpts.
+//
+// The value is 2^-128 (or 0.5*2^-127 in the actual IEEE 754 representation),
+// which is a bucket boundary at all possible resolutions.
+const DefNativeHistogramZeroThreshold = 2.938735877055719e-39
+
+// NativeHistogramZeroThresholdZero can be used as NativeHistogramZeroThreshold
+// in the HistogramOpts to create a zero bucket of width zero, i.e. a zero
+// bucket that only receives observations of precisely zero.
+const NativeHistogramZeroThresholdZero = -1
+
+var errBucketLabelNotAllowed = fmt.Errorf(
+ "%q is not allowed as label name in histograms", bucketLabel,
+)
+
+// LinearBuckets creates 'count' regular buckets, each 'width' wide, where the
+// lowest bucket has an upper bound of 'start'. The final +Inf bucket is not
+// counted and not included in the returned slice. The returned slice is meant
+// to be used for the Buckets field of HistogramOpts.
+//
+// The function panics if 'count' is zero or negative.
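+//
+// For example, LinearBuckets(1, 2.5, 5) returns
+//
+//    []float64{1, 3.5, 6, 8.5, 11}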
+func LinearBuckets(start, width float64, count int) []float64 {
+ if count < 1 {
+ panic("LinearBuckets needs a positive count")
+ }
+ buckets := make([]float64, count)
+ for i := range buckets {
+ buckets[i] = start
+ start += width
+ }
+ return buckets
+}
+
+// ExponentialBuckets creates 'count' regular buckets, where the lowest bucket
+// has an upper bound of 'start' and each following bucket's upper bound is
+// 'factor' times the previous bucket's upper bound. The final +Inf bucket is
+// not counted and not included in the returned slice. The returned slice is
+// meant to be used for the Buckets field of HistogramOpts.
+//
+// The function panics if 'count' is 0 or negative, if 'start' is 0 or negative,
+// or if 'factor' is less than or equal to 1.
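+//
+// For example, ExponentialBuckets(1, 2, 5) returns
+//
+//    []float64{1, 2, 4, 8, 16}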
+func ExponentialBuckets(start, factor float64, count int) []float64 {
+ if count < 1 {
+ panic("ExponentialBuckets needs a positive count")
+ }
+ if start <= 0 {
+ panic("ExponentialBuckets needs a positive start value")
+ }
+ if factor <= 1 {
+ panic("ExponentialBuckets needs a factor greater than 1")
+ }
+ buckets := make([]float64, count)
+ for i := range buckets {
+ buckets[i] = start
+ start *= factor
+ }
+ return buckets
+}
+
+// ExponentialBucketsRange creates 'count' buckets, where the lowest bucket is
+// 'min' and the highest bucket is 'max'. The final +Inf bucket is not counted
+// and not included in the returned slice. The returned slice is meant to be
+// used for the Buckets field of HistogramOpts.
+//
+// The function panics if 'count' is 0 or negative, or if 'min' is 0 or negative.
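+//
+// For example, ExponentialBucketsRange(1, 16, 5) solves for a growth factor
+// of (16/1)^(1/4) = 2 and returns
+//
+//    []float64{1, 2, 4, 8, 16}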
+func ExponentialBucketsRange(min, max float64, count int) []float64 {
+ if count < 1 {
+ panic("ExponentialBucketsRange count needs a positive count")
+ }
+ if min <= 0 {
+ panic("ExponentialBucketsRange min needs to be greater than 0")
+ }
+
+ // Formula for exponential buckets.
+ // max = min*growthFactor^(bucketCount-1)
+
+ // We know max/min and highest bucket. Solve for growthFactor.
+ growthFactor := math.Pow(max/min, 1.0/float64(count-1))
+
+ // Now that we know growthFactor, solve for each bucket.
+ buckets := make([]float64, count)
+ for i := 1; i <= count; i++ {
+ buckets[i-1] = min * math.Pow(growthFactor, float64(i-1))
+ }
+ return buckets
+}
+
+// HistogramOpts bundles the options for creating a Histogram metric. It is
+// mandatory to set Name to a non-empty string. All other fields are optional
+// and can safely be left at their zero value, although it is strongly
+// encouraged to set a Help string.
+type HistogramOpts struct {
+ // Namespace, Subsystem, and Name are components of the fully-qualified
+ // name of the Histogram (created by joining these components with
+ // "_"). Only Name is mandatory, the others merely help structuring the
+ // name. Note that the fully-qualified name of the Histogram must be a
+ // valid Prometheus metric name.
+ Namespace string
+ Subsystem string
+ Name string
+
+ // Help provides information about this Histogram.
+ //
+ // Metrics with the same fully-qualified name must have the same Help
+ // string.
+ Help string
+
+ // ConstLabels are used to attach fixed labels to this metric. Metrics
+ // with the same fully-qualified name must have the same label names in
+ // their ConstLabels.
+ //
+ // ConstLabels are only used rarely. In particular, do not use them to
+ // attach the same labels to all your metrics. Those use cases are
+ // better covered by target labels set by the scraping Prometheus
+ // server, or by one specific metric (e.g. a build_info or a
+ // machine_role metric). See also
+ // https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels-not-static-scraped-labels
+ ConstLabels Labels
+
+ // Buckets defines the buckets into which observations are counted. Each
+ // element in the slice is the upper inclusive bound of a bucket. The
+ // values must be sorted in strictly increasing order. There is no need
+ // to add a highest bucket with +Inf bound, it will be added
+ // implicitly. If Buckets is left as nil or set to a slice of length
+ // zero, it is replaced by default buckets. The default buckets are
+ // DefBuckets if no buckets for a native histogram (see below) are used,
+ // otherwise the default is no buckets. (In other words, if you want to
+ // use both regular buckets and buckets for a native histogram, you have
+ // to define the regular buckets here explicitly.)
+ Buckets []float64
+
+ // If NativeHistogramBucketFactor is greater than one, so-called sparse
+ // buckets are used (in addition to the regular buckets, if defined
+ // above). A Histogram with sparse buckets will be ingested as a Native
+ // Histogram by a Prometheus server with that feature enabled (requires
+ // Prometheus v2.40+). Sparse buckets are exponential buckets covering
+ // the whole float64 range (with the exception of the “zero” bucket, see
+ // NativeHistogramZeroThreshold below). From any one bucket to the next,
+ // the width of the bucket grows by a constant
+ // factor. NativeHistogramBucketFactor provides an upper bound for this
+ // factor (exception see below). The smaller
+ // NativeHistogramBucketFactor, the more buckets will be used and thus
+ // the more costly the histogram will become. A generally good trade-off
+ // between cost and accuracy is a value of 1.1 (each bucket is at most
+ // 10% wider than the previous one), which will result in each power of
+ // two divided into 8 buckets (e.g. there will be 8 buckets between 1
+ // and 2, same as between 2 and 4, and 4 and 8, etc.).
+ //
+ // Details about the actually used factor: The factor is calculated as
+ // 2^(2^-n), where n is an integer number between (and including) -4 and
+ // 8. n is chosen so that the resulting factor is the largest that is
+ // still smaller or equal to NativeHistogramBucketFactor. Note that the
+ // smallest possible factor is therefore approx. 1.00271 (i.e. 2^(2^-8)
+ // ). If NativeHistogramBucketFactor is greater than 1 but smaller than
+ // 2^(2^-8), then the actually used factor is still 2^(2^-8) even though
+ // it is larger than the provided NativeHistogramBucketFactor.
+ //
+ // NOTE: Native Histograms are still an experimental feature. Their
+ // behavior might still change without a major version
+ // bump. Subsequently, all NativeHistogram... options here might still
+ // change their behavior or name (or might completely disappear) without
+ // a major version bump.
+ NativeHistogramBucketFactor float64
+ // All observations with an absolute value of less or equal
+ // NativeHistogramZeroThreshold are accumulated into a “zero” bucket.
+ // For best results, this should be close to a bucket boundary. This is
+ // usually the case if picking a power of two. If
+ // NativeHistogramZeroThreshold is left at zero,
+ // DefNativeHistogramZeroThreshold is used as the threshold. To
+ // configure a zero bucket with an actual threshold of zero (i.e. only
+ // observations of precisely zero will go into the zero bucket), set
+ // NativeHistogramZeroThreshold to the NativeHistogramZeroThresholdZero
+ // constant (or any negative float value).
+ NativeHistogramZeroThreshold float64
+
+ // The next three fields define a strategy to limit the number of
+ // populated sparse buckets. If NativeHistogramMaxBucketNumber is left
+ // at zero, the number of buckets is not limited. (Note that this might
+ // lead to unbounded memory consumption if the values observed by the
+ // Histogram are sufficiently wide-spread. In particular, this could be
+ // used as a DoS attack vector. Where the observed values depend on
+ // external inputs, it is highly recommended to set a
+ // NativeHistogramMaxBucketNumber.) Once the set
+ // NativeHistogramMaxBucketNumber is exceeded, the following strategy is
+ // enacted:
+ // - First, if the last reset (or the creation) of the histogram is at
+ // least NativeHistogramMinResetDuration ago, then the whole
+ // histogram is reset to its initial state (including regular
+ // buckets).
+ // - If less time has passed, or if NativeHistogramMinResetDuration is
+ // zero, no reset is performed. Instead, the zero threshold is
+ // increased sufficiently to reduce the number of buckets to or below
+ // NativeHistogramMaxBucketNumber, but not to more than
+ // NativeHistogramMaxZeroThreshold. Thus, if
+ // NativeHistogramMaxZeroThreshold is already at or below the current
+ // zero threshold, nothing happens at this step.
+ // - After that, if the number of buckets still exceeds
+ // NativeHistogramMaxBucketNumber, the resolution of the histogram is
+ // reduced by doubling the width of the sparse buckets (up to a
+ // growth factor between one bucket to the next of 2^(2^4) = 65536,
+ // see above).
+ // - Any increased zero threshold or reduced resolution is reset back
+ // to their original values once NativeHistogramMinResetDuration has
+ // passed (since the last reset or the creation of the histogram).
+ NativeHistogramMaxBucketNumber uint32
+ NativeHistogramMinResetDuration time.Duration
+ NativeHistogramMaxZeroThreshold float64
+
+ // NativeHistogramMaxExemplars limits the number of exemplars
+ // that are kept in memory for each native histogram. If you leave it at
+ // zero, a default value of 10 is used. If no exemplars should be kept specifically
+ // for native histograms, set it to a negative value. (Scrapers can
+ // still use the exemplars exposed for classic buckets, which are managed
+ // independently.)
+ NativeHistogramMaxExemplars int
+ // NativeHistogramExemplarTTL is only checked once
+ // NativeHistogramMaxExemplars is exceeded. In that case, the
+ // oldest exemplar is removed if it is older than NativeHistogramExemplarTTL.
+ // Otherwise, the older exemplar in the pair of exemplars that are closest
+ // together (on an exponential scale) is removed.
+ // If NativeHistogramExemplarTTL is left at its zero value, a default value of
+ // 5m is used. To always delete the oldest exemplar, set it to a negative value.
+ NativeHistogramExemplarTTL time.Duration
+
+ // now is for testing purposes, by default it's time.Now.
+ now func() time.Time
+
+ // afterFunc is for testing purposes, by default it's time.AfterFunc.
+ afterFunc func(time.Duration, func()) *time.Timer
+}
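+
+// As a minimal sketch of how the NativeHistogram... fields above fit together
+// (the metric name and the limits chosen here are purely illustrative), an
+// options value such as
+//
+//    HistogramOpts{
+//        Name:                            "request_duration_seconds",
+//        Help:                            "Duration of requests in seconds.",
+//        NativeHistogramBucketFactor:     1.1,
+//        NativeHistogramMaxBucketNumber:  100,
+//        NativeHistogramMinResetDuration: time.Hour,
+//    }
+//
+// enables sparse buckets that grow by at most 10% from one bucket to the
+// next. The factor actually used is 2^(2^-3) ≈ 1.0905 (8 buckets per power
+// of two), the largest factor of the form 2^(2^-n) that does not exceed 1.1.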
+
+// HistogramVecOpts bundles the options to create a HistogramVec metric.
+// It is mandatory to set HistogramOpts, see there for mandatory fields. VariableLabels
+// is optional and can safely be left to its default value.
+type HistogramVecOpts struct {
+ HistogramOpts
+
+ // VariableLabels are used to partition the metric vector by the given set
+ // of labels. Each label value will be constrained with the optional Constraint
+ // function, if provided.
+ VariableLabels ConstrainableLabels
+}
+
+// NewHistogram creates a new Histogram based on the provided HistogramOpts. It
+// panics if the buckets in HistogramOpts are not in strictly increasing order.
+//
+// The returned implementation also implements ExemplarObserver. It is safe to
+// perform the corresponding type assertion. Exemplars are tracked separately
+// for each bucket.
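+//
+// A minimal usage sketch from an importing package (the metric name, help
+// text, and observed value are purely illustrative):
+//
+//    reqDuration := prometheus.NewHistogram(prometheus.HistogramOpts{
+//        Name:    "http_request_duration_seconds",
+//        Help:    "Duration of HTTP requests in seconds.",
+//        Buckets: prometheus.DefBuckets,
+//    })
+//    prometheus.MustRegister(reqDuration)
+//    reqDuration.Observe(0.42)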
+func NewHistogram(opts HistogramOpts) Histogram {
+ return newHistogram(
+ NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ nil,
+ opts.ConstLabels,
+ ),
+ opts,
+ )
+}
+
+func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogram {
+ if len(desc.variableLabels.names) != len(labelValues) {
+ panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.names, labelValues))
+ }
+
+ for _, n := range desc.variableLabels.names {
+ if n == bucketLabel {
+ panic(errBucketLabelNotAllowed)
+ }
+ }
+ for _, lp := range desc.constLabelPairs {
+ if lp.GetName() == bucketLabel {
+ panic(errBucketLabelNotAllowed)
+ }
+ }
+
+ if opts.now == nil {
+ opts.now = time.Now
+ }
+ if opts.afterFunc == nil {
+ opts.afterFunc = time.AfterFunc
+ }
+
+ h := &histogram{
+ desc: desc,
+ upperBounds: opts.Buckets,
+ labelPairs: MakeLabelPairs(desc, labelValues),
+ nativeHistogramMaxBuckets: opts.NativeHistogramMaxBucketNumber,
+ nativeHistogramMaxZeroThreshold: opts.NativeHistogramMaxZeroThreshold,
+ nativeHistogramMinResetDuration: opts.NativeHistogramMinResetDuration,
+ lastResetTime: opts.now(),
+ now: opts.now,
+ afterFunc: opts.afterFunc,
+ }
+ if len(h.upperBounds) == 0 && opts.NativeHistogramBucketFactor <= 1 {
+ h.upperBounds = DefBuckets
+ }
+ if opts.NativeHistogramBucketFactor <= 1 {
+ h.nativeHistogramSchema = math.MinInt32 // To mark that there are no sparse buckets.
+ } else {
+ switch {
+ case opts.NativeHistogramZeroThreshold > 0:
+ h.nativeHistogramZeroThreshold = opts.NativeHistogramZeroThreshold
+ case opts.NativeHistogramZeroThreshold == 0:
+ h.nativeHistogramZeroThreshold = DefNativeHistogramZeroThreshold
+ } // Leave h.nativeHistogramZeroThreshold at 0 otherwise.
+ h.nativeHistogramSchema = pickSchema(opts.NativeHistogramBucketFactor)
+ h.nativeExemplars = makeNativeExemplars(opts.NativeHistogramExemplarTTL, opts.NativeHistogramMaxExemplars)
+ }
+ for i, upperBound := range h.upperBounds {
+ if i < len(h.upperBounds)-1 {
+ if upperBound >= h.upperBounds[i+1] {
+ panic(fmt.Errorf(
+ "histogram buckets must be in increasing order: %f >= %f",
+ upperBound, h.upperBounds[i+1],
+ ))
+ }
+ } else {
+ if math.IsInf(upperBound, +1) {
+ // The +Inf bucket is implicit. Remove it here.
+ h.upperBounds = h.upperBounds[:i]
+ }
+ }
+ }
+ // Finally we know the final length of h.upperBounds and can make buckets
+ // for both counts as well as exemplars:
+ h.counts[0] = &histogramCounts{buckets: make([]uint64, len(h.upperBounds))}
+ atomic.StoreUint64(&h.counts[0].nativeHistogramZeroThresholdBits, math.Float64bits(h.nativeHistogramZeroThreshold))
+ atomic.StoreInt32(&h.counts[0].nativeHistogramSchema, h.nativeHistogramSchema)
+ h.counts[1] = &histogramCounts{buckets: make([]uint64, len(h.upperBounds))}
+ atomic.StoreUint64(&h.counts[1].nativeHistogramZeroThresholdBits, math.Float64bits(h.nativeHistogramZeroThreshold))
+ atomic.StoreInt32(&h.counts[1].nativeHistogramSchema, h.nativeHistogramSchema)
+ h.exemplars = make([]atomic.Value, len(h.upperBounds)+1)
+
+ h.init(h) // Init self-collection.
+ return h
+}
+
+type histogramCounts struct {
+ // Order in this struct matters for the alignment required by atomic
+ // operations, see http://golang.org/pkg/sync/atomic/#pkg-note-BUG
+
+ // sumBits contains the bits of the float64 representing the sum of all
+ // observations.
+ sumBits uint64
+ count uint64
+
+ // nativeHistogramZeroBucket counts all (positive and negative)
+ // observations in the zero bucket (with an absolute value less or equal
+ // to the current threshold, see next field).
+ nativeHistogramZeroBucket uint64
+ // nativeHistogramZeroThresholdBits is the bit pattern of the current
+ // threshold for the zero bucket. It's initially equal to
+ // nativeHistogramZeroThreshold but may change according to the bucket
+ // count limitation strategy.
+ nativeHistogramZeroThresholdBits uint64
+ // nativeHistogramSchema may change over time according to the bucket
+ // count limitation strategy and therefore has to be saved here.
+ nativeHistogramSchema int32
+ // Number of (positive and negative) sparse buckets.
+ nativeHistogramBucketsNumber uint32
+
+ // Regular buckets.
+ buckets []uint64
+
+ // The sparse buckets for native histograms are implemented with a
+ // sync.Map for now. A dedicated data structure will likely be more
+ // efficient. There are separate maps for negative and positive
+ // observations. The map's value is an *int64, counting observations in
+ // that bucket. (Note that we don't use uint64 as an int64 won't
+ // overflow in practice, and working with signed numbers from the
+ // beginning simplifies the handling of deltas.) The map's key is the
+ // index of the bucket according to the used
+ // nativeHistogramSchema. Index 0 is for an upper bound of 1.
+ nativeHistogramBucketsPositive, nativeHistogramBucketsNegative sync.Map
+}
+
+// observe manages the parts of observe that only affect
+// histogramCounts. doSparse is true if sparse buckets should be done,
+// too.
+func (hc *histogramCounts) observe(v float64, bucket int, doSparse bool) {
+ if bucket < len(hc.buckets) {
+ atomic.AddUint64(&hc.buckets[bucket], 1)
+ }
+ atomicAddFloat(&hc.sumBits, v)
+ if doSparse && !math.IsNaN(v) {
+ var (
+ key int
+ schema = atomic.LoadInt32(&hc.nativeHistogramSchema)
+ zeroThreshold = math.Float64frombits(atomic.LoadUint64(&hc.nativeHistogramZeroThresholdBits))
+ bucketCreated, isInf bool
+ )
+ if math.IsInf(v, 0) {
+ // Pretend v is MaxFloat64 but later increment key by one.
+ if math.IsInf(v, +1) {
+ v = math.MaxFloat64
+ } else {
+ v = -math.MaxFloat64
+ }
+ isInf = true
+ }
+ frac, exp := math.Frexp(math.Abs(v))
+ if schema > 0 {
+ bounds := nativeHistogramBounds[schema]
+ key = sort.SearchFloat64s(bounds, frac) + (exp-1)*len(bounds)
+ } else {
+ key = exp
+ if frac == 0.5 {
+ key--
+ }
+ offset := (1 << -schema) - 1
+ key = (key + offset) >> -schema
+ }
+ if isInf {
+ key++
+ }
+ switch {
+ case v > zeroThreshold:
+ bucketCreated = addToBucket(&hc.nativeHistogramBucketsPositive, key, 1)
+ case v < -zeroThreshold:
+ bucketCreated = addToBucket(&hc.nativeHistogramBucketsNegative, key, 1)
+ default:
+ atomic.AddUint64(&hc.nativeHistogramZeroBucket, 1)
+ }
+ if bucketCreated {
+ atomic.AddUint32(&hc.nativeHistogramBucketsNumber, 1)
+ }
+ }
+ // Increment count last as we take it as a signal that the observation
+ // is complete.
+ atomic.AddUint64(&hc.count, 1)
+}
+
+type histogram struct {
+ // countAndHotIdx enables lock-free writes with use of atomic updates.
+ // The most significant bit is the hot index [0 or 1] of the count field
+ // below. Observe calls update the hot one. All remaining bits count the
+ // number of Observe calls. Observe starts by incrementing this counter,
+ // and finish by incrementing the count field in the respective
+ // histogramCounts, as a marker for completion.
+ //
+ // Calls of the Write method (which are non-mutating reads from the
+ // perspective of the histogram) swap the hot and cold counts under the
+ // writeMtx lock. A cooldown is awaited (while locked) by comparing the
+ // number of observations with the initiation count. Once they match, the
+ // last observation on the now cool one has completed. All cold fields must
+ // be merged into the new hot before releasing writeMtx.
+ //
+ // Fields with atomic access first! See alignment constraint:
+ // http://golang.org/pkg/sync/atomic/#pkg-note-BUG
+ countAndHotIdx uint64
+
+ selfCollector
+ desc *Desc
+
+ // Only used in the Write method and for sparse bucket management.
+ mtx sync.Mutex
+
+ // Two counts, one is "hot" for lock-free observations, the other is
+ // "cold" for writing out a dto.Metric. It has to be an array of
+ // pointers to guarantee 64bit alignment of the histogramCounts, see
+ // http://golang.org/pkg/sync/atomic/#pkg-note-BUG.
+ counts [2]*histogramCounts
+
+ upperBounds []float64
+ labelPairs []*dto.LabelPair
+ exemplars []atomic.Value // One more than buckets (to include +Inf), each a *dto.Exemplar.
+ nativeHistogramSchema int32 // The initial schema. Set to math.MinInt32 if no sparse buckets are used.
+ nativeHistogramZeroThreshold float64 // The initial zero threshold.
+ nativeHistogramMaxZeroThreshold float64
+ nativeHistogramMaxBuckets uint32
+ nativeHistogramMinResetDuration time.Duration
+ // lastResetTime is protected by mtx. It is also used as created timestamp.
+ lastResetTime time.Time
+ // resetScheduled is protected by mtx. It is true if a reset is
+ // scheduled for a later time (when nativeHistogramMinResetDuration has
+ // passed).
+ resetScheduled bool
+ nativeExemplars nativeExemplars
+
+ // now is for testing purposes, by default it's time.Now.
+ now func() time.Time
+
+ // afterFunc is for testing purposes, by default it's time.AfterFunc.
+ afterFunc func(time.Duration, func()) *time.Timer
+}
+
+func (h *histogram) Desc() *Desc {
+ return h.desc
+}
+
+func (h *histogram) Observe(v float64) {
+ h.observe(v, h.findBucket(v))
+}
+
+// ObserveWithExemplar should not be called in a high-frequency setting
+// for a native histogram with configured exemplars. For this case,
+// the implementation isn't lock-free and might suffer from lock contention.
+func (h *histogram) ObserveWithExemplar(v float64, e Labels) {
+ i := h.findBucket(v)
+ h.observe(v, i)
+ h.updateExemplar(v, i, e)
+}
+
+func (h *histogram) Write(out *dto.Metric) error {
+ // For simplicity, we protect this whole method by a mutex. It is not in
+ // the hot path, i.e. Observe is called much more often than Write. The
+ // complication of making Write lock-free isn't worth it, if possible at
+ // all.
+ h.mtx.Lock()
+ defer h.mtx.Unlock()
+
+ // Adding 1<<63 switches the hot index (from 0 to 1 or from 1 to 0)
+ // without touching the count bits. See the struct comments for a full
+ // description of the algorithm.
+ n := atomic.AddUint64(&h.countAndHotIdx, 1<<63)
+ // count is contained unchanged in the lower 63 bits.
+ count := n & ((1 << 63) - 1)
+ // The most significant bit tells us which counts is hot. The complement
+ // is thus the cold one.
+ hotCounts := h.counts[n>>63]
+ coldCounts := h.counts[(^n)>>63]
+
+ waitForCooldown(count, coldCounts)
+
+ his := &dto.Histogram{
+ Bucket: make([]*dto.Bucket, len(h.upperBounds)),
+ SampleCount: proto.Uint64(count),
+ SampleSum: proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))),
+ CreatedTimestamp: timestamppb.New(h.lastResetTime),
+ }
+ out.Histogram = his
+ out.Label = h.labelPairs
+
+ var cumCount uint64
+ for i, upperBound := range h.upperBounds {
+ cumCount += atomic.LoadUint64(&coldCounts.buckets[i])
+ his.Bucket[i] = &dto.Bucket{
+ CumulativeCount: proto.Uint64(cumCount),
+ UpperBound: proto.Float64(upperBound),
+ }
+ if e := h.exemplars[i].Load(); e != nil {
+ his.Bucket[i].Exemplar = e.(*dto.Exemplar)
+ }
+ }
+ // If there is an exemplar for the +Inf bucket, we have to add that bucket explicitly.
+ if e := h.exemplars[len(h.upperBounds)].Load(); e != nil {
+ b := &dto.Bucket{
+ CumulativeCount: proto.Uint64(count),
+ UpperBound: proto.Float64(math.Inf(1)),
+ Exemplar: e.(*dto.Exemplar),
+ }
+ his.Bucket = append(his.Bucket, b)
+ }
+ if h.nativeHistogramSchema > math.MinInt32 {
+ his.ZeroThreshold = proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.nativeHistogramZeroThresholdBits)))
+ his.Schema = proto.Int32(atomic.LoadInt32(&coldCounts.nativeHistogramSchema))
+ zeroBucket := atomic.LoadUint64(&coldCounts.nativeHistogramZeroBucket)
+
+ defer func() {
+ coldCounts.nativeHistogramBucketsPositive.Range(addAndReset(&hotCounts.nativeHistogramBucketsPositive, &hotCounts.nativeHistogramBucketsNumber))
+ coldCounts.nativeHistogramBucketsNegative.Range(addAndReset(&hotCounts.nativeHistogramBucketsNegative, &hotCounts.nativeHistogramBucketsNumber))
+ }()
+
+ his.ZeroCount = proto.Uint64(zeroBucket)
+ his.NegativeSpan, his.NegativeDelta = makeBuckets(&coldCounts.nativeHistogramBucketsNegative)
+ his.PositiveSpan, his.PositiveDelta = makeBuckets(&coldCounts.nativeHistogramBucketsPositive)
+
+ // Add a no-op span to a histogram without observations and with
+ // a zero threshold of zero. Otherwise, a native histogram would
+ // look like a classic histogram to scrapers.
+ if *his.ZeroThreshold == 0 && *his.ZeroCount == 0 && len(his.PositiveSpan) == 0 && len(his.NegativeSpan) == 0 {
+ his.PositiveSpan = []*dto.BucketSpan{{
+ Offset: proto.Int32(0),
+ Length: proto.Uint32(0),
+ }}
+ }
+
+ if h.nativeExemplars.isEnabled() {
+ h.nativeExemplars.Lock()
+ his.Exemplars = append(his.Exemplars, h.nativeExemplars.exemplars...)
+ h.nativeExemplars.Unlock()
+ }
+
+ }
+ addAndResetCounts(hotCounts, coldCounts)
+ return nil
+}
+
+// findBucket returns the index of the bucket for the provided value, or
+// len(h.upperBounds) for the +Inf bucket.
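+//
+// For example, with upperBounds of [0.1, 0.5, 1], a value of 0.05 maps to
+// index 0, a value of 0.5 maps to index 1 (upper bounds are inclusive), and a
+// value of 3 maps to index 3, i.e. the implicit +Inf bucket.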
+func (h *histogram) findBucket(v float64) int {
+ // TODO(beorn7): For small numbers of buckets (<30), a linear search is
+ // slightly faster than the binary search. If we really care, we could
+ // switch from one search strategy to the other depending on the number
+ // of buckets.
+ //
+ // Microbenchmarks (BenchmarkHistogramNoLabels):
+ // 11 buckets: 38.3 ns/op linear - binary 48.7 ns/op
+ // 100 buckets: 78.1 ns/op linear - binary 54.9 ns/op
+ // 300 buckets: 154 ns/op linear - binary 61.6 ns/op
+ return sort.SearchFloat64s(h.upperBounds, v)
+}
+
+// observe is the implementation for Observe without the findBucket part.
+func (h *histogram) observe(v float64, bucket int) {
+ // Do not add to sparse buckets for NaN observations.
+ doSparse := h.nativeHistogramSchema > math.MinInt32 && !math.IsNaN(v)
+ // We increment h.countAndHotIdx so that the counter in the lower
+ // 63 bits gets incremented. At the same time, we get the new value
+ // back, which we can use to find the currently-hot counts.
+ n := atomic.AddUint64(&h.countAndHotIdx, 1)
+ hotCounts := h.counts[n>>63]
+ hotCounts.observe(v, bucket, doSparse)
+ if doSparse {
+ h.limitBuckets(hotCounts, v, bucket)
+ }
+}
+
+// limitBuckets applies a strategy to limit the number of populated sparse
+// buckets. It's generally best effort, and there are situations where the
+// number can go higher (if even the lowest resolution isn't enough to reduce
+// the number sufficiently, or if the provided counts aren't fully updated yet
+// by a concurrently happening Write call).
+func (h *histogram) limitBuckets(counts *histogramCounts, value float64, bucket int) {
+ if h.nativeHistogramMaxBuckets == 0 {
+ return // No limit configured.
+ }
+ if h.nativeHistogramMaxBuckets >= atomic.LoadUint32(&counts.nativeHistogramBucketsNumber) {
+ return // Bucket limit not exceeded yet.
+ }
+
+ h.mtx.Lock()
+ defer h.mtx.Unlock()
+
+ // The hot counts might have been swapped just before we acquired the
+ // lock. Re-fetch the hot counts first...
+ n := atomic.LoadUint64(&h.countAndHotIdx)
+ hotIdx := n >> 63
+ coldIdx := (^n) >> 63
+ hotCounts := h.counts[hotIdx]
+ coldCounts := h.counts[coldIdx]
+ // ...and then check again if we really have to reduce the bucket count.
+ if h.nativeHistogramMaxBuckets >= atomic.LoadUint32(&hotCounts.nativeHistogramBucketsNumber) {
+ return // Bucket limit not exceeded after all.
+ }
+ // Try the various strategies in order.
+ if h.maybeReset(hotCounts, coldCounts, coldIdx, value, bucket) {
+ return
+ }
+ // One of the other strategies will happen. To undo what they will do as
+ // soon as enough time has passed to satisfy
+ // h.nativeHistogramMinResetDuration, schedule a reset at the right time
+ // if we haven't done so already.
+ if h.nativeHistogramMinResetDuration > 0 && !h.resetScheduled {
+ h.resetScheduled = true
+ h.afterFunc(h.nativeHistogramMinResetDuration-h.now().Sub(h.lastResetTime), h.reset)
+ }
+
+ if h.maybeWidenZeroBucket(hotCounts, coldCounts) {
+ return
+ }
+ h.doubleBucketWidth(hotCounts, coldCounts)
+}
+
+// maybeReset resets the whole histogram if at least
+// h.nativeHistogramMinResetDuration has passed. It returns true if the
+// histogram has been reset. The caller must have locked h.mtx.
+func (h *histogram) maybeReset(
+ hot, cold *histogramCounts, coldIdx uint64, value float64, bucket int,
+) bool {
+ // We are using the possibly mocked h.now() rather than
+ // time.Since(h.lastResetTime) to enable testing.
+ if h.nativeHistogramMinResetDuration == 0 || // No reset configured.
+ h.resetScheduled || // Do not interfere if a reset is already scheduled.
+ h.now().Sub(h.lastResetTime) < h.nativeHistogramMinResetDuration {
+ return false
+ }
+ // Completely reset coldCounts.
+ h.resetCounts(cold)
+ // Repeat the latest observation to not lose it completely.
+ cold.observe(value, bucket, true)
+ // Make coldCounts the new hot counts while resetting countAndHotIdx.
+ n := atomic.SwapUint64(&h.countAndHotIdx, (coldIdx<<63)+1)
+ count := n & ((1 << 63) - 1)
+ waitForCooldown(count, hot)
+ // Finally, reset the formerly hot counts, too.
+ h.resetCounts(hot)
+ h.lastResetTime = h.now()
+ return true
+}
+
+// reset resets the whole histogram. It locks h.mtx itself, i.e. it has to be
+// called without having locked h.mtx.
+func (h *histogram) reset() {
+ h.mtx.Lock()
+ defer h.mtx.Unlock()
+
+ n := atomic.LoadUint64(&h.countAndHotIdx)
+ hotIdx := n >> 63
+ coldIdx := (^n) >> 63
+ hot := h.counts[hotIdx]
+ cold := h.counts[coldIdx]
+ // Completely reset coldCounts.
+ h.resetCounts(cold)
+ // Make coldCounts the new hot counts while resetting countAndHotIdx.
+ n = atomic.SwapUint64(&h.countAndHotIdx, coldIdx<<63)
+ count := n & ((1 << 63) - 1)
+ waitForCooldown(count, hot)
+ // Finally, reset the formerly hot counts, too.
+ h.resetCounts(hot)
+ h.lastResetTime = h.now()
+ h.resetScheduled = false
+}
+
+// maybeWidenZeroBucket widens the zero bucket until it includes the existing
+// buckets closest to the zero bucket (which could be two, if an equidistant
+// negative and a positive bucket exists, but usually it's only one bucket to be
+// merged into the new wider zero bucket). h.nativeHistogramMaxZeroThreshold
+// limits how far the zero bucket can be extended, and if that's not enough to
+// include an existing bucket, the method returns false. The caller must have
+// locked h.mtx.
+func (h *histogram) maybeWidenZeroBucket(hot, cold *histogramCounts) bool {
+ currentZeroThreshold := math.Float64frombits(atomic.LoadUint64(&hot.nativeHistogramZeroThresholdBits))
+ if currentZeroThreshold >= h.nativeHistogramMaxZeroThreshold {
+ return false
+ }
+ // Find the key of the bucket closest to zero.
+ smallestKey := findSmallestKey(&hot.nativeHistogramBucketsPositive)
+ smallestNegativeKey := findSmallestKey(&hot.nativeHistogramBucketsNegative)
+ if smallestNegativeKey < smallestKey {
+ smallestKey = smallestNegativeKey
+ }
+ if smallestKey == math.MaxInt32 {
+ return false
+ }
+ newZeroThreshold := getLe(smallestKey, atomic.LoadInt32(&hot.nativeHistogramSchema))
+ if newZeroThreshold > h.nativeHistogramMaxZeroThreshold {
+ return false // New threshold would exceed the max threshold.
+ }
+ atomic.StoreUint64(&cold.nativeHistogramZeroThresholdBits, math.Float64bits(newZeroThreshold))
+ // Remove applicable buckets.
+ if _, loaded := cold.nativeHistogramBucketsNegative.LoadAndDelete(smallestKey); loaded {
+ atomicDecUint32(&cold.nativeHistogramBucketsNumber)
+ }
+ if _, loaded := cold.nativeHistogramBucketsPositive.LoadAndDelete(smallestKey); loaded {
+ atomicDecUint32(&cold.nativeHistogramBucketsNumber)
+ }
+ // Make cold counts the new hot counts.
+ n := atomic.AddUint64(&h.countAndHotIdx, 1<<63)
+ count := n & ((1 << 63) - 1)
+ // Swap the pointer names to represent the new roles and make
+ // the rest less confusing.
+ hot, cold = cold, hot
+ waitForCooldown(count, cold)
+ // Add all the now cold counts to the new hot counts...
+ addAndResetCounts(hot, cold)
+ // ...adjust the new zero threshold in the cold counts, too...
+ atomic.StoreUint64(&cold.nativeHistogramZeroThresholdBits, math.Float64bits(newZeroThreshold))
+ // ...and then merge the newly deleted buckets into the wider zero
+ // bucket.
+ mergeAndDeleteOrAddAndReset := func(hotBuckets, coldBuckets *sync.Map) func(k, v interface{}) bool {
+ return func(k, v interface{}) bool {
+ key := k.(int)
+ bucket := v.(*int64)
+ if key == smallestKey {
+ // Merge into hot zero bucket...
+ atomic.AddUint64(&hot.nativeHistogramZeroBucket, uint64(atomic.LoadInt64(bucket)))
+ // ...and delete from cold counts.
+ coldBuckets.Delete(key)
+ atomicDecUint32(&cold.nativeHistogramBucketsNumber)
+ } else {
+ // Add to corresponding hot bucket...
+ if addToBucket(hotBuckets, key, atomic.LoadInt64(bucket)) {
+ atomic.AddUint32(&hot.nativeHistogramBucketsNumber, 1)
+ }
+ // ...and reset cold bucket.
+ atomic.StoreInt64(bucket, 0)
+ }
+ return true
+ }
+ }
+
+ cold.nativeHistogramBucketsPositive.Range(mergeAndDeleteOrAddAndReset(&hot.nativeHistogramBucketsPositive, &cold.nativeHistogramBucketsPositive))
+ cold.nativeHistogramBucketsNegative.Range(mergeAndDeleteOrAddAndReset(&hot.nativeHistogramBucketsNegative, &cold.nativeHistogramBucketsNegative))
+ return true
+}
+
+// doubleBucketWidth doubles the bucket width (by decrementing the schema
+// number). Note that very sparse buckets could lead to a low reduction of the
+// bucket count (or even no reduction at all). The method does nothing if the
+// schema is already -4.
+func (h *histogram) doubleBucketWidth(hot, cold *histogramCounts) {
+ coldSchema := atomic.LoadInt32(&cold.nativeHistogramSchema)
+ if coldSchema == -4 {
+ return // Already at lowest resolution.
+ }
+ coldSchema--
+ atomic.StoreInt32(&cold.nativeHistogramSchema, coldSchema)
+ // Play it simple and just delete all cold buckets.
+ atomic.StoreUint32(&cold.nativeHistogramBucketsNumber, 0)
+ deleteSyncMap(&cold.nativeHistogramBucketsNegative)
+ deleteSyncMap(&cold.nativeHistogramBucketsPositive)
+ // Make coldCounts the new hot counts.
+ n := atomic.AddUint64(&h.countAndHotIdx, 1<<63)
+ count := n & ((1 << 63) - 1)
+ // Swap the pointer names to represent the new roles and make
+ // the rest less confusing.
+ hot, cold = cold, hot
+ waitForCooldown(count, cold)
+ // Add all the now cold counts to the new hot counts...
+ addAndResetCounts(hot, cold)
+ // ...adjust the schema in the cold counts, too...
+ atomic.StoreInt32(&cold.nativeHistogramSchema, coldSchema)
+ // ...and then merge the cold buckets into the wider hot buckets.
+ merge := func(hotBuckets *sync.Map) func(k, v interface{}) bool {
+ return func(k, v interface{}) bool {
+ key := k.(int)
+ bucket := v.(*int64)
+ // Adjust key to match the bucket to merge into.
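+ // (With Go's truncating integer division this maps, e.g., keys 5 and 6
+ // both to key 3, and keys -1 and 0 both to key 0 -- illustrative values
+ // added here for clarity.)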
+ if key > 0 {
+ key++
+ }
+ key /= 2
+ // Add to corresponding hot bucket.
+ if addToBucket(hotBuckets, key, atomic.LoadInt64(bucket)) {
+ atomic.AddUint32(&hot.nativeHistogramBucketsNumber, 1)
+ }
+ return true
+ }
+ }
+
+ cold.nativeHistogramBucketsPositive.Range(merge(&hot.nativeHistogramBucketsPositive))
+ cold.nativeHistogramBucketsNegative.Range(merge(&hot.nativeHistogramBucketsNegative))
+ // Play it simple again and just delete all cold buckets.
+ atomic.StoreUint32(&cold.nativeHistogramBucketsNumber, 0)
+ deleteSyncMap(&cold.nativeHistogramBucketsNegative)
+ deleteSyncMap(&cold.nativeHistogramBucketsPositive)
+}
+
+func (h *histogram) resetCounts(counts *histogramCounts) {
+ atomic.StoreUint64(&counts.sumBits, 0)
+ atomic.StoreUint64(&counts.count, 0)
+ atomic.StoreUint64(&counts.nativeHistogramZeroBucket, 0)
+ atomic.StoreUint64(&counts.nativeHistogramZeroThresholdBits, math.Float64bits(h.nativeHistogramZeroThreshold))
+ atomic.StoreInt32(&counts.nativeHistogramSchema, h.nativeHistogramSchema)
+ atomic.StoreUint32(&counts.nativeHistogramBucketsNumber, 0)
+ for i := range h.upperBounds {
+ atomic.StoreUint64(&counts.buckets[i], 0)
+ }
+ deleteSyncMap(&counts.nativeHistogramBucketsNegative)
+ deleteSyncMap(&counts.nativeHistogramBucketsPositive)
+}
+
+// updateExemplar replaces the exemplar for the provided classic bucket.
+// With empty labels, it's a no-op. It panics if any of the labels is invalid.
+// If the histogram is native, the exemplar is also cached in nativeExemplars,
+// which has a limit; when the limit is reached, one existing exemplar is evicted.
+func (h *histogram) updateExemplar(v float64, bucket int, l Labels) {
+ if l == nil {
+ return
+ }
+ e, err := newExemplar(v, h.now(), l)
+ if err != nil {
+ panic(err)
+ }
+ h.exemplars[bucket].Store(e)
+ doSparse := h.nativeHistogramSchema > math.MinInt32 && !math.IsNaN(v)
+ if doSparse {
+ h.nativeExemplars.addExemplar(e)
+ }
+}
+
+// HistogramVec is a Collector that bundles a set of Histograms that all share the
+// same Desc, but have different values for their variable labels. This is used
+// if you want to count the same thing partitioned by various dimensions
+// (e.g. HTTP request latencies, partitioned by status code and method). Create
+// instances with NewHistogramVec.
+type HistogramVec struct {
+ *MetricVec
+}
+
+// NewHistogramVec creates a new HistogramVec based on the provided HistogramOpts and
+// partitioned by the given label names.
+func NewHistogramVec(opts HistogramOpts, labelNames []string) *HistogramVec {
+ return V2.NewHistogramVec(HistogramVecOpts{
+ HistogramOpts: opts,
+ VariableLabels: UnconstrainedLabels(labelNames),
+ })
+}
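+
+// Illustrative usage sketch for NewHistogramVec (the metric name, help text,
+// and label values below are invented for this example):
+//
+// hv := prometheus.NewHistogramVec(prometheus.HistogramOpts{
+//     Name:    "http_request_duration_seconds",
+//     Help:    "A histogram of request latencies.",
+//     Buckets: prometheus.DefBuckets,
+// }, []string{"code", "method"})
+// hv.WithLabelValues("200", "GET").Observe(0.42)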
+
+// NewHistogramVec creates a new HistogramVec based on the provided HistogramVecOpts.
+func (v2) NewHistogramVec(opts HistogramVecOpts) *HistogramVec {
+ desc := V2.NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ opts.VariableLabels,
+ opts.ConstLabels,
+ )
+ return &HistogramVec{
+ MetricVec: NewMetricVec(desc, func(lvs ...string) Metric {
+ return newHistogram(desc, opts.HistogramOpts, lvs...)
+ }),
+ }
+}
+
+// GetMetricWithLabelValues returns the Histogram for the given slice of label
+// values (same order as the variable labels in Desc). If that combination of
+// label values is accessed for the first time, a new Histogram is created.
+//
+// It is possible to call this method without using the returned Histogram to only
+// create the new Histogram but leave it at its starting value, a Histogram without
+// any observations.
+//
+// Keeping the Histogram for later use is possible (and should be considered if
+// performance is critical), but keep in mind that Reset, DeleteLabelValues and
+// Delete can be used to delete the Histogram from the HistogramVec. In that case, the
+// Histogram will still exist, but it will not be exported anymore, even if a
+// Histogram with the same label values is created later. See also the CounterVec
+// example.
+//
+// An error is returned if the number of label values is not the same as the
+// number of variable labels in Desc (minus any curried labels).
+//
+// Note that for more than one label value, this method is prone to mistakes
+// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as
+// an alternative to avoid that type of mistake. For higher label numbers, the
+// latter has a much more readable (albeit more verbose) syntax, but it comes
+// with a performance overhead (for creating and processing the Labels map).
+// See also the GaugeVec example.
+func (v *HistogramVec) GetMetricWithLabelValues(lvs ...string) (Observer, error) {
+ metric, err := v.MetricVec.GetMetricWithLabelValues(lvs...)
+ if metric != nil {
+ return metric.(Observer), err
+ }
+ return nil, err
+}
+
+// GetMetricWith returns the Histogram for the given Labels map (the label names
+// must match those of the variable labels in Desc). If that label map is
+// accessed for the first time, a new Histogram is created. Implications of
+// creating a Histogram without using it and keeping the Histogram for later use
+// are the same as for GetMetricWithLabelValues.
+//
+// An error is returned if the number and names of the Labels are inconsistent
+// with those of the variable labels in Desc (minus any curried labels).
+//
+// This method is used for the same purpose as
+// GetMetricWithLabelValues(...string). See there for pros and cons of the two
+// methods.
+func (v *HistogramVec) GetMetricWith(labels Labels) (Observer, error) {
+ metric, err := v.MetricVec.GetMetricWith(labels)
+ if metric != nil {
+ return metric.(Observer), err
+ }
+ return nil, err
+}
+
+// WithLabelValues works as GetMetricWithLabelValues, but panics where
+// GetMetricWithLabelValues would have returned an error. Not returning an
+// error allows shortcuts like
+//
+// myVec.WithLabelValues("404", "GET").Observe(42.21)
+func (v *HistogramVec) WithLabelValues(lvs ...string) Observer {
+ h, err := v.GetMetricWithLabelValues(lvs...)
+ if err != nil {
+ panic(err)
+ }
+ return h
+}
+
+// With works as GetMetricWith but panics where GetMetricWith would have
+// returned an error. Not returning an error allows shortcuts like
+//
+// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Observe(42.21)
+func (v *HistogramVec) With(labels Labels) Observer {
+ h, err := v.GetMetricWith(labels)
+ if err != nil {
+ panic(err)
+ }
+ return h
+}
+
+// CurryWith returns a vector curried with the provided labels, i.e. the
+// returned vector has those labels pre-set for all labeled operations performed
+// on it. The cardinality of the curried vector is reduced accordingly. The
+// order of the remaining labels stays the same (just with the curried labels
+// taken out of the sequence – which is relevant for the
+// (GetMetric)WithLabelValues methods). It is possible to curry a curried
+// vector, but only with labels not yet used for currying before.
+//
+// The metrics contained in the HistogramVec are shared between the curried and
+// uncurried vectors. They are just accessed differently. Curried and uncurried
+// vectors behave identically in terms of collection. Only one must be
+// registered with a given registry (usually the uncurried version). The Reset
+// method deletes all metrics, even if called on a curried vector.
+func (v *HistogramVec) CurryWith(labels Labels) (ObserverVec, error) {
+ vec, err := v.MetricVec.CurryWith(labels)
+ if vec != nil {
+ return &HistogramVec{vec}, err
+ }
+ return nil, err
+}
+
+// MustCurryWith works as CurryWith but panics where CurryWith would have
+// returned an error.
+func (v *HistogramVec) MustCurryWith(labels Labels) ObserverVec {
+ vec, err := v.CurryWith(labels)
+ if err != nil {
+ panic(err)
+ }
+ return vec
+}
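+
+// Hedged currying sketch (myVec and the label names "code" and "method" are
+// assumed for illustration only):
+//
+// perMethod := myVec.MustCurryWith(prometheus.Labels{"method": "GET"})
+// perMethod.WithLabelValues("200").Observe(0.03) // only "code" remains variable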
+
+type constHistogram struct {
+ desc *Desc
+ count uint64
+ sum float64
+ buckets map[float64]uint64
+ labelPairs []*dto.LabelPair
+ createdTs *timestamppb.Timestamp
+}
+
+func (h *constHistogram) Desc() *Desc {
+ return h.desc
+}
+
+func (h *constHistogram) Write(out *dto.Metric) error {
+ his := &dto.Histogram{
+ CreatedTimestamp: h.createdTs,
+ }
+
+ buckets := make([]*dto.Bucket, 0, len(h.buckets))
+
+ his.SampleCount = proto.Uint64(h.count)
+ his.SampleSum = proto.Float64(h.sum)
+ for upperBound, count := range h.buckets {
+ buckets = append(buckets, &dto.Bucket{
+ CumulativeCount: proto.Uint64(count),
+ UpperBound: proto.Float64(upperBound),
+ })
+ }
+
+ if len(buckets) > 0 {
+ sort.Sort(buckSort(buckets))
+ }
+ his.Bucket = buckets
+
+ out.Histogram = his
+ out.Label = h.labelPairs
+
+ return nil
+}
+
+// NewConstHistogram returns a metric representing a Prometheus histogram with
+// fixed values for the count, sum, and bucket counts. As those parameters
+// cannot be changed, the returned value does not implement the Histogram
+// interface (but only the Metric interface). Users of this package will not
+// have much use for it in regular operations. However, when implementing custom
+// Collectors, it is useful as a throw-away metric that is generated on the fly
+// to send it to Prometheus in the Collect method.
+//
+// buckets is a map of upper bounds to cumulative counts, excluding the +Inf
+// bucket. The +Inf bucket is implicit, and its value is equal to the provided count.
+//
+// NewConstHistogram returns an error if the length of labelValues is not
+// consistent with the variable labels in Desc or if Desc is invalid.
+func NewConstHistogram(
+ desc *Desc,
+ count uint64,
+ sum float64,
+ buckets map[float64]uint64,
+ labelValues ...string,
+) (Metric, error) {
+ if desc.err != nil {
+ return nil, desc.err
+ }
+ if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil {
+ return nil, err
+ }
+ return &constHistogram{
+ desc: desc,
+ count: count,
+ sum: sum,
+ buckets: buckets,
+ labelPairs: MakeLabelPairs(desc, labelValues),
+ }, nil
+}
+
+// MustNewConstHistogram is a version of NewConstHistogram that panics where
+// NewConstHistogram would have returned an error.
+func MustNewConstHistogram(
+ desc *Desc,
+ count uint64,
+ sum float64,
+ buckets map[float64]uint64,
+ labelValues ...string,
+) Metric {
+ m, err := NewConstHistogram(desc, count, sum, buckets, labelValues...)
+ if err != nil {
+ panic(err)
+ }
+ return m
+}
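+
+// Hedged sketch of typical use inside a custom Collector's Collect method
+// (ch, desc, and all numbers are placeholders for this example):
+//
+// ch <- prometheus.MustNewConstHistogram(
+//     desc,
+//     4711,  // count (also the implicit +Inf bucket)
+//     403.4, // sum
+//     map[float64]uint64{25: 121, 50: 2403, 100: 4500},
+// )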
+
+// NewConstHistogramWithCreatedTimestamp does the same thing as NewConstHistogram but sets the created timestamp.
+func NewConstHistogramWithCreatedTimestamp(
+ desc *Desc,
+ count uint64,
+ sum float64,
+ buckets map[float64]uint64,
+ ct time.Time,
+ labelValues ...string,
+) (Metric, error) {
+ if desc.err != nil {
+ return nil, desc.err
+ }
+ if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil {
+ return nil, err
+ }
+ return &constHistogram{
+ desc: desc,
+ count: count,
+ sum: sum,
+ buckets: buckets,
+ labelPairs: MakeLabelPairs(desc, labelValues),
+ createdTs: timestamppb.New(ct),
+ }, nil
+}
+
+// MustNewConstHistogramWithCreatedTimestamp is a version of NewConstHistogramWithCreatedTimestamp that panics where
+// NewConstHistogramWithCreatedTimestamp would have returned an error.
+func MustNewConstHistogramWithCreatedTimestamp(
+ desc *Desc,
+ count uint64,
+ sum float64,
+ buckets map[float64]uint64,
+ ct time.Time,
+ labelValues ...string,
+) Metric {
+ m, err := NewConstHistogramWithCreatedTimestamp(desc, count, sum, buckets, ct, labelValues...)
+ if err != nil {
+ panic(err)
+ }
+ return m
+}
+
+type buckSort []*dto.Bucket
+
+func (s buckSort) Len() int {
+ return len(s)
+}
+
+func (s buckSort) Swap(i, j int) {
+ s[i], s[j] = s[j], s[i]
+}
+
+func (s buckSort) Less(i, j int) bool {
+ return s[i].GetUpperBound() < s[j].GetUpperBound()
+}
+
+// pickSchema returns the smallest number n between -4 and 8 such that
+// 2^(2^-n) is less than or equal to the provided bucketFactor.
+//
+// Special cases:
+// - bucketFactor <= 1: panics.
+// - bucketFactor < 2^(2^-8) (but > 1): still returns 8.
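+//
+// Worked example (added for illustration): bucketFactor = 1.1 gives
+// floor(log2(log2(1.1))) = -3, so the returned schema is 3; indeed
+// 2^(2^-3) ≈ 1.09 <= 1.1, while schema 2 would give 2^(2^-2) ≈ 1.19 > 1.1.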
+func pickSchema(bucketFactor float64) int32 {
+ if bucketFactor <= 1 {
+ panic(fmt.Errorf("bucketFactor %f is <=1", bucketFactor))
+ }
+ floor := math.Floor(math.Log2(math.Log2(bucketFactor)))
+ switch {
+ case floor <= -8:
+ return 8
+ case floor >= 4:
+ return -4
+ default:
+ return -int32(floor)
+ }
+}
+
+func makeBuckets(buckets *sync.Map) ([]*dto.BucketSpan, []int64) {
+ var ii []int
+ buckets.Range(func(k, v interface{}) bool {
+ ii = append(ii, k.(int))
+ return true
+ })
+ sort.Ints(ii)
+
+ if len(ii) == 0 {
+ return nil, nil
+ }
+
+ var (
+ spans []*dto.BucketSpan
+ deltas []int64
+ prevCount int64
+ nextI int
+ )
+
+ appendDelta := func(count int64) {
+ *spans[len(spans)-1].Length++
+ deltas = append(deltas, count-prevCount)
+ prevCount = count
+ }
+
+ for n, i := range ii {
+ v, _ := buckets.Load(i)
+ count := atomic.LoadInt64(v.(*int64))
+ // Multiple spans with only small gaps in between are probably
+ // encoded more efficiently as one larger span with a few empty
+ // buckets. Needs some research to find the sweet spot. For now,
+ // we assume that gaps of one or two buckets should not create
+ // a new span.
+ iDelta := int32(i - nextI)
+ if n == 0 || iDelta > 2 {
+ // We have to create a new span, either because we are
+ // at the very beginning, or because we have found a gap
+ // of more than two buckets.
+ spans = append(spans, &dto.BucketSpan{
+ Offset: proto.Int32(iDelta),
+ Length: proto.Uint32(0),
+ })
+ } else {
+ // We have found a small gap (or no gap at all).
+ // Insert empty buckets as needed.
+ for j := int32(0); j < iDelta; j++ {
+ appendDelta(0)
+ }
+ }
+ appendDelta(count)
+ nextI = i + 1
+ }
+ return spans, deltas
+}
+
+// addToBucket increments the sparse bucket at key by the provided amount. It
+// returns true if a new sparse bucket had to be created for that.
+func addToBucket(buckets *sync.Map, key int, increment int64) bool {
+ if existingBucket, ok := buckets.Load(key); ok {
+ // Fast path without allocation.
+ atomic.AddInt64(existingBucket.(*int64), increment)
+ return false
+ }
+ // Bucket doesn't exist yet. Slow path allocating new counter.
+ newBucket := increment // TODO(beorn7): Check if this is sufficient to not let increment escape.
+ if actualBucket, loaded := buckets.LoadOrStore(key, &newBucket); loaded {
+ // The bucket was created concurrently in another goroutine.
+ // Have to increment after all.
+ atomic.AddInt64(actualBucket.(*int64), increment)
+ return false
+ }
+ return true
+}
+
+// addAndReset returns a function to be used with sync.Map.Range of sparse
+// buckets in coldCounts. It increments the buckets in the provided hotBuckets
+// according to the buckets ranged through. It then resets all buckets ranged
+// through to 0 (but leaves them in place so that they don't need to get
+// recreated on the next scrape).
+func addAndReset(hotBuckets *sync.Map, bucketNumber *uint32) func(k, v interface{}) bool {
+ return func(k, v interface{}) bool {
+ bucket := v.(*int64)
+ if addToBucket(hotBuckets, k.(int), atomic.LoadInt64(bucket)) {
+ atomic.AddUint32(bucketNumber, 1)
+ }
+ atomic.StoreInt64(bucket, 0)
+ return true
+ }
+}
+
+func deleteSyncMap(m *sync.Map) {
+ m.Range(func(k, v interface{}) bool {
+ m.Delete(k)
+ return true
+ })
+}
+
+func findSmallestKey(m *sync.Map) int {
+ result := math.MaxInt32
+ m.Range(func(k, v interface{}) bool {
+ key := k.(int)
+ if key < result {
+ result = key
+ }
+ return true
+ })
+ return result
+}
+
+func getLe(key int, schema int32) float64 {
+ // Here a bit of context about the behavior for the last bucket counting
+ // regular numbers (called simply "last bucket" below) and the bucket
+ // counting observations of ±Inf (called "inf bucket" below, with a key
+ // one higher than that of the "last bucket"):
+ //
+ // If we apply the usual formula to the last bucket, its upper bound
+ // would be calculated as +Inf. The reason is that the max possible
+ // regular float64 number (math.MaxFloat64) doesn't coincide with one of
+ // the calculated bucket boundaries. So the calculated boundary has to
+ // be larger than math.MaxFloat64, and the only float64 larger than
+ // math.MaxFloat64 is +Inf. However, we want to count actual
+ // observations of ±Inf in the inf bucket. Therefore, we have to treat
+ // the upper bound of the last bucket specially and set it to
+ // math.MaxFloat64. (The upper bound of the inf bucket, with its key
+ // being one higher than that of the last bucket, naturally comes out as
+ // +Inf by the usual formula. So that's fine.)
+ //
+ // math.MaxFloat64 has a frac of 0.9999999999999999 and an exp of
+ // 1024. If there were a float64 number following math.MaxFloat64, it
+ // would have a frac of 1.0 and an exp of 1024, or equivalently a frac
+ // of 0.5 and an exp of 1025. However, since frac must be smaller than
+ // 1, and exp must be smaller than 1025, either representation overflows
+ // a float64. (Which, in turn, is the reason that math.MaxFloat64 is the
+ // largest possible float64. Q.E.D.) However, the formula for
+ // calculating the upper bound from the idx and schema of the last
+ // bucket results in precisely that. It is either frac=1.0 & exp=1024
+ // (for schema < 0) or frac=0.5 & exp=1025 (for schema >=0). (This is,
+ // by the way, a power of two where the exponent itself is a power of
+ // two, 2¹⁰ in fact, which coincides with a bucket boundary in all
+ // schemas.) So these are the special cases we have to catch below.
+ if schema < 0 {
+ exp := key << -schema
+ if exp == 1024 {
+ // This is the last bucket before the overflow bucket
+ // (for ±Inf observations). Return math.MaxFloat64 as
+ // explained above.
+ return math.MaxFloat64
+ }
+ return math.Ldexp(1, exp)
+ }
+
+ fracIdx := key & ((1 << schema) - 1)
+ frac := nativeHistogramBounds[schema][fracIdx]
+ exp := (key >> schema) + 1
+ if frac == 0.5 && exp == 1025 {
+ // This is the last bucket before the overflow bucket (for ±Inf
+ // observations). Return math.MaxFloat64 as explained above.
+ return math.MaxFloat64
+ }
+ return math.Ldexp(frac, exp)
+}
+
+// waitForCooldown returns after the count field in the provided histogramCounts
+// has reached the provided count value.
+func waitForCooldown(count uint64, counts *histogramCounts) {
+ for count != atomic.LoadUint64(&counts.count) {
+ runtime.Gosched() // Let observations get work done.
+ }
+}
+
+// atomicAddFloat adds the provided float atomically to another float
+// represented by the bit pattern the bits pointer is pointing to.
+func atomicAddFloat(bits *uint64, v float64) {
+ for {
+ loadedBits := atomic.LoadUint64(bits)
+ newBits := math.Float64bits(math.Float64frombits(loadedBits) + v)
+ if atomic.CompareAndSwapUint64(bits, loadedBits, newBits) {
+ break
+ }
+ }
+}
+
+// atomicDecUint32 atomically decrements the uint32 p points to. See
+// https://pkg.go.dev/sync/atomic#AddUint32 to understand how this is done.
+func atomicDecUint32(p *uint32) {
+ atomic.AddUint32(p, ^uint32(0))
+}
+
+// addAndResetCounts adds certain fields (count, sum, conventional buckets, zero
+// bucket) from the cold counts to the corresponding fields in the hot
+// counts. Those fields are then reset to 0 in the cold counts.
+func addAndResetCounts(hot, cold *histogramCounts) {
+ atomic.AddUint64(&hot.count, atomic.LoadUint64(&cold.count))
+ atomic.StoreUint64(&cold.count, 0)
+ coldSum := math.Float64frombits(atomic.LoadUint64(&cold.sumBits))
+ atomicAddFloat(&hot.sumBits, coldSum)
+ atomic.StoreUint64(&cold.sumBits, 0)
+ for i := range hot.buckets {
+ atomic.AddUint64(&hot.buckets[i], atomic.LoadUint64(&cold.buckets[i]))
+ atomic.StoreUint64(&cold.buckets[i], 0)
+ }
+ atomic.AddUint64(&hot.nativeHistogramZeroBucket, atomic.LoadUint64(&cold.nativeHistogramZeroBucket))
+ atomic.StoreUint64(&cold.nativeHistogramZeroBucket, 0)
+}
+
+type nativeExemplars struct {
+ sync.Mutex
+
+ // Time-to-live for exemplars. It is set to -1 if exemplars are disabled, i.e. NativeHistogramMaxExemplars is below 0.
+ // The ttl is used on insertion to remove an exemplar that is older than ttl, if present.
+ ttl time.Duration
+
+ exemplars []*dto.Exemplar
+}
+
+func (n *nativeExemplars) isEnabled() bool {
+ return n.ttl != -1
+}
+
+func makeNativeExemplars(ttl time.Duration, maxCount int) nativeExemplars {
+ if ttl == 0 {
+ ttl = 5 * time.Minute
+ }
+
+ if maxCount == 0 {
+ maxCount = 10
+ }
+
+ if maxCount < 0 {
+ maxCount = 0
+ ttl = -1
+ }
+
+ return nativeExemplars{
+ ttl: ttl,
+ exemplars: make([]*dto.Exemplar, 0, maxCount),
+ }
+}
+
+func (n *nativeExemplars) addExemplar(e *dto.Exemplar) {
+ if !n.isEnabled() {
+ return
+ }
+
+ n.Lock()
+ defer n.Unlock()
+
+ // As long as the number of exemplars is still below cap(n.exemplars),
+ // insert the new exemplar directly, keeping the slice sorted by value.
+ if len(n.exemplars) < cap(n.exemplars) {
+ var nIdx int
+ for nIdx = 0; nIdx < len(n.exemplars); nIdx++ {
+ if *e.Value < *n.exemplars[nIdx].Value {
+ break
+ }
+ }
+ n.exemplars = append(n.exemplars[:nIdx], append([]*dto.Exemplar{e}, n.exemplars[nIdx:]...)...)
+ return
+ }
+
+ if len(n.exemplars) == 1 {
+ // When the number of exemplars is 1, then
+ // replace the existing exemplar with the new exemplar.
+ n.exemplars[0] = e
+ return
+ }
+ // From this point on, the number of exemplars is greater than 1.
+
+ // When the number of exemplars exceeds the limit, remove one exemplar.
+ var (
+ ot = time.Time{} // Oldest timestamp seen. Initial value doesn't matter as we replace it due to otIdx == -1 in the loop.
+ otIdx = -1 // Index of the exemplar with the oldest timestamp.
+
+ md = -1.0 // Smallest difference between the logarithms of adjacent exemplar values, i.e. the closest pair on log scale.
+
+ // The insertion point of the new exemplar in the exemplars slice after insertion.
+ // This is calculated purely based on the order of the exemplars by value.
+ // nIdx == len(n.exemplars) means the new exemplar is to be inserted after the end.
+ nIdx = -1
+
+ // rIdx is ultimately the index for the exemplar that we are replacing with the new exemplar.
+ // The aim is to keep a good spread of exemplars by value and not let them bunch up too much.
+ // It is calculated in 3 steps:
+ // 1. First we set rIdx to the index of the older exemplar within the closest pair by value.
+ // That is the following will be true (on log scale):
+ // either the exemplar pair on index (rIdx-1, rIdx) or (rIdx, rIdx+1) will have
+ // the closest values to each other from all pairs.
+ // For example, suppose the values are distributed like this:
+ // |-----------x-------------x----------------x----x-----|
+ // ^--rIdx as this is older.
+ // Or like this:
+ // |-----------x-------------x----------------x----x-----|
+ // ^--rIdx as this is older.
+ // 2. If there is an exemplar that expired, then we simply reset rIdx to that index.
+ // 3. We check if by inserting the new exemplar we would create a closer pair at
+ // (nIdx-1, nIdx) or (nIdx, nIdx+1) and set rIdx to nIdx-1 or nIdx accordingly to
+ // keep the spread of exemplars by value; otherwise we keep rIdx as it is.
+ rIdx = -1
+ cLog float64 // Logarithm of the current exemplar.
+ pLog float64 // Logarithm of the previous exemplar.
+ )
+
+ for i, exemplar := range n.exemplars {
+ // Find the exemplar with the oldest timestamp.
+ if otIdx == -1 || exemplar.Timestamp.AsTime().Before(ot) {
+ ot = exemplar.Timestamp.AsTime()
+ otIdx = i
+ }
+
+ // Find the index at which to insert the new exemplar.
+ if nIdx == -1 && *e.Value <= *exemplar.Value {
+ nIdx = i
+ }
+
+ // Find the two closest exemplars and pick the one with the older timestamp.
+ pLog = cLog
+ cLog = math.Log(exemplar.GetValue())
+ if i == 0 {
+ continue
+ }
+ diff := math.Abs(cLog - pLog)
+ if md == -1 || diff < md {
+ // The closest exemplar pair is at index: i-1, i.
+ // Choose the exemplar with the older timestamp for replacement.
+ md = diff
+ if n.exemplars[i].Timestamp.AsTime().Before(n.exemplars[i-1].Timestamp.AsTime()) {
+ rIdx = i
+ } else {
+ rIdx = i - 1
+ }
+ }
+
+ }
+
+ // If all existing exemplars are smaller than the new exemplar,
+ // then it should be inserted at the end.
+ if nIdx == -1 {
+ nIdx = len(n.exemplars)
+ }
+ // Here, we have the following relationships:
+ // n.exemplars[nIdx-1].Value < e.Value (if nIdx > 0)
+ // e.Value <= n.exemplars[nIdx].Value (if nIdx < len(n.exemplars))
+
+ if otIdx != -1 && e.Timestamp.AsTime().Sub(ot) > n.ttl {
+ // If the oldest exemplar has expired, then replace it with the new exemplar.
+ rIdx = otIdx
+ } else {
+ // In the previous for loop, when calculating the closest pair of exemplars,
+ // we did not take into account the newly inserted exemplar.
+ // So we need to calculate with the newly inserted exemplar again.
+ elog := math.Log(e.GetValue())
+ if nIdx > 0 {
+ diff := math.Abs(elog - math.Log(n.exemplars[nIdx-1].GetValue()))
+ if diff < md {
+ // The value we are about to insert is closer to the previous exemplar at the insertion point than what we calculated before in rIdx.
+ // v--rIdx
+ // |-----------x-n-----------x----------------x----x-----|
+ // nIdx-1--^ ^--new exemplar value
+ // Do not make the spread worse, replace nIdx-1 and not rIdx.
+ md = diff
+ rIdx = nIdx - 1
+ }
+ }
+ if nIdx < len(n.exemplars) {
+ diff := math.Abs(math.Log(n.exemplars[nIdx].GetValue()) - elog)
+ if diff < md {
+ // The value we are about to insert is closer to the next exemplar at the insertion point than what we calculated before in rIdx.
+ // v--rIdx
+ // |-----------x-----------n-x----------------x----x-----|
+ // new exemplar value--^ ^--nIdx
+ // Do not make the spread worse, replace nIdx and not rIdx.
+ rIdx = nIdx
+ }
+ }
+ }
+
+ // Adjust the slice according to rIdx and nIdx.
+ switch {
+ case rIdx == nIdx:
+ n.exemplars[nIdx] = e
+ case rIdx < nIdx:
+ n.exemplars = append(n.exemplars[:rIdx], append(n.exemplars[rIdx+1:nIdx], append([]*dto.Exemplar{e}, n.exemplars[nIdx:]...)...)...)
+ case rIdx > nIdx:
+ n.exemplars = append(n.exemplars[:nIdx], append([]*dto.Exemplar{e}, append(n.exemplars[nIdx:rIdx], n.exemplars[rIdx+1:]...)...)...)
+ }
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/internal/almost_equal.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/almost_equal.go
new file mode 100644
index 0000000..1ed5abe
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/internal/almost_equal.go
@@ -0,0 +1,60 @@
+// Copyright (c) 2015 Björn Rabenstein
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+//
+// The code in this package is copy/paste to avoid a dependency. Hence this file
+// carries the copyright of the original repo.
+// https://github.com/beorn7/floats
+package internal
+
+import (
+ "math"
+)
+
+// minNormalFloat64 is the smallest positive normal value of type float64.
+var minNormalFloat64 = math.Float64frombits(0x0010000000000000)
+
+// AlmostEqualFloat64 returns true if a and b are equal within a relative error
+// of epsilon. See http://floating-point-gui.de/errors/comparison/ for the
+// details of the applied method.
+func AlmostEqualFloat64(a, b, epsilon float64) bool {
+ if a == b {
+ return true
+ }
+ absA := math.Abs(a)
+ absB := math.Abs(b)
+ diff := math.Abs(a - b)
+ if a == 0 || b == 0 || absA+absB < minNormalFloat64 {
+ return diff < epsilon*minNormalFloat64
+ }
+ return diff/math.Min(absA+absB, math.MaxFloat64) < epsilon
+}
+
+// AlmostEqualFloat64s is the slice form of AlmostEqualFloat64.
+func AlmostEqualFloat64s(a, b []float64, epsilon float64) bool {
+ if len(a) != len(b) {
+ return false
+ }
+ for i := range a {
+ if !AlmostEqualFloat64(a[i], b[i], epsilon) {
+ return false
+ }
+ }
+ return true
+}
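+
+// Illustrative checks (values and epsilon chosen for this comment only):
+//
+// AlmostEqualFloat64(0.1+0.2, 0.3, 1e-9) // true, although 0.1+0.2 != 0.3 exactly in float64
+// AlmostEqualFloat64(1.0, 1.1, 1e-9)     // false, relative error is roughly 0.05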
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go
new file mode 100644
index 0000000..a595a20
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go
@@ -0,0 +1,654 @@
+// Copyright 2022 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// This file provides tools to compare sequences of strings and generate textual diffs.
+//
+// Maintaining `GetUnifiedDiffString` here because original repository
+// (https://github.com/pmezard/go-difflib) is no longer maintained.
+package internal
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "io"
+ "strings"
+)
+
+func min(a, b int) int {
+ if a < b {
+ return a
+ }
+ return b
+}
+
+func max(a, b int) int {
+ if a > b {
+ return a
+ }
+ return b
+}
+
+func calculateRatio(matches, length int) float64 {
+ if length > 0 {
+ return 2.0 * float64(matches) / float64(length)
+ }
+ return 1.0
+}
+
+type Match struct {
+ A int
+ B int
+ Size int
+}
+
+type OpCode struct {
+ Tag byte
+ I1 int
+ I2 int
+ J1 int
+ J2 int
+}
+
+// SequenceMatcher compares sequence of strings. The basic
+// algorithm predates, and is a little fancier than, an algorithm
+// published in the late 1980's by Ratcliff and Obershelp under the
+// hyperbolic name "gestalt pattern matching". The basic idea is to find
+// the longest contiguous matching subsequence that contains no "junk"
+// elements (R-O doesn't address junk). The same idea is then applied
+// recursively to the pieces of the sequences to the left and to the right
+// of the matching subsequence. This does not yield minimal edit
+// sequences, but does tend to yield matches that "look right" to people.
+//
+// SequenceMatcher tries to compute a "human-friendly diff" between two
+// sequences. Unlike e.g. UNIX(tm) diff, the fundamental notion is the
+// longest *contiguous* & junk-free matching subsequence. That's what
+// catches peoples' eyes. The Windows(tm) windiff has another interesting
+// notion, pairing up elements that appear uniquely in each sequence.
+// That, and the method here, appear to yield more intuitive difference
+// reports than does diff. This method appears to be the least vulnerable
+// to synching up on blocks of "junk lines", though (like blank lines in
+// ordinary text files, or maybe "" lines in HTML files). That may be
+// because this is the only method of the 3 that has a *concept* of
+// "junk".
+//
+// Timing: Basic R-O is cubic time worst case and quadratic time expected
+// case. SequenceMatcher is quadratic time for the worst case and has
+// expected-case behavior dependent in a complicated way on how many
+// elements the sequences have in common; best case time is linear.
+type SequenceMatcher struct {
+ a []string
+ b []string
+ b2j map[string][]int
+ IsJunk func(string) bool
+ autoJunk bool
+ bJunk map[string]struct{}
+ matchingBlocks []Match
+ fullBCount map[string]int
+ bPopular map[string]struct{}
+ opCodes []OpCode
+}
+
+func NewMatcher(a, b []string) *SequenceMatcher {
+ m := SequenceMatcher{autoJunk: true}
+ m.SetSeqs(a, b)
+ return &m
+}
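+
+// Minimal usage sketch (inputs invented for illustration):
+//
+// m := NewMatcher([]string{"a", "b", "c"}, []string{"a", "c"})
+// ops := m.GetOpCodes() // 'e' for "a", 'd' for "b", 'e' for "c"
+// r := m.Ratio()        // 2*2/(3+2) = 0.8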
+
+func NewMatcherWithJunk(a, b []string, autoJunk bool,
+ isJunk func(string) bool,
+) *SequenceMatcher {
+ m := SequenceMatcher{IsJunk: isJunk, autoJunk: autoJunk}
+ m.SetSeqs(a, b)
+ return &m
+}
+
+// Set two sequences to be compared.
+func (m *SequenceMatcher) SetSeqs(a, b []string) {
+ m.SetSeq1(a)
+ m.SetSeq2(b)
+}
+
+// Set the first sequence to be compared. The second sequence to be compared is
+// not changed.
+//
+// SequenceMatcher computes and caches detailed information about the second
+// sequence, so if you want to compare one sequence S against many sequences,
+// use .SetSeq2(s) once and call .SetSeq1(x) repeatedly for each of the other
+// sequences.
+//
+// See also SetSeqs() and SetSeq2().
+func (m *SequenceMatcher) SetSeq1(a []string) {
+ if &a == &m.a {
+ return
+ }
+ m.a = a
+ m.matchingBlocks = nil
+ m.opCodes = nil
+}
+
+// Set the second sequence to be compared. The first sequence to be compared is
+// not changed.
+func (m *SequenceMatcher) SetSeq2(b []string) {
+ if &b == &m.b {
+ return
+ }
+ m.b = b
+ m.matchingBlocks = nil
+ m.opCodes = nil
+ m.fullBCount = nil
+ m.chainB()
+}
+
+func (m *SequenceMatcher) chainB() {
+ // Populate line -> index mapping
+ b2j := map[string][]int{}
+ for i, s := range m.b {
+ indices := b2j[s]
+ indices = append(indices, i)
+ b2j[s] = indices
+ }
+
+ // Purge junk elements
+ m.bJunk = map[string]struct{}{}
+ if m.IsJunk != nil {
+ junk := m.bJunk
+ for s := range b2j {
+ if m.IsJunk(s) {
+ junk[s] = struct{}{}
+ }
+ }
+ for s := range junk {
+ delete(b2j, s)
+ }
+ }
+
+ // Purge remaining popular elements
+ popular := map[string]struct{}{}
+ n := len(m.b)
+ if m.autoJunk && n >= 200 {
+ ntest := n/100 + 1
+ for s, indices := range b2j {
+ if len(indices) > ntest {
+ popular[s] = struct{}{}
+ }
+ }
+ for s := range popular {
+ delete(b2j, s)
+ }
+ }
+ m.bPopular = popular
+ m.b2j = b2j
+}
+
+func (m *SequenceMatcher) isBJunk(s string) bool {
+ _, ok := m.bJunk[s]
+ return ok
+}
+
+// Find longest matching block in a[alo:ahi] and b[blo:bhi].
+//
+// If IsJunk is not defined:
+//
+// Return (i,j,k) such that a[i:i+k] is equal to b[j:j+k], where
+//
+// alo <= i <= i+k <= ahi
+// blo <= j <= j+k <= bhi
+//
+// and for all (i',j',k') meeting those conditions,
+//
+// k >= k'
+// i <= i'
+// and if i == i', j <= j'
+//
+// In other words, of all maximal matching blocks, return one that
+// starts earliest in a, and of all those maximal matching blocks that
+// start earliest in a, return the one that starts earliest in b.
+//
+// If IsJunk is defined, first the longest matching block is
+// determined as above, but with the additional restriction that no
+// junk element appears in the block. Then that block is extended as
+// far as possible by matching (only) junk elements on both sides. So
+// the resulting block never matches on junk except as identical junk
+// happens to be adjacent to an "interesting" match.
+//
+// If no blocks match, return (alo, blo, 0).
+func (m *SequenceMatcher) findLongestMatch(alo, ahi, blo, bhi int) Match {
+ // CAUTION: stripping common prefix or suffix would be incorrect.
+ // E.g.,
+ // ab
+ // acab
+ // Longest matching block is "ab", but if common prefix is
+ // stripped, it's "a" (tied with "b"). UNIX(tm) diff does so
+ // strip, so ends up claiming that ab is changed to acab by
+ // inserting "ca" in the middle. That's minimal but unintuitive:
+ // "it's obvious" that someone inserted "ac" at the front.
+ // Windiff ends up at the same place as diff, but by pairing up
+ // the unique 'b's and then matching the first two 'a's.
+ besti, bestj, bestsize := alo, blo, 0
+
+ // find longest junk-free match
+ // during an iteration of the loop, j2len[j] = length of longest
+ // junk-free match ending with a[i-1] and b[j]
+ j2len := map[int]int{}
+ for i := alo; i != ahi; i++ {
+ // look at all instances of a[i] in b; note that because
+ // b2j has no junk keys, the loop is skipped if a[i] is junk
+ newj2len := map[int]int{}
+ for _, j := range m.b2j[m.a[i]] {
+ // a[i] matches b[j]
+ if j < blo {
+ continue
+ }
+ if j >= bhi {
+ break
+ }
+ k := j2len[j-1] + 1
+ newj2len[j] = k
+ if k > bestsize {
+ besti, bestj, bestsize = i-k+1, j-k+1, k
+ }
+ }
+ j2len = newj2len
+ }
+
+ // Extend the best by non-junk elements on each end. In particular,
+ // "popular" non-junk elements aren't in b2j, which greatly speeds
+ // the inner loop above, but also means "the best" match so far
+ // doesn't contain any junk *or* popular non-junk elements.
+ for besti > alo && bestj > blo && !m.isBJunk(m.b[bestj-1]) &&
+ m.a[besti-1] == m.b[bestj-1] {
+ besti, bestj, bestsize = besti-1, bestj-1, bestsize+1
+ }
+ for besti+bestsize < ahi && bestj+bestsize < bhi &&
+ !m.isBJunk(m.b[bestj+bestsize]) &&
+ m.a[besti+bestsize] == m.b[bestj+bestsize] {
+ bestsize++
+ }
+
+ // Now that we have a wholly interesting match (albeit possibly
+ // empty!), we may as well suck up the matching junk on each
+ // side of it too. Can't think of a good reason not to, and it
+ // saves post-processing the (possibly considerable) expense of
+ // figuring out what to do with it. In the case of an empty
+ // interesting match, this is clearly the right thing to do,
+ // because no other kind of match is possible in the regions.
+ for besti > alo && bestj > blo && m.isBJunk(m.b[bestj-1]) &&
+ m.a[besti-1] == m.b[bestj-1] {
+ besti, bestj, bestsize = besti-1, bestj-1, bestsize+1
+ }
+ for besti+bestsize < ahi && bestj+bestsize < bhi &&
+ m.isBJunk(m.b[bestj+bestsize]) &&
+ m.a[besti+bestsize] == m.b[bestj+bestsize] {
+ bestsize++
+ }
+
+ return Match{A: besti, B: bestj, Size: bestsize}
+}
+
+// Return list of triples describing matching subsequences.
+//
+// Each triple is of the form (i, j, n), and means that
+// a[i:i+n] == b[j:j+n]. The triples are monotonically increasing in
+// i and in j. It's also guaranteed that if (i, j, n) and (i', j', n') are
+// adjacent triples in the list, and the second is not the last triple in the
+// list, then i+n != i' or j+n != j'. IOW, adjacent triples never describe
+// adjacent equal blocks.
+//
+// The last triple is a dummy, (len(a), len(b), 0), and is the only
+// triple with n==0.
+func (m *SequenceMatcher) GetMatchingBlocks() []Match {
+ if m.matchingBlocks != nil {
+ return m.matchingBlocks
+ }
+
+ var matchBlocks func(alo, ahi, blo, bhi int, matched []Match) []Match
+ matchBlocks = func(alo, ahi, blo, bhi int, matched []Match) []Match {
+ match := m.findLongestMatch(alo, ahi, blo, bhi)
+ i, j, k := match.A, match.B, match.Size
+ if match.Size > 0 {
+ if alo < i && blo < j {
+ matched = matchBlocks(alo, i, blo, j, matched)
+ }
+ matched = append(matched, match)
+ if i+k < ahi && j+k < bhi {
+ matched = matchBlocks(i+k, ahi, j+k, bhi, matched)
+ }
+ }
+ return matched
+ }
+ matched := matchBlocks(0, len(m.a), 0, len(m.b), nil)
+
+ // It's possible that we have adjacent equal blocks in the
+ // matching_blocks list now.
+ nonAdjacent := []Match{}
+ i1, j1, k1 := 0, 0, 0
+ for _, b := range matched {
+ // Is this block adjacent to i1, j1, k1?
+ i2, j2, k2 := b.A, b.B, b.Size
+ if i1+k1 == i2 && j1+k1 == j2 {
+ // Yes, so collapse them -- this just increases the length of
+ // the first block by the length of the second, and the first
+ // block so lengthened remains the block to compare against.
+ k1 += k2
+ } else {
+ // Not adjacent. Remember the first block (k1==0 means it's
+ // the dummy we started with), and make the second block the
+ // new block to compare against.
+ if k1 > 0 {
+ nonAdjacent = append(nonAdjacent, Match{i1, j1, k1})
+ }
+ i1, j1, k1 = i2, j2, k2
+ }
+ }
+ if k1 > 0 {
+ nonAdjacent = append(nonAdjacent, Match{i1, j1, k1})
+ }
+
+ nonAdjacent = append(nonAdjacent, Match{len(m.a), len(m.b), 0})
+ m.matchingBlocks = nonAdjacent
+ return m.matchingBlocks
+}
+
+// Return list of 5-tuples describing how to turn a into b.
+//
+// Each tuple is of the form (tag, i1, i2, j1, j2). The first tuple
+// has i1 == j1 == 0, and remaining tuples have i1 == the i2 from the
+// tuple preceding it, and likewise for j1 == the previous j2.
+//
+// The tags are characters, with these meanings:
+//
+// 'r' (replace): a[i1:i2] should be replaced by b[j1:j2]
+//
+// 'd' (delete): a[i1:i2] should be deleted, j1==j2 in this case.
+//
+// 'i' (insert): b[j1:j2] should be inserted at a[i1:i1], i1==i2 in this case.
+//
+// 'e' (equal): a[i1:i2] == b[j1:j2]
+func (m *SequenceMatcher) GetOpCodes() []OpCode {
+ if m.opCodes != nil {
+ return m.opCodes
+ }
+ i, j := 0, 0
+ matching := m.GetMatchingBlocks()
+ opCodes := make([]OpCode, 0, len(matching))
+ for _, m := range matching {
+ // invariant: we've pumped out correct diffs to change
+ // a[:i] into b[:j], and the next matching block is
+ // a[ai:ai+size] == b[bj:bj+size]. So we need to pump
+ // out a diff to change a[i:ai] into b[j:bj], pump out
+ // the matching block, and move (i,j) beyond the match
+ ai, bj, size := m.A, m.B, m.Size
+ tag := byte(0)
+ if i < ai && j < bj {
+ tag = 'r'
+ } else if i < ai {
+ tag = 'd'
+ } else if j < bj {
+ tag = 'i'
+ }
+ if tag > 0 {
+ opCodes = append(opCodes, OpCode{tag, i, ai, j, bj})
+ }
+ i, j = ai+size, bj+size
+ // the list of matching blocks is terminated by a
+ // sentinel with size 0
+ if size > 0 {
+ opCodes = append(opCodes, OpCode{'e', ai, i, bj, j})
+ }
+ }
+ m.opCodes = opCodes
+ return m.opCodes
+}
+
+// Isolate change clusters by eliminating ranges with no changes.
+//
+// Return a generator of groups with up to n lines of context.
+// Each group is in the same format as returned by GetOpCodes().
+func (m *SequenceMatcher) GetGroupedOpCodes(n int) [][]OpCode {
+ if n < 0 {
+ n = 3
+ }
+ codes := m.GetOpCodes()
+ if len(codes) == 0 {
+ codes = []OpCode{{'e', 0, 1, 0, 1}}
+ }
+ // Fixup leading and trailing groups if they show no changes.
+ if codes[0].Tag == 'e' {
+ c := codes[0]
+ i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
+ codes[0] = OpCode{c.Tag, max(i1, i2-n), i2, max(j1, j2-n), j2}
+ }
+ if codes[len(codes)-1].Tag == 'e' {
+ c := codes[len(codes)-1]
+ i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
+ codes[len(codes)-1] = OpCode{c.Tag, i1, min(i2, i1+n), j1, min(j2, j1+n)}
+ }
+ nn := n + n
+ groups := [][]OpCode{}
+ group := []OpCode{}
+ for _, c := range codes {
+ i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
+ // End the current group and start a new one whenever
+ // there is a large range with no changes.
+ if c.Tag == 'e' && i2-i1 > nn {
+ group = append(group, OpCode{
+ c.Tag, i1, min(i2, i1+n),
+ j1, min(j2, j1+n),
+ })
+ groups = append(groups, group)
+ group = []OpCode{}
+ i1, j1 = max(i1, i2-n), max(j1, j2-n)
+ }
+ group = append(group, OpCode{c.Tag, i1, i2, j1, j2})
+ }
+ if len(group) > 0 && !(len(group) == 1 && group[0].Tag == 'e') {
+ groups = append(groups, group)
+ }
+ return groups
+}
+
+// Return a measure of the sequences' similarity (float in [0,1]).
+//
+// Where T is the total number of elements in both sequences, and
+// M is the number of matches, this is 2.0*M / T.
+// Note that this is 1 if the sequences are identical, and 0 if
+// they have nothing in common.
+//
+// .Ratio() is expensive to compute if you haven't already computed
+// .GetMatchingBlocks() or .GetOpCodes(), in which case you may
+// want to try .QuickRatio() or .RealQuickRatio() first to get an
+// upper bound.
+func (m *SequenceMatcher) Ratio() float64 {
+ matches := 0
+ for _, m := range m.GetMatchingBlocks() {
+ matches += m.Size
+ }
+ return calculateRatio(matches, len(m.a)+len(m.b))
+}
+
+// Return an upper bound on ratio() relatively quickly.
+//
+// This isn't defined beyond that it is an upper bound on .Ratio(), and
+// is faster to compute.
+func (m *SequenceMatcher) QuickRatio() float64 {
+ // viewing a and b as multisets, set matches to the cardinality
+ // of their intersection; this counts the number of matches
+ // without regard to order, so is clearly an upper bound
+ if m.fullBCount == nil {
+ m.fullBCount = map[string]int{}
+ for _, s := range m.b {
+ m.fullBCount[s]++
+ }
+ }
+
+ // avail[x] is the number of times x appears in 'b' less the
+ // number of times we've seen it in 'a' so far ... kinda
+ avail := map[string]int{}
+ matches := 0
+ for _, s := range m.a {
+ n, ok := avail[s]
+ if !ok {
+ n = m.fullBCount[s]
+ }
+ avail[s] = n - 1
+ if n > 0 {
+ matches++
+ }
+ }
+ return calculateRatio(matches, len(m.a)+len(m.b))
+}
+
+// Return an upper bound on ratio() very quickly.
+//
+// This isn't defined beyond that it is an upper bound on .Ratio(), and
+// is faster to compute than either .Ratio() or .QuickRatio().
+func (m *SequenceMatcher) RealQuickRatio() float64 {
+ la, lb := len(m.a), len(m.b)
+ return calculateRatio(min(la, lb), la+lb)
+}
+
+// Convert range to the "ed" format
+func formatRangeUnified(start, stop int) string {
+ // Per the diff spec at http://www.unix.org/single_unix_specification/
+ beginning := start + 1 // lines start numbering with one
+ length := stop - start
+ if length == 1 {
+ return fmt.Sprintf("%d", beginning)
+ }
+ if length == 0 {
+ beginning-- // empty ranges begin at line just before the range
+ }
+ return fmt.Sprintf("%d,%d", beginning, length)
+}
+
+// Unified diff parameters
+type UnifiedDiff struct {
+ A []string // First sequence lines
+ FromFile string // First file name
+ FromDate string // First file time
+ B []string // Second sequence lines
+ ToFile string // Second file name
+ ToDate string // Second file time
+ Eol string // Headers end of line, defaults to LF
+ Context int // Number of context lines
+}
+
+// Compare two sequences of lines; generate the delta as a unified diff.
+//
+// Unified diffs are a compact way of showing line changes and a few
+// lines of context. The number of context lines is set by 'n' which
+// defaults to three.
+//
+// By default, the diff control lines (those with ---, +++, or @@) are
+// created with a trailing newline. This is helpful so that inputs
+// created from file.readlines() result in diffs that are suitable for
+// file.writelines() since both the inputs and outputs have trailing
+// newlines.
+//
+// For inputs that do not have trailing newlines, set the lineterm
+// argument to "" so that the output will be uniformly newline free.
+//
+// The unidiff format normally has a header for filenames and modification
+// times. Any or all of these may be specified using strings for
+// 'fromfile', 'tofile', 'fromfiledate', and 'tofiledate'.
+// The modification times are normally expressed in the ISO 8601 format.
+func WriteUnifiedDiff(writer io.Writer, diff UnifiedDiff) error {
+ buf := bufio.NewWriter(writer)
+ defer buf.Flush()
+ wf := func(format string, args ...interface{}) error {
+ _, err := buf.WriteString(fmt.Sprintf(format, args...))
+ return err
+ }
+ ws := func(s string) error {
+ _, err := buf.WriteString(s)
+ return err
+ }
+
+ if len(diff.Eol) == 0 {
+ diff.Eol = "\n"
+ }
+
+ started := false
+ m := NewMatcher(diff.A, diff.B)
+ for _, g := range m.GetGroupedOpCodes(diff.Context) {
+ if !started {
+ started = true
+ fromDate := ""
+ if len(diff.FromDate) > 0 {
+ fromDate = "\t" + diff.FromDate
+ }
+ toDate := ""
+ if len(diff.ToDate) > 0 {
+ toDate = "\t" + diff.ToDate
+ }
+ if diff.FromFile != "" || diff.ToFile != "" {
+ err := wf("--- %s%s%s", diff.FromFile, fromDate, diff.Eol)
+ if err != nil {
+ return err
+ }
+ err = wf("+++ %s%s%s", diff.ToFile, toDate, diff.Eol)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ first, last := g[0], g[len(g)-1]
+ range1 := formatRangeUnified(first.I1, last.I2)
+ range2 := formatRangeUnified(first.J1, last.J2)
+ if err := wf("@@ -%s +%s @@%s", range1, range2, diff.Eol); err != nil {
+ return err
+ }
+ for _, c := range g {
+ i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
+ if c.Tag == 'e' {
+ for _, line := range diff.A[i1:i2] {
+ if err := ws(" " + line); err != nil {
+ return err
+ }
+ }
+ continue
+ }
+ if c.Tag == 'r' || c.Tag == 'd' {
+ for _, line := range diff.A[i1:i2] {
+ if err := ws("-" + line); err != nil {
+ return err
+ }
+ }
+ }
+ if c.Tag == 'r' || c.Tag == 'i' {
+ for _, line := range diff.B[j1:j2] {
+ if err := ws("+" + line); err != nil {
+ return err
+ }
+ }
+ }
+ }
+ }
+ return nil
+}
+
+// Like WriteUnifiedDiff but returns the diff as a string.
+func GetUnifiedDiffString(diff UnifiedDiff) (string, error) {
+ w := &bytes.Buffer{}
+ err := WriteUnifiedDiff(w, diff)
+ return w.String(), err
+}
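+
+// Hedged usage sketch (file names and contents are placeholders):
+//
+// diff := UnifiedDiff{
+//     A:        SplitLines("foo\nbar\n"),
+//     B:        SplitLines("foo\nbaz\n"),
+//     FromFile: "before.txt",
+//     ToFile:   "after.txt",
+//     Context:  3,
+// }
+// text, err := GetUnifiedDiffString(diff)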
+
+// Split a string on "\n" while preserving them. The output can be used
+// as input for UnifiedDiff and ContextDiff structures.
+func SplitLines(s string) []string {
+ lines := strings.SplitAfter(s, "\n")
+ lines[len(lines)-1] += "\n"
+ return lines
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/internal/go_collector_options.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/go_collector_options.go
new file mode 100644
index 0000000..a4fa6ea
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/internal/go_collector_options.go
@@ -0,0 +1,34 @@
+// Copyright 2021 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package internal
+
+import "regexp"
+
+type GoCollectorRule struct {
+ Matcher *regexp.Regexp
+ Deny bool
+}
+
+// GoCollectorOptions should not be used directly by anything except the `collectors` package.
+// Use it via the collectors package instead. See issue
+// https://github.com/prometheus/client_golang/issues/1030.
+//
+// This is internal, so external users can only use it via the `collectors.WithGoCollector*` methods.
+type GoCollectorOptions struct {
+ DisableMemStatsLikeMetrics bool
+ RuntimeMetricSumForHist map[string]string
+ RuntimeMetricRules []GoCollectorRule
+}
+
+var GoCollectorDefaultRuntimeMetrics = regexp.MustCompile(`/gc/gogc:percent|/gc/gomemlimit:bytes|/sched/gomaxprocs:threads`)
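+
+// Illustrative rule (the regexp is an example only, not a default of this
+// package): deny every /godebug/ runtime metric.
+//
+// rule := GoCollectorRule{
+//     Matcher: regexp.MustCompile(`^/godebug/.*`),
+//     Deny:    true,
+// }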
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go
new file mode 100644
index 0000000..97d17d6
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go
@@ -0,0 +1,142 @@
+// Copyright 2021 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build go1.17
+// +build go1.17
+
+package internal
+
+import (
+ "math"
+ "path"
+ "runtime/metrics"
+ "strings"
+
+ "github.com/prometheus/common/model"
+)
+
+// RuntimeMetricsToProm produces a Prometheus metric name from a runtime/metrics
+// metric description and validates whether the metric is suitable for integration
+// with Prometheus.
+//
+// Returns false if a name could not be produced, or if Prometheus does not understand
+// the runtime/metrics Kind.
+//
+// Note that the main reason a name couldn't be produced is if the runtime/metrics
+// package exports a name with characters outside the valid Prometheus metric name
+// character set. This is theoretically possible, but should never happen in practice.
+// Still, don't rely on it.
+func RuntimeMetricsToProm(d *metrics.Description) (string, string, string, bool) {
+ namespace := "go"
+
+ comp := strings.SplitN(d.Name, ":", 2)
+ key := comp[0]
+ unit := comp[1]
+
+ // The last path element in the key is the name,
+ // the rest is the subsystem.
+ subsystem := path.Dir(key[1:] /* remove leading / */)
+ name := path.Base(key)
+
+ // subsystem is translated by replacing all / and - with _.
+ subsystem = strings.ReplaceAll(subsystem, "/", "_")
+ subsystem = strings.ReplaceAll(subsystem, "-", "_")
+
+ // unit is translated assuming that the unit contains no
+ // non-ASCII characters.
+ unit = strings.ReplaceAll(unit, "-", "_")
+ unit = strings.ReplaceAll(unit, "*", "_")
+ unit = strings.ReplaceAll(unit, "/", "_per_")
+
+ // name has - replaced with _ and is concatenated with the unit and
+ // other data.
+ name = strings.ReplaceAll(name, "-", "_")
+ name += "_" + unit
+ if d.Cumulative && d.Kind != metrics.KindFloat64Histogram {
+ name += "_total"
+ }
+
+ valid := model.IsValidMetricName(model.LabelValue(namespace + "_" + subsystem + "_" + name))
+ switch d.Kind {
+ case metrics.KindUint64:
+ case metrics.KindFloat64:
+ case metrics.KindFloat64Histogram:
+ default:
+ valid = false
+ }
+ return namespace, subsystem, name, valid
+}
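+
+// Worked example (metric name follows the runtime/metrics convention; shown
+// here for illustration): "/gc/heap/allocs:bytes", a cumulative uint64 metric,
+// yields namespace "go", subsystem "gc_heap", and name "allocs_bytes_total",
+// i.e. go_gc_heap_allocs_bytes_total.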
+
+// RuntimeMetricsBucketsForUnit takes a set of buckets obtained for a runtime/metrics histogram
+// type (so, lower-bound inclusive) and a unit from a runtime/metrics name, and produces
+// a reduced set of buckets. This function always removes any -Inf bucket as it's represented
+// as the bottom-most upper-bound inclusive bucket in Prometheus.
+func RuntimeMetricsBucketsForUnit(buckets []float64, unit string) []float64 {
+ switch unit {
+ case "bytes":
+ // Re-bucket as powers of 2.
+ return reBucketExp(buckets, 2)
+ case "seconds":
+ // Re-bucket as powers of 10 and then merge all buckets greater
+ // than 1 second into the +Inf bucket.
+ b := reBucketExp(buckets, 10)
+ for i := range b {
+ if b[i] <= 1 {
+ continue
+ }
+ b[i] = math.Inf(1)
+ b = b[:i+1]
+ break
+ }
+ return b
+ }
+ return buckets
+}
+
+// reBucketExp takes a list of bucket boundaries (lower bound inclusive) and
+// downsamples the buckets to those a multiple of base apart. The end result
+// is a roughly exponential (in many cases, perfectly exponential) bucketing
+// scheme.
+func reBucketExp(buckets []float64, base float64) []float64 {
+ bucket := buckets[0]
+ var newBuckets []float64
+ // We may see a -Inf here, in which case, add it and skip it
+ // since we risk producing NaNs otherwise.
+ //
+ // We need to preserve -Inf values to maintain runtime/metrics
+ // conventions. We'll strip it out later.
+ if bucket == math.Inf(-1) {
+ newBuckets = append(newBuckets, bucket)
+ buckets = buckets[1:]
+ bucket = buckets[0]
+ }
+ // From now on, bucket should always have a non-Inf value because
+ // Infs are only ever at the ends of the bucket lists, so
+ // arithmetic operations on it are non-NaN.
+ for i := 1; i < len(buckets); i++ {
+ if bucket >= 0 && buckets[i] < bucket*base {
+ // The next bucket we want to include is at least bucket*base.
+ continue
+ } else if bucket < 0 && buckets[i] < bucket/base {
+ // In this case the bucket we're targeting is negative, and since
+ // we're ascending through buckets here, we need to divide to get
+ // closer to zero exponentially.
+ continue
+ }
+ // The +Inf bucket will always be the last one, and we'll always
+ // end up including it: +Inf is never smaller than bucket*base (or
+ // bucket/base), so it never hits a continue above, becomes the final
+ // value of bucket, and is appended by the return below.
+ newBuckets = append(newBuckets, bucket)
+ bucket = buckets[i]
+ }
+ return append(newBuckets, bucket)
+}
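+
+// Worked example (bucket bounds invented for illustration): with base 2,
+// reBucketExp([]float64{1, 2, 3, 4, 5, 6, 7, 8}, 2) keeps only
+// []float64{1, 2, 4, 8}, since each retained bound is at least base times
+// the previous one.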
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go
new file mode 100644
index 0000000..6515c11
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go
@@ -0,0 +1,101 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package internal
+
+import (
+ "sort"
+
+ dto "github.com/prometheus/client_model/go"
+)
+
+// LabelPairSorter implements sort.Interface. It is used to sort a slice of
+// dto.LabelPair pointers.
+type LabelPairSorter []*dto.LabelPair
+
+func (s LabelPairSorter) Len() int {
+ return len(s)
+}
+
+func (s LabelPairSorter) Swap(i, j int) {
+ s[i], s[j] = s[j], s[i]
+}
+
+func (s LabelPairSorter) Less(i, j int) bool {
+ return s[i].GetName() < s[j].GetName()
+}
+
+// MetricSorter is a sortable slice of *dto.Metric.
+type MetricSorter []*dto.Metric
+
+func (s MetricSorter) Len() int {
+ return len(s)
+}
+
+func (s MetricSorter) Swap(i, j int) {
+ s[i], s[j] = s[j], s[i]
+}
+
+func (s MetricSorter) Less(i, j int) bool {
+ if len(s[i].Label) != len(s[j].Label) {
+ // This should not happen. The metrics are
+ // inconsistent. However, we have to deal with it, as
+ // people might use custom collectors or metric family injection
+ // to create inconsistent metrics. So let's simply compare the
+ // number of labels in this case. That will still yield
+ // reproducible sorting.
+ return len(s[i].Label) < len(s[j].Label)
+ }
+ for n, lp := range s[i].Label {
+ vi := lp.GetValue()
+ vj := s[j].Label[n].GetValue()
+ if vi != vj {
+ return vi < vj
+ }
+ }
+
+ // We should never arrive here. Multiple metrics with the same
+ // label set in the same scrape will lead to undefined ingestion
+ // behavior. However, as above, we have to provide stable sorting
+ // here, even for inconsistent metrics. So sort equal metrics
+ // by their timestamp, with missing timestamps (implying "now")
+ // coming last.
+ if s[i].TimestampMs == nil {
+ return false
+ }
+ if s[j].TimestampMs == nil {
+ return true
+ }
+ return s[i].GetTimestampMs() < s[j].GetTimestampMs()
+}
+
+// NormalizeMetricFamilies returns a MetricFamily slice with empty
+// MetricFamilies pruned and the remaining MetricFamilies sorted by name within
+// the slice, with the contained Metrics sorted within each MetricFamily.
+func NormalizeMetricFamilies(metricFamiliesByName map[string]*dto.MetricFamily) []*dto.MetricFamily {
+ for _, mf := range metricFamiliesByName {
+ sort.Sort(MetricSorter(mf.Metric))
+ }
+ names := make([]string, 0, len(metricFamiliesByName))
+ for name, mf := range metricFamiliesByName {
+ if len(mf.Metric) > 0 {
+ names = append(names, name)
+ }
+ }
+ sort.Strings(names)
+ result := make([]*dto.MetricFamily, 0, len(names))
+ for _, name := range names {
+ result = append(result, metricFamiliesByName[name])
+ }
+ return result
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/labels.go b/vendor/github.com/prometheus/client_golang/prometheus/labels.go
new file mode 100644
index 0000000..c21911f
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/labels.go
@@ -0,0 +1,188 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "errors"
+ "fmt"
+ "strings"
+ "unicode/utf8"
+
+ "github.com/prometheus/common/model"
+)
+
+// Labels represents a collection of label name -> value mappings. This type is
+// commonly used with the With(Labels) and GetMetricWith(Labels) methods of
+// metric vector Collectors, e.g.:
+//
+// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42)
+//
+// The other use-case is the specification of constant label pairs in Opts or to
+// create a Desc.
+type Labels map[string]string
+
+// LabelConstraint normalizes label values.
+type LabelConstraint func(string) string
+
+// ConstrainedLabel represents a label name and the constraint function used
+// to normalize its values. This type is commonly used when constructing
+// metric vector Collectors.
+type ConstrainedLabel struct {
+ Name string
+ Constraint LabelConstraint
+}
+
+// ConstrainableLabels is an interface that allows creating labels that can
+// optionally be constrained.
+//
+// prometheus.V2().NewCounterVec(CounterVecOpts{
+// CounterOpts: {...}, // Usual CounterOpts fields
+// VariableLabels: ConstrainedLabels{
+// {Name: "A"},
+// {Name: "B", Constraint: func(v string) string { ... }},
+// },
+// })
+type ConstrainableLabels interface {
+ compile() *compiledLabels
+ labelNames() []string
+}
+
+// ConstrainedLabels represents a collection of label names, each paired with a
+// constraint function used to normalize its values. This type is commonly used
+// when constructing metric vector Collectors.
+type ConstrainedLabels []ConstrainedLabel
+
+func (cls ConstrainedLabels) compile() *compiledLabels {
+ compiled := &compiledLabels{
+ names: make([]string, len(cls)),
+ labelConstraints: map[string]LabelConstraint{},
+ }
+
+ for i, label := range cls {
+ compiled.names[i] = label.Name
+ if label.Constraint != nil {
+ compiled.labelConstraints[label.Name] = label.Constraint
+ }
+ }
+
+ return compiled
+}
+
+func (cls ConstrainedLabels) labelNames() []string {
+ names := make([]string, len(cls))
+ for i, label := range cls {
+ names[i] = label.Name
+ }
+ return names
+}
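+
+// Illustrative sketch (not part of the upstream file): a ConstrainedLabels
+// value that lower-cases every "method" label while leaving "code" untouched.
+// strings.ToLower already has the func(string) string shape LabelConstraint
+// expects, and such a value can be passed wherever a ConstrainableLabels is
+// accepted (e.g. the VariableLabels field shown above).
+//
+//	labels := ConstrainedLabels{
+//		{Name: "code"},
+//		{Name: "method", Constraint: strings.ToLower},
+//	}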
+
+// UnconstrainedLabels represents a collection of labels without any constraint
+// on their values. Thus, it is simply a collection of label names.
+//
+// UnconstrainedLabels([]string{ "A", "B" })
+//
+// is equivalent to
+//
+// ConstrainedLabels {
+// { Name: "A" },
+// { Name: "B" },
+// }
+type UnconstrainedLabels []string
+
+func (uls UnconstrainedLabels) compile() *compiledLabels {
+ return &compiledLabels{
+ names: uls,
+ }
+}
+
+func (uls UnconstrainedLabels) labelNames() []string {
+ return uls
+}
+
+type compiledLabels struct {
+ names []string
+ labelConstraints map[string]LabelConstraint
+}
+
+func (cls *compiledLabels) compile() *compiledLabels {
+ return cls
+}
+
+func (cls *compiledLabels) labelNames() []string {
+ return cls.names
+}
+
+func (cls *compiledLabels) constrain(labelName, value string) string {
+ if fn, ok := cls.labelConstraints[labelName]; ok && fn != nil {
+ return fn(value)
+ }
+ return value
+}
+
+// reservedLabelPrefix is a prefix which is not legal in user-supplied
+// label names.
+const reservedLabelPrefix = "__"
+
+var errInconsistentCardinality = errors.New("inconsistent label cardinality")
+
+func makeInconsistentCardinalityError(fqName string, labels, labelValues []string) error {
+ return fmt.Errorf(
+ "%w: %q has %d variable labels named %q but %d values %q were provided",
+ errInconsistentCardinality, fqName,
+ len(labels), labels,
+ len(labelValues), labelValues,
+ )
+}
+
+func validateValuesInLabels(labels Labels, expectedNumberOfValues int) error {
+ if len(labels) != expectedNumberOfValues {
+ return fmt.Errorf(
+ "%w: expected %d label values but got %d in %#v",
+ errInconsistentCardinality, expectedNumberOfValues,
+ len(labels), labels,
+ )
+ }
+
+ for name, val := range labels {
+ if !utf8.ValidString(val) {
+ return fmt.Errorf("label %s: value %q is not valid UTF-8", name, val)
+ }
+ }
+
+ return nil
+}
+
+func validateLabelValues(vals []string, expectedNumberOfValues int) error {
+ if len(vals) != expectedNumberOfValues {
+ // The call below makes vals escape, copy them to avoid that.
+ vals := append([]string(nil), vals...)
+ return fmt.Errorf(
+ "%w: expected %d label values but got %d in %#v",
+ errInconsistentCardinality, expectedNumberOfValues,
+ len(vals), vals,
+ )
+ }
+
+ for _, val := range vals {
+ if !utf8.ValidString(val) {
+ return fmt.Errorf("label value %q is not valid UTF-8", val)
+ }
+ }
+
+ return nil
+}
+
+func checkLabelName(l string) bool {
+ return model.LabelName(l).IsValid() && !strings.HasPrefix(l, reservedLabelPrefix)
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/metric.go b/vendor/github.com/prometheus/client_golang/prometheus/metric.go
new file mode 100644
index 0000000..9d9b81a
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/metric.go
@@ -0,0 +1,257 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "errors"
+ "math"
+ "sort"
+ "strings"
+ "time"
+
+ dto "github.com/prometheus/client_model/go"
+ "github.com/prometheus/common/model"
+ "google.golang.org/protobuf/proto"
+)
+
+var separatorByteSlice = []byte{model.SeparatorByte} // For convenient use with xxhash.
+
+// A Metric models a single sample value with its meta data being exported to
+// Prometheus. Implementations of Metric in this package are Gauge, Counter,
+// Histogram, Summary, and Untyped.
+type Metric interface {
+ // Desc returns the descriptor for the Metric. This method idempotently
+ // returns the same descriptor throughout the lifetime of the
+ // Metric. The returned descriptor is immutable by contract. A Metric
+ // unable to describe itself must return an invalid descriptor (created
+ // with NewInvalidDesc).
+ Desc() *Desc
+ // Write encodes the Metric into a "Metric" Protocol Buffer data
+ // transmission object.
+ //
+ // Metric implementations must observe concurrency safety as reads of
+ // this metric may occur at any time, and any blocking occurs at the
+ // expense of total performance of rendering all registered
+ // metrics. Ideally, Metric implementations should support concurrent
+ // readers.
+ //
+ // While populating dto.Metric, it is the responsibility of the
+ // implementation to ensure validity of the Metric protobuf (like valid
+ // UTF-8 strings or syntactically valid metric and label names). It is
+ // recommended to sort labels lexicographically. Callers of Write should
+ // still make sure of sorting if they depend on it.
+ Write(*dto.Metric) error
+ // TODO(beorn7): The original rationale of passing in a pre-allocated
+ // dto.Metric protobuf to save allocations has disappeared. The
+ // signature of this method should be changed to "Write() (*dto.Metric,
+ // error)".
+}
+
+// Opts bundles the options for creating most Metric types. Each metric
+// implementation XXX has its own XXXOpts type, but in most cases, it is just
+// an alias of this type (which might change when the requirement arises).
+//
+// It is mandatory to set Name to a non-empty string. All other fields are
+// optional and can safely be left at their zero value, although it is strongly
+// encouraged to set a Help string.
+type Opts struct {
+ // Namespace, Subsystem, and Name are components of the fully-qualified
+ // name of the Metric (created by joining these components with
+ // "_"). Only Name is mandatory, the others merely help structuring the
+ // name. Note that the fully-qualified name of the metric must be a
+ // valid Prometheus metric name.
+ Namespace string
+ Subsystem string
+ Name string
+
+ // Help provides information about this metric.
+ //
+ // Metrics with the same fully-qualified name must have the same Help
+ // string.
+ Help string
+
+ // ConstLabels are used to attach fixed labels to this metric. Metrics
+ // with the same fully-qualified name must have the same label names in
+ // their ConstLabels.
+ //
+ // ConstLabels are only used rarely. In particular, do not use them to
+ // attach the same labels to all your metrics. Those use cases are
+ // better covered by target labels set by the scraping Prometheus
+ // server, or by one specific metric (e.g. a build_info or a
+ // machine_role metric). See also
+ // https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels-not-static-scraped-labels
+ ConstLabels Labels
+
+ // now is for testing purposes, by default it's time.Now.
+ now func() time.Time
+}
+
+// BuildFQName joins the given three name components by "_". Empty name
+// components are ignored. If the name parameter itself is empty, an empty
+// string is returned, no matter what. Metric implementations included in this
+// library use this function internally to generate the fully-qualified metric
+// name from the name component in their Opts. Users of the library will only
+// need this function if they implement their own Metric or instantiate a Desc
+// (with NewDesc) directly.
+func BuildFQName(namespace, subsystem, name string) string {
+ if name == "" {
+ return ""
+ }
+ switch {
+ case namespace != "" && subsystem != "":
+ return strings.Join([]string{namespace, subsystem, name}, "_")
+ case namespace != "":
+ return strings.Join([]string{namespace, name}, "_")
+ case subsystem != "":
+ return strings.Join([]string{subsystem, name}, "_")
+ }
+ return name
+}
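+
+// Illustrative sketch (not part of the upstream file): how the joining rules
+// above play out for a few inputs.
+//
+//	BuildFQName("ns", "sub", "requests_total") // "ns_sub_requests_total"
+//	BuildFQName("ns", "", "requests_total")    // "ns_requests_total"
+//	BuildFQName("", "", "requests_total")      // "requests_total"
+//	BuildFQName("ns", "sub", "")               // ""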
+
+type invalidMetric struct {
+ desc *Desc
+ err error
+}
+
+// NewInvalidMetric returns a metric whose Write method always returns the
+// provided error. It is useful if a Collector finds itself unable to collect
+// a metric and wishes to report an error to the registry.
+func NewInvalidMetric(desc *Desc, err error) Metric {
+ return &invalidMetric{desc, err}
+}
+
+func (m *invalidMetric) Desc() *Desc { return m.desc }
+
+func (m *invalidMetric) Write(*dto.Metric) error { return m.err }
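+
+// Illustrative sketch (not part of the upstream file): a Collector reporting a
+// collection failure via NewInvalidMetric instead of silently dropping the
+// sample. myCollector, c.desc, and c.readValue are hypothetical names used for
+// illustration only.
+//
+//	func (c *myCollector) Collect(ch chan<- Metric) {
+//		v, err := c.readValue() // hypothetical helper
+//		if err != nil {
+//			ch <- NewInvalidMetric(c.desc, err)
+//			return
+//		}
+//		ch <- MustNewConstMetric(c.desc, GaugeValue, v)
+//	}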
+
+type timestampedMetric struct {
+ Metric
+ t time.Time
+}
+
+func (m timestampedMetric) Write(pb *dto.Metric) error {
+ e := m.Metric.Write(pb)
+ pb.TimestampMs = proto.Int64(m.t.Unix()*1000 + int64(m.t.Nanosecond()/1000000))
+ return e
+}
+
+// NewMetricWithTimestamp returns a new Metric wrapping the provided Metric in a
+// way that it has an explicit timestamp set to the provided Time. This is only
+// useful in rare cases as the timestamp of a Prometheus metric should usually
+// be set by the Prometheus server during scraping. Exceptions include mirroring
+// metrics with given timestamps from other metric
+// sources.
+//
+// NewMetricWithTimestamp works best with MustNewConstMetric,
+// MustNewConstHistogram, and MustNewConstSummary, see example.
+//
+// Currently, the exposition formats used by Prometheus are limited to
+// millisecond resolution. Thus, the provided time will be rounded down to the
+// next full millisecond value.
+func NewMetricWithTimestamp(t time.Time, m Metric) Metric {
+ return timestampedMetric{Metric: m, t: t}
+}
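+
+// Illustrative sketch (not part of the upstream file): mirroring a sample that
+// already carries its own timestamp. The metric name and value are made up.
+//
+//	desc := NewDesc("mirrored_temperature_kelvin", "Temperature from an external source.", nil, nil)
+//	m := MustNewConstMetric(desc, GaugeValue, 298.15)
+//	m = NewMetricWithTimestamp(time.Unix(1700000000, 0), m)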
+
+type withExemplarsMetric struct {
+ Metric
+
+ exemplars []*dto.Exemplar
+}
+
+func (m *withExemplarsMetric) Write(pb *dto.Metric) error {
+ if err := m.Metric.Write(pb); err != nil {
+ return err
+ }
+
+ switch {
+ case pb.Counter != nil:
+ pb.Counter.Exemplar = m.exemplars[len(m.exemplars)-1]
+ case pb.Histogram != nil:
+ for _, e := range m.exemplars {
+ // pb.Histogram.Bucket are sorted by UpperBound.
+ i := sort.Search(len(pb.Histogram.Bucket), func(i int) bool {
+ return pb.Histogram.Bucket[i].GetUpperBound() >= e.GetValue()
+ })
+ if i < len(pb.Histogram.Bucket) {
+ pb.Histogram.Bucket[i].Exemplar = e
+ } else {
+ // The +Inf bucket should be explicitly added if there is an exemplar for it, similar to non-const histogram logic in https://github.com/prometheus/client_golang/blob/main/prometheus/histogram.go#L357-L365.
+ b := &dto.Bucket{
+ CumulativeCount: proto.Uint64(pb.Histogram.GetSampleCount()),
+ UpperBound: proto.Float64(math.Inf(1)),
+ Exemplar: e,
+ }
+ pb.Histogram.Bucket = append(pb.Histogram.Bucket, b)
+ }
+ }
+ default:
+ // TODO(bwplotka): Implement Gauge?
+ return errors.New("cannot inject exemplar into Gauge, Summary or Untyped")
+ }
+
+ return nil
+}
+
+// Exemplar is an easier-to-use, user-facing representation of *dto.Exemplar.
+type Exemplar struct {
+ Value float64
+ Labels Labels
+ // Optional.
+ // The default value (time.Time{}) indicates the timestamp is unset, which
+ // should be understood as time.Now() at the moment the metric is created.
+ Timestamp time.Time
+}
+
+// NewMetricWithExemplars returns a new Metric wrapping the provided Metric with given
+// exemplars. Exemplars are validated.
+//
+// Only the last applicable exemplar from the list is injected.
+// For example, for a Counter this means the last exemplar is injected.
+// For a Histogram, the last applicable exemplar for each bucket is injected.
+//
+// NewMetricWithExemplars works best with MustNewConstMetric and
+// MustNewConstHistogram, see example.
+func NewMetricWithExemplars(m Metric, exemplars ...Exemplar) (Metric, error) {
+ if len(exemplars) == 0 {
+ return nil, errors.New("no exemplar was passed for NewMetricWithExemplars")
+ }
+
+ var (
+ now = time.Now()
+ exs = make([]*dto.Exemplar, len(exemplars))
+ err error
+ )
+ for i, e := range exemplars {
+ ts := e.Timestamp
+ if ts.IsZero() {
+ ts = now
+ }
+ exs[i], err = newExemplar(e.Value, ts, e.Labels)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return &withExemplarsMetric{Metric: m, exemplars: exs}, nil
+}
+
+// MustNewMetricWithExemplars is a version of NewMetricWithExemplars that panics where
+// NewMetricWithExemplars would have returned an error.
+func MustNewMetricWithExemplars(m Metric, exemplars ...Exemplar) Metric {
+ ret, err := NewMetricWithExemplars(m, exemplars...)
+ if err != nil {
+ panic(err)
+ }
+ return ret
+}
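+
+// Illustrative sketch (not part of the upstream file): attaching a trace ID as
+// an exemplar to a const counter. The "trace_id" label name is a common
+// convention, not a requirement.
+//
+//	m := MustNewConstMetric(desc, CounterValue, 42)
+//	m = MustNewMetricWithExemplars(m, Exemplar{
+//		Value:  42,
+//		Labels: Labels{"trace_id": "1234567890abcdef"},
+//	})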
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/num_threads.go b/vendor/github.com/prometheus/client_golang/prometheus/num_threads.go
new file mode 100644
index 0000000..7c12b21
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/num_threads.go
@@ -0,0 +1,25 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !js || wasm
+// +build !js wasm
+
+package prometheus
+
+import "runtime"
+
+// getRuntimeNumThreads returns the number of open OS threads.
+func getRuntimeNumThreads() float64 {
+ n, _ := runtime.ThreadCreateProfile(nil)
+ return float64(n)
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/num_threads_gopherjs.go b/vendor/github.com/prometheus/client_golang/prometheus/num_threads_gopherjs.go
new file mode 100644
index 0000000..7348df0
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/num_threads_gopherjs.go
@@ -0,0 +1,22 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build js && !wasm
+// +build js,!wasm
+
+package prometheus
+
+// getRuntimeNumThreads returns the number of open OS threads.
+func getRuntimeNumThreads() float64 {
+ return 1
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/observer.go b/vendor/github.com/prometheus/client_golang/prometheus/observer.go
new file mode 100644
index 0000000..03773b2
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/observer.go
@@ -0,0 +1,64 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+// Observer is the interface that wraps the Observe method, which is used by
+// Histogram and Summary to add observations.
+type Observer interface {
+ Observe(float64)
+}
+
+// The ObserverFunc type is an adapter to allow the use of ordinary
+// functions as Observers. If f is a function with the appropriate
+// signature, ObserverFunc(f) is an Observer that calls f.
+//
+// This adapter is usually used in connection with the Timer type, and there are
+// two general use cases:
+//
+// The most common one is to use a Gauge as the Observer for a Timer.
+// See the "Gauge" Timer example.
+//
+// The more advanced use case is to create a function that dynamically decides
+// which Observer to use for observing the duration. See the "Complex" Timer
+// example.
+type ObserverFunc func(float64)
+
+// Observe calls f(value). It implements Observer.
+func (f ObserverFunc) Observe(value float64) {
+ f(value)
+}
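+
+// Illustrative sketch (not part of the upstream file): the "Gauge as Observer"
+// pattern mentioned above, wiring a Gauge's Set method into a Timer via
+// ObserverFunc. doWork and lastDuration are hypothetical names.
+//
+//	func doWork(lastDuration Gauge) {
+//		timer := NewTimer(ObserverFunc(lastDuration.Set))
+//		defer timer.ObserveDuration()
+//		// ... the work being timed ...
+//	}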
+
+// ObserverVec is an interface implemented by `HistogramVec` and `SummaryVec`.
+type ObserverVec interface {
+ GetMetricWith(Labels) (Observer, error)
+ GetMetricWithLabelValues(lvs ...string) (Observer, error)
+ With(Labels) Observer
+ WithLabelValues(...string) Observer
+ CurryWith(Labels) (ObserverVec, error)
+ MustCurryWith(Labels) ObserverVec
+
+ Collector
+}
+
+// ExemplarObserver is implemented by Observers that offer the option of
+// observing a value together with an exemplar. Its ObserveWithExemplar method
+// works like the Observe method of an Observer but also replaces the currently
+// saved exemplar (if any) with a new one, created from the provided value, the
+// current time as timestamp, and the provided Labels. Empty Labels will lead to
+// a valid (label-less) exemplar. But if Labels is nil, the current exemplar is
+// left in place. ObserveWithExemplar panics if any of the provided labels are
+// invalid or if the provided labels contain more than 128 runes in total.
+type ExemplarObserver interface {
+ ObserveWithExemplar(value float64, exemplar Labels)
+}
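+
+// Illustrative sketch (not part of the upstream file): recording an
+// observation together with a trace-ID exemplar. requestDuration (an
+// Observer), elapsed, and traceID are hypothetical; the type assertion guards
+// against Observer implementations that do not support exemplars.
+//
+//	if eo, ok := requestDuration.(ExemplarObserver); ok {
+//		eo.ObserveWithExemplar(elapsed.Seconds(), Labels{"trace_id": traceID})
+//	} else {
+//		requestDuration.Observe(elapsed.Seconds())
+//	}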
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go
new file mode 100644
index 0000000..62a4e7a
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go
@@ -0,0 +1,177 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "errors"
+ "fmt"
+ "os"
+ "strconv"
+ "strings"
+)
+
+type processCollector struct {
+ collectFn func(chan<- Metric)
+ pidFn func() (int, error)
+ reportErrors bool
+ cpuTotal *Desc
+ openFDs, maxFDs *Desc
+ vsize, maxVsize *Desc
+ rss *Desc
+ startTime *Desc
+ inBytes, outBytes *Desc
+}
+
+// ProcessCollectorOpts defines the behavior of a process metrics collector
+// created with NewProcessCollector.
+type ProcessCollectorOpts struct {
+ // PidFn returns the PID of the process the collector collects metrics
+ // for. It is called upon each collection. By default, the PID of the
+ // current process is used, as determined on construction time by
+ // calling os.Getpid().
+ PidFn func() (int, error)
+ // If non-empty, each of the collected metrics is prefixed by the
+ // provided string and an underscore ("_").
+ Namespace string
+ // If true, any error encountered during collection is reported as an
+ // invalid metric (see NewInvalidMetric). Otherwise, errors are ignored
+ // and the collected metrics will be incomplete. (Possibly, no metrics
+ // will be collected at all.) While that's usually not desired, it is
+ // appropriate for the common "mix-in" of process metrics, where process
+ // metrics are nice to have, but failing to collect them should not
+ // disrupt the collection of the remaining metrics.
+ ReportErrors bool
+}
+
+// NewProcessCollector is the obsolete version of collectors.NewProcessCollector.
+// See there for documentation.
+//
+// Deprecated: Use collectors.NewProcessCollector instead.
+func NewProcessCollector(opts ProcessCollectorOpts) Collector {
+ ns := ""
+ if len(opts.Namespace) > 0 {
+ ns = opts.Namespace + "_"
+ }
+
+ c := &processCollector{
+ reportErrors: opts.ReportErrors,
+ cpuTotal: NewDesc(
+ ns+"process_cpu_seconds_total",
+ "Total user and system CPU time spent in seconds.",
+ nil, nil,
+ ),
+ openFDs: NewDesc(
+ ns+"process_open_fds",
+ "Number of open file descriptors.",
+ nil, nil,
+ ),
+ maxFDs: NewDesc(
+ ns+"process_max_fds",
+ "Maximum number of open file descriptors.",
+ nil, nil,
+ ),
+ vsize: NewDesc(
+ ns+"process_virtual_memory_bytes",
+ "Virtual memory size in bytes.",
+ nil, nil,
+ ),
+ maxVsize: NewDesc(
+ ns+"process_virtual_memory_max_bytes",
+ "Maximum amount of virtual memory available in bytes.",
+ nil, nil,
+ ),
+ rss: NewDesc(
+ ns+"process_resident_memory_bytes",
+ "Resident memory size in bytes.",
+ nil, nil,
+ ),
+ startTime: NewDesc(
+ ns+"process_start_time_seconds",
+ "Start time of the process since unix epoch in seconds.",
+ nil, nil,
+ ),
+ inBytes: NewDesc(
+ ns+"process_network_receive_bytes_total",
+ "Number of bytes received by the process over the network.",
+ nil, nil,
+ ),
+ outBytes: NewDesc(
+ ns+"process_network_transmit_bytes_total",
+ "Number of bytes sent by the process over the network.",
+ nil, nil,
+ ),
+ }
+
+ if opts.PidFn == nil {
+ c.pidFn = getPIDFn()
+ } else {
+ c.pidFn = opts.PidFn
+ }
+
+ // Set up process metric collection if supported by the runtime.
+ if canCollectProcess() {
+ c.collectFn = c.processCollect
+ } else {
+ c.collectFn = func(ch chan<- Metric) {
+ c.reportError(ch, nil, errors.New("process metrics not supported on this platform"))
+ }
+ }
+
+ return c
+}
+
+// Describe returns all descriptions of the collector.
+func (c *processCollector) Describe(ch chan<- *Desc) {
+ ch <- c.cpuTotal
+ ch <- c.openFDs
+ ch <- c.maxFDs
+ ch <- c.vsize
+ ch <- c.maxVsize
+ ch <- c.rss
+ ch <- c.startTime
+ ch <- c.inBytes
+ ch <- c.outBytes
+}
+
+// Collect returns the current state of all metrics of the collector.
+func (c *processCollector) Collect(ch chan<- Metric) {
+ c.collectFn(ch)
+}
+
+func (c *processCollector) reportError(ch chan<- Metric, desc *Desc, err error) {
+ if !c.reportErrors {
+ return
+ }
+ if desc == nil {
+ desc = NewInvalidDesc(err)
+ }
+ ch <- NewInvalidMetric(desc, err)
+}
+
+// NewPidFileFn returns a function that retrieves a pid from the specified file.
+// It is meant to be used for the PidFn field in ProcessCollectorOpts.
+func NewPidFileFn(pidFilePath string) func() (int, error) {
+ return func() (int, error) {
+ content, err := os.ReadFile(pidFilePath)
+ if err != nil {
+ return 0, fmt.Errorf("can't read pid file %q: %w", pidFilePath, err)
+ }
+ pid, err := strconv.Atoi(strings.TrimSpace(string(content)))
+ if err != nil {
+ return 0, fmt.Errorf("can't parse pid file %q: %w", pidFilePath, err)
+ }
+
+ return pid, nil
+ }
+}
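+
+// Illustrative sketch (not part of the upstream file): collecting process
+// metrics for a process whose PID is written to a pid file. The file path and
+// namespace are made up.
+//
+//	c := NewProcessCollector(ProcessCollectorOpts{
+//		PidFn:     NewPidFileFn("/var/run/myapp.pid"),
+//		Namespace: "myapp",
+//	})
+//	MustRegister(c)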
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_js.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_js.go
new file mode 100644
index 0000000..b1e363d
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_js.go
@@ -0,0 +1,26 @@
+// Copyright 2019 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build js
+// +build js
+
+package prometheus
+
+func canCollectProcess() bool {
+ return false
+}
+
+func (c *processCollector) processCollect(ch chan<- Metric) {
+ // noop on this platform
+ return
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go
new file mode 100644
index 0000000..14d56d2
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go
@@ -0,0 +1,80 @@
+// Copyright 2019 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !windows && !js && !wasip1
+// +build !windows,!js,!wasip1
+
+package prometheus
+
+import (
+ "github.com/prometheus/procfs"
+)
+
+func canCollectProcess() bool {
+ _, err := procfs.NewDefaultFS()
+ return err == nil
+}
+
+func (c *processCollector) processCollect(ch chan<- Metric) {
+ pid, err := c.pidFn()
+ if err != nil {
+ c.reportError(ch, nil, err)
+ return
+ }
+
+ p, err := procfs.NewProc(pid)
+ if err != nil {
+ c.reportError(ch, nil, err)
+ return
+ }
+
+ if stat, err := p.Stat(); err == nil {
+ ch <- MustNewConstMetric(c.cpuTotal, CounterValue, stat.CPUTime())
+ ch <- MustNewConstMetric(c.vsize, GaugeValue, float64(stat.VirtualMemory()))
+ ch <- MustNewConstMetric(c.rss, GaugeValue, float64(stat.ResidentMemory()))
+ if startTime, err := stat.StartTime(); err == nil {
+ ch <- MustNewConstMetric(c.startTime, GaugeValue, startTime)
+ } else {
+ c.reportError(ch, c.startTime, err)
+ }
+ } else {
+ c.reportError(ch, nil, err)
+ }
+
+ if fds, err := p.FileDescriptorsLen(); err == nil {
+ ch <- MustNewConstMetric(c.openFDs, GaugeValue, float64(fds))
+ } else {
+ c.reportError(ch, c.openFDs, err)
+ }
+
+ if limits, err := p.Limits(); err == nil {
+ ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(limits.OpenFiles))
+ ch <- MustNewConstMetric(c.maxVsize, GaugeValue, float64(limits.AddressSpace))
+ } else {
+ c.reportError(ch, nil, err)
+ }
+
+ if netstat, err := p.Netstat(); err == nil {
+ var inOctets, outOctets float64
+ if netstat.IpExt.InOctets != nil {
+ inOctets = *netstat.IpExt.InOctets
+ }
+ if netstat.IpExt.OutOctets != nil {
+ outOctets = *netstat.IpExt.OutOctets
+ }
+ ch <- MustNewConstMetric(c.inBytes, CounterValue, inOctets)
+ ch <- MustNewConstMetric(c.outBytes, CounterValue, outOctets)
+ } else {
+ c.reportError(ch, nil, err)
+ }
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_wasip1.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_wasip1.go
new file mode 100644
index 0000000..d8d9a6d
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_wasip1.go
@@ -0,0 +1,26 @@
+// Copyright 2023 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build wasip1
+// +build wasip1
+
+package prometheus
+
+func canCollectProcess() bool {
+ return false
+}
+
+func (*processCollector) processCollect(chan<- Metric) {
+ // noop on this platform
+ return
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_windows.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_windows.go
new file mode 100644
index 0000000..f973398
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_windows.go
@@ -0,0 +1,116 @@
+// Copyright 2019 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "syscall"
+ "unsafe"
+
+ "golang.org/x/sys/windows"
+)
+
+func canCollectProcess() bool {
+ return true
+}
+
+var (
+ modpsapi = syscall.NewLazyDLL("psapi.dll")
+ modkernel32 = syscall.NewLazyDLL("kernel32.dll")
+
+ procGetProcessMemoryInfo = modpsapi.NewProc("GetProcessMemoryInfo")
+ procGetProcessHandleCount = modkernel32.NewProc("GetProcessHandleCount")
+)
+
+type processMemoryCounters struct {
+ // System interface description
+ // https://docs.microsoft.com/en-us/windows/desktop/api/psapi/ns-psapi-process_memory_counters_ex
+
+ // Refer to the Golang internal implementation
+ // https://golang.org/src/internal/syscall/windows/psapi_windows.go
+ _ uint32
+ PageFaultCount uint32
+ PeakWorkingSetSize uintptr
+ WorkingSetSize uintptr
+ QuotaPeakPagedPoolUsage uintptr
+ QuotaPagedPoolUsage uintptr
+ QuotaPeakNonPagedPoolUsage uintptr
+ QuotaNonPagedPoolUsage uintptr
+ PagefileUsage uintptr
+ PeakPagefileUsage uintptr
+ PrivateUsage uintptr
+}
+
+func getProcessMemoryInfo(handle windows.Handle) (processMemoryCounters, error) {
+ mem := processMemoryCounters{}
+ r1, _, err := procGetProcessMemoryInfo.Call(
+ uintptr(handle),
+ uintptr(unsafe.Pointer(&mem)),
+ uintptr(unsafe.Sizeof(mem)),
+ )
+ if r1 != 1 {
+ return mem, err
+ } else {
+ return mem, nil
+ }
+}
+
+func getProcessHandleCount(handle windows.Handle) (uint32, error) {
+ var count uint32
+ r1, _, err := procGetProcessHandleCount.Call(
+ uintptr(handle),
+ uintptr(unsafe.Pointer(&count)),
+ )
+ if r1 != 1 {
+ return 0, err
+ } else {
+ return count, nil
+ }
+}
+
+func (c *processCollector) processCollect(ch chan<- Metric) {
+ h, err := windows.GetCurrentProcess()
+ if err != nil {
+ c.reportError(ch, nil, err)
+ return
+ }
+
+ var startTime, exitTime, kernelTime, userTime windows.Filetime
+ err = windows.GetProcessTimes(h, &startTime, &exitTime, &kernelTime, &userTime)
+ if err != nil {
+ c.reportError(ch, nil, err)
+ return
+ }
+ ch <- MustNewConstMetric(c.startTime, GaugeValue, float64(startTime.Nanoseconds()/1e9))
+ ch <- MustNewConstMetric(c.cpuTotal, CounterValue, fileTimeToSeconds(kernelTime)+fileTimeToSeconds(userTime))
+
+ mem, err := getProcessMemoryInfo(h)
+ if err != nil {
+ c.reportError(ch, nil, err)
+ return
+ }
+ ch <- MustNewConstMetric(c.vsize, GaugeValue, float64(mem.PrivateUsage))
+ ch <- MustNewConstMetric(c.rss, GaugeValue, float64(mem.WorkingSetSize))
+
+ handles, err := getProcessHandleCount(h)
+ if err != nil {
+ c.reportError(ch, nil, err)
+ return
+ }
+ ch <- MustNewConstMetric(c.openFDs, GaugeValue, float64(handles))
+ ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(16*1024*1024)) // Windows has a hard-coded max limit, not per-process.
+}
+
+func fileTimeToSeconds(ft windows.Filetime) float64 {
+ return float64(uint64(ft.HighDateTime)<<32+uint64(ft.LowDateTime)) / 1e7
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promauto/auto.go b/vendor/github.com/prometheus/client_golang/prometheus/promauto/auto.go
new file mode 100644
index 0000000..58f9659
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/promauto/auto.go
@@ -0,0 +1,376 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package promauto provides alternative constructors for the fundamental
+// Prometheus metric types and their …Vec and …Func variants. The difference to
+// their counterparts in the prometheus package is that the promauto
+// constructors register the Collectors with a registry before returning them.
+// There are two sets of constructors. The constructors in the first set are
+// top-level functions, while the constructors in the other set are methods of
+// the Factory type. The top-level functions return Collectors registered with
+// the global registry (prometheus.DefaultRegisterer), while the methods return
+// Collectors registered with the registry the Factory was constructed with. All
+// constructors panic if the registration fails.
+//
+// The following example is a complete program to create a histogram of normally
+// distributed random numbers from the math/rand package:
+//
+// package main
+//
+// import (
+// "math/rand"
+// "net/http"
+//
+// "github.com/prometheus/client_golang/prometheus"
+// "github.com/prometheus/client_golang/prometheus/promauto"
+// "github.com/prometheus/client_golang/prometheus/promhttp"
+// )
+//
+// var histogram = promauto.NewHistogram(prometheus.HistogramOpts{
+// Name: "random_numbers",
+// Help: "A histogram of normally distributed random numbers.",
+// Buckets: prometheus.LinearBuckets(-3, .1, 61),
+// })
+//
+// func Random() {
+// for {
+// histogram.Observe(rand.NormFloat64())
+// }
+// }
+//
+// func main() {
+// go Random()
+// http.Handle("/metrics", promhttp.Handler())
+// http.ListenAndServe(":1971", nil)
+// }
+//
+// Prometheus's version of a minimal hello-world program:
+//
+// package main
+//
+// import (
+// "fmt"
+// "net/http"
+//
+// "github.com/prometheus/client_golang/prometheus"
+// "github.com/prometheus/client_golang/prometheus/promauto"
+// "github.com/prometheus/client_golang/prometheus/promhttp"
+// )
+//
+// func main() {
+// http.Handle("/", promhttp.InstrumentHandlerCounter(
+// promauto.NewCounterVec(
+// prometheus.CounterOpts{
+// Name: "hello_requests_total",
+// Help: "Total number of hello-world requests by HTTP code.",
+// },
+// []string{"code"},
+// ),
+// http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+// fmt.Fprint(w, "Hello, world!")
+// }),
+// ))
+// http.Handle("/metrics", promhttp.Handler())
+// http.ListenAndServe(":1971", nil)
+// }
+//
+// A Factory is created with the With(prometheus.Registerer) function, which
+// enables two usage patterns. With(prometheus.Registerer) can be called once per
+// line:
+//
+// var (
+// reg = prometheus.NewRegistry()
+// randomNumbers = promauto.With(reg).NewHistogram(prometheus.HistogramOpts{
+// Name: "random_numbers",
+// Help: "A histogram of normally distributed random numbers.",
+// Buckets: prometheus.LinearBuckets(-3, .1, 61),
+// })
+// requestCount = promauto.With(reg).NewCounterVec(
+// prometheus.CounterOpts{
+// Name: "http_requests_total",
+// Help: "Total number of HTTP requests by status code and method.",
+// },
+// []string{"code", "method"},
+// )
+// )
+//
+// Or it can be used to create a Factory once to be used multiple times:
+//
+// var (
+// reg = prometheus.NewRegistry()
+// factory = promauto.With(reg)
+// randomNumbers = factory.NewHistogram(prometheus.HistogramOpts{
+// Name: "random_numbers",
+// Help: "A histogram of normally distributed random numbers.",
+// Buckets: prometheus.LinearBuckets(-3, .1, 61),
+// })
+// requestCount = factory.NewCounterVec(
+// prometheus.CounterOpts{
+// Name: "http_requests_total",
+// Help: "Total number of HTTP requests by status code and method.",
+// },
+// []string{"code", "method"},
+// )
+// )
+//
+// This appears very handy. So why are these constructors locked away in a
+// separate package?
+//
+// The main problem is that registration may fail, e.g. if a metric inconsistent
+// with or equal to the newly to be registered one is already registered.
+// Therefore, the Register method in the prometheus.Registerer interface returns
+// an error, and the same is the case for the top-level prometheus.Register
+// function that registers with the global registry. The prometheus package also
+// provides MustRegister versions for both. They panic if the registration
+// fails, and they clearly call this out by using the Must… idiom. Panicking is
+// problematic in this case because it doesn't just happen on input provided by
+// the caller that is invalid on its own. Things are a bit more subtle here:
+// Metric creation and registration tend to be spread widely over the
+// codebase. It can easily happen that an incompatible metric is added to an
+// unrelated part of the code, and suddenly code that used to work perfectly
+// fine starts to panic (provided that the registration of the newly added
+// metric happens before the registration of the previously existing
+// metric). This may come as an even bigger surprise with the global registry,
+// where simply importing another package can trigger a panic (if the newly
+// imported package registers metrics in its init function). At least, in the
+// prometheus package, creation of metrics and other collectors is separate from
+// registration. You first create the metric, and then you decide explicitly if
+// you want to register it with a local or the global registry, and if you want
+// to handle the error or risk a panic. With the constructors in the promauto
+// package, registration is automatic, and if it fails, it will always
+// panic. Furthermore, the constructors will often be called in the var section
+// of a file, which means that panicking will happen as a side effect of merely
+// importing a package.
+//
+// A separate package allows conservative users to entirely ignore it. And
+// whoever wants to use it will do so explicitly, with an opportunity to read
+// this warning.
+//
+// Enjoy promauto responsibly!
+package promauto
+
+import "github.com/prometheus/client_golang/prometheus"
+
+// NewCounter works like the function of the same name in the prometheus package
+// but it automatically registers the Counter with the
+// prometheus.DefaultRegisterer. If the registration fails, NewCounter panics.
+func NewCounter(opts prometheus.CounterOpts) prometheus.Counter {
+ return With(prometheus.DefaultRegisterer).NewCounter(opts)
+}
+
+// NewCounterVec works like the function of the same name in the prometheus
+// package but it automatically registers the CounterVec with the
+// prometheus.DefaultRegisterer. If the registration fails, NewCounterVec
+// panics.
+func NewCounterVec(opts prometheus.CounterOpts, labelNames []string) *prometheus.CounterVec {
+ return With(prometheus.DefaultRegisterer).NewCounterVec(opts, labelNames)
+}
+
+// NewCounterFunc works like the function of the same name in the prometheus
+// package but it automatically registers the CounterFunc with the
+// prometheus.DefaultRegisterer. If the registration fails, NewCounterFunc
+// panics.
+func NewCounterFunc(opts prometheus.CounterOpts, function func() float64) prometheus.CounterFunc {
+ return With(prometheus.DefaultRegisterer).NewCounterFunc(opts, function)
+}
+
+// NewGauge works like the function of the same name in the prometheus package
+// but it automatically registers the Gauge with the
+// prometheus.DefaultRegisterer. If the registration fails, NewGauge panics.
+func NewGauge(opts prometheus.GaugeOpts) prometheus.Gauge {
+ return With(prometheus.DefaultRegisterer).NewGauge(opts)
+}
+
+// NewGaugeVec works like the function of the same name in the prometheus
+// package but it automatically registers the GaugeVec with the
+// prometheus.DefaultRegisterer. If the registration fails, NewGaugeVec panics.
+func NewGaugeVec(opts prometheus.GaugeOpts, labelNames []string) *prometheus.GaugeVec {
+ return With(prometheus.DefaultRegisterer).NewGaugeVec(opts, labelNames)
+}
+
+// NewGaugeFunc works like the function of the same name in the prometheus
+// package but it automatically registers the GaugeFunc with the
+// prometheus.DefaultRegisterer. If the registration fails, NewGaugeFunc panics.
+func NewGaugeFunc(opts prometheus.GaugeOpts, function func() float64) prometheus.GaugeFunc {
+ return With(prometheus.DefaultRegisterer).NewGaugeFunc(opts, function)
+}
+
+// NewSummary works like the function of the same name in the prometheus package
+// but it automatically registers the Summary with the
+// prometheus.DefaultRegisterer. If the registration fails, NewSummary panics.
+func NewSummary(opts prometheus.SummaryOpts) prometheus.Summary {
+ return With(prometheus.DefaultRegisterer).NewSummary(opts)
+}
+
+// NewSummaryVec works like the function of the same name in the prometheus
+// package but it automatically registers the SummaryVec with the
+// prometheus.DefaultRegisterer. If the registration fails, NewSummaryVec
+// panics.
+func NewSummaryVec(opts prometheus.SummaryOpts, labelNames []string) *prometheus.SummaryVec {
+ return With(prometheus.DefaultRegisterer).NewSummaryVec(opts, labelNames)
+}
+
+// NewHistogram works like the function of the same name in the prometheus
+// package but it automatically registers the Histogram with the
+// prometheus.DefaultRegisterer. If the registration fails, NewHistogram panics.
+func NewHistogram(opts prometheus.HistogramOpts) prometheus.Histogram {
+ return With(prometheus.DefaultRegisterer).NewHistogram(opts)
+}
+
+// NewHistogramVec works like the function of the same name in the prometheus
+// package but it automatically registers the HistogramVec with the
+// prometheus.DefaultRegisterer. If the registration fails, NewHistogramVec
+// panics.
+func NewHistogramVec(opts prometheus.HistogramOpts, labelNames []string) *prometheus.HistogramVec {
+ return With(prometheus.DefaultRegisterer).NewHistogramVec(opts, labelNames)
+}
+
+// NewUntypedFunc works like the function of the same name in the prometheus
+// package but it automatically registers the UntypedFunc with the
+// prometheus.DefaultRegisterer. If the registration fails, NewUntypedFunc
+// panics.
+func NewUntypedFunc(opts prometheus.UntypedOpts, function func() float64) prometheus.UntypedFunc {
+ return With(prometheus.DefaultRegisterer).NewUntypedFunc(opts, function)
+}
+
+// Factory provides factory methods to create Collectors that are automatically
+// registered with a Registerer. Create a Factory with the With function,
+// providing a Registerer to auto-register created Collectors with. The zero
+// value of a Factory creates Collectors that are not registered with any
+// Registerer. All methods of the Factory panic if the registration fails.
+type Factory struct {
+ r prometheus.Registerer
+}
+
+// With creates a Factory using the provided Registerer for registration of the
+// created Collectors. If the provided Registerer is nil, the returned Factory
+// creates Collectors that are not registered with any Registerer.
+func With(r prometheus.Registerer) Factory { return Factory{r} }
+
+// NewCounter works like the function of the same name in the prometheus package
+// but it automatically registers the Counter with the Factory's Registerer.
+func (f Factory) NewCounter(opts prometheus.CounterOpts) prometheus.Counter {
+ c := prometheus.NewCounter(opts)
+ if f.r != nil {
+ f.r.MustRegister(c)
+ }
+ return c
+}
+
+// NewCounterVec works like the function of the same name in the prometheus
+// package but it automatically registers the CounterVec with the Factory's
+// Registerer.
+func (f Factory) NewCounterVec(opts prometheus.CounterOpts, labelNames []string) *prometheus.CounterVec {
+ c := prometheus.NewCounterVec(opts, labelNames)
+ if f.r != nil {
+ f.r.MustRegister(c)
+ }
+ return c
+}
+
+// NewCounterFunc works like the function of the same name in the prometheus
+// package but it automatically registers the CounterFunc with the Factory's
+// Registerer.
+func (f Factory) NewCounterFunc(opts prometheus.CounterOpts, function func() float64) prometheus.CounterFunc {
+ c := prometheus.NewCounterFunc(opts, function)
+ if f.r != nil {
+ f.r.MustRegister(c)
+ }
+ return c
+}
+
+// NewGauge works like the function of the same name in the prometheus package
+// but it automatically registers the Gauge with the Factory's Registerer.
+func (f Factory) NewGauge(opts prometheus.GaugeOpts) prometheus.Gauge {
+ g := prometheus.NewGauge(opts)
+ if f.r != nil {
+ f.r.MustRegister(g)
+ }
+ return g
+}
+
+// NewGaugeVec works like the function of the same name in the prometheus
+// package but it automatically registers the GaugeVec with the Factory's
+// Registerer.
+func (f Factory) NewGaugeVec(opts prometheus.GaugeOpts, labelNames []string) *prometheus.GaugeVec {
+ g := prometheus.NewGaugeVec(opts, labelNames)
+ if f.r != nil {
+ f.r.MustRegister(g)
+ }
+ return g
+}
+
+// NewGaugeFunc works like the function of the same name in the prometheus
+// package but it automatically registers the GaugeFunc with the Factory's
+// Registerer.
+func (f Factory) NewGaugeFunc(opts prometheus.GaugeOpts, function func() float64) prometheus.GaugeFunc {
+ g := prometheus.NewGaugeFunc(opts, function)
+ if f.r != nil {
+ f.r.MustRegister(g)
+ }
+ return g
+}
+
+// NewSummary works like the function of the same name in the prometheus package
+// but it automatically registers the Summary with the Factory's Registerer.
+func (f Factory) NewSummary(opts prometheus.SummaryOpts) prometheus.Summary {
+ s := prometheus.NewSummary(opts)
+ if f.r != nil {
+ f.r.MustRegister(s)
+ }
+ return s
+}
+
+// NewSummaryVec works like the function of the same name in the prometheus
+// package but it automatically registers the SummaryVec with the Factory's
+// Registerer.
+func (f Factory) NewSummaryVec(opts prometheus.SummaryOpts, labelNames []string) *prometheus.SummaryVec {
+ s := prometheus.NewSummaryVec(opts, labelNames)
+ if f.r != nil {
+ f.r.MustRegister(s)
+ }
+ return s
+}
+
+// NewHistogram works like the function of the same name in the prometheus
+// package but it automatically registers the Histogram with the Factory's
+// Registerer.
+func (f Factory) NewHistogram(opts prometheus.HistogramOpts) prometheus.Histogram {
+ h := prometheus.NewHistogram(opts)
+ if f.r != nil {
+ f.r.MustRegister(h)
+ }
+ return h
+}
+
+// NewHistogramVec works like the function of the same name in the prometheus
+// package but it automatically registers the HistogramVec with the Factory's
+// Registerer.
+func (f Factory) NewHistogramVec(opts prometheus.HistogramOpts, labelNames []string) *prometheus.HistogramVec {
+ h := prometheus.NewHistogramVec(opts, labelNames)
+ if f.r != nil {
+ f.r.MustRegister(h)
+ }
+ return h
+}
+
+// NewUntypedFunc works like the function of the same name in the prometheus
+// package but it automatically registers the UntypedFunc with the Factory's
+// Registerer.
+func (f Factory) NewUntypedFunc(opts prometheus.UntypedOpts, function func() float64) prometheus.UntypedFunc {
+ u := prometheus.NewUntypedFunc(opts, function)
+ if f.r != nil {
+ f.r.MustRegister(u)
+ }
+ return u
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/registry.go b/vendor/github.com/prometheus/client_golang/prometheus/registry.go
new file mode 100644
index 0000000..c6fd2f5
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/registry.go
@@ -0,0 +1,1076 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "os"
+ "path/filepath"
+ "runtime"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+ "unicode/utf8"
+
+ "github.com/prometheus/client_golang/prometheus/internal"
+
+ "github.com/cespare/xxhash/v2"
+ dto "github.com/prometheus/client_model/go"
+ "github.com/prometheus/common/expfmt"
+ "google.golang.org/protobuf/proto"
+)
+
+const (
+ // Capacity for the channel to collect metrics and descriptors.
+ capMetricChan = 1000
+ capDescChan = 10
+)
+
+// DefaultRegisterer and DefaultGatherer are the implementations of the
+// Registerer and Gatherer interface a number of convenience functions in this
+// package act on. Initially, both variables point to the same Registry, which
+// has a process collector (currently on Linux only, see NewProcessCollector)
+// and a Go collector (see NewGoCollector, in particular the note about
+// stop-the-world implication with Go versions older than 1.9) already
+// registered. This approach to keep default instances as global state mirrors
+// the approach of other packages in the Go standard library. Note that there
+// are caveats. Change the variables with caution and only if you understand the
+// consequences. Users who want to avoid global state altogether should not use
+// the convenience functions and act on custom instances instead.
+var (
+ defaultRegistry = NewRegistry()
+ DefaultRegisterer Registerer = defaultRegistry
+ DefaultGatherer Gatherer = defaultRegistry
+)
+
+func init() {
+ MustRegister(NewProcessCollector(ProcessCollectorOpts{}))
+ MustRegister(NewGoCollector())
+}
+
+// NewRegistry creates a new vanilla Registry without any Collectors
+// pre-registered.
+func NewRegistry() *Registry {
+ return &Registry{
+ collectorsByID: map[uint64]Collector{},
+ descIDs: map[uint64]struct{}{},
+ dimHashesByName: map[string]uint64{},
+ }
+}
+
+// NewPedanticRegistry returns a registry that checks during collection if each
+// collected Metric is consistent with its reported Desc, and if the Desc has
+// actually been registered with the registry. Unchecked Collectors (those whose
+// Describe method does not yield any descriptors) are excluded from the check.
+//
+// Usually, a Registry will be happy as long as the union of all collected
+// Metrics is consistent and valid even if some metrics are not consistent with
+// their own Desc or a Desc provided by their registered Collector. Well-behaved
+// Collectors and Metrics will only provide consistent Descs. This Registry is
+// useful to test the implementation of Collectors and Metrics.
+func NewPedanticRegistry() *Registry {
+ r := NewRegistry()
+ r.pedanticChecksEnabled = true
+ return r
+}
+
+// Registerer is the interface for the part of a registry in charge of
+// registering and unregistering. Users of custom registries should use
+// Registerer as type for registration purposes (rather than the Registry type
+// directly). In that way, they are free to use a custom Registerer
+// implementation (e.g. for testing purposes).
+type Registerer interface {
+ // Register registers a new Collector to be included in metrics
+ // collection. It returns an error if the descriptors provided by the
+ // Collector are invalid or if they — in combination with descriptors of
+ // already registered Collectors — do not fulfill the consistency and
+ // uniqueness criteria described in the documentation of metric.Desc.
+ //
+ // If the provided Collector is equal to a Collector already registered
+ // (which includes the case of re-registering the same Collector), the
+ // returned error is an instance of AlreadyRegisteredError, which
+ // contains the previously registered Collector.
+ //
+ // A Collector whose Describe method does not yield any Desc is treated
+ // as unchecked. Registration will always succeed. No check for
+ // re-registering (see previous paragraph) is performed. Thus, the
+ // caller is responsible for not double-registering the same unchecked
+ // Collector, and for providing a Collector that will not cause
+ // inconsistent metrics on collection. (This would lead to scrape
+ // errors.)
+ Register(Collector) error
+ // MustRegister works like Register but registers any number of
+ // Collectors and panics upon the first registration that causes an
+ // error.
+ MustRegister(...Collector)
+ // Unregister unregisters the Collector that equals the Collector passed
+ // in as an argument. (Two Collectors are considered equal if their
+ // Describe method yields the same set of descriptors.) The function
+ // returns whether a Collector was unregistered. Note that an unchecked
+ // Collector cannot be unregistered (as its Describe method does not
+ // yield any descriptor).
+ //
+ // Note that even after unregistering, it will not be possible to
+ // register a new Collector that is inconsistent with the unregistered
+ // Collector, e.g. a Collector collecting metrics with the same name but
+ // a different help string. The rationale here is that the same registry
+ // instance must only collect consistent metrics throughout its
+ // lifetime.
+ Unregister(Collector) bool
+}
+
+// Gatherer is the interface for the part of a registry in charge of gathering
+// the collected metrics into a number of MetricFamilies. The Gatherer interface
+// comes with the same general implication as described for the Registerer
+// interface.
+type Gatherer interface {
+ // Gather calls the Collect method of the registered Collectors and then
+ // gathers the collected metrics into a lexicographically sorted slice
+ // of uniquely named MetricFamily protobufs. Gather ensures that the
+ // returned slice is valid and self-consistent so that it can be used
+ // for valid exposition. As an exception to the strict consistency
+ // requirements described for metric.Desc, Gather will tolerate
+ // different sets of label names for metrics of the same metric family.
+ //
+ // Even if an error occurs, Gather attempts to gather as many metrics as
+ // possible. Hence, if a non-nil error is returned, the returned
+ // MetricFamily slice could be nil (in case of a fatal error that
+ // prevented any meaningful metric collection) or contain a number of
+ // MetricFamily protobufs, some of which might be incomplete, and some
+ // might be missing altogether. The returned error (which might be a
+ // MultiError) explains the details. Note that this is mostly useful for
+ // debugging purposes. If the gathered protobufs are to be used for
+ // exposition in actual monitoring, it is almost always better to not
+ // expose an incomplete result and instead disregard the returned
+ // MetricFamily protobufs in case the returned error is non-nil.
+ Gather() ([]*dto.MetricFamily, error)
+}
+
+// Register registers the provided Collector with the DefaultRegisterer.
+//
+// Register is a shortcut for DefaultRegisterer.Register(c). See there for more
+// details.
+func Register(c Collector) error {
+ return DefaultRegisterer.Register(c)
+}
+
+// MustRegister registers the provided Collectors with the DefaultRegisterer and
+// panics if any error occurs.
+//
+// MustRegister is a shortcut for DefaultRegisterer.MustRegister(cs...). See
+// there for more details.
+func MustRegister(cs ...Collector) {
+ DefaultRegisterer.MustRegister(cs...)
+}
+
+// Unregister removes the registration of the provided Collector from the
+// DefaultRegisterer.
+//
+// Unregister is a shortcut for DefaultRegisterer.Unregister(c). See there for
+// more details.
+func Unregister(c Collector) bool {
+ return DefaultRegisterer.Unregister(c)
+}
+
+// GathererFunc turns a function into a Gatherer.
+type GathererFunc func() ([]*dto.MetricFamily, error)
+
+// Gather implements Gatherer.
+func (gf GathererFunc) Gather() ([]*dto.MetricFamily, error) {
+ return gf()
+}
+
+// AlreadyRegisteredError is returned by the Register method if the Collector to
+// be registered has already been registered before, or a different Collector
+// that collects the same metrics has been registered before. Registration fails
+// in that case, but you can detect from the kind of error what has
+// happened. The error contains fields for the existing Collector and the
+// (rejected) new Collector that equals the existing one. This can be used to
+// find out if an equal Collector has been registered before and switch over to
+// using the old one, as demonstrated in the example.
+type AlreadyRegisteredError struct {
+ ExistingCollector, NewCollector Collector
+}
+
+func (err AlreadyRegisteredError) Error() string {
+ return "duplicate metrics collector registration attempted"
+}
+
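+// A minimal, illustrative sketch of how a caller might handle this error and
+// switch over to the previously registered collector; the metric name and
+// variable names below are made up for the example:
+//
+// reqCount := prometheus.NewCounter(prometheus.CounterOpts{
+//     Name: "myapp_requests_total",
+//     Help: "Total number of handled requests.",
+// })
+// if err := prometheus.Register(reqCount); err != nil {
+//     are := prometheus.AlreadyRegisteredError{}
+//     if errors.As(err, &are) {
+//         // Reuse the collector that was registered first.
+//         reqCount = are.ExistingCollector.(prometheus.Counter)
+//     } else {
+//         panic(err)
+//     }
+// }
+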
+// MultiError is a slice of errors implementing the error interface. It is used
+// by a Gatherer to report multiple errors during MetricFamily gathering.
+type MultiError []error
+
+// Error formats the contained errors as a bullet point list, preceded by the
+// total number of errors. Note that this results in a multi-line string.
+func (errs MultiError) Error() string {
+ if len(errs) == 0 {
+ return ""
+ }
+ buf := &bytes.Buffer{}
+ fmt.Fprintf(buf, "%d error(s) occurred:", len(errs))
+ for _, err := range errs {
+ fmt.Fprintf(buf, "\n* %s", err)
+ }
+ return buf.String()
+}
+
+// Append appends the provided error if it is not nil.
+func (errs *MultiError) Append(err error) {
+ if err != nil {
+ *errs = append(*errs, err)
+ }
+}
+
+// MaybeUnwrap returns nil if len(errs) is 0. It returns the first and only
+// contained error as error if len(errs) is 1. In all other cases, it returns
+// the MultiError directly. This is helpful for returning a MultiError in a way
+// that only uses the MultiError if needed.
+func (errs MultiError) MaybeUnwrap() error {
+ switch len(errs) {
+ case 0:
+ return nil
+ case 1:
+ return errs[0]
+ default:
+ return errs
+ }
+}
+
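+// A small sketch of the intended pattern: collect per-gatherer errors with
+// Append and unwrap at the end with MaybeUnwrap; the helper function
+// gatherAll is hypothetical, not part of this package:
+//
+// func gatherAll(gs ...prometheus.Gatherer) ([]*dto.MetricFamily, error) {
+//     var all []*dto.MetricFamily
+//     var errs prometheus.MultiError
+//     for _, g := range gs {
+//         mfs, err := g.Gather()
+//         errs.Append(err)
+//         all = append(all, mfs...)
+//     }
+//     return all, errs.MaybeUnwrap()
+// }
+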
+// Registry registers Prometheus collectors, collects their metrics, and gathers
+// them into MetricFamilies for exposition. It implements Registerer, Gatherer,
+// and Collector. The zero value is not usable. Create instances with
+// NewRegistry or NewPedanticRegistry.
+//
+// Registry implements Collector to allow it to be used for creating groups of
+// metrics. See the Grouping example for how this can be done.
+type Registry struct {
+ mtx sync.RWMutex
+ collectorsByID map[uint64]Collector // ID is a hash of the descIDs.
+ descIDs map[uint64]struct{}
+ dimHashesByName map[string]uint64
+ uncheckedCollectors []Collector
+ pedanticChecksEnabled bool
+}
+
+// Register implements Registerer.
+func (r *Registry) Register(c Collector) error {
+ var (
+ descChan = make(chan *Desc, capDescChan)
+ newDescIDs = map[uint64]struct{}{}
+ newDimHashesByName = map[string]uint64{}
+ collectorID uint64 // All desc IDs XOR'd together.
+ duplicateDescErr error
+ )
+ go func() {
+ c.Describe(descChan)
+ close(descChan)
+ }()
+ r.mtx.Lock()
+ defer func() {
+ // Drain channel in case of premature return to not leak a goroutine.
+ for range descChan {
+ }
+ r.mtx.Unlock()
+ }()
+ // Conduct various tests...
+ for desc := range descChan {
+
+ // Is the descriptor valid at all?
+ if desc.err != nil {
+ return fmt.Errorf("descriptor %s is invalid: %w", desc, desc.err)
+ }
+
+ // Is the descID unique?
+ // (In other words: Is the fqName + constLabel combination unique?)
+ if _, exists := r.descIDs[desc.id]; exists {
+ duplicateDescErr = fmt.Errorf("descriptor %s already exists with the same fully-qualified name and const label values", desc)
+ }
+ // If it is not a duplicate desc in this collector, XOR it to
+ // the collectorID. (We allow duplicate descs within the same
+ // collector, but their existence must be a no-op.)
+ if _, exists := newDescIDs[desc.id]; !exists {
+ newDescIDs[desc.id] = struct{}{}
+ collectorID ^= desc.id
+ }
+
+ // Are all the label names and the help string consistent with
+ // previous descriptors of the same name?
+ // First check existing descriptors...
+ if dimHash, exists := r.dimHashesByName[desc.fqName]; exists {
+ if dimHash != desc.dimHash {
+ return fmt.Errorf("a previously registered descriptor with the same fully-qualified name as %s has different label names or a different help string", desc)
+ }
+ continue
+ }
+
+ // ...then check the new descriptors already seen.
+ if dimHash, exists := newDimHashesByName[desc.fqName]; exists {
+ if dimHash != desc.dimHash {
+ return fmt.Errorf("descriptors reported by collector have inconsistent label names or help strings for the same fully-qualified name, offender is %s", desc)
+ }
+ continue
+ }
+ newDimHashesByName[desc.fqName] = desc.dimHash
+ }
+ // A Collector yielding no Desc at all is considered unchecked.
+ if len(newDescIDs) == 0 {
+ r.uncheckedCollectors = append(r.uncheckedCollectors, c)
+ return nil
+ }
+ if existing, exists := r.collectorsByID[collectorID]; exists {
+ switch e := existing.(type) {
+ case *wrappingCollector:
+ return AlreadyRegisteredError{
+ ExistingCollector: e.unwrapRecursively(),
+ NewCollector: c,
+ }
+ default:
+ return AlreadyRegisteredError{
+ ExistingCollector: e,
+ NewCollector: c,
+ }
+ }
+ }
+ // If the collectorID is new, but at least one of the descs existed
+ // before, we are in trouble.
+ if duplicateDescErr != nil {
+ return duplicateDescErr
+ }
+
+ // Only after all tests have passed, actually register.
+ r.collectorsByID[collectorID] = c
+ for hash := range newDescIDs {
+ r.descIDs[hash] = struct{}{}
+ }
+ for name, dimHash := range newDimHashesByName {
+ r.dimHashesByName[name] = dimHash
+ }
+ return nil
+}
+
+// Unregister implements Registerer.
+func (r *Registry) Unregister(c Collector) bool {
+ var (
+ descChan = make(chan *Desc, capDescChan)
+ descIDs = map[uint64]struct{}{}
+ collectorID uint64 // All desc IDs XOR'd together.
+ )
+ go func() {
+ c.Describe(descChan)
+ close(descChan)
+ }()
+ for desc := range descChan {
+ if _, exists := descIDs[desc.id]; !exists {
+ collectorID ^= desc.id
+ descIDs[desc.id] = struct{}{}
+ }
+ }
+
+ r.mtx.RLock()
+ if _, exists := r.collectorsByID[collectorID]; !exists {
+ r.mtx.RUnlock()
+ return false
+ }
+ r.mtx.RUnlock()
+
+ r.mtx.Lock()
+ defer r.mtx.Unlock()
+
+ delete(r.collectorsByID, collectorID)
+ for id := range descIDs {
+ delete(r.descIDs, id)
+ }
+ // dimHashesByName is left untouched as those must be consistent
+ // throughout the lifetime of a program.
+ return true
+}
+
+// MustRegister implements Registerer.
+func (r *Registry) MustRegister(cs ...Collector) {
+ for _, c := range cs {
+ if err := r.Register(c); err != nil {
+ panic(err)
+ }
+ }
+}
+
+// Gather implements Gatherer.
+func (r *Registry) Gather() ([]*dto.MetricFamily, error) {
+ r.mtx.RLock()
+
+ if len(r.collectorsByID) == 0 && len(r.uncheckedCollectors) == 0 {
+ // Fast path.
+ r.mtx.RUnlock()
+ return nil, nil
+ }
+
+ var (
+ checkedMetricChan = make(chan Metric, capMetricChan)
+ uncheckedMetricChan = make(chan Metric, capMetricChan)
+ metricHashes = map[uint64]struct{}{}
+ wg sync.WaitGroup
+ errs MultiError // The collected errors to return in the end.
+ registeredDescIDs map[uint64]struct{} // Only used for pedantic checks
+ )
+
+ goroutineBudget := len(r.collectorsByID) + len(r.uncheckedCollectors)
+ metricFamiliesByName := make(map[string]*dto.MetricFamily, len(r.dimHashesByName))
+ checkedCollectors := make(chan Collector, len(r.collectorsByID))
+ uncheckedCollectors := make(chan Collector, len(r.uncheckedCollectors))
+ for _, collector := range r.collectorsByID {
+ checkedCollectors <- collector
+ }
+ for _, collector := range r.uncheckedCollectors {
+ uncheckedCollectors <- collector
+ }
+ // In case pedantic checks are enabled, we have to copy the map before
+ // giving up the RLock.
+ if r.pedanticChecksEnabled {
+ registeredDescIDs = make(map[uint64]struct{}, len(r.descIDs))
+ for id := range r.descIDs {
+ registeredDescIDs[id] = struct{}{}
+ }
+ }
+ r.mtx.RUnlock()
+
+ wg.Add(goroutineBudget)
+
+ collectWorker := func() {
+ for {
+ select {
+ case collector := <-checkedCollectors:
+ collector.Collect(checkedMetricChan)
+ case collector := <-uncheckedCollectors:
+ collector.Collect(uncheckedMetricChan)
+ default:
+ return
+ }
+ wg.Done()
+ }
+ }
+
+ // Start the first worker now to make sure at least one is running.
+ go collectWorker()
+ goroutineBudget--
+
+ // Close checkedMetricChan and uncheckedMetricChan once all collectors
+ // are collected.
+ go func() {
+ wg.Wait()
+ close(checkedMetricChan)
+ close(uncheckedMetricChan)
+ }()
+
+ // Drain checkedMetricChan and uncheckedMetricChan in case of premature return.
+ defer func() {
+ if checkedMetricChan != nil {
+ for range checkedMetricChan {
+ }
+ }
+ if uncheckedMetricChan != nil {
+ for range uncheckedMetricChan {
+ }
+ }
+ }()
+
+ // Copy the channel references so we can nil them out later to remove
+ // them from the select statements below.
+ cmc := checkedMetricChan
+ umc := uncheckedMetricChan
+
+ for {
+ select {
+ case metric, ok := <-cmc:
+ if !ok {
+ cmc = nil
+ break
+ }
+ errs.Append(processMetric(
+ metric, metricFamiliesByName,
+ metricHashes,
+ registeredDescIDs,
+ ))
+ case metric, ok := <-umc:
+ if !ok {
+ umc = nil
+ break
+ }
+ errs.Append(processMetric(
+ metric, metricFamiliesByName,
+ metricHashes,
+ nil,
+ ))
+ default:
+ if goroutineBudget <= 0 || len(checkedCollectors)+len(uncheckedCollectors) == 0 {
+ // All collectors are already being worked on or
+ // we have already started as many goroutines as
+ // there are collectors. Do the same as above,
+ // just without the default.
+ select {
+ case metric, ok := <-cmc:
+ if !ok {
+ cmc = nil
+ break
+ }
+ errs.Append(processMetric(
+ metric, metricFamiliesByName,
+ metricHashes,
+ registeredDescIDs,
+ ))
+ case metric, ok := <-umc:
+ if !ok {
+ umc = nil
+ break
+ }
+ errs.Append(processMetric(
+ metric, metricFamiliesByName,
+ metricHashes,
+ nil,
+ ))
+ }
+ break
+ }
+ // Start more workers.
+ go collectWorker()
+ goroutineBudget--
+ runtime.Gosched()
+ }
+ // Once both checkedMetricChan and uncheckedMetricChan are closed
+ // and drained, the contraption above will nil out cmc and umc,
+ // and then we can leave the collect loop here.
+ if cmc == nil && umc == nil {
+ break
+ }
+ }
+ return internal.NormalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap()
+}
+
+// Describe implements Collector.
+func (r *Registry) Describe(ch chan<- *Desc) {
+ r.mtx.RLock()
+ defer r.mtx.RUnlock()
+
+ // Only report the checked Collectors; unchecked collectors don't report any
+ // Desc.
+ for _, c := range r.collectorsByID {
+ c.Describe(ch)
+ }
+}
+
+// Collect implements Collector.
+func (r *Registry) Collect(ch chan<- Metric) {
+ r.mtx.RLock()
+ defer r.mtx.RUnlock()
+
+ for _, c := range r.collectorsByID {
+ c.Collect(ch)
+ }
+ for _, c := range r.uncheckedCollectors {
+ c.Collect(ch)
+ }
+}
+
+// WriteToTextfile calls Gather on the provided Gatherer, encodes the result in the
+// Prometheus text format, and writes it to a temporary file. Upon success, the
+// temporary file is renamed to the provided filename.
+//
+// This is intended for use with the textfile collector of the node exporter.
+// Note that the node exporter expects the filename to be suffixed with ".prom".
+func WriteToTextfile(filename string, g Gatherer) error {
+ tmp, err := os.CreateTemp(filepath.Dir(filename), filepath.Base(filename))
+ if err != nil {
+ return err
+ }
+ defer os.Remove(tmp.Name())
+
+ mfs, err := g.Gather()
+ if err != nil {
+ return err
+ }
+ for _, mf := range mfs {
+ if _, err := expfmt.MetricFamilyToText(tmp, mf); err != nil {
+ return err
+ }
+ }
+ if err := tmp.Close(); err != nil {
+ return err
+ }
+
+ if err := os.Chmod(tmp.Name(), 0o644); err != nil {
+ return err
+ }
+ return os.Rename(tmp.Name(), filename)
+}
+
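+// A brief usage sketch for the node exporter's textfile collector; the target
+// directory is an assumption and depends on the node exporter's
+// --collector.textfile.directory flag:
+//
+// if err := prometheus.WriteToTextfile(
+//     "/var/lib/node_exporter/textfile/myjob.prom",
+//     prometheus.DefaultGatherer,
+// ); err != nil {
+//     log.Fatal(err)
+// }
+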
+// processMetric is an internal helper method only used by the Gather method.
+func processMetric(
+ metric Metric,
+ metricFamiliesByName map[string]*dto.MetricFamily,
+ metricHashes map[uint64]struct{},
+ registeredDescIDs map[uint64]struct{},
+) error {
+ desc := metric.Desc()
+ // Wrapped metrics collected by an unchecked Collector can have an
+ // invalid Desc.
+ if desc.err != nil {
+ return desc.err
+ }
+ dtoMetric := &dto.Metric{}
+ if err := metric.Write(dtoMetric); err != nil {
+ return fmt.Errorf("error collecting metric %v: %w", desc, err)
+ }
+ metricFamily, ok := metricFamiliesByName[desc.fqName]
+ if ok { // Existing name.
+ if metricFamily.GetHelp() != desc.help {
+ return fmt.Errorf(
+ "collected metric %s %s has help %q but should have %q",
+ desc.fqName, dtoMetric, desc.help, metricFamily.GetHelp(),
+ )
+ }
+ // TODO(beorn7): Simplify switch once Desc has type.
+ switch metricFamily.GetType() {
+ case dto.MetricType_COUNTER:
+ if dtoMetric.Counter == nil {
+ return fmt.Errorf(
+ "collected metric %s %s should be a Counter",
+ desc.fqName, dtoMetric,
+ )
+ }
+ case dto.MetricType_GAUGE:
+ if dtoMetric.Gauge == nil {
+ return fmt.Errorf(
+ "collected metric %s %s should be a Gauge",
+ desc.fqName, dtoMetric,
+ )
+ }
+ case dto.MetricType_SUMMARY:
+ if dtoMetric.Summary == nil {
+ return fmt.Errorf(
+ "collected metric %s %s should be a Summary",
+ desc.fqName, dtoMetric,
+ )
+ }
+ case dto.MetricType_UNTYPED:
+ if dtoMetric.Untyped == nil {
+ return fmt.Errorf(
+ "collected metric %s %s should be Untyped",
+ desc.fqName, dtoMetric,
+ )
+ }
+ case dto.MetricType_HISTOGRAM:
+ if dtoMetric.Histogram == nil {
+ return fmt.Errorf(
+ "collected metric %s %s should be a Histogram",
+ desc.fqName, dtoMetric,
+ )
+ }
+ default:
+ panic("encountered MetricFamily with invalid type")
+ }
+ } else { // New name.
+ metricFamily = &dto.MetricFamily{}
+ metricFamily.Name = proto.String(desc.fqName)
+ metricFamily.Help = proto.String(desc.help)
+ // TODO(beorn7): Simplify switch once Desc has type.
+ switch {
+ case dtoMetric.Gauge != nil:
+ metricFamily.Type = dto.MetricType_GAUGE.Enum()
+ case dtoMetric.Counter != nil:
+ metricFamily.Type = dto.MetricType_COUNTER.Enum()
+ case dtoMetric.Summary != nil:
+ metricFamily.Type = dto.MetricType_SUMMARY.Enum()
+ case dtoMetric.Untyped != nil:
+ metricFamily.Type = dto.MetricType_UNTYPED.Enum()
+ case dtoMetric.Histogram != nil:
+ metricFamily.Type = dto.MetricType_HISTOGRAM.Enum()
+ default:
+ return fmt.Errorf("empty metric collected: %s", dtoMetric)
+ }
+ if err := checkSuffixCollisions(metricFamily, metricFamiliesByName); err != nil {
+ return err
+ }
+ metricFamiliesByName[desc.fqName] = metricFamily
+ }
+ if err := checkMetricConsistency(metricFamily, dtoMetric, metricHashes); err != nil {
+ return err
+ }
+ if registeredDescIDs != nil {
+ // Is the desc registered at all?
+ if _, exist := registeredDescIDs[desc.id]; !exist {
+ return fmt.Errorf(
+ "collected metric %s %s with unregistered descriptor %s",
+ metricFamily.GetName(), dtoMetric, desc,
+ )
+ }
+ if err := checkDescConsistency(metricFamily, dtoMetric, desc); err != nil {
+ return err
+ }
+ }
+ metricFamily.Metric = append(metricFamily.Metric, dtoMetric)
+ return nil
+}
+
+// Gatherers is a slice of Gatherer instances that implements the Gatherer
+// interface itself. Its Gather method calls Gather on all Gatherers in the
+// slice in order and returns the merged results. Errors returned from the
+// Gather calls are all returned in a flattened MultiError. Duplicate and
+// inconsistent Metrics are skipped (first occurrence in slice order wins) and
+// reported in the returned error.
+//
+// Gatherers can be used to merge the Gather results from multiple
+// Registries. It also provides a way to directly inject existing MetricFamily
+// protobufs into the gathering by creating a custom Gatherer with a Gather
+// method that simply returns the existing MetricFamily protobufs. Note that no
+// registration is involved (in contrast to Collector registration), so
+// obviously registration-time checks cannot happen. Any inconsistencies between
+// the gathered MetricFamilies are reported as errors by the Gather method, and
+// inconsistent Metrics are dropped. Invalid parts of the MetricFamilies
+// (e.g. syntactically invalid metric or label names) will go undetected.
+type Gatherers []Gatherer
+
+// Gather implements Gatherer.
+func (gs Gatherers) Gather() ([]*dto.MetricFamily, error) {
+ var (
+ metricFamiliesByName = map[string]*dto.MetricFamily{}
+ metricHashes = map[uint64]struct{}{}
+ errs MultiError // The collected errors to return in the end.
+ )
+
+ for i, g := range gs {
+ mfs, err := g.Gather()
+ if err != nil {
+ multiErr := MultiError{}
+ if errors.As(err, &multiErr) {
+ for _, err := range multiErr {
+ errs = append(errs, fmt.Errorf("[from Gatherer #%d] %w", i+1, err))
+ }
+ } else {
+ errs = append(errs, fmt.Errorf("[from Gatherer #%d] %w", i+1, err))
+ }
+ }
+ for _, mf := range mfs {
+ existingMF, exists := metricFamiliesByName[mf.GetName()]
+ if exists {
+ if existingMF.GetHelp() != mf.GetHelp() {
+ errs = append(errs, fmt.Errorf(
+ "gathered metric family %s has help %q but should have %q",
+ mf.GetName(), mf.GetHelp(), existingMF.GetHelp(),
+ ))
+ continue
+ }
+ if existingMF.GetType() != mf.GetType() {
+ errs = append(errs, fmt.Errorf(
+ "gathered metric family %s has type %s but should have %s",
+ mf.GetName(), mf.GetType(), existingMF.GetType(),
+ ))
+ continue
+ }
+ } else {
+ existingMF = &dto.MetricFamily{}
+ existingMF.Name = mf.Name
+ existingMF.Help = mf.Help
+ existingMF.Type = mf.Type
+ if err := checkSuffixCollisions(existingMF, metricFamiliesByName); err != nil {
+ errs = append(errs, err)
+ continue
+ }
+ metricFamiliesByName[mf.GetName()] = existingMF
+ }
+ for _, m := range mf.Metric {
+ if err := checkMetricConsistency(existingMF, m, metricHashes); err != nil {
+ errs = append(errs, err)
+ continue
+ }
+ existingMF.Metric = append(existingMF.Metric, m)
+ }
+ }
+ }
+ return internal.NormalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap()
+}
+
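+// A minimal sketch of merging the default registry with a custom one via
+// Gatherers; the registry name is illustrative:
+//
+// customReg := prometheus.NewRegistry()
+// merged := prometheus.Gatherers{
+//     prometheus.DefaultGatherer,
+//     customReg,
+// }
+// mfs, err := merged.Gather()
+// // mfs holds the union of both registries' metric families; err may be a
+// // MultiError describing duplicates or inconsistencies between them.
+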
+// checkSuffixCollisions checks for collisions with the “magic” suffixes the
+// Prometheus text format and the internal metric representation of the
+// Prometheus server add while flattening Summaries and Histograms.
+func checkSuffixCollisions(mf *dto.MetricFamily, mfs map[string]*dto.MetricFamily) error {
+ var (
+ newName = mf.GetName()
+ newType = mf.GetType()
+ newNameWithoutSuffix = ""
+ )
+ switch {
+ case strings.HasSuffix(newName, "_count"):
+ newNameWithoutSuffix = newName[:len(newName)-6]
+ case strings.HasSuffix(newName, "_sum"):
+ newNameWithoutSuffix = newName[:len(newName)-4]
+ case strings.HasSuffix(newName, "_bucket"):
+ newNameWithoutSuffix = newName[:len(newName)-7]
+ }
+ if newNameWithoutSuffix != "" {
+ if existingMF, ok := mfs[newNameWithoutSuffix]; ok {
+ switch existingMF.GetType() {
+ case dto.MetricType_SUMMARY:
+ if !strings.HasSuffix(newName, "_bucket") {
+ return fmt.Errorf(
+ "collected metric named %q collides with previously collected summary named %q",
+ newName, newNameWithoutSuffix,
+ )
+ }
+ case dto.MetricType_HISTOGRAM:
+ return fmt.Errorf(
+ "collected metric named %q collides with previously collected histogram named %q",
+ newName, newNameWithoutSuffix,
+ )
+ }
+ }
+ }
+ if newType == dto.MetricType_SUMMARY || newType == dto.MetricType_HISTOGRAM {
+ if _, ok := mfs[newName+"_count"]; ok {
+ return fmt.Errorf(
+ "collected histogram or summary named %q collides with previously collected metric named %q",
+ newName, newName+"_count",
+ )
+ }
+ if _, ok := mfs[newName+"_sum"]; ok {
+ return fmt.Errorf(
+ "collected histogram or summary named %q collides with previously collected metric named %q",
+ newName, newName+"_sum",
+ )
+ }
+ }
+ if newType == dto.MetricType_HISTOGRAM {
+ if _, ok := mfs[newName+"_bucket"]; ok {
+ return fmt.Errorf(
+ "collected histogram named %q collides with previously collected metric named %q",
+ newName, newName+"_bucket",
+ )
+ }
+ }
+ return nil
+}
+
+// checkMetricConsistency checks if the provided Metric is consistent with the
+// provided MetricFamily. It also hashes the Metric labels and the MetricFamily
+// name. If the resulting hash is already in the provided metricHashes, an error
+// is returned. If not, it is added to metricHashes.
+func checkMetricConsistency(
+ metricFamily *dto.MetricFamily,
+ dtoMetric *dto.Metric,
+ metricHashes map[uint64]struct{},
+) error {
+ name := metricFamily.GetName()
+
+ // Type consistency with metric family.
+ if metricFamily.GetType() == dto.MetricType_GAUGE && dtoMetric.Gauge == nil ||
+ metricFamily.GetType() == dto.MetricType_COUNTER && dtoMetric.Counter == nil ||
+ metricFamily.GetType() == dto.MetricType_SUMMARY && dtoMetric.Summary == nil ||
+ metricFamily.GetType() == dto.MetricType_HISTOGRAM && dtoMetric.Histogram == nil ||
+ metricFamily.GetType() == dto.MetricType_UNTYPED && dtoMetric.Untyped == nil {
+ return fmt.Errorf(
+ "collected metric %q { %s} is not a %s",
+ name, dtoMetric, metricFamily.GetType(),
+ )
+ }
+
+ previousLabelName := ""
+ for _, labelPair := range dtoMetric.GetLabel() {
+ labelName := labelPair.GetName()
+ if labelName == previousLabelName {
+ return fmt.Errorf(
+ "collected metric %q { %s} has two or more labels with the same name: %s",
+ name, dtoMetric, labelName,
+ )
+ }
+ if !checkLabelName(labelName) {
+ return fmt.Errorf(
+ "collected metric %q { %s} has a label with an invalid name: %s",
+ name, dtoMetric, labelName,
+ )
+ }
+ if dtoMetric.Summary != nil && labelName == quantileLabel {
+ return fmt.Errorf(
+ "collected metric %q { %s} must not have an explicit %q label",
+ name, dtoMetric, quantileLabel,
+ )
+ }
+ if !utf8.ValidString(labelPair.GetValue()) {
+ return fmt.Errorf(
+ "collected metric %q { %s} has a label named %q whose value is not utf8: %#v",
+ name, dtoMetric, labelName, labelPair.GetValue())
+ }
+ previousLabelName = labelName
+ }
+
+ // Is the metric unique (i.e. no other metric with the same name and the same labels)?
+ h := xxhash.New()
+ h.WriteString(name)
+ h.Write(separatorByteSlice)
+ // Make sure label pairs are sorted. We depend on it for the consistency
+ // check.
+ if !sort.IsSorted(internal.LabelPairSorter(dtoMetric.Label)) {
+ // We cannot sort dtoMetric.Label in place as it is immutable by contract.
+ copiedLabels := make([]*dto.LabelPair, len(dtoMetric.Label))
+ copy(copiedLabels, dtoMetric.Label)
+ sort.Sort(internal.LabelPairSorter(copiedLabels))
+ dtoMetric.Label = copiedLabels
+ }
+ for _, lp := range dtoMetric.Label {
+ h.WriteString(lp.GetName())
+ h.Write(separatorByteSlice)
+ h.WriteString(lp.GetValue())
+ h.Write(separatorByteSlice)
+ }
+ if dtoMetric.TimestampMs != nil {
+ h.WriteString(strconv.FormatInt(*(dtoMetric.TimestampMs), 10))
+ h.Write(separatorByteSlice)
+ }
+ hSum := h.Sum64()
+ if _, exists := metricHashes[hSum]; exists {
+ return fmt.Errorf(
+ "collected metric %q { %s} was collected before with the same name and label values",
+ name, dtoMetric,
+ )
+ }
+ metricHashes[hSum] = struct{}{}
+ return nil
+}
+
+func checkDescConsistency(
+ metricFamily *dto.MetricFamily,
+ dtoMetric *dto.Metric,
+ desc *Desc,
+) error {
+ // Desc help consistency with metric family help.
+ if metricFamily.GetHelp() != desc.help {
+ return fmt.Errorf(
+ "collected metric %s %s has help %q but should have %q",
+ metricFamily.GetName(), dtoMetric, metricFamily.GetHelp(), desc.help,
+ )
+ }
+
+ // Is the desc consistent with the content of the metric?
+ lpsFromDesc := make([]*dto.LabelPair, len(desc.constLabelPairs), len(dtoMetric.Label))
+ copy(lpsFromDesc, desc.constLabelPairs)
+ for _, l := range desc.variableLabels.names {
+ lpsFromDesc = append(lpsFromDesc, &dto.LabelPair{
+ Name: proto.String(l),
+ })
+ }
+ if len(lpsFromDesc) != len(dtoMetric.Label) {
+ return fmt.Errorf(
+ "labels in collected metric %s %s are inconsistent with descriptor %s",
+ metricFamily.GetName(), dtoMetric, desc,
+ )
+ }
+ sort.Sort(internal.LabelPairSorter(lpsFromDesc))
+ for i, lpFromDesc := range lpsFromDesc {
+ lpFromMetric := dtoMetric.Label[i]
+ if lpFromDesc.GetName() != lpFromMetric.GetName() ||
+ lpFromDesc.Value != nil && lpFromDesc.GetValue() != lpFromMetric.GetValue() {
+ return fmt.Errorf(
+ "labels in collected metric %s %s are inconsistent with descriptor %s",
+ metricFamily.GetName(), dtoMetric, desc,
+ )
+ }
+ }
+ return nil
+}
+
+var _ TransactionalGatherer = &MultiTRegistry{}
+
+// MultiTRegistry is a TransactionalGatherer that joins gathered metrics from multiple
+// transactional gatherers.
+//
+// It is the caller's responsibility to ensure that the provided gatherers have
+// mutually exclusive metric families; no deduplication will happen.
+type MultiTRegistry struct {
+ tGatherers []TransactionalGatherer
+}
+
+// NewMultiTRegistry creates MultiTRegistry.
+func NewMultiTRegistry(tGatherers ...TransactionalGatherer) *MultiTRegistry {
+ return &MultiTRegistry{
+ tGatherers: tGatherers,
+ }
+}
+
+// Gather implements TransactionalGatherer interface.
+func (r *MultiTRegistry) Gather() (mfs []*dto.MetricFamily, done func(), err error) {
+ errs := MultiError{}
+
+ dFns := make([]func(), 0, len(r.tGatherers))
+ // TODO(bwplotka): Implement concurrency for those?
+ for _, g := range r.tGatherers {
+ // TODO(bwplotka): Check for duplicates?
+ m, d, err := g.Gather()
+ errs.Append(err)
+
+ mfs = append(mfs, m...)
+ dFns = append(dFns, d)
+ }
+
+ // TODO(bwplotka): Consider sort in place, given metric family in gather is sorted already.
+ sort.Slice(mfs, func(i, j int) bool {
+ return *mfs[i].Name < *mfs[j].Name
+ })
+ return mfs, func() {
+ for _, d := range dFns {
+ d()
+ }
+ }, errs.MaybeUnwrap()
+}
+
+// TransactionalGatherer represents a transactional gatherer that can be notified
+// once the memory used by the returned metric families is no longer needed by the
+// caller. This allows implementations that keep a cache.
+type TransactionalGatherer interface {
+ // Gather returns metrics in a lexicographically sorted slice
+ // of uniquely named MetricFamily protobufs. Gather ensures that the
+ // returned slice is valid and self-consistent so that it can be used
+ // for valid exposition. As an exception to the strict consistency
+ // requirements described for metric.Desc, Gather will tolerate
+ // different sets of label names for metrics of the same metric family.
+ //
+ // Even if an error occurs, Gather attempts to gather as many metrics as
+ // possible. Hence, if a non-nil error is returned, the returned
+ // MetricFamily slice could be nil (in case of a fatal error that
+ // prevented any meaningful metric collection) or contain a number of
+ // MetricFamily protobufs, some of which might be incomplete, and some
+ // might be missing altogether. The returned error (which might be a
+ // MultiError) explains the details. Note that this is mostly useful for
+ // debugging purposes. If the gathered protobufs are to be used for
+ // exposition in actual monitoring, it is almost always better to not
+ // expose an incomplete result and instead disregard the returned
+ // MetricFamily protobufs in case the returned error is non-nil.
+ //
+ // Important: done is expected to be called (even if an error occurs!)
+ // once the caller no longer needs the returned slice of dto.MetricFamily.
+ Gather() (_ []*dto.MetricFamily, done func(), err error)
+}
+
+// ToTransactionalGatherer transforms a Gatherer into a TransactionalGatherer with a no-op done function.
+func ToTransactionalGatherer(g Gatherer) TransactionalGatherer {
+ return &noTransactionGatherer{g: g}
+}
+
+type noTransactionGatherer struct {
+ g Gatherer
+}
+
+// Gather implements TransactionalGatherer interface.
+func (g *noTransactionGatherer) Gather() (_ []*dto.MetricFamily, done func(), err error) {
+ mfs, err := g.g.Gather()
+ return mfs, func() {}, err
+}
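+
+// A short sketch of joining two gatherers through the transactional API; the
+// registry name is illustrative:
+//
+// appReg := prometheus.NewRegistry()
+// multi := prometheus.NewMultiTRegistry(
+//     prometheus.ToTransactionalGatherer(prometheus.DefaultGatherer),
+//     prometheus.ToTransactionalGatherer(appReg),
+// )
+// mfs, done, err := multi.Gather()
+// defer done() // must be called once mfs is no longer needed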
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/summary.go b/vendor/github.com/prometheus/client_golang/prometheus/summary.go
new file mode 100644
index 0000000..1ab0e47
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/summary.go
@@ -0,0 +1,827 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "fmt"
+ "math"
+ "runtime"
+ "sort"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ dto "github.com/prometheus/client_model/go"
+
+ "github.com/beorn7/perks/quantile"
+ "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/types/known/timestamppb"
+)
+
+// quantileLabel is used for the label that defines the quantile in a
+// summary.
+const quantileLabel = "quantile"
+
+// A Summary captures individual observations from an event or sample stream and
+// summarizes them in a manner similar to traditional summary statistics: 1. sum
+// of observations, 2. observation count, 3. rank estimations.
+//
+// A typical use-case is the observation of request latencies. By default, a
+// Summary provides the median, the 90th and the 99th percentile of the latency
+// as rank estimations. However, the default behavior will change in the
+// upcoming v1.0.0 of the library. There will be no rank estimations at all by
+// default. For a sane transition, it is recommended to set the desired rank
+// estimations explicitly.
+//
+// Note that the rank estimations cannot be aggregated in a meaningful way with
+// the Prometheus query language (i.e. you cannot average or add them). If you
+// need aggregatable quantiles (e.g. you want the 99th percentile latency of all
+// queries served across all instances of a service), consider the Histogram
+// metric type. See the Prometheus documentation for more details.
+//
+// To create Summary instances, use NewSummary.
+type Summary interface {
+ Metric
+ Collector
+
+ // Observe adds a single observation to the summary. Observations are
+ // usually positive or zero. Negative observations are accepted but
+ // prevent current versions of Prometheus from properly detecting
+ // counter resets in the sum of observations. See
+ // https://prometheus.io/docs/practices/histograms/#count-and-sum-of-observations
+ // for details.
+ Observe(float64)
+}
+
+var errQuantileLabelNotAllowed = fmt.Errorf(
+ "%q is not allowed as label name in summaries", quantileLabel,
+)
+
+// Default values for SummaryOpts.
+const (
+ // DefMaxAge is the default duration for which observations stay
+ // relevant.
+ DefMaxAge time.Duration = 10 * time.Minute
+ // DefAgeBuckets is the default number of buckets used to calculate the
+ // age of observations.
+ DefAgeBuckets = 5
+ // DefBufCap is the standard buffer size for collecting Summary observations.
+ DefBufCap = 500
+)
+
+// SummaryOpts bundles the options for creating a Summary metric. It is
+// mandatory to set Name to a non-empty string. While all other fields are
+// optional and can safely be left at their zero value, it is recommended to set
+// a help string and to explicitly set the Objectives field to the desired value
+// as the default value will change in the upcoming v1.0.0 of the library.
+type SummaryOpts struct {
+ // Namespace, Subsystem, and Name are components of the fully-qualified
+ // name of the Summary (created by joining these components with
+ // "_"). Only Name is mandatory, the others merely help structuring the
+ // name. Note that the fully-qualified name of the Summary must be a
+ // valid Prometheus metric name.
+ Namespace string
+ Subsystem string
+ Name string
+
+ // Help provides information about this Summary.
+ //
+ // Metrics with the same fully-qualified name must have the same Help
+ // string.
+ Help string
+
+ // ConstLabels are used to attach fixed labels to this metric. Metrics
+ // with the same fully-qualified name must have the same label names in
+ // their ConstLabels.
+ //
+ // Due to the way a Summary is represented in the Prometheus text format
+ // and how it is handled by the Prometheus server internally, “quantile”
+ // is an illegal label name. Construction of a Summary or SummaryVec
+ // will panic if this label name is used in ConstLabels.
+ //
+ // ConstLabels are only used rarely. In particular, do not use them to
+ // attach the same labels to all your metrics. Those use cases are
+ // better covered by target labels set by the scraping Prometheus
+ // server, or by one specific metric (e.g. a build_info or a
+ // machine_role metric). See also
+ // https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels-not-static-scraped-labels
+ ConstLabels Labels
+
+ // Objectives defines the quantile rank estimates with their respective
+ // absolute error. If Objectives[q] = e, then the value reported for q
+ // will be the φ-quantile value for some φ between q-e and q+e. The
+ // default value is an empty map, resulting in a summary without
+ // quantiles.
+ Objectives map[float64]float64
+
+ // MaxAge defines the duration for which an observation stays relevant
+ // for the summary. Only applies to pre-calculated quantiles, does not
+ // apply to _sum and _count. Must be positive. The default value is
+ // DefMaxAge.
+ MaxAge time.Duration
+
+ // AgeBuckets is the number of buckets used to exclude observations that
+ // are older than MaxAge from the summary. A higher number has a
+ // resource penalty, so only increase it if the higher resolution is
+ // really required. For very high observation rates, you might want to
+ // reduce the number of age buckets. With only one age bucket, you will
+ // effectively see a complete reset of the summary each time MaxAge has
+ // passed. The default value is DefAgeBuckets.
+ AgeBuckets uint32
+
+ // BufCap defines the default sample stream buffer size. The default
+ // value of DefBufCap should suffice for most uses. If there is a need
+ // to increase the value, a multiple of 500 is recommended (because that
+ // is the internal buffer size of the underlying package
+ // "github.com/bmizerany/perks/quantile").
+ BufCap uint32
+
+ // now is for testing purposes, by default it's time.Now.
+ now func() time.Time
+}
+
+// SummaryVecOpts bundles the options to create a SummaryVec metric.
+// It is mandatory to set SummaryOpts, see there for mandatory fields. VariableLabels
+// is optional and can safely be left to its default value.
+type SummaryVecOpts struct {
+ SummaryOpts
+
+ // VariableLabels are used to partition the metric vector by the given set
+ // of labels. Each label value will be constrained with the optional Constraint
+ // function, if provided.
+ VariableLabels ConstrainableLabels
+}
+
+// Problem with the sliding-window decay algorithm... The Merge method of
+// perks/quantile is actually not working as advertised - and it might be
+// unfixable, as the underlying algorithm is apparently not capable of merging
+// summaries in the first place. To avoid using Merge, we are currently adding
+// observations to _each_ age bucket, i.e. the effort to add a sample is
+// essentially multiplied by the number of age buckets. When rotating age
+// buckets, we empty the previous head stream. On scrape time, we simply take
+// the quantiles from the head stream (no merging required). Result: More effort
+// on observation time, less effort on scrape time, which is exactly the
+// opposite of what we try to accomplish, but at least the results are correct.
+//
+// The quite elegant previous contraption to merge the age buckets efficiently
+// on scrape time (see code up to commit 6b9530d72ea715f0ba612c0120e6e09fbf1d49d0)
+// can't be used anymore.
+
+// NewSummary creates a new Summary based on the provided SummaryOpts.
+func NewSummary(opts SummaryOpts) Summary {
+ return newSummary(
+ NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ nil,
+ opts.ConstLabels,
+ ),
+ opts,
+ )
+}
+
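+// A minimal sketch of creating a Summary with explicitly set Objectives, as
+// recommended in the SummaryOpts documentation; the metric name and objective
+// values (median, 90th and 99th percentile with absolute errors) are only an
+// example:
+//
+// latency := prometheus.NewSummary(prometheus.SummaryOpts{
+//     Name:       "myapp_request_duration_seconds",
+//     Help:       "Request latency distribution.",
+//     Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
+// })
+// latency.Observe(0.42)
+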
+func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary {
+ if len(desc.variableLabels.names) != len(labelValues) {
+ panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.names, labelValues))
+ }
+
+ for _, n := range desc.variableLabels.names {
+ if n == quantileLabel {
+ panic(errQuantileLabelNotAllowed)
+ }
+ }
+ for _, lp := range desc.constLabelPairs {
+ if lp.GetName() == quantileLabel {
+ panic(errQuantileLabelNotAllowed)
+ }
+ }
+
+ if opts.Objectives == nil {
+ opts.Objectives = map[float64]float64{}
+ }
+
+ if opts.MaxAge < 0 {
+ panic(fmt.Errorf("illegal max age MaxAge=%v", opts.MaxAge))
+ }
+ if opts.MaxAge == 0 {
+ opts.MaxAge = DefMaxAge
+ }
+
+ if opts.AgeBuckets == 0 {
+ opts.AgeBuckets = DefAgeBuckets
+ }
+
+ if opts.BufCap == 0 {
+ opts.BufCap = DefBufCap
+ }
+
+ if opts.now == nil {
+ opts.now = time.Now
+ }
+ if len(opts.Objectives) == 0 {
+ // Use the lock-free implementation of a Summary without objectives.
+ s := &noObjectivesSummary{
+ desc: desc,
+ labelPairs: MakeLabelPairs(desc, labelValues),
+ counts: [2]*summaryCounts{{}, {}},
+ }
+ s.init(s) // Init self-collection.
+ s.createdTs = timestamppb.New(opts.now())
+ return s
+ }
+
+ s := &summary{
+ desc: desc,
+
+ objectives: opts.Objectives,
+ sortedObjectives: make([]float64, 0, len(opts.Objectives)),
+
+ labelPairs: MakeLabelPairs(desc, labelValues),
+
+ hotBuf: make([]float64, 0, opts.BufCap),
+ coldBuf: make([]float64, 0, opts.BufCap),
+ streamDuration: opts.MaxAge / time.Duration(opts.AgeBuckets),
+ }
+ s.headStreamExpTime = opts.now().Add(s.streamDuration)
+ s.hotBufExpTime = s.headStreamExpTime
+
+ for i := uint32(0); i < opts.AgeBuckets; i++ {
+ s.streams = append(s.streams, s.newStream())
+ }
+ s.headStream = s.streams[0]
+
+ for qu := range s.objectives {
+ s.sortedObjectives = append(s.sortedObjectives, qu)
+ }
+ sort.Float64s(s.sortedObjectives)
+
+ s.init(s) // Init self-collection.
+ s.createdTs = timestamppb.New(opts.now())
+ return s
+}
+
+type summary struct {
+ selfCollector
+
+ bufMtx sync.Mutex // Protects hotBuf and hotBufExpTime.
+ mtx sync.Mutex // Protects every other moving part.
+ // Lock bufMtx before mtx if both are needed.
+
+ desc *Desc
+
+ objectives map[float64]float64
+ sortedObjectives []float64
+
+ labelPairs []*dto.LabelPair
+
+ sum float64
+ cnt uint64
+
+ hotBuf, coldBuf []float64
+
+ streams []*quantile.Stream
+ streamDuration time.Duration
+ headStream *quantile.Stream
+ headStreamIdx int
+ headStreamExpTime, hotBufExpTime time.Time
+
+ createdTs *timestamppb.Timestamp
+}
+
+func (s *summary) Desc() *Desc {
+ return s.desc
+}
+
+func (s *summary) Observe(v float64) {
+ s.bufMtx.Lock()
+ defer s.bufMtx.Unlock()
+
+ now := time.Now()
+ if now.After(s.hotBufExpTime) {
+ s.asyncFlush(now)
+ }
+ s.hotBuf = append(s.hotBuf, v)
+ if len(s.hotBuf) == cap(s.hotBuf) {
+ s.asyncFlush(now)
+ }
+}
+
+func (s *summary) Write(out *dto.Metric) error {
+ sum := &dto.Summary{
+ CreatedTimestamp: s.createdTs,
+ }
+ qs := make([]*dto.Quantile, 0, len(s.objectives))
+
+ s.bufMtx.Lock()
+ s.mtx.Lock()
+ // Swap bufs even if hotBuf is empty to set new hotBufExpTime.
+ s.swapBufs(time.Now())
+ s.bufMtx.Unlock()
+
+ s.flushColdBuf()
+ sum.SampleCount = proto.Uint64(s.cnt)
+ sum.SampleSum = proto.Float64(s.sum)
+
+ for _, rank := range s.sortedObjectives {
+ var q float64
+ if s.headStream.Count() == 0 {
+ q = math.NaN()
+ } else {
+ q = s.headStream.Query(rank)
+ }
+ qs = append(qs, &dto.Quantile{
+ Quantile: proto.Float64(rank),
+ Value: proto.Float64(q),
+ })
+ }
+
+ s.mtx.Unlock()
+
+ if len(qs) > 0 {
+ sort.Sort(quantSort(qs))
+ }
+ sum.Quantile = qs
+
+ out.Summary = sum
+ out.Label = s.labelPairs
+ return nil
+}
+
+func (s *summary) newStream() *quantile.Stream {
+ return quantile.NewTargeted(s.objectives)
+}
+
+// asyncFlush needs bufMtx locked.
+func (s *summary) asyncFlush(now time.Time) {
+ s.mtx.Lock()
+ s.swapBufs(now)
+
+ // Unblock the original goroutine that was responsible for the mutation
+ // that triggered the compaction. But hold onto the global non-buffer
+ // state mutex until the operation finishes.
+ go func() {
+ s.flushColdBuf()
+ s.mtx.Unlock()
+ }()
+}
+
+// maybeRotateStreams needs mtx AND bufMtx locked.
+func (s *summary) maybeRotateStreams() {
+ for !s.hotBufExpTime.Equal(s.headStreamExpTime) {
+ s.headStream.Reset()
+ s.headStreamIdx++
+ if s.headStreamIdx >= len(s.streams) {
+ s.headStreamIdx = 0
+ }
+ s.headStream = s.streams[s.headStreamIdx]
+ s.headStreamExpTime = s.headStreamExpTime.Add(s.streamDuration)
+ }
+}
+
+// flushColdBuf needs mtx locked.
+func (s *summary) flushColdBuf() {
+ for _, v := range s.coldBuf {
+ for _, stream := range s.streams {
+ stream.Insert(v)
+ }
+ s.cnt++
+ s.sum += v
+ }
+ s.coldBuf = s.coldBuf[0:0]
+ s.maybeRotateStreams()
+}
+
+// swapBufs needs mtx AND bufMtx locked, coldBuf must be empty.
+func (s *summary) swapBufs(now time.Time) {
+ if len(s.coldBuf) != 0 {
+ panic("coldBuf is not empty")
+ }
+ s.hotBuf, s.coldBuf = s.coldBuf, s.hotBuf
+ // hotBuf is now empty and gets new expiration set.
+ for now.After(s.hotBufExpTime) {
+ s.hotBufExpTime = s.hotBufExpTime.Add(s.streamDuration)
+ }
+}
+
+type summaryCounts struct {
+ // sumBits contains the bits of the float64 representing the sum of all
+ // observations. sumBits and count have to go first in the struct to
+ // guarantee alignment for atomic operations.
+ // http://golang.org/pkg/sync/atomic/#pkg-note-BUG
+ sumBits uint64
+ count uint64
+}
+
+type noObjectivesSummary struct {
+ // countAndHotIdx enables lock-free writes with use of atomic updates.
+ // The most significant bit is the hot index [0 or 1] of the count field
+ // below. Observe calls update the hot one. All remaining bits count the
+ // number of Observe calls. Observe starts by incrementing this counter,
+ // and finish by incrementing the count field in the respective
+ // summaryCounts, as a marker for completion.
+ //
+ // Calls of the Write method (which are non-mutating reads from the
+ // perspective of the summary) swap the hot and cold counts under the
+ // writeMtx lock. A cooldown is awaited (while locked) by comparing the
+ // number of observations with the initiation count. Once they match, the
+ // last observation on the now cold counts has completed. All cold fields
+ // must be merged into the new hot counts before writeMtx is released.
+
+ // Fields with atomic access first! See alignment constraint:
+ // http://golang.org/pkg/sync/atomic/#pkg-note-BUG
+ countAndHotIdx uint64
+
+ selfCollector
+ desc *Desc
+ writeMtx sync.Mutex // Only used in the Write method.
+
+ // Two counts, one is "hot" for lock-free observations, the other is
+ // "cold" for writing out a dto.Metric. It has to be an array of
+ // pointers to guarantee 64bit alignment of the summaryCounts, see
+ // http://golang.org/pkg/sync/atomic/#pkg-note-BUG.
+ counts [2]*summaryCounts
+
+ labelPairs []*dto.LabelPair
+
+ createdTs *timestamppb.Timestamp
+}
+
+func (s *noObjectivesSummary) Desc() *Desc {
+ return s.desc
+}
+
+func (s *noObjectivesSummary) Observe(v float64) {
+ // We increment s.countAndHotIdx so that the counter in the lower
+ // 63 bits gets incremented. At the same time, we get the new value
+ // back, which we can use to find the currently-hot counts.
+ n := atomic.AddUint64(&s.countAndHotIdx, 1)
+ hotCounts := s.counts[n>>63]
+
+ for {
+ oldBits := atomic.LoadUint64(&hotCounts.sumBits)
+ newBits := math.Float64bits(math.Float64frombits(oldBits) + v)
+ if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) {
+ break
+ }
+ }
+ // Increment count last as we take it as a signal that the observation
+ // is complete.
+ atomic.AddUint64(&hotCounts.count, 1)
+}
+
+func (s *noObjectivesSummary) Write(out *dto.Metric) error {
+ // For simplicity, we protect this whole method by a mutex. It is not in
+ // the hot path, i.e. Observe is called much more often than Write. The
+ // complication of making Write lock-free isn't worth it, if possible at
+ // all.
+ s.writeMtx.Lock()
+ defer s.writeMtx.Unlock()
+
+ // Adding 1<<63 switches the hot index (from 0 to 1 or from 1 to 0)
+ // without touching the count bits. See the struct comments for a full
+ // description of the algorithm.
+ n := atomic.AddUint64(&s.countAndHotIdx, 1<<63)
+ // count is contained unchanged in the lower 63 bits.
+ count := n & ((1 << 63) - 1)
+ // The most significant bit tells us which counts is hot. The complement
+ // is thus the cold one.
+ hotCounts := s.counts[n>>63]
+ coldCounts := s.counts[(^n)>>63]
+
+ // Await cooldown.
+ for count != atomic.LoadUint64(&coldCounts.count) {
+ runtime.Gosched() // Let observations get work done.
+ }
+
+ sum := &dto.Summary{
+ SampleCount: proto.Uint64(count),
+ SampleSum: proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))),
+ CreatedTimestamp: s.createdTs,
+ }
+
+ out.Summary = sum
+ out.Label = s.labelPairs
+
+ // Finally add all the cold counts to the new hot counts and reset the cold counts.
+ atomic.AddUint64(&hotCounts.count, count)
+ atomic.StoreUint64(&coldCounts.count, 0)
+ for {
+ oldBits := atomic.LoadUint64(&hotCounts.sumBits)
+ newBits := math.Float64bits(math.Float64frombits(oldBits) + sum.GetSampleSum())
+ if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) {
+ atomic.StoreUint64(&coldCounts.sumBits, 0)
+ break
+ }
+ }
+ return nil
+}
+
+type quantSort []*dto.Quantile
+
+func (s quantSort) Len() int {
+ return len(s)
+}
+
+func (s quantSort) Swap(i, j int) {
+ s[i], s[j] = s[j], s[i]
+}
+
+func (s quantSort) Less(i, j int) bool {
+ return s[i].GetQuantile() < s[j].GetQuantile()
+}
+
+// SummaryVec is a Collector that bundles a set of Summaries that all share the
+// same Desc, but have different values for their variable labels. This is used
+// if you want to count the same thing partitioned by various dimensions
+// (e.g. HTTP request latencies, partitioned by status code and method). Create
+// instances with NewSummaryVec.
+type SummaryVec struct {
+ *MetricVec
+}
+
+// NewSummaryVec creates a new SummaryVec based on the provided SummaryOpts and
+// partitioned by the given label names.
+//
+// Due to the way a Summary is represented in the Prometheus text format and how
+// it is handled by the Prometheus server internally, “quantile” is an illegal
+// label name. NewSummaryVec will panic if this label name is used.
+func NewSummaryVec(opts SummaryOpts, labelNames []string) *SummaryVec {
+ return V2.NewSummaryVec(SummaryVecOpts{
+ SummaryOpts: opts,
+ VariableLabels: UnconstrainedLabels(labelNames),
+ })
+}
+
+// NewSummaryVec creates a new SummaryVec based on the provided SummaryVecOpts.
+func (v2) NewSummaryVec(opts SummaryVecOpts) *SummaryVec {
+ for _, ln := range opts.VariableLabels.labelNames() {
+ if ln == quantileLabel {
+ panic(errQuantileLabelNotAllowed)
+ }
+ }
+ desc := V2.NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ opts.VariableLabels,
+ opts.ConstLabels,
+ )
+ return &SummaryVec{
+ MetricVec: NewMetricVec(desc, func(lvs ...string) Metric {
+ return newSummary(desc, opts.SummaryOpts, lvs...)
+ }),
+ }
+}
+
+// GetMetricWithLabelValues returns the Summary for the given slice of label
+// values (same order as the variable labels in Desc). If that combination of
+// label values is accessed for the first time, a new Summary is created.
+//
+// It is possible to call this method without using the returned Summary to only
+// create the new Summary but leave it at its starting value, a Summary without
+// any observations.
+//
+// Keeping the Summary for later use is possible (and should be considered if
+// performance is critical), but keep in mind that Reset, DeleteLabelValues and
+// Delete can be used to delete the Summary from the SummaryVec. In that case,
+// the Summary will still exist, but it will not be exported anymore, even if a
+// Summary with the same label values is created later. See also the CounterVec
+// example.
+//
+// An error is returned if the number of label values is not the same as the
+// number of variable labels in Desc (minus any curried labels).
+//
+// Note that for more than one label value, this method is prone to mistakes
+// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as
+// an alternative to avoid that type of mistake. For higher label numbers, the
+// latter has a much more readable (albeit more verbose) syntax, but it comes
+// with a performance overhead (for creating and processing the Labels map).
+// See also the GaugeVec example.
+func (v *SummaryVec) GetMetricWithLabelValues(lvs ...string) (Observer, error) {
+ metric, err := v.MetricVec.GetMetricWithLabelValues(lvs...)
+ if metric != nil {
+ return metric.(Observer), err
+ }
+ return nil, err
+}
+
+// GetMetricWith returns the Summary for the given Labels map (the label names
+// must match those of the variable labels in Desc). If that label map is
+// accessed for the first time, a new Summary is created. Implications of
+// creating a Summary without using it and keeping the Summary for later use are
+// the same as for GetMetricWithLabelValues.
+//
+// An error is returned if the number and names of the Labels are inconsistent
+// with those of the variable labels in Desc (minus any curried labels).
+//
+// This method is used for the same purpose as
+// GetMetricWithLabelValues(...string). See there for pros and cons of the two
+// methods.
+func (v *SummaryVec) GetMetricWith(labels Labels) (Observer, error) {
+ metric, err := v.MetricVec.GetMetricWith(labels)
+ if metric != nil {
+ return metric.(Observer), err
+ }
+ return nil, err
+}
+
+// WithLabelValues works as GetMetricWithLabelValues, but panics where
+// GetMetricWithLabelValues would have returned an error. Not returning an
+// error allows shortcuts like
+//
+// myVec.WithLabelValues("404", "GET").Observe(42.21)
+func (v *SummaryVec) WithLabelValues(lvs ...string) Observer {
+ s, err := v.GetMetricWithLabelValues(lvs...)
+ if err != nil {
+ panic(err)
+ }
+ return s
+}
+
+// With works as GetMetricWith, but panics where GetMetricWith would have
+// returned an error. Not returning an error allows shortcuts like
+//
+// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Observe(42.21)
+func (v *SummaryVec) With(labels Labels) Observer {
+ s, err := v.GetMetricWith(labels)
+ if err != nil {
+ panic(err)
+ }
+ return s
+}
+
+// CurryWith returns a vector curried with the provided labels, i.e. the
+// returned vector has those labels pre-set for all labeled operations performed
+// on it. The cardinality of the curried vector is reduced accordingly. The
+// order of the remaining labels stays the same (just with the curried labels
+// taken out of the sequence – which is relevant for the
+// (GetMetric)WithLabelValues methods). It is possible to curry a curried
+// vector, but only with labels not yet used for currying before.
+//
+// The metrics contained in the SummaryVec are shared between the curried and
+// uncurried vectors. They are just accessed differently. Curried and uncurried
+// vectors behave identically in terms of collection. Only one must be
+// registered with a given registry (usually the uncurried version). The Reset
+// method deletes all metrics, even if called on a curried vector.
+func (v *SummaryVec) CurryWith(labels Labels) (ObserverVec, error) {
+ vec, err := v.MetricVec.CurryWith(labels)
+ if vec != nil {
+ return &SummaryVec{vec}, err
+ }
+ return nil, err
+}
+
+// MustCurryWith works as CurryWith but panics where CurryWith would have
+// returned an error.
+func (v *SummaryVec) MustCurryWith(labels Labels) ObserverVec {
+ vec, err := v.CurryWith(labels)
+ if err != nil {
+ panic(err)
+ }
+ return vec
+}
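+
+// An illustrative sketch of currying (metric and label names are hypothetical,
+// not upstream code): pre-setting one label so that callers only supply the
+// remaining ones:
+//
+//	latency := NewSummaryVec(SummaryOpts{
+//		Name: "request_duration_seconds",
+//		Help: "Request latency in seconds.",
+//	}, []string{"method", "path"})
+//	getOnly := latency.MustCurryWith(Labels{"method": "GET"})
+//	// Only the remaining label ("path") is supplied afterwards.
+//	getOnly.WithLabelValues("/users").Observe(0.042)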
+
+type constSummary struct {
+ desc *Desc
+ count uint64
+ sum float64
+ quantiles map[float64]float64
+ labelPairs []*dto.LabelPair
+ createdTs *timestamppb.Timestamp
+}
+
+func (s *constSummary) Desc() *Desc {
+ return s.desc
+}
+
+func (s *constSummary) Write(out *dto.Metric) error {
+ sum := &dto.Summary{
+ CreatedTimestamp: s.createdTs,
+ }
+ qs := make([]*dto.Quantile, 0, len(s.quantiles))
+
+ sum.SampleCount = proto.Uint64(s.count)
+ sum.SampleSum = proto.Float64(s.sum)
+
+ for rank, q := range s.quantiles {
+ qs = append(qs, &dto.Quantile{
+ Quantile: proto.Float64(rank),
+ Value: proto.Float64(q),
+ })
+ }
+
+ if len(qs) > 0 {
+ sort.Sort(quantSort(qs))
+ }
+ sum.Quantile = qs
+
+ out.Summary = sum
+ out.Label = s.labelPairs
+
+ return nil
+}
+
+// NewConstSummary returns a metric representing a Prometheus summary with fixed
+// values for the count, sum, and quantiles. As those parameters cannot be
+// changed, the returned value does not implement the Summary interface (but
+// only the Metric interface). Users of this package will not have much use for
+// it in regular operations. However, when implementing custom Collectors, it is
+// useful as a throw-away metric that is generated on the fly to send it to
+// Prometheus in the Collect method.
+//
+// quantiles maps ranks to quantile values. For example, a median latency of
+// 0.23s and a 99th percentile latency of 0.56s would be expressed as:
+//
+// map[float64]float64{0.5: 0.23, 0.99: 0.56}
+//
+// NewConstSummary returns an error if the length of labelValues is not
+// consistent with the variable labels in Desc or if Desc is invalid.
+func NewConstSummary(
+ desc *Desc,
+ count uint64,
+ sum float64,
+ quantiles map[float64]float64,
+ labelValues ...string,
+) (Metric, error) {
+ if desc.err != nil {
+ return nil, desc.err
+ }
+ if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil {
+ return nil, err
+ }
+ return &constSummary{
+ desc: desc,
+ count: count,
+ sum: sum,
+ quantiles: quantiles,
+ labelPairs: MakeLabelPairs(desc, labelValues),
+ }, nil
+}
+
+// MustNewConstSummary is a version of NewConstSummary that panics where
+// NewConstSummary would have returned an error.
+func MustNewConstSummary(
+ desc *Desc,
+ count uint64,
+ sum float64,
+ quantiles map[float64]float64,
+ labelValues ...string,
+) Metric {
+ m, err := NewConstSummary(desc, count, sum, quantiles, labelValues...)
+ if err != nil {
+ panic(err)
+ }
+ return m
+}
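+
+// A minimal sketch of the throw-away-metric pattern described above, inside a
+// custom Collector's Collect method (the Desc and the data source are
+// hypothetical, not upstream code):
+//
+//	func (c *myCollector) Collect(ch chan<- Metric) {
+//		count, sum := c.snapshotLatencies() // hypothetical data source
+//		ch <- MustNewConstSummary(
+//			c.latencyDesc,
+//			count, sum,
+//			map[float64]float64{0.5: 0.23, 0.99: 0.56},
+//			"example_label_value",
+//		)
+//	}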
+
+// NewConstSummaryWithCreatedTimestamp does the same thing as NewConstSummary but sets the created timestamp.
+func NewConstSummaryWithCreatedTimestamp(
+ desc *Desc,
+ count uint64,
+ sum float64,
+ quantiles map[float64]float64,
+ ct time.Time,
+ labelValues ...string,
+) (Metric, error) {
+ if desc.err != nil {
+ return nil, desc.err
+ }
+ if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil {
+ return nil, err
+ }
+ return &constSummary{
+ desc: desc,
+ count: count,
+ sum: sum,
+ quantiles: quantiles,
+ labelPairs: MakeLabelPairs(desc, labelValues),
+ createdTs: timestamppb.New(ct),
+ }, nil
+}
+
+// MustNewConstSummaryWithCreatedTimestamp is a version of NewConstSummaryWithCreatedTimestamp that panics where
+// NewConstSummaryWithCreatedTimestamp would have returned an error.
+func MustNewConstSummaryWithCreatedTimestamp(
+ desc *Desc,
+ count uint64,
+ sum float64,
+ quantiles map[float64]float64,
+ ct time.Time,
+ labelValues ...string,
+) Metric {
+ m, err := NewConstSummaryWithCreatedTimestamp(desc, count, sum, quantiles, ct, labelValues...)
+ if err != nil {
+ panic(err)
+ }
+ return m
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/timer.go b/vendor/github.com/prometheus/client_golang/prometheus/timer.go
new file mode 100644
index 0000000..52344fe
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/timer.go
@@ -0,0 +1,81 @@
+// Copyright 2016 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import "time"
+
+// Timer is a helper type to time functions. Use NewTimer to create new
+// instances.
+type Timer struct {
+ begin time.Time
+ observer Observer
+}
+
+// NewTimer creates a new Timer. The provided Observer is used to observe a
+// duration in seconds. If the Observer implements ExemplarObserver, passing an
+// exemplar later on will also be supported.
+// Timer is usually used to time a function call in the
+// following way:
+//
+// func TimeMe() {
+// timer := NewTimer(myHistogram)
+// defer timer.ObserveDuration()
+// // Do actual work.
+// }
+//
+// or
+//
+// func TimeMeWithExemplar() {
+// timer := NewTimer(myHistogram)
+// defer timer.ObserveDurationWithExemplar(exemplar)
+// // Do actual work.
+// }
+func NewTimer(o Observer) *Timer {
+ return &Timer{
+ begin: time.Now(),
+ observer: o,
+ }
+}
+
+// ObserveDuration records the duration passed since the Timer was created with
+// NewTimer. It calls the Observe method of the Observer provided during
+// construction with the duration in seconds as an argument. The observed
+// duration is also returned. ObserveDuration is usually called with a defer
+// statement.
+//
+// Note that this method is only guaranteed to never observe negative durations
+// if used with Go 1.9+.
+func (t *Timer) ObserveDuration() time.Duration {
+ d := time.Since(t.begin)
+ if t.observer != nil {
+ t.observer.Observe(d.Seconds())
+ }
+ return d
+}
+
+// ObserveDurationWithExemplar is like ObserveDuration, but it will also
+// observe the exemplar with the duration, unless the exemplar is nil or the
+// provided Observer cannot be cast to an ExemplarObserver.
+func (t *Timer) ObserveDurationWithExemplar(exemplar Labels) time.Duration {
+ d := time.Since(t.begin)
+ eo, ok := t.observer.(ExemplarObserver)
+ if ok && exemplar != nil {
+ eo.ObserveWithExemplar(d.Seconds(), exemplar)
+ return d
+ }
+ if t.observer != nil {
+ t.observer.Observe(d.Seconds())
+ }
+ return d
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/untyped.go b/vendor/github.com/prometheus/client_golang/prometheus/untyped.go
new file mode 100644
index 0000000..0f9ce63
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/untyped.go
@@ -0,0 +1,42 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+// UntypedOpts is an alias for Opts. See there for doc comments.
+type UntypedOpts Opts
+
+// UntypedFunc works like GaugeFunc but the collected metric is of type
+// "Untyped". UntypedFunc is useful to mirror an external metric of unknown
+// type.
+//
+// To create UntypedFunc instances, use NewUntypedFunc.
+type UntypedFunc interface {
+ Metric
+ Collector
+}
+
+// NewUntypedFunc creates a new UntypedFunc based on the provided
+// UntypedOpts. The value reported is determined by calling the given function
+// from within the Write method. Take into account that metric collection may
+// happen concurrently. If that results in concurrent calls to Write, like in
+// the case where an UntypedFunc is directly registered with Prometheus, the
+// provided function must be concurrency-safe.
+func NewUntypedFunc(opts UntypedOpts, function func() float64) UntypedFunc {
+ return newValueFunc(NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ nil,
+ opts.ConstLabels,
+ ), UntypedValue, function)
+}
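+
+// A hedged usage sketch (names are hypothetical, not upstream code): mirroring
+// a value of unknown type from an external system:
+//
+//	externalValue := NewUntypedFunc(UntypedOpts{
+//		Name: "external_system_value",
+//		Help: "Value mirrored from an external system of unknown type.",
+//	}, func() float64 {
+//		return readExternalValue() // hypothetical; must be concurrency-safe
+//	})
+//	MustRegister(externalValue)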
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/value.go b/vendor/github.com/prometheus/client_golang/prometheus/value.go
new file mode 100644
index 0000000..cc23011
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/value.go
@@ -0,0 +1,274 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "errors"
+ "fmt"
+ "sort"
+ "time"
+ "unicode/utf8"
+
+ "github.com/prometheus/client_golang/prometheus/internal"
+
+ dto "github.com/prometheus/client_model/go"
+ "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/types/known/timestamppb"
+)
+
+// ValueType is an enumeration of metric types that represent a simple value.
+type ValueType int
+
+// Possible values for the ValueType enum. Use UntypedValue to mark a metric
+// with an unknown type.
+const (
+ _ ValueType = iota
+ CounterValue
+ GaugeValue
+ UntypedValue
+)
+
+var (
+ CounterMetricTypePtr = func() *dto.MetricType { d := dto.MetricType_COUNTER; return &d }()
+ GaugeMetricTypePtr = func() *dto.MetricType { d := dto.MetricType_GAUGE; return &d }()
+ UntypedMetricTypePtr = func() *dto.MetricType { d := dto.MetricType_UNTYPED; return &d }()
+)
+
+func (v ValueType) ToDTO() *dto.MetricType {
+ switch v {
+ case CounterValue:
+ return CounterMetricTypePtr
+ case GaugeValue:
+ return GaugeMetricTypePtr
+ default:
+ return UntypedMetricTypePtr
+ }
+}
+
+// valueFunc is a generic metric for simple values retrieved on collect time
+// from a function. It implements Metric and Collector. Its effective type is
+// determined by ValueType. This is a low-level building block used by the
+// library to back the implementations of CounterFunc, GaugeFunc, and
+// UntypedFunc.
+type valueFunc struct {
+ selfCollector
+
+ desc *Desc
+ valType ValueType
+ function func() float64
+ labelPairs []*dto.LabelPair
+}
+
+// newValueFunc returns a newly allocated valueFunc with the given Desc and
+// ValueType. The value reported is determined by calling the given function
+// from within the Write method. Take into account that metric collection may
+// happen concurrently. If that results in concurrent calls to Write, like in
+// the case where a valueFunc is directly registered with Prometheus, the
+// provided function must be concurrency-safe.
+func newValueFunc(desc *Desc, valueType ValueType, function func() float64) *valueFunc {
+ result := &valueFunc{
+ desc: desc,
+ valType: valueType,
+ function: function,
+ labelPairs: MakeLabelPairs(desc, nil),
+ }
+ result.init(result)
+ return result
+}
+
+func (v *valueFunc) Desc() *Desc {
+ return v.desc
+}
+
+func (v *valueFunc) Write(out *dto.Metric) error {
+ return populateMetric(v.valType, v.function(), v.labelPairs, nil, out, nil)
+}
+
+// NewConstMetric returns a metric with one fixed value that cannot be
+// changed. Users of this package will not have much use for it in regular
+// operations. However, when implementing custom Collectors, it is useful as a
+// throw-away metric that is generated on the fly to send it to Prometheus in
+// the Collect method. NewConstMetric returns an error if the length of
+// labelValues is not consistent with the variable labels in Desc or if Desc is
+// invalid.
+func NewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) (Metric, error) {
+ if desc.err != nil {
+ return nil, desc.err
+ }
+ if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil {
+ return nil, err
+ }
+
+ metric := &dto.Metric{}
+ if err := populateMetric(valueType, value, MakeLabelPairs(desc, labelValues), nil, metric, nil); err != nil {
+ return nil, err
+ }
+
+ return &constMetric{
+ desc: desc,
+ metric: metric,
+ }, nil
+}
+
+// MustNewConstMetric is a version of NewConstMetric that panics where
+// NewConstMetric would have returned an error.
+func MustNewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) Metric {
+ m, err := NewConstMetric(desc, valueType, value, labelValues...)
+ if err != nil {
+ panic(err)
+ }
+ return m
+}
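+
+// A minimal sketch of emitting a const metric from a custom Collector (the
+// Desc and value source are hypothetical, not upstream code):
+//
+//	var queueDepthDesc = NewDesc(
+//		"example_queue_depth",
+//		"Current depth of the example queue.",
+//		[]string{"queue"}, nil,
+//	)
+//
+//	func (c *myCollector) Collect(ch chan<- Metric) {
+//		ch <- MustNewConstMetric(queueDepthDesc, GaugeValue, c.currentDepth(), "default")
+//	}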
+
+// NewConstMetricWithCreatedTimestamp does the same thing as NewConstMetric, but generates Counters
+// with created timestamp set and returns an error for other metric types.
+func NewConstMetricWithCreatedTimestamp(desc *Desc, valueType ValueType, value float64, ct time.Time, labelValues ...string) (Metric, error) {
+ if desc.err != nil {
+ return nil, desc.err
+ }
+ if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil {
+ return nil, err
+ }
+ switch valueType {
+ case CounterValue:
+ break
+ default:
+ return nil, errors.New("created timestamps are only supported for counters")
+ }
+
+ metric := &dto.Metric{}
+ if err := populateMetric(valueType, value, MakeLabelPairs(desc, labelValues), nil, metric, timestamppb.New(ct)); err != nil {
+ return nil, err
+ }
+
+ return &constMetric{
+ desc: desc,
+ metric: metric,
+ }, nil
+}
+
+// MustNewConstMetricWithCreatedTimestamp is a version of NewConstMetricWithCreatedTimestamp that panics where
+// NewConstMetricWithCreatedTimestamp would have returned an error.
+func MustNewConstMetricWithCreatedTimestamp(desc *Desc, valueType ValueType, value float64, ct time.Time, labelValues ...string) Metric {
+ m, err := NewConstMetricWithCreatedTimestamp(desc, valueType, value, ct, labelValues...)
+ if err != nil {
+ panic(err)
+ }
+ return m
+}
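+
+// A brief sketch (hypothetical names, not upstream code): exposing a counter
+// with a known creation time from a custom Collector's Collect method:
+//
+//	ch <- MustNewConstMetricWithCreatedTimestamp(
+//		requestsDesc, CounterValue, totalRequests, processStartTime, "GET",
+//	)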
+
+type constMetric struct {
+ desc *Desc
+ metric *dto.Metric
+}
+
+func (m *constMetric) Desc() *Desc {
+ return m.desc
+}
+
+func (m *constMetric) Write(out *dto.Metric) error {
+ out.Label = m.metric.Label
+ out.Counter = m.metric.Counter
+ out.Gauge = m.metric.Gauge
+ out.Untyped = m.metric.Untyped
+ return nil
+}
+
+func populateMetric(
+ t ValueType,
+ v float64,
+ labelPairs []*dto.LabelPair,
+ e *dto.Exemplar,
+ m *dto.Metric,
+ ct *timestamppb.Timestamp,
+) error {
+ m.Label = labelPairs
+ switch t {
+ case CounterValue:
+ m.Counter = &dto.Counter{Value: proto.Float64(v), Exemplar: e, CreatedTimestamp: ct}
+ case GaugeValue:
+ m.Gauge = &dto.Gauge{Value: proto.Float64(v)}
+ case UntypedValue:
+ m.Untyped = &dto.Untyped{Value: proto.Float64(v)}
+ default:
+ return fmt.Errorf("encountered unknown type %v", t)
+ }
+ return nil
+}
+
+// MakeLabelPairs is a helper function to create protobuf LabelPairs from the
+// variable and constant labels in the provided Desc. The values for the
+// variable labels are defined by the labelValues slice, which must be in the
+// same order as the corresponding variable labels in the Desc.
+//
+// This function is only needed for custom Metric implementations. See MetricVec
+// example.
+func MakeLabelPairs(desc *Desc, labelValues []string) []*dto.LabelPair {
+ totalLen := len(desc.variableLabels.names) + len(desc.constLabelPairs)
+ if totalLen == 0 {
+ // Super fast path.
+ return nil
+ }
+ if len(desc.variableLabels.names) == 0 {
+ // Moderately fast path.
+ return desc.constLabelPairs
+ }
+ labelPairs := make([]*dto.LabelPair, 0, totalLen)
+ for i, l := range desc.variableLabels.names {
+ labelPairs = append(labelPairs, &dto.LabelPair{
+ Name: proto.String(l),
+ Value: proto.String(labelValues[i]),
+ })
+ }
+ labelPairs = append(labelPairs, desc.constLabelPairs...)
+ sort.Sort(internal.LabelPairSorter(labelPairs))
+ return labelPairs
+}
+
+// ExemplarMaxRunes is the max total number of runes allowed in exemplar labels.
+const ExemplarMaxRunes = 128
+
+// newExemplar creates a new dto.Exemplar from the provided values. An error is
+// returned if any of the label names or values are invalid or if the total
+// number of runes in the label names and values exceeds ExemplarMaxRunes.
+func newExemplar(value float64, ts time.Time, l Labels) (*dto.Exemplar, error) {
+ e := &dto.Exemplar{}
+ e.Value = proto.Float64(value)
+ tsProto := timestamppb.New(ts)
+ if err := tsProto.CheckValid(); err != nil {
+ return nil, err
+ }
+ e.Timestamp = tsProto
+ labelPairs := make([]*dto.LabelPair, 0, len(l))
+ var runes int
+ for name, value := range l {
+ if !checkLabelName(name) {
+ return nil, fmt.Errorf("exemplar label name %q is invalid", name)
+ }
+ runes += utf8.RuneCountInString(name)
+ if !utf8.ValidString(value) {
+ return nil, fmt.Errorf("exemplar label value %q is not valid UTF-8", value)
+ }
+ runes += utf8.RuneCountInString(value)
+ labelPairs = append(labelPairs, &dto.LabelPair{
+ Name: proto.String(name),
+ Value: proto.String(value),
+ })
+ }
+ if runes > ExemplarMaxRunes {
+ return nil, fmt.Errorf("exemplar labels have %d runes, exceeding the limit of %d", runes, ExemplarMaxRunes)
+ }
+ e.Label = labelPairs
+ return e, nil
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/vec.go b/vendor/github.com/prometheus/client_golang/prometheus/vec.go
new file mode 100644
index 0000000..2c808ee
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/vec.go
@@ -0,0 +1,709 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "fmt"
+ "sync"
+
+ "github.com/prometheus/common/model"
+)
+
+// MetricVec is a Collector to bundle metrics of the same name that differ in
+// their label values. MetricVec is not used directly but as a building block
+// for implementations of vectors of a given metric type, like GaugeVec,
+// CounterVec, SummaryVec, and HistogramVec. It is exported so that it can be
+// used for custom Metric implementations.
+//
+// To create a FooVec for custom Metric Foo, embed a pointer to MetricVec in
+// FooVec and initialize it with NewMetricVec. Implement wrappers for
+// GetMetricWithLabelValues and GetMetricWith that return (Foo, error) rather
+// than (Metric, error). Similarly, create a wrapper for CurryWith that returns
+// (*FooVec, error) rather than (*MetricVec, error). It is recommended to also
+// add the convenience methods WithLabelValues, With, and MustCurryWith, which
+// panic instead of returning errors. See also the MetricVec example.
+type MetricVec struct {
+ *metricMap
+
+ curry []curriedLabelValue
+
+ // hashAdd and hashAddByte can be replaced for testing collision handling.
+ hashAdd func(h uint64, s string) uint64
+ hashAddByte func(h uint64, b byte) uint64
+}
+
+// NewMetricVec returns an initialized MetricVec.
+func NewMetricVec(desc *Desc, newMetric func(lvs ...string) Metric) *MetricVec {
+ return &MetricVec{
+ metricMap: &metricMap{
+ metrics: map[uint64][]metricWithLabelValues{},
+ desc: desc,
+ newMetric: newMetric,
+ },
+ hashAdd: hashAdd,
+ hashAddByte: hashAddByte,
+ }
+}
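+
+// An illustrative sketch of the wrapper pattern described on MetricVec (the
+// InfoVec type and its semantics are hypothetical, not upstream code):
+//
+//	type InfoVec struct {
+//		*MetricVec
+//	}
+//
+//	func NewInfoVec(desc *Desc) *InfoVec {
+//		return &InfoVec{NewMetricVec(desc, func(lvs ...string) Metric {
+//			return MustNewConstMetric(desc, GaugeValue, 1, lvs...)
+//		})}
+//	}
+//
+//	func (v *InfoVec) WithLabelValues(lvs ...string) Metric {
+//		m, err := v.MetricVec.GetMetricWithLabelValues(lvs...)
+//		if err != nil {
+//			panic(err)
+//		}
+//		return m
+//	}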
+
+// DeleteLabelValues removes the metric where the variable labels are the same
+// as those passed in as labels (same order as the VariableLabels in Desc). It
+// returns true if a metric was deleted.
+//
+// It is not an error if the number of label values is not the same as the
+// number of VariableLabels in Desc. However, such inconsistent label count can
+// never match an actual metric, so the method will always return false in that
+// case.
+//
+// Note that for more than one label value, this method is prone to mistakes
+// caused by an incorrect order of arguments. Consider Delete(Labels) as an
+// alternative to avoid that type of mistake. For higher label numbers, the
+// latter has a much more readable (albeit more verbose) syntax, but it comes
+// with a performance overhead (for creating and processing the Labels map).
+// See also the CounterVec example.
+func (m *MetricVec) DeleteLabelValues(lvs ...string) bool {
+ lvs = constrainLabelValues(m.desc, lvs, m.curry)
+
+ h, err := m.hashLabelValues(lvs)
+ if err != nil {
+ return false
+ }
+
+ return m.metricMap.deleteByHashWithLabelValues(h, lvs, m.curry)
+}
+
+// Delete deletes the metric where the variable labels are the same as those
+// passed in as labels. It returns true if a metric was deleted.
+//
+// It is not an error if the number and names of the Labels are inconsistent
+// with those of the VariableLabels in Desc. However, such inconsistent Labels
+// can never match an actual metric, so the method will always return false in
+// that case.
+//
+// This method is used for the same purpose as DeleteLabelValues(...string). See
+// there for pros and cons of the two methods.
+func (m *MetricVec) Delete(labels Labels) bool {
+ labels, closer := constrainLabels(m.desc, labels)
+ defer closer()
+
+ h, err := m.hashLabels(labels)
+ if err != nil {
+ return false
+ }
+
+ return m.metricMap.deleteByHashWithLabels(h, labels, m.curry)
+}
+
+// DeletePartialMatch deletes all metrics where the variable labels contain all of those
+// passed in as labels. The order of the labels does not matter.
+// It returns the number of metrics deleted.
+//
+// Note that curried labels will never be matched if deleting from the curried vector.
+// To match curried labels with DeletePartialMatch, it must be called on the base vector.
+func (m *MetricVec) DeletePartialMatch(labels Labels) int {
+ labels, closer := constrainLabels(m.desc, labels)
+ defer closer()
+
+ return m.metricMap.deleteByLabels(labels, m.curry)
+}
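+
+// A brief sketch (hypothetical vector name, not upstream code): deleting every
+// child metric that carries a given label value, regardless of the other
+// labels:
+//
+//	deleted := requestsVec.DeletePartialMatch(Labels{"status": "500"})
+//	// deleted holds the number of removed child metrics.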
+
+// Without explicit forwarding of Describe, Collect, Reset, those methods won't
+// show up in GoDoc.
+
+// Describe implements Collector.
+func (m *MetricVec) Describe(ch chan<- *Desc) { m.metricMap.Describe(ch) }
+
+// Collect implements Collector.
+func (m *MetricVec) Collect(ch chan<- Metric) { m.metricMap.Collect(ch) }
+
+// Reset deletes all metrics in this vector.
+func (m *MetricVec) Reset() { m.metricMap.Reset() }
+
+// CurryWith returns a vector curried with the provided labels, i.e. the
+// returned vector has those labels pre-set for all labeled operations performed
+// on it. The cardinality of the curried vector is reduced accordingly. The
+// order of the remaining labels stays the same (just with the curried labels
+// taken out of the sequence – which is relevant for the
+// (GetMetric)WithLabelValues methods). It is possible to curry a curried
+// vector, but only with labels not yet used for currying before.
+//
+// The metrics contained in the MetricVec are shared between the curried and
+// uncurried vectors. They are just accessed differently. Curried and uncurried
+// vectors behave identically in terms of collection. Only one must be
+// registered with a given registry (usually the uncurried version). The Reset
+// method deletes all metrics, even if called on a curried vector.
+//
+// Note that CurryWith is usually not called directly but through a wrapper
+// around MetricVec, implementing a vector for a specific Metric
+// implementation, for example GaugeVec.
+func (m *MetricVec) CurryWith(labels Labels) (*MetricVec, error) {
+ var (
+ newCurry []curriedLabelValue
+ oldCurry = m.curry
+ iCurry int
+ )
+ for i, labelName := range m.desc.variableLabels.names {
+ val, ok := labels[labelName]
+ if iCurry < len(oldCurry) && oldCurry[iCurry].index == i {
+ if ok {
+ return nil, fmt.Errorf("label name %q is already curried", labelName)
+ }
+ newCurry = append(newCurry, oldCurry[iCurry])
+ iCurry++
+ } else {
+ if !ok {
+ continue // Label stays uncurried.
+ }
+ newCurry = append(newCurry, curriedLabelValue{
+ i,
+ m.desc.variableLabels.constrain(labelName, val),
+ })
+ }
+ }
+ if l := len(oldCurry) + len(labels) - len(newCurry); l > 0 {
+ return nil, fmt.Errorf("%d unknown label(s) found during currying", l)
+ }
+
+ return &MetricVec{
+ metricMap: m.metricMap,
+ curry: newCurry,
+ hashAdd: m.hashAdd,
+ hashAddByte: m.hashAddByte,
+ }, nil
+}
+
+// GetMetricWithLabelValues returns the Metric for the given slice of label
+// values (same order as the variable labels in Desc). If that combination of
+// label values is accessed for the first time, a new Metric is created (by
+// calling the newMetric function provided during construction of the
+// MetricVec).
+//
+// It is possible to call this method without using the returned Metric to only
+// create the new Metric but leave it in its initial state.
+//
+// Keeping the Metric for later use is possible (and should be considered if
+// performance is critical), but keep in mind that Reset, DeleteLabelValues and
+// Delete can be used to delete the Metric from the MetricVec. In that case, the
+// Metric will still exist, but it will not be exported anymore, even if a
+// Metric with the same label values is created later.
+//
+// An error is returned if the number of label values is not the same as the
+// number of variable labels in Desc (minus any curried labels).
+//
+// Note that for more than one label value, this method is prone to mistakes
+// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as
+// an alternative to avoid that type of mistake. For higher label numbers, the
+// latter has a much more readable (albeit more verbose) syntax, but it comes
+// with a performance overhead (for creating and processing the Labels map).
+//
+// Note that GetMetricWithLabelValues is usually not called directly but through
+// a wrapper around MetricVec, implementing a vector for a specific Metric
+// implementation, for example GaugeVec.
+func (m *MetricVec) GetMetricWithLabelValues(lvs ...string) (Metric, error) {
+ lvs = constrainLabelValues(m.desc, lvs, m.curry)
+ h, err := m.hashLabelValues(lvs)
+ if err != nil {
+ return nil, err
+ }
+
+ return m.metricMap.getOrCreateMetricWithLabelValues(h, lvs, m.curry), nil
+}
+
+// GetMetricWith returns the Metric for the given Labels map (the label names
+// must match those of the variable labels in Desc). If that label map is
+// accessed for the first time, a new Metric is created. Implications of
+// creating a Metric without using it and keeping the Metric for later use
+// are the same as for GetMetricWithLabelValues.
+//
+// An error is returned if the number and names of the Labels are inconsistent
+// with those of the variable labels in Desc (minus any curried labels).
+//
+// This method is used for the same purpose as
+// GetMetricWithLabelValues(...string). See there for pros and cons of the two
+// methods.
+//
+// Note that GetMetricWith is usually not called directly but through a wrapper
+// around MetricVec, implementing a vector for a specific Metric implementation,
+// for example GaugeVec.
+func (m *MetricVec) GetMetricWith(labels Labels) (Metric, error) {
+ labels, closer := constrainLabels(m.desc, labels)
+ defer closer()
+
+ h, err := m.hashLabels(labels)
+ if err != nil {
+ return nil, err
+ }
+
+ return m.metricMap.getOrCreateMetricWithLabels(h, labels, m.curry), nil
+}
+
+func (m *MetricVec) hashLabelValues(vals []string) (uint64, error) {
+ if err := validateLabelValues(vals, len(m.desc.variableLabels.names)-len(m.curry)); err != nil {
+ return 0, err
+ }
+
+ var (
+ h = hashNew()
+ curry = m.curry
+ iVals, iCurry int
+ )
+ for i := 0; i < len(m.desc.variableLabels.names); i++ {
+ if iCurry < len(curry) && curry[iCurry].index == i {
+ h = m.hashAdd(h, curry[iCurry].value)
+ iCurry++
+ } else {
+ h = m.hashAdd(h, vals[iVals])
+ iVals++
+ }
+ h = m.hashAddByte(h, model.SeparatorByte)
+ }
+ return h, nil
+}
+
+func (m *MetricVec) hashLabels(labels Labels) (uint64, error) {
+ if err := validateValuesInLabels(labels, len(m.desc.variableLabels.names)-len(m.curry)); err != nil {
+ return 0, err
+ }
+
+ var (
+ h = hashNew()
+ curry = m.curry
+ iCurry int
+ )
+ for i, labelName := range m.desc.variableLabels.names {
+ val, ok := labels[labelName]
+ if iCurry < len(curry) && curry[iCurry].index == i {
+ if ok {
+ return 0, fmt.Errorf("label name %q is already curried", labelName)
+ }
+ h = m.hashAdd(h, curry[iCurry].value)
+ iCurry++
+ } else {
+ if !ok {
+ return 0, fmt.Errorf("label name %q missing in label map", labelName)
+ }
+ h = m.hashAdd(h, val)
+ }
+ h = m.hashAddByte(h, model.SeparatorByte)
+ }
+ return h, nil
+}
+
+// metricWithLabelValues provides the metric and its label values for
+// disambiguation on hash collision.
+type metricWithLabelValues struct {
+ values []string
+ metric Metric
+}
+
+// curriedLabelValue sets the curried value for a label at the given index.
+type curriedLabelValue struct {
+ index int
+ value string
+}
+
+// metricMap is a helper for metricVec and shared between differently curried
+// metricVecs.
+type metricMap struct {
+ mtx sync.RWMutex // Protects metrics.
+ metrics map[uint64][]metricWithLabelValues
+ desc *Desc
+ newMetric func(labelValues ...string) Metric
+}
+
+// Describe implements Collector. It will send exactly one Desc to the provided
+// channel.
+func (m *metricMap) Describe(ch chan<- *Desc) {
+ ch <- m.desc
+}
+
+// Collect implements Collector.
+func (m *metricMap) Collect(ch chan<- Metric) {
+ m.mtx.RLock()
+ defer m.mtx.RUnlock()
+
+ for _, metrics := range m.metrics {
+ for _, metric := range metrics {
+ ch <- metric.metric
+ }
+ }
+}
+
+// Reset deletes all metrics in this vector.
+func (m *metricMap) Reset() {
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+
+ for h := range m.metrics {
+ delete(m.metrics, h)
+ }
+}
+
+// deleteByHashWithLabelValues removes the metric from the hash bucket h. If
+// there are multiple matches in the bucket, use lvs to select a metric and
+// remove only that metric.
+func (m *metricMap) deleteByHashWithLabelValues(
+ h uint64, lvs []string, curry []curriedLabelValue,
+) bool {
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+
+ metrics, ok := m.metrics[h]
+ if !ok {
+ return false
+ }
+
+ i := findMetricWithLabelValues(metrics, lvs, curry)
+ if i >= len(metrics) {
+ return false
+ }
+
+ if len(metrics) > 1 {
+ old := metrics
+ m.metrics[h] = append(metrics[:i], metrics[i+1:]...)
+ old[len(old)-1] = metricWithLabelValues{}
+ } else {
+ delete(m.metrics, h)
+ }
+ return true
+}
+
+// deleteByHashWithLabels removes the metric from the hash bucket h. If there
+// are multiple matches in the bucket, use labels to select a metric and remove
+// only that metric.
+func (m *metricMap) deleteByHashWithLabels(
+ h uint64, labels Labels, curry []curriedLabelValue,
+) bool {
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+
+ metrics, ok := m.metrics[h]
+ if !ok {
+ return false
+ }
+ i := findMetricWithLabels(m.desc, metrics, labels, curry)
+ if i >= len(metrics) {
+ return false
+ }
+
+ if len(metrics) > 1 {
+ old := metrics
+ m.metrics[h] = append(metrics[:i], metrics[i+1:]...)
+ old[len(old)-1] = metricWithLabelValues{}
+ } else {
+ delete(m.metrics, h)
+ }
+ return true
+}
+
+// deleteByLabels deletes a metric if the given labels are present in the metric.
+func (m *metricMap) deleteByLabels(labels Labels, curry []curriedLabelValue) int {
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+
+ var numDeleted int
+
+ for h, metrics := range m.metrics {
+ i := findMetricWithPartialLabels(m.desc, metrics, labels, curry)
+ if i >= len(metrics) {
+ // Didn't find matching labels in this metric slice.
+ continue
+ }
+ delete(m.metrics, h)
+ numDeleted++
+ }
+
+ return numDeleted
+}
+
+// findMetricWithPartialLabels returns the index of the matching metric or
+// len(metrics) if not found.
+func findMetricWithPartialLabels(
+ desc *Desc, metrics []metricWithLabelValues, labels Labels, curry []curriedLabelValue,
+) int {
+ for i, metric := range metrics {
+ if matchPartialLabels(desc, metric.values, labels, curry) {
+ return i
+ }
+ }
+ return len(metrics)
+}
+
+// indexOf searches the given slice of strings for the target string and returns
+// the index or len(items) as well as a boolean indicating whether the search succeeded.
+func indexOf(target string, items []string) (int, bool) {
+ for i, l := range items {
+ if l == target {
+ return i, true
+ }
+ }
+ return len(items), false
+}
+
+// valueMatchesVariableOrCurriedValue determines if a value was previously curried,
+// and returns whether it matches either the "base" value or the curried value accordingly.
+// It also indicates whether the match is against a curried or uncurried value.
+func valueMatchesVariableOrCurriedValue(targetValue string, index int, values []string, curry []curriedLabelValue) (bool, bool) {
+ for _, curriedValue := range curry {
+ if curriedValue.index == index {
+ // This label was curried. See if the curried value matches our target.
+ return curriedValue.value == targetValue, true
+ }
+ }
+ // This label was not curried. See if the current value matches our target label.
+ return values[index] == targetValue, false
+}
+
+// matchPartialLabels searches the current metric and returns whether all of the target label:value pairs are present.
+func matchPartialLabels(desc *Desc, values []string, labels Labels, curry []curriedLabelValue) bool {
+ for l, v := range labels {
+ // Check if the target label exists in our metrics and get the index.
+ varLabelIndex, validLabel := indexOf(l, desc.variableLabels.names)
+ if validLabel {
+ // Check the value of that label against the target value.
+ // We don't consider curried values in partial matches.
+ matches, curried := valueMatchesVariableOrCurriedValue(v, varLabelIndex, values, curry)
+ if matches && !curried {
+ continue
+ }
+ }
+ return false
+ }
+ return true
+}
+
+// getOrCreateMetricWithLabelValues retrieves the metric by hash and label value
+// or creates it and returns the new one.
+//
+// This function holds the mutex.
+func (m *metricMap) getOrCreateMetricWithLabelValues(
+ hash uint64, lvs []string, curry []curriedLabelValue,
+) Metric {
+ m.mtx.RLock()
+ metric, ok := m.getMetricWithHashAndLabelValues(hash, lvs, curry)
+ m.mtx.RUnlock()
+ if ok {
+ return metric
+ }
+
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+ metric, ok = m.getMetricWithHashAndLabelValues(hash, lvs, curry)
+ if !ok {
+ inlinedLVs := inlineLabelValues(lvs, curry)
+ metric = m.newMetric(inlinedLVs...)
+ m.metrics[hash] = append(m.metrics[hash], metricWithLabelValues{values: inlinedLVs, metric: metric})
+ }
+ return metric
+}
+
+// getOrCreateMetricWithLabels retrieves the metric by hash and label value
+// or creates it and returns the new one.
+//
+// This function holds the mutex.
+func (m *metricMap) getOrCreateMetricWithLabels(
+ hash uint64, labels Labels, curry []curriedLabelValue,
+) Metric {
+ m.mtx.RLock()
+ metric, ok := m.getMetricWithHashAndLabels(hash, labels, curry)
+ m.mtx.RUnlock()
+ if ok {
+ return metric
+ }
+
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+ metric, ok = m.getMetricWithHashAndLabels(hash, labels, curry)
+ if !ok {
+ lvs := extractLabelValues(m.desc, labels, curry)
+ metric = m.newMetric(lvs...)
+ m.metrics[hash] = append(m.metrics[hash], metricWithLabelValues{values: lvs, metric: metric})
+ }
+ return metric
+}
+
+// getMetricWithHashAndLabelValues gets a metric while handling possible
+// collisions in the hash space. Must be called while holding the read mutex.
+func (m *metricMap) getMetricWithHashAndLabelValues(
+ h uint64, lvs []string, curry []curriedLabelValue,
+) (Metric, bool) {
+ metrics, ok := m.metrics[h]
+ if ok {
+ if i := findMetricWithLabelValues(metrics, lvs, curry); i < len(metrics) {
+ return metrics[i].metric, true
+ }
+ }
+ return nil, false
+}
+
+// getMetricWithHashAndLabels gets a metric while handling possible collisions in
+// the hash space. Must be called while holding read mutex.
+func (m *metricMap) getMetricWithHashAndLabels(
+ h uint64, labels Labels, curry []curriedLabelValue,
+) (Metric, bool) {
+ metrics, ok := m.metrics[h]
+ if ok {
+ if i := findMetricWithLabels(m.desc, metrics, labels, curry); i < len(metrics) {
+ return metrics[i].metric, true
+ }
+ }
+ return nil, false
+}
+
+// findMetricWithLabelValues returns the index of the matching metric or
+// len(metrics) if not found.
+func findMetricWithLabelValues(
+ metrics []metricWithLabelValues, lvs []string, curry []curriedLabelValue,
+) int {
+ for i, metric := range metrics {
+ if matchLabelValues(metric.values, lvs, curry) {
+ return i
+ }
+ }
+ return len(metrics)
+}
+
+// findMetricWithLabels returns the index of the matching metric or len(metrics)
+// if not found.
+func findMetricWithLabels(
+ desc *Desc, metrics []metricWithLabelValues, labels Labels, curry []curriedLabelValue,
+) int {
+ for i, metric := range metrics {
+ if matchLabels(desc, metric.values, labels, curry) {
+ return i
+ }
+ }
+ return len(metrics)
+}
+
+func matchLabelValues(values, lvs []string, curry []curriedLabelValue) bool {
+ if len(values) != len(lvs)+len(curry) {
+ return false
+ }
+ var iLVs, iCurry int
+ for i, v := range values {
+ if iCurry < len(curry) && curry[iCurry].index == i {
+ if v != curry[iCurry].value {
+ return false
+ }
+ iCurry++
+ continue
+ }
+ if v != lvs[iLVs] {
+ return false
+ }
+ iLVs++
+ }
+ return true
+}
+
+func matchLabels(desc *Desc, values []string, labels Labels, curry []curriedLabelValue) bool {
+ if len(values) != len(labels)+len(curry) {
+ return false
+ }
+ iCurry := 0
+ for i, k := range desc.variableLabels.names {
+ if iCurry < len(curry) && curry[iCurry].index == i {
+ if values[i] != curry[iCurry].value {
+ return false
+ }
+ iCurry++
+ continue
+ }
+ if values[i] != labels[k] {
+ return false
+ }
+ }
+ return true
+}
+
+func extractLabelValues(desc *Desc, labels Labels, curry []curriedLabelValue) []string {
+ labelValues := make([]string, len(labels)+len(curry))
+ iCurry := 0
+ for i, k := range desc.variableLabels.names {
+ if iCurry < len(curry) && curry[iCurry].index == i {
+ labelValues[i] = curry[iCurry].value
+ iCurry++
+ continue
+ }
+ labelValues[i] = labels[k]
+ }
+ return labelValues
+}
+
+func inlineLabelValues(lvs []string, curry []curriedLabelValue) []string {
+ labelValues := make([]string, len(lvs)+len(curry))
+ var iCurry, iLVs int
+ for i := range labelValues {
+ if iCurry < len(curry) && curry[iCurry].index == i {
+ labelValues[i] = curry[iCurry].value
+ iCurry++
+ continue
+ }
+ labelValues[i] = lvs[iLVs]
+ iLVs++
+ }
+ return labelValues
+}
+
+var labelsPool = &sync.Pool{
+ New: func() interface{} {
+ return make(Labels)
+ },
+}
+
+func constrainLabels(desc *Desc, labels Labels) (Labels, func()) {
+ if len(desc.variableLabels.labelConstraints) == 0 {
+ // Fast path when there are no constraints.
+ return labels, func() {}
+ }
+
+ constrainedLabels := labelsPool.Get().(Labels)
+ for l, v := range labels {
+ constrainedLabels[l] = desc.variableLabels.constrain(l, v)
+ }
+
+ return constrainedLabels, func() {
+ for k := range constrainedLabels {
+ delete(constrainedLabels, k)
+ }
+ labelsPool.Put(constrainedLabels)
+ }
+}
+
+func constrainLabelValues(desc *Desc, lvs []string, curry []curriedLabelValue) []string {
+ if len(desc.variableLabels.labelConstraints) == 0 {
+ // Fast path when there are no constraints.
+ return lvs
+ }
+
+ constrainedValues := make([]string, len(lvs))
+ var iCurry, iLVs int
+ for i := 0; i < len(lvs)+len(curry); i++ {
+ if iCurry < len(curry) && curry[iCurry].index == i {
+ iCurry++
+ continue
+ }
+
+ if i < len(desc.variableLabels.names) {
+ constrainedValues[iLVs] = desc.variableLabels.constrain(
+ desc.variableLabels.names[i],
+ lvs[iLVs],
+ )
+ } else {
+ constrainedValues[iLVs] = lvs[iLVs]
+ }
+ iLVs++
+ }
+ return constrainedValues
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/vnext.go b/vendor/github.com/prometheus/client_golang/prometheus/vnext.go
new file mode 100644
index 0000000..42bc3a8
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/vnext.go
@@ -0,0 +1,23 @@
+// Copyright 2022 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+type v2 struct{}
+
+// V2 is a struct that can be referenced to access the experimental API that
+// might be present in v2 of client_golang someday. It offers extended
+// functionality over v1 with a slightly changed API. It is acceptable to mix
+// pieces from v1 (e.g. `prometheus.NewGauge`) and from v2 (e.g.
+// `prometheus.V2.NewDesc`) in the same codebase.
+var V2 = v2{}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/wrap.go b/vendor/github.com/prometheus/client_golang/prometheus/wrap.go
new file mode 100644
index 0000000..25da157
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/wrap.go
@@ -0,0 +1,214 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "fmt"
+ "sort"
+
+ "github.com/prometheus/client_golang/prometheus/internal"
+
+ dto "github.com/prometheus/client_model/go"
+ "google.golang.org/protobuf/proto"
+)
+
+// WrapRegistererWith returns a Registerer wrapping the provided
+// Registerer. Collectors registered with the returned Registerer will be
+// registered with the wrapped Registerer in a modified way. The modified
+// Collector adds the provided Labels to all Metrics it collects (as
+// ConstLabels). The Metrics collected by the unmodified Collector must not
+// duplicate any of those labels. Wrapping a nil value is valid, resulting
+// in a no-op Registerer.
+//
+// WrapRegistererWith provides a way to add fixed labels to a subset of
+// Collectors. It should not be used to add fixed labels to all metrics
+// exposed. See also
+// https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels-not-static-scraped-labels
+//
+// Conflicts between Collectors registered through the original Registerer with
+// Collectors registered through the wrapping Registerer will still be
+// detected. Any AlreadyRegisteredError returned by the Register method of
+// either Registerer will contain the ExistingCollector in the form it was
+// provided to the respective registry.
+//
+// The Collector example demonstrates a use of WrapRegistererWith.
+func WrapRegistererWith(labels Labels, reg Registerer) Registerer {
+ return &wrappingRegisterer{
+ wrappedRegisterer: reg,
+ labels: labels,
+ }
+}
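+
+// A hedged sketch (hypothetical names, not upstream code) of adding a fixed
+// label to a subset of collectors via a wrapped Registerer:
+//
+//	reg := NewRegistry()
+//	subReg := WrapRegistererWith(Labels{"component": "worker"}, reg)
+//	workerJobs := NewCounter(CounterOpts{
+//		Name: "jobs_processed_total",
+//		Help: "Jobs processed by the worker.",
+//	})
+//	subReg.MustRegister(workerJobs)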
+
+// WrapRegistererWithPrefix returns a Registerer wrapping the provided
+// Registerer. Collectors registered with the returned Registerer will be
+// registered with the wrapped Registerer in a modified way. The modified
+// Collector adds the provided prefix to the name of all Metrics it collects.
+// Wrapping a nil value is valid, resulting in a no-op Registerer.
+//
+// WrapRegistererWithPrefix is useful to have one place to prefix all metrics of
+// a sub-system. To make this work, register metrics of the sub-system with the
+// wrapping Registerer returned by WrapRegistererWithPrefix. It is rarely useful
+// to use the same prefix for all metrics exposed. In particular, do not prefix
+// metric names that are standardized across applications, as that would break
+// horizontal monitoring, for example the metrics provided by the Go collector
+// (see NewGoCollector) and the process collector (see NewProcessCollector). (In
+// fact, those metrics are already prefixed with “go_” or “process_”,
+// respectively.)
+//
+// Conflicts between Collectors registered through the original Registerer with
+// Collectors registered through the wrapping Registerer will still be
+// detected. Any AlreadyRegisteredError returned by the Register method of
+// either Registerer will contain the ExistingCollector in the form it was
+// provided to the respective registry.
+func WrapRegistererWithPrefix(prefix string, reg Registerer) Registerer {
+ return &wrappingRegisterer{
+ wrappedRegisterer: reg,
+ prefix: prefix,
+ }
+}
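+
+// A hedged sketch (hypothetical prefix and metric name, not upstream code) of
+// prefixing all metrics of a sub-system:
+//
+//	cacheReg := WrapRegistererWithPrefix("cache_", DefaultRegisterer)
+//	hits := NewCounter(CounterOpts{
+//		Name: "hits_total", // exposed as "cache_hits_total"
+//		Help: "Cache hits.",
+//	})
+//	cacheReg.MustRegister(hits)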
+
+type wrappingRegisterer struct {
+ wrappedRegisterer Registerer
+ prefix string
+ labels Labels
+}
+
+func (r *wrappingRegisterer) Register(c Collector) error {
+ if r.wrappedRegisterer == nil {
+ return nil
+ }
+ return r.wrappedRegisterer.Register(&wrappingCollector{
+ wrappedCollector: c,
+ prefix: r.prefix,
+ labels: r.labels,
+ })
+}
+
+func (r *wrappingRegisterer) MustRegister(cs ...Collector) {
+ if r.wrappedRegisterer == nil {
+ return
+ }
+ for _, c := range cs {
+ if err := r.Register(c); err != nil {
+ panic(err)
+ }
+ }
+}
+
+func (r *wrappingRegisterer) Unregister(c Collector) bool {
+ if r.wrappedRegisterer == nil {
+ return false
+ }
+ return r.wrappedRegisterer.Unregister(&wrappingCollector{
+ wrappedCollector: c,
+ prefix: r.prefix,
+ labels: r.labels,
+ })
+}
+
+type wrappingCollector struct {
+ wrappedCollector Collector
+ prefix string
+ labels Labels
+}
+
+func (c *wrappingCollector) Collect(ch chan<- Metric) {
+ wrappedCh := make(chan Metric)
+ go func() {
+ c.wrappedCollector.Collect(wrappedCh)
+ close(wrappedCh)
+ }()
+ for m := range wrappedCh {
+ ch <- &wrappingMetric{
+ wrappedMetric: m,
+ prefix: c.prefix,
+ labels: c.labels,
+ }
+ }
+}
+
+func (c *wrappingCollector) Describe(ch chan<- *Desc) {
+ wrappedCh := make(chan *Desc)
+ go func() {
+ c.wrappedCollector.Describe(wrappedCh)
+ close(wrappedCh)
+ }()
+ for desc := range wrappedCh {
+ ch <- wrapDesc(desc, c.prefix, c.labels)
+ }
+}
+
+func (c *wrappingCollector) unwrapRecursively() Collector {
+ switch wc := c.wrappedCollector.(type) {
+ case *wrappingCollector:
+ return wc.unwrapRecursively()
+ default:
+ return wc
+ }
+}
+
+type wrappingMetric struct {
+ wrappedMetric Metric
+ prefix string
+ labels Labels
+}
+
+func (m *wrappingMetric) Desc() *Desc {
+ return wrapDesc(m.wrappedMetric.Desc(), m.prefix, m.labels)
+}
+
+func (m *wrappingMetric) Write(out *dto.Metric) error {
+ if err := m.wrappedMetric.Write(out); err != nil {
+ return err
+ }
+ if len(m.labels) == 0 {
+ // No wrapping labels.
+ return nil
+ }
+ for ln, lv := range m.labels {
+ out.Label = append(out.Label, &dto.LabelPair{
+ Name: proto.String(ln),
+ Value: proto.String(lv),
+ })
+ }
+ sort.Sort(internal.LabelPairSorter(out.Label))
+ return nil
+}
+
+func wrapDesc(desc *Desc, prefix string, labels Labels) *Desc {
+ constLabels := Labels{}
+ for _, lp := range desc.constLabelPairs {
+ constLabels[*lp.Name] = *lp.Value
+ }
+ for ln, lv := range labels {
+ if _, alreadyUsed := constLabels[ln]; alreadyUsed {
+ return &Desc{
+ fqName: desc.fqName,
+ help: desc.help,
+ variableLabels: desc.variableLabels,
+ constLabelPairs: desc.constLabelPairs,
+ err: fmt.Errorf("attempted wrapping with already existing label name %q", ln),
+ }
+ }
+ constLabels[ln] = lv
+ }
+ // NewDesc will do remaining validations.
+ newDesc := V2.NewDesc(prefix+desc.fqName, desc.help, desc.variableLabels, constLabels)
+ // Propagate errors if there were any. This will override any error
+ // created by NewDesc above, i.e. earlier errors get precedence.
+ if desc.err != nil {
+ newDesc.err = desc.err
+ }
+ return newDesc
+}
diff --git a/vendor/github.com/prometheus/client_model/LICENSE b/vendor/github.com/prometheus/client_model/LICENSE
new file mode 100644
index 0000000..261eeb9
--- /dev/null
+++ b/vendor/github.com/prometheus/client_model/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/prometheus/client_model/NOTICE b/vendor/github.com/prometheus/client_model/NOTICE
new file mode 100644
index 0000000..20110e4
--- /dev/null
+++ b/vendor/github.com/prometheus/client_model/NOTICE
@@ -0,0 +1,5 @@
+Data model artifacts for Prometheus.
+Copyright 2012-2015 The Prometheus Authors
+
+This product includes software developed at
+SoundCloud Ltd. (http://soundcloud.com/).
diff --git a/vendor/github.com/prometheus/client_model/go/metrics.pb.go b/vendor/github.com/prometheus/client_model/go/metrics.pb.go
new file mode 100644
index 0000000..2f15490
--- /dev/null
+++ b/vendor/github.com/prometheus/client_model/go/metrics.pb.go
@@ -0,0 +1,1399 @@
+// Copyright 2013 Prometheus Team
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.30.0
+// protoc v3.20.3
+// source: io/prometheus/client/metrics.proto
+
+package io_prometheus_client
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ timestamppb "google.golang.org/protobuf/types/known/timestamppb"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type MetricType int32
+
+const (
+ // COUNTER must use the Metric field "counter".
+ MetricType_COUNTER MetricType = 0
+ // GAUGE must use the Metric field "gauge".
+ MetricType_GAUGE MetricType = 1
+ // SUMMARY must use the Metric field "summary".
+ MetricType_SUMMARY MetricType = 2
+ // UNTYPED must use the Metric field "untyped".
+ MetricType_UNTYPED MetricType = 3
+ // HISTOGRAM must use the Metric field "histogram".
+ MetricType_HISTOGRAM MetricType = 4
+ // GAUGE_HISTOGRAM must use the Metric field "histogram".
+ MetricType_GAUGE_HISTOGRAM MetricType = 5
+)
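+
+// NOTE: illustrative sketch, not part of the generated file. The comments
+// above mean that MetricFamily.Type decides which one-of-style field must be
+// populated on each Metric. Assuming the pointer-based proto2 style used
+// throughout this package, a GAUGE family could be built like so (the metric
+// name and value below are made-up examples):
+//
+//	name, val := "queue_depth", 42.0
+//	fam := &MetricFamily{
+//		Name:   &name,
+//		Type:   MetricType_GAUGE.Enum(),
+//		Metric: []*Metric{{Gauge: &Gauge{Value: &val}}},
+//	}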
+
+// Enum value maps for MetricType.
+var (
+ MetricType_name = map[int32]string{
+ 0: "COUNTER",
+ 1: "GAUGE",
+ 2: "SUMMARY",
+ 3: "UNTYPED",
+ 4: "HISTOGRAM",
+ 5: "GAUGE_HISTOGRAM",
+ }
+ MetricType_value = map[string]int32{
+ "COUNTER": 0,
+ "GAUGE": 1,
+ "SUMMARY": 2,
+ "UNTYPED": 3,
+ "HISTOGRAM": 4,
+ "GAUGE_HISTOGRAM": 5,
+ }
+)
+
+func (x MetricType) Enum() *MetricType {
+ p := new(MetricType)
+ *p = x
+ return p
+}
+
+func (x MetricType) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (MetricType) Descriptor() protoreflect.EnumDescriptor {
+ return file_io_prometheus_client_metrics_proto_enumTypes[0].Descriptor()
+}
+
+func (MetricType) Type() protoreflect.EnumType {
+ return &file_io_prometheus_client_metrics_proto_enumTypes[0]
+}
+
+func (x MetricType) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Do not use.
+func (x *MetricType) UnmarshalJSON(b []byte) error {
+ num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b)
+ if err != nil {
+ return err
+ }
+ *x = MetricType(num)
+ return nil
+}
+
+// Deprecated: Use MetricType.Descriptor instead.
+func (MetricType) EnumDescriptor() ([]byte, []int) {
+ return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{0}
+}
+
+type LabelPair struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+ Value *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"`
+}
+
+func (x *LabelPair) Reset() {
+ *x = LabelPair{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *LabelPair) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*LabelPair) ProtoMessage() {}
+
+func (x *LabelPair) ProtoReflect() protoreflect.Message {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use LabelPair.ProtoReflect.Descriptor instead.
+func (*LabelPair) Descriptor() ([]byte, []int) {
+ return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *LabelPair) GetName() string {
+ if x != nil && x.Name != nil {
+ return *x.Name
+ }
+ return ""
+}
+
+func (x *LabelPair) GetValue() string {
+ if x != nil && x.Value != nil {
+ return *x.Value
+ }
+ return ""
+}
+
+type Gauge struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
+}
+
+func (x *Gauge) Reset() {
+ *x = Gauge{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Gauge) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Gauge) ProtoMessage() {}
+
+func (x *Gauge) ProtoReflect() protoreflect.Message {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Gauge.ProtoReflect.Descriptor instead.
+func (*Gauge) Descriptor() ([]byte, []int) {
+ return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *Gauge) GetValue() float64 {
+ if x != nil && x.Value != nil {
+ return *x.Value
+ }
+ return 0
+}
+
+type Counter struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
+ Exemplar *Exemplar `protobuf:"bytes,2,opt,name=exemplar" json:"exemplar,omitempty"`
+ CreatedTimestamp *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=created_timestamp,json=createdTimestamp" json:"created_timestamp,omitempty"`
+}
+
+func (x *Counter) Reset() {
+ *x = Counter{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Counter) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Counter) ProtoMessage() {}
+
+func (x *Counter) ProtoReflect() protoreflect.Message {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Counter.ProtoReflect.Descriptor instead.
+func (*Counter) Descriptor() ([]byte, []int) {
+ return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *Counter) GetValue() float64 {
+ if x != nil && x.Value != nil {
+ return *x.Value
+ }
+ return 0
+}
+
+func (x *Counter) GetExemplar() *Exemplar {
+ if x != nil {
+ return x.Exemplar
+ }
+ return nil
+}
+
+func (x *Counter) GetCreatedTimestamp() *timestamppb.Timestamp {
+ if x != nil {
+ return x.CreatedTimestamp
+ }
+ return nil
+}
+
+type Quantile struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Quantile *float64 `protobuf:"fixed64,1,opt,name=quantile" json:"quantile,omitempty"`
+ Value *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"`
+}
+
+func (x *Quantile) Reset() {
+ *x = Quantile{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Quantile) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Quantile) ProtoMessage() {}
+
+func (x *Quantile) ProtoReflect() protoreflect.Message {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[3]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Quantile.ProtoReflect.Descriptor instead.
+func (*Quantile) Descriptor() ([]byte, []int) {
+ return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *Quantile) GetQuantile() float64 {
+ if x != nil && x.Quantile != nil {
+ return *x.Quantile
+ }
+ return 0
+}
+
+func (x *Quantile) GetValue() float64 {
+ if x != nil && x.Value != nil {
+ return *x.Value
+ }
+ return 0
+}
+
+type Summary struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount" json:"sample_count,omitempty"`
+ SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"`
+ Quantile []*Quantile `protobuf:"bytes,3,rep,name=quantile" json:"quantile,omitempty"`
+ CreatedTimestamp *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=created_timestamp,json=createdTimestamp" json:"created_timestamp,omitempty"`
+}
+
+func (x *Summary) Reset() {
+ *x = Summary{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Summary) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Summary) ProtoMessage() {}
+
+func (x *Summary) ProtoReflect() protoreflect.Message {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[4]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Summary.ProtoReflect.Descriptor instead.
+func (*Summary) Descriptor() ([]byte, []int) {
+ return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{4}
+}
+
+func (x *Summary) GetSampleCount() uint64 {
+ if x != nil && x.SampleCount != nil {
+ return *x.SampleCount
+ }
+ return 0
+}
+
+func (x *Summary) GetSampleSum() float64 {
+ if x != nil && x.SampleSum != nil {
+ return *x.SampleSum
+ }
+ return 0
+}
+
+func (x *Summary) GetQuantile() []*Quantile {
+ if x != nil {
+ return x.Quantile
+ }
+ return nil
+}
+
+func (x *Summary) GetCreatedTimestamp() *timestamppb.Timestamp {
+ if x != nil {
+ return x.CreatedTimestamp
+ }
+ return nil
+}
+
+type Untyped struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
+}
+
+func (x *Untyped) Reset() {
+ *x = Untyped{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Untyped) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Untyped) ProtoMessage() {}
+
+func (x *Untyped) ProtoReflect() protoreflect.Message {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[5]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Untyped.ProtoReflect.Descriptor instead.
+func (*Untyped) Descriptor() ([]byte, []int) {
+ return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{5}
+}
+
+func (x *Untyped) GetValue() float64 {
+ if x != nil && x.Value != nil {
+ return *x.Value
+ }
+ return 0
+}
+
+type Histogram struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount" json:"sample_count,omitempty"`
+ SampleCountFloat *float64 `protobuf:"fixed64,4,opt,name=sample_count_float,json=sampleCountFloat" json:"sample_count_float,omitempty"` // Overrides sample_count if > 0.
+ SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"`
+ // Buckets for the conventional histogram.
+ Bucket []*Bucket `protobuf:"bytes,3,rep,name=bucket" json:"bucket,omitempty"` // Ordered in increasing order of upper_bound, +Inf bucket is optional.
+ CreatedTimestamp *timestamppb.Timestamp `protobuf:"bytes,15,opt,name=created_timestamp,json=createdTimestamp" json:"created_timestamp,omitempty"`
+ // schema defines the bucket schema. Currently, valid numbers are -4 <= n <= 8.
+ // They are all for base-2 bucket schemas, where 1 is a bucket boundary in each case, and
+ // then each power of two is divided into 2^n logarithmic buckets.
+ // Or in other words, each bucket boundary is the previous boundary times 2^(2^-n).
+ // In the future, more bucket schemas may be added using numbers < -4 or > 8.
+ Schema *int32 `protobuf:"zigzag32,5,opt,name=schema" json:"schema,omitempty"`
+ ZeroThreshold *float64 `protobuf:"fixed64,6,opt,name=zero_threshold,json=zeroThreshold" json:"zero_threshold,omitempty"` // Breadth of the zero bucket.
+ ZeroCount *uint64 `protobuf:"varint,7,opt,name=zero_count,json=zeroCount" json:"zero_count,omitempty"` // Count in zero bucket.
+ ZeroCountFloat *float64 `protobuf:"fixed64,8,opt,name=zero_count_float,json=zeroCountFloat" json:"zero_count_float,omitempty"` // Overrides zero_count if > 0.
+ // Negative buckets for the native histogram.
+ NegativeSpan []*BucketSpan `protobuf:"bytes,9,rep,name=negative_span,json=negativeSpan" json:"negative_span,omitempty"`
+ // Use either "negative_delta" or "negative_count", the former for
+ // regular histograms with integer counts, the latter for float
+ // histograms.
+ NegativeDelta []int64 `protobuf:"zigzag64,10,rep,name=negative_delta,json=negativeDelta" json:"negative_delta,omitempty"` // Count delta of each bucket compared to previous one (or to zero for 1st bucket).
+ NegativeCount []float64 `protobuf:"fixed64,11,rep,name=negative_count,json=negativeCount" json:"negative_count,omitempty"` // Absolute count of each bucket.
+ // Positive buckets for the native histogram.
+ // Use a no-op span (offset 0, length 0) for a native histogram without any
+ // observations yet and with a zero_threshold of 0. Otherwise, it would be
+ // indistinguishable from a classic histogram.
+ PositiveSpan []*BucketSpan `protobuf:"bytes,12,rep,name=positive_span,json=positiveSpan" json:"positive_span,omitempty"`
+ // Use either "positive_delta" or "positive_count", the former for
+ // regular histograms with integer counts, the latter for float
+ // histograms.
+ PositiveDelta []int64 `protobuf:"zigzag64,13,rep,name=positive_delta,json=positiveDelta" json:"positive_delta,omitempty"` // Count delta of each bucket compared to previous one (or to zero for 1st bucket).
+ PositiveCount []float64 `protobuf:"fixed64,14,rep,name=positive_count,json=positiveCount" json:"positive_count,omitempty"` // Absolute count of each bucket.
+ // Only used for native histograms. These exemplars MUST have a timestamp.
+ Exemplars []*Exemplar `protobuf:"bytes,16,rep,name=exemplars" json:"exemplars,omitempty"`
+}
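+
+// NOTE: illustrative sketch, not part of the generated file. The schema
+// comment above says each native-histogram bucket boundary is the previous
+// boundary times 2^(2^-schema). Assuming that reading, the growth factor and
+// an upper boundary could be derived as follows (math.Pow is used purely for
+// illustration; it is not imported by this file):
+//
+//	// growth factor between consecutive bucket boundaries
+//	factor := math.Pow(2, math.Pow(2, -float64(schema)))
+//	// schema 0 -> factor 2, schema 3 -> factor ~1.09, schema -2 -> factor 16
+//	// upper boundary of positive bucket index i (index 0 has upper bound 1)
+//	upper := math.Pow(factor, float64(i))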
+
+func (x *Histogram) Reset() {
+ *x = Histogram{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Histogram) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Histogram) ProtoMessage() {}
+
+func (x *Histogram) ProtoReflect() protoreflect.Message {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[6]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Histogram.ProtoReflect.Descriptor instead.
+func (*Histogram) Descriptor() ([]byte, []int) {
+ return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{6}
+}
+
+func (x *Histogram) GetSampleCount() uint64 {
+ if x != nil && x.SampleCount != nil {
+ return *x.SampleCount
+ }
+ return 0
+}
+
+func (x *Histogram) GetSampleCountFloat() float64 {
+ if x != nil && x.SampleCountFloat != nil {
+ return *x.SampleCountFloat
+ }
+ return 0
+}
+
+func (x *Histogram) GetSampleSum() float64 {
+ if x != nil && x.SampleSum != nil {
+ return *x.SampleSum
+ }
+ return 0
+}
+
+func (x *Histogram) GetBucket() []*Bucket {
+ if x != nil {
+ return x.Bucket
+ }
+ return nil
+}
+
+func (x *Histogram) GetCreatedTimestamp() *timestamppb.Timestamp {
+ if x != nil {
+ return x.CreatedTimestamp
+ }
+ return nil
+}
+
+func (x *Histogram) GetSchema() int32 {
+ if x != nil && x.Schema != nil {
+ return *x.Schema
+ }
+ return 0
+}
+
+func (x *Histogram) GetZeroThreshold() float64 {
+ if x != nil && x.ZeroThreshold != nil {
+ return *x.ZeroThreshold
+ }
+ return 0
+}
+
+func (x *Histogram) GetZeroCount() uint64 {
+ if x != nil && x.ZeroCount != nil {
+ return *x.ZeroCount
+ }
+ return 0
+}
+
+func (x *Histogram) GetZeroCountFloat() float64 {
+ if x != nil && x.ZeroCountFloat != nil {
+ return *x.ZeroCountFloat
+ }
+ return 0
+}
+
+func (x *Histogram) GetNegativeSpan() []*BucketSpan {
+ if x != nil {
+ return x.NegativeSpan
+ }
+ return nil
+}
+
+func (x *Histogram) GetNegativeDelta() []int64 {
+ if x != nil {
+ return x.NegativeDelta
+ }
+ return nil
+}
+
+func (x *Histogram) GetNegativeCount() []float64 {
+ if x != nil {
+ return x.NegativeCount
+ }
+ return nil
+}
+
+func (x *Histogram) GetPositiveSpan() []*BucketSpan {
+ if x != nil {
+ return x.PositiveSpan
+ }
+ return nil
+}
+
+func (x *Histogram) GetPositiveDelta() []int64 {
+ if x != nil {
+ return x.PositiveDelta
+ }
+ return nil
+}
+
+func (x *Histogram) GetPositiveCount() []float64 {
+ if x != nil {
+ return x.PositiveCount
+ }
+ return nil
+}
+
+func (x *Histogram) GetExemplars() []*Exemplar {
+ if x != nil {
+ return x.Exemplars
+ }
+ return nil
+}
+
+// A Bucket of a conventional histogram, each of which is treated as
+// an individual counter-like time series by Prometheus.
+type Bucket struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ CumulativeCount *uint64 `protobuf:"varint,1,opt,name=cumulative_count,json=cumulativeCount" json:"cumulative_count,omitempty"` // Cumulative in increasing order.
+ CumulativeCountFloat *float64 `protobuf:"fixed64,4,opt,name=cumulative_count_float,json=cumulativeCountFloat" json:"cumulative_count_float,omitempty"` // Overrides cumulative_count if > 0.
+ UpperBound *float64 `protobuf:"fixed64,2,opt,name=upper_bound,json=upperBound" json:"upper_bound,omitempty"` // Inclusive.
+ Exemplar *Exemplar `protobuf:"bytes,3,opt,name=exemplar" json:"exemplar,omitempty"`
+}
+
+func (x *Bucket) Reset() {
+ *x = Bucket{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[7]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Bucket) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Bucket) ProtoMessage() {}
+
+func (x *Bucket) ProtoReflect() protoreflect.Message {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[7]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Bucket.ProtoReflect.Descriptor instead.
+func (*Bucket) Descriptor() ([]byte, []int) {
+ return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{7}
+}
+
+func (x *Bucket) GetCumulativeCount() uint64 {
+ if x != nil && x.CumulativeCount != nil {
+ return *x.CumulativeCount
+ }
+ return 0
+}
+
+func (x *Bucket) GetCumulativeCountFloat() float64 {
+ if x != nil && x.CumulativeCountFloat != nil {
+ return *x.CumulativeCountFloat
+ }
+ return 0
+}
+
+func (x *Bucket) GetUpperBound() float64 {
+ if x != nil && x.UpperBound != nil {
+ return *x.UpperBound
+ }
+ return 0
+}
+
+func (x *Bucket) GetExemplar() *Exemplar {
+ if x != nil {
+ return x.Exemplar
+ }
+ return nil
+}
+
+// A BucketSpan defines a number of consecutive buckets in a native
+// histogram with their offset. Logically, it would be more
+// straightforward to include the bucket counts in the Span. However,
+// the protobuf representation is more compact in the way the data is
+// structured here (with all the buckets in a single array separate
+// from the Spans).
+type BucketSpan struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Offset *int32 `protobuf:"zigzag32,1,opt,name=offset" json:"offset,omitempty"` // Gap to previous span, or starting point for 1st span (which can be negative).
+ Length *uint32 `protobuf:"varint,2,opt,name=length" json:"length,omitempty"` // Length of consecutive buckets.
+}
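+
+// NOTE: illustrative sketch, not part of the generated file. A span slice
+// (e.g. Histogram.PositiveSpan) and its matching delta slice
+// (e.g. Histogram.PositiveDelta) together encode a sparse run of buckets.
+// Assuming the usual decoding, absolute bucket counts can be recovered
+// like so:
+//
+//	idx, count, d := int32(0), int64(0), 0
+//	for _, span := range spans { // spans []*BucketSpan
+//		idx += span.GetOffset() // gap to the end of the previous span
+//		for j := uint32(0); j < span.GetLength(); j++ {
+//			count += deltas[d] // deltas []int64, each relative to the previous bucket
+//			d++
+//			// the bucket at index idx now has absolute count `count`
+//			idx++
+//		}
+//	}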
+
+func (x *BucketSpan) Reset() {
+ *x = BucketSpan{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[8]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *BucketSpan) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*BucketSpan) ProtoMessage() {}
+
+func (x *BucketSpan) ProtoReflect() protoreflect.Message {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[8]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use BucketSpan.ProtoReflect.Descriptor instead.
+func (*BucketSpan) Descriptor() ([]byte, []int) {
+ return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{8}
+}
+
+func (x *BucketSpan) GetOffset() int32 {
+ if x != nil && x.Offset != nil {
+ return *x.Offset
+ }
+ return 0
+}
+
+func (x *BucketSpan) GetLength() uint32 {
+ if x != nil && x.Length != nil {
+ return *x.Length
+ }
+ return 0
+}
+
+type Exemplar struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"`
+ Value *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"`
+ Timestamp *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=timestamp" json:"timestamp,omitempty"` // OpenMetrics-style.
+}
+
+func (x *Exemplar) Reset() {
+ *x = Exemplar{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[9]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Exemplar) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Exemplar) ProtoMessage() {}
+
+func (x *Exemplar) ProtoReflect() protoreflect.Message {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[9]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Exemplar.ProtoReflect.Descriptor instead.
+func (*Exemplar) Descriptor() ([]byte, []int) {
+ return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{9}
+}
+
+func (x *Exemplar) GetLabel() []*LabelPair {
+ if x != nil {
+ return x.Label
+ }
+ return nil
+}
+
+func (x *Exemplar) GetValue() float64 {
+ if x != nil && x.Value != nil {
+ return *x.Value
+ }
+ return 0
+}
+
+func (x *Exemplar) GetTimestamp() *timestamppb.Timestamp {
+ if x != nil {
+ return x.Timestamp
+ }
+ return nil
+}
+
+type Metric struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"`
+ Gauge *Gauge `protobuf:"bytes,2,opt,name=gauge" json:"gauge,omitempty"`
+ Counter *Counter `protobuf:"bytes,3,opt,name=counter" json:"counter,omitempty"`
+ Summary *Summary `protobuf:"bytes,4,opt,name=summary" json:"summary,omitempty"`
+ Untyped *Untyped `protobuf:"bytes,5,opt,name=untyped" json:"untyped,omitempty"`
+ Histogram *Histogram `protobuf:"bytes,7,opt,name=histogram" json:"histogram,omitempty"`
+ TimestampMs *int64 `protobuf:"varint,6,opt,name=timestamp_ms,json=timestampMs" json:"timestamp_ms,omitempty"`
+}
+
+func (x *Metric) Reset() {
+ *x = Metric{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[10]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Metric) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Metric) ProtoMessage() {}
+
+func (x *Metric) ProtoReflect() protoreflect.Message {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[10]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Metric.ProtoReflect.Descriptor instead.
+func (*Metric) Descriptor() ([]byte, []int) {
+ return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{10}
+}
+
+func (x *Metric) GetLabel() []*LabelPair {
+ if x != nil {
+ return x.Label
+ }
+ return nil
+}
+
+func (x *Metric) GetGauge() *Gauge {
+ if x != nil {
+ return x.Gauge
+ }
+ return nil
+}
+
+func (x *Metric) GetCounter() *Counter {
+ if x != nil {
+ return x.Counter
+ }
+ return nil
+}
+
+func (x *Metric) GetSummary() *Summary {
+ if x != nil {
+ return x.Summary
+ }
+ return nil
+}
+
+func (x *Metric) GetUntyped() *Untyped {
+ if x != nil {
+ return x.Untyped
+ }
+ return nil
+}
+
+func (x *Metric) GetHistogram() *Histogram {
+ if x != nil {
+ return x.Histogram
+ }
+ return nil
+}
+
+func (x *Metric) GetTimestampMs() int64 {
+ if x != nil && x.TimestampMs != nil {
+ return *x.TimestampMs
+ }
+ return 0
+}
+
+type MetricFamily struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+ Help *string `protobuf:"bytes,2,opt,name=help" json:"help,omitempty"`
+ Type *MetricType `protobuf:"varint,3,opt,name=type,enum=io.prometheus.client.MetricType" json:"type,omitempty"`
+ Metric []*Metric `protobuf:"bytes,4,rep,name=metric" json:"metric,omitempty"`
+ Unit *string `protobuf:"bytes,5,opt,name=unit" json:"unit,omitempty"`
+}
+
+func (x *MetricFamily) Reset() {
+ *x = MetricFamily{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[11]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *MetricFamily) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*MetricFamily) ProtoMessage() {}
+
+func (x *MetricFamily) ProtoReflect() protoreflect.Message {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[11]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use MetricFamily.ProtoReflect.Descriptor instead.
+func (*MetricFamily) Descriptor() ([]byte, []int) {
+ return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{11}
+}
+
+func (x *MetricFamily) GetName() string {
+ if x != nil && x.Name != nil {
+ return *x.Name
+ }
+ return ""
+}
+
+func (x *MetricFamily) GetHelp() string {
+ if x != nil && x.Help != nil {
+ return *x.Help
+ }
+ return ""
+}
+
+func (x *MetricFamily) GetType() MetricType {
+ if x != nil && x.Type != nil {
+ return *x.Type
+ }
+ return MetricType_COUNTER
+}
+
+func (x *MetricFamily) GetMetric() []*Metric {
+ if x != nil {
+ return x.Metric
+ }
+ return nil
+}
+
+func (x *MetricFamily) GetUnit() string {
+ if x != nil && x.Unit != nil {
+ return *x.Unit
+ }
+ return ""
+}
+
+var File_io_prometheus_client_metrics_proto protoreflect.FileDescriptor
+
+var file_io_prometheus_client_metrics_proto_rawDesc = []byte{
+ 0x0a, 0x22, 0x69, 0x6f, 0x2f, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2f,
+ 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68,
+ 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65,
+ 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x35, 0x0a, 0x09, 0x4c,
+ 0x61, 0x62, 0x65, 0x6c, 0x50, 0x61, 0x69, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05,
+ 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c,
+ 0x75, 0x65, 0x22, 0x1d, 0x0a, 0x05, 0x47, 0x61, 0x75, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76,
+ 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75,
+ 0x65, 0x22, 0xa4, 0x01, 0x0a, 0x07, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x12, 0x14, 0x0a,
+ 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61,
+ 0x6c, 0x75, 0x65, 0x12, 0x3a, 0x0a, 0x08, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65,
+ 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x78, 0x65,
+ 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x52, 0x08, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x12,
+ 0x47, 0x0a, 0x11, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73,
+ 0x74, 0x61, 0x6d, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d,
+ 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x10, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x54,
+ 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x22, 0x3c, 0x0a, 0x08, 0x51, 0x75, 0x61, 0x6e,
+ 0x74, 0x69, 0x6c, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x71, 0x75, 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x08, 0x71, 0x75, 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65,
+ 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52,
+ 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0xd0, 0x01, 0x0a, 0x07, 0x53, 0x75, 0x6d, 0x6d, 0x61,
+ 0x72, 0x79, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x75,
+ 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65,
+ 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x5f,
+ 0x73, 0x75, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x09, 0x73, 0x61, 0x6d, 0x70, 0x6c,
+ 0x65, 0x53, 0x75, 0x6d, 0x12, 0x3a, 0x0a, 0x08, 0x71, 0x75, 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65,
+ 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d,
+ 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x51, 0x75,
+ 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65, 0x52, 0x08, 0x71, 0x75, 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65,
+ 0x12, 0x47, 0x0a, 0x11, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65,
+ 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69,
+ 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x10, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64,
+ 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x22, 0x1f, 0x0a, 0x07, 0x55, 0x6e, 0x74,
+ 0x79, 0x70, 0x65, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0xea, 0x05, 0x0a, 0x09, 0x48,
+ 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x61, 0x6d, 0x70,
+ 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b,
+ 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x2c, 0x0a, 0x12, 0x73,
+ 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x66, 0x6c, 0x6f, 0x61,
+ 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x01, 0x52, 0x10, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x43,
+ 0x6f, 0x75, 0x6e, 0x74, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x61, 0x6d,
+ 0x70, 0x6c, 0x65, 0x5f, 0x73, 0x75, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x09, 0x73,
+ 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x53, 0x75, 0x6d, 0x12, 0x34, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b,
+ 0x65, 0x74, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72,
+ 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e,
+ 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x47,
+ 0x0a, 0x11, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74,
+ 0x61, 0x6d, 0x70, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65,
+ 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x10, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x54, 0x69,
+ 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d,
+ 0x61, 0x18, 0x05, 0x20, 0x01, 0x28, 0x11, 0x52, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12,
+ 0x25, 0x0a, 0x0e, 0x7a, 0x65, 0x72, 0x6f, 0x5f, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c,
+ 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0d, 0x7a, 0x65, 0x72, 0x6f, 0x54, 0x68, 0x72,
+ 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x7a, 0x65, 0x72, 0x6f, 0x5f, 0x63,
+ 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x7a, 0x65, 0x72, 0x6f,
+ 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x28, 0x0a, 0x10, 0x7a, 0x65, 0x72, 0x6f, 0x5f, 0x63, 0x6f,
+ 0x75, 0x6e, 0x74, 0x5f, 0x66, 0x6c, 0x6f, 0x61, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x01, 0x52,
+ 0x0e, 0x7a, 0x65, 0x72, 0x6f, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x12,
+ 0x45, 0x0a, 0x0d, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x73, 0x70, 0x61, 0x6e,
+ 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d,
+ 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x42, 0x75,
+ 0x63, 0x6b, 0x65, 0x74, 0x53, 0x70, 0x61, 0x6e, 0x52, 0x0c, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69,
+ 0x76, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x12, 0x25, 0x0a, 0x0e, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69,
+ 0x76, 0x65, 0x5f, 0x64, 0x65, 0x6c, 0x74, 0x61, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x12, 0x52, 0x0d,
+ 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x12, 0x25, 0x0a,
+ 0x0e, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18,
+ 0x0b, 0x20, 0x03, 0x28, 0x01, 0x52, 0x0d, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x43,
+ 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x45, 0x0a, 0x0d, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65,
+ 0x5f, 0x73, 0x70, 0x61, 0x6e, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x69, 0x6f,
+ 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65,
+ 0x6e, 0x74, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x53, 0x70, 0x61, 0x6e, 0x52, 0x0c, 0x70,
+ 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x12, 0x25, 0x0a, 0x0e, 0x70,
+ 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x64, 0x65, 0x6c, 0x74, 0x61, 0x18, 0x0d, 0x20,
+ 0x03, 0x28, 0x12, 0x52, 0x0d, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x44, 0x65, 0x6c,
+ 0x74, 0x61, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63,
+ 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x0e, 0x20, 0x03, 0x28, 0x01, 0x52, 0x0d, 0x70, 0x6f, 0x73, 0x69,
+ 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x3c, 0x0a, 0x09, 0x65, 0x78, 0x65,
+ 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x73, 0x18, 0x10, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x69,
+ 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69,
+ 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x52, 0x09, 0x65, 0x78,
+ 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x73, 0x22, 0xc6, 0x01, 0x0a, 0x06, 0x42, 0x75, 0x63, 0x6b,
+ 0x65, 0x74, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65,
+ 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0f, 0x63, 0x75,
+ 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x34, 0x0a,
+ 0x16, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e,
+ 0x74, 0x5f, 0x66, 0x6c, 0x6f, 0x61, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x01, 0x52, 0x14, 0x63,
+ 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x46, 0x6c,
+ 0x6f, 0x61, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x75, 0x70, 0x70, 0x65, 0x72, 0x5f, 0x62, 0x6f, 0x75,
+ 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0a, 0x75, 0x70, 0x70, 0x65, 0x72, 0x42,
+ 0x6f, 0x75, 0x6e, 0x64, 0x12, 0x3a, 0x0a, 0x08, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72,
+ 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d,
+ 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x78,
+ 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x52, 0x08, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72,
+ 0x22, 0x3c, 0x0a, 0x0a, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x53, 0x70, 0x61, 0x6e, 0x12, 0x16,
+ 0x0a, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x11, 0x52, 0x06,
+ 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x22, 0x91,
+ 0x01, 0x0a, 0x08, 0x45, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x12, 0x35, 0x0a, 0x05, 0x6c,
+ 0x61, 0x62, 0x65, 0x6c, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x69, 0x6f, 0x2e,
+ 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e,
+ 0x74, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x50, 0x61, 0x69, 0x72, 0x52, 0x05, 0x6c, 0x61, 0x62,
+ 0x65, 0x6c, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28,
+ 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x38, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65,
+ 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69,
+ 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61,
+ 0x6d, 0x70, 0x22, 0xff, 0x02, 0x0a, 0x06, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x12, 0x35, 0x0a,
+ 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x69,
+ 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69,
+ 0x65, 0x6e, 0x74, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x50, 0x61, 0x69, 0x72, 0x52, 0x05, 0x6c,
+ 0x61, 0x62, 0x65, 0x6c, 0x12, 0x31, 0x0a, 0x05, 0x67, 0x61, 0x75, 0x67, 0x65, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68,
+ 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x47, 0x61, 0x75, 0x67, 0x65,
+ 0x52, 0x05, 0x67, 0x61, 0x75, 0x67, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x63, 0x6f, 0x75, 0x6e, 0x74,
+ 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72,
+ 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e,
+ 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x52, 0x07, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72,
+ 0x12, 0x37, 0x0a, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x1d, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75,
+ 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79,
+ 0x52, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x37, 0x0a, 0x07, 0x75, 0x6e, 0x74,
+ 0x79, 0x70, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x69, 0x6f, 0x2e,
+ 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e,
+ 0x74, 0x2e, 0x55, 0x6e, 0x74, 0x79, 0x70, 0x65, 0x64, 0x52, 0x07, 0x75, 0x6e, 0x74, 0x79, 0x70,
+ 0x65, 0x64, 0x12, 0x3d, 0x0a, 0x09, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x18,
+ 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65,
+ 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x48, 0x69, 0x73,
+ 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x52, 0x09, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61,
+ 0x6d, 0x12, 0x21, 0x0a, 0x0c, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x5f, 0x6d,
+ 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61,
+ 0x6d, 0x70, 0x4d, 0x73, 0x22, 0xb6, 0x01, 0x0a, 0x0c, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x46,
+ 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x68, 0x65, 0x6c,
+ 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x68, 0x65, 0x6c, 0x70, 0x12, 0x34, 0x0a,
+ 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x20, 0x2e, 0x69, 0x6f,
+ 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65,
+ 0x6e, 0x74, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74,
+ 0x79, 0x70, 0x65, 0x12, 0x34, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x18, 0x04, 0x20,
+ 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68,
+ 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69,
+ 0x63, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x6e, 0x69,
+ 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x75, 0x6e, 0x69, 0x74, 0x2a, 0x62, 0x0a,
+ 0x0a, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x43,
+ 0x4f, 0x55, 0x4e, 0x54, 0x45, 0x52, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x47, 0x41, 0x55, 0x47,
+ 0x45, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x53, 0x55, 0x4d, 0x4d, 0x41, 0x52, 0x59, 0x10, 0x02,
+ 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x54, 0x59, 0x50, 0x45, 0x44, 0x10, 0x03, 0x12, 0x0d, 0x0a,
+ 0x09, 0x48, 0x49, 0x53, 0x54, 0x4f, 0x47, 0x52, 0x41, 0x4d, 0x10, 0x04, 0x12, 0x13, 0x0a, 0x0f,
+ 0x47, 0x41, 0x55, 0x47, 0x45, 0x5f, 0x48, 0x49, 0x53, 0x54, 0x4f, 0x47, 0x52, 0x41, 0x4d, 0x10,
+ 0x05, 0x42, 0x52, 0x0a, 0x14, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65,
+ 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5a, 0x3a, 0x67, 0x69, 0x74, 0x68, 0x75,
+ 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73,
+ 0x2f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x2f, 0x67, 0x6f,
+ 0x3b, 0x69, 0x6f, 0x5f, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x5f, 0x63,
+ 0x6c, 0x69, 0x65, 0x6e, 0x74,
+}
+
+var (
+ file_io_prometheus_client_metrics_proto_rawDescOnce sync.Once
+ file_io_prometheus_client_metrics_proto_rawDescData = file_io_prometheus_client_metrics_proto_rawDesc
+)
+
+func file_io_prometheus_client_metrics_proto_rawDescGZIP() []byte {
+ file_io_prometheus_client_metrics_proto_rawDescOnce.Do(func() {
+ file_io_prometheus_client_metrics_proto_rawDescData = protoimpl.X.CompressGZIP(file_io_prometheus_client_metrics_proto_rawDescData)
+ })
+ return file_io_prometheus_client_metrics_proto_rawDescData
+}
+
+var file_io_prometheus_client_metrics_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
+var file_io_prometheus_client_metrics_proto_msgTypes = make([]protoimpl.MessageInfo, 12)
+var file_io_prometheus_client_metrics_proto_goTypes = []interface{}{
+ (MetricType)(0), // 0: io.prometheus.client.MetricType
+ (*LabelPair)(nil), // 1: io.prometheus.client.LabelPair
+ (*Gauge)(nil), // 2: io.prometheus.client.Gauge
+ (*Counter)(nil), // 3: io.prometheus.client.Counter
+ (*Quantile)(nil), // 4: io.prometheus.client.Quantile
+ (*Summary)(nil), // 5: io.prometheus.client.Summary
+ (*Untyped)(nil), // 6: io.prometheus.client.Untyped
+ (*Histogram)(nil), // 7: io.prometheus.client.Histogram
+ (*Bucket)(nil), // 8: io.prometheus.client.Bucket
+ (*BucketSpan)(nil), // 9: io.prometheus.client.BucketSpan
+ (*Exemplar)(nil), // 10: io.prometheus.client.Exemplar
+ (*Metric)(nil), // 11: io.prometheus.client.Metric
+ (*MetricFamily)(nil), // 12: io.prometheus.client.MetricFamily
+ (*timestamppb.Timestamp)(nil), // 13: google.protobuf.Timestamp
+}
+var file_io_prometheus_client_metrics_proto_depIdxs = []int32{
+ 10, // 0: io.prometheus.client.Counter.exemplar:type_name -> io.prometheus.client.Exemplar
+ 13, // 1: io.prometheus.client.Counter.created_timestamp:type_name -> google.protobuf.Timestamp
+ 4, // 2: io.prometheus.client.Summary.quantile:type_name -> io.prometheus.client.Quantile
+ 13, // 3: io.prometheus.client.Summary.created_timestamp:type_name -> google.protobuf.Timestamp
+ 8, // 4: io.prometheus.client.Histogram.bucket:type_name -> io.prometheus.client.Bucket
+ 13, // 5: io.prometheus.client.Histogram.created_timestamp:type_name -> google.protobuf.Timestamp
+ 9, // 6: io.prometheus.client.Histogram.negative_span:type_name -> io.prometheus.client.BucketSpan
+ 9, // 7: io.prometheus.client.Histogram.positive_span:type_name -> io.prometheus.client.BucketSpan
+ 10, // 8: io.prometheus.client.Histogram.exemplars:type_name -> io.prometheus.client.Exemplar
+ 10, // 9: io.prometheus.client.Bucket.exemplar:type_name -> io.prometheus.client.Exemplar
+ 1, // 10: io.prometheus.client.Exemplar.label:type_name -> io.prometheus.client.LabelPair
+ 13, // 11: io.prometheus.client.Exemplar.timestamp:type_name -> google.protobuf.Timestamp
+ 1, // 12: io.prometheus.client.Metric.label:type_name -> io.prometheus.client.LabelPair
+ 2, // 13: io.prometheus.client.Metric.gauge:type_name -> io.prometheus.client.Gauge
+ 3, // 14: io.prometheus.client.Metric.counter:type_name -> io.prometheus.client.Counter
+ 5, // 15: io.prometheus.client.Metric.summary:type_name -> io.prometheus.client.Summary
+ 6, // 16: io.prometheus.client.Metric.untyped:type_name -> io.prometheus.client.Untyped
+ 7, // 17: io.prometheus.client.Metric.histogram:type_name -> io.prometheus.client.Histogram
+ 0, // 18: io.prometheus.client.MetricFamily.type:type_name -> io.prometheus.client.MetricType
+ 11, // 19: io.prometheus.client.MetricFamily.metric:type_name -> io.prometheus.client.Metric
+ 20, // [20:20] is the sub-list for method output_type
+ 20, // [20:20] is the sub-list for method input_type
+ 20, // [20:20] is the sub-list for extension type_name
+ 20, // [20:20] is the sub-list for extension extendee
+ 0, // [0:20] is the sub-list for field type_name
+}
+
+func init() { file_io_prometheus_client_metrics_proto_init() }
+func file_io_prometheus_client_metrics_proto_init() {
+ if File_io_prometheus_client_metrics_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_io_prometheus_client_metrics_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*LabelPair); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_io_prometheus_client_metrics_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Gauge); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_io_prometheus_client_metrics_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Counter); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_io_prometheus_client_metrics_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Quantile); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_io_prometheus_client_metrics_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Summary); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_io_prometheus_client_metrics_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Untyped); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_io_prometheus_client_metrics_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Histogram); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_io_prometheus_client_metrics_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Bucket); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_io_prometheus_client_metrics_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*BucketSpan); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_io_prometheus_client_metrics_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Exemplar); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_io_prometheus_client_metrics_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Metric); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_io_prometheus_client_metrics_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*MetricFamily); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_io_prometheus_client_metrics_proto_rawDesc,
+ NumEnums: 1,
+ NumMessages: 12,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_io_prometheus_client_metrics_proto_goTypes,
+ DependencyIndexes: file_io_prometheus_client_metrics_proto_depIdxs,
+ EnumInfos: file_io_prometheus_client_metrics_proto_enumTypes,
+ MessageInfos: file_io_prometheus_client_metrics_proto_msgTypes,
+ }.Build()
+ File_io_prometheus_client_metrics_proto = out.File
+ file_io_prometheus_client_metrics_proto_rawDesc = nil
+ file_io_prometheus_client_metrics_proto_goTypes = nil
+ file_io_prometheus_client_metrics_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/prometheus/common/LICENSE b/vendor/github.com/prometheus/common/LICENSE
new file mode 100644
index 0000000..261eeb9
--- /dev/null
+++ b/vendor/github.com/prometheus/common/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/prometheus/common/NOTICE b/vendor/github.com/prometheus/common/NOTICE
new file mode 100644
index 0000000..636a2c1
--- /dev/null
+++ b/vendor/github.com/prometheus/common/NOTICE
@@ -0,0 +1,5 @@
+Common libraries shared by Prometheus Go components.
+Copyright 2015 The Prometheus Authors
+
+This product includes software developed at
+SoundCloud Ltd. (http://soundcloud.com/).
diff --git a/vendor/github.com/prometheus/common/expfmt/decode.go b/vendor/github.com/prometheus/common/expfmt/decode.go
new file mode 100644
index 0000000..25cfaa2
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/decode.go
@@ -0,0 +1,431 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package expfmt
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+ "math"
+ "mime"
+ "net/http"
+
+ dto "github.com/prometheus/client_model/go"
+ "google.golang.org/protobuf/encoding/protodelim"
+
+ "github.com/prometheus/common/model"
+)
+
+// Decoder types decode an input stream into metric families.
+type Decoder interface {
+ Decode(*dto.MetricFamily) error
+}
+
+// DecodeOptions contains options used by the Decoder and in sample extraction.
+type DecodeOptions struct {
+ // Timestamp is added to each value from the stream that has no explicit timestamp set.
+ Timestamp model.Time
+}
+
+// ResponseFormat extracts the correct format from an HTTP response header.
+// If no matching format can be found, FormatUnknown is returned.
+func ResponseFormat(h http.Header) Format {
+ ct := h.Get(hdrContentType)
+
+ mediatype, params, err := mime.ParseMediaType(ct)
+ if err != nil {
+ return fmtUnknown
+ }
+
+ const textType = "text/plain"
+
+ switch mediatype {
+ case ProtoType:
+ if p, ok := params["proto"]; ok && p != ProtoProtocol {
+ return fmtUnknown
+ }
+ if e, ok := params["encoding"]; ok && e != "delimited" {
+ return fmtUnknown
+ }
+ return fmtProtoDelim
+
+ case textType:
+ if v, ok := params["version"]; ok && v != TextVersion {
+ return fmtUnknown
+ }
+ return fmtText
+ }
+
+ return fmtUnknown
+}
+
+// NewDecoder returns a new decoder based on the given input format.
+// If the input format does not imply otherwise, a text format decoder is returned.
+func NewDecoder(r io.Reader, format Format) Decoder {
+ switch format.FormatType() {
+ case TypeProtoDelim:
+ return &protoDecoder{r: bufio.NewReader(r)}
+ }
+ return &textDecoder{r: r}
+}
+
+// protoDecoder implements the Decoder interface for protocol buffers.
+type protoDecoder struct {
+ r protodelim.Reader
+}
+
+// Decode implements the Decoder interface.
+func (d *protoDecoder) Decode(v *dto.MetricFamily) error {
+ opts := protodelim.UnmarshalOptions{
+ MaxSize: -1,
+ }
+ if err := opts.UnmarshalFrom(d.r, v); err != nil {
+ return err
+ }
+ if !model.IsValidMetricName(model.LabelValue(v.GetName())) {
+ return fmt.Errorf("invalid metric name %q", v.GetName())
+ }
+ for _, m := range v.GetMetric() {
+ if m == nil {
+ continue
+ }
+ for _, l := range m.GetLabel() {
+ if l == nil {
+ continue
+ }
+ if !model.LabelValue(l.GetValue()).IsValid() {
+ return fmt.Errorf("invalid label value %q", l.GetValue())
+ }
+ if !model.LabelName(l.GetName()).IsValid() {
+ return fmt.Errorf("invalid label name %q", l.GetName())
+ }
+ }
+ }
+ return nil
+}
+
+// textDecoder implements the Decoder interface for the text protocol.
+type textDecoder struct {
+ r io.Reader
+ fams map[string]*dto.MetricFamily
+ err error
+}
+
+// Decode implements the Decoder interface.
+func (d *textDecoder) Decode(v *dto.MetricFamily) error {
+ if d.err == nil {
+ // Read all metrics in one shot.
+ var p TextParser
+ d.fams, d.err = p.TextToMetricFamilies(d.r)
+ // If we don't get an error, store io.EOF for the end.
+ if d.err == nil {
+ d.err = io.EOF
+ }
+ }
+ // Pick off one MetricFamily per Decode until there's nothing left.
+ for key, fam := range d.fams {
+ v.Name = fam.Name
+ v.Help = fam.Help
+ v.Type = fam.Type
+ v.Metric = fam.Metric
+ delete(d.fams, key)
+ return nil
+ }
+ return d.err
+}
+
+// SampleDecoder wraps a Decoder to extract samples from the metric families
+// decoded by the wrapped Decoder.
+type SampleDecoder struct {
+ Dec Decoder
+ Opts *DecodeOptions
+
+ f dto.MetricFamily
+}
+
+// Decode calls the Decode method of the wrapped Decoder and then extracts the
+// samples from the decoded MetricFamily into the provided model.Vector.
+func (sd *SampleDecoder) Decode(s *model.Vector) error {
+ err := sd.Dec.Decode(&sd.f)
+ if err != nil {
+ return err
+ }
+ *s, err = extractSamples(&sd.f, sd.Opts)
+ return err
+}
+
+// ExtractSamples builds a slice of samples from the provided metric
+// families. If an error occurs during sample extraction, it continues to
+// extract from the remaining metric families. The returned error is the last
+// error that has occurred.
+func ExtractSamples(o *DecodeOptions, fams ...*dto.MetricFamily) (model.Vector, error) {
+ var (
+ all model.Vector
+ lastErr error
+ )
+ for _, f := range fams {
+ some, err := extractSamples(f, o)
+ if err != nil {
+ lastErr = err
+ continue
+ }
+ all = append(all, some...)
+ }
+ return all, lastErr
+}
+
+func extractSamples(f *dto.MetricFamily, o *DecodeOptions) (model.Vector, error) {
+ switch f.GetType() {
+ case dto.MetricType_COUNTER:
+ return extractCounter(o, f), nil
+ case dto.MetricType_GAUGE:
+ return extractGauge(o, f), nil
+ case dto.MetricType_SUMMARY:
+ return extractSummary(o, f), nil
+ case dto.MetricType_UNTYPED:
+ return extractUntyped(o, f), nil
+ case dto.MetricType_HISTOGRAM:
+ return extractHistogram(o, f), nil
+ }
+ return nil, fmt.Errorf("expfmt.extractSamples: unknown metric family type %v", f.GetType())
+}
+
+func extractCounter(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
+ samples := make(model.Vector, 0, len(f.Metric))
+
+ for _, m := range f.Metric {
+ if m.Counter == nil {
+ continue
+ }
+
+ lset := make(model.LabelSet, len(m.Label)+1)
+ for _, p := range m.Label {
+ lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+ }
+ lset[model.MetricNameLabel] = model.LabelValue(f.GetName())
+
+ smpl := &model.Sample{
+ Metric: model.Metric(lset),
+ Value: model.SampleValue(m.Counter.GetValue()),
+ }
+
+ if m.TimestampMs != nil {
+ smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000)
+ } else {
+ smpl.Timestamp = o.Timestamp
+ }
+
+ samples = append(samples, smpl)
+ }
+
+ return samples
+}
+
+func extractGauge(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
+ samples := make(model.Vector, 0, len(f.Metric))
+
+ for _, m := range f.Metric {
+ if m.Gauge == nil {
+ continue
+ }
+
+ lset := make(model.LabelSet, len(m.Label)+1)
+ for _, p := range m.Label {
+ lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+ }
+ lset[model.MetricNameLabel] = model.LabelValue(f.GetName())
+
+ smpl := &model.Sample{
+ Metric: model.Metric(lset),
+ Value: model.SampleValue(m.Gauge.GetValue()),
+ }
+
+ if m.TimestampMs != nil {
+ smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000)
+ } else {
+ smpl.Timestamp = o.Timestamp
+ }
+
+ samples = append(samples, smpl)
+ }
+
+ return samples
+}
+
+func extractUntyped(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
+ samples := make(model.Vector, 0, len(f.Metric))
+
+ for _, m := range f.Metric {
+ if m.Untyped == nil {
+ continue
+ }
+
+ lset := make(model.LabelSet, len(m.Label)+1)
+ for _, p := range m.Label {
+ lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+ }
+ lset[model.MetricNameLabel] = model.LabelValue(f.GetName())
+
+ smpl := &model.Sample{
+ Metric: model.Metric(lset),
+ Value: model.SampleValue(m.Untyped.GetValue()),
+ }
+
+ if m.TimestampMs != nil {
+ smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000)
+ } else {
+ smpl.Timestamp = o.Timestamp
+ }
+
+ samples = append(samples, smpl)
+ }
+
+ return samples
+}
+
+func extractSummary(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
+ samples := make(model.Vector, 0, len(f.Metric))
+
+ for _, m := range f.Metric {
+ if m.Summary == nil {
+ continue
+ }
+
+ timestamp := o.Timestamp
+ if m.TimestampMs != nil {
+ timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000)
+ }
+
+ for _, q := range m.Summary.Quantile {
+ lset := make(model.LabelSet, len(m.Label)+2)
+ for _, p := range m.Label {
+ lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+ }
+ // BUG(matt): Update other names to "quantile".
+ lset[model.LabelName(model.QuantileLabel)] = model.LabelValue(fmt.Sprint(q.GetQuantile()))
+ lset[model.MetricNameLabel] = model.LabelValue(f.GetName())
+
+ samples = append(samples, &model.Sample{
+ Metric: model.Metric(lset),
+ Value: model.SampleValue(q.GetValue()),
+ Timestamp: timestamp,
+ })
+ }
+
+ lset := make(model.LabelSet, len(m.Label)+1)
+ for _, p := range m.Label {
+ lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+ }
+ lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_sum")
+
+ samples = append(samples, &model.Sample{
+ Metric: model.Metric(lset),
+ Value: model.SampleValue(m.Summary.GetSampleSum()),
+ Timestamp: timestamp,
+ })
+
+ lset = make(model.LabelSet, len(m.Label)+1)
+ for _, p := range m.Label {
+ lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+ }
+ lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_count")
+
+ samples = append(samples, &model.Sample{
+ Metric: model.Metric(lset),
+ Value: model.SampleValue(m.Summary.GetSampleCount()),
+ Timestamp: timestamp,
+ })
+ }
+
+ return samples
+}
+
+func extractHistogram(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
+ samples := make(model.Vector, 0, len(f.Metric))
+
+ for _, m := range f.Metric {
+ if m.Histogram == nil {
+ continue
+ }
+
+ timestamp := o.Timestamp
+ if m.TimestampMs != nil {
+ timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000)
+ }
+
+ infSeen := false
+
+ for _, q := range m.Histogram.Bucket {
+ lset := make(model.LabelSet, len(m.Label)+2)
+ for _, p := range m.Label {
+ lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+ }
+ lset[model.LabelName(model.BucketLabel)] = model.LabelValue(fmt.Sprint(q.GetUpperBound()))
+ lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_bucket")
+
+ if math.IsInf(q.GetUpperBound(), +1) {
+ infSeen = true
+ }
+
+ samples = append(samples, &model.Sample{
+ Metric: model.Metric(lset),
+ Value: model.SampleValue(q.GetCumulativeCount()),
+ Timestamp: timestamp,
+ })
+ }
+
+ lset := make(model.LabelSet, len(m.Label)+1)
+ for _, p := range m.Label {
+ lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+ }
+ lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_sum")
+
+ samples = append(samples, &model.Sample{
+ Metric: model.Metric(lset),
+ Value: model.SampleValue(m.Histogram.GetSampleSum()),
+ Timestamp: timestamp,
+ })
+
+ lset = make(model.LabelSet, len(m.Label)+1)
+ for _, p := range m.Label {
+ lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+ }
+ lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_count")
+
+ count := &model.Sample{
+ Metric: model.Metric(lset),
+ Value: model.SampleValue(m.Histogram.GetSampleCount()),
+ Timestamp: timestamp,
+ }
+ samples = append(samples, count)
+
+ if !infSeen {
+ // Append an infinity bucket sample.
+ lset := make(model.LabelSet, len(m.Label)+2)
+ for _, p := range m.Label {
+ lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+ }
+ lset[model.LabelName(model.BucketLabel)] = model.LabelValue("+Inf")
+ lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_bucket")
+
+ samples = append(samples, &model.Sample{
+ Metric: model.Metric(lset),
+ Value: count.Value,
+ Timestamp: timestamp,
+ })
+ }
+ }
+
+ return samples
+}
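
For orientation, a minimal sketch of how this decode API is typically driven, looping until io.EOF and extracting samples per family; the scrape payload below is illustrative, not taken from this repository:

```go
package main

import (
	"fmt"
	"io"
	"strings"

	dto "github.com/prometheus/client_model/go"
	"github.com/prometheus/common/expfmt"
	"github.com/prometheus/common/model"
)

func main() {
	// Illustrative scrape payload in the classic Prometheus text format.
	payload := strings.NewReader(
		"# TYPE http_requests_total counter\n" +
			"http_requests_total{code=\"200\"} 1027 1395066363000\n")

	dec := expfmt.NewDecoder(payload, expfmt.NewFormat(expfmt.TypeTextPlain))
	opts := &expfmt.DecodeOptions{Timestamp: model.Now()}

	// Decode one MetricFamily per call until io.EOF, extracting samples as we go.
	for {
		var mf dto.MetricFamily
		if err := dec.Decode(&mf); err == io.EOF {
			break
		} else if err != nil {
			panic(err)
		}
		samples, err := expfmt.ExtractSamples(opts, &mf)
		if err != nil {
			panic(err)
		}
		for _, s := range samples {
			fmt.Println(s)
		}
	}
}
```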
diff --git a/vendor/github.com/prometheus/common/expfmt/encode.go b/vendor/github.com/prometheus/common/expfmt/encode.go
new file mode 100644
index 0000000..ff5ef7a
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/encode.go
@@ -0,0 +1,198 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package expfmt
+
+import (
+ "fmt"
+ "io"
+ "net/http"
+
+ "google.golang.org/protobuf/encoding/protodelim"
+ "google.golang.org/protobuf/encoding/prototext"
+
+ "github.com/prometheus/common/model"
+
+ "github.com/munnerz/goautoneg"
+
+ dto "github.com/prometheus/client_model/go"
+)
+
+// Encoder types encode metric families into an underlying wire protocol.
+type Encoder interface {
+ Encode(*dto.MetricFamily) error
+}
+
+// Closer is implemented by Encoders that need to be closed to finalize
+// encoding. (For example, OpenMetrics needs a final `# EOF` line.)
+//
+// Note that all Encoder implementations returned from this package implement
+// Closer, too, even if the Close call is a no-op. This happens in preparation
+// for adding a Close method to the Encoder interface directly in a (mildly
+// breaking) release in the future.
+type Closer interface {
+ Close() error
+}
+
+type encoderCloser struct {
+ encode func(*dto.MetricFamily) error
+ close func() error
+}
+
+func (ec encoderCloser) Encode(v *dto.MetricFamily) error {
+ return ec.encode(v)
+}
+
+func (ec encoderCloser) Close() error {
+ return ec.close()
+}
+
+// Negotiate returns the Content-Type based on the given Accept header. If no
+// appropriate accepted type is found, FmtText is returned (which is the
+// Prometheus text format). This function will never negotiate FmtOpenMetrics,
+// as the support is still experimental. To include the option to negotiate
+// FmtOpenMetrics, use NegotiateOpenMetrics.
+func Negotiate(h http.Header) Format {
+ escapingScheme := Format(fmt.Sprintf("; escaping=%s", Format(model.NameEscapingScheme.String())))
+ for _, ac := range goautoneg.ParseAccept(h.Get(hdrAccept)) {
+ if escapeParam := ac.Params[model.EscapingKey]; escapeParam != "" {
+ switch Format(escapeParam) {
+ case model.AllowUTF8, model.EscapeUnderscores, model.EscapeDots, model.EscapeValues:
+ escapingScheme = Format(fmt.Sprintf("; escaping=%s", escapeParam))
+ default:
+ // If the escaping parameter is unknown, ignore it.
+ }
+ }
+ ver := ac.Params["version"]
+ if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol {
+ switch ac.Params["encoding"] {
+ case "delimited":
+ return fmtProtoDelim + escapingScheme
+ case "text":
+ return fmtProtoText + escapingScheme
+ case "compact-text":
+ return fmtProtoCompact + escapingScheme
+ }
+ }
+ if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") {
+ return fmtText + escapingScheme
+ }
+ }
+ return fmtText + escapingScheme
+}
+
+// NegotiateIncludingOpenMetrics works like Negotiate but includes
+// FmtOpenMetrics as an option for the result. Note that this function is
+// temporary and will disappear once FmtOpenMetrics is fully supported and as
+// such may be negotiated by the normal Negotiate function.
+func NegotiateIncludingOpenMetrics(h http.Header) Format {
+ escapingScheme := Format(fmt.Sprintf("; escaping=%s", Format(model.NameEscapingScheme.String())))
+ for _, ac := range goautoneg.ParseAccept(h.Get(hdrAccept)) {
+ if escapeParam := ac.Params[model.EscapingKey]; escapeParam != "" {
+ switch Format(escapeParam) {
+ case model.AllowUTF8, model.EscapeUnderscores, model.EscapeDots, model.EscapeValues:
+ escapingScheme = Format(fmt.Sprintf("; escaping=%s", escapeParam))
+ default:
+ // If the escaping parameter is unknown, ignore it.
+ }
+ }
+ ver := ac.Params["version"]
+ if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol {
+ switch ac.Params["encoding"] {
+ case "delimited":
+ return fmtProtoDelim + escapingScheme
+ case "text":
+ return fmtProtoText + escapingScheme
+ case "compact-text":
+ return fmtProtoCompact + escapingScheme
+ }
+ }
+ if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") {
+ return fmtText + escapingScheme
+ }
+ if ac.Type+"/"+ac.SubType == OpenMetricsType && (ver == OpenMetricsVersion_0_0_1 || ver == OpenMetricsVersion_1_0_0 || ver == "") {
+ switch ver {
+ case OpenMetricsVersion_1_0_0:
+ return fmtOpenMetrics_1_0_0 + escapingScheme
+ default:
+ return fmtOpenMetrics_0_0_1 + escapingScheme
+ }
+ }
+ }
+ return fmtText + escapingScheme
+}
+
+// NewEncoder returns a new encoder based on content type negotiation. All
+// Encoder implementations returned by NewEncoder also implement Closer, and
+// callers should always call the Close method. It is currently only required
+// for FmtOpenMetrics, but a future (breaking) release will add the Close method
+// to the Encoder interface directly. The current version of the Encoder
+// interface is kept for backwards compatibility.
+// In cases where the Format does not allow for UTF-8 names, the global
+// NameEscapingScheme will be applied.
+//
+// NewEncoder can be called with additional options to customize the OpenMetrics text output.
+// For example:
+// NewEncoder(w, FmtOpenMetrics_1_0_0, WithCreatedLines())
+//
+// Extra options are ignored for all other formats.
+func NewEncoder(w io.Writer, format Format, options ...EncoderOption) Encoder {
+ escapingScheme := format.ToEscapingScheme()
+
+ switch format.FormatType() {
+ case TypeProtoDelim:
+ return encoderCloser{
+ encode: func(v *dto.MetricFamily) error {
+ _, err := protodelim.MarshalTo(w, v)
+ return err
+ },
+ close: func() error { return nil },
+ }
+ case TypeProtoCompact:
+ return encoderCloser{
+ encode: func(v *dto.MetricFamily) error {
+ _, err := fmt.Fprintln(w, model.EscapeMetricFamily(v, escapingScheme).String())
+ return err
+ },
+ close: func() error { return nil },
+ }
+ case TypeProtoText:
+ return encoderCloser{
+ encode: func(v *dto.MetricFamily) error {
+ _, err := fmt.Fprintln(w, prototext.Format(model.EscapeMetricFamily(v, escapingScheme)))
+ return err
+ },
+ close: func() error { return nil },
+ }
+ case TypeTextPlain:
+ return encoderCloser{
+ encode: func(v *dto.MetricFamily) error {
+ _, err := MetricFamilyToText(w, model.EscapeMetricFamily(v, escapingScheme))
+ return err
+ },
+ close: func() error { return nil },
+ }
+ case TypeOpenMetrics:
+ return encoderCloser{
+ encode: func(v *dto.MetricFamily) error {
+ _, err := MetricFamilyToOpenMetrics(w, model.EscapeMetricFamily(v, escapingScheme), options...)
+ return err
+ },
+ close: func() error {
+ _, err := FinalizeOpenMetrics(w)
+ return err
+ },
+ }
+ }
+ panic(fmt.Errorf("expfmt.NewEncoder: unknown format %q", format))
+}
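
A hedged sketch of how Negotiate and NewEncoder fit together in an HTTP handler; the hand-built gauge family and the handler wiring are illustrative assumptions, not part of this package:

```go
package main

import (
	"net/http"

	dto "github.com/prometheus/client_model/go"
	"github.com/prometheus/common/expfmt"
	"google.golang.org/protobuf/proto"
)

func metricsHandler(w http.ResponseWriter, r *http.Request) {
	// Pick the wire format from the client's Accept header.
	format := expfmt.Negotiate(r.Header)
	w.Header().Set("Content-Type", string(format))

	enc := expfmt.NewEncoder(w, format)

	// A hand-built gauge family purely for illustration.
	mf := &dto.MetricFamily{
		Name: proto.String("queue_depth"),
		Type: dto.MetricType_GAUGE.Enum(),
		Metric: []*dto.Metric{
			{Gauge: &dto.Gauge{Value: proto.Float64(42)}},
		},
	}
	if err := enc.Encode(mf); err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	// All encoders returned by NewEncoder also implement Closer; Close is a
	// no-op for every format except OpenMetrics, where it writes "# EOF".
	if closer, ok := enc.(expfmt.Closer); ok {
		_ = closer.Close()
	}
}

func main() {
	http.HandleFunc("/metrics", metricsHandler)
	_ = http.ListenAndServe(":8080", nil)
}
```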
diff --git a/vendor/github.com/prometheus/common/expfmt/expfmt.go b/vendor/github.com/prometheus/common/expfmt/expfmt.go
new file mode 100644
index 0000000..051b38c
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/expfmt.go
@@ -0,0 +1,177 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package expfmt contains tools for reading and writing Prometheus metrics.
+package expfmt
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/prometheus/common/model"
+)
+
+// Format specifies the HTTP content type of the different wire protocols.
+type Format string
+
+// Constants to assemble the Content-Type values for the different wire
+// protocols. The Content-Type strings here are all for the legacy exposition
+// formats, where valid characters for metric names and label names are limited.
+// Support for arbitrary UTF-8 characters in those names is already partially
+// implemented in this module (see model.ValidationScheme), but to actually use
+// it on the wire, new content-type strings will have to be agreed upon and
+// added here.
+const (
+ TextVersion = "0.0.4"
+ ProtoType = `application/vnd.google.protobuf`
+ ProtoProtocol = `io.prometheus.client.MetricFamily`
+ protoFmt = ProtoType + "; proto=" + ProtoProtocol + ";"
+ OpenMetricsType = `application/openmetrics-text`
+ OpenMetricsVersion_0_0_1 = "0.0.1"
+ OpenMetricsVersion_1_0_0 = "1.0.0"
+
+ // The Content-Type values for the different wire protocols. Note that these
+ // values are now unexported. If code was relying on comparisons to these
+ // constants, instead use FormatType().
+ fmtUnknown Format = ``
+ fmtText Format = `text/plain; version=` + TextVersion + `; charset=utf-8`
+ fmtProtoDelim Format = protoFmt + ` encoding=delimited`
+ fmtProtoText Format = protoFmt + ` encoding=text`
+ fmtProtoCompact Format = protoFmt + ` encoding=compact-text`
+ fmtOpenMetrics_1_0_0 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_1_0_0 + `; charset=utf-8`
+ fmtOpenMetrics_0_0_1 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_0_0_1 + `; charset=utf-8`
+)
+
+const (
+ hdrContentType = "Content-Type"
+ hdrAccept = "Accept"
+)
+
+// FormatType is a Go enum representing the overall category for the given
+// Format. As the number of Format permutations increases, basic string
+// comparisons are no longer feasible, so this enum captures the most useful
+// high-level attribute of the Format string.
+type FormatType int
+
+const (
+ TypeUnknown FormatType = iota
+ TypeProtoCompact
+ TypeProtoDelim
+ TypeProtoText
+ TypeTextPlain
+ TypeOpenMetrics
+)
+
+// NewFormat generates a new Format from the type provided. It is mostly used
+// for tests; most Formats should be generated as part of content negotiation in
+// encode.go. If a type has more than one version, the latest version will be
+// returned.
+func NewFormat(t FormatType) Format {
+ switch t {
+ case TypeProtoCompact:
+ return fmtProtoCompact
+ case TypeProtoDelim:
+ return fmtProtoDelim
+ case TypeProtoText:
+ return fmtProtoText
+ case TypeTextPlain:
+ return fmtText
+ case TypeOpenMetrics:
+ return fmtOpenMetrics_1_0_0
+ default:
+ return fmtUnknown
+ }
+}
+
+// NewOpenMetricsFormat generates a new OpenMetrics format matching the
+// specified version number.
+func NewOpenMetricsFormat(version string) (Format, error) {
+ if version == OpenMetricsVersion_0_0_1 {
+ return fmtOpenMetrics_0_0_1, nil
+ }
+ if version == OpenMetricsVersion_1_0_0 {
+ return fmtOpenMetrics_1_0_0, nil
+ }
+ return fmtUnknown, fmt.Errorf("unknown open metrics version string")
+}
+
+// FormatType deduces an overall FormatType for the given format.
+func (f Format) FormatType() FormatType {
+ toks := strings.Split(string(f), ";")
+ params := make(map[string]string)
+ for i, t := range toks {
+ if i == 0 {
+ continue
+ }
+ args := strings.Split(t, "=")
+ if len(args) != 2 {
+ continue
+ }
+ params[strings.TrimSpace(args[0])] = strings.TrimSpace(args[1])
+ }
+
+ switch strings.TrimSpace(toks[0]) {
+ case ProtoType:
+ if params["proto"] != ProtoProtocol {
+ return TypeUnknown
+ }
+ switch params["encoding"] {
+ case "delimited":
+ return TypeProtoDelim
+ case "text":
+ return TypeProtoText
+ case "compact-text":
+ return TypeProtoCompact
+ default:
+ return TypeUnknown
+ }
+ case OpenMetricsType:
+ if params["charset"] != "utf-8" {
+ return TypeUnknown
+ }
+ return TypeOpenMetrics
+ case "text/plain":
+ v, ok := params["version"]
+ if !ok {
+ return TypeTextPlain
+ }
+ if v == TextVersion {
+ return TypeTextPlain
+ }
+ return TypeUnknown
+ default:
+ return TypeUnknown
+ }
+}
+
+// ToEscapingScheme returns an EscapingScheme depending on the Format. If the
+// Format contains an escaping=allow-utf-8 term, it will select NoEscaping. If a
+// valid "escaping" term exists, that will be used. Otherwise, the global default
+// will be returned.
+func (format Format) ToEscapingScheme() model.EscapingScheme {
+ for _, p := range strings.Split(string(format), ";") {
+ toks := strings.Split(p, "=")
+ if len(toks) != 2 {
+ continue
+ }
+ key, value := strings.TrimSpace(toks[0]), strings.TrimSpace(toks[1])
+ if key == model.EscapingKey {
+ scheme, err := model.ToEscapingScheme(value)
+ if err != nil {
+ return model.NameEscapingScheme
+ }
+ return scheme
+ }
+ }
+ return model.NameEscapingScheme
+}
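
A short sketch of the two helpers above in use; the Content-Type string is only an example:

```go
package main

import (
	"fmt"

	"github.com/prometheus/common/expfmt"
)

func main() {
	// A Format is just the Content-Type string; FormatType collapses the many
	// parameter permutations into a small enum that is cheap to switch on.
	f := expfmt.Format("application/openmetrics-text; version=1.0.0; charset=utf-8")

	switch f.FormatType() {
	case expfmt.TypeOpenMetrics:
		fmt.Println("OpenMetrics payload")
	case expfmt.TypeTextPlain:
		fmt.Println("classic text payload")
	default:
		fmt.Println("unknown payload")
	}

	// The escaping scheme falls back to the package-global default unless the
	// Format carries an explicit escaping parameter.
	fmt.Println(f.ToEscapingScheme())
}
```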
diff --git a/vendor/github.com/prometheus/common/expfmt/fuzz.go b/vendor/github.com/prometheus/common/expfmt/fuzz.go
new file mode 100644
index 0000000..dfac962
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/fuzz.go
@@ -0,0 +1,37 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Build only when actually fuzzing
+//go:build gofuzz
+// +build gofuzz
+
+package expfmt
+
+import "bytes"
+
+// Fuzz text metric parser with github.com/dvyukov/go-fuzz:
+//
+// go-fuzz-build github.com/prometheus/common/expfmt
+// go-fuzz -bin expfmt-fuzz.zip -workdir fuzz
+//
+// Further input samples should go in the folder fuzz/corpus.
+func Fuzz(in []byte) int {
+ parser := TextParser{}
+ _, err := parser.TextToMetricFamilies(bytes.NewReader(in))
+
+ if err != nil {
+ return 0
+ }
+
+ return 1
+}
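
The entry point above targets github.com/dvyukov/go-fuzz. As a hedged sketch, the same parser could also be exercised from Go's native fuzzing harness; this test file is an assumption for illustration, not part of the vendored code:

```go
//go:build go1.18

package expfmt

import (
	"bytes"
	"testing"
)

// FuzzTextParser feeds arbitrary byte slices to the text parser, mirroring the
// go-fuzz entry point above but using the native testing.F harness.
func FuzzTextParser(f *testing.F) {
	f.Add([]byte("# TYPE foo counter\nfoo_total 1\n"))
	f.Fuzz(func(t *testing.T, in []byte) {
		var parser TextParser
		// We only care that the parser does not panic on malformed input.
		_, _ = parser.TextToMetricFamilies(bytes.NewReader(in))
	})
}
```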
diff --git a/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go b/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go
new file mode 100644
index 0000000..353c5e9
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go
@@ -0,0 +1,696 @@
+// Copyright 2020 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package expfmt
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "io"
+ "math"
+ "strconv"
+ "strings"
+
+ "google.golang.org/protobuf/types/known/timestamppb"
+
+ "github.com/prometheus/common/model"
+
+ dto "github.com/prometheus/client_model/go"
+)
+
+type encoderOption struct {
+ withCreatedLines bool
+ withUnit bool
+}
+
+type EncoderOption func(*encoderOption)
+
+// WithCreatedLines is an EncoderOption that configures the OpenMetrics encoder
+// to include _created lines (See
+// https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#counter-1).
+// Created timestamps can improve the accuracy of series reset detection, but
+// come with a bandwidth cost.
+//
+// At the time of writing, created timestamp ingestion is still experimental in
+// Prometheus and needs to be enabled with the feature flag
+// `--enable-feature=created-timestamp-zero-ingestion`, and breaking changes are
+// still possible. Therefore, it is recommended to use this feature with caution.
+func WithCreatedLines() EncoderOption {
+ return func(t *encoderOption) {
+ t.withCreatedLines = true
+ }
+}
+
+// WithUnit is an EncoderOption that enables a set unit to be written to the
+// output and to be appended to the metric name as a suffix, if it is not there
+// already. Without opting in this way, the unit is neither added to the metric
+// name nor written to the output, even if it is declared in the
+// *dto.MetricFamily struct, i.e. even if in.Unit != nil.
+func WithUnit() EncoderOption {
+ return func(t *encoderOption) {
+ t.withUnit = true
+ }
+}
+
+// MetricFamilyToOpenMetrics converts a MetricFamily proto message into the
+// OpenMetrics text format and writes the resulting lines to 'out'. It returns
+// the number of bytes written and any error encountered. The output will have
+// the same order as the input, no further sorting is performed. Furthermore,
+// this function assumes the input is already sanitized and does not perform any
+// sanity checks. If the input contains duplicate metrics or invalid metric or
+// label names, the conversion will result in invalid text format output.
+//
+// If metric names conform to the legacy validation pattern, they will be placed
+// outside the brackets in the traditional way, like `foo{}`. If the metric name
+// fails the legacy validation check, it will be placed quoted inside the
+// brackets: `{"foo"}`. As stated above, the input is assumed to be sanitized and
+// no error will be thrown in this case.
+//
+// Similar to metric names, if label names conform to the legacy validation
+// pattern, they will be unquoted as normal, like `foo{bar="baz"}`. If the label
+// name fails the legacy validation check, it will be quoted:
+// `foo{"bar"="baz"}`. As stated above, the input is assumed to be sanitized and
+// no error will be thrown in this case.
+//
+// This function fulfills the type 'expfmt.encoder'.
+//
+// Note that OpenMetrics requires a final `# EOF` line. Since this function acts
+// on individual metric families, it is the responsibility of the caller to
+// append this line to 'out' once all metric families have been written.
+// Conveniently, this can be done by calling FinalizeOpenMetrics.
+//
+// The output should be fully OpenMetrics compliant. However, there are a few
+// missing features and peculiarities to avoid complications when switching from
+// Prometheus to OpenMetrics or vice versa:
+//
+// - Counters are expected to have the `_total` suffix in their metric name. In
+// the output, the suffix will be truncated from the `# TYPE`, `# HELP` and `# UNIT`
+// lines. A counter with a missing `_total` suffix is not an error. However,
+// its type will be set to `unknown` in that case to avoid invalid OpenMetrics
+// output.
+//
+// - According to the OM specs, the `# UNIT` line is optional, but if populated,
+// the unit has to be present in the metric name as its suffix
+// (see https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#unit).
+// However, to accommodate any scenario where such a change to the metric name is
+// not desirable, users are given the choice to explicitly opt in, in case they
+// want the unit to be included in the output AND in the metric name as a suffix
+// (see the description of the WithUnit function above), or not to opt in, in
+// case they want neither.
+//
+// - No support for the following (optional) features: info type,
+// stateset type, gaugehistogram type.
+//
+// - The size of exemplar labels is not checked (i.e. it's possible to create
+// exemplars that are larger than allowed by the OpenMetrics specification).
+//
+// - The value of Counters is not checked. (OpenMetrics doesn't allow counters
+// with a `NaN` value.)
+func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily, options ...EncoderOption) (written int, err error) {
+ toOM := encoderOption{}
+ for _, option := range options {
+ option(&toOM)
+ }
+
+ name := in.GetName()
+ if name == "" {
+ return 0, fmt.Errorf("MetricFamily has no name: %s", in)
+ }
+
+ // Try the interface upgrade. If it doesn't work, we'll use a
+ // bufio.Writer from the sync.Pool.
+ w, ok := out.(enhancedWriter)
+ if !ok {
+ b := bufPool.Get().(*bufio.Writer)
+ b.Reset(out)
+ w = b
+ defer func() {
+ bErr := b.Flush()
+ if err == nil {
+ err = bErr
+ }
+ bufPool.Put(b)
+ }()
+ }
+
+ var (
+ n int
+ metricType = in.GetType()
+ compliantName = name
+ )
+ if metricType == dto.MetricType_COUNTER && strings.HasSuffix(compliantName, "_total") {
+ compliantName = name[:len(name)-6]
+ }
+ if toOM.withUnit && in.Unit != nil && !strings.HasSuffix(compliantName, fmt.Sprintf("_%s", *in.Unit)) {
+ compliantName = compliantName + fmt.Sprintf("_%s", *in.Unit)
+ }
+
+ // Comments, first HELP, then TYPE.
+ if in.Help != nil {
+ n, err = w.WriteString("# HELP ")
+ written += n
+ if err != nil {
+ return
+ }
+ n, err = writeName(w, compliantName)
+ written += n
+ if err != nil {
+ return
+ }
+ err = w.WriteByte(' ')
+ written++
+ if err != nil {
+ return
+ }
+ n, err = writeEscapedString(w, *in.Help, true)
+ written += n
+ if err != nil {
+ return
+ }
+ err = w.WriteByte('\n')
+ written++
+ if err != nil {
+ return
+ }
+ }
+ n, err = w.WriteString("# TYPE ")
+ written += n
+ if err != nil {
+ return
+ }
+ n, err = writeName(w, compliantName)
+ written += n
+ if err != nil {
+ return
+ }
+ switch metricType {
+ case dto.MetricType_COUNTER:
+ if strings.HasSuffix(name, "_total") {
+ n, err = w.WriteString(" counter\n")
+ } else {
+ n, err = w.WriteString(" unknown\n")
+ }
+ case dto.MetricType_GAUGE:
+ n, err = w.WriteString(" gauge\n")
+ case dto.MetricType_SUMMARY:
+ n, err = w.WriteString(" summary\n")
+ case dto.MetricType_UNTYPED:
+ n, err = w.WriteString(" unknown\n")
+ case dto.MetricType_HISTOGRAM:
+ n, err = w.WriteString(" histogram\n")
+ default:
+ return written, fmt.Errorf("unknown metric type %s", metricType.String())
+ }
+ written += n
+ if err != nil {
+ return
+ }
+ if toOM.withUnit && in.Unit != nil {
+ n, err = w.WriteString("# UNIT ")
+ written += n
+ if err != nil {
+ return
+ }
+ n, err = writeName(w, compliantName)
+ written += n
+ if err != nil {
+ return
+ }
+
+ err = w.WriteByte(' ')
+ written++
+ if err != nil {
+ return
+ }
+ n, err = writeEscapedString(w, *in.Unit, true)
+ written += n
+ if err != nil {
+ return
+ }
+ err = w.WriteByte('\n')
+ written++
+ if err != nil {
+ return
+ }
+ }
+
+ var createdTsBytesWritten int
+
+ // Finally the samples, one line for each.
+ if metricType == dto.MetricType_COUNTER && strings.HasSuffix(name, "_total") {
+ compliantName = compliantName + "_total"
+ }
+ for _, metric := range in.Metric {
+ switch metricType {
+ case dto.MetricType_COUNTER:
+ if metric.Counter == nil {
+ return written, fmt.Errorf(
+ "expected counter in metric %s %s", compliantName, metric,
+ )
+ }
+ n, err = writeOpenMetricsSample(
+ w, compliantName, "", metric, "", 0,
+ metric.Counter.GetValue(), 0, false,
+ metric.Counter.Exemplar,
+ )
+ if toOM.withCreatedLines && metric.Counter.CreatedTimestamp != nil {
+ createdTsBytesWritten, err = writeOpenMetricsCreated(w, compliantName, "_total", metric, "", 0, metric.Counter.GetCreatedTimestamp())
+ n += createdTsBytesWritten
+ }
+ case dto.MetricType_GAUGE:
+ if metric.Gauge == nil {
+ return written, fmt.Errorf(
+ "expected gauge in metric %s %s", compliantName, metric,
+ )
+ }
+ n, err = writeOpenMetricsSample(
+ w, compliantName, "", metric, "", 0,
+ metric.Gauge.GetValue(), 0, false,
+ nil,
+ )
+ case dto.MetricType_UNTYPED:
+ if metric.Untyped == nil {
+ return written, fmt.Errorf(
+ "expected untyped in metric %s %s", compliantName, metric,
+ )
+ }
+ n, err = writeOpenMetricsSample(
+ w, compliantName, "", metric, "", 0,
+ metric.Untyped.GetValue(), 0, false,
+ nil,
+ )
+ case dto.MetricType_SUMMARY:
+ if metric.Summary == nil {
+ return written, fmt.Errorf(
+ "expected summary in metric %s %s", compliantName, metric,
+ )
+ }
+ for _, q := range metric.Summary.Quantile {
+ n, err = writeOpenMetricsSample(
+ w, compliantName, "", metric,
+ model.QuantileLabel, q.GetQuantile(),
+ q.GetValue(), 0, false,
+ nil,
+ )
+ written += n
+ if err != nil {
+ return
+ }
+ }
+ n, err = writeOpenMetricsSample(
+ w, compliantName, "_sum", metric, "", 0,
+ metric.Summary.GetSampleSum(), 0, false,
+ nil,
+ )
+ written += n
+ if err != nil {
+ return
+ }
+ n, err = writeOpenMetricsSample(
+ w, compliantName, "_count", metric, "", 0,
+ 0, metric.Summary.GetSampleCount(), true,
+ nil,
+ )
+ if toOM.withCreatedLines && metric.Summary.CreatedTimestamp != nil {
+ createdTsBytesWritten, err = writeOpenMetricsCreated(w, compliantName, "", metric, "", 0, metric.Summary.GetCreatedTimestamp())
+ n += createdTsBytesWritten
+ }
+ case dto.MetricType_HISTOGRAM:
+ if metric.Histogram == nil {
+ return written, fmt.Errorf(
+ "expected histogram in metric %s %s", compliantName, metric,
+ )
+ }
+ infSeen := false
+ for _, b := range metric.Histogram.Bucket {
+ n, err = writeOpenMetricsSample(
+ w, compliantName, "_bucket", metric,
+ model.BucketLabel, b.GetUpperBound(),
+ 0, b.GetCumulativeCount(), true,
+ b.Exemplar,
+ )
+ written += n
+ if err != nil {
+ return
+ }
+ if math.IsInf(b.GetUpperBound(), +1) {
+ infSeen = true
+ }
+ }
+ if !infSeen {
+ n, err = writeOpenMetricsSample(
+ w, compliantName, "_bucket", metric,
+ model.BucketLabel, math.Inf(+1),
+ 0, metric.Histogram.GetSampleCount(), true,
+ nil,
+ )
+ written += n
+ if err != nil {
+ return
+ }
+ }
+ n, err = writeOpenMetricsSample(
+ w, compliantName, "_sum", metric, "", 0,
+ metric.Histogram.GetSampleSum(), 0, false,
+ nil,
+ )
+ written += n
+ if err != nil {
+ return
+ }
+ n, err = writeOpenMetricsSample(
+ w, compliantName, "_count", metric, "", 0,
+ 0, metric.Histogram.GetSampleCount(), true,
+ nil,
+ )
+ if toOM.withCreatedLines && metric.Histogram.CreatedTimestamp != nil {
+ createdTsBytesWritten, err = writeOpenMetricsCreated(w, compliantName, "", metric, "", 0, metric.Histogram.GetCreatedTimestamp())
+ n += createdTsBytesWritten
+ }
+ default:
+ return written, fmt.Errorf(
+ "unexpected type in metric %s %s", compliantName, metric,
+ )
+ }
+ written += n
+ if err != nil {
+ return
+ }
+ }
+ return
+}
+
+// FinalizeOpenMetrics writes the final `# EOF\n` line required by OpenMetrics.
+func FinalizeOpenMetrics(w io.Writer) (written int, err error) {
+ return w.Write([]byte("# EOF\n"))
+}
+
+// writeOpenMetricsSample writes a single sample in OpenMetrics text format to
+// w, given the metric name, the metric proto message itself, optionally an
+// additional label name with a float64 value (use empty string as label name if
+// not required), the value (either as float64 or uint64, determined by
+// useIntValue), and optionally an exemplar (use nil if not required). The
+// function returns the number of bytes written and any error encountered.
+func writeOpenMetricsSample(
+ w enhancedWriter,
+ name, suffix string,
+ metric *dto.Metric,
+ additionalLabelName string, additionalLabelValue float64,
+ floatValue float64, intValue uint64, useIntValue bool,
+ exemplar *dto.Exemplar,
+) (int, error) {
+ written := 0
+ n, err := writeOpenMetricsNameAndLabelPairs(
+ w, name+suffix, metric.Label, additionalLabelName, additionalLabelValue,
+ )
+ written += n
+ if err != nil {
+ return written, err
+ }
+ err = w.WriteByte(' ')
+ written++
+ if err != nil {
+ return written, err
+ }
+ if useIntValue {
+ n, err = writeUint(w, intValue)
+ } else {
+ n, err = writeOpenMetricsFloat(w, floatValue)
+ }
+ written += n
+ if err != nil {
+ return written, err
+ }
+ if metric.TimestampMs != nil {
+ err = w.WriteByte(' ')
+ written++
+ if err != nil {
+ return written, err
+ }
+ // TODO(beorn7): Format this directly without converting to a float first.
+ n, err = writeOpenMetricsFloat(w, float64(*metric.TimestampMs)/1000)
+ written += n
+ if err != nil {
+ return written, err
+ }
+ }
+ if exemplar != nil && len(exemplar.Label) > 0 {
+ n, err = writeExemplar(w, exemplar)
+ written += n
+ if err != nil {
+ return written, err
+ }
+ }
+ err = w.WriteByte('\n')
+ written++
+ if err != nil {
+ return written, err
+ }
+ return written, nil
+}
+
+// writeOpenMetricsNameAndLabelPairs writes the metric name and the label pairs,
+// formatting the additional label value float in OpenMetrics style.
+func writeOpenMetricsNameAndLabelPairs(
+ w enhancedWriter,
+ name string,
+ in []*dto.LabelPair,
+ additionalLabelName string, additionalLabelValue float64,
+) (int, error) {
+ var (
+ written int
+ separator byte = '{'
+ metricInsideBraces = false
+ )
+
+ if name != "" {
+ // If the name does not pass the legacy validity check, we must put the
+ // metric name inside the braces, quoted.
+ if !model.IsValidLegacyMetricName(model.LabelValue(name)) {
+ metricInsideBraces = true
+ err := w.WriteByte(separator)
+ written++
+ if err != nil {
+ return written, err
+ }
+ separator = ','
+ }
+
+ n, err := writeName(w, name)
+ written += n
+ if err != nil {
+ return written, err
+ }
+ }
+
+ if len(in) == 0 && additionalLabelName == "" {
+ if metricInsideBraces {
+ err := w.WriteByte('}')
+ written++
+ if err != nil {
+ return written, err
+ }
+ }
+ return written, nil
+ }
+
+ for _, lp := range in {
+ err := w.WriteByte(separator)
+ written++
+ if err != nil {
+ return written, err
+ }
+ n, err := writeName(w, lp.GetName())
+ written += n
+ if err != nil {
+ return written, err
+ }
+ n, err = w.WriteString(`="`)
+ written += n
+ if err != nil {
+ return written, err
+ }
+ n, err = writeEscapedString(w, lp.GetValue(), true)
+ written += n
+ if err != nil {
+ return written, err
+ }
+ err = w.WriteByte('"')
+ written++
+ if err != nil {
+ return written, err
+ }
+ separator = ','
+ }
+ if additionalLabelName != "" {
+ err := w.WriteByte(separator)
+ written++
+ if err != nil {
+ return written, err
+ }
+ n, err := w.WriteString(additionalLabelName)
+ written += n
+ if err != nil {
+ return written, err
+ }
+ n, err = w.WriteString(`="`)
+ written += n
+ if err != nil {
+ return written, err
+ }
+ n, err = writeOpenMetricsFloat(w, additionalLabelValue)
+ written += n
+ if err != nil {
+ return written, err
+ }
+ err = w.WriteByte('"')
+ written++
+ if err != nil {
+ return written, err
+ }
+ }
+ err := w.WriteByte('}')
+ written++
+ if err != nil {
+ return written, err
+ }
+ return written, nil
+}
+
+// writeOpenMetricsCreated writes the created timestamp for a single time series
+// following OpenMetrics text format to w, given the metric name, the metric proto
+// message itself, optionally a suffix to be removed, e.g. '_total' for counters,
+// an additional label name with a float64 value (use empty string as label name if
+// not required) and the timestamp that represents the created timestamp.
+// The function returns the number of bytes written and any error encountered.
+func writeOpenMetricsCreated(w enhancedWriter,
+ name, suffixToTrim string, metric *dto.Metric,
+ additionalLabelName string, additionalLabelValue float64,
+ createdTimestamp *timestamppb.Timestamp,
+) (int, error) {
+ written := 0
+ n, err := writeOpenMetricsNameAndLabelPairs(
+ w, strings.TrimSuffix(name, suffixToTrim)+"_created", metric.Label, additionalLabelName, additionalLabelValue,
+ )
+ written += n
+ if err != nil {
+ return written, err
+ }
+
+ err = w.WriteByte(' ')
+ written++
+ if err != nil {
+ return written, err
+ }
+
+ // TODO(beorn7): Format this directly from components of ts to
+ // avoid overflow/underflow and precision issues of the float
+ // conversion.
+ n, err = writeOpenMetricsFloat(w, float64(createdTimestamp.AsTime().UnixNano())/1e9)
+ written += n
+ if err != nil {
+ return written, err
+ }
+
+ err = w.WriteByte('\n')
+ written++
+ if err != nil {
+ return written, err
+ }
+ return written, nil
+}
+
+// writeExemplar writes the provided exemplar in OpenMetrics format to w. The
+// function returns the number of bytes written and any error encountered.
+func writeExemplar(w enhancedWriter, e *dto.Exemplar) (int, error) {
+ written := 0
+ n, err := w.WriteString(" # ")
+ written += n
+ if err != nil {
+ return written, err
+ }
+ n, err = writeOpenMetricsNameAndLabelPairs(w, "", e.Label, "", 0)
+ written += n
+ if err != nil {
+ return written, err
+ }
+ err = w.WriteByte(' ')
+ written++
+ if err != nil {
+ return written, err
+ }
+ n, err = writeOpenMetricsFloat(w, e.GetValue())
+ written += n
+ if err != nil {
+ return written, err
+ }
+ if e.Timestamp != nil {
+ err = w.WriteByte(' ')
+ written++
+ if err != nil {
+ return written, err
+ }
+ err = (*e).Timestamp.CheckValid()
+ if err != nil {
+ return written, err
+ }
+ ts := (*e).Timestamp.AsTime()
+ // TODO(beorn7): Format this directly from components of ts to
+ // avoid overflow/underflow and precision issues of the float
+ // conversion.
+ n, err = writeOpenMetricsFloat(w, float64(ts.UnixNano())/1e9)
+ written += n
+ if err != nil {
+ return written, err
+ }
+ }
+ return written, nil
+}
+
+// writeOpenMetricsFloat works like writeFloat but appends ".0" if the resulting
+// number would otherwise contain neither a "." nor an "e".
+func writeOpenMetricsFloat(w enhancedWriter, f float64) (int, error) {
+ switch {
+ case f == 1:
+ return w.WriteString("1.0")
+ case f == 0:
+ return w.WriteString("0.0")
+ case f == -1:
+ return w.WriteString("-1.0")
+ case math.IsNaN(f):
+ return w.WriteString("NaN")
+ case math.IsInf(f, +1):
+ return w.WriteString("+Inf")
+ case math.IsInf(f, -1):
+ return w.WriteString("-Inf")
+ default:
+ bp := numBufPool.Get().(*[]byte)
+ *bp = strconv.AppendFloat((*bp)[:0], f, 'g', -1, 64)
+ if !bytes.ContainsAny(*bp, "e.") {
+ *bp = append(*bp, '.', '0')
+ }
+ written, err := w.Write(*bp)
+ numBufPool.Put(bp)
+ return written, err
+ }
+}
+
+// writeUint is like writeInt, just for uint64.
+func writeUint(w enhancedWriter, u uint64) (int, error) {
+ bp := numBufPool.Get().(*[]byte)
+ *bp = strconv.AppendUint((*bp)[:0], u, 10)
+ written, err := w.Write(*bp)
+ numBufPool.Put(bp)
+ return written, err
+}
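
Tying the exporter pieces together, a hedged sketch that writes one counter family as OpenMetrics text with the created-timestamp and unit lines opted in; the metric name and values are illustrative:

```go
package main

import (
	"os"

	dto "github.com/prometheus/client_model/go"
	"github.com/prometheus/common/expfmt"
	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/timestamppb"
)

func main() {
	// A counter family with the conventional _total suffix; Unit and
	// CreatedTimestamp only show up in the output because of the options below.
	mf := &dto.MetricFamily{
		Name: proto.String("http_request_duration_seconds_total"),
		Help: proto.String("Total request time spent."),
		Unit: proto.String("seconds"),
		Type: dto.MetricType_COUNTER.Enum(),
		Metric: []*dto.Metric{{
			Counter: &dto.Counter{
				Value:            proto.Float64(12.5),
				CreatedTimestamp: timestamppb.Now(),
			},
		}},
	}

	if _, err := expfmt.MetricFamilyToOpenMetrics(
		os.Stdout, mf, expfmt.WithCreatedLines(), expfmt.WithUnit(),
	); err != nil {
		panic(err)
	}
	// OpenMetrics requires a trailing "# EOF" once all families are written.
	if _, err := expfmt.FinalizeOpenMetrics(os.Stdout); err != nil {
		panic(err)
	}
}
```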
diff --git a/vendor/github.com/prometheus/common/expfmt/text_create.go b/vendor/github.com/prometheus/common/expfmt/text_create.go
new file mode 100644
index 0000000..f9b8265
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/text_create.go
@@ -0,0 +1,520 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package expfmt
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+ "math"
+ "strconv"
+ "strings"
+ "sync"
+
+ "github.com/prometheus/common/model"
+
+ dto "github.com/prometheus/client_model/go"
+)
+
+// enhancedWriter has all the enhanced write functions needed here. bufio.Writer
+// implements it.
+type enhancedWriter interface {
+ io.Writer
+ WriteRune(r rune) (n int, err error)
+ WriteString(s string) (n int, err error)
+ WriteByte(c byte) error
+}
+
+const (
+ initialNumBufSize = 24
+)
+
+var (
+ bufPool = sync.Pool{
+ New: func() interface{} {
+ return bufio.NewWriter(io.Discard)
+ },
+ }
+ numBufPool = sync.Pool{
+ New: func() interface{} {
+ b := make([]byte, 0, initialNumBufSize)
+ return &b
+ },
+ }
+)
+
+// MetricFamilyToText converts a MetricFamily proto message into text format and
+// writes the resulting lines to 'out'. It returns the number of bytes written
+// and any error encountered. The output will have the same order as the input,
+// no further sorting is performed. Furthermore, this function assumes the input
+// is already sanitized and does not perform any sanity checks. If the input
+// contains duplicate metrics or invalid metric or label names, the conversion
+// will result in invalid text format output.
+//
+// If metric names conform to the legacy validation pattern, they will be placed
+// outside the brackets in the traditional way, like `foo{}`. If the metric name
+// fails the legacy validation check, it will be placed quoted inside the
+// brackets: `{"foo"}`. As stated above, the input is assumed to be sanitized and
+// no error will be thrown in this case.
+//
+// Similar to metric names, if label names conform to the legacy validation
+// pattern, they will be unquoted as normal, like `foo{bar="baz"}`. If the label
+// name fails the legacy validation check, it will be quoted:
+// `foo{"bar"="baz"}`. As stated above, the input is assumed to be sanitized and
+// no error will be thrown in this case.
+//
+// This method fulfills the type 'prometheus.encoder'.
+func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (written int, err error) {
+ // Fail-fast checks.
+ if len(in.Metric) == 0 {
+ return 0, fmt.Errorf("MetricFamily has no metrics: %s", in)
+ }
+ name := in.GetName()
+ if name == "" {
+ return 0, fmt.Errorf("MetricFamily has no name: %s", in)
+ }
+
+ // Try the interface upgrade. If it doesn't work, we'll use a
+ // bufio.Writer from the sync.Pool.
+ w, ok := out.(enhancedWriter)
+ if !ok {
+ b := bufPool.Get().(*bufio.Writer)
+ b.Reset(out)
+ w = b
+ defer func() {
+ bErr := b.Flush()
+ if err == nil {
+ err = bErr
+ }
+ bufPool.Put(b)
+ }()
+ }
+
+ var n int
+
+ // Comments, first HELP, then TYPE.
+ if in.Help != nil {
+ n, err = w.WriteString("# HELP ")
+ written += n
+ if err != nil {
+ return
+ }
+ n, err = writeName(w, name)
+ written += n
+ if err != nil {
+ return
+ }
+ err = w.WriteByte(' ')
+ written++
+ if err != nil {
+ return
+ }
+ n, err = writeEscapedString(w, *in.Help, false)
+ written += n
+ if err != nil {
+ return
+ }
+ err = w.WriteByte('\n')
+ written++
+ if err != nil {
+ return
+ }
+ }
+ n, err = w.WriteString("# TYPE ")
+ written += n
+ if err != nil {
+ return
+ }
+ n, err = writeName(w, name)
+ written += n
+ if err != nil {
+ return
+ }
+ metricType := in.GetType()
+ switch metricType {
+ case dto.MetricType_COUNTER:
+ n, err = w.WriteString(" counter\n")
+ case dto.MetricType_GAUGE:
+ n, err = w.WriteString(" gauge\n")
+ case dto.MetricType_SUMMARY:
+ n, err = w.WriteString(" summary\n")
+ case dto.MetricType_UNTYPED:
+ n, err = w.WriteString(" untyped\n")
+ case dto.MetricType_HISTOGRAM:
+ n, err = w.WriteString(" histogram\n")
+ default:
+ return written, fmt.Errorf("unknown metric type %s", metricType.String())
+ }
+ written += n
+ if err != nil {
+ return
+ }
+
+ // Finally the samples, one line for each.
+ for _, metric := range in.Metric {
+ switch metricType {
+ case dto.MetricType_COUNTER:
+ if metric.Counter == nil {
+ return written, fmt.Errorf(
+ "expected counter in metric %s %s", name, metric,
+ )
+ }
+ n, err = writeSample(
+ w, name, "", metric, "", 0,
+ metric.Counter.GetValue(),
+ )
+ case dto.MetricType_GAUGE:
+ if metric.Gauge == nil {
+ return written, fmt.Errorf(
+ "expected gauge in metric %s %s", name, metric,
+ )
+ }
+ n, err = writeSample(
+ w, name, "", metric, "", 0,
+ metric.Gauge.GetValue(),
+ )
+ case dto.MetricType_UNTYPED:
+ if metric.Untyped == nil {
+ return written, fmt.Errorf(
+ "expected untyped in metric %s %s", name, metric,
+ )
+ }
+ n, err = writeSample(
+ w, name, "", metric, "", 0,
+ metric.Untyped.GetValue(),
+ )
+ case dto.MetricType_SUMMARY:
+ if metric.Summary == nil {
+ return written, fmt.Errorf(
+ "expected summary in metric %s %s", name, metric,
+ )
+ }
+ for _, q := range metric.Summary.Quantile {
+ n, err = writeSample(
+ w, name, "", metric,
+ model.QuantileLabel, q.GetQuantile(),
+ q.GetValue(),
+ )
+ written += n
+ if err != nil {
+ return
+ }
+ }
+ n, err = writeSample(
+ w, name, "_sum", metric, "", 0,
+ metric.Summary.GetSampleSum(),
+ )
+ written += n
+ if err != nil {
+ return
+ }
+ n, err = writeSample(
+ w, name, "_count", metric, "", 0,
+ float64(metric.Summary.GetSampleCount()),
+ )
+ case dto.MetricType_HISTOGRAM:
+ if metric.Histogram == nil {
+ return written, fmt.Errorf(
+ "expected histogram in metric %s %s", name, metric,
+ )
+ }
+ infSeen := false
+ for _, b := range metric.Histogram.Bucket {
+ n, err = writeSample(
+ w, name, "_bucket", metric,
+ model.BucketLabel, b.GetUpperBound(),
+ float64(b.GetCumulativeCount()),
+ )
+ written += n
+ if err != nil {
+ return
+ }
+ if math.IsInf(b.GetUpperBound(), +1) {
+ infSeen = true
+ }
+ }
+ if !infSeen {
+ n, err = writeSample(
+ w, name, "_bucket", metric,
+ model.BucketLabel, math.Inf(+1),
+ float64(metric.Histogram.GetSampleCount()),
+ )
+ written += n
+ if err != nil {
+ return
+ }
+ }
+ n, err = writeSample(
+ w, name, "_sum", metric, "", 0,
+ metric.Histogram.GetSampleSum(),
+ )
+ written += n
+ if err != nil {
+ return
+ }
+ n, err = writeSample(
+ w, name, "_count", metric, "", 0,
+ float64(metric.Histogram.GetSampleCount()),
+ )
+ default:
+ return written, fmt.Errorf(
+ "unexpected type in metric %s %s", name, metric,
+ )
+ }
+ written += n
+ if err != nil {
+ return
+ }
+ }
+ return
+}
+
+// writeSample writes a single sample in text format to w, given the metric
+// name, the metric proto message itself, optionally an additional label name
+// with a float64 value (use empty string as label name if not required), and
+// the value. The function returns the number of bytes written and any error
+// encountered.
+func writeSample(
+ w enhancedWriter,
+ name, suffix string,
+ metric *dto.Metric,
+ additionalLabelName string, additionalLabelValue float64,
+ value float64,
+) (int, error) {
+ written := 0
+ n, err := writeNameAndLabelPairs(
+ w, name+suffix, metric.Label, additionalLabelName, additionalLabelValue,
+ )
+ written += n
+ if err != nil {
+ return written, err
+ }
+ err = w.WriteByte(' ')
+ written++
+ if err != nil {
+ return written, err
+ }
+ n, err = writeFloat(w, value)
+ written += n
+ if err != nil {
+ return written, err
+ }
+ if metric.TimestampMs != nil {
+ err = w.WriteByte(' ')
+ written++
+ if err != nil {
+ return written, err
+ }
+ n, err = writeInt(w, *metric.TimestampMs)
+ written += n
+ if err != nil {
+ return written, err
+ }
+ }
+ err = w.WriteByte('\n')
+ written++
+ if err != nil {
+ return written, err
+ }
+ return written, nil
+}
+
+// writeNameAndLabelPairs converts a slice of LabelPair proto messages plus the
+// explicitly given metric name and additional label pair into text formatted as
+// required by the text format and writes it to 'w'. An empty slice in
+// combination with an empty string 'additionalLabelName' results in nothing
+// being written. Otherwise, the label pairs are written, escaped as required by
+// the text format, and enclosed in '{...}'. The function returns the number of
+// bytes written and any error encountered. If the metric name is not
+// legacy-valid, it will be put inside the brackets as well. Legacy-invalid
+// label names will also be quoted.
+func writeNameAndLabelPairs(
+ w enhancedWriter,
+ name string,
+ in []*dto.LabelPair,
+ additionalLabelName string, additionalLabelValue float64,
+) (int, error) {
+ var (
+ written int
+ separator byte = '{'
+ metricInsideBraces = false
+ )
+
+ if name != "" {
+ // If the name does not pass the legacy validity check, we must put the
+ // metric name inside the braces.
+ if !model.IsValidLegacyMetricName(model.LabelValue(name)) {
+ metricInsideBraces = true
+ err := w.WriteByte(separator)
+ written++
+ if err != nil {
+ return written, err
+ }
+ separator = ','
+ }
+ n, err := writeName(w, name)
+ written += n
+ if err != nil {
+ return written, err
+ }
+ }
+
+ if len(in) == 0 && additionalLabelName == "" {
+ if metricInsideBraces {
+ err := w.WriteByte('}')
+ written++
+ if err != nil {
+ return written, err
+ }
+ }
+ return written, nil
+ }
+
+ for _, lp := range in {
+ err := w.WriteByte(separator)
+ written++
+ if err != nil {
+ return written, err
+ }
+ n, err := writeName(w, lp.GetName())
+ written += n
+ if err != nil {
+ return written, err
+ }
+ n, err = w.WriteString(`="`)
+ written += n
+ if err != nil {
+ return written, err
+ }
+ n, err = writeEscapedString(w, lp.GetValue(), true)
+ written += n
+ if err != nil {
+ return written, err
+ }
+ err = w.WriteByte('"')
+ written++
+ if err != nil {
+ return written, err
+ }
+ separator = ','
+ }
+ if additionalLabelName != "" {
+ err := w.WriteByte(separator)
+ written++
+ if err != nil {
+ return written, err
+ }
+ n, err := w.WriteString(additionalLabelName)
+ written += n
+ if err != nil {
+ return written, err
+ }
+ n, err = w.WriteString(`="`)
+ written += n
+ if err != nil {
+ return written, err
+ }
+ n, err = writeFloat(w, additionalLabelValue)
+ written += n
+ if err != nil {
+ return written, err
+ }
+ err = w.WriteByte('"')
+ written++
+ if err != nil {
+ return written, err
+ }
+ }
+ err := w.WriteByte('}')
+ written++
+ if err != nil {
+ return written, err
+ }
+ return written, nil
+}
+
+// writeEscapedString replaces '\' by '\\', new line character by '\n', and - if
+// includeDoubleQuote is true - '"' by '\"'.
+var (
+ escaper = strings.NewReplacer("\\", `\\`, "\n", `\n`)
+ quotedEscaper = strings.NewReplacer("\\", `\\`, "\n", `\n`, "\"", `\"`)
+)
+
+func writeEscapedString(w enhancedWriter, v string, includeDoubleQuote bool) (int, error) {
+ if includeDoubleQuote {
+ return quotedEscaper.WriteString(w, v)
+ }
+ return escaper.WriteString(w, v)
+}
+
+// writeFloat is equivalent to fmt.Fprint with a float64 argument but hardcodes
+// a few common cases for increased efficiency. For non-hardcoded cases, it uses
+// strconv.AppendFloat to avoid allocations, similar to writeInt.
+func writeFloat(w enhancedWriter, f float64) (int, error) {
+ switch {
+ case f == 1:
+ return 1, w.WriteByte('1')
+ case f == 0:
+ return 1, w.WriteByte('0')
+ case f == -1:
+ return w.WriteString("-1")
+ case math.IsNaN(f):
+ return w.WriteString("NaN")
+ case math.IsInf(f, +1):
+ return w.WriteString("+Inf")
+ case math.IsInf(f, -1):
+ return w.WriteString("-Inf")
+ default:
+ bp := numBufPool.Get().(*[]byte)
+ *bp = strconv.AppendFloat((*bp)[:0], f, 'g', -1, 64)
+ written, err := w.Write(*bp)
+ numBufPool.Put(bp)
+ return written, err
+ }
+}
+
+// writeInt is equivalent to fmt.Fprint with an int64 argument but uses
+// strconv.AppendInt with a byte slice taken from a sync.Pool to avoid
+// allocations.
+func writeInt(w enhancedWriter, i int64) (int, error) {
+ bp := numBufPool.Get().(*[]byte)
+ *bp = strconv.AppendInt((*bp)[:0], i, 10)
+ written, err := w.Write(*bp)
+ numBufPool.Put(bp)
+ return written, err
+}
+
+// writeName writes a string as-is if it complies with the legacy naming
+// scheme, or escapes it in double quotes if not.
+func writeName(w enhancedWriter, name string) (int, error) {
+ if model.IsValidLegacyMetricName(model.LabelValue(name)) {
+ return w.WriteString(name)
+ }
+ var written int
+ var err error
+ err = w.WriteByte('"')
+ written++
+ if err != nil {
+ return written, err
+ }
+ var n int
+ n, err = writeEscapedString(w, name, true)
+ written += n
+ if err != nil {
+ return written, err
+ }
+ err = w.WriteByte('"')
+ written++
+ return written, err
+}
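
A hedged usage sketch for the exported MetricFamilyToText defined above: it builds a one-sample counter family by hand and renders it to stdout. The metric name, label, and value are illustrative only.

```go
package main

import (
	"os"

	dto "github.com/prometheus/client_model/go"
	"github.com/prometheus/common/expfmt"
	"google.golang.org/protobuf/proto"
)

func main() {
	// A minimal counter family; the expected output is:
	//   # HELP http_requests_total Total HTTP requests.
	//   # TYPE http_requests_total counter
	//   http_requests_total{code="200"} 42
	mf := &dto.MetricFamily{
		Name: proto.String("http_requests_total"),
		Help: proto.String("Total HTTP requests."),
		Type: dto.MetricType_COUNTER.Enum(),
		Metric: []*dto.Metric{{
			Label: []*dto.LabelPair{{
				Name:  proto.String("code"),
				Value: proto.String("200"),
			}},
			Counter: &dto.Counter{Value: proto.Float64(42)},
		}},
	}
	if _, err := expfmt.MetricFamilyToText(os.Stdout, mf); err != nil {
		panic(err)
	}
}
```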
diff --git a/vendor/github.com/prometheus/common/expfmt/text_parse.go b/vendor/github.com/prometheus/common/expfmt/text_parse.go
new file mode 100644
index 0000000..2649021
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/text_parse.go
@@ -0,0 +1,781 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package expfmt
+
+import (
+ "bufio"
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "math"
+ "strconv"
+ "strings"
+
+ dto "github.com/prometheus/client_model/go"
+
+ "google.golang.org/protobuf/proto"
+
+ "github.com/prometheus/common/model"
+)
+
+// A stateFn is a function that represents a state in a state machine. By
+// executing it, the state is progressed to the next state. The stateFn returns
+// another stateFn, which represents the new state. The end state is represented
+// by nil.
+type stateFn func() stateFn
+
+// ParseError signals errors while parsing the simple and flat text-based
+// exchange format.
+type ParseError struct {
+ Line int
+ Msg string
+}
+
+// Error implements the error interface.
+func (e ParseError) Error() string {
+ return fmt.Sprintf("text format parsing error in line %d: %s", e.Line, e.Msg)
+}
+
+// TextParser is used to parse the simple and flat text-based exchange format. Its
+// zero value is ready to use.
+type TextParser struct {
+ metricFamiliesByName map[string]*dto.MetricFamily
+ buf *bufio.Reader // Where the parsed input is read through.
+ err error // Most recent error.
+ lineCount int // Tracks the line count for error messages.
+ currentByte byte // The most recent byte read.
+ currentToken bytes.Buffer // Re-used each time a token has to be gathered from multiple bytes.
+ currentMF *dto.MetricFamily
+ currentMetric *dto.Metric
+ currentLabelPair *dto.LabelPair
+
+ // The remaining member variables are only used for summaries/histograms.
+ currentLabels map[string]string // All labels including '__name__' but excluding 'quantile'/'le'
+ // Summary specific.
+ summaries map[uint64]*dto.Metric // Key is created with LabelsToSignature.
+ currentQuantile float64
+ // Histogram specific.
+ histograms map[uint64]*dto.Metric // Key is created with LabelsToSignature.
+ currentBucket float64
+ // These tell us if the currently processed line ends on '_count' or
+ // '_sum' respectively and belongs to a summary/histogram, representing the sample
+ // count and sum of that summary/histogram.
+ currentIsSummaryCount, currentIsSummarySum bool
+ currentIsHistogramCount, currentIsHistogramSum bool
+}
+
+// TextToMetricFamilies reads 'in' as the simple and flat text-based exchange
+// format and creates MetricFamily proto messages. It returns the MetricFamily
+// proto messages in a map where the metric names are the keys, along with any
+// error encountered.
+//
+// If the input contains duplicate metrics (i.e. lines with the same metric name
+// and exactly the same label set), the resulting MetricFamily will contain
+// duplicate Metric proto messages. Similar is true for duplicate label
+// names. Checks for duplicates have to be performed separately, if required.
+// Also note that neither the metrics within each MetricFamily are sorted nor
+// the label pairs within each Metric. Sorting is not required for the most
+// frequent use of this method, which is sample ingestion in the Prometheus
+// server. However, for presentation purposes, you might want to sort the
+// metrics, and in some cases, you must sort the labels, e.g. for consumption by
+// the metric family injection hook of the Prometheus registry.
+//
+// Summaries and histograms are rather special beasts. You would probably not
+// use them in the simple text format anyway. This method can deal with
+// summaries and histograms if they are presented in exactly the way the
+// text.Create function creates them.
+//
+// This method must not be called concurrently. If you want to parse different
+// input concurrently, instantiate a separate Parser for each goroutine.
+func (p *TextParser) TextToMetricFamilies(in io.Reader) (map[string]*dto.MetricFamily, error) {
+ p.reset(in)
+ for nextState := p.startOfLine; nextState != nil; nextState = nextState() {
+ // Magic happens here...
+ }
+ // Get rid of empty metric families.
+ for k, mf := range p.metricFamiliesByName {
+ if len(mf.GetMetric()) == 0 {
+ delete(p.metricFamiliesByName, k)
+ }
+ }
+ // If p.err is io.EOF now, we have run into a premature end of the input
+ // stream. Turn this error into something nicer and more
+ // meaningful. (io.EOF is often used as a signal for the legitimate end
+ // of an input stream.)
+ if p.err != nil && errors.Is(p.err, io.EOF) {
+ p.parseError("unexpected end of input stream")
+ }
+ return p.metricFamiliesByName, p.err
+}
+
+func (p *TextParser) reset(in io.Reader) {
+ p.metricFamiliesByName = map[string]*dto.MetricFamily{}
+ if p.buf == nil {
+ p.buf = bufio.NewReader(in)
+ } else {
+ p.buf.Reset(in)
+ }
+ p.err = nil
+ p.lineCount = 0
+ if p.summaries == nil || len(p.summaries) > 0 {
+ p.summaries = map[uint64]*dto.Metric{}
+ }
+ if p.histograms == nil || len(p.histograms) > 0 {
+ p.histograms = map[uint64]*dto.Metric{}
+ }
+ p.currentQuantile = math.NaN()
+ p.currentBucket = math.NaN()
+}
+
+// startOfLine represents the state where the next byte read from p.buf is the
+// start of a line (or whitespace leading up to it).
+func (p *TextParser) startOfLine() stateFn {
+ p.lineCount++
+ if p.skipBlankTab(); p.err != nil {
+ // This is the only place that we expect to see io.EOF,
+ // which is not an error but the signal that we are done.
+ // Any other error that happens to align with the start of
+ // a line is still an error.
+ if errors.Is(p.err, io.EOF) {
+ p.err = nil
+ }
+ return nil
+ }
+ switch p.currentByte {
+ case '#':
+ return p.startComment
+ case '\n':
+ return p.startOfLine // Empty line, start the next one.
+ }
+ return p.readingMetricName
+}
+
+// startComment represents the state where the next byte read from p.buf is the
+// start of a comment (or whitespace leading up to it).
+func (p *TextParser) startComment() stateFn {
+ if p.skipBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.currentByte == '\n' {
+ return p.startOfLine
+ }
+ if p.readTokenUntilWhitespace(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ // If we have hit the end of line already, there is nothing left
+ // to do. This is not considered a syntax error.
+ if p.currentByte == '\n' {
+ return p.startOfLine
+ }
+ keyword := p.currentToken.String()
+ if keyword != "HELP" && keyword != "TYPE" {
+ // Generic comment, ignore by fast forwarding to end of line.
+ for p.currentByte != '\n' {
+ if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ }
+ return p.startOfLine
+ }
+ // There is something. Next has to be a metric name.
+ if p.skipBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.readTokenAsMetricName(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.currentByte == '\n' {
+ // At the end of the line already.
+ // Again, this is not considered a syntax error.
+ return p.startOfLine
+ }
+ if !isBlankOrTab(p.currentByte) {
+ p.parseError("invalid metric name in comment")
+ return nil
+ }
+ p.setOrCreateCurrentMF()
+ if p.skipBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.currentByte == '\n' {
+ // At the end of the line already.
+ // Again, this is not considered a syntax error.
+ return p.startOfLine
+ }
+ switch keyword {
+ case "HELP":
+ return p.readingHelp
+ case "TYPE":
+ return p.readingType
+ }
+ panic(fmt.Sprintf("code error: unexpected keyword %q", keyword))
+}
+
+// readingMetricName represents the state where the last byte read (now in
+// p.currentByte) is the first byte of a metric name.
+func (p *TextParser) readingMetricName() stateFn {
+ if p.readTokenAsMetricName(); p.err != nil {
+ return nil
+ }
+ if p.currentToken.Len() == 0 {
+ p.parseError("invalid metric name")
+ return nil
+ }
+ p.setOrCreateCurrentMF()
+ // Now is the time to fix the type if it hasn't happened yet.
+ if p.currentMF.Type == nil {
+ p.currentMF.Type = dto.MetricType_UNTYPED.Enum()
+ }
+ p.currentMetric = &dto.Metric{}
+ // Do not append the newly created currentMetric to
+ // currentMF.Metric right now. First wait if this is a summary,
+ // and the metric exists already, which we can only know after
+ // having read all the labels.
+ if p.skipBlankTabIfCurrentBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ return p.readingLabels
+}
+
+// readingLabels represents the state where the last byte read (now in
+// p.currentByte) is either the first byte of the label set (i.e. a '{'), or the
+// first byte of the value (otherwise).
+func (p *TextParser) readingLabels() stateFn {
+ // Summaries/histograms are special. We have to reset the
+ // currentLabels map, currentQuantile and currentBucket before starting to
+ // read labels.
+ if p.currentMF.GetType() == dto.MetricType_SUMMARY || p.currentMF.GetType() == dto.MetricType_HISTOGRAM {
+ p.currentLabels = map[string]string{}
+ p.currentLabels[string(model.MetricNameLabel)] = p.currentMF.GetName()
+ p.currentQuantile = math.NaN()
+ p.currentBucket = math.NaN()
+ }
+ if p.currentByte != '{' {
+ return p.readingValue
+ }
+ return p.startLabelName
+}
+
+// startLabelName represents the state where the next byte read from p.buf is
+// the start of a label name (or whitespace leading up to it).
+func (p *TextParser) startLabelName() stateFn {
+ if p.skipBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.currentByte == '}' {
+ if p.skipBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ return p.readingValue
+ }
+ if p.readTokenAsLabelName(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.currentToken.Len() == 0 {
+ p.parseError(fmt.Sprintf("invalid label name for metric %q", p.currentMF.GetName()))
+ return nil
+ }
+ p.currentLabelPair = &dto.LabelPair{Name: proto.String(p.currentToken.String())}
+ if p.currentLabelPair.GetName() == string(model.MetricNameLabel) {
+ p.parseError(fmt.Sprintf("label name %q is reserved", model.MetricNameLabel))
+ return nil
+ }
+ // Special summary/histogram treatment. Don't add 'quantile' and 'le'
+ // labels to 'real' labels.
+ if !(p.currentMF.GetType() == dto.MetricType_SUMMARY && p.currentLabelPair.GetName() == model.QuantileLabel) &&
+ !(p.currentMF.GetType() == dto.MetricType_HISTOGRAM && p.currentLabelPair.GetName() == model.BucketLabel) {
+ p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPair)
+ }
+ if p.skipBlankTabIfCurrentBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.currentByte != '=' {
+ p.parseError(fmt.Sprintf("expected '=' after label name, found %q", p.currentByte))
+ return nil
+ }
+ // Check for duplicate label names.
+ labels := make(map[string]struct{})
+ for _, l := range p.currentMetric.Label {
+ lName := l.GetName()
+ if _, exists := labels[lName]; !exists {
+ labels[lName] = struct{}{}
+ } else {
+ p.parseError(fmt.Sprintf("duplicate label names for metric %q", p.currentMF.GetName()))
+ return nil
+ }
+ }
+ return p.startLabelValue
+}
+
+// startLabelValue represents the state where the next byte read from p.buf is
+// the start of a (quoted) label value (or whitespace leading up to it).
+func (p *TextParser) startLabelValue() stateFn {
+ if p.skipBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.currentByte != '"' {
+ p.parseError(fmt.Sprintf("expected '\"' at start of label value, found %q", p.currentByte))
+ return nil
+ }
+ if p.readTokenAsLabelValue(); p.err != nil {
+ return nil
+ }
+ if !model.LabelValue(p.currentToken.String()).IsValid() {
+ p.parseError(fmt.Sprintf("invalid label value %q", p.currentToken.String()))
+ return nil
+ }
+ p.currentLabelPair.Value = proto.String(p.currentToken.String())
+ // Special treatment of summaries:
+ // - Quantile labels are special, will result in dto.Quantile later.
+ // - Other labels have to be added to currentLabels for signature calculation.
+ if p.currentMF.GetType() == dto.MetricType_SUMMARY {
+ if p.currentLabelPair.GetName() == model.QuantileLabel {
+ if p.currentQuantile, p.err = parseFloat(p.currentLabelPair.GetValue()); p.err != nil {
+ // Create a more helpful error message.
+ p.parseError(fmt.Sprintf("expected float as value for 'quantile' label, got %q", p.currentLabelPair.GetValue()))
+ return nil
+ }
+ } else {
+ p.currentLabels[p.currentLabelPair.GetName()] = p.currentLabelPair.GetValue()
+ }
+ }
+ // Similar special treatment of histograms.
+ if p.currentMF.GetType() == dto.MetricType_HISTOGRAM {
+ if p.currentLabelPair.GetName() == model.BucketLabel {
+ if p.currentBucket, p.err = parseFloat(p.currentLabelPair.GetValue()); p.err != nil {
+ // Create a more helpful error message.
+ p.parseError(fmt.Sprintf("expected float as value for 'le' label, got %q", p.currentLabelPair.GetValue()))
+ return nil
+ }
+ } else {
+ p.currentLabels[p.currentLabelPair.GetName()] = p.currentLabelPair.GetValue()
+ }
+ }
+ if p.skipBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ switch p.currentByte {
+ case ',':
+ return p.startLabelName
+
+ case '}':
+ if p.skipBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ return p.readingValue
+ default:
+ p.parseError(fmt.Sprintf("unexpected end of label value %q", p.currentLabelPair.GetValue()))
+ return nil
+ }
+}
+
+// readingValue represents the state where the last byte read (now in
+// p.currentByte) is the first byte of the sample value (i.e. a float).
+func (p *TextParser) readingValue() stateFn {
+ // When we are here, we have read all the labels, so for the
+ // special case of a summary/histogram, we can finally find out
+ // if the metric already exists.
+ if p.currentMF.GetType() == dto.MetricType_SUMMARY {
+ signature := model.LabelsToSignature(p.currentLabels)
+ if summary := p.summaries[signature]; summary != nil {
+ p.currentMetric = summary
+ } else {
+ p.summaries[signature] = p.currentMetric
+ p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric)
+ }
+ } else if p.currentMF.GetType() == dto.MetricType_HISTOGRAM {
+ signature := model.LabelsToSignature(p.currentLabels)
+ if histogram := p.histograms[signature]; histogram != nil {
+ p.currentMetric = histogram
+ } else {
+ p.histograms[signature] = p.currentMetric
+ p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric)
+ }
+ } else {
+ p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric)
+ }
+ if p.readTokenUntilWhitespace(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ value, err := parseFloat(p.currentToken.String())
+ if err != nil {
+ // Create a more helpful error message.
+ p.parseError(fmt.Sprintf("expected float as value, got %q", p.currentToken.String()))
+ return nil
+ }
+ switch p.currentMF.GetType() {
+ case dto.MetricType_COUNTER:
+ p.currentMetric.Counter = &dto.Counter{Value: proto.Float64(value)}
+ case dto.MetricType_GAUGE:
+ p.currentMetric.Gauge = &dto.Gauge{Value: proto.Float64(value)}
+ case dto.MetricType_UNTYPED:
+ p.currentMetric.Untyped = &dto.Untyped{Value: proto.Float64(value)}
+ case dto.MetricType_SUMMARY:
+ // *sigh*
+ if p.currentMetric.Summary == nil {
+ p.currentMetric.Summary = &dto.Summary{}
+ }
+ switch {
+ case p.currentIsSummaryCount:
+ p.currentMetric.Summary.SampleCount = proto.Uint64(uint64(value))
+ case p.currentIsSummarySum:
+ p.currentMetric.Summary.SampleSum = proto.Float64(value)
+ case !math.IsNaN(p.currentQuantile):
+ p.currentMetric.Summary.Quantile = append(
+ p.currentMetric.Summary.Quantile,
+ &dto.Quantile{
+ Quantile: proto.Float64(p.currentQuantile),
+ Value: proto.Float64(value),
+ },
+ )
+ }
+ case dto.MetricType_HISTOGRAM:
+ // *sigh*
+ if p.currentMetric.Histogram == nil {
+ p.currentMetric.Histogram = &dto.Histogram{}
+ }
+ switch {
+ case p.currentIsHistogramCount:
+ p.currentMetric.Histogram.SampleCount = proto.Uint64(uint64(value))
+ case p.currentIsHistogramSum:
+ p.currentMetric.Histogram.SampleSum = proto.Float64(value)
+ case !math.IsNaN(p.currentBucket):
+ p.currentMetric.Histogram.Bucket = append(
+ p.currentMetric.Histogram.Bucket,
+ &dto.Bucket{
+ UpperBound: proto.Float64(p.currentBucket),
+ CumulativeCount: proto.Uint64(uint64(value)),
+ },
+ )
+ }
+ default:
+ p.err = fmt.Errorf("unexpected type for metric name %q", p.currentMF.GetName())
+ }
+ if p.currentByte == '\n' {
+ return p.startOfLine
+ }
+ return p.startTimestamp
+}
+
+// startTimestamp represents the state where the next byte read from p.buf is
+// the start of the timestamp (or whitespace leading up to it).
+func (p *TextParser) startTimestamp() stateFn {
+ if p.skipBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.readTokenUntilWhitespace(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ timestamp, err := strconv.ParseInt(p.currentToken.String(), 10, 64)
+ if err != nil {
+ // Create a more helpful error message.
+ p.parseError(fmt.Sprintf("expected integer as timestamp, got %q", p.currentToken.String()))
+ return nil
+ }
+ p.currentMetric.TimestampMs = proto.Int64(timestamp)
+ if p.readTokenUntilNewline(false); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.currentToken.Len() > 0 {
+ p.parseError(fmt.Sprintf("spurious string after timestamp: %q", p.currentToken.String()))
+ return nil
+ }
+ return p.startOfLine
+}
+
+// readingHelp represents the state where the last byte read (now in
+// p.currentByte) is the first byte of the docstring after 'HELP'.
+func (p *TextParser) readingHelp() stateFn {
+ if p.currentMF.Help != nil {
+ p.parseError(fmt.Sprintf("second HELP line for metric name %q", p.currentMF.GetName()))
+ return nil
+ }
+ // Rest of line is the docstring.
+ if p.readTokenUntilNewline(true); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ p.currentMF.Help = proto.String(p.currentToken.String())
+ return p.startOfLine
+}
+
+// readingType represents the state where the last byte read (now in
+// p.currentByte) is the first byte of the type hint after 'TYPE'.
+func (p *TextParser) readingType() stateFn {
+ if p.currentMF.Type != nil {
+ p.parseError(fmt.Sprintf("second TYPE line for metric name %q, or TYPE reported after samples", p.currentMF.GetName()))
+ return nil
+ }
+ // Rest of line is the type.
+ if p.readTokenUntilNewline(false); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ metricType, ok := dto.MetricType_value[strings.ToUpper(p.currentToken.String())]
+ if !ok {
+ p.parseError(fmt.Sprintf("unknown metric type %q", p.currentToken.String()))
+ return nil
+ }
+ p.currentMF.Type = dto.MetricType(metricType).Enum()
+ return p.startOfLine
+}
+
+// parseError sets p.err to a ParseError at the current line with the given
+// message.
+func (p *TextParser) parseError(msg string) {
+ p.err = ParseError{
+ Line: p.lineCount,
+ Msg: msg,
+ }
+}
+
+// skipBlankTab reads (and discards) bytes from p.buf until it encounters a byte
+// that is neither ' ' nor '\t'. That byte is left in p.currentByte.
+func (p *TextParser) skipBlankTab() {
+ for {
+ if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil || !isBlankOrTab(p.currentByte) {
+ return
+ }
+ }
+}
+
+// skipBlankTabIfCurrentBlankTab works exactly as skipBlankTab but doesn't do
+// anything if p.currentByte is neither ' ' nor '\t'.
+func (p *TextParser) skipBlankTabIfCurrentBlankTab() {
+ if isBlankOrTab(p.currentByte) {
+ p.skipBlankTab()
+ }
+}
+
+// readTokenUntilWhitespace copies bytes from p.buf into p.currentToken. The
+// first byte considered is the byte already read (now in p.currentByte). The
+// first whitespace byte encountered is still copied into p.currentByte, but not
+// into p.currentToken.
+func (p *TextParser) readTokenUntilWhitespace() {
+ p.currentToken.Reset()
+ for p.err == nil && !isBlankOrTab(p.currentByte) && p.currentByte != '\n' {
+ p.currentToken.WriteByte(p.currentByte)
+ p.currentByte, p.err = p.buf.ReadByte()
+ }
+}
+
+// readTokenUntilNewline copies bytes from p.buf into p.currentToken. The first
+// byte considered is the byte already read (now in p.currentByte). The first
+// newline byte encountered is still copied into p.currentByte, but not into
+// p.currentToken. If recognizeEscapeSequence is true, two escape sequences are
+// recognized: '\\' translates into '\', and '\n' into a line-feed character.
+// All other escape sequences are invalid and cause an error.
+func (p *TextParser) readTokenUntilNewline(recognizeEscapeSequence bool) {
+ p.currentToken.Reset()
+ escaped := false
+ for p.err == nil {
+ if recognizeEscapeSequence && escaped {
+ switch p.currentByte {
+ case '\\':
+ p.currentToken.WriteByte(p.currentByte)
+ case 'n':
+ p.currentToken.WriteByte('\n')
+ default:
+ p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte))
+ return
+ }
+ escaped = false
+ } else {
+ switch p.currentByte {
+ case '\n':
+ return
+ case '\\':
+ escaped = true
+ default:
+ p.currentToken.WriteByte(p.currentByte)
+ }
+ }
+ p.currentByte, p.err = p.buf.ReadByte()
+ }
+}
+
+// readTokenAsMetricName copies a metric name from p.buf into p.currentToken.
+// The first byte considered is the byte already read (now in p.currentByte).
+// The first byte not part of a metric name is still copied into p.currentByte,
+// but not into p.currentToken.
+func (p *TextParser) readTokenAsMetricName() {
+ p.currentToken.Reset()
+ if !isValidMetricNameStart(p.currentByte) {
+ return
+ }
+ for {
+ p.currentToken.WriteByte(p.currentByte)
+ p.currentByte, p.err = p.buf.ReadByte()
+ if p.err != nil || !isValidMetricNameContinuation(p.currentByte) {
+ return
+ }
+ }
+}
+
+// readTokenAsLabelName copies a label name from p.buf into p.currentToken.
+// The first byte considered is the byte already read (now in p.currentByte).
+// The first byte not part of a label name is still copied into p.currentByte,
+// but not into p.currentToken.
+func (p *TextParser) readTokenAsLabelName() {
+ p.currentToken.Reset()
+ if !isValidLabelNameStart(p.currentByte) {
+ return
+ }
+ for {
+ p.currentToken.WriteByte(p.currentByte)
+ p.currentByte, p.err = p.buf.ReadByte()
+ if p.err != nil || !isValidLabelNameContinuation(p.currentByte) {
+ return
+ }
+ }
+}
+
+// readTokenAsLabelValue copies a label value from p.buf into p.currentToken.
+// In contrast to the other 'readTokenAs...' functions, which start with the
+// last read byte in p.currentByte, this method ignores p.currentByte and starts
+// with reading a new byte from p.buf. The first byte not part of a label value
+// is still copied into p.currentByte, but not into p.currentToken.
+func (p *TextParser) readTokenAsLabelValue() {
+ p.currentToken.Reset()
+ escaped := false
+ for {
+ if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil {
+ return
+ }
+ if escaped {
+ switch p.currentByte {
+ case '"', '\\':
+ p.currentToken.WriteByte(p.currentByte)
+ case 'n':
+ p.currentToken.WriteByte('\n')
+ default:
+ p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte))
+ return
+ }
+ escaped = false
+ continue
+ }
+ switch p.currentByte {
+ case '"':
+ return
+ case '\n':
+ p.parseError(fmt.Sprintf("label value %q contains unescaped new-line", p.currentToken.String()))
+ return
+ case '\\':
+ escaped = true
+ default:
+ p.currentToken.WriteByte(p.currentByte)
+ }
+ }
+}
+
+func (p *TextParser) setOrCreateCurrentMF() {
+ p.currentIsSummaryCount = false
+ p.currentIsSummarySum = false
+ p.currentIsHistogramCount = false
+ p.currentIsHistogramSum = false
+ name := p.currentToken.String()
+ if p.currentMF = p.metricFamiliesByName[name]; p.currentMF != nil {
+ return
+ }
+ // Check whether this is a _sum or _count for a summary/histogram.
+ summaryName := summaryMetricName(name)
+ if p.currentMF = p.metricFamiliesByName[summaryName]; p.currentMF != nil {
+ if p.currentMF.GetType() == dto.MetricType_SUMMARY {
+ if isCount(name) {
+ p.currentIsSummaryCount = true
+ }
+ if isSum(name) {
+ p.currentIsSummarySum = true
+ }
+ return
+ }
+ }
+ histogramName := histogramMetricName(name)
+ if p.currentMF = p.metricFamiliesByName[histogramName]; p.currentMF != nil {
+ if p.currentMF.GetType() == dto.MetricType_HISTOGRAM {
+ if isCount(name) {
+ p.currentIsHistogramCount = true
+ }
+ if isSum(name) {
+ p.currentIsHistogramSum = true
+ }
+ return
+ }
+ }
+ p.currentMF = &dto.MetricFamily{Name: proto.String(name)}
+ p.metricFamiliesByName[name] = p.currentMF
+}
+
+func isValidLabelNameStart(b byte) bool {
+ return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_'
+}
+
+func isValidLabelNameContinuation(b byte) bool {
+ return isValidLabelNameStart(b) || (b >= '0' && b <= '9')
+}
+
+func isValidMetricNameStart(b byte) bool {
+ return isValidLabelNameStart(b) || b == ':'
+}
+
+func isValidMetricNameContinuation(b byte) bool {
+ return isValidLabelNameContinuation(b) || b == ':'
+}
+
+func isBlankOrTab(b byte) bool {
+ return b == ' ' || b == '\t'
+}
+
+func isCount(name string) bool {
+ return len(name) > 6 && name[len(name)-6:] == "_count"
+}
+
+func isSum(name string) bool {
+ return len(name) > 4 && name[len(name)-4:] == "_sum"
+}
+
+func isBucket(name string) bool {
+ return len(name) > 7 && name[len(name)-7:] == "_bucket"
+}
+
+func summaryMetricName(name string) string {
+ switch {
+ case isCount(name):
+ return name[:len(name)-6]
+ case isSum(name):
+ return name[:len(name)-4]
+ default:
+ return name
+ }
+}
+
+func histogramMetricName(name string) string {
+ switch {
+ case isCount(name):
+ return name[:len(name)-6]
+ case isSum(name):
+ return name[:len(name)-4]
+ case isBucket(name):
+ return name[:len(name)-7]
+ default:
+ return name
+ }
+}
+
+func parseFloat(s string) (float64, error) {
+ if strings.ContainsAny(s, "pP_") {
+ return 0, fmt.Errorf("unsupported character in float")
+ }
+ return strconv.ParseFloat(s, 64)
+}
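
A hedged usage sketch for TextToMetricFamilies above: the zero-value TextParser parses a small exposition-format snippet and returns a map keyed by metric name. The sample input is illustrative.

```go
package main

import (
	"fmt"
	"strings"

	"github.com/prometheus/common/expfmt"
)

func main() {
	input := `# HELP http_requests_total Total HTTP requests.
# TYPE http_requests_total counter
http_requests_total{code="200"} 42
`
	// The zero value of TextParser is ready to use; each goroutine needs
	// its own parser instance.
	var parser expfmt.TextParser
	families, err := parser.TextToMetricFamilies(strings.NewReader(input))
	if err != nil {
		panic(err)
	}
	mf := families["http_requests_total"]
	fmt.Println(mf.GetType(), mf.GetMetric()[0].GetCounter().GetValue()) // COUNTER 42
}
```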
diff --git a/vendor/github.com/prometheus/common/model/alert.go b/vendor/github.com/prometheus/common/model/alert.go
new file mode 100644
index 0000000..80d1fe9
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/alert.go
@@ -0,0 +1,161 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "fmt"
+ "time"
+)
+
+type AlertStatus string
+
+const (
+ AlertFiring AlertStatus = "firing"
+ AlertResolved AlertStatus = "resolved"
+)
+
+// Alert is a generic representation of an alert in the Prometheus eco-system.
+type Alert struct {
+ // Label value pairs for purpose of aggregation, matching, and disposition
+ // dispatching. This must minimally include an "alertname" label.
+ Labels LabelSet `json:"labels"`
+
+ // Extra key/value information which does not define alert identity.
+ Annotations LabelSet `json:"annotations"`
+
+ // The known time range for this alert. Both ends are optional.
+ StartsAt time.Time `json:"startsAt,omitempty"`
+ EndsAt time.Time `json:"endsAt,omitempty"`
+ GeneratorURL string `json:"generatorURL"`
+}
+
+// Name returns the name of the alert. It is equivalent to the "alertname" label.
+func (a *Alert) Name() string {
+ return string(a.Labels[AlertNameLabel])
+}
+
+// Fingerprint returns a unique hash for the alert. It is equivalent to
+// the fingerprint of the alert's label set.
+func (a *Alert) Fingerprint() Fingerprint {
+ return a.Labels.Fingerprint()
+}
+
+func (a *Alert) String() string {
+ s := fmt.Sprintf("%s[%s]", a.Name(), a.Fingerprint().String()[:7])
+ if a.Resolved() {
+ return s + "[resolved]"
+ }
+ return s + "[active]"
+}
+
+// Resolved returns true iff the activity interval ended in the past.
+func (a *Alert) Resolved() bool {
+ return a.ResolvedAt(time.Now())
+}
+
+// ResolvedAt returns true iff the activity interval ended before
+// the given timestamp.
+func (a *Alert) ResolvedAt(ts time.Time) bool {
+ if a.EndsAt.IsZero() {
+ return false
+ }
+ return !a.EndsAt.After(ts)
+}
+
+// Status returns the status of the alert.
+func (a *Alert) Status() AlertStatus {
+ return a.StatusAt(time.Now())
+}
+
+// StatusAt returns the status of the alert at the given timestamp.
+func (a *Alert) StatusAt(ts time.Time) AlertStatus {
+ if a.ResolvedAt(ts) {
+ return AlertResolved
+ }
+ return AlertFiring
+}
+
+// Validate returns an error if the alert data is inconsistent.
+func (a *Alert) Validate() error {
+ if a.StartsAt.IsZero() {
+ return fmt.Errorf("start time missing")
+ }
+ if !a.EndsAt.IsZero() && a.EndsAt.Before(a.StartsAt) {
+ return fmt.Errorf("start time must be before end time")
+ }
+ if err := a.Labels.Validate(); err != nil {
+ return fmt.Errorf("invalid label set: %w", err)
+ }
+ if len(a.Labels) == 0 {
+ return fmt.Errorf("at least one label pair required")
+ }
+ if err := a.Annotations.Validate(); err != nil {
+ return fmt.Errorf("invalid annotations: %w", err)
+ }
+ return nil
+}
+
+// Alerts is a list of alerts that can be sorted in chronological order.
+type Alerts []*Alert
+
+func (as Alerts) Len() int { return len(as) }
+func (as Alerts) Swap(i, j int) { as[i], as[j] = as[j], as[i] }
+
+func (as Alerts) Less(i, j int) bool {
+ if as[i].StartsAt.Before(as[j].StartsAt) {
+ return true
+ }
+ if as[i].EndsAt.Before(as[j].EndsAt) {
+ return true
+ }
+ return as[i].Fingerprint() < as[j].Fingerprint()
+}
+
+// HasFiring returns true iff one of the alerts is not resolved.
+func (as Alerts) HasFiring() bool {
+ for _, a := range as {
+ if !a.Resolved() {
+ return true
+ }
+ }
+ return false
+}
+
+// HasFiringAt returns true iff one of the alerts is not resolved
+// at the time ts.
+func (as Alerts) HasFiringAt(ts time.Time) bool {
+ for _, a := range as {
+ if !a.ResolvedAt(ts) {
+ return true
+ }
+ }
+ return false
+}
+
+// Status returns AlertFiring iff at least one of the alerts is firing.
+func (as Alerts) Status() AlertStatus {
+ if as.HasFiring() {
+ return AlertFiring
+ }
+ return AlertResolved
+}
+
+// StatusAt returns AlertFiring iff at least one of the alerts is firing
+// at the time ts.
+func (as Alerts) StatusAt(ts time.Time) AlertStatus {
+ if as.HasFiringAt(ts) {
+ return AlertFiring
+ }
+ return AlertResolved
+}
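
A hedged sketch of the Alert API defined above: a resolved alert is one whose EndsAt lies in the past, and Validate enforces the minimal invariants (StartsAt set, labels valid and non-empty). The label values are illustrative.

```go
package main

import (
	"fmt"
	"time"

	"github.com/prometheus/common/model"
)

func main() {
	a := &model.Alert{
		// Labels must include "alertname"; Annotations may be empty.
		Labels:   model.LabelSet{model.AlertNameLabel: "HighErrorRate", "job": "api"},
		StartsAt: time.Now().Add(-10 * time.Minute),
		EndsAt:   time.Now().Add(-1 * time.Minute),
	}
	if err := a.Validate(); err != nil {
		panic(err)
	}
	// EndsAt is in the past, so the alert counts as resolved.
	fmt.Println(a.Name(), a.Status()) // HighErrorRate resolved
}
```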
diff --git a/vendor/github.com/prometheus/common/model/fingerprinting.go b/vendor/github.com/prometheus/common/model/fingerprinting.go
new file mode 100644
index 0000000..fc4de41
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/fingerprinting.go
@@ -0,0 +1,105 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "fmt"
+ "strconv"
+)
+
+// Fingerprint provides a hash-capable representation of a Metric.
+// For our purposes, FNV-1A 64-bit is used.
+type Fingerprint uint64
+
+// FingerprintFromString transforms a string representation into a Fingerprint.
+func FingerprintFromString(s string) (Fingerprint, error) {
+ num, err := strconv.ParseUint(s, 16, 64)
+ return Fingerprint(num), err
+}
+
+// ParseFingerprint parses the input string into a fingerprint.
+func ParseFingerprint(s string) (Fingerprint, error) {
+ num, err := strconv.ParseUint(s, 16, 64)
+ if err != nil {
+ return 0, err
+ }
+ return Fingerprint(num), nil
+}
+
+func (f Fingerprint) String() string {
+ return fmt.Sprintf("%016x", uint64(f))
+}
+
+// Fingerprints represents a collection of Fingerprint subject to a given
+// natural sorting scheme. It implements sort.Interface.
+type Fingerprints []Fingerprint
+
+// Len implements sort.Interface.
+func (f Fingerprints) Len() int {
+ return len(f)
+}
+
+// Less implements sort.Interface.
+func (f Fingerprints) Less(i, j int) bool {
+ return f[i] < f[j]
+}
+
+// Swap implements sort.Interface.
+func (f Fingerprints) Swap(i, j int) {
+ f[i], f[j] = f[j], f[i]
+}
+
+// FingerprintSet is a set of Fingerprints.
+type FingerprintSet map[Fingerprint]struct{}
+
+// Equal returns true if both sets contain the same elements (and not more).
+func (s FingerprintSet) Equal(o FingerprintSet) bool {
+ if len(s) != len(o) {
+ return false
+ }
+
+ for k := range s {
+ if _, ok := o[k]; !ok {
+ return false
+ }
+ }
+
+ return true
+}
+
+// Intersection returns the elements contained in both sets.
+func (s FingerprintSet) Intersection(o FingerprintSet) FingerprintSet {
+ myLength, otherLength := len(s), len(o)
+ if myLength == 0 || otherLength == 0 {
+ return FingerprintSet{}
+ }
+
+ subSet := s
+ superSet := o
+
+ if otherLength < myLength {
+ subSet = o
+ superSet = s
+ }
+
+ out := FingerprintSet{}
+
+ for k := range subSet {
+ if _, ok := superSet[k]; ok {
+ out[k] = struct{}{}
+ }
+ }
+
+ return out
+}
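
A hedged sketch of the Fingerprint helpers above: Intersection walks the smaller set and keeps the members present in both, and ParseFingerprint round-trips the zero-padded hex form produced by String. The literal fingerprints are illustrative.

```go
package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	a := model.FingerprintSet{1: {}, 2: {}, 3: {}}
	b := model.FingerprintSet{2: {}, 3: {}, 4: {}}

	// Intersection keeps only the fingerprints contained in both sets.
	common := a.Intersection(b)
	fmt.Println(len(common), common.Equal(model.FingerprintSet{2: {}, 3: {}})) // 2 true

	// Fingerprints are parsed from and printed as 16-digit hex.
	fp, _ := model.ParseFingerprint("000000000000002a")
	fmt.Println(fp) // 000000000000002a
}
```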
diff --git a/vendor/github.com/prometheus/common/model/fnv.go b/vendor/github.com/prometheus/common/model/fnv.go
new file mode 100644
index 0000000..367afec
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/fnv.go
@@ -0,0 +1,42 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+// Inline and byte-free variant of hash/fnv's fnv64a.
+
+const (
+ offset64 = 14695981039346656037
+ prime64 = 1099511628211
+)
+
+// hashNew initializes a new fnv64a hash value.
+func hashNew() uint64 {
+ return offset64
+}
+
+// hashAdd adds a string to a fnv64a hash value, returning the updated hash.
+func hashAdd(h uint64, s string) uint64 {
+ for i := 0; i < len(s); i++ {
+ h ^= uint64(s[i])
+ h *= prime64
+ }
+ return h
+}
+
+// hashAddByte adds a byte to a fnv64a hash value, returning the updated hash.
+func hashAddByte(h uint64, b byte) uint64 {
+ h ^= uint64(b)
+ h *= prime64
+ return h
+}
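
The hash functions above are unexported, so the sketch below only restates the FNV-1a update rule (XOR the byte in, then multiply by the 64-bit prime) and cross-checks it against the standard library's hash/fnv; fnv1a is a hypothetical helper written for illustration.

```go
package main

import (
	"fmt"
	"hash/fnv"
)

// fnv1a applies the same update rule as hashAdd above: start from the
// 64-bit offset basis, XOR each byte in, and multiply by the FNV prime.
func fnv1a(s string) uint64 {
	const (
		offset64 = 14695981039346656037
		prime64  = 1099511628211
	)
	h := uint64(offset64)
	for i := 0; i < len(s); i++ {
		h ^= uint64(s[i])
		h *= prime64
	}
	return h
}

func main() {
	std := fnv.New64a()
	std.Write([]byte("__name__"))
	fmt.Println(fnv1a("__name__") == std.Sum64()) // true
}
```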
diff --git a/vendor/github.com/prometheus/common/model/labels.go b/vendor/github.com/prometheus/common/model/labels.go
new file mode 100644
index 0000000..3317ce2
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/labels.go
@@ -0,0 +1,226 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "encoding/json"
+ "fmt"
+ "regexp"
+ "strings"
+ "unicode/utf8"
+)
+
+const (
+ // AlertNameLabel is the name of the label containing an alert's name.
+ AlertNameLabel = "alertname"
+
+ // ExportedLabelPrefix is the prefix to prepend to the label names present in
+ // exported metrics if a label of the same name is added by the server.
+ ExportedLabelPrefix = "exported_"
+
+ // MetricNameLabel is the label name indicating the metric name of a
+ // timeseries.
+ MetricNameLabel = "__name__"
+
+ // SchemeLabel is the name of the label that holds the scheme on which to
+ // scrape a target.
+ SchemeLabel = "__scheme__"
+
+ // AddressLabel is the name of the label that holds the address of
+ // a scrape target.
+ AddressLabel = "__address__"
+
+ // MetricsPathLabel is the name of the label that holds the path on which to
+ // scrape a target.
+ MetricsPathLabel = "__metrics_path__"
+
+ // ScrapeIntervalLabel is the name of the label that holds the scrape interval
+ // used to scrape a target.
+ ScrapeIntervalLabel = "__scrape_interval__"
+
+ // ScrapeTimeoutLabel is the name of the label that holds the scrape
+ // timeout used to scrape a target.
+ ScrapeTimeoutLabel = "__scrape_timeout__"
+
+ // ReservedLabelPrefix is a prefix which is not legal in user-supplied
+ // label names.
+ ReservedLabelPrefix = "__"
+
+ // MetaLabelPrefix is a prefix for labels that provide meta information.
+ // Labels with this prefix are used for intermediate label processing and
+ // will not be attached to time series.
+ MetaLabelPrefix = "__meta_"
+
+ // TmpLabelPrefix is a prefix for temporary labels as part of relabelling.
+ // Labels with this prefix are used for intermediate label processing and
+ // will not be attached to time series. This is reserved for use in
+ // Prometheus configuration files by users.
+ TmpLabelPrefix = "__tmp_"
+
+ // ParamLabelPrefix is a prefix for labels that provide URL parameters
+ // used to scrape a target.
+ ParamLabelPrefix = "__param_"
+
+ // JobLabel is the label name indicating the job from which a timeseries
+ // was scraped.
+ JobLabel = "job"
+
+ // InstanceLabel is the label name used for the instance label.
+ InstanceLabel = "instance"
+
+ // BucketLabel is used for the label that defines the upper bound of a
+ // bucket of a histogram ("le" -> "less or equal").
+ BucketLabel = "le"
+
+ // QuantileLabel is used for the label that defines the quantile in a
+ // summary.
+ QuantileLabel = "quantile"
+)
+
+// LabelNameRE is a regular expression matching valid label names. Note that the
+// IsValid method of LabelName performs the same check but faster than a match
+// with this regular expression.
+var LabelNameRE = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]*$")
+
+// A LabelName is a key for a LabelSet or Metric. It has a value associated
+// therewith.
+type LabelName string
+
+// IsValid returns true iff name matches the pattern of LabelNameRE for legacy
+// names, and iff it's valid UTF-8 if NameValidationScheme is set to
+// UTF8Validation. For the legacy matching, it does not use LabelNameRE for the
+// check but a much faster hardcoded implementation.
+func (ln LabelName) IsValid() bool {
+ if len(ln) == 0 {
+ return false
+ }
+ switch NameValidationScheme {
+ case LegacyValidation:
+ for i, b := range ln {
+ if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) {
+ return false
+ }
+ }
+ case UTF8Validation:
+ return utf8.ValidString(string(ln))
+ default:
+ panic(fmt.Sprintf("Invalid name validation scheme requested: %d", NameValidationScheme))
+ }
+ return true
+}
+
+// UnmarshalYAML implements the yaml.Unmarshaler interface.
+func (ln *LabelName) UnmarshalYAML(unmarshal func(interface{}) error) error {
+ var s string
+ if err := unmarshal(&s); err != nil {
+ return err
+ }
+ if !LabelName(s).IsValid() {
+ return fmt.Errorf("%q is not a valid label name", s)
+ }
+ *ln = LabelName(s)
+ return nil
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (ln *LabelName) UnmarshalJSON(b []byte) error {
+ var s string
+ if err := json.Unmarshal(b, &s); err != nil {
+ return err
+ }
+ if !LabelName(s).IsValid() {
+ return fmt.Errorf("%q is not a valid label name", s)
+ }
+ *ln = LabelName(s)
+ return nil
+}
+
+// LabelNames is a sortable LabelName slice. It implements sort.Interface.
+type LabelNames []LabelName
+
+func (l LabelNames) Len() int {
+ return len(l)
+}
+
+func (l LabelNames) Less(i, j int) bool {
+ return l[i] < l[j]
+}
+
+func (l LabelNames) Swap(i, j int) {
+ l[i], l[j] = l[j], l[i]
+}
+
+func (l LabelNames) String() string {
+ labelStrings := make([]string, 0, len(l))
+ for _, label := range l {
+ labelStrings = append(labelStrings, string(label))
+ }
+ return strings.Join(labelStrings, ", ")
+}
+
+// A LabelValue is an associated value for a LabelName.
+type LabelValue string
+
+// IsValid returns true iff the string is valid UTF-8.
+func (lv LabelValue) IsValid() bool {
+ return utf8.ValidString(string(lv))
+}
+
+// LabelValues is a sortable LabelValue slice. It implements sort.Interface.
+type LabelValues []LabelValue
+
+func (l LabelValues) Len() int {
+ return len(l)
+}
+
+func (l LabelValues) Less(i, j int) bool {
+ return string(l[i]) < string(l[j])
+}
+
+func (l LabelValues) Swap(i, j int) {
+ l[i], l[j] = l[j], l[i]
+}
+
+// LabelPair pairs a name with a value.
+type LabelPair struct {
+ Name LabelName
+ Value LabelValue
+}
+
+// LabelPairs is a sortable slice of LabelPair pointers. It implements
+// sort.Interface.
+type LabelPairs []*LabelPair
+
+func (l LabelPairs) Len() int {
+ return len(l)
+}
+
+func (l LabelPairs) Less(i, j int) bool {
+ switch {
+ case l[i].Name > l[j].Name:
+ return false
+ case l[i].Name < l[j].Name:
+ return true
+ case l[i].Value > l[j].Value:
+ return false
+ case l[i].Value < l[j].Value:
+ return true
+ default:
+ return false
+ }
+}
+
+func (l LabelPairs) Swap(i, j int) {
+ l[i], l[j] = l[j], l[i]
+}
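
A hedged sketch of LabelName validation and LabelNames sorting as documented above. NameValidationScheme and LegacyValidation are assumed to be defined elsewhere in this package, as referenced by IsValid; the label names are illustrative.

```go
package main

import (
	"fmt"
	"sort"

	"github.com/prometheus/common/model"
)

func main() {
	// Use the legacy scheme, so IsValid applies the [a-zA-Z_][a-zA-Z0-9_]* rule.
	model.NameValidationScheme = model.LegacyValidation

	fmt.Println(model.LabelName("http_status").IsValid()) // true
	fmt.Println(model.LabelName("http-status").IsValid()) // false: '-' is not allowed

	// LabelNames sorts lexicographically and prints as a comma-separated list.
	names := model.LabelNames{"job", "__name__", "instance"}
	sort.Sort(names)
	fmt.Println(names) // __name__, instance, job
}
```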
diff --git a/vendor/github.com/prometheus/common/model/labelset.go b/vendor/github.com/prometheus/common/model/labelset.go
new file mode 100644
index 0000000..d0ad88d
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/labelset.go
@@ -0,0 +1,158 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "encoding/json"
+ "fmt"
+ "sort"
+)
+
+// A LabelSet is a collection of LabelName and LabelValue pairs. The LabelSet
+// may be fully qualified, i.e. specific enough to resolve to a single Metric
+// in the data store, or it may be only partially qualified. All operations
+// that occur within the realm of a LabelSet can emit a vector of Metric
+// entities to which the LabelSet may match.
+type LabelSet map[LabelName]LabelValue
+
+// Validate checks whether all names and values in the label set
+// are valid.
+func (ls LabelSet) Validate() error {
+ for ln, lv := range ls {
+ if !ln.IsValid() {
+ return fmt.Errorf("invalid name %q", ln)
+ }
+ if !lv.IsValid() {
+ return fmt.Errorf("invalid value %q", lv)
+ }
+ }
+ return nil
+}
+
+// Equal returns true iff both label sets have exactly the same key/value pairs.
+func (ls LabelSet) Equal(o LabelSet) bool {
+ if len(ls) != len(o) {
+ return false
+ }
+ for ln, lv := range ls {
+ olv, ok := o[ln]
+ if !ok {
+ return false
+ }
+ if olv != lv {
+ return false
+ }
+ }
+ return true
+}
+
+// Before compares the metrics, using the following criteria:
+//
+// If ls has fewer labels than o, it is before o. If it has more, it is not.
+//
+// If the number of labels is the same, the superset of all label names is
+// sorted alphanumerically. The first differing label pair found in that order
+// determines the outcome: If the label does not exist at all in ls, then ls is
+// before o, and vice versa. Otherwise the label value is compared
+// alphanumerically.
+//
+// If ls and o are equal, the method returns false.
+func (ls LabelSet) Before(o LabelSet) bool {
+ if len(ls) < len(o) {
+ return true
+ }
+ if len(ls) > len(o) {
+ return false
+ }
+
+ lns := make(LabelNames, 0, len(ls)+len(o))
+ for ln := range ls {
+ lns = append(lns, ln)
+ }
+ for ln := range o {
+ lns = append(lns, ln)
+ }
+ // It's probably not worth it to de-dup lns.
+ sort.Sort(lns)
+ for _, ln := range lns {
+ mlv, ok := ls[ln]
+ if !ok {
+ return true
+ }
+ olv, ok := o[ln]
+ if !ok {
+ return false
+ }
+ if mlv < olv {
+ return true
+ }
+ if mlv > olv {
+ return false
+ }
+ }
+ return false
+}
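+
+// A minimal sketch of the ordering Before defines, assuming the example label
+// sets below: fewer labels sort first, then the first differing name/value
+// pair decides.
+//
+//	a := LabelSet{"job": "api"}
+//	b := LabelSet{"job": "api", "instance": "host:9090"}
+//	a.Before(b) // true: a has fewer labels than b
+//
+//	c := LabelSet{"job": "db", "instance": "host:9090"}
+//	b.Before(c) // true: "instance" values tie, then "api" < "db" for "job"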
+
+// Clone returns a copy of the label set.
+func (ls LabelSet) Clone() LabelSet {
+ lsn := make(LabelSet, len(ls))
+ for ln, lv := range ls {
+ lsn[ln] = lv
+ }
+ return lsn
+}
+
+// Merge is a helper function to non-destructively merge two label sets.
+func (l LabelSet) Merge(other LabelSet) LabelSet {
+ result := make(LabelSet, len(l))
+
+ for k, v := range l {
+ result[k] = v
+ }
+
+ for k, v := range other {
+ result[k] = v
+ }
+
+ return result
+}
+
+// Fingerprint returns the LabelSet's fingerprint.
+func (ls LabelSet) Fingerprint() Fingerprint {
+ return labelSetToFingerprint(ls)
+}
+
+// FastFingerprint returns the LabelSet's Fingerprint calculated by a faster hashing
+// algorithm, which is, however, more susceptible to hash collisions.
+func (ls LabelSet) FastFingerprint() Fingerprint {
+ return labelSetToFastFingerprint(ls)
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (l *LabelSet) UnmarshalJSON(b []byte) error {
+ var m map[LabelName]LabelValue
+ if err := json.Unmarshal(b, &m); err != nil {
+ return err
+ }
+ // encoding/json only unmarshals maps of the form map[string]T. It treats
+ // LabelName as a string and does not call its UnmarshalJSON method.
+ // Thus, we have to replicate the behavior here.
+ for ln := range m {
+ if !ln.IsValid() {
+ return fmt.Errorf("%q is not a valid label name", ln)
+ }
+ }
+ *l = LabelSet(m)
+ return nil
+}
diff --git a/vendor/github.com/prometheus/common/model/labelset_string.go b/vendor/github.com/prometheus/common/model/labelset_string.go
new file mode 100644
index 0000000..481c47b
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/labelset_string.go
@@ -0,0 +1,45 @@
+// Copyright 2024 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build go1.21
+
+package model
+
+import (
+ "bytes"
+ "slices"
+ "strconv"
+)
+
+// String will look like `{foo="bar", more="less"}`. Names are sorted alphabetically.
+func (l LabelSet) String() string {
+ var lna [32]string // On stack to avoid memory allocation for sorting names.
+ labelNames := lna[:0]
+ for name := range l {
+ labelNames = append(labelNames, string(name))
+ }
+ slices.Sort(labelNames)
+ var bytea [1024]byte // On stack to avoid memory allocation while building the output.
+ b := bytes.NewBuffer(bytea[:0])
+ b.WriteByte('{')
+ for i, name := range labelNames {
+ if i > 0 {
+ b.WriteString(", ")
+ }
+ b.WriteString(name)
+ b.WriteByte('=')
+ b.Write(strconv.AppendQuote(b.AvailableBuffer(), string(l[LabelName(name)])))
+ }
+ b.WriteByte('}')
+ return b.String()
+}
diff --git a/vendor/github.com/prometheus/common/model/labelset_string_go120.go b/vendor/github.com/prometheus/common/model/labelset_string_go120.go
new file mode 100644
index 0000000..c421268
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/labelset_string_go120.go
@@ -0,0 +1,39 @@
+// Copyright 2024 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !go1.21
+
+package model
+
+import (
+ "fmt"
+ "sort"
+ "strings"
+)
+
+// String was optimized using functions not available for go 1.20
+// or lower. We keep the old implementation for compatibility with client_golang.
+// Once client_golang drops support for go 1.20 (scheduled for August 2024), this
+// file can be removed.
+func (l LabelSet) String() string {
+ labelNames := make([]string, 0, len(l))
+ for name := range l {
+ labelNames = append(labelNames, string(name))
+ }
+ sort.Strings(labelNames)
+ lstrs := make([]string, 0, len(l))
+ for _, name := range labelNames {
+ lstrs = append(lstrs, fmt.Sprintf("%s=%q", name, l[LabelName(name)]))
+ }
+ return fmt.Sprintf("{%s}", strings.Join(lstrs, ", "))
+}
diff --git a/vendor/github.com/prometheus/common/model/metadata.go b/vendor/github.com/prometheus/common/model/metadata.go
new file mode 100644
index 0000000..447ab8a
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/metadata.go
@@ -0,0 +1,28 @@
+// Copyright 2023 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+// MetricType represents metric type values.
+type MetricType string
+
+const (
+ MetricTypeCounter = MetricType("counter")
+ MetricTypeGauge = MetricType("gauge")
+ MetricTypeHistogram = MetricType("histogram")
+ MetricTypeGaugeHistogram = MetricType("gaugehistogram")
+ MetricTypeSummary = MetricType("summary")
+ MetricTypeInfo = MetricType("info")
+ MetricTypeStateset = MetricType("stateset")
+ MetricTypeUnknown = MetricType("unknown")
+)
diff --git a/vendor/github.com/prometheus/common/model/metric.go b/vendor/github.com/prometheus/common/model/metric.go
new file mode 100644
index 0000000..eb865e5
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/metric.go
@@ -0,0 +1,457 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "fmt"
+ "regexp"
+ "sort"
+ "strings"
+ "unicode/utf8"
+
+ dto "github.com/prometheus/client_model/go"
+ "google.golang.org/protobuf/proto"
+)
+
+var (
+ // NameValidationScheme determines the method of name validation to be used by
+ // all calls to IsValidMetricName() and LabelName IsValid(). Setting UTF-8 mode
+ // in isolation from other components that don't support UTF-8 may result in
+ // bugs or other undefined behavior. This value is intended to be set by
+ // UTF-8-aware binaries as part of their startup. To avoid the need for locking,
+ // this value should be set once, ideally in an init(), before multiple
+ // goroutines are started.
+ NameValidationScheme = LegacyValidation
+
+ // NameEscapingScheme defines the default way that names will be
+ // escaped when presented to systems that do not support UTF-8 names. If the
+ // Content-Type "escaping" term is specified, that will override this value.
+ NameEscapingScheme = ValueEncodingEscaping
+)
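+
+// A minimal sketch of opting in to UTF-8 name validation, assuming the
+// importing binary is fully UTF-8 aware and sets this before starting any
+// goroutines:
+//
+//	func init() {
+//		model.NameValidationScheme = model.UTF8Validation
+//	}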
+
+// ValidationScheme is a Go enum for determining how metric and label names will
+// be validated by this library.
+type ValidationScheme int
+
+const (
+ // LegacyValidation is a setting that requires that metric and label names
+ // conform to the original Prometheus character requirements described by
+ // MetricNameRE and LabelNameRE.
+ LegacyValidation ValidationScheme = iota
+
+ // UTF8Validation only requires that metric and label names be valid UTF-8
+ // strings.
+ UTF8Validation
+)
+
+type EscapingScheme int
+
+const (
+ // NoEscaping indicates that a name will not be escaped. Unescaped names that
+ // do not conform to the legacy validity check will use a new exposition
+ // format syntax that will be officially standardized in future versions.
+ NoEscaping EscapingScheme = iota
+
+ // UnderscoreEscaping replaces all legacy-invalid characters with underscores.
+ UnderscoreEscaping
+
+ // DotsEscaping is similar to UnderscoreEscaping, except that dots are
+ // converted to `_dot_` and pre-existing underscores are converted to `__`.
+ DotsEscaping
+
+ // ValueEncodingEscaping prepends the name with `U__` and replaces all invalid
+ // characters with the unicode value, surrounded by underscores. Single
+ // underscores are replaced with double underscores.
+ ValueEncodingEscaping
+)
+
+const (
+ // EscapingKey is the key in an Accept or Content-Type header that defines how
+ // metric and label names that do not conform to the legacy character
+ // requirements should be escaped when being scraped by a legacy prometheus
+ // system. If a system does not explicitly pass an escaping parameter in the
+ // Accept header, the default NameEscapingScheme will be used.
+ EscapingKey = "escaping"
+
+ // Possible values for Escaping Key:
+ AllowUTF8 = "allow-utf-8" // No escaping required.
+ EscapeUnderscores = "underscores"
+ EscapeDots = "dots"
+ EscapeValues = "values"
+)
+
+// MetricNameRE is a regular expression matching valid metric
+// names. Note that the IsValidMetricName function performs the same check,
+// but is faster than a match against this regular expression.
+var MetricNameRE = regexp.MustCompile(`^[a-zA-Z_:][a-zA-Z0-9_:]*$`)
+
+// A Metric is similar to a LabelSet, but the key difference is that a Metric is
+// a singleton and refers to one and only one stream of samples.
+type Metric LabelSet
+
+// Equal compares the metrics.
+func (m Metric) Equal(o Metric) bool {
+ return LabelSet(m).Equal(LabelSet(o))
+}
+
+// Before compares the metrics' underlying label sets.
+func (m Metric) Before(o Metric) bool {
+ return LabelSet(m).Before(LabelSet(o))
+}
+
+// Clone returns a copy of the Metric.
+func (m Metric) Clone() Metric {
+ clone := make(Metric, len(m))
+ for k, v := range m {
+ clone[k] = v
+ }
+ return clone
+}
+
+func (m Metric) String() string {
+ metricName, hasName := m[MetricNameLabel]
+ numLabels := len(m) - 1
+ if !hasName {
+ numLabels = len(m)
+ }
+ labelStrings := make([]string, 0, numLabels)
+ for label, value := range m {
+ if label != MetricNameLabel {
+ labelStrings = append(labelStrings, fmt.Sprintf("%s=%q", label, value))
+ }
+ }
+
+ switch numLabels {
+ case 0:
+ if hasName {
+ return string(metricName)
+ }
+ return "{}"
+ default:
+ sort.Strings(labelStrings)
+ return fmt.Sprintf("%s{%s}", metricName, strings.Join(labelStrings, ", "))
+ }
+}
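+
+// A minimal sketch of the rendering above, assuming the example metric below:
+//
+//	m := Metric{MetricNameLabel: "http_requests_total", "code": "200", "method": "get"}
+//	m.String() // http_requests_total{code="200", method="get"}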
+
+// Fingerprint returns a Metric's Fingerprint.
+func (m Metric) Fingerprint() Fingerprint {
+ return LabelSet(m).Fingerprint()
+}
+
+// FastFingerprint returns a Metric's Fingerprint calculated by a faster hashing
+// algorithm, which is, however, more susceptible to hash collisions.
+func (m Metric) FastFingerprint() Fingerprint {
+ return LabelSet(m).FastFingerprint()
+}
+
+// IsValidMetricName returns true iff name matches the pattern of MetricNameRE
+// for legacy names, and iff it's valid UTF-8 if the UTF8Validation scheme is
+// selected.
+func IsValidMetricName(n LabelValue) bool {
+ switch NameValidationScheme {
+ case LegacyValidation:
+ return IsValidLegacyMetricName(n)
+ case UTF8Validation:
+ if len(n) == 0 {
+ return false
+ }
+ return utf8.ValidString(string(n))
+ default:
+ panic(fmt.Sprintf("Invalid name validation scheme requested: %d", NameValidationScheme))
+ }
+}
+
+// IsValidLegacyMetricName is similar to IsValidMetricName but always uses the
+// legacy validation scheme regardless of the value of NameValidationScheme.
+// This function, however, does not use MetricNameRE for the check but a much
+// faster hardcoded implementation.
+func IsValidLegacyMetricName(n LabelValue) bool {
+ if len(n) == 0 {
+ return false
+ }
+ for i, b := range n {
+ if !isValidLegacyRune(b, i) {
+ return false
+ }
+ }
+ return true
+}
+
+// EscapeMetricFamily escapes the given metric names and labels with the given
+// escaping scheme. Returns a new object that uses the same pointers to fields
+// when possible and creates new escaped versions so as not to mutate the
+// input.
+func EscapeMetricFamily(v *dto.MetricFamily, scheme EscapingScheme) *dto.MetricFamily {
+ if v == nil {
+ return nil
+ }
+
+ if scheme == NoEscaping {
+ return v
+ }
+
+ out := &dto.MetricFamily{
+ Help: v.Help,
+ Type: v.Type,
+ Unit: v.Unit,
+ }
+
+ // If the name is nil, copy as-is, don't try to escape.
+ if v.Name == nil || IsValidLegacyMetricName(LabelValue(v.GetName())) {
+ out.Name = v.Name
+ } else {
+ out.Name = proto.String(EscapeName(v.GetName(), scheme))
+ }
+ for _, m := range v.Metric {
+ if !metricNeedsEscaping(m) {
+ out.Metric = append(out.Metric, m)
+ continue
+ }
+
+ escaped := &dto.Metric{
+ Gauge: m.Gauge,
+ Counter: m.Counter,
+ Summary: m.Summary,
+ Untyped: m.Untyped,
+ Histogram: m.Histogram,
+ TimestampMs: m.TimestampMs,
+ }
+
+ for _, l := range m.Label {
+ if l.GetName() == MetricNameLabel {
+ if l.Value == nil || IsValidLegacyMetricName(LabelValue(l.GetValue())) {
+ escaped.Label = append(escaped.Label, l)
+ continue
+ }
+ escaped.Label = append(escaped.Label, &dto.LabelPair{
+ Name: proto.String(MetricNameLabel),
+ Value: proto.String(EscapeName(l.GetValue(), scheme)),
+ })
+ continue
+ }
+ if l.Name == nil || IsValidLegacyMetricName(LabelValue(l.GetName())) {
+ escaped.Label = append(escaped.Label, l)
+ continue
+ }
+ escaped.Label = append(escaped.Label, &dto.LabelPair{
+ Name: proto.String(EscapeName(l.GetName(), scheme)),
+ Value: l.Value,
+ })
+ }
+ out.Metric = append(out.Metric, escaped)
+ }
+ return out
+}
+
+func metricNeedsEscaping(m *dto.Metric) bool {
+ for _, l := range m.Label {
+ if l.GetName() == MetricNameLabel && !IsValidLegacyMetricName(LabelValue(l.GetValue())) {
+ return true
+ }
+ if !IsValidLegacyMetricName(LabelValue(l.GetName())) {
+ return true
+ }
+ }
+ return false
+}
+
+const (
+ lowerhex = "0123456789abcdef"
+)
+
+// EscapeName escapes the incoming name according to the provided escaping
+// scheme. Depending on the rules of escaping, this may cause no change in the
+// string that is returned. (Especially NoEscaping, which by definition is a
+// noop). This function does not do any validation of the name.
+func EscapeName(name string, scheme EscapingScheme) string {
+ if len(name) == 0 {
+ return name
+ }
+ var escaped strings.Builder
+ switch scheme {
+ case NoEscaping:
+ return name
+ case UnderscoreEscaping:
+ if IsValidLegacyMetricName(LabelValue(name)) {
+ return name
+ }
+ for i, b := range name {
+ if isValidLegacyRune(b, i) {
+ escaped.WriteRune(b)
+ } else {
+ escaped.WriteRune('_')
+ }
+ }
+ return escaped.String()
+ case DotsEscaping:
+ // Do not early return for legacy valid names, we still escape underscores.
+ for i, b := range name {
+ if b == '_' {
+ escaped.WriteString("__")
+ } else if b == '.' {
+ escaped.WriteString("_dot_")
+ } else if isValidLegacyRune(b, i) {
+ escaped.WriteRune(b)
+ } else {
+ escaped.WriteRune('_')
+ }
+ }
+ return escaped.String()
+ case ValueEncodingEscaping:
+ if IsValidLegacyMetricName(LabelValue(name)) {
+ return name
+ }
+ escaped.WriteString("U__")
+ for i, b := range name {
+ if isValidLegacyRune(b, i) {
+ escaped.WriteRune(b)
+ } else if !utf8.ValidRune(b) {
+ escaped.WriteString("_FFFD_")
+ } else if b < 0x100 {
+ escaped.WriteRune('_')
+ for s := 4; s >= 0; s -= 4 {
+ escaped.WriteByte(lowerhex[b>>uint(s)&0xF])
+ }
+ escaped.WriteRune('_')
+ } else if b < 0x10000 {
+ escaped.WriteRune('_')
+ for s := 12; s >= 0; s -= 4 {
+ escaped.WriteByte(lowerhex[b>>uint(s)&0xF])
+ }
+ escaped.WriteRune('_')
+ }
+ }
+ return escaped.String()
+ default:
+ panic(fmt.Sprintf("invalid escaping scheme %d", scheme))
+ }
+}
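+
+// A minimal sketch of the escaping schemes, assuming the example name below:
+//
+//	EscapeName("my.metric", UnderscoreEscaping)    // "my_metric"
+//	EscapeName("my.metric", DotsEscaping)          // "my_dot_metric"
+//	EscapeName("my.metric", ValueEncodingEscaping) // "U__my_2e_metric"
+//
+// UnescapeName below reverses ValueEncodingEscaping, e.g. back to "my.metric".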
+
+// lower function taken from strconv.atoi
+func lower(c byte) byte {
+ return c | ('x' - 'X')
+}
+
+// UnescapeName unescapes the incoming name according to the provided escaping
+// scheme if possible. Some schemes are partially or totally non-roundtrippable.
+// If any error is encountered, the original input is returned.
+func UnescapeName(name string, scheme EscapingScheme) string {
+ if len(name) == 0 {
+ return name
+ }
+ switch scheme {
+ case NoEscaping:
+ return name
+ case UnderscoreEscaping:
+ // It is not possible to unescape from underscore replacement.
+ return name
+ case DotsEscaping:
+ name = strings.ReplaceAll(name, "_dot_", ".")
+ name = strings.ReplaceAll(name, "__", "_")
+ return name
+ case ValueEncodingEscaping:
+ escapedName, found := strings.CutPrefix(name, "U__")
+ if !found {
+ return name
+ }
+
+ var unescaped strings.Builder
+ TOP:
+ for i := 0; i < len(escapedName); i++ {
+ // All non-underscores are treated normally.
+ if escapedName[i] != '_' {
+ unescaped.WriteByte(escapedName[i])
+ continue
+ }
+ i++
+ if i >= len(escapedName) {
+ return name
+ }
+ // A double underscore is a single underscore.
+ if escapedName[i] == '_' {
+ unescaped.WriteByte('_')
+ continue
+ }
+ // We think we are in a UTF-8 code, process it.
+ var utf8Val uint
+ for j := 0; i < len(escapedName); j++ {
+ // This is too many characters for a utf8 value.
+ if j > 4 {
+ return name
+ }
+ // Found a closing underscore, convert to a rune, check validity, and append.
+ if escapedName[i] == '_' {
+ utf8Rune := rune(utf8Val)
+ if !utf8.ValidRune(utf8Rune) {
+ return name
+ }
+ unescaped.WriteRune(utf8Rune)
+ continue TOP
+ }
+ r := lower(escapedName[i])
+ utf8Val *= 16
+ if r >= '0' && r <= '9' {
+ utf8Val += uint(r) - '0'
+ } else if r >= 'a' && r <= 'f' {
+ utf8Val += uint(r) - 'a' + 10
+ } else {
+ return name
+ }
+ i++
+ }
+ // Didn't find closing underscore, invalid.
+ return name
+ }
+ return unescaped.String()
+ default:
+ panic(fmt.Sprintf("invalid escaping scheme %d", scheme))
+ }
+}
+
+func isValidLegacyRune(b rune, i int) bool {
+ return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || b == ':' || (b >= '0' && b <= '9' && i > 0)
+}
+
+func (e EscapingScheme) String() string {
+ switch e {
+ case NoEscaping:
+ return AllowUTF8
+ case UnderscoreEscaping:
+ return EscapeUnderscores
+ case DotsEscaping:
+ return EscapeDots
+ case ValueEncodingEscaping:
+ return EscapeValues
+ default:
+ panic(fmt.Sprintf("unknown format scheme %d", e))
+ }
+}
+
+func ToEscapingScheme(s string) (EscapingScheme, error) {
+ if s == "" {
+ return NoEscaping, fmt.Errorf("got empty string instead of escaping scheme")
+ }
+ switch s {
+ case AllowUTF8:
+ return NoEscaping, nil
+ case EscapeUnderscores:
+ return UnderscoreEscaping, nil
+ case EscapeDots:
+ return DotsEscaping, nil
+ case EscapeValues:
+ return ValueEncodingEscaping, nil
+ default:
+ return NoEscaping, fmt.Errorf("unknown format scheme %s", s)
+ }
+}
diff --git a/vendor/github.com/prometheus/common/model/model.go b/vendor/github.com/prometheus/common/model/model.go
new file mode 100644
index 0000000..a7b9691
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/model.go
@@ -0,0 +1,16 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package model contains common data structures that are shared across
+// Prometheus components and libraries.
+package model
diff --git a/vendor/github.com/prometheus/common/model/signature.go b/vendor/github.com/prometheus/common/model/signature.go
new file mode 100644
index 0000000..dc8a002
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/signature.go
@@ -0,0 +1,142 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "sort"
+)
+
+// SeparatorByte is a byte that cannot occur in valid UTF-8 sequences and is
+// used to separate label names, label values, and other strings from each other
+// when calculating their combined hash value (aka signature aka fingerprint).
+const SeparatorByte byte = 255
+
+// cache the signature of an empty label set.
+var emptyLabelSignature = hashNew()
+
+// LabelsToSignature returns a quasi-unique signature (i.e., fingerprint) for a
+// given label set. (Collisions are possible but unlikely if the number of label
+// sets the function is applied to is small.)
+func LabelsToSignature(labels map[string]string) uint64 {
+ if len(labels) == 0 {
+ return emptyLabelSignature
+ }
+
+ labelNames := make([]string, 0, len(labels))
+ for labelName := range labels {
+ labelNames = append(labelNames, labelName)
+ }
+ sort.Strings(labelNames)
+
+ sum := hashNew()
+ for _, labelName := range labelNames {
+ sum = hashAdd(sum, labelName)
+ sum = hashAddByte(sum, SeparatorByte)
+ sum = hashAdd(sum, labels[labelName])
+ sum = hashAddByte(sum, SeparatorByte)
+ }
+ return sum
+}
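+
+// A minimal sketch of the order-independence, assuming the two example maps
+// below; names are sorted before hashing, so both yield the same signature:
+//
+//	LabelsToSignature(map[string]string{"a": "1", "b": "2"}) ==
+//		LabelsToSignature(map[string]string{"b": "2", "a": "1"}) // true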
+
+// labelSetToFingerprint works exactly as LabelsToSignature but takes a LabelSet as
+// parameter (rather than a label map) and returns a Fingerprint.
+func labelSetToFingerprint(ls LabelSet) Fingerprint {
+ if len(ls) == 0 {
+ return Fingerprint(emptyLabelSignature)
+ }
+
+ labelNames := make(LabelNames, 0, len(ls))
+ for labelName := range ls {
+ labelNames = append(labelNames, labelName)
+ }
+ sort.Sort(labelNames)
+
+ sum := hashNew()
+ for _, labelName := range labelNames {
+ sum = hashAdd(sum, string(labelName))
+ sum = hashAddByte(sum, SeparatorByte)
+ sum = hashAdd(sum, string(ls[labelName]))
+ sum = hashAddByte(sum, SeparatorByte)
+ }
+ return Fingerprint(sum)
+}
+
+// labelSetToFastFingerprint works similar to labelSetToFingerprint but uses a
+// faster and less allocation-heavy hash function, which is more susceptible to
+// hash collisions. Therefore, collision detection should be applied.
+func labelSetToFastFingerprint(ls LabelSet) Fingerprint {
+ if len(ls) == 0 {
+ return Fingerprint(emptyLabelSignature)
+ }
+
+ var result uint64
+ for labelName, labelValue := range ls {
+ sum := hashNew()
+ sum = hashAdd(sum, string(labelName))
+ sum = hashAddByte(sum, SeparatorByte)
+ sum = hashAdd(sum, string(labelValue))
+ result ^= sum
+ }
+ return Fingerprint(result)
+}
+
+// SignatureForLabels works like LabelsToSignature but takes a Metric as
+// parameter (rather than a label map) and only includes the labels with the
+// specified LabelNames into the signature calculation. The labels passed in
+// will be sorted by this function.
+func SignatureForLabels(m Metric, labels ...LabelName) uint64 {
+ if len(labels) == 0 {
+ return emptyLabelSignature
+ }
+
+ sort.Sort(LabelNames(labels))
+
+ sum := hashNew()
+ for _, label := range labels {
+ sum = hashAdd(sum, string(label))
+ sum = hashAddByte(sum, SeparatorByte)
+ sum = hashAdd(sum, string(m[label]))
+ sum = hashAddByte(sum, SeparatorByte)
+ }
+ return sum
+}
+
+// SignatureWithoutLabels works like LabelsToSignature but takes a Metric as
+// parameter (rather than a label map) and excludes the labels with any of the
+// specified LabelNames from the signature calculation.
+func SignatureWithoutLabels(m Metric, labels map[LabelName]struct{}) uint64 {
+ if len(m) == 0 {
+ return emptyLabelSignature
+ }
+
+ labelNames := make(LabelNames, 0, len(m))
+ for labelName := range m {
+ if _, exclude := labels[labelName]; !exclude {
+ labelNames = append(labelNames, labelName)
+ }
+ }
+ if len(labelNames) == 0 {
+ return emptyLabelSignature
+ }
+ sort.Sort(labelNames)
+
+ sum := hashNew()
+ for _, labelName := range labelNames {
+ sum = hashAdd(sum, string(labelName))
+ sum = hashAddByte(sum, SeparatorByte)
+ sum = hashAdd(sum, string(m[labelName]))
+ sum = hashAddByte(sum, SeparatorByte)
+ }
+ return sum
+}
diff --git a/vendor/github.com/prometheus/common/model/silence.go b/vendor/github.com/prometheus/common/model/silence.go
new file mode 100644
index 0000000..910b0b7
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/silence.go
@@ -0,0 +1,106 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "encoding/json"
+ "fmt"
+ "regexp"
+ "time"
+)
+
+// Matcher describes how to match the value of a given label.
+type Matcher struct {
+ Name LabelName `json:"name"`
+ Value string `json:"value"`
+ IsRegex bool `json:"isRegex"`
+}
+
+func (m *Matcher) UnmarshalJSON(b []byte) error {
+ type plain Matcher
+ if err := json.Unmarshal(b, (*plain)(m)); err != nil {
+ return err
+ }
+
+ if len(m.Name) == 0 {
+ return fmt.Errorf("label name in matcher must not be empty")
+ }
+ if m.IsRegex {
+ if _, err := regexp.Compile(m.Value); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// Validate returns nil iff all fields of the matcher have valid values.
+func (m *Matcher) Validate() error {
+ if !m.Name.IsValid() {
+ return fmt.Errorf("invalid name %q", m.Name)
+ }
+ if m.IsRegex {
+ if _, err := regexp.Compile(m.Value); err != nil {
+ return fmt.Errorf("invalid regular expression %q", m.Value)
+ }
+ } else if !LabelValue(m.Value).IsValid() || len(m.Value) == 0 {
+ return fmt.Errorf("invalid value %q", m.Value)
+ }
+ return nil
+}
+
+// Silence defines the representation of a silence definition in the Prometheus
+// eco-system.
+type Silence struct {
+ ID uint64 `json:"id,omitempty"`
+
+ Matchers []*Matcher `json:"matchers"`
+
+ StartsAt time.Time `json:"startsAt"`
+ EndsAt time.Time `json:"endsAt"`
+
+ CreatedAt time.Time `json:"createdAt,omitempty"`
+ CreatedBy string `json:"createdBy"`
+ Comment string `json:"comment,omitempty"`
+}
+
+// Validate returns nil iff all fields of the silence have valid values.
+func (s *Silence) Validate() error {
+ if len(s.Matchers) == 0 {
+ return fmt.Errorf("at least one matcher required")
+ }
+ for _, m := range s.Matchers {
+ if err := m.Validate(); err != nil {
+ return fmt.Errorf("invalid matcher: %w", err)
+ }
+ }
+ if s.StartsAt.IsZero() {
+ return fmt.Errorf("start time missing")
+ }
+ if s.EndsAt.IsZero() {
+ return fmt.Errorf("end time missing")
+ }
+ if s.EndsAt.Before(s.StartsAt) {
+ return fmt.Errorf("start time must be before end time")
+ }
+ if s.CreatedBy == "" {
+ return fmt.Errorf("creator information missing")
+ }
+ if s.Comment == "" {
+ return fmt.Errorf("comment missing")
+ }
+ if s.CreatedAt.IsZero() {
+ return fmt.Errorf("creation timestamp missing")
+ }
+ return nil
+}
diff --git a/vendor/github.com/prometheus/common/model/time.go b/vendor/github.com/prometheus/common/model/time.go
new file mode 100644
index 0000000..5727452
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/time.go
@@ -0,0 +1,340 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "math"
+ "strconv"
+ "strings"
+ "time"
+)
+
+const (
+ // minimumTick is the minimum supported time resolution. It has to be
+ // at least as fine-grained as time.Second in order for the code below to work.
+ minimumTick = time.Millisecond
+ // second is the Time duration equivalent to one second.
+ second = int64(time.Second / minimumTick)
+ // The number of nanoseconds per minimum tick.
+ nanosPerTick = int64(minimumTick / time.Nanosecond)
+
+ // Earliest is the earliest Time representable. Handy for
+ // initializing a high watermark.
+ Earliest = Time(math.MinInt64)
+ // Latest is the latest Time representable. Handy for initializing
+ // a low watermark.
+ Latest = Time(math.MaxInt64)
+)
+
+// Time is the number of milliseconds since the epoch
+// (1970-01-01 00:00 UTC) excluding leap seconds.
+type Time int64
+
+// Interval describes an interval between two timestamps.
+type Interval struct {
+ Start, End Time
+}
+
+// Now returns the current time as a Time.
+func Now() Time {
+ return TimeFromUnixNano(time.Now().UnixNano())
+}
+
+// TimeFromUnix returns the Time equivalent to the Unix Time t
+// provided in seconds.
+func TimeFromUnix(t int64) Time {
+ return Time(t * second)
+}
+
+// TimeFromUnixNano returns the Time equivalent to the Unix Time
+// t provided in nanoseconds.
+func TimeFromUnixNano(t int64) Time {
+ return Time(t / nanosPerTick)
+}
+
+// Equal reports whether two Times represent the same instant.
+func (t Time) Equal(o Time) bool {
+ return t == o
+}
+
+// Before reports whether the Time t is before o.
+func (t Time) Before(o Time) bool {
+ return t < o
+}
+
+// After reports whether the Time t is after o.
+func (t Time) After(o Time) bool {
+ return t > o
+}
+
+// Add returns the Time t + d.
+func (t Time) Add(d time.Duration) Time {
+ return t + Time(d/minimumTick)
+}
+
+// Sub returns the Duration t - o.
+func (t Time) Sub(o Time) time.Duration {
+ return time.Duration(t-o) * minimumTick
+}
+
+// Time returns the time.Time representation of t.
+func (t Time) Time() time.Time {
+ return time.Unix(int64(t)/second, (int64(t)%second)*nanosPerTick)
+}
+
+// Unix returns t as a Unix time, the number of seconds elapsed
+// since January 1, 1970 UTC.
+func (t Time) Unix() int64 {
+ return int64(t) / second
+}
+
+// UnixNano returns t as a Unix time, the number of nanoseconds elapsed
+// since January 1, 1970 UTC.
+func (t Time) UnixNano() int64 {
+ return int64(t) * nanosPerTick
+}
+
+// The number of digits after the dot.
+var dotPrecision = int(math.Log10(float64(second)))
+
+// String returns a string representation of the Time.
+func (t Time) String() string {
+ return strconv.FormatFloat(float64(t)/float64(second), 'f', -1, 64)
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (t Time) MarshalJSON() ([]byte, error) {
+ return []byte(t.String()), nil
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (t *Time) UnmarshalJSON(b []byte) error {
+ p := strings.Split(string(b), ".")
+ switch len(p) {
+ case 1:
+ v, err := strconv.ParseInt(string(p[0]), 10, 64)
+ if err != nil {
+ return err
+ }
+ *t = Time(v * second)
+
+ case 2:
+ v, err := strconv.ParseInt(string(p[0]), 10, 64)
+ if err != nil {
+ return err
+ }
+ v *= second
+
+ prec := dotPrecision - len(p[1])
+ if prec < 0 {
+ p[1] = p[1][:dotPrecision]
+ } else if prec > 0 {
+ p[1] = p[1] + strings.Repeat("0", prec)
+ }
+
+ va, err := strconv.ParseInt(p[1], 10, 32)
+ if err != nil {
+ return err
+ }
+
+ // If the value was something like -0.1, the negative sign is lost in the
+ // parsing because of the leading zero; this ensures that we capture it.
+ if len(p[0]) > 0 && p[0][0] == '-' && v+va > 0 {
+ *t = Time(v+va) * -1
+ } else {
+ *t = Time(v + va)
+ }
+
+ default:
+ return fmt.Errorf("invalid time %q", string(b))
+ }
+ return nil
+}
+
+// Duration wraps time.Duration. It is used to parse the custom duration format
+// from YAML.
+// This type should not propagate beyond the scope of input/output processing.
+type Duration time.Duration
+
+// Set implements pflag/flag.Value
+func (d *Duration) Set(s string) error {
+ var err error
+ *d, err = ParseDuration(s)
+ return err
+}
+
+// Type implements pflag.Value
+func (d *Duration) Type() string {
+ return "duration"
+}
+
+func isdigit(c byte) bool { return c >= '0' && c <= '9' }
+
+// Units are required to go in order from biggest to smallest.
+// This guards against confusion from "1m1d" being 1 minute + 1 day, not 1 month + 1 day.
+var unitMap = map[string]struct {
+ pos int
+ mult uint64
+}{
+ "ms": {7, uint64(time.Millisecond)},
+ "s": {6, uint64(time.Second)},
+ "m": {5, uint64(time.Minute)},
+ "h": {4, uint64(time.Hour)},
+ "d": {3, uint64(24 * time.Hour)},
+ "w": {2, uint64(7 * 24 * time.Hour)},
+ "y": {1, uint64(365 * 24 * time.Hour)},
+}
+
+// ParseDuration parses a string into a Duration, assuming that a year
+// always has 365d, a week always has 7d, and a day always has 24h.
+func ParseDuration(s string) (Duration, error) {
+ switch s {
+ case "0":
+ // Allow 0 without a unit.
+ return 0, nil
+ case "":
+ return 0, errors.New("empty duration string")
+ }
+
+ orig := s
+ var dur uint64
+ lastUnitPos := 0
+
+ for s != "" {
+ if !isdigit(s[0]) {
+ return 0, fmt.Errorf("not a valid duration string: %q", orig)
+ }
+ // Consume [0-9]*
+ i := 0
+ for ; i < len(s) && isdigit(s[i]); i++ {
+ }
+ v, err := strconv.ParseUint(s[:i], 10, 0)
+ if err != nil {
+ return 0, fmt.Errorf("not a valid duration string: %q", orig)
+ }
+ s = s[i:]
+
+ // Consume unit.
+ for i = 0; i < len(s) && !isdigit(s[i]); i++ {
+ }
+ if i == 0 {
+ return 0, fmt.Errorf("not a valid duration string: %q", orig)
+ }
+ u := s[:i]
+ s = s[i:]
+ unit, ok := unitMap[u]
+ if !ok {
+ return 0, fmt.Errorf("unknown unit %q in duration %q", u, orig)
+ }
+ if unit.pos <= lastUnitPos { // Units must go in order from biggest to smallest.
+ return 0, fmt.Errorf("not a valid duration string: %q", orig)
+ }
+ lastUnitPos = unit.pos
+ // Check if the provided duration overflows time.Duration (> ~ 290years).
+ if v > 1<<63/unit.mult {
+ return 0, errors.New("duration out of range")
+ }
+ dur += v * unit.mult
+ if dur > 1<<63-1 {
+ return 0, errors.New("duration out of range")
+ }
+ }
+ return Duration(dur), nil
+}
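+
+// A minimal sketch of accepted and rejected inputs, assuming the unit table
+// above:
+//
+//	ParseDuration("1h30m") // 1h30m, nil
+//	ParseDuration("2d")    // 2d, nil
+//	ParseDuration("1m1d")  // error: units must go from biggest to smallest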
+
+func (d Duration) String() string {
+ var (
+ ms = int64(time.Duration(d) / time.Millisecond)
+ r = ""
+ )
+ if ms == 0 {
+ return "0s"
+ }
+
+ f := func(unit string, mult int64, exact bool) {
+ if exact && ms%mult != 0 {
+ return
+ }
+ if v := ms / mult; v > 0 {
+ r += fmt.Sprintf("%d%s", v, unit)
+ ms -= v * mult
+ }
+ }
+
+ // Only format years and weeks if the remainder is zero, as it is often
+ // easier to read 90d than 12w6d.
+ f("y", 1000*60*60*24*365, true)
+ f("w", 1000*60*60*24*7, true)
+
+ f("d", 1000*60*60*24, false)
+ f("h", 1000*60*60, false)
+ f("m", 1000*60, false)
+ f("s", 1000, false)
+ f("ms", 1, false)
+
+ return r
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (d Duration) MarshalJSON() ([]byte, error) {
+ return json.Marshal(d.String())
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (d *Duration) UnmarshalJSON(bytes []byte) error {
+ var s string
+ if err := json.Unmarshal(bytes, &s); err != nil {
+ return err
+ }
+ dur, err := ParseDuration(s)
+ if err != nil {
+ return err
+ }
+ *d = dur
+ return nil
+}
+
+// MarshalText implements the encoding.TextMarshaler interface.
+func (d *Duration) MarshalText() ([]byte, error) {
+ return []byte(d.String()), nil
+}
+
+// UnmarshalText implements the encoding.TextUnmarshaler interface.
+func (d *Duration) UnmarshalText(text []byte) error {
+ var err error
+ *d, err = ParseDuration(string(text))
+ return err
+}
+
+// MarshalYAML implements the yaml.Marshaler interface.
+func (d Duration) MarshalYAML() (interface{}, error) {
+ return d.String(), nil
+}
+
+// UnmarshalYAML implements the yaml.Unmarshaler interface.
+func (d *Duration) UnmarshalYAML(unmarshal func(interface{}) error) error {
+ var s string
+ if err := unmarshal(&s); err != nil {
+ return err
+ }
+ dur, err := ParseDuration(s)
+ if err != nil {
+ return err
+ }
+ *d = dur
+ return nil
+}
diff --git a/vendor/github.com/prometheus/common/model/value.go b/vendor/github.com/prometheus/common/model/value.go
new file mode 100644
index 0000000..8050637
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/value.go
@@ -0,0 +1,364 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "encoding/json"
+ "fmt"
+ "sort"
+ "strconv"
+ "strings"
+)
+
+// ZeroSample is the pseudo zero-value of Sample used to signal a
+// non-existing sample. It is a Sample with timestamp Earliest, value 0.0,
+// and metric nil. Note that the natural zero value of Sample has a timestamp
+// of 0, which is possible to appear in a real Sample and thus not suitable
+// to signal a non-existing Sample.
+var ZeroSample = Sample{Timestamp: Earliest}
+
+// Sample is a sample pair associated with a metric. A single sample must either
+// define Value or Histogram but not both. Histogram == nil implies the Value
+// field is used, otherwise it should be ignored.
+type Sample struct {
+ Metric Metric `json:"metric"`
+ Value SampleValue `json:"value"`
+ Timestamp Time `json:"timestamp"`
+ Histogram *SampleHistogram `json:"histogram"`
+}
+
+// Equal compares first the metrics, then the timestamp, then the value. The
+// semantics of value equality is defined by SampleValue.Equal.
+func (s *Sample) Equal(o *Sample) bool {
+ if s == o {
+ return true
+ }
+
+ if !s.Metric.Equal(o.Metric) {
+ return false
+ }
+ if !s.Timestamp.Equal(o.Timestamp) {
+ return false
+ }
+ if s.Histogram != nil {
+ return s.Histogram.Equal(o.Histogram)
+ }
+ return s.Value.Equal(o.Value)
+}
+
+func (s Sample) String() string {
+ if s.Histogram != nil {
+ return fmt.Sprintf("%s => %s", s.Metric, SampleHistogramPair{
+ Timestamp: s.Timestamp,
+ Histogram: s.Histogram,
+ })
+ }
+ return fmt.Sprintf("%s => %s", s.Metric, SamplePair{
+ Timestamp: s.Timestamp,
+ Value: s.Value,
+ })
+}
+
+// MarshalJSON implements json.Marshaler.
+func (s Sample) MarshalJSON() ([]byte, error) {
+ if s.Histogram != nil {
+ v := struct {
+ Metric Metric `json:"metric"`
+ Histogram SampleHistogramPair `json:"histogram"`
+ }{
+ Metric: s.Metric,
+ Histogram: SampleHistogramPair{
+ Timestamp: s.Timestamp,
+ Histogram: s.Histogram,
+ },
+ }
+ return json.Marshal(&v)
+ }
+ v := struct {
+ Metric Metric `json:"metric"`
+ Value SamplePair `json:"value"`
+ }{
+ Metric: s.Metric,
+ Value: SamplePair{
+ Timestamp: s.Timestamp,
+ Value: s.Value,
+ },
+ }
+ return json.Marshal(&v)
+}
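+
+// A minimal sketch of the JSON shape produced above for a float sample,
+// assuming the example metric and values below:
+//
+//	{"metric":{"__name__":"up"},"value":[1435781451.781,"1"]}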
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (s *Sample) UnmarshalJSON(b []byte) error {
+ v := struct {
+ Metric Metric `json:"metric"`
+ Value SamplePair `json:"value"`
+ Histogram SampleHistogramPair `json:"histogram"`
+ }{
+ Metric: s.Metric,
+ Value: SamplePair{
+ Timestamp: s.Timestamp,
+ Value: s.Value,
+ },
+ Histogram: SampleHistogramPair{
+ Timestamp: s.Timestamp,
+ Histogram: s.Histogram,
+ },
+ }
+
+ if err := json.Unmarshal(b, &v); err != nil {
+ return err
+ }
+
+ s.Metric = v.Metric
+ if v.Histogram.Histogram != nil {
+ s.Timestamp = v.Histogram.Timestamp
+ s.Histogram = v.Histogram.Histogram
+ } else {
+ s.Timestamp = v.Value.Timestamp
+ s.Value = v.Value.Value
+ }
+
+ return nil
+}
+
+// Samples is a sortable Sample slice. It implements sort.Interface.
+type Samples []*Sample
+
+func (s Samples) Len() int {
+ return len(s)
+}
+
+// Less compares first the metrics, then the timestamp.
+func (s Samples) Less(i, j int) bool {
+ switch {
+ case s[i].Metric.Before(s[j].Metric):
+ return true
+ case s[j].Metric.Before(s[i].Metric):
+ return false
+ case s[i].Timestamp.Before(s[j].Timestamp):
+ return true
+ default:
+ return false
+ }
+}
+
+func (s Samples) Swap(i, j int) {
+ s[i], s[j] = s[j], s[i]
+}
+
+// Equal compares two sets of samples and returns true if they are equal.
+func (s Samples) Equal(o Samples) bool {
+ if len(s) != len(o) {
+ return false
+ }
+
+ for i, sample := range s {
+ if !sample.Equal(o[i]) {
+ return false
+ }
+ }
+ return true
+}
+
+// SampleStream is a stream of Values belonging to an attached Metric.
+type SampleStream struct {
+ Metric Metric `json:"metric"`
+ Values []SamplePair `json:"values"`
+ Histograms []SampleHistogramPair `json:"histograms"`
+}
+
+func (ss SampleStream) String() string {
+ valuesLength := len(ss.Values)
+ vals := make([]string, valuesLength+len(ss.Histograms))
+ for i, v := range ss.Values {
+ vals[i] = v.String()
+ }
+ for i, v := range ss.Histograms {
+ vals[i+valuesLength] = v.String()
+ }
+ return fmt.Sprintf("%s =>\n%s", ss.Metric, strings.Join(vals, "\n"))
+}
+
+func (ss SampleStream) MarshalJSON() ([]byte, error) {
+ if len(ss.Histograms) > 0 && len(ss.Values) > 0 {
+ v := struct {
+ Metric Metric `json:"metric"`
+ Values []SamplePair `json:"values"`
+ Histograms []SampleHistogramPair `json:"histograms"`
+ }{
+ Metric: ss.Metric,
+ Values: ss.Values,
+ Histograms: ss.Histograms,
+ }
+ return json.Marshal(&v)
+ } else if len(ss.Histograms) > 0 {
+ v := struct {
+ Metric Metric `json:"metric"`
+ Histograms []SampleHistogramPair `json:"histograms"`
+ }{
+ Metric: ss.Metric,
+ Histograms: ss.Histograms,
+ }
+ return json.Marshal(&v)
+ } else {
+ v := struct {
+ Metric Metric `json:"metric"`
+ Values []SamplePair `json:"values"`
+ }{
+ Metric: ss.Metric,
+ Values: ss.Values,
+ }
+ return json.Marshal(&v)
+ }
+}
+
+func (ss *SampleStream) UnmarshalJSON(b []byte) error {
+ v := struct {
+ Metric Metric `json:"metric"`
+ Values []SamplePair `json:"values"`
+ Histograms []SampleHistogramPair `json:"histograms"`
+ }{
+ Metric: ss.Metric,
+ Values: ss.Values,
+ Histograms: ss.Histograms,
+ }
+
+ if err := json.Unmarshal(b, &v); err != nil {
+ return err
+ }
+
+ ss.Metric = v.Metric
+ ss.Values = v.Values
+ ss.Histograms = v.Histograms
+
+ return nil
+}
+
+// Scalar is a scalar value evaluated at the set timestamp.
+type Scalar struct {
+ Value SampleValue `json:"value"`
+ Timestamp Time `json:"timestamp"`
+}
+
+func (s Scalar) String() string {
+ return fmt.Sprintf("scalar: %v @[%v]", s.Value, s.Timestamp)
+}
+
+// MarshalJSON implements json.Marshaler.
+func (s Scalar) MarshalJSON() ([]byte, error) {
+ v := strconv.FormatFloat(float64(s.Value), 'f', -1, 64)
+ return json.Marshal([...]interface{}{s.Timestamp, string(v)})
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (s *Scalar) UnmarshalJSON(b []byte) error {
+ var f string
+ v := [...]interface{}{&s.Timestamp, &f}
+
+ if err := json.Unmarshal(b, &v); err != nil {
+ return err
+ }
+
+ value, err := strconv.ParseFloat(f, 64)
+ if err != nil {
+ return fmt.Errorf("error parsing sample value: %w", err)
+ }
+ s.Value = SampleValue(value)
+ return nil
+}
+
+// String is a string value evaluated at the set timestamp.
+type String struct {
+ Value string `json:"value"`
+ Timestamp Time `json:"timestamp"`
+}
+
+func (s *String) String() string {
+ return s.Value
+}
+
+// MarshalJSON implements json.Marshaler.
+func (s String) MarshalJSON() ([]byte, error) {
+ return json.Marshal([]interface{}{s.Timestamp, s.Value})
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (s *String) UnmarshalJSON(b []byte) error {
+ v := [...]interface{}{&s.Timestamp, &s.Value}
+ return json.Unmarshal(b, &v)
+}
+
+// Vector is basically only an alias for Samples, but the
+// contract is that in a Vector, all Samples have the same timestamp.
+type Vector []*Sample
+
+func (vec Vector) String() string {
+ entries := make([]string, len(vec))
+ for i, s := range vec {
+ entries[i] = s.String()
+ }
+ return strings.Join(entries, "\n")
+}
+
+func (vec Vector) Len() int { return len(vec) }
+func (vec Vector) Swap(i, j int) { vec[i], vec[j] = vec[j], vec[i] }
+
+// Less compares first the metrics, then the timestamp.
+func (vec Vector) Less(i, j int) bool {
+ switch {
+ case vec[i].Metric.Before(vec[j].Metric):
+ return true
+ case vec[j].Metric.Before(vec[i].Metric):
+ return false
+ case vec[i].Timestamp.Before(vec[j].Timestamp):
+ return true
+ default:
+ return false
+ }
+}
+
+// Equal compares two sets of samples and returns true if they are equal.
+func (vec Vector) Equal(o Vector) bool {
+ if len(vec) != len(o) {
+ return false
+ }
+
+ for i, sample := range vec {
+ if !sample.Equal(o[i]) {
+ return false
+ }
+ }
+ return true
+}
+
+// Matrix is a list of time series.
+type Matrix []*SampleStream
+
+func (m Matrix) Len() int { return len(m) }
+func (m Matrix) Less(i, j int) bool { return m[i].Metric.Before(m[j].Metric) }
+func (m Matrix) Swap(i, j int) { m[i], m[j] = m[j], m[i] }
+
+func (mat Matrix) String() string {
+ matCp := make(Matrix, len(mat))
+ copy(matCp, mat)
+ sort.Sort(matCp)
+
+ strs := make([]string, len(matCp))
+
+ for i, ss := range matCp {
+ strs[i] = ss.String()
+ }
+
+ return strings.Join(strs, "\n")
+}
diff --git a/vendor/github.com/prometheus/common/model/value_float.go b/vendor/github.com/prometheus/common/model/value_float.go
new file mode 100644
index 0000000..ae35cc2
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/value_float.go
@@ -0,0 +1,98 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "encoding/json"
+ "fmt"
+ "math"
+ "strconv"
+)
+
+// ZeroSamplePair is the pseudo zero-value of SamplePair used to signal a
+// non-existing sample pair. It is a SamplePair with timestamp Earliest and
+// value 0.0. Note that the natural zero value of SamplePair has a timestamp
+// of 0, which is possible to appear in a real SamplePair and thus not
+// suitable to signal a non-existing SamplePair.
+var ZeroSamplePair = SamplePair{Timestamp: Earliest}
+
+// A SampleValue is a representation of a value for a given sample at a given
+// time.
+type SampleValue float64
+
+// MarshalJSON implements json.Marshaler.
+func (v SampleValue) MarshalJSON() ([]byte, error) {
+ return json.Marshal(v.String())
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (v *SampleValue) UnmarshalJSON(b []byte) error {
+ if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' {
+ return fmt.Errorf("sample value must be a quoted string")
+ }
+ f, err := strconv.ParseFloat(string(b[1:len(b)-1]), 64)
+ if err != nil {
+ return err
+ }
+ *v = SampleValue(f)
+ return nil
+}
+
+// Equal returns true if the values of v and o are equal or if both are NaN. Note
+// that v==o is false if both are NaN. If you want the conventional float
+// behavior, use == to compare two SampleValues.
+func (v SampleValue) Equal(o SampleValue) bool {
+ if v == o {
+ return true
+ }
+ return math.IsNaN(float64(v)) && math.IsNaN(float64(o))
+}
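+
+// A minimal sketch of the NaN semantics above:
+//
+//	nan := SampleValue(math.NaN())
+//	nan.Equal(nan) // true
+//	nan == nan     // false: conventional float comparison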
+
+func (v SampleValue) String() string {
+ return strconv.FormatFloat(float64(v), 'f', -1, 64)
+}
+
+// SamplePair pairs a SampleValue with a Timestamp.
+type SamplePair struct {
+ Timestamp Time
+ Value SampleValue
+}
+
+func (s SamplePair) MarshalJSON() ([]byte, error) {
+ t, err := json.Marshal(s.Timestamp)
+ if err != nil {
+ return nil, err
+ }
+ v, err := json.Marshal(s.Value)
+ if err != nil {
+ return nil, err
+ }
+ return []byte(fmt.Sprintf("[%s,%s]", t, v)), nil
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (s *SamplePair) UnmarshalJSON(b []byte) error {
+ v := [...]json.Unmarshaler{&s.Timestamp, &s.Value}
+ return json.Unmarshal(b, &v)
+}
+
+// Equal returns true if this SamplePair and o have equal Values and equal
+// Timestamps. The semantics of Value equality is defined by SampleValue.Equal.
+func (s *SamplePair) Equal(o *SamplePair) bool {
+ return s == o || (s.Value.Equal(o.Value) && s.Timestamp.Equal(o.Timestamp))
+}
+
+func (s SamplePair) String() string {
+ return fmt.Sprintf("%s @[%s]", s.Value, s.Timestamp)
+}
diff --git a/vendor/github.com/prometheus/common/model/value_histogram.go b/vendor/github.com/prometheus/common/model/value_histogram.go
new file mode 100644
index 0000000..54bb038
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/value_histogram.go
@@ -0,0 +1,178 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "encoding/json"
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+type FloatString float64
+
+func (v FloatString) String() string {
+ return strconv.FormatFloat(float64(v), 'f', -1, 64)
+}
+
+func (v FloatString) MarshalJSON() ([]byte, error) {
+ return json.Marshal(v.String())
+}
+
+func (v *FloatString) UnmarshalJSON(b []byte) error {
+ if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' {
+ return fmt.Errorf("float value must be a quoted string")
+ }
+ f, err := strconv.ParseFloat(string(b[1:len(b)-1]), 64)
+ if err != nil {
+ return err
+ }
+ *v = FloatString(f)
+ return nil
+}
+
+type HistogramBucket struct {
+ Boundaries int32
+ Lower FloatString
+ Upper FloatString
+ Count FloatString
+}
+
+func (s HistogramBucket) MarshalJSON() ([]byte, error) {
+ b, err := json.Marshal(s.Boundaries)
+ if err != nil {
+ return nil, err
+ }
+ l, err := json.Marshal(s.Lower)
+ if err != nil {
+ return nil, err
+ }
+ u, err := json.Marshal(s.Upper)
+ if err != nil {
+ return nil, err
+ }
+ c, err := json.Marshal(s.Count)
+ if err != nil {
+ return nil, err
+ }
+ return []byte(fmt.Sprintf("[%s,%s,%s,%s]", b, l, u, c)), nil
+}
+
+func (s *HistogramBucket) UnmarshalJSON(buf []byte) error {
+ tmp := []interface{}{&s.Boundaries, &s.Lower, &s.Upper, &s.Count}
+ wantLen := len(tmp)
+ if err := json.Unmarshal(buf, &tmp); err != nil {
+ return err
+ }
+ if gotLen := len(tmp); gotLen != wantLen {
+ return fmt.Errorf("wrong number of fields: %d != %d", gotLen, wantLen)
+ }
+ return nil
+}
+
+func (s *HistogramBucket) Equal(o *HistogramBucket) bool {
+ return s == o || (s.Boundaries == o.Boundaries && s.Lower == o.Lower && s.Upper == o.Upper && s.Count == o.Count)
+}
+
+func (b HistogramBucket) String() string {
+ var sb strings.Builder
+ lowerInclusive := b.Boundaries == 1 || b.Boundaries == 3
+ upperInclusive := b.Boundaries == 0 || b.Boundaries == 3
+ if lowerInclusive {
+ sb.WriteRune('[')
+ } else {
+ sb.WriteRune('(')
+ }
+ fmt.Fprintf(&sb, "%g,%g", b.Lower, b.Upper)
+ if upperInclusive {
+ sb.WriteRune(']')
+ } else {
+ sb.WriteRune(')')
+ }
+ fmt.Fprintf(&sb, ":%v", b.Count)
+ return sb.String()
+}
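+
+// A minimal sketch of the rendering above, assuming the example bucket below;
+// Boundaries 0 means the upper bound is inclusive and the lower is not:
+//
+//	HistogramBucket{Boundaries: 0, Lower: 0.5, Upper: 1, Count: 3}.String() // "(0.5,1]:3"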
+
+type HistogramBuckets []*HistogramBucket
+
+func (s HistogramBuckets) Equal(o HistogramBuckets) bool {
+ if len(s) != len(o) {
+ return false
+ }
+
+ for i, bucket := range s {
+ if !bucket.Equal(o[i]) {
+ return false
+ }
+ }
+ return true
+}
+
+type SampleHistogram struct {
+ Count FloatString `json:"count"`
+ Sum FloatString `json:"sum"`
+ Buckets HistogramBuckets `json:"buckets"`
+}
+
+func (s SampleHistogram) String() string {
+ return fmt.Sprintf("Count: %f, Sum: %f, Buckets: %v", s.Count, s.Sum, s.Buckets)
+}
+
+func (s *SampleHistogram) Equal(o *SampleHistogram) bool {
+ return s == o || (s.Count == o.Count && s.Sum == o.Sum && s.Buckets.Equal(o.Buckets))
+}
+
+type SampleHistogramPair struct {
+ Timestamp Time
+ // Histogram should never be nil; it's only stored as a pointer for efficiency.
+ Histogram *SampleHistogram
+}
+
+func (s SampleHistogramPair) MarshalJSON() ([]byte, error) {
+ if s.Histogram == nil {
+ return nil, fmt.Errorf("histogram is nil")
+ }
+ t, err := json.Marshal(s.Timestamp)
+ if err != nil {
+ return nil, err
+ }
+ v, err := json.Marshal(s.Histogram)
+ if err != nil {
+ return nil, err
+ }
+ return []byte(fmt.Sprintf("[%s,%s]", t, v)), nil
+}
+
+func (s *SampleHistogramPair) UnmarshalJSON(buf []byte) error {
+ tmp := []interface{}{&s.Timestamp, &s.Histogram}
+ wantLen := len(tmp)
+ if err := json.Unmarshal(buf, &tmp); err != nil {
+ return err
+ }
+ if gotLen := len(tmp); gotLen != wantLen {
+ return fmt.Errorf("wrong number of fields: %d != %d", gotLen, wantLen)
+ }
+ if s.Histogram == nil {
+ return fmt.Errorf("histogram is null")
+ }
+ return nil
+}
+
+func (s SampleHistogramPair) String() string {
+ return fmt.Sprintf("%s @[%s]", s.Histogram, s.Timestamp)
+}
+
+func (s *SampleHistogramPair) Equal(o *SampleHistogramPair) bool {
+ return s == o || (s.Histogram.Equal(o.Histogram) && s.Timestamp.Equal(o.Timestamp))
+}
diff --git a/vendor/github.com/prometheus/common/model/value_type.go b/vendor/github.com/prometheus/common/model/value_type.go
new file mode 100644
index 0000000..726c50e
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/value_type.go
@@ -0,0 +1,83 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "encoding/json"
+ "fmt"
+)
+
+// Value is a generic interface for values resulting from a query evaluation.
+type Value interface {
+ Type() ValueType
+ String() string
+}
+
+func (Matrix) Type() ValueType { return ValMatrix }
+func (Vector) Type() ValueType { return ValVector }
+func (*Scalar) Type() ValueType { return ValScalar }
+func (*String) Type() ValueType { return ValString }
+
+type ValueType int
+
+const (
+ ValNone ValueType = iota
+ ValScalar
+ ValVector
+ ValMatrix
+ ValString
+)
+
+// MarshalJSON implements json.Marshaler.
+func (et ValueType) MarshalJSON() ([]byte, error) {
+ return json.Marshal(et.String())
+}
+
+func (et *ValueType) UnmarshalJSON(b []byte) error {
+ var s string
+ if err := json.Unmarshal(b, &s); err != nil {
+ return err
+ }
+ switch s {
+ case "":
+ *et = ValNone
+ case "scalar":
+ *et = ValScalar
+ case "vector":
+ *et = ValVector
+ case "matrix":
+ *et = ValMatrix
+ case "string":
+ *et = ValString
+ default:
+ return fmt.Errorf("unknown value type %q", s)
+ }
+ return nil
+}
+
+func (e ValueType) String() string {
+ switch e {
+ case ValNone:
+ return ""
+ case ValScalar:
+ return "scalar"
+ case ValVector:
+ return "vector"
+ case ValMatrix:
+ return "matrix"
+ case ValString:
+ return "string"
+ }
+ panic("ValueType.String: unhandled value type")
+}
diff --git a/vendor/github.com/prometheus/procfs/.gitignore b/vendor/github.com/prometheus/procfs/.gitignore
new file mode 100644
index 0000000..7cc33ae
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/.gitignore
@@ -0,0 +1,2 @@
+/testdata/fixtures/
+/fixtures
diff --git a/vendor/github.com/prometheus/procfs/.golangci.yml b/vendor/github.com/prometheus/procfs/.golangci.yml
new file mode 100644
index 0000000..126df9e
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/.golangci.yml
@@ -0,0 +1,22 @@
+---
+linters:
+ enable:
+ - errcheck
+ - godot
+ - gosimple
+ - govet
+ - ineffassign
+ - misspell
+ - revive
+ - staticcheck
+ - testifylint
+ - unused
+
+linters-settings:
+ godot:
+ capital: true
+ exclude:
+ # Ignore "See: URL"
+ - 'See:'
+ misspell:
+ locale: US
diff --git a/vendor/github.com/prometheus/procfs/CODE_OF_CONDUCT.md b/vendor/github.com/prometheus/procfs/CODE_OF_CONDUCT.md
new file mode 100644
index 0000000..d325872
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/CODE_OF_CONDUCT.md
@@ -0,0 +1,3 @@
+# Prometheus Community Code of Conduct
+
+Prometheus follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/main/code-of-conduct.md).
diff --git a/vendor/github.com/prometheus/procfs/CONTRIBUTING.md b/vendor/github.com/prometheus/procfs/CONTRIBUTING.md
new file mode 100644
index 0000000..853eb9d
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/CONTRIBUTING.md
@@ -0,0 +1,121 @@
+# Contributing
+
+Prometheus uses GitHub to manage reviews of pull requests.
+
+* If you are a new contributor see: [Steps to Contribute](#steps-to-contribute)
+
+* If you have a trivial fix or improvement, go ahead and create a pull request,
+ addressing (with `@...`) a suitable maintainer of this repository (see
+ [MAINTAINERS.md](MAINTAINERS.md)) in the description of the pull request.
+
+* If you plan to do something more involved, first discuss your ideas
+ on our [mailing list](https://groups.google.com/forum/?fromgroups#!forum/prometheus-developers).
+ This will avoid unnecessary work and surely give you and us a good deal
+ of inspiration. Also please see our [non-goals issue](https://github.com/prometheus/docs/issues/149) on areas that the Prometheus community doesn't plan to work on.
+
+* Relevant coding style guidelines are the [Go Code Review
+ Comments](https://code.google.com/p/go-wiki/wiki/CodeReviewComments)
+ and the _Formatting and style_ section of Peter Bourgon's [Go: Best
+ Practices for Production
+ Environments](https://peter.bourgon.org/go-in-production/#formatting-and-style).
+
+* Be sure to sign off on the [DCO](https://github.com/probot/dco#how-it-works)
+
+## Steps to Contribute
+
+Should you wish to work on an issue, please claim it first by commenting on the GitHub issue to say that you want to work on it. This prevents duplicated effort by contributors on the same issue.
+
+Please check the [`help-wanted`](https://github.com/prometheus/procfs/issues?q=is%3Aissue+is%3Aopen+label%3A%22help+wanted%22) label to find issues that are good for getting started. If you have questions about one of the issues, with or without the tag, please comment on it and one of the maintainers will clarify it. For a quicker response, contact us over [IRC](https://prometheus.io/community).
+
+For quickly compiling and testing your changes do:
+```
+make test # Make sure all the tests pass before you commit and push :)
+```
+
+We use [`golangci-lint`](https://github.com/golangci/golangci-lint) for linting the code. If it reports an issue and you think that the warning needs to be disregarded or is a false-positive, you can add a special comment `//nolint:linter1[,linter2,...]` before the offending line. Use this sparingly though, fixing the code to comply with the linter's recommendation is in general the preferred course of action.
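+
+As a small, hedged illustration of that directive (the file path and function below are invented for the example; `errcheck` is one of the linters enabled in `.golangci.yml`), the `//nolint` comment goes on the offending line, lists the linters to silence, and ideally carries a short justification:
+
+```go
+package main
+
+import "os"
+
+func main() {
+ f, err := os.Create("/tmp/example.txt") // hypothetical path, for illustration only
+ if err != nil {
+  return
+ }
+ // errcheck would normally flag the unchecked error from the deferred Close;
+ // the directive below disregards it for this line only.
+ defer f.Close() //nolint:errcheck // close error is not actionable here
+}
+```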
+
+## Pull Request Checklist
+
+* Branch from the master branch and, if needed, rebase to the current master branch before submitting your pull request. If it doesn't merge cleanly with master you may be asked to rebase your changes.
+
+* Commits should be as small as possible, while ensuring that each commit is correct independently (i.e., each commit should compile and pass tests).
+
+* If your patch is not getting reviewed or you need a specific person to review it, you can @-reply a reviewer asking for a review in the pull request or a comment, or you can ask for a review on IRC channel [#prometheus](https://webchat.freenode.net/?channels=#prometheus) on irc.freenode.net (for the easiest start, [join via Riot](https://riot.im/app/#/room/#prometheus:matrix.org)).
+
+* Add tests relevant to the fixed bug or new feature.
+
+## Dependency management
+
+The Prometheus project uses [Go modules](https://golang.org/cmd/go/#hdr-Modules__module_versions__and_more) to manage dependencies on external packages. This requires a working Go environment with version 1.12 or greater installed.
+
+All dependencies are vendored in the `vendor/` directory.
+
+To add or update a new dependency, use the `go get` command:
+
+```bash
+# Pick the latest tagged release.
+go get example.com/some/module/pkg
+
+# Pick a specific version.
+go get example.com/some/module/pkg@vX.Y.Z
+```
+
+Tidy up the `go.mod` and `go.sum` files and copy the new/updated dependency to the `vendor/` directory:
+
+
+```bash
+# The GO111MODULE variable can be omitted when the code isn't located in GOPATH.
+GO111MODULE=on go mod tidy
+
+GO111MODULE=on go mod vendor
+```
+
+You have to commit the changes to `go.mod`, `go.sum` and the `vendor/` directory before submitting the pull request.
+
+
+## API Implementation Guidelines
+
+### Naming and Documentation
+
+Public functions and structs should normally be named according to the file(s) being read and parsed. For example,
+the `fs.BuddyInfo()` function reads the file `/proc/buddyinfo`. In addition, the godoc for each public function
+should contain the path to the file(s) being read and a URL of the linux kernel documentation describing the file(s).
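+
+As a non-authoritative sketch of what the guideline asks for, here is the `BuddyInfo` function (vendored later in this same diff) with its doc comment expanded to name the file read and a kernel documentation URL:
+
+```go
+// BuddyInfo reads the buddyinfo statistics from /proc/buddyinfo.
+// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt
+// for a description of the file format.
+func (fs FS) BuddyInfo() ([]BuddyInfo, error) {
+ file, err := os.Open(fs.proc.Path("buddyinfo"))
+ if err != nil {
+  return nil, err
+ }
+ defer file.Close()
+ return parseBuddyInfo(file)
+}
+```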
+
+### Reading vs. Parsing
+
+Most functionality in this library consists of reading files and then parsing the text into structured data. In most
+cases reading and parsing should be separated into different functions/methods with a public `fs.Thing()` method and
+a private `parseThing(r Reader)` function. This provides a logical separation and allows parsing to be tested
+directly without the need to read from the filesystem. Using a `Reader` argument is preferred over other data types
+such as `string` or `*File` because it provides the most flexibility regarding the data source. When a set of files
+in a directory needs to be parsed, then a `path` string parameter to the parse function can be used instead.
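+
+A hedged sketch of this split, reusing the placeholder `Thing`/`parseThing` names from above (the type and its field are invented; imports are omitted for brevity; `util.ReadFileNoStat` and `fs.proc.Path` are used as in the parsers vendored elsewhere in this diff):
+
+```go
+// Thing is a placeholder result type for this sketch.
+type Thing struct {
+ Widgets uint64
+}
+
+// Thing reads /proc/thing and hands the contents to the parser.
+func (fs FS) Thing() (Thing, error) {
+ data, err := util.ReadFileNoStat(fs.proc.Path("thing"))
+ if err != nil {
+  return Thing{}, err
+ }
+ return parseThing(bytes.NewReader(data))
+}
+
+// parseThing does the actual parsing, so it can be unit tested against
+// fixture data without touching the filesystem.
+func parseThing(r io.Reader) (Thing, error) {
+ var t Thing
+ s := bufio.NewScanner(r)
+ for s.Scan() {
+  // ... parse one line of /proc/thing into t ...
+ }
+ return t, s.Err()
+}
+```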
+
+### /proc and /sys filesystem I/O
+
+The `proc` and `sys` filesystems are pseudo file systems and work a bit differently from standard disk I/O.
+Many of the files are changing continuously and the data being read can in some cases change between subsequent
+reads in the same file. Also, most of the files are relatively small (less than a few KBs), and system calls
+to the `stat` function will often return the wrong size. Therefore, for most files it's recommended to read the
+full file in a single operation using an internal utility function called `util.ReadFileNoStat`.
+This function is similar to `os.ReadFile`, but it avoids the system call to `stat` to get the current size of
+the file.
+
+Note that parsing the file's contents can still be performed one line at a time. This is done by first reading
+the full file, and then using a scanner on the `[]byte` or `string` containing the data.
+
+```
+ data, err := util.ReadFileNoStat("/proc/cpuinfo")
+ if err != nil {
+ return err
+ }
+ reader := bytes.NewReader(data)
+ scanner := bufio.NewScanner(reader)
+```
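+
+Continuing that fragment, the lines can then be processed one at a time; checking `scanner.Err()` afterwards is a sketch of the usual pattern (the loop body here is elided):
+
+```
+ for scanner.Scan() {
+  line := scanner.Text()
+  // ... handle one line of /proc/cpuinfo ...
+ }
+ if err := scanner.Err(); err != nil {
+  return err
+ }
+```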
+
+The `/sys` filesystem contains many very small files which contain only a single numeric or text value. These files
+can be read using an internal function called `util.SysReadFile` which is similar to `os.ReadFile` but does
+not bother to check the size of the file before reading.
+```
+ data, err := util.SysReadFile("/sys/class/power_supply/BAT0/capacity")
+```
+
diff --git a/vendor/github.com/prometheus/procfs/LICENSE b/vendor/github.com/prometheus/procfs/LICENSE
new file mode 100644
index 0000000..261eeb9
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/prometheus/procfs/MAINTAINERS.md b/vendor/github.com/prometheus/procfs/MAINTAINERS.md
new file mode 100644
index 0000000..e00f3b3
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/MAINTAINERS.md
@@ -0,0 +1,3 @@
+* Johannes 'fish' Ziemke @discordianfish
+* Paul Gier @pgier
+* Ben Kochie @SuperQ
diff --git a/vendor/github.com/prometheus/procfs/Makefile b/vendor/github.com/prometheus/procfs/Makefile
new file mode 100644
index 0000000..7edfe4d
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/Makefile
@@ -0,0 +1,31 @@
+# Copyright 2018 The Prometheus Authors
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+include Makefile.common
+
+%/.unpacked: %.ttar
+ @echo ">> extracting fixtures $*"
+ ./ttar -C $(dir $*) -x -f $*.ttar
+ touch $@
+
+fixtures: testdata/fixtures/.unpacked
+
+update_fixtures:
+ rm -vf testdata/fixtures/.unpacked
+ ./ttar -c -f testdata/fixtures.ttar -C testdata/ fixtures/
+
+.PHONY: build
+build:
+
+.PHONY: test
+test: testdata/fixtures/.unpacked common-test
diff --git a/vendor/github.com/prometheus/procfs/Makefile.common b/vendor/github.com/prometheus/procfs/Makefile.common
new file mode 100644
index 0000000..1617292
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/Makefile.common
@@ -0,0 +1,277 @@
+# Copyright 2018 The Prometheus Authors
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# A common Makefile that includes rules to be reused in different prometheus projects.
+# !!! Open PRs only against the prometheus/prometheus/Makefile.common repository!
+
+# Example usage :
+# Create the main Makefile in the root project directory.
+# include Makefile.common
+# customTarget:
+# @echo ">> Running customTarget"
+#
+
+# Ensure GOBIN is not set during build so that promu is installed to the correct path
+unexport GOBIN
+
+GO ?= go
+GOFMT ?= $(GO)fmt
+FIRST_GOPATH := $(firstword $(subst :, ,$(shell $(GO) env GOPATH)))
+GOOPTS ?=
+GOHOSTOS ?= $(shell $(GO) env GOHOSTOS)
+GOHOSTARCH ?= $(shell $(GO) env GOHOSTARCH)
+
+GO_VERSION ?= $(shell $(GO) version)
+GO_VERSION_NUMBER ?= $(word 3, $(GO_VERSION))
+PRE_GO_111 ?= $(shell echo $(GO_VERSION_NUMBER) | grep -E 'go1\.(10|[0-9])\.')
+
+PROMU := $(FIRST_GOPATH)/bin/promu
+pkgs = ./...
+
+ifeq (arm, $(GOHOSTARCH))
+ GOHOSTARM ?= $(shell GOARM= $(GO) env GOARM)
+ GO_BUILD_PLATFORM ?= $(GOHOSTOS)-$(GOHOSTARCH)v$(GOHOSTARM)
+else
+ GO_BUILD_PLATFORM ?= $(GOHOSTOS)-$(GOHOSTARCH)
+endif
+
+GOTEST := $(GO) test
+GOTEST_DIR :=
+ifneq ($(CIRCLE_JOB),)
+ifneq ($(shell command -v gotestsum 2> /dev/null),)
+ GOTEST_DIR := test-results
+ GOTEST := gotestsum --junitfile $(GOTEST_DIR)/unit-tests.xml --
+endif
+endif
+
+PROMU_VERSION ?= 0.17.0
+PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz
+
+SKIP_GOLANGCI_LINT :=
+GOLANGCI_LINT :=
+GOLANGCI_LINT_OPTS ?=
+GOLANGCI_LINT_VERSION ?= v1.59.0
+# golangci-lint only supports linux, darwin and windows platforms on i386/amd64/arm64.
+# windows isn't included here because of the path separator being different.
+ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin))
+ ifeq ($(GOHOSTARCH),$(filter $(GOHOSTARCH),amd64 i386 arm64))
+ # If we're in CI and there is an Actions file, that means the linter
+ # is being run in Actions, so we don't need to run it here.
+ ifneq (,$(SKIP_GOLANGCI_LINT))
+ GOLANGCI_LINT :=
+ else ifeq (,$(CIRCLE_JOB))
+ GOLANGCI_LINT := $(FIRST_GOPATH)/bin/golangci-lint
+ else ifeq (,$(wildcard .github/workflows/golangci-lint.yml))
+ GOLANGCI_LINT := $(FIRST_GOPATH)/bin/golangci-lint
+ endif
+ endif
+endif
+
+PREFIX ?= $(shell pwd)
+BIN_DIR ?= $(shell pwd)
+DOCKER_IMAGE_TAG ?= $(subst /,-,$(shell git rev-parse --abbrev-ref HEAD))
+DOCKERFILE_PATH ?= ./Dockerfile
+DOCKERBUILD_CONTEXT ?= ./
+DOCKER_REPO ?= prom
+
+DOCKER_ARCHS ?= amd64
+
+BUILD_DOCKER_ARCHS = $(addprefix common-docker-,$(DOCKER_ARCHS))
+PUBLISH_DOCKER_ARCHS = $(addprefix common-docker-publish-,$(DOCKER_ARCHS))
+TAG_DOCKER_ARCHS = $(addprefix common-docker-tag-latest-,$(DOCKER_ARCHS))
+
+SANITIZED_DOCKER_IMAGE_TAG := $(subst +,-,$(DOCKER_IMAGE_TAG))
+
+ifeq ($(GOHOSTARCH),amd64)
+ ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux freebsd darwin windows))
+ # Only supported on amd64
+ test-flags := -race
+ endif
+endif
+
+# This rule is used to forward a target like "build" to "common-build". This
+# allows a new "build" target to be defined in a Makefile which includes this
+# one and override "common-build" without override warnings.
+%: common-% ;
+
+.PHONY: common-all
+common-all: precheck style check_license lint yamllint unused build test
+
+.PHONY: common-style
+common-style:
+ @echo ">> checking code style"
+ @fmtRes=$$($(GOFMT) -d $$(find . -path ./vendor -prune -o -name '*.go' -print)); \
+ if [ -n "$${fmtRes}" ]; then \
+ echo "gofmt checking failed!"; echo "$${fmtRes}"; echo; \
+ echo "Please ensure you are using $$($(GO) version) for formatting code."; \
+ exit 1; \
+ fi
+
+.PHONY: common-check_license
+common-check_license:
+ @echo ">> checking license header"
+ @licRes=$$(for file in $$(find . -type f -iname '*.go' ! -path './vendor/*') ; do \
+ awk 'NR<=3' $$file | grep -Eq "(Copyright|generated|GENERATED)" || echo $$file; \
+ done); \
+ if [ -n "$${licRes}" ]; then \
+ echo "license header checking failed:"; echo "$${licRes}"; \
+ exit 1; \
+ fi
+
+.PHONY: common-deps
+common-deps:
+ @echo ">> getting dependencies"
+ $(GO) mod download
+
+.PHONY: update-go-deps
+update-go-deps:
+ @echo ">> updating Go dependencies"
+ @for m in $$($(GO) list -mod=readonly -m -f '{{ if and (not .Indirect) (not .Main)}}{{.Path}}{{end}}' all); do \
+ $(GO) get -d $$m; \
+ done
+ $(GO) mod tidy
+
+.PHONY: common-test-short
+common-test-short: $(GOTEST_DIR)
+ @echo ">> running short tests"
+ $(GOTEST) -short $(GOOPTS) $(pkgs)
+
+.PHONY: common-test
+common-test: $(GOTEST_DIR)
+ @echo ">> running all tests"
+ $(GOTEST) $(test-flags) $(GOOPTS) $(pkgs)
+
+$(GOTEST_DIR):
+ @mkdir -p $@
+
+.PHONY: common-format
+common-format:
+ @echo ">> formatting code"
+ $(GO) fmt $(pkgs)
+
+.PHONY: common-vet
+common-vet:
+ @echo ">> vetting code"
+ $(GO) vet $(GOOPTS) $(pkgs)
+
+.PHONY: common-lint
+common-lint: $(GOLANGCI_LINT)
+ifdef GOLANGCI_LINT
+ @echo ">> running golangci-lint"
+ $(GOLANGCI_LINT) run $(GOLANGCI_LINT_OPTS) $(pkgs)
+endif
+
+.PHONY: common-lint-fix
+common-lint-fix: $(GOLANGCI_LINT)
+ifdef GOLANGCI_LINT
+ @echo ">> running golangci-lint fix"
+ $(GOLANGCI_LINT) run --fix $(GOLANGCI_LINT_OPTS) $(pkgs)
+endif
+
+.PHONY: common-yamllint
+common-yamllint:
+ @echo ">> running yamllint on all YAML files in the repository"
+ifeq (, $(shell command -v yamllint 2> /dev/null))
+ @echo "yamllint not installed so skipping"
+else
+ yamllint .
+endif
+
+# For backward-compatibility.
+.PHONY: common-staticcheck
+common-staticcheck: lint
+
+.PHONY: common-unused
+common-unused:
+ @echo ">> running check for unused/missing packages in go.mod"
+ $(GO) mod tidy
+ @git diff --exit-code -- go.sum go.mod
+
+.PHONY: common-build
+common-build: promu
+ @echo ">> building binaries"
+ $(PROMU) build --prefix $(PREFIX) $(PROMU_BINARIES)
+
+.PHONY: common-tarball
+common-tarball: promu
+ @echo ">> building release tarball"
+ $(PROMU) tarball --prefix $(PREFIX) $(BIN_DIR)
+
+.PHONY: common-docker-repo-name
+common-docker-repo-name:
+ @echo "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)"
+
+.PHONY: common-docker $(BUILD_DOCKER_ARCHS)
+common-docker: $(BUILD_DOCKER_ARCHS)
+$(BUILD_DOCKER_ARCHS): common-docker-%:
+ docker build -t "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" \
+ -f $(DOCKERFILE_PATH) \
+ --build-arg ARCH="$*" \
+ --build-arg OS="linux" \
+ $(DOCKERBUILD_CONTEXT)
+
+.PHONY: common-docker-publish $(PUBLISH_DOCKER_ARCHS)
+common-docker-publish: $(PUBLISH_DOCKER_ARCHS)
+$(PUBLISH_DOCKER_ARCHS): common-docker-publish-%:
+ docker push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)"
+
+DOCKER_MAJOR_VERSION_TAG = $(firstword $(subst ., ,$(shell cat VERSION)))
+.PHONY: common-docker-tag-latest $(TAG_DOCKER_ARCHS)
+common-docker-tag-latest: $(TAG_DOCKER_ARCHS)
+$(TAG_DOCKER_ARCHS): common-docker-tag-latest-%:
+ docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:latest"
+ docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:v$(DOCKER_MAJOR_VERSION_TAG)"
+
+.PHONY: common-docker-manifest
+common-docker-manifest:
+ DOCKER_CLI_EXPERIMENTAL=enabled docker manifest create -a "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(SANITIZED_DOCKER_IMAGE_TAG)" $(foreach ARCH,$(DOCKER_ARCHS),$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$(ARCH):$(SANITIZED_DOCKER_IMAGE_TAG))
+ DOCKER_CLI_EXPERIMENTAL=enabled docker manifest push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(SANITIZED_DOCKER_IMAGE_TAG)"
+
+.PHONY: promu
+promu: $(PROMU)
+
+$(PROMU):
+ $(eval PROMU_TMP := $(shell mktemp -d))
+ curl -s -L $(PROMU_URL) | tar -xvzf - -C $(PROMU_TMP)
+ mkdir -p $(FIRST_GOPATH)/bin
+ cp $(PROMU_TMP)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM)/promu $(FIRST_GOPATH)/bin/promu
+ rm -r $(PROMU_TMP)
+
+.PHONY: proto
+proto:
+ @echo ">> generating code from proto files"
+ @./scripts/genproto.sh
+
+ifdef GOLANGCI_LINT
+$(GOLANGCI_LINT):
+ mkdir -p $(FIRST_GOPATH)/bin
+ curl -sfL https://raw.githubusercontent.com/golangci/golangci-lint/$(GOLANGCI_LINT_VERSION)/install.sh \
+ | sed -e '/install -d/d' \
+ | sh -s -- -b $(FIRST_GOPATH)/bin $(GOLANGCI_LINT_VERSION)
+endif
+
+.PHONY: precheck
+precheck::
+
+define PRECHECK_COMMAND_template =
+precheck:: $(1)_precheck
+
+PRECHECK_COMMAND_$(1) ?= $(1) $$(strip $$(PRECHECK_OPTIONS_$(1)))
+.PHONY: $(1)_precheck
+$(1)_precheck:
+ @if ! $$(PRECHECK_COMMAND_$(1)) 1>/dev/null 2>&1; then \
+ echo "Execution of '$$(PRECHECK_COMMAND_$(1))' command failed. Is $(1) installed?"; \
+ exit 1; \
+ fi
+endef
diff --git a/vendor/github.com/prometheus/procfs/NOTICE b/vendor/github.com/prometheus/procfs/NOTICE
new file mode 100644
index 0000000..53c5e9a
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/NOTICE
@@ -0,0 +1,7 @@
+procfs provides functions to retrieve system, kernel and process
+metrics from the pseudo-filesystem proc.
+
+Copyright 2014-2015 The Prometheus Authors
+
+This product includes software developed at
+SoundCloud Ltd. (http://soundcloud.com/).
diff --git a/vendor/github.com/prometheus/procfs/README.md b/vendor/github.com/prometheus/procfs/README.md
new file mode 100644
index 0000000..1224816
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/README.md
@@ -0,0 +1,61 @@
+# procfs
+
+This package provides functions to retrieve system, kernel, and process
+metrics from the pseudo-filesystems /proc and /sys.
+
+*WARNING*: This package is a work in progress. Its API may still break in
+backwards-incompatible ways without warnings. Use it at your own risk.
+
+[Go Reference](https://pkg.go.dev/github.com/prometheus/procfs)
+[CircleCI](https://circleci.com/gh/prometheus/procfs/tree/master)
+[Go Report Card](https://goreportcard.com/report/github.com/prometheus/procfs)
+
+## Usage
+
+The procfs library is organized by packages based on whether the gathered data is coming from
+/proc, /sys, or both. Each package contains an `FS` type which represents the path to either /proc,
+/sys, or both. For example, cpu statistics are gathered from
+`/proc/stat` and are available via the root procfs package. First, the proc filesystem mount
+point is initialized, and then the stat information is read.
+
+```go
+fs, err := procfs.NewFS("/proc")
+stats, err := fs.Stat()
+```
+
+Some sub-packages, such as `blockdevice`, require access to both the proc and sys filesystems.
+
+```go
+ fs, err := blockdevice.NewFS("/proc", "/sys")
+ stats, err := fs.ProcDiskstats()
+```
+
+## Package Organization
+
+The packages in this project are organized according to (1) whether the data comes from the `/proc` or
+`/sys` filesystem and (2) the type of information being retrieved. For example, most process information
+can be gathered from the functions in the root `procfs` package. Information about block devices such as disk drives
+is available in the `blockdevice` sub-package.
+
+## Building and Testing
+
+The procfs library is intended to be built as part of another application, so there are no distributable binaries.
+However, most of the API includes unit tests which can be run with `make test`.
+
+### Updating Test Fixtures
+
+The procfs library includes a set of test fixtures which include many example files from
+the `/proc` and `/sys` filesystems. These fixtures are included as a [ttar](https://github.com/ideaship/ttar) file
+which is extracted automatically during testing. To add/update the test fixtures, first
+ensure the `fixtures` directory is up to date by removing the existing directory and then
+extracting the ttar file using `make fixtures/.unpacked` or just `make test`.
+
+```bash
+rm -rf testdata/fixtures
+make test
+```
+
+Next, make the required changes to the extracted files in the `fixtures` directory. When
+the changes are complete, run `make update_fixtures` to create a new `fixtures.ttar` file
+based on the updated `fixtures` directory. And finally, verify the changes using
+`git diff testdata/fixtures.ttar`.
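+
+Putting the commands named above together, one possible workflow for an update looks like this (a sketch, assuming the fixtures have already been extracted to `testdata/fixtures/` as shown earlier):
+
+```bash
+# After editing files under testdata/fixtures/:
+make update_fixtures             # repack the edited fixtures into testdata/fixtures.ttar
+git diff testdata/fixtures.ttar  # review the resulting change before committing
+```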
diff --git a/vendor/github.com/prometheus/procfs/SECURITY.md b/vendor/github.com/prometheus/procfs/SECURITY.md
new file mode 100644
index 0000000..fed02d8
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/SECURITY.md
@@ -0,0 +1,6 @@
+# Reporting a security issue
+
+The Prometheus security policy, including how to report vulnerabilities, can be
+found here:
+
+<https://prometheus.io/security/>
diff --git a/vendor/github.com/prometheus/procfs/arp.go b/vendor/github.com/prometheus/procfs/arp.go
new file mode 100644
index 0000000..cdcc8a7
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/arp.go
@@ -0,0 +1,116 @@
+// Copyright 2019 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+ "fmt"
+ "net"
+ "os"
+ "strconv"
+ "strings"
+)
+
+// Learned from include/uapi/linux/if_arp.h.
+const (
+ // completed entry (ha valid).
+ ATFComplete = 0x02
+ // permanent entry.
+ ATFPermanent = 0x04
+ // Publish entry.
+ ATFPublish = 0x08
+ // Has requested trailers.
+ ATFUseTrailers = 0x10
+ // Obsoleted: Want to use a netmask (only for proxy entries).
+ ATFNetmask = 0x20
+ // Don't answer these addresses.
+ ATFDontPublish = 0x40
+)
+
+// ARPEntry contains a single row of the columnar data represented in
+// /proc/net/arp.
+type ARPEntry struct {
+ // IP address
+ IPAddr net.IP
+ // MAC address
+ HWAddr net.HardwareAddr
+ // Name of the device
+ Device string
+ // Flags
+ Flags byte
+}
+
+// GatherARPEntries retrieves all the ARP entries, parses the relevant columns,
+// and returns a slice of ARPEntry values.
+func (fs FS) GatherARPEntries() ([]ARPEntry, error) {
+ data, err := os.ReadFile(fs.proc.Path("net/arp"))
+ if err != nil {
+ return nil, fmt.Errorf("%w: error reading arp %s: %w", ErrFileRead, fs.proc.Path("net/arp"), err)
+ }
+
+ return parseARPEntries(data)
+}
+
+func parseARPEntries(data []byte) ([]ARPEntry, error) {
+ lines := strings.Split(string(data), "\n")
+ entries := make([]ARPEntry, 0)
+ var err error
+ const (
+ expectedDataWidth = 6
+ expectedHeaderWidth = 9
+ )
+ for _, line := range lines {
+ columns := strings.Fields(line)
+ width := len(columns)
+
+ if width == expectedHeaderWidth || width == 0 {
+ continue
+ } else if width == expectedDataWidth {
+ entry, err := parseARPEntry(columns)
+ if err != nil {
+ return []ARPEntry{}, fmt.Errorf("%w: Failed to parse ARP entry: %v: %w", ErrFileParse, entry, err)
+ }
+ entries = append(entries, entry)
+ } else {
+ return []ARPEntry{}, fmt.Errorf("%w: %d columns found, but expected %d: %w", ErrFileParse, width, expectedDataWidth, err)
+ }
+
+ }
+
+ return entries, err
+}
+
+func parseARPEntry(columns []string) (ARPEntry, error) {
+ entry := ARPEntry{Device: columns[5]}
+ ip := net.ParseIP(columns[0])
+ entry.IPAddr = ip
+
+ if mac, err := net.ParseMAC(columns[3]); err == nil {
+ entry.HWAddr = mac
+ } else {
+ return ARPEntry{}, err
+ }
+
+ if flags, err := strconv.ParseUint(columns[2], 0, 8); err == nil {
+ entry.Flags = byte(flags)
+ } else {
+ return ARPEntry{}, err
+ }
+
+ return entry, nil
+}
+
+// IsComplete returns true if the ARP entry is marked with the complete flag.
+func (entry *ARPEntry) IsComplete() bool {
+ return entry.Flags&ATFComplete != 0
+}
diff --git a/vendor/github.com/prometheus/procfs/buddyinfo.go b/vendor/github.com/prometheus/procfs/buddyinfo.go
new file mode 100644
index 0000000..8380750
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/buddyinfo.go
@@ -0,0 +1,85 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+ "os"
+ "strconv"
+ "strings"
+)
+
+// A BuddyInfo is the details parsed from /proc/buddyinfo.
+// The data consists of an array of free-fragment counts, one count per fragment size.
+// The sizes are 2^n*PAGE_SIZE, where n is the array index.
+type BuddyInfo struct {
+ Node string
+ Zone string
+ Sizes []float64
+}
+
+// BuddyInfo reads the buddyinfo statistics from the specified `proc` filesystem.
+func (fs FS) BuddyInfo() ([]BuddyInfo, error) {
+ file, err := os.Open(fs.proc.Path("buddyinfo"))
+ if err != nil {
+ return nil, err
+ }
+ defer file.Close()
+
+ return parseBuddyInfo(file)
+}
+
+func parseBuddyInfo(r io.Reader) ([]BuddyInfo, error) {
+ var (
+ buddyInfo = []BuddyInfo{}
+ scanner = bufio.NewScanner(r)
+ bucketCount = -1
+ )
+
+ for scanner.Scan() {
+ var err error
+ line := scanner.Text()
+ parts := strings.Fields(line)
+
+ if len(parts) < 4 {
+ return nil, fmt.Errorf("%w: Invalid number of fields, found: %v", ErrFileParse, parts)
+ }
+
+ node := strings.TrimSuffix(parts[1], ",")
+ zone := strings.TrimSuffix(parts[3], ",")
+ arraySize := len(parts[4:])
+
+ if bucketCount == -1 {
+ bucketCount = arraySize
+ } else {
+ if bucketCount != arraySize {
+ return nil, fmt.Errorf("%w: mismatch in number of buddyinfo buckets, previous count %d, new count %d", ErrFileParse, bucketCount, arraySize)
+ }
+ }
+
+ sizes := make([]float64, arraySize)
+ for i := 0; i < arraySize; i++ {
+ sizes[i], err = strconv.ParseFloat(parts[i+4], 64)
+ if err != nil {
+ return nil, fmt.Errorf("%w: Invalid valid in buddyinfo: %f: %w", ErrFileParse, sizes[i], err)
+ }
+ }
+
+ buddyInfo = append(buddyInfo, BuddyInfo{node, zone, sizes})
+ }
+
+ return buddyInfo, scanner.Err()
+}
diff --git a/vendor/github.com/prometheus/procfs/cmdline.go b/vendor/github.com/prometheus/procfs/cmdline.go
new file mode 100644
index 0000000..bf4f3b4
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/cmdline.go
@@ -0,0 +1,30 @@
+// Copyright 2021 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+ "strings"
+
+ "github.com/prometheus/procfs/internal/util"
+)
+
+// CmdLine returns the command line of the kernel.
+func (fs FS) CmdLine() ([]string, error) {
+ data, err := util.ReadFileNoStat(fs.proc.Path("cmdline"))
+ if err != nil {
+ return nil, err
+ }
+
+ return strings.Fields(string(data)), nil
+}
diff --git a/vendor/github.com/prometheus/procfs/cpuinfo.go b/vendor/github.com/prometheus/procfs/cpuinfo.go
new file mode 100644
index 0000000..f0950bb
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/cpuinfo.go
@@ -0,0 +1,519 @@
+// Copyright 2019 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build linux
+// +build linux
+
+package procfs
+
+import (
+ "bufio"
+ "bytes"
+ "errors"
+ "fmt"
+ "regexp"
+ "strconv"
+ "strings"
+
+ "github.com/prometheus/procfs/internal/util"
+)
+
+// CPUInfo contains general information about a system CPU found in /proc/cpuinfo.
+type CPUInfo struct {
+ Processor uint
+ VendorID string
+ CPUFamily string
+ Model string
+ ModelName string
+ Stepping string
+ Microcode string
+ CPUMHz float64
+ CacheSize string
+ PhysicalID string
+ Siblings uint
+ CoreID string
+ CPUCores uint
+ APICID string
+ InitialAPICID string
+ FPU string
+ FPUException string
+ CPUIDLevel uint
+ WP string
+ Flags []string
+ Bugs []string
+ BogoMips float64
+ CLFlushSize uint
+ CacheAlignment uint
+ AddressSizes string
+ PowerManagement string
+}
+
+var (
+ cpuinfoClockRegexp = regexp.MustCompile(`([\d.]+)`)
+ cpuinfoS390XProcessorRegexp = regexp.MustCompile(`^processor\s+(\d+):.*`)
+)
+
+// CPUInfo returns information about current system CPUs.
+// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt
+func (fs FS) CPUInfo() ([]CPUInfo, error) {
+ data, err := util.ReadFileNoStat(fs.proc.Path("cpuinfo"))
+ if err != nil {
+ return nil, err
+ }
+ return parseCPUInfo(data)
+}
+
+func parseCPUInfoX86(info []byte) ([]CPUInfo, error) {
+ scanner := bufio.NewScanner(bytes.NewReader(info))
+
+ // find the first "processor" line
+ firstLine := firstNonEmptyLine(scanner)
+ if !strings.HasPrefix(firstLine, "processor") || !strings.Contains(firstLine, ":") {
+ return nil, fmt.Errorf("%w: Cannot parse line: %q", ErrFileParse, firstLine)
+ }
+ field := strings.SplitN(firstLine, ": ", 2)
+ v, err := strconv.ParseUint(field[1], 0, 32)
+ if err != nil {
+ return nil, err
+ }
+ firstcpu := CPUInfo{Processor: uint(v)}
+ cpuinfo := []CPUInfo{firstcpu}
+ i := 0
+
+ for scanner.Scan() {
+ line := scanner.Text()
+ if !strings.Contains(line, ":") {
+ continue
+ }
+ field := strings.SplitN(line, ": ", 2)
+ switch strings.TrimSpace(field[0]) {
+ case "processor":
+ cpuinfo = append(cpuinfo, CPUInfo{}) // start of the next processor
+ i++
+ v, err := strconv.ParseUint(field[1], 0, 32)
+ if err != nil {
+ return nil, err
+ }
+ cpuinfo[i].Processor = uint(v)
+ case "vendor", "vendor_id":
+ cpuinfo[i].VendorID = field[1]
+ case "cpu family":
+ cpuinfo[i].CPUFamily = field[1]
+ case "model":
+ cpuinfo[i].Model = field[1]
+ case "model name":
+ cpuinfo[i].ModelName = field[1]
+ case "stepping":
+ cpuinfo[i].Stepping = field[1]
+ case "microcode":
+ cpuinfo[i].Microcode = field[1]
+ case "cpu MHz":
+ v, err := strconv.ParseFloat(field[1], 64)
+ if err != nil {
+ return nil, err
+ }
+ cpuinfo[i].CPUMHz = v
+ case "cache size":
+ cpuinfo[i].CacheSize = field[1]
+ case "physical id":
+ cpuinfo[i].PhysicalID = field[1]
+ case "siblings":
+ v, err := strconv.ParseUint(field[1], 0, 32)
+ if err != nil {
+ return nil, err
+ }
+ cpuinfo[i].Siblings = uint(v)
+ case "core id":
+ cpuinfo[i].CoreID = field[1]
+ case "cpu cores":
+ v, err := strconv.ParseUint(field[1], 0, 32)
+ if err != nil {
+ return nil, err
+ }
+ cpuinfo[i].CPUCores = uint(v)
+ case "apicid":
+ cpuinfo[i].APICID = field[1]
+ case "initial apicid":
+ cpuinfo[i].InitialAPICID = field[1]
+ case "fpu":
+ cpuinfo[i].FPU = field[1]
+ case "fpu_exception":
+ cpuinfo[i].FPUException = field[1]
+ case "cpuid level":
+ v, err := strconv.ParseUint(field[1], 0, 32)
+ if err != nil {
+ return nil, err
+ }
+ cpuinfo[i].CPUIDLevel = uint(v)
+ case "wp":
+ cpuinfo[i].WP = field[1]
+ case "flags":
+ cpuinfo[i].Flags = strings.Fields(field[1])
+ case "bugs":
+ cpuinfo[i].Bugs = strings.Fields(field[1])
+ case "bogomips":
+ v, err := strconv.ParseFloat(field[1], 64)
+ if err != nil {
+ return nil, err
+ }
+ cpuinfo[i].BogoMips = v
+ case "clflush size":
+ v, err := strconv.ParseUint(field[1], 0, 32)
+ if err != nil {
+ return nil, err
+ }
+ cpuinfo[i].CLFlushSize = uint(v)
+ case "cache_alignment":
+ v, err := strconv.ParseUint(field[1], 0, 32)
+ if err != nil {
+ return nil, err
+ }
+ cpuinfo[i].CacheAlignment = uint(v)
+ case "address sizes":
+ cpuinfo[i].AddressSizes = field[1]
+ case "power management":
+ cpuinfo[i].PowerManagement = field[1]
+ }
+ }
+ return cpuinfo, nil
+}
+
+func parseCPUInfoARM(info []byte) ([]CPUInfo, error) {
+ scanner := bufio.NewScanner(bytes.NewReader(info))
+
+ firstLine := firstNonEmptyLine(scanner)
+ match, err := regexp.MatchString("^[Pp]rocessor", firstLine)
+ if !match || !strings.Contains(firstLine, ":") {
+ return nil, fmt.Errorf("%w: Cannot parse line: %q: %w", ErrFileParse, firstLine, err)
+
+ }
+ field := strings.SplitN(firstLine, ": ", 2)
+ cpuinfo := []CPUInfo{}
+ featuresLine := ""
+ commonCPUInfo := CPUInfo{}
+ i := 0
+ if strings.TrimSpace(field[0]) == "Processor" {
+ commonCPUInfo = CPUInfo{ModelName: field[1]}
+ i = -1
+ } else {
+ v, err := strconv.ParseUint(field[1], 0, 32)
+ if err != nil {
+ return nil, err
+ }
+ firstcpu := CPUInfo{Processor: uint(v)}
+ cpuinfo = []CPUInfo{firstcpu}
+ }
+
+ for scanner.Scan() {
+ line := scanner.Text()
+ if !strings.Contains(line, ":") {
+ continue
+ }
+ field := strings.SplitN(line, ": ", 2)
+ switch strings.TrimSpace(field[0]) {
+ case "processor":
+ cpuinfo = append(cpuinfo, commonCPUInfo) // start of the next processor
+ i++
+ v, err := strconv.ParseUint(field[1], 0, 32)
+ if err != nil {
+ return nil, err
+ }
+ cpuinfo[i].Processor = uint(v)
+ case "BogoMIPS":
+ if i == -1 {
+ cpuinfo = append(cpuinfo, commonCPUInfo) // There is only one processor
+ i++
+ cpuinfo[i].Processor = 0
+ }
+ v, err := strconv.ParseFloat(field[1], 64)
+ if err != nil {
+ return nil, err
+ }
+ cpuinfo[i].BogoMips = v
+ case "Features":
+ featuresLine = line
+ case "model name":
+ cpuinfo[i].ModelName = field[1]
+ }
+ }
+ fields := strings.SplitN(featuresLine, ": ", 2)
+ for i := range cpuinfo {
+ cpuinfo[i].Flags = strings.Fields(fields[1])
+ }
+ return cpuinfo, nil
+
+}
+
+func parseCPUInfoS390X(info []byte) ([]CPUInfo, error) {
+ scanner := bufio.NewScanner(bytes.NewReader(info))
+
+ firstLine := firstNonEmptyLine(scanner)
+ if !strings.HasPrefix(firstLine, "vendor_id") || !strings.Contains(firstLine, ":") {
+ return nil, fmt.Errorf("%w: Cannot parse line: %q", ErrFileParse, firstLine)
+ }
+ field := strings.SplitN(firstLine, ": ", 2)
+ cpuinfo := []CPUInfo{}
+ commonCPUInfo := CPUInfo{VendorID: field[1]}
+
+ for scanner.Scan() {
+ line := scanner.Text()
+ if !strings.Contains(line, ":") {
+ continue
+ }
+ field := strings.SplitN(line, ": ", 2)
+ switch strings.TrimSpace(field[0]) {
+ case "bogomips per cpu":
+ v, err := strconv.ParseFloat(field[1], 64)
+ if err != nil {
+ return nil, err
+ }
+ commonCPUInfo.BogoMips = v
+ case "features":
+ commonCPUInfo.Flags = strings.Fields(field[1])
+ }
+ if strings.HasPrefix(line, "processor") {
+ match := cpuinfoS390XProcessorRegexp.FindStringSubmatch(line)
+ if len(match) < 2 {
+ return nil, fmt.Errorf("%w: %q", ErrFileParse, firstLine)
+ }
+ cpu := commonCPUInfo
+ v, err := strconv.ParseUint(match[1], 0, 32)
+ if err != nil {
+ return nil, err
+ }
+ cpu.Processor = uint(v)
+ cpuinfo = append(cpuinfo, cpu)
+ }
+ if strings.HasPrefix(line, "cpu number") {
+ break
+ }
+ }
+
+ i := 0
+ for scanner.Scan() {
+ line := scanner.Text()
+ if !strings.Contains(line, ":") {
+ continue
+ }
+ field := strings.SplitN(line, ": ", 2)
+ switch strings.TrimSpace(field[0]) {
+ case "cpu number":
+ i++
+ case "cpu MHz dynamic":
+ clock := cpuinfoClockRegexp.FindString(strings.TrimSpace(field[1]))
+ v, err := strconv.ParseFloat(clock, 64)
+ if err != nil {
+ return nil, err
+ }
+ cpuinfo[i].CPUMHz = v
+ case "physical id":
+ cpuinfo[i].PhysicalID = field[1]
+ case "core id":
+ cpuinfo[i].CoreID = field[1]
+ case "cpu cores":
+ v, err := strconv.ParseUint(field[1], 0, 32)
+ if err != nil {
+ return nil, err
+ }
+ cpuinfo[i].CPUCores = uint(v)
+ case "siblings":
+ v, err := strconv.ParseUint(field[1], 0, 32)
+ if err != nil {
+ return nil, err
+ }
+ cpuinfo[i].Siblings = uint(v)
+ }
+ }
+
+ return cpuinfo, nil
+}
+
+func parseCPUInfoMips(info []byte) ([]CPUInfo, error) {
+ scanner := bufio.NewScanner(bytes.NewReader(info))
+
+ // find the first "processor" line
+ firstLine := firstNonEmptyLine(scanner)
+ if !strings.HasPrefix(firstLine, "system type") || !strings.Contains(firstLine, ":") {
+ return nil, fmt.Errorf("%w: %q", ErrFileParse, firstLine)
+ }
+ field := strings.SplitN(firstLine, ": ", 2)
+ cpuinfo := []CPUInfo{}
+ systemType := field[1]
+
+ i := 0
+
+ for scanner.Scan() {
+ line := scanner.Text()
+ if !strings.Contains(line, ":") {
+ continue
+ }
+ field := strings.SplitN(line, ": ", 2)
+ switch strings.TrimSpace(field[0]) {
+ case "processor":
+ v, err := strconv.ParseUint(field[1], 0, 32)
+ if err != nil {
+ return nil, err
+ }
+ i = int(v)
+ cpuinfo = append(cpuinfo, CPUInfo{}) // start of the next processor
+ cpuinfo[i].Processor = uint(v)
+ cpuinfo[i].VendorID = systemType
+ case "cpu model":
+ cpuinfo[i].ModelName = field[1]
+ case "BogoMIPS":
+ v, err := strconv.ParseFloat(field[1], 64)
+ if err != nil {
+ return nil, err
+ }
+ cpuinfo[i].BogoMips = v
+ }
+ }
+ return cpuinfo, nil
+}
+
+func parseCPUInfoLoong(info []byte) ([]CPUInfo, error) {
+ scanner := bufio.NewScanner(bytes.NewReader(info))
+ // find the first "processor" line
+ firstLine := firstNonEmptyLine(scanner)
+ if !strings.HasPrefix(firstLine, "system type") || !strings.Contains(firstLine, ":") {
+ return nil, fmt.Errorf("%w: %q", ErrFileParse, firstLine)
+ }
+ field := strings.SplitN(firstLine, ": ", 2)
+ cpuinfo := []CPUInfo{}
+ systemType := field[1]
+ i := 0
+ for scanner.Scan() {
+ line := scanner.Text()
+ if !strings.Contains(line, ":") {
+ continue
+ }
+ field := strings.SplitN(line, ": ", 2)
+ switch strings.TrimSpace(field[0]) {
+ case "processor":
+ v, err := strconv.ParseUint(field[1], 0, 32)
+ if err != nil {
+ return nil, err
+ }
+ i = int(v)
+ cpuinfo = append(cpuinfo, CPUInfo{}) // start of the next processor
+ cpuinfo[i].Processor = uint(v)
+ cpuinfo[i].VendorID = systemType
+ case "CPU Family":
+ cpuinfo[i].CPUFamily = field[1]
+ case "Model Name":
+ cpuinfo[i].ModelName = field[1]
+ }
+ }
+ return cpuinfo, nil
+}
+
+func parseCPUInfoPPC(info []byte) ([]CPUInfo, error) {
+ scanner := bufio.NewScanner(bytes.NewReader(info))
+
+ firstLine := firstNonEmptyLine(scanner)
+ if !strings.HasPrefix(firstLine, "processor") || !strings.Contains(firstLine, ":") {
+ return nil, fmt.Errorf("%w: %q", ErrFileParse, firstLine)
+ }
+ field := strings.SplitN(firstLine, ": ", 2)
+ v, err := strconv.ParseUint(field[1], 0, 32)
+ if err != nil {
+ return nil, err
+ }
+ firstcpu := CPUInfo{Processor: uint(v)}
+ cpuinfo := []CPUInfo{firstcpu}
+ i := 0
+
+ for scanner.Scan() {
+ line := scanner.Text()
+ if !strings.Contains(line, ":") {
+ continue
+ }
+ field := strings.SplitN(line, ": ", 2)
+ switch strings.TrimSpace(field[0]) {
+ case "processor":
+ cpuinfo = append(cpuinfo, CPUInfo{}) // start of the next processor
+ i++
+ v, err := strconv.ParseUint(field[1], 0, 32)
+ if err != nil {
+ return nil, err
+ }
+ cpuinfo[i].Processor = uint(v)
+ case "cpu":
+ cpuinfo[i].VendorID = field[1]
+ case "clock":
+ clock := cpuinfoClockRegexp.FindString(strings.TrimSpace(field[1]))
+ v, err := strconv.ParseFloat(clock, 64)
+ if err != nil {
+ return nil, err
+ }
+ cpuinfo[i].CPUMHz = v
+ }
+ }
+ return cpuinfo, nil
+}
+
+func parseCPUInfoRISCV(info []byte) ([]CPUInfo, error) {
+ scanner := bufio.NewScanner(bytes.NewReader(info))
+
+ firstLine := firstNonEmptyLine(scanner)
+ if !strings.HasPrefix(firstLine, "processor") || !strings.Contains(firstLine, ":") {
+ return nil, fmt.Errorf("%w: %q", ErrFileParse, firstLine)
+ }
+ field := strings.SplitN(firstLine, ": ", 2)
+ v, err := strconv.ParseUint(field[1], 0, 32)
+ if err != nil {
+ return nil, err
+ }
+ firstcpu := CPUInfo{Processor: uint(v)}
+ cpuinfo := []CPUInfo{firstcpu}
+ i := 0
+
+ for scanner.Scan() {
+ line := scanner.Text()
+ if !strings.Contains(line, ":") {
+ continue
+ }
+ field := strings.SplitN(line, ": ", 2)
+ switch strings.TrimSpace(field[0]) {
+ case "processor":
+ v, err := strconv.ParseUint(field[1], 0, 32)
+ if err != nil {
+ return nil, err
+ }
+ i = int(v)
+ cpuinfo = append(cpuinfo, CPUInfo{}) // start of the next processor
+ cpuinfo[i].Processor = uint(v)
+ case "hart":
+ cpuinfo[i].CoreID = field[1]
+ case "isa":
+ cpuinfo[i].ModelName = field[1]
+ }
+ }
+ return cpuinfo, nil
+}
+
+func parseCPUInfoDummy(_ []byte) ([]CPUInfo, error) { // nolint:unused,deadcode
+ return nil, errors.New("not implemented")
+}
+
+// firstNonEmptyLine advances the scanner to the first non-empty line
+// and returns the contents of that line.
+func firstNonEmptyLine(scanner *bufio.Scanner) string {
+ for scanner.Scan() {
+ line := scanner.Text()
+ if strings.TrimSpace(line) != "" {
+ return line
+ }
+ }
+ return ""
+}
diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_armx.go b/vendor/github.com/prometheus/procfs/cpuinfo_armx.go
new file mode 100644
index 0000000..64cfd53
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/cpuinfo_armx.go
@@ -0,0 +1,20 @@
+// Copyright 2020 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build linux && (arm || arm64)
+// +build linux
+// +build arm arm64
+
+package procfs
+
+var parseCPUInfo = parseCPUInfoARM
diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_loong64.go b/vendor/github.com/prometheus/procfs/cpuinfo_loong64.go
new file mode 100644
index 0000000..d88442f
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/cpuinfo_loong64.go
@@ -0,0 +1,19 @@
+// Copyright 2022 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build linux
+// +build linux
+
+package procfs
+
+var parseCPUInfo = parseCPUInfoLoong
diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_mipsx.go b/vendor/github.com/prometheus/procfs/cpuinfo_mipsx.go
new file mode 100644
index 0000000..c11207f
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/cpuinfo_mipsx.go
@@ -0,0 +1,20 @@
+// Copyright 2020 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build linux && (mips || mipsle || mips64 || mips64le)
+// +build linux
+// +build mips mipsle mips64 mips64le
+
+package procfs
+
+var parseCPUInfo = parseCPUInfoMips
diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_others.go b/vendor/github.com/prometheus/procfs/cpuinfo_others.go
new file mode 100644
index 0000000..a6b2b31
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/cpuinfo_others.go
@@ -0,0 +1,19 @@
+// Copyright 2020 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build linux && !386 && !amd64 && !arm && !arm64 && !loong64 && !mips && !mips64 && !mips64le && !mipsle && !ppc64 && !ppc64le && !riscv64 && !s390x
+// +build linux,!386,!amd64,!arm,!arm64,!loong64,!mips,!mips64,!mips64le,!mipsle,!ppc64,!ppc64le,!riscv64,!s390x
+
+package procfs
+
+var parseCPUInfo = parseCPUInfoDummy
diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_ppcx.go b/vendor/github.com/prometheus/procfs/cpuinfo_ppcx.go
new file mode 100644
index 0000000..003bc2a
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/cpuinfo_ppcx.go
@@ -0,0 +1,20 @@
+// Copyright 2020 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build linux && (ppc64 || ppc64le)
+// +build linux
+// +build ppc64 ppc64le
+
+package procfs
+
+var parseCPUInfo = parseCPUInfoPPC
diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_riscvx.go b/vendor/github.com/prometheus/procfs/cpuinfo_riscvx.go
new file mode 100644
index 0000000..1c9b731
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/cpuinfo_riscvx.go
@@ -0,0 +1,20 @@
+// Copyright 2020 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build linux && (riscv || riscv64)
+// +build linux
+// +build riscv riscv64
+
+package procfs
+
+var parseCPUInfo = parseCPUInfoRISCV
diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_s390x.go b/vendor/github.com/prometheus/procfs/cpuinfo_s390x.go
new file mode 100644
index 0000000..fa3686b
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/cpuinfo_s390x.go
@@ -0,0 +1,19 @@
+// Copyright 2020 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build linux
+// +build linux
+
+package procfs
+
+var parseCPUInfo = parseCPUInfoS390X
diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_x86.go b/vendor/github.com/prometheus/procfs/cpuinfo_x86.go
new file mode 100644
index 0000000..a0ef555
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/cpuinfo_x86.go
@@ -0,0 +1,20 @@
+// Copyright 2020 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build linux && (386 || amd64)
+// +build linux
+// +build 386 amd64
+
+package procfs
+
+var parseCPUInfo = parseCPUInfoX86
diff --git a/vendor/github.com/prometheus/procfs/crypto.go b/vendor/github.com/prometheus/procfs/crypto.go
new file mode 100644
index 0000000..5f2a37a
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/crypto.go
@@ -0,0 +1,154 @@
+// Copyright 2019 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "io"
+ "strings"
+
+ "github.com/prometheus/procfs/internal/util"
+)
+
+// Crypto holds info parsed from /proc/crypto.
+type Crypto struct {
+ Alignmask *uint64
+ Async bool
+ Blocksize *uint64
+ Chunksize *uint64
+ Ctxsize *uint64
+ Digestsize *uint64
+ Driver string
+ Geniv string
+ Internal string
+ Ivsize *uint64
+ Maxauthsize *uint64
+ MaxKeysize *uint64
+ MinKeysize *uint64
+ Module string
+ Name string
+ Priority *int64
+ Refcnt *int64
+ Seedsize *uint64
+ Selftest string
+ Type string
+ Walksize *uint64
+}
+
+// Crypto parses a crypto-file (/proc/crypto) and returns a slice of
+// structs containing the relevant info. More information available here:
+// https://kernel.readthedocs.io/en/sphinx-samples/crypto-API.html
+func (fs FS) Crypto() ([]Crypto, error) {
+ path := fs.proc.Path("crypto")
+ b, err := util.ReadFileNoStat(path)
+ if err != nil {
+ return nil, fmt.Errorf("%w: Cannot read file %v: %w", ErrFileRead, b, err)
+
+ }
+
+ crypto, err := parseCrypto(bytes.NewReader(b))
+ if err != nil {
+ return nil, fmt.Errorf("%w: Cannot parse %v: %w", ErrFileParse, crypto, err)
+ }
+
+ return crypto, nil
+}
+
+// parseCrypto parses a /proc/crypto stream into Crypto elements.
+func parseCrypto(r io.Reader) ([]Crypto, error) {
+ var out []Crypto
+
+ s := bufio.NewScanner(r)
+ for s.Scan() {
+ text := s.Text()
+ switch {
+ case strings.HasPrefix(text, "name"):
+ // Each crypto element begins with its name.
+ out = append(out, Crypto{})
+ case text == "":
+ continue
+ }
+
+ kv := strings.Split(text, ":")
+ if len(kv) != 2 {
+ return nil, fmt.Errorf("%w: Cannot parse line: %q", ErrFileParse, text)
+ }
+
+ k := strings.TrimSpace(kv[0])
+ v := strings.TrimSpace(kv[1])
+
+ // Parse the key/value pair into the currently focused element.
+ c := &out[len(out)-1]
+ if err := c.parseKV(k, v); err != nil {
+ return nil, err
+ }
+ }
+
+ if err := s.Err(); err != nil {
+ return nil, err
+ }
+
+ return out, nil
+}
+
+// parseKV parses a key/value pair into the appropriate field of c.
+func (c *Crypto) parseKV(k, v string) error {
+ vp := util.NewValueParser(v)
+
+ switch k {
+ case "async":
+ // Interpret literal yes as true.
+ c.Async = v == "yes"
+ case "blocksize":
+ c.Blocksize = vp.PUInt64()
+ case "chunksize":
+ c.Chunksize = vp.PUInt64()
+ case "digestsize":
+ c.Digestsize = vp.PUInt64()
+ case "driver":
+ c.Driver = v
+ case "geniv":
+ c.Geniv = v
+ case "internal":
+ c.Internal = v
+ case "ivsize":
+ c.Ivsize = vp.PUInt64()
+ case "maxauthsize":
+ c.Maxauthsize = vp.PUInt64()
+ case "max keysize":
+ c.MaxKeysize = vp.PUInt64()
+ case "min keysize":
+ c.MinKeysize = vp.PUInt64()
+ case "module":
+ c.Module = v
+ case "name":
+ c.Name = v
+ case "priority":
+ c.Priority = vp.PInt64()
+ case "refcnt":
+ c.Refcnt = vp.PInt64()
+ case "seedsize":
+ c.Seedsize = vp.PUInt64()
+ case "selftest":
+ c.Selftest = v
+ case "type":
+ c.Type = v
+ case "walksize":
+ c.Walksize = vp.PUInt64()
+ }
+
+ return vp.Err()
+}
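+
+// Illustrative sketch (not part of the upstream file): reading /proc/crypto
+// through the API above, assuming a default /proc mount:
+//
+// fs, err := procfs.NewDefaultFS()
+// if err != nil {
+// log.Fatal(err)
+// }
+// entries, err := fs.Crypto()
+// if err != nil {
+// log.Fatal(err)
+// }
+// for _, c := range entries {
+// fmt.Println(c.Name, c.Driver, c.Module)
+// }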
diff --git a/vendor/github.com/prometheus/procfs/doc.go b/vendor/github.com/prometheus/procfs/doc.go
new file mode 100644
index 0000000..f9d961e
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/doc.go
@@ -0,0 +1,44 @@
+// Copyright 2014 Prometheus Team
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package procfs provides functions to retrieve system, kernel and process
+// metrics from the pseudo-filesystem proc.
+//
+// Example:
+//
+// package main
+//
+// import (
+// "fmt"
+// "log"
+//
+// "github.com/prometheus/procfs"
+// )
+//
+// func main() {
+// p, err := procfs.Self()
+// if err != nil {
+// log.Fatalf("could not get process: %s", err)
+// }
+//
+// stat, err := p.Stat()
+// if err != nil {
+// log.Fatalf("could not get process stat: %s", err)
+// }
+//
+// fmt.Printf("command: %s\n", stat.Comm)
+// fmt.Printf("cpu time: %fs\n", stat.CPUTime())
+// fmt.Printf("vsize: %dB\n", stat.VirtualMemory())
+// fmt.Printf("rss: %dB\n", stat.ResidentMemory())
+// }
+package procfs
diff --git a/vendor/github.com/prometheus/procfs/fs.go b/vendor/github.com/prometheus/procfs/fs.go
new file mode 100644
index 0000000..4980c87
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/fs.go
@@ -0,0 +1,50 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+ "github.com/prometheus/procfs/internal/fs"
+)
+
+// FS represents the pseudo-filesystem proc, which provides an interface to
+// kernel data structures.
+type FS struct {
+ proc fs.FS
+ isReal bool
+}
+
+// DefaultMountPoint is the common mount point of the proc filesystem.
+const DefaultMountPoint = fs.DefaultProcMountPoint
+
+// NewDefaultFS returns a new proc FS mounted under the default proc mountPoint.
+// It will error if the mount point directory can't be read or is a file.
+func NewDefaultFS() (FS, error) {
+ return NewFS(DefaultMountPoint)
+}
+
+// NewFS returns a new proc FS mounted under the given proc mountPoint. It will error
+// if the mount point directory can't be read or is a file.
+func NewFS(mountPoint string) (FS, error) {
+ fs, err := fs.NewFS(mountPoint)
+ if err != nil {
+ return FS{}, err
+ }
+
+ isReal, err := isRealProc(mountPoint)
+ if err != nil {
+ return FS{}, err
+ }
+
+ return FS{fs, isReal}, nil
+}
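+
+// Illustrative sketch (not part of the upstream file): the mount point is
+// configurable, e.g. for a container that bind-mounts the host's /proc at a
+// hypothetical /host/proc:
+//
+// fs, err := procfs.NewFS("/host/proc")
+// if err != nil {
+// log.Fatal(err)
+// }
+// stats, err := fs.IPVSStats()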
diff --git a/vendor/github.com/prometheus/procfs/fs_statfs_notype.go b/vendor/github.com/prometheus/procfs/fs_statfs_notype.go
new file mode 100644
index 0000000..134767d
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/fs_statfs_notype.go
@@ -0,0 +1,23 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !freebsd && !linux
+// +build !freebsd,!linux
+
+package procfs
+
+// isRealProc is a stub for platforms whose Statfs_t struct has no Type
+// field; it always reports true because the filesystem type cannot be checked.
+func isRealProc(mountPoint string) (bool, error) {
+ return true, nil
+}
diff --git a/vendor/github.com/prometheus/procfs/fs_statfs_type.go b/vendor/github.com/prometheus/procfs/fs_statfs_type.go
new file mode 100644
index 0000000..80df79c
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/fs_statfs_type.go
@@ -0,0 +1,33 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build freebsd || linux
+// +build freebsd linux
+
+package procfs
+
+import (
+ "syscall"
+)
+
+// isRealProc determines whether supplied mountpoint is really a proc filesystem.
+func isRealProc(mountPoint string) (bool, error) {
+ stat := syscall.Statfs_t{}
+ err := syscall.Statfs(mountPoint, &stat)
+ if err != nil {
+ return false, err
+ }
+
+ // 0x9fa0 is PROC_SUPER_MAGIC: https://elixir.bootlin.com/linux/v6.1/source/include/uapi/linux/magic.h#L87
+ return stat.Type == 0x9fa0, nil
+}
diff --git a/vendor/github.com/prometheus/procfs/fscache.go b/vendor/github.com/prometheus/procfs/fscache.go
new file mode 100644
index 0000000..cf2e3ea
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/fscache.go
@@ -0,0 +1,422 @@
+// Copyright 2019 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "io"
+ "strconv"
+ "strings"
+
+ "github.com/prometheus/procfs/internal/util"
+)
+
+// Fscacheinfo represents fscache statistics.
+type Fscacheinfo struct {
+ // Number of index cookies allocated
+ IndexCookiesAllocated uint64
+ // data storage cookies allocated
+ DataStorageCookiesAllocated uint64
+ // Number of special cookies allocated
+ SpecialCookiesAllocated uint64
+ // Number of objects allocated
+ ObjectsAllocated uint64
+ // Number of object allocation failures
+ ObjectAllocationsFailure uint64
+ // Number of objects that reached the available state
+ ObjectsAvailable uint64
+ // Number of objects that reached the dead state
+ ObjectsDead uint64
+ // Number of objects that didn't have a coherency check
+ ObjectsWithoutCoherencyCheck uint64
+ // Number of objects that passed a coherency check
+ ObjectsWithCoherencyCheck uint64
+ // Number of objects that needed a coherency data update
+ ObjectsNeedCoherencyCheckUpdate uint64
+ // Number of objects that were declared obsolete
+ ObjectsDeclaredObsolete uint64
+ // Number of pages marked as being cached
+ PagesMarkedAsBeingCached uint64
+ // Number of uncache page requests seen
+ UncachePagesRequestSeen uint64
+ // Number of acquire cookie requests seen
+ AcquireCookiesRequestSeen uint64
+ // Number of acq reqs given a NULL parent
+ AcquireRequestsWithNullParent uint64
+ // Number of acq reqs rejected due to no cache available
+ AcquireRequestsRejectedNoCacheAvailable uint64
+ // Number of acq reqs succeeded
+ AcquireRequestsSucceeded uint64
+ // Number of acq reqs rejected due to error
+ AcquireRequestsRejectedDueToError uint64
+ // Number of acq reqs failed on ENOMEM
+ AcquireRequestsFailedDueToEnomem uint64
+ // Number of lookup calls made on cache backends
+ LookupsNumber uint64
+ // Number of negative lookups made
+ LookupsNegative uint64
+ // Number of positive lookups made
+ LookupsPositive uint64
+ // Number of objects created by lookup
+ ObjectsCreatedByLookup uint64
+ // Number of lookups timed out and requeued
+ LookupsTimedOutAndRequed uint64
+ InvalidationsNumber uint64
+ InvalidationsRunning uint64
+ // Number of update cookie requests seen
+ UpdateCookieRequestSeen uint64
+ // Number of upd reqs given a NULL parent
+ UpdateRequestsWithNullParent uint64
+ // Number of upd reqs granted CPU time
+ UpdateRequestsRunning uint64
+ // Number of relinquish cookie requests seen
+ RelinquishCookiesRequestSeen uint64
+ // Number of rlq reqs given a NULL parent
+ RelinquishCookiesWithNullParent uint64
+ // Number of rlq reqs waited on completion of creation
+ RelinquishRequestsWaitingCompleteCreation uint64
+ // Number of relinquish request retries (rtr)
+ RelinquishRetries uint64
+ // Number of attribute changed requests seen
+ AttributeChangedRequestsSeen uint64
+ // Number of attr changed requests queued
+ AttributeChangedRequestsQueued uint64
+ // Number of attr changed rejected -ENOBUFS
+ AttributeChangedRejectDueToEnobufs uint64
+ // Number of attr changed failed -ENOMEM
+ AttributeChangedFailedDueToEnomem uint64
+ // Number of attr changed ops given CPU time
+ AttributeChangedOps uint64
+ // Number of allocation requests seen
+ AllocationRequestsSeen uint64
+ // Number of successful alloc reqs
+ AllocationOkRequests uint64
+ // Number of alloc reqs that waited on lookup completion
+ AllocationWaitingOnLookup uint64
+ // Number of alloc reqs rejected -ENOBUFS
+ AllocationsRejectedDueToEnobufs uint64
+ // Number of alloc reqs aborted -ERESTARTSYS
+ AllocationsAbortedDueToErestartsys uint64
+ // Number of alloc reqs submitted
+ AllocationOperationsSubmitted uint64
+ // Number of alloc reqs waited for CPU time
+ AllocationsWaitedForCPU uint64
+ // Number of alloc reqs aborted due to object death
+ AllocationsAbortedDueToObjectDeath uint64
+ // Number of retrieval (read) requests seen
+ RetrievalsReadRequests uint64
+ // Number of successful retr reqs
+ RetrievalsOk uint64
+ // Number of retr reqs that waited on lookup completion
+ RetrievalsWaitingLookupCompletion uint64
+ // Number of retr reqs returned -ENODATA
+ RetrievalsReturnedEnodata uint64
+ // Number of retr reqs rejected -ENOBUFS
+ RetrievalsRejectedDueToEnobufs uint64
+ // Number of retr reqs aborted -ERESTARTSYS
+ RetrievalsAbortedDueToErestartsys uint64
+ // Number of retr reqs failed -ENOMEM
+ RetrievalsFailedDueToEnomem uint64
+ // Number of retr reqs submitted
+ RetrievalsRequests uint64
+ // Number of retr reqs waited for CPU time
+ RetrievalsWaitingCPU uint64
+ // Number of retr reqs aborted due to object death
+ RetrievalsAbortedDueToObjectDeath uint64
+ // Number of storage (write) requests seen
+ StoreWriteRequests uint64
+ // Number of successful store reqs
+ StoreSuccessfulRequests uint64
+ // Number of store reqs on a page already pending storage
+ StoreRequestsOnPendingStorage uint64
+ // Number of store reqs rejected -ENOBUFS
+ StoreRequestsRejectedDueToEnobufs uint64
+ // Number of store reqs failed -ENOMEM
+ StoreRequestsFailedDueToEnomem uint64
+ // Number of store reqs submitted
+ StoreRequestsSubmitted uint64
+ // Number of store reqs granted CPU time
+ StoreRequestsRunning uint64
+ // Number of pages given store req processing time
+ StorePagesWithRequestsProcessing uint64
+ // Number of store reqs deleted from tracking tree
+ StoreRequestsDeleted uint64
+ // Number of store reqs over store limit
+ StoreRequestsOverStoreLimit uint64
+ // Number of release reqs against pages with no pending store
+ ReleaseRequestsAgainstPagesWithNoPendingStorage uint64
+ // Number of release reqs against pages stored by time lock granted
+ ReleaseRequestsAgainstPagesStoredByTimeLockGranted uint64
+ // Number of release reqs ignored due to in-progress store
+ ReleaseRequestsIgnoredDueToInProgressStore uint64
+ // Number of page stores cancelled due to release req
+ PageStoresCancelledByReleaseRequests uint64
+ VmscanWaiting uint64
+ // Number of times async ops added to pending queues
+ OpsPending uint64
+ // Number of times async ops given CPU time
+ OpsRunning uint64
+ // Number of times async ops queued for processing
+ OpsEnqueued uint64
+ // Number of async ops cancelled
+ OpsCancelled uint64
+ // Number of async ops rejected due to object lookup/create failure
+ OpsRejected uint64
+ // Number of async ops initialised
+ OpsInitialised uint64
+ // Number of async ops queued for deferred release
+ OpsDeferred uint64
+ // Number of async ops released (should equal ini=N when idle)
+ OpsReleased uint64
+ // Number of deferred-release async ops garbage collected
+ OpsGarbageCollected uint64
+ // Number of in-progress alloc_object() cache ops
+ CacheopAllocationsinProgress uint64
+ // Number of in-progress lookup_object() cache ops
+ CacheopLookupObjectInProgress uint64
+ // Number of in-progress lookup_complete() cache ops
+ CacheopLookupCompleteInPorgress uint64
+ // Number of in-progress grab_object() cache ops
+ CacheopGrabObjectInProgress uint64
+ CacheopInvalidations uint64
+ // Number of in-progress update_object() cache ops
+ CacheopUpdateObjectInProgress uint64
+ // Number of in-progress drop_object() cache ops
+ CacheopDropObjectInProgress uint64
+ // Number of in-progress put_object() cache ops
+ CacheopPutObjectInProgress uint64
+ // Number of in-progress attr_changed() cache ops
+ CacheopAttributeChangeInProgress uint64
+ // Number of in-progress sync_cache() cache ops
+ CacheopSyncCacheInProgress uint64
+ // Number of in-progress read_or_alloc_page() cache ops
+ CacheopReadOrAllocPageInProgress uint64
+ // Number of in-progress read_or_alloc_pages() cache ops
+ CacheopReadOrAllocPagesInProgress uint64
+ // Number of in-progress allocate_page() cache ops
+ CacheopAllocatePageInProgress uint64
+ // Number of in-progress allocate_pages() cache ops
+ CacheopAllocatePagesInProgress uint64
+ // Number of in-progress write_page() cache ops
+ CacheopWritePagesInProgress uint64
+ // Number of in-progress uncache_page() cache ops
+ CacheopUncachePagesInProgress uint64
+ // Number of in-progress dissociate_pages() cache ops
+ CacheopDissociatePagesInProgress uint64
+ // Number of object lookups/creations rejected due to lack of space
+ CacheevLookupsAndCreationsRejectedLackSpace uint64
+ // Number of stale objects deleted
+ CacheevStaleObjectsDeleted uint64
+ // Number of objects retired when relinquished
+ CacheevRetiredWhenReliquished uint64
+ // Number of objects culled
+ CacheevObjectsCulled uint64
+}
+
+// Fscacheinfo returns information about current fscache statistics.
+// See https://www.kernel.org/doc/Documentation/filesystems/caching/fscache.txt
+func (fs FS) Fscacheinfo() (Fscacheinfo, error) {
+ b, err := util.ReadFileNoStat(fs.proc.Path("fs/fscache/stats"))
+ if err != nil {
+ return Fscacheinfo{}, err
+ }
+
+ m, err := parseFscacheinfo(bytes.NewReader(b))
+ if err != nil {
+ return Fscacheinfo{}, fmt.Errorf("%w: Cannot parse %v: %w", ErrFileParse, m, err)
+ }
+
+ return *m, nil
+}
+
+func setFSCacheFields(fields []string, setFields ...*uint64) error {
+ var err error
+ if len(fields) < len(setFields) {
+ return fmt.Errorf("%w: Expected %d, but got %d: %w", ErrFileParse, len(setFields), len(fields), err)
+ }
+
+ for i := range setFields {
+ *setFields[i], err = strconv.ParseUint(strings.Split(fields[i], "=")[1], 0, 64)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func parseFscacheinfo(r io.Reader) (*Fscacheinfo, error) {
+ var m Fscacheinfo
+ s := bufio.NewScanner(r)
+ for s.Scan() {
+ fields := strings.Fields(s.Text())
+ if len(fields) < 2 {
+ return nil, fmt.Errorf("%w: malformed Fscacheinfo line: %q", ErrFileParse, s.Text())
+ }
+
+ switch fields[0] {
+ case "Cookies:":
+ err := setFSCacheFields(fields[1:], &m.IndexCookiesAllocated, &m.DataStorageCookiesAllocated,
+ &m.SpecialCookiesAllocated)
+ if err != nil {
+ return &m, err
+ }
+ case "Objects:":
+ err := setFSCacheFields(fields[1:], &m.ObjectsAllocated, &m.ObjectAllocationsFailure,
+ &m.ObjectsAvailable, &m.ObjectsDead)
+ if err != nil {
+ return &m, err
+ }
+ case "ChkAux":
+ err := setFSCacheFields(fields[2:], &m.ObjectsWithoutCoherencyCheck, &m.ObjectsWithCoherencyCheck,
+ &m.ObjectsNeedCoherencyCheckUpdate, &m.ObjectsDeclaredObsolete)
+ if err != nil {
+ return &m, err
+ }
+ case "Pages":
+ err := setFSCacheFields(fields[2:], &m.PagesMarkedAsBeingCached, &m.UncachePagesRequestSeen)
+ if err != nil {
+ return &m, err
+ }
+ case "Acquire:":
+ err := setFSCacheFields(fields[1:], &m.AcquireCookiesRequestSeen, &m.AcquireRequestsWithNullParent,
+ &m.AcquireRequestsRejectedNoCacheAvailable, &m.AcquireRequestsSucceeded, &m.AcquireRequestsRejectedDueToError,
+ &m.AcquireRequestsFailedDueToEnomem)
+ if err != nil {
+ return &m, err
+ }
+ case "Lookups:":
+ err := setFSCacheFields(fields[1:], &m.LookupsNumber, &m.LookupsNegative, &m.LookupsPositive,
+ &m.ObjectsCreatedByLookup, &m.LookupsTimedOutAndRequed)
+ if err != nil {
+ return &m, err
+ }
+ case "Invals":
+ err := setFSCacheFields(fields[2:], &m.InvalidationsNumber, &m.InvalidationsRunning)
+ if err != nil {
+ return &m, err
+ }
+ case "Updates:":
+ err := setFSCacheFields(fields[1:], &m.UpdateCookieRequestSeen, &m.UpdateRequestsWithNullParent,
+ &m.UpdateRequestsRunning)
+ if err != nil {
+ return &m, err
+ }
+ case "Relinqs:":
+ err := setFSCacheFields(fields[1:], &m.RelinquishCookiesRequestSeen, &m.RelinquishCookiesWithNullParent,
+ &m.RelinquishRequestsWaitingCompleteCreation, &m.RelinquishRetries)
+ if err != nil {
+ return &m, err
+ }
+ case "AttrChg:":
+ err := setFSCacheFields(fields[1:], &m.AttributeChangedRequestsSeen, &m.AttributeChangedRequestsQueued,
+ &m.AttributeChangedRejectDueToEnobufs, &m.AttributeChangedFailedDueToEnomem, &m.AttributeChangedOps)
+ if err != nil {
+ return &m, err
+ }
+ case "Allocs":
+ if strings.Split(fields[2], "=")[0] == "n" {
+ err := setFSCacheFields(fields[2:], &m.AllocationRequestsSeen, &m.AllocationOkRequests,
+ &m.AllocationWaitingOnLookup, &m.AllocationsRejectedDueToEnobufs, &m.AllocationsAbortedDueToErestartsys)
+ if err != nil {
+ return &m, err
+ }
+ } else {
+ err := setFSCacheFields(fields[2:], &m.AllocationOperationsSubmitted, &m.AllocationsWaitedForCPU,
+ &m.AllocationsAbortedDueToObjectDeath)
+ if err != nil {
+ return &m, err
+ }
+ }
+ case "Retrvls:":
+ if strings.Split(fields[1], "=")[0] == "n" {
+ err := setFSCacheFields(fields[1:], &m.RetrievalsReadRequests, &m.RetrievalsOk, &m.RetrievalsWaitingLookupCompletion,
+ &m.RetrievalsReturnedEnodata, &m.RetrievalsRejectedDueToEnobufs, &m.RetrievalsAbortedDueToErestartsys,
+ &m.RetrievalsFailedDueToEnomem)
+ if err != nil {
+ return &m, err
+ }
+ } else {
+ err := setFSCacheFields(fields[1:], &m.RetrievalsRequests, &m.RetrievalsWaitingCPU, &m.RetrievalsAbortedDueToObjectDeath)
+ if err != nil {
+ return &m, err
+ }
+ }
+ case "Stores":
+ if strings.Split(fields[2], "=")[0] == "n" {
+ err := setFSCacheFields(fields[2:], &m.StoreWriteRequests, &m.StoreSuccessfulRequests,
+ &m.StoreRequestsOnPendingStorage, &m.StoreRequestsRejectedDueToEnobufs, &m.StoreRequestsFailedDueToEnomem)
+ if err != nil {
+ return &m, err
+ }
+ } else {
+ err := setFSCacheFields(fields[2:], &m.StoreRequestsSubmitted, &m.StoreRequestsRunning,
+ &m.StorePagesWithRequestsProcessing, &m.StoreRequestsDeleted, &m.StoreRequestsOverStoreLimit)
+ if err != nil {
+ return &m, err
+ }
+ }
+ case "VmScan":
+ err := setFSCacheFields(fields[2:], &m.ReleaseRequestsAgainstPagesWithNoPendingStorage,
+ &m.ReleaseRequestsAgainstPagesStoredByTimeLockGranted, &m.ReleaseRequestsIgnoredDueToInProgressStore,
+ &m.PageStoresCancelledByReleaseRequests, &m.VmscanWaiting)
+ if err != nil {
+ return &m, err
+ }
+ case "Ops":
+ if strings.Split(fields[2], "=")[0] == "pend" {
+ err := setFSCacheFields(fields[2:], &m.OpsPending, &m.OpsRunning, &m.OpsEnqueued, &m.OpsCancelled, &m.OpsRejected)
+ if err != nil {
+ return &m, err
+ }
+ } else {
+ err := setFSCacheFields(fields[2:], &m.OpsInitialised, &m.OpsDeferred, &m.OpsReleased, &m.OpsGarbageCollected)
+ if err != nil {
+ return &m, err
+ }
+ }
+ case "CacheOp:":
+ if strings.Split(fields[1], "=")[0] == "alo" {
+ err := setFSCacheFields(fields[1:], &m.CacheopAllocationsinProgress, &m.CacheopLookupObjectInProgress,
+ &m.CacheopLookupCompleteInPorgress, &m.CacheopGrabObjectInProgress)
+ if err != nil {
+ return &m, err
+ }
+ } else if strings.Split(fields[1], "=")[0] == "inv" {
+ err := setFSCacheFields(fields[1:], &m.CacheopInvalidations, &m.CacheopUpdateObjectInProgress,
+ &m.CacheopDropObjectInProgress, &m.CacheopPutObjectInProgress, &m.CacheopAttributeChangeInProgress,
+ &m.CacheopSyncCacheInProgress)
+ if err != nil {
+ return &m, err
+ }
+ } else {
+ err := setFSCacheFields(fields[1:], &m.CacheopReadOrAllocPageInProgress, &m.CacheopReadOrAllocPagesInProgress,
+ &m.CacheopAllocatePageInProgress, &m.CacheopAllocatePagesInProgress, &m.CacheopWritePagesInProgress,
+ &m.CacheopUncachePagesInProgress, &m.CacheopDissociatePagesInProgress)
+ if err != nil {
+ return &m, err
+ }
+ }
+ case "CacheEv:":
+ err := setFSCacheFields(fields[1:], &m.CacheevLookupsAndCreationsRejectedLackSpace, &m.CacheevStaleObjectsDeleted,
+ &m.CacheevRetiredWhenReliquished, &m.CacheevObjectsCulled)
+ if err != nil {
+ return &m, err
+ }
+ }
+ }
+
+ return &m, nil
+}
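+
+// Illustrative note (not part of the upstream file): lines in
+// /proc/fs/fscache/stats look roughly like "Cookies: idx=3 dat=67877 spc=0";
+// setFSCacheFields splits each "key=value" field on '=' and assigns the
+// numeric values positionally into the matching Fscacheinfo fields.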
diff --git a/vendor/github.com/prometheus/procfs/internal/fs/fs.go b/vendor/github.com/prometheus/procfs/internal/fs/fs.go
new file mode 100644
index 0000000..3c18c76
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/internal/fs/fs.go
@@ -0,0 +1,55 @@
+// Copyright 2019 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package fs
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+)
+
+const (
+ // DefaultProcMountPoint is the common mount point of the proc filesystem.
+ DefaultProcMountPoint = "/proc"
+
+ // DefaultSysMountPoint is the common mount point of the sys filesystem.
+ DefaultSysMountPoint = "/sys"
+
+ // DefaultConfigfsMountPoint is the common mount point of the configfs.
+ DefaultConfigfsMountPoint = "/sys/kernel/config"
+)
+
+// FS represents a pseudo-filesystem, normally /proc or /sys, which provides an
+// interface to kernel data structures.
+type FS string
+
+// NewFS returns a new FS mounted under the given mountPoint. It will error
+// if the mount point can't be read.
+func NewFS(mountPoint string) (FS, error) {
+ info, err := os.Stat(mountPoint)
+ if err != nil {
+ return "", fmt.Errorf("could not read %q: %w", mountPoint, err)
+ }
+ if !info.IsDir() {
+ return "", fmt.Errorf("mount point %q is not a directory", mountPoint)
+ }
+
+ return FS(mountPoint), nil
+}
+
+// Path appends the given path elements to the filesystem path, adding separators
+// as necessary.
+func (fs FS) Path(p ...string) string {
+ return filepath.Join(append([]string{string(fs)}, p...)...)
+}
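+
+// Illustrative sketch (not part of the upstream file): Path joins elements
+// onto the mount point with filepath.Join, so for fs := FS("/proc"):
+//
+// fs.Path("net", "ip_vs_stats") // "/proc/net/ip_vs_stats"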
diff --git a/vendor/github.com/prometheus/procfs/internal/util/parse.go b/vendor/github.com/prometheus/procfs/internal/util/parse.go
new file mode 100644
index 0000000..14272dc
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/internal/util/parse.go
@@ -0,0 +1,112 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package util
+
+import (
+ "os"
+ "strconv"
+ "strings"
+)
+
+// ParseUint32s parses a slice of strings into a slice of uint32s.
+func ParseUint32s(ss []string) ([]uint32, error) {
+ us := make([]uint32, 0, len(ss))
+ for _, s := range ss {
+ u, err := strconv.ParseUint(s, 10, 32)
+ if err != nil {
+ return nil, err
+ }
+
+ us = append(us, uint32(u))
+ }
+
+ return us, nil
+}
+
+// ParseUint64s parses a slice of strings into a slice of uint64s.
+func ParseUint64s(ss []string) ([]uint64, error) {
+ us := make([]uint64, 0, len(ss))
+ for _, s := range ss {
+ u, err := strconv.ParseUint(s, 10, 64)
+ if err != nil {
+ return nil, err
+ }
+
+ us = append(us, u)
+ }
+
+ return us, nil
+}
+
+// ParsePInt64s parses a slice of strings into a slice of int64 pointers.
+func ParsePInt64s(ss []string) ([]*int64, error) {
+ us := make([]*int64, 0, len(ss))
+ for _, s := range ss {
+ u, err := strconv.ParseInt(s, 10, 64)
+ if err != nil {
+ return nil, err
+ }
+
+ us = append(us, &u)
+ }
+
+ return us, nil
+}
+
+// ParseHexUint64s parses a slice of hex strings into a slice of uint64 pointers.
+func ParseHexUint64s(ss []string) ([]*uint64, error) {
+ us := make([]*uint64, 0, len(ss))
+ for _, s := range ss {
+ u, err := strconv.ParseUint(s, 16, 64)
+ if err != nil {
+ return nil, err
+ }
+
+ us = append(us, &u)
+ }
+
+ return us, nil
+}
+
+// ReadUintFromFile reads a file and attempts to parse a uint64 from it.
+func ReadUintFromFile(path string) (uint64, error) {
+ data, err := os.ReadFile(path)
+ if err != nil {
+ return 0, err
+ }
+ return strconv.ParseUint(strings.TrimSpace(string(data)), 10, 64)
+}
+
+// ReadIntFromFile reads a file and attempts to parse an int64 from it.
+func ReadIntFromFile(path string) (int64, error) {
+ data, err := os.ReadFile(path)
+ if err != nil {
+ return 0, err
+ }
+ return strconv.ParseInt(strings.TrimSpace(string(data)), 10, 64)
+}
+
+// ParseBool parses a string into a boolean pointer.
+func ParseBool(b string) *bool {
+ var truth bool
+ switch b {
+ case "enabled":
+ truth = true
+ case "disabled":
+ truth = false
+ default:
+ return nil
+ }
+ return &truth
+}
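+
+// Illustrative sketch (not part of the upstream file): ParseBool maps the
+// literal strings used by several /proc and /sys files to an optional bool:
+//
+// ParseBool("enabled")  // pointer to true
+// ParseBool("disabled") // pointer to false
+// ParseBool("on")       // nil (unrecognised value)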
diff --git a/vendor/github.com/prometheus/procfs/internal/util/readfile.go b/vendor/github.com/prometheus/procfs/internal/util/readfile.go
new file mode 100644
index 0000000..71b7a70
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/internal/util/readfile.go
@@ -0,0 +1,37 @@
+// Copyright 2019 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package util
+
+import (
+ "io"
+ "os"
+)
+
+// ReadFileNoStat uses io.ReadAll to read contents of entire file.
+// This is similar to os.ReadFile but without the call to os.Stat, because
+// many files in /proc and /sys report incorrect file sizes (either 0 or 4096).
+// Reads a max file size of 1024kB. For files larger than this, a scanner
+// should be used.
+func ReadFileNoStat(filename string) ([]byte, error) {
+ const maxBufferSize = 1024 * 1024
+
+ f, err := os.Open(filename)
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+
+ reader := io.LimitReader(f, maxBufferSize)
+ return io.ReadAll(reader)
+}
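+
+// Illustrative sketch (not part of the upstream file): typical use is
+// reading a small procfs file whose reported stat size cannot be trusted:
+//
+// b, err := ReadFileNoStat("/proc/loadavg")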
diff --git a/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go b/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go
new file mode 100644
index 0000000..1ab875c
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go
@@ -0,0 +1,50 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build (linux || darwin) && !appengine
+// +build linux darwin
+// +build !appengine
+
+package util
+
+import (
+ "bytes"
+ "os"
+ "syscall"
+)
+
+// SysReadFile is a simplified os.ReadFile that invokes syscall.Read directly.
+// https://github.com/prometheus/node_exporter/pull/728/files
+//
+// Note that this function will not read files larger than 128 bytes.
+func SysReadFile(file string) (string, error) {
+ f, err := os.Open(file)
+ if err != nil {
+ return "", err
+ }
+ defer f.Close()
+
+ // On some machines, hwmon drivers are broken and return EAGAIN. This causes
+ // Go's os.ReadFile implementation to poll forever.
+ //
+ // Since we either want to read data or bail immediately, do the simplest
+ // possible read using syscall directly.
+ const sysFileBufferSize = 128
+ b := make([]byte, sysFileBufferSize)
+ n, err := syscall.Read(int(f.Fd()), b)
+ if err != nil {
+ return "", err
+ }
+
+ return string(bytes.TrimSpace(b[:n])), nil
+}
diff --git a/vendor/github.com/prometheus/procfs/internal/util/sysreadfile_compat.go b/vendor/github.com/prometheus/procfs/internal/util/sysreadfile_compat.go
new file mode 100644
index 0000000..1d86f5e
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/internal/util/sysreadfile_compat.go
@@ -0,0 +1,27 @@
+// Copyright 2019 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build (linux && appengine) || (!linux && !darwin)
+// +build linux,appengine !linux,!darwin
+
+package util
+
+import (
+ "fmt"
+)
+
+// SysReadFile is here implemented as a noop for builds that do not support
+// the read syscall. For example Windows, or Linux on Google App Engine.
+func SysReadFile(file string) (string, error) {
+ return "", fmt.Errorf("not supported on this platform")
+}
diff --git a/vendor/github.com/prometheus/procfs/internal/util/valueparser.go b/vendor/github.com/prometheus/procfs/internal/util/valueparser.go
new file mode 100644
index 0000000..fe2355d
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/internal/util/valueparser.go
@@ -0,0 +1,91 @@
+// Copyright 2019 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package util
+
+import (
+ "strconv"
+)
+
+// TODO(mdlayher): util packages are an anti-pattern and this should be moved
+// somewhere else that is more focused in the future.
+
+// A ValueParser enables parsing a single string into a variety of data types
+// in a concise and safe way. The Err method must be invoked after invoking
+// any other methods to ensure a value was successfully parsed.
+type ValueParser struct {
+ v string
+ err error
+}
+
+// NewValueParser creates a ValueParser using the input string.
+func NewValueParser(v string) *ValueParser {
+ return &ValueParser{v: v}
+}
+
+// Int interprets the underlying value as an int and returns that value.
+func (vp *ValueParser) Int() int { return int(vp.int64()) }
+
+// PInt64 interprets the underlying value as an int64 and returns a pointer to
+// that value.
+func (vp *ValueParser) PInt64() *int64 {
+ if vp.err != nil {
+ return nil
+ }
+
+ v := vp.int64()
+ return &v
+}
+
+// int64 interprets the underlying value as an int64 and returns that value.
+// TODO: export if/when necessary.
+func (vp *ValueParser) int64() int64 {
+ if vp.err != nil {
+ return 0
+ }
+
+ // A base value of zero makes ParseInt infer the correct base using the
+ // string's prefix, if any.
+ const base = 0
+ v, err := strconv.ParseInt(vp.v, base, 64)
+ if err != nil {
+ vp.err = err
+ return 0
+ }
+
+ return v
+}
+
+// PUInt64 interprets the underlying value as a uint64 and returns a pointer to
+// that value.
+func (vp *ValueParser) PUInt64() *uint64 {
+ if vp.err != nil {
+ return nil
+ }
+
+ // A base value of zero makes ParseInt infer the correct base using the
+ // string's prefix, if any.
+ const base = 0
+ v, err := strconv.ParseUint(vp.v, base, 64)
+ if err != nil {
+ vp.err = err
+ return nil
+ }
+
+ return &v
+}
+
+// Err returns the last error, if any, encountered by the ValueParser.
+func (vp *ValueParser) Err() error {
+ return vp.err
+}
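+
+// Illustrative sketch (not part of the upstream file): Err must be checked
+// after the typed accessors have been called:
+//
+// vp := NewValueParser("0x10")
+// n := vp.Int() // 16; the base is inferred from the "0x" prefix
+// if err := vp.Err(); err != nil {
+// // handle the parse failure
+// }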
diff --git a/vendor/github.com/prometheus/procfs/ipvs.go b/vendor/github.com/prometheus/procfs/ipvs.go
new file mode 100644
index 0000000..bc3a20c
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/ipvs.go
@@ -0,0 +1,241 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+ "bufio"
+ "bytes"
+ "encoding/hex"
+ "errors"
+ "fmt"
+ "io"
+ "net"
+ "os"
+ "strconv"
+ "strings"
+
+ "github.com/prometheus/procfs/internal/util"
+)
+
+// IPVSStats holds IPVS statistics, as exposed by the kernel in `/proc/net/ip_vs_stats`.
+type IPVSStats struct {
+ // Total count of connections.
+ Connections uint64
+ // Total incoming packets processed.
+ IncomingPackets uint64
+ // Total outgoing packets processed.
+ OutgoingPackets uint64
+ // Total incoming traffic.
+ IncomingBytes uint64
+ // Total outgoing traffic.
+ OutgoingBytes uint64
+}
+
+// IPVSBackendStatus holds current metrics of one virtual / real address pair.
+type IPVSBackendStatus struct {
+ // The local (virtual) IP address.
+ LocalAddress net.IP
+ // The remote (real) IP address.
+ RemoteAddress net.IP
+ // The local (virtual) port.
+ LocalPort uint16
+ // The remote (real) port.
+ RemotePort uint16
+ // The local firewall mark
+ LocalMark string
+ // The transport protocol (TCP, UDP).
+ Proto string
+ // The current number of active connections for this virtual/real address pair.
+ ActiveConn uint64
+ // The current number of inactive connections for this virtual/real address pair.
+ InactConn uint64
+ // The current weight of this virtual/real address pair.
+ Weight uint64
+}
+
+// IPVSStats reads the IPVS statistics from the specified `proc` filesystem.
+func (fs FS) IPVSStats() (IPVSStats, error) {
+ data, err := util.ReadFileNoStat(fs.proc.Path("net/ip_vs_stats"))
+ if err != nil {
+ return IPVSStats{}, err
+ }
+
+ return parseIPVSStats(bytes.NewReader(data))
+}
+
+// parseIPVSStats performs the actual parsing of `ip_vs_stats`.
+func parseIPVSStats(r io.Reader) (IPVSStats, error) {
+ var (
+ statContent []byte
+ statLines []string
+ statFields []string
+ stats IPVSStats
+ )
+
+ statContent, err := io.ReadAll(r)
+ if err != nil {
+ return IPVSStats{}, err
+ }
+
+ statLines = strings.SplitN(string(statContent), "\n", 4)
+ if len(statLines) != 4 {
+ return IPVSStats{}, errors.New("ip_vs_stats corrupt: too short")
+ }
+
+ statFields = strings.Fields(statLines[2])
+ if len(statFields) != 5 {
+ return IPVSStats{}, errors.New("ip_vs_stats corrupt: unexpected number of fields")
+ }
+
+ stats.Connections, err = strconv.ParseUint(statFields[0], 16, 64)
+ if err != nil {
+ return IPVSStats{}, err
+ }
+ stats.IncomingPackets, err = strconv.ParseUint(statFields[1], 16, 64)
+ if err != nil {
+ return IPVSStats{}, err
+ }
+ stats.OutgoingPackets, err = strconv.ParseUint(statFields[2], 16, 64)
+ if err != nil {
+ return IPVSStats{}, err
+ }
+ stats.IncomingBytes, err = strconv.ParseUint(statFields[3], 16, 64)
+ if err != nil {
+ return IPVSStats{}, err
+ }
+ stats.OutgoingBytes, err = strconv.ParseUint(statFields[4], 16, 64)
+ if err != nil {
+ return IPVSStats{}, err
+ }
+
+ return stats, nil
+}
+
+// IPVSBackendStatus reads and returns the status of all (virtual,real) server pairs from the specified `proc` filesystem.
+func (fs FS) IPVSBackendStatus() ([]IPVSBackendStatus, error) {
+ file, err := os.Open(fs.proc.Path("net/ip_vs"))
+ if err != nil {
+ return nil, err
+ }
+ defer file.Close()
+
+ return parseIPVSBackendStatus(file)
+}
+
+func parseIPVSBackendStatus(file io.Reader) ([]IPVSBackendStatus, error) {
+ var (
+ status []IPVSBackendStatus
+ scanner = bufio.NewScanner(file)
+ proto string
+ localMark string
+ localAddress net.IP
+ localPort uint16
+ err error
+ )
+
+ for scanner.Scan() {
+ fields := strings.Fields(scanner.Text())
+ if len(fields) == 0 {
+ continue
+ }
+ switch {
+ case fields[0] == "IP" || fields[0] == "Prot" || fields[1] == "RemoteAddress:Port":
+ continue
+ case fields[0] == "TCP" || fields[0] == "UDP":
+ if len(fields) < 2 {
+ continue
+ }
+ proto = fields[0]
+ localMark = ""
+ localAddress, localPort, err = parseIPPort(fields[1])
+ if err != nil {
+ return nil, err
+ }
+ case fields[0] == "FWM":
+ if len(fields) < 2 {
+ continue
+ }
+ proto = fields[0]
+ localMark = fields[1]
+ localAddress = nil
+ localPort = 0
+ case fields[0] == "->":
+ if len(fields) < 6 {
+ continue
+ }
+ remoteAddress, remotePort, err := parseIPPort(fields[1])
+ if err != nil {
+ return nil, err
+ }
+ weight, err := strconv.ParseUint(fields[3], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ activeConn, err := strconv.ParseUint(fields[4], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ inactConn, err := strconv.ParseUint(fields[5], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ status = append(status, IPVSBackendStatus{
+ LocalAddress: localAddress,
+ LocalPort: localPort,
+ LocalMark: localMark,
+ RemoteAddress: remoteAddress,
+ RemotePort: remotePort,
+ Proto: proto,
+ Weight: weight,
+ ActiveConn: activeConn,
+ InactConn: inactConn,
+ })
+ }
+ }
+ return status, nil
+}
+
+func parseIPPort(s string) (net.IP, uint16, error) {
+ var (
+ ip net.IP
+ err error
+ )
+
+ switch len(s) {
+ case 13:
+ ip, err = hex.DecodeString(s[0:8])
+ if err != nil {
+ return nil, 0, err
+ }
+ case 46:
+ ip = net.ParseIP(s[1:40])
+ if ip == nil {
+ return nil, 0, fmt.Errorf("%w: Invalid IPv6 addr %s: %w", ErrFileParse, s[1:40], err)
+ }
+ default:
+ return nil, 0, fmt.Errorf("%w: Unexpected IP:Port %s: %w", ErrFileParse, s, err)
+ }
+
+ portString := s[len(s)-4:]
+ if len(portString) != 4 {
+ return nil, 0,
+ fmt.Errorf("%w: Unexpected port string format %s: %w", ErrFileParse, portString, err)
+ }
+ port, err := strconv.ParseUint(portString, 16, 16)
+ if err != nil {
+ return nil, 0, err
+ }
+
+ return ip, uint16(port), nil
+}
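+
+// Illustrative note (not part of the upstream file): /proc/net/ip_vs encodes
+// IPv4 endpoints as 13-character hex strings, e.g. "C0A80016:0CEA" is
+// 192.168.0.22 port 3306, while IPv6 endpoints are 46-character strings with
+// the address in brackets; parseIPPort switches on the string length to tell
+// them apart.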
diff --git a/vendor/github.com/prometheus/procfs/kernel_random.go b/vendor/github.com/prometheus/procfs/kernel_random.go
new file mode 100644
index 0000000..db88566
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/kernel_random.go
@@ -0,0 +1,63 @@
+// Copyright 2020 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !windows
+// +build !windows
+
+package procfs
+
+import (
+ "os"
+
+ "github.com/prometheus/procfs/internal/util"
+)
+
+// KernelRandom contains information about the kernel's random number generator.
+type KernelRandom struct {
+ // EntropyAvaliable gives the available entropy, in bits.
+ EntropyAvaliable *uint64
+ // PoolSize gives the size of the entropy pool, in bits.
+ PoolSize *uint64
+ // URandomMinReseedSeconds is the number of seconds after which the DRNG will be reseeded.
+ URandomMinReseedSeconds *uint64
+ // WriteWakeupThreshold the number of bits of entropy below which we wake up processes
+ // that do a select(2) or poll(2) for write access to /dev/random.
+ WriteWakeupThreshold *uint64
+ // ReadWakeupThreshold is the number of bits of entropy required for waking up processes that sleep
+ // waiting for entropy from /dev/random.
+ ReadWakeupThreshold *uint64
+}
+
+// KernelRandom returns values from /proc/sys/kernel/random.
+func (fs FS) KernelRandom() (KernelRandom, error) {
+ random := KernelRandom{}
+
+ for file, p := range map[string]**uint64{
+ "entropy_avail": &random.EntropyAvaliable,
+ "poolsize": &random.PoolSize,
+ "urandom_min_reseed_secs": &random.URandomMinReseedSeconds,
+ "write_wakeup_threshold": &random.WriteWakeupThreshold,
+ "read_wakeup_threshold": &random.ReadWakeupThreshold,
+ } {
+ val, err := util.ReadUintFromFile(fs.proc.Path("sys", "kernel", "random", file))
+ if os.IsNotExist(err) {
+ continue
+ }
+ if err != nil {
+ return random, err
+ }
+ *p = &val
+ }
+
+ return random, nil
+}
diff --git a/vendor/github.com/prometheus/procfs/loadavg.go b/vendor/github.com/prometheus/procfs/loadavg.go
new file mode 100644
index 0000000..332e76c
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/loadavg.go
@@ -0,0 +1,62 @@
+// Copyright 2019 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+
+ "github.com/prometheus/procfs/internal/util"
+)
+
+// LoadAvg represents an entry in /proc/loadavg.
+type LoadAvg struct {
+ Load1 float64
+ Load5 float64
+ Load15 float64
+}
+
+// LoadAvg returns loadavg from /proc.
+func (fs FS) LoadAvg() (*LoadAvg, error) {
+ path := fs.proc.Path("loadavg")
+
+ data, err := util.ReadFileNoStat(path)
+ if err != nil {
+ return nil, err
+ }
+ return parseLoad(data)
+}
+
+// parseLoad parses the contents of /proc/loadavg and returns the 1, 5 and 15 minute load averages.
+func parseLoad(loadavgBytes []byte) (*LoadAvg, error) {
+ loads := make([]float64, 3)
+ parts := strings.Fields(string(loadavgBytes))
+ if len(parts) < 3 {
+ return nil, fmt.Errorf("%w: Malformed line %q", ErrFileParse, string(loadavgBytes))
+ }
+
+ var err error
+ for i, load := range parts[0:3] {
+ loads[i], err = strconv.ParseFloat(load, 64)
+ if err != nil {
+ return nil, fmt.Errorf("%w: Cannot parse load: %f: %w", ErrFileParse, loads[i], err)
+ }
+ }
+ return &LoadAvg{
+ Load1: loads[0],
+ Load5: loads[1],
+ Load15: loads[2],
+ }, nil
+}
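+
+// Illustrative note (not part of the upstream file): a typical /proc/loadavg
+// line is "0.02 0.04 0.05 1/497 11947"; parseLoad only consumes the first
+// three fields (the 1, 5 and 15 minute load averages).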
diff --git a/vendor/github.com/prometheus/procfs/mdstat.go b/vendor/github.com/prometheus/procfs/mdstat.go
new file mode 100644
index 0000000..67a9d2b
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/mdstat.go
@@ -0,0 +1,276 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+ "fmt"
+ "os"
+ "regexp"
+ "strconv"
+ "strings"
+)
+
+var (
+ statusLineRE = regexp.MustCompile(`(\d+) blocks .*\[(\d+)/(\d+)\] \[([U_]+)\]`)
+ recoveryLineBlocksRE = regexp.MustCompile(`\((\d+/\d+)\)`)
+ recoveryLinePctRE = regexp.MustCompile(`= (.+)%`)
+ recoveryLineFinishRE = regexp.MustCompile(`finish=(.+)min`)
+ recoveryLineSpeedRE = regexp.MustCompile(`speed=(.+)[A-Z]`)
+ componentDeviceRE = regexp.MustCompile(`(.*)\[\d+\]`)
+)
+
+// MDStat holds info parsed from /proc/mdstat.
+type MDStat struct {
+ // Name of the device.
+ Name string
+ // activity-state of the device.
+ ActivityState string
+ // Number of active disks.
+ DisksActive int64
+ // Total number of disks the device requires.
+ DisksTotal int64
+ // Number of failed disks.
+ DisksFailed int64
+ // Number of "down" disks. (the _ indicator in the status line)
+ DisksDown int64
+ // Spare disks in the device.
+ DisksSpare int64
+ // Number of blocks the device holds.
+ BlocksTotal int64
+ // Number of blocks on the device that are in sync.
+ BlocksSynced int64
+ // Number of blocks on the device that need to be synced.
+ BlocksToBeSynced int64
+ // progress percentage of current sync
+ BlocksSyncedPct float64
+ // estimated finishing time for current sync (in minutes)
+ BlocksSyncedFinishTime float64
+ // current sync speed (in Kilobytes/sec)
+ BlocksSyncedSpeed float64
+ // Name of md component devices
+ Devices []string
+}
+
+// MDStat parses an mdstat-file (/proc/mdstat) and returns a slice of
+// structs containing the relevant info. More information available here:
+// https://raid.wiki.kernel.org/index.php/Mdstat
+func (fs FS) MDStat() ([]MDStat, error) {
+ data, err := os.ReadFile(fs.proc.Path("mdstat"))
+ if err != nil {
+ return nil, err
+ }
+ mdstat, err := parseMDStat(data)
+ if err != nil {
+ return nil, fmt.Errorf("%w: Cannot parse %v: %w", ErrFileParse, fs.proc.Path("mdstat"), err)
+ }
+ return mdstat, nil
+}
+
+// parseMDStat parses data from mdstat file (/proc/mdstat) and returns a slice of
+// structs containing the relevant info.
+func parseMDStat(mdStatData []byte) ([]MDStat, error) {
+ mdStats := []MDStat{}
+ lines := strings.Split(string(mdStatData), "\n")
+
+ for i, line := range lines {
+ if strings.TrimSpace(line) == "" || line[0] == ' ' ||
+ strings.HasPrefix(line, "Personalities") ||
+ strings.HasPrefix(line, "unused") {
+ continue
+ }
+
+ deviceFields := strings.Fields(line)
+ if len(deviceFields) < 3 {
+ return nil, fmt.Errorf("%w: Expected 3+ fields, got %q", ErrFileParse, line)
+ }
+ mdName := deviceFields[0] // mdx
+ state := deviceFields[2] // active or inactive
+
+ if len(lines) <= i+3 {
+ return nil, fmt.Errorf("%w: Too few lines for md device: %q", ErrFileParse, mdName)
+ }
+
+ // Failed disks have the suffix (F) & Spare disks have the suffix (S).
+ fail := int64(strings.Count(line, "(F)"))
+ spare := int64(strings.Count(line, "(S)"))
+ active, total, down, size, err := evalStatusLine(lines[i], lines[i+1])
+
+ if err != nil {
+ return nil, fmt.Errorf("%w: Cannot parse md device lines: %v: %w", ErrFileParse, active, err)
+ }
+
+ syncLineIdx := i + 2
+ if strings.Contains(lines[i+2], "bitmap") { // skip bitmap line
+ syncLineIdx++
+ }
+
+ // If device is syncing at the moment, get the number of currently
+ // synced bytes, otherwise that number equals the size of the device.
+ blocksSynced := size
+ blocksToBeSynced := size
+ speed := float64(0)
+ finish := float64(0)
+ pct := float64(0)
+ recovering := strings.Contains(lines[syncLineIdx], "recovery")
+ resyncing := strings.Contains(lines[syncLineIdx], "resync")
+ checking := strings.Contains(lines[syncLineIdx], "check")
+
+ // Append recovery and resyncing state info.
+ if recovering || resyncing || checking {
+ if recovering {
+ state = "recovering"
+ } else if checking {
+ state = "checking"
+ } else {
+ state = "resyncing"
+ }
+
+ // Handle case when resync=PENDING or resync=DELAYED.
+ if strings.Contains(lines[syncLineIdx], "PENDING") ||
+ strings.Contains(lines[syncLineIdx], "DELAYED") {
+ blocksSynced = 0
+ } else {
+ blocksSynced, blocksToBeSynced, pct, finish, speed, err = evalRecoveryLine(lines[syncLineIdx])
+ if err != nil {
+ return nil, fmt.Errorf("%w: Cannot parse sync line in md device: %q: %w", ErrFileParse, mdName, err)
+ }
+ }
+ }
+
+ mdStats = append(mdStats, MDStat{
+ Name: mdName,
+ ActivityState: state,
+ DisksActive: active,
+ DisksFailed: fail,
+ DisksDown: down,
+ DisksSpare: spare,
+ DisksTotal: total,
+ BlocksTotal: size,
+ BlocksSynced: blocksSynced,
+ BlocksToBeSynced: blocksToBeSynced,
+ BlocksSyncedPct: pct,
+ BlocksSyncedFinishTime: finish,
+ BlocksSyncedSpeed: speed,
+ Devices: evalComponentDevices(deviceFields),
+ })
+ }
+
+ return mdStats, nil
+}
+
+func evalStatusLine(deviceLine, statusLine string) (active, total, down, size int64, err error) {
+ statusFields := strings.Fields(statusLine)
+ if len(statusFields) < 1 {
+ return 0, 0, 0, 0, fmt.Errorf("%w: Unexpected statusline %q: %w", ErrFileParse, statusLine, err)
+ }
+
+ sizeStr := statusFields[0]
+ size, err = strconv.ParseInt(sizeStr, 10, 64)
+ if err != nil {
+ return 0, 0, 0, 0, fmt.Errorf("%w: Unexpected statusline %q: %w", ErrFileParse, statusLine, err)
+ }
+
+ if strings.Contains(deviceLine, "raid0") || strings.Contains(deviceLine, "linear") {
+ // In the device deviceLine, only disks have a number associated with them in [].
+ total = int64(strings.Count(deviceLine, "["))
+ return total, total, 0, size, nil
+ }
+
+ if strings.Contains(deviceLine, "inactive") {
+ return 0, 0, 0, size, nil
+ }
+
+ matches := statusLineRE.FindStringSubmatch(statusLine)
+ if len(matches) != 5 {
+ return 0, 0, 0, 0, fmt.Errorf("%w: Could not find all substring matches %s: %w", ErrFileParse, statusLine, err)
+ }
+
+ total, err = strconv.ParseInt(matches[2], 10, 64)
+ if err != nil {
+ return 0, 0, 0, 0, fmt.Errorf("%w: Unexpected statusline %q: %w", ErrFileParse, statusLine, err)
+ }
+
+ active, err = strconv.ParseInt(matches[3], 10, 64)
+ if err != nil {
+ return 0, 0, 0, 0, fmt.Errorf("%w: Unexpected active %d: %w", ErrFileParse, active, err)
+ }
+ down = int64(strings.Count(matches[4], "_"))
+
+ return active, total, down, size, nil
+}
+
+func evalRecoveryLine(recoveryLine string) (blocksSynced int64, blocksToBeSynced int64, pct float64, finish float64, speed float64, err error) {
+ matches := recoveryLineBlocksRE.FindStringSubmatch(recoveryLine)
+ if len(matches) != 2 {
+ return 0, 0, 0, 0, 0, fmt.Errorf("%w: Unexpected recoveryLine blocks %s: %w", ErrFileParse, recoveryLine, err)
+ }
+
+ blocks := strings.Split(matches[1], "/")
+ blocksSynced, err = strconv.ParseInt(blocks[0], 10, 64)
+ if err != nil {
+ return 0, 0, 0, 0, 0, fmt.Errorf("%w: Unable to parse recovery blocks synced %q: %w", ErrFileParse, matches[1], err)
+ }
+
+ blocksToBeSynced, err = strconv.ParseInt(blocks[1], 10, 64)
+ if err != nil {
+ return blocksSynced, 0, 0, 0, 0, fmt.Errorf("%w: Unable to parse recovery to be synced blocks %q: %w", ErrFileParse, matches[2], err)
+ }
+
+ // Get percentage complete
+ matches = recoveryLinePctRE.FindStringSubmatch(recoveryLine)
+ if len(matches) != 2 {
+ return blocksSynced, blocksToBeSynced, 0, 0, 0, fmt.Errorf("%w: Unexpected recoveryLine matching percentage %s", ErrFileParse, recoveryLine)
+ }
+ pct, err = strconv.ParseFloat(strings.TrimSpace(matches[1]), 64)
+ if err != nil {
+ return blocksSynced, blocksToBeSynced, 0, 0, 0, fmt.Errorf("%w: Error parsing float from recoveryLine %q", ErrFileParse, recoveryLine)
+ }
+
+ // Get time expected left to complete
+ matches = recoveryLineFinishRE.FindStringSubmatch(recoveryLine)
+ if len(matches) != 2 {
+ return blocksSynced, blocksToBeSynced, pct, 0, 0, fmt.Errorf("%w: Unexpected recoveryLine matching est. finish time: %s", ErrFileParse, recoveryLine)
+ }
+ finish, err = strconv.ParseFloat(matches[1], 64)
+ if err != nil {
+ return blocksSynced, blocksToBeSynced, pct, 0, 0, fmt.Errorf("%w: Unable to parse float from recoveryLine: %q", ErrFileParse, recoveryLine)
+ }
+
+ // Get recovery speed
+ matches = recoveryLineSpeedRE.FindStringSubmatch(recoveryLine)
+ if len(matches) != 2 {
+ return blocksSynced, blocksToBeSynced, pct, finish, 0, fmt.Errorf("%w: Unexpected recoveryLine value: %s", ErrFileParse, recoveryLine)
+ }
+ speed, err = strconv.ParseFloat(matches[1], 64)
+ if err != nil {
+ return blocksSynced, blocksToBeSynced, pct, finish, 0, fmt.Errorf("%w: Error parsing float from recoveryLine: %q: %w", ErrFileParse, recoveryLine, err)
+ }
+
+ return blocksSynced, blocksToBeSynced, pct, finish, speed, nil
+}
+
+func evalComponentDevices(deviceFields []string) []string {
+ mdComponentDevices := make([]string, 0)
+ if len(deviceFields) > 3 {
+ for _, field := range deviceFields[4:] {
+ match := componentDeviceRE.FindStringSubmatch(field)
+ if match == nil {
+ continue
+ }
+ mdComponentDevices = append(mdComponentDevices, match[1])
+ }
+ }
+
+ return mdComponentDevices
+}
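
A sketch of how a caller would iterate the parsed /proc/mdstat entries; as in the earlier examples, `procfs.NewFS` is assumed to be the package constructor and the error path covers hosts without md devices.

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc") // assumed constructor
	if err != nil {
		log.Fatal(err)
	}

	stats, err := fs.MDStat()
	if err != nil {
		log.Fatal(err) // e.g. no /proc/mdstat on hosts without md devices
	}

	for _, md := range stats {
		// ActivityState reflects the status line plus any recovery/resync/check state.
		fmt.Printf("%s: %s, %d/%d disks active, %d/%d blocks synced (%.1f%%)\n",
			md.Name, md.ActivityState,
			md.DisksActive, md.DisksTotal,
			md.BlocksSynced, md.BlocksToBeSynced, md.BlocksSyncedPct)
	}
}
```
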
diff --git a/vendor/github.com/prometheus/procfs/meminfo.go b/vendor/github.com/prometheus/procfs/meminfo.go
new file mode 100644
index 0000000..4b2c405
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/meminfo.go
@@ -0,0 +1,389 @@
+// Copyright 2019 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "io"
+ "strconv"
+ "strings"
+
+ "github.com/prometheus/procfs/internal/util"
+)
+
+// Meminfo represents memory statistics.
+type Meminfo struct {
+ // Total usable ram (i.e. physical ram minus a few reserved
+ // bits and the kernel binary code)
+ MemTotal *uint64
+ // The sum of LowFree+HighFree
+ MemFree *uint64
+ // An estimate of how much memory is available for starting
+ // new applications, without swapping. Calculated from
+ // MemFree, SReclaimable, the size of the file LRU lists, and
+ // the low watermarks in each zone. The estimate takes into
+ // account that the system needs some page cache to function
+ // well, and that not all reclaimable slab will be
+ // reclaimable, due to items being in use. The impact of those
+ // factors will vary from system to system.
+ MemAvailable *uint64
+ // Relatively temporary storage for raw disk blocks shouldn't
+ // get tremendously large (20MB or so)
+ Buffers *uint64
+ Cached *uint64
+ // Memory that once was swapped out, is swapped back in but
+ // still also is in the swapfile (if memory is needed it
+ // doesn't need to be swapped out AGAIN because it is already
+ // in the swapfile. This saves I/O)
+ SwapCached *uint64
+ // Memory that has been used more recently and usually not
+ // reclaimed unless absolutely necessary.
+ Active *uint64
+ // Memory which has been less recently used. It is more
+ // eligible to be reclaimed for other purposes
+ Inactive *uint64
+ ActiveAnon *uint64
+ InactiveAnon *uint64
+ ActiveFile *uint64
+ InactiveFile *uint64
+ Unevictable *uint64
+ Mlocked *uint64
+ // total amount of swap space available
+ SwapTotal *uint64
+ // Memory which has been evicted from RAM, and is temporarily
+ // on the disk
+ SwapFree *uint64
+ // Memory which is waiting to get written back to the disk
+ Dirty *uint64
+ // Memory which is actively being written back to the disk
+ Writeback *uint64
+ // Non-file backed pages mapped into userspace page tables
+ AnonPages *uint64
+ // files which have been mapped, such as libraries
+ Mapped *uint64
+ Shmem *uint64
+ // in-kernel data structures cache
+ Slab *uint64
+ // Part of Slab, that might be reclaimed, such as caches
+ SReclaimable *uint64
+ // Part of Slab, that cannot be reclaimed on memory pressure
+ SUnreclaim *uint64
+ KernelStack *uint64
+ // amount of memory dedicated to the lowest level of page
+ // tables.
+ PageTables *uint64
+ // NFS pages sent to the server, but not yet committed to
+ // stable storage
+ NFSUnstable *uint64
+ // Memory used for block device "bounce buffers"
+ Bounce *uint64
+ // Memory used by FUSE for temporary writeback buffers
+ WritebackTmp *uint64
+ // Based on the overcommit ratio ('vm.overcommit_ratio'),
+ // this is the total amount of memory currently available to
+ // be allocated on the system. This limit is only adhered to
+ // if strict overcommit accounting is enabled (mode 2 in
+ // 'vm.overcommit_memory').
+ // The CommitLimit is calculated with the following formula:
+ // CommitLimit = ([total RAM pages] - [total huge TLB pages]) *
+ // overcommit_ratio / 100 + [total swap pages]
+ // For example, on a system with 1G of physical RAM and 7G
+ // of swap with a `vm.overcommit_ratio` of 30 it would
+ // yield a CommitLimit of 7.3G.
+ // For more details, see the memory overcommit documentation
+ // in vm/overcommit-accounting.
+ CommitLimit *uint64
+ // The amount of memory presently allocated on the system.
+ // The committed memory is a sum of all of the memory which
+ // has been allocated by processes, even if it has not been
+ // "used" by them as of yet. A process which malloc()'s 1G
+ // of memory, but only touches 300M of it will show up as
+ // using 1G. This 1G is memory which has been "committed" to
+ // by the VM and can be used at any time by the allocating
+ // application. With strict overcommit enabled on the system
+ // (mode 2 in 'vm.overcommit_memory'),allocations which would
+ // exceed the CommitLimit (detailed above) will not be permitted.
+ // This is useful if one needs to guarantee that processes will
+ // not fail due to lack of memory once that memory has been
+ // successfully allocated.
+ CommittedAS *uint64
+ // total size of vmalloc memory area
+ VmallocTotal *uint64
+ // amount of vmalloc area which is used
+ VmallocUsed *uint64
+ // largest contiguous block of vmalloc area which is free
+ VmallocChunk *uint64
+ Percpu *uint64
+ HardwareCorrupted *uint64
+ AnonHugePages *uint64
+ ShmemHugePages *uint64
+ ShmemPmdMapped *uint64
+ CmaTotal *uint64
+ CmaFree *uint64
+ HugePagesTotal *uint64
+ HugePagesFree *uint64
+ HugePagesRsvd *uint64
+ HugePagesSurp *uint64
+ Hugepagesize *uint64
+ DirectMap4k *uint64
+ DirectMap2M *uint64
+ DirectMap1G *uint64
+
+ // The struct fields below are the byte-normalized counterparts to the
+ // existing struct fields. Values are normalized using the optional
+ // unit field in the meminfo line.
+ MemTotalBytes *uint64
+ MemFreeBytes *uint64
+ MemAvailableBytes *uint64
+ BuffersBytes *uint64
+ CachedBytes *uint64
+ SwapCachedBytes *uint64
+ ActiveBytes *uint64
+ InactiveBytes *uint64
+ ActiveAnonBytes *uint64
+ InactiveAnonBytes *uint64
+ ActiveFileBytes *uint64
+ InactiveFileBytes *uint64
+ UnevictableBytes *uint64
+ MlockedBytes *uint64
+ SwapTotalBytes *uint64
+ SwapFreeBytes *uint64
+ DirtyBytes *uint64
+ WritebackBytes *uint64
+ AnonPagesBytes *uint64
+ MappedBytes *uint64
+ ShmemBytes *uint64
+ SlabBytes *uint64
+ SReclaimableBytes *uint64
+ SUnreclaimBytes *uint64
+ KernelStackBytes *uint64
+ PageTablesBytes *uint64
+ NFSUnstableBytes *uint64
+ BounceBytes *uint64
+ WritebackTmpBytes *uint64
+ CommitLimitBytes *uint64
+ CommittedASBytes *uint64
+ VmallocTotalBytes *uint64
+ VmallocUsedBytes *uint64
+ VmallocChunkBytes *uint64
+ PercpuBytes *uint64
+ HardwareCorruptedBytes *uint64
+ AnonHugePagesBytes *uint64
+ ShmemHugePagesBytes *uint64
+ ShmemPmdMappedBytes *uint64
+ CmaTotalBytes *uint64
+ CmaFreeBytes *uint64
+ HugepagesizeBytes *uint64
+ DirectMap4kBytes *uint64
+ DirectMap2MBytes *uint64
+ DirectMap1GBytes *uint64
+}
+
+// Meminfo returns information about current kernel/system memory statistics.
+// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt
+func (fs FS) Meminfo() (Meminfo, error) {
+ b, err := util.ReadFileNoStat(fs.proc.Path("meminfo"))
+ if err != nil {
+ return Meminfo{}, err
+ }
+
+ m, err := parseMemInfo(bytes.NewReader(b))
+ if err != nil {
+ return Meminfo{}, fmt.Errorf("%w: %w", ErrFileParse, err)
+ }
+
+ return *m, nil
+}
+
+func parseMemInfo(r io.Reader) (*Meminfo, error) {
+ var m Meminfo
+ s := bufio.NewScanner(r)
+ for s.Scan() {
+ fields := strings.Fields(s.Text())
+ var val, valBytes uint64
+
+ val, err := strconv.ParseUint(fields[1], 0, 64)
+ if err != nil {
+ return nil, err
+ }
+
+ switch len(fields) {
+ case 2:
+ // No unit present, use the parsed value as bytes directly.
+ valBytes = val
+ case 3:
+ // Unit present in optional 3rd field, convert it to
+ // bytes. The only unit supported within the Linux
+ // kernel is `kB`.
+ if fields[2] != "kB" {
+ return nil, fmt.Errorf("%w: Unsupported unit in optional 3rd field %q", ErrFileParse, fields[2])
+ }
+
+ valBytes = 1024 * val
+
+ default:
+ return nil, fmt.Errorf("%w: Malformed line %q", ErrFileParse, s.Text())
+ }
+
+ switch fields[0] {
+ case "MemTotal:":
+ m.MemTotal = &val
+ m.MemTotalBytes = &valBytes
+ case "MemFree:":
+ m.MemFree = &val
+ m.MemFreeBytes = &valBytes
+ case "MemAvailable:":
+ m.MemAvailable = &val
+ m.MemAvailableBytes = &valBytes
+ case "Buffers:":
+ m.Buffers = &val
+ m.BuffersBytes = &valBytes
+ case "Cached:":
+ m.Cached = &val
+ m.CachedBytes = &valBytes
+ case "SwapCached:":
+ m.SwapCached = &val
+ m.SwapCachedBytes = &valBytes
+ case "Active:":
+ m.Active = &val
+ m.ActiveBytes = &valBytes
+ case "Inactive:":
+ m.Inactive = &val
+ m.InactiveBytes = &valBytes
+ case "Active(anon):":
+ m.ActiveAnon = &val
+ m.ActiveAnonBytes = &valBytes
+ case "Inactive(anon):":
+ m.InactiveAnon = &val
+ m.InactiveAnonBytes = &valBytes
+ case "Active(file):":
+ m.ActiveFile = &val
+ m.ActiveFileBytes = &valBytes
+ case "Inactive(file):":
+ m.InactiveFile = &val
+ m.InactiveFileBytes = &valBytes
+ case "Unevictable:":
+ m.Unevictable = &val
+ m.UnevictableBytes = &valBytes
+ case "Mlocked:":
+ m.Mlocked = &val
+ m.MlockedBytes = &valBytes
+ case "SwapTotal:":
+ m.SwapTotal = &val
+ m.SwapTotalBytes = &valBytes
+ case "SwapFree:":
+ m.SwapFree = &val
+ m.SwapFreeBytes = &valBytes
+ case "Dirty:":
+ m.Dirty = &val
+ m.DirtyBytes = &valBytes
+ case "Writeback:":
+ m.Writeback = &val
+ m.WritebackBytes = &valBytes
+ case "AnonPages:":
+ m.AnonPages = &val
+ m.AnonPagesBytes = &valBytes
+ case "Mapped:":
+ m.Mapped = &val
+ m.MappedBytes = &valBytes
+ case "Shmem:":
+ m.Shmem = &val
+ m.ShmemBytes = &valBytes
+ case "Slab:":
+ m.Slab = &val
+ m.SlabBytes = &valBytes
+ case "SReclaimable:":
+ m.SReclaimable = &val
+ m.SReclaimableBytes = &valBytes
+ case "SUnreclaim:":
+ m.SUnreclaim = &val
+ m.SUnreclaimBytes = &valBytes
+ case "KernelStack:":
+ m.KernelStack = &val
+ m.KernelStackBytes = &valBytes
+ case "PageTables:":
+ m.PageTables = &val
+ m.PageTablesBytes = &valBytes
+ case "NFS_Unstable:":
+ m.NFSUnstable = &val
+ m.NFSUnstableBytes = &valBytes
+ case "Bounce:":
+ m.Bounce = &val
+ m.BounceBytes = &valBytes
+ case "WritebackTmp:":
+ m.WritebackTmp = &val
+ m.WritebackTmpBytes = &valBytes
+ case "CommitLimit:":
+ m.CommitLimit = &val
+ m.CommitLimitBytes = &valBytes
+ case "Committed_AS:":
+ m.CommittedAS = &val
+ m.CommittedASBytes = &valBytes
+ case "VmallocTotal:":
+ m.VmallocTotal = &val
+ m.VmallocTotalBytes = &valBytes
+ case "VmallocUsed:":
+ m.VmallocUsed = &val
+ m.VmallocUsedBytes = &valBytes
+ case "VmallocChunk:":
+ m.VmallocChunk = &val
+ m.VmallocChunkBytes = &valBytes
+ case "Percpu:":
+ m.Percpu = &val
+ m.PercpuBytes = &valBytes
+ case "HardwareCorrupted:":
+ m.HardwareCorrupted = &val
+ m.HardwareCorruptedBytes = &valBytes
+ case "AnonHugePages:":
+ m.AnonHugePages = &val
+ m.AnonHugePagesBytes = &valBytes
+ case "ShmemHugePages:":
+ m.ShmemHugePages = &val
+ m.ShmemHugePagesBytes = &valBytes
+ case "ShmemPmdMapped:":
+ m.ShmemPmdMapped = &val
+ m.ShmemPmdMappedBytes = &valBytes
+ case "CmaTotal:":
+ m.CmaTotal = &val
+ m.CmaTotalBytes = &valBytes
+ case "CmaFree:":
+ m.CmaFree = &val
+ m.CmaFreeBytes = &valBytes
+ case "HugePages_Total:":
+ m.HugePagesTotal = &val
+ case "HugePages_Free:":
+ m.HugePagesFree = &val
+ case "HugePages_Rsvd:":
+ m.HugePagesRsvd = &val
+ case "HugePages_Surp:":
+ m.HugePagesSurp = &val
+ case "Hugepagesize:":
+ m.Hugepagesize = &val
+ m.HugepagesizeBytes = &valBytes
+ case "DirectMap4k:":
+ m.DirectMap4k = &val
+ m.DirectMap4kBytes = &valBytes
+ case "DirectMap2M:":
+ m.DirectMap2M = &val
+ m.DirectMap2MBytes = &valBytes
+ case "DirectMap1G:":
+ m.DirectMap1G = &val
+ m.DirectMap1GBytes = &valBytes
+ }
+ }
+
+ return &m, nil
+}
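
A sketch of reading the Meminfo struct introduced above; it checks the pointer fields before dereferencing and contrasts the raw values with their byte-normalized counterparts. `procfs.NewFS` is an assumed constructor from the wider package.

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc") // assumed constructor
	if err != nil {
		log.Fatal(err)
	}

	mi, err := fs.Meminfo()
	if err != nil {
		log.Fatal(err)
	}

	// Raw fields keep the kernel's units (usually kB); the *Bytes
	// counterparts are normalized using the optional unit column.
	if mi.MemTotal != nil && mi.MemTotalBytes != nil {
		fmt.Printf("MemTotal: %d (raw) / %d bytes\n", *mi.MemTotal, *mi.MemTotalBytes)
	}
	if mi.MemAvailable != nil {
		fmt.Printf("MemAvailable: %d\n", *mi.MemAvailable)
	}
}
```
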
diff --git a/vendor/github.com/prometheus/procfs/mountinfo.go b/vendor/github.com/prometheus/procfs/mountinfo.go
new file mode 100644
index 0000000..a704c5e
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/mountinfo.go
@@ -0,0 +1,180 @@
+// Copyright 2019 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "strconv"
+ "strings"
+
+ "github.com/prometheus/procfs/internal/util"
+)
+
+// A MountInfo is a type that describes the details and options
+// of each mount, parsed from /proc/self/mountinfo.
+// The fields of each entry in /proc/self/mountinfo
+// are described in the following man page.
+// http://man7.org/linux/man-pages/man5/proc.5.html
+type MountInfo struct {
+ // Unique ID for the mount
+ MountID int
+ // The ID of the parent mount
+ ParentID int
+ // The value of `st_dev` for the files on this FS
+ MajorMinorVer string
+ // The pathname of the directory in the FS that forms
+ // the root for this mount
+ Root string
+ // The pathname of the mount point relative to the root
+ MountPoint string
+ // Mount options
+ Options map[string]string
+ // Zero or more optional fields
+ OptionalFields map[string]string
+ // The Filesystem type
+ FSType string
+ // FS specific information or "none"
+ Source string
+ // Superblock options
+ SuperOptions map[string]string
+}
+
+// Reads each line of the mountinfo file, and returns a list of formatted MountInfo structs.
+func parseMountInfo(info []byte) ([]*MountInfo, error) {
+ mounts := []*MountInfo{}
+ scanner := bufio.NewScanner(bytes.NewReader(info))
+ for scanner.Scan() {
+ mountString := scanner.Text()
+ parsedMounts, err := parseMountInfoString(mountString)
+ if err != nil {
+ return nil, err
+ }
+ mounts = append(mounts, parsedMounts)
+ }
+
+ err := scanner.Err()
+ return mounts, err
+}
+
+// Parses a mountinfo file line, and converts it to a MountInfo struct.
+// An important check here is whether the hyphen separator exists; if it does not,
+// the line is malformed.
+func parseMountInfoString(mountString string) (*MountInfo, error) {
+ var err error
+
+ mountInfo := strings.Split(mountString, " ")
+ mountInfoLength := len(mountInfo)
+ if mountInfoLength < 10 {
+ return nil, fmt.Errorf("%w: Too few fields in mount string: %s", ErrFileParse, mountString)
+ }
+
+ if mountInfo[mountInfoLength-4] != "-" {
+ return nil, fmt.Errorf("%w: couldn't find separator in expected field: %s", ErrFileParse, mountInfo[mountInfoLength-4])
+ }
+
+ mount := &MountInfo{
+ MajorMinorVer: mountInfo[2],
+ Root: mountInfo[3],
+ MountPoint: mountInfo[4],
+ Options: mountOptionsParser(mountInfo[5]),
+ OptionalFields: nil,
+ FSType: mountInfo[mountInfoLength-3],
+ Source: mountInfo[mountInfoLength-2],
+ SuperOptions: mountOptionsParser(mountInfo[mountInfoLength-1]),
+ }
+
+ mount.MountID, err = strconv.Atoi(mountInfo[0])
+ if err != nil {
+ return nil, fmt.Errorf("%w: mount ID: %q", ErrFileParse, mount.MountID)
+ }
+ mount.ParentID, err = strconv.Atoi(mountInfo[1])
+ if err != nil {
+ return nil, fmt.Errorf("%w: parent ID: %q", ErrFileParse, mount.ParentID)
+ }
+ // Has optional fields, which is a space separated list of values.
+ // Example: shared:2 master:7
+ if mountInfo[6] != "" {
+ mount.OptionalFields, err = mountOptionsParseOptionalFields(mountInfo[6 : mountInfoLength-4])
+ if err != nil {
+ return nil, fmt.Errorf("%w: %w", ErrFileParse, err)
+ }
+ }
+ return mount, nil
+}
+
+// mountOptionsIsValidField checks a string against a valid list of optional fields keys.
+func mountOptionsIsValidField(s string) bool {
+ switch s {
+ case
+ "shared",
+ "master",
+ "propagate_from",
+ "unbindable":
+ return true
+ }
+ return false
+}
+
+// mountOptionsParseOptionalFields parses a list of optional field strings into a map of strings.
+func mountOptionsParseOptionalFields(o []string) (map[string]string, error) {
+ optionalFields := make(map[string]string)
+ for _, field := range o {
+ optionSplit := strings.SplitN(field, ":", 2)
+ value := ""
+ if len(optionSplit) == 2 {
+ value = optionSplit[1]
+ }
+ if mountOptionsIsValidField(optionSplit[0]) {
+ optionalFields[optionSplit[0]] = value
+ }
+ }
+ return optionalFields, nil
+}
+
+// mountOptionsParser parses the mount options, superblock options.
+func mountOptionsParser(mountOptions string) map[string]string {
+ opts := make(map[string]string)
+ options := strings.Split(mountOptions, ",")
+ for _, opt := range options {
+ splitOption := strings.Split(opt, "=")
+ if len(splitOption) < 2 {
+ key := splitOption[0]
+ opts[key] = ""
+ } else {
+ key, value := splitOption[0], splitOption[1]
+ opts[key] = value
+ }
+ }
+ return opts
+}
+
+// GetMounts retrieves mountinfo information from `/proc/self/mountinfo`.
+func GetMounts() ([]*MountInfo, error) {
+ data, err := util.ReadFileNoStat("/proc/self/mountinfo")
+ if err != nil {
+ return nil, err
+ }
+ return parseMountInfo(data)
+}
+
+// GetProcMounts retrieves mountinfo information from a process's `/proc/[pid]/mountinfo`.
+func GetProcMounts(pid int) ([]*MountInfo, error) {
+ data, err := util.ReadFileNoStat(fmt.Sprintf("/proc/%d/mountinfo", pid))
+ if err != nil {
+ return nil, err
+ }
+ return parseMountInfo(data)
+}
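
A sketch of consuming the exported GetMounts helper above. The Options/SuperOptions maps store keys with empty values when an option has no `=value` part, which is how the read-only check below works.

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	mounts, err := procfs.GetMounts() // reads /proc/self/mountinfo
	if err != nil {
		log.Fatal(err)
	}

	for _, m := range mounts {
		// Options is a parsed map; a key with an empty value means the
		// option carried no "=value" part (e.g. "ro").
		_, readOnly := m.Options["ro"]
		fmt.Printf("%-20s %-10s ro=%v source=%s\n", m.MountPoint, m.FSType, readOnly, m.Source)
	}
}
```
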
diff --git a/vendor/github.com/prometheus/procfs/mountstats.go b/vendor/github.com/prometheus/procfs/mountstats.go
new file mode 100644
index 0000000..75a3b6c
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/mountstats.go
@@ -0,0 +1,707 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+// While implementing parsing of /proc/[pid]/mountstats, this blog was used
+// heavily as a reference:
+// https://utcc.utoronto.ca/~cks/space/blog/linux/NFSMountstatsIndex
+//
+// Special thanks to Chris Siebenmann for all of his posts explaining the
+// various statistics available for NFS.
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+ "strconv"
+ "strings"
+ "time"
+)
+
+// Constants shared between multiple functions.
+const (
+ deviceEntryLen = 8
+
+ fieldBytesLen = 8
+ fieldEventsLen = 27
+
+ statVersion10 = "1.0"
+ statVersion11 = "1.1"
+
+ fieldTransport10TCPLen = 10
+ fieldTransport10UDPLen = 7
+
+ fieldTransport11TCPLen = 13
+ fieldTransport11UDPLen = 10
+
+ // kernel version >= 4.14 MaxLen
+ // See: https://elixir.bootlin.com/linux/v6.4.8/source/net/sunrpc/xprtrdma/xprt_rdma.h#L393
+ fieldTransport11RDMAMaxLen = 28
+
+ // kernel version <= 4.2 MinLen
+ // See: https://elixir.bootlin.com/linux/v4.2.8/source/net/sunrpc/xprtrdma/xprt_rdma.h#L331
+ fieldTransport11RDMAMinLen = 20
+)
+
+// A Mount is a device mount parsed from /proc/[pid]/mountstats.
+type Mount struct {
+ // Name of the device.
+ Device string
+ // The mount point of the device.
+ Mount string
+ // The filesystem type used by the device.
+ Type string
+ // If available additional statistics related to this Mount.
+ // Use a type assertion to determine if additional statistics are available.
+ Stats MountStats
+}
+
+// A MountStats is a type which contains detailed statistics for a specific
+// type of Mount.
+type MountStats interface {
+ mountStats()
+}
+
+// A MountStatsNFS is a MountStats implementation for NFSv3 and v4 mounts.
+type MountStatsNFS struct {
+ // The version of statistics provided.
+ StatVersion string
+ // The mount options of the NFS mount.
+ Opts map[string]string
+ // The age of the NFS mount.
+ Age time.Duration
+ // Statistics related to byte counters for various operations.
+ Bytes NFSBytesStats
+ // Statistics related to various NFS event occurrences.
+ Events NFSEventsStats
+ // Statistics broken down by filesystem operation.
+ Operations []NFSOperationStats
+ // Statistics about the NFS RPC transport.
+ Transport []NFSTransportStats
+}
+
+// mountStats implements MountStats.
+func (m MountStatsNFS) mountStats() {}
+
+// A NFSBytesStats contains statistics about the number of bytes read and written
+// by an NFS client to and from an NFS server.
+type NFSBytesStats struct {
+ // Number of bytes read using the read() syscall.
+ Read uint64
+ // Number of bytes written using the write() syscall.
+ Write uint64
+ // Number of bytes read using the read() syscall in O_DIRECT mode.
+ DirectRead uint64
+ // Number of bytes written using the write() syscall in O_DIRECT mode.
+ DirectWrite uint64
+ // Number of bytes read from the NFS server, in total.
+ ReadTotal uint64
+ // Number of bytes written to the NFS server, in total.
+ WriteTotal uint64
+ // Number of pages read directly via mmap()'d files.
+ ReadPages uint64
+ // Number of pages written directly via mmap()'d files.
+ WritePages uint64
+}
+
+// A NFSEventsStats contains statistics about NFS event occurrences.
+type NFSEventsStats struct {
+ // Number of times cached inode attributes are re-validated from the server.
+ InodeRevalidate uint64
+ // Number of times cached dentry nodes are re-validated from the server.
+ DnodeRevalidate uint64
+ // Number of times an inode cache is cleared.
+ DataInvalidate uint64
+ // Number of times cached inode attributes are invalidated.
+ AttributeInvalidate uint64
+ // Number of times files or directories have been open()'d.
+ VFSOpen uint64
+ // Number of times a directory lookup has occurred.
+ VFSLookup uint64
+ // Number of times permissions have been checked.
+ VFSAccess uint64
+ // Number of updates (and potential writes) to pages.
+ VFSUpdatePage uint64
+ // Number of pages read directly via mmap()'d files.
+ VFSReadPage uint64
+ // Number of times a group of pages have been read.
+ VFSReadPages uint64
+ // Number of pages written directly via mmap()'d files.
+ VFSWritePage uint64
+ // Number of times a group of pages have been written.
+ VFSWritePages uint64
+ // Number of times directory entries have been read with getdents().
+ VFSGetdents uint64
+ // Number of times attributes have been set on inodes.
+ VFSSetattr uint64
+ // Number of pending writes that have been forcefully flushed to the server.
+ VFSFlush uint64
+ // Number of times fsync() has been called on directories and files.
+ VFSFsync uint64
+ // Number of times locking has been attempted on a file.
+ VFSLock uint64
+ // Number of times files have been closed and released.
+ VFSFileRelease uint64
+ // Unknown. Possibly unused.
+ CongestionWait uint64
+ // Number of times files have been truncated.
+ Truncation uint64
+ // Number of times a file has been grown due to writes beyond its existing end.
+ WriteExtension uint64
+ // Number of times a file was removed while still open by another process.
+ SillyRename uint64
+ // Number of times the NFS server gave less data than expected while reading.
+ ShortRead uint64
+ // Number of times the NFS server wrote less data than expected while writing.
+ ShortWrite uint64
+ // Number of times the NFS server indicated EJUKEBOX; retrieving data from
+ // offline storage.
+ JukeboxDelay uint64
+ // Number of NFS v4.1+ pNFS reads.
+ PNFSRead uint64
+ // Number of NFS v4.1+ pNFS writes.
+ PNFSWrite uint64
+}
+
+// A NFSOperationStats contains statistics for a single operation.
+type NFSOperationStats struct {
+ // The name of the operation.
+ Operation string
+ // Number of requests performed for this operation.
+ Requests uint64
+ // Number of times an actual RPC request has been transmitted for this operation.
+ Transmissions uint64
+ // Number of times a request has had a major timeout.
+ MajorTimeouts uint64
+ // Number of bytes sent for this operation, including RPC headers and payload.
+ BytesSent uint64
+ // Number of bytes received for this operation, including RPC headers and payload.
+ BytesReceived uint64
+ // Duration all requests spent queued for transmission before they were sent.
+ CumulativeQueueMilliseconds uint64
+ // Duration it took to get a reply back after the request was transmitted.
+ CumulativeTotalResponseMilliseconds uint64
+ // Duration from when a request was enqueued to when it was completely handled.
+ CumulativeTotalRequestMilliseconds uint64
+ // The count of operations that complete with tk_status < 0. These statuses usually indicate error conditions.
+ Errors uint64
+}
+
+// A NFSTransportStats contains statistics for the NFS mount RPC requests and
+// responses.
+type NFSTransportStats struct {
+ // The transport protocol used for the NFS mount.
+ Protocol string
+ // The local port used for the NFS mount.
+ Port uint64
+ // Number of times the client has had to establish a connection from scratch
+ // to the NFS server.
+ Bind uint64
+ // Number of times the client has made a TCP connection to the NFS server.
+ Connect uint64
+ // Duration (in jiffies, a kernel internal unit of time) the NFS mount has
+ // spent waiting for connections to the server to be established.
+ ConnectIdleTime uint64
+ // Duration since the NFS mount last saw any RPC traffic.
+ IdleTimeSeconds uint64
+ // Number of RPC requests for this mount sent to the NFS server.
+ Sends uint64
+ // Number of RPC responses for this mount received from the NFS server.
+ Receives uint64
+ // Number of times the NFS server sent a response with a transaction ID
+ // unknown to this client.
+ BadTransactionIDs uint64
+ // A running counter, incremented on each request as the current difference
+ // between sends and receives.
+ CumulativeActiveRequests uint64
+ // A running counter, incremented on each request by the current backlog
+ // queue size.
+ CumulativeBacklog uint64
+
+ // Stats below only available with stat version 1.1.
+
+ // Maximum number of simultaneously active RPC requests ever used.
+ MaximumRPCSlotsUsed uint64
+ // A running counter, incremented on each request as the current size of the
+ // sending queue.
+ CumulativeSendingQueue uint64
+ // A running counter, incremented on each request as the current size of the
+ // pending queue.
+ CumulativePendingQueue uint64
+
+ // Stats below only available with stat version 1.1.
+ // Transport over RDMA
+
+ // accessed when sending a call
+ ReadChunkCount uint64
+ WriteChunkCount uint64
+ ReplyChunkCount uint64
+ TotalRdmaRequest uint64
+
+ // rarely accessed error counters
+ PullupCopyCount uint64
+ HardwayRegisterCount uint64
+ FailedMarshalCount uint64
+ BadReplyCount uint64
+ MrsRecovered uint64
+ MrsOrphaned uint64
+ MrsAllocated uint64
+ EmptySendctxQ uint64
+
+ // accessed when receiving a reply
+ TotalRdmaReply uint64
+ FixupCopyCount uint64
+ ReplyWaitsForSend uint64
+ LocalInvNeeded uint64
+ NomsgCallCount uint64
+ BcallCount uint64
+}
+
+// parseMountStats parses a /proc/[pid]/mountstats file and returns a slice
+// of Mount structures containing detailed information about each mount.
+// If available, statistics for each mount are parsed as well.
+func parseMountStats(r io.Reader) ([]*Mount, error) {
+ const (
+ device = "device"
+ statVersionPrefix = "statvers="
+
+ nfs3Type = "nfs"
+ nfs4Type = "nfs4"
+ )
+
+ var mounts []*Mount
+
+ s := bufio.NewScanner(r)
+ for s.Scan() {
+ // Only look for device entries in this function
+ ss := strings.Fields(string(s.Bytes()))
+ if len(ss) == 0 || ss[0] != device {
+ continue
+ }
+
+ m, err := parseMount(ss)
+ if err != nil {
+ return nil, err
+ }
+
+ // Does this mount also possess statistics information?
+ if len(ss) > deviceEntryLen {
+ // Only NFSv3 and v4 are supported for parsing statistics
+ if m.Type != nfs3Type && m.Type != nfs4Type {
+ return nil, fmt.Errorf("%w: Cannot parse MountStats for %q", ErrFileParse, m.Type)
+ }
+
+ statVersion := strings.TrimPrefix(ss[8], statVersionPrefix)
+
+ stats, err := parseMountStatsNFS(s, statVersion)
+ if err != nil {
+ return nil, err
+ }
+
+ m.Stats = stats
+ }
+
+ mounts = append(mounts, m)
+ }
+
+ return mounts, s.Err()
+}
+
+// parseMount parses an entry in /proc/[pid]/mountstats in the format:
+//
+// device [device] mounted on [mount] with fstype [type]
+func parseMount(ss []string) (*Mount, error) {
+ if len(ss) < deviceEntryLen {
+ return nil, fmt.Errorf("%w: Invalid device %q", ErrFileParse, ss)
+ }
+
+ // Check for specific words appearing at specific indices to ensure
+ // the format is consistent with what we expect
+ format := []struct {
+ i int
+ s string
+ }{
+ {i: 0, s: "device"},
+ {i: 2, s: "mounted"},
+ {i: 3, s: "on"},
+ {i: 5, s: "with"},
+ {i: 6, s: "fstype"},
+ }
+
+ for _, f := range format {
+ if ss[f.i] != f.s {
+ return nil, fmt.Errorf("%w: Invalid device %q", ErrFileParse, ss)
+ }
+ }
+
+ return &Mount{
+ Device: ss[1],
+ Mount: ss[4],
+ Type: ss[7],
+ }, nil
+}
+
+// parseMountStatsNFS parses a MountStatsNFS by scanning additional information
+// related to NFS statistics.
+func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, error) {
+ // Field indicators for parsing specific types of data
+ const (
+ fieldOpts = "opts:"
+ fieldAge = "age:"
+ fieldBytes = "bytes:"
+ fieldEvents = "events:"
+ fieldPerOpStats = "per-op"
+ fieldTransport = "xprt:"
+ )
+
+ stats := &MountStatsNFS{
+ StatVersion: statVersion,
+ }
+
+ for s.Scan() {
+ ss := strings.Fields(string(s.Bytes()))
+ if len(ss) == 0 {
+ break
+ }
+
+ switch ss[0] {
+ case fieldOpts:
+ if len(ss) < 2 {
+ return nil, fmt.Errorf("%w: Incomplete information for NFS stats: %v", ErrFileParse, ss)
+ }
+ if stats.Opts == nil {
+ stats.Opts = map[string]string{}
+ }
+ for _, opt := range strings.Split(ss[1], ",") {
+ split := strings.Split(opt, "=")
+ if len(split) == 2 {
+ stats.Opts[split[0]] = split[1]
+ } else {
+ stats.Opts[opt] = ""
+ }
+ }
+ case fieldAge:
+ if len(ss) < 2 {
+ return nil, fmt.Errorf("%w: Incomplete information for NFS stats: %v", ErrFileParse, ss)
+ }
+ // Age integer is in seconds
+ d, err := time.ParseDuration(ss[1] + "s")
+ if err != nil {
+ return nil, err
+ }
+
+ stats.Age = d
+ case fieldBytes:
+ if len(ss) < 2 {
+ return nil, fmt.Errorf("%w: Incomplete information for NFS stats: %v", ErrFileParse, ss)
+ }
+ bstats, err := parseNFSBytesStats(ss[1:])
+ if err != nil {
+ return nil, err
+ }
+
+ stats.Bytes = *bstats
+ case fieldEvents:
+ if len(ss) < 2 {
+ return nil, fmt.Errorf("%w: Incomplete information for NFS events: %v", ErrFileParse, ss)
+ }
+ estats, err := parseNFSEventsStats(ss[1:])
+ if err != nil {
+ return nil, err
+ }
+
+ stats.Events = *estats
+ case fieldTransport:
+ if len(ss) < 3 {
+ return nil, fmt.Errorf("%w: Incomplete information for NFS transport stats: %v", ErrFileParse, ss)
+ }
+
+ tstats, err := parseNFSTransportStats(ss[1:], statVersion)
+ if err != nil {
+ return nil, err
+ }
+
+ stats.Transport = append(stats.Transport, *tstats)
+ }
+
+ // When encountering "per-operation statistics", we must break this
+ // loop and parse them separately to ensure we can terminate parsing
+ // before reaching another device entry; hence why this 'if' statement
+ // is not just another switch case
+ if ss[0] == fieldPerOpStats {
+ break
+ }
+ }
+
+ if err := s.Err(); err != nil {
+ return nil, err
+ }
+
+ // NFS per-operation stats appear last before the next device entry
+ perOpStats, err := parseNFSOperationStats(s)
+ if err != nil {
+ return nil, err
+ }
+
+ stats.Operations = perOpStats
+
+ return stats, nil
+}
+
+// parseNFSBytesStats parses a NFSBytesStats line using an input set of
+// integer fields.
+func parseNFSBytesStats(ss []string) (*NFSBytesStats, error) {
+ if len(ss) != fieldBytesLen {
+ return nil, fmt.Errorf("%w: Invalid NFS bytes stats: %v", ErrFileParse, ss)
+ }
+
+ ns := make([]uint64, 0, fieldBytesLen)
+ for _, s := range ss {
+ n, err := strconv.ParseUint(s, 10, 64)
+ if err != nil {
+ return nil, err
+ }
+
+ ns = append(ns, n)
+ }
+
+ return &NFSBytesStats{
+ Read: ns[0],
+ Write: ns[1],
+ DirectRead: ns[2],
+ DirectWrite: ns[3],
+ ReadTotal: ns[4],
+ WriteTotal: ns[5],
+ ReadPages: ns[6],
+ WritePages: ns[7],
+ }, nil
+}
+
+// parseNFSEventsStats parses a NFSEventsStats line using an input set of
+// integer fields.
+func parseNFSEventsStats(ss []string) (*NFSEventsStats, error) {
+ if len(ss) != fieldEventsLen {
+ return nil, fmt.Errorf("%w: invalid NFS events stats: %v", ErrFileParse, ss)
+ }
+
+ ns := make([]uint64, 0, fieldEventsLen)
+ for _, s := range ss {
+ n, err := strconv.ParseUint(s, 10, 64)
+ if err != nil {
+ return nil, err
+ }
+
+ ns = append(ns, n)
+ }
+
+ return &NFSEventsStats{
+ InodeRevalidate: ns[0],
+ DnodeRevalidate: ns[1],
+ DataInvalidate: ns[2],
+ AttributeInvalidate: ns[3],
+ VFSOpen: ns[4],
+ VFSLookup: ns[5],
+ VFSAccess: ns[6],
+ VFSUpdatePage: ns[7],
+ VFSReadPage: ns[8],
+ VFSReadPages: ns[9],
+ VFSWritePage: ns[10],
+ VFSWritePages: ns[11],
+ VFSGetdents: ns[12],
+ VFSSetattr: ns[13],
+ VFSFlush: ns[14],
+ VFSFsync: ns[15],
+ VFSLock: ns[16],
+ VFSFileRelease: ns[17],
+ CongestionWait: ns[18],
+ Truncation: ns[19],
+ WriteExtension: ns[20],
+ SillyRename: ns[21],
+ ShortRead: ns[22],
+ ShortWrite: ns[23],
+ JukeboxDelay: ns[24],
+ PNFSRead: ns[25],
+ PNFSWrite: ns[26],
+ }, nil
+}
+
+// parseNFSOperationStats parses a slice of NFSOperationStats by scanning
+// additional information about per-operation statistics until an empty
+// line is reached.
+func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) {
+ const (
+ // Minimum number of expected fields in each per-operation statistics set
+ minFields = 9
+ )
+
+ var ops []NFSOperationStats
+
+ for s.Scan() {
+ ss := strings.Fields(string(s.Bytes()))
+ if len(ss) == 0 {
+ // Must break when reading a blank line after per-operation stats to
+ // enable top-level function to parse the next device entry
+ break
+ }
+
+ if len(ss) < minFields {
+ return nil, fmt.Errorf("%w: invalid NFS per-operations stats: %v", ErrFileParse, ss)
+ }
+
+ // Skip string operation name for integers
+ ns := make([]uint64, 0, minFields-1)
+ for _, st := range ss[1:] {
+ n, err := strconv.ParseUint(st, 10, 64)
+ if err != nil {
+ return nil, err
+ }
+
+ ns = append(ns, n)
+ }
+ opStats := NFSOperationStats{
+ Operation: strings.TrimSuffix(ss[0], ":"),
+ Requests: ns[0],
+ Transmissions: ns[1],
+ MajorTimeouts: ns[2],
+ BytesSent: ns[3],
+ BytesReceived: ns[4],
+ CumulativeQueueMilliseconds: ns[5],
+ CumulativeTotalResponseMilliseconds: ns[6],
+ CumulativeTotalRequestMilliseconds: ns[7],
+ }
+
+ if len(ns) > 8 {
+ opStats.Errors = ns[8]
+ }
+
+ ops = append(ops, opStats)
+ }
+
+ return ops, s.Err()
+}
+
+// parseNFSTransportStats parses a NFSTransportStats line using an input set of
+// integer fields matched to a specific stats version.
+func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats, error) {
+ // Extract the protocol field. It is the only string value in the line
+ protocol := ss[0]
+ ss = ss[1:]
+
+ switch statVersion {
+ case statVersion10:
+ var expectedLength int
+ if protocol == "tcp" {
+ expectedLength = fieldTransport10TCPLen
+ } else if protocol == "udp" {
+ expectedLength = fieldTransport10UDPLen
+ } else {
+ return nil, fmt.Errorf("%w: Invalid NFS protocol \"%s\" in stats 1.0 statement: %v", ErrFileParse, protocol, ss)
+ }
+ if len(ss) != expectedLength {
+ return nil, fmt.Errorf("%w: Invalid NFS transport stats 1.0 statement: %v", ErrFileParse, ss)
+ }
+ case statVersion11:
+ var expectedLength int
+ if protocol == "tcp" {
+ expectedLength = fieldTransport11TCPLen
+ } else if protocol == "udp" {
+ expectedLength = fieldTransport11UDPLen
+ } else if protocol == "rdma" {
+ expectedLength = fieldTransport11RDMAMinLen
+ } else {
+ return nil, fmt.Errorf("%w: invalid NFS protocol \"%s\" in stats 1.1 statement: %v", ErrFileParse, protocol, ss)
+ }
+ if (len(ss) != expectedLength && (protocol == "tcp" || protocol == "udp")) ||
+ (protocol == "rdma" && len(ss) < expectedLength) {
+ return nil, fmt.Errorf("%w: invalid NFS transport stats 1.1 statement: %v, protocol: %v", ErrFileParse, ss, protocol)
+ }
+ default:
+ return nil, fmt.Errorf("%w: Unrecognized NFS transport stats version: %q, protocol: %v", ErrFileParse, statVersion, protocol)
+ }
+
+ // Allocate enough for v1.1 stats since zero value for v1.1 stats will be okay
+ // in a v1.0 response. Since the stat length is bigger for TCP stats, we use
+ // the TCP length here.
+ //
+ // Note: slice length must be set to length of v1.1 stats to avoid a panic when
+ // only v1.0 stats are present.
+ // See: https://github.com/prometheus/node_exporter/issues/571.
+ //
+ // Note: NFS Over RDMA slice length is fieldTransport11RDMAMaxLen
+ ns := make([]uint64, fieldTransport11RDMAMaxLen+3)
+ for i, s := range ss {
+ n, err := strconv.ParseUint(s, 10, 64)
+ if err != nil {
+ return nil, err
+ }
+
+ ns[i] = n
+ }
+
+ // The fields differ depending on the transport protocol (TCP or UDP)
+ // From https://utcc.utoronto.ca/%7Ecks/space/blog/linux/NFSMountstatsXprt
+ //
+ // For the udp RPC transport there is no connection count, connect idle time,
+ // or idle time (fields #3, #4, and #5); all other fields are the same. So
+ // we set them to 0 here.
+ if protocol == "udp" {
+ ns = append(ns[:2], append(make([]uint64, 3), ns[2:]...)...)
+ } else if protocol == "tcp" {
+ ns = append(ns[:fieldTransport11TCPLen], make([]uint64, fieldTransport11RDMAMaxLen-fieldTransport11TCPLen+3)...)
+ } else if protocol == "rdma" {
+ ns = append(ns[:fieldTransport10TCPLen], append(make([]uint64, 3), ns[fieldTransport10TCPLen:]...)...)
+ }
+
+ return &NFSTransportStats{
+ // NFS xprt over tcp or udp
+ Protocol: protocol,
+ Port: ns[0],
+ Bind: ns[1],
+ Connect: ns[2],
+ ConnectIdleTime: ns[3],
+ IdleTimeSeconds: ns[4],
+ Sends: ns[5],
+ Receives: ns[6],
+ BadTransactionIDs: ns[7],
+ CumulativeActiveRequests: ns[8],
+ CumulativeBacklog: ns[9],
+
+ // NFS xprt over tcp or udp
+ // And statVersion 1.1
+ MaximumRPCSlotsUsed: ns[10],
+ CumulativeSendingQueue: ns[11],
+ CumulativePendingQueue: ns[12],
+
+ // NFS xprt over rdma
+ // And stat Version 1.1
+ ReadChunkCount: ns[13],
+ WriteChunkCount: ns[14],
+ ReplyChunkCount: ns[15],
+ TotalRdmaRequest: ns[16],
+ PullupCopyCount: ns[17],
+ HardwayRegisterCount: ns[18],
+ FailedMarshalCount: ns[19],
+ BadReplyCount: ns[20],
+ MrsRecovered: ns[21],
+ MrsOrphaned: ns[22],
+ MrsAllocated: ns[23],
+ EmptySendctxQ: ns[24],
+ TotalRdmaReply: ns[25],
+ FixupCopyCount: ns[26],
+ ReplyWaitsForSend: ns[27],
+ LocalInvNeeded: ns[28],
+ NomsgCallCount: ns[29],
+ BcallCount: ns[30],
+ }, nil
+}
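
The exported accessor that produces these Mount values (a per-process mountstats method on Proc) lives elsewhere in the package and is not part of this hunk, so the sketch below only shows how a caller would inspect the result. The type assertion to *MountStatsNFS follows from how parseMountStats attaches the parsed statistics.

```go
package example

import (
	"fmt"

	"github.com/prometheus/procfs"
)

// summarizeNFS prints a short summary for every NFS mount in the slice.
// The slice is assumed to come from the package's mountstats accessor.
func summarizeNFS(mounts []*procfs.Mount) {
	for _, m := range mounts {
		// Only NFSv3/v4 mounts carry statistics; Stats is nil otherwise.
		nfs, ok := m.Stats.(*procfs.MountStatsNFS)
		if !ok {
			continue
		}

		fmt.Printf("%s on %s (statvers %s, age %s)\n", m.Device, m.Mount, nfs.StatVersion, nfs.Age)
		fmt.Printf("  read %d bytes, wrote %d bytes\n", nfs.Bytes.ReadTotal, nfs.Bytes.WriteTotal)
		for _, op := range nfs.Operations {
			if op.Requests > 0 {
				fmt.Printf("  op %-12s requests=%d errors=%d\n", op.Operation, op.Requests, op.Errors)
			}
		}
	}
}
```
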
diff --git a/vendor/github.com/prometheus/procfs/net_conntrackstat.go b/vendor/github.com/prometheus/procfs/net_conntrackstat.go
new file mode 100644
index 0000000..316df5f
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/net_conntrackstat.go
@@ -0,0 +1,118 @@
+// Copyright 2020 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "io"
+ "strings"
+
+ "github.com/prometheus/procfs/internal/util"
+)
+
+// A ConntrackStatEntry represents one line from net/stat/nf_conntrack
+// and contains netfilter conntrack statistics at one CPU core.
+type ConntrackStatEntry struct {
+ Entries uint64
+ Searched uint64
+ Found uint64
+ New uint64
+ Invalid uint64
+ Ignore uint64
+ Delete uint64
+ DeleteList uint64
+ Insert uint64
+ InsertFailed uint64
+ Drop uint64
+ EarlyDrop uint64
+ SearchRestart uint64
+}
+
+// ConntrackStat retrieves netfilter's conntrack statistics, split by CPU cores.
+func (fs FS) ConntrackStat() ([]ConntrackStatEntry, error) {
+ return readConntrackStat(fs.proc.Path("net", "stat", "nf_conntrack"))
+}
+
+// Parses a slice of ConntrackStatEntries from the given filepath.
+func readConntrackStat(path string) ([]ConntrackStatEntry, error) {
+ // This file is small and can be read with one syscall.
+ b, err := util.ReadFileNoStat(path)
+ if err != nil {
+ // Do not wrap this error so the caller can detect os.IsNotExist and
+ // similar conditions.
+ return nil, err
+ }
+
+ stat, err := parseConntrackStat(bytes.NewReader(b))
+ if err != nil {
+ return nil, fmt.Errorf("%w: Cannot read file: %v: %w", ErrFileRead, path, err)
+ }
+
+ return stat, nil
+}
+
+// Reads the contents of a conntrack statistics file and parses a slice of ConntrackStatEntries.
+func parseConntrackStat(r io.Reader) ([]ConntrackStatEntry, error) {
+ var entries []ConntrackStatEntry
+
+ scanner := bufio.NewScanner(r)
+ scanner.Scan()
+ for scanner.Scan() {
+ fields := strings.Fields(scanner.Text())
+ conntrackEntry, err := parseConntrackStatEntry(fields)
+ if err != nil {
+ return nil, err
+ }
+ entries = append(entries, *conntrackEntry)
+ }
+
+ return entries, nil
+}
+
+// Parses a ConntrackStatEntry from given array of fields.
+func parseConntrackStatEntry(fields []string) (*ConntrackStatEntry, error) {
+ entries, err := util.ParseHexUint64s(fields)
+ if err != nil {
+ return nil, fmt.Errorf("%w: Cannot parse entry: %d: %w", ErrFileParse, entries, err)
+ }
+ numEntries := len(entries)
+ if numEntries < 16 || numEntries > 17 {
+ return nil,
+ fmt.Errorf("%w: invalid conntrackstat entry, invalid number of fields: %d", ErrFileParse, numEntries)
+ }
+
+ stats := &ConntrackStatEntry{
+ Entries: *entries[0],
+ Searched: *entries[1],
+ Found: *entries[2],
+ New: *entries[3],
+ Invalid: *entries[4],
+ Ignore: *entries[5],
+ Delete: *entries[6],
+ DeleteList: *entries[7],
+ Insert: *entries[8],
+ InsertFailed: *entries[9],
+ Drop: *entries[10],
+ EarlyDrop: *entries[11],
+ }
+
+ // Ignore missing search_restart on Linux < 2.6.35.
+ if numEntries == 17 {
+ stats.SearchRestart = *entries[16]
+ }
+
+ return stats, nil
+}
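
A sketch of aggregating the per-CPU conntrack entries returned by the accessor above; `procfs.NewFS` is again an assumed constructor, and the error path covers hosts where nf_conntrack is not loaded.

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc") // assumed constructor
	if err != nil {
		log.Fatal(err)
	}

	// One ConntrackStatEntry per CPU core, in the order the kernel lists them.
	entries, err := fs.ConntrackStat()
	if err != nil {
		log.Fatal(err) // e.g. nf_conntrack module not loaded
	}

	var found, invalid uint64
	for _, e := range entries {
		found += e.Found
		invalid += e.Invalid
	}
	fmt.Printf("conntrack: %d CPUs, found=%d invalid=%d\n", len(entries), found, invalid)
}
```
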
diff --git a/vendor/github.com/prometheus/procfs/net_dev.go b/vendor/github.com/prometheus/procfs/net_dev.go
new file mode 100644
index 0000000..e66208a
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/net_dev.go
@@ -0,0 +1,205 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+ "bufio"
+ "errors"
+ "os"
+ "sort"
+ "strconv"
+ "strings"
+)
+
+// NetDevLine is a single line parsed from /proc/net/dev or /proc/[pid]/net/dev.
+type NetDevLine struct {
+ Name string `json:"name"` // The name of the interface.
+ RxBytes uint64 `json:"rx_bytes"` // Cumulative count of bytes received.
+ RxPackets uint64 `json:"rx_packets"` // Cumulative count of packets received.
+ RxErrors uint64 `json:"rx_errors"` // Cumulative count of receive errors encountered.
+ RxDropped uint64 `json:"rx_dropped"` // Cumulative count of packets dropped while receiving.
+ RxFIFO uint64 `json:"rx_fifo"` // Cumulative count of FIFO buffer errors.
+ RxFrame uint64 `json:"rx_frame"` // Cumulative count of packet framing errors.
+ RxCompressed uint64 `json:"rx_compressed"` // Cumulative count of compressed packets received by the device driver.
+ RxMulticast uint64 `json:"rx_multicast"` // Cumulative count of multicast frames received by the device driver.
+ TxBytes uint64 `json:"tx_bytes"` // Cumulative count of bytes transmitted.
+ TxPackets uint64 `json:"tx_packets"` // Cumulative count of packets transmitted.
+ TxErrors uint64 `json:"tx_errors"` // Cumulative count of transmit errors encountered.
+ TxDropped uint64 `json:"tx_dropped"` // Cumulative count of packets dropped while transmitting.
+ TxFIFO uint64 `json:"tx_fifo"` // Cumulative count of FIFO buffer errors.
+ TxCollisions uint64 `json:"tx_collisions"` // Cumulative count of collisions detected on the interface.
+ TxCarrier uint64 `json:"tx_carrier"` // Cumulative count of carrier losses detected by the device driver.
+ TxCompressed uint64 `json:"tx_compressed"` // Cumulative count of compressed packets transmitted by the device driver.
+}
+
+// NetDev is parsed from /proc/net/dev or /proc/[pid]/net/dev. The map keys
+// are interface names.
+type NetDev map[string]NetDevLine
+
+// NetDev returns kernel/system statistics read from /proc/net/dev.
+func (fs FS) NetDev() (NetDev, error) {
+ return newNetDev(fs.proc.Path("net/dev"))
+}
+
+// NetDev returns kernel/system statistics read from /proc/[pid]/net/dev.
+func (p Proc) NetDev() (NetDev, error) {
+ return newNetDev(p.path("net/dev"))
+}
+
+// newNetDev creates a new NetDev from the contents of the given file.
+func newNetDev(file string) (NetDev, error) {
+ f, err := os.Open(file)
+ if err != nil {
+ return NetDev{}, err
+ }
+ defer f.Close()
+
+ netDev := NetDev{}
+ s := bufio.NewScanner(f)
+ for n := 0; s.Scan(); n++ {
+ // Skip the 2 header lines.
+ if n < 2 {
+ continue
+ }
+
+ line, err := netDev.parseLine(s.Text())
+ if err != nil {
+ return netDev, err
+ }
+
+ netDev[line.Name] = *line
+ }
+
+ return netDev, s.Err()
+}
+
+// parseLine parses a single line from the /proc/net/dev file. Header lines
+// must be filtered prior to calling this method.
+func (netDev NetDev) parseLine(rawLine string) (*NetDevLine, error) {
+ idx := strings.LastIndex(rawLine, ":")
+ if idx == -1 {
+ return nil, errors.New("invalid net/dev line, missing colon")
+ }
+ fields := strings.Fields(strings.TrimSpace(rawLine[idx+1:]))
+
+ var err error
+ line := &NetDevLine{}
+
+ // Interface Name
+ line.Name = strings.TrimSpace(rawLine[:idx])
+ if line.Name == "" {
+ return nil, errors.New("invalid net/dev line, empty interface name")
+ }
+
+ // RX
+ line.RxBytes, err = strconv.ParseUint(fields[0], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ line.RxPackets, err = strconv.ParseUint(fields[1], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ line.RxErrors, err = strconv.ParseUint(fields[2], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ line.RxDropped, err = strconv.ParseUint(fields[3], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ line.RxFIFO, err = strconv.ParseUint(fields[4], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ line.RxFrame, err = strconv.ParseUint(fields[5], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ line.RxCompressed, err = strconv.ParseUint(fields[6], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ line.RxMulticast, err = strconv.ParseUint(fields[7], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+
+ // TX
+ line.TxBytes, err = strconv.ParseUint(fields[8], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ line.TxPackets, err = strconv.ParseUint(fields[9], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ line.TxErrors, err = strconv.ParseUint(fields[10], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ line.TxDropped, err = strconv.ParseUint(fields[11], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ line.TxFIFO, err = strconv.ParseUint(fields[12], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ line.TxCollisions, err = strconv.ParseUint(fields[13], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ line.TxCarrier, err = strconv.ParseUint(fields[14], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ line.TxCompressed, err = strconv.ParseUint(fields[15], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+
+ return line, nil
+}
+
+// Total aggregates the values across interfaces and returns a new NetDevLine.
+// The Name field will be a sorted comma separated list of interface names.
+func (netDev NetDev) Total() NetDevLine {
+ total := NetDevLine{}
+
+ names := make([]string, 0, len(netDev))
+ for _, ifc := range netDev {
+ names = append(names, ifc.Name)
+ total.RxBytes += ifc.RxBytes
+ total.RxPackets += ifc.RxPackets
+ total.RxErrors += ifc.RxErrors
+ total.RxDropped += ifc.RxDropped
+ total.RxFIFO += ifc.RxFIFO
+ total.RxFrame += ifc.RxFrame
+ total.RxCompressed += ifc.RxCompressed
+ total.RxMulticast += ifc.RxMulticast
+ total.TxBytes += ifc.TxBytes
+ total.TxPackets += ifc.TxPackets
+ total.TxErrors += ifc.TxErrors
+ total.TxDropped += ifc.TxDropped
+ total.TxFIFO += ifc.TxFIFO
+ total.TxCollisions += ifc.TxCollisions
+ total.TxCarrier += ifc.TxCarrier
+ total.TxCompressed += ifc.TxCompressed
+ }
+ sort.Strings(names)
+ total.Name = strings.Join(names, ", ")
+
+ return total
+}
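
A minimal usage sketch for the NetDev API above, assuming the package is vendored as github.com/prometheus/procfs and /proc is mounted at the default location; the program structure and output format are illustrative only.

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS(procfs.DefaultMountPoint)
	if err != nil {
		log.Fatal(err)
	}

	// Read per-interface counters from /proc/net/dev.
	netDev, err := fs.NetDev()
	if err != nil {
		log.Fatal(err)
	}
	for name, line := range netDev {
		fmt.Printf("%s: rx=%d bytes tx=%d bytes\n", name, line.RxBytes, line.TxBytes)
	}

	// Aggregate the counters across all interfaces.
	total := netDev.Total()
	fmt.Printf("total (%s): rx=%d tx=%d\n", total.Name, total.RxBytes, total.TxBytes)
}
```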
diff --git a/vendor/github.com/prometheus/procfs/net_ip_socket.go b/vendor/github.com/prometheus/procfs/net_ip_socket.go
new file mode 100644
index 0000000..b70f1fc
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/net_ip_socket.go
@@ -0,0 +1,248 @@
+// Copyright 2020 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+ "bufio"
+ "encoding/hex"
+ "fmt"
+ "io"
+ "net"
+ "os"
+ "strconv"
+ "strings"
+)
+
+const (
+ // readLimit is used by io.LimitReader while reading the content of the
+ // /proc/net/udp{,6} files. The number of lines inside such a file is dynamic
+ // as each line represents a single used socket.
+ // In theory, the number of available sockets is 65535 (2^16 - 1) per IP.
+ // With e.g. 150 bytes per line and the maximum number of 65535 sockets,
+ // the reader needs to handle 150 bytes * 65535 =~ 10 MB for a single IP.
+ readLimit = 4294967296 // Byte -> 4 GiB
+)
+
+// This contains generic data structures for both udp and tcp sockets.
+type (
+ // NetIPSocket represents the contents of /proc/net/{t,u}dp{,6} file without the header.
+ NetIPSocket []*netIPSocketLine
+
+ // NetIPSocketSummary provides already computed values like the total queue lengths or
+ // the total number of used sockets. In contrast to NetIPSocket it does not collect
+ // the parsed lines into a slice.
+ NetIPSocketSummary struct {
+ // TxQueueLength shows the total queue length of all parsed tx_queue lengths.
+ TxQueueLength uint64
+ // RxQueueLength shows the total queue length of all parsed rx_queue lengths.
+ RxQueueLength uint64
+ // UsedSockets shows the total number of parsed lines representing the
+ // number of used sockets.
+ UsedSockets uint64
+ // Drops shows the total number of dropped packets of all UDP sockets.
+ Drops *uint64
+ }
+
+ // netIPSocketLine represents the fields parsed from a single line
+ // in /proc/net/{t,u}dp{,6}. Fields which are not used by IPSocket are skipped.
+ // Drops is non-nil for udp{,6}, but nil for tcp{,6}.
+ // For the proc file format details, see https://linux.die.net/man/5/proc.
+ netIPSocketLine struct {
+ Sl uint64
+ LocalAddr net.IP
+ LocalPort uint64
+ RemAddr net.IP
+ RemPort uint64
+ St uint64
+ TxQueue uint64
+ RxQueue uint64
+ UID uint64
+ Inode uint64
+ Drops *uint64
+ }
+)
+
+func newNetIPSocket(file string) (NetIPSocket, error) {
+ f, err := os.Open(file)
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+
+ var netIPSocket NetIPSocket
+ isUDP := strings.Contains(file, "udp")
+
+ lr := io.LimitReader(f, readLimit)
+ s := bufio.NewScanner(lr)
+ s.Scan() // skip first line with headers
+ for s.Scan() {
+ fields := strings.Fields(s.Text())
+ line, err := parseNetIPSocketLine(fields, isUDP)
+ if err != nil {
+ return nil, err
+ }
+ netIPSocket = append(netIPSocket, line)
+ }
+ if err := s.Err(); err != nil {
+ return nil, err
+ }
+ return netIPSocket, nil
+}
+
+// newNetIPSocketSummary creates a new NetIPSocket{,6} from the contents of the given file.
+func newNetIPSocketSummary(file string) (*NetIPSocketSummary, error) {
+ f, err := os.Open(file)
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+
+ var netIPSocketSummary NetIPSocketSummary
+ var udpPacketDrops uint64
+ isUDP := strings.Contains(file, "udp")
+
+ lr := io.LimitReader(f, readLimit)
+ s := bufio.NewScanner(lr)
+ s.Scan() // skip first line with headers
+ for s.Scan() {
+ fields := strings.Fields(s.Text())
+ line, err := parseNetIPSocketLine(fields, isUDP)
+ if err != nil {
+ return nil, err
+ }
+ netIPSocketSummary.TxQueueLength += line.TxQueue
+ netIPSocketSummary.RxQueueLength += line.RxQueue
+ netIPSocketSummary.UsedSockets++
+ if isUDP {
+ udpPacketDrops += *line.Drops
+ netIPSocketSummary.Drops = &udpPacketDrops
+ }
+ }
+ if err := s.Err(); err != nil {
+ return nil, err
+ }
+ return &netIPSocketSummary, nil
+}
+
+// The /proc/net/{t,u}dp{,6} files are in network byte order for IPv4. For IPv6 the address
+// is four words of four bytes each; within each of those four words the bytes are written in reverse order.
+
+func parseIP(hexIP string) (net.IP, error) {
+ var byteIP []byte
+ byteIP, err := hex.DecodeString(hexIP)
+ if err != nil {
+ return nil, fmt.Errorf("%w: Cannot parse socket field in %q: %w", ErrFileParse, hexIP, err)
+ }
+ switch len(byteIP) {
+ case 4:
+ return net.IP{byteIP[3], byteIP[2], byteIP[1], byteIP[0]}, nil
+ case 16:
+ i := net.IP{
+ byteIP[3], byteIP[2], byteIP[1], byteIP[0],
+ byteIP[7], byteIP[6], byteIP[5], byteIP[4],
+ byteIP[11], byteIP[10], byteIP[9], byteIP[8],
+ byteIP[15], byteIP[14], byteIP[13], byteIP[12],
+ }
+ return i, nil
+ default:
+ return nil, fmt.Errorf("%w: Unable to parse IP %s: %v", ErrFileParse, hexIP, nil)
+ }
+}
+
+// parseNetIPSocketLine parses a single line, represented by a list of fields.
+func parseNetIPSocketLine(fields []string, isUDP bool) (*netIPSocketLine, error) {
+ line := &netIPSocketLine{}
+ if len(fields) < 10 {
+ return nil, fmt.Errorf(
+ "%w: Less than 10 columns found %q",
+ ErrFileParse,
+ strings.Join(fields, " "),
+ )
+ }
+ var err error // parse error
+
+ // sl
+ s := strings.Split(fields[0], ":")
+ if len(s) != 2 {
+ return nil, fmt.Errorf("%w: Unable to parse sl field in line %q", ErrFileParse, fields[0])
+ }
+
+ if line.Sl, err = strconv.ParseUint(s[0], 0, 64); err != nil {
+ return nil, fmt.Errorf("%w: Unable to parse sl field in %q: %w", ErrFileParse, line.Sl, err)
+ }
+ // local_address
+ l := strings.Split(fields[1], ":")
+ if len(l) != 2 {
+ return nil, fmt.Errorf("%w: Unable to parse local_address field in %q", ErrFileParse, fields[1])
+ }
+ if line.LocalAddr, err = parseIP(l[0]); err != nil {
+ return nil, err
+ }
+ if line.LocalPort, err = strconv.ParseUint(l[1], 16, 64); err != nil {
+ return nil, fmt.Errorf("%w: Unable to parse local_address port value line %q: %w", ErrFileParse, line.LocalPort, err)
+ }
+
+ // remote_address
+ r := strings.Split(fields[2], ":")
+ if len(r) != 2 {
+ return nil, fmt.Errorf("%w: Unable to parse rem_address field in %q", ErrFileParse, fields[1])
+ }
+ if line.RemAddr, err = parseIP(r[0]); err != nil {
+ return nil, err
+ }
+ if line.RemPort, err = strconv.ParseUint(r[1], 16, 64); err != nil {
+ return nil, fmt.Errorf("%w: Cannot parse rem_address port value in %q: %w", ErrFileParse, line.RemPort, err)
+ }
+
+ // st
+ if line.St, err = strconv.ParseUint(fields[3], 16, 64); err != nil {
+ return nil, fmt.Errorf("%w: Cannot parse st value in %q: %w", ErrFileParse, line.St, err)
+ }
+
+ // tx_queue and rx_queue
+ q := strings.Split(fields[4], ":")
+ if len(q) != 2 {
+ return nil, fmt.Errorf(
+ "%w: Missing colon for tx/rx queues in socket line %q",
+ ErrFileParse,
+ fields[4],
+ )
+ }
+ if line.TxQueue, err = strconv.ParseUint(q[0], 16, 64); err != nil {
+ return nil, fmt.Errorf("%w: Cannot parse tx_queue value in %q: %w", ErrFileParse, line.TxQueue, err)
+ }
+ if line.RxQueue, err = strconv.ParseUint(q[1], 16, 64); err != nil {
+ return nil, fmt.Errorf("%w: Cannot parse rx_queue value in %q: %w", ErrFileParse, line.RxQueue, err)
+ }
+
+ // uid
+ if line.UID, err = strconv.ParseUint(fields[7], 0, 64); err != nil {
+ return nil, fmt.Errorf("%w: Cannot parse UID value in %q: %w", ErrFileParse, line.UID, err)
+ }
+
+ // inode
+ if line.Inode, err = strconv.ParseUint(fields[9], 0, 64); err != nil {
+ return nil, fmt.Errorf("%w: Cannot parse inode value in %q: %w", ErrFileParse, line.Inode, err)
+ }
+
+ // drops
+ if isUDP {
+ drops, err := strconv.ParseUint(fields[12], 0, 64)
+ if err != nil {
+ return nil, fmt.Errorf("%w: Cannot parse drops value in %q: %w", ErrFileParse, drops, err)
+ }
+ line.Drops = &drops
+ }
+
+ return line, nil
+}
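
The byte-order note above is easiest to see with a concrete value. The sketch below reimplements only the IPv4 branch of the decoding for illustration (it does not call the unexported parseIP); the sample hex string is the conventional loopback representation.

```go
package main

import (
	"encoding/hex"
	"fmt"
	"net"
)

func main() {
	// "0100007F" is how /proc/net/tcp typically prints 127.0.0.1:
	// the four bytes of the IPv4 address appear in reverse order.
	raw, err := hex.DecodeString("0100007F")
	if err != nil {
		panic(err)
	}
	// Reverse the bytes to recover the dotted-quad address.
	ip := net.IP{raw[3], raw[2], raw[1], raw[0]}
	fmt.Println(ip) // 127.0.0.1
}
```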
diff --git a/vendor/github.com/prometheus/procfs/net_protocols.go b/vendor/github.com/prometheus/procfs/net_protocols.go
new file mode 100644
index 0000000..b6c77b7
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/net_protocols.go
@@ -0,0 +1,180 @@
+// Copyright 2020 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "strconv"
+ "strings"
+
+ "github.com/prometheus/procfs/internal/util"
+)
+
+// NetProtocolStats stores the contents from /proc/net/protocols.
+type NetProtocolStats map[string]NetProtocolStatLine
+
+// NetProtocolStatLine contains a single line parsed from /proc/net/protocols. We
+// only care about the first six columns as the rest are not likely to change
+// and only serve to provide a set of capabilities for each protocol.
+type NetProtocolStatLine struct {
+ Name string // 0 The name of the protocol
+ Size uint64 // 1 The size, in bytes, of a given protocol structure. e.g. sizeof(struct tcp_sock) or sizeof(struct unix_sock)
+ Sockets int64 // 2 Number of sockets in use by this protocol
+ Memory int64 // 3 Number of 4KB pages allocated by all sockets of this protocol
+ Pressure int // 4 This is either yes, no, or NI (not implemented). For the sake of simplicity we treat NI as not experiencing memory pressure.
+ MaxHeader uint64 // 5 Protocol specific max header size
+ Slab bool // 6 Indicates whether or not memory is allocated from the SLAB
+ ModuleName string // 7 The name of the module that implemented this protocol or "kernel" if not from a module
+ Capabilities NetProtocolCapabilities
+}
+
+// NetProtocolCapabilities contains a list of capabilities for each protocol.
+type NetProtocolCapabilities struct {
+ Close bool // 8
+ Connect bool // 9
+ Disconnect bool // 10
+ Accept bool // 11
+ IoCtl bool // 12
+ Init bool // 13
+ Destroy bool // 14
+ Shutdown bool // 15
+ SetSockOpt bool // 16
+ GetSockOpt bool // 17
+ SendMsg bool // 18
+ RecvMsg bool // 19
+ SendPage bool // 20
+ Bind bool // 21
+ BacklogRcv bool // 22
+ Hash bool // 23
+ UnHash bool // 24
+ GetPort bool // 25
+ EnterMemoryPressure bool // 26
+}
+
+// NetProtocols reads stats from /proc/net/protocols and returns a map of
+// NetProtocolStatLine entries. As of this writing no official Linux documentation
+// exists, however the source is fairly self-explanatory and the format seems
+// stable since its introduction in 2.6.12-rc2.
+// Linux 2.6.12-rc2 - https://elixir.bootlin.com/linux/v2.6.12-rc2/source/net/core/sock.c#L1452
+// Linux 5.10 - https://elixir.bootlin.com/linux/v5.10.4/source/net/core/sock.c#L3586
+func (fs FS) NetProtocols() (NetProtocolStats, error) {
+ data, err := util.ReadFileNoStat(fs.proc.Path("net/protocols"))
+ if err != nil {
+ return NetProtocolStats{}, err
+ }
+ return parseNetProtocols(bufio.NewScanner(bytes.NewReader(data)))
+}
+
+func parseNetProtocols(s *bufio.Scanner) (NetProtocolStats, error) {
+ nps := NetProtocolStats{}
+
+ // Skip the header line
+ s.Scan()
+
+ for s.Scan() {
+ line, err := nps.parseLine(s.Text())
+ if err != nil {
+ return NetProtocolStats{}, err
+ }
+
+ nps[line.Name] = *line
+ }
+ return nps, nil
+}
+
+func (ps NetProtocolStats) parseLine(rawLine string) (*NetProtocolStatLine, error) {
+ line := &NetProtocolStatLine{Capabilities: NetProtocolCapabilities{}}
+ var err error
+ const enabled = "yes"
+ const disabled = "no"
+
+ fields := strings.Fields(rawLine)
+ line.Name = fields[0]
+ line.Size, err = strconv.ParseUint(fields[1], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ line.Sockets, err = strconv.ParseInt(fields[2], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ line.Memory, err = strconv.ParseInt(fields[3], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ if fields[4] == enabled {
+ line.Pressure = 1
+ } else if fields[4] == disabled {
+ line.Pressure = 0
+ } else {
+ line.Pressure = -1
+ }
+ line.MaxHeader, err = strconv.ParseUint(fields[5], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ if fields[6] == enabled {
+ line.Slab = true
+ } else if fields[6] == disabled {
+ line.Slab = false
+ } else {
+ return nil, fmt.Errorf("%w: capability for protocol: %s", ErrFileParse, line.Name)
+ }
+ line.ModuleName = fields[7]
+
+ err = line.Capabilities.parseCapabilities(fields[8:])
+ if err != nil {
+ return nil, err
+ }
+
+ return line, nil
+}
+
+func (pc *NetProtocolCapabilities) parseCapabilities(capabilities []string) error {
+ // The capabilities are all bools so we can loop over to map them
+ capabilityFields := [...]*bool{
+ &pc.Close,
+ &pc.Connect,
+ &pc.Disconnect,
+ &pc.Accept,
+ &pc.IoCtl,
+ &pc.Init,
+ &pc.Destroy,
+ &pc.Shutdown,
+ &pc.SetSockOpt,
+ &pc.GetSockOpt,
+ &pc.SendMsg,
+ &pc.RecvMsg,
+ &pc.SendPage,
+ &pc.Bind,
+ &pc.BacklogRcv,
+ &pc.Hash,
+ &pc.UnHash,
+ &pc.GetPort,
+ &pc.EnterMemoryPressure,
+ }
+
+ for i := 0; i < len(capabilities); i++ {
+ if capabilities[i] == "y" {
+ *capabilityFields[i] = true
+ } else if capabilities[i] == "n" {
+ *capabilityFields[i] = false
+ } else {
+ return fmt.Errorf("%w: capability block for protocol: position %d", ErrFileParse, i)
+ }
+ }
+ return nil
+}
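
A short usage sketch for NetProtocols, assuming the default /proc mount; the fields printed are chosen for illustration.

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS(procfs.DefaultMountPoint)
	if err != nil {
		log.Fatal(err)
	}
	stats, err := fs.NetProtocols()
	if err != nil {
		log.Fatal(err)
	}
	// Stats are keyed by protocol name, e.g. "TCP" or "UDP".
	for name, line := range stats {
		fmt.Printf("%s: sockets=%d memory=%d pages pressure=%d\n",
			name, line.Sockets, line.Memory, line.Pressure)
	}
}
```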
diff --git a/vendor/github.com/prometheus/procfs/net_route.go b/vendor/github.com/prometheus/procfs/net_route.go
new file mode 100644
index 0000000..deb7029
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/net_route.go
@@ -0,0 +1,143 @@
+// Copyright 2023 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "io"
+ "strconv"
+ "strings"
+
+ "github.com/prometheus/procfs/internal/util"
+)
+
+const (
+ blackholeRepresentation string = "*"
+ blackholeIfaceName string = "blackhole"
+ routeLineColumns int = 11
+)
+
+// A NetRouteLine represents one line from net/route.
+type NetRouteLine struct {
+ Iface string
+ Destination uint32
+ Gateway uint32
+ Flags uint32
+ RefCnt uint32
+ Use uint32
+ Metric uint32
+ Mask uint32
+ MTU uint32
+ Window uint32
+ IRTT uint32
+}
+
+func (fs FS) NetRoute() ([]NetRouteLine, error) {
+ return readNetRoute(fs.proc.Path("net", "route"))
+}
+
+func readNetRoute(path string) ([]NetRouteLine, error) {
+ b, err := util.ReadFileNoStat(path)
+ if err != nil {
+ return nil, err
+ }
+
+ routelines, err := parseNetRoute(bytes.NewReader(b))
+ if err != nil {
+ return nil, fmt.Errorf("failed to read net route from %s: %w", path, err)
+ }
+ return routelines, nil
+}
+
+func parseNetRoute(r io.Reader) ([]NetRouteLine, error) {
+ var routelines []NetRouteLine
+
+ scanner := bufio.NewScanner(r)
+ scanner.Scan()
+ for scanner.Scan() {
+ fields := strings.Fields(scanner.Text())
+ routeline, err := parseNetRouteLine(fields)
+ if err != nil {
+ return nil, err
+ }
+ routelines = append(routelines, *routeline)
+ }
+ return routelines, nil
+}
+
+func parseNetRouteLine(fields []string) (*NetRouteLine, error) {
+ if len(fields) != routeLineColumns {
+ return nil, fmt.Errorf("invalid routeline, num of digits: %d", len(fields))
+ }
+ iface := fields[0]
+ if iface == blackholeRepresentation {
+ iface = blackholeIfaceName
+ }
+ destination, err := strconv.ParseUint(fields[1], 16, 32)
+ if err != nil {
+ return nil, err
+ }
+ gateway, err := strconv.ParseUint(fields[2], 16, 32)
+ if err != nil {
+ return nil, err
+ }
+ flags, err := strconv.ParseUint(fields[3], 10, 32)
+ if err != nil {
+ return nil, err
+ }
+ refcnt, err := strconv.ParseUint(fields[4], 10, 32)
+ if err != nil {
+ return nil, err
+ }
+ use, err := strconv.ParseUint(fields[5], 10, 32)
+ if err != nil {
+ return nil, err
+ }
+ metric, err := strconv.ParseUint(fields[6], 10, 32)
+ if err != nil {
+ return nil, err
+ }
+ mask, err := strconv.ParseUint(fields[7], 16, 32)
+ if err != nil {
+ return nil, err
+ }
+ mtu, err := strconv.ParseUint(fields[8], 10, 32)
+ if err != nil {
+ return nil, err
+ }
+ window, err := strconv.ParseUint(fields[9], 10, 32)
+ if err != nil {
+ return nil, err
+ }
+ irtt, err := strconv.ParseUint(fields[10], 10, 32)
+ if err != nil {
+ return nil, err
+ }
+ routeline := &NetRouteLine{
+ Iface: iface,
+ Destination: uint32(destination),
+ Gateway: uint32(gateway),
+ Flags: uint32(flags),
+ RefCnt: uint32(refcnt),
+ Use: uint32(use),
+ Metric: uint32(metric),
+ Mask: uint32(mask),
+ MTU: uint32(mtu),
+ Window: uint32(window),
+ IRTT: uint32(irtt),
+ }
+ return routeline, nil
+}
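
A usage sketch for NetRoute, assuming the default /proc mount. Destination, Gateway and Mask are kept as the raw 32-bit values parsed from the hex columns; converting them to dotted notation is left out here to avoid assuming a byte order.

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS(procfs.DefaultMountPoint)
	if err != nil {
		log.Fatal(err)
	}
	routes, err := fs.NetRoute()
	if err != nil {
		log.Fatal(err)
	}
	for _, r := range routes {
		// Addresses are printed as the raw hex words from /proc/net/route.
		fmt.Printf("%-8s dst=%08x gw=%08x mask=%08x metric=%d\n",
			r.Iface, r.Destination, r.Gateway, r.Mask, r.Metric)
	}
}
```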
diff --git a/vendor/github.com/prometheus/procfs/net_sockstat.go b/vendor/github.com/prometheus/procfs/net_sockstat.go
new file mode 100644
index 0000000..fae62b1
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/net_sockstat.go
@@ -0,0 +1,162 @@
+// Copyright 2019 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "io"
+ "strings"
+
+ "github.com/prometheus/procfs/internal/util"
+)
+
+// A NetSockstat contains the output of /proc/net/sockstat{,6} for IPv4 or IPv6,
+// respectively.
+type NetSockstat struct {
+ // Used is non-nil for IPv4 sockstat results, but nil for IPv6.
+ Used *int
+ Protocols []NetSockstatProtocol
+}
+
+// A NetSockstatProtocol contains statistics about a given socket protocol.
+// Pointer fields indicate that the value may or may not be present on any
+// given protocol.
+type NetSockstatProtocol struct {
+ Protocol string
+ InUse int
+ Orphan *int
+ TW *int
+ Alloc *int
+ Mem *int
+ Memory *int
+}
+
+// NetSockstat retrieves IPv4 socket statistics.
+func (fs FS) NetSockstat() (*NetSockstat, error) {
+ return readSockstat(fs.proc.Path("net", "sockstat"))
+}
+
+// NetSockstat6 retrieves IPv6 socket statistics.
+//
+// If IPv6 is disabled on this kernel, the returned error can be checked with
+// os.IsNotExist.
+func (fs FS) NetSockstat6() (*NetSockstat, error) {
+ return readSockstat(fs.proc.Path("net", "sockstat6"))
+}
+
+// readSockstat opens and parses a NetSockstat from the input file.
+func readSockstat(name string) (*NetSockstat, error) {
+ // This file is small and can be read with one syscall.
+ b, err := util.ReadFileNoStat(name)
+ if err != nil {
+ // Do not wrap this error so the caller can detect os.IsNotExist and
+ // similar conditions.
+ return nil, err
+ }
+
+ stat, err := parseSockstat(bytes.NewReader(b))
+ if err != nil {
+ return nil, fmt.Errorf("%w: sockstats from %q: %w", ErrFileRead, name, err)
+ }
+
+ return stat, nil
+}
+
+// parseSockstat reads the contents of a sockstat file and parses a NetSockstat.
+func parseSockstat(r io.Reader) (*NetSockstat, error) {
+ var stat NetSockstat
+ s := bufio.NewScanner(r)
+ for s.Scan() {
+ // Expect a minimum of a protocol and one key/value pair.
+ fields := strings.Split(s.Text(), " ")
+ if len(fields) < 3 {
+ return nil, fmt.Errorf("%w: Malformed sockstat line: %q", ErrFileParse, s.Text())
+ }
+
+ // The remaining fields are key/value pairs.
+ kvs, err := parseSockstatKVs(fields[1:])
+ if err != nil {
+ return nil, fmt.Errorf("%w: sockstat key/value pairs from %q: %w", ErrFileParse, s.Text(), err)
+ }
+
+ // The first field is the protocol. We must trim its colon suffix.
+ proto := strings.TrimSuffix(fields[0], ":")
+ switch proto {
+ case "sockets":
+ // Special case: IPv4 has a sockets "used" key/value pair that we
+ // embed at the top level of the structure.
+ used := kvs["used"]
+ stat.Used = &used
+ default:
+ // Parse all other lines as individual protocols.
+ nsp := parseSockstatProtocol(kvs)
+ nsp.Protocol = proto
+ stat.Protocols = append(stat.Protocols, nsp)
+ }
+ }
+
+ if err := s.Err(); err != nil {
+ return nil, err
+ }
+
+ return &stat, nil
+}
+
+// parseSockstatKVs parses a string slice into a map of key/value pairs.
+func parseSockstatKVs(kvs []string) (map[string]int, error) {
+ if len(kvs)%2 != 0 {
+ return nil, fmt.Errorf("%w: Odd number of fields in key/value pairs %q", ErrFileParse, kvs)
+ }
+
+ // Iterate two values at a time to gather key/value pairs.
+ out := make(map[string]int, len(kvs)/2)
+ for i := 0; i < len(kvs); i += 2 {
+ vp := util.NewValueParser(kvs[i+1])
+ out[kvs[i]] = vp.Int()
+
+ if err := vp.Err(); err != nil {
+ return nil, err
+ }
+ }
+
+ return out, nil
+}
+
+// parseSockstatProtocol parses a NetSockstatProtocol from the input kvs map.
+func parseSockstatProtocol(kvs map[string]int) NetSockstatProtocol {
+ var nsp NetSockstatProtocol
+ for k, v := range kvs {
+ // Capture the range variable to ensure we get unique pointers for
+ // each of the optional fields.
+ v := v
+ switch k {
+ case "inuse":
+ nsp.InUse = v
+ case "orphan":
+ nsp.Orphan = &v
+ case "tw":
+ nsp.TW = &v
+ case "alloc":
+ nsp.Alloc = &v
+ case "mem":
+ nsp.Mem = &v
+ case "memory":
+ nsp.Memory = &v
+ }
+ }
+
+ return nsp
+}
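
A usage sketch for the sockstat readers, assuming the default /proc mount; Used and the per-protocol pointer fields may be nil and should be checked before dereferencing.

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS(procfs.DefaultMountPoint)
	if err != nil {
		log.Fatal(err)
	}
	stat, err := fs.NetSockstat()
	if err != nil {
		log.Fatal(err)
	}
	if stat.Used != nil {
		fmt.Println("sockets used:", *stat.Used)
	}
	for _, p := range stat.Protocols {
		fmt.Printf("%s: inuse=%d", p.Protocol, p.InUse)
		if p.TW != nil {
			fmt.Printf(" tw=%d", *p.TW)
		}
		fmt.Println()
	}
}
```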
diff --git a/vendor/github.com/prometheus/procfs/net_softnet.go b/vendor/github.com/prometheus/procfs/net_softnet.go
new file mode 100644
index 0000000..71c8059
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/net_softnet.go
@@ -0,0 +1,155 @@
+// Copyright 2019 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "io"
+ "strconv"
+ "strings"
+
+ "github.com/prometheus/procfs/internal/util"
+)
+
+// For the proc file format details,
+// See:
+// * Linux 2.6.23 https://elixir.bootlin.com/linux/v2.6.23/source/net/core/dev.c#L2343
+// * Linux 2.6.39 https://elixir.bootlin.com/linux/v2.6.39/source/net/core/dev.c#L4086
+// * Linux 4.18 https://elixir.bootlin.com/linux/v4.18/source/net/core/net-procfs.c#L162
+// * Linux 5.14 https://elixir.bootlin.com/linux/v5.14/source/net/core/net-procfs.c#L169
+
+// SoftnetStat contains a single row of data from /proc/net/softnet_stat.
+type SoftnetStat struct {
+ // Number of processed packets.
+ Processed uint32
+ // Number of dropped packets.
+ Dropped uint32
+ // Number of times processing packets ran out of quota.
+ TimeSqueezed uint32
+ // Number of collisions that occurred while obtaining the device lock while transmitting.
+ CPUCollision uint32
+ // Number of times the CPU has been woken up (received_rps).
+ ReceivedRps uint32
+ // Number of times the flow limit has been reached.
+ FlowLimitCount uint32
+ // Length of the softnet backlog queue.
+ SoftnetBacklogLen uint32
+ // CPU id owning this softnet_data.
+ Index uint32
+ // Width is the number of columns parsed from this softnet_data row.
+ Width int
+}
+
+var softNetProcFile = "net/softnet_stat"
+
+// NetSoftnetStat reads data from /proc/net/softnet_stat.
+func (fs FS) NetSoftnetStat() ([]SoftnetStat, error) {
+ b, err := util.ReadFileNoStat(fs.proc.Path(softNetProcFile))
+ if err != nil {
+ return nil, err
+ }
+
+ entries, err := parseSoftnet(bytes.NewReader(b))
+ if err != nil {
+ return nil, fmt.Errorf("%w: /proc/net/softnet_stat: %w", ErrFileParse, err)
+ }
+
+ return entries, nil
+}
+
+func parseSoftnet(r io.Reader) ([]SoftnetStat, error) {
+ const minColumns = 9
+
+ s := bufio.NewScanner(r)
+
+ var stats []SoftnetStat
+ cpuIndex := 0
+ for s.Scan() {
+ columns := strings.Fields(s.Text())
+ width := len(columns)
+ softnetStat := SoftnetStat{}
+
+ if width < minColumns {
+ return nil, fmt.Errorf("%w: detected %d columns, but expected at least %d", ErrFileParse, width, minColumns)
+ }
+
+ // Linux 2.6.23 https://elixir.bootlin.com/linux/v2.6.23/source/net/core/dev.c#L2347
+ if width >= minColumns {
+ us, err := parseHexUint32s(columns[0:9])
+ if err != nil {
+ return nil, err
+ }
+
+ softnetStat.Processed = us[0]
+ softnetStat.Dropped = us[1]
+ softnetStat.TimeSqueezed = us[2]
+ softnetStat.CPUCollision = us[8]
+ }
+
+ // Linux 2.6.39 https://elixir.bootlin.com/linux/v2.6.39/source/net/core/dev.c#L4086
+ if width >= 10 {
+ us, err := parseHexUint32s(columns[9:10])
+ if err != nil {
+ return nil, err
+ }
+
+ softnetStat.ReceivedRps = us[0]
+ }
+
+ // Linux 4.18 https://elixir.bootlin.com/linux/v4.18/source/net/core/net-procfs.c#L162
+ if width >= 11 {
+ us, err := parseHexUint32s(columns[10:11])
+ if err != nil {
+ return nil, err
+ }
+
+ softnetStat.FlowLimitCount = us[0]
+ }
+
+ // Linux 5.14 https://elixir.bootlin.com/linux/v5.14/source/net/core/net-procfs.c#L169
+ if width >= 13 {
+ us, err := parseHexUint32s(columns[11:13])
+ if err != nil {
+ return nil, err
+ }
+
+ softnetStat.SoftnetBacklogLen = us[0]
+ softnetStat.Index = us[1]
+ } else {
+ // For older kernels, create the Index based on the scan line number.
+ softnetStat.Index = uint32(cpuIndex)
+ }
+ softnetStat.Width = width
+ stats = append(stats, softnetStat)
+ cpuIndex++
+ }
+
+ return stats, nil
+}
+
+func parseHexUint32s(ss []string) ([]uint32, error) {
+ us := make([]uint32, 0, len(ss))
+ for _, s := range ss {
+ u, err := strconv.ParseUint(s, 16, 32)
+ if err != nil {
+ return nil, err
+ }
+
+ us = append(us, uint32(u))
+ }
+
+ return us, nil
+}
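
A usage sketch for NetSoftnetStat, assuming the default /proc mount; each entry corresponds to one CPU's softnet_stat row.

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS(procfs.DefaultMountPoint)
	if err != nil {
		log.Fatal(err)
	}
	stats, err := fs.NetSoftnetStat()
	if err != nil {
		log.Fatal(err)
	}
	for _, s := range stats {
		fmt.Printf("cpu=%d processed=%d dropped=%d time_squeezed=%d (columns=%d)\n",
			s.Index, s.Processed, s.Dropped, s.TimeSqueezed, s.Width)
	}
}
```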
diff --git a/vendor/github.com/prometheus/procfs/net_tcp.go b/vendor/github.com/prometheus/procfs/net_tcp.go
new file mode 100644
index 0000000..5277629
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/net_tcp.go
@@ -0,0 +1,64 @@
+// Copyright 2020 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+type (
+ // NetTCP represents the contents of /proc/net/tcp{,6} file without the header.
+ NetTCP []*netIPSocketLine
+
+ // NetTCPSummary provides already computed values like the total queue lengths or
+ // the total number of used sockets. In contrast to NetTCP it does not collect
+ // the parsed lines into a slice.
+ NetTCPSummary NetIPSocketSummary
+)
+
+// NetTCP returns the IPv4 kernel/networking statistics for TCP datagrams
+// read from /proc/net/tcp.
+func (fs FS) NetTCP() (NetTCP, error) {
+ return newNetTCP(fs.proc.Path("net/tcp"))
+}
+
+// NetTCP6 returns the IPv6 kernel/networking statistics for TCP datagrams
+// read from /proc/net/tcp6.
+func (fs FS) NetTCP6() (NetTCP, error) {
+ return newNetTCP(fs.proc.Path("net/tcp6"))
+}
+
+// NetTCPSummary returns already computed statistics like the total queue lengths
+// for TCP datagrams read from /proc/net/tcp.
+func (fs FS) NetTCPSummary() (*NetTCPSummary, error) {
+ return newNetTCPSummary(fs.proc.Path("net/tcp"))
+}
+
+// NetTCP6Summary returns already computed statistics like the total queue lengths
+// for TCP datagrams read from /proc/net/tcp6.
+func (fs FS) NetTCP6Summary() (*NetTCPSummary, error) {
+ return newNetTCPSummary(fs.proc.Path("net/tcp6"))
+}
+
+// newNetTCP creates a new NetTCP{,6} from the contents of the given file.
+func newNetTCP(file string) (NetTCP, error) {
+ n, err := newNetIPSocket(file)
+ n1 := NetTCP(n)
+ return n1, err
+}
+
+func newNetTCPSummary(file string) (*NetTCPSummary, error) {
+ n, err := newNetIPSocketSummary(file)
+ if n == nil {
+ return nil, err
+ }
+ n1 := NetTCPSummary(*n)
+ return &n1, err
+}
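
A usage sketch for the TCP summary helpers, assuming the default /proc mount. Summaries avoid keeping every parsed socket line in memory, which matters on hosts with many open connections.

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS(procfs.DefaultMountPoint)
	if err != nil {
		log.Fatal(err)
	}
	sum, err := fs.NetTCPSummary()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("tcp: sockets=%d tx_queue=%d rx_queue=%d\n",
		sum.UsedSockets, sum.TxQueueLength, sum.RxQueueLength)
}
```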
diff --git a/vendor/github.com/prometheus/procfs/net_tls_stat.go b/vendor/github.com/prometheus/procfs/net_tls_stat.go
new file mode 100644
index 0000000..13994c1
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/net_tls_stat.go
@@ -0,0 +1,119 @@
+// Copyright 2023 Prometheus Team
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+ "bufio"
+ "fmt"
+ "os"
+ "strconv"
+ "strings"
+)
+
+// TLSStat struct represents data in /proc/net/tls_stat.
+// See https://docs.kernel.org/networking/tls.html#statistics
+type TLSStat struct {
+ // number of TX sessions currently installed where host handles cryptography
+ TLSCurrTxSw int
+ // number of RX sessions currently installed where host handles cryptography
+ TLSCurrRxSw int
+ // number of TX sessions currently installed where NIC handles cryptography
+ TLSCurrTxDevice int
+ // number of RX sessions currently installed where NIC handles cryptography
+ TLSCurrRxDevice int
+ // number of TX sessions opened with host cryptography
+ TLSTxSw int
+ // number of RX sessions opened with host cryptography
+ TLSRxSw int
+ // number of TX sessions opened with NIC cryptography
+ TLSTxDevice int
+ // number of RX sessions opened with NIC cryptography
+ TLSRxDevice int
+ // record decryption failed (e.g. due to incorrect authentication tag)
+ TLSDecryptError int
+ // number of RX resyncs sent to NICs handling cryptography
+ TLSRxDeviceResync int
+ // number of RX records which had to be re-decrypted due to TLS_RX_EXPECT_NO_PAD mis-prediction. Note that this counter will also increment for non-data records.
+ TLSDecryptRetry int
+ // number of data RX records which had to be re-decrypted due to TLS_RX_EXPECT_NO_PAD mis-prediction.
+ TLSRxNoPadViolation int
+}
+
+// NewTLSStat reads the tls_stat statistics.
+func NewTLSStat() (TLSStat, error) {
+ fs, err := NewFS(DefaultMountPoint)
+ if err != nil {
+ return TLSStat{}, err
+ }
+
+ return fs.NewTLSStat()
+}
+
+// NewTLSStat reads the tls_stat statistics.
+func (fs FS) NewTLSStat() (TLSStat, error) {
+ file, err := os.Open(fs.proc.Path("net/tls_stat"))
+ if err != nil {
+ return TLSStat{}, err
+ }
+ defer file.Close()
+
+ var (
+ tlsstat = TLSStat{}
+ s = bufio.NewScanner(file)
+ )
+
+ for s.Scan() {
+ fields := strings.Fields(s.Text())
+
+ if len(fields) != 2 {
+ return TLSStat{}, fmt.Errorf("%w: %q line %q", ErrFileParse, file.Name(), s.Text())
+ }
+
+ name := fields[0]
+ value, err := strconv.Atoi(fields[1])
+ if err != nil {
+ return TLSStat{}, err
+ }
+
+ switch name {
+ case "TlsCurrTxSw":
+ tlsstat.TLSCurrTxSw = value
+ case "TlsCurrRxSw":
+ tlsstat.TLSCurrRxSw = value
+ case "TlsCurrTxDevice":
+ tlsstat.TLSCurrTxDevice = value
+ case "TlsCurrRxDevice":
+ tlsstat.TLSCurrRxDevice = value
+ case "TlsTxSw":
+ tlsstat.TLSTxSw = value
+ case "TlsRxSw":
+ tlsstat.TLSRxSw = value
+ case "TlsTxDevice":
+ tlsstat.TLSTxDevice = value
+ case "TlsRxDevice":
+ tlsstat.TLSRxDevice = value
+ case "TlsDecryptError":
+ tlsstat.TLSDecryptError = value
+ case "TlsRxDeviceResync":
+ tlsstat.TLSRxDeviceResync = value
+ case "TlsDecryptRetry":
+ tlsstat.TLSDecryptRetry = value
+ case "TlsRxNoPadViolation":
+ tlsstat.TLSRxNoPadViolation = value
+ }
+
+ }
+
+ return tlsstat, s.Err()
+}
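
A usage sketch for the TLS statistics, assuming /proc/net/tls_stat is present (kernel TLS support enabled); only a few counters are printed.

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	stat, err := procfs.NewTLSStat()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("current sw sessions: tx=%d rx=%d\n", stat.TLSCurrTxSw, stat.TLSCurrRxSw)
	fmt.Printf("decrypt errors: %d\n", stat.TLSDecryptError)
}
```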
diff --git a/vendor/github.com/prometheus/procfs/net_udp.go b/vendor/github.com/prometheus/procfs/net_udp.go
new file mode 100644
index 0000000..9ac3daf
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/net_udp.go
@@ -0,0 +1,64 @@
+// Copyright 2020 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+type (
+ // NetUDP represents the contents of /proc/net/udp{,6} file without the header.
+ NetUDP []*netIPSocketLine
+
+ // NetUDPSummary provides already computed values like the total queue lengths or
+ // the total number of used sockets. In contrast to NetUDP it does not collect
+ // the parsed lines into a slice.
+ NetUDPSummary NetIPSocketSummary
+)
+
+// NetUDP returns the IPv4 kernel/networking statistics for UDP datagrams
+// read from /proc/net/udp.
+func (fs FS) NetUDP() (NetUDP, error) {
+ return newNetUDP(fs.proc.Path("net/udp"))
+}
+
+// NetUDP6 returns the IPv6 kernel/networking statistics for UDP datagrams
+// read from /proc/net/udp6.
+func (fs FS) NetUDP6() (NetUDP, error) {
+ return newNetUDP(fs.proc.Path("net/udp6"))
+}
+
+// NetUDPSummary returns already computed statistics like the total queue lengths
+// for UDP datagrams read from /proc/net/udp.
+func (fs FS) NetUDPSummary() (*NetUDPSummary, error) {
+ return newNetUDPSummary(fs.proc.Path("net/udp"))
+}
+
+// NetUDP6Summary returns already computed statistics like the total queue lengths
+// for UDP datagrams read from /proc/net/udp6.
+func (fs FS) NetUDP6Summary() (*NetUDPSummary, error) {
+ return newNetUDPSummary(fs.proc.Path("net/udp6"))
+}
+
+// newNetUDP creates a new NetUDP{,6} from the contents of the given file.
+func newNetUDP(file string) (NetUDP, error) {
+ n, err := newNetIPSocket(file)
+ n1 := NetUDP(n)
+ return n1, err
+}
+
+func newNetUDPSummary(file string) (*NetUDPSummary, error) {
+ n, err := newNetIPSocketSummary(file)
+ if n == nil {
+ return nil, err
+ }
+ n1 := NetUDPSummary(*n)
+ return &n1, err
+}
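
A usage sketch for the UDP summary, assuming the default /proc mount; Drops is a pointer and is only populated for UDP files.

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS(procfs.DefaultMountPoint)
	if err != nil {
		log.Fatal(err)
	}
	sum, err := fs.NetUDPSummary()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("udp sockets in use: %d\n", sum.UsedSockets)
	if sum.Drops != nil {
		fmt.Printf("udp packet drops: %d\n", *sum.Drops)
	}
}
```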
diff --git a/vendor/github.com/prometheus/procfs/net_unix.go b/vendor/github.com/prometheus/procfs/net_unix.go
new file mode 100644
index 0000000..d868ceb
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/net_unix.go
@@ -0,0 +1,257 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+ "os"
+ "strconv"
+ "strings"
+)
+
+// For the proc file format details,
+// see https://elixir.bootlin.com/linux/v4.17/source/net/unix/af_unix.c#L2815
+// and https://elixir.bootlin.com/linux/latest/source/include/uapi/linux/net.h#L48.
+
+// Constants for the various /proc/net/unix enumerations.
+// TODO: match against x/sys/unix or similar?
+const (
+ netUnixTypeStream = 1
+ netUnixTypeDgram = 2
+ netUnixTypeSeqpacket = 5
+
+ netUnixFlagDefault = 0
+ netUnixFlagListen = 1 << 16
+
+ netUnixStateUnconnected = 1
+ netUnixStateConnecting = 2
+ netUnixStateConnected = 3
+ netUnixStateDisconnected = 4
+)
+
+// NetUNIXType is the type of the type field.
+type NetUNIXType uint64
+
+// NetUNIXFlags is the type of the flags field.
+type NetUNIXFlags uint64
+
+// NetUNIXState is the type of the state field.
+type NetUNIXState uint64
+
+// NetUNIXLine represents a line of /proc/net/unix.
+type NetUNIXLine struct {
+ KernelPtr string
+ RefCount uint64
+ Protocol uint64
+ Flags NetUNIXFlags
+ Type NetUNIXType
+ State NetUNIXState
+ Inode uint64
+ Path string
+}
+
+// NetUNIX holds the data read from /proc/net/unix.
+type NetUNIX struct {
+ Rows []*NetUNIXLine
+}
+
+// NetUNIX returns data read from /proc/net/unix.
+func (fs FS) NetUNIX() (*NetUNIX, error) {
+ return readNetUNIX(fs.proc.Path("net/unix"))
+}
+
+// readNetUNIX reads data in /proc/net/unix format from the specified file.
+func readNetUNIX(file string) (*NetUNIX, error) {
+ // This file could be quite large and a streaming read is desirable versus
+ // reading the entire contents at once.
+ f, err := os.Open(file)
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+
+ return parseNetUNIX(f)
+}
+
+// parseNetUNIX creates a NetUnix structure from the incoming stream.
+func parseNetUNIX(r io.Reader) (*NetUNIX, error) {
+ // Begin scanning by checking for the existence of Inode.
+ s := bufio.NewScanner(r)
+ s.Scan()
+
+ // The man page of proc(5) does not document an Inode field,
+ // but in practice it exists. This code works for both cases.
+ hasInode := strings.Contains(s.Text(), "Inode")
+
+ // Expect a minimum number of fields, but Inode and Path are optional:
+ // Num RefCount Protocol Flags Type St Inode Path
+ minFields := 6
+ if hasInode {
+ minFields++
+ }
+
+ var nu NetUNIX
+ for s.Scan() {
+ line := s.Text()
+ item, err := nu.parseLine(line, hasInode, minFields)
+ if err != nil {
+ return nil, fmt.Errorf("%w: /proc/net/unix encountered data %q: %w", ErrFileParse, line, err)
+ }
+
+ nu.Rows = append(nu.Rows, item)
+ }
+
+ if err := s.Err(); err != nil {
+ return nil, fmt.Errorf("%w: /proc/net/unix encountered data: %w", ErrFileParse, err)
+ }
+
+ return &nu, nil
+}
+
+func (u *NetUNIX) parseLine(line string, hasInode bool, min int) (*NetUNIXLine, error) {
+ fields := strings.Fields(line)
+
+ l := len(fields)
+ if l < min {
+ return nil, fmt.Errorf("%w: expected at least %d fields but got %d", ErrFileParse, min, l)
+ }
+
+ // Field offsets are as follows:
+ // Num RefCount Protocol Flags Type St Inode Path
+
+ kernelPtr := strings.TrimSuffix(fields[0], ":")
+
+ users, err := u.parseUsers(fields[1])
+ if err != nil {
+ return nil, fmt.Errorf("%w: ref count %q: %w", ErrFileParse, fields[1], err)
+ }
+
+ flags, err := u.parseFlags(fields[3])
+ if err != nil {
+ return nil, fmt.Errorf("%w: Unable to parse flags %q: %w", ErrFileParse, fields[3], err)
+ }
+
+ typ, err := u.parseType(fields[4])
+ if err != nil {
+ return nil, fmt.Errorf("%w: Failed to parse type %q: %w", ErrFileParse, fields[4], err)
+ }
+
+ state, err := u.parseState(fields[5])
+ if err != nil {
+ return nil, fmt.Errorf("%w: Failed to parse state %q: %w", ErrFileParse, fields[5], err)
+ }
+
+ var inode uint64
+ if hasInode {
+ inode, err = u.parseInode(fields[6])
+ if err != nil {
+ return nil, fmt.Errorf("%w: failed to parse inode %q: %w", ErrFileParse, fields[6], err)
+ }
+ }
+
+ n := &NetUNIXLine{
+ KernelPtr: kernelPtr,
+ RefCount: users,
+ Type: typ,
+ Flags: flags,
+ State: state,
+ Inode: inode,
+ }
+
+ // Path field is optional.
+ if l > min {
+ // Path occurs at either index 6 or 7 depending on whether inode is
+ // already present.
+ pathIdx := 7
+ if !hasInode {
+ pathIdx--
+ }
+
+ n.Path = fields[pathIdx]
+ }
+
+ return n, nil
+}
+
+func (u NetUNIX) parseUsers(s string) (uint64, error) {
+ return strconv.ParseUint(s, 16, 32)
+}
+
+func (u NetUNIX) parseType(s string) (NetUNIXType, error) {
+ typ, err := strconv.ParseUint(s, 16, 16)
+ if err != nil {
+ return 0, err
+ }
+
+ return NetUNIXType(typ), nil
+}
+
+func (u NetUNIX) parseFlags(s string) (NetUNIXFlags, error) {
+ flags, err := strconv.ParseUint(s, 16, 32)
+ if err != nil {
+ return 0, err
+ }
+
+ return NetUNIXFlags(flags), nil
+}
+
+func (u NetUNIX) parseState(s string) (NetUNIXState, error) {
+ st, err := strconv.ParseInt(s, 16, 8)
+ if err != nil {
+ return 0, err
+ }
+
+ return NetUNIXState(st), nil
+}
+
+func (u NetUNIX) parseInode(s string) (uint64, error) {
+ return strconv.ParseUint(s, 10, 64)
+}
+
+func (t NetUNIXType) String() string {
+ switch t {
+ case netUnixTypeStream:
+ return "stream"
+ case netUnixTypeDgram:
+ return "dgram"
+ case netUnixTypeSeqpacket:
+ return "seqpacket"
+ }
+ return "unknown"
+}
+
+func (f NetUNIXFlags) String() string {
+ switch f {
+ case netUnixFlagListen:
+ return "listen"
+ default:
+ return "default"
+ }
+}
+
+func (s NetUNIXState) String() string {
+ switch s {
+ case netUnixStateUnconnected:
+ return "unconnected"
+ case netUnixStateConnecting:
+ return "connecting"
+ case netUnixStateConnected:
+ return "connected"
+ case netUnixStateDisconnected:
+ return "disconnected"
+ }
+ return "unknown"
+}
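
A usage sketch for NetUNIX, assuming the default /proc mount; the Path field is empty for unnamed sockets, and Type and State print via the String methods defined above.

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS(procfs.DefaultMountPoint)
	if err != nil {
		log.Fatal(err)
	}
	nu, err := fs.NetUNIX()
	if err != nil {
		log.Fatal(err)
	}
	for _, row := range nu.Rows {
		fmt.Printf("inode=%d type=%s state=%s path=%q\n",
			row.Inode, row.Type, row.State, row.Path)
	}
}
```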
diff --git a/vendor/github.com/prometheus/procfs/net_wireless.go b/vendor/github.com/prometheus/procfs/net_wireless.go
new file mode 100644
index 0000000..7c597bc
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/net_wireless.go
@@ -0,0 +1,182 @@
+// Copyright 2023 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "io"
+ "strconv"
+ "strings"
+
+ "github.com/prometheus/procfs/internal/util"
+)
+
+// Wireless models the content of /proc/net/wireless.
+type Wireless struct {
+ Name string
+
+ // Status is the current 4-digit hex value status of the interface.
+ Status uint64
+
+ // QualityLink is the link quality.
+ QualityLink int
+
+ // QualityLevel is the signal gain (dBm).
+ QualityLevel int
+
+ // QualityNoise is the signal noise baseline (dBm).
+ QualityNoise int
+
+ // DiscardedNwid is the number of discarded packets with wrong nwid/essid.
+ DiscardedNwid int
+
+ // DiscardedCrypt is the number of discarded packets with wrong code/decode (WEP).
+ DiscardedCrypt int
+
+ // DiscardedFrag is the number of discarded packets that can't perform MAC reassembly.
+ DiscardedFrag int
+
+ // DiscardedRetry is the number of discarded packets that reached max MAC retries.
+ DiscardedRetry int
+
+ // DiscardedMisc is the number of discarded packets for other reasons.
+ DiscardedMisc int
+
+ // MissedBeacon is the number of missed beacons/superframe.
+ MissedBeacon int
+}
+
+// Wireless returns kernel wireless statistics.
+func (fs FS) Wireless() ([]*Wireless, error) {
+ b, err := util.ReadFileNoStat(fs.proc.Path("net/wireless"))
+ if err != nil {
+ return nil, err
+ }
+
+ m, err := parseWireless(bytes.NewReader(b))
+ if err != nil {
+ return nil, fmt.Errorf("%w: wireless: %w", ErrFileParse, err)
+ }
+
+ return m, nil
+}
+
+// parseWireless parses the contents of /proc/net/wireless.
+/*
+Inter-| sta-| Quality | Discarded packets | Missed | WE
+face | tus | link level noise | nwid crypt frag retry misc | beacon | 22
+ eth1: 0000 5. -256. -10. 0 1 0 3 0 0
+ eth2: 0000 5. -256. -20. 0 2 0 4 0 0
+*/
+func parseWireless(r io.Reader) ([]*Wireless, error) {
+ var (
+ interfaces []*Wireless
+ scanner = bufio.NewScanner(r)
+ )
+
+ for n := 0; scanner.Scan(); n++ {
+ // Skip the 2 header lines.
+ if n < 2 {
+ continue
+ }
+
+ line := scanner.Text()
+
+ parts := strings.Split(line, ":")
+ if len(parts) != 2 {
+ return nil, fmt.Errorf("%w: expected 2 parts after splitting line by ':', got %d for line %q", ErrFileParse, len(parts), line)
+ }
+
+ name := strings.TrimSpace(parts[0])
+ stats := strings.Fields(parts[1])
+
+ if len(stats) < 10 {
+ return nil, fmt.Errorf("%w: invalid number of fields in line %d, expected 10+, got %d: %q", ErrFileParse, n, len(stats), line)
+ }
+
+ status, err := strconv.ParseUint(stats[0], 16, 16)
+ if err != nil {
+ return nil, fmt.Errorf("%w: invalid status in line %d: %q", ErrFileParse, n, line)
+ }
+
+ qlink, err := strconv.Atoi(strings.TrimSuffix(stats[1], "."))
+ if err != nil {
+ return nil, fmt.Errorf("%w: parse Quality:link as integer %q: %w", ErrFileParse, qlink, err)
+ }
+
+ qlevel, err := strconv.Atoi(strings.TrimSuffix(stats[2], "."))
+ if err != nil {
+ return nil, fmt.Errorf("%w: Quality:level as integer %q: %w", ErrFileParse, qlevel, err)
+ }
+
+ qnoise, err := strconv.Atoi(strings.TrimSuffix(stats[3], "."))
+ if err != nil {
+ return nil, fmt.Errorf("%w: Quality:noise as integer %q: %w", ErrFileParse, qnoise, err)
+ }
+
+ dnwid, err := strconv.Atoi(stats[4])
+ if err != nil {
+ return nil, fmt.Errorf("%w: Discarded:nwid as integer %q: %w", ErrFileParse, dnwid, err)
+ }
+
+ dcrypt, err := strconv.Atoi(stats[5])
+ if err != nil {
+ return nil, fmt.Errorf("%w: Discarded:crypt as integer %q: %w", ErrFileParse, dcrypt, err)
+ }
+
+ dfrag, err := strconv.Atoi(stats[6])
+ if err != nil {
+ return nil, fmt.Errorf("%w: Discarded:frag as integer %q: %w", ErrFileParse, dfrag, err)
+ }
+
+ dretry, err := strconv.Atoi(stats[7])
+ if err != nil {
+ return nil, fmt.Errorf("%w: Discarded:retry as integer %q: %w", ErrFileParse, dretry, err)
+ }
+
+ dmisc, err := strconv.Atoi(stats[8])
+ if err != nil {
+ return nil, fmt.Errorf("%w: Discarded:misc as integer %q: %w", ErrFileParse, dmisc, err)
+ }
+
+ mbeacon, err := strconv.Atoi(stats[9])
+ if err != nil {
+ return nil, fmt.Errorf("%w: Missed:beacon as integer %q: %w", ErrFileParse, mbeacon, err)
+ }
+
+ w := &Wireless{
+ Name: name,
+ Status: status,
+ QualityLink: qlink,
+ QualityLevel: qlevel,
+ QualityNoise: qnoise,
+ DiscardedNwid: dnwid,
+ DiscardedCrypt: dcrypt,
+ DiscardedFrag: dfrag,
+ DiscardedRetry: dretry,
+ DiscardedMisc: dmisc,
+ MissedBeacon: mbeacon,
+ }
+
+ interfaces = append(interfaces, w)
+ }
+
+ if err := scanner.Err(); err != nil {
+ return nil, fmt.Errorf("%w: Failed to scan /proc/net/wireless: %w", ErrFileRead, err)
+ }
+
+ return interfaces, nil
+}
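
A usage sketch for Wireless, assuming the default /proc mount; on hosts without wireless interfaces the returned slice is simply empty.

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS(procfs.DefaultMountPoint)
	if err != nil {
		log.Fatal(err)
	}
	ifaces, err := fs.Wireless()
	if err != nil {
		log.Fatal(err)
	}
	for _, w := range ifaces {
		fmt.Printf("%s: link=%d level=%ddBm noise=%ddBm retries=%d\n",
			w.Name, w.QualityLink, w.QualityLevel, w.QualityNoise, w.DiscardedRetry)
	}
}
```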
diff --git a/vendor/github.com/prometheus/procfs/net_xfrm.go b/vendor/github.com/prometheus/procfs/net_xfrm.go
new file mode 100644
index 0000000..932ef20
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/net_xfrm.go
@@ -0,0 +1,189 @@
+// Copyright 2017 Prometheus Team
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+ "bufio"
+ "fmt"
+ "os"
+ "strconv"
+ "strings"
+)
+
+// XfrmStat models the contents of /proc/net/xfrm_stat.
+type XfrmStat struct {
+ // All errors which are not matched by other
+ XfrmInError int
+ // No buffer is left
+ XfrmInBufferError int
+ // Header Error
+ XfrmInHdrError int
+ // No state found
+ // i.e. either inbound SPI, address, or IPSEC protocol at SA is wrong
+ XfrmInNoStates int
+ // Transformation protocol specific error
+ // e.g. SA Key is wrong
+ XfrmInStateProtoError int
+ // Transformation mode specific error
+ XfrmInStateModeError int
+ // Sequence error
+ // e.g. sequence number is out of window
+ XfrmInStateSeqError int
+ // State is expired
+ XfrmInStateExpired int
+ // State has mismatch option
+ // e.g. UDP encapsulation type is mismatched
+ XfrmInStateMismatch int
+ // State is invalid
+ XfrmInStateInvalid int
+ // No matching template for states
+ // e.g. Inbound SAs are correct but SP rule is wrong
+ XfrmInTmplMismatch int
+ // No policy is found for states
+ // e.g. Inbound SAs are correct but no SP is found
+ XfrmInNoPols int
+ // Policy discards
+ XfrmInPolBlock int
+ // Policy error
+ XfrmInPolError int
+ // All errors which are not matched by others
+ XfrmOutError int
+ // Bundle generation error
+ XfrmOutBundleGenError int
+ // Bundle check error
+ XfrmOutBundleCheckError int
+ // No state was found
+ XfrmOutNoStates int
+ // Transformation protocol specific error
+ XfrmOutStateProtoError int
+ // Transformation mode specific error
+ XfrmOutStateModeError int
+ // Sequence error
+ // i.e. sequence number overflow
+ XfrmOutStateSeqError int
+ // State is expired
+ XfrmOutStateExpired int
+ // Policy discards
+ XfrmOutPolBlock int
+ // Policy is dead
+ XfrmOutPolDead int
+ // Policy Error
+ XfrmOutPolError int
+ // Forward routing of a packet is not allowed
+ XfrmFwdHdrError int
+ // State is invalid, perhaps expired
+ XfrmOutStateInvalid int
+ // State hasn’t been fully acquired before use
+ XfrmAcquireError int
+}
+
+// NewXfrmStat reads the xfrm_stat statistics.
+func NewXfrmStat() (XfrmStat, error) {
+ fs, err := NewFS(DefaultMountPoint)
+ if err != nil {
+ return XfrmStat{}, err
+ }
+
+ return fs.NewXfrmStat()
+}
+
+// NewXfrmStat reads the xfrm_stat statistics from the 'proc' filesystem.
+func (fs FS) NewXfrmStat() (XfrmStat, error) {
+ file, err := os.Open(fs.proc.Path("net/xfrm_stat"))
+ if err != nil {
+ return XfrmStat{}, err
+ }
+ defer file.Close()
+
+ var (
+ x = XfrmStat{}
+ s = bufio.NewScanner(file)
+ )
+
+ for s.Scan() {
+ fields := strings.Fields(s.Text())
+
+ if len(fields) != 2 {
+ return XfrmStat{}, fmt.Errorf("%w: %q line %q", ErrFileParse, file.Name(), s.Text())
+ }
+
+ name := fields[0]
+ value, err := strconv.Atoi(fields[1])
+ if err != nil {
+ return XfrmStat{}, err
+ }
+
+ switch name {
+ case "XfrmInError":
+ x.XfrmInError = value
+ case "XfrmInBufferError":
+ x.XfrmInBufferError = value
+ case "XfrmInHdrError":
+ x.XfrmInHdrError = value
+ case "XfrmInNoStates":
+ x.XfrmInNoStates = value
+ case "XfrmInStateProtoError":
+ x.XfrmInStateProtoError = value
+ case "XfrmInStateModeError":
+ x.XfrmInStateModeError = value
+ case "XfrmInStateSeqError":
+ x.XfrmInStateSeqError = value
+ case "XfrmInStateExpired":
+ x.XfrmInStateExpired = value
+ case "XfrmInStateInvalid":
+ x.XfrmInStateInvalid = value
+ case "XfrmInTmplMismatch":
+ x.XfrmInTmplMismatch = value
+ case "XfrmInNoPols":
+ x.XfrmInNoPols = value
+ case "XfrmInPolBlock":
+ x.XfrmInPolBlock = value
+ case "XfrmInPolError":
+ x.XfrmInPolError = value
+ case "XfrmOutError":
+ x.XfrmOutError = value
+ case "XfrmInStateMismatch":
+ x.XfrmInStateMismatch = value
+ case "XfrmOutBundleGenError":
+ x.XfrmOutBundleGenError = value
+ case "XfrmOutBundleCheckError":
+ x.XfrmOutBundleCheckError = value
+ case "XfrmOutNoStates":
+ x.XfrmOutNoStates = value
+ case "XfrmOutStateProtoError":
+ x.XfrmOutStateProtoError = value
+ case "XfrmOutStateModeError":
+ x.XfrmOutStateModeError = value
+ case "XfrmOutStateSeqError":
+ x.XfrmOutStateSeqError = value
+ case "XfrmOutStateExpired":
+ x.XfrmOutStateExpired = value
+ case "XfrmOutPolBlock":
+ x.XfrmOutPolBlock = value
+ case "XfrmOutPolDead":
+ x.XfrmOutPolDead = value
+ case "XfrmOutPolError":
+ x.XfrmOutPolError = value
+ case "XfrmFwdHdrError":
+ x.XfrmFwdHdrError = value
+ case "XfrmOutStateInvalid":
+ x.XfrmOutStateInvalid = value
+ case "XfrmAcquireError":
+ x.XfrmAcquireError = value
+ }
+
+ }
+
+ return x, s.Err()
+}
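
A usage sketch for the xfrm statistics, assuming /proc/net/xfrm_stat exists (IPsec/xfrm support enabled); only a few of the counters are shown.

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	stat, err := procfs.NewXfrmStat()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("inbound errors: %d (no state: %d)\n", stat.XfrmInError, stat.XfrmInNoStates)
	fmt.Printf("outbound errors: %d\n", stat.XfrmOutError)
}
```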
diff --git a/vendor/github.com/prometheus/procfs/netstat.go b/vendor/github.com/prometheus/procfs/netstat.go
new file mode 100644
index 0000000..742dff4
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/netstat.go
@@ -0,0 +1,82 @@
+// Copyright 2020 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+ "bufio"
+ "os"
+ "path/filepath"
+ "strconv"
+ "strings"
+)
+
+// NetStat contains statistics for all the counters from one file.
+type NetStat struct {
+ Stats map[string][]uint64
+ Filename string
+}
+
+// NetStat retrieves stats from `/proc/net/stat/`.
+func (fs FS) NetStat() ([]NetStat, error) {
+ statFiles, err := filepath.Glob(fs.proc.Path("net/stat/*"))
+ if err != nil {
+ return nil, err
+ }
+
+ var netStatsTotal []NetStat
+
+ for _, filePath := range statFiles {
+ procNetstat, err := parseNetstat(filePath)
+ if err != nil {
+ return nil, err
+ }
+ procNetstat.Filename = filepath.Base(filePath)
+
+ netStatsTotal = append(netStatsTotal, procNetstat)
+ }
+ return netStatsTotal, nil
+}
+
+// parseNetstat parses the metrics from `/proc/net/stat/` file
+// and returns a NetStat structure.
+func parseNetstat(filePath string) (NetStat, error) {
+ netStat := NetStat{
+ Stats: make(map[string][]uint64),
+ }
+ file, err := os.Open(filePath)
+ if err != nil {
+ return netStat, err
+ }
+ defer file.Close()
+
+ scanner := bufio.NewScanner(file)
+ scanner.Scan()
+
+ // First string is always a header for stats
+ var headers []string
+ headers = append(headers, strings.Fields(scanner.Text())...)
+
+ // Other strings represent per-CPU counters
+ for scanner.Scan() {
+ for num, counter := range strings.Fields(scanner.Text()) {
+ value, err := strconv.ParseUint(counter, 16, 64)
+ if err != nil {
+ return NetStat{}, err
+ }
+ netStat.Stats[headers[num]] = append(netStat.Stats[headers[num]], value)
+ }
+ }
+
+ return netStat, nil
+}
diff --git a/vendor/github.com/prometheus/procfs/proc.go b/vendor/github.com/prometheus/procfs/proc.go
new file mode 100644
index 0000000..1427963
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/proc.go
@@ -0,0 +1,338 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "strconv"
+ "strings"
+
+ "github.com/prometheus/procfs/internal/util"
+)
+
+// Proc provides information about a running process.
+type Proc struct {
+ // The process ID.
+ PID int
+
+ fs FS
+}
+
+// Procs represents a list of Proc structs.
+type Procs []Proc
+
+var (
+ ErrFileParse = errors.New("Error Parsing File")
+ ErrFileRead = errors.New("Error Reading File")
+ ErrMountPoint = errors.New("Error Accessing Mount point")
+)
+
+func (p Procs) Len() int { return len(p) }
+func (p Procs) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
+func (p Procs) Less(i, j int) bool { return p[i].PID < p[j].PID }
+
+// Self returns a process for the current process read via /proc/self.
+func Self() (Proc, error) {
+ fs, err := NewFS(DefaultMountPoint)
+ if err != nil || errors.Unwrap(err) == ErrMountPoint {
+ return Proc{}, err
+ }
+ return fs.Self()
+}
+
+// NewProc returns a process for the given pid under /proc.
+func NewProc(pid int) (Proc, error) {
+ fs, err := NewFS(DefaultMountPoint)
+ if err != nil {
+ return Proc{}, err
+ }
+ return fs.Proc(pid)
+}
+
+// AllProcs returns a list of all currently available processes under /proc.
+func AllProcs() (Procs, error) {
+ fs, err := NewFS(DefaultMountPoint)
+ if err != nil {
+ return Procs{}, err
+ }
+ return fs.AllProcs()
+}
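+
+// Usage sketch (illustrative only, not part of the upstream file): list every
+// PID visible under /proc together with its command name.
+//
+//     procs, err := procfs.AllProcs()
+//     if err != nil {
+//         log.Fatal(err)
+//     }
+//     for _, p := range procs {
+//         comm, err := p.Comm()
+//         if err != nil {
+//             continue // the process may have exited in the meantime
+//         }
+//         fmt.Println(p.PID, comm)
+//     }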
+
+// Self returns a process for the current process.
+func (fs FS) Self() (Proc, error) {
+ p, err := os.Readlink(fs.proc.Path("self"))
+ if err != nil {
+ return Proc{}, err
+ }
+ pid, err := strconv.Atoi(strings.Replace(p, string(fs.proc), "", -1))
+ if err != nil {
+ return Proc{}, err
+ }
+ return fs.Proc(pid)
+}
+
+// NewProc returns a process for the given pid.
+//
+// Deprecated: Use fs.Proc() instead.
+func (fs FS) NewProc(pid int) (Proc, error) {
+ return fs.Proc(pid)
+}
+
+// Proc returns a process for the given pid.
+func (fs FS) Proc(pid int) (Proc, error) {
+ if _, err := os.Stat(fs.proc.Path(strconv.Itoa(pid))); err != nil {
+ return Proc{}, err
+ }
+ return Proc{PID: pid, fs: fs}, nil
+}
+
+// AllProcs returns a list of all currently available processes.
+func (fs FS) AllProcs() (Procs, error) {
+ d, err := os.Open(fs.proc.Path())
+ if err != nil {
+ return Procs{}, err
+ }
+ defer d.Close()
+
+ names, err := d.Readdirnames(-1)
+ if err != nil {
+ return Procs{}, fmt.Errorf("%w: Cannot read file: %v: %w", ErrFileRead, names, err)
+ }
+
+ p := Procs{}
+ for _, n := range names {
+ pid, err := strconv.ParseInt(n, 10, 64)
+ if err != nil {
+ continue
+ }
+ p = append(p, Proc{PID: int(pid), fs: fs})
+ }
+
+ return p, nil
+}
+
+// CmdLine returns the command line of a process.
+func (p Proc) CmdLine() ([]string, error) {
+ data, err := util.ReadFileNoStat(p.path("cmdline"))
+ if err != nil {
+ return nil, err
+ }
+
+ if len(data) < 1 {
+ return []string{}, nil
+ }
+
+ return strings.Split(string(bytes.TrimRight(data, "\x00")), "\x00"), nil
+}
+
+// Wchan returns the wchan (wait channel) of a process.
+func (p Proc) Wchan() (string, error) {
+ f, err := os.Open(p.path("wchan"))
+ if err != nil {
+ return "", err
+ }
+ defer f.Close()
+
+ data, err := io.ReadAll(f)
+ if err != nil {
+ return "", err
+ }
+
+ wchan := string(data)
+ if wchan == "" || wchan == "0" {
+ return "", nil
+ }
+
+ return wchan, nil
+}
+
+// Comm returns the command name of a process.
+func (p Proc) Comm() (string, error) {
+ data, err := util.ReadFileNoStat(p.path("comm"))
+ if err != nil {
+ return "", err
+ }
+
+ return strings.TrimSpace(string(data)), nil
+}
+
+// Executable returns the absolute path of the executable command of a process.
+func (p Proc) Executable() (string, error) {
+ exe, err := os.Readlink(p.path("exe"))
+ if os.IsNotExist(err) {
+ return "", nil
+ }
+
+ return exe, err
+}
+
+// Cwd returns the absolute path to the current working directory of the process.
+func (p Proc) Cwd() (string, error) {
+ wd, err := os.Readlink(p.path("cwd"))
+ if os.IsNotExist(err) {
+ return "", nil
+ }
+
+ return wd, err
+}
+
+// RootDir returns the absolute path to the process's root directory (as set by chroot).
+func (p Proc) RootDir() (string, error) {
+ rdir, err := os.Readlink(p.path("root"))
+ if os.IsNotExist(err) {
+ return "", nil
+ }
+
+ return rdir, err
+}
+
+// FileDescriptors returns the currently open file descriptors of a process.
+func (p Proc) FileDescriptors() ([]uintptr, error) {
+ names, err := p.fileDescriptors()
+ if err != nil {
+ return nil, err
+ }
+
+ fds := make([]uintptr, len(names))
+ for i, n := range names {
+ fd, err := strconv.ParseInt(n, 10, 32)
+ if err != nil {
+ return nil, fmt.Errorf("%w: Cannot parse line: %v: %w", ErrFileParse, i, err)
+ }
+ fds[i] = uintptr(fd)
+ }
+
+ return fds, nil
+}
+
+// FileDescriptorTargets returns the targets of all file descriptors of a process.
+// If a file descriptor is not a symlink to a file (like a socket), that value will be the empty string.
+func (p Proc) FileDescriptorTargets() ([]string, error) {
+ names, err := p.fileDescriptors()
+ if err != nil {
+ return nil, err
+ }
+
+ targets := make([]string, len(names))
+
+ for i, name := range names {
+ target, err := os.Readlink(p.path("fd", name))
+ if err == nil {
+ targets[i] = target
+ }
+ }
+
+ return targets, nil
+}
+
+// FileDescriptorsLen returns the number of currently open file descriptors of
+// a process.
+func (p Proc) FileDescriptorsLen() (int, error) {
+ // Use fast path if available (Linux v6.2): https://github.com/torvalds/linux/commit/f1f1f2569901
+ if p.fs.isReal {
+ stat, err := os.Stat(p.path("fd"))
+ if err != nil {
+ return 0, err
+ }
+
+ size := stat.Size()
+ if size > 0 {
+ return int(size), nil
+ }
+ }
+
+ fds, err := p.fileDescriptors()
+ if err != nil {
+ return 0, err
+ }
+
+ return len(fds), nil
+}
+
+// MountStats retrieves statistics and configuration for mount points in a
+// process's namespace.
+func (p Proc) MountStats() ([]*Mount, error) {
+ f, err := os.Open(p.path("mountstats"))
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+
+ return parseMountStats(f)
+}
+
+// MountInfo retrieves mount information for mount points in a
+// process's namespace.
+// It supplies information missing in `/proc/self/mounts` and
+// fixes various other problems with that file too.
+func (p Proc) MountInfo() ([]*MountInfo, error) {
+ data, err := util.ReadFileNoStat(p.path("mountinfo"))
+ if err != nil {
+ return nil, err
+ }
+ return parseMountInfo(data)
+}
+
+func (p Proc) fileDescriptors() ([]string, error) {
+ d, err := os.Open(p.path("fd"))
+ if err != nil {
+ return nil, err
+ }
+ defer d.Close()
+
+ names, err := d.Readdirnames(-1)
+ if err != nil {
+ return nil, fmt.Errorf("%w: Cannot read file: %v: %w", ErrFileRead, names, err)
+ }
+
+ return names, nil
+}
+
+func (p Proc) path(pa ...string) string {
+ return p.fs.proc.Path(append([]string{strconv.Itoa(p.PID)}, pa...)...)
+}
+
+// FileDescriptorsInfo retrieves information about all file descriptors of
+// the process.
+func (p Proc) FileDescriptorsInfo() (ProcFDInfos, error) {
+ names, err := p.fileDescriptors()
+ if err != nil {
+ return nil, err
+ }
+
+ var fdinfos ProcFDInfos
+
+ for _, n := range names {
+ fdinfo, err := p.FDInfo(n)
+ if err != nil {
+ continue
+ }
+ fdinfos = append(fdinfos, *fdinfo)
+ }
+
+ return fdinfos, nil
+}
+
+// Schedstat returns task scheduling information for the process.
+func (p Proc) Schedstat() (ProcSchedstat, error) {
+ contents, err := os.ReadFile(p.path("schedstat"))
+ if err != nil {
+ return ProcSchedstat{}, err
+ }
+ return parseProcSchedstat(string(contents))
+}
diff --git a/vendor/github.com/prometheus/procfs/proc_cgroup.go b/vendor/github.com/prometheus/procfs/proc_cgroup.go
new file mode 100644
index 0000000..daeed7f
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/proc_cgroup.go
@@ -0,0 +1,98 @@
+// Copyright 2020 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "strconv"
+ "strings"
+
+ "github.com/prometheus/procfs/internal/util"
+)
+
+// Cgroup models one line from /proc/[pid]/cgroup. Each Cgroup struct describes the placement of a PID inside a
+// specific control hierarchy. The kernel has two cgroup APIs, v1 and v2. v1 has one hierarchy per available resource
+// controller, while v2 has one unified hierarchy shared by all controllers. Regardless of v1 or v2, all hierarchies
+// contain all running processes, so the question answerable with a Cgroup struct is 'where is this process in
+// this hierarchy' (where==what path on the specific cgroupfs). By prefixing this path with the mount point of
+// *this specific* hierarchy, you can locate the relevant pseudo-files needed to read/set the data for this PID
+// in this hierarchy
+//
+// Also see http://man7.org/linux/man-pages/man7/cgroups.7.html
+type Cgroup struct {
+ // HierarchyID that can be matched to a named hierarchy using /proc/cgroups. Cgroups V2 only has one
+ // hierarchy, so HierarchyID is always 0. For cgroups v1 this is a unique ID number
+ HierarchyID int
+ // Controllers using this hierarchy of processes. Controllers are also known as subsystems. For
+ // Cgroups V2 this may be empty, as all active controllers use the same hierarchy
+ Controllers []string
+ // Path of this control group, relative to the mount point of the cgroupfs representing this specific
+ // hierarchy
+ Path string
+}
+
+// parseCgroupString parses each line of the /proc/[pid]/cgroup file
+// Line format is hierarchyID:[controller1,controller2]:path.
+func parseCgroupString(cgroupStr string) (*Cgroup, error) {
+ var err error
+
+ fields := strings.SplitN(cgroupStr, ":", 3)
+ if len(fields) < 3 {
+ return nil, fmt.Errorf("%w: 3+ fields required, found %d fields in cgroup string: %s", ErrFileParse, len(fields), cgroupStr)
+ }
+
+ cgroup := &Cgroup{
+ Path: fields[2],
+ Controllers: nil,
+ }
+ cgroup.HierarchyID, err = strconv.Atoi(fields[0])
+ if err != nil {
+ return nil, fmt.Errorf("%w: hierarchy ID: %q", ErrFileParse, cgroup.HierarchyID)
+ }
+ if fields[1] != "" {
+ ssNames := strings.Split(fields[1], ",")
+ cgroup.Controllers = append(cgroup.Controllers, ssNames...)
+ }
+ return cgroup, nil
+}
+
+// parseCgroups reads each line of the /proc/[pid]/cgroup file.
+func parseCgroups(data []byte) ([]Cgroup, error) {
+ var cgroups []Cgroup
+ scanner := bufio.NewScanner(bytes.NewReader(data))
+ for scanner.Scan() {
+ mountString := scanner.Text()
+ parsedMounts, err := parseCgroupString(mountString)
+ if err != nil {
+ return nil, err
+ }
+ cgroups = append(cgroups, *parsedMounts)
+ }
+
+ err := scanner.Err()
+ return cgroups, err
+}
+
+// Cgroups reads from /proc/<pid>/cgroup and returns a []Cgroup slice locating this PID in each process
+// control hierarchy running on this system. On every system (v1 and v2), all hierarchies contain all processes,
+// so the length of the returned slice is equal to the number of active hierarchies on this system.
+func (p Proc) Cgroups() ([]Cgroup, error) {
+ data, err := util.ReadFileNoStat(p.path("cgroup"))
+ if err != nil {
+ return nil, err
+ }
+ return parseCgroups(data)
+}
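+
+// Usage sketch (illustrative only, not part of the upstream file): locate the
+// current process in every cgroup hierarchy.
+//
+//     p, err := procfs.Self()
+//     if err != nil {
+//         log.Fatal(err)
+//     }
+//     cgroups, err := p.Cgroups()
+//     if err != nil {
+//         log.Fatal(err)
+//     }
+//     for _, cg := range cgroups {
+//         fmt.Println(cg.HierarchyID, cg.Controllers, cg.Path)
+//     }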
diff --git a/vendor/github.com/prometheus/procfs/proc_cgroups.go b/vendor/github.com/prometheus/procfs/proc_cgroups.go
new file mode 100644
index 0000000..5dd4938
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/proc_cgroups.go
@@ -0,0 +1,98 @@
+// Copyright 2021 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "strconv"
+ "strings"
+
+ "github.com/prometheus/procfs/internal/util"
+)
+
+// CgroupSummary models one line from /proc/cgroups.
+// This file contains information about the controllers that are compiled into the kernel.
+//
+// Also see http://man7.org/linux/man-pages/man7/cgroups.7.html
+type CgroupSummary struct {
+ // The name of the controller. controller is also known as subsystem.
+ SubsysName string
+ // The unique ID of the cgroup hierarchy on which this controller is mounted.
+ Hierarchy int
+ // The number of control groups in this hierarchy using this controller.
+ Cgroups int
+ // This field contains the value 1 if this controller is enabled, or 0 if it has been disabled
+ Enabled int
+}
+
+// parseCgroupSummaryString parses each line of the /proc/cgroups file.
+// Line format is `subsys_name hierarchy num_cgroups enabled`.
+func parseCgroupSummaryString(CgroupSummaryStr string) (*CgroupSummary, error) {
+ var err error
+
+ fields := strings.Fields(CgroupSummaryStr)
+ // require at least 4 fields
+ if len(fields) < 4 {
+ return nil, fmt.Errorf("%w: 4+ fields required, found %d fields in cgroup info string: %s", ErrFileParse, len(fields), CgroupSummaryStr)
+ }
+
+ CgroupSummary := &CgroupSummary{
+ SubsysName: fields[0],
+ }
+ CgroupSummary.Hierarchy, err = strconv.Atoi(fields[1])
+ if err != nil {
+ return nil, fmt.Errorf("%w: Unable to parse hierarchy ID from %q", ErrFileParse, fields[1])
+ }
+ CgroupSummary.Cgroups, err = strconv.Atoi(fields[2])
+ if err != nil {
+ return nil, fmt.Errorf("%w: Unable to parse Cgroup Num from %q", ErrFileParse, fields[2])
+ }
+ CgroupSummary.Enabled, err = strconv.Atoi(fields[3])
+ if err != nil {
+ return nil, fmt.Errorf("%w: Unable to parse Enabled from %q", ErrFileParse, fields[3])
+ }
+ return CgroupSummary, nil
+}
+
+// parseCgroupSummary reads each line of the /proc/cgroups file.
+func parseCgroupSummary(data []byte) ([]CgroupSummary, error) {
+ var CgroupSummarys []CgroupSummary
+ scanner := bufio.NewScanner(bytes.NewReader(data))
+ for scanner.Scan() {
+ CgroupSummaryString := scanner.Text()
+ // ignore comment lines
+ if strings.HasPrefix(CgroupSummaryString, "#") {
+ continue
+ }
+ CgroupSummary, err := parseCgroupSummaryString(CgroupSummaryString)
+ if err != nil {
+ return nil, err
+ }
+ CgroupSummarys = append(CgroupSummarys, *CgroupSummary)
+ }
+
+ err := scanner.Err()
+ return CgroupSummarys, err
+}
+
+// CgroupSummarys returns information about current /proc/cgroups.
+func (fs FS) CgroupSummarys() ([]CgroupSummary, error) {
+ data, err := util.ReadFileNoStat(fs.proc.Path("cgroups"))
+ if err != nil {
+ return nil, err
+ }
+ return parseCgroupSummary(data)
+}
diff --git a/vendor/github.com/prometheus/procfs/proc_environ.go b/vendor/github.com/prometheus/procfs/proc_environ.go
new file mode 100644
index 0000000..57a8989
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/proc_environ.go
@@ -0,0 +1,37 @@
+// Copyright 2019 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+ "strings"
+
+ "github.com/prometheus/procfs/internal/util"
+)
+
+// Environ reads process environments from `/proc/<pid>/environ`.
+func (p Proc) Environ() ([]string, error) {
+ environments := make([]string, 0)
+
+ data, err := util.ReadFileNoStat(p.path("environ"))
+ if err != nil {
+ return environments, err
+ }
+
+ environments = strings.Split(string(data), "\000")
+ if len(environments) > 0 {
+ environments = environments[:len(environments)-1]
+ }
+
+ return environments, nil
+}
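+
+// Usage sketch (illustrative only, not part of the upstream file): print the
+// environment of the current process as KEY=VALUE strings.
+//
+//     p, err := procfs.Self()
+//     if err != nil {
+//         log.Fatal(err)
+//     }
+//     env, err := p.Environ()
+//     if err != nil {
+//         log.Fatal(err)
+//     }
+//     for _, kv := range env {
+//         fmt.Println(kv)
+//     }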
diff --git a/vendor/github.com/prometheus/procfs/proc_fdinfo.go b/vendor/github.com/prometheus/procfs/proc_fdinfo.go
new file mode 100644
index 0000000..fa761b3
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/proc_fdinfo.go
@@ -0,0 +1,138 @@
+// Copyright 2019 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "regexp"
+
+ "github.com/prometheus/procfs/internal/util"
+)
+
+var (
+ rPos = regexp.MustCompile(`^pos:\s+(\d+)$`)
+ rFlags = regexp.MustCompile(`^flags:\s+(\d+)$`)
+ rMntID = regexp.MustCompile(`^mnt_id:\s+(\d+)$`)
+ rIno = regexp.MustCompile(`^ino:\s+(\d+)$`)
+ rInotify = regexp.MustCompile(`^inotify`)
+ rInotifyParts = regexp.MustCompile(`^inotify\s+wd:([0-9a-f]+)\s+ino:([0-9a-f]+)\s+sdev:([0-9a-f]+)(?:\s+mask:([0-9a-f]+))?`)
+)
+
+// ProcFDInfo represents file descriptor information.
+type ProcFDInfo struct {
+ // File descriptor
+ FD string
+ // File offset
+ Pos string
+ // File access mode and status flags
+ Flags string
+ // Mount point ID
+ MntID string
+ // Inode number
+ Ino string
+ // List of inotify lines (structured) in the fdinfo file (kernel 3.8+ only)
+ InotifyInfos []InotifyInfo
+}
+
+// FDInfo constructor. On kernels older than 3.8, InotifyInfos will always be empty.
+func (p Proc) FDInfo(fd string) (*ProcFDInfo, error) {
+ data, err := util.ReadFileNoStat(p.path("fdinfo", fd))
+ if err != nil {
+ return nil, err
+ }
+
+ var text, pos, flags, mntid, ino string
+ var inotify []InotifyInfo
+
+ scanner := bufio.NewScanner(bytes.NewReader(data))
+ for scanner.Scan() {
+ text = scanner.Text()
+ if rPos.MatchString(text) {
+ pos = rPos.FindStringSubmatch(text)[1]
+ } else if rFlags.MatchString(text) {
+ flags = rFlags.FindStringSubmatch(text)[1]
+ } else if rMntID.MatchString(text) {
+ mntid = rMntID.FindStringSubmatch(text)[1]
+ } else if rIno.MatchString(text) {
+ ino = rIno.FindStringSubmatch(text)[1]
+ } else if rInotify.MatchString(text) {
+ newInotify, err := parseInotifyInfo(text)
+ if err != nil {
+ return nil, err
+ }
+ inotify = append(inotify, *newInotify)
+ }
+ }
+
+ i := &ProcFDInfo{
+ FD: fd,
+ Pos: pos,
+ Flags: flags,
+ MntID: mntid,
+ Ino: ino,
+ InotifyInfos: inotify,
+ }
+
+ return i, nil
+}
+
+// InotifyInfo represents a single inotify line in the fdinfo file.
+type InotifyInfo struct {
+ // Watch descriptor number
+ WD string
+ // Inode number
+ Ino string
+ // Device ID
+ Sdev string
+ // Mask of events being monitored
+ Mask string
+}
+
+// InotifyInfo constructor. Only available on kernel 3.8+.
+func parseInotifyInfo(line string) (*InotifyInfo, error) {
+ m := rInotifyParts.FindStringSubmatch(line)
+ if len(m) >= 4 {
+ var mask string
+ if len(m) == 5 {
+ mask = m[4]
+ }
+ i := &InotifyInfo{
+ WD: m[1],
+ Ino: m[2],
+ Sdev: m[3],
+ Mask: mask,
+ }
+ return i, nil
+ }
+ return nil, fmt.Errorf("%w: invalid inode entry: %q", ErrFileParse, line)
+}
+
+// ProcFDInfos represents a list of ProcFDInfo structs.
+type ProcFDInfos []ProcFDInfo
+
+func (p ProcFDInfos) Len() int { return len(p) }
+func (p ProcFDInfos) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
+func (p ProcFDInfos) Less(i, j int) bool { return p[i].FD < p[j].FD }
+
+// InotifyWatchLen returns the total number of inotify watches.
+func (p ProcFDInfos) InotifyWatchLen() (int, error) {
+ length := 0
+ for _, f := range p {
+ length += len(f.InotifyInfos)
+ }
+
+ return length, nil
+}
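+
+// Usage sketch (illustrative only, not part of the upstream file): count the
+// inotify watches held by the current process.
+//
+//     p, err := procfs.Self()
+//     if err != nil {
+//         log.Fatal(err)
+//     }
+//     infos, err := p.FileDescriptorsInfo()
+//     if err != nil {
+//         log.Fatal(err)
+//     }
+//     watches, _ := infos.InotifyWatchLen() // never returns an error in the current implementation
+//     fmt.Println("inotify watches:", watches)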
diff --git a/vendor/github.com/prometheus/procfs/proc_interrupts.go b/vendor/github.com/prometheus/procfs/proc_interrupts.go
new file mode 100644
index 0000000..86b4b45
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/proc_interrupts.go
@@ -0,0 +1,98 @@
+// Copyright 2022 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+ "bufio"
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "strconv"
+ "strings"
+
+ "github.com/prometheus/procfs/internal/util"
+)
+
+// Interrupt represents a single interrupt line.
+type Interrupt struct {
+ // Info is the type of interrupt.
+ Info string
+ // Devices is the name of the device that is located at that IRQ
+ Devices string
+ // Values is the number of interrupts per CPU.
+ Values []string
+}
+
+// Interrupts models the content of /proc/interrupts. Key is the IRQ number.
+// - https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/6/html/deployment_guide/s2-proc-interrupts
+// - https://raspberrypi.stackexchange.com/questions/105802/explanation-of-proc-interrupts-output
+type Interrupts map[string]Interrupt
+
+// Interrupts creates a new instance from a given Proc instance.
+func (p Proc) Interrupts() (Interrupts, error) {
+ data, err := util.ReadFileNoStat(p.path("interrupts"))
+ if err != nil {
+ return nil, err
+ }
+ return parseInterrupts(bytes.NewReader(data))
+}
+
+func parseInterrupts(r io.Reader) (Interrupts, error) {
+ var (
+ interrupts = Interrupts{}
+ scanner = bufio.NewScanner(r)
+ )
+
+ if !scanner.Scan() {
+ return nil, errors.New("interrupts empty")
+ }
+ cpuNum := len(strings.Fields(scanner.Text())) // one header per cpu
+
+ for scanner.Scan() {
+ parts := strings.Fields(scanner.Text())
+ if len(parts) == 0 { // skip empty lines
+ continue
+ }
+ if len(parts) < 2 {
+ return nil, fmt.Errorf("%w: Not enough fields in interrupts (expected 2+ fields but got %d): %s", ErrFileParse, len(parts), parts)
+ }
+ intName := parts[0][:len(parts[0])-1] // remove trailing :
+
+ if len(parts) == 2 {
+ interrupts[intName] = Interrupt{
+ Info: "",
+ Devices: "",
+ Values: []string{
+ parts[1],
+ },
+ }
+ continue
+ }
+
+ intr := Interrupt{
+ Values: parts[1 : cpuNum+1],
+ }
+
+ if _, err := strconv.Atoi(intName); err == nil { // numeral interrupt
+ intr.Info = parts[cpuNum+1]
+ intr.Devices = strings.Join(parts[cpuNum+2:], " ")
+ } else {
+ intr.Info = strings.Join(parts[cpuNum+1:], " ")
+ }
+ interrupts[intName] = intr
+ }
+
+ return interrupts, scanner.Err()
+}
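+
+// Usage sketch (illustrative only, not part of the upstream file): print the
+// per-CPU counters for every IRQ as seen from the current process.
+//
+//     p, err := procfs.Self()
+//     if err != nil {
+//         log.Fatal(err)
+//     }
+//     interrupts, err := p.Interrupts()
+//     if err != nil {
+//         log.Fatal(err)
+//     }
+//     for irq, in := range interrupts {
+//         fmt.Println(irq, in.Info, in.Devices, in.Values)
+//     }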
diff --git a/vendor/github.com/prometheus/procfs/proc_io.go b/vendor/github.com/prometheus/procfs/proc_io.go
new file mode 100644
index 0000000..776f349
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/proc_io.go
@@ -0,0 +1,59 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+ "fmt"
+
+ "github.com/prometheus/procfs/internal/util"
+)
+
+// ProcIO models the content of /proc/<pid>/io.
+type ProcIO struct {
+ // Chars read.
+ RChar uint64
+ // Chars written.
+ WChar uint64
+ // Read syscalls.
+ SyscR uint64
+ // Write syscalls.
+ SyscW uint64
+ // Bytes read.
+ ReadBytes uint64
+ // Bytes written.
+ WriteBytes uint64
+ // Bytes written, but taking into account truncation. See
+ // Documentation/filesystems/proc.txt in the kernel sources for
+ // detailed explanation.
+ CancelledWriteBytes int64
+}
+
+// IO creates a new ProcIO instance from a given Proc instance.
+func (p Proc) IO() (ProcIO, error) {
+ pio := ProcIO{}
+
+ data, err := util.ReadFileNoStat(p.path("io"))
+ if err != nil {
+ return pio, err
+ }
+
+ ioFormat := "rchar: %d\nwchar: %d\nsyscr: %d\nsyscw: %d\n" +
+ "read_bytes: %d\nwrite_bytes: %d\n" +
+ "cancelled_write_bytes: %d\n"
+
+ _, err = fmt.Sscanf(string(data), ioFormat, &pio.RChar, &pio.WChar, &pio.SyscR,
+ &pio.SyscW, &pio.ReadBytes, &pio.WriteBytes, &pio.CancelledWriteBytes)
+
+ return pio, err
+}
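+
+// Usage sketch (illustrative only, not part of the upstream file; the PID is
+// hypothetical): report the bytes a process has read from and written to storage.
+//
+//     p, err := procfs.NewProc(1234) // hypothetical PID
+//     if err != nil {
+//         log.Fatal(err)
+//     }
+//     io, err := p.IO()
+//     if err != nil {
+//         log.Fatal(err)
+//     }
+//     fmt.Println("read:", io.ReadBytes, "written:", io.WriteBytes)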
diff --git a/vendor/github.com/prometheus/procfs/proc_limits.go b/vendor/github.com/prometheus/procfs/proc_limits.go
new file mode 100644
index 0000000..9530b14
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/proc_limits.go
@@ -0,0 +1,160 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+ "bufio"
+ "fmt"
+ "os"
+ "regexp"
+ "strconv"
+)
+
+// ProcLimits represents the soft limits for each of the process's resource
+// limits. For more information see getrlimit(2):
+// http://man7.org/linux/man-pages/man2/getrlimit.2.html.
+type ProcLimits struct {
+ // CPU time limit in seconds.
+ CPUTime uint64
+ // Maximum size of files that the process may create.
+ FileSize uint64
+ // Maximum size of the process's data segment (initialized data,
+ // uninitialized data, and heap).
+ DataSize uint64
+ // Maximum size of the process stack in bytes.
+ StackSize uint64
+ // Maximum size of a core file.
+ CoreFileSize uint64
+ // Limit of the process's resident set in pages.
+ ResidentSet uint64
+ // Maximum number of processes that can be created for the real user ID of
+ // the calling process.
+ Processes uint64
+ // Value one greater than the maximum file descriptor number that can be
+ // opened by this process.
+ OpenFiles uint64
+ // Maximum number of bytes of memory that may be locked into RAM.
+ LockedMemory uint64
+ // Maximum size of the process's virtual memory address space in bytes.
+ AddressSpace uint64
+ // Limit on the combined number of flock(2) locks and fcntl(2) leases that
+ // this process may establish.
+ FileLocks uint64
+ // Limit of signals that may be queued for the real user ID of the calling
+ // process.
+ PendingSignals uint64
+ // Limit on the number of bytes that can be allocated for POSIX message
+ // queues for the real user ID of the calling process.
+ MsqqueueSize uint64
+ // Limit of the nice priority set using setpriority(2) or nice(2).
+ NicePriority uint64
+ // Limit of the real-time priority set using sched_setscheduler(2) or
+ // sched_setparam(2).
+ RealtimePriority uint64
+ // Limit (in microseconds) on the amount of CPU time that a process
+ // scheduled under a real-time scheduling policy may consume without making
+ // a blocking system call.
+ RealtimeTimeout uint64
+}
+
+const (
+ limitsFields = 4
+ limitsUnlimited = "unlimited"
+)
+
+var (
+ limitsMatch = regexp.MustCompile(`(Max \w+\s{0,1}?\w*\s{0,1}\w*)\s{2,}(\w+)\s+(\w+)`)
+)
+
+// NewLimits returns the current soft limits of the process.
+//
+// Deprecated: Use p.Limits() instead.
+func (p Proc) NewLimits() (ProcLimits, error) {
+ return p.Limits()
+}
+
+// Limits returns the current soft limits of the process.
+func (p Proc) Limits() (ProcLimits, error) {
+ f, err := os.Open(p.path("limits"))
+ if err != nil {
+ return ProcLimits{}, err
+ }
+ defer f.Close()
+
+ var (
+ l = ProcLimits{}
+ s = bufio.NewScanner(f)
+ )
+
+ s.Scan() // Skip limits header
+
+ for s.Scan() {
+ //fields := limitsMatch.Split(s.Text(), limitsFields)
+ fields := limitsMatch.FindStringSubmatch(s.Text())
+ if len(fields) != limitsFields {
+ return ProcLimits{}, fmt.Errorf("%w: couldn't parse %q line %q", ErrFileParse, f.Name(), s.Text())
+ }
+
+ switch fields[1] {
+ case "Max cpu time":
+ l.CPUTime, err = parseUint(fields[2])
+ case "Max file size":
+ l.FileSize, err = parseUint(fields[2])
+ case "Max data size":
+ l.DataSize, err = parseUint(fields[2])
+ case "Max stack size":
+ l.StackSize, err = parseUint(fields[2])
+ case "Max core file size":
+ l.CoreFileSize, err = parseUint(fields[2])
+ case "Max resident set":
+ l.ResidentSet, err = parseUint(fields[2])
+ case "Max processes":
+ l.Processes, err = parseUint(fields[2])
+ case "Max open files":
+ l.OpenFiles, err = parseUint(fields[2])
+ case "Max locked memory":
+ l.LockedMemory, err = parseUint(fields[2])
+ case "Max address space":
+ l.AddressSpace, err = parseUint(fields[2])
+ case "Max file locks":
+ l.FileLocks, err = parseUint(fields[2])
+ case "Max pending signals":
+ l.PendingSignals, err = parseUint(fields[2])
+ case "Max msgqueue size":
+ l.MsqqueueSize, err = parseUint(fields[2])
+ case "Max nice priority":
+ l.NicePriority, err = parseUint(fields[2])
+ case "Max realtime priority":
+ l.RealtimePriority, err = parseUint(fields[2])
+ case "Max realtime timeout":
+ l.RealtimeTimeout, err = parseUint(fields[2])
+ }
+ if err != nil {
+ return ProcLimits{}, err
+ }
+ }
+
+ return l, s.Err()
+}
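+
+// Usage sketch (illustrative only, not part of the upstream file): check the
+// soft open-file limit of the current process. Values reported as "unlimited"
+// are returned as the maximum uint64.
+//
+//     p, err := procfs.Self()
+//     if err != nil {
+//         log.Fatal(err)
+//     }
+//     limits, err := p.Limits()
+//     if err != nil {
+//         log.Fatal(err)
+//     }
+//     fmt.Println("max open files:", limits.OpenFiles)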
+
+func parseUint(s string) (uint64, error) {
+ if s == limitsUnlimited {
+ return 18446744073709551615, nil
+ }
+ i, err := strconv.ParseUint(s, 10, 64)
+ if err != nil {
+ return 0, fmt.Errorf("%w: couldn't parse value %q: %w", ErrFileParse, s, err)
+ }
+ return i, nil
+}
diff --git a/vendor/github.com/prometheus/procfs/proc_maps.go b/vendor/github.com/prometheus/procfs/proc_maps.go
new file mode 100644
index 0000000..7e75c28
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/proc_maps.go
@@ -0,0 +1,211 @@
+// Copyright 2019 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build (aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris) && !js
+// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris
+// +build !js
+
+package procfs
+
+import (
+ "bufio"
+ "fmt"
+ "os"
+ "strconv"
+ "strings"
+
+ "golang.org/x/sys/unix"
+)
+
+// ProcMapPermissions contains permission settings read from `/proc/[pid]/maps`.
+type ProcMapPermissions struct {
+ // mapping has the [R]ead flag set
+ Read bool
+ // mapping has the [W]rite flag set
+ Write bool
+ // mapping has the [X]ecutable flag set
+ Execute bool
+ // mapping has the [S]hared flag set
+ Shared bool
+ // mapping is marked as [P]rivate (copy on write)
+ Private bool
+}
+
+// ProcMap contains a single memory mapping of the process,
+// read from `/proc/[pid]/maps`.
+type ProcMap struct {
+ // The start address of current mapping.
+ StartAddr uintptr
+ // The end address of the current mapping
+ EndAddr uintptr
+ // The permissions for this mapping
+ Perms *ProcMapPermissions
+ // The current offset into the file/fd (e.g., shared libs)
+ Offset int64
+ // Device owner of this mapping (major:minor) in Mkdev format.
+ Dev uint64
+ // The inode of the device above
+ Inode uint64
+ // The file or pseudofile (or empty==anonymous)
+ Pathname string
+}
+
+// parseDevice parses the device token of a line and converts it to a dev_t
+// (mkdev) like structure.
+func parseDevice(s string) (uint64, error) {
+ i := strings.Index(s, ":")
+ if i == -1 {
+ return 0, fmt.Errorf("%w: expected separator `:` in %s", ErrFileParse, s)
+ }
+
+ major, err := strconv.ParseUint(s[0:i], 16, 0)
+ if err != nil {
+ return 0, err
+ }
+
+ minor, err := strconv.ParseUint(s[i+1:], 16, 0)
+ if err != nil {
+ return 0, err
+ }
+
+ return unix.Mkdev(uint32(major), uint32(minor)), nil
+}
+
+// parseAddress converts a hex-string to a uintptr.
+func parseAddress(s string) (uintptr, error) {
+ a, err := strconv.ParseUint(s, 16, 0)
+ if err != nil {
+ return 0, err
+ }
+
+ return uintptr(a), nil
+}
+
+// parseAddresses parses the start-end address.
+func parseAddresses(s string) (uintptr, uintptr, error) {
+ idx := strings.Index(s, "-")
+ if idx == -1 {
+ return 0, 0, fmt.Errorf("%w: expected separator `-` in %s", ErrFileParse, s)
+ }
+
+ saddr, err := parseAddress(s[0:idx])
+ if err != nil {
+ return 0, 0, err
+ }
+
+ eaddr, err := parseAddress(s[idx+1:])
+ if err != nil {
+ return 0, 0, err
+ }
+
+ return saddr, eaddr, nil
+}
+
+// parsePermissions parses a token and returns any that are set.
+func parsePermissions(s string) (*ProcMapPermissions, error) {
+ if len(s) < 4 {
+ return nil, fmt.Errorf("%w: invalid permissions token", ErrFileParse)
+ }
+
+ perms := ProcMapPermissions{}
+ for _, ch := range s {
+ switch ch {
+ case 'r':
+ perms.Read = true
+ case 'w':
+ perms.Write = true
+ case 'x':
+ perms.Execute = true
+ case 'p':
+ perms.Private = true
+ case 's':
+ perms.Shared = true
+ }
+ }
+
+ return &perms, nil
+}
+
+// parseProcMap will attempt to parse a single line within a proc/[pid]/maps
+// buffer.
+func parseProcMap(text string) (*ProcMap, error) {
+ fields := strings.Fields(text)
+ if len(fields) < 5 {
+ return nil, fmt.Errorf("%w: truncated procmap entry", ErrFileParse)
+ }
+
+ saddr, eaddr, err := parseAddresses(fields[0])
+ if err != nil {
+ return nil, err
+ }
+
+ perms, err := parsePermissions(fields[1])
+ if err != nil {
+ return nil, err
+ }
+
+ offset, err := strconv.ParseInt(fields[2], 16, 0)
+ if err != nil {
+ return nil, err
+ }
+
+ device, err := parseDevice(fields[3])
+ if err != nil {
+ return nil, err
+ }
+
+ inode, err := strconv.ParseUint(fields[4], 10, 0)
+ if err != nil {
+ return nil, err
+ }
+
+ pathname := ""
+
+ if len(fields) >= 5 {
+ pathname = strings.Join(fields[5:], " ")
+ }
+
+ return &ProcMap{
+ StartAddr: saddr,
+ EndAddr: eaddr,
+ Perms: perms,
+ Offset: offset,
+ Dev: device,
+ Inode: inode,
+ Pathname: pathname,
+ }, nil
+}
+
+// ProcMaps reads from /proc/[pid]/maps to get the memory-mappings of the
+// process.
+func (p Proc) ProcMaps() ([]*ProcMap, error) {
+ file, err := os.Open(p.path("maps"))
+ if err != nil {
+ return nil, err
+ }
+ defer file.Close()
+
+ maps := []*ProcMap{}
+ scan := bufio.NewScanner(file)
+
+ for scan.Scan() {
+ m, err := parseProcMap(scan.Text())
+ if err != nil {
+ return nil, err
+ }
+
+ maps = append(maps, m)
+ }
+
+ return maps, nil
+}
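+
+// Usage sketch (illustrative only, not part of the upstream file): dump the
+// address ranges and backing paths of the current process's mappings.
+//
+//     p, err := procfs.Self()
+//     if err != nil {
+//         log.Fatal(err)
+//     }
+//     maps, err := p.ProcMaps()
+//     if err != nil {
+//         log.Fatal(err)
+//     }
+//     for _, m := range maps {
+//         fmt.Printf("%x-%x %s\n", m.StartAddr, m.EndAddr, m.Pathname)
+//     }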
diff --git a/vendor/github.com/prometheus/procfs/proc_netstat.go b/vendor/github.com/prometheus/procfs/proc_netstat.go
new file mode 100644
index 0000000..8e3ff4d
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/proc_netstat.go
@@ -0,0 +1,443 @@
+// Copyright 2022 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "io"
+ "strconv"
+ "strings"
+
+ "github.com/prometheus/procfs/internal/util"
+)
+
+// ProcNetstat models the content of /proc/<pid>/net/netstat.
+type ProcNetstat struct {
+ // The process ID.
+ PID int
+ TcpExt
+ IpExt
+}
+
+type TcpExt struct { // nolint:revive
+ SyncookiesSent *float64
+ SyncookiesRecv *float64
+ SyncookiesFailed *float64
+ EmbryonicRsts *float64
+ PruneCalled *float64
+ RcvPruned *float64
+ OfoPruned *float64
+ OutOfWindowIcmps *float64
+ LockDroppedIcmps *float64
+ ArpFilter *float64
+ TW *float64
+ TWRecycled *float64
+ TWKilled *float64
+ PAWSActive *float64
+ PAWSEstab *float64
+ DelayedACKs *float64
+ DelayedACKLocked *float64
+ DelayedACKLost *float64
+ ListenOverflows *float64
+ ListenDrops *float64
+ TCPHPHits *float64
+ TCPPureAcks *float64
+ TCPHPAcks *float64
+ TCPRenoRecovery *float64
+ TCPSackRecovery *float64
+ TCPSACKReneging *float64
+ TCPSACKReorder *float64
+ TCPRenoReorder *float64
+ TCPTSReorder *float64
+ TCPFullUndo *float64
+ TCPPartialUndo *float64
+ TCPDSACKUndo *float64
+ TCPLossUndo *float64
+ TCPLostRetransmit *float64
+ TCPRenoFailures *float64
+ TCPSackFailures *float64
+ TCPLossFailures *float64
+ TCPFastRetrans *float64
+ TCPSlowStartRetrans *float64
+ TCPTimeouts *float64
+ TCPLossProbes *float64
+ TCPLossProbeRecovery *float64
+ TCPRenoRecoveryFail *float64
+ TCPSackRecoveryFail *float64
+ TCPRcvCollapsed *float64
+ TCPDSACKOldSent *float64
+ TCPDSACKOfoSent *float64
+ TCPDSACKRecv *float64
+ TCPDSACKOfoRecv *float64
+ TCPAbortOnData *float64
+ TCPAbortOnClose *float64
+ TCPAbortOnMemory *float64
+ TCPAbortOnTimeout *float64
+ TCPAbortOnLinger *float64
+ TCPAbortFailed *float64
+ TCPMemoryPressures *float64
+ TCPMemoryPressuresChrono *float64
+ TCPSACKDiscard *float64
+ TCPDSACKIgnoredOld *float64
+ TCPDSACKIgnoredNoUndo *float64
+ TCPSpuriousRTOs *float64
+ TCPMD5NotFound *float64
+ TCPMD5Unexpected *float64
+ TCPMD5Failure *float64
+ TCPSackShifted *float64
+ TCPSackMerged *float64
+ TCPSackShiftFallback *float64
+ TCPBacklogDrop *float64
+ PFMemallocDrop *float64
+ TCPMinTTLDrop *float64
+ TCPDeferAcceptDrop *float64
+ IPReversePathFilter *float64
+ TCPTimeWaitOverflow *float64
+ TCPReqQFullDoCookies *float64
+ TCPReqQFullDrop *float64
+ TCPRetransFail *float64
+ TCPRcvCoalesce *float64
+ TCPRcvQDrop *float64
+ TCPOFOQueue *float64
+ TCPOFODrop *float64
+ TCPOFOMerge *float64
+ TCPChallengeACK *float64
+ TCPSYNChallenge *float64
+ TCPFastOpenActive *float64
+ TCPFastOpenActiveFail *float64
+ TCPFastOpenPassive *float64
+ TCPFastOpenPassiveFail *float64
+ TCPFastOpenListenOverflow *float64
+ TCPFastOpenCookieReqd *float64
+ TCPFastOpenBlackhole *float64
+ TCPSpuriousRtxHostQueues *float64
+ BusyPollRxPackets *float64
+ TCPAutoCorking *float64
+ TCPFromZeroWindowAdv *float64
+ TCPToZeroWindowAdv *float64
+ TCPWantZeroWindowAdv *float64
+ TCPSynRetrans *float64
+ TCPOrigDataSent *float64
+ TCPHystartTrainDetect *float64
+ TCPHystartTrainCwnd *float64
+ TCPHystartDelayDetect *float64
+ TCPHystartDelayCwnd *float64
+ TCPACKSkippedSynRecv *float64
+ TCPACKSkippedPAWS *float64
+ TCPACKSkippedSeq *float64
+ TCPACKSkippedFinWait2 *float64
+ TCPACKSkippedTimeWait *float64
+ TCPACKSkippedChallenge *float64
+ TCPWinProbe *float64
+ TCPKeepAlive *float64
+ TCPMTUPFail *float64
+ TCPMTUPSuccess *float64
+ TCPWqueueTooBig *float64
+}
+
+type IpExt struct { // nolint:revive
+ InNoRoutes *float64
+ InTruncatedPkts *float64
+ InMcastPkts *float64
+ OutMcastPkts *float64
+ InBcastPkts *float64
+ OutBcastPkts *float64
+ InOctets *float64
+ OutOctets *float64
+ InMcastOctets *float64
+ OutMcastOctets *float64
+ InBcastOctets *float64
+ OutBcastOctets *float64
+ InCsumErrors *float64
+ InNoECTPkts *float64
+ InECT1Pkts *float64
+ InECT0Pkts *float64
+ InCEPkts *float64
+ ReasmOverlaps *float64
+}
+
+func (p Proc) Netstat() (ProcNetstat, error) {
+ filename := p.path("net/netstat")
+ data, err := util.ReadFileNoStat(filename)
+ if err != nil {
+ return ProcNetstat{PID: p.PID}, err
+ }
+ procNetstat, err := parseProcNetstat(bytes.NewReader(data), filename)
+ procNetstat.PID = p.PID
+ return procNetstat, err
+}
+
+// parseProcNetstat parses the metrics from the /proc/<pid>/net/netstat file
+// and returns a ProcNetstat structure.
+func parseProcNetstat(r io.Reader, fileName string) (ProcNetstat, error) {
+ var (
+ scanner = bufio.NewScanner(r)
+ procNetstat = ProcNetstat{}
+ )
+
+ for scanner.Scan() {
+ nameParts := strings.Split(scanner.Text(), " ")
+ scanner.Scan()
+ valueParts := strings.Split(scanner.Text(), " ")
+ // Remove trailing :.
+ protocol := strings.TrimSuffix(nameParts[0], ":")
+ if len(nameParts) != len(valueParts) {
+ return procNetstat, fmt.Errorf("%w: mismatch field count mismatch in %s: %s",
+ ErrFileParse, fileName, protocol)
+ }
+ for i := 1; i < len(nameParts); i++ {
+ value, err := strconv.ParseFloat(valueParts[i], 64)
+ if err != nil {
+ return procNetstat, err
+ }
+ key := nameParts[i]
+
+ switch protocol {
+ case "TcpExt":
+ switch key {
+ case "SyncookiesSent":
+ procNetstat.TcpExt.SyncookiesSent = &value
+ case "SyncookiesRecv":
+ procNetstat.TcpExt.SyncookiesRecv = &value
+ case "SyncookiesFailed":
+ procNetstat.TcpExt.SyncookiesFailed = &value
+ case "EmbryonicRsts":
+ procNetstat.TcpExt.EmbryonicRsts = &value
+ case "PruneCalled":
+ procNetstat.TcpExt.PruneCalled = &value
+ case "RcvPruned":
+ procNetstat.TcpExt.RcvPruned = &value
+ case "OfoPruned":
+ procNetstat.TcpExt.OfoPruned = &value
+ case "OutOfWindowIcmps":
+ procNetstat.TcpExt.OutOfWindowIcmps = &value
+ case "LockDroppedIcmps":
+ procNetstat.TcpExt.LockDroppedIcmps = &value
+ case "ArpFilter":
+ procNetstat.TcpExt.ArpFilter = &value
+ case "TW":
+ procNetstat.TcpExt.TW = &value
+ case "TWRecycled":
+ procNetstat.TcpExt.TWRecycled = &value
+ case "TWKilled":
+ procNetstat.TcpExt.TWKilled = &value
+ case "PAWSActive":
+ procNetstat.TcpExt.PAWSActive = &value
+ case "PAWSEstab":
+ procNetstat.TcpExt.PAWSEstab = &value
+ case "DelayedACKs":
+ procNetstat.TcpExt.DelayedACKs = &value
+ case "DelayedACKLocked":
+ procNetstat.TcpExt.DelayedACKLocked = &value
+ case "DelayedACKLost":
+ procNetstat.TcpExt.DelayedACKLost = &value
+ case "ListenOverflows":
+ procNetstat.TcpExt.ListenOverflows = &value
+ case "ListenDrops":
+ procNetstat.TcpExt.ListenDrops = &value
+ case "TCPHPHits":
+ procNetstat.TcpExt.TCPHPHits = &value
+ case "TCPPureAcks":
+ procNetstat.TcpExt.TCPPureAcks = &value
+ case "TCPHPAcks":
+ procNetstat.TcpExt.TCPHPAcks = &value
+ case "TCPRenoRecovery":
+ procNetstat.TcpExt.TCPRenoRecovery = &value
+ case "TCPSackRecovery":
+ procNetstat.TcpExt.TCPSackRecovery = &value
+ case "TCPSACKReneging":
+ procNetstat.TcpExt.TCPSACKReneging = &value
+ case "TCPSACKReorder":
+ procNetstat.TcpExt.TCPSACKReorder = &value
+ case "TCPRenoReorder":
+ procNetstat.TcpExt.TCPRenoReorder = &value
+ case "TCPTSReorder":
+ procNetstat.TcpExt.TCPTSReorder = &value
+ case "TCPFullUndo":
+ procNetstat.TcpExt.TCPFullUndo = &value
+ case "TCPPartialUndo":
+ procNetstat.TcpExt.TCPPartialUndo = &value
+ case "TCPDSACKUndo":
+ procNetstat.TcpExt.TCPDSACKUndo = &value
+ case "TCPLossUndo":
+ procNetstat.TcpExt.TCPLossUndo = &value
+ case "TCPLostRetransmit":
+ procNetstat.TcpExt.TCPLostRetransmit = &value
+ case "TCPRenoFailures":
+ procNetstat.TcpExt.TCPRenoFailures = &value
+ case "TCPSackFailures":
+ procNetstat.TcpExt.TCPSackFailures = &value
+ case "TCPLossFailures":
+ procNetstat.TcpExt.TCPLossFailures = &value
+ case "TCPFastRetrans":
+ procNetstat.TcpExt.TCPFastRetrans = &value
+ case "TCPSlowStartRetrans":
+ procNetstat.TcpExt.TCPSlowStartRetrans = &value
+ case "TCPTimeouts":
+ procNetstat.TcpExt.TCPTimeouts = &value
+ case "TCPLossProbes":
+ procNetstat.TcpExt.TCPLossProbes = &value
+ case "TCPLossProbeRecovery":
+ procNetstat.TcpExt.TCPLossProbeRecovery = &value
+ case "TCPRenoRecoveryFail":
+ procNetstat.TcpExt.TCPRenoRecoveryFail = &value
+ case "TCPSackRecoveryFail":
+ procNetstat.TcpExt.TCPSackRecoveryFail = &value
+ case "TCPRcvCollapsed":
+ procNetstat.TcpExt.TCPRcvCollapsed = &value
+ case "TCPDSACKOldSent":
+ procNetstat.TcpExt.TCPDSACKOldSent = &value
+ case "TCPDSACKOfoSent":
+ procNetstat.TcpExt.TCPDSACKOfoSent = &value
+ case "TCPDSACKRecv":
+ procNetstat.TcpExt.TCPDSACKRecv = &value
+ case "TCPDSACKOfoRecv":
+ procNetstat.TcpExt.TCPDSACKOfoRecv = &value
+ case "TCPAbortOnData":
+ procNetstat.TcpExt.TCPAbortOnData = &value
+ case "TCPAbortOnClose":
+ procNetstat.TcpExt.TCPAbortOnClose = &value
+ case "TCPDeferAcceptDrop":
+ procNetstat.TcpExt.TCPDeferAcceptDrop = &value
+ case "IPReversePathFilter":
+ procNetstat.TcpExt.IPReversePathFilter = &value
+ case "TCPTimeWaitOverflow":
+ procNetstat.TcpExt.TCPTimeWaitOverflow = &value
+ case "TCPReqQFullDoCookies":
+ procNetstat.TcpExt.TCPReqQFullDoCookies = &value
+ case "TCPReqQFullDrop":
+ procNetstat.TcpExt.TCPReqQFullDrop = &value
+ case "TCPRetransFail":
+ procNetstat.TcpExt.TCPRetransFail = &value
+ case "TCPRcvCoalesce":
+ procNetstat.TcpExt.TCPRcvCoalesce = &value
+ case "TCPRcvQDrop":
+ procNetstat.TcpExt.TCPRcvQDrop = &value
+ case "TCPOFOQueue":
+ procNetstat.TcpExt.TCPOFOQueue = &value
+ case "TCPOFODrop":
+ procNetstat.TcpExt.TCPOFODrop = &value
+ case "TCPOFOMerge":
+ procNetstat.TcpExt.TCPOFOMerge = &value
+ case "TCPChallengeACK":
+ procNetstat.TcpExt.TCPChallengeACK = &value
+ case "TCPSYNChallenge":
+ procNetstat.TcpExt.TCPSYNChallenge = &value
+ case "TCPFastOpenActive":
+ procNetstat.TcpExt.TCPFastOpenActive = &value
+ case "TCPFastOpenActiveFail":
+ procNetstat.TcpExt.TCPFastOpenActiveFail = &value
+ case "TCPFastOpenPassive":
+ procNetstat.TcpExt.TCPFastOpenPassive = &value
+ case "TCPFastOpenPassiveFail":
+ procNetstat.TcpExt.TCPFastOpenPassiveFail = &value
+ case "TCPFastOpenListenOverflow":
+ procNetstat.TcpExt.TCPFastOpenListenOverflow = &value
+ case "TCPFastOpenCookieReqd":
+ procNetstat.TcpExt.TCPFastOpenCookieReqd = &value
+ case "TCPFastOpenBlackhole":
+ procNetstat.TcpExt.TCPFastOpenBlackhole = &value
+ case "TCPSpuriousRtxHostQueues":
+ procNetstat.TcpExt.TCPSpuriousRtxHostQueues = &value
+ case "BusyPollRxPackets":
+ procNetstat.TcpExt.BusyPollRxPackets = &value
+ case "TCPAutoCorking":
+ procNetstat.TcpExt.TCPAutoCorking = &value
+ case "TCPFromZeroWindowAdv":
+ procNetstat.TcpExt.TCPFromZeroWindowAdv = &value
+ case "TCPToZeroWindowAdv":
+ procNetstat.TcpExt.TCPToZeroWindowAdv = &value
+ case "TCPWantZeroWindowAdv":
+ procNetstat.TcpExt.TCPWantZeroWindowAdv = &value
+ case "TCPSynRetrans":
+ procNetstat.TcpExt.TCPSynRetrans = &value
+ case "TCPOrigDataSent":
+ procNetstat.TcpExt.TCPOrigDataSent = &value
+ case "TCPHystartTrainDetect":
+ procNetstat.TcpExt.TCPHystartTrainDetect = &value
+ case "TCPHystartTrainCwnd":
+ procNetstat.TcpExt.TCPHystartTrainCwnd = &value
+ case "TCPHystartDelayDetect":
+ procNetstat.TcpExt.TCPHystartDelayDetect = &value
+ case "TCPHystartDelayCwnd":
+ procNetstat.TcpExt.TCPHystartDelayCwnd = &value
+ case "TCPACKSkippedSynRecv":
+ procNetstat.TcpExt.TCPACKSkippedSynRecv = &value
+ case "TCPACKSkippedPAWS":
+ procNetstat.TcpExt.TCPACKSkippedPAWS = &value
+ case "TCPACKSkippedSeq":
+ procNetstat.TcpExt.TCPACKSkippedSeq = &value
+ case "TCPACKSkippedFinWait2":
+ procNetstat.TcpExt.TCPACKSkippedFinWait2 = &value
+ case "TCPACKSkippedTimeWait":
+ procNetstat.TcpExt.TCPACKSkippedTimeWait = &value
+ case "TCPACKSkippedChallenge":
+ procNetstat.TcpExt.TCPACKSkippedChallenge = &value
+ case "TCPWinProbe":
+ procNetstat.TcpExt.TCPWinProbe = &value
+ case "TCPKeepAlive":
+ procNetstat.TcpExt.TCPKeepAlive = &value
+ case "TCPMTUPFail":
+ procNetstat.TcpExt.TCPMTUPFail = &value
+ case "TCPMTUPSuccess":
+ procNetstat.TcpExt.TCPMTUPSuccess = &value
+ case "TCPWqueueTooBig":
+ procNetstat.TcpExt.TCPWqueueTooBig = &value
+ }
+ case "IpExt":
+ switch key {
+ case "InNoRoutes":
+ procNetstat.IpExt.InNoRoutes = &value
+ case "InTruncatedPkts":
+ procNetstat.IpExt.InTruncatedPkts = &value
+ case "InMcastPkts":
+ procNetstat.IpExt.InMcastPkts = &value
+ case "OutMcastPkts":
+ procNetstat.IpExt.OutMcastPkts = &value
+ case "InBcastPkts":
+ procNetstat.IpExt.InBcastPkts = &value
+ case "OutBcastPkts":
+ procNetstat.IpExt.OutBcastPkts = &value
+ case "InOctets":
+ procNetstat.IpExt.InOctets = &value
+ case "OutOctets":
+ procNetstat.IpExt.OutOctets = &value
+ case "InMcastOctets":
+ procNetstat.IpExt.InMcastOctets = &value
+ case "OutMcastOctets":
+ procNetstat.IpExt.OutMcastOctets = &value
+ case "InBcastOctets":
+ procNetstat.IpExt.InBcastOctets = &value
+ case "OutBcastOctets":
+ procNetstat.IpExt.OutBcastOctets = &value
+ case "InCsumErrors":
+ procNetstat.IpExt.InCsumErrors = &value
+ case "InNoECTPkts":
+ procNetstat.IpExt.InNoECTPkts = &value
+ case "InECT1Pkts":
+ procNetstat.IpExt.InECT1Pkts = &value
+ case "InECT0Pkts":
+ procNetstat.IpExt.InECT0Pkts = &value
+ case "InCEPkts":
+ procNetstat.IpExt.InCEPkts = &value
+ case "ReasmOverlaps":
+ procNetstat.IpExt.ReasmOverlaps = &value
+ }
+ }
+ }
+ }
+ return procNetstat, scanner.Err()
+}
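+
+// Usage sketch (illustrative only, not part of the upstream file): read the
+// TcpExt counters for the current process's network namespace. The fields are
+// pointers and stay nil when the kernel does not export a given counter.
+//
+//     p, err := procfs.Self()
+//     if err != nil {
+//         log.Fatal(err)
+//     }
+//     stat, err := p.Netstat()
+//     if err != nil {
+//         log.Fatal(err)
+//     }
+//     if stat.TcpExt.TCPSynRetrans != nil {
+//         fmt.Println("TCPSynRetrans:", *stat.TcpExt.TCPSynRetrans)
+//     }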
diff --git a/vendor/github.com/prometheus/procfs/proc_ns.go b/vendor/github.com/prometheus/procfs/proc_ns.go
new file mode 100644
index 0000000..0f8f847
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/proc_ns.go
@@ -0,0 +1,68 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+ "fmt"
+ "os"
+ "strconv"
+ "strings"
+)
+
+// Namespace represents a single namespace of a process.
+type Namespace struct {
+ Type string // Namespace type.
+ Inode uint32 // Inode number of the namespace. If two processes are in the same namespace their inodes will match.
+}
+
+// Namespaces contains all of the namespaces that the process is contained in.
+type Namespaces map[string]Namespace
+
+// Namespaces reads from /proc/<pid>/ns/* to get the namespaces of which the
+// process is a member.
+func (p Proc) Namespaces() (Namespaces, error) {
+ d, err := os.Open(p.path("ns"))
+ if err != nil {
+ return nil, err
+ }
+ defer d.Close()
+
+ names, err := d.Readdirnames(-1)
+ if err != nil {
+ return nil, fmt.Errorf("%w: failed to read contents of ns dir: %w", ErrFileRead, err)
+ }
+
+ ns := make(Namespaces, len(names))
+ for _, name := range names {
+ target, err := os.Readlink(p.path("ns", name))
+ if err != nil {
+ return nil, err
+ }
+
+ fields := strings.SplitN(target, ":", 2)
+ if len(fields) != 2 {
+ return nil, fmt.Errorf("%w: namespace type and inode from %q", ErrFileParse, target)
+ }
+
+ typ := fields[0]
+ inode, err := strconv.ParseUint(strings.Trim(fields[1], "[]"), 10, 32)
+ if err != nil {
+ return nil, fmt.Errorf("%w: inode from %q: %w", ErrFileParse, fields[1], err)
+ }
+
+ ns[name] = Namespace{typ, uint32(inode)}
+ }
+
+ return ns, nil
+}
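+
+// Usage sketch (illustrative only, not part of the upstream file): list the
+// namespaces of the current process; two processes share a namespace exactly
+// when the inode numbers reported here match.
+//
+//     p, err := procfs.Self()
+//     if err != nil {
+//         log.Fatal(err)
+//     }
+//     namespaces, err := p.Namespaces()
+//     if err != nil {
+//         log.Fatal(err)
+//     }
+//     for name, ns := range namespaces {
+//         fmt.Println(name, ns.Type, ns.Inode)
+//     }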
diff --git a/vendor/github.com/prometheus/procfs/proc_psi.go b/vendor/github.com/prometheus/procfs/proc_psi.go
new file mode 100644
index 0000000..ccd35f1
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/proc_psi.go
@@ -0,0 +1,102 @@
+// Copyright 2019 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+// The PSI / pressure interface is described at
+// https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/Documentation/accounting/psi.txt
+// Each resource (cpu, io, memory, ...) is exposed as a single file.
+// Each file may contain up to two lines, one for "some" pressure and one for "full" pressure.
+// Each line contains several averages (over n seconds) and a total in µs.
+//
+// Example io pressure file:
+// > some avg10=0.06 avg60=0.21 avg300=0.99 total=8537362
+// > full avg10=0.00 avg60=0.13 avg300=0.96 total=8183134
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "io"
+ "strings"
+
+ "github.com/prometheus/procfs/internal/util"
+)
+
+const lineFormat = "avg10=%f avg60=%f avg300=%f total=%d"
+
+// PSILine is a single line of values as returned by `/proc/pressure/*`.
+//
+// The Avg entries are averages over n seconds, as a percentage.
+// The Total line is in microseconds.
+type PSILine struct {
+ Avg10 float64
+ Avg60 float64
+ Avg300 float64
+ Total uint64
+}
+
+// PSIStats represent pressure stall information from /proc/pressure/*
+//
+// "Some" indicates the share of time in which at least some tasks are stalled.
+// "Full" indicates the share of time in which all non-idle tasks are stalled simultaneously.
+type PSIStats struct {
+ Some *PSILine
+ Full *PSILine
+}
+
+// PSIStatsForResource reads pressure stall information for the specified
+// resource from /proc/pressure/<resource>. At time of writing this can be
+// either "cpu", "memory" or "io".
+func (fs FS) PSIStatsForResource(resource string) (PSIStats, error) {
+ data, err := util.ReadFileNoStat(fs.proc.Path(fmt.Sprintf("%s/%s", "pressure", resource)))
+ if err != nil {
+ return PSIStats{}, fmt.Errorf("%w: psi_stats: unavailable for %q: %w", ErrFileRead, resource, err)
+ }
+
+ return parsePSIStats(bytes.NewReader(data))
+}
+
+// parsePSIStats parses the specified file for pressure stall information.
+func parsePSIStats(r io.Reader) (PSIStats, error) {
+ psiStats := PSIStats{}
+
+ scanner := bufio.NewScanner(r)
+ for scanner.Scan() {
+ l := scanner.Text()
+ prefix := strings.Split(l, " ")[0]
+ switch prefix {
+ case "some":
+ psi := PSILine{}
+ _, err := fmt.Sscanf(l, fmt.Sprintf("some %s", lineFormat), &psi.Avg10, &psi.Avg60, &psi.Avg300, &psi.Total)
+ if err != nil {
+ return PSIStats{}, err
+ }
+ psiStats.Some = &psi
+ case "full":
+ psi := PSILine{}
+ _, err := fmt.Sscanf(l, fmt.Sprintf("full %s", lineFormat), &psi.Avg10, &psi.Avg60, &psi.Avg300, &psi.Total)
+ if err != nil {
+ return PSIStats{}, err
+ }
+ psiStats.Full = &psi
+ default:
+ // If we encounter a line with an unknown prefix, ignore it and move on
+ // Should new measurement types be added in the future we'll simply ignore them instead
+ // of erroring on retrieval
+ continue
+ }
+ }
+
+ return psiStats, nil
+}
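+
+// Usage sketch (illustrative only, not part of the upstream file; assumes the
+// default /proc mount): read system-wide IO pressure. Some and Full are only
+// set when the corresponding line is present in the pressure file.
+//
+//     fs, err := procfs.NewFS(procfs.DefaultMountPoint)
+//     if err != nil {
+//         log.Fatal(err)
+//     }
+//     psi, err := fs.PSIStatsForResource("io")
+//     if err != nil {
+//         log.Fatal(err)
+//     }
+//     if psi.Some != nil {
+//         fmt.Println("some avg10:", psi.Some.Avg10)
+//     }
+//     if psi.Full != nil {
+//         fmt.Println("full avg10:", psi.Full.Avg10)
+//     }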
diff --git a/vendor/github.com/prometheus/procfs/proc_smaps.go b/vendor/github.com/prometheus/procfs/proc_smaps.go
new file mode 100644
index 0000000..09060e8
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/proc_smaps.go
@@ -0,0 +1,166 @@
+// Copyright 2020 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !windows
+// +build !windows
+
+package procfs
+
+import (
+ "bufio"
+ "errors"
+ "fmt"
+ "os"
+ "regexp"
+ "strconv"
+ "strings"
+
+ "github.com/prometheus/procfs/internal/util"
+)
+
+var (
+ // procSMapsHeaderLine matches the header line before each mapped zone in `/proc/[pid]/smaps`.
+ procSMapsHeaderLine = regexp.MustCompile(`^[a-f0-9].*$`)
+)
+
+type ProcSMapsRollup struct {
+ // Amount of the mapping that is currently resident in RAM.
+ Rss uint64
+ // Process's proportional share of this mapping.
+ Pss uint64
+ // Size in bytes of clean shared pages.
+ SharedClean uint64
+ // Size in bytes of dirty shared pages.
+ SharedDirty uint64
+ // Size in bytes of clean private pages.
+ PrivateClean uint64
+ // Size in bytes of dirty private pages.
+ PrivateDirty uint64
+ // Amount of memory currently marked as referenced or accessed.
+ Referenced uint64
+ // Amount of memory that does not belong to any file.
+ Anonymous uint64
+ // Amount of would-be-anonymous memory currently on swap.
+ Swap uint64
+ // Process's proportional memory on swap.
+ SwapPss uint64
+}
+
+// ProcSMapsRollup reads from /proc/[pid]/smaps_rollup to get summed memory information of the
+// process.
+//
+// If smaps_rollup does not exist (it requires kernel >= 4.15), the content of /proc/[pid]/smaps
+// is read and summed instead.
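+//
+// A usage sketch (editorial example; it assumes the Self helper defined in
+// proc.go of this package to obtain the current process):
+//
+//    p, _ := procfs.Self()
+//    rollup, err := p.ProcSMapsRollup()
+//    if err == nil {
+//        fmt.Printf("rss=%d bytes pss=%d bytes\n", rollup.Rss, rollup.Pss)
+//    }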
+func (p Proc) ProcSMapsRollup() (ProcSMapsRollup, error) {
+ data, err := util.ReadFileNoStat(p.path("smaps_rollup"))
+ if err != nil && os.IsNotExist(err) {
+ return p.procSMapsRollupManual()
+ }
+ if err != nil {
+ return ProcSMapsRollup{}, err
+ }
+
+ lines := strings.Split(string(data), "\n")
+ smaps := ProcSMapsRollup{}
+
+ // skip the first line, which doesn't contain information we need
+ lines = lines[1:]
+ for _, line := range lines {
+ if line == "" {
+ continue
+ }
+
+ if err := smaps.parseLine(line); err != nil {
+ return ProcSMapsRollup{}, err
+ }
+ }
+
+ return smaps, nil
+}
+
+// Read /proc/pid/smaps and do the roll-up in Go code.
+func (p Proc) procSMapsRollupManual() (ProcSMapsRollup, error) {
+ file, err := os.Open(p.path("smaps"))
+ if err != nil {
+ return ProcSMapsRollup{}, err
+ }
+ defer file.Close()
+
+ smaps := ProcSMapsRollup{}
+ scan := bufio.NewScanner(file)
+
+ for scan.Scan() {
+ line := scan.Text()
+
+ if procSMapsHeaderLine.MatchString(line) {
+ continue
+ }
+
+ if err := smaps.parseLine(line); err != nil {
+ return ProcSMapsRollup{}, err
+ }
+ }
+
+ return smaps, nil
+}
+
+func (s *ProcSMapsRollup) parseLine(line string) error {
+ kv := strings.SplitN(line, ":", 2)
+ if len(kv) != 2 {
+ fmt.Println(line)
+ return errors.New("invalid net/dev line, missing colon")
+ }
+
+ k := kv[0]
+ if k == "VmFlags" {
+ return nil
+ }
+
+ v := strings.TrimSpace(kv[1])
+ v = strings.TrimSuffix(v, " kB")
+
+ vKBytes, err := strconv.ParseUint(v, 10, 64)
+ if err != nil {
+ return err
+ }
+ vBytes := vKBytes * 1024
+
+ s.addValue(k, vBytes)
+
+ return nil
+}
+
+func (s *ProcSMapsRollup) addValue(k string, vUintBytes uint64) {
+ switch k {
+ case "Rss":
+ s.Rss += vUintBytes
+ case "Pss":
+ s.Pss += vUintBytes
+ case "Shared_Clean":
+ s.SharedClean += vUintBytes
+ case "Shared_Dirty":
+ s.SharedDirty += vUintBytes
+ case "Private_Clean":
+ s.PrivateClean += vUintBytes
+ case "Private_Dirty":
+ s.PrivateDirty += vUintBytes
+ case "Referenced":
+ s.Referenced += vUintBytes
+ case "Anonymous":
+ s.Anonymous += vUintBytes
+ case "Swap":
+ s.Swap += vUintBytes
+ case "SwapPss":
+ s.SwapPss += vUintBytes
+ }
+}
diff --git a/vendor/github.com/prometheus/procfs/proc_snmp.go b/vendor/github.com/prometheus/procfs/proc_snmp.go
new file mode 100644
index 0000000..b9d2cf6
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/proc_snmp.go
@@ -0,0 +1,353 @@
+// Copyright 2022 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "io"
+ "strconv"
+ "strings"
+
+ "github.com/prometheus/procfs/internal/util"
+)
+
+// ProcSnmp models the content of /proc/<pid>/net/snmp.
+type ProcSnmp struct {
+ // The process ID.
+ PID int
+ Ip
+ Icmp
+ IcmpMsg
+ Tcp
+ Udp
+ UdpLite
+}
+
+type Ip struct { // nolint:revive
+ Forwarding *float64
+ DefaultTTL *float64
+ InReceives *float64
+ InHdrErrors *float64
+ InAddrErrors *float64
+ ForwDatagrams *float64
+ InUnknownProtos *float64
+ InDiscards *float64
+ InDelivers *float64
+ OutRequests *float64
+ OutDiscards *float64
+ OutNoRoutes *float64
+ ReasmTimeout *float64
+ ReasmReqds *float64
+ ReasmOKs *float64
+ ReasmFails *float64
+ FragOKs *float64
+ FragFails *float64
+ FragCreates *float64
+}
+
+type Icmp struct { // nolint:revive
+ InMsgs *float64
+ InErrors *float64
+ InCsumErrors *float64
+ InDestUnreachs *float64
+ InTimeExcds *float64
+ InParmProbs *float64
+ InSrcQuenchs *float64
+ InRedirects *float64
+ InEchos *float64
+ InEchoReps *float64
+ InTimestamps *float64
+ InTimestampReps *float64
+ InAddrMasks *float64
+ InAddrMaskReps *float64
+ OutMsgs *float64
+ OutErrors *float64
+ OutDestUnreachs *float64
+ OutTimeExcds *float64
+ OutParmProbs *float64
+ OutSrcQuenchs *float64
+ OutRedirects *float64
+ OutEchos *float64
+ OutEchoReps *float64
+ OutTimestamps *float64
+ OutTimestampReps *float64
+ OutAddrMasks *float64
+ OutAddrMaskReps *float64
+}
+
+type IcmpMsg struct {
+ InType3 *float64
+ OutType3 *float64
+}
+
+type Tcp struct { // nolint:revive
+ RtoAlgorithm *float64
+ RtoMin *float64
+ RtoMax *float64
+ MaxConn *float64
+ ActiveOpens *float64
+ PassiveOpens *float64
+ AttemptFails *float64
+ EstabResets *float64
+ CurrEstab *float64
+ InSegs *float64
+ OutSegs *float64
+ RetransSegs *float64
+ InErrs *float64
+ OutRsts *float64
+ InCsumErrors *float64
+}
+
+type Udp struct { // nolint:revive
+ InDatagrams *float64
+ NoPorts *float64
+ InErrors *float64
+ OutDatagrams *float64
+ RcvbufErrors *float64
+ SndbufErrors *float64
+ InCsumErrors *float64
+ IgnoredMulti *float64
+}
+
+type UdpLite struct { // nolint:revive
+ InDatagrams *float64
+ NoPorts *float64
+ InErrors *float64
+ OutDatagrams *float64
+ RcvbufErrors *float64
+ SndbufErrors *float64
+ InCsumErrors *float64
+ IgnoredMulti *float64
+}
+
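+// Snmp returns the snmp statistics for the network namespace of the process,
+// read from /proc/<pid>/net/snmp.
+//
+// A usage sketch (editorial example; it assumes the Self helper from proc.go
+// of this package):
+//
+//    p, _ := procfs.Self()
+//    snmp, err := p.Snmp()
+//    if err == nil && snmp.Ip.InReceives != nil {
+//        fmt.Printf("IP InReceives: %.0f\n", *snmp.Ip.InReceives)
+//    }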
+func (p Proc) Snmp() (ProcSnmp, error) {
+ filename := p.path("net/snmp")
+ data, err := util.ReadFileNoStat(filename)
+ if err != nil {
+ return ProcSnmp{PID: p.PID}, err
+ }
+ procSnmp, err := parseSnmp(bytes.NewReader(data), filename)
+ procSnmp.PID = p.PID
+ return procSnmp, err
+}
+
+// parseSnmp parses the metrics from the /proc/<pid>/net/snmp file
+// and returns a ProcSnmp structure containing those metrics (e.g. {"Ip": {"Forwarding": 2}}).
+func parseSnmp(r io.Reader, fileName string) (ProcSnmp, error) {
+ var (
+ scanner = bufio.NewScanner(r)
+ procSnmp = ProcSnmp{}
+ )
+
+ for scanner.Scan() {
+ nameParts := strings.Split(scanner.Text(), " ")
+ scanner.Scan()
+ valueParts := strings.Split(scanner.Text(), " ")
+ // Remove trailing :.
+ protocol := strings.TrimSuffix(nameParts[0], ":")
+ if len(nameParts) != len(valueParts) {
+ return procSnmp, fmt.Errorf("%w: field count mismatch in %s: %s",
+ ErrFileParse, fileName, protocol)
+ }
+ for i := 1; i < len(nameParts); i++ {
+ value, err := strconv.ParseFloat(valueParts[i], 64)
+ if err != nil {
+ return procSnmp, err
+ }
+ key := nameParts[i]
+
+ switch protocol {
+ case "Ip":
+ switch key {
+ case "Forwarding":
+ procSnmp.Ip.Forwarding = &value
+ case "DefaultTTL":
+ procSnmp.Ip.DefaultTTL = &value
+ case "InReceives":
+ procSnmp.Ip.InReceives = &value
+ case "InHdrErrors":
+ procSnmp.Ip.InHdrErrors = &value
+ case "InAddrErrors":
+ procSnmp.Ip.InAddrErrors = &value
+ case "ForwDatagrams":
+ procSnmp.Ip.ForwDatagrams = &value
+ case "InUnknownProtos":
+ procSnmp.Ip.InUnknownProtos = &value
+ case "InDiscards":
+ procSnmp.Ip.InDiscards = &value
+ case "InDelivers":
+ procSnmp.Ip.InDelivers = &value
+ case "OutRequests":
+ procSnmp.Ip.OutRequests = &value
+ case "OutDiscards":
+ procSnmp.Ip.OutDiscards = &value
+ case "OutNoRoutes":
+ procSnmp.Ip.OutNoRoutes = &value
+ case "ReasmTimeout":
+ procSnmp.Ip.ReasmTimeout = &value
+ case "ReasmReqds":
+ procSnmp.Ip.ReasmReqds = &value
+ case "ReasmOKs":
+ procSnmp.Ip.ReasmOKs = &value
+ case "ReasmFails":
+ procSnmp.Ip.ReasmFails = &value
+ case "FragOKs":
+ procSnmp.Ip.FragOKs = &value
+ case "FragFails":
+ procSnmp.Ip.FragFails = &value
+ case "FragCreates":
+ procSnmp.Ip.FragCreates = &value
+ }
+ case "Icmp":
+ switch key {
+ case "InMsgs":
+ procSnmp.Icmp.InMsgs = &value
+ case "InErrors":
+ procSnmp.Icmp.InErrors = &value
+ case "InCsumErrors":
+ procSnmp.Icmp.InCsumErrors = &value
+ case "InDestUnreachs":
+ procSnmp.Icmp.InDestUnreachs = &value
+ case "InTimeExcds":
+ procSnmp.Icmp.InTimeExcds = &value
+ case "InParmProbs":
+ procSnmp.Icmp.InParmProbs = &value
+ case "InSrcQuenchs":
+ procSnmp.Icmp.InSrcQuenchs = &value
+ case "InRedirects":
+ procSnmp.Icmp.InRedirects = &value
+ case "InEchos":
+ procSnmp.Icmp.InEchos = &value
+ case "InEchoReps":
+ procSnmp.Icmp.InEchoReps = &value
+ case "InTimestamps":
+ procSnmp.Icmp.InTimestamps = &value
+ case "InTimestampReps":
+ procSnmp.Icmp.InTimestampReps = &value
+ case "InAddrMasks":
+ procSnmp.Icmp.InAddrMasks = &value
+ case "InAddrMaskReps":
+ procSnmp.Icmp.InAddrMaskReps = &value
+ case "OutMsgs":
+ procSnmp.Icmp.OutMsgs = &value
+ case "OutErrors":
+ procSnmp.Icmp.OutErrors = &value
+ case "OutDestUnreachs":
+ procSnmp.Icmp.OutDestUnreachs = &value
+ case "OutTimeExcds":
+ procSnmp.Icmp.OutTimeExcds = &value
+ case "OutParmProbs":
+ procSnmp.Icmp.OutParmProbs = &value
+ case "OutSrcQuenchs":
+ procSnmp.Icmp.OutSrcQuenchs = &value
+ case "OutRedirects":
+ procSnmp.Icmp.OutRedirects = &value
+ case "OutEchos":
+ procSnmp.Icmp.OutEchos = &value
+ case "OutEchoReps":
+ procSnmp.Icmp.OutEchoReps = &value
+ case "OutTimestamps":
+ procSnmp.Icmp.OutTimestamps = &value
+ case "OutTimestampReps":
+ procSnmp.Icmp.OutTimestampReps = &value
+ case "OutAddrMasks":
+ procSnmp.Icmp.OutAddrMasks = &value
+ case "OutAddrMaskReps":
+ procSnmp.Icmp.OutAddrMaskReps = &value
+ }
+ case "IcmpMsg":
+ switch key {
+ case "InType3":
+ procSnmp.IcmpMsg.InType3 = &value
+ case "OutType3":
+ procSnmp.IcmpMsg.OutType3 = &value
+ }
+ case "Tcp":
+ switch key {
+ case "RtoAlgorithm":
+ procSnmp.Tcp.RtoAlgorithm = &value
+ case "RtoMin":
+ procSnmp.Tcp.RtoMin = &value
+ case "RtoMax":
+ procSnmp.Tcp.RtoMax = &value
+ case "MaxConn":
+ procSnmp.Tcp.MaxConn = &value
+ case "ActiveOpens":
+ procSnmp.Tcp.ActiveOpens = &value
+ case "PassiveOpens":
+ procSnmp.Tcp.PassiveOpens = &value
+ case "AttemptFails":
+ procSnmp.Tcp.AttemptFails = &value
+ case "EstabResets":
+ procSnmp.Tcp.EstabResets = &value
+ case "CurrEstab":
+ procSnmp.Tcp.CurrEstab = &value
+ case "InSegs":
+ procSnmp.Tcp.InSegs = &value
+ case "OutSegs":
+ procSnmp.Tcp.OutSegs = &value
+ case "RetransSegs":
+ procSnmp.Tcp.RetransSegs = &value
+ case "InErrs":
+ procSnmp.Tcp.InErrs = &value
+ case "OutRsts":
+ procSnmp.Tcp.OutRsts = &value
+ case "InCsumErrors":
+ procSnmp.Tcp.InCsumErrors = &value
+ }
+ case "Udp":
+ switch key {
+ case "InDatagrams":
+ procSnmp.Udp.InDatagrams = &value
+ case "NoPorts":
+ procSnmp.Udp.NoPorts = &value
+ case "InErrors":
+ procSnmp.Udp.InErrors = &value
+ case "OutDatagrams":
+ procSnmp.Udp.OutDatagrams = &value
+ case "RcvbufErrors":
+ procSnmp.Udp.RcvbufErrors = &value
+ case "SndbufErrors":
+ procSnmp.Udp.SndbufErrors = &value
+ case "InCsumErrors":
+ procSnmp.Udp.InCsumErrors = &value
+ case "IgnoredMulti":
+ procSnmp.Udp.IgnoredMulti = &value
+ }
+ case "UdpLite":
+ switch key {
+ case "InDatagrams":
+ procSnmp.UdpLite.InDatagrams = &value
+ case "NoPorts":
+ procSnmp.UdpLite.NoPorts = &value
+ case "InErrors":
+ procSnmp.UdpLite.InErrors = &value
+ case "OutDatagrams":
+ procSnmp.UdpLite.OutDatagrams = &value
+ case "RcvbufErrors":
+ procSnmp.UdpLite.RcvbufErrors = &value
+ case "SndbufErrors":
+ procSnmp.UdpLite.SndbufErrors = &value
+ case "InCsumErrors":
+ procSnmp.UdpLite.InCsumErrors = &value
+ case "IgnoredMulti":
+ procSnmp.UdpLite.IgnoredMulti = &value
+ }
+ }
+ }
+ }
+ return procSnmp, scanner.Err()
+}
diff --git a/vendor/github.com/prometheus/procfs/proc_snmp6.go b/vendor/github.com/prometheus/procfs/proc_snmp6.go
new file mode 100644
index 0000000..3059cc6
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/proc_snmp6.go
@@ -0,0 +1,381 @@
+// Copyright 2022 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+ "bufio"
+ "bytes"
+ "errors"
+ "io"
+ "os"
+ "strconv"
+ "strings"
+
+ "github.com/prometheus/procfs/internal/util"
+)
+
+// ProcSnmp6 models the content of /proc/<pid>/net/snmp6.
+type ProcSnmp6 struct {
+ // The process ID.
+ PID int
+ Ip6
+ Icmp6
+ Udp6
+ UdpLite6
+}
+
+type Ip6 struct { // nolint:revive
+ InReceives *float64
+ InHdrErrors *float64
+ InTooBigErrors *float64
+ InNoRoutes *float64
+ InAddrErrors *float64
+ InUnknownProtos *float64
+ InTruncatedPkts *float64
+ InDiscards *float64
+ InDelivers *float64
+ OutForwDatagrams *float64
+ OutRequests *float64
+ OutDiscards *float64
+ OutNoRoutes *float64
+ ReasmTimeout *float64
+ ReasmReqds *float64
+ ReasmOKs *float64
+ ReasmFails *float64
+ FragOKs *float64
+ FragFails *float64
+ FragCreates *float64
+ InMcastPkts *float64
+ OutMcastPkts *float64
+ InOctets *float64
+ OutOctets *float64
+ InMcastOctets *float64
+ OutMcastOctets *float64
+ InBcastOctets *float64
+ OutBcastOctets *float64
+ InNoECTPkts *float64
+ InECT1Pkts *float64
+ InECT0Pkts *float64
+ InCEPkts *float64
+}
+
+type Icmp6 struct {
+ InMsgs *float64
+ InErrors *float64
+ OutMsgs *float64
+ OutErrors *float64
+ InCsumErrors *float64
+ InDestUnreachs *float64
+ InPktTooBigs *float64
+ InTimeExcds *float64
+ InParmProblems *float64
+ InEchos *float64
+ InEchoReplies *float64
+ InGroupMembQueries *float64
+ InGroupMembResponses *float64
+ InGroupMembReductions *float64
+ InRouterSolicits *float64
+ InRouterAdvertisements *float64
+ InNeighborSolicits *float64
+ InNeighborAdvertisements *float64
+ InRedirects *float64
+ InMLDv2Reports *float64
+ OutDestUnreachs *float64
+ OutPktTooBigs *float64
+ OutTimeExcds *float64
+ OutParmProblems *float64
+ OutEchos *float64
+ OutEchoReplies *float64
+ OutGroupMembQueries *float64
+ OutGroupMembResponses *float64
+ OutGroupMembReductions *float64
+ OutRouterSolicits *float64
+ OutRouterAdvertisements *float64
+ OutNeighborSolicits *float64
+ OutNeighborAdvertisements *float64
+ OutRedirects *float64
+ OutMLDv2Reports *float64
+ InType1 *float64
+ InType134 *float64
+ InType135 *float64
+ InType136 *float64
+ InType143 *float64
+ OutType133 *float64
+ OutType135 *float64
+ OutType136 *float64
+ OutType143 *float64
+}
+
+type Udp6 struct { // nolint:revive
+ InDatagrams *float64
+ NoPorts *float64
+ InErrors *float64
+ OutDatagrams *float64
+ RcvbufErrors *float64
+ SndbufErrors *float64
+ InCsumErrors *float64
+ IgnoredMulti *float64
+}
+
+type UdpLite6 struct { // nolint:revive
+ InDatagrams *float64
+ NoPorts *float64
+ InErrors *float64
+ OutDatagrams *float64
+ RcvbufErrors *float64
+ SndbufErrors *float64
+ InCsumErrors *float64
+}
+
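+// Snmp6 returns the snmp6 statistics for the network namespace of the process,
+// read from /proc/<pid>/net/snmp6. On systems with IPv6 disabled the file does
+// not exist and an empty result is returned without an error.
+//
+// A usage sketch (editorial example; it assumes the Self helper from proc.go
+// of this package):
+//
+//    p, _ := procfs.Self()
+//    snmp6, err := p.Snmp6()
+//    if err == nil && snmp6.Ip6.InReceives != nil {
+//        fmt.Printf("IPv6 InReceives: %.0f\n", *snmp6.Ip6.InReceives)
+//    }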
+func (p Proc) Snmp6() (ProcSnmp6, error) {
+ filename := p.path("net/snmp6")
+ data, err := util.ReadFileNoStat(filename)
+ if err != nil {
+ // On systems with IPv6 disabled, this file won't exist.
+ // Do nothing.
+ if errors.Is(err, os.ErrNotExist) {
+ return ProcSnmp6{PID: p.PID}, nil
+ }
+
+ return ProcSnmp6{PID: p.PID}, err
+ }
+
+ procSnmp6, err := parseSNMP6Stats(bytes.NewReader(data))
+ procSnmp6.PID = p.PID
+ return procSnmp6, err
+}
+
+// parseSNMP6Stats parses the metrics from the /proc/<pid>/net/snmp6 file
+// and returns a ProcSnmp6 structure containing those metrics.
+func parseSNMP6Stats(r io.Reader) (ProcSnmp6, error) {
+ var (
+ scanner = bufio.NewScanner(r)
+ procSnmp6 = ProcSnmp6{}
+ )
+
+ for scanner.Scan() {
+ stat := strings.Fields(scanner.Text())
+ if len(stat) < 2 {
+ continue
+ }
+ // Expect the metric name to contain "6"; skip the line otherwise
+ if sixIndex := strings.Index(stat[0], "6"); sixIndex != -1 {
+ protocol := stat[0][:sixIndex+1]
+ key := stat[0][sixIndex+1:]
+ value, err := strconv.ParseFloat(stat[1], 64)
+ if err != nil {
+ return procSnmp6, err
+ }
+
+ switch protocol {
+ case "Ip6":
+ switch key {
+ case "InReceives":
+ procSnmp6.Ip6.InReceives = &value
+ case "InHdrErrors":
+ procSnmp6.Ip6.InHdrErrors = &value
+ case "InTooBigErrors":
+ procSnmp6.Ip6.InTooBigErrors = &value
+ case "InNoRoutes":
+ procSnmp6.Ip6.InNoRoutes = &value
+ case "InAddrErrors":
+ procSnmp6.Ip6.InAddrErrors = &value
+ case "InUnknownProtos":
+ procSnmp6.Ip6.InUnknownProtos = &value
+ case "InTruncatedPkts":
+ procSnmp6.Ip6.InTruncatedPkts = &value
+ case "InDiscards":
+ procSnmp6.Ip6.InDiscards = &value
+ case "InDelivers":
+ procSnmp6.Ip6.InDelivers = &value
+ case "OutForwDatagrams":
+ procSnmp6.Ip6.OutForwDatagrams = &value
+ case "OutRequests":
+ procSnmp6.Ip6.OutRequests = &value
+ case "OutDiscards":
+ procSnmp6.Ip6.OutDiscards = &value
+ case "OutNoRoutes":
+ procSnmp6.Ip6.OutNoRoutes = &value
+ case "ReasmTimeout":
+ procSnmp6.Ip6.ReasmTimeout = &value
+ case "ReasmReqds":
+ procSnmp6.Ip6.ReasmReqds = &value
+ case "ReasmOKs":
+ procSnmp6.Ip6.ReasmOKs = &value
+ case "ReasmFails":
+ procSnmp6.Ip6.ReasmFails = &value
+ case "FragOKs":
+ procSnmp6.Ip6.FragOKs = &value
+ case "FragFails":
+ procSnmp6.Ip6.FragFails = &value
+ case "FragCreates":
+ procSnmp6.Ip6.FragCreates = &value
+ case "InMcastPkts":
+ procSnmp6.Ip6.InMcastPkts = &value
+ case "OutMcastPkts":
+ procSnmp6.Ip6.OutMcastPkts = &value
+ case "InOctets":
+ procSnmp6.Ip6.InOctets = &value
+ case "OutOctets":
+ procSnmp6.Ip6.OutOctets = &value
+ case "InMcastOctets":
+ procSnmp6.Ip6.InMcastOctets = &value
+ case "OutMcastOctets":
+ procSnmp6.Ip6.OutMcastOctets = &value
+ case "InBcastOctets":
+ procSnmp6.Ip6.InBcastOctets = &value
+ case "OutBcastOctets":
+ procSnmp6.Ip6.OutBcastOctets = &value
+ case "InNoECTPkts":
+ procSnmp6.Ip6.InNoECTPkts = &value
+ case "InECT1Pkts":
+ procSnmp6.Ip6.InECT1Pkts = &value
+ case "InECT0Pkts":
+ procSnmp6.Ip6.InECT0Pkts = &value
+ case "InCEPkts":
+ procSnmp6.Ip6.InCEPkts = &value
+
+ }
+ case "Icmp6":
+ switch key {
+ case "InMsgs":
+ procSnmp6.Icmp6.InMsgs = &value
+ case "InErrors":
+ procSnmp6.Icmp6.InErrors = &value
+ case "OutMsgs":
+ procSnmp6.Icmp6.OutMsgs = &value
+ case "OutErrors":
+ procSnmp6.Icmp6.OutErrors = &value
+ case "InCsumErrors":
+ procSnmp6.Icmp6.InCsumErrors = &value
+ case "InDestUnreachs":
+ procSnmp6.Icmp6.InDestUnreachs = &value
+ case "InPktTooBigs":
+ procSnmp6.Icmp6.InPktTooBigs = &value
+ case "InTimeExcds":
+ procSnmp6.Icmp6.InTimeExcds = &value
+ case "InParmProblems":
+ procSnmp6.Icmp6.InParmProblems = &value
+ case "InEchos":
+ procSnmp6.Icmp6.InEchos = &value
+ case "InEchoReplies":
+ procSnmp6.Icmp6.InEchoReplies = &value
+ case "InGroupMembQueries":
+ procSnmp6.Icmp6.InGroupMembQueries = &value
+ case "InGroupMembResponses":
+ procSnmp6.Icmp6.InGroupMembResponses = &value
+ case "InGroupMembReductions":
+ procSnmp6.Icmp6.InGroupMembReductions = &value
+ case "InRouterSolicits":
+ procSnmp6.Icmp6.InRouterSolicits = &value
+ case "InRouterAdvertisements":
+ procSnmp6.Icmp6.InRouterAdvertisements = &value
+ case "InNeighborSolicits":
+ procSnmp6.Icmp6.InNeighborSolicits = &value
+ case "InNeighborAdvertisements":
+ procSnmp6.Icmp6.InNeighborAdvertisements = &value
+ case "InRedirects":
+ procSnmp6.Icmp6.InRedirects = &value
+ case "InMLDv2Reports":
+ procSnmp6.Icmp6.InMLDv2Reports = &value
+ case "OutDestUnreachs":
+ procSnmp6.Icmp6.OutDestUnreachs = &value
+ case "OutPktTooBigs":
+ procSnmp6.Icmp6.OutPktTooBigs = &value
+ case "OutTimeExcds":
+ procSnmp6.Icmp6.OutTimeExcds = &value
+ case "OutParmProblems":
+ procSnmp6.Icmp6.OutParmProblems = &value
+ case "OutEchos":
+ procSnmp6.Icmp6.OutEchos = &value
+ case "OutEchoReplies":
+ procSnmp6.Icmp6.OutEchoReplies = &value
+ case "OutGroupMembQueries":
+ procSnmp6.Icmp6.OutGroupMembQueries = &value
+ case "OutGroupMembResponses":
+ procSnmp6.Icmp6.OutGroupMembResponses = &value
+ case "OutGroupMembReductions":
+ procSnmp6.Icmp6.OutGroupMembReductions = &value
+ case "OutRouterSolicits":
+ procSnmp6.Icmp6.OutRouterSolicits = &value
+ case "OutRouterAdvertisements":
+ procSnmp6.Icmp6.OutRouterAdvertisements = &value
+ case "OutNeighborSolicits":
+ procSnmp6.Icmp6.OutNeighborSolicits = &value
+ case "OutNeighborAdvertisements":
+ procSnmp6.Icmp6.OutNeighborAdvertisements = &value
+ case "OutRedirects":
+ procSnmp6.Icmp6.OutRedirects = &value
+ case "OutMLDv2Reports":
+ procSnmp6.Icmp6.OutMLDv2Reports = &value
+ case "InType1":
+ procSnmp6.Icmp6.InType1 = &value
+ case "InType134":
+ procSnmp6.Icmp6.InType134 = &value
+ case "InType135":
+ procSnmp6.Icmp6.InType135 = &value
+ case "InType136":
+ procSnmp6.Icmp6.InType136 = &value
+ case "InType143":
+ procSnmp6.Icmp6.InType143 = &value
+ case "OutType133":
+ procSnmp6.Icmp6.OutType133 = &value
+ case "OutType135":
+ procSnmp6.Icmp6.OutType135 = &value
+ case "OutType136":
+ procSnmp6.Icmp6.OutType136 = &value
+ case "OutType143":
+ procSnmp6.Icmp6.OutType143 = &value
+ }
+ case "Udp6":
+ switch key {
+ case "InDatagrams":
+ procSnmp6.Udp6.InDatagrams = &value
+ case "NoPorts":
+ procSnmp6.Udp6.NoPorts = &value
+ case "InErrors":
+ procSnmp6.Udp6.InErrors = &value
+ case "OutDatagrams":
+ procSnmp6.Udp6.OutDatagrams = &value
+ case "RcvbufErrors":
+ procSnmp6.Udp6.RcvbufErrors = &value
+ case "SndbufErrors":
+ procSnmp6.Udp6.SndbufErrors = &value
+ case "InCsumErrors":
+ procSnmp6.Udp6.InCsumErrors = &value
+ case "IgnoredMulti":
+ procSnmp6.Udp6.IgnoredMulti = &value
+ }
+ case "UdpLite6":
+ switch key {
+ case "InDatagrams":
+ procSnmp6.UdpLite6.InDatagrams = &value
+ case "NoPorts":
+ procSnmp6.UdpLite6.NoPorts = &value
+ case "InErrors":
+ procSnmp6.UdpLite6.InErrors = &value
+ case "OutDatagrams":
+ procSnmp6.UdpLite6.OutDatagrams = &value
+ case "RcvbufErrors":
+ procSnmp6.UdpLite6.RcvbufErrors = &value
+ case "SndbufErrors":
+ procSnmp6.UdpLite6.SndbufErrors = &value
+ case "InCsumErrors":
+ procSnmp6.UdpLite6.InCsumErrors = &value
+ }
+ }
+ }
+ }
+ return procSnmp6, scanner.Err()
+}
diff --git a/vendor/github.com/prometheus/procfs/proc_stat.go b/vendor/github.com/prometheus/procfs/proc_stat.go
new file mode 100644
index 0000000..06a8d93
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/proc_stat.go
@@ -0,0 +1,229 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+ "bytes"
+ "fmt"
+ "os"
+
+ "github.com/prometheus/procfs/internal/util"
+)
+
+// Originally, this USER_HZ value was dynamically retrieved via a sysconf call
+// which required cgo. However, that caused a lot of problems regarding
+// cross-compilation. Alternatives such as running a binary to determine the
+// value, or trying to derive it in some other way were all problematic. After
+// much research it was determined that USER_HZ is actually hardcoded to 100 on
+// all Go-supported platforms as of the time of this writing. This is why we
+// decided to hardcode it here as well. It is not impossible that there could
+// be systems with exceptions, but they should be very exotic edge cases, and
+// in that case, the worst outcome will be two misreported metrics.
+//
+// See also the following discussions:
+//
+// - https://github.com/prometheus/node_exporter/issues/52
+// - https://github.com/prometheus/procfs/pull/2
+// - http://stackoverflow.com/questions/17410841/how-does-user-hz-solve-the-jiffy-scaling-issue
+const userHZ = 100
+
+// ProcStat provides status information about the process,
+// read from /proc/[pid]/stat.
+type ProcStat struct {
+ // The process ID.
+ PID int
+ // The filename of the executable.
+ Comm string
+ // The process state.
+ State string
+ // The PID of the parent of this process.
+ PPID int
+ // The process group ID of the process.
+ PGRP int
+ // The session ID of the process.
+ Session int
+ // The controlling terminal of the process.
+ TTY int
+ // The ID of the foreground process group of the controlling terminal of
+ // the process.
+ TPGID int
+ // The kernel flags word of the process.
+ Flags uint
+ // The number of minor faults the process has made which have not required
+ // loading a memory page from disk.
+ MinFlt uint
+ // The number of minor faults that the process's waited-for children have
+ // made.
+ CMinFlt uint
+ // The number of major faults the process has made which have required
+ // loading a memory page from disk.
+ MajFlt uint
+ // The number of major faults that the process's waited-for children have
+ // made.
+ CMajFlt uint
+ // Amount of time that this process has been scheduled in user mode,
+ // measured in clock ticks.
+ UTime uint
+ // Amount of time that this process has been scheduled in kernel mode,
+ // measured in clock ticks.
+ STime uint
+ // Amount of time that this process's waited-for children have been
+ // scheduled in user mode, measured in clock ticks.
+ CUTime int
+ // Amount of time that this process's waited-for children have been
+ // scheduled in kernel mode, measured in clock ticks.
+ CSTime int
+ // For processes running a real-time scheduling policy, this is the negated
+ // scheduling priority, minus one.
+ Priority int
+ // The nice value, a value in the range 19 (low priority) to -20 (high
+ // priority).
+ Nice int
+ // Number of threads in this process.
+ NumThreads int
+ // The time the process started after system boot, expressed
+ // in clock ticks.
+ Starttime uint64
+ // Virtual memory size in bytes.
+ VSize uint
+ // Resident set size in pages.
+ RSS int
+ // Soft limit in bytes on the rss of the process.
+ RSSLimit uint64
+ // CPU number last executed on.
+ Processor uint
+ // Real-time scheduling priority, a number in the range 1 to 99 for processes
+ // scheduled under a real-time policy, or 0, for non-real-time processes.
+ RTPriority uint
+ // Scheduling policy.
+ Policy uint
+ // Aggregated block I/O delays, measured in clock ticks (centiseconds).
+ DelayAcctBlkIOTicks uint64
+ // Guest time of the process (time spent running a virtual CPU for a guest
+ // operating system), measured in clock ticks.
+ GuestTime int
+ // Guest time of the process's children, measured in clock ticks.
+ CGuestTime int
+
+ proc FS
+}
+
+// NewStat returns the current status information of the process.
+//
+// Deprecated: Use p.Stat() instead.
+func (p Proc) NewStat() (ProcStat, error) {
+ return p.Stat()
+}
+
+// Stat returns the current status information of the process.
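+//
+// A usage sketch (editorial example; it assumes the Self helper from proc.go
+// of this package):
+//
+//    p, _ := procfs.Self()
+//    stat, err := p.Stat()
+//    if err == nil {
+//        fmt.Printf("cpu=%.2fs rss=%d bytes\n", stat.CPUTime(), stat.ResidentMemory())
+//    }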
+func (p Proc) Stat() (ProcStat, error) {
+ data, err := util.ReadFileNoStat(p.path("stat"))
+ if err != nil {
+ return ProcStat{}, err
+ }
+
+ var (
+ ignoreInt64 int64
+ ignoreUint64 uint64
+
+ s = ProcStat{PID: p.PID, proc: p.fs}
+ l = bytes.Index(data, []byte("("))
+ r = bytes.LastIndex(data, []byte(")"))
+ )
+
+ if l < 0 || r < 0 {
+ return ProcStat{}, fmt.Errorf("%w: unexpected format, couldn't extract comm %q", ErrFileParse, data)
+ }
+
+ s.Comm = string(data[l+1 : r])
+
+ // Check the following resources for the details about the particular stat
+ // fields and their data types:
+ // * https://man7.org/linux/man-pages/man5/proc.5.html
+ // * https://man7.org/linux/man-pages/man3/scanf.3.html
+ _, err = fmt.Fscan(
+ bytes.NewBuffer(data[r+2:]),
+ &s.State,
+ &s.PPID,
+ &s.PGRP,
+ &s.Session,
+ &s.TTY,
+ &s.TPGID,
+ &s.Flags,
+ &s.MinFlt,
+ &s.CMinFlt,
+ &s.MajFlt,
+ &s.CMajFlt,
+ &s.UTime,
+ &s.STime,
+ &s.CUTime,
+ &s.CSTime,
+ &s.Priority,
+ &s.Nice,
+ &s.NumThreads,
+ &ignoreInt64,
+ &s.Starttime,
+ &s.VSize,
+ &s.RSS,
+ &s.RSSLimit,
+ &ignoreUint64,
+ &ignoreUint64,
+ &ignoreUint64,
+ &ignoreUint64,
+ &ignoreUint64,
+ &ignoreUint64,
+ &ignoreUint64,
+ &ignoreUint64,
+ &ignoreUint64,
+ &ignoreUint64,
+ &ignoreUint64,
+ &ignoreUint64,
+ &ignoreInt64,
+ &s.Processor,
+ &s.RTPriority,
+ &s.Policy,
+ &s.DelayAcctBlkIOTicks,
+ &s.GuestTime,
+ &s.CGuestTime,
+ )
+ if err != nil {
+ return ProcStat{}, err
+ }
+
+ return s, nil
+}
+
+// VirtualMemory returns the virtual memory size in bytes.
+func (s ProcStat) VirtualMemory() uint {
+ return s.VSize
+}
+
+// ResidentMemory returns the resident memory size in bytes.
+func (s ProcStat) ResidentMemory() int {
+ return s.RSS * os.Getpagesize()
+}
+
+// StartTime returns the unix timestamp of the process start time in seconds.
+func (s ProcStat) StartTime() (float64, error) {
+ stat, err := s.proc.Stat()
+ if err != nil {
+ return 0, err
+ }
+ return float64(stat.BootTime) + (float64(s.Starttime) / userHZ), nil
+}
+
+// CPUTime returns the total CPU user and system time in seconds.
+func (s ProcStat) CPUTime() float64 {
+ return float64(s.UTime+s.STime) / userHZ
+}
diff --git a/vendor/github.com/prometheus/procfs/proc_status.go b/vendor/github.com/prometheus/procfs/proc_status.go
new file mode 100644
index 0000000..a055197
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/proc_status.go
@@ -0,0 +1,238 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+ "bytes"
+ "math/bits"
+ "sort"
+ "strconv"
+ "strings"
+
+ "github.com/prometheus/procfs/internal/util"
+)
+
+// ProcStatus provides status information about the process,
+// read from /proc/[pid]/status.
+type ProcStatus struct {
+ // The process ID.
+ PID int
+ // The process name.
+ Name string
+
+ // Thread group ID.
+ TGID int
+ // The process ID in each of the process's PID namespaces (NSpid).
+ NSpids []uint64
+
+ // Peak virtual memory size.
+ VmPeak uint64 // nolint:revive
+ // Virtual memory size.
+ VmSize uint64 // nolint:revive
+ // Locked memory size.
+ VmLck uint64 // nolint:revive
+ // Pinned memory size.
+ VmPin uint64 // nolint:revive
+ // Peak resident set size.
+ VmHWM uint64 // nolint:revive
+ // Resident set size (sum of RssAnon, RssFile and RssShmem).
+ VmRSS uint64 // nolint:revive
+ // Size of resident anonymous memory.
+ RssAnon uint64 // nolint:revive
+ // Size of resident file mappings.
+ RssFile uint64 // nolint:revive
+ // Size of resident shared memory.
+ RssShmem uint64 // nolint:revive
+ // Size of data segments.
+ VmData uint64 // nolint:revive
+ // Size of stack segments.
+ VmStk uint64 // nolint:revive
+ // Size of text segments.
+ VmExe uint64 // nolint:revive
+ // Shared library code size.
+ VmLib uint64 // nolint:revive
+ // Page table entries size.
+ VmPTE uint64 // nolint:revive
+ // Size of second-level page tables.
+ VmPMD uint64 // nolint:revive
+ // Swapped-out virtual memory size of anonymous private pages.
+ VmSwap uint64 // nolint:revive
+ // Size of hugetlb memory portions
+ HugetlbPages uint64
+
+ // Number of voluntary context switches.
+ VoluntaryCtxtSwitches uint64
+ // Number of involuntary context switches.
+ NonVoluntaryCtxtSwitches uint64
+
+ // UIDs of the process (Real, effective, saved set, and filesystem UIDs)
+ UIDs [4]uint64
+ // GIDs of the process (Real, effective, saved set, and filesystem GIDs)
+ GIDs [4]uint64
+
+ // CpusAllowedList: List of cpu cores the process is allowed to run on.
+ CpusAllowedList []uint64
+}
+
+// NewStatus returns the current status information of the process.
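+//
+// A usage sketch (editorial example; it assumes the Self helper from proc.go
+// of this package):
+//
+//    p, _ := procfs.Self()
+//    status, err := p.NewStatus()
+//    if err == nil {
+//        fmt.Printf("%s: VmRSS=%d bytes, ctxt switches=%d\n",
+//            status.Name, status.VmRSS, status.TotalCtxtSwitches())
+//    }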
+func (p Proc) NewStatus() (ProcStatus, error) {
+ data, err := util.ReadFileNoStat(p.path("status"))
+ if err != nil {
+ return ProcStatus{}, err
+ }
+
+ s := ProcStatus{PID: p.PID}
+
+ lines := strings.Split(string(data), "\n")
+ for _, line := range lines {
+ if !bytes.Contains([]byte(line), []byte(":")) {
+ continue
+ }
+
+ kv := strings.SplitN(line, ":", 2)
+
+ // removes spaces
+ k := strings.TrimSpace(kv[0])
+ v := strings.TrimSpace(kv[1])
+ // removes "kB"
+ v = strings.TrimSuffix(v, " kB")
+
+ // convert the value to an int when possible
+ // the error check can be skipped here because vKBytes is not used when the value is a string
+ vKBytes, _ := strconv.ParseUint(v, 10, 64)
+ // convert kB to B
+ vBytes := vKBytes * 1024
+
+ err = s.fillStatus(k, v, vKBytes, vBytes)
+ if err != nil {
+ return ProcStatus{}, err
+ }
+ }
+
+ return s, nil
+}
+
+func (s *ProcStatus) fillStatus(k string, vString string, vUint uint64, vUintBytes uint64) error {
+ switch k {
+ case "Tgid":
+ s.TGID = int(vUint)
+ case "Name":
+ s.Name = vString
+ case "Uid":
+ var err error
+ for i, v := range strings.Split(vString, "\t") {
+ s.UIDs[i], err = strconv.ParseUint(v, 10, bits.UintSize)
+ if err != nil {
+ return err
+ }
+ }
+ case "Gid":
+ var err error
+ for i, v := range strings.Split(vString, "\t") {
+ s.GIDs[i], err = strconv.ParseUint(v, 10, bits.UintSize)
+ if err != nil {
+ return err
+ }
+ }
+ case "NSpid":
+ s.NSpids = calcNSPidsList(vString)
+ case "VmPeak":
+ s.VmPeak = vUintBytes
+ case "VmSize":
+ s.VmSize = vUintBytes
+ case "VmLck":
+ s.VmLck = vUintBytes
+ case "VmPin":
+ s.VmPin = vUintBytes
+ case "VmHWM":
+ s.VmHWM = vUintBytes
+ case "VmRSS":
+ s.VmRSS = vUintBytes
+ case "RssAnon":
+ s.RssAnon = vUintBytes
+ case "RssFile":
+ s.RssFile = vUintBytes
+ case "RssShmem":
+ s.RssShmem = vUintBytes
+ case "VmData":
+ s.VmData = vUintBytes
+ case "VmStk":
+ s.VmStk = vUintBytes
+ case "VmExe":
+ s.VmExe = vUintBytes
+ case "VmLib":
+ s.VmLib = vUintBytes
+ case "VmPTE":
+ s.VmPTE = vUintBytes
+ case "VmPMD":
+ s.VmPMD = vUintBytes
+ case "VmSwap":
+ s.VmSwap = vUintBytes
+ case "HugetlbPages":
+ s.HugetlbPages = vUintBytes
+ case "voluntary_ctxt_switches":
+ s.VoluntaryCtxtSwitches = vUint
+ case "nonvoluntary_ctxt_switches":
+ s.NonVoluntaryCtxtSwitches = vUint
+ case "Cpus_allowed_list":
+ s.CpusAllowedList = calcCpusAllowedList(vString)
+ }
+
+ return nil
+}
+
+// TotalCtxtSwitches returns the total number of context switches.
+func (s ProcStatus) TotalCtxtSwitches() uint64 {
+ return s.VoluntaryCtxtSwitches + s.NonVoluntaryCtxtSwitches
+}
+
+func calcCpusAllowedList(cpuString string) []uint64 {
+ s := strings.Split(cpuString, ",")
+
+ var g []uint64
+
+ for _, cpu := range s {
+ // parse cpu ranges, example: 1-3=[1,2,3]
+ if l := strings.Split(strings.TrimSpace(cpu), "-"); len(l) > 1 {
+ startCPU, _ := strconv.ParseUint(l[0], 10, 64)
+ endCPU, _ := strconv.ParseUint(l[1], 10, 64)
+
+ for i := startCPU; i <= endCPU; i++ {
+ g = append(g, i)
+ }
+ } else if len(l) == 1 {
+ cpu, _ := strconv.ParseUint(l[0], 10, 64)
+ g = append(g, cpu)
+ }
+
+ }
+
+ sort.Slice(g, func(i, j int) bool { return g[i] < g[j] })
+ return g
+}
+
+func calcNSPidsList(nspidsString string) []uint64 {
+ s := strings.Split(nspidsString, " ")
+ var nspids []uint64
+
+ for _, nspid := range s {
+ nspid, _ := strconv.ParseUint(nspid, 10, 64)
+ if nspid == 0 {
+ continue
+ }
+ nspids = append(nspids, nspid)
+ }
+
+ return nspids
+}
diff --git a/vendor/github.com/prometheus/procfs/proc_sys.go b/vendor/github.com/prometheus/procfs/proc_sys.go
new file mode 100644
index 0000000..5eefbe2
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/proc_sys.go
@@ -0,0 +1,51 @@
+// Copyright 2022 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/prometheus/procfs/internal/util"
+)
+
+func sysctlToPath(sysctl string) string {
+ return strings.Replace(sysctl, ".", "/", -1)
+}
+
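+// SysctlStrings reads the sysctl named by its dotted path (e.g. "kernel.hostname"
+// maps to /proc/sys/kernel/hostname) and returns its whitespace-separated fields.
+// SysctlInts below does the same but parses each field as an integer.
+//
+// A usage sketch (editorial example; "kernel.pid_max" is a common Linux sysctl,
+// not something defined by this package):
+//
+//    fs, _ := procfs.NewFS(procfs.DefaultMountPoint)
+//    ints, err := fs.SysctlInts("kernel.pid_max")
+//    if err == nil && len(ints) > 0 {
+//        fmt.Println("pid_max:", ints[0])
+//    }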
+func (fs FS) SysctlStrings(sysctl string) ([]string, error) {
+ value, err := util.SysReadFile(fs.proc.Path("sys", sysctlToPath(sysctl)))
+ if err != nil {
+ return nil, err
+ }
+ return strings.Fields(value), nil
+
+}
+
+func (fs FS) SysctlInts(sysctl string) ([]int, error) {
+ fields, err := fs.SysctlStrings(sysctl)
+ if err != nil {
+ return nil, err
+ }
+
+ values := make([]int, len(fields))
+ for i, f := range fields {
+ vp := util.NewValueParser(f)
+ values[i] = vp.Int()
+ if err := vp.Err(); err != nil {
+ return nil, fmt.Errorf("%w: field %d in sysctl %s is not a valid int: %w", ErrFileParse, i, sysctl, err)
+ }
+ }
+ return values, nil
+}
diff --git a/vendor/github.com/prometheus/procfs/schedstat.go b/vendor/github.com/prometheus/procfs/schedstat.go
new file mode 100644
index 0000000..5f7f32d
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/schedstat.go
@@ -0,0 +1,121 @@
+// Copyright 2019 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+ "bufio"
+ "errors"
+ "os"
+ "regexp"
+ "strconv"
+)
+
+var (
+ cpuLineRE = regexp.MustCompile(`cpu(\d+) (\d+) (\d+) (\d+) (\d+) (\d+) (\d+) (\d+) (\d+) (\d+)`)
+ procLineRE = regexp.MustCompile(`(\d+) (\d+) (\d+)`)
+)
+
+// Schedstat contains scheduler statistics from /proc/schedstat
+//
+// See
+// https://www.kernel.org/doc/Documentation/scheduler/sched-stats.txt
+// for a detailed description of what these numbers mean.
+//
+// Note the current kernel documentation claims some of the time units are in
+// jiffies when they are actually in nanoseconds since 2.6.23 with the
+// introduction of CFS. A fix to the documentation is pending. See
+// https://lore.kernel.org/patchwork/project/lkml/list/?series=403473
+type Schedstat struct {
+ CPUs []*SchedstatCPU
+}
+
+// SchedstatCPU contains the values from one "cpu" line.
+type SchedstatCPU struct {
+ CPUNum string
+
+ RunningNanoseconds uint64
+ WaitingNanoseconds uint64
+ RunTimeslices uint64
+}
+
+// ProcSchedstat contains the values from `/proc/<pid>/schedstat`.
+type ProcSchedstat struct {
+ RunningNanoseconds uint64
+ WaitingNanoseconds uint64
+ RunTimeslices uint64
+}
+
+// Schedstat reads data from `/proc/schedstat`.
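+//
+// A usage sketch (editorial example; it assumes the DefaultMountPoint constant
+// defined elsewhere in this package):
+//
+//    fs, _ := procfs.NewFS(procfs.DefaultMountPoint)
+//    sched, err := fs.Schedstat()
+//    if err == nil {
+//        for _, cpu := range sched.CPUs {
+//            fmt.Printf("cpu%s running=%dns\n", cpu.CPUNum, cpu.RunningNanoseconds)
+//        }
+//    }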
+func (fs FS) Schedstat() (*Schedstat, error) {
+ file, err := os.Open(fs.proc.Path("schedstat"))
+ if err != nil {
+ return nil, err
+ }
+ defer file.Close()
+
+ stats := &Schedstat{}
+ scanner := bufio.NewScanner(file)
+
+ for scanner.Scan() {
+ match := cpuLineRE.FindStringSubmatch(scanner.Text())
+ if match != nil {
+ cpu := &SchedstatCPU{}
+ cpu.CPUNum = match[1]
+
+ cpu.RunningNanoseconds, err = strconv.ParseUint(match[8], 10, 64)
+ if err != nil {
+ continue
+ }
+
+ cpu.WaitingNanoseconds, err = strconv.ParseUint(match[9], 10, 64)
+ if err != nil {
+ continue
+ }
+
+ cpu.RunTimeslices, err = strconv.ParseUint(match[10], 10, 64)
+ if err != nil {
+ continue
+ }
+
+ stats.CPUs = append(stats.CPUs, cpu)
+ }
+ }
+
+ return stats, nil
+}
+
+func parseProcSchedstat(contents string) (ProcSchedstat, error) {
+ var (
+ stats ProcSchedstat
+ err error
+ )
+ match := procLineRE.FindStringSubmatch(contents)
+
+ if match != nil {
+ stats.RunningNanoseconds, err = strconv.ParseUint(match[1], 10, 64)
+ if err != nil {
+ return stats, err
+ }
+
+ stats.WaitingNanoseconds, err = strconv.ParseUint(match[2], 10, 64)
+ if err != nil {
+ return stats, err
+ }
+
+ stats.RunTimeslices, err = strconv.ParseUint(match[3], 10, 64)
+ return stats, err
+ }
+
+ return stats, errors.New("could not parse schedstat")
+}
diff --git a/vendor/github.com/prometheus/procfs/slab.go b/vendor/github.com/prometheus/procfs/slab.go
new file mode 100644
index 0000000..8611c90
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/slab.go
@@ -0,0 +1,151 @@
+// Copyright 2020 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "regexp"
+ "strconv"
+ "strings"
+
+ "github.com/prometheus/procfs/internal/util"
+)
+
+var (
+ slabSpace = regexp.MustCompile(`\s+`)
+ slabVer = regexp.MustCompile(`slabinfo -`)
+ slabHeader = regexp.MustCompile(`# name`)
+)
+
+// Slab represents a slab pool in the kernel.
+type Slab struct {
+ Name string
+ ObjActive int64
+ ObjNum int64
+ ObjSize int64
+ ObjPerSlab int64
+ PagesPerSlab int64
+ // tunables
+ Limit int64
+ Batch int64
+ SharedFactor int64
+ SlabActive int64
+ SlabNum int64
+ SharedAvail int64
+}
+
+// SlabInfo represents info for all slabs.
+type SlabInfo struct {
+ Slabs []*Slab
+}
+
+func shouldParseSlab(line string) bool {
+ if slabVer.MatchString(line) {
+ return false
+ }
+ if slabHeader.MatchString(line) {
+ return false
+ }
+ return true
+}
+
+// parseV21SlabEntry is used to parse a line from /proc/slabinfo version 2.1.
+func parseV21SlabEntry(line string) (*Slab, error) {
+ // First cleanup whitespace.
+ l := slabSpace.ReplaceAllString(line, " ")
+ s := strings.Split(l, " ")
+ if len(s) != 16 {
+ return nil, fmt.Errorf("%w: unable to parse: %q", ErrFileParse, line)
+ }
+ var err error
+ i := &Slab{Name: s[0]}
+ i.ObjActive, err = strconv.ParseInt(s[1], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ i.ObjNum, err = strconv.ParseInt(s[2], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ i.ObjSize, err = strconv.ParseInt(s[3], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ i.ObjPerSlab, err = strconv.ParseInt(s[4], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ i.PagesPerSlab, err = strconv.ParseInt(s[5], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ i.Limit, err = strconv.ParseInt(s[8], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ i.Batch, err = strconv.ParseInt(s[9], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ i.SharedFactor, err = strconv.ParseInt(s[10], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ i.SlabActive, err = strconv.ParseInt(s[13], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ i.SlabNum, err = strconv.ParseInt(s[14], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ i.SharedAvail, err = strconv.ParseInt(s[15], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ return i, nil
+}
+
+// parseSlabInfo21 is used to parse a slabinfo 2.1 file.
+func parseSlabInfo21(r *bytes.Reader) (SlabInfo, error) {
+ scanner := bufio.NewScanner(r)
+ s := SlabInfo{Slabs: []*Slab{}}
+ for scanner.Scan() {
+ line := scanner.Text()
+ if !shouldParseSlab(line) {
+ continue
+ }
+ slab, err := parseV21SlabEntry(line)
+ if err != nil {
+ return s, err
+ }
+ s.Slabs = append(s.Slabs, slab)
+ }
+ return s, nil
+}
+
+// SlabInfo reads data from `/proc/slabinfo`.
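+//
+// Note that /proc/slabinfo is usually readable only by root. A usage sketch
+// (editorial example):
+//
+//    fs, _ := procfs.NewFS(procfs.DefaultMountPoint)
+//    info, err := fs.SlabInfo()
+//    if err == nil {
+//        for _, slab := range info.Slabs {
+//            fmt.Printf("%s: %d active objects\n", slab.Name, slab.ObjActive)
+//        }
+//    }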
+func (fs FS) SlabInfo() (SlabInfo, error) {
+ // TODO: Consider passing options to allow for parsing different
+ // slabinfo versions. However, slabinfo 2.1 has been stable since
+ // kernel 2.6.10.
+ data, err := util.ReadFileNoStat(fs.proc.Path("slabinfo"))
+ if err != nil {
+ return SlabInfo{}, err
+ }
+
+ return parseSlabInfo21(bytes.NewReader(data))
+}
diff --git a/vendor/github.com/prometheus/procfs/softirqs.go b/vendor/github.com/prometheus/procfs/softirqs.go
new file mode 100644
index 0000000..28708e0
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/softirqs.go
@@ -0,0 +1,160 @@
+// Copyright 2022 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "io"
+ "strconv"
+ "strings"
+
+ "github.com/prometheus/procfs/internal/util"
+)
+
+// Softirqs represents the softirq statistics.
+type Softirqs struct {
+ Hi []uint64
+ Timer []uint64
+ NetTx []uint64
+ NetRx []uint64
+ Block []uint64
+ IRQPoll []uint64
+ Tasklet []uint64
+ Sched []uint64
+ HRTimer []uint64
+ RCU []uint64
+}
+
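+// Softirqs reads per-CPU softirq counts from /proc/softirqs.
+//
+// A usage sketch (editorial example):
+//
+//    fs, _ := procfs.NewFS(procfs.DefaultMountPoint)
+//    irqs, err := fs.Softirqs()
+//    if err == nil && len(irqs.NetRx) > 0 {
+//        fmt.Printf("NET_RX on cpu0: %d\n", irqs.NetRx[0])
+//    }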
+func (fs FS) Softirqs() (Softirqs, error) {
+ fileName := fs.proc.Path("softirqs")
+ data, err := util.ReadFileNoStat(fileName)
+ if err != nil {
+ return Softirqs{}, err
+ }
+
+ reader := bytes.NewReader(data)
+
+ return parseSoftirqs(reader)
+}
+
+func parseSoftirqs(r io.Reader) (Softirqs, error) {
+ var (
+ softirqs = Softirqs{}
+ scanner = bufio.NewScanner(r)
+ )
+
+ if !scanner.Scan() {
+ return Softirqs{}, fmt.Errorf("%w: softirqs empty", ErrFileRead)
+ }
+
+ for scanner.Scan() {
+ parts := strings.Fields(scanner.Text())
+ var err error
+
+ // require at least one cpu
+ if len(parts) < 2 {
+ continue
+ }
+ switch {
+ case parts[0] == "HI:":
+ perCPU := parts[1:]
+ softirqs.Hi = make([]uint64, len(perCPU))
+ for i, count := range perCPU {
+ if softirqs.Hi[i], err = strconv.ParseUint(count, 10, 64); err != nil {
+ return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (HI%d): %w", ErrFileParse, count, i, err)
+ }
+ }
+ case parts[0] == "TIMER:":
+ perCPU := parts[1:]
+ softirqs.Timer = make([]uint64, len(perCPU))
+ for i, count := range perCPU {
+ if softirqs.Timer[i], err = strconv.ParseUint(count, 10, 64); err != nil {
+ return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (TIMER%d): %w", ErrFileParse, count, i, err)
+ }
+ }
+ case parts[0] == "NET_TX:":
+ perCPU := parts[1:]
+ softirqs.NetTx = make([]uint64, len(perCPU))
+ for i, count := range perCPU {
+ if softirqs.NetTx[i], err = strconv.ParseUint(count, 10, 64); err != nil {
+ return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (NET_TX%d): %w", ErrFileParse, count, i, err)
+ }
+ }
+ case parts[0] == "NET_RX:":
+ perCPU := parts[1:]
+ softirqs.NetRx = make([]uint64, len(perCPU))
+ for i, count := range perCPU {
+ if softirqs.NetRx[i], err = strconv.ParseUint(count, 10, 64); err != nil {
+ return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (NET_RX%d): %w", ErrFileParse, count, i, err)
+ }
+ }
+ case parts[0] == "BLOCK:":
+ perCPU := parts[1:]
+ softirqs.Block = make([]uint64, len(perCPU))
+ for i, count := range perCPU {
+ if softirqs.Block[i], err = strconv.ParseUint(count, 10, 64); err != nil {
+ return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (BLOCK%d): %w", ErrFileParse, count, i, err)
+ }
+ }
+ case parts[0] == "IRQ_POLL:":
+ perCPU := parts[1:]
+ softirqs.IRQPoll = make([]uint64, len(perCPU))
+ for i, count := range perCPU {
+ if softirqs.IRQPoll[i], err = strconv.ParseUint(count, 10, 64); err != nil {
+ return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (IRQ_POLL%d): %w", ErrFileParse, count, i, err)
+ }
+ }
+ case parts[0] == "TASKLET:":
+ perCPU := parts[1:]
+ softirqs.Tasklet = make([]uint64, len(perCPU))
+ for i, count := range perCPU {
+ if softirqs.Tasklet[i], err = strconv.ParseUint(count, 10, 64); err != nil {
+ return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (TASKLET%d): %w", ErrFileParse, count, i, err)
+ }
+ }
+ case parts[0] == "SCHED:":
+ perCPU := parts[1:]
+ softirqs.Sched = make([]uint64, len(perCPU))
+ for i, count := range perCPU {
+ if softirqs.Sched[i], err = strconv.ParseUint(count, 10, 64); err != nil {
+ return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (SCHED%d): %w", ErrFileParse, count, i, err)
+ }
+ }
+ case parts[0] == "HRTIMER:":
+ perCPU := parts[1:]
+ softirqs.HRTimer = make([]uint64, len(perCPU))
+ for i, count := range perCPU {
+ if softirqs.HRTimer[i], err = strconv.ParseUint(count, 10, 64); err != nil {
+ return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (HRTIMER%d): %w", ErrFileParse, count, i, err)
+ }
+ }
+ case parts[0] == "RCU:":
+ perCPU := parts[1:]
+ softirqs.RCU = make([]uint64, len(perCPU))
+ for i, count := range perCPU {
+ if softirqs.RCU[i], err = strconv.ParseUint(count, 10, 64); err != nil {
+ return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (RCU%d): %w", ErrFileParse, count, i, err)
+ }
+ }
+ }
+ }
+
+ if err := scanner.Err(); err != nil {
+ return Softirqs{}, fmt.Errorf("%w: couldn't parse softirqs: %w", ErrFileParse, err)
+ }
+
+ return softirqs, scanner.Err()
+}
diff --git a/vendor/github.com/prometheus/procfs/stat.go b/vendor/github.com/prometheus/procfs/stat.go
new file mode 100644
index 0000000..e36b41c
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/stat.go
@@ -0,0 +1,258 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "io"
+ "strconv"
+ "strings"
+
+ "github.com/prometheus/procfs/internal/fs"
+ "github.com/prometheus/procfs/internal/util"
+)
+
+// CPUStat shows how much time the cpu has spent in various stages.
+type CPUStat struct {
+ User float64
+ Nice float64
+ System float64
+ Idle float64
+ Iowait float64
+ IRQ float64
+ SoftIRQ float64
+ Steal float64
+ Guest float64
+ GuestNice float64
+}
+
+// SoftIRQStat represents the softirq statistics as exported in the procfs stat file.
+// A nice introduction can be found at https://0xax.gitbooks.io/linux-insides/content/interrupts/interrupts-9.html
+// It is possible to get per-cpu stats by reading `/proc/softirqs`.
+type SoftIRQStat struct {
+ Hi uint64
+ Timer uint64
+ NetTx uint64
+ NetRx uint64
+ Block uint64
+ BlockIoPoll uint64
+ Tasklet uint64
+ Sched uint64
+ Hrtimer uint64
+ Rcu uint64
+}
+
+// Stat represents kernel/system statistics.
+type Stat struct {
+ // Boot time in seconds since the Epoch.
+ BootTime uint64
+ // Summed up cpu statistics.
+ CPUTotal CPUStat
+ // Per-CPU statistics.
+ CPU map[int64]CPUStat
+ // Number of times interrupts were handled, which contains numbered and unnumbered IRQs.
+ IRQTotal uint64
+ // Number of times a numbered IRQ was triggered.
+ IRQ []uint64
+ // Number of times a context switch happened.
+ ContextSwitches uint64
+ // Number of times a process was created.
+ ProcessCreated uint64
+ // Number of processes currently running.
+ ProcessesRunning uint64
+ // Number of processes currently blocked (waiting for IO).
+ ProcessesBlocked uint64
+ // Number of times a softirq was scheduled.
+ SoftIRQTotal uint64
+ // Detailed softirq statistics.
+ SoftIRQ SoftIRQStat
+}
+
+// parseCPUStat parses a cpu statistics line and returns the CPUStat struct plus the cpu id (or -1 for the overall sum).
+func parseCPUStat(line string) (CPUStat, int64, error) {
+ cpuStat := CPUStat{}
+ var cpu string
+
+ count, err := fmt.Sscanf(line, "%s %f %f %f %f %f %f %f %f %f %f",
+ &cpu,
+ &cpuStat.User, &cpuStat.Nice, &cpuStat.System, &cpuStat.Idle,
+ &cpuStat.Iowait, &cpuStat.IRQ, &cpuStat.SoftIRQ, &cpuStat.Steal,
+ &cpuStat.Guest, &cpuStat.GuestNice)
+
+ if err != nil && err != io.EOF {
+ return CPUStat{}, -1, fmt.Errorf("%w: couldn't parse %q (cpu): %w", ErrFileParse, line, err)
+ }
+ if count == 0 {
+ return CPUStat{}, -1, fmt.Errorf("%w: couldn't parse %q (cpu): 0 elements parsed", ErrFileParse, line)
+ }
+
+ cpuStat.User /= userHZ
+ cpuStat.Nice /= userHZ
+ cpuStat.System /= userHZ
+ cpuStat.Idle /= userHZ
+ cpuStat.Iowait /= userHZ
+ cpuStat.IRQ /= userHZ
+ cpuStat.SoftIRQ /= userHZ
+ cpuStat.Steal /= userHZ
+ cpuStat.Guest /= userHZ
+ cpuStat.GuestNice /= userHZ
+
+ if cpu == "cpu" {
+ return cpuStat, -1, nil
+ }
+
+ cpuID, err := strconv.ParseInt(cpu[3:], 10, 64)
+ if err != nil {
+ return CPUStat{}, -1, fmt.Errorf("%w: couldn't parse %q (cpu/cpuid): %w", ErrFileParse, line, err)
+ }
+
+ return cpuStat, cpuID, nil
+}
+
+// parseSoftIRQStat parses a softirq line.
+func parseSoftIRQStat(line string) (SoftIRQStat, uint64, error) {
+ softIRQStat := SoftIRQStat{}
+ var total uint64
+ var prefix string
+
+ _, err := fmt.Sscanf(line, "%s %d %d %d %d %d %d %d %d %d %d %d",
+ &prefix, &total,
+ &softIRQStat.Hi, &softIRQStat.Timer, &softIRQStat.NetTx, &softIRQStat.NetRx,
+ &softIRQStat.Block, &softIRQStat.BlockIoPoll,
+ &softIRQStat.Tasklet, &softIRQStat.Sched,
+ &softIRQStat.Hrtimer, &softIRQStat.Rcu)
+
+ if err != nil {
+ return SoftIRQStat{}, 0, fmt.Errorf("%w: couldn't parse %q (softirq): %w", ErrFileParse, line, err)
+ }
+
+ return softIRQStat, total, nil
+}
+
+// NewStat returns information about current cpu/process statistics.
+// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt
+//
+// Deprecated: Use fs.Stat() instead.
+func NewStat() (Stat, error) {
+ fs, err := NewFS(fs.DefaultProcMountPoint)
+ if err != nil {
+ return Stat{}, err
+ }
+ return fs.Stat()
+}
+
+// NewStat returns information about current cpu/process statistics.
+// See: https://www.kernel.org/doc/Documentation/filesystems/proc.txt
+//
+// Deprecated: Use fs.Stat() instead.
+func (fs FS) NewStat() (Stat, error) {
+ return fs.Stat()
+}
+
+// Stat returns information about current cpu/process statistics.
+// See: https://www.kernel.org/doc/Documentation/filesystems/proc.txt
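+//
+// A usage sketch (editorial example; it assumes the DefaultMountPoint constant
+// defined elsewhere in this package):
+//
+//    fs, _ := procfs.NewFS(procfs.DefaultMountPoint)
+//    stat, err := fs.Stat()
+//    if err == nil {
+//        fmt.Printf("boot time=%d ctxt switches=%d\n", stat.BootTime, stat.ContextSwitches)
+//    }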
+func (fs FS) Stat() (Stat, error) {
+ fileName := fs.proc.Path("stat")
+ data, err := util.ReadFileNoStat(fileName)
+ if err != nil {
+ return Stat{}, err
+ }
+ procStat, err := parseStat(bytes.NewReader(data), fileName)
+ if err != nil {
+ return Stat{}, err
+ }
+ return procStat, nil
+}
+
+// parseStat parses the metrics from /proc/stat.
+func parseStat(r io.Reader, fileName string) (Stat, error) {
+ var (
+ scanner = bufio.NewScanner(r)
+ stat = Stat{
+ CPU: make(map[int64]CPUStat),
+ }
+ err error
+ )
+
+ // Increase default scanner buffer to handle very long `intr` lines.
+ buf := make([]byte, 0, 8*1024)
+ scanner.Buffer(buf, 1024*1024)
+
+ for scanner.Scan() {
+ line := scanner.Text()
+ parts := strings.Fields(line)
+ // require at least a name and a value
+ if len(parts) < 2 {
+ continue
+ }
+ switch {
+ case parts[0] == "btime":
+ if stat.BootTime, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
+ return Stat{}, fmt.Errorf("%w: couldn't parse %q (btime): %w", ErrFileParse, parts[1], err)
+ }
+ case parts[0] == "intr":
+ if stat.IRQTotal, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
+ return Stat{}, fmt.Errorf("%w: couldn't parse %q (intr): %w", ErrFileParse, parts[1], err)
+ }
+ numberedIRQs := parts[2:]
+ stat.IRQ = make([]uint64, len(numberedIRQs))
+ for i, count := range numberedIRQs {
+ if stat.IRQ[i], err = strconv.ParseUint(count, 10, 64); err != nil {
+ return Stat{}, fmt.Errorf("%w: couldn't parse %q (intr%d): %w", ErrFileParse, count, i, err)
+ }
+ }
+ case parts[0] == "ctxt":
+ if stat.ContextSwitches, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
+ return Stat{}, fmt.Errorf("%w: couldn't parse %q (ctxt): %w", ErrFileParse, parts[1], err)
+ }
+ case parts[0] == "processes":
+ if stat.ProcessCreated, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
+ return Stat{}, fmt.Errorf("%w: couldn't parse %q (processes): %w", ErrFileParse, parts[1], err)
+ }
+ case parts[0] == "procs_running":
+ if stat.ProcessesRunning, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
+ return Stat{}, fmt.Errorf("%w: couldn't parse %q (procs_running): %w", ErrFileParse, parts[1], err)
+ }
+ case parts[0] == "procs_blocked":
+ if stat.ProcessesBlocked, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
+ return Stat{}, fmt.Errorf("%w: couldn't parse %q (procs_blocked): %w", ErrFileParse, parts[1], err)
+ }
+ case parts[0] == "softirq":
+ softIRQStats, total, err := parseSoftIRQStat(line)
+ if err != nil {
+ return Stat{}, err
+ }
+ stat.SoftIRQTotal = total
+ stat.SoftIRQ = softIRQStats
+ case strings.HasPrefix(parts[0], "cpu"):
+ cpuStat, cpuID, err := parseCPUStat(line)
+ if err != nil {
+ return Stat{}, err
+ }
+ if cpuID == -1 {
+ stat.CPUTotal = cpuStat
+ } else {
+ stat.CPU[cpuID] = cpuStat
+ }
+ }
+ }
+
+ if err := scanner.Err(); err != nil {
+ return Stat{}, fmt.Errorf("%w: couldn't parse %q: %w", ErrFileParse, fileName, err)
+ }
+
+ return stat, nil
+}
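For context, here is a minimal sketch of how the Stat API above is typically consumed. It assumes a readable /proc mount and keeps error handling to panics for brevity:

```go
package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	// Open the default proc filesystem (usually mounted at /proc).
	fs, err := procfs.NewFS(procfs.DefaultMountPoint)
	if err != nil {
		panic(err)
	}

	// Read and parse /proc/stat.
	stat, err := fs.Stat()
	if err != nil {
		panic(err)
	}

	fmt.Println("boot time (unix):", stat.BootTime)
	fmt.Println("context switches:", stat.ContextSwitches)
	// CPU times have already been divided by USER_HZ, i.e. they are in seconds.
	fmt.Printf("total user CPU seconds: %.2f\n", stat.CPUTotal.User)
}
```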
diff --git a/vendor/github.com/prometheus/procfs/swaps.go b/vendor/github.com/prometheus/procfs/swaps.go
new file mode 100644
index 0000000..65fec83
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/swaps.go
@@ -0,0 +1,89 @@
+// Copyright 2019 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "strconv"
+ "strings"
+
+ "github.com/prometheus/procfs/internal/util"
+)
+
+// Swap represents an entry in /proc/swaps.
+type Swap struct {
+ Filename string
+ Type string
+ Size int
+ Used int
+ Priority int
+}
+
+// Swaps returns a slice of all configured swap devices on the system.
+func (fs FS) Swaps() ([]*Swap, error) {
+ data, err := util.ReadFileNoStat(fs.proc.Path("swaps"))
+ if err != nil {
+ return nil, err
+ }
+ return parseSwaps(data)
+}
+
+func parseSwaps(info []byte) ([]*Swap, error) {
+ swaps := []*Swap{}
+ scanner := bufio.NewScanner(bytes.NewReader(info))
+ scanner.Scan() // ignore header line
+ for scanner.Scan() {
+ swapString := scanner.Text()
+ parsedSwap, err := parseSwapString(swapString)
+ if err != nil {
+ return nil, err
+ }
+ swaps = append(swaps, parsedSwap)
+ }
+
+ err := scanner.Err()
+ return swaps, err
+}
+
+func parseSwapString(swapString string) (*Swap, error) {
+ var err error
+
+ swapFields := strings.Fields(swapString)
+ swapLength := len(swapFields)
+ if swapLength < 5 {
+ return nil, fmt.Errorf("%w: too few fields in swap string: %s", ErrFileParse, swapString)
+ }
+
+ swap := &Swap{
+ Filename: swapFields[0],
+ Type: swapFields[1],
+ }
+
+ swap.Size, err = strconv.Atoi(swapFields[2])
+ if err != nil {
+ return nil, fmt.Errorf("%w: invalid swap size: %s: %w", ErrFileParse, swapFields[2], err)
+ }
+ swap.Used, err = strconv.Atoi(swapFields[3])
+ if err != nil {
+ return nil, fmt.Errorf("%w: invalid swap used: %s: %w", ErrFileParse, swapFields[3], err)
+ }
+ swap.Priority, err = strconv.Atoi(swapFields[4])
+ if err != nil {
+ return nil, fmt.Errorf("%w: invalid swap priority: %s: %w", ErrFileParse, swapFields[4], err)
+ }
+
+ return swap, nil
+}
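Similarly, a short sketch of reading swap devices through the Swaps method defined above (error handling kept minimal):

```go
package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS(procfs.DefaultMountPoint)
	if err != nil {
		panic(err)
	}

	swaps, err := fs.Swaps()
	if err != nil {
		panic(err)
	}

	// Size, Used and Priority are taken verbatim from /proc/swaps.
	for _, s := range swaps {
		fmt.Printf("%s (%s): %d/%d used, priority %d\n",
			s.Filename, s.Type, s.Used, s.Size, s.Priority)
	}
}
```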
diff --git a/vendor/github.com/prometheus/procfs/thread.go b/vendor/github.com/prometheus/procfs/thread.go
new file mode 100644
index 0000000..80e0e94
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/thread.go
@@ -0,0 +1,80 @@
+// Copyright 2022 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+ "fmt"
+ "os"
+ "strconv"
+
+ fsi "github.com/prometheus/procfs/internal/fs"
+)
+
+// Provide access to /proc/PID/task/TID files, for thread-specific values. Since
+// such files have the same structure as /proc/PID/ ones, the data structures
+// and the parsers for the latter may be reused.
+
+// AllThreads returns a list of all currently available threads under /proc/PID.
+func AllThreads(pid int) (Procs, error) {
+ fs, err := NewFS(DefaultMountPoint)
+ if err != nil {
+ return Procs{}, err
+ }
+ return fs.AllThreads(pid)
+}
+
+// AllThreads returns a list of all currently available threads for PID.
+func (fs FS) AllThreads(pid int) (Procs, error) {
+ taskPath := fs.proc.Path(strconv.Itoa(pid), "task")
+ d, err := os.Open(taskPath)
+ if err != nil {
+ return Procs{}, err
+ }
+ defer d.Close()
+
+ names, err := d.Readdirnames(-1)
+ if err != nil {
+ return Procs{}, fmt.Errorf("%w: could not read %q: %w", ErrFileRead, d.Name(), err)
+ }
+
+ t := Procs{}
+ for _, n := range names {
+ tid, err := strconv.ParseInt(n, 10, 64)
+ if err != nil {
+ continue
+ }
+
+ t = append(t, Proc{PID: int(tid), fs: FS{fsi.FS(taskPath), fs.isReal}})
+ }
+
+ return t, nil
+}
+
+// Thread returns a process for a given PID, TID.
+func (fs FS) Thread(pid, tid int) (Proc, error) {
+ taskPath := fs.proc.Path(strconv.Itoa(pid), "task")
+ if _, err := os.Stat(taskPath); err != nil {
+ return Proc{}, err
+ }
+ return Proc{PID: tid, fs: FS{fsi.FS(taskPath), fs.isReal}}, nil
+}
+
+// Thread returns a process for a given TID of Proc.
+func (proc Proc) Thread(tid int) (Proc, error) {
+ tfs := FS{fsi.FS(proc.path("task")), proc.fs.isReal}
+ if _, err := os.Stat(tfs.proc.Path(strconv.Itoa(tid))); err != nil {
+ return Proc{}, err
+ }
+ return Proc{PID: tid, fs: tfs}, nil
+}
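A small usage sketch for the thread helpers above, listing the threads of the current process (assumes /proc is available):

```go
package main

import (
	"fmt"
	"os"

	"github.com/prometheus/procfs"
)

func main() {
	// AllThreads reads /proc/<pid>/task and returns one Proc per thread.
	threads, err := procfs.AllThreads(os.Getpid())
	if err != nil {
		panic(err)
	}

	for _, t := range threads {
		// For a thread, the PID field holds the thread ID (TID).
		fmt.Println("TID:", t.PID)
	}
}
```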
diff --git a/vendor/github.com/prometheus/procfs/ttar b/vendor/github.com/prometheus/procfs/ttar
new file mode 100644
index 0000000..19ef02b
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/ttar
@@ -0,0 +1,413 @@
+#!/usr/bin/env bash
+
+# Purpose: plain text tar format
+# Limitations: - only suitable for text files, directories, and symlinks
+# - stores only filename, content, and mode
+# - not designed for untrusted input
+#
+# Note: must work with bash version 3.2 (macOS)
+
+# Copyright 2017 Roger Luethi
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -o errexit -o nounset
+
+# Sanitize environment (for instance, standard sorting of glob matches)
+export LC_ALL=C
+
+path=""
+CMD=""
+ARG_STRING="$*"
+
+#------------------------------------------------------------------------------
+# Not all sed implementations can work on null bytes. In order to make ttar
+# work out of the box on macOS, use Python as a stream editor.
+
+USE_PYTHON=0
+
+PYTHON_CREATE_FILTER=$(cat << 'PCF'
+#!/usr/bin/env python
+
+import re
+import sys
+
+for line in sys.stdin:
+ line = re.sub(r'EOF', r'\EOF', line)
+ line = re.sub(r'NULLBYTE', r'\NULLBYTE', line)
+ line = re.sub('\x00', r'NULLBYTE', line)
+ sys.stdout.write(line)
+PCF
+)
+
+PYTHON_EXTRACT_FILTER=$(cat << 'PEF'
+#!/usr/bin/env python
+
+import re
+import sys
+
+for line in sys.stdin:
+ line = re.sub(r'(?<!\\)EOF', r'', line)
+ line = re.sub(r'(?<!\\)NULLBYTE', '\x00', line)
+ line = re.sub(r'\\NULLBYTE', 'NULLBYTE', line)
+ line = re.sub(r'\\EOF', 'EOF', line)
+ sys.stdout.write(line)
+PEF
+)
+
+function test_environment {
+ if [[ "$(echo "a" | sed 's/a/\x0/' | wc -c)" -ne 2 ]]; then
+ echo "WARNING sed unable to handle null bytes, using Python (slow)."
+ if ! which python >/dev/null; then
+ echo "ERROR Python not found. Aborting."
+ exit 2
+ fi
+ USE_PYTHON=1
+ fi
+}
+
+#------------------------------------------------------------------------------
+
+function usage {
+ bname=$(basename "$0")
+ cat << USAGE
+Usage: $bname [-C <DIR>] -c -f <ARCHIVE> <FILE...> (create archive)
+ $bname -t -f <ARCHIVE> (list archive contents)
+ $bname [-C <DIR>] -x -f <ARCHIVE> (extract archive)
+
+Options:
+ -C <DIR> (change directory)
+ -v (verbose)
+ --recursive-unlink (recursively delete existing directory if path
+ collides with file or directory to extract)
+
+Example: Change to sysfs directory, create ttar file from fixtures directory
+ $bname -C sysfs -c -f sysfs/fixtures.ttar fixtures/
+USAGE
+exit "$1"
+}
+
+function vecho {
+ if [ "${VERBOSE:-}" == "yes" ]; then
+ echo >&7 "$@"
+ fi
+}
+
+function set_cmd {
+ if [ -n "$CMD" ]; then
+ echo "ERROR: more than one command given"
+ echo
+ usage 2
+ fi
+ CMD=$1
+}
+
+unset VERBOSE
+unset RECURSIVE_UNLINK
+
+while getopts :cf:-:htxvC: opt; do
+ case $opt in
+ c)
+ set_cmd "create"
+ ;;
+ f)
+ ARCHIVE=$OPTARG
+ ;;
+ h)
+ usage 0
+ ;;
+ t)
+ set_cmd "list"
+ ;;
+ x)
+ set_cmd "extract"
+ ;;
+ v)
+ VERBOSE=yes
+ exec 7>&1
+ ;;
+ C)
+ CDIR=$OPTARG
+ ;;
+ -)
+ case $OPTARG in
+ recursive-unlink)
+ RECURSIVE_UNLINK="yes"
+ ;;
+ *)
+ echo -e "Error: invalid option -$OPTARG"
+ echo
+ usage 1
+ ;;
+ esac
+ ;;
+ *)
+ echo >&2 "ERROR: invalid option -$OPTARG"
+ echo
+ usage 1
+ ;;
+ esac
+done
+
+# Remove processed options from arguments
+shift $(( OPTIND - 1 ));
+
+if [ "${CMD:-}" == "" ]; then
+ echo >&2 "ERROR: no command given"
+ echo
+ usage 1
+elif [ "${ARCHIVE:-}" == "" ]; then
+ echo >&2 "ERROR: no archive name given"
+ echo
+ usage 1
+fi
+
+function list {
+ local path=""
+ local size=0
+ local line_no=0
+ local ttar_file=$1
+ if [ -n "${2:-}" ]; then
+ echo >&2 "ERROR: too many arguments."
+ echo
+ usage 1
+ fi
+ if [ ! -e "$ttar_file" ]; then
+ echo >&2 "ERROR: file not found ($ttar_file)"
+ echo
+ usage 1
+ fi
+ while read -r line; do
+ line_no=$(( line_no + 1 ))
+ if [ $size -gt 0 ]; then
+ size=$(( size - 1 ))
+ continue
+ fi
+ if [[ $line =~ ^Path:\ (.*)$ ]]; then
+ path=${BASH_REMATCH[1]}
+ elif [[ $line =~ ^Lines:\ (.*)$ ]]; then
+ size=${BASH_REMATCH[1]}
+ echo "$path"
+ elif [[ $line =~ ^Directory:\ (.*)$ ]]; then
+ path=${BASH_REMATCH[1]}
+ echo "$path/"
+ elif [[ $line =~ ^SymlinkTo:\ (.*)$ ]]; then
+ echo "$path -> ${BASH_REMATCH[1]}"
+ fi
+ done < "$ttar_file"
+}
+
+function extract {
+ local path=""
+ local size=0
+ local line_no=0
+ local ttar_file=$1
+ if [ -n "${2:-}" ]; then
+ echo >&2 "ERROR: too many arguments."
+ echo
+ usage 1
+ fi
+ if [ ! -e "$ttar_file" ]; then
+ echo >&2 "ERROR: file not found ($ttar_file)"
+ echo
+ usage 1
+ fi
+ while IFS= read -r line; do
+ line_no=$(( line_no + 1 ))
+ local eof_without_newline
+ if [ "$size" -gt 0 ]; then
+ if [[ "$line" =~ [^\\]EOF ]]; then
+ # An EOF not preceded by a backslash indicates that the line
+ # does not end with a newline
+ eof_without_newline=1
+ else
+ eof_without_newline=0
+ fi
+ # Replace NULLBYTE with null byte if at beginning of line
+ # Replace NULLBYTE with null byte unless preceded by backslash
+ # Remove one backslash in front of NULLBYTE (if any)
+ # Remove EOF unless preceded by backslash
+ # Remove one backslash in front of EOF
+ if [ $USE_PYTHON -eq 1 ]; then
+ echo -n "$line" | python -c "$PYTHON_EXTRACT_FILTER" >> "$path"
+ else
+ # The repeated pattern makes up for sed's lack of negative
+ # lookbehind assertions (for consecutive null bytes).
+ echo -n "$line" | \
+ sed -e 's/^NULLBYTE/\x0/g;
+ s/\([^\\]\)NULLBYTE/\1\x0/g;
+ s/\([^\\]\)NULLBYTE/\1\x0/g;
+ s/\\NULLBYTE/NULLBYTE/g;
+ s/\([^\\]\)EOF/\1/g;
+ s/\\EOF/EOF/g;
+ ' >> "$path"
+ fi
+ if [[ "$eof_without_newline" -eq 0 ]]; then
+ echo >> "$path"
+ fi
+ size=$(( size - 1 ))
+ continue
+ fi
+ if [[ $line =~ ^Path:\ (.*)$ ]]; then
+ path=${BASH_REMATCH[1]}
+ if [ -L "$path" ]; then
+ rm "$path"
+ elif [ -d "$path" ]; then
+ if [ "${RECURSIVE_UNLINK:-}" == "yes" ]; then
+ rm -r "$path"
+ else
+ # Safe because symlinks to directories are dealt with above
+ rmdir "$path"
+ fi
+ elif [ -e "$path" ]; then
+ rm "$path"
+ fi
+ elif [[ $line =~ ^Lines:\ (.*)$ ]]; then
+ size=${BASH_REMATCH[1]}
+ # Create file even if it is zero-length.
+ touch "$path"
+ vecho " $path"
+ elif [[ $line =~ ^Mode:\ (.*)$ ]]; then
+ mode=${BASH_REMATCH[1]}
+ chmod "$mode" "$path"
+ vecho "$mode"
+ elif [[ $line =~ ^Directory:\ (.*)$ ]]; then
+ path=${BASH_REMATCH[1]}
+ mkdir -p "$path"
+ vecho " $path/"
+ elif [[ $line =~ ^SymlinkTo:\ (.*)$ ]]; then
+ ln -s "${BASH_REMATCH[1]}" "$path"
+ vecho " $path -> ${BASH_REMATCH[1]}"
+ elif [[ $line =~ ^# ]]; then
+ # Ignore comments between files
+ continue
+ else
+ echo >&2 "ERROR: Unknown keyword on line $line_no: $line"
+ exit 1
+ fi
+ done < "$ttar_file"
+}
+
+function div {
+ echo "# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -" \
+ "- - - - - -"
+}
+
+function get_mode {
+ local mfile=$1
+ if [ -z "${STAT_OPTION:-}" ]; then
+ if stat -c '%a' "$mfile" >/dev/null 2>&1; then
+ # GNU stat
+ STAT_OPTION='-c'
+ STAT_FORMAT='%a'
+ else
+ # BSD stat
+ STAT_OPTION='-f'
+ # Octal output, user/group/other (omit file type, sticky bit)
+ STAT_FORMAT='%OLp'
+ fi
+ fi
+ stat "${STAT_OPTION}" "${STAT_FORMAT}" "$mfile"
+}
+
+function _create {
+ shopt -s nullglob
+ local mode
+ local eof_without_newline
+ while (( "$#" )); do
+ file=$1
+ if [ -L "$file" ]; then
+ echo "Path: $file"
+ symlinkTo=$(readlink "$file")
+ echo "SymlinkTo: $symlinkTo"
+ vecho " $file -> $symlinkTo"
+ div
+ elif [ -d "$file" ]; then
+ # Strip trailing slash (if there is one)
+ file=${file%/}
+ echo "Directory: $file"
+ mode=$(get_mode "$file")
+ echo "Mode: $mode"
+ vecho "$mode $file/"
+ div
+ # Find all files and dirs, including hidden/dot files
+ for x in "$file/"{*,.[^.]*}; do
+ _create "$x"
+ done
+ elif [ -f "$file" ]; then
+ echo "Path: $file"
+ lines=$(wc -l "$file"|awk '{print $1}')
+ eof_without_newline=0
+ if [[ "$(wc -c "$file"|awk '{print $1}')" -gt 0 ]] && \
+ [[ "$(tail -c 1 "$file" | wc -l)" -eq 0 ]]; then
+ eof_without_newline=1
+ lines=$((lines+1))
+ fi
+ echo "Lines: $lines"
+ # Add backslash in front of EOF
+ # Add backslash in front of NULLBYTE
+ # Replace null byte with NULLBYTE
+ if [ $USE_PYTHON -eq 1 ]; then
+ < "$file" python -c "$PYTHON_CREATE_FILTER"
+ else
+ < "$file" \
+ sed 's/EOF/\\EOF/g;
+ s/NULLBYTE/\\NULLBYTE/g;
+ s/\x0/NULLBYTE/g;
+ '
+ fi
+ if [[ "$eof_without_newline" -eq 1 ]]; then
+ # Finish line with EOF to indicate that the original line did
+ # not end with a linefeed
+ echo "EOF"
+ fi
+ mode=$(get_mode "$file")
+ echo "Mode: $mode"
+ vecho "$mode $file"
+ div
+ else
+ echo >&2 "ERROR: file not found ($file in $(pwd))"
+ exit 2
+ fi
+ shift
+ done
+}
+
+function create {
+ ttar_file=$1
+ shift
+ if [ -z "${1:-}" ]; then
+ echo >&2 "ERROR: missing arguments."
+ echo
+ usage 1
+ fi
+ if [ -e "$ttar_file" ]; then
+ rm "$ttar_file"
+ fi
+ exec > "$ttar_file"
+ echo "# Archive created by ttar $ARG_STRING"
+ _create "$@"
+}
+
+test_environment
+
+if [ -n "${CDIR:-}" ]; then
+ if [[ "$ARCHIVE" != /* ]]; then
+ # Relative path: preserve the archive's location before changing
+ # directory
+ ARCHIVE="$(pwd)/$ARCHIVE"
+ fi
+ cd "$CDIR"
+fi
+
+"$CMD" "$ARCHIVE" "$@"
diff --git a/vendor/github.com/prometheus/procfs/vm.go b/vendor/github.com/prometheus/procfs/vm.go
new file mode 100644
index 0000000..51c49d8
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/vm.go
@@ -0,0 +1,212 @@
+// Copyright 2019 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !windows
+// +build !windows
+
+package procfs
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "github.com/prometheus/procfs/internal/util"
+)
+
+// The VM interface is described at
+//
+// https://www.kernel.org/doc/Documentation/sysctl/vm.txt
+//
+// Each setting is exposed as a single file.
+// Each file contains one line with a single numerical value, except lowmem_reserve_ratio which holds an array
+// and numa_zonelist_order (deprecated) which is a string.
+type VM struct {
+ AdminReserveKbytes *int64 // /proc/sys/vm/admin_reserve_kbytes
+ BlockDump *int64 // /proc/sys/vm/block_dump
+ CompactUnevictableAllowed *int64 // /proc/sys/vm/compact_unevictable_allowed
+ DirtyBackgroundBytes *int64 // /proc/sys/vm/dirty_background_bytes
+ DirtyBackgroundRatio *int64 // /proc/sys/vm/dirty_background_ratio
+ DirtyBytes *int64 // /proc/sys/vm/dirty_bytes
+ DirtyExpireCentisecs *int64 // /proc/sys/vm/dirty_expire_centisecs
+ DirtyRatio *int64 // /proc/sys/vm/dirty_ratio
+ DirtytimeExpireSeconds *int64 // /proc/sys/vm/dirtytime_expire_seconds
+ DirtyWritebackCentisecs *int64 // /proc/sys/vm/dirty_writeback_centisecs
+ DropCaches *int64 // /proc/sys/vm/drop_caches
+ ExtfragThreshold *int64 // /proc/sys/vm/extfrag_threshold
+ HugetlbShmGroup *int64 // /proc/sys/vm/hugetlb_shm_group
+ LaptopMode *int64 // /proc/sys/vm/laptop_mode
+ LegacyVaLayout *int64 // /proc/sys/vm/legacy_va_layout
+ LowmemReserveRatio []*int64 // /proc/sys/vm/lowmem_reserve_ratio
+ MaxMapCount *int64 // /proc/sys/vm/max_map_count
+ MemoryFailureEarlyKill *int64 // /proc/sys/vm/memory_failure_early_kill
+ MemoryFailureRecovery *int64 // /proc/sys/vm/memory_failure_recovery
+ MinFreeKbytes *int64 // /proc/sys/vm/min_free_kbytes
+ MinSlabRatio *int64 // /proc/sys/vm/min_slab_ratio
+ MinUnmappedRatio *int64 // /proc/sys/vm/min_unmapped_ratio
+ MmapMinAddr *int64 // /proc/sys/vm/mmap_min_addr
+ NrHugepages *int64 // /proc/sys/vm/nr_hugepages
+ NrHugepagesMempolicy *int64 // /proc/sys/vm/nr_hugepages_mempolicy
+ NrOvercommitHugepages *int64 // /proc/sys/vm/nr_overcommit_hugepages
+ NumaStat *int64 // /proc/sys/vm/numa_stat
+ NumaZonelistOrder string // /proc/sys/vm/numa_zonelist_order
+ OomDumpTasks *int64 // /proc/sys/vm/oom_dump_tasks
+ OomKillAllocatingTask *int64 // /proc/sys/vm/oom_kill_allocating_task
+ OvercommitKbytes *int64 // /proc/sys/vm/overcommit_kbytes
+ OvercommitMemory *int64 // /proc/sys/vm/overcommit_memory
+ OvercommitRatio *int64 // /proc/sys/vm/overcommit_ratio
+ PageCluster *int64 // /proc/sys/vm/page-cluster
+ PanicOnOom *int64 // /proc/sys/vm/panic_on_oom
+ PercpuPagelistFraction *int64 // /proc/sys/vm/percpu_pagelist_fraction
+ StatInterval *int64 // /proc/sys/vm/stat_interval
+ Swappiness *int64 // /proc/sys/vm/swappiness
+ UserReserveKbytes *int64 // /proc/sys/vm/user_reserve_kbytes
+ VfsCachePressure *int64 // /proc/sys/vm/vfs_cache_pressure
+ WatermarkBoostFactor *int64 // /proc/sys/vm/watermark_boost_factor
+ WatermarkScaleFactor *int64 // /proc/sys/vm/watermark_scale_factor
+ ZoneReclaimMode *int64 // /proc/sys/vm/zone_reclaim_mode
+}
+
+// VM reads the VM statistics from the specified `proc` filesystem.
+func (fs FS) VM() (*VM, error) {
+ path := fs.proc.Path("sys/vm")
+ file, err := os.Stat(path)
+ if err != nil {
+ return nil, err
+ }
+ if !file.Mode().IsDir() {
+ return nil, fmt.Errorf("%w: %s is not a directory", ErrFileRead, path)
+ }
+
+ files, err := os.ReadDir(path)
+ if err != nil {
+ return nil, err
+ }
+
+ var vm VM
+ for _, f := range files {
+ if f.IsDir() {
+ continue
+ }
+
+ name := filepath.Join(path, f.Name())
+ // Ignore read errors, as some files in /proc/sys/vm
+ // are write-only.
+ value, err := util.SysReadFile(name)
+ if err != nil {
+ continue
+ }
+ vp := util.NewValueParser(value)
+
+ switch f.Name() {
+ case "admin_reserve_kbytes":
+ vm.AdminReserveKbytes = vp.PInt64()
+ case "block_dump":
+ vm.BlockDump = vp.PInt64()
+ case "compact_unevictable_allowed":
+ vm.CompactUnevictableAllowed = vp.PInt64()
+ case "dirty_background_bytes":
+ vm.DirtyBackgroundBytes = vp.PInt64()
+ case "dirty_background_ratio":
+ vm.DirtyBackgroundRatio = vp.PInt64()
+ case "dirty_bytes":
+ vm.DirtyBytes = vp.PInt64()
+ case "dirty_expire_centisecs":
+ vm.DirtyExpireCentisecs = vp.PInt64()
+ case "dirty_ratio":
+ vm.DirtyRatio = vp.PInt64()
+ case "dirtytime_expire_seconds":
+ vm.DirtytimeExpireSeconds = vp.PInt64()
+ case "dirty_writeback_centisecs":
+ vm.DirtyWritebackCentisecs = vp.PInt64()
+ case "drop_caches":
+ vm.DropCaches = vp.PInt64()
+ case "extfrag_threshold":
+ vm.ExtfragThreshold = vp.PInt64()
+ case "hugetlb_shm_group":
+ vm.HugetlbShmGroup = vp.PInt64()
+ case "laptop_mode":
+ vm.LaptopMode = vp.PInt64()
+ case "legacy_va_layout":
+ vm.LegacyVaLayout = vp.PInt64()
+ case "lowmem_reserve_ratio":
+ stringSlice := strings.Fields(value)
+ pint64Slice := make([]*int64, 0, len(stringSlice))
+ for _, value := range stringSlice {
+ vp := util.NewValueParser(value)
+ pint64Slice = append(pint64Slice, vp.PInt64())
+ }
+ vm.LowmemReserveRatio = pint64Slice
+ case "max_map_count":
+ vm.MaxMapCount = vp.PInt64()
+ case "memory_failure_early_kill":
+ vm.MemoryFailureEarlyKill = vp.PInt64()
+ case "memory_failure_recovery":
+ vm.MemoryFailureRecovery = vp.PInt64()
+ case "min_free_kbytes":
+ vm.MinFreeKbytes = vp.PInt64()
+ case "min_slab_ratio":
+ vm.MinSlabRatio = vp.PInt64()
+ case "min_unmapped_ratio":
+ vm.MinUnmappedRatio = vp.PInt64()
+ case "mmap_min_addr":
+ vm.MmapMinAddr = vp.PInt64()
+ case "nr_hugepages":
+ vm.NrHugepages = vp.PInt64()
+ case "nr_hugepages_mempolicy":
+ vm.NrHugepagesMempolicy = vp.PInt64()
+ case "nr_overcommit_hugepages":
+ vm.NrOvercommitHugepages = vp.PInt64()
+ case "numa_stat":
+ vm.NumaStat = vp.PInt64()
+ case "numa_zonelist_order":
+ vm.NumaZonelistOrder = value
+ case "oom_dump_tasks":
+ vm.OomDumpTasks = vp.PInt64()
+ case "oom_kill_allocating_task":
+ vm.OomKillAllocatingTask = vp.PInt64()
+ case "overcommit_kbytes":
+ vm.OvercommitKbytes = vp.PInt64()
+ case "overcommit_memory":
+ vm.OvercommitMemory = vp.PInt64()
+ case "overcommit_ratio":
+ vm.OvercommitRatio = vp.PInt64()
+ case "page-cluster":
+ vm.PageCluster = vp.PInt64()
+ case "panic_on_oom":
+ vm.PanicOnOom = vp.PInt64()
+ case "percpu_pagelist_fraction":
+ vm.PercpuPagelistFraction = vp.PInt64()
+ case "stat_interval":
+ vm.StatInterval = vp.PInt64()
+ case "swappiness":
+ vm.Swappiness = vp.PInt64()
+ case "user_reserve_kbytes":
+ vm.UserReserveKbytes = vp.PInt64()
+ case "vfs_cache_pressure":
+ vm.VfsCachePressure = vp.PInt64()
+ case "watermark_boost_factor":
+ vm.WatermarkBoostFactor = vp.PInt64()
+ case "watermark_scale_factor":
+ vm.WatermarkScaleFactor = vp.PInt64()
+ case "zone_reclaim_mode":
+ vm.ZoneReclaimMode = vp.PInt64()
+ }
+ if err := vp.Err(); err != nil {
+ return nil, err
+ }
+ }
+
+ return &vm, nil
+}
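A brief sketch of consuming the VM sysctl snapshot above. Most fields are pointers and stay nil when the corresponding file is missing or unreadable, so they are checked before dereferencing:

```go
package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS(procfs.DefaultMountPoint)
	if err != nil {
		panic(err)
	}

	vm, err := fs.VM()
	if err != nil {
		panic(err)
	}

	// Nil means the corresponding /proc/sys/vm file could not be read.
	if vm.Swappiness != nil {
		fmt.Println("vm.swappiness:", *vm.Swappiness)
	}
	if vm.OvercommitMemory != nil {
		fmt.Println("vm.overcommit_memory:", *vm.OvercommitMemory)
	}
}
```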
diff --git a/vendor/github.com/prometheus/procfs/zoneinfo.go b/vendor/github.com/prometheus/procfs/zoneinfo.go
new file mode 100644
index 0000000..e54d94b
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/zoneinfo.go
@@ -0,0 +1,196 @@
+// Copyright 2019 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !windows
+// +build !windows
+
+package procfs
+
+import (
+ "bytes"
+ "fmt"
+ "os"
+ "regexp"
+ "strings"
+
+ "github.com/prometheus/procfs/internal/util"
+)
+
+// Zoneinfo holds info parsed from /proc/zoneinfo.
+type Zoneinfo struct {
+ Node string
+ Zone string
+ NrFreePages *int64
+ Min *int64
+ Low *int64
+ High *int64
+ Scanned *int64
+ Spanned *int64
+ Present *int64
+ Managed *int64
+ NrActiveAnon *int64
+ NrInactiveAnon *int64
+ NrIsolatedAnon *int64
+ NrAnonPages *int64
+ NrAnonTransparentHugepages *int64
+ NrActiveFile *int64
+ NrInactiveFile *int64
+ NrIsolatedFile *int64
+ NrFilePages *int64
+ NrSlabReclaimable *int64
+ NrSlabUnreclaimable *int64
+ NrMlockStack *int64
+ NrKernelStack *int64
+ NrMapped *int64
+ NrDirty *int64
+ NrWriteback *int64
+ NrUnevictable *int64
+ NrShmem *int64
+ NrDirtied *int64
+ NrWritten *int64
+ NumaHit *int64
+ NumaMiss *int64
+ NumaForeign *int64
+ NumaInterleave *int64
+ NumaLocal *int64
+ NumaOther *int64
+ Protection []*int64
+}
+
+var nodeZoneRE = regexp.MustCompile(`(\d+), zone\s+(\w+)`)
+
+// Zoneinfo parses a zoneinfo file (/proc/zoneinfo) and returns a slice of
+// structs containing the relevant info. More information available here:
+// https://www.kernel.org/doc/Documentation/sysctl/vm.txt
+func (fs FS) Zoneinfo() ([]Zoneinfo, error) {
+ data, err := os.ReadFile(fs.proc.Path("zoneinfo"))
+ if err != nil {
+ return nil, fmt.Errorf("%w: error reading zoneinfo %q: %w", ErrFileRead, fs.proc.Path("zoneinfo"), err)
+ }
+ zoneinfo, err := parseZoneinfo(data)
+ if err != nil {
+ return nil, fmt.Errorf("%w: error parsing zoneinfo %q: %w", ErrFileParse, fs.proc.Path("zoneinfo"), err)
+ }
+ return zoneinfo, nil
+}
+
+func parseZoneinfo(zoneinfoData []byte) ([]Zoneinfo, error) {
+
+ zoneinfo := []Zoneinfo{}
+
+ zoneinfoBlocks := bytes.Split(zoneinfoData, []byte("\nNode"))
+ for _, block := range zoneinfoBlocks {
+ var zoneinfoElement Zoneinfo
+ lines := strings.Split(string(block), "\n")
+ for _, line := range lines {
+
+ if nodeZone := nodeZoneRE.FindStringSubmatch(line); nodeZone != nil {
+ zoneinfoElement.Node = nodeZone[1]
+ zoneinfoElement.Zone = nodeZone[2]
+ continue
+ }
+ if strings.HasPrefix(strings.TrimSpace(line), "per-node stats") {
+ continue
+ }
+ parts := strings.Fields(strings.TrimSpace(line))
+ if len(parts) < 2 {
+ continue
+ }
+ vp := util.NewValueParser(parts[1])
+ switch parts[0] {
+ case "nr_free_pages":
+ zoneinfoElement.NrFreePages = vp.PInt64()
+ case "min":
+ zoneinfoElement.Min = vp.PInt64()
+ case "low":
+ zoneinfoElement.Low = vp.PInt64()
+ case "high":
+ zoneinfoElement.High = vp.PInt64()
+ case "scanned":
+ zoneinfoElement.Scanned = vp.PInt64()
+ case "spanned":
+ zoneinfoElement.Spanned = vp.PInt64()
+ case "present":
+ zoneinfoElement.Present = vp.PInt64()
+ case "managed":
+ zoneinfoElement.Managed = vp.PInt64()
+ case "nr_active_anon":
+ zoneinfoElement.NrActiveAnon = vp.PInt64()
+ case "nr_inactive_anon":
+ zoneinfoElement.NrInactiveAnon = vp.PInt64()
+ case "nr_isolated_anon":
+ zoneinfoElement.NrIsolatedAnon = vp.PInt64()
+ case "nr_anon_pages":
+ zoneinfoElement.NrAnonPages = vp.PInt64()
+ case "nr_anon_transparent_hugepages":
+ zoneinfoElement.NrAnonTransparentHugepages = vp.PInt64()
+ case "nr_active_file":
+ zoneinfoElement.NrActiveFile = vp.PInt64()
+ case "nr_inactive_file":
+ zoneinfoElement.NrInactiveFile = vp.PInt64()
+ case "nr_isolated_file":
+ zoneinfoElement.NrIsolatedFile = vp.PInt64()
+ case "nr_file_pages":
+ zoneinfoElement.NrFilePages = vp.PInt64()
+ case "nr_slab_reclaimable":
+ zoneinfoElement.NrSlabReclaimable = vp.PInt64()
+ case "nr_slab_unreclaimable":
+ zoneinfoElement.NrSlabUnreclaimable = vp.PInt64()
+ case "nr_mlock_stack":
+ zoneinfoElement.NrMlockStack = vp.PInt64()
+ case "nr_kernel_stack":
+ zoneinfoElement.NrKernelStack = vp.PInt64()
+ case "nr_mapped":
+ zoneinfoElement.NrMapped = vp.PInt64()
+ case "nr_dirty":
+ zoneinfoElement.NrDirty = vp.PInt64()
+ case "nr_writeback":
+ zoneinfoElement.NrWriteback = vp.PInt64()
+ case "nr_unevictable":
+ zoneinfoElement.NrUnevictable = vp.PInt64()
+ case "nr_shmem":
+ zoneinfoElement.NrShmem = vp.PInt64()
+ case "nr_dirtied":
+ zoneinfoElement.NrDirtied = vp.PInt64()
+ case "nr_written":
+ zoneinfoElement.NrWritten = vp.PInt64()
+ case "numa_hit":
+ zoneinfoElement.NumaHit = vp.PInt64()
+ case "numa_miss":
+ zoneinfoElement.NumaMiss = vp.PInt64()
+ case "numa_foreign":
+ zoneinfoElement.NumaForeign = vp.PInt64()
+ case "numa_interleave":
+ zoneinfoElement.NumaInterleave = vp.PInt64()
+ case "numa_local":
+ zoneinfoElement.NumaLocal = vp.PInt64()
+ case "numa_other":
+ zoneinfoElement.NumaOther = vp.PInt64()
+ case "protection:":
+ protectionParts := strings.Split(line, ":")
+ protectionValues := strings.Replace(protectionParts[1], "(", "", 1)
+ protectionValues = strings.Replace(protectionValues, ")", "", 1)
+ protectionValues = strings.TrimSpace(protectionValues)
+ protectionStringMap := strings.Split(protectionValues, ", ")
+ val, err := util.ParsePInt64s(protectionStringMap)
+ if err == nil {
+ zoneinfoElement.Protection = val
+ }
+ }
+
+ }
+
+ zoneinfo = append(zoneinfo, zoneinfoElement)
+ }
+ return zoneinfo, nil
+}
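A final sketch for the zoneinfo parser above, printing the free pages per node and zone (again assuming a readable /proc):

```go
package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS(procfs.DefaultMountPoint)
	if err != nil {
		panic(err)
	}

	zones, err := fs.Zoneinfo()
	if err != nil {
		panic(err)
	}

	for _, z := range zones {
		if z.NrFreePages != nil {
			fmt.Printf("node %s, zone %s: %d free pages\n", z.Node, z.Zone, *z.NrFreePages)
		}
	}
}
```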
diff --git a/vendor/github.com/pterm/pterm/.gitignore b/vendor/github.com/pterm/pterm/.gitignore
new file mode 100644
index 0000000..dc83dde
--- /dev/null
+++ b/vendor/github.com/pterm/pterm/.gitignore
@@ -0,0 +1,22 @@
+# Binaries for programs and plugins
+*.exe
+*.exe~
+*.dll
+*.so
+*.dylib
+
+# Test binary, built with `go test -c`
+*.test
+
+# Output of the go coverage tool, specifically when used with LiteIDE
+*.out
+
+# Dependency directories (remove the comment below to include it)
+vendor/
+
+# This is where we test stuff
+/experimenting/*
+
+/.history
+/.vscode
+/.idea
diff --git a/vendor/github.com/pterm/pterm/.golangci.yml b/vendor/github.com/pterm/pterm/.golangci.yml
new file mode 100644
index 0000000..a964631
--- /dev/null
+++ b/vendor/github.com/pterm/pterm/.golangci.yml
@@ -0,0 +1,95 @@
+linters-settings:
+ gocritic:
+ enabled-tags:
+ - diagnostic
+ - experimental
+ - opinionated
+ - performance
+ - style
+ disabled-checks:
+ - dupImport
+ - ifElseChain
+ - octalLiteral
+ - whyNoLint
+ - wrapperFunc
+ - exitAfterDefer
+ - hugeParam
+ - ptrToRefParam
+ - paramTypeCombine
+ - unnamedResult
+ # maligned:
+ # suggest-new: true
+ misspell:
+ locale: US
+linters:
+ disable-all: true
+ enable:
+ - gocritic
+ - gosec
+ - govet
+ - ineffassign
+ - unconvert
+ - gosimple
+ - godox
+ - whitespace
+ - staticcheck
+ # - bodyclose
+ # - maligned
+ # - godot
+ # - deadcode
+ # - depguard
+ # - dogsled
+ # - dupl
+ # - errcheck
+ # - exhaustive
+ # - funlen
+ # - gochecknoinits
+ # - goconst
+ # - gocyclo
+ # - gofmt
+ # - goimports
+ # - golint
+ # - gomnd
+ # - goprintffuncname
+ # - lll
+ # - misspell
+ # - nakedret
+ # - noctx
+ # - nolintlint
+ # - rowserrcheck
+ # - scopelint
+ # - structcheck
+ # - stylecheck
+ # - typecheck
+ # - unparam
+ # - unused
+ # - varcheck
+ # - whitespace
+ # - asciicheck
+ # - gochecknoglobals
+ # - gocognit
+ # - goerr113
+ # - nestif
+ # - prealloc
+ # - testpackage
+ # - wsl
+issues:
+ # Excluding configuration per-path, per-linter, per-text and per-source
+ exclude-rules:
+ - path: _test\.go
+ linters:
+ - gocyclo
+ - errcheck
+ - dupl
+ - gosec
+ - gocritic
+ # https://github.com/go-critic/go-critic/issues/926
+ - linters:
+ - gocritic
+ text: "unnecessaryDefer:"
+ - linters:
+ - gocritic
+ text: "preferDecodeRune:"
+service:
+ golangci-lint-version: 1.31.x # use the fixed version to not introduce new linters unexpectedly
+
diff --git a/vendor/github.com/pterm/pterm/CODE_OF_CONDUCT.md b/vendor/github.com/pterm/pterm/CODE_OF_CONDUCT.md
new file mode 100644
index 0000000..066ceae
--- /dev/null
+++ b/vendor/github.com/pterm/pterm/CODE_OF_CONDUCT.md
@@ -0,0 +1,76 @@
+# Contributor Covenant Code of Conduct
+
+## Our Pledge
+
+In the interest of fostering an open and welcoming environment, we as
+contributors and maintainers pledge to making participation in our project and
+our community a harassment-free experience for everyone, regardless of age, body
+size, disability, ethnicity, sex characteristics, gender identity and expression,
+level of experience, education, socio-economic status, nationality, personal
+appearance, race, religion, or sexual identity and orientation.
+
+## Our Standards
+
+Examples of behavior that contributes to creating a positive environment
+include:
+
+* Using welcoming and inclusive language
+* Being respectful of differing viewpoints and experiences
+* Gracefully accepting constructive criticism
+* Focusing on what is best for the community
+* Showing empathy towards other community members
+
+Examples of unacceptable behavior by participants include:
+
+* The use of sexualized language or imagery and unwelcome sexual attention or
+ advances
+* Trolling, insulting/derogatory comments, and personal or political attacks
+* Public or private harassment
+* Publishing others' private information, such as a physical or electronic
+ address, without explicit permission
+* Other conduct which could reasonably be considered inappropriate in a
+ professional setting
+
+## Our Responsibilities
+
+Project maintainers are responsible for clarifying the standards of acceptable
+behavior and are expected to take appropriate and fair corrective action in
+response to any instances of unacceptable behavior.
+
+Project maintainers have the right and responsibility to remove, edit, or
+reject comments, commits, code, wiki edits, issues, and other contributions
+that are not aligned to this Code of Conduct, or to ban temporarily or
+permanently any contributor for other behaviors that they deem inappropriate,
+threatening, offensive, or harmful.
+
+## Scope
+
+This Code of Conduct applies both within project spaces and in public spaces
+when an individual is representing the project or its community. Examples of
+representing a project or community include using an official project e-mail
+address, posting via an official social media account, or acting as an appointed
+representative at an online or offline event. Representation of a project may be
+further defined and clarified by project maintainers.
+
+## Enforcement
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be
+reported by contacting the project team at pterm@marvinjwendt.com. All
+complaints will be reviewed and investigated and will result in a response that
+is deemed necessary and appropriate to the circumstances. The project team is
+obligated to maintain confidentiality with regard to the reporter of an incident.
+Further details of specific enforcement policies may be posted separately.
+
+Project maintainers who do not follow or enforce the Code of Conduct in good
+faith may face temporary or permanent repercussions as determined by other
+members of the project's leadership.
+
+## Attribution
+
+This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
+available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html
+
+[homepage]: https://www.contributor-covenant.org
+
+For answers to common questions about this code of conduct, see
+https://www.contributor-covenant.org/faq
diff --git a/vendor/github.com/pterm/pterm/CONTRIBUTING.md b/vendor/github.com/pterm/pterm/CONTRIBUTING.md
new file mode 100644
index 0000000..fcde992
--- /dev/null
+++ b/vendor/github.com/pterm/pterm/CONTRIBUTING.md
@@ -0,0 +1,225 @@
+# Contributing to PTerm
+
+> This document explains how to participate in the development of PTerm.\
+If your goal is to report a bug instead of programming PTerm, you can do so [here](https://github.com/pterm/pterm/issues?q=is%3Aissue+is%3Aopen+sort%3Aupdated-desc).
+
+## Best practices
+
+We enforce some best practices, made especially for PTerm, to provide a clean and consistent user experience.
+
+### Styles
+
+Styles should always be consumed as pointers. That way, the user can change the style of printers globally.
+
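+For illustration, a minimal sketch of this convention; the `ExamplePrinter` type is hypothetical and not part of the PTerm API:
+
+```go
+// ExamplePrinter is a hypothetical printer used only to illustrate the rule.
+// Its style is stored as a pointer, so changing the underlying Style value
+// (for example a shared default) is picked up by every printer using it.
+type ExamplePrinter struct {
+	Style *Style
+}
+
+// WithStyle returns a copy of the printer that uses the given style pointer.
+func (p ExamplePrinter) WithStyle(style *Style) *ExamplePrinter {
+	p.Style = style
+	return &p
+}
+```
+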
+## Creating a new printer
+
+> In this chapter we will show you how to create a new printer.
+
+### `TextPrinter` Template
+```go
+package pterm
+
+type TemplatePrinter struct{
+ // TODO: Add printer settings here
+}
+
+// Sprint formats using the default formats for its operands and returns the resulting string.
+// Spaces are added between operands when neither is a string.
+func (p TemplatePrinter) Sprint(a ...any) string {
+ panic("write printer code here")
+}
+
+// Sprintln formats using the default formats for its operands and returns the resulting string.
+// Spaces are always added between operands and a newline is appended.
+func (p TemplatePrinter) Sprintln(a ...any) string {
+ return Sprintln(p.Sprint(a...))
+}
+
+// Sprintf formats according to a format specifier and returns the resulting string.
+func (p TemplatePrinter) Sprintf(format string, a ...any) string {
+ return p.Sprint(Sprintf(format, a...))
+}
+
+// Print formats using the default formats for its operands and writes to standard output.
+// Spaces are added between operands when neither is a string.
+// It returns the number of bytes written and any write error encountered.
+func (p TemplatePrinter) Print(a ...any) *TextPrinter {
+ Print(p.Sprint(a...))
+ tp := TextPrinter(p)
+ return &tp
+}
+
+// Println formats using the default formats for its operands and writes to standard output.
+// Spaces are always added between operands and a newline is appended.
+// It returns the number of bytes written and any write error encountered.
+func (p TemplatePrinter) Println(a ...any) *TextPrinter {
+ Println(p.Sprint(a...))
+ tp := TextPrinter(p)
+ return &tp
+}
+
+// Printf formats according to a format specifier and writes to standard output.
+// It returns the number of bytes written and any write error encountered.
+func (p TemplatePrinter) Printf(format string, a ...any) *TextPrinter {
+ Print(p.Sprintf(format, a...))
+ tp := TextPrinter(p)
+ return &tp
+}
+```
+
+### `RenderablePrinter` Template
+
+```go
+package pterm
+
+type TemplatePrinter struct{
+ // TODO: Add printer settings here
+}
+
+// Srender renders the Template as a string.
+func (p TemplatePrinter) Srender() (string, error) {
+ var ret strings.Builder
+
+ return ret.String(), nil
+}
+
+// Render prints the Template to the terminal.
+func (p TemplatePrinter) Render() error {
+ s, err := p.Srender()
+ if err != nil {
+ return err
+ }
+ Println(s)
+
+ return nil
+}
+```
+
+### `LivePrinter` Template
+
+```go
+// Start the TemplatePrinter.
+package pterm
+import "github.com/pterm/pterm"
+
+type TemplatePrinter struct{
+
+}
+
+
+func (s TemplatePrinter) Start(text...any) (*TemplatePrinter, error) { // TODO: Replace Template with actual printer.
+ // TODO: start logic
+ return &s, nil
+}
+
+// Stop terminates the TemplatePrinter immediately.
+// The TemplatePrinter will not resolve into anything.
+func (s *TemplatePrinter) Stop() error {
+ // TODO: stop logic
+ return nil
+}
+
+// GenericStart runs Start, but returns a LivePrinter.
+// This is used for the interface LivePrinter.
+// You most likely want to use Start instead of this in your program.
+func (s *TemplatePrinter) GenericStart() (*LivePrinter, error) {
+ _, err := s.Start()
+ lp := LivePrinter(s)
+ return &lp, err
+}
+
+// GenericStop runs Stop, but returns a LivePrinter.
+// This is used for the interface LivePrinter.
+// You most likely want to use Stop instead of this in your program.
+func (s *TemplatePrinter) GenericStop() (*LivePrinter, error) {
+ err := s.Stop()
+ lp := LivePrinter(s)
+ return &lp, err
+}
+```
+
+## Writing Tests
+
+> Each method of PTerm must be tested.
+
+### Required tests for every printer
+
+#### Nil Check
+
+> This ensures that a printer without set values will not produce errors.
+
+```go
+func TestTemplatePrinterNilPrint(t *testing.T) { // TODO: Replace "Template" with actual printer name.
+ p := TemplatePrinter{} // TODO: Replace "Template" with actual printer name.
+ p.Println("Hello, World!")
+}
+```
+
+#### `WithXxx()` Methods
+
+> Each method that starts with `With` can be tested by checking that it creates a new printer and sets the value.
+
+Example from `SectionPrinter`:
+
+```go
+func TestSectionPrinter_WithStyle(t *testing.T) {
+ p := SectionPrinter{}
+ s := NewStyle(FgRed, BgRed, Bold)
+ p2 := p.WithStyle(s)
+
+ assert.Equal(t, s, p2.Style)
+ assert.Empty(t, p.Style)
+}
+
+func TestSectionPrinter_WithTopPadding(t *testing.T) {
+ p := SectionPrinter{}
+ p2 := p.WithTopPadding(1337)
+
+ assert.Equal(t, 1337, p2.TopPadding)
+ assert.Empty(t, p.TopPadding)
+}
+```
+
+### `TextPrinter` Tests Template
+
+```go
+func TestTemplatePrinterPrintMethods(t *testing.T) { // TODO: Replace "Template" with actual printer name.
+ p := DefaultTemplate // TODO: Replace "Template" with actual printer name.
+
+ t.Run("Print", func(t *testing.T) {
+ testPrintContains(t, func(w io.Writer, a any) {
+ p.Print(a)
+ })
+ })
+
+ t.Run("Printf", func(t *testing.T) {
+ testPrintfContains(t, func(w io.Writer, format string, a any) {
+ p.Printf(format, a)
+ })
+ })
+
+ t.Run("Println", func(t *testing.T) {
+ testPrintlnContains(t, func(w io.Writer, a any) {
+ p.Println(a)
+ })
+ })
+
+ t.Run("Sprint", func(t *testing.T) {
+ testSprintContains(t, func(a any) string {
+ return p.Sprint(a)
+ })
+ })
+
+ t.Run("Sprintf", func(t *testing.T) {
+ testSprintfContains(t, func(format string, a any) string {
+ return p.Sprintf(format, a)
+ })
+ })
+
+ t.Run("Sprintln", func(t *testing.T) {
+ testSprintlnContains(t, func(a any) string {
+ return p.Sprintln(a)
+ })
+ })
+}
+```
diff --git a/vendor/github.com/pterm/pterm/LICENSE b/vendor/github.com/pterm/pterm/LICENSE
new file mode 100644
index 0000000..63f8a64
--- /dev/null
+++ b/vendor/github.com/pterm/pterm/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2020 pterm
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/pterm/pterm/README.md b/vendor/github.com/pterm/pterm/README.md
new file mode 100644
index 0000000..4002a59
--- /dev/null
+++ b/vendor/github.com/pterm/pterm/README.md
@@ -0,0 +1,3814 @@
+
+💻 PTerm | Pretty Terminal Printer
+
+A modern Go framework to make beautiful CLIs
+
+---
+
+
+PTerm.sh | Installation | Getting Started | Documentation | Examples | Q&A | Discord
+
+
+---
+
+## 📦 Installation
+
+To make PTerm available in your project, you can run the following command.\
+Make sure to run this command inside your project if you're using Go modules 😉
+
+```sh
+go get github.com/pterm/pterm
+```
+
+## ⭐ Main Features
+
+| Feature | Description |
+|------------------|-----------------------------------------------------|
+| 🪀 Easy to use | PTerm emphasizes ease of use, with [examples](#-examples) and consistent component design. |
+| 🤹♀️ Cross-Platform | PTerm works on various OS and terminals, including `Windows CMD`, `macOS iTerm2`, and in CI systems like `GitHub Actions`. |
+| 🧪 Well tested | A high test coverage and `28774` automated tests ensure PTerm's reliability. |
+| ✨ Consistent Colors | PTerm uses the [ANSI color scheme](https://en.wikipedia.org/wiki/ANSI_escape_code#3/4_bit) for uniformity and supports `TrueColor` for advanced terminals. |
+| 📚 Component system | PTerm's flexible `Printers` can be used individually or combined to generate beautiful console output. |
+| 🛠 Configurable | PTerm is ready to use without configuration but allows easy customization for unique terminal output. |
+| ✏ Documentation | Access comprehensive docs on [pkg.go.dev](https://pkg.go.dev/github.com/pterm/pterm#section-documentation) and view practical examples in the [examples section](#-examples). |
+
+### Printers (Components)
+
+
+
+
+| Feature | Feature | Feature | Feature | Feature |
+| :-------: | :-------: | :-------: | :-------: | :-------: |
+| Area [(Examples)](https://github.com/pterm/pterm/tree/master/_examples/area) | Barchart [(Examples)](https://github.com/pterm/pterm/tree/master/_examples/barchart) | Basictext [(Examples)](https://github.com/pterm/pterm/tree/master/_examples/basictext) | Bigtext [(Examples)](https://github.com/pterm/pterm/tree/master/_examples/bigtext) | Box [(Examples)](https://github.com/pterm/pterm/tree/master/_examples/box) |
+| Bulletlist [(Examples)](https://github.com/pterm/pterm/tree/master/_examples/bulletlist) | Center [(Examples)](https://github.com/pterm/pterm/tree/master/_examples/center) | Coloring [(Examples)](https://github.com/pterm/pterm/tree/master/_examples/coloring) | Header [(Examples)](https://github.com/pterm/pterm/tree/master/_examples/header) | Heatmap [(Examples)](https://github.com/pterm/pterm/tree/master/_examples/heatmap) |
+| Interactive confirm [(Examples)](https://github.com/pterm/pterm/tree/master/_examples/interactive_confirm) | Interactive continue [(Examples)](https://github.com/pterm/pterm/tree/master/_examples/interactive_continue) | Interactive multiselect [(Examples)](https://github.com/pterm/pterm/tree/master/_examples/interactive_multiselect) | Interactive select [(Examples)](https://github.com/pterm/pterm/tree/master/_examples/interactive_select) | Interactive textinput [(Examples)](https://github.com/pterm/pterm/tree/master/_examples/interactive_textinput) |
+| Logger [(Examples)](https://github.com/pterm/pterm/tree/master/_examples/logger) | Multiple-live-printers [(Examples)](https://github.com/pterm/pterm/tree/master/_examples/multiple-live-printers) | Panel [(Examples)](https://github.com/pterm/pterm/tree/master/_examples/panel) | Paragraph [(Examples)](https://github.com/pterm/pterm/tree/master/_examples/paragraph) | Prefix [(Examples)](https://github.com/pterm/pterm/tree/master/_examples/prefix) |
+| Progressbar [(Examples)](https://github.com/pterm/pterm/tree/master/_examples/progressbar) | Section [(Examples)](https://github.com/pterm/pterm/tree/master/_examples/section) | Slog [(Examples)](https://github.com/pterm/pterm/tree/master/_examples/slog) | Spinner [(Examples)](https://github.com/pterm/pterm/tree/master/_examples/spinner) | Style [(Examples)](https://github.com/pterm/pterm/tree/master/_examples/style) |
+| Table [(Examples)](https://github.com/pterm/pterm/tree/master/_examples/table) | Test.sh [(Examples)](https://github.com/pterm/pterm/tree/master/_examples/test.sh) | Theme [(Examples)](https://github.com/pterm/pterm/tree/master/_examples/theme) | Tree [(Examples)](https://github.com/pterm/pterm/tree/master/_examples/tree) | |
+
+
+
+
+---
+
+
+
+### 🦸♂️ Sponsors
+
+

+
+---
+
+
+
+## 🧪 Examples
+
+
+
+
+
+
+### area/demo
+
+
+
+
+
+SHOW SOURCE
+
+```go
+package main
+
+import (
+ "time"
+
+ "github.com/pterm/pterm"
+ "github.com/pterm/pterm/putils"
+)
+
+func main() {
+ // Print an informational message using PTerm's Info printer.
+ // This message will stay in place while the area updates.
+ pterm.Info.Println("The previous text will stay in place, while the area updates.")
+
+ // Print two new lines as spacer.
+ pterm.Print("\n\n")
+
+ // Start the Area printer from PTerm's DefaultArea, with the Center option.
+ // The Area printer allows us to update a specific area of the console output.
+ // The returned 'area' object is used to control the area updates.
+ area, _ := pterm.DefaultArea.WithCenter().Start()
+
+ // Loop 10 times to update the area with the current time.
+ for i := 0; i < 10; i++ {
+ // Get the current time, format it as "15:04:05" (hour:minute:second), and convert it to a string.
+ // Then, create a BigText from the time string using PTerm's DefaultBigText and putils NewLettersFromString.
+ // The Srender() function is used to save the BigText as a string.
+ str, _ := pterm.DefaultBigText.WithLetters(putils.LettersFromString(time.Now().Format("15:04:05"))).Srender()
+
+ // Update the Area contents with the current time string.
+ area.Update(str)
+
+ // Sleep for a second before the next update.
+ time.Sleep(time.Second)
+ }
+
+ // Stop the Area printer after all updates are done.
+ area.Stop()
+}
+
+```
+
+
+
+### area/center
+
+
+
+
+
+SHOW SOURCE
+
+```go
+package main
+
+import (
+ "time"
+
+ "github.com/pterm/pterm"
+)
+
+func main() {
+ // Start a new default area in the center of the terminal.
+ // The Start() function returns the created area and an error.
+ area, _ := pterm.DefaultArea.WithCenter().Start()
+
+ // Loop 5 times to simulate a dynamic update.
+ for i := 0; i < 5; i++ {
+ // Update the content of the area with the current count.
+ // The Sprintf function is used to format the string.
+ area.Update(pterm.Sprintf("Current count: %d\nAreas can update their content dynamically!", i))
+
+ // Pause for a second to simulate a time-consuming task.
+ time.Sleep(time.Second)
+ }
+
+ // Stop the area after all updates are done.
+ area.Stop()
+}
+
+```
+
+
+
+### area/default
+
+
+
+
+
+SHOW SOURCE
+
+```go
+package main
+
+import (
+ "time"
+
+ "github.com/pterm/pterm"
+)
+
+func main() {
+ // Start a new default area and get a reference to it.
+ // The second return value is an error which is ignored here.
+ area, _ := pterm.DefaultArea.Start()
+
+ // Loop 5 times
+ for i := 0; i < 5; i++ {
+ // Update the content of the area dynamically.
+ // Here we're just displaying the current count.
+ area.Update(pterm.Sprintf("Current count: %d\nAreas can update their content dynamically!", i))
+
+ // Pause for a second before the next update.
+ time.Sleep(time.Second)
+ }
+
+ // Stop the area after all updates are done.
+ // This will clean up and free resources used by the area.
+ area.Stop()
+}
+
+```
+
+
+
+### area/dynamic-chart
+
+
+
+
+
+SHOW SOURCE
+
+```go
+package main
+
+import (
+ "time"
+
+ "github.com/pterm/pterm"
+)
+
+func main() {
+ // Start a new fullscreen centered area.
+ // This area will be used to display the bar chart.
+ area, _ := pterm.DefaultArea.WithFullscreen().WithCenter().Start()
+ // Ensure the area stops updating when we're done.
+ defer area.Stop()
+
+ // Loop to update the bar chart 10 times.
+ for i := 0; i < 10; i++ {
+ // Create a new bar chart with dynamic bars.
+ // The bars will change based on the current iteration.
+ barchart := pterm.DefaultBarChart.WithBars(dynamicBars(i))
+ // Render the bar chart to a string.
+ // This string will be used to update the area.
+ content, _ := barchart.Srender()
+ // Update the area with the new bar chart.
+ area.Update(content)
+ // Wait for half a second before the next update.
+ time.Sleep(500 * time.Millisecond)
+ }
+}
+
+// dynamicBars generates a set of bars for the bar chart.
+// The bars will change based on the current iteration.
+func dynamicBars(i int) pterm.Bars {
+ return pterm.Bars{
+ {Label: "A", Value: 10}, // A static bar.
+ {Label: "B", Value: 20 * i}, // A bar that grows with each iteration.
+ {Label: "C", Value: 30}, // Another static bar.
+ {Label: "D", Value: 40 + i}, // A bar that grows slowly with each iteration.
+ }
+}
+
+```
+
+
+
+### area/fullscreen
+
+
+
+
+
+SHOW SOURCE
+
+```go
+package main
+
+import (
+ "time"
+
+ "github.com/pterm/pterm"
+)
+
+func main() {
+ // Start a new fullscreen area. This will return an area instance and an error.
+ // The underscore (_) is used to ignore the error.
+ area, _ := pterm.DefaultArea.WithFullscreen().Start()
+
+ // Loop 5 times to update the area content.
+ for i := 0; i < 5; i++ {
+ // Update the content of the area with the current count.
+ // The Sprintf function is used to format the string.
+ area.Update(pterm.Sprintf("Current count: %d\nAreas can update their content dynamically!", i))
+
+ // Pause for a second before the next update.
+ time.Sleep(time.Second)
+ }
+
+ // Stop the area after all updates are done.
+ area.Stop()
+}
+
+```
+
+
+
+### area/fullscreen-center
+
+
+
+
+
+SHOW SOURCE
+
+```go
+package main
+
+import (
+ "time"
+
+ "github.com/pterm/pterm"
+)
+
+func main() {
+ // Initialize a new PTerm area with fullscreen and center options
+ // The Start() function returns the created area and an error (ignored here)
+ area, _ := pterm.DefaultArea.WithFullscreen().WithCenter().Start()
+
+ // Loop 5 times to demonstrate dynamic content update
+ for i := 0; i < 5; i++ {
+ // Update the content of the area with the current count
+ // The Sprintf function is used to format the string with the count
+ area.Update(pterm.Sprintf("Current count: %d\nAreas can update their content dynamically!", i))
+
+ // Pause for a second
+ time.Sleep(time.Second)
+ }
+
+ // Stop the area after all updates are done
+ // This will clear the area and return the terminal to its normal state
+ area.Stop()
+}
+
+```
+
+
+
+### barchart/demo
+
+
+
+
+
+SHOW SOURCE
+
+```go
+package main
+
+import (
+ "github.com/pterm/pterm"
+)
+
+func main() {
+ // Define the bars for the chart
+ bars := []pterm.Bar{
+ {Label: "Bar 1", Value: 5},
+ {Label: "Bar 2", Value: 3},
+ {Label: "Longer Label", Value: 7},
+ }
+
+ // Print an informational message
+ pterm.Info.Println("Chart example with positive only values (bars use 100% of chart area)")
+
+ // Create a bar chart with the defined bars and render it
+ // The DefaultBarChart is used as a base, and the bars are added with the WithBars option
+ // The Render function is then called to display the chart
+ pterm.DefaultBarChart.WithBars(bars).Render()
+
+ // Create a horizontal bar chart with the defined bars and render it
+ // The DefaultBarChart is used as a base, the chart is made horizontal with the WithHorizontal option, and the bars are added with the WithBars option
+ // The Render function is then called to display the chart
+ pterm.DefaultBarChart.WithHorizontal().WithBars(bars).Render()
+}
+
+```
+
+
+
+### barchart/custom-height
+
+
+
+
+
+SHOW SOURCE
+
+```go
+package main
+
+import "github.com/pterm/pterm"
+
+func main() {
+ // Define a slice of Bar structs. Each struct represents a bar in the chart.
+ // The Label field is the name of the bar and the Value field is the height of the bar.
+ bars := []pterm.Bar{
+ {Label: "A", Value: 10},
+ {Label: "B", Value: 20},
+ {Label: "C", Value: 30},
+ {Label: "D", Value: 40},
+ {Label: "E", Value: 50},
+ {Label: "F", Value: 40},
+ {Label: "G", Value: 30},
+ {Label: "H", Value: 20},
+ {Label: "I", Value: 10},
+ }
+
+ // Create and render a bar chart with the defined bars and a height of 5.
+ // The WithBars method is used to set the bars of the chart.
+ // The WithHeight method is used to set the height of the chart.
+ // The Render method is used to display the chart in the terminal.
+ pterm.DefaultBarChart.WithBars(bars).WithHeight(5).Render()
+}
+
+```
+
+
+
+### barchart/custom-width
+
+
+
+
+
+SHOW SOURCE
+
+```go
+package main
+
+import "github.com/pterm/pterm"
+
+func main() {
+ // Define the data for the bar chart
+ barData := []pterm.Bar{
+ {Label: "A", Value: 10},
+ {Label: "B", Value: 20},
+ {Label: "C", Value: 30},
+ {Label: "D", Value: 40},
+ {Label: "E", Value: 50},
+ {Label: "F", Value: 40},
+ {Label: "G", Value: 30},
+ {Label: "H", Value: 20},
+ {Label: "I", Value: 10},
+ }
+
+ // Create a bar chart with the defined data
+ // The chart is horizontal and has a width of 5
+ // The Render() function is called to display the chart
+ pterm.DefaultBarChart.WithBars(barData).WithHorizontal().WithWidth(5).Render()
+}
+
+```
+
+
+
+### barchart/default
+
+
+
+
+
+SHOW SOURCE
+
+```go
+package main
+
+import "github.com/pterm/pterm"
+
+func main() {
+ // Define the data for the bar chart. Each bar is represented by a `pterm.Bar` struct.
+ // The `Label` field represents the label of the bar, and the `Value` field represents the value of the bar.
+ bars := []pterm.Bar{
+ {Label: "A", Value: 10},
+ {Label: "B", Value: 20},
+ {Label: "C", Value: 30},
+ {Label: "D", Value: 40},
+ {Label: "E", Value: 50},
+ {Label: "F", Value: 40},
+ {Label: "G", Value: 30},
+ {Label: "H", Value: 20},
+ {Label: "I", Value: 10},
+ }
+
+ // Use the `DefaultBarChart` from the `pterm` package to create a bar chart.
+ // The `WithBars` method is used to set the bars of the chart.
+ // The `Render` method is used to display the chart.
+ pterm.DefaultBarChart.WithBars(bars).Render()
+}
+
+```
+
+
+
+### barchart/horizontal
+
+
+
+
+
+SHOW SOURCE
+
+```go
+package main
+
+import "github.com/pterm/pterm"
+
+func main() {
+ // Define the data for the bar chart
+ bars := []pterm.Bar{
+ {Label: "A", Value: 10},
+ {Label: "B", Value: 20},
+ {Label: "C", Value: 30},
+ {Label: "D", Value: 40},
+ {Label: "E", Value: 50},
+ {Label: "F", Value: 40},
+ {Label: "G", Value: 30},
+ {Label: "H", Value: 20},
+ {Label: "I", Value: 10},
+ }
+
+ // Create a bar chart with the defined data
+ // The chart is displayed horizontally
+ // The Render() function is called to display the chart
+ pterm.DefaultBarChart.WithBars(bars).WithHorizontal().Render()
+}
+
+```
+
+
+
+### barchart/horizontal-show-value
+
+
+
+
+
+SHOW SOURCE
+
+```go
+package main
+
+import "github.com/pterm/pterm"
+
+func main() {
+ // Define the data for the bar chart
+ barData := []pterm.Bar{
+ {Label: "A", Value: 10},
+ {Label: "B", Value: 20},
+ {Label: "C", Value: 30},
+ {Label: "D", Value: 40},
+ {Label: "E", Value: 50},
+ {Label: "F", Value: 40},
+ {Label: "G", Value: 30},
+ {Label: "H", Value: 20},
+ {Label: "I", Value: 10},
+ }
+
+ // Create a bar chart with the defined data
+ // The chart is horizontal and displays the value of each bar
+ // The Render() function is called to display the chart
+ pterm.DefaultBarChart.WithBars(barData).WithHorizontal().WithShowValue().Render()
+}
+
+```
+
+
+
+### barchart/mixed-values
+
+
+
+
+
+SHOW SOURCE
+
+```go
+package main
+
+import (
+ "github.com/pterm/pterm"
+)
+
+func main() {
+ // Define a set of bars for the chart.
+ // Each bar has a label and a value.
+ bars := []pterm.Bar{
+ {Label: "Bar 1", Value: 2},
+ {Label: "Bar 2", Value: -3},
+ {Label: "Bar 3", Value: -2},
+ {Label: "Bar 4", Value: 5},
+ {Label: "Longer Label", Value: 7},
+ }
+
+ // Print a section header.
+ // This is useful for separating different parts of the output.
+ pterm.DefaultSection.Println("Chart example with mixed values (note screen space usage in case when ABSOLUTE values of negative and positive parts are differ too much)")
+
+ // Create a bar chart with the defined bars.
+ // The chart will display the value of each bar.
+ // The Render() function is called to display the chart.
+ pterm.DefaultBarChart.WithBars(bars).WithShowValue().Render()
+
+ // Create a horizontal bar chart with the same bars.
+ // The chart will display the value of each bar.
+ // The Render() function is called to display the chart.
+ pterm.DefaultBarChart.WithHorizontal().WithBars(bars).WithShowValue().Render()
+}
+
+```
+
+
+
+### barchart/negative-values
+
+
+
+
+
+SHOW SOURCE
+
+```go
+package main
+
+import (
+ "github.com/pterm/pterm"
+)
+
+func main() {
+ // Define a set of bars with negative values.
+ // Each bar is represented by a struct with a label and a value.
+ negativeBars := pterm.Bars{
+ {Label: "Bar 1", Value: -5},
+ {Label: "Bar 2", Value: -3},
+ {Label: "Longer Label", Value: -7},
+ }
+
+ // Print an informational message to the console.
+ pterm.Info.Println("Chart example with negative only values (bars use 100% of chart area)")
+
+ // Create a vertical bar chart with the defined bars.
+ // The WithShowValue() option is used to display the value of each bar in the chart.
+ // The Render() method is called to draw the chart.
+ _ = pterm.DefaultBarChart.WithBars(negativeBars).WithShowValue().Render()
+
+ // Create a horizontal bar chart with the same bars.
+ // The WithHorizontal() option is used to orient the chart horizontally.
+ // The WithShowValue() option and Render() method are used in the same way as before.
+ _ = pterm.DefaultBarChart.WithHorizontal().WithBars(negativeBars).WithShowValue().Render()
+}
+
+```
+
+
+
+### barchart/show-value
+
+
+
+
+
+SHOW SOURCE
+
+```go
+package main
+
+import "github.com/pterm/pterm"
+
+func main() {
+ // Define a slice of bars for the bar chart. Each bar is represented by a struct
+ // with a Label and a Value. The Label is a string that represents the name of the bar,
+ // and the Value is an integer that represents the height of the bar.
+ bars := []pterm.Bar{
+ {Label: "A", Value: 10},
+ {Label: "B", Value: 20},
+ {Label: "C", Value: 30},
+ {Label: "D", Value: 40},
+ {Label: "E", Value: 50},
+ {Label: "F", Value: 40},
+ {Label: "G", Value: 30},
+ {Label: "H", Value: 20},
+ {Label: "I", Value: 10},
+ }
+
+ // Create a bar chart with the defined bars using the DefaultBarChart object from PTerm.
+ // Chain the WithBars method to set the bars of the chart.
+ // Chain the WithShowValue method to display the value of each bar on the chart.
+ // Finally, call the Render method to display the chart.
+ pterm.DefaultBarChart.WithBars(bars).WithShowValue().Render()
+}
+
+```
+
+
+
+### basictext/demo
+
+
+
+
+
+SHOW SOURCE
+
+```go
+package main
+
+import "github.com/pterm/pterm"
+
+func main() {
+ // The DefaultBasicText is a basic text printer provided by PTerm.
+ // It is used to print text without any special formatting.
+ pterm.DefaultBasicText.Println("Default basic text printer.")
+
+ // The DefaultBasicText can be used in any context that requires a TextPrinter.
+ // Here, we're using it with the LightMagenta function to color a portion of the text.
+ pterm.DefaultBasicText.Println("Can be used in any" + pterm.LightMagenta(" TextPrinter ") + "context.")
+
+ // The DefaultBasicText is also useful for resolving progress bars and spinners.
+}
+
+```
+
+
+
+### bigtext/demo
+
+
+
+
+
+SHOW SOURCE
+
+```go
+package main
+
+import (
+ "github.com/pterm/pterm"
+ "github.com/pterm/pterm/putils"
+)
+
+func main() {
+ // Create a large text with the LetterStyle from the standard theme.
+ // This is useful for creating title screens.
+ pterm.DefaultBigText.WithLetters(putils.LettersFromString("PTerm")).Render()
+
+ // Create a large text with differently colored letters.
+ // Here, the first letter 'P' is colored cyan and the rest 'Term' is colored light magenta.
+ // This can be used to highlight specific parts of the text.
+ pterm.DefaultBigText.WithLetters(
+ putils.LettersFromStringWithStyle("P", pterm.FgCyan.ToStyle()),
+ putils.LettersFromStringWithStyle("Term", pterm.FgLightMagenta.ToStyle()),
+ ).Render()
+
+ // Create a large text with a specific RGB color.
+ // This can be used when you need a specific color that is not available in the standard colors.
+ // Here, the color is gold (RGB: 255, 215, 0).
+ pterm.DefaultBigText.WithLetters(
+ putils.LettersFromStringWithRGB("PTerm", pterm.NewRGB(255, 215, 0)),
+ ).Render()
+}
+
+```
+
+
+
+### bigtext/colored
+
+
+
+
+
+SHOW SOURCE
+
+```go
+package main
+
+import (
+ "github.com/pterm/pterm"
+ "github.com/pterm/pterm/putils"
+)
+
+func main() {
+ // Initialize a big text display with the letters "P" and "Term"
+ // "P" is displayed in cyan and "Term" is displayed in light magenta
+ pterm.DefaultBigText.WithLetters(
+ putils.LettersFromStringWithStyle("P", pterm.FgCyan.ToStyle()),
+ putils.LettersFromStringWithStyle("Term", pterm.FgLightMagenta.ToStyle())).
+ Render() // Render the big text to the terminal
+}
+
+```
+
+
+
+### bigtext/default
+
+
+
+
+
+SHOW SOURCE
+
+```go
+package main
+
+import (
+ "github.com/pterm/pterm"
+ "github.com/pterm/pterm/putils"
+)
+
+func main() {
+ // Define the text to be rendered
+ var text = "PTerm"
+
+ // Convert the text into a format suitable for PTerm
+ var letters = putils.LettersFromString(text)
+
+ // Render the text using PTerm's default big text style
+ pterm.DefaultBigText.WithLetters(letters).Render()
+}
+
+```
+
+
+
+### box/demo
+
+
+
+
+
+SHOW SOURCE
+
+```go
+package main
+
+import "github.com/pterm/pterm"
+
+func main() {
+ // Print an informational message.
+ pterm.Info.Println("This might not be rendered correctly on GitHub,\nbut it will work in a real terminal.\nThis is because GitHub does not use a monospaced font by default for SVGs")
+
+ // Create three panels with text, some of them with titles.
+ // The panels are created using the DefaultBox style.
+ panel1 := pterm.DefaultBox.Sprint("Lorem ipsum dolor sit amet,\nconsectetur adipiscing elit,\nsed do eiusmod tempor incididunt\nut labore et dolore\nmagna aliqua.")
+ panel2 := pterm.DefaultBox.WithTitle("title").Sprint("Ut enim ad minim veniam,\nquis nostrud exercitation\nullamco laboris\nnisi ut aliquip\nex ea commodo\nconsequat.")
+ panel3 := pterm.DefaultBox.WithTitle("bottom center title").WithTitleBottomCenter().Sprint("Duis aute irure\ndolor in reprehenderit\nin voluptate velit esse cillum\ndolore eu fugiat\nnulla pariatur.")
+
+ // Combine the panels into a layout using the DefaultPanel style.
+ // The layout is a 2D grid, with each row being an array of panels.
+ // In this case, the first row contains panel1 and panel2, and the second row contains only panel3.
+ panels, _ := pterm.DefaultPanel.WithPanels(pterm.Panels{
+ {{Data: panel1}, {Data: panel2}},
+ {{Data: panel3}},
+ }).Srender()
+
+ // Print the panels layout inside a box with a title.
+ // The box is created using the DefaultBox style, with the title positioned at the bottom right.
+ pterm.DefaultBox.WithTitle("Lorem Ipsum").WithTitleBottomRight().WithRightPadding(0).WithBottomPadding(0).Println(panels)
+}
+
+```
+
+
+
+### box/custom-padding
+
+
+
+
+
+SHOW SOURCE
+
+```go
+package main
+
+import "github.com/pterm/pterm"
+
+func main() {
+ // Create a default box with custom padding options and print "Hello, World!" inside it.
+ pterm.DefaultBox.WithRightPadding(10).WithLeftPadding(10).WithTopPadding(2).WithBottomPadding(2).Println("Hello, World!")
+}
+
+```
+
+
+
+### box/default
+
+
+
+
+
+SHOW SOURCE
+
+```go
+package main
+
+import "github.com/pterm/pterm"
+
+func main() {
+ // Create a default box with PTerm and print a message in it.
+ // The DefaultBox.Println method automatically starts, prints the message, and stops the box.
+ pterm.DefaultBox.Println("Hello, World!")
+}
+
+```
+
+
+
+### box/title
+
+
+
+
+
+SHOW SOURCE
+
+```go
+package main
+
+import "github.com/pterm/pterm"
+
+func main() {
+ // Create a default box with specified padding
+ paddedBox := pterm.DefaultBox.WithLeftPadding(4).WithRightPadding(4).WithTopPadding(1).WithBottomPadding(1)
+
+ // Define a title for the box
+ title := pterm.LightRed("I'm a box!")
+
+ // Create boxes with the title positioned differently and containing different content
+ box1 := paddedBox.WithTitle(title).Sprint("Hello, World!\n 1") // Title at default position (top left)
+ box2 := paddedBox.WithTitle(title).WithTitleTopCenter().Sprint("Hello, World!\n 2") // Title at top center
+ box3 := paddedBox.WithTitle(title).WithTitleTopRight().Sprint("Hello, World!\n 3") // Title at top right
+ box4 := paddedBox.WithTitle(title).WithTitleBottomRight().Sprint("Hello, World!\n 4") // Title at bottom right
+ box5 := paddedBox.WithTitle(title).WithTitleBottomCenter().Sprint("Hello, World!\n 5") // Title at bottom center
+ box6 := paddedBox.WithTitle(title).WithTitleBottomLeft().Sprint("Hello, World!\n 6") // Title at bottom left
+ box7 := paddedBox.WithTitle(title).WithTitleTopLeft().Sprint("Hello, World!\n 7") // Title at top left
+
+ // Render the boxes in a panel layout
+ pterm.DefaultPanel.WithPanels([][]pterm.Panel{
+ {{box1}, {box2}, {box3}},
+ {{box4}, {box5}, {box6}},
+ {{box7}},
+ }).Render()
+}
+
+```
+
+
+
+### bulletlist/demo
+
+
+
+
+
+SHOW SOURCE
+
+```go
+package main
+
+import (
+ "github.com/pterm/pterm"
+ "github.com/pterm/pterm/putils"
+)
+
+func main() {
+ // Define a list of bullet list items with different levels.
+ bulletListItems := []pterm.BulletListItem{
+ {Level: 0, Text: "Level 0"}, // Level 0 item
+ {Level: 1, Text: "Level 1"}, // Level 1 item
+ {Level: 2, Text: "Level 2"}, // Level 2 item
+ }
+
+ // Use the default bullet list style to render the list items.
+ pterm.DefaultBulletList.WithItems(bulletListItems).Render()
+
+ // Define a string with different levels of indentation.
+ text := `0
+ 1
+ 2
+ 3`
+
+ // Convert the indented string to a bullet list and render it.
+ putils.BulletListFromString(text, " ").Render()
+}
+
+```
+
+
+
+### bulletlist/customized
+
+
+
+
+
+SHOW SOURCE
+
+```go
+package main
+
+import (
+ "github.com/pterm/pterm"
+)
+
+func main() {
+ // Define a list of bullet list items with different styles and levels.
+ bulletListItems := []pterm.BulletListItem{
+ {
+ Level: 0, // Level 0 (top level)
+ Text: "Blue", // Text to display
+ TextStyle: pterm.NewStyle(pterm.FgBlue), // Text color
+ BulletStyle: pterm.NewStyle(pterm.FgRed), // Bullet color
+ },
+ {
+ Level: 1, // Level 1 (sub-item)
+ Text: "Green", // Text to display
+ TextStyle: pterm.NewStyle(pterm.FgGreen), // Text color
+ Bullet: "-", // Custom bullet symbol
+ BulletStyle: pterm.NewStyle(pterm.FgLightWhite), // Bullet color
+ },
+ {
+ Level: 2, // Level 2 (sub-sub-item)
+ Text: "Cyan", // Text to display
+ TextStyle: pterm.NewStyle(pterm.FgCyan), // Text color
+ Bullet: ">", // Custom bullet symbol
+ BulletStyle: pterm.NewStyle(pterm.FgYellow), // Bullet color
+ },
+ }
+
+ // Create a bullet list with the defined items and render it.
+ pterm.DefaultBulletList.WithItems(bulletListItems).Render()
+}
+
+```
+
+
+
+### center/demo
+
+
+
+
+
+SHOW SOURCE
+
+```go
+package main
+
+import (
+ "github.com/pterm/pterm"
+ "github.com/pterm/pterm/putils"
+)
+
+func main() {
+ // Print a block of text centered in the terminal
+ pterm.DefaultCenter.Println("This text is centered!\nIt centers the whole block by default.\nIn that way you can do stuff like this:")
+
+ // Generate BigLetters and store in 's'
+ s, _ := pterm.DefaultBigText.WithLetters(putils.LettersFromString("PTerm")).Srender()
+
+ // Print the BigLetters 's' centered in the terminal
+ pterm.DefaultCenter.Println(s)
+
+ // Print each line of the text separately centered in the terminal
+ pterm.DefaultCenter.WithCenterEachLineSeparately().Println("This text is centered!\nBut each line is\ncentered\nseparately")
+}
+
+```
+
+
+
+### coloring/demo
+
+
+
+
+
+SHOW SOURCE
+
+```go
+package main
+
+import "github.com/pterm/pterm"
+
+func main() {
+ // Create a table with different foreground and background colors.
+ pterm.DefaultTable.WithData([][]string{
+ {pterm.FgBlack.Sprint("Black"), pterm.FgRed.Sprint("Red"), pterm.FgGreen.Sprint("Green"), pterm.FgYellow.Sprint("Yellow")},
+ {"", pterm.FgLightRed.Sprint("Light Red"), pterm.FgLightGreen.Sprint("Light Green"), pterm.FgLightYellow.Sprint("Light Yellow")},
+ {pterm.BgBlack.Sprint("Black"), pterm.BgRed.Sprint("Red"), pterm.BgGreen.Sprint("Green"), pterm.BgYellow.Sprint("Yellow")},
+ {"", pterm.BgLightRed.Sprint("Light Red"), pterm.BgLightGreen.Sprint("Light Green"), pterm.BgLightYellow.Sprint("Light Yellow")},
+ {pterm.FgBlue.Sprint("Blue"), pterm.FgMagenta.Sprint("Magenta"), pterm.FgCyan.Sprint("Cyan"), pterm.FgWhite.Sprint("White")},
+ {pterm.FgLightBlue.Sprint("Light Blue"), pterm.FgLightMagenta.Sprint("Light Magenta"), pterm.FgLightCyan.Sprint("Light Cyan"), pterm.FgLightWhite.Sprint("Light White")},
+ {pterm.BgBlue.Sprint("Blue"), pterm.BgMagenta.Sprint("Magenta"), pterm.BgCyan.Sprint("Cyan"), pterm.BgWhite.Sprint("White")},
+ {pterm.BgLightBlue.Sprint("Light Blue"), pterm.BgLightMagenta.Sprint("Light Magenta"), pterm.BgLightCyan.Sprint("Light Cyan"), pterm.BgLightWhite.Sprint("Light White")},
+ }).Render() // Render the table.
+
+ pterm.Println()
+
+ // Print words in different colors.
+ pterm.Println(pterm.Red("Hello, ") + pterm.Green("World") + pterm.Cyan("!"))
+ pterm.Println(pterm.Red("Even " + pterm.Cyan("nested ") + pterm.Green("colors ") + "are supported!"))
+
+ pterm.Println()
+
+ // Create a new style with a red background, light green foreground, and bold text.
+ style := pterm.NewStyle(pterm.BgRed, pterm.FgLightGreen, pterm.Bold)
+ // Print text using the created style.
+ style.Println("This text uses a style and is bold and light green with a red background!")
+}
+
+```
+
+
+
+### coloring/disable-output
+
+
+
+
+
+SHOW SOURCE
+
+```go
+package main
+
+import "github.com/pterm/pterm"
+
+func main() {
+ // Loop from 0 to 14
+ for i := 0; i < 15; i++ {
+ switch i {
+ case 5:
+ // At the 5th iteration, print a message and disable the output
+ pterm.Info.Println("Disabled Output!")
+ pterm.DisableOutput()
+ case 10:
+ // At the 10th iteration, enable the output and print a message
+ pterm.EnableOutput()
+ pterm.Info.Println("Enabled Output!")
+ }
+
+ // Print a progress message for each iteration
+ pterm.Printf("Printing something... [%d/%d]\n", i, 15)
+ }
+}
+
+```
+
+
+
+### coloring/fade-colors
+
+
+
+
+
+SHOW SOURCE
+
+```go
+package main
+
+import (
+ "github.com/pterm/pterm"
+)
+
+func main() {
+ // Print an informational message.
+ pterm.Info.Println("RGB colors only work in Terminals which support TrueColor.")
+
+ // Define the start and end points for the color gradient.
+ startColor := pterm.NewRGB(0, 255, 255) // Cyan
+ endColor := pterm.NewRGB(255, 0, 255) // Magenta
+
+ // Get the terminal height to determine the gradient range.
+ terminalHeight := pterm.GetTerminalHeight()
+
+ // Loop over the range of the terminal height to create a color gradient.
+ for i := 0; i < terminalHeight-2; i++ {
+ // Calculate the fade factor for the current step in the gradient.
+ fadeFactor := float32(i) / float32(terminalHeight-2)
+
+ // Create a color that represents the current step in the gradient.
+ currentColor := startColor.Fade(0, 1, fadeFactor, endColor)
+
+ // Print a string with the current color.
+ currentColor.Println("Hello, World!")
+ }
+}
+
+```
+
+
+
+### coloring/fade-colors-rgb-style
+
+
+
+
+
+SHOW SOURCE
+
+```go
+package main
+
+import (
+ "strings"
+
+ "github.com/pterm/pterm"
+)
+
+func main() {
+ // Define RGB colors
+ white := pterm.NewRGB(255, 255, 255)
+ grey := pterm.NewRGB(128, 128, 128)
+ black := pterm.NewRGB(0, 0, 0)
+ red := pterm.NewRGB(255, 0, 0)
+ purple := pterm.NewRGB(255, 0, 255)
+ green := pterm.NewRGB(0, 255, 0)
+
+ // Define strings to be printed
+ str1 := "RGB colors only work in Terminals which support TrueColor."
+ str2 := "The background and foreground colors can be customized individually."
+ str3 := "Styles can also be applied. For example: Bold or Italic."
+
+ // Print first string with color fading from white to purple
+ printFadedString(str1, white, purple, grey, black)
+
+ // Print second string with color fading from purple to red
+ printFadedString(str2, black, purple, red, red)
+
+ // Print third string with color fading from white to green and style changes
+ printStyledString(str3, white, green, red, black)
+}
+
+// printFadedString prints a string with color fading effect
+func printFadedString(str string, fgStart, fgEnd, bgStart, bgEnd pterm.RGB) {
+ strs := strings.Split(str, "")
+ var result string
+ for i := 0; i < len(str); i++ {
+ // Create a style with color fading effect
+ style := pterm.NewRGBStyle(fgStart.Fade(0, float32(len(str)), float32(i), fgEnd), bgStart.Fade(0, float32(len(str)), float32(i), bgEnd))
+ // Append styled letter to result string
+ result += style.Sprint(strs[i])
+ }
+ pterm.Println(result)
+}
+
+// printStyledString prints a string with color fading and style changes
+func printStyledString(str string, fgStart, fgEnd, bgStart, bgEnd pterm.RGB) {
+ strs := strings.Split(str, "")
+ var result string
+ boldStr := strings.Split("Bold", "")
+ italicStr := strings.Split("Italic", "")
+ bold, italic := 0, 0
+ for i := 0; i < len(str); i++ {
+ // Create a style with color fading effect
+ style := pterm.NewRGBStyle(fgStart.Fade(0, float32(len(str)), float32(i), fgEnd), bgStart.Fade(0, float32(len(str)), float32(i), bgEnd))
+ // Check if the next letters are "Bold" or "Italic" and add the corresponding style
+ if bold < len(boldStr) && i+len(boldStr)-bold <= len(strs) && strings.Join(strs[i:i+len(boldStr)-bold], "") == strings.Join(boldStr[bold:], "") {
+ style = style.AddOptions(pterm.Bold)
+ bold++
+ } else if italic < len(italicStr) && i+len(italicStr)-italic < len(strs) && strings.Join(strs[i:i+len(italicStr)-italic], "") == strings.Join(italicStr[italic:], "") {
+ style = style.AddOptions(pterm.Italic)
+ italic++
+ }
+ // Append styled letter to result string
+ result += style.Sprint(strs[i])
+ }
+ pterm.Println(result)
+}
+
+```
+
+
+
+### coloring/fade-multiple-colors
+
+
+
+
+
+SHOW SOURCE
+
+```go
+package main
+
+import (
+ "strings"
+
+ "github.com/pterm/pterm"
+)
+
+func main() {
+ // Define RGB values for gradient points.
+ startColor := pterm.NewRGB(0, 255, 255)
+ firstPoint := pterm.NewRGB(255, 0, 255)
+ secondPoint := pterm.NewRGB(255, 0, 0)
+ thirdPoint := pterm.NewRGB(0, 255, 0)
+ endColor := pterm.NewRGB(255, 255, 255)
+
+ // Define the string to be printed.
+ str := "RGB colors only work in Terminals which support TrueColor."
+ strs := strings.Split(str, "")
+
+ // Initialize an empty string for the faded info.
+ var fadeInfo string
+
+ // Loop over the string length to create a gradient effect.
+ for i := 0; i < len(str); i++ {
+ // Append each character of the string with a faded color to the info string.
+ fadeInfo += startColor.Fade(0, float32(len(str)), float32(i), firstPoint).Sprint(strs[i])
+ }
+
+ // Print the info string with gradient effect.
+ pterm.Info.Println(fadeInfo)
+
+ // Get the terminal height.
+ terminalHeight := pterm.GetTerminalHeight()
+
+ // Loop over the terminal height to print "Hello, World!" with a gradient effect.
+ for i := 0; i < terminalHeight-2; i++ {
+ // Print the string with a color that fades from startColor to endColor.
+ startColor.Fade(0, float32(terminalHeight-2), float32(i), firstPoint, secondPoint, thirdPoint, endColor).Println("Hello, World!")
+ }
+}
+
+```
+
+
+
+### coloring/override-default-printers
+
+
+
+
+
+SHOW SOURCE
+
+```go
+package main
+
+import "github.com/pterm/pterm"
+
+func main() {
+ // Print a default error message with PTerm's built-in Error style.
+ pterm.Error.Println("This is the default Error")
+
+ // Override the default error prefix with a new text and style.
+ pterm.Error.Prefix = pterm.Prefix{Text: "OVERRIDE", Style: pterm.NewStyle(pterm.BgCyan, pterm.FgRed)}
+
+ // Print the error message again, this time with the overridden prefix.
+ pterm.Error.Println("This is the default Error after the prefix was overridden")
+}
+
+```
+
+
+
+### coloring/print-color-rgb
+
+
+
+
+
+SHOW SOURCE
+
+```go
+package main
+
+import "github.com/pterm/pterm"
+
+func main() {
+ // Create a new RGB color with values 178, 44, 199.
+ // This color will be used for the text.
+ pterm.NewRGB(178, 44, 199).Println("This text is printed with a custom RGB!")
+
+ // Create a new RGB color with values 15, 199, 209.
+ // This color will be used for the text.
+ pterm.NewRGB(15, 199, 209).Println("This text is printed with a custom RGB!")
+
+ // Create a new RGB color with values 201, 144, 30.
+ // This color will be used for the background.
+ // The 'true' argument indicates that the color is for the background.
+ pterm.NewRGB(201, 144, 30, true).Println("This text is printed with a custom RGB background!")
+}
+
+```
+
+
+
+### coloring/print-color-rgb-style
+
+
+
+
+
+SHOW SOURCE
+
+```go
+package main
+
+import (
+ "github.com/pterm/pterm"
+)
+
+func main() {
+ // Define RGB colors for foreground and background.
+ foregroundRGB := pterm.RGB{R: 187, G: 80, B: 0}
+ backgroundRGB := pterm.RGB{R: 0, G: 50, B: 123}
+
+ // Create a new RGB style with the defined foreground and background colors.
+ rgbStyle := pterm.NewRGBStyle(foregroundRGB, backgroundRGB)
+
+ // Print a string with the custom RGB colors. No extra style options (such as Bold or Italic) are applied yet.
+ rgbStyle.Println("This text is not styled.")
+
+ // Add the 'Bold' option to the RGB style and print a string with this style.
+ rgbStyle.AddOptions(pterm.Bold).Println("This text is bold.")
+
+ // Add the 'Italic' option to the RGB style and print a string with this style.
+ rgbStyle.AddOptions(pterm.Italic).Println("This text is italic.")
+}
+
+```
+
+
+
+### demo/demo
+
+
+
+
+
+SHOW SOURCE
+
+```go
+package main
+
+import (
+ "flag"
+ "math/rand"
+ "reflect"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/pterm/pterm"
+ "github.com/pterm/pterm/putils"
+)
+
+// Speed the demo up by setting this flag.
+// Useful for debugging.
+// Example:
+//
+// go run main.go -speedup
+var speedup = flag.Bool("speedup", false, "Speed up the demo")
+var skipIntro = flag.Bool("skip-intro", false, "Skips the intro")
+var second = time.Second
+
+var pseudoProgramList = strings.Split("pseudo-excel pseudo-photoshop pseudo-chrome pseudo-outlook pseudo-explorer "+
+ "pseudo-git pseudo-vsc pseudo-intellij pseudo-minecraft pseudo-scoop pseudo-chocolatey", " ")
+
+func main() {
+ setup() // Setup the demo (flags etc.)
+
+ // Show intro
+ if !*skipIntro {
+ introScreen()
+ clear()
+ }
+
+ showcase("Structured Logging", 5, func() {
+ logger := pterm.DefaultLogger.
+ WithLevel(pterm.LogLevelTrace)
+
+ logger.Trace("Doing not so important stuff", logger.Args("priority", "super low"))
+
+ time.Sleep(time.Second * 3)
+
+ interestingStuff := map[string]any{
+ "when were crayons invented": "1903",
+ "what is the meaning of life": 42,
+ "is this interesting": true,
+ }
+ logger.Debug("This might be interesting", logger.ArgsFromMap(interstingStuff))
+ time.Sleep(time.Second * 3)
+
+ logger.Info("That was actually interesting", logger.Args("such", "wow"))
+ time.Sleep(time.Second * 3)
+ logger.Warn("Oh no, I see an error coming to us!", logger.Args("speed", 88, "measures", "mph"))
+ time.Sleep(time.Second * 3)
+ logger.Error("Damn, here it is!", logger.Args("error", "something went wrong"))
+ time.Sleep(time.Second * 3)
+ logger.Info("But what's really cool is, that you can print very long logs, and PTerm will automatically wrap them for you! Say goodbye to text, that has weird line breaks!", logger.Args("very", "long"))
+ })
+
+ showcase("Progress bar", 2, func() {
+ pb, _ := pterm.DefaultProgressbar.WithTotal(len(pseudoProgramList)).WithTitle("Installing stuff").Start()
+ for i := 0; i < pb.Total; i++ {
+ pb.UpdateTitle("Installing " + pseudoProgramList[i])
+ if pseudoProgramList[i] == "pseudo-minecraft" {
+ pterm.Warning.Println("Could not install pseudo-minecraft\nThe company policy forbids games.")
+ } else {
+ pterm.Success.Println("Installing " + pseudoProgramList[i])
+ }
+ pb.Increment()
+ time.Sleep(second / 2)
+ }
+ pb.Stop()
+ })
+
+ showcase("Spinner", 2, func() {
+ list := pseudoProgramList[7:]
+ spinner, _ := pterm.DefaultSpinner.Start("Installing stuff")
+ for i := 0; i < len(list); i++ {
+ spinner.UpdateText("Installing " + list[i])
+ if list[i] == "pseudo-minecraft" {
+ pterm.Warning.Println("Could not install pseudo-minecraft\nThe company policy forbids games.")
+ } else {
+ pterm.Success.Println("Installing " + list[i])
+ }
+ time.Sleep(second)
+ }
+ spinner.Success()
+ })
+
+ showcase("Live Output", 2, func() {
+ pterm.Info.Println("You can use an Area to display changing output:")
+ pterm.Println()
+ area, _ := pterm.DefaultArea.WithCenter().Start() // Start the Area printer, with the Center option.
+ for i := 0; i < 10; i++ {
+ str, _ := pterm.DefaultBigText.WithLetters(putils.LettersFromString(time.Now().Format("15:04:05"))).Srender() // Save current time in str.
+ area.Update(str) // Update Area contents.
+ time.Sleep(time.Second)
+ }
+ area.Stop()
+ })
+
+ showcase("Tables", 4, func() {
+ for i := 0; i < 3; i++ {
+ pterm.Println()
+ }
+ td := [][]string{
+ {"Library", "Description"},
+ {"PTerm", "Make beautiful CLIs"},
+ {"Testza", "Programmer friendly test framework"},
+ {"Cursor", "Move the cursor around the terminal"},
+ }
+ table, _ := pterm.DefaultTable.WithHasHeader().WithData(td).Srender()
+ boxedTable, _ := pterm.DefaultTable.WithHasHeader().WithData(td).WithBoxed().Srender()
+ pterm.DefaultCenter.Println(table)
+ pterm.DefaultCenter.Println(boxedTable)
+ })
+
+ showcase("TrueColor Support", 7, func() {
+ from := pterm.NewRGB(0, 255, 255) // This RGB value is used as the gradient's start point.
+ to := pterm.NewRGB(255, 0, 255) // This RGB value is used as the gradient's end point.
+
+ str := "If your terminal has TrueColor support, you can use RGB colors!\nYou can even fade them :)\n\nLorem ipsum dolor sit amet, consetetur sadipscing elitr, sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat, sed diam voluptua. At vero eos et accusam et justo duo dolores et ea rebum. Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet. Lorem ipsum dolor sit amet, consetetur sadipscing elitr, sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat, sed diam voluptua. At vero eos et accusam et justo duo dolores et ea rebum. Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet."
+ strs := strings.Split(str, "")
+ var fadeInfo string // String which will be used to print info.
+ // For loop over the range of the string length.
+ for i := 0; i < len(str); i++ {
+ // Append faded letter to info string.
+ fadeInfo += from.Fade(0, float32(len(str)), float32(i), to).Sprint(strs[i])
+ }
+ pterm.DefaultCenter.WithCenterEachLineSeparately().Println(fadeInfo)
+ })
+
+ showcase("Fully Customizable", 2, func() {
+ for i := 0; i < 4; i++ {
+ pterm.Println()
+ }
+ text := "All printers are fully customizable!"
+ area := pterm.DefaultArea.WithCenter()
+ area.Update(pterm.DefaultBox.Sprintln(text))
+ time.Sleep(second)
+ area.Update(pterm.DefaultBox.WithTopPadding(1).Sprintln(text))
+ time.Sleep(second / 3)
+ area.Update(pterm.DefaultBox.WithTopPadding(1).WithBottomPadding(1).Sprintln(text))
+ time.Sleep(second / 3)
+ area.Update(pterm.DefaultBox.WithTopPadding(1).WithBottomPadding(1).WithLeftPadding(1).Sprintln(text))
+ time.Sleep(second / 3)
+ area.Update(pterm.DefaultBox.WithTopPadding(1).WithBottomPadding(1).WithLeftPadding(1).WithRightPadding(1).Sprintln(text))
+ time.Sleep(second / 3)
+ area.Update(pterm.DefaultBox.WithTopPadding(1).WithBottomPadding(1).WithLeftPadding(1).WithRightPadding(1).WithTitle("Some title!").WithTitleTopLeft().Sprintln(text))
+ time.Sleep(second / 3)
+ area.Update(pterm.DefaultBox.WithTopPadding(1).WithBottomPadding(1).WithLeftPadding(1).WithRightPadding(1).WithTitle("Some title!").WithTitleTopCenter().Sprintln(text))
+ time.Sleep(second / 3)
+ area.Update(pterm.DefaultBox.WithTopPadding(1).WithBottomPadding(1).WithLeftPadding(1).WithRightPadding(1).WithTitle("Some title!").WithTitleTopRight().Sprintln(text))
+ time.Sleep(second / 3)
+ area.Update(pterm.DefaultBox.WithTopPadding(1).WithBottomPadding(1).WithLeftPadding(1).WithRightPadding(1).WithTitle("Some title!").WithTitleBottomRight().Sprintln(text))
+ time.Sleep(second / 3)
+ area.Update(pterm.DefaultBox.WithTopPadding(1).WithBottomPadding(1).WithLeftPadding(1).WithRightPadding(1).WithTitle("Some title!").WithTitleBottomCenter().Sprintln(text))
+ time.Sleep(second / 3)
+ area.Update(pterm.DefaultBox.WithTopPadding(1).WithBottomPadding(1).WithLeftPadding(1).WithRightPadding(1).WithTitle("Some title!").WithTitleBottomLeft().Sprintln(text))
+ time.Sleep(second / 3)
+ area.Update(pterm.DefaultBox.WithTopPadding(1).WithBottomPadding(1).WithLeftPadding(1).WithRightPadding(1).WithBoxStyle(pterm.NewStyle(pterm.FgCyan)).Sprintln(text))
+ time.Sleep(second / 5)
+ area.Update(pterm.DefaultBox.WithTopPadding(1).WithBottomPadding(1).WithLeftPadding(1).WithRightPadding(1).WithBoxStyle(pterm.NewStyle(pterm.FgRed)).Sprintln(text))
+ time.Sleep(second / 5)
+ area.Update(pterm.DefaultBox.WithTopPadding(1).WithBottomPadding(1).WithLeftPadding(1).WithRightPadding(1).WithBoxStyle(pterm.NewStyle(pterm.FgGreen)).Sprintln(text))
+ time.Sleep(second / 5)
+ area.Update(pterm.DefaultBox.WithTopPadding(1).
+ WithBottomPadding(1).
+ WithLeftPadding(1).
+ WithRightPadding(1).
+ WithHorizontalString("═").
+ WithVerticalString("║").
+ WithBottomLeftCornerString("╗").
+ WithBottomRightCornerString("╔").
+ WithTopLeftCornerString("╝").
+ WithTopRightCornerString("╚").
+ Sprintln(text))
+ area.Stop()
+ })
+
+ showcase("Themes", 2, func() {
+ pterm.Info.Println("You can change the color theme of PTerm easily to fit your needs!\nThis is the default one:")
+ time.Sleep(second / 2)
+ // Print every value of the default theme with its own style.
+ v := reflect.ValueOf(pterm.ThemeDefault)
+ typeOfS := v.Type()
+
+ if typeOfS == reflect.TypeOf(pterm.Theme{}) {
+ for i := 0; i < v.NumField(); i++ {
+ field, ok := v.Field(i).Interface().(pterm.Style)
+ if ok {
+ field.Println(typeOfS.Field(i).Name)
+ }
+ time.Sleep(time.Millisecond * 250)
+ }
+ }
+ })
+
+ showcase("And much more!", 3, func() {
+ for i := 0; i < 4; i++ {
+ pterm.Println()
+ }
+ box := pterm.DefaultBox.
+ WithBottomPadding(1).
+ WithTopPadding(1).
+ WithLeftPadding(3).
+ WithRightPadding(3).
+ Sprintf("Have fun exploring %s!", pterm.Cyan("PTerm"))
+ pterm.DefaultCenter.Println(box)
+ })
+}
+
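+// setup parses the command-line flags and shortens the demo delays when -speedup is set.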
+func setup() {
+ flag.Parse()
+ if *speedup {
+ second = time.Millisecond * 200
+ }
+}
+
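+// introScreen renders the PTerm logo, a demo header and an info text,
+// then counts down with a spinner before the demo starts.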
+func introScreen() {
+ ptermLogo, _ := pterm.DefaultBigText.WithLetters(
+ putils.LettersFromStringWithStyle("P", pterm.NewStyle(pterm.FgLightCyan)),
+ putils.LettersFromStringWithStyle("Term", pterm.NewStyle(pterm.FgLightMagenta))).
+ Srender()
+
+ pterm.DefaultCenter.Print(ptermLogo)
+
+ pterm.DefaultCenter.Print(pterm.DefaultHeader.WithFullWidth().WithBackgroundStyle(pterm.NewStyle(pterm.BgLightBlue)).WithMargin(10).Sprint("PTDP - PTerm Demo Program"))
+
+ pterm.Info.Println("This animation was generated with the latest version of PTerm!" +
+ "\nPTerm works on nearly every terminal and operating system." +
+ "\nIt's super easy to use!" +
+ "\nIf you want, you can customize everything :)" +
+ "\nYou can see the code of this demo in the " + pterm.LightMagenta("./_examples/demo") + " directory." +
+ "\n" +
+ "\nThis demo was updated at: " + pterm.Green(time.Now().Format("02 Jan 2006 - 15:04:05 MST")))
+ pterm.Println()
+ introSpinner, _ := pterm.DefaultSpinner.WithShowTimer(false).WithRemoveWhenDone(true).Start("Waiting for 15 seconds...")
+ time.Sleep(second)
+ for i := 14; i > 0; i-- {
+ if i > 1 {
+ introSpinner.UpdateText("Waiting for " + strconv.Itoa(i) + " seconds...")
+ } else {
+ introSpinner.UpdateText("Waiting for " + strconv.Itoa(i) + " second...")
+ }
+ time.Sleep(second)
+ }
+ introSpinner.Stop()
+}
+
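+// clear clears the terminal screen using ANSI escape codes.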
+func clear() {
+ print("\033[H\033[2J")
+}
+
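+// showcase prints a full-width header for the given title, runs the content function
+// and clears the screen after the configured delay.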
+func showcase(title string, seconds int, content func()) {
+ pterm.DefaultHeader.WithBackgroundStyle(pterm.NewStyle(pterm.BgLightBlue)).WithFullWidth().Println(title)
+ pterm.Println()
+ time.Sleep(second / 2)
+ content()
+ time.Sleep(second * time.Duration(seconds))
+ print("\033[H\033[2J")
+}
+
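+// randomInt seeds the global RNG and returns a random integer in the inclusive range [min, max].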
+func randomInt(min, max int) int {
+ rand.Seed(time.Now().UnixNano())
+ return rand.Intn(max-min+1) + min
+}
+
+```
+
+
+
+### header/demo
+
+
+
+
+
+SHOW SOURCE
+
+```go
+package main
+
+import "github.com/pterm/pterm"
+
+func main() {
+ // Print a default header.
+ // This uses the default settings of PTerm to print a header.
+ pterm.DefaultHeader.Println("This is the default header!")
+
+ // Print a spacer line for better readability.
+ pterm.Println()
+
+ // Print a full-width header.
+ // This uses the WithFullWidth() option of PTerm to print a header that spans the full width of the terminal.
+ pterm.DefaultHeader.WithFullWidth().Println("This is a full-width header.")
+}
+
+```
+
+
+
+### header/custom
+
+
+
+
+
+SHOW SOURCE
+
+```go
+package main
+
+import "github.com/pterm/pterm"
+
+func main() {
+ // Customize the DefaultHeader with a cyan background, black text, and a margin of 15.
+ pterm.DefaultHeader.WithMargin(15).WithBackgroundStyle(pterm.NewStyle(pterm.BgCyan)).WithTextStyle(pterm.NewStyle(pterm.FgBlack)).Println("This is a custom header!")
+
+ // Define a new HeaderPrinter with a red background, black text, and a margin of 20.
+ newHeader := pterm.HeaderPrinter{
+ TextStyle: pterm.NewStyle(pterm.FgBlack),
+ BackgroundStyle: pterm.NewStyle(pterm.BgRed),
+ Margin: 20,
+ }
+
+ // Print the custom header using the new HeaderPrinter.
+ newHeader.Println("This is a custom header!")
+}
+
+```
+
+
+
+### heatmap/demo
+
+
+
+
+
+SHOW SOURCE
+
+```go
+package main
+
+import (
+ "github.com/pterm/pterm"
+)
+
+func main() {
+ // Define the data for the heatmap. Each sub-array represents a row in the heatmap.
+ data := [][]float32{
+ {0.9, 0.2, -0.7, 0.4, -0.5, 0.6, -0.3, 0.8, -0.1, -1.0, 0.1, -0.8, 0.3},
+ {0.2, -0.7, -0.5, -0.3, -0.1, 0.1, 0.3, 0.5, 0.9, -0.9, -0.7, -0.5, -0.3},
+ {0.4, 0.4, -0.3, -1.0, 0.3, -0.2, -0.9, 0.5, -0.3, -1.0, 0.6, -0.2, -0.9},
+ {0.9, -0.5, -0.1, 0.3, 1, -0.7, -0.3, 0.1, 0.7, -0.9, -0.5, 0.2, 0.6},
+ {0.5, 0.6, 0.1, -0.2, -0.7, 0.8, 0.6, 0.1, -0.5, -0.7, 0.7, 0.3, 0.0},
+ }
+
+ // Define the labels for the X and Y axes of the heatmap.
+ headerData := pterm.HeatmapAxis{
+ XAxis: []string{"a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m"},
+ YAxis: []string{"1", "2", "3", "4", "5"},
+ }
+
+ // Create a heatmap with the defined data and axis labels, and enable RGB colors.
+ // Then render the heatmap.
+ pterm.DefaultHeatmap.WithAxisData(headerData).WithData(data).WithEnableRGB().Render()
+}
+
+```
+
+
+
+### heatmap/custom_colors
+
+
+
+
+
+SHOW SOURCE
+
+```go
+package main
+
+import (
+ "github.com/pterm/pterm"
+)
+
+func main() {
+ // Define the data for the heatmap
+ data := [][]float32{
+ {0.9, 0.2, -0.7, 0.4, -0.5, 0.6, -0.3, 0.8, -0.1, -1.0, 0.1, -0.8, 0.3},
+ {0.2, -0.7, -0.5, -0.3, -0.1, 0.1, 0.3, 0.5, 0.9, -0.9, -0.7, -0.5, -0.3},
+ {0.4, 0.4, -0.3, -1.0, 0.3, -0.2, -0.9, 0.5, -0.3, -1.0, 0.6, -0.2, -0.9},
+ {0.9, -0.5, -0.1, 0.3, 1, -0.7, -0.3, 0.1, 0.7, -0.9, -0.5, 0.2, 0.6},
+ {0.5, 0.6, 0.1, -0.2, -0.7, 0.8, 0.6, 0.1, -0.5, -0.7, 0.7, 0.3, 0.0},
+ }
+
+ // Define the axis labels for the heatmap
+ headerData := pterm.HeatmapAxis{
+ XAxis: []string{"a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m"},
+ YAxis: []string{"1", "2", "3", "4", "5"},
+ }
+
+ // Print an informational message
+ pterm.Info.Println("The following table has no rgb (supported by every terminal), no axis data and a legend.")
+ pterm.Println()
+
+ // Create the heatmap with the defined data and options, and render it
+ pterm.DefaultHeatmap.
+ WithData(data).
+ WithBoxed(false).
+ WithAxisData(headerData).
+ WithColors(pterm.BgBlue, pterm.BgRed, pterm.BgGreen, pterm.BgYellow).
+ WithLegend().
+ Render()
+}
+
+```
+
+
+
+### heatmap/custom_legend
+
+
+
+
+
+SHOW SOURCE
+
+```go
+package main
+
+import (
+ "github.com/pterm/pterm"
+)
+
+func main() {
+ // Define the data for the heatmap
+ data := [][]float32{
+ {0.9, 0.2, -0.7, 0.4, -0.5, 0.6, -0.3, 0.8, -0.1, -1.0, 0.1, -0.8, 0.3},
+ {0.2, -0.7, -0.5, -0.3, -0.1, 0.1, 0.3, 0.5, 0.9, -0.9, -0.7, -0.5, -0.3},
+ {0.4, 0.4, -0.3, -1.0, 0.3, -0.2, -0.9, 0.5, -0.3, -1.0, 0.6, -0.2, -0.9},
+ {0.9, -0.5, -0.1, 0.3, 1, -0.7, -0.3, 0.1, 0.7, -0.9, -0.5, 0.2, 0.6},
+ {0.5, 0.6, 0.1, -0.2, -0.7, 0.8, 0.6, 0.1, -0.5, -0.7, 0.7, 0.3, 0.0},
+ }
+
+ // Define the header data for the heatmap
+ headerData := pterm.HeatmapAxis{
+ XAxis: []string{"a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m"},
+ YAxis: []string{"1", "2", "3", "4", "5"},
+ }
+
+ // Print an informational message
+ pterm.Info.Println("The following table has rgb (not supported by every terminal), axis data and a custom legend.")
+ pterm.Println()
+
+ // Create the heatmap with the defined data and options
+ // Options are chained in a single line for simplicity
+ pterm.DefaultHeatmap.
+ WithData(data).
+ WithBoxed(false).
+ WithAxisData(headerData).
+ WithEnableRGB().
+ WithLegendLabel("custom").
+ WithLegendOnlyColoredCells().
+ Render() // Render the heatmap
+}
+
+```
+
+
+
+### heatmap/custom_rgb
+
+
+
+
+
+SHOW SOURCE
+
+```go
+package main
+
+import (
+ "github.com/pterm/pterm"
+)
+
+func main() {
+ // Define the data for the heatmap.
+ data := [][]float32{
+ {0.9, 0.2, -0.7, 0.4, -0.5, 0.6, -0.3, 0.8, -0.1, -1.0, 0.1, -0.8, 0.3},
+ {0.2, -0.7, -0.5, -0.3, -0.1, 0.1, 0.3, 0.5, 0.9, -0.9, -0.7, -0.5, -0.3},
+ {0.4, 0.4, -0.3, -1.0, 0.3, -0.2, -0.9, 0.5, -0.3, -1.0, 0.6, -0.2, -0.9},
+ {0.9, -0.5, -0.1, 0.3, 1, -0.7, -0.3, 0.1, 0.7, -0.9, -0.5, 0.2, 0.6},
+ {0.5, 0.6, 0.1, -0.2, -0.7, 0.8, 0.6, 0.1, -0.5, -0.7, 0.7, 0.3, 0.0},
+ }
+
+ // Define the axis labels for the heatmap.
+ axisLabels := pterm.HeatmapAxis{
+ XAxis: []string{"a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m"},
+ YAxis: []string{"1", "2", "3", "4", "5"},
+ }
+
+ // Print an informational message.
+ pterm.Info.Println("The following table has rgb (not supported by every terminal), axis data and a legend.")
+ pterm.Println()
+
+ // Define the color range for the heatmap.
+ rgbRange := []pterm.RGB{
+ pterm.NewRGB(0, 0, 255),
+ pterm.NewRGB(255, 0, 0),
+ pterm.NewRGB(0, 255, 0),
+ pterm.NewRGB(255, 255, 0),
+ }
+
+ // Create and render the heatmap.
+ pterm.DefaultHeatmap.
+ WithData(data).
+ WithBoxed(false).
+ WithAxisData(axisLabels).
+ WithEnableRGB().
+ WithRGBRange(rgbRange...).
+ Render()
+}
+
+```
+
+
+
+### heatmap/no_grid
+
+
+
+
+
+SHOW SOURCE
+
+```go
+package main
+
+import (
+ "github.com/pterm/pterm"
+)
+
+func main() {
+ // Define the data for the heatmap.
+ data := [][]float32{
+ {0.9, 0.2, -0.7, 0.4, -0.5, 0.6, -0.3, 0.8, -0.1, -1.0, 0.1, -0.8, 0.3},
+ {0.2, -0.7, -0.5, -0.3, -0.1, 0.1, 0.3, 0.5, 0.9, -0.9, -0.7, -0.5, -0.3},
+ {0.4, 0.4, -0.3, -1.0, 0.3, -0.2, -0.9, 0.5, -0.3, -1.0, 0.6, -0.2, -0.9},
+ {0.9, -0.5, -0.1, 0.3, 1, -0.7, -0.3, 0.1, 0.7, -0.9, -0.5, 0.2, 0.6},
+ {0.5, 0.6, 0.1, -0.2, -0.7, 0.8, 0.6, 0.1, -0.5, -0.7, 0.7, 0.3, 0.0},
+ }
+
+ // Define the axis data for the heatmap.
+ axisData := pterm.HeatmapAxis{
+ XAxis: []string{"a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m"},
+ YAxis: []string{"1", "2", "3", "4", "5"},
+ }
+
+ // Print an informational message.
+ pterm.Info.Println("The following table has rgb (not supported by every terminal), axis data and a legend.")
+ pterm.Println()
+
+ // Create the heatmap with the defined data and options, then render it.
+ pterm.DefaultHeatmap.WithData(data).WithBoxed(false).WithAxisData(axisData).WithEnableRGB().WithLegend().WithGrid(false).Render()
+}
+
+```
+
+
+
+### heatmap/separated
+
+
+
+
+
+SHOW SOURCE
+
+```go
+package main
+
+import "github.com/pterm/pterm"
+
+func main() {
+ // Define the data for the heatmap.
+ data := [][]float32{
+ {0.9, 0.2, -0.7, 0.4, -0.5, 0.6, -0.3, 0.8, -0.1, -1.0, 0.1, -0.8, 0.3},
+ {0.2, -0.7, -0.5, -0.3, -0.1, 0.1, 0.3, 0.5, 0.9, -0.9, -0.7, -0.5, -0.3},
+ {0.4, 0.4, -0.3, -1.0, 0.3, -0.2, -0.9, 0.5, -0.3, -1.0, 0.6, -0.2, -0.9},
+ {0.9, -0.5, -0.1, 0.3, 1, -0.7, -0.3, 0.1, 0.7, -0.9, -0.5, 0.2, 0.6},
+ {0.5, 0.6, 0.1, -0.2, -0.7, 0.8, 0.6, 0.1, -0.5, -0.7, 0.7, 0.3, 0.0},
+ }
+
+ // Define the axis labels for the heatmap.
+ headerData := pterm.HeatmapAxis{
+ XAxis: []string{"a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m"},
+ YAxis: []string{"1", "2", "3", "4", "5"},
+ }
+
+ // Print an informational message.
+ pterm.Info.Println("The following table has no rgb (supported by every terminal), no axis data and no legend.")
+ pterm.Println()
+
+ // Create the heatmap with the specified data and options, and render it.
+ pterm.DefaultHeatmap.WithData(data).WithBoxed(false).WithAxisData(headerData).WithLegend(false).Render()
+}
+
+```
+
+
+
+### interactive_confirm/demo
+
+
+
+
+
+SHOW SOURCE
+
+```go
+package main
+
+import (
+ "github.com/pterm/pterm"
+)
+
+func main() {
+ // Show an interactive confirmation dialog and get the result.
+ result, _ := pterm.DefaultInteractiveConfirm.Show()
+
+ // Print a blank line for better readability.
+ pterm.Println()
+
+ // Print the user's answer in a formatted way.
+ pterm.Info.Printfln("You answered: %s", boolToText(result))
+}
+
+// boolToText converts a boolean value to a colored text.
+// If the value is true, it returns a green "Yes".
+// If the value is false, it returns a red "No".
+func boolToText(b bool) string {
+ if b {
+ return pterm.Green("Yes")
+ }
+ return pterm.Red("No")
+}
+
+```
+
+
+
+### interactive_continue/demo
+
+
+
+
+
+SHOW SOURCE
+
+```go
+package main
+
+import (
+ "github.com/pterm/pterm"
+)
+
+func main() {
+ // Create an interactive continue prompt with default settings.
+ // It pauses the program and asks the user whether they want to continue,
+ // letting them pick one of the predefined answers.
+ prompt := pterm.DefaultInteractiveContinue
+
+ // Show the prompt and wait for user input
+ // The returned result is the answer the user selected
+ // The second return value is an error which is ignored here
+ result, _ := prompt.Show()
+
+ // Print a blank line for better readability
+ pterm.Println()
+
+ // Print the user's answer with an info prefix
+ pterm.Info.Printfln("You answered: %s", result)
+}
+
+```
+
+
+
+### interactive_multiselect/demo
+
+
+
+
+
+SHOW SOURCE
+
+```go
+package main
+
+import (
+ "fmt"
+ "github.com/pterm/pterm"
+)
+
+func main() {
+ // Initialize an empty slice to hold the options.
+ var options []string
+
+ // Populate the options slice with 100 options.
+ for i := 0; i < 100; i++ {
+ options = append(options, fmt.Sprintf("Option %d", i))
+ }
+
+ // Add 5 more options to the slice, indicating the availability of fuzzy searching.
+ for i := 0; i < 5; i++ {
+ options = append(options, fmt.Sprintf("You can use fuzzy searching (%d)", i))
+ }
+
+ // Use PTerm's interactive multiselect to present the options to the user and capture their selections.
+ // The Show() method displays the options and waits for user input.
+ selectedOptions, _ := pterm.DefaultInteractiveMultiselect.WithOptions(options).Show()
+
+ // Print the selected options, highlighted in green.
+ pterm.Info.Printfln("Selected options: %s", pterm.Green(selectedOptions))
+}
+
+```
+
+
+
+### interactive_multiselect/custom-checkmarks
+
+
+
+
+
+SHOW SOURCE
+
+```go
+package main
+
+import (
+ "fmt"
+ "github.com/pterm/pterm"
+)
+
+func main() {
+ // Initialize an empty slice to hold the options
+ var options []string
+
+ // Populate the options slice with 5 options
+ for i := 0; i < 5; i++ {
+ options = append(options, fmt.Sprintf("Option %d", i))
+ }
+
+ // Create a new interactive multiselect printer with the options
+ // Disable the filter and define the checkmark symbols
+ printer := pterm.DefaultInteractiveMultiselect.
+ WithOptions(options).
+ WithFilter(false).
+ WithCheckmark(&pterm.Checkmark{Checked: pterm.Green("+"), Unchecked: pterm.Red("-")})
+
+ // Show the interactive multiselect and get the selected options
+ selectedOptions, _ := printer.Show()
+
+ // Print the selected options
+ pterm.Info.Printfln("Selected options: %s", pterm.Green(selectedOptions))
+}
+
+```
+
+
+
+### interactive_multiselect/custom-keys
+
+
+
+
+
+SHOW SOURCE
+
+```go
+package main
+
+import (
+ "atomicgo.dev/keyboard/keys"
+ "fmt"
+ "github.com/pterm/pterm"
+)
+
+func main() {
+ // Initialize an empty slice to hold the options
+ var options []string
+
+ // Populate the options slice with 5 options
+ for i := 0; i < 5; i++ {
+ options = append(options, fmt.Sprintf("Option %d", i))
+ }
+
+ // Create a new interactive multiselect printer with the options
+ // Disable the filter and set the keys for confirming and selecting options
+ printer := pterm.DefaultInteractiveMultiselect.
+ WithOptions(options).
+ WithFilter(false).
+ WithKeyConfirm(keys.Enter).
+ WithKeySelect(keys.Space)
+
+ // Show the interactive multiselect and get the selected options
+ selectedOptions, _ := printer.Show()
+
+ // Print the selected options
+ pterm.Info.Printfln("Selected options: %s", pterm.Green(selectedOptions))
+}
+
+```
+
+
+
+### interactive_select/demo
+
+
+
+
+
+SHOW SOURCE
+
+```go
+package main
+
+import (
+ "fmt"
+ "github.com/pterm/pterm"
+)
+
+func main() {
+ // Initialize an empty slice to hold the options
+ var options []string
+
+ // Generate 100 options and add them to the options slice
+ for i := 0; i < 100; i++ {
+ options = append(options, fmt.Sprintf("Option %d", i))
+ }
+
+ // Generate 5 additional options with a specific message and add them to the options slice
+ for i := 0; i < 5; i++ {
+ options = append(options, fmt.Sprintf("You can use fuzzy searching (%d)", i))
+ }
+
+ // Use PTerm's interactive select feature to present the options to the user and capture their selection
+ // The Show() method displays the options and waits for the user's input
+ selectedOption, _ := pterm.DefaultInteractiveSelect.WithOptions(options).Show()
+
+ // Display the selected option to the user with a green color for emphasis
+ pterm.Info.Printfln("Selected option: %s", pterm.Green(selectedOption))
+}
+
+```
+
+
+
+### interactive_textinput/demo
+
+
+
+
+
+SHOW SOURCE
+
+```go
+package main
+
+import (
+ "github.com/pterm/pterm"
+)
+
+func main() {
+ // Create an interactive text input with single line input mode and show it
+ result, _ := pterm.DefaultInteractiveTextInput.Show()
+
+ // Print a blank line for better readability
+ pterm.Println()
+
+ // Print the user's answer with an info prefix
+ pterm.Info.Printfln("You answered: %s", result)
+}
+
+```
+
+
+
+### interactive_textinput/default-value
+
+
+
+
+
+SHOW SOURCE
+
+```go
+package main
+
+import (
+ "github.com/pterm/pterm"
+)
+
+func main() {
+ // Create an interactive text input with single line input mode and show it
+ result, _ := pterm.DefaultInteractiveTextInput.WithDefaultValue("Some default value").Show()
+
+ // Print a blank line for better readability
+ pterm.Println()
+
+ // Print the user's answer with an info prefix
+ pterm.Info.Printfln("You answered: %s", result)
+}
+
+```
+
+
+
+### interactive_textinput/multi-line
+
+
+
+
+
+SHOW SOURCE
+
+```go
+package main
+
+import (
+ "github.com/pterm/pterm"
+)
+
+func main() {
+ // Create a default interactive text input with multi-line enabled.
+ // This allows the user to input multiple lines of text.
+ textInput := pterm.DefaultInteractiveTextInput.WithMultiLine()
+
+ // Show the text input to the user and store the result.
+ // The second return value (an error) is ignored with '_'.
+ result, _ := textInput.Show()
+
+ // Print a blank line for better readability in the output.
+ pterm.Println()
+
+ // Print the user's input prefixed with an informational message.
+ // The '%s' placeholder is replaced with the user's input.
+ pterm.Info.Printfln("You answered: %s", result)
+}
+
+```
+
+
+
+### interactive_textinput/password
+
+
+
+
+
+SHOW SOURCE
+
+```go
+package main
+
+import "github.com/pterm/pterm"
+
+func main() {
+ // Create an interactive text input with a mask for password input
+ passwordInput := pterm.DefaultInteractiveTextInput.WithMask("*")
+
+ // Show the password input prompt and store the result
+ result, _ := passwordInput.Show("Enter your password")
+
+ // Get the default logger from PTerm
+ logger := pterm.DefaultLogger
+
+ // Log the received password (the on-screen input was masked, but the logged value is plain text)
+ // Note: In a real-world application, you should never log passwords
+ logger.Info("Password received", logger.Args("password", result))
+}
+
+```
+
+
+
+### logger/demo
+
+
+
+
+
+SHOW SOURCE
+
+```go
+package main
+
+import (
+ "github.com/pterm/pterm"
+ "time"
+)
+
+func main() {
+ // Create a logger with trace level
+ logger := pterm.DefaultLogger.WithLevel(pterm.LogLevelTrace)
+
+ // Log a trace level message
+ logger.Trace("Doing not so important stuff", logger.Args("priority", "super low"))
+
+ // Pause for 3 seconds
+ sleep()
+
+ // Define a map with interesting stuff
+ interestingStuff := map[string]any{
+ "when were crayons invented": "1903",
+ "what is the meaning of life": 42,
+ "is this interesting": true,
+ }
+
+ // Log a debug level message with arguments from the map
+ logger.Debug("This might be interesting", logger.ArgsFromMap(interstingStuff))
+
+ // Pause for 3 seconds
+ sleep()
+
+ // Log an info level message
+ logger.Info("That was actually interesting", logger.Args("such", "wow"))
+
+ // Pause for 3 seconds
+ sleep()
+
+ // Log a warning level message
+ logger.Warn("Oh no, I see an error coming to us!", logger.Args("speed", 88, "measures", "mph"))
+
+ // Pause for 3 seconds
+ sleep()
+
+ // Log an error level message
+ logger.Error("Damn, here it is!", logger.Args("error", "something went wrong"))
+
+ // Pause for 3 seconds
+ sleep()
+
+ // Log an info level message with a long text that will be automatically wrapped
+ logger.Info("But what's really cool is, that you can print very long logs, and PTerm will automatically wrap them for you! Say goodbye to text, that has weird line breaks!", logger.Args("very", "long"))
+
+ // Pause for 3 seconds
+ sleep()
+
+ // Log a fatal level message
+ logger.Fatal("Oh no, this process is getting killed!", logger.Args("fatal", true))
+}
+
+// Function to pause the execution for 3 seconds
+func sleep() {
+ time.Sleep(time.Second * 3)
+}
+
+```
+
+
+
+### logger/custom-key-styles
+
+
+
+
+
+SHOW SOURCE
+
+```go
+package main
+
+import "github.com/pterm/pterm"
+
+func main() {
+ // Create a logger with a level of Trace or higher.
+ logger := pterm.DefaultLogger.WithLevel(pterm.LogLevelTrace)
+
+ // Define a new style for the "priority" key.
+ priorityStyle := map[string]pterm.Style{
+ "priority": *pterm.NewStyle(pterm.FgRed),
+ }
+
+ // Overwrite all key styles with the new map.
+ logger = logger.WithKeyStyles(priorityStyle)
+
+ // Log an info message. The "priority" key will be displayed in red.
+ logger.Info("The priority key should now be red", logger.Args("priority", "low", "foo", "bar"))
+
+ // Define a new style for the "foo" key.
+ fooStyle := *pterm.NewStyle(pterm.FgBlue)
+
+ // Append the new style to the existing ones.
+ logger.AppendKeyStyle("foo", fooStyle)
+
+ // Log another info message. The "foo" key will be displayed in blue.
+ logger.Info("The foo key should now be blue", logger.Args("priority", "low", "foo", "bar"))
+}
+
+```
+
+
+
+### logger/default
+
+
+
+
+
+SHOW SOURCE
+
+```go
+package main
+
+import (
+ "github.com/pterm/pterm"
+ "time"
+)
+
+func main() {
+ // Create a logger with a level of Trace or higher.
+ logger := pterm.DefaultLogger.WithLevel(pterm.LogLevelTrace)
+
+ // Log a trace message with additional arguments.
+ logger.Trace("Doing not so important stuff", logger.Args("priority", "super low"))
+
+ // Create a map of interesting stuff.
+ interestingStuff := map[string]any{
+ "when were crayons invented": "1903",
+ "what is the meaning of life": 42,
+ "is this interesting": true,
+ }
+
+ // Log a debug message with arguments from a map.
+ logger.Debug("This might be interesting", logger.ArgsFromMap(interstingStuff))
+
+ // Log an info message with additional arguments.
+ logger.Info("That was actually interesting", logger.Args("such", "wow"))
+
+ // Log a warning message with additional arguments.
+ logger.Warn("Oh no, I see an error coming to us!", logger.Args("speed", 88, "measures", "mph"))
+
+ // Log an error message with additional arguments.
+ logger.Error("Damn, here it is!", logger.Args("error", "something went wrong"))
+
+ // Log an info message with additional arguments. PTerm will automatically wrap long logs.
+ logger.Info("But what's really cool is, that you can print very long logs, and PTerm will automatically wrap them for you! Say goodbye to text, that has weird line breaks!", logger.Args("very", "long"))
+
+ // Pause for 2 seconds.
+ time.Sleep(time.Second * 2)
+
+ // Log a fatal message with additional arguments. This will terminate the process.
+ logger.Fatal("Oh no, this process is getting killed!", logger.Args("fatal", true))
+}
+
+```
+
+
+
+### logger/json
+
+
+
+
+
+SHOW SOURCE
+
+```go
+package main
+
+import "github.com/pterm/pterm"
+
+func main() {
+ // Create a logger with Trace level and JSON formatter
+ logger := pterm.DefaultLogger.WithLevel(pterm.LogLevelTrace).WithFormatter(pterm.LogFormatterJSON)
+
+ // Log a Trace level message with additional arguments
+ logger.Trace("Doing not so important stuff", logger.Args("priority", "super low"))
+
+ // Create a map of interesting stuff
+ interestingStuff := map[string]any{
+ "when were crayons invented": "1903",
+ "what is the meaning of life": 42,
+ "is this interesting": true,
+ }
+
+ // Log a Debug level message with arguments from the map
+ logger.Debug("This might be interesting", logger.ArgsFromMap(interestingStuff))
+
+ // Log Info, Warn, Error, and Fatal level messages with additional arguments
+ logger.Info("That was actually interesting", logger.Args("such", "wow"))
+ logger.Warn("Oh no, I see an error coming to us!", logger.Args("speed", 88, "measures", "mph"))
+ logger.Error("Damn, here it is!", logger.Args("error", "something went wrong"))
+ logger.Info("But what's really cool is, that you can print very long logs, and PTerm will automatically wrap them for you! Say goodbye to text, that has weird line breaks!", logger.Args("very", "long"))
+ logger.Fatal("Oh no, this process is getting killed!", logger.Args("fatal", true))
+}
+
+```
+
+
+
+### logger/with-caller
+
+
+
+
+
+SHOW SOURCE
+
+```go
+package main
+
+import "github.com/pterm/pterm"
+
+func main() {
+ // Create a logger with Trace level and caller information
+ logger := pterm.DefaultLogger.WithLevel(pterm.LogLevelTrace).WithCaller()
+
+ // Log a trace message with additional arguments
+ logger.Trace("Doing not so important stuff", logger.Args("priority", "super low"))
+
+ // Create a map of interesting stuff
+ interestingStuff := map[string]any{
+ "when were crayons invented": "1903",
+ "what is the meaning of life": 42,
+ "is this interesting": true,
+ }
+
+ // Log a debug message with arguments from a map
+ logger.Debug("This might be interesting", logger.ArgsFromMap(interestingStuff))
+
+ // Log an info message with additional arguments
+ logger.Info("That was actually interesting", logger.Args("such", "wow"))
+
+ // Log a warning message with additional arguments
+ logger.Warn("Oh no, I see an error coming to us!", logger.Args("speed", 88, "measures", "mph"))
+
+ // Log an error message with additional arguments
+ logger.Error("Damn, here it is!", logger.Args("error", "something went wrong"))
+
+ // Log an info message with additional arguments. PTerm will automatically wrap long logs.
+ logger.Info("But what's really cool is, that you can print very long logs, and PTerm will automatically wrap them for you! Say goodbye to text, that has weird line breaks!", logger.Args("very", "long"))
+
+ // Log a fatal message with additional arguments. This will terminate the process.
+ logger.Fatal("Oh no, this process is getting killed!", logger.Args("fatal", true))
+}
+
+```
+
+
+
+### multiple-live-printers/demo
+
+
+
+
+
+SHOW SOURCE
+
+```go
+package main
+
+import (
+ "time"
+
+ "github.com/pterm/pterm"
+)
+
+func main() {
+ // Create a multi printer for managing multiple printers
+ multi := pterm.DefaultMultiPrinter
+
+ // Create two spinners with their own writers
+ spinner1, _ := pterm.DefaultSpinner.WithWriter(multi.NewWriter()).Start("Spinner 1")
+ spinner2, _ := pterm.DefaultSpinner.WithWriter(multi.NewWriter()).Start("Spinner 2")
+
+ // Create five progress bars with their own writers and a total of 100
+ pb1, _ := pterm.DefaultProgressbar.WithTotal(100).WithWriter(multi.NewWriter()).Start("Progressbar 1")
+ pb2, _ := pterm.DefaultProgressbar.WithTotal(100).WithWriter(multi.NewWriter()).Start("Progressbar 2")
+ pb3, _ := pterm.DefaultProgressbar.WithTotal(100).WithWriter(multi.NewWriter()).Start("Progressbar 3")
+ pb4, _ := pterm.DefaultProgressbar.WithTotal(100).WithWriter(multi.NewWriter()).Start("Progressbar 4")
+ pb5, _ := pterm.DefaultProgressbar.WithTotal(100).WithWriter(multi.NewWriter()).Start("Progressbar 5")
+
+ // Start the multi printer
+ multi.Start()
+
+ // Increment progress bars and spinners based on certain conditions
+ for i := 1; i <= 100; i++ {
+ pb1.Increment() // Increment progress bar 1 every iteration
+
+ if i%2 == 0 {
+ pb2.Add(3) // Add 3 to progress bar 2 every even iteration
+ }
+
+ if i%5 == 0 {
+ pb3.Increment() // Increment progress bar 3 every 5th iteration
+ }
+
+ if i%10 == 0 {
+ pb4.Increment() // Increment progress bar 4 every 10th iteration
+ }
+
+ if i%3 == 0 {
+ pb5.Increment() // Increment progress bar 5 every 3rd iteration
+ }
+
+ if i%50 == 0 {
+ spinner1.Success("Spinner 1 is done!") // Mark spinner 1 as successful every 50th iteration
+ }
+
+ if i%60 == 0 {
+ spinner2.Fail("Spinner 2 failed!") // Mark spinner 2 as failed every 60th iteration
+ }
+
+ time.Sleep(time.Millisecond * 50) // Sleep for 50 milliseconds between each iteration
+ }
+
+ // Stop the multi printer
+ multi.Stop()
+}
+
+```
+
+
+
+### panel/demo
+
+
+
+
+
+SHOW SOURCE
+
+```go
+package main
+
+import "github.com/pterm/pterm"
+
+func main() {
+ // Define panels in a 2D grid system
+ panels := pterm.Panels{
+ {
+ {Data: "This is the first panel"},
+ {Data: pterm.DefaultHeader.Sprint("Hello, World!")},
+ {Data: "This\npanel\ncontains\nmultiple\nlines"},
+ },
+ {
+ {Data: pterm.Red("This is another\npanel line")},
+ {Data: "This is the second panel\nwith a new line"},
+ },
+ }
+
+ // Render the panels with a padding of 5
+ _ = pterm.DefaultPanel.WithPanels(panels).WithPadding(5).Render()
+}
+
+```
+
+
+
+### paragraph/demo
+
+
+
+
+
+SHOW SOURCE
+
+```go
+package main
+
+import "github.com/pterm/pterm"
+
+func main() {
+ // Using the default paragraph printer to print a long text.
+ // The text is split at the spaces, which is useful for continuous text of all kinds.
+ // The line width can be manually adjusted if needed.
+ pterm.DefaultParagraph.Println("This is the default paragraph printer. As you can see, no words are separated, " +
+ "but the text is split at the spaces. This is useful for continuous text of all kinds. You can manually change the line width if you want to." +
+ "Lorem ipsum dolor sit amet, consetetur sadipscing elitr, sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam")
+
+ // Printing a line space for separation.
+ pterm.Println()
+
+ // Printing a long text without using the paragraph printer.
+ // The default Println() function is used here, which does not provide intelligent splitting.
+ pterm.Println("This text is written with the default Println() function. No intelligent splitting here." +
+ "Lorem ipsum dolor sit amet, consetetur sadipscing elitr, sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam")
+}
+
+```
+
+
+
+### paragraph/customized
+
+
+
+
+
+SHOW SOURCE
+
+```go
+package main
+
+import "github.com/pterm/pterm"
+
+func main() {
+ // Define a long text to be printed as a paragraph.
+ longText := "This is a custom paragraph printer. As you can see, no words are separated, " +
+ "but the text is split at the spaces. This is useful for continuous text of all kinds. You can manually change the line width if you want to." +
+ "Lorem ipsum dolor sit amet, consetetur sadipscing elitr, sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam"
+
+ // Print the long text as a paragraph with a custom maximal width of 60 characters.
+ pterm.DefaultParagraph.WithMaxWidth(60).Println(longText)
+
+ // Print a line space to separate the paragraph from the following text.
+ pterm.Println()
+
+ // Define another long text to be printed without a paragraph printer.
+ longTextWithoutParagraph := "This text is written with the default Println() function. No intelligent splitting here." +
+ "Lorem ipsum dolor sit amet, consetetur sadipscing elitr, sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam"
+
+ // Print the long text without using a paragraph printer.
+ pterm.Println(longTextWithoutParagraph)
+}
+
+```
+
+
+
+### prefix/demo
+
+
+
+
+
+SHOW SOURCE
+
+```go
+package main
+
+import "github.com/pterm/pterm"
+
+func main() {
+ // Enable debug messages in PTerm.
+ pterm.EnableDebugMessages()
+
+ // Print a debug message with PTerm.
+ pterm.Debug.Println("Hello, World!")
+
+ // Print an informational message with PTerm.
+ pterm.Info.Println("Hello, World!")
+
+ // Print a success message with PTerm.
+ pterm.Success.Println("Hello, World!")
+
+ // Print a warning message with PTerm.
+ pterm.Warning.Println("Hello, World!")
+
+ // Print an error message with PTerm. This will also display the filename and line number in the terminal.
+ pterm.Error.Println("Errors show the filename and linenumber inside the terminal!")
+
+ // Print an informational message with PTerm, with line number.
+ // This demonstrates that other PrefixPrinters can also display line numbers.
+ pterm.Info.WithShowLineNumber().Println("Other PrefixPrinters can do that too!")
+
+ // Temporarily set Fatal to false, so that the CI won't crash.
+ // This will print a fatal message with PTerm, but won't terminate the program.
+ pterm.Fatal.WithFatal(false).Println("Hello, World!")
+}
+
+```
+
+
+
+### progressbar/demo
+
+
+
+
+
+SHOW SOURCE
+
+```go
+package main
+
+import (
+ "strings"
+ "time"
+
+ "github.com/pterm/pterm"
+)
+
+// Slice of strings representing names of pseudo applications to be downloaded.
+var fakeInstallList = strings.Split("pseudo-excel pseudo-photoshop pseudo-chrome pseudo-outlook pseudo-explorer "+
+ "pseudo-dops pseudo-git pseudo-vsc pseudo-intellij pseudo-minecraft pseudo-scoop pseudo-chocolatey", " ")
+
+func main() {
+ // Create a progressbar with the total steps equal to the number of items in fakeInstallList.
+ // Set the initial title of the progressbar to "Downloading stuff".
+ p, _ := pterm.DefaultProgressbar.WithTotal(len(fakeInstallList)).WithTitle("Downloading stuff").Start()
+
+ // Loop over each item in the fakeInstallList.
+ for i := 0; i < p.Total; i++ {
+ // Simulate a slow download for the 7th item.
+ if i == 6 {
+ time.Sleep(time.Second * 3)
+ }
+
+ // Update the title of the progressbar with the current item being downloaded.
+ p.UpdateTitle("Downloading " + fakeInstallList[i])
+
+ // Print a success message for the current download. This will be printed above the progressbar.
+ pterm.Success.Println("Downloading " + fakeInstallList[i])
+
+ // Increment the progressbar by one to indicate progress.
+ p.Increment()
+
+ // Pause for 350 milliseconds to simulate the time taken for each download.
+ time.Sleep(time.Millisecond * 350)
+ }
+}
+
+```
+
+
+
+### progressbar/multiple
+
+
+
+
+
+SHOW SOURCE
+
+```go
+package main
+
+import (
+ "time"
+
+ "github.com/pterm/pterm"
+)
+
+func main() {
+ // Create a multi printer instance from the default one
+ multi := pterm.DefaultMultiPrinter
+
+ // Create five progress bars with a total of 100 units each, and assign each a new writer from the multi printer
+ pb1, _ := pterm.DefaultProgressbar.WithTotal(100).WithWriter(multi.NewWriter()).Start("Progressbar 1")
+ pb2, _ := pterm.DefaultProgressbar.WithTotal(100).WithWriter(multi.NewWriter()).Start("Progressbar 2")
+ pb3, _ := pterm.DefaultProgressbar.WithTotal(100).WithWriter(multi.NewWriter()).Start("Progressbar 3")
+ pb4, _ := pterm.DefaultProgressbar.WithTotal(100).WithWriter(multi.NewWriter()).Start("Progressbar 4")
+ pb5, _ := pterm.DefaultProgressbar.WithTotal(100).WithWriter(multi.NewWriter()).Start("Progressbar 5")
+
+ // Start the multi printer
+ multi.Start()
+
+ // Loop to increment progress bars based on certain conditions
+ for i := 1; i <= 100; i++ {
+ pb1.Increment() // Increment the first progress bar at each iteration
+
+ if i%2 == 0 {
+ pb2.Add(3) // Add 3 units to the second progress bar at every even iteration
+ }
+
+ if i%5 == 0 {
+ pb3.Increment() // Increment the third progress bar at every fifth iteration
+ }
+
+ if i%10 == 0 {
+ pb4.Increment() // Increment the fourth progress bar at every tenth iteration
+ }
+
+ if i%3 == 0 {
+ pb5.Increment() // Increment the fifth progress bar at every third iteration
+ }
+
+ time.Sleep(time.Millisecond * 50) // Pause for 50 milliseconds at each iteration
+ }
+
+ // Stop the multi printer
+ multi.Stop()
+}
+
+```
+
+
+
+### section/demo
+
+
+
+
+
+SHOW SOURCE
+
+```go
+package main
+
+import "github.com/pterm/pterm"
+
+func main() {
+ // Create a section with level one and print it.
+ pterm.DefaultSection.Println("This is a section!")
+
+ // Print an informational message.
+ pterm.Info.Println("And here is some text.\nThis text could be anything.\nBasically it's just a placeholder")
+
+ // Create a section with level two and print it.
+ pterm.DefaultSection.WithLevel(2).Println("This is another section!")
+
+ // Print another informational message.
+ pterm.Info.Println("And this is\nmore placeholder text")
+}
+
+```
+
+
+
+### slog/demo
+
+
+
+
+
+SHOW SOURCE
+
+```go
+package main
+
+import (
+ "log/slog"
+
+ "github.com/pterm/pterm"
+)
+
+func main() {
+ // Create a new slog handler with the default PTerm logger
+ handler := pterm.NewSlogHandler(&pterm.DefaultLogger)
+
+ // Create a new slog logger with the handler
+ logger := slog.New(handler)
+
+ // Log a debug message (won't show by default)
+ logger.Debug("This is a debug message that won't show")
+
+ // Change the log level to debug to enable debug messages
+ pterm.DefaultLogger.Level = pterm.LogLevelDebug
+
+ // Log a debug message (will show because debug level is enabled)
+ logger.Debug("This is a debug message", "changedLevel", true)
+
+ // Log an info message
+ logger.Info("This is an info message")
+
+ // Log a warning message
+ logger.Warn("This is a warning message")
+
+ // Log an error message
+ logger.Error("This is an error message")
+}
+
+```
+
+
+
+### spinner/demo
+
+
+
+
+
+SHOW SOURCE
+
+```go
+package main
+
+import (
+ "time"
+
+ "github.com/pterm/pterm"
+)
+
+func main() {
+ // Create and start a fork of the default spinner.
+ spinnerInfo, _ := pterm.DefaultSpinner.Start("Some informational action...")
+ time.Sleep(time.Second * 2) // Simulate 2 seconds of processing something.
+ spinnerInfo.Info() // Resolve spinner with information message.
+
+ // Create and start a fork of the default spinner.
+ spinnerSuccess, _ := pterm.DefaultSpinner.Start("Doing something important... (will succeed)")
+ time.Sleep(time.Second * 2) // Simulate 2 seconds of processing something.
+ spinnerSuccess.Success() // Resolve spinner with success message.
+
+ // Create and start a fork of the default spinner.
+ spinnerWarning, _ := pterm.DefaultSpinner.Start("Doing something important... (will warn)")
+ time.Sleep(time.Second * 2) // Simulate 2 seconds of processing something.
+ spinnerWarning.Warning() // Resolve spinner with warning message.
+
+ // Create and start a fork of the default spinner.
+ spinnerFail, _ := pterm.DefaultSpinner.Start("Doing something important... (will fail)")
+ time.Sleep(time.Second * 2) // Simulate 2 seconds of processing something.
+ spinnerFail.Fail() // Resolve spinner with error message.
+
+ // Create and start a fork of the default spinner.
+ spinnerNochange, _ := pterm.DefaultSpinner.Start("Checking something important... (will result in no change)")
+ // Replace the InfoPrinter with a custom "NOCHG" one
+ spinnerNochange.InfoPrinter = &pterm.PrefixPrinter{
+ MessageStyle: &pterm.Style{pterm.FgLightBlue},
+ Prefix: pterm.Prefix{
+ Style: &pterm.Style{pterm.FgBlack, pterm.BgLightBlue},
+ Text: " NOCHG ",
+ },
+ }
+ time.Sleep(time.Second * 2) // Simulate 2 seconds of processing something.
+ spinnerNochange.Info("No changes were required") // Resolve spinner with the custom informational message.
+
+ // Create and start a fork of the default spinner.
+ spinnerLiveText, _ := pterm.DefaultSpinner.Start("Doing a lot of stuff...")
+ time.Sleep(time.Second) // Simulate 1 second of processing something.
+ spinnerLiveText.UpdateText("It's really much") // Update spinner text.
+ time.Sleep(time.Second) // Simulate 1 second of processing something.
+ spinnerLiveText.UpdateText("We're nearly done!") // Update spinner text.
+ time.Sleep(time.Second) // Simulate 1 second of processing something.
+ spinnerLiveText.Success("Finally!") // Resolve spinner with success message.
+}
+
+```
+
+
+
+### spinner/multiple
+
+
+
+
+
+SHOW SOURCE
+
+```go
+package main
+
+import (
+ "time"
+
+ "github.com/pterm/pterm"
+)
+
+func main() {
+ // Create a multi printer. This allows multiple spinners to print simultaneously.
+ multi := pterm.DefaultMultiPrinter
+
+ // Create and start spinner 1 with a new writer from the multi printer.
+ // The spinner will display the message "Spinner 1".
+ spinner1, _ := pterm.DefaultSpinner.WithWriter(multi.NewWriter()).Start("Spinner 1")
+
+ // Create and start spinner 2 with a new writer from the multi printer.
+ // The spinner will display the message "Spinner 2".
+ spinner2, _ := pterm.DefaultSpinner.WithWriter(multi.NewWriter()).Start("Spinner 2")
+
+ // Create and start spinner 3 with a new writer from the multi printer.
+ // The spinner will display the message "Spinner 3".
+ spinner3, _ := pterm.DefaultSpinner.WithWriter(multi.NewWriter()).Start("Spinner 3")
+
+ // Start the multi printer. This will start printing all the spinners.
+ multi.Start()
+
+ // Wait for 1 second.
+ time.Sleep(time.Millisecond * 1000)
+
+ // Stop spinner 1 with a success message.
+ spinner1.Success("Spinner 1 is done!")
+
+ // Wait for 750 milliseconds.
+ time.Sleep(time.Millisecond * 750)
+
+ // Stop spinner 2 with a failure message.
+ spinner2.Fail("Spinner 2 failed!")
+
+ // Wait for 500 milliseconds.
+ time.Sleep(time.Millisecond * 500)
+
+ // Stop spinner 3 with a warning message.
+ spinner3.Warning("Spinner 3 has a warning!")
+
+ // Stop the multi printer. This will stop printing all the spinners.
+ multi.Stop()
+}
+
+```
+
+
+
+### style/demo
+
+
+
+
+
+SHOW SOURCE
+
+```go
+package main
+
+import "github.com/pterm/pterm"
+
+func main() {
+ // Define a primary style with light cyan foreground, gray background, and bold text
+ primary := pterm.NewStyle(pterm.FgLightCyan, pterm.BgGray, pterm.Bold)
+
+ // Define a secondary style with light green foreground, white background, and italic text
+ secondary := pterm.NewStyle(pterm.FgLightGreen, pterm.BgWhite, pterm.Italic)
+
+ // Print "Hello, World!" with the primary style
+ primary.Println("Hello, World!")
+
+ // Print "Hello, World!" with the secondary style
+ secondary.Println("Hello, World!")
+}
+
+```
+
+
+
+### table/demo
+
+
+
+
+
+SHOW SOURCE
+
+```go
+package main
+
+import "github.com/pterm/pterm"
+
+func main() {
+ // Define the data for the first table
+ tableData1 := pterm.TableData{
+ {"Firstname", "Lastname", "Email", "Note"},
+ {"Paul", "Dean", "augue@velitAliquam.co.uk", ""},
+ {"Callie", "Mckay", "nunc.sed@est.com", "这是一个测试, haha!"},
+ {"Libby", "Camacho", "lobortis@semper.com", "just a test, hey!"},
+ {"张", "小宝", "zhang@example.com", ""},
+ }
+
+ // Create a table with a header and the defined data, then render it
+ pterm.DefaultTable.WithHasHeader().WithData(tableData1).Render()
+
+ pterm.Println() // Blank line
+
+ // Define the data for the second table
+ tableData2 := pterm.TableData{
+ {"Firstname", "Lastname", "Email"},
+ {"Paul\n\nNewline", "Dean", "augue@velitAliquam.co.uk"},
+ {"Callie", "Mckay", "nunc.sed@est.com\nNewline"},
+ {"Libby", "Camacho", "lobortis@semper.com"},
+ {"张", "小宝", "zhang@example.com"},
+ }
+
+ // Create another table with a header and the defined data, then render it
+ pterm.DefaultTable.WithHasHeader().WithData(tableData2).Render()
+}
+
+```
+
+
+
+### table/alternate-row-style
+
+
+
+
+
+SHOW SOURCE
+
+```go
+package main
+
+import "github.com/pterm/pterm"
+
+func main() {
+ // Define the style that will be applied to every other row.
+ alternateStyle := pterm.NewStyle(pterm.BgDarkGray)
+
+ // Define the data for the table.
+ // Each inner slice represents a row in the table.
+ // The first row is considered as the header of the table.
+ tableData := pterm.TableData{
+ {"Firstname", "Lastname", "Email", "Note"},
+ {"Paul", "Dean", "augue@velitAliquam.co.uk", ""},
+ {"Callie", "Mckay", "nunc.sed@est.com", "这是一个测试, haha!"},
+ {"Libby", "Camacho", "lobortis@semper.com", "just a test, hey!"},
+ {"张", "小宝", "zhang@example.com", ""},
+ }
+
+ // Create a table with the defined data.
+ // The table has a header, is boxed, and uses the alternate row style defined above.
+ // Finally, render the table to print it.
+ pterm.DefaultTable.WithHasHeader().WithBoxed().WithData(tableData).WithAlternateRowStyle(alternateStyle).Render()
+}
+
+```
+
+
+
+### table/boxed
+
+
+
+
+
+SHOW SOURCE
+
+```go
+package main
+
+import "github.com/pterm/pterm"
+
+func main() {
+ // Define the data for the table.
+ // Each inner slice represents a row in the table.
+ // The first row is considered as the header of the table.
+ tableData := pterm.TableData{
+ {"Firstname", "Lastname", "Email", "Note"},
+ {"Paul", "Dean", "augue@velitAliquam.co.uk", ""},
+ {"Callie", "Mckay", "nunc.sed@est.com", "这是一个测试, haha!"},
+ {"Libby", "Camacho", "lobortis@semper.com", "just a test, hey!"},
+ {"张", "小宝", "zhang@example.com", ""},
+ }
+
+ // Create a table with the defined data.
+ // The table has a header and is boxed.
+ // Finally, render the table to print it.
+ pterm.DefaultTable.WithHasHeader().WithBoxed().WithData(tableData).Render()
+}
+
+```
+
+
+
+### table/multiple-lines
+
+
+
+
+
+SHOW SOURCE
+
+```go
+package main
+
+import "github.com/pterm/pterm"
+
+func main() {
+ // Define the data for the table.
+ data := pterm.TableData{
+ {"Firstname", "Lastname", "Email"},
+ {"Paul\n\nNewline", "Dean", "augue@velitAliquam.co.uk"},
+ {"Callie", "Mckay", "nunc.sed@est.com\nNewline"},
+ {"Libby", "Camacho", "lobortis@semper.com"},
+ {"张", "小宝", "zhang@example.com"},
+ }
+
+ // Create and render the table.
+ // The options are chained in a single line for simplicity.
+ // The table has a header, a row separator, and a header row separator.
+ pterm.DefaultTable.WithHasHeader().WithRowSeparator("-").WithHeaderRowSeparator("-").WithData(data).Render()
+}
+
+```
+
+
+
+### table/right-alignment
+
+
+
+
+
+SHOW SOURCE
+
+```go
+package main
+
+import "github.com/pterm/pterm"
+
+func main() {
+ // Define the data for the table.
+ // Each inner slice represents a row in the table.
+ // The first row is considered as the header.
+ tableData := pterm.TableData{
+ {"Firstname", "Lastname", "Email", "Note"},
+ {"Paul", "Dean", "augue@velitAliquam.co.uk", ""},
+ {"Callie", "Mckay", "nunc.sed@est.com", "这是一个测试, haha!"},
+ {"Libby", "Camacho", "lobortis@semper.com", "just a test, hey!"},
+ {"张", "小宝", "zhang@example.com", ""},
+ }
+
+ // Create a table with the defined data.
+ // The table has a header and the text in the cells is right-aligned.
+ // The Render() method is used to print the table to the console.
+ pterm.DefaultTable.WithHasHeader().WithRightAlignment().WithData(tableData).Render()
+}
+
+```
+
+
+
+### theme/demo
+
+
+
+
+
+SHOW SOURCE
+
+```go
+package main
+
+import (
+ "github.com/pterm/pterm"
+ "reflect"
+ "time"
+)
+
+func main() {
+ // Print an informational message about the default theme styles.
+ pterm.Info.Println("These are the default theme styles.\nYou can modify them easily to your personal preference,\nor create new themes from scratch :)")
+
+ // Print a blank line for better readability.
+ pterm.Println()
+
+ // Get the value and type of the default theme.
+ v := reflect.ValueOf(pterm.ThemeDefault)
+ typeOfS := v.Type()
+
+ // Check if the type of the default theme is 'pterm.Theme'.
+ if typeOfS == reflect.TypeOf(pterm.Theme{}) {
+ // Iterate over each field in the default theme.
+ for i := 0; i < v.NumField(); i++ {
+ // Try to convert the field to 'pterm.Style'.
+ field, ok := v.Field(i).Interface().(pterm.Style)
+ if ok {
+ // Print the field name using its own style.
+ field.Println(typeOfS.Field(i).Name)
+ }
+ // Pause for a quarter of a second to make the output easier to read.
+ time.Sleep(time.Millisecond * 250)
+ }
+ }
+}
+
+```
+
+
+
+### tree/demo
+
+
+
+
+
+SHOW SOURCE
+
+```go
+package main
+
+import (
+ "github.com/pterm/pterm"
+)
+
+func main() {
+ // Define a tree structure using pterm.TreeNode
+ tree := pterm.TreeNode{
+ // The top node of the tree
+ Text: "Top node",
+ // The children of the top node
+ Children: []pterm.TreeNode{{
+ // A child node
+ Text: "Child node",
+ // The children of the child node
+ Children: []pterm.TreeNode{
+ // Grandchildren nodes
+ {Text: "Grandchild node"},
+ {Text: "Grandchild node"},
+ {Text: "Grandchild node"},
+ },
+ }},
+ }
+
+ // Render the tree with the defined structure as the root
+ pterm.DefaultTree.WithRoot(tree).Render()
+}
+
+```
+
+
+
+### tree/from-leveled-list
+
+
+
+
+
+SHOW SOURCE
+
+```go
+package main
+
+import (
+ "github.com/pterm/pterm"
+ "github.com/pterm/pterm/putils"
+)
+
+func main() {
+ // Define a leveled list to represent the structure of the directories.
+ leveledList := pterm.LeveledList{
+ {Level: 0, Text: "C:"},
+ {Level: 1, Text: "Users"},
+ {Level: 1, Text: "Windows"},
+ {Level: 1, Text: "Programs"},
+ {Level: 1, Text: "Programs(x86)"},
+ {Level: 1, Text: "dev"},
+ {Level: 0, Text: "D:"},
+ {Level: 0, Text: "E:"},
+ {Level: 1, Text: "Movies"},
+ {Level: 1, Text: "Music"},
+ {Level: 2, Text: "LinkinPark"},
+ {Level: 1, Text: "Games"},
+ {Level: 2, Text: "Shooter"},
+ {Level: 3, Text: "CallOfDuty"},
+ {Level: 3, Text: "CS:GO"},
+ {Level: 3, Text: "Battlefield"},
+ {Level: 4, Text: "Battlefield 1"},
+ {Level: 4, Text: "Battlefield 2"},
+ {Level: 0, Text: "F:"},
+ {Level: 1, Text: "dev"},
+ {Level: 2, Text: "dops"},
+ {Level: 2, Text: "PTerm"},
+ }
+
+ // Convert the leveled list into a tree structure.
+ root := putils.TreeFromLeveledList(leveledList)
+ root.Text = "Computer" // Set the root node text.
+
+ // Render the tree structure using the default tree printer.
+ pterm.DefaultTree.WithRoot(root).Render()
+}
+
+```
+
+
+
+
+
+
+
+---
+
+> GitHub [@pterm](https://github.com/pterm) ·
+> Author [@MarvinJWendt](https://github.com/MarvinJWendt)
+> | [PTerm.sh](https://pterm.sh)
+
+
diff --git a/vendor/github.com/pterm/pterm/SECURITY.md b/vendor/github.com/pterm/pterm/SECURITY.md
new file mode 100644
index 0000000..3e4653f
--- /dev/null
+++ b/vendor/github.com/pterm/pterm/SECURITY.md
@@ -0,0 +1,24 @@
+# PTerm Security Policy
+This security policy applies to the PTerm GitHub repository and outlines the process for reporting security issues and handling security incidents. The primary goal of this policy is to ensure the safety and integrity of the PTerm codebase and to minimize the impact of security incidents on our users.
+
+## 1. Overview
+PTerm is a command-line interface (CLI) tool library, and we believe the security risks associated with it are minimal. However, we recognize that vulnerabilities can still arise, and we are committed to addressing them promptly and transparently.
+
+## 2. Reporting Security Issues
+If you discover a security issue in PTerm, please follow these steps:
+
+Open a new issue in the PTerm GitHub repository, describing the security problem in detail.
+
+## 3. Vulnerable Dependencies
+If a dependency of PTerm is found to be vulnerable or infected and requires immediate updates, please follow these steps:
+
+1. Open a new issue in the PTerm GitHub repository, describing the vulnerable dependency and the need for an update.
+2. *Optional: Contact @MarvinJWendt directly via Twitter or Discord to alert them to the issue.*
+
+## 4. Incident Response
+Upon receiving a security report, the PTerm team will:
+
+1. Acknowledge receipt of the report and review the issue.
+2. Investigate the issue and determine the severity and impact.
+3. Develop and implement a fix or mitigation plan, as necessary.
+4. Update the PTerm repository and notify users, if applicable.
diff --git a/vendor/github.com/pterm/pterm/area_printer.go b/vendor/github.com/pterm/pterm/area_printer.go
new file mode 100644
index 0000000..ce233c0
--- /dev/null
+++ b/vendor/github.com/pterm/pterm/area_printer.go
@@ -0,0 +1,139 @@
+package pterm
+
+import (
+ "io"
+ "strings"
+
+ "atomicgo.dev/cursor"
+
+ "github.com/pterm/pterm/internal"
+)
+
+// DefaultArea is the default area printer.
+var DefaultArea = AreaPrinter{}
+
+// AreaPrinter prints an area which can be updated easily.
+// Use this printer for live output like charts, algorithm visualizations, simulations and even games.
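+//
+// A minimal usage sketch, relying only on the Start, Update and Stop methods
+// defined below in this file:
+//
+//    area, _ := pterm.DefaultArea.Start()
+//    area.Update("first frame")
+//    area.Update("second frame") // overwrites the previous content in place
+//    _ = area.Stop()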
+type AreaPrinter struct {
+ RemoveWhenDone bool
+ Fullscreen bool
+ Center bool
+
+ content string
+ isActive bool
+
+ area *cursor.Area
+}
+
+// GetContent returns the current area content.
+func (p *AreaPrinter) GetContent() string {
+ return p.content
+}
+
+// WithRemoveWhenDone removes the AreaPrinter content after it is stopped.
+func (p AreaPrinter) WithRemoveWhenDone(b ...bool) *AreaPrinter {
+ p.RemoveWhenDone = internal.WithBoolean(b)
+ return &p
+}
+
+// WithFullscreen sets the AreaPrinter height the same height as the terminal, making it fullscreen.
+func (p AreaPrinter) WithFullscreen(b ...bool) *AreaPrinter {
+ p.Fullscreen = internal.WithBoolean(b)
+ return &p
+}
+
+// WithCenter centers the AreaPrinter content to the terminal.
+func (p AreaPrinter) WithCenter(b ...bool) *AreaPrinter {
+ p.Center = internal.WithBoolean(b)
+ return &p
+}
+
+// SetWriter sets the writer for the AreaPrinter.
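+// NOTE: as implemented here the method body is empty, so the provided writer is currently ignored.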
+func (p *AreaPrinter) SetWriter(writer io.Writer) {
+
+}
+
+// Update overwrites the content of the AreaPrinter.
+// Can be used live.
+func (p *AreaPrinter) Update(text ...any) {
+ if p.area == nil {
+ newArea := cursor.NewArea()
+ p.area = &newArea
+ }
+ str := Sprint(text...)
+ p.content = str
+
+ if p.Center {
+ str = DefaultCenter.Sprint(str)
+ }
+
+ if p.Fullscreen {
+ str = strings.TrimRight(str, "\n")
+ height := GetTerminalHeight()
+ contentHeight := strings.Count(str, "\n")
+
+ topPadding := 0
+ bottomPadding := height - contentHeight - 2
+
+ if p.Center {
+ topPadding = (bottomPadding / 2) + 1
+ bottomPadding /= 2
+ }
+
+ if height > contentHeight {
+ str = strings.Repeat("\n", topPadding) + str
+ str += strings.Repeat("\n", bottomPadding)
+ }
+ }
+ p.area.Update(str)
+}
+
+// Start the AreaPrinter.
+func (p *AreaPrinter) Start(text ...any) (*AreaPrinter, error) {
+ p.isActive = true
+ str := Sprint(text...)
+ newArea := cursor.NewArea()
+ p.area = &newArea
+
+ p.Update(str)
+
+ return p, nil
+}
+
+// Stop terminates the AreaPrinter immediately.
+// The AreaPrinter will not resolve into anything.
+func (p *AreaPrinter) Stop() error {
+ if !p.isActive {
+ return nil
+ }
+ p.isActive = false
+ if p.RemoveWhenDone {
+ p.Clear()
+ }
+ return nil
+}
+
+// GenericStart runs Start, but returns a LivePrinter.
+// This is used for the interface LivePrinter.
+// You most likely want to use Start instead of this in your program.
+func (p *AreaPrinter) GenericStart() (*LivePrinter, error) {
+ _, _ = p.Start()
+ lp := LivePrinter(p)
+ return &lp, nil
+}
+
+// GenericStop runs Stop, but returns a LivePrinter.
+// This is used for the interface LivePrinter.
+// You most likely want to use Stop instead of this in your program.
+func (p *AreaPrinter) GenericStop() (*LivePrinter, error) {
+ _ = p.Stop()
+ lp := LivePrinter(p)
+ return &lp, nil
+}
+
+// Clear is a wrapper function that clears the content of the Area:
+// it moves the cursor to the bottom of the terminal, clears n lines upwards from
+// the current position and moves the cursor again.
+func (p *AreaPrinter) Clear() {
+ p.area.Clear()
+}
diff --git a/vendor/github.com/pterm/pterm/atoms.go b/vendor/github.com/pterm/pterm/atoms.go
new file mode 100644
index 0000000..bc11da2
--- /dev/null
+++ b/vendor/github.com/pterm/pterm/atoms.go
@@ -0,0 +1,42 @@
+package pterm
+
+// Checkmark is used in the interactive multiselect printer.
+type Checkmark struct {
+ Checked string
+ Unchecked string
+}
+
+// Bars is used to display multiple Bar.
+type Bars []Bar
+
+// Bar is used in bar charts.
+type Bar struct {
+ Label string
+ Value int
+ Style *Style
+ LabelStyle *Style
+}
+
+// WithLabel returns a new Bar with a specific option.
+func (p Bar) WithLabel(s string) *Bar {
+ p.Label = s
+ return &p
+}
+
+// WithLabelStyle returns a new Bar with a specific option.
+func (p Bar) WithLabelStyle(style *Style) *Bar {
+ p.LabelStyle = style
+ return &p
+}
+
+// WithValue returns a new Bar with a specific option.
+func (p Bar) WithValue(value int) *Bar {
+ p.Value = value
+ return &p
+}
+
+// WithStyle returns a new Bar with a specific option.
+func (p Bar) WithStyle(style *Style) *Bar {
+ p.Style = style
+ return &p
+}
diff --git a/vendor/github.com/pterm/pterm/barchart.go b/vendor/github.com/pterm/pterm/barchart.go
new file mode 100644
index 0000000..dd4ba3c
--- /dev/null
+++ b/vendor/github.com/pterm/pterm/barchart.go
@@ -0,0 +1,424 @@
+package pterm
+
+import (
+ "io"
+ "strconv"
+ "strings"
+
+ "github.com/mattn/go-runewidth"
+
+ "github.com/pterm/pterm/internal"
+)
+
+// BarChartPrinter is used to print bar charts.
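+//
+// A minimal usage sketch, based only on the Bars/Bar types from atoms.go and the
+// WithBars, WithShowValue and Render methods defined in this file:
+//
+//    bars := pterm.Bars{
+//        {Label: "A", Value: 10},
+//        {Label: "B", Value: -5},
+//    }
+//    _ = pterm.DefaultBarChart.WithBars(bars).WithShowValue().Render()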
+type BarChartPrinter struct {
+ Writer io.Writer
+ Bars Bars
+ Horizontal bool
+ ShowValue bool
+ // Height sets the maximum height of a vertical bar chart.
+ // The default is calculated to fit into the terminal.
+ // Ignored if Horizontal is set to true.
+ Height int
+ // Width sets the maximum width of a horizontal bar chart.
+ // The default is calculated to fit into the terminal.
+ // Ignored if Horizontal is set to false.
+ Width int
+ VerticalBarCharacter string
+ HorizontalBarCharacter string
+}
+
+var (
+ // DefaultBarChart is the default BarChartPrinter.
+ DefaultBarChart = BarChartPrinter{
+ Horizontal: false,
+ VerticalBarCharacter: "██",
+ HorizontalBarCharacter: "█",
+ // keep in sync with RecalculateTerminalSize()
+ Height: GetTerminalHeight() * 2 / 3,
+ Width: GetTerminalWidth() * 2 / 3,
+ }
+)
+
+// WithBars returns a new BarChartPrinter with a specific option.
+func (p BarChartPrinter) WithBars(bars Bars) *BarChartPrinter {
+ p.Bars = bars
+ return &p
+}
+
+// WithVerticalBarCharacter returns a new BarChartPrinter with a specific option.
+func (p BarChartPrinter) WithVerticalBarCharacter(char string) *BarChartPrinter {
+ p.VerticalBarCharacter = char
+ return &p
+}
+
+// WithHorizontalBarCharacter returns a new BarChartPrinter with a specific option.
+func (p BarChartPrinter) WithHorizontalBarCharacter(char string) *BarChartPrinter {
+ p.HorizontalBarCharacter = char
+ return &p
+}
+
+// WithHorizontal returns a new BarChartPrinter with a specific option.
+func (p BarChartPrinter) WithHorizontal(b ...bool) *BarChartPrinter {
+ b2 := internal.WithBoolean(b)
+ p.Horizontal = b2
+ return &p
+}
+
+// WithHeight returns a new BarChartPrinter with a specific option.
+func (p BarChartPrinter) WithHeight(value int) *BarChartPrinter {
+ p.Height = value
+ return &p
+}
+
+// WithWidth returns a new BarChartPrinter with a specific option.
+func (p BarChartPrinter) WithWidth(value int) *BarChartPrinter {
+ p.Width = value
+ return &p
+}
+
+// WithShowValue returns a new BarChartPrinter with a specific option.
+func (p BarChartPrinter) WithShowValue(b ...bool) *BarChartPrinter {
+ p.ShowValue = internal.WithBoolean(b)
+ return &p
+}
+
+// WithWriter sets the custom Writer.
+func (p BarChartPrinter) WithWriter(writer io.Writer) *BarChartPrinter {
+ p.Writer = writer
+ return &p
+}
+
+func (p BarChartPrinter) getRawOutput() string {
+ var ret string
+
+ for _, bar := range p.Bars {
+ ret += Sprintfln("%s: %d", bar.Label, bar.Value)
+ }
+
+ return ret
+}
+
+// Srender renders the BarChart as a string.
+func (p BarChartPrinter) Srender() (string, error) {
+ maxAbsValue := func(value1 int, value2 int) int {
+ min := value1
+ max := value2
+
+ if value1 > value2 {
+ min = value2
+ max = value1
+ }
+
+ maxAbs := max
+
+ if min < 0 && -min > max { // This is to avoid something like "int(math.Abs(float64(minBarValue)))"
+ maxAbs = -min // (--) == (+)
+ }
+
+ return maxAbs
+ }
+
+ abs := func(value int) int {
+ if value < 0 {
+ return -value
+ }
+
+ return value
+ }
+ // =================================== VERTICAL BARS RENDERER ======================================================
+
+ type renderParams struct {
+ repeatCount int
+ bar Bar
+ positiveChartPartHeight int
+ negativeChartPartHeight int
+ positiveChartPartWidth int
+ negativeChartPartWidth int
+ indent string
+ showValue bool
+ moveUp bool
+ moveRight bool
+ }
+
+ renderPositiveVerticalBar := func(renderedBarRef *string, rParams renderParams) {
+ if rParams.showValue {
+ *renderedBarRef += Sprint(rParams.indent + strconv.Itoa(rParams.bar.Value) + rParams.indent + "\n")
+ }
+
+ for i := rParams.positiveChartPartHeight; i > 0; i-- {
+ if i > rParams.repeatCount {
+ *renderedBarRef += rParams.indent + " " + rParams.indent + " \n"
+ } else {
+ *renderedBarRef += rParams.indent + rParams.bar.Style.Sprint(p.VerticalBarCharacter) + rParams.indent + " \n"
+ }
+ }
+
+ // Used when the chart contains both POSITIVE and NEGATIVE values.
+ // In that case the top and bottom halves of the chart are drawn separately,
+ // and the positive part has to be MOVED UP into the top half of the chart,
+ // technically by adding empty pillars whose height equals the height of the chart's bottom part.
+ if rParams.moveUp {
+ for i := 0; i <= rParams.negativeChartPartHeight; i++ {
+ *renderedBarRef += rParams.indent + " " + rParams.indent + " \n"
+ }
+ }
+ }
+
+ renderNegativeVerticalBar := func(renderedBarRef *string, rParams renderParams) {
+ for i := 0; i > -rParams.negativeChartPartHeight; i-- {
+ if i > rParams.repeatCount {
+ *renderedBarRef += rParams.indent + rParams.bar.Style.Sprint(p.VerticalBarCharacter) + rParams.indent + " \n"
+ } else {
+ *renderedBarRef += rParams.indent + " " + rParams.indent + " \n"
+ }
+ }
+
+ if rParams.showValue {
+ *renderedBarRef += Sprint(rParams.indent + strconv.Itoa(rParams.bar.Value) + rParams.indent + "\n")
+ }
+ }
+
+ // =================================== HORIZONTAL BARS RENDERER ====================================================
+ renderPositiveHorizontalBar := func(renderedBarRef *string, rParams renderParams) {
+ if rParams.moveRight {
+ for i := 0; i < rParams.negativeChartPartWidth; i++ {
+ *renderedBarRef += " "
+ }
+ }
+
+ for i := 0; i < rParams.positiveChartPartWidth; i++ {
+ if i < rParams.repeatCount {
+ *renderedBarRef += rParams.bar.Style.Sprint(p.HorizontalBarCharacter)
+ } else {
+ *renderedBarRef += " "
+ }
+ }
+
+ if rParams.showValue {
+ // For positive horizontal bars we add one more space before adding value,
+ // so they will be well aligned with negative values, which have "-" sign before them
+ *renderedBarRef += " "
+
+ *renderedBarRef += " " + strconv.Itoa(rParams.bar.Value)
+ }
+ }
+
+ renderNegativeHorizontalBar := func(renderedBarRef *string, rParams renderParams) {
+ for i := -rParams.negativeChartPartWidth; i < 0; i++ {
+ if i < rParams.repeatCount {
+ *renderedBarRef += " "
+ } else {
+ *renderedBarRef += rParams.bar.Style.Sprint(p.HorizontalBarCharacter)
+ }
+ }
+
+ // To print the values well aligned when the chart has both a positive and a negative part,
+ // we insert an indent whose width equals the width of the positive chart part.
+ if rParams.positiveChartPartWidth > 0 {
+ for i := 0; i < rParams.positiveChartPartWidth; i++ {
+ *renderedBarRef += " "
+ }
+ }
+
+ if rParams.showValue {
+ /*
+ This is in order to achieve this effect:
+ 0
+ -15
+ 0
+ -19
+
+ INSTEAD OF THIS:
+
+ 0
+ -15
+ 0
+ -19
+ */
+ if rParams.repeatCount == 0 {
+ *renderedBarRef += " "
+ }
+
+ *renderedBarRef += " " + strconv.Itoa(rParams.bar.Value)
+ }
+ }
+ // =================================================================================================================
+
+ if RawOutput {
+ return p.getRawOutput(), nil
+ }
+ for i, bar := range p.Bars {
+ if bar.Style == nil {
+ p.Bars[i].Style = &ThemeDefault.BarStyle
+ }
+
+ if bar.LabelStyle == nil {
+ p.Bars[i].LabelStyle = &ThemeDefault.BarLabelStyle
+ }
+
+ p.Bars[i].Label = p.Bars[i].LabelStyle.Sprint(bar.Label)
+ }
+
+ var ret strings.Builder
+
+ var maxLabelHeight int
+ var maxBarValue int
+ var minBarValue int
+ var maxAbsBarValue int
+ var rParams renderParams
+
+ for _, bar := range p.Bars {
+ if bar.Value > maxBarValue {
+ maxBarValue = bar.Value
+ }
+ if bar.Value < minBarValue {
+ minBarValue = bar.Value
+ }
+ labelHeight := len(strings.Split(bar.Label, "\n"))
+ if labelHeight > maxLabelHeight {
+ maxLabelHeight = labelHeight
+ }
+ }
+
+ maxAbsBarValue = maxAbsValue(maxBarValue, minBarValue)
+
+ if p.Horizontal {
+ panels := Panels{[]Panel{{}, {}}}
+
+ rParams.showValue = p.ShowValue
+ rParams.positiveChartPartWidth = p.Width
+ rParams.negativeChartPartWidth = p.Width
+
+ // If the chart consists of two parts (positive and negative), recalculate the maximum bar WIDTH for the LEFT and RIGHT parts
+ if minBarValue < 0 && maxBarValue > 0 {
+ rParams.positiveChartPartWidth = abs(internal.MapRangeToRange(-float32(maxAbsBarValue), float32(maxAbsBarValue), -float32(p.Width)/2, float32(p.Width)/2, float32(maxBarValue)))
+ rParams.negativeChartPartWidth = abs(internal.MapRangeToRange(-float32(maxAbsBarValue), float32(maxAbsBarValue), -float32(p.Width)/2, float32(p.Width)/2, float32(minBarValue)))
+ }
+
+ for _, bar := range p.Bars {
+ rParams.bar = bar
+ panels[0][0].Data += "\n" + bar.Label
+ panels[0][1].Data += "\n"
+
+ if minBarValue >= 0 {
+ // As we don't have negative values, draw only positive (right) part of the chart:
+ rParams.repeatCount = internal.MapRangeToRange(0, float32(maxAbsBarValue), 0, float32(p.Width), float32(bar.Value))
+ rParams.moveRight = false
+
+ renderPositiveHorizontalBar(&panels[0][1].Data, rParams)
+ } else if maxBarValue <= 0 {
+ // As we have only negative values, draw only negative (left) part of the chart:
+ rParams.repeatCount = internal.MapRangeToRange(-float32(maxAbsBarValue), 0, -float32(p.Width), 0, float32(bar.Value))
+ rParams.positiveChartPartWidth = 0
+
+ renderNegativeHorizontalBar(&panels[0][1].Data, rParams)
+ } else {
+ // We have positive and negative values, so draw both (left+right) parts of the chart:
+ rParams.repeatCount = internal.MapRangeToRange(-float32(maxAbsBarValue), float32(maxAbsBarValue), -float32(p.Width)/2, float32(p.Width)/2, float32(bar.Value))
+
+ if bar.Value >= 0 {
+ rParams.moveRight = true
+
+ renderPositiveHorizontalBar(&panels[0][1].Data, rParams)
+ }
+
+ if bar.Value < 0 {
+ renderNegativeHorizontalBar(&panels[0][1].Data, rParams)
+ }
+ }
+ }
+ result, _ := DefaultPanel.WithPanels(panels).Srender()
+ return result, nil
+ } else {
+ renderedBars := make([]string, len(p.Bars))
+
+ rParams.showValue = p.ShowValue
+ rParams.positiveChartPartHeight = p.Height
+ rParams.negativeChartPartHeight = p.Height
+
+ // If the chart consists of two parts (positive and negative), recalculate the maximum bar height for the top and bottom parts
+ if minBarValue < 0 && maxBarValue > 0 {
+ rParams.positiveChartPartHeight = abs(internal.MapRangeToRange(-float32(maxAbsBarValue), float32(maxAbsBarValue), -float32(p.Height)/2, float32(p.Height)/2, float32(maxBarValue)))
+ rParams.negativeChartPartHeight = abs(internal.MapRangeToRange(-float32(maxAbsBarValue), float32(maxAbsBarValue), -float32(p.Height)/2, float32(p.Height)/2, float32(minBarValue)))
+ }
+
+ for i, bar := range p.Bars {
+ var renderedBar string
+ rParams.bar = bar
+ rParams.indent = strings.Repeat(" ", internal.GetStringMaxWidth(RemoveColorFromString(bar.Label))/2)
+
+ if minBarValue >= 0 {
+ // As we don't have negative values, draw only positive (top) part of the chart:
+ rParams.repeatCount = internal.MapRangeToRange(0, float32(maxAbsBarValue), 0, float32(p.Height), float32(bar.Value))
+ rParams.moveUp = false // Don't MOVE UP as we have ONLY positive part of chart.
+
+ renderPositiveVerticalBar(&renderedBar, rParams)
+ } else if maxBarValue <= 0 {
+ // As we have only negative values, draw only negative (bottom) part of the chart:
+ rParams.repeatCount = internal.MapRangeToRange(-float32(maxAbsBarValue), 0, -float32(p.Height), 0, float32(bar.Value))
+
+ renderNegativeVerticalBar(&renderedBar, rParams)
+ } else {
+ // We have positive and negative values, so draw both (top+bottom) parts of the chart:
+ rParams.repeatCount = internal.MapRangeToRange(-float32(maxAbsBarValue), float32(maxAbsBarValue), -float32(p.Height)/2, float32(p.Height)/2, float32(bar.Value))
+
+ if bar.Value >= 0 {
+ rParams.moveUp = true // MOVE UP positive part, because we have both positive and negative parts of chart.
+
+ renderPositiveVerticalBar(&renderedBar, rParams)
+ }
+
+ if bar.Value < 0 {
+ renderNegativeVerticalBar(&renderedBar, rParams)
+ }
+ }
+
+ labelHeight := len(strings.Split(bar.Label, "\n"))
+ renderedBars[i] = renderedBar + bar.Label + strings.Repeat("\n", maxLabelHeight-labelHeight) + " "
+ }
+
+ var maxBarHeight int
+
+ for _, bar := range renderedBars {
+ totalBarHeight := len(strings.Split(bar, "\n"))
+ if totalBarHeight > maxBarHeight {
+ maxBarHeight = totalBarHeight
+ }
+ }
+
+ for i, bar := range renderedBars {
+ totalBarHeight := len(strings.Split(bar, "\n"))
+ if totalBarHeight < maxBarHeight {
+ renderedBars[i] = strings.Repeat("\n", maxBarHeight-totalBarHeight) + renderedBars[i]
+ }
+ }
+
+ for i := 0; i <= maxBarHeight; i++ {
+ for _, barString := range renderedBars {
+ var barLine string
+ letterLines := strings.Split(barString, "\n")
+ maxBarWidth := internal.GetStringMaxWidth(RemoveColorFromString(barString))
+ if len(letterLines) > i {
+ barLine = letterLines[i]
+ }
+ letterLineLength := runewidth.StringWidth(RemoveColorFromString(barLine))
+ if letterLineLength < maxBarWidth {
+ barLine += strings.Repeat(" ", maxBarWidth-letterLineLength)
+ }
+ ret.WriteString(barLine)
+ }
+ ret.WriteByte('\n')
+ }
+ }
+
+ return ret.String(), nil
+}
+
+// Render prints the Template to the terminal.
+func (p BarChartPrinter) Render() error {
+ s, _ := p.Srender()
+ Fprintln(p.Writer, s)
+
+ return nil
+}
diff --git a/vendor/github.com/pterm/pterm/basic_text_printer.go b/vendor/github.com/pterm/pterm/basic_text_printer.go
new file mode 100644
index 0000000..a3e5274
--- /dev/null
+++ b/vendor/github.com/pterm/pterm/basic_text_printer.go
@@ -0,0 +1,123 @@
+package pterm
+
+import (
+ "fmt"
+ "io"
+)
+
+var (
+ // DefaultBasicText returns a default BasicTextPrinter, which can be used to print text as is.
+ // No default style is present for BasicTextPrinter.
+ DefaultBasicText = BasicTextPrinter{}
+)
+
+// BasicTextPrinter is the printer used to print the input as-is or as specified by user formatting.
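+//
+// A minimal usage sketch, using the WithStyle and Println methods defined below:
+//
+//    pterm.DefaultBasicText.WithStyle(pterm.NewStyle(pterm.FgLightCyan)).Println("plain, styled text")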
+type BasicTextPrinter struct {
+ Style *Style
+ Writer io.Writer
+}
+
+// WithStyle adds a style to the printer.
+func (p BasicTextPrinter) WithStyle(style *Style) *BasicTextPrinter {
+ p.Style = style
+ return &p
+}
+
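+// WithWriter sets the custom Writer.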
+func (p BasicTextPrinter) WithWriter(writer io.Writer) *BasicTextPrinter {
+ p.Writer = writer
+ return &p
+}
+
+// Sprint formats using the default formats for its operands and returns the resulting string.
+// Spaces are added between operands when neither is a string.
+func (p BasicTextPrinter) Sprint(a ...any) string {
+ if p.Style == nil {
+ p.Style = NewStyle()
+ }
+ return p.Style.Sprint(a...)
+}
+
+// Sprintln formats using the default formats for its operands and returns the resulting string.
+// Spaces are always added between operands and a newline is appended.
+func (p BasicTextPrinter) Sprintln(a ...any) string {
+ str := fmt.Sprintln(a...)
+ return Sprintln(p.Sprint(str))
+}
+
+// Sprintf formats according to a format specifier and returns the resulting string.
+func (p BasicTextPrinter) Sprintf(format string, a ...any) string {
+ return p.Sprint(Sprintf(format, a...))
+}
+
+// Sprintfln formats according to a format specifier and returns the resulting string.
+// Spaces are always added between operands and a newline is appended.
+func (p BasicTextPrinter) Sprintfln(format string, a ...any) string {
+ return p.Sprintf(format, a...) + "\n"
+}
+
+// Print formats using the default formats for its operands and writes to provided writer.
+// Spaces are added between operands when neither is a string.
+// It returns the number of bytes written and any write error encountered.
+func (p *BasicTextPrinter) Print(a ...any) *TextPrinter {
+ Fprint(p.Writer, p.Sprint(a...))
+ tp := TextPrinter(p)
+ return &tp
+}
+
+// Println formats using the default formats for its operands and writes to provided writer.
+// Spaces are always added between operands and a newline is appended.
+// It returns the number of bytes written and any write error encountered.
+func (p *BasicTextPrinter) Println(a ...any) *TextPrinter {
+ Fprint(p.Writer, p.Sprintln(a...))
+ tp := TextPrinter(p)
+ return &tp
+}
+
+// Printf formats according to a format specifier and writes to provided writer.
+// It returns the number of bytes written and any write error encountered.
+func (p *BasicTextPrinter) Printf(format string, a ...any) *TextPrinter {
+ Fprint(p.Writer, p.Sprintf(format, a...))
+ tp := TextPrinter(p)
+ return &tp
+}
+
+// Printfln formats according to a format specifier and writes to provided writer.
+// Spaces are always added between operands and a newline is appended.
+// It returns the number of bytes written and any write error encountered.
+func (p *BasicTextPrinter) Printfln(format string, a ...any) *TextPrinter {
+ Fprint(p.Writer, p.Sprintfln(format, a...))
+ tp := TextPrinter(p)
+ return &tp
+}
+
+// PrintOnError prints every error which is not nil.
+// If every error is nil, nothing will be printed.
+// This can be used for simple error checking.
+func (p *BasicTextPrinter) PrintOnError(a ...any) *TextPrinter {
+ for _, arg := range a {
+ if err, ok := arg.(error); ok {
+ if err != nil {
+ p.Println(err)
+ }
+ }
+ }
+
+ tp := TextPrinter(p)
+ return &tp
+}
+
+// PrintOnErrorf wraps every error which is not nil and prints it.
+// If every error is nil, nothing will be printed.
+// This can be used for simple error checking.
+func (p *BasicTextPrinter) PrintOnErrorf(format string, a ...any) *TextPrinter {
+ for _, arg := range a {
+ if err, ok := arg.(error); ok {
+ if err != nil {
+ p.Println(fmt.Errorf(format, err))
+ }
+ }
+ }
+
+ tp := TextPrinter(p)
+ return &tp
+}
diff --git a/vendor/github.com/pterm/pterm/bigtext_printer.go b/vendor/github.com/pterm/pterm/bigtext_printer.go
new file mode 100644
index 0000000..6346b48
--- /dev/null
+++ b/vendor/github.com/pterm/pterm/bigtext_printer.go
@@ -0,0 +1,551 @@
+package pterm
+
+import (
+ "io"
+ "strings"
+
+ "github.com/gookit/color"
+
+ "github.com/mattn/go-runewidth"
+
+ "github.com/pterm/pterm/internal"
+)
+
+// Letters is a slice of Letter.
+type Letters []Letter
+
+// Letter is an object, which holds a string and a specific Style for it.
+type Letter struct {
+ String string
+ Style *Style
+ RGB RGB
+}
+
+// WithStyle returns a new Letter with a specific Style.
+func (l Letter) WithStyle(style *Style) *Letter {
+ l.Style = style
+ return &l
+}
+
+// WithRGB returns a new Letter with a specific RGB color (overwrites style).
+func (l Letter) WithRGB(rgb RGB) *Letter {
+ l.RGB = rgb
+ return &l
+}
+
+// WithString returns a new Letter with a specific String.
+func (l Letter) WithString(s string) *Letter {
+ l.String = s
+ return &l
+}
+
+// BigTextPrinter renders big text.
+// You can use this as title screen for your application.
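+//
+// A minimal usage sketch, building the Letters by hand from the Letter type defined
+// above and rendering with the default character set (DefaultBigText):
+//
+//    letters := pterm.Letters{
+//        {String: "H", Style: pterm.NewStyle(pterm.FgLightBlue)},
+//        {String: "I", Style: pterm.NewStyle(pterm.FgLightBlue)},
+//    }
+//    _ = pterm.DefaultBigText.WithLetters(letters).Render()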
+type BigTextPrinter struct {
+ // BigCharacters holds the map from a normal character to its big version.
+ BigCharacters map[string]string
+ Letters Letters
+ Writer io.Writer
+}
+
+// WithBigCharacters returns a new BigTextPrinter with specific BigCharacters.
+func (p BigTextPrinter) WithBigCharacters(chars map[string]string) *BigTextPrinter {
+ p.BigCharacters = chars
+ return &p
+}
+
+// WithLetters returns a new BigTextPrinter with specific Letters
+func (p BigTextPrinter) WithLetters(letters ...Letters) *BigTextPrinter {
+ l := Letters{}
+ for _, letter := range letters {
+ l = append(l, letter...)
+ }
+ p.Letters = l
+ return &p
+}
+
+// WithWriter sets the custom Writer.
+func (p BigTextPrinter) WithWriter(writer io.Writer) *BigTextPrinter {
+ p.Writer = writer
+ return &p
+}
+
+// Srender renders the BigText as a string.
+func (p BigTextPrinter) Srender() (string, error) {
+ var ret strings.Builder
+
+ if RawOutput {
+ for _, letter := range p.Letters {
+ ret.WriteString(letter.String)
+ }
+ return ret.String(), nil
+ }
+
+ var bigLetters Letters
+ for _, l := range p.Letters {
+ if val, ok := p.BigCharacters[l.String]; ok {
+ bigLetters = append(bigLetters, Letter{
+ String: val,
+ Style: l.Style,
+ RGB: l.RGB,
+ })
+ }
+ }
+
+ var maxHeight int
+
+ for _, l := range bigLetters {
+ h := strings.Count(l.String, "\n")
+ if h > maxHeight {
+ maxHeight = h
+ }
+ }
+
+ for i := 0; i <= maxHeight; i++ {
+ for _, letter := range bigLetters {
+ var letterLine string
+ letterLines := strings.Split(letter.String, "\n")
+ maxLetterWidth := internal.GetStringMaxWidth(letter.String)
+ if len(letterLines) > i {
+ letterLine = letterLines[i]
+ }
+ letterLineLength := runewidth.StringWidth(letterLine)
+ if letterLineLength < maxLetterWidth {
+ letterLine += strings.Repeat(" ", maxLetterWidth-letterLineLength)
+ }
+
+ if letter.RGB != (RGB{}) && (color.IsSupportRGBColor() || internal.RunsInCi()) {
+ ret.WriteString(letter.RGB.Sprint(letterLine))
+ } else {
+ ret.WriteString(letter.Style.Sprint(letterLine))
+ }
+ }
+ ret.WriteByte('\n')
+ }
+
+ return ret.String(), nil
+}
+
+// Render prints the BigText to the terminal.
+func (p BigTextPrinter) Render() error {
+ s, _ := p.Srender()
+ Fprintln(p.Writer, s)
+
+ return nil
+}
+
+// DefaultBigText contains default values for BigTextPrinter.
+var DefaultBigText = BigTextPrinter{
+ BigCharacters: map[string]string{
+ "a": ` █████
+██ ██
+███████
+██ ██
+██ ██ `,
+ "A": ` █████
+██ ██
+███████
+██ ██
+██ ██ `,
+ "b": `██████
+██ ██
+██████
+██ ██
+██████`,
+ "B": `██████
+██ ██
+██████
+██ ██
+██████`,
+ "c": ` ██████
+██
+██
+██
+ ██████`,
+ "C": ` ██████
+██
+██
+██
+ ██████`,
+ "d": `██████
+██ ██
+██ ██
+██ ██
+██████ `,
+ "D": `██████
+██ ██
+██ ██
+██ ██
+██████ `,
+ "e": `███████
+██
+█████
+██
+███████`,
+ "E": `███████
+██
+█████
+██
+███████`,
+ "f": `███████
+██
+█████
+██
+██ `,
+ "F": `███████
+██
+█████
+██
+██ `,
+ "g": ` ██████
+██
+██ ███
+██ ██
+ ██████ `,
+ "G": ` ██████
+██
+██ ███
+██ ██
+ ██████ `,
+ "h": `██ ██
+██ ██
+███████
+██ ██
+██ ██ `,
+ "H": `██ ██
+██ ██
+███████
+██ ██
+██ ██ `,
+ "i": `██
+██
+██
+██
+██`,
+ "I": `██
+██
+██
+██
+██`,
+ "j": ` ██
+ ██
+ ██
+██ ██
+ █████ `,
+ "J": ` ██
+ ██
+ ██
+██ ██
+ █████ `,
+ "k": `██ ██
+██ ██
+█████
+██ ██
+██ ██`,
+ "K": `██ ██
+██ ██
+█████
+██ ██
+██ ██`,
+ "l": `██
+██
+██
+██
+███████ `,
+ "L": `██
+██
+██
+██
+███████ `,
+ "m": `███ ███
+████ ████
+██ ████ ██
+██ ██ ██
+██ ██`,
+ "M": `███ ███
+████ ████
+██ ████ ██
+██ ██ ██
+██ ██`,
+ "n": `███ ██
+████ ██
+██ ██ ██
+██ ██ ██
+██ ████`,
+ "N": `███ ██
+████ ██
+██ ██ ██
+██ ██ ██
+██ ████`,
+ "o": ` ██████
+██ ██
+██ ██
+██ ██
+ ██████ `,
+ "O": ` ██████
+██ ██
+██ ██
+██ ██
+ ██████ `,
+ "p": `██████
+██ ██
+██████
+██
+██ `,
+ "P": `██████
+██ ██
+██████
+██
+██ `,
+ "q": ` ██████
+██ ██
+██ ██
+██ ▄▄ ██
+ ██████
+ ▀▀ `,
+ "Q": ` ██████
+██ ██
+██ ██
+██ ▄▄ ██
+ ██████
+ ▀▀ `,
+ "r": `██████
+██ ██
+██████
+██ ██
+██ ██`,
+ "R": `██████
+██ ██
+██████
+██ ██
+██ ██`,
+ "s": `███████
+██
+███████
+ ██
+███████`,
+ "S": `███████
+██
+███████
+ ██
+███████`,
+ "t": `████████
+ ██
+ ██
+ ██
+ ██ `,
+ "T": `████████
+ ██
+ ██
+ ██
+ ██ `,
+ "u": `██ ██
+██ ██
+██ ██
+██ ██
+ ██████ `,
+ "U": `██ ██
+██ ██
+██ ██
+██ ██
+ ██████ `,
+ "v": `██ ██
+██ ██
+██ ██
+ ██ ██
+ ████ `,
+ "V": `██ ██
+██ ██
+██ ██
+ ██ ██
+ ████ `,
+ "w": `██ ██
+██ ██
+██ █ ██
+██ ███ ██
+ ███ ███ `,
+ "W": `██ ██
+██ ██
+██ █ ██
+██ ███ ██
+ ███ ███ `,
+ "x": `██ ██
+ ██ ██
+ ███
+ ██ ██
+██ ██ `,
+ "X": `██ ██
+ ██ ██
+ ███
+ ██ ██
+██ ██ `,
+ "y": `██ ██
+ ██ ██
+ ████
+ ██
+ ██ `,
+ "Y": `██ ██
+ ██ ██
+ ████
+ ██
+ ██ `,
+ "z": `███████
+ ███
+ ███
+ ███
+███████`,
+ "Z": `███████
+ ███
+ ███
+ ███
+███████`,
+ "0": ` ██████
+██ ████
+██ ██ ██
+████ ██
+ ██████ `,
+ "1": ` ██
+███
+ ██
+ ██
+ ██ `,
+ "2": `██████
+ ██
+ █████
+██
+███████ `,
+ "3": `██████
+ ██
+ █████
+ ██
+██████ `,
+ "4": `██ ██
+██ ██
+███████
+ ██
+ ██ `,
+ "5": `███████
+██
+███████
+ ██
+███████`,
+ "6": ` ██████
+██
+███████
+██ ██
+ ██████ `,
+ "7": `███████
+ ██
+ ██
+ ██
+ ██`,
+ "8": ` █████
+██ ██
+ █████
+██ ██
+ █████ `,
+ "9": ` █████
+██ ██
+ ██████
+ ██
+ █████ `,
+ " ": " ",
+ "!": `██
+██
+██
+
+██ `,
+ "$": `▄▄███▄▄·
+██
+███████
+ ██
+███████
+ ▀▀▀ `,
+ "%": `██ ██
+ ██
+ ██
+ ██
+██ ██`,
+ "/": ` ██
+ ██
+ ██
+ ██
+██ `,
+ "(": ` ██
+██
+██
+██
+ ██ `,
+ ")": `██
+ ██
+ ██
+ ██
+██ `,
+ "?": `██████
+ ██
+ ▄███
+ ▀▀
+ ██ `,
+ "[": `███
+██
+██
+██
+███`,
+ "]": `███
+ ██
+ ██
+ ██
+███ `,
+ ".": `
+
+
+
+██`,
+ ",": `
+
+
+
+▄█`,
+ "-": `
+
+█████
+
+
+ `,
+ "<": ` ██
+ ██
+██
+ ██
+ ██ `,
+ ">": `██
+ ██
+ ██
+ ██
+██ `,
+ "*": `
+▄ ██ ▄
+ ████
+▀ ██ ▀
+ `,
+ "#": ` ██ ██
+████████
+ ██ ██
+████████
+ ██ ██ `,
+ "_": `
+
+
+
+███████ `,
+ ":": `
+██
+
+
+██ `,
+ "°": ` ████
+██ ██
+ ████
+
+ `,
+ },
+}
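
A minimal usage sketch for the BigTextPrinter above. Only DefaultBigText and Render come from this file; WithLetters and putils.LettersFromString are assumed from the wider vendored pterm/putils API and are shown here purely for illustration.

package main

import (
	"github.com/pterm/pterm"
	"github.com/pterm/pterm/putils"
)

func main() {
	// Convert a plain string into pterm Letters and render it with the
	// default big-character set defined above. WithLetters and
	// putils.LettersFromString are assumed helpers, not defined in this file.
	_ = pterm.DefaultBigText.
		WithLetters(putils.LettersFromString("PTerm")).
		Render()
}
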
diff --git a/vendor/github.com/pterm/pterm/box_printer.go b/vendor/github.com/pterm/pterm/box_printer.go
new file mode 100644
index 0000000..8007d9f
--- /dev/null
+++ b/vendor/github.com/pterm/pterm/box_printer.go
@@ -0,0 +1,371 @@
+package pterm
+
+import (
+ "fmt"
+ "io"
+ "strings"
+
+ "github.com/mattn/go-runewidth"
+
+ "github.com/pterm/pterm/internal"
+)
+
+// BoxPrinter is able to render a box around printables.
+type BoxPrinter struct {
+ Title string
+ TitleTopLeft bool
+ TitleTopRight bool
+ TitleTopCenter bool
+ TitleBottomLeft bool
+ TitleBottomRight bool
+ TitleBottomCenter bool
+ TextStyle *Style
+ VerticalString string
+ BoxStyle *Style
+ HorizontalString string
+ TopRightCornerString string
+ TopLeftCornerString string
+ BottomLeftCornerString string
+ BottomRightCornerString string
+ TopPadding int
+ BottomPadding int
+ RightPadding int
+ LeftPadding int
+ Writer io.Writer
+}
+
+// DefaultBox is the default BoxPrinter.
+var DefaultBox = BoxPrinter{
+ VerticalString: "|",
+ TopRightCornerString: "└",
+ TopLeftCornerString: "┘",
+ BottomLeftCornerString: "┐",
+ BottomRightCornerString: "┌",
+ HorizontalString: "─",
+ BoxStyle: &ThemeDefault.BoxStyle,
+ TextStyle: &ThemeDefault.BoxTextStyle,
+ RightPadding: 1,
+ LeftPadding: 1,
+ TopPadding: 0,
+ BottomPadding: 0,
+ TitleTopLeft: true,
+}
+
+// WithTitle returns a new box with a specific Title.
+func (p BoxPrinter) WithTitle(str string) *BoxPrinter {
+ p.Title = str
+ return &p
+}
+
+// WithTitleTopLeft returns a new box with a specific Title alignment.
+func (p BoxPrinter) WithTitleTopLeft(b ...bool) *BoxPrinter {
+ b2 := internal.WithBoolean(b)
+ p.TitleTopLeft = b2
+ p.TitleTopRight = false
+ p.TitleTopCenter = false
+ p.TitleBottomLeft = false
+ p.TitleBottomRight = false
+ p.TitleBottomCenter = false
+ return &p
+}
+
+// WithTitleTopRight returns a new box with a specific Title alignment.
+func (p BoxPrinter) WithTitleTopRight(b ...bool) *BoxPrinter {
+ b2 := internal.WithBoolean(b)
+ p.TitleTopLeft = false
+ p.TitleTopRight = b2
+ p.TitleTopCenter = false
+ p.TitleBottomLeft = false
+ p.TitleBottomRight = false
+ p.TitleBottomCenter = false
+ return &p
+}
+
+// WithTitleTopCenter returns a new box with a specific Title alignment.
+func (p BoxPrinter) WithTitleTopCenter(b ...bool) *BoxPrinter {
+ b2 := internal.WithBoolean(b)
+ p.TitleTopLeft = false
+ p.TitleTopRight = false
+ p.TitleTopCenter = b2
+ p.TitleBottomLeft = false
+ p.TitleBottomRight = false
+ p.TitleBottomCenter = false
+ return &p
+}
+
+// WithTitleBottomLeft returns a new box with a specific Title alignment.
+func (p BoxPrinter) WithTitleBottomLeft(b ...bool) *BoxPrinter {
+ b2 := internal.WithBoolean(b)
+ p.TitleTopLeft = false
+ p.TitleTopRight = false
+ p.TitleTopCenter = false
+ p.TitleBottomLeft = b2
+ p.TitleBottomRight = false
+ p.TitleBottomCenter = false
+ return &p
+}
+
+// WithTitleBottomRight returns a new box with a specific Title alignment.
+func (p BoxPrinter) WithTitleBottomRight(b ...bool) *BoxPrinter {
+ b2 := internal.WithBoolean(b)
+ p.TitleTopLeft = false
+ p.TitleTopRight = false
+ p.TitleTopCenter = false
+ p.TitleBottomLeft = false
+ p.TitleBottomRight = b2
+ p.TitleBottomCenter = false
+ return &p
+}
+
+// WithTitleBottomCenter returns a new box with a specific Title alignment.
+func (p BoxPrinter) WithTitleBottomCenter(b ...bool) *BoxPrinter {
+ b2 := internal.WithBoolean(b)
+ p.TitleTopLeft = false
+ p.TitleTopRight = false
+ p.TitleTopCenter = false
+ p.TitleBottomLeft = false
+ p.TitleBottomRight = false
+ p.TitleBottomCenter = b2
+ return &p
+}
+
+// WithBoxStyle returns a new box with a specific box Style.
+func (p BoxPrinter) WithBoxStyle(style *Style) *BoxPrinter {
+ p.BoxStyle = style
+ return &p
+}
+
+// WithTextStyle returns a new box with a specific text Style.
+func (p BoxPrinter) WithTextStyle(style *Style) *BoxPrinter {
+ p.TextStyle = style
+ return &p
+}
+
+// WithTopRightCornerString returns a new box with a specific TopRightCornerString.
+func (p BoxPrinter) WithTopRightCornerString(str string) *BoxPrinter {
+ p.TopRightCornerString = str
+ return &p
+}
+
+// WithTopLeftCornerString returns a new box with a specific TopLeftCornerString.
+func (p BoxPrinter) WithTopLeftCornerString(str string) *BoxPrinter {
+ p.TopLeftCornerString = str
+ return &p
+}
+
+// WithBottomRightCornerString returns a new box with a specific BottomRightCornerString.
+func (p BoxPrinter) WithBottomRightCornerString(str string) *BoxPrinter {
+ p.BottomRightCornerString = str
+ return &p
+}
+
+// WithBottomLeftCornerString returns a new box with a specific BottomLeftCornerString.
+func (p BoxPrinter) WithBottomLeftCornerString(str string) *BoxPrinter {
+ p.BottomLeftCornerString = str
+ return &p
+}
+
+// WithVerticalString returns a new box with a specific VerticalString.
+func (p BoxPrinter) WithVerticalString(str string) *BoxPrinter {
+ p.VerticalString = str
+ return &p
+}
+
+// WithHorizontalString returns a new box with a specific HorizontalString.
+func (p BoxPrinter) WithHorizontalString(str string) *BoxPrinter {
+ p.HorizontalString = str
+ return &p
+}
+
+// WithTopPadding returns a new box with a specific TopPadding.
+func (p BoxPrinter) WithTopPadding(padding int) *BoxPrinter {
+ if padding < 0 {
+ padding = 0
+ }
+ p.TopPadding = padding
+ return &p
+}
+
+// WithBottomPadding returns a new box with a specific BottomPadding.
+func (p BoxPrinter) WithBottomPadding(padding int) *BoxPrinter {
+ if padding < 0 {
+ padding = 0
+ }
+ p.BottomPadding = padding
+ return &p
+}
+
+// WithRightPadding returns a new box with a specific RightPadding.
+func (p BoxPrinter) WithRightPadding(padding int) *BoxPrinter {
+ if padding < 0 {
+ padding = 0
+ }
+ p.RightPadding = padding
+ return &p
+}
+
+// WithLeftPadding returns a new box with a specific LeftPadding.
+func (p BoxPrinter) WithLeftPadding(padding int) *BoxPrinter {
+ if padding < 0 {
+ padding = 0
+ }
+ p.LeftPadding = padding
+ return &p
+}
+
+// WithWriter sets the custom Writer.
+func (p BoxPrinter) WithWriter(writer io.Writer) *BoxPrinter {
+ p.Writer = writer
+ return &p
+}
+
+// Sprint formats using the default formats for its operands and returns the resulting string.
+// Spaces are added between operands when neither is a string.
+func (p BoxPrinter) Sprint(a ...any) string {
+ if p.BoxStyle == nil {
+ p.BoxStyle = &ThemeDefault.BoxStyle
+ }
+ if p.TextStyle == nil {
+ p.TextStyle = &ThemeDefault.BoxTextStyle
+ }
+ maxWidth := internal.GetStringMaxWidth(Sprint(a...))
+
+ var topLine string
+ var bottomLine string
+
+ if p.Title == "" {
+ topLine = p.BoxStyle.Sprint(p.BottomRightCornerString) + strings.Repeat(p.BoxStyle.Sprint(p.HorizontalString),
+ maxWidth+p.LeftPadding+p.RightPadding) + p.BoxStyle.Sprint(p.BottomLeftCornerString)
+ bottomLine = p.BoxStyle.Sprint(p.TopRightCornerString) + strings.Repeat(p.BoxStyle.Sprint(p.HorizontalString),
+ maxWidth+p.LeftPadding+p.RightPadding) + p.BoxStyle.Sprint(p.TopLeftCornerString)
+ } else {
+ p.Title = strings.ReplaceAll(p.Title, "\n", " ")
+ if (maxWidth + p.RightPadding + p.LeftPadding - 4) < internal.GetStringMaxWidth(p.Title) {
+ p.RightPadding = internal.GetStringMaxWidth(p.Title) - (maxWidth + p.RightPadding + p.LeftPadding - 5)
+ }
+ if p.TitleTopLeft {
+ topLine = p.BoxStyle.Sprint(p.BottomRightCornerString) + internal.AddTitleToLine(p.Title, p.BoxStyle.Sprint(p.HorizontalString), maxWidth+p.LeftPadding+p.RightPadding, true) + p.BoxStyle.Sprint(p.BottomLeftCornerString)
+ bottomLine = p.BoxStyle.Sprint(p.TopRightCornerString) + strings.Repeat(p.BoxStyle.Sprint(p.HorizontalString),
+ maxWidth+p.LeftPadding+p.RightPadding) + p.BoxStyle.Sprint(p.TopLeftCornerString)
+ } else if p.TitleTopRight {
+ topLine = p.BoxStyle.Sprint(p.BottomRightCornerString) + internal.AddTitleToLine(p.Title, p.BoxStyle.Sprint(p.HorizontalString), maxWidth+p.LeftPadding+p.RightPadding, false) + p.BoxStyle.Sprint(p.BottomLeftCornerString)
+ bottomLine = p.BoxStyle.Sprint(p.TopRightCornerString) + strings.Repeat(p.BoxStyle.Sprint(p.HorizontalString),
+ maxWidth+p.LeftPadding+p.RightPadding) + p.BoxStyle.Sprint(p.TopLeftCornerString)
+ } else if p.TitleTopCenter {
+ topLine = p.BoxStyle.Sprint(p.BottomRightCornerString) + internal.AddTitleToLineCenter(p.Title, p.BoxStyle.Sprint(p.HorizontalString), maxWidth+p.LeftPadding+p.RightPadding) + p.BoxStyle.Sprint(p.BottomLeftCornerString)
+ bottomLine = p.BoxStyle.Sprint(p.TopRightCornerString) + strings.Repeat(p.BoxStyle.Sprint(p.HorizontalString),
+ maxWidth+p.LeftPadding+p.RightPadding) + p.BoxStyle.Sprint(p.TopLeftCornerString)
+ } else if p.TitleBottomLeft {
+ topLine = p.BoxStyle.Sprint(p.BottomRightCornerString) + strings.Repeat(p.BoxStyle.Sprint(p.HorizontalString),
+ maxWidth+p.LeftPadding+p.RightPadding) + p.BoxStyle.Sprint(p.BottomLeftCornerString)
+ bottomLine = p.BoxStyle.Sprint(p.TopRightCornerString) + internal.AddTitleToLine(p.Title, p.BoxStyle.Sprint(p.HorizontalString), maxWidth+p.LeftPadding+p.RightPadding, true) + p.BoxStyle.Sprint(p.TopLeftCornerString)
+ } else if p.TitleBottomRight {
+ topLine = p.BoxStyle.Sprint(p.BottomRightCornerString) + strings.Repeat(p.BoxStyle.Sprint(p.HorizontalString),
+ maxWidth+p.LeftPadding+p.RightPadding) + p.BoxStyle.Sprint(p.BottomLeftCornerString)
+ bottomLine = p.BoxStyle.Sprint(p.TopRightCornerString) + internal.AddTitleToLine(p.Title, p.BoxStyle.Sprint(p.HorizontalString), maxWidth+p.LeftPadding+p.RightPadding, false) + p.BoxStyle.Sprint(p.TopLeftCornerString)
+ } else if p.TitleBottomCenter {
+ topLine = p.BoxStyle.Sprint(p.BottomRightCornerString) + strings.Repeat(p.BoxStyle.Sprint(p.HorizontalString),
+ maxWidth+p.LeftPadding+p.RightPadding) + p.BoxStyle.Sprint(p.BottomLeftCornerString)
+ bottomLine = p.BoxStyle.Sprint(p.TopRightCornerString) + internal.AddTitleToLineCenter(p.Title, p.BoxStyle.Sprint(p.HorizontalString), maxWidth+p.LeftPadding+p.RightPadding) + p.BoxStyle.Sprint(p.TopLeftCornerString)
+ }
+ }
+
+ boxString := strings.Repeat("\n", p.TopPadding) + Sprint(a...) + strings.Repeat("\n", p.BottomPadding)
+
+ ss := strings.Split(boxString, "\n")
+ for i, s2 := range ss {
+ if runewidth.StringWidth(RemoveColorFromString(s2)) < maxWidth {
+ ss[i] = p.BoxStyle.Sprint(p.VerticalString) + strings.Repeat(" ", p.LeftPadding) + p.TextStyle.Sprint(s2) +
+ strings.Repeat(" ", maxWidth-runewidth.StringWidth(RemoveColorFromString(s2))+p.RightPadding) +
+ p.BoxStyle.Sprint(p.VerticalString)
+ } else {
+ ss[i] = p.BoxStyle.Sprint(p.VerticalString) + strings.Repeat(" ", p.LeftPadding) + p.TextStyle.Sprint(s2) +
+ strings.Repeat(" ", p.RightPadding) + p.BoxStyle.Sprint(p.VerticalString)
+ }
+ }
+ return topLine + "\n" + strings.Join(ss, "\n") + "\n" + bottomLine
+}
+
+// Sprintln formats using the default formats for its operands and returns the resulting string.
+// Spaces are always added between operands and a newline is appended.
+func (p BoxPrinter) Sprintln(a ...any) string {
+ return p.Sprint(strings.TrimSuffix(Sprintln(a...), "\n")) + "\n"
+}
+
+// Sprintf formats according to a format specifier and returns the resulting string.
+func (p BoxPrinter) Sprintf(format string, a ...any) string {
+ return p.Sprint(Sprintf(format, a...))
+}
+
+// Sprintfln formats according to a format specifier and returns the resulting string.
+// Spaces are always added between operands and a newline is appended.
+func (p BoxPrinter) Sprintfln(format string, a ...any) string {
+ return p.Sprintf(format, a...) + "\n"
+}
+
+// Print formats using the default formats for its operands and writes to standard output.
+// Spaces are added between operands when neither is a string.
+// It returns the number of bytes written and any write error encountered.
+func (p BoxPrinter) Print(a ...any) *TextPrinter {
+ Fprint(p.Writer, p.Sprint(a...))
+ tp := TextPrinter(p)
+ return &tp
+}
+
+// Println formats using the default formats for its operands and writes to standard output.
+// Spaces are always added between operands and a newline is appended.
+// It returns the number of bytes written and any write error encountered.
+func (p BoxPrinter) Println(a ...any) *TextPrinter {
+ Fprint(p.Writer, p.Sprintln(a...))
+ tp := TextPrinter(p)
+ return &tp
+}
+
+// Printf formats according to a format specifier and writes to standard output.
+// It returns the number of bytes written and any write error encountered.
+func (p BoxPrinter) Printf(format string, a ...any) *TextPrinter {
+ Fprint(p.Writer, p.Sprintf(format, a...))
+ tp := TextPrinter(p)
+ return &tp
+}
+
+// Printfln formats according to a format specifier and writes to standard output.
+// Spaces are always added between operands and a newline is appended.
+// It returns the number of bytes written and any write error encountered.
+func (p BoxPrinter) Printfln(format string, a ...any) *TextPrinter {
+ Fprint(p.Writer, p.Sprintfln(format, a...))
+ tp := TextPrinter(p)
+ return &tp
+}
+
+// PrintOnError prints every error which is not nil.
+// If every error is nil, nothing will be printed.
+// This can be used for simple error checking.
+func (p BoxPrinter) PrintOnError(a ...any) *TextPrinter {
+ for _, arg := range a {
+ if err, ok := arg.(error); ok {
+ if err != nil {
+ p.Println(err)
+ }
+ }
+ }
+
+ tp := TextPrinter(p)
+ return &tp
+}
+
+// PrintOnErrorf wraps every error which is not nil and prints it.
+// If every error is nil, nothing will be printed.
+// This can be used for simple error checking.
+func (p BoxPrinter) PrintOnErrorf(format string, a ...any) *TextPrinter {
+ for _, arg := range a {
+ if err, ok := arg.(error); ok {
+ if err != nil {
+ p.Println(fmt.Errorf(format, err))
+ }
+ }
+ }
+
+ tp := TextPrinter(p)
+ return &tp
+}
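
A short sketch of how the BoxPrinter above can be driven; it uses only methods defined in this file, and the rendered shape follows DefaultBox's corner, padding, and title defaults.

package main

import "github.com/pterm/pterm"

func main() {
	// Build a box with a centered top title and the default one-space
	// horizontal padding, then print multi-line content inside it.
	pterm.DefaultBox.
		WithTitle("Status").
		WithTitleTopCenter().
		Println("build: ok\ntests: ok")
}

Because every With* method takes the printer by value and returns a pointer to the modified copy, chaining options like this leaves DefaultBox itself untouched.
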
diff --git a/vendor/github.com/pterm/pterm/bulletlist_printer.go b/vendor/github.com/pterm/pterm/bulletlist_printer.go
new file mode 100644
index 0000000..de710a6
--- /dev/null
+++ b/vendor/github.com/pterm/pterm/bulletlist_printer.go
@@ -0,0 +1,139 @@
+package pterm
+
+import (
+ "io"
+ "strings"
+)
+
+// BulletListItem is able to render a ListItem.
+type BulletListItem struct {
+ Level int
+ Text string
+ TextStyle *Style
+ Bullet string
+ BulletStyle *Style
+}
+
+// WithLevel returns a new BulletListItem with a specific Level.
+func (p BulletListItem) WithLevel(level int) *BulletListItem {
+ p.Level = level
+ return &p
+}
+
+// WithText returns a new BulletListItem with a specific Text.
+func (p BulletListItem) WithText(text string) *BulletListItem {
+ p.Text = text
+ return &p
+}
+
+// WithTextStyle returns a new BulletListItem with a specific TextStyle.
+func (p BulletListItem) WithTextStyle(style *Style) *BulletListItem {
+ p.TextStyle = style
+ return &p
+}
+
+// WithBullet returns a new BulletListItem with a specific Bullet.

+func (p BulletListItem) WithBullet(bullet string) *BulletListItem {
+ p.Bullet = bullet
+ return &p
+}
+
+// WithBulletStyle returns a new BulletListItem with a specific BulletStyle.
+func (p BulletListItem) WithBulletStyle(style *Style) *BulletListItem {
+ p.BulletStyle = style
+ return &p
+}
+
+// DefaultBulletList contains default values for the BulletListPrinter.
+var DefaultBulletList = BulletListPrinter{
+ Bullet: "•",
+ TextStyle: &ThemeDefault.BulletListTextStyle,
+ BulletStyle: &ThemeDefault.BulletListBulletStyle,
+}
+
+// BulletListPrinter is able to render a list.
+type BulletListPrinter struct {
+ Items []BulletListItem
+ TextStyle *Style
+ Bullet string
+ BulletStyle *Style
+ Writer io.Writer
+}
+
+// WithItems returns a new list with specific Items.
+func (l BulletListPrinter) WithItems(items []BulletListItem) *BulletListPrinter {
+ l.Items = append(l.Items, items...)
+ return &l
+}
+
+// WithTextStyle returns a new list with a specific text style.
+func (l BulletListPrinter) WithTextStyle(style *Style) *BulletListPrinter {
+ l.TextStyle = style
+ return &l
+}
+
+// WithBullet returns a new list with a specific bullet.
+func (l BulletListPrinter) WithBullet(bullet string) *BulletListPrinter {
+ l.Bullet = bullet
+ return &l
+}
+
+// WithBulletStyle returns a new list with a specific bullet style.
+func (l BulletListPrinter) WithBulletStyle(style *Style) *BulletListPrinter {
+ l.BulletStyle = style
+ return &l
+}
+
+// WithWriter sets the custom Writer.
+func (l BulletListPrinter) WithWriter(writer io.Writer) *BulletListPrinter {
+ l.Writer = writer
+ return &l
+}
+
+// Render prints the list to the terminal.
+func (l BulletListPrinter) Render() error {
+ s, _ := l.Srender()
+ Fprintln(l.Writer, s)
+
+ return nil
+}
+
+// Srender renders the list as a string.
+func (l BulletListPrinter) Srender() (string, error) {
+ var ret strings.Builder
+ for _, item := range l.Items {
+ if item.TextStyle == nil {
+ if l.TextStyle == nil {
+ item.TextStyle = &ThemeDefault.BulletListTextStyle
+ } else {
+ item.TextStyle = l.TextStyle
+ }
+ }
+ if item.BulletStyle == nil {
+ if l.BulletStyle == nil {
+ item.BulletStyle = &ThemeDefault.BulletListBulletStyle
+ } else {
+ item.BulletStyle = l.BulletStyle
+ }
+ }
+
+ split := strings.Split(item.Text, "\n")
+ for i, line := range split {
+ ret.WriteString(strings.Repeat(" ", item.Level))
+ if i == 0 {
+ if item.Bullet == "" {
+ ret.WriteString(item.BulletStyle.Sprint(l.Bullet))
+ } else {
+ ret.WriteString(item.BulletStyle.Sprint(item.Bullet))
+ }
+ ret.WriteByte(' ')
+ } else {
+ ret.WriteString(strings.Repeat(" ", len(item.Bullet)))
+ ret.WriteString(" ")
+ }
+ ret.WriteString(item.TextStyle.Sprint(line))
+ ret.WriteByte('\n')
+ }
+ }
+ return ret.String(), nil
+}
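
A sketch of the BulletListPrinter above in use; only identifiers defined in this file are referenced.

package main

import "github.com/pterm/pterm"

func main() {
	// Item-level Bullet and TextStyle override the printer-level defaults;
	// Level controls the leading indentation of each entry.
	_ = pterm.DefaultBulletList.WithItems([]pterm.BulletListItem{
		{Level: 0, Text: "vendor"},
		{Level: 1, Text: "github.com/pterm/pterm"},
		{Level: 2, Text: "bulletlist_printer.go", Bullet: "-"},
	}).Render()
}
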
diff --git a/vendor/github.com/pterm/pterm/center_printer.go b/vendor/github.com/pterm/pterm/center_printer.go
new file mode 100644
index 0000000..46a87f0
--- /dev/null
+++ b/vendor/github.com/pterm/pterm/center_printer.go
@@ -0,0 +1,171 @@
+package pterm
+
+import (
+ "fmt"
+ "io"
+ "strings"
+
+ "github.com/mattn/go-runewidth"
+
+ "github.com/pterm/pterm/internal"
+)
+
+// DefaultCenter is the default CenterPrinter.
+var DefaultCenter = CenterPrinter{
+ CenterEachLineSeparately: false,
+}
+
+// CenterPrinter prints centered text.
+type CenterPrinter struct {
+ CenterEachLineSeparately bool
+ Writer io.Writer
+}
+
+// WithCenterEachLineSeparately centers each line separately.
+func (p CenterPrinter) WithCenterEachLineSeparately(b ...bool) *CenterPrinter {
+ bt := internal.WithBoolean(b)
+ p.CenterEachLineSeparately = bt
+ return &p
+}
+
+// WithWriter sets the custom Writer.
+func (p CenterPrinter) WithWriter(writer io.Writer) *CenterPrinter {
+ p.Writer = writer
+ return &p
+}
+
+// Sprint formats using the default formats for its operands and returns the resulting string.
+// Spaces are added between operands when neither is a string.
+func (p CenterPrinter) Sprint(a ...any) string {
+ if RawOutput {
+ return Sprint(a...)
+ }
+
+ lines := strings.Split(Sprint(a...), "\n")
+
+ var ret strings.Builder
+
+ if p.CenterEachLineSeparately {
+ for _, line := range lines {
+ margin := (GetTerminalWidth() - runewidth.StringWidth(RemoveColorFromString(line))) / 2
+ if margin >= 1 {
+ ret.WriteString(strings.Repeat(" ", margin))
+ }
+ ret.WriteString(line)
+ ret.WriteByte('\n')
+ }
+ return ret.String()
+ }
+
+ var maxLineWidth int
+
+ for _, line := range lines {
+ lineLength := runewidth.StringWidth(RemoveColorFromString(line))
+ if maxLineWidth < lineLength {
+ maxLineWidth = lineLength
+ }
+ }
+
+ indent := GetTerminalWidth() - maxLineWidth
+
+ if indent/2 < 1 {
+ for _, line := range lines {
+ ret.WriteString(line)
+ ret.WriteByte('\n')
+ }
+
+ return ret.String()
+ }
+
+ for _, line := range lines {
+ ret.WriteString(strings.Repeat(" ", indent/2))
+ ret.WriteString(line)
+ ret.WriteByte('\n')
+ }
+
+ return ret.String()
+}
+
+// Sprintln formats using the default formats for its operands and returns the resulting string.
+// Spaces are always added between operands and a newline is appended.
+func (p CenterPrinter) Sprintln(a ...any) string {
+ return p.Sprint(Sprintln(a...))
+}
+
+// Sprintf formats according to a format specifier and returns the resulting string.
+func (p CenterPrinter) Sprintf(format string, a ...any) string {
+ return p.Sprint(Sprintf(format, a...))
+}
+
+// Sprintfln formats according to a format specifier and returns the resulting string.
+// Spaces are always added between operands and a newline is appended.
+func (p CenterPrinter) Sprintfln(format string, a ...any) string {
+ return p.Sprintf(format, a...) + "\n"
+}
+
+// Print formats using the default formats for its operands and writes to standard output.
+// Spaces are added between operands when neither is a string.
+// It returns the number of bytes written and any write error encountered.
+func (p CenterPrinter) Print(a ...any) *TextPrinter {
+ Fprint(p.Writer, p.Sprint(a...))
+ tp := TextPrinter(p)
+ return &tp
+}
+
+// Println formats using the default formats for its operands and writes to standard output.
+// Spaces are always added between operands and a newline is appended.
+// It returns the number of bytes written and any write error encountered.
+func (p CenterPrinter) Println(a ...any) *TextPrinter {
+ Fprint(p.Writer, p.Sprintln(a...))
+ tp := TextPrinter(p)
+ return &tp
+}
+
+// Printf formats according to a format specifier and writes to standard output.
+// It returns the number of bytes written and any write error encountered.
+func (p CenterPrinter) Printf(format string, a ...any) *TextPrinter {
+ Fprint(p.Writer, p.Sprintf(format, a...))
+ tp := TextPrinter(p)
+ return &tp
+}
+
+// Printfln formats according to a format specifier and writes to standard output.
+// Spaces are always added between operands and a newline is appended.
+// It returns the number of bytes written and any write error encountered.
+func (p CenterPrinter) Printfln(format string, a ...any) *TextPrinter {
+ Fprint(p.Writer, p.Sprintfln(format, a...))
+ tp := TextPrinter(p)
+ return &tp
+}
+
+// PrintOnError prints every error which is not nil.
+// If every error is nil, nothing will be printed.
+// This can be used for simple error checking.
+func (p CenterPrinter) PrintOnError(a ...any) *TextPrinter {
+ for _, arg := range a {
+ if err, ok := arg.(error); ok {
+ if err != nil {
+ p.Println(err)
+ }
+ }
+ }
+
+ tp := TextPrinter(p)
+ return &tp
+}
+
+// PrintOnErrorf wraps every error which is not nil and prints it.
+// If every error is nil, nothing will be printed.
+// This can be used for simple error checking.
+func (p CenterPrinter) PrintOnErrorf(format string, a ...any) *TextPrinter {
+ for _, arg := range a {
+ if err, ok := arg.(error); ok {
+ if err != nil {
+ p.Println(fmt.Errorf(format, err))
+ }
+ }
+ }
+
+ tp := TextPrinter(p)
+ return &tp
+}
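
For orientation, a minimal sketch using only the CenterPrinter methods defined above.

package main

import "github.com/pterm/pterm"

func main() {
	// With CenterEachLineSeparately disabled (the default), the whole block
	// is shifted by a common margin derived from its longest line; enabling
	// it centers every line on its own width instead.
	pterm.DefaultCenter.Println("pterm\ncenter printer demo")
	pterm.DefaultCenter.WithCenterEachLineSeparately().Println("pterm\ncenter printer demo")
}
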
diff --git a/vendor/github.com/pterm/pterm/codecov.yml b/vendor/github.com/pterm/pterm/codecov.yml
new file mode 100644
index 0000000..bfdc987
--- /dev/null
+++ b/vendor/github.com/pterm/pterm/codecov.yml
@@ -0,0 +1,8 @@
+coverage:
+ status:
+ project:
+ default:
+ informational: true
+ patch:
+ default:
+ informational: true
diff --git a/vendor/github.com/pterm/pterm/color.go b/vendor/github.com/pterm/pterm/color.go
new file mode 100644
index 0000000..451b6ec
--- /dev/null
+++ b/vendor/github.com/pterm/pterm/color.go
@@ -0,0 +1,383 @@
+package pterm
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/gookit/color"
+)
+
+// PrintColor is false if PTerm should not print colored output.
+var PrintColor = true
+
+// EnableColor enables colors.
+func EnableColor() {
+ color.Enable = true
+ PrintColor = true
+}
+
+// DisableColor disables colors.
+func DisableColor() {
+ color.Enable = false
+ PrintColor = false
+}
+
+// Foreground colors. basic foreground colors 30 - 37.
+const (
+ FgBlack Color = iota + 30
+ FgRed
+ FgGreen
+ FgYellow
+ FgBlue
+ FgMagenta
+ FgCyan
+ FgWhite
+ // FgDefault revert default FG.
+ FgDefault Color = 39
+)
+
+// Extra foreground color 90 - 97.
+const (
+ FgDarkGray Color = iota + 90
+ FgLightRed
+ FgLightGreen
+ FgLightYellow
+ FgLightBlue
+ FgLightMagenta
+ FgLightCyan
+ FgLightWhite
+ // FgGray is an alias of FgDarkGray.
+ FgGray Color = 90
+)
+
+// Background colors. basic background colors 40 - 47.
+const (
+ BgBlack Color = iota + 40
+ BgRed
+ BgGreen
+ BgYellow // BgBrown like yellow
+ BgBlue
+ BgMagenta
+ BgCyan
+ BgWhite
+ // BgDefault reverts to the default background.
+ BgDefault Color = 49
+)
+
+// Extra background color 100 - 107.
+const (
+ BgDarkGray Color = iota + 100
+ BgLightRed
+ BgLightGreen
+ BgLightYellow
+ BgLightBlue
+ BgLightMagenta
+ BgLightCyan
+ BgLightWhite
+ // BgGray is an alias of BgDarkGray.
+ BgGray Color = 100
+)
+
+// Option settings.
+const (
+ Reset Color = iota
+ Bold
+ Fuzzy
+ Italic
+ Underscore
+ Blink
+ FastBlink
+ Reverse
+ Concealed
+ Strikethrough
+)
+
+var (
+ // Red is an alias for FgRed.Sprint.
+ Red = FgRed.Sprint
+ // Cyan is an alias for FgCyan.Sprint.
+ Cyan = FgCyan.Sprint
+ // Gray is an alias for FgGray.Sprint.
+ Gray = FgGray.Sprint
+ // Blue is an alias for FgBlue.Sprint.
+ Blue = FgBlue.Sprint
+ // Black is an alias for FgBlack.Sprint.
+ Black = FgBlack.Sprint
+ // Green is an alias for FgGreen.Sprint.
+ Green = FgGreen.Sprint
+ // White is an alias for FgWhite.Sprint.
+ White = FgWhite.Sprint
+ // Yellow is an alias for FgYellow.Sprint.
+ Yellow = FgYellow.Sprint
+ // Magenta is an alias for FgMagenta.Sprint.
+ Magenta = FgMagenta.Sprint
+
+ // Normal is an alias for FgDefault.Sprint.
+ Normal = FgDefault.Sprint
+
+ // extra light.
+
+ // LightRed is a shortcut for FgLightRed.Sprint.
+ LightRed = FgLightRed.Sprint
+ // LightCyan is a shortcut for FgLightCyan.Sprint.
+ LightCyan = FgLightCyan.Sprint
+ // LightBlue is a shortcut for FgLightBlue.Sprint.
+ LightBlue = FgLightBlue.Sprint
+ // LightGreen is a shortcut for FgLightGreen.Sprint.
+ LightGreen = FgLightGreen.Sprint
+ // LightWhite is a shortcut for FgLightWhite.Sprint.
+ LightWhite = FgLightWhite.Sprint
+ // LightYellow is a shortcut for FgLightYellow.Sprint.
+ LightYellow = FgLightYellow.Sprint
+ // LightMagenta is a shortcut for FgLightMagenta.Sprint.
+ LightMagenta = FgLightMagenta.Sprint
+)
+
+// Color is a number which will be used to color strings in the terminal.
+type Color uint8
+
+// Sprintln formats using the default formats for its operands and returns the resulting string.
+// Spaces are always added between operands and a newline is appended.
+// Input will be colored with the parent Color.
+func (c Color) Sprintln(a ...any) string {
+ str := fmt.Sprintln(a...)
+ return c.Sprint(str)
+}
+
+// Sprint formats using the default formats for its operands and returns the resulting string.
+// Spaces are added between operands when neither is a string.
+// Input will be colored with the parent Color.
+func (c Color) Sprint(a ...any) string {
+ message := Sprint(a...)
+ messageLines := strings.Split(message, "\n")
+ for i, line := range messageLines {
+ messageLines[i] = color.RenderCode(c.String(), strings.ReplaceAll(line, color.ResetSet, Sprintf("\x1b[0m\u001B[%sm", c.String())))
+ }
+ message = strings.Join(messageLines, "\n")
+ return message
+}
+
+// Sprintf formats according to a format specifier and returns the resulting string.
+// Input will be colored with the parent Color.
+func (c Color) Sprintf(format string, a ...any) string {
+ return c.Sprint(Sprintf(format, a...))
+}
+
+// Sprintfln formats according to a format specifier and returns the resulting string.
+// Spaces are always added between operands and a newline is appended.
+// Input will be colored with the parent Color.
+func (c Color) Sprintfln(format string, a ...any) string {
+ return c.Sprint(Sprintf(format, a...) + "\n")
+}
+
+// Println formats using the default formats for its operands and writes to standard output.
+// Spaces are always added between operands and a newline is appended.
+// It returns the number of bytes written and any write error encountered.
+// Input will be colored with the parent Color.
+func (c Color) Println(a ...any) *TextPrinter {
+ Print(c.Sprintln(a...))
+ tc := TextPrinter(c)
+ return &tc
+}
+
+// Print formats using the default formats for its operands and writes to standard output.
+// Spaces are added between operands when neither is a string.
+// It returns the number of bytes written and any write error encountered.
+// Input will be colored with the parent Color.
+func (c Color) Print(a ...any) *TextPrinter {
+ Print(c.Sprint(a...))
+ tc := TextPrinter(c)
+ return &tc
+}
+
+// Printf formats according to a format specifier and writes to standard output.
+// It returns the number of bytes written and any write error encountered.
+// Input will be colored with the parent Color.
+func (c Color) Printf(format string, a ...any) *TextPrinter {
+ Print(c.Sprintf(format, a...))
+ tc := TextPrinter(c)
+ return &tc
+}
+
+// Printfln formats according to a format specifier and writes to standard output.
+// Spaces are always added between operands and a newline is appended.
+// It returns the number of bytes written and any write error encountered.
+// Input will be colored with the parent Color.
+func (c Color) Printfln(format string, a ...any) *TextPrinter {
+ Print(c.Sprintfln(format, a...))
+ tp := TextPrinter(c)
+ return &tp
+}
+
+// PrintOnError prints every error which is not nil.
+// If every error is nil, nothing will be printed.
+// This can be used for simple error checking.
+func (c Color) PrintOnError(a ...any) *TextPrinter {
+ for _, arg := range a {
+ if err, ok := arg.(error); ok {
+ if err != nil {
+ c.Println(err)
+ }
+ }
+ }
+
+ tp := TextPrinter(c)
+ return &tp
+}
+
+// PrintOnErrorf wraps every error which is not nil and prints it.
+// If every error is nil, nothing will be printed.
+// This can be used for simple error checking.
+func (c Color) PrintOnErrorf(format string, a ...any) *TextPrinter {
+ for _, arg := range a {
+ if err, ok := arg.(error); ok {
+ if err != nil {
+ c.Println(fmt.Errorf(format, err))
+ }
+ }
+ }
+
+ tp := TextPrinter(c)
+ return &tp
+}
+
+// String converts the color to a string, e.g. "35".
+func (c Color) String() string {
+ return fmt.Sprintf("%d", c)
+}
+
+// ToStyle converts the color to a style.
+func (c Color) ToStyle() *Style {
+ return &Style{c}
+}
+
+// Style is a collection of colors.
+// Can include foreground, background and styling (eg. Bold, Underscore, etc.) colors.
+type Style []Color
+
+// NewStyle returns a new Style.
+// Accepts multiple colors.
+func NewStyle(colors ...Color) *Style {
+ ret := Style{}
+ for _, c := range colors {
+ ret = append(ret, c)
+ }
+ return &ret
+}
+
+// Add styles to the current Style.
+func (s Style) Add(styles ...Style) Style {
+ ret := s
+
+ for _, st := range styles {
+ ret = append(ret, st...)
+ }
+
+ return ret
+}
+
+// RemoveColor removes the given colors from the Style.
+func (s Style) RemoveColor(colors ...Color) Style {
+ ret := s
+
+ for _, c := range colors {
+ // remove via index
+ for i := 0; i < len(ret); i++ {
+ if ret[i] == c {
+ ret = append(ret[:i], ret[i+1:]...)
+ i--
+ }
+ }
+ }
+
+ return ret
+}
+
+// Sprint formats using the default formats for its operands and returns the resulting string.
+// Spaces are added between operands when neither is a string.
+// Input will be colored with the parent Style.
+func (s Style) Sprint(a ...any) string {
+ message := Sprint(a...)
+ messageLines := strings.Split(message, "\n")
+ for i, line := range messageLines {
+ messageLines[i] = color.RenderCode(s.String(), strings.ReplaceAll(line, color.ResetSet, Sprintf("\x1b[0m\u001B[%sm", s.String())))
+ }
+ message = strings.Join(messageLines, "\n")
+ return color.RenderCode(s.String(), message)
+}
+
+// Sprintln formats using the default formats for its operands and returns the resulting string.
+// Spaces are always added between operands and a newline is appended.
+// Input will be colored with the parent Style.
+func (s Style) Sprintln(a ...any) string {
+ return s.Sprint(a...) + "\n"
+}
+
+// Sprintf formats according to a format specifier and returns the resulting string.
+// Input will be colored with the parent Style.
+func (s Style) Sprintf(format string, a ...any) string {
+ return s.Sprint(Sprintf(format, a...))
+}
+
+// Sprintfln formats according to a format specifier and returns the resulting string.
+// Spaces are always added between operands and a newline is appended.
+// Input will be colored with the parent Style.
+func (s Style) Sprintfln(format string, a ...any) string {
+ return s.Sprint(Sprintf(format, a...) + "\n")
+}
+
+// Print formats using the default formats for its operands and writes to standard output.
+// Spaces are added between operands when neither is a string.
+// It returns the number of bytes written and any write error encountered.
+// Input will be colored with the parent Style.
+func (s Style) Print(a ...any) {
+ Print(s.Sprint(a...))
+}
+
+// Println formats using the default formats for its operands and writes to standard output.
+// Spaces are always added between operands and a newline is appended.
+// It returns the number of bytes written and any write error encountered.
+// Input will be colored with the parent Style.
+func (s Style) Println(a ...any) {
+ Println(s.Sprint(a...))
+}
+
+// Printf formats according to a format specifier and writes to standard output.
+// It returns the number of bytes written and any write error encountered.
+// Input will be colored with the parent Style.
+func (s Style) Printf(format string, a ...any) {
+ Print(s.Sprintf(format, a...))
+}
+
+// Printfln formats according to a format specifier and writes to standard output.
+// Spaces are always added between operands and a newline is appended.
+// It returns the number of bytes written and any write error encountered.
+// Input will be colored with the parent Style.
+func (s Style) Printfln(format string, a ...any) {
+ Print(s.Sprintfln(format, a...))
+}
+
+// Code converts the Style to a code string, e.g. "32;45;3".
+func (s Style) Code() string {
+ return s.String()
+}
+
+// String converts the Style to a code string, e.g. "32;45;3".
+func (s Style) String() string {
+ return colors2code(s...)
+}
+
+// Converts colors to code.
+// Return format: "32;45;3".
+func colors2code(colors ...Color) string {
+ if len(colors) == 0 {
+ return ""
+ }
+
+ var codes []string
+ for _, c := range colors {
+ codes = append(codes, c.String())
+ }
+
+ return strings.Join(codes, ";")
+}
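
A short sketch of the Color and Style helpers defined above; all constants and aliases used here come from this file.

package main

import "github.com/pterm/pterm"

func main() {
	// Single colors print directly...
	pterm.FgCyan.Println("cyan text")
	// ...the aliases wrap Sprint for inline use...
	msg := pterm.Red("failed") + " / " + pterm.Green("passed")
	pterm.Println(msg)
	// ...and a Style joins several codes into one escape sequence,
	// e.g. "1;93" for Bold plus FgLightYellow.
	pterm.NewStyle(pterm.Bold, pterm.FgLightYellow).Println("warning")
}
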
diff --git a/vendor/github.com/pterm/pterm/conventionalcommit.json b/vendor/github.com/pterm/pterm/conventionalcommit.json
new file mode 100644
index 0000000..adfae9b
--- /dev/null
+++ b/vendor/github.com/pterm/pterm/conventionalcommit.json
@@ -0,0 +1,61 @@
+{
+ "types": {
+ "refactor": {
+ "description": "Changes which neither fix a bug nor add a feature",
+ },
+ "fix": {
+ "description": "Changes which patch a bug"
+ },
+ "feat": {
+ "description": "Changes which introduce a new feature"
+ },
+ "build": {
+ "description": "Changes which affect the build system or external dependencies (example scopes: gulp, broccoli, npm)"
+ },
+ "chore": {
+ "description": "Changes which aren’t user-facing"
+ },
+ "style": {
+ "description": "Changes which don't affect code logic.\nWhite-spaces, formatting, missing semi-colons, etc"
+ },
+ "test": {
+ "description": "Changes which add missing tests or correct existing tests"
+ },
+ "docs": {
+ "description": "Changes which affect documentation",
+ "scopes": {
+ "pterm-sh": {},
+ "examples": {},
+ "readme": {},
+ "contributing": {}
+ }
+ },
+ "perf": {
+ "description": "Changes which improve performance"
+ },
+ "ci": {
+ "description": "Changes which affect CI configuration files and scripts (example scopes: travis, circle, browser-stack, sauce-labs)"
+ },
+ "revert": {
+ "description": "Changes which revert a previous commit"
+ }
+ },
+ "footerTypes": [
+ {
+ "name": "BREAKING CHANGE",
+ "description": "The commit introduces breaking API changes"
+ },
+ {
+ "name": "Closes",
+ "description": "The commit closes issues or pull requests"
+ },
+ {
+ "name": "Implements",
+ "description": "The commit implements features"
+ },
+ {
+ "name": "Co-authored-by",
+ "description": "The commit is co-authored by another person (for multiple people use one line each)"
+ }
+ ]
+}
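
To make the configuration above concrete, a hypothetical commit message matching one of the listed types, a docs scope, and a footer type could look like this:

docs(readme): describe the new box title alignment options

Closes: #123
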
diff --git a/vendor/github.com/pterm/pterm/coverage.txt b/vendor/github.com/pterm/pterm/coverage.txt
new file mode 100644
index 0000000..330b7db
--- /dev/null
+++ b/vendor/github.com/pterm/pterm/coverage.txt
@@ -0,0 +1,1030 @@
+mode: atomic
+github.com/pterm/pterm/internal/percentage.go:6.49,8.2 1 8
+github.com/pterm/pterm/internal/percentage.go:11.54,13.2 1 4
+github.com/pterm/pterm/internal/remove_and_count_prefix.go:7.66,11.2 3 0
+github.com/pterm/pterm/internal/title_in_line.go:10.71,12.10 2 0
+github.com/pterm/pterm/internal/title_in_line.go:18.2,18.12 1 0
+github.com/pterm/pterm/internal/title_in_line.go:12.10,14.3 1 0
+github.com/pterm/pterm/internal/title_in_line.go:14.8,16.3 1 0
+github.com/pterm/pterm/internal/title_in_line.go:22.66,30.2 5 0
+github.com/pterm/pterm/internal/map_range_to_range.go:3.75,4.26 1 4
+github.com/pterm/pterm/internal/map_range_to_range.go:7.2,7.73 1 4
+github.com/pterm/pterm/internal/map_range_to_range.go:4.26,6.3 1 0
+github.com/pterm/pterm/internal/max_text_width.go:11.38,14.24 3 0
+github.com/pterm/pterm/internal/max_text_width.go:19.2,19.12 1 0
+github.com/pterm/pterm/internal/max_text_width.go:14.24,15.55 1 0
+github.com/pterm/pterm/internal/max_text_width.go:15.55,17.4 1 0
+github.com/pterm/pterm/internal/center_text.go:10.48,13.32 3 2
+github.com/pterm/pterm/internal/center_text.go:36.2,37.26 2 2
+github.com/pterm/pterm/internal/center_text.go:41.2,41.39 1 2
+github.com/pterm/pterm/internal/center_text.go:13.32,14.41 1 4
+github.com/pterm/pterm/internal/center_text.go:14.41,17.32 3 1
+github.com/pterm/pterm/internal/center_text.go:24.4,24.41 1 1
+github.com/pterm/pterm/internal/center_text.go:17.32,18.31 1 11
+github.com/pterm/pterm/internal/center_text.go:22.5,22.52 1 11
+github.com/pterm/pterm/internal/center_text.go:18.31,21.6 2 2
+github.com/pterm/pterm/internal/center_text.go:24.41,28.5 3 3
+github.com/pterm/pterm/internal/center_text.go:29.9,33.4 3 3
+github.com/pterm/pterm/internal/center_text.go:37.26,39.3 1 6
+github.com/pterm/pterm/internal/longest_line.go:11.49,14.29 3 0
+github.com/pterm/pterm/internal/longest_line.go:20.2,20.16 1 0
+github.com/pterm/pterm/internal/longest_line.go:14.29,15.101 1 0
+github.com/pterm/pterm/internal/longest_line.go:15.101,17.4 1 0
+github.com/pterm/pterm/internal/with_boolean.go:4.33,5.17 1 0
+github.com/pterm/pterm/internal/with_boolean.go:8.2,8.13 1 0
+github.com/pterm/pterm/internal/with_boolean.go:5.17,7.3 1 0
+github.com/pterm/pterm/putils/download-with-progressbar.go:20.58,25.2 4 0
+github.com/pterm/pterm/putils/download-with-progressbar.go:29.121,32.16 3 0
+github.com/pterm/pterm/putils/download-with-progressbar.go:36.2,37.16 2 0
+github.com/pterm/pterm/putils/download-with-progressbar.go:41.2,45.16 4 0
+github.com/pterm/pterm/putils/download-with-progressbar.go:49.2,50.73 2 0
+github.com/pterm/pterm/putils/download-with-progressbar.go:55.2,56.16 2 0
+github.com/pterm/pterm/putils/download-with-progressbar.go:60.2,61.12 2 0
+github.com/pterm/pterm/putils/download-with-progressbar.go:32.16,34.3 1 0
+github.com/pterm/pterm/putils/download-with-progressbar.go:37.16,40.3 2 0
+github.com/pterm/pterm/putils/download-with-progressbar.go:45.16,47.3 1 0
+github.com/pterm/pterm/putils/download-with-progressbar.go:50.73,53.3 2 0
+github.com/pterm/pterm/putils/download-with-progressbar.go:56.16,58.3 1 0
+github.com/pterm/pterm/putils/download-with-progressbar.go:66.96,68.2 1 0
+github.com/pterm/pterm/putils/run-with-spinner.go:6.103,8.16 2 0
+github.com/pterm/pterm/putils/run-with-spinner.go:12.2,15.12 3 0
+github.com/pterm/pterm/putils/run-with-spinner.go:8.16,10.3 1 0
+github.com/pterm/pterm/putils/run-with-spinner.go:19.107,21.2 1 0
+github.com/pterm/pterm/putils/stats.go:11.70,13.29 2 0
+github.com/pterm/pterm/putils/stats.go:25.2,29.12 3 0
+github.com/pterm/pterm/putils/stats.go:13.29,18.17 4 0
+github.com/pterm/pterm/putils/stats.go:22.3,22.20 1 0
+github.com/pterm/pterm/putils/stats.go:18.17,20.4 1 0
+github.com/pterm/pterm/putils/tabledata-from-struct-slice.go:13.105,15.32 2 0
+github.com/pterm/pterm/putils/tabledata-from-struct-slice.go:18.2,21.30 3 0
+github.com/pterm/pterm/putils/tabledata-from-struct-slice.go:26.2,26.33 1 0
+github.com/pterm/pterm/putils/tabledata-from-struct-slice.go:30.2,33.33 3 0
+github.com/pterm/pterm/putils/tabledata-from-struct-slice.go:37.2,44.33 4 0
+github.com/pterm/pterm/putils/tabledata-from-struct-slice.go:52.2,52.26 1 0
+github.com/pterm/pterm/putils/tabledata-from-struct-slice.go:61.2,63.22 2 0
+github.com/pterm/pterm/putils/tabledata-from-struct-slice.go:15.32,17.3 1 0
+github.com/pterm/pterm/putils/tabledata-from-struct-slice.go:21.30,24.3 2 0
+github.com/pterm/pterm/putils/tabledata-from-struct-slice.go:26.33,28.3 1 0
+github.com/pterm/pterm/putils/tabledata-from-struct-slice.go:33.33,35.3 1 0
+github.com/pterm/pterm/putils/tabledata-from-struct-slice.go:44.33,45.16 1 0
+github.com/pterm/pterm/putils/tabledata-from-struct-slice.go:45.16,47.4 1 0
+github.com/pterm/pterm/putils/tabledata-from-struct-slice.go:47.9,49.4 1 0
+github.com/pterm/pterm/putils/tabledata-from-struct-slice.go:52.26,55.34 3 0
+github.com/pterm/pterm/putils/tabledata-from-struct-slice.go:59.3,59.36 1 0
+github.com/pterm/pterm/putils/tabledata-from-struct-slice.go:55.34,58.4 2 0
+github.com/pterm/pterm/putils/tabledata-from-struct-slice.go:69.79,71.2 1 0
+github.com/pterm/pterm/putils/tabledata_from_csv.go:11.56,13.2 1 1
+github.com/pterm/pterm/putils/tabledata_from_separated_values.go:13.99,14.57 1 3
+github.com/pterm/pterm/putils/tabledata_from_separated_values.go:18.2,18.8 1 3
+github.com/pterm/pterm/putils/tabledata_from_separated_values.go:14.57,16.3 1 6
+github.com/pterm/pterm/putils/tabledata_from_tsv.go:11.56,13.2 1 1
+github.com/pterm/pterm/rgb.go:23.42,25.2 1 14316
+github.com/pterm/pterm/rgb.go:28.32,30.2 1 28640
+github.com/pterm/pterm/rgb.go:33.45,38.19 4 12
+github.com/pterm/pterm/rgb.go:41.2,41.19 1 12
+github.com/pterm/pterm/rgb.go:45.2,46.16 2 8
+github.com/pterm/pterm/rgb.go:49.2,55.8 2 7
+github.com/pterm/pterm/rgb.go:38.19,40.3 1 4
+github.com/pterm/pterm/rgb.go:41.19,43.3 1 4
+github.com/pterm/pterm/rgb.go:46.16,48.3 1 1
+github.com/pterm/pterm/rgb.go:59.62,60.13 1 14416
+github.com/pterm/pterm/rgb.go:65.2,65.19 1 14416
+github.com/pterm/pterm/rgb.go:85.2,85.10 1 1
+github.com/pterm/pterm/rgb.go:60.13,64.3 3 2
+github.com/pterm/pterm/rgb.go:65.19,71.3 1 14411
+github.com/pterm/pterm/rgb.go:71.8,71.25 1 5
+github.com/pterm/pterm/rgb.go:71.25,74.18 3 4
+github.com/pterm/pterm/rgb.go:74.18,76.4 1 1
+github.com/pterm/pterm/rgb.go:76.9,77.36 1 3
+github.com/pterm/pterm/rgb.go:77.36,79.24 2 3
+github.com/pterm/pterm/rgb.go:79.24,81.6 1 3
+github.com/pterm/pterm/rgb.go:90.46,92.2 1 4224
+github.com/pterm/pterm/rgb.go:96.48,98.2 1 1950
+github.com/pterm/pterm/rgb.go:101.62,103.2 1 2052
+github.com/pterm/pterm/rgb.go:107.64,109.2 1 1944
+github.com/pterm/pterm/rgb.go:114.51,118.2 3 54
+github.com/pterm/pterm/rgb.go:123.53,127.2 3 978
+github.com/pterm/pterm/rgb.go:131.67,135.2 3 54
+github.com/pterm/pterm/rgb.go:140.69,144.2 3 972
+github.com/pterm/pterm/rgb.go:149.58,150.24 1 6
+github.com/pterm/pterm/rgb.go:158.2,159.12 2 6
+github.com/pterm/pterm/rgb.go:150.24,151.33 1 6
+github.com/pterm/pterm/rgb.go:151.33,152.18 1 3
+github.com/pterm/pterm/rgb.go:152.18,154.5 1 3
+github.com/pterm/pterm/rgb.go:165.74,166.24 1 6
+github.com/pterm/pterm/rgb.go:174.2,175.12 2 6
+github.com/pterm/pterm/rgb.go:166.24,167.33 1 6
+github.com/pterm/pterm/rgb.go:167.33,168.18 1 3
+github.com/pterm/pterm/rgb.go:168.18,170.5 1 3
+github.com/pterm/pterm/bigtext_printer.go:17.48,19.2 1 3
+github.com/pterm/pterm/bigtext_printer.go:22.71,26.23 3 6
+github.com/pterm/pterm/bigtext_printer.go:33.2,33.10 1 6
+github.com/pterm/pterm/bigtext_printer.go:26.23,31.3 1 19
+github.com/pterm/pterm/bigtext_printer.go:37.64,41.23 3 2
+github.com/pterm/pterm/bigtext_printer.go:49.2,49.10 1 2
+github.com/pterm/pterm/bigtext_printer.go:41.23,47.3 1 7
+github.com/pterm/pterm/bigtext_printer.go:60.49,63.2 2 1
+github.com/pterm/pterm/bigtext_printer.go:66.42,69.2 2 1
+github.com/pterm/pterm/bigtext_printer.go:72.46,75.2 2 1
+github.com/pterm/pterm/bigtext_printer.go:87.84,90.2 2 1
+github.com/pterm/pterm/bigtext_printer.go:93.73,95.33 2 5
+github.com/pterm/pterm/bigtext_printer.go:98.2,99.11 2 5
+github.com/pterm/pterm/bigtext_printer.go:95.33,97.3 1 6
+github.com/pterm/pterm/bigtext_printer.go:103.76,106.2 2 0
+github.com/pterm/pterm/bigtext_printer.go:109.51,112.15 2 8
+github.com/pterm/pterm/bigtext_printer.go:119.2,120.30 2 5
+github.com/pterm/pterm/bigtext_printer.go:130.2,132.31 2 5
+github.com/pterm/pterm/bigtext_printer.go:139.2,139.34 1 5
+github.com/pterm/pterm/bigtext_printer.go:161.2,161.17 1 5
+github.com/pterm/pterm/bigtext_printer.go:112.15,113.36 1 3
+github.com/pterm/pterm/bigtext_printer.go:116.3,116.18 1 3
+github.com/pterm/pterm/bigtext_printer.go:113.36,115.4 1 15
+github.com/pterm/pterm/bigtext_printer.go:120.30,121.47 1 20
+github.com/pterm/pterm/bigtext_printer.go:121.47,127.4 1 20
+github.com/pterm/pterm/bigtext_printer.go:132.31,134.20 2 20
+github.com/pterm/pterm/bigtext_printer.go:134.20,136.4 1 4
+github.com/pterm/pterm/bigtext_printer.go:139.34,140.37 1 21
+github.com/pterm/pterm/bigtext_printer.go:158.3,158.14 1 21
+github.com/pterm/pterm/bigtext_printer.go:140.37,144.28 4 100
+github.com/pterm/pterm/bigtext_printer.go:147.4,148.41 2 100
+github.com/pterm/pterm/bigtext_printer.go:152.4,152.29 1 100
+github.com/pterm/pterm/bigtext_printer.go:144.28,146.5 1 100
+github.com/pterm/pterm/bigtext_printer.go:148.41,150.5 1 7
+github.com/pterm/pterm/bigtext_printer.go:152.29,154.5 1 25
+github.com/pterm/pterm/bigtext_printer.go:154.10,156.5 1 75
+github.com/pterm/pterm/bigtext_printer.go:165.40,170.2 3 7
+github.com/pterm/pterm/panel_printer.go:36.63,39.2 2 96
+github.com/pterm/pterm/panel_printer.go:42.62,43.17 1 2
+github.com/pterm/pterm/panel_printer.go:46.2,47.11 2 2
+github.com/pterm/pterm/panel_printer.go:43.17,45.3 1 1
+github.com/pterm/pterm/panel_printer.go:51.74,52.23 1 20
+github.com/pterm/pterm/panel_printer.go:55.2,56.11 2 20
+github.com/pterm/pterm/panel_printer.go:52.23,54.3 1 1
+github.com/pterm/pterm/panel_printer.go:60.68,64.2 3 19
+github.com/pterm/pterm/panel_printer.go:67.75,70.2 2 19
+github.com/pterm/pterm/panel_printer.go:73.72,76.2 2 0
+github.com/pterm/pterm/panel_printer.go:78.45,80.33 2 45
+github.com/pterm/pterm/panel_printer.go:86.2,86.12 1 45
+github.com/pterm/pterm/panel_printer.go:80.33,81.32 1 99
+github.com/pterm/pterm/panel_printer.go:84.3,84.14 1 99
+github.com/pterm/pterm/panel_printer.go:81.32,83.4 1 117
+github.com/pterm/pterm/panel_printer.go:90.49,93.15 2 96
+github.com/pterm/pterm/panel_printer.go:97.2,97.26 1 51
+github.com/pterm/pterm/panel_printer.go:103.2,103.36 1 51
+github.com/pterm/pterm/panel_printer.go:111.2,111.26 1 51
+github.com/pterm/pterm/panel_printer.go:119.2,121.23 2 51
+github.com/pterm/pterm/panel_printer.go:131.2,131.35 1 51
+github.com/pterm/pterm/panel_printer.go:181.2,181.17 1 51
+github.com/pterm/pterm/panel_printer.go:93.15,95.3 1 45
+github.com/pterm/pterm/panel_printer.go:97.26,98.31 1 104
+github.com/pterm/pterm/panel_printer.go:98.31,100.4 1 126
+github.com/pterm/pterm/panel_printer.go:103.36,104.27 1 9
+github.com/pterm/pterm/panel_printer.go:104.27,105.32 1 18
+github.com/pterm/pterm/panel_printer.go:105.32,107.5 1 27
+github.com/pterm/pterm/panel_printer.go:111.26,112.27 1 104
+github.com/pterm/pterm/panel_printer.go:112.27,113.32 1 54
+github.com/pterm/pterm/panel_printer.go:113.32,115.5 1 72
+github.com/pterm/pterm/panel_printer.go:121.23,122.34 1 9
+github.com/pterm/pterm/panel_printer.go:122.34,123.29 1 27
+github.com/pterm/pterm/panel_printer.go:123.29,124.68 1 27
+github.com/pterm/pterm/panel_printer.go:124.68,126.6 1 21
+github.com/pterm/pterm/panel_printer.go:131.35,136.31 3 104
+github.com/pterm/pterm/panel_printer.go:140.3,140.40 1 104
+github.com/pterm/pterm/panel_printer.go:144.3,144.38 1 104
+github.com/pterm/pterm/panel_printer.go:151.3,151.34 1 104
+github.com/pterm/pterm/panel_printer.go:136.31,138.4 1 126
+github.com/pterm/pterm/panel_printer.go:140.40,142.4 1 126
+github.com/pterm/pterm/panel_printer.go:144.38,146.26 2 126
+github.com/pterm/pterm/panel_printer.go:146.26,148.5 1 104
+github.com/pterm/pterm/panel_printer.go:151.34,152.22 1 215
+github.com/pterm/pterm/panel_printer.go:152.22,153.43 1 215
+github.com/pterm/pterm/panel_printer.go:176.5,176.16 1 215
+github.com/pterm/pterm/panel_printer.go:153.43,157.28 4 312
+github.com/pterm/pterm/panel_printer.go:160.6,160.30 1 312
+github.com/pterm/pterm/panel_printer.go:163.6,164.28 2 312
+github.com/pterm/pterm/panel_printer.go:173.6,174.23 2 312
+github.com/pterm/pterm/panel_printer.go:157.28,159.7 1 285
+github.com/pterm/pterm/panel_printer.go:160.30,162.7 1 276
+github.com/pterm/pterm/panel_printer.go:164.28,165.44 1 285
+github.com/pterm/pterm/panel_printer.go:165.44,167.8 1 87
+github.com/pterm/pterm/panel_printer.go:168.12,169.51 1 27
+github.com/pterm/pterm/panel_printer.go:169.51,171.8 1 17
+github.com/pterm/pterm/panel_printer.go:185.38,190.2 3 92
+github.com/pterm/pterm/tree_printer.go:51.63,54.2 2 1
+github.com/pterm/pterm/tree_printer.go:57.63,60.2 2 1
+github.com/pterm/pterm/tree_printer.go:63.70,66.2 2 1
+github.com/pterm/pterm/tree_printer.go:69.75,72.2 2 1
+github.com/pterm/pterm/tree_printer.go:75.66,78.2 2 1
+github.com/pterm/pterm/tree_printer.go:81.64,84.2 2 1
+github.com/pterm/pterm/tree_printer.go:87.59,90.2 2 8
+github.com/pterm/pterm/tree_printer.go:94.58,95.16 1 2
+github.com/pterm/pterm/tree_printer.go:98.2,99.11 2 2
+github.com/pterm/pterm/tree_printer.go:95.16,97.3 1 1
+github.com/pterm/pterm/tree_printer.go:103.70,106.2 2 0
+github.com/pterm/pterm/tree_printer.go:109.37,114.2 3 3
+github.com/pterm/pterm/tree_printer.go:117.48,118.24 1 4
+github.com/pterm/pterm/tree_printer.go:121.2,121.24 1 4
+github.com/pterm/pterm/tree_printer.go:125.2,125.50 1 4
+github.com/pterm/pterm/tree_printer.go:118.24,120.3 1 2
+github.com/pterm/pterm/tree_printer.go:121.24,123.3 1 2
+github.com/pterm/pterm/tree_printer.go:131.73,133.28 2 12
+github.com/pterm/pterm/tree_printer.go:154.2,154.12 1 12
+github.com/pterm/pterm/tree_printer.go:133.28,134.22 1 32
+github.com/pterm/pterm/tree_printer.go:134.22,135.31 1 21
+github.com/pterm/pterm/tree_printer.go:135.31,138.5 1 17
+github.com/pterm/pterm/tree_printer.go:138.10,142.5 2 4
+github.com/pterm/pterm/tree_printer.go:143.9,143.30 1 11
+github.com/pterm/pterm/tree_printer.go:143.30,144.31 1 11
+github.com/pterm/pterm/tree_printer.go:144.31,147.5 1 7
+github.com/pterm/pterm/tree_printer.go:147.10,151.5 2 4
+github.com/pterm/pterm/tree_printer.go:158.68,159.32 1 11
+github.com/pterm/pterm/tree_printer.go:163.2,168.42 2 9
+github.com/pterm/pterm/tree_printer.go:192.2,192.14 1 9
+github.com/pterm/pterm/tree_printer.go:159.32,161.3 1 2
+github.com/pterm/pterm/tree_printer.go:168.42,171.23 2 86
+github.com/pterm/pterm/tree_printer.go:176.3,176.35 1 86
+github.com/pterm/pterm/tree_printer.go:182.3,182.37 1 86
+github.com/pterm/pterm/tree_printer.go:186.3,189.5 1 86
+github.com/pterm/pterm/tree_printer.go:171.23,174.4 2 1
+github.com/pterm/pterm/tree_printer.go:176.35,177.52 1 77
+github.com/pterm/pterm/tree_printer.go:177.52,179.5 1 1
+github.com/pterm/pterm/tree_printer.go:182.37,185.4 2 97
+github.com/pterm/pterm/basic_text_printer.go:21.69,24.2 2 1
+github.com/pterm/pterm/basic_text_printer.go:26.80,29.2 2 0
+github.com/pterm/pterm/basic_text_printer.go:33.59,34.20 1 1371
+github.com/pterm/pterm/basic_text_printer.go:37.2,37.29 1 1371
+github.com/pterm/pterm/basic_text_printer.go:34.20,36.3 1 1371
+github.com/pterm/pterm/basic_text_printer.go:42.61,45.2 2 651
+github.com/pterm/pterm/basic_text_printer.go:48.75,50.2 1 684
+github.com/pterm/pterm/basic_text_printer.go:54.77,56.2 1 648
+github.com/pterm/pterm/basic_text_printer.go:61.65,65.2 3 18
+github.com/pterm/pterm/basic_text_printer.go:70.67,74.2 3 327
+github.com/pterm/pterm/basic_text_printer.go:78.81,82.2 3 18
+github.com/pterm/pterm/basic_text_printer.go:87.83,91.2 3 324
+github.com/pterm/pterm/basic_text_printer.go:96.72,97.24 1 2
+github.com/pterm/pterm/basic_text_printer.go:105.2,106.12 2 2
+github.com/pterm/pterm/basic_text_printer.go:97.24,98.33 1 2
+github.com/pterm/pterm/basic_text_printer.go:98.33,99.18 1 1
+github.com/pterm/pterm/basic_text_printer.go:99.18,101.5 1 1
+github.com/pterm/pterm/basic_text_printer.go:112.88,113.24 1 2
+github.com/pterm/pterm/basic_text_printer.go:121.2,122.12 2 2
+github.com/pterm/pterm/basic_text_printer.go:113.24,114.33 1 2
+github.com/pterm/pterm/basic_text_printer.go:114.33,115.18 1 1
+github.com/pterm/pterm/basic_text_printer.go:115.18,117.5 1 1
+github.com/pterm/pterm/print.go:12.36,14.2 1 13790
+github.com/pterm/pterm/print.go:18.38,20.2 1 253647
+github.com/pterm/pterm/print.go:23.54,25.2 1 139826
+github.com/pterm/pterm/print.go:29.56,31.2 1 30
+github.com/pterm/pterm/print.go:34.40,37.2 2 6291
+github.com/pterm/pterm/print.go:40.39,42.2 1 29128
+github.com/pterm/pterm/print.go:47.30,49.2 1 3576
+github.com/pterm/pterm/print.go:54.32,56.2 1 432
+github.com/pterm/pterm/print.go:60.46,62.2 1 14
+github.com/pterm/pterm/print.go:67.48,69.2 1 14
+github.com/pterm/pterm/print.go:74.37,75.24 1 2
+github.com/pterm/pterm/print.go:75.24,76.33 1 2
+github.com/pterm/pterm/print.go:76.33,77.18 1 1
+github.com/pterm/pterm/print.go:77.18,79.5 1 1
+github.com/pterm/pterm/print.go:87.53,88.24 1 2
+github.com/pterm/pterm/print.go:88.24,89.33 1 2
+github.com/pterm/pterm/print.go:89.33,90.18 1 1
+github.com/pterm/pterm/print.go:90.18,92.5 1 1
+github.com/pterm/pterm/print.go:100.49,101.13 1 13753
+github.com/pterm/pterm/print.go:105.2,108.48 3 13715
+github.com/pterm/pterm/print.go:116.2,116.48 1 13715
+github.com/pterm/pterm/print.go:124.2,124.14 1 13715
+github.com/pterm/pterm/print.go:128.2,131.48 2 13715
+github.com/pterm/pterm/print.go:101.13,103.3 1 38
+github.com/pterm/pterm/print.go:108.48,109.19 1 19966
+github.com/pterm/pterm/print.go:109.19,113.4 3 14278
+github.com/pterm/pterm/print.go:116.48,117.23 1 3131
+github.com/pterm/pterm/print.go:117.23,121.4 3 283
+github.com/pterm/pterm/print.go:124.14,126.3 1 10861
+github.com/pterm/pterm/print.go:131.48,132.19 1 19966
+github.com/pterm/pterm/print.go:132.19,134.4 1 14278
+github.com/pterm/pterm/print.go:141.51,144.2 1 173
+github.com/pterm/pterm/print.go:152.31,153.13 1 12
+github.com/pterm/pterm/print.go:157.2,157.34 1 6
+github.com/pterm/pterm/print.go:153.13,155.3 1 6
+github.com/pterm/pterm/print.go:161.45,162.13 1 7041
+github.com/pterm/pterm/print.go:166.2,166.37 1 7035
+github.com/pterm/pterm/print.go:162.13,164.3 1 6
+github.com/pterm/pterm/print.go:170.53,172.2 1 36443
+github.com/pterm/pterm/print.go:174.35,176.2 1 76
+github.com/pterm/pterm/print.go:178.26,180.2 1 14561
+github.com/pterm/pterm/progressbar_printer.go:63.72,66.2 2 3
+github.com/pterm/pterm/progressbar_printer.go:71.76,74.2 2 1
+github.com/pterm/pterm/progressbar_printer.go:77.70,80.2 2 9
+github.com/pterm/pterm/progressbar_printer.go:83.74,86.2 2 1
+github.com/pterm/pterm/progressbar_printer.go:89.79,92.2 2 1
+github.com/pterm/pterm/progressbar_printer.go:95.80,98.2 2 1
+github.com/pterm/pterm/progressbar_printer.go:101.103,104.2 2 1
+github.com/pterm/pterm/progressbar_printer.go:107.80,110.2 2 1
+github.com/pterm/pterm/progressbar_printer.go:113.74,116.2 2 1
+github.com/pterm/pterm/progressbar_printer.go:119.74,122.2 2 1
+github.com/pterm/pterm/progressbar_printer.go:125.79,128.2 2 1
+github.com/pterm/pterm/progressbar_printer.go:131.78,134.2 2 1
+github.com/pterm/pterm/progressbar_printer.go:137.76,140.2 2 1
+github.com/pterm/pterm/progressbar_printer.go:143.79,146.2 2 2
+github.com/pterm/pterm/progressbar_printer.go:149.76,152.2 2 1
+github.com/pterm/pterm/progressbar_printer.go:155.84,158.2 2 0
+github.com/pterm/pterm/progressbar_printer.go:161.62,164.2 2 12
+github.com/pterm/pterm/progressbar_printer.go:167.76,171.2 3 14291
+github.com/pterm/pterm/progressbar_printer.go:174.67,175.25 1 14315
+github.com/pterm/pterm/progressbar_printer.go:178.2,178.23 1 14315
+github.com/pterm/pterm/progressbar_printer.go:181.2,181.18 1 14315
+github.com/pterm/pterm/progressbar_printer.go:185.2,189.21 4 14314
+github.com/pterm/pterm/progressbar_printer.go:197.2,206.17 5 14314
+github.com/pterm/pterm/progressbar_printer.go:209.2,209.17 1 14314
+github.com/pterm/pterm/progressbar_printer.go:213.2,215.22 2 14314
+github.com/pterm/pterm/progressbar_printer.go:218.2,218.23 1 14314
+github.com/pterm/pterm/progressbar_printer.go:222.2,226.39 4 14314
+github.com/pterm/pterm/progressbar_printer.go:230.2,231.26 2 14314
+github.com/pterm/pterm/progressbar_printer.go:237.2,237.16 1 14314
+github.com/pterm/pterm/progressbar_printer.go:240.2,240.10 1 14314
+github.com/pterm/pterm/progressbar_printer.go:175.25,177.3 1 2
+github.com/pterm/pterm/progressbar_printer.go:178.23,180.3 1 2
+github.com/pterm/pterm/progressbar_printer.go:181.18,183.3 1 1
+github.com/pterm/pterm/progressbar_printer.go:189.21,191.3 1 1
+github.com/pterm/pterm/progressbar_printer.go:191.8,191.44 1 14313
+github.com/pterm/pterm/progressbar_printer.go:191.44,193.3 1 1
+github.com/pterm/pterm/progressbar_printer.go:193.8,195.3 1 14312
+github.com/pterm/pterm/progressbar_printer.go:206.17,208.3 1 14313
+github.com/pterm/pterm/progressbar_printer.go:209.17,211.3 1 14313
+github.com/pterm/pterm/progressbar_printer.go:215.22,217.3 1 14313
+github.com/pterm/pterm/progressbar_printer.go:218.23,220.3 1 14313
+github.com/pterm/pterm/progressbar_printer.go:226.39,228.3 1 14312
+github.com/pterm/pterm/progressbar_printer.go:231.26,233.3 1 37
+github.com/pterm/pterm/progressbar_printer.go:233.8,235.3 1 14277
+github.com/pterm/pterm/progressbar_printer.go:237.16,239.3 1 6871
+github.com/pterm/pterm/progressbar_printer.go:244.65,245.18 1 18
+github.com/pterm/pterm/progressbar_printer.go:249.2,252.26 3 17
+github.com/pterm/pterm/progressbar_printer.go:255.2,255.10 1 17
+github.com/pterm/pterm/progressbar_printer.go:245.18,247.3 1 1
+github.com/pterm/pterm/progressbar_printer.go:252.26,254.3 1 1
+github.com/pterm/pterm/progressbar_printer.go:259.66,260.30 1 7
+github.com/pterm/pterm/progressbar_printer.go:263.2,269.16 5 7
+github.com/pterm/pterm/progressbar_printer.go:260.30,262.3 1 1
+github.com/pterm/pterm/progressbar_printer.go:273.66,274.17 1 10
+github.com/pterm/pterm/progressbar_printer.go:277.2,278.22 2 3
+github.com/pterm/pterm/progressbar_printer.go:284.2,284.15 1 3
+github.com/pterm/pterm/progressbar_printer.go:274.17,276.3 1 7
+github.com/pterm/pterm/progressbar_printer.go:278.22,281.3 2 1
+github.com/pterm/pterm/progressbar_printer.go:281.8,283.3 1 2
+github.com/pterm/pterm/progressbar_printer.go:290.66,294.2 3 2
+github.com/pterm/pterm/progressbar_printer.go:299.65,303.2 3 1
+github.com/pterm/pterm/progressbar_printer.go:306.61,308.2 1 14314
+github.com/pterm/pterm/progressbar_printer.go:310.56,313.2 2 14313
+github.com/pterm/pterm/prefix_printer.go:101.65,104.2 2 10
+github.com/pterm/pterm/prefix_printer.go:107.62,110.2 2 100
+github.com/pterm/pterm/prefix_printer.go:113.70,116.2 2 5
+github.com/pterm/pterm/prefix_printer.go:122.60,125.2 2 11
+github.com/pterm/pterm/prefix_printer.go:128.69,131.2 2 95
+github.com/pterm/pterm/prefix_printer.go:136.63,139.2 2 75
+github.com/pterm/pterm/prefix_printer.go:144.72,147.2 2 5
+github.com/pterm/pterm/prefix_printer.go:150.74,153.2 2 0
+github.com/pterm/pterm/prefix_printer.go:157.57,159.39 2 10852
+github.com/pterm/pterm/prefix_printer.go:163.2,163.15 1 10762
+github.com/pterm/pterm/prefix_printer.go:171.2,171.27 1 5110
+github.com/pterm/pterm/prefix_printer.go:174.2,174.26 1 5110
+github.com/pterm/pterm/prefix_printer.go:177.2,177.27 1 5110
+github.com/pterm/pterm/prefix_printer.go:181.2,184.32 3 5110
+github.com/pterm/pterm/prefix_printer.go:189.2,190.33 2 5110
+github.com/pterm/pterm/prefix_printer.go:202.2,204.22 2 5110
+github.com/pterm/pterm/prefix_printer.go:209.2,209.13 1 5110
+github.com/pterm/pterm/prefix_printer.go:213.2,213.20 1 5110
+github.com/pterm/pterm/prefix_printer.go:159.39,161.3 1 90
+github.com/pterm/pterm/prefix_printer.go:163.15,164.26 1 5652
+github.com/pterm/pterm/prefix_printer.go:164.26,166.4 1 5607
+github.com/pterm/pterm/prefix_printer.go:166.9,168.4 1 45
+github.com/pterm/pterm/prefix_printer.go:171.27,173.3 1 1
+github.com/pterm/pterm/prefix_printer.go:174.26,176.3 1 1663
+github.com/pterm/pterm/prefix_printer.go:177.27,179.3 1 1
+github.com/pterm/pterm/prefix_printer.go:184.32,187.3 2 3095
+github.com/pterm/pterm/prefix_printer.go:190.33,191.13 1 5133
+github.com/pterm/pterm/prefix_printer.go:191.13,193.26 2 5110
+github.com/pterm/pterm/prefix_printer.go:196.4,196.35 1 5110
+github.com/pterm/pterm/prefix_printer.go:193.26,195.5 1 50
+github.com/pterm/pterm/prefix_printer.go:197.9,199.4 1 23
+github.com/pterm/pterm/prefix_printer.go:204.22,207.3 2 45
+github.com/pterm/pterm/prefix_printer.go:209.13,211.3 1 3095
+github.com/pterm/pterm/prefix_printer.go:218.58,219.39 1 6600
+github.com/pterm/pterm/prefix_printer.go:222.2,223.22 2 6510
+github.com/pterm/pterm/prefix_printer.go:219.39,221.3 1 90
+github.com/pterm/pterm/prefix_printer.go:227.72,228.39 1 3695
+github.com/pterm/pterm/prefix_printer.go:231.2,231.40 1 3605
+github.com/pterm/pterm/prefix_printer.go:228.39,230.3 1 90
+github.com/pterm/pterm/prefix_printer.go:236.74,237.39 1 3330
+github.com/pterm/pterm/prefix_printer.go:240.2,240.39 1 3240
+github.com/pterm/pterm/prefix_printer.go:237.39,239.3 1 90
+github.com/pterm/pterm/prefix_printer.go:246.62,248.39 2 420
+github.com/pterm/pterm/prefix_printer.go:251.2,253.12 3 410
+github.com/pterm/pterm/prefix_printer.go:248.39,250.3 1 10
+github.com/pterm/pterm/prefix_printer.go:259.64,261.39 2 3280
+github.com/pterm/pterm/prefix_printer.go:264.2,266.12 3 3270
+github.com/pterm/pterm/prefix_printer.go:261.39,263.3 1 10
+github.com/pterm/pterm/prefix_printer.go:271.78,273.39 2 190
+github.com/pterm/pterm/prefix_printer.go:276.2,278.12 3 180
+github.com/pterm/pterm/prefix_printer.go:273.39,275.3 1 10
+github.com/pterm/pterm/prefix_printer.go:284.80,286.39 2 1630
+github.com/pterm/pterm/prefix_printer.go:289.2,291.12 3 1620
+github.com/pterm/pterm/prefix_printer.go:286.39,288.3 1 10
+github.com/pterm/pterm/prefix_printer.go:299.69,300.24 1 10
+github.com/pterm/pterm/prefix_printer.go:308.2,309.12 2 10
+github.com/pterm/pterm/prefix_printer.go:300.24,301.33 1 10
+github.com/pterm/pterm/prefix_printer.go:301.33,302.18 1 5
+github.com/pterm/pterm/prefix_printer.go:302.18,304.5 1 5
+github.com/pterm/pterm/prefix_printer.go:315.85,316.24 1 10
+github.com/pterm/pterm/prefix_printer.go:324.2,325.12 2 10
+github.com/pterm/pterm/prefix_printer.go:316.24,317.33 1 10
+github.com/pterm/pterm/prefix_printer.go:317.33,318.18 1 5
+github.com/pterm/pterm/prefix_printer.go:318.18,320.5 1 5
+github.com/pterm/pterm/prefix_printer.go:329.52,331.2 1 5115
+github.com/pterm/pterm/prefix_printer.go:346.35,347.13 1 5480
+github.com/pterm/pterm/prefix_printer.go:347.13,348.12 1 5
+github.com/pterm/pterm/area_printer.go:28.43,30.2 1 9
+github.com/pterm/pterm/area_printer.go:33.65,36.2 2 3
+github.com/pterm/pterm/area_printer.go:39.61,42.2 2 2
+github.com/pterm/pterm/area_printer.go:45.57,48.2 2 1
+github.com/pterm/pterm/area_printer.go:52.51,53.19 1 18
+github.com/pterm/pterm/area_printer.go:57.2,60.14 3 18
+github.com/pterm/pterm/area_printer.go:64.2,64.18 1 18
+github.com/pterm/pterm/area_printer.go:82.2,82.20 1 18
+github.com/pterm/pterm/area_printer.go:53.19,56.3 2 2
+github.com/pterm/pterm/area_printer.go:60.14,62.3 1 2
+github.com/pterm/pterm/area_printer.go:64.18,72.15 6 2
+github.com/pterm/pterm/area_printer.go:77.3,77.29 1 2
+github.com/pterm/pterm/area_printer.go:72.15,75.4 2 2
+github.com/pterm/pterm/area_printer.go:77.29,80.4 2 2
+github.com/pterm/pterm/area_printer.go:86.72,95.2 6 5
+github.com/pterm/pterm/area_printer.go:99.36,101.22 2 4
+github.com/pterm/pterm/area_printer.go:104.2,104.12 1 4
+github.com/pterm/pterm/area_printer.go:101.22,103.3 1 2
+github.com/pterm/pterm/area_printer.go:110.60,114.2 3 2
+github.com/pterm/pterm/area_printer.go:119.59,123.2 3 1
+github.com/pterm/pterm/area_printer.go:128.31,130.2 1 3
+github.com/pterm/pterm/bulletlist_printer.go:11.77,13.25 2 2
+github.com/pterm/pterm/bulletlist_printer.go:16.2,16.42 1 2
+github.com/pterm/pterm/bulletlist_printer.go:13.25,15.3 1 8
+github.com/pterm/pterm/bulletlist_printer.go:20.78,26.2 2 8
+github.com/pterm/pterm/bulletlist_printer.go:38.62,41.2 2 1
+github.com/pterm/pterm/bulletlist_printer.go:44.63,47.2 2 1
+github.com/pterm/pterm/bulletlist_printer.go:50.69,53.2 2 1
+github.com/pterm/pterm/bulletlist_printer.go:56.67,59.2 2 1
+github.com/pterm/pterm/bulletlist_printer.go:62.71,65.2 2 1
+github.com/pterm/pterm/bulletlist_printer.go:68.74,70.2 1 2
+github.com/pterm/pterm/bulletlist_printer.go:89.81,92.2 2 76
+github.com/pterm/pterm/bulletlist_printer.go:95.75,98.2 2 1
+github.com/pterm/pterm/bulletlist_printer.go:101.73,104.2 2 1
+github.com/pterm/pterm/bulletlist_printer.go:107.77,110.2 2 1
+github.com/pterm/pterm/bulletlist_printer.go:113.82,116.2 2 0
+github.com/pterm/pterm/bulletlist_printer.go:119.43,124.2 3 56
+github.com/pterm/pterm/bulletlist_printer.go:127.54,129.31 2 74
+github.com/pterm/pterm/bulletlist_printer.go:150.2,150.17 1 74
+github.com/pterm/pterm/bulletlist_printer.go:129.31,130.28 1 74
+github.com/pterm/pterm/bulletlist_printer.go:137.3,137.30 1 74
+github.com/pterm/pterm/bulletlist_printer.go:144.3,144.24 1 74
+github.com/pterm/pterm/bulletlist_printer.go:130.28,131.26 1 74
+github.com/pterm/pterm/bulletlist_printer.go:131.26,133.5 1 18
+github.com/pterm/pterm/bulletlist_printer.go:133.10,135.5 1 56
+github.com/pterm/pterm/bulletlist_printer.go:137.30,138.28 1 74
+github.com/pterm/pterm/bulletlist_printer.go:138.28,140.5 1 18
+github.com/pterm/pterm/bulletlist_printer.go:140.10,142.5 1 56
+github.com/pterm/pterm/bulletlist_printer.go:144.24,146.4 1 56
+github.com/pterm/pterm/bulletlist_printer.go:146.9,148.4 1 18
+github.com/pterm/pterm/header_printer.go:35.67,38.2 2 1
+github.com/pterm/pterm/header_printer.go:41.73,44.2 2 3
+github.com/pterm/pterm/header_printer.go:47.62,50.2 2 3
+github.com/pterm/pterm/header_printer.go:53.64,56.2 2 21
+github.com/pterm/pterm/header_printer.go:59.74,62.2 2 0
+github.com/pterm/pterm/header_printer.go:66.56,67.15 1 1393
+github.com/pterm/pterm/header_printer.go:71.2,71.24 1 664
+github.com/pterm/pterm/header_printer.go:74.2,74.30 1 664
+github.com/pterm/pterm/header_printer.go:78.2,85.17 5 664
+github.com/pterm/pterm/header_printer.go:98.2,101.17 3 664
+github.com/pterm/pterm/header_printer.go:108.2,109.49 2 664
+github.com/pterm/pterm/header_printer.go:117.2,119.12 2 664
+github.com/pterm/pterm/header_printer.go:67.15,69.3 1 729
+github.com/pterm/pterm/header_printer.go:71.24,73.3 1 3
+github.com/pterm/pterm/header_printer.go:74.30,76.3 1 3
+github.com/pterm/pterm/header_printer.go:85.17,88.3 2 11
+github.com/pterm/pterm/header_printer.go:88.8,89.42 1 653
+github.com/pterm/pterm/header_printer.go:89.42,92.4 2 1
+github.com/pterm/pterm/header_printer.go:92.9,95.4 2 652
+github.com/pterm/pterm/header_printer.go:101.17,104.3 2 11
+github.com/pterm/pterm/header_printer.go:104.8,106.3 1 653
+github.com/pterm/pterm/header_printer.go:109.49,112.69 3 666
+github.com/pterm/pterm/header_printer.go:115.3,115.67 1 666
+github.com/pterm/pterm/header_printer.go:112.69,114.4 1 8
+github.com/pterm/pterm/header_printer.go:122.47,125.32 3 664
+github.com/pterm/pterm/header_printer.go:146.2,147.26 2 664
+github.com/pterm/pterm/header_printer.go:151.2,151.39 1 664
+github.com/pterm/pterm/header_printer.go:125.32,126.65 1 664
+github.com/pterm/pterm/header_printer.go:126.65,129.32 3 2
+github.com/pterm/pterm/header_printer.go:136.4,136.41 1 2
+github.com/pterm/pterm/header_printer.go:129.32,130.31 1 272
+github.com/pterm/pterm/header_printer.go:134.5,134.52 1 272
+github.com/pterm/pterm/header_printer.go:130.31,133.6 2 2
+github.com/pterm/pterm/header_printer.go:136.41,139.5 2 4
+github.com/pterm/pterm/header_printer.go:140.9,143.4 2 662
+github.com/pterm/pterm/header_printer.go:147.26,149.3 1 666
+github.com/pterm/pterm/header_printer.go:156.58,158.2 1 652
+github.com/pterm/pterm/header_printer.go:161.72,163.2 1 684
+github.com/pterm/pterm/header_printer.go:167.74,169.2 1 648
+github.com/pterm/pterm/header_printer.go:174.62,178.2 3 36
+github.com/pterm/pterm/header_printer.go:183.64,187.2 3 328
+github.com/pterm/pterm/header_printer.go:191.78,195.2 3 18
+github.com/pterm/pterm/header_printer.go:200.80,204.2 3 324
+github.com/pterm/pterm/header_printer.go:209.69,210.24 1 2
+github.com/pterm/pterm/header_printer.go:218.2,219.12 2 2
+github.com/pterm/pterm/header_printer.go:210.24,211.33 1 2
+github.com/pterm/pterm/header_printer.go:211.33,212.18 1 1
+github.com/pterm/pterm/header_printer.go:212.18,214.5 1 1
+github.com/pterm/pterm/header_printer.go:225.85,226.24 1 2
+github.com/pterm/pterm/header_printer.go:234.2,235.12 2 2
+github.com/pterm/pterm/header_printer.go:226.24,227.33 1 2
+github.com/pterm/pterm/header_printer.go:227.33,228.18 1 1
+github.com/pterm/pterm/header_printer.go:228.18,230.5 1 1
+github.com/pterm/pterm/center_printer.go:25.79,29.2 3 3
+github.com/pterm/pterm/center_printer.go:32.74,35.2 2 0
+github.com/pterm/pterm/center_printer.go:39.56,40.15 1 2096
+github.com/pterm/pterm/center_printer.go:44.2,48.32 3 998
+github.com/pterm/pterm/center_printer.go:60.2,62.29 2 655
+github.com/pterm/pterm/center_printer.go:69.2,71.18 2 655
+github.com/pterm/pterm/center_printer.go:79.2,79.29 1 653
+github.com/pterm/pterm/center_printer.go:83.2,83.12 1 653
+github.com/pterm/pterm/center_printer.go:40.15,42.3 1 1098
+github.com/pterm/pterm/center_printer.go:48.32,49.30 1 343
+github.com/pterm/pterm/center_printer.go:57.3,57.13 1 343
+github.com/pterm/pterm/center_printer.go:49.30,51.18 2 649
+github.com/pterm/pterm/center_printer.go:51.18,53.5 1 1
+github.com/pterm/pterm/center_printer.go:53.10,55.5 1 648
+github.com/pterm/pterm/center_printer.go:62.29,64.32 2 971
+github.com/pterm/pterm/center_printer.go:64.32,66.4 1 654
+github.com/pterm/pterm/center_printer.go:71.18,72.30 1 2
+github.com/pterm/pterm/center_printer.go:76.3,76.13 1 2
+github.com/pterm/pterm/center_printer.go:72.30,74.4 1 5
+github.com/pterm/pterm/center_printer.go:79.29,81.3 1 966
+github.com/pterm/pterm/center_printer.go:88.58,90.2 1 1298
+github.com/pterm/pterm/center_printer.go:93.72,95.2 1 720
+github.com/pterm/pterm/center_printer.go:99.74,101.2 1 648
+github.com/pterm/pterm/center_printer.go:106.61,110.2 3 38
+github.com/pterm/pterm/center_printer.go:115.63,119.2 3 650
+github.com/pterm/pterm/center_printer.go:123.77,127.2 3 36
+github.com/pterm/pterm/center_printer.go:132.79,136.2 3 324
+github.com/pterm/pterm/center_printer.go:141.68,142.24 1 2
+github.com/pterm/pterm/center_printer.go:150.2,151.12 2 2
+github.com/pterm/pterm/center_printer.go:142.24,143.33 1 2
+github.com/pterm/pterm/center_printer.go:143.33,144.18 1 1
+github.com/pterm/pterm/center_printer.go:144.18,146.5 1 1
+github.com/pterm/pterm/center_printer.go:157.84,158.24 1 2
+github.com/pterm/pterm/center_printer.go:166.2,167.12 2 2
+github.com/pterm/pterm/center_printer.go:158.24,159.33 1 2
+github.com/pterm/pterm/center_printer.go:159.33,160.18 1 1
+github.com/pterm/pterm/center_printer.go:160.18,162.5 1 1
+github.com/pterm/pterm/table_printer.go:48.61,51.2 2 1
+github.com/pterm/pterm/table_printer.go:54.62,57.2 2 9
+github.com/pterm/pterm/table_printer.go:60.67,63.2 2 1
+github.com/pterm/pterm/table_printer.go:66.78,69.2 2 4
+github.com/pterm/pterm/table_printer.go:72.79,75.2 2 1
+github.com/pterm/pterm/table_printer.go:78.69,81.2 2 1
+github.com/pterm/pterm/table_printer.go:84.70,87.2 2 1
+github.com/pterm/pterm/table_printer.go:90.72,93.2 2 3
+github.com/pterm/pterm/table_printer.go:96.73,99.2 2 1
+github.com/pterm/pterm/table_printer.go:102.63,105.2 2 9
+github.com/pterm/pterm/table_printer.go:108.71,109.50 1 1
+github.com/pterm/pterm/table_printer.go:112.2,112.11 1 1
+github.com/pterm/pterm/table_printer.go:109.50,111.3 1 1
+github.com/pterm/pterm/table_printer.go:116.58,119.2 2 1
+github.com/pterm/pterm/table_printer.go:122.66,127.2 4 2
+github.com/pterm/pterm/table_printer.go:130.67,135.2 4 2
+github.com/pterm/pterm/table_printer.go:138.72,141.2 2 0
+github.com/pterm/pterm/table_printer.go:144.49,145.20 1 10
+github.com/pterm/pterm/table_printer.go:148.2,148.29 1 10
+github.com/pterm/pterm/table_printer.go:151.2,151.26 1 10
+github.com/pterm/pterm/table_printer.go:154.2,154.38 1 10
+github.com/pterm/pterm/table_printer.go:157.2,157.32 1 10
+github.com/pterm/pterm/table_printer.go:161.2,164.29 3 10
+github.com/pterm/pterm/table_printer.go:173.2,173.30 1 10
+github.com/pterm/pterm/table_printer.go:202.2,204.13 2 10
+github.com/pterm/pterm/table_printer.go:208.2,208.17 1 10
+github.com/pterm/pterm/table_printer.go:145.20,147.3 1 1
+github.com/pterm/pterm/table_printer.go:148.29,150.3 1 1
+github.com/pterm/pterm/table_printer.go:151.26,153.3 1 1
+github.com/pterm/pterm/table_printer.go:154.38,156.3 1 1
+github.com/pterm/pterm/table_printer.go:157.32,159.3 1 1
+github.com/pterm/pterm/table_printer.go:164.29,165.31 1 41
+github.com/pterm/pterm/table_printer.go:165.31,167.41 2 123
+github.com/pterm/pterm/table_printer.go:167.41,169.5 1 33
+github.com/pterm/pterm/table_printer.go:173.30,175.31 2 41
+github.com/pterm/pterm/table_printer.go:191.3,191.59 1 41
+github.com/pterm/pterm/table_printer.go:195.3,195.61 1 41
+github.com/pterm/pterm/table_printer.go:199.3,199.14 1 41
+github.com/pterm/pterm/table_printer.go:175.31,179.33 3 123
+github.com/pterm/pterm/table_printer.go:184.4,184.30 1 123
+github.com/pterm/pterm/table_printer.go:179.33,182.5 2 82
+github.com/pterm/pterm/table_printer.go:184.30,186.5 1 24
+github.com/pterm/pterm/table_printer.go:186.10,188.5 1 99
+github.com/pterm/pterm/table_printer.go:191.59,193.4 1 3
+github.com/pterm/pterm/table_printer.go:195.61,197.4 1 4
+github.com/pterm/pterm/table_printer.go:204.13,206.3 1 1
+github.com/pterm/pterm/table_printer.go:211.82,213.22 2 123
+github.com/pterm/pterm/table_printer.go:216.2,216.64 1 111
+github.com/pterm/pterm/table_printer.go:213.22,215.3 1 12
+github.com/pterm/pterm/table_printer.go:219.75,221.2 1 3
+github.com/pterm/pterm/table_printer.go:223.69,225.2 1 4
+github.com/pterm/pterm/table_printer.go:228.38,233.2 3 9
+github.com/pterm/pterm/barchart.go:44.63,47.2 2 14
+github.com/pterm/pterm/barchart.go:50.81,53.2 2 1
+github.com/pterm/pterm/barchart.go:56.83,59.2 2 1
+github.com/pterm/pterm/barchart.go:62.69,66.2 3 5
+github.com/pterm/pterm/barchart.go:69.65,72.2 2 1
+github.com/pterm/pterm/barchart.go:75.64,78.2 2 1
+github.com/pterm/pterm/barchart.go:81.68,84.2 2 11
+github.com/pterm/pterm/barchart.go:87.78,90.2 2 0
+github.com/pterm/pterm/barchart.go:92.48,95.29 2 1
+github.com/pterm/pterm/barchart.go:99.2,99.12 1 1
+github.com/pterm/pterm/barchart.go:95.29,97.3 1 6
+github.com/pterm/pterm/barchart.go:103.52,104.50 1 15
+github.com/pterm/pterm/barchart.go:122.2,122.29 1 15
+github.com/pterm/pterm/barchart.go:131.2,144.82 2 15
+github.com/pterm/pterm/barchart.go:168.2,168.82 1 15
+github.com/pterm/pterm/barchart.go:183.2,183.84 1 15
+github.com/pterm/pterm/barchart.go:207.2,207.84 1 15
+github.com/pterm/pterm/barchart.go:248.2,248.15 1 15
+github.com/pterm/pterm/barchart.go:251.2,251.29 1 14
+github.com/pterm/pterm/barchart.go:263.2,271.29 7 14
+github.com/pterm/pterm/barchart.go:284.2,286.18 2 14
+github.com/pterm/pterm/barchart.go:415.2,415.17 1 10
+github.com/pterm/pterm/barchart.go:104.50,108.22 3 14
+github.com/pterm/pterm/barchart.go:113.3,115.28 2 14
+github.com/pterm/pterm/barchart.go:119.3,119.16 1 14
+github.com/pterm/pterm/barchart.go:108.22,111.4 2 11
+github.com/pterm/pterm/barchart.go:115.28,117.4 1 4
+github.com/pterm/pterm/barchart.go:122.29,123.16 1 4
+github.com/pterm/pterm/barchart.go:127.3,127.15 1 2
+github.com/pterm/pterm/barchart.go:123.16,125.4 1 2
+github.com/pterm/pterm/barchart.go:144.82,145.24 1 16
+github.com/pterm/pterm/barchart.go:149.3,149.56 1 16
+github.com/pterm/pterm/barchart.go:161.3,161.21 1 16
+github.com/pterm/pterm/barchart.go:145.24,147.4 1 9
+github.com/pterm/pterm/barchart.go:149.56,150.31 1 560
+github.com/pterm/pterm/barchart.go:150.31,152.5 1 180
+github.com/pterm/pterm/barchart.go:152.10,154.5 1 380
+github.com/pterm/pterm/barchart.go:161.21,162.58 1 4
+github.com/pterm/pterm/barchart.go:162.58,164.5 1 60
+github.com/pterm/pterm/barchart.go:168.82,169.57 1 14
+github.com/pterm/pterm/barchart.go:177.3,177.24 1 14
+github.com/pterm/pterm/barchart.go:169.57,170.31 1 508
+github.com/pterm/pterm/barchart.go:170.31,172.5 1 393
+github.com/pterm/pterm/barchart.go:172.10,174.5 1 115
+github.com/pterm/pterm/barchart.go:177.24,179.4 1 12
+github.com/pterm/pterm/barchart.go:183.84,184.24 1 11
+github.com/pterm/pterm/barchart.go:190.3,190.55 1 11
+github.com/pterm/pterm/barchart.go:198.3,198.24 1 11
+github.com/pterm/pterm/barchart.go:184.24,185.56 1 3
+github.com/pterm/pterm/barchart.go:185.56,187.5 1 45
+github.com/pterm/pterm/barchart.go:190.55,191.31 1 502
+github.com/pterm/pterm/barchart.go:191.31,193.5 1 243
+github.com/pterm/pterm/barchart.go:193.10,195.5 1 259
+github.com/pterm/pterm/barchart.go:198.24,204.4 2 11
+github.com/pterm/pterm/barchart.go:207.84,208.56 1 10
+github.com/pterm/pterm/barchart.go:218.3,218.41 1 10
+github.com/pterm/pterm/barchart.go:224.3,224.24 1 10
+github.com/pterm/pterm/barchart.go:208.56,209.31 1 454
+github.com/pterm/pterm/barchart.go:209.31,211.5 1 310
+github.com/pterm/pterm/barchart.go:211.10,213.5 1 144
+github.com/pterm/pterm/barchart.go:218.41,219.56 1 2
+github.com/pterm/pterm/barchart.go:219.56,221.5 1 52
+github.com/pterm/pterm/barchart.go:224.24,239.32 1 10
+github.com/pterm/pterm/barchart.go:243.4,243.60 1 10
+github.com/pterm/pterm/barchart.go:239.32,241.5 1 2
+github.com/pterm/pterm/barchart.go:248.15,250.3 1 1
+github.com/pterm/pterm/barchart.go:251.29,252.23 1 51
+github.com/pterm/pterm/barchart.go:256.3,256.28 1 51
+github.com/pterm/pterm/barchart.go:260.3,260.59 1 51
+github.com/pterm/pterm/barchart.go:252.23,254.4 1 3
+github.com/pterm/pterm/barchart.go:256.28,258.4 1 48
+github.com/pterm/pterm/barchart.go:271.29,272.30 1 51
+github.com/pterm/pterm/barchart.go:275.3,275.30 1 51
+github.com/pterm/pterm/barchart.go:278.3,279.35 2 51
+github.com/pterm/pterm/barchart.go:272.30,274.4 1 13
+github.com/pterm/pterm/barchart.go:275.30,277.4 1 8
+github.com/pterm/pterm/barchart.go:279.35,281.4 1 15
+github.com/pterm/pterm/barchart.go:286.18,294.41 5 4
+github.com/pterm/pterm/barchart.go:299.3,299.30 1 4
+github.com/pterm/pterm/barchart.go:331.3,332.18 2 4
+github.com/pterm/pterm/barchart.go:294.41,297.4 2 1
+github.com/pterm/pterm/barchart.go:299.30,304.24 4 21
+github.com/pterm/pterm/barchart.go:304.24,310.5 3 8
+github.com/pterm/pterm/barchart.go:310.10,310.31 1 13
+github.com/pterm/pterm/barchart.go:310.31,316.5 3 8
+github.com/pterm/pterm/barchart.go:316.10,320.23 2 5
+github.com/pterm/pterm/barchart.go:326.5,326.22 1 5
+github.com/pterm/pterm/barchart.go:320.23,324.6 2 3
+github.com/pterm/pterm/barchart.go:326.22,328.6 1 2
+github.com/pterm/pterm/barchart.go:333.8,341.41 5 10
+github.com/pterm/pterm/barchart.go:346.3,346.30 1 10
+github.com/pterm/pterm/barchart.go:381.3,383.36 2 10
+github.com/pterm/pterm/barchart.go:390.3,390.36 1 10
+github.com/pterm/pterm/barchart.go:397.3,397.38 1 10
+github.com/pterm/pterm/barchart.go:341.41,344.4 2 1
+github.com/pterm/pterm/barchart.go:346.30,351.24 4 30
+github.com/pterm/pterm/barchart.go:377.4,378.102 2 30
+github.com/pterm/pterm/barchart.go:351.24,357.5 3 12
+github.com/pterm/pterm/barchart.go:357.10,357.31 1 18
+github.com/pterm/pterm/barchart.go:357.31,362.5 2 12
+github.com/pterm/pterm/barchart.go:362.10,366.23 2 6
+github.com/pterm/pterm/barchart.go:372.5,372.22 1 6
+github.com/pterm/pterm/barchart.go:366.23,370.6 2 4
+github.com/pterm/pterm/barchart.go:372.22,374.6 1 2
+github.com/pterm/pterm/barchart.go:383.36,385.37 2 30
+github.com/pterm/pterm/barchart.go:385.37,387.5 1 9
+github.com/pterm/pterm/barchart.go:390.36,392.37 2 30
+github.com/pterm/pterm/barchart.go:392.37,394.5 1 2
+github.com/pterm/pterm/barchart.go:397.38,398.43 1 383
+github.com/pterm/pterm/barchart.go:411.4,411.15 1 383
+github.com/pterm/pterm/barchart.go:398.43,402.29 4 1257
+github.com/pterm/pterm/barchart.go:405.5,406.39 2 1257
+github.com/pterm/pterm/barchart.go:409.5,409.19 1 1257
+github.com/pterm/pterm/barchart.go:402.29,404.6 1 1227
+github.com/pterm/pterm/barchart.go:406.39,408.6 1 712
+github.com/pterm/pterm/barchart.go:419.41,424.2 3 14
+github.com/pterm/pterm/pterm.go:22.13,24.2 1 1
+github.com/pterm/pterm/pterm.go:27.21,29.2 1 1
+github.com/pterm/pterm/pterm.go:32.22,34.2 1 2
+github.com/pterm/pterm/pterm.go:37.28,39.2 1 31
+github.com/pterm/pterm/pterm.go:42.29,44.2 1 36
+github.com/pterm/pterm/pterm.go:48.22,51.2 2 14223
+github.com/pterm/pterm/pterm.go:56.23,59.2 2 14223
+github.com/pterm/pterm/pterm.go:62.32,67.2 3 10
+github.com/pterm/pterm/terminal.go:24.29,25.29 1 30970
+github.com/pterm/pterm/terminal.go:28.2,29.14 2 7
+github.com/pterm/pterm/terminal.go:25.29,27.3 1 30963
+github.com/pterm/pterm/terminal.go:33.30,34.30 1 17
+github.com/pterm/pterm/terminal.go:37.2,38.15 2 4
+github.com/pterm/pterm/terminal.go:34.30,36.3 1 13
+github.com/pterm/pterm/terminal.go:42.55,43.57 1 13
+github.com/pterm/pterm/terminal.go:46.2,47.12 2 12
+github.com/pterm/pterm/terminal.go:50.2,50.12 1 12
+github.com/pterm/pterm/terminal.go:53.2,53.16 1 12
+github.com/pterm/pterm/terminal.go:56.2,56.18 1 12
+github.com/pterm/pterm/terminal.go:43.57,45.3 1 1
+github.com/pterm/pterm/terminal.go:47.12,49.3 1 12
+github.com/pterm/pterm/terminal.go:50.12,52.3 1 12
+github.com/pterm/pterm/terminal.go:53.16,55.3 1 12
+github.com/pterm/pterm/terminal.go:60.51,64.2 3 10
+github.com/pterm/pterm/paragraph_printer.go:23.69,26.2 2 1
+github.com/pterm/pterm/paragraph_printer.go:29.80,32.2 2 0
+github.com/pterm/pterm/paragraph_printer.go:36.59,37.15 1 1373
+github.com/pterm/pterm/paragraph_printer.go:41.2,42.21 2 653
+github.com/pterm/pterm/paragraph_printer.go:45.2,47.33 3 652
+github.com/pterm/pterm/paragraph_printer.go:57.2,57.16 1 652
+github.com/pterm/pterm/paragraph_printer.go:37.15,39.3 1 720
+github.com/pterm/pterm/paragraph_printer.go:42.21,44.3 1 1
+github.com/pterm/pterm/paragraph_printer.go:47.33,48.30 1 405
+github.com/pterm/pterm/paragraph_printer.go:48.30,51.4 2 3
+github.com/pterm/pterm/paragraph_printer.go:51.9,54.4 2 402
+github.com/pterm/pterm/paragraph_printer.go:62.61,64.2 1 651
+github.com/pterm/pterm/paragraph_printer.go:67.75,69.2 1 684
+github.com/pterm/pterm/paragraph_printer.go:73.77,75.2 1 648
+github.com/pterm/pterm/paragraph_printer.go:80.65,84.2 3 20
+github.com/pterm/pterm/paragraph_printer.go:89.67,93.2 3 327
+github.com/pterm/pterm/paragraph_printer.go:97.81,101.2 3 18
+github.com/pterm/pterm/paragraph_printer.go:106.83,110.2 3 324
+github.com/pterm/pterm/paragraph_printer.go:115.72,116.24 1 2
+github.com/pterm/pterm/paragraph_printer.go:124.2,125.12 2 2
+github.com/pterm/pterm/paragraph_printer.go:116.24,117.33 1 2
+github.com/pterm/pterm/paragraph_printer.go:117.33,118.18 1 1
+github.com/pterm/pterm/paragraph_printer.go:118.18,120.5 1 1
+github.com/pterm/pterm/paragraph_printer.go:131.88,132.24 1 2
+github.com/pterm/pterm/paragraph_printer.go:140.2,141.12 2 2
+github.com/pterm/pterm/paragraph_printer.go:132.24,133.33 1 2
+github.com/pterm/pterm/paragraph_printer.go:133.33,134.18 1 1
+github.com/pterm/pterm/paragraph_printer.go:134.18,136.5 1 1
+github.com/pterm/pterm/section_printer.go:30.65,33.2 2 2
+github.com/pterm/pterm/section_printer.go:36.62,39.2 2 4
+github.com/pterm/pterm/section_printer.go:42.74,45.2 2 1
+github.com/pterm/pterm/section_printer.go:48.69,51.2 2 2
+github.com/pterm/pterm/section_printer.go:54.72,57.2 2 2
+github.com/pterm/pterm/section_printer.go:60.76,63.2 2 0
+github.com/pterm/pterm/section_printer.go:67.57,68.20 1 1379
+github.com/pterm/pterm/section_printer.go:72.2,74.36 2 1379
+github.com/pterm/pterm/section_printer.go:78.2,78.17 1 1379
+github.com/pterm/pterm/section_printer.go:82.2,84.39 2 1379
+github.com/pterm/pterm/section_printer.go:88.2,88.12 1 1379
+github.com/pterm/pterm/section_printer.go:68.20,70.3 1 1
+github.com/pterm/pterm/section_printer.go:74.36,76.3 1 1375
+github.com/pterm/pterm/section_printer.go:78.17,80.3 1 1378
+github.com/pterm/pterm/section_printer.go:84.39,86.3 1 1375
+github.com/pterm/pterm/section_printer.go:93.59,96.2 2 657
+github.com/pterm/pterm/section_printer.go:99.73,101.2 1 686
+github.com/pterm/pterm/section_printer.go:105.75,107.2 1 650
+github.com/pterm/pterm/section_printer.go:112.63,116.2 3 18
+github.com/pterm/pterm/section_printer.go:121.65,125.2 3 333
+github.com/pterm/pterm/section_printer.go:129.79,133.2 3 18
+github.com/pterm/pterm/section_printer.go:138.81,142.2 3 326
+github.com/pterm/pterm/section_printer.go:147.70,148.24 1 2
+github.com/pterm/pterm/section_printer.go:156.2,157.12 2 2
+github.com/pterm/pterm/section_printer.go:148.24,149.33 1 2
+github.com/pterm/pterm/section_printer.go:149.33,150.18 1 1
+github.com/pterm/pterm/section_printer.go:150.18,152.5 1 1
+github.com/pterm/pterm/section_printer.go:163.86,164.24 1 2
+github.com/pterm/pterm/section_printer.go:172.2,173.12 2 2
+github.com/pterm/pterm/section_printer.go:164.24,165.33 1 2
+github.com/pterm/pterm/section_printer.go:165.33,166.18 1 1
+github.com/pterm/pterm/section_printer.go:166.18,168.5 1 1
+github.com/pterm/pterm/color.go:14.20,17.2 2 14224
+github.com/pterm/pterm/color.go:20.21,23.2 2 14225
+github.com/pterm/pterm/color.go:142.50,145.2 2 650
+github.com/pterm/pterm/color.go:150.48,153.36 3 73138
+github.com/pterm/pterm/color.go:156.2,157.16 2 73138
+github.com/pterm/pterm/color.go:153.36,155.3 1 74532
+github.com/pterm/pterm/color.go:162.64,164.2 1 36
+github.com/pterm/pterm/color.go:169.66,171.2 1 648
+github.com/pterm/pterm/color.go:177.55,181.2 3 326
+github.com/pterm/pterm/color.go:187.53,191.2 3 18
+github.com/pterm/pterm/color.go:196.69,200.2 3 18
+github.com/pterm/pterm/color.go:206.71,210.2 3 324
+github.com/pterm/pterm/color.go:215.60,216.24 1 2
+github.com/pterm/pterm/color.go:224.2,225.12 2 2
+github.com/pterm/pterm/color.go:216.24,217.33 1 2
+github.com/pterm/pterm/color.go:217.33,218.18 1 1
+github.com/pterm/pterm/color.go:218.18,220.5 1 1
+github.com/pterm/pterm/color.go:231.76,232.24 1 2
+github.com/pterm/pterm/color.go:240.2,241.12 2 2
+github.com/pterm/pterm/color.go:232.24,233.33 1 2
+github.com/pterm/pterm/color.go:233.33,234.18 1 1
+github.com/pterm/pterm/color.go:234.18,236.5 1 1
+github.com/pterm/pterm/color.go:245.32,247.2 1 327052
+github.com/pterm/pterm/color.go:255.39,257.27 2 3313
+github.com/pterm/pterm/color.go:260.2,260.13 1 3313
+github.com/pterm/pterm/color.go:257.27,259.3 1 726
+github.com/pterm/pterm/color.go:264.43,267.28 2 5
+github.com/pterm/pterm/color.go:271.2,271.12 1 5
+github.com/pterm/pterm/color.go:267.28,269.3 1 6
+github.com/pterm/pterm/color.go:277.48,280.36 3 46526
+github.com/pterm/pterm/color.go:283.2,284.46 2 46526
+github.com/pterm/pterm/color.go:280.36,282.3 1 48484
+github.com/pterm/pterm/color.go:290.50,292.2 1 324
+github.com/pterm/pterm/color.go:296.64,298.2 1 36
+github.com/pterm/pterm/color.go:303.66,305.2 1 648
+github.com/pterm/pterm/color.go:311.40,313.2 1 18
+github.com/pterm/pterm/color.go:319.42,321.2 1 324
+github.com/pterm/pterm/color.go:326.56,328.2 1 18
+github.com/pterm/pterm/color.go:334.58,336.2 1 324
+github.com/pterm/pterm/color.go:339.30,341.2 1 1
+github.com/pterm/pterm/color.go:344.32,346.2 1 143496
+github.com/pterm/pterm/color.go:350.42,351.22 1 143496
+github.com/pterm/pterm/color.go:355.2,356.27 2 138007
+github.com/pterm/pterm/color.go:360.2,360.33 1 138007
+github.com/pterm/pterm/color.go:351.22,353.3 1 5489
+github.com/pterm/pterm/color.go:356.27,358.3 1 177988
+github.com/pterm/pterm/theme.go:93.52,96.2 2 1
+github.com/pterm/pterm/theme.go:99.54,102.2 2 1
+github.com/pterm/pterm/theme.go:105.54,108.2 2 1
+github.com/pterm/pterm/theme.go:111.56,114.2 2 1
+github.com/pterm/pterm/theme.go:117.55,120.2 2 1
+github.com/pterm/pterm/theme.go:123.59,126.2 2 1
+github.com/pterm/pterm/theme.go:129.58,132.2 2 1
+github.com/pterm/pterm/theme.go:135.59,138.2 2 1
+github.com/pterm/pterm/theme.go:141.58,144.2 2 1
+github.com/pterm/pterm/theme.go:147.57,150.2 2 1
+github.com/pterm/pterm/theme.go:153.56,156.2 2 1
+github.com/pterm/pterm/theme.go:159.57,162.2 2 1
+github.com/pterm/pterm/theme.go:165.56,168.2 2 1
+github.com/pterm/pterm/theme.go:171.63,174.2 2 1
+github.com/pterm/pterm/theme.go:177.62,180.2 2 1
+github.com/pterm/pterm/theme.go:183.59,186.2 2 1
+github.com/pterm/pterm/theme.go:189.61,192.2 2 1
+github.com/pterm/pterm/theme.go:195.51,198.2 2 1
+github.com/pterm/pterm/theme.go:201.57,204.2 2 1
+github.com/pterm/pterm/theme.go:207.56,210.2 2 1
+github.com/pterm/pterm/theme.go:213.49,216.2 2 1
+github.com/pterm/pterm/theme.go:219.53,222.2 2 1
+github.com/pterm/pterm/theme.go:225.48,228.2 2 1
+github.com/pterm/pterm/theme.go:231.52,234.2 2 1
+github.com/pterm/pterm/theme.go:237.53,240.2 2 1
+github.com/pterm/pterm/theme.go:243.48,246.2 2 1
+github.com/pterm/pterm/atoms.go:15.39,18.2 2 1
+github.com/pterm/pterm/atoms.go:21.48,24.2 2 1
+github.com/pterm/pterm/atoms.go:27.40,30.2 2 1
+github.com/pterm/pterm/atoms.go:33.43,36.2 2 1
+github.com/pterm/pterm/box_printer.go:55.55,58.2 2 25
+github.com/pterm/pterm/box_printer.go:61.61,70.2 8 2
+github.com/pterm/pterm/box_printer.go:73.62,82.2 8 2
+github.com/pterm/pterm/box_printer.go:85.63,94.2 8 2
+github.com/pterm/pterm/box_printer.go:97.64,106.2 8 2
+github.com/pterm/pterm/box_printer.go:109.65,118.2 8 2
+github.com/pterm/pterm/box_printer.go:121.66,130.2 8 2
+github.com/pterm/pterm/box_printer.go:133.60,136.2 2 1
+github.com/pterm/pterm/box_printer.go:139.61,142.2 2 1
+github.com/pterm/pterm/box_printer.go:145.70,148.2 2 1
+github.com/pterm/pterm/box_printer.go:151.69,154.2 2 1
+github.com/pterm/pterm/box_printer.go:157.73,160.2 2 1
+github.com/pterm/pterm/box_printer.go:163.72,166.2 2 1
+github.com/pterm/pterm/box_printer.go:169.64,172.2 2 1
+github.com/pterm/pterm/box_printer.go:175.66,178.2 2 1
+github.com/pterm/pterm/box_printer.go:181.61,182.17 1 2
+github.com/pterm/pterm/box_printer.go:185.2,186.11 2 2
+github.com/pterm/pterm/box_printer.go:182.17,184.3 1 1
+github.com/pterm/pterm/box_printer.go:190.64,191.17 1 2
+github.com/pterm/pterm/box_printer.go:194.2,195.11 2 2
+github.com/pterm/pterm/box_printer.go:191.17,193.3 1 1
+github.com/pterm/pterm/box_printer.go:199.63,200.17 1 2
+github.com/pterm/pterm/box_printer.go:203.2,204.11 2 2
+github.com/pterm/pterm/box_printer.go:200.17,202.3 1 1
+github.com/pterm/pterm/box_printer.go:208.62,209.17 1 2
+github.com/pterm/pterm/box_printer.go:212.2,213.11 2 2
+github.com/pterm/pterm/box_printer.go:209.17,211.3 1 1
+github.com/pterm/pterm/box_printer.go:217.68,220.2 2 0
+github.com/pterm/pterm/box_printer.go:224.53,225.23 1 1441
+github.com/pterm/pterm/box_printer.go:228.2,228.24 1 1441
+github.com/pterm/pterm/box_printer.go:231.2,236.19 4 1441
+github.com/pterm/pterm/box_printer.go:273.2,276.24 3 1441
+github.com/pterm/pterm/box_printer.go:286.2,286.68 1 1441
+github.com/pterm/pterm/box_printer.go:225.23,227.3 1 7
+github.com/pterm/pterm/box_printer.go:228.24,230.3 1 7
+github.com/pterm/pterm/box_printer.go:236.19,241.3 2 1417
+github.com/pterm/pterm/box_printer.go:241.8,243.92 2 24
+github.com/pterm/pterm/box_printer.go:246.3,246.21 1 24
+github.com/pterm/pterm/box_printer.go:243.92,245.4 1 6
+github.com/pterm/pterm/box_printer.go:246.21,250.4 2 19
+github.com/pterm/pterm/box_printer.go:250.9,250.29 1 5
+github.com/pterm/pterm/box_printer.go:250.29,254.4 2 1
+github.com/pterm/pterm/box_printer.go:254.9,254.30 1 4
+github.com/pterm/pterm/box_printer.go:254.30,258.4 2 1
+github.com/pterm/pterm/box_printer.go:258.9,258.31 1 3
+github.com/pterm/pterm/box_printer.go:258.31,262.4 2 1
+github.com/pterm/pterm/box_printer.go:262.9,262.32 1 2
+github.com/pterm/pterm/box_printer.go:262.32,266.4 2 1
+github.com/pterm/pterm/box_printer.go:266.9,266.33 1 1
+github.com/pterm/pterm/box_printer.go:266.33,270.4 2 1
+github.com/pterm/pterm/box_printer.go:276.24,277.66 1 1477
+github.com/pterm/pterm/box_printer.go:277.66,281.4 1 36
+github.com/pterm/pterm/box_printer.go:281.9,284.4 1 1441
+github.com/pterm/pterm/box_printer.go:291.55,293.2 1 651
+github.com/pterm/pterm/box_printer.go:296.69,298.2 1 684
+github.com/pterm/pterm/box_printer.go:302.71,304.2 1 648
+github.com/pterm/pterm/box_printer.go:309.58,313.2 3 18
+github.com/pterm/pterm/box_printer.go:318.60,322.2 3 327
+github.com/pterm/pterm/box_printer.go:326.74,330.2 3 18
+github.com/pterm/pterm/box_printer.go:335.76,339.2 3 324
+github.com/pterm/pterm/box_printer.go:344.65,345.24 1 2
+github.com/pterm/pterm/box_printer.go:353.2,354.12 2 2
+github.com/pterm/pterm/box_printer.go:345.24,346.33 1 2
+github.com/pterm/pterm/box_printer.go:346.33,347.18 1 1
+github.com/pterm/pterm/box_printer.go:347.18,349.5 1 1
+github.com/pterm/pterm/box_printer.go:360.81,361.24 1 2
+github.com/pterm/pterm/box_printer.go:369.2,370.12 2 2
+github.com/pterm/pterm/box_printer.go:361.24,362.33 1 2
+github.com/pterm/pterm/box_printer.go:362.33,363.18 1 1
+github.com/pterm/pterm/box_printer.go:363.18,365.5 1 1
+github.com/pterm/pterm/spinner_printer.go:51.63,54.2 2 1
+github.com/pterm/pterm/spinner_printer.go:57.74,60.2 2 1
+github.com/pterm/pterm/spinner_printer.go:63.65,66.2 2 1
+github.com/pterm/pterm/spinner_printer.go:69.72,72.2 2 2
+github.com/pterm/pterm/spinner_printer.go:75.72,78.2 2 1
+github.com/pterm/pterm/spinner_printer.go:81.71,84.2 2 2
+github.com/pterm/pterm/spinner_printer.go:87.66,90.2 2 2
+github.com/pterm/pterm/spinner_printer.go:93.87,96.2 2 1
+github.com/pterm/pterm/spinner_printer.go:99.70,102.2 2 1
+github.com/pterm/pterm/spinner_printer.go:105.76,108.2 2 0
+github.com/pterm/pterm/spinner_printer.go:112.50,114.16 2 17
+github.com/pterm/pterm/spinner_printer.go:118.2,118.15 1 17
+github.com/pterm/pterm/spinner_printer.go:114.16,117.3 2 16
+github.com/pterm/pterm/spinner_printer.go:118.15,120.3 1 1
+github.com/pterm/pterm/spinner_printer.go:124.77,129.20 4 10
+github.com/pterm/pterm/spinner_printer.go:133.2,133.15 1 10
+github.com/pterm/pterm/spinner_printer.go:137.2,137.12 1 10
+github.com/pterm/pterm/spinner_printer.go:154.2,154.16 1 10
+github.com/pterm/pterm/spinner_printer.go:129.20,131.3 1 5
+github.com/pterm/pterm/spinner_printer.go:133.15,135.3 1 6
+github.com/pterm/pterm/spinner_printer.go:137.12,138.18 1 10
+github.com/pterm/pterm/spinner_printer.go:138.18,139.35 1 536530
+github.com/pterm/pterm/spinner_printer.go:139.35,140.33 1 93756
+github.com/pterm/pterm/spinner_printer.go:144.5,145.20 2 6
+github.com/pterm/pterm/spinner_printer.go:148.5,150.24 3 6
+github.com/pterm/pterm/spinner_printer.go:140.33,141.14 1 93898
+github.com/pterm/pterm/spinner_printer.go:145.20,147.6 1 5
+github.com/pterm/pterm/spinner_printer.go:159.39,161.22 2 65
+github.com/pterm/pterm/spinner_printer.go:167.2,167.12 1 65
+github.com/pterm/pterm/spinner_printer.go:161.22,164.3 2 2
+github.com/pterm/pterm/spinner_printer.go:164.8,166.3 1 63
+github.com/pterm/pterm/spinner_printer.go:173.63,177.2 3 2
+github.com/pterm/pterm/spinner_printer.go:182.62,186.2 3 3
+github.com/pterm/pterm/spinner_printer.go:190.58,191.29 1 19
+github.com/pterm/pterm/spinner_printer.go:195.2,195.23 1 19
+github.com/pterm/pterm/spinner_printer.go:198.2,200.14 3 19
+github.com/pterm/pterm/spinner_printer.go:191.29,193.3 1 1
+github.com/pterm/pterm/spinner_printer.go:195.23,197.3 1 1
+github.com/pterm/pterm/spinner_printer.go:205.55,206.26 1 19
+github.com/pterm/pterm/spinner_printer.go:210.2,210.23 1 19
+github.com/pterm/pterm/spinner_printer.go:213.2,215.14 3 19
+github.com/pterm/pterm/spinner_printer.go:206.26,208.3 1 1
+github.com/pterm/pterm/spinner_printer.go:210.23,212.3 1 1
+github.com/pterm/pterm/spinner_printer.go:220.58,221.29 1 19
+github.com/pterm/pterm/spinner_printer.go:225.2,225.23 1 19
+github.com/pterm/pterm/spinner_printer.go:228.2,230.14 3 19
+github.com/pterm/pterm/spinner_printer.go:221.29,223.3 1 1
+github.com/pterm/pterm/spinner_printer.go:225.23,227.3 1 1
diff --git a/vendor/github.com/pterm/pterm/deprecated.go b/vendor/github.com/pterm/pterm/deprecated.go
new file mode 100644
index 0000000..9a6d9d0
--- /dev/null
+++ b/vendor/github.com/pterm/pterm/deprecated.go
@@ -0,0 +1,148 @@
+package pterm
+
+import (
+ "strconv"
+ "strings"
+
+ "github.com/pterm/pterm/internal"
+)
+
+// NewLettersFromString creates a Letters object from a string, which is prefilled with the LetterStyle from ThemeDefault.
+// You can override the ThemeDefault LetterStyle if you want to.
+//
+// Deprecated: use putils.LettersFromString instead.
+func NewLettersFromString(text string) Letters {
+ return NewLettersFromStringWithStyle(text, &ThemeDefault.LetterStyle)
+}
+
+// NewLettersFromStringWithStyle creates a Letters object from a string and applies a Style to it.
+//
+// Deprecated: use putils.LettersFromStringWithStyle instead.
+func NewLettersFromStringWithStyle(text string, style *Style) Letters {
+ s := strings.Split(text, "")
+ l := Letters{}
+
+ for _, s2 := range s {
+ l = append(l, Letter{
+ String: s2,
+ Style: style,
+ })
+ }
+
+ return l
+}
+
+// NewLettersFromStringWithRGB creates a Letters object from a string and applies an RGB color to it (overwrites style).
+//
+// Deprecated: use putils.LettersFromStringWithRGB instead.
+func NewLettersFromStringWithRGB(text string, rgb RGB) Letters {
+ s := strings.Split(text, "")
+ l := Letters{}
+
+ for _, s2 := range s {
+ l = append(l, Letter{
+ String: s2,
+ Style: &Style{},
+ RGB: rgb,
+ })
+ }
+
+ return l
+}
+
+// NewBulletListFromStrings returns a BulletListPrinter with Text using the NewBulletListItemFromString method.
+//
+// Deprecated: use putils.BulletListFromStrings instead.
+func NewBulletListFromStrings(s []string, padding string) BulletListPrinter {
+ var lis []BulletListItem
+ for _, line := range s {
+ lis = append(lis, NewBulletListItemFromString(line, padding))
+ }
+ return *DefaultBulletList.WithItems(lis)
+}
+
+// NewBulletListItemFromString returns a BulletListItem with a Text. The padding is counted in the Text to define the Level of the ListItem.
+//
+// Deprecated: use putils.BulletListItemFromString instead.
+func NewBulletListItemFromString(text string, padding string) BulletListItem {
+ s, l := internal.RemoveAndCountPrefix(text, padding)
+ return BulletListItem{
+ Level: l,
+ Text: s,
+ }
+}
+
+// NewBulletListFromString returns a BulletListPrinter with Text using the NewBulletListItemFromString method, splitting on newlines (\n).
+//
+// Deprecated: use putils.BulletListFromString instead.
+func NewBulletListFromString(s string, padding string) BulletListPrinter {
+ return NewBulletListFromStrings(strings.Split(s, "\n"), padding)
+}
+
+// NewTreeFromLeveledList converts a LeveledList into a TreeNode and returns it.
+//
+// Deprecated: use putils.TreeFromLeveledList instead.
+func NewTreeFromLeveledList(leveledListItems LeveledList) TreeNode {
+ if len(leveledListItems) == 0 {
+ return TreeNode{}
+ }
+
+ root := &TreeNode{
+ Children: []TreeNode{},
+ Text: leveledListItems[0].Text,
+ }
+
+ for i, record := range leveledListItems {
+ last := root
+
+ if record.Level < 0 {
+ record.Level = 0
+ leveledListItems[i].Level = 0
+ }
+
+ if len(leveledListItems)-1 != i {
+ if leveledListItems[i+1].Level-1 > record.Level {
+ leveledListItems[i+1].Level = record.Level + 1
+ }
+ }
+
+ for i := 0; i < record.Level; i++ {
+ lastIndex := len(last.Children) - 1
+ last = &last.Children[lastIndex]
+ }
+ last.Children = append(last.Children, TreeNode{
+ Children: []TreeNode{},
+ Text: record.Text,
+ })
+ }
+
+ return *root
+}
+
+// NewRGBFromHEX converts a HEX and returns a new RGB.
+//
+// Deprecated: use putils.RGBFromHEX instead.
+func NewRGBFromHEX(hex string) (RGB, error) {
+ hex = strings.ToLower(hex)
+ hex = strings.ReplaceAll(hex, "#", "")
+ hex = strings.ReplaceAll(hex, "0x", "")
+
+ if len(hex) == 3 {
+ hex = string([]byte{hex[0], hex[0], hex[1], hex[1], hex[2], hex[2]})
+ }
+ if len(hex) != 6 {
+ return RGB{}, ErrHexCodeIsInvalid
+ }
+
+ i64, err := strconv.ParseInt(hex, 16, 32)
+ if err != nil {
+ return RGB{}, err
+ }
+ c := int(i64)
+ // #nosec G115
+ return RGB{
+ R: uint8(c >> 16), //nolint:gosec
+ G: uint8((c & 0x00FF00) >> 8), //nolint:gosec
+ B: uint8(c & 0x0000FF), //nolint:gosec
+ }, nil
+}
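As a quick orientation for the deprecated helpers above, the following is a minimal sketch (not part of the vendored diff) showing NewRGBFromHEX in use; the short form "#f80" is expanded to "ff8800" before parsing, exactly as the function body above does, and the error must be checked because malformed codes return ErrHexCodeIsInvalid. The putils replacements named in the Deprecated notices are assumed to have equivalent signatures.

package main

import (
	"fmt"

	"github.com/pterm/pterm"
)

func main() {
	// "#f80" is normalized to "ff8800" by NewRGBFromHEX before parsing.
	rgb, err := pterm.NewRGBFromHEX("#f80")
	if err != nil {
		fmt.Println("invalid hex code:", err)
		return
	}
	// Prints R=255 G=136 B=0 for the input above.
	fmt.Printf("R=%d G=%d B=%d\n", rgb.R, rgb.G, rgb.B)
}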
diff --git a/vendor/github.com/pterm/pterm/errors.go b/vendor/github.com/pterm/pterm/errors.go
new file mode 100644
index 0000000..9260ab6
--- /dev/null
+++ b/vendor/github.com/pterm/pterm/errors.go
@@ -0,0 +1,14 @@
+package pterm
+
+import "errors"
+
+var (
+ // ErrTerminalSizeNotDetectable - the terminal size can not be detected and the fallback values are used.
+ ErrTerminalSizeNotDetectable = errors.New("terminal size could not be detected - using fallback value")
+
+ // ErrHexCodeIsInvalid - the given HEX code is invalid.
+ ErrHexCodeIsInvalid = errors.New("hex code is not valid")
+
+ // ErrKeyWithoutValue - an odd number of arguments was passed to a pterm Logger's Args method.
+ ErrKeyWithoutValue = "ERROR: key_without_value"
+)
diff --git a/vendor/github.com/pterm/pterm/header_printer.go b/vendor/github.com/pterm/pterm/header_printer.go
new file mode 100644
index 0000000..e038ef6
--- /dev/null
+++ b/vendor/github.com/pterm/pterm/header_printer.go
@@ -0,0 +1,239 @@
+package pterm
+
+import (
+ "fmt"
+ "io"
+ "strings"
+
+ "github.com/mattn/go-runewidth"
+
+ "github.com/pterm/pterm/internal"
+)
+
+var (
+ // DefaultHeader is the printer for a default header text.
+ // Defaults to LightWhite, Bold Text and a Gray DefaultHeader background.
+ DefaultHeader = HeaderPrinter{
+ TextStyle: &ThemeDefault.HeaderTextStyle,
+ BackgroundStyle: &ThemeDefault.HeaderBackgroundStyle,
+ Margin: 5,
+ }
+)
+
+// HeaderPrinter contains the data used to craft a header.
+// A header is printed as a big box with text in it.
+// It can be used as a title screen or a section separator.
+type HeaderPrinter struct {
+ TextStyle *Style
+ BackgroundStyle *Style
+ Margin int
+ FullWidth bool
+ Writer io.Writer
+}
+
+// WithTextStyle returns a new HeaderPrinter with a changed text style.
+func (p HeaderPrinter) WithTextStyle(style *Style) *HeaderPrinter {
+ p.TextStyle = style
+ return &p
+}
+
+// WithBackgroundStyle changes the background styling of the header.
+func (p HeaderPrinter) WithBackgroundStyle(style *Style) *HeaderPrinter {
+ p.BackgroundStyle = style
+ return &p
+}
+
+// WithMargin returns a new HeaderPrinter with a changed margin.
+func (p HeaderPrinter) WithMargin(margin int) *HeaderPrinter {
+ p.Margin = margin
+ return &p
+}
+
+// WithFullWidth enables full width on a HeaderPrinter.
+func (p HeaderPrinter) WithFullWidth(b ...bool) *HeaderPrinter {
+ p.FullWidth = internal.WithBoolean(b)
+ return &p
+}
+
+// WithWriter sets the custom Writer.
+func (p HeaderPrinter) WithWriter(writer io.Writer) *HeaderPrinter {
+ p.Writer = writer
+ return &p
+}
+
+// Sprint formats using the default formats for its operands and returns the resulting string.
+// Spaces are added between operands when neither is a string.
+func (p HeaderPrinter) Sprint(a ...any) string {
+ if RawOutput {
+ return Sprint(a...)
+ }
+
+ if p.TextStyle == nil {
+ p.TextStyle = NewStyle()
+ }
+ if p.BackgroundStyle == nil {
+ p.BackgroundStyle = NewStyle()
+ }
+
+ text := Sprint(a...)
+
+ var blankLine string
+
+ longestLine := internal.ReturnLongestLine(text, "\n")
+ longestLineLen := runewidth.StringWidth(RemoveColorFromString(longestLine)) + p.Margin*2
+
+ if p.FullWidth {
+ text = splitText(text, GetTerminalWidth()-p.Margin*2)
+ blankLine = strings.Repeat(" ", GetTerminalWidth())
+ } else {
+ if longestLineLen > GetTerminalWidth() {
+ text = splitText(text, GetTerminalWidth()-p.Margin*2)
+ blankLine = strings.Repeat(" ", GetTerminalWidth())
+ } else {
+ text = splitText(text, longestLineLen-p.Margin*2)
+ blankLine = strings.Repeat(" ", longestLineLen)
+ }
+ }
+
+ var marginString string
+ var ret strings.Builder
+
+ if p.FullWidth {
+ longestLineLen = runewidth.StringWidth(RemoveColorFromString(internal.ReturnLongestLine(text, "\n")))
+ marginString = strings.Repeat(" ", (GetTerminalWidth()-longestLineLen)/2)
+ } else {
+ marginString = strings.Repeat(" ", p.Margin)
+ }
+
+ ret.WriteString(p.BackgroundStyle.Sprint(blankLine))
+ ret.WriteByte('\n')
+ for _, line := range strings.Split(text, "\n") {
+ line = strings.ReplaceAll(line, "\n", "")
+ line = marginString + line + marginString
+ if runewidth.StringWidth(line) < runewidth.StringWidth(blankLine) {
+ line += strings.Repeat(" ", runewidth.StringWidth(blankLine)-runewidth.StringWidth(line))
+ }
+ ret.WriteString(p.BackgroundStyle.Sprint(p.TextStyle.Sprint(line)))
+ ret.WriteByte('\n')
+ }
+ ret.WriteString(p.BackgroundStyle.Sprint(blankLine))
+ ret.WriteByte('\n')
+
+ return ret.String()
+}
+
+func splitText(text string, width int) string {
+ var lines []string
+ linesTmp := strings.Split(text, "\n")
+ for _, line := range linesTmp {
+ if runewidth.StringWidth(RemoveColorFromString(line)) > width {
+ extraLines := []string{""}
+ extraLinesCounter := 0
+ for i, letter := range line {
+ if i%width == 0 && i != 0 {
+ extraLinesCounter++
+ extraLines = append(extraLines, "")
+ }
+ extraLines[extraLinesCounter] += string(letter)
+ }
+ for _, extraLine := range extraLines {
+ extraLine += "\n"
+ lines = append(lines, extraLine)
+ }
+ } else {
+ line += "\n"
+ lines = append(lines, line)
+ }
+ }
+
+ var line string
+ for _, s := range lines {
+ line += s
+ }
+
+ return strings.TrimSuffix(line, "\n")
+}
+
+// Sprintln formats using the default formats for its operands and returns the resulting string.
+// Spaces are always added between operands and a newline is appended.
+func (p HeaderPrinter) Sprintln(a ...any) string {
+ return p.Sprint(strings.TrimSuffix(Sprintln(a...), "\n"))
+}
+
+// Sprintf formats according to a format specifier and returns the resulting string.
+func (p HeaderPrinter) Sprintf(format string, a ...any) string {
+ return p.Sprint(Sprintf(format, a...))
+}
+
+// Sprintfln formats according to a format specifier and returns the resulting string.
+// Spaces are always added between operands and a newline is appended.
+func (p HeaderPrinter) Sprintfln(format string, a ...any) string {
+ return p.Sprintf(format, a...) + "\n"
+}
+
+// Print formats using the default formats for its operands and writes to standard output.
+// Spaces are added between operands when neither is a string.
+// It returns a *TextPrinter, so further calls can be chained.
+func (p *HeaderPrinter) Print(a ...any) *TextPrinter {
+ Fprint(p.Writer, p.Sprint(a...))
+ tp := TextPrinter(p)
+ return &tp
+}
+
+// Println formats using the default formats for its operands and writes to standard output.
+// Spaces are always added between operands and a newline is appended.
+// It returns a *TextPrinter, so further calls can be chained.
+func (p *HeaderPrinter) Println(a ...any) *TextPrinter {
+ Fprint(p.Writer, p.Sprintln(a...))
+ tp := TextPrinter(p)
+ return &tp
+}
+
+// Printf formats according to a format specifier and writes to standard output.
+// It returns a *TextPrinter, so further calls can be chained.
+func (p *HeaderPrinter) Printf(format string, a ...any) *TextPrinter {
+ Fprint(p.Writer, p.Sprintf(format, a...))
+ tp := TextPrinter(p)
+ return &tp
+}
+
+// Printfln formats according to a format specifier and writes to standard output.
+// Spaces are always added between operands and a newline is appended.
+// It returns a *TextPrinter, so further calls can be chained.
+func (p *HeaderPrinter) Printfln(format string, a ...any) *TextPrinter {
+ Fprint(p.Writer, p.Sprintfln(format, a...))
+ tp := TextPrinter(p)
+ return &tp
+}
+
+// PrintOnError prints every error which is not nil.
+// If every error is nil, nothing will be printed.
+// This can be used for simple error checking.
+func (p *HeaderPrinter) PrintOnError(a ...any) *TextPrinter {
+ for _, arg := range a {
+ if err, ok := arg.(error); ok {
+ if err != nil {
+ p.Println(err)
+ }
+ }
+ }
+
+ tp := TextPrinter(p)
+ return &tp
+}
+
+// PrintOnErrorf wraps every error which is not nil and prints it.
+// If every error is nil, nothing will be printed.
+// This can be used for simple error checking.
+func (p *HeaderPrinter) PrintOnErrorf(format string, a ...any) *TextPrinter {
+ for _, arg := range a {
+ if err, ok := arg.(error); ok {
+ if err != nil {
+ p.Println(fmt.Errorf(format, err))
+ }
+ }
+ }
+
+ tp := TextPrinter(p)
+ return &tp
+}
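To illustrate the builder-style API added above, here is a minimal sketch (not part of the vendored diff, header text purely illustrative) that prints a full-width header through the package-level DefaultHeader printer; the With* methods copy the printer by value, so DefaultHeader itself is left unchanged.

package main

import "github.com/pterm/pterm"

func main() {
	// Chain the setters shown above and print; Writer is nil, so the
	// default output is used.
	pterm.DefaultHeader.
		WithFullWidth().
		WithMargin(10).
		Println("Vendored pterm header example")
}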
diff --git a/vendor/github.com/pterm/pterm/heatmap_printer.go b/vendor/github.com/pterm/pterm/heatmap_printer.go
new file mode 100644
index 0000000..f0765e5
--- /dev/null
+++ b/vendor/github.com/pterm/pterm/heatmap_printer.go
@@ -0,0 +1,744 @@
+package pterm
+
+import (
+ "bytes"
+ "errors"
+ "io"
+ "math"
+ "strings"
+
+ "github.com/pterm/pterm/internal"
+)
+
+// DefaultHeatmap contains the default settings used to print a HeatmapPrinter.
+var DefaultHeatmap = HeatmapPrinter{
+ AxisStyle: &ThemeDefault.HeatmapHeaderStyle,
+ SeparatorStyle: &ThemeDefault.HeatmapSeparatorStyle,
+ VerticalSeparator: "│",
+ TopRightCornerSeparator: "└",
+ TopLeftCornerSeparator: "┘",
+ BottomLeftCornerSeparator: "┐",
+ BottomRightCornerSeparator: "┌",
+ HorizontalSeparator: "─",
+ TSeparator: "┬",
+ TReverseSeparator: "┴",
+ LSeparator: "├",
+ LReverseSeparator: "┤",
+ TCrossSeparator: "┼",
+ LegendLabel: "Legend",
+ Boxed: true,
+ Grid: true,
+ Legend: true,
+ TextRGB: RGB{0, 0, 0, false},
+ RGBRange: []RGB{{R: 255, G: 0, B: 0, Background: true}, {R: 255, G: 165, B: 0, Background: true}, {R: 0, G: 255, B: 0, Background: true}},
+ TextColor: FgBlack,
+ Colors: []Color{BgRed, BgLightRed, BgYellow, BgLightYellow, BgLightGreen, BgGreen},
+
+ EnableRGB: false,
+}
+
+// HeatmapData is the type that contains the data of a HeatmapPrinter.
+type HeatmapData [][]float32
+
+type HeatmapAxis struct {
+ XAxis []string
+ YAxis []string
+}
+
+// HeatmapPrinter is able to render heatmaps.
+type HeatmapPrinter struct {
+ HasHeader bool
+ AxisStyle *Style
+ VerticalSeparator string
+ TopRightCornerSeparator string
+ TopLeftCornerSeparator string
+ BottomLeftCornerSeparator string
+ BottomRightCornerSeparator string
+ HorizontalSeparator string
+ TSeparator string
+ TReverseSeparator string
+ LSeparator string
+ LReverseSeparator string
+ TCrossSeparator string
+ LegendLabel string
+ SeparatorStyle *Style
+ Data HeatmapData
+ Axis HeatmapAxis
+ Boxed bool
+ Grid bool
+ OnlyColoredCells bool
+ LegendOnlyColoredCells bool
+ EnableComplementaryColor bool
+ Legend bool
+ CellSize int
+ Colors []Color
+ TextColor Color
+ EnableRGB bool
+ RGBRange []RGB
+ TextRGB RGB
+ Writer io.Writer
+
+ minValue float32
+ maxValue float32
+
+ rgbLegendValue int
+}
+
+var complementaryColors = map[Color]Color{
+ BgBlack: FgLightWhite,
+ BgRed: FgCyan,
+ BgGreen: FgMagenta,
+ BgYellow: FgBlue,
+ BgBlue: FgYellow,
+ BgMagenta: FgGreen,
+ BgCyan: FgRed,
+ BgWhite: FgBlack,
+ BgDefault: FgBlack,
+ BgDarkGray: FgLightWhite,
+ BgLightRed: FgLightCyan,
+ BgLightGreen: FgLightMagenta,
+ BgLightYellow: FgLightBlue,
+ BgLightBlue: FgLightYellow,
+ BgLightMagenta: FgLightGreen,
+ BgLightCyan: FgLightRed,
+ BgLightWhite: FgBlack,
+}
+
+// WithAxisData returns a new HeatmapPrinter, where the first line and row are headers.
+func (p HeatmapPrinter) WithAxisData(hd HeatmapAxis) *HeatmapPrinter {
+ p.HasHeader = true
+ p.Axis = hd
+ return &p
+}
+
+// WithAxisStyle returns a new HeatmapPrinter with a specific AxisStyle.
+func (p HeatmapPrinter) WithAxisStyle(style *Style) *HeatmapPrinter {
+ p.AxisStyle = style
+ return &p
+}
+
+// WithSeparatorStyle returns a new HeatmapPrinter with a specific SeparatorStyle.
+func (p HeatmapPrinter) WithSeparatorStyle(style *Style) *HeatmapPrinter {
+ p.SeparatorStyle = style
+ return &p
+}
+
+// WithData returns a new HeatmapPrinter with specific Data.
+func (p HeatmapPrinter) WithData(data [][]float32) *HeatmapPrinter {
+ p.Data = data
+ return &p
+}
+
+// WithTextColor returns a new HeatmapPrinter with a specific TextColor.
+// This sets EnableComplementaryColor to false.
+func (p HeatmapPrinter) WithTextColor(color Color) *HeatmapPrinter {
+ p.TextColor = color
+ p.EnableComplementaryColor = false
+ return &p
+}
+
+// WithTextRGB returns a new HeatmapPrinter with a specific TextRGB.
+// This sets EnableComplementaryColor to false.
+func (p HeatmapPrinter) WithTextRGB(rgb RGB) *HeatmapPrinter {
+ p.TextRGB = rgb
+ p.EnableComplementaryColor = false
+ return &p
+}
+
+// WithBoxed returns a new HeatmapPrinter with a box around the table.
+// If set to true, Grid will be set to true too.
+func (p HeatmapPrinter) WithBoxed(b ...bool) *HeatmapPrinter {
+ p.Boxed = internal.WithBoolean(b)
+ if p.Boxed && !p.Grid {
+ p.Grid = true
+ }
+ return &p
+}
+
+// WithGrid returns a new HeatmapPrinter with a grid.
+// If set to false, Boxed will be set to false too.
+func (p HeatmapPrinter) WithGrid(b ...bool) *HeatmapPrinter {
+ b2 := internal.WithBoolean(b)
+ p.Grid = b2
+ if !b2 && p.Boxed {
+ p.Boxed = false
+ }
+ return &p
+}
+
+// WithEnableRGB returns a new HeatmapPrinter with RGB colors.
+func (p HeatmapPrinter) WithEnableRGB(b ...bool) *HeatmapPrinter {
+ p.EnableRGB = internal.WithBoolean(b)
+ return &p
+}
+
+// WithOnlyColoredCells returns a new HeatmapPrinter with only colored cells.
+func (p HeatmapPrinter) WithOnlyColoredCells(b ...bool) *HeatmapPrinter {
+ b2 := internal.WithBoolean(b)
+ p.OnlyColoredCells = b2
+ return &p
+}
+
+// WithLegendOnlyColoredCells returns a new HeatmapPrinter with legend with only colored cells.
+// This sets the Legend to true.
+func (p HeatmapPrinter) WithLegendOnlyColoredCells(b ...bool) *HeatmapPrinter {
+ b2 := internal.WithBoolean(b)
+ p.LegendOnlyColoredCells = b2
+ if b2 {
+ p.Legend = true
+ }
+ return &p
+}
+
+// WithEnableComplementaryColor returns a new HeatmapPrinter with complement color.
+func (p HeatmapPrinter) WithEnableComplementaryColor(b ...bool) *HeatmapPrinter {
+ p.EnableComplementaryColor = internal.WithBoolean(b)
+ return &p
+}
+
+// WithLegend returns a new HeatmapPrinter with a legend.
+func (p HeatmapPrinter) WithLegend(b ...bool) *HeatmapPrinter {
+ p.Legend = internal.WithBoolean(b)
+ return &p
+}
+
+// WithCellSize returns a new HeatmapPrinter with a specific cell size.
+// This only works if there is no header and OnlyColoredCells == true!
+func (p HeatmapPrinter) WithCellSize(i int) *HeatmapPrinter {
+ p.CellSize = i
+ return &p
+}
+
+// WithLegendLabel returns a new HeatmapPrinter with a specific legend label.
+// This sets the Legend to true.
+func (p HeatmapPrinter) WithLegendLabel(s string) *HeatmapPrinter {
+ p.LegendLabel = s
+ p.Legend = true
+ return &p
+}
+
+// WithRGBRange returns a new HeatmapPrinter with a specific RGBRange.
+func (p HeatmapPrinter) WithRGBRange(rgb ...RGB) *HeatmapPrinter {
+ p.RGBRange = rgb
+ return &p
+}
+
+// WithColors returns a new HeatmapPrinter with specific Colors.
+func (p HeatmapPrinter) WithColors(colors ...Color) *HeatmapPrinter {
+ p.Colors = colors
+ return &p
+}
+
+// WithWriter sets the Writer.
+func (p HeatmapPrinter) WithWriter(writer io.Writer) *HeatmapPrinter {
+ p.Writer = writer
+ return &p
+}
+
+// Srender renders the HeatmapPrinter as a string.
+func (p HeatmapPrinter) Srender() (string, error) {
+ if err := p.errCheck(); err != nil {
+ return "", err
+ }
+
+ if p.SeparatorStyle == nil {
+ p.SeparatorStyle = DefaultHeatmap.SeparatorStyle
+ }
+ if p.AxisStyle == nil {
+ p.AxisStyle = DefaultHeatmap.AxisStyle
+ }
+
+ if RawOutput {
+ p.Legend = false
+ }
+
+ buffer := bytes.NewBufferString("")
+ xAmount := len(p.Data[0]) - 1
+ yAmount := len(p.Data) - 1
+ p.minValue, p.maxValue = minMaxFloat32(p.Data)
+
+ var data string
+ for _, datum := range p.Data {
+ for _, f := range datum {
+ data += Sprintf("%v\n", f)
+ }
+ }
+
+ if p.HasHeader {
+ data, xAmount, yAmount = p.computeAxisData(data, xAmount, yAmount)
+ }
+
+ colWidth := internal.GetStringMaxWidth(data)
+ legendColWidth := colWidth + 2
+
+ if p.OnlyColoredCells && (p.CellSize > colWidth || !p.HasHeader) {
+ colWidth = p.CellSize
+ }
+
+ if p.Boxed {
+ p.renderSeparatorRow(buffer, colWidth, xAmount, true)
+ }
+
+ p.renderData(buffer, colWidth, xAmount, yAmount)
+
+ if p.HasHeader {
+ p.renderHeader(buffer, colWidth, xAmount)
+ }
+
+ if p.Boxed {
+ p.renderSeparatorRow(buffer, colWidth, xAmount, false)
+ }
+
+ if p.Legend {
+ p.renderLegend(buffer, legendColWidth)
+ }
+
+ buffer.WriteString("\n")
+
+ return buffer.String(), nil
+}
+
+func (p HeatmapPrinter) computeAxisData(data string, xAmount, yAmount int) (string, int, int) {
+ var header string
+ for _, h := range p.Axis.XAxis {
+ header += h + "\n"
+ }
+ for _, h := range p.Axis.YAxis {
+ header += h + "\n"
+ }
+
+ if p.OnlyColoredCells {
+ data = header
+ } else {
+ data += header
+ }
+ xAmount++
+ yAmount++
+
+ p.Axis.YAxis = append(p.Axis.YAxis, "")
+
+ return data, xAmount, yAmount
+}
+
+func (p HeatmapPrinter) renderSeparatorRow(buffer *bytes.Buffer, colWidth, xAmount int, top bool) {
+ tSep := p.TReverseSeparator
+ rightSep := p.TopRightCornerSeparator
+ leftSep := p.TopLeftCornerSeparator
+
+ if top {
+ tSep = p.TSeparator
+ rightSep = p.BottomRightCornerSeparator
+ leftSep = p.BottomLeftCornerSeparator
+ } else {
+ buffer.WriteString("\n")
+ }
+ buffer.WriteString(p.SeparatorStyle.Sprint(rightSep))
+ for i := 0; i < xAmount+1; i++ {
+ buffer.WriteString(strings.Repeat(p.SeparatorStyle.Sprint(p.HorizontalSeparator), colWidth))
+ if i < xAmount {
+ buffer.WriteString(p.SeparatorStyle.Sprint(tSep))
+ }
+ }
+ buffer.WriteString(p.SeparatorStyle.Sprint(leftSep))
+
+ if top {
+ buffer.WriteString("\n")
+ }
+}
+
+func (p HeatmapPrinter) renderLegend(buffer *bytes.Buffer, legendColWidth int) {
+ buffer.WriteString("\n")
+ buffer.WriteString("\n")
+ if p.Boxed {
+ p.boxLegend(buffer, p.LegendLabel, legendColWidth)
+ } else {
+ p.generateLegend(buffer, p.LegendLabel, legendColWidth)
+ }
+}
+
+func (p HeatmapPrinter) renderHeader(buffer *bytes.Buffer, colWidth int, xAmount int) {
+ buffer.WriteString("\n")
+ if p.Boxed {
+ buffer.WriteString(p.SeparatorStyle.Sprint(p.LSeparator))
+ }
+ if p.Grid {
+ for i := 0; i < xAmount+1; i++ {
+ buffer.WriteString(strings.Repeat(p.SeparatorStyle.Sprint(p.HorizontalSeparator), colWidth))
+ if i < xAmount {
+ buffer.WriteString(p.SeparatorStyle.Sprint(p.TCrossSeparator))
+ }
+ }
+ }
+ if p.Boxed {
+ buffer.WriteString(p.SeparatorStyle.Sprint(p.LReverseSeparator))
+ }
+ if p.Grid {
+ buffer.WriteString("\n")
+ }
+ for j, f := range p.Axis.XAxis {
+ if j == 0 {
+ if p.Boxed {
+ buffer.WriteString(p.SeparatorStyle.Sprint(p.VerticalSeparator))
+ }
+ ct := internal.CenterText(" ", colWidth)
+ if len(ct) < colWidth {
+ ct += strings.Repeat(" ", colWidth-len(ct))
+ }
+ buffer.WriteString(p.AxisStyle.Sprint(ct))
+ if p.Grid {
+ buffer.WriteString(p.SeparatorStyle.Sprint(p.VerticalSeparator))
+ }
+ }
+ var ct string
+ ct = internal.CenterText(Sprintf("%v", f), colWidth)
+ if len(ct) < colWidth {
+ ct += strings.Repeat(" ", colWidth-len(ct))
+ }
+ buffer.WriteString(p.AxisStyle.Sprint(ct))
+
+ if j < xAmount {
+ if !p.Boxed && j == xAmount-1 {
+ continue
+ }
+ if p.Grid {
+ buffer.WriteString(p.SeparatorStyle.Sprint(p.VerticalSeparator))
+ }
+ }
+ }
+}
+
+func (p HeatmapPrinter) renderData(buffer *bytes.Buffer, colWidth int, xAmount int, yAmount int) {
+ for i, datum := range p.Data {
+ if p.Boxed {
+ buffer.WriteString(p.SeparatorStyle.Sprint(p.VerticalSeparator))
+ }
+ for j, f := range datum {
+ if j == 0 && p.HasHeader {
+ ct := internal.CenterText(p.Axis.YAxis[i], colWidth)
+ if len(ct) < colWidth {
+ ct += strings.Repeat(" ", colWidth-len(ct))
+ }
+ buffer.WriteString(p.AxisStyle.Sprint(ct))
+ if p.Grid {
+ buffer.WriteString(p.SeparatorStyle.Sprint(p.VerticalSeparator))
+ }
+ }
+ var ct string
+ if p.OnlyColoredCells {
+ ct = internal.CenterText(" ", colWidth)
+ } else {
+ ct = internal.CenterText(Sprintf("%v", f), colWidth)
+ }
+ if len(ct) < colWidth {
+ if len(Sprintf("%v", f)) == 1 {
+ ct += strings.Repeat(" ", colWidth-len(ct))
+ } else {
+ ct = strings.Repeat(" ", colWidth-len(ct)) + ct
+ }
+ }
+ if p.EnableRGB {
+ rgb := p.RGBRange[0].Fade(p.minValue, p.maxValue, f, p.RGBRange[1:]...)
+ rgbStyle := NewRGBStyle(p.TextRGB, rgb)
+ if p.EnableComplementaryColor {
+ complimentary := NewRGB(internal.Complementary(rgb.R, rgb.G, rgb.B))
+ rgbStyle = NewRGBStyle(complimentary, rgb)
+ }
+ buffer.WriteString(rgbStyle.Sprint(ct))
+ } else {
+ color := getColor(p.minValue, p.maxValue, f, p.Colors...)
+ fgColor := p.TextColor
+ if p.EnableComplementaryColor {
+ fgColor = complementaryColors[color]
+ }
+ buffer.WriteString(fgColor.Sprint(color.Sprintf("%s", ct)))
+ }
+ if j < xAmount {
+ if !p.Boxed && p.HasHeader && j == xAmount-1 {
+ continue
+ }
+ if p.Grid {
+ buffer.WriteString(p.SeparatorStyle.Sprint(p.VerticalSeparator))
+ }
+ }
+ if p.Boxed && !p.HasHeader && j == xAmount {
+ buffer.WriteString(p.SeparatorStyle.Sprint(p.VerticalSeparator))
+ }
+ }
+
+ if i < yAmount {
+ if p.HasHeader && i == yAmount-1 {
+ continue
+ }
+ buffer.WriteString("\n")
+ if p.Boxed {
+ buffer.WriteString(p.SeparatorStyle.Sprint(p.LSeparator))
+ }
+ if p.Grid {
+ for i := 0; i < xAmount+1; i++ {
+ buffer.WriteString(strings.Repeat(p.SeparatorStyle.Sprint(p.HorizontalSeparator), colWidth))
+ if i < xAmount {
+ buffer.WriteString(p.SeparatorStyle.Sprint(p.TCrossSeparator))
+ }
+ }
+ }
+ if p.Boxed {
+ buffer.WriteString(p.SeparatorStyle.Sprint(p.LReverseSeparator))
+ }
+ if p.Grid {
+ buffer.WriteString("\n")
+ }
+ }
+ }
+}
+
+func (p HeatmapPrinter) generateLegend(buffer *bytes.Buffer, legend string, legendColWidth int) {
+ buffer.WriteString(p.AxisStyle.Sprint(legend))
+ if p.Grid {
+ buffer.WriteString(p.SeparatorStyle.Sprintf("%s", p.VerticalSeparator))
+ } else {
+ buffer.WriteString(" ")
+ }
+ if p.EnableRGB {
+ p.generateRGBLegend(buffer, legendColWidth)
+ } else {
+ p.generateColorLegend(buffer, legendColWidth)
+ }
+}
+
+func (p HeatmapPrinter) generateColorLegend(buffer *bytes.Buffer, legendColWidth int) {
+ for i, color := range p.Colors {
+ // the first color is the min value and the last color is the max value
+ var f float32
+ if i == 0 {
+ f = p.minValue
+ } else if i == len(p.Colors)-1 {
+ f = p.maxValue
+ } else {
+ f = p.minValue + (p.maxValue-p.minValue)*float32(i)/float32(len(p.Colors)-1)
+ }
+ fgColor := p.TextColor
+ if p.EnableComplementaryColor {
+ fgColor = complementaryColors[color]
+ }
+ buffer.WriteString(fgColor.Sprint(color.Sprint(centerAndShorten(f, legendColWidth, p.LegendOnlyColoredCells))))
+ if p.Grid && i < len(p.Colors)-1 && !p.LegendOnlyColoredCells {
+ buffer.WriteString(p.SeparatorStyle.Sprintf("%s", p.VerticalSeparator))
+ }
+ }
+}
+
+func (p HeatmapPrinter) generateRGBLegend(buffer *bytes.Buffer, legendColWidth int) {
+ p.rgbLegendValue = 10
+ steps := len(p.RGBRange)
+ if steps < p.rgbLegendValue {
+ steps = p.rgbLegendValue
+ }
+ if p.LegendOnlyColoredCells {
+ steps *= 3
+ }
+ for i := 0; i < steps; i++ {
+ // the first color is the min value and the last color is the max value
+ var f float32
+ if i == 0 {
+ f = p.minValue
+ } else if i == steps-1 {
+ f = p.maxValue
+ } else {
+ f = p.minValue + (p.maxValue-p.minValue)*float32(i)/float32(steps-1)
+ }
+ rgb := p.RGBRange[0].Fade(p.minValue, p.maxValue, f, p.RGBRange[1:]...)
+ rgbStyle := NewRGBStyle(p.TextRGB, rgb)
+ if p.EnableComplementaryColor {
+ complimentary := NewRGB(internal.Complementary(rgb.R, rgb.G, rgb.B))
+ rgbStyle = NewRGBStyle(complimentary, rgb)
+ }
+ if p.LegendOnlyColoredCells {
+ buffer.WriteString(rgbStyle.Sprint(centerAndShorten(f, 1, p.LegendOnlyColoredCells)))
+ } else {
+ buffer.WriteString(rgbStyle.Sprint(centerAndShorten(f, legendColWidth, p.LegendOnlyColoredCells)))
+ }
+ if p.Grid && i < steps-1 && !p.LegendOnlyColoredCells {
+ buffer.WriteString(p.SeparatorStyle.Sprintf("%s", p.VerticalSeparator))
+ }
+ }
+}
+
+func (p HeatmapPrinter) boxLegend(buffer *bytes.Buffer, legend string, legendColWidth int) {
+ buffer.WriteString(p.SeparatorStyle.Sprint(p.BottomRightCornerSeparator))
+
+ p.generateSeparatorRow(buffer, legend, legendColWidth, true)
+
+ buffer.WriteString(p.SeparatorStyle.Sprint(p.BottomLeftCornerSeparator))
+ buffer.WriteString("\n")
+ buffer.WriteString(p.SeparatorStyle.Sprintf("%s", p.VerticalSeparator))
+
+ p.generateLegend(buffer, legend, legendColWidth)
+
+ buffer.WriteString(p.SeparatorStyle.Sprintf("%s", p.VerticalSeparator))
+ buffer.WriteString("\n")
+
+ buffer.WriteString(p.SeparatorStyle.Sprint(p.TopRightCornerSeparator))
+
+ p.generateSeparatorRow(buffer, legend, legendColWidth, false)
+
+ buffer.WriteString(p.SeparatorStyle.Sprint(p.TopLeftCornerSeparator))
+}
+
+func (p HeatmapPrinter) generateSeparatorRow(buffer *bytes.Buffer, legend string, legendColWidth int, top bool) {
+ p.rgbLegendValue = 10
+ steps := len(p.RGBRange)
+ if steps < p.rgbLegendValue {
+ steps = p.rgbLegendValue
+ }
+ if p.LegendOnlyColoredCells {
+ steps *= 3
+ }
+
+ var xValue int
+ if p.EnableRGB {
+ xValue = len(p.RGBRange)
+ if xValue < p.rgbLegendValue {
+ xValue = p.rgbLegendValue
+ }
+ } else {
+ xValue = len(p.Colors)
+ }
+
+ for i := 0; i < xValue+1; i++ {
+ if i == 0 {
+ firstLength := len(legend)
+ buffer.WriteString(strings.Repeat(p.SeparatorStyle.Sprint(p.HorizontalSeparator), firstLength))
+ } else {
+ if p.LegendOnlyColoredCells {
+ if p.EnableRGB {
+ buffer.WriteString(strings.Repeat(p.SeparatorStyle.Sprint(p.HorizontalSeparator), steps/(xValue)))
+ } else {
+ buffer.WriteString(strings.Repeat(p.SeparatorStyle.Sprint(p.HorizontalSeparator), legendColWidth))
+ }
+ } else {
+ buffer.WriteString(strings.Repeat(p.SeparatorStyle.Sprint(p.HorizontalSeparator), legendColWidth))
+ }
+ }
+ if i < xValue && !p.LegendOnlyColoredCells || i == 0 {
+ if top {
+ buffer.WriteString(p.SeparatorStyle.Sprint(p.TSeparator))
+ } else {
+ buffer.WriteString(p.SeparatorStyle.Sprint(p.TReverseSeparator))
+ }
+ }
+ }
+}
+
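+// centerAndShorten formats a legend value so that it fits into lineLength
+// characters, centering it and truncating the formatted number if necessary;
+// when onlyColor is set the value is omitted so only the colored cell is shown.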
+func centerAndShorten(f float32, lineLength int, onlyColor bool) string {
+ value := ""
+ if !onlyColor {
+ value = Sprintf("%.2v", f)
+ }
+ if len(value) > lineLength {
+ value = value[:lineLength]
+ if strings.HasSuffix(value, ".") {
+ value = Sprintf("%.1v", f)
+ lineLength = len(value)
+ }
+ }
+ ct := internal.CenterText(value, lineLength)
+ if len(ct) < lineLength {
+ if len(Sprintf("%v", f)) == 1 {
+ ct += strings.Repeat(" ", lineLength-len(ct))
+ } else {
+ ct = strings.Repeat(" ", lineLength-len(ct)) + ct
+ }
+ }
+
+ return ct
+}
+
+func getColor(minStep float32, maxStep float32, current float32, colors ...Color) Color {
+ // split the range into equal parts
+ // and assign a color to each part
+ // the last color is assigned to the max value
+ // and the first color to the min value
+ // the rest of the colors are assigned to the
+ // middle values
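+ // For example (illustrative numbers): with minStep=0, maxStep=10 and 5 colors,
+ // each bucket spans 2.0, so a value of 4.5 falls into the third color.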
+ step := (maxStep - minStep) / float32(len(colors))
+ for i := range colors {
+ if current >= minStep+float32(i)*step && current < minStep+float32(i+1)*step {
+ return colors[i]
+ }
+ }
+ return colors[len(colors)-1]
+}
+
+// Render prints the HeatmapPrinter to the terminal.
+func (p HeatmapPrinter) Render() error {
+ s, err := p.Srender()
+ if err != nil {
+ return err
+ }
+ Fprintln(p.Writer, s)
+
+ return nil
+}
+
+func (p HeatmapPrinter) errCheck() error {
+ if p.HasHeader {
+ if p.Axis.XAxis == nil {
+ return errors.New("x axis is nil")
+ }
+ if p.Axis.YAxis == nil {
+ return errors.New("y axis is nil")
+ }
+
+ if len(p.Axis.XAxis) == 0 {
+ return errors.New("x axis is empty")
+ }
+ if len(p.Axis.YAxis) == 0 {
+ return errors.New("y axis is empty")
+ }
+
+ for i := 1; i < len(p.Data); i++ {
+ if len(p.Data[i]) != len(p.Axis.XAxis) {
+ return errors.New("x axis length does not match data")
+ }
+ }
+ if len(p.Axis.YAxis) != len(p.Data) {
+ return errors.New("y axis length does not match data")
+ }
+ }
+
+ if p.Data == nil {
+ return errors.New("data is nil")
+ }
+
+ if len(p.Data) == 0 {
+ return errors.New("data is empty")
+ }
+
+ // check if p.Data[n] has the same length
+ for i := 1; i < len(p.Data); i++ {
+ if len(p.Data[i]) != len(p.Data[0]) {
+ return errors.New("data is not rectangular")
+ }
+ }
+
+ return nil
+}
+
+// return min and max value of a slice
+func minMaxFloat32(s [][]float32) (float32, float32) {
+ var minslice, maxslice float32
+ minslice = math.MaxFloat32
+ maxslice = -math.MaxFloat32
+
+ for _, r := range s {
+ for _, c := range r {
+ if c < minslice {
+ minslice = c
+ }
+ if c > maxslice {
+ maxslice = c
+ }
+ }
+ }
+ return minslice, maxslice
+}
diff --git a/vendor/github.com/pterm/pterm/interactive_confirm_printer.go b/vendor/github.com/pterm/pterm/interactive_confirm_printer.go
new file mode 100644
index 0000000..a94cc5d
--- /dev/null
+++ b/vendor/github.com/pterm/pterm/interactive_confirm_printer.go
@@ -0,0 +1,186 @@
+package pterm
+
+import (
+ "fmt"
+ "strings"
+
+ "atomicgo.dev/cursor"
+ "atomicgo.dev/keyboard"
+ "atomicgo.dev/keyboard/keys"
+
+ "github.com/pterm/pterm/internal"
+)
+
+// DefaultInteractiveConfirm is the default InteractiveConfirm printer.
+// Pressing "y" will return true, "n" will return false.
+// Pressing enter without typing "y" or "n" will return the configured default value (by default set to "no").
+var DefaultInteractiveConfirm = InteractiveConfirmPrinter{
+ DefaultValue: false,
+ DefaultText: "Please confirm",
+ TextStyle: &ThemeDefault.PrimaryStyle,
+ ConfirmText: "Yes",
+ ConfirmStyle: &ThemeDefault.SuccessMessageStyle,
+ RejectText: "No",
+ RejectStyle: &ThemeDefault.ErrorMessageStyle,
+ SuffixStyle: &ThemeDefault.SecondaryStyle,
+ Delimiter: ": ",
+}
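+
+// Usage sketch (the prompt text is made up):
+//
+//	ok, _ := pterm.DefaultInteractiveConfirm.WithDefaultValue(true).Show("Proceed with the update?")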
+
+// InteractiveConfirmPrinter is a printer for interactive confirm prompts.
+type InteractiveConfirmPrinter struct {
+ DefaultValue bool
+ DefaultText string
+ Delimiter string
+ TextStyle *Style
+ ConfirmText string
+ ConfirmStyle *Style
+ RejectText string
+ RejectStyle *Style
+ SuffixStyle *Style
+ OnInterruptFunc func()
+}
+
+// WithDefaultText sets the default text.
+func (p InteractiveConfirmPrinter) WithDefaultText(text string) *InteractiveConfirmPrinter {
+ p.DefaultText = text
+ return &p
+}
+
+// WithDefaultValue sets the default value, which will be returned when the user presses enter without typing "y" or "n".
+func (p InteractiveConfirmPrinter) WithDefaultValue(value bool) *InteractiveConfirmPrinter {
+ p.DefaultValue = value
+ return &p
+}
+
+// WithTextStyle sets the text style.
+func (p InteractiveConfirmPrinter) WithTextStyle(style *Style) *InteractiveConfirmPrinter {
+ p.TextStyle = style
+ return &p
+}
+
+// WithConfirmText sets the confirm text.
+func (p InteractiveConfirmPrinter) WithConfirmText(text string) *InteractiveConfirmPrinter {
+ p.ConfirmText = text
+ return &p
+}
+
+// WithConfirmStyle sets the confirm style.
+func (p InteractiveConfirmPrinter) WithConfirmStyle(style *Style) *InteractiveConfirmPrinter {
+ p.ConfirmStyle = style
+ return &p
+}
+
+// WithRejectText sets the reject text.
+func (p InteractiveConfirmPrinter) WithRejectText(text string) *InteractiveConfirmPrinter {
+ p.RejectText = text
+ return &p
+}
+
+// WithRejectStyle sets the reject style.
+func (p InteractiveConfirmPrinter) WithRejectStyle(style *Style) *InteractiveConfirmPrinter {
+ p.RejectStyle = style
+ return &p
+}
+
+// WithSuffixStyle sets the suffix style.
+func (p InteractiveConfirmPrinter) WithSuffixStyle(style *Style) *InteractiveConfirmPrinter {
+ p.SuffixStyle = style
+ return &p
+}
+
+// WithOnInterruptFunc sets the function to execute when the input reader is interrupted.
+func (p InteractiveConfirmPrinter) WithOnInterruptFunc(exitFunc func()) *InteractiveConfirmPrinter {
+ p.OnInterruptFunc = exitFunc
+ return &p
+}
+
+// WithDelimiter sets the delimiter between the message and the input.
+func (p InteractiveConfirmPrinter) WithDelimiter(delimiter string) *InteractiveConfirmPrinter {
+ p.Delimiter = delimiter
+ return &p
+}
+
+// Show shows the confirm prompt.
+//
+// Example:
+//
+// result, _ := pterm.DefaultInteractiveConfirm.Show("Are you sure?")
+// pterm.Println(result)
+func (p InteractiveConfirmPrinter) Show(text ...string) (bool, error) {
+ // should be the first defer statement to make sure it is executed last
+ // and all the needed cleanup can be done before
+ cancel, exit := internal.NewCancelationSignal(p.OnInterruptFunc)
+ defer exit()
+
+ var result bool
+
+ if len(text) == 0 || text[0] == "" {
+ text = []string{p.DefaultText}
+ }
+
+ p.TextStyle.Print(text[0] + " " + p.getSuffix() + p.Delimiter)
+ y, n := p.getShortHandles()
+
+ var interrupted bool
+ err := keyboard.Listen(func(keyInfo keys.Key) (stop bool, err error) {
+ key := keyInfo.Code
+ char := strings.ToLower(keyInfo.String())
+ if err != nil {
+ return false, fmt.Errorf("failed to get key: %w", err)
+ }
+
+ switch key {
+ case keys.RuneKey:
+ switch char {
+ case y:
+ p.ConfirmStyle.Print(p.ConfirmText)
+ Println()
+ result = true
+ return true, nil
+ case n:
+ p.RejectStyle.Print(p.RejectText)
+ Println()
+ result = false
+ return true, nil
+ }
+ case keys.Enter:
+ if p.DefaultValue {
+ p.ConfirmStyle.Print(p.ConfirmText)
+ } else {
+ p.RejectStyle.Print(p.RejectText)
+ }
+ Println()
+ result = p.DefaultValue
+ return true, nil
+ case keys.CtrlC:
+ cancel()
+ interrupted = true
+ return true, nil
+ }
+ return false, nil
+ })
+ if !interrupted {
+ cursor.StartOfLine()
+ }
+ return result, err
+}
+
+// getShortHandles returns the shorthand answers for the confirmation prompt.
+func (p InteractiveConfirmPrinter) getShortHandles() (string, string) {
+ y := strings.ToLower(string([]rune(p.ConfirmText)[0]))
+ n := strings.ToLower(string([]rune(p.RejectText)[0]))
+
+ return y, n
+}
+
+// getSuffix returns the confirmation prompt suffix
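+// For example, with the default texts "Yes"/"No" and DefaultValue=false it yields "[y/N]".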
+func (p InteractiveConfirmPrinter) getSuffix() string {
+ y, n := p.getShortHandles()
+ if p.DefaultValue {
+ y = strings.ToUpper(y)
+ } else {
+ n = strings.ToUpper(n)
+ }
+
+ return p.SuffixStyle.Sprintf("[%s/%s]", y, n)
+}
diff --git a/vendor/github.com/pterm/pterm/interactive_continue_printer.go b/vendor/github.com/pterm/pterm/interactive_continue_printer.go
new file mode 100644
index 0000000..a6c23ff
--- /dev/null
+++ b/vendor/github.com/pterm/pterm/interactive_continue_printer.go
@@ -0,0 +1,197 @@
+package pterm
+
+import (
+ "fmt"
+ "strings"
+
+ "atomicgo.dev/cursor"
+ "atomicgo.dev/keyboard"
+ "atomicgo.dev/keyboard/keys"
+ "golang.org/x/text/cases"
+ "golang.org/x/text/language"
+
+ "github.com/pterm/pterm/internal"
+)
+
+// DefaultInteractiveContinue is the default InteractiveContinue printer.
+// Pressing "y" will return yes, "n" will return no, "a" returns all and "c" returns cancel.
+// Pressing enter without typing any letter will return the configured default value (by default set to "yes", the first option).
+var DefaultInteractiveContinue = InteractiveContinuePrinter{
+ DefaultValueIndex: 0,
+ DefaultText: "Do you want to continue",
+ TextStyle: &ThemeDefault.PrimaryStyle,
+ Options: []string{"yes", "no", "all", "cancel"},
+ OptionsStyle: &ThemeDefault.SuccessMessageStyle,
+ SuffixStyle: &ThemeDefault.SecondaryStyle,
+ Delimiter: ": ",
+}
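+
+// Usage sketch (the prompt text is made up):
+//
+//	action, _ := pterm.DefaultInteractiveContinue.Show("Overwrite the existing file?")
+//	// action is one of the configured options, e.g. "yes", "no", "all" or "cancel"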
+
+// InteractiveContinuePrinter is a printer for interactive continue prompts.
+type InteractiveContinuePrinter struct {
+ DefaultValueIndex int
+ DefaultText string
+ Delimiter string
+ TextStyle *Style
+ Options []string
+ OptionsStyle *Style
+ Handles []string
+ ShowShortHandles bool
+ SuffixStyle *Style
+}
+
+// WithDefaultText sets the default text.
+func (p InteractiveContinuePrinter) WithDefaultText(text string) *InteractiveContinuePrinter {
+ p.DefaultText = text
+ return &p
+}
+
+// WithDefaultValueIndex sets the default value, which will be returned when the user presses enter without typing any letter.
+func (p InteractiveContinuePrinter) WithDefaultValueIndex(value int) *InteractiveContinuePrinter {
+ if value >= len(p.Options) {
+ panic("Index out of range")
+ }
+ p.DefaultValueIndex = value
+ return &p
+}
+
+// WithDefaultValue sets the default value, which will be returned when the user presses enter without typing any letter.
+func (p InteractiveContinuePrinter) WithDefaultValue(value string) *InteractiveContinuePrinter {
+ for i, o := range p.Options {
+ if o == value {
+ p.DefaultValueIndex = i
+ break
+ }
+ }
+ return &p
+}
+
+// WithTextStyle sets the text style.
+func (p InteractiveContinuePrinter) WithTextStyle(style *Style) *InteractiveContinuePrinter {
+ p.TextStyle = style
+ return &p
+}
+
+// WithOptions sets the options.
+func (p InteractiveContinuePrinter) WithOptions(options []string) *InteractiveContinuePrinter {
+ p.Options = options
+ return &p
+}
+
+// WithHandles allows you to customize the short handles for the answers.
+func (p InteractiveContinuePrinter) WithHandles(handles []string) *InteractiveContinuePrinter {
+ if len(handles) != len(p.Options) {
+ Warning.Printf("%v is not a valid set of handles", handles)
+ p.setDefaultHandles()
+ return &p
+ }
+ p.Handles = handles
+ return &p
+}
+
+// WithShowShortHandles will set ShowShortHandles to true;
+// this makes the printer display the short handles instead of the full option names.
+func (p InteractiveContinuePrinter) WithShowShortHandles(b ...bool) *InteractiveContinuePrinter {
+ p.ShowShortHandles = internal.WithBoolean(b)
+ return &p
+}
+
+// WithOptionsStyle sets the continue style.
+func (p InteractiveContinuePrinter) WithOptionsStyle(style *Style) *InteractiveContinuePrinter {
+ p.OptionsStyle = style
+ return &p
+}
+
+// WithSuffixStyle sets the suffix style.
+func (p InteractiveContinuePrinter) WithSuffixStyle(style *Style) *InteractiveContinuePrinter {
+ p.SuffixStyle = style
+ return &p
+}
+
+// WithDelimiter sets the delimiter between the message and the input.
+func (p InteractiveContinuePrinter) WithDelimiter(delimiter string) *InteractiveContinuePrinter {
+ p.Delimiter = delimiter
+ return &p
+}
+
+// Show shows the continue prompt.
+//
+// Example:
+//
+// result, _ := pterm.DefaultInteractiveContinue.Show("Do you want to apply the changes?")
+// pterm.Println(result)
+func (p InteractiveContinuePrinter) Show(text ...string) (string, error) {
+ var result string
+
+ if len(text) == 0 || text[0] == "" {
+ text = []string{p.DefaultText}
+ }
+
+ p.TextStyle.Print(text[0] + " " + p.getSuffix() + p.Delimiter)
+
+ err := keyboard.Listen(func(keyInfo keys.Key) (stop bool, err error) {
+ if err != nil {
+ return false, fmt.Errorf("failed to get key: %w", err)
+ }
+ key := keyInfo.Code
+ char := keyInfo.String()
+
+ switch key {
+ case keys.RuneKey:
+ for i, c := range p.Handles {
+ if !p.ShowShortHandles {
+ c = string([]rune(c)[0])
+ }
+ if char == c || (i == p.DefaultValueIndex && strings.EqualFold(c, char)) {
+ p.OptionsStyle.Print(p.Options[i])
+ Println()
+ result = p.Options[i]
+ return true, nil
+ }
+ }
+ case keys.Enter:
+ p.OptionsStyle.Print(p.Options[p.DefaultValueIndex])
+ Println()
+ result = p.Options[p.DefaultValueIndex]
+ return true, nil
+ case keys.CtrlC:
+ internal.Exit(1)
+ return true, nil
+ }
+ return false, nil
+ })
+ cursor.StartOfLine()
+ return result, err
+}
+
+// getShortHandles returns the shorthand answers for the continuation prompt.
+func (p InteractiveContinuePrinter) getShortHandles() []string {
+ var handles []string
+ for _, option := range p.Options {
+ handles = append(handles, strings.ToLower(string([]rune(option)[0])))
+ }
+ handles[p.DefaultValueIndex] = strings.ToUpper(handles[p.DefaultValueIndex])
+
+ return handles
+}
+
+// setDefaultHandles initialises the handles
+func (p *InteractiveContinuePrinter) setDefaultHandles() {
+ if p.ShowShortHandles {
+ p.Handles = p.getShortHandles()
+ }
+
+ if len(p.Handles) == 0 {
+ p.Handles = make([]string, len(p.Options))
+ copy(p.Handles, p.Options)
+ p.Handles[p.DefaultValueIndex] = cases.Title(language.Und, cases.Compact).String(p.Handles[p.DefaultValueIndex])
+ }
+}
+
+// getSuffix returns the continuation prompt suffix
+func (p *InteractiveContinuePrinter) getSuffix() string {
+ if p.Handles == nil || len(p.Handles) != len(p.Options) {
+ p.setDefaultHandles()
+ }
+
+ return p.SuffixStyle.Sprintf("[%s]", strings.Join(p.Handles, "/"))
+}
diff --git a/vendor/github.com/pterm/pterm/interactive_multiselect_printer.go b/vendor/github.com/pterm/pterm/interactive_multiselect_printer.go
new file mode 100644
index 0000000..83c1756
--- /dev/null
+++ b/vendor/github.com/pterm/pterm/interactive_multiselect_printer.go
@@ -0,0 +1,402 @@
+package pterm
+
+import (
+ "fmt"
+ "sort"
+ "strings"
+
+ "atomicgo.dev/cursor"
+ "atomicgo.dev/keyboard"
+ "atomicgo.dev/keyboard/keys"
+ "github.com/lithammer/fuzzysearch/fuzzy"
+
+ "github.com/pterm/pterm/internal"
+)
+
+var (
+ // DefaultInteractiveMultiselect is the default InteractiveMultiselect printer.
+ DefaultInteractiveMultiselect = InteractiveMultiselectPrinter{
+ TextStyle: &ThemeDefault.PrimaryStyle,
+ DefaultText: "Please select your options",
+ Options: []string{},
+ OptionStyle: &ThemeDefault.DefaultText,
+ DefaultOptions: []string{},
+ MaxHeight: 5,
+ Selector: ">",
+ SelectorStyle: &ThemeDefault.SecondaryStyle,
+ Filter: true,
+ KeySelect: keys.Enter,
+ KeyConfirm: keys.Tab,
+ Checkmark: &ThemeDefault.Checkmark,
+ }
+)
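+
+// Usage sketch (the option names are made up):
+//
+//	options := []string{"apple", "banana", "cherry"}
+//	selected, _ := pterm.DefaultInteractiveMultiselect.WithOptions(options).Show()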
+
+// InteractiveMultiselectPrinter is a printer for interactive multiselect menus.
+type InteractiveMultiselectPrinter struct {
+ DefaultText string
+ TextStyle *Style
+ Options []string
+ OptionStyle *Style
+ DefaultOptions []string
+ MaxHeight int
+ Selector string
+ SelectorStyle *Style
+ Filter bool
+ Checkmark *Checkmark
+ OnInterruptFunc func()
+
+ selectedOption int
+ selectedOptions []int
+ text string
+ fuzzySearchString string
+ fuzzySearchMatches []string
+ displayedOptions []string
+ displayedOptionsStart int
+ displayedOptionsEnd int
+
+ // KeySelect is the select key. It cannot be keys.Space when Filter is enabled.
+ KeySelect keys.KeyCode
+
+ // KeyConfirm is the confirm key. It cannot be keys.Space when Filter is enabled.
+ KeyConfirm keys.KeyCode
+}
+
+// WithOptions sets the options.
+func (p InteractiveMultiselectPrinter) WithOptions(options []string) *InteractiveMultiselectPrinter {
+ p.Options = options
+ return &p
+}
+
+// WithDefaultOptions sets the default options.
+func (p InteractiveMultiselectPrinter) WithDefaultOptions(options []string) *InteractiveMultiselectPrinter {
+ p.DefaultOptions = options
+ return &p
+}
+
+// WithDefaultText sets the default text.
+func (p InteractiveMultiselectPrinter) WithDefaultText(text string) *InteractiveMultiselectPrinter {
+ p.DefaultText = text
+ return &p
+}
+
+// WithMaxHeight sets the maximum height of the select menu.
+func (p InteractiveMultiselectPrinter) WithMaxHeight(maxHeight int) *InteractiveMultiselectPrinter {
+ p.MaxHeight = maxHeight
+ return &p
+}
+
+// WithFilter sets the Filter option
+func (p InteractiveMultiselectPrinter) WithFilter(b ...bool) *InteractiveMultiselectPrinter {
+ p.Filter = internal.WithBoolean(b)
+ return &p
+}
+
+// WithKeySelect sets the select key.
+// It cannot be keys.Space when Filter is enabled.
+func (p InteractiveMultiselectPrinter) WithKeySelect(keySelect keys.KeyCode) *InteractiveMultiselectPrinter {
+ p.KeySelect = keySelect
+ return &p
+}
+
+// WithKeyConfirm sets the confirm key
+// It cannot be keys.Space when Filter is enabled.
+func (p InteractiveMultiselectPrinter) WithKeyConfirm(keyConfirm keys.KeyCode) *InteractiveMultiselectPrinter {
+ p.KeyConfirm = keyConfirm
+ return &p
+}
+
+// WithCheckmark sets the checkmark
+func (p InteractiveMultiselectPrinter) WithCheckmark(checkmark *Checkmark) *InteractiveMultiselectPrinter {
+ p.Checkmark = checkmark
+ return &p
+}
+
+// WithOnInterruptFunc sets the function to execute when the input reader is interrupted.
+func (p InteractiveMultiselectPrinter) WithOnInterruptFunc(exitFunc func()) *InteractiveMultiselectPrinter {
+ p.OnInterruptFunc = exitFunc
+ return &p
+}
+
+// Show shows the interactive multiselect menu and returns the selected entries.
+func (p *InteractiveMultiselectPrinter) Show(text ...string) ([]string, error) {
+ // should be the first defer statement to make sure it is executed last
+ // and all the needed cleanup can be done before
+ cancel, exit := internal.NewCancelationSignal(p.OnInterruptFunc)
+ defer exit()
+
+ if len(text) == 0 || Sprint(text[0]) == "" {
+ text = []string{p.DefaultText}
+ }
+
+ p.text = p.TextStyle.Sprint(text[0])
+ p.fuzzySearchMatches = append([]string{}, p.Options...)
+
+ if p.MaxHeight == 0 {
+ p.MaxHeight = DefaultInteractiveMultiselect.MaxHeight
+ }
+
+ maxHeight := p.MaxHeight
+ if maxHeight > len(p.fuzzySearchMatches) {
+ maxHeight = len(p.fuzzySearchMatches)
+ }
+
+ if len(p.Options) == 0 {
+ return nil, fmt.Errorf("no options provided")
+ }
+
+ p.displayedOptions = append([]string{}, p.fuzzySearchMatches[:maxHeight]...)
+ p.displayedOptionsStart = 0
+ p.displayedOptionsEnd = maxHeight
+
+ for _, option := range p.DefaultOptions {
+ p.selectOption(option)
+ }
+
+ area, err := DefaultArea.Start(p.renderSelectMenu())
+ defer area.Stop()
+ if err != nil {
+ return nil, fmt.Errorf("could not start area: %w", err)
+ }
+
+ if p.Filter && (p.KeyConfirm == keys.Space || p.KeySelect == keys.Space) {
+ return nil, fmt.Errorf("if filter/search is active, keys.Space can not be used for KeySelect or KeyConfirm")
+ }
+
+ area.Update(p.renderSelectMenu())
+
+ cursor.Hide()
+ defer cursor.Show()
+ err = keyboard.Listen(func(keyInfo keys.Key) (stop bool, err error) {
+ key := keyInfo.Code
+
+ if p.MaxHeight > len(p.fuzzySearchMatches) {
+ maxHeight = len(p.fuzzySearchMatches)
+ } else {
+ maxHeight = p.MaxHeight
+ }
+
+ switch key {
+ case p.KeyConfirm:
+ if len(p.fuzzySearchMatches) == 0 {
+ return false, nil
+ }
+ area.Update(p.renderFinishedMenu())
+ return true, nil
+ case p.KeySelect:
+ if len(p.fuzzySearchMatches) > 0 {
+ // Select option if not already selected
+ p.selectOption(p.fuzzySearchMatches[p.selectedOption])
+ }
+ area.Update(p.renderSelectMenu())
+ case keys.RuneKey:
+ if p.Filter {
+ // Fuzzy search for options
+ // append to fuzzy search string
+ p.fuzzySearchString += keyInfo.String()
+ p.selectedOption = 0
+ p.displayedOptionsStart = 0
+ p.displayedOptionsEnd = maxHeight
+ p.displayedOptions = append([]string{}, p.fuzzySearchMatches[:maxHeight]...)
+ }
+ area.Update(p.renderSelectMenu())
+ case keys.Space:
+ if p.Filter {
+ p.fuzzySearchString += " "
+ p.selectedOption = 0
+ area.Update(p.renderSelectMenu())
+ }
+ case keys.Backspace:
+ // Remove last character from fuzzy search string
+ if p.fuzzySearchString != "" {
+ // Handle UTF-8 characters
+ p.fuzzySearchString = string([]rune(p.fuzzySearchString)[:len([]rune(p.fuzzySearchString))-1])
+ }
+
+ if p.fuzzySearchString == "" {
+ p.fuzzySearchMatches = append([]string{}, p.Options...)
+ }
+
+ p.renderSelectMenu()
+
+ if len(p.fuzzySearchMatches) > p.MaxHeight {
+ maxHeight = p.MaxHeight
+ } else {
+ maxHeight = len(p.fuzzySearchMatches)
+ }
+
+ p.selectedOption = 0
+ p.displayedOptionsStart = 0
+ p.displayedOptionsEnd = maxHeight
+ p.displayedOptions = append([]string{}, p.fuzzySearchMatches[p.displayedOptionsStart:p.displayedOptionsEnd]...)
+
+ area.Update(p.renderSelectMenu())
+ case keys.Left:
+ // Unselect all options
+ p.selectedOptions = []int{}
+ area.Update(p.renderSelectMenu())
+ case keys.Right:
+ // Select all options
+ p.selectedOptions = []int{}
+ for i := 0; i < len(p.Options); i++ {
+ p.selectedOptions = append(p.selectedOptions, i)
+ }
+ area.Update(p.renderSelectMenu())
+ case keys.Up, keys.CtrlP:
+ if len(p.fuzzySearchMatches) == 0 {
+ return false, nil
+ }
+ if p.selectedOption > 0 {
+ p.selectedOption--
+ if p.selectedOption < p.displayedOptionsStart {
+ p.displayedOptionsStart--
+ p.displayedOptionsEnd--
+ if p.displayedOptionsStart < 0 {
+ p.displayedOptionsStart = 0
+ p.displayedOptionsEnd = maxHeight
+ }
+ p.displayedOptions = append([]string{}, p.fuzzySearchMatches[p.displayedOptionsStart:p.displayedOptionsEnd]...)
+ }
+ } else {
+ p.selectedOption = len(p.fuzzySearchMatches) - 1
+ p.displayedOptionsStart = len(p.fuzzySearchMatches) - maxHeight
+ p.displayedOptionsEnd = len(p.fuzzySearchMatches)
+ p.displayedOptions = append([]string{}, p.fuzzySearchMatches[p.displayedOptionsStart:p.displayedOptionsEnd]...)
+ }
+
+ area.Update(p.renderSelectMenu())
+ case keys.Down, keys.CtrlN:
+ if len(p.fuzzySearchMatches) == 0 {
+ return false, nil
+ }
+ p.displayedOptions = p.fuzzySearchMatches[:maxHeight]
+ if p.selectedOption < len(p.fuzzySearchMatches)-1 {
+ p.selectedOption++
+ if p.selectedOption >= p.displayedOptionsEnd {
+ p.displayedOptionsStart++
+ p.displayedOptionsEnd++
+ p.displayedOptions = append([]string{}, p.fuzzySearchMatches[p.displayedOptionsStart:p.displayedOptionsEnd]...)
+ }
+ } else {
+ p.selectedOption = 0
+ p.displayedOptionsStart = 0
+ p.displayedOptionsEnd = maxHeight
+ p.displayedOptions = append([]string{}, p.fuzzySearchMatches[p.displayedOptionsStart:p.displayedOptionsEnd]...)
+ }
+
+ area.Update(p.renderSelectMenu())
+ case keys.CtrlC:
+ cancel()
+ return true, nil
+ }
+
+ return false, nil
+ })
+ if err != nil {
+ Error.Println(err)
+ return nil, fmt.Errorf("failed to start keyboard listener: %w", err)
+ }
+
+ var result []string
+ for _, selectedOption := range p.selectedOptions {
+ result = append(result, p.Options[selectedOption])
+ }
+
+ return result, nil
+}
+
+func (p InteractiveMultiselectPrinter) findOptionByText(text string) int {
+ for i, option := range p.Options {
+ if option == text {
+ return i
+ }
+ }
+ return -1
+}
+
+func (p *InteractiveMultiselectPrinter) isSelected(optionText string) bool {
+ for _, selectedOption := range p.selectedOptions {
+ if p.Options[selectedOption] == optionText {
+ return true
+ }
+ }
+
+ return false
+}
+
+func (p *InteractiveMultiselectPrinter) selectOption(optionText string) {
+ if p.isSelected(optionText) {
+ // Remove from selected options
+ for i, selectedOption := range p.selectedOptions {
+ if p.Options[selectedOption] == optionText {
+ p.selectedOptions = append(p.selectedOptions[:i], p.selectedOptions[i+1:]...)
+ break
+ }
+ }
+ } else {
+ // Add to selected options
+ p.selectedOptions = append(p.selectedOptions, p.findOptionByText(optionText))
+ }
+}
+
+func (p *InteractiveMultiselectPrinter) renderSelectMenu() string {
+ var content strings.Builder
+ content.WriteString(Sprintf("%s: %s\n", p.text, p.fuzzySearchString))
+
+ // find options that match fuzzy search string
+ rankedResults := fuzzy.RankFindFold(p.fuzzySearchString, p.Options)
+ // map rankedResults to fuzzySearchMatches
+ p.fuzzySearchMatches = []string{}
+ if len(rankedResults) != len(p.Options) {
+ sort.Sort(rankedResults)
+ }
+ for _, result := range rankedResults {
+ p.fuzzySearchMatches = append(p.fuzzySearchMatches, result.Target)
+ }
+
+ indexMapper := make([]string, len(p.fuzzySearchMatches))
+ for i := 0; i < len(p.fuzzySearchMatches); i++ {
+ // if in displayed options range
+ if i >= p.displayedOptionsStart && i < p.displayedOptionsEnd {
+ indexMapper[i] = p.fuzzySearchMatches[i]
+ }
+ }
+
+ for i, option := range indexMapper {
+ if option == "" {
+ continue
+ }
+ var checkmark string
+ if p.isSelected(option) {
+ checkmark = fmt.Sprintf("[%s]", p.Checkmark.Checked)
+ } else {
+ checkmark = fmt.Sprintf("[%s]", p.Checkmark.Unchecked)
+ }
+ if i == p.selectedOption {
+ content.WriteString(Sprintf("%s %s %s\n", p.renderSelector(), checkmark, option))
+ } else {
+ content.WriteString(Sprintf(" %s %s\n", checkmark, option))
+ }
+ }
+
+ help := fmt.Sprintf("%s: %s | %s: %s | left: %s | right: %s", p.KeySelect, Bold.Sprint("select"), p.KeyConfirm, Bold.Sprint("confirm"), Bold.Sprint("none"), Bold.Sprint("all"))
+ if p.Filter {
+ help += fmt.Sprintf("| type to %s", Bold.Sprint("filter"))
+ }
+ content.WriteString(ThemeDefault.SecondaryStyle.Sprintfln("%s", help))
+
+ return content.String()
+}
+
+func (p InteractiveMultiselectPrinter) renderFinishedMenu() string {
+ var content string
+ content += Sprintf("%s: %s\n", p.text, p.fuzzySearchString)
+ for _, option := range p.selectedOptions {
+ content += Sprintf(" %s %s\n", p.renderSelector(), p.Options[option])
+ }
+
+ return content
+}
+
+func (p InteractiveMultiselectPrinter) renderSelector() string {
+ return p.SelectorStyle.Sprint(p.Selector)
+}
diff --git a/vendor/github.com/pterm/pterm/interactive_select_printer.go b/vendor/github.com/pterm/pterm/interactive_select_printer.go
new file mode 100644
index 0000000..71e810c
--- /dev/null
+++ b/vendor/github.com/pterm/pterm/interactive_select_printer.go
@@ -0,0 +1,319 @@
+package pterm
+
+import (
+ "fmt"
+ "math"
+ "sort"
+ "strings"
+
+ "atomicgo.dev/cursor"
+ "atomicgo.dev/keyboard"
+ "atomicgo.dev/keyboard/keys"
+ "github.com/lithammer/fuzzysearch/fuzzy"
+ "github.com/pterm/pterm/internal"
+)
+
+var (
+ // DefaultInteractiveSelect is the default InteractiveSelect printer.
+ DefaultInteractiveSelect = InteractiveSelectPrinter{
+ TextStyle: &ThemeDefault.PrimaryStyle,
+ DefaultText: "Please select an option",
+ Options: []string{},
+ OptionStyle: &ThemeDefault.DefaultText,
+ DefaultOption: "",
+ MaxHeight: 5,
+ Selector: ">",
+ SelectorStyle: &ThemeDefault.SecondaryStyle,
+ Filter: true,
+ }
+)
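+
+// Usage sketch (the option names and prompt text are made up):
+//
+//	choice, _ := pterm.DefaultInteractiveSelect.WithOptions([]string{"dev", "staging", "prod"}).Show("Select an environment")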
+
+// InteractiveSelectPrinter is a printer for interactive select menus.
+type InteractiveSelectPrinter struct {
+ TextStyle *Style
+ DefaultText string
+ Options []string
+ OptionStyle *Style
+ DefaultOption string
+ MaxHeight int
+ Selector string
+ SelectorStyle *Style
+ OnInterruptFunc func()
+ Filter bool
+
+ selectedOption int
+ result string
+ text string
+ fuzzySearchString string
+ fuzzySearchMatches []string
+ displayedOptions []string
+ displayedOptionsStart int
+ displayedOptionsEnd int
+}
+
+// WithDefaultText sets the default text.
+func (p InteractiveSelectPrinter) WithDefaultText(text string) *InteractiveSelectPrinter {
+ p.DefaultText = text
+ return &p
+}
+
+// WithOptions sets the options.
+func (p InteractiveSelectPrinter) WithOptions(options []string) *InteractiveSelectPrinter {
+ p.Options = options
+ return &p
+}
+
+// WithDefaultOption sets the default option.
+func (p InteractiveSelectPrinter) WithDefaultOption(option string) *InteractiveSelectPrinter {
+ p.DefaultOption = option
+ return &p
+}
+
+// WithMaxHeight sets the maximum height of the select menu.
+func (p InteractiveSelectPrinter) WithMaxHeight(maxHeight int) *InteractiveSelectPrinter {
+ p.MaxHeight = maxHeight
+ return &p
+}
+
+// WithOnInterruptFunc sets the function to execute when the input reader is interrupted.
+func (p InteractiveSelectPrinter) WithOnInterruptFunc(exitFunc func()) *InteractiveSelectPrinter {
+ p.OnInterruptFunc = exitFunc
+ return &p
+}
+
+// WithFilter sets the Filter option
+func (p InteractiveSelectPrinter) WithFilter(b ...bool) *InteractiveSelectPrinter {
+ p.Filter = internal.WithBoolean(b)
+ return &p
+}
+
+// Show shows the interactive select menu and returns the selected entry.
+func (p *InteractiveSelectPrinter) Show(text ...string) (string, error) {
+ // should be the first defer statement to make sure it is executed last
+ // and all the needed cleanup can be done before
+ cancel, exit := internal.NewCancelationSignal(p.OnInterruptFunc)
+ defer exit()
+
+ if len(text) == 0 || Sprint(text[0]) == "" {
+ text = []string{p.DefaultText}
+ }
+
+ p.text = p.TextStyle.Sprint(text[0])
+ p.fuzzySearchMatches = append([]string{}, p.Options...)
+
+ if p.MaxHeight == 0 {
+ p.MaxHeight = DefaultInteractiveSelect.MaxHeight
+ }
+
+ maxHeight := p.MaxHeight
+ if maxHeight > len(p.fuzzySearchMatches) {
+ maxHeight = len(p.fuzzySearchMatches)
+ }
+
+ if len(p.Options) == 0 {
+ return "", fmt.Errorf("no options provided")
+ }
+
+ p.displayedOptions = append([]string{}, p.fuzzySearchMatches[:maxHeight]...)
+ p.displayedOptionsStart = 0
+ p.displayedOptionsEnd = maxHeight
+
+ // Get index of default option
+ if p.DefaultOption != "" {
+ for i, option := range p.Options {
+ if option == p.DefaultOption {
+ p.selectedOption = i
+ if i > 0 && len(p.Options) > maxHeight {
+ p.displayedOptionsEnd = int(math.Min(float64(i-1+maxHeight), float64(len(p.Options))))
+ p.displayedOptionsStart = p.displayedOptionsEnd - maxHeight
+ } else {
+ p.displayedOptionsStart = 0
+ p.displayedOptionsEnd = maxHeight
+ }
+ p.displayedOptions = p.Options[p.displayedOptionsStart:p.displayedOptionsEnd]
+ break
+ }
+ }
+ }
+
+ area, err := DefaultArea.Start(p.renderSelectMenu())
+ defer area.Stop()
+ if err != nil {
+ return "", fmt.Errorf("could not start area: %w", err)
+ }
+
+ area.Update(p.renderSelectMenu())
+
+ cursor.Hide()
+ defer cursor.Show()
+
+ err = keyboard.Listen(func(keyInfo keys.Key) (stop bool, err error) {
+ key := keyInfo.Code
+
+ if p.MaxHeight > len(p.fuzzySearchMatches) {
+ maxHeight = len(p.fuzzySearchMatches)
+ } else {
+ maxHeight = p.MaxHeight
+ }
+
+ switch key {
+ case keys.RuneKey:
+ if p.Filter {
+ // Fuzzy search for options
+ // append to fuzzy search string
+ p.fuzzySearchString += keyInfo.String()
+ p.selectedOption = 0
+ p.displayedOptionsStart = 0
+ p.displayedOptionsEnd = maxHeight
+ p.displayedOptions = append([]string{}, p.fuzzySearchMatches[:maxHeight]...)
+ area.Update(p.renderSelectMenu())
+ }
+ case keys.Space:
+ p.fuzzySearchString += " "
+ p.selectedOption = 0
+ area.Update(p.renderSelectMenu())
+ case keys.Backspace:
+ // Remove last character from fuzzy search string
+ if p.fuzzySearchString != "" {
+ // Handle UTF-8 characters
+ p.fuzzySearchString = string([]rune(p.fuzzySearchString)[:len([]rune(p.fuzzySearchString))-1])
+ }
+
+ if p.fuzzySearchString == "" {
+ p.fuzzySearchMatches = append([]string{}, p.Options...)
+ }
+
+ p.renderSelectMenu()
+
+ if len(p.fuzzySearchMatches) > p.MaxHeight {
+ maxHeight = p.MaxHeight
+ } else {
+ maxHeight = len(p.fuzzySearchMatches)
+ }
+
+ p.selectedOption = 0
+ p.displayedOptionsStart = 0
+ p.displayedOptionsEnd = maxHeight
+ p.displayedOptions = append([]string{}, p.fuzzySearchMatches[p.displayedOptionsStart:p.displayedOptionsEnd]...)
+
+ area.Update(p.renderSelectMenu())
+ case keys.Up, keys.CtrlP:
+ if len(p.fuzzySearchMatches) == 0 {
+ return false, nil
+ }
+ if p.selectedOption > 0 {
+ p.selectedOption--
+ if p.selectedOption < p.displayedOptionsStart {
+ p.displayedOptionsStart--
+ p.displayedOptionsEnd--
+ if p.displayedOptionsStart < 0 {
+ p.displayedOptionsStart = 0
+ p.displayedOptionsEnd = maxHeight
+ }
+ p.displayedOptions = append([]string{}, p.fuzzySearchMatches[p.displayedOptionsStart:p.displayedOptionsEnd]...)
+ }
+ } else {
+ p.selectedOption = len(p.fuzzySearchMatches) - 1
+ p.displayedOptionsStart = len(p.fuzzySearchMatches) - maxHeight
+ p.displayedOptionsEnd = len(p.fuzzySearchMatches)
+ p.displayedOptions = append([]string{}, p.fuzzySearchMatches[p.displayedOptionsStart:p.displayedOptionsEnd]...)
+ }
+
+ area.Update(p.renderSelectMenu())
+ case keys.Down, keys.CtrlN:
+ if len(p.fuzzySearchMatches) == 0 {
+ return false, nil
+ }
+ p.displayedOptions = p.fuzzySearchMatches[:maxHeight]
+ if p.selectedOption < len(p.fuzzySearchMatches)-1 {
+ p.selectedOption++
+ if p.selectedOption >= p.displayedOptionsEnd {
+ p.displayedOptionsStart++
+ p.displayedOptionsEnd++
+ p.displayedOptions = append([]string{}, p.fuzzySearchMatches[p.displayedOptionsStart:p.displayedOptionsEnd]...)
+ }
+ } else {
+ p.selectedOption = 0
+ p.displayedOptionsStart = 0
+ p.displayedOptionsEnd = maxHeight
+ p.displayedOptions = append([]string{}, p.fuzzySearchMatches[p.displayedOptionsStart:p.displayedOptionsEnd]...)
+ }
+
+ area.Update(p.renderSelectMenu())
+ case keys.CtrlC:
+ cancel()
+ return true, nil
+ case keys.Enter:
+ if len(p.fuzzySearchMatches) == 0 {
+ return false, nil
+ }
+ area.Update(p.renderFinishedMenu())
+ return true, nil
+ }
+
+ return false, nil
+ })
+ if err != nil {
+ Error.Println(err)
+ return "", fmt.Errorf("failed to start keyboard listener: %w", err)
+ }
+
+ return p.result, nil
+}
+
+func (p *InteractiveSelectPrinter) renderSelectMenu() string {
+ var content strings.Builder
+ if p.Filter {
+ content.WriteString(Sprintf("%s %s: %s\n", p.text, p.SelectorStyle.Sprint("[type to search]"), p.fuzzySearchString))
+ } else {
+ content.WriteString(Sprintf("%s:\n", p.text))
+ }
+
+ // find options that match fuzzy search string
+ rankedResults := fuzzy.RankFindFold(p.fuzzySearchString, p.Options)
+ // map rankedResults to fuzzySearchMatches
+ p.fuzzySearchMatches = []string{}
+ if len(rankedResults) != len(p.Options) {
+ sort.Sort(rankedResults)
+ }
+ for _, result := range rankedResults {
+ p.fuzzySearchMatches = append(p.fuzzySearchMatches, result.Target)
+ }
+
+ if len(p.fuzzySearchMatches) != 0 {
+ p.result = p.fuzzySearchMatches[p.selectedOption]
+ }
+
+ indexMapper := make([]string, len(p.fuzzySearchMatches))
+ for i := 0; i < len(p.fuzzySearchMatches); i++ {
+ // if in displayed options range
+ if i >= p.displayedOptionsStart && i < p.displayedOptionsEnd {
+ indexMapper[i] = p.fuzzySearchMatches[i]
+ }
+ }
+
+ for i, option := range indexMapper {
+ if option == "" {
+ continue
+ }
+ if i == p.selectedOption {
+ content.WriteString(Sprintf("%s %s\n", p.renderSelector(), p.OptionStyle.Sprint(option)))
+ } else {
+ content.WriteString(Sprintf(" %s\n", p.OptionStyle.Sprint(option)))
+ }
+ }
+
+ return content.String()
+}
+
+func (p InteractiveSelectPrinter) renderFinishedMenu() string {
+ var content string
+ content += Sprintf("%s: %s\n", p.text, p.fuzzySearchString)
+ content += Sprintf(" %s %s\n", p.renderSelector(), p.result)
+
+ return content
+}
+
+func (p InteractiveSelectPrinter) renderSelector() string {
+ return p.SelectorStyle.Sprint(p.Selector)
+}
diff --git a/vendor/github.com/pterm/pterm/interactive_textinput_printer.go b/vendor/github.com/pterm/pterm/interactive_textinput_printer.go
new file mode 100644
index 0000000..f649cf8
--- /dev/null
+++ b/vendor/github.com/pterm/pterm/interactive_textinput_printer.go
@@ -0,0 +1,306 @@
+package pterm
+
+import (
+ "strings"
+
+ "atomicgo.dev/cursor"
+ "atomicgo.dev/keyboard"
+ "atomicgo.dev/keyboard/keys"
+ "github.com/mattn/go-runewidth"
+
+ "github.com/pterm/pterm/internal"
+)
+
+// DefaultInteractiveTextInput is the default InteractiveTextInput printer.
+var DefaultInteractiveTextInput = InteractiveTextInputPrinter{
+ DefaultText: "Input text",
+ Delimiter: ": ",
+ TextStyle: &ThemeDefault.PrimaryStyle,
+ Mask: "",
+}
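+
+// Usage sketch (the prompt texts are made up):
+//
+//	name, _ := pterm.DefaultInteractiveTextInput.WithDefaultValue("anonymous").Show("Enter your name")
+//	secret, _ := pterm.DefaultInteractiveTextInput.WithMask("*").Show("Enter a password")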
+
+// InteractiveTextInputPrinter is a printer for interactive text input prompts.
+type InteractiveTextInputPrinter struct {
+ TextStyle *Style
+ DefaultText string
+ DefaultValue string
+ Delimiter string
+ MultiLine bool
+ Mask string
+ OnInterruptFunc func()
+
+ input []string
+ cursorXPos int
+ cursorYPos int
+ text string
+ startedTyping bool
+ valueStyle *Style
+}
+
+// WithDefaultText sets the default text.
+func (p InteractiveTextInputPrinter) WithDefaultText(text string) *InteractiveTextInputPrinter {
+ p.DefaultText = text
+ return &p
+}
+
+// WithDefaultValue sets the default value.
+func (p InteractiveTextInputPrinter) WithDefaultValue(value string) *InteractiveTextInputPrinter {
+ p.DefaultValue = value
+ return &p
+}
+
+// WithTextStyle sets the text style.
+func (p InteractiveTextInputPrinter) WithTextStyle(style *Style) *InteractiveTextInputPrinter {
+ p.TextStyle = style
+ return &p
+}
+
+// WithMultiLine sets the multi line flag.
+func (p InteractiveTextInputPrinter) WithMultiLine(multiLine ...bool) *InteractiveTextInputPrinter {
+ p.MultiLine = internal.WithBoolean(multiLine)
+ return &p
+}
+
+// WithMask sets the mask.
+func (p InteractiveTextInputPrinter) WithMask(mask string) *InteractiveTextInputPrinter {
+ p.Mask = mask
+ return &p
+}
+
+// WithOnInterruptFunc sets the function to execute on exit of the input reader
+func (p InteractiveTextInputPrinter) WithOnInterruptFunc(exitFunc func()) *InteractiveTextInputPrinter {
+ p.OnInterruptFunc = exitFunc
+ return &p
+}
+
+// WithDelimiter sets the delimiter between the message and the input.
+func (p InteractiveTextInputPrinter) WithDelimiter(delimiter string) *InteractiveTextInputPrinter {
+ p.Delimiter = delimiter
+ return &p
+}
+
+// Show shows the interactive text input prompt and returns the entered text.
+func (p InteractiveTextInputPrinter) Show(text ...string) (string, error) {
+ // should be the first defer statement to make sure it is executed last
+ // and all the needed cleanup can be done before
+ cancel, exit := internal.NewCancelationSignal(p.OnInterruptFunc)
+ defer exit()
+
+ var areaText string
+
+ if len(text) == 0 || text[0] == "" {
+ text = []string{p.DefaultText}
+ }
+
+ if p.MultiLine {
+ areaText = p.TextStyle.Sprintfln("%s %s %s", text[0], ThemeDefault.SecondaryStyle.Sprint("[Press tab to submit]"), p.Delimiter)
+ } else {
+ areaText = p.TextStyle.Sprintf("%s%s", text[0], p.Delimiter)
+ }
+
+ p.text = areaText
+ area := cursor.NewArea()
+ area.Update(areaText)
+ area.StartOfLine()
+
+ if !p.MultiLine {
+ cursor.Right(runewidth.StringWidth(RemoveColorFromString(areaText)))
+ }
+
+ if p.DefaultValue != "" {
+ p.input = append(p.input, p.DefaultValue)
+ p.updateArea(&area)
+ }
+
+ err := keyboard.Listen(func(key keys.Key) (stop bool, err error) {
+ if !p.MultiLine {
+ p.cursorYPos = 0
+ }
+ if len(p.input) == 0 {
+ p.input = append(p.input, "")
+ }
+
+ switch key.Code {
+ case keys.Tab:
+ if p.MultiLine {
+ area.Bottom()
+ return true, nil
+ }
+ case keys.Enter:
+ if p.DefaultValue != "" && !p.startedTyping {
+ for i := range p.input {
+ p.input[i] = RemoveColorFromString(p.input[i])
+ }
+
+ if p.MultiLine {
+ area.Bottom()
+ }
+ return true, nil
+ }
+
+ if p.MultiLine {
+ if key.AltPressed {
+ p.cursorXPos = 0
+ }
+ appendAfterY := append([]string{}, p.input[p.cursorYPos+1:]...)
+ appendAfterX := string(append([]rune{}, []rune(p.input[p.cursorYPos])[len([]rune(p.input[p.cursorYPos]))+p.cursorXPos:]...))
+ p.input[p.cursorYPos] = string(append([]rune{}, []rune(p.input[p.cursorYPos])[:len([]rune(p.input[p.cursorYPos]))+p.cursorXPos]...))
+ p.input = append(p.input[:p.cursorYPos+1], appendAfterX)
+ p.input = append(p.input, appendAfterY...)
+ p.cursorYPos++
+ p.cursorXPos = -internal.GetStringMaxWidth(p.input[p.cursorYPos])
+ cursor.StartOfLine()
+ } else {
+ return true, nil
+ }
+ case keys.RuneKey:
+ if !p.startedTyping {
+ p.startedTyping = true
+ }
+ p.input[p.cursorYPos] = string(append([]rune(p.input[p.cursorYPos])[:len([]rune(p.input[p.cursorYPos]))+p.cursorXPos], append([]rune(key.String()), []rune(p.input[p.cursorYPos])[len([]rune(p.input[p.cursorYPos]))+p.cursorXPos:]...)...))
+ case keys.Space:
+ if !p.startedTyping {
+ p.startedTyping = true
+ }
+ p.input[p.cursorYPos] = string(append([]rune(p.input[p.cursorYPos])[:len([]rune(p.input[p.cursorYPos]))+p.cursorXPos], append([]rune(" "), []rune(p.input[p.cursorYPos])[len([]rune(p.input[p.cursorYPos]))+p.cursorXPos:]...)...))
+ case keys.Backspace:
+ if !p.startedTyping {
+ p.startedTyping = true
+ }
+ if len([]rune(p.input[p.cursorYPos]))+p.cursorXPos > 0 {
+ p.input[p.cursorYPos] = string(append([]rune(p.input[p.cursorYPos])[:len([]rune(p.input[p.cursorYPos]))-1+p.cursorXPos], []rune(p.input[p.cursorYPos])[len([]rune(p.input[p.cursorYPos]))+p.cursorXPos:]...))
+ } else if p.cursorYPos > 0 {
+ p.input[p.cursorYPos-1] += p.input[p.cursorYPos]
+ appendAfterY := append([]string{}, p.input[p.cursorYPos+1:]...)
+ p.input = append(p.input[:p.cursorYPos], appendAfterY...)
+ p.cursorXPos = 0
+ p.cursorYPos--
+ }
+ case keys.Delete:
+ if !p.startedTyping {
+ p.input = []string{""}
+ p.startedTyping = true
+ return false, nil
+ }
+ if len([]rune(p.input[p.cursorYPos]))+p.cursorXPos < len([]rune(p.input[p.cursorYPos])) {
+ p.input[p.cursorYPos] = string(append([]rune(p.input[p.cursorYPos])[:len([]rune(p.input[p.cursorYPos]))+p.cursorXPos], []rune(p.input[p.cursorYPos])[len([]rune(p.input[p.cursorYPos]))+p.cursorXPos+1:]...))
+ p.cursorXPos++
+ } else if p.cursorYPos < len(p.input)-1 {
+ p.input[p.cursorYPos] += p.input[p.cursorYPos+1]
+ appendAfterY := append([]string{}, p.input[p.cursorYPos+2:]...)
+ p.input = append(p.input[:p.cursorYPos+1], appendAfterY...)
+ p.cursorXPos = 0
+ }
+ case keys.CtrlC:
+ cancel()
+ return true, nil
+ case keys.Down:
+ if !p.MultiLine {
+ return false, nil
+ }
+ if !p.startedTyping {
+ p.input = []string{""}
+ p.startedTyping = true
+ }
+ if p.cursorYPos+1 < len(p.input) {
+ p.cursorXPos = (internal.GetStringMaxWidth(p.input[p.cursorYPos]) + p.cursorXPos) - internal.GetStringMaxWidth(p.input[p.cursorYPos+1])
+ if p.cursorXPos > 0 {
+ p.cursorXPos = 0
+ }
+ p.cursorYPos++
+ }
+ case keys.Up:
+ if !p.MultiLine {
+ return false, nil
+ }
+ if !p.startedTyping {
+ p.input = []string{""}
+ p.startedTyping = true
+ }
+ if p.cursorYPos > 0 {
+ p.cursorXPos = (internal.GetStringMaxWidth(p.input[p.cursorYPos]) + p.cursorXPos) - internal.GetStringMaxWidth(p.input[p.cursorYPos-1])
+ if p.cursorXPos > 0 {
+ p.cursorXPos = 0
+ }
+ p.cursorYPos--
+ }
+ }
+
+ if internal.GetStringMaxWidth(p.input[p.cursorYPos]) > 0 {
+ switch key.Code {
+ case keys.Right:
+ if p.cursorXPos < 0 {
+ p.cursorXPos++
+ } else if p.cursorYPos < len(p.input)-1 {
+ p.cursorYPos++
+ p.cursorXPos = -internal.GetStringMaxWidth(p.input[p.cursorYPos])
+ }
+ case keys.Left:
+ if p.cursorXPos+internal.GetStringMaxWidth(p.input[p.cursorYPos]) > 0 {
+ p.cursorXPos--
+ } else if p.cursorYPos > 0 {
+ p.cursorYPos--
+ p.cursorXPos = 0
+ }
+ }
+ }
+
+ p.updateArea(&area)
+
+ return false, nil
+ })
+ if err != nil {
+ return "", err
+ }
+
+ // Add new line
+ Println()
+
+ for i, s := range p.input {
+ if i < len(p.input)-1 {
+ areaText += s + "\n"
+ } else {
+ areaText += s
+ }
+ }
+
+ if !p.startedTyping {
+ return p.DefaultValue, nil
+ }
+
+ return strings.ReplaceAll(areaText, p.text, ""), nil
+}
+
+func (p InteractiveTextInputPrinter) updateArea(area *cursor.Area) string {
+ if !p.MultiLine {
+ p.cursorYPos = 0
+ }
+ areaText := p.text
+
+ for i, s := range p.input {
+ if i < len(p.input)-1 {
+ areaText += s + "\n"
+ } else {
+ areaText += s
+ }
+ }
+
+ if p.Mask != "" {
+ areaText = p.text + strings.Repeat(p.Mask, internal.GetStringMaxWidth(areaText)-internal.GetStringMaxWidth(p.text))
+ }
+
+ if p.cursorXPos+internal.GetStringMaxWidth(p.input[p.cursorYPos]) < 1 {
+ p.cursorXPos = -internal.GetStringMaxWidth(p.input[p.cursorYPos])
+ }
+
+ area.Update(Gray(areaText))
+ area.Top()
+ area.Down(p.cursorYPos + 1)
+ area.StartOfLine()
+ if p.MultiLine {
+ cursor.Right(internal.GetStringMaxWidth(p.input[p.cursorYPos]) + p.cursorXPos)
+ } else {
+ cursor.Right(internal.GetStringMaxWidth(areaText) + p.cursorXPos)
+ }
+ return areaText
+}
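The cursor bookkeeping above keeps cursorXPos as a non-positive offset from the end of the current line: 0 means the cursor sits after the last rune, and -GetStringMaxWidth(line) is the first column. A minimal sketch of that convention (columnFor is a hypothetical helper, not part of the vendored file):

// columnFor returns the on-screen column implied by the convention used in
// the key handlers above: line width plus a non-positive cursor offset.
func columnFor(line string, cursorXPos int) int {
	width := internal.GetStringMaxWidth(line) // printable width, color codes stripped
	if cursorXPos > 0 {
		cursorXPos = 0 // the printer clamps positive offsets back to the line end
	}
	if cursorXPos < -width {
		cursorXPos = -width // leftmost column, as set when keys.Right switches lines
	}
	return width + cursorXPos
}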
diff --git a/vendor/github.com/pterm/pterm/interface_live_printer.go b/vendor/github.com/pterm/pterm/interface_live_printer.go
new file mode 100644
index 0000000..69dce34
--- /dev/null
+++ b/vendor/github.com/pterm/pterm/interface_live_printer.go
@@ -0,0 +1,18 @@
+package pterm
+
+import "io"
+
+// LivePrinter is a printer which can update its output live.
+type LivePrinter interface {
+ // GenericStart runs Start, but returns a LivePrinter.
+ // This is used for the interface LivePrinter.
+ // You most likely want to use Start instead of this in your program.
+ GenericStart() (*LivePrinter, error)
+
+ // GenericStop runs Stop, but returns a LivePrinter.
+ // This is used for the interface LivePrinter.
+ // You most likely want to use Stop instead of this in your program.
+ GenericStop() (*LivePrinter, error)
+
+ SetWriter(writer io.Writer)
+}
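A minimal sketch of consuming this interface generically; *MultiPrinter (defined later in this diff) satisfies it, and startAndRedirect is a hypothetical helper used only for illustration:

// startAndRedirect points any live printer at a custom writer and starts it
// through the interface rather than the concrete Start method.
func startAndRedirect(p pterm.LivePrinter, w io.Writer) (*pterm.LivePrinter, error) {
	p.SetWriter(w)
	return p.GenericStart()
}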
diff --git a/vendor/github.com/pterm/pterm/interface_renderable_printer.go b/vendor/github.com/pterm/pterm/interface_renderable_printer.go
new file mode 100644
index 0000000..d2089b9
--- /dev/null
+++ b/vendor/github.com/pterm/pterm/interface_renderable_printer.go
@@ -0,0 +1,11 @@
+package pterm
+
+// RenderPrinter is used to display renderable content.
+// An example of renderable content is a Table.
+type RenderPrinter interface {
+ // Render prints the renderable content to the terminal.
+ Render() error
+
+ // Srender returns the renderable content as a rendered string.
+ Srender() (string, error)
+}
diff --git a/vendor/github.com/pterm/pterm/interface_text_printer.go b/vendor/github.com/pterm/pterm/interface_text_printer.go
new file mode 100644
index 0000000..4603807
--- /dev/null
+++ b/vendor/github.com/pterm/pterm/interface_text_printer.go
@@ -0,0 +1,48 @@
+package pterm
+
+// TextPrinter contains methods to print formatted text to the console or return it as a string.
+type TextPrinter interface {
+ // Sprint formats using the default formats for its operands and returns the resulting string.
+ // Spaces are added between operands when neither is a string.
+ Sprint(a ...any) string
+
+ // Sprintln formats using the default formats for its operands and returns the resulting string.
+ // Spaces are always added between operands and a newline is appended.
+ Sprintln(a ...any) string
+
+ // Sprintf formats according to a format specifier and returns the resulting string.
+ Sprintf(format string, a ...any) string
+
+ // Sprintfln formats according to a format specifier and returns the resulting string.
+ // Spaces are always added between operands and a newline is appended.
+ Sprintfln(format string, a ...any) string
+
+ // Print formats using the default formats for its operands and writes to standard output.
+ // Spaces are added between operands when neither is a string.
+ // It returns the number of bytes written and any write error encountered.
+ Print(a ...any) *TextPrinter
+
+ // Println formats using the default formats for its operands and writes to standard output.
+ // Spaces are always added between operands and a newline is appended.
+ // It returns the number of bytes written and any write error encountered.
+ Println(a ...any) *TextPrinter
+
+ // Printf formats according to a format specifier and writes to standard output.
+ // It returns the number of bytes written and any write error encountered.
+ Printf(format string, a ...any) *TextPrinter
+
+ // Printfln formats according to a format specifier and writes to standard output.
+ // Spaces are always added between operands and a newline is appended.
+ // It returns the number of bytes written and any write error encountered.
+ Printfln(format string, a ...any) *TextPrinter
+
+ // PrintOnError prints every error which is not nil.
+ // If every error is nil, nothing will be printed.
+ // This can be used for simple error checking.
+ PrintOnError(a ...any) *TextPrinter
+
+ // PrintOnErrorf wraps every error which is not nil and prints it.
+ // If every error is nil, nothing will be printed.
+ // This can be used for simple error checking.
+ PrintOnErrorf(format string, a ...any) *TextPrinter
+}
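Because every method returns *TextPrinter, a concrete printer can be handed around behind this interface and calls can be chained. A minimal sketch (logSection is a hypothetical helper):

// logSection reports a finished section and prints err only if it is non-nil.
func logSection(tp pterm.TextPrinter, name string, err error) {
	tp.Printfln("section %s finished", name)
	tp.PrintOnError(err)
}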
diff --git a/vendor/github.com/pterm/pterm/internal/cancelation_signal.go b/vendor/github.com/pterm/pterm/internal/cancelation_signal.go
new file mode 100644
index 0000000..33f17cc
--- /dev/null
+++ b/vendor/github.com/pterm/pterm/internal/cancelation_signal.go
@@ -0,0 +1,22 @@
+package internal
+
+// NewCancelationSignal returns a cancel and an exit function for keeping track of a cancelation
+func NewCancelationSignal(interruptFunc func()) (func(), func()) {
+ canceled := false
+
+ cancel := func() {
+ canceled = true
+ }
+
+ exit := func() {
+ if canceled {
+ if interruptFunc != nil {
+ interruptFunc()
+ } else {
+ Exit(1)
+ }
+ }
+ }
+
+ return cancel, exit
+}
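A minimal sketch of how the returned pair is meant to be wired up, mirroring the Ctrl+C handling in the interactive printers earlier in this diff (runInteractive is a hypothetical caller):

// runInteractive: cancel() records the interruption, and the deferred exit()
// only acts if cancel() was called before it runs.
func runInteractive() {
	onInterrupt := func() { /* custom cleanup; a nil func falls back to internal.Exit(1) */ }
	cancel, exit := internal.NewCancelationSignal(onInterrupt)
	defer exit()

	// ... read keys; when Ctrl+C arrives:
	cancel()
}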
diff --git a/vendor/github.com/pterm/pterm/internal/center_text.go b/vendor/github.com/pterm/pterm/internal/center_text.go
new file mode 100644
index 0000000..658224a
--- /dev/null
+++ b/vendor/github.com/pterm/pterm/internal/center_text.go
@@ -0,0 +1,46 @@
+package internal
+
+import (
+ "strings"
+
+ "github.com/gookit/color"
+)
+
+// CenterText returns a string centered within the given width, with padding on the left and right.
+// If width is 0, it is calculated automatically from the longest line.
+func CenterText(text string, width int) string {
+ var lines []string
+ if width == 0 {
+ width = GetStringMaxWidth(text)
+ }
+ linesTmp := strings.Split(text, "\n")
+ for _, line := range linesTmp {
+ if len(color.ClearCode(line)) > width {
+ extraLines := []string{""}
+ extraLinesCounter := 0
+ for i, letter := range line {
+ if i%width == 0 && i != 0 {
+ extraLinesCounter++
+ extraLines = append(extraLines, "")
+ }
+ extraLines[extraLinesCounter] += string(letter)
+ }
+ for _, extraLine := range extraLines {
+ padding := width - len(color.ClearCode(extraLine))
+ extraLine = strings.Repeat(" ", padding/2) + extraLine + strings.Repeat(" ", padding/2) + "\n"
+ lines = append(lines, extraLine)
+ }
+ } else {
+ padding := width - len(color.ClearCode(line))
+ line = strings.Repeat(" ", padding/2) + line + strings.Repeat(" ", padding/2) + "\n"
+ lines = append(lines, line)
+ }
+ }
+
+ var line string
+ for _, s := range lines {
+ line += s
+ }
+
+ return strings.TrimSuffix(line, "\n")
+}
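Illustrative call from inside the module (internal packages are not importable from outside):

func centerExample() {
	centered := internal.CenterText("pterm", 11)
	_ = centered // "   pterm   " — 3 spaces of padding on each side of the 5-rune word
}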
diff --git a/vendor/github.com/pterm/pterm/internal/collection.go b/vendor/github.com/pterm/pterm/internal/collection.go
new file mode 100644
index 0000000..b19f0ea
--- /dev/null
+++ b/vendor/github.com/pterm/pterm/internal/collection.go
@@ -0,0 +1,7 @@
+package internal
+
+// RandomStrings contains a list of random strings to use while testing.
+var RandomStrings = []string{
+ "hello world", "²³14234!`§=)$-.€@_&", "This is a sentence.", "This\nstring\nhas\nmultiple\nlines",
+ "windows\r\nline\r\nendings", "\rtext",
+}
diff --git a/vendor/github.com/pterm/pterm/internal/exit.go b/vendor/github.com/pterm/pterm/internal/exit.go
new file mode 100644
index 0000000..f07a004
--- /dev/null
+++ b/vendor/github.com/pterm/pterm/internal/exit.go
@@ -0,0 +1,14 @@
+package internal
+
+import "os"
+
+// ExitFuncType is the type of function used to exit the program.
+type ExitFuncType func(int)
+
+// DefaultExitFunc is the default function used to exit the program.
+var DefaultExitFunc ExitFuncType = os.Exit
+
+// Exit calls the current exit function.
+func Exit(code int) {
+ DefaultExitFunc(code)
+}
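A minimal sketch of swapping DefaultExitFunc in tests so exit paths can be observed without terminating the process (withCapturedExit is a hypothetical helper):

// withCapturedExit temporarily replaces DefaultExitFunc, runs fn, and reports
// whether Exit was called and with which code.
func withCapturedExit(fn func()) (code int, exited bool) {
	prev := internal.DefaultExitFunc
	defer func() { internal.DefaultExitFunc = prev }()
	internal.DefaultExitFunc = func(c int) { code, exited = c, true }
	fn()
	return code, exited
}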
diff --git a/vendor/github.com/pterm/pterm/internal/longest_line.go b/vendor/github.com/pterm/pterm/internal/longest_line.go
new file mode 100644
index 0000000..a00e486
--- /dev/null
+++ b/vendor/github.com/pterm/pterm/internal/longest_line.go
@@ -0,0 +1,21 @@
+package internal
+
+import (
+ "strings"
+
+ "github.com/gookit/color"
+ "github.com/mattn/go-runewidth"
+)
+
+// ReturnLongestLine returns the longest line of a text split by the given separator
+func ReturnLongestLine(text, sep string) string {
+ lines := strings.Split(text, sep)
+ var longest string
+ for _, line := range lines {
+ if runewidth.StringWidth(color.ClearCode(line)) > runewidth.StringWidth(color.ClearCode(longest)) {
+ longest = line
+ }
+ }
+
+ return longest
+}
diff --git a/vendor/github.com/pterm/pterm/internal/map_range_to_range.go b/vendor/github.com/pterm/pterm/internal/map_range_to_range.go
new file mode 100644
index 0000000..4673b11
--- /dev/null
+++ b/vendor/github.com/pterm/pterm/internal/map_range_to_range.go
@@ -0,0 +1,8 @@
+package internal
+
+func MapRangeToRange(fromMin, fromMax, toMin, toMax, current float32) int {
+ if fromMax-fromMin == 0 {
+ return 0
+ }
+ return int(toMin + ((toMax-toMin)/(fromMax-fromMin))*(current-fromMin))
+}
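Illustrative use, e.g. scaling an RGB channel (0–255) onto a percentage (0–100):

func mapExample() {
	pct := internal.MapRangeToRange(0, 255, 0, 100, 64)
	_ = pct // 25, since 64 is roughly a quarter of the way through the source range
}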
diff --git a/vendor/github.com/pterm/pterm/internal/max_text_width.go b/vendor/github.com/pterm/pterm/internal/max_text_width.go
new file mode 100644
index 0000000..9612fcf
--- /dev/null
+++ b/vendor/github.com/pterm/pterm/internal/max_text_width.go
@@ -0,0 +1,21 @@
+package internal
+
+import (
+ "strings"
+
+ "github.com/gookit/color"
+ "github.com/mattn/go-runewidth"
+)
+
+// GetStringMaxWidth returns the maximum width of a string with multiple lines.
+func GetStringMaxWidth(s string) int {
+ var maxString int
+ ss := strings.Split(s, "\n")
+ for _, s2 := range ss {
+ s2WithoutColor := color.ClearCode(s2)
+ if runewidth.StringWidth(s2WithoutColor) > maxString {
+ maxString = runewidth.StringWidth(s2WithoutColor)
+ }
+ }
+ return maxString
+}
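Illustrative call: color codes are stripped and each line is measured separately, so only the widest printable line counts:

func widthExample() {
	w := internal.GetStringMaxWidth("ok\nsome longer second line")
	_ = w // 23, the printable width of the second (longest) line
}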
diff --git a/vendor/github.com/pterm/pterm/internal/percentage.go b/vendor/github.com/pterm/pterm/internal/percentage.go
new file mode 100644
index 0000000..ce61abc
--- /dev/null
+++ b/vendor/github.com/pterm/pterm/internal/percentage.go
@@ -0,0 +1,13 @@
+package internal
+
+import "math"
+
+// Percentage calculates the percentage of current relative to total.
+func Percentage(total, current float64) float64 {
+ return (current / total) * 100
+}
+
+// PercentageRound returns a rounded Percentage.
+func PercentageRound(total, current float64) float64 {
+ return math.Round(Percentage(total, current))
+}
diff --git a/vendor/github.com/pterm/pterm/internal/remove_and_count_prefix.go b/vendor/github.com/pterm/pterm/internal/remove_and_count_prefix.go
new file mode 100644
index 0000000..5c77031
--- /dev/null
+++ b/vendor/github.com/pterm/pterm/internal/remove_and_count_prefix.go
@@ -0,0 +1,11 @@
+package internal
+
+import (
+ "strings"
+)
+
+func RemoveAndCountPrefix(input, subString string) (string, int) {
+ inputLength := len(input)
+ input = strings.TrimLeft(input, subString)
+ return input, inputLength - len(input)
+}
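Illustrative call; note that strings.TrimLeft treats subString as a set of characters, so this is best suited to single-character prefixes:

func prefixCountExample() {
	rest, n := internal.RemoveAndCountPrefix("##  Heading", "#")
	_, _ = rest, n // rest == "  Heading", n == 2
}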
diff --git a/vendor/github.com/pterm/pterm/internal/rgb_complementary.go b/vendor/github.com/pterm/pterm/internal/rgb_complementary.go
new file mode 100644
index 0000000..71520b3
--- /dev/null
+++ b/vendor/github.com/pterm/pterm/internal/rgb_complementary.go
@@ -0,0 +1,5 @@
+package internal
+
+func Complementary(r, g, b uint8) (uint8, uint8, uint8) {
+ return 255 - r, 255 - g, 255 - b
+}
diff --git a/vendor/github.com/pterm/pterm/internal/title_in_line.go b/vendor/github.com/pterm/pterm/internal/title_in_line.go
new file mode 100644
index 0000000..68e28ec
--- /dev/null
+++ b/vendor/github.com/pterm/pterm/internal/title_in_line.go
@@ -0,0 +1,28 @@
+package internal
+
+import (
+ "strings"
+)
+
+// AddTitleToLine adds a title to one side of a line, e.g.: "─ This is the title ──────"
+func AddTitleToLine(title, line string, length int, left bool) string {
+ var ret string
+ if left {
+ ret += line + " " + title + " " + line + strings.Repeat(line, length-(4+GetStringMaxWidth(title)))
+ } else {
+ ret += strings.Repeat(line, length-(4+GetStringMaxWidth(title))) + line + " " + title + " " + line
+ }
+
+ return ret
+}
+
+// AddTitleToLineCenter adds a title to the center of a line, e.g.: "─ This is the title ──────"
+func AddTitleToLineCenter(title, line string, length int) string {
+ var ret string
+ repeatString := length - (4 + GetStringMaxWidth(title))
+ unevenRepeatString := repeatString % 2
+
+ ret += strings.Repeat(line, repeatString/2) + line + " " + title + " " + line + strings.Repeat(line, repeatString/2+unevenRepeatString)
+
+ return ret
+}
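Illustrative output of the two helpers with a box-drawing rune as the line character:

func titleLineExample() {
	left := internal.AddTitleToLine("Title", "─", 20, true)
	_ = left // "─ Title " followed by 12 "─" runes (20 runes total)

	center := internal.AddTitleToLineCenter("Title", "─", 20)
	_ = center // 6 "─" runes, " Title ", then 7 "─" runes (20 runes total)
}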
diff --git a/vendor/github.com/pterm/pterm/internal/utils.go b/vendor/github.com/pterm/pterm/internal/utils.go
new file mode 100644
index 0000000..ca74cfd
--- /dev/null
+++ b/vendor/github.com/pterm/pterm/internal/utils.go
@@ -0,0 +1,8 @@
+package internal
+
+import "os"
+
+// RunsInCi returns true if the current build is running on a CI server.
+func RunsInCi() bool {
+ return os.Getenv("CI") != ""
+}
diff --git a/vendor/github.com/pterm/pterm/internal/with_boolean.go b/vendor/github.com/pterm/pterm/internal/with_boolean.go
new file mode 100644
index 0000000..4dfdb86
--- /dev/null
+++ b/vendor/github.com/pterm/pterm/internal/with_boolean.go
@@ -0,0 +1,9 @@
+package internal
+
+// WithBoolean helps an option setter (WithXXX(b ...bool)) return true if no boolean is passed, and false only if it is explicitly set to false.
+func WithBoolean(b []bool) bool {
+ if len(b) == 0 {
+ b = append(b, true)
+ }
+ return b[0]
+}
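The option setters throughout this diff build on WithBoolean, which gives them the following behavior (WithShowCount, defined later for the progressbar, is used as the example):

func optionExample() {
	enabledByDefault := pterm.DefaultProgressbar.WithShowCount()    // no argument    -> true
	explicitlyOn := pterm.DefaultProgressbar.WithShowCount(true)    // explicit true  -> true
	explicitlyOff := pterm.DefaultProgressbar.WithShowCount(false)  // explicit false -> false
	_, _, _ = enabledByDefault, explicitlyOn, explicitlyOff
}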
diff --git a/vendor/github.com/pterm/pterm/logger.go b/vendor/github.com/pterm/pterm/logger.go
new file mode 100644
index 0000000..407f4d5
--- /dev/null
+++ b/vendor/github.com/pterm/pterm/logger.go
@@ -0,0 +1,447 @@
+package pterm
+
+import (
+ "encoding/json"
+ "io"
+ "os"
+ "path/filepath"
+ "runtime"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/pterm/pterm/internal"
+)
+
+type LogLevel int
+
+// Style returns the style of the log level.
+func (l LogLevel) Style() Style {
+ baseStyle := NewStyle(Bold)
+ switch l {
+ case LogLevelTrace:
+ return baseStyle.Add(*FgCyan.ToStyle())
+ case LogLevelDebug:
+ return baseStyle.Add(*FgBlue.ToStyle())
+ case LogLevelInfo:
+ return baseStyle.Add(*FgGreen.ToStyle())
+ case LogLevelWarn:
+ return baseStyle.Add(*FgYellow.ToStyle())
+ case LogLevelError:
+ return baseStyle.Add(*FgRed.ToStyle())
+ case LogLevelFatal:
+ return baseStyle.Add(*FgRed.ToStyle())
+ case LogLevelPrint:
+ return baseStyle.Add(*FgWhite.ToStyle())
+ }
+
+ return baseStyle.Add(*FgWhite.ToStyle())
+}
+
+func (l LogLevel) String() string {
+ switch l {
+ case LogLevelDisabled:
+ return ""
+ case LogLevelTrace:
+ return "TRACE"
+ case LogLevelDebug:
+ return "DEBUG"
+ case LogLevelInfo:
+ return "INFO"
+ case LogLevelWarn:
+ return "WARN"
+ case LogLevelError:
+ return "ERROR"
+ case LogLevelFatal:
+ return "FATAL"
+ case LogLevelPrint:
+ return "PRINT"
+ }
+ return "Unknown"
+}
+
+const (
+ // LogLevelDisabled never prints.
+ LogLevelDisabled LogLevel = iota
+ // LogLevelTrace is the log level for traces.
+ LogLevelTrace
+ // LogLevelDebug is the log level for debug.
+ LogLevelDebug
+ // LogLevelInfo is the log level for info.
+ LogLevelInfo
+ // LogLevelWarn is the log level for warnings.
+ LogLevelWarn
+ // LogLevelError is the log level for errors.
+ LogLevelError
+ // LogLevelFatal is the log level for fatal errors.
+ LogLevelFatal
+ // LogLevelPrint is the log level for printing.
+ LogLevelPrint
+)
+
+// LogFormatter is the log formatter.
+// Can be either LogFormatterColorful or LogFormatterJSON.
+type LogFormatter int
+
+const (
+ // LogFormatterColorful is a colorful log formatter.
+ LogFormatterColorful LogFormatter = iota
+ // LogFormatterJSON is a JSON log formatter.
+ LogFormatterJSON
+)
+
+// DefaultLogger is the default logger.
+var DefaultLogger = Logger{
+ Formatter: LogFormatterColorful,
+ Writer: os.Stdout,
+ Level: LogLevelInfo,
+ ShowTime: true,
+ TimeFormat: "2006-01-02 15:04:05",
+ MaxWidth: 80,
+ KeyStyles: map[string]Style{
+ "error": *NewStyle(FgRed, Bold),
+ "err": *NewStyle(FgRed, Bold),
+ "caller": *NewStyle(FgGray, Bold),
+ },
+}
+
+// loggerMutex syncs all loggers, so that they don't print at the exact same time.
+var loggerMutex sync.Mutex
+
+type Logger struct {
+ // Formatter is the log formatter of the logger.
+ Formatter LogFormatter
+ // Writer is the writer of the logger.
+ Writer io.Writer
+ // Level is the log level of the logger.
+ Level LogLevel
+ // ShowCaller defines if the logger should print the caller.
+ ShowCaller bool
+ // CallerOffset defines the offset of the caller.
+ CallerOffset int
+ // ShowTime defines if the logger should print a timestamp.
+ ShowTime bool
+ // TimeFormat defines the layout of the timestamp.
+ TimeFormat string
+ // KeyStyles defines the styles for specific keys.
+ KeyStyles map[string]Style
+ // MaxWidth defines the maximum width of the logger.
+ // If the text (including the arguments) is longer than the max width, it will be split into multiple lines.
+ MaxWidth int
+}
+
+// WithFormatter sets the log formatter of the logger.
+func (l Logger) WithFormatter(formatter LogFormatter) *Logger {
+ l.Formatter = formatter
+ return &l
+}
+
+// WithWriter sets the writer of the logger.
+func (l Logger) WithWriter(writer io.Writer) *Logger {
+ l.Writer = writer
+ return &l
+}
+
+// WithLevel sets the log level of the logger.
+func (l Logger) WithLevel(level LogLevel) *Logger {
+ l.Level = level
+ return &l
+}
+
+// WithCaller enables or disables the caller.
+func (l Logger) WithCaller(b ...bool) *Logger {
+ l.ShowCaller = internal.WithBoolean(b)
+ return &l
+}
+
+// WithCallerOffset sets the caller offset.
+func (l Logger) WithCallerOffset(offset int) *Logger {
+ l.CallerOffset = offset
+ return &l
+}
+
+// WithTime enables or disables the timestamp.
+func (l Logger) WithTime(b ...bool) *Logger {
+ l.ShowTime = internal.WithBoolean(b)
+ return &l
+}
+
+// WithTimeFormat sets the timestamp layout.
+func (l Logger) WithTimeFormat(format string) *Logger {
+ l.TimeFormat = format
+ return &l
+}
+
+// WithKeyStyles sets the style for a specific key.
+func (l Logger) WithKeyStyles(styles map[string]Style) *Logger {
+ l.KeyStyles = styles
+ return &l
+}
+
+// WithMaxWidth sets the maximum width of the logger.
+func (l Logger) WithMaxWidth(width int) *Logger {
+ l.MaxWidth = width
+ return &l
+}
+
+// AppendKeyStyles appends a style for a specific key.
+func (l Logger) AppendKeyStyles(styles map[string]Style) *Logger {
+ for k, v := range styles {
+ l.KeyStyles[k] = v
+ }
+ return &l
+}
+
+// AppendKeyStyle appends a style for a specific key.
+func (l Logger) AppendKeyStyle(key string, style Style) *Logger {
+ l.KeyStyles[key] = style
+ return &l
+}
+
+// CanPrint checks if the logger can print a specific log level.
+func (l Logger) CanPrint(level LogLevel) bool {
+ if l.Level == LogLevelDisabled {
+ return false
+ }
+ return l.Level <= level
+}
+
+// Args converts any arguments to a slice of LoggerArgument.
+func (l Logger) Args(args ...any) []LoggerArgument {
+ var loggerArgs []LoggerArgument
+
+ // args are in the format of: key, value, key, value, key, value, ...
+ args = l.sanitizeArgs(args)
+
+ for i := 0; i < len(args); i += 2 {
+ key := Sprint(args[i])
+ value := args[i+1]
+
+ loggerArgs = append(loggerArgs, LoggerArgument{
+ Key: key,
+ Value: value,
+ })
+ }
+
+ return loggerArgs
+}
+
+// ArgsFromMap converts a map to a slice of LoggerArgument.
+func (l Logger) ArgsFromMap(m map[string]any) []LoggerArgument {
+ var loggerArgs []LoggerArgument
+
+ for k, v := range m {
+ loggerArgs = append(loggerArgs, LoggerArgument{
+ Key: k,
+ Value: v,
+ })
+ }
+
+ return loggerArgs
+}
+
+// sanitizeArgs inserts an error message into an args slice if an odd number of arguments is provided.
+func (l Logger) sanitizeArgs(args []any) []any {
+ numArgs := len(args)
+ if numArgs > 0 && numArgs%2 != 0 {
+ if numArgs > 1 {
+ lastArg := args[numArgs-1]
+ args = append(args[:numArgs-1], []any{ErrKeyWithoutValue, lastArg}...)
+ } else {
+ args = []any{ErrKeyWithoutValue, args[0]}
+ }
+ }
+ return args
+}
+
+func (l Logger) getCallerInfo() (path string, line int) {
+ if !l.ShowCaller {
+ return
+ }
+
+ _, path, line, _ = runtime.Caller(l.CallerOffset + 4)
+ _, callerBase, _, _ := runtime.Caller(0)
+ basepath := filepath.Dir(callerBase)
+ basepath = strings.ReplaceAll(basepath, "\\", "/")
+
+ path = strings.TrimPrefix(path, basepath)
+
+ return
+}
+
+func (l Logger) combineArgs(args ...[]LoggerArgument) []LoggerArgument {
+ var result []LoggerArgument
+
+ for _, arg := range args {
+ result = append(result, arg...)
+ }
+
+ return result
+}
+
+func (l Logger) print(level LogLevel, msg string, args []LoggerArgument) {
+ if !l.CanPrint(level) {
+ return
+ }
+
+ var line string
+
+ switch l.Formatter {
+ case LogFormatterColorful:
+ line = l.renderColorful(level, msg, args)
+ case LogFormatterJSON:
+ line = l.renderJSON(level, msg, args)
+ }
+
+ loggerMutex.Lock()
+ defer loggerMutex.Unlock()
+
+ Fprintln(l.Writer, line)
+}
+
+func (l Logger) renderColorful(level LogLevel, msg string, args []LoggerArgument) (result string) {
+ if l.ShowTime {
+ result += Gray(time.Now().Format(l.TimeFormat)) + " "
+ }
+
+ if GetTerminalWidth() > 0 && GetTerminalWidth() < l.MaxWidth {
+ l.MaxWidth = GetTerminalWidth()
+ }
+
+ var argumentsInNewLine bool
+
+ result += level.Style().Sprintf("%-5s", level.String()) + " "
+
+ // if msg is too long, wrap it to multiple lines with the same length
+ remainingWidth := l.MaxWidth - internal.GetStringMaxWidth(result)
+ if internal.GetStringMaxWidth(msg) > remainingWidth {
+ argumentsInNewLine = true
+ msg = DefaultParagraph.WithMaxWidth(remainingWidth).Sprint(msg)
+ padding := len(time.Now().Format(l.TimeFormat) + " ")
+ msg = strings.ReplaceAll(msg, "\n", "\n"+strings.Repeat(" ", padding)+" │ ")
+ }
+
+ result += msg
+
+ if l.ShowCaller {
+ path, line := l.getCallerInfo()
+ args = append(args, LoggerArgument{
+ Key: "caller",
+ Value: FgGray.Sprintf("%s:%d", path, line),
+ })
+ }
+
+ arguments := make([]string, len(args))
+
+ // add arguments
+ if len(args) > 0 {
+ for i, arg := range args {
+ if style, ok := l.KeyStyles[arg.Key]; ok {
+ arguments[i] = style.Sprintf("%s: ", arg.Key)
+ } else {
+ arguments[i] = level.Style().Sprintf("%s: ", arg.Key)
+ }
+
+ arguments[i] += Sprintf("%s", Sprint(arg.Value))
+ }
+ }
+
+ fullLine := result + " " + strings.Join(arguments, " ")
+
+ // if the full line is too long, wrap the arguments to multiple lines
+ if internal.GetStringMaxWidth(fullLine) > l.MaxWidth {
+ argumentsInNewLine = true
+ }
+
+ if !argumentsInNewLine {
+ result = fullLine
+ } else {
+ padding := 4
+ if l.ShowTime {
+ padding = len(time.Time{}.Format(l.TimeFormat)) + 3
+ }
+
+ for i, argument := range arguments {
+ var pipe string
+ if i < len(arguments)-1 {
+ pipe = "├"
+ } else {
+ pipe = "└"
+ }
+ result += "\n" + strings.Repeat(" ", padding) + pipe + " " + argument
+ }
+ }
+
+ return
+}
+
+func (l Logger) renderJSON(level LogLevel, msg string, args []LoggerArgument) string {
+ m := l.argsToMap(args)
+
+ m["level"] = level.String()
+ m["timestamp"] = time.Now().Format(l.TimeFormat)
+ m["msg"] = msg
+
+ if file, line := l.getCallerInfo(); file != "" {
+ m["caller"] = Sprintf("%s:%d", file, line)
+ }
+
+ b, _ := json.Marshal(m)
+ return string(b)
+}
+
+func (l Logger) argsToMap(args []LoggerArgument) map[string]any {
+ m := make(map[string]any)
+
+ for _, arg := range args {
+ m[arg.Key] = arg.Value
+ }
+
+ return m
+}
+
+// Trace prints a trace log.
+func (l Logger) Trace(msg string, args ...[]LoggerArgument) {
+ l.print(LogLevelTrace, msg, l.combineArgs(args...))
+}
+
+// Debug prints a debug log.
+func (l Logger) Debug(msg string, args ...[]LoggerArgument) {
+ l.print(LogLevelDebug, msg, l.combineArgs(args...))
+}
+
+// Info prints an info log.
+func (l Logger) Info(msg string, args ...[]LoggerArgument) {
+ l.print(LogLevelInfo, msg, l.combineArgs(args...))
+}
+
+// Warn prints a warning log.
+func (l Logger) Warn(msg string, args ...[]LoggerArgument) {
+ l.print(LogLevelWarn, msg, l.combineArgs(args...))
+}
+
+// Error prints an error log.
+func (l Logger) Error(msg string, args ...[]LoggerArgument) {
+ l.print(LogLevelError, msg, l.combineArgs(args...))
+}
+
+// Fatal prints a fatal log and exits the program.
+func (l Logger) Fatal(msg string, args ...[]LoggerArgument) {
+ l.print(LogLevelFatal, msg, l.combineArgs(args...))
+ if l.CanPrint(LogLevelFatal) {
+ os.Exit(1)
+ }
+}
+
+// Print prints a log.
+func (l Logger) Print(msg string, args ...[]LoggerArgument) {
+ l.print(LogLevelPrint, msg, l.combineArgs(args...))
+}
+
+// LoggerArgument is a key-value pair for a logger.
+type LoggerArgument struct {
+ // Key is the key of the argument.
+ Key string
+ // Value is the value of the argument.
+ Value any
+}
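A minimal usage sketch for the logger above; the "errors" import is assumed, and Args expects alternating key/value pairs:

func loggerExample() {
	logger := pterm.DefaultLogger.
		WithLevel(pterm.LogLevelDebug).
		WithCaller()

	logger.Debug("connecting", logger.Args("host", "localhost", "port", 5432))
	logger.Error("request failed", logger.Args("err", errors.New("connection reset")))
}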
diff --git a/vendor/github.com/pterm/pterm/multi_live_printer.go b/vendor/github.com/pterm/pterm/multi_live_printer.go
new file mode 100644
index 0000000..b9e0d0e
--- /dev/null
+++ b/vendor/github.com/pterm/pterm/multi_live_printer.go
@@ -0,0 +1,124 @@
+package pterm
+
+import (
+ "atomicgo.dev/schedule"
+ "bytes"
+ "io"
+ "os"
+ "strings"
+ "time"
+)
+
+var DefaultMultiPrinter = MultiPrinter{
+ printers: []LivePrinter{},
+ Writer: os.Stdout,
+ UpdateDelay: time.Millisecond * 200,
+
+ buffers: []*bytes.Buffer{},
+ area: DefaultArea,
+}
+
+type MultiPrinter struct {
+ IsActive bool
+ Writer io.Writer
+ UpdateDelay time.Duration
+
+ printers []LivePrinter
+ buffers []*bytes.Buffer
+ area AreaPrinter
+}
+
+// SetWriter sets the writer for the AreaPrinter.
+func (p *MultiPrinter) SetWriter(writer io.Writer) {
+ p.Writer = writer
+}
+
+// WithWriter returns a fork of the MultiPrinter with a new writer.
+func (p MultiPrinter) WithWriter(writer io.Writer) *MultiPrinter {
+ p.Writer = writer
+ return &p
+}
+
+// WithUpdateDelay returns a fork of the MultiPrinter with a new update delay.
+func (p MultiPrinter) WithUpdateDelay(delay time.Duration) *MultiPrinter {
+ p.UpdateDelay = delay
+ return &p
+}
+
+func (p *MultiPrinter) NewWriter() io.Writer {
+ buf := bytes.NewBufferString("")
+ p.buffers = append(p.buffers, buf)
+ return buf
+}
+
+// getString returns all buffers appended and separated by a newline.
+func (p *MultiPrinter) getString() string {
+ var buffer bytes.Buffer
+ for _, b := range p.buffers {
+ s := b.String()
+ s = strings.Trim(s, "\n")
+
+ parts := strings.Split(s, "\r") // only get the last override
+ s = parts[len(parts)-1]
+
+ // check if s is empty, if so get one part before, repeat until not empty
+ for s == "" {
+ parts = parts[:len(parts)-1]
+ s = parts[len(parts)-1]
+ }
+
+ s = strings.Trim(s, "\n\r")
+ buffer.WriteString(s)
+ buffer.WriteString("\n")
+ }
+ return buffer.String()
+}
+
+func (p *MultiPrinter) Start() (*MultiPrinter, error) {
+ p.IsActive = true
+ for _, printer := range p.printers {
+ printer.GenericStart()
+ }
+
+ schedule.Every(p.UpdateDelay, func() bool {
+ if !p.IsActive {
+ return false
+ }
+
+ p.area.Update(p.getString())
+
+ return true
+ })
+
+ return p, nil
+}
+
+func (p *MultiPrinter) Stop() (*MultiPrinter, error) {
+ p.IsActive = false
+ for _, printer := range p.printers {
+ printer.GenericStop()
+ }
+ time.Sleep(time.Millisecond * 20)
+ p.area.Update(p.getString())
+ p.area.Stop()
+
+ return p, nil
+}
+
+// GenericStart runs Start, but returns a LivePrinter.
+// This is used for the interface LivePrinter.
+// You most likely want to use Start instead of this in your program.
+func (p MultiPrinter) GenericStart() (*LivePrinter, error) {
+ p2, _ := p.Start()
+ lp := LivePrinter(p2)
+ return &lp, nil
+}
+
+// GenericStop runs Stop, but returns a LivePrinter.
+// This is used for the interface LivePrinter.
+// You most likely want to use Stop instead of this in your program.
+func (p MultiPrinter) GenericStop() (*LivePrinter, error) {
+ p2, _ := p.Stop()
+ lp := LivePrinter(p2)
+ return &lp, nil
+}
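A minimal sketch of coordinating several live lines through one MultiPrinter; in practice the writers returned by NewWriter are usually handed to progressbars or spinners via their WithWriter options (fmt and time imports assumed):

func multiExample() {
	multi, _ := pterm.DefaultMultiPrinter.Start()
	defer multi.Stop()

	w1 := multi.NewWriter()
	w2 := multi.NewWriter()

	fmt.Fprint(w1, "task one: running\r") // "\r" marks the latest state of the line
	fmt.Fprint(w2, "task two: waiting\r")

	time.Sleep(300 * time.Millisecond) // give the scheduler one UpdateDelay tick to redraw
}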
diff --git a/vendor/github.com/pterm/pterm/panel_printer.go b/vendor/github.com/pterm/pterm/panel_printer.go
new file mode 100644
index 0000000..9b5ec3f
--- /dev/null
+++ b/vendor/github.com/pterm/pterm/panel_printer.go
@@ -0,0 +1,190 @@
+package pterm
+
+import (
+ "io"
+ "strings"
+
+ "github.com/mattn/go-runewidth"
+
+ "github.com/pterm/pterm/internal"
+)
+
+// Panel contains the data, which should be printed inside a PanelPrinter.
+type Panel struct {
+ Data string
+}
+
+// Panels is a two-dimensional grid of Panel elements.
+type Panels [][]Panel
+
+// DefaultPanel is the default PanelPrinter.
+var DefaultPanel = PanelPrinter{
+ Padding: 1,
+}
+
+// PanelPrinter prints content in boxes.
+type PanelPrinter struct {
+ Panels Panels
+ Padding int
+ BottomPadding int
+ SameColumnWidth bool
+ BoxPrinter BoxPrinter
+ Writer io.Writer
+}
+
+// WithPanels returns a new PanelPrinter with specific options.
+func (p PanelPrinter) WithPanels(panels Panels) *PanelPrinter {
+ p.Panels = panels
+ return &p
+}
+
+// WithPadding returns a new PanelPrinter with specific options.
+func (p PanelPrinter) WithPadding(padding int) *PanelPrinter {
+ if padding < 0 {
+ padding = 0
+ }
+ p.Padding = padding
+ return &p
+}
+
+// WithBottomPadding returns a new PanelPrinter with specific options.
+func (p PanelPrinter) WithBottomPadding(bottomPadding int) *PanelPrinter {
+ if bottomPadding < 0 {
+ bottomPadding = 0
+ }
+ p.BottomPadding = bottomPadding
+ return &p
+}
+
+// WithSameColumnWidth returns a new PanelPrinter with specific options.
+func (p PanelPrinter) WithSameColumnWidth(b ...bool) *PanelPrinter {
+ p.SameColumnWidth = internal.WithBoolean(b)
+ return &p
+}
+
+// WithBoxPrinter returns a new PanelPrinter with specific options.
+func (p PanelPrinter) WithBoxPrinter(boxPrinter BoxPrinter) *PanelPrinter {
+ p.BoxPrinter = boxPrinter
+ return &p
+}
+
+// WithWriter sets the custom Writer.
+func (p PanelPrinter) WithWriter(writer io.Writer) *PanelPrinter {
+ p.Writer = writer
+ return &p
+}
+
+func (p PanelPrinter) getRawOutput() string {
+ var ret strings.Builder
+ for _, panel := range p.Panels {
+ for _, panel2 := range panel {
+ ret.WriteString(panel2.Data)
+ ret.WriteString("\n\n")
+ }
+ ret.WriteByte('\n')
+ }
+ return ret.String()
+}
+
+// Srender renders the panels as a string.
+func (p PanelPrinter) Srender() (string, error) {
+ var ret strings.Builder
+
+ if RawOutput {
+ return p.getRawOutput(), nil
+ }
+
+ for i := range p.Panels {
+ for i2 := range p.Panels[i] {
+ p.Panels[i][i2].Data = strings.TrimSuffix(p.Panels[i][i2].Data, "\n")
+ }
+ }
+
+ if p.BoxPrinter != (BoxPrinter{}) {
+ for i := range p.Panels {
+ for i2 := range p.Panels[i] {
+ p.Panels[i][i2].Data = p.BoxPrinter.Sprint(p.Panels[i][i2].Data)
+ }
+ }
+ }
+
+ for i := range p.Panels {
+ if len(p.Panels)-1 != i {
+ for i2 := range p.Panels[i] {
+ p.Panels[i][i2].Data += strings.Repeat("\n", p.BottomPadding)
+ }
+ }
+ }
+
+ columnMaxHeightMap := make(map[int]int)
+
+ if p.SameColumnWidth {
+ for _, panel := range p.Panels {
+ for i, p2 := range panel {
+ if columnMaxHeightMap[i] < internal.GetStringMaxWidth(p2.Data) {
+ columnMaxHeightMap[i] = internal.GetStringMaxWidth(p2.Data)
+ }
+ }
+ }
+ }
+
+ for _, boxLine := range p.Panels {
+ var maxHeight int
+
+ var renderedPanels []string
+
+ for _, box := range boxLine {
+ renderedPanels = append(renderedPanels, box.Data)
+ }
+
+ for i, panel := range renderedPanels {
+ renderedPanels[i] = strings.ReplaceAll(panel, "\n", Reset.Sprint()+"\n")
+ }
+
+ for _, box := range renderedPanels {
+ height := len(strings.Split(box, "\n"))
+ if height > maxHeight {
+ maxHeight = height
+ }
+ }
+
+ for i := 0; i < maxHeight; i++ {
+ if maxHeight != i {
+ for j, letter := range renderedPanels {
+ var letterLine string
+ letterLines := strings.Split(letter, "\n")
+ var maxLetterWidth int
+ if !p.SameColumnWidth {
+ maxLetterWidth = internal.GetStringMaxWidth(letter)
+ }
+ if len(letterLines) > i {
+ letterLine = letterLines[i]
+ }
+ letterLineLength := runewidth.StringWidth(RemoveColorFromString(letterLine))
+ if !p.SameColumnWidth {
+ if letterLineLength < maxLetterWidth {
+ letterLine += strings.Repeat(" ", maxLetterWidth-letterLineLength)
+ }
+ } else {
+ if letterLineLength < columnMaxHeightMap[j] {
+ letterLine += strings.Repeat(" ", columnMaxHeightMap[j]-letterLineLength)
+ }
+ }
+ letterLine += strings.Repeat(" ", p.Padding)
+ ret.WriteString(letterLine)
+ }
+ ret.WriteByte('\n')
+ }
+ }
+ }
+
+ return ret.String(), nil
+}
+
+// Render prints the panels to the terminal.
+func (p PanelPrinter) Render() error {
+ s, _ := p.Srender()
+ Fprintln(p.Writer, s)
+
+ return nil
+}
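A minimal usage sketch: one row with two panels, rendered with two spaces of padding between columns:

func panelExample() {
	panels := pterm.Panels{
		{{Data: "Left column\nwith two lines"}, {Data: "Right column"}},
	}
	_ = pterm.DefaultPanel.
		WithPanels(panels).
		WithPadding(2).
		Render()
}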
diff --git a/vendor/github.com/pterm/pterm/paragraph_printer.go b/vendor/github.com/pterm/pterm/paragraph_printer.go
new file mode 100644
index 0000000..d5060e0
--- /dev/null
+++ b/vendor/github.com/pterm/pterm/paragraph_printer.go
@@ -0,0 +1,142 @@
+package pterm
+
+import (
+ "fmt"
+ "io"
+ "strings"
+)
+
+// DefaultParagraph contains the default values for a ParagraphPrinter.
+var DefaultParagraph = ParagraphPrinter{
+ MaxWidth: GetTerminalWidth(),
+}
+
+// ParagraphPrinter can print paragraphs to a fixed line width.
+// The text is wrapped between words, so that individual words are kept intact,
+// similar to text set in a book.
+type ParagraphPrinter struct {
+ MaxWidth int
+ Writer io.Writer
+}
+
+// WithMaxWidth returns a new ParagraphPrinter with a specific MaxWidth
+func (p ParagraphPrinter) WithMaxWidth(width int) *ParagraphPrinter {
+ p.MaxWidth = width
+ return &p
+}
+
+// WithWriter sets the custom Writer.
+func (p ParagraphPrinter) WithWriter(writer io.Writer) *ParagraphPrinter {
+ p.Writer = writer
+ return &p
+}
+
+// Sprint formats using the default formats for its operands and returns the resulting string.
+// Spaces are added between operands when neither is a string.
+func (p ParagraphPrinter) Sprint(a ...any) string {
+ if RawOutput {
+ return Sprint(a...)
+ }
+
+ words := strings.Fields(strings.TrimSpace(Sprint(a...)))
+ if len(words) == 0 {
+ return ""
+ }
+ wrapped := words[0]
+ spaceLeft := p.MaxWidth - len(wrapped)
+ for _, word := range words[1:] {
+ if len(word)+1 > spaceLeft {
+ wrapped += "\n" + word
+ spaceLeft = p.MaxWidth - len(word)
+ } else {
+ wrapped += " " + word
+ spaceLeft -= 1 + len(word)
+ }
+ }
+
+ return wrapped
+}
+
+// Sprintln formats using the default formats for its operands and returns the resulting string.
+// Spaces are always added between operands and a newline is appended.
+func (p ParagraphPrinter) Sprintln(a ...any) string {
+ return p.Sprint(Sprintln(a...)) + "\n"
+}
+
+// Sprintf formats according to a format specifier and returns the resulting string.
+func (p ParagraphPrinter) Sprintf(format string, a ...any) string {
+ return p.Sprint(Sprintf(format, a...))
+}
+
+// Sprintfln formats according to a format specifier and returns the resulting string.
+// Spaces are always added between operands and a newline is appended.
+func (p ParagraphPrinter) Sprintfln(format string, a ...any) string {
+ return p.Sprintf(format, a...) + "\n"
+}
+
+// Print formats using the default formats for its operands and writes to standard output.
+// Spaces are added between operands when neither is a string.
+// It returns the number of bytes written and any write error encountered.
+func (p *ParagraphPrinter) Print(a ...any) *TextPrinter {
+ Fprint(p.Writer, p.Sprint(a...))
+ tp := TextPrinter(p)
+ return &tp
+}
+
+// Println formats using the default formats for its operands and writes to standard output.
+// Spaces are always added between operands and a newline is appended.
+// It returns the number of bytes written and any write error encountered.
+func (p *ParagraphPrinter) Println(a ...any) *TextPrinter {
+ Fprint(p.Writer, p.Sprintln(a...))
+ tp := TextPrinter(p)
+ return &tp
+}
+
+// Printf formats according to a format specifier and writes to standard output.
+// It returns the number of bytes written and any write error encountered.
+func (p *ParagraphPrinter) Printf(format string, a ...any) *TextPrinter {
+ Fprint(p.Writer, p.Sprintf(format, a...))
+ tp := TextPrinter(p)
+ return &tp
+}
+
+// Printfln formats according to a format specifier and writes to standard output.
+// Spaces are always added between operands and a newline is appended.
+// It returns the number of bytes written and any write error encountered.
+func (p *ParagraphPrinter) Printfln(format string, a ...any) *TextPrinter {
+ Fprint(p.Writer, p.Sprintfln(format, a...))
+ tp := TextPrinter(p)
+ return &tp
+}
+
+// PrintOnError prints every error which is not nil.
+// If every error is nil, nothing will be printed.
+// This can be used for simple error checking.
+func (p *ParagraphPrinter) PrintOnError(a ...any) *TextPrinter {
+ for _, arg := range a {
+ if err, ok := arg.(error); ok {
+ if err != nil {
+ p.Println(err)
+ }
+ }
+ }
+
+ tp := TextPrinter(p)
+ return &tp
+}
+
+// PrintOnErrorf wraps every error which is not nil and prints it.
+// If every error is nil, nothing will be printed.
+// This can be used for simple error checking.
+func (p *ParagraphPrinter) PrintOnErrorf(format string, a ...any) *TextPrinter {
+ for _, arg := range a {
+ if err, ok := arg.(error); ok {
+ if err != nil {
+ p.Println(fmt.Errorf(format, err))
+ }
+ }
+ }
+
+ tp := TextPrinter(p)
+ return &tp
+}
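A minimal usage sketch: wrap long text at a fixed width instead of the full terminal width:

func paragraphExample() {
	long := "This is a long sentence that will be wrapped between words so that no word is cut in half."
	pterm.DefaultParagraph.WithMaxWidth(40).Println(long)
}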
diff --git a/vendor/github.com/pterm/pterm/prefix_printer.go b/vendor/github.com/pterm/pterm/prefix_printer.go
new file mode 100644
index 0000000..2c2a5e8
--- /dev/null
+++ b/vendor/github.com/pterm/pterm/prefix_printer.go
@@ -0,0 +1,364 @@
+package pterm
+
+import (
+ "fmt"
+ "io"
+ "runtime"
+ "strings"
+
+ "github.com/pterm/pterm/internal"
+)
+
+var (
+ // GrayBoxStyle wraps text in a gray box.
+ GrayBoxStyle = NewStyle(BgGray, FgLightWhite)
+)
+
+var (
+ // Info returns a PrefixPrinter, which can be used to print text with an "info" Prefix.
+ Info = PrefixPrinter{
+ MessageStyle: &ThemeDefault.InfoMessageStyle,
+ Prefix: Prefix{
+ Style: &ThemeDefault.InfoPrefixStyle,
+ Text: "INFO",
+ },
+ Writer: defaultWriter,
+ }
+
+ // Warning returns a PrefixPrinter, which can be used to print text with a "warning" Prefix.
+ Warning = PrefixPrinter{
+ MessageStyle: &ThemeDefault.WarningMessageStyle,
+ Prefix: Prefix{
+ Style: &ThemeDefault.WarningPrefixStyle,
+ Text: "WARNING",
+ },
+ Writer: defaultWriter,
+ }
+
+ // Success returns a PrefixPrinter, which can be used to print text with a "success" Prefix.
+ Success = PrefixPrinter{
+ MessageStyle: &ThemeDefault.SuccessMessageStyle,
+ Prefix: Prefix{
+ Style: &ThemeDefault.SuccessPrefixStyle,
+ Text: "SUCCESS",
+ },
+ Writer: defaultWriter,
+ }
+
+ // Error returns a PrefixPrinter, which can be used to print text with an "error" Prefix.
+ Error = PrefixPrinter{
+ MessageStyle: &ThemeDefault.ErrorMessageStyle,
+ Prefix: Prefix{
+ Style: &ThemeDefault.ErrorPrefixStyle,
+ Text: " ERROR ",
+ },
+ Writer: defaultWriter,
+ }
+
+ // Fatal returns a PrefixPrinter, which can be used to print text with a "fatal" Prefix.
+ // NOTICE: Fatal terminates the application immediately!
+ Fatal = PrefixPrinter{
+ MessageStyle: &ThemeDefault.FatalMessageStyle,
+ Prefix: Prefix{
+ Style: &ThemeDefault.FatalPrefixStyle,
+ Text: " FATAL ",
+ },
+ Fatal: true,
+ Writer: defaultWriter,
+ }
+
+ // Debug returns a PrefixPrinter, which can be used to print debug messages. By default, it only prints if PrintDebugMessages is true.
+ // You can change PrintDebugMessages with EnableDebugMessages and DisableDebugMessages, or by setting the variable itself.
+ Debug = PrefixPrinter{
+ MessageStyle: &ThemeDefault.DebugMessageStyle,
+ Prefix: Prefix{
+ Text: " DEBUG ",
+ Style: &ThemeDefault.DebugPrefixStyle,
+ },
+ Debugger: true,
+ Writer: defaultWriter,
+ }
+
+ // Description returns a PrefixPrinter, which can be used to print text with a "description" Prefix.
+ Description = PrefixPrinter{
+ MessageStyle: &ThemeDefault.DescriptionMessageStyle,
+ Prefix: Prefix{
+ Style: &ThemeDefault.DescriptionPrefixStyle,
+ Text: "Description",
+ },
+ Writer: defaultWriter,
+ }
+)
+
+// PrefixPrinter is the printer used to print a Prefix.
+type PrefixPrinter struct {
+ Prefix Prefix
+ Scope Scope
+ MessageStyle *Style
+ Fatal bool
+ ShowLineNumber bool
+ LineNumberOffset int
+ Writer io.Writer
+ // If Debugger is true, the printer will only print if PrintDebugMessages is set to true.
+ // You can change PrintDebugMessages with EnableDebugMessages and DisableDebugMessages, or by setting the variable itself.
+ Debugger bool
+}
+
+// WithPrefix adds a custom prefix to the printer.
+func (p PrefixPrinter) WithPrefix(prefix Prefix) *PrefixPrinter {
+ p.Prefix = prefix
+ return &p
+}
+
+// WithScope adds a scope to the Prefix.
+func (p PrefixPrinter) WithScope(scope Scope) *PrefixPrinter {
+ p.Scope = scope
+ return &p
+}
+
+// WithMessageStyle adds a custom prefix to the printer.
+func (p PrefixPrinter) WithMessageStyle(style *Style) *PrefixPrinter {
+ p.MessageStyle = style
+ return &p
+}
+
+// WithFatal sets if the printer should panic after printing.
+// NOTE:
+// The printer will only panic if either PrefixPrinter.Println, PrefixPrinter.Print
+// or PrefixPrinter.Printf is called.
+func (p PrefixPrinter) WithFatal(b ...bool) *PrefixPrinter {
+ p.Fatal = internal.WithBoolean(b)
+ return &p
+}
+
+// WithShowLineNumber sets if the printer should print the line number from where it's called in a go file.
+func (p PrefixPrinter) WithShowLineNumber(b ...bool) *PrefixPrinter {
+ p.ShowLineNumber = internal.WithBoolean(b)
+ return &p
+}
+
+// WithDebugger returns a new Printer with specific Debugger value.
+// If Debugger is true, the printer will only print if PrintDebugMessages is set to true.
+// You can change PrintDebugMessages with EnableDebugMessages and DisableDebugMessages, or by setting the variable itself.
+func (p PrefixPrinter) WithDebugger(b ...bool) *PrefixPrinter {
+ p.Debugger = internal.WithBoolean(b)
+ return &p
+}
+
+// WithLineNumberOffset can be used to exclude a specific amount of calls in the call stack.
+// If you make a wrapper function for example, you can set this to one.
+// The printed line number will then be the line number where your wrapper function is called.
+func (p PrefixPrinter) WithLineNumberOffset(offset int) *PrefixPrinter {
+ p.LineNumberOffset = offset
+ return &p
+}
+
+// WithWriter sets the custom Writer.
+func (p PrefixPrinter) WithWriter(writer io.Writer) *PrefixPrinter {
+ p.Writer = writer
+ return &p
+}
+
+// Sprint formats using the default formats for its operands and returns the resulting string.
+// Spaces are added between operands when neither is a string.
+func (p *PrefixPrinter) Sprint(a ...any) string {
+ m := Sprint(a...)
+ if p.Debugger && !PrintDebugMessages {
+ return ""
+ }
+
+ if RawOutput {
+ if p.Prefix.Text != "" {
+ return Sprintf("%s: %s", strings.TrimSpace(p.Prefix.Text), Sprint(a...))
+ } else {
+ return Sprint(a...)
+ }
+ }
+
+ if p.Prefix.Style == nil {
+ p.Prefix.Style = NewStyle()
+ }
+ if p.Scope.Style == nil {
+ p.Scope.Style = NewStyle()
+ }
+ if p.MessageStyle == nil {
+ p.MessageStyle = NewStyle()
+ }
+
+ var ret strings.Builder
+ var newLine bool
+
+ if strings.HasSuffix(m, "\n") {
+ m = strings.TrimRight(m, "\n")
+ newLine = true
+ }
+
+ messageLines := strings.Split(m, "\n")
+ for i, m := range messageLines {
+ if i == 0 {
+ ret.WriteString(p.GetFormattedPrefix())
+ ret.WriteByte(' ')
+ if p.Scope.Text != "" {
+ ret.WriteString(NewStyle(*p.Scope.Style...).Sprint(" (" + p.Scope.Text + ") "))
+ }
+ ret.WriteString(p.MessageStyle.Sprint(m))
+ } else {
+ ret.WriteByte('\n')
+ ret.WriteString(p.Prefix.Style.Sprint(strings.Repeat(" ", len([]rune(p.Prefix.Text))+2)))
+ ret.WriteByte(' ')
+ ret.WriteString(p.MessageStyle.Sprint(m))
+ }
+ }
+
+ if p.ShowLineNumber {
+ _, fileName, line, _ := runtime.Caller(3 + p.LineNumberOffset)
+ ret.WriteString(FgGray.Sprint("\n└ " + fmt.Sprintf("(%s:%d)\n", fileName, line)))
+ newLine = false
+ }
+
+ if newLine {
+ ret.WriteByte('\n')
+ }
+
+ return Sprint(ret.String())
+}
+
+// Sprintln formats using the default formats for its operands and returns the resulting string.
+// Spaces are always added between operands and a newline is appended.
+func (p PrefixPrinter) Sprintln(a ...any) string {
+ if p.Debugger && !PrintDebugMessages {
+ return ""
+ }
+ str := fmt.Sprintln(a...)
+ return p.Sprint(str)
+}
+
+// Sprintf formats according to a format specifier and returns the resulting string.
+func (p PrefixPrinter) Sprintf(format string, a ...any) string {
+ if p.Debugger && !PrintDebugMessages {
+ return ""
+ }
+ return p.Sprint(Sprintf(format, a...))
+}
+
+// Sprintfln formats according to a format specifier and returns the resulting string.
+// Spaces are always added between operands and a newline is appended.
+func (p PrefixPrinter) Sprintfln(format string, a ...any) string {
+ if p.Debugger && !PrintDebugMessages {
+ return ""
+ }
+ return p.Sprintf(format, a...) + "\n"
+}
+
+// Print formats using the default formats for its operands and writes to standard output.
+// Spaces are added between operands when neither is a string.
+// It returns the number of bytes written and any write error encountered.
+func (p *PrefixPrinter) Print(a ...any) *TextPrinter {
+ tp := TextPrinter(p)
+ if p.Debugger && !PrintDebugMessages {
+ return &tp
+ }
+ p.LineNumberOffset--
+ Fprint(p.Writer, p.Sprint(a...))
+ p.LineNumberOffset++
+ checkFatal(p)
+ return &tp
+}
+
+// Println formats using the default formats for its operands and writes to standard output.
+// Spaces are always added between operands and a newline is appended.
+// It returns the number of bytes written and any write error encountered.
+func (p *PrefixPrinter) Println(a ...any) *TextPrinter {
+ tp := TextPrinter(p)
+ if p.Debugger && !PrintDebugMessages {
+ return &tp
+ }
+ Fprint(p.Writer, p.Sprintln(a...))
+ checkFatal(p)
+ return &tp
+}
+
+// Printf formats according to a format specifier and writes to standard output.
+// It returns the number of bytes written and any write error encountered.
+func (p *PrefixPrinter) Printf(format string, a ...any) *TextPrinter {
+ tp := TextPrinter(p)
+ if p.Debugger && !PrintDebugMessages {
+ return &tp
+ }
+ Fprint(p.Writer, p.Sprintf(format, a...))
+ checkFatal(p)
+ return &tp
+}
+
+// Printfln formats according to a format specifier and writes to standard output.
+// Spaces are always added between operands and a newline is appended.
+// It returns the number of bytes written and any write error encountered.
+func (p *PrefixPrinter) Printfln(format string, a ...any) *TextPrinter {
+ tp := TextPrinter(p)
+ if p.Debugger && !PrintDebugMessages {
+ return &tp
+ }
+ p.LineNumberOffset++
+ Fprint(p.Writer, p.Sprintfln(format, a...))
+ p.LineNumberOffset--
+ checkFatal(p)
+ return &tp
+}
+
+// PrintOnError prints every error which is not nil.
+// If every error is nil, nothing will be printed.
+// This can be used for simple error checking.
+//
+// Note: Use WithFatal(true) or Fatal to panic after the first non-nil error.
+func (p *PrefixPrinter) PrintOnError(a ...any) *TextPrinter {
+ for _, arg := range a {
+ if err, ok := arg.(error); ok {
+ if err != nil {
+ p.Println(err)
+ }
+ }
+ }
+
+ tp := TextPrinter(p)
+ return &tp
+}
+
+// PrintOnErrorf wraps every error which is not nil and prints it.
+// If every error is nil, nothing will be printed.
+// This can be used for simple error checking.
+func (p *PrefixPrinter) PrintOnErrorf(format string, a ...any) *TextPrinter {
+ for _, arg := range a {
+ if err, ok := arg.(error); ok {
+ if err != nil {
+ p.Println(fmt.Errorf(format, err))
+ }
+ }
+ }
+
+ tp := TextPrinter(p)
+ return &tp
+}
+
+// GetFormattedPrefix returns the Prefix as a styled text string.
+func (p PrefixPrinter) GetFormattedPrefix() string {
+ return p.Prefix.Style.Sprint(" " + p.Prefix.Text + " ")
+}
+
+// Prefix contains the data used as the beginning of a printed text via a PrefixPrinter.
+type Prefix struct {
+ Text string
+ Style *Style
+}
+
+// Scope contains the data of the optional scope of a prefix.
+// If it has a text, it will be printed after the Prefix in brackets.
+type Scope struct {
+ Text string
+ Style *Style
+}
+
+func checkFatal(p *PrefixPrinter) {
+ if p.Fatal {
+ panic("")
+ }
+}
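A minimal usage sketch for the predefined prefix printers; the "os" import is assumed, and PrintOnError pairs naturally with calls that return an error:

func prefixExample() {
	pterm.Info.Println("service started")
	pterm.Warning.Printfln("cache miss for key %q", "user:42")
	pterm.Error.WithShowLineNumber().Println("request failed")

	pterm.Error.PrintOnError(os.Chdir("/does/not/exist"))
}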
diff --git a/vendor/github.com/pterm/pterm/print.go b/vendor/github.com/pterm/pterm/print.go
new file mode 100644
index 0000000..fd91d29
--- /dev/null
+++ b/vendor/github.com/pterm/pterm/print.go
@@ -0,0 +1,191 @@
+package pterm
+
+import (
+ "fmt"
+ "io"
+ "os"
+ "strings"
+
+ "github.com/gookit/color"
+)
+
+var defaultWriter io.Writer = os.Stdout
+
+// SetDefaultOutput sets the default output of pterm.
+func SetDefaultOutput(w io.Writer) {
+ defaultWriter = w
+ color.SetOutput(w)
+}
+
+// Sprint formats using the default formats for its operands and returns the resulting string.
+// Spaces are added between operands when neither is a string.
+func Sprint(a ...any) string {
+ return color.Sprint(a...)
+}
+
+// Sprintf formats according to a format specifier and returns the resulting string.
+func Sprintf(format string, a ...any) string {
+ return color.Sprintf(format, a...)
+}
+
+// Sprintfln formats according to a format specifier and returns the resulting string.
+// Spaces are always added between operands and a newline is appended.
+func Sprintfln(format string, a ...any) string {
+ return color.Sprintf(format, a...) + "\n"
+}
+
+// Sprintln returns what Println would print to the terminal.
+func Sprintln(a ...any) string {
+ str := fmt.Sprintln(a...)
+ return Sprint(str)
+}
+
+// Sprinto returns what Printo would print.
+func Sprinto(a ...any) string {
+ return "\r" + Sprint(a...)
+}
+
+// Print formats using the default formats for its operands and writes to standard output.
+// Spaces are added between operands when neither is a string.
+// It returns the number of bytes written and any write error encountered.
+func Print(a ...any) {
+ Fprint(defaultWriter, a...)
+}
+
+// Println formats using the default formats for its operands and writes to standard output.
+// Spaces are always added between operands and a newline is appended.
+// It returns the number of bytes written and any write error encountered.
+func Println(a ...any) {
+ Print(Sprintln(a...))
+}
+
+// Printf formats according to a format specifier and writes to standard output.
+// It returns the number of bytes written and any write error encountered.
+func Printf(format string, a ...any) {
+ Print(Sprintf(format, a...))
+}
+
+// Printfln formats according to a format specifier and writes to standard output.
+// Spaces are always added between operands and a newline is appended.
+// It returns the number of bytes written and any write error encountered.
+func Printfln(format string, a ...any) {
+ Print(Sprintfln(format, a...))
+}
+
+// PrintOnError prints every error which is not nil.
+// If every error is nil, nothing will be printed.
+// This can be used for simple error checking.
+func PrintOnError(a ...any) {
+ for _, arg := range a {
+ if err, ok := arg.(error); ok {
+ if err != nil {
+ Println(err)
+ }
+ }
+ }
+}
+
+// PrintOnErrorf wraps every error which is not nil and prints it.
+// If every error is nil, nothing will be printed.
+// This can be used for simple error checking.
+func PrintOnErrorf(format string, a ...any) {
+ for _, arg := range a {
+ if err, ok := arg.(error); ok {
+ if err != nil {
+ Println(fmt.Errorf(format, err))
+ }
+ }
+ }
+}
+
+// Fprint formats using the default formats for its operands and writes to w.
+// Spaces are added between operands when neither is a string.
+// It returns the number of bytes written and any write error encountered.
+func Fprint(writer io.Writer, a ...any) {
+ if !Output {
+ return
+ }
+
+ var ret string
+ var printed bool
+
+ for _, bar := range ActiveProgressBarPrinters {
+ if bar.IsActive && (bar.Writer == writer || bar.Writer == os.Stderr) {
+ ret += sClearLine()
+ ret += Sprinto(a...)
+ printed = true
+ }
+ }
+
+ for _, spinner := range activeSpinnerPrinters {
+ if spinner.IsActive && (spinner.Writer == writer || spinner.Writer == os.Stderr) {
+ ret += sClearLine()
+ ret += Sprinto(a...)
+ printed = true
+ }
+ }
+
+ if !printed {
+ ret = color.Sprint(Sprint(a...))
+ }
+
+ if writer != nil {
+ color.Fprint(writer, Sprint(ret))
+ } else {
+ color.Print(Sprint(ret))
+ }
+
+ // Refresh all progressbars in case they were overwritten previously. Reference: #302
+ for _, bar := range ActiveProgressBarPrinters {
+ if bar.IsActive {
+ bar.UpdateTitle(bar.Title)
+ }
+ }
+}
+
+// Fprintln formats using the default formats for its operands and writes to w.
+// Spaces are always added between operands and a newline is appended.
+// It returns the number of bytes written and any write error encountered.
+func Fprintln(writer io.Writer, a ...any) {
+ Fprint(writer, Sprint(a...)+"\n")
+}
+
+// Printo overrides the current line in a terminal.
+// If the current line is empty, the text will be printed like with pterm.Print.
+// Example:
+//
+// pterm.Printo("Hello, World")
+// time.Sleep(time.Second)
+// pterm.Printo("Hello, Earth!")
+func Printo(a ...any) {
+ if !Output {
+ return
+ }
+
+ color.Print("\r" + Sprint(a...))
+}
+
+// Fprinto prints Printo to a custom writer.
+func Fprinto(w io.Writer, a ...any) {
+ if !Output {
+ return
+ }
+ if w != nil {
+ color.Fprint(w, "\r", Sprint(a...))
+ } else {
+ color.Print("\r", Sprint(a...))
+ }
+}
+
+// RemoveColorFromString removes color codes from a string.
+func RemoveColorFromString(a ...any) string {
+ return color.ClearCode(Sprint(a...))
+}
+
+func fClearLine(writer io.Writer) {
+ Fprinto(writer, strings.Repeat(" ", GetTerminalWidth()))
+}
+
+func sClearLine() string {
+ return Sprinto(strings.Repeat(" ", GetTerminalWidth()))
+}
diff --git a/vendor/github.com/pterm/pterm/progressbar_printer.go b/vendor/github.com/pterm/pterm/progressbar_printer.go
new file mode 100644
index 0000000..c1083ff
--- /dev/null
+++ b/vendor/github.com/pterm/pterm/progressbar_printer.go
@@ -0,0 +1,355 @@
+package pterm
+
+import (
+ "atomicgo.dev/cursor"
+ "atomicgo.dev/schedule"
+ "fmt"
+ "io"
+ "math"
+ "os"
+ "strings"
+ "time"
+
+ "github.com/gookit/color"
+
+ "github.com/pterm/pterm/internal"
+)
+
+// ActiveProgressBarPrinters contains all running ProgressbarPrinters.
+// Generally, there should only be one active ProgressbarPrinter at a time.
+var ActiveProgressBarPrinters []*ProgressbarPrinter
+
+// DefaultProgressbar is the default ProgressbarPrinter.
+var DefaultProgressbar = ProgressbarPrinter{
+ Total: 100,
+ BarCharacter: "█",
+ LastCharacter: "█",
+ ElapsedTimeRoundingFactor: time.Second,
+ BarStyle: &ThemeDefault.ProgressbarBarStyle,
+ TitleStyle: &ThemeDefault.ProgressbarTitleStyle,
+ ShowTitle: true,
+ ShowCount: true,
+ ShowPercentage: true,
+ ShowElapsedTime: true,
+ BarFiller: Gray("█"),
+ MaxWidth: 80,
+ Writer: os.Stderr,
+}
+
+// ProgressbarPrinter shows a progress animation in the terminal.
+type ProgressbarPrinter struct {
+ Title string
+ Total int
+ Current int
+ BarCharacter string
+ LastCharacter string
+ ElapsedTimeRoundingFactor time.Duration
+ BarFiller string
+ MaxWidth int
+
+ ShowElapsedTime bool
+ ShowCount bool
+ ShowTitle bool
+ ShowPercentage bool
+ RemoveWhenDone bool
+
+ TitleStyle *Style
+ BarStyle *Style
+
+ IsActive bool
+
+ startedAt time.Time
+ rerenderTask *schedule.Task
+
+ Writer io.Writer
+}
+
+// WithTitle sets the name of the ProgressbarPrinter.
+func (p ProgressbarPrinter) WithTitle(name string) *ProgressbarPrinter {
+ p.Title = name
+ return &p
+}
+
+// WithMaxWidth sets the maximum width of the ProgressbarPrinter.
+// If the terminal is smaller than the given width, the terminal width will be used instead.
+// If the width is set to zero, or below, the terminal width will be used.
+func (p ProgressbarPrinter) WithMaxWidth(maxWidth int) *ProgressbarPrinter {
+ p.MaxWidth = maxWidth
+ return &p
+}
+
+// WithTotal sets the total value of the ProgressbarPrinter.
+func (p ProgressbarPrinter) WithTotal(total int) *ProgressbarPrinter {
+ p.Total = total
+ return &p
+}
+
+// WithCurrent sets the current value of the ProgressbarPrinter.
+func (p ProgressbarPrinter) WithCurrent(current int) *ProgressbarPrinter {
+ p.Current = current
+ return &p
+}
+
+// WithBarCharacter sets the bar character of the ProgressbarPrinter.
+func (p ProgressbarPrinter) WithBarCharacter(char string) *ProgressbarPrinter {
+ p.BarCharacter = char
+ return &p
+}
+
+// WithLastCharacter sets the last character of the ProgressbarPrinter.
+func (p ProgressbarPrinter) WithLastCharacter(char string) *ProgressbarPrinter {
+ p.LastCharacter = char
+ return &p
+}
+
+// WithElapsedTimeRoundingFactor sets the rounding factor of the elapsed time.
+func (p ProgressbarPrinter) WithElapsedTimeRoundingFactor(duration time.Duration) *ProgressbarPrinter {
+ p.ElapsedTimeRoundingFactor = duration
+ return &p
+}
+
+// WithShowElapsedTime sets if the elapsed time should be displayed in the ProgressbarPrinter.
+func (p ProgressbarPrinter) WithShowElapsedTime(b ...bool) *ProgressbarPrinter {
+ p.ShowElapsedTime = internal.WithBoolean(b)
+ return &p
+}
+
+// WithShowCount sets if the total and current count should be displayed in the ProgressbarPrinter.
+func (p ProgressbarPrinter) WithShowCount(b ...bool) *ProgressbarPrinter {
+ p.ShowCount = internal.WithBoolean(b)
+ return &p
+}
+
+// WithShowTitle sets if the title should be displayed in the ProgressbarPrinter.
+func (p ProgressbarPrinter) WithShowTitle(b ...bool) *ProgressbarPrinter {
+ p.ShowTitle = internal.WithBoolean(b)
+ return &p
+}
+
+// WithShowPercentage sets if the completed percentage should be displayed in the ProgressbarPrinter.
+func (p ProgressbarPrinter) WithShowPercentage(b ...bool) *ProgressbarPrinter {
+ p.ShowPercentage = internal.WithBoolean(b)
+ return &p
+}
+
+// WithStartedAt sets the time when the ProgressbarPrinter started.
+func (p ProgressbarPrinter) WithStartedAt(t time.Time) *ProgressbarPrinter {
+ p.startedAt = t
+ return &p
+}
+
+// WithTitleStyle sets the style of the title.
+func (p ProgressbarPrinter) WithTitleStyle(style *Style) *ProgressbarPrinter {
+ p.TitleStyle = style
+ return &p
+}
+
+// WithBarStyle sets the style of the bar.
+func (p ProgressbarPrinter) WithBarStyle(style *Style) *ProgressbarPrinter {
+ p.BarStyle = style
+ return &p
+}
+
+// WithRemoveWhenDone sets if the ProgressbarPrinter should be removed when it is done.
+func (p ProgressbarPrinter) WithRemoveWhenDone(b ...bool) *ProgressbarPrinter {
+ p.RemoveWhenDone = internal.WithBoolean(b)
+ return &p
+}
+
+// WithBarFiller sets the filler character for the ProgressbarPrinter.
+func (p ProgressbarPrinter) WithBarFiller(char string) *ProgressbarPrinter {
+ p.BarFiller = char
+ return &p
+}
+
+// WithWriter sets the custom Writer.
+func (p ProgressbarPrinter) WithWriter(writer io.Writer) *ProgressbarPrinter {
+ p.Writer = writer
+ return &p
+}
+
+// SetWriter sets the custom Writer.
+func (p *ProgressbarPrinter) SetWriter(writer io.Writer) {
+ p.Writer = writer
+}
+
+// SetStartedAt sets the time when the ProgressbarPrinter started.
+func (p *ProgressbarPrinter) SetStartedAt(t time.Time) {
+ p.startedAt = t
+}
+
+// ResetTimer resets the timer of the ProgressbarPrinter.
+func (p *ProgressbarPrinter) ResetTimer() {
+ p.startedAt = time.Now()
+}
+
+// Increment current value by one.
+func (p *ProgressbarPrinter) Increment() *ProgressbarPrinter {
+ p.Add(1)
+ return p
+}
+
+// UpdateTitle updates the title and re-renders the progressbar
+func (p *ProgressbarPrinter) UpdateTitle(title string) *ProgressbarPrinter {
+ p.Title = title
+ p.updateProgress()
+ return p
+}
+
+// updateProgress contains the update logic and re-renders the progressbar.
+func (p *ProgressbarPrinter) updateProgress() *ProgressbarPrinter {
+ Fprinto(p.Writer, p.getString())
+ return p
+}
+
+func (p *ProgressbarPrinter) getString() string {
+ if !p.IsActive {
+ return ""
+ }
+ if p.TitleStyle == nil {
+ p.TitleStyle = NewStyle()
+ }
+ if p.BarStyle == nil {
+ p.BarStyle = NewStyle()
+ }
+ if p.Total == 0 {
+ return ""
+ }
+
+ var before string
+ var after string
+ var width int
+
+ if p.MaxWidth <= 0 {
+ width = GetTerminalWidth()
+ } else if GetTerminalWidth() < p.MaxWidth {
+ width = GetTerminalWidth()
+ } else {
+ width = p.MaxWidth
+ }
+
+ if p.ShowTitle {
+ before += p.TitleStyle.Sprint(p.Title) + " "
+ }
+ if p.ShowCount {
+ padding := 1 + int(math.Log10(float64(p.Total)))
+ before += Gray("[") + LightWhite(fmt.Sprintf("%0*d", padding, p.Current)) + Gray("/") + LightWhite(p.Total) + Gray("]") + " "
+ }
+
+ after += " "
+
+ if p.ShowPercentage {
+ currentPercentage := int(internal.PercentageRound(float64(int64(p.Total)), float64(int64(p.Current))))
+ decoratorCurrentPercentage := color.RGB(NewRGB(255, 0, 0).Fade(0, float32(p.Total), float32(p.Current), NewRGB(0, 255, 0)).GetValues()).
+ Sprintf("%3d%%", currentPercentage)
+ after += decoratorCurrentPercentage + " "
+ }
+ if p.ShowElapsedTime {
+ after += "| " + p.parseElapsedTime()
+ }
+
+ barMaxLength := width - len(RemoveColorFromString(before)) - len(RemoveColorFromString(after)) - 1
+
+ barCurrentLength := (p.Current * barMaxLength) / p.Total
+ var barFiller string
+ if barMaxLength-barCurrentLength > 0 {
+ barFiller = strings.Repeat(p.BarFiller, barMaxLength-barCurrentLength)
+ }
+
+ bar := barFiller
+ if barCurrentLength > 0 {
+ bar = p.BarStyle.Sprint(strings.Repeat(p.BarCharacter, barCurrentLength)+p.LastCharacter) + bar
+ }
+
+ return before + bar + after
+}
+
+// Add to current value.
+func (p *ProgressbarPrinter) Add(count int) *ProgressbarPrinter {
+ if p.Total == 0 {
+ return nil
+ }
+
+ p.Current += count
+ p.updateProgress()
+
+ if p.Current >= p.Total {
+ p.Total = p.Current
+ p.updateProgress()
+ p.Stop()
+ }
+ return p
+}
+
+// Start the ProgressbarPrinter.
+func (p ProgressbarPrinter) Start(title ...any) (*ProgressbarPrinter, error) {
+ cursor.Hide()
+ if RawOutput && p.ShowTitle {
+ Fprintln(p.Writer, p.Title)
+ }
+ p.IsActive = true
+ if len(title) != 0 {
+ p.Title = Sprint(title...)
+ }
+ ActiveProgressBarPrinters = append(ActiveProgressBarPrinters, &p)
+ p.startedAt = time.Now()
+
+ p.updateProgress()
+
+ if p.ShowElapsedTime {
+ p.rerenderTask = schedule.Every(time.Second, func() bool {
+ p.updateProgress()
+ return true
+ })
+ }
+
+ return &p, nil
+}
+
+// Stop the ProgressbarPrinter.
+func (p *ProgressbarPrinter) Stop() (*ProgressbarPrinter, error) {
+ if p.rerenderTask != nil && p.rerenderTask.IsActive() {
+ p.rerenderTask.Stop()
+ }
+ cursor.Show()
+
+ if !p.IsActive {
+ return p, nil
+ }
+ p.IsActive = false
+ if p.RemoveWhenDone {
+ fClearLine(p.Writer)
+ Fprinto(p.Writer)
+ } else {
+ Fprintln(p.Writer)
+ }
+ return p, nil
+}
+
+// GenericStart runs Start, but returns a LivePrinter.
+// This is used for the interface LivePrinter.
+// You most likely want to use Start instead of this in your program.
+func (p *ProgressbarPrinter) GenericStart() (*LivePrinter, error) {
+ p2, _ := p.Start()
+ lp := LivePrinter(p2)
+ return &lp, nil
+}
+
+// GenericStop runs Stop, but returns a LivePrinter.
+// This is used for the interface LivePrinter.
+// You most likely want to use Stop instead of this in your program.
+func (p *ProgressbarPrinter) GenericStop() (*LivePrinter, error) {
+ p2, _ := p.Stop()
+ lp := LivePrinter(p2)
+ return &lp, nil
+}
+
+// GetElapsedTime returns the elapsed time, since the ProgressbarPrinter was started.
+func (p *ProgressbarPrinter) GetElapsedTime() time.Duration {
+ return time.Since(p.startedAt)
+}
+
+func (p *ProgressbarPrinter) parseElapsedTime() string {
+ s := p.GetElapsedTime().Round(p.ElapsedTimeRoundingFactor).String()
+ return s
+}
diff --git a/vendor/github.com/pterm/pterm/pterm.go b/vendor/github.com/pterm/pterm/pterm.go
new file mode 100644
index 0000000..f9d76ca
--- /dev/null
+++ b/vendor/github.com/pterm/pterm/pterm.go
@@ -0,0 +1,72 @@
+// Package pterm is a modern go module to beautify console output.
+// It can be used without configuration, but if desired, everything can be customized down to the smallest detail.
+//
+// Official docs are available at: https://docs.pterm.sh
+//
+// View the animated examples here: https://github.com/pterm/pterm#-examples
+package pterm
+
+import (
+ "github.com/gookit/color"
+)
+
+var (
+ // Output completely disables output from pterm if set to false. Can be used in CLI application quiet mode.
+ Output = true
+
+ // PrintDebugMessages sets if messages printed by the DebugPrinter should be printed.
+ PrintDebugMessages = false
+
+ // RawOutput is set to true if pterm.DisableStyling() was called.
+ // The variable indicates that PTerm will not add additional styling to text.
+ // Use pterm.DisableStyling() or pterm.EnableStyling() to change this variable.
+// Changing this variable directly will disable or enable the output of colored text.
+ RawOutput = false
+)
+
+func init() {
+ color.ForceColor()
+}
+
+// EnableOutput enables the output of PTerm.
+func EnableOutput() {
+ Output = true
+}
+
+// DisableOutput disables the output of PTerm.
+func DisableOutput() {
+ Output = false
+}
+
+// EnableDebugMessages enables the output of debug printers.
+func EnableDebugMessages() {
+ PrintDebugMessages = true
+}
+
+// DisableDebugMessages disables the output of debug printers.
+func DisableDebugMessages() {
+ PrintDebugMessages = false
+}
+
+// EnableStyling enables the default PTerm styling.
+// This also calls EnableColor.
+func EnableStyling() {
+ RawOutput = false
+ EnableColor()
+}
+
+// DisableStyling sets PTerm to RawOutput mode and disables all of PTerm's styling.
+// You can use this to print to text files etc.
+// This also calls DisableColor.
+func DisableStyling() {
+ RawOutput = true
+ DisableColor()
+}
+
+// RecalculateTerminalSize updates already initialized terminal dimensions. Has to be called after a terminal resize to guarantee proper rendering. Applies only to new instances.
+func RecalculateTerminalSize() {
+ // keep in sync with DefaultBarChart
+ DefaultBarChart.Width = GetTerminalWidth() * 2 / 3
+ DefaultBarChart.Height = GetTerminalHeight() * 2 / 3
+ DefaultParagraph.MaxWidth = GetTerminalWidth()
+}
diff --git a/vendor/github.com/pterm/pterm/rgb.go b/vendor/github.com/pterm/pterm/rgb.go
new file mode 100644
index 0000000..1b11eea
--- /dev/null
+++ b/vendor/github.com/pterm/pterm/rgb.go
@@ -0,0 +1,298 @@
+package pterm
+
+import (
+ "fmt"
+
+ "github.com/gookit/color"
+
+ "github.com/pterm/pterm/internal"
+)
+
+// RGB color model is an additive color model in which red, green, and blue light are added together in various ways to reproduce a broad array of colors.
+// The name of the model comes from the initials of the three additive primary colors, red, green, and blue.
+// https://en.wikipedia.org/wiki/RGB_color_model
+type RGB struct {
+ R uint8
+ G uint8
+ B uint8
+ Background bool
+}
+
+type RGBStyle struct {
+ Options []Color
+ Foreground, Background RGB
+
+ hasBg bool
+}
+
+// NewRGBStyle returns a new RGBStyle.
+// The foreground color is required, the background color is optional.
+// The colors will be set as is, ignoring the RGB.Background property.
+func NewRGBStyle(foreground RGB, background ...RGB) RGBStyle {
+ var s RGBStyle
+ s.Foreground = foreground
+ if len(background) > 0 {
+ s.Background = background[0]
+ s.hasBg = true
+ }
+ return s
+}
+
+// AddOptions adds options to the RGBStyle.
+func (p RGBStyle) AddOptions(opts ...Color) RGBStyle {
+ p.Options = append(p.Options, opts...)
+ return p
+}
+
+// Print formats using the default formats for its operands and writes to standard output.
+// Spaces are added between operands when neither is a string.
+// It returns a *TextPrinter, which can be used for chaining.
+func (p RGBStyle) Print(a ...any) *TextPrinter {
+ Print(p.Sprint(a...))
+ tp := TextPrinter(p)
+ return &tp
+}
+
+// Println formats using the default formats for its operands and writes to standard output.
+// Spaces are always added between operands and a newline is appended.
+// It returns a *TextPrinter, which can be used for chaining.
+func (p RGBStyle) Println(a ...any) *TextPrinter {
+ Println(p.Sprint(a...))
+ tp := TextPrinter(p)
+ return &tp
+}
+
+// Printf formats according to a format specifier and writes to standard output.
+// It returns a *TextPrinter, which can be used for chaining.
+func (p RGBStyle) Printf(format string, a ...any) *TextPrinter {
+ Printf(format, p.Sprint(a...))
+ tp := TextPrinter(p)
+ return &tp
+}
+
+// Printfln formats according to a format specifier and writes to standard output.
+// Spaces are always added between operands and a newline is appended.
+// It returns a *TextPrinter, which can be used for chaining.
+func (p RGBStyle) Printfln(format string, a ...any) *TextPrinter {
+ Printf(format, p.Sprint(a...))
+ tp := TextPrinter(p)
+ return &tp
+}
+
+// PrintOnError prints every error which is not nil.
+// If every error is nil, nothing will be printed.
+// This can be used for simple error checking.
+func (p RGBStyle) PrintOnError(a ...any) *TextPrinter {
+ for _, arg := range a {
+ if err, ok := arg.(error); ok {
+ if err != nil {
+ p.Println(err)
+ }
+ }
+ }
+
+ tp := TextPrinter(p)
+ return &tp
+}
+
+// PrintOnErrorf wraps every error which is not nil and prints it.
+// If every error is nil, nothing will be printed.
+// This can be used for simple error checking.
+func (p RGBStyle) PrintOnErrorf(format string, a ...any) *TextPrinter {
+ for _, arg := range a {
+ if err, ok := arg.(error); ok {
+ if err != nil {
+ p.Println(fmt.Errorf(format, err))
+ }
+ }
+ }
+
+ tp := TextPrinter(p)
+ return &tp
+}
+
+// Sprint formats using the default formats for its operands and returns the resulting string.
+// Spaces are added between operands when neither is a string.
+func (p RGBStyle) Sprint(a ...any) string {
+ var rgbStyle *color.RGBStyle
+ if !p.hasBg {
+ rgbStyle = color.NewRGBStyle(color.RGB(p.Foreground.R, p.Foreground.G, p.Foreground.B))
+ } else {
+ rgbStyle = color.NewRGBStyle(color.RGB(p.Foreground.R, p.Foreground.G, p.Foreground.B), color.RGB(p.Background.R, p.Background.G, p.Background.B))
+ }
+ if len(p.Options) > 0 {
+ for _, opt := range p.Options {
+ rgbStyle.AddOpts(color.Color(opt))
+ }
+ }
+ return rgbStyle.Sprint(a...)
+}
+
+// Sprintln formats using the default formats for its operands and returns the resulting string.
+// Spaces are always added between operands and a newline is appended.
+func (p RGBStyle) Sprintln(a ...any) string {
+ return p.Sprint(a...) + "\n"
+}
+
+// Sprintf formats according to a format specifier and returns the resulting string.
+func (p RGBStyle) Sprintf(format string, a ...any) string {
+ return p.Sprint(Sprintf(format, a...))
+}
+
+// Sprintfln formats according to a format specifier and returns the resulting string.
+// Spaces are always added between operands and a newline is appended.
+func (p RGBStyle) Sprintfln(format string, a ...any) string {
+ return p.Sprintf(format, a...) + "\n"
+}
+
+// GetValues returns the RGB values separately.
+func (p RGB) GetValues() (r, g, b uint8) {
+ return p.R, p.G, p.B
+}
+
+// NewRGB returns a new RGB.
+func NewRGB(r, g, b uint8, background ...bool) RGB {
+ var bg bool
+
+ if len(background) > 0 {
+ bg = background[0]
+ }
+
+ return RGB{R: r, G: g, B: b, Background: bg}
+}
+
+// Fade fades one RGB value to another RGB value (optionally through intermediate RGB values), based on a minimum, maximum and current value.
+func (p RGB) Fade(minRGB, maxRGB, current float32, end ...RGB) RGB {
+ if maxRGB == current {
+ return end[len(end)-1]
+ }
+ if minRGB < 0 {
+ maxRGB -= minRGB
+ current -= minRGB
+ minRGB = 0
+ }
+ // #nosec G115
+ if len(end) == 1 {
+ return RGB{
+ R: uint8(internal.MapRangeToRange(minRGB, maxRGB, float32(p.R), float32(end[0].R), current)), //nolint:gosec
+ G: uint8(internal.MapRangeToRange(minRGB, maxRGB, float32(p.G), float32(end[0].G), current)), //nolint:gosec
+ B: uint8(internal.MapRangeToRange(minRGB, maxRGB, float32(p.B), float32(end[0].B), current)), //nolint:gosec
+ Background: p.Background,
+ }
+ } else if len(end) > 1 {
+ f := (maxRGB - minRGB) / float32(len(end))
+ tempCurrent := current
+ if f > current {
+ return p.Fade(minRGB, f, current, end[0])
+ } else {
+ for i := 0; i < len(end)-1; i++ {
+ tempCurrent -= f
+ if f > tempCurrent {
+ return end[i].Fade(minRGB, minRGB+f, tempCurrent, end[i+1])
+ }
+ }
+ }
+ }
+ return p
+}
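+
+// A small illustrative sketch (not part of the library): fading from red to
+// green over a 0..100 range, as the progressbar does for its percentage.
+// The concrete values are only examples.
+//
+//	start := NewRGB(255, 0, 0)
+//	green := NewRGB(0, 255, 0)
+//	mid := start.Fade(0, 100, 50, green) // roughly halfway between red and green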
+
+// Sprint formats using the default formats for its operands and returns the resulting string.
+// Spaces are added between operands when neither is a string.
+func (p RGB) Sprint(a ...any) string {
+ if p.Background {
+ return color.RGB(p.R, p.G, p.B, p.Background).Sprint(a...) + "\033[0m\033[K"
+ }
+ return color.RGB(p.R, p.G, p.B, p.Background).Sprint(a...)
+}
+
+// Sprintln formats using the default formats for its operands and returns the resulting string.
+// Spaces are always added between operands and a newline is appended.
+func (p RGB) Sprintln(a ...any) string {
+ return p.Sprint(Sprintln(a...))
+}
+
+// Sprintf formats according to a format specifier and returns the resulting string.
+func (p RGB) Sprintf(format string, a ...any) string {
+ return p.Sprint(Sprintf(format, a...))
+}
+
+// Sprintfln formats according to a format specifier and returns the resulting string.
+// Spaces are always added between operands and a newline is appended.
+func (p RGB) Sprintfln(format string, a ...any) string {
+ return p.Sprintf(format, a...) + "\n"
+}
+
+// Print formats using the default formats for its operands and writes to standard output.
+// Spaces are added between operands when neither is a string.
+// It returns a *TextPrinter, which can be used for chaining.
+func (p RGB) Print(a ...any) *TextPrinter {
+ Print(p.Sprint(a...))
+ tp := TextPrinter(p)
+ return &tp
+}
+
+// Println formats using the default formats for its operands and writes to standard output.
+// Spaces are always added between operands and a newline is appended.
+// It returns a *TextPrinter, which can be used for chaining.
+func (p RGB) Println(a ...any) *TextPrinter {
+ Print(p.Sprintln(a...))
+ tp := TextPrinter(p)
+ return &tp
+}
+
+// Printf formats according to a format specifier and writes to standard output.
+// It returns a *TextPrinter, which can be used for chaining.
+func (p RGB) Printf(format string, a ...any) *TextPrinter {
+ Print(p.Sprintf(format, a...))
+ tp := TextPrinter(p)
+ return &tp
+}
+
+// Printfln formats according to a format specifier and writes to standard output.
+// Spaces are always added between operands and a newline is appended.
+// It returns a *TextPrinter, which can be used for chaining.
+func (p RGB) Printfln(format string, a ...any) *TextPrinter {
+ Print(p.Sprintfln(format, a...))
+ tp := TextPrinter(p)
+ return &tp
+}
+
+// PrintOnError prints every error which is not nil.
+// If every error is nil, nothing will be printed.
+// This can be used for simple error checking.
+func (p RGB) PrintOnError(a ...any) *TextPrinter {
+ for _, arg := range a {
+ if err, ok := arg.(error); ok {
+ if err != nil {
+ p.Println(err)
+ }
+ }
+ }
+
+ tp := TextPrinter(p)
+ return &tp
+}
+
+// PrintOnErrorf wraps every error which is not nil and prints it.
+// If every error is nil, nothing will be printed.
+// This can be used for simple error checking.
+func (p RGB) PrintOnErrorf(format string, a ...any) *TextPrinter {
+ for _, arg := range a {
+ if err, ok := arg.(error); ok {
+ if err != nil {
+ p.Println(fmt.Errorf(format, err))
+ }
+ }
+ }
+
+ tp := TextPrinter(p)
+ return &tp
+}
+
+func (p RGB) ToRGBStyle() RGBStyle {
+ if p.Background {
+ return RGBStyle{Background: p}
+ }
+
+ return RGBStyle{Foreground: p}
+}
diff --git a/vendor/github.com/pterm/pterm/section_printer.go b/vendor/github.com/pterm/pterm/section_printer.go
new file mode 100644
index 0000000..9475f42
--- /dev/null
+++ b/vendor/github.com/pterm/pterm/section_printer.go
@@ -0,0 +1,175 @@
+package pterm
+
+import (
+ "fmt"
+ "io"
+ "strings"
+)
+
+// DefaultSection is the default section printer.
+var DefaultSection = SectionPrinter{
+ Style: &ThemeDefault.SectionStyle,
+ Level: 1,
+ TopPadding: 1,
+ BottomPadding: 1,
+ IndentCharacter: "#",
+}
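+
+// A minimal usage sketch (not part of the library), based on the printer
+// defined below; the section titles are placeholders:
+//
+//	DefaultSection.Println("Chapter 1")
+//	DefaultSection.WithLevel(2).Println("Section 1.1")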
+
+// SectionPrinter prints a new section title.
+// It can be used to structure longer text, or different chapters of your program.
+type SectionPrinter struct {
+ Style *Style
+ Level int
+ IndentCharacter string
+ TopPadding int
+ BottomPadding int
+ Writer io.Writer
+}
+
+// WithStyle returns a new SectionPrinter with a specific style.
+func (p SectionPrinter) WithStyle(style *Style) *SectionPrinter {
+ p.Style = style
+ return &p
+}
+
+// WithLevel returns a new SectionPrinter with a specific level.
+func (p SectionPrinter) WithLevel(level int) *SectionPrinter {
+ p.Level = level
+ return &p
+}
+
+// WithIndentCharacter returns a new SectionPrinter with a specific IndentCharacter.
+func (p SectionPrinter) WithIndentCharacter(char string) *SectionPrinter {
+ p.IndentCharacter = char
+ return &p
+}
+
+// WithTopPadding returns a new SectionPrinter with a specific top padding.
+func (p SectionPrinter) WithTopPadding(padding int) *SectionPrinter {
+ p.TopPadding = padding
+ return &p
+}
+
+// WithBottomPadding returns a new SectionPrinter with a specific bottom padding.
+func (p SectionPrinter) WithBottomPadding(padding int) *SectionPrinter {
+ p.BottomPadding = padding
+ return &p
+}
+
+// WithWriter sets the custom Writer.
+func (p SectionPrinter) WithWriter(writer io.Writer) *SectionPrinter {
+ p.Writer = writer
+ return &p
+}
+
+// Sprint formats using the default formats for its operands and returns the resulting string.
+// Spaces are added between operands when neither is a string.
+func (p SectionPrinter) Sprint(a ...any) string {
+ if p.Style == nil {
+ p.Style = NewStyle()
+ }
+
+ var ret strings.Builder
+
+ for i := 0; i < p.TopPadding; i++ {
+ ret.WriteByte('\n')
+ }
+
+ if p.Level > 0 {
+ ret.WriteString(strings.Repeat(p.IndentCharacter, p.Level))
+ ret.WriteByte(' ')
+ }
+
+ ret.WriteString(p.Style.Sprint(a...))
+
+ for i := 0; i < p.BottomPadding; i++ {
+ ret.WriteByte('\n')
+ }
+
+ return ret.String()
+}
+
+// Sprintln formats using the default formats for its operands and returns the resulting string.
+// Spaces are always added between operands and a newline is appended.
+func (p SectionPrinter) Sprintln(a ...any) string {
+ str := fmt.Sprintln(a...)
+ return Sprint(p.Sprint(str))
+}
+
+// Sprintf formats according to a format specifier and returns the resulting string.
+func (p SectionPrinter) Sprintf(format string, a ...any) string {
+ return p.Sprint(Sprintf(format, a...))
+}
+
+// Sprintfln formats according to a format specifier and returns the resulting string.
+// Spaces are always added between operands and a newline is appended.
+func (p SectionPrinter) Sprintfln(format string, a ...any) string {
+ return p.Sprintf(format, a...) + "\n"
+}
+
+// Print formats using the default formats for its operands and writes to standard output.
+// Spaces are added between operands when neither is a string.
+// It returns a *TextPrinter, which can be used for chaining.
+func (p *SectionPrinter) Print(a ...any) *TextPrinter {
+ Fprint(p.Writer, p.Sprint(a...))
+ tp := TextPrinter(p)
+ return &tp
+}
+
+// Println formats using the default formats for its operands and writes to standard output.
+// Spaces are always added between operands and a newline is appended.
+// It returns a *TextPrinter, which can be used for chaining.
+func (p *SectionPrinter) Println(a ...any) *TextPrinter {
+ Fprint(p.Writer, p.Sprintln(a...))
+ tp := TextPrinter(p)
+ return &tp
+}
+
+// Printf formats according to a format specifier and writes to standard output.
+// It returns a *TextPrinter, which can be used for chaining.
+func (p *SectionPrinter) Printf(format string, a ...any) *TextPrinter {
+ Fprint(p.Writer, p.Sprintf(format, a...))
+ tp := TextPrinter(p)
+ return &tp
+}
+
+// Printfln formats according to a format specifier and writes to standard output.
+// Spaces are always added between operands and a newline is appended.
+// It returns a *TextPrinter, which can be used for chaining.
+func (p *SectionPrinter) Printfln(format string, a ...any) *TextPrinter {
+ Fprint(p.Writer, p.Sprintfln(format, a...))
+ tp := TextPrinter(p)
+ return &tp
+}
+
+// PrintOnError prints every error which is not nil.
+// If every error is nil, nothing will be printed.
+// This can be used for simple error checking.
+func (p *SectionPrinter) PrintOnError(a ...any) *TextPrinter {
+ for _, arg := range a {
+ if err, ok := arg.(error); ok {
+ if err != nil {
+ p.Println(err)
+ }
+ }
+ }
+
+ tp := TextPrinter(p)
+ return &tp
+}
+
+// PrintOnErrorf wraps every error which is not nil and prints it.
+// If every error is nil, nothing will be printed.
+// This can be used for simple error checking.
+func (p *SectionPrinter) PrintOnErrorf(format string, a ...any) *TextPrinter {
+ for _, arg := range a {
+ if err, ok := arg.(error); ok {
+ if err != nil {
+ p.Println(fmt.Errorf(format, err))
+ }
+ }
+ }
+
+ tp := TextPrinter(p)
+ return &tp
+}
diff --git a/vendor/github.com/pterm/pterm/slog_handler.go b/vendor/github.com/pterm/pterm/slog_handler.go
new file mode 100644
index 0000000..d607afd
--- /dev/null
+++ b/vendor/github.com/pterm/pterm/slog_handler.go
@@ -0,0 +1,90 @@
+package pterm
+
+import (
+ "context"
+
+ "log/slog"
+)
+
+type SlogHandler struct {
+ logger *Logger
+ attrs []slog.Attr
+}
+
+// Enabled returns true if the given level is enabled.
+func (s *SlogHandler) Enabled(ctx context.Context, level slog.Level) bool {
+ switch level {
+ case slog.LevelDebug:
+ return s.logger.CanPrint(LogLevelDebug)
+ case slog.LevelInfo:
+ return s.logger.CanPrint(LogLevelInfo)
+ case slog.LevelWarn:
+ return s.logger.CanPrint(LogLevelWarn)
+ case slog.LevelError:
+ return s.logger.CanPrint(LogLevelError)
+ }
+ return false
+}
+
+// Handle handles the given record.
+func (s *SlogHandler) Handle(ctx context.Context, record slog.Record) error {
+ level := record.Level
+ message := record.Message
+
+ // Convert slog Attrs to a map.
+ keyValsMap := make(map[string]any)
+
+ record.Attrs(func(attr slog.Attr) bool {
+ keyValsMap[attr.Key] = attr.Value
+ return true
+ })
+
+ for _, attr := range s.attrs {
+ keyValsMap[attr.Key] = attr.Value
+ }
+
+ args := s.logger.ArgsFromMap(keyValsMap)
+
+ // Wrapping args inside another slice to match [][]LoggerArgument
+ argsWrapped := [][]LoggerArgument{args}
+
+ logger := s.logger
+
+ // Must be done here, see https://github.com/pterm/pterm/issues/608#issuecomment-1876001650
+ if logger.CallerOffset == 0 {
+ logger = logger.WithCallerOffset(3)
+ }
+
+ switch level {
+ case slog.LevelDebug:
+ logger.Debug(message, argsWrapped...)
+ case slog.LevelInfo:
+ logger.Info(message, argsWrapped...)
+ case slog.LevelWarn:
+ logger.Warn(message, argsWrapped...)
+ case slog.LevelError:
+ logger.Error(message, argsWrapped...)
+ default:
+ logger.Print(message, argsWrapped...)
+ }
+
+ return nil
+}
+
+// WithAttrs returns a new handler with the given attributes.
+func (s *SlogHandler) WithAttrs(attrs []slog.Attr) slog.Handler {
+ newS := *s
+ newS.attrs = attrs
+ return &newS
+}
+
+// WithGroup is not yet supported.
+func (s *SlogHandler) WithGroup(name string) slog.Handler {
+ // Grouping is not yet supported by pterm.
+ return s
+}
+
+// NewSlogHandler returns a new logging handler that can be integrated with log/slog.
+func NewSlogHandler(logger *Logger) *SlogHandler {
+ return &SlogHandler{logger: logger}
+}
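+
+// A minimal integration sketch (not part of the library). It assumes a
+// pterm Logger value is available to wrap (for example pterm.DefaultLogger,
+// which is defined elsewhere in the package):
+//
+//	handler := NewSlogHandler(&DefaultLogger)
+//	logger := slog.New(handler)
+//	logger.Info("something happened", "user", "alice")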
diff --git a/vendor/github.com/pterm/pterm/spinner_printer.go b/vendor/github.com/pterm/pterm/spinner_printer.go
new file mode 100644
index 0000000..443d45d
--- /dev/null
+++ b/vendor/github.com/pterm/pterm/spinner_printer.go
@@ -0,0 +1,277 @@
+package pterm
+
+import (
+ "io"
+ "os"
+ "time"
+
+ "github.com/pterm/pterm/internal"
+)
+
+var activeSpinnerPrinters []*SpinnerPrinter
+
+// DefaultSpinner is the default SpinnerPrinter.
+var DefaultSpinner = SpinnerPrinter{
+ Sequence: []string{"▀ ", " ▀", " ▄", "▄ "},
+ Style: &ThemeDefault.SpinnerStyle,
+ Delay: time.Millisecond * 200,
+ ShowTimer: true,
+ TimerRoundingFactor: time.Second,
+ TimerStyle: &ThemeDefault.TimerStyle,
+ MessageStyle: &ThemeDefault.SpinnerTextStyle,
+ InfoPrinter: &Info,
+ SuccessPrinter: &Success,
+ FailPrinter: &Error,
+ WarningPrinter: &Warning,
+ Writer: os.Stderr,
+}
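+
+// A minimal usage sketch (not part of the library), based on the methods
+// defined below; the texts are placeholders:
+//
+//	spinner, _ := DefaultSpinner.Start("Doing something...")
+//	// ... do the work ...
+//	spinner.Success("Done")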
+
+// SpinnerPrinter is a loading animation, which can be used if the progress is unknown.
+// It's an animation loop which can display a text and can resolve into an info, success, warning or fail message.
+// A TextPrinter is used to display all output after the SpinnerPrinter is done.
+type SpinnerPrinter struct {
+ Text string
+ Sequence []string
+ Style *Style
+ Delay time.Duration
+ MessageStyle *Style
+ InfoPrinter TextPrinter
+ SuccessPrinter TextPrinter
+ FailPrinter TextPrinter
+ WarningPrinter TextPrinter
+ RemoveWhenDone bool
+ ShowTimer bool
+ TimerRoundingFactor time.Duration
+ TimerStyle *Style
+
+ IsActive bool
+
+ startedAt time.Time
+ currentSequence string
+
+ Writer io.Writer
+}
+
+// WithText adds a text to the SpinnerPrinter.
+func (s SpinnerPrinter) WithText(text string) *SpinnerPrinter {
+ s.Text = text
+ return &s
+}
+
+// WithSequence adds a sequence to the SpinnerPrinter.
+func (s SpinnerPrinter) WithSequence(sequence ...string) *SpinnerPrinter {
+ s.Sequence = sequence
+ return &s
+}
+
+// WithStyle adds a style to the SpinnerPrinter.
+func (s SpinnerPrinter) WithStyle(style *Style) *SpinnerPrinter {
+ s.Style = style
+ return &s
+}
+
+// WithDelay adds a delay to the SpinnerPrinter.
+func (s SpinnerPrinter) WithDelay(delay time.Duration) *SpinnerPrinter {
+ s.Delay = delay
+ return &s
+}
+
+// WithMessageStyle adds a style to the SpinnerPrinter message.
+func (s SpinnerPrinter) WithMessageStyle(style *Style) *SpinnerPrinter {
+ s.MessageStyle = style
+ return &s
+}
+
+// WithRemoveWhenDone removes the SpinnerPrinter after it is done.
+func (s SpinnerPrinter) WithRemoveWhenDone(b ...bool) *SpinnerPrinter {
+ s.RemoveWhenDone = internal.WithBoolean(b)
+ return &s
+}
+
+// WithShowTimer shows how long the spinner is running.
+func (s SpinnerPrinter) WithShowTimer(b ...bool) *SpinnerPrinter {
+ s.ShowTimer = internal.WithBoolean(b)
+ return &s
+}
+
+// WithStartedAt sets the time when the SpinnerPrinter started.
+func (s SpinnerPrinter) WithStartedAt(t time.Time) *SpinnerPrinter {
+ s.startedAt = t
+ return &s
+}
+
+// WithTimerRoundingFactor sets the rounding factor for the timer.
+func (s SpinnerPrinter) WithTimerRoundingFactor(factor time.Duration) *SpinnerPrinter {
+ s.TimerRoundingFactor = factor
+ return &s
+}
+
+// WithTimerStyle adds a style to the SpinnerPrinter timer.
+func (s SpinnerPrinter) WithTimerStyle(style *Style) *SpinnerPrinter {
+ s.TimerStyle = style
+ return &s
+}
+
+// WithWriter sets the custom Writer.
+func (p SpinnerPrinter) WithWriter(writer io.Writer) *SpinnerPrinter {
+ p.Writer = writer
+ return &p
+}
+
+// SetWriter sets the custom Writer.
+func (p *SpinnerPrinter) SetWriter(writer io.Writer) {
+ p.Writer = writer
+}
+
+// ResetTimer resets the timer of the SpinnerPrinter.
+func (s *SpinnerPrinter) ResetTimer() {
+ s.startedAt = time.Now()
+}
+
+// SetStartedAt sets the time when the SpinnerPrinter started.
+func (s *SpinnerPrinter) SetStartedAt(t time.Time) {
+ s.startedAt = t
+}
+
+// UpdateText updates the message of the active SpinnerPrinter.
+// Can be used live.
+func (s *SpinnerPrinter) UpdateText(text string) {
+ s.Text = text
+ if !RawOutput {
+ Fprinto(s.Writer, s.Style.Sprint(s.currentSequence)+" "+s.MessageStyle.Sprint(s.Text))
+ } else {
+ Fprintln(s.Writer, s.Text)
+ }
+}
+
+// Start the SpinnerPrinter.
+func (s SpinnerPrinter) Start(text ...any) (*SpinnerPrinter, error) {
+ s.IsActive = true
+ s.startedAt = time.Now()
+ activeSpinnerPrinters = append(activeSpinnerPrinters, &s)
+
+ if len(text) != 0 {
+ s.Text = Sprint(text...)
+ }
+
+ if RawOutput {
+ Fprintln(s.Writer, s.Text)
+ }
+
+ go func() {
+ for s.IsActive {
+ for _, seq := range s.Sequence {
+ if !s.IsActive {
+ continue
+ }
+ if RawOutput {
+ time.Sleep(s.Delay)
+ continue
+ }
+
+ var timer string
+ if s.ShowTimer {
+ timer = " (" + time.Since(s.startedAt).Round(s.TimerRoundingFactor).String() + ")"
+ }
+ Fprinto(s.Writer, s.Style.Sprint(seq)+" "+s.MessageStyle.Sprint(s.Text)+s.TimerStyle.Sprint(timer))
+ s.currentSequence = seq
+ time.Sleep(s.Delay)
+ }
+ }
+ }()
+ return &s, nil
+}
+
+// Stop terminates the SpinnerPrinter immediately.
+// The SpinnerPrinter will not resolve into anything.
+func (s *SpinnerPrinter) Stop() error {
+ if !s.IsActive {
+ return nil
+ }
+ s.IsActive = false
+ if s.RemoveWhenDone {
+ fClearLine(s.Writer)
+ Fprinto(s.Writer)
+ } else {
+ Fprintln(s.Writer)
+ }
+ return nil
+}
+
+// GenericStart runs Start, but returns a LivePrinter.
+// This is used for the interface LivePrinter.
+// You most likely want to use Start instead of this in your program.
+func (s *SpinnerPrinter) GenericStart() (*LivePrinter, error) {
+ p2, _ := s.Start()
+ lp := LivePrinter(p2)
+ return &lp, nil
+}
+
+// GenericStop runs Stop, but returns a LivePrinter.
+// This is used for the interface LivePrinter.
+// You most likely want to use Stop instead of this in your program.
+func (s *SpinnerPrinter) GenericStop() (*LivePrinter, error) {
+ _ = s.Stop()
+ lp := LivePrinter(s)
+ return &lp, nil
+}
+
+// Info displays an info message
+// If no message is given, the text of the SpinnerPrinter will be reused as the default message.
+func (s *SpinnerPrinter) Info(message ...any) {
+ if s.InfoPrinter == nil {
+ s.InfoPrinter = &Info
+ }
+
+ if len(message) == 0 {
+ message = []any{s.Text}
+ }
+ fClearLine(s.Writer)
+ Fprinto(s.Writer, s.InfoPrinter.Sprint(message...))
+ _ = s.Stop()
+}
+
+// Success displays the success printer.
+// If no message is given, the text of the SpinnerPrinter will be reused as the default message.
+func (s *SpinnerPrinter) Success(message ...any) {
+ if s.SuccessPrinter == nil {
+ s.SuccessPrinter = &Success
+ }
+
+ if len(message) == 0 {
+ message = []any{s.Text}
+ }
+ fClearLine(s.Writer)
+ Fprinto(s.Writer, s.SuccessPrinter.Sprint(message...))
+ _ = s.Stop()
+}
+
+// Fail displays the fail printer.
+// If no message is given, the text of the SpinnerPrinter will be reused as the default message.
+func (s *SpinnerPrinter) Fail(message ...any) {
+ if s.FailPrinter == nil {
+ s.FailPrinter = &Error
+ }
+
+ if len(message) == 0 {
+ message = []any{s.Text}
+ }
+ fClearLine(s.Writer)
+ Fprinto(s.Writer, s.FailPrinter.Sprint(message...))
+ _ = s.Stop()
+}
+
+// Warning displays the warning printer.
+// If no message is given, the text of the SpinnerPrinter will be reused as the default message.
+func (s *SpinnerPrinter) Warning(message ...any) {
+ if s.WarningPrinter == nil {
+ s.WarningPrinter = &Warning
+ }
+
+ if len(message) == 0 {
+ message = []any{s.Text}
+ }
+ fClearLine(s.Writer)
+ Fprinto(s.Writer, s.WarningPrinter.Sprint(message...))
+ _ = s.Stop()
+}
diff --git a/vendor/github.com/pterm/pterm/table_printer.go b/vendor/github.com/pterm/pterm/table_printer.go
new file mode 100644
index 0000000..eb4a5b7
--- /dev/null
+++ b/vendor/github.com/pterm/pterm/table_printer.go
@@ -0,0 +1,306 @@
+package pterm
+
+import (
+ "encoding/csv"
+ "io"
+ "strings"
+
+ "github.com/pterm/pterm/internal"
+)
+
+// DefaultTable contains standards, which can be used to print a TablePrinter.
+var DefaultTable = TablePrinter{
+ Style: &ThemeDefault.TableStyle,
+ HeaderStyle: &ThemeDefault.TableHeaderStyle,
+ HeaderRowSeparator: "",
+ HeaderRowSeparatorStyle: &ThemeDefault.TableSeparatorStyle,
+ Separator: " | ",
+ SeparatorStyle: &ThemeDefault.TableSeparatorStyle,
+ RowSeparator: "",
+ RowSeparatorStyle: &ThemeDefault.TableSeparatorStyle,
+ LeftAlignment: true,
+ RightAlignment: false,
+}
+
+// TableData is the type that contains the data of a TablePrinter.
+type TableData [][]string
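+
+// A minimal usage sketch (not part of the library), based on the builder
+// methods and Render defined below; the data is a placeholder:
+//
+//	data := TableData{
+//		{"Name", "Age"},
+//		{"Alice", "30"},
+//	}
+//	_ = DefaultTable.WithHasHeader().WithData(data).Render()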
+
+// TablePrinter is able to render tables.
+type TablePrinter struct {
+ Style *Style
+ HasHeader bool
+ HeaderStyle *Style
+ HeaderRowSeparator string
+ HeaderRowSeparatorStyle *Style
+ Separator string
+ SeparatorStyle *Style
+ RowSeparator string
+ RowSeparatorStyle *Style
+ Data TableData
+ Boxed bool
+ LeftAlignment bool
+ RightAlignment bool
+ Writer io.Writer
+ AlternateRowStyle *Style
+}
+
+// WithStyle returns a new TablePrinter with a specific Style.
+func (p TablePrinter) WithStyle(style *Style) *TablePrinter {
+ p.Style = style
+ return &p
+}
+
+// WithHasHeader returns a new TablePrinter, where the first line is marked as a header.
+func (p TablePrinter) WithHasHeader(b ...bool) *TablePrinter {
+ p.HasHeader = internal.WithBoolean(b)
+ return &p
+}
+
+// WithHeaderStyle returns a new TablePrinter with a specific HeaderStyle.
+func (p TablePrinter) WithHeaderStyle(style *Style) *TablePrinter {
+ p.HeaderStyle = style
+ return &p
+}
+
+// WithHeaderRowSeparator returns a new TablePrinter with a specific HeaderRowSeparator.
+func (p TablePrinter) WithHeaderRowSeparator(separator string) *TablePrinter {
+ p.HeaderRowSeparator = separator
+ return &p
+}
+
+// WithHeaderRowSeparatorStyle returns a new TablePrinter with a specific HeaderRowSeparatorStyle.
+func (p TablePrinter) WithHeaderRowSeparatorStyle(style *Style) *TablePrinter {
+ p.HeaderRowSeparatorStyle = style
+ return &p
+}
+
+// WithSeparator returns a new TablePrinter with a specific separator.
+func (p TablePrinter) WithSeparator(separator string) *TablePrinter {
+ p.Separator = separator
+ return &p
+}
+
+// WithSeparatorStyle returns a new TablePrinter with a specific SeparatorStyle.
+func (p TablePrinter) WithSeparatorStyle(style *Style) *TablePrinter {
+ p.SeparatorStyle = style
+ return &p
+}
+
+// WithRowSeparator returns a new TablePrinter with a specific RowSeparator.
+func (p TablePrinter) WithRowSeparator(separator string) *TablePrinter {
+ p.RowSeparator = separator
+ return &p
+}
+
+// WithRowSeparatorStyle returns a new TablePrinter with a specific RowSeparatorStyle.
+func (p TablePrinter) WithRowSeparatorStyle(style *Style) *TablePrinter {
+ p.RowSeparatorStyle = style
+ return &p
+}
+
+// WithData returns a new TablePrinter with specific Data.
+func (p TablePrinter) WithData(data [][]string) *TablePrinter {
+ p.Data = data
+ return &p
+}
+
+// WithCSVReader returns a new TablePrinter with specified Data extracted from CSV.
+func (p TablePrinter) WithCSVReader(reader *csv.Reader) *TablePrinter {
+ if records, err := reader.ReadAll(); err == nil {
+ p.Data = records
+ }
+ return &p
+}
+
+// WithBoxed returns a new TablePrinter with a box around the table.
+func (p TablePrinter) WithBoxed(b ...bool) *TablePrinter {
+ p.Boxed = internal.WithBoolean(b)
+ return &p
+}
+
+// WithLeftAlignment returns a new TablePrinter with left alignment.
+func (p TablePrinter) WithLeftAlignment(b ...bool) *TablePrinter {
+ b2 := internal.WithBoolean(b)
+ p.LeftAlignment = b2
+ p.RightAlignment = false
+ return &p
+}
+
+// WithRightAlignment returns a new TablePrinter with right alignment.
+func (p TablePrinter) WithRightAlignment(b ...bool) *TablePrinter {
+ b2 := internal.WithBoolean(b)
+ p.LeftAlignment = false
+ p.RightAlignment = b2
+ return &p
+}
+
+// WithWriter sets the Writer.
+func (p TablePrinter) WithWriter(writer io.Writer) *TablePrinter {
+ p.Writer = writer
+ return &p
+}
+
+// WithAlternateRowStyle returns a new TablePrinter with a specific AlternateRowStyle.
+func (p TablePrinter) WithAlternateRowStyle(style *Style) *TablePrinter {
+ p.AlternateRowStyle = style
+ return &p
+}
+
+type table struct {
+ rows []row
+ maxColumnWidths []int
+}
+
+type row struct {
+ height int
+ cells []cell
+}
+
+type cell struct {
+ width int
+ height int
+ lines []string
+}
+
+// Srender renders the TablePrinter as a string.
+func (p TablePrinter) Srender() (string, error) {
+ if p.Style == nil {
+ p.Style = NewStyle()
+ }
+ if p.SeparatorStyle == nil {
+ p.SeparatorStyle = NewStyle()
+ }
+ if p.HeaderStyle == nil {
+ p.HeaderStyle = NewStyle()
+ }
+ if p.HeaderRowSeparatorStyle == nil {
+ p.HeaderRowSeparatorStyle = NewStyle()
+ }
+ if p.RowSeparatorStyle == nil {
+ p.RowSeparatorStyle = NewStyle()
+ }
+
+ var t table
+
+ // convert data to table and calculate values
+ for _, rRaw := range p.Data {
+ var r row
+ for _, cRaw := range rRaw {
+ var c cell
+ c.lines = strings.Split(cRaw, "\n")
+ c.height = len(c.lines)
+ for _, l := range c.lines {
+ if maxWidth := internal.GetStringMaxWidth(l); maxWidth > c.width {
+ c.width = maxWidth
+ }
+ }
+ r.cells = append(r.cells, c)
+ if c.height > r.height {
+ r.height = c.height
+ }
+ }
+
+ // set max column widths of table
+ for i, c := range r.cells {
+ if len(t.maxColumnWidths) <= i {
+ t.maxColumnWidths = append(t.maxColumnWidths, c.width)
+ } else if c.width > t.maxColumnWidths[i] {
+ t.maxColumnWidths[i] = c.width
+ }
+ }
+
+ t.rows = append(t.rows, r)
+ }
+
+ var maxRowWidth int
+ for _, r := range t.rows {
+ rowWidth := internal.GetStringMaxWidth(p.renderRow(t, r))
+ if rowWidth > maxRowWidth {
+ maxRowWidth = rowWidth
+ }
+ }
+
+ // render table
+ var ret strings.Builder
+
+ for i, r := range t.rows {
+ if i == 0 && p.HasHeader {
+ ret.WriteString(p.HeaderStyle.Sprint(p.renderRow(t, r)))
+
+ if p.HeaderRowSeparator != "" {
+ ret.WriteString(strings.Repeat(p.HeaderRowSeparatorStyle.Sprint(p.HeaderRowSeparator), maxRowWidth))
+ ret.WriteByte('\n')
+ }
+ continue
+ }
+
+ // Apply AlternateRowStyle if needed
+ if i%2 == 1 && p.AlternateRowStyle != nil {
+ ret.WriteString(p.AlternateRowStyle.Sprint(p.renderRow(t, r)))
+ } else {
+ ret.WriteString(p.renderRow(t, r))
+ }
+
+ if p.RowSeparator != "" && i < len(t.rows)-1 {
+ ret.WriteString(strings.Repeat(p.RowSeparatorStyle.Sprint(p.RowSeparator), maxRowWidth) + "\n")
+ }
+ }
+
+ if p.Boxed {
+ return DefaultBox.Sprint(strings.TrimSuffix(ret.String(), "\n")), nil
+ }
+
+ return ret.String(), nil
+}
+
+// renderRow renders a row.
+// It merges the cells of a row into one string.
+// Each line of each cell is merged with the same line of the other cells.
+func (p TablePrinter) renderRow(t table, r row) string {
+ var s string
+
+ // Merge lines of cells and add separator
+ // Use t.maxColumnWidths to add padding to corresponding cell
+ // A newline in a cell should be in the same column as original cell
+ for i := 0; i < r.height; i++ {
+ for j, c := range r.cells {
+ var currentLine string
+ if i < len(c.lines) {
+ currentLine = c.lines[i]
+ }
+ paddingForLine := t.maxColumnWidths[j] - internal.GetStringMaxWidth(currentLine)
+
+ // Add right alignment if necessary
+ if p.RightAlignment {
+ s += strings.Repeat(" ", paddingForLine)
+ }
+
+ // Add line content
+ if i < len(c.lines) {
+ s += c.lines[i]
+ }
+
+ // Add padding for left alignment, except for last column
+ if j < len(r.cells)-1 {
+ if p.LeftAlignment {
+ s += strings.Repeat(" ", paddingForLine)
+ }
+ s += p.SeparatorStyle.Sprint(p.Separator)
+ } else if p.LeftAlignment {
+ // Add padding after last column
+ s += strings.Repeat(" ", paddingForLine)
+ }
+ }
+ s += "\n"
+ }
+
+ return s
+}
+
+// Render prints the TablePrinter to the terminal.
+func (p TablePrinter) Render() error {
+ s, _ := p.Srender()
+ Fprintln(p.Writer, s)
+
+ return nil
+}
diff --git a/vendor/github.com/pterm/pterm/terminal.go b/vendor/github.com/pterm/pterm/terminal.go
new file mode 100644
index 0000000..24d95e3
--- /dev/null
+++ b/vendor/github.com/pterm/pterm/terminal.go
@@ -0,0 +1,64 @@
+package pterm
+
+import (
+ "os"
+
+ "golang.org/x/term"
+)
+
+// FallbackTerminalWidth is the value used by GetTerminalWidth if the actual width cannot be detected.
+// You can override that value if necessary.
+var FallbackTerminalWidth = 80
+
+// FallbackTerminalHeight is the value used by GetTerminalHeight if the actual height cannot be detected.
+// You can override that value if necessary.
+var FallbackTerminalHeight = 10
+
+// forcedTerminalWidth, when set along with forcedTerminalHeight, forces the terminal width value.
+var forcedTerminalWidth int = 0
+
+// forcedTerminalHeight, when set along with forcedTerminalWidth, forces the terminal height value.
+var forcedTerminalHeight int = 0
+
+// GetTerminalWidth returns the terminal width of the active terminal.
+func GetTerminalWidth() int {
+ if forcedTerminalWidth > 0 {
+ return forcedTerminalWidth
+ }
+ width, _, _ := GetTerminalSize()
+ return width
+}
+
+// GetTerminalHeight returns the terminal height of the active terminal.
+func GetTerminalHeight() int {
+ if forcedTerminalHeight > 0 {
+ return forcedTerminalHeight
+ }
+ _, height, _ := GetTerminalSize()
+ return height
+}
+
+// GetTerminalSize returns the width and the height of the active terminal.
+func GetTerminalSize() (width, height int, err error) {
+ if forcedTerminalWidth > 0 && forcedTerminalHeight > 0 {
+ return forcedTerminalWidth, forcedTerminalHeight, nil
+ }
+ w, h, err := term.GetSize(int(os.Stdout.Fd()))
+ if w <= 0 {
+ w = FallbackTerminalWidth
+ }
+ if h <= 0 {
+ h = FallbackTerminalHeight
+ }
+ if err != nil {
+ err = ErrTerminalSizeNotDetectable
+ }
+ return w, h, err
+}
+
+// SetForcedTerminalSize turns off terminal size autodetection. Useful for unified tests.
+func SetForcedTerminalSize(width int, height int) {
+ forcedTerminalWidth = width
+ forcedTerminalHeight = height
+ RecalculateTerminalSize()
+}
diff --git a/vendor/github.com/pterm/pterm/theme.go b/vendor/github.com/pterm/pterm/theme.go
new file mode 100644
index 0000000..91594c8
--- /dev/null
+++ b/vendor/github.com/pterm/pterm/theme.go
@@ -0,0 +1,259 @@
+package pterm
+
+var (
+ // ThemeDefault is the default theme used by PTerm.
+ // If this variable is overwritten, the new value is used as default theme.
+ ThemeDefault = Theme{
+ DefaultText: Style{FgDefault, BgDefault},
+ PrimaryStyle: Style{FgLightCyan},
+ SecondaryStyle: Style{FgLightMagenta},
+ HighlightStyle: Style{Bold, FgYellow},
+ InfoMessageStyle: Style{FgLightCyan},
+ InfoPrefixStyle: Style{FgBlack, BgCyan},
+ SuccessMessageStyle: Style{FgGreen},
+ SuccessPrefixStyle: Style{FgBlack, BgGreen},
+ WarningMessageStyle: Style{FgYellow},
+ WarningPrefixStyle: Style{FgBlack, BgYellow},
+ ErrorMessageStyle: Style{FgLightRed},
+ ErrorPrefixStyle: Style{FgBlack, BgLightRed},
+ FatalMessageStyle: Style{FgLightRed},
+ FatalPrefixStyle: Style{FgBlack, BgLightRed},
+ DescriptionMessageStyle: Style{FgDefault},
+ DescriptionPrefixStyle: Style{FgLightWhite, BgDarkGray},
+ ScopeStyle: Style{FgGray},
+ ProgressbarBarStyle: Style{FgCyan},
+ ProgressbarTitleStyle: Style{FgLightCyan},
+ HeaderTextStyle: Style{FgLightWhite, Bold},
+ HeaderBackgroundStyle: Style{BgGray},
+ SpinnerStyle: Style{FgLightCyan},
+ SpinnerTextStyle: Style{FgLightWhite},
+ TableStyle: Style{FgDefault},
+ TableHeaderStyle: Style{FgLightCyan},
+ TableSeparatorStyle: Style{FgGray},
+ HeatmapStyle: Style{FgDefault},
+ HeatmapHeaderStyle: Style{FgLightCyan},
+ HeatmapSeparatorStyle: Style{FgDefault},
+ SectionStyle: Style{Bold, FgYellow},
+ BulletListTextStyle: Style{FgDefault},
+ BulletListBulletStyle: Style{FgGray},
+ TreeStyle: Style{FgGray},
+ TreeTextStyle: Style{FgDefault},
+ LetterStyle: Style{FgDefault},
+ DebugMessageStyle: Style{FgGray},
+ DebugPrefixStyle: Style{FgBlack, BgGray},
+ BoxStyle: Style{FgDefault},
+ BoxTextStyle: Style{FgDefault},
+ BarLabelStyle: Style{FgLightCyan},
+ BarStyle: Style{FgCyan},
+ TimerStyle: Style{FgGray},
+ Checkmark: Checkmark{
+ Checked: Green("✓"),
+ Unchecked: Red("✗"),
+ },
+ }
+)
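+
+// For example (not part of the library), the default theme can be adjusted
+// via the With... methods defined below:
+//
+//	ThemeDefault = ThemeDefault.WithPrimaryStyle(Style{FgGreen})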
+
+// Theme for PTerm.
+// Theme contains every Style used in PTerm. You can create your own themes for your application or use one
+// of the existing themes.
+type Theme struct {
+ DefaultText Style
+ PrimaryStyle Style
+ SecondaryStyle Style
+ HighlightStyle Style
+ InfoMessageStyle Style
+ InfoPrefixStyle Style
+ SuccessMessageStyle Style
+ SuccessPrefixStyle Style
+ WarningMessageStyle Style
+ WarningPrefixStyle Style
+ ErrorMessageStyle Style
+ ErrorPrefixStyle Style
+ FatalMessageStyle Style
+ FatalPrefixStyle Style
+ DescriptionMessageStyle Style
+ DescriptionPrefixStyle Style
+ ScopeStyle Style
+ ProgressbarBarStyle Style
+ ProgressbarTitleStyle Style
+ HeaderTextStyle Style
+ HeaderBackgroundStyle Style
+ SpinnerStyle Style
+ SpinnerTextStyle Style
+ TimerStyle Style
+ TableStyle Style
+ TableHeaderStyle Style
+ TableSeparatorStyle Style
+ HeatmapStyle Style
+ HeatmapHeaderStyle Style
+ HeatmapSeparatorStyle Style
+ SectionStyle Style
+ BulletListTextStyle Style
+ BulletListBulletStyle Style
+ TreeStyle Style
+ TreeTextStyle Style
+ LetterStyle Style
+ DebugMessageStyle Style
+ DebugPrefixStyle Style
+ BoxStyle Style
+ BoxTextStyle Style
+ BarLabelStyle Style
+ BarStyle Style
+ Checkmark Checkmark
+}
+
+// WithPrimaryStyle returns a new theme with overridden value.
+func (t Theme) WithPrimaryStyle(style Style) Theme {
+ t.PrimaryStyle = style
+ return t
+}
+
+// WithSecondaryStyle returns a new theme with overridden value.
+func (t Theme) WithSecondaryStyle(style Style) Theme {
+ t.SecondaryStyle = style
+ return t
+}
+
+// WithHighlightStyle returns a new theme with overridden value.
+func (t Theme) WithHighlightStyle(style Style) Theme {
+ t.HighlightStyle = style
+ return t
+}
+
+// WithInfoMessageStyle returns a new theme with overridden value.
+func (t Theme) WithInfoMessageStyle(style Style) Theme {
+ t.InfoMessageStyle = style
+ return t
+}
+
+// WithInfoPrefixStyle returns a new theme with overridden value.
+func (t Theme) WithInfoPrefixStyle(style Style) Theme {
+ t.InfoPrefixStyle = style
+ return t
+}
+
+// WithSuccessMessageStyle returns a new theme with overridden value.
+func (t Theme) WithSuccessMessageStyle(style Style) Theme {
+ t.SuccessMessageStyle = style
+ return t
+}
+
+// WithSuccessPrefixStyle returns a new theme with overridden value.
+func (t Theme) WithSuccessPrefixStyle(style Style) Theme {
+ t.SuccessPrefixStyle = style
+ return t
+}
+
+// WithWarningMessageStyle returns a new theme with overridden value.
+func (t Theme) WithWarningMessageStyle(style Style) Theme {
+ t.WarningMessageStyle = style
+ return t
+}
+
+// WithWarningPrefixStyle returns a new theme with overridden value.
+func (t Theme) WithWarningPrefixStyle(style Style) Theme {
+ t.WarningPrefixStyle = style
+ return t
+}
+
+// WithErrorMessageStyle returns a new theme with overridden value.
+func (t Theme) WithErrorMessageStyle(style Style) Theme {
+ t.ErrorMessageStyle = style
+ return t
+}
+
+// WithErrorPrefixStyle returns a new theme with overridden value.
+func (t Theme) WithErrorPrefixStyle(style Style) Theme {
+ t.ErrorPrefixStyle = style
+ return t
+}
+
+// WithFatalMessageStyle returns a new theme with overridden value.
+func (t Theme) WithFatalMessageStyle(style Style) Theme {
+ t.FatalMessageStyle = style
+ return t
+}
+
+// WithFatalPrefixStyle returns a new theme with overridden value.
+func (t Theme) WithFatalPrefixStyle(style Style) Theme {
+ t.FatalPrefixStyle = style
+ return t
+}
+
+// WithDescriptionMessageStyle returns a new theme with overridden value.
+func (t Theme) WithDescriptionMessageStyle(style Style) Theme {
+ t.DescriptionMessageStyle = style
+ return t
+}
+
+// WithDescriptionPrefixStyle returns a new theme with overridden value.
+func (t Theme) WithDescriptionPrefixStyle(style Style) Theme {
+ t.DescriptionPrefixStyle = style
+ return t
+}
+
+// WithBulletListTextStyle returns a new theme with overridden value.
+func (t Theme) WithBulletListTextStyle(style Style) Theme {
+ t.BulletListTextStyle = style
+ return t
+}
+
+// WithBulletListBulletStyle returns a new theme with overridden value.
+func (t Theme) WithBulletListBulletStyle(style Style) Theme {
+ t.BulletListBulletStyle = style
+ return t
+}
+
+// WithLetterStyle returns a new theme with overridden value.
+func (t Theme) WithLetterStyle(style Style) Theme {
+ t.LetterStyle = style
+ return t
+}
+
+// WithDebugMessageStyle returns a new theme with overridden value.
+func (t Theme) WithDebugMessageStyle(style Style) Theme {
+ t.DebugMessageStyle = style
+ return t
+}
+
+// WithDebugPrefixStyle returns a new theme with overridden value.
+func (t Theme) WithDebugPrefixStyle(style Style) Theme {
+ t.DebugPrefixStyle = style
+ return t
+}
+
+// WithTreeStyle returns a new theme with overridden value.
+func (t Theme) WithTreeStyle(style Style) Theme {
+ t.TreeStyle = style
+ return t
+}
+
+// WithTreeTextStyle returns a new theme with overridden value.
+func (t Theme) WithTreeTextStyle(style Style) Theme {
+ t.TreeTextStyle = style
+ return t
+}
+
+// WithBoxStyle returns a new theme with overridden value.
+func (t Theme) WithBoxStyle(style Style) Theme {
+ t.BoxStyle = style
+ return t
+}
+
+// WithBoxTextStyle returns a new theme with overridden value.
+func (t Theme) WithBoxTextStyle(style Style) Theme {
+ t.BoxTextStyle = style
+ return t
+}
+
+// WithBarLabelStyle returns a new theme with overridden value.
+func (t Theme) WithBarLabelStyle(style Style) Theme {
+ t.BarLabelStyle = style
+ return t
+}
+
+// WithBarStyle returns a new theme with overridden value.
+func (t Theme) WithBarStyle(style Style) Theme {
+ t.BarStyle = style
+ return t
+}
diff --git a/vendor/github.com/pterm/pterm/tree_printer.go b/vendor/github.com/pterm/pterm/tree_printer.go
new file mode 100644
index 0000000..3cbeb30
--- /dev/null
+++ b/vendor/github.com/pterm/pterm/tree_printer.go
@@ -0,0 +1,161 @@
+package pterm
+
+import (
+ "io"
+ "strings"
+)
+
+// TreeNode is used as items in a TreePrinter.
+type TreeNode struct {
+ Children []TreeNode
+ Text string
+}
+
+// LeveledList is a list, which contains multiple LeveledListItem.
+type LeveledList []LeveledListItem
+
+// LeveledListItem combines a text with a specific level.
+// The level is the indent, which would normally be seen in a BulletListPrinter.
+type LeveledListItem struct {
+ Level int
+ Text string
+}
+
+// DefaultTree contains standards, which can be used to render a TreePrinter.
+var DefaultTree = TreePrinter{
+ TreeStyle: &ThemeDefault.TreeStyle,
+ TextStyle: &ThemeDefault.TreeTextStyle,
+ TopRightCornerString: "└",
+ HorizontalString: "─",
+ TopRightDownString: "├",
+ VerticalString: "│",
+ RightDownLeftString: "┬",
+ Indent: 2,
+}
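+
+// A minimal usage sketch (not part of the library), based on the types and
+// methods defined in this file; the node texts are placeholders:
+//
+//	root := TreeNode{
+//		Text: "root",
+//		Children: []TreeNode{
+//			{Text: "child 1"},
+//			{Text: "child 2", Children: []TreeNode{{Text: "grandchild"}}},
+//		},
+//	}
+//	_ = DefaultTree.WithRoot(root).Render()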
+
+// TreePrinter is able to render a list.
+type TreePrinter struct {
+ Root TreeNode
+ TreeStyle *Style
+ TextStyle *Style
+ TopRightCornerString string
+ TopRightDownString string
+ HorizontalString string
+ VerticalString string
+ RightDownLeftString string
+ Indent int
+ Writer io.Writer
+}
+
+// WithTreeStyle returns a new list with a specific tree style.
+func (p TreePrinter) WithTreeStyle(style *Style) *TreePrinter {
+ p.TreeStyle = style
+ return &p
+}
+
+// WithTextStyle returns a new list with a specific text style.
+func (p TreePrinter) WithTextStyle(style *Style) *TreePrinter {
+ p.TextStyle = style
+ return &p
+}
+
+// WithTopRightCornerString returns a new list with a specific TopRightCornerString.
+func (p TreePrinter) WithTopRightCornerString(s string) *TreePrinter {
+ p.TopRightCornerString = s
+ return &p
+}
+
+// WithTopRightDownStringOngoing returns a new list with a specific TopRightDownString.
+func (p TreePrinter) WithTopRightDownStringOngoing(s string) *TreePrinter {
+ p.TopRightDownString = s
+ return &p
+}
+
+// WithHorizontalString returns a new list with a specific HorizontalString.
+func (p TreePrinter) WithHorizontalString(s string) *TreePrinter {
+ p.HorizontalString = s
+ return &p
+}
+
+// WithVerticalString returns a new list with a specific VerticalString.
+func (p TreePrinter) WithVerticalString(s string) *TreePrinter {
+ p.VerticalString = s
+ return &p
+}
+
+// WithRoot returns a new list with a specific Root.
+func (p TreePrinter) WithRoot(root TreeNode) *TreePrinter {
+ p.Root = root
+ return &p
+}
+
+// WithIndent returns a new list with a specific amount of spacing between the levels.
+// Indent must be at least 1.
+func (p TreePrinter) WithIndent(indent int) *TreePrinter {
+ if indent < 1 {
+ indent = 1
+ }
+ p.Indent = indent
+ return &p
+}
+
+// WithWriter sets the Writer.
+func (p TreePrinter) WithWriter(writer io.Writer) *TreePrinter {
+ p.Writer = writer
+ return &p
+}
+
+// Render prints the list to the terminal.
+func (p TreePrinter) Render() error {
+ s, _ := p.Srender()
+ Fprintln(p.Writer, s)
+
+ return nil
+}
+
+// Srender renders the list as a string.
+func (p TreePrinter) Srender() (string, error) {
+ if p.TreeStyle == nil {
+ p.TreeStyle = NewStyle()
+ }
+ if p.TextStyle == nil {
+ p.TextStyle = NewStyle()
+ }
+
+ var result strings.Builder
+ if p.Root.Text != "" {
+ result.WriteString(p.TextStyle.Sprint(p.Root.Text))
+ result.WriteByte('\n')
+ }
+ result.WriteString(walkOverTree(p.Root.Children, p, ""))
+ return result.String(), nil
+}
+
+// walkOverTree is a recursive function,
+// which analyzes a TreePrinter and connects the items with specific characters.
+// It returns the rendered tree as a string.
+func walkOverTree(list []TreeNode, p TreePrinter, prefix string) string {
+ var ret string
+ for i, item := range list {
+ if len(list) > i+1 { // if not last in list
+ if len(item.Children) == 0 { // if there are no children
+ ret += prefix + p.TreeStyle.Sprint(p.TopRightDownString) + strings.Repeat(p.TreeStyle.Sprint(p.HorizontalString), p.Indent) +
+ p.TextStyle.Sprint(item.Text) + "\n"
+ } else { // if there are children
+ ret += prefix + p.TreeStyle.Sprint(p.TopRightDownString) + strings.Repeat(p.TreeStyle.Sprint(p.HorizontalString), p.Indent-1) +
+ p.TreeStyle.Sprint(p.RightDownLeftString) + p.TextStyle.Sprint(item.Text) + "\n"
+ ret += walkOverTree(item.Children, p, prefix+p.TreeStyle.Sprint(p.VerticalString)+strings.Repeat(" ", p.Indent-1))
+ }
+ } else if len(list) == i+1 { // if last in list
+ if len(item.Children) == 0 { // if there are no children
+ ret += prefix + p.TreeStyle.Sprint(p.TopRightCornerString) + strings.Repeat(p.TreeStyle.Sprint(p.HorizontalString), p.Indent) +
+ p.TextStyle.Sprint(item.Text) + "\n"
+ } else { // if there are children
+ ret += prefix + p.TreeStyle.Sprint(p.TopRightCornerString) + strings.Repeat(p.TreeStyle.Sprint(p.HorizontalString), p.Indent-1) +
+ p.TreeStyle.Sprint(p.RightDownLeftString) + p.TextStyle.Sprint(item.Text) + "\n"
+ ret += walkOverTree(item.Children, p, prefix+strings.Repeat(" ", p.Indent))
+ }
+ }
+ }
+ return ret
+}
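+
+// Usage sketch (editorial note, not upstream code): a minimal example of how
+// this printer might be driven from a caller. It assumes the surrounding
+// package is the vendored github.com/pterm/pterm, that TreeNode carries the
+// Text and Children fields referenced in walkOverTree, and that the caller
+// imports fmt; the box-drawing characters are illustrative, not the package
+// defaults.
+//
+//	root := pterm.TreeNode{
+//		Text: "repo",
+//		Children: []pterm.TreeNode{
+//			{Text: "cmd"},
+//			{Text: "vendor", Children: []pterm.TreeNode{{Text: "github.com"}}},
+//		},
+//	}
+//	// Each With* helper copies the receiver and returns a pointer to the
+//	// copy, so the chain below never mutates a shared printer.
+//	s, err := pterm.TreePrinter{}.
+//		WithRoot(root).
+//		WithIndent(2).
+//		WithVerticalString("│").
+//		WithHorizontalString("─").
+//		WithTopRightCornerString("└").
+//		WithTopRightDownStringOngoing("├").
+//		Srender()
+//	if err == nil {
+//		fmt.Print(s)
+//	}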
diff --git a/vendor/github.com/rivo/uniseg/LICENSE.txt b/vendor/github.com/rivo/uniseg/LICENSE.txt
new file mode 100644
index 0000000..5040f1e
--- /dev/null
+++ b/vendor/github.com/rivo/uniseg/LICENSE.txt
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2019 Oliver Kuederle
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/rivo/uniseg/README.md b/vendor/github.com/rivo/uniseg/README.md
new file mode 100644
index 0000000..25e9346
--- /dev/null
+++ b/vendor/github.com/rivo/uniseg/README.md
@@ -0,0 +1,157 @@
+# Unicode Text Segmentation for Go
+
+[Go Reference](https://pkg.go.dev/github.com/rivo/uniseg)
+[Go Report Card](https://goreportcard.com/report/github.com/rivo/uniseg)
+
+This Go package implements Unicode Text Segmentation according to [Unicode Standard Annex #29](https://unicode.org/reports/tr29/), Unicode Line Breaking according to [Unicode Standard Annex #14](https://unicode.org/reports/tr14/) (Unicode version 14.0.0), and monospace font string width calculation similar to [wcwidth](https://man7.org/linux/man-pages/man3/wcwidth.3.html).
+
+## Background
+
+### Grapheme Clusters
+
+In Go, [strings are read-only slices of bytes](https://go.dev/blog/strings). They can be turned into Unicode code points using the `for` loop or by casting: `[]rune(str)`. However, multiple code points may be combined into one user-perceived character or what the Unicode specification calls "grapheme cluster". Here are some examples:
+
+|String|Bytes (UTF-8)|Code points (runes)|Grapheme clusters|
+|-|-|-|-|
+|Käse|6 bytes: `4b 61 cc 88 73 65`|5 code points: `4b 61 308 73 65`|4 clusters: `[4b],[61 308],[73],[65]`|
+|🏳️🌈|14 bytes: `f0 9f 8f b3 ef b8 8f e2 80 8d f0 9f 8c 88`|4 code points: `1f3f3 fe0f 200d 1f308`|1 cluster: `[1f3f3 fe0f 200d 1f308]`|
+|🇩🇪|8 bytes: `f0 9f 87 a9 f0 9f 87 aa`|2 code points: `1f1e9 1f1ea`|1 cluster: `[1f1e9 1f1ea]`|
+
+This package provides tools to iterate over these grapheme clusters. This may be used to determine the number of user-perceived characters, to split strings in their intended places, or to extract individual characters which form a unit.
+
+### Word Boundaries
+
+Word boundaries are used in a number of different contexts. The most familiar ones are selection (double-click mouse selection), cursor movement ("move to next word" control-arrow keys), and the dialog option "Whole Word Search" for search and replace. They are also used in database queries, to determine whether elements are within a certain number of words of one another. Searching may also use word boundaries in determining matching items. This package provides tools to determine word boundaries within strings.
+
+### Sentence Boundaries
+
+Sentence boundaries are often used for triple-click or some other method of selecting or iterating through blocks of text that are larger than single words. They are also used to determine whether words occur within the same sentence in database queries. This package provides tools to determine sentence boundaries within strings.
+
+### Line Breaking
+
+Line breaking, also known as word wrapping, is the process of breaking a section of text into lines such that it will fit in the available width of a page, window or other display area. This package provides tools to determine where a string may or may not be broken and where it must be broken (for example after newline characters).
+
+### Monospace Width
+
+Most terminals or text displays / text editors using a monospace font (for example source code editors) use a fixed width for each character. Some characters such as emojis or characters found in Asian and other languages may take up more than one character cell. This package provides tools to determine the number of cells a string will take up when displayed in a monospace font. See [here](https://pkg.go.dev/github.com/rivo/uniseg#hdr-Monospace_Width) for more information.
+
+## Installation
+
+```bash
+go get github.com/rivo/uniseg
+```
+
+## Examples
+
+### Counting Characters in a String
+
+```go
+n := uniseg.GraphemeClusterCount("🇩🇪🏳️🌈")
+fmt.Println(n)
+// 2
+```
+
+### Calculating the Monospace String Width
+
+```go
+width := uniseg.StringWidth("🇩🇪🏳️🌈!")
+fmt.Println(width)
+// 5
+```
+
+### Using the [`Graphemes`](https://pkg.go.dev/github.com/rivo/uniseg#Graphemes) Class
+
+This is the most convenient method of iterating over grapheme clusters:
+
+```go
+gr := uniseg.NewGraphemes("👍🏼!")
+for gr.Next() {
+ fmt.Printf("%x ", gr.Runes())
+}
+// [1f44d 1f3fc] [21]
+```
+
+### Using the [`Step`](https://pkg.go.dev/github.com/rivo/uniseg#Step) or [`StepString`](https://pkg.go.dev/github.com/rivo/uniseg#StepString) Function
+
+This is orders of magnitude faster than the `Graphemes` class, but it requires the handling of states and boundaries:
+
+```go
+str := "🇩🇪🏳️🌈"
+state := -1
+var c string
+for len(str) > 0 {
+ c, str, _, state = uniseg.StepString(str, state)
+ fmt.Printf("%x ", []rune(c))
+}
+// [1f1e9 1f1ea] [1f3f3 fe0f 200d 1f308]
+```
+
+### Advanced Examples
+
+Breaking into grapheme clusters and evaluating line breaks:
+
+```go
+str := "First line.\nSecond line."
+state := -1
+var (
+ c string
+ boundaries int
+)
+for len(str) > 0 {
+ c, str, boundaries, state = uniseg.StepString(str, state)
+ fmt.Print(c)
+ if boundaries&uniseg.MaskLine == uniseg.LineCanBreak {
+ fmt.Print("|")
+ } else if boundaries&uniseg.MaskLine == uniseg.LineMustBreak {
+ fmt.Print("‖")
+ }
+}
+// First |line.
+// ‖Second |line.‖
+```
+
+If you're only interested in word segmentation, use [`FirstWord`](https://pkg.go.dev/github.com/rivo/uniseg#FirstWord) or [`FirstWordInString`](https://pkg.go.dev/github.com/rivo/uniseg#FirstWordInString):
+
+```go
+str := "Hello, world!"
+state := -1
+var c string
+for len(str) > 0 {
+ c, str, state = uniseg.FirstWordInString(str, state)
+ fmt.Printf("(%s)\n", c)
+}
+// (Hello)
+// (,)
+// ( )
+// (world)
+// (!)
+```
+
+Similarly, use
+
+- [`FirstGraphemeCluster`](https://pkg.go.dev/github.com/rivo/uniseg#FirstGraphemeCluster) or [`FirstGraphemeClusterInString`](https://pkg.go.dev/github.com/rivo/uniseg#FirstGraphemeClusterInString) for grapheme cluster determination only,
+- [`FirstSentence`](https://pkg.go.dev/github.com/rivo/uniseg#FirstSentence) or [`FirstSentenceInString`](https://pkg.go.dev/github.com/rivo/uniseg#FirstSentenceInString) for sentence segmentation only, and
+- [`FirstLineSegment`](https://pkg.go.dev/github.com/rivo/uniseg#FirstLineSegment) or [`FirstLineSegmentInString`](https://pkg.go.dev/github.com/rivo/uniseg#FirstLineSegmentInString) for line breaking / word wrapping (although using [`Step`](https://pkg.go.dev/github.com/rivo/uniseg#Step) or [`StepString`](https://pkg.go.dev/github.com/rivo/uniseg#StepString) is preferred as it will observe grapheme cluster boundaries).
+
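+If you need sentence segmentation, the same loop shape works with [`FirstSentenceInString`](https://pkg.go.dev/github.com/rivo/uniseg#FirstSentenceInString) (a sketch, mirroring the word example above):
+
+```go
+str := "First sentence. Second sentence!"
+state := -1
+var c string
+for len(str) > 0 {
+ c, str, state = uniseg.FirstSentenceInString(str, state)
+ fmt.Printf("(%s)\n", c)
+}
+// (First sentence. )
+// (Second sentence!)
+```
+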
+Finally, if you need to reverse a string while preserving grapheme clusters, use [`ReverseString`](https://pkg.go.dev/github.com/rivo/uniseg#ReverseString):
+
+```go
+fmt.Println(uniseg.ReverseString("🇩🇪🏳️🌈"))
+// 🏳️🌈🇩🇪
+```
+
+## Documentation
+
+Refer to https://pkg.go.dev/github.com/rivo/uniseg for the package's documentation.
+
+## Dependencies
+
+This package does not depend on any packages outside the standard library.
+
+## Sponsor this Project
+
+[Become a Sponsor on GitHub](https://github.com/sponsors/rivo?metadata_source=uniseg_readme) to support this project!
+
+## Your Feedback
+
+Add your issue here on GitHub, preferably before submitting any PRs. Feel free to get in touch if you have any questions.
\ No newline at end of file
diff --git a/vendor/github.com/rivo/uniseg/doc.go b/vendor/github.com/rivo/uniseg/doc.go
new file mode 100644
index 0000000..11224ae
--- /dev/null
+++ b/vendor/github.com/rivo/uniseg/doc.go
@@ -0,0 +1,108 @@
+/*
+Package uniseg implements Unicode Text Segmentation, Unicode Line Breaking, and
+string width calculation for monospace fonts. Unicode Text Segmentation conforms
+to Unicode Standard Annex #29 (https://unicode.org/reports/tr29/) and Unicode
+Line Breaking conforms to Unicode Standard Annex #14
+(https://unicode.org/reports/tr14/).
+
+In short, using this package, you can split a string into grapheme clusters
+(what people would usually refer to as a "character"), into words, and into
+sentences. Or, in its simplest case, this package allows you to count the number
+of characters in a string, especially when it contains complex characters such
+as emojis, combining characters, or characters from Asian, Arabic, Hebrew, or
+other languages. Additionally, you can use it to implement line breaking (or
+"word wrapping"), that is, to determine where text can be broken over to the
+next line when the width of the line is not big enough to fit the entire text.
+Finally, you can use it to calculate the display width of a string for monospace
+fonts.
+
+# Getting Started
+
+If you just want to count the number of characters in a string, you can use
+[GraphemeClusterCount]. If you want to determine the display width of a string,
+you can use [StringWidth]. If you want to iterate over a string, you can use
+[Step], [StepString], or the [Graphemes] class (more convenient but less
+performant). This will provide you with all information: grapheme clusters,
+word boundaries, sentence boundaries, line breaks, and monospace character
+widths. The specialized functions [FirstGraphemeCluster],
+[FirstGraphemeClusterInString], [FirstWord], [FirstWordInString],
+[FirstSentence], and [FirstSentenceInString] can be used if only one type of
+information is needed.
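+
+A minimal sketch of the two simplest entry points (the expected values mirror
+the README examples bundled with this package):
+
+	fmt.Println(uniseg.GraphemeClusterCount("🇩🇪🏳️🌈")) // 2
+	fmt.Println(uniseg.StringWidth("🇩🇪🏳️🌈!")) // 5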
+
+# Grapheme Clusters
+
+Consider the rainbow flag emoji: 🏳️🌈. On most modern systems, it appears as one
+character. But its string representation actually has 14 bytes, so counting
+bytes (or using len("🏳️🌈")) will not work as expected. Counting runes won't,
+either: The flag has 4 Unicode code points, thus 4 runes. The stdlib function
+utf8.RuneCountInString("🏳️🌈") and len([]rune("🏳️🌈")) will both return 4.
+
+The [GraphemeClusterCount] function will return 1 for the rainbow flag emoji.
+The [Graphemes] class and a variety of functions in this package will allow you to
+split strings into their grapheme clusters.
+
+# Word Boundaries
+
+Word boundaries are used in a number of different contexts. The most familiar
+ones are selection (double-click mouse selection), cursor movement ("move to
+next word" control-arrow keys), and the dialog option "Whole Word Search" for
+search and replace. This package provides methods for determining word
+boundaries.
+
+# Sentence Boundaries
+
+Sentence boundaries are often used for triple-click or some other method of
+selecting or iterating through blocks of text that are larger than single words.
+They are also used to determine whether words occur within the same sentence in
+database queries. This package provides methods for determining sentence
+boundaries.
+
+# Line Breaking
+
+Line breaking, also known as word wrapping, is the process of breaking a section
+of text into lines such that it will fit in the available width of a page,
+window or other display area. This package provides methods to determine the
+positions in a string where a line must be broken, may be broken, or must not be
+broken.
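+
+A minimal sketch, assuming the [FirstLineSegmentInString] signature of
+(segment, rest string, mustBreak bool, newState int):
+
+	str := "First line.\nSecond line."
+	state := -1
+	var (
+		segment   string
+		mustBreak bool
+	)
+	for len(str) > 0 {
+		segment, str, mustBreak, state = uniseg.FirstLineSegmentInString(str, state)
+		fmt.Printf("%q (must break: %t)\n", segment, mustBreak)
+	}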
+
+# Monospace Width
+
+Monospace width, as referred to in this package, is the width of a string in a
+monospace font. This is commonly used in terminal user interfaces or text
+displays or editors that don't support proportional fonts. A width of 1
+corresponds to a single character cell. The C function [wcswidth()] and its
+implementations in other programming languages are in widespread use for the same
+purpose. However, there is no standard for the calculation of such widths, and
+this package differs from wcswidth() in a number of ways, presumably to generate
+more visually pleasing results.
+
+To start, we assume that every code point has a width of 1, with the following
+exceptions:
+
+ - Code points with grapheme cluster break properties Control, CR, LF, Extend,
+ and ZWJ have a width of 0.
+ - U+2E3A, Two-Em Dash, has a width of 3.
+ - U+2E3B, Three-Em Dash, has a width of 4.
+ - Characters with the East-Asian Width properties "Fullwidth" (F) and "Wide"
+ (W) have a width of 2. (Properties "Ambiguous" (A) and "Neutral" (N) both
+ have a width of 1.)
+ - Code points with grapheme cluster break property Regional Indicator have a
+ width of 2.
+ - Code points with grapheme cluster break property Extended Pictographic have
+ a width of 2, unless their Emoji Presentation flag is "No", in which case
+ the width is 1.
+
+For Hangul grapheme clusters composed of conjoining Jamo and for Regional
+Indicators (flags), all code points except the first one have a width of 0. For
+grapheme clusters starting with an Extended Pictographic, any additional code
+point will force a total width of 2, except if the Variation Selector-15
+(U+FE0E) is included, in which case the total width is always 1. Grapheme
+clusters ending with Variation Selector-16 (U+FE0F) have a width of 2.
+
+Note that whether these widths appear correct depends on your application's
+render engine, the extent to which it conforms to the Unicode Standard, and its
+choice of font.
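+
+A hedged illustration of these rules (the values follow directly from the
+exceptions listed above and the Regional Indicator rule):
+
+	fmt.Println(uniseg.StringWidth("世"))                    // 2 (East-Asian Wide)
+	fmt.Println(uniseg.StringWidth("\U0001F1E9\U0001F1EA")) // 2 (Regional Indicator pair)
+	fmt.Println(uniseg.StringWidth("\u2E3A"))                // 3 (Two-Em Dash)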
+
+[wcswidth()]: https://man7.org/linux/man-pages/man3/wcswidth.3.html
+*/
+package uniseg
diff --git a/vendor/github.com/rivo/uniseg/eastasianwidth.go b/vendor/github.com/rivo/uniseg/eastasianwidth.go
new file mode 100644
index 0000000..661934a
--- /dev/null
+++ b/vendor/github.com/rivo/uniseg/eastasianwidth.go
@@ -0,0 +1,2556 @@
+package uniseg
+
+// Code generated via go generate from gen_properties.go. DO NOT EDIT.
+
+// eastAsianWidth are taken from
+// https://www.unicode.org/Public/14.0.0/ucd/EastAsianWidth.txt
+// and
+// https://unicode.org/Public/14.0.0/ucd/emoji/emoji-data.txt
+// ("Extended_Pictographic" only)
+// on September 10, 2022. See https://www.unicode.org/license.html for the Unicode
+// license agreement.
+var eastAsianWidth = [][3]int{
+ {0x0000, 0x001F, prN}, // Cc [32] <control-0000>..<control-001F>
+ {0x0020, 0x0020, prNa}, // Zs SPACE
+ {0x0021, 0x0023, prNa}, // Po [3] EXCLAMATION MARK..NUMBER SIGN
+ {0x0024, 0x0024, prNa}, // Sc DOLLAR SIGN
+ {0x0025, 0x0027, prNa}, // Po [3] PERCENT SIGN..APOSTROPHE
+ {0x0028, 0x0028, prNa}, // Ps LEFT PARENTHESIS
+ {0x0029, 0x0029, prNa}, // Pe RIGHT PARENTHESIS
+ {0x002A, 0x002A, prNa}, // Po ASTERISK
+ {0x002B, 0x002B, prNa}, // Sm PLUS SIGN
+ {0x002C, 0x002C, prNa}, // Po COMMA
+ {0x002D, 0x002D, prNa}, // Pd HYPHEN-MINUS
+ {0x002E, 0x002F, prNa}, // Po [2] FULL STOP..SOLIDUS
+ {0x0030, 0x0039, prNa}, // Nd [10] DIGIT ZERO..DIGIT NINE
+ {0x003A, 0x003B, prNa}, // Po [2] COLON..SEMICOLON
+ {0x003C, 0x003E, prNa}, // Sm [3] LESS-THAN SIGN..GREATER-THAN SIGN
+ {0x003F, 0x0040, prNa}, // Po [2] QUESTION MARK..COMMERCIAL AT
+ {0x0041, 0x005A, prNa}, // Lu [26] LATIN CAPITAL LETTER A..LATIN CAPITAL LETTER Z
+ {0x005B, 0x005B, prNa}, // Ps LEFT SQUARE BRACKET
+ {0x005C, 0x005C, prNa}, // Po REVERSE SOLIDUS
+ {0x005D, 0x005D, prNa}, // Pe RIGHT SQUARE BRACKET
+ {0x005E, 0x005E, prNa}, // Sk CIRCUMFLEX ACCENT
+ {0x005F, 0x005F, prNa}, // Pc LOW LINE
+ {0x0060, 0x0060, prNa}, // Sk GRAVE ACCENT
+ {0x0061, 0x007A, prNa}, // Ll [26] LATIN SMALL LETTER A..LATIN SMALL LETTER Z
+ {0x007B, 0x007B, prNa}, // Ps LEFT CURLY BRACKET
+ {0x007C, 0x007C, prNa}, // Sm VERTICAL LINE
+ {0x007D, 0x007D, prNa}, // Pe RIGHT CURLY BRACKET
+ {0x007E, 0x007E, prNa}, // Sm TILDE
+ {0x007F, 0x007F, prN}, // Cc <control-007F>
+ {0x0080, 0x009F, prN}, // Cc [32] <control-0080>..<control-009F>
+ {0x00A0, 0x00A0, prN}, // Zs NO-BREAK SPACE
+ {0x00A1, 0x00A1, prA}, // Po INVERTED EXCLAMATION MARK
+ {0x00A2, 0x00A3, prNa}, // Sc [2] CENT SIGN..POUND SIGN
+ {0x00A4, 0x00A4, prA}, // Sc CURRENCY SIGN
+ {0x00A5, 0x00A5, prNa}, // Sc YEN SIGN
+ {0x00A6, 0x00A6, prNa}, // So BROKEN BAR
+ {0x00A7, 0x00A7, prA}, // Po SECTION SIGN
+ {0x00A8, 0x00A8, prA}, // Sk DIAERESIS
+ {0x00A9, 0x00A9, prN}, // So COPYRIGHT SIGN
+ {0x00AA, 0x00AA, prA}, // Lo FEMININE ORDINAL INDICATOR
+ {0x00AB, 0x00AB, prN}, // Pi LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
+ {0x00AC, 0x00AC, prNa}, // Sm NOT SIGN
+ {0x00AD, 0x00AD, prA}, // Cf SOFT HYPHEN
+ {0x00AE, 0x00AE, prA}, // So REGISTERED SIGN
+ {0x00AF, 0x00AF, prNa}, // Sk MACRON
+ {0x00B0, 0x00B0, prA}, // So DEGREE SIGN
+ {0x00B1, 0x00B1, prA}, // Sm PLUS-MINUS SIGN
+ {0x00B2, 0x00B3, prA}, // No [2] SUPERSCRIPT TWO..SUPERSCRIPT THREE
+ {0x00B4, 0x00B4, prA}, // Sk ACUTE ACCENT
+ {0x00B5, 0x00B5, prN}, // Ll MICRO SIGN
+ {0x00B6, 0x00B7, prA}, // Po [2] PILCROW SIGN..MIDDLE DOT
+ {0x00B8, 0x00B8, prA}, // Sk CEDILLA
+ {0x00B9, 0x00B9, prA}, // No SUPERSCRIPT ONE
+ {0x00BA, 0x00BA, prA}, // Lo MASCULINE ORDINAL INDICATOR
+ {0x00BB, 0x00BB, prN}, // Pf RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
+ {0x00BC, 0x00BE, prA}, // No [3] VULGAR FRACTION ONE QUARTER..VULGAR FRACTION THREE QUARTERS
+ {0x00BF, 0x00BF, prA}, // Po INVERTED QUESTION MARK
+ {0x00C0, 0x00C5, prN}, // Lu [6] LATIN CAPITAL LETTER A WITH GRAVE..LATIN CAPITAL LETTER A WITH RING ABOVE
+ {0x00C6, 0x00C6, prA}, // Lu LATIN CAPITAL LETTER AE
+ {0x00C7, 0x00CF, prN}, // Lu [9] LATIN CAPITAL LETTER C WITH CEDILLA..LATIN CAPITAL LETTER I WITH DIAERESIS
+ {0x00D0, 0x00D0, prA}, // Lu LATIN CAPITAL LETTER ETH
+ {0x00D1, 0x00D6, prN}, // Lu [6] LATIN CAPITAL LETTER N WITH TILDE..LATIN CAPITAL LETTER O WITH DIAERESIS
+ {0x00D7, 0x00D7, prA}, // Sm MULTIPLICATION SIGN
+ {0x00D8, 0x00D8, prA}, // Lu LATIN CAPITAL LETTER O WITH STROKE
+ {0x00D9, 0x00DD, prN}, // Lu [5] LATIN CAPITAL LETTER U WITH GRAVE..LATIN CAPITAL LETTER Y WITH ACUTE
+ {0x00DE, 0x00E1, prA}, // L& [4] LATIN CAPITAL LETTER THORN..LATIN SMALL LETTER A WITH ACUTE
+ {0x00E2, 0x00E5, prN}, // Ll [4] LATIN SMALL LETTER A WITH CIRCUMFLEX..LATIN SMALL LETTER A WITH RING ABOVE
+ {0x00E6, 0x00E6, prA}, // Ll LATIN SMALL LETTER AE
+ {0x00E7, 0x00E7, prN}, // Ll LATIN SMALL LETTER C WITH CEDILLA
+ {0x00E8, 0x00EA, prA}, // Ll [3] LATIN SMALL LETTER E WITH GRAVE..LATIN SMALL LETTER E WITH CIRCUMFLEX
+ {0x00EB, 0x00EB, prN}, // Ll LATIN SMALL LETTER E WITH DIAERESIS
+ {0x00EC, 0x00ED, prA}, // Ll [2] LATIN SMALL LETTER I WITH GRAVE..LATIN SMALL LETTER I WITH ACUTE
+ {0x00EE, 0x00EF, prN}, // Ll [2] LATIN SMALL LETTER I WITH CIRCUMFLEX..LATIN SMALL LETTER I WITH DIAERESIS
+ {0x00F0, 0x00F0, prA}, // Ll LATIN SMALL LETTER ETH
+ {0x00F1, 0x00F1, prN}, // Ll LATIN SMALL LETTER N WITH TILDE
+ {0x00F2, 0x00F3, prA}, // Ll [2] LATIN SMALL LETTER O WITH GRAVE..LATIN SMALL LETTER O WITH ACUTE
+ {0x00F4, 0x00F6, prN}, // Ll [3] LATIN SMALL LETTER O WITH CIRCUMFLEX..LATIN SMALL LETTER O WITH DIAERESIS
+ {0x00F7, 0x00F7, prA}, // Sm DIVISION SIGN
+ {0x00F8, 0x00FA, prA}, // Ll [3] LATIN SMALL LETTER O WITH STROKE..LATIN SMALL LETTER U WITH ACUTE
+ {0x00FB, 0x00FB, prN}, // Ll LATIN SMALL LETTER U WITH CIRCUMFLEX
+ {0x00FC, 0x00FC, prA}, // Ll LATIN SMALL LETTER U WITH DIAERESIS
+ {0x00FD, 0x00FD, prN}, // Ll LATIN SMALL LETTER Y WITH ACUTE
+ {0x00FE, 0x00FE, prA}, // Ll LATIN SMALL LETTER THORN
+ {0x00FF, 0x00FF, prN}, // Ll LATIN SMALL LETTER Y WITH DIAERESIS
+ {0x0100, 0x0100, prN}, // Lu LATIN CAPITAL LETTER A WITH MACRON
+ {0x0101, 0x0101, prA}, // Ll LATIN SMALL LETTER A WITH MACRON
+ {0x0102, 0x0110, prN}, // L& [15] LATIN CAPITAL LETTER A WITH BREVE..LATIN CAPITAL LETTER D WITH STROKE
+ {0x0111, 0x0111, prA}, // Ll LATIN SMALL LETTER D WITH STROKE
+ {0x0112, 0x0112, prN}, // Lu LATIN CAPITAL LETTER E WITH MACRON
+ {0x0113, 0x0113, prA}, // Ll LATIN SMALL LETTER E WITH MACRON
+ {0x0114, 0x011A, prN}, // L& [7] LATIN CAPITAL LETTER E WITH BREVE..LATIN CAPITAL LETTER E WITH CARON
+ {0x011B, 0x011B, prA}, // Ll LATIN SMALL LETTER E WITH CARON
+ {0x011C, 0x0125, prN}, // L& [10] LATIN CAPITAL LETTER G WITH CIRCUMFLEX..LATIN SMALL LETTER H WITH CIRCUMFLEX
+ {0x0126, 0x0127, prA}, // L& [2] LATIN CAPITAL LETTER H WITH STROKE..LATIN SMALL LETTER H WITH STROKE
+ {0x0128, 0x012A, prN}, // L& [3] LATIN CAPITAL LETTER I WITH TILDE..LATIN CAPITAL LETTER I WITH MACRON
+ {0x012B, 0x012B, prA}, // Ll LATIN SMALL LETTER I WITH MACRON
+ {0x012C, 0x0130, prN}, // L& [5] LATIN CAPITAL LETTER I WITH BREVE..LATIN CAPITAL LETTER I WITH DOT ABOVE
+ {0x0131, 0x0133, prA}, // L& [3] LATIN SMALL LETTER DOTLESS I..LATIN SMALL LIGATURE IJ
+ {0x0134, 0x0137, prN}, // L& [4] LATIN CAPITAL LETTER J WITH CIRCUMFLEX..LATIN SMALL LETTER K WITH CEDILLA
+ {0x0138, 0x0138, prA}, // Ll LATIN SMALL LETTER KRA
+ {0x0139, 0x013E, prN}, // L& [6] LATIN CAPITAL LETTER L WITH ACUTE..LATIN SMALL LETTER L WITH CARON
+ {0x013F, 0x0142, prA}, // L& [4] LATIN CAPITAL LETTER L WITH MIDDLE DOT..LATIN SMALL LETTER L WITH STROKE
+ {0x0143, 0x0143, prN}, // Lu LATIN CAPITAL LETTER N WITH ACUTE
+ {0x0144, 0x0144, prA}, // Ll LATIN SMALL LETTER N WITH ACUTE
+ {0x0145, 0x0147, prN}, // L& [3] LATIN CAPITAL LETTER N WITH CEDILLA..LATIN CAPITAL LETTER N WITH CARON
+ {0x0148, 0x014B, prA}, // L& [4] LATIN SMALL LETTER N WITH CARON..LATIN SMALL LETTER ENG
+ {0x014C, 0x014C, prN}, // Lu LATIN CAPITAL LETTER O WITH MACRON
+ {0x014D, 0x014D, prA}, // Ll LATIN SMALL LETTER O WITH MACRON
+ {0x014E, 0x0151, prN}, // L& [4] LATIN CAPITAL LETTER O WITH BREVE..LATIN SMALL LETTER O WITH DOUBLE ACUTE
+ {0x0152, 0x0153, prA}, // L& [2] LATIN CAPITAL LIGATURE OE..LATIN SMALL LIGATURE OE
+ {0x0154, 0x0165, prN}, // L& [18] LATIN CAPITAL LETTER R WITH ACUTE..LATIN SMALL LETTER T WITH CARON
+ {0x0166, 0x0167, prA}, // L& [2] LATIN CAPITAL LETTER T WITH STROKE..LATIN SMALL LETTER T WITH STROKE
+ {0x0168, 0x016A, prN}, // L& [3] LATIN CAPITAL LETTER U WITH TILDE..LATIN CAPITAL LETTER U WITH MACRON
+ {0x016B, 0x016B, prA}, // Ll LATIN SMALL LETTER U WITH MACRON
+ {0x016C, 0x017F, prN}, // L& [20] LATIN CAPITAL LETTER U WITH BREVE..LATIN SMALL LETTER LONG S
+ {0x0180, 0x01BA, prN}, // L& [59] LATIN SMALL LETTER B WITH STROKE..LATIN SMALL LETTER EZH WITH TAIL
+ {0x01BB, 0x01BB, prN}, // Lo LATIN LETTER TWO WITH STROKE
+ {0x01BC, 0x01BF, prN}, // L& [4] LATIN CAPITAL LETTER TONE FIVE..LATIN LETTER WYNN
+ {0x01C0, 0x01C3, prN}, // Lo [4] LATIN LETTER DENTAL CLICK..LATIN LETTER RETROFLEX CLICK
+ {0x01C4, 0x01CD, prN}, // L& [10] LATIN CAPITAL LETTER DZ WITH CARON..LATIN CAPITAL LETTER A WITH CARON
+ {0x01CE, 0x01CE, prA}, // Ll LATIN SMALL LETTER A WITH CARON
+ {0x01CF, 0x01CF, prN}, // Lu LATIN CAPITAL LETTER I WITH CARON
+ {0x01D0, 0x01D0, prA}, // Ll LATIN SMALL LETTER I WITH CARON
+ {0x01D1, 0x01D1, prN}, // Lu LATIN CAPITAL LETTER O WITH CARON
+ {0x01D2, 0x01D2, prA}, // Ll LATIN SMALL LETTER O WITH CARON
+ {0x01D3, 0x01D3, prN}, // Lu LATIN CAPITAL LETTER U WITH CARON
+ {0x01D4, 0x01D4, prA}, // Ll LATIN SMALL LETTER U WITH CARON
+ {0x01D5, 0x01D5, prN}, // Lu LATIN CAPITAL LETTER U WITH DIAERESIS AND MACRON
+ {0x01D6, 0x01D6, prA}, // Ll LATIN SMALL LETTER U WITH DIAERESIS AND MACRON
+ {0x01D7, 0x01D7, prN}, // Lu LATIN CAPITAL LETTER U WITH DIAERESIS AND ACUTE
+ {0x01D8, 0x01D8, prA}, // Ll LATIN SMALL LETTER U WITH DIAERESIS AND ACUTE
+ {0x01D9, 0x01D9, prN}, // Lu LATIN CAPITAL LETTER U WITH DIAERESIS AND CARON
+ {0x01DA, 0x01DA, prA}, // Ll LATIN SMALL LETTER U WITH DIAERESIS AND CARON
+ {0x01DB, 0x01DB, prN}, // Lu LATIN CAPITAL LETTER U WITH DIAERESIS AND GRAVE
+ {0x01DC, 0x01DC, prA}, // Ll LATIN SMALL LETTER U WITH DIAERESIS AND GRAVE
+ {0x01DD, 0x024F, prN}, // L& [115] LATIN SMALL LETTER TURNED E..LATIN SMALL LETTER Y WITH STROKE
+ {0x0250, 0x0250, prN}, // Ll LATIN SMALL LETTER TURNED A
+ {0x0251, 0x0251, prA}, // Ll LATIN SMALL LETTER ALPHA
+ {0x0252, 0x0260, prN}, // Ll [15] LATIN SMALL LETTER TURNED ALPHA..LATIN SMALL LETTER G WITH HOOK
+ {0x0261, 0x0261, prA}, // Ll LATIN SMALL LETTER SCRIPT G
+ {0x0262, 0x0293, prN}, // Ll [50] LATIN LETTER SMALL CAPITAL G..LATIN SMALL LETTER EZH WITH CURL
+ {0x0294, 0x0294, prN}, // Lo LATIN LETTER GLOTTAL STOP
+ {0x0295, 0x02AF, prN}, // Ll [27] LATIN LETTER PHARYNGEAL VOICED FRICATIVE..LATIN SMALL LETTER TURNED H WITH FISHHOOK AND TAIL
+ {0x02B0, 0x02C1, prN}, // Lm [18] MODIFIER LETTER SMALL H..MODIFIER LETTER REVERSED GLOTTAL STOP
+ {0x02C2, 0x02C3, prN}, // Sk [2] MODIFIER LETTER LEFT ARROWHEAD..MODIFIER LETTER RIGHT ARROWHEAD
+ {0x02C4, 0x02C4, prA}, // Sk MODIFIER LETTER UP ARROWHEAD
+ {0x02C5, 0x02C5, prN}, // Sk MODIFIER LETTER DOWN ARROWHEAD
+ {0x02C6, 0x02C6, prN}, // Lm MODIFIER LETTER CIRCUMFLEX ACCENT
+ {0x02C7, 0x02C7, prA}, // Lm CARON
+ {0x02C8, 0x02C8, prN}, // Lm MODIFIER LETTER VERTICAL LINE
+ {0x02C9, 0x02CB, prA}, // Lm [3] MODIFIER LETTER MACRON..MODIFIER LETTER GRAVE ACCENT
+ {0x02CC, 0x02CC, prN}, // Lm MODIFIER LETTER LOW VERTICAL LINE
+ {0x02CD, 0x02CD, prA}, // Lm MODIFIER LETTER LOW MACRON
+ {0x02CE, 0x02CF, prN}, // Lm [2] MODIFIER LETTER LOW GRAVE ACCENT..MODIFIER LETTER LOW ACUTE ACCENT
+ {0x02D0, 0x02D0, prA}, // Lm MODIFIER LETTER TRIANGULAR COLON
+ {0x02D1, 0x02D1, prN}, // Lm MODIFIER LETTER HALF TRIANGULAR COLON
+ {0x02D2, 0x02D7, prN}, // Sk [6] MODIFIER LETTER CENTRED RIGHT HALF RING..MODIFIER LETTER MINUS SIGN
+ {0x02D8, 0x02DB, prA}, // Sk [4] BREVE..OGONEK
+ {0x02DC, 0x02DC, prN}, // Sk SMALL TILDE
+ {0x02DD, 0x02DD, prA}, // Sk DOUBLE ACUTE ACCENT
+ {0x02DE, 0x02DE, prN}, // Sk MODIFIER LETTER RHOTIC HOOK
+ {0x02DF, 0x02DF, prA}, // Sk MODIFIER LETTER CROSS ACCENT
+ {0x02E0, 0x02E4, prN}, // Lm [5] MODIFIER LETTER SMALL GAMMA..MODIFIER LETTER SMALL REVERSED GLOTTAL STOP
+ {0x02E5, 0x02EB, prN}, // Sk [7] MODIFIER LETTER EXTRA-HIGH TONE BAR..MODIFIER LETTER YANG DEPARTING TONE MARK
+ {0x02EC, 0x02EC, prN}, // Lm MODIFIER LETTER VOICING
+ {0x02ED, 0x02ED, prN}, // Sk MODIFIER LETTER UNASPIRATED
+ {0x02EE, 0x02EE, prN}, // Lm MODIFIER LETTER DOUBLE APOSTROPHE
+ {0x02EF, 0x02FF, prN}, // Sk [17] MODIFIER LETTER LOW DOWN ARROWHEAD..MODIFIER LETTER LOW LEFT ARROW
+ {0x0300, 0x036F, prA}, // Mn [112] COMBINING GRAVE ACCENT..COMBINING LATIN SMALL LETTER X
+ {0x0370, 0x0373, prN}, // L& [4] GREEK CAPITAL LETTER HETA..GREEK SMALL LETTER ARCHAIC SAMPI
+ {0x0374, 0x0374, prN}, // Lm GREEK NUMERAL SIGN
+ {0x0375, 0x0375, prN}, // Sk GREEK LOWER NUMERAL SIGN
+ {0x0376, 0x0377, prN}, // L& [2] GREEK CAPITAL LETTER PAMPHYLIAN DIGAMMA..GREEK SMALL LETTER PAMPHYLIAN DIGAMMA
+ {0x037A, 0x037A, prN}, // Lm GREEK YPOGEGRAMMENI
+ {0x037B, 0x037D, prN}, // Ll [3] GREEK SMALL REVERSED LUNATE SIGMA SYMBOL..GREEK SMALL REVERSED DOTTED LUNATE SIGMA SYMBOL
+ {0x037E, 0x037E, prN}, // Po GREEK QUESTION MARK
+ {0x037F, 0x037F, prN}, // Lu GREEK CAPITAL LETTER YOT
+ {0x0384, 0x0385, prN}, // Sk [2] GREEK TONOS..GREEK DIALYTIKA TONOS
+ {0x0386, 0x0386, prN}, // Lu GREEK CAPITAL LETTER ALPHA WITH TONOS
+ {0x0387, 0x0387, prN}, // Po GREEK ANO TELEIA
+ {0x0388, 0x038A, prN}, // Lu [3] GREEK CAPITAL LETTER EPSILON WITH TONOS..GREEK CAPITAL LETTER IOTA WITH TONOS
+ {0x038C, 0x038C, prN}, // Lu GREEK CAPITAL LETTER OMICRON WITH TONOS
+ {0x038E, 0x0390, prN}, // L& [3] GREEK CAPITAL LETTER UPSILON WITH TONOS..GREEK SMALL LETTER IOTA WITH DIALYTIKA AND TONOS
+ {0x0391, 0x03A1, prA}, // Lu [17] GREEK CAPITAL LETTER ALPHA..GREEK CAPITAL LETTER RHO
+ {0x03A3, 0x03A9, prA}, // Lu [7] GREEK CAPITAL LETTER SIGMA..GREEK CAPITAL LETTER OMEGA
+ {0x03AA, 0x03B0, prN}, // L& [7] GREEK CAPITAL LETTER IOTA WITH DIALYTIKA..GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND TONOS
+ {0x03B1, 0x03C1, prA}, // Ll [17] GREEK SMALL LETTER ALPHA..GREEK SMALL LETTER RHO
+ {0x03C2, 0x03C2, prN}, // Ll GREEK SMALL LETTER FINAL SIGMA
+ {0x03C3, 0x03C9, prA}, // Ll [7] GREEK SMALL LETTER SIGMA..GREEK SMALL LETTER OMEGA
+ {0x03CA, 0x03F5, prN}, // L& [44] GREEK SMALL LETTER IOTA WITH DIALYTIKA..GREEK LUNATE EPSILON SYMBOL
+ {0x03F6, 0x03F6, prN}, // Sm GREEK REVERSED LUNATE EPSILON SYMBOL
+ {0x03F7, 0x03FF, prN}, // L& [9] GREEK CAPITAL LETTER SHO..GREEK CAPITAL REVERSED DOTTED LUNATE SIGMA SYMBOL
+ {0x0400, 0x0400, prN}, // Lu CYRILLIC CAPITAL LETTER IE WITH GRAVE
+ {0x0401, 0x0401, prA}, // Lu CYRILLIC CAPITAL LETTER IO
+ {0x0402, 0x040F, prN}, // Lu [14] CYRILLIC CAPITAL LETTER DJE..CYRILLIC CAPITAL LETTER DZHE
+ {0x0410, 0x044F, prA}, // L& [64] CYRILLIC CAPITAL LETTER A..CYRILLIC SMALL LETTER YA
+ {0x0450, 0x0450, prN}, // Ll CYRILLIC SMALL LETTER IE WITH GRAVE
+ {0x0451, 0x0451, prA}, // Ll CYRILLIC SMALL LETTER IO
+ {0x0452, 0x0481, prN}, // L& [48] CYRILLIC SMALL LETTER DJE..CYRILLIC SMALL LETTER KOPPA
+ {0x0482, 0x0482, prN}, // So CYRILLIC THOUSANDS SIGN
+ {0x0483, 0x0487, prN}, // Mn [5] COMBINING CYRILLIC TITLO..COMBINING CYRILLIC POKRYTIE
+ {0x0488, 0x0489, prN}, // Me [2] COMBINING CYRILLIC HUNDRED THOUSANDS SIGN..COMBINING CYRILLIC MILLIONS SIGN
+ {0x048A, 0x04FF, prN}, // L& [118] CYRILLIC CAPITAL LETTER SHORT I WITH TAIL..CYRILLIC SMALL LETTER HA WITH STROKE
+ {0x0500, 0x052F, prN}, // L& [48] CYRILLIC CAPITAL LETTER KOMI DE..CYRILLIC SMALL LETTER EL WITH DESCENDER
+ {0x0531, 0x0556, prN}, // Lu [38] ARMENIAN CAPITAL LETTER AYB..ARMENIAN CAPITAL LETTER FEH
+ {0x0559, 0x0559, prN}, // Lm ARMENIAN MODIFIER LETTER LEFT HALF RING
+ {0x055A, 0x055F, prN}, // Po [6] ARMENIAN APOSTROPHE..ARMENIAN ABBREVIATION MARK
+ {0x0560, 0x0588, prN}, // Ll [41] ARMENIAN SMALL LETTER TURNED AYB..ARMENIAN SMALL LETTER YI WITH STROKE
+ {0x0589, 0x0589, prN}, // Po ARMENIAN FULL STOP
+ {0x058A, 0x058A, prN}, // Pd ARMENIAN HYPHEN
+ {0x058D, 0x058E, prN}, // So [2] RIGHT-FACING ARMENIAN ETERNITY SIGN..LEFT-FACING ARMENIAN ETERNITY SIGN
+ {0x058F, 0x058F, prN}, // Sc ARMENIAN DRAM SIGN
+ {0x0591, 0x05BD, prN}, // Mn [45] HEBREW ACCENT ETNAHTA..HEBREW POINT METEG
+ {0x05BE, 0x05BE, prN}, // Pd HEBREW PUNCTUATION MAQAF
+ {0x05BF, 0x05BF, prN}, // Mn HEBREW POINT RAFE
+ {0x05C0, 0x05C0, prN}, // Po HEBREW PUNCTUATION PASEQ
+ {0x05C1, 0x05C2, prN}, // Mn [2] HEBREW POINT SHIN DOT..HEBREW POINT SIN DOT
+ {0x05C3, 0x05C3, prN}, // Po HEBREW PUNCTUATION SOF PASUQ
+ {0x05C4, 0x05C5, prN}, // Mn [2] HEBREW MARK UPPER DOT..HEBREW MARK LOWER DOT
+ {0x05C6, 0x05C6, prN}, // Po HEBREW PUNCTUATION NUN HAFUKHA
+ {0x05C7, 0x05C7, prN}, // Mn HEBREW POINT QAMATS QATAN
+ {0x05D0, 0x05EA, prN}, // Lo [27] HEBREW LETTER ALEF..HEBREW LETTER TAV
+ {0x05EF, 0x05F2, prN}, // Lo [4] HEBREW YOD TRIANGLE..HEBREW LIGATURE YIDDISH DOUBLE YOD
+ {0x05F3, 0x05F4, prN}, // Po [2] HEBREW PUNCTUATION GERESH..HEBREW PUNCTUATION GERSHAYIM
+ {0x0600, 0x0605, prN}, // Cf [6] ARABIC NUMBER SIGN..ARABIC NUMBER MARK ABOVE
+ {0x0606, 0x0608, prN}, // Sm [3] ARABIC-INDIC CUBE ROOT..ARABIC RAY
+ {0x0609, 0x060A, prN}, // Po [2] ARABIC-INDIC PER MILLE SIGN..ARABIC-INDIC PER TEN THOUSAND SIGN
+ {0x060B, 0x060B, prN}, // Sc AFGHANI SIGN
+ {0x060C, 0x060D, prN}, // Po [2] ARABIC COMMA..ARABIC DATE SEPARATOR
+ {0x060E, 0x060F, prN}, // So [2] ARABIC POETIC VERSE SIGN..ARABIC SIGN MISRA
+ {0x0610, 0x061A, prN}, // Mn [11] ARABIC SIGN SALLALLAHOU ALAYHE WASSALLAM..ARABIC SMALL KASRA
+ {0x061B, 0x061B, prN}, // Po ARABIC SEMICOLON
+ {0x061C, 0x061C, prN}, // Cf ARABIC LETTER MARK
+ {0x061D, 0x061F, prN}, // Po [3] ARABIC END OF TEXT MARK..ARABIC QUESTION MARK
+ {0x0620, 0x063F, prN}, // Lo [32] ARABIC LETTER KASHMIRI YEH..ARABIC LETTER FARSI YEH WITH THREE DOTS ABOVE
+ {0x0640, 0x0640, prN}, // Lm ARABIC TATWEEL
+ {0x0641, 0x064A, prN}, // Lo [10] ARABIC LETTER FEH..ARABIC LETTER YEH
+ {0x064B, 0x065F, prN}, // Mn [21] ARABIC FATHATAN..ARABIC WAVY HAMZA BELOW
+ {0x0660, 0x0669, prN}, // Nd [10] ARABIC-INDIC DIGIT ZERO..ARABIC-INDIC DIGIT NINE
+ {0x066A, 0x066D, prN}, // Po [4] ARABIC PERCENT SIGN..ARABIC FIVE POINTED STAR
+ {0x066E, 0x066F, prN}, // Lo [2] ARABIC LETTER DOTLESS BEH..ARABIC LETTER DOTLESS QAF
+ {0x0670, 0x0670, prN}, // Mn ARABIC LETTER SUPERSCRIPT ALEF
+ {0x0671, 0x06D3, prN}, // Lo [99] ARABIC LETTER ALEF WASLA..ARABIC LETTER YEH BARREE WITH HAMZA ABOVE
+ {0x06D4, 0x06D4, prN}, // Po ARABIC FULL STOP
+ {0x06D5, 0x06D5, prN}, // Lo ARABIC LETTER AE
+ {0x06D6, 0x06DC, prN}, // Mn [7] ARABIC SMALL HIGH LIGATURE SAD WITH LAM WITH ALEF MAKSURA..ARABIC SMALL HIGH SEEN
+ {0x06DD, 0x06DD, prN}, // Cf ARABIC END OF AYAH
+ {0x06DE, 0x06DE, prN}, // So ARABIC START OF RUB EL HIZB
+ {0x06DF, 0x06E4, prN}, // Mn [6] ARABIC SMALL HIGH ROUNDED ZERO..ARABIC SMALL HIGH MADDA
+ {0x06E5, 0x06E6, prN}, // Lm [2] ARABIC SMALL WAW..ARABIC SMALL YEH
+ {0x06E7, 0x06E8, prN}, // Mn [2] ARABIC SMALL HIGH YEH..ARABIC SMALL HIGH NOON
+ {0x06E9, 0x06E9, prN}, // So ARABIC PLACE OF SAJDAH
+ {0x06EA, 0x06ED, prN}, // Mn [4] ARABIC EMPTY CENTRE LOW STOP..ARABIC SMALL LOW MEEM
+ {0x06EE, 0x06EF, prN}, // Lo [2] ARABIC LETTER DAL WITH INVERTED V..ARABIC LETTER REH WITH INVERTED V
+ {0x06F0, 0x06F9, prN}, // Nd [10] EXTENDED ARABIC-INDIC DIGIT ZERO..EXTENDED ARABIC-INDIC DIGIT NINE
+ {0x06FA, 0x06FC, prN}, // Lo [3] ARABIC LETTER SHEEN WITH DOT BELOW..ARABIC LETTER GHAIN WITH DOT BELOW
+ {0x06FD, 0x06FE, prN}, // So [2] ARABIC SIGN SINDHI AMPERSAND..ARABIC SIGN SINDHI POSTPOSITION MEN
+ {0x06FF, 0x06FF, prN}, // Lo ARABIC LETTER HEH WITH INVERTED V
+ {0x0700, 0x070D, prN}, // Po [14] SYRIAC END OF PARAGRAPH..SYRIAC HARKLEAN ASTERISCUS
+ {0x070F, 0x070F, prN}, // Cf SYRIAC ABBREVIATION MARK
+ {0x0710, 0x0710, prN}, // Lo SYRIAC LETTER ALAPH
+ {0x0711, 0x0711, prN}, // Mn SYRIAC LETTER SUPERSCRIPT ALAPH
+ {0x0712, 0x072F, prN}, // Lo [30] SYRIAC LETTER BETH..SYRIAC LETTER PERSIAN DHALATH
+ {0x0730, 0x074A, prN}, // Mn [27] SYRIAC PTHAHA ABOVE..SYRIAC BARREKH
+ {0x074D, 0x074F, prN}, // Lo [3] SYRIAC LETTER SOGDIAN ZHAIN..SYRIAC LETTER SOGDIAN FE
+ {0x0750, 0x077F, prN}, // Lo [48] ARABIC LETTER BEH WITH THREE DOTS HORIZONTALLY BELOW..ARABIC LETTER KAF WITH TWO DOTS ABOVE
+ {0x0780, 0x07A5, prN}, // Lo [38] THAANA LETTER HAA..THAANA LETTER WAAVU
+ {0x07A6, 0x07B0, prN}, // Mn [11] THAANA ABAFILI..THAANA SUKUN
+ {0x07B1, 0x07B1, prN}, // Lo THAANA LETTER NAA
+ {0x07C0, 0x07C9, prN}, // Nd [10] NKO DIGIT ZERO..NKO DIGIT NINE
+ {0x07CA, 0x07EA, prN}, // Lo [33] NKO LETTER A..NKO LETTER JONA RA
+ {0x07EB, 0x07F3, prN}, // Mn [9] NKO COMBINING SHORT HIGH TONE..NKO COMBINING DOUBLE DOT ABOVE
+ {0x07F4, 0x07F5, prN}, // Lm [2] NKO HIGH TONE APOSTROPHE..NKO LOW TONE APOSTROPHE
+ {0x07F6, 0x07F6, prN}, // So NKO SYMBOL OO DENNEN
+ {0x07F7, 0x07F9, prN}, // Po [3] NKO SYMBOL GBAKURUNEN..NKO EXCLAMATION MARK
+ {0x07FA, 0x07FA, prN}, // Lm NKO LAJANYALAN
+ {0x07FD, 0x07FD, prN}, // Mn NKO DANTAYALAN
+ {0x07FE, 0x07FF, prN}, // Sc [2] NKO DOROME SIGN..NKO TAMAN SIGN
+ {0x0800, 0x0815, prN}, // Lo [22] SAMARITAN LETTER ALAF..SAMARITAN LETTER TAAF
+ {0x0816, 0x0819, prN}, // Mn [4] SAMARITAN MARK IN..SAMARITAN MARK DAGESH
+ {0x081A, 0x081A, prN}, // Lm SAMARITAN MODIFIER LETTER EPENTHETIC YUT
+ {0x081B, 0x0823, prN}, // Mn [9] SAMARITAN MARK EPENTHETIC YUT..SAMARITAN VOWEL SIGN A
+ {0x0824, 0x0824, prN}, // Lm SAMARITAN MODIFIER LETTER SHORT A
+ {0x0825, 0x0827, prN}, // Mn [3] SAMARITAN VOWEL SIGN SHORT A..SAMARITAN VOWEL SIGN U
+ {0x0828, 0x0828, prN}, // Lm SAMARITAN MODIFIER LETTER I
+ {0x0829, 0x082D, prN}, // Mn [5] SAMARITAN VOWEL SIGN LONG I..SAMARITAN MARK NEQUDAA
+ {0x0830, 0x083E, prN}, // Po [15] SAMARITAN PUNCTUATION NEQUDAA..SAMARITAN PUNCTUATION ANNAAU
+ {0x0840, 0x0858, prN}, // Lo [25] MANDAIC LETTER HALQA..MANDAIC LETTER AIN
+ {0x0859, 0x085B, prN}, // Mn [3] MANDAIC AFFRICATION MARK..MANDAIC GEMINATION MARK
+ {0x085E, 0x085E, prN}, // Po MANDAIC PUNCTUATION
+ {0x0860, 0x086A, prN}, // Lo [11] SYRIAC LETTER MALAYALAM NGA..SYRIAC LETTER MALAYALAM SSA
+ {0x0870, 0x0887, prN}, // Lo [24] ARABIC LETTER ALEF WITH ATTACHED FATHA..ARABIC BASELINE ROUND DOT
+ {0x0888, 0x0888, prN}, // Sk ARABIC RAISED ROUND DOT
+ {0x0889, 0x088E, prN}, // Lo [6] ARABIC LETTER NOON WITH INVERTED SMALL V..ARABIC VERTICAL TAIL
+ {0x0890, 0x0891, prN}, // Cf [2] ARABIC POUND MARK ABOVE..ARABIC PIASTRE MARK ABOVE
+ {0x0898, 0x089F, prN}, // Mn [8] ARABIC SMALL HIGH WORD AL-JUZ..ARABIC HALF MADDA OVER MADDA
+ {0x08A0, 0x08C8, prN}, // Lo [41] ARABIC LETTER BEH WITH SMALL V BELOW..ARABIC LETTER GRAF
+ {0x08C9, 0x08C9, prN}, // Lm ARABIC SMALL FARSI YEH
+ {0x08CA, 0x08E1, prN}, // Mn [24] ARABIC SMALL HIGH FARSI YEH..ARABIC SMALL HIGH SIGN SAFHA
+ {0x08E2, 0x08E2, prN}, // Cf ARABIC DISPUTED END OF AYAH
+ {0x08E3, 0x08FF, prN}, // Mn [29] ARABIC TURNED DAMMA BELOW..ARABIC MARK SIDEWAYS NOON GHUNNA
+ {0x0900, 0x0902, prN}, // Mn [3] DEVANAGARI SIGN INVERTED CANDRABINDU..DEVANAGARI SIGN ANUSVARA
+ {0x0903, 0x0903, prN}, // Mc DEVANAGARI SIGN VISARGA
+ {0x0904, 0x0939, prN}, // Lo [54] DEVANAGARI LETTER SHORT A..DEVANAGARI LETTER HA
+ {0x093A, 0x093A, prN}, // Mn DEVANAGARI VOWEL SIGN OE
+ {0x093B, 0x093B, prN}, // Mc DEVANAGARI VOWEL SIGN OOE
+ {0x093C, 0x093C, prN}, // Mn DEVANAGARI SIGN NUKTA
+ {0x093D, 0x093D, prN}, // Lo DEVANAGARI SIGN AVAGRAHA
+ {0x093E, 0x0940, prN}, // Mc [3] DEVANAGARI VOWEL SIGN AA..DEVANAGARI VOWEL SIGN II
+ {0x0941, 0x0948, prN}, // Mn [8] DEVANAGARI VOWEL SIGN U..DEVANAGARI VOWEL SIGN AI
+ {0x0949, 0x094C, prN}, // Mc [4] DEVANAGARI VOWEL SIGN CANDRA O..DEVANAGARI VOWEL SIGN AU
+ {0x094D, 0x094D, prN}, // Mn DEVANAGARI SIGN VIRAMA
+ {0x094E, 0x094F, prN}, // Mc [2] DEVANAGARI VOWEL SIGN PRISHTHAMATRA E..DEVANAGARI VOWEL SIGN AW
+ {0x0950, 0x0950, prN}, // Lo DEVANAGARI OM
+ {0x0951, 0x0957, prN}, // Mn [7] DEVANAGARI STRESS SIGN UDATTA..DEVANAGARI VOWEL SIGN UUE
+ {0x0958, 0x0961, prN}, // Lo [10] DEVANAGARI LETTER QA..DEVANAGARI LETTER VOCALIC LL
+ {0x0962, 0x0963, prN}, // Mn [2] DEVANAGARI VOWEL SIGN VOCALIC L..DEVANAGARI VOWEL SIGN VOCALIC LL
+ {0x0964, 0x0965, prN}, // Po [2] DEVANAGARI DANDA..DEVANAGARI DOUBLE DANDA
+ {0x0966, 0x096F, prN}, // Nd [10] DEVANAGARI DIGIT ZERO..DEVANAGARI DIGIT NINE
+ {0x0970, 0x0970, prN}, // Po DEVANAGARI ABBREVIATION SIGN
+ {0x0971, 0x0971, prN}, // Lm DEVANAGARI SIGN HIGH SPACING DOT
+ {0x0972, 0x097F, prN}, // Lo [14] DEVANAGARI LETTER CANDRA A..DEVANAGARI LETTER BBA
+ {0x0980, 0x0980, prN}, // Lo BENGALI ANJI
+ {0x0981, 0x0981, prN}, // Mn BENGALI SIGN CANDRABINDU
+ {0x0982, 0x0983, prN}, // Mc [2] BENGALI SIGN ANUSVARA..BENGALI SIGN VISARGA
+ {0x0985, 0x098C, prN}, // Lo [8] BENGALI LETTER A..BENGALI LETTER VOCALIC L
+ {0x098F, 0x0990, prN}, // Lo [2] BENGALI LETTER E..BENGALI LETTER AI
+ {0x0993, 0x09A8, prN}, // Lo [22] BENGALI LETTER O..BENGALI LETTER NA
+ {0x09AA, 0x09B0, prN}, // Lo [7] BENGALI LETTER PA..BENGALI LETTER RA
+ {0x09B2, 0x09B2, prN}, // Lo BENGALI LETTER LA
+ {0x09B6, 0x09B9, prN}, // Lo [4] BENGALI LETTER SHA..BENGALI LETTER HA
+ {0x09BC, 0x09BC, prN}, // Mn BENGALI SIGN NUKTA
+ {0x09BD, 0x09BD, prN}, // Lo BENGALI SIGN AVAGRAHA
+ {0x09BE, 0x09C0, prN}, // Mc [3] BENGALI VOWEL SIGN AA..BENGALI VOWEL SIGN II
+ {0x09C1, 0x09C4, prN}, // Mn [4] BENGALI VOWEL SIGN U..BENGALI VOWEL SIGN VOCALIC RR
+ {0x09C7, 0x09C8, prN}, // Mc [2] BENGALI VOWEL SIGN E..BENGALI VOWEL SIGN AI
+ {0x09CB, 0x09CC, prN}, // Mc [2] BENGALI VOWEL SIGN O..BENGALI VOWEL SIGN AU
+ {0x09CD, 0x09CD, prN}, // Mn BENGALI SIGN VIRAMA
+ {0x09CE, 0x09CE, prN}, // Lo BENGALI LETTER KHANDA TA
+ {0x09D7, 0x09D7, prN}, // Mc BENGALI AU LENGTH MARK
+ {0x09DC, 0x09DD, prN}, // Lo [2] BENGALI LETTER RRA..BENGALI LETTER RHA
+ {0x09DF, 0x09E1, prN}, // Lo [3] BENGALI LETTER YYA..BENGALI LETTER VOCALIC LL
+ {0x09E2, 0x09E3, prN}, // Mn [2] BENGALI VOWEL SIGN VOCALIC L..BENGALI VOWEL SIGN VOCALIC LL
+ {0x09E6, 0x09EF, prN}, // Nd [10] BENGALI DIGIT ZERO..BENGALI DIGIT NINE
+ {0x09F0, 0x09F1, prN}, // Lo [2] BENGALI LETTER RA WITH MIDDLE DIAGONAL..BENGALI LETTER RA WITH LOWER DIAGONAL
+ {0x09F2, 0x09F3, prN}, // Sc [2] BENGALI RUPEE MARK..BENGALI RUPEE SIGN
+ {0x09F4, 0x09F9, prN}, // No [6] BENGALI CURRENCY NUMERATOR ONE..BENGALI CURRENCY DENOMINATOR SIXTEEN
+ {0x09FA, 0x09FA, prN}, // So BENGALI ISSHAR
+ {0x09FB, 0x09FB, prN}, // Sc BENGALI GANDA MARK
+ {0x09FC, 0x09FC, prN}, // Lo BENGALI LETTER VEDIC ANUSVARA
+ {0x09FD, 0x09FD, prN}, // Po BENGALI ABBREVIATION SIGN
+ {0x09FE, 0x09FE, prN}, // Mn BENGALI SANDHI MARK
+ {0x0A01, 0x0A02, prN}, // Mn [2] GURMUKHI SIGN ADAK BINDI..GURMUKHI SIGN BINDI
+ {0x0A03, 0x0A03, prN}, // Mc GURMUKHI SIGN VISARGA
+ {0x0A05, 0x0A0A, prN}, // Lo [6] GURMUKHI LETTER A..GURMUKHI LETTER UU
+ {0x0A0F, 0x0A10, prN}, // Lo [2] GURMUKHI LETTER EE..GURMUKHI LETTER AI
+ {0x0A13, 0x0A28, prN}, // Lo [22] GURMUKHI LETTER OO..GURMUKHI LETTER NA
+ {0x0A2A, 0x0A30, prN}, // Lo [7] GURMUKHI LETTER PA..GURMUKHI LETTER RA
+ {0x0A32, 0x0A33, prN}, // Lo [2] GURMUKHI LETTER LA..GURMUKHI LETTER LLA
+ {0x0A35, 0x0A36, prN}, // Lo [2] GURMUKHI LETTER VA..GURMUKHI LETTER SHA
+ {0x0A38, 0x0A39, prN}, // Lo [2] GURMUKHI LETTER SA..GURMUKHI LETTER HA
+ {0x0A3C, 0x0A3C, prN}, // Mn GURMUKHI SIGN NUKTA
+ {0x0A3E, 0x0A40, prN}, // Mc [3] GURMUKHI VOWEL SIGN AA..GURMUKHI VOWEL SIGN II
+ {0x0A41, 0x0A42, prN}, // Mn [2] GURMUKHI VOWEL SIGN U..GURMUKHI VOWEL SIGN UU
+ {0x0A47, 0x0A48, prN}, // Mn [2] GURMUKHI VOWEL SIGN EE..GURMUKHI VOWEL SIGN AI
+ {0x0A4B, 0x0A4D, prN}, // Mn [3] GURMUKHI VOWEL SIGN OO..GURMUKHI SIGN VIRAMA
+ {0x0A51, 0x0A51, prN}, // Mn GURMUKHI SIGN UDAAT
+ {0x0A59, 0x0A5C, prN}, // Lo [4] GURMUKHI LETTER KHHA..GURMUKHI LETTER RRA
+ {0x0A5E, 0x0A5E, prN}, // Lo GURMUKHI LETTER FA
+ {0x0A66, 0x0A6F, prN}, // Nd [10] GURMUKHI DIGIT ZERO..GURMUKHI DIGIT NINE
+ {0x0A70, 0x0A71, prN}, // Mn [2] GURMUKHI TIPPI..GURMUKHI ADDAK
+ {0x0A72, 0x0A74, prN}, // Lo [3] GURMUKHI IRI..GURMUKHI EK ONKAR
+ {0x0A75, 0x0A75, prN}, // Mn GURMUKHI SIGN YAKASH
+ {0x0A76, 0x0A76, prN}, // Po GURMUKHI ABBREVIATION SIGN
+ {0x0A81, 0x0A82, prN}, // Mn [2] GUJARATI SIGN CANDRABINDU..GUJARATI SIGN ANUSVARA
+ {0x0A83, 0x0A83, prN}, // Mc GUJARATI SIGN VISARGA
+ {0x0A85, 0x0A8D, prN}, // Lo [9] GUJARATI LETTER A..GUJARATI VOWEL CANDRA E
+ {0x0A8F, 0x0A91, prN}, // Lo [3] GUJARATI LETTER E..GUJARATI VOWEL CANDRA O
+ {0x0A93, 0x0AA8, prN}, // Lo [22] GUJARATI LETTER O..GUJARATI LETTER NA
+ {0x0AAA, 0x0AB0, prN}, // Lo [7] GUJARATI LETTER PA..GUJARATI LETTER RA
+ {0x0AB2, 0x0AB3, prN}, // Lo [2] GUJARATI LETTER LA..GUJARATI LETTER LLA
+ {0x0AB5, 0x0AB9, prN}, // Lo [5] GUJARATI LETTER VA..GUJARATI LETTER HA
+ {0x0ABC, 0x0ABC, prN}, // Mn GUJARATI SIGN NUKTA
+ {0x0ABD, 0x0ABD, prN}, // Lo GUJARATI SIGN AVAGRAHA
+ {0x0ABE, 0x0AC0, prN}, // Mc [3] GUJARATI VOWEL SIGN AA..GUJARATI VOWEL SIGN II
+ {0x0AC1, 0x0AC5, prN}, // Mn [5] GUJARATI VOWEL SIGN U..GUJARATI VOWEL SIGN CANDRA E
+ {0x0AC7, 0x0AC8, prN}, // Mn [2] GUJARATI VOWEL SIGN E..GUJARATI VOWEL SIGN AI
+ {0x0AC9, 0x0AC9, prN}, // Mc GUJARATI VOWEL SIGN CANDRA O
+ {0x0ACB, 0x0ACC, prN}, // Mc [2] GUJARATI VOWEL SIGN O..GUJARATI VOWEL SIGN AU
+ {0x0ACD, 0x0ACD, prN}, // Mn GUJARATI SIGN VIRAMA
+ {0x0AD0, 0x0AD0, prN}, // Lo GUJARATI OM
+ {0x0AE0, 0x0AE1, prN}, // Lo [2] GUJARATI LETTER VOCALIC RR..GUJARATI LETTER VOCALIC LL
+ {0x0AE2, 0x0AE3, prN}, // Mn [2] GUJARATI VOWEL SIGN VOCALIC L..GUJARATI VOWEL SIGN VOCALIC LL
+ {0x0AE6, 0x0AEF, prN}, // Nd [10] GUJARATI DIGIT ZERO..GUJARATI DIGIT NINE
+ {0x0AF0, 0x0AF0, prN}, // Po GUJARATI ABBREVIATION SIGN
+ {0x0AF1, 0x0AF1, prN}, // Sc GUJARATI RUPEE SIGN
+ {0x0AF9, 0x0AF9, prN}, // Lo GUJARATI LETTER ZHA
+ {0x0AFA, 0x0AFF, prN}, // Mn [6] GUJARATI SIGN SUKUN..GUJARATI SIGN TWO-CIRCLE NUKTA ABOVE
+ {0x0B01, 0x0B01, prN}, // Mn ORIYA SIGN CANDRABINDU
+ {0x0B02, 0x0B03, prN}, // Mc [2] ORIYA SIGN ANUSVARA..ORIYA SIGN VISARGA
+ {0x0B05, 0x0B0C, prN}, // Lo [8] ORIYA LETTER A..ORIYA LETTER VOCALIC L
+ {0x0B0F, 0x0B10, prN}, // Lo [2] ORIYA LETTER E..ORIYA LETTER AI
+ {0x0B13, 0x0B28, prN}, // Lo [22] ORIYA LETTER O..ORIYA LETTER NA
+ {0x0B2A, 0x0B30, prN}, // Lo [7] ORIYA LETTER PA..ORIYA LETTER RA
+ {0x0B32, 0x0B33, prN}, // Lo [2] ORIYA LETTER LA..ORIYA LETTER LLA
+ {0x0B35, 0x0B39, prN}, // Lo [5] ORIYA LETTER VA..ORIYA LETTER HA
+ {0x0B3C, 0x0B3C, prN}, // Mn ORIYA SIGN NUKTA
+ {0x0B3D, 0x0B3D, prN}, // Lo ORIYA SIGN AVAGRAHA
+ {0x0B3E, 0x0B3E, prN}, // Mc ORIYA VOWEL SIGN AA
+ {0x0B3F, 0x0B3F, prN}, // Mn ORIYA VOWEL SIGN I
+ {0x0B40, 0x0B40, prN}, // Mc ORIYA VOWEL SIGN II
+ {0x0B41, 0x0B44, prN}, // Mn [4] ORIYA VOWEL SIGN U..ORIYA VOWEL SIGN VOCALIC RR
+ {0x0B47, 0x0B48, prN}, // Mc [2] ORIYA VOWEL SIGN E..ORIYA VOWEL SIGN AI
+ {0x0B4B, 0x0B4C, prN}, // Mc [2] ORIYA VOWEL SIGN O..ORIYA VOWEL SIGN AU
+ {0x0B4D, 0x0B4D, prN}, // Mn ORIYA SIGN VIRAMA
+ {0x0B55, 0x0B56, prN}, // Mn [2] ORIYA SIGN OVERLINE..ORIYA AI LENGTH MARK
+ {0x0B57, 0x0B57, prN}, // Mc ORIYA AU LENGTH MARK
+ {0x0B5C, 0x0B5D, prN}, // Lo [2] ORIYA LETTER RRA..ORIYA LETTER RHA
+ {0x0B5F, 0x0B61, prN}, // Lo [3] ORIYA LETTER YYA..ORIYA LETTER VOCALIC LL
+ {0x0B62, 0x0B63, prN}, // Mn [2] ORIYA VOWEL SIGN VOCALIC L..ORIYA VOWEL SIGN VOCALIC LL
+ {0x0B66, 0x0B6F, prN}, // Nd [10] ORIYA DIGIT ZERO..ORIYA DIGIT NINE
+ {0x0B70, 0x0B70, prN}, // So ORIYA ISSHAR
+ {0x0B71, 0x0B71, prN}, // Lo ORIYA LETTER WA
+ {0x0B72, 0x0B77, prN}, // No [6] ORIYA FRACTION ONE QUARTER..ORIYA FRACTION THREE SIXTEENTHS
+ {0x0B82, 0x0B82, prN}, // Mn TAMIL SIGN ANUSVARA
+ {0x0B83, 0x0B83, prN}, // Lo TAMIL SIGN VISARGA
+ {0x0B85, 0x0B8A, prN}, // Lo [6] TAMIL LETTER A..TAMIL LETTER UU
+ {0x0B8E, 0x0B90, prN}, // Lo [3] TAMIL LETTER E..TAMIL LETTER AI
+ {0x0B92, 0x0B95, prN}, // Lo [4] TAMIL LETTER O..TAMIL LETTER KA
+ {0x0B99, 0x0B9A, prN}, // Lo [2] TAMIL LETTER NGA..TAMIL LETTER CA
+ {0x0B9C, 0x0B9C, prN}, // Lo TAMIL LETTER JA
+ {0x0B9E, 0x0B9F, prN}, // Lo [2] TAMIL LETTER NYA..TAMIL LETTER TTA
+ {0x0BA3, 0x0BA4, prN}, // Lo [2] TAMIL LETTER NNA..TAMIL LETTER TA
+ {0x0BA8, 0x0BAA, prN}, // Lo [3] TAMIL LETTER NA..TAMIL LETTER PA
+ {0x0BAE, 0x0BB9, prN}, // Lo [12] TAMIL LETTER MA..TAMIL LETTER HA
+ {0x0BBE, 0x0BBF, prN}, // Mc [2] TAMIL VOWEL SIGN AA..TAMIL VOWEL SIGN I
+ {0x0BC0, 0x0BC0, prN}, // Mn TAMIL VOWEL SIGN II
+ {0x0BC1, 0x0BC2, prN}, // Mc [2] TAMIL VOWEL SIGN U..TAMIL VOWEL SIGN UU
+ {0x0BC6, 0x0BC8, prN}, // Mc [3] TAMIL VOWEL SIGN E..TAMIL VOWEL SIGN AI
+ {0x0BCA, 0x0BCC, prN}, // Mc [3] TAMIL VOWEL SIGN O..TAMIL VOWEL SIGN AU
+ {0x0BCD, 0x0BCD, prN}, // Mn TAMIL SIGN VIRAMA
+ {0x0BD0, 0x0BD0, prN}, // Lo TAMIL OM
+ {0x0BD7, 0x0BD7, prN}, // Mc TAMIL AU LENGTH MARK
+ {0x0BE6, 0x0BEF, prN}, // Nd [10] TAMIL DIGIT ZERO..TAMIL DIGIT NINE
+ {0x0BF0, 0x0BF2, prN}, // No [3] TAMIL NUMBER TEN..TAMIL NUMBER ONE THOUSAND
+ {0x0BF3, 0x0BF8, prN}, // So [6] TAMIL DAY SIGN..TAMIL AS ABOVE SIGN
+ {0x0BF9, 0x0BF9, prN}, // Sc TAMIL RUPEE SIGN
+ {0x0BFA, 0x0BFA, prN}, // So TAMIL NUMBER SIGN
+ {0x0C00, 0x0C00, prN}, // Mn TELUGU SIGN COMBINING CANDRABINDU ABOVE
+ {0x0C01, 0x0C03, prN}, // Mc [3] TELUGU SIGN CANDRABINDU..TELUGU SIGN VISARGA
+ {0x0C04, 0x0C04, prN}, // Mn TELUGU SIGN COMBINING ANUSVARA ABOVE
+ {0x0C05, 0x0C0C, prN}, // Lo [8] TELUGU LETTER A..TELUGU LETTER VOCALIC L
+ {0x0C0E, 0x0C10, prN}, // Lo [3] TELUGU LETTER E..TELUGU LETTER AI
+ {0x0C12, 0x0C28, prN}, // Lo [23] TELUGU LETTER O..TELUGU LETTER NA
+ {0x0C2A, 0x0C39, prN}, // Lo [16] TELUGU LETTER PA..TELUGU LETTER HA
+ {0x0C3C, 0x0C3C, prN}, // Mn TELUGU SIGN NUKTA
+ {0x0C3D, 0x0C3D, prN}, // Lo TELUGU SIGN AVAGRAHA
+ {0x0C3E, 0x0C40, prN}, // Mn [3] TELUGU VOWEL SIGN AA..TELUGU VOWEL SIGN II
+ {0x0C41, 0x0C44, prN}, // Mc [4] TELUGU VOWEL SIGN U..TELUGU VOWEL SIGN VOCALIC RR
+ {0x0C46, 0x0C48, prN}, // Mn [3] TELUGU VOWEL SIGN E..TELUGU VOWEL SIGN AI
+ {0x0C4A, 0x0C4D, prN}, // Mn [4] TELUGU VOWEL SIGN O..TELUGU SIGN VIRAMA
+ {0x0C55, 0x0C56, prN}, // Mn [2] TELUGU LENGTH MARK..TELUGU AI LENGTH MARK
+ {0x0C58, 0x0C5A, prN}, // Lo [3] TELUGU LETTER TSA..TELUGU LETTER RRRA
+ {0x0C5D, 0x0C5D, prN}, // Lo TELUGU LETTER NAKAARA POLLU
+ {0x0C60, 0x0C61, prN}, // Lo [2] TELUGU LETTER VOCALIC RR..TELUGU LETTER VOCALIC LL
+ {0x0C62, 0x0C63, prN}, // Mn [2] TELUGU VOWEL SIGN VOCALIC L..TELUGU VOWEL SIGN VOCALIC LL
+ {0x0C66, 0x0C6F, prN}, // Nd [10] TELUGU DIGIT ZERO..TELUGU DIGIT NINE
+ {0x0C77, 0x0C77, prN}, // Po TELUGU SIGN SIDDHAM
+ {0x0C78, 0x0C7E, prN}, // No [7] TELUGU FRACTION DIGIT ZERO FOR ODD POWERS OF FOUR..TELUGU FRACTION DIGIT THREE FOR EVEN POWERS OF FOUR
+ {0x0C7F, 0x0C7F, prN}, // So TELUGU SIGN TUUMU
+ {0x0C80, 0x0C80, prN}, // Lo KANNADA SIGN SPACING CANDRABINDU
+ {0x0C81, 0x0C81, prN}, // Mn KANNADA SIGN CANDRABINDU
+ {0x0C82, 0x0C83, prN}, // Mc [2] KANNADA SIGN ANUSVARA..KANNADA SIGN VISARGA
+ {0x0C84, 0x0C84, prN}, // Po KANNADA SIGN SIDDHAM
+ {0x0C85, 0x0C8C, prN}, // Lo [8] KANNADA LETTER A..KANNADA LETTER VOCALIC L
+ {0x0C8E, 0x0C90, prN}, // Lo [3] KANNADA LETTER E..KANNADA LETTER AI
+ {0x0C92, 0x0CA8, prN}, // Lo [23] KANNADA LETTER O..KANNADA LETTER NA
+ {0x0CAA, 0x0CB3, prN}, // Lo [10] KANNADA LETTER PA..KANNADA LETTER LLA
+ {0x0CB5, 0x0CB9, prN}, // Lo [5] KANNADA LETTER VA..KANNADA LETTER HA
+ {0x0CBC, 0x0CBC, prN}, // Mn KANNADA SIGN NUKTA
+ {0x0CBD, 0x0CBD, prN}, // Lo KANNADA SIGN AVAGRAHA
+ {0x0CBE, 0x0CBE, prN}, // Mc KANNADA VOWEL SIGN AA
+ {0x0CBF, 0x0CBF, prN}, // Mn KANNADA VOWEL SIGN I
+ {0x0CC0, 0x0CC4, prN}, // Mc [5] KANNADA VOWEL SIGN II..KANNADA VOWEL SIGN VOCALIC RR
+ {0x0CC6, 0x0CC6, prN}, // Mn KANNADA VOWEL SIGN E
+ {0x0CC7, 0x0CC8, prN}, // Mc [2] KANNADA VOWEL SIGN EE..KANNADA VOWEL SIGN AI
+ {0x0CCA, 0x0CCB, prN}, // Mc [2] KANNADA VOWEL SIGN O..KANNADA VOWEL SIGN OO
+ {0x0CCC, 0x0CCD, prN}, // Mn [2] KANNADA VOWEL SIGN AU..KANNADA SIGN VIRAMA
+ {0x0CD5, 0x0CD6, prN}, // Mc [2] KANNADA LENGTH MARK..KANNADA AI LENGTH MARK
+ {0x0CDD, 0x0CDE, prN}, // Lo [2] KANNADA LETTER NAKAARA POLLU..KANNADA LETTER FA
+ {0x0CE0, 0x0CE1, prN}, // Lo [2] KANNADA LETTER VOCALIC RR..KANNADA LETTER VOCALIC LL
+ {0x0CE2, 0x0CE3, prN}, // Mn [2] KANNADA VOWEL SIGN VOCALIC L..KANNADA VOWEL SIGN VOCALIC LL
+ {0x0CE6, 0x0CEF, prN}, // Nd [10] KANNADA DIGIT ZERO..KANNADA DIGIT NINE
+ {0x0CF1, 0x0CF2, prN}, // Lo [2] KANNADA SIGN JIHVAMULIYA..KANNADA SIGN UPADHMANIYA
+ {0x0D00, 0x0D01, prN}, // Mn [2] MALAYALAM SIGN COMBINING ANUSVARA ABOVE..MALAYALAM SIGN CANDRABINDU
+ {0x0D02, 0x0D03, prN}, // Mc [2] MALAYALAM SIGN ANUSVARA..MALAYALAM SIGN VISARGA
+ {0x0D04, 0x0D0C, prN}, // Lo [9] MALAYALAM LETTER VEDIC ANUSVARA..MALAYALAM LETTER VOCALIC L
+ {0x0D0E, 0x0D10, prN}, // Lo [3] MALAYALAM LETTER E..MALAYALAM LETTER AI
+ {0x0D12, 0x0D3A, prN}, // Lo [41] MALAYALAM LETTER O..MALAYALAM LETTER TTTA
+ {0x0D3B, 0x0D3C, prN}, // Mn [2] MALAYALAM SIGN VERTICAL BAR VIRAMA..MALAYALAM SIGN CIRCULAR VIRAMA
+ {0x0D3D, 0x0D3D, prN}, // Lo MALAYALAM SIGN AVAGRAHA
+ {0x0D3E, 0x0D40, prN}, // Mc [3] MALAYALAM VOWEL SIGN AA..MALAYALAM VOWEL SIGN II
+ {0x0D41, 0x0D44, prN}, // Mn [4] MALAYALAM VOWEL SIGN U..MALAYALAM VOWEL SIGN VOCALIC RR
+ {0x0D46, 0x0D48, prN}, // Mc [3] MALAYALAM VOWEL SIGN E..MALAYALAM VOWEL SIGN AI
+ {0x0D4A, 0x0D4C, prN}, // Mc [3] MALAYALAM VOWEL SIGN O..MALAYALAM VOWEL SIGN AU
+ {0x0D4D, 0x0D4D, prN}, // Mn MALAYALAM SIGN VIRAMA
+ {0x0D4E, 0x0D4E, prN}, // Lo MALAYALAM LETTER DOT REPH
+ {0x0D4F, 0x0D4F, prN}, // So MALAYALAM SIGN PARA
+ {0x0D54, 0x0D56, prN}, // Lo [3] MALAYALAM LETTER CHILLU M..MALAYALAM LETTER CHILLU LLL
+ {0x0D57, 0x0D57, prN}, // Mc MALAYALAM AU LENGTH MARK
+ {0x0D58, 0x0D5E, prN}, // No [7] MALAYALAM FRACTION ONE ONE-HUNDRED-AND-SIXTIETH..MALAYALAM FRACTION ONE FIFTH
+ {0x0D5F, 0x0D61, prN}, // Lo [3] MALAYALAM LETTER ARCHAIC II..MALAYALAM LETTER VOCALIC LL
+ {0x0D62, 0x0D63, prN}, // Mn [2] MALAYALAM VOWEL SIGN VOCALIC L..MALAYALAM VOWEL SIGN VOCALIC LL
+ {0x0D66, 0x0D6F, prN}, // Nd [10] MALAYALAM DIGIT ZERO..MALAYALAM DIGIT NINE
+ {0x0D70, 0x0D78, prN}, // No [9] MALAYALAM NUMBER TEN..MALAYALAM FRACTION THREE SIXTEENTHS
+ {0x0D79, 0x0D79, prN}, // So MALAYALAM DATE MARK
+ {0x0D7A, 0x0D7F, prN}, // Lo [6] MALAYALAM LETTER CHILLU NN..MALAYALAM LETTER CHILLU K
+ {0x0D81, 0x0D81, prN}, // Mn SINHALA SIGN CANDRABINDU
+ {0x0D82, 0x0D83, prN}, // Mc [2] SINHALA SIGN ANUSVARAYA..SINHALA SIGN VISARGAYA
+ {0x0D85, 0x0D96, prN}, // Lo [18] SINHALA LETTER AYANNA..SINHALA LETTER AUYANNA
+ {0x0D9A, 0x0DB1, prN}, // Lo [24] SINHALA LETTER ALPAPRAANA KAYANNA..SINHALA LETTER DANTAJA NAYANNA
+ {0x0DB3, 0x0DBB, prN}, // Lo [9] SINHALA LETTER SANYAKA DAYANNA..SINHALA LETTER RAYANNA
+ {0x0DBD, 0x0DBD, prN}, // Lo SINHALA LETTER DANTAJA LAYANNA
+ {0x0DC0, 0x0DC6, prN}, // Lo [7] SINHALA LETTER VAYANNA..SINHALA LETTER FAYANNA
+ {0x0DCA, 0x0DCA, prN}, // Mn SINHALA SIGN AL-LAKUNA
+ {0x0DCF, 0x0DD1, prN}, // Mc [3] SINHALA VOWEL SIGN AELA-PILLA..SINHALA VOWEL SIGN DIGA AEDA-PILLA
+ {0x0DD2, 0x0DD4, prN}, // Mn [3] SINHALA VOWEL SIGN KETTI IS-PILLA..SINHALA VOWEL SIGN KETTI PAA-PILLA
+ {0x0DD6, 0x0DD6, prN}, // Mn SINHALA VOWEL SIGN DIGA PAA-PILLA
+ {0x0DD8, 0x0DDF, prN}, // Mc [8] SINHALA VOWEL SIGN GAETTA-PILLA..SINHALA VOWEL SIGN GAYANUKITTA
+ {0x0DE6, 0x0DEF, prN}, // Nd [10] SINHALA LITH DIGIT ZERO..SINHALA LITH DIGIT NINE
+ {0x0DF2, 0x0DF3, prN}, // Mc [2] SINHALA VOWEL SIGN DIGA GAETTA-PILLA..SINHALA VOWEL SIGN DIGA GAYANUKITTA
+ {0x0DF4, 0x0DF4, prN}, // Po SINHALA PUNCTUATION KUNDDALIYA
+ {0x0E01, 0x0E30, prN}, // Lo [48] THAI CHARACTER KO KAI..THAI CHARACTER SARA A
+ {0x0E31, 0x0E31, prN}, // Mn THAI CHARACTER MAI HAN-AKAT
+ {0x0E32, 0x0E33, prN}, // Lo [2] THAI CHARACTER SARA AA..THAI CHARACTER SARA AM
+ {0x0E34, 0x0E3A, prN}, // Mn [7] THAI CHARACTER SARA I..THAI CHARACTER PHINTHU
+ {0x0E3F, 0x0E3F, prN}, // Sc THAI CURRENCY SYMBOL BAHT
+ {0x0E40, 0x0E45, prN}, // Lo [6] THAI CHARACTER SARA E..THAI CHARACTER LAKKHANGYAO
+ {0x0E46, 0x0E46, prN}, // Lm THAI CHARACTER MAIYAMOK
+ {0x0E47, 0x0E4E, prN}, // Mn [8] THAI CHARACTER MAITAIKHU..THAI CHARACTER YAMAKKAN
+ {0x0E4F, 0x0E4F, prN}, // Po THAI CHARACTER FONGMAN
+ {0x0E50, 0x0E59, prN}, // Nd [10] THAI DIGIT ZERO..THAI DIGIT NINE
+ {0x0E5A, 0x0E5B, prN}, // Po [2] THAI CHARACTER ANGKHANKHU..THAI CHARACTER KHOMUT
+ {0x0E81, 0x0E82, prN}, // Lo [2] LAO LETTER KO..LAO LETTER KHO SUNG
+ {0x0E84, 0x0E84, prN}, // Lo LAO LETTER KHO TAM
+ {0x0E86, 0x0E8A, prN}, // Lo [5] LAO LETTER PALI GHA..LAO LETTER SO TAM
+ {0x0E8C, 0x0EA3, prN}, // Lo [24] LAO LETTER PALI JHA..LAO LETTER LO LING
+ {0x0EA5, 0x0EA5, prN}, // Lo LAO LETTER LO LOOT
+ {0x0EA7, 0x0EB0, prN}, // Lo [10] LAO LETTER WO..LAO VOWEL SIGN A
+ {0x0EB1, 0x0EB1, prN}, // Mn LAO VOWEL SIGN MAI KAN
+ {0x0EB2, 0x0EB3, prN}, // Lo [2] LAO VOWEL SIGN AA..LAO VOWEL SIGN AM
+ {0x0EB4, 0x0EBC, prN}, // Mn [9] LAO VOWEL SIGN I..LAO SEMIVOWEL SIGN LO
+ {0x0EBD, 0x0EBD, prN}, // Lo LAO SEMIVOWEL SIGN NYO
+ {0x0EC0, 0x0EC4, prN}, // Lo [5] LAO VOWEL SIGN E..LAO VOWEL SIGN AI
+ {0x0EC6, 0x0EC6, prN}, // Lm LAO KO LA
+ {0x0EC8, 0x0ECD, prN}, // Mn [6] LAO TONE MAI EK..LAO NIGGAHITA
+ {0x0ED0, 0x0ED9, prN}, // Nd [10] LAO DIGIT ZERO..LAO DIGIT NINE
+ {0x0EDC, 0x0EDF, prN}, // Lo [4] LAO HO NO..LAO LETTER KHMU NYO
+ {0x0F00, 0x0F00, prN}, // Lo TIBETAN SYLLABLE OM
+ {0x0F01, 0x0F03, prN}, // So [3] TIBETAN MARK GTER YIG MGO TRUNCATED A..TIBETAN MARK GTER YIG MGO -UM GTER TSHEG MA
+ {0x0F04, 0x0F12, prN}, // Po [15] TIBETAN MARK INITIAL YIG MGO MDUN MA..TIBETAN MARK RGYA GRAM SHAD
+ {0x0F13, 0x0F13, prN}, // So TIBETAN MARK CARET -DZUD RTAGS ME LONG CAN
+ {0x0F14, 0x0F14, prN}, // Po TIBETAN MARK GTER TSHEG
+ {0x0F15, 0x0F17, prN}, // So [3] TIBETAN LOGOTYPE SIGN CHAD RTAGS..TIBETAN ASTROLOGICAL SIGN SGRA GCAN -CHAR RTAGS
+ {0x0F18, 0x0F19, prN}, // Mn [2] TIBETAN ASTROLOGICAL SIGN -KHYUD PA..TIBETAN ASTROLOGICAL SIGN SDONG TSHUGS
+ {0x0F1A, 0x0F1F, prN}, // So [6] TIBETAN SIGN RDEL DKAR GCIG..TIBETAN SIGN RDEL DKAR RDEL NAG
+ {0x0F20, 0x0F29, prN}, // Nd [10] TIBETAN DIGIT ZERO..TIBETAN DIGIT NINE
+ {0x0F2A, 0x0F33, prN}, // No [10] TIBETAN DIGIT HALF ONE..TIBETAN DIGIT HALF ZERO
+ {0x0F34, 0x0F34, prN}, // So TIBETAN MARK BSDUS RTAGS
+ {0x0F35, 0x0F35, prN}, // Mn TIBETAN MARK NGAS BZUNG NYI ZLA
+ {0x0F36, 0x0F36, prN}, // So TIBETAN MARK CARET -DZUD RTAGS BZHI MIG CAN
+ {0x0F37, 0x0F37, prN}, // Mn TIBETAN MARK NGAS BZUNG SGOR RTAGS
+ {0x0F38, 0x0F38, prN}, // So TIBETAN MARK CHE MGO
+ {0x0F39, 0x0F39, prN}, // Mn TIBETAN MARK TSA -PHRU
+ {0x0F3A, 0x0F3A, prN}, // Ps TIBETAN MARK GUG RTAGS GYON
+ {0x0F3B, 0x0F3B, prN}, // Pe TIBETAN MARK GUG RTAGS GYAS
+ {0x0F3C, 0x0F3C, prN}, // Ps TIBETAN MARK ANG KHANG GYON
+ {0x0F3D, 0x0F3D, prN}, // Pe TIBETAN MARK ANG KHANG GYAS
+ {0x0F3E, 0x0F3F, prN}, // Mc [2] TIBETAN SIGN YAR TSHES..TIBETAN SIGN MAR TSHES
+ {0x0F40, 0x0F47, prN}, // Lo [8] TIBETAN LETTER KA..TIBETAN LETTER JA
+ {0x0F49, 0x0F6C, prN}, // Lo [36] TIBETAN LETTER NYA..TIBETAN LETTER RRA
+ {0x0F71, 0x0F7E, prN}, // Mn [14] TIBETAN VOWEL SIGN AA..TIBETAN SIGN RJES SU NGA RO
+ {0x0F7F, 0x0F7F, prN}, // Mc TIBETAN SIGN RNAM BCAD
+ {0x0F80, 0x0F84, prN}, // Mn [5] TIBETAN VOWEL SIGN REVERSED I..TIBETAN MARK HALANTA
+ {0x0F85, 0x0F85, prN}, // Po TIBETAN MARK PALUTA
+ {0x0F86, 0x0F87, prN}, // Mn [2] TIBETAN SIGN LCI RTAGS..TIBETAN SIGN YANG RTAGS
+ {0x0F88, 0x0F8C, prN}, // Lo [5] TIBETAN SIGN LCE TSA CAN..TIBETAN SIGN INVERTED MCHU CAN
+ {0x0F8D, 0x0F97, prN}, // Mn [11] TIBETAN SUBJOINED SIGN LCE TSA CAN..TIBETAN SUBJOINED LETTER JA
+ {0x0F99, 0x0FBC, prN}, // Mn [36] TIBETAN SUBJOINED LETTER NYA..TIBETAN SUBJOINED LETTER FIXED-FORM RA
+ {0x0FBE, 0x0FC5, prN}, // So [8] TIBETAN KU RU KHA..TIBETAN SYMBOL RDO RJE
+ {0x0FC6, 0x0FC6, prN}, // Mn TIBETAN SYMBOL PADMA GDAN
+ {0x0FC7, 0x0FCC, prN}, // So [6] TIBETAN SYMBOL RDO RJE RGYA GRAM..TIBETAN SYMBOL NOR BU BZHI -KHYIL
+ {0x0FCE, 0x0FCF, prN}, // So [2] TIBETAN SIGN RDEL NAG RDEL DKAR..TIBETAN SIGN RDEL NAG GSUM
+ {0x0FD0, 0x0FD4, prN}, // Po [5] TIBETAN MARK BSKA- SHOG GI MGO RGYAN..TIBETAN MARK CLOSING BRDA RNYING YIG MGO SGAB MA
+ {0x0FD5, 0x0FD8, prN}, // So [4] RIGHT-FACING SVASTI SIGN..LEFT-FACING SVASTI SIGN WITH DOTS
+ {0x0FD9, 0x0FDA, prN}, // Po [2] TIBETAN MARK LEADING MCHAN RTAGS..TIBETAN MARK TRAILING MCHAN RTAGS
+ {0x1000, 0x102A, prN}, // Lo [43] MYANMAR LETTER KA..MYANMAR LETTER AU
+ {0x102B, 0x102C, prN}, // Mc [2] MYANMAR VOWEL SIGN TALL AA..MYANMAR VOWEL SIGN AA
+ {0x102D, 0x1030, prN}, // Mn [4] MYANMAR VOWEL SIGN I..MYANMAR VOWEL SIGN UU
+ {0x1031, 0x1031, prN}, // Mc MYANMAR VOWEL SIGN E
+ {0x1032, 0x1037, prN}, // Mn [6] MYANMAR VOWEL SIGN AI..MYANMAR SIGN DOT BELOW
+ {0x1038, 0x1038, prN}, // Mc MYANMAR SIGN VISARGA
+ {0x1039, 0x103A, prN}, // Mn [2] MYANMAR SIGN VIRAMA..MYANMAR SIGN ASAT
+ {0x103B, 0x103C, prN}, // Mc [2] MYANMAR CONSONANT SIGN MEDIAL YA..MYANMAR CONSONANT SIGN MEDIAL RA
+ {0x103D, 0x103E, prN}, // Mn [2] MYANMAR CONSONANT SIGN MEDIAL WA..MYANMAR CONSONANT SIGN MEDIAL HA
+ {0x103F, 0x103F, prN}, // Lo MYANMAR LETTER GREAT SA
+ {0x1040, 0x1049, prN}, // Nd [10] MYANMAR DIGIT ZERO..MYANMAR DIGIT NINE
+ {0x104A, 0x104F, prN}, // Po [6] MYANMAR SIGN LITTLE SECTION..MYANMAR SYMBOL GENITIVE
+ {0x1050, 0x1055, prN}, // Lo [6] MYANMAR LETTER SHA..MYANMAR LETTER VOCALIC LL
+ {0x1056, 0x1057, prN}, // Mc [2] MYANMAR VOWEL SIGN VOCALIC R..MYANMAR VOWEL SIGN VOCALIC RR
+ {0x1058, 0x1059, prN}, // Mn [2] MYANMAR VOWEL SIGN VOCALIC L..MYANMAR VOWEL SIGN VOCALIC LL
+ {0x105A, 0x105D, prN}, // Lo [4] MYANMAR LETTER MON NGA..MYANMAR LETTER MON BBE
+ {0x105E, 0x1060, prN}, // Mn [3] MYANMAR CONSONANT SIGN MON MEDIAL NA..MYANMAR CONSONANT SIGN MON MEDIAL LA
+ {0x1061, 0x1061, prN}, // Lo MYANMAR LETTER SGAW KAREN SHA
+ {0x1062, 0x1064, prN}, // Mc [3] MYANMAR VOWEL SIGN SGAW KAREN EU..MYANMAR TONE MARK SGAW KAREN KE PHO
+ {0x1065, 0x1066, prN}, // Lo [2] MYANMAR LETTER WESTERN PWO KAREN THA..MYANMAR LETTER WESTERN PWO KAREN PWA
+ {0x1067, 0x106D, prN}, // Mc [7] MYANMAR VOWEL SIGN WESTERN PWO KAREN EU..MYANMAR SIGN WESTERN PWO KAREN TONE-5
+ {0x106E, 0x1070, prN}, // Lo [3] MYANMAR LETTER EASTERN PWO KAREN NNA..MYANMAR LETTER EASTERN PWO KAREN GHWA
+ {0x1071, 0x1074, prN}, // Mn [4] MYANMAR VOWEL SIGN GEBA KAREN I..MYANMAR VOWEL SIGN KAYAH EE
+ {0x1075, 0x1081, prN}, // Lo [13] MYANMAR LETTER SHAN KA..MYANMAR LETTER SHAN HA
+ {0x1082, 0x1082, prN}, // Mn MYANMAR CONSONANT SIGN SHAN MEDIAL WA
+ {0x1083, 0x1084, prN}, // Mc [2] MYANMAR VOWEL SIGN SHAN AA..MYANMAR VOWEL SIGN SHAN E
+ {0x1085, 0x1086, prN}, // Mn [2] MYANMAR VOWEL SIGN SHAN E ABOVE..MYANMAR VOWEL SIGN SHAN FINAL Y
+ {0x1087, 0x108C, prN}, // Mc [6] MYANMAR SIGN SHAN TONE-2..MYANMAR SIGN SHAN COUNCIL TONE-3
+ {0x108D, 0x108D, prN}, // Mn MYANMAR SIGN SHAN COUNCIL EMPHATIC TONE
+ {0x108E, 0x108E, prN}, // Lo MYANMAR LETTER RUMAI PALAUNG FA
+ {0x108F, 0x108F, prN}, // Mc MYANMAR SIGN RUMAI PALAUNG TONE-5
+ {0x1090, 0x1099, prN}, // Nd [10] MYANMAR SHAN DIGIT ZERO..MYANMAR SHAN DIGIT NINE
+ {0x109A, 0x109C, prN}, // Mc [3] MYANMAR SIGN KHAMTI TONE-1..MYANMAR VOWEL SIGN AITON A
+ {0x109D, 0x109D, prN}, // Mn MYANMAR VOWEL SIGN AITON AI
+ {0x109E, 0x109F, prN}, // So [2] MYANMAR SYMBOL SHAN ONE..MYANMAR SYMBOL SHAN EXCLAMATION
+ {0x10A0, 0x10C5, prN}, // Lu [38] GEORGIAN CAPITAL LETTER AN..GEORGIAN CAPITAL LETTER HOE
+ {0x10C7, 0x10C7, prN}, // Lu GEORGIAN CAPITAL LETTER YN
+ {0x10CD, 0x10CD, prN}, // Lu GEORGIAN CAPITAL LETTER AEN
+ {0x10D0, 0x10FA, prN}, // Ll [43] GEORGIAN LETTER AN..GEORGIAN LETTER AIN
+ {0x10FB, 0x10FB, prN}, // Po GEORGIAN PARAGRAPH SEPARATOR
+ {0x10FC, 0x10FC, prN}, // Lm MODIFIER LETTER GEORGIAN NAR
+ {0x10FD, 0x10FF, prN}, // Ll [3] GEORGIAN LETTER AEN..GEORGIAN LETTER LABIAL SIGN
+ {0x1100, 0x115F, prW}, // Lo [96] HANGUL CHOSEONG KIYEOK..HANGUL CHOSEONG FILLER
+ {0x1160, 0x11FF, prN}, // Lo [160] HANGUL JUNGSEONG FILLER..HANGUL JONGSEONG SSANGNIEUN
+ {0x1200, 0x1248, prN}, // Lo [73] ETHIOPIC SYLLABLE HA..ETHIOPIC SYLLABLE QWA
+ {0x124A, 0x124D, prN}, // Lo [4] ETHIOPIC SYLLABLE QWI..ETHIOPIC SYLLABLE QWE
+ {0x1250, 0x1256, prN}, // Lo [7] ETHIOPIC SYLLABLE QHA..ETHIOPIC SYLLABLE QHO
+ {0x1258, 0x1258, prN}, // Lo ETHIOPIC SYLLABLE QHWA
+ {0x125A, 0x125D, prN}, // Lo [4] ETHIOPIC SYLLABLE QHWI..ETHIOPIC SYLLABLE QHWE
+ {0x1260, 0x1288, prN}, // Lo [41] ETHIOPIC SYLLABLE BA..ETHIOPIC SYLLABLE XWA
+ {0x128A, 0x128D, prN}, // Lo [4] ETHIOPIC SYLLABLE XWI..ETHIOPIC SYLLABLE XWE
+ {0x1290, 0x12B0, prN}, // Lo [33] ETHIOPIC SYLLABLE NA..ETHIOPIC SYLLABLE KWA
+ {0x12B2, 0x12B5, prN}, // Lo [4] ETHIOPIC SYLLABLE KWI..ETHIOPIC SYLLABLE KWE
+ {0x12B8, 0x12BE, prN}, // Lo [7] ETHIOPIC SYLLABLE KXA..ETHIOPIC SYLLABLE KXO
+ {0x12C0, 0x12C0, prN}, // Lo ETHIOPIC SYLLABLE KXWA
+ {0x12C2, 0x12C5, prN}, // Lo [4] ETHIOPIC SYLLABLE KXWI..ETHIOPIC SYLLABLE KXWE
+ {0x12C8, 0x12D6, prN}, // Lo [15] ETHIOPIC SYLLABLE WA..ETHIOPIC SYLLABLE PHARYNGEAL O
+ {0x12D8, 0x1310, prN}, // Lo [57] ETHIOPIC SYLLABLE ZA..ETHIOPIC SYLLABLE GWA
+ {0x1312, 0x1315, prN}, // Lo [4] ETHIOPIC SYLLABLE GWI..ETHIOPIC SYLLABLE GWE
+ {0x1318, 0x135A, prN}, // Lo [67] ETHIOPIC SYLLABLE GGA..ETHIOPIC SYLLABLE FYA
+ {0x135D, 0x135F, prN}, // Mn [3] ETHIOPIC COMBINING GEMINATION AND VOWEL LENGTH MARK..ETHIOPIC COMBINING GEMINATION MARK
+ {0x1360, 0x1368, prN}, // Po [9] ETHIOPIC SECTION MARK..ETHIOPIC PARAGRAPH SEPARATOR
+ {0x1369, 0x137C, prN}, // No [20] ETHIOPIC DIGIT ONE..ETHIOPIC NUMBER TEN THOUSAND
+ {0x1380, 0x138F, prN}, // Lo [16] ETHIOPIC SYLLABLE SEBATBEIT MWA..ETHIOPIC SYLLABLE PWE
+ {0x1390, 0x1399, prN}, // So [10] ETHIOPIC TONAL MARK YIZET..ETHIOPIC TONAL MARK KURT
+ {0x13A0, 0x13F5, prN}, // Lu [86] CHEROKEE LETTER A..CHEROKEE LETTER MV
+ {0x13F8, 0x13FD, prN}, // Ll [6] CHEROKEE SMALL LETTER YE..CHEROKEE SMALL LETTER MV
+ {0x1400, 0x1400, prN}, // Pd CANADIAN SYLLABICS HYPHEN
+ {0x1401, 0x166C, prN}, // Lo [620] CANADIAN SYLLABICS E..CANADIAN SYLLABICS CARRIER TTSA
+ {0x166D, 0x166D, prN}, // So CANADIAN SYLLABICS CHI SIGN
+ {0x166E, 0x166E, prN}, // Po CANADIAN SYLLABICS FULL STOP
+ {0x166F, 0x167F, prN}, // Lo [17] CANADIAN SYLLABICS QAI..CANADIAN SYLLABICS BLACKFOOT W
+ {0x1680, 0x1680, prN}, // Zs OGHAM SPACE MARK
+ {0x1681, 0x169A, prN}, // Lo [26] OGHAM LETTER BEITH..OGHAM LETTER PEITH
+ {0x169B, 0x169B, prN}, // Ps OGHAM FEATHER MARK
+ {0x169C, 0x169C, prN}, // Pe OGHAM REVERSED FEATHER MARK
+ {0x16A0, 0x16EA, prN}, // Lo [75] RUNIC LETTER FEHU FEOH FE F..RUNIC LETTER X
+ {0x16EB, 0x16ED, prN}, // Po [3] RUNIC SINGLE PUNCTUATION..RUNIC CROSS PUNCTUATION
+ {0x16EE, 0x16F0, prN}, // Nl [3] RUNIC ARLAUG SYMBOL..RUNIC BELGTHOR SYMBOL
+ {0x16F1, 0x16F8, prN}, // Lo [8] RUNIC LETTER K..RUNIC LETTER FRANKS CASKET AESC
+ {0x1700, 0x1711, prN}, // Lo [18] TAGALOG LETTER A..TAGALOG LETTER HA
+ {0x1712, 0x1714, prN}, // Mn [3] TAGALOG VOWEL SIGN I..TAGALOG SIGN VIRAMA
+ {0x1715, 0x1715, prN}, // Mc TAGALOG SIGN PAMUDPOD
+ {0x171F, 0x171F, prN}, // Lo TAGALOG LETTER ARCHAIC RA
+ {0x1720, 0x1731, prN}, // Lo [18] HANUNOO LETTER A..HANUNOO LETTER HA
+ {0x1732, 0x1733, prN}, // Mn [2] HANUNOO VOWEL SIGN I..HANUNOO VOWEL SIGN U
+ {0x1734, 0x1734, prN}, // Mc HANUNOO SIGN PAMUDPOD
+ {0x1735, 0x1736, prN}, // Po [2] PHILIPPINE SINGLE PUNCTUATION..PHILIPPINE DOUBLE PUNCTUATION
+ {0x1740, 0x1751, prN}, // Lo [18] BUHID LETTER A..BUHID LETTER HA
+ {0x1752, 0x1753, prN}, // Mn [2] BUHID VOWEL SIGN I..BUHID VOWEL SIGN U
+ {0x1760, 0x176C, prN}, // Lo [13] TAGBANWA LETTER A..TAGBANWA LETTER YA
+ {0x176E, 0x1770, prN}, // Lo [3] TAGBANWA LETTER LA..TAGBANWA LETTER SA
+ {0x1772, 0x1773, prN}, // Mn [2] TAGBANWA VOWEL SIGN I..TAGBANWA VOWEL SIGN U
+ {0x1780, 0x17B3, prN}, // Lo [52] KHMER LETTER KA..KHMER INDEPENDENT VOWEL QAU
+ {0x17B4, 0x17B5, prN}, // Mn [2] KHMER VOWEL INHERENT AQ..KHMER VOWEL INHERENT AA
+ {0x17B6, 0x17B6, prN}, // Mc KHMER VOWEL SIGN AA
+ {0x17B7, 0x17BD, prN}, // Mn [7] KHMER VOWEL SIGN I..KHMER VOWEL SIGN UA
+ {0x17BE, 0x17C5, prN}, // Mc [8] KHMER VOWEL SIGN OE..KHMER VOWEL SIGN AU
+ {0x17C6, 0x17C6, prN}, // Mn KHMER SIGN NIKAHIT
+ {0x17C7, 0x17C8, prN}, // Mc [2] KHMER SIGN REAHMUK..KHMER SIGN YUUKALEAPINTU
+ {0x17C9, 0x17D3, prN}, // Mn [11] KHMER SIGN MUUSIKATOAN..KHMER SIGN BATHAMASAT
+ {0x17D4, 0x17D6, prN}, // Po [3] KHMER SIGN KHAN..KHMER SIGN CAMNUC PII KUUH
+ {0x17D7, 0x17D7, prN}, // Lm KHMER SIGN LEK TOO
+ {0x17D8, 0x17DA, prN}, // Po [3] KHMER SIGN BEYYAL..KHMER SIGN KOOMUUT
+ {0x17DB, 0x17DB, prN}, // Sc KHMER CURRENCY SYMBOL RIEL
+ {0x17DC, 0x17DC, prN}, // Lo KHMER SIGN AVAKRAHASANYA
+ {0x17DD, 0x17DD, prN}, // Mn KHMER SIGN ATTHACAN
+ {0x17E0, 0x17E9, prN}, // Nd [10] KHMER DIGIT ZERO..KHMER DIGIT NINE
+ {0x17F0, 0x17F9, prN}, // No [10] KHMER SYMBOL LEK ATTAK SON..KHMER SYMBOL LEK ATTAK PRAM-BUON
+ {0x1800, 0x1805, prN}, // Po [6] MONGOLIAN BIRGA..MONGOLIAN FOUR DOTS
+ {0x1806, 0x1806, prN}, // Pd MONGOLIAN TODO SOFT HYPHEN
+ {0x1807, 0x180A, prN}, // Po [4] MONGOLIAN SIBE SYLLABLE BOUNDARY MARKER..MONGOLIAN NIRUGU
+ {0x180B, 0x180D, prN}, // Mn [3] MONGOLIAN FREE VARIATION SELECTOR ONE..MONGOLIAN FREE VARIATION SELECTOR THREE
+ {0x180E, 0x180E, prN}, // Cf MONGOLIAN VOWEL SEPARATOR
+ {0x180F, 0x180F, prN}, // Mn MONGOLIAN FREE VARIATION SELECTOR FOUR
+ {0x1810, 0x1819, prN}, // Nd [10] MONGOLIAN DIGIT ZERO..MONGOLIAN DIGIT NINE
+ {0x1820, 0x1842, prN}, // Lo [35] MONGOLIAN LETTER A..MONGOLIAN LETTER CHI
+ {0x1843, 0x1843, prN}, // Lm MONGOLIAN LETTER TODO LONG VOWEL SIGN
+ {0x1844, 0x1878, prN}, // Lo [53] MONGOLIAN LETTER TODO E..MONGOLIAN LETTER CHA WITH TWO DOTS
+ {0x1880, 0x1884, prN}, // Lo [5] MONGOLIAN LETTER ALI GALI ANUSVARA ONE..MONGOLIAN LETTER ALI GALI INVERTED UBADAMA
+ {0x1885, 0x1886, prN}, // Mn [2] MONGOLIAN LETTER ALI GALI BALUDA..MONGOLIAN LETTER ALI GALI THREE BALUDA
+ {0x1887, 0x18A8, prN}, // Lo [34] MONGOLIAN LETTER ALI GALI A..MONGOLIAN LETTER MANCHU ALI GALI BHA
+ {0x18A9, 0x18A9, prN}, // Mn MONGOLIAN LETTER ALI GALI DAGALGA
+ {0x18AA, 0x18AA, prN}, // Lo MONGOLIAN LETTER MANCHU ALI GALI LHA
+ {0x18B0, 0x18F5, prN}, // Lo [70] CANADIAN SYLLABICS OY..CANADIAN SYLLABICS CARRIER DENTAL S
+ {0x1900, 0x191E, prN}, // Lo [31] LIMBU VOWEL-CARRIER LETTER..LIMBU LETTER TRA
+ {0x1920, 0x1922, prN}, // Mn [3] LIMBU VOWEL SIGN A..LIMBU VOWEL SIGN U
+ {0x1923, 0x1926, prN}, // Mc [4] LIMBU VOWEL SIGN EE..LIMBU VOWEL SIGN AU
+ {0x1927, 0x1928, prN}, // Mn [2] LIMBU VOWEL SIGN E..LIMBU VOWEL SIGN O
+ {0x1929, 0x192B, prN}, // Mc [3] LIMBU SUBJOINED LETTER YA..LIMBU SUBJOINED LETTER WA
+ {0x1930, 0x1931, prN}, // Mc [2] LIMBU SMALL LETTER KA..LIMBU SMALL LETTER NGA
+ {0x1932, 0x1932, prN}, // Mn LIMBU SMALL LETTER ANUSVARA
+ {0x1933, 0x1938, prN}, // Mc [6] LIMBU SMALL LETTER TA..LIMBU SMALL LETTER LA
+ {0x1939, 0x193B, prN}, // Mn [3] LIMBU SIGN MUKPHRENG..LIMBU SIGN SA-I
+ {0x1940, 0x1940, prN}, // So LIMBU SIGN LOO
+ {0x1944, 0x1945, prN}, // Po [2] LIMBU EXCLAMATION MARK..LIMBU QUESTION MARK
+ {0x1946, 0x194F, prN}, // Nd [10] LIMBU DIGIT ZERO..LIMBU DIGIT NINE
+ {0x1950, 0x196D, prN}, // Lo [30] TAI LE LETTER KA..TAI LE LETTER AI
+ {0x1970, 0x1974, prN}, // Lo [5] TAI LE LETTER TONE-2..TAI LE LETTER TONE-6
+ {0x1980, 0x19AB, prN}, // Lo [44] NEW TAI LUE LETTER HIGH QA..NEW TAI LUE LETTER LOW SUA
+ {0x19B0, 0x19C9, prN}, // Lo [26] NEW TAI LUE VOWEL SIGN VOWEL SHORTENER..NEW TAI LUE TONE MARK-2
+ {0x19D0, 0x19D9, prN}, // Nd [10] NEW TAI LUE DIGIT ZERO..NEW TAI LUE DIGIT NINE
+ {0x19DA, 0x19DA, prN}, // No NEW TAI LUE THAM DIGIT ONE
+ {0x19DE, 0x19DF, prN}, // So [2] NEW TAI LUE SIGN LAE..NEW TAI LUE SIGN LAEV
+ {0x19E0, 0x19FF, prN}, // So [32] KHMER SYMBOL PATHAMASAT..KHMER SYMBOL DAP-PRAM ROC
+ {0x1A00, 0x1A16, prN}, // Lo [23] BUGINESE LETTER KA..BUGINESE LETTER HA
+ {0x1A17, 0x1A18, prN}, // Mn [2] BUGINESE VOWEL SIGN I..BUGINESE VOWEL SIGN U
+ {0x1A19, 0x1A1A, prN}, // Mc [2] BUGINESE VOWEL SIGN E..BUGINESE VOWEL SIGN O
+ {0x1A1B, 0x1A1B, prN}, // Mn BUGINESE VOWEL SIGN AE
+ {0x1A1E, 0x1A1F, prN}, // Po [2] BUGINESE PALLAWA..BUGINESE END OF SECTION
+ {0x1A20, 0x1A54, prN}, // Lo [53] TAI THAM LETTER HIGH KA..TAI THAM LETTER GREAT SA
+ {0x1A55, 0x1A55, prN}, // Mc TAI THAM CONSONANT SIGN MEDIAL RA
+ {0x1A56, 0x1A56, prN}, // Mn TAI THAM CONSONANT SIGN MEDIAL LA
+ {0x1A57, 0x1A57, prN}, // Mc TAI THAM CONSONANT SIGN LA TANG LAI
+ {0x1A58, 0x1A5E, prN}, // Mn [7] TAI THAM SIGN MAI KANG LAI..TAI THAM CONSONANT SIGN SA
+ {0x1A60, 0x1A60, prN}, // Mn TAI THAM SIGN SAKOT
+ {0x1A61, 0x1A61, prN}, // Mc TAI THAM VOWEL SIGN A
+ {0x1A62, 0x1A62, prN}, // Mn TAI THAM VOWEL SIGN MAI SAT
+ {0x1A63, 0x1A64, prN}, // Mc [2] TAI THAM VOWEL SIGN AA..TAI THAM VOWEL SIGN TALL AA
+ {0x1A65, 0x1A6C, prN}, // Mn [8] TAI THAM VOWEL SIGN I..TAI THAM VOWEL SIGN OA BELOW
+ {0x1A6D, 0x1A72, prN}, // Mc [6] TAI THAM VOWEL SIGN OY..TAI THAM VOWEL SIGN THAM AI
+ {0x1A73, 0x1A7C, prN}, // Mn [10] TAI THAM VOWEL SIGN OA ABOVE..TAI THAM SIGN KHUEN-LUE KARAN
+ {0x1A7F, 0x1A7F, prN}, // Mn TAI THAM COMBINING CRYPTOGRAMMIC DOT
+ {0x1A80, 0x1A89, prN}, // Nd [10] TAI THAM HORA DIGIT ZERO..TAI THAM HORA DIGIT NINE
+ {0x1A90, 0x1A99, prN}, // Nd [10] TAI THAM THAM DIGIT ZERO..TAI THAM THAM DIGIT NINE
+ {0x1AA0, 0x1AA6, prN}, // Po [7] TAI THAM SIGN WIANG..TAI THAM SIGN REVERSED ROTATED RANA
+ {0x1AA7, 0x1AA7, prN}, // Lm TAI THAM SIGN MAI YAMOK
+ {0x1AA8, 0x1AAD, prN}, // Po [6] TAI THAM SIGN KAAN..TAI THAM SIGN CAANG
+ {0x1AB0, 0x1ABD, prN}, // Mn [14] COMBINING DOUBLED CIRCUMFLEX ACCENT..COMBINING PARENTHESES BELOW
+ {0x1ABE, 0x1ABE, prN}, // Me COMBINING PARENTHESES OVERLAY
+ {0x1ABF, 0x1ACE, prN}, // Mn [16] COMBINING LATIN SMALL LETTER W BELOW..COMBINING LATIN SMALL LETTER INSULAR T
+ {0x1B00, 0x1B03, prN}, // Mn [4] BALINESE SIGN ULU RICEM..BALINESE SIGN SURANG
+ {0x1B04, 0x1B04, prN}, // Mc BALINESE SIGN BISAH
+ {0x1B05, 0x1B33, prN}, // Lo [47] BALINESE LETTER AKARA..BALINESE LETTER HA
+ {0x1B34, 0x1B34, prN}, // Mn BALINESE SIGN REREKAN
+ {0x1B35, 0x1B35, prN}, // Mc BALINESE VOWEL SIGN TEDUNG
+ {0x1B36, 0x1B3A, prN}, // Mn [5] BALINESE VOWEL SIGN ULU..BALINESE VOWEL SIGN RA REPA
+ {0x1B3B, 0x1B3B, prN}, // Mc BALINESE VOWEL SIGN RA REPA TEDUNG
+ {0x1B3C, 0x1B3C, prN}, // Mn BALINESE VOWEL SIGN LA LENGA
+ {0x1B3D, 0x1B41, prN}, // Mc [5] BALINESE VOWEL SIGN LA LENGA TEDUNG..BALINESE VOWEL SIGN TALING REPA TEDUNG
+ {0x1B42, 0x1B42, prN}, // Mn BALINESE VOWEL SIGN PEPET
+ {0x1B43, 0x1B44, prN}, // Mc [2] BALINESE VOWEL SIGN PEPET TEDUNG..BALINESE ADEG ADEG
+ {0x1B45, 0x1B4C, prN}, // Lo [8] BALINESE LETTER KAF SASAK..BALINESE LETTER ARCHAIC JNYA
+ {0x1B50, 0x1B59, prN}, // Nd [10] BALINESE DIGIT ZERO..BALINESE DIGIT NINE
+ {0x1B5A, 0x1B60, prN}, // Po [7] BALINESE PANTI..BALINESE PAMENENG
+ {0x1B61, 0x1B6A, prN}, // So [10] BALINESE MUSICAL SYMBOL DONG..BALINESE MUSICAL SYMBOL DANG GEDE
+ {0x1B6B, 0x1B73, prN}, // Mn [9] BALINESE MUSICAL SYMBOL COMBINING TEGEH..BALINESE MUSICAL SYMBOL COMBINING GONG
+ {0x1B74, 0x1B7C, prN}, // So [9] BALINESE MUSICAL SYMBOL RIGHT-HAND OPEN DUG..BALINESE MUSICAL SYMBOL LEFT-HAND OPEN PING
+ {0x1B7D, 0x1B7E, prN}, // Po [2] BALINESE PANTI LANTANG..BALINESE PAMADA LANTANG
+ {0x1B80, 0x1B81, prN}, // Mn [2] SUNDANESE SIGN PANYECEK..SUNDANESE SIGN PANGLAYAR
+ {0x1B82, 0x1B82, prN}, // Mc SUNDANESE SIGN PANGWISAD
+ {0x1B83, 0x1BA0, prN}, // Lo [30] SUNDANESE LETTER A..SUNDANESE LETTER HA
+ {0x1BA1, 0x1BA1, prN}, // Mc SUNDANESE CONSONANT SIGN PAMINGKAL
+ {0x1BA2, 0x1BA5, prN}, // Mn [4] SUNDANESE CONSONANT SIGN PANYAKRA..SUNDANESE VOWEL SIGN PANYUKU
+ {0x1BA6, 0x1BA7, prN}, // Mc [2] SUNDANESE VOWEL SIGN PANAELAENG..SUNDANESE VOWEL SIGN PANOLONG
+ {0x1BA8, 0x1BA9, prN}, // Mn [2] SUNDANESE VOWEL SIGN PAMEPET..SUNDANESE VOWEL SIGN PANEULEUNG
+ {0x1BAA, 0x1BAA, prN}, // Mc SUNDANESE SIGN PAMAAEH
+ {0x1BAB, 0x1BAD, prN}, // Mn [3] SUNDANESE SIGN VIRAMA..SUNDANESE CONSONANT SIGN PASANGAN WA
+ {0x1BAE, 0x1BAF, prN}, // Lo [2] SUNDANESE LETTER KHA..SUNDANESE LETTER SYA
+ {0x1BB0, 0x1BB9, prN}, // Nd [10] SUNDANESE DIGIT ZERO..SUNDANESE DIGIT NINE
+ {0x1BBA, 0x1BBF, prN}, // Lo [6] SUNDANESE AVAGRAHA..SUNDANESE LETTER FINAL M
+ {0x1BC0, 0x1BE5, prN}, // Lo [38] BATAK LETTER A..BATAK LETTER U
+ {0x1BE6, 0x1BE6, prN}, // Mn BATAK SIGN TOMPI
+ {0x1BE7, 0x1BE7, prN}, // Mc BATAK VOWEL SIGN E
+ {0x1BE8, 0x1BE9, prN}, // Mn [2] BATAK VOWEL SIGN PAKPAK E..BATAK VOWEL SIGN EE
+ {0x1BEA, 0x1BEC, prN}, // Mc [3] BATAK VOWEL SIGN I..BATAK VOWEL SIGN O
+ {0x1BED, 0x1BED, prN}, // Mn BATAK VOWEL SIGN KARO O
+ {0x1BEE, 0x1BEE, prN}, // Mc BATAK VOWEL SIGN U
+ {0x1BEF, 0x1BF1, prN}, // Mn [3] BATAK VOWEL SIGN U FOR SIMALUNGUN SA..BATAK CONSONANT SIGN H
+ {0x1BF2, 0x1BF3, prN}, // Mc [2] BATAK PANGOLAT..BATAK PANONGONAN
+ {0x1BFC, 0x1BFF, prN}, // Po [4] BATAK SYMBOL BINDU NA METEK..BATAK SYMBOL BINDU PANGOLAT
+ {0x1C00, 0x1C23, prN}, // Lo [36] LEPCHA LETTER KA..LEPCHA LETTER A
+ {0x1C24, 0x1C2B, prN}, // Mc [8] LEPCHA SUBJOINED LETTER YA..LEPCHA VOWEL SIGN UU
+ {0x1C2C, 0x1C33, prN}, // Mn [8] LEPCHA VOWEL SIGN E..LEPCHA CONSONANT SIGN T
+ {0x1C34, 0x1C35, prN}, // Mc [2] LEPCHA CONSONANT SIGN NYIN-DO..LEPCHA CONSONANT SIGN KANG
+ {0x1C36, 0x1C37, prN}, // Mn [2] LEPCHA SIGN RAN..LEPCHA SIGN NUKTA
+ {0x1C3B, 0x1C3F, prN}, // Po [5] LEPCHA PUNCTUATION TA-ROL..LEPCHA PUNCTUATION TSHOOK
+ {0x1C40, 0x1C49, prN}, // Nd [10] LEPCHA DIGIT ZERO..LEPCHA DIGIT NINE
+ {0x1C4D, 0x1C4F, prN}, // Lo [3] LEPCHA LETTER TTA..LEPCHA LETTER DDA
+ {0x1C50, 0x1C59, prN}, // Nd [10] OL CHIKI DIGIT ZERO..OL CHIKI DIGIT NINE
+ {0x1C5A, 0x1C77, prN}, // Lo [30] OL CHIKI LETTER LA..OL CHIKI LETTER OH
+ {0x1C78, 0x1C7D, prN}, // Lm [6] OL CHIKI MU TTUDDAG..OL CHIKI AHAD
+ {0x1C7E, 0x1C7F, prN}, // Po [2] OL CHIKI PUNCTUATION MUCAAD..OL CHIKI PUNCTUATION DOUBLE MUCAAD
+ {0x1C80, 0x1C88, prN}, // Ll [9] CYRILLIC SMALL LETTER ROUNDED VE..CYRILLIC SMALL LETTER UNBLENDED UK
+ {0x1C90, 0x1CBA, prN}, // Lu [43] GEORGIAN MTAVRULI CAPITAL LETTER AN..GEORGIAN MTAVRULI CAPITAL LETTER AIN
+ {0x1CBD, 0x1CBF, prN}, // Lu [3] GEORGIAN MTAVRULI CAPITAL LETTER AEN..GEORGIAN MTAVRULI CAPITAL LETTER LABIAL SIGN
+ {0x1CC0, 0x1CC7, prN}, // Po [8] SUNDANESE PUNCTUATION BINDU SURYA..SUNDANESE PUNCTUATION BINDU BA SATANGA
+ {0x1CD0, 0x1CD2, prN}, // Mn [3] VEDIC TONE KARSHANA..VEDIC TONE PRENKHA
+ {0x1CD3, 0x1CD3, prN}, // Po VEDIC SIGN NIHSHVASA
+ {0x1CD4, 0x1CE0, prN}, // Mn [13] VEDIC SIGN YAJURVEDIC MIDLINE SVARITA..VEDIC TONE RIGVEDIC KASHMIRI INDEPENDENT SVARITA
+ {0x1CE1, 0x1CE1, prN}, // Mc VEDIC TONE ATHARVAVEDIC INDEPENDENT SVARITA
+ {0x1CE2, 0x1CE8, prN}, // Mn [7] VEDIC SIGN VISARGA SVARITA..VEDIC SIGN VISARGA ANUDATTA WITH TAIL
+ {0x1CE9, 0x1CEC, prN}, // Lo [4] VEDIC SIGN ANUSVARA ANTARGOMUKHA..VEDIC SIGN ANUSVARA VAMAGOMUKHA WITH TAIL
+ {0x1CED, 0x1CED, prN}, // Mn VEDIC SIGN TIRYAK
+ {0x1CEE, 0x1CF3, prN}, // Lo [6] VEDIC SIGN HEXIFORM LONG ANUSVARA..VEDIC SIGN ROTATED ARDHAVISARGA
+ {0x1CF4, 0x1CF4, prN}, // Mn VEDIC TONE CANDRA ABOVE
+ {0x1CF5, 0x1CF6, prN}, // Lo [2] VEDIC SIGN JIHVAMULIYA..VEDIC SIGN UPADHMANIYA
+ {0x1CF7, 0x1CF7, prN}, // Mc VEDIC SIGN ATIKRAMA
+ {0x1CF8, 0x1CF9, prN}, // Mn [2] VEDIC TONE RING ABOVE..VEDIC TONE DOUBLE RING ABOVE
+ {0x1CFA, 0x1CFA, prN}, // Lo VEDIC SIGN DOUBLE ANUSVARA ANTARGOMUKHA
+ {0x1D00, 0x1D2B, prN}, // Ll [44] LATIN LETTER SMALL CAPITAL A..CYRILLIC LETTER SMALL CAPITAL EL
+ {0x1D2C, 0x1D6A, prN}, // Lm [63] MODIFIER LETTER CAPITAL A..GREEK SUBSCRIPT SMALL LETTER CHI
+ {0x1D6B, 0x1D77, prN}, // Ll [13] LATIN SMALL LETTER UE..LATIN SMALL LETTER TURNED G
+ {0x1D78, 0x1D78, prN}, // Lm MODIFIER LETTER CYRILLIC EN
+ {0x1D79, 0x1D7F, prN}, // Ll [7] LATIN SMALL LETTER INSULAR G..LATIN SMALL LETTER UPSILON WITH STROKE
+ {0x1D80, 0x1D9A, prN}, // Ll [27] LATIN SMALL LETTER B WITH PALATAL HOOK..LATIN SMALL LETTER EZH WITH RETROFLEX HOOK
+ {0x1D9B, 0x1DBF, prN}, // Lm [37] MODIFIER LETTER SMALL TURNED ALPHA..MODIFIER LETTER SMALL THETA
+ {0x1DC0, 0x1DFF, prN}, // Mn [64] COMBINING DOTTED GRAVE ACCENT..COMBINING RIGHT ARROWHEAD AND DOWN ARROWHEAD BELOW
+ {0x1E00, 0x1EFF, prN}, // L& [256] LATIN CAPITAL LETTER A WITH RING BELOW..LATIN SMALL LETTER Y WITH LOOP
+ {0x1F00, 0x1F15, prN}, // L& [22] GREEK SMALL LETTER ALPHA WITH PSILI..GREEK SMALL LETTER EPSILON WITH DASIA AND OXIA
+ {0x1F18, 0x1F1D, prN}, // Lu [6] GREEK CAPITAL LETTER EPSILON WITH PSILI..GREEK CAPITAL LETTER EPSILON WITH DASIA AND OXIA
+ {0x1F20, 0x1F45, prN}, // L& [38] GREEK SMALL LETTER ETA WITH PSILI..GREEK SMALL LETTER OMICRON WITH DASIA AND OXIA
+ {0x1F48, 0x1F4D, prN}, // Lu [6] GREEK CAPITAL LETTER OMICRON WITH PSILI..GREEK CAPITAL LETTER OMICRON WITH DASIA AND OXIA
+ {0x1F50, 0x1F57, prN}, // Ll [8] GREEK SMALL LETTER UPSILON WITH PSILI..GREEK SMALL LETTER UPSILON WITH DASIA AND PERISPOMENI
+ {0x1F59, 0x1F59, prN}, // Lu GREEK CAPITAL LETTER UPSILON WITH DASIA
+ {0x1F5B, 0x1F5B, prN}, // Lu GREEK CAPITAL LETTER UPSILON WITH DASIA AND VARIA
+ {0x1F5D, 0x1F5D, prN}, // Lu GREEK CAPITAL LETTER UPSILON WITH DASIA AND OXIA
+ {0x1F5F, 0x1F7D, prN}, // L& [31] GREEK CAPITAL LETTER UPSILON WITH DASIA AND PERISPOMENI..GREEK SMALL LETTER OMEGA WITH OXIA
+ {0x1F80, 0x1FB4, prN}, // L& [53] GREEK SMALL LETTER ALPHA WITH PSILI AND YPOGEGRAMMENI..GREEK SMALL LETTER ALPHA WITH OXIA AND YPOGEGRAMMENI
+ {0x1FB6, 0x1FBC, prN}, // L& [7] GREEK SMALL LETTER ALPHA WITH PERISPOMENI..GREEK CAPITAL LETTER ALPHA WITH PROSGEGRAMMENI
+ {0x1FBD, 0x1FBD, prN}, // Sk GREEK KORONIS
+ {0x1FBE, 0x1FBE, prN}, // Ll GREEK PROSGEGRAMMENI
+ {0x1FBF, 0x1FC1, prN}, // Sk [3] GREEK PSILI..GREEK DIALYTIKA AND PERISPOMENI
+ {0x1FC2, 0x1FC4, prN}, // Ll [3] GREEK SMALL LETTER ETA WITH VARIA AND YPOGEGRAMMENI..GREEK SMALL LETTER ETA WITH OXIA AND YPOGEGRAMMENI
+ {0x1FC6, 0x1FCC, prN}, // L& [7] GREEK SMALL LETTER ETA WITH PERISPOMENI..GREEK CAPITAL LETTER ETA WITH PROSGEGRAMMENI
+ {0x1FCD, 0x1FCF, prN}, // Sk [3] GREEK PSILI AND VARIA..GREEK PSILI AND PERISPOMENI
+ {0x1FD0, 0x1FD3, prN}, // Ll [4] GREEK SMALL LETTER IOTA WITH VRACHY..GREEK SMALL LETTER IOTA WITH DIALYTIKA AND OXIA
+ {0x1FD6, 0x1FDB, prN}, // L& [6] GREEK SMALL LETTER IOTA WITH PERISPOMENI..GREEK CAPITAL LETTER IOTA WITH OXIA
+ {0x1FDD, 0x1FDF, prN}, // Sk [3] GREEK DASIA AND VARIA..GREEK DASIA AND PERISPOMENI
+ {0x1FE0, 0x1FEC, prN}, // L& [13] GREEK SMALL LETTER UPSILON WITH VRACHY..GREEK CAPITAL LETTER RHO WITH DASIA
+ {0x1FED, 0x1FEF, prN}, // Sk [3] GREEK DIALYTIKA AND VARIA..GREEK VARIA
+ {0x1FF2, 0x1FF4, prN}, // Ll [3] GREEK SMALL LETTER OMEGA WITH VARIA AND YPOGEGRAMMENI..GREEK SMALL LETTER OMEGA WITH OXIA AND YPOGEGRAMMENI
+ {0x1FF6, 0x1FFC, prN}, // L& [7] GREEK SMALL LETTER OMEGA WITH PERISPOMENI..GREEK CAPITAL LETTER OMEGA WITH PROSGEGRAMMENI
+ {0x1FFD, 0x1FFE, prN}, // Sk [2] GREEK OXIA..GREEK DASIA
+ {0x2000, 0x200A, prN}, // Zs [11] EN QUAD..HAIR SPACE
+ {0x200B, 0x200F, prN}, // Cf [5] ZERO WIDTH SPACE..RIGHT-TO-LEFT MARK
+ {0x2010, 0x2010, prA}, // Pd HYPHEN
+ {0x2011, 0x2012, prN}, // Pd [2] NON-BREAKING HYPHEN..FIGURE DASH
+ {0x2013, 0x2015, prA}, // Pd [3] EN DASH..HORIZONTAL BAR
+ {0x2016, 0x2016, prA}, // Po DOUBLE VERTICAL LINE
+ {0x2017, 0x2017, prN}, // Po DOUBLE LOW LINE
+ {0x2018, 0x2018, prA}, // Pi LEFT SINGLE QUOTATION MARK
+ {0x2019, 0x2019, prA}, // Pf RIGHT SINGLE QUOTATION MARK
+ {0x201A, 0x201A, prN}, // Ps SINGLE LOW-9 QUOTATION MARK
+ {0x201B, 0x201B, prN}, // Pi SINGLE HIGH-REVERSED-9 QUOTATION MARK
+ {0x201C, 0x201C, prA}, // Pi LEFT DOUBLE QUOTATION MARK
+ {0x201D, 0x201D, prA}, // Pf RIGHT DOUBLE QUOTATION MARK
+ {0x201E, 0x201E, prN}, // Ps DOUBLE LOW-9 QUOTATION MARK
+ {0x201F, 0x201F, prN}, // Pi DOUBLE HIGH-REVERSED-9 QUOTATION MARK
+ {0x2020, 0x2022, prA}, // Po [3] DAGGER..BULLET
+ {0x2023, 0x2023, prN}, // Po TRIANGULAR BULLET
+ {0x2024, 0x2027, prA}, // Po [4] ONE DOT LEADER..HYPHENATION POINT
+ {0x2028, 0x2028, prN}, // Zl LINE SEPARATOR
+ {0x2029, 0x2029, prN}, // Zp PARAGRAPH SEPARATOR
+ {0x202A, 0x202E, prN}, // Cf [5] LEFT-TO-RIGHT EMBEDDING..RIGHT-TO-LEFT OVERRIDE
+ {0x202F, 0x202F, prN}, // Zs NARROW NO-BREAK SPACE
+ {0x2030, 0x2030, prA}, // Po PER MILLE SIGN
+ {0x2031, 0x2031, prN}, // Po PER TEN THOUSAND SIGN
+ {0x2032, 0x2033, prA}, // Po [2] PRIME..DOUBLE PRIME
+ {0x2034, 0x2034, prN}, // Po TRIPLE PRIME
+ {0x2035, 0x2035, prA}, // Po REVERSED PRIME
+ {0x2036, 0x2038, prN}, // Po [3] REVERSED DOUBLE PRIME..CARET
+ {0x2039, 0x2039, prN}, // Pi SINGLE LEFT-POINTING ANGLE QUOTATION MARK
+ {0x203A, 0x203A, prN}, // Pf SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
+ {0x203B, 0x203B, prA}, // Po REFERENCE MARK
+ {0x203C, 0x203D, prN}, // Po [2] DOUBLE EXCLAMATION MARK..INTERROBANG
+ {0x203E, 0x203E, prA}, // Po OVERLINE
+ {0x203F, 0x2040, prN}, // Pc [2] UNDERTIE..CHARACTER TIE
+ {0x2041, 0x2043, prN}, // Po [3] CARET INSERTION POINT..HYPHEN BULLET
+ {0x2044, 0x2044, prN}, // Sm FRACTION SLASH
+ {0x2045, 0x2045, prN}, // Ps LEFT SQUARE BRACKET WITH QUILL
+ {0x2046, 0x2046, prN}, // Pe RIGHT SQUARE BRACKET WITH QUILL
+ {0x2047, 0x2051, prN}, // Po [11] DOUBLE QUESTION MARK..TWO ASTERISKS ALIGNED VERTICALLY
+ {0x2052, 0x2052, prN}, // Sm COMMERCIAL MINUS SIGN
+ {0x2053, 0x2053, prN}, // Po SWUNG DASH
+ {0x2054, 0x2054, prN}, // Pc INVERTED UNDERTIE
+ {0x2055, 0x205E, prN}, // Po [10] FLOWER PUNCTUATION MARK..VERTICAL FOUR DOTS
+ {0x205F, 0x205F, prN}, // Zs MEDIUM MATHEMATICAL SPACE
+ {0x2060, 0x2064, prN}, // Cf [5] WORD JOINER..INVISIBLE PLUS
+ {0x2066, 0x206F, prN}, // Cf [10] LEFT-TO-RIGHT ISOLATE..NOMINAL DIGIT SHAPES
+ {0x2070, 0x2070, prN}, // No SUPERSCRIPT ZERO
+ {0x2071, 0x2071, prN}, // Lm SUPERSCRIPT LATIN SMALL LETTER I
+ {0x2074, 0x2074, prA}, // No SUPERSCRIPT FOUR
+ {0x2075, 0x2079, prN}, // No [5] SUPERSCRIPT FIVE..SUPERSCRIPT NINE
+ {0x207A, 0x207C, prN}, // Sm [3] SUPERSCRIPT PLUS SIGN..SUPERSCRIPT EQUALS SIGN
+ {0x207D, 0x207D, prN}, // Ps SUPERSCRIPT LEFT PARENTHESIS
+ {0x207E, 0x207E, prN}, // Pe SUPERSCRIPT RIGHT PARENTHESIS
+ {0x207F, 0x207F, prA}, // Lm SUPERSCRIPT LATIN SMALL LETTER N
+ {0x2080, 0x2080, prN}, // No SUBSCRIPT ZERO
+ {0x2081, 0x2084, prA}, // No [4] SUBSCRIPT ONE..SUBSCRIPT FOUR
+ {0x2085, 0x2089, prN}, // No [5] SUBSCRIPT FIVE..SUBSCRIPT NINE
+ {0x208A, 0x208C, prN}, // Sm [3] SUBSCRIPT PLUS SIGN..SUBSCRIPT EQUALS SIGN
+ {0x208D, 0x208D, prN}, // Ps SUBSCRIPT LEFT PARENTHESIS
+ {0x208E, 0x208E, prN}, // Pe SUBSCRIPT RIGHT PARENTHESIS
+ {0x2090, 0x209C, prN}, // Lm [13] LATIN SUBSCRIPT SMALL LETTER A..LATIN SUBSCRIPT SMALL LETTER T
+ {0x20A0, 0x20A8, prN}, // Sc [9] EURO-CURRENCY SIGN..RUPEE SIGN
+ {0x20A9, 0x20A9, prH}, // Sc WON SIGN
+ {0x20AA, 0x20AB, prN}, // Sc [2] NEW SHEQEL SIGN..DONG SIGN
+ {0x20AC, 0x20AC, prA}, // Sc EURO SIGN
+ {0x20AD, 0x20C0, prN}, // Sc [20] KIP SIGN..SOM SIGN
+ {0x20D0, 0x20DC, prN}, // Mn [13] COMBINING LEFT HARPOON ABOVE..COMBINING FOUR DOTS ABOVE
+ {0x20DD, 0x20E0, prN}, // Me [4] COMBINING ENCLOSING CIRCLE..COMBINING ENCLOSING CIRCLE BACKSLASH
+ {0x20E1, 0x20E1, prN}, // Mn COMBINING LEFT RIGHT ARROW ABOVE
+ {0x20E2, 0x20E4, prN}, // Me [3] COMBINING ENCLOSING SCREEN..COMBINING ENCLOSING UPWARD POINTING TRIANGLE
+ {0x20E5, 0x20F0, prN}, // Mn [12] COMBINING REVERSE SOLIDUS OVERLAY..COMBINING ASTERISK ABOVE
+ {0x2100, 0x2101, prN}, // So [2] ACCOUNT OF..ADDRESSED TO THE SUBJECT
+ {0x2102, 0x2102, prN}, // Lu DOUBLE-STRUCK CAPITAL C
+ {0x2103, 0x2103, prA}, // So DEGREE CELSIUS
+ {0x2104, 0x2104, prN}, // So CENTRE LINE SYMBOL
+ {0x2105, 0x2105, prA}, // So CARE OF
+ {0x2106, 0x2106, prN}, // So CADA UNA
+ {0x2107, 0x2107, prN}, // Lu EULER CONSTANT
+ {0x2108, 0x2108, prN}, // So SCRUPLE
+ {0x2109, 0x2109, prA}, // So DEGREE FAHRENHEIT
+ {0x210A, 0x2112, prN}, // L& [9] SCRIPT SMALL G..SCRIPT CAPITAL L
+ {0x2113, 0x2113, prA}, // Ll SCRIPT SMALL L
+ {0x2114, 0x2114, prN}, // So L B BAR SYMBOL
+ {0x2115, 0x2115, prN}, // Lu DOUBLE-STRUCK CAPITAL N
+ {0x2116, 0x2116, prA}, // So NUMERO SIGN
+ {0x2117, 0x2117, prN}, // So SOUND RECORDING COPYRIGHT
+ {0x2118, 0x2118, prN}, // Sm SCRIPT CAPITAL P
+ {0x2119, 0x211D, prN}, // Lu [5] DOUBLE-STRUCK CAPITAL P..DOUBLE-STRUCK CAPITAL R
+ {0x211E, 0x2120, prN}, // So [3] PRESCRIPTION TAKE..SERVICE MARK
+ {0x2121, 0x2122, prA}, // So [2] TELEPHONE SIGN..TRADE MARK SIGN
+ {0x2123, 0x2123, prN}, // So VERSICLE
+ {0x2124, 0x2124, prN}, // Lu DOUBLE-STRUCK CAPITAL Z
+ {0x2125, 0x2125, prN}, // So OUNCE SIGN
+ {0x2126, 0x2126, prA}, // Lu OHM SIGN
+ {0x2127, 0x2127, prN}, // So INVERTED OHM SIGN
+ {0x2128, 0x2128, prN}, // Lu BLACK-LETTER CAPITAL Z
+ {0x2129, 0x2129, prN}, // So TURNED GREEK SMALL LETTER IOTA
+ {0x212A, 0x212A, prN}, // Lu KELVIN SIGN
+ {0x212B, 0x212B, prA}, // Lu ANGSTROM SIGN
+ {0x212C, 0x212D, prN}, // Lu [2] SCRIPT CAPITAL B..BLACK-LETTER CAPITAL C
+ {0x212E, 0x212E, prN}, // So ESTIMATED SYMBOL
+ {0x212F, 0x2134, prN}, // L& [6] SCRIPT SMALL E..SCRIPT SMALL O
+ {0x2135, 0x2138, prN}, // Lo [4] ALEF SYMBOL..DALET SYMBOL
+ {0x2139, 0x2139, prN}, // Ll INFORMATION SOURCE
+ {0x213A, 0x213B, prN}, // So [2] ROTATED CAPITAL Q..FACSIMILE SIGN
+ {0x213C, 0x213F, prN}, // L& [4] DOUBLE-STRUCK SMALL PI..DOUBLE-STRUCK CAPITAL PI
+ {0x2140, 0x2144, prN}, // Sm [5] DOUBLE-STRUCK N-ARY SUMMATION..TURNED SANS-SERIF CAPITAL Y
+ {0x2145, 0x2149, prN}, // L& [5] DOUBLE-STRUCK ITALIC CAPITAL D..DOUBLE-STRUCK ITALIC SMALL J
+ {0x214A, 0x214A, prN}, // So PROPERTY LINE
+ {0x214B, 0x214B, prN}, // Sm TURNED AMPERSAND
+ {0x214C, 0x214D, prN}, // So [2] PER SIGN..AKTIESELSKAB
+ {0x214E, 0x214E, prN}, // Ll TURNED SMALL F
+ {0x214F, 0x214F, prN}, // So SYMBOL FOR SAMARITAN SOURCE
+ {0x2150, 0x2152, prN}, // No [3] VULGAR FRACTION ONE SEVENTH..VULGAR FRACTION ONE TENTH
+ {0x2153, 0x2154, prA}, // No [2] VULGAR FRACTION ONE THIRD..VULGAR FRACTION TWO THIRDS
+ {0x2155, 0x215A, prN}, // No [6] VULGAR FRACTION ONE FIFTH..VULGAR FRACTION FIVE SIXTHS
+ {0x215B, 0x215E, prA}, // No [4] VULGAR FRACTION ONE EIGHTH..VULGAR FRACTION SEVEN EIGHTHS
+ {0x215F, 0x215F, prN}, // No FRACTION NUMERATOR ONE
+ {0x2160, 0x216B, prA}, // Nl [12] ROMAN NUMERAL ONE..ROMAN NUMERAL TWELVE
+ {0x216C, 0x216F, prN}, // Nl [4] ROMAN NUMERAL FIFTY..ROMAN NUMERAL ONE THOUSAND
+ {0x2170, 0x2179, prA}, // Nl [10] SMALL ROMAN NUMERAL ONE..SMALL ROMAN NUMERAL TEN
+ {0x217A, 0x2182, prN}, // Nl [9] SMALL ROMAN NUMERAL ELEVEN..ROMAN NUMERAL TEN THOUSAND
+ {0x2183, 0x2184, prN}, // L& [2] ROMAN NUMERAL REVERSED ONE HUNDRED..LATIN SMALL LETTER REVERSED C
+ {0x2185, 0x2188, prN}, // Nl [4] ROMAN NUMERAL SIX LATE FORM..ROMAN NUMERAL ONE HUNDRED THOUSAND
+ {0x2189, 0x2189, prA}, // No VULGAR FRACTION ZERO THIRDS
+ {0x218A, 0x218B, prN}, // So [2] TURNED DIGIT TWO..TURNED DIGIT THREE
+ {0x2190, 0x2194, prA}, // Sm [5] LEFTWARDS ARROW..LEFT RIGHT ARROW
+ {0x2195, 0x2199, prA}, // So [5] UP DOWN ARROW..SOUTH WEST ARROW
+ {0x219A, 0x219B, prN}, // Sm [2] LEFTWARDS ARROW WITH STROKE..RIGHTWARDS ARROW WITH STROKE
+ {0x219C, 0x219F, prN}, // So [4] LEFTWARDS WAVE ARROW..UPWARDS TWO HEADED ARROW
+ {0x21A0, 0x21A0, prN}, // Sm RIGHTWARDS TWO HEADED ARROW
+ {0x21A1, 0x21A2, prN}, // So [2] DOWNWARDS TWO HEADED ARROW..LEFTWARDS ARROW WITH TAIL
+ {0x21A3, 0x21A3, prN}, // Sm RIGHTWARDS ARROW WITH TAIL
+ {0x21A4, 0x21A5, prN}, // So [2] LEFTWARDS ARROW FROM BAR..UPWARDS ARROW FROM BAR
+ {0x21A6, 0x21A6, prN}, // Sm RIGHTWARDS ARROW FROM BAR
+ {0x21A7, 0x21AD, prN}, // So [7] DOWNWARDS ARROW FROM BAR..LEFT RIGHT WAVE ARROW
+ {0x21AE, 0x21AE, prN}, // Sm LEFT RIGHT ARROW WITH STROKE
+ {0x21AF, 0x21B7, prN}, // So [9] DOWNWARDS ZIGZAG ARROW..CLOCKWISE TOP SEMICIRCLE ARROW
+ {0x21B8, 0x21B9, prA}, // So [2] NORTH WEST ARROW TO LONG BAR..LEFTWARDS ARROW TO BAR OVER RIGHTWARDS ARROW TO BAR
+ {0x21BA, 0x21CD, prN}, // So [20] ANTICLOCKWISE OPEN CIRCLE ARROW..LEFTWARDS DOUBLE ARROW WITH STROKE
+ {0x21CE, 0x21CF, prN}, // Sm [2] LEFT RIGHT DOUBLE ARROW WITH STROKE..RIGHTWARDS DOUBLE ARROW WITH STROKE
+ {0x21D0, 0x21D1, prN}, // So [2] LEFTWARDS DOUBLE ARROW..UPWARDS DOUBLE ARROW
+ {0x21D2, 0x21D2, prA}, // Sm RIGHTWARDS DOUBLE ARROW
+ {0x21D3, 0x21D3, prN}, // So DOWNWARDS DOUBLE ARROW
+ {0x21D4, 0x21D4, prA}, // Sm LEFT RIGHT DOUBLE ARROW
+ {0x21D5, 0x21E6, prN}, // So [18] UP DOWN DOUBLE ARROW..LEFTWARDS WHITE ARROW
+ {0x21E7, 0x21E7, prA}, // So UPWARDS WHITE ARROW
+ {0x21E8, 0x21F3, prN}, // So [12] RIGHTWARDS WHITE ARROW..UP DOWN WHITE ARROW
+ {0x21F4, 0x21FF, prN}, // Sm [12] RIGHT ARROW WITH SMALL CIRCLE..LEFT RIGHT OPEN-HEADED ARROW
+ {0x2200, 0x2200, prA}, // Sm FOR ALL
+ {0x2201, 0x2201, prN}, // Sm COMPLEMENT
+ {0x2202, 0x2203, prA}, // Sm [2] PARTIAL DIFFERENTIAL..THERE EXISTS
+ {0x2204, 0x2206, prN}, // Sm [3] THERE DOES NOT EXIST..INCREMENT
+ {0x2207, 0x2208, prA}, // Sm [2] NABLA..ELEMENT OF
+ {0x2209, 0x220A, prN}, // Sm [2] NOT AN ELEMENT OF..SMALL ELEMENT OF
+ {0x220B, 0x220B, prA}, // Sm CONTAINS AS MEMBER
+ {0x220C, 0x220E, prN}, // Sm [3] DOES NOT CONTAIN AS MEMBER..END OF PROOF
+ {0x220F, 0x220F, prA}, // Sm N-ARY PRODUCT
+ {0x2210, 0x2210, prN}, // Sm N-ARY COPRODUCT
+ {0x2211, 0x2211, prA}, // Sm N-ARY SUMMATION
+ {0x2212, 0x2214, prN}, // Sm [3] MINUS SIGN..DOT PLUS
+ {0x2215, 0x2215, prA}, // Sm DIVISION SLASH
+ {0x2216, 0x2219, prN}, // Sm [4] SET MINUS..BULLET OPERATOR
+ {0x221A, 0x221A, prA}, // Sm SQUARE ROOT
+ {0x221B, 0x221C, prN}, // Sm [2] CUBE ROOT..FOURTH ROOT
+ {0x221D, 0x2220, prA}, // Sm [4] PROPORTIONAL TO..ANGLE
+ {0x2221, 0x2222, prN}, // Sm [2] MEASURED ANGLE..SPHERICAL ANGLE
+ {0x2223, 0x2223, prA}, // Sm DIVIDES
+ {0x2224, 0x2224, prN}, // Sm DOES NOT DIVIDE
+ {0x2225, 0x2225, prA}, // Sm PARALLEL TO
+ {0x2226, 0x2226, prN}, // Sm NOT PARALLEL TO
+ {0x2227, 0x222C, prA}, // Sm [6] LOGICAL AND..DOUBLE INTEGRAL
+ {0x222D, 0x222D, prN}, // Sm TRIPLE INTEGRAL
+ {0x222E, 0x222E, prA}, // Sm CONTOUR INTEGRAL
+ {0x222F, 0x2233, prN}, // Sm [5] SURFACE INTEGRAL..ANTICLOCKWISE CONTOUR INTEGRAL
+ {0x2234, 0x2237, prA}, // Sm [4] THEREFORE..PROPORTION
+ {0x2238, 0x223B, prN}, // Sm [4] DOT MINUS..HOMOTHETIC
+ {0x223C, 0x223D, prA}, // Sm [2] TILDE OPERATOR..REVERSED TILDE
+ {0x223E, 0x2247, prN}, // Sm [10] INVERTED LAZY S..NEITHER APPROXIMATELY NOR ACTUALLY EQUAL TO
+ {0x2248, 0x2248, prA}, // Sm ALMOST EQUAL TO
+ {0x2249, 0x224B, prN}, // Sm [3] NOT ALMOST EQUAL TO..TRIPLE TILDE
+ {0x224C, 0x224C, prA}, // Sm ALL EQUAL TO
+ {0x224D, 0x2251, prN}, // Sm [5] EQUIVALENT TO..GEOMETRICALLY EQUAL TO
+ {0x2252, 0x2252, prA}, // Sm APPROXIMATELY EQUAL TO OR THE IMAGE OF
+ {0x2253, 0x225F, prN}, // Sm [13] IMAGE OF OR APPROXIMATELY EQUAL TO..QUESTIONED EQUAL TO
+ {0x2260, 0x2261, prA}, // Sm [2] NOT EQUAL TO..IDENTICAL TO
+ {0x2262, 0x2263, prN}, // Sm [2] NOT IDENTICAL TO..STRICTLY EQUIVALENT TO
+ {0x2264, 0x2267, prA}, // Sm [4] LESS-THAN OR EQUAL TO..GREATER-THAN OVER EQUAL TO
+ {0x2268, 0x2269, prN}, // Sm [2] LESS-THAN BUT NOT EQUAL TO..GREATER-THAN BUT NOT EQUAL TO
+ {0x226A, 0x226B, prA}, // Sm [2] MUCH LESS-THAN..MUCH GREATER-THAN
+ {0x226C, 0x226D, prN}, // Sm [2] BETWEEN..NOT EQUIVALENT TO
+ {0x226E, 0x226F, prA}, // Sm [2] NOT LESS-THAN..NOT GREATER-THAN
+ {0x2270, 0x2281, prN}, // Sm [18] NEITHER LESS-THAN NOR EQUAL TO..DOES NOT SUCCEED
+ {0x2282, 0x2283, prA}, // Sm [2] SUBSET OF..SUPERSET OF
+ {0x2284, 0x2285, prN}, // Sm [2] NOT A SUBSET OF..NOT A SUPERSET OF
+ {0x2286, 0x2287, prA}, // Sm [2] SUBSET OF OR EQUAL TO..SUPERSET OF OR EQUAL TO
+ {0x2288, 0x2294, prN}, // Sm [13] NEITHER A SUBSET OF NOR EQUAL TO..SQUARE CUP
+ {0x2295, 0x2295, prA}, // Sm CIRCLED PLUS
+ {0x2296, 0x2298, prN}, // Sm [3] CIRCLED MINUS..CIRCLED DIVISION SLASH
+ {0x2299, 0x2299, prA}, // Sm CIRCLED DOT OPERATOR
+ {0x229A, 0x22A4, prN}, // Sm [11] CIRCLED RING OPERATOR..DOWN TACK
+ {0x22A5, 0x22A5, prA}, // Sm UP TACK
+ {0x22A6, 0x22BE, prN}, // Sm [25] ASSERTION..RIGHT ANGLE WITH ARC
+ {0x22BF, 0x22BF, prA}, // Sm RIGHT TRIANGLE
+ {0x22C0, 0x22FF, prN}, // Sm [64] N-ARY LOGICAL AND..Z NOTATION BAG MEMBERSHIP
+ {0x2300, 0x2307, prN}, // So [8] DIAMETER SIGN..WAVY LINE
+ {0x2308, 0x2308, prN}, // Ps LEFT CEILING
+ {0x2309, 0x2309, prN}, // Pe RIGHT CEILING
+ {0x230A, 0x230A, prN}, // Ps LEFT FLOOR
+ {0x230B, 0x230B, prN}, // Pe RIGHT FLOOR
+ {0x230C, 0x2311, prN}, // So [6] BOTTOM RIGHT CROP..SQUARE LOZENGE
+ {0x2312, 0x2312, prA}, // So ARC
+ {0x2313, 0x2319, prN}, // So [7] SEGMENT..TURNED NOT SIGN
+ {0x231A, 0x231B, prW}, // So [2] WATCH..HOURGLASS
+ {0x231C, 0x231F, prN}, // So [4] TOP LEFT CORNER..BOTTOM RIGHT CORNER
+ {0x2320, 0x2321, prN}, // Sm [2] TOP HALF INTEGRAL..BOTTOM HALF INTEGRAL
+ {0x2322, 0x2328, prN}, // So [7] FROWN..KEYBOARD
+ {0x2329, 0x2329, prW}, // Ps LEFT-POINTING ANGLE BRACKET
+ {0x232A, 0x232A, prW}, // Pe RIGHT-POINTING ANGLE BRACKET
+ {0x232B, 0x237B, prN}, // So [81] ERASE TO THE LEFT..NOT CHECK MARK
+ {0x237C, 0x237C, prN}, // Sm RIGHT ANGLE WITH DOWNWARDS ZIGZAG ARROW
+ {0x237D, 0x239A, prN}, // So [30] SHOULDERED OPEN BOX..CLEAR SCREEN SYMBOL
+ {0x239B, 0x23B3, prN}, // Sm [25] LEFT PARENTHESIS UPPER HOOK..SUMMATION BOTTOM
+ {0x23B4, 0x23DB, prN}, // So [40] TOP SQUARE BRACKET..FUSE
+ {0x23DC, 0x23E1, prN}, // Sm [6] TOP PARENTHESIS..BOTTOM TORTOISE SHELL BRACKET
+ {0x23E2, 0x23E8, prN}, // So [7] WHITE TRAPEZIUM..DECIMAL EXPONENT SYMBOL
+ {0x23E9, 0x23EC, prW}, // So [4] BLACK RIGHT-POINTING DOUBLE TRIANGLE..BLACK DOWN-POINTING DOUBLE TRIANGLE
+ {0x23ED, 0x23EF, prN}, // So [3] BLACK RIGHT-POINTING DOUBLE TRIANGLE WITH VERTICAL BAR..BLACK RIGHT-POINTING TRIANGLE WITH DOUBLE VERTICAL BAR
+ {0x23F0, 0x23F0, prW}, // So ALARM CLOCK
+ {0x23F1, 0x23F2, prN}, // So [2] STOPWATCH..TIMER CLOCK
+ {0x23F3, 0x23F3, prW}, // So HOURGLASS WITH FLOWING SAND
+ {0x23F4, 0x23FF, prN}, // So [12] BLACK MEDIUM LEFT-POINTING TRIANGLE..OBSERVER EYE SYMBOL
+ {0x2400, 0x2426, prN}, // So [39] SYMBOL FOR NULL..SYMBOL FOR SUBSTITUTE FORM TWO
+ {0x2440, 0x244A, prN}, // So [11] OCR HOOK..OCR DOUBLE BACKSLASH
+ {0x2460, 0x249B, prA}, // No [60] CIRCLED DIGIT ONE..NUMBER TWENTY FULL STOP
+ {0x249C, 0x24E9, prA}, // So [78] PARENTHESIZED LATIN SMALL LETTER A..CIRCLED LATIN SMALL LETTER Z
+ {0x24EA, 0x24EA, prN}, // No CIRCLED DIGIT ZERO
+ {0x24EB, 0x24FF, prA}, // No [21] NEGATIVE CIRCLED NUMBER ELEVEN..NEGATIVE CIRCLED DIGIT ZERO
+ {0x2500, 0x254B, prA}, // So [76] BOX DRAWINGS LIGHT HORIZONTAL..BOX DRAWINGS HEAVY VERTICAL AND HORIZONTAL
+ {0x254C, 0x254F, prN}, // So [4] BOX DRAWINGS LIGHT DOUBLE DASH HORIZONTAL..BOX DRAWINGS HEAVY DOUBLE DASH VERTICAL
+ {0x2550, 0x2573, prA}, // So [36] BOX DRAWINGS DOUBLE HORIZONTAL..BOX DRAWINGS LIGHT DIAGONAL CROSS
+ {0x2574, 0x257F, prN}, // So [12] BOX DRAWINGS LIGHT LEFT..BOX DRAWINGS HEAVY UP AND LIGHT DOWN
+ {0x2580, 0x258F, prA}, // So [16] UPPER HALF BLOCK..LEFT ONE EIGHTH BLOCK
+ {0x2590, 0x2591, prN}, // So [2] RIGHT HALF BLOCK..LIGHT SHADE
+ {0x2592, 0x2595, prA}, // So [4] MEDIUM SHADE..RIGHT ONE EIGHTH BLOCK
+ {0x2596, 0x259F, prN}, // So [10] QUADRANT LOWER LEFT..QUADRANT UPPER RIGHT AND LOWER LEFT AND LOWER RIGHT
+ {0x25A0, 0x25A1, prA}, // So [2] BLACK SQUARE..WHITE SQUARE
+ {0x25A2, 0x25A2, prN}, // So WHITE SQUARE WITH ROUNDED CORNERS
+ {0x25A3, 0x25A9, prA}, // So [7] WHITE SQUARE CONTAINING BLACK SMALL SQUARE..SQUARE WITH DIAGONAL CROSSHATCH FILL
+ {0x25AA, 0x25B1, prN}, // So [8] BLACK SMALL SQUARE..WHITE PARALLELOGRAM
+ {0x25B2, 0x25B3, prA}, // So [2] BLACK UP-POINTING TRIANGLE..WHITE UP-POINTING TRIANGLE
+ {0x25B4, 0x25B5, prN}, // So [2] BLACK UP-POINTING SMALL TRIANGLE..WHITE UP-POINTING SMALL TRIANGLE
+ {0x25B6, 0x25B6, prA}, // So BLACK RIGHT-POINTING TRIANGLE
+ {0x25B7, 0x25B7, prA}, // Sm WHITE RIGHT-POINTING TRIANGLE
+ {0x25B8, 0x25BB, prN}, // So [4] BLACK RIGHT-POINTING SMALL TRIANGLE..WHITE RIGHT-POINTING POINTER
+ {0x25BC, 0x25BD, prA}, // So [2] BLACK DOWN-POINTING TRIANGLE..WHITE DOWN-POINTING TRIANGLE
+ {0x25BE, 0x25BF, prN}, // So [2] BLACK DOWN-POINTING SMALL TRIANGLE..WHITE DOWN-POINTING SMALL TRIANGLE
+ {0x25C0, 0x25C0, prA}, // So BLACK LEFT-POINTING TRIANGLE
+ {0x25C1, 0x25C1, prA}, // Sm WHITE LEFT-POINTING TRIANGLE
+ {0x25C2, 0x25C5, prN}, // So [4] BLACK LEFT-POINTING SMALL TRIANGLE..WHITE LEFT-POINTING POINTER
+ {0x25C6, 0x25C8, prA}, // So [3] BLACK DIAMOND..WHITE DIAMOND CONTAINING BLACK SMALL DIAMOND
+ {0x25C9, 0x25CA, prN}, // So [2] FISHEYE..LOZENGE
+ {0x25CB, 0x25CB, prA}, // So WHITE CIRCLE
+ {0x25CC, 0x25CD, prN}, // So [2] DOTTED CIRCLE..CIRCLE WITH VERTICAL FILL
+ {0x25CE, 0x25D1, prA}, // So [4] BULLSEYE..CIRCLE WITH RIGHT HALF BLACK
+ {0x25D2, 0x25E1, prN}, // So [16] CIRCLE WITH LOWER HALF BLACK..LOWER HALF CIRCLE
+ {0x25E2, 0x25E5, prA}, // So [4] BLACK LOWER RIGHT TRIANGLE..BLACK UPPER RIGHT TRIANGLE
+ {0x25E6, 0x25EE, prN}, // So [9] WHITE BULLET..UP-POINTING TRIANGLE WITH RIGHT HALF BLACK
+ {0x25EF, 0x25EF, prA}, // So LARGE CIRCLE
+ {0x25F0, 0x25F7, prN}, // So [8] WHITE SQUARE WITH UPPER LEFT QUADRANT..WHITE CIRCLE WITH UPPER RIGHT QUADRANT
+ {0x25F8, 0x25FC, prN}, // Sm [5] UPPER LEFT TRIANGLE..BLACK MEDIUM SQUARE
+ {0x25FD, 0x25FE, prW}, // Sm [2] WHITE MEDIUM SMALL SQUARE..BLACK MEDIUM SMALL SQUARE
+ {0x25FF, 0x25FF, prN}, // Sm LOWER RIGHT TRIANGLE
+ {0x2600, 0x2604, prN}, // So [5] BLACK SUN WITH RAYS..COMET
+ {0x2605, 0x2606, prA}, // So [2] BLACK STAR..WHITE STAR
+ {0x2607, 0x2608, prN}, // So [2] LIGHTNING..THUNDERSTORM
+ {0x2609, 0x2609, prA}, // So SUN
+ {0x260A, 0x260D, prN}, // So [4] ASCENDING NODE..OPPOSITION
+ {0x260E, 0x260F, prA}, // So [2] BLACK TELEPHONE..WHITE TELEPHONE
+ {0x2610, 0x2613, prN}, // So [4] BALLOT BOX..SALTIRE
+ {0x2614, 0x2615, prW}, // So [2] UMBRELLA WITH RAIN DROPS..HOT BEVERAGE
+ {0x2616, 0x261B, prN}, // So [6] WHITE SHOGI PIECE..BLACK RIGHT POINTING INDEX
+ {0x261C, 0x261C, prA}, // So WHITE LEFT POINTING INDEX
+ {0x261D, 0x261D, prN}, // So WHITE UP POINTING INDEX
+ {0x261E, 0x261E, prA}, // So WHITE RIGHT POINTING INDEX
+ {0x261F, 0x263F, prN}, // So [33] WHITE DOWN POINTING INDEX..MERCURY
+ {0x2640, 0x2640, prA}, // So FEMALE SIGN
+ {0x2641, 0x2641, prN}, // So EARTH
+ {0x2642, 0x2642, prA}, // So MALE SIGN
+ {0x2643, 0x2647, prN}, // So [5] JUPITER..PLUTO
+ {0x2648, 0x2653, prW}, // So [12] ARIES..PISCES
+ {0x2654, 0x265F, prN}, // So [12] WHITE CHESS KING..BLACK CHESS PAWN
+ {0x2660, 0x2661, prA}, // So [2] BLACK SPADE SUIT..WHITE HEART SUIT
+ {0x2662, 0x2662, prN}, // So WHITE DIAMOND SUIT
+ {0x2663, 0x2665, prA}, // So [3] BLACK CLUB SUIT..BLACK HEART SUIT
+ {0x2666, 0x2666, prN}, // So BLACK DIAMOND SUIT
+ {0x2667, 0x266A, prA}, // So [4] WHITE CLUB SUIT..EIGHTH NOTE
+ {0x266B, 0x266B, prN}, // So BEAMED EIGHTH NOTES
+ {0x266C, 0x266D, prA}, // So [2] BEAMED SIXTEENTH NOTES..MUSIC FLAT SIGN
+ {0x266E, 0x266E, prN}, // So MUSIC NATURAL SIGN
+ {0x266F, 0x266F, prA}, // Sm MUSIC SHARP SIGN
+ {0x2670, 0x267E, prN}, // So [15] WEST SYRIAC CROSS..PERMANENT PAPER SIGN
+ {0x267F, 0x267F, prW}, // So WHEELCHAIR SYMBOL
+ {0x2680, 0x2692, prN}, // So [19] DIE FACE-1..HAMMER AND PICK
+ {0x2693, 0x2693, prW}, // So ANCHOR
+ {0x2694, 0x269D, prN}, // So [10] CROSSED SWORDS..OUTLINED WHITE STAR
+ {0x269E, 0x269F, prA}, // So [2] THREE LINES CONVERGING RIGHT..THREE LINES CONVERGING LEFT
+ {0x26A0, 0x26A0, prN}, // So WARNING SIGN
+ {0x26A1, 0x26A1, prW}, // So HIGH VOLTAGE SIGN
+ {0x26A2, 0x26A9, prN}, // So [8] DOUBLED FEMALE SIGN..HORIZONTAL MALE WITH STROKE SIGN
+ {0x26AA, 0x26AB, prW}, // So [2] MEDIUM WHITE CIRCLE..MEDIUM BLACK CIRCLE
+ {0x26AC, 0x26BC, prN}, // So [17] MEDIUM SMALL WHITE CIRCLE..SESQUIQUADRATE
+ {0x26BD, 0x26BE, prW}, // So [2] SOCCER BALL..BASEBALL
+ {0x26BF, 0x26BF, prA}, // So SQUARED KEY
+ {0x26C0, 0x26C3, prN}, // So [4] WHITE DRAUGHTS MAN..BLACK DRAUGHTS KING
+ {0x26C4, 0x26C5, prW}, // So [2] SNOWMAN WITHOUT SNOW..SUN BEHIND CLOUD
+ {0x26C6, 0x26CD, prA}, // So [8] RAIN..DISABLED CAR
+ {0x26CE, 0x26CE, prW}, // So OPHIUCHUS
+ {0x26CF, 0x26D3, prA}, // So [5] PICK..CHAINS
+ {0x26D4, 0x26D4, prW}, // So NO ENTRY
+ {0x26D5, 0x26E1, prA}, // So [13] ALTERNATE ONE-WAY LEFT WAY TRAFFIC..RESTRICTED LEFT ENTRY-2
+ {0x26E2, 0x26E2, prN}, // So ASTRONOMICAL SYMBOL FOR URANUS
+ {0x26E3, 0x26E3, prA}, // So HEAVY CIRCLE WITH STROKE AND TWO DOTS ABOVE
+ {0x26E4, 0x26E7, prN}, // So [4] PENTAGRAM..INVERTED PENTAGRAM
+ {0x26E8, 0x26E9, prA}, // So [2] BLACK CROSS ON SHIELD..SHINTO SHRINE
+ {0x26EA, 0x26EA, prW}, // So CHURCH
+ {0x26EB, 0x26F1, prA}, // So [7] CASTLE..UMBRELLA ON GROUND
+ {0x26F2, 0x26F3, prW}, // So [2] FOUNTAIN..FLAG IN HOLE
+ {0x26F4, 0x26F4, prA}, // So FERRY
+ {0x26F5, 0x26F5, prW}, // So SAILBOAT
+ {0x26F6, 0x26F9, prA}, // So [4] SQUARE FOUR CORNERS..PERSON WITH BALL
+ {0x26FA, 0x26FA, prW}, // So TENT
+ {0x26FB, 0x26FC, prA}, // So [2] JAPANESE BANK SYMBOL..HEADSTONE GRAVEYARD SYMBOL
+ {0x26FD, 0x26FD, prW}, // So FUEL PUMP
+ {0x26FE, 0x26FF, prA}, // So [2] CUP ON BLACK SQUARE..WHITE FLAG WITH HORIZONTAL MIDDLE BLACK STRIPE
+ {0x2700, 0x2704, prN}, // So [5] BLACK SAFETY SCISSORS..WHITE SCISSORS
+ {0x2705, 0x2705, prW}, // So WHITE HEAVY CHECK MARK
+ {0x2706, 0x2709, prN}, // So [4] TELEPHONE LOCATION SIGN..ENVELOPE
+ {0x270A, 0x270B, prW}, // So [2] RAISED FIST..RAISED HAND
+ {0x270C, 0x2727, prN}, // So [28] VICTORY HAND..WHITE FOUR POINTED STAR
+ {0x2728, 0x2728, prW}, // So SPARKLES
+ {0x2729, 0x273C, prN}, // So [20] STRESS OUTLINED WHITE STAR..OPEN CENTRE TEARDROP-SPOKED ASTERISK
+ {0x273D, 0x273D, prA}, // So HEAVY TEARDROP-SPOKED ASTERISK
+ {0x273E, 0x274B, prN}, // So [14] SIX PETALLED BLACK AND WHITE FLORETTE..HEAVY EIGHT TEARDROP-SPOKED PROPELLER ASTERISK
+ {0x274C, 0x274C, prW}, // So CROSS MARK
+ {0x274D, 0x274D, prN}, // So SHADOWED WHITE CIRCLE
+ {0x274E, 0x274E, prW}, // So NEGATIVE SQUARED CROSS MARK
+ {0x274F, 0x2752, prN}, // So [4] LOWER RIGHT DROP-SHADOWED WHITE SQUARE..UPPER RIGHT SHADOWED WHITE SQUARE
+ {0x2753, 0x2755, prW}, // So [3] BLACK QUESTION MARK ORNAMENT..WHITE EXCLAMATION MARK ORNAMENT
+ {0x2756, 0x2756, prN}, // So BLACK DIAMOND MINUS WHITE X
+ {0x2757, 0x2757, prW}, // So HEAVY EXCLAMATION MARK SYMBOL
+ {0x2758, 0x2767, prN}, // So [16] LIGHT VERTICAL BAR..ROTATED FLORAL HEART BULLET
+ {0x2768, 0x2768, prN}, // Ps MEDIUM LEFT PARENTHESIS ORNAMENT
+ {0x2769, 0x2769, prN}, // Pe MEDIUM RIGHT PARENTHESIS ORNAMENT
+ {0x276A, 0x276A, prN}, // Ps MEDIUM FLATTENED LEFT PARENTHESIS ORNAMENT
+ {0x276B, 0x276B, prN}, // Pe MEDIUM FLATTENED RIGHT PARENTHESIS ORNAMENT
+ {0x276C, 0x276C, prN}, // Ps MEDIUM LEFT-POINTING ANGLE BRACKET ORNAMENT
+ {0x276D, 0x276D, prN}, // Pe MEDIUM RIGHT-POINTING ANGLE BRACKET ORNAMENT
+ {0x276E, 0x276E, prN}, // Ps HEAVY LEFT-POINTING ANGLE QUOTATION MARK ORNAMENT
+ {0x276F, 0x276F, prN}, // Pe HEAVY RIGHT-POINTING ANGLE QUOTATION MARK ORNAMENT
+ {0x2770, 0x2770, prN}, // Ps HEAVY LEFT-POINTING ANGLE BRACKET ORNAMENT
+ {0x2771, 0x2771, prN}, // Pe HEAVY RIGHT-POINTING ANGLE BRACKET ORNAMENT
+ {0x2772, 0x2772, prN}, // Ps LIGHT LEFT TORTOISE SHELL BRACKET ORNAMENT
+ {0x2773, 0x2773, prN}, // Pe LIGHT RIGHT TORTOISE SHELL BRACKET ORNAMENT
+ {0x2774, 0x2774, prN}, // Ps MEDIUM LEFT CURLY BRACKET ORNAMENT
+ {0x2775, 0x2775, prN}, // Pe MEDIUM RIGHT CURLY BRACKET ORNAMENT
+ {0x2776, 0x277F, prA}, // No [10] DINGBAT NEGATIVE CIRCLED DIGIT ONE..DINGBAT NEGATIVE CIRCLED NUMBER TEN
+ {0x2780, 0x2793, prN}, // No [20] DINGBAT CIRCLED SANS-SERIF DIGIT ONE..DINGBAT NEGATIVE CIRCLED SANS-SERIF NUMBER TEN
+ {0x2794, 0x2794, prN}, // So HEAVY WIDE-HEADED RIGHTWARDS ARROW
+ {0x2795, 0x2797, prW}, // So [3] HEAVY PLUS SIGN..HEAVY DIVISION SIGN
+ {0x2798, 0x27AF, prN}, // So [24] HEAVY SOUTH EAST ARROW..NOTCHED LOWER RIGHT-SHADOWED WHITE RIGHTWARDS ARROW
+ {0x27B0, 0x27B0, prW}, // So CURLY LOOP
+ {0x27B1, 0x27BE, prN}, // So [14] NOTCHED UPPER RIGHT-SHADOWED WHITE RIGHTWARDS ARROW..OPEN-OUTLINED RIGHTWARDS ARROW
+ {0x27BF, 0x27BF, prW}, // So DOUBLE CURLY LOOP
+ {0x27C0, 0x27C4, prN}, // Sm [5] THREE DIMENSIONAL ANGLE..OPEN SUPERSET
+ {0x27C5, 0x27C5, prN}, // Ps LEFT S-SHAPED BAG DELIMITER
+ {0x27C6, 0x27C6, prN}, // Pe RIGHT S-SHAPED BAG DELIMITER
+ {0x27C7, 0x27E5, prN}, // Sm [31] OR WITH DOT INSIDE..WHITE SQUARE WITH RIGHTWARDS TICK
+ {0x27E6, 0x27E6, prNa}, // Ps MATHEMATICAL LEFT WHITE SQUARE BRACKET
+ {0x27E7, 0x27E7, prNa}, // Pe MATHEMATICAL RIGHT WHITE SQUARE BRACKET
+ {0x27E8, 0x27E8, prNa}, // Ps MATHEMATICAL LEFT ANGLE BRACKET
+ {0x27E9, 0x27E9, prNa}, // Pe MATHEMATICAL RIGHT ANGLE BRACKET
+ {0x27EA, 0x27EA, prNa}, // Ps MATHEMATICAL LEFT DOUBLE ANGLE BRACKET
+ {0x27EB, 0x27EB, prNa}, // Pe MATHEMATICAL RIGHT DOUBLE ANGLE BRACKET
+ {0x27EC, 0x27EC, prNa}, // Ps MATHEMATICAL LEFT WHITE TORTOISE SHELL BRACKET
+ {0x27ED, 0x27ED, prNa}, // Pe MATHEMATICAL RIGHT WHITE TORTOISE SHELL BRACKET
+ {0x27EE, 0x27EE, prN}, // Ps MATHEMATICAL LEFT FLATTENED PARENTHESIS
+ {0x27EF, 0x27EF, prN}, // Pe MATHEMATICAL RIGHT FLATTENED PARENTHESIS
+ {0x27F0, 0x27FF, prN}, // Sm [16] UPWARDS QUADRUPLE ARROW..LONG RIGHTWARDS SQUIGGLE ARROW
+ {0x2800, 0x28FF, prN}, // So [256] BRAILLE PATTERN BLANK..BRAILLE PATTERN DOTS-12345678
+ {0x2900, 0x297F, prN}, // Sm [128] RIGHTWARDS TWO-HEADED ARROW WITH VERTICAL STROKE..DOWN FISH TAIL
+ {0x2980, 0x2982, prN}, // Sm [3] TRIPLE VERTICAL BAR DELIMITER..Z NOTATION TYPE COLON
+ {0x2983, 0x2983, prN}, // Ps LEFT WHITE CURLY BRACKET
+ {0x2984, 0x2984, prN}, // Pe RIGHT WHITE CURLY BRACKET
+ {0x2985, 0x2985, prNa}, // Ps LEFT WHITE PARENTHESIS
+ {0x2986, 0x2986, prNa}, // Pe RIGHT WHITE PARENTHESIS
+ {0x2987, 0x2987, prN}, // Ps Z NOTATION LEFT IMAGE BRACKET
+ {0x2988, 0x2988, prN}, // Pe Z NOTATION RIGHT IMAGE BRACKET
+ {0x2989, 0x2989, prN}, // Ps Z NOTATION LEFT BINDING BRACKET
+ {0x298A, 0x298A, prN}, // Pe Z NOTATION RIGHT BINDING BRACKET
+ {0x298B, 0x298B, prN}, // Ps LEFT SQUARE BRACKET WITH UNDERBAR
+ {0x298C, 0x298C, prN}, // Pe RIGHT SQUARE BRACKET WITH UNDERBAR
+ {0x298D, 0x298D, prN}, // Ps LEFT SQUARE BRACKET WITH TICK IN TOP CORNER
+ {0x298E, 0x298E, prN}, // Pe RIGHT SQUARE BRACKET WITH TICK IN BOTTOM CORNER
+ {0x298F, 0x298F, prN}, // Ps LEFT SQUARE BRACKET WITH TICK IN BOTTOM CORNER
+ {0x2990, 0x2990, prN}, // Pe RIGHT SQUARE BRACKET WITH TICK IN TOP CORNER
+ {0x2991, 0x2991, prN}, // Ps LEFT ANGLE BRACKET WITH DOT
+ {0x2992, 0x2992, prN}, // Pe RIGHT ANGLE BRACKET WITH DOT
+ {0x2993, 0x2993, prN}, // Ps LEFT ARC LESS-THAN BRACKET
+ {0x2994, 0x2994, prN}, // Pe RIGHT ARC GREATER-THAN BRACKET
+ {0x2995, 0x2995, prN}, // Ps DOUBLE LEFT ARC GREATER-THAN BRACKET
+ {0x2996, 0x2996, prN}, // Pe DOUBLE RIGHT ARC LESS-THAN BRACKET
+ {0x2997, 0x2997, prN}, // Ps LEFT BLACK TORTOISE SHELL BRACKET
+ {0x2998, 0x2998, prN}, // Pe RIGHT BLACK TORTOISE SHELL BRACKET
+ {0x2999, 0x29D7, prN}, // Sm [63] DOTTED FENCE..BLACK HOURGLASS
+ {0x29D8, 0x29D8, prN}, // Ps LEFT WIGGLY FENCE
+ {0x29D9, 0x29D9, prN}, // Pe RIGHT WIGGLY FENCE
+ {0x29DA, 0x29DA, prN}, // Ps LEFT DOUBLE WIGGLY FENCE
+ {0x29DB, 0x29DB, prN}, // Pe RIGHT DOUBLE WIGGLY FENCE
+ {0x29DC, 0x29FB, prN}, // Sm [32] INCOMPLETE INFINITY..TRIPLE PLUS
+ {0x29FC, 0x29FC, prN}, // Ps LEFT-POINTING CURVED ANGLE BRACKET
+ {0x29FD, 0x29FD, prN}, // Pe RIGHT-POINTING CURVED ANGLE BRACKET
+ {0x29FE, 0x29FF, prN}, // Sm [2] TINY..MINY
+ {0x2A00, 0x2AFF, prN}, // Sm [256] N-ARY CIRCLED DOT OPERATOR..N-ARY WHITE VERTICAL BAR
+ {0x2B00, 0x2B1A, prN}, // So [27] NORTH EAST WHITE ARROW..DOTTED SQUARE
+ {0x2B1B, 0x2B1C, prW}, // So [2] BLACK LARGE SQUARE..WHITE LARGE SQUARE
+ {0x2B1D, 0x2B2F, prN}, // So [19] BLACK VERY SMALL SQUARE..WHITE VERTICAL ELLIPSE
+ {0x2B30, 0x2B44, prN}, // Sm [21] LEFT ARROW WITH SMALL CIRCLE..RIGHTWARDS ARROW THROUGH SUPERSET
+ {0x2B45, 0x2B46, prN}, // So [2] LEFTWARDS QUADRUPLE ARROW..RIGHTWARDS QUADRUPLE ARROW
+ {0x2B47, 0x2B4C, prN}, // Sm [6] REVERSE TILDE OPERATOR ABOVE RIGHTWARDS ARROW..RIGHTWARDS ARROW ABOVE REVERSE TILDE OPERATOR
+ {0x2B4D, 0x2B4F, prN}, // So [3] DOWNWARDS TRIANGLE-HEADED ZIGZAG ARROW..SHORT BACKSLANTED SOUTH ARROW
+ {0x2B50, 0x2B50, prW}, // So WHITE MEDIUM STAR
+ {0x2B51, 0x2B54, prN}, // So [4] BLACK SMALL STAR..WHITE RIGHT-POINTING PENTAGON
+ {0x2B55, 0x2B55, prW}, // So HEAVY LARGE CIRCLE
+ {0x2B56, 0x2B59, prA}, // So [4] HEAVY OVAL WITH OVAL INSIDE..HEAVY CIRCLED SALTIRE
+ {0x2B5A, 0x2B73, prN}, // So [26] SLANTED NORTH ARROW WITH HOOKED HEAD..DOWNWARDS TRIANGLE-HEADED ARROW TO BAR
+ {0x2B76, 0x2B95, prN}, // So [32] NORTH WEST TRIANGLE-HEADED ARROW TO BAR..RIGHTWARDS BLACK ARROW
+ {0x2B97, 0x2BFF, prN}, // So [105] SYMBOL FOR TYPE A ELECTRONICS..HELLSCHREIBER PAUSE SYMBOL
+ {0x2C00, 0x2C5F, prN}, // L& [96] GLAGOLITIC CAPITAL LETTER AZU..GLAGOLITIC SMALL LETTER CAUDATE CHRIVI
+ {0x2C60, 0x2C7B, prN}, // L& [28] LATIN CAPITAL LETTER L WITH DOUBLE BAR..LATIN LETTER SMALL CAPITAL TURNED E
+ {0x2C7C, 0x2C7D, prN}, // Lm [2] LATIN SUBSCRIPT SMALL LETTER J..MODIFIER LETTER CAPITAL V
+ {0x2C7E, 0x2C7F, prN}, // Lu [2] LATIN CAPITAL LETTER S WITH SWASH TAIL..LATIN CAPITAL LETTER Z WITH SWASH TAIL
+ {0x2C80, 0x2CE4, prN}, // L& [101] COPTIC CAPITAL LETTER ALFA..COPTIC SYMBOL KAI
+ {0x2CE5, 0x2CEA, prN}, // So [6] COPTIC SYMBOL MI RO..COPTIC SYMBOL SHIMA SIMA
+ {0x2CEB, 0x2CEE, prN}, // L& [4] COPTIC CAPITAL LETTER CRYPTOGRAMMIC SHEI..COPTIC SMALL LETTER CRYPTOGRAMMIC GANGIA
+ {0x2CEF, 0x2CF1, prN}, // Mn [3] COPTIC COMBINING NI ABOVE..COPTIC COMBINING SPIRITUS LENIS
+ {0x2CF2, 0x2CF3, prN}, // L& [2] COPTIC CAPITAL LETTER BOHAIRIC KHEI..COPTIC SMALL LETTER BOHAIRIC KHEI
+ {0x2CF9, 0x2CFC, prN}, // Po [4] COPTIC OLD NUBIAN FULL STOP..COPTIC OLD NUBIAN VERSE DIVIDER
+ {0x2CFD, 0x2CFD, prN}, // No COPTIC FRACTION ONE HALF
+ {0x2CFE, 0x2CFF, prN}, // Po [2] COPTIC FULL STOP..COPTIC MORPHOLOGICAL DIVIDER
+ {0x2D00, 0x2D25, prN}, // Ll [38] GEORGIAN SMALL LETTER AN..GEORGIAN SMALL LETTER HOE
+ {0x2D27, 0x2D27, prN}, // Ll GEORGIAN SMALL LETTER YN
+ {0x2D2D, 0x2D2D, prN}, // Ll GEORGIAN SMALL LETTER AEN
+ {0x2D30, 0x2D67, prN}, // Lo [56] TIFINAGH LETTER YA..TIFINAGH LETTER YO
+ {0x2D6F, 0x2D6F, prN}, // Lm TIFINAGH MODIFIER LETTER LABIALIZATION MARK
+ {0x2D70, 0x2D70, prN}, // Po TIFINAGH SEPARATOR MARK
+ {0x2D7F, 0x2D7F, prN}, // Mn TIFINAGH CONSONANT JOINER
+ {0x2D80, 0x2D96, prN}, // Lo [23] ETHIOPIC SYLLABLE LOA..ETHIOPIC SYLLABLE GGWE
+ {0x2DA0, 0x2DA6, prN}, // Lo [7] ETHIOPIC SYLLABLE SSA..ETHIOPIC SYLLABLE SSO
+ {0x2DA8, 0x2DAE, prN}, // Lo [7] ETHIOPIC SYLLABLE CCA..ETHIOPIC SYLLABLE CCO
+ {0x2DB0, 0x2DB6, prN}, // Lo [7] ETHIOPIC SYLLABLE ZZA..ETHIOPIC SYLLABLE ZZO
+ {0x2DB8, 0x2DBE, prN}, // Lo [7] ETHIOPIC SYLLABLE CCHA..ETHIOPIC SYLLABLE CCHO
+ {0x2DC0, 0x2DC6, prN}, // Lo [7] ETHIOPIC SYLLABLE QYA..ETHIOPIC SYLLABLE QYO
+ {0x2DC8, 0x2DCE, prN}, // Lo [7] ETHIOPIC SYLLABLE KYA..ETHIOPIC SYLLABLE KYO
+ {0x2DD0, 0x2DD6, prN}, // Lo [7] ETHIOPIC SYLLABLE XYA..ETHIOPIC SYLLABLE XYO
+ {0x2DD8, 0x2DDE, prN}, // Lo [7] ETHIOPIC SYLLABLE GYA..ETHIOPIC SYLLABLE GYO
+ {0x2DE0, 0x2DFF, prN}, // Mn [32] COMBINING CYRILLIC LETTER BE..COMBINING CYRILLIC LETTER IOTIFIED BIG YUS
+ {0x2E00, 0x2E01, prN}, // Po [2] RIGHT ANGLE SUBSTITUTION MARKER..RIGHT ANGLE DOTTED SUBSTITUTION MARKER
+ {0x2E02, 0x2E02, prN}, // Pi LEFT SUBSTITUTION BRACKET
+ {0x2E03, 0x2E03, prN}, // Pf RIGHT SUBSTITUTION BRACKET
+ {0x2E04, 0x2E04, prN}, // Pi LEFT DOTTED SUBSTITUTION BRACKET
+ {0x2E05, 0x2E05, prN}, // Pf RIGHT DOTTED SUBSTITUTION BRACKET
+ {0x2E06, 0x2E08, prN}, // Po [3] RAISED INTERPOLATION MARKER..DOTTED TRANSPOSITION MARKER
+ {0x2E09, 0x2E09, prN}, // Pi LEFT TRANSPOSITION BRACKET
+ {0x2E0A, 0x2E0A, prN}, // Pf RIGHT TRANSPOSITION BRACKET
+ {0x2E0B, 0x2E0B, prN}, // Po RAISED SQUARE
+ {0x2E0C, 0x2E0C, prN}, // Pi LEFT RAISED OMISSION BRACKET
+ {0x2E0D, 0x2E0D, prN}, // Pf RIGHT RAISED OMISSION BRACKET
+ {0x2E0E, 0x2E16, prN}, // Po [9] EDITORIAL CORONIS..DOTTED RIGHT-POINTING ANGLE
+ {0x2E17, 0x2E17, prN}, // Pd DOUBLE OBLIQUE HYPHEN
+ {0x2E18, 0x2E19, prN}, // Po [2] INVERTED INTERROBANG..PALM BRANCH
+ {0x2E1A, 0x2E1A, prN}, // Pd HYPHEN WITH DIAERESIS
+ {0x2E1B, 0x2E1B, prN}, // Po TILDE WITH RING ABOVE
+ {0x2E1C, 0x2E1C, prN}, // Pi LEFT LOW PARAPHRASE BRACKET
+ {0x2E1D, 0x2E1D, prN}, // Pf RIGHT LOW PARAPHRASE BRACKET
+ {0x2E1E, 0x2E1F, prN}, // Po [2] TILDE WITH DOT ABOVE..TILDE WITH DOT BELOW
+ {0x2E20, 0x2E20, prN}, // Pi LEFT VERTICAL BAR WITH QUILL
+ {0x2E21, 0x2E21, prN}, // Pf RIGHT VERTICAL BAR WITH QUILL
+ {0x2E22, 0x2E22, prN}, // Ps TOP LEFT HALF BRACKET
+ {0x2E23, 0x2E23, prN}, // Pe TOP RIGHT HALF BRACKET
+ {0x2E24, 0x2E24, prN}, // Ps BOTTOM LEFT HALF BRACKET
+ {0x2E25, 0x2E25, prN}, // Pe BOTTOM RIGHT HALF BRACKET
+ {0x2E26, 0x2E26, prN}, // Ps LEFT SIDEWAYS U BRACKET
+ {0x2E27, 0x2E27, prN}, // Pe RIGHT SIDEWAYS U BRACKET
+ {0x2E28, 0x2E28, prN}, // Ps LEFT DOUBLE PARENTHESIS
+ {0x2E29, 0x2E29, prN}, // Pe RIGHT DOUBLE PARENTHESIS
+ {0x2E2A, 0x2E2E, prN}, // Po [5] TWO DOTS OVER ONE DOT PUNCTUATION..REVERSED QUESTION MARK
+ {0x2E2F, 0x2E2F, prN}, // Lm VERTICAL TILDE
+ {0x2E30, 0x2E39, prN}, // Po [10] RING POINT..TOP HALF SECTION SIGN
+ {0x2E3A, 0x2E3B, prN}, // Pd [2] TWO-EM DASH..THREE-EM DASH
+ {0x2E3C, 0x2E3F, prN}, // Po [4] STENOGRAPHIC FULL STOP..CAPITULUM
+ {0x2E40, 0x2E40, prN}, // Pd DOUBLE HYPHEN
+ {0x2E41, 0x2E41, prN}, // Po REVERSED COMMA
+ {0x2E42, 0x2E42, prN}, // Ps DOUBLE LOW-REVERSED-9 QUOTATION MARK
+ {0x2E43, 0x2E4F, prN}, // Po [13] DASH WITH LEFT UPTURN..CORNISH VERSE DIVIDER
+ {0x2E50, 0x2E51, prN}, // So [2] CROSS PATTY WITH RIGHT CROSSBAR..CROSS PATTY WITH LEFT CROSSBAR
+ {0x2E52, 0x2E54, prN}, // Po [3] TIRONIAN SIGN CAPITAL ET..MEDIEVAL QUESTION MARK
+ {0x2E55, 0x2E55, prN}, // Ps LEFT SQUARE BRACKET WITH STROKE
+ {0x2E56, 0x2E56, prN}, // Pe RIGHT SQUARE BRACKET WITH STROKE
+ {0x2E57, 0x2E57, prN}, // Ps LEFT SQUARE BRACKET WITH DOUBLE STROKE
+ {0x2E58, 0x2E58, prN}, // Pe RIGHT SQUARE BRACKET WITH DOUBLE STROKE
+ {0x2E59, 0x2E59, prN}, // Ps TOP HALF LEFT PARENTHESIS
+ {0x2E5A, 0x2E5A, prN}, // Pe TOP HALF RIGHT PARENTHESIS
+ {0x2E5B, 0x2E5B, prN}, // Ps BOTTOM HALF LEFT PARENTHESIS
+ {0x2E5C, 0x2E5C, prN}, // Pe BOTTOM HALF RIGHT PARENTHESIS
+ {0x2E5D, 0x2E5D, prN}, // Pd OBLIQUE HYPHEN
+ {0x2E80, 0x2E99, prW}, // So [26] CJK RADICAL REPEAT..CJK RADICAL RAP
+ {0x2E9B, 0x2EF3, prW}, // So [89] CJK RADICAL CHOKE..CJK RADICAL C-SIMPLIFIED TURTLE
+ {0x2F00, 0x2FD5, prW}, // So [214] KANGXI RADICAL ONE..KANGXI RADICAL FLUTE
+ {0x2FF0, 0x2FFB, prW}, // So [12] IDEOGRAPHIC DESCRIPTION CHARACTER LEFT TO RIGHT..IDEOGRAPHIC DESCRIPTION CHARACTER OVERLAID
+ {0x3000, 0x3000, prF}, // Zs IDEOGRAPHIC SPACE
+ {0x3001, 0x3003, prW}, // Po [3] IDEOGRAPHIC COMMA..DITTO MARK
+ {0x3004, 0x3004, prW}, // So JAPANESE INDUSTRIAL STANDARD SYMBOL
+ {0x3005, 0x3005, prW}, // Lm IDEOGRAPHIC ITERATION MARK
+ {0x3006, 0x3006, prW}, // Lo IDEOGRAPHIC CLOSING MARK
+ {0x3007, 0x3007, prW}, // Nl IDEOGRAPHIC NUMBER ZERO
+ {0x3008, 0x3008, prW}, // Ps LEFT ANGLE BRACKET
+ {0x3009, 0x3009, prW}, // Pe RIGHT ANGLE BRACKET
+ {0x300A, 0x300A, prW}, // Ps LEFT DOUBLE ANGLE BRACKET
+ {0x300B, 0x300B, prW}, // Pe RIGHT DOUBLE ANGLE BRACKET
+ {0x300C, 0x300C, prW}, // Ps LEFT CORNER BRACKET
+ {0x300D, 0x300D, prW}, // Pe RIGHT CORNER BRACKET
+ {0x300E, 0x300E, prW}, // Ps LEFT WHITE CORNER BRACKET
+ {0x300F, 0x300F, prW}, // Pe RIGHT WHITE CORNER BRACKET
+ {0x3010, 0x3010, prW}, // Ps LEFT BLACK LENTICULAR BRACKET
+ {0x3011, 0x3011, prW}, // Pe RIGHT BLACK LENTICULAR BRACKET
+ {0x3012, 0x3013, prW}, // So [2] POSTAL MARK..GETA MARK
+ {0x3014, 0x3014, prW}, // Ps LEFT TORTOISE SHELL BRACKET
+ {0x3015, 0x3015, prW}, // Pe RIGHT TORTOISE SHELL BRACKET
+ {0x3016, 0x3016, prW}, // Ps LEFT WHITE LENTICULAR BRACKET
+ {0x3017, 0x3017, prW}, // Pe RIGHT WHITE LENTICULAR BRACKET
+ {0x3018, 0x3018, prW}, // Ps LEFT WHITE TORTOISE SHELL BRACKET
+ {0x3019, 0x3019, prW}, // Pe RIGHT WHITE TORTOISE SHELL BRACKET
+ {0x301A, 0x301A, prW}, // Ps LEFT WHITE SQUARE BRACKET
+ {0x301B, 0x301B, prW}, // Pe RIGHT WHITE SQUARE BRACKET
+ {0x301C, 0x301C, prW}, // Pd WAVE DASH
+ {0x301D, 0x301D, prW}, // Ps REVERSED DOUBLE PRIME QUOTATION MARK
+ {0x301E, 0x301F, prW}, // Pe [2] DOUBLE PRIME QUOTATION MARK..LOW DOUBLE PRIME QUOTATION MARK
+ {0x3020, 0x3020, prW}, // So POSTAL MARK FACE
+ {0x3021, 0x3029, prW}, // Nl [9] HANGZHOU NUMERAL ONE..HANGZHOU NUMERAL NINE
+ {0x302A, 0x302D, prW}, // Mn [4] IDEOGRAPHIC LEVEL TONE MARK..IDEOGRAPHIC ENTERING TONE MARK
+ {0x302E, 0x302F, prW}, // Mc [2] HANGUL SINGLE DOT TONE MARK..HANGUL DOUBLE DOT TONE MARK
+ {0x3030, 0x3030, prW}, // Pd WAVY DASH
+ {0x3031, 0x3035, prW}, // Lm [5] VERTICAL KANA REPEAT MARK..VERTICAL KANA REPEAT MARK LOWER HALF
+ {0x3036, 0x3037, prW}, // So [2] CIRCLED POSTAL MARK..IDEOGRAPHIC TELEGRAPH LINE FEED SEPARATOR SYMBOL
+ {0x3038, 0x303A, prW}, // Nl [3] HANGZHOU NUMERAL TEN..HANGZHOU NUMERAL THIRTY
+ {0x303B, 0x303B, prW}, // Lm VERTICAL IDEOGRAPHIC ITERATION MARK
+ {0x303C, 0x303C, prW}, // Lo MASU MARK
+ {0x303D, 0x303D, prW}, // Po PART ALTERNATION MARK
+ {0x303E, 0x303E, prW}, // So IDEOGRAPHIC VARIATION INDICATOR
+ {0x303F, 0x303F, prN}, // So IDEOGRAPHIC HALF FILL SPACE
+ {0x3041, 0x3096, prW}, // Lo [86] HIRAGANA LETTER SMALL A..HIRAGANA LETTER SMALL KE
+ {0x3099, 0x309A, prW}, // Mn [2] COMBINING KATAKANA-HIRAGANA VOICED SOUND MARK..COMBINING KATAKANA-HIRAGANA SEMI-VOICED SOUND MARK
+ {0x309B, 0x309C, prW}, // Sk [2] KATAKANA-HIRAGANA VOICED SOUND MARK..KATAKANA-HIRAGANA SEMI-VOICED SOUND MARK
+ {0x309D, 0x309E, prW}, // Lm [2] HIRAGANA ITERATION MARK..HIRAGANA VOICED ITERATION MARK
+ {0x309F, 0x309F, prW}, // Lo HIRAGANA DIGRAPH YORI
+ {0x30A0, 0x30A0, prW}, // Pd KATAKANA-HIRAGANA DOUBLE HYPHEN
+ {0x30A1, 0x30FA, prW}, // Lo [90] KATAKANA LETTER SMALL A..KATAKANA LETTER VO
+ {0x30FB, 0x30FB, prW}, // Po KATAKANA MIDDLE DOT
+ {0x30FC, 0x30FE, prW}, // Lm [3] KATAKANA-HIRAGANA PROLONGED SOUND MARK..KATAKANA VOICED ITERATION MARK
+ {0x30FF, 0x30FF, prW}, // Lo KATAKANA DIGRAPH KOTO
+ {0x3105, 0x312F, prW}, // Lo [43] BOPOMOFO LETTER B..BOPOMOFO LETTER NN
+ {0x3131, 0x318E, prW}, // Lo [94] HANGUL LETTER KIYEOK..HANGUL LETTER ARAEAE
+ {0x3190, 0x3191, prW}, // So [2] IDEOGRAPHIC ANNOTATION LINKING MARK..IDEOGRAPHIC ANNOTATION REVERSE MARK
+ {0x3192, 0x3195, prW}, // No [4] IDEOGRAPHIC ANNOTATION ONE MARK..IDEOGRAPHIC ANNOTATION FOUR MARK
+ {0x3196, 0x319F, prW}, // So [10] IDEOGRAPHIC ANNOTATION TOP MARK..IDEOGRAPHIC ANNOTATION MAN MARK
+ {0x31A0, 0x31BF, prW}, // Lo [32] BOPOMOFO LETTER BU..BOPOMOFO LETTER AH
+ {0x31C0, 0x31E3, prW}, // So [36] CJK STROKE T..CJK STROKE Q
+ {0x31F0, 0x31FF, prW}, // Lo [16] KATAKANA LETTER SMALL KU..KATAKANA LETTER SMALL RO
+ {0x3200, 0x321E, prW}, // So [31] PARENTHESIZED HANGUL KIYEOK..PARENTHESIZED KOREAN CHARACTER O HU
+ {0x3220, 0x3229, prW}, // No [10] PARENTHESIZED IDEOGRAPH ONE..PARENTHESIZED IDEOGRAPH TEN
+ {0x322A, 0x3247, prW}, // So [30] PARENTHESIZED IDEOGRAPH MOON..CIRCLED IDEOGRAPH KOTO
+ {0x3248, 0x324F, prA}, // No [8] CIRCLED NUMBER TEN ON BLACK SQUARE..CIRCLED NUMBER EIGHTY ON BLACK SQUARE
+ {0x3250, 0x3250, prW}, // So PARTNERSHIP SIGN
+ {0x3251, 0x325F, prW}, // No [15] CIRCLED NUMBER TWENTY ONE..CIRCLED NUMBER THIRTY FIVE
+ {0x3260, 0x327F, prW}, // So [32] CIRCLED HANGUL KIYEOK..KOREAN STANDARD SYMBOL
+ {0x3280, 0x3289, prW}, // No [10] CIRCLED IDEOGRAPH ONE..CIRCLED IDEOGRAPH TEN
+ {0x328A, 0x32B0, prW}, // So [39] CIRCLED IDEOGRAPH MOON..CIRCLED IDEOGRAPH NIGHT
+ {0x32B1, 0x32BF, prW}, // No [15] CIRCLED NUMBER THIRTY SIX..CIRCLED NUMBER FIFTY
+ {0x32C0, 0x32FF, prW}, // So [64] IDEOGRAPHIC TELEGRAPH SYMBOL FOR JANUARY..SQUARE ERA NAME REIWA
+ {0x3300, 0x33FF, prW}, // So [256] SQUARE APAATO..SQUARE GAL
+ {0x3400, 0x4DBF, prW}, // Lo [6592] CJK UNIFIED IDEOGRAPH-3400..CJK UNIFIED IDEOGRAPH-4DBF
+ {0x4DC0, 0x4DFF, prN}, // So [64] HEXAGRAM FOR THE CREATIVE HEAVEN..HEXAGRAM FOR BEFORE COMPLETION
+ {0x4E00, 0x9FFF, prW}, // Lo [20992] CJK UNIFIED IDEOGRAPH-4E00..CJK UNIFIED IDEOGRAPH-9FFF
+ {0xA000, 0xA014, prW}, // Lo [21] YI SYLLABLE IT..YI SYLLABLE E
+ {0xA015, 0xA015, prW}, // Lm YI SYLLABLE WU
+ {0xA016, 0xA48C, prW}, // Lo [1143] YI SYLLABLE BIT..YI SYLLABLE YYR
+ {0xA490, 0xA4C6, prW}, // So [55] YI RADICAL QOT..YI RADICAL KE
+ {0xA4D0, 0xA4F7, prN}, // Lo [40] LISU LETTER BA..LISU LETTER OE
+ {0xA4F8, 0xA4FD, prN}, // Lm [6] LISU LETTER TONE MYA TI..LISU LETTER TONE MYA JEU
+ {0xA4FE, 0xA4FF, prN}, // Po [2] LISU PUNCTUATION COMMA..LISU PUNCTUATION FULL STOP
+ {0xA500, 0xA60B, prN}, // Lo [268] VAI SYLLABLE EE..VAI SYLLABLE NG
+ {0xA60C, 0xA60C, prN}, // Lm VAI SYLLABLE LENGTHENER
+ {0xA60D, 0xA60F, prN}, // Po [3] VAI COMMA..VAI QUESTION MARK
+ {0xA610, 0xA61F, prN}, // Lo [16] VAI SYLLABLE NDOLE FA..VAI SYMBOL JONG
+ {0xA620, 0xA629, prN}, // Nd [10] VAI DIGIT ZERO..VAI DIGIT NINE
+ {0xA62A, 0xA62B, prN}, // Lo [2] VAI SYLLABLE NDOLE MA..VAI SYLLABLE NDOLE DO
+ {0xA640, 0xA66D, prN}, // L& [46] CYRILLIC CAPITAL LETTER ZEMLYA..CYRILLIC SMALL LETTER DOUBLE MONOCULAR O
+ {0xA66E, 0xA66E, prN}, // Lo CYRILLIC LETTER MULTIOCULAR O
+ {0xA66F, 0xA66F, prN}, // Mn COMBINING CYRILLIC VZMET
+ {0xA670, 0xA672, prN}, // Me [3] COMBINING CYRILLIC TEN MILLIONS SIGN..COMBINING CYRILLIC THOUSAND MILLIONS SIGN
+ {0xA673, 0xA673, prN}, // Po SLAVONIC ASTERISK
+ {0xA674, 0xA67D, prN}, // Mn [10] COMBINING CYRILLIC LETTER UKRAINIAN IE..COMBINING CYRILLIC PAYEROK
+ {0xA67E, 0xA67E, prN}, // Po CYRILLIC KAVYKA
+ {0xA67F, 0xA67F, prN}, // Lm CYRILLIC PAYEROK
+ {0xA680, 0xA69B, prN}, // L& [28] CYRILLIC CAPITAL LETTER DWE..CYRILLIC SMALL LETTER CROSSED O
+ {0xA69C, 0xA69D, prN}, // Lm [2] MODIFIER LETTER CYRILLIC HARD SIGN..MODIFIER LETTER CYRILLIC SOFT SIGN
+ {0xA69E, 0xA69F, prN}, // Mn [2] COMBINING CYRILLIC LETTER EF..COMBINING CYRILLIC LETTER IOTIFIED E
+ {0xA6A0, 0xA6E5, prN}, // Lo [70] BAMUM LETTER A..BAMUM LETTER KI
+ {0xA6E6, 0xA6EF, prN}, // Nl [10] BAMUM LETTER MO..BAMUM LETTER KOGHOM
+ {0xA6F0, 0xA6F1, prN}, // Mn [2] BAMUM COMBINING MARK KOQNDON..BAMUM COMBINING MARK TUKWENTIS
+ {0xA6F2, 0xA6F7, prN}, // Po [6] BAMUM NJAEMLI..BAMUM QUESTION MARK
+ {0xA700, 0xA716, prN}, // Sk [23] MODIFIER LETTER CHINESE TONE YIN PING..MODIFIER LETTER EXTRA-LOW LEFT-STEM TONE BAR
+ {0xA717, 0xA71F, prN}, // Lm [9] MODIFIER LETTER DOT VERTICAL BAR..MODIFIER LETTER LOW INVERTED EXCLAMATION MARK
+ {0xA720, 0xA721, prN}, // Sk [2] MODIFIER LETTER STRESS AND HIGH TONE..MODIFIER LETTER STRESS AND LOW TONE
+ {0xA722, 0xA76F, prN}, // L& [78] LATIN CAPITAL LETTER EGYPTOLOGICAL ALEF..LATIN SMALL LETTER CON
+ {0xA770, 0xA770, prN}, // Lm MODIFIER LETTER US
+ {0xA771, 0xA787, prN}, // L& [23] LATIN SMALL LETTER DUM..LATIN SMALL LETTER INSULAR T
+ {0xA788, 0xA788, prN}, // Lm MODIFIER LETTER LOW CIRCUMFLEX ACCENT
+ {0xA789, 0xA78A, prN}, // Sk [2] MODIFIER LETTER COLON..MODIFIER LETTER SHORT EQUALS SIGN
+ {0xA78B, 0xA78E, prN}, // L& [4] LATIN CAPITAL LETTER SALTILLO..LATIN SMALL LETTER L WITH RETROFLEX HOOK AND BELT
+ {0xA78F, 0xA78F, prN}, // Lo LATIN LETTER SINOLOGICAL DOT
+ {0xA790, 0xA7CA, prN}, // L& [59] LATIN CAPITAL LETTER N WITH DESCENDER..LATIN SMALL LETTER S WITH SHORT STROKE OVERLAY
+ {0xA7D0, 0xA7D1, prN}, // L& [2] LATIN CAPITAL LETTER CLOSED INSULAR G..LATIN SMALL LETTER CLOSED INSULAR G
+ {0xA7D3, 0xA7D3, prN}, // Ll LATIN SMALL LETTER DOUBLE THORN
+ {0xA7D5, 0xA7D9, prN}, // L& [5] LATIN SMALL LETTER DOUBLE WYNN..LATIN SMALL LETTER SIGMOID S
+ {0xA7F2, 0xA7F4, prN}, // Lm [3] MODIFIER LETTER CAPITAL C..MODIFIER LETTER CAPITAL Q
+ {0xA7F5, 0xA7F6, prN}, // L& [2] LATIN CAPITAL LETTER REVERSED HALF H..LATIN SMALL LETTER REVERSED HALF H
+ {0xA7F7, 0xA7F7, prN}, // Lo LATIN EPIGRAPHIC LETTER SIDEWAYS I
+ {0xA7F8, 0xA7F9, prN}, // Lm [2] MODIFIER LETTER CAPITAL H WITH STROKE..MODIFIER LETTER SMALL LIGATURE OE
+ {0xA7FA, 0xA7FA, prN}, // Ll LATIN LETTER SMALL CAPITAL TURNED M
+ {0xA7FB, 0xA7FF, prN}, // Lo [5] LATIN EPIGRAPHIC LETTER REVERSED F..LATIN EPIGRAPHIC LETTER ARCHAIC M
+ {0xA800, 0xA801, prN}, // Lo [2] SYLOTI NAGRI LETTER A..SYLOTI NAGRI LETTER I
+ {0xA802, 0xA802, prN}, // Mn SYLOTI NAGRI SIGN DVISVARA
+ {0xA803, 0xA805, prN}, // Lo [3] SYLOTI NAGRI LETTER U..SYLOTI NAGRI LETTER O
+ {0xA806, 0xA806, prN}, // Mn SYLOTI NAGRI SIGN HASANTA
+ {0xA807, 0xA80A, prN}, // Lo [4] SYLOTI NAGRI LETTER KO..SYLOTI NAGRI LETTER GHO
+ {0xA80B, 0xA80B, prN}, // Mn SYLOTI NAGRI SIGN ANUSVARA
+ {0xA80C, 0xA822, prN}, // Lo [23] SYLOTI NAGRI LETTER CO..SYLOTI NAGRI LETTER HO
+ {0xA823, 0xA824, prN}, // Mc [2] SYLOTI NAGRI VOWEL SIGN A..SYLOTI NAGRI VOWEL SIGN I
+ {0xA825, 0xA826, prN}, // Mn [2] SYLOTI NAGRI VOWEL SIGN U..SYLOTI NAGRI VOWEL SIGN E
+ {0xA827, 0xA827, prN}, // Mc SYLOTI NAGRI VOWEL SIGN OO
+ {0xA828, 0xA82B, prN}, // So [4] SYLOTI NAGRI POETRY MARK-1..SYLOTI NAGRI POETRY MARK-4
+ {0xA82C, 0xA82C, prN}, // Mn SYLOTI NAGRI SIGN ALTERNATE HASANTA
+ {0xA830, 0xA835, prN}, // No [6] NORTH INDIC FRACTION ONE QUARTER..NORTH INDIC FRACTION THREE SIXTEENTHS
+ {0xA836, 0xA837, prN}, // So [2] NORTH INDIC QUARTER MARK..NORTH INDIC PLACEHOLDER MARK
+ {0xA838, 0xA838, prN}, // Sc NORTH INDIC RUPEE MARK
+ {0xA839, 0xA839, prN}, // So NORTH INDIC QUANTITY MARK
+ {0xA840, 0xA873, prN}, // Lo [52] PHAGS-PA LETTER KA..PHAGS-PA LETTER CANDRABINDU
+ {0xA874, 0xA877, prN}, // Po [4] PHAGS-PA SINGLE HEAD MARK..PHAGS-PA MARK DOUBLE SHAD
+ {0xA880, 0xA881, prN}, // Mc [2] SAURASHTRA SIGN ANUSVARA..SAURASHTRA SIGN VISARGA
+ {0xA882, 0xA8B3, prN}, // Lo [50] SAURASHTRA LETTER A..SAURASHTRA LETTER LLA
+ {0xA8B4, 0xA8C3, prN}, // Mc [16] SAURASHTRA CONSONANT SIGN HAARU..SAURASHTRA VOWEL SIGN AU
+ {0xA8C4, 0xA8C5, prN}, // Mn [2] SAURASHTRA SIGN VIRAMA..SAURASHTRA SIGN CANDRABINDU
+ {0xA8CE, 0xA8CF, prN}, // Po [2] SAURASHTRA DANDA..SAURASHTRA DOUBLE DANDA
+ {0xA8D0, 0xA8D9, prN}, // Nd [10] SAURASHTRA DIGIT ZERO..SAURASHTRA DIGIT NINE
+ {0xA8E0, 0xA8F1, prN}, // Mn [18] COMBINING DEVANAGARI DIGIT ZERO..COMBINING DEVANAGARI SIGN AVAGRAHA
+ {0xA8F2, 0xA8F7, prN}, // Lo [6] DEVANAGARI SIGN SPACING CANDRABINDU..DEVANAGARI SIGN CANDRABINDU AVAGRAHA
+ {0xA8F8, 0xA8FA, prN}, // Po [3] DEVANAGARI SIGN PUSHPIKA..DEVANAGARI CARET
+ {0xA8FB, 0xA8FB, prN}, // Lo DEVANAGARI HEADSTROKE
+ {0xA8FC, 0xA8FC, prN}, // Po DEVANAGARI SIGN SIDDHAM
+ {0xA8FD, 0xA8FE, prN}, // Lo [2] DEVANAGARI JAIN OM..DEVANAGARI LETTER AY
+ {0xA8FF, 0xA8FF, prN}, // Mn DEVANAGARI VOWEL SIGN AY
+ {0xA900, 0xA909, prN}, // Nd [10] KAYAH LI DIGIT ZERO..KAYAH LI DIGIT NINE
+ {0xA90A, 0xA925, prN}, // Lo [28] KAYAH LI LETTER KA..KAYAH LI LETTER OO
+ {0xA926, 0xA92D, prN}, // Mn [8] KAYAH LI VOWEL UE..KAYAH LI TONE CALYA PLOPHU
+ {0xA92E, 0xA92F, prN}, // Po [2] KAYAH LI SIGN CWI..KAYAH LI SIGN SHYA
+ {0xA930, 0xA946, prN}, // Lo [23] REJANG LETTER KA..REJANG LETTER A
+ {0xA947, 0xA951, prN}, // Mn [11] REJANG VOWEL SIGN I..REJANG CONSONANT SIGN R
+ {0xA952, 0xA953, prN}, // Mc [2] REJANG CONSONANT SIGN H..REJANG VIRAMA
+ {0xA95F, 0xA95F, prN}, // Po REJANG SECTION MARK
+ {0xA960, 0xA97C, prW}, // Lo [29] HANGUL CHOSEONG TIKEUT-MIEUM..HANGUL CHOSEONG SSANGYEORINHIEUH
+ {0xA980, 0xA982, prN}, // Mn [3] JAVANESE SIGN PANYANGGA..JAVANESE SIGN LAYAR
+ {0xA983, 0xA983, prN}, // Mc JAVANESE SIGN WIGNYAN
+ {0xA984, 0xA9B2, prN}, // Lo [47] JAVANESE LETTER A..JAVANESE LETTER HA
+ {0xA9B3, 0xA9B3, prN}, // Mn JAVANESE SIGN CECAK TELU
+ {0xA9B4, 0xA9B5, prN}, // Mc [2] JAVANESE VOWEL SIGN TARUNG..JAVANESE VOWEL SIGN TOLONG
+ {0xA9B6, 0xA9B9, prN}, // Mn [4] JAVANESE VOWEL SIGN WULU..JAVANESE VOWEL SIGN SUKU MENDUT
+ {0xA9BA, 0xA9BB, prN}, // Mc [2] JAVANESE VOWEL SIGN TALING..JAVANESE VOWEL SIGN DIRGA MURE
+ {0xA9BC, 0xA9BD, prN}, // Mn [2] JAVANESE VOWEL SIGN PEPET..JAVANESE CONSONANT SIGN KERET
+ {0xA9BE, 0xA9C0, prN}, // Mc [3] JAVANESE CONSONANT SIGN PENGKAL..JAVANESE PANGKON
+ {0xA9C1, 0xA9CD, prN}, // Po [13] JAVANESE LEFT RERENGGAN..JAVANESE TURNED PADA PISELEH
+ {0xA9CF, 0xA9CF, prN}, // Lm JAVANESE PANGRANGKEP
+ {0xA9D0, 0xA9D9, prN}, // Nd [10] JAVANESE DIGIT ZERO..JAVANESE DIGIT NINE
+ {0xA9DE, 0xA9DF, prN}, // Po [2] JAVANESE PADA TIRTA TUMETES..JAVANESE PADA ISEN-ISEN
+ {0xA9E0, 0xA9E4, prN}, // Lo [5] MYANMAR LETTER SHAN GHA..MYANMAR LETTER SHAN BHA
+ {0xA9E5, 0xA9E5, prN}, // Mn MYANMAR SIGN SHAN SAW
+ {0xA9E6, 0xA9E6, prN}, // Lm MYANMAR MODIFIER LETTER SHAN REDUPLICATION
+ {0xA9E7, 0xA9EF, prN}, // Lo [9] MYANMAR LETTER TAI LAING NYA..MYANMAR LETTER TAI LAING NNA
+ {0xA9F0, 0xA9F9, prN}, // Nd [10] MYANMAR TAI LAING DIGIT ZERO..MYANMAR TAI LAING DIGIT NINE
+ {0xA9FA, 0xA9FE, prN}, // Lo [5] MYANMAR LETTER TAI LAING LLA..MYANMAR LETTER TAI LAING BHA
+ {0xAA00, 0xAA28, prN}, // Lo [41] CHAM LETTER A..CHAM LETTER HA
+ {0xAA29, 0xAA2E, prN}, // Mn [6] CHAM VOWEL SIGN AA..CHAM VOWEL SIGN OE
+ {0xAA2F, 0xAA30, prN}, // Mc [2] CHAM VOWEL SIGN O..CHAM VOWEL SIGN AI
+ {0xAA31, 0xAA32, prN}, // Mn [2] CHAM VOWEL SIGN AU..CHAM VOWEL SIGN UE
+ {0xAA33, 0xAA34, prN}, // Mc [2] CHAM CONSONANT SIGN YA..CHAM CONSONANT SIGN RA
+ {0xAA35, 0xAA36, prN}, // Mn [2] CHAM CONSONANT SIGN LA..CHAM CONSONANT SIGN WA
+ {0xAA40, 0xAA42, prN}, // Lo [3] CHAM LETTER FINAL K..CHAM LETTER FINAL NG
+ {0xAA43, 0xAA43, prN}, // Mn CHAM CONSONANT SIGN FINAL NG
+ {0xAA44, 0xAA4B, prN}, // Lo [8] CHAM LETTER FINAL CH..CHAM LETTER FINAL SS
+ {0xAA4C, 0xAA4C, prN}, // Mn CHAM CONSONANT SIGN FINAL M
+ {0xAA4D, 0xAA4D, prN}, // Mc CHAM CONSONANT SIGN FINAL H
+ {0xAA50, 0xAA59, prN}, // Nd [10] CHAM DIGIT ZERO..CHAM DIGIT NINE
+ {0xAA5C, 0xAA5F, prN}, // Po [4] CHAM PUNCTUATION SPIRAL..CHAM PUNCTUATION TRIPLE DANDA
+ {0xAA60, 0xAA6F, prN}, // Lo [16] MYANMAR LETTER KHAMTI GA..MYANMAR LETTER KHAMTI FA
+ {0xAA70, 0xAA70, prN}, // Lm MYANMAR MODIFIER LETTER KHAMTI REDUPLICATION
+ {0xAA71, 0xAA76, prN}, // Lo [6] MYANMAR LETTER KHAMTI XA..MYANMAR LOGOGRAM KHAMTI HM
+ {0xAA77, 0xAA79, prN}, // So [3] MYANMAR SYMBOL AITON EXCLAMATION..MYANMAR SYMBOL AITON TWO
+ {0xAA7A, 0xAA7A, prN}, // Lo MYANMAR LETTER AITON RA
+ {0xAA7B, 0xAA7B, prN}, // Mc MYANMAR SIGN PAO KAREN TONE
+ {0xAA7C, 0xAA7C, prN}, // Mn MYANMAR SIGN TAI LAING TONE-2
+ {0xAA7D, 0xAA7D, prN}, // Mc MYANMAR SIGN TAI LAING TONE-5
+ {0xAA7E, 0xAA7F, prN}, // Lo [2] MYANMAR LETTER SHWE PALAUNG CHA..MYANMAR LETTER SHWE PALAUNG SHA
+ {0xAA80, 0xAAAF, prN}, // Lo [48] TAI VIET LETTER LOW KO..TAI VIET LETTER HIGH O
+ {0xAAB0, 0xAAB0, prN}, // Mn TAI VIET MAI KANG
+ {0xAAB1, 0xAAB1, prN}, // Lo TAI VIET VOWEL AA
+ {0xAAB2, 0xAAB4, prN}, // Mn [3] TAI VIET VOWEL I..TAI VIET VOWEL U
+ {0xAAB5, 0xAAB6, prN}, // Lo [2] TAI VIET VOWEL E..TAI VIET VOWEL O
+ {0xAAB7, 0xAAB8, prN}, // Mn [2] TAI VIET MAI KHIT..TAI VIET VOWEL IA
+ {0xAAB9, 0xAABD, prN}, // Lo [5] TAI VIET VOWEL UEA..TAI VIET VOWEL AN
+ {0xAABE, 0xAABF, prN}, // Mn [2] TAI VIET VOWEL AM..TAI VIET TONE MAI EK
+ {0xAAC0, 0xAAC0, prN}, // Lo TAI VIET TONE MAI NUENG
+ {0xAAC1, 0xAAC1, prN}, // Mn TAI VIET TONE MAI THO
+ {0xAAC2, 0xAAC2, prN}, // Lo TAI VIET TONE MAI SONG
+ {0xAADB, 0xAADC, prN}, // Lo [2] TAI VIET SYMBOL KON..TAI VIET SYMBOL NUENG
+ {0xAADD, 0xAADD, prN}, // Lm TAI VIET SYMBOL SAM
+ {0xAADE, 0xAADF, prN}, // Po [2] TAI VIET SYMBOL HO HOI..TAI VIET SYMBOL KOI KOI
+ {0xAAE0, 0xAAEA, prN}, // Lo [11] MEETEI MAYEK LETTER E..MEETEI MAYEK LETTER SSA
+ {0xAAEB, 0xAAEB, prN}, // Mc MEETEI MAYEK VOWEL SIGN II
+ {0xAAEC, 0xAAED, prN}, // Mn [2] MEETEI MAYEK VOWEL SIGN UU..MEETEI MAYEK VOWEL SIGN AAI
+ {0xAAEE, 0xAAEF, prN}, // Mc [2] MEETEI MAYEK VOWEL SIGN AU..MEETEI MAYEK VOWEL SIGN AAU
+ {0xAAF0, 0xAAF1, prN}, // Po [2] MEETEI MAYEK CHEIKHAN..MEETEI MAYEK AHANG KHUDAM
+ {0xAAF2, 0xAAF2, prN}, // Lo MEETEI MAYEK ANJI
+ {0xAAF3, 0xAAF4, prN}, // Lm [2] MEETEI MAYEK SYLLABLE REPETITION MARK..MEETEI MAYEK WORD REPETITION MARK
+ {0xAAF5, 0xAAF5, prN}, // Mc MEETEI MAYEK VOWEL SIGN VISARGA
+ {0xAAF6, 0xAAF6, prN}, // Mn MEETEI MAYEK VIRAMA
+ {0xAB01, 0xAB06, prN}, // Lo [6] ETHIOPIC SYLLABLE TTHU..ETHIOPIC SYLLABLE TTHO
+ {0xAB09, 0xAB0E, prN}, // Lo [6] ETHIOPIC SYLLABLE DDHU..ETHIOPIC SYLLABLE DDHO
+ {0xAB11, 0xAB16, prN}, // Lo [6] ETHIOPIC SYLLABLE DZU..ETHIOPIC SYLLABLE DZO
+ {0xAB20, 0xAB26, prN}, // Lo [7] ETHIOPIC SYLLABLE CCHHA..ETHIOPIC SYLLABLE CCHHO
+ {0xAB28, 0xAB2E, prN}, // Lo [7] ETHIOPIC SYLLABLE BBA..ETHIOPIC SYLLABLE BBO
+ {0xAB30, 0xAB5A, prN}, // Ll [43] LATIN SMALL LETTER BARRED ALPHA..LATIN SMALL LETTER Y WITH SHORT RIGHT LEG
+ {0xAB5B, 0xAB5B, prN}, // Sk MODIFIER BREVE WITH INVERTED BREVE
+ {0xAB5C, 0xAB5F, prN}, // Lm [4] MODIFIER LETTER SMALL HENG..MODIFIER LETTER SMALL U WITH LEFT HOOK
+ {0xAB60, 0xAB68, prN}, // Ll [9] LATIN SMALL LETTER SAKHA YAT..LATIN SMALL LETTER TURNED R WITH MIDDLE TILDE
+ {0xAB69, 0xAB69, prN}, // Lm MODIFIER LETTER SMALL TURNED W
+ {0xAB6A, 0xAB6B, prN}, // Sk [2] MODIFIER LETTER LEFT TACK..MODIFIER LETTER RIGHT TACK
+ {0xAB70, 0xABBF, prN}, // Ll [80] CHEROKEE SMALL LETTER A..CHEROKEE SMALL LETTER YA
+ {0xABC0, 0xABE2, prN}, // Lo [35] MEETEI MAYEK LETTER KOK..MEETEI MAYEK LETTER I LONSUM
+ {0xABE3, 0xABE4, prN}, // Mc [2] MEETEI MAYEK VOWEL SIGN ONAP..MEETEI MAYEK VOWEL SIGN INAP
+ {0xABE5, 0xABE5, prN}, // Mn MEETEI MAYEK VOWEL SIGN ANAP
+ {0xABE6, 0xABE7, prN}, // Mc [2] MEETEI MAYEK VOWEL SIGN YENAP..MEETEI MAYEK VOWEL SIGN SOUNAP
+ {0xABE8, 0xABE8, prN}, // Mn MEETEI MAYEK VOWEL SIGN UNAP
+ {0xABE9, 0xABEA, prN}, // Mc [2] MEETEI MAYEK VOWEL SIGN CHEINAP..MEETEI MAYEK VOWEL SIGN NUNG
+ {0xABEB, 0xABEB, prN}, // Po MEETEI MAYEK CHEIKHEI
+ {0xABEC, 0xABEC, prN}, // Mc MEETEI MAYEK LUM IYEK
+ {0xABED, 0xABED, prN}, // Mn MEETEI MAYEK APUN IYEK
+ {0xABF0, 0xABF9, prN}, // Nd [10] MEETEI MAYEK DIGIT ZERO..MEETEI MAYEK DIGIT NINE
+ {0xAC00, 0xD7A3, prW}, // Lo [11172] HANGUL SYLLABLE GA..HANGUL SYLLABLE HIH
+ {0xD7B0, 0xD7C6, prN}, // Lo [23] HANGUL JUNGSEONG O-YEO..HANGUL JUNGSEONG ARAEA-E
+ {0xD7CB, 0xD7FB, prN}, // Lo [49] HANGUL JONGSEONG NIEUN-RIEUL..HANGUL JONGSEONG PHIEUPH-THIEUTH
+ {0xD800, 0xDB7F, prN}, // Cs [896] ..
+ {0xDB80, 0xDBFF, prN}, // Cs [128] ..
+ {0xDC00, 0xDFFF, prN}, // Cs [1024] ..
+ {0xE000, 0xF8FF, prA}, // Co [6400] ..
+ {0xF900, 0xFA6D, prW}, // Lo [366] CJK COMPATIBILITY IDEOGRAPH-F900..CJK COMPATIBILITY IDEOGRAPH-FA6D
+ {0xFA6E, 0xFA6F, prW}, // Cn [2] ..
+ {0xFA70, 0xFAD9, prW}, // Lo [106] CJK COMPATIBILITY IDEOGRAPH-FA70..CJK COMPATIBILITY IDEOGRAPH-FAD9
+ {0xFADA, 0xFAFF, prW}, // Cn [38] ..
+ {0xFB00, 0xFB06, prN}, // Ll [7] LATIN SMALL LIGATURE FF..LATIN SMALL LIGATURE ST
+ {0xFB13, 0xFB17, prN}, // Ll [5] ARMENIAN SMALL LIGATURE MEN NOW..ARMENIAN SMALL LIGATURE MEN XEH
+ {0xFB1D, 0xFB1D, prN}, // Lo HEBREW LETTER YOD WITH HIRIQ
+ {0xFB1E, 0xFB1E, prN}, // Mn HEBREW POINT JUDEO-SPANISH VARIKA
+ {0xFB1F, 0xFB28, prN}, // Lo [10] HEBREW LIGATURE YIDDISH YOD YOD PATAH..HEBREW LETTER WIDE TAV
+ {0xFB29, 0xFB29, prN}, // Sm HEBREW LETTER ALTERNATIVE PLUS SIGN
+ {0xFB2A, 0xFB36, prN}, // Lo [13] HEBREW LETTER SHIN WITH SHIN DOT..HEBREW LETTER ZAYIN WITH DAGESH
+ {0xFB38, 0xFB3C, prN}, // Lo [5] HEBREW LETTER TET WITH DAGESH..HEBREW LETTER LAMED WITH DAGESH
+ {0xFB3E, 0xFB3E, prN}, // Lo HEBREW LETTER MEM WITH DAGESH
+ {0xFB40, 0xFB41, prN}, // Lo [2] HEBREW LETTER NUN WITH DAGESH..HEBREW LETTER SAMEKH WITH DAGESH
+ {0xFB43, 0xFB44, prN}, // Lo [2] HEBREW LETTER FINAL PE WITH DAGESH..HEBREW LETTER PE WITH DAGESH
+ {0xFB46, 0xFB4F, prN}, // Lo [10] HEBREW LETTER TSADI WITH DAGESH..HEBREW LIGATURE ALEF LAMED
+ {0xFB50, 0xFBB1, prN}, // Lo [98] ARABIC LETTER ALEF WASLA ISOLATED FORM..ARABIC LETTER YEH BARREE WITH HAMZA ABOVE FINAL FORM
+ {0xFBB2, 0xFBC2, prN}, // Sk [17] ARABIC SYMBOL DOT ABOVE..ARABIC SYMBOL WASLA ABOVE
+ {0xFBD3, 0xFD3D, prN}, // Lo [363] ARABIC LETTER NG ISOLATED FORM..ARABIC LIGATURE ALEF WITH FATHATAN ISOLATED FORM
+ {0xFD3E, 0xFD3E, prN}, // Pe ORNATE LEFT PARENTHESIS
+ {0xFD3F, 0xFD3F, prN}, // Ps ORNATE RIGHT PARENTHESIS
+ {0xFD40, 0xFD4F, prN}, // So [16] ARABIC LIGATURE RAHIMAHU ALLAAH..ARABIC LIGATURE RAHIMAHUM ALLAAH
+ {0xFD50, 0xFD8F, prN}, // Lo [64] ARABIC LIGATURE TEH WITH JEEM WITH MEEM INITIAL FORM..ARABIC LIGATURE MEEM WITH KHAH WITH MEEM INITIAL FORM
+ {0xFD92, 0xFDC7, prN}, // Lo [54] ARABIC LIGATURE MEEM WITH JEEM WITH KHAH INITIAL FORM..ARABIC LIGATURE NOON WITH JEEM WITH YEH FINAL FORM
+ {0xFDCF, 0xFDCF, prN}, // So ARABIC LIGATURE SALAAMUHU ALAYNAA
+ {0xFDF0, 0xFDFB, prN}, // Lo [12] ARABIC LIGATURE SALLA USED AS KORANIC STOP SIGN ISOLATED FORM..ARABIC LIGATURE JALLAJALALOUHOU
+ {0xFDFC, 0xFDFC, prN}, // Sc RIAL SIGN
+ {0xFDFD, 0xFDFF, prN}, // So [3] ARABIC LIGATURE BISMILLAH AR-RAHMAN AR-RAHEEM..ARABIC LIGATURE AZZA WA JALL
+ {0xFE00, 0xFE0F, prA}, // Mn [16] VARIATION SELECTOR-1..VARIATION SELECTOR-16
+ {0xFE10, 0xFE16, prW}, // Po [7] PRESENTATION FORM FOR VERTICAL COMMA..PRESENTATION FORM FOR VERTICAL QUESTION MARK
+ {0xFE17, 0xFE17, prW}, // Ps PRESENTATION FORM FOR VERTICAL LEFT WHITE LENTICULAR BRACKET
+ {0xFE18, 0xFE18, prW}, // Pe PRESENTATION FORM FOR VERTICAL RIGHT WHITE LENTICULAR BRAKCET
+ {0xFE19, 0xFE19, prW}, // Po PRESENTATION FORM FOR VERTICAL HORIZONTAL ELLIPSIS
+ {0xFE20, 0xFE2F, prN}, // Mn [16] COMBINING LIGATURE LEFT HALF..COMBINING CYRILLIC TITLO RIGHT HALF
+ {0xFE30, 0xFE30, prW}, // Po PRESENTATION FORM FOR VERTICAL TWO DOT LEADER
+ {0xFE31, 0xFE32, prW}, // Pd [2] PRESENTATION FORM FOR VERTICAL EM DASH..PRESENTATION FORM FOR VERTICAL EN DASH
+ {0xFE33, 0xFE34, prW}, // Pc [2] PRESENTATION FORM FOR VERTICAL LOW LINE..PRESENTATION FORM FOR VERTICAL WAVY LOW LINE
+ {0xFE35, 0xFE35, prW}, // Ps PRESENTATION FORM FOR VERTICAL LEFT PARENTHESIS
+ {0xFE36, 0xFE36, prW}, // Pe PRESENTATION FORM FOR VERTICAL RIGHT PARENTHESIS
+ {0xFE37, 0xFE37, prW}, // Ps PRESENTATION FORM FOR VERTICAL LEFT CURLY BRACKET
+ {0xFE38, 0xFE38, prW}, // Pe PRESENTATION FORM FOR VERTICAL RIGHT CURLY BRACKET
+ {0xFE39, 0xFE39, prW}, // Ps PRESENTATION FORM FOR VERTICAL LEFT TORTOISE SHELL BRACKET
+ {0xFE3A, 0xFE3A, prW}, // Pe PRESENTATION FORM FOR VERTICAL RIGHT TORTOISE SHELL BRACKET
+ {0xFE3B, 0xFE3B, prW}, // Ps PRESENTATION FORM FOR VERTICAL LEFT BLACK LENTICULAR BRACKET
+ {0xFE3C, 0xFE3C, prW}, // Pe PRESENTATION FORM FOR VERTICAL RIGHT BLACK LENTICULAR BRACKET
+ {0xFE3D, 0xFE3D, prW}, // Ps PRESENTATION FORM FOR VERTICAL LEFT DOUBLE ANGLE BRACKET
+ {0xFE3E, 0xFE3E, prW}, // Pe PRESENTATION FORM FOR VERTICAL RIGHT DOUBLE ANGLE BRACKET
+ {0xFE3F, 0xFE3F, prW}, // Ps PRESENTATION FORM FOR VERTICAL LEFT ANGLE BRACKET
+ {0xFE40, 0xFE40, prW}, // Pe PRESENTATION FORM FOR VERTICAL RIGHT ANGLE BRACKET
+ {0xFE41, 0xFE41, prW}, // Ps PRESENTATION FORM FOR VERTICAL LEFT CORNER BRACKET
+ {0xFE42, 0xFE42, prW}, // Pe PRESENTATION FORM FOR VERTICAL RIGHT CORNER BRACKET
+ {0xFE43, 0xFE43, prW}, // Ps PRESENTATION FORM FOR VERTICAL LEFT WHITE CORNER BRACKET
+ {0xFE44, 0xFE44, prW}, // Pe PRESENTATION FORM FOR VERTICAL RIGHT WHITE CORNER BRACKET
+ {0xFE45, 0xFE46, prW}, // Po [2] SESAME DOT..WHITE SESAME DOT
+ {0xFE47, 0xFE47, prW}, // Ps PRESENTATION FORM FOR VERTICAL LEFT SQUARE BRACKET
+ {0xFE48, 0xFE48, prW}, // Pe PRESENTATION FORM FOR VERTICAL RIGHT SQUARE BRACKET
+ {0xFE49, 0xFE4C, prW}, // Po [4] DASHED OVERLINE..DOUBLE WAVY OVERLINE
+ {0xFE4D, 0xFE4F, prW}, // Pc [3] DASHED LOW LINE..WAVY LOW LINE
+ {0xFE50, 0xFE52, prW}, // Po [3] SMALL COMMA..SMALL FULL STOP
+ {0xFE54, 0xFE57, prW}, // Po [4] SMALL SEMICOLON..SMALL EXCLAMATION MARK
+ {0xFE58, 0xFE58, prW}, // Pd SMALL EM DASH
+ {0xFE59, 0xFE59, prW}, // Ps SMALL LEFT PARENTHESIS
+ {0xFE5A, 0xFE5A, prW}, // Pe SMALL RIGHT PARENTHESIS
+ {0xFE5B, 0xFE5B, prW}, // Ps SMALL LEFT CURLY BRACKET
+ {0xFE5C, 0xFE5C, prW}, // Pe SMALL RIGHT CURLY BRACKET
+ {0xFE5D, 0xFE5D, prW}, // Ps SMALL LEFT TORTOISE SHELL BRACKET
+ {0xFE5E, 0xFE5E, prW}, // Pe SMALL RIGHT TORTOISE SHELL BRACKET
+ {0xFE5F, 0xFE61, prW}, // Po [3] SMALL NUMBER SIGN..SMALL ASTERISK
+ {0xFE62, 0xFE62, prW}, // Sm SMALL PLUS SIGN
+ {0xFE63, 0xFE63, prW}, // Pd SMALL HYPHEN-MINUS
+ {0xFE64, 0xFE66, prW}, // Sm [3] SMALL LESS-THAN SIGN..SMALL EQUALS SIGN
+ {0xFE68, 0xFE68, prW}, // Po SMALL REVERSE SOLIDUS
+ {0xFE69, 0xFE69, prW}, // Sc SMALL DOLLAR SIGN
+ {0xFE6A, 0xFE6B, prW}, // Po [2] SMALL PERCENT SIGN..SMALL COMMERCIAL AT
+ {0xFE70, 0xFE74, prN}, // Lo [5] ARABIC FATHATAN ISOLATED FORM..ARABIC KASRATAN ISOLATED FORM
+ {0xFE76, 0xFEFC, prN}, // Lo [135] ARABIC FATHA ISOLATED FORM..ARABIC LIGATURE LAM WITH ALEF FINAL FORM
+ {0xFEFF, 0xFEFF, prN}, // Cf ZERO WIDTH NO-BREAK SPACE
+ {0xFF01, 0xFF03, prF}, // Po [3] FULLWIDTH EXCLAMATION MARK..FULLWIDTH NUMBER SIGN
+ {0xFF04, 0xFF04, prF}, // Sc FULLWIDTH DOLLAR SIGN
+ {0xFF05, 0xFF07, prF}, // Po [3] FULLWIDTH PERCENT SIGN..FULLWIDTH APOSTROPHE
+ {0xFF08, 0xFF08, prF}, // Ps FULLWIDTH LEFT PARENTHESIS
+ {0xFF09, 0xFF09, prF}, // Pe FULLWIDTH RIGHT PARENTHESIS
+ {0xFF0A, 0xFF0A, prF}, // Po FULLWIDTH ASTERISK
+ {0xFF0B, 0xFF0B, prF}, // Sm FULLWIDTH PLUS SIGN
+ {0xFF0C, 0xFF0C, prF}, // Po FULLWIDTH COMMA
+ {0xFF0D, 0xFF0D, prF}, // Pd FULLWIDTH HYPHEN-MINUS
+ {0xFF0E, 0xFF0F, prF}, // Po [2] FULLWIDTH FULL STOP..FULLWIDTH SOLIDUS
+ {0xFF10, 0xFF19, prF}, // Nd [10] FULLWIDTH DIGIT ZERO..FULLWIDTH DIGIT NINE
+ {0xFF1A, 0xFF1B, prF}, // Po [2] FULLWIDTH COLON..FULLWIDTH SEMICOLON
+ {0xFF1C, 0xFF1E, prF}, // Sm [3] FULLWIDTH LESS-THAN SIGN..FULLWIDTH GREATER-THAN SIGN
+ {0xFF1F, 0xFF20, prF}, // Po [2] FULLWIDTH QUESTION MARK..FULLWIDTH COMMERCIAL AT
+ {0xFF21, 0xFF3A, prF}, // Lu [26] FULLWIDTH LATIN CAPITAL LETTER A..FULLWIDTH LATIN CAPITAL LETTER Z
+ {0xFF3B, 0xFF3B, prF}, // Ps FULLWIDTH LEFT SQUARE BRACKET
+ {0xFF3C, 0xFF3C, prF}, // Po FULLWIDTH REVERSE SOLIDUS
+ {0xFF3D, 0xFF3D, prF}, // Pe FULLWIDTH RIGHT SQUARE BRACKET
+ {0xFF3E, 0xFF3E, prF}, // Sk FULLWIDTH CIRCUMFLEX ACCENT
+ {0xFF3F, 0xFF3F, prF}, // Pc FULLWIDTH LOW LINE
+ {0xFF40, 0xFF40, prF}, // Sk FULLWIDTH GRAVE ACCENT
+ {0xFF41, 0xFF5A, prF}, // Ll [26] FULLWIDTH LATIN SMALL LETTER A..FULLWIDTH LATIN SMALL LETTER Z
+ {0xFF5B, 0xFF5B, prF}, // Ps FULLWIDTH LEFT CURLY BRACKET
+ {0xFF5C, 0xFF5C, prF}, // Sm FULLWIDTH VERTICAL LINE
+ {0xFF5D, 0xFF5D, prF}, // Pe FULLWIDTH RIGHT CURLY BRACKET
+ {0xFF5E, 0xFF5E, prF}, // Sm FULLWIDTH TILDE
+ {0xFF5F, 0xFF5F, prF}, // Ps FULLWIDTH LEFT WHITE PARENTHESIS
+ {0xFF60, 0xFF60, prF}, // Pe FULLWIDTH RIGHT WHITE PARENTHESIS
+ {0xFF61, 0xFF61, prH}, // Po HALFWIDTH IDEOGRAPHIC FULL STOP
+ {0xFF62, 0xFF62, prH}, // Ps HALFWIDTH LEFT CORNER BRACKET
+ {0xFF63, 0xFF63, prH}, // Pe HALFWIDTH RIGHT CORNER BRACKET
+ {0xFF64, 0xFF65, prH}, // Po [2] HALFWIDTH IDEOGRAPHIC COMMA..HALFWIDTH KATAKANA MIDDLE DOT
+ {0xFF66, 0xFF6F, prH}, // Lo [10] HALFWIDTH KATAKANA LETTER WO..HALFWIDTH KATAKANA LETTER SMALL TU
+ {0xFF70, 0xFF70, prH}, // Lm HALFWIDTH KATAKANA-HIRAGANA PROLONGED SOUND MARK
+ {0xFF71, 0xFF9D, prH}, // Lo [45] HALFWIDTH KATAKANA LETTER A..HALFWIDTH KATAKANA LETTER N
+ {0xFF9E, 0xFF9F, prH}, // Lm [2] HALFWIDTH KATAKANA VOICED SOUND MARK..HALFWIDTH KATAKANA SEMI-VOICED SOUND MARK
+ {0xFFA0, 0xFFBE, prH}, // Lo [31] HALFWIDTH HANGUL FILLER..HALFWIDTH HANGUL LETTER HIEUH
+ {0xFFC2, 0xFFC7, prH}, // Lo [6] HALFWIDTH HANGUL LETTER A..HALFWIDTH HANGUL LETTER E
+ {0xFFCA, 0xFFCF, prH}, // Lo [6] HALFWIDTH HANGUL LETTER YEO..HALFWIDTH HANGUL LETTER OE
+ {0xFFD2, 0xFFD7, prH}, // Lo [6] HALFWIDTH HANGUL LETTER YO..HALFWIDTH HANGUL LETTER YU
+ {0xFFDA, 0xFFDC, prH}, // Lo [3] HALFWIDTH HANGUL LETTER EU..HALFWIDTH HANGUL LETTER I
+ {0xFFE0, 0xFFE1, prF}, // Sc [2] FULLWIDTH CENT SIGN..FULLWIDTH POUND SIGN
+ {0xFFE2, 0xFFE2, prF}, // Sm FULLWIDTH NOT SIGN
+ {0xFFE3, 0xFFE3, prF}, // Sk FULLWIDTH MACRON
+ {0xFFE4, 0xFFE4, prF}, // So FULLWIDTH BROKEN BAR
+ {0xFFE5, 0xFFE6, prF}, // Sc [2] FULLWIDTH YEN SIGN..FULLWIDTH WON SIGN
+ {0xFFE8, 0xFFE8, prH}, // So HALFWIDTH FORMS LIGHT VERTICAL
+ {0xFFE9, 0xFFEC, prH}, // Sm [4] HALFWIDTH LEFTWARDS ARROW..HALFWIDTH DOWNWARDS ARROW
+ {0xFFED, 0xFFEE, prH}, // So [2] HALFWIDTH BLACK SQUARE..HALFWIDTH WHITE CIRCLE
+ {0xFFF9, 0xFFFB, prN}, // Cf [3] INTERLINEAR ANNOTATION ANCHOR..INTERLINEAR ANNOTATION TERMINATOR
+ {0xFFFC, 0xFFFC, prN}, // So OBJECT REPLACEMENT CHARACTER
+ {0xFFFD, 0xFFFD, prA}, // So REPLACEMENT CHARACTER
+ {0x10000, 0x1000B, prN}, // Lo [12] LINEAR B SYLLABLE B008 A..LINEAR B SYLLABLE B046 JE
+ {0x1000D, 0x10026, prN}, // Lo [26] LINEAR B SYLLABLE B036 JO..LINEAR B SYLLABLE B032 QO
+ {0x10028, 0x1003A, prN}, // Lo [19] LINEAR B SYLLABLE B060 RA..LINEAR B SYLLABLE B042 WO
+ {0x1003C, 0x1003D, prN}, // Lo [2] LINEAR B SYLLABLE B017 ZA..LINEAR B SYLLABLE B074 ZE
+ {0x1003F, 0x1004D, prN}, // Lo [15] LINEAR B SYLLABLE B020 ZO..LINEAR B SYLLABLE B091 TWO
+ {0x10050, 0x1005D, prN}, // Lo [14] LINEAR B SYMBOL B018..LINEAR B SYMBOL B089
+ {0x10080, 0x100FA, prN}, // Lo [123] LINEAR B IDEOGRAM B100 MAN..LINEAR B IDEOGRAM VESSEL B305
+ {0x10100, 0x10102, prN}, // Po [3] AEGEAN WORD SEPARATOR LINE..AEGEAN CHECK MARK
+ {0x10107, 0x10133, prN}, // No [45] AEGEAN NUMBER ONE..AEGEAN NUMBER NINETY THOUSAND
+ {0x10137, 0x1013F, prN}, // So [9] AEGEAN WEIGHT BASE UNIT..AEGEAN MEASURE THIRD SUBUNIT
+ {0x10140, 0x10174, prN}, // Nl [53] GREEK ACROPHONIC ATTIC ONE QUARTER..GREEK ACROPHONIC STRATIAN FIFTY MNAS
+ {0x10175, 0x10178, prN}, // No [4] GREEK ONE HALF SIGN..GREEK THREE QUARTERS SIGN
+ {0x10179, 0x10189, prN}, // So [17] GREEK YEAR SIGN..GREEK TRYBLION BASE SIGN
+ {0x1018A, 0x1018B, prN}, // No [2] GREEK ZERO SIGN..GREEK ONE QUARTER SIGN
+ {0x1018C, 0x1018E, prN}, // So [3] GREEK SINUSOID SIGN..NOMISMA SIGN
+ {0x10190, 0x1019C, prN}, // So [13] ROMAN SEXTANS SIGN..ASCIA SYMBOL
+ {0x101A0, 0x101A0, prN}, // So GREEK SYMBOL TAU RHO
+ {0x101D0, 0x101FC, prN}, // So [45] PHAISTOS DISC SIGN PEDESTRIAN..PHAISTOS DISC SIGN WAVY BAND
+ {0x101FD, 0x101FD, prN}, // Mn PHAISTOS DISC SIGN COMBINING OBLIQUE STROKE
+ {0x10280, 0x1029C, prN}, // Lo [29] LYCIAN LETTER A..LYCIAN LETTER X
+ {0x102A0, 0x102D0, prN}, // Lo [49] CARIAN LETTER A..CARIAN LETTER UUU3
+ {0x102E0, 0x102E0, prN}, // Mn COPTIC EPACT THOUSANDS MARK
+ {0x102E1, 0x102FB, prN}, // No [27] COPTIC EPACT DIGIT ONE..COPTIC EPACT NUMBER NINE HUNDRED
+ {0x10300, 0x1031F, prN}, // Lo [32] OLD ITALIC LETTER A..OLD ITALIC LETTER ESS
+ {0x10320, 0x10323, prN}, // No [4] OLD ITALIC NUMERAL ONE..OLD ITALIC NUMERAL FIFTY
+ {0x1032D, 0x1032F, prN}, // Lo [3] OLD ITALIC LETTER YE..OLD ITALIC LETTER SOUTHERN TSE
+ {0x10330, 0x10340, prN}, // Lo [17] GOTHIC LETTER AHSA..GOTHIC LETTER PAIRTHRA
+ {0x10341, 0x10341, prN}, // Nl GOTHIC LETTER NINETY
+ {0x10342, 0x10349, prN}, // Lo [8] GOTHIC LETTER RAIDA..GOTHIC LETTER OTHAL
+ {0x1034A, 0x1034A, prN}, // Nl GOTHIC LETTER NINE HUNDRED
+ {0x10350, 0x10375, prN}, // Lo [38] OLD PERMIC LETTER AN..OLD PERMIC LETTER IA
+ {0x10376, 0x1037A, prN}, // Mn [5] COMBINING OLD PERMIC LETTER AN..COMBINING OLD PERMIC LETTER SII
+ {0x10380, 0x1039D, prN}, // Lo [30] UGARITIC LETTER ALPA..UGARITIC LETTER SSU
+ {0x1039F, 0x1039F, prN}, // Po UGARITIC WORD DIVIDER
+ {0x103A0, 0x103C3, prN}, // Lo [36] OLD PERSIAN SIGN A..OLD PERSIAN SIGN HA
+ {0x103C8, 0x103CF, prN}, // Lo [8] OLD PERSIAN SIGN AURAMAZDAA..OLD PERSIAN SIGN BUUMISH
+ {0x103D0, 0x103D0, prN}, // Po OLD PERSIAN WORD DIVIDER
+ {0x103D1, 0x103D5, prN}, // Nl [5] OLD PERSIAN NUMBER ONE..OLD PERSIAN NUMBER HUNDRED
+ {0x10400, 0x1044F, prN}, // L& [80] DESERET CAPITAL LETTER LONG I..DESERET SMALL LETTER EW
+ {0x10450, 0x1047F, prN}, // Lo [48] SHAVIAN LETTER PEEP..SHAVIAN LETTER YEW
+ {0x10480, 0x1049D, prN}, // Lo [30] OSMANYA LETTER ALEF..OSMANYA LETTER OO
+ {0x104A0, 0x104A9, prN}, // Nd [10] OSMANYA DIGIT ZERO..OSMANYA DIGIT NINE
+ {0x104B0, 0x104D3, prN}, // Lu [36] OSAGE CAPITAL LETTER A..OSAGE CAPITAL LETTER ZHA
+ {0x104D8, 0x104FB, prN}, // Ll [36] OSAGE SMALL LETTER A..OSAGE SMALL LETTER ZHA
+ {0x10500, 0x10527, prN}, // Lo [40] ELBASAN LETTER A..ELBASAN LETTER KHE
+ {0x10530, 0x10563, prN}, // Lo [52] CAUCASIAN ALBANIAN LETTER ALT..CAUCASIAN ALBANIAN LETTER KIW
+ {0x1056F, 0x1056F, prN}, // Po CAUCASIAN ALBANIAN CITATION MARK
+ {0x10570, 0x1057A, prN}, // Lu [11] VITHKUQI CAPITAL LETTER A..VITHKUQI CAPITAL LETTER GA
+ {0x1057C, 0x1058A, prN}, // Lu [15] VITHKUQI CAPITAL LETTER HA..VITHKUQI CAPITAL LETTER RE
+ {0x1058C, 0x10592, prN}, // Lu [7] VITHKUQI CAPITAL LETTER SE..VITHKUQI CAPITAL LETTER XE
+ {0x10594, 0x10595, prN}, // Lu [2] VITHKUQI CAPITAL LETTER Y..VITHKUQI CAPITAL LETTER ZE
+ {0x10597, 0x105A1, prN}, // Ll [11] VITHKUQI SMALL LETTER A..VITHKUQI SMALL LETTER GA
+ {0x105A3, 0x105B1, prN}, // Ll [15] VITHKUQI SMALL LETTER HA..VITHKUQI SMALL LETTER RE
+ {0x105B3, 0x105B9, prN}, // Ll [7] VITHKUQI SMALL LETTER SE..VITHKUQI SMALL LETTER XE
+ {0x105BB, 0x105BC, prN}, // Ll [2] VITHKUQI SMALL LETTER Y..VITHKUQI SMALL LETTER ZE
+ {0x10600, 0x10736, prN}, // Lo [311] LINEAR A SIGN AB001..LINEAR A SIGN A664
+ {0x10740, 0x10755, prN}, // Lo [22] LINEAR A SIGN A701 A..LINEAR A SIGN A732 JE
+ {0x10760, 0x10767, prN}, // Lo [8] LINEAR A SIGN A800..LINEAR A SIGN A807
+ {0x10780, 0x10785, prN}, // Lm [6] MODIFIER LETTER SMALL CAPITAL AA..MODIFIER LETTER SMALL B WITH HOOK
+ {0x10787, 0x107B0, prN}, // Lm [42] MODIFIER LETTER SMALL DZ DIGRAPH..MODIFIER LETTER SMALL V WITH RIGHT HOOK
+ {0x107B2, 0x107BA, prN}, // Lm [9] MODIFIER LETTER SMALL CAPITAL Y..MODIFIER LETTER SMALL S WITH CURL
+ {0x10800, 0x10805, prN}, // Lo [6] CYPRIOT SYLLABLE A..CYPRIOT SYLLABLE JA
+ {0x10808, 0x10808, prN}, // Lo CYPRIOT SYLLABLE JO
+ {0x1080A, 0x10835, prN}, // Lo [44] CYPRIOT SYLLABLE KA..CYPRIOT SYLLABLE WO
+ {0x10837, 0x10838, prN}, // Lo [2] CYPRIOT SYLLABLE XA..CYPRIOT SYLLABLE XE
+ {0x1083C, 0x1083C, prN}, // Lo CYPRIOT SYLLABLE ZA
+ {0x1083F, 0x1083F, prN}, // Lo CYPRIOT SYLLABLE ZO
+ {0x10840, 0x10855, prN}, // Lo [22] IMPERIAL ARAMAIC LETTER ALEPH..IMPERIAL ARAMAIC LETTER TAW
+ {0x10857, 0x10857, prN}, // Po IMPERIAL ARAMAIC SECTION SIGN
+ {0x10858, 0x1085F, prN}, // No [8] IMPERIAL ARAMAIC NUMBER ONE..IMPERIAL ARAMAIC NUMBER TEN THOUSAND
+ {0x10860, 0x10876, prN}, // Lo [23] PALMYRENE LETTER ALEPH..PALMYRENE LETTER TAW
+ {0x10877, 0x10878, prN}, // So [2] PALMYRENE LEFT-POINTING FLEURON..PALMYRENE RIGHT-POINTING FLEURON
+ {0x10879, 0x1087F, prN}, // No [7] PALMYRENE NUMBER ONE..PALMYRENE NUMBER TWENTY
+ {0x10880, 0x1089E, prN}, // Lo [31] NABATAEAN LETTER FINAL ALEPH..NABATAEAN LETTER TAW
+ {0x108A7, 0x108AF, prN}, // No [9] NABATAEAN NUMBER ONE..NABATAEAN NUMBER ONE HUNDRED
+ {0x108E0, 0x108F2, prN}, // Lo [19] HATRAN LETTER ALEPH..HATRAN LETTER QOPH
+ {0x108F4, 0x108F5, prN}, // Lo [2] HATRAN LETTER SHIN..HATRAN LETTER TAW
+ {0x108FB, 0x108FF, prN}, // No [5] HATRAN NUMBER ONE..HATRAN NUMBER ONE HUNDRED
+ {0x10900, 0x10915, prN}, // Lo [22] PHOENICIAN LETTER ALF..PHOENICIAN LETTER TAU
+ {0x10916, 0x1091B, prN}, // No [6] PHOENICIAN NUMBER ONE..PHOENICIAN NUMBER THREE
+ {0x1091F, 0x1091F, prN}, // Po PHOENICIAN WORD SEPARATOR
+ {0x10920, 0x10939, prN}, // Lo [26] LYDIAN LETTER A..LYDIAN LETTER C
+ {0x1093F, 0x1093F, prN}, // Po LYDIAN TRIANGULAR MARK
+ {0x10980, 0x1099F, prN}, // Lo [32] MEROITIC HIEROGLYPHIC LETTER A..MEROITIC HIEROGLYPHIC SYMBOL VIDJ-2
+ {0x109A0, 0x109B7, prN}, // Lo [24] MEROITIC CURSIVE LETTER A..MEROITIC CURSIVE LETTER DA
+ {0x109BC, 0x109BD, prN}, // No [2] MEROITIC CURSIVE FRACTION ELEVEN TWELFTHS..MEROITIC CURSIVE FRACTION ONE HALF
+ {0x109BE, 0x109BF, prN}, // Lo [2] MEROITIC CURSIVE LOGOGRAM RMT..MEROITIC CURSIVE LOGOGRAM IMN
+ {0x109C0, 0x109CF, prN}, // No [16] MEROITIC CURSIVE NUMBER ONE..MEROITIC CURSIVE NUMBER SEVENTY
+ {0x109D2, 0x109FF, prN}, // No [46] MEROITIC CURSIVE NUMBER ONE HUNDRED..MEROITIC CURSIVE FRACTION TEN TWELFTHS
+ {0x10A00, 0x10A00, prN}, // Lo KHAROSHTHI LETTER A
+ {0x10A01, 0x10A03, prN}, // Mn [3] KHAROSHTHI VOWEL SIGN I..KHAROSHTHI VOWEL SIGN VOCALIC R
+ {0x10A05, 0x10A06, prN}, // Mn [2] KHAROSHTHI VOWEL SIGN E..KHAROSHTHI VOWEL SIGN O
+ {0x10A0C, 0x10A0F, prN}, // Mn [4] KHAROSHTHI VOWEL LENGTH MARK..KHAROSHTHI SIGN VISARGA
+ {0x10A10, 0x10A13, prN}, // Lo [4] KHAROSHTHI LETTER KA..KHAROSHTHI LETTER GHA
+ {0x10A15, 0x10A17, prN}, // Lo [3] KHAROSHTHI LETTER CA..KHAROSHTHI LETTER JA
+ {0x10A19, 0x10A35, prN}, // Lo [29] KHAROSHTHI LETTER NYA..KHAROSHTHI LETTER VHA
+ {0x10A38, 0x10A3A, prN}, // Mn [3] KHAROSHTHI SIGN BAR ABOVE..KHAROSHTHI SIGN DOT BELOW
+ {0x10A3F, 0x10A3F, prN}, // Mn KHAROSHTHI VIRAMA
+ {0x10A40, 0x10A48, prN}, // No [9] KHAROSHTHI DIGIT ONE..KHAROSHTHI FRACTION ONE HALF
+ {0x10A50, 0x10A58, prN}, // Po [9] KHAROSHTHI PUNCTUATION DOT..KHAROSHTHI PUNCTUATION LINES
+ {0x10A60, 0x10A7C, prN}, // Lo [29] OLD SOUTH ARABIAN LETTER HE..OLD SOUTH ARABIAN LETTER THETH
+ {0x10A7D, 0x10A7E, prN}, // No [2] OLD SOUTH ARABIAN NUMBER ONE..OLD SOUTH ARABIAN NUMBER FIFTY
+ {0x10A7F, 0x10A7F, prN}, // Po OLD SOUTH ARABIAN NUMERIC INDICATOR
+ {0x10A80, 0x10A9C, prN}, // Lo [29] OLD NORTH ARABIAN LETTER HEH..OLD NORTH ARABIAN LETTER ZAH
+ {0x10A9D, 0x10A9F, prN}, // No [3] OLD NORTH ARABIAN NUMBER ONE..OLD NORTH ARABIAN NUMBER TWENTY
+ {0x10AC0, 0x10AC7, prN}, // Lo [8] MANICHAEAN LETTER ALEPH..MANICHAEAN LETTER WAW
+ {0x10AC8, 0x10AC8, prN}, // So MANICHAEAN SIGN UD
+ {0x10AC9, 0x10AE4, prN}, // Lo [28] MANICHAEAN LETTER ZAYIN..MANICHAEAN LETTER TAW
+ {0x10AE5, 0x10AE6, prN}, // Mn [2] MANICHAEAN ABBREVIATION MARK ABOVE..MANICHAEAN ABBREVIATION MARK BELOW
+ {0x10AEB, 0x10AEF, prN}, // No [5] MANICHAEAN NUMBER ONE..MANICHAEAN NUMBER ONE HUNDRED
+ {0x10AF0, 0x10AF6, prN}, // Po [7] MANICHAEAN PUNCTUATION STAR..MANICHAEAN PUNCTUATION LINE FILLER
+ {0x10B00, 0x10B35, prN}, // Lo [54] AVESTAN LETTER A..AVESTAN LETTER HE
+ {0x10B39, 0x10B3F, prN}, // Po [7] AVESTAN ABBREVIATION MARK..LARGE ONE RING OVER TWO RINGS PUNCTUATION
+ {0x10B40, 0x10B55, prN}, // Lo [22] INSCRIPTIONAL PARTHIAN LETTER ALEPH..INSCRIPTIONAL PARTHIAN LETTER TAW
+ {0x10B58, 0x10B5F, prN}, // No [8] INSCRIPTIONAL PARTHIAN NUMBER ONE..INSCRIPTIONAL PARTHIAN NUMBER ONE THOUSAND
+ {0x10B60, 0x10B72, prN}, // Lo [19] INSCRIPTIONAL PAHLAVI LETTER ALEPH..INSCRIPTIONAL PAHLAVI LETTER TAW
+ {0x10B78, 0x10B7F, prN}, // No [8] INSCRIPTIONAL PAHLAVI NUMBER ONE..INSCRIPTIONAL PAHLAVI NUMBER ONE THOUSAND
+ {0x10B80, 0x10B91, prN}, // Lo [18] PSALTER PAHLAVI LETTER ALEPH..PSALTER PAHLAVI LETTER TAW
+ {0x10B99, 0x10B9C, prN}, // Po [4] PSALTER PAHLAVI SECTION MARK..PSALTER PAHLAVI FOUR DOTS WITH DOT
+ {0x10BA9, 0x10BAF, prN}, // No [7] PSALTER PAHLAVI NUMBER ONE..PSALTER PAHLAVI NUMBER ONE HUNDRED
+ {0x10C00, 0x10C48, prN}, // Lo [73] OLD TURKIC LETTER ORKHON A..OLD TURKIC LETTER ORKHON BASH
+ {0x10C80, 0x10CB2, prN}, // Lu [51] OLD HUNGARIAN CAPITAL LETTER A..OLD HUNGARIAN CAPITAL LETTER US
+ {0x10CC0, 0x10CF2, prN}, // Ll [51] OLD HUNGARIAN SMALL LETTER A..OLD HUNGARIAN SMALL LETTER US
+ {0x10CFA, 0x10CFF, prN}, // No [6] OLD HUNGARIAN NUMBER ONE..OLD HUNGARIAN NUMBER ONE THOUSAND
+ {0x10D00, 0x10D23, prN}, // Lo [36] HANIFI ROHINGYA LETTER A..HANIFI ROHINGYA MARK NA KHONNA
+ {0x10D24, 0x10D27, prN}, // Mn [4] HANIFI ROHINGYA SIGN HARBAHAY..HANIFI ROHINGYA SIGN TASSI
+ {0x10D30, 0x10D39, prN}, // Nd [10] HANIFI ROHINGYA DIGIT ZERO..HANIFI ROHINGYA DIGIT NINE
+ {0x10E60, 0x10E7E, prN}, // No [31] RUMI DIGIT ONE..RUMI FRACTION TWO THIRDS
+ {0x10E80, 0x10EA9, prN}, // Lo [42] YEZIDI LETTER ELIF..YEZIDI LETTER ET
+ {0x10EAB, 0x10EAC, prN}, // Mn [2] YEZIDI COMBINING HAMZA MARK..YEZIDI COMBINING MADDA MARK
+ {0x10EAD, 0x10EAD, prN}, // Pd YEZIDI HYPHENATION MARK
+ {0x10EB0, 0x10EB1, prN}, // Lo [2] YEZIDI LETTER LAM WITH DOT ABOVE..YEZIDI LETTER YOT WITH CIRCUMFLEX ABOVE
+ {0x10F00, 0x10F1C, prN}, // Lo [29] OLD SOGDIAN LETTER ALEPH..OLD SOGDIAN LETTER FINAL TAW WITH VERTICAL TAIL
+ {0x10F1D, 0x10F26, prN}, // No [10] OLD SOGDIAN NUMBER ONE..OLD SOGDIAN FRACTION ONE HALF
+ {0x10F27, 0x10F27, prN}, // Lo OLD SOGDIAN LIGATURE AYIN-DALETH
+ {0x10F30, 0x10F45, prN}, // Lo [22] SOGDIAN LETTER ALEPH..SOGDIAN INDEPENDENT SHIN
+ {0x10F46, 0x10F50, prN}, // Mn [11] SOGDIAN COMBINING DOT BELOW..SOGDIAN COMBINING STROKE BELOW
+ {0x10F51, 0x10F54, prN}, // No [4] SOGDIAN NUMBER ONE..SOGDIAN NUMBER ONE HUNDRED
+ {0x10F55, 0x10F59, prN}, // Po [5] SOGDIAN PUNCTUATION TWO VERTICAL BARS..SOGDIAN PUNCTUATION HALF CIRCLE WITH DOT
+ {0x10F70, 0x10F81, prN}, // Lo [18] OLD UYGHUR LETTER ALEPH..OLD UYGHUR LETTER LESH
+ {0x10F82, 0x10F85, prN}, // Mn [4] OLD UYGHUR COMBINING DOT ABOVE..OLD UYGHUR COMBINING TWO DOTS BELOW
+ {0x10F86, 0x10F89, prN}, // Po [4] OLD UYGHUR PUNCTUATION BAR..OLD UYGHUR PUNCTUATION FOUR DOTS
+ {0x10FB0, 0x10FC4, prN}, // Lo [21] CHORASMIAN LETTER ALEPH..CHORASMIAN LETTER TAW
+ {0x10FC5, 0x10FCB, prN}, // No [7] CHORASMIAN NUMBER ONE..CHORASMIAN NUMBER ONE HUNDRED
+ {0x10FE0, 0x10FF6, prN}, // Lo [23] ELYMAIC LETTER ALEPH..ELYMAIC LIGATURE ZAYIN-YODH
+ {0x11000, 0x11000, prN}, // Mc BRAHMI SIGN CANDRABINDU
+ {0x11001, 0x11001, prN}, // Mn BRAHMI SIGN ANUSVARA
+ {0x11002, 0x11002, prN}, // Mc BRAHMI SIGN VISARGA
+ {0x11003, 0x11037, prN}, // Lo [53] BRAHMI SIGN JIHVAMULIYA..BRAHMI LETTER OLD TAMIL NNNA
+ {0x11038, 0x11046, prN}, // Mn [15] BRAHMI VOWEL SIGN AA..BRAHMI VIRAMA
+ {0x11047, 0x1104D, prN}, // Po [7] BRAHMI DANDA..BRAHMI PUNCTUATION LOTUS
+ {0x11052, 0x11065, prN}, // No [20] BRAHMI NUMBER ONE..BRAHMI NUMBER ONE THOUSAND
+ {0x11066, 0x1106F, prN}, // Nd [10] BRAHMI DIGIT ZERO..BRAHMI DIGIT NINE
+ {0x11070, 0x11070, prN}, // Mn BRAHMI SIGN OLD TAMIL VIRAMA
+ {0x11071, 0x11072, prN}, // Lo [2] BRAHMI LETTER OLD TAMIL SHORT E..BRAHMI LETTER OLD TAMIL SHORT O
+ {0x11073, 0x11074, prN}, // Mn [2] BRAHMI VOWEL SIGN OLD TAMIL SHORT E..BRAHMI VOWEL SIGN OLD TAMIL SHORT O
+ {0x11075, 0x11075, prN}, // Lo BRAHMI LETTER OLD TAMIL LLA
+ {0x1107F, 0x1107F, prN}, // Mn BRAHMI NUMBER JOINER
+ {0x11080, 0x11081, prN}, // Mn [2] KAITHI SIGN CANDRABINDU..KAITHI SIGN ANUSVARA
+ {0x11082, 0x11082, prN}, // Mc KAITHI SIGN VISARGA
+ {0x11083, 0x110AF, prN}, // Lo [45] KAITHI LETTER A..KAITHI LETTER HA
+ {0x110B0, 0x110B2, prN}, // Mc [3] KAITHI VOWEL SIGN AA..KAITHI VOWEL SIGN II
+ {0x110B3, 0x110B6, prN}, // Mn [4] KAITHI VOWEL SIGN U..KAITHI VOWEL SIGN AI
+ {0x110B7, 0x110B8, prN}, // Mc [2] KAITHI VOWEL SIGN O..KAITHI VOWEL SIGN AU
+ {0x110B9, 0x110BA, prN}, // Mn [2] KAITHI SIGN VIRAMA..KAITHI SIGN NUKTA
+ {0x110BB, 0x110BC, prN}, // Po [2] KAITHI ABBREVIATION SIGN..KAITHI ENUMERATION SIGN
+ {0x110BD, 0x110BD, prN}, // Cf KAITHI NUMBER SIGN
+ {0x110BE, 0x110C1, prN}, // Po [4] KAITHI SECTION MARK..KAITHI DOUBLE DANDA
+ {0x110C2, 0x110C2, prN}, // Mn KAITHI VOWEL SIGN VOCALIC R
+ {0x110CD, 0x110CD, prN}, // Cf KAITHI NUMBER SIGN ABOVE
+ {0x110D0, 0x110E8, prN}, // Lo [25] SORA SOMPENG LETTER SAH..SORA SOMPENG LETTER MAE
+ {0x110F0, 0x110F9, prN}, // Nd [10] SORA SOMPENG DIGIT ZERO..SORA SOMPENG DIGIT NINE
+ {0x11100, 0x11102, prN}, // Mn [3] CHAKMA SIGN CANDRABINDU..CHAKMA SIGN VISARGA
+ {0x11103, 0x11126, prN}, // Lo [36] CHAKMA LETTER AA..CHAKMA LETTER HAA
+ {0x11127, 0x1112B, prN}, // Mn [5] CHAKMA VOWEL SIGN A..CHAKMA VOWEL SIGN UU
+ {0x1112C, 0x1112C, prN}, // Mc CHAKMA VOWEL SIGN E
+ {0x1112D, 0x11134, prN}, // Mn [8] CHAKMA VOWEL SIGN AI..CHAKMA MAAYYAA
+ {0x11136, 0x1113F, prN}, // Nd [10] CHAKMA DIGIT ZERO..CHAKMA DIGIT NINE
+ {0x11140, 0x11143, prN}, // Po [4] CHAKMA SECTION MARK..CHAKMA QUESTION MARK
+ {0x11144, 0x11144, prN}, // Lo CHAKMA LETTER LHAA
+ {0x11145, 0x11146, prN}, // Mc [2] CHAKMA VOWEL SIGN AA..CHAKMA VOWEL SIGN EI
+ {0x11147, 0x11147, prN}, // Lo CHAKMA LETTER VAA
+ {0x11150, 0x11172, prN}, // Lo [35] MAHAJANI LETTER A..MAHAJANI LETTER RRA
+ {0x11173, 0x11173, prN}, // Mn MAHAJANI SIGN NUKTA
+ {0x11174, 0x11175, prN}, // Po [2] MAHAJANI ABBREVIATION SIGN..MAHAJANI SECTION MARK
+ {0x11176, 0x11176, prN}, // Lo MAHAJANI LIGATURE SHRI
+ {0x11180, 0x11181, prN}, // Mn [2] SHARADA SIGN CANDRABINDU..SHARADA SIGN ANUSVARA
+ {0x11182, 0x11182, prN}, // Mc SHARADA SIGN VISARGA
+ {0x11183, 0x111B2, prN}, // Lo [48] SHARADA LETTER A..SHARADA LETTER HA
+ {0x111B3, 0x111B5, prN}, // Mc [3] SHARADA VOWEL SIGN AA..SHARADA VOWEL SIGN II
+ {0x111B6, 0x111BE, prN}, // Mn [9] SHARADA VOWEL SIGN U..SHARADA VOWEL SIGN O
+ {0x111BF, 0x111C0, prN}, // Mc [2] SHARADA VOWEL SIGN AU..SHARADA SIGN VIRAMA
+ {0x111C1, 0x111C4, prN}, // Lo [4] SHARADA SIGN AVAGRAHA..SHARADA OM
+ {0x111C5, 0x111C8, prN}, // Po [4] SHARADA DANDA..SHARADA SEPARATOR
+ {0x111C9, 0x111CC, prN}, // Mn [4] SHARADA SANDHI MARK..SHARADA EXTRA SHORT VOWEL MARK
+ {0x111CD, 0x111CD, prN}, // Po SHARADA SUTRA MARK
+ {0x111CE, 0x111CE, prN}, // Mc SHARADA VOWEL SIGN PRISHTHAMATRA E
+ {0x111CF, 0x111CF, prN}, // Mn SHARADA SIGN INVERTED CANDRABINDU
+ {0x111D0, 0x111D9, prN}, // Nd [10] SHARADA DIGIT ZERO..SHARADA DIGIT NINE
+ {0x111DA, 0x111DA, prN}, // Lo SHARADA EKAM
+ {0x111DB, 0x111DB, prN}, // Po SHARADA SIGN SIDDHAM
+ {0x111DC, 0x111DC, prN}, // Lo SHARADA HEADSTROKE
+ {0x111DD, 0x111DF, prN}, // Po [3] SHARADA CONTINUATION SIGN..SHARADA SECTION MARK-2
+ {0x111E1, 0x111F4, prN}, // No [20] SINHALA ARCHAIC DIGIT ONE..SINHALA ARCHAIC NUMBER ONE THOUSAND
+ {0x11200, 0x11211, prN}, // Lo [18] KHOJKI LETTER A..KHOJKI LETTER JJA
+ {0x11213, 0x1122B, prN}, // Lo [25] KHOJKI LETTER NYA..KHOJKI LETTER LLA
+ {0x1122C, 0x1122E, prN}, // Mc [3] KHOJKI VOWEL SIGN AA..KHOJKI VOWEL SIGN II
+ {0x1122F, 0x11231, prN}, // Mn [3] KHOJKI VOWEL SIGN U..KHOJKI VOWEL SIGN AI
+ {0x11232, 0x11233, prN}, // Mc [2] KHOJKI VOWEL SIGN O..KHOJKI VOWEL SIGN AU
+ {0x11234, 0x11234, prN}, // Mn KHOJKI SIGN ANUSVARA
+ {0x11235, 0x11235, prN}, // Mc KHOJKI SIGN VIRAMA
+ {0x11236, 0x11237, prN}, // Mn [2] KHOJKI SIGN NUKTA..KHOJKI SIGN SHADDA
+ {0x11238, 0x1123D, prN}, // Po [6] KHOJKI DANDA..KHOJKI ABBREVIATION SIGN
+ {0x1123E, 0x1123E, prN}, // Mn KHOJKI SIGN SUKUN
+ {0x11280, 0x11286, prN}, // Lo [7] MULTANI LETTER A..MULTANI LETTER GA
+ {0x11288, 0x11288, prN}, // Lo MULTANI LETTER GHA
+ {0x1128A, 0x1128D, prN}, // Lo [4] MULTANI LETTER CA..MULTANI LETTER JJA
+ {0x1128F, 0x1129D, prN}, // Lo [15] MULTANI LETTER NYA..MULTANI LETTER BA
+ {0x1129F, 0x112A8, prN}, // Lo [10] MULTANI LETTER BHA..MULTANI LETTER RHA
+ {0x112A9, 0x112A9, prN}, // Po MULTANI SECTION MARK
+ {0x112B0, 0x112DE, prN}, // Lo [47] KHUDAWADI LETTER A..KHUDAWADI LETTER HA
+ {0x112DF, 0x112DF, prN}, // Mn KHUDAWADI SIGN ANUSVARA
+ {0x112E0, 0x112E2, prN}, // Mc [3] KHUDAWADI VOWEL SIGN AA..KHUDAWADI VOWEL SIGN II
+ {0x112E3, 0x112EA, prN}, // Mn [8] KHUDAWADI VOWEL SIGN U..KHUDAWADI SIGN VIRAMA
+ {0x112F0, 0x112F9, prN}, // Nd [10] KHUDAWADI DIGIT ZERO..KHUDAWADI DIGIT NINE
+ {0x11300, 0x11301, prN}, // Mn [2] GRANTHA SIGN COMBINING ANUSVARA ABOVE..GRANTHA SIGN CANDRABINDU
+ {0x11302, 0x11303, prN}, // Mc [2] GRANTHA SIGN ANUSVARA..GRANTHA SIGN VISARGA
+ {0x11305, 0x1130C, prN}, // Lo [8] GRANTHA LETTER A..GRANTHA LETTER VOCALIC L
+ {0x1130F, 0x11310, prN}, // Lo [2] GRANTHA LETTER EE..GRANTHA LETTER AI
+ {0x11313, 0x11328, prN}, // Lo [22] GRANTHA LETTER OO..GRANTHA LETTER NA
+ {0x1132A, 0x11330, prN}, // Lo [7] GRANTHA LETTER PA..GRANTHA LETTER RA
+ {0x11332, 0x11333, prN}, // Lo [2] GRANTHA LETTER LA..GRANTHA LETTER LLA
+ {0x11335, 0x11339, prN}, // Lo [5] GRANTHA LETTER VA..GRANTHA LETTER HA
+ {0x1133B, 0x1133C, prN}, // Mn [2] COMBINING BINDU BELOW..GRANTHA SIGN NUKTA
+ {0x1133D, 0x1133D, prN}, // Lo GRANTHA SIGN AVAGRAHA
+ {0x1133E, 0x1133F, prN}, // Mc [2] GRANTHA VOWEL SIGN AA..GRANTHA VOWEL SIGN I
+ {0x11340, 0x11340, prN}, // Mn GRANTHA VOWEL SIGN II
+ {0x11341, 0x11344, prN}, // Mc [4] GRANTHA VOWEL SIGN U..GRANTHA VOWEL SIGN VOCALIC RR
+ {0x11347, 0x11348, prN}, // Mc [2] GRANTHA VOWEL SIGN EE..GRANTHA VOWEL SIGN AI
+ {0x1134B, 0x1134D, prN}, // Mc [3] GRANTHA VOWEL SIGN OO..GRANTHA SIGN VIRAMA
+ {0x11350, 0x11350, prN}, // Lo GRANTHA OM
+ {0x11357, 0x11357, prN}, // Mc GRANTHA AU LENGTH MARK
+ {0x1135D, 0x11361, prN}, // Lo [5] GRANTHA SIGN PLUTA..GRANTHA LETTER VOCALIC LL
+ {0x11362, 0x11363, prN}, // Mc [2] GRANTHA VOWEL SIGN VOCALIC L..GRANTHA VOWEL SIGN VOCALIC LL
+ {0x11366, 0x1136C, prN}, // Mn [7] COMBINING GRANTHA DIGIT ZERO..COMBINING GRANTHA DIGIT SIX
+ {0x11370, 0x11374, prN}, // Mn [5] COMBINING GRANTHA LETTER A..COMBINING GRANTHA LETTER PA
+ {0x11400, 0x11434, prN}, // Lo [53] NEWA LETTER A..NEWA LETTER HA
+ {0x11435, 0x11437, prN}, // Mc [3] NEWA VOWEL SIGN AA..NEWA VOWEL SIGN II
+ {0x11438, 0x1143F, prN}, // Mn [8] NEWA VOWEL SIGN U..NEWA VOWEL SIGN AI
+ {0x11440, 0x11441, prN}, // Mc [2] NEWA VOWEL SIGN O..NEWA VOWEL SIGN AU
+ {0x11442, 0x11444, prN}, // Mn [3] NEWA SIGN VIRAMA..NEWA SIGN ANUSVARA
+ {0x11445, 0x11445, prN}, // Mc NEWA SIGN VISARGA
+ {0x11446, 0x11446, prN}, // Mn NEWA SIGN NUKTA
+ {0x11447, 0x1144A, prN}, // Lo [4] NEWA SIGN AVAGRAHA..NEWA SIDDHI
+ {0x1144B, 0x1144F, prN}, // Po [5] NEWA DANDA..NEWA ABBREVIATION SIGN
+ {0x11450, 0x11459, prN}, // Nd [10] NEWA DIGIT ZERO..NEWA DIGIT NINE
+ {0x1145A, 0x1145B, prN}, // Po [2] NEWA DOUBLE COMMA..NEWA PLACEHOLDER MARK
+ {0x1145D, 0x1145D, prN}, // Po NEWA INSERTION SIGN
+ {0x1145E, 0x1145E, prN}, // Mn NEWA SANDHI MARK
+ {0x1145F, 0x11461, prN}, // Lo [3] NEWA LETTER VEDIC ANUSVARA..NEWA SIGN UPADHMANIYA
+ {0x11480, 0x114AF, prN}, // Lo [48] TIRHUTA ANJI..TIRHUTA LETTER HA
+ {0x114B0, 0x114B2, prN}, // Mc [3] TIRHUTA VOWEL SIGN AA..TIRHUTA VOWEL SIGN II
+ {0x114B3, 0x114B8, prN}, // Mn [6] TIRHUTA VOWEL SIGN U..TIRHUTA VOWEL SIGN VOCALIC LL
+ {0x114B9, 0x114B9, prN}, // Mc TIRHUTA VOWEL SIGN E
+ {0x114BA, 0x114BA, prN}, // Mn TIRHUTA VOWEL SIGN SHORT E
+ {0x114BB, 0x114BE, prN}, // Mc [4] TIRHUTA VOWEL SIGN AI..TIRHUTA VOWEL SIGN AU
+ {0x114BF, 0x114C0, prN}, // Mn [2] TIRHUTA SIGN CANDRABINDU..TIRHUTA SIGN ANUSVARA
+ {0x114C1, 0x114C1, prN}, // Mc TIRHUTA SIGN VISARGA
+ {0x114C2, 0x114C3, prN}, // Mn [2] TIRHUTA SIGN VIRAMA..TIRHUTA SIGN NUKTA
+ {0x114C4, 0x114C5, prN}, // Lo [2] TIRHUTA SIGN AVAGRAHA..TIRHUTA GVANG
+ {0x114C6, 0x114C6, prN}, // Po TIRHUTA ABBREVIATION SIGN
+ {0x114C7, 0x114C7, prN}, // Lo TIRHUTA OM
+ {0x114D0, 0x114D9, prN}, // Nd [10] TIRHUTA DIGIT ZERO..TIRHUTA DIGIT NINE
+ {0x11580, 0x115AE, prN}, // Lo [47] SIDDHAM LETTER A..SIDDHAM LETTER HA
+ {0x115AF, 0x115B1, prN}, // Mc [3] SIDDHAM VOWEL SIGN AA..SIDDHAM VOWEL SIGN II
+ {0x115B2, 0x115B5, prN}, // Mn [4] SIDDHAM VOWEL SIGN U..SIDDHAM VOWEL SIGN VOCALIC RR
+ {0x115B8, 0x115BB, prN}, // Mc [4] SIDDHAM VOWEL SIGN E..SIDDHAM VOWEL SIGN AU
+ {0x115BC, 0x115BD, prN}, // Mn [2] SIDDHAM SIGN CANDRABINDU..SIDDHAM SIGN ANUSVARA
+ {0x115BE, 0x115BE, prN}, // Mc SIDDHAM SIGN VISARGA
+ {0x115BF, 0x115C0, prN}, // Mn [2] SIDDHAM SIGN VIRAMA..SIDDHAM SIGN NUKTA
+ {0x115C1, 0x115D7, prN}, // Po [23] SIDDHAM SIGN SIDDHAM..SIDDHAM SECTION MARK WITH CIRCLES AND FOUR ENCLOSURES
+ {0x115D8, 0x115DB, prN}, // Lo [4] SIDDHAM LETTER THREE-CIRCLE ALTERNATE I..SIDDHAM LETTER ALTERNATE U
+ {0x115DC, 0x115DD, prN}, // Mn [2] SIDDHAM VOWEL SIGN ALTERNATE U..SIDDHAM VOWEL SIGN ALTERNATE UU
+ {0x11600, 0x1162F, prN}, // Lo [48] MODI LETTER A..MODI LETTER LLA
+ {0x11630, 0x11632, prN}, // Mc [3] MODI VOWEL SIGN AA..MODI VOWEL SIGN II
+ {0x11633, 0x1163A, prN}, // Mn [8] MODI VOWEL SIGN U..MODI VOWEL SIGN AI
+ {0x1163B, 0x1163C, prN}, // Mc [2] MODI VOWEL SIGN O..MODI VOWEL SIGN AU
+ {0x1163D, 0x1163D, prN}, // Mn MODI SIGN ANUSVARA
+ {0x1163E, 0x1163E, prN}, // Mc MODI SIGN VISARGA
+ {0x1163F, 0x11640, prN}, // Mn [2] MODI SIGN VIRAMA..MODI SIGN ARDHACANDRA
+ {0x11641, 0x11643, prN}, // Po [3] MODI DANDA..MODI ABBREVIATION SIGN
+ {0x11644, 0x11644, prN}, // Lo MODI SIGN HUVA
+ {0x11650, 0x11659, prN}, // Nd [10] MODI DIGIT ZERO..MODI DIGIT NINE
+ {0x11660, 0x1166C, prN}, // Po [13] MONGOLIAN BIRGA WITH ORNAMENT..MONGOLIAN TURNED SWIRL BIRGA WITH DOUBLE ORNAMENT
+ {0x11680, 0x116AA, prN}, // Lo [43] TAKRI LETTER A..TAKRI LETTER RRA
+ {0x116AB, 0x116AB, prN}, // Mn TAKRI SIGN ANUSVARA
+ {0x116AC, 0x116AC, prN}, // Mc TAKRI SIGN VISARGA
+ {0x116AD, 0x116AD, prN}, // Mn TAKRI VOWEL SIGN AA
+ {0x116AE, 0x116AF, prN}, // Mc [2] TAKRI VOWEL SIGN I..TAKRI VOWEL SIGN II
+ {0x116B0, 0x116B5, prN}, // Mn [6] TAKRI VOWEL SIGN U..TAKRI VOWEL SIGN AU
+ {0x116B6, 0x116B6, prN}, // Mc TAKRI SIGN VIRAMA
+ {0x116B7, 0x116B7, prN}, // Mn TAKRI SIGN NUKTA
+ {0x116B8, 0x116B8, prN}, // Lo TAKRI LETTER ARCHAIC KHA
+ {0x116B9, 0x116B9, prN}, // Po TAKRI ABBREVIATION SIGN
+ {0x116C0, 0x116C9, prN}, // Nd [10] TAKRI DIGIT ZERO..TAKRI DIGIT NINE
+ {0x11700, 0x1171A, prN}, // Lo [27] AHOM LETTER KA..AHOM LETTER ALTERNATE BA
+ {0x1171D, 0x1171F, prN}, // Mn [3] AHOM CONSONANT SIGN MEDIAL LA..AHOM CONSONANT SIGN MEDIAL LIGATING RA
+ {0x11720, 0x11721, prN}, // Mc [2] AHOM VOWEL SIGN A..AHOM VOWEL SIGN AA
+ {0x11722, 0x11725, prN}, // Mn [4] AHOM VOWEL SIGN I..AHOM VOWEL SIGN UU
+ {0x11726, 0x11726, prN}, // Mc AHOM VOWEL SIGN E
+ {0x11727, 0x1172B, prN}, // Mn [5] AHOM VOWEL SIGN AW..AHOM SIGN KILLER
+ {0x11730, 0x11739, prN}, // Nd [10] AHOM DIGIT ZERO..AHOM DIGIT NINE
+ {0x1173A, 0x1173B, prN}, // No [2] AHOM NUMBER TEN..AHOM NUMBER TWENTY
+ {0x1173C, 0x1173E, prN}, // Po [3] AHOM SIGN SMALL SECTION..AHOM SIGN RULAI
+ {0x1173F, 0x1173F, prN}, // So AHOM SYMBOL VI
+ {0x11740, 0x11746, prN}, // Lo [7] AHOM LETTER CA..AHOM LETTER LLA
+ {0x11800, 0x1182B, prN}, // Lo [44] DOGRA LETTER A..DOGRA LETTER RRA
+ {0x1182C, 0x1182E, prN}, // Mc [3] DOGRA VOWEL SIGN AA..DOGRA VOWEL SIGN II
+ {0x1182F, 0x11837, prN}, // Mn [9] DOGRA VOWEL SIGN U..DOGRA SIGN ANUSVARA
+ {0x11838, 0x11838, prN}, // Mc DOGRA SIGN VISARGA
+ {0x11839, 0x1183A, prN}, // Mn [2] DOGRA SIGN VIRAMA..DOGRA SIGN NUKTA
+ {0x1183B, 0x1183B, prN}, // Po DOGRA ABBREVIATION SIGN
+ {0x118A0, 0x118DF, prN}, // L& [64] WARANG CITI CAPITAL LETTER NGAA..WARANG CITI SMALL LETTER VIYO
+ {0x118E0, 0x118E9, prN}, // Nd [10] WARANG CITI DIGIT ZERO..WARANG CITI DIGIT NINE
+ {0x118EA, 0x118F2, prN}, // No [9] WARANG CITI NUMBER TEN..WARANG CITI NUMBER NINETY
+ {0x118FF, 0x118FF, prN}, // Lo WARANG CITI OM
+ {0x11900, 0x11906, prN}, // Lo [7] DIVES AKURU LETTER A..DIVES AKURU LETTER E
+ {0x11909, 0x11909, prN}, // Lo DIVES AKURU LETTER O
+ {0x1190C, 0x11913, prN}, // Lo [8] DIVES AKURU LETTER KA..DIVES AKURU LETTER JA
+ {0x11915, 0x11916, prN}, // Lo [2] DIVES AKURU LETTER NYA..DIVES AKURU LETTER TTA
+ {0x11918, 0x1192F, prN}, // Lo [24] DIVES AKURU LETTER DDA..DIVES AKURU LETTER ZA
+ {0x11930, 0x11935, prN}, // Mc [6] DIVES AKURU VOWEL SIGN AA..DIVES AKURU VOWEL SIGN E
+ {0x11937, 0x11938, prN}, // Mc [2] DIVES AKURU VOWEL SIGN AI..DIVES AKURU VOWEL SIGN O
+ {0x1193B, 0x1193C, prN}, // Mn [2] DIVES AKURU SIGN ANUSVARA..DIVES AKURU SIGN CANDRABINDU
+ {0x1193D, 0x1193D, prN}, // Mc DIVES AKURU SIGN HALANTA
+ {0x1193E, 0x1193E, prN}, // Mn DIVES AKURU VIRAMA
+ {0x1193F, 0x1193F, prN}, // Lo DIVES AKURU PREFIXED NASAL SIGN
+ {0x11940, 0x11940, prN}, // Mc DIVES AKURU MEDIAL YA
+ {0x11941, 0x11941, prN}, // Lo DIVES AKURU INITIAL RA
+ {0x11942, 0x11942, prN}, // Mc DIVES AKURU MEDIAL RA
+ {0x11943, 0x11943, prN}, // Mn DIVES AKURU SIGN NUKTA
+ {0x11944, 0x11946, prN}, // Po [3] DIVES AKURU DOUBLE DANDA..DIVES AKURU END OF TEXT MARK
+ {0x11950, 0x11959, prN}, // Nd [10] DIVES AKURU DIGIT ZERO..DIVES AKURU DIGIT NINE
+ {0x119A0, 0x119A7, prN}, // Lo [8] NANDINAGARI LETTER A..NANDINAGARI LETTER VOCALIC RR
+ {0x119AA, 0x119D0, prN}, // Lo [39] NANDINAGARI LETTER E..NANDINAGARI LETTER RRA
+ {0x119D1, 0x119D3, prN}, // Mc [3] NANDINAGARI VOWEL SIGN AA..NANDINAGARI VOWEL SIGN II
+ {0x119D4, 0x119D7, prN}, // Mn [4] NANDINAGARI VOWEL SIGN U..NANDINAGARI VOWEL SIGN VOCALIC RR
+ {0x119DA, 0x119DB, prN}, // Mn [2] NANDINAGARI VOWEL SIGN E..NANDINAGARI VOWEL SIGN AI
+ {0x119DC, 0x119DF, prN}, // Mc [4] NANDINAGARI VOWEL SIGN O..NANDINAGARI SIGN VISARGA
+ {0x119E0, 0x119E0, prN}, // Mn NANDINAGARI SIGN VIRAMA
+ {0x119E1, 0x119E1, prN}, // Lo NANDINAGARI SIGN AVAGRAHA
+ {0x119E2, 0x119E2, prN}, // Po NANDINAGARI SIGN SIDDHAM
+ {0x119E3, 0x119E3, prN}, // Lo NANDINAGARI HEADSTROKE
+ {0x119E4, 0x119E4, prN}, // Mc NANDINAGARI VOWEL SIGN PRISHTHAMATRA E
+ {0x11A00, 0x11A00, prN}, // Lo ZANABAZAR SQUARE LETTER A
+ {0x11A01, 0x11A0A, prN}, // Mn [10] ZANABAZAR SQUARE VOWEL SIGN I..ZANABAZAR SQUARE VOWEL LENGTH MARK
+ {0x11A0B, 0x11A32, prN}, // Lo [40] ZANABAZAR SQUARE LETTER KA..ZANABAZAR SQUARE LETTER KSSA
+ {0x11A33, 0x11A38, prN}, // Mn [6] ZANABAZAR SQUARE FINAL CONSONANT MARK..ZANABAZAR SQUARE SIGN ANUSVARA
+ {0x11A39, 0x11A39, prN}, // Mc ZANABAZAR SQUARE SIGN VISARGA
+ {0x11A3A, 0x11A3A, prN}, // Lo ZANABAZAR SQUARE CLUSTER-INITIAL LETTER RA
+ {0x11A3B, 0x11A3E, prN}, // Mn [4] ZANABAZAR SQUARE CLUSTER-FINAL LETTER YA..ZANABAZAR SQUARE CLUSTER-FINAL LETTER VA
+ {0x11A3F, 0x11A46, prN}, // Po [8] ZANABAZAR SQUARE INITIAL HEAD MARK..ZANABAZAR SQUARE CLOSING DOUBLE-LINED HEAD MARK
+ {0x11A47, 0x11A47, prN}, // Mn ZANABAZAR SQUARE SUBJOINER
+ {0x11A50, 0x11A50, prN}, // Lo SOYOMBO LETTER A
+ {0x11A51, 0x11A56, prN}, // Mn [6] SOYOMBO VOWEL SIGN I..SOYOMBO VOWEL SIGN OE
+ {0x11A57, 0x11A58, prN}, // Mc [2] SOYOMBO VOWEL SIGN AI..SOYOMBO VOWEL SIGN AU
+ {0x11A59, 0x11A5B, prN}, // Mn [3] SOYOMBO VOWEL SIGN VOCALIC R..SOYOMBO VOWEL LENGTH MARK
+ {0x11A5C, 0x11A89, prN}, // Lo [46] SOYOMBO LETTER KA..SOYOMBO CLUSTER-INITIAL LETTER SA
+ {0x11A8A, 0x11A96, prN}, // Mn [13] SOYOMBO FINAL CONSONANT SIGN G..SOYOMBO SIGN ANUSVARA
+ {0x11A97, 0x11A97, prN}, // Mc SOYOMBO SIGN VISARGA
+ {0x11A98, 0x11A99, prN}, // Mn [2] SOYOMBO GEMINATION MARK..SOYOMBO SUBJOINER
+ {0x11A9A, 0x11A9C, prN}, // Po [3] SOYOMBO MARK TSHEG..SOYOMBO MARK DOUBLE SHAD
+ {0x11A9D, 0x11A9D, prN}, // Lo SOYOMBO MARK PLUTA
+ {0x11A9E, 0x11AA2, prN}, // Po [5] SOYOMBO HEAD MARK WITH MOON AND SUN AND TRIPLE FLAME..SOYOMBO TERMINAL MARK-2
+ {0x11AB0, 0x11ABF, prN}, // Lo [16] CANADIAN SYLLABICS NATTILIK HI..CANADIAN SYLLABICS SPA
+ {0x11AC0, 0x11AF8, prN}, // Lo [57] PAU CIN HAU LETTER PA..PAU CIN HAU GLOTTAL STOP FINAL
+ {0x11C00, 0x11C08, prN}, // Lo [9] BHAIKSUKI LETTER A..BHAIKSUKI LETTER VOCALIC L
+ {0x11C0A, 0x11C2E, prN}, // Lo [37] BHAIKSUKI LETTER E..BHAIKSUKI LETTER HA
+ {0x11C2F, 0x11C2F, prN}, // Mc BHAIKSUKI VOWEL SIGN AA
+ {0x11C30, 0x11C36, prN}, // Mn [7] BHAIKSUKI VOWEL SIGN I..BHAIKSUKI VOWEL SIGN VOCALIC L
+ {0x11C38, 0x11C3D, prN}, // Mn [6] BHAIKSUKI VOWEL SIGN E..BHAIKSUKI SIGN ANUSVARA
+ {0x11C3E, 0x11C3E, prN}, // Mc BHAIKSUKI SIGN VISARGA
+ {0x11C3F, 0x11C3F, prN}, // Mn BHAIKSUKI SIGN VIRAMA
+ {0x11C40, 0x11C40, prN}, // Lo BHAIKSUKI SIGN AVAGRAHA
+ {0x11C41, 0x11C45, prN}, // Po [5] BHAIKSUKI DANDA..BHAIKSUKI GAP FILLER-2
+ {0x11C50, 0x11C59, prN}, // Nd [10] BHAIKSUKI DIGIT ZERO..BHAIKSUKI DIGIT NINE
+ {0x11C5A, 0x11C6C, prN}, // No [19] BHAIKSUKI NUMBER ONE..BHAIKSUKI HUNDREDS UNIT MARK
+ {0x11C70, 0x11C71, prN}, // Po [2] MARCHEN HEAD MARK..MARCHEN MARK SHAD
+ {0x11C72, 0x11C8F, prN}, // Lo [30] MARCHEN LETTER KA..MARCHEN LETTER A
+ {0x11C92, 0x11CA7, prN}, // Mn [22] MARCHEN SUBJOINED LETTER KA..MARCHEN SUBJOINED LETTER ZA
+ {0x11CA9, 0x11CA9, prN}, // Mc MARCHEN SUBJOINED LETTER YA
+ {0x11CAA, 0x11CB0, prN}, // Mn [7] MARCHEN SUBJOINED LETTER RA..MARCHEN VOWEL SIGN AA
+ {0x11CB1, 0x11CB1, prN}, // Mc MARCHEN VOWEL SIGN I
+ {0x11CB2, 0x11CB3, prN}, // Mn [2] MARCHEN VOWEL SIGN U..MARCHEN VOWEL SIGN E
+ {0x11CB4, 0x11CB4, prN}, // Mc MARCHEN VOWEL SIGN O
+ {0x11CB5, 0x11CB6, prN}, // Mn [2] MARCHEN SIGN ANUSVARA..MARCHEN SIGN CANDRABINDU
+ {0x11D00, 0x11D06, prN}, // Lo [7] MASARAM GONDI LETTER A..MASARAM GONDI LETTER E
+ {0x11D08, 0x11D09, prN}, // Lo [2] MASARAM GONDI LETTER AI..MASARAM GONDI LETTER O
+ {0x11D0B, 0x11D30, prN}, // Lo [38] MASARAM GONDI LETTER AU..MASARAM GONDI LETTER TRA
+ {0x11D31, 0x11D36, prN}, // Mn [6] MASARAM GONDI VOWEL SIGN AA..MASARAM GONDI VOWEL SIGN VOCALIC R
+ {0x11D3A, 0x11D3A, prN}, // Mn MASARAM GONDI VOWEL SIGN E
+ {0x11D3C, 0x11D3D, prN}, // Mn [2] MASARAM GONDI VOWEL SIGN AI..MASARAM GONDI VOWEL SIGN O
+ {0x11D3F, 0x11D45, prN}, // Mn [7] MASARAM GONDI VOWEL SIGN AU..MASARAM GONDI VIRAMA
+ {0x11D46, 0x11D46, prN}, // Lo MASARAM GONDI REPHA
+ {0x11D47, 0x11D47, prN}, // Mn MASARAM GONDI RA-KARA
+ {0x11D50, 0x11D59, prN}, // Nd [10] MASARAM GONDI DIGIT ZERO..MASARAM GONDI DIGIT NINE
+ {0x11D60, 0x11D65, prN}, // Lo [6] GUNJALA GONDI LETTER A..GUNJALA GONDI LETTER UU
+ {0x11D67, 0x11D68, prN}, // Lo [2] GUNJALA GONDI LETTER EE..GUNJALA GONDI LETTER AI
+ {0x11D6A, 0x11D89, prN}, // Lo [32] GUNJALA GONDI LETTER OO..GUNJALA GONDI LETTER SA
+ {0x11D8A, 0x11D8E, prN}, // Mc [5] GUNJALA GONDI VOWEL SIGN AA..GUNJALA GONDI VOWEL SIGN UU
+ {0x11D90, 0x11D91, prN}, // Mn [2] GUNJALA GONDI VOWEL SIGN EE..GUNJALA GONDI VOWEL SIGN AI
+ {0x11D93, 0x11D94, prN}, // Mc [2] GUNJALA GONDI VOWEL SIGN OO..GUNJALA GONDI VOWEL SIGN AU
+ {0x11D95, 0x11D95, prN}, // Mn GUNJALA GONDI SIGN ANUSVARA
+ {0x11D96, 0x11D96, prN}, // Mc GUNJALA GONDI SIGN VISARGA
+ {0x11D97, 0x11D97, prN}, // Mn GUNJALA GONDI VIRAMA
+ {0x11D98, 0x11D98, prN}, // Lo GUNJALA GONDI OM
+ {0x11DA0, 0x11DA9, prN}, // Nd [10] GUNJALA GONDI DIGIT ZERO..GUNJALA GONDI DIGIT NINE
+ {0x11EE0, 0x11EF2, prN}, // Lo [19] MAKASAR LETTER KA..MAKASAR ANGKA
+ {0x11EF3, 0x11EF4, prN}, // Mn [2] MAKASAR VOWEL SIGN I..MAKASAR VOWEL SIGN U
+ {0x11EF5, 0x11EF6, prN}, // Mc [2] MAKASAR VOWEL SIGN E..MAKASAR VOWEL SIGN O
+ {0x11EF7, 0x11EF8, prN}, // Po [2] MAKASAR PASSIMBANG..MAKASAR END OF SECTION
+ {0x11FB0, 0x11FB0, prN}, // Lo LISU LETTER YHA
+ {0x11FC0, 0x11FD4, prN}, // No [21] TAMIL FRACTION ONE THREE-HUNDRED-AND-TWENTIETH..TAMIL FRACTION DOWNSCALING FACTOR KIIZH
+ {0x11FD5, 0x11FDC, prN}, // So [8] TAMIL SIGN NEL..TAMIL SIGN MUKKURUNI
+ {0x11FDD, 0x11FE0, prN}, // Sc [4] TAMIL SIGN KAACU..TAMIL SIGN VARAAKAN
+ {0x11FE1, 0x11FF1, prN}, // So [17] TAMIL SIGN PAARAM..TAMIL SIGN VAKAIYARAA
+ {0x11FFF, 0x11FFF, prN}, // Po TAMIL PUNCTUATION END OF TEXT
+ {0x12000, 0x12399, prN}, // Lo [922] CUNEIFORM SIGN A..CUNEIFORM SIGN U U
+ {0x12400, 0x1246E, prN}, // Nl [111] CUNEIFORM NUMERIC SIGN TWO ASH..CUNEIFORM NUMERIC SIGN NINE U VARIANT FORM
+ {0x12470, 0x12474, prN}, // Po [5] CUNEIFORM PUNCTUATION SIGN OLD ASSYRIAN WORD DIVIDER..CUNEIFORM PUNCTUATION SIGN DIAGONAL QUADCOLON
+ {0x12480, 0x12543, prN}, // Lo [196] CUNEIFORM SIGN AB TIMES NUN TENU..CUNEIFORM SIGN ZU5 TIMES THREE DISH TENU
+ {0x12F90, 0x12FF0, prN}, // Lo [97] CYPRO-MINOAN SIGN CM001..CYPRO-MINOAN SIGN CM114
+ {0x12FF1, 0x12FF2, prN}, // Po [2] CYPRO-MINOAN SIGN CM301..CYPRO-MINOAN SIGN CM302
+ {0x13000, 0x1342E, prN}, // Lo [1071] EGYPTIAN HIEROGLYPH A001..EGYPTIAN HIEROGLYPH AA032
+ {0x13430, 0x13438, prN}, // Cf [9] EGYPTIAN HIEROGLYPH VERTICAL JOINER..EGYPTIAN HIEROGLYPH END SEGMENT
+ {0x14400, 0x14646, prN}, // Lo [583] ANATOLIAN HIEROGLYPH A001..ANATOLIAN HIEROGLYPH A530
+ {0x16800, 0x16A38, prN}, // Lo [569] BAMUM LETTER PHASE-A NGKUE MFON..BAMUM LETTER PHASE-F VUEQ
+ {0x16A40, 0x16A5E, prN}, // Lo [31] MRO LETTER TA..MRO LETTER TEK
+ {0x16A60, 0x16A69, prN}, // Nd [10] MRO DIGIT ZERO..MRO DIGIT NINE
+ {0x16A6E, 0x16A6F, prN}, // Po [2] MRO DANDA..MRO DOUBLE DANDA
+ {0x16A70, 0x16ABE, prN}, // Lo [79] TANGSA LETTER OZ..TANGSA LETTER ZA
+ {0x16AC0, 0x16AC9, prN}, // Nd [10] TANGSA DIGIT ZERO..TANGSA DIGIT NINE
+ {0x16AD0, 0x16AED, prN}, // Lo [30] BASSA VAH LETTER ENNI..BASSA VAH LETTER I
+ {0x16AF0, 0x16AF4, prN}, // Mn [5] BASSA VAH COMBINING HIGH TONE..BASSA VAH COMBINING HIGH-LOW TONE
+ {0x16AF5, 0x16AF5, prN}, // Po BASSA VAH FULL STOP
+ {0x16B00, 0x16B2F, prN}, // Lo [48] PAHAWH HMONG VOWEL KEEB..PAHAWH HMONG CONSONANT CAU
+ {0x16B30, 0x16B36, prN}, // Mn [7] PAHAWH HMONG MARK CIM TUB..PAHAWH HMONG MARK CIM TAUM
+ {0x16B37, 0x16B3B, prN}, // Po [5] PAHAWH HMONG SIGN VOS THOM..PAHAWH HMONG SIGN VOS FEEM
+ {0x16B3C, 0x16B3F, prN}, // So [4] PAHAWH HMONG SIGN XYEEM NTXIV..PAHAWH HMONG SIGN XYEEM FAIB
+ {0x16B40, 0x16B43, prN}, // Lm [4] PAHAWH HMONG SIGN VOS SEEV..PAHAWH HMONG SIGN IB YAM
+ {0x16B44, 0x16B44, prN}, // Po PAHAWH HMONG SIGN XAUS
+ {0x16B45, 0x16B45, prN}, // So PAHAWH HMONG SIGN CIM TSOV ROG
+ {0x16B50, 0x16B59, prN}, // Nd [10] PAHAWH HMONG DIGIT ZERO..PAHAWH HMONG DIGIT NINE
+ {0x16B5B, 0x16B61, prN}, // No [7] PAHAWH HMONG NUMBER TENS..PAHAWH HMONG NUMBER TRILLIONS
+ {0x16B63, 0x16B77, prN}, // Lo [21] PAHAWH HMONG SIGN VOS LUB..PAHAWH HMONG SIGN CIM NRES TOS
+ {0x16B7D, 0x16B8F, prN}, // Lo [19] PAHAWH HMONG CLAN SIGN TSHEEJ..PAHAWH HMONG CLAN SIGN VWJ
+ {0x16E40, 0x16E7F, prN}, // L& [64] MEDEFAIDRIN CAPITAL LETTER M..MEDEFAIDRIN SMALL LETTER Y
+ {0x16E80, 0x16E96, prN}, // No [23] MEDEFAIDRIN DIGIT ZERO..MEDEFAIDRIN DIGIT THREE ALTERNATE FORM
+ {0x16E97, 0x16E9A, prN}, // Po [4] MEDEFAIDRIN COMMA..MEDEFAIDRIN EXCLAMATION OH
+ {0x16F00, 0x16F4A, prN}, // Lo [75] MIAO LETTER PA..MIAO LETTER RTE
+ {0x16F4F, 0x16F4F, prN}, // Mn MIAO SIGN CONSONANT MODIFIER BAR
+ {0x16F50, 0x16F50, prN}, // Lo MIAO LETTER NASALIZATION
+ {0x16F51, 0x16F87, prN}, // Mc [55] MIAO SIGN ASPIRATION..MIAO VOWEL SIGN UI
+ {0x16F8F, 0x16F92, prN}, // Mn [4] MIAO TONE RIGHT..MIAO TONE BELOW
+ {0x16F93, 0x16F9F, prN}, // Lm [13] MIAO LETTER TONE-2..MIAO LETTER REFORMED TONE-8
+ {0x16FE0, 0x16FE1, prW}, // Lm [2] TANGUT ITERATION MARK..NUSHU ITERATION MARK
+ {0x16FE2, 0x16FE2, prW}, // Po OLD CHINESE HOOK MARK
+ {0x16FE3, 0x16FE3, prW}, // Lm OLD CHINESE ITERATION MARK
+ {0x16FE4, 0x16FE4, prW}, // Mn KHITAN SMALL SCRIPT FILLER
+ {0x16FF0, 0x16FF1, prW}, // Mc [2] VIETNAMESE ALTERNATE READING MARK CA..VIETNAMESE ALTERNATE READING MARK NHAY
+ {0x17000, 0x187F7, prW}, // Lo [6136] TANGUT IDEOGRAPH-17000..TANGUT IDEOGRAPH-187F7
+ {0x18800, 0x18AFF, prW}, // Lo [768] TANGUT COMPONENT-001..TANGUT COMPONENT-768
+ {0x18B00, 0x18CD5, prW}, // Lo [470] KHITAN SMALL SCRIPT CHARACTER-18B00..KHITAN SMALL SCRIPT CHARACTER-18CD5
+ {0x18D00, 0x18D08, prW}, // Lo [9] TANGUT IDEOGRAPH-18D00..TANGUT IDEOGRAPH-18D08
+ {0x1AFF0, 0x1AFF3, prW}, // Lm [4] KATAKANA LETTER MINNAN TONE-2..KATAKANA LETTER MINNAN TONE-5
+ {0x1AFF5, 0x1AFFB, prW}, // Lm [7] KATAKANA LETTER MINNAN TONE-7..KATAKANA LETTER MINNAN NASALIZED TONE-5
+ {0x1AFFD, 0x1AFFE, prW}, // Lm [2] KATAKANA LETTER MINNAN NASALIZED TONE-7..KATAKANA LETTER MINNAN NASALIZED TONE-8
+ {0x1B000, 0x1B0FF, prW}, // Lo [256] KATAKANA LETTER ARCHAIC E..HENTAIGANA LETTER RE-2
+ {0x1B100, 0x1B122, prW}, // Lo [35] HENTAIGANA LETTER RE-3..KATAKANA LETTER ARCHAIC WU
+ {0x1B150, 0x1B152, prW}, // Lo [3] HIRAGANA LETTER SMALL WI..HIRAGANA LETTER SMALL WO
+ {0x1B164, 0x1B167, prW}, // Lo [4] KATAKANA LETTER SMALL WI..KATAKANA LETTER SMALL N
+ {0x1B170, 0x1B2FB, prW}, // Lo [396] NUSHU CHARACTER-1B170..NUSHU CHARACTER-1B2FB
+ {0x1BC00, 0x1BC6A, prN}, // Lo [107] DUPLOYAN LETTER H..DUPLOYAN LETTER VOCALIC M
+ {0x1BC70, 0x1BC7C, prN}, // Lo [13] DUPLOYAN AFFIX LEFT HORIZONTAL SECANT..DUPLOYAN AFFIX ATTACHED TANGENT HOOK
+ {0x1BC80, 0x1BC88, prN}, // Lo [9] DUPLOYAN AFFIX HIGH ACUTE..DUPLOYAN AFFIX HIGH VERTICAL
+ {0x1BC90, 0x1BC99, prN}, // Lo [10] DUPLOYAN AFFIX LOW ACUTE..DUPLOYAN AFFIX LOW ARROW
+ {0x1BC9C, 0x1BC9C, prN}, // So DUPLOYAN SIGN O WITH CROSS
+ {0x1BC9D, 0x1BC9E, prN}, // Mn [2] DUPLOYAN THICK LETTER SELECTOR..DUPLOYAN DOUBLE MARK
+ {0x1BC9F, 0x1BC9F, prN}, // Po DUPLOYAN PUNCTUATION CHINOOK FULL STOP
+ {0x1BCA0, 0x1BCA3, prN}, // Cf [4] SHORTHAND FORMAT LETTER OVERLAP..SHORTHAND FORMAT UP STEP
+ {0x1CF00, 0x1CF2D, prN}, // Mn [46] ZNAMENNY COMBINING MARK GORAZDO NIZKO S KRYZHEM ON LEFT..ZNAMENNY COMBINING MARK KRYZH ON LEFT
+ {0x1CF30, 0x1CF46, prN}, // Mn [23] ZNAMENNY COMBINING TONAL RANGE MARK MRACHNO..ZNAMENNY PRIZNAK MODIFIER ROG
+ {0x1CF50, 0x1CFC3, prN}, // So [116] ZNAMENNY NEUME KRYUK..ZNAMENNY NEUME PAUK
+ {0x1D000, 0x1D0F5, prN}, // So [246] BYZANTINE MUSICAL SYMBOL PSILI..BYZANTINE MUSICAL SYMBOL GORGON NEO KATO
+ {0x1D100, 0x1D126, prN}, // So [39] MUSICAL SYMBOL SINGLE BARLINE..MUSICAL SYMBOL DRUM CLEF-2
+ {0x1D129, 0x1D164, prN}, // So [60] MUSICAL SYMBOL MULTIPLE MEASURE REST..MUSICAL SYMBOL ONE HUNDRED TWENTY-EIGHTH NOTE
+ {0x1D165, 0x1D166, prN}, // Mc [2] MUSICAL SYMBOL COMBINING STEM..MUSICAL SYMBOL COMBINING SPRECHGESANG STEM
+ {0x1D167, 0x1D169, prN}, // Mn [3] MUSICAL SYMBOL COMBINING TREMOLO-1..MUSICAL SYMBOL COMBINING TREMOLO-3
+ {0x1D16A, 0x1D16C, prN}, // So [3] MUSICAL SYMBOL FINGERED TREMOLO-1..MUSICAL SYMBOL FINGERED TREMOLO-3
+ {0x1D16D, 0x1D172, prN}, // Mc [6] MUSICAL SYMBOL COMBINING AUGMENTATION DOT..MUSICAL SYMBOL COMBINING FLAG-5
+ {0x1D173, 0x1D17A, prN}, // Cf [8] MUSICAL SYMBOL BEGIN BEAM..MUSICAL SYMBOL END PHRASE
+ {0x1D17B, 0x1D182, prN}, // Mn [8] MUSICAL SYMBOL COMBINING ACCENT..MUSICAL SYMBOL COMBINING LOURE
+ {0x1D183, 0x1D184, prN}, // So [2] MUSICAL SYMBOL ARPEGGIATO UP..MUSICAL SYMBOL ARPEGGIATO DOWN
+ {0x1D185, 0x1D18B, prN}, // Mn [7] MUSICAL SYMBOL COMBINING DOIT..MUSICAL SYMBOL COMBINING TRIPLE TONGUE
+ {0x1D18C, 0x1D1A9, prN}, // So [30] MUSICAL SYMBOL RINFORZANDO..MUSICAL SYMBOL DEGREE SLASH
+ {0x1D1AA, 0x1D1AD, prN}, // Mn [4] MUSICAL SYMBOL COMBINING DOWN BOW..MUSICAL SYMBOL COMBINING SNAP PIZZICATO
+ {0x1D1AE, 0x1D1EA, prN}, // So [61] MUSICAL SYMBOL PEDAL MARK..MUSICAL SYMBOL KORON
+ {0x1D200, 0x1D241, prN}, // So [66] GREEK VOCAL NOTATION SYMBOL-1..GREEK INSTRUMENTAL NOTATION SYMBOL-54
+ {0x1D242, 0x1D244, prN}, // Mn [3] COMBINING GREEK MUSICAL TRISEME..COMBINING GREEK MUSICAL PENTASEME
+ {0x1D245, 0x1D245, prN}, // So GREEK MUSICAL LEIMMA
+ {0x1D2E0, 0x1D2F3, prN}, // No [20] MAYAN NUMERAL ZERO..MAYAN NUMERAL NINETEEN
+ {0x1D300, 0x1D356, prN}, // So [87] MONOGRAM FOR EARTH..TETRAGRAM FOR FOSTERING
+ {0x1D360, 0x1D378, prN}, // No [25] COUNTING ROD UNIT DIGIT ONE..TALLY MARK FIVE
+ {0x1D400, 0x1D454, prN}, // L& [85] MATHEMATICAL BOLD CAPITAL A..MATHEMATICAL ITALIC SMALL G
+ {0x1D456, 0x1D49C, prN}, // L& [71] MATHEMATICAL ITALIC SMALL I..MATHEMATICAL SCRIPT CAPITAL A
+ {0x1D49E, 0x1D49F, prN}, // Lu [2] MATHEMATICAL SCRIPT CAPITAL C..MATHEMATICAL SCRIPT CAPITAL D
+ {0x1D4A2, 0x1D4A2, prN}, // Lu MATHEMATICAL SCRIPT CAPITAL G
+ {0x1D4A5, 0x1D4A6, prN}, // Lu [2] MATHEMATICAL SCRIPT CAPITAL J..MATHEMATICAL SCRIPT CAPITAL K
+ {0x1D4A9, 0x1D4AC, prN}, // Lu [4] MATHEMATICAL SCRIPT CAPITAL N..MATHEMATICAL SCRIPT CAPITAL Q
+ {0x1D4AE, 0x1D4B9, prN}, // L& [12] MATHEMATICAL SCRIPT CAPITAL S..MATHEMATICAL SCRIPT SMALL D
+ {0x1D4BB, 0x1D4BB, prN}, // Ll MATHEMATICAL SCRIPT SMALL F
+ {0x1D4BD, 0x1D4C3, prN}, // Ll [7] MATHEMATICAL SCRIPT SMALL H..MATHEMATICAL SCRIPT SMALL N
+ {0x1D4C5, 0x1D505, prN}, // L& [65] MATHEMATICAL SCRIPT SMALL P..MATHEMATICAL FRAKTUR CAPITAL B
+ {0x1D507, 0x1D50A, prN}, // Lu [4] MATHEMATICAL FRAKTUR CAPITAL D..MATHEMATICAL FRAKTUR CAPITAL G
+ {0x1D50D, 0x1D514, prN}, // Lu [8] MATHEMATICAL FRAKTUR CAPITAL J..MATHEMATICAL FRAKTUR CAPITAL Q
+ {0x1D516, 0x1D51C, prN}, // Lu [7] MATHEMATICAL FRAKTUR CAPITAL S..MATHEMATICAL FRAKTUR CAPITAL Y
+ {0x1D51E, 0x1D539, prN}, // L& [28] MATHEMATICAL FRAKTUR SMALL A..MATHEMATICAL DOUBLE-STRUCK CAPITAL B
+ {0x1D53B, 0x1D53E, prN}, // Lu [4] MATHEMATICAL DOUBLE-STRUCK CAPITAL D..MATHEMATICAL DOUBLE-STRUCK CAPITAL G
+ {0x1D540, 0x1D544, prN}, // Lu [5] MATHEMATICAL DOUBLE-STRUCK CAPITAL I..MATHEMATICAL DOUBLE-STRUCK CAPITAL M
+ {0x1D546, 0x1D546, prN}, // Lu MATHEMATICAL DOUBLE-STRUCK CAPITAL O
+ {0x1D54A, 0x1D550, prN}, // Lu [7] MATHEMATICAL DOUBLE-STRUCK CAPITAL S..MATHEMATICAL DOUBLE-STRUCK CAPITAL Y
+ {0x1D552, 0x1D6A5, prN}, // L& [340] MATHEMATICAL DOUBLE-STRUCK SMALL A..MATHEMATICAL ITALIC SMALL DOTLESS J
+ {0x1D6A8, 0x1D6C0, prN}, // Lu [25] MATHEMATICAL BOLD CAPITAL ALPHA..MATHEMATICAL BOLD CAPITAL OMEGA
+ {0x1D6C1, 0x1D6C1, prN}, // Sm MATHEMATICAL BOLD NABLA
+ {0x1D6C2, 0x1D6DA, prN}, // Ll [25] MATHEMATICAL BOLD SMALL ALPHA..MATHEMATICAL BOLD SMALL OMEGA
+ {0x1D6DB, 0x1D6DB, prN}, // Sm MATHEMATICAL BOLD PARTIAL DIFFERENTIAL
+ {0x1D6DC, 0x1D6FA, prN}, // L& [31] MATHEMATICAL BOLD EPSILON SYMBOL..MATHEMATICAL ITALIC CAPITAL OMEGA
+ {0x1D6FB, 0x1D6FB, prN}, // Sm MATHEMATICAL ITALIC NABLA
+ {0x1D6FC, 0x1D714, prN}, // Ll [25] MATHEMATICAL ITALIC SMALL ALPHA..MATHEMATICAL ITALIC SMALL OMEGA
+ {0x1D715, 0x1D715, prN}, // Sm MATHEMATICAL ITALIC PARTIAL DIFFERENTIAL
+ {0x1D716, 0x1D734, prN}, // L& [31] MATHEMATICAL ITALIC EPSILON SYMBOL..MATHEMATICAL BOLD ITALIC CAPITAL OMEGA
+ {0x1D735, 0x1D735, prN}, // Sm MATHEMATICAL BOLD ITALIC NABLA
+ {0x1D736, 0x1D74E, prN}, // Ll [25] MATHEMATICAL BOLD ITALIC SMALL ALPHA..MATHEMATICAL BOLD ITALIC SMALL OMEGA
+ {0x1D74F, 0x1D74F, prN}, // Sm MATHEMATICAL BOLD ITALIC PARTIAL DIFFERENTIAL
+ {0x1D750, 0x1D76E, prN}, // L& [31] MATHEMATICAL BOLD ITALIC EPSILON SYMBOL..MATHEMATICAL SANS-SERIF BOLD CAPITAL OMEGA
+ {0x1D76F, 0x1D76F, prN}, // Sm MATHEMATICAL SANS-SERIF BOLD NABLA
+ {0x1D770, 0x1D788, prN}, // Ll [25] MATHEMATICAL SANS-SERIF BOLD SMALL ALPHA..MATHEMATICAL SANS-SERIF BOLD SMALL OMEGA
+ {0x1D789, 0x1D789, prN}, // Sm MATHEMATICAL SANS-SERIF BOLD PARTIAL DIFFERENTIAL
+ {0x1D78A, 0x1D7A8, prN}, // L& [31] MATHEMATICAL SANS-SERIF BOLD EPSILON SYMBOL..MATHEMATICAL SANS-SERIF BOLD ITALIC CAPITAL OMEGA
+ {0x1D7A9, 0x1D7A9, prN}, // Sm MATHEMATICAL SANS-SERIF BOLD ITALIC NABLA
+ {0x1D7AA, 0x1D7C2, prN}, // Ll [25] MATHEMATICAL SANS-SERIF BOLD ITALIC SMALL ALPHA..MATHEMATICAL SANS-SERIF BOLD ITALIC SMALL OMEGA
+ {0x1D7C3, 0x1D7C3, prN}, // Sm MATHEMATICAL SANS-SERIF BOLD ITALIC PARTIAL DIFFERENTIAL
+ {0x1D7C4, 0x1D7CB, prN}, // L& [8] MATHEMATICAL SANS-SERIF BOLD ITALIC EPSILON SYMBOL..MATHEMATICAL BOLD SMALL DIGAMMA
+ {0x1D7CE, 0x1D7FF, prN}, // Nd [50] MATHEMATICAL BOLD DIGIT ZERO..MATHEMATICAL MONOSPACE DIGIT NINE
+ {0x1D800, 0x1D9FF, prN}, // So [512] SIGNWRITING HAND-FIST INDEX..SIGNWRITING HEAD
+ {0x1DA00, 0x1DA36, prN}, // Mn [55] SIGNWRITING HEAD RIM..SIGNWRITING AIR SUCKING IN
+ {0x1DA37, 0x1DA3A, prN}, // So [4] SIGNWRITING AIR BLOW SMALL ROTATIONS..SIGNWRITING BREATH EXHALE
+ {0x1DA3B, 0x1DA6C, prN}, // Mn [50] SIGNWRITING MOUTH CLOSED NEUTRAL..SIGNWRITING EXCITEMENT
+ {0x1DA6D, 0x1DA74, prN}, // So [8] SIGNWRITING SHOULDER HIP SPINE..SIGNWRITING TORSO-FLOORPLANE TWISTING
+ {0x1DA75, 0x1DA75, prN}, // Mn SIGNWRITING UPPER BODY TILTING FROM HIP JOINTS
+ {0x1DA76, 0x1DA83, prN}, // So [14] SIGNWRITING LIMB COMBINATION..SIGNWRITING LOCATION DEPTH
+ {0x1DA84, 0x1DA84, prN}, // Mn SIGNWRITING LOCATION HEAD NECK
+ {0x1DA85, 0x1DA86, prN}, // So [2] SIGNWRITING LOCATION TORSO..SIGNWRITING LOCATION LIMBS DIGITS
+ {0x1DA87, 0x1DA8B, prN}, // Po [5] SIGNWRITING COMMA..SIGNWRITING PARENTHESIS
+ {0x1DA9B, 0x1DA9F, prN}, // Mn [5] SIGNWRITING FILL MODIFIER-2..SIGNWRITING FILL MODIFIER-6
+ {0x1DAA1, 0x1DAAF, prN}, // Mn [15] SIGNWRITING ROTATION MODIFIER-2..SIGNWRITING ROTATION MODIFIER-16
+ {0x1DF00, 0x1DF09, prN}, // Ll [10] LATIN SMALL LETTER FENG DIGRAPH WITH TRILL..LATIN SMALL LETTER T WITH HOOK AND RETROFLEX HOOK
+ {0x1DF0A, 0x1DF0A, prN}, // Lo LATIN LETTER RETROFLEX CLICK WITH RETROFLEX HOOK
+ {0x1DF0B, 0x1DF1E, prN}, // Ll [20] LATIN SMALL LETTER ESH WITH DOUBLE BAR..LATIN SMALL LETTER S WITH CURL
+ {0x1E000, 0x1E006, prN}, // Mn [7] COMBINING GLAGOLITIC LETTER AZU..COMBINING GLAGOLITIC LETTER ZHIVETE
+ {0x1E008, 0x1E018, prN}, // Mn [17] COMBINING GLAGOLITIC LETTER ZEMLJA..COMBINING GLAGOLITIC LETTER HERU
+ {0x1E01B, 0x1E021, prN}, // Mn [7] COMBINING GLAGOLITIC LETTER SHTA..COMBINING GLAGOLITIC LETTER YATI
+ {0x1E023, 0x1E024, prN}, // Mn [2] COMBINING GLAGOLITIC LETTER YU..COMBINING GLAGOLITIC LETTER SMALL YUS
+ {0x1E026, 0x1E02A, prN}, // Mn [5] COMBINING GLAGOLITIC LETTER YO..COMBINING GLAGOLITIC LETTER FITA
+ {0x1E100, 0x1E12C, prN}, // Lo [45] NYIAKENG PUACHUE HMONG LETTER MA..NYIAKENG PUACHUE HMONG LETTER W
+ {0x1E130, 0x1E136, prN}, // Mn [7] NYIAKENG PUACHUE HMONG TONE-B..NYIAKENG PUACHUE HMONG TONE-D
+ {0x1E137, 0x1E13D, prN}, // Lm [7] NYIAKENG PUACHUE HMONG SIGN FOR PERSON..NYIAKENG PUACHUE HMONG SYLLABLE LENGTHENER
+ {0x1E140, 0x1E149, prN}, // Nd [10] NYIAKENG PUACHUE HMONG DIGIT ZERO..NYIAKENG PUACHUE HMONG DIGIT NINE
+ {0x1E14E, 0x1E14E, prN}, // Lo NYIAKENG PUACHUE HMONG LOGOGRAM NYAJ
+ {0x1E14F, 0x1E14F, prN}, // So NYIAKENG PUACHUE HMONG CIRCLED CA
+ {0x1E290, 0x1E2AD, prN}, // Lo [30] TOTO LETTER PA..TOTO LETTER A
+ {0x1E2AE, 0x1E2AE, prN}, // Mn TOTO SIGN RISING TONE
+ {0x1E2C0, 0x1E2EB, prN}, // Lo [44] WANCHO LETTER AA..WANCHO LETTER YIH
+ {0x1E2EC, 0x1E2EF, prN}, // Mn [4] WANCHO TONE TUP..WANCHO TONE KOINI
+ {0x1E2F0, 0x1E2F9, prN}, // Nd [10] WANCHO DIGIT ZERO..WANCHO DIGIT NINE
+ {0x1E2FF, 0x1E2FF, prN}, // Sc WANCHO NGUN SIGN
+ {0x1E7E0, 0x1E7E6, prN}, // Lo [7] ETHIOPIC SYLLABLE HHYA..ETHIOPIC SYLLABLE HHYO
+ {0x1E7E8, 0x1E7EB, prN}, // Lo [4] ETHIOPIC SYLLABLE GURAGE HHWA..ETHIOPIC SYLLABLE HHWE
+ {0x1E7ED, 0x1E7EE, prN}, // Lo [2] ETHIOPIC SYLLABLE GURAGE MWI..ETHIOPIC SYLLABLE GURAGE MWEE
+ {0x1E7F0, 0x1E7FE, prN}, // Lo [15] ETHIOPIC SYLLABLE GURAGE QWI..ETHIOPIC SYLLABLE GURAGE PWEE
+ {0x1E800, 0x1E8C4, prN}, // Lo [197] MENDE KIKAKUI SYLLABLE M001 KI..MENDE KIKAKUI SYLLABLE M060 NYON
+ {0x1E8C7, 0x1E8CF, prN}, // No [9] MENDE KIKAKUI DIGIT ONE..MENDE KIKAKUI DIGIT NINE
+ {0x1E8D0, 0x1E8D6, prN}, // Mn [7] MENDE KIKAKUI COMBINING NUMBER TEENS..MENDE KIKAKUI COMBINING NUMBER MILLIONS
+ {0x1E900, 0x1E943, prN}, // L& [68] ADLAM CAPITAL LETTER ALIF..ADLAM SMALL LETTER SHA
+ {0x1E944, 0x1E94A, prN}, // Mn [7] ADLAM ALIF LENGTHENER..ADLAM NUKTA
+ {0x1E94B, 0x1E94B, prN}, // Lm ADLAM NASALIZATION MARK
+ {0x1E950, 0x1E959, prN}, // Nd [10] ADLAM DIGIT ZERO..ADLAM DIGIT NINE
+ {0x1E95E, 0x1E95F, prN}, // Po [2] ADLAM INITIAL EXCLAMATION MARK..ADLAM INITIAL QUESTION MARK
+ {0x1EC71, 0x1ECAB, prN}, // No [59] INDIC SIYAQ NUMBER ONE..INDIC SIYAQ NUMBER PREFIXED NINE
+ {0x1ECAC, 0x1ECAC, prN}, // So INDIC SIYAQ PLACEHOLDER
+ {0x1ECAD, 0x1ECAF, prN}, // No [3] INDIC SIYAQ FRACTION ONE QUARTER..INDIC SIYAQ FRACTION THREE QUARTERS
+ {0x1ECB0, 0x1ECB0, prN}, // Sc INDIC SIYAQ RUPEE MARK
+ {0x1ECB1, 0x1ECB4, prN}, // No [4] INDIC SIYAQ NUMBER ALTERNATE ONE..INDIC SIYAQ ALTERNATE LAKH MARK
+ {0x1ED01, 0x1ED2D, prN}, // No [45] OTTOMAN SIYAQ NUMBER ONE..OTTOMAN SIYAQ NUMBER NINETY THOUSAND
+ {0x1ED2E, 0x1ED2E, prN}, // So OTTOMAN SIYAQ MARRATAN
+ {0x1ED2F, 0x1ED3D, prN}, // No [15] OTTOMAN SIYAQ ALTERNATE NUMBER TWO..OTTOMAN SIYAQ FRACTION ONE SIXTH
+ {0x1EE00, 0x1EE03, prN}, // Lo [4] ARABIC MATHEMATICAL ALEF..ARABIC MATHEMATICAL DAL
+ {0x1EE05, 0x1EE1F, prN}, // Lo [27] ARABIC MATHEMATICAL WAW..ARABIC MATHEMATICAL DOTLESS QAF
+ {0x1EE21, 0x1EE22, prN}, // Lo [2] ARABIC MATHEMATICAL INITIAL BEH..ARABIC MATHEMATICAL INITIAL JEEM
+ {0x1EE24, 0x1EE24, prN}, // Lo ARABIC MATHEMATICAL INITIAL HEH
+ {0x1EE27, 0x1EE27, prN}, // Lo ARABIC MATHEMATICAL INITIAL HAH
+ {0x1EE29, 0x1EE32, prN}, // Lo [10] ARABIC MATHEMATICAL INITIAL YEH..ARABIC MATHEMATICAL INITIAL QAF
+ {0x1EE34, 0x1EE37, prN}, // Lo [4] ARABIC MATHEMATICAL INITIAL SHEEN..ARABIC MATHEMATICAL INITIAL KHAH
+ {0x1EE39, 0x1EE39, prN}, // Lo ARABIC MATHEMATICAL INITIAL DAD
+ {0x1EE3B, 0x1EE3B, prN}, // Lo ARABIC MATHEMATICAL INITIAL GHAIN
+ {0x1EE42, 0x1EE42, prN}, // Lo ARABIC MATHEMATICAL TAILED JEEM
+ {0x1EE47, 0x1EE47, prN}, // Lo ARABIC MATHEMATICAL TAILED HAH
+ {0x1EE49, 0x1EE49, prN}, // Lo ARABIC MATHEMATICAL TAILED YEH
+ {0x1EE4B, 0x1EE4B, prN}, // Lo ARABIC MATHEMATICAL TAILED LAM
+ {0x1EE4D, 0x1EE4F, prN}, // Lo [3] ARABIC MATHEMATICAL TAILED NOON..ARABIC MATHEMATICAL TAILED AIN
+ {0x1EE51, 0x1EE52, prN}, // Lo [2] ARABIC MATHEMATICAL TAILED SAD..ARABIC MATHEMATICAL TAILED QAF
+ {0x1EE54, 0x1EE54, prN}, // Lo ARABIC MATHEMATICAL TAILED SHEEN
+ {0x1EE57, 0x1EE57, prN}, // Lo ARABIC MATHEMATICAL TAILED KHAH
+ {0x1EE59, 0x1EE59, prN}, // Lo ARABIC MATHEMATICAL TAILED DAD
+ {0x1EE5B, 0x1EE5B, prN}, // Lo ARABIC MATHEMATICAL TAILED GHAIN
+ {0x1EE5D, 0x1EE5D, prN}, // Lo ARABIC MATHEMATICAL TAILED DOTLESS NOON
+ {0x1EE5F, 0x1EE5F, prN}, // Lo ARABIC MATHEMATICAL TAILED DOTLESS QAF
+ {0x1EE61, 0x1EE62, prN}, // Lo [2] ARABIC MATHEMATICAL STRETCHED BEH..ARABIC MATHEMATICAL STRETCHED JEEM
+ {0x1EE64, 0x1EE64, prN}, // Lo ARABIC MATHEMATICAL STRETCHED HEH
+ {0x1EE67, 0x1EE6A, prN}, // Lo [4] ARABIC MATHEMATICAL STRETCHED HAH..ARABIC MATHEMATICAL STRETCHED KAF
+ {0x1EE6C, 0x1EE72, prN}, // Lo [7] ARABIC MATHEMATICAL STRETCHED MEEM..ARABIC MATHEMATICAL STRETCHED QAF
+ {0x1EE74, 0x1EE77, prN}, // Lo [4] ARABIC MATHEMATICAL STRETCHED SHEEN..ARABIC MATHEMATICAL STRETCHED KHAH
+ {0x1EE79, 0x1EE7C, prN}, // Lo [4] ARABIC MATHEMATICAL STRETCHED DAD..ARABIC MATHEMATICAL STRETCHED DOTLESS BEH
+ {0x1EE7E, 0x1EE7E, prN}, // Lo ARABIC MATHEMATICAL STRETCHED DOTLESS FEH
+ {0x1EE80, 0x1EE89, prN}, // Lo [10] ARABIC MATHEMATICAL LOOPED ALEF..ARABIC MATHEMATICAL LOOPED YEH
+ {0x1EE8B, 0x1EE9B, prN}, // Lo [17] ARABIC MATHEMATICAL LOOPED LAM..ARABIC MATHEMATICAL LOOPED GHAIN
+ {0x1EEA1, 0x1EEA3, prN}, // Lo [3] ARABIC MATHEMATICAL DOUBLE-STRUCK BEH..ARABIC MATHEMATICAL DOUBLE-STRUCK DAL
+ {0x1EEA5, 0x1EEA9, prN}, // Lo [5] ARABIC MATHEMATICAL DOUBLE-STRUCK WAW..ARABIC MATHEMATICAL DOUBLE-STRUCK YEH
+ {0x1EEAB, 0x1EEBB, prN}, // Lo [17] ARABIC MATHEMATICAL DOUBLE-STRUCK LAM..ARABIC MATHEMATICAL DOUBLE-STRUCK GHAIN
+ {0x1EEF0, 0x1EEF1, prN}, // Sm [2] ARABIC MATHEMATICAL OPERATOR MEEM WITH HAH WITH TATWEEL..ARABIC MATHEMATICAL OPERATOR HAH WITH DAL
+ {0x1F000, 0x1F003, prN}, // So [4] MAHJONG TILE EAST WIND..MAHJONG TILE NORTH WIND
+ {0x1F004, 0x1F004, prW}, // So MAHJONG TILE RED DRAGON
+ {0x1F005, 0x1F02B, prN}, // So [39] MAHJONG TILE GREEN DRAGON..MAHJONG TILE BACK
+ {0x1F030, 0x1F093, prN}, // So [100] DOMINO TILE HORIZONTAL BACK..DOMINO TILE VERTICAL-06-06
+ {0x1F0A0, 0x1F0AE, prN}, // So [15] PLAYING CARD BACK..PLAYING CARD KING OF SPADES
+ {0x1F0B1, 0x1F0BF, prN}, // So [15] PLAYING CARD ACE OF HEARTS..PLAYING CARD RED JOKER
+ {0x1F0C1, 0x1F0CE, prN}, // So [14] PLAYING CARD ACE OF DIAMONDS..PLAYING CARD KING OF DIAMONDS
+ {0x1F0CF, 0x1F0CF, prW}, // So PLAYING CARD BLACK JOKER
+ {0x1F0D1, 0x1F0F5, prN}, // So [37] PLAYING CARD ACE OF CLUBS..PLAYING CARD TRUMP-21
+ {0x1F100, 0x1F10A, prA}, // No [11] DIGIT ZERO FULL STOP..DIGIT NINE COMMA
+ {0x1F10B, 0x1F10C, prN}, // No [2] DINGBAT CIRCLED SANS-SERIF DIGIT ZERO..DINGBAT NEGATIVE CIRCLED SANS-SERIF DIGIT ZERO
+ {0x1F10D, 0x1F10F, prN}, // So [3] CIRCLED ZERO WITH SLASH..CIRCLED DOLLAR SIGN WITH OVERLAID BACKSLASH
+ {0x1F110, 0x1F12D, prA}, // So [30] PARENTHESIZED LATIN CAPITAL LETTER A..CIRCLED CD
+ {0x1F12E, 0x1F12F, prN}, // So [2] CIRCLED WZ..COPYLEFT SYMBOL
+ {0x1F130, 0x1F169, prA}, // So [58] SQUARED LATIN CAPITAL LETTER A..NEGATIVE CIRCLED LATIN CAPITAL LETTER Z
+ {0x1F16A, 0x1F16F, prN}, // So [6] RAISED MC SIGN..CIRCLED HUMAN FIGURE
+ {0x1F170, 0x1F18D, prA}, // So [30] NEGATIVE SQUARED LATIN CAPITAL LETTER A..NEGATIVE SQUARED SA
+ {0x1F18E, 0x1F18E, prW}, // So NEGATIVE SQUARED AB
+ {0x1F18F, 0x1F190, prA}, // So [2] NEGATIVE SQUARED WC..SQUARE DJ
+ {0x1F191, 0x1F19A, prW}, // So [10] SQUARED CL..SQUARED VS
+ {0x1F19B, 0x1F1AC, prA}, // So [18] SQUARED THREE D..SQUARED VOD
+ {0x1F1AD, 0x1F1AD, prN}, // So MASK WORK SYMBOL
+ {0x1F1E6, 0x1F1FF, prN}, // So [26] REGIONAL INDICATOR SYMBOL LETTER A..REGIONAL INDICATOR SYMBOL LETTER Z
+ {0x1F200, 0x1F202, prW}, // So [3] SQUARE HIRAGANA HOKA..SQUARED KATAKANA SA
+ {0x1F210, 0x1F23B, prW}, // So [44] SQUARED CJK UNIFIED IDEOGRAPH-624B..SQUARED CJK UNIFIED IDEOGRAPH-914D
+ {0x1F240, 0x1F248, prW}, // So [9] TORTOISE SHELL BRACKETED CJK UNIFIED IDEOGRAPH-672C..TORTOISE SHELL BRACKETED CJK UNIFIED IDEOGRAPH-6557
+ {0x1F250, 0x1F251, prW}, // So [2] CIRCLED IDEOGRAPH ADVANTAGE..CIRCLED IDEOGRAPH ACCEPT
+ {0x1F260, 0x1F265, prW}, // So [6] ROUNDED SYMBOL FOR FU..ROUNDED SYMBOL FOR CAI
+ {0x1F300, 0x1F320, prW}, // So [33] CYCLONE..SHOOTING STAR
+ {0x1F321, 0x1F32C, prN}, // So [12] THERMOMETER..WIND BLOWING FACE
+ {0x1F32D, 0x1F335, prW}, // So [9] HOT DOG..CACTUS
+ {0x1F336, 0x1F336, prN}, // So HOT PEPPER
+ {0x1F337, 0x1F37C, prW}, // So [70] TULIP..BABY BOTTLE
+ {0x1F37D, 0x1F37D, prN}, // So FORK AND KNIFE WITH PLATE
+ {0x1F37E, 0x1F393, prW}, // So [22] BOTTLE WITH POPPING CORK..GRADUATION CAP
+ {0x1F394, 0x1F39F, prN}, // So [12] HEART WITH TIP ON THE LEFT..ADMISSION TICKETS
+ {0x1F3A0, 0x1F3CA, prW}, // So [43] CAROUSEL HORSE..SWIMMER
+ {0x1F3CB, 0x1F3CE, prN}, // So [4] WEIGHT LIFTER..RACING CAR
+ {0x1F3CF, 0x1F3D3, prW}, // So [5] CRICKET BAT AND BALL..TABLE TENNIS PADDLE AND BALL
+ {0x1F3D4, 0x1F3DF, prN}, // So [12] SNOW CAPPED MOUNTAIN..STADIUM
+ {0x1F3E0, 0x1F3F0, prW}, // So [17] HOUSE BUILDING..EUROPEAN CASTLE
+ {0x1F3F1, 0x1F3F3, prN}, // So [3] WHITE PENNANT..WAVING WHITE FLAG
+ {0x1F3F4, 0x1F3F4, prW}, // So WAVING BLACK FLAG
+ {0x1F3F5, 0x1F3F7, prN}, // So [3] ROSETTE..LABEL
+ {0x1F3F8, 0x1F3FA, prW}, // So [3] BADMINTON RACQUET AND SHUTTLECOCK..AMPHORA
+ {0x1F3FB, 0x1F3FF, prW}, // Sk [5] EMOJI MODIFIER FITZPATRICK TYPE-1-2..EMOJI MODIFIER FITZPATRICK TYPE-6
+ {0x1F400, 0x1F43E, prW}, // So [63] RAT..PAW PRINTS
+ {0x1F43F, 0x1F43F, prN}, // So CHIPMUNK
+ {0x1F440, 0x1F440, prW}, // So EYES
+ {0x1F441, 0x1F441, prN}, // So EYE
+ {0x1F442, 0x1F4FC, prW}, // So [187] EAR..VIDEOCASSETTE
+ {0x1F4FD, 0x1F4FE, prN}, // So [2] FILM PROJECTOR..PORTABLE STEREO
+ {0x1F4FF, 0x1F53D, prW}, // So [63] PRAYER BEADS..DOWN-POINTING SMALL RED TRIANGLE
+ {0x1F53E, 0x1F54A, prN}, // So [13] LOWER RIGHT SHADOWED WHITE CIRCLE..DOVE OF PEACE
+ {0x1F54B, 0x1F54E, prW}, // So [4] KAABA..MENORAH WITH NINE BRANCHES
+ {0x1F54F, 0x1F54F, prN}, // So BOWL OF HYGIEIA
+ {0x1F550, 0x1F567, prW}, // So [24] CLOCK FACE ONE OCLOCK..CLOCK FACE TWELVE-THIRTY
+ {0x1F568, 0x1F579, prN}, // So [18] RIGHT SPEAKER..JOYSTICK
+ {0x1F57A, 0x1F57A, prW}, // So MAN DANCING
+ {0x1F57B, 0x1F594, prN}, // So [26] LEFT HAND TELEPHONE RECEIVER..REVERSED VICTORY HAND
+ {0x1F595, 0x1F596, prW}, // So [2] REVERSED HAND WITH MIDDLE FINGER EXTENDED..RAISED HAND WITH PART BETWEEN MIDDLE AND RING FINGERS
+ {0x1F597, 0x1F5A3, prN}, // So [13] WHITE DOWN POINTING LEFT HAND INDEX..BLACK DOWN POINTING BACKHAND INDEX
+ {0x1F5A4, 0x1F5A4, prW}, // So BLACK HEART
+ {0x1F5A5, 0x1F5FA, prN}, // So [86] DESKTOP COMPUTER..WORLD MAP
+ {0x1F5FB, 0x1F5FF, prW}, // So [5] MOUNT FUJI..MOYAI
+ {0x1F600, 0x1F64F, prW}, // So [80] GRINNING FACE..PERSON WITH FOLDED HANDS
+ {0x1F650, 0x1F67F, prN}, // So [48] NORTH WEST POINTING LEAF..REVERSE CHECKER BOARD
+ {0x1F680, 0x1F6C5, prW}, // So [70] ROCKET..LEFT LUGGAGE
+ {0x1F6C6, 0x1F6CB, prN}, // So [6] TRIANGLE WITH ROUNDED CORNERS..COUCH AND LAMP
+ {0x1F6CC, 0x1F6CC, prW}, // So SLEEPING ACCOMMODATION
+ {0x1F6CD, 0x1F6CF, prN}, // So [3] SHOPPING BAGS..BED
+ {0x1F6D0, 0x1F6D2, prW}, // So [3] PLACE OF WORSHIP..SHOPPING TROLLEY
+ {0x1F6D3, 0x1F6D4, prN}, // So [2] STUPA..PAGODA
+ {0x1F6D5, 0x1F6D7, prW}, // So [3] HINDU TEMPLE..ELEVATOR
+ {0x1F6DD, 0x1F6DF, prW}, // So [3] PLAYGROUND SLIDE..RING BUOY
+ {0x1F6E0, 0x1F6EA, prN}, // So [11] HAMMER AND WRENCH..NORTHEAST-POINTING AIRPLANE
+ {0x1F6EB, 0x1F6EC, prW}, // So [2] AIRPLANE DEPARTURE..AIRPLANE ARRIVING
+ {0x1F6F0, 0x1F6F3, prN}, // So [4] SATELLITE..PASSENGER SHIP
+ {0x1F6F4, 0x1F6FC, prW}, // So [9] SCOOTER..ROLLER SKATE
+ {0x1F700, 0x1F773, prN}, // So [116] ALCHEMICAL SYMBOL FOR QUINTESSENCE..ALCHEMICAL SYMBOL FOR HALF OUNCE
+ {0x1F780, 0x1F7D8, prN}, // So [89] BLACK LEFT-POINTING ISOSCELES RIGHT TRIANGLE..NEGATIVE CIRCLED SQUARE
+ {0x1F7E0, 0x1F7EB, prW}, // So [12] LARGE ORANGE CIRCLE..LARGE BROWN SQUARE
+ {0x1F7F0, 0x1F7F0, prW}, // So HEAVY EQUALS SIGN
+ {0x1F800, 0x1F80B, prN}, // So [12] LEFTWARDS ARROW WITH SMALL TRIANGLE ARROWHEAD..DOWNWARDS ARROW WITH LARGE TRIANGLE ARROWHEAD
+ {0x1F810, 0x1F847, prN}, // So [56] LEFTWARDS ARROW WITH SMALL EQUILATERAL ARROWHEAD..DOWNWARDS HEAVY ARROW
+ {0x1F850, 0x1F859, prN}, // So [10] LEFTWARDS SANS-SERIF ARROW..UP DOWN SANS-SERIF ARROW
+ {0x1F860, 0x1F887, prN}, // So [40] WIDE-HEADED LEFTWARDS LIGHT BARB ARROW..WIDE-HEADED SOUTH WEST VERY HEAVY BARB ARROW
+ {0x1F890, 0x1F8AD, prN}, // So [30] LEFTWARDS TRIANGLE ARROWHEAD..WHITE ARROW SHAFT WIDTH TWO THIRDS
+ {0x1F8B0, 0x1F8B1, prN}, // So [2] ARROW POINTING UPWARDS THEN NORTH WEST..ARROW POINTING RIGHTWARDS THEN CURVING SOUTH WEST
+ {0x1F900, 0x1F90B, prN}, // So [12] CIRCLED CROSS FORMEE WITH FOUR DOTS..DOWNWARD FACING NOTCHED HOOK WITH DOT
+ {0x1F90C, 0x1F93A, prW}, // So [47] PINCHED FINGERS..FENCER
+ {0x1F93B, 0x1F93B, prN}, // So MODERN PENTATHLON
+ {0x1F93C, 0x1F945, prW}, // So [10] WRESTLERS..GOAL NET
+ {0x1F946, 0x1F946, prN}, // So RIFLE
+ {0x1F947, 0x1F9FF, prW}, // So [185] FIRST PLACE MEDAL..NAZAR AMULET
+ {0x1FA00, 0x1FA53, prN}, // So [84] NEUTRAL CHESS KING..BLACK CHESS KNIGHT-BISHOP
+ {0x1FA60, 0x1FA6D, prN}, // So [14] XIANGQI RED GENERAL..XIANGQI BLACK SOLDIER
+ {0x1FA70, 0x1FA74, prW}, // So [5] BALLET SHOES..THONG SANDAL
+ {0x1FA78, 0x1FA7C, prW}, // So [5] DROP OF BLOOD..CRUTCH
+ {0x1FA80, 0x1FA86, prW}, // So [7] YO-YO..NESTING DOLLS
+ {0x1FA90, 0x1FAAC, prW}, // So [29] RINGED PLANET..HAMSA
+ {0x1FAB0, 0x1FABA, prW}, // So [11] FLY..NEST WITH EGGS
+ {0x1FAC0, 0x1FAC5, prW}, // So [6] ANATOMICAL HEART..PERSON WITH CROWN
+ {0x1FAD0, 0x1FAD9, prW}, // So [10] BLUEBERRIES..JAR
+ {0x1FAE0, 0x1FAE7, prW}, // So [8] MELTING FACE..BUBBLES
+ {0x1FAF0, 0x1FAF6, prW}, // So [7] HAND WITH INDEX FINGER AND THUMB CROSSED..HEART HANDS
+ {0x1FB00, 0x1FB92, prN}, // So [147] BLOCK SEXTANT-1..UPPER HALF INVERSE MEDIUM SHADE AND LOWER HALF BLOCK
+ {0x1FB94, 0x1FBCA, prN}, // So [55] LEFT HALF INVERSE MEDIUM SHADE AND RIGHT HALF BLOCK..WHITE UP-POINTING CHEVRON
+ {0x1FBF0, 0x1FBF9, prN}, // Nd [10] SEGMENTED DIGIT ZERO..SEGMENTED DIGIT NINE
+ {0x20000, 0x2A6DF, prW}, // Lo [42720] CJK UNIFIED IDEOGRAPH-20000..CJK UNIFIED IDEOGRAPH-2A6DF
+ {0x2A6E0, 0x2A6FF, prW}, // Cn [32] ..
+ {0x2A700, 0x2B738, prW}, // Lo [4153] CJK UNIFIED IDEOGRAPH-2A700..CJK UNIFIED IDEOGRAPH-2B738
+ {0x2B739, 0x2B73F, prW}, // Cn [7] ..
+ {0x2B740, 0x2B81D, prW}, // Lo [222] CJK UNIFIED IDEOGRAPH-2B740..CJK UNIFIED IDEOGRAPH-2B81D
+ {0x2B81E, 0x2B81F, prW}, // Cn [2] ..
+ {0x2B820, 0x2CEA1, prW}, // Lo [5762] CJK UNIFIED IDEOGRAPH-2B820..CJK UNIFIED IDEOGRAPH-2CEA1
+ {0x2CEA2, 0x2CEAF, prW}, // Cn [14] ..
+ {0x2CEB0, 0x2EBE0, prW}, // Lo [7473] CJK UNIFIED IDEOGRAPH-2CEB0..CJK UNIFIED IDEOGRAPH-2EBE0
+ {0x2EBE1, 0x2F7FF, prW}, // Cn [3103] ..
+ {0x2F800, 0x2FA1D, prW}, // Lo [542] CJK COMPATIBILITY IDEOGRAPH-2F800..CJK COMPATIBILITY IDEOGRAPH-2FA1D
+ {0x2FA1E, 0x2FA1F, prW}, // Cn [2] ..
+ {0x2FA20, 0x2FFFD, prW}, // Cn [1502] ..
+ {0x30000, 0x3134A, prW}, // Lo [4939] CJK UNIFIED IDEOGRAPH-30000..CJK UNIFIED IDEOGRAPH-3134A
+ {0x3134B, 0x3FFFD, prW}, // Cn [60595] ..
+ {0xE0001, 0xE0001, prN}, // Cf LANGUAGE TAG
+ {0xE0020, 0xE007F, prN}, // Cf [96] TAG SPACE..CANCEL TAG
+ {0xE0100, 0xE01EF, prA}, // Mn [240] VARIATION SELECTOR-17..VARIATION SELECTOR-256
+ {0xF0000, 0xFFFFD, prA}, // Co [65534] ..
+ {0x100000, 0x10FFFD, prA}, // Co [65534] ..
+}
diff --git a/vendor/github.com/rivo/uniseg/emojipresentation.go b/vendor/github.com/rivo/uniseg/emojipresentation.go
new file mode 100644
index 0000000..fd0f745
--- /dev/null
+++ b/vendor/github.com/rivo/uniseg/emojipresentation.go
@@ -0,0 +1,285 @@
+package uniseg
+
+// Code generated via go generate from gen_properties.go. DO NOT EDIT.
+
+// emojiPresentation are taken from
+//
+// and
+// https://unicode.org/Public/14.0.0/ucd/emoji/emoji-data.txt
+// ("Extended_Pictographic" only)
+// on September 10, 2022. See https://www.unicode.org/license.html for the Unicode
+// license agreement.
+var emojiPresentation = [][3]int{
+ {0x231A, 0x231B, prEmojiPresentation}, // E0.6 [2] (⌚..⌛) watch..hourglass done
+ {0x23E9, 0x23EC, prEmojiPresentation}, // E0.6 [4] (⏩..⏬) fast-forward button..fast down button
+ {0x23F0, 0x23F0, prEmojiPresentation}, // E0.6 [1] (⏰) alarm clock
+ {0x23F3, 0x23F3, prEmojiPresentation}, // E0.6 [1] (⏳) hourglass not done
+ {0x25FD, 0x25FE, prEmojiPresentation}, // E0.6 [2] (◽..◾) white medium-small square..black medium-small square
+ {0x2614, 0x2615, prEmojiPresentation}, // E0.6 [2] (☔..☕) umbrella with rain drops..hot beverage
+ {0x2648, 0x2653, prEmojiPresentation}, // E0.6 [12] (♈..♓) Aries..Pisces
+ {0x267F, 0x267F, prEmojiPresentation}, // E0.6 [1] (♿) wheelchair symbol
+ {0x2693, 0x2693, prEmojiPresentation}, // E0.6 [1] (⚓) anchor
+ {0x26A1, 0x26A1, prEmojiPresentation}, // E0.6 [1] (⚡) high voltage
+ {0x26AA, 0x26AB, prEmojiPresentation}, // E0.6 [2] (⚪..⚫) white circle..black circle
+ {0x26BD, 0x26BE, prEmojiPresentation}, // E0.6 [2] (⚽..⚾) soccer ball..baseball
+ {0x26C4, 0x26C5, prEmojiPresentation}, // E0.6 [2] (⛄..⛅) snowman without snow..sun behind cloud
+ {0x26CE, 0x26CE, prEmojiPresentation}, // E0.6 [1] (⛎) Ophiuchus
+ {0x26D4, 0x26D4, prEmojiPresentation}, // E0.6 [1] (⛔) no entry
+ {0x26EA, 0x26EA, prEmojiPresentation}, // E0.6 [1] (⛪) church
+ {0x26F2, 0x26F3, prEmojiPresentation}, // E0.6 [2] (⛲..⛳) fountain..flag in hole
+ {0x26F5, 0x26F5, prEmojiPresentation}, // E0.6 [1] (⛵) sailboat
+ {0x26FA, 0x26FA, prEmojiPresentation}, // E0.6 [1] (⛺) tent
+ {0x26FD, 0x26FD, prEmojiPresentation}, // E0.6 [1] (⛽) fuel pump
+ {0x2705, 0x2705, prEmojiPresentation}, // E0.6 [1] (✅) check mark button
+ {0x270A, 0x270B, prEmojiPresentation}, // E0.6 [2] (✊..✋) raised fist..raised hand
+ {0x2728, 0x2728, prEmojiPresentation}, // E0.6 [1] (✨) sparkles
+ {0x274C, 0x274C, prEmojiPresentation}, // E0.6 [1] (❌) cross mark
+ {0x274E, 0x274E, prEmojiPresentation}, // E0.6 [1] (❎) cross mark button
+ {0x2753, 0x2755, prEmojiPresentation}, // E0.6 [3] (❓..❕) red question mark..white exclamation mark
+ {0x2757, 0x2757, prEmojiPresentation}, // E0.6 [1] (❗) red exclamation mark
+ {0x2795, 0x2797, prEmojiPresentation}, // E0.6 [3] (➕..➗) plus..divide
+ {0x27B0, 0x27B0, prEmojiPresentation}, // E0.6 [1] (➰) curly loop
+ {0x27BF, 0x27BF, prEmojiPresentation}, // E1.0 [1] (➿) double curly loop
+ {0x2B1B, 0x2B1C, prEmojiPresentation}, // E0.6 [2] (⬛..⬜) black large square..white large square
+ {0x2B50, 0x2B50, prEmojiPresentation}, // E0.6 [1] (⭐) star
+ {0x2B55, 0x2B55, prEmojiPresentation}, // E0.6 [1] (⭕) hollow red circle
+ {0x1F004, 0x1F004, prEmojiPresentation}, // E0.6 [1] (🀄) mahjong red dragon
+ {0x1F0CF, 0x1F0CF, prEmojiPresentation}, // E0.6 [1] (🃏) joker
+ {0x1F18E, 0x1F18E, prEmojiPresentation}, // E0.6 [1] (🆎) AB button (blood type)
+ {0x1F191, 0x1F19A, prEmojiPresentation}, // E0.6 [10] (🆑..🆚) CL button..VS button
+ {0x1F1E6, 0x1F1FF, prEmojiPresentation}, // E0.0 [26] (🇦..🇿) regional indicator symbol letter a..regional indicator symbol letter z
+ {0x1F201, 0x1F201, prEmojiPresentation}, // E0.6 [1] (🈁) Japanese “here” button
+ {0x1F21A, 0x1F21A, prEmojiPresentation}, // E0.6 [1] (🈚) Japanese “free of charge” button
+ {0x1F22F, 0x1F22F, prEmojiPresentation}, // E0.6 [1] (🈯) Japanese “reserved” button
+ {0x1F232, 0x1F236, prEmojiPresentation}, // E0.6 [5] (🈲..🈶) Japanese “prohibited” button..Japanese “not free of charge” button
+ {0x1F238, 0x1F23A, prEmojiPresentation}, // E0.6 [3] (🈸..🈺) Japanese “application” button..Japanese “open for business” button
+ {0x1F250, 0x1F251, prEmojiPresentation}, // E0.6 [2] (🉐..🉑) Japanese “bargain” button..Japanese “acceptable” button
+ {0x1F300, 0x1F30C, prEmojiPresentation}, // E0.6 [13] (🌀..🌌) cyclone..milky way
+ {0x1F30D, 0x1F30E, prEmojiPresentation}, // E0.7 [2] (🌍..🌎) globe showing Europe-Africa..globe showing Americas
+ {0x1F30F, 0x1F30F, prEmojiPresentation}, // E0.6 [1] (🌏) globe showing Asia-Australia
+ {0x1F310, 0x1F310, prEmojiPresentation}, // E1.0 [1] (🌐) globe with meridians
+ {0x1F311, 0x1F311, prEmojiPresentation}, // E0.6 [1] (🌑) new moon
+ {0x1F312, 0x1F312, prEmojiPresentation}, // E1.0 [1] (🌒) waxing crescent moon
+ {0x1F313, 0x1F315, prEmojiPresentation}, // E0.6 [3] (🌓..🌕) first quarter moon..full moon
+ {0x1F316, 0x1F318, prEmojiPresentation}, // E1.0 [3] (🌖..🌘) waning gibbous moon..waning crescent moon
+ {0x1F319, 0x1F319, prEmojiPresentation}, // E0.6 [1] (🌙) crescent moon
+ {0x1F31A, 0x1F31A, prEmojiPresentation}, // E1.0 [1] (🌚) new moon face
+ {0x1F31B, 0x1F31B, prEmojiPresentation}, // E0.6 [1] (🌛) first quarter moon face
+ {0x1F31C, 0x1F31C, prEmojiPresentation}, // E0.7 [1] (🌜) last quarter moon face
+ {0x1F31D, 0x1F31E, prEmojiPresentation}, // E1.0 [2] (🌝..🌞) full moon face..sun with face
+ {0x1F31F, 0x1F320, prEmojiPresentation}, // E0.6 [2] (🌟..🌠) glowing star..shooting star
+ {0x1F32D, 0x1F32F, prEmojiPresentation}, // E1.0 [3] (🌭..🌯) hot dog..burrito
+ {0x1F330, 0x1F331, prEmojiPresentation}, // E0.6 [2] (🌰..🌱) chestnut..seedling
+ {0x1F332, 0x1F333, prEmojiPresentation}, // E1.0 [2] (🌲..🌳) evergreen tree..deciduous tree
+ {0x1F334, 0x1F335, prEmojiPresentation}, // E0.6 [2] (🌴..🌵) palm tree..cactus
+ {0x1F337, 0x1F34A, prEmojiPresentation}, // E0.6 [20] (🌷..🍊) tulip..tangerine
+ {0x1F34B, 0x1F34B, prEmojiPresentation}, // E1.0 [1] (🍋) lemon
+ {0x1F34C, 0x1F34F, prEmojiPresentation}, // E0.6 [4] (🍌..🍏) banana..green apple
+ {0x1F350, 0x1F350, prEmojiPresentation}, // E1.0 [1] (🍐) pear
+ {0x1F351, 0x1F37B, prEmojiPresentation}, // E0.6 [43] (🍑..🍻) peach..clinking beer mugs
+ {0x1F37C, 0x1F37C, prEmojiPresentation}, // E1.0 [1] (🍼) baby bottle
+ {0x1F37E, 0x1F37F, prEmojiPresentation}, // E1.0 [2] (🍾..🍿) bottle with popping cork..popcorn
+ {0x1F380, 0x1F393, prEmojiPresentation}, // E0.6 [20] (🎀..🎓) ribbon..graduation cap
+ {0x1F3A0, 0x1F3C4, prEmojiPresentation}, // E0.6 [37] (🎠..🏄) carousel horse..person surfing
+ {0x1F3C5, 0x1F3C5, prEmojiPresentation}, // E1.0 [1] (🏅) sports medal
+ {0x1F3C6, 0x1F3C6, prEmojiPresentation}, // E0.6 [1] (🏆) trophy
+ {0x1F3C7, 0x1F3C7, prEmojiPresentation}, // E1.0 [1] (🏇) horse racing
+ {0x1F3C8, 0x1F3C8, prEmojiPresentation}, // E0.6 [1] (🏈) american football
+ {0x1F3C9, 0x1F3C9, prEmojiPresentation}, // E1.0 [1] (🏉) rugby football
+ {0x1F3CA, 0x1F3CA, prEmojiPresentation}, // E0.6 [1] (🏊) person swimming
+ {0x1F3CF, 0x1F3D3, prEmojiPresentation}, // E1.0 [5] (🏏..🏓) cricket game..ping pong
+ {0x1F3E0, 0x1F3E3, prEmojiPresentation}, // E0.6 [4] (🏠..🏣) house..Japanese post office
+ {0x1F3E4, 0x1F3E4, prEmojiPresentation}, // E1.0 [1] (🏤) post office
+ {0x1F3E5, 0x1F3F0, prEmojiPresentation}, // E0.6 [12] (🏥..🏰) hospital..castle
+ {0x1F3F4, 0x1F3F4, prEmojiPresentation}, // E1.0 [1] (🏴) black flag
+ {0x1F3F8, 0x1F407, prEmojiPresentation}, // E1.0 [16] (🏸..🐇) badminton..rabbit
+ {0x1F408, 0x1F408, prEmojiPresentation}, // E0.7 [1] (🐈) cat
+ {0x1F409, 0x1F40B, prEmojiPresentation}, // E1.0 [3] (🐉..🐋) dragon..whale
+ {0x1F40C, 0x1F40E, prEmojiPresentation}, // E0.6 [3] (🐌..🐎) snail..horse
+ {0x1F40F, 0x1F410, prEmojiPresentation}, // E1.0 [2] (🐏..🐐) ram..goat
+ {0x1F411, 0x1F412, prEmojiPresentation}, // E0.6 [2] (🐑..🐒) ewe..monkey
+ {0x1F413, 0x1F413, prEmojiPresentation}, // E1.0 [1] (🐓) rooster
+ {0x1F414, 0x1F414, prEmojiPresentation}, // E0.6 [1] (🐔) chicken
+ {0x1F415, 0x1F415, prEmojiPresentation}, // E0.7 [1] (🐕) dog
+ {0x1F416, 0x1F416, prEmojiPresentation}, // E1.0 [1] (🐖) pig
+ {0x1F417, 0x1F429, prEmojiPresentation}, // E0.6 [19] (🐗..🐩) boar..poodle
+ {0x1F42A, 0x1F42A, prEmojiPresentation}, // E1.0 [1] (🐪) camel
+ {0x1F42B, 0x1F43E, prEmojiPresentation}, // E0.6 [20] (🐫..🐾) two-hump camel..paw prints
+ {0x1F440, 0x1F440, prEmojiPresentation}, // E0.6 [1] (👀) eyes
+ {0x1F442, 0x1F464, prEmojiPresentation}, // E0.6 [35] (👂..👤) ear..bust in silhouette
+ {0x1F465, 0x1F465, prEmojiPresentation}, // E1.0 [1] (👥) busts in silhouette
+ {0x1F466, 0x1F46B, prEmojiPresentation}, // E0.6 [6] (👦..👫) boy..woman and man holding hands
+ {0x1F46C, 0x1F46D, prEmojiPresentation}, // E1.0 [2] (👬..👭) men holding hands..women holding hands
+ {0x1F46E, 0x1F4AC, prEmojiPresentation}, // E0.6 [63] (👮..💬) police officer..speech balloon
+ {0x1F4AD, 0x1F4AD, prEmojiPresentation}, // E1.0 [1] (💭) thought balloon
+ {0x1F4AE, 0x1F4B5, prEmojiPresentation}, // E0.6 [8] (💮..💵) white flower..dollar banknote
+ {0x1F4B6, 0x1F4B7, prEmojiPresentation}, // E1.0 [2] (💶..💷) euro banknote..pound banknote
+ {0x1F4B8, 0x1F4EB, prEmojiPresentation}, // E0.6 [52] (💸..📫) money with wings..closed mailbox with raised flag
+ {0x1F4EC, 0x1F4ED, prEmojiPresentation}, // E0.7 [2] (📬..📭) open mailbox with raised flag..open mailbox with lowered flag
+ {0x1F4EE, 0x1F4EE, prEmojiPresentation}, // E0.6 [1] (📮) postbox
+ {0x1F4EF, 0x1F4EF, prEmojiPresentation}, // E1.0 [1] (📯) postal horn
+ {0x1F4F0, 0x1F4F4, prEmojiPresentation}, // E0.6 [5] (📰..📴) newspaper..mobile phone off
+ {0x1F4F5, 0x1F4F5, prEmojiPresentation}, // E1.0 [1] (📵) no mobile phones
+ {0x1F4F6, 0x1F4F7, prEmojiPresentation}, // E0.6 [2] (📶..📷) antenna bars..camera
+ {0x1F4F8, 0x1F4F8, prEmojiPresentation}, // E1.0 [1] (📸) camera with flash
+ {0x1F4F9, 0x1F4FC, prEmojiPresentation}, // E0.6 [4] (📹..📼) video camera..videocassette
+ {0x1F4FF, 0x1F502, prEmojiPresentation}, // E1.0 [4] (📿..🔂) prayer beads..repeat single button
+ {0x1F503, 0x1F503, prEmojiPresentation}, // E0.6 [1] (🔃) clockwise vertical arrows
+ {0x1F504, 0x1F507, prEmojiPresentation}, // E1.0 [4] (🔄..🔇) counterclockwise arrows button..muted speaker
+ {0x1F508, 0x1F508, prEmojiPresentation}, // E0.7 [1] (🔈) speaker low volume
+ {0x1F509, 0x1F509, prEmojiPresentation}, // E1.0 [1] (🔉) speaker medium volume
+ {0x1F50A, 0x1F514, prEmojiPresentation}, // E0.6 [11] (🔊..🔔) speaker high volume..bell
+ {0x1F515, 0x1F515, prEmojiPresentation}, // E1.0 [1] (🔕) bell with slash
+ {0x1F516, 0x1F52B, prEmojiPresentation}, // E0.6 [22] (🔖..🔫) bookmark..water pistol
+ {0x1F52C, 0x1F52D, prEmojiPresentation}, // E1.0 [2] (🔬..🔭) microscope..telescope
+ {0x1F52E, 0x1F53D, prEmojiPresentation}, // E0.6 [16] (🔮..🔽) crystal ball..downwards button
+ {0x1F54B, 0x1F54E, prEmojiPresentation}, // E1.0 [4] (🕋..🕎) kaaba..menorah
+ {0x1F550, 0x1F55B, prEmojiPresentation}, // E0.6 [12] (🕐..🕛) one o’clock..twelve o’clock
+ {0x1F55C, 0x1F567, prEmojiPresentation}, // E0.7 [12] (🕜..🕧) one-thirty..twelve-thirty
+ {0x1F57A, 0x1F57A, prEmojiPresentation}, // E3.0 [1] (🕺) man dancing
+ {0x1F595, 0x1F596, prEmojiPresentation}, // E1.0 [2] (🖕..🖖) middle finger..vulcan salute
+ {0x1F5A4, 0x1F5A4, prEmojiPresentation}, // E3.0 [1] (🖤) black heart
+ {0x1F5FB, 0x1F5FF, prEmojiPresentation}, // E0.6 [5] (🗻..🗿) mount fuji..moai
+ {0x1F600, 0x1F600, prEmojiPresentation}, // E1.0 [1] (😀) grinning face
+ {0x1F601, 0x1F606, prEmojiPresentation}, // E0.6 [6] (😁..😆) beaming face with smiling eyes..grinning squinting face
+ {0x1F607, 0x1F608, prEmojiPresentation}, // E1.0 [2] (😇..😈) smiling face with halo..smiling face with horns
+ {0x1F609, 0x1F60D, prEmojiPresentation}, // E0.6 [5] (😉..😍) winking face..smiling face with heart-eyes
+ {0x1F60E, 0x1F60E, prEmojiPresentation}, // E1.0 [1] (😎) smiling face with sunglasses
+ {0x1F60F, 0x1F60F, prEmojiPresentation}, // E0.6 [1] (😏) smirking face
+ {0x1F610, 0x1F610, prEmojiPresentation}, // E0.7 [1] (😐) neutral face
+ {0x1F611, 0x1F611, prEmojiPresentation}, // E1.0 [1] (😑) expressionless face
+ {0x1F612, 0x1F614, prEmojiPresentation}, // E0.6 [3] (😒..😔) unamused face..pensive face
+ {0x1F615, 0x1F615, prEmojiPresentation}, // E1.0 [1] (😕) confused face
+ {0x1F616, 0x1F616, prEmojiPresentation}, // E0.6 [1] (😖) confounded face
+ {0x1F617, 0x1F617, prEmojiPresentation}, // E1.0 [1] (😗) kissing face
+ {0x1F618, 0x1F618, prEmojiPresentation}, // E0.6 [1] (😘) face blowing a kiss
+ {0x1F619, 0x1F619, prEmojiPresentation}, // E1.0 [1] (😙) kissing face with smiling eyes
+ {0x1F61A, 0x1F61A, prEmojiPresentation}, // E0.6 [1] (😚) kissing face with closed eyes
+ {0x1F61B, 0x1F61B, prEmojiPresentation}, // E1.0 [1] (😛) face with tongue
+ {0x1F61C, 0x1F61E, prEmojiPresentation}, // E0.6 [3] (😜..😞) winking face with tongue..disappointed face
+ {0x1F61F, 0x1F61F, prEmojiPresentation}, // E1.0 [1] (😟) worried face
+ {0x1F620, 0x1F625, prEmojiPresentation}, // E0.6 [6] (😠..😥) angry face..sad but relieved face
+ {0x1F626, 0x1F627, prEmojiPresentation}, // E1.0 [2] (😦..😧) frowning face with open mouth..anguished face
+ {0x1F628, 0x1F62B, prEmojiPresentation}, // E0.6 [4] (😨..😫) fearful face..tired face
+ {0x1F62C, 0x1F62C, prEmojiPresentation}, // E1.0 [1] (😬) grimacing face
+ {0x1F62D, 0x1F62D, prEmojiPresentation}, // E0.6 [1] (😭) loudly crying face
+ {0x1F62E, 0x1F62F, prEmojiPresentation}, // E1.0 [2] (😮..😯) face with open mouth..hushed face
+ {0x1F630, 0x1F633, prEmojiPresentation}, // E0.6 [4] (😰..😳) anxious face with sweat..flushed face
+ {0x1F634, 0x1F634, prEmojiPresentation}, // E1.0 [1] (😴) sleeping face
+ {0x1F635, 0x1F635, prEmojiPresentation}, // E0.6 [1] (😵) face with crossed-out eyes
+ {0x1F636, 0x1F636, prEmojiPresentation}, // E1.0 [1] (😶) face without mouth
+ {0x1F637, 0x1F640, prEmojiPresentation}, // E0.6 [10] (😷..🙀) face with medical mask..weary cat
+ {0x1F641, 0x1F644, prEmojiPresentation}, // E1.0 [4] (🙁..🙄) slightly frowning face..face with rolling eyes
+ {0x1F645, 0x1F64F, prEmojiPresentation}, // E0.6 [11] (🙅..🙏) person gesturing NO..folded hands
+ {0x1F680, 0x1F680, prEmojiPresentation}, // E0.6 [1] (🚀) rocket
+ {0x1F681, 0x1F682, prEmojiPresentation}, // E1.0 [2] (🚁..🚂) helicopter..locomotive
+ {0x1F683, 0x1F685, prEmojiPresentation}, // E0.6 [3] (🚃..🚅) railway car..bullet train
+ {0x1F686, 0x1F686, prEmojiPresentation}, // E1.0 [1] (🚆) train
+ {0x1F687, 0x1F687, prEmojiPresentation}, // E0.6 [1] (🚇) metro
+ {0x1F688, 0x1F688, prEmojiPresentation}, // E1.0 [1] (🚈) light rail
+ {0x1F689, 0x1F689, prEmojiPresentation}, // E0.6 [1] (🚉) station
+ {0x1F68A, 0x1F68B, prEmojiPresentation}, // E1.0 [2] (🚊..🚋) tram..tram car
+ {0x1F68C, 0x1F68C, prEmojiPresentation}, // E0.6 [1] (🚌) bus
+ {0x1F68D, 0x1F68D, prEmojiPresentation}, // E0.7 [1] (🚍) oncoming bus
+ {0x1F68E, 0x1F68E, prEmojiPresentation}, // E1.0 [1] (🚎) trolleybus
+ {0x1F68F, 0x1F68F, prEmojiPresentation}, // E0.6 [1] (🚏) bus stop
+ {0x1F690, 0x1F690, prEmojiPresentation}, // E1.0 [1] (🚐) minibus
+ {0x1F691, 0x1F693, prEmojiPresentation}, // E0.6 [3] (🚑..🚓) ambulance..police car
+ {0x1F694, 0x1F694, prEmojiPresentation}, // E0.7 [1] (🚔) oncoming police car
+ {0x1F695, 0x1F695, prEmojiPresentation}, // E0.6 [1] (🚕) taxi
+ {0x1F696, 0x1F696, prEmojiPresentation}, // E1.0 [1] (🚖) oncoming taxi
+ {0x1F697, 0x1F697, prEmojiPresentation}, // E0.6 [1] (🚗) automobile
+ {0x1F698, 0x1F698, prEmojiPresentation}, // E0.7 [1] (🚘) oncoming automobile
+ {0x1F699, 0x1F69A, prEmojiPresentation}, // E0.6 [2] (🚙..🚚) sport utility vehicle..delivery truck
+ {0x1F69B, 0x1F6A1, prEmojiPresentation}, // E1.0 [7] (🚛..🚡) articulated lorry..aerial tramway
+ {0x1F6A2, 0x1F6A2, prEmojiPresentation}, // E0.6 [1] (🚢) ship
+ {0x1F6A3, 0x1F6A3, prEmojiPresentation}, // E1.0 [1] (🚣) person rowing boat
+ {0x1F6A4, 0x1F6A5, prEmojiPresentation}, // E0.6 [2] (🚤..🚥) speedboat..horizontal traffic light
+ {0x1F6A6, 0x1F6A6, prEmojiPresentation}, // E1.0 [1] (🚦) vertical traffic light
+ {0x1F6A7, 0x1F6AD, prEmojiPresentation}, // E0.6 [7] (🚧..🚭) construction..no smoking
+ {0x1F6AE, 0x1F6B1, prEmojiPresentation}, // E1.0 [4] (🚮..🚱) litter in bin sign..non-potable water
+ {0x1F6B2, 0x1F6B2, prEmojiPresentation}, // E0.6 [1] (🚲) bicycle
+ {0x1F6B3, 0x1F6B5, prEmojiPresentation}, // E1.0 [3] (🚳..🚵) no bicycles..person mountain biking
+ {0x1F6B6, 0x1F6B6, prEmojiPresentation}, // E0.6 [1] (🚶) person walking
+ {0x1F6B7, 0x1F6B8, prEmojiPresentation}, // E1.0 [2] (🚷..🚸) no pedestrians..children crossing
+ {0x1F6B9, 0x1F6BE, prEmojiPresentation}, // E0.6 [6] (🚹..🚾) men’s room..water closet
+ {0x1F6BF, 0x1F6BF, prEmojiPresentation}, // E1.0 [1] (🚿) shower
+ {0x1F6C0, 0x1F6C0, prEmojiPresentation}, // E0.6 [1] (🛀) person taking bath
+ {0x1F6C1, 0x1F6C5, prEmojiPresentation}, // E1.0 [5] (🛁..🛅) bathtub..left luggage
+ {0x1F6CC, 0x1F6CC, prEmojiPresentation}, // E1.0 [1] (🛌) person in bed
+ {0x1F6D0, 0x1F6D0, prEmojiPresentation}, // E1.0 [1] (🛐) place of worship
+ {0x1F6D1, 0x1F6D2, prEmojiPresentation}, // E3.0 [2] (🛑..🛒) stop sign..shopping cart
+ {0x1F6D5, 0x1F6D5, prEmojiPresentation}, // E12.0 [1] (🛕) hindu temple
+ {0x1F6D6, 0x1F6D7, prEmojiPresentation}, // E13.0 [2] (🛖..🛗) hut..elevator
+ {0x1F6DD, 0x1F6DF, prEmojiPresentation}, // E14.0 [3] (🛝..🛟) playground slide..ring buoy
+ {0x1F6EB, 0x1F6EC, prEmojiPresentation}, // E1.0 [2] (🛫..🛬) airplane departure..airplane arrival
+ {0x1F6F4, 0x1F6F6, prEmojiPresentation}, // E3.0 [3] (🛴..🛶) kick scooter..canoe
+ {0x1F6F7, 0x1F6F8, prEmojiPresentation}, // E5.0 [2] (🛷..🛸) sled..flying saucer
+ {0x1F6F9, 0x1F6F9, prEmojiPresentation}, // E11.0 [1] (🛹) skateboard
+ {0x1F6FA, 0x1F6FA, prEmojiPresentation}, // E12.0 [1] (🛺) auto rickshaw
+ {0x1F6FB, 0x1F6FC, prEmojiPresentation}, // E13.0 [2] (🛻..🛼) pickup truck..roller skate
+ {0x1F7E0, 0x1F7EB, prEmojiPresentation}, // E12.0 [12] (🟠..🟫) orange circle..brown square
+ {0x1F7F0, 0x1F7F0, prEmojiPresentation}, // E14.0 [1] (🟰) heavy equals sign
+ {0x1F90C, 0x1F90C, prEmojiPresentation}, // E13.0 [1] (🤌) pinched fingers
+ {0x1F90D, 0x1F90F, prEmojiPresentation}, // E12.0 [3] (🤍..🤏) white heart..pinching hand
+ {0x1F910, 0x1F918, prEmojiPresentation}, // E1.0 [9] (🤐..🤘) zipper-mouth face..sign of the horns
+ {0x1F919, 0x1F91E, prEmojiPresentation}, // E3.0 [6] (🤙..🤞) call me hand..crossed fingers
+ {0x1F91F, 0x1F91F, prEmojiPresentation}, // E5.0 [1] (🤟) love-you gesture
+ {0x1F920, 0x1F927, prEmojiPresentation}, // E3.0 [8] (🤠..🤧) cowboy hat face..sneezing face
+ {0x1F928, 0x1F92F, prEmojiPresentation}, // E5.0 [8] (🤨..🤯) face with raised eyebrow..exploding head
+ {0x1F930, 0x1F930, prEmojiPresentation}, // E3.0 [1] (🤰) pregnant woman
+ {0x1F931, 0x1F932, prEmojiPresentation}, // E5.0 [2] (🤱..🤲) breast-feeding..palms up together
+ {0x1F933, 0x1F93A, prEmojiPresentation}, // E3.0 [8] (🤳..🤺) selfie..person fencing
+ {0x1F93C, 0x1F93E, prEmojiPresentation}, // E3.0 [3] (🤼..🤾) people wrestling..person playing handball
+ {0x1F93F, 0x1F93F, prEmojiPresentation}, // E12.0 [1] (🤿) diving mask
+ {0x1F940, 0x1F945, prEmojiPresentation}, // E3.0 [6] (🥀..🥅) wilted flower..goal net
+ {0x1F947, 0x1F94B, prEmojiPresentation}, // E3.0 [5] (🥇..🥋) 1st place medal..martial arts uniform
+ {0x1F94C, 0x1F94C, prEmojiPresentation}, // E5.0 [1] (🥌) curling stone
+ {0x1F94D, 0x1F94F, prEmojiPresentation}, // E11.0 [3] (🥍..🥏) lacrosse..flying disc
+ {0x1F950, 0x1F95E, prEmojiPresentation}, // E3.0 [15] (🥐..🥞) croissant..pancakes
+ {0x1F95F, 0x1F96B, prEmojiPresentation}, // E5.0 [13] (🥟..🥫) dumpling..canned food
+ {0x1F96C, 0x1F970, prEmojiPresentation}, // E11.0 [5] (🥬..🥰) leafy green..smiling face with hearts
+ {0x1F971, 0x1F971, prEmojiPresentation}, // E12.0 [1] (🥱) yawning face
+ {0x1F972, 0x1F972, prEmojiPresentation}, // E13.0 [1] (🥲) smiling face with tear
+ {0x1F973, 0x1F976, prEmojiPresentation}, // E11.0 [4] (🥳..🥶) partying face..cold face
+ {0x1F977, 0x1F978, prEmojiPresentation}, // E13.0 [2] (🥷..🥸) ninja..disguised face
+ {0x1F979, 0x1F979, prEmojiPresentation}, // E14.0 [1] (🥹) face holding back tears
+ {0x1F97A, 0x1F97A, prEmojiPresentation}, // E11.0 [1] (🥺) pleading face
+ {0x1F97B, 0x1F97B, prEmojiPresentation}, // E12.0 [1] (🥻) sari
+ {0x1F97C, 0x1F97F, prEmojiPresentation}, // E11.0 [4] (🥼..🥿) lab coat..flat shoe
+ {0x1F980, 0x1F984, prEmojiPresentation}, // E1.0 [5] (🦀..🦄) crab..unicorn
+ {0x1F985, 0x1F991, prEmojiPresentation}, // E3.0 [13] (🦅..🦑) eagle..squid
+ {0x1F992, 0x1F997, prEmojiPresentation}, // E5.0 [6] (🦒..🦗) giraffe..cricket
+ {0x1F998, 0x1F9A2, prEmojiPresentation}, // E11.0 [11] (🦘..🦢) kangaroo..swan
+ {0x1F9A3, 0x1F9A4, prEmojiPresentation}, // E13.0 [2] (🦣..🦤) mammoth..dodo
+ {0x1F9A5, 0x1F9AA, prEmojiPresentation}, // E12.0 [6] (🦥..🦪) sloth..oyster
+ {0x1F9AB, 0x1F9AD, prEmojiPresentation}, // E13.0 [3] (🦫..🦭) beaver..seal
+ {0x1F9AE, 0x1F9AF, prEmojiPresentation}, // E12.0 [2] (🦮..🦯) guide dog..white cane
+ {0x1F9B0, 0x1F9B9, prEmojiPresentation}, // E11.0 [10] (🦰..🦹) red hair..supervillain
+ {0x1F9BA, 0x1F9BF, prEmojiPresentation}, // E12.0 [6] (🦺..🦿) safety vest..mechanical leg
+ {0x1F9C0, 0x1F9C0, prEmojiPresentation}, // E1.0 [1] (🧀) cheese wedge
+ {0x1F9C1, 0x1F9C2, prEmojiPresentation}, // E11.0 [2] (🧁..🧂) cupcake..salt
+ {0x1F9C3, 0x1F9CA, prEmojiPresentation}, // E12.0 [8] (🧃..🧊) beverage box..ice
+ {0x1F9CB, 0x1F9CB, prEmojiPresentation}, // E13.0 [1] (🧋) bubble tea
+ {0x1F9CC, 0x1F9CC, prEmojiPresentation}, // E14.0 [1] (🧌) troll
+ {0x1F9CD, 0x1F9CF, prEmojiPresentation}, // E12.0 [3] (🧍..🧏) person standing..deaf person
+ {0x1F9D0, 0x1F9E6, prEmojiPresentation}, // E5.0 [23] (🧐..🧦) face with monocle..socks
+ {0x1F9E7, 0x1F9FF, prEmojiPresentation}, // E11.0 [25] (🧧..🧿) red envelope..nazar amulet
+ {0x1FA70, 0x1FA73, prEmojiPresentation}, // E12.0 [4] (🩰..🩳) ballet shoes..shorts
+ {0x1FA74, 0x1FA74, prEmojiPresentation}, // E13.0 [1] (🩴) thong sandal
+ {0x1FA78, 0x1FA7A, prEmojiPresentation}, // E12.0 [3] (🩸..🩺) drop of blood..stethoscope
+ {0x1FA7B, 0x1FA7C, prEmojiPresentation}, // E14.0 [2] (🩻..🩼) x-ray..crutch
+ {0x1FA80, 0x1FA82, prEmojiPresentation}, // E12.0 [3] (🪀..🪂) yo-yo..parachute
+ {0x1FA83, 0x1FA86, prEmojiPresentation}, // E13.0 [4] (🪃..🪆) boomerang..nesting dolls
+ {0x1FA90, 0x1FA95, prEmojiPresentation}, // E12.0 [6] (🪐..🪕) ringed planet..banjo
+ {0x1FA96, 0x1FAA8, prEmojiPresentation}, // E13.0 [19] (🪖..🪨) military helmet..rock
+ {0x1FAA9, 0x1FAAC, prEmojiPresentation}, // E14.0 [4] (🪩..🪬) mirror ball..hamsa
+ {0x1FAB0, 0x1FAB6, prEmojiPresentation}, // E13.0 [7] (🪰..🪶) fly..feather
+ {0x1FAB7, 0x1FABA, prEmojiPresentation}, // E14.0 [4] (🪷..🪺) lotus..nest with eggs
+ {0x1FAC0, 0x1FAC2, prEmojiPresentation}, // E13.0 [3] (🫀..🫂) anatomical heart..people hugging
+ {0x1FAC3, 0x1FAC5, prEmojiPresentation}, // E14.0 [3] (🫃..🫅) pregnant man..person with crown
+ {0x1FAD0, 0x1FAD6, prEmojiPresentation}, // E13.0 [7] (🫐..🫖) blueberries..teapot
+ {0x1FAD7, 0x1FAD9, prEmojiPresentation}, // E14.0 [3] (🫗..🫙) pouring liquid..jar
+ {0x1FAE0, 0x1FAE7, prEmojiPresentation}, // E14.0 [8] (🫠..🫧) melting face..bubbles
+ {0x1FAF0, 0x1FAF6, prEmojiPresentation}, // E14.0 [7] (🫰..🫶) hand with index finger and thumb crossed..heart hands
+}
diff --git a/vendor/github.com/rivo/uniseg/gen_breaktest.go b/vendor/github.com/rivo/uniseg/gen_breaktest.go
new file mode 100644
index 0000000..e613c4c
--- /dev/null
+++ b/vendor/github.com/rivo/uniseg/gen_breaktest.go
@@ -0,0 +1,213 @@
+//go:build generate
+
+// This program generates a Go file containing a slice of test cases based on the
+// Unicode Character Database auxiliary data files. The command line arguments
+// are as follows:
+//
+// 1. The name of the Unicode data file (just the filename, without extension).
+// 2. The name of the locally generated Go file.
+// 3. The name of the slice containing the test cases.
+// 4. The name of the generator, for logging purposes.
+//
+//go:generate go run gen_breaktest.go GraphemeBreakTest graphemebreak_test.go graphemeBreakTestCases graphemes
+//go:generate go run gen_breaktest.go WordBreakTest wordbreak_test.go wordBreakTestCases words
+//go:generate go run gen_breaktest.go SentenceBreakTest sentencebreak_test.go sentenceBreakTestCases sentences
+//go:generate go run gen_breaktest.go LineBreakTest linebreak_test.go lineBreakTestCases lines
+
+package main
+
+import (
+ "bufio"
+ "bytes"
+ "errors"
+ "fmt"
+ "go/format"
+ "io/ioutil"
+ "log"
+ "net/http"
+ "os"
+ "time"
+)
+
+// We want to test against a specific version rather than the latest. When the
+// package is upgraded to a new version, change these to generate new tests.
+const (
+ testCaseURL = `https://www.unicode.org/Public/14.0.0/ucd/auxiliary/%s.txt`
+)
+
+func main() {
+ if len(os.Args) < 5 {
+ fmt.Println("Not enough arguments, see code for details")
+ os.Exit(1)
+ }
+
+ log.SetPrefix("gen_breaktest (" + os.Args[4] + "): ")
+ log.SetFlags(0)
+
+ // Read text of testcases and parse into Go source code.
+ src, err := parse(fmt.Sprintf(testCaseURL, os.Args[1]))
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ // Format the Go code.
+ formatted, err := format.Source(src)
+ if err != nil {
+ log.Fatalln("gofmt:", err)
+ }
+
+ // Write it out.
+ log.Print("Writing to ", os.Args[2])
+ if err := ioutil.WriteFile(os.Args[2], formatted, 0644); err != nil {
+ log.Fatal(err)
+ }
+}
+
+// parse reads a break text file, either from a local file or from a URL. It
+// parses the file data into Go source code representing the test cases.
+func parse(url string) ([]byte, error) {
+ log.Printf("Parsing %s", url)
+ res, err := http.Get(url)
+ if err != nil {
+ return nil, err
+ }
+ body := res.Body
+ defer body.Close()
+
+ buf := new(bytes.Buffer)
+ buf.Grow(120 << 10)
+ buf.WriteString(`package uniseg
+
+// Code generated via go generate from gen_breaktest.go. DO NOT EDIT.
+
+// ` + os.Args[3] + ` are Grapheme testcases taken from
+// ` + url + `
+// on ` + time.Now().Format("January 2, 2006") + `. See
+// https://www.unicode.org/license.html for the Unicode license agreement.
+var ` + os.Args[3] + ` = []testCase {
+`)
+
+ sc := bufio.NewScanner(body)
+ num := 1
+ var line []byte
+ original := make([]byte, 0, 64)
+ expected := make([]byte, 0, 64)
+ for sc.Scan() {
+ num++
+ line = sc.Bytes()
+ if len(line) == 0 || line[0] == '#' {
+ continue
+ }
+ var comment []byte
+ if i := bytes.IndexByte(line, '#'); i >= 0 {
+ comment = bytes.TrimSpace(line[i+1:])
+ line = bytes.TrimSpace(line[:i])
+ }
+ original, expected, err := parseRuneSequence(line, original[:0], expected[:0])
+ if err != nil {
+ return nil, fmt.Errorf(`line %d: %v: %q`, num, err, line)
+ }
+ fmt.Fprintf(buf, "\t{original: \"%s\", expected: %s}, // %s\n", original, expected, comment)
+ }
+ if err := sc.Err(); err != nil {
+ return nil, err
+ }
+
+ // Check for final "# EOF", useful check if we're streaming via HTTP
+ if !bytes.Equal(line, []byte("# EOF")) {
+ return nil, fmt.Errorf(`line %d: exected "# EOF" as final line, got %q`, num, line)
+ }
+ buf.WriteString("}\n")
+ return buf.Bytes(), nil
+}
+
+// Used by parseRuneSequence to match input via bytes.HasPrefix.
+var (
+ prefixBreak = []byte("÷ ")
+ prefixDontBreak = []byte("× ")
+ breakOk = []byte("÷")
+ breakNo = []byte("×")
+)
+
+// parseRuneSequence parses a rune + breaking opportunity sequence from b
+// and appends the Go code for testcase.original to orig
+// and appends the Go code for testcase.expected to exp.
+// It returns the new orig and exp slices.
+//
+// E.g. for the input b="÷ 0020 × 0308 ÷ 1F1E6 ÷"
+// it will append
+// "\u0020\u0308\U0001F1E6"
+// and "[][]rune{{0x0020,0x0308},{0x1F1E6},}"
+// to orig and exp respectively.
+//
+// The formatting of exp is expected to be cleaned up by gofmt or format.Source.
+// Note we explicitly require the sequence to start with ÷ and we implicitly
+// require it to end with ÷.
+func parseRuneSequence(b, orig, exp []byte) ([]byte, []byte, error) {
+ // Check for and remove first ÷ or ×.
+ if !bytes.HasPrefix(b, prefixBreak) && !bytes.HasPrefix(b, prefixDontBreak) {
+ return nil, nil, errors.New("expected ÷ or × as first character")
+ }
+ if bytes.HasPrefix(b, prefixBreak) {
+ b = b[len(prefixBreak):]
+ } else {
+ b = b[len(prefixDontBreak):]
+ }
+
+ boundary := true
+ exp = append(exp, "[][]rune{"...)
+ for len(b) > 0 {
+ if boundary {
+ exp = append(exp, '{')
+ }
+ exp = append(exp, "0x"...)
+ // Find end of hex digits.
+ var i int
+ for i = 0; i < len(b) && b[i] != ' '; i++ {
+ if d := b[i]; ('0' <= d && d <= '9') ||
+ ('A' <= d && d <= 'F') ||
+ ('a' <= d && d <= 'f') {
+ continue
+ }
+ return nil, nil, errors.New("bad hex digit")
+ }
+ switch i {
+ case 4:
+ orig = append(orig, "\\u"...)
+ case 5:
+ orig = append(orig, "\\U000"...)
+ default:
+ return nil, nil, errors.New("unsupport code point hex length")
+ }
+ orig = append(orig, b[:i]...)
+ exp = append(exp, b[:i]...)
+ b = b[i:]
+
+ // Check for space between hex and ÷ or ×.
+ if len(b) < 1 || b[0] != ' ' {
+ return nil, nil, errors.New("bad input")
+ }
+ b = b[1:]
+
+ // Check for next boundary.
+ switch {
+ case bytes.HasPrefix(b, breakOk):
+ boundary = true
+ b = b[len(breakOk):]
+ case bytes.HasPrefix(b, breakNo):
+ boundary = false
+ b = b[len(breakNo):]
+ default:
+ return nil, nil, errors.New("missing ÷ or ×")
+ }
+ if boundary {
+ exp = append(exp, '}')
+ }
+ exp = append(exp, ',')
+ if len(b) > 0 && b[0] == ' ' {
+ b = b[1:]
+ }
+ }
+ exp = append(exp, '}')
+ return orig, exp, nil
+}
diff --git a/vendor/github.com/rivo/uniseg/gen_properties.go b/vendor/github.com/rivo/uniseg/gen_properties.go
new file mode 100644
index 0000000..999d5ef
--- /dev/null
+++ b/vendor/github.com/rivo/uniseg/gen_properties.go
@@ -0,0 +1,256 @@
+//go:build generate
+
+// This program generates a property file in Go from Unicode Character
+// Database auxiliary data files. The command line arguments are as follows:
+//
+// 1. The name of the Unicode data file (just the filename, without extension).
+// Can be "-" (to skip) if the emoji flag is included.
+// 2. The name of the locally generated Go file.
+// 3. The name of the slice mapping code points to properties.
+// 4. The name of the generator, for logging purposes.
+// 5. (Optional) Flags, comma-separated. The following flags are available:
+// - "emojis=": include the specified emoji properties (e.g.
+// "Extended_Pictographic").
+// - "gencat": include general category properties.
+//
+//go:generate go run gen_properties.go auxiliary/GraphemeBreakProperty graphemeproperties.go graphemeCodePoints graphemes emojis=Extended_Pictographic
+//go:generate go run gen_properties.go auxiliary/WordBreakProperty wordproperties.go workBreakCodePoints words emojis=Extended_Pictographic
+//go:generate go run gen_properties.go auxiliary/SentenceBreakProperty sentenceproperties.go sentenceBreakCodePoints sentences
+//go:generate go run gen_properties.go LineBreak lineproperties.go lineBreakCodePoints lines gencat
+//go:generate go run gen_properties.go EastAsianWidth eastasianwidth.go eastAsianWidth eastasianwidth
+//go:generate go run gen_properties.go - emojipresentation.go emojiPresentation emojipresentation emojis=Emoji_Presentation
+package main
+
+import (
+ "bufio"
+ "bytes"
+ "errors"
+ "fmt"
+ "go/format"
+ "io/ioutil"
+ "log"
+ "net/http"
+ "os"
+ "regexp"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+)
+
+// We want to test against a specific version rather than the latest. When the
+// package is upgraded to a new version, change these to generate new tests.
+const (
+ propertyURL = `https://www.unicode.org/Public/14.0.0/ucd/%s.txt`
+ emojiURL = `https://unicode.org/Public/14.0.0/ucd/emoji/emoji-data.txt`
+)
+
+// The regular expression for a line containing a code point range property.
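+// For illustration (an assumed sample, not taken from this repository), a
+// matching line in those files looks roughly like:
+//
+//	0600..0605    ; Prepend # Cf   [6] ARABIC NUMBER SIGN..ARABIC NUMBER MARK ABOVE
+//
+// The capture groups yield the range start ("0600"), the optional range end
+// ("0605"), the property name ("Prepend"), and the trailing comment.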
+var propertyPattern = regexp.MustCompile(`^([0-9A-F]{4,6})(\.\.([0-9A-F]{4,6}))?\s*;\s*([A-Za-z0-9_]+)\s*#\s(.+)$`)
+
+func main() {
+ if len(os.Args) < 5 {
+ fmt.Println("Not enough arguments, see code for details")
+ os.Exit(1)
+ }
+
+ log.SetPrefix("gen_properties (" + os.Args[4] + "): ")
+ log.SetFlags(0)
+
+ // Parse flags.
+ flags := make(map[string]string)
+ if len(os.Args) >= 6 {
+ for _, flag := range strings.Split(os.Args[5], ",") {
+ flagFields := strings.Split(flag, "=")
+ if len(flagFields) == 1 {
+ flags[flagFields[0]] = "yes"
+ } else {
+ flags[flagFields[0]] = flagFields[1]
+ }
+ }
+ }
+
+ // Parse the text file and generate Go source code from it.
+ _, includeGeneralCategory := flags["gencat"]
+ var mainURL string
+ if os.Args[1] != "-" {
+ mainURL = fmt.Sprintf(propertyURL, os.Args[1])
+ }
+ src, err := parse(mainURL, flags["emojis"], includeGeneralCategory)
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ // Format the Go code.
+ formatted, err := format.Source([]byte(src))
+ if err != nil {
+ log.Fatal("gofmt:", err)
+ }
+
+ // Save it to the (local) target file.
+ log.Print("Writing to ", os.Args[2])
+ if err := ioutil.WriteFile(os.Args[2], formatted, 0644); err != nil {
+ log.Fatal(err)
+ }
+}
+
+// parse parses the Unicode Properties text files located at the given URLs and
+// returns their equivalent Go source code to be used in the uniseg package. If
+// "emojiProperty" is not an empty string, emoji code points for that emoji
+// property (e.g. "Extended_Pictographic") will be included. In those cases, you
+// may pass an empty "propertyURL" to skip parsing the main properties file. If
+// "includeGeneralCategory" is true, the Unicode General Category property will
+// be extracted from the comments and included in the output.
+func parse(propertyURL, emojiProperty string, includeGeneralCategory bool) (string, error) {
+ if propertyURL == "" && emojiProperty == "" {
+ return "", errors.New("no properties to parse")
+ }
+
+ // Temporary buffer to hold properties.
+ var properties [][4]string
+
+ // Open the first URL.
+ if propertyURL != "" {
+ log.Printf("Parsing %s", propertyURL)
+ res, err := http.Get(propertyURL)
+ if err != nil {
+ return "", err
+ }
+ in1 := res.Body
+ defer in1.Close()
+
+ // Parse it.
+ scanner := bufio.NewScanner(in1)
+ num := 0
+ for scanner.Scan() {
+ num++
+ line := strings.TrimSpace(scanner.Text())
+
+ // Skip comments and empty lines.
+ if strings.HasPrefix(line, "#") || line == "" {
+ continue
+ }
+
+ // Everything else must be a code point range, a property and a comment.
+ from, to, property, comment, err := parseProperty(line)
+ if err != nil {
+ return "", fmt.Errorf("%s line %d: %v", os.Args[4], num, err)
+ }
+ properties = append(properties, [4]string{from, to, property, comment})
+ }
+ if err := scanner.Err(); err != nil {
+ return "", err
+ }
+ }
+
+ // Open the second URL.
+ if emojiProperty != "" {
+ log.Printf("Parsing %s", emojiURL)
+ res, err := http.Get(emojiURL)
+ if err != nil {
+ return "", err
+ }
+ in2 := res.Body
+ defer in2.Close()
+
+ // Parse it.
+ scanner := bufio.NewScanner(in2)
+ num := 0
+ for scanner.Scan() {
+ num++
+ line := scanner.Text()
+
+ // Skip comments, empty lines, and everything not containing
+ // "Extended_Pictographic".
+ if strings.HasPrefix(line, "#") || line == "" || !strings.Contains(line, emojiProperty) {
+ continue
+ }
+
+ // Everything else must be a code point range, a property and a comment.
+ from, to, property, comment, err := parseProperty(line)
+ if err != nil {
+ return "", fmt.Errorf("emojis line %d: %v", num, err)
+ }
+ properties = append(properties, [4]string{from, to, property, comment})
+ }
+ if err := scanner.Err(); err != nil {
+ return "", err
+ }
+ }
+
+ // Sort properties.
+ sort.Slice(properties, func(i, j int) bool {
+ left, _ := strconv.ParseUint(properties[i][0], 16, 64)
+ right, _ := strconv.ParseUint(properties[j][0], 16, 64)
+ return left < right
+ })
+
+ // Header.
+ var (
+ buf bytes.Buffer
+ emojiComment string
+ )
+ columns := 3
+ if includeGeneralCategory {
+ columns = 4
+ }
+ if emojiURL != "" {
+ emojiComment = `
+// and
+// ` + emojiURL + `
+// ("Extended_Pictographic" only)`
+ }
+ buf.WriteString(`package uniseg
+
+// Code generated via go generate from gen_properties.go. DO NOT EDIT.
+
+// ` + os.Args[3] + ` are taken from
+// ` + propertyURL + emojiComment + `
+// on ` + time.Now().Format("January 2, 2006") + `. See https://www.unicode.org/license.html for the Unicode
+// license agreement.
+var ` + os.Args[3] + ` = [][` + strconv.Itoa(columns) + `]int{
+ `)
+
+ // Properties.
+ for _, prop := range properties {
+ if includeGeneralCategory {
+ generalCategory := "gc" + prop[3][:2]
+ if generalCategory == "gcL&" {
+ generalCategory = "gcLC"
+ }
+ prop[3] = prop[3][3:]
+ fmt.Fprintf(&buf, "{0x%s,0x%s,%s,%s}, // %s\n", prop[0], prop[1], translateProperty("pr", prop[2]), generalCategory, prop[3])
+ } else {
+ fmt.Fprintf(&buf, "{0x%s,0x%s,%s}, // %s\n", prop[0], prop[1], translateProperty("pr", prop[2]), prop[3])
+ }
+ }
+
+ // Tail.
+ buf.WriteString("}")
+
+ return buf.String(), nil
+}
+
+// parseProperty parses a line of the Unicode properties text file containing a
+// property for a code point range and returns it along with its comment.
+func parseProperty(line string) (from, to, property, comment string, err error) {
+ fields := propertyPattern.FindStringSubmatch(line)
+ if fields == nil {
+ err = errors.New("no property found")
+ return
+ }
+ from = fields[1]
+ to = fields[3]
+ if to == "" {
+ to = from
+ }
+ property = fields[4]
+ comment = fields[5]
+ return
+}
+
+// translateProperty translates a property name as used in the Unicode data file
+// to a variable used in the Go code.
+func translateProperty(prefix, property string) string {
+ return prefix + strings.ReplaceAll(property, "_", "")
+}
diff --git a/vendor/github.com/rivo/uniseg/grapheme.go b/vendor/github.com/rivo/uniseg/grapheme.go
new file mode 100644
index 0000000..0086fc1
--- /dev/null
+++ b/vendor/github.com/rivo/uniseg/grapheme.go
@@ -0,0 +1,334 @@
+package uniseg
+
+import "unicode/utf8"
+
+// Graphemes implements an iterator over Unicode grapheme clusters, or
+// user-perceived characters. While iterating, it also provides information
+// about word boundaries, sentence boundaries, line breaks, and monospace
+// character widths.
+//
+// After constructing the class via [NewGraphemes] for a given string "str",
+// [Graphemes.Next] is called for every grapheme cluster in a loop until it
+// returns false. Inside the loop, information about the grapheme cluster as
+// well as boundary information and character width is available via the various
+// methods (see examples below).
+//
+// Using this class to iterate over a string is convenient but it is much slower
+// than using this package's [Step] or [StepString] functions or any of the
+// other specialized functions starting with "First".
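+//
+// A brief usage sketch (illustrative only; it assumes the caller imports the
+// standard "fmt" package and picks an arbitrary input string):
+//
+//	g := NewGraphemes("Hello, 世界!")
+//	for g.Next() {
+//		fmt.Println(g.Str(), g.Width())
+//	}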
+type Graphemes struct {
+ // The original string.
+ original string
+
+ // The remaining string to be parsed.
+ remaining string
+
+ // The current grapheme cluster.
+ cluster string
+
+ // The byte offset of the current grapheme cluster relative to the original
+ // string.
+ offset int
+
+ // The current boundary information of the [Step] parser.
+ boundaries int
+
+ // The current state of the [Step] parser.
+ state int
+}
+
+// NewGraphemes returns a new grapheme cluster iterator.
+func NewGraphemes(str string) *Graphemes {
+ return &Graphemes{
+ original: str,
+ remaining: str,
+ state: -1,
+ }
+}
+
+// Next advances the iterator by one grapheme cluster and returns false if no
+// clusters are left. This function must be called before the first cluster is
+// accessed.
+func (g *Graphemes) Next() bool {
+ if len(g.remaining) == 0 {
+ // We're already past the end.
+ g.state = -2
+ g.cluster = ""
+ return false
+ }
+ g.offset += len(g.cluster)
+ g.cluster, g.remaining, g.boundaries, g.state = StepString(g.remaining, g.state)
+ return true
+}
+
+// Runes returns a slice of runes (code points) which corresponds to the current
+// grapheme cluster. If the iterator is already past the end or [Graphemes.Next]
+// has not yet been called, nil is returned.
+func (g *Graphemes) Runes() []rune {
+ if g.state < 0 {
+ return nil
+ }
+ return []rune(g.cluster)
+}
+
+// Str returns a substring of the original string which corresponds to the
+// current grapheme cluster. If the iterator is already past the end or
+// [Graphemes.Next] has not yet been called, an empty string is returned.
+func (g *Graphemes) Str() string {
+ return g.cluster
+}
+
+// Bytes returns a byte slice which corresponds to the current grapheme cluster.
+// If the iterator is already past the end or [Graphemes.Next] has not yet been
+// called, nil is returned.
+func (g *Graphemes) Bytes() []byte {
+ if g.state < 0 {
+ return nil
+ }
+ return []byte(g.cluster)
+}
+
+// Positions returns the interval of the current grapheme cluster as byte
+// positions into the original string. The first returned value "from" indexes
+// the first byte and the second returned value "to" indexes the first byte that
+// is not included anymore, i.e. str[from:to] is the current grapheme cluster of
+// the original string "str". If [Graphemes.Next] has not yet been called, both
+// values are 0. If the iterator is already past the end, both values are 1.
+func (g *Graphemes) Positions() (int, int) {
+ if g.state == -1 {
+ return 0, 0
+ } else if g.state == -2 {
+ return 1, 1
+ }
+ return g.offset, g.offset + len(g.cluster)
+}
+
+// IsWordBoundary returns true if a word ends after the current grapheme
+// cluster.
+func (g *Graphemes) IsWordBoundary() bool {
+ if g.state < 0 {
+ return true
+ }
+ return g.boundaries&MaskWord != 0
+}
+
+// IsSentenceBoundary returns true if a sentence ends after the current
+// grapheme cluster.
+func (g *Graphemes) IsSentenceBoundary() bool {
+ if g.state < 0 {
+ return true
+ }
+ return g.boundaries&MaskSentence != 0
+}
+
+// LineBreak returns whether the line can be broken after the current grapheme
+// cluster. A value of [LineDontBreak] means the line may not be broken, a value
+// of [LineMustBreak] means the line must be broken, and a value of
+// [LineCanBreak] means the line may or may not be broken.
+func (g *Graphemes) LineBreak() int {
+ if g.state == -1 {
+ return LineDontBreak
+ }
+ if g.state == -2 {
+ return LineMustBreak
+ }
+ return g.boundaries & MaskLine
+}
+
+// Width returns the monospace width of the current grapheme cluster.
+func (g *Graphemes) Width() int {
+ if g.state < 0 {
+ return 0
+ }
+ return g.boundaries >> ShiftWidth
+}
+
+// Reset puts the iterator into its initial state such that the next call to
+// [Graphemes.Next] sets it to the first grapheme cluster again.
+func (g *Graphemes) Reset() {
+ g.state = -1
+ g.offset = 0
+ g.cluster = ""
+ g.remaining = g.original
+}
+
+// GraphemeClusterCount returns the number of user-perceived characters
+// (grapheme clusters) for the given string.
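+//
+// For example, the following call would be expected to return 2, because each
+// flag forms a single grapheme cluster:
+//
+//	GraphemeClusterCount("🇩🇪🏳️‍🌈") // 2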
+func GraphemeClusterCount(s string) (n int) {
+ state := -1
+ for len(s) > 0 {
+ _, s, _, state = FirstGraphemeClusterInString(s, state)
+ n++
+ }
+ return
+}
+
+// ReverseString reverses the given string while observing grapheme cluster
+// boundaries.
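+//
+// For example, the two flags below would be expected to swap places while each
+// flag's code point sequence stays intact:
+//
+//	ReverseString("🇩🇪🏳️‍🌈") // "🏳️‍🌈🇩🇪"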
+func ReverseString(s string) string {
+ str := []byte(s)
+ reversed := make([]byte, len(str))
+ state := -1
+ index := len(str)
+ for len(str) > 0 {
+ var cluster []byte
+ cluster, str, _, state = FirstGraphemeCluster(str, state)
+ index -= len(cluster)
+ copy(reversed[index:], cluster)
+ if index <= len(str)/2 {
+ break
+ }
+ }
+ return string(reversed)
+}
+
+// The number of bits the grapheme property must be shifted to make place for
+// grapheme states.
+const shiftGraphemePropState = 4
+
+// FirstGraphemeCluster returns the first grapheme cluster found in the given
+// byte slice according to the rules of [Unicode Standard Annex #29, Grapheme
+// Cluster Boundaries]. This function can be called continuously to extract all
+// grapheme clusters from a byte slice, as illustrated in the example below.
+//
+// If you don't know the current state, for example when calling the function
+// for the first time, you must pass -1. For consecutive calls, pass the state
+// and rest slice returned by the previous call.
+//
+// The "rest" slice is the sub-slice of the original byte slice "b" starting
+// after the last byte of the identified grapheme cluster. If the length of the
+// "rest" slice is 0, the entire byte slice "b" has been processed. The
+// "cluster" byte slice is the sub-slice of the input slice containing the
+// identified grapheme cluster.
+//
+// The returned width is the width of the grapheme cluster for most monospace
+// fonts where a value of 1 represents one character cell.
+//
+// Given an empty byte slice "b", the function returns nil values.
+//
+// While slightly less convenient than using the Graphemes class, this function
+// has much better performance and makes no allocations. It lends itself well to
+// large byte slices.
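+//
+// A minimal sketch of the intended call pattern (illustrative; "b" is assumed
+// to be a caller-provided byte slice and the per-cluster work is omitted):
+//
+//	state := -1
+//	for len(b) > 0 {
+//		var cluster []byte
+//		cluster, b, _, state = FirstGraphemeCluster(b, state)
+//		// Process "cluster" here.
+//	}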
+//
+// [Unicode Standard Annex #29, Grapheme Cluster Boundaries]: http://unicode.org/reports/tr29/#Grapheme_Cluster_Boundaries
+func FirstGraphemeCluster(b []byte, state int) (cluster, rest []byte, width, newState int) {
+ // An empty byte slice returns nothing.
+ if len(b) == 0 {
+ return
+ }
+
+ // Extract the first rune.
+ r, length := utf8.DecodeRune(b)
+ if len(b) <= length { // If we're already past the end, there is nothing else to parse.
+ var prop int
+ if state < 0 {
+ prop = property(graphemeCodePoints, r)
+ } else {
+ prop = state >> shiftGraphemePropState
+ }
+ return b, nil, runeWidth(r, prop), grAny | (prop << shiftGraphemePropState)
+ }
+
+ // If we don't know the state, determine it now.
+ var firstProp int
+ if state < 0 {
+ state, firstProp, _ = transitionGraphemeState(state, r)
+ } else {
+ firstProp = state >> shiftGraphemePropState
+ }
+ width += runeWidth(r, firstProp)
+
+ // Transition until we find a boundary.
+ for {
+ var (
+ prop int
+ boundary bool
+ )
+
+ r, l := utf8.DecodeRune(b[length:])
+ state, prop, boundary = transitionGraphemeState(state&maskGraphemeState, r)
+
+ if boundary {
+ return b[:length], b[length:], width, state | (prop << shiftGraphemePropState)
+ }
+
+ if r == vs16 {
+ width = 2
+ } else if firstProp != prExtendedPictographic && firstProp != prRegionalIndicator && firstProp != prL {
+ width += runeWidth(r, prop)
+ } else if firstProp == prExtendedPictographic {
+ if r == vs15 {
+ width = 1
+ } else {
+ width = 2
+ }
+ }
+
+ length += l
+ if len(b) <= length {
+ return b, nil, width, grAny | (prop << shiftGraphemePropState)
+ }
+ }
+}
+
+// FirstGraphemeClusterInString is like [FirstGraphemeCluster] but its input and
+// outputs are strings.
+func FirstGraphemeClusterInString(str string, state int) (cluster, rest string, width, newState int) {
+ // An empty string returns nothing.
+ if len(str) == 0 {
+ return
+ }
+
+ // Extract the first rune.
+ r, length := utf8.DecodeRuneInString(str)
+ if len(str) <= length { // If we're already past the end, there is nothing else to parse.
+ var prop int
+ if state < 0 {
+ prop = property(graphemeCodePoints, r)
+ } else {
+ prop = state >> shiftGraphemePropState
+ }
+ return str, "", runeWidth(r, prop), grAny | (prop << shiftGraphemePropState)
+ }
+
+ // If we don't know the state, determine it now.
+ var firstProp int
+ if state < 0 {
+ state, firstProp, _ = transitionGraphemeState(state, r)
+ } else {
+ firstProp = state >> shiftGraphemePropState
+ }
+ width += runeWidth(r, firstProp)
+
+ // Transition until we find a boundary.
+ for {
+ var (
+ prop int
+ boundary bool
+ )
+
+ r, l := utf8.DecodeRuneInString(str[length:])
+ state, prop, boundary = transitionGraphemeState(state&maskGraphemeState, r)
+
+ if boundary {
+ return str[:length], str[length:], width, state | (prop << shiftGraphemePropState)
+ }
+
+ if r == vs16 {
+ width = 2
+ } else if firstProp != prExtendedPictographic && firstProp != prRegionalIndicator && firstProp != prL {
+ width += runeWidth(r, prop)
+ } else if firstProp == prExtendedPictographic {
+ if r == vs15 {
+ width = 1
+ } else {
+ width = 2
+ }
+ }
+
+ length += l
+ if len(str) <= length {
+ return str, "", width, grAny | (prop << shiftGraphemePropState)
+ }
+ }
+}
diff --git a/vendor/github.com/rivo/uniseg/graphemeproperties.go b/vendor/github.com/rivo/uniseg/graphemeproperties.go
new file mode 100644
index 0000000..a87d140
--- /dev/null
+++ b/vendor/github.com/rivo/uniseg/graphemeproperties.go
@@ -0,0 +1,1891 @@
+package uniseg
+
+// Code generated via go generate from gen_properties.go. DO NOT EDIT.
+
+// graphemeCodePoints are taken from
+// https://www.unicode.org/Public/14.0.0/ucd/auxiliary/GraphemeBreakProperty.txt
+// and
+// https://unicode.org/Public/14.0.0/ucd/emoji/emoji-data.txt
+// ("Extended_Pictographic" only)
+// on September 10, 2022. See https://www.unicode.org/license.html for the Unicode
+// license agreement.
+var graphemeCodePoints = [][3]int{
+ {0x0000, 0x0009, prControl}, // Cc [10] <control-0000>..<control-0009>
+ {0x000A, 0x000A, prLF}, // Cc <control-000A>
+ {0x000B, 0x000C, prControl}, // Cc [2] <control-000B>..<control-000C>
+ {0x000D, 0x000D, prCR}, // Cc <control-000D>
+ {0x000E, 0x001F, prControl}, // Cc [18] <control-000E>..<control-001F>
+ {0x007F, 0x009F, prControl}, // Cc [33] <control-007F>..<control-009F>
+ {0x00A9, 0x00A9, prExtendedPictographic}, // E0.6 [1] (©️) copyright
+ {0x00AD, 0x00AD, prControl}, // Cf SOFT HYPHEN
+ {0x00AE, 0x00AE, prExtendedPictographic}, // E0.6 [1] (®️) registered
+ {0x0300, 0x036F, prExtend}, // Mn [112] COMBINING GRAVE ACCENT..COMBINING LATIN SMALL LETTER X
+ {0x0483, 0x0487, prExtend}, // Mn [5] COMBINING CYRILLIC TITLO..COMBINING CYRILLIC POKRYTIE
+ {0x0488, 0x0489, prExtend}, // Me [2] COMBINING CYRILLIC HUNDRED THOUSANDS SIGN..COMBINING CYRILLIC MILLIONS SIGN
+ {0x0591, 0x05BD, prExtend}, // Mn [45] HEBREW ACCENT ETNAHTA..HEBREW POINT METEG
+ {0x05BF, 0x05BF, prExtend}, // Mn HEBREW POINT RAFE
+ {0x05C1, 0x05C2, prExtend}, // Mn [2] HEBREW POINT SHIN DOT..HEBREW POINT SIN DOT
+ {0x05C4, 0x05C5, prExtend}, // Mn [2] HEBREW MARK UPPER DOT..HEBREW MARK LOWER DOT
+ {0x05C7, 0x05C7, prExtend}, // Mn HEBREW POINT QAMATS QATAN
+ {0x0600, 0x0605, prPrepend}, // Cf [6] ARABIC NUMBER SIGN..ARABIC NUMBER MARK ABOVE
+ {0x0610, 0x061A, prExtend}, // Mn [11] ARABIC SIGN SALLALLAHOU ALAYHE WASSALLAM..ARABIC SMALL KASRA
+ {0x061C, 0x061C, prControl}, // Cf ARABIC LETTER MARK
+ {0x064B, 0x065F, prExtend}, // Mn [21] ARABIC FATHATAN..ARABIC WAVY HAMZA BELOW
+ {0x0670, 0x0670, prExtend}, // Mn ARABIC LETTER SUPERSCRIPT ALEF
+ {0x06D6, 0x06DC, prExtend}, // Mn [7] ARABIC SMALL HIGH LIGATURE SAD WITH LAM WITH ALEF MAKSURA..ARABIC SMALL HIGH SEEN
+ {0x06DD, 0x06DD, prPrepend}, // Cf ARABIC END OF AYAH
+ {0x06DF, 0x06E4, prExtend}, // Mn [6] ARABIC SMALL HIGH ROUNDED ZERO..ARABIC SMALL HIGH MADDA
+ {0x06E7, 0x06E8, prExtend}, // Mn [2] ARABIC SMALL HIGH YEH..ARABIC SMALL HIGH NOON
+ {0x06EA, 0x06ED, prExtend}, // Mn [4] ARABIC EMPTY CENTRE LOW STOP..ARABIC SMALL LOW MEEM
+ {0x070F, 0x070F, prPrepend}, // Cf SYRIAC ABBREVIATION MARK
+ {0x0711, 0x0711, prExtend}, // Mn SYRIAC LETTER SUPERSCRIPT ALAPH
+ {0x0730, 0x074A, prExtend}, // Mn [27] SYRIAC PTHAHA ABOVE..SYRIAC BARREKH
+ {0x07A6, 0x07B0, prExtend}, // Mn [11] THAANA ABAFILI..THAANA SUKUN
+ {0x07EB, 0x07F3, prExtend}, // Mn [9] NKO COMBINING SHORT HIGH TONE..NKO COMBINING DOUBLE DOT ABOVE
+ {0x07FD, 0x07FD, prExtend}, // Mn NKO DANTAYALAN
+ {0x0816, 0x0819, prExtend}, // Mn [4] SAMARITAN MARK IN..SAMARITAN MARK DAGESH
+ {0x081B, 0x0823, prExtend}, // Mn [9] SAMARITAN MARK EPENTHETIC YUT..SAMARITAN VOWEL SIGN A
+ {0x0825, 0x0827, prExtend}, // Mn [3] SAMARITAN VOWEL SIGN SHORT A..SAMARITAN VOWEL SIGN U
+ {0x0829, 0x082D, prExtend}, // Mn [5] SAMARITAN VOWEL SIGN LONG I..SAMARITAN MARK NEQUDAA
+ {0x0859, 0x085B, prExtend}, // Mn [3] MANDAIC AFFRICATION MARK..MANDAIC GEMINATION MARK
+ {0x0890, 0x0891, prPrepend}, // Cf [2] ARABIC POUND MARK ABOVE..ARABIC PIASTRE MARK ABOVE
+ {0x0898, 0x089F, prExtend}, // Mn [8] ARABIC SMALL HIGH WORD AL-JUZ..ARABIC HALF MADDA OVER MADDA
+ {0x08CA, 0x08E1, prExtend}, // Mn [24] ARABIC SMALL HIGH FARSI YEH..ARABIC SMALL HIGH SIGN SAFHA
+ {0x08E2, 0x08E2, prPrepend}, // Cf ARABIC DISPUTED END OF AYAH
+ {0x08E3, 0x0902, prExtend}, // Mn [32] ARABIC TURNED DAMMA BELOW..DEVANAGARI SIGN ANUSVARA
+ {0x0903, 0x0903, prSpacingMark}, // Mc DEVANAGARI SIGN VISARGA
+ {0x093A, 0x093A, prExtend}, // Mn DEVANAGARI VOWEL SIGN OE
+ {0x093B, 0x093B, prSpacingMark}, // Mc DEVANAGARI VOWEL SIGN OOE
+ {0x093C, 0x093C, prExtend}, // Mn DEVANAGARI SIGN NUKTA
+ {0x093E, 0x0940, prSpacingMark}, // Mc [3] DEVANAGARI VOWEL SIGN AA..DEVANAGARI VOWEL SIGN II
+ {0x0941, 0x0948, prExtend}, // Mn [8] DEVANAGARI VOWEL SIGN U..DEVANAGARI VOWEL SIGN AI
+ {0x0949, 0x094C, prSpacingMark}, // Mc [4] DEVANAGARI VOWEL SIGN CANDRA O..DEVANAGARI VOWEL SIGN AU
+ {0x094D, 0x094D, prExtend}, // Mn DEVANAGARI SIGN VIRAMA
+ {0x094E, 0x094F, prSpacingMark}, // Mc [2] DEVANAGARI VOWEL SIGN PRISHTHAMATRA E..DEVANAGARI VOWEL SIGN AW
+ {0x0951, 0x0957, prExtend}, // Mn [7] DEVANAGARI STRESS SIGN UDATTA..DEVANAGARI VOWEL SIGN UUE
+ {0x0962, 0x0963, prExtend}, // Mn [2] DEVANAGARI VOWEL SIGN VOCALIC L..DEVANAGARI VOWEL SIGN VOCALIC LL
+ {0x0981, 0x0981, prExtend}, // Mn BENGALI SIGN CANDRABINDU
+ {0x0982, 0x0983, prSpacingMark}, // Mc [2] BENGALI SIGN ANUSVARA..BENGALI SIGN VISARGA
+ {0x09BC, 0x09BC, prExtend}, // Mn BENGALI SIGN NUKTA
+ {0x09BE, 0x09BE, prExtend}, // Mc BENGALI VOWEL SIGN AA
+ {0x09BF, 0x09C0, prSpacingMark}, // Mc [2] BENGALI VOWEL SIGN I..BENGALI VOWEL SIGN II
+ {0x09C1, 0x09C4, prExtend}, // Mn [4] BENGALI VOWEL SIGN U..BENGALI VOWEL SIGN VOCALIC RR
+ {0x09C7, 0x09C8, prSpacingMark}, // Mc [2] BENGALI VOWEL SIGN E..BENGALI VOWEL SIGN AI
+ {0x09CB, 0x09CC, prSpacingMark}, // Mc [2] BENGALI VOWEL SIGN O..BENGALI VOWEL SIGN AU
+ {0x09CD, 0x09CD, prExtend}, // Mn BENGALI SIGN VIRAMA
+ {0x09D7, 0x09D7, prExtend}, // Mc BENGALI AU LENGTH MARK
+ {0x09E2, 0x09E3, prExtend}, // Mn [2] BENGALI VOWEL SIGN VOCALIC L..BENGALI VOWEL SIGN VOCALIC LL
+ {0x09FE, 0x09FE, prExtend}, // Mn BENGALI SANDHI MARK
+ {0x0A01, 0x0A02, prExtend}, // Mn [2] GURMUKHI SIGN ADAK BINDI..GURMUKHI SIGN BINDI
+ {0x0A03, 0x0A03, prSpacingMark}, // Mc GURMUKHI SIGN VISARGA
+ {0x0A3C, 0x0A3C, prExtend}, // Mn GURMUKHI SIGN NUKTA
+ {0x0A3E, 0x0A40, prSpacingMark}, // Mc [3] GURMUKHI VOWEL SIGN AA..GURMUKHI VOWEL SIGN II
+ {0x0A41, 0x0A42, prExtend}, // Mn [2] GURMUKHI VOWEL SIGN U..GURMUKHI VOWEL SIGN UU
+ {0x0A47, 0x0A48, prExtend}, // Mn [2] GURMUKHI VOWEL SIGN EE..GURMUKHI VOWEL SIGN AI
+ {0x0A4B, 0x0A4D, prExtend}, // Mn [3] GURMUKHI VOWEL SIGN OO..GURMUKHI SIGN VIRAMA
+ {0x0A51, 0x0A51, prExtend}, // Mn GURMUKHI SIGN UDAAT
+ {0x0A70, 0x0A71, prExtend}, // Mn [2] GURMUKHI TIPPI..GURMUKHI ADDAK
+ {0x0A75, 0x0A75, prExtend}, // Mn GURMUKHI SIGN YAKASH
+ {0x0A81, 0x0A82, prExtend}, // Mn [2] GUJARATI SIGN CANDRABINDU..GUJARATI SIGN ANUSVARA
+ {0x0A83, 0x0A83, prSpacingMark}, // Mc GUJARATI SIGN VISARGA
+ {0x0ABC, 0x0ABC, prExtend}, // Mn GUJARATI SIGN NUKTA
+ {0x0ABE, 0x0AC0, prSpacingMark}, // Mc [3] GUJARATI VOWEL SIGN AA..GUJARATI VOWEL SIGN II
+ {0x0AC1, 0x0AC5, prExtend}, // Mn [5] GUJARATI VOWEL SIGN U..GUJARATI VOWEL SIGN CANDRA E
+ {0x0AC7, 0x0AC8, prExtend}, // Mn [2] GUJARATI VOWEL SIGN E..GUJARATI VOWEL SIGN AI
+ {0x0AC9, 0x0AC9, prSpacingMark}, // Mc GUJARATI VOWEL SIGN CANDRA O
+ {0x0ACB, 0x0ACC, prSpacingMark}, // Mc [2] GUJARATI VOWEL SIGN O..GUJARATI VOWEL SIGN AU
+ {0x0ACD, 0x0ACD, prExtend}, // Mn GUJARATI SIGN VIRAMA
+ {0x0AE2, 0x0AE3, prExtend}, // Mn [2] GUJARATI VOWEL SIGN VOCALIC L..GUJARATI VOWEL SIGN VOCALIC LL
+ {0x0AFA, 0x0AFF, prExtend}, // Mn [6] GUJARATI SIGN SUKUN..GUJARATI SIGN TWO-CIRCLE NUKTA ABOVE
+ {0x0B01, 0x0B01, prExtend}, // Mn ORIYA SIGN CANDRABINDU
+ {0x0B02, 0x0B03, prSpacingMark}, // Mc [2] ORIYA SIGN ANUSVARA..ORIYA SIGN VISARGA
+ {0x0B3C, 0x0B3C, prExtend}, // Mn ORIYA SIGN NUKTA
+ {0x0B3E, 0x0B3E, prExtend}, // Mc ORIYA VOWEL SIGN AA
+ {0x0B3F, 0x0B3F, prExtend}, // Mn ORIYA VOWEL SIGN I
+ {0x0B40, 0x0B40, prSpacingMark}, // Mc ORIYA VOWEL SIGN II
+ {0x0B41, 0x0B44, prExtend}, // Mn [4] ORIYA VOWEL SIGN U..ORIYA VOWEL SIGN VOCALIC RR
+ {0x0B47, 0x0B48, prSpacingMark}, // Mc [2] ORIYA VOWEL SIGN E..ORIYA VOWEL SIGN AI
+ {0x0B4B, 0x0B4C, prSpacingMark}, // Mc [2] ORIYA VOWEL SIGN O..ORIYA VOWEL SIGN AU
+ {0x0B4D, 0x0B4D, prExtend}, // Mn ORIYA SIGN VIRAMA
+ {0x0B55, 0x0B56, prExtend}, // Mn [2] ORIYA SIGN OVERLINE..ORIYA AI LENGTH MARK
+ {0x0B57, 0x0B57, prExtend}, // Mc ORIYA AU LENGTH MARK
+ {0x0B62, 0x0B63, prExtend}, // Mn [2] ORIYA VOWEL SIGN VOCALIC L..ORIYA VOWEL SIGN VOCALIC LL
+ {0x0B82, 0x0B82, prExtend}, // Mn TAMIL SIGN ANUSVARA
+ {0x0BBE, 0x0BBE, prExtend}, // Mc TAMIL VOWEL SIGN AA
+ {0x0BBF, 0x0BBF, prSpacingMark}, // Mc TAMIL VOWEL SIGN I
+ {0x0BC0, 0x0BC0, prExtend}, // Mn TAMIL VOWEL SIGN II
+ {0x0BC1, 0x0BC2, prSpacingMark}, // Mc [2] TAMIL VOWEL SIGN U..TAMIL VOWEL SIGN UU
+ {0x0BC6, 0x0BC8, prSpacingMark}, // Mc [3] TAMIL VOWEL SIGN E..TAMIL VOWEL SIGN AI
+ {0x0BCA, 0x0BCC, prSpacingMark}, // Mc [3] TAMIL VOWEL SIGN O..TAMIL VOWEL SIGN AU
+ {0x0BCD, 0x0BCD, prExtend}, // Mn TAMIL SIGN VIRAMA
+ {0x0BD7, 0x0BD7, prExtend}, // Mc TAMIL AU LENGTH MARK
+ {0x0C00, 0x0C00, prExtend}, // Mn TELUGU SIGN COMBINING CANDRABINDU ABOVE
+ {0x0C01, 0x0C03, prSpacingMark}, // Mc [3] TELUGU SIGN CANDRABINDU..TELUGU SIGN VISARGA
+ {0x0C04, 0x0C04, prExtend}, // Mn TELUGU SIGN COMBINING ANUSVARA ABOVE
+ {0x0C3C, 0x0C3C, prExtend}, // Mn TELUGU SIGN NUKTA
+ {0x0C3E, 0x0C40, prExtend}, // Mn [3] TELUGU VOWEL SIGN AA..TELUGU VOWEL SIGN II
+ {0x0C41, 0x0C44, prSpacingMark}, // Mc [4] TELUGU VOWEL SIGN U..TELUGU VOWEL SIGN VOCALIC RR
+ {0x0C46, 0x0C48, prExtend}, // Mn [3] TELUGU VOWEL SIGN E..TELUGU VOWEL SIGN AI
+ {0x0C4A, 0x0C4D, prExtend}, // Mn [4] TELUGU VOWEL SIGN O..TELUGU SIGN VIRAMA
+ {0x0C55, 0x0C56, prExtend}, // Mn [2] TELUGU LENGTH MARK..TELUGU AI LENGTH MARK
+ {0x0C62, 0x0C63, prExtend}, // Mn [2] TELUGU VOWEL SIGN VOCALIC L..TELUGU VOWEL SIGN VOCALIC LL
+ {0x0C81, 0x0C81, prExtend}, // Mn KANNADA SIGN CANDRABINDU
+ {0x0C82, 0x0C83, prSpacingMark}, // Mc [2] KANNADA SIGN ANUSVARA..KANNADA SIGN VISARGA
+ {0x0CBC, 0x0CBC, prExtend}, // Mn KANNADA SIGN NUKTA
+ {0x0CBE, 0x0CBE, prSpacingMark}, // Mc KANNADA VOWEL SIGN AA
+ {0x0CBF, 0x0CBF, prExtend}, // Mn KANNADA VOWEL SIGN I
+ {0x0CC0, 0x0CC1, prSpacingMark}, // Mc [2] KANNADA VOWEL SIGN II..KANNADA VOWEL SIGN U
+ {0x0CC2, 0x0CC2, prExtend}, // Mc KANNADA VOWEL SIGN UU
+ {0x0CC3, 0x0CC4, prSpacingMark}, // Mc [2] KANNADA VOWEL SIGN VOCALIC R..KANNADA VOWEL SIGN VOCALIC RR
+ {0x0CC6, 0x0CC6, prExtend}, // Mn KANNADA VOWEL SIGN E
+ {0x0CC7, 0x0CC8, prSpacingMark}, // Mc [2] KANNADA VOWEL SIGN EE..KANNADA VOWEL SIGN AI
+ {0x0CCA, 0x0CCB, prSpacingMark}, // Mc [2] KANNADA VOWEL SIGN O..KANNADA VOWEL SIGN OO
+ {0x0CCC, 0x0CCD, prExtend}, // Mn [2] KANNADA VOWEL SIGN AU..KANNADA SIGN VIRAMA
+ {0x0CD5, 0x0CD6, prExtend}, // Mc [2] KANNADA LENGTH MARK..KANNADA AI LENGTH MARK
+ {0x0CE2, 0x0CE3, prExtend}, // Mn [2] KANNADA VOWEL SIGN VOCALIC L..KANNADA VOWEL SIGN VOCALIC LL
+ {0x0D00, 0x0D01, prExtend}, // Mn [2] MALAYALAM SIGN COMBINING ANUSVARA ABOVE..MALAYALAM SIGN CANDRABINDU
+ {0x0D02, 0x0D03, prSpacingMark}, // Mc [2] MALAYALAM SIGN ANUSVARA..MALAYALAM SIGN VISARGA
+ {0x0D3B, 0x0D3C, prExtend}, // Mn [2] MALAYALAM SIGN VERTICAL BAR VIRAMA..MALAYALAM SIGN CIRCULAR VIRAMA
+ {0x0D3E, 0x0D3E, prExtend}, // Mc MALAYALAM VOWEL SIGN AA
+ {0x0D3F, 0x0D40, prSpacingMark}, // Mc [2] MALAYALAM VOWEL SIGN I..MALAYALAM VOWEL SIGN II
+ {0x0D41, 0x0D44, prExtend}, // Mn [4] MALAYALAM VOWEL SIGN U..MALAYALAM VOWEL SIGN VOCALIC RR
+ {0x0D46, 0x0D48, prSpacingMark}, // Mc [3] MALAYALAM VOWEL SIGN E..MALAYALAM VOWEL SIGN AI
+ {0x0D4A, 0x0D4C, prSpacingMark}, // Mc [3] MALAYALAM VOWEL SIGN O..MALAYALAM VOWEL SIGN AU
+ {0x0D4D, 0x0D4D, prExtend}, // Mn MALAYALAM SIGN VIRAMA
+ {0x0D4E, 0x0D4E, prPrepend}, // Lo MALAYALAM LETTER DOT REPH
+ {0x0D57, 0x0D57, prExtend}, // Mc MALAYALAM AU LENGTH MARK
+ {0x0D62, 0x0D63, prExtend}, // Mn [2] MALAYALAM VOWEL SIGN VOCALIC L..MALAYALAM VOWEL SIGN VOCALIC LL
+ {0x0D81, 0x0D81, prExtend}, // Mn SINHALA SIGN CANDRABINDU
+ {0x0D82, 0x0D83, prSpacingMark}, // Mc [2] SINHALA SIGN ANUSVARAYA..SINHALA SIGN VISARGAYA
+ {0x0DCA, 0x0DCA, prExtend}, // Mn SINHALA SIGN AL-LAKUNA
+ {0x0DCF, 0x0DCF, prExtend}, // Mc SINHALA VOWEL SIGN AELA-PILLA
+ {0x0DD0, 0x0DD1, prSpacingMark}, // Mc [2] SINHALA VOWEL SIGN KETTI AEDA-PILLA..SINHALA VOWEL SIGN DIGA AEDA-PILLA
+ {0x0DD2, 0x0DD4, prExtend}, // Mn [3] SINHALA VOWEL SIGN KETTI IS-PILLA..SINHALA VOWEL SIGN KETTI PAA-PILLA
+ {0x0DD6, 0x0DD6, prExtend}, // Mn SINHALA VOWEL SIGN DIGA PAA-PILLA
+ {0x0DD8, 0x0DDE, prSpacingMark}, // Mc [7] SINHALA VOWEL SIGN GAETTA-PILLA..SINHALA VOWEL SIGN KOMBUVA HAA GAYANUKITTA
+ {0x0DDF, 0x0DDF, prExtend}, // Mc SINHALA VOWEL SIGN GAYANUKITTA
+ {0x0DF2, 0x0DF3, prSpacingMark}, // Mc [2] SINHALA VOWEL SIGN DIGA GAETTA-PILLA..SINHALA VOWEL SIGN DIGA GAYANUKITTA
+ {0x0E31, 0x0E31, prExtend}, // Mn THAI CHARACTER MAI HAN-AKAT
+ {0x0E33, 0x0E33, prSpacingMark}, // Lo THAI CHARACTER SARA AM
+ {0x0E34, 0x0E3A, prExtend}, // Mn [7] THAI CHARACTER SARA I..THAI CHARACTER PHINTHU
+ {0x0E47, 0x0E4E, prExtend}, // Mn [8] THAI CHARACTER MAITAIKHU..THAI CHARACTER YAMAKKAN
+ {0x0EB1, 0x0EB1, prExtend}, // Mn LAO VOWEL SIGN MAI KAN
+ {0x0EB3, 0x0EB3, prSpacingMark}, // Lo LAO VOWEL SIGN AM
+ {0x0EB4, 0x0EBC, prExtend}, // Mn [9] LAO VOWEL SIGN I..LAO SEMIVOWEL SIGN LO
+ {0x0EC8, 0x0ECD, prExtend}, // Mn [6] LAO TONE MAI EK..LAO NIGGAHITA
+ {0x0F18, 0x0F19, prExtend}, // Mn [2] TIBETAN ASTROLOGICAL SIGN -KHYUD PA..TIBETAN ASTROLOGICAL SIGN SDONG TSHUGS
+ {0x0F35, 0x0F35, prExtend}, // Mn TIBETAN MARK NGAS BZUNG NYI ZLA
+ {0x0F37, 0x0F37, prExtend}, // Mn TIBETAN MARK NGAS BZUNG SGOR RTAGS
+ {0x0F39, 0x0F39, prExtend}, // Mn TIBETAN MARK TSA -PHRU
+ {0x0F3E, 0x0F3F, prSpacingMark}, // Mc [2] TIBETAN SIGN YAR TSHES..TIBETAN SIGN MAR TSHES
+ {0x0F71, 0x0F7E, prExtend}, // Mn [14] TIBETAN VOWEL SIGN AA..TIBETAN SIGN RJES SU NGA RO
+ {0x0F7F, 0x0F7F, prSpacingMark}, // Mc TIBETAN SIGN RNAM BCAD
+ {0x0F80, 0x0F84, prExtend}, // Mn [5] TIBETAN VOWEL SIGN REVERSED I..TIBETAN MARK HALANTA
+ {0x0F86, 0x0F87, prExtend}, // Mn [2] TIBETAN SIGN LCI RTAGS..TIBETAN SIGN YANG RTAGS
+ {0x0F8D, 0x0F97, prExtend}, // Mn [11] TIBETAN SUBJOINED SIGN LCE TSA CAN..TIBETAN SUBJOINED LETTER JA
+ {0x0F99, 0x0FBC, prExtend}, // Mn [36] TIBETAN SUBJOINED LETTER NYA..TIBETAN SUBJOINED LETTER FIXED-FORM RA
+ {0x0FC6, 0x0FC6, prExtend}, // Mn TIBETAN SYMBOL PADMA GDAN
+ {0x102D, 0x1030, prExtend}, // Mn [4] MYANMAR VOWEL SIGN I..MYANMAR VOWEL SIGN UU
+ {0x1031, 0x1031, prSpacingMark}, // Mc MYANMAR VOWEL SIGN E
+ {0x1032, 0x1037, prExtend}, // Mn [6] MYANMAR VOWEL SIGN AI..MYANMAR SIGN DOT BELOW
+ {0x1039, 0x103A, prExtend}, // Mn [2] MYANMAR SIGN VIRAMA..MYANMAR SIGN ASAT
+ {0x103B, 0x103C, prSpacingMark}, // Mc [2] MYANMAR CONSONANT SIGN MEDIAL YA..MYANMAR CONSONANT SIGN MEDIAL RA
+ {0x103D, 0x103E, prExtend}, // Mn [2] MYANMAR CONSONANT SIGN MEDIAL WA..MYANMAR CONSONANT SIGN MEDIAL HA
+ {0x1056, 0x1057, prSpacingMark}, // Mc [2] MYANMAR VOWEL SIGN VOCALIC R..MYANMAR VOWEL SIGN VOCALIC RR
+ {0x1058, 0x1059, prExtend}, // Mn [2] MYANMAR VOWEL SIGN VOCALIC L..MYANMAR VOWEL SIGN VOCALIC LL
+ {0x105E, 0x1060, prExtend}, // Mn [3] MYANMAR CONSONANT SIGN MON MEDIAL NA..MYANMAR CONSONANT SIGN MON MEDIAL LA
+ {0x1071, 0x1074, prExtend}, // Mn [4] MYANMAR VOWEL SIGN GEBA KAREN I..MYANMAR VOWEL SIGN KAYAH EE
+ {0x1082, 0x1082, prExtend}, // Mn MYANMAR CONSONANT SIGN SHAN MEDIAL WA
+ {0x1084, 0x1084, prSpacingMark}, // Mc MYANMAR VOWEL SIGN SHAN E
+ {0x1085, 0x1086, prExtend}, // Mn [2] MYANMAR VOWEL SIGN SHAN E ABOVE..MYANMAR VOWEL SIGN SHAN FINAL Y
+ {0x108D, 0x108D, prExtend}, // Mn MYANMAR SIGN SHAN COUNCIL EMPHATIC TONE
+ {0x109D, 0x109D, prExtend}, // Mn MYANMAR VOWEL SIGN AITON AI
+ {0x1100, 0x115F, prL}, // Lo [96] HANGUL CHOSEONG KIYEOK..HANGUL CHOSEONG FILLER
+ {0x1160, 0x11A7, prV}, // Lo [72] HANGUL JUNGSEONG FILLER..HANGUL JUNGSEONG O-YAE
+ {0x11A8, 0x11FF, prT}, // Lo [88] HANGUL JONGSEONG KIYEOK..HANGUL JONGSEONG SSANGNIEUN
+ {0x135D, 0x135F, prExtend}, // Mn [3] ETHIOPIC COMBINING GEMINATION AND VOWEL LENGTH MARK..ETHIOPIC COMBINING GEMINATION MARK
+ {0x1712, 0x1714, prExtend}, // Mn [3] TAGALOG VOWEL SIGN I..TAGALOG SIGN VIRAMA
+ {0x1715, 0x1715, prSpacingMark}, // Mc TAGALOG SIGN PAMUDPOD
+ {0x1732, 0x1733, prExtend}, // Mn [2] HANUNOO VOWEL SIGN I..HANUNOO VOWEL SIGN U
+ {0x1734, 0x1734, prSpacingMark}, // Mc HANUNOO SIGN PAMUDPOD
+ {0x1752, 0x1753, prExtend}, // Mn [2] BUHID VOWEL SIGN I..BUHID VOWEL SIGN U
+ {0x1772, 0x1773, prExtend}, // Mn [2] TAGBANWA VOWEL SIGN I..TAGBANWA VOWEL SIGN U
+ {0x17B4, 0x17B5, prExtend}, // Mn [2] KHMER VOWEL INHERENT AQ..KHMER VOWEL INHERENT AA
+ {0x17B6, 0x17B6, prSpacingMark}, // Mc KHMER VOWEL SIGN AA
+ {0x17B7, 0x17BD, prExtend}, // Mn [7] KHMER VOWEL SIGN I..KHMER VOWEL SIGN UA
+ {0x17BE, 0x17C5, prSpacingMark}, // Mc [8] KHMER VOWEL SIGN OE..KHMER VOWEL SIGN AU
+ {0x17C6, 0x17C6, prExtend}, // Mn KHMER SIGN NIKAHIT
+ {0x17C7, 0x17C8, prSpacingMark}, // Mc [2] KHMER SIGN REAHMUK..KHMER SIGN YUUKALEAPINTU
+ {0x17C9, 0x17D3, prExtend}, // Mn [11] KHMER SIGN MUUSIKATOAN..KHMER SIGN BATHAMASAT
+ {0x17DD, 0x17DD, prExtend}, // Mn KHMER SIGN ATTHACAN
+ {0x180B, 0x180D, prExtend}, // Mn [3] MONGOLIAN FREE VARIATION SELECTOR ONE..MONGOLIAN FREE VARIATION SELECTOR THREE
+ {0x180E, 0x180E, prControl}, // Cf MONGOLIAN VOWEL SEPARATOR
+ {0x180F, 0x180F, prExtend}, // Mn MONGOLIAN FREE VARIATION SELECTOR FOUR
+ {0x1885, 0x1886, prExtend}, // Mn [2] MONGOLIAN LETTER ALI GALI BALUDA..MONGOLIAN LETTER ALI GALI THREE BALUDA
+ {0x18A9, 0x18A9, prExtend}, // Mn MONGOLIAN LETTER ALI GALI DAGALGA
+ {0x1920, 0x1922, prExtend}, // Mn [3] LIMBU VOWEL SIGN A..LIMBU VOWEL SIGN U
+ {0x1923, 0x1926, prSpacingMark}, // Mc [4] LIMBU VOWEL SIGN EE..LIMBU VOWEL SIGN AU
+ {0x1927, 0x1928, prExtend}, // Mn [2] LIMBU VOWEL SIGN E..LIMBU VOWEL SIGN O
+ {0x1929, 0x192B, prSpacingMark}, // Mc [3] LIMBU SUBJOINED LETTER YA..LIMBU SUBJOINED LETTER WA
+ {0x1930, 0x1931, prSpacingMark}, // Mc [2] LIMBU SMALL LETTER KA..LIMBU SMALL LETTER NGA
+ {0x1932, 0x1932, prExtend}, // Mn LIMBU SMALL LETTER ANUSVARA
+ {0x1933, 0x1938, prSpacingMark}, // Mc [6] LIMBU SMALL LETTER TA..LIMBU SMALL LETTER LA
+ {0x1939, 0x193B, prExtend}, // Mn [3] LIMBU SIGN MUKPHRENG..LIMBU SIGN SA-I
+ {0x1A17, 0x1A18, prExtend}, // Mn [2] BUGINESE VOWEL SIGN I..BUGINESE VOWEL SIGN U
+ {0x1A19, 0x1A1A, prSpacingMark}, // Mc [2] BUGINESE VOWEL SIGN E..BUGINESE VOWEL SIGN O
+ {0x1A1B, 0x1A1B, prExtend}, // Mn BUGINESE VOWEL SIGN AE
+ {0x1A55, 0x1A55, prSpacingMark}, // Mc TAI THAM CONSONANT SIGN MEDIAL RA
+ {0x1A56, 0x1A56, prExtend}, // Mn TAI THAM CONSONANT SIGN MEDIAL LA
+ {0x1A57, 0x1A57, prSpacingMark}, // Mc TAI THAM CONSONANT SIGN LA TANG LAI
+ {0x1A58, 0x1A5E, prExtend}, // Mn [7] TAI THAM SIGN MAI KANG LAI..TAI THAM CONSONANT SIGN SA
+ {0x1A60, 0x1A60, prExtend}, // Mn TAI THAM SIGN SAKOT
+ {0x1A62, 0x1A62, prExtend}, // Mn TAI THAM VOWEL SIGN MAI SAT
+ {0x1A65, 0x1A6C, prExtend}, // Mn [8] TAI THAM VOWEL SIGN I..TAI THAM VOWEL SIGN OA BELOW
+ {0x1A6D, 0x1A72, prSpacingMark}, // Mc [6] TAI THAM VOWEL SIGN OY..TAI THAM VOWEL SIGN THAM AI
+ {0x1A73, 0x1A7C, prExtend}, // Mn [10] TAI THAM VOWEL SIGN OA ABOVE..TAI THAM SIGN KHUEN-LUE KARAN
+ {0x1A7F, 0x1A7F, prExtend}, // Mn TAI THAM COMBINING CRYPTOGRAMMIC DOT
+ {0x1AB0, 0x1ABD, prExtend}, // Mn [14] COMBINING DOUBLED CIRCUMFLEX ACCENT..COMBINING PARENTHESES BELOW
+ {0x1ABE, 0x1ABE, prExtend}, // Me COMBINING PARENTHESES OVERLAY
+ {0x1ABF, 0x1ACE, prExtend}, // Mn [16] COMBINING LATIN SMALL LETTER W BELOW..COMBINING LATIN SMALL LETTER INSULAR T
+ {0x1B00, 0x1B03, prExtend}, // Mn [4] BALINESE SIGN ULU RICEM..BALINESE SIGN SURANG
+ {0x1B04, 0x1B04, prSpacingMark}, // Mc BALINESE SIGN BISAH
+ {0x1B34, 0x1B34, prExtend}, // Mn BALINESE SIGN REREKAN
+ {0x1B35, 0x1B35, prExtend}, // Mc BALINESE VOWEL SIGN TEDUNG
+ {0x1B36, 0x1B3A, prExtend}, // Mn [5] BALINESE VOWEL SIGN ULU..BALINESE VOWEL SIGN RA REPA
+ {0x1B3B, 0x1B3B, prSpacingMark}, // Mc BALINESE VOWEL SIGN RA REPA TEDUNG
+ {0x1B3C, 0x1B3C, prExtend}, // Mn BALINESE VOWEL SIGN LA LENGA
+ {0x1B3D, 0x1B41, prSpacingMark}, // Mc [5] BALINESE VOWEL SIGN LA LENGA TEDUNG..BALINESE VOWEL SIGN TALING REPA TEDUNG
+ {0x1B42, 0x1B42, prExtend}, // Mn BALINESE VOWEL SIGN PEPET
+ {0x1B43, 0x1B44, prSpacingMark}, // Mc [2] BALINESE VOWEL SIGN PEPET TEDUNG..BALINESE ADEG ADEG
+ {0x1B6B, 0x1B73, prExtend}, // Mn [9] BALINESE MUSICAL SYMBOL COMBINING TEGEH..BALINESE MUSICAL SYMBOL COMBINING GONG
+ {0x1B80, 0x1B81, prExtend}, // Mn [2] SUNDANESE SIGN PANYECEK..SUNDANESE SIGN PANGLAYAR
+ {0x1B82, 0x1B82, prSpacingMark}, // Mc SUNDANESE SIGN PANGWISAD
+ {0x1BA1, 0x1BA1, prSpacingMark}, // Mc SUNDANESE CONSONANT SIGN PAMINGKAL
+ {0x1BA2, 0x1BA5, prExtend}, // Mn [4] SUNDANESE CONSONANT SIGN PANYAKRA..SUNDANESE VOWEL SIGN PANYUKU
+ {0x1BA6, 0x1BA7, prSpacingMark}, // Mc [2] SUNDANESE VOWEL SIGN PANAELAENG..SUNDANESE VOWEL SIGN PANOLONG
+ {0x1BA8, 0x1BA9, prExtend}, // Mn [2] SUNDANESE VOWEL SIGN PAMEPET..SUNDANESE VOWEL SIGN PANEULEUNG
+ {0x1BAA, 0x1BAA, prSpacingMark}, // Mc SUNDANESE SIGN PAMAAEH
+ {0x1BAB, 0x1BAD, prExtend}, // Mn [3] SUNDANESE SIGN VIRAMA..SUNDANESE CONSONANT SIGN PASANGAN WA
+ {0x1BE6, 0x1BE6, prExtend}, // Mn BATAK SIGN TOMPI
+ {0x1BE7, 0x1BE7, prSpacingMark}, // Mc BATAK VOWEL SIGN E
+ {0x1BE8, 0x1BE9, prExtend}, // Mn [2] BATAK VOWEL SIGN PAKPAK E..BATAK VOWEL SIGN EE
+ {0x1BEA, 0x1BEC, prSpacingMark}, // Mc [3] BATAK VOWEL SIGN I..BATAK VOWEL SIGN O
+ {0x1BED, 0x1BED, prExtend}, // Mn BATAK VOWEL SIGN KARO O
+ {0x1BEE, 0x1BEE, prSpacingMark}, // Mc BATAK VOWEL SIGN U
+ {0x1BEF, 0x1BF1, prExtend}, // Mn [3] BATAK VOWEL SIGN U FOR SIMALUNGUN SA..BATAK CONSONANT SIGN H
+ {0x1BF2, 0x1BF3, prSpacingMark}, // Mc [2] BATAK PANGOLAT..BATAK PANONGONAN
+ {0x1C24, 0x1C2B, prSpacingMark}, // Mc [8] LEPCHA SUBJOINED LETTER YA..LEPCHA VOWEL SIGN UU
+ {0x1C2C, 0x1C33, prExtend}, // Mn [8] LEPCHA VOWEL SIGN E..LEPCHA CONSONANT SIGN T
+ {0x1C34, 0x1C35, prSpacingMark}, // Mc [2] LEPCHA CONSONANT SIGN NYIN-DO..LEPCHA CONSONANT SIGN KANG
+ {0x1C36, 0x1C37, prExtend}, // Mn [2] LEPCHA SIGN RAN..LEPCHA SIGN NUKTA
+ {0x1CD0, 0x1CD2, prExtend}, // Mn [3] VEDIC TONE KARSHANA..VEDIC TONE PRENKHA
+ {0x1CD4, 0x1CE0, prExtend}, // Mn [13] VEDIC SIGN YAJURVEDIC MIDLINE SVARITA..VEDIC TONE RIGVEDIC KASHMIRI INDEPENDENT SVARITA
+ {0x1CE1, 0x1CE1, prSpacingMark}, // Mc VEDIC TONE ATHARVAVEDIC INDEPENDENT SVARITA
+ {0x1CE2, 0x1CE8, prExtend}, // Mn [7] VEDIC SIGN VISARGA SVARITA..VEDIC SIGN VISARGA ANUDATTA WITH TAIL
+ {0x1CED, 0x1CED, prExtend}, // Mn VEDIC SIGN TIRYAK
+ {0x1CF4, 0x1CF4, prExtend}, // Mn VEDIC TONE CANDRA ABOVE
+ {0x1CF7, 0x1CF7, prSpacingMark}, // Mc VEDIC SIGN ATIKRAMA
+ {0x1CF8, 0x1CF9, prExtend}, // Mn [2] VEDIC TONE RING ABOVE..VEDIC TONE DOUBLE RING ABOVE
+ {0x1DC0, 0x1DFF, prExtend}, // Mn [64] COMBINING DOTTED GRAVE ACCENT..COMBINING RIGHT ARROWHEAD AND DOWN ARROWHEAD BELOW
+ {0x200B, 0x200B, prControl}, // Cf ZERO WIDTH SPACE
+ {0x200C, 0x200C, prExtend}, // Cf ZERO WIDTH NON-JOINER
+ {0x200D, 0x200D, prZWJ}, // Cf ZERO WIDTH JOINER
+ {0x200E, 0x200F, prControl}, // Cf [2] LEFT-TO-RIGHT MARK..RIGHT-TO-LEFT MARK
+ {0x2028, 0x2028, prControl}, // Zl LINE SEPARATOR
+ {0x2029, 0x2029, prControl}, // Zp PARAGRAPH SEPARATOR
+ {0x202A, 0x202E, prControl}, // Cf [5] LEFT-TO-RIGHT EMBEDDING..RIGHT-TO-LEFT OVERRIDE
+ {0x203C, 0x203C, prExtendedPictographic}, // E0.6 [1] (‼️) double exclamation mark
+ {0x2049, 0x2049, prExtendedPictographic}, // E0.6 [1] (⁉️) exclamation question mark
+ {0x2060, 0x2064, prControl}, // Cf [5] WORD JOINER..INVISIBLE PLUS
+ {0x2065, 0x2065, prControl}, // Cn <reserved-2065>
+ {0x2066, 0x206F, prControl}, // Cf [10] LEFT-TO-RIGHT ISOLATE..NOMINAL DIGIT SHAPES
+ {0x20D0, 0x20DC, prExtend}, // Mn [13] COMBINING LEFT HARPOON ABOVE..COMBINING FOUR DOTS ABOVE
+ {0x20DD, 0x20E0, prExtend}, // Me [4] COMBINING ENCLOSING CIRCLE..COMBINING ENCLOSING CIRCLE BACKSLASH
+ {0x20E1, 0x20E1, prExtend}, // Mn COMBINING LEFT RIGHT ARROW ABOVE
+ {0x20E2, 0x20E4, prExtend}, // Me [3] COMBINING ENCLOSING SCREEN..COMBINING ENCLOSING UPWARD POINTING TRIANGLE
+ {0x20E5, 0x20F0, prExtend}, // Mn [12] COMBINING REVERSE SOLIDUS OVERLAY..COMBINING ASTERISK ABOVE
+ {0x2122, 0x2122, prExtendedPictographic}, // E0.6 [1] (™️) trade mark
+ {0x2139, 0x2139, prExtendedPictographic}, // E0.6 [1] (ℹ️) information
+ {0x2194, 0x2199, prExtendedPictographic}, // E0.6 [6] (↔️..↙️) left-right arrow..down-left arrow
+ {0x21A9, 0x21AA, prExtendedPictographic}, // E0.6 [2] (↩️..↪️) right arrow curving left..left arrow curving right
+ {0x231A, 0x231B, prExtendedPictographic}, // E0.6 [2] (⌚..⌛) watch..hourglass done
+ {0x2328, 0x2328, prExtendedPictographic}, // E1.0 [1] (⌨️) keyboard
+ {0x2388, 0x2388, prExtendedPictographic}, // E0.0 [1] (⎈) HELM SYMBOL
+ {0x23CF, 0x23CF, prExtendedPictographic}, // E1.0 [1] (⏏️) eject button
+ {0x23E9, 0x23EC, prExtendedPictographic}, // E0.6 [4] (⏩..⏬) fast-forward button..fast down button
+ {0x23ED, 0x23EE, prExtendedPictographic}, // E0.7 [2] (⏭️..⏮️) next track button..last track button
+ {0x23EF, 0x23EF, prExtendedPictographic}, // E1.0 [1] (⏯️) play or pause button
+ {0x23F0, 0x23F0, prExtendedPictographic}, // E0.6 [1] (⏰) alarm clock
+ {0x23F1, 0x23F2, prExtendedPictographic}, // E1.0 [2] (⏱️..⏲️) stopwatch..timer clock
+ {0x23F3, 0x23F3, prExtendedPictographic}, // E0.6 [1] (⏳) hourglass not done
+ {0x23F8, 0x23FA, prExtendedPictographic}, // E0.7 [3] (⏸️..⏺️) pause button..record button
+ {0x24C2, 0x24C2, prExtendedPictographic}, // E0.6 [1] (Ⓜ️) circled M
+ {0x25AA, 0x25AB, prExtendedPictographic}, // E0.6 [2] (▪️..▫️) black small square..white small square
+ {0x25B6, 0x25B6, prExtendedPictographic}, // E0.6 [1] (▶️) play button
+ {0x25C0, 0x25C0, prExtendedPictographic}, // E0.6 [1] (◀️) reverse button
+ {0x25FB, 0x25FE, prExtendedPictographic}, // E0.6 [4] (◻️..◾) white medium square..black medium-small square
+ {0x2600, 0x2601, prExtendedPictographic}, // E0.6 [2] (☀️..☁️) sun..cloud
+ {0x2602, 0x2603, prExtendedPictographic}, // E0.7 [2] (☂️..☃️) umbrella..snowman
+ {0x2604, 0x2604, prExtendedPictographic}, // E1.0 [1] (☄️) comet
+ {0x2605, 0x2605, prExtendedPictographic}, // E0.0 [1] (★) BLACK STAR
+ {0x2607, 0x260D, prExtendedPictographic}, // E0.0 [7] (☇..☍) LIGHTNING..OPPOSITION
+ {0x260E, 0x260E, prExtendedPictographic}, // E0.6 [1] (☎️) telephone
+ {0x260F, 0x2610, prExtendedPictographic}, // E0.0 [2] (☏..☐) WHITE TELEPHONE..BALLOT BOX
+ {0x2611, 0x2611, prExtendedPictographic}, // E0.6 [1] (☑️) check box with check
+ {0x2612, 0x2612, prExtendedPictographic}, // E0.0 [1] (☒) BALLOT BOX WITH X
+ {0x2614, 0x2615, prExtendedPictographic}, // E0.6 [2] (☔..☕) umbrella with rain drops..hot beverage
+ {0x2616, 0x2617, prExtendedPictographic}, // E0.0 [2] (☖..☗) WHITE SHOGI PIECE..BLACK SHOGI PIECE
+ {0x2618, 0x2618, prExtendedPictographic}, // E1.0 [1] (☘️) shamrock
+ {0x2619, 0x261C, prExtendedPictographic}, // E0.0 [4] (☙..☜) REVERSED ROTATED FLORAL HEART BULLET..WHITE LEFT POINTING INDEX
+ {0x261D, 0x261D, prExtendedPictographic}, // E0.6 [1] (☝️) index pointing up
+ {0x261E, 0x261F, prExtendedPictographic}, // E0.0 [2] (☞..☟) WHITE RIGHT POINTING INDEX..WHITE DOWN POINTING INDEX
+ {0x2620, 0x2620, prExtendedPictographic}, // E1.0 [1] (☠️) skull and crossbones
+ {0x2621, 0x2621, prExtendedPictographic}, // E0.0 [1] (☡) CAUTION SIGN
+ {0x2622, 0x2623, prExtendedPictographic}, // E1.0 [2] (☢️..☣️) radioactive..biohazard
+ {0x2624, 0x2625, prExtendedPictographic}, // E0.0 [2] (☤..☥) CADUCEUS..ANKH
+ {0x2626, 0x2626, prExtendedPictographic}, // E1.0 [1] (☦️) orthodox cross
+ {0x2627, 0x2629, prExtendedPictographic}, // E0.0 [3] (☧..☩) CHI RHO..CROSS OF JERUSALEM
+ {0x262A, 0x262A, prExtendedPictographic}, // E0.7 [1] (☪️) star and crescent
+ {0x262B, 0x262D, prExtendedPictographic}, // E0.0 [3] (☫..☭) FARSI SYMBOL..HAMMER AND SICKLE
+ {0x262E, 0x262E, prExtendedPictographic}, // E1.0 [1] (☮️) peace symbol
+ {0x262F, 0x262F, prExtendedPictographic}, // E0.7 [1] (☯️) yin yang
+ {0x2630, 0x2637, prExtendedPictographic}, // E0.0 [8] (☰..☷) TRIGRAM FOR HEAVEN..TRIGRAM FOR EARTH
+ {0x2638, 0x2639, prExtendedPictographic}, // E0.7 [2] (☸️..☹️) wheel of dharma..frowning face
+ {0x263A, 0x263A, prExtendedPictographic}, // E0.6 [1] (☺️) smiling face
+ {0x263B, 0x263F, prExtendedPictographic}, // E0.0 [5] (☻..☿) BLACK SMILING FACE..MERCURY
+ {0x2640, 0x2640, prExtendedPictographic}, // E4.0 [1] (♀️) female sign
+ {0x2641, 0x2641, prExtendedPictographic}, // E0.0 [1] (♁) EARTH
+ {0x2642, 0x2642, prExtendedPictographic}, // E4.0 [1] (♂️) male sign
+ {0x2643, 0x2647, prExtendedPictographic}, // E0.0 [5] (♃..♇) JUPITER..PLUTO
+ {0x2648, 0x2653, prExtendedPictographic}, // E0.6 [12] (♈..♓) Aries..Pisces
+ {0x2654, 0x265E, prExtendedPictographic}, // E0.0 [11] (♔..♞) WHITE CHESS KING..BLACK CHESS KNIGHT
+ {0x265F, 0x265F, prExtendedPictographic}, // E11.0 [1] (♟️) chess pawn
+ {0x2660, 0x2660, prExtendedPictographic}, // E0.6 [1] (♠️) spade suit
+ {0x2661, 0x2662, prExtendedPictographic}, // E0.0 [2] (♡..♢) WHITE HEART SUIT..WHITE DIAMOND SUIT
+ {0x2663, 0x2663, prExtendedPictographic}, // E0.6 [1] (♣️) club suit
+ {0x2664, 0x2664, prExtendedPictographic}, // E0.0 [1] (♤) WHITE SPADE SUIT
+ {0x2665, 0x2666, prExtendedPictographic}, // E0.6 [2] (♥️..♦️) heart suit..diamond suit
+ {0x2667, 0x2667, prExtendedPictographic}, // E0.0 [1] (♧) WHITE CLUB SUIT
+ {0x2668, 0x2668, prExtendedPictographic}, // E0.6 [1] (♨️) hot springs
+ {0x2669, 0x267A, prExtendedPictographic}, // E0.0 [18] (♩..♺) QUARTER NOTE..RECYCLING SYMBOL FOR GENERIC MATERIALS
+ {0x267B, 0x267B, prExtendedPictographic}, // E0.6 [1] (♻️) recycling symbol
+ {0x267C, 0x267D, prExtendedPictographic}, // E0.0 [2] (♼..♽) RECYCLED PAPER SYMBOL..PARTIALLY-RECYCLED PAPER SYMBOL
+ {0x267E, 0x267E, prExtendedPictographic}, // E11.0 [1] (♾️) infinity
+ {0x267F, 0x267F, prExtendedPictographic}, // E0.6 [1] (♿) wheelchair symbol
+ {0x2680, 0x2685, prExtendedPictographic}, // E0.0 [6] (⚀..⚅) DIE FACE-1..DIE FACE-6
+ {0x2690, 0x2691, prExtendedPictographic}, // E0.0 [2] (⚐..⚑) WHITE FLAG..BLACK FLAG
+ {0x2692, 0x2692, prExtendedPictographic}, // E1.0 [1] (⚒️) hammer and pick
+ {0x2693, 0x2693, prExtendedPictographic}, // E0.6 [1] (⚓) anchor
+ {0x2694, 0x2694, prExtendedPictographic}, // E1.0 [1] (⚔️) crossed swords
+ {0x2695, 0x2695, prExtendedPictographic}, // E4.0 [1] (⚕️) medical symbol
+ {0x2696, 0x2697, prExtendedPictographic}, // E1.0 [2] (⚖️..⚗️) balance scale..alembic
+ {0x2698, 0x2698, prExtendedPictographic}, // E0.0 [1] (⚘) FLOWER
+ {0x2699, 0x2699, prExtendedPictographic}, // E1.0 [1] (⚙️) gear
+ {0x269A, 0x269A, prExtendedPictographic}, // E0.0 [1] (⚚) STAFF OF HERMES
+ {0x269B, 0x269C, prExtendedPictographic}, // E1.0 [2] (⚛️..⚜️) atom symbol..fleur-de-lis
+ {0x269D, 0x269F, prExtendedPictographic}, // E0.0 [3] (⚝..⚟) OUTLINED WHITE STAR..THREE LINES CONVERGING LEFT
+ {0x26A0, 0x26A1, prExtendedPictographic}, // E0.6 [2] (⚠️..⚡) warning..high voltage
+ {0x26A2, 0x26A6, prExtendedPictographic}, // E0.0 [5] (⚢..⚦) DOUBLED FEMALE SIGN..MALE WITH STROKE SIGN
+ {0x26A7, 0x26A7, prExtendedPictographic}, // E13.0 [1] (⚧️) transgender symbol
+ {0x26A8, 0x26A9, prExtendedPictographic}, // E0.0 [2] (⚨..⚩) VERTICAL MALE WITH STROKE SIGN..HORIZONTAL MALE WITH STROKE SIGN
+ {0x26AA, 0x26AB, prExtendedPictographic}, // E0.6 [2] (⚪..⚫) white circle..black circle
+ {0x26AC, 0x26AF, prExtendedPictographic}, // E0.0 [4] (⚬..⚯) MEDIUM SMALL WHITE CIRCLE..UNMARRIED PARTNERSHIP SYMBOL
+ {0x26B0, 0x26B1, prExtendedPictographic}, // E1.0 [2] (⚰️..⚱️) coffin..funeral urn
+ {0x26B2, 0x26BC, prExtendedPictographic}, // E0.0 [11] (⚲..⚼) NEUTER..SESQUIQUADRATE
+ {0x26BD, 0x26BE, prExtendedPictographic}, // E0.6 [2] (⚽..⚾) soccer ball..baseball
+ {0x26BF, 0x26C3, prExtendedPictographic}, // E0.0 [5] (⚿..⛃) SQUARED KEY..BLACK DRAUGHTS KING
+ {0x26C4, 0x26C5, prExtendedPictographic}, // E0.6 [2] (⛄..⛅) snowman without snow..sun behind cloud
+ {0x26C6, 0x26C7, prExtendedPictographic}, // E0.0 [2] (⛆..⛇) RAIN..BLACK SNOWMAN
+ {0x26C8, 0x26C8, prExtendedPictographic}, // E0.7 [1] (⛈️) cloud with lightning and rain
+ {0x26C9, 0x26CD, prExtendedPictographic}, // E0.0 [5] (⛉..⛍) TURNED WHITE SHOGI PIECE..DISABLED CAR
+ {0x26CE, 0x26CE, prExtendedPictographic}, // E0.6 [1] (⛎) Ophiuchus
+ {0x26CF, 0x26CF, prExtendedPictographic}, // E0.7 [1] (⛏️) pick
+ {0x26D0, 0x26D0, prExtendedPictographic}, // E0.0 [1] (⛐) CAR SLIDING
+ {0x26D1, 0x26D1, prExtendedPictographic}, // E0.7 [1] (⛑️) rescue worker’s helmet
+ {0x26D2, 0x26D2, prExtendedPictographic}, // E0.0 [1] (⛒) CIRCLED CROSSING LANES
+ {0x26D3, 0x26D3, prExtendedPictographic}, // E0.7 [1] (⛓️) chains
+ {0x26D4, 0x26D4, prExtendedPictographic}, // E0.6 [1] (⛔) no entry
+ {0x26D5, 0x26E8, prExtendedPictographic}, // E0.0 [20] (⛕..⛨) ALTERNATE ONE-WAY LEFT WAY TRAFFIC..BLACK CROSS ON SHIELD
+ {0x26E9, 0x26E9, prExtendedPictographic}, // E0.7 [1] (⛩️) shinto shrine
+ {0x26EA, 0x26EA, prExtendedPictographic}, // E0.6 [1] (⛪) church
+ {0x26EB, 0x26EF, prExtendedPictographic}, // E0.0 [5] (⛫..⛯) CASTLE..MAP SYMBOL FOR LIGHTHOUSE
+ {0x26F0, 0x26F1, prExtendedPictographic}, // E0.7 [2] (⛰️..⛱️) mountain..umbrella on ground
+ {0x26F2, 0x26F3, prExtendedPictographic}, // E0.6 [2] (⛲..⛳) fountain..flag in hole
+ {0x26F4, 0x26F4, prExtendedPictographic}, // E0.7 [1] (⛴️) ferry
+ {0x26F5, 0x26F5, prExtendedPictographic}, // E0.6 [1] (⛵) sailboat
+ {0x26F6, 0x26F6, prExtendedPictographic}, // E0.0 [1] (⛶) SQUARE FOUR CORNERS
+ {0x26F7, 0x26F9, prExtendedPictographic}, // E0.7 [3] (⛷️..⛹️) skier..person bouncing ball
+ {0x26FA, 0x26FA, prExtendedPictographic}, // E0.6 [1] (⛺) tent
+ {0x26FB, 0x26FC, prExtendedPictographic}, // E0.0 [2] (⛻..⛼) JAPANESE BANK SYMBOL..HEADSTONE GRAVEYARD SYMBOL
+ {0x26FD, 0x26FD, prExtendedPictographic}, // E0.6 [1] (⛽) fuel pump
+ {0x26FE, 0x2701, prExtendedPictographic}, // E0.0 [4] (⛾..✁) CUP ON BLACK SQUARE..UPPER BLADE SCISSORS
+ {0x2702, 0x2702, prExtendedPictographic}, // E0.6 [1] (✂️) scissors
+ {0x2703, 0x2704, prExtendedPictographic}, // E0.0 [2] (✃..✄) LOWER BLADE SCISSORS..WHITE SCISSORS
+ {0x2705, 0x2705, prExtendedPictographic}, // E0.6 [1] (✅) check mark button
+ {0x2708, 0x270C, prExtendedPictographic}, // E0.6 [5] (✈️..✌️) airplane..victory hand
+ {0x270D, 0x270D, prExtendedPictographic}, // E0.7 [1] (✍️) writing hand
+ {0x270E, 0x270E, prExtendedPictographic}, // E0.0 [1] (✎) LOWER RIGHT PENCIL
+ {0x270F, 0x270F, prExtendedPictographic}, // E0.6 [1] (✏️) pencil
+ {0x2710, 0x2711, prExtendedPictographic}, // E0.0 [2] (✐..✑) UPPER RIGHT PENCIL..WHITE NIB
+ {0x2712, 0x2712, prExtendedPictographic}, // E0.6 [1] (✒️) black nib
+ {0x2714, 0x2714, prExtendedPictographic}, // E0.6 [1] (✔️) check mark
+ {0x2716, 0x2716, prExtendedPictographic}, // E0.6 [1] (✖️) multiply
+ {0x271D, 0x271D, prExtendedPictographic}, // E0.7 [1] (✝️) latin cross
+ {0x2721, 0x2721, prExtendedPictographic}, // E0.7 [1] (✡️) star of David
+ {0x2728, 0x2728, prExtendedPictographic}, // E0.6 [1] (✨) sparkles
+ {0x2733, 0x2734, prExtendedPictographic}, // E0.6 [2] (✳️..✴️) eight-spoked asterisk..eight-pointed star
+ {0x2744, 0x2744, prExtendedPictographic}, // E0.6 [1] (❄️) snowflake
+ {0x2747, 0x2747, prExtendedPictographic}, // E0.6 [1] (❇️) sparkle
+ {0x274C, 0x274C, prExtendedPictographic}, // E0.6 [1] (❌) cross mark
+ {0x274E, 0x274E, prExtendedPictographic}, // E0.6 [1] (❎) cross mark button
+ {0x2753, 0x2755, prExtendedPictographic}, // E0.6 [3] (❓..❕) red question mark..white exclamation mark
+ {0x2757, 0x2757, prExtendedPictographic}, // E0.6 [1] (❗) red exclamation mark
+ {0x2763, 0x2763, prExtendedPictographic}, // E1.0 [1] (❣️) heart exclamation
+ {0x2764, 0x2764, prExtendedPictographic}, // E0.6 [1] (❤️) red heart
+ {0x2765, 0x2767, prExtendedPictographic}, // E0.0 [3] (❥..❧) ROTATED HEAVY BLACK HEART BULLET..ROTATED FLORAL HEART BULLET
+ {0x2795, 0x2797, prExtendedPictographic}, // E0.6 [3] (➕..➗) plus..divide
+ {0x27A1, 0x27A1, prExtendedPictographic}, // E0.6 [1] (➡️) right arrow
+ {0x27B0, 0x27B0, prExtendedPictographic}, // E0.6 [1] (➰) curly loop
+ {0x27BF, 0x27BF, prExtendedPictographic}, // E1.0 [1] (➿) double curly loop
+ {0x2934, 0x2935, prExtendedPictographic}, // E0.6 [2] (⤴️..⤵️) right arrow curving up..right arrow curving down
+ {0x2B05, 0x2B07, prExtendedPictographic}, // E0.6 [3] (⬅️..⬇️) left arrow..down arrow
+ {0x2B1B, 0x2B1C, prExtendedPictographic}, // E0.6 [2] (⬛..⬜) black large square..white large square
+ {0x2B50, 0x2B50, prExtendedPictographic}, // E0.6 [1] (⭐) star
+ {0x2B55, 0x2B55, prExtendedPictographic}, // E0.6 [1] (⭕) hollow red circle
+ {0x2CEF, 0x2CF1, prExtend}, // Mn [3] COPTIC COMBINING NI ABOVE..COPTIC COMBINING SPIRITUS LENIS
+ {0x2D7F, 0x2D7F, prExtend}, // Mn TIFINAGH CONSONANT JOINER
+ {0x2DE0, 0x2DFF, prExtend}, // Mn [32] COMBINING CYRILLIC LETTER BE..COMBINING CYRILLIC LETTER IOTIFIED BIG YUS
+ {0x302A, 0x302D, prExtend}, // Mn [4] IDEOGRAPHIC LEVEL TONE MARK..IDEOGRAPHIC ENTERING TONE MARK
+ {0x302E, 0x302F, prExtend}, // Mc [2] HANGUL SINGLE DOT TONE MARK..HANGUL DOUBLE DOT TONE MARK
+ {0x3030, 0x3030, prExtendedPictographic}, // E0.6 [1] (〰️) wavy dash
+ {0x303D, 0x303D, prExtendedPictographic}, // E0.6 [1] (〽️) part alternation mark
+ {0x3099, 0x309A, prExtend}, // Mn [2] COMBINING KATAKANA-HIRAGANA VOICED SOUND MARK..COMBINING KATAKANA-HIRAGANA SEMI-VOICED SOUND MARK
+ {0x3297, 0x3297, prExtendedPictographic}, // E0.6 [1] (㊗️) Japanese “congratulations” button
+ {0x3299, 0x3299, prExtendedPictographic}, // E0.6 [1] (㊙️) Japanese “secret” button
+ {0xA66F, 0xA66F, prExtend}, // Mn COMBINING CYRILLIC VZMET
+ {0xA670, 0xA672, prExtend}, // Me [3] COMBINING CYRILLIC TEN MILLIONS SIGN..COMBINING CYRILLIC THOUSAND MILLIONS SIGN
+ {0xA674, 0xA67D, prExtend}, // Mn [10] COMBINING CYRILLIC LETTER UKRAINIAN IE..COMBINING CYRILLIC PAYEROK
+ {0xA69E, 0xA69F, prExtend}, // Mn [2] COMBINING CYRILLIC LETTER EF..COMBINING CYRILLIC LETTER IOTIFIED E
+ {0xA6F0, 0xA6F1, prExtend}, // Mn [2] BAMUM COMBINING MARK KOQNDON..BAMUM COMBINING MARK TUKWENTIS
+ {0xA802, 0xA802, prExtend}, // Mn SYLOTI NAGRI SIGN DVISVARA
+ {0xA806, 0xA806, prExtend}, // Mn SYLOTI NAGRI SIGN HASANTA
+ {0xA80B, 0xA80B, prExtend}, // Mn SYLOTI NAGRI SIGN ANUSVARA
+ {0xA823, 0xA824, prSpacingMark}, // Mc [2] SYLOTI NAGRI VOWEL SIGN A..SYLOTI NAGRI VOWEL SIGN I
+ {0xA825, 0xA826, prExtend}, // Mn [2] SYLOTI NAGRI VOWEL SIGN U..SYLOTI NAGRI VOWEL SIGN E
+ {0xA827, 0xA827, prSpacingMark}, // Mc SYLOTI NAGRI VOWEL SIGN OO
+ {0xA82C, 0xA82C, prExtend}, // Mn SYLOTI NAGRI SIGN ALTERNATE HASANTA
+ {0xA880, 0xA881, prSpacingMark}, // Mc [2] SAURASHTRA SIGN ANUSVARA..SAURASHTRA SIGN VISARGA
+ {0xA8B4, 0xA8C3, prSpacingMark}, // Mc [16] SAURASHTRA CONSONANT SIGN HAARU..SAURASHTRA VOWEL SIGN AU
+ {0xA8C4, 0xA8C5, prExtend}, // Mn [2] SAURASHTRA SIGN VIRAMA..SAURASHTRA SIGN CANDRABINDU
+ {0xA8E0, 0xA8F1, prExtend}, // Mn [18] COMBINING DEVANAGARI DIGIT ZERO..COMBINING DEVANAGARI SIGN AVAGRAHA
+ {0xA8FF, 0xA8FF, prExtend}, // Mn DEVANAGARI VOWEL SIGN AY
+ {0xA926, 0xA92D, prExtend}, // Mn [8] KAYAH LI VOWEL UE..KAYAH LI TONE CALYA PLOPHU
+ {0xA947, 0xA951, prExtend}, // Mn [11] REJANG VOWEL SIGN I..REJANG CONSONANT SIGN R
+ {0xA952, 0xA953, prSpacingMark}, // Mc [2] REJANG CONSONANT SIGN H..REJANG VIRAMA
+ {0xA960, 0xA97C, prL}, // Lo [29] HANGUL CHOSEONG TIKEUT-MIEUM..HANGUL CHOSEONG SSANGYEORINHIEUH
+ {0xA980, 0xA982, prExtend}, // Mn [3] JAVANESE SIGN PANYANGGA..JAVANESE SIGN LAYAR
+ {0xA983, 0xA983, prSpacingMark}, // Mc JAVANESE SIGN WIGNYAN
+ {0xA9B3, 0xA9B3, prExtend}, // Mn JAVANESE SIGN CECAK TELU
+ {0xA9B4, 0xA9B5, prSpacingMark}, // Mc [2] JAVANESE VOWEL SIGN TARUNG..JAVANESE VOWEL SIGN TOLONG
+ {0xA9B6, 0xA9B9, prExtend}, // Mn [4] JAVANESE VOWEL SIGN WULU..JAVANESE VOWEL SIGN SUKU MENDUT
+ {0xA9BA, 0xA9BB, prSpacingMark}, // Mc [2] JAVANESE VOWEL SIGN TALING..JAVANESE VOWEL SIGN DIRGA MURE
+ {0xA9BC, 0xA9BD, prExtend}, // Mn [2] JAVANESE VOWEL SIGN PEPET..JAVANESE CONSONANT SIGN KERET
+ {0xA9BE, 0xA9C0, prSpacingMark}, // Mc [3] JAVANESE CONSONANT SIGN PENGKAL..JAVANESE PANGKON
+ {0xA9E5, 0xA9E5, prExtend}, // Mn MYANMAR SIGN SHAN SAW
+ {0xAA29, 0xAA2E, prExtend}, // Mn [6] CHAM VOWEL SIGN AA..CHAM VOWEL SIGN OE
+ {0xAA2F, 0xAA30, prSpacingMark}, // Mc [2] CHAM VOWEL SIGN O..CHAM VOWEL SIGN AI
+ {0xAA31, 0xAA32, prExtend}, // Mn [2] CHAM VOWEL SIGN AU..CHAM VOWEL SIGN UE
+ {0xAA33, 0xAA34, prSpacingMark}, // Mc [2] CHAM CONSONANT SIGN YA..CHAM CONSONANT SIGN RA
+ {0xAA35, 0xAA36, prExtend}, // Mn [2] CHAM CONSONANT SIGN LA..CHAM CONSONANT SIGN WA
+ {0xAA43, 0xAA43, prExtend}, // Mn CHAM CONSONANT SIGN FINAL NG
+ {0xAA4C, 0xAA4C, prExtend}, // Mn CHAM CONSONANT SIGN FINAL M
+ {0xAA4D, 0xAA4D, prSpacingMark}, // Mc CHAM CONSONANT SIGN FINAL H
+ {0xAA7C, 0xAA7C, prExtend}, // Mn MYANMAR SIGN TAI LAING TONE-2
+ {0xAAB0, 0xAAB0, prExtend}, // Mn TAI VIET MAI KANG
+ {0xAAB2, 0xAAB4, prExtend}, // Mn [3] TAI VIET VOWEL I..TAI VIET VOWEL U
+ {0xAAB7, 0xAAB8, prExtend}, // Mn [2] TAI VIET MAI KHIT..TAI VIET VOWEL IA
+ {0xAABE, 0xAABF, prExtend}, // Mn [2] TAI VIET VOWEL AM..TAI VIET TONE MAI EK
+ {0xAAC1, 0xAAC1, prExtend}, // Mn TAI VIET TONE MAI THO
+ {0xAAEB, 0xAAEB, prSpacingMark}, // Mc MEETEI MAYEK VOWEL SIGN II
+ {0xAAEC, 0xAAED, prExtend}, // Mn [2] MEETEI MAYEK VOWEL SIGN UU..MEETEI MAYEK VOWEL SIGN AAI
+ {0xAAEE, 0xAAEF, prSpacingMark}, // Mc [2] MEETEI MAYEK VOWEL SIGN AU..MEETEI MAYEK VOWEL SIGN AAU
+ {0xAAF5, 0xAAF5, prSpacingMark}, // Mc MEETEI MAYEK VOWEL SIGN VISARGA
+ {0xAAF6, 0xAAF6, prExtend}, // Mn MEETEI MAYEK VIRAMA
+ {0xABE3, 0xABE4, prSpacingMark}, // Mc [2] MEETEI MAYEK VOWEL SIGN ONAP..MEETEI MAYEK VOWEL SIGN INAP
+ {0xABE5, 0xABE5, prExtend}, // Mn MEETEI MAYEK VOWEL SIGN ANAP
+ {0xABE6, 0xABE7, prSpacingMark}, // Mc [2] MEETEI MAYEK VOWEL SIGN YENAP..MEETEI MAYEK VOWEL SIGN SOUNAP
+ {0xABE8, 0xABE8, prExtend}, // Mn MEETEI MAYEK VOWEL SIGN UNAP
+ {0xABE9, 0xABEA, prSpacingMark}, // Mc [2] MEETEI MAYEK VOWEL SIGN CHEINAP..MEETEI MAYEK VOWEL SIGN NUNG
+ {0xABEC, 0xABEC, prSpacingMark}, // Mc MEETEI MAYEK LUM IYEK
+ {0xABED, 0xABED, prExtend}, // Mn MEETEI MAYEK APUN IYEK
+ {0xAC00, 0xAC00, prLV}, // Lo HANGUL SYLLABLE GA
+ {0xAC01, 0xAC1B, prLVT}, // Lo [27] HANGUL SYLLABLE GAG..HANGUL SYLLABLE GAH
+ {0xAC1C, 0xAC1C, prLV}, // Lo HANGUL SYLLABLE GAE
+ {0xAC1D, 0xAC37, prLVT}, // Lo [27] HANGUL SYLLABLE GAEG..HANGUL SYLLABLE GAEH
+ {0xAC38, 0xAC38, prLV}, // Lo HANGUL SYLLABLE GYA
+ {0xAC39, 0xAC53, prLVT}, // Lo [27] HANGUL SYLLABLE GYAG..HANGUL SYLLABLE GYAH
+ {0xAC54, 0xAC54, prLV}, // Lo HANGUL SYLLABLE GYAE
+ {0xAC55, 0xAC6F, prLVT}, // Lo [27] HANGUL SYLLABLE GYAEG..HANGUL SYLLABLE GYAEH
+ {0xAC70, 0xAC70, prLV}, // Lo HANGUL SYLLABLE GEO
+ {0xAC71, 0xAC8B, prLVT}, // Lo [27] HANGUL SYLLABLE GEOG..HANGUL SYLLABLE GEOH
+ {0xAC8C, 0xAC8C, prLV}, // Lo HANGUL SYLLABLE GE
+ {0xAC8D, 0xACA7, prLVT}, // Lo [27] HANGUL SYLLABLE GEG..HANGUL SYLLABLE GEH
+ {0xACA8, 0xACA8, prLV}, // Lo HANGUL SYLLABLE GYEO
+ {0xACA9, 0xACC3, prLVT}, // Lo [27] HANGUL SYLLABLE GYEOG..HANGUL SYLLABLE GYEOH
+ {0xACC4, 0xACC4, prLV}, // Lo HANGUL SYLLABLE GYE
+ {0xACC5, 0xACDF, prLVT}, // Lo [27] HANGUL SYLLABLE GYEG..HANGUL SYLLABLE GYEH
+ {0xACE0, 0xACE0, prLV}, // Lo HANGUL SYLLABLE GO
+ {0xACE1, 0xACFB, prLVT}, // Lo [27] HANGUL SYLLABLE GOG..HANGUL SYLLABLE GOH
+ {0xACFC, 0xACFC, prLV}, // Lo HANGUL SYLLABLE GWA
+ {0xACFD, 0xAD17, prLVT}, // Lo [27] HANGUL SYLLABLE GWAG..HANGUL SYLLABLE GWAH
+ {0xAD18, 0xAD18, prLV}, // Lo HANGUL SYLLABLE GWAE
+ {0xAD19, 0xAD33, prLVT}, // Lo [27] HANGUL SYLLABLE GWAEG..HANGUL SYLLABLE GWAEH
+ {0xAD34, 0xAD34, prLV}, // Lo HANGUL SYLLABLE GOE
+ {0xAD35, 0xAD4F, prLVT}, // Lo [27] HANGUL SYLLABLE GOEG..HANGUL SYLLABLE GOEH
+ {0xAD50, 0xAD50, prLV}, // Lo HANGUL SYLLABLE GYO
+ {0xAD51, 0xAD6B, prLVT}, // Lo [27] HANGUL SYLLABLE GYOG..HANGUL SYLLABLE GYOH
+ {0xAD6C, 0xAD6C, prLV}, // Lo HANGUL SYLLABLE GU
+ {0xAD6D, 0xAD87, prLVT}, // Lo [27] HANGUL SYLLABLE GUG..HANGUL SYLLABLE GUH
+ {0xAD88, 0xAD88, prLV}, // Lo HANGUL SYLLABLE GWEO
+ {0xAD89, 0xADA3, prLVT}, // Lo [27] HANGUL SYLLABLE GWEOG..HANGUL SYLLABLE GWEOH
+ {0xADA4, 0xADA4, prLV}, // Lo HANGUL SYLLABLE GWE
+ {0xADA5, 0xADBF, prLVT}, // Lo [27] HANGUL SYLLABLE GWEG..HANGUL SYLLABLE GWEH
+ {0xADC0, 0xADC0, prLV}, // Lo HANGUL SYLLABLE GWI
+ {0xADC1, 0xADDB, prLVT}, // Lo [27] HANGUL SYLLABLE GWIG..HANGUL SYLLABLE GWIH
+ {0xADDC, 0xADDC, prLV}, // Lo HANGUL SYLLABLE GYU
+ {0xADDD, 0xADF7, prLVT}, // Lo [27] HANGUL SYLLABLE GYUG..HANGUL SYLLABLE GYUH
+ {0xADF8, 0xADF8, prLV}, // Lo HANGUL SYLLABLE GEU
+ {0xADF9, 0xAE13, prLVT}, // Lo [27] HANGUL SYLLABLE GEUG..HANGUL SYLLABLE GEUH
+ {0xAE14, 0xAE14, prLV}, // Lo HANGUL SYLLABLE GYI
+ {0xAE15, 0xAE2F, prLVT}, // Lo [27] HANGUL SYLLABLE GYIG..HANGUL SYLLABLE GYIH
+ {0xAE30, 0xAE30, prLV}, // Lo HANGUL SYLLABLE GI
+ {0xAE31, 0xAE4B, prLVT}, // Lo [27] HANGUL SYLLABLE GIG..HANGUL SYLLABLE GIH
+ {0xAE4C, 0xAE4C, prLV}, // Lo HANGUL SYLLABLE GGA
+ {0xAE4D, 0xAE67, prLVT}, // Lo [27] HANGUL SYLLABLE GGAG..HANGUL SYLLABLE GGAH
+ {0xAE68, 0xAE68, prLV}, // Lo HANGUL SYLLABLE GGAE
+ {0xAE69, 0xAE83, prLVT}, // Lo [27] HANGUL SYLLABLE GGAEG..HANGUL SYLLABLE GGAEH
+ {0xAE84, 0xAE84, prLV}, // Lo HANGUL SYLLABLE GGYA
+ {0xAE85, 0xAE9F, prLVT}, // Lo [27] HANGUL SYLLABLE GGYAG..HANGUL SYLLABLE GGYAH
+ {0xAEA0, 0xAEA0, prLV}, // Lo HANGUL SYLLABLE GGYAE
+ {0xAEA1, 0xAEBB, prLVT}, // Lo [27] HANGUL SYLLABLE GGYAEG..HANGUL SYLLABLE GGYAEH
+ {0xAEBC, 0xAEBC, prLV}, // Lo HANGUL SYLLABLE GGEO
+ {0xAEBD, 0xAED7, prLVT}, // Lo [27] HANGUL SYLLABLE GGEOG..HANGUL SYLLABLE GGEOH
+ {0xAED8, 0xAED8, prLV}, // Lo HANGUL SYLLABLE GGE
+ {0xAED9, 0xAEF3, prLVT}, // Lo [27] HANGUL SYLLABLE GGEG..HANGUL SYLLABLE GGEH
+ {0xAEF4, 0xAEF4, prLV}, // Lo HANGUL SYLLABLE GGYEO
+ {0xAEF5, 0xAF0F, prLVT}, // Lo [27] HANGUL SYLLABLE GGYEOG..HANGUL SYLLABLE GGYEOH
+ {0xAF10, 0xAF10, prLV}, // Lo HANGUL SYLLABLE GGYE
+ {0xAF11, 0xAF2B, prLVT}, // Lo [27] HANGUL SYLLABLE GGYEG..HANGUL SYLLABLE GGYEH
+ {0xAF2C, 0xAF2C, prLV}, // Lo HANGUL SYLLABLE GGO
+ {0xAF2D, 0xAF47, prLVT}, // Lo [27] HANGUL SYLLABLE GGOG..HANGUL SYLLABLE GGOH
+ {0xAF48, 0xAF48, prLV}, // Lo HANGUL SYLLABLE GGWA
+ {0xAF49, 0xAF63, prLVT}, // Lo [27] HANGUL SYLLABLE GGWAG..HANGUL SYLLABLE GGWAH
+ {0xAF64, 0xAF64, prLV}, // Lo HANGUL SYLLABLE GGWAE
+ {0xAF65, 0xAF7F, prLVT}, // Lo [27] HANGUL SYLLABLE GGWAEG..HANGUL SYLLABLE GGWAEH
+ {0xAF80, 0xAF80, prLV}, // Lo HANGUL SYLLABLE GGOE
+ {0xAF81, 0xAF9B, prLVT}, // Lo [27] HANGUL SYLLABLE GGOEG..HANGUL SYLLABLE GGOEH
+ {0xAF9C, 0xAF9C, prLV}, // Lo HANGUL SYLLABLE GGYO
+ {0xAF9D, 0xAFB7, prLVT}, // Lo [27] HANGUL SYLLABLE GGYOG..HANGUL SYLLABLE GGYOH
+ {0xAFB8, 0xAFB8, prLV}, // Lo HANGUL SYLLABLE GGU
+ {0xAFB9, 0xAFD3, prLVT}, // Lo [27] HANGUL SYLLABLE GGUG..HANGUL SYLLABLE GGUH
+ {0xAFD4, 0xAFD4, prLV}, // Lo HANGUL SYLLABLE GGWEO
+ {0xAFD5, 0xAFEF, prLVT}, // Lo [27] HANGUL SYLLABLE GGWEOG..HANGUL SYLLABLE GGWEOH
+ {0xAFF0, 0xAFF0, prLV}, // Lo HANGUL SYLLABLE GGWE
+ {0xAFF1, 0xB00B, prLVT}, // Lo [27] HANGUL SYLLABLE GGWEG..HANGUL SYLLABLE GGWEH
+ {0xB00C, 0xB00C, prLV}, // Lo HANGUL SYLLABLE GGWI
+ {0xB00D, 0xB027, prLVT}, // Lo [27] HANGUL SYLLABLE GGWIG..HANGUL SYLLABLE GGWIH
+ {0xB028, 0xB028, prLV}, // Lo HANGUL SYLLABLE GGYU
+ {0xB029, 0xB043, prLVT}, // Lo [27] HANGUL SYLLABLE GGYUG..HANGUL SYLLABLE GGYUH
+ {0xB044, 0xB044, prLV}, // Lo HANGUL SYLLABLE GGEU
+ {0xB045, 0xB05F, prLVT}, // Lo [27] HANGUL SYLLABLE GGEUG..HANGUL SYLLABLE GGEUH
+ {0xB060, 0xB060, prLV}, // Lo HANGUL SYLLABLE GGYI
+ {0xB061, 0xB07B, prLVT}, // Lo [27] HANGUL SYLLABLE GGYIG..HANGUL SYLLABLE GGYIH
+ {0xB07C, 0xB07C, prLV}, // Lo HANGUL SYLLABLE GGI
+ {0xB07D, 0xB097, prLVT}, // Lo [27] HANGUL SYLLABLE GGIG..HANGUL SYLLABLE GGIH
+ {0xB098, 0xB098, prLV}, // Lo HANGUL SYLLABLE NA
+ {0xB099, 0xB0B3, prLVT}, // Lo [27] HANGUL SYLLABLE NAG..HANGUL SYLLABLE NAH
+ {0xB0B4, 0xB0B4, prLV}, // Lo HANGUL SYLLABLE NAE
+ {0xB0B5, 0xB0CF, prLVT}, // Lo [27] HANGUL SYLLABLE NAEG..HANGUL SYLLABLE NAEH
+ {0xB0D0, 0xB0D0, prLV}, // Lo HANGUL SYLLABLE NYA
+ {0xB0D1, 0xB0EB, prLVT}, // Lo [27] HANGUL SYLLABLE NYAG..HANGUL SYLLABLE NYAH
+ {0xB0EC, 0xB0EC, prLV}, // Lo HANGUL SYLLABLE NYAE
+ {0xB0ED, 0xB107, prLVT}, // Lo [27] HANGUL SYLLABLE NYAEG..HANGUL SYLLABLE NYAEH
+ {0xB108, 0xB108, prLV}, // Lo HANGUL SYLLABLE NEO
+ {0xB109, 0xB123, prLVT}, // Lo [27] HANGUL SYLLABLE NEOG..HANGUL SYLLABLE NEOH
+ {0xB124, 0xB124, prLV}, // Lo HANGUL SYLLABLE NE
+ {0xB125, 0xB13F, prLVT}, // Lo [27] HANGUL SYLLABLE NEG..HANGUL SYLLABLE NEH
+ {0xB140, 0xB140, prLV}, // Lo HANGUL SYLLABLE NYEO
+ {0xB141, 0xB15B, prLVT}, // Lo [27] HANGUL SYLLABLE NYEOG..HANGUL SYLLABLE NYEOH
+ {0xB15C, 0xB15C, prLV}, // Lo HANGUL SYLLABLE NYE
+ {0xB15D, 0xB177, prLVT}, // Lo [27] HANGUL SYLLABLE NYEG..HANGUL SYLLABLE NYEH
+ {0xB178, 0xB178, prLV}, // Lo HANGUL SYLLABLE NO
+ {0xB179, 0xB193, prLVT}, // Lo [27] HANGUL SYLLABLE NOG..HANGUL SYLLABLE NOH
+ {0xB194, 0xB194, prLV}, // Lo HANGUL SYLLABLE NWA
+ {0xB195, 0xB1AF, prLVT}, // Lo [27] HANGUL SYLLABLE NWAG..HANGUL SYLLABLE NWAH
+ {0xB1B0, 0xB1B0, prLV}, // Lo HANGUL SYLLABLE NWAE
+ {0xB1B1, 0xB1CB, prLVT}, // Lo [27] HANGUL SYLLABLE NWAEG..HANGUL SYLLABLE NWAEH
+ {0xB1CC, 0xB1CC, prLV}, // Lo HANGUL SYLLABLE NOE
+ {0xB1CD, 0xB1E7, prLVT}, // Lo [27] HANGUL SYLLABLE NOEG..HANGUL SYLLABLE NOEH
+ {0xB1E8, 0xB1E8, prLV}, // Lo HANGUL SYLLABLE NYO
+ {0xB1E9, 0xB203, prLVT}, // Lo [27] HANGUL SYLLABLE NYOG..HANGUL SYLLABLE NYOH
+ {0xB204, 0xB204, prLV}, // Lo HANGUL SYLLABLE NU
+ {0xB205, 0xB21F, prLVT}, // Lo [27] HANGUL SYLLABLE NUG..HANGUL SYLLABLE NUH
+ {0xB220, 0xB220, prLV}, // Lo HANGUL SYLLABLE NWEO
+ {0xB221, 0xB23B, prLVT}, // Lo [27] HANGUL SYLLABLE NWEOG..HANGUL SYLLABLE NWEOH
+ {0xB23C, 0xB23C, prLV}, // Lo HANGUL SYLLABLE NWE
+ {0xB23D, 0xB257, prLVT}, // Lo [27] HANGUL SYLLABLE NWEG..HANGUL SYLLABLE NWEH
+ {0xB258, 0xB258, prLV}, // Lo HANGUL SYLLABLE NWI
+ {0xB259, 0xB273, prLVT}, // Lo [27] HANGUL SYLLABLE NWIG..HANGUL SYLLABLE NWIH
+ {0xB274, 0xB274, prLV}, // Lo HANGUL SYLLABLE NYU
+ {0xB275, 0xB28F, prLVT}, // Lo [27] HANGUL SYLLABLE NYUG..HANGUL SYLLABLE NYUH
+ {0xB290, 0xB290, prLV}, // Lo HANGUL SYLLABLE NEU
+ {0xB291, 0xB2AB, prLVT}, // Lo [27] HANGUL SYLLABLE NEUG..HANGUL SYLLABLE NEUH
+ {0xB2AC, 0xB2AC, prLV}, // Lo HANGUL SYLLABLE NYI
+ {0xB2AD, 0xB2C7, prLVT}, // Lo [27] HANGUL SYLLABLE NYIG..HANGUL SYLLABLE NYIH
+ {0xB2C8, 0xB2C8, prLV}, // Lo HANGUL SYLLABLE NI
+ {0xB2C9, 0xB2E3, prLVT}, // Lo [27] HANGUL SYLLABLE NIG..HANGUL SYLLABLE NIH
+ {0xB2E4, 0xB2E4, prLV}, // Lo HANGUL SYLLABLE DA
+ {0xB2E5, 0xB2FF, prLVT}, // Lo [27] HANGUL SYLLABLE DAG..HANGUL SYLLABLE DAH
+ {0xB300, 0xB300, prLV}, // Lo HANGUL SYLLABLE DAE
+ {0xB301, 0xB31B, prLVT}, // Lo [27] HANGUL SYLLABLE DAEG..HANGUL SYLLABLE DAEH
+ {0xB31C, 0xB31C, prLV}, // Lo HANGUL SYLLABLE DYA
+ {0xB31D, 0xB337, prLVT}, // Lo [27] HANGUL SYLLABLE DYAG..HANGUL SYLLABLE DYAH
+ {0xB338, 0xB338, prLV}, // Lo HANGUL SYLLABLE DYAE
+ {0xB339, 0xB353, prLVT}, // Lo [27] HANGUL SYLLABLE DYAEG..HANGUL SYLLABLE DYAEH
+ {0xB354, 0xB354, prLV}, // Lo HANGUL SYLLABLE DEO
+ {0xB355, 0xB36F, prLVT}, // Lo [27] HANGUL SYLLABLE DEOG..HANGUL SYLLABLE DEOH
+ {0xB370, 0xB370, prLV}, // Lo HANGUL SYLLABLE DE
+ {0xB371, 0xB38B, prLVT}, // Lo [27] HANGUL SYLLABLE DEG..HANGUL SYLLABLE DEH
+ {0xB38C, 0xB38C, prLV}, // Lo HANGUL SYLLABLE DYEO
+ {0xB38D, 0xB3A7, prLVT}, // Lo [27] HANGUL SYLLABLE DYEOG..HANGUL SYLLABLE DYEOH
+ {0xB3A8, 0xB3A8, prLV}, // Lo HANGUL SYLLABLE DYE
+ {0xB3A9, 0xB3C3, prLVT}, // Lo [27] HANGUL SYLLABLE DYEG..HANGUL SYLLABLE DYEH
+ {0xB3C4, 0xB3C4, prLV}, // Lo HANGUL SYLLABLE DO
+ {0xB3C5, 0xB3DF, prLVT}, // Lo [27] HANGUL SYLLABLE DOG..HANGUL SYLLABLE DOH
+ {0xB3E0, 0xB3E0, prLV}, // Lo HANGUL SYLLABLE DWA
+ {0xB3E1, 0xB3FB, prLVT}, // Lo [27] HANGUL SYLLABLE DWAG..HANGUL SYLLABLE DWAH
+ {0xB3FC, 0xB3FC, prLV}, // Lo HANGUL SYLLABLE DWAE
+ {0xB3FD, 0xB417, prLVT}, // Lo [27] HANGUL SYLLABLE DWAEG..HANGUL SYLLABLE DWAEH
+ {0xB418, 0xB418, prLV}, // Lo HANGUL SYLLABLE DOE
+ {0xB419, 0xB433, prLVT}, // Lo [27] HANGUL SYLLABLE DOEG..HANGUL SYLLABLE DOEH
+ {0xB434, 0xB434, prLV}, // Lo HANGUL SYLLABLE DYO
+ {0xB435, 0xB44F, prLVT}, // Lo [27] HANGUL SYLLABLE DYOG..HANGUL SYLLABLE DYOH
+ {0xB450, 0xB450, prLV}, // Lo HANGUL SYLLABLE DU
+ {0xB451, 0xB46B, prLVT}, // Lo [27] HANGUL SYLLABLE DUG..HANGUL SYLLABLE DUH
+ {0xB46C, 0xB46C, prLV}, // Lo HANGUL SYLLABLE DWEO
+ {0xB46D, 0xB487, prLVT}, // Lo [27] HANGUL SYLLABLE DWEOG..HANGUL SYLLABLE DWEOH
+ {0xB488, 0xB488, prLV}, // Lo HANGUL SYLLABLE DWE
+ {0xB489, 0xB4A3, prLVT}, // Lo [27] HANGUL SYLLABLE DWEG..HANGUL SYLLABLE DWEH
+ {0xB4A4, 0xB4A4, prLV}, // Lo HANGUL SYLLABLE DWI
+ {0xB4A5, 0xB4BF, prLVT}, // Lo [27] HANGUL SYLLABLE DWIG..HANGUL SYLLABLE DWIH
+ {0xB4C0, 0xB4C0, prLV}, // Lo HANGUL SYLLABLE DYU
+ {0xB4C1, 0xB4DB, prLVT}, // Lo [27] HANGUL SYLLABLE DYUG..HANGUL SYLLABLE DYUH
+ {0xB4DC, 0xB4DC, prLV}, // Lo HANGUL SYLLABLE DEU
+ {0xB4DD, 0xB4F7, prLVT}, // Lo [27] HANGUL SYLLABLE DEUG..HANGUL SYLLABLE DEUH
+ {0xB4F8, 0xB4F8, prLV}, // Lo HANGUL SYLLABLE DYI
+ {0xB4F9, 0xB513, prLVT}, // Lo [27] HANGUL SYLLABLE DYIG..HANGUL SYLLABLE DYIH
+ {0xB514, 0xB514, prLV}, // Lo HANGUL SYLLABLE DI
+ {0xB515, 0xB52F, prLVT}, // Lo [27] HANGUL SYLLABLE DIG..HANGUL SYLLABLE DIH
+ {0xB530, 0xB530, prLV}, // Lo HANGUL SYLLABLE DDA
+ {0xB531, 0xB54B, prLVT}, // Lo [27] HANGUL SYLLABLE DDAG..HANGUL SYLLABLE DDAH
+ {0xB54C, 0xB54C, prLV}, // Lo HANGUL SYLLABLE DDAE
+ {0xB54D, 0xB567, prLVT}, // Lo [27] HANGUL SYLLABLE DDAEG..HANGUL SYLLABLE DDAEH
+ {0xB568, 0xB568, prLV}, // Lo HANGUL SYLLABLE DDYA
+ {0xB569, 0xB583, prLVT}, // Lo [27] HANGUL SYLLABLE DDYAG..HANGUL SYLLABLE DDYAH
+ {0xB584, 0xB584, prLV}, // Lo HANGUL SYLLABLE DDYAE
+ {0xB585, 0xB59F, prLVT}, // Lo [27] HANGUL SYLLABLE DDYAEG..HANGUL SYLLABLE DDYAEH
+ {0xB5A0, 0xB5A0, prLV}, // Lo HANGUL SYLLABLE DDEO
+ {0xB5A1, 0xB5BB, prLVT}, // Lo [27] HANGUL SYLLABLE DDEOG..HANGUL SYLLABLE DDEOH
+ {0xB5BC, 0xB5BC, prLV}, // Lo HANGUL SYLLABLE DDE
+ {0xB5BD, 0xB5D7, prLVT}, // Lo [27] HANGUL SYLLABLE DDEG..HANGUL SYLLABLE DDEH
+ {0xB5D8, 0xB5D8, prLV}, // Lo HANGUL SYLLABLE DDYEO
+ {0xB5D9, 0xB5F3, prLVT}, // Lo [27] HANGUL SYLLABLE DDYEOG..HANGUL SYLLABLE DDYEOH
+ {0xB5F4, 0xB5F4, prLV}, // Lo HANGUL SYLLABLE DDYE
+ {0xB5F5, 0xB60F, prLVT}, // Lo [27] HANGUL SYLLABLE DDYEG..HANGUL SYLLABLE DDYEH
+ {0xB610, 0xB610, prLV}, // Lo HANGUL SYLLABLE DDO
+ {0xB611, 0xB62B, prLVT}, // Lo [27] HANGUL SYLLABLE DDOG..HANGUL SYLLABLE DDOH
+ {0xB62C, 0xB62C, prLV}, // Lo HANGUL SYLLABLE DDWA
+ {0xB62D, 0xB647, prLVT}, // Lo [27] HANGUL SYLLABLE DDWAG..HANGUL SYLLABLE DDWAH
+ {0xB648, 0xB648, prLV}, // Lo HANGUL SYLLABLE DDWAE
+ {0xB649, 0xB663, prLVT}, // Lo [27] HANGUL SYLLABLE DDWAEG..HANGUL SYLLABLE DDWAEH
+ {0xB664, 0xB664, prLV}, // Lo HANGUL SYLLABLE DDOE
+ {0xB665, 0xB67F, prLVT}, // Lo [27] HANGUL SYLLABLE DDOEG..HANGUL SYLLABLE DDOEH
+ {0xB680, 0xB680, prLV}, // Lo HANGUL SYLLABLE DDYO
+ {0xB681, 0xB69B, prLVT}, // Lo [27] HANGUL SYLLABLE DDYOG..HANGUL SYLLABLE DDYOH
+ {0xB69C, 0xB69C, prLV}, // Lo HANGUL SYLLABLE DDU
+ {0xB69D, 0xB6B7, prLVT}, // Lo [27] HANGUL SYLLABLE DDUG..HANGUL SYLLABLE DDUH
+ {0xB6B8, 0xB6B8, prLV}, // Lo HANGUL SYLLABLE DDWEO
+ {0xB6B9, 0xB6D3, prLVT}, // Lo [27] HANGUL SYLLABLE DDWEOG..HANGUL SYLLABLE DDWEOH
+ {0xB6D4, 0xB6D4, prLV}, // Lo HANGUL SYLLABLE DDWE
+ {0xB6D5, 0xB6EF, prLVT}, // Lo [27] HANGUL SYLLABLE DDWEG..HANGUL SYLLABLE DDWEH
+ {0xB6F0, 0xB6F0, prLV}, // Lo HANGUL SYLLABLE DDWI
+ {0xB6F1, 0xB70B, prLVT}, // Lo [27] HANGUL SYLLABLE DDWIG..HANGUL SYLLABLE DDWIH
+ {0xB70C, 0xB70C, prLV}, // Lo HANGUL SYLLABLE DDYU
+ {0xB70D, 0xB727, prLVT}, // Lo [27] HANGUL SYLLABLE DDYUG..HANGUL SYLLABLE DDYUH
+ {0xB728, 0xB728, prLV}, // Lo HANGUL SYLLABLE DDEU
+ {0xB729, 0xB743, prLVT}, // Lo [27] HANGUL SYLLABLE DDEUG..HANGUL SYLLABLE DDEUH
+ {0xB744, 0xB744, prLV}, // Lo HANGUL SYLLABLE DDYI
+ {0xB745, 0xB75F, prLVT}, // Lo [27] HANGUL SYLLABLE DDYIG..HANGUL SYLLABLE DDYIH
+ {0xB760, 0xB760, prLV}, // Lo HANGUL SYLLABLE DDI
+ {0xB761, 0xB77B, prLVT}, // Lo [27] HANGUL SYLLABLE DDIG..HANGUL SYLLABLE DDIH
+ {0xB77C, 0xB77C, prLV}, // Lo HANGUL SYLLABLE RA
+ {0xB77D, 0xB797, prLVT}, // Lo [27] HANGUL SYLLABLE RAG..HANGUL SYLLABLE RAH
+ {0xB798, 0xB798, prLV}, // Lo HANGUL SYLLABLE RAE
+ {0xB799, 0xB7B3, prLVT}, // Lo [27] HANGUL SYLLABLE RAEG..HANGUL SYLLABLE RAEH
+ {0xB7B4, 0xB7B4, prLV}, // Lo HANGUL SYLLABLE RYA
+ {0xB7B5, 0xB7CF, prLVT}, // Lo [27] HANGUL SYLLABLE RYAG..HANGUL SYLLABLE RYAH
+ {0xB7D0, 0xB7D0, prLV}, // Lo HANGUL SYLLABLE RYAE
+ {0xB7D1, 0xB7EB, prLVT}, // Lo [27] HANGUL SYLLABLE RYAEG..HANGUL SYLLABLE RYAEH
+ {0xB7EC, 0xB7EC, prLV}, // Lo HANGUL SYLLABLE REO
+ {0xB7ED, 0xB807, prLVT}, // Lo [27] HANGUL SYLLABLE REOG..HANGUL SYLLABLE REOH
+ {0xB808, 0xB808, prLV}, // Lo HANGUL SYLLABLE RE
+ {0xB809, 0xB823, prLVT}, // Lo [27] HANGUL SYLLABLE REG..HANGUL SYLLABLE REH
+ {0xB824, 0xB824, prLV}, // Lo HANGUL SYLLABLE RYEO
+ {0xB825, 0xB83F, prLVT}, // Lo [27] HANGUL SYLLABLE RYEOG..HANGUL SYLLABLE RYEOH
+ {0xB840, 0xB840, prLV}, // Lo HANGUL SYLLABLE RYE
+ {0xB841, 0xB85B, prLVT}, // Lo [27] HANGUL SYLLABLE RYEG..HANGUL SYLLABLE RYEH
+ {0xB85C, 0xB85C, prLV}, // Lo HANGUL SYLLABLE RO
+ {0xB85D, 0xB877, prLVT}, // Lo [27] HANGUL SYLLABLE ROG..HANGUL SYLLABLE ROH
+ {0xB878, 0xB878, prLV}, // Lo HANGUL SYLLABLE RWA
+ {0xB879, 0xB893, prLVT}, // Lo [27] HANGUL SYLLABLE RWAG..HANGUL SYLLABLE RWAH
+ {0xB894, 0xB894, prLV}, // Lo HANGUL SYLLABLE RWAE
+ {0xB895, 0xB8AF, prLVT}, // Lo [27] HANGUL SYLLABLE RWAEG..HANGUL SYLLABLE RWAEH
+ {0xB8B0, 0xB8B0, prLV}, // Lo HANGUL SYLLABLE ROE
+ {0xB8B1, 0xB8CB, prLVT}, // Lo [27] HANGUL SYLLABLE ROEG..HANGUL SYLLABLE ROEH
+ {0xB8CC, 0xB8CC, prLV}, // Lo HANGUL SYLLABLE RYO
+ {0xB8CD, 0xB8E7, prLVT}, // Lo [27] HANGUL SYLLABLE RYOG..HANGUL SYLLABLE RYOH
+ {0xB8E8, 0xB8E8, prLV}, // Lo HANGUL SYLLABLE RU
+ {0xB8E9, 0xB903, prLVT}, // Lo [27] HANGUL SYLLABLE RUG..HANGUL SYLLABLE RUH
+ {0xB904, 0xB904, prLV}, // Lo HANGUL SYLLABLE RWEO
+ {0xB905, 0xB91F, prLVT}, // Lo [27] HANGUL SYLLABLE RWEOG..HANGUL SYLLABLE RWEOH
+ {0xB920, 0xB920, prLV}, // Lo HANGUL SYLLABLE RWE
+ {0xB921, 0xB93B, prLVT}, // Lo [27] HANGUL SYLLABLE RWEG..HANGUL SYLLABLE RWEH
+ {0xB93C, 0xB93C, prLV}, // Lo HANGUL SYLLABLE RWI
+ {0xB93D, 0xB957, prLVT}, // Lo [27] HANGUL SYLLABLE RWIG..HANGUL SYLLABLE RWIH
+ {0xB958, 0xB958, prLV}, // Lo HANGUL SYLLABLE RYU
+ {0xB959, 0xB973, prLVT}, // Lo [27] HANGUL SYLLABLE RYUG..HANGUL SYLLABLE RYUH
+ {0xB974, 0xB974, prLV}, // Lo HANGUL SYLLABLE REU
+ {0xB975, 0xB98F, prLVT}, // Lo [27] HANGUL SYLLABLE REUG..HANGUL SYLLABLE REUH
+ {0xB990, 0xB990, prLV}, // Lo HANGUL SYLLABLE RYI
+ {0xB991, 0xB9AB, prLVT}, // Lo [27] HANGUL SYLLABLE RYIG..HANGUL SYLLABLE RYIH
+ {0xB9AC, 0xB9AC, prLV}, // Lo HANGUL SYLLABLE RI
+ {0xB9AD, 0xB9C7, prLVT}, // Lo [27] HANGUL SYLLABLE RIG..HANGUL SYLLABLE RIH
+ {0xB9C8, 0xB9C8, prLV}, // Lo HANGUL SYLLABLE MA
+ {0xB9C9, 0xB9E3, prLVT}, // Lo [27] HANGUL SYLLABLE MAG..HANGUL SYLLABLE MAH
+ {0xB9E4, 0xB9E4, prLV}, // Lo HANGUL SYLLABLE MAE
+ {0xB9E5, 0xB9FF, prLVT}, // Lo [27] HANGUL SYLLABLE MAEG..HANGUL SYLLABLE MAEH
+ {0xBA00, 0xBA00, prLV}, // Lo HANGUL SYLLABLE MYA
+ {0xBA01, 0xBA1B, prLVT}, // Lo [27] HANGUL SYLLABLE MYAG..HANGUL SYLLABLE MYAH
+ {0xBA1C, 0xBA1C, prLV}, // Lo HANGUL SYLLABLE MYAE
+ {0xBA1D, 0xBA37, prLVT}, // Lo [27] HANGUL SYLLABLE MYAEG..HANGUL SYLLABLE MYAEH
+ {0xBA38, 0xBA38, prLV}, // Lo HANGUL SYLLABLE MEO
+ {0xBA39, 0xBA53, prLVT}, // Lo [27] HANGUL SYLLABLE MEOG..HANGUL SYLLABLE MEOH
+ {0xBA54, 0xBA54, prLV}, // Lo HANGUL SYLLABLE ME
+ {0xBA55, 0xBA6F, prLVT}, // Lo [27] HANGUL SYLLABLE MEG..HANGUL SYLLABLE MEH
+ {0xBA70, 0xBA70, prLV}, // Lo HANGUL SYLLABLE MYEO
+ {0xBA71, 0xBA8B, prLVT}, // Lo [27] HANGUL SYLLABLE MYEOG..HANGUL SYLLABLE MYEOH
+ {0xBA8C, 0xBA8C, prLV}, // Lo HANGUL SYLLABLE MYE
+ {0xBA8D, 0xBAA7, prLVT}, // Lo [27] HANGUL SYLLABLE MYEG..HANGUL SYLLABLE MYEH
+ {0xBAA8, 0xBAA8, prLV}, // Lo HANGUL SYLLABLE MO
+ {0xBAA9, 0xBAC3, prLVT}, // Lo [27] HANGUL SYLLABLE MOG..HANGUL SYLLABLE MOH
+ {0xBAC4, 0xBAC4, prLV}, // Lo HANGUL SYLLABLE MWA
+ {0xBAC5, 0xBADF, prLVT}, // Lo [27] HANGUL SYLLABLE MWAG..HANGUL SYLLABLE MWAH
+ {0xBAE0, 0xBAE0, prLV}, // Lo HANGUL SYLLABLE MWAE
+ {0xBAE1, 0xBAFB, prLVT}, // Lo [27] HANGUL SYLLABLE MWAEG..HANGUL SYLLABLE MWAEH
+ {0xBAFC, 0xBAFC, prLV}, // Lo HANGUL SYLLABLE MOE
+ {0xBAFD, 0xBB17, prLVT}, // Lo [27] HANGUL SYLLABLE MOEG..HANGUL SYLLABLE MOEH
+ {0xBB18, 0xBB18, prLV}, // Lo HANGUL SYLLABLE MYO
+ {0xBB19, 0xBB33, prLVT}, // Lo [27] HANGUL SYLLABLE MYOG..HANGUL SYLLABLE MYOH
+ {0xBB34, 0xBB34, prLV}, // Lo HANGUL SYLLABLE MU
+ {0xBB35, 0xBB4F, prLVT}, // Lo [27] HANGUL SYLLABLE MUG..HANGUL SYLLABLE MUH
+ {0xBB50, 0xBB50, prLV}, // Lo HANGUL SYLLABLE MWEO
+ {0xBB51, 0xBB6B, prLVT}, // Lo [27] HANGUL SYLLABLE MWEOG..HANGUL SYLLABLE MWEOH
+ {0xBB6C, 0xBB6C, prLV}, // Lo HANGUL SYLLABLE MWE
+ {0xBB6D, 0xBB87, prLVT}, // Lo [27] HANGUL SYLLABLE MWEG..HANGUL SYLLABLE MWEH
+ {0xBB88, 0xBB88, prLV}, // Lo HANGUL SYLLABLE MWI
+ {0xBB89, 0xBBA3, prLVT}, // Lo [27] HANGUL SYLLABLE MWIG..HANGUL SYLLABLE MWIH
+ {0xBBA4, 0xBBA4, prLV}, // Lo HANGUL SYLLABLE MYU
+ {0xBBA5, 0xBBBF, prLVT}, // Lo [27] HANGUL SYLLABLE MYUG..HANGUL SYLLABLE MYUH
+ {0xBBC0, 0xBBC0, prLV}, // Lo HANGUL SYLLABLE MEU
+ {0xBBC1, 0xBBDB, prLVT}, // Lo [27] HANGUL SYLLABLE MEUG..HANGUL SYLLABLE MEUH
+ {0xBBDC, 0xBBDC, prLV}, // Lo HANGUL SYLLABLE MYI
+ {0xBBDD, 0xBBF7, prLVT}, // Lo [27] HANGUL SYLLABLE MYIG..HANGUL SYLLABLE MYIH
+ {0xBBF8, 0xBBF8, prLV}, // Lo HANGUL SYLLABLE MI
+ {0xBBF9, 0xBC13, prLVT}, // Lo [27] HANGUL SYLLABLE MIG..HANGUL SYLLABLE MIH
+ {0xBC14, 0xBC14, prLV}, // Lo HANGUL SYLLABLE BA
+ {0xBC15, 0xBC2F, prLVT}, // Lo [27] HANGUL SYLLABLE BAG..HANGUL SYLLABLE BAH
+ {0xBC30, 0xBC30, prLV}, // Lo HANGUL SYLLABLE BAE
+ {0xBC31, 0xBC4B, prLVT}, // Lo [27] HANGUL SYLLABLE BAEG..HANGUL SYLLABLE BAEH
+ {0xBC4C, 0xBC4C, prLV}, // Lo HANGUL SYLLABLE BYA
+ {0xBC4D, 0xBC67, prLVT}, // Lo [27] HANGUL SYLLABLE BYAG..HANGUL SYLLABLE BYAH
+ {0xBC68, 0xBC68, prLV}, // Lo HANGUL SYLLABLE BYAE
+ {0xBC69, 0xBC83, prLVT}, // Lo [27] HANGUL SYLLABLE BYAEG..HANGUL SYLLABLE BYAEH
+ {0xBC84, 0xBC84, prLV}, // Lo HANGUL SYLLABLE BEO
+ {0xBC85, 0xBC9F, prLVT}, // Lo [27] HANGUL SYLLABLE BEOG..HANGUL SYLLABLE BEOH
+ {0xBCA0, 0xBCA0, prLV}, // Lo HANGUL SYLLABLE BE
+ {0xBCA1, 0xBCBB, prLVT}, // Lo [27] HANGUL SYLLABLE BEG..HANGUL SYLLABLE BEH
+ {0xBCBC, 0xBCBC, prLV}, // Lo HANGUL SYLLABLE BYEO
+ {0xBCBD, 0xBCD7, prLVT}, // Lo [27] HANGUL SYLLABLE BYEOG..HANGUL SYLLABLE BYEOH
+ {0xBCD8, 0xBCD8, prLV}, // Lo HANGUL SYLLABLE BYE
+ {0xBCD9, 0xBCF3, prLVT}, // Lo [27] HANGUL SYLLABLE BYEG..HANGUL SYLLABLE BYEH
+ {0xBCF4, 0xBCF4, prLV}, // Lo HANGUL SYLLABLE BO
+ {0xBCF5, 0xBD0F, prLVT}, // Lo [27] HANGUL SYLLABLE BOG..HANGUL SYLLABLE BOH
+ {0xBD10, 0xBD10, prLV}, // Lo HANGUL SYLLABLE BWA
+ {0xBD11, 0xBD2B, prLVT}, // Lo [27] HANGUL SYLLABLE BWAG..HANGUL SYLLABLE BWAH
+ {0xBD2C, 0xBD2C, prLV}, // Lo HANGUL SYLLABLE BWAE
+ {0xBD2D, 0xBD47, prLVT}, // Lo [27] HANGUL SYLLABLE BWAEG..HANGUL SYLLABLE BWAEH
+ {0xBD48, 0xBD48, prLV}, // Lo HANGUL SYLLABLE BOE
+ {0xBD49, 0xBD63, prLVT}, // Lo [27] HANGUL SYLLABLE BOEG..HANGUL SYLLABLE BOEH
+ {0xBD64, 0xBD64, prLV}, // Lo HANGUL SYLLABLE BYO
+ {0xBD65, 0xBD7F, prLVT}, // Lo [27] HANGUL SYLLABLE BYOG..HANGUL SYLLABLE BYOH
+ {0xBD80, 0xBD80, prLV}, // Lo HANGUL SYLLABLE BU
+ {0xBD81, 0xBD9B, prLVT}, // Lo [27] HANGUL SYLLABLE BUG..HANGUL SYLLABLE BUH
+ {0xBD9C, 0xBD9C, prLV}, // Lo HANGUL SYLLABLE BWEO
+ {0xBD9D, 0xBDB7, prLVT}, // Lo [27] HANGUL SYLLABLE BWEOG..HANGUL SYLLABLE BWEOH
+ {0xBDB8, 0xBDB8, prLV}, // Lo HANGUL SYLLABLE BWE
+ {0xBDB9, 0xBDD3, prLVT}, // Lo [27] HANGUL SYLLABLE BWEG..HANGUL SYLLABLE BWEH
+ {0xBDD4, 0xBDD4, prLV}, // Lo HANGUL SYLLABLE BWI
+ {0xBDD5, 0xBDEF, prLVT}, // Lo [27] HANGUL SYLLABLE BWIG..HANGUL SYLLABLE BWIH
+ {0xBDF0, 0xBDF0, prLV}, // Lo HANGUL SYLLABLE BYU
+ {0xBDF1, 0xBE0B, prLVT}, // Lo [27] HANGUL SYLLABLE BYUG..HANGUL SYLLABLE BYUH
+ {0xBE0C, 0xBE0C, prLV}, // Lo HANGUL SYLLABLE BEU
+ {0xBE0D, 0xBE27, prLVT}, // Lo [27] HANGUL SYLLABLE BEUG..HANGUL SYLLABLE BEUH
+ {0xBE28, 0xBE28, prLV}, // Lo HANGUL SYLLABLE BYI
+ {0xBE29, 0xBE43, prLVT}, // Lo [27] HANGUL SYLLABLE BYIG..HANGUL SYLLABLE BYIH
+ {0xBE44, 0xBE44, prLV}, // Lo HANGUL SYLLABLE BI
+ {0xBE45, 0xBE5F, prLVT}, // Lo [27] HANGUL SYLLABLE BIG..HANGUL SYLLABLE BIH
+ {0xBE60, 0xBE60, prLV}, // Lo HANGUL SYLLABLE BBA
+ {0xBE61, 0xBE7B, prLVT}, // Lo [27] HANGUL SYLLABLE BBAG..HANGUL SYLLABLE BBAH
+ {0xBE7C, 0xBE7C, prLV}, // Lo HANGUL SYLLABLE BBAE
+ {0xBE7D, 0xBE97, prLVT}, // Lo [27] HANGUL SYLLABLE BBAEG..HANGUL SYLLABLE BBAEH
+ {0xBE98, 0xBE98, prLV}, // Lo HANGUL SYLLABLE BBYA
+ {0xBE99, 0xBEB3, prLVT}, // Lo [27] HANGUL SYLLABLE BBYAG..HANGUL SYLLABLE BBYAH
+ {0xBEB4, 0xBEB4, prLV}, // Lo HANGUL SYLLABLE BBYAE
+ {0xBEB5, 0xBECF, prLVT}, // Lo [27] HANGUL SYLLABLE BBYAEG..HANGUL SYLLABLE BBYAEH
+ {0xBED0, 0xBED0, prLV}, // Lo HANGUL SYLLABLE BBEO
+ {0xBED1, 0xBEEB, prLVT}, // Lo [27] HANGUL SYLLABLE BBEOG..HANGUL SYLLABLE BBEOH
+ {0xBEEC, 0xBEEC, prLV}, // Lo HANGUL SYLLABLE BBE
+ {0xBEED, 0xBF07, prLVT}, // Lo [27] HANGUL SYLLABLE BBEG..HANGUL SYLLABLE BBEH
+ {0xBF08, 0xBF08, prLV}, // Lo HANGUL SYLLABLE BBYEO
+ {0xBF09, 0xBF23, prLVT}, // Lo [27] HANGUL SYLLABLE BBYEOG..HANGUL SYLLABLE BBYEOH
+ {0xBF24, 0xBF24, prLV}, // Lo HANGUL SYLLABLE BBYE
+ {0xBF25, 0xBF3F, prLVT}, // Lo [27] HANGUL SYLLABLE BBYEG..HANGUL SYLLABLE BBYEH
+ {0xBF40, 0xBF40, prLV}, // Lo HANGUL SYLLABLE BBO
+ {0xBF41, 0xBF5B, prLVT}, // Lo [27] HANGUL SYLLABLE BBOG..HANGUL SYLLABLE BBOH
+ {0xBF5C, 0xBF5C, prLV}, // Lo HANGUL SYLLABLE BBWA
+ {0xBF5D, 0xBF77, prLVT}, // Lo [27] HANGUL SYLLABLE BBWAG..HANGUL SYLLABLE BBWAH
+ {0xBF78, 0xBF78, prLV}, // Lo HANGUL SYLLABLE BBWAE
+ {0xBF79, 0xBF93, prLVT}, // Lo [27] HANGUL SYLLABLE BBWAEG..HANGUL SYLLABLE BBWAEH
+ {0xBF94, 0xBF94, prLV}, // Lo HANGUL SYLLABLE BBOE
+ {0xBF95, 0xBFAF, prLVT}, // Lo [27] HANGUL SYLLABLE BBOEG..HANGUL SYLLABLE BBOEH
+ {0xBFB0, 0xBFB0, prLV}, // Lo HANGUL SYLLABLE BBYO
+ {0xBFB1, 0xBFCB, prLVT}, // Lo [27] HANGUL SYLLABLE BBYOG..HANGUL SYLLABLE BBYOH
+ {0xBFCC, 0xBFCC, prLV}, // Lo HANGUL SYLLABLE BBU
+ {0xBFCD, 0xBFE7, prLVT}, // Lo [27] HANGUL SYLLABLE BBUG..HANGUL SYLLABLE BBUH
+ {0xBFE8, 0xBFE8, prLV}, // Lo HANGUL SYLLABLE BBWEO
+ {0xBFE9, 0xC003, prLVT}, // Lo [27] HANGUL SYLLABLE BBWEOG..HANGUL SYLLABLE BBWEOH
+ {0xC004, 0xC004, prLV}, // Lo HANGUL SYLLABLE BBWE
+ {0xC005, 0xC01F, prLVT}, // Lo [27] HANGUL SYLLABLE BBWEG..HANGUL SYLLABLE BBWEH
+ {0xC020, 0xC020, prLV}, // Lo HANGUL SYLLABLE BBWI
+ {0xC021, 0xC03B, prLVT}, // Lo [27] HANGUL SYLLABLE BBWIG..HANGUL SYLLABLE BBWIH
+ {0xC03C, 0xC03C, prLV}, // Lo HANGUL SYLLABLE BBYU
+ {0xC03D, 0xC057, prLVT}, // Lo [27] HANGUL SYLLABLE BBYUG..HANGUL SYLLABLE BBYUH
+ {0xC058, 0xC058, prLV}, // Lo HANGUL SYLLABLE BBEU
+ {0xC059, 0xC073, prLVT}, // Lo [27] HANGUL SYLLABLE BBEUG..HANGUL SYLLABLE BBEUH
+ {0xC074, 0xC074, prLV}, // Lo HANGUL SYLLABLE BBYI
+ {0xC075, 0xC08F, prLVT}, // Lo [27] HANGUL SYLLABLE BBYIG..HANGUL SYLLABLE BBYIH
+ {0xC090, 0xC090, prLV}, // Lo HANGUL SYLLABLE BBI
+ {0xC091, 0xC0AB, prLVT}, // Lo [27] HANGUL SYLLABLE BBIG..HANGUL SYLLABLE BBIH
+ {0xC0AC, 0xC0AC, prLV}, // Lo HANGUL SYLLABLE SA
+ {0xC0AD, 0xC0C7, prLVT}, // Lo [27] HANGUL SYLLABLE SAG..HANGUL SYLLABLE SAH
+ {0xC0C8, 0xC0C8, prLV}, // Lo HANGUL SYLLABLE SAE
+ {0xC0C9, 0xC0E3, prLVT}, // Lo [27] HANGUL SYLLABLE SAEG..HANGUL SYLLABLE SAEH
+ {0xC0E4, 0xC0E4, prLV}, // Lo HANGUL SYLLABLE SYA
+ {0xC0E5, 0xC0FF, prLVT}, // Lo [27] HANGUL SYLLABLE SYAG..HANGUL SYLLABLE SYAH
+ {0xC100, 0xC100, prLV}, // Lo HANGUL SYLLABLE SYAE
+ {0xC101, 0xC11B, prLVT}, // Lo [27] HANGUL SYLLABLE SYAEG..HANGUL SYLLABLE SYAEH
+ {0xC11C, 0xC11C, prLV}, // Lo HANGUL SYLLABLE SEO
+ {0xC11D, 0xC137, prLVT}, // Lo [27] HANGUL SYLLABLE SEOG..HANGUL SYLLABLE SEOH
+ {0xC138, 0xC138, prLV}, // Lo HANGUL SYLLABLE SE
+ {0xC139, 0xC153, prLVT}, // Lo [27] HANGUL SYLLABLE SEG..HANGUL SYLLABLE SEH
+ {0xC154, 0xC154, prLV}, // Lo HANGUL SYLLABLE SYEO
+ {0xC155, 0xC16F, prLVT}, // Lo [27] HANGUL SYLLABLE SYEOG..HANGUL SYLLABLE SYEOH
+ {0xC170, 0xC170, prLV}, // Lo HANGUL SYLLABLE SYE
+ {0xC171, 0xC18B, prLVT}, // Lo [27] HANGUL SYLLABLE SYEG..HANGUL SYLLABLE SYEH
+ {0xC18C, 0xC18C, prLV}, // Lo HANGUL SYLLABLE SO
+ {0xC18D, 0xC1A7, prLVT}, // Lo [27] HANGUL SYLLABLE SOG..HANGUL SYLLABLE SOH
+ {0xC1A8, 0xC1A8, prLV}, // Lo HANGUL SYLLABLE SWA
+ {0xC1A9, 0xC1C3, prLVT}, // Lo [27] HANGUL SYLLABLE SWAG..HANGUL SYLLABLE SWAH
+ {0xC1C4, 0xC1C4, prLV}, // Lo HANGUL SYLLABLE SWAE
+ {0xC1C5, 0xC1DF, prLVT}, // Lo [27] HANGUL SYLLABLE SWAEG..HANGUL SYLLABLE SWAEH
+ {0xC1E0, 0xC1E0, prLV}, // Lo HANGUL SYLLABLE SOE
+ {0xC1E1, 0xC1FB, prLVT}, // Lo [27] HANGUL SYLLABLE SOEG..HANGUL SYLLABLE SOEH
+ {0xC1FC, 0xC1FC, prLV}, // Lo HANGUL SYLLABLE SYO
+ {0xC1FD, 0xC217, prLVT}, // Lo [27] HANGUL SYLLABLE SYOG..HANGUL SYLLABLE SYOH
+ {0xC218, 0xC218, prLV}, // Lo HANGUL SYLLABLE SU
+ {0xC219, 0xC233, prLVT}, // Lo [27] HANGUL SYLLABLE SUG..HANGUL SYLLABLE SUH
+ {0xC234, 0xC234, prLV}, // Lo HANGUL SYLLABLE SWEO
+ {0xC235, 0xC24F, prLVT}, // Lo [27] HANGUL SYLLABLE SWEOG..HANGUL SYLLABLE SWEOH
+ {0xC250, 0xC250, prLV}, // Lo HANGUL SYLLABLE SWE
+ {0xC251, 0xC26B, prLVT}, // Lo [27] HANGUL SYLLABLE SWEG..HANGUL SYLLABLE SWEH
+ {0xC26C, 0xC26C, prLV}, // Lo HANGUL SYLLABLE SWI
+ {0xC26D, 0xC287, prLVT}, // Lo [27] HANGUL SYLLABLE SWIG..HANGUL SYLLABLE SWIH
+ {0xC288, 0xC288, prLV}, // Lo HANGUL SYLLABLE SYU
+ {0xC289, 0xC2A3, prLVT}, // Lo [27] HANGUL SYLLABLE SYUG..HANGUL SYLLABLE SYUH
+ {0xC2A4, 0xC2A4, prLV}, // Lo HANGUL SYLLABLE SEU
+ {0xC2A5, 0xC2BF, prLVT}, // Lo [27] HANGUL SYLLABLE SEUG..HANGUL SYLLABLE SEUH
+ {0xC2C0, 0xC2C0, prLV}, // Lo HANGUL SYLLABLE SYI
+ {0xC2C1, 0xC2DB, prLVT}, // Lo [27] HANGUL SYLLABLE SYIG..HANGUL SYLLABLE SYIH
+ {0xC2DC, 0xC2DC, prLV}, // Lo HANGUL SYLLABLE SI
+ {0xC2DD, 0xC2F7, prLVT}, // Lo [27] HANGUL SYLLABLE SIG..HANGUL SYLLABLE SIH
+ {0xC2F8, 0xC2F8, prLV}, // Lo HANGUL SYLLABLE SSA
+ {0xC2F9, 0xC313, prLVT}, // Lo [27] HANGUL SYLLABLE SSAG..HANGUL SYLLABLE SSAH
+ {0xC314, 0xC314, prLV}, // Lo HANGUL SYLLABLE SSAE
+ {0xC315, 0xC32F, prLVT}, // Lo [27] HANGUL SYLLABLE SSAEG..HANGUL SYLLABLE SSAEH
+ {0xC330, 0xC330, prLV}, // Lo HANGUL SYLLABLE SSYA
+ {0xC331, 0xC34B, prLVT}, // Lo [27] HANGUL SYLLABLE SSYAG..HANGUL SYLLABLE SSYAH
+ {0xC34C, 0xC34C, prLV}, // Lo HANGUL SYLLABLE SSYAE
+ {0xC34D, 0xC367, prLVT}, // Lo [27] HANGUL SYLLABLE SSYAEG..HANGUL SYLLABLE SSYAEH
+ {0xC368, 0xC368, prLV}, // Lo HANGUL SYLLABLE SSEO
+ {0xC369, 0xC383, prLVT}, // Lo [27] HANGUL SYLLABLE SSEOG..HANGUL SYLLABLE SSEOH
+ {0xC384, 0xC384, prLV}, // Lo HANGUL SYLLABLE SSE
+ {0xC385, 0xC39F, prLVT}, // Lo [27] HANGUL SYLLABLE SSEG..HANGUL SYLLABLE SSEH
+ {0xC3A0, 0xC3A0, prLV}, // Lo HANGUL SYLLABLE SSYEO
+ {0xC3A1, 0xC3BB, prLVT}, // Lo [27] HANGUL SYLLABLE SSYEOG..HANGUL SYLLABLE SSYEOH
+ {0xC3BC, 0xC3BC, prLV}, // Lo HANGUL SYLLABLE SSYE
+ {0xC3BD, 0xC3D7, prLVT}, // Lo [27] HANGUL SYLLABLE SSYEG..HANGUL SYLLABLE SSYEH
+ {0xC3D8, 0xC3D8, prLV}, // Lo HANGUL SYLLABLE SSO
+ {0xC3D9, 0xC3F3, prLVT}, // Lo [27] HANGUL SYLLABLE SSOG..HANGUL SYLLABLE SSOH
+ {0xC3F4, 0xC3F4, prLV}, // Lo HANGUL SYLLABLE SSWA
+ {0xC3F5, 0xC40F, prLVT}, // Lo [27] HANGUL SYLLABLE SSWAG..HANGUL SYLLABLE SSWAH
+ {0xC410, 0xC410, prLV}, // Lo HANGUL SYLLABLE SSWAE
+ {0xC411, 0xC42B, prLVT}, // Lo [27] HANGUL SYLLABLE SSWAEG..HANGUL SYLLABLE SSWAEH
+ {0xC42C, 0xC42C, prLV}, // Lo HANGUL SYLLABLE SSOE
+ {0xC42D, 0xC447, prLVT}, // Lo [27] HANGUL SYLLABLE SSOEG..HANGUL SYLLABLE SSOEH
+ {0xC448, 0xC448, prLV}, // Lo HANGUL SYLLABLE SSYO
+ {0xC449, 0xC463, prLVT}, // Lo [27] HANGUL SYLLABLE SSYOG..HANGUL SYLLABLE SSYOH
+ {0xC464, 0xC464, prLV}, // Lo HANGUL SYLLABLE SSU
+ {0xC465, 0xC47F, prLVT}, // Lo [27] HANGUL SYLLABLE SSUG..HANGUL SYLLABLE SSUH
+ {0xC480, 0xC480, prLV}, // Lo HANGUL SYLLABLE SSWEO
+ {0xC481, 0xC49B, prLVT}, // Lo [27] HANGUL SYLLABLE SSWEOG..HANGUL SYLLABLE SSWEOH
+ {0xC49C, 0xC49C, prLV}, // Lo HANGUL SYLLABLE SSWE
+ {0xC49D, 0xC4B7, prLVT}, // Lo [27] HANGUL SYLLABLE SSWEG..HANGUL SYLLABLE SSWEH
+ {0xC4B8, 0xC4B8, prLV}, // Lo HANGUL SYLLABLE SSWI
+ {0xC4B9, 0xC4D3, prLVT}, // Lo [27] HANGUL SYLLABLE SSWIG..HANGUL SYLLABLE SSWIH
+ {0xC4D4, 0xC4D4, prLV}, // Lo HANGUL SYLLABLE SSYU
+ {0xC4D5, 0xC4EF, prLVT}, // Lo [27] HANGUL SYLLABLE SSYUG..HANGUL SYLLABLE SSYUH
+ {0xC4F0, 0xC4F0, prLV}, // Lo HANGUL SYLLABLE SSEU
+ {0xC4F1, 0xC50B, prLVT}, // Lo [27] HANGUL SYLLABLE SSEUG..HANGUL SYLLABLE SSEUH
+ {0xC50C, 0xC50C, prLV}, // Lo HANGUL SYLLABLE SSYI
+ {0xC50D, 0xC527, prLVT}, // Lo [27] HANGUL SYLLABLE SSYIG..HANGUL SYLLABLE SSYIH
+ {0xC528, 0xC528, prLV}, // Lo HANGUL SYLLABLE SSI
+ {0xC529, 0xC543, prLVT}, // Lo [27] HANGUL SYLLABLE SSIG..HANGUL SYLLABLE SSIH
+ {0xC544, 0xC544, prLV}, // Lo HANGUL SYLLABLE A
+ {0xC545, 0xC55F, prLVT}, // Lo [27] HANGUL SYLLABLE AG..HANGUL SYLLABLE AH
+ {0xC560, 0xC560, prLV}, // Lo HANGUL SYLLABLE AE
+ {0xC561, 0xC57B, prLVT}, // Lo [27] HANGUL SYLLABLE AEG..HANGUL SYLLABLE AEH
+ {0xC57C, 0xC57C, prLV}, // Lo HANGUL SYLLABLE YA
+ {0xC57D, 0xC597, prLVT}, // Lo [27] HANGUL SYLLABLE YAG..HANGUL SYLLABLE YAH
+ {0xC598, 0xC598, prLV}, // Lo HANGUL SYLLABLE YAE
+ {0xC599, 0xC5B3, prLVT}, // Lo [27] HANGUL SYLLABLE YAEG..HANGUL SYLLABLE YAEH
+ {0xC5B4, 0xC5B4, prLV}, // Lo HANGUL SYLLABLE EO
+ {0xC5B5, 0xC5CF, prLVT}, // Lo [27] HANGUL SYLLABLE EOG..HANGUL SYLLABLE EOH
+ {0xC5D0, 0xC5D0, prLV}, // Lo HANGUL SYLLABLE E
+ {0xC5D1, 0xC5EB, prLVT}, // Lo [27] HANGUL SYLLABLE EG..HANGUL SYLLABLE EH
+ {0xC5EC, 0xC5EC, prLV}, // Lo HANGUL SYLLABLE YEO
+ {0xC5ED, 0xC607, prLVT}, // Lo [27] HANGUL SYLLABLE YEOG..HANGUL SYLLABLE YEOH
+ {0xC608, 0xC608, prLV}, // Lo HANGUL SYLLABLE YE
+ {0xC609, 0xC623, prLVT}, // Lo [27] HANGUL SYLLABLE YEG..HANGUL SYLLABLE YEH
+ {0xC624, 0xC624, prLV}, // Lo HANGUL SYLLABLE O
+ {0xC625, 0xC63F, prLVT}, // Lo [27] HANGUL SYLLABLE OG..HANGUL SYLLABLE OH
+ {0xC640, 0xC640, prLV}, // Lo HANGUL SYLLABLE WA
+ {0xC641, 0xC65B, prLVT}, // Lo [27] HANGUL SYLLABLE WAG..HANGUL SYLLABLE WAH
+ {0xC65C, 0xC65C, prLV}, // Lo HANGUL SYLLABLE WAE
+ {0xC65D, 0xC677, prLVT}, // Lo [27] HANGUL SYLLABLE WAEG..HANGUL SYLLABLE WAEH
+ {0xC678, 0xC678, prLV}, // Lo HANGUL SYLLABLE OE
+ {0xC679, 0xC693, prLVT}, // Lo [27] HANGUL SYLLABLE OEG..HANGUL SYLLABLE OEH
+ {0xC694, 0xC694, prLV}, // Lo HANGUL SYLLABLE YO
+ {0xC695, 0xC6AF, prLVT}, // Lo [27] HANGUL SYLLABLE YOG..HANGUL SYLLABLE YOH
+ {0xC6B0, 0xC6B0, prLV}, // Lo HANGUL SYLLABLE U
+ {0xC6B1, 0xC6CB, prLVT}, // Lo [27] HANGUL SYLLABLE UG..HANGUL SYLLABLE UH
+ {0xC6CC, 0xC6CC, prLV}, // Lo HANGUL SYLLABLE WEO
+ {0xC6CD, 0xC6E7, prLVT}, // Lo [27] HANGUL SYLLABLE WEOG..HANGUL SYLLABLE WEOH
+ {0xC6E8, 0xC6E8, prLV}, // Lo HANGUL SYLLABLE WE
+ {0xC6E9, 0xC703, prLVT}, // Lo [27] HANGUL SYLLABLE WEG..HANGUL SYLLABLE WEH
+ {0xC704, 0xC704, prLV}, // Lo HANGUL SYLLABLE WI
+ {0xC705, 0xC71F, prLVT}, // Lo [27] HANGUL SYLLABLE WIG..HANGUL SYLLABLE WIH
+ {0xC720, 0xC720, prLV}, // Lo HANGUL SYLLABLE YU
+ {0xC721, 0xC73B, prLVT}, // Lo [27] HANGUL SYLLABLE YUG..HANGUL SYLLABLE YUH
+ {0xC73C, 0xC73C, prLV}, // Lo HANGUL SYLLABLE EU
+ {0xC73D, 0xC757, prLVT}, // Lo [27] HANGUL SYLLABLE EUG..HANGUL SYLLABLE EUH
+ {0xC758, 0xC758, prLV}, // Lo HANGUL SYLLABLE YI
+ {0xC759, 0xC773, prLVT}, // Lo [27] HANGUL SYLLABLE YIG..HANGUL SYLLABLE YIH
+ {0xC774, 0xC774, prLV}, // Lo HANGUL SYLLABLE I
+ {0xC775, 0xC78F, prLVT}, // Lo [27] HANGUL SYLLABLE IG..HANGUL SYLLABLE IH
+ {0xC790, 0xC790, prLV}, // Lo HANGUL SYLLABLE JA
+ {0xC791, 0xC7AB, prLVT}, // Lo [27] HANGUL SYLLABLE JAG..HANGUL SYLLABLE JAH
+ {0xC7AC, 0xC7AC, prLV}, // Lo HANGUL SYLLABLE JAE
+ {0xC7AD, 0xC7C7, prLVT}, // Lo [27] HANGUL SYLLABLE JAEG..HANGUL SYLLABLE JAEH
+ {0xC7C8, 0xC7C8, prLV}, // Lo HANGUL SYLLABLE JYA
+ {0xC7C9, 0xC7E3, prLVT}, // Lo [27] HANGUL SYLLABLE JYAG..HANGUL SYLLABLE JYAH
+ {0xC7E4, 0xC7E4, prLV}, // Lo HANGUL SYLLABLE JYAE
+ {0xC7E5, 0xC7FF, prLVT}, // Lo [27] HANGUL SYLLABLE JYAEG..HANGUL SYLLABLE JYAEH
+ {0xC800, 0xC800, prLV}, // Lo HANGUL SYLLABLE JEO
+ {0xC801, 0xC81B, prLVT}, // Lo [27] HANGUL SYLLABLE JEOG..HANGUL SYLLABLE JEOH
+ {0xC81C, 0xC81C, prLV}, // Lo HANGUL SYLLABLE JE
+ {0xC81D, 0xC837, prLVT}, // Lo [27] HANGUL SYLLABLE JEG..HANGUL SYLLABLE JEH
+ {0xC838, 0xC838, prLV}, // Lo HANGUL SYLLABLE JYEO
+ {0xC839, 0xC853, prLVT}, // Lo [27] HANGUL SYLLABLE JYEOG..HANGUL SYLLABLE JYEOH
+ {0xC854, 0xC854, prLV}, // Lo HANGUL SYLLABLE JYE
+ {0xC855, 0xC86F, prLVT}, // Lo [27] HANGUL SYLLABLE JYEG..HANGUL SYLLABLE JYEH
+ {0xC870, 0xC870, prLV}, // Lo HANGUL SYLLABLE JO
+ {0xC871, 0xC88B, prLVT}, // Lo [27] HANGUL SYLLABLE JOG..HANGUL SYLLABLE JOH
+ {0xC88C, 0xC88C, prLV}, // Lo HANGUL SYLLABLE JWA
+ {0xC88D, 0xC8A7, prLVT}, // Lo [27] HANGUL SYLLABLE JWAG..HANGUL SYLLABLE JWAH
+ {0xC8A8, 0xC8A8, prLV}, // Lo HANGUL SYLLABLE JWAE
+ {0xC8A9, 0xC8C3, prLVT}, // Lo [27] HANGUL SYLLABLE JWAEG..HANGUL SYLLABLE JWAEH
+ {0xC8C4, 0xC8C4, prLV}, // Lo HANGUL SYLLABLE JOE
+ {0xC8C5, 0xC8DF, prLVT}, // Lo [27] HANGUL SYLLABLE JOEG..HANGUL SYLLABLE JOEH
+ {0xC8E0, 0xC8E0, prLV}, // Lo HANGUL SYLLABLE JYO
+ {0xC8E1, 0xC8FB, prLVT}, // Lo [27] HANGUL SYLLABLE JYOG..HANGUL SYLLABLE JYOH
+ {0xC8FC, 0xC8FC, prLV}, // Lo HANGUL SYLLABLE JU
+ {0xC8FD, 0xC917, prLVT}, // Lo [27] HANGUL SYLLABLE JUG..HANGUL SYLLABLE JUH
+ {0xC918, 0xC918, prLV}, // Lo HANGUL SYLLABLE JWEO
+ {0xC919, 0xC933, prLVT}, // Lo [27] HANGUL SYLLABLE JWEOG..HANGUL SYLLABLE JWEOH
+ {0xC934, 0xC934, prLV}, // Lo HANGUL SYLLABLE JWE
+ {0xC935, 0xC94F, prLVT}, // Lo [27] HANGUL SYLLABLE JWEG..HANGUL SYLLABLE JWEH
+ {0xC950, 0xC950, prLV}, // Lo HANGUL SYLLABLE JWI
+ {0xC951, 0xC96B, prLVT}, // Lo [27] HANGUL SYLLABLE JWIG..HANGUL SYLLABLE JWIH
+ {0xC96C, 0xC96C, prLV}, // Lo HANGUL SYLLABLE JYU
+ {0xC96D, 0xC987, prLVT}, // Lo [27] HANGUL SYLLABLE JYUG..HANGUL SYLLABLE JYUH
+ {0xC988, 0xC988, prLV}, // Lo HANGUL SYLLABLE JEU
+ {0xC989, 0xC9A3, prLVT}, // Lo [27] HANGUL SYLLABLE JEUG..HANGUL SYLLABLE JEUH
+ {0xC9A4, 0xC9A4, prLV}, // Lo HANGUL SYLLABLE JYI
+ {0xC9A5, 0xC9BF, prLVT}, // Lo [27] HANGUL SYLLABLE JYIG..HANGUL SYLLABLE JYIH
+ {0xC9C0, 0xC9C0, prLV}, // Lo HANGUL SYLLABLE JI
+ {0xC9C1, 0xC9DB, prLVT}, // Lo [27] HANGUL SYLLABLE JIG..HANGUL SYLLABLE JIH
+ {0xC9DC, 0xC9DC, prLV}, // Lo HANGUL SYLLABLE JJA
+ {0xC9DD, 0xC9F7, prLVT}, // Lo [27] HANGUL SYLLABLE JJAG..HANGUL SYLLABLE JJAH
+ {0xC9F8, 0xC9F8, prLV}, // Lo HANGUL SYLLABLE JJAE
+ {0xC9F9, 0xCA13, prLVT}, // Lo [27] HANGUL SYLLABLE JJAEG..HANGUL SYLLABLE JJAEH
+ {0xCA14, 0xCA14, prLV}, // Lo HANGUL SYLLABLE JJYA
+ {0xCA15, 0xCA2F, prLVT}, // Lo [27] HANGUL SYLLABLE JJYAG..HANGUL SYLLABLE JJYAH
+ {0xCA30, 0xCA30, prLV}, // Lo HANGUL SYLLABLE JJYAE
+ {0xCA31, 0xCA4B, prLVT}, // Lo [27] HANGUL SYLLABLE JJYAEG..HANGUL SYLLABLE JJYAEH
+ {0xCA4C, 0xCA4C, prLV}, // Lo HANGUL SYLLABLE JJEO
+ {0xCA4D, 0xCA67, prLVT}, // Lo [27] HANGUL SYLLABLE JJEOG..HANGUL SYLLABLE JJEOH
+ {0xCA68, 0xCA68, prLV}, // Lo HANGUL SYLLABLE JJE
+ {0xCA69, 0xCA83, prLVT}, // Lo [27] HANGUL SYLLABLE JJEG..HANGUL SYLLABLE JJEH
+ {0xCA84, 0xCA84, prLV}, // Lo HANGUL SYLLABLE JJYEO
+ {0xCA85, 0xCA9F, prLVT}, // Lo [27] HANGUL SYLLABLE JJYEOG..HANGUL SYLLABLE JJYEOH
+ {0xCAA0, 0xCAA0, prLV}, // Lo HANGUL SYLLABLE JJYE
+ {0xCAA1, 0xCABB, prLVT}, // Lo [27] HANGUL SYLLABLE JJYEG..HANGUL SYLLABLE JJYEH
+ {0xCABC, 0xCABC, prLV}, // Lo HANGUL SYLLABLE JJO
+ {0xCABD, 0xCAD7, prLVT}, // Lo [27] HANGUL SYLLABLE JJOG..HANGUL SYLLABLE JJOH
+ {0xCAD8, 0xCAD8, prLV}, // Lo HANGUL SYLLABLE JJWA
+ {0xCAD9, 0xCAF3, prLVT}, // Lo [27] HANGUL SYLLABLE JJWAG..HANGUL SYLLABLE JJWAH
+ {0xCAF4, 0xCAF4, prLV}, // Lo HANGUL SYLLABLE JJWAE
+ {0xCAF5, 0xCB0F, prLVT}, // Lo [27] HANGUL SYLLABLE JJWAEG..HANGUL SYLLABLE JJWAEH
+ {0xCB10, 0xCB10, prLV}, // Lo HANGUL SYLLABLE JJOE
+ {0xCB11, 0xCB2B, prLVT}, // Lo [27] HANGUL SYLLABLE JJOEG..HANGUL SYLLABLE JJOEH
+ {0xCB2C, 0xCB2C, prLV}, // Lo HANGUL SYLLABLE JJYO
+ {0xCB2D, 0xCB47, prLVT}, // Lo [27] HANGUL SYLLABLE JJYOG..HANGUL SYLLABLE JJYOH
+ {0xCB48, 0xCB48, prLV}, // Lo HANGUL SYLLABLE JJU
+ {0xCB49, 0xCB63, prLVT}, // Lo [27] HANGUL SYLLABLE JJUG..HANGUL SYLLABLE JJUH
+ {0xCB64, 0xCB64, prLV}, // Lo HANGUL SYLLABLE JJWEO
+ {0xCB65, 0xCB7F, prLVT}, // Lo [27] HANGUL SYLLABLE JJWEOG..HANGUL SYLLABLE JJWEOH
+ {0xCB80, 0xCB80, prLV}, // Lo HANGUL SYLLABLE JJWE
+ {0xCB81, 0xCB9B, prLVT}, // Lo [27] HANGUL SYLLABLE JJWEG..HANGUL SYLLABLE JJWEH
+ {0xCB9C, 0xCB9C, prLV}, // Lo HANGUL SYLLABLE JJWI
+ {0xCB9D, 0xCBB7, prLVT}, // Lo [27] HANGUL SYLLABLE JJWIG..HANGUL SYLLABLE JJWIH
+ {0xCBB8, 0xCBB8, prLV}, // Lo HANGUL SYLLABLE JJYU
+ {0xCBB9, 0xCBD3, prLVT}, // Lo [27] HANGUL SYLLABLE JJYUG..HANGUL SYLLABLE JJYUH
+ {0xCBD4, 0xCBD4, prLV}, // Lo HANGUL SYLLABLE JJEU
+ {0xCBD5, 0xCBEF, prLVT}, // Lo [27] HANGUL SYLLABLE JJEUG..HANGUL SYLLABLE JJEUH
+ {0xCBF0, 0xCBF0, prLV}, // Lo HANGUL SYLLABLE JJYI
+ {0xCBF1, 0xCC0B, prLVT}, // Lo [27] HANGUL SYLLABLE JJYIG..HANGUL SYLLABLE JJYIH
+ {0xCC0C, 0xCC0C, prLV}, // Lo HANGUL SYLLABLE JJI
+ {0xCC0D, 0xCC27, prLVT}, // Lo [27] HANGUL SYLLABLE JJIG..HANGUL SYLLABLE JJIH
+ {0xCC28, 0xCC28, prLV}, // Lo HANGUL SYLLABLE CA
+ {0xCC29, 0xCC43, prLVT}, // Lo [27] HANGUL SYLLABLE CAG..HANGUL SYLLABLE CAH
+ {0xCC44, 0xCC44, prLV}, // Lo HANGUL SYLLABLE CAE
+ {0xCC45, 0xCC5F, prLVT}, // Lo [27] HANGUL SYLLABLE CAEG..HANGUL SYLLABLE CAEH
+ {0xCC60, 0xCC60, prLV}, // Lo HANGUL SYLLABLE CYA
+ {0xCC61, 0xCC7B, prLVT}, // Lo [27] HANGUL SYLLABLE CYAG..HANGUL SYLLABLE CYAH
+ {0xCC7C, 0xCC7C, prLV}, // Lo HANGUL SYLLABLE CYAE
+ {0xCC7D, 0xCC97, prLVT}, // Lo [27] HANGUL SYLLABLE CYAEG..HANGUL SYLLABLE CYAEH
+ {0xCC98, 0xCC98, prLV}, // Lo HANGUL SYLLABLE CEO
+ {0xCC99, 0xCCB3, prLVT}, // Lo [27] HANGUL SYLLABLE CEOG..HANGUL SYLLABLE CEOH
+ {0xCCB4, 0xCCB4, prLV}, // Lo HANGUL SYLLABLE CE
+ {0xCCB5, 0xCCCF, prLVT}, // Lo [27] HANGUL SYLLABLE CEG..HANGUL SYLLABLE CEH
+ {0xCCD0, 0xCCD0, prLV}, // Lo HANGUL SYLLABLE CYEO
+ {0xCCD1, 0xCCEB, prLVT}, // Lo [27] HANGUL SYLLABLE CYEOG..HANGUL SYLLABLE CYEOH
+ {0xCCEC, 0xCCEC, prLV}, // Lo HANGUL SYLLABLE CYE
+ {0xCCED, 0xCD07, prLVT}, // Lo [27] HANGUL SYLLABLE CYEG..HANGUL SYLLABLE CYEH
+ {0xCD08, 0xCD08, prLV}, // Lo HANGUL SYLLABLE CO
+ {0xCD09, 0xCD23, prLVT}, // Lo [27] HANGUL SYLLABLE COG..HANGUL SYLLABLE COH
+ {0xCD24, 0xCD24, prLV}, // Lo HANGUL SYLLABLE CWA
+ {0xCD25, 0xCD3F, prLVT}, // Lo [27] HANGUL SYLLABLE CWAG..HANGUL SYLLABLE CWAH
+ {0xCD40, 0xCD40, prLV}, // Lo HANGUL SYLLABLE CWAE
+ {0xCD41, 0xCD5B, prLVT}, // Lo [27] HANGUL SYLLABLE CWAEG..HANGUL SYLLABLE CWAEH
+ {0xCD5C, 0xCD5C, prLV}, // Lo HANGUL SYLLABLE COE
+ {0xCD5D, 0xCD77, prLVT}, // Lo [27] HANGUL SYLLABLE COEG..HANGUL SYLLABLE COEH
+ {0xCD78, 0xCD78, prLV}, // Lo HANGUL SYLLABLE CYO
+ {0xCD79, 0xCD93, prLVT}, // Lo [27] HANGUL SYLLABLE CYOG..HANGUL SYLLABLE CYOH
+ {0xCD94, 0xCD94, prLV}, // Lo HANGUL SYLLABLE CU
+ {0xCD95, 0xCDAF, prLVT}, // Lo [27] HANGUL SYLLABLE CUG..HANGUL SYLLABLE CUH
+ {0xCDB0, 0xCDB0, prLV}, // Lo HANGUL SYLLABLE CWEO
+ {0xCDB1, 0xCDCB, prLVT}, // Lo [27] HANGUL SYLLABLE CWEOG..HANGUL SYLLABLE CWEOH
+ {0xCDCC, 0xCDCC, prLV}, // Lo HANGUL SYLLABLE CWE
+ {0xCDCD, 0xCDE7, prLVT}, // Lo [27] HANGUL SYLLABLE CWEG..HANGUL SYLLABLE CWEH
+ {0xCDE8, 0xCDE8, prLV}, // Lo HANGUL SYLLABLE CWI
+ {0xCDE9, 0xCE03, prLVT}, // Lo [27] HANGUL SYLLABLE CWIG..HANGUL SYLLABLE CWIH
+ {0xCE04, 0xCE04, prLV}, // Lo HANGUL SYLLABLE CYU
+ {0xCE05, 0xCE1F, prLVT}, // Lo [27] HANGUL SYLLABLE CYUG..HANGUL SYLLABLE CYUH
+ {0xCE20, 0xCE20, prLV}, // Lo HANGUL SYLLABLE CEU
+ {0xCE21, 0xCE3B, prLVT}, // Lo [27] HANGUL SYLLABLE CEUG..HANGUL SYLLABLE CEUH
+ {0xCE3C, 0xCE3C, prLV}, // Lo HANGUL SYLLABLE CYI
+ {0xCE3D, 0xCE57, prLVT}, // Lo [27] HANGUL SYLLABLE CYIG..HANGUL SYLLABLE CYIH
+ {0xCE58, 0xCE58, prLV}, // Lo HANGUL SYLLABLE CI
+ {0xCE59, 0xCE73, prLVT}, // Lo [27] HANGUL SYLLABLE CIG..HANGUL SYLLABLE CIH
+ {0xCE74, 0xCE74, prLV}, // Lo HANGUL SYLLABLE KA
+ {0xCE75, 0xCE8F, prLVT}, // Lo [27] HANGUL SYLLABLE KAG..HANGUL SYLLABLE KAH
+ {0xCE90, 0xCE90, prLV}, // Lo HANGUL SYLLABLE KAE
+ {0xCE91, 0xCEAB, prLVT}, // Lo [27] HANGUL SYLLABLE KAEG..HANGUL SYLLABLE KAEH
+ {0xCEAC, 0xCEAC, prLV}, // Lo HANGUL SYLLABLE KYA
+ {0xCEAD, 0xCEC7, prLVT}, // Lo [27] HANGUL SYLLABLE KYAG..HANGUL SYLLABLE KYAH
+ {0xCEC8, 0xCEC8, prLV}, // Lo HANGUL SYLLABLE KYAE
+ {0xCEC9, 0xCEE3, prLVT}, // Lo [27] HANGUL SYLLABLE KYAEG..HANGUL SYLLABLE KYAEH
+ {0xCEE4, 0xCEE4, prLV}, // Lo HANGUL SYLLABLE KEO
+ {0xCEE5, 0xCEFF, prLVT}, // Lo [27] HANGUL SYLLABLE KEOG..HANGUL SYLLABLE KEOH
+ {0xCF00, 0xCF00, prLV}, // Lo HANGUL SYLLABLE KE
+ {0xCF01, 0xCF1B, prLVT}, // Lo [27] HANGUL SYLLABLE KEG..HANGUL SYLLABLE KEH
+ {0xCF1C, 0xCF1C, prLV}, // Lo HANGUL SYLLABLE KYEO
+ {0xCF1D, 0xCF37, prLVT}, // Lo [27] HANGUL SYLLABLE KYEOG..HANGUL SYLLABLE KYEOH
+ {0xCF38, 0xCF38, prLV}, // Lo HANGUL SYLLABLE KYE
+ {0xCF39, 0xCF53, prLVT}, // Lo [27] HANGUL SYLLABLE KYEG..HANGUL SYLLABLE KYEH
+ {0xCF54, 0xCF54, prLV}, // Lo HANGUL SYLLABLE KO
+ {0xCF55, 0xCF6F, prLVT}, // Lo [27] HANGUL SYLLABLE KOG..HANGUL SYLLABLE KOH
+ {0xCF70, 0xCF70, prLV}, // Lo HANGUL SYLLABLE KWA
+ {0xCF71, 0xCF8B, prLVT}, // Lo [27] HANGUL SYLLABLE KWAG..HANGUL SYLLABLE KWAH
+ {0xCF8C, 0xCF8C, prLV}, // Lo HANGUL SYLLABLE KWAE
+ {0xCF8D, 0xCFA7, prLVT}, // Lo [27] HANGUL SYLLABLE KWAEG..HANGUL SYLLABLE KWAEH
+ {0xCFA8, 0xCFA8, prLV}, // Lo HANGUL SYLLABLE KOE
+ {0xCFA9, 0xCFC3, prLVT}, // Lo [27] HANGUL SYLLABLE KOEG..HANGUL SYLLABLE KOEH
+ {0xCFC4, 0xCFC4, prLV}, // Lo HANGUL SYLLABLE KYO
+ {0xCFC5, 0xCFDF, prLVT}, // Lo [27] HANGUL SYLLABLE KYOG..HANGUL SYLLABLE KYOH
+ {0xCFE0, 0xCFE0, prLV}, // Lo HANGUL SYLLABLE KU
+ {0xCFE1, 0xCFFB, prLVT}, // Lo [27] HANGUL SYLLABLE KUG..HANGUL SYLLABLE KUH
+ {0xCFFC, 0xCFFC, prLV}, // Lo HANGUL SYLLABLE KWEO
+ {0xCFFD, 0xD017, prLVT}, // Lo [27] HANGUL SYLLABLE KWEOG..HANGUL SYLLABLE KWEOH
+ {0xD018, 0xD018, prLV}, // Lo HANGUL SYLLABLE KWE
+ {0xD019, 0xD033, prLVT}, // Lo [27] HANGUL SYLLABLE KWEG..HANGUL SYLLABLE KWEH
+ {0xD034, 0xD034, prLV}, // Lo HANGUL SYLLABLE KWI
+ {0xD035, 0xD04F, prLVT}, // Lo [27] HANGUL SYLLABLE KWIG..HANGUL SYLLABLE KWIH
+ {0xD050, 0xD050, prLV}, // Lo HANGUL SYLLABLE KYU
+ {0xD051, 0xD06B, prLVT}, // Lo [27] HANGUL SYLLABLE KYUG..HANGUL SYLLABLE KYUH
+ {0xD06C, 0xD06C, prLV}, // Lo HANGUL SYLLABLE KEU
+ {0xD06D, 0xD087, prLVT}, // Lo [27] HANGUL SYLLABLE KEUG..HANGUL SYLLABLE KEUH
+ {0xD088, 0xD088, prLV}, // Lo HANGUL SYLLABLE KYI
+ {0xD089, 0xD0A3, prLVT}, // Lo [27] HANGUL SYLLABLE KYIG..HANGUL SYLLABLE KYIH
+ {0xD0A4, 0xD0A4, prLV}, // Lo HANGUL SYLLABLE KI
+ {0xD0A5, 0xD0BF, prLVT}, // Lo [27] HANGUL SYLLABLE KIG..HANGUL SYLLABLE KIH
+ {0xD0C0, 0xD0C0, prLV}, // Lo HANGUL SYLLABLE TA
+ {0xD0C1, 0xD0DB, prLVT}, // Lo [27] HANGUL SYLLABLE TAG..HANGUL SYLLABLE TAH
+ {0xD0DC, 0xD0DC, prLV}, // Lo HANGUL SYLLABLE TAE
+ {0xD0DD, 0xD0F7, prLVT}, // Lo [27] HANGUL SYLLABLE TAEG..HANGUL SYLLABLE TAEH
+ {0xD0F8, 0xD0F8, prLV}, // Lo HANGUL SYLLABLE TYA
+ {0xD0F9, 0xD113, prLVT}, // Lo [27] HANGUL SYLLABLE TYAG..HANGUL SYLLABLE TYAH
+ {0xD114, 0xD114, prLV}, // Lo HANGUL SYLLABLE TYAE
+ {0xD115, 0xD12F, prLVT}, // Lo [27] HANGUL SYLLABLE TYAEG..HANGUL SYLLABLE TYAEH
+ {0xD130, 0xD130, prLV}, // Lo HANGUL SYLLABLE TEO
+ {0xD131, 0xD14B, prLVT}, // Lo [27] HANGUL SYLLABLE TEOG..HANGUL SYLLABLE TEOH
+ {0xD14C, 0xD14C, prLV}, // Lo HANGUL SYLLABLE TE
+ {0xD14D, 0xD167, prLVT}, // Lo [27] HANGUL SYLLABLE TEG..HANGUL SYLLABLE TEH
+ {0xD168, 0xD168, prLV}, // Lo HANGUL SYLLABLE TYEO
+ {0xD169, 0xD183, prLVT}, // Lo [27] HANGUL SYLLABLE TYEOG..HANGUL SYLLABLE TYEOH
+ {0xD184, 0xD184, prLV}, // Lo HANGUL SYLLABLE TYE
+ {0xD185, 0xD19F, prLVT}, // Lo [27] HANGUL SYLLABLE TYEG..HANGUL SYLLABLE TYEH
+ {0xD1A0, 0xD1A0, prLV}, // Lo HANGUL SYLLABLE TO
+ {0xD1A1, 0xD1BB, prLVT}, // Lo [27] HANGUL SYLLABLE TOG..HANGUL SYLLABLE TOH
+ {0xD1BC, 0xD1BC, prLV}, // Lo HANGUL SYLLABLE TWA
+ {0xD1BD, 0xD1D7, prLVT}, // Lo [27] HANGUL SYLLABLE TWAG..HANGUL SYLLABLE TWAH
+ {0xD1D8, 0xD1D8, prLV}, // Lo HANGUL SYLLABLE TWAE
+ {0xD1D9, 0xD1F3, prLVT}, // Lo [27] HANGUL SYLLABLE TWAEG..HANGUL SYLLABLE TWAEH
+ {0xD1F4, 0xD1F4, prLV}, // Lo HANGUL SYLLABLE TOE
+ {0xD1F5, 0xD20F, prLVT}, // Lo [27] HANGUL SYLLABLE TOEG..HANGUL SYLLABLE TOEH
+ {0xD210, 0xD210, prLV}, // Lo HANGUL SYLLABLE TYO
+ {0xD211, 0xD22B, prLVT}, // Lo [27] HANGUL SYLLABLE TYOG..HANGUL SYLLABLE TYOH
+ {0xD22C, 0xD22C, prLV}, // Lo HANGUL SYLLABLE TU
+ {0xD22D, 0xD247, prLVT}, // Lo [27] HANGUL SYLLABLE TUG..HANGUL SYLLABLE TUH
+ {0xD248, 0xD248, prLV}, // Lo HANGUL SYLLABLE TWEO
+ {0xD249, 0xD263, prLVT}, // Lo [27] HANGUL SYLLABLE TWEOG..HANGUL SYLLABLE TWEOH
+ {0xD264, 0xD264, prLV}, // Lo HANGUL SYLLABLE TWE
+ {0xD265, 0xD27F, prLVT}, // Lo [27] HANGUL SYLLABLE TWEG..HANGUL SYLLABLE TWEH
+ {0xD280, 0xD280, prLV}, // Lo HANGUL SYLLABLE TWI
+ {0xD281, 0xD29B, prLVT}, // Lo [27] HANGUL SYLLABLE TWIG..HANGUL SYLLABLE TWIH
+ {0xD29C, 0xD29C, prLV}, // Lo HANGUL SYLLABLE TYU
+ {0xD29D, 0xD2B7, prLVT}, // Lo [27] HANGUL SYLLABLE TYUG..HANGUL SYLLABLE TYUH
+ {0xD2B8, 0xD2B8, prLV}, // Lo HANGUL SYLLABLE TEU
+ {0xD2B9, 0xD2D3, prLVT}, // Lo [27] HANGUL SYLLABLE TEUG..HANGUL SYLLABLE TEUH
+ {0xD2D4, 0xD2D4, prLV}, // Lo HANGUL SYLLABLE TYI
+ {0xD2D5, 0xD2EF, prLVT}, // Lo [27] HANGUL SYLLABLE TYIG..HANGUL SYLLABLE TYIH
+ {0xD2F0, 0xD2F0, prLV}, // Lo HANGUL SYLLABLE TI
+ {0xD2F1, 0xD30B, prLVT}, // Lo [27] HANGUL SYLLABLE TIG..HANGUL SYLLABLE TIH
+ {0xD30C, 0xD30C, prLV}, // Lo HANGUL SYLLABLE PA
+ {0xD30D, 0xD327, prLVT}, // Lo [27] HANGUL SYLLABLE PAG..HANGUL SYLLABLE PAH
+ {0xD328, 0xD328, prLV}, // Lo HANGUL SYLLABLE PAE
+ {0xD329, 0xD343, prLVT}, // Lo [27] HANGUL SYLLABLE PAEG..HANGUL SYLLABLE PAEH
+ {0xD344, 0xD344, prLV}, // Lo HANGUL SYLLABLE PYA
+ {0xD345, 0xD35F, prLVT}, // Lo [27] HANGUL SYLLABLE PYAG..HANGUL SYLLABLE PYAH
+ {0xD360, 0xD360, prLV}, // Lo HANGUL SYLLABLE PYAE
+ {0xD361, 0xD37B, prLVT}, // Lo [27] HANGUL SYLLABLE PYAEG..HANGUL SYLLABLE PYAEH
+ {0xD37C, 0xD37C, prLV}, // Lo HANGUL SYLLABLE PEO
+ {0xD37D, 0xD397, prLVT}, // Lo [27] HANGUL SYLLABLE PEOG..HANGUL SYLLABLE PEOH
+ {0xD398, 0xD398, prLV}, // Lo HANGUL SYLLABLE PE
+ {0xD399, 0xD3B3, prLVT}, // Lo [27] HANGUL SYLLABLE PEG..HANGUL SYLLABLE PEH
+ {0xD3B4, 0xD3B4, prLV}, // Lo HANGUL SYLLABLE PYEO
+ {0xD3B5, 0xD3CF, prLVT}, // Lo [27] HANGUL SYLLABLE PYEOG..HANGUL SYLLABLE PYEOH
+ {0xD3D0, 0xD3D0, prLV}, // Lo HANGUL SYLLABLE PYE
+ {0xD3D1, 0xD3EB, prLVT}, // Lo [27] HANGUL SYLLABLE PYEG..HANGUL SYLLABLE PYEH
+ {0xD3EC, 0xD3EC, prLV}, // Lo HANGUL SYLLABLE PO
+ {0xD3ED, 0xD407, prLVT}, // Lo [27] HANGUL SYLLABLE POG..HANGUL SYLLABLE POH
+ {0xD408, 0xD408, prLV}, // Lo HANGUL SYLLABLE PWA
+ {0xD409, 0xD423, prLVT}, // Lo [27] HANGUL SYLLABLE PWAG..HANGUL SYLLABLE PWAH
+ {0xD424, 0xD424, prLV}, // Lo HANGUL SYLLABLE PWAE
+ {0xD425, 0xD43F, prLVT}, // Lo [27] HANGUL SYLLABLE PWAEG..HANGUL SYLLABLE PWAEH
+ {0xD440, 0xD440, prLV}, // Lo HANGUL SYLLABLE POE
+ {0xD441, 0xD45B, prLVT}, // Lo [27] HANGUL SYLLABLE POEG..HANGUL SYLLABLE POEH
+ {0xD45C, 0xD45C, prLV}, // Lo HANGUL SYLLABLE PYO
+ {0xD45D, 0xD477, prLVT}, // Lo [27] HANGUL SYLLABLE PYOG..HANGUL SYLLABLE PYOH
+ {0xD478, 0xD478, prLV}, // Lo HANGUL SYLLABLE PU
+ {0xD479, 0xD493, prLVT}, // Lo [27] HANGUL SYLLABLE PUG..HANGUL SYLLABLE PUH
+ {0xD494, 0xD494, prLV}, // Lo HANGUL SYLLABLE PWEO
+ {0xD495, 0xD4AF, prLVT}, // Lo [27] HANGUL SYLLABLE PWEOG..HANGUL SYLLABLE PWEOH
+ {0xD4B0, 0xD4B0, prLV}, // Lo HANGUL SYLLABLE PWE
+ {0xD4B1, 0xD4CB, prLVT}, // Lo [27] HANGUL SYLLABLE PWEG..HANGUL SYLLABLE PWEH
+ {0xD4CC, 0xD4CC, prLV}, // Lo HANGUL SYLLABLE PWI
+ {0xD4CD, 0xD4E7, prLVT}, // Lo [27] HANGUL SYLLABLE PWIG..HANGUL SYLLABLE PWIH
+ {0xD4E8, 0xD4E8, prLV}, // Lo HANGUL SYLLABLE PYU
+ {0xD4E9, 0xD503, prLVT}, // Lo [27] HANGUL SYLLABLE PYUG..HANGUL SYLLABLE PYUH
+ {0xD504, 0xD504, prLV}, // Lo HANGUL SYLLABLE PEU
+ {0xD505, 0xD51F, prLVT}, // Lo [27] HANGUL SYLLABLE PEUG..HANGUL SYLLABLE PEUH
+ {0xD520, 0xD520, prLV}, // Lo HANGUL SYLLABLE PYI
+ {0xD521, 0xD53B, prLVT}, // Lo [27] HANGUL SYLLABLE PYIG..HANGUL SYLLABLE PYIH
+ {0xD53C, 0xD53C, prLV}, // Lo HANGUL SYLLABLE PI
+ {0xD53D, 0xD557, prLVT}, // Lo [27] HANGUL SYLLABLE PIG..HANGUL SYLLABLE PIH
+ {0xD558, 0xD558, prLV}, // Lo HANGUL SYLLABLE HA
+ {0xD559, 0xD573, prLVT}, // Lo [27] HANGUL SYLLABLE HAG..HANGUL SYLLABLE HAH
+ {0xD574, 0xD574, prLV}, // Lo HANGUL SYLLABLE HAE
+ {0xD575, 0xD58F, prLVT}, // Lo [27] HANGUL SYLLABLE HAEG..HANGUL SYLLABLE HAEH
+ {0xD590, 0xD590, prLV}, // Lo HANGUL SYLLABLE HYA
+ {0xD591, 0xD5AB, prLVT}, // Lo [27] HANGUL SYLLABLE HYAG..HANGUL SYLLABLE HYAH
+ {0xD5AC, 0xD5AC, prLV}, // Lo HANGUL SYLLABLE HYAE
+ {0xD5AD, 0xD5C7, prLVT}, // Lo [27] HANGUL SYLLABLE HYAEG..HANGUL SYLLABLE HYAEH
+ {0xD5C8, 0xD5C8, prLV}, // Lo HANGUL SYLLABLE HEO
+ {0xD5C9, 0xD5E3, prLVT}, // Lo [27] HANGUL SYLLABLE HEOG..HANGUL SYLLABLE HEOH
+ {0xD5E4, 0xD5E4, prLV}, // Lo HANGUL SYLLABLE HE
+ {0xD5E5, 0xD5FF, prLVT}, // Lo [27] HANGUL SYLLABLE HEG..HANGUL SYLLABLE HEH
+ {0xD600, 0xD600, prLV}, // Lo HANGUL SYLLABLE HYEO
+ {0xD601, 0xD61B, prLVT}, // Lo [27] HANGUL SYLLABLE HYEOG..HANGUL SYLLABLE HYEOH
+ {0xD61C, 0xD61C, prLV}, // Lo HANGUL SYLLABLE HYE
+ {0xD61D, 0xD637, prLVT}, // Lo [27] HANGUL SYLLABLE HYEG..HANGUL SYLLABLE HYEH
+ {0xD638, 0xD638, prLV}, // Lo HANGUL SYLLABLE HO
+ {0xD639, 0xD653, prLVT}, // Lo [27] HANGUL SYLLABLE HOG..HANGUL SYLLABLE HOH
+ {0xD654, 0xD654, prLV}, // Lo HANGUL SYLLABLE HWA
+ {0xD655, 0xD66F, prLVT}, // Lo [27] HANGUL SYLLABLE HWAG..HANGUL SYLLABLE HWAH
+ {0xD670, 0xD670, prLV}, // Lo HANGUL SYLLABLE HWAE
+ {0xD671, 0xD68B, prLVT}, // Lo [27] HANGUL SYLLABLE HWAEG..HANGUL SYLLABLE HWAEH
+ {0xD68C, 0xD68C, prLV}, // Lo HANGUL SYLLABLE HOE
+ {0xD68D, 0xD6A7, prLVT}, // Lo [27] HANGUL SYLLABLE HOEG..HANGUL SYLLABLE HOEH
+ {0xD6A8, 0xD6A8, prLV}, // Lo HANGUL SYLLABLE HYO
+ {0xD6A9, 0xD6C3, prLVT}, // Lo [27] HANGUL SYLLABLE HYOG..HANGUL SYLLABLE HYOH
+ {0xD6C4, 0xD6C4, prLV}, // Lo HANGUL SYLLABLE HU
+ {0xD6C5, 0xD6DF, prLVT}, // Lo [27] HANGUL SYLLABLE HUG..HANGUL SYLLABLE HUH
+ {0xD6E0, 0xD6E0, prLV}, // Lo HANGUL SYLLABLE HWEO
+ {0xD6E1, 0xD6FB, prLVT}, // Lo [27] HANGUL SYLLABLE HWEOG..HANGUL SYLLABLE HWEOH
+ {0xD6FC, 0xD6FC, prLV}, // Lo HANGUL SYLLABLE HWE
+ {0xD6FD, 0xD717, prLVT}, // Lo [27] HANGUL SYLLABLE HWEG..HANGUL SYLLABLE HWEH
+ {0xD718, 0xD718, prLV}, // Lo HANGUL SYLLABLE HWI
+ {0xD719, 0xD733, prLVT}, // Lo [27] HANGUL SYLLABLE HWIG..HANGUL SYLLABLE HWIH
+ {0xD734, 0xD734, prLV}, // Lo HANGUL SYLLABLE HYU
+ {0xD735, 0xD74F, prLVT}, // Lo [27] HANGUL SYLLABLE HYUG..HANGUL SYLLABLE HYUH
+ {0xD750, 0xD750, prLV}, // Lo HANGUL SYLLABLE HEU
+ {0xD751, 0xD76B, prLVT}, // Lo [27] HANGUL SYLLABLE HEUG..HANGUL SYLLABLE HEUH
+ {0xD76C, 0xD76C, prLV}, // Lo HANGUL SYLLABLE HYI
+ {0xD76D, 0xD787, prLVT}, // Lo [27] HANGUL SYLLABLE HYIG..HANGUL SYLLABLE HYIH
+ {0xD788, 0xD788, prLV}, // Lo HANGUL SYLLABLE HI
+ {0xD789, 0xD7A3, prLVT}, // Lo [27] HANGUL SYLLABLE HIG..HANGUL SYLLABLE HIH
+ {0xD7B0, 0xD7C6, prV}, // Lo [23] HANGUL JUNGSEONG O-YEO..HANGUL JUNGSEONG ARAEA-E
+ {0xD7CB, 0xD7FB, prT}, // Lo [49] HANGUL JONGSEONG NIEUN-RIEUL..HANGUL JONGSEONG PHIEUPH-THIEUTH
+ {0xFB1E, 0xFB1E, prExtend}, // Mn HEBREW POINT JUDEO-SPANISH VARIKA
+ {0xFE00, 0xFE0F, prExtend}, // Mn [16] VARIATION SELECTOR-1..VARIATION SELECTOR-16
+ {0xFE20, 0xFE2F, prExtend}, // Mn [16] COMBINING LIGATURE LEFT HALF..COMBINING CYRILLIC TITLO RIGHT HALF
+ {0xFEFF, 0xFEFF, prControl}, // Cf ZERO WIDTH NO-BREAK SPACE
+ {0xFF9E, 0xFF9F, prExtend}, // Lm [2] HALFWIDTH KATAKANA VOICED SOUND MARK..HALFWIDTH KATAKANA SEMI-VOICED SOUND MARK
+ {0xFFF0, 0xFFF8, prControl}, // Cn [9] <reserved-FFF0>..<reserved-FFF8>
+ {0xFFF9, 0xFFFB, prControl}, // Cf [3] INTERLINEAR ANNOTATION ANCHOR..INTERLINEAR ANNOTATION TERMINATOR
+ {0x101FD, 0x101FD, prExtend}, // Mn PHAISTOS DISC SIGN COMBINING OBLIQUE STROKE
+ {0x102E0, 0x102E0, prExtend}, // Mn COPTIC EPACT THOUSANDS MARK
+ {0x10376, 0x1037A, prExtend}, // Mn [5] COMBINING OLD PERMIC LETTER AN..COMBINING OLD PERMIC LETTER SII
+ {0x10A01, 0x10A03, prExtend}, // Mn [3] KHAROSHTHI VOWEL SIGN I..KHAROSHTHI VOWEL SIGN VOCALIC R
+ {0x10A05, 0x10A06, prExtend}, // Mn [2] KHAROSHTHI VOWEL SIGN E..KHAROSHTHI VOWEL SIGN O
+ {0x10A0C, 0x10A0F, prExtend}, // Mn [4] KHAROSHTHI VOWEL LENGTH MARK..KHAROSHTHI SIGN VISARGA
+ {0x10A38, 0x10A3A, prExtend}, // Mn [3] KHAROSHTHI SIGN BAR ABOVE..KHAROSHTHI SIGN DOT BELOW
+ {0x10A3F, 0x10A3F, prExtend}, // Mn KHAROSHTHI VIRAMA
+ {0x10AE5, 0x10AE6, prExtend}, // Mn [2] MANICHAEAN ABBREVIATION MARK ABOVE..MANICHAEAN ABBREVIATION MARK BELOW
+ {0x10D24, 0x10D27, prExtend}, // Mn [4] HANIFI ROHINGYA SIGN HARBAHAY..HANIFI ROHINGYA SIGN TASSI
+ {0x10EAB, 0x10EAC, prExtend}, // Mn [2] YEZIDI COMBINING HAMZA MARK..YEZIDI COMBINING MADDA MARK
+ {0x10F46, 0x10F50, prExtend}, // Mn [11] SOGDIAN COMBINING DOT BELOW..SOGDIAN COMBINING STROKE BELOW
+ {0x10F82, 0x10F85, prExtend}, // Mn [4] OLD UYGHUR COMBINING DOT ABOVE..OLD UYGHUR COMBINING TWO DOTS BELOW
+ {0x11000, 0x11000, prSpacingMark}, // Mc BRAHMI SIGN CANDRABINDU
+ {0x11001, 0x11001, prExtend}, // Mn BRAHMI SIGN ANUSVARA
+ {0x11002, 0x11002, prSpacingMark}, // Mc BRAHMI SIGN VISARGA
+ {0x11038, 0x11046, prExtend}, // Mn [15] BRAHMI VOWEL SIGN AA..BRAHMI VIRAMA
+ {0x11070, 0x11070, prExtend}, // Mn BRAHMI SIGN OLD TAMIL VIRAMA
+ {0x11073, 0x11074, prExtend}, // Mn [2] BRAHMI VOWEL SIGN OLD TAMIL SHORT E..BRAHMI VOWEL SIGN OLD TAMIL SHORT O
+ {0x1107F, 0x11081, prExtend}, // Mn [3] BRAHMI NUMBER JOINER..KAITHI SIGN ANUSVARA
+ {0x11082, 0x11082, prSpacingMark}, // Mc KAITHI SIGN VISARGA
+ {0x110B0, 0x110B2, prSpacingMark}, // Mc [3] KAITHI VOWEL SIGN AA..KAITHI VOWEL SIGN II
+ {0x110B3, 0x110B6, prExtend}, // Mn [4] KAITHI VOWEL SIGN U..KAITHI VOWEL SIGN AI
+ {0x110B7, 0x110B8, prSpacingMark}, // Mc [2] KAITHI VOWEL SIGN O..KAITHI VOWEL SIGN AU
+ {0x110B9, 0x110BA, prExtend}, // Mn [2] KAITHI SIGN VIRAMA..KAITHI SIGN NUKTA
+ {0x110BD, 0x110BD, prPrepend}, // Cf KAITHI NUMBER SIGN
+ {0x110C2, 0x110C2, prExtend}, // Mn KAITHI VOWEL SIGN VOCALIC R
+ {0x110CD, 0x110CD, prPrepend}, // Cf KAITHI NUMBER SIGN ABOVE
+ {0x11100, 0x11102, prExtend}, // Mn [3] CHAKMA SIGN CANDRABINDU..CHAKMA SIGN VISARGA
+ {0x11127, 0x1112B, prExtend}, // Mn [5] CHAKMA VOWEL SIGN A..CHAKMA VOWEL SIGN UU
+ {0x1112C, 0x1112C, prSpacingMark}, // Mc CHAKMA VOWEL SIGN E
+ {0x1112D, 0x11134, prExtend}, // Mn [8] CHAKMA VOWEL SIGN AI..CHAKMA MAAYYAA
+ {0x11145, 0x11146, prSpacingMark}, // Mc [2] CHAKMA VOWEL SIGN AA..CHAKMA VOWEL SIGN EI
+ {0x11173, 0x11173, prExtend}, // Mn MAHAJANI SIGN NUKTA
+ {0x11180, 0x11181, prExtend}, // Mn [2] SHARADA SIGN CANDRABINDU..SHARADA SIGN ANUSVARA
+ {0x11182, 0x11182, prSpacingMark}, // Mc SHARADA SIGN VISARGA
+ {0x111B3, 0x111B5, prSpacingMark}, // Mc [3] SHARADA VOWEL SIGN AA..SHARADA VOWEL SIGN II
+ {0x111B6, 0x111BE, prExtend}, // Mn [9] SHARADA VOWEL SIGN U..SHARADA VOWEL SIGN O
+ {0x111BF, 0x111C0, prSpacingMark}, // Mc [2] SHARADA VOWEL SIGN AU..SHARADA SIGN VIRAMA
+ {0x111C2, 0x111C3, prPrepend}, // Lo [2] SHARADA SIGN JIHVAMULIYA..SHARADA SIGN UPADHMANIYA
+ {0x111C9, 0x111CC, prExtend}, // Mn [4] SHARADA SANDHI MARK..SHARADA EXTRA SHORT VOWEL MARK
+ {0x111CE, 0x111CE, prSpacingMark}, // Mc SHARADA VOWEL SIGN PRISHTHAMATRA E
+ {0x111CF, 0x111CF, prExtend}, // Mn SHARADA SIGN INVERTED CANDRABINDU
+ {0x1122C, 0x1122E, prSpacingMark}, // Mc [3] KHOJKI VOWEL SIGN AA..KHOJKI VOWEL SIGN II
+ {0x1122F, 0x11231, prExtend}, // Mn [3] KHOJKI VOWEL SIGN U..KHOJKI VOWEL SIGN AI
+ {0x11232, 0x11233, prSpacingMark}, // Mc [2] KHOJKI VOWEL SIGN O..KHOJKI VOWEL SIGN AU
+ {0x11234, 0x11234, prExtend}, // Mn KHOJKI SIGN ANUSVARA
+ {0x11235, 0x11235, prSpacingMark}, // Mc KHOJKI SIGN VIRAMA
+ {0x11236, 0x11237, prExtend}, // Mn [2] KHOJKI SIGN NUKTA..KHOJKI SIGN SHADDA
+ {0x1123E, 0x1123E, prExtend}, // Mn KHOJKI SIGN SUKUN
+ {0x112DF, 0x112DF, prExtend}, // Mn KHUDAWADI SIGN ANUSVARA
+ {0x112E0, 0x112E2, prSpacingMark}, // Mc [3] KHUDAWADI VOWEL SIGN AA..KHUDAWADI VOWEL SIGN II
+ {0x112E3, 0x112EA, prExtend}, // Mn [8] KHUDAWADI VOWEL SIGN U..KHUDAWADI SIGN VIRAMA
+ {0x11300, 0x11301, prExtend}, // Mn [2] GRANTHA SIGN COMBINING ANUSVARA ABOVE..GRANTHA SIGN CANDRABINDU
+ {0x11302, 0x11303, prSpacingMark}, // Mc [2] GRANTHA SIGN ANUSVARA..GRANTHA SIGN VISARGA
+ {0x1133B, 0x1133C, prExtend}, // Mn [2] COMBINING BINDU BELOW..GRANTHA SIGN NUKTA
+ {0x1133E, 0x1133E, prExtend}, // Mc GRANTHA VOWEL SIGN AA
+ {0x1133F, 0x1133F, prSpacingMark}, // Mc GRANTHA VOWEL SIGN I
+ {0x11340, 0x11340, prExtend}, // Mn GRANTHA VOWEL SIGN II
+ {0x11341, 0x11344, prSpacingMark}, // Mc [4] GRANTHA VOWEL SIGN U..GRANTHA VOWEL SIGN VOCALIC RR
+ {0x11347, 0x11348, prSpacingMark}, // Mc [2] GRANTHA VOWEL SIGN EE..GRANTHA VOWEL SIGN AI
+ {0x1134B, 0x1134D, prSpacingMark}, // Mc [3] GRANTHA VOWEL SIGN OO..GRANTHA SIGN VIRAMA
+ {0x11357, 0x11357, prExtend}, // Mc GRANTHA AU LENGTH MARK
+ {0x11362, 0x11363, prSpacingMark}, // Mc [2] GRANTHA VOWEL SIGN VOCALIC L..GRANTHA VOWEL SIGN VOCALIC LL
+ {0x11366, 0x1136C, prExtend}, // Mn [7] COMBINING GRANTHA DIGIT ZERO..COMBINING GRANTHA DIGIT SIX
+ {0x11370, 0x11374, prExtend}, // Mn [5] COMBINING GRANTHA LETTER A..COMBINING GRANTHA LETTER PA
+ {0x11435, 0x11437, prSpacingMark}, // Mc [3] NEWA VOWEL SIGN AA..NEWA VOWEL SIGN II
+ {0x11438, 0x1143F, prExtend}, // Mn [8] NEWA VOWEL SIGN U..NEWA VOWEL SIGN AI
+ {0x11440, 0x11441, prSpacingMark}, // Mc [2] NEWA VOWEL SIGN O..NEWA VOWEL SIGN AU
+ {0x11442, 0x11444, prExtend}, // Mn [3] NEWA SIGN VIRAMA..NEWA SIGN ANUSVARA
+ {0x11445, 0x11445, prSpacingMark}, // Mc NEWA SIGN VISARGA
+ {0x11446, 0x11446, prExtend}, // Mn NEWA SIGN NUKTA
+ {0x1145E, 0x1145E, prExtend}, // Mn NEWA SANDHI MARK
+ {0x114B0, 0x114B0, prExtend}, // Mc TIRHUTA VOWEL SIGN AA
+ {0x114B1, 0x114B2, prSpacingMark}, // Mc [2] TIRHUTA VOWEL SIGN I..TIRHUTA VOWEL SIGN II
+ {0x114B3, 0x114B8, prExtend}, // Mn [6] TIRHUTA VOWEL SIGN U..TIRHUTA VOWEL SIGN VOCALIC LL
+ {0x114B9, 0x114B9, prSpacingMark}, // Mc TIRHUTA VOWEL SIGN E
+ {0x114BA, 0x114BA, prExtend}, // Mn TIRHUTA VOWEL SIGN SHORT E
+ {0x114BB, 0x114BC, prSpacingMark}, // Mc [2] TIRHUTA VOWEL SIGN AI..TIRHUTA VOWEL SIGN O
+ {0x114BD, 0x114BD, prExtend}, // Mc TIRHUTA VOWEL SIGN SHORT O
+ {0x114BE, 0x114BE, prSpacingMark}, // Mc TIRHUTA VOWEL SIGN AU
+ {0x114BF, 0x114C0, prExtend}, // Mn [2] TIRHUTA SIGN CANDRABINDU..TIRHUTA SIGN ANUSVARA
+ {0x114C1, 0x114C1, prSpacingMark}, // Mc TIRHUTA SIGN VISARGA
+ {0x114C2, 0x114C3, prExtend}, // Mn [2] TIRHUTA SIGN VIRAMA..TIRHUTA SIGN NUKTA
+ {0x115AF, 0x115AF, prExtend}, // Mc SIDDHAM VOWEL SIGN AA
+ {0x115B0, 0x115B1, prSpacingMark}, // Mc [2] SIDDHAM VOWEL SIGN I..SIDDHAM VOWEL SIGN II
+ {0x115B2, 0x115B5, prExtend}, // Mn [4] SIDDHAM VOWEL SIGN U..SIDDHAM VOWEL SIGN VOCALIC RR
+ {0x115B8, 0x115BB, prSpacingMark}, // Mc [4] SIDDHAM VOWEL SIGN E..SIDDHAM VOWEL SIGN AU
+ {0x115BC, 0x115BD, prExtend}, // Mn [2] SIDDHAM SIGN CANDRABINDU..SIDDHAM SIGN ANUSVARA
+ {0x115BE, 0x115BE, prSpacingMark}, // Mc SIDDHAM SIGN VISARGA
+ {0x115BF, 0x115C0, prExtend}, // Mn [2] SIDDHAM SIGN VIRAMA..SIDDHAM SIGN NUKTA
+ {0x115DC, 0x115DD, prExtend}, // Mn [2] SIDDHAM VOWEL SIGN ALTERNATE U..SIDDHAM VOWEL SIGN ALTERNATE UU
+ {0x11630, 0x11632, prSpacingMark}, // Mc [3] MODI VOWEL SIGN AA..MODI VOWEL SIGN II
+ {0x11633, 0x1163A, prExtend}, // Mn [8] MODI VOWEL SIGN U..MODI VOWEL SIGN AI
+ {0x1163B, 0x1163C, prSpacingMark}, // Mc [2] MODI VOWEL SIGN O..MODI VOWEL SIGN AU
+ {0x1163D, 0x1163D, prExtend}, // Mn MODI SIGN ANUSVARA
+ {0x1163E, 0x1163E, prSpacingMark}, // Mc MODI SIGN VISARGA
+ {0x1163F, 0x11640, prExtend}, // Mn [2] MODI SIGN VIRAMA..MODI SIGN ARDHACANDRA
+ {0x116AB, 0x116AB, prExtend}, // Mn TAKRI SIGN ANUSVARA
+ {0x116AC, 0x116AC, prSpacingMark}, // Mc TAKRI SIGN VISARGA
+ {0x116AD, 0x116AD, prExtend}, // Mn TAKRI VOWEL SIGN AA
+ {0x116AE, 0x116AF, prSpacingMark}, // Mc [2] TAKRI VOWEL SIGN I..TAKRI VOWEL SIGN II
+ {0x116B0, 0x116B5, prExtend}, // Mn [6] TAKRI VOWEL SIGN U..TAKRI VOWEL SIGN AU
+ {0x116B6, 0x116B6, prSpacingMark}, // Mc TAKRI SIGN VIRAMA
+ {0x116B7, 0x116B7, prExtend}, // Mn TAKRI SIGN NUKTA
+ {0x1171D, 0x1171F, prExtend}, // Mn [3] AHOM CONSONANT SIGN MEDIAL LA..AHOM CONSONANT SIGN MEDIAL LIGATING RA
+ {0x11722, 0x11725, prExtend}, // Mn [4] AHOM VOWEL SIGN I..AHOM VOWEL SIGN UU
+ {0x11726, 0x11726, prSpacingMark}, // Mc AHOM VOWEL SIGN E
+ {0x11727, 0x1172B, prExtend}, // Mn [5] AHOM VOWEL SIGN AW..AHOM SIGN KILLER
+ {0x1182C, 0x1182E, prSpacingMark}, // Mc [3] DOGRA VOWEL SIGN AA..DOGRA VOWEL SIGN II
+ {0x1182F, 0x11837, prExtend}, // Mn [9] DOGRA VOWEL SIGN U..DOGRA SIGN ANUSVARA
+ {0x11838, 0x11838, prSpacingMark}, // Mc DOGRA SIGN VISARGA
+ {0x11839, 0x1183A, prExtend}, // Mn [2] DOGRA SIGN VIRAMA..DOGRA SIGN NUKTA
+ {0x11930, 0x11930, prExtend}, // Mc DIVES AKURU VOWEL SIGN AA
+ {0x11931, 0x11935, prSpacingMark}, // Mc [5] DIVES AKURU VOWEL SIGN I..DIVES AKURU VOWEL SIGN E
+ {0x11937, 0x11938, prSpacingMark}, // Mc [2] DIVES AKURU VOWEL SIGN AI..DIVES AKURU VOWEL SIGN O
+ {0x1193B, 0x1193C, prExtend}, // Mn [2] DIVES AKURU SIGN ANUSVARA..DIVES AKURU SIGN CANDRABINDU
+ {0x1193D, 0x1193D, prSpacingMark}, // Mc DIVES AKURU SIGN HALANTA
+ {0x1193E, 0x1193E, prExtend}, // Mn DIVES AKURU VIRAMA
+ {0x1193F, 0x1193F, prPrepend}, // Lo DIVES AKURU PREFIXED NASAL SIGN
+ {0x11940, 0x11940, prSpacingMark}, // Mc DIVES AKURU MEDIAL YA
+ {0x11941, 0x11941, prPrepend}, // Lo DIVES AKURU INITIAL RA
+ {0x11942, 0x11942, prSpacingMark}, // Mc DIVES AKURU MEDIAL RA
+ {0x11943, 0x11943, prExtend}, // Mn DIVES AKURU SIGN NUKTA
+ {0x119D1, 0x119D3, prSpacingMark}, // Mc [3] NANDINAGARI VOWEL SIGN AA..NANDINAGARI VOWEL SIGN II
+ {0x119D4, 0x119D7, prExtend}, // Mn [4] NANDINAGARI VOWEL SIGN U..NANDINAGARI VOWEL SIGN VOCALIC RR
+ {0x119DA, 0x119DB, prExtend}, // Mn [2] NANDINAGARI VOWEL SIGN E..NANDINAGARI VOWEL SIGN AI
+ {0x119DC, 0x119DF, prSpacingMark}, // Mc [4] NANDINAGARI VOWEL SIGN O..NANDINAGARI SIGN VISARGA
+ {0x119E0, 0x119E0, prExtend}, // Mn NANDINAGARI SIGN VIRAMA
+ {0x119E4, 0x119E4, prSpacingMark}, // Mc NANDINAGARI VOWEL SIGN PRISHTHAMATRA E
+ {0x11A01, 0x11A0A, prExtend}, // Mn [10] ZANABAZAR SQUARE VOWEL SIGN I..ZANABAZAR SQUARE VOWEL LENGTH MARK
+ {0x11A33, 0x11A38, prExtend}, // Mn [6] ZANABAZAR SQUARE FINAL CONSONANT MARK..ZANABAZAR SQUARE SIGN ANUSVARA
+ {0x11A39, 0x11A39, prSpacingMark}, // Mc ZANABAZAR SQUARE SIGN VISARGA
+ {0x11A3A, 0x11A3A, prPrepend}, // Lo ZANABAZAR SQUARE CLUSTER-INITIAL LETTER RA
+ {0x11A3B, 0x11A3E, prExtend}, // Mn [4] ZANABAZAR SQUARE CLUSTER-FINAL LETTER YA..ZANABAZAR SQUARE CLUSTER-FINAL LETTER VA
+ {0x11A47, 0x11A47, prExtend}, // Mn ZANABAZAR SQUARE SUBJOINER
+ {0x11A51, 0x11A56, prExtend}, // Mn [6] SOYOMBO VOWEL SIGN I..SOYOMBO VOWEL SIGN OE
+ {0x11A57, 0x11A58, prSpacingMark}, // Mc [2] SOYOMBO VOWEL SIGN AI..SOYOMBO VOWEL SIGN AU
+ {0x11A59, 0x11A5B, prExtend}, // Mn [3] SOYOMBO VOWEL SIGN VOCALIC R..SOYOMBO VOWEL LENGTH MARK
+ {0x11A84, 0x11A89, prPrepend}, // Lo [6] SOYOMBO SIGN JIHVAMULIYA..SOYOMBO CLUSTER-INITIAL LETTER SA
+ {0x11A8A, 0x11A96, prExtend}, // Mn [13] SOYOMBO FINAL CONSONANT SIGN G..SOYOMBO SIGN ANUSVARA
+ {0x11A97, 0x11A97, prSpacingMark}, // Mc SOYOMBO SIGN VISARGA
+ {0x11A98, 0x11A99, prExtend}, // Mn [2] SOYOMBO GEMINATION MARK..SOYOMBO SUBJOINER
+ {0x11C2F, 0x11C2F, prSpacingMark}, // Mc BHAIKSUKI VOWEL SIGN AA
+ {0x11C30, 0x11C36, prExtend}, // Mn [7] BHAIKSUKI VOWEL SIGN I..BHAIKSUKI VOWEL SIGN VOCALIC L
+ {0x11C38, 0x11C3D, prExtend}, // Mn [6] BHAIKSUKI VOWEL SIGN E..BHAIKSUKI SIGN ANUSVARA
+ {0x11C3E, 0x11C3E, prSpacingMark}, // Mc BHAIKSUKI SIGN VISARGA
+ {0x11C3F, 0x11C3F, prExtend}, // Mn BHAIKSUKI SIGN VIRAMA
+ {0x11C92, 0x11CA7, prExtend}, // Mn [22] MARCHEN SUBJOINED LETTER KA..MARCHEN SUBJOINED LETTER ZA
+ {0x11CA9, 0x11CA9, prSpacingMark}, // Mc MARCHEN SUBJOINED LETTER YA
+ {0x11CAA, 0x11CB0, prExtend}, // Mn [7] MARCHEN SUBJOINED LETTER RA..MARCHEN VOWEL SIGN AA
+ {0x11CB1, 0x11CB1, prSpacingMark}, // Mc MARCHEN VOWEL SIGN I
+ {0x11CB2, 0x11CB3, prExtend}, // Mn [2] MARCHEN VOWEL SIGN U..MARCHEN VOWEL SIGN E
+ {0x11CB4, 0x11CB4, prSpacingMark}, // Mc MARCHEN VOWEL SIGN O
+ {0x11CB5, 0x11CB6, prExtend}, // Mn [2] MARCHEN SIGN ANUSVARA..MARCHEN SIGN CANDRABINDU
+ {0x11D31, 0x11D36, prExtend}, // Mn [6] MASARAM GONDI VOWEL SIGN AA..MASARAM GONDI VOWEL SIGN VOCALIC R
+ {0x11D3A, 0x11D3A, prExtend}, // Mn MASARAM GONDI VOWEL SIGN E
+ {0x11D3C, 0x11D3D, prExtend}, // Mn [2] MASARAM GONDI VOWEL SIGN AI..MASARAM GONDI VOWEL SIGN O
+ {0x11D3F, 0x11D45, prExtend}, // Mn [7] MASARAM GONDI VOWEL SIGN AU..MASARAM GONDI VIRAMA
+ {0x11D46, 0x11D46, prPrepend}, // Lo MASARAM GONDI REPHA
+ {0x11D47, 0x11D47, prExtend}, // Mn MASARAM GONDI RA-KARA
+ {0x11D8A, 0x11D8E, prSpacingMark}, // Mc [5] GUNJALA GONDI VOWEL SIGN AA..GUNJALA GONDI VOWEL SIGN UU
+ {0x11D90, 0x11D91, prExtend}, // Mn [2] GUNJALA GONDI VOWEL SIGN EE..GUNJALA GONDI VOWEL SIGN AI
+ {0x11D93, 0x11D94, prSpacingMark}, // Mc [2] GUNJALA GONDI VOWEL SIGN OO..GUNJALA GONDI VOWEL SIGN AU
+ {0x11D95, 0x11D95, prExtend}, // Mn GUNJALA GONDI SIGN ANUSVARA
+ {0x11D96, 0x11D96, prSpacingMark}, // Mc GUNJALA GONDI SIGN VISARGA
+ {0x11D97, 0x11D97, prExtend}, // Mn GUNJALA GONDI VIRAMA
+ {0x11EF3, 0x11EF4, prExtend}, // Mn [2] MAKASAR VOWEL SIGN I..MAKASAR VOWEL SIGN U
+ {0x11EF5, 0x11EF6, prSpacingMark}, // Mc [2] MAKASAR VOWEL SIGN E..MAKASAR VOWEL SIGN O
+ {0x13430, 0x13438, prControl}, // Cf [9] EGYPTIAN HIEROGLYPH VERTICAL JOINER..EGYPTIAN HIEROGLYPH END SEGMENT
+ {0x16AF0, 0x16AF4, prExtend}, // Mn [5] BASSA VAH COMBINING HIGH TONE..BASSA VAH COMBINING HIGH-LOW TONE
+ {0x16B30, 0x16B36, prExtend}, // Mn [7] PAHAWH HMONG MARK CIM TUB..PAHAWH HMONG MARK CIM TAUM
+ {0x16F4F, 0x16F4F, prExtend}, // Mn MIAO SIGN CONSONANT MODIFIER BAR
+ {0x16F51, 0x16F87, prSpacingMark}, // Mc [55] MIAO SIGN ASPIRATION..MIAO VOWEL SIGN UI
+ {0x16F8F, 0x16F92, prExtend}, // Mn [4] MIAO TONE RIGHT..MIAO TONE BELOW
+ {0x16FE4, 0x16FE4, prExtend}, // Mn KHITAN SMALL SCRIPT FILLER
+ {0x16FF0, 0x16FF1, prSpacingMark}, // Mc [2] VIETNAMESE ALTERNATE READING MARK CA..VIETNAMESE ALTERNATE READING MARK NHAY
+ {0x1BC9D, 0x1BC9E, prExtend}, // Mn [2] DUPLOYAN THICK LETTER SELECTOR..DUPLOYAN DOUBLE MARK
+ {0x1BCA0, 0x1BCA3, prControl}, // Cf [4] SHORTHAND FORMAT LETTER OVERLAP..SHORTHAND FORMAT UP STEP
+ {0x1CF00, 0x1CF2D, prExtend}, // Mn [46] ZNAMENNY COMBINING MARK GORAZDO NIZKO S KRYZHEM ON LEFT..ZNAMENNY COMBINING MARK KRYZH ON LEFT
+ {0x1CF30, 0x1CF46, prExtend}, // Mn [23] ZNAMENNY COMBINING TONAL RANGE MARK MRACHNO..ZNAMENNY PRIZNAK MODIFIER ROG
+ {0x1D165, 0x1D165, prExtend}, // Mc MUSICAL SYMBOL COMBINING STEM
+ {0x1D166, 0x1D166, prSpacingMark}, // Mc MUSICAL SYMBOL COMBINING SPRECHGESANG STEM
+ {0x1D167, 0x1D169, prExtend}, // Mn [3] MUSICAL SYMBOL COMBINING TREMOLO-1..MUSICAL SYMBOL COMBINING TREMOLO-3
+ {0x1D16D, 0x1D16D, prSpacingMark}, // Mc MUSICAL SYMBOL COMBINING AUGMENTATION DOT
+ {0x1D16E, 0x1D172, prExtend}, // Mc [5] MUSICAL SYMBOL COMBINING FLAG-1..MUSICAL SYMBOL COMBINING FLAG-5
+ {0x1D173, 0x1D17A, prControl}, // Cf [8] MUSICAL SYMBOL BEGIN BEAM..MUSICAL SYMBOL END PHRASE
+ {0x1D17B, 0x1D182, prExtend}, // Mn [8] MUSICAL SYMBOL COMBINING ACCENT..MUSICAL SYMBOL COMBINING LOURE
+ {0x1D185, 0x1D18B, prExtend}, // Mn [7] MUSICAL SYMBOL COMBINING DOIT..MUSICAL SYMBOL COMBINING TRIPLE TONGUE
+ {0x1D1AA, 0x1D1AD, prExtend}, // Mn [4] MUSICAL SYMBOL COMBINING DOWN BOW..MUSICAL SYMBOL COMBINING SNAP PIZZICATO
+ {0x1D242, 0x1D244, prExtend}, // Mn [3] COMBINING GREEK MUSICAL TRISEME..COMBINING GREEK MUSICAL PENTASEME
+ {0x1DA00, 0x1DA36, prExtend}, // Mn [55] SIGNWRITING HEAD RIM..SIGNWRITING AIR SUCKING IN
+ {0x1DA3B, 0x1DA6C, prExtend}, // Mn [50] SIGNWRITING MOUTH CLOSED NEUTRAL..SIGNWRITING EXCITEMENT
+ {0x1DA75, 0x1DA75, prExtend}, // Mn SIGNWRITING UPPER BODY TILTING FROM HIP JOINTS
+ {0x1DA84, 0x1DA84, prExtend}, // Mn SIGNWRITING LOCATION HEAD NECK
+ {0x1DA9B, 0x1DA9F, prExtend}, // Mn [5] SIGNWRITING FILL MODIFIER-2..SIGNWRITING FILL MODIFIER-6
+ {0x1DAA1, 0x1DAAF, prExtend}, // Mn [15] SIGNWRITING ROTATION MODIFIER-2..SIGNWRITING ROTATION MODIFIER-16
+ {0x1E000, 0x1E006, prExtend}, // Mn [7] COMBINING GLAGOLITIC LETTER AZU..COMBINING GLAGOLITIC LETTER ZHIVETE
+ {0x1E008, 0x1E018, prExtend}, // Mn [17] COMBINING GLAGOLITIC LETTER ZEMLJA..COMBINING GLAGOLITIC LETTER HERU
+ {0x1E01B, 0x1E021, prExtend}, // Mn [7] COMBINING GLAGOLITIC LETTER SHTA..COMBINING GLAGOLITIC LETTER YATI
+ {0x1E023, 0x1E024, prExtend}, // Mn [2] COMBINING GLAGOLITIC LETTER YU..COMBINING GLAGOLITIC LETTER SMALL YUS
+ {0x1E026, 0x1E02A, prExtend}, // Mn [5] COMBINING GLAGOLITIC LETTER YO..COMBINING GLAGOLITIC LETTER FITA
+ {0x1E130, 0x1E136, prExtend}, // Mn [7] NYIAKENG PUACHUE HMONG TONE-B..NYIAKENG PUACHUE HMONG TONE-D
+ {0x1E2AE, 0x1E2AE, prExtend}, // Mn TOTO SIGN RISING TONE
+ {0x1E2EC, 0x1E2EF, prExtend}, // Mn [4] WANCHO TONE TUP..WANCHO TONE KOINI
+ {0x1E8D0, 0x1E8D6, prExtend}, // Mn [7] MENDE KIKAKUI COMBINING NUMBER TEENS..MENDE KIKAKUI COMBINING NUMBER MILLIONS
+ {0x1E944, 0x1E94A, prExtend}, // Mn [7] ADLAM ALIF LENGTHENER..ADLAM NUKTA
+ {0x1F000, 0x1F003, prExtendedPictographic}, // E0.0 [4] (🀀..🀃) MAHJONG TILE EAST WIND..MAHJONG TILE NORTH WIND
+ {0x1F004, 0x1F004, prExtendedPictographic}, // E0.6 [1] (🀄) mahjong red dragon
+ {0x1F005, 0x1F0CE, prExtendedPictographic}, // E0.0 [202] (🀅..🃎) MAHJONG TILE GREEN DRAGON..PLAYING CARD KING OF DIAMONDS
+ {0x1F0CF, 0x1F0CF, prExtendedPictographic}, // E0.6 [1] (🃏) joker
+ {0x1F0D0, 0x1F0FF, prExtendedPictographic}, // E0.0 [48] (..) <reserved-1F0D0>..<reserved-1F0FF>
+ {0x1F10D, 0x1F10F, prExtendedPictographic}, // E0.0 [3] (🄍..🄏) CIRCLED ZERO WITH SLASH..CIRCLED DOLLAR SIGN WITH OVERLAID BACKSLASH
+ {0x1F12F, 0x1F12F, prExtendedPictographic}, // E0.0 [1] (🄯) COPYLEFT SYMBOL
+ {0x1F16C, 0x1F16F, prExtendedPictographic}, // E0.0 [4] (🅬..🅯) RAISED MR SIGN..CIRCLED HUMAN FIGURE
+ {0x1F170, 0x1F171, prExtendedPictographic}, // E0.6 [2] (🅰️..🅱️) A button (blood type)..B button (blood type)
+ {0x1F17E, 0x1F17F, prExtendedPictographic}, // E0.6 [2] (🅾️..🅿️) O button (blood type)..P button
+ {0x1F18E, 0x1F18E, prExtendedPictographic}, // E0.6 [1] (🆎) AB button (blood type)
+ {0x1F191, 0x1F19A, prExtendedPictographic}, // E0.6 [10] (🆑..🆚) CL button..VS button
+ {0x1F1AD, 0x1F1E5, prExtendedPictographic}, // E0.0 [57] (🆭..) MASK WORK SYMBOL..<reserved-1F1E5>
+ {0x1F1E6, 0x1F1FF, prRegionalIndicator}, // So [26] REGIONAL INDICATOR SYMBOL LETTER A..REGIONAL INDICATOR SYMBOL LETTER Z
+ {0x1F201, 0x1F202, prExtendedPictographic}, // E0.6 [2] (🈁..🈂️) Japanese “here” button..Japanese “service charge” button
+ {0x1F203, 0x1F20F, prExtendedPictographic}, // E0.0 [13] (..) <reserved-1F203>..<reserved-1F20F>
+ {0x1F21A, 0x1F21A, prExtendedPictographic}, // E0.6 [1] (🈚) Japanese “free of charge” button
+ {0x1F22F, 0x1F22F, prExtendedPictographic}, // E0.6 [1] (🈯) Japanese “reserved” button
+ {0x1F232, 0x1F23A, prExtendedPictographic}, // E0.6 [9] (🈲..🈺) Japanese “prohibited” button..Japanese “open for business” button
+ {0x1F23C, 0x1F23F, prExtendedPictographic}, // E0.0 [4] (..) <reserved-1F23C>..<reserved-1F23F>
+ {0x1F249, 0x1F24F, prExtendedPictographic}, // E0.0 [7] (..) <reserved-1F249>..<reserved-1F24F>
+ {0x1F250, 0x1F251, prExtendedPictographic}, // E0.6 [2] (🉐..🉑) Japanese “bargain” button..Japanese “acceptable” button
+ {0x1F252, 0x1F2FF, prExtendedPictographic}, // E0.0 [174] (..) <reserved-1F252>..<reserved-1F2FF>
+ {0x1F300, 0x1F30C, prExtendedPictographic}, // E0.6 [13] (🌀..🌌) cyclone..milky way
+ {0x1F30D, 0x1F30E, prExtendedPictographic}, // E0.7 [2] (🌍..🌎) globe showing Europe-Africa..globe showing Americas
+ {0x1F30F, 0x1F30F, prExtendedPictographic}, // E0.6 [1] (🌏) globe showing Asia-Australia
+ {0x1F310, 0x1F310, prExtendedPictographic}, // E1.0 [1] (🌐) globe with meridians
+ {0x1F311, 0x1F311, prExtendedPictographic}, // E0.6 [1] (🌑) new moon
+ {0x1F312, 0x1F312, prExtendedPictographic}, // E1.0 [1] (🌒) waxing crescent moon
+ {0x1F313, 0x1F315, prExtendedPictographic}, // E0.6 [3] (🌓..🌕) first quarter moon..full moon
+ {0x1F316, 0x1F318, prExtendedPictographic}, // E1.0 [3] (🌖..🌘) waning gibbous moon..waning crescent moon
+ {0x1F319, 0x1F319, prExtendedPictographic}, // E0.6 [1] (🌙) crescent moon
+ {0x1F31A, 0x1F31A, prExtendedPictographic}, // E1.0 [1] (🌚) new moon face
+ {0x1F31B, 0x1F31B, prExtendedPictographic}, // E0.6 [1] (🌛) first quarter moon face
+ {0x1F31C, 0x1F31C, prExtendedPictographic}, // E0.7 [1] (🌜) last quarter moon face
+ {0x1F31D, 0x1F31E, prExtendedPictographic}, // E1.0 [2] (🌝..🌞) full moon face..sun with face
+ {0x1F31F, 0x1F320, prExtendedPictographic}, // E0.6 [2] (🌟..🌠) glowing star..shooting star
+ {0x1F321, 0x1F321, prExtendedPictographic}, // E0.7 [1] (🌡️) thermometer
+ {0x1F322, 0x1F323, prExtendedPictographic}, // E0.0 [2] (🌢..🌣) BLACK DROPLET..WHITE SUN
+ {0x1F324, 0x1F32C, prExtendedPictographic}, // E0.7 [9] (🌤️..🌬️) sun behind small cloud..wind face
+ {0x1F32D, 0x1F32F, prExtendedPictographic}, // E1.0 [3] (🌭..🌯) hot dog..burrito
+ {0x1F330, 0x1F331, prExtendedPictographic}, // E0.6 [2] (🌰..🌱) chestnut..seedling
+ {0x1F332, 0x1F333, prExtendedPictographic}, // E1.0 [2] (🌲..🌳) evergreen tree..deciduous tree
+ {0x1F334, 0x1F335, prExtendedPictographic}, // E0.6 [2] (🌴..🌵) palm tree..cactus
+ {0x1F336, 0x1F336, prExtendedPictographic}, // E0.7 [1] (🌶️) hot pepper
+ {0x1F337, 0x1F34A, prExtendedPictographic}, // E0.6 [20] (🌷..🍊) tulip..tangerine
+ {0x1F34B, 0x1F34B, prExtendedPictographic}, // E1.0 [1] (🍋) lemon
+ {0x1F34C, 0x1F34F, prExtendedPictographic}, // E0.6 [4] (🍌..🍏) banana..green apple
+ {0x1F350, 0x1F350, prExtendedPictographic}, // E1.0 [1] (🍐) pear
+ {0x1F351, 0x1F37B, prExtendedPictographic}, // E0.6 [43] (🍑..🍻) peach..clinking beer mugs
+ {0x1F37C, 0x1F37C, prExtendedPictographic}, // E1.0 [1] (🍼) baby bottle
+ {0x1F37D, 0x1F37D, prExtendedPictographic}, // E0.7 [1] (🍽️) fork and knife with plate
+ {0x1F37E, 0x1F37F, prExtendedPictographic}, // E1.0 [2] (🍾..🍿) bottle with popping cork..popcorn
+ {0x1F380, 0x1F393, prExtendedPictographic}, // E0.6 [20] (🎀..🎓) ribbon..graduation cap
+ {0x1F394, 0x1F395, prExtendedPictographic}, // E0.0 [2] (🎔..🎕) HEART WITH TIP ON THE LEFT..BOUQUET OF FLOWERS
+ {0x1F396, 0x1F397, prExtendedPictographic}, // E0.7 [2] (🎖️..🎗️) military medal..reminder ribbon
+ {0x1F398, 0x1F398, prExtendedPictographic}, // E0.0 [1] (🎘) MUSICAL KEYBOARD WITH JACKS
+ {0x1F399, 0x1F39B, prExtendedPictographic}, // E0.7 [3] (🎙️..🎛️) studio microphone..control knobs
+ {0x1F39C, 0x1F39D, prExtendedPictographic}, // E0.0 [2] (🎜..🎝) BEAMED ASCENDING MUSICAL NOTES..BEAMED DESCENDING MUSICAL NOTES
+ {0x1F39E, 0x1F39F, prExtendedPictographic}, // E0.7 [2] (🎞️..🎟️) film frames..admission tickets
+ {0x1F3A0, 0x1F3C4, prExtendedPictographic}, // E0.6 [37] (🎠..🏄) carousel horse..person surfing
+ {0x1F3C5, 0x1F3C5, prExtendedPictographic}, // E1.0 [1] (🏅) sports medal
+ {0x1F3C6, 0x1F3C6, prExtendedPictographic}, // E0.6 [1] (🏆) trophy
+ {0x1F3C7, 0x1F3C7, prExtendedPictographic}, // E1.0 [1] (🏇) horse racing
+ {0x1F3C8, 0x1F3C8, prExtendedPictographic}, // E0.6 [1] (🏈) american football
+ {0x1F3C9, 0x1F3C9, prExtendedPictographic}, // E1.0 [1] (🏉) rugby football
+ {0x1F3CA, 0x1F3CA, prExtendedPictographic}, // E0.6 [1] (🏊) person swimming
+ {0x1F3CB, 0x1F3CE, prExtendedPictographic}, // E0.7 [4] (🏋️..🏎️) person lifting weights..racing car
+ {0x1F3CF, 0x1F3D3, prExtendedPictographic}, // E1.0 [5] (🏏..🏓) cricket game..ping pong
+ {0x1F3D4, 0x1F3DF, prExtendedPictographic}, // E0.7 [12] (🏔️..🏟️) snow-capped mountain..stadium
+ {0x1F3E0, 0x1F3E3, prExtendedPictographic}, // E0.6 [4] (🏠..🏣) house..Japanese post office
+ {0x1F3E4, 0x1F3E4, prExtendedPictographic}, // E1.0 [1] (🏤) post office
+ {0x1F3E5, 0x1F3F0, prExtendedPictographic}, // E0.6 [12] (🏥..🏰) hospital..castle
+ {0x1F3F1, 0x1F3F2, prExtendedPictographic}, // E0.0 [2] (🏱..🏲) WHITE PENNANT..BLACK PENNANT
+ {0x1F3F3, 0x1F3F3, prExtendedPictographic}, // E0.7 [1] (🏳️) white flag
+ {0x1F3F4, 0x1F3F4, prExtendedPictographic}, // E1.0 [1] (🏴) black flag
+ {0x1F3F5, 0x1F3F5, prExtendedPictographic}, // E0.7 [1] (🏵️) rosette
+ {0x1F3F6, 0x1F3F6, prExtendedPictographic}, // E0.0 [1] (🏶) BLACK ROSETTE
+ {0x1F3F7, 0x1F3F7, prExtendedPictographic}, // E0.7 [1] (🏷️) label
+ {0x1F3F8, 0x1F3FA, prExtendedPictographic}, // E1.0 [3] (🏸..🏺) badminton..amphora
+ {0x1F3FB, 0x1F3FF, prExtend}, // Sk [5] EMOJI MODIFIER FITZPATRICK TYPE-1-2..EMOJI MODIFIER FITZPATRICK TYPE-6
+ {0x1F400, 0x1F407, prExtendedPictographic}, // E1.0 [8] (🐀..🐇) rat..rabbit
+ {0x1F408, 0x1F408, prExtendedPictographic}, // E0.7 [1] (🐈) cat
+ {0x1F409, 0x1F40B, prExtendedPictographic}, // E1.0 [3] (🐉..🐋) dragon..whale
+ {0x1F40C, 0x1F40E, prExtendedPictographic}, // E0.6 [3] (🐌..🐎) snail..horse
+ {0x1F40F, 0x1F410, prExtendedPictographic}, // E1.0 [2] (🐏..🐐) ram..goat
+ {0x1F411, 0x1F412, prExtendedPictographic}, // E0.6 [2] (🐑..🐒) ewe..monkey
+ {0x1F413, 0x1F413, prExtendedPictographic}, // E1.0 [1] (🐓) rooster
+ {0x1F414, 0x1F414, prExtendedPictographic}, // E0.6 [1] (🐔) chicken
+ {0x1F415, 0x1F415, prExtendedPictographic}, // E0.7 [1] (🐕) dog
+ {0x1F416, 0x1F416, prExtendedPictographic}, // E1.0 [1] (🐖) pig
+ {0x1F417, 0x1F429, prExtendedPictographic}, // E0.6 [19] (🐗..🐩) boar..poodle
+ {0x1F42A, 0x1F42A, prExtendedPictographic}, // E1.0 [1] (🐪) camel
+ {0x1F42B, 0x1F43E, prExtendedPictographic}, // E0.6 [20] (🐫..🐾) two-hump camel..paw prints
+ {0x1F43F, 0x1F43F, prExtendedPictographic}, // E0.7 [1] (🐿️) chipmunk
+ {0x1F440, 0x1F440, prExtendedPictographic}, // E0.6 [1] (👀) eyes
+ {0x1F441, 0x1F441, prExtendedPictographic}, // E0.7 [1] (👁️) eye
+ {0x1F442, 0x1F464, prExtendedPictographic}, // E0.6 [35] (👂..👤) ear..bust in silhouette
+ {0x1F465, 0x1F465, prExtendedPictographic}, // E1.0 [1] (👥) busts in silhouette
+ {0x1F466, 0x1F46B, prExtendedPictographic}, // E0.6 [6] (👦..👫) boy..woman and man holding hands
+ {0x1F46C, 0x1F46D, prExtendedPictographic}, // E1.0 [2] (👬..👭) men holding hands..women holding hands
+ {0x1F46E, 0x1F4AC, prExtendedPictographic}, // E0.6 [63] (👮..💬) police officer..speech balloon
+ {0x1F4AD, 0x1F4AD, prExtendedPictographic}, // E1.0 [1] (💭) thought balloon
+ {0x1F4AE, 0x1F4B5, prExtendedPictographic}, // E0.6 [8] (💮..💵) white flower..dollar banknote
+ {0x1F4B6, 0x1F4B7, prExtendedPictographic}, // E1.0 [2] (💶..💷) euro banknote..pound banknote
+ {0x1F4B8, 0x1F4EB, prExtendedPictographic}, // E0.6 [52] (💸..📫) money with wings..closed mailbox with raised flag
+ {0x1F4EC, 0x1F4ED, prExtendedPictographic}, // E0.7 [2] (📬..📭) open mailbox with raised flag..open mailbox with lowered flag
+ {0x1F4EE, 0x1F4EE, prExtendedPictographic}, // E0.6 [1] (📮) postbox
+ {0x1F4EF, 0x1F4EF, prExtendedPictographic}, // E1.0 [1] (📯) postal horn
+ {0x1F4F0, 0x1F4F4, prExtendedPictographic}, // E0.6 [5] (📰..📴) newspaper..mobile phone off
+ {0x1F4F5, 0x1F4F5, prExtendedPictographic}, // E1.0 [1] (📵) no mobile phones
+ {0x1F4F6, 0x1F4F7, prExtendedPictographic}, // E0.6 [2] (📶..📷) antenna bars..camera
+ {0x1F4F8, 0x1F4F8, prExtendedPictographic}, // E1.0 [1] (📸) camera with flash
+ {0x1F4F9, 0x1F4FC, prExtendedPictographic}, // E0.6 [4] (📹..📼) video camera..videocassette
+ {0x1F4FD, 0x1F4FD, prExtendedPictographic}, // E0.7 [1] (📽️) film projector
+ {0x1F4FE, 0x1F4FE, prExtendedPictographic}, // E0.0 [1] (📾) PORTABLE STEREO
+ {0x1F4FF, 0x1F502, prExtendedPictographic}, // E1.0 [4] (📿..🔂) prayer beads..repeat single button
+ {0x1F503, 0x1F503, prExtendedPictographic}, // E0.6 [1] (🔃) clockwise vertical arrows
+ {0x1F504, 0x1F507, prExtendedPictographic}, // E1.0 [4] (🔄..🔇) counterclockwise arrows button..muted speaker
+ {0x1F508, 0x1F508, prExtendedPictographic}, // E0.7 [1] (🔈) speaker low volume
+ {0x1F509, 0x1F509, prExtendedPictographic}, // E1.0 [1] (🔉) speaker medium volume
+ {0x1F50A, 0x1F514, prExtendedPictographic}, // E0.6 [11] (🔊..🔔) speaker high volume..bell
+ {0x1F515, 0x1F515, prExtendedPictographic}, // E1.0 [1] (🔕) bell with slash
+ {0x1F516, 0x1F52B, prExtendedPictographic}, // E0.6 [22] (🔖..🔫) bookmark..water pistol
+ {0x1F52C, 0x1F52D, prExtendedPictographic}, // E1.0 [2] (🔬..🔭) microscope..telescope
+ {0x1F52E, 0x1F53D, prExtendedPictographic}, // E0.6 [16] (🔮..🔽) crystal ball..downwards button
+ {0x1F546, 0x1F548, prExtendedPictographic}, // E0.0 [3] (🕆..🕈) WHITE LATIN CROSS..CELTIC CROSS
+ {0x1F549, 0x1F54A, prExtendedPictographic}, // E0.7 [2] (🕉️..🕊️) om..dove
+ {0x1F54B, 0x1F54E, prExtendedPictographic}, // E1.0 [4] (🕋..🕎) kaaba..menorah
+ {0x1F54F, 0x1F54F, prExtendedPictographic}, // E0.0 [1] (🕏) BOWL OF HYGIEIA
+ {0x1F550, 0x1F55B, prExtendedPictographic}, // E0.6 [12] (🕐..🕛) one o’clock..twelve o’clock
+ {0x1F55C, 0x1F567, prExtendedPictographic}, // E0.7 [12] (🕜..🕧) one-thirty..twelve-thirty
+ {0x1F568, 0x1F56E, prExtendedPictographic}, // E0.0 [7] (🕨..🕮) RIGHT SPEAKER..BOOK
+ {0x1F56F, 0x1F570, prExtendedPictographic}, // E0.7 [2] (🕯️..🕰️) candle..mantelpiece clock
+ {0x1F571, 0x1F572, prExtendedPictographic}, // E0.0 [2] (🕱..🕲) BLACK SKULL AND CROSSBONES..NO PIRACY
+ {0x1F573, 0x1F579, prExtendedPictographic}, // E0.7 [7] (🕳️..🕹️) hole..joystick
+ {0x1F57A, 0x1F57A, prExtendedPictographic}, // E3.0 [1] (🕺) man dancing
+ {0x1F57B, 0x1F586, prExtendedPictographic}, // E0.0 [12] (🕻..🖆) LEFT HAND TELEPHONE RECEIVER..PEN OVER STAMPED ENVELOPE
+ {0x1F587, 0x1F587, prExtendedPictographic}, // E0.7 [1] (🖇️) linked paperclips
+ {0x1F588, 0x1F589, prExtendedPictographic}, // E0.0 [2] (🖈..🖉) BLACK PUSHPIN..LOWER LEFT PENCIL
+ {0x1F58A, 0x1F58D, prExtendedPictographic}, // E0.7 [4] (🖊️..🖍️) pen..crayon
+ {0x1F58E, 0x1F58F, prExtendedPictographic}, // E0.0 [2] (🖎..🖏) LEFT WRITING HAND..TURNED OK HAND SIGN
+ {0x1F590, 0x1F590, prExtendedPictographic}, // E0.7 [1] (🖐️) hand with fingers splayed
+ {0x1F591, 0x1F594, prExtendedPictographic}, // E0.0 [4] (🖑..🖔) REVERSED RAISED HAND WITH FINGERS SPLAYED..REVERSED VICTORY HAND
+ {0x1F595, 0x1F596, prExtendedPictographic}, // E1.0 [2] (🖕..🖖) middle finger..vulcan salute
+ {0x1F597, 0x1F5A3, prExtendedPictographic}, // E0.0 [13] (🖗..🖣) WHITE DOWN POINTING LEFT HAND INDEX..BLACK DOWN POINTING BACKHAND INDEX
+ {0x1F5A4, 0x1F5A4, prExtendedPictographic}, // E3.0 [1] (🖤) black heart
+ {0x1F5A5, 0x1F5A5, prExtendedPictographic}, // E0.7 [1] (🖥️) desktop computer
+ {0x1F5A6, 0x1F5A7, prExtendedPictographic}, // E0.0 [2] (🖦..🖧) KEYBOARD AND MOUSE..THREE NETWORKED COMPUTERS
+ {0x1F5A8, 0x1F5A8, prExtendedPictographic}, // E0.7 [1] (🖨️) printer
+ {0x1F5A9, 0x1F5B0, prExtendedPictographic}, // E0.0 [8] (🖩..🖰) POCKET CALCULATOR..TWO BUTTON MOUSE
+ {0x1F5B1, 0x1F5B2, prExtendedPictographic}, // E0.7 [2] (🖱️..🖲️) computer mouse..trackball
+ {0x1F5B3, 0x1F5BB, prExtendedPictographic}, // E0.0 [9] (🖳..🖻) OLD PERSONAL COMPUTER..DOCUMENT WITH PICTURE
+ {0x1F5BC, 0x1F5BC, prExtendedPictographic}, // E0.7 [1] (🖼️) framed picture
+ {0x1F5BD, 0x1F5C1, prExtendedPictographic}, // E0.0 [5] (🖽..🗁) FRAME WITH TILES..OPEN FOLDER
+ {0x1F5C2, 0x1F5C4, prExtendedPictographic}, // E0.7 [3] (🗂️..🗄️) card index dividers..file cabinet
+ {0x1F5C5, 0x1F5D0, prExtendedPictographic}, // E0.0 [12] (🗅..🗐) EMPTY NOTE..PAGES
+ {0x1F5D1, 0x1F5D3, prExtendedPictographic}, // E0.7 [3] (🗑️..🗓️) wastebasket..spiral calendar
+ {0x1F5D4, 0x1F5DB, prExtendedPictographic}, // E0.0 [8] (🗔..🗛) DESKTOP WINDOW..DECREASE FONT SIZE SYMBOL
+ {0x1F5DC, 0x1F5DE, prExtendedPictographic}, // E0.7 [3] (🗜️..🗞️) clamp..rolled-up newspaper
+ {0x1F5DF, 0x1F5E0, prExtendedPictographic}, // E0.0 [2] (🗟..🗠) PAGE WITH CIRCLED TEXT..STOCK CHART
+ {0x1F5E1, 0x1F5E1, prExtendedPictographic}, // E0.7 [1] (🗡️) dagger
+ {0x1F5E2, 0x1F5E2, prExtendedPictographic}, // E0.0 [1] (🗢) LIPS
+ {0x1F5E3, 0x1F5E3, prExtendedPictographic}, // E0.7 [1] (🗣️) speaking head
+ {0x1F5E4, 0x1F5E7, prExtendedPictographic}, // E0.0 [4] (🗤..🗧) THREE RAYS ABOVE..THREE RAYS RIGHT
+ {0x1F5E8, 0x1F5E8, prExtendedPictographic}, // E2.0 [1] (🗨️) left speech bubble
+ {0x1F5E9, 0x1F5EE, prExtendedPictographic}, // E0.0 [6] (🗩..🗮) RIGHT SPEECH BUBBLE..LEFT ANGER BUBBLE
+ {0x1F5EF, 0x1F5EF, prExtendedPictographic}, // E0.7 [1] (🗯️) right anger bubble
+ {0x1F5F0, 0x1F5F2, prExtendedPictographic}, // E0.0 [3] (🗰..🗲) MOOD BUBBLE..LIGHTNING MOOD
+ {0x1F5F3, 0x1F5F3, prExtendedPictographic}, // E0.7 [1] (🗳️) ballot box with ballot
+ {0x1F5F4, 0x1F5F9, prExtendedPictographic}, // E0.0 [6] (🗴..🗹) BALLOT SCRIPT X..BALLOT BOX WITH BOLD CHECK
+ {0x1F5FA, 0x1F5FA, prExtendedPictographic}, // E0.7 [1] (🗺️) world map
+ {0x1F5FB, 0x1F5FF, prExtendedPictographic}, // E0.6 [5] (🗻..🗿) mount fuji..moai
+ {0x1F600, 0x1F600, prExtendedPictographic}, // E1.0 [1] (😀) grinning face
+ {0x1F601, 0x1F606, prExtendedPictographic}, // E0.6 [6] (😁..😆) beaming face with smiling eyes..grinning squinting face
+ {0x1F607, 0x1F608, prExtendedPictographic}, // E1.0 [2] (😇..😈) smiling face with halo..smiling face with horns
+ {0x1F609, 0x1F60D, prExtendedPictographic}, // E0.6 [5] (😉..😍) winking face..smiling face with heart-eyes
+ {0x1F60E, 0x1F60E, prExtendedPictographic}, // E1.0 [1] (😎) smiling face with sunglasses
+ {0x1F60F, 0x1F60F, prExtendedPictographic}, // E0.6 [1] (😏) smirking face
+ {0x1F610, 0x1F610, prExtendedPictographic}, // E0.7 [1] (😐) neutral face
+ {0x1F611, 0x1F611, prExtendedPictographic}, // E1.0 [1] (😑) expressionless face
+ {0x1F612, 0x1F614, prExtendedPictographic}, // E0.6 [3] (😒..😔) unamused face..pensive face
+ {0x1F615, 0x1F615, prExtendedPictographic}, // E1.0 [1] (😕) confused face
+ {0x1F616, 0x1F616, prExtendedPictographic}, // E0.6 [1] (😖) confounded face
+ {0x1F617, 0x1F617, prExtendedPictographic}, // E1.0 [1] (😗) kissing face
+ {0x1F618, 0x1F618, prExtendedPictographic}, // E0.6 [1] (😘) face blowing a kiss
+ {0x1F619, 0x1F619, prExtendedPictographic}, // E1.0 [1] (😙) kissing face with smiling eyes
+ {0x1F61A, 0x1F61A, prExtendedPictographic}, // E0.6 [1] (😚) kissing face with closed eyes
+ {0x1F61B, 0x1F61B, prExtendedPictographic}, // E1.0 [1] (😛) face with tongue
+ {0x1F61C, 0x1F61E, prExtendedPictographic}, // E0.6 [3] (😜..😞) winking face with tongue..disappointed face
+ {0x1F61F, 0x1F61F, prExtendedPictographic}, // E1.0 [1] (😟) worried face
+ {0x1F620, 0x1F625, prExtendedPictographic}, // E0.6 [6] (😠..😥) angry face..sad but relieved face
+ {0x1F626, 0x1F627, prExtendedPictographic}, // E1.0 [2] (😦..😧) frowning face with open mouth..anguished face
+ {0x1F628, 0x1F62B, prExtendedPictographic}, // E0.6 [4] (😨..😫) fearful face..tired face
+ {0x1F62C, 0x1F62C, prExtendedPictographic}, // E1.0 [1] (😬) grimacing face
+ {0x1F62D, 0x1F62D, prExtendedPictographic}, // E0.6 [1] (😭) loudly crying face
+ {0x1F62E, 0x1F62F, prExtendedPictographic}, // E1.0 [2] (😮..😯) face with open mouth..hushed face
+ {0x1F630, 0x1F633, prExtendedPictographic}, // E0.6 [4] (😰..😳) anxious face with sweat..flushed face
+ {0x1F634, 0x1F634, prExtendedPictographic}, // E1.0 [1] (😴) sleeping face
+ {0x1F635, 0x1F635, prExtendedPictographic}, // E0.6 [1] (😵) face with crossed-out eyes
+ {0x1F636, 0x1F636, prExtendedPictographic}, // E1.0 [1] (😶) face without mouth
+ {0x1F637, 0x1F640, prExtendedPictographic}, // E0.6 [10] (😷..🙀) face with medical mask..weary cat
+ {0x1F641, 0x1F644, prExtendedPictographic}, // E1.0 [4] (🙁..🙄) slightly frowning face..face with rolling eyes
+ {0x1F645, 0x1F64F, prExtendedPictographic}, // E0.6 [11] (🙅..🙏) person gesturing NO..folded hands
+ {0x1F680, 0x1F680, prExtendedPictographic}, // E0.6 [1] (🚀) rocket
+ {0x1F681, 0x1F682, prExtendedPictographic}, // E1.0 [2] (🚁..🚂) helicopter..locomotive
+ {0x1F683, 0x1F685, prExtendedPictographic}, // E0.6 [3] (🚃..🚅) railway car..bullet train
+ {0x1F686, 0x1F686, prExtendedPictographic}, // E1.0 [1] (🚆) train
+ {0x1F687, 0x1F687, prExtendedPictographic}, // E0.6 [1] (🚇) metro
+ {0x1F688, 0x1F688, prExtendedPictographic}, // E1.0 [1] (🚈) light rail
+ {0x1F689, 0x1F689, prExtendedPictographic}, // E0.6 [1] (🚉) station
+ {0x1F68A, 0x1F68B, prExtendedPictographic}, // E1.0 [2] (🚊..🚋) tram..tram car
+ {0x1F68C, 0x1F68C, prExtendedPictographic}, // E0.6 [1] (🚌) bus
+ {0x1F68D, 0x1F68D, prExtendedPictographic}, // E0.7 [1] (🚍) oncoming bus
+ {0x1F68E, 0x1F68E, prExtendedPictographic}, // E1.0 [1] (🚎) trolleybus
+ {0x1F68F, 0x1F68F, prExtendedPictographic}, // E0.6 [1] (🚏) bus stop
+ {0x1F690, 0x1F690, prExtendedPictographic}, // E1.0 [1] (🚐) minibus
+ {0x1F691, 0x1F693, prExtendedPictographic}, // E0.6 [3] (🚑..🚓) ambulance..police car
+ {0x1F694, 0x1F694, prExtendedPictographic}, // E0.7 [1] (🚔) oncoming police car
+ {0x1F695, 0x1F695, prExtendedPictographic}, // E0.6 [1] (🚕) taxi
+ {0x1F696, 0x1F696, prExtendedPictographic}, // E1.0 [1] (🚖) oncoming taxi
+ {0x1F697, 0x1F697, prExtendedPictographic}, // E0.6 [1] (🚗) automobile
+ {0x1F698, 0x1F698, prExtendedPictographic}, // E0.7 [1] (🚘) oncoming automobile
+ {0x1F699, 0x1F69A, prExtendedPictographic}, // E0.6 [2] (🚙..🚚) sport utility vehicle..delivery truck
+ {0x1F69B, 0x1F6A1, prExtendedPictographic}, // E1.0 [7] (🚛..🚡) articulated lorry..aerial tramway
+ {0x1F6A2, 0x1F6A2, prExtendedPictographic}, // E0.6 [1] (🚢) ship
+ {0x1F6A3, 0x1F6A3, prExtendedPictographic}, // E1.0 [1] (🚣) person rowing boat
+ {0x1F6A4, 0x1F6A5, prExtendedPictographic}, // E0.6 [2] (🚤..🚥) speedboat..horizontal traffic light
+ {0x1F6A6, 0x1F6A6, prExtendedPictographic}, // E1.0 [1] (🚦) vertical traffic light
+ {0x1F6A7, 0x1F6AD, prExtendedPictographic}, // E0.6 [7] (🚧..🚭) construction..no smoking
+ {0x1F6AE, 0x1F6B1, prExtendedPictographic}, // E1.0 [4] (🚮..🚱) litter in bin sign..non-potable water
+ {0x1F6B2, 0x1F6B2, prExtendedPictographic}, // E0.6 [1] (🚲) bicycle
+ {0x1F6B3, 0x1F6B5, prExtendedPictographic}, // E1.0 [3] (🚳..🚵) no bicycles..person mountain biking
+ {0x1F6B6, 0x1F6B6, prExtendedPictographic}, // E0.6 [1] (🚶) person walking
+ {0x1F6B7, 0x1F6B8, prExtendedPictographic}, // E1.0 [2] (🚷..🚸) no pedestrians..children crossing
+ {0x1F6B9, 0x1F6BE, prExtendedPictographic}, // E0.6 [6] (🚹..🚾) men’s room..water closet
+ {0x1F6BF, 0x1F6BF, prExtendedPictographic}, // E1.0 [1] (🚿) shower
+ {0x1F6C0, 0x1F6C0, prExtendedPictographic}, // E0.6 [1] (🛀) person taking bath
+ {0x1F6C1, 0x1F6C5, prExtendedPictographic}, // E1.0 [5] (🛁..🛅) bathtub..left luggage
+ {0x1F6C6, 0x1F6CA, prExtendedPictographic}, // E0.0 [5] (🛆..🛊) TRIANGLE WITH ROUNDED CORNERS..GIRLS SYMBOL
+ {0x1F6CB, 0x1F6CB, prExtendedPictographic}, // E0.7 [1] (🛋️) couch and lamp
+ {0x1F6CC, 0x1F6CC, prExtendedPictographic}, // E1.0 [1] (🛌) person in bed
+ {0x1F6CD, 0x1F6CF, prExtendedPictographic}, // E0.7 [3] (🛍️..🛏️) shopping bags..bed
+ {0x1F6D0, 0x1F6D0, prExtendedPictographic}, // E1.0 [1] (🛐) place of worship
+ {0x1F6D1, 0x1F6D2, prExtendedPictographic}, // E3.0 [2] (🛑..🛒) stop sign..shopping cart
+ {0x1F6D3, 0x1F6D4, prExtendedPictographic}, // E0.0 [2] (🛓..🛔) STUPA..PAGODA
+ {0x1F6D5, 0x1F6D5, prExtendedPictographic}, // E12.0 [1] (🛕) hindu temple
+ {0x1F6D6, 0x1F6D7, prExtendedPictographic}, // E13.0 [2] (🛖..🛗) hut..elevator
+ {0x1F6D8, 0x1F6DC, prExtendedPictographic}, // E0.0 [5] (..🛜) <reserved-1F6D8>..<reserved-1F6DC>
+ {0x1F6DD, 0x1F6DF, prExtendedPictographic}, // E14.0 [3] (🛝..🛟) playground slide..ring buoy
+ {0x1F6E0, 0x1F6E5, prExtendedPictographic}, // E0.7 [6] (🛠️..🛥️) hammer and wrench..motor boat
+ {0x1F6E6, 0x1F6E8, prExtendedPictographic}, // E0.0 [3] (🛦..🛨) UP-POINTING MILITARY AIRPLANE..UP-POINTING SMALL AIRPLANE
+ {0x1F6E9, 0x1F6E9, prExtendedPictographic}, // E0.7 [1] (🛩️) small airplane
+ {0x1F6EA, 0x1F6EA, prExtendedPictographic}, // E0.0 [1] (🛪) NORTHEAST-POINTING AIRPLANE
+ {0x1F6EB, 0x1F6EC, prExtendedPictographic}, // E1.0 [2] (🛫..🛬) airplane departure..airplane arrival
+ {0x1F6ED, 0x1F6EF, prExtendedPictographic}, // E0.0 [3] (..) <reserved-1F6ED>..<reserved-1F6EF>
+ {0x1F6F0, 0x1F6F0, prExtendedPictographic}, // E0.7 [1] (🛰️) satellite
+ {0x1F6F1, 0x1F6F2, prExtendedPictographic}, // E0.0 [2] (🛱..🛲) ONCOMING FIRE ENGINE..DIESEL LOCOMOTIVE
+ {0x1F6F3, 0x1F6F3, prExtendedPictographic}, // E0.7 [1] (🛳️) passenger ship
+ {0x1F6F4, 0x1F6F6, prExtendedPictographic}, // E3.0 [3] (🛴..🛶) kick scooter..canoe
+ {0x1F6F7, 0x1F6F8, prExtendedPictographic}, // E5.0 [2] (🛷..🛸) sled..flying saucer
+ {0x1F6F9, 0x1F6F9, prExtendedPictographic}, // E11.0 [1] (🛹) skateboard
+ {0x1F6FA, 0x1F6FA, prExtendedPictographic}, // E12.0 [1] (🛺) auto rickshaw
+ {0x1F6FB, 0x1F6FC, prExtendedPictographic}, // E13.0 [2] (🛻..🛼) pickup truck..roller skate
+ {0x1F6FD, 0x1F6FF, prExtendedPictographic}, // E0.0 [3] (..) <reserved-1F6FD>..<reserved-1F6FF>
+ {0x1F774, 0x1F77F, prExtendedPictographic}, // E0.0 [12] (🝴..🝿) <reserved-1F774>..<reserved-1F77F>
+ {0x1F7D5, 0x1F7DF, prExtendedPictographic}, // E0.0 [11] (🟕..) CIRCLED TRIANGLE..<reserved-1F7DF>
+ {0x1F7E0, 0x1F7EB, prExtendedPictographic}, // E12.0 [12] (🟠..🟫) orange circle..brown square
+ {0x1F7EC, 0x1F7EF, prExtendedPictographic}, // E0.0 [4] (..) <reserved-1F7EC>..<reserved-1F7EF>
+ {0x1F7F0, 0x1F7F0, prExtendedPictographic}, // E14.0 [1] (🟰) heavy equals sign
+ {0x1F7F1, 0x1F7FF, prExtendedPictographic}, // E0.0 [15] (..) <reserved-1F7F1>..<reserved-1F7FF>
+ {0x1F80C, 0x1F80F, prExtendedPictographic}, // E0.0 [4] (..) <reserved-1F80C>..<reserved-1F80F>
+ {0x1F848, 0x1F84F, prExtendedPictographic}, // E0.0 [8] (..) <reserved-1F848>..<reserved-1F84F>
+ {0x1F85A, 0x1F85F, prExtendedPictographic}, // E0.0 [6] (..) <reserved-1F85A>..<reserved-1F85F>
+ {0x1F888, 0x1F88F, prExtendedPictographic}, // E0.0 [8] (..) <reserved-1F888>..<reserved-1F88F>
+ {0x1F8AE, 0x1F8FF, prExtendedPictographic}, // E0.0 [82] (..) <reserved-1F8AE>..<reserved-1F8FF>
+ {0x1F90C, 0x1F90C, prExtendedPictographic}, // E13.0 [1] (🤌) pinched fingers
+ {0x1F90D, 0x1F90F, prExtendedPictographic}, // E12.0 [3] (🤍..🤏) white heart..pinching hand
+ {0x1F910, 0x1F918, prExtendedPictographic}, // E1.0 [9] (🤐..🤘) zipper-mouth face..sign of the horns
+ {0x1F919, 0x1F91E, prExtendedPictographic}, // E3.0 [6] (🤙..🤞) call me hand..crossed fingers
+ {0x1F91F, 0x1F91F, prExtendedPictographic}, // E5.0 [1] (🤟) love-you gesture
+ {0x1F920, 0x1F927, prExtendedPictographic}, // E3.0 [8] (🤠..🤧) cowboy hat face..sneezing face
+ {0x1F928, 0x1F92F, prExtendedPictographic}, // E5.0 [8] (🤨..🤯) face with raised eyebrow..exploding head
+ {0x1F930, 0x1F930, prExtendedPictographic}, // E3.0 [1] (🤰) pregnant woman
+ {0x1F931, 0x1F932, prExtendedPictographic}, // E5.0 [2] (🤱..🤲) breast-feeding..palms up together
+ {0x1F933, 0x1F93A, prExtendedPictographic}, // E3.0 [8] (🤳..🤺) selfie..person fencing
+ {0x1F93C, 0x1F93E, prExtendedPictographic}, // E3.0 [3] (🤼..🤾) people wrestling..person playing handball
+ {0x1F93F, 0x1F93F, prExtendedPictographic}, // E12.0 [1] (🤿) diving mask
+ {0x1F940, 0x1F945, prExtendedPictographic}, // E3.0 [6] (🥀..🥅) wilted flower..goal net
+ {0x1F947, 0x1F94B, prExtendedPictographic}, // E3.0 [5] (🥇..🥋) 1st place medal..martial arts uniform
+ {0x1F94C, 0x1F94C, prExtendedPictographic}, // E5.0 [1] (🥌) curling stone
+ {0x1F94D, 0x1F94F, prExtendedPictographic}, // E11.0 [3] (🥍..🥏) lacrosse..flying disc
+ {0x1F950, 0x1F95E, prExtendedPictographic}, // E3.0 [15] (🥐..🥞) croissant..pancakes
+ {0x1F95F, 0x1F96B, prExtendedPictographic}, // E5.0 [13] (🥟..🥫) dumpling..canned food
+ {0x1F96C, 0x1F970, prExtendedPictographic}, // E11.0 [5] (🥬..🥰) leafy green..smiling face with hearts
+ {0x1F971, 0x1F971, prExtendedPictographic}, // E12.0 [1] (🥱) yawning face
+ {0x1F972, 0x1F972, prExtendedPictographic}, // E13.0 [1] (🥲) smiling face with tear
+ {0x1F973, 0x1F976, prExtendedPictographic}, // E11.0 [4] (🥳..🥶) partying face..cold face
+ {0x1F977, 0x1F978, prExtendedPictographic}, // E13.0 [2] (🥷..🥸) ninja..disguised face
+ {0x1F979, 0x1F979, prExtendedPictographic}, // E14.0 [1] (🥹) face holding back tears
+ {0x1F97A, 0x1F97A, prExtendedPictographic}, // E11.0 [1] (🥺) pleading face
+ {0x1F97B, 0x1F97B, prExtendedPictographic}, // E12.0 [1] (🥻) sari
+ {0x1F97C, 0x1F97F, prExtendedPictographic}, // E11.0 [4] (🥼..🥿) lab coat..flat shoe
+ {0x1F980, 0x1F984, prExtendedPictographic}, // E1.0 [5] (🦀..🦄) crab..unicorn
+ {0x1F985, 0x1F991, prExtendedPictographic}, // E3.0 [13] (🦅..🦑) eagle..squid
+ {0x1F992, 0x1F997, prExtendedPictographic}, // E5.0 [6] (🦒..🦗) giraffe..cricket
+ {0x1F998, 0x1F9A2, prExtendedPictographic}, // E11.0 [11] (🦘..🦢) kangaroo..swan
+ {0x1F9A3, 0x1F9A4, prExtendedPictographic}, // E13.0 [2] (🦣..🦤) mammoth..dodo
+ {0x1F9A5, 0x1F9AA, prExtendedPictographic}, // E12.0 [6] (🦥..🦪) sloth..oyster
+ {0x1F9AB, 0x1F9AD, prExtendedPictographic}, // E13.0 [3] (🦫..🦭) beaver..seal
+ {0x1F9AE, 0x1F9AF, prExtendedPictographic}, // E12.0 [2] (🦮..🦯) guide dog..white cane
+ {0x1F9B0, 0x1F9B9, prExtendedPictographic}, // E11.0 [10] (🦰..🦹) red hair..supervillain
+ {0x1F9BA, 0x1F9BF, prExtendedPictographic}, // E12.0 [6] (🦺..🦿) safety vest..mechanical leg
+ {0x1F9C0, 0x1F9C0, prExtendedPictographic}, // E1.0 [1] (🧀) cheese wedge
+ {0x1F9C1, 0x1F9C2, prExtendedPictographic}, // E11.0 [2] (🧁..🧂) cupcake..salt
+ {0x1F9C3, 0x1F9CA, prExtendedPictographic}, // E12.0 [8] (🧃..🧊) beverage box..ice
+ {0x1F9CB, 0x1F9CB, prExtendedPictographic}, // E13.0 [1] (🧋) bubble tea
+ {0x1F9CC, 0x1F9CC, prExtendedPictographic}, // E14.0 [1] (🧌) troll
+ {0x1F9CD, 0x1F9CF, prExtendedPictographic}, // E12.0 [3] (🧍..🧏) person standing..deaf person
+ {0x1F9D0, 0x1F9E6, prExtendedPictographic}, // E5.0 [23] (🧐..🧦) face with monocle..socks
+ {0x1F9E7, 0x1F9FF, prExtendedPictographic}, // E11.0 [25] (🧧..🧿) red envelope..nazar amulet
+ {0x1FA00, 0x1FA6F, prExtendedPictographic}, // E0.0 [112] (🨀..) NEUTRAL CHESS KING..<reserved-1FA6F>