Skip to content

Commit

Permalink
Feat: Refactoring LLM Plugin, update docs. (#165)
Browse files Browse the repository at this point in the history
Refactoring LLM Plugin, update docs.
  • Loading branch information
mariocandela authored Feb 16, 2025
1 parent 8703d1a commit 38297fa
Show file tree
Hide file tree
Showing 8 changed files with 82 additions and 58 deletions.
13 changes: 8 additions & 5 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -211,9 +211,9 @@ commands:
#### Example SSH Honeypot
###### Honeypot LLM Honeypots
###### LLM Honeypots
Example with OpenAI GPT-4:
Following is an SSH LLM honeypot example using OpenAI as the LLM provider:
```yaml
apiVersion: "v1"
Expand All @@ -228,11 +228,12 @@ serverName: "ubuntu"
passwordRegex: "^(root|qwerty|Smoker666|123456|jenkins|minecraft|sinus|alex|postgres|Ly123456)$"
deadlineTimeoutSeconds: 60
plugin:
llmModel: "gpt4-o"
llmProvider: "openai"
llmModel: "gpt4-o" #Models https://platform.openai.com/docs/models
openAISecretKey: "sk-proj-123456"
```
Example with Ollama Llama3:
Example with a local Ollama instance using the codellama:7b model:
```yaml
apiVersion: "v1"
Expand All @@ -247,7 +248,8 @@ serverName: "ubuntu"
passwordRegex: "^(root|qwerty|Smoker666|123456|jenkins|minecraft|sinus|alex|postgres|Ly123456)$"
deadlineTimeoutSeconds: 60
plugin:
llmModel: "llama3"
llmProvider: "ollama"
llmModel: "codellama:7b" #Models https://ollama.com/search
host: "http://example.com/api/chat" #default http://localhost:11434/api/chat
```
Example with custom prompt:
Expand All @@ -265,6 +267,7 @@ serverName: "ubuntu"
passwordRegex: "^(root|qwerty|Smoker666|123456|jenkins|minecraft|sinus|alex|postgres|Ly123456)$"
deadlineTimeoutSeconds: 60
plugin:
llmProvider: "openai"
llmModel: "gpt4-o"
openAISecretKey: "sk-proj-123456"
prompt: "You will act as an Ubuntu Linux terminal. The user will type commands, and you are to reply with what the terminal should show. Your responses must be contained within a single code block."
Expand Down
6 changes: 4 additions & 2 deletions configurations/services/ssh-2222.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,9 @@ commands:
plugin: "LLMHoneypot"
serverVersion: "OpenSSH"
serverName: "ubuntu"
passwordRegex: "^(root|qwerty|Smoker666|123456|jenkins|minecraft|sinus|alex|postgres|Ly123456)$"
passwordRegex: "^(root|qwerty|Smoker666|123456|jenkins|minecraft|sinus|alex|postgres|Ly123456|1234)$"
deadlineTimeoutSeconds: 6000
plugin:
llmModel: "llama3"
llmProvider: "openai"
llmModel: "gpt4-o"
openAISecretKey: "sk-proj-12345"
1 change: 1 addition & 0 deletions parser/configurations_parser.go
Original file line number Diff line number Diff line change
Expand Up @@ -52,6 +52,7 @@ type Plugin struct {
OpenAISecretKey string `yaml:"openAISecretKey"`
Host string `yaml:"host"`
LLMModel string `yaml:"llmModel"`
LLMProvider string `yaml:"llmProvider"`
Prompt string `yaml:"prompt"`
}

Expand Down
2 changes: 2 additions & 0 deletions parser/configurations_parser_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -59,6 +59,7 @@ commands:
plugin:
openAISecretKey: "qwerty"
llmModel: "llama3"
llmProvider: "ollama"
host: "localhost:1563"
prompt: "hello world"
`)
Expand Down Expand Up @@ -135,6 +136,7 @@ func TestReadConfigurationsServicesValid(t *testing.T) {
assert.Equal(t, firstBeelzebubServiceConfiguration.Commands[0].Headers[0], "Content-Type: text/html")
assert.Equal(t, firstBeelzebubServiceConfiguration.Plugin.OpenAISecretKey, "qwerty")
assert.Equal(t, firstBeelzebubServiceConfiguration.Plugin.LLMModel, "llama3")
assert.Equal(t, firstBeelzebubServiceConfiguration.Plugin.LLMProvider, "ollama")
assert.Equal(t, firstBeelzebubServiceConfiguration.Plugin.Host, "localhost:1563")
assert.Equal(t, firstBeelzebubServiceConfiguration.Plugin.Prompt, "hello world")
assert.Equal(t, firstBeelzebubServiceConfiguration.TLSCertPath, "/tmp/cert.crt")
Expand Down
40 changes: 21 additions & 19 deletions plugins/llm-integration.go
Original file line number Diff line number Diff line change
Expand Up @@ -8,13 +8,14 @@ import (
"github.com/mariocandela/beelzebub/v3/tracer"
log "github.com/sirupsen/logrus"
"regexp"
"strings"
)

const (
systemPromptVirtualizeLinuxTerminal = "You will act as an Ubuntu Linux terminal. The user will type commands, and you are to reply with what the terminal should show. Your responses must be contained within a single code block. Do not provide note. Do not provide explanations or type commands unless explicitly instructed by the user. Your entire response/output is going to consist of a simple text with \n for new line, and you will NOT wrap it within string md markers"
systemPromptVirtualizeHTTPServer = "You will act as an unsecure HTTP Server with multiple vulnerability like aws and git credentials stored into root http directory. The user will send HTTP requests, and you are to reply with what the server should show. Do not provide explanations or type commands unless explicitly instructed by the user."
LLMPluginName = "LLMHoneypot"
openAIGPTEndpoint = "https://api.openai.com/v1/chat/completions"
openAIEndpoint = "https://api.openai.com/v1/chat/completions"
ollamaEndpoint = "http://localhost:11434/api/chat"
)

Expand All @@ -23,7 +24,8 @@ type LLMHoneypot struct {
OpenAIKey string
client *resty.Client
Protocol tracer.Protocol
Model LLMModel
Provider LLMProvider
Model string
Host string
CustomPrompt string
}
Expand Down Expand Up @@ -71,21 +73,21 @@ func (role Role) String() string {
return [...]string{"system", "user", "assistant"}[role]
}

type LLMModel int
type LLMProvider int

const (
LLAMA3 LLMModel = iota
GPT4O
Ollama LLMProvider = iota
OpenAI
)

func FromStringToLLMModel(llmModel string) (LLMModel, error) {
switch llmModel {
case "llama3":
return LLAMA3, nil
case "gpt4-o":
return GPT4O, nil
func FromStringToLLMProvider(llmProvider string) (LLMProvider, error) {
switch strings.ToLower(llmProvider) {
case "ollama":
return Ollama, nil
case "openai":
return OpenAI, nil
default:
return -1, fmt.Errorf("model %s not found", llmModel)
return -1, fmt.Errorf("provider %s not found, valid providers: ollama, openai", llmProvider)
}
}

Expand Down Expand Up @@ -153,7 +155,7 @@ func (llmHoneypot *LLMHoneypot) openAICaller(messages []Message) (string, error)
var err error

requestJson, err := json.Marshal(Request{
Model: "gpt-4o",
Model: llmHoneypot.Model,
Messages: messages,
Stream: false,
})
Expand All @@ -166,7 +168,7 @@ func (llmHoneypot *LLMHoneypot) openAICaller(messages []Message) (string, error)
}

if llmHoneypot.Host == "" {
llmHoneypot.Host = openAIGPTEndpoint
llmHoneypot.Host = openAIEndpoint
}

log.Debug(string(requestJson))
Expand All @@ -192,7 +194,7 @@ func (llmHoneypot *LLMHoneypot) ollamaCaller(messages []Message) (string, error)
var err error

requestJson, err := json.Marshal(Request{
Model: "llama3",
Model: llmHoneypot.Model,
Messages: messages,
Stream: false,
})
Expand Down Expand Up @@ -229,13 +231,13 @@ func (llmHoneypot *LLMHoneypot) ExecuteModel(command string) (string, error) {
return "", err
}

switch llmHoneypot.Model {
case LLAMA3:
switch llmHoneypot.Provider {
case Ollama:
return llmHoneypot.ollamaCaller(prompt)
case GPT4O:
case OpenAI:
return llmHoneypot.openAICaller(prompt)
default:
return "", errors.New("no model selected")
return "", fmt.Errorf("provider %d not found, valid providers: ollama, openai", llmHoneypot.Provider)
}
}

Expand Down
54 changes: 32 additions & 22 deletions plugins/llm-integration_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -85,7 +85,8 @@ func TestBuildExecuteModelFailValidation(t *testing.T) {
Histories: make([]Message, 0),
OpenAIKey: "",
Protocol: tracer.SSH,
Model: GPT4O,
Model: "gpt4-o",
Provider: OpenAI,
}

openAIGPTVirtualTerminal := InitLLMHoneypot(llmHoneypot)
Expand All @@ -101,7 +102,7 @@ func TestBuildExecuteModelWithCustomPrompt(t *testing.T) {
defer httpmock.DeactivateAndReset()

// Given
httpmock.RegisterMatcherResponder("POST", openAIGPTEndpoint,
httpmock.RegisterMatcherResponder("POST", openAIEndpoint,
httpmock.BodyContainsString("hello world"),
func(req *http.Request) (*http.Response, error) {
resp, err := httpmock.NewJsonResponse(200, &Response{
Expand All @@ -125,7 +126,8 @@ func TestBuildExecuteModelWithCustomPrompt(t *testing.T) {
Histories: make([]Message, 0),
OpenAIKey: "sdjdnklfjndslkjanfk",
Protocol: tracer.HTTP,
Model: GPT4O,
Model: "gpt4-o",
Provider: OpenAI,
CustomPrompt: "hello world",
}

Expand All @@ -146,7 +148,8 @@ func TestBuildExecuteModelFailValidationStrategyType(t *testing.T) {
Histories: make([]Message, 0),
OpenAIKey: "",
Protocol: tracer.TCP,
Model: GPT4O,
Model: "gpt4-o",
Provider: OpenAI,
}

openAIGPTVirtualTerminal := InitLLMHoneypot(llmHoneypot)
Expand All @@ -161,7 +164,8 @@ func TestBuildExecuteModelFailValidationModelType(t *testing.T) {
llmHoneypot := LLMHoneypot{
Histories: make([]Message, 0),
Protocol: tracer.SSH,
Model: 5,
Model: "llama3",
Provider: 5,
}

openAIGPTVirtualTerminal := InitLLMHoneypot(llmHoneypot)
Expand All @@ -179,7 +183,7 @@ func TestBuildExecuteModelSSHWithResultsOpenAI(t *testing.T) {
defer httpmock.DeactivateAndReset()

// Given
httpmock.RegisterResponder("POST", openAIGPTEndpoint,
httpmock.RegisterResponder("POST", openAIEndpoint,
func(req *http.Request) (*http.Response, error) {
resp, err := httpmock.NewJsonResponse(200, &Response{
Choices: []Choice{
Expand All @@ -202,7 +206,8 @@ func TestBuildExecuteModelSSHWithResultsOpenAI(t *testing.T) {
Histories: make([]Message, 0),
OpenAIKey: "sdjdnklfjndslkjanfk",
Protocol: tracer.SSH,
Model: GPT4O,
Model: "gpt4-o",
Provider: OpenAI,
}

openAIGPTVirtualTerminal := InitLLMHoneypot(llmHoneypot)
Expand Down Expand Up @@ -240,7 +245,8 @@ func TestBuildExecuteModelSSHWithResultsLLama(t *testing.T) {
llmHoneypot := LLMHoneypot{
Histories: make([]Message, 0),
Protocol: tracer.SSH,
Model: LLAMA3,
Model: "llama3",
Provider: Ollama,
}

openAIGPTVirtualTerminal := InitLLMHoneypot(llmHoneypot)
Expand All @@ -260,7 +266,7 @@ func TestBuildExecuteModelSSHWithoutResults(t *testing.T) {
defer httpmock.DeactivateAndReset()

// Given
httpmock.RegisterResponder("POST", openAIGPTEndpoint,
httpmock.RegisterResponder("POST", openAIEndpoint,
func(req *http.Request) (*http.Response, error) {
resp, err := httpmock.NewJsonResponse(200, &Response{
Choices: []Choice{},
Expand All @@ -276,7 +282,8 @@ func TestBuildExecuteModelSSHWithoutResults(t *testing.T) {
Histories: make([]Message, 0),
OpenAIKey: "sdjdnklfjndslkjanfk",
Protocol: tracer.SSH,
Model: GPT4O,
Model: "gpt4-o",
Provider: OpenAI,
}

openAIGPTVirtualTerminal := InitLLMHoneypot(llmHoneypot)
Expand All @@ -295,7 +302,7 @@ func TestBuildExecuteModelHTTPWithResults(t *testing.T) {
defer httpmock.DeactivateAndReset()

// Given
httpmock.RegisterResponder("POST", openAIGPTEndpoint,
httpmock.RegisterResponder("POST", openAIEndpoint,
func(req *http.Request) (*http.Response, error) {
resp, err := httpmock.NewJsonResponse(200, &Response{
Choices: []Choice{
Expand All @@ -318,7 +325,8 @@ func TestBuildExecuteModelHTTPWithResults(t *testing.T) {
Histories: make([]Message, 0),
OpenAIKey: "sdjdnklfjndslkjanfk",
Protocol: tracer.HTTP,
Model: GPT4O,
Model: "gpt4-o",
Provider: OpenAI,
}

openAIGPTVirtualTerminal := InitLLMHoneypot(llmHoneypot)
Expand All @@ -338,7 +346,7 @@ func TestBuildExecuteModelHTTPWithoutResults(t *testing.T) {
defer httpmock.DeactivateAndReset()

// Given
httpmock.RegisterResponder("POST", openAIGPTEndpoint,
httpmock.RegisterResponder("POST", openAIEndpoint,
func(req *http.Request) (*http.Response, error) {
resp, err := httpmock.NewJsonResponse(200, &Response{
Choices: []Choice{},
Expand All @@ -354,7 +362,8 @@ func TestBuildExecuteModelHTTPWithoutResults(t *testing.T) {
Histories: make([]Message, 0),
OpenAIKey: "sdjdnklfjndslkjanfk",
Protocol: tracer.HTTP,
Model: GPT4O,
Model: "gpt4-o",
Provider: OpenAI,
}

openAIGPTVirtualTerminal := InitLLMHoneypot(llmHoneypot)
Expand All @@ -368,16 +377,16 @@ func TestBuildExecuteModelHTTPWithoutResults(t *testing.T) {
}

func TestFromString(t *testing.T) {
model, err := FromStringToLLMModel("llama3")
model, err := FromStringToLLMProvider("openai")
assert.Nil(t, err)
assert.Equal(t, LLAMA3, model)
assert.Equal(t, OpenAI, model)

model, err = FromStringToLLMModel("gpt4-o")
model, err = FromStringToLLMProvider("ollama")
assert.Nil(t, err)
assert.Equal(t, GPT4O, model)
assert.Equal(t, Ollama, model)

model, err = FromStringToLLMModel("beelzebub-model")
assert.Errorf(t, err, "model beelzebub-model not found")
model, err = FromStringToLLMProvider("beelzebub-model")
assert.Errorf(t, err, "provider beelzebub-model not found")
}

func TestBuildExecuteModelSSHWithoutPlaintextSection(t *testing.T) {
Expand All @@ -404,7 +413,7 @@ func TestBuildExecuteModelSSHWithoutPlaintextSection(t *testing.T) {
llmHoneypot := LLMHoneypot{
Histories: make([]Message, 0),
Protocol: tracer.SSH,
Model: LLAMA3,
Model: "llama3",
}

openAIGPTVirtualTerminal := InitLLMHoneypot(llmHoneypot)
Expand Down Expand Up @@ -442,7 +451,8 @@ func TestBuildExecuteModelSSHWithoutQuotesSection(t *testing.T) {
llmHoneypot := LLMHoneypot{
Histories: make([]Message, 0),
Protocol: tracer.SSH,
Model: LLAMA3,
Model: "llama3",
Provider: Ollama,
}

openAIGPTVirtualTerminal := InitLLMHoneypot(llmHoneypot)
Expand Down
7 changes: 4 additions & 3 deletions protocols/strategies/http.go
Original file line number Diff line number Diff line change
Expand Up @@ -37,10 +37,10 @@ func (httpStrategy HTTPStrategy) Init(beelzebubServiceConfiguration parser.Beelz

if command.Plugin == plugins.LLMPluginName {

llmModel, err := plugins.FromStringToLLMModel(beelzebubServiceConfiguration.Plugin.LLMModel)
llmProvider, err := plugins.FromStringToLLMProvider(beelzebubServiceConfiguration.Plugin.LLMProvider)

if err != nil {
log.Errorf("Error fromString: %s", err.Error())
log.Errorf("Error: %s", err.Error())
responseHTTPBody = "404 Not Found!"
}

Expand All @@ -49,7 +49,8 @@ func (httpStrategy HTTPStrategy) Init(beelzebubServiceConfiguration parser.Beelz
OpenAIKey: beelzebubServiceConfiguration.Plugin.OpenAISecretKey,
Protocol: tracer.HTTP,
Host: beelzebubServiceConfiguration.Plugin.Host,
Model: llmModel,
Model: beelzebubServiceConfiguration.Plugin.LLMModel,
Provider: llmProvider,
CustomPrompt: beelzebubServiceConfiguration.Plugin.Prompt,
}

Expand Down
Loading

0 comments on commit 38297fa

Please sign in to comment.