Add cleanup, typing, update to py3.11, makefile #21

Open · wants to merge 1 commit into base: master
127 changes: 126 additions & 1 deletion .gitignore
@@ -1,2 +1,127 @@
*.pyc
*.swp

# Editors
.vscode/
.idea/

# Vagrant
.vagrant/

# Mac/OSX
.DS_Store

# Windows
Thumbs.db

# Source for the following rules: https://raw.githubusercontent.com/github/gitignore/master/Python.gitignore
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
.hypothesis/
.pytest_cache/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
target/

# Jupyter Notebook
.ipynb_checkpoints

# IPython
profile_default/
ipython_config.py

# pyenv
.python-version

# celery beat schedule file
celerybeat-schedule

# SageMath parsed files
*.sage.py

# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/
.dmypy.json
dmypy.json
21 changes: 21 additions & 0 deletions .ruff.toml
@@ -0,0 +1,21 @@
# Global linting and formatting config for ruff
select = [
"I", # isort
"N", # pep8-naming
"F", # pyflakes - flake8
"E", # pycodesytle - flake8
"C90", # mccabe - flake8
"D", # pydocstyle - flake8-docstring
]
ignore = ["D104", "D105", "D107", "D206", "D401", "E501"]
line-length = 130

[lint.isort]
combine-as-imports = true
known-first-party = ["behave2cucumber"]

[lint.per-file-ignores]
"tests/**/test_*.py" = ["D100", "D101", "D102", "D103"]

[lint.pydocstyle]
convention = "pep257"
13 changes: 13 additions & 0 deletions Makefile
@@ -0,0 +1,13 @@
.PHONY: mypy
mypy: # Run mypy analysis.
mypy --config-file mypy.ini

.PHONY: unit-test
unit-test: # Run unit tests.
pytest ./tests -vvv

.PHONY: help
help: # Show help for each of the Makefile recipes.
@grep -E '^[a-zA-Z0-9 -]+:.*#' Makefile | sort | while read -r l; do printf "\033[1;32m$$(echo $$l | cut -f 1 -d':')\033[00m:$$(echo $$l | cut -f 2- -d'#')\n"; done

.DEFAULT_GOAL := help
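
The help recipe above is the default goal: it greps the Makefile for lines of the form "target: # comment" and prints each target name next to its trailing comment. A rough Python re-implementation of that grep/cut pipeline, shown only to illustrate the parsing (not part of the PR):

# Illustrative re-implementation of the help recipe's shell one-liner.
import re
from pathlib import Path

for line in sorted(Path("Makefile").read_text().splitlines()):
    if re.match(r"^[a-zA-Z0-9 -]+:.*#", line):  # same pattern the recipe greps for
        target = line.split(":", 1)[0]          # cut -f 1 -d':'
        description = line.split("#", 1)[1]     # cut -f 2- -d'#'
        print(f"{target}:{description}")
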
79 changes: 42 additions & 37 deletions behave2cucumber/__init__.py
@@ -1,53 +1,59 @@
'''
"""
Copyright (c) 2016 Behalf Inc.

Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files
(the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify,
merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
'''
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""


def convert(json_file, remove_background=False, duration_format=False, deduplicate=False):
def convert(json_file: dict, remove_background: bool = False, duration_format: bool = False, deduplicate: bool = False) -> dict:
# json_nodes are the scopes available in behave/cucumber json: Feature -> elements (Scenarios) -> Steps
json_nodes = ['feature', 'elements', 'steps']
json_nodes = ["feature", "elements", "steps"]
# These fields don't exist in the cucumber report, therefore when converting from behave we need to delete these
# fields.
fields_not_exist_in_cucumber_json = ['status', 'step_type']
fields_not_exist_in_cucumber_json = ["status", "step_type"]

def format_level(tree, index=0, id_counter=0):
def format_level(tree: dict, index: int = 0, id_counter: int = 0) -> dict:
for item in tree:
# Location in behave json translates to uri and line in cucumber json
uri, line_number = item.pop("location").split(":")
item["line"] = int(line_number)
for field in fields_not_exist_in_cucumber_json:
if field in item:
item.pop(field)
if 'tags' in item:
if "tags" in item:
# Tags in behave are just a list of tag names, in cucumber every tag has a name and a line number.
item['tags'] = [{"name": tag if tag.startswith('@') else '@' + tag, "line": item["line"] - 1} for tag in
item['tags']]
if json_nodes[index] == 'steps':
if 'result' in item:
item["tags"] = [
{"name": tag if tag.startswith("@") else "@" + tag, "line": item["line"] - 1} for tag in item["tags"]
]
if json_nodes[index] == "steps":
if "result" in item:
# Because of several problems with long error messages, the message is truncated to a maximum of 2000 chars.
if 'error_message' in item["result"]:
error_msg = item["result"].pop('error_message')
item["result"]["error_message"] = str(
(str(error_msg).replace("\"", "").replace("\\'", ""))[:2000])
if 'duration' in item["result"] and duration_format:
if "error_message" in item["result"]:
error_msg = item["result"].pop("error_message")
item["result"]["error_message"] = str((str(error_msg).replace('"', "").replace("\\'", ""))[:2000])
if "duration" in item["result"] and duration_format:
item["result"]["duration"] = int(item["result"]["duration"] * 1000000000)
else:
# In behave, skipped tests don't have a result object in their json, therefore when generating the
# cucumber report we need to generate a new result with status skipped for every skipped test
item["result"] = {"status": "skipped", "duration": 0}
if 'table' in item:
item['rows'] = []
if "table" in item:
item["rows"] = []
t_line = 1
item['rows'].append({"cells": item['table']['headings'], "line": item["line"] + t_line})
for table_row in item['table']['rows']:
item["rows"].append({"cells": item["table"]["headings"], "line": item["line"] + t_line})
for table_row in item["table"]["rows"]:
t_line += 1
item['rows'].append({"cells": table_row, "line": item["line"] + t_line})
item["rows"].append({"cells": table_row, "line": item["line"] + t_line})
else:
# uri is the name of the feature file where the current item is located
item["uri"] = uri
@@ -56,30 +62,29 @@ def format_level(tree, index=0, id_counter=0):
id_counter += 1
# If the scope is not "steps" proceed with the recursion
if index != 2 and json_nodes[index + 1] in item:
item[json_nodes[index + 1]] = format_level(
item[json_nodes[index + 1]], index + 1, id_counter=id_counter
)
item[json_nodes[index + 1]] = format_level(item[json_nodes[index + 1]], index + 1, id_counter=id_counter)
return tree

# Option to remove the background element because behave already pushes its steps to all scenarios
if remove_background:
for feature in json_file:
if feature['elements'][0]['type'] == 'background':
feature['elements'].pop(0)
if feature["elements"][0]["type"] == "background":
feature["elements"].pop(0)

if deduplicate:
def check_dupe(current_feature, current_scenario, previous_scenario):
if "autoretry" not in current_feature['tags'] and "autoretry" not in current_scenario['tags']:

def check_dupe(current_feature: dict, current_scenario: dict, previous_scenario: dict) -> bool:
if "autoretry" not in current_feature["tags"] and "autoretry" not in current_scenario["tags"]:
return False
if previous_scenario['keyword'] != current_scenario['keyword']:
if previous_scenario["keyword"] != current_scenario["keyword"]:
return False
elif previous_scenario['location'] != current_scenario['location']:
elif previous_scenario["location"] != current_scenario["location"]:
return False
elif previous_scenario['name'] != current_scenario['name']:
elif previous_scenario["name"] != current_scenario["name"]:
return False
elif previous_scenario['tags'] != current_scenario['tags']:
elif previous_scenario["tags"] != current_scenario["tags"]:
return False
elif previous_scenario['type'] != current_scenario['type']:
elif previous_scenario["type"] != current_scenario["type"]:
return False
else:
return True
@@ -89,7 +94,7 @@ def check_dupe(current_feature, current_scenario, previous_scenario):
scenarios = []

# For each scenario in the feature
for scenario in feature['elements']:
for scenario in feature["elements"]:
# Append the scenario to the working list
scenarios.append(scenario)

@@ -105,7 +110,7 @@ def check_dupe(current_feature, current_scenario, previous_scenario):
pass

# Replace the existing list with the working list
feature['elements'] = scenarios
feature["elements"] = scenarios

# Begin the recursion
return format_level(json_file)
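
For reference, a minimal usage sketch of the converted function; the report file names are illustrative, not part of this PR. behave writes its JSON report as a list of features, which convert() walks and rewrites into cucumber-compatible form.

# Minimal usage sketch; "behave_report.json" and "cucumber_report.json" are illustrative names.
import json

import behave2cucumber

with open("behave_report.json") as f:
    behave_json = json.load(f)

cucumber_json = behave2cucumber.convert(
    behave_json,
    remove_background=True,  # drop background elements; behave already copies their steps into scenarios
    duration_format=True,    # convert behave's seconds into the nanoseconds cucumber expects
    deduplicate=True,        # keep only the last run of scenarios tagged with autoretry
)

with open("cucumber_report.json", "w") as f:
    json.dump(cucumber_json, f, indent=2)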