Skip to content

Commit eb98ac5

Browse files
MaineC, Drost-Fromm, zkoppert, jmeridth
authored
First stab at figuring out the number of very active mentors (#206)
* First stab at figuring out the number of very active mentors in a project.
* Fix format and linter errors.
* Add flake to requirements.
* Adds call to mentor counter and output in markdown. This adds the call to mentor counter and displays the results in markdown, including first tests for this functionality.
* Make mentor counting configurable. This adds two configuration options: one to enable mentor counting and one for configuring how many comments a user needs to leave in discussions, PRs, and issues to be counted as an active mentor.
* Adds mentor counting to json output and adds missing config. This adds mentor counting output to json format. In addition this change makes the max number of comments to evaluate configurable as well as the cutoff for heavily involved mentors.
* Fix merge conflicts.
* Fix linting errors.
* fix: linting fixes

Signed-off-by: Zack Koppert <[email protected]>

* 8 is reasonable number of attrs

Signed-off-by: Zack Koppert <[email protected]>

* Update test_most_active_mentors.py

Co-authored-by: Jason Meridth <[email protected]>

* Update config.py

Co-authored-by: Jason Meridth <[email protected]>

* Update config.py — remove merge residual
* Update requirements.txt — remove lib only needed for testing.
* Update issue_metrics.py

Co-authored-by: Jason Meridth <[email protected]>

* Update config.py
* Update config.py — set type of `enable_mentor_count` to `bool`
* Update test_config.py — change tests to handle boolean change of enable_mentor_count

---------

Signed-off-by: Zack Koppert <[email protected]>
Co-authored-by: Drost-Fromm <[email protected]>
Co-authored-by: Zack Koppert <[email protected]>
Co-authored-by: Jason Meridth <[email protected]>
Co-authored-by: Jason Meridth <[email protected]>
1 parent 0a896e9 commit eb98ac5

12 files changed

+370
-8
lines changed

README.md

+4
Original file line numberDiff line numberDiff line change
@@ -145,6 +145,10 @@ This action can be configured to authenticate with GitHub App Installation or Pe
145145
| `HIDE_TIME_TO_CLOSE` | False | False | If set to `true`, the time to close will not be displayed in the generated Markdown file. |
146146
| `HIDE_TIME_TO_FIRST_RESPONSE` | False | False | If set to `true`, the time to first response will not be displayed in the generated Markdown file. |
147147
| `IGNORE_USERS` | False | False | A comma separated list of users to ignore when calculating metrics. (ie. `IGNORE_USERS: 'user1,user2'`). To ignore bots, append `[bot]` to the user (ie. `IGNORE_USERS: 'github-actions[bot]'`) |
148+
| `ENABLE_MENTOR_COUNT` | False | False | If set to 'TRUE' count number of comments users left on discussions, issues and PRs and display number of active mentors |
149+
| `MIN_MENTOR_COMMENTS` | False | 10 | Minimum number of comments to count as a mentor |
150+
| `MAX_COMMENTS_EVAL` | False | 20 | Maximum number of comments per thread to evaluate for mentor stats |
151+
| `HEAVILY_INVOLVED_CUTOFF` | False | 3 | Cutoff after which a mentor's comments in one issue are no longer counted against their total score |
148152
| `LABELS_TO_MEASURE` | False | `""` | A comma separated list of labels to measure how much time the label is applied. If not provided, no labels durations will be measured. Not compatible with discussions at this time. |
149153
| `SEARCH_QUERY` | True | `""` | The query by which you can filter issues/PRs which must contain a `repo:`, `org:`, `owner:`, or a `user:` entry. For discussions, include `type:discussions` in the query. |
150154

classes.py

+5
Original file line numberDiff line numberDiff line change
@@ -19,9 +19,12 @@ class IssueWithMetrics:
1919
time_to_answer (timedelta, optional): The time it took to answer the
2020
discussions in the issue.
2121
label_metrics (dict, optional): A dictionary containing the label metrics
22+
mentor_activity (dict, optional): A dictionary containing active mentors
2223
2324
"""
2425

26+
# pylint: disable=too-many-instance-attributes
27+
2528
def __init__(
2629
self,
2730
title,
@@ -31,6 +34,7 @@ def __init__(
3134
time_to_close=None,
3235
time_to_answer=None,
3336
labels_metrics=None,
37+
mentor_activity=None,
3438
):
3539
self.title = title
3640
self.html_url = html_url
@@ -39,3 +43,4 @@ def __init__(
3943
self.time_to_close = time_to_close
4044
self.time_to_answer = time_to_answer
4145
self.label_metrics = labels_metrics
46+
self.mentor_activity = mentor_activity

config.py

+24
Original file line numberDiff line numberDiff line change
@@ -34,6 +34,10 @@ class EnvVars:
3434
hide_time_to_first_response (bool): If true, the time to first response metric is hidden in the output
3535
ignore_users (List[str]): List of usernames to ignore when calculating metrics
3636
labels_to_measure (List[str]): List of labels to measure how much time the label is applied
37+
enable_mentor_count (bool): If set to TRUE, compute number of mentors
38+
min_mentor_comments (str): If set, defines the minimum number of comments for mentors
39+
max_comments_eval (str): If set, defines the maximum number of comments to look at for mentor evaluation
40+
heavily_involved_cutoff (str): If set, defines the cutoff after which a heavily involved commenter's comments in one issue are no longer counted
3741
search_query (str): Search query used to filter issues/prs/discussions on GitHub
3842
"""
3943

@@ -51,6 +55,10 @@ def __init__(
5155
hide_time_to_first_response: bool,
5256
ignore_user: List[str],
5357
labels_to_measure: List[str],
58+
enable_mentor_count: bool,
59+
min_mentor_comments: str,
60+
max_comments_eval: str,
61+
heavily_involved_cutoff: str,
5462
search_query: str,
5563
):
5664
self.gh_app_id = gh_app_id
@@ -65,6 +73,10 @@ def __init__(
6573
self.hide_time_to_answer = hide_time_to_answer
6674
self.hide_time_to_close = hide_time_to_close
6775
self.hide_time_to_first_response = hide_time_to_first_response
76+
self.enable_mentor_count = enable_mentor_count
77+
self.min_mentor_comments = min_mentor_comments
78+
self.max_comments_eval = max_comments_eval
79+
self.heavily_involved_cutoff = heavily_involved_cutoff
6880
self.search_query = search_query
6981

7082
def __repr__(self):
@@ -82,6 +94,10 @@ def __repr__(self):
8294
f"{self.hide_time_to_first_response},"
8395
f"{self.ignore_users},"
8496
f"{self.labels_to_measure},"
97+
f"{self.enable_mentor_count},"
98+
f"{self.min_mentor_comments},"
99+
f"{self.max_comments_eval},"
100+
f"{self.heavily_involved_cutoff},"
85101
f"{self.search_query})"
86102
)
87103

@@ -166,6 +182,10 @@ def get_env_vars(test: bool = False) -> EnvVars:
166182
hide_time_to_answer = get_bool_env_var("HIDE_TIME_TO_ANSWER")
167183
hide_time_to_close = get_bool_env_var("HIDE_TIME_TO_CLOSE")
168184
hide_time_to_first_response = get_bool_env_var("HIDE_TIME_TO_FIRST_RESPONSE")
185+
enable_mentor_count = get_bool_env_var("ENABLE_MENTOR_COUNT")
186+
min_mentor_comments = os.getenv("MIN_MENTOR_COMMENTS", "10")
187+
max_comments_eval = os.getenv("MAX_COMMENTS_EVAL", "20")
188+
heavily_involved_cutoff = os.getenv("HEAVILY_INVOLVED_CUTOFF", "3")
169189

170190
return EnvVars(
171191
gh_app_id,
@@ -180,5 +200,9 @@ def get_env_vars(test: bool = False) -> EnvVars:
180200
hide_time_to_first_response,
181201
ignore_users_list,
182202
labels_to_measure_list,
203+
enable_mentor_count,
204+
min_mentor_comments,
205+
max_comments_eval,
206+
heavily_involved_cutoff,
183207
search_query,
184208
)

issue_metrics.py

+36-4
Original file line numberDiff line numberDiff line change
@@ -31,6 +31,7 @@
3131
from json_writer import write_to_json
3232
from labels import get_label_metrics, get_stats_time_in_labels
3333
from markdown_writer import write_to_markdown
34+
from most_active_mentors import count_comments_per_user, get_mentor_count
3435
from time_to_answer import get_stats_time_to_answer, measure_time_to_answer
3536
from time_to_close import get_stats_time_to_close, measure_time_to_close
3637
from time_to_first_response import (
@@ -40,8 +41,6 @@
4041
from time_to_merge import measure_time_to_merge
4142
from time_to_ready_for_review import get_time_to_ready_for_review
4243

43-
GITHUB_BASE_URL = "https://github.com"
44-
4544

4645
def search_issues(
4746
search_query: str, github_connection: github3.GitHub
@@ -126,6 +125,8 @@ def get_per_issue_metrics(
126125
discussions: bool = False,
127126
labels: Union[List[str], None] = None,
128127
ignore_users: Union[List[str], None] = None,
128+
max_comments_to_eval: int = 20,
129+
heavily_involved: int = 3,
129130
) -> tuple[List, int, int]:
130131
"""
131132
Calculate the metrics for each issue/pr/discussion in a list provided.
@@ -158,10 +159,20 @@ def get_per_issue_metrics(
158159
None,
159160
None,
160161
None,
162+
None,
161163
)
162164
issue_with_metrics.time_to_first_response = measure_time_to_first_response(
163165
None, issue, ignore_users
164166
)
167+
issue_with_metrics.mentor_activity = count_comments_per_user(
168+
None,
169+
issue,
170+
ignore_users,
171+
None,
172+
None,
173+
max_comments_to_eval,
174+
heavily_involved,
175+
)
165176
issue_with_metrics.time_to_answer = measure_time_to_answer(issue)
166177
if issue["closedAt"]:
167178
issue_with_metrics.time_to_close = measure_time_to_close(None, issue)
@@ -188,6 +199,15 @@ def get_per_issue_metrics(
188199
issue_with_metrics.time_to_first_response = measure_time_to_first_response(
189200
issue, None, pull_request, ready_for_review_at, ignore_users
190201
)
202+
issue_with_metrics.mentor_activity = count_comments_per_user(
203+
issue,
204+
None,
205+
pull_request,
206+
ready_for_review_at,
207+
ignore_users,
208+
max_comments_to_eval,
209+
heavily_involved,
210+
)
191211
if labels:
192212
issue_with_metrics.label_metrics = get_label_metrics(issue, labels)
193213
if issue.state == "closed": # type: ignore
@@ -259,6 +279,10 @@ def main():
259279
token,
260280
env_vars.ghe,
261281
)
282+
enable_mentor_count = env_vars.enable_mentor_count
283+
min_mentor_count = int(env_vars.min_mentor_comments)
284+
max_comments_eval = int(env_vars.max_comments_eval)
285+
heavily_involved_cutoff = int(env_vars.heavily_involved_cutoff)
262286

263287
# Get the repository owner and name from the search query
264288
owner = get_owner(search_query)
@@ -283,13 +307,13 @@ def main():
283307
issues = get_discussions(token, search_query)
284308
if len(issues) <= 0:
285309
print("No discussions found")
286-
write_to_markdown(None, None, None, None, None, None, None)
310+
write_to_markdown(None, None, None, None, None, None, None, None)
287311
return
288312
else:
289313
issues = search_issues(search_query, github_connection)
290314
if len(issues) <= 0:
291315
print("No issues found")
292-
write_to_markdown(None, None, None, None, None, None, None)
316+
write_to_markdown(None, None, None, None, None, None, None, None)
293317
return
294318

295319
# Get all the metrics
@@ -298,6 +322,8 @@ def main():
298322
discussions="type:discussions" in search_query,
299323
labels=labels,
300324
ignore_users=ignore_users,
325+
max_comments_to_eval=max_comments_eval,
326+
heavily_involved=heavily_involved_cutoff,
301327
)
302328

303329
stats_time_to_first_response = get_stats_time_to_first_response(issues_with_metrics)
@@ -307,6 +333,10 @@ def main():
307333

308334
stats_time_to_answer = get_stats_time_to_answer(issues_with_metrics)
309335

336+
num_mentor_count = 0
337+
if enable_mentor_count:
338+
num_mentor_count = get_mentor_count(issues_with_metrics, min_mentor_count)
339+
310340
# Get stats describing the time in label for each label and store it in a dictionary
311341
# where the key is the label and the value is the average time
312342
stats_time_in_labels = get_stats_time_in_labels(issues_with_metrics, labels)
@@ -320,6 +350,7 @@ def main():
320350
stats_time_in_labels,
321351
num_issues_open,
322352
num_issues_closed,
353+
num_mentor_count,
323354
search_query,
324355
)
325356
write_to_markdown(
@@ -330,6 +361,7 @@ def main():
330361
stats_time_in_labels,
331362
num_issues_open,
332363
num_issues_closed,
364+
num_mentor_count,
333365
labels,
334366
search_query,
335367
)

json_writer.py

+3
Original file line numberDiff line numberDiff line change
@@ -30,6 +30,7 @@ def write_to_json(
3030
stats_time_in_labels: Union[dict[str, dict[str, timedelta]], None],
3131
num_issues_opened: Union[int, None],
3232
num_issues_closed: Union[int, None],
33+
num_mentor_count: Union[int, None],
3334
search_query: str,
3435
) -> str:
3536
"""
@@ -42,6 +43,7 @@ def write_to_json(
4243
"average_time_to_answer": "1 day, 0:00:00",
4344
"num_items_opened": 2,
4445
"num_items_closed": 1,
46+
"num_mentor_count": 5,
4547
"total_item_count": 2,
4648
"issues": [
4749
{
@@ -129,6 +131,7 @@ def write_to_json(
129131
"90_percentile_time_in_labels": p90_time_in_labels,
130132
"num_items_opened": num_issues_opened,
131133
"num_items_closed": num_issues_closed,
134+
"num_mentor_count": num_mentor_count,
132135
"total_item_count": len(issues_with_metrics),
133136
}
134137

markdown_writer.py

+7-1
Original file line numberDiff line numberDiff line change
@@ -13,6 +13,7 @@
1313
average_time_to_answer: timedelta,
1414
num_issues_opened: int,
1515
num_issues_closed: int,
16+
num_mentor_count: int,
1617
file: file object = None
1718
) -> None:
1819
Write the issues with metrics to a markdown file.
@@ -79,6 +80,7 @@ def write_to_markdown(
7980
average_time_in_labels: Union[dict, None],
8081
num_issues_opened: Union[int, None],
8182
num_issues_closed: Union[int, None],
83+
num_mentor_count: Union[int, None],
8284
labels=None,
8385
search_query=None,
8486
hide_label_metrics=False,
@@ -95,7 +97,8 @@ def write_to_markdown(
9597
file (file object, optional): The file object to write to. If not provided,
9698
a file named "issue_metrics.md" will be created.
9799
num_issues_opened (int): The Number of items that remain opened.
98-
num_issues_closed (int): The number of issues that were closed.
100+
num_issues_closed (int): The number of issues that were closed.
101+
num_mentor_count (int): The number of very active commenters.
99102
labels (List[str]): A list of the labels that are used in the issues.
100103
search_query (str): The search query used to find the issues.
101104
hide_label_metrics (bool): Represents whether the user has chosen to hide label metrics in the output
@@ -127,6 +130,7 @@ def write_to_markdown(
127130
average_time_in_labels,
128131
num_issues_opened,
129132
num_issues_closed,
133+
num_mentor_count,
130134
labels,
131135
columns,
132136
file,
@@ -184,6 +188,7 @@ def write_overall_metrics_tables(
184188
stats_time_in_labels,
185189
num_issues_opened,
186190
num_issues_closed,
191+
num_mentor_count,
187192
labels,
188193
columns,
189194
file,
@@ -246,4 +251,5 @@ def write_overall_metrics_tables(
246251
file.write("| --- | ---: |\n")
247252
file.write(f"| Number of items that remain open | {num_issues_opened} |\n")
248253
file.write(f"| Number of items closed | {num_issues_closed} |\n")
254+
file.write(f"| Number of most active mentors | {num_mentor_count} |\n")
249255
file.write(f"| Total number of items created | {len(issues_with_metrics)} |\n\n")

0 commit comments

Comments
 (0)