Skip to content

Commit 067cb4a

Browse files
committed
Update the A-score computation
1 parent 5f089dd commit 067cb4a

File tree

3 files changed

+9
-57
lines changed

3 files changed

+9
-57
lines changed

codepass/get_config.py

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -140,9 +140,11 @@ def parser_args(default_config: dict) -> Namespace:
140140
default=default_config.get("model", "gpt-4o-mini"),
141141
)
142142
parser.add_argument(
143-
"version",
143+
"-v",
144+
"--version",
144145
help="Print version",
145146
type=bool,
147+
action=BooleanOptionalAction,
146148
default=False,
147149
)
148150

codepass/llm/a_score_parser.py

Lines changed: 5 additions & 55 deletions
Original file line numberDiff line numberDiff line change
@@ -96,70 +96,20 @@ def _mean_weighted_linear_complexity_score(self) -> float:
9696
/ 15
9797
)
9898

99-
def _normalized_mean_correction_sum_weighted_sq_complexity_score(self) -> float:
100-
linear_mean = self._mean_weighted_linear_complexity_score()
101-
102-
if linear_mean == 0:
103-
return 0
104-
105-
weighted_advanced_programming_techniques_usage = 5 * math.pow(
106-
self.advanced_code_techniques_score, 2
107-
)
108-
weighted_technical_domain_expertise_usage = 4 * (
109-
math.pow(self.technical_domain_knowledge_score, 2)
110-
)
111-
weighted_algorithms_usage = 3 * math.pow(
112-
self.project_specific_knowledge_score, 2
113-
)
114-
weighted_computation_usage = 2 * math.pow(self.cognitive_complexity_score, 2)
115-
weighted_project_specific_knowledge_usage = 1 * math.pow(
116-
self.readability_score, 2
117-
)
118-
119-
sq_mean = (
120-
sum(
121-
[
122-
weighted_advanced_programming_techniques_usage,
123-
weighted_technical_domain_expertise_usage,
124-
weighted_algorithms_usage,
125-
weighted_computation_usage,
126-
weighted_project_specific_knowledge_usage,
127-
]
128-
)
129-
/ 15
130-
)
131-
132-
sum_weighted_sq_complexity_score = (
133-
weighted_advanced_programming_techniques_usage
134-
+ weighted_technical_domain_expertise_usage
135-
+ weighted_algorithms_usage
136-
+ weighted_computation_usage
137-
+ weighted_project_specific_knowledge_usage
138-
)
139-
sum_weighted_sq_complexity_score_mean_correction = (
140-
sum_weighted_sq_complexity_score * (linear_mean / sq_mean)
141-
)
142-
normalized_mean_correction_sum_weighted_sq_complexity_score = (
143-
sum_weighted_sq_complexity_score_mean_correction / 15
144-
)
145-
146-
return normalized_mean_correction_sum_weighted_sq_complexity_score
147-
14899
def a_score_per_line(self) -> float:
149100
if self.is_setup_of_declaration:
150101
return 0
151102
line_count = self.line_count()
152-
_normalized_mean_correction_sum_weighted_sq_complexity_score = (
153-
self._normalized_mean_correction_sum_weighted_sq_complexity_score()
103+
_mean_weighted_linear_complexity_score = (
104+
self._mean_weighted_linear_complexity_score()
154105
)
155106
line_complexity_coefficient = (line_count / 10) ** (1 / 4)
156-
_normalized_mean_correction_sum_weighted_sq_complexity_score_with_line_complexity = (
157-
_normalized_mean_correction_sum_weighted_sq_complexity_score
158-
* line_complexity_coefficient
107+
_mean_weighted_linear_complexity_score_with_line_complexity = (
108+
_mean_weighted_linear_complexity_score * line_complexity_coefficient
159109
)
160110

161111
a_score = SCORE_MIN_VALUE + SCORE_RANGE_VALUE * math.tanh(
162-
_normalized_mean_correction_sum_weighted_sq_complexity_score_with_line_complexity
112+
_mean_weighted_linear_complexity_score_with_line_complexity
163113
)
164114
a_score_pre_line = a_score * line_count
165115

pyproject.toml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
[tool.poetry]
22
name = "codepass"
3-
version = "0.3.3"
3+
version = "0.3.4"
44
description = "A CLI tool to evaluate the quality of your code"
55
authors = ["airtucha <[email protected]>"]
66
readme = "README.md"

0 commit comments

Comments (0)