Commit 46eec4b

fix: pylint errors
1 parent 0578608 commit 46eec4b

5 files changed: +17 -14 lines

vllm/config.py

Lines changed: 2 additions & 1 deletion
@@ -55,7 +55,8 @@ def __init__(
         self.seed = seed
 
         self.hf_config = get_config(model, trust_remote_code)
-        self.skip_special_tokens = self.hf_config.skip_special_tokens if hasattr(
+        self.skip_special_tokens = self.hf_config.skip_special_tokens \
+            if hasattr(
             self.hf_config, "skip_special_tokens") else True
         self.dtype = _get_and_verify_dtype(self.hf_config, dtype)
         self._verify_tokenizer_mode()
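
For readers of the hunk above: the wrap is purely for pylint's line-length check, and the underlying fallback (use hf_config.skip_special_tokens when the attribute exists, otherwise default to True) is unchanged. A minimal standalone sketch of that fallback pattern; the stand-in config class below is illustrative only, not part of vLLM:

class _StubHFConfig:
    # Illustrative stand-in for a Hugging Face model config; not part of vLLM.
    skip_special_tokens = False


def resolve_skip_special_tokens(hf_config) -> bool:
    # Same fallback as the wrapped line in vllm/config.py: honor the
    # attribute if the config defines it, otherwise default to True.
    return hf_config.skip_special_tokens \
        if hasattr(hf_config, "skip_special_tokens") else True


print(resolve_skip_special_tokens(_StubHFConfig()))  # False
print(resolve_skip_special_tokens(object()))         # True (attribute missing)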

vllm/sequence.py

Lines changed: 2 additions & 1 deletion
@@ -57,7 +57,8 @@ class SequenceData:
     def __init__(self, prompt_token_ids: List[int], **kwargs) -> None:
         self.prompt_token_ids = prompt_token_ids
 
-        # position_ids: The position IDs of the prompt, it can be 2D for some model architectures (e.g. GLM)
+        # position_ids: The position IDs of the prompt,
+        # it can be 2D for some model architectures (e.g. GLM)
         self.position_ids: Union[List[int], List[List[int]]] = kwargs.get(
             "position_ids", [])
         self.block_position_encoding = kwargs.get("block_position_encoding",
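
Context for the comment being re-wrapped above: GLM-style models carry two position channels (token positions plus block positions), so position_ids may be a list of two lists rather than a flat list. A hypothetical construction, assuming this fork's SequenceData; the token IDs and position values are made up for illustration:

from vllm.sequence import SequenceData

# Flat 1D positions, as used by most architectures.
seq_1d = SequenceData([101, 7592, 2088], position_ids=[0, 1, 2])

# 2D positions for a GLM-style model: one row of token positions and one
# row of block positions, with the matching flag set.
seq_2d = SequenceData(
    [101, 7592, 2088],
    position_ids=[[0, 1, 2], [0, 0, 1]],
    block_position_encoding=True,
)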

vllm/transformers_utils/configs/__init__.py

Lines changed: 0 additions & 1 deletion
@@ -7,7 +7,6 @@
 from vllm.transformers_utils.configs.falcon import RWConfig
 from vllm.transformers_utils.configs.glm import GLMConfig
 from vllm.transformers_utils.configs.mpt import MPTConfig
-from vllm.transformers_utils.configs.qwen import QWenConfig
 
 __all__ = [
     "MPTConfig", "BaiChuanConfig", "AquilaConfig", "QWenConfig", "RWConfig",

vllm/transformers_utils/configs/glm.py

Lines changed: 5 additions & 3 deletions
@@ -8,7 +8,8 @@
 class GLMConfig(PretrainedConfig):
     model_type = "glm"
 
-    # A dict that maps model specific attribute names to the standardized naming of attributes.
+    # A dict that maps model specific attribute names
+    # to the standardized naming of attributes.
     attribute_map = {
         "num_hidden_layers": "num_layers",
     }
@@ -51,7 +52,8 @@ def __init__(self,
         self.attention_dropout = attention_dropout
         self.layer_norm_epsilon = layer_norm_epsilon
         self.rmsnorm = rmsnorm
-        self.apply_residual_connection_post_layernorm = apply_residual_connection_post_layernorm
+        self.apply_residual_connection_post_layernorm = \
+            apply_residual_connection_post_layernorm
         self.post_layer_norm = post_layer_norm
         self.add_bias_linear = add_bias_linear
         self.add_qkv_bias = add_qkv_bias
@@ -66,4 +68,4 @@ def __init__(self,
         self.prefix_projection = prefix_projection
         self.n_inner = None
         self.skip_special_tokens = False
-        super().__init__(**kwargs)
+        super().__init__(**kwargs)
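
Background on the attribute_map comment being re-wrapped: transformers.PretrainedConfig uses this dict to alias model-specific attribute names to standardized ones, so reading num_hidden_layers on a GLMConfig resolves to its num_layers value. A minimal sketch of the mechanism with a toy config class (not the real GLMConfig), assuming a recent transformers release:

from transformers import PretrainedConfig


class TinyGLMLikeConfig(PretrainedConfig):
    # Toy config, defined only to demonstrate attribute_map aliasing.
    model_type = "tiny-glm-like"
    attribute_map = {"num_hidden_layers": "num_layers"}

    def __init__(self, num_layers: int = 28, **kwargs):
        self.num_layers = num_layers
        super().__init__(**kwargs)


cfg = TinyGLMLikeConfig(num_layers=28)
print(cfg.num_hidden_layers)  # 28, read through the alias onto num_layers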

vllm/worker/worker.py

Lines changed: 8 additions & 8 deletions
@@ -179,8 +179,8 @@ def _prepare_inputs(
             position_ids, block_position_encoding = seq_data.get_position_ids()
 
             if not position_ids:
-                # NOTE(woosuk): Here we assume that the first token in the prompt
-                # is always the first token in the sequence.
+                # NOTE(woosuk): Here we assume that the first token in the
+                # prompt is always the first token in the sequence.
                 input_positions.extend(range(len(prompt_tokens)))
             else:
                 if block_position_encoding:
@@ -230,8 +230,8 @@ def _prepare_inputs(
             context_len = seq_data.get_len()
             position = context_len - 1
 
-            position_ids, block_position_encoding = seq_data.get_position_ids(
-            )
+            position_ids, block_position_encoding = \
+                seq_data.get_position_ids()
             if not position_ids:
                 input_positions.append(position)
             else:
@@ -376,12 +376,12 @@ def _pad_to_alignment(x: List[int],
         return x + [0] * ((-len(x)) % multiple_of)
     else:
         ret = []
-        for _x in x:
-            if isinstance(_x, list) and isinstance(_x[0], list):
-                pad_element = [[[0]] * len(_x)]
+        for x_ in x:
+            if isinstance(x_, list) and isinstance(x_[0], list):
+                pad_element = [[[0]] * len(x_)]
                 return x + pad_element * ((-len(x)) % multiple_of)
             else:
-                ret.append(_x + [0] * ((-len(_x)) % multiple_of))
+                ret.append(x_ + [0] * ((-len(x_)) % multiple_of))
         return ret
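
One detail worth spelling out from the _pad_to_alignment hunk: (-len(x)) % multiple_of is the number of zeros needed to round the length up to the next multiple of multiple_of, and 0 when it is already aligned. A simplified sketch covering only the flat-list case, with an illustrative helper name:

def pad_to_alignment(x, multiple_of):
    # (-len(x)) % multiple_of counts how many zeros are needed to reach
    # the next multiple; it is 0 when len(x) is already aligned.
    return x + [0] * ((-len(x)) % multiple_of)


print(pad_to_alignment([1, 2, 3], 8))     # [1, 2, 3, 0, 0, 0, 0, 0]
print(pad_to_alignment([1, 2, 3, 4], 4))  # [1, 2, 3, 4]  (already aligned)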
