
Commit cc20c69

Fix import formatting issues
1 parent 5eb0fe6 commit cc20c69

37 files changed (+140, -157 lines)
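The diffs below regroup imports into standard-library, third-party, and local blocks, alphabetize them, and wrap long import lists in grid-style parentheses. The commit does not name the tool that produced this; the style matches isort's default settings, so the snippet below is a minimal sketch, assuming isort, of how the same reordering can be reproduced. The `messy` sample string is made up for illustration.

# A minimal sketch, assuming isort produced the formatting seen in this commit
# (the commit message does not confirm the tool or its configuration).
import isort

messy = (
    "from typing import Optional, Dict, Any, Union, Tuple\n"
    "import torch\n"
    "from transformers import PreTrainedModel, AutoTokenizer\n"
    "from ..utils.logger import logger\n"
)

# isort.code() returns the reformatted source as a string, with imports
# grouped (stdlib, third-party, local) and alphabetized within each group.
print(isort.code(messy))

On a checkout, the usual invocation would be `isort quantllm/` from the repository root, assuming default settings.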

.github/workflows/docs.yml

Lines changed: 0 additions & 42 deletions
This file was deleted.

quantllm/__init__.py

Lines changed: 8 additions & 19 deletions
@@ -1,23 +1,12 @@
-from .data import LoadDataset, DatasetPreprocessor, DatasetSplitter, DataLoader
-from .trainer import FineTuningTrainer, ModelEvaluator
-from .hub import HubManager, CheckpointManager
-from .utils import (
-    get_optimal_training_settings,
-    configure_logging,
-    enable_logging,
-    QuantizationBenchmark,
-)
 from .api import QuantLLM
-
-from .quant import (
-    QuantizationConfig,
-    QuantizationEngine,
-    QuantizedLinear,
-    GGUFQuantizer,
-)
-
-
-from .config import ModelConfig, DatasetConfig, TrainingConfig
+from .config import DatasetConfig, ModelConfig, TrainingConfig
+from .data import DataLoader, DatasetPreprocessor, DatasetSplitter, LoadDataset
+from .hub import CheckpointManager, HubManager
+from .quant import (GGUFQuantizer, QuantizationConfig, QuantizationEngine,
+                    QuantizedLinear)
+from .trainer import FineTuningTrainer, ModelEvaluator
+from .utils import (QuantizationBenchmark, configure_logging, enable_logging,
+                    get_optimal_training_settings)

 # Configure package-wide logging
 configure_logging()

quantllm/api/high_level.py

Lines changed: 7 additions & 4 deletions
@@ -1,9 +1,12 @@
-from typing import Optional, Dict, Any, Union, Tuple
+from typing import Any, Dict, Optional, Tuple, Union
+
 import torch
-from transformers import PreTrainedModel, AutoTokenizer
-from ..quant.gguf import GGUFQuantizer, SUPPORTED_GGUF_BITS, SUPPORTED_GGUF_TYPES
-from ..utils.logger import logger
+from transformers import AutoTokenizer, PreTrainedModel
+
+from ..quant.gguf import (SUPPORTED_GGUF_BITS, SUPPORTED_GGUF_TYPES,
+                          GGUFQuantizer)
 from ..utils.benchmark import QuantizationBenchmark
+from ..utils.logger import logger


 class QuantLLM:

quantllm/cli/__init__.py

Lines changed: 1 addition & 1 deletion
@@ -1,6 +1,6 @@
 """Command-line interface for QuantLLM package."""

-from .commands import train, evaluate, quantize, serve
+from .commands import evaluate, quantize, serve, train
 from .parser import create_parser

 __all__ = ["main", "train", "evaluate", "quantize", "serve"]

quantllm/cli/commands.py

Lines changed: 4 additions & 3 deletions
@@ -1,9 +1,10 @@
 from argparse import Namespace
-from ..models import ModelLoader, QuantConfig
+
+from ..config import DatasetConfig, ModelConfig, TrainingConfig
 from ..data import DatasetLoader
-from ..training import FineTuningTrainer, ModelEvaluator
-from ..config import ModelConfig, TrainingConfig, DatasetConfig
+from ..models import ModelLoader, QuantConfig
 from ..runtime import DeviceManager
+from ..training import FineTuningTrainer, ModelEvaluator
 from ..utils.logger import logger

quantllm/cli/parser.py

Lines changed: 2 additions & 1 deletion
@@ -1,6 +1,7 @@
 import argparse
 from typing import Any
-from .commands import train, evaluate, quantize, serve
+
+from .commands import evaluate, quantize, serve, train


 def create_parser() -> argparse.ArgumentParser:

quantllm/config/__init__.py

Lines changed: 2 additions & 2 deletions
@@ -1,6 +1,6 @@
 from .config_manager import ConfigManager
-from .training_config import TrainingConfig
-from .model_config import ModelConfig
 from .dataset_config import DatasetConfig
+from .model_config import ModelConfig
+from .training_config import TrainingConfig

 __all__ = ["ConfigManager", "TrainingConfig", "ModelConfig", "DatasetConfig"]

quantllm/config/config_manager.py

Lines changed: 4 additions & 2 deletions
@@ -1,7 +1,9 @@
-import yaml
 import json
-from typing import Dict, Any, Optional
 from pathlib import Path
+from typing import Any, Dict, Optional
+
+import yaml
+
 from ..utils.logger import logger


quantllm/config/dataset_config.py

Lines changed: 1 addition & 1 deletion
@@ -1,6 +1,6 @@
 from dataclasses import dataclass
-from typing import Optional, Dict, Any, List, Union
 from pathlib import Path
+from typing import Any, Dict, List, Optional, Union


 @dataclass

quantllm/config/model_config.py

Lines changed: 1 addition & 1 deletion
@@ -1,6 +1,6 @@
 from dataclasses import dataclass
-from typing import Optional, Dict, Any, List
 from pathlib import Path
+from typing import Any, Dict, List, Optional


 @dataclass

quantllm/config/training_config.py

Lines changed: 1 addition & 1 deletion
@@ -1,6 +1,6 @@
 from dataclasses import dataclass
-from typing import Optional, List, Dict, Any
 from pathlib import Path
+from typing import Any, Dict, List, Optional


 @dataclass

quantllm/data/__init__.py

Lines changed: 2 additions & 2 deletions
@@ -1,6 +1,6 @@
-from .load_dataset import LoadDataset
+from .dataloader import DataLoader
 from .dataset_preprocessor import DatasetPreprocessor
 from .dataset_splitter import DatasetSplitter
-from .dataloader import DataLoader
+from .load_dataset import LoadDataset

 __all__ = ["LoadDataset", "DatasetPreprocessor", "DatasetSplitter", "DataLoader"]

quantllm/data/dataloader.py

Lines changed: 5 additions & 2 deletions
@@ -1,7 +1,10 @@
-from typing import Optional, Union, Dict, Any
+from typing import Any, Dict, Optional, Union
+
 import torch
-from torch.utils.data import DataLoader as TorchDataLoader, Dataset, TensorDataset
 from datasets import Dataset as HFDataset
+from torch.utils.data import DataLoader as TorchDataLoader
+from torch.utils.data import Dataset, TensorDataset
+
 from .dataset_preprocessor import DatasetPreprocessor


quantllm/data/dataset_preprocessor.py

Lines changed: 6 additions & 4 deletions
@@ -1,10 +1,12 @@
+import logging
+import warnings
+from typing import Any, Callable, Dict, Optional, Tuple
+
 from datasets import Dataset
-from typing import Optional, Dict, Any, Callable, Tuple
+from tqdm.auto import tqdm
 from transformers import PreTrainedTokenizer
+
 from ..utils.logger import logger
-from tqdm.auto import tqdm
-import logging
-import warnings

 # Disable unnecessary logging
 logging.getLogger("tokenizers").setLevel(logging.ERROR)

quantllm/data/dataset_splitter.py

Lines changed: 5 additions & 3 deletions
@@ -1,9 +1,11 @@
-from datasets import Dataset, DatasetDict
+import logging
 from typing import Optional, Tuple, Union
+
 import numpy as np
-from ..utils.logger import logger
+from datasets import Dataset, DatasetDict
 from tqdm.auto import tqdm
-import logging
+
+from ..utils.logger import logger

 # Configure logging
 logging.getLogger("datasets").setLevel(logging.WARNING)

quantllm/data/load_dataset.py

Lines changed: 7 additions & 5 deletions
@@ -1,10 +1,12 @@
-from datasets import load_dataset, Dataset
-from typing import Optional, Dict, Any, Union
-import os
-from ..utils.logger import logger
-from tqdm.auto import tqdm
 import logging
+import os
 import warnings
+from typing import Any, Dict, Optional, Union
+
+from datasets import Dataset, load_dataset
+from tqdm.auto import tqdm
+
+from ..utils.logger import logger

 # Disable unnecessary logging
 logging.getLogger("datasets").setLevel(logging.WARNING)

quantllm/hub/__init__.py

Lines changed: 1 addition & 1 deletion
@@ -1,4 +1,4 @@
-from .hub_manager import HubManager
 from .checkpoint_manager import CheckpointManager
+from .hub_manager import HubManager

 __all__ = ["HubManager", "CheckpointManager"]

quantllm/hub/checkpoint_manager.py

Lines changed: 2 additions & 1 deletion
@@ -1,7 +1,8 @@
 import os
 import shutil
-from typing import Optional, Dict, Any
 from datetime import datetime
+from typing import Any, Dict, Optional
+
 from ..utils.logger import logger


quantllm/hub/hub_manager.py

Lines changed: 5 additions & 3 deletions
@@ -1,7 +1,9 @@
-from huggingface_hub import login, HfApi, Repository
-from typing import Optional, Dict, Any
-from ..utils.logger import logger
 import os
+from typing import Any, Dict, Optional
+
+from huggingface_hub import HfApi, Repository, login
+
+from ..utils.logger import logger


 class HubManager:

quantllm/quant/__init__.py

Lines changed: 2 additions & 1 deletion
@@ -1,7 +1,8 @@
 """GGUF Quantization for Large Language Models."""

-from .quantization_engine import QuantizationConfig, QuantizedLinear, QuantizationEngine
 from .gguf import GGUFQuantizer
+from .quantization_engine import (QuantizationConfig, QuantizationEngine,
+                                  QuantizedLinear)

 __all__ = [
     "QuantizationConfig",

quantllm/quant/formats.py

Lines changed: 2 additions & 1 deletion
@@ -1,7 +1,8 @@
 """Support for advanced quantization formats."""

 from dataclasses import dataclass
-from typing import Optional, Dict, Any, Literal
+from typing import Any, Dict, Literal, Optional
+
 import torch


quantllm/quant/gguf.py

Lines changed: 8 additions & 10 deletions
@@ -2,19 +2,17 @@

 import gc
 import math
+import time
+from typing import Any, Dict, List, Optional, Tuple, Union
+
 import torch
 import torch.nn as nn
-from typing import Optional, Dict, Any, List, Union, Tuple
+from tqdm.auto import tqdm
 from transformers import PreTrainedModel
-from .quantization_engine import (
-    move_to_device,
-    BaseQuantizer,
-    QuantizationConfig,
-    QuantizedLinear,
-)
+
 from ..utils.logger import logger
-import time
-from tqdm.auto import tqdm
+from .quantization_engine import (BaseQuantizer, QuantizationConfig,
+                                  QuantizedLinear, move_to_device)

 try:
     import ctransformers
@@ -376,9 +374,9 @@ def convert_to_gguf(self, output_path: str):
         self.model.save_pretrained(temp_dir, safe_serialization=True)

         # Prepare conversion command
+        import os
         import subprocess
         import sys
-        import os

         # Try to find convert.py from llama.cpp
         convert_script = None

quantllm/quant/kernels.py

Lines changed: 3 additions & 2 deletions
@@ -1,11 +1,12 @@
 """Optimized Triton kernels for faster model operations."""

+import math
+from typing import Any, Dict, List, Optional, Union
+
 import torch
 import triton
 import triton.language as tl
-from typing import Dict, Any, Optional, List, Union
 from transformers import PreTrainedModel
-import math


 @triton.jit

quantllm/quant/quantization_engine.py

Lines changed: 7 additions & 9 deletions
@@ -1,17 +1,15 @@
 """Efficient model quantization engine."""

-from typing import Dict, Any, Optional, Union, List, Tuple
+import gc
+from typing import Any, Dict, List, Optional, Tuple, Union
+
+import numpy as np
 import torch
 import torch.nn as nn
-from transformers import (
-    PreTrainedModel,
-    AutoConfig,
-    AutoModelForCausalLM,
-    AutoTokenizer,
-)
-import numpy as np
+from transformers import (AutoConfig, AutoModelForCausalLM, AutoTokenizer,
+                          PreTrainedModel)
+
 from ..utils.logger import logger
-import gc


 def get_device_map(model: PreTrainedModel) -> Dict[str, torch.device]:

quantllm/quant/tests/helpers.py

Lines changed: 1 addition & 1 deletion
@@ -1,6 +1,6 @@
 import torch
 import torch.nn as nn
-from transformers import PreTrainedModel, PretrainedConfig
+from transformers import PretrainedConfig, PreTrainedModel


 class DummyConfig(PretrainedConfig):
