# python
import lightgbm as lgb
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
# Train a LightGBM multiclass classifier on the iris dataset and report
# hold-out accuracy.

# Load the iris dataset and hold out 20% of rows for evaluation.
iris = load_iris()
X_train, X_test, y_train, y_test = train_test_split(
    iris.data, iris.target, test_size=0.2, random_state=42
)

train_data = lgb.Dataset(X_train, label=y_train)

parameters = {
    'objective': 'multiclass',
    'num_class': 3,               # iris has exactly 3 target classes
    'metric': 'multi_logloss',
    'boosting_type': 'gbdt',
    'num_leaves': 31,
    'learning_rate': 0.05,
    'feature_fraction': 0.9,
}

# FIX: the `early_stopping_rounds=` keyword of lgb.train() was deprecated in
# LightGBM 3.x and removed in 4.0 (it raises TypeError there); early stopping
# is now configured through the callbacks mechanism.
# NOTE(review): early stopping evaluated on the training set itself will
# almost never trigger — pass a held-out validation Dataset in valid_sets
# to make it meaningful.
lgb_model = lgb.train(
    parameters,
    train_data,
    num_boost_round=100,
    valid_sets=[train_data],
    callbacks=[lgb.early_stopping(stopping_rounds=10)],
)

# predict() returns an (n_samples, num_class) probability array;
# take the most probable class index per row.
y_pred = lgb_model.predict(X_test).argmax(axis=1)

accuracy = accuracy_score(y_test, y_pred)
print(f"Accuracy: {accuracy}")
'데이터분석 > 머신러닝' 카테고리의 다른 글

| 제목 | 날짜 |
|---|---|
| 일반화된 분산팽창지수(Generalized VIF) (0) | 2023.09.13 |
| Multiclassification imbalance problem improvement (0) | 2023.08.02 |
| LGBM 이해 (0) | 2023.06.18 |
| 엔트로피 (0) | 2023.06.16 |
| TPE 알고리즘 (0) | 2023.06.12 |