Tuesday, 20 August 2024

GENERATE TEXTBLOB

# Install libraries

# !pip install tweet-preprocessor
# !pip install textblob
# !pip install wordcloud
# !pip install Sastrawi
# !pip install nltk
# !pip install googletrans==4.0.0-rc1
# !pip install scikit-learn

print('Start...')

# Import libraries
import pandas as pd
import re
import seaborn as sns
import matplotlib.pyplot as plt
import Sastrawi
from Sastrawi.StopWordRemover.StopWordRemoverFactory import StopWordRemoverFactory, StopWordRemover, ArrayDictionary
from Sastrawi.Stemmer.StemmerFactory import StemmerFactory
import preprocessor as p
from textblob import TextBlob
import nltk
from nltk.stem import PorterStemmer
from nltk.tokenize import word_tokenize
from googletrans import Translator
 
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import MultinomialNB 
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix, ConfusionMatrixDisplay
nltk.download('punkt')


# Slang-normalization dictionary
norm = {
    ' bgt ': ' banget ', ' gak ': ' tidak ', ' bs ': ' bisa ', ' emg ': ' memang ',
    ' yg ': ' yang ', ' g ': ' tidak ', ' udah': ' sudah ', ' gini ': ' begini ',
    ' d ': ' di ', ' ad ': ' ada ', ' bhw ': ' bahwa ', ' tp ': ' tetapi ',
    ' sy ': ' saya ', ' ga ': ' tidak ', ' bkt ': ' bukti ', ' jt ': ' juta ',
    ' ajah ': ' saja ', ' gw ': ' saya '
}

# Normalization, stopword removal, and stemming combined in a single function
def process_text(text):
    # Make sure the input is a string
    if not isinstance(text, str):
        text = str(text)

    # Basic cleaning: mentions, hashtags, retweet markers, URLs, punctuation
    text = re.sub(r'@[A-Za-z0-9_]+', '', text)
    text = re.sub(r'#\w+', '', text)
    text = re.sub(r'RT[\s]+', '', text)
    text = re.sub(r'https?://\S+', '', text)
    text = re.sub(r'[^A-Za-z0-9 ]', '', text)
    text = re.sub(r'\s+', ' ', text).strip().lower()

    # Normalize slang using the dictionary
    for key, value in norm.items():
        text = text.replace(key, value)

    # Remove the defined stopwords
    text_no_stopwords = stop_words_remover_new.remove(text)

    # Stemming
    stemmed_text = stemmer.stem(text_no_stopwords)

    return stemmed_text

# clean_string is still applied at the end of the pipeline for extra cleanup
def clean_string(text):
    text = text.strip()
    text = re.sub(r'\s+', ' ', text)

    # Remove specific words
    words_to_remove = ["ahok", "hari", "anies", "dgn", "dengan", "atau", "org", "orang", "utk", " nya", "pak", "djarot", "aku", "saya", "jkw", "prabowo", "jkt", "jakarta", "pks", "pdip", "sama"]
    pattern = re.compile(r'\b(' + '|'.join(words_to_remove) + r')\b', re.IGNORECASE)
    text = pattern.sub('', text)

    # Remove numbers written as words and as digits
    numbers_in_words = ["satu", "dua", "tiga", "empat", "lima", "enam", "tujuh", "delapan", "sembilan", "nol", "ribu", "juta"]
    pattern = re.compile(r'\b(' + '|'.join(numbers_in_words) + r')\b', re.IGNORECASE)
    text = pattern.sub('', text)
    text = re.sub(r'\b\d+\b', '', text)

    # Remove words shorter than 3 characters
    text = ' '.join([word for word in text.split() if len(word) >= 3])

    # Collapse the extra whitespace left behind by the removals above
    text = re.sub(r'\d+', '', text)
    text = re.sub(r'\s+', ' ', text).strip()
    return text

# Initialize the StopWordRemover
more_stop_words = ["tidak", "buk"]
stop_words = StopWordRemoverFactory().get_stop_words()
stop_words.extend(more_stop_words)
new_array = ArrayDictionary(stop_words)
stop_words_remover_new = StopWordRemover(new_array)

# Initialize the Stemmer
factory = StemmerFactory()
stemmer = factory.create_stemmer()
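
For a quick sanity check, the two helpers can be chained on a single sentence. The sample tweet below is made up purely for illustration; the real input comes from cthdata.csv.

# Hypothetical sample, only to illustrate the pipeline defined above
sample = "RT @user: kinerja yg bgt bagus https://t.co/xyz #ekonomi"
print(process_text(sample))                  # normalized, stopword-free, stemmed
print(clean_string(process_text(sample)))    # plus extra word/number filtering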

#============================================================
# Read the dataset
df = pd.read_csv("cthdata.csv")
df = df[['text']]
print(df.head())
print(df.shape)

# Handle NaN and non-string values
df['text'] = df['text'].fillna('').astype(str)

# Run the full text-processing pipeline
df['text'] = df['text'].apply(lambda x: clean_string(process_text(x)))

# Save the result
df['text'].to_csv("DataStemFilter.csv", index=False)
print('Sukses normalisasi data...')
import time

def translate_with_retry(text, src='id', dest='en', max_retries=3):
    translator = Translator()  # create the client here so the function is self-contained
    for attempt in range(max_retries):
        try:
            return translator.translate(text, src=src, dest=dest).text
        except Exception as e:
            print(f"Attempt {attempt + 1} failed: {e}")
            time.sleep(1)  # wait 1 second before retrying
    return None

def translate_simple(tweet):
    translator = Translator()
    try:
        translated_text = translator.translate(tweet, src='id', dest='en').text
        return translated_text
    except Exception as e:
        print(f"Terjadi kesalahan dalam proses terjemahan: {e}")
        return None

# Load the normalized data (stopword removal + lemmatization + stemming)
data = pd.read_csv('DataStemFilter.csv')
data_tweet = list(data['text'])
polaritas = 0
status = []
full_text2 = []
polarity = []
total_positif = total_negatif = total_netral = total = 0

for i, tweet in enumerate(data_tweet):
    # Make sure each element is a string; convert if not
    if isinstance(tweet, float):
        tweet = str(tweet) if not pd.isna(tweet) else ""
    elif not isinstance(tweet, str):
        tweet = str(tweet)

    # Only analyze non-empty tweets
    if tweet:
        translated_text = translate_simple(tweet)
        #translated_text = translator.translate(tweet, src='id', dest='en').text
        #translated_text = translate_with_retry(tweet)

        # If translation succeeded, analyze it; otherwise append defaults
        if translated_text:
            full_text2.append(translated_text)
            # Build a TextBlob object from the translated text
            analysis = TextBlob(str(translated_text))
            polar = analysis.polarity
            polaritas += polar
            polarity.append(polar)
            if analysis.sentiment.polarity > 0.0:
                total_positif += 1
                status.append('Positif')
            elif analysis.sentiment.polarity == 0.0:
                total_netral += 1
                status.append('Netral')
            else:
                total_negatif += 1
                status.append('Negatif')
        else:
            # Translation failed: append default values
            full_text2.append(None)
            polarity.append(0)
            status.append('Tidak Ada Data')
    else:
        # Empty tweet: append default values
        full_text2.append(None)
        polarity.append(0)
        status.append('Tidak Ada Data')

    total += 1

# Make sure all lists are as long as the DataFrame
if len(full_text2) < len(data):
    missing_entries = len(data) - len(full_text2)
    full_text2.extend([None] * missing_entries)
    polarity.extend([0] * missing_entries)
    status.extend(['Tidak Ada Data'] * missing_entries)

# Add the translation, polarity, and classification columns to the DataFrame
data['translate'] = full_text2
data['polarity'] = polarity
data['klasifikasi'] = status

# Print the analysis summary
print(f'Hasil Analisis Data:\nPositif = {total_positif}\nNetral = {total_netral}\nNegatif = {total_negatif}')
print(f'\nTotal Data : {total}')
data.to_csv("DataAutoKlasifikasi.csv", index=False)
print('Sukses update dataset...')


sns.set_theme()
labels = ['Positif', 'Netral', 'Negatif']
counts = [total_positif, total_netral, total_negatif]

def show_bar(labels, counts):
    plt.figure(figsize=(8, 6))
    sns.barplot(x=labels, y=counts)
    plt.title('Distribusi Sentimen')
    plt.xlabel('Kategori Sentimen')
    plt.ylabel('Jumlah')
    plt.show()

# Show the sentiment distribution as a bar plot
show_bar(labels, counts)


print('Isi dataset yang terbaru....')
dataNorm = pd.read_csv('DataAutoKlasifikasi.csv')
print(dataNorm)

 

NB KNN NLP

# Install libraries

# !pip install tweet-preprocessor
# !pip install textblob
# !pip install wordcloud
# !pip install Sastrawi
# !pip install nltk
# !pip install googletrans==4.0.0-rc1
# !pip install scikit-learn

print('Start...')
NF = 'dataset6.csv'
print('Start...' + NF)
# Import libraries
import pandas as pd
import re
import seaborn as sns
import matplotlib.pyplot as plt
import Sastrawi
from Sastrawi.StopWordRemover.StopWordRemoverFactory import StopWordRemoverFactory, StopWordRemover, ArrayDictionary
from Sastrawi.Stemmer.StemmerFactory import StemmerFactory
import preprocessor as p
from textblob import TextBlob
import nltk
from nltk.stem import PorterStemmer
from nltk.tokenize import word_tokenize
from googletrans import Translator
 
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import MultinomialNB 
from sklearn.svm import SVC  
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix, ConfusionMatrixDisplay
from sklearn.metrics import precision_score, recall_score, f1_score 
nltk.download('punkt')
from wordcloud import WordCloud, STOPWORDS



norm = {
    ' bgt ': ' banget ',
    ' gak ': ' tidak ',
    ' bs ': ' bisa ',
    ' emg ': ' memang ',
    ' yg ': ' yang ',
    ' g ': ' tidak ',
    ' udah': ' sudah ',
    ' gini ': ' begini ',
    ' d ': ' di ',
    ' ad ': ' ada ',
    ' bhw ': ' bahwa ',
    ' tp ': ' tetapi ',
    ' sy ': ' saya ',
    ' ga ': ' tidak ',
    ' bkt ': ' bukti ',
    ' jt ': ' juta ',
    ' ajah ': ' saja ',
    ' gw ': ' saya '
}
def clean_twitter_text(text):
    text = re.sub(r'@[A-Za-z0-9_]+', '', text)
    text = re.sub(r'#\w+', '', text)
    text = re.sub(r'RT[\s]+', '', text)
    text = re.sub(r'https?://\S+', '', text)
    text = re.sub(r'[^A-Za-z0-9 ]', '', text)
    text = re.sub(r'\s+', ' ', text).strip()
    return text
 
def lemhatizer(str_text):
    # Dictionary-based slang normalization using the norm map above
    for i in norm:
        str_text = str_text.replace(i, norm[i])
    return str_text


def stopword(str_text):
    # Remove Indonesian stopwords using the Sastrawi remover
    str_text = stop_words_remover_new.remove(str_text)
    return str_text


def clean_string(text):
    # Trim leading/trailing whitespace
    text = text.strip()

    # Collapse repeated whitespace
    text = re.sub(r'\s+', ' ', text)

    # Remove specific words
    words_to_remove = ["jokowi", "hari", "anies", "dgn", "dengan", "atau", "org", "orang", "utk", " nya", "pak", "djarot", "aku", "saya", "jkw", "prabowo", "jkt", "jakarta", "pks", "pdip", "sama"]
    pattern = re.compile(r'\b(' + '|'.join(words_to_remove) + r')\b', re.IGNORECASE)
    text = pattern.sub('', text)

    # Remove numbers written as words and as digits
    numbers_in_words = ["satu", "dua", "tiga", "empat", "lima", "enam", "tujuh", "delapan", "sembilan", "nol", "ribu", "juta"]
    pattern = re.compile(r'\b(' + '|'.join(numbers_in_words) + r')\b', re.IGNORECASE)
    text = pattern.sub('', text)
    text = re.sub(r'\b\d+\b', '', text)

    # Remove words shorter than 3 characters
    text = ' '.join([word for word in text.split() if len(word) >= 3])

    # Collapse the extra whitespace left behind by the removals above
    text = re.sub(r'\d+', '', text)
    text = re.sub(r'\s+', ' ', text).strip()
    return text
 
def stemming(text_cleaning):
    factory = StemmerFactory()
    stemmer = factory.create_stemmer()

    # text_cleaning is assumed to be a string
    words = text_cleaning.split()  # split the text into words
    stemmed_words = []

    for word in words:
        stemmed_word = stemmer.stem(word)      # stem each word
        stemmed_word = stopword(stemmed_word)  # drop it if it is a stopword
        if stemmed_word:  # keep non-empty results
            stemmed_words.append(stemmed_word)

    # Join the stemmed words back together
    d_clean = " ".join(stemmed_words)
    d_clean = clean_string(d_clean)  # final string cleanup

    return d_clean

def plot_cloud(wordcloud):
    plt.figure(figsize=(10, 8))
    plt.imshow(wordcloud, interpolation='bilinear')
    plt.axis("off")
    plt.show()
dataNorm = pd.read_csv(NF) 
all_words = ' '.join([text for text in dataNorm['text'].astype(str)])
#full_text
wordcloud = WordCloud(
    width=3000,
    height=2000,
    random_state=3,
    background_color='black',
    colormap='RdPu',
    collocations=False,
    stopwords=STOPWORDS
).generate(all_words)

plot_cloud(wordcloud)

data = dataNorm
########################
# Make sure the text column is string typed
data['full_text'] = data['text'].astype(str)
# Drop rows classified as "Tidak Ada Data"
data = data[data['klasifikasi'] != 'Tidak Ada Data']
# Preprocessing: convert the text into TF-IDF feature vectors
vectorizer = TfidfVectorizer()
X = vectorizer.fit_transform(data['full_text'])
y = data['klasifikasi']
# Split into training and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

#-----------------------------------------------------
# Train the Naive Bayes model
model_nb = MultinomialNB()
model_nb.fit(X_train, y_train)

# Predict on the test set and compute the metrics
y_pred = model_nb.predict(X_test)
accuracy = accuracy_score(y_test, y_pred)
precision = precision_score(y_test, y_pred, average='weighted', zero_division=0)
recall = recall_score(y_test, y_pred, average='weighted', zero_division=0)
f1 = f1_score(y_test, y_pred, average='weighted', zero_division=0)

print(f"Akurasi: {accuracy * 100:.2f}%")
print(f"Presisi: {precision * 100:.2f}%")
print(f"Recall: {recall * 100:.2f}%")
print(f"F1 Score: {f1 * 100:.2f}%")
print("\nLaporan Klasifikasi NB:")
print(classification_report(y_test, y_pred, zero_division=0))
#-----------------------------------------------------
# Confusion matrix
cm = confusion_matrix(y_test, y_pred)
print(cm)
cmd = ConfusionMatrixDisplay(confusion_matrix=cm, display_labels=model_nb.classes_)
# Plot the confusion matrix
cmd.plot(cmap=plt.cm.Blues)
plt.title('Confusion Matrix NBC')
plt.show()

data = dataNorm
########################
# Rebuild the TF-IDF features and split (same preprocessing as for NB)
data['full_text'] = data['text'].astype(str)
data = data[data['klasifikasi'] != 'Tidak Ada Data']
vectorizer = TfidfVectorizer()
X = vectorizer.fit_transform(data['full_text'])
y = data['klasifikasi']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
#-----------------------------------------------------
# Train the SVM model
svm_model = SVC(kernel='linear')
svm_model.fit(X_train, y_train)

y_pred = svm_model.predict(X_test)

accuracy = accuracy_score(y_test, y_pred)
precision = precision_score(y_test, y_pred, average='weighted', zero_division=0)
recall = recall_score(y_test, y_pred, average='weighted', zero_division=0)
f1 = f1_score(y_test, y_pred, average='weighted', zero_division=0)

print(f'Akurasi model SVM: {accuracy * 100:.2f}%')
print(f"Presisi: {precision * 100:.2f}%")
print(f"Recall: {recall * 100:.2f}%")
print(f"F1 Score: {f1 * 100:.2f}%")
print("\nLaporan Klasifikasi SVM:")
print(classification_report(y_test, y_pred, zero_division=0))
 
#-----------------------------------------------------
# Confusion matrix
cm = confusion_matrix(y_test, y_pred)
print(cm)
cmd = ConfusionMatrixDisplay(confusion_matrix=cm, display_labels=svm_model.classes_)

# Plot the confusion matrix
cmd.plot(cmap=plt.cm.Blues)
plt.title('Confusion Matrix SVM')
plt.show()
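
The section title mentions KNN and KNeighborsClassifier is imported above, but no KNN model is actually trained in the code. A minimal sketch on the same TF-IDF split could look like the following; n_neighbors=5 is an assumed value, not taken from the original.

# Sketch only: KNN baseline on the same TF-IDF features; n_neighbors=5 is an assumed default
knn_model = KNeighborsClassifier(n_neighbors=5)
knn_model.fit(X_train, y_train)
y_pred_knn = knn_model.predict(X_test)
print(f"Akurasi model KNN: {accuracy_score(y_test, y_pred_knn) * 100:.2f}%")
print(classification_report(y_test, y_pred_knn, zero_division=0))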

# Test sentence classification with NB and SVM
#new_string0 = "bhima yudhistira harap kinerja tim ekonomi amin iklim investasi"   # alternative test sentence
new_string0 = "kami berharap akan kinerjanya bertim perekonomian mengaminkan beriklim investasinya"
new_string = clean_twitter_text(new_string0)
new_string = new_string.lower()  # case folding

stop_words = StopWordRemoverFactory().get_stop_words()
new_array = ArrayDictionary(stop_words)
stop_words_remover_new = StopWordRemover(new_array)

new_string = lemhatizer(new_string)
new_string = stop_words_remover_new.remove(new_string)
new_string = stemming(new_string)
new_string_vector = vectorizer.transform([new_string])
new_prediction1 = model_nb.predict(new_string_vector)
new_prediction2 = svm_model.predict(new_string_vector)

print(new_string0)
print(new_string)
print(f'Kategori untuk Kalimat Uji / NB: {new_prediction1[0]}')
print(f'Kategori untuk Kalimat Uji / SVM: {new_prediction2[0]}')


 

Saturday, 20 July 2024

Indonesia Time in Python

!pip install pytz

from datetime import datetime
import pytz
from time import gmtime, strftime, sleep

import warnings
# Hide all warnings
warnings.filterwarnings("ignore")
# Hide specific warnings, e.g. DeprecationWarning, UserWarning, or everything from sklearn
warnings.filterwarnings("ignore", category=DeprecationWarning)
warnings.filterwarnings("ignore", category=UserWarning)
warnings.filterwarnings("ignore", module="sklearn")

jakarta_tz = pytz.timezone('Asia/Jakarta')
jakarta_time = datetime.now(jakarta_tz)
time_str = jakarta_time.strftime("%H:%M:%S")
print("Waktu di Indonesia/Jakarta:", time_str)


tgl = strftime("%Y-%m-%d", gmtime())  # date string (UTC)
jam = strftime("%H:%M:%S", gmtime())  # time string (UTC)
jakarta_tz = pytz.timezone('Asia/Jakarta')
jakarta_time = datetime.now(jakarta_tz)
jam = jakarta_time.strftime("%H:%M:%S")  # overwrite with Jakarta local time
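
Wrapping this in a small helper keeps the timezone handling in one place. A minimal sketch; the function name jakarta_now is mine, not part of the original code:

def jakarta_now():
    # Returns (date, time) strings in Asia/Jakarta local time
    now = datetime.now(pytz.timezone('Asia/Jakarta'))
    return now.strftime("%Y-%m-%d"), now.strftime("%H:%M:%S")

tgl, jam = jakarta_now()
print(tgl, jam)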

+++++++++++++
Example usage:

def updateDB(cursor, db, idx):
    sql = "UPDATE `tb_temp` SET `status`='1',`status2`='1' WHERE id='%s'" % (idx)
    #print(sql)
    v = 0
    try:
        cursor.execute(sql)
        db.commit()
        v = 1
    except:
        db.rollback()
    return v
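
String-formatted SQL like the above breaks on quotes in the data and is open to injection. A safer sketch of the same update, passing parameters to cursor.execute (MySQLdb substitutes the %s placeholders itself):

def updateDB_safe(cursor, db, idx):
    # Parameterized version of the same UPDATE on tb_temp
    sql = "UPDATE `tb_temp` SET `status`='1', `status2`='1' WHERE id = %s"
    try:
        cursor.execute(sql, (idx,))
        db.commit()
        return 1
    except Exception:
        db.rollback()
        return 0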


# In[3]:



#pathme='E:\xampp\htdocs\appPlatkendaraan\ypathfile'
pathme='E:\\xampp\\htdocs\\appPlatkendaraan\\ypathfile'

sfr = SimpleFacerec()
AL=pathme+"/dataset/"
print (AL)
sfr.load_encoding_images(AL) 


# In[ ]:


IMG=""
kon=True
db = MySQLdb.connect("localhost","root","","24_eparkir")
cursor = db.cursor()

while (kon):  
    ada=0
    ID=''
    gambar1=''
    gambar2=''
    NAME1=''
    NAME2=''

    try:
        cursor.execute("SELECT `id`,`gambar1`,`gambar2` FROM `tb_temp` where `status`='0' order by `id` desc limit 0,1")
        db.commit()
        for row in cursor.fetchall():
            ID=row[0]
            gambar1=row[1]
            gambar2=row[2] 
            ada=1 

            AL=pathme+'/'+gambar1
            print("Face:"+AL)
            frame= cv2.imread(r''+AL)
            face_locations, face_names = sfr.detect_known_faces(frame)
            for face_loc, name in zip(face_locations, face_names):
                y1, x2, y2, x1 = face_loc[0], face_loc[1], face_loc[2], face_loc[3]
                
                ar=name.split('-')
                NAME1=ar[0]
                print ('GET1='+NAME1)
                #updateDB(cursor,db,ID)
                #kon=False
            ###################################

            AL=pathme+'/'+gambar2
            print("Plat:"+AL)
            img = cv.imread(r''+AL)

            img = cv.resize(img, (int(img.shape[1]*.4),int(img.shape[0]*.4)))
            img_gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)  
            kernel = cv.getStructuringElement(cv.MORPH_ELLIPSE,(20,20))
            img_opening = cv.morphologyEx(img_gray, cv.MORPH_OPEN, kernel)
            img_norm = img_gray - img_opening
            (thresh, img_norm_bw) = cv.threshold(img_norm, 0, 255, cv.THRESH_BINARY + cv.THRESH_OTSU)
            (thresh, img_without_norm_bw) = cv.threshold(img_gray, 0, 255, cv.THRESH_BINARY + cv.THRESH_OTSU)

            contours_vehicle, hierarchy = cv.findContours(img_norm_bw, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE) 
            index_plate_candidate = []
            index_counter_contour_vehicle = 0

            for contour_vehicle in contours_vehicle:
                x,y,w,h = cv.boundingRect(contour_vehicle)
                aspect_ratio = w/h
                if w >= 200 and aspect_ratio <= 4 : 
                    index_plate_candidate.append(index_counter_contour_vehicle)
                index_counter_contour_vehicle += 1

            img_show_plate = img.copy() 
            img_show_plate_bw = cv.cvtColor(img_norm_bw, cv.COLOR_GRAY2RGB)

            if len(index_plate_candidate) == 0:
                print("Plat nomor tidak ditemukan")
                img_plate_gray = img_gray  # fallback: use the whole image so the steps below still run
            elif len(index_plate_candidate) == 1:
                x_plate,y_plate,w_plate,h_plate = cv.boundingRect(contours_vehicle[index_plate_candidate[0]])
                cv.rectangle(img_show_plate,(x_plate,y_plate),(x_plate+w_plate,y_plate+h_plate),(0,255,0),5)
                cv.rectangle(img_show_plate_bw,(x_plate,y_plate),(x_plate+w_plate,y_plate+h_plate),(0,255,0),5)
                img_plate_gray = img_gray[y_plate:y_plate+h_plate, x_plate:x_plate+w_plate]
            else:
                #print('Dapat dua lokasi plat, pilih lokasi plat kedua')
                x_plate,y_plate,w_plate,h_plate = cv.boundingRect(contours_vehicle[index_plate_candidate[1]])
                cv.rectangle(img_show_plate,(x_plate,y_plate),(x_plate+w_plate,y_plate+h_plate),(0,255,0),5)
                cv.rectangle(img_show_plate_bw,(x_plate,y_plate),(x_plate+w_plate,y_plate+h_plate),(0,255,0),5)
                img_plate_gray = img_gray[y_plate:y_plate+h_plate, x_plate:x_plate+w_plate]

            (thresh, img_plate_bw) = cv.threshold(img_plate_gray, 0, 255, cv.THRESH_BINARY + cv.THRESH_OTSU)
            kernel = cv.getStructuringElement(cv.MORPH_CROSS, (3,3))
            img_plate_bw = cv.morphologyEx(img_plate_bw, cv.MORPH_OPEN, kernel) # apply morph open
            contours_plate, hierarchy = cv.findContours(img_plate_bw, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE) 

            index_chars_candidate = [] #index
            index_counter_contour_plate = 0 #idx
            img_plate_rgb = cv.cvtColor(img_plate_gray,cv.COLOR_GRAY2BGR)
            img_plate_bw_rgb = cv.cvtColor(img_plate_bw, cv.COLOR_GRAY2RGB)
            for contour_plate in contours_plate:
                x_char,y_char,w_char,h_char = cv.boundingRect(contour_plate)
                if h_char >= 40 and h_char <= 60 and w_char >=10:
                    index_chars_candidate.append(index_counter_contour_plate)
                    cv.rectangle(img_plate_rgb,(x_char,y_char),(x_char+w_char,y_char+h_char),(0,255,0),5)
                    cv.rectangle(img_plate_bw_rgb,(x_char,y_char),(x_char+w_char,y_char+h_char),(0,255,0),5)

                index_counter_contour_plate += 1

            plate_number = ''  # default so plate_number exists even if no characters are segmented
            if index_chars_candidate == []:
                print('Karakter tidak tersegmentasi')
            else:
                score_chars_candidate = np.zeros(len(index_chars_candidate))
                counter_index_chars_candidate = 0
                for chars_candidateA in index_chars_candidate:
                    xA,yA,wA,hA = cv.boundingRect(contours_plate[chars_candidateA])
                    for chars_candidateB in index_chars_candidate:
                        if chars_candidateA == chars_candidateB:
                            continue
                        else: 
                            xB,yB,wB,hB = cv.boundingRect(contours_plate[chars_candidateB]) 
                            y_difference = abs(yA - yB)
                            if y_difference < 11:
                                score_chars_candidate[counter_index_chars_candidate] = score_chars_candidate[counter_index_chars_candidate] + 1 

                    counter_index_chars_candidate += 1

                index_chars = [] 
                chars_counter = 0 
                for score in score_chars_candidate:
                    if score == max(score_chars_candidate): 
                        index_chars.append(index_chars_candidate[chars_counter])
                    chars_counter += 1

                x_coors = []
                for char in index_chars:
                    x, y, w, h = cv.boundingRect(contours_plate[char])
                    x_coors.append(x) 
                x_coors = sorted(x_coors) 
                index_chars_sorted = []
                for x_coor in x_coors:
                    for char in index_chars:
                        x, y, w, h = cv.boundingRect(contours_plate[char])
                        if x_coors[x_coors.index(x_coor)] == x:
                            index_chars_sorted.append(char) 

                num_plate = []
                for char_sorted in index_chars_sorted:
                    x,y,w,h = cv.boundingRect(contours_plate[char_sorted])
                    char_crop = cv.cvtColor(img_plate_bw[y:y+h,x:x+w], cv.COLOR_GRAY2BGR)
                    char_crop = cv.resize(char_crop, (img_width, img_height))
                    img_array = keras.preprocessing.image.img_to_array(char_crop)
                    img_array = tf.expand_dims(img_array, 0)
                    predictions = model.predict(img_array)
                    score = tf.nn.softmax(predictions[0]) 
                    num_plate.append(class_names[np.argmax(score)])
                    #print(class_names[np.argmax(score)], end='')
                plate_number = ''
                for a in num_plate:
                    plate_number += a 

            print(plate_number)
            NAME2=plate_number
            print ('GET2='+NAME2)
            updateDB(cursor,db,ID)

            tgl = strftime("%Y-%m-%d", gmtime())  # date string
            jam = strftime("%H:%M:%S", gmtime())  # time string
            jakarta_tz = pytz.timezone('Asia/Jakarta')
            jakarta_time = datetime.now(jakarta_tz)
            jam = jakarta_time.strftime("%H:%M:%S")  # overwrite with Jakarta local time

            sql2="INSERT INTO `tb_lewat` (`id_lewat`, `tanggal`, `jam`, `gambar_wajah`, `id_pemilik`, `status_wajah`, `gambar_nomor`, `id_kendaraan`, `status_kendaraan`, `status_kepemilikan`, `keterangan`, `temp1`, `temp2`) VALUES ('', '%s', '%s', '%s', '%s', '%s','%s', '%s', '%s', '%s', '%s', '%s', '%s')" % (tgl,jam,gambar1,NAME1,'Proses',gambar2,NAME2,'Proses','','',NAME1,NAME2)
            print(sql2)
            try:
                cursor.execute(sql2)
                db.commit()
            except:
                db.rollback()
    except:
        print('No data....')
        sleep(1)  # brief pause before polling again (assumes `sleep` is imported, e.g. from time import sleep)

cursor.close()
db.close()


Sunday, 09 July 2023

Python 2023

 # import warnings
# #warnings.filterwarnings('ignore')
# warnings.filterwarnings("ignore", category=FutureWarning)

import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning) 


# import warnings
# with warnings.catch_warnings():
#     warnings.filterwarnings("ignore",category=DeprecationWarning)    
#ENV PYTHONWARNINGS="ignore::DeprecationWarning"
#logging.captureWarnings(True)

# from warnings import filterwarnings
# filterwarnings("ignore")


import warnings
def fxn():
    warnings.warn("deprecated", DeprecationWarning)

with warnings.catch_warnings():
    warnings.simplefilter("ignore")
    fxn()

==============================================

import numpy as np                 # needed by the helper functions below
import cv2                         # needed by rgb2gray2, resize, and lihat below
import matplotlib.pyplot as plt
import matplotlib.image as img


def uk(img):
    a = np.array(img)
    print(type(a))
    print(a.shape)


def uk0(img):
    # For a color image array, shape is (height, width, channels)
    height = len(img)
    width = len(img[0])
    dim = len(img[0][0])
    x = (width, ' x ', height, ' x ', dim)
    print(x)
    return x

def uk1(img):
    height = int(img.shape[0])
    width = int(img.shape[1])
    x = (width, ' x ', height)
    print(x)
    return x

def uk2(img):
    height = len(img)
    width = len(img[0])
    x = (width, ' x ', height)
    print(x)
    return x

def uk3(img):
    s = len(img)
    print(s)
    return s

def uk4(img):
    s = img.shape  # shape is an attribute, not a method
    print(s)
    return s

def model(img):
    x=type(img)
    print(x)
    return x

def rgb2gray1(rgb):
    return np.dot(rgb[...,:3], [0.2989, 0.5870, 0.1140])

def rgb2gray2(rgb):
    return cv2.cvtColor(rgb, cv2.COLOR_BGR2GRAY) 

def resize(img,b,c):
    dim = (b, c)
    resized = cv2.resize(img, dim, interpolation = cv2.INTER_AREA)
    return resized

def lihat(citra, label):
    asli = cv2.imread(citra)
    gray1 = rgb2gray1(asli)
    gray2 = rgb2gray2(asli)
    gray = gray2
    edges = cv2.Canny(gray, 100, 200)  # Canny thresholds (assumed values; the original used undefined width/height)

    fig = plt.figure()
    plt.subplot(1, 4, 1)
    plt.imshow(asli)
    plt.title('RGB '+label)

    plt.subplot(1, 4, 2)
    plt.imshow(gray1)
    plt.title('Gray1 '+label)

    plt.subplot(1, 4, 3)
    plt.imshow(gray2)
    plt.title('Gray2 '+label)

    plt.subplot(1, 4, 4)
    #plt.hist(gray2)
    plt.imshow(edges)
    plt.title('Edge '+label)

    plt.show()
    return 1

#####################################

# This block assumes `import os`, `import cv2`, and `from imutils import paths as path` (for path.list_images)
cwd = os.getcwd()
dataset_dir = cwd + '\\datatraining\\'
print(cwd)
imagePaths = sorted(list(path.list_images(dataset_dir)))

size = 128
labels = []
descs = []
data = []
train_set_files = os.listdir(dataset_dir)  # list of training file names
Kategori = set([f.split('_')[0] for f in train_set_files])
JD = len(train_set_files)
for i in range(JD):
    NF = train_set_files[i]
    AL = dataset_dir + NF
    label = NF.split('_')[0]
    #print(label + '=' + AL)
    img = cv2.imread(AL)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    h, w = gray.shape
    ymin, ymax, xmin, xmax = h//3, h*2//3, w//3, w*2//3
    crop = gray[ymin:ymax, xmin:xmax]                  # center crop
    #resize = cv2.resize(crop, (size, size))           # alternative: fixed 128x128
    resize = cv2.resize(crop, (0,0), fx=0.5, fy=0.5)   # downscale by half
    data.append(resize)
    labels.append(label)
    descs.append(AL)
    #print_progress(i, JD, AL)
print(Kategori)
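
To feed data and labels into a scikit-learn classifier, the cropped images need to become a 2-D feature matrix. A minimal sketch, assuming every resized crop ends up the same shape (true when the source images share one size):

import numpy as np
from sklearn.model_selection import train_test_split

# Sketch: flatten each grayscale crop into a 1-D feature vector
X = np.array([d.flatten() for d in data])
y = np.array(labels)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
print(X_train.shape, X_test.shape)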



#################################


import os
import MySQLdb
import time
from time import gmtime, strftime
from random import randint
import signal
from PIL import Image
import requests
import cv2
import cv2 as cv                      # the code below uses both the cv2 and cv aliases
import numpy as np
from matplotlib import pyplot as plt
from imutils import paths as path    # assumption: path.list_images comes from imutils


size  = 128
LOCALHOST="localhost"
ROOT="root"
PASS=""
DBASE="absen_pare"

db = MySQLdb.connect(LOCALHOST,ROOT,PASS,DBASE)
cursor = db.cursor()
print ("Koneksi Ke Database")

cwd = os.getcwd()
print('PWD='+cwd)
#dataset_dir=cwd+'\\datatraining\\'
dataset_dir='C:\\xampp7\\htdocs\\_2023\\NICO\\AbsenPare\\admin\\ypathfile\\'

imagePaths = sorted(list(path.list_images(dataset_dir)))
face_cascade = cv.CascadeClassifier('haarcascade_frontalface_default.xml') 
font=cv.FONT_HERSHEY_SIMPLEX
def uk(img):
    a = np.array(img)
    total = a.size
    print(str(type(img)), ' :', str(total), ' item')
    print(a.shape)

def uks(img):
    a = np.asarray(img)
    total = a.size
    print(str(type(img)), ' :', str(total), ' item')
    print(a.shape)

def getWajah(dataset_dir, NF, F1, F2):
    AL = dataset_dir + NF
    print(AL)
    img = cv.imread(AL)
    gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)

    roi = gray
    pathsimpan = dataset_dir.replace(F1, F2)
    faces = face_cascade.detectMultiScale(gray, 1.3, 5)
    for (x, y, w, h) in faces:
        #cv.rectangle(img, (x, y), (x+w, y+h), (255, 0, 0), 2)
        roi = gray[y:y+h, x:x+w]
        #myresize = cv.resize(roi, (0,0), fx=0.5, fy=0.5)

    GB = pathsimpan + NF
    print('PathGB:', GB)
    cv.imwrite(GB, roi)
    h, w = roi.shape
    ymin, ymax, xmin, xmax = h//3, h*2//3, w//3, w*2//3
    roi2 = roi[ymin:ymax, xmin:xmax]  # center crop
    myresize = cv.resize(roi2, (0,0), fx=0.5, fy=0.5)
    #myresize = cv.resize(roi2, (size, size))  # alternative: fixed 128x128
    return myresize

def updateDB(cursor, db, idx, status, dkolom1, dkolom2, nf):
    sql = "UPDATE `tb_absensi` SET `tag`='1',`res_masuk`='%s',`catatan_masuk`='%s',`foto_masuk2`='%s' WHERE `id_absensi`='%s'" % (dkolom1, dkolom2, nf, idx)
    if status == 'Pulang':
        sql = "UPDATE `tb_absensi` SET `tag`='1',`res_pulang`='%s',`catatan_pulang`='%s',`foto_pulang2`='%s' WHERE `id_absensi`='%s'" % (dkolom1, dkolom2, nf, idx)
    print(sql)
    v = 0
    try:
        cursor.execute(sql)
        db.commit()
        v = 1
    except:
        db.rollback()
    return v
    
def lastDB(cursor, db, idx):
    tgl = strftime("%Y-%m-%d", gmtime())  # date string
    jam = strftime("%H:%M:%S", gmtime())  # time string

    stgl = strftime("%Y%m%d", gmtime())   # for the file name
    sjam = strftime("%H%M%S", gmtime())   # for the file name
    NF = "Img" + stgl + sjam + ".jpg"
    cursor.execute("SELECT `id_absensi` FROM `tb_absensi` where `tag`='0' order by id_absensi desc limit 0,1")
    v=0
    for row in cursor.fetchall():
        v=row[0]

    return v



============================

new_string = string.replace("r", "e")

SET AND LIST:

Input : {1, 2, 3, 4}  #SET
Output : [1, 2, 3, 4]  #LIST
my_set = {'Geeks', 'for', 'geeks'}

s = list(my_set)
print(s)
NA = NA.astype(float)
import numpy as np

# list of strings
A = ['33.33', '33.33', '33.33', '33.37']
print(A)

# numpy array of strings
arr = np.array(A)
print(arr)

# numpy array of float32
arr = np.array(A, dtype=np.float32)
print(arr)

# post process
print(np.mean(arr), np.max(arr), np.min(arr))



import numpy as np
A = ['33.33', '33.33', '33.33', '33.37']
# convert to float (map needs list() in Python 3)
arr = np.array(list(map(float, A)))
# calc values
print(np.mean(arr), np.max(arr), np.min(arr))



To convert your strings to floats, the simplest way is a list comprehension:

A = ['33.33', '33.33', '33.33', '33.37']
floats = [float(e) for e in A]

Now you can convert to an array:

array_A = np.array(floats)

The rest is probably known to you:

mean, min, max = np.mean(array_A), np.min(array_A), np.max(array_A)




import numpy as np

A = ["33.33", "33.33", "33.33", "33.37"]
for i in range(0,len(A)):
    n = A[i]
    n=float(n)
    A[i] = n

NA = np.asarray(A)

AVG = np.mean(NA, axis=0)
maxx = max(A)
minn = min(A)

print (AVG)
print (maxx)
print (minn)










https://stackoverflow.com/questions/42663171/how-to-convert-a-list-of-strings-into-a-numeric-numpy-array

https://www.simplilearn.com/tutorials/python-tutorial/list-to-string-in-python

https://www.freecodecamp.org/news/python-string-to-array-how-to-convert-text-to-a-list/





birthdate = "19/10/1993"
birthdate_list = birthdate.split("/")

print(birthdate_list)
print(type(birthdate_list))

#output
#['19', '10', '1993']
#<class 'list'>

------------------------

birthdate = "19/10/1993"
birthdate_list = birthdate.split("/")
str_to_int = list(map(int, birthdate_list))

print(type(str_to_int))
print(str_to_int)

#output
#<class 'list'>
#[19, 10, 1993]