AimBot.py (forked from NTUYWANG103/APEX_AIMBOT)

import os
import time
import numpy as np
import pyautogui
import argparse
from pynput.mouse import Button, Listener
import cv2
from mss.windows import MSS as mss
from rich import print
from simple_pid import PID
from math import atan2
from mouse_driver.MouseMove import mouse_move
from utils.InferenceEngine import BaseEngine, precise_sleep
from tensorrt_python.export_to_trt import export_to_trt
import yaml
from multiprocessing import Process, Queue
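
# Pipeline overview: grab a square region around the screen center with mss,
# run TensorRT object detection on it, rank the detected enemies by distance
# to the crosshair, and move the mouse toward the best one with PID smoothing.
# Visualization and screenshot saving run in separate processes fed by queues.
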
class AimBot:
    def __init__(self, config_path='configs/default.yaml', onnx_path='weights/best.onnx', engine_path='weights/best.trt'):
        with open(config_path, 'r', encoding='UTF-8') as f:
            config = yaml.load(f, Loader=yaml.FullLoader)
        self.args = argparse.Namespace(**config)
        self.initialize_params()
        self.build_trt_model(onnx_path, engine_path)

        # visualization and screenshot workers run in separate processes
        self.q_visual, self.q_save = Queue(), Queue()
        if self.args.visualization:
            Process(target=self.visualization, args=(self.args, self.q_visual)).start()
        if self.args.save_screenshot:
            Process(target=self.save_screenshot, args=(self.q_save,)).start()

        # model settings
        self.engine = BaseEngine(engine_path)
        self.initialize_camera()
        if self.args.speed_test:
            self.speed_test()

        listener = Listener(on_click=self.on_click)
        listener.start()
        print('--------------------- Startup complete. ---------------------')

    def initialize_params(self):
        self.auto_lock = True
        self.locking = False

        # game/display settings
        self.detect_length = 640
        self.smooth = self.args.smooth * 1920 / self.args.resolution_x  # higher resolutions need smaller mouse moves
        self.scale = self.args.resolution_x / 1920  # hyperparameters are tuned at 1920x1080; scale pixel distances so every resolution behaves the same
        for key in self.args.__dict__:
            if 'dis' in key:
                self.args.__dict__[key] *= self.scale

        # mouse settings; simple_pid's signature is PID(Kp, Ki, Kd, ...), so the
        # gains are passed by keyword to keep each value on the intended term
        self.pidx = PID(Kp=self.args.pidx_kp, Ki=self.args.pidx_ki, Kd=self.args.pidx_kd, setpoint=0, sample_time=0.001)
        self.pidy = PID(Kp=self.args.pidy_kp, Ki=self.args.pidy_ki, Kd=self.args.pidy_kd, setpoint=0, sample_time=0.001)
        self.pidx(0), self.pidy(0)  # prime both controllers
        self.detect_center_x, self.detect_center_y = self.detect_length // 2, self.detect_length // 2

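    # A sketch of the keys this class reads from configs/default.yaml, collected
    # from the attribute accesses in this file. The values below are illustrative
    # placeholders, not the repository's shipped defaults:
    #   resolution_x: 1920      # horizontal game resolution
    #   smooth: 1.0             # aim smoothing factor (rescaled above)
    #   pidx_kp / pidx_ki / pidx_kd: 0.5 / 0.0 / 0.1   # x-axis PID gains
    #   pidy_kp / pidy_ki / pidy_kd: 0.5 / 0.0 / 0.1   # y-axis PID gains
    #   conf: 0.5               # detection confidence threshold
    #   pos_factor: 0.2         # aim-point offset toward the top of the box
    #   max_lock_dis: 200       # ignore targets farther than this (pixels)
    #   max_step_dis: 100       # clamp a single mouse step to this length
    #   max_pid_dis: 50         # inside this radius the PID takes over
    #   delay: 0.001            # per-frame sleep passed to precise_sleep
    #   label_list: [...]       # class id -> label name
    #   enemy_list: [...]       # labels treated as enemies
    #   auto_lock_button: x2    # button that toggles control on/off
    #   mouse_button_1: left    # buttons that engage aiming while held
    #   mouse_button_2: right
    #   visualization: false    # spawn the debug-window process
    #   save_screenshot: false  # spawn the screenshot-saver process
    #   speed_test: false       # benchmark capture/inference at startup
    #   print_button: false     # log every mouse-button event
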
    def build_trt_model(self, onnx_path, engine_path):
        if not os.path.exists(engine_path):
            print('------- Building the TensorRT engine for the first time; this takes a while (about 15 minutes). A message will be printed when it finishes. -------')
            export_to_trt(onnx=onnx_path, engine=engine_path)

    def initialize_camera(self):
        self.screen_width, self.screen_height = pyautogui.size()
        # center the square capture region on the screen
        self.top, self.left = self.screen_height // 2 - self.detect_length // 2, self.screen_width // 2 - self.detect_length // 2
        self.camera = mss()
        self.region = {"top": self.top, "left": self.left, "width": self.detect_length, "height": self.detect_length}

    def grab_screen(self):
        # mss returns a BGRA buffer; convert to the 3-channel RGB the model expects
        return cv2.cvtColor(np.asarray(self.camera.grab(self.region)), cv2.COLOR_BGRA2RGB)

    def on_click(self, x, y, button, pressed):
        # toggle auto_lock on the configured button
        if button == getattr(Button, self.args.auto_lock_button) and pressed:
            self.auto_lock = not self.auto_lock
            print(f"--------------------- Control is {'on' if self.auto_lock else 'off'}. ---------------------")
        # hold either configured mouse button to engage auto aim
        if button in [getattr(Button, self.args.mouse_button_1), getattr(Button, self.args.mouse_button_2)] and self.auto_lock:
            self.locking = pressed
            print('On...' if pressed else 'OFF')
        # print button presses for debugging
        if self.args.print_button:
            print(f'button {button.name} pressed')

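    # Note: auto_lock_button, mouse_button_1 and mouse_button_2 must be attribute
    # names of pynput.mouse.Button (e.g. 'left', 'right', 'middle', or the side
    # buttons 'x1'/'x2' on Windows), since they are resolved with getattr above.
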
    def speed_test(self):
        t = time.time()
        for _ in range(100):
            img = self.grab_screen()
        print(f'Average time for 100 screenshots: {(time.time() - t) / 100:.3f}s FPS: {100 / (time.time() - t):.3f}')
        t = time.time()
        for _ in range(100):
            self.engine.inference(img)
        print(f'Average time for 100 inferences: {(time.time() - t) / 100:.3f}s FPS: {100 / (time.time() - t):.3f}')
        t = time.time()
        for _ in range(100):
            self.forward()
        print(f'Average end-to-end time over 100 iterations: {(time.time() - t) / 100:.3f}s FPS: {100 / (time.time() - t):.3f}')

    def sort_target(self, boxes, confidences, classes):
        target_sort_list = []
        for box, conf, cls in zip(boxes, confidences, classes):
            label = self.args.label_list[cls]
            x1, y1, x2, y2 = box.tolist()
            # aim point: box center, shifted up by pos_factor of the box height
            target_x, target_y = (x1 + x2) / 2, (y1 + y2) / 2 - self.args.pos_factor * (y2 - y1)
            move_dis = ((target_x - self.detect_center_x) ** 2 + (target_y - self.detect_center_y) ** 2) ** 0.5
            if label in self.args.enemy_list and conf >= self.args.conf and move_dis < self.args.max_lock_dis:
                target_info = {'target_x': target_x, 'target_y': target_y, 'move_dis': move_dis, 'label': label, 'conf': conf}
                target_sort_list.append(target_info)
        # sort by label first, then by distance to the crosshair
        return sorted(target_sort_list, key=lambda x: (x['label'], x['move_dis']))

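    # Example: two valid enemy boxes at distances 120.5 and 42.0 from the crosshair
    # come back ordered [42.0, 120.5], so index 0 is always the nearest target of
    # the alphabetically-first enemy label.
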
    def get_move_dis(self, target_sort_list):
        # pick the target with the best (label, distance) ranking
        target_info = min(target_sort_list, key=lambda x: (x['label'], x['move_dis']))
        target_x, target_y, move_dis = target_info['target_x'], target_info['target_y'], target_info['move_dis']
        # relative movement needed to put the crosshair on the target
        move_rel_x = (target_x - self.detect_center_x) * self.smooth
        move_rel_y = (target_y - self.detect_center_y) * self.smooth
        if move_dis >= self.args.max_step_dis:
            # far away: clamp the movement to the maximum step distance
            move_rel_x = move_rel_x / move_dis * self.args.max_step_dis
            move_rel_y = move_rel_y / move_dis * self.args.max_step_dis
        elif move_dis <= self.args.max_pid_dis:
            # close in: let the PID controllers smooth the movement
            move_rel_x = self.pidx(atan2(-move_rel_x, self.detect_length) * self.detect_length)
            move_rel_y = self.pidy(atan2(-move_rel_y, self.detect_length) * self.detect_length)
        return move_rel_x, move_rel_y, move_dis

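    # Worked example of the three regimes above, assuming smooth = 1:
    #   move_dis = 300, max_step_dis = 100 -> the offset vector has length 300 and is
    #     rescaled to length 100, so distant targets are approached in clamped steps;
    #   move_dis <= max_pid_dis -> atan2(-dx, detect_length) * detect_length is
    #     approximately -dx for small offsets, i.e. a bounded, sign-flipped error the
    #     PID converts into a damped correction;
    #   anything in between -> the raw smoothed offset is used unchanged.
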
    def lock_target(self, target_sort_list):
        if len(target_sort_list) > 0 and self.locking:
            move_rel_x, move_rel_y, move_dis = self.get_move_dis(target_sort_list)
            mouse_move(move_rel_x, move_rel_y)
            self.pidx(0), self.pidy(0)  # feed zero error back so the PID does not oscillate between frames

    def visualization(self, args, queue):
        while True:
            # retrieve information from the queue
            if queue.qsize() > 0:
                while queue.qsize() >= 1:  # drain the queue, keeping only the latest frame
                    img, xyxy_list, conf_list, cls_list, target_sort_list, fps_track = queue.get()
                # draw FPS on the image
                cv2.putText(img, f'FPS: {fps_track:.2f}', (10, 30), 0, 0.7, (0, 255, 0), 2)
                # draw detected targets
                for xyxy, conf, cls in zip(xyxy_list, conf_list, cls_list):
                    cls_name = args.label_list[cls]
                    x1, y1, x2, y2 = map(int, xyxy.tolist())  # cv2 drawing needs integer coordinates
                    label = f'{cls_name} {conf:.2f}'
                    if conf > args.conf:
                        color = (255, 0, 0) if cls_name in args.enemy_list else (0, 255, 0)
                    else:
                        color = (0, 0, 255)
                    cv2.putText(img, label, (x1, y1 - 25), 0, 0.7, color, 2)
                    cv2.rectangle(img, (x1, y1), (x2, y2), color, 2)
                # draw the locked target
                if len(target_sort_list) > 0:
                    target_info = target_sort_list[0]
                    target_x, target_y, move_dis = target_info['target_x'], target_info['target_y'], target_info['move_dis']
                    cv2.circle(img, (int(target_x), int(target_y)), 5, (255, 0, 0), -1)
                    cv2.line(img, (int(self.detect_center_x), int(self.detect_center_y)), (int(target_x), int(target_y)), (255, 0, 0), 2)
                    cv2.putText(img, f'{move_dis:.2f}', (int(target_x), int(target_y)), 0, 0.7, (255, 0, 0), 2)
                # display the annotated frame
                cv2.imshow('Detection Window', cv2.cvtColor(img, cv2.COLOR_RGB2BGR))
                if cv2.waitKey(25) & 0xFF == ord('q'):
                    cv2.destroyAllWindows()

    @staticmethod
    def save_screenshot(queue, save_dir='screenshot', freq=0.5):
        if not os.path.exists(save_dir):
            os.makedirs(save_dir)
        start_time = time.time()
        while True:
            img, locking, nums = queue.get()
            # save a frame whenever we are locking or boxes were detected, at most once per `freq` seconds
            if (locking or nums > 0) and (time.time() - start_time >= freq):
                img_bgr = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
                cv2.imwrite(os.path.join(save_dir, f'{time.time():.5f}.png'), img_bgr)
                start_time = time.time()

    def forward(self):
        start_time = time.time()
        img = self.grab_screen()
        nums, boxes, confidences, classes = self.engine.inference(img)
        target_sort_list = self.sort_target(boxes, confidences, classes)
        self.lock_target(target_sort_list)
        fps_track = 1 / (time.time() - start_time)
        if self.args.save_screenshot:
            self.q_save.put([img, self.locking, nums])
        if self.args.visualization:
            self.q_visual.put([img, boxes, confidences, classes, target_sort_list, fps_track])
        precise_sleep(self.args.delay)

if __name__ == '__main__':
    apex = AimBot()
    while True:
        apex.forward()
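
# Usage sketch (paths are the constructor defaults; adjust them to your layout):
#   python AimBot.py
# On the first run, weights/best.onnx is compiled into a TensorRT engine at
# weights/best.trt, which the startup message warns can take about 15 minutes.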