micro_sam.sam_annotator.training_ui
import os
import warnings

from qtpy import QtWidgets
# from napari.qt.threading import thread_worker

import torch_em
from torch.utils.data import random_split

import micro_sam.util as util
import micro_sam.sam_annotator._widgets as widgets
from micro_sam.training.training import _find_best_configuration, _export_helper
from micro_sam.training import default_sam_dataset, train_sam_for_configuration, CONFIGURATIONS

from ._tooltips import get_tooltip


class TrainingWidget(widgets._WidgetBase):
    def __init__(self, parent=None):
        super().__init__(parent=parent)

        # Create the UI: the general options.
        self._create_options()

        # Add the settings (collapsible).
        self.layout().addWidget(self._create_settings())

        # Add the run button to trigger the training.
        self.run_button = QtWidgets.QPushButton("Start Training")
        self.run_button.clicked.connect(self.__call__)
        self.layout().addWidget(self.run_button)

    def _create_options(self):
        self.raw_path = None
        _, layout = self._add_path_param(
            "raw_path", self.raw_path, "both", placeholder="/path/to/images", title="Path to images",
            tooltip=get_tooltip("training", "raw_path")
        )
        self.layout().addLayout(layout)

        self.raw_key = None
        _, layout = self._add_string_param(
            "raw_key", self.raw_key, placeholder="e.g. \"*.tif\"", title="Image data key",
            tooltip=get_tooltip("training", "raw_key")
        )
        self.layout().addLayout(layout)

        self.label_path = None
        _, layout = self._add_path_param(
            "label_path", self.label_path, "both", placeholder="/path/to/labels", title="Path to labels",
            tooltip=get_tooltip("training", "label_path")
        )
        self.layout().addLayout(layout)

        self.label_key = None
        _, layout = self._add_string_param(
            "label_key", self.label_key, placeholder="e.g. \"*.tif\"", title="Label data key",
            tooltip=get_tooltip("training", "label_key")
        )
        self.layout().addLayout(layout)

        self.configuration = _find_best_configuration()
        self.setting_dropdown, layout = self._add_choice_param(
            "configuration", self.configuration, list(CONFIGURATIONS.keys()), title="Configuration",
            tooltip=get_tooltip("training", "configuration")
        )
        self.layout().addLayout(layout)

        self.with_segmentation_decoder = True
        self.layout().addWidget(self._add_boolean_param(
            "with_segmentation_decoder", self.with_segmentation_decoder, title="With segmentation decoder",
            tooltip=get_tooltip("training", "segmentation_decoder")
        ))

    def _create_settings(self):
        setting_values = QtWidgets.QWidget()
        setting_values.setLayout(QtWidgets.QVBoxLayout())

        # TODO: use the CPU instead of MPS on Mac, because training with MPS is slower!
        # Device and patch shape settings.
        self.device = "auto"
        device_options = ["auto"] + util._available_devices()
        self.device_dropdown, layout = self._add_choice_param(
            "device", self.device, device_options, title="Device", tooltip=get_tooltip("training", "device")
        )
        setting_values.layout().addLayout(layout)

        self.patch_x, self.patch_y = 512, 512
        self.patch_x_param, self.patch_y_param, layout = self._add_shape_param(
            ("patch_x", "patch_y"), (self.patch_x, self.patch_y), min_val=0, max_val=2048,
            tooltip=get_tooltip("training", "patch"), title=("Patch size x", "Patch size y")
        )
        setting_values.layout().addLayout(layout)

        # Paths for the validation data.
        self.raw_path_val = None
        _, layout = self._add_path_param(
            "raw_path_val", self.raw_path_val, "both", placeholder="/path/to/images",
            title="Path to validation images", tooltip=get_tooltip("training", "raw_path_val")
        )
        setting_values.layout().addLayout(layout)

        self.label_path_val = None
        _, layout = self._add_path_param(
            "label_path_val", self.label_path_val, "both", placeholder="/path/to/labels",
            title="Path to validation labels", tooltip=get_tooltip("training", "label_path_val")
        )
        setting_values.layout().addLayout(layout)

        # Name of the model to be trained and options to override the initial model
        # on top of which the finetuning is run.
        self.name = "sam_model"
        self.name_param, layout = self._add_string_param(
            "name", self.name, title="Model name", tooltip=get_tooltip("training", "name")
        )
        setting_values.layout().addLayout(layout)

        self.initial_model = None
        self.initial_model_param, layout = self._add_string_param(
            "initial_model", self.initial_model, title="Initial model", tooltip=get_tooltip("training", "initial_model")
        )
        setting_values.layout().addLayout(layout)

        self.checkpoint = None
        self.checkpoint_param, layout = self._add_string_param(
            "checkpoint", self.checkpoint, title="Checkpoint", tooltip=get_tooltip("training", "checkpoint")
        )
        setting_values.layout().addLayout(layout)

        self.output_path = None
        self.output_path_param, layout = self._add_string_param(
            "output_path", self.output_path, title="Output Path", tooltip=get_tooltip("training", "output_path")
        )
        setting_values.layout().addLayout(layout)

        self.n_epochs = 100
        self.n_epochs_param, layout = self._add_int_param(
            "n_epochs", self.n_epochs, title="Number of epochs", min_val=1, max_val=1000,
            tooltip=get_tooltip("training", "n_epochs"),
        )
        setting_values.layout().addLayout(layout)

        settings = widgets._make_collapsible(setting_values, title="Advanced Settings")
        return settings

    def _get_loaders(self):
        batch_size = 1
        num_workers = 1 if str(self.device) == "cpu" else 4

        patch_shape = (self.patch_x, self.patch_y)
        dataset = default_sam_dataset(
            raw_paths=str(self.raw_path),
            raw_key=self.raw_key,
            label_paths=str(self.label_path),
            label_key=self.label_key,
            patch_shape=patch_shape,
            with_segmentation_decoder=self.with_segmentation_decoder,
        )

        raw_path_val, label_path_val = self.raw_path_val, self.label_path_val
        if raw_path_val is None:
            # Use 10% of the dataset - at least one image - for validation.
            n_val = max(1, int(0.1 * len(dataset)))
            train_dataset, val_dataset = random_split(dataset, lengths=[len(dataset) - n_val, n_val])
        else:
            train_dataset = dataset
            val_dataset = default_sam_dataset(
                raw_paths=str(raw_path_val),
                raw_key=self.raw_key,
                label_paths=str(label_path_val),
                label_key=self.label_key,
                patch_shape=patch_shape,
                with_segmentation_decoder=self.with_segmentation_decoder,
            )

        train_loader = torch_em.segmentation.get_data_loader(
            train_dataset, batch_size=batch_size, shuffle=True, num_workers=num_workers,
        )
        val_loader = torch_em.segmentation.get_data_loader(
            val_dataset, batch_size=batch_size, shuffle=True, num_workers=num_workers,
        )
        return train_loader, val_loader

    def _get_model_type(self):
        # Consolidate the initial model name, the checkpoint path and the model type according to the configuration.
        if self.initial_model is None or self.initial_model in ("None", ""):
            model_type = CONFIGURATIONS[self.configuration]["model_type"]
        else:
            model_type = self.initial_model[:5]
            if model_type != CONFIGURATIONS[self.configuration]["model_type"]:
                warnings.warn(
                    f"You have changed the model type for your chosen configuration {self.configuration} "
                    f"from {CONFIGURATIONS[self.configuration]['model_type']} to {model_type}. "
                    "The training may be very slow or not work at all."
                )
        assert model_type is not None
        return model_type

    # Make sure that the raw and label paths have been passed.
    # If they haven't, show an error message.
    # (We could do a more extensive validation here, but for now keep it minimal.)
    def _validate_inputs(self):
        missing_raw = self.raw_path is None or not os.path.exists(self.raw_path)
        missing_label = self.label_path is None or not os.path.exists(self.label_path)
        if missing_raw or missing_label:
            msg = ""
            if missing_raw:
                msg += "The path to the raw data is missing or does not exist. "
            if missing_label:
                msg += "The path to the label data is missing or does not exist."
            return widgets._generate_message("error", msg)
        return False

    def __call__(self, skip_validate=False):
        if not skip_validate and self._validate_inputs():
            return

        # Set up the progress bar and signals for using it within a threadworker.
        pbar, pbar_signals = widgets._create_pbar_for_threadworker()

        model_type = self._get_model_type()
        if self.checkpoint is None:
            model_registry = util.models()
            checkpoint_path = model_registry.fetch(model_type)
        else:
            checkpoint_path = self.checkpoint

        # @thread_worker()
        def run_training():
            train_loader, val_loader = self._get_loaders()
            train_sam_for_configuration(
                name=self.name, configuration=self.configuration,
                train_loader=train_loader, val_loader=val_loader,
                checkpoint_path=checkpoint_path,
                with_segmentation_decoder=self.with_segmentation_decoder,
                model_type=model_type, device=self.device,
                n_epochs=self.n_epochs, pbar_signals=pbar_signals,
            )

            # The best checkpoint after training.
            export_checkpoint = os.path.join("checkpoints", self.name, "best.pt")
            assert os.path.exists(export_checkpoint), export_checkpoint

            output_path = _export_helper(
                "", self.name, self.output_path, model_type, self.with_segmentation_decoder, val_loader
            )
            pbar_signals.pbar_stop.emit()
            return output_path

        path = run_training()
        print(f"Training has finished. The trained model is saved at {path}.")
        # worker = run_training()
        # worker.returned.connect(lambda path: print(f"Training has finished. The trained model is saved at {path}."))
        # worker.start()
        # return worker
Inherited Members: PyQt5.QtWidgets.QWidget, PyQt5.QtCore.QObject, PyQt5.QtGui.QPaintDevice